--- linux-3.13.0.orig/Documentation/ABI/removed/net_dma +++ linux-3.13.0/Documentation/ABI/removed/net_dma @@ -0,0 +1,8 @@ +What: tcp_dma_copybreak sysctl +Date: Removed in kernel v3.13 +Contact: Dan Williams +Description: + Formerly the lower limit, in bytes, of the size of socket reads + that will be offloaded to a DMA copy engine. Removed due to + coherency issues of the cpu potentially touching the buffers + while dma is in flight. --- linux-3.13.0.orig/Documentation/ABI/stable/sysfs-firmware-opal-dump +++ linux-3.13.0/Documentation/ABI/stable/sysfs-firmware-opal-dump @@ -0,0 +1,41 @@ +What: /sys/firmware/opal/dump +Date: Feb 2014 +Contact: Stewart Smith +Description: + This directory exposes interfaces for interacting with + the FSP and platform dumps through OPAL firmware interface. + + This is only for the powerpc/powernv platform. + + initiate_dump: When '1' is written to it, + we will initiate a dump. + Read this file for supported commands. + + 0xXX-0xYYYY: A directory for dump of type 0xXX and + id 0xYYYY (in hex). The name of this + directory should not be relied upon to + be in this format, only that it's unique + among all dumps. For determining the type + and ID of the dump, use the id and type files. + Do not rely on any particular size of dump + type or dump id. + + Each dump has the following files: + id: An ASCII representation of the dump ID + in hex (e.g. '0x01') + type: An ASCII representation of the type of + dump in the format "0x%x %s" with the ID + in hex and a description of the dump type + (or 'unknown'). + Type '0xffffffff unknown' is used when + we could not get the type from firmware. + e.g. '0x02 System/Platform Dump' + dump: A binary file containing the dump. + The size of the dump is the size of this file. + acknowledge: When 'ack' is written to this, we will + acknowledge that we've retrieved the + dump to the service processor. It will + then remove it, making the dump + inaccessible. + Reading this file will get a list of + supported actions. --- linux-3.13.0.orig/Documentation/ABI/stable/sysfs-firmware-opal-elog +++ linux-3.13.0/Documentation/ABI/stable/sysfs-firmware-opal-elog @@ -0,0 +1,60 @@ +What: /sys/firmware/opal/elog +Date: Feb 2014 +Contact: Stewart Smith +Description: + This directory exposes error log entries retrieved + through the OPAL firmware interface. + + Each error log is identified by a unique ID and will + exist until explicitly acknowledged to firmware. + + Each log entry has a directory in /sys/firmware/opal/elog. + + Log entries may be purged by the service processor + before retrieved by firmware or retrieved/acknowledged by + Linux if there is no room for more log entries. + + In the event that Linux has retrieved the log entries + but not explicitly acknowledged them to firmware and + the service processor needs more room for log entries, + the only remaining copy of a log message may be in + Linux. + + Typically, a user space daemon will monitor for new + entries, read them out and acknowledge them. + + The service processor may be able to store more log + entries than firmware can, so after you acknowledge + an event from Linux you may instantly get another one + from the queue that was generated some time in the past. + + The raw log format is a binary format. We currently + do not parse this at all in kernel, leaving it up to + user space to solve the problem. In future, we may + do more parsing in kernel and add more files to make + it easier for simple user space processes to extract + more information. 
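As a rough illustration of the user space flow described above (monitor for new entries, read them out, acknowledge them), a minimal sketch in C might look like the following. It assumes only the per-entry sysfs files documented below and does no real parsing or error handling:

  #include <dirent.h>
  #include <stdio.h>

  static void ack_entry(const char *name)
  {
      char path[512];
      FILE *f;

      /* writing 'ack' asks firmware to remove the entry from sysfs */
      snprintf(path, sizeof(path),
               "/sys/firmware/opal/elog/%s/acknowledge", name);
      f = fopen(path, "w");
      if (!f)
          return;
      fputs("ack", f);
      fclose(f);
  }

  int main(void)
  {
      DIR *d = opendir("/sys/firmware/opal/elog");
      struct dirent *de;

      if (!d)
          return 1;
      while ((de = readdir(d)) != NULL) {
          if (de->d_name[0] == '.')
              continue;
          /* read <entry>/raw here and hand it to a parser ... */
          ack_entry(de->d_name);
      }
      closedir(d);
      return 0;
  }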
+ + For each log entry (directory), there are the following + files: + + id: An ASCII representation of the ID of the + error log, in hex - e.g. "0x01". + + type: An ASCII representation of the type id and + description of the type of error log. + Currently just "0x00 PEL" - platform error log. + In the future there may be additional types. + + raw: A read-only binary file that can be read + to get the raw log entry. These are + <16kb, often just hundreds of bytes and + "average" 2kb. + + acknowledge: Writing 'ack' to this file will acknowledge + the error log to firmware (and in turn + the service processor, if applicable). + Shortly after acknowledging it, the log + entry will be removed from sysfs. + Reading this file will list the supported + operations (curently just acknowledge). \ No newline at end of file --- linux-3.13.0.orig/Documentation/ABI/testing/ima_policy +++ linux-3.13.0/Documentation/ABI/testing/ima_policy @@ -23,7 +23,7 @@ [fowner]] lsm: [[subj_user=] [subj_role=] [subj_type=] [obj_user=] [obj_role=] [obj_type=]] - option: [[appraise_type=]] + option: [[appraise_type=]] [permit_directio] base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK] mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] --- linux-3.13.0.orig/Documentation/ABI/testing/sysfs-module +++ linux-3.13.0/Documentation/ABI/testing/sysfs-module @@ -49,3 +49,4 @@ O - out-of-tree module F - force-loaded module C - staging driver module + X - unsigned module --- linux-3.13.0.orig/Documentation/arm/00-INDEX +++ linux-3.13.0/Documentation/arm/00-INDEX @@ -34,3 +34,5 @@ - NWFPE floating point emulator documentation swp_emulation - SWP/SWPB emulation handler/logging description +uefi.txt + - [U]EFI configuration and runtime services documentation --- linux-3.13.0.orig/Documentation/arm/uefi.txt +++ linux-3.13.0/Documentation/arm/uefi.txt @@ -0,0 +1,64 @@ +UEFI, the Unified Extensible Firmware Interface, is a specification +governing the behaviours of compatible firmware interfaces. It is +maintained by the UEFI Forum - http://www.uefi.org/. + +UEFI is an evolution of its predecessor 'EFI', so the terms EFI and +UEFI are used somewhat interchangeably in this document and associated +source code. As a rule, anything new uses 'UEFI', whereas 'EFI' refers +to legacy code or specifications. + +UEFI support in Linux +===================== +Booting on a platform with firmware compliant with the UEFI specification +makes it possible for the kernel to support additional features: +- UEFI Runtime Services +- Retrieving various configuration information through the standardised + interface of UEFI configuration tables. (ACPI, SMBIOS, ...) + +For actually enabling [U]EFI support, enable: +- CONFIG_EFI=y +- CONFIG_EFI_VARS=y or m + +The implementation depends on receiving information about the UEFI environment +in a Flattened Device Tree (FDT) - so is only available with CONFIG_OF. + +UEFI stub +========= +The "stub" is a feature that extends the Image/zImage into a valid UEFI +PE/COFF executable, including a loader application that makes it possible to +load the kernel directly from the UEFI shell, boot menu, or one of the +lightweight bootloaders like Gummiboot or rEFInd. + +The kernel image built with stub support remains a valid kernel image for +booting in non-UEFI environments. + +UEFI kernel support on ARM +========================== +UEFI kernel support on the ARM architectures (arm and arm64) is only available +when boot is performed through the stub. 
+ +When booting in UEFI mode, the stub deletes any memory nodes from a provided DT. +Instead, the kernel reads the UEFI memory map. + +The stub populates the FDT /chosen node with (and the kernel scans for) the +following parameters: +________________________________________________________________________________ +Name | Size | Description +================================================================================ +linux,uefi-system-table | 64-bit | Physical address of the UEFI System Table. +-------------------------------------------------------------------------------- +linux,uefi-mmap-start | 64-bit | Physical address of the UEFI memory map, + | | populated by the UEFI GetMemoryMap() call. +-------------------------------------------------------------------------------- +linux,uefi-mmap-size | 32-bit | Size in bytes of the UEFI memory map + | | pointed to in previous entry. +-------------------------------------------------------------------------------- +linux,uefi-mmap-desc-size | 32-bit | Size in bytes of each entry in the UEFI + | | memory map. +-------------------------------------------------------------------------------- +linux,uefi-mmap-desc-ver | 32-bit | Version of the mmap descriptor format. +-------------------------------------------------------------------------------- +linux,uefi-stub-kern-ver | string | Copy of linux_banner from build. +-------------------------------------------------------------------------------- + +For verbose debug messages, specify 'uefi_debug' on the kernel command line. --- linux-3.13.0.orig/Documentation/arm64/booting.txt +++ linux-3.13.0/Documentation/arm64/booting.txt @@ -85,6 +85,10 @@ Header notes: - code0/code1 are responsible for branching to stext. +- when booting through EFI, code0/code1 are initially skipped. + res5 is an offset to the PE header and the PE header has the EFI + entry point (efi_stub_entry). When the stub has done its work, it + jumps to code0 to resume the normal boot process. The image must be placed at the specified offset (currently 0x80000) from the start of the system RAM and called there. 
The start of the --- linux-3.13.0.orig/Documentation/arm64/memory.txt +++ linux-3.13.0/Documentation/arm64/memory.txt @@ -35,7 +35,7 @@ ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmmemap] -ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device +ffffffbffbc00000 ffffffbffbdfffff 2MB fixed mappings ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space @@ -60,7 +60,7 @@ fffffdfe00000000 fffffdfffbbfffff ~8GB [guard, future vmmemap] -fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk device +fffffdfffbc00000 fffffdfffbdfffff 2MB fixed mappings fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O space --- linux-3.13.0.orig/Documentation/devices.txt +++ linux-3.13.0/Documentation/devices.txt @@ -353,6 +353,7 @@ 133 = /dev/exttrp External device trap 134 = /dev/apm_bios Advanced Power Management BIOS 135 = /dev/rtc Real Time Clock + 137 = /dev/vhci Bluetooth virtual HCI driver 139 = /dev/openprom SPARC OpenBoot PROM 140 = /dev/relay8 Berkshire Products Octal relay card 141 = /dev/relay16 Berkshire Products ISO-16 relay card --- linux-3.13.0.orig/Documentation/devicetree/bindings/arm/pmu.txt +++ linux-3.13.0/Documentation/devicetree/bindings/arm/pmu.txt @@ -7,6 +7,7 @@ Required properties: - compatible : should be one of + "apm,potenza-pmu" "arm,armv8-pmuv3" "arm,cortex-a15-pmu" "arm,cortex-a9-pmu" --- linux-3.13.0.orig/Documentation/devicetree/bindings/arm/psci.txt +++ linux-3.13.0/Documentation/devicetree/bindings/arm/psci.txt @@ -21,7 +21,15 @@ Main node required properties: - - compatible : Must be "arm,psci" + - compatible : should contain at least one of: + + * "arm,psci" : for implementations complying to PSCI versions prior to + 0.2. For these cases function IDs must be provided. + + * "arm,psci-0.2" : for implementations complying to PSCI 0.2. Function + IDs are not required and should be ignored by an OS with PSCI 0.2 + support, but are permitted to be present for compatibility with + existing software when "arm,psci" is later in the compatible list. - method : The method of calling the PSCI firmware. Permitted values are: @@ -45,6 +53,8 @@ Example: +Case 1: PSCI v0.1 only. + psci { compatible = "arm,psci"; method = "smc"; @@ -53,3 +63,28 @@ cpu_on = <0x95c10002>; migrate = <0x95c10003>; }; + + +Case 2: PSCI v0.2 only + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + +Case 3: PSCI v0.2 and PSCI v0.1. + + A DTB may provide IDs for use by kernels without PSCI 0.2 support, + enabling firmware and hypervisors to support existing and new kernels. + These IDs will be ignored by kernels with PSCI 0.2 support, which will + use the standard PSCI 0.2 IDs exclusively. + + psci { + compatible = "arm,psci-0.2", "arm,psci"; + method = "hvc"; + + cpu_on = < arbitrary value >; + cpu_off = < arbitrary value >; + + ... + }; --- linux-3.13.0.orig/Documentation/devicetree/bindings/ata/ahci-platform.txt +++ linux-3.13.0/Documentation/devicetree/bindings/ata/ahci-platform.txt @@ -10,6 +10,8 @@ Optional properties: - dma-coherent : Present if dma operations are coherent +- clocks : a list of phandle + clock specifier pairs +- target-supply : regulator for SATA target power Example: sata@ffe08000 { --- linux-3.13.0.orig/Documentation/devicetree/bindings/ata/apm-xgene.txt +++ linux-3.13.0/Documentation/devicetree/bindings/ata/apm-xgene.txt @@ -0,0 +1,76 @@ +* APM X-Gene 6.0 Gb/s SATA host controller nodes + +SATA host controller nodes are defined to describe on-chip Serial ATA +controllers. Each SATA controller (pair of ports) have its own node. 
+ +Required properties: +- compatible : Shall contain: + * "apm,xgene-ahci" +- reg : First memory resource shall be the AHCI memory + resource. + Second memory resource shall be the host controller + core memory resource. + Third memory resource shall be the host controller + diagnostic memory resource. + 4th memory resource shall be the host controller + AXI memory resource. + 5th optional memory resource shall be the host + controller MUX memory resource if required. +- interrupts : Interrupt-specifier for SATA host controller IRQ. +- clocks : Reference to the clock entry. +- phys : A list of phandles + phy-specifiers, one for each + entry in phy-names. +- phy-names : Should contain: + * "sata-phy" for the SATA 6.0Gbps PHY + +Optional properties: +- status : Shall be "ok" if enabled or "disabled" if disabled. + Default is "ok". + +Example: + sataclk: sataclk { + compatible = "fixed-clock"; + #clock-cells = <1>; + clock-frequency = <100000000>; + clock-output-names = "sataclk"; + }; + + phy2: phy@1f22a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f22a000 0x0 0x100>; + #phy-cells = <1>; + }; + + phy3: phy@1f23a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f23a000 0x0 0x100>; + #phy-cells = <1>; + }; + + sata2: sata@1a400000 { + compatible = "apm,xgene-ahci"; + reg = <0x0 0x1a400000 0x0 0x1000>, + <0x0 0x1f220000 0x0 0x1000>, + <0x0 0x1f22d000 0x0 0x1000>, + <0x0 0x1f22e000 0x0 0x1000>, + <0x0 0x1f227000 0x0 0x1000>; + interrupts = <0x0 0x87 0x4>; + status = "ok"; + clocks = <&sataclk 0>; + phys = <&phy2 0>; + phy-names = "sata-phy"; + }; + + sata3: sata@1a800000 { + compatible = "apm,xgene-ahci-pcie"; + reg = <0x0 0x1a800000 0x0 0x1000>, + <0x0 0x1f230000 0x0 0x1000>, + <0x0 0x1f23d000 0x0 0x1000>, + <0x0 0x1f23e000 0x0 0x1000>, + <0x0 0x1f237000 0x0 0x1000>; + interrupts = <0x0 0x88 0x4>; + status = "ok"; + clocks = <&sataclk 0>; + phys = <&phy3 0>; + phy-names = "sata-phy"; + }; --- linux-3.13.0.orig/Documentation/devicetree/bindings/ata/marvell.txt +++ linux-3.13.0/Documentation/devicetree/bindings/ata/marvell.txt @@ -1,7 +1,7 @@ * Marvell Orion SATA Required Properties: -- compatibility : "marvell,orion-sata" +- compatibility : "marvell,orion-sata" or "marvell,armada-370-sata" - reg : Address range of controller - interrupts : Interrupt controller is using - nr-ports : Number of SATA ports in use. --- linux-3.13.0.orig/Documentation/devicetree/bindings/chosen.txt +++ linux-3.13.0/Documentation/devicetree/bindings/chosen.txt @@ -0,0 +1,46 @@ +The chosen node +--------------- + +The chosen node does not represent a real device, but serves as a place +for passing data between firmware and the operating system, like boot +arguments. Data in the chosen node does not represent the hardware. + + +stdout-path property +-------------------- + +Device trees may specify the device to be used for boot console output +with a stdout-path property under /chosen, as described in ePAPR, e.g. + +/ { + chosen { + stdout-path = "/serial@f00:115200"; + }; + + serial@f00 { + compatible = "vendor,some-uart"; + reg = <0xf00 0x10>; + }; +}; + +If the character ":" is present in the value, this terminates the path. +The meaning of any characters following the ":" is device-specific, and +must be specified in the relevant binding documentation. 
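As an illustration only (not code taken from the kernel), splitting such a value into the device path and the device-specific options could look like this:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      char value[] = "/serial@f00:115200";  /* example stdout-path value */
      char *options = strchr(value, ':');

      if (options)
          *options++ = '\0';                /* ':' terminates the path */

      printf("path: %s\n", value);
      printf("options: %s\n", options ? options : "(none)");
      return 0;
  }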
+ +For UART devices, the preferred binding is a string in the form: + + <baud>{<parity>{<bits>{<flow>}}} + +where + + baud - baud rate in decimal + parity - 'n' (none), 'o' (odd) or 'e' (even) + bits - number of data bits + flow - 'r' (rts) + +For example: 115200n8r + +Implementation note: Linux will look for the property "linux,stdout-path" or +on PowerPC "stdout" if "stdout-path" is not found. However, the +"linux,stdout-path" and "stdout" properties are deprecated. New platforms +should only use the "stdout-path" property. --- linux-3.13.0.orig/Documentation/devicetree/bindings/dma/ti-edma.txt +++ linux-3.13.0/Documentation/devicetree/bindings/dma/ti-edma.txt @@ -29,6 +29,6 @@ dma-channels = <64>; ti,edma-regions = <4>; ti,edma-slots = <256>; - ti,edma-xbar-event-map = <1 12 - 2 13>; + ti,edma-xbar-event-map = /bits/ 16 <1 12 + 2 13>; }; --- linux-3.13.0.orig/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt +++ linux-3.13.0/Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt @@ -0,0 +1,57 @@ +* Synopsys DesignWare APB GPIO controller + +Required properties: +- compatible : Should contain "snps,dw-apb-gpio" +- reg : Address and length of the register set for the device. +- #address-cells : should be 1 (for addressing port subnodes). +- #size-cells : should be 0 (port subnodes). + +The GPIO controller has a configurable number of ports, each of which is +represented as a child node with the following properties: + +Required properties: +- compatible : "snps,dw-apb-gpio-port" +- gpio-controller : Marks the device node as a gpio controller. +- #gpio-cells : Should be 1. It is the pin number. +- reg : The integer port index of the port, a single cell. + +Optional properties: +- interrupt-controller : The first port may be configured to be an interrupt +controller. +- #interrupt-cells : Specifies the number of cells needed to encode an + interrupt. Shall be set to 2. The first cell defines the interrupt number, + the second encodes the trigger flags as described in + Documentation/devicetree/bindings/interrupts.txt +- interrupt-parent : The parent interrupt controller. +- interrupts : The interrupt to the parent controller raised when GPIOs + generate the interrupts. +- snps,nr-gpios : The number of pins in the port, a single cell. + +Example: + +gpio: gpio@20000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x20000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <1>; + snps,nr-gpios = <8>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&vic1>; + interrupts = <0>; + }; + + portb: gpio-controller@1 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <1>; + snps,nr-gpios = <8>; + reg = <1>; + }; +}; --- linux-3.13.0.orig/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt +++ linux-3.13.0/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt @@ -5,7 +5,11 @@ - reg : Offset and length of the register set for the device - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c" - or "marvell,mv78230-i2c" + or "marvell,mv78230-i2c" or "marvell,mv78230-a0-i2c" + Note: Only use "marvell,mv78230-a0-i2c" for a very rare, + initial version of the SoC which had broken offload + support. Linux auto-detects this and sets it + appropriately.
- interrupts : The interrupt number Optional properties : --- linux-3.13.0.orig/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +++ linux-3.13.0/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt @@ -4,11 +4,13 @@ 1) Interrupt client nodes ------------------------- -Nodes that describe devices which generate interrupts must contain an either an -"interrupts" property or an "interrupts-extended" property. These properties -contain a list of interrupt specifiers, one per output interrupt. The format of -the interrupt specifier is determined by the interrupt controller to which the -interrupts are routed; see section 2 below for details. +Nodes that describe devices which generate interrupts must contain an +"interrupts" property, an "interrupts-extended" property, or both. If both are +present, the latter should take precedence; the former may be provided simply +for compatibility with software that does not recognize the latter. These +properties contain a list of interrupt specifiers, one per output interrupt. The +format of the interrupt specifier is determined by the interrupt controller to +which the interrupts are routed; see section 2 below for details. Example: interrupt-parent = <&intc1>; @@ -28,10 +30,6 @@ Example: interrupts-extended = <&intc1 5 1>, <&intc2 1 0>; -A device node may contain either "interrupts" or "interrupts-extended", but not -both. If both properties are present, then the operating system should log an -error and use only the data in "interrupts". - 2) Interrupt controller nodes ----------------------------- --- linux-3.13.0.orig/Documentation/devicetree/bindings/net/apm-xgene-enet.txt +++ linux-3.13.0/Documentation/devicetree/bindings/net/apm-xgene-enet.txt @@ -0,0 +1,68 @@ +APM X-Gene SoC Ethernet nodes + +Ethernet nodes are defined to describe on-chip ethernet interfaces in +APM X-Gene SoC. + +Required properties for all the ethernet interfaces: +- compatible: Should be "apm,xgene-enet" +- reg: Address and length of the register set for the device. It contains the + information of registers in the same order as described by reg-names +- reg-names: Should contain the register set names + - "enet_csr": Ethernet control and status register address space + - "ring_csr": Descriptor ring control and status register address space + - "ring_cmd": Descriptor ring command register address space +- interrupts: Ethernet main interrupt +- clocks: Reference to the clock entry. +- local-mac-address: MAC address assigned to this device +- phy-connection-type: Interface type between ethernet device and PHY device + +Required properties for ethernet interfaces that have external PHY: +- phy-handle: Reference to a PHY node connected to this device + +- mdio: Device tree subnode with the following required properties: + - compatible: Must be "apm,xgene-mdio". + - #address-cells: Must be <1>. + - #size-cells: Must be <0>. + + For the phy on the mdio bus, there must be a node with the following fields: + - compatible: PHY identifier. Please refer ./phy.txt for the format. + - reg: The ID number for the phy. + +Optional properties: +- status: Should be "ok" or "disabled" for enabled/disabled. Default is "ok". 
+ +Example: + menetclk: menetclk { + compatible = "apm,xgene-device-clock"; + clock-output-names = "menetclk"; + status = "ok"; + }; + + menet: ethernet@17020000 { + compatible = "apm,xgene-enet"; + status = "disabled"; + reg = <0x0 0x17020000 0x0 0xd100>, + <0x0 0X17030000 0x0 0X400>, + <0x0 0X10000000 0x0 0X200>; + reg-names = "enet_csr", "ring_csr", "ring_cmd"; + interrupts = <0x0 0x3c 0x4>; + clocks = <&menetclk 0>; + local-mac-address = [00 01 73 00 00 01]; + phy-connection-type = "rgmii"; + phy-handle = <&menetphy>; + mdio { + compatible = "apm,xgene-mdio"; + #address-cells = <1>; + #size-cells = <0>; + menetphy: menetphy@3 { + compatible = "ethernet-phy-id001c.c915"; + reg = <0x3>; + }; + + }; + }; + +/* Board-specific peripheral configurations */ +&menet { + status = "ok"; +}; --- linux-3.13.0.orig/Documentation/devicetree/bindings/net/cpsw.txt +++ linux-3.13.0/Documentation/devicetree/bindings/net/cpsw.txt @@ -24,16 +24,18 @@ - ti,hwmods : Must be "cpgmac0" - no_bd_ram : Must be 0 or 1 - dual_emac : Specifies Switch to act as Dual EMAC +- syscon : Phandle to the system control device node, which is + the control module device of the am33x Slave Properties: Required properties: - phy_id : Specifies slave phy id - phy-mode : The interface between the SoC and the PHY (a string that of_get_phy_mode() can understand) -- mac-address : Specifies slave MAC address Optional properties: - dual_emac_res_vlan : Specifies VID to be used to segregate the ports +- mac-address : Specifies slave MAC address Note: "ti,hwmods" field is used to fetch the base address and irq resources from TI, omap hwmod data base during device registration. @@ -58,6 +60,7 @@ active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; + syscon = <&cm>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; phy-mode = "rgmii-txid"; @@ -86,6 +89,7 @@ active_slave = <0>; cpts_clock_mult = <0x80000000>; cpts_clock_shift = <29>; + syscon = <&cm>; cpsw_emac0: slave@0 { phy_id = <&davinci_mdio>, <0>; phy-mode = "rgmii-txid"; --- linux-3.13.0.orig/Documentation/devicetree/bindings/net/micrel-ks8851.txt +++ linux-3.13.0/Documentation/devicetree/bindings/net/micrel-ks8851.txt @@ -7,3 +7,4 @@ Optional properties: - local-mac-address : Ethernet mac address to use +- vdd-supply: supply for Ethernet mac --- linux-3.13.0.orig/Documentation/devicetree/bindings/pci/xgene-pci.txt +++ linux-3.13.0/Documentation/devicetree/bindings/pci/xgene-pci.txt @@ -0,0 +1,52 @@ +* AppliedMicro X-Gene PCIe interface + +Required properties: +- device_type: set to "pci" +- compatible: should contain "xgene,pcie" to identify the core. +- reg: A list of physical base address and length for each set of controller + registers. Must contain an entry for each entry in the reg-names + property. +- reg-names: Must include the following entries: + "csr": controller configuration registers. + "cfg": pcie configuration space registers. +- #address-cells: set to <3> +- #size-cells: set to <2> +- ranges: ranges for the outbound memory, I/O regions. +- dma-ranges: ranges for the inbound memory regions. +- #interrupt-cells: set to <1> +- interrupt-map-mask and interrupt-map: standard PCI properties + to define the mapping of the PCIe interface to interrupt + numbers. +- clocks: from common clock binding: handle to pci clock. + +Optional properties: +- status: Either "ok" or "disabled". 
+ +Example: + +SoC specific DT Entry: + pcie0: pcie@1f2b0000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */ + 0xe0 0xd0000000 0x0 0x00200000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x00 0x00000000 0xe0 0x00000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x10000000 0xe0 0x10000000 0x00 0x80000000>; /* mem */ + dma-ranges = <0x42000000 0x40 0x00000000 0x40 0x00000000 0x40 0x00000000>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; + clocks = <&pcie0clk 0>; + }; + +Board specific DT Entry: + &pcie0 { + status = "ok"; + }; --- linux-3.13.0.orig/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt +++ linux-3.13.0/Documentation/devicetree/bindings/phy/apm-xgene-phy.txt @@ -0,0 +1,79 @@ +* APM X-Gene 15Gbps Multi-purpose PHY nodes + +PHY nodes are defined to describe on-chip 15Gbps Multi-purpose PHY. Each +PHY (pair of lanes) has its own node. + +Required properties: +- compatible : Shall be "apm,xgene-phy". +- reg : PHY memory resource is the SDS PHY access resource. +- #phy-cells : Shall be 1 as it expects one argument for setting + the mode of the PHY. Possible values are 0 (SATA), + 1 (SGMII), 2 (PCIe), 3 (USB), and 4 (XFI). + +Optional properties: +- status : Shall be "ok" if enabled or "disabled" if disabled. + Default is "ok". +- clocks : Reference to the clock entry. +- apm,tx-eye-tuning : Manual control to fine tune the capture of the serial + bit lines from the automatic calibrated position. + Two set of 3-tuple setting for each (up to 3) + supported link speed on the host. Range from 0 to + 127 in unit of one bit period. Default is 10. +- apm,tx-eye-direction : Eye tuning manual control direction. 0 means sample + data earlier than the nominal sampling point. 1 means + sample data later than the nominal sampling point. + Two set of 3-tuple setting for each (up to 3) + supported link speed on the host. Default is 0. +- apm,tx-boost-gain : Frequency boost AC (LSB 3-bit) and DC (2-bit) + gain control. Two set of 3-tuple setting for each + (up to 3) supported link speed on the host. Range is + between 0 to 31 in unit of dB. Default is 3. +- apm,tx-amplitude : Amplitude control. Two set of 3-tuple setting for + each (up to 3) supported link speed on the host. + Range is between 0 to 199500 in unit of uV. + Default is 199500 uV. +- apm,tx-pre-cursor1 : 1st pre-cursor emphasis taps control. Two set of + 3-tuple setting for each (up to 3) supported link + speed on the host. Range is 0 to 273000 in unit of + uV. Default is 0. +- apm,tx-pre-cursor2 : 2st pre-cursor emphasis taps control. Two set of + 3-tuple setting for each (up to 3) supported link + speed on the host. Range is 0 to 127400 in unit uV. + Default is 0x0. +- apm,tx-post-cursor : Post-cursor emphasis taps control. Two set of + 3-tuple setting for Gen1, Gen2, and Gen3. Range is + between 0 to 0x1f in unit of 18.2mV. Default is 0xf. +- apm,tx-speed : Tx operating speed. One set of 3-tuple for each + supported link speed on the host. + 0 = 1-2Gbps + 1 = 2-4Gbps (1st tuple default) + 2 = 4-8Gbps + 3 = 8-15Gbps (2nd tuple default) + 4 = 2.5-4Gbps + 5 = 4-5Gbps + 6 = 5-6Gbps + 7 = 6-16Gbps (3rd tuple default) + +NOTE: PHY override parameters are board specific setting. 
+ +Example: + phy1: phy@1f21a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f21a000 0x0 0x100>; + #phy-cells = <1>; + status = "disabled"; + }; + + phy2: phy@1f22a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f22a000 0x0 0x100>; + #phy-cells = <1>; + status = "ok"; + }; + + phy3: phy@1f23a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f23a000 0x0 0x100>; + #phy-cells = <1>; + status = "ok"; + }; --- linux-3.13.0.orig/Documentation/devicetree/bindings/rng/apm,rng.txt +++ linux-3.13.0/Documentation/devicetree/bindings/rng/apm,rng.txt @@ -0,0 +1,17 @@ +APM X-Gene SoC random number generator. + +Required properties: + +- compatible : should be "apm,xgene-rng" +- reg : specifies base physical address and size of the registers map +- clocks : phandle to clock-controller plus clock-specifier pair +- interrupts : specify the fault interrupt for the RNG device + +Example: + + rng: rng@10520000 { + compatible = "apm,xgene-rng"; + reg = <0x0 0x10520000 0x0 0x100>; + interrupts = <0x0 0x41 0x4>; + clocks = <&rngpkaclk 0>; + }; --- linux-3.13.0.orig/Documentation/devicetree/bindings/rtc/xgene-rtc.txt +++ linux-3.13.0/Documentation/devicetree/bindings/rtc/xgene-rtc.txt @@ -0,0 +1,28 @@ +* APM X-Gene Real Time Clock + +RTC controller for the APM X-Gene Real Time Clock + +Required properties: +- compatible : Should be "apm,xgene-rtc" +- reg: physical base address of the controller and length of memory mapped + region. +- interrupts: IRQ line for the RTC. +- #clock-cells: Should be 1. +- clocks: Reference to the clock entry. + +Example: + +rtcclk: rtcclk { + compatible = "fixed-clock"; + #clock-cells = <1>; + clock-frequency = <100000000>; + clock-output-names = "rtcclk"; +}; + +rtc: rtc@10510000 { + compatible = "apm,xgene-rtc"; + reg = <0x0 0x10510000 0x0 0x400>; + interrupts = <0x0 0x46 0x4>; + #clock-cells = <1>; + clocks = <&rtcclk 0>; +}; --- linux-3.13.0.orig/Documentation/devicetree/bindings/serial/of-serial.txt +++ linux-3.13.0/Documentation/devicetree/bindings/serial/of-serial.txt @@ -37,6 +37,7 @@ - auto-flow-control: one way to enable automatic flow control support. The driver is allowed to detect support for the capability even without this property. +- has-hw-flow-control: the hardware has flow control capability. Example: --- linux-3.13.0.orig/Documentation/devicetree/bindings/spi/efm32-spi.txt +++ linux-3.13.0/Documentation/devicetree/bindings/spi/efm32-spi.txt @@ -3,7 +3,7 @@ Required properties: - #address-cells: see spi-bus.txt - #size-cells: see spi-bus.txt -- compatible: should be "efm32,spi" +- compatible: should be "energymicro,efm32-spi" - reg: Offset and length of the register set for the controller - interrupts: pair specifying rx and tx irq - clocks: phandle to the spi clock @@ -15,7 +15,7 @@ spi1: spi@0x4000c400 { /* USART1 */ #address-cells = <1>; #size-cells = <0>; - compatible = "efm32,spi"; + compatible = "energymicro,efm32-spi"; reg = <0x4000c400 0x400>; interrupts = <15 16>; clocks = <&cmu 20>; --- linux-3.13.0.orig/Documentation/efi-stub.txt +++ linux-3.13.0/Documentation/efi-stub.txt @@ -1,13 +1,21 @@ The EFI Boot Stub --------------------------- -On the x86 platform, a bzImage can masquerade as a PE/COFF image, -thereby convincing EFI firmware loaders to load it as an EFI -executable. 
The code that modifies the bzImage header, along with the -EFI-specific entry point that the firmware loader jumps to are -collectively known as the "EFI boot stub", and live in +On the x86 and ARM platforms, a kernel zImage/bzImage can masquerade +as a PE/COFF image, thereby convincing EFI firmware loaders to load +it as an EFI executable. The code that modifies the bzImage header, +along with the EFI-specific entry point that the firmware loader +jumps to are collectively known as the "EFI boot stub", and live in arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c, -respectively. +respectively. For ARM the EFI stub is implemented in +arch/arm/boot/compressed/efi-header.S and +arch/arm/boot/compressed/efi-stub.c. EFI stub code that is shared +between architectures is in drivers/firmware/efi/efi-stub-helper.c. + +For arm64, there is no compressed kernel support, so the Image itself +masquerades as a PE/COFF image and the EFI stub is linked into the +kernel. The arm64 EFI stub lives in arch/arm64/kernel/efi-entry.S +and arch/arm64/kernel/efi-stub.c. By using the EFI boot stub it's possible to boot a Linux kernel without the use of a conventional EFI boot loader, such as grub or @@ -23,7 +31,10 @@ System Partition (ESP) and renamed with the extension ".efi". Without the extension the EFI firmware loader will refuse to execute it. It's not possible to execute bzImage.efi from the usual Linux file systems -because EFI firmware doesn't have support for them. +because EFI firmware doesn't have support for them. For ARM the +arch/arm/boot/zImage should be copied to the system partition, and it +may not need to be renamed. Similarly for arm64, arch/arm64/boot/Image +should be copied but not necessarily renamed. **** Passing kernel parameters from the EFI shell @@ -63,3 +74,11 @@ because the image we're executing is interpreted by the EFI shell, which understands relative paths, whereas the rest of the command line is passed to bzImage.efi. + + +**** The "dtb=" option + +For the ARM and arm64 architectures, we also need to be able to provide a +device tree to the kernel. This is done with the "dtb=" command line option, +and is processed in the same manner as the "initrd=" option that is +described above. --- linux-3.13.0.orig/Documentation/filesystems/Locking +++ linux-3.13.0/Documentation/filesystems/Locking @@ -65,6 +65,7 @@ struct file *, unsigned open_flag, umode_t create_mode, int *opened); int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*dentry_open)(struct dentry *, struct file *, const struct cred *); locking rules: all may block @@ -93,6 +94,7 @@ update_time: no atomic_open: yes tmpfile: no +dentry_open: no Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on victim. --- linux-3.13.0.orig/Documentation/filesystems/overlayfs.txt +++ linux-3.13.0/Documentation/filesystems/overlayfs.txt @@ -0,0 +1,199 @@ +Written by: Neil Brown + +Overlay Filesystem +================== + +This document describes a prototype for a new approach to providing +overlay-filesystem functionality in Linux (sometimes referred to as +union-filesystems). An overlay-filesystem tries to present a +filesystem which is the result of overlaying one filesystem on top +of the other. + +The result will inevitably fail to look exactly like a normal +filesystem for various technical reasons. The expectation is that +many use cases will be able to ignore these differences.
+ +This approach is 'hybrid' because the objects that appear in the +filesystem do not all appear to belong to that filesystem. In many +cases an object accessed in the union will be indistinguishable +from accessing the corresponding object from the original filesystem. +This is most obvious from the 'st_dev' field returned by stat(2). + +While directories will report an st_dev from the overlay-filesystem, +all non-directory objects will report an st_dev from the lower or +upper filesystem that is providing the object. Similarly st_ino will +only be unique when combined with st_dev, and both of these can change +over the lifetime of a non-directory object. Many applications and +tools ignore these values and will not be affected. + +Upper and Lower +--------------- + +An overlay filesystem combines two filesystems - an 'upper' filesystem +and a 'lower' filesystem. When a name exists in both filesystems, the +object in the 'upper' filesystem is visible while the object in the +'lower' filesystem is either hidden or, in the case of directories, +merged with the 'upper' object. + +It would be more correct to refer to an upper and lower 'directory +tree' rather than 'filesystem' as it is quite possible for both +directory trees to be in the same filesystem and there is no +requirement that the root of a filesystem be given for either upper or +lower. + +The lower filesystem can be any filesystem supported by Linux and does +not need to be writable. The lower filesystem can even be another +overlayfs. The upper filesystem will normally be writable and if it +is it must support the creation of trusted.* extended attributes, and +must provide valid d_type in readdir responses, at least for symbolic +links - so NFS is not suitable. + +A read-only overlay of two read-only filesystems may use any +filesystem type. + +Directories +----------- + +Overlaying mainly involves directories. If a given name appears in both +upper and lower filesystems and refers to a non-directory in either, +then the lower object is hidden - the name refers only to the upper +object. + +Where both upper and lower objects are directories, a merged directory +is formed. + +At mount time, the two directories given as mount options are combined +into a merged directory: + + mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper /overlay + +Then whenever a lookup is requested in such a merged directory, the +lookup is performed in each actual directory and the combined result +is cached in the dentry belonging to the overlay filesystem. If both +actual lookups find directories, both are stored and a merged +directory is created, otherwise only one is stored: the upper if it +exists, else the lower. + +Only the lists of names from directories are merged. Other content +such as metadata and extended attributes are reported for the upper +directory only. These attributes of the lower directory are hidden. + +whiteouts and opaque directories +-------------------------------- + +In order to support rm and rmdir without changing the lower +filesystem, an overlay filesystem needs to record in the upper filesystem +that files have been removed. This is done using whiteouts and opaque +directories (non-directories are always opaque). + +The overlay filesystem uses extended attributes with a +"trusted.overlay." prefix to record these details. + +A whiteout is created as a symbolic link with target +"(overlay-whiteout)" and with xattr "trusted.overlay.whiteout" set to "y". 
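For illustration, a whiteout for an entry "foo" could be created by hand in the upper directory with a sketch like the one below (overlayfs normally creates whiteouts itself on rm/rmdir, and setting trusted.* xattrs requires CAP_SYS_ADMIN; the path used is just an example):

  #include <sys/xattr.h>
  #include <unistd.h>

  /* create the whiteout representation described above at 'upper_path' */
  int make_whiteout(const char *upper_path)
  {
      if (symlink("(overlay-whiteout)", upper_path))
          return -1;
      /* lsetxattr() operates on the symlink itself, not its target */
      return lsetxattr(upper_path, "trusted.overlay.whiteout", "y", 1, 0);
  }

  int main(void)
  {
      return make_whiteout("/upper/foo") ? 1 : 0;
  }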
+When a whiteout is found in the upper level of a merged directory, any +matching name in the lower level is ignored, and the whiteout itself +is also hidden. + +A directory is made opaque by setting the xattr "trusted.overlay.opaque" +to "y". Where the upper filesystem contains an opaque directory, any +directory in the lower filesystem with the same name is ignored. + +readdir +------- + +When a 'readdir' request is made on a merged directory, the upper and +lower directories are each read and the name lists merged in the +obvious way (upper is read first, then lower - entries that already +exist are not re-added). This merged name list is cached in the +'struct file' and so remains as long as the file is kept open. If the +directory is opened and read by two processes at the same time, they +will each have separate caches. A seekdir to the start of the +directory (offset 0) followed by a readdir will cause the cache to be +discarded and rebuilt. + +This means that changes to the merged directory do not appear while a +directory is being read. This is unlikely to be noticed by many +programs. + +seek offsets are assigned sequentially when the directories are read. +Thus if + - read part of a directory + - remember an offset, and close the directory + - re-open the directory some time later + - seek to the remembered offset + +there may be little correlation between the old and new locations in +the list of filenames, particularly if anything has changed in the +directory. + +Readdir on directories that are not merged is simply handled by the +underlying directory (upper or lower). + + +Non-directories +--------------- + +Objects that are not directories (files, symlinks, device-special +files etc.) are presented either from the upper or lower filesystem as +appropriate. When a file in the lower filesystem is accessed in a way +the requires write-access, such as opening for write access, changing +some metadata etc., the file is first copied from the lower filesystem +to the upper filesystem (copy_up). Note that creating a hard-link +also requires copy_up, though of course creation of a symlink does +not. + +The copy_up may turn out to be unnecessary, for example if the file is +opened for read-write but the data is not modified. + +The copy_up process first makes sure that the containing directory +exists in the upper filesystem - creating it and any parents as +necessary. It then creates the object with the same metadata (owner, +mode, mtime, symlink-target etc.) and then if the object is a file, the +data is copied from the lower to the upper filesystem. Finally any +extended attributes are copied up. + +Once the copy_up is complete, the overlay filesystem simply +provides direct access to the newly created file in the upper +filesystem - future operations on the file are barely noticed by the +overlay filesystem (though an operation on the name of the file such as +rename or unlink will of course be noticed and handled). + + +Non-standard behavior +--------------------- + +The copy_up operation essentially creates a new, identical file and +moves it over to the old name. The new file may be on a different +filesystem, so both st_dev and st_ino of the file may change. + +Any open files referring to this inode will access the old data and +metadata. Similarly any file locks obtained before copy_up will not +apply to the copied up file. + +On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and +fsetxattr(2) will fail with EROFS. 
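The O_RDONLY case above can be observed with a small sketch like this, run against a file that still resides on the lower layer (the path is only an example):

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/stat.h>
  #include <unistd.h>

  int main(void)
  {
      int fd = open("/overlay/lower-file", O_RDONLY);  /* example path */

      if (fd < 0)
          return 1;
      if (fchmod(fd, 0644) < 0 && errno == EROFS)
          printf("fchmod() on an O_RDONLY fd failed with EROFS as described\n");
      close(fd);
      return 0;
  }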
+ +If a file with multiple hard links is copied up, then this will +"break" the link. Changes will not be propagated to other names +referring to the same inode. + +Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory +object in overlayfs will not contain valid absolute paths, only +relative paths leading up to the filesystem's root. This will be +fixed in the future. + +Some operations are not atomic, for example a crash during copy_up or +rename will leave the filesystem in an inconsistent state. This will +be addressed in the future. + +Changes to underlying filesystems +--------------------------------- + +Offline changes, when the overlay is not mounted, are allowed to either +the upper or the lower trees. + +Changes to the underlying filesystems while part of a mounted overlay +filesystem are not allowed. If the underlying filesystem is changed, +the behavior of the overlay is undefined, though it will not result in +a crash or deadlock. --- linux-3.13.0.orig/Documentation/filesystems/proc.txt +++ linux-3.13.0/Documentation/filesystems/proc.txt @@ -1377,8 +1377,8 @@ For example, if a task is using all allowed memory, its badness score will be 1000. If it is using half of its allowed memory, its score will be 500. -There is an additional factor included in the badness score: root -processes are given 3% extra memory over other tasks. +There is an additional factor included in the badness score: the current memory +and swap usage is discounted by 3% for root processes. The amount of "allowed" memory depends on the context in which the oom killer was called. If it is due to the memory assigned to the allocating task's cpuset --- linux-3.13.0.orig/Documentation/filesystems/vfs.txt +++ linux-3.13.0/Documentation/filesystems/vfs.txt @@ -362,6 +362,7 @@ int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned open_flag, umode_t create_mode, int *opened); int (*tmpfile) (struct inode *, struct dentry *, umode_t); + int (*dentry_open)(struct dentry *, struct file *, const struct cred *); }; Again, all methods are called without any locks being held, unless @@ -681,6 +682,12 @@ but instead uses bmap to find out where the blocks in the file are and uses those addresses directly. + dentry_open: this is an alternative to f_op->open(), the difference is that + this method may open a file not necessarily originating from the same + filesystem as the one i_op->open() was called on. It may be + useful for stacking filesystems which want to allow native I/O directly + on underlying files. 
+ invalidatepage: If a page has PagePrivate set, then invalidatepage will be called when part or all of the page is to be removed --- linux-3.13.0.orig/Documentation/i2c/busses/i2c-piix4 +++ linux-3.13.0/Documentation/i2c/busses/i2c-piix4 @@ -13,7 +13,7 @@ * AMD SP5100 (SB700 derivative found on some server mainboards) Datasheet: Publicly available at the AMD website http://support.amd.com/us/Embedded_TechDocs/44413.pdf - * AMD Hudson-2, CZ + * AMD Hudson-2, ML, CZ Datasheet: Not publicly available * Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge Datasheet: Publicly available at the SMSC website http://www.smsc.com --- linux-3.13.0.orig/Documentation/input/elantech.txt +++ linux-3.13.0/Documentation/input/elantech.txt @@ -504,9 +504,12 @@ * reg_10 bit 7 6 5 4 3 2 1 0 - 0 0 0 0 0 0 0 A + 0 0 0 0 R F T A A: 1 = enable absolute tracking + T: 1 = enable two finger mode auto correct + F: 1 = disable ABS Position Filter + R: 1 = enable real hardware resolution 6.2 Native absolute mode 6 byte packet format ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --- linux-3.13.0.orig/Documentation/kernel-parameters.txt +++ linux-3.13.0/Documentation/kernel-parameters.txt @@ -652,6 +652,10 @@ /proc//coredump_filter. See also Documentation/filesystems/proc.txt. + cpufreq_driver= [X86] Allow only the named cpu frequency scaling driver + to register. Example: cpufreq_driver=powernow-k8 + Format: { none | STRING } + cpuidle.off=1 [CPU_IDLE] disable the cpuidle sub-system @@ -890,6 +894,12 @@ edd= [EDD] Format: {"off" | "on" | "skip[mbr]"} + efi= [EFI] + Format: { "old_map" } + old_map [X86-64]: switch to the old ioremap-based EFI + runtime services mapping. 32-bit still uses this one by + default. + efi_no_storage_paranoia [EFI; X86] Using this parameter you can use more than 50% of your efi variable storage. Use this parameter only if @@ -962,6 +972,13 @@ parameter will force ia64_sal_cache_flush to call ia64_pal_cache_flush instead of SAL_CACHE_FLUSH. + forcepae [X86-32] + Forcefully enable Physical Address Extension (PAE). + Many Pentium M systems disable PAE but may have a + functionally usable PAE implementation. + Note: This parameter is unsupported, may cause unknown + problems, and will taint the kernel. + ftrace=[tracer] [FTRACE] will set and start the specified tracer as early as possible in order to facilitate early @@ -1121,6 +1138,7 @@ i8042.notimeout [HW] Ignore timeout condition signalled by controller i8042.reset [HW] Reset the controller during init and cleanup i8042.unlock [HW] Unlock (ignore) the keylock + i8042.kbdreset [HW] Reset device connected to KBD port i810= [HW,DRM] --- linux-3.13.0.orig/Documentation/lzo.txt +++ linux-3.13.0/Documentation/lzo.txt @@ -0,0 +1,164 @@ + +LZO stream format as understood by Linux's LZO decompressor +=========================================================== + +Introduction + + This is not a specification. No specification seems to be publicly available + for the LZO stream format. This document describes what input format the LZO + decompressor as implemented in the Linux kernel understands. The file subject + of this analysis is lib/lzo/lzo1x_decompress_safe.c. No analysis was made on + the compressor nor on any other implementations though it seems likely that + the format matches the standard one. The purpose of this document is to + better understand what the code does in order to propose more efficient fixes + for future bug reports. + +Description + + The stream is composed of a series of instructions, operands, and data. 
The + instructions consist of a few bits representing an opcode, and bits forming + the operands for the instruction, whose size and position depend on the + opcode and on the number of literals copied by the previous instruction. The + operands are used to indicate : + + - a distance when copying data from the dictionary (past output buffer) + - a length (number of bytes to copy from dictionary) + - the number of literals to copy, which is retained in variable "state" + as a piece of information for next instructions. + + Optionally depending on the opcode and operands, extra data may follow. These + extra data can be a complement for the operand (eg: a length or a distance + encoded on larger values), or a literal to be copied to the output buffer. + + The first byte of the block follows a different encoding from other bytes; it + seems to be optimized for literal use only, since there is no dictionary yet + prior to that byte. + + Lengths are always encoded on a variable size starting with a small number + of bits in the operand. If the number of bits isn't enough to represent the + length, up to 255 may be added in increments by consuming more bytes with a + rate of at most 255 per extra byte (thus the compression ratio cannot exceed + around 255:1). The variable length encoding using #bits is always the same : + + length = byte & ((1 << #bits) - 1) + if (!length) { + length = ((1 << #bits) - 1) + length += 255*(number of zero bytes) + length += first-non-zero-byte + } + length += constant (generally 2 or 3) + + For references to the dictionary, distances are relative to the output + pointer. Distances are encoded using very few bits belonging to certain + ranges, resulting in multiple copy instructions using different encodings. + Certain encodings involve one extra byte, others involve two extra bytes + forming a little-endian 16-bit quantity (marked LE16 below). + + After any instruction except the large literal copy, 0, 1, 2 or 3 literals + are copied before starting the next instruction. The number of literals that + were copied may change the meaning and behaviour of the next instruction. In + practice, only one instruction needs to know whether 0, less than 4, or more + literals were copied. This is the information stored in the variable "state" + in this implementation. This number of immediate literals to be copied is + generally encoded in the last two bits of the instruction but may also be + taken from the last two bits of an extra operand (eg: distance). + + End of stream is declared when a block copy of distance 0 is seen. Only one + instruction may encode this distance (0001HLLL); it takes one LE16 operand + for the distance, thus requiring 3 bytes. + + IMPORTANT NOTE : in the code some length checks are missing because certain + instructions are called under the assumption that a certain number of bytes + follow because it has already been guaranteed before parsing the instructions. + They just have to "refill" this credit if they consume extra bytes. This is + an implementation design choice independent of the algorithm or encoding. + +Byte sequences + + First byte encoding : + + 0..17 : follow regular instruction encoding, see below. It is worth + noting that codes 16 and 17 will represent a block copy from + the dictionary which is empty, and that they will always be + invalid at this place.
+ + 18..21 : copy 0..3 literals + state = (byte - 17) = 0..3 [ copy literals ] + skip byte + + 22..255 : copy literal string + length = (byte - 17) = 4..238 + state = 4 [ don't copy extra literals ] + skip byte + + Instruction encoding : + + 0 0 0 0 X X X X (0..15) + Depends on the number of literals copied by the last instruction. + If last instruction did not copy any literal (state == 0), this + encoding will be a copy of 4 or more literal, and must be interpreted + like this : + + 0 0 0 0 L L L L (0..15) : copy long literal string + length = 3 + (L ?: 15 + (zero_bytes * 255) + non_zero_byte) + state = 4 (no extra literals are copied) + + If last instruction used to copy between 1 to 3 literals (encoded in + the instruction's opcode or distance), the instruction is a copy of a + 2-byte block from the dictionary within a 1kB distance. It is worth + noting that this instruction provides little savings since it uses 2 + bytes to encode a copy of 2 other bytes but it encodes the number of + following literals for free. It must be interpreted like this : + + 0 0 0 0 D D S S (0..15) : copy 2 bytes from <= 1kB distance + length = 2 + state = S (copy S literals after this block) + Always followed by exactly one byte : H H H H H H H H + distance = (H << 2) + D + 1 + + If last instruction used to copy 4 or more literals (as detected by + state == 4), the instruction becomes a copy of a 3-byte block from the + dictionary from a 2..3kB distance, and must be interpreted like this : + + 0 0 0 0 D D S S (0..15) : copy 3 bytes from 2..3 kB distance + length = 3 + state = S (copy S literals after this block) + Always followed by exactly one byte : H H H H H H H H + distance = (H << 2) + D + 2049 + + 0 0 0 1 H L L L (16..31) + Copy of a block within 16..48kB distance (preferably less than 10B) + length = 2 + (L ?: 7 + (zero_bytes * 255) + non_zero_byte) + Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S + distance = 16384 + (H << 14) + D + state = S (copy S literals after this block) + End of stream is reached if distance == 16384 + + 0 0 1 L L L L L (32..63) + Copy of small block within 16kB distance (preferably less than 34B) + length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte) + Always followed by exactly one LE16 : D D D D D D D D : D D D D D D S S + distance = D + 1 + state = S (copy S literals after this block) + + 0 1 L D D D S S (64..127) + Copy 3-4 bytes from block within 2kB distance + state = S (copy S literals after this block) + length = 3 + L + Always followed by exactly one byte : H H H H H H H H + distance = (H << 3) + D + 1 + + 1 L L D D D S S (128..255) + Copy 5-8 bytes from block within 2kB distance + state = S (copy S literals after this block) + length = 5 + L + Always followed by exactly one byte : H H H H H H H H + distance = (H << 3) + D + 1 + +Authors + + This document was written by Willy Tarreau on 2014/07/19 during an + analysis of the decompression code available in Linux 3.16-rc5. The code is + tricky, it is possible that this document contains mistakes or that a few + corner cases were overlooked. In any case, please report any doubt, fix, or + proposed updates to the author(s) so that the document can be updated. --- linux-3.13.0.orig/Documentation/misc-devices/apm-xgene-qmtm +++ linux-3.13.0/Documentation/misc-devices/apm-xgene-qmtm @@ -0,0 +1,149 @@ +AppliedMicro X-Gene SOC Queue Manager/Traffic Manager Document + +Copyright (c) 2013 Applied Micro Circuits Corporation. 
+Author: Ravi Patel + +This program is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License version 2 as published by +the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + + +Overview: +QMTM is a device which interacts with CPU, Ethernet, PktDMA and Security +subsystems through AXI BUS. Its centralized resource manager/driver exports +APIs for CPU, Ethernet, PktDMA and Security subsystems to +1. Initialize & allocate queue & pbn. +2. Read queue state so that subsystems driver knows how much more work it can + offload to its subsystem. +3. Apply QoS for subsystems on their queues. + +Layout: +The layout represents run-time flow of messages between Ethernet subsystem, CPU +and QMTM in APM X-Gene SOC. PktDMA and Security subsystems interfaces with QMTM +in the same way as Ethernet. + + + CPU + o-------------------------o + | | + +----|[2] {5}|----+ + | | [1] {4} | | +Register | o------+-----------+------o | Register +Write | CPU WR | | CPU RD | Write +to start v MSG v ^ MSG v to notify +packet | | DDR | | packet +transmit | o--------+-----o-----+--------o | received + | | Q0 M . M M | M M . M Q1 | | + | | for S . S S | S S . S for | | + | | ETH G . G G | G G . G ETH | | + | | TX n . 2 1 | 1 2 . n RX | | + | o----------+---o---+----------o | + | | | | + v v ^ v + | | | | +o--------|-------------|-------|-------------|--------o +| | | | | | Coherent +| v v ^ v | I/O +| | | | | | BUS +o--------|-------------|-------|-------------|--------o + | [3] | | {3} | + v QMTM RD v ^ QMTM WR v + | MSG | | MSG | + o----+----o--------+---o---+--------o----+----o + | Queue 0 | | | Queue 1 | + | Command o ETH PBN 0 o CPU PBN 0 o Command | Queue Manager/ + | Register| | |Register | Traffic Manager + o---------o---+--------o--------+---o---------o + | | + v ^ +Ethernet RD MSG | [4] ETH {2} | Ethernet WR MSG +from its PBN o---+-----------------+---o to CPU PBN + | | + | Egress MAC Ingress MAC | + | [5] {1} | + o------|-----------|------o + v ^ + | | + TX Data RX Data + +Transmit Flow +[1] CPU (Ethernet driver) prepares 32 byte Ethernet egress work message and + enqueues the message to the queue in DDR. +[2] CPU (Ethernet driver) notifies QMTM that there is a message enqueued in + Ethernet transmit queue (e.g. Q0). +[3] QMTM prefetches the Ethernet egress work messages into Ethernet PBN + (e.g. ETH PBN 0). +[4] Ethernet reads work messages from its PBN. +[5] Ethernet DMAs payload from DDR to its egress FIFO and transmits data out. + +Receive Flow +{1} Ethernet receives data and DMAs payload from its ingress FIFO to DDR and + prepares a Ethernet ingress work message. +{2} Ethernet pushes work message to QMTM. +{3} QMTM prefetches the Ethernet ingress work messages into CPU PBN + (e.g. CPU PBN 0) and then interrupts CPU. +{4} CPU (Ethernet driver) dequeues the 32 bytes Ethernet ingress work message + from the queue in DDR. +{5} CPU (Ethernet driver) notifies QMTM that it dequeued a message from + Ethernet receive queue (e.g. Q1). + + +Definition: +1. QMTM (Queue/Traffic Manager) + QMTM manages queues and pbns for CPU, Ethernet, PktDMA and Security + Subsystems. It also performs flow control and QoS on queues. + +2. 
Slave/Client/Agent + Ethernet, PktDMA (XOR Engine) and Security Engine Subsystems & CPU whose + queues and pbn are managed by QMTM are called slave/client/agent. + +3. Queue + Queue is circular FIFO memory for QMTM hardware in which a 16 bytes, + 32 bytes or 64 bytes message is dequeued OR enqueued between CPU and + Ethernet, PktDMA and Security Engine Subsystems. + +4. PB + Each subsystem in the APM X-Gene SoC device has a prefetch buffer for + storing messages in order to pipeline the QMTM processing latency with the + subsystem processing latency. The number of buffers prefetched depends on + the QM/TM processing latency and the subsystem data rate. + + There are multiple PB for a subsystem, each PB is assigned a number which is + called PBN. Each subsystem and CPU can have max 32 pbns, however QMTM limits + how many pbns it supports for each subsystem. + +5. Message + The subsystems in the APM X-Gene SoC communicate with a central Queue Manager + (QM) that manages all the messages queued to the subsystems. The subsystems + communicate with the QM using messages that include information about the + work to be performed and the location of the buffers or data on which the + work is to be performed. + + A message consists of 16, 32 or 64 bytes which resides in a queue. A message + which is + a. 16 bytes contains information of data buffer, length, etc. allocated by + subsystem driver. + b. 32 or 64 bytes contains information about the work to be done for a + subsystem. Each subsystem defines its own format of work message. + + Each subsystem defines their own format of work message. A message (or queue + descriptor) has attribute fields (QMTM specific) which are common for all + subsystem work messages. The remaining fields of a message are specific to + subsystem. QMTM device doesn't have any knowledge of these subsystems + specific fields and the data operation which subsystem is going to perform + using these fields. + + e.g. + 1. Ethernet work message includes data address & length which is used by + Ethernet DMA engine for copying the data to/from its internal FIFO. + 2. PktDMA work message includes multiple data addresses & lengths for + doing scatter/gather, XOR operations and result data address/es to give + back result to the CPU (driver). + 3. Security work message includes data address & length for doing encryption + or decryption, the type of encryption or decryption and result data + address to give back result to the CPU (driver). --- linux-3.13.0.orig/Documentation/misc-devices/mei/mei-amt-version.c +++ linux-3.13.0/Documentation/misc-devices/mei/mei-amt-version.c @@ -115,8 +115,6 @@ struct mei_client *cl; struct mei_connect_client_data data; - mei_deinit(me); - me->verbose = verbose; me->fd = open("/dev/mei", O_RDWR); --- linux-3.13.0.orig/Documentation/module-signing.txt +++ linux-3.13.0/Documentation/module-signing.txt @@ -53,7 +53,8 @@ If this is off (ie. "permissive"), then modules for which the key is not available and modules that are unsigned are permitted, but the kernel will - be marked as being tainted. + be marked as being tainted, and the concerned modules will be marked as + tainted, shown with the character 'X'. If this is on (ie. 
"restrictive"), only modules that have a valid signature that can be verified by a public key in the kernel's possession --- linux-3.13.0.orig/Documentation/networking/fman.txt +++ linux-3.13.0/Documentation/networking/fman.txt @@ -0,0 +1,232 @@ +The FMan driver +Sysfs file description + +Author: Andrei Sorin Pistirica +Updated: 2010-12-08 + +FMAN SYSFS statistics counters +------------------------------ +The document contains statistics counters description for every type of +FMAN port and for all FMAN modules as well (FMAN, FMAN:DMA and FMAN:PCD). +Every counter is related to a specific register mapped as well. + +Port type: Tx +------------- +port_frame: FMBM_TFRC + "counts the total number of frames flowing on the Tx port, regardless of + whether they got transmitted or discarded." + +port_discard_frame: FMBM_TFDC + "counts the number of frames that were discarded due to DMA error indication + that was sensed during the process of frame payload or frame context + loading." + +port_dealloc_buf:FMBM_TBDC + "counts the number of buffer deallocate operations." + +port_enq_total:FMQM_PnETFC + "counts the number of enqueue operations preformed for this portID." + +port_length_err:FMBM_TFLEDC + "counts the number of frames that were discarded due to frame length error." + +port_unsupprted_format:FMBM_TFUFDC + "counts the number of frames that were discarded due to frame format + error - frame descriptor contains unsupported format." + +port_deq_total:FMQM_PnDTFC + "counts the total number of FDs that dequeued from the QMan for this + portID." + +port_deq_from_default:FMQM_PnDDCFQC + "counts the number of time the portID used the default confirmation FQID + from the BMI.s FMBM_TDCFQID" + +port_deq_confirm:FMQM_PnDCC + "counts the number of times the PortID got a FD from the dequeue response + that require confirmation." + +Port type: Rx +------------- +port_frame:FMBM_RFRC + "counts the total number of frames received on the Rx port." + +port_discard_frame:FMBM_RFDC + "counts the number of frames received on the Rx port that were not able + to enter the receive queue system due to WRED algorithm." + +port_dealloc_buf:FMBM_RBDC + "counts the number of buffer deallocate operations." + +port_enq_total:FMQM_PnETFC + "counts the number of enqueue operations preformed for this portID" + +port_rx_bad_frame:FMBM_RBFC + "counts the number of frames received on the Rx port with an error + indication" + +port_rx_large_frame:FMBM_RLFC + "counts the number of frames received on the Rx port with an over size + indication. Over size indication is marked when frame size exceeds the + maximum configured in the corresponding MAC configuration register" + +port_rx_out_of_buffers_discard:FMBM_RODC + "counts the number of frames received on the Rx port that were not able + to enter the receive queue system due to lack of external buffers, or + the lack of suitable buffers that cause the S/G list to grow beyond 16 + entries, or the lack of suitable buffer to hold S/G list (including + start margin), or the lack of suitable buffer to hold the frame header + (including start margin)." + +port_rx_filter_frame:FMBM_RFFC + "counts the number of frames received on the Rx port that were filtered + out by the parse and classify modules of the Fman" + +Port type: Oh +------------- +Oh ports have the same counters as RX and TX ports. 
The counters are listed +below and the descriptions can be found under the other port types (above): + port_frame + port_discard_frame + port_dealloc_buf + port_enq_total + port_length_err + port_unsupprted_format + port_deq_total + port_deq_from_default + port_deq_confirm + port_rx_bad_frame + port_rx_large_frame + port_rx_out_of_buffers_discard + +Fman: +----- +enq_total_frame:FMQM_ETFC + "counts the total number of enqueue operations the QMI performed." + +deq_total_frame:FMQM_DTFC + "counts the total number of FDs that were dequeued from the QMan." + +deq_0:FMQM_DC0 + "counts the number of times the QMI received a NULL FD from the QMan as a + response to a dequeue request (command)" + +deq_1:FMQM_DC1 + "counts the number of times the QMI got 1 FD from the QMan as a response + to a dequeue request (command)" + +deq_2:FMQM_DC2 + "counts the number of times the QMI got 2 FDs from the QMan as a response + to a dequeue request (command)" + +deq_from_default:FMQM_DDCFQC + "counts the number of times the QMI used the default confirmation FQID + from the BMI's FMBM_TCFQID" + +deq_from_context:FMQM_CBCFQC + "counts the number of times the QMI used the override confirmation FQID + from the FD command field" + +deq_from_fd:FMQM_DFOCFQC + "counts the number of times the QMI used the override confirmation FQID + from the FD command field" + +deq_confirm:FMQM_DCC + "counts the number of times the QMI gets an FD from the dequeue response + that requires confirmation" + +Fman: DMA +--------- +The FMAN:DMA counters are read from the FMDMSR register. They report bus error +events that are recognized by the FMan DMA controller on all of the +FMan DMA channels. + The counters are: + cmq_not_empty + bus_error + read_buf_ecc_error + write_buf_ecc_sys_error + write_buf_ecc_fm_error + +Fman: PCD +--------- +pcd_kg_total:FMKG_TPC + "counts the number of packets passed in the keygen on all schemes" + +pcd_plcr_yellow:FMPL_YPC + "counts the total number of YELLOW packets that exit the Policer" + +pcd_plcr_red:FMPL_RPC + "counts the total number of RED packets that exit the Policer" + +pcd_plcr_recolored_to_red:FMPL_RRPC + "counts the number of packets that changed color to RED by the Policer" + +pcd_plcr_recolored_to_yellow:FMPL_RYPC + "counts the number of packets that changed color to YELLOW by the Policer" + +pcd_plcr_total:FMPL_TPC + "counts the total number of packets passed in the Policer" + +pcd_plcr_length_mismatch:FMPL_FLMC + "counts the number of packets with length mismatch indicated by an offset + value of 0xFF in the selected parser result entry or when the calculated + frame offset result is greater than the packet full length provided by + FD length" + +pcd_prs_parse_dispatch:FMPR_PDS + "counts the number of times the parser block was dispatched by FPM" + +pcd_prs_l2_parse_result_returned:FMPR_L2RRS + "counts the number of times L2 parse result was returned (including with + errors)" + +pcd_prs_l3_parse_result_returned:FMPR_L3RRS + "counts the number of times L3 parse result was returned (including with + errors)" + +pcd_prs_l4_parse_result_returned:FMPR_L4RRS + "counts the number of times L4 parse result was returned (including with + errors)" + +pcd_prs_shim_parse_result_returned:FMPR_SRRS + "counts the number of times Shim parse result was returned (including with + errors)" + +pcd_prs_l2_parse_result_returned_with_err:FMPR_L2RRES + "counts the number of times L2 parse result was returned with errors" + +pcd_prs_l3_parse_result_returned_with_err:FMPR_L3RRES + "counts the number of times L3 parse result was returned
with errors" + +pcd_prs_l4_parse_result_returned_with_err:FMPR_L4RRES + "counts the number of times L4 parse result was returned with errors" + +pcd_prs_shim_parse_result_returned_with_err:FMPR_SRRES + "counts the number of times Shim parse result was returned with errors" + +pcd_prs_soft_prs_cycles:FMPR_SPCS + "counts the number of cycles spent executing soft parser instruction + (including stall cycles)" + +pcd_prs_soft_prs_stall_cycles:FMPR_SPSCS + "counts the number of cycles stalled waiting for parser internal memory + reads while executing soft parser instruction." + +pcd_prs_hard_prs_cycle_incl_stall_cycles:FMPR_HXSCS + "counts the number of cycles spent executing hard parser (including stall + cycles)" + +pcd_prs_muram_read_cycles:FMPR_MRCS + "counts the number of cycles while performing FMan Memory read" + +pcd_prs_muram_read_stall_cycles:FMPR_MRSCS + "counts the number of cycles stalled while performing FMan Memory read" + +pcd_prs_muram_write_cycles:FMPR_MWCS + "counts the number of cycles while performing FMan Memory write" + +pcd_prs_muram_write_stall_cycles:FMPR_MWSCS + "counts the number of cycles stalled while performing FMan Memory write" + +pcd_prs_fpm_command_stall_cycles:FMPR_FCSCS + "counts the number of cycles stalled while performing a FPM command" --- linux-3.13.0.orig/Documentation/networking/i40evf.txt +++ linux-3.13.0/Documentation/networking/i40evf.txt @@ -0,0 +1,47 @@ +Linux* Base Driver for Intel(R) Network Connection +================================================== + +Intel XL710 X710 Virtual Function Linux driver. +Copyright(c) 2013 Intel Corporation. + +Contents +======== + +- Identifying Your Adapter +- Known Issues/Troubleshooting +- Support + +This file describes the i40evf Linux* Base Driver for the Intel(R) XL710 +X710 Virtual Function. + +The i40evf driver supports XL710 and X710 virtual function devices that +can only be activated on kernels with CONFIG_PCI_IOV enabled. + +The guest OS loading the i40evf driver must support MSI-X interrupts. + +Identifying Your Adapter +======================== + +For more information on how to identify your adapter, go to the Adapter & +Driver ID Guide at: + + http://support.intel.com/support/go/network/adapter/idguide.htm + +Known Issues/Troubleshooting +============================ + + +Support +======= + +For general information, go to the Intel support website at: + + http://support.intel.com + +or the Intel Wired Networking project hosted by Sourceforge at: + + http://sourceforge.net/projects/e1000 + +If an issue is identified with the released source code on the supported +kernel with a supported adapter, email the specific information related +to the issue to e1000-devel@lists.sf.net --- linux-3.13.0.orig/Documentation/networking/ip-sysctl.txt +++ linux-3.13.0/Documentation/networking/ip-sysctl.txt @@ -542,12 +542,6 @@ not receive a window scaling option from them. Default: 0 -tcp_dma_copybreak - INTEGER - Lower limit, in bytes, of the size of socket reads that will be - offloaded to a DMA copy engine, if one is present in the system - and CONFIG_NET_DMA is enabled. - Default: 4096 - tcp_thin_linear_timeouts - BOOLEAN Enable dynamic triggering of linear timeouts for thin streams. If set, a check is performed upon retransmission by timeout to --- linux-3.13.0.orig/Documentation/networking/timestamping.txt +++ linux-3.13.0/Documentation/networking/timestamping.txt @@ -85,7 +85,7 @@ by the network device and will be empty without that support. 
-SIOCSHWTSTAMP: +SIOCSHWTSTAMP, SIOCGHWTSTAMP: Hardware time stamping must also be initialized for each device driver that is expected to do hardware time stamping. The parameter is defined in @@ -115,6 +115,10 @@ space is responsible to ensure that multiple processes don't interfere with each other and that the settings are reset. +Any process can read the actual configuration by passing this +structure to ioctl(SIOCGHWTSTAMP) in the same way. However, this has +not been implemented in all drivers. + /* possible values for hwtstamp_config->tx_type */ enum { /* @@ -157,7 +161,8 @@ A driver which supports hardware time stamping must support the SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with -the actual values as described in the section on SIOCSHWTSTAMP. +the actual values as described in the section on SIOCSHWTSTAMP. It +should also support SIOCGHWTSTAMP. Time stamps for received packets must be stored in the skb. To get a pointer to the shared time stamp structure of the skb call skb_hwtstamps(). Then --- linux-3.13.0.orig/Documentation/oops-tracing.txt +++ linux-3.13.0/Documentation/oops-tracing.txt @@ -265,6 +265,9 @@ 13: 'O' if an externally-built ("out-of-tree") module has been loaded. + 14: 'X' if an unsigned module has been loaded in a kernel supporting + module signature. + The primary reason for the 'Tainted: ' string is to tell kernel debuggers if this is a clean kernel or if anything unusual has occurred. Tainting is permanent: even if an offending module is --- linux-3.13.0.orig/Documentation/ramoops.txt +++ linux-3.13.0/Documentation/ramoops.txt @@ -14,11 +14,19 @@ 1. Ramoops concepts -Ramoops uses a predefined memory area to store the dump. The start and size of -the memory area are set using two variables: +Ramoops uses a predefined memory area to store the dump. The start and size +and type of the memory area are set using three variables: * "mem_address" for the start * "mem_size" for the size. The memory size will be rounded down to a power of two. + * "mem_type" to specifiy if the memory type (default is pgprot_writecombine). + +Typically the default value of mem_type=0 should be used as that sets the pstore +mapping to pgprot_writecombine. Setting mem_type=1 attempts to use +pgprot_noncached, which only works on some platforms. This is because pstore +depends on atomic operations. At least on ARM, pgprot_noncached causes the +memory to be mapped strongly ordered, and atomic operations on strongly ordered +memory are implementation defined, and won't work on many ARMs such as omaps. The memory area is divided into "record_size" chunks (also rounded down to power of two) and each oops/panic writes a "record_size" chunk of @@ -55,6 +63,7 @@ static struct ramoops_platform_data ramoops_data = { .mem_size = <...>, .mem_address = <...>, + .mem_type = <...>, .record_size = <...>, .dump_oops = <...>, .ecc = <...>, --- linux-3.13.0.orig/Documentation/sound/alsa/ALSA-Configuration.txt +++ linux-3.13.0/Documentation/sound/alsa/ALSA-Configuration.txt @@ -2026,8 +2026,8 @@ ------------------- Module for sound cards based on the Asus AV66/AV100/AV200 chips, - i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX, - HDAV1.3 (Deluxe), and HDAV1.3 Slim. + i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe), + Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim. This module supports autoprobe and multiple cards. 
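
  Referring back to the SIOCGHWTSTAMP addition to timestamping.txt above, the
  sketch below shows how user space might read back a device's current
  hardware timestamping configuration. It assumes an interface named "eth0"
  and uapi headers that define SIOCGHWTSTAMP; drivers that have not
  implemented the new ioctl will simply fail the call (e.g. with EOPNOTSUPP).

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>      /* SIOCGHWTSTAMP */
    #include <linux/net_tstamp.h>   /* struct hwtstamp_config */

    int main(void)
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;

            memset(&cfg, 0, sizeof(cfg));
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            /* Same calling convention as SIOCSHWTSTAMP, but read-only */
            if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {
                    perror("SIOCGHWTSTAMP");
                    return 1;
            }

            printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
            return 0;
    }
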
--- linux-3.13.0.orig/Documentation/stable_kernel_rules.txt +++ linux-3.13.0/Documentation/stable_kernel_rules.txt @@ -29,6 +29,9 @@ Procedure for submitting patches to the -stable tree: + - If the patch covers files in net/ or drivers/net please follow netdev stable + submission guidelines as described in + Documentation/networking/netdev-FAQ.txt - Send the patch, after verifying that it follows the above rules, to stable@vger.kernel.org. You must note the upstream commit ID in the changelog of your submission, as well as the kernel version you wish --- linux-3.13.0.orig/Documentation/sysctl/kernel.txt +++ linux-3.13.0/Documentation/sysctl/kernel.txt @@ -33,6 +33,7 @@ - domainname - hostname - hotplug +- kexec_load_disabled - kptr_restrict - kstack_depth_to_print [ X86 only ] - l2cr [ PPC only ] @@ -287,6 +288,18 @@ ============================================================== +kexec_load_disabled: + +A toggle indicating if the kexec_load syscall has been disabled. This +value defaults to 0 (false: kexec_load enabled), but can be set to 1 +(true: kexec_load disabled). Once true, kexec can no longer be used, and +the toggle cannot be set back to false. This allows a kexec image to be +loaded before disabling the syscall, allowing a system to set up (and +later use) an image without it being altered. Generally used together +with the "modules_disabled" sysctl. + +============================================================== + kptr_restrict: This toggle indicates whether restrictions are placed on @@ -331,7 +344,7 @@ in an otherwise modular kernel. This toggle defaults to off (0), but can be set true (1). Once true, modules can be neither loaded nor unloaded, and the toggle cannot be set back -to false. +to false. Generally used with the "kexec_load_disabled" toggle. ============================================================== @@ -742,6 +755,8 @@ 1024 - A module from drivers/staging was loaded. 2048 - The system is working around a severe firmware bug. 4096 - An out-of-tree module has been loaded. +8192 - An unsigned module has been loaded in a kernel supporting module + signature. ============================================================== --- linux-3.13.0.orig/Documentation/sysctl/vm.txt +++ linux-3.13.0/Documentation/sysctl/vm.txt @@ -669,7 +669,8 @@ set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8) The initial value is zero. Kernel does not use this value at boot time to set -the high water marks for each per cpu page list. +the high water marks for each per cpu page list. If the user writes '0' to this +sysctl, it will revert to this default behavior. ============================================================== --- linux-3.13.0.orig/Documentation/video4linux/gspca.txt +++ linux-3.13.0/Documentation/video4linux/gspca.txt @@ -55,6 +55,7 @@ sonixj 0458:7025 Genius Eye 311Q sn9c20x 0458:7029 Genius Look 320s sonixj 0458:702e Genius Slim 310 NB +sn9c20x 0458:7045 Genius Look 1320 V2 sn9c20x 0458:704a Genius Slim 1320 sn9c20x 0458:704c Genius i-Look 1321 sn9c20x 045e:00f4 LifeCam VX-6000 (SN9C20x + OV9650) --- linux-3.13.0.orig/Documentation/virtual/kvm/api.txt +++ linux-3.13.0/Documentation/virtual/kvm/api.txt @@ -2327,7 +2327,7 @@ Capability: basic Architectures: arm, arm64 Type: vcpu ioctl -Parameters: struct struct kvm_vcpu_init (in) +Parameters: struct kvm_vcpu_init (in) Returns: 0 on success; -1 on error Errors:  EINVAL:    the target is unknown, or the combination of features is invalid. @@ -2346,6 +2346,8 @@ Depends on KVM_CAP_ARM_PSCI. 
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode. Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). + - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. + Depends on KVM_CAP_ARM_PSCI_0_2. 4.83 KVM_ARM_PREFERRED_TARGET @@ -2699,6 +2701,21 @@ external interrupt has just been delivered into the guest. User space should put the acknowledged interrupt vector into the 'epr' field. + /* KVM_EXIT_SYSTEM_EVENT */ + struct { +#define KVM_SYSTEM_EVENT_SHUTDOWN 1 +#define KVM_SYSTEM_EVENT_RESET 2 + __u32 type; + __u64 flags; + } system_event; + +If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered +a system-level event using some architecture specific mechanism (hypercall +or some special instruction). In case of ARM/ARM64, this is triggered using +HVC instruction based PSCI call from the vcpu. The 'type' field describes +the system-level event type. The 'flags' field describes architecture +specific flags for the system-level event. + /* Fix the size of the union. */ char padding[256]; }; --- linux-3.13.0.orig/Documentation/virtual/kvm/mmu.txt +++ linux-3.13.0/Documentation/virtual/kvm/mmu.txt @@ -425,6 +425,20 @@ Since only 19 bits are used to store generation-number on mmio spte, all pages are zapped when there is an overflow. +Unfortunately, a single memory access might access kvm_memslots(kvm) multiple +times, the last one happening when the generation number is retrieved and +stored into the MMIO spte. Thus, the MMIO spte might be created based on +out-of-date information, but with an up-to-date generation number. + +To avoid this, the generation number is incremented again after synchronize_srcu +returns; thus, the low bit of kvm_memslots(kvm)->generation is only 1 during a +memslot update, while some SRCU readers might be using the old copy. We do not +want to use an MMIO sptes created with an odd generation number, and we can do +this without losing a bit in the MMIO spte. The low bit of the generation +is not stored in MMIO spte, and presumed zero when it is extracted out of the +spte. If KVM is unlucky and creates an MMIO spte while the low bit is 1, +the next access to the spte will always be a cache miss. + Further reading =============== --- linux-3.13.0.orig/Documentation/vm/hwpoison.txt +++ linux-3.13.0/Documentation/vm/hwpoison.txt @@ -84,6 +84,11 @@ PR_MCE_KILL_EARLY: Early kill PR_MCE_KILL_LATE: Late kill PR_MCE_KILL_DEFAULT: Use system global default + Note that if you want to have a dedicated thread which handles + the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should + call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise, + the SIGBUS is sent to the main thread. + PR_MCE_KILL_GET return current mode --- linux-3.13.0.orig/Documentation/x86/x86_64/mm.txt +++ linux-3.13.0/Documentation/x86/x86_64/mm.txt @@ -12,6 +12,8 @@ ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... +ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks +... unused hole ... ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls @@ -28,4 +30,11 @@ Current X86-64 implementations only support 40 bits of address space, but we support up to 46 bits. This expands into MBZ space in the page tables. 
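
  As a brief illustration of the hwpoison.txt note above about calling
  prctl(PR_MCE_KILL_EARLY) on a designated thread, here is a minimal
  user-space sketch. The thread and handler names are placeholders, and a
  real handler would examine siginfo (si_code == BUS_MCEERR_AO, si_addr) and
  take whatever recovery action the application needs. Build with -pthread.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <pthread.h>
    #include <sys/prctl.h>

    /* Placeholder: a real handler would inspect si_addr/si_code here. */
    static void sigbus_handler(int sig, siginfo_t *si, void *uctx)
    {
    }

    /* Dedicated thread that opts in to early SIGBUS(BUS_MCEERR_AO) delivery. */
    static void *mce_thread(void *arg)
    {
            if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
                    perror("prctl(PR_MCE_KILL)");
            for (;;)
                    pause();
            return NULL;
    }

    int main(void)
    {
            struct sigaction sa;
            pthread_t tid;

            memset(&sa, 0, sizeof(sa));
            sa.sa_sigaction = sigbus_handler;
            sa.sa_flags = SA_SIGINFO;
            sigaction(SIGBUS, &sa, NULL);

            pthread_create(&tid, NULL, mce_thread, NULL);
            /* ... application work that touches possibly-poisoned memory ... */
            pthread_join(tid, NULL);
            return 0;
    }
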
+->trampoline_pgd: + +We map EFI runtime services in the aforementioned PGD in the virtual +range of 64Gb (arbitrarily set, can be raised if needed) + +0xffffffef00000000 - 0xffffffff00000000 + -Andi Kleen, Jul 2004 --- linux-3.13.0.orig/MAINTAINERS +++ linux-3.13.0/MAINTAINERS @@ -665,6 +665,13 @@ F: drivers/net/appletalk/ F: net/appletalk/ +APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER +M: Iyappan Subramanian +M: Keyur Chudgar +S: Supported +F: drivers/net/ethernet/apm/xgene/ +F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt + APTINA CAMERA SENSOR PLL M: Laurent Pinchart L: linux-media@vger.kernel.org @@ -2158,6 +2165,11 @@ S: Supported F: drivers/net/ethernet/cisco/enic/ +CISCO VIC LOW LATENCY NIC DRIVER +M: Upinder Malhi +S: Supported +F: drivers/infiniband/hw/usnic + CIRRUS LOGIC EP93XX ETHERNET DRIVER M: Hartley Sweeten L: netdev@vger.kernel.org @@ -4474,7 +4486,7 @@ S: Maintained F: drivers/char/hw_random/ixp4xx-rng.c -INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e) +INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf) M: Jeff Kirsher M: Jesse Brandeburg M: Bruce Allan @@ -4483,6 +4495,7 @@ M: Greg Rose M: Alex Duyck M: John Ronciak +M: Mitch Williams L: e1000-devel@lists.sourceforge.net W: http://www.intel.com/support/feedback.htm W: http://e1000.sourceforge.net/ @@ -4498,6 +4511,7 @@ F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt F: Documentation/networking/i40e.txt +F: Documentation/networking/i40evf.txt F: drivers/net/ethernet/intel/ INTEL-MID GPIO DRIVER @@ -6345,6 +6359,13 @@ F: include/scsi/osd_* F: fs/exofs/ +OVERLAYFS FILESYSTEM +M: Miklos Szeredi +L: linux-fsdevel@vger.kernel.org +S: Supported +F: fs/overlayfs/* +F: Documentation/filesystems/overlayfs.txt + P54 WIRELESS DRIVER M: Christian Lamparter L: linux-wireless@vger.kernel.org @@ -6533,6 +6554,13 @@ S: Maintained F: drivers/pci/host/*designware* +PCI DRIVER FOR APPLIEDMICRO XGENE +M: Tanmay Inamdar +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: drivers/pci/host/pci-xgene.c + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org --- linux-3.13.0.orig/Makefile +++ linux-3.13.0/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 13 -SUBLEVEL = 0 -EXTRAVERSION = -NAME = One Giant Leap for Frogkind +SUBLEVEL = 11 +EXTRAVERSION = -ckt20 +NAME = King of Alienated Frog Porn # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -352,6 +352,12 @@ AFLAGS_KERNEL = CFLAGS_GCOV = -fprofile-arcs -ftest-coverage +# Prefer linux-backports-modules +ifneq ($(KBUILD_SRC),) +ifneq ($(shell if test -e $(KBUILD_OUTPUT)/ubuntu-build; then echo yes; fi),yes) +UBUNTUINCLUDE := -I/usr/src/linux-headers-lbm-$(KERNELRELEASE) +endif +endif # Use USERINCLUDE when you must reference the UAPI directories only. USERINCLUDE := \ @@ -364,12 +370,16 @@ # Use LINUXINCLUDE when you must reference the include/ directory. 
# Needed to be compatible with the O= option LINUXINCLUDE := \ + $(UBUNTUINCLUDE) \ -I$(srctree)/arch/$(hdr-arch)/include \ -Iarch/$(hdr-arch)/include/generated \ $(if $(KBUILD_SRC), -I$(srctree)/include) \ -Iinclude \ $(USERINCLUDE) +# UBUNTU: Include our third party driver stuff too +LINUXINCLUDE += -Iubuntu/include $(if $(KBUILD_SRC),-I$(srctree)/ubuntu/include) + KBUILD_CPPFLAGS := -D__KERNEL__ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ @@ -522,7 +532,7 @@ # Objects we will link into vmlinux / subdirs we need to visit init-y := init/ -drivers-y := drivers/ sound/ firmware/ +drivers-y := drivers/ sound/ firmware/ ubuntu/ net-y := net/ libs-y := lib/ core-y := usr/ @@ -617,6 +627,8 @@ endif endif +KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments) + ifdef CONFIG_DEBUG_INFO KBUILD_CFLAGS += -g KBUILD_AFLAGS += -gdwarf-2 @@ -940,6 +952,7 @@ $(error Headers not exportable for the $(SRCARCH) architecture)) $(Q)$(MAKE) $(hdr-inst)=include/uapi $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst) + $(Q)$(MAKE) $(hdr-inst)=ubuntu/include dst=include oldheaders= PHONY += headers_check_all headers_check_all: headers_install_all @@ -949,6 +962,7 @@ headers_check: headers_install $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1 $(Q)$(MAKE) $(hdr-inst)=arch/$(hdr-arch)/include/uapi/asm $(hdr-dst) HDRCHECK=1 + $(Q)$(MAKE) $(hdr-inst)=ubuntu/include dst=include oldheaders= HDRCHECK=1 # --------------------------------------------------------------------------- # Modules --- linux-3.13.0.orig/arch/Kconfig +++ linux-3.13.0/arch/Kconfig @@ -325,6 +325,7 @@ - secure_computing is called from a ptrace_event()-safe context - secure_computing return value is checked and a return value of -1 results in the system call being skipped immediately. 
+ - seccomp syscall wired up config SECCOMP_FILTER def_bool y --- linux-3.13.0.orig/arch/alpha/lib/csum_partial_copy.c +++ linux-3.13.0/arch/alpha/lib/csum_partial_copy.c @@ -378,6 +378,11 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { - return csum_partial_copy_from_user((__force const void __user *)src, - dst, len, sum, NULL); + __wsum checksum; + mm_segment_t oldfs = get_fs(); + set_fs(KERNEL_DS); + checksum = csum_partial_copy_from_user((__force const void __user *)src, + dst, len, sum, NULL); + set_fs(oldfs); + return checksum; } --- linux-3.13.0.orig/arch/alpha/mm/fault.c +++ linux-3.13.0/arch/alpha/mm/fault.c @@ -156,6 +156,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/arc/boot/dts/nsimosci.dts +++ linux-3.13.0/arch/arc/boot/dts/nsimosci.dts @@ -11,13 +11,16 @@ / { compatible = "snps,nsimosci"; - clock-frequency = <80000000>; /* 80 MHZ */ + clock-frequency = <20000000>; /* 20 MHZ */ #address-cells = <1>; #size-cells = <1>; interrupt-parent = <&intc>; chosen { - bootargs = "console=tty0 consoleblank=0"; + /* this is for console on PGU */ + /* bootargs = "console=tty0 consoleblank=0"; */ + /* this is for console on serial */ + bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug"; }; aliases { @@ -43,33 +46,32 @@ #interrupt-cells = <1>; }; - uart0: serial@c0000000 { - compatible = "snps,dw-apb-uart"; - reg = <0xc0000000 0x2000>; + uart0: serial@f0000000 { + compatible = "ns8250"; + reg = <0xf0000000 0x2000>; interrupts = <11>; - #clock-frequency = <80000000>; clock-frequency = <3686400>; baud = <115200>; reg-shift = <2>; reg-io-width = <4>; - status = "okay"; + no-loopback-test = <1>; }; - pgu0: pgu@c9000000 { + pgu0: pgu@f9000000 { compatible = "snps,arcpgufb"; - reg = <0xc9000000 0x400>; + reg = <0xf9000000 0x400>; }; - ps2: ps2@c9001000 { + ps2: ps2@f9001000 { compatible = "snps,arc_ps2"; - reg = <0xc9000400 0x14>; + reg = <0xf9000400 0x14>; interrupts = <13>; interrupt-names = "arc_ps2_irq"; }; - eth0: ethernet@c0003000 { + eth0: ethernet@f0003000 { compatible = "snps,oscilan"; - reg = <0xc0003000 0x44>; + reg = <0xf0003000 0x44>; interrupts = <7>, <8>; interrupt-names = "rx", "tx"; }; --- linux-3.13.0.orig/arch/arc/configs/nsimosci_defconfig +++ linux-3.13.0/arch/arc/configs/nsimosci_defconfig @@ -54,6 +54,7 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_ARC=y CONFIG_SERIAL_ARC_CONSOLE=y # CONFIG_HW_RANDOM is not set --- linux-3.13.0.orig/arch/arc/include/asm/kgdb.h +++ linux-3.13.0/arch/arc/include/asm/kgdb.h @@ -19,7 +19,7 @@ * register API yet */ #undef DBG_MAX_REG_NUM -#define GDB_MAX_REGS 39 +#define GDB_MAX_REGS 87 #define BREAK_INSTR_SIZE 2 #define CACHE_FLUSH_IS_SAFE 1 @@ -33,23 +33,27 @@ extern void kgdb_trap(struct pt_regs *regs); -enum arc700_linux_regnums { +/* This is the numbering of registers according to the GDB. See GDB's + * arc-tdep.h for details. + * + * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. 
*/ +enum arc_linux_regnums { _R0 = 0, _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13, _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24, _R25, _R26, - _BTA = 27, - _LP_START = 28, - _LP_END = 29, - _LP_COUNT = 30, - _STATUS32 = 31, - _BLINK = 32, - _FP = 33, - __SP = 34, - _EFA = 35, - _RET = 36, - _ORIG_R8 = 37, - _STOP_PC = 38 + _FP = 27, + __SP = 28, + _R30 = 30, + _BLINK = 31, + _LP_COUNT = 60, + _STOP_PC = 64, + _RET = 64, + _LP_START = 65, + _LP_END = 66, + _STATUS32 = 67, + _ECR = 76, + _BTA = 82, }; #else --- linux-3.13.0.orig/arch/arc/include/asm/pgtable.h +++ linux-3.13.0/arch/arc/include/asm/pgtable.h @@ -259,7 +259,8 @@ #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) #define pte_page(x) (mem_map + \ - (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT))) + (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \ + PAGE_SHIFT))) #define mk_pte(page, pgprot) \ ({ \ --- linux-3.13.0.orig/arch/arc/include/asm/processor.h +++ linux-3.13.0/arch/arc/include/asm/processor.h @@ -69,18 +69,19 @@ #define release_segments(mm) do { } while (0) #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) /* * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. * Look in process.c for details of kernel stack layout */ -#define KSTK_ESP(tsk) (tsk->thread.ksp) +#define TSK_K_ESP(tsk) (tsk->thread.ksp) -#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ +#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \ sizeof(struct callee_regs) + off))) -#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) -#define KSTK_FP(tsk) KSTK_REG(tsk, 0) +#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) +#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) /* * Do necessary setup to start up a newly executed thread. 
--- linux-3.13.0.orig/arch/arc/kernel/entry.S +++ linux-3.13.0/arch/arc/kernel/entry.S @@ -614,11 +614,13 @@ resume_kernel_mode: -#ifdef CONFIG_PREEMPT - - ; This is a must for preempt_schedule_irq() + ; Disable Interrupts from this point on + ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq() + ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe IRQ_DISABLE r9 +#ifdef CONFIG_PREEMPT + ; Can't preempt if preemption disabled GET_CURR_THR_INFO_FROM_SP r10 ld r8, [r10, THREAD_INFO_PREEMPT_COUNT] --- linux-3.13.0.orig/arch/arc/kernel/signal.c +++ linux-3.13.0/arch/arc/kernel/signal.c @@ -67,7 +67,7 @@ sigset_t *set) { int err; - err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, + err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, sizeof(sf->uc.uc_mcontext.regs.scratch)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); @@ -83,7 +83,7 @@ if (!err) set_current_blocked(&set); - err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), + err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), sizeof(sf->uc.uc_mcontext.regs.scratch)); return err; --- linux-3.13.0.orig/arch/arc/kernel/stacktrace.c +++ linux-3.13.0/arch/arc/kernel/stacktrace.c @@ -64,9 +64,9 @@ frame_info->task = tsk; - frame_info->regs.r27 = KSTK_FP(tsk); - frame_info->regs.r28 = KSTK_ESP(tsk); - frame_info->regs.r31 = KSTK_BLINK(tsk); + frame_info->regs.r27 = TSK_K_FP(tsk); + frame_info->regs.r28 = TSK_K_ESP(tsk); + frame_info->regs.r31 = TSK_K_BLINK(tsk); frame_info->regs.r63 = (unsigned int)__switch_to; /* In the prologue of __switch_to, first FP is saved on stack --- linux-3.13.0.orig/arch/arc/mm/cache_arc700.c +++ linux-3.13.0/arch/arc/mm/cache_arc700.c @@ -282,7 +282,7 @@ #else /* if V-P const for loop, PTAG can be written once outside loop */ if (full_page_op) - write_aux_reg(ARC_REG_DC_PTAG, paddr); + write_aux_reg(aux_tag, paddr); #endif while (num_lines-- > 0) { @@ -296,7 +296,7 @@ write_aux_reg(aux_cmd, vaddr); vaddr += L1_CACHE_BYTES; #else - write_aux_reg(aux, paddr); + write_aux_reg(aux_cmd, paddr); paddr += L1_CACHE_BYTES; #endif } --- linux-3.13.0.orig/arch/arc/mm/fault.c +++ linux-3.13.0/arch/arc/mm/fault.c @@ -162,6 +162,8 @@ /* TBD: switch to pagefault_out_of_memory() */ if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; --- linux-3.13.0.orig/arch/arm/Kconfig +++ linux-3.13.0/arch/arm/Kconfig @@ -6,6 +6,7 @@ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT if MMU @@ -871,7 +872,7 @@ bool "ARMv5 based platforms (ARM926T, XSCALE, PJ1, ...)" depends on !ARCH_MULTI_V6_V7 select ARCH_MULTI_V4_V5 - select CPU_ARM926T if (!CPU_ARM946E || CPU_ARM1020 || \ + select CPU_ARM926T if !(CPU_ARM946E || CPU_ARM1020 || \ CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \ CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_FEROCEON) @@ -1543,6 +1544,7 @@ choice prompt "Memory split" + depends on MMU default VMSPLIT_3G help Select the desired split between kernel and user memory. 
@@ -1560,6 +1562,7 @@ config PAGE_OFFSET hex + default PHYS_OFFSET if !MMU default 0x40000000 if VMSPLIT_1G default 0x80000000 if VMSPLIT_2G default 0xC0000000 @@ -2261,6 +2264,8 @@ source "drivers/Kconfig" +source "ubuntu/Kconfig" + source "fs/Kconfig" source "arch/arm/Kconfig.debug" --- linux-3.13.0.orig/arch/arm/Kconfig.debug +++ linux-3.13.0/arch/arm/Kconfig.debug @@ -1033,7 +1033,7 @@ default 0xf1600000 if ARCH_INTEGRATOR default 0xf1c28000 if DEBUG_SUNXI_UART0 default 0xf1c28400 if DEBUG_SUNXI_UART1 - default 0xf2100000 if DEBUG_PXA_UART1 + default 0xf6200000 if DEBUG_PXA_UART1 default 0xf4090000 if ARCH_LPC32XX default 0xf4200000 if ARCH_GEMINI default 0xf8009000 if DEBUG_VEXPRESS_UART0_CA9 --- linux-3.13.0.orig/arch/arm/boot/compressed/Makefile +++ linux-3.13.0/arch/arm/boot/compressed/Makefile @@ -3,6 +3,7 @@ # # create a compressed vmlinuz image from the original vmlinux # +KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) -fno-stack-protector OBJS = --- linux-3.13.0.orig/arch/arm/boot/compressed/head.S +++ linux-3.13.0/arch/arm/boot/compressed/head.S @@ -400,8 +400,7 @@ add sp, sp, r6 #endif - tst r4, #1 - bleq cache_clean_flush + bl cache_clean_flush adr r0, BSYM(restart) add r0, r0, r6 @@ -1050,6 +1049,8 @@ b call_cache_fn __armv4_mpu_cache_flush: + tst r4, #1 + movne pc, lr mov r2, #1 mov r3, #0 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache @@ -1067,6 +1068,8 @@ mov pc, lr __fa526_cache_flush: + tst r4, #1 + movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache mcr p15, 0, r1, c7, c5, 0 @ flush I cache @@ -1075,13 +1078,16 @@ __armv6_mmu_cache_flush: mov r1, #0 - mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D + tst r4, #1 + mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB - mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified + mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified mcr p15, 0, r1, c7, c10, 4 @ drain WB mov pc, lr __armv7_mmu_cache_flush: + tst r4, #1 + bne iflush mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 tst r10, #0xf << 16 @ hierarchical cache (ARMv7) mov r10, #0 @@ -1142,6 +1148,8 @@ mov pc, lr __armv5tej_mmu_cache_flush: + tst r4, #1 + movne pc, lr 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache bne 1b mcr p15, 0, r0, c7, c5, 0 @ flush I cache @@ -1149,6 +1157,8 @@ mov pc, lr __armv4_mmu_cache_flush: + tst r4, #1 + movne pc, lr mov r2, #64*1024 @ default: 32K dcache size (*2) mov r11, #32 @ default: 32 byte line size mrc p15, 0, r3, c0, c0, 1 @ read cache type @@ -1182,6 +1192,8 @@ __armv3_mmu_cache_flush: __armv3_mpu_cache_flush: + tst r4, #1 + movne pc, lr mov r1, #0 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 mov pc, lr --- linux-3.13.0.orig/arch/arm/boot/dts/am33xx.dtsi +++ linux-3.13.0/arch/arm/boot/dts/am33xx.dtsi @@ -102,6 +102,11 @@ ranges; ti,hwmods = "l3_main"; + cm: syscon@44e10000 { + compatible = "ti,am33xx-controlmodule", "syscon"; + reg = <0x44e10000 0x800>; + }; + intc: interrupt-controller@48200000 { compatible = "ti,omap2-intc"; interrupt-controller; @@ -114,7 +119,7 @@ compatible = "ti,edma3"; ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; reg = <0x49000000 0x10000>, - <0x44e10f90 0x10>; + <0x44e10f90 0x40>; interrupts = <12 13 14>; #dma-cells = <1>; dma-channels = <64>; @@ -422,7 +427,7 @@ ti,hwmods = "usb_otg_hs"; status = "disabled"; - usb_ctrl_mod: control@44e10000 { + usb_ctrl_mod: control@44e10620 { compatible = "ti,am335x-usb-ctrl-module"; reg = <0x44e10620 0x10 0x44e10648 0x4>; @@ -525,7 +530,7 @@ "tx14", 
"tx15"; }; - cppi41dma: dma-controller@07402000 { + cppi41dma: dma-controller@47402000 { compatible = "ti,am3359-cppi41"; reg = <0x47400000 0x1000 0x47402000 0x1000 @@ -651,6 +656,7 @@ */ interrupts = <40 41 42 43>; ranges; + syscon = <&cm>; davinci_mdio: mdio@4a101000 { compatible = "ti,davinci_mdio"; --- linux-3.13.0.orig/arch/arm/boot/dts/armada-370-xp.dtsi +++ linux-3.13.0/arch/arm/boot/dts/armada-370-xp.dtsi @@ -152,7 +152,7 @@ }; sata@a0000 { - compatible = "marvell,orion-sata"; + compatible = "marvell,armada-370-sata"; reg = <0xa0000 0x5000>; interrupts = <55>; clocks = <&gateclk 15>, <&gateclk 30>; --- linux-3.13.0.orig/arch/arm/boot/dts/armada-xp-db.dts +++ linux-3.13.0/arch/arm/boot/dts/armada-xp-db.dts @@ -40,7 +40,7 @@ /* Device Bus parameters are required */ /* Read parameters */ - devbus,bus-width = <8>; + devbus,bus-width = <16>; devbus,turn-off-ps = <60000>; devbus,badr-skew-ps = <0>; devbus,acc-first-ps = <124000>; --- linux-3.13.0.orig/arch/arm/boot/dts/armada-xp-gp.dts +++ linux-3.13.0/arch/arm/boot/dts/armada-xp-gp.dts @@ -49,7 +49,7 @@ /* Device Bus parameters are required */ /* Read parameters */ - devbus,bus-width = <8>; + devbus,bus-width = <16>; devbus,turn-off-ps = <60000>; devbus,badr-skew-ps = <0>; devbus,acc-first-ps = <124000>; --- linux-3.13.0.orig/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ linux-3.13.0/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -23,7 +23,7 @@ memory { device_type = "memory"; - reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */ + reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */ }; soc { @@ -37,7 +37,7 @@ /* Device Bus parameters are required */ /* Read parameters */ - devbus,bus-width = <8>; + devbus,bus-width = <16>; devbus,turn-off-ps = <60000>; devbus,badr-skew-ps = <0>; devbus,acc-first-ps = <124000>; --- linux-3.13.0.orig/arch/arm/boot/dts/at91sam9263.dtsi +++ linux-3.13.0/arch/arm/boot/dts/at91sam9263.dtsi @@ -506,6 +506,7 @@ compatible = "atmel,hsmci"; reg = <0xfff80000 0x600>; interrupts = <10 IRQ_TYPE_LEVEL_HIGH 0>; + pinctrl-names = "default"; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -515,6 +516,7 @@ compatible = "atmel,hsmci"; reg = <0xfff84000 0x600>; interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>; + pinctrl-names = "default"; #address-cells = <1>; #size-cells = <0>; status = "disabled"; --- linux-3.13.0.orig/arch/arm/boot/dts/at91sam9g45.dtsi +++ linux-3.13.0/arch/arm/boot/dts/at91sam9g45.dtsi @@ -618,6 +618,7 @@ compatible = "atmel,hsmci"; reg = <0xfff80000 0x600>; interrupts = <11 IRQ_TYPE_LEVEL_HIGH 0>; + pinctrl-names = "default"; dmas = <&dma 1 AT91_DMA_CFG_PER_ID(0)>; dma-names = "rxtx"; #address-cells = <1>; @@ -629,6 +630,7 @@ compatible = "atmel,hsmci"; reg = <0xfffd0000 0x600>; interrupts = <29 IRQ_TYPE_LEVEL_HIGH 0>; + pinctrl-names = "default"; dmas = <&dma 1 AT91_DMA_CFG_PER_ID(13)>; dma-names = "rxtx"; #address-cells = <1>; --- linux-3.13.0.orig/arch/arm/boot/dts/exynos5250-arndale.dts +++ linux-3.13.0/arch/arm/boot/dts/exynos5250-arndale.dts @@ -286,6 +286,7 @@ regulator-name = "vdd_g3d"; regulator-min-microvolt = <1000000>; regulator-max-microvolt = <1000000>; + regulator-always-on; regulator-boot-on; op_mode = <1>; }; --- linux-3.13.0.orig/arch/arm/boot/dts/imx25.dtsi +++ linux-3.13.0/arch/arm/boot/dts/imx25.dtsi @@ -157,7 +157,7 @@ #size-cells = <0>; compatible = "fsl,imx25-cspi", "fsl,imx35-cspi"; reg = <0x43fa4000 0x4000>; - clocks = <&clks 62>, <&clks 62>; + clocks = <&clks 78>, <&clks 78>; clock-names = "ipg", "per"; interrupts = <14>; status = "disabled"; @@ -351,7 
+351,7 @@ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; #pwm-cells = <2>; reg = <0x53fa0000 0x4000>; - clocks = <&clks 106>, <&clks 36>; + clocks = <&clks 106>, <&clks 52>; clock-names = "ipg", "per"; interrupts = <36>; }; @@ -370,7 +370,7 @@ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; #pwm-cells = <2>; reg = <0x53fa8000 0x4000>; - clocks = <&clks 107>, <&clks 36>; + clocks = <&clks 107>, <&clks 52>; clock-names = "ipg", "per"; interrupts = <41>; }; @@ -411,7 +411,7 @@ pwm4: pwm@53fc8000 { compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; reg = <0x53fc8000 0x4000>; - clocks = <&clks 108>, <&clks 36>; + clocks = <&clks 108>, <&clks 52>; clock-names = "ipg", "per"; interrupts = <42>; }; @@ -457,7 +457,7 @@ compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; #pwm-cells = <2>; reg = <0x53fe0000 0x4000>; - clocks = <&clks 105>, <&clks 36>; + clocks = <&clks 105>, <&clks 52>; clock-names = "ipg", "per"; interrupts = <26>; }; --- linux-3.13.0.orig/arch/arm/boot/dts/imx53.dtsi +++ linux-3.13.0/arch/arm/boot/dts/imx53.dtsi @@ -87,7 +87,7 @@ ipu: ipu@18000000 { #crtc-cells = <1>; compatible = "fsl,imx53-ipu"; - reg = <0x18000000 0x080000000>; + reg = <0x18000000 0x08000000>; interrupts = <11 10>; clocks = <&clks 59>, <&clks 110>, <&clks 61>; clock-names = "bus", "di0", "di1"; @@ -1111,7 +1111,7 @@ compatible = "fsl,imx53-vpu"; reg = <0x63ff4000 0x1000>; interrupts = <9>; - clocks = <&clks 63>, <&clks 63>; + clocks = <&clks 64>, <&clks 63>; clock-names = "per", "ahb"; iram = <&ocram>; status = "disabled"; --- linux-3.13.0.orig/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts +++ linux-3.13.0/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts @@ -30,6 +30,16 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; + mbus { + pcie-controller { + status = "okay"; + + pcie@1,0 { + status = "okay"; + }; + }; + }; + ocp@f1000000 { pinctrl@10000 { pmx_usb_led: pmx-usb-led { @@ -73,14 +83,6 @@ ehci@50000 { status = "okay"; }; - - pcie-controller { - status = "okay"; - - pcie@1,0 { - status = "okay"; - }; - }; }; gpio-leds { --- linux-3.13.0.orig/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi +++ linux-3.13.0/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi @@ -4,6 +4,16 @@ / { model = "ZyXEL NSA310"; + mbus { + pcie-controller { + status = "okay"; + + pcie@1,0 { + status = "okay"; + }; + }; + }; + ocp@f1000000 { pinctrl: pinctrl@10000 { @@ -26,14 +36,6 @@ status = "okay"; nr-ports = <2>; }; - - pcie-controller { - status = "okay"; - - pcie@1,0 { - status = "okay"; - }; - }; }; gpio_poweroff { --- linux-3.13.0.orig/arch/arm/boot/dts/ste-ccu8540.dts +++ linux-3.13.0/arch/arm/boot/dts/ste-ccu8540.dts @@ -18,6 +18,7 @@ compatible = "st-ericsson,ccu8540", "st-ericsson,u8540"; memory@0 { + device_type = "memory"; reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>; }; --- linux-3.13.0.orig/arch/arm/boot/dts/tegra124.dtsi +++ linux-3.13.0/arch/arm/boot/dts/tegra124.dtsi @@ -87,14 +87,6 @@ status = "disabled"; }; - serial@70006400 { - compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; - reg = <0x70006400 0x40>; - reg-shift = <2>; - interrupts = ; - status = "disabled"; - }; - rtc@7000e000 { compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc"; reg = <0x7000e000 0x100>; --- linux-3.13.0.orig/arch/arm/common/edma.c +++ linux-3.13.0/arch/arm/common/edma.c @@ -1423,55 +1423,38 @@ #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) -static int edma_of_read_u32_to_s16_array(const struct device_node *np, - const char *propname, s16 *out_values, - size_t sz) +static int edma_xbar_event_map(struct device *dev, struct 
device_node *node, + struct edma_soc_info *pdata, size_t sz) { - int ret; - - ret = of_property_read_u16_array(np, propname, out_values, sz); - if (ret) - return ret; - - /* Terminate it */ - *out_values++ = -1; - *out_values++ = -1; - - return 0; -} - -static int edma_xbar_event_map(struct device *dev, - struct device_node *node, - struct edma_soc_info *pdata, int len) -{ - int ret, i; + const char pname[] = "ti,edma-xbar-event-map"; struct resource res; void __iomem *xbar; - const s16 (*xbar_chans)[2]; + s16 (*xbar_chans)[2]; + size_t nelm = sz / sizeof(s16); u32 shift, offset, mux; + int ret, i; - xbar_chans = devm_kzalloc(dev, - len/sizeof(s16) + 2*sizeof(s16), - GFP_KERNEL); + xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL); if (!xbar_chans) return -ENOMEM; ret = of_address_to_resource(node, 1, &res); if (ret) - return -EIO; + return -ENOMEM; xbar = devm_ioremap(dev, res.start, resource_size(&res)); if (!xbar) return -ENOMEM; - ret = edma_of_read_u32_to_s16_array(node, - "ti,edma-xbar-event-map", - (s16 *)xbar_chans, - len/sizeof(u32)); + ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm); if (ret) return -EIO; - for (i = 0; xbar_chans[i][0] != -1; i++) { + /* Invalidate last entry for the other user of this mess */ + nelm >>= 1; + xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1; + + for (i = 0; i < nelm; i++) { shift = (xbar_chans[i][1] & 0x03) << 3; offset = xbar_chans[i][1] & 0xfffffffc; mux = readl(xbar + offset); @@ -1480,8 +1463,7 @@ writel(mux, (xbar + offset)); } - pdata->xbar_chans = xbar_chans; - + pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; return 0; } --- linux-3.13.0.orig/arch/arm/configs/multi_v7_defconfig +++ linux-3.13.0/arch/arm/configs/multi_v7_defconfig @@ -118,6 +118,7 @@ CONFIG_USB=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_ISP1760_HCD=y --- linux-3.13.0.orig/arch/arm/crypto/aes_glue.c +++ linux-3.13.0/arch/arm/crypto/aes_glue.c @@ -93,6 +93,6 @@ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); -MODULE_ALIAS("aes-asm"); +MODULE_ALIAS_CRYPTO("aes"); +MODULE_ALIAS_CRYPTO("aes-asm"); MODULE_AUTHOR("David McCullough "); --- linux-3.13.0.orig/arch/arm/crypto/aesbs-core.S_shipped +++ linux-3.13.0/arch/arm/crypto/aesbs-core.S_shipped @@ -58,14 +58,18 @@ # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ 7 +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif -#if __ARM_ARCH__>=7 +#if __ARM_MAX_ARCH__>=7 +.arch armv7-a +.fpu neon + .text .syntax unified @ ARMv7-capable assembler is expected to handle this #ifdef __thumb2__ @@ -74,8 +78,6 @@ .code 32 #endif -.fpu neon - .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: @@ -2095,9 +2097,11 @@ vld1.8 {q8}, [r0] @ initial tweak adr r2, .Lxts_magic +#ifndef XTS_CHAIN_TWEAK tst r9, #0xf @ if not multiple of 16 it ne @ Thumb2 thing, sanity check in ARM subne r9, #0x10 @ subtract another 16 bytes +#endif subs r9, #0x80 blo .Lxts_dec_short --- linux-3.13.0.orig/arch/arm/crypto/aesbs-glue.c +++ linux-3.13.0/arch/arm/crypto/aesbs-glue.c @@ -137,7 +137,7 @@ dst += AES_BLOCK_SIZE; } while (--blocks); } - err = blkcipher_walk_done(desc, &walk, 0); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } return err; } @@ -158,7 +158,7 @@ bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 
walk.nbytes, &ctx->dec, walk.iv); kernel_neon_end(); - err = blkcipher_walk_done(desc, &walk, 0); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } while (walk.nbytes) { u32 blocks = walk.nbytes / AES_BLOCK_SIZE; @@ -182,7 +182,7 @@ dst += AES_BLOCK_SIZE; src += AES_BLOCK_SIZE; } while (--blocks); - err = blkcipher_walk_done(desc, &walk, 0); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } return err; } @@ -268,7 +268,7 @@ bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes, &ctx->enc, walk.iv); kernel_neon_end(); - err = blkcipher_walk_done(desc, &walk, 0); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } return err; } @@ -292,7 +292,7 @@ bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes, &ctx->dec, walk.iv); kernel_neon_end(); - err = blkcipher_walk_done(desc, &walk, 0); + err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); } return err; } --- linux-3.13.0.orig/arch/arm/crypto/bsaes-armv7.pl +++ linux-3.13.0/arch/arm/crypto/bsaes-armv7.pl @@ -701,14 +701,18 @@ # define VFP_ABI_FRAME 0 # define BSAES_ASM_EXTENDED_KEY # define XTS_CHAIN_TWEAK -# define __ARM_ARCH__ 7 +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +# define __ARM_MAX_ARCH__ 7 #endif #ifdef __thumb__ # define adrl adr #endif -#if __ARM_ARCH__>=7 +#if __ARM_MAX_ARCH__>=7 +.arch armv7-a +.fpu neon + .text .syntax unified @ ARMv7-capable assembler is expected to handle this #ifdef __thumb2__ @@ -717,8 +721,6 @@ .code 32 #endif -.fpu neon - .type _bsaes_decrypt8,%function .align 4 _bsaes_decrypt8: @@ -2076,9 +2078,11 @@ vld1.8 {@XMM[8]}, [r0] @ initial tweak adr $magic, .Lxts_magic +#ifndef XTS_CHAIN_TWEAK tst $len, #0xf @ if not multiple of 16 it ne @ Thumb2 thing, sanity check in ARM subne $len, #0x10 @ subtract another 16 bytes +#endif subs $len, #0x80 blo .Lxts_dec_short --- linux-3.13.0.orig/arch/arm/crypto/sha1_glue.c +++ linux-3.13.0/arch/arm/crypto/sha1_glue.c @@ -175,5 +175,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); MODULE_AUTHOR("David McCullough "); --- linux-3.13.0.orig/arch/arm/include/asm/arch_timer.h +++ linux-3.13.0/arch/arm/include/asm/arch_timer.h @@ -78,6 +78,15 @@ return val; } +static inline u64 arch_counter_get_cntpct(void) +{ + u64 cval; + + isb(); + asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); + return cval; +} + static inline u64 arch_counter_get_cntvct(void) { u64 cval; --- linux-3.13.0.orig/arch/arm/include/asm/cacheflush.h +++ linux-3.13.0/arch/arm/include/asm/cacheflush.h @@ -212,6 +212,7 @@ static inline void __flush_icache_all(void) { __flush_icache_preferred(); + dsb(); } /* @@ -471,7 +472,6 @@ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \ "isb \n\t" \ "bl v7_flush_dcache_"__stringify(level)" \n\t" \ - "clrex \n\t" \ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \ --- linux-3.13.0.orig/arch/arm/include/asm/div64.h +++ linux-3.13.0/arch/arm/include/asm/div64.h @@ -156,7 +156,7 @@ /* Select the best insn combination to perform the */ \ /* actual __m * __n / (__p << 64) operation. 
*/ \ if (!__c) { \ - asm ( "umull %Q0, %R0, %1, %Q2\n\t" \ + asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \ "mov %Q0, #0" \ : "=&r" (__res) \ : "r" (__m), "r" (__n) \ --- linux-3.13.0.orig/arch/arm/include/asm/futex.h +++ linux-3.13.0/arch/arm/include/asm/futex.h @@ -3,11 +3,6 @@ #ifdef __KERNEL__ -#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP) -/* ARM doesn't provide unprivileged exclusive memory accessors */ -#include -#else - #include #include #include @@ -164,6 +159,5 @@ return ret; } -#endif /* !(CPU_USE_DOMAINS && SMP) */ #endif /* __KERNEL__ */ #endif /* _ASM_ARM_FUTEX_H */ --- linux-3.13.0.orig/arch/arm/include/asm/kvm_host.h +++ linux-3.13.0/arch/arm/include/asm/kvm_host.h @@ -36,7 +36,7 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HAVE_ONE_REG -#define KVM_VCPU_MAX_FEATURES 1 +#define KVM_VCPU_MAX_FEATURES 2 #include --- linux-3.13.0.orig/arch/arm/include/asm/kvm_mmu.h +++ linux-3.13.0/arch/arm/include/asm/kvm_mmu.h @@ -114,11 +114,34 @@ pmd_val(*pmd) |= L_PMD_S2_RDWR; } +/* Open coded p*d_addr_end that can deal with 64bit addresses */ +#define kvm_pgd_addr_end(addr, end) \ +({ u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#define kvm_pud_addr_end(addr,end) (end) + +#define kvm_pmd_addr_end(addr, end) \ +({ u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, - unsigned long size) +#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) + +static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { + return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; +} + +static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, + unsigned long size) +{ + if (!vcpu_has_cache_enabled(vcpu)) + kvm_flush_dcache_to_poc((void *)hva, size); + /* * If we are going to insert an instruction page and the icache is * either VIPT or PIPT, there is a potential problem where the host @@ -139,7 +162,7 @@ } } -#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) +void stage2_flush_vm(struct kvm *kvm); #endif /* !__ASSEMBLY__ */ --- linux-3.13.0.orig/arch/arm/include/asm/kvm_psci.h +++ linux-3.13.0/arch/arm/include/asm/kvm_psci.h @@ -18,6 +18,10 @@ #ifndef __ARM_KVM_PSCI_H__ #define __ARM_KVM_PSCI_H__ -bool kvm_psci_call(struct kvm_vcpu *vcpu); +#define KVM_ARM_PSCI_0_1 1 +#define KVM_ARM_PSCI_0_2 2 + +int kvm_psci_version(struct kvm_vcpu *vcpu); +int kvm_psci_call(struct kvm_vcpu *vcpu); #endif /* __ARM_KVM_PSCI_H__ */ --- linux-3.13.0.orig/arch/arm/include/asm/memory.h +++ linux-3.13.0/arch/arm/include/asm/memory.h @@ -30,14 +30,15 @@ */ #define UL(x) _AC(x, UL) +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) + #ifdef CONFIG_MMU /* - * PAGE_OFFSET - the virtual address of the start of the kernel image * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ -#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) @@ -104,10 +105,6 @@ #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif -#ifndef PAGE_OFFSET -#define PAGE_OFFSET PLAT_PHYS_OFFSET -#endif - /* * The module can be at any place in ram in nommu mode. 
*/ --- linux-3.13.0.orig/arch/arm/include/asm/pgtable-2level.h +++ linux-3.13.0/arch/arm/include/asm/pgtable-2level.h @@ -140,6 +140,7 @@ #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ +#define L_PTE_MT_VECTORS (_AT(pteval_t, 0x0f) << 2) /* 1111 */ #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) #ifndef __ASSEMBLY__ --- linux-3.13.0.orig/arch/arm/include/asm/pgtable-3level.h +++ linux-3.13.0/arch/arm/include/asm/pgtable-3level.h @@ -120,13 +120,16 @@ /* * 2nd stage PTE definitions for LPAE. */ -#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ +#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */ +#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */ +#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */ +#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */ +#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2) -#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ +#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ +#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ + +#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ /* * Hyp-mode PL2 PTE definitions for LPAE. --- linux-3.13.0.orig/arch/arm/include/asm/psci.h +++ linux-3.13.0/arch/arm/include/asm/psci.h @@ -29,16 +29,19 @@ int (*cpu_off)(struct psci_power_state state); int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); int (*migrate)(unsigned long cpuid); + int (*affinity_info)(unsigned long target_affinity, + unsigned long lowest_affinity_level); + int (*migrate_info_type)(void); }; extern struct psci_operations psci_ops; extern struct smp_operations psci_smp_ops; #ifdef CONFIG_ARM_PSCI -void psci_init(void); +int psci_init(void); bool psci_smp_available(void); #else -static inline void psci_init(void) { } +static inline int psci_init(void) { } static inline bool psci_smp_available(void) { return false; } #endif --- linux-3.13.0.orig/arch/arm/include/asm/spinlock.h +++ linux-3.13.0/arch/arm/include/asm/spinlock.h @@ -37,18 +37,9 @@ static inline void dsb_sev(void) { -#if __LINUX_ARM_ARCH__ >= 7 - __asm__ __volatile__ ( - "dsb ishst\n" - SEV - ); -#else - __asm__ __volatile__ ( - "mcr p15, 0, %0, c7, c10, 4\n" - SEV - : : "r" (0) - ); -#endif + + dsb(ishst); + __asm__(SEV); } /* --- linux-3.13.0.orig/arch/arm/include/asm/thread_info.h +++ linux-3.13.0/arch/arm/include/asm/thread_info.h @@ -43,16 +43,6 @@ __u32 extra[2]; /* Xscale 'acc' register, etc */ }; -struct arm_restart_block { - union { - /* For user cache flushing */ - struct { - unsigned long start; - unsigned long end; - } cache; - }; -}; - /* * low level task data that entry.S needs immediate access to. * __switch_to() assumes cpu_context follows immediately after cpu_domain. 
@@ -78,7 +68,6 @@ unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif struct restart_block restart_block; - struct arm_restart_block arm_restart_block; }; #define INIT_THREAD_INFO(tsk) \ --- linux-3.13.0.orig/arch/arm/include/asm/tls.h +++ linux-3.13.0/arch/arm/include/asm/tls.h @@ -1,6 +1,9 @@ #ifndef __ASMARM_TLS_H #define __ASMARM_TLS_H +#include +#include + #ifdef __ASSEMBLY__ #include .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2 @@ -50,6 +53,49 @@ #endif #ifndef __ASSEMBLY__ + +static inline void set_tls(unsigned long val) +{ + struct thread_info *thread; + + thread = current_thread_info(); + + thread->tp_value[0] = val; + + /* + * This code runs with preemption enabled and therefore must + * be reentrant with respect to switch_tls. + * + * We need to ensure ordering between the shadow state and the + * hardware state, so that we don't corrupt the hardware state + * with a stale shadow state during context switch. + * + * If we're preempted here, switch_tls will load TPIDRURO from + * thread_info upon resuming execution and the following mcr + * is merely redundant. + */ + barrier(); + + if (!tls_emu) { + if (has_tls_reg) { + asm("mcr p15, 0, %0, c13, c0, 3" + : : "r" (val)); + } else { +#ifdef CONFIG_KUSER_HELPERS + /* + * User space must never try to access this + * directly. Expect your app to break + * eventually if you do so. The user helper + * at 0xffff0fe0 must be used instead. (see + * entry-armv.S for details) + */ + *((unsigned int *)0xffff0ff0) = val; +#endif + } + + } +} + static inline unsigned long get_tpuser(void) { unsigned long reg = 0; @@ -59,5 +105,23 @@ return reg; } + +static inline void set_tpuser(unsigned long val) +{ + /* Since TPIDRURW is fully context-switched (unlike TPIDRURO), + * we need not update thread_info. 
+ */ + if (has_tls_reg && !tls_emu) { + asm("mcr p15, 0, %0, c13, c0, 2" + : : "r" (val)); + } +} + +static inline void flush_tls(void) +{ + set_tls(0); + set_tpuser(0); +} + #endif #endif /* __ASMARM_TLS_H */ --- linux-3.13.0.orig/arch/arm/include/asm/uaccess.h +++ linux-3.13.0/arch/arm/include/asm/uaccess.h @@ -171,8 +171,9 @@ #define __put_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ + const typeof(*(p)) __user *__tmp_p = (p); \ register const typeof(*(p)) __r2 asm("r2") = (x); \ - register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ --- linux-3.13.0.orig/arch/arm/include/asm/unistd.h +++ linux-3.13.0/arch/arm/include/asm/unistd.h @@ -15,7 +15,7 @@ #include -#define __NR_syscalls (380) +#define __NR_syscalls (384) #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) #define __ARCH_WANT_STAT64 --- linux-3.13.0.orig/arch/arm/include/uapi/asm/kvm.h +++ linux-3.13.0/arch/arm/include/uapi/asm/kvm.h @@ -20,6 +20,7 @@ #define __ARM_KVM_H__ #include +#include #include #define __KVM_HAVE_GUEST_DEBUG @@ -83,6 +84,7 @@ #define KVM_VGIC_V2_CPU_SIZE 0x2000 #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ +#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ struct kvm_vcpu_init { __u32 target; @@ -173,9 +175,9 @@ #define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) #define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) -#define KVM_PSCI_RET_SUCCESS 0 -#define KVM_PSCI_RET_NI ((unsigned long)-1) -#define KVM_PSCI_RET_INVAL ((unsigned long)-2) -#define KVM_PSCI_RET_DENIED ((unsigned long)-3) +#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS +#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED +#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS +#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED #endif /* __ARM_KVM_H__ */ --- linux-3.13.0.orig/arch/arm/include/uapi/asm/unistd.h +++ linux-3.13.0/arch/arm/include/uapi/asm/unistd.h @@ -406,6 +406,10 @@ #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) #define __NR_kcmp (__NR_SYSCALL_BASE+378) #define __NR_finit_module (__NR_SYSCALL_BASE+379) +#define __NR_sched_setattr (__NR_SYSCALL_BASE+380) +#define __NR_sched_getattr (__NR_SYSCALL_BASE+381) +#define __NR_renameat2 (__NR_SYSCALL_BASE+382) +#define __NR_seccomp (__NR_SYSCALL_BASE+383) /* * This may need to be greater than __NR_last_syscall+1 in order to --- linux-3.13.0.orig/arch/arm/kernel/calls.S +++ linux-3.13.0/arch/arm/kernel/calls.S @@ -389,6 +389,10 @@ CALL(sys_process_vm_writev) CALL(sys_kcmp) CALL(sys_finit_module) +/* 380 */ CALL(sys_ni_syscall) /* sys_sched_setattr */ + CALL(sys_ni_syscall) /* sys_sched_getattr */ + CALL(sys_ni_syscall) /* sys_renameat2 */ + CALL(sys_seccomp) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted --- linux-3.13.0.orig/arch/arm/kernel/entry-header.S +++ linux-3.13.0/arch/arm/kernel/entry-header.S @@ -132,6 +132,10 @@ orrne r5, V7M_xPSR_FRAMEPTRALIGN biceq r5, V7M_xPSR_FRAMEPTRALIGN + @ ensure bit 0 is cleared in the PC, otherwise behaviour is + @ unpredictable + bic r4, #1 + @ write basic exception frame stmdb r2!, {r1, r3-r5} ldmia sp, {r1, r3-r5} @@ -204,26 +208,21 @@ #endif .endif msr spsr_cxsf, \rpsr -#if defined(CONFIG_CPU_V6) - ldr r0, [sp] - strex r1, r2, [sp] @ clear the exclusive monitor - ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr -#elif defined(CONFIG_CPU_32v6K) - clrex @ clear the exclusive 
monitor - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr -#else - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) + @ We must avoid clrex due to Cortex-A15 erratum #830321 + sub r0, sp, #4 @ uninhabited address + strex r1, r2, [r0] @ clear the exclusive monitor #endif + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr .endm .macro restore_user_regs, fast = 0, offset = 0 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC]! @ get pc msr spsr_cxsf, r1 @ save in spsr_svc -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) + @ We must avoid clrex due to Cortex-A15 erratum #830321 strex r1, r2, [sp] @ clear the exclusive monitor -#elif defined(CONFIG_CPU_32v6K) - clrex @ clear the exclusive monitor #endif .if \fast ldmdb sp, {r1 - lr}^ @ get calling r1 - lr @@ -268,7 +267,10 @@ .endif ldr lr, [sp, #S_SP] @ top of the stack ldrd r0, r1, [sp, #S_LR] @ calling lr and pc - clrex @ clear the exclusive monitor + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor + stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context ldmia sp, {r0 - r12} mov sp, lr @@ -289,13 +291,16 @@ .endm #else /* ifdef CONFIG_CPU_V7M */ .macro restore_user_regs, fast = 0, offset = 0 - clrex @ clear the exclusive monitor mov r2, sp load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP msr spsr_cxsf, r1 @ save in spsr_svc + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r1, r2, [sp] @ clear the exclusive monitor + .if \fast ldmdb sp, {r1 - r12} @ get calling r1 - r12 .else --- linux-3.13.0.orig/arch/arm/kernel/irq.c +++ linux-3.13.0/arch/arm/kernel/irq.c @@ -163,7 +163,7 @@ c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; --- linux-3.13.0.orig/arch/arm/kernel/machine_kexec.c +++ linux-3.13.0/arch/arm/kernel/machine_kexec.c @@ -184,3 +184,10 @@ soft_restart(reboot_entry_phys); } + +void arch_crash_save_vmcoreinfo(void) +{ +#ifdef CONFIG_ARM_LPAE + VMCOREINFO_CONFIG(ARM_LPAE); +#endif +} --- linux-3.13.0.orig/arch/arm/kernel/process.c +++ linux-3.13.0/arch/arm/kernel/process.c @@ -334,6 +334,8 @@ memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); + flush_tls(); + thread_notify(THREAD_NOTIFY_FLUSH, thread); } --- linux-3.13.0.orig/arch/arm/kernel/psci.c +++ linux-3.13.0/arch/arm/kernel/psci.c @@ -17,63 +17,58 @@ #include #include +#include +#include +#include #include #include #include #include #include +#include struct psci_operations psci_ops; static int (*invoke_psci_fn)(u32, u32, u32, u32); +typedef int (*psci_initcall_t)(const struct device_node *); enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, PSCI_FN_CPU_OFF, PSCI_FN_MIGRATE, + PSCI_FN_AFFINITY_INFO, + PSCI_FN_MIGRATE_INFO_TYPE, PSCI_FN_MAX, }; static u32 psci_function_id[PSCI_FN_MAX]; -#define PSCI_RET_SUCCESS 0 -#define PSCI_RET_EOPNOTSUPP -1 -#define PSCI_RET_EINVAL -2 -#define PSCI_RET_EPERM -3 - static int psci_to_linux_errno(int errno) { switch (errno) { case PSCI_RET_SUCCESS: return 0; - case PSCI_RET_EOPNOTSUPP: + case 
PSCI_RET_NOT_SUPPORTED: return -EOPNOTSUPP; - case PSCI_RET_EINVAL: + case PSCI_RET_INVALID_PARAMS: return -EINVAL; - case PSCI_RET_EPERM: + case PSCI_RET_DENIED: return -EPERM; }; return -EINVAL; } -#define PSCI_POWER_STATE_ID_MASK 0xffff -#define PSCI_POWER_STATE_ID_SHIFT 0 -#define PSCI_POWER_STATE_TYPE_MASK 0x1 -#define PSCI_POWER_STATE_TYPE_SHIFT 16 -#define PSCI_POWER_STATE_AFFL_MASK 0x3 -#define PSCI_POWER_STATE_AFFL_SHIFT 24 - static u32 psci_power_state_pack(struct psci_power_state state) { - return ((state.id & PSCI_POWER_STATE_ID_MASK) - << PSCI_POWER_STATE_ID_SHIFT) | - ((state.type & PSCI_POWER_STATE_TYPE_MASK) - << PSCI_POWER_STATE_TYPE_SHIFT) | - ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) - << PSCI_POWER_STATE_AFFL_SHIFT); + return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT) + & PSCI_0_2_POWER_STATE_ID_MASK) | + ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) + & PSCI_0_2_POWER_STATE_TYPE_MASK) | + ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) + & PSCI_0_2_POWER_STATE_AFFL_MASK); } /* @@ -110,6 +105,14 @@ return function_id; } +static int psci_get_version(void) +{ + int err; + + err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); + return err; +} + static int psci_cpu_suspend(struct psci_power_state state, unsigned long entry_point) { @@ -153,26 +156,36 @@ return psci_to_linux_errno(err); } -static const struct of_device_id psci_of_match[] __initconst = { - { .compatible = "arm,psci", }, - {}, -}; +static int psci_affinity_info(unsigned long target_affinity, + unsigned long lowest_affinity_level) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_AFFINITY_INFO]; + err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0); + return err; +} -void __init psci_init(void) +static int psci_migrate_info_type(void) { - struct device_node *np; - const char *method; - u32 id; + int err; + u32 fn; - np = of_find_matching_node(NULL, psci_of_match); - if (!np) - return; + fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; + err = invoke_psci_fn(fn, 0, 0, 0); + return err; +} + +static int get_set_conduit_method(struct device_node *np) +{ + const char *method; - pr_info("probing function IDs from device-tree\n"); + pr_info("probing for conduit method from DT.\n"); if (of_property_read_string(np, "method", &method)) { - pr_warning("missing \"method\" property\n"); - goto out_put_node; + pr_warn("missing \"method\" property\n"); + return -ENXIO; } if (!strcmp("hvc", method)) { @@ -180,10 +193,99 @@ } else if (!strcmp("smc", method)) { invoke_psci_fn = __invoke_psci_fn_smc; } else { - pr_warning("invalid \"method\" property: %s\n", method); + pr_warn("invalid \"method\" property: %s\n", method); + return -EINVAL; + } + return 0; +} + +static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); +} + +static void psci_sys_poweroff(void) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); +} + +/* + * PSCI Function IDs for v0.2+ are well defined so use + * standard values. + */ +static int psci_0_2_init(struct device_node *np) +{ + int err, ver; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + + ver = psci_get_version(); + + if (ver == PSCI_RET_NOT_SUPPORTED) { + /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. 
*/ + pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); + err = -EOPNOTSUPP; goto out_put_node; + } else { + pr_info("PSCIv%d.%d detected in firmware.\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + if (PSCI_VERSION_MAJOR(ver) == 0 && + PSCI_VERSION_MINOR(ver) < 2) { + err = -EINVAL; + pr_err("Conflicting PSCI version detected.\n"); + goto out_put_node; + } } + pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND; + psci_ops.cpu_suspend = psci_cpu_suspend; + + psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; + psci_ops.cpu_off = psci_cpu_off; + + psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON; + psci_ops.cpu_on = psci_cpu_on; + + psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE; + psci_ops.migrate = psci_migrate; + + psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO; + psci_ops.affinity_info = psci_affinity_info; + + psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] = + PSCI_0_2_FN_MIGRATE_INFO_TYPE; + psci_ops.migrate_info_type = psci_migrate_info_type; + + arm_pm_restart = psci_sys_reset; + + pm_power_off = psci_sys_poweroff; + +out_put_node: + of_node_put(np); + return err; +} + +/* + * PSCI < v0.2 get PSCI Function IDs via DT. + */ +static int psci_0_1_init(struct device_node *np) +{ + u32 id; + int err; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + + pr_info("Using PSCI v0.1 Function IDs from DT\n"); + if (!of_property_read_u32(np, "cpu_suspend", &id)) { psci_function_id[PSCI_FN_CPU_SUSPEND] = id; psci_ops.cpu_suspend = psci_cpu_suspend; @@ -206,5 +308,25 @@ out_put_node: of_node_put(np); - return; + return err; +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", .data = psci_0_1_init}, + { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, + {}, +}; + +int __init psci_init(void) +{ + struct device_node *np; + const struct of_device_id *matched_np; + psci_initcall_t init_fn; + + np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); + if (!np) + return -ENODEV; + + init_fn = (psci_initcall_t)matched_np->data; + return init_fn(np); } --- linux-3.13.0.orig/arch/arm/kernel/psci_smp.c +++ linux-3.13.0/arch/arm/kernel/psci_smp.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include @@ -66,6 +68,36 @@ /* We should never return */ panic("psci: cpu %d failed to shutdown\n", cpu); } + +int __ref psci_cpu_kill(unsigned int cpu) +{ + int err, i; + + if (!psci_ops.affinity_info) + return 1; + /* + * cpu_kill could race with cpu_die and we can + * potentially end up declaring this cpu undead + * while it is dying. So, try again a few times. + */ + + for (i = 0; i < 10; i++) { + err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); + if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { + pr_info("CPU%d killed.\n", cpu); + return 1; + } + + msleep(10); + pr_info("Retrying again to check for CPU kill\n"); + } + + pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", + cpu, err); + /* Make platform_cpu_kill() fail. 
*/ + return 0; +} + #endif bool __init psci_smp_available(void) @@ -78,5 +110,6 @@ .smp_boot_secondary = psci_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = psci_cpu_die, + .cpu_kill = psci_cpu_kill, #endif }; --- linux-3.13.0.orig/arch/arm/kernel/ptrace.c +++ linux-3.13.0/arch/arm/kernel/ptrace.c @@ -908,7 +908,7 @@ PTRACE_SYSCALL_EXIT, }; -static int tracehook_report_syscall(struct pt_regs *regs, +static void tracehook_report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) { unsigned long ip; @@ -926,7 +926,6 @@ current_thread_info()->syscall = -1; regs->ARM_ip = ip; - return current_thread_info()->syscall; } asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) @@ -938,7 +937,9 @@ return -1; if (test_thread_flag(TIF_SYSCALL_TRACE)) - scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); + tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); + + scno = current_thread_info()->syscall; if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, scno); --- linux-3.13.0.orig/arch/arm/kernel/stacktrace.c +++ linux-3.13.0/arch/arm/kernel/stacktrace.c @@ -83,13 +83,16 @@ return trace->nr_entries >= trace->max_entries; } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +/* This must be noinline to so that our skip calculation works correctly */ +static noinline void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) { struct stack_trace_data data; struct stackframe frame; data.trace = trace; data.skip = trace->skip; + data.no_sched_functions = nosched; if (tsk != current) { #ifdef CONFIG_SMP @@ -102,7 +105,6 @@ trace->entries[trace->nr_entries++] = ULONG_MAX; return; #else - data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); frame.sp = thread_saved_sp(tsk); frame.lr = 0; /* recovered from the stack */ @@ -111,11 +113,12 @@ } else { register unsigned long current_sp asm ("sp"); - data.no_sched_functions = 0; + /* We don't want this function nor the caller */ + data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_sp; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)save_stack_trace_tsk; + frame.pc = (unsigned long)__save_stack_trace; } walk_stackframe(&frame, save_trace, &data); @@ -123,9 +126,14 @@ trace->entries[trace->nr_entries++] = ULONG_MAX; } +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} + void save_stack_trace(struct stack_trace *trace) { - save_stack_trace_tsk(current, trace); + __save_stack_trace(current, trace, 0); } EXPORT_SYMBOL_GPL(save_stack_trace); #endif --- linux-3.13.0.orig/arch/arm/kernel/thumbee.c +++ linux-3.13.0/arch/arm/kernel/thumbee.c @@ -45,7 +45,7 @@ switch (cmd) { case THREAD_NOTIFY_FLUSH: - thread->thumbee_state = 0; + teehbr_write(0); break; case THREAD_NOTIFY_SWITCH: current_thread_info()->thumbee_state = teehbr_read(); --- linux-3.13.0.orig/arch/arm/kernel/traps.c +++ linux-3.13.0/arch/arm/kernel/traps.c @@ -510,8 +510,6 @@ return regs->ARM_r0; } -static long do_cache_op_restart(struct restart_block *); - static inline int __do_cache_op(unsigned long start, unsigned long end) { @@ -520,24 +518,8 @@ do { unsigned long chunk = min(PAGE_SIZE, end - start); - if (signal_pending(current)) { - struct thread_info *ti = current_thread_info(); - - ti->restart_block = (struct restart_block) { - .fn = do_cache_op_restart, - }; - - ti->arm_restart_block = (struct arm_restart_block) { - { - .cache = { - 
.start = start, - .end = end, - }, - }, - }; - - return -ERESTART_RESTARTBLOCK; - } + if (fatal_signal_pending(current)) + return 0; ret = flush_cache_user_range(start, start + chunk); if (ret) @@ -550,15 +532,6 @@ return 0; } -static long do_cache_op_restart(struct restart_block *unused) -{ - struct arm_restart_block *restart_block; - - restart_block = ¤t_thread_info()->arm_restart_block; - return __do_cache_op(restart_block->cache.start, - restart_block->cache.end); -} - static inline int do_cache_op(unsigned long start, unsigned long end, int flags) { @@ -578,7 +551,6 @@ #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) asmlinkage int arm_syscall(int no, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); siginfo_t info; if ((no >> 16) != (__ARM_NR_BASE>> 16)) @@ -629,21 +601,7 @@ return regs->ARM_r0; case NR(set_tls): - thread->tp_value[0] = regs->ARM_r0; - if (tls_emu) - return 0; - if (has_tls_reg) { - asm ("mcr p15, 0, %0, c13, c0, 3" - : : "r" (regs->ARM_r0)); - } else { - /* - * User space must never try to access this directly. - * Expect your app to break eventually if you do so. - * The user helper at 0xffff0fe0 must be used instead. - * (see entry-armv.S for details) - */ - *((unsigned int *)0xffff0ff0) = regs->ARM_r0; - } + set_tls(regs->ARM_r0); return 0; #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG --- linux-3.13.0.orig/arch/arm/kvm/arm.c +++ linux-3.13.0/arch/arm/kvm/arm.c @@ -193,6 +193,7 @@ case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: case KVM_CAP_ONE_REG: case KVM_CAP_ARM_PSCI: + case KVM_CAP_ARM_PSCI_0_2: r = 1; break; case KVM_CAP_COALESCED_MMIO: @@ -478,15 +479,6 @@ return ret; } - /* - * Handle the "start in power-off" case by calling into the - * PSCI code. - */ - if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) { - *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF; - kvm_psci_call(vcpu); - } - return 0; } @@ -700,6 +692,24 @@ return -EINVAL; } +static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, + struct kvm_vcpu_init *init) +{ + int ret; + + ret = kvm_vcpu_set_target(vcpu, init); + if (ret) + return ret; + + /* + * Handle the "start in power-off" case by marking the VCPU as paused. 
+ */ + if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) + vcpu->arch.pause = true; + + return 0; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -713,8 +723,7 @@ if (copy_from_user(&init, argp, sizeof(init))) return -EFAULT; - return kvm_vcpu_set_target(vcpu, &init); - + return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); } case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { --- linux-3.13.0.orig/arch/arm/kvm/handle_exit.c +++ linux-3.13.0/arch/arm/kvm/handle_exit.c @@ -40,14 +40,18 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) { + int ret; + trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), kvm_vcpu_hvc_get_imm(vcpu)); - if (kvm_psci_call(vcpu)) + ret = kvm_psci_call(vcpu); + if (ret < 0) { + kvm_inject_undefined(vcpu); return 1; + } - kvm_inject_undefined(vcpu); - return 1; + return ret; } static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) @@ -91,6 +95,8 @@ else kvm_vcpu_block(vcpu); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + return 1; } --- linux-3.13.0.orig/arch/arm/kvm/init.S +++ linux-3.13.0/arch/arm/kvm/init.S @@ -98,6 +98,10 @@ mrc p15, 0, r0, c10, c2, 1 mcr p15, 4, r0, c10, c2, 1 + @ Invalidate the stale TLBs from Bootloader + mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH + dsb ish + @ Set the HSCTLR to: @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) @ - Endianness: Kernel config --- linux-3.13.0.orig/arch/arm/kvm/mmu.c +++ linux-3.13.0/arch/arm/kvm/mmu.c @@ -42,6 +42,8 @@ static unsigned long hyp_idmap_end; static phys_addr_t hyp_idmap_vector; +#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) + #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) @@ -145,7 +147,7 @@ pgd = pgdp + pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { - addr = pud_addr_end(addr, end); + addr = kvm_pud_addr_end(addr, end); continue; } @@ -155,13 +157,13 @@ * move on. 
*/ clear_pud_entry(kvm, pud, addr); - addr = pud_addr_end(addr, end); + addr = kvm_pud_addr_end(addr, end); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr = pmd_addr_end(addr, end); + addr = kvm_pmd_addr_end(addr, end); continue; } @@ -176,10 +178,10 @@ */ if (kvm_pmd_huge(*pmd) || page_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); - next = pmd_addr_end(addr, end); + next = kvm_pmd_addr_end(addr, end); if (page_empty(pmd) && !page_empty(pud)) { clear_pud_entry(kvm, pud, addr); - next = pud_addr_end(addr, end); + next = kvm_pud_addr_end(addr, end); } } @@ -187,6 +189,99 @@ } } +static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, + phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); + kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); + } + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) +{ + pmd_t *pmd; + phys_addr_t next; + + pmd = pmd_offset(pud, addr); + do { + next = kvm_pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (kvm_pmd_huge(*pmd)) { + hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); + kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); + } else { + stage2_flush_ptes(kvm, pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); +} + +static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, + phys_addr_t addr, phys_addr_t end) +{ + pud_t *pud; + phys_addr_t next; + + pud = pud_offset(pgd, addr); + do { + next = kvm_pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); + kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); + } else { + stage2_flush_pmds(kvm, pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); +} + +static void stage2_flush_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t end = addr + PAGE_SIZE * memslot->npages; + phys_addr_t next; + pgd_t *pgd; + + pgd = kvm->arch.pgd + pgd_index(addr); + do { + next = kvm_pgd_addr_end(addr, end); + stage2_flush_puds(kvm, pgd, addr, next); + } while (pgd++, addr = next, addr != end); +} + +/** + * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 + * @kvm: The struct kvm pointer + * + * Go through the stage 2 page tables and invalidate any cache lines + * backing memory already mapped to the VM. 
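/*
 * Illustrative sketch (not from the patch): the kvm_p*d_addr_end() helpers
 * used by the stage2_flush walkers above return whichever comes first, the
 * next table boundary or 'end', computed in 64 bits so large IPAs survive.
 * Standalone version with an assumed 2 MiB PMD size; the ex_/EX_ names and
 * the sample range are ours.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PMD_SIZE	(UINT64_C(1) << 21)	/* 2 MiB, example value */
#define EX_PMD_MASK	(~(EX_PMD_SIZE - 1))

static uint64_t ex_pmd_addr_end(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + EX_PMD_SIZE) & EX_PMD_MASK;

	/* same "- 1" comparison as the macro, so end == 0 (wrap) still works */
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	uint64_t addr = UINT64_C(0x100000), end = UINT64_C(0x500000);

	/* walks [1 MiB, 5 MiB) in chunks: ends at 2 MiB, 4 MiB, then 5 MiB */
	while (addr != end) {
		uint64_t next = ex_pmd_addr_end(addr, end);

		printf("[0x%" PRIx64 ", 0x%" PRIx64 ")\n", addr, next);
		addr = next;
	}
	return 0;
}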
+ */ +void stage2_flush_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + stage2_flush_memslot(kvm, memslot); + + spin_unlock(&kvm->mmu_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + /** * free_boot_hyp_pgd - free HYP boot page tables * @@ -199,14 +294,14 @@ if (boot_hyp_pgd) { unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); - kfree(boot_hyp_pgd); + free_pages((unsigned long)boot_hyp_pgd, pgd_order); boot_hyp_pgd = NULL; } if (hyp_pgd) unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); - kfree(init_bounce_page); + free_page((unsigned long)init_bounce_page); init_bounce_page = NULL; mutex_unlock(&kvm_hyp_pgd_mutex); @@ -236,7 +331,7 @@ for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); - kfree(hyp_pgd); + free_pages((unsigned long)hyp_pgd, pgd_order); hyp_pgd = NULL; } @@ -713,7 +808,7 @@ kvm_set_s2pmd_writable(&new_pmd); kvm_set_pfn_dirty(pfn); } - coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE); + coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); } else { pte_t new_pte = pfn_pte(pfn, PAGE_S2); @@ -721,7 +816,7 @@ kvm_set_s2pte_writable(&new_pte); kvm_set_pfn_dirty(pfn); } - coherent_icache_guest_page(kvm, hva, PAGE_SIZE); + coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); } @@ -928,7 +1023,7 @@ size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; phys_addr_t phys_base; - init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL); + init_bounce_page = (void *)__get_free_page(GFP_KERNEL); if (!init_bounce_page) { kvm_err("Couldn't allocate HYP init bounce page\n"); err = -ENOMEM; @@ -954,8 +1049,9 @@ (unsigned long)phys_base); } - hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); - boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); + hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); + boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order); + if (!hyp_pgd || !boot_hyp_pgd) { kvm_err("Hyp mode PGD not allocated\n"); err = -ENOMEM; --- linux-3.13.0.orig/arch/arm/kvm/psci.c +++ linux-3.13.0/arch/arm/kvm/psci.c @@ -27,6 +27,36 @@ * as described in ARM document number ARM DEN 0022A. */ +#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) + +static unsigned long psci_affinity_mask(unsigned long affinity_level) +{ + if (affinity_level <= 3) + return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level); + + return 0; +} + +static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) +{ + /* + * NOTE: For simplicity, we make VCPU suspend emulation to be + * same-as WFI (Wait-for-interrupt) emulation. + * + * This means for KVM the wakeup events are interrupts and + * this is consistent with intended use of StateID as described + * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A). + * + * Further, we also treat power-down request to be same as + * stand-by request as-per section 5.4.2 clause 3 of PSCI v0.2 + * specification (ARM DEN 0022A). This means all suspend states + * for KVM will preserve the register state. 
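/*
 * Illustrative sketch (not from the patch): AFFINITY_MASK(level) in the
 * kvm/psci.c hunk above clears the MPIDR affinity fields below 'level' so
 * only the higher levels take part in the AFFINITY_INFO comparison. A
 * standalone version, assuming 8-bit affinity fields and a 24-bit hardware
 * ID mask (Aff2..Aff0); the EX_/ex_ names are ours.
 */
#include <stdio.h>

#define EX_MPIDR_LEVEL_BITS	8
#define EX_MPIDR_HWID_BITMASK	0xffffffu

#define EX_AFFINITY_MASK(level) \
	(~((1u << ((level) * EX_MPIDR_LEVEL_BITS)) - 1))

static unsigned int ex_psci_affinity_mask(unsigned int lowest_affinity_level)
{
	if (lowest_affinity_level <= 3)
		return EX_MPIDR_HWID_BITMASK &
		       EX_AFFINITY_MASK(lowest_affinity_level);
	return 0;
}

int main(void)
{
	/* level 0 keeps Aff2..Aff0, level 1 drops Aff0, level 2 keeps Aff2 only */
	for (unsigned int lvl = 0; lvl <= 3; lvl++)
		printf("level %u -> mask 0x%06x\n", lvl, ex_psci_affinity_mask(lvl));
	return 0;
}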
+ */ + kvm_vcpu_block(vcpu); + + return PSCI_RET_SUCCESS; +} + static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) { vcpu->arch.pause = true; @@ -38,6 +68,7 @@ struct kvm_vcpu *vcpu = NULL, *tmp; wait_queue_head_t *wq; unsigned long cpu_id; + unsigned long context_id; unsigned long mpidr; phys_addr_t target_pc; int i; @@ -54,14 +85,21 @@ } } + /* + * Make sure the caller requested a valid CPU and that the CPU is + * turned off. + */ if (!vcpu) - return KVM_PSCI_RET_INVAL; + return PSCI_RET_INVALID_PARAMS; + if (!vcpu->arch.pause) { + if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) + return PSCI_RET_ALREADY_ON; + else + return PSCI_RET_INVALID_PARAMS; + } target_pc = *vcpu_reg(source_vcpu, 2); - - wq = kvm_arch_vcpu_wq(vcpu); - if (!waitqueue_active(wq)) - return KVM_PSCI_RET_INVAL; + context_id = *vcpu_reg(source_vcpu, 3); kvm_reset_vcpu(vcpu); @@ -76,25 +114,160 @@ kvm_vcpu_set_be(vcpu); *vcpu_pc(vcpu) = target_pc; + /* + * NOTE: We always update r0 (or x0) because for PSCI v0.1 + * the general puspose registers are undefined upon CPU_ON. + */ + *vcpu_reg(vcpu, 0) = context_id; vcpu->arch.pause = false; smp_mb(); /* Make sure the above is visible */ + wq = kvm_arch_vcpu_wq(vcpu); wake_up_interruptible(wq); - return KVM_PSCI_RET_SUCCESS; + return PSCI_RET_SUCCESS; } -/** - * kvm_psci_call - handle PSCI call if r0 value is in range - * @vcpu: Pointer to the VCPU struct - * - * Handle PSCI calls from guests through traps from HVC instructions. - * The calling convention is similar to SMC calls to the secure world where - * the function number is placed in r0 and this function returns true if the - * function number specified in r0 is withing the PSCI range, and false - * otherwise. - */ -bool kvm_psci_call(struct kvm_vcpu *vcpu) +static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) +{ + int i; + unsigned long mpidr; + unsigned long target_affinity; + unsigned long target_affinity_mask; + unsigned long lowest_affinity_level; + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *tmp; + + target_affinity = *vcpu_reg(vcpu, 1); + lowest_affinity_level = *vcpu_reg(vcpu, 2); + + /* Determine target affinity mask */ + target_affinity_mask = psci_affinity_mask(lowest_affinity_level); + if (!target_affinity_mask) + return PSCI_RET_INVALID_PARAMS; + + /* Ignore other bits of target affinity */ + target_affinity &= target_affinity_mask; + + /* + * If one or more VCPU matching target affinity are running + * then ON else OFF + */ + kvm_for_each_vcpu(i, tmp, kvm) { + mpidr = kvm_vcpu_get_mpidr(tmp); + if (((mpidr & target_affinity_mask) == target_affinity) && + !tmp->arch.pause) { + return PSCI_0_2_AFFINITY_LEVEL_ON; + } + } + + return PSCI_0_2_AFFINITY_LEVEL_OFF; +} + +static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) +{ + memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); + vcpu->run->system_event.type = type; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; +} + +static void kvm_psci_system_off(struct kvm_vcpu *vcpu) +{ + kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN); +} + +static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) +{ + kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); +} + +int kvm_psci_version(struct kvm_vcpu *vcpu) +{ + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) + return KVM_ARM_PSCI_0_2; + + return KVM_ARM_PSCI_0_1; +} + +static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) +{ + int ret = 1; + unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); + unsigned long val; + + switch (psci_fn) 
{ + case PSCI_0_2_FN_PSCI_VERSION: + /* + * Bits[31:16] = Major Version = 0 + * Bits[15:0] = Minor Version = 2 + */ + val = 2; + break; + case PSCI_0_2_FN_CPU_SUSPEND: + case PSCI_0_2_FN64_CPU_SUSPEND: + val = kvm_psci_vcpu_suspend(vcpu); + break; + case PSCI_0_2_FN_CPU_OFF: + kvm_psci_vcpu_off(vcpu); + val = PSCI_RET_SUCCESS; + break; + case PSCI_0_2_FN_CPU_ON: + case PSCI_0_2_FN64_CPU_ON: + val = kvm_psci_vcpu_on(vcpu); + break; + case PSCI_0_2_FN_AFFINITY_INFO: + case PSCI_0_2_FN64_AFFINITY_INFO: + val = kvm_psci_vcpu_affinity_info(vcpu); + break; + case PSCI_0_2_FN_MIGRATE: + case PSCI_0_2_FN64_MIGRATE: + val = PSCI_RET_NOT_SUPPORTED; + break; + case PSCI_0_2_FN_MIGRATE_INFO_TYPE: + /* + * Trusted OS is MP hence does not require migration + * or + * Trusted OS is not present + */ + val = PSCI_0_2_TOS_MP; + break; + case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU: + case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU: + val = PSCI_RET_NOT_SUPPORTED; + break; + case PSCI_0_2_FN_SYSTEM_OFF: + kvm_psci_system_off(vcpu); + /* + * We should'nt be going back to guest VCPU after + * receiving SYSTEM_OFF request. + * + * If user space accidently/deliberately resumes + * guest VCPU after SYSTEM_OFF request then guest + * VCPU should see internal failure from PSCI return + * value. To achieve this, we preload r0 (or x0) with + * PSCI return value INTERNAL_FAILURE. + */ + val = PSCI_RET_INTERNAL_FAILURE; + ret = 0; + break; + case PSCI_0_2_FN_SYSTEM_RESET: + kvm_psci_system_reset(vcpu); + /* + * Same reason as SYSTEM_OFF for preloading r0 (or x0) + * with PSCI return value INTERNAL_FAILURE. + */ + val = PSCI_RET_INTERNAL_FAILURE; + ret = 0; + break; + default: + return -EINVAL; + } + + *vcpu_reg(vcpu, 0) = val; + return ret; +} + +static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) { unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); unsigned long val; @@ -102,20 +275,45 @@ switch (psci_fn) { case KVM_PSCI_FN_CPU_OFF: kvm_psci_vcpu_off(vcpu); - val = KVM_PSCI_RET_SUCCESS; + val = PSCI_RET_SUCCESS; break; case KVM_PSCI_FN_CPU_ON: val = kvm_psci_vcpu_on(vcpu); break; case KVM_PSCI_FN_CPU_SUSPEND: case KVM_PSCI_FN_MIGRATE: - val = KVM_PSCI_RET_NI; + val = PSCI_RET_NOT_SUPPORTED; break; - default: - return false; + return -EINVAL; } *vcpu_reg(vcpu, 0) = val; - return true; + return 1; +} + +/** + * kvm_psci_call - handle PSCI call if r0 value is in range + * @vcpu: Pointer to the VCPU struct + * + * Handle PSCI calls from guests through traps from HVC instructions. + * The calling convention is similar to SMC calls to the secure world + * where the function number is placed in r0. 
+ * + * This function returns: > 0 (success), 0 (success but exit to user + * space), and < 0 (errors) + * + * Errors: + * -EINVAL: Unrecognized PSCI function + */ +int kvm_psci_call(struct kvm_vcpu *vcpu) +{ + switch (kvm_psci_version(vcpu)) { + case KVM_ARM_PSCI_0_2: + return kvm_psci_0_2_call(vcpu); + case KVM_ARM_PSCI_0_1: + return kvm_psci_0_1_call(vcpu); + default: + return -EINVAL; + }; } --- linux-3.13.0.orig/arch/arm/mach-at91/clock.c +++ linux-3.13.0/arch/arm/mach-at91/clock.c @@ -963,6 +963,7 @@ } at91_pmc_write(AT91_PMC_SCDR, scdr); + at91_pmc_write(AT91_PMC_PCDR, pcdr); if (cpu_is_sama5d3()) at91_pmc_write(AT91_PMC_PCDR1, pcdr1); --- linux-3.13.0.orig/arch/arm/mach-at91/pm.h +++ linux-3.13.0/arch/arm/mach-at91/pm.h @@ -45,7 +45,7 @@ " mcr p15, 0, %0, c7, c0, 4\n\t" " str %5, [%1, %2]" : - : "r" (0), "r" (AT91_BASE_SYS), "r" (AT91RM9200_SDRAMC_LPR), + : "r" (0), "r" (at91_ramc_base[0]), "r" (AT91RM9200_SDRAMC_LPR), "r" (1), "r" (AT91RM9200_SDRAMC_SRR), "r" (lpr)); } --- linux-3.13.0.orig/arch/arm/mach-at91/sam9_smc.c +++ linux-3.13.0/arch/arm/mach-at91/sam9_smc.c @@ -101,7 +101,7 @@ /* Pulse register */ val = __raw_readl(base + AT91_SMC_PULSE); - config->nwe_setup = val & AT91_SMC_NWEPULSE; + config->nwe_pulse = val & AT91_SMC_NWEPULSE; config->ncs_write_pulse = (val & AT91_SMC_NCS_WRPULSE) >> 8; config->nrd_pulse = (val & AT91_SMC_NRDPULSE) >> 16; config->ncs_read_pulse = (val & AT91_SMC_NCS_RDPULSE) >> 24; --- linux-3.13.0.orig/arch/arm/mach-highbank/Makefile +++ linux-3.13.0/arch/arm/mach-highbank/Makefile @@ -1,3 +1,5 @@ +KBUILD_CFLAGS += -I$(srctree)/arch/arm/mach-highbank/include + obj-y := highbank.o system.o smc.o plus_sec := $(call as-instr,.arch_extension sec,+sec) --- linux-3.13.0.orig/arch/arm/mach-imx/Makefile +++ linux-3.13.0/arch/arm/mach-imx/Makefile @@ -100,11 +100,9 @@ obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o -ifeq ($(CONFIG_PM),y) obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o # i.MX6SL reuses i.MX6Q code obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o -endif # i.MX5 based machines obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o --- linux-3.13.0.orig/arch/arm/mach-imx/clk-imx6q.c +++ linux-3.13.0/arch/arm/mach-imx/clk-imx6q.c @@ -161,7 +161,7 @@ post_div_table[1].div = 1; post_div_table[2].div = 1; video_div_table[1].div = 1; - video_div_table[2].div = 1; + video_div_table[3].div = 1; }; /* type name parent_name base div_mask */ @@ -479,6 +479,9 @@ if (IS_ENABLED(CONFIG_PCI_IMX6)) clk_set_parent(clk[lvds1_sel], clk[sata_ref]); + /* Set initial power mode */ + imx6q_set_lpm(WAIT_CLOCKED); + np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); base = of_iomap(np, 0); WARN_ON(!base); --- linux-3.13.0.orig/arch/arm/mach-imx/clk-imx6sl.c +++ linux-3.13.0/arch/arm/mach-imx/clk-imx6sl.c @@ -261,6 +261,9 @@ clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]); } + /* Set initial power mode */ + imx6q_set_lpm(WAIT_CLOCKED); + np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); base = of_iomap(np, 0); WARN_ON(!base); --- linux-3.13.0.orig/arch/arm/mach-imx/common.h +++ linux-3.13.0/arch/arm/mach-imx/common.h @@ -143,13 +143,11 @@ void imx_cpu_die(unsigned int cpu); int imx_cpu_kill(unsigned int cpu); -#ifdef CONFIG_PM void imx6q_pm_init(void); void imx6q_pm_set_ccm_base(void __iomem *base); +#ifdef CONFIG_PM void imx5_pm_init(void); #else -static inline void imx6q_pm_init(void) {} -static inline void imx6q_pm_set_ccm_base(void __iomem *base) {} static inline void imx5_pm_init(void) {} 
#endif --- linux-3.13.0.orig/arch/arm/mach-imx/devices/platform-ipu-core.c +++ linux-3.13.0/arch/arm/mach-imx/devices/platform-ipu-core.c @@ -77,7 +77,7 @@ pdev = platform_device_alloc("mx3-camera", 0); if (!pdev) - goto err; + return ERR_PTR(-ENOMEM); pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); if (!pdev->dev.dma_mask) --- linux-3.13.0.orig/arch/arm/mach-imx/pm-imx6q.c +++ linux-3.13.0/arch/arm/mach-imx/pm-imx6q.c @@ -228,8 +228,6 @@ regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT, IMX6Q_GPR1_GINT); - /* Set initial power mode */ - imx6q_set_lpm(WAIT_CLOCKED); suspend_set_ops(&imx6q_pm_ops); } --- linux-3.13.0.orig/arch/arm/mach-mvebu/Makefile +++ linux-3.13.0/arch/arm/mach-mvebu/Makefile @@ -3,7 +3,7 @@ AFLAGS_coherency_ll.o := -Wa,-march=armv7-a -obj-y += system-controller.o +obj-y += system-controller.o mvebu-soc-id.o obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o obj-$(CONFIG_ARCH_MVEBU) += coherency.o coherency_ll.o pmsu.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o --- linux-3.13.0.orig/arch/arm/mach-mvebu/armada-370-xp.c +++ linux-3.13.0/arch/arm/mach-mvebu/armada-370-xp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include "armada-370-xp.h" #include "common.h" #include "coherency.h" +#include "mvebu-soc-id.h" static void __init armada_370_xp_map_io(void) { @@ -45,8 +47,38 @@ #endif } +static void __init i2c_quirk(void) +{ + struct device_node *np; + u32 dev, rev; + + /* + * Only revisons more recent than A0 support the offload + * mechanism. We can exit only if we are sure that we can + * get the SoC revision and it is more recent than A0. + */ + if (mvebu_get_soc_id(&rev, &dev) == 0 && dev > MV78XX0_A0_REV) + return; + + for_each_compatible_node(np, NULL, "marvell,mv78230-i2c") { + struct property *new_compat; + + new_compat = kzalloc(sizeof(*new_compat), GFP_KERNEL); + + new_compat->name = kstrdup("compatible", GFP_KERNEL); + new_compat->length = sizeof("marvell,mv78230-a0-i2c"); + new_compat->value = kstrdup("marvell,mv78230-a0-i2c", + GFP_KERNEL); + + of_update_property(np, new_compat); + } + return; +} + static void __init armada_370_xp_dt_init(void) { + if (of_machine_is_compatible("plathome,openblocks-ax3-4")) + i2c_quirk(); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } --- linux-3.13.0.orig/arch/arm/mach-mvebu/mvebu-soc-id.c +++ linux-3.13.0/arch/arm/mach-mvebu/mvebu-soc-id.c @@ -0,0 +1,119 @@ +/* + * ID and revision information for mvebu SoCs + * + * Copyright (C) 2014 Marvell + * + * Gregory CLEMENT + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * All the mvebu SoCs have information related to their variant and + * revision that can be read from the PCI control register. This is + * done before the PCI initialization to avoid any conflict. Once the + * ID and revision are retrieved, the mapping is freed. 
+ */ + +#define pr_fmt(fmt) "mvebu-soc-id: " fmt + +#include +#include +#include +#include +#include +#include +#include "mvebu-soc-id.h" + +#define PCIE_DEV_ID_OFF 0x0 +#define PCIE_DEV_REV_OFF 0x8 + +#define SOC_ID_MASK 0xFFFF0000 +#define SOC_REV_MASK 0xFF + +static u32 soc_dev_id; +static u32 soc_rev; +static bool is_id_valid; + +static const struct of_device_id mvebu_pcie_of_match_table[] = { + { .compatible = "marvell,armada-xp-pcie", }, + { .compatible = "marvell,armada-370-pcie", }, + {}, +}; + +int mvebu_get_soc_id(u32 *dev, u32 *rev) +{ + if (is_id_valid) { + *dev = soc_dev_id; + *rev = soc_rev; + return 0; + } else + return -1; +} + +static int __init mvebu_soc_id_init(void) +{ + struct device_node *np; + int ret = 0; + void __iomem *pci_base; + struct clk *clk; + struct device_node *child; + + np = of_find_matching_node(NULL, mvebu_pcie_of_match_table); + if (!np) + return ret; + + /* + * ID and revision are available from any port, so we + * just pick the first one + */ + child = of_get_next_child(np, NULL); + if (child == NULL) { + pr_err("cannot get pci node\n"); + ret = -ENOMEM; + goto clk_err; + } + + clk = of_clk_get_by_name(child, NULL); + if (IS_ERR(clk)) { + pr_err("cannot get clock\n"); + ret = -ENOMEM; + goto clk_err; + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("cannot enable clock\n"); + goto clk_err; + } + + pci_base = of_iomap(child, 0); + if (pci_base == NULL) { + pr_err("cannot map registers\n"); + ret = -ENOMEM; + goto res_ioremap; + } + + /* SoC ID */ + soc_dev_id = readl(pci_base + PCIE_DEV_ID_OFF) >> 16; + + /* SoC revision */ + soc_rev = readl(pci_base + PCIE_DEV_REV_OFF) & SOC_REV_MASK; + + is_id_valid = true; + + pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev); + + iounmap(pci_base); + +res_ioremap: + clk_disable_unprepare(clk); + +clk_err: + of_node_put(child); + of_node_put(np); + + return ret; +} +core_initcall(mvebu_soc_id_init); + --- linux-3.13.0.orig/arch/arm/mach-mvebu/mvebu-soc-id.h +++ linux-3.13.0/arch/arm/mach-mvebu/mvebu-soc-id.h @@ -0,0 +1,32 @@ +/* + * Marvell EBU SoC ID and revision definitions. + * + * Copyright (C) 2014 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
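/*
 * Illustrative sketch (not from the patch): mvebu_soc_id_init() above reads
 * the SoC ID from the top 16 bits of the PCIe device/vendor word (offset 0)
 * and the revision from the low byte of the word at offset 8. Standalone
 * version of that decoding; the register values below are made up, not
 * dumped from hardware, and the ex_/EX_ names are ours.
 */
#include <stdio.h>

#define EX_SOC_REV_MASK 0xffu

static void ex_decode(unsigned int dev_id_reg, unsigned int dev_rev_reg)
{
	unsigned int soc_dev_id = dev_id_reg >> 16;	/* device ID in bits 31:16 */
	unsigned int soc_rev = dev_rev_reg & EX_SOC_REV_MASK;

	printf("SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);
}

int main(void)
{
	/* e.g. an MV78460 at revision 0x1 (A0) */
	ex_decode(0x784611ab, 0x11000001);	/* prints "SoC ID=0x7846, Rev=0x1" */
	return 0;
}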
+ */ + +#ifndef __LINUX_MVEBU_SOC_ID_H +#define __LINUX_MVEBU_SOC_ID_H + +/* Armada XP ID */ +#define MV78230_DEV_ID 0x7823 +#define MV78260_DEV_ID 0x7826 +#define MV78460_DEV_ID 0x7846 + +/* Armada XP Revision */ +#define MV78XX0_A0_REV 0x1 +#define MV78XX0_B0_REV 0x2 + +#ifdef CONFIG_ARCH_MVEBU +int mvebu_get_soc_id(u32 *dev, u32 *rev); +#else +static inline int mvebu_get_soc_id(u32 *dev, u32 *rev) +{ + return -1; +} +#endif + +#endif /* __LINUX_MVEBU_SOC_ID_H */ --- linux-3.13.0.orig/arch/arm/mach-omap2/board-flash.c +++ linux-3.13.0/arch/arm/mach-omap2/board-flash.c @@ -142,7 +142,7 @@ board_nand_data.nr_parts = nr_parts; board_nand_data.devsize = nand_type; - board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW; + board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW; gpmc_nand_init(&board_nand_data, gpmc_t); } #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */ --- linux-3.13.0.orig/arch/arm/mach-omap2/cclock3xxx_data.c +++ linux-3.13.0/arch/arm/mach-omap2/cclock3xxx_data.c @@ -454,7 +454,8 @@ .clkdm_name = "dpll4_clkdm", }; -DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops); +DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, + dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); static struct clk dpll4_m5x2_ck_3630 = { .name = "dpll4_m5x2_ck", --- linux-3.13.0.orig/arch/arm/mach-omap2/common.h +++ linux-3.13.0/arch/arm/mach-omap2/common.h @@ -236,6 +236,7 @@ extern void __init gic_init_irq(void); extern void gic_dist_disable(void); +extern void gic_dist_enable(void); extern bool gic_dist_disabled(void); extern void gic_timer_retrigger(void); extern void omap_smc1(u32 fn, u32 arg); --- linux-3.13.0.orig/arch/arm/mach-omap2/control.c +++ linux-3.13.0/arch/arm/mach-omap2/control.c @@ -314,7 +314,8 @@ scratchpad_contents.public_restore_ptr = virt_to_phys(omap3_restore_3630); else if (omap_rev() != OMAP3430_REV_ES3_0 && - omap_rev() != OMAP3430_REV_ES3_1) + omap_rev() != OMAP3430_REV_ES3_1 && + omap_rev() != OMAP3430_REV_ES3_1_2) scratchpad_contents.public_restore_ptr = virt_to_phys(omap3_restore); else --- linux-3.13.0.orig/arch/arm/mach-omap2/cpuidle44xx.c +++ linux-3.13.0/arch/arm/mach-omap2/cpuidle44xx.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -80,6 +81,8 @@ int index) { struct idle_statedata *cx = state_ptr + index; + int cpu_id = smp_processor_id(); + u32 mpuss_can_lose_context = 0; /* * CPU0 has to wait and stay ON until CPU1 is OFF state. @@ -104,6 +107,11 @@ } } + mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && + (cx->mpu_logic_state == PWRDM_POWER_OFF); + + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); + /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. @@ -118,9 +126,8 @@ * Call idle CPU cluster PM enter notifier chain * to save GIC and wakeupgen context. 
*/ - if ((cx->mpu_state == PWRDM_POWER_RET) && - (cx->mpu_logic_state == PWRDM_POWER_OFF)) - cpu_cluster_pm_enter(); + if (mpuss_can_lose_context) + cpu_cluster_pm_enter(); } omap4_enter_lowpower(dev->cpu, cx->cpu_state); @@ -128,9 +135,23 @@ /* Wakeup CPU1 only if it is not offlined */ if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) { + + if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && + mpuss_can_lose_context) + gic_dist_disable(); + clkdm_wakeup(cpu_clkdm[1]); omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON); clkdm_allow_idle(cpu_clkdm[1]); + + if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && + mpuss_can_lose_context) { + while (gic_dist_disabled()) { + udelay(1); + cpu_relax(); + } + gic_timer_retrigger(); + } } /* @@ -143,10 +164,11 @@ * Call idle CPU cluster PM exit notifier chain * to restore GIC and wakeupgen context. */ - if (dev->cpu == 0 && (cx->mpu_state == PWRDM_POWER_RET) && - (cx->mpu_logic_state == PWRDM_POWER_OFF)) + if (dev->cpu == 0 && mpuss_can_lose_context) cpu_cluster_pm_exit(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + fail: cpuidle_coupled_parallel_barrier(dev, &abort_barrier); cpu_done[dev->cpu] = false; @@ -154,6 +176,16 @@ return index; } +/* + * For each cpu, setup the broadcast timer because local timers + * stops for the states above C1. + */ +static void omap_setup_broadcast_timer(void *arg) +{ + int cpu = smp_processor_id(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); +} + static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, @@ -171,8 +203,7 @@ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ .exit_latency = 328 + 440, .target_residency = 960, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED | - CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, .enter = omap_enter_idle_coupled, .name = "C2", .desc = "CPUx OFF, MPUSS CSWR", @@ -181,8 +212,7 @@ /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ .exit_latency = 460 + 518, .target_residency = 1100, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED | - CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, .enter = omap_enter_idle_coupled, .name = "C3", .desc = "CPUx OFF, MPUSS OSWR", @@ -213,5 +243,8 @@ if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; + /* Configure the broadcast timer on each cpu */ + on_each_cpu(omap_setup_broadcast_timer, NULL, 1); + return cpuidle_register(&omap4_idle_driver, cpu_online_mask); } --- linux-3.13.0.orig/arch/arm/mach-omap2/gpmc.c +++ linux-3.13.0/arch/arm/mach-omap2/gpmc.c @@ -1339,7 +1339,7 @@ of_property_read_bool(np, "gpmc,time-para-granularity"); } -#ifdef CONFIG_MTD_NAND +#if IS_ENABLED(CONFIG_MTD_NAND) static const char * const nand_xfer_types[] = { [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", @@ -1429,7 +1429,7 @@ } #endif -#ifdef CONFIG_MTD_ONENAND +#if IS_ENABLED(CONFIG_MTD_ONENAND) static int gpmc_probe_onenand_child(struct platform_device *pdev, struct device_node *child) { --- linux-3.13.0.orig/arch/arm/mach-omap2/irq.c +++ linux-3.13.0/arch/arm/mach-omap2/irq.c @@ -222,6 +222,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs *regs) { u32 irqnr; + int handled_irq = 0; do { irqnr = readl_relaxed(base_addr + 0x98); @@ -249,8 +250,15 @@ if (irqnr) { irqnr = irq_find_mapping(domain, irqnr); handle_IRQ(irqnr, regs); + handled_irq = 1; } } while (irqnr); + + /* If an irq is masked or deasserted while active, we will + * keep ending up here with no irq 
handled. So remove it from + * the INTC with an ack.*/ + if (!handled_irq) + omap_ack_irq(NULL); } asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs) --- linux-3.13.0.orig/arch/arm/mach-omap2/mux.c +++ linux-3.13.0/arch/arm/mach-omap2/mux.c @@ -183,8 +183,10 @@ m0_entry = mux->muxnames[0]; /* First check for full name in mode0.muxmode format */ - if (mode0_len && strncmp(muxname, m0_entry, mode0_len)) - continue; + if (mode0_len) + if (strncmp(muxname, m0_entry, mode0_len) || + (strlen(m0_entry) != mode0_len)) + continue; /* Then check for muxmode only */ for (i = 0; i < OMAP_MUX_NR_MODES; i++) { --- linux-3.13.0.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ linux-3.13.0/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -271,6 +271,9 @@ else omap_pm_ops.finish_suspend(save_state); + if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu) + gic_dist_enable(); + /* * Restore the CPUx power state to ON otherwise CPUx * power domain can transitions to programmed low power --- linux-3.13.0.orig/arch/arm/mach-omap2/omap4-common.c +++ linux-3.13.0/arch/arm/mach-omap2/omap4-common.c @@ -127,6 +127,12 @@ __raw_writel(0x0, gic_dist_base_addr + GIC_DIST_CTRL); } +void gic_dist_enable(void) +{ + if (gic_dist_base_addr) + __raw_writel(0x1, gic_dist_base_addr + GIC_DIST_CTRL); +} + bool gic_dist_disabled(void) { return !(__raw_readl(gic_dist_base_addr + GIC_DIST_CTRL) & 0x1); --- linux-3.13.0.orig/arch/arm/mach-omap2/omap_hwmod.h +++ linux-3.13.0/arch/arm/mach-omap2/omap_hwmod.h @@ -41,6 +41,7 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1; extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2; extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3; +extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type4; /* * OCP SYSCONFIG bit shifts/masks TYPE1. These are for IPs compliant @@ -81,6 +82,16 @@ #define SYSC_TYPE3_MIDLEMODE_SHIFT 2 #define SYSC_TYPE3_MIDLEMODE_MASK (0x3 << SYSC_TYPE3_MIDLEMODE_SHIFT) +/* + * OCP SYSCONFIG bit shifts/masks TYPE4. 
+ */ +#define SYSC_TYPE4_SIDLEMODE_SHIFT 2 +#define SYSC_TYPE4_SIDLEMODE_MASK (0x3 << SYSC_TYPE4_SIDLEMODE_SHIFT) +#define SYSC_TYPE4_SOFTRESET_SHIFT 1 +#define SYSC_TYPE4_SOFTRESET_MASK (1 << SYSC_TYPE4_SOFTRESET_SHIFT) +#define SYSC_TYPE4_AUTOIDLE_SHIFT 0 +#define SYSC_TYPE4_AUTOIDLE_MASK (1 << SYSC_TYPE4_AUTOIDLE_SHIFT) + /* OCP SYSSTATUS bit shifts/masks */ #define SYSS_RESETDONE_SHIFT 0 #define SYSS_RESETDONE_MASK (1 << SYSS_RESETDONE_SHIFT) --- linux-3.13.0.orig/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ linux-3.13.0/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -209,6 +209,7 @@ .sysc_offs = 0x84, .syss_offs = 0x88, .sysc_flags = SYSS_HAS_RESET_STATUS, + .sysc_fields = &omap_hwmod_sysc_type4, }; static struct omap_hwmod_class am33xx_aes0_hwmod_class = { --- linux-3.13.0.orig/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ linux-3.13.0/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -4783,6 +4783,59 @@ .user = OCP_USER_MPU | OCP_USER_SDMA, }; +/* + Crypto modules AES0/1 belong to: + PD_L4_PER power domain + CD_L4_SEC clock domain + On the L3, the AES modules are mapped to + L3_CLK2: Peripherals and multimedia sub clock domain +*/ + +static struct omap_hwmod_class_sysconfig omap4_aes1_sysc = { + .rev_offs = 0x80, + .sysc_offs = 0x84, + .syss_offs = 0x88, + .sysc_flags = SYSS_HAS_RESET_STATUS, + .sysc_fields = &omap_hwmod_sysc_type4, +}; + +static struct omap_hwmod_class omap4_aes1_hwmod_class = { + .name = "aes1", + .sysc = &omap4_aes1_sysc, +}; + +static struct omap_hwmod omap4_aes1_hwmod = { + .name = "aes", + .class = &omap4_aes1_hwmod_class, + .clkdm_name = "l4_secure_clkdm", + .main_clk = "aes1_fck", + .prcm = { + .omap4 = { + .clkctrl_offs = OMAP4_CM_L4SEC_AES1_CLKCTRL_OFFSET, + .context_offs = OMAP4_RM_L4SEC_AES1_CONTEXT_OFFSET, + .modulemode = MODULEMODE_SWCTRL, + }, + }, +}; + +/* l3_main_2 -> aes1 */ +static struct omap_hwmod_addr_space omap4_aes1_addrs[] = { + { + .pa_start = 0x4B500000, + .pa_end = 0x4B500000 + SZ_1M - 1, + .flags = ADDR_TYPE_RT + }, + { } +}; + +static struct omap_hwmod_ocp_if omap4_l3_main_2__aes1 = { + .master = &omap44xx_l3_main_2_hwmod, + .slave = &omap4_aes1_hwmod, + .clk = "aes1_fck", + .addr = omap4_aes1_addrs, + .user = OCP_USER_MPU | OCP_USER_SDMA, +}; + static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l3_main_1__dmm, &omap44xx_mpu__dmm, @@ -4937,6 +4990,7 @@ &omap44xx_l4_abe__wd_timer3_dma, &omap44xx_mpu__emif1, &omap44xx_mpu__emif2, + &omap4_l3_main_2__aes1, NULL, }; --- linux-3.13.0.orig/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ linux-3.13.0/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -895,7 +895,7 @@ * current exception. */ - .flags = HWMOD_EXT_OPT_MAIN_CLK, + .flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE, .main_clk = "pad_clks_ck", .prcm = { .omap4 = { --- linux-3.13.0.orig/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ linux-3.13.0/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -1669,7 +1669,7 @@ .class = &dra7xx_uart_hwmod_class, .clkdm_name = "l4per_clkdm", .main_clk = "uart3_gfclk_mux", - .flags = HWMOD_SWSUP_SIDLE_ACT, + .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS, .prcm = { .omap4 = { .clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET, --- linux-3.13.0.orig/arch/arm/mach-omap2/omap_hwmod_common_data.c +++ linux-3.13.0/arch/arm/mach-omap2/omap_hwmod_common_data.c @@ -59,6 +59,16 @@ .sidle_shift = SYSC_TYPE3_SIDLEMODE_SHIFT, }; +/** + * struct omap_hwmod_sysc_type4 - TYPE4 sysconfig scheme. 
+ * Used by some IPs on AM33xx + */ +struct omap_hwmod_sysc_fields omap_hwmod_sysc_type4 = { + .sidle_shift = SYSC_TYPE4_SIDLEMODE_SHIFT, + .srst_shift = SYSC_TYPE4_SOFTRESET_SHIFT, + .autoidle_shift = SYSC_TYPE4_AUTOIDLE_SHIFT, +}; + struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = { .manager_count = 2, .has_framedonetv_irq = 0 --- linux-3.13.0.orig/arch/arm/mach-omap2/timer.c +++ linux-3.13.0/arch/arm/mach-omap2/timer.c @@ -513,11 +513,11 @@ rate = clk_get_rate(sys_clk); /* Numerator/denumerator values refer TRM Realtime Counter section */ switch (rate) { - case 1200000: + case 12000000: num = 64; den = 125; break; - case 1300000: + case 13000000: num = 768; den = 1625; break; @@ -529,11 +529,11 @@ num = 192; den = 625; break; - case 2600000: + case 26000000: num = 384; den = 1625; break; - case 2700000: + case 27000000: num = 256; den = 1125; break; --- linux-3.13.0.orig/arch/arm/mach-orion5x/common.h +++ linux-3.13.0/arch/arm/mach-orion5x/common.h @@ -21,7 +21,7 @@ #define ORION_MBUS_DEVBUS_BOOT_ATTR 0x0f #define ORION_MBUS_DEVBUS_TARGET(cs) 0x01 #define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs)) -#define ORION_MBUS_SRAM_TARGET 0x00 +#define ORION_MBUS_SRAM_TARGET 0x09 #define ORION_MBUS_SRAM_ATTR 0x00 /* --- linux-3.13.0.orig/arch/arm/mach-pxa/am300epd.c +++ linux-3.13.0/arch/arm/mach-pxa/am300epd.c @@ -30,6 +30,7 @@ #include #include +#include #include #include "generic.h" --- linux-3.13.0.orig/arch/arm/mach-pxa/corgi.c +++ linux-3.13.0/arch/arm/mach-pxa/corgi.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -711,6 +712,8 @@ sharpsl_nand_partitions[1].size = 53 * 1024 * 1024; platform_add_devices(devices, ARRAY_SIZE(devices)); + + regulator_has_full_constraints(); } static void __init fixup_corgi(struct tag *tags, char **cmdline, --- linux-3.13.0.orig/arch/arm/mach-pxa/hx4700.c +++ linux-3.13.0/arch/arm/mach-pxa/hx4700.c @@ -892,6 +892,8 @@ mdelay(10); gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1); mdelay(10); + + regulator_has_full_constraints(); } MACHINE_START(H4700, "HP iPAQ HX4700") --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/addr-map.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/addr-map.h @@ -39,6 +39,11 @@ #define DMEMC_SIZE 0x00100000 /* + * Reserved space for low level debug virtual addresses within + * 0xf6200000..0xf6201000 + */ + +/* * Internal Memory Controller (PXA27x and later) */ #define IMEMC_PHYS 0x58000000 --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/balloon3.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/balloon3.h @@ -14,6 +14,8 @@ #ifndef ASM_ARCH_BALLOON3_H #define ASM_ARCH_BALLOON3_H +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ + enum balloon3_features { BALLOON3_FEATURE_OHCI, BALLOON3_FEATURE_MMC, --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/corgi.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/corgi.h @@ -13,6 +13,7 @@ #ifndef __ASM_ARCH_CORGI_H #define __ASM_ARCH_CORGI_H 1 +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ /* * Corgi (Non Standard) GPIO Definitions --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/csb726.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/csb726.h @@ -11,6 +11,8 @@ #ifndef CSB726_H #define CSB726_H +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + #define CSB726_GPIO_IRQ_LAN 52 #define CSB726_GPIO_IRQ_SM501 53 #define CSB726_GPIO_MMC_DETECT 100 --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/gumstix.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/gumstix.h @@ -6,6 +6,7 @@ * published by the Free Software Foundation. 
*/ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* BTRESET - Reset line to Bluetooth module, active low signal. */ #define GPIO_GUMSTIX_BTRESET 7 --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/hx4700.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/hx4700.h @@ -14,6 +14,7 @@ #include #include +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ #define HX4700_ASIC3_GPIO_BASE PXA_NR_BUILTIN_GPIO #define HX4700_EGPIO_BASE (HX4700_ASIC3_GPIO_BASE + ASIC3_NUM_GPIOS) --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/idp.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/idp.h @@ -23,6 +23,7 @@ * IDP hardware. */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ #define IDP_FLASH_PHYS (PXA_CS0_PHYS) #define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS) --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/palmld.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/palmld.h @@ -13,6 +13,8 @@ #ifndef _INCLUDE_PALMLD_H_ #define _INCLUDE_PALMLD_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/palmt5.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/palmt5.h @@ -15,6 +15,8 @@ #ifndef _INCLUDE_PALMT5_H_ #define _INCLUDE_PALMT5_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/palmtc.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/palmtc.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTC_H_ #define _INCLUDE_PALMTC_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/palmtx.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/palmtx.h @@ -16,6 +16,8 @@ #ifndef _INCLUDE_PALMTX_H_ #define _INCLUDE_PALMTX_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /** HERE ARE GPIOs **/ /* GPIOs */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/pcm027.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/pcm027.h @@ -23,6 +23,8 @@ * Definitions of CPU card resources only */ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* phyCORE-PXA270 (PCM027) Interrupts */ #define PCM027_IRQ(x) (IRQ_BOARD_START + (x)) #define PCM027_BTDET_IRQ PCM027_IRQ(0) --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h @@ -20,6 +20,7 @@ */ #include +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ /* * definitions relevant only when the PCM-990 --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/poodle.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/poodle.h @@ -15,6 +15,8 @@ #ifndef __ASM_ARCH_POODLE_H #define __ASM_ARCH_POODLE_H 1 +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* * GPIOs */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/spitz.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/spitz.h @@ -15,8 +15,8 @@ #define __ASM_ARCH_SPITZ_H 1 #endif +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */ #include -#include /* Spitz/Akita GPIOs */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/tosa.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/tosa.h @@ -13,6 +13,8 @@ #ifndef _ASM_ARCH_TOSA_H_ #define _ASM_ARCH_TOSA_H_ 1 +#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */ + /* TOSA Chip selects */ #define TOSA_LCDC_PHYS PXA_CS4_PHYS /* Internel Scoop */ --- linux-3.13.0.orig/arch/arm/mach-pxa/include/mach/trizeps4.h +++ linux-3.13.0/arch/arm/mach-pxa/include/mach/trizeps4.h @@ -10,6 +10,8 @@ #ifndef _TRIPEPS4_H_ #define _TRIPEPS4_H_ +#include "irqs.h" /* PXA_GPIO_TO_IRQ */ + /* physical memory regions */ #define TRIZEPS4_FLASH_PHYS 
(PXA_CS0_PHYS) /* Flash region */ #define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */ --- linux-3.13.0.orig/arch/arm/mach-pxa/poodle.c +++ linux-3.13.0/arch/arm/mach-pxa/poodle.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -454,6 +455,7 @@ pxa_set_i2c_info(NULL); i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices)); poodle_init_spi(); + regulator_has_full_constraints(); } static void __init fixup_poodle(struct tag *tags, char **cmdline, --- linux-3.13.0.orig/arch/arm/mach-pxa/spitz.c +++ linux-3.13.0/arch/arm/mach-pxa/spitz.c @@ -969,6 +969,8 @@ spitz_nor_init(); spitz_nand_init(); spitz_i2c_init(); + + regulator_has_full_constraints(); } static void __init spitz_fixup(struct tag *tags, char **cmdline, --- linux-3.13.0.orig/arch/arm/mach-sa1100/include/mach/collie.h +++ linux-3.13.0/arch/arm/mach-sa1100/include/mach/collie.h @@ -13,6 +13,8 @@ #ifndef __ASM_ARCH_COLLIE_H #define __ASM_ARCH_COLLIE_H +#include "hardware.h" /* Gives GPIO_MAX */ + extern void locomolcd_power(int on); #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1) --- linux-3.13.0.orig/arch/arm/mach-sa1100/pm.c +++ linux-3.13.0/arch/arm/mach-sa1100/pm.c @@ -81,6 +81,7 @@ /* * Ensure not to come back here if it wasn't intended */ + RCSR = RCSR_SMR; PSPR = 0; /* --- linux-3.13.0.orig/arch/arm/mach-tegra/reset-handler.S +++ linux-3.13.0/arch/arm/mach-tegra/reset-handler.S @@ -51,6 +51,7 @@ THUMB( it ne ) bne cpu_resume @ no + tegra_get_soc_id TEGRA_APB_MISC_BASE, r6 /* Are we on Tegra20? */ cmp r6, #TEGRA20 beq 1f @ Yes --- linux-3.13.0.orig/arch/arm/mach-tegra/tegra.c +++ linux-3.13.0/arch/arm/mach-tegra/tegra.c @@ -74,10 +74,20 @@ static void __init tegra_init_cache(void) { #ifdef CONFIG_CACHE_L2X0 + static const struct of_device_id pl310_ids[] __initconst = { + { .compatible = "arm,pl310-cache", }, + {} + }; + + struct device_node *np; int ret; void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; u32 aux_ctrl, cache_type; + np = of_find_matching_node(NULL, pl310_ids); + if (!np) + return; + cache_type = readl(p + L2X0_CACHE_TYPE); aux_ctrl = (cache_type & 0x700) << (17-8); aux_ctrl |= 0x7C400001; --- linux-3.13.0.orig/arch/arm/mm/Kconfig +++ linux-3.13.0/arch/arm/mm/Kconfig @@ -446,7 +446,6 @@ config CPU_32v6 bool - select CPU_USE_DOMAINS if CPU_V6 && MMU select TLS_REG_EMUL if !CPU_32v6K && !MMU config CPU_32v6K @@ -671,7 +670,7 @@ config SWP_EMULATE bool "Emulate SWP/SWPB instructions" - depends on !CPU_USE_DOMAINS && CPU_V7 + depends on CPU_V7 default y if SMP select HAVE_PROC_CPU if PROC_FS help @@ -799,6 +798,7 @@ config KUSER_HELPERS bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS + depends on MMU default y help Warning: disabling this option may break user programs. --- linux-3.13.0.orig/arch/arm/mm/abort-ev6.S +++ linux-3.13.0/arch/arm/mm/abort-ev6.S @@ -17,12 +17,6 @@ */ .align 5 ENTRY(v6_early_abort) -#ifdef CONFIG_CPU_V6 - sub r1, sp, #4 @ Get unused stack location - strex r0, r1, [r1] @ Clear the exclusive monitor -#elif defined(CONFIG_CPU_32v6K) - clrex -#endif mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR /* --- linux-3.13.0.orig/arch/arm/mm/abort-ev7.S +++ linux-3.13.0/arch/arm/mm/abort-ev7.S @@ -13,12 +13,6 @@ */ .align 5 ENTRY(v7_early_abort) - /* - * The effect of data aborts on on the exclusive access monitor are - * UNPREDICTABLE. 
Do a CLREX to clear the state - */ - clrex - mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR --- linux-3.13.0.orig/arch/arm/mm/alignment.c +++ linux-3.13.0/arch/arm/mm/alignment.c @@ -40,6 +40,7 @@ * This code is not portable to processors with late data abort handling. */ #define CODING_BITS(i) (i & 0x0e000000) +#define COND_BITS(i) (i & 0xf0000000) #define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ #define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ @@ -817,6 +818,8 @@ break; case 0x04000000: /* ldr or str immediate */ + if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */ + goto bad; offset.un = OFFSET_BITS(instr); handler = do_alignment_ldrstr; break; --- linux-3.13.0.orig/arch/arm/mm/dma-mapping.c +++ linux-3.13.0/arch/arm/mm/dma-mapping.c @@ -1358,7 +1358,7 @@ *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); - if (gfp & GFP_ATOMIC) + if (!(gfp & __GFP_WAIT)) return __iommu_alloc_atomic(dev, size, handle); /* --- linux-3.13.0.orig/arch/arm/mm/hugetlbpage.c +++ linux-3.13.0/arch/arm/mm/hugetlbpage.c @@ -56,8 +56,3 @@ { return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); } - -int pmd_huge_support(void) -{ - return 1; -} --- linux-3.13.0.orig/arch/arm/mm/init.c +++ linux-3.13.0/arch/arm/mm/init.c @@ -345,10 +345,11 @@ #endif #ifdef CONFIG_BLK_DEV_INITRD /* FDT scan will populate initrd_start */ - if (initrd_start) { + if (initrd_start && !phys_initrd_size) { phys_initrd_start = __virt_to_phys(initrd_start); phys_initrd_size = initrd_end - initrd_start; } + initrd_start = initrd_end = 0; if (phys_initrd_size && !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", --- linux-3.13.0.orig/arch/arm/mm/mm.h +++ linux-3.13.0/arch/arm/mm/mm.h @@ -38,6 +38,7 @@ struct mem_type { pteval_t prot_pte; + pteval_t prot_pte_s2; pmdval_t prot_l1; pmdval_t prot_sect; unsigned int domain; --- linux-3.13.0.orig/arch/arm/mm/mmu.c +++ linux-3.13.0/arch/arm/mm/mmu.c @@ -231,12 +231,16 @@ #endif /* ifdef CONFIG_CPU_CP15 / else */ #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN +#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE static struct mem_type mem_types[] = { [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED, + .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) | + s2_policy(L_PTE_S2_MT_DEV_SHARED) | + L_PTE_SHARED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S, .domain = DOMAIN_IO, @@ -458,7 +462,18 @@ cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; s2_pgprot = cp->pte_s2; - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; + hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; + s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; + + /* + * We don't use domains on ARMv6 (since this causes problems with + * v6/v7 kernels), so we must use a separate memory type for user + * r/o, kernel r/w to map the vectors page. + */ +#ifndef CONFIG_ARM_LPAE + if (cpu_arch == CPU_ARCH_ARMv6) + vecs_pgprot |= L_PTE_MT_VECTORS; +#endif /* * ARMv6 and above have extended page tables. @@ -1337,8 +1352,8 @@ return; /* remap kernel code and data */ - map_start = init_mm.start_code; - map_end = init_mm.brk; + map_start = init_mm.start_code & PMD_MASK; + map_end = ALIGN(init_mm.brk, PMD_SIZE); /* get a handle on things... 
*/ pgd0 = pgd_offset_k(0); @@ -1373,7 +1388,7 @@ } /* remap pmds for kernel mapping */ - phys = __pa(map_start) & PMD_MASK; + phys = __pa(map_start); do { *pmdk++ = __pmd(phys | pmdprot); phys += PMD_SIZE; --- linux-3.13.0.orig/arch/arm/mm/proc-macros.S +++ linux-3.13.0/arch/arm/mm/proc-macros.S @@ -112,13 +112,9 @@ * 100x 1 0 1 r/o no acc * 10x0 1 0 1 r/o no acc * 1011 0 0 1 r/w no acc - * 110x 0 1 0 r/w r/o - * 11x0 0 1 0 r/w r/o - * 1111 0 1 1 r/w r/w - * - * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed: * 110x 1 1 1 r/o r/o * 11x0 1 1 1 r/o r/o + * 1111 0 1 1 r/w r/w */ .macro armv6_mt_table pfx \pfx\()_mt_table: @@ -137,7 +133,7 @@ .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED .long 0x00 @ unused .long 0x00 @ unused - .long 0x00 @ unused + .long PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX @ L_PTE_MT_VECTORS .endm .macro armv6_set_pte_ext pfx @@ -158,24 +154,21 @@ tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 -#ifdef CONFIG_CPU_USE_DOMAINS - @ allow kernel read/write access to read-only user pages tstne r3, #PTE_EXT_APX - bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 -#endif + + @ user read-only -> kernel read-only + bicne r3, r3, #PTE_EXT_AP0 tst r1, #L_PTE_XN orrne r3, r3, #PTE_EXT_XN - orr r3, r3, r2 + eor r3, r3, r2 tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_PRESENT moveq r3, #0 -#ifndef CONFIG_CPU_USE_DOMAINS tstne r1, #L_PTE_NONE movne r3, #0 -#endif str r3, [r0] mcr p15, 0, r0, c7, c10, 1 @ flush_pte --- linux-3.13.0.orig/arch/arm/mm/proc-v6.S +++ linux-3.13.0/arch/arm/mm/proc-v6.S @@ -208,7 +208,6 @@ mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer #ifdef CONFIG_MMU mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs mcr p15, 0, r0, c2, c0, 2 @ TTB control register @@ -218,6 +217,8 @@ ALT_UP(orr r8, r8, #TTB_FLAGS_UP) mcr p15, 0, r8, c2, c0, 1 @ load TTB1 #endif /* CONFIG_MMU */ + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and + @ complete invalidations adr r5, v6_crval ldmia r5, {r5, r6} ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables --- linux-3.13.0.orig/arch/arm/mm/proc-v7-2level.S +++ linux-3.13.0/arch/arm/mm/proc-v7-2level.S @@ -90,21 +90,14 @@ tst r1, #L_PTE_USER orrne r3, r3, #PTE_EXT_AP1 -#ifdef CONFIG_CPU_USE_DOMAINS - @ allow kernel read/write access to read-only user pages - tstne r3, #PTE_EXT_APX - bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 -#endif tst r1, #L_PTE_XN orrne r3, r3, #PTE_EXT_XN tst r1, #L_PTE_YOUNG tstne r1, #L_PTE_VALID -#ifndef CONFIG_CPU_USE_DOMAINS eorne r1, r1, #L_PTE_NONE tstne r1, #L_PTE_NONE -#endif moveq r3, #0 ARM( str r3, [r0, #2048]! 
) --- linux-3.13.0.orig/arch/arm/mm/proc-v7.S +++ linux-3.13.0/arch/arm/mm/proc-v7.S @@ -211,7 +211,6 @@ /* Auxiliary Debug Modes Control 1 Register */ #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ -#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */ #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ /* Auxiliary Debug Modes Control 2 Register */ @@ -234,7 +233,6 @@ /* Auxiliary Debug Modes Control 1 Register */ mrc p15, 1, r0, c15, c1, 1 orr r0, r0, #PJ4B_CLEAN_LINE - orr r0, r0, #PJ4B_BCK_OFF_STREX orr r0, r0, #PJ4B_INTER_PARITY bic r0, r0, #PJ4B_STATIC_BP mcr p15, 1, r0, c15, c1, 1 @@ -351,7 +349,6 @@ 4: mov r10, #0 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate - dsb #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup @@ -360,6 +357,7 @@ mcr p15, 0, r5, c10, c2, 0 @ write PRRR mcr p15, 0, r6, c10, c2, 1 @ write NMRR #endif + dsb @ Complete invalidations #ifndef CONFIG_ARM_THUMBEE mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE and r0, r0, #(0xf << 12) @ ThumbEE enabled field --- linux-3.13.0.orig/arch/arm/mm/proc-xscale.S +++ linux-3.13.0/arch/arm/mm/proc-xscale.S @@ -535,7 +535,7 @@ mrc p15, 0, r5, c15, c1, 0 @ CP access reg mrc p15, 0, r6, c13, c0, 0 @ PID mrc p15, 0, r7, c3, c0, 0 @ domain ID - mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg + mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg mrc p15, 0, r9, c1, c0, 0 @ control reg bic r4, r4, #2 @ clear frequency change bit stmia r0, {r4 - r9} @ store cp regs @@ -552,7 +552,7 @@ mcr p15, 0, r6, c13, c0, 0 @ PID mcr p15, 0, r7, c3, c0, 0 @ domain ID mcr p15, 0, r1, c2, c0, 0 @ translation table base addr - mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg + mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg mov r0, r9 @ control register b cpu_resume_mmu ENDPROC(cpu_xscale_do_resume) --- linux-3.13.0.orig/arch/arm/plat-orion/irq.c +++ linux-3.13.0/arch/arm/plat-orion/irq.c @@ -15,8 +15,51 @@ #include #include #include +#include #include #include +#include + +#ifdef CONFIG_MULTI_IRQ_HANDLER +/* + * Compiling with both non-DT and DT support enabled, will + * break asm irq handler used by non-DT boards. Therefore, + * we provide a C-style irq handler even for non-DT boards, + * if MULTI_IRQ_HANDLER is set. + * + * Notes: + * - this is prepared for Kirkwood and Dove only, update + * accordingly if you add Orion5x or MV78x00. + * - Orion5x uses different macro names and has only one + * set of CAUSE/MASK registers. + * - MV78x00 uses the same macro names but has a third + * set of CAUSE/MASK registers. 
+ * + */ + +static void __iomem *orion_irq_base = IRQ_VIRT_BASE; + +asmlinkage void +__exception_irq_entry orion_legacy_handle_irq(struct pt_regs *regs) +{ + u32 stat; + + stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_LOW_OFF); + stat &= readl_relaxed(orion_irq_base + IRQ_MASK_LOW_OFF); + if (stat) { + unsigned int hwirq = __fls(stat); + handle_IRQ(hwirq, regs); + return; + } + stat = readl_relaxed(orion_irq_base + IRQ_CAUSE_HIGH_OFF); + stat &= readl_relaxed(orion_irq_base + IRQ_MASK_HIGH_OFF); + if (stat) { + unsigned int hwirq = 32 + __fls(stat); + handle_IRQ(hwirq, regs); + return; + } +} +#endif void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr) { @@ -35,6 +78,10 @@ ct->chip.irq_unmask = irq_gc_mask_set_bit; irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); + +#ifdef CONFIG_MULTI_IRQ_HANDLER + set_handle_irq(orion_legacy_handle_irq); +#endif } #ifdef CONFIG_OF --- linux-3.13.0.orig/arch/arm/xen/grant-table.c +++ linux-3.13.0/arch/arm/xen/grant-table.c @@ -51,3 +51,8 @@ { return -ENOSYS; } + +int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status) +{ + return 0; +} --- linux-3.13.0.orig/arch/arm64/Kconfig +++ linux-3.13.0/arch/arm64/Kconfig @@ -2,6 +2,7 @@ def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_COMPAT_IPC_PARSE_VERSION select ARCH_WANT_FRAME_POINTERS @@ -12,6 +13,7 @@ select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS + select GENERIC_EARLY_IOREMAP select GENERIC_IOMAP select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW @@ -113,6 +115,8 @@ Express). config ARCH_XGENE + select PCI + select PCI_DOMAINS bool "AppliedMicro X-Gene SOC Family" help This enables support for AppliedMicro X-Gene SOC Family @@ -124,6 +128,23 @@ config ARM_AMBA bool +config PCI + bool "PCI support" + help + Find out whether you have a PCI motherboard. PCI is the name of a + bus system, i.e. the way the CPU talks to the other stuff inside + your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or + VESA. If you have PCI, say Y, otherwise N. + +config PCI_DOMAINS + bool + depends on PCI + +config PCI_SYSCALL + def_bool PCI + +source "drivers/pci/Kconfig" +source "drivers/pci/pcie/Kconfig" endmenu menu "Kernel Features" @@ -248,6 +269,20 @@ This is useful if you cannot or don't want to change the command-line options your boot loader passes to the kernel. +config EFI + bool "UEFI runtime support" + depends on OF && !CPU_BIG_ENDIAN + select LIBFDT + select UCS2_STRING + select EFI_PARAMS_FROM_FDT + default y + help + This option provides support for runtime services provided + by UEFI firmware (such as non-volatile variables, realtime + clock, and platform reset). A UEFI stub is also provided to + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. 
+ endmenu menu "Userspace binary formats" @@ -279,6 +314,10 @@ source "drivers/Kconfig" +source "ubuntu/Kconfig" + +source "drivers/firmware/Kconfig" + source "fs/Kconfig" source "arch/arm64/kvm/Kconfig" --- linux-3.13.0.orig/arch/arm64/boot/dts/apm-mustang.dts +++ linux-3.13.0/arch/arm64/boot/dts/apm-mustang.dts @@ -24,3 +24,23 @@ reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ }; }; + +&pcie0clk { + status = "ok"; +}; + +&pcie0 { + status = "ok"; +}; + +&menet { + status = "ok"; +}; + +&sgenet0 { + status = "ok"; +}; + +&xgenet { + status = "ok"; +}; --- linux-3.13.0.orig/arch/arm64/boot/dts/apm-storm.dtsi +++ linux-3.13.0/arch/arm64/boot/dts/apm-storm.dtsi @@ -97,12 +97,22 @@ clock-frequency = <50000000>; }; + pmu { + compatible = "apm,potenza-pmu", "arm,armv8-pmuv3"; + interrupts = <1 12 0xff04>; + }; + soc { compatible = "simple-bus"; #address-cells = <2>; #size-cells = <2>; ranges; + scu: system-clk-controller@17000000 { + compatible = "apm,xgene-scu","syscon"; + reg = <0x0 0x17000000 0x0 0x400>; + }; + clocks { #address-cells = <2>; #size-cells = <2>; @@ -118,7 +128,7 @@ compatible = "apm,xgene-pcppll-clock"; #clock-cells = <1>; clocks = <&refclk 0>; - clock-names = "pcppll"; + clock-names = "refclk"; reg = <0x0 0x17000100 0x0 0x1000>; clock-output-names = "pcppll"; type = <0>; @@ -128,7 +138,7 @@ compatible = "apm,xgene-socpll-clock"; #clock-cells = <1>; clocks = <&refclk 0>; - clock-names = "socpll"; + clock-names = "refclk"; reg = <0x0 0x17000120 0x0 0x1000>; clock-output-names = "socpll"; type = <1>; @@ -141,7 +151,7 @@ clock-names = "socplldiv2"; clock-mult = <1>; clock-div = <2>; - clock-output-names = "socplldiv2"; + clock-output-names = "socpll"; }; qmlclk: qmlclk { @@ -167,15 +177,320 @@ clock-output-names = "ethclk"; }; - eth8clk: eth8clk { + menetclk: menetclk { compatible = "apm,xgene-device-clock"; #clock-cells = <1>; clocks = <ðclk 0>; - clock-names = "eth8clk"; reg = <0x0 0x1702C000 0x0 0x1000>; reg-names = "csr-reg"; - clock-output-names = "eth8clk"; + clock-output-names = "menetclk"; + }; + + sge0clk: sge0clk@1f21c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f21c000 0x0 0x1000>; + reg-names = "csr-reg"; + csr-mask = <0x3>; + clock-output-names = "sge0clk"; + }; + + xge0clk: xge0clk@1f61c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f61c000 0x0 0x1000>; + reg-names = "csr-reg"; + csr-mask = <0x3>; + clock-output-names = "xge0clk"; + }; + + sataphy1clk: sataphy1clk@1f21c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f21c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "sataphy1clk"; + csr-offset = <0x4>; + csr-mask = <0x3a>; + enable-offset = <0x0>; + enable-mask = <0x06>; + }; + + sataphy2clk: sataphy1clk@1f22c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f22c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "sataphy2clk"; + csr-offset = <0x4>; + csr-mask = <0x3a>; + enable-offset = <0x0>; + enable-mask = <0x06>; + }; + + sataphy3clk: sataphy1clk@1f23c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f23c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "sataphy3clk"; + csr-offset = <0x4>; + csr-mask = <0x3a>; + enable-offset = <0x0>; + enable-mask = <0x06>; + }; + + 
sata01clk: sata01clk@1f21c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f21c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "sata01clk"; + csr-offset = <0x4>; + csr-mask = <0x05>; + enable-offset = <0x0>; + enable-mask = <0x39>; + }; + + sata23clk: sata23clk@1f22c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f22c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "sata23clk"; + csr-offset = <0x4>; + csr-mask = <0x05>; + enable-offset = <0x0>; + enable-mask = <0x39>; + }; + + sata45clk: sata45clk@1f23c000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f23c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "sata45clk"; + csr-offset = <0x4>; + csr-mask = <0x05>; + enable-offset = <0x0>; + enable-mask = <0x39>; + }; + + pcie0clk: pcie0clk@1f2bc000 { + status = "disabled"; + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f2bc000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "pcie0clk"; + }; + + pcie1clk: pcie1clk@1f2cc000 { + status = "disabled"; + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f2cc000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "pcie1clk"; + }; + + pcie2clk: pcie2clk@1f2dc000 { + status = "disabled"; + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f2dc000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "pcie2clk"; + }; + + pcie3clk: pcie3clk@1f50c000 { + status = "disabled"; + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f50c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "pcie3clk"; + }; + + pcie4clk: pcie4clk@1f51c000 { + status = "disabled"; + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x1f51c000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "pcie4clk"; }; + + rtcclk: rtcclk@17000000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x17000000 0x0 0x2000>; + reg-names = "csr-reg"; + csr-offset = <0xc>; + csr-mask = <0x2>; + enable-offset = <0x10>; + enable-mask = <0x2>; + clock-output-names = "rtcclk"; + }; + + rngpkaclk: rngpkaclk@17000000 { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + reg = <0x0 0x17000000 0x0 0x2000>; + reg-names = "csr-reg"; + csr-offset = <0xc>; + csr-mask = <0x10>; + enable-offset = <0x10>; + enable-mask = <0x10>; + clock-output-names = "rngpkaclk"; + }; + }; + + msi: msi@79000000 { + compatible = "xgene,gic-msi"; + reg = <0x00 0x79000000 0x0 0x900000>; + msi-available-ranges = <0x0 0x1000>; + interrupts = < 0x0 0x10 0x4 + 0x0 0x11 0x4 + 0x0 0x12 0x4 + 0x0 0x13 0x4 + 0x0 0x14 0x4 + 0x0 0x15 0x4 + 0x0 0x16 0x4 + 0x0 0x17 0x4 + 0x0 0x18 0x4 + 0x0 0x19 0x4 + 0x0 0x1a 0x4 + 0x0 0x1b 0x4 + 0x0 0x1c 0x4 + 0x0 0x1d 0x4 + 0x0 0x1e 0x4 + 0x0 0x1f 0x4>; + }; + + pcie0: pcie@1f2b0000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */ + 0xe0 0xd0000000 0x0 0x00200000>; /* PCI config space */ + 
reg-names = "csr", "cfg"; + ranges = <0x01000000 0x00 0x00000000 0xe0 0x00000000 0x00 0x00010000 /* io */ + 0x02000000 0x00 0x10000000 0xe0 0x10000000 0x00 0x80000000>; /* mem */ + ib-ranges = <0x42000000 0x40 0x00000000 0x40 0x00000000 0x40 0x00000000 /* dma */ + 0x00000000 0x0 0x79000000 0x00 0x79000000 0x0 0x00800000>; /* msi */ + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; + clocks = <&pcie0clk 0>; + }; + + pcie1: pcie@1f2c0000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */ + 0xd0 0xd0000000 0x0 0x00200000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x0 0x00000000 0xd0 0x00000000 0x00 0x00010000 /* io */ + 0x02000000 0x0 0x10000000 0xd0 0x10000000 0x00 0x80000000>; /* mem */ + ib-ranges = <0x42000000 0x40 0x00000000 0x40 0x00000000 0x40 0x00000000 /* dma */ + 0x00000000 0x0 0x79000000 0x00 0x79000000 0x0 0x00800000>; /* msi */ + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>; + clocks = <&pcie1clk 0>; + }; + + pcie2: pcie@1f2d0000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */ + 0x90 0xd0000000 0x0 0x00200000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x0 0x00000000 0x90 0x00000000 0x0 0x00010000 /* io */ + 0x02000000 0x0 0x10000000 0x90 0x10000000 0x0 0x80000000>; /* mem */ + ib-ranges = <0x42000000 0x40 0x00000000 0x40 0x00000000 0x40 0x00000000 /* dma */ + 0x00000000 0x0 0x79000000 0x00 0x79000000 0x0 0x00800000>; /* msi */ + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>; + clocks = <&pcie2clk 0>; + }; + + pcie3: pcie@1f500000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */ + 0xa0 0xd0000000 0x0 0x00200000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x0 0x00000000 0xa0 0x00000000 0x0 0x00010000 /* io */ + 0x02000000 0x0 0x10000000 0xa0 0x10000000 0x0 0x80000000>; /* mem */ + ib-ranges = <0x42000000 0x40 0x00000000 0x40 0x00000000 0x40 0x00000000 /* dma */ + 0x00000000 0x0 0x79000000 0x00 0x79000000 0x0 0x00800000>; /* msi */ + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>; + clocks = <&pcie3clk 0>; + }; + + pcie4: pcie@1f510000 { + status = "disabled"; + device_type = "pci"; + compatible = "apm,xgene-pcie"; + #interrupt-cells = <1>; + #size-cells = <2>; + #address-cells = <3>; + reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */ + 0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */ + reg-names = "csr", "cfg"; + ranges = <0x01000000 0x0 0x00000000 
0xc0 0x00000000 0x0 0x00010000 /* io */ + 0x02000000 0x0 0x10000000 0xc0 0x10000000 0x0 0x80000000>; /* mem */ + ib-ranges = <0x42000000 0x40 0x00000000 0x40 0x00000000 0x40 0x00000000 /* dma */ + 0x00000000 0x0 0x79000000 0x00 0x79000000 0x0 0x00800000>; /* msi */ + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x1 + 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x1 + 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x1 + 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>; + clocks = <&pcie4clk 0>; }; serial0: serial@1c020000 { @@ -187,5 +502,180 @@ interrupt-parent = <&gic>; interrupts = <0x0 0x4c 0x4>; }; + + serial1: serial@1c021000 { + status = "disabled"; + device_type = "serial"; + compatible = "ns16550a"; + reg = <0x0 0x1c021000 0x0 0x1000>; + reg-shift = <2>; + clock-frequency = <10000000>; /* Updated by bootloader */ + interrupt-parent = <&gic>; + interrupts = <0x0 0x4d 0x4>; + }; + + reboot@17000014 { + compatible = "syscon-reboot"; + regmap = <&scu>; + offset = <0x14>; + mask = <0x1>; + }; + + phy1: phy@1f21a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f21a000 0x0 0x100>; + #phy-cells = <1>; + clocks = <&sataphy1clk 0>; + status = "disabled"; + apm,tx-boost-gain = <30 30 30 30 30 30>; + apm,tx-eye-tuning = <2 10 10 2 10 10>; + }; + + phy2: phy@1f22a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f22a000 0x0 0x100>; + #phy-cells = <1>; + clocks = <&sataphy2clk 0>; + status = "ok"; + apm,tx-boost-gain = <30 30 30 30 30 30>; + apm,tx-eye-tuning = <1 10 10 2 10 10>; + }; + + phy3: phy@1f23a000 { + compatible = "apm,xgene-phy"; + reg = <0x0 0x1f23a000 0x0 0x100>; + #phy-cells = <1>; + clocks = <&sataphy3clk 0>; + status = "ok"; + apm,tx-boost-gain = <31 31 31 31 31 31>; + apm,tx-eye-tuning = <2 10 10 2 10 10>; + }; + + sata1: sata@1a000000 { + compatible = "apm,xgene-ahci"; + reg = <0x0 0x1a000000 0x0 0x1000>, + <0x0 0x1f210000 0x0 0x1000>, + <0x0 0x1f21d000 0x0 0x1000>, + <0x0 0x1f21e000 0x0 0x1000>, + <0x0 0x1f217000 0x0 0x1000>; + interrupts = <0x0 0x86 0x4>; + status = "disabled"; + clocks = <&sata01clk 0>; + phys = <&phy1 0>; + phy-names = "sata-phy"; + }; + + sata2: sata@1a400000 { + compatible = "apm,xgene-ahci"; + reg = <0x0 0x1a400000 0x0 0x1000>, + <0x0 0x1f220000 0x0 0x1000>, + <0x0 0x1f22d000 0x0 0x1000>, + <0x0 0x1f22e000 0x0 0x1000>, + <0x0 0x1f227000 0x0 0x1000>; + interrupts = <0x0 0x87 0x4>; + status = "ok"; + clocks = <&sata23clk 0>; + phys = <&phy2 0>; + phy-names = "sata-phy"; + }; + + sata3: sata@1a800000 { + compatible = "apm,xgene-ahci"; + reg = <0x0 0x1a800000 0x0 0x1000>, + <0x0 0x1f230000 0x0 0x1000>, + <0x0 0x1f23d000 0x0 0x1000>, + <0x0 0x1f23e000 0x0 0x1000>; + interrupts = <0x0 0x88 0x4>; + status = "ok"; + clocks = <&sata45clk 0>; + phys = <&phy3 0>; + phy-names = "sata-phy"; + }; + + rtc: rtc@10510000 { + compatible = "apm,xgene-rtc"; + reg = <0x0 0x10510000 0x0 0x400>; + interrupts = <0x0 0x46 0x4>; + #clock-cells = <1>; + clocks = <&rtcclk 0>; + }; + + dwgpio: dwgpio@1c024000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x0 0x1c024000 0x0 0x1000>; + reg-io-width = <4>; + #address-cells = <1>; + #size-cells = <0>; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + snps,nr-gpios = <32>; + reg = <0>; + }; + }; + + menet: ethernet@17020000 { + compatible = "apm,xgene-enet"; + status = "disabled"; + reg = <0x0 0x17020000 0x0 0xd100>, + <0x0 0X17030000 0x0 0Xc300>, + <0x0 0X10000000 0x0 0X200>; + reg-names = "enet_csr", "ring_csr", "ring_cmd"; + interrupts = <0x0 0x3c 0x4>; + dma-coherent; + clocks = 
<&menetclk 0>; + /* mac address will be overwritten by the bootloader */ + local-mac-address = [00 00 00 00 00 00]; + phy-connection-type = "rgmii"; + phy-handle = <&menetphy>; + mdio { + compatible = "apm,xgene-mdio"; + #address-cells = <1>; + #size-cells = <0>; + menetphy: menetphy@3 { + compatible = "ethernet-phy-id001c.c915"; + reg = <0x3>; + }; + + }; + }; + + sgenet0: ethernet@1f210000 { + compatible = "apm,xgene-enet"; + status = "disabled"; + reg = <0x0 0x1f210000 0x0 0xd100>, + <0x0 0x1f200000 0x0 0Xc300>, + <0x0 0x1B000000 0x0 0X200>; + reg-names = "enet_csr", "ring_csr", "ring_cmd"; + interrupts = <0x0 0xA0 0x4>; + dma-coherent; + clocks = <&sge0clk 0>; + local-mac-address = [00 00 00 00 00 00]; + phy-connection-type = "sgmii"; + }; + + xgenet: ethernet@1f610000 { + compatible = "apm,xgene-enet"; + status = "disabled"; + reg = <0x0 0x1f610000 0x0 0xd100>, + <0x0 0x1f600000 0x0 0Xc300>, + <0x0 0x18000000 0x0 0X200>; + reg-names = "enet_csr", "ring_csr", "ring_cmd"; + interrupts = <0x0 0x60 0x4>; + dma-coherent; + clocks = <&xge0clk 0>; + /* mac address will be overwritten by the bootloader */ + local-mac-address = [00 00 00 00 00 00]; + phy-connection-type = "xgmii"; + }; + + rng: rng@10520000 { + compatible = "apm,xgene-rng"; + reg = <0x0 0x10520000 0x0 0x100>; + interrupts = <0x0 0x41 0x4>; + clocks = <&rngpkaclk 0>; + }; + }; }; --- linux-3.13.0.orig/arch/arm64/include/asm/Kbuild +++ linux-3.13.0/arch/arm64/include/asm/Kbuild @@ -10,6 +10,7 @@ generic-y += div64.h generic-y += dma.h generic-y += emergency-restart.h +generic-y += early_ioremap.h generic-y += errno.h generic-y += ftrace.h generic-y += hw_irq.h @@ -28,7 +29,6 @@ generic-y += pci.h generic-y += percpu.h generic-y += poll.h -generic-y += posix_types.h generic-y += resource.h generic-y += scatterlist.h generic-y += sections.h --- linux-3.13.0.orig/arch/arm64/include/asm/arch_timer.h +++ linux-3.13.0/arch/arm64/include/asm/arch_timer.h @@ -135,6 +135,15 @@ #endif } +static inline u64 arch_counter_get_cntpct(void) +{ + /* + * AArch64 kernel and user space mandate the use of CNTVCT. 
+ */ + BUG(); + return 0; +} + static inline u64 arch_counter_get_cntvct(void) { u64 cval; --- linux-3.13.0.orig/arch/arm64/include/asm/atomic.h +++ linux-3.13.0/arch/arm64/include/asm/atomic.h @@ -64,7 +64,7 @@ int result; asm volatile("// atomic_add_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " add %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" @@ -72,6 +72,7 @@ : "Ir" (i) : "cc", "memory"); + smp_mb(); return result; } @@ -96,7 +97,7 @@ int result; asm volatile("// atomic_sub_return\n" -"1: ldaxr %w0, %2\n" +"1: ldxr %w0, %2\n" " sub %w0, %w0, %w3\n" " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" @@ -104,6 +105,7 @@ : "Ir" (i) : "cc", "memory"); + smp_mb(); return result; } @@ -112,17 +114,20 @@ unsigned long tmp; int oldval; + smp_mb(); + asm volatile("// atomic_cmpxchg\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " cmp %w1, %w3\n" " b.ne 2f\n" -" stlxr %w0, %w4, %2\n" +" stxr %w0, %w4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) : "cc", "memory"); + smp_mb(); return oldval; } @@ -183,7 +188,7 @@ unsigned long tmp; asm volatile("// atomic64_add_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " add %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" @@ -191,6 +196,7 @@ : "Ir" (i) : "cc", "memory"); + smp_mb(); return result; } @@ -215,7 +221,7 @@ unsigned long tmp; asm volatile("// atomic64_sub_return\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " sub %0, %0, %3\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b" @@ -223,6 +229,7 @@ : "Ir" (i) : "cc", "memory"); + smp_mb(); return result; } @@ -231,17 +238,20 @@ long oldval; unsigned long res; + smp_mb(); + asm volatile("// atomic64_cmpxchg\n" -"1: ldaxr %1, %2\n" +"1: ldxr %1, %2\n" " cmp %1, %3\n" " b.ne 2f\n" -" stlxr %w0, %4, %2\n" +" stxr %w0, %4, %2\n" " cbnz %w0, 1b\n" "2:" : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) : "Ir" (old), "r" (new) : "cc", "memory"); + smp_mb(); return oldval; } @@ -253,11 +263,12 @@ unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" -"1: ldaxr %0, %2\n" +"1: ldxr %0, %2\n" " subs %0, %0, #1\n" " b.mi 2f\n" " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" +" dmb ish\n" "2:" : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : --- linux-3.13.0.orig/arch/arm64/include/asm/cacheflush.h +++ linux-3.13.0/arch/arm64/include/asm/cacheflush.h @@ -116,6 +116,7 @@ static inline void __flush_icache_all(void) { asm("ic ialluis"); + dsb(); } #define flush_dcache_mmap_lock(mapping) \ --- linux-3.13.0.orig/arch/arm64/include/asm/cmpxchg.h +++ linux-3.13.0/arch/arm64/include/asm/cmpxchg.h @@ -29,7 +29,7 @@ switch (size) { case 1: asm volatile("// __xchg1\n" - "1: ldaxrb %w0, %2\n" + "1: ldxrb %w0, %2\n" " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) @@ -38,7 +38,7 @@ break; case 2: asm volatile("// __xchg2\n" - "1: ldaxrh %w0, %2\n" + "1: ldxrh %w0, %2\n" " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) @@ -47,7 +47,7 @@ break; case 4: asm volatile("// __xchg4\n" - "1: ldaxr %w0, %2\n" + "1: ldxr %w0, %2\n" " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) @@ -56,7 +56,7 @@ break; case 8: asm volatile("// __xchg8\n" - "1: ldaxr %0, %2\n" + "1: ldxr %0, %2\n" " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) @@ -67,6 +67,7 @@ BUILD_BUG(); } + smp_mb(); return ret; } --- linux-3.13.0.orig/arch/arm64/include/asm/compat.h +++ linux-3.13.0/arch/arm64/include/asm/compat.h @@ -37,8 +37,8 @@ 
typedef s32 compat_time_t; typedef s32 compat_clock_t; typedef s32 compat_pid_t; -typedef u32 __compat_uid_t; -typedef u32 __compat_gid_t; +typedef u16 __compat_uid_t; +typedef u16 __compat_gid_t; typedef u16 __compat_uid16_t; typedef u16 __compat_gid16_t; typedef u32 __compat_uid32_t; --- linux-3.13.0.orig/arch/arm64/include/asm/cpu_ops.h +++ linux-3.13.0/arch/arm64/include/asm/cpu_ops.h @@ -39,6 +39,7 @@ * from the cpu to be killed. * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. + * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu. */ struct cpu_operations { const char *name; @@ -49,6 +50,7 @@ #ifdef CONFIG_HOTPLUG_CPU int (*cpu_disable)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); + int (*cpu_kill)(unsigned int cpu); #endif }; --- linux-3.13.0.orig/arch/arm64/include/asm/cputype.h +++ linux-3.13.0/arch/arm64/include/asm/cputype.h @@ -30,6 +30,16 @@ #define MPIDR_HWID_BITMASK 0xff00ffffff +#define MPIDR_LEVEL_BITS_SHIFT 3 +#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT) +#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1) + +#define MPIDR_LEVEL_SHIFT(level) \ + (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT) + +#define MPIDR_AFFINITY_LEVEL(mpidr, level) \ + ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK) + #define read_cpuid(reg) ({ \ u64 __val; \ asm("mrs %0, " reg : "=r" (__val)); \ @@ -41,6 +51,7 @@ #define ARM_CPU_PART_AEM_V8 0xD0F0 #define ARM_CPU_PART_FOUNDATION 0xD000 +#define ARM_CPU_PART_CORTEX_A53 0xD030 #define ARM_CPU_PART_CORTEX_A57 0xD070 #define APM_CPU_PART_POTENZA 0x0000 --- linux-3.13.0.orig/arch/arm64/include/asm/dma-mapping.h +++ linux-3.13.0/arch/arm64/include/asm/dma-mapping.h @@ -26,8 +26,6 @@ #include #include -#define ARCH_HAS_DMA_GET_REQUIRED_MASK - #define DMA_ERROR_CODE (~(dma_addr_t)0) extern struct dma_map_ops *dma_ops; --- linux-3.13.0.orig/arch/arm64/include/asm/dma.h +++ linux-3.13.0/arch/arm64/include/asm/dma.h @@ -0,0 +1,18 @@ +/* + * Based on linux/arch/arm/include/asm/dma.h + */ +#ifndef __ASM_ARM_DMA_H +#define __ASM_ARM_DMA_H + +/* + * This is the maximum virtual address which can be DMA'd from. + */ +#define MAX_DMA_ADDRESS (~0ULL) + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* __ASM_ARM_DMA_H */ --- linux-3.13.0.orig/arch/arm64/include/asm/efi.h +++ linux-3.13.0/arch/arm64/include/asm/efi.h @@ -0,0 +1,15 @@ +#ifndef _ASM_EFI_H +#define _ASM_EFI_H + +#include + +#ifdef CONFIG_EFI +extern void efi_init(void); +extern void efi_idmap_init(void); +extern unsigned long arm64_efi_facility; +#else +#define efi_init() +#define efi_idmap_init() +#endif + +#endif /* _ASM_EFI_H */ --- linux-3.13.0.orig/arch/arm64/include/asm/fixmap.h +++ linux-3.13.0/arch/arm64/include/asm/fixmap.h @@ -0,0 +1,67 @@ +/* + * fixmap.h: compile-time virtual memory allocation + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1998 Ingo Molnar + * Copyright (C) 2013 Mark Salter + * + * Adapted from arch/x86_64 version. + * + */ + +#ifndef _ASM_ARM64_FIXMAP_H +#define _ASM_ARM64_FIXMAP_H + +#ifndef __ASSEMBLY__ +#include +#include + +/* + * Here we define all the compile-time 'special' virtual + * addresses. The point is to have a constant address at + * compile time, but to set the physical address only + * in the boot process. 
+ * + * These 'compile-time allocated' memory buffers are + * page-sized. Use set_fixmap(idx,phys) to associate + * physical memory with fixmap indices. + * + */ +enum fixed_addresses { + FIX_EARLYCON_MEM_BASE, + __end_of_permanent_fixed_addresses, + + /* + * Temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. + */ +#ifdef CONFIG_ARM64_64K_PAGES +#define NR_FIX_BTMAPS 4 +#else +#define NR_FIX_BTMAPS 64 +#endif +#define FIX_BTMAPS_SLOTS 7 +#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) + + FIX_BTMAP_END = __end_of_permanent_fixed_addresses, + FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE) + +extern void __early_set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags); + +#define __set_fixmap __early_set_fixmap + +#include + +#endif /* !__ASSEMBLY__ */ +#endif /* _ASM_ARM64_FIXMAP_H */ --- linux-3.13.0.orig/arch/arm64/include/asm/futex.h +++ linux-3.13.0/arch/arm64/include/asm/futex.h @@ -24,10 +24,11 @@ #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ asm volatile( \ -"1: ldaxr %w1, %2\n" \ +"1: ldxr %w1, %2\n" \ insn "\n" \ "2: stlxr %w3, %w0, %2\n" \ " cbnz %w3, 1b\n" \ +" dmb ish\n" \ "3:\n" \ " .pushsection .fixup,\"ax\"\n" \ "4: mov %w0, %w5\n" \ @@ -110,11 +111,12 @@ return -EFAULT; asm volatile("// futex_atomic_cmpxchg_inatomic\n" -"1: ldaxr %w1, %2\n" +"1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" " cbnz %w3, 3f\n" "2: stlxr %w3, %w5, %2\n" " cbnz %w3, 1b\n" +" dmb ish\n" "3:\n" " .pushsection .fixup,\"ax\"\n" "4: mov %w0, %w6\n" --- linux-3.13.0.orig/arch/arm64/include/asm/hw_breakpoint.h +++ linux-3.13.0/arch/arm64/include/asm/hw_breakpoint.h @@ -79,7 +79,6 @@ */ #define ARM_MAX_BRP 16 #define ARM_MAX_WRP 16 -#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP) /* Virtual debug register bases. */ #define AARCH64_DBG_REG_BVR 0 --- linux-3.13.0.orig/arch/arm64/include/asm/hwcap.h +++ linux-3.13.0/arch/arm64/include/asm/hwcap.h @@ -30,6 +30,7 @@ #define COMPAT_HWCAP_IDIVA (1 << 17) #define COMPAT_HWCAP_IDIVT (1 << 18) #define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT) +#define COMPAT_HWCAP_LPAE (1 << 20) #define COMPAT_HWCAP_EVTSTRM (1 << 21) #ifndef __ASSEMBLY__ --- linux-3.13.0.orig/arch/arm64/include/asm/io.h +++ linux-3.13.0/arch/arm64/include/asm/io.h @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -118,10 +119,27 @@ #define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) /* + * A typesafe __io() helper + */ +static inline void __iomem *__typesafe_io(unsigned long addr) +{ + return (void __iomem *)addr; +} + +/* * I/O port access primitives. 
*/ -#define IO_SPACE_LIMIT 0xffff #define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) +#if defined(CONFIG_PCI) +#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff) +#define __io(a) __typesafe_io((unsigned long)PCI_IOBASE + \ + ((a) & IO_SPACE_LIMIT)) +#else +#define __io(a) __typesafe_io((a) & IO_SPACE_LIMIT) +#endif +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *addr); +extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr); static inline u8 inb(unsigned long addr) { --- linux-3.13.0.orig/arch/arm64/include/asm/kvm_arm.h +++ linux-3.13.0/arch/arm64/include/asm/kvm_arm.h @@ -62,6 +62,7 @@ * RW: 64bit by default, can be overriden for 32bit VMs * TAC: Trap ACTLR * TSC: Trap SMC + * TVM: Trap VM ops (until M+C set in SCTLR_EL1) * TSW: Trap cache operations by set/way * TWE: Trap WFE * TWI: Trap WFI @@ -74,7 +75,7 @@ * SWIO: Turn set/way invalidates into set/way clean+invalidate */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ - HCR_BSU_IS | HCR_FB | HCR_TAC | \ + HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_IMO | HCR_FMO | \ HCR_SWIO | HCR_TIDCP | HCR_RW) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) --- linux-3.13.0.orig/arch/arm64/include/asm/kvm_asm.h +++ linux-3.13.0/arch/arm64/include/asm/kvm_asm.h @@ -79,7 +79,8 @@ #define c13_TID_URW (TPIDR_EL0 * 2) /* Thread ID, User R/W */ #define c13_TID_URO (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */ #define c13_TID_PRIV (TPIDR_EL1 * 2) /* Thread ID, Privileged */ -#define c10_AMAIR (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ +#define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ #define NR_CP15_REGS (NR_SYS_REGS * 2) --- linux-3.13.0.orig/arch/arm64/include/asm/kvm_host.h +++ linux-3.13.0/arch/arm64/include/asm/kvm_host.h @@ -26,7 +26,12 @@ #include #include -#define KVM_MAX_VCPUS 4 +#if defined(CONFIG_KVM_ARM_MAX_VCPUS) +#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS +#else +#define KVM_MAX_VCPUS 0 +#endif + #define KVM_USER_MEM_SLOTS 32 #define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 @@ -34,7 +39,7 @@ #include #include -#define KVM_VCPU_MAX_FEATURES 2 +#define KVM_VCPU_MAX_FEATURES 3 struct kvm_vcpu; int kvm_target_cpu(void); --- linux-3.13.0.orig/arch/arm64/include/asm/kvm_mmu.h +++ linux-3.13.0/arch/arm64/include/asm/kvm_mmu.h @@ -106,7 +106,6 @@ return true; } -static inline void kvm_clean_dcache_area(void *addr, size_t size) {} static inline void kvm_clean_pgd(pgd_t *pgd) {} static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} static inline void kvm_clean_pte(pte_t *pte) {} @@ -122,11 +121,25 @@ pmd_val(*pmd) |= PMD_S2_RDWR; } +#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end) +#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) +#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, - unsigned long size) +#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) + +static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) +{ + return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; +} + +static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, + unsigned long size) { + if (!vcpu_has_cache_enabled(vcpu)) + kvm_flush_dcache_to_poc((void *)hva, size); + if 
(!icache_is_aliasing()) { /* PIPT */ flush_icache_range(hva, hva + size); } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ @@ -135,7 +148,7 @@ } } -#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) +void stage2_flush_vm(struct kvm *kvm); #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ --- linux-3.13.0.orig/arch/arm64/include/asm/kvm_psci.h +++ linux-3.13.0/arch/arm64/include/asm/kvm_psci.h @@ -18,6 +18,10 @@ #ifndef __ARM64_KVM_PSCI_H__ #define __ARM64_KVM_PSCI_H__ -bool kvm_psci_call(struct kvm_vcpu *vcpu); +#define KVM_ARM_PSCI_0_1 1 +#define KVM_ARM_PSCI_0_2 2 + +int kvm_psci_version(struct kvm_vcpu *vcpu); +int kvm_psci_call(struct kvm_vcpu *vcpu); #endif /* __ARM64_KVM_PSCI_H__ */ --- linux-3.13.0.orig/arch/arm64/include/asm/memory.h +++ linux-3.13.0/arch/arm64/include/asm/memory.h @@ -49,13 +49,15 @@ #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) -#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) +#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE) #define TASK_SIZE_64 (UL(1) << VA_BITS) #ifdef CONFIG_COMPAT #define TASK_SIZE_32 UL(0x100000000) #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) #else #define TASK_SIZE TASK_SIZE_64 #endif /* CONFIG_COMPAT */ --- linux-3.13.0.orig/arch/arm64/include/asm/mmu.h +++ linux-3.13.0/arch/arm64/include/asm/mmu.h @@ -27,5 +27,8 @@ extern void paging_init(void); extern void setup_mm_for_reboot(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); +extern void init_mem_pgprot(void); +/* create an identity mapping for memory (or io if map_io is true) */ +extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io); #endif --- linux-3.13.0.orig/arch/arm64/include/asm/mmu_context.h +++ linux-3.13.0/arch/arm64/include/asm/mmu_context.h @@ -151,6 +151,15 @@ { unsigned int cpu = smp_processor_id(); + /* + * init_mm.pgd does not contain any user mappings and it is always + * active for kernel addresses in TTBR1. Just set the reserved TTBR0. + */ + if (next == &init_mm) { + cpu_set_reserved_ttbr0(); + return; + } + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) check_and_switch_context(next, tsk); } --- linux-3.13.0.orig/arch/arm64/include/asm/msi_bitmap.h +++ linux-3.13.0/arch/arm64/include/asm/msi_bitmap.h @@ -0,0 +1,36 @@ +#ifndef MSI_BITMAP_H +#define MSI_BITMAP_H + +/* + * Copyright 2008, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. 
+ * + * Borrowed from powerpc arch + */ + +#include +#include + +struct msi_bitmap { + struct device_node *of_node; + unsigned long *bitmap; + spinlock_t lock; + unsigned int irq_count; +}; + +int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num); +void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, + unsigned int num); +void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq); + +int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp); + +int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, + struct device_node *of_node); +void msi_bitmap_free(struct msi_bitmap *bmp); + +#endif /* MSI_BITMAP_H */ --- linux-3.13.0.orig/arch/arm64/include/asm/pci.h +++ linux-3.13.0/arch/arm64/include/asm/pci.h @@ -0,0 +1,74 @@ +/* + * Based on arch/arm/include/asm/pci.h + */ +#ifndef ASMARM_PCI_H +#define ASMARM_PCI_H + +#ifdef __KERNEL__ +#include +#include + +#include /* for pci_sys_data */ + +extern unsigned long pcibios_min_io; +#define PCIBIOS_MIN_IO pcibios_min_io +extern unsigned long pcibios_min_mem; +#define PCIBIOS_MIN_MEM pcibios_min_mem + +static inline int pcibios_assign_all_busses(void) +{ + return pci_has_flag(PCI_REASSIGN_ALL_RSRC); +} + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_domain_nr(struct pci_bus *bus) +{ + struct pci_sys_data *root = bus->sysdata; + + return root->domain; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return pci_domain_nr(bus); +} +#endif /* CONFIG_PCI_DOMAINS */ + +static inline void pcibios_penalize_isa_irq(int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +/* + * The PCI address space does equal the physical memory address space. + * The networking and block device layers use this boolean for bounce + * buffer decisions. + */ +#define PCI_DMA_BUS_IS_PHYS (1) + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + *strat = PCI_DMA_BURST_INFINITY; + *strategy_parameter = ~0UL; +} +#endif + +#define HAVE_PCI_MMAP +extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, + int write_combine); + +/* + * Dummy implementation; always return 0. + */ +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return 0; +} + +#endif /* __KERNEL__ */ + +#endif --- linux-3.13.0.orig/arch/arm64/include/asm/pcibios.h +++ linux-3.13.0/arch/arm64/include/asm/pcibios.h @@ -0,0 +1,98 @@ +/* + * Based on arch/arm/include/asm/mach/pci.h + * + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ASM_PCIBIOS_H +#define __ASM_PCIBIOS_H + +#include + +struct pci_sys_data; +struct pci_ops; +struct pci_bus; +struct device; + +struct hw_pci { +#ifdef CONFIG_PCI_DOMAINS + int domain; +#endif + struct pci_ops *ops; + int nr_controllers; + void **private_data; + int (*setup) (int nr, struct pci_sys_data *); + struct pci_bus *(*scan) (int nr, struct pci_sys_data *); + void (*preinit) (void); + void (*postinit) (void); + u8(*swizzle) (struct pci_dev *dev, u8 *pin); + int (*map_irq) (const struct pci_dev *dev, u8 slot, u8 pin); + resource_size_t(*align_resource) (struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); + void (*add_bus) (struct pci_bus *bus); + void (*remove_bus) (struct pci_bus *bus); +}; + +/* + * Per-controller structure + */ +struct pci_sys_data { +#ifdef CONFIG_PCI_DOMAINS + int domain; +#endif + struct list_head node; + int busnr; /* primary bus number */ + u64 mem_offset; /* bus->cpu memory mapping offset */ + unsigned long io_offset; /* bus->cpu IO mapping offset */ + struct pci_bus *bus; /* PCI bus */ + struct list_head resources; /* root bus resources (apertures) */ + struct resource io_res; + char io_res_name[12]; + /* Bridge swizzling */ + u8(*swizzle) (struct pci_dev *, u8 *); + /* IRQ mapping */ + int (*map_irq) (const struct pci_dev *, u8, u8); + /* Resource alignement requirements */ + resource_size_t(*align_resource) (struct pci_dev *dev, + const struct resource *res, + resource_size_t start, + resource_size_t size, + resource_size_t align); + void (*add_bus) (struct pci_bus *bus); + void (*remove_bus) (struct pci_bus *bus); + void *private_data; /* platform controller private data */ +}; + +/* + * Call this with your hw_pci struct to initialise the PCI system. + */ +void pci_common_init_dev(struct device *, struct hw_pci *); + +/* + * Compatibility wrapper for older platforms that do not care about + * passing the parent device. + */ +static inline void pci_common_init(struct hw_pci *hw) +{ + pci_common_init_dev(NULL, hw); +} + +/* + * Setup early fixed I/O mapping. + */ +#if defined(CONFIG_PCI) +extern void pci_map_io_early(unsigned long pfn); +#else +static inline void pci_map_io_early(unsigned long pfn) +{ +} +#endif + +#endif /* __ASM_PCIBIOS_H */ --- linux-3.13.0.orig/arch/arm64/include/asm/pgtable.h +++ linux-3.13.0/arch/arm64/include/asm/pgtable.h @@ -136,10 +136,10 @@ /* * The following only work if pte_present(). Undefined behaviour otherwise. */ -#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) -#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & PTE_AF) -#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) +#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) +#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) +#define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) +#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) @@ -168,7 +168,7 @@ pte_t *ptep, pte_t pte) { if (pte_valid_user(pte)) { - if (pte_exec(pte)) + if (!pte_special(pte) && pte_exec(pte)) __sync_icache_dcache(pte, addr); if (!pte_dirty(pte)) pte = pte_wrprotect(pte); @@ -253,11 +253,11 @@ * Mark the prot value as uncacheable and unbufferable. 
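/*
 * [Editorial sketch, not part of the patch] Why the pte accessors above gain
 * a double logical negation: without it the macro yields the raw masked
 * value, and a caller that stores the result in an int silently drops any
 * flag that lives above bit 31. The bit position below is the editor's
 * assumption, used only to make the truncation visible.
 */
#include <stdio.h>
#include <stdint.h>

#define PTE_PROT_NONE	(UINT64_C(1) << 58)	/* a high software PTE bit */

static int present_old(uint64_t pte) { return (int)(pte & PTE_PROT_NONE); }
static int present_new(uint64_t pte) { return !!(pte & PTE_PROT_NONE); }

int main(void)
{
	uint64_t pte = PTE_PROT_NONE;

	/* prints "old: 0  new: 1" */
	printf("old: %d  new: %d\n", present_old(pte), present_new(pte));
	return 0;
}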
*/ #define pgprot_noncached(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) #define pgprot_writecombine(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) #define pgprot_dmacoherent(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)) + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, --- linux-3.13.0.orig/arch/arm64/include/uapi/asm/kvm.h +++ linux-3.13.0/arch/arm64/include/uapi/asm/kvm.h @@ -31,6 +31,7 @@ #define KVM_NR_SPSR 5 #ifndef __ASSEMBLY__ +#include #include #include @@ -55,8 +56,10 @@ #define KVM_ARM_TARGET_AEM_V8 0 #define KVM_ARM_TARGET_FOUNDATION_V8 1 #define KVM_ARM_TARGET_CORTEX_A57 2 +#define KVM_ARM_TARGET_XGENE_POTENZA 3 +#define KVM_ARM_TARGET_CORTEX_A53 4 -#define KVM_ARM_NUM_TARGETS 3 +#define KVM_ARM_NUM_TARGETS 5 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ #define KVM_ARM_DEVICE_TYPE_SHIFT 0 @@ -76,6 +79,7 @@ #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ +#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ struct kvm_vcpu_init { __u32 target; @@ -158,10 +162,10 @@ #define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) #define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) -#define KVM_PSCI_RET_SUCCESS 0 -#define KVM_PSCI_RET_NI ((unsigned long)-1) -#define KVM_PSCI_RET_INVAL ((unsigned long)-2) -#define KVM_PSCI_RET_DENIED ((unsigned long)-3) +#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS +#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED +#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS +#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED #endif --- linux-3.13.0.orig/arch/arm64/include/uapi/asm/posix_types.h +++ linux-3.13.0/arch/arm64/include/uapi/asm/posix_types.h @@ -0,0 +1,10 @@ +#ifndef __ASM_POSIX_TYPES_H +#define __ASM_POSIX_TYPES_H + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +#define __kernel_old_uid_t __kernel_old_uid_t + +#include + +#endif /* __ASM_POSIX_TYPES_H */ --- linux-3.13.0.orig/arch/arm64/kernel/Makefile +++ linux-3.13.0/arch/arm64/kernel/Makefile @@ -4,6 +4,8 @@ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) +CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) \ + -I$(src)/../../../scripts/dtc/libfdt # Object file lists. 
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ @@ -18,7 +20,10 @@ arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o +obj-$(CONFIG_PCI) += pcibios.o +obj-$(CONFIG_PCI_MSI) += msi_bitmap.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) head-y := head.o --- linux-3.13.0.orig/arch/arm64/kernel/early_printk.c +++ linux-3.13.0/arch/arm64/kernel/early_printk.c @@ -26,6 +26,8 @@ #include #include +#include + static void __iomem *early_base; static void (*printch)(char ch); @@ -142,7 +144,7 @@ /* no options parsing yet */ if (paddr) - early_base = early_io_map(paddr, EARLYCON_IOBASE); + early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr); printch = match->printch; early_console = &early_console_dev; --- linux-3.13.0.orig/arch/arm64/kernel/efi-entry.S +++ linux-3.13.0/arch/arm64/kernel/efi-entry.S @@ -0,0 +1,109 @@ +/* + * EFI entry point. + * + * Copyright (C) 2013, 2014 Red Hat, Inc. + * Author: Mark Salter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include +#include + +#include + +#define EFI_LOAD_ERROR 0x8000000000000001 + + __INIT + + /* + * We arrive here from the EFI boot manager with: + * + * * CPU in little-endian mode + * * MMU on with identity-mapped RAM + * * Icache and Dcache on + * + * We will most likely be running from some place other than where + * we want to be. The kernel image wants to be placed at TEXT_OFFSET + * from start of RAM. + */ +ENTRY(efi_stub_entry) + /* + * Create a stack frame to save FP/LR with extra space + * for image_addr variable passed to efi_entry(). + */ + stp x29, x30, [sp, #-32]! + + /* + * Call efi_entry to do the real work. + * x0 and x1 are already set up by firmware. Current runtime + * address of image is calculated and passed via *image_addr. + * + * unsigned long efi_entry(void *handle, + * efi_system_table_t *sys_table, + * unsigned long *image_addr) ; + */ + adrp x8, _text + add x8, x8, #:lo12:_text + add x2, sp, 16 + str x8, [x2] + bl efi_entry + cmn x0, #1 + b.eq efi_load_fail + + /* + * efi_entry() will have relocated the kernel image if necessary + * and we return here with device tree address in x0 and the kernel + * entry point stored at *image_addr. Save those values in registers + * which are callee preserved. + */ + mov x20, x0 // DTB address + ldr x0, [sp, #16] // relocated _text address + mov x21, x0 + + /* + * Flush dcache covering current runtime addresses + * of kernel text/data. Then flush all of icache. 
+ */ + adrp x1, _text + add x1, x1, #:lo12:_text + adrp x2, _edata + add x2, x2, #:lo12:_edata + sub x1, x2, x1 + + bl __flush_dcache_area + ic ialluis + + /* Turn off Dcache and MMU */ + mrs x0, CurrentEL + cmp x0, #PSR_MODE_EL2t + ccmp x0, #PSR_MODE_EL2h, #0x4, ne + b.ne 1f + mrs x0, sctlr_el2 + bic x0, x0, #1 << 0 // clear SCTLR.M + bic x0, x0, #1 << 2 // clear SCTLR.C + msr sctlr_el2, x0 + isb + b 2f +1: + mrs x0, sctlr_el1 + bic x0, x0, #1 << 0 // clear SCTLR.M + bic x0, x0, #1 << 2 // clear SCTLR.C + msr sctlr_el1, x0 + isb +2: + /* Jump to kernel entry point */ + mov x0, x20 + mov x1, xzr + mov x2, xzr + mov x3, xzr + br x21 + +efi_load_fail: + mov x0, #EFI_LOAD_ERROR + ldp x29, x30, [sp], #32 + ret + +ENDPROC(efi_stub_entry) --- linux-3.13.0.orig/arch/arm64/kernel/efi-stub.c +++ linux-3.13.0/arch/arm64/kernel/efi-stub.c @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2013, 2014 Linaro Ltd; + * + * This file implements the EFI boot stub for the arm64 kernel. + * Adapted from ARM version by Mark Salter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include +#include +#include +#include +#include + +/* + * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from + * start of kernel and may not cross a 2MiB boundary. We set alignment to + * 2MiB so we know it won't cross a 2MiB boundary. + */ +#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */ +#define MAX_FDT_OFFSET SZ_512M + +#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) + +#define efi_call_phys1(f, arg1) f(arg1) +#define efi_call_phys2(f, arg1, arg2) f(arg1, arg2) +#define efi_call_phys3(f, arg1, arg2, arg3) f(arg1, arg2, arg3) +#define efi_call_phys4(f, arg1, arg2, arg3, arg4) f(arg1, arg2, arg3, arg4) +#define efi_call_phys5(f, arg1, arg2, arg3, arg4, arg5) f(arg1, arg2, arg3, arg4, arg5) + +static void efi_char16_printk(efi_system_table_t *sys_table_arg, + efi_char16_t *str); + +static efi_status_t efi_open_volume(efi_system_table_t *sys_table, + void *__image, void **__fh); +static efi_status_t efi_file_close(void *handle); + +static efi_status_t +efi_file_read(void *handle, unsigned long *size, void *addr); + +static efi_status_t +efi_file_size(efi_system_table_t *sys_table, void *__fh, + efi_char16_t *filename_16, void **handle, u64 *file_sz); + +/* Include shared EFI stub code */ +#include "../../../drivers/firmware/efi/efi-stub-helper.c" +#include "../../../drivers/firmware/efi/fdt.c" +#include "../../../drivers/firmware/efi/arm-stub.c" + + +static efi_status_t handle_kernel_image(efi_system_table_t *sys_table, + unsigned long *image_addr, + unsigned long *image_size, + unsigned long *reserve_addr, + unsigned long *reserve_size, + unsigned long dram_base, + efi_loaded_image_t *image) +{ + efi_status_t status; + unsigned long kernel_size, kernel_memsize = 0; + + /* Relocate the image, if required. 
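/*
 * [Editorial sketch, not part of the patch] The placement rule implemented by
 * handle_kernel_image() here, restated as plain arithmetic: the Image wants
 * to sit at dram_base + TEXT_OFFSET, and when it has to be moved the
 * reservation must also cover the BSS (_end - _edata), which is not part of
 * the loaded file. All numbers below are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dram_base   = 0x80000000;		/* hypothetical */
	uint64_t text_offset = 0x80000;			/* typical TEXT_OFFSET */
	uint64_t image_addr  = 0x82000000;		/* where UEFI loaded us */
	uint64_t text  = image_addr;			/* _text */
	uint64_t edata = image_addr + 0x00a00000;	/* _edata: 10 MiB file */
	uint64_t end   = image_addr + 0x00c00000;	/* _end: + 2 MiB BSS   */

	uint64_t kernel_size    = edata - text;
	uint64_t kernel_memsize = kernel_size + (end - edata);
	uint64_t preferred      = dram_base + text_offset;

	if (image_addr != preferred)
		printf("relocate %#llx -> %#llx, reserve %#llx bytes\n",
		       (unsigned long long)image_addr,
		       (unsigned long long)preferred,
		       (unsigned long long)kernel_memsize);
	return 0;
}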
*/ + kernel_size = _edata - _text; + if (*image_addr != (dram_base + TEXT_OFFSET)) { + kernel_memsize = kernel_size + (_end - _edata); + status = efi_relocate_kernel(sys_table, image_addr, + kernel_size, kernel_memsize, + dram_base + TEXT_OFFSET, + PAGE_SIZE); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Failed to relocate kernel\n"); + return status; + } + if (*image_addr != (dram_base + TEXT_OFFSET)) { + pr_efi_err(sys_table, "Failed to alloc kernel memory\n"); + efi_free(sys_table, kernel_memsize, *image_addr); + return EFI_ERROR; + } + *image_size = kernel_memsize; + } + + + return EFI_SUCCESS; +} --- linux-3.13.0.orig/arch/arm64/kernel/efi.c +++ linux-3.13.0/arch/arm64/kernel/efi.c @@ -0,0 +1,478 @@ +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +struct efi_memory_map memmap; + +static efi_runtime_services_t *runtime; + +static u64 efi_system_table; + +static int uefi_debug __initdata; + +unsigned long arm64_efi_facility; + +static int __init uefi_debug_setup(char *str) +{ + uefi_debug = 1; + + return 0; +} +early_param("uefi_debug", uefi_debug_setup); + +static int __init is_normal_ram(efi_memory_desc_t *md) +{ + if (md->attribute & EFI_MEMORY_WB) + return 1; + return 0; +} + +static void __init efi_setup_idmap(void) +{ + struct memblock_region *r; + efi_memory_desc_t *md; + u64 paddr, npages, size; + + for_each_memblock(memory, r) + create_id_mapping(r->base, r->size, 0); + + /* map runtime io spaces */ + for_each_efi_memory_desc(&memmap, md) { + if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md)) + continue; + paddr = md->phys_addr; + npages = md->num_pages; + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + create_id_mapping(paddr, size, 1); + } +} + +static int __init uefi_init(void) +{ + efi_char16_t *c16; + char vendor[100] = "unknown"; + int i, retval; + + efi.systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (efi.systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &arm64_efi_facility); + set_bit(EFI_64BIT, &arm64_efi_facility); + + /* + * Verify the EFI Table + */ + if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + return -EINVAL; + } + if ((efi.systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff); + + /* Show what we know for posterity */ + c16 = early_memremap(efi.systab->fw_vendor, + sizeof(vendor)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; + } + + pr_info("EFI v%u.%.02u by %s\n", + efi.systab->hdr.revision >> 16, + efi.systab->hdr.revision & 0xffff, vendor); + + retval = efi_config_init(NULL); + if (retval == 0) + set_bit(EFI_CONFIG_TABLES, &arm64_efi_facility); + + early_memunmap(c16, sizeof(vendor)); + early_memunmap(efi.systab, sizeof(efi_system_table_t)); + + return retval; +} + +static __initdata char memory_type_name[][32] = { + {"Reserved"}, 
+ {"Loader Code"}, + {"Loader Data"}, + {"Boot Code"}, + {"Boot Data"}, + {"Runtime Code"}, + {"Runtime Data"}, + {"Conventional Memory"}, + {"Unusable Memory"}, + {"ACPI Reclaim Memory"}, + {"ACPI Memory NVS"}, + {"Memory Mapped I/O"}, + {"MMIO Port Space"}, + {"PAL Code"}, +}; + +/* + * Return true for RAM regions we want to permanently reserve. + */ +static __init int is_reserve_region(efi_memory_desc_t *md) +{ + if (!is_normal_ram(md)) + return 0; + + if (md->attribute & EFI_MEMORY_RUNTIME) + return 1; + + if (md->type == EFI_ACPI_RECLAIM_MEMORY || + md->type == EFI_RESERVED_TYPE) + return 1; + + return 0; +} + +static __init void reserve_regions(void) +{ + efi_memory_desc_t *md; + u64 paddr, npages, size; + + if (uefi_debug) + pr_info("Processing EFI memory map:\n"); + + for_each_efi_memory_desc(&memmap, md) { + paddr = md->phys_addr; + npages = md->num_pages; + + if (uefi_debug) + pr_info(" 0x%012llx-0x%012llx [%s]", + paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, + memory_type_name[md->type]); + + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_normal_ram(md)) + early_init_dt_add_memory_arch(paddr, size); + + if (is_reserve_region(md) || + md->type == EFI_BOOT_SERVICES_CODE || + md->type == EFI_BOOT_SERVICES_DATA) { + memblock_reserve(paddr, size); + if (uefi_debug) + pr_cont("*"); + } + + if (uefi_debug) + pr_cont("\n"); + } +} + + +static u64 __init free_one_region(u64 start, u64 end) +{ + u64 size = end - start; + + if (uefi_debug) + pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1); + + free_bootmem_late(start, size); + return size; +} + +static u64 __init free_region(u64 start, u64 end) +{ + u64 map_start, map_end, total = 0; + + if (end <= start) + return total; + + map_start = (u64)memmap.phys_map; + map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map)); + map_start &= PAGE_MASK; + + if (start < map_end && end > map_start) { + /* region overlaps UEFI memmap */ + if (start < map_start) + total += free_one_region(start, map_start); + + if (map_end < end) + total += free_one_region(map_end, end); + } else + total += free_one_region(start, end); + + return total; +} + +static void __init free_boot_services(void) +{ + u64 total_freed = 0; + u64 keep_end, free_start, free_end; + efi_memory_desc_t *md; + + /* + * If kernel uses larger pages than UEFI, we have to be careful + * not to inadvertantly free memory we want to keep if there is + * overlap at the kernel page size alignment. We do not want to + * free is_reserve_region() memory nor the UEFI memmap itself. + * + * The memory map is sorted, so we keep track of the end of + * any previous region we want to keep, remember any region + * we want to free and defer freeing it until we encounter + * the next region we want to keep. This way, before freeing + * it, we can clip it as needed to avoid freeing memory we + * want to keep for UEFI. + */ + + keep_end = 0; + free_start = 0; + + for_each_efi_memory_desc(&memmap, md) { + u64 paddr, npages, size; + + if (is_reserve_region(md)) { + /* + * We don't want to free any memory from this region. 
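/*
 * [Editorial sketch, not part of the patch] The clipping rule used by
 * free_region() above: a boot-services range that overlaps the page-aligned
 * UEFI memory map is split so the map itself is never handed back to the
 * allocator. Standalone rendition with made-up addresses.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t free_one(uint64_t start, uint64_t end)
{
	printf("  freeing %#llx-%#llx\n",
	       (unsigned long long)start, (unsigned long long)(end - 1));
	return end - start;
}

static uint64_t free_clipped(uint64_t start, uint64_t end,
			     uint64_t map_start, uint64_t map_end)
{
	uint64_t total = 0;

	if (end <= start)
		return 0;

	if (start < map_end && end > map_start) {	/* overlaps the memmap */
		if (start < map_start)
			total += free_one(start, map_start);
		if (map_end < end)
			total += free_one(map_end, end);
	} else {
		total += free_one(start, end);
	}
	return total;
}

int main(void)
{
	/* the UEFI memmap occupies one page at 0x83000000 */
	uint64_t freed = free_clipped(0x82ff0000, 0x83010000,
				      0x83000000, 0x83001000);
	printf("freed %#llx bytes in total\n", (unsigned long long)freed);
	return 0;
}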
+ */ + if (free_start) { + /* adjust free_end then free region */ + if (free_end > md->phys_addr) + free_end -= PAGE_SIZE; + total_freed += free_region(free_start, free_end); + free_start = 0; + } + keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); + continue; + } + + if (md->type != EFI_BOOT_SERVICES_CODE && + md->type != EFI_BOOT_SERVICES_DATA) { + /* no need to free this region */ + continue; + } + + /* + * We want to free memory from this region. + */ + paddr = md->phys_addr; + npages = md->num_pages; + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (free_start) { + if (paddr <= free_end) + free_end = paddr + size; + else { + total_freed += free_region(free_start, free_end); + free_start = paddr; + free_end = paddr + size; + } + } else { + free_start = paddr; + free_end = paddr + size; + } + if (free_start < keep_end) { + free_start += PAGE_SIZE; + if (free_start >= free_end) + free_start = 0; + } + } + if (free_start) + total_freed += free_region(free_start, free_end); + + if (total_freed) + pr_info("Freed 0x%llx bytes of EFI boot services memory", + total_freed); +} + +void __init efi_init(void) +{ + struct efi_fdt_params params; + + /* Grab UEFI information placed in FDT by stub */ + if (!efi_get_fdt_params(¶ms, uefi_debug)) + return; + + efi_system_table = params.system_table; + + memblock_reserve(params.mmap & PAGE_MASK, + PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); + memmap.phys_map = (void *)params.mmap; + memmap.map = early_memremap(params.mmap, params.mmap_size); + memmap.map_end = memmap.map + params.mmap_size; + memmap.desc_size = params.desc_size; + memmap.desc_version = params.desc_ver; + + if (uefi_init() < 0) + return; + + reserve_regions(); +} + +int efi_enabled(int facility) +{ + return test_bit(facility, &arm64_efi_facility) != 0; +} +EXPORT_SYMBOL(efi_enabled); + +void __init efi_idmap_init(void) +{ + if (!efi_enabled(EFI_BOOT)) + return; + + /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ + efi_setup_idmap(); +} + +static int __init remap_region(efi_memory_desc_t *md, void **new) +{ + u64 paddr, vaddr, npages, size; + + paddr = md->phys_addr; + npages = md->num_pages; + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_normal_ram(md)) + vaddr = (__force u64)ioremap_cache(paddr, size); + else + vaddr = (__force u64)ioremap(paddr, size); + + if (!vaddr) { + pr_err("Unable to remap 0x%llx pages @ %p\n", + npages, (void *)paddr); + return 0; + } + + /* adjust for any rounding when EFI and system pagesize differs */ + md->virt_addr = vaddr + (md->phys_addr - paddr); + + if (uefi_debug) + pr_info(" EFI remap 0x%012llx => %p\n", + md->phys_addr, (void *)md->virt_addr); + + memcpy(*new, md, memmap.desc_size); + *new += memmap.desc_size; + + return 1; +} + +/* + * Switch UEFI from an identity map to a kernel virtual map + */ +static int __init arm64_enter_virtual_mode(void) +{ + efi_memory_desc_t *md; + phys_addr_t virtmap_phys; + void *virtmap, *virt_md; + efi_status_t status; + u64 mapsize; + int count = 0; + unsigned long flags; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return -1; + } + + pr_info("Remapping and enabling EFI services.\n"); + + /* replace early memmap mapping with permanent mapping */ + mapsize = memmap.map_end - memmap.map; + early_memunmap(memmap.map, mapsize); + memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, + mapsize); + memmap.map_end = memmap.map + mapsize; + + 
efi.memmap = &memmap; + + /* Map the runtime regions */ + virtmap = kmalloc(mapsize, GFP_KERNEL); + if (!virtmap) { + pr_err("Failed to allocate EFI virtual memmap\n"); + return -1; + } + virtmap_phys = virt_to_phys(virtmap); + virt_md = virtmap; + + for_each_efi_memory_desc(&memmap, md) { + if (!(md->attribute & EFI_MEMORY_RUNTIME)) + continue; + if (remap_region(md, &virt_md)) + ++count; + } + + efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table); + if (efi.systab) + set_bit(EFI_SYSTEM_TABLES, &arm64_efi_facility); + + local_irq_save(flags); + cpu_switch_mm(idmap_pg_dir, &init_mm); + + /* Call SetVirtualAddressMap with the physical address of the map */ + runtime = efi.systab->runtime; + efi.set_virtual_address_map = runtime->set_virtual_address_map; + + status = efi.set_virtual_address_map(count * memmap.desc_size, + memmap.desc_size, + memmap.desc_version, + (efi_memory_desc_t *)virtmap_phys); + cpu_set_reserved_ttbr0(); + flush_tlb_all(); + local_irq_restore(flags); + + kfree(virtmap); + + free_boot_services(); + + if (status != EFI_SUCCESS) { + pr_err("Failed to set EFI virtual address map! [%lx]\n", + status); + return -1; + } + + /* Set up runtime services function pointers */ + runtime = efi.systab->runtime; + efi.get_time = runtime->get_time; + efi.set_time = runtime->set_time; + efi.get_wakeup_time = runtime->get_wakeup_time; + efi.set_wakeup_time = runtime->set_wakeup_time; + efi.get_variable = runtime->get_variable; + efi.get_next_variable = runtime->get_next_variable; + efi.set_variable = runtime->set_variable; + efi.query_variable_info = runtime->query_variable_info; + efi.update_capsule = runtime->update_capsule; + efi.query_capsule_caps = runtime->query_capsule_caps; + efi.get_next_high_mono_count = runtime->get_next_high_mono_count; + efi.reset_system = runtime->reset_system; + + set_bit(EFI_RUNTIME_SERVICES, &arm64_efi_facility); + + return 0; +} +early_initcall(arm64_enter_virtual_mode); --- linux-3.13.0.orig/arch/arm64/kernel/entry.S +++ linux-3.13.0/arch/arm64/kernel/entry.S @@ -275,7 +275,6 @@ * Stack or PC alignment exception handling */ mrs x0, far_el1 - mov x1, x25 mov x2, sp b do_sp_pc_abort el1_undef: --- linux-3.13.0.orig/arch/arm64/kernel/head.S +++ linux-3.13.0/arch/arm64/kernel/head.S @@ -107,8 +107,18 @@ /* * DO NOT MODIFY. Image header expected by Linux boot-loaders. */ +#ifdef CONFIG_EFI +efi_head: + /* + * This add instruction has no meaningful effect except that + * its opcode forms the magic "MZ" signature required by UEFI. + */ + add x13, x18, #0x16 + b stext +#else b stext // branch to kernel start, magic .long 0 // reserved +#endif .quad TEXT_OFFSET // Image load offset from start of RAM .quad 0 // reserved .quad 0 // reserved @@ -119,7 +129,109 @@ .byte 0x52 .byte 0x4d .byte 0x64 +#ifdef CONFIG_EFI + .long pe_header - efi_head // Offset to the PE header. +#else .word 0 // reserved +#endif + +#ifdef CONFIG_EFI + .align 3 +pe_header: + .ascii "PE" + .short 0 +coff_header: + .short 0xaa64 // AArch64 + .short 2 // nr_sections + .long 0 // TimeDateStamp + .long 0 // PointerToSymbolTable + .long 1 // NumberOfSymbols + .short section_table - optional_header // SizeOfOptionalHeader + .short 0x206 // Characteristics. 
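/*
 * [Editorial note, not part of the patch] The "add x13, x18, #0x16" in
 * efi_head above is chosen because its A64 encoding works out to 0x91005a4d
 * (the editor's computation of the ADD-immediate encoding), whose first two
 * little-endian bytes are 0x4d 0x5a -- the ASCII "MZ" signature a UEFI
 * loader looks for -- while executing as a harmless add on a conventional
 * boot. Quick standalone check:
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t insn = 0x91005a4d;	/* add x13, x18, #0x16 */
	unsigned char b[4];

	memcpy(b, &insn, sizeof(b));	/* little-endian host assumed */
	printf("first two bytes: %c%c\n", b[0], b[1]);	/* prints "MZ" */
	return 0;
}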
+ // IMAGE_FILE_DEBUG_STRIPPED | + // IMAGE_FILE_EXECUTABLE_IMAGE | + // IMAGE_FILE_LINE_NUMS_STRIPPED +optional_header: + .short 0x20b // PE32+ format + .byte 0x02 // MajorLinkerVersion + .byte 0x14 // MinorLinkerVersion + .long _edata - stext // SizeOfCode + .long 0 // SizeOfInitializedData + .long 0 // SizeOfUninitializedData + .long efi_stub_entry - efi_head // AddressOfEntryPoint + .long stext - efi_head // BaseOfCode + +extra_header_fields: + .quad 0 // ImageBase + .long 0x20 // SectionAlignment + .long 0x8 // FileAlignment + .short 0 // MajorOperatingSystemVersion + .short 0 // MinorOperatingSystemVersion + .short 0 // MajorImageVersion + .short 0 // MinorImageVersion + .short 0 // MajorSubsystemVersion + .short 0 // MinorSubsystemVersion + .long 0 // Win32VersionValue + + .long _edata - efi_head // SizeOfImage + + // Everything before the kernel image is considered part of the header + .long stext - efi_head // SizeOfHeaders + .long 0 // CheckSum + .short 0xa // Subsystem (EFI application) + .short 0 // DllCharacteristics + .quad 0 // SizeOfStackReserve + .quad 0 // SizeOfStackCommit + .quad 0 // SizeOfHeapReserve + .quad 0 // SizeOfHeapCommit + .long 0 // LoaderFlags + .long 0x6 // NumberOfRvaAndSizes + + .quad 0 // ExportTable + .quad 0 // ImportTable + .quad 0 // ResourceTable + .quad 0 // ExceptionTable + .quad 0 // CertificationTable + .quad 0 // BaseRelocationTable + + // Section table +section_table: + + /* + * The EFI application loader requires a relocation section + * because EFI applications must be relocatable. This is a + * dummy section as far as we are concerned. + */ + .ascii ".reloc" + .byte 0 + .byte 0 // end of 0 padding of section name + .long 0 + .long 0 + .long 0 // SizeOfRawData + .long 0 // PointerToRawData + .long 0 // PointerToRelocations + .long 0 // PointerToLineNumbers + .short 0 // NumberOfRelocations + .short 0 // NumberOfLineNumbers + .long 0x42100040 // Characteristics (section flags) + + + .ascii ".text" + .byte 0 + .byte 0 + .byte 0 // end of 0 padding of section name + .long _edata - stext // VirtualSize + .long stext - efi_head // VirtualAddress + .long _edata - stext // SizeOfRawData + .long stext - efi_head // PointerToRawData + + .long 0 // PointerToRelocations (0 for executables) + .long 0 // PointerToLineNumbers (0 for executables) + .short 0 // NumberOfRelocations (0 for executables) + .short 0 // NumberOfLineNumbers (0 for executables) + .long 0xe0500020 // Characteristics (section flags) + .align 5 +#endif ENTRY(stext) mov x21, x0 // x21=FDT @@ -412,7 +524,7 @@ * - identity mapping to enable the MMU (low address, TTBR0) * - first few MB of the kernel linear mapping to jump to once the MMU has * been enabled, including the FDT blob (TTBR1) - * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1) + * - pgd entry for fixed mappings (TTBR1) */ __create_page_tables: pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses @@ -465,15 +577,12 @@ sub x6, x6, #1 // inclusive range create_block_map x0, x7, x3, x5, x6 1: -#ifdef CONFIG_EARLY_PRINTK /* - * Create the pgd entry for the UART mapping. The full mapping is done - * later based earlyprintk kernel parameter. + * Create the pgd entry for the fixed mappings. 
*/ - ldr x5, =EARLYCON_IOBASE // UART virtual address + ldr x5, =FIXADDR_TOP // Fixed mapping virtual address add x0, x26, #2 * PAGE_SIZE // section table address create_pgd_entry x26, x0, x5, x6, x7 -#endif ret ENDPROC(__create_page_tables) .ltorg --- linux-3.13.0.orig/arch/arm64/kernel/irq.c +++ linux-3.13.0/arch/arm64/kernel/irq.c @@ -105,7 +105,7 @@ c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; --- linux-3.13.0.orig/arch/arm64/kernel/kuser32.S +++ linux-3.13.0/arch/arm64/kernel/kuser32.S @@ -38,12 +38,13 @@ .inst 0xe92d00f0 // push {r4, r5, r6, r7} .inst 0xe1c040d0 // ldrd r4, r5, [r0] .inst 0xe1c160d0 // ldrd r6, r7, [r1] - .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2] + .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] .inst 0xe0303004 // eors r3, r0, r4 .inst 0x00313005 // eoreqs r3, r1, r5 .inst 0x01a23e96 // stlexdeq r3, r6, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffff9 // beq 1b + .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} .inst 0xe12fff1e // bx lr @@ -55,11 +56,12 @@ .align 5 __kuser_cmpxchg: // 0xffff0fc0 - .inst 0xe1923e9f // 1: ldaex r3, [r2] + .inst 0xe1923f9f // 1: ldrex r3, [r2] .inst 0xe0533000 // subs r3, r3, r0 .inst 0x01823e91 // stlexeq r3, r1, [r2] .inst 0x03330001 // teqeq r3, #1 .inst 0x0afffffa // beq 1b + .inst 0xf57ff05b // dmb ish .inst 0xe2730000 // rsbs r0, r3, #0 .inst 0xe12fff1e // bx lr --- linux-3.13.0.orig/arch/arm64/kernel/msi_bitmap.c +++ linux-3.13.0/arch/arm64/kernel/msi_bitmap.c @@ -0,0 +1,134 @@ +/* + * Copyright 2006-2008, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 of the + * License. + * + * Borrowed from powerpc arch + */ + +#include +#include +#include +#include +#include + +int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num) +{ + unsigned long flags; + int offset, order = get_count_order(num); + + spin_lock_irqsave(&bmp->lock, flags); + /* + * This is fast, but stricter than we need. We might want to add + * a fallback routine which does a linear search with no alignment. + */ + offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order); + spin_unlock_irqrestore(&bmp->lock, flags); + + pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n", + num, order, offset); + + return offset; +} + +void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset, + unsigned int num) +{ + unsigned long flags; + int order = get_count_order(num); + + pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n", + num, order, offset); + + spin_lock_irqsave(&bmp->lock, flags); + bitmap_release_region(bmp->bitmap, offset, order); + spin_unlock_irqrestore(&bmp->lock, flags); +} + +void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq) +{ + unsigned long flags; + + pr_debug("msi_bitmap: reserving hwirq 0x%x\n", hwirq); + + spin_lock_irqsave(&bmp->lock, flags); + bitmap_allocate_region(bmp->bitmap, hwirq, 0); + spin_unlock_irqrestore(&bmp->lock, flags); +} + +/** + * msi_bitmap_reserve_dt_hwirqs - Reserve irqs specified in the device tree. + * @bmp: pointer to the MSI bitmap. 
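/*
 * [Editorial sketch, not part of the patch] msi_bitmap_alloc_hwirqs() above
 * hands out a naturally aligned power-of-two block, which is why its own
 * comment calls the scheme "stricter than we need". The helper below is a
 * plain reimplementation of the rounding done by get_count_order(), for
 * illustration only.
 */
#include <stdio.h>

static int count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;		/* smallest order with 2^order >= count */
}

int main(void)
{
	/* a request for 5 MSIs consumes a block of 8 aligned hwirqs */
	printf("num=5 -> order=%d -> block of %u\n",
	       count_order(5), 1u << count_order(5));
	return 0;
}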
+ * + * Looks in the device tree to see if there is a property specifying which + * irqs can be used for MSI. If found those irqs reserved in the device tree + * are reserved in the bitmap. + * + * Returns 0 for success, < 0 if there was an error, and > 0 if no property + * was found in the device tree. + **/ +int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp) +{ + int i, j, len = 2; + u32 array[2]; + u32 *p = array; + int ret; + + if (!bmp->of_node) + return 1; + + ret = of_property_read_u32_array(bmp->of_node, + "msi-available-ranges", p, len); + if (ret) + return ret; + + bitmap_allocate_region(bmp->bitmap, 0, get_count_order(bmp->irq_count)); + + spin_lock(&bmp->lock); + + /* Format is: ( )+ */ + len /= 2 * sizeof(u32); + for (i = 0; i < len; i++, p += 2) { + for (j = 0; j < *(p + 1); j++) + bitmap_release_region(bmp->bitmap, *p + j, 0); + } + + spin_unlock(&bmp->lock); + + return 0; +} + +int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, + struct device_node *of_node) +{ + int size; + + if (!irq_count) + return -EINVAL; + + size = BITS_TO_LONGS(irq_count) * sizeof(long); + pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size); + + bmp->bitmap = kzalloc(size, GFP_KERNEL); + if (!bmp->bitmap) { + pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n"); + return -ENOMEM; + } + + /* We zalloc'ed the bitmap, so all irqs are free by default */ + spin_lock_init(&bmp->lock); + bmp->of_node = of_node_get(of_node); + bmp->irq_count = irq_count; + + return 0; +} + +void msi_bitmap_free(struct msi_bitmap *bmp) +{ + /* we can't free the bitmap we don't know if it's bootmem etc. */ + of_node_put(bmp->of_node); + bmp->bitmap = NULL; +} --- linux-3.13.0.orig/arch/arm64/kernel/pcibios.c +++ linux-3.13.0/arch/arm64/kernel/pcibios.c @@ -0,0 +1,689 @@ +/* + * Based on linux/arch/arm/kernel/bios32.c + * + * PCI bios-type initialisation for PCI machines + * + * Bits taken from various places. + */ +#include +#include +#include +#include +#include +#include +#include + +static int debug_pci; + +/* + * We can't use pci_find_device() here since we are + * called from interrupt context. + */ +static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, + int warn) +{ + struct pci_dev *dev; + + list_for_each_entry(dev, &bus->devices, bus_list) { + u16 status; + + /* + * ignore host bridge - we handle + * that separately + */ + if (dev->bus->number == 0 && dev->devfn == 0) + continue; + + pci_read_config_word(dev, PCI_STATUS, &status); + if (status == 0xffff) + continue; + + if ((status & status_mask) == 0) + continue; + + /* clear the status errors */ + pci_write_config_word(dev, PCI_STATUS, status & status_mask); + + if (warn) + printk("(%s: %04X) ", pci_name(dev), status); + } + + list_for_each_entry(dev, &bus->devices, bus_list) + if (dev->subordinate) + pcibios_bus_report_status(dev->subordinate, status_mask, warn); +} + +void pcibios_report_status(u_int status_mask, int warn) +{ + struct list_head *l; + + list_for_each(l, &pci_root_buses) { + struct pci_bus *bus = pci_bus_b(l); + + pcibios_bus_report_status(bus, status_mask, warn); + } +} + +/* + * We don't use this to fix the device, but initialisation of it. + * It's not the correct use for this, but it works. + * Note that the arbiter/ISA bridge appears to be buggy, specifically in + * the following area: + * 1. park on CPU + * 2. ISA bridge ping-pong + * 3. ISA bridge master handling of target RETRY + * + * Bug 3 is responsible for the sound DMA grinding to a halt. 
We now + * live with bug 2. + */ +static void pci_fixup_83c553(struct pci_dev *dev) +{ + /* + * Set memory region to start at address 0, and enable IO + */ + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, + PCI_BASE_ADDRESS_SPACE_MEMORY); + pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO); + + dev->resource[0].end -= dev->resource[0].start; + dev->resource[0].start = 0; + + /* + * All memory requests from ISA to be channelled to PCI + */ + pci_write_config_byte(dev, 0x48, 0xff); + + /* + * Enable ping-pong on bus master to ISA bridge transactions. + * This improves the sound DMA substantially. The fixed + * priority arbiter also helps (see below). + */ + pci_write_config_byte(dev, 0x42, 0x01); + + /* + * Enable PCI retry + */ + pci_write_config_byte(dev, 0x40, 0x22); + + /* + * We used to set the arbiter to "park on last master" (bit + * 1 set), but unfortunately the CyberPro does not park the + * bus. We must therefore park on CPU. Unfortunately, this + * may trigger yet another bug in the 553. + */ + pci_write_config_byte(dev, 0x83, 0x02); + + /* + * Make the ISA DMA request lowest priority, and disable + * rotating priorities completely. + */ + pci_write_config_byte(dev, 0x80, 0x11); + pci_write_config_byte(dev, 0x81, 0x00); + + /* + * Route INTA input to IRQ 11, and set IRQ11 to be level + * sensitive. + */ + pci_write_config_word(dev, 0x44, 0xb000); + outb(0x08, 0x4d1); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, + pci_fixup_83c553); + +static void pci_fixup_unassign(struct pci_dev *dev) +{ + dev->resource[0].end -= dev->resource[0].start; + dev->resource[0].start = 0; +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, + pci_fixup_unassign); + +/* + * Prevent the PCI layer from seeing the resources allocated to this device + * if it is the host bridge by marking it as such. These resources are of + * no consequence to the PCI layer (they are handled elsewhere). + */ +static void pci_fixup_dec21285(struct pci_dev *dev) +{ + int i; + + if (dev->devfn == 0) { + dev->class &= 0xff; + dev->class |= PCI_CLASS_BRIDGE_HOST << 8; + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, + pci_fixup_dec21285); + +/* + * PCI IDE controllers use non-standard I/O port decoding, respect it. + */ +static void pci_fixup_ide_bases(struct pci_dev *dev) +{ + struct resource *r; + int i; + + if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) + return; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + r = dev->resource + i; + if ((r->start & ~0x80) == 0x374) { + r->start |= 2; + r->end = r->start; + } + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); + +/* + * Put the DEC21142 to sleep + */ +static void pci_fixup_dec21142(struct pci_dev *dev) +{ + pci_write_config_dword(dev, 0x40, 0x80000000); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, + pci_fixup_dec21142); + +/* + * The CY82C693 needs some rather major fixups to ensure that it does + * the right thing. Idea from the Alpha people, with a few additions. + * + * We ensure that the IDE base registers are set to 1f0/3f4 for the + * primary bus, and 170/374 for the secondary bus. Also, hide them + * from the PCI subsystem view as well so we won't try to perform + * our own auto-configuration on them. 
+ * + * In addition, we ensure that the PCI IDE interrupts are routed to + * IRQ 14 and IRQ 15 respectively. + * + * The above gets us to a point where the IDE on this device is + * functional. However, The CY82C693U _does not work_ in bus + * master mode without locking the PCI bus solid. + */ +static void pci_fixup_cy82c693(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u32 base0, base1; + + if (dev->class & 0x80) { /* primary */ + base0 = 0x1f0; + base1 = 0x3f4; + } else { /* secondary */ + base0 = 0x170; + base1 = 0x374; + } + + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, + base0 | PCI_BASE_ADDRESS_SPACE_IO); + pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, + base1 | PCI_BASE_ADDRESS_SPACE_IO); + + dev->resource[0].start = 0; + dev->resource[0].end = 0; + dev->resource[0].flags = 0; + + dev->resource[1].start = 0; + dev->resource[1].end = 0; + dev->resource[1].flags = 0; + } else if (PCI_FUNC(dev->devfn) == 0) { + /* + * Setup IDE IRQ routing. + */ + pci_write_config_byte(dev, 0x4b, 14); + pci_write_config_byte(dev, 0x4c, 15); + + /* + * Disable FREQACK handshake, enable USB. + */ + pci_write_config_byte(dev, 0x4d, 0x41); + + /* + * Enable PCI retry, and PCI post-write buffer. + */ + pci_write_config_byte(dev, 0x44, 0x17); + + /* + * Enable ISA master and DMA post write buffering. + */ + pci_write_config_byte(dev, 0x45, 0x03); + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, + pci_fixup_cy82c693); + +static void pci_fixup_it8152(struct pci_dev *dev) +{ + int i; + /* fixup for ITE 8152 devices */ + /* FIXME: add defines for class 0x68000 and 0x80103 */ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST || + dev->class == 0x68000 || dev->class == 0x80103) { + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, + pci_fixup_it8152); + +/* + * If the bus contains any of these devices, then we must not turn on + * parity checking of any kind. Currently this is CyberPro 20x0 only. + */ +static inline int pdev_bad_for_parity(struct pci_dev *dev) +{ + return ((dev->vendor == PCI_VENDOR_ID_INTERG && + (dev->device == PCI_DEVICE_ID_INTERG_2000 || + dev->device == PCI_DEVICE_ID_INTERG_2010)) || + (dev->vendor == PCI_VENDOR_ID_ITE && + dev->device == PCI_DEVICE_ID_ITE_8152)); + +} + +/* + * pcibios_fixup_bus - Called after each bus is probed, + * but before its children are examined. + */ +void pcibios_fixup_bus(struct pci_bus *bus) +{ + struct pci_dev *dev; + u16 features = + PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK; + + /* + * Walk the devices on this bus, working out what we can + * and can't support. + */ + list_for_each_entry(dev, &bus->devices, bus_list) { + u16 status; + + pci_read_config_word(dev, PCI_STATUS, &status); + + /* + * If any device on this bus does not support fast back + * to back transfers, then the bus as a whole is not able + * to support them. Having fast back to back transfers + * on saves us one PCI cycle per transaction. 
+ */ + if (!(status & PCI_STATUS_FAST_BACK)) + features &= ~PCI_COMMAND_FAST_BACK; + + if (pdev_bad_for_parity(dev)) + features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + + switch (dev->class >> 8) { + case PCI_CLASS_BRIDGE_PCI: + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status); + status |= + PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_MASTER_ABORT; + status &= + ~(PCI_BRIDGE_CTL_BUS_RESET | + PCI_BRIDGE_CTL_FAST_BACK); + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status); + break; + + case PCI_CLASS_BRIDGE_CARDBUS: + pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, + &status); + status |= + PCI_CB_BRIDGE_CTL_PARITY | + PCI_CB_BRIDGE_CTL_MASTER_ABORT; + pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, + status); + break; + } + } + + /* + * Now walk the devices again, this time setting them up. + */ + list_for_each_entry(dev, &bus->devices, bus_list) { + u16 cmd; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + cmd |= features; + pci_write_config_word(dev, PCI_COMMAND, cmd); + + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, + L1_CACHE_BYTES >> 2); + } + + /* + * Propagate the flags to the PCI bridge. + */ + if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + if (features & PCI_COMMAND_FAST_BACK) + bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK; + if (features & PCI_COMMAND_PARITY) + bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY; + } + + /* + * Report what we did for this bus + */ + printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n", + bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis"); +} +EXPORT_SYMBOL(pcibios_fixup_bus); + +void pcibios_add_bus(struct pci_bus *bus) +{ + struct pci_sys_data *sys = bus->sysdata; + if (sys->add_bus) + sys->add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + struct pci_sys_data *sys = bus->sysdata; + if (sys->remove_bus) + sys->remove_bus(bus); +} + +/* + * Swizzle the device pin each time we cross a bridge. If a platform does + * not provide a swizzle function, we perform the standard PCI swizzling. + * + * The default swizzling walks up the bus tree one level at a time, applying + * the standard swizzle function at each step, stopping when it finds the PCI + * root bus. This will return the slot number of the bridge device on the + * root bus and the interrupt pin on that device which should correspond + * with the downstream device interrupt. + * + * Platforms may override this, in which case the slot and pin returned + * depend entirely on the platform code. However, please note that the + * PCI standard swizzle is implemented on plug-in cards and Cardbus based + * PCI extenders, so it can not be ignored. + */ +static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin) +{ + struct pci_sys_data *sys = dev->sysdata; + int slot, oldpin = *pin; + + if (sys->swizzle) + slot = sys->swizzle(dev, pin); + else + slot = pci_common_swizzle(dev, pin); + + if (debug_pci) + printk("PCI: %s swizzling pin %d => pin %d slot %d\n", + pci_name(dev), oldpin, *pin, slot); + + return slot; +} + +/* + * Map a slot/pin to an IRQ. 
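/*
 * [Editorial sketch, not part of the patch] What the "standard PCI swizzle"
 * described above works out to: at every bridge crossed on the way to the
 * root, the pin is rotated by the slot number at that level,
 * pin = ((pin - 1 + slot) % 4) + 1. The slot path below is invented.
 */
#include <stdio.h>

static int swizzle(int slot, int pin)
{
	return ((pin - 1 + slot) % 4) + 1;
}

int main(void)
{
	int pin = 1;			/* INTA on the endpoint */
	int slots[] = { 3, 2 };		/* slots seen while walking up */
	int i;

	for (i = 0; i < 2; i++)
		pin = swizzle(slots[i], pin);

	printf("root sees pin INT%c\n", 'A' + pin - 1);	/* prints INTB */
	return 0;
}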
+ */ +static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_sys_data *sys = dev->sysdata; + int irq = -1; + + if (sys->map_irq) + irq = sys->map_irq(dev, slot, pin); + + if (debug_pci) + printk("PCI: %s mapping slot %d pin %d => irq %d\n", + pci_name(dev), slot, pin, irq); + + return irq; +} + +static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) +{ + int ret; + struct pci_host_bridge_window *window; + + if (list_empty(&sys->resources)) { + pci_add_resource_offset(&sys->resources, + &iomem_resource, sys->mem_offset); + } + + list_for_each_entry(window, &sys->resources, list) { + if (resource_type(window->res) == IORESOURCE_IO) + return 0; + } + + sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io; + sys->io_res.end = (busnr + 1) * SZ_64K - 1; + sys->io_res.flags = IORESOURCE_IO; + sys->io_res.name = sys->io_res_name; + sprintf(sys->io_res_name, "PCI%d I/O", busnr); + + ret = request_resource(&ioport_resource, &sys->io_res); + if (ret) { + pr_err("PCI: unable to allocate I/O port region (%d)\n", ret); + return ret; + } + pci_add_resource_offset(&sys->resources, &sys->io_res, sys->io_offset); + + return 0; +} + +static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, + struct list_head *head) +{ + struct pci_sys_data *sys = NULL; + int ret; + int nr, busnr; + + for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { + sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL); + if (!sys) + panic("PCI: unable to allocate sys data!"); + +#ifdef CONFIG_PCI_DOMAINS + sys->domain = hw->domain; +#endif + sys->busnr = busnr; + sys->swizzle = hw->swizzle; + sys->map_irq = hw->map_irq; + sys->align_resource = hw->align_resource; + sys->add_bus = hw->add_bus; + sys->remove_bus = hw->remove_bus; + INIT_LIST_HEAD(&sys->resources); + + if (hw->private_data) + sys->private_data = hw->private_data[nr]; + + ret = hw->setup(nr, sys); + + if (ret > 0) { + ret = pcibios_init_resources(nr, sys); + if (ret) { + kfree(sys); + break; + } + + if (hw->scan) + sys->bus = hw->scan(nr, sys); + else + sys->bus = pci_scan_root_bus(parent, sys->busnr, + hw->ops, sys, + &sys->resources); + + if (!sys->bus) + panic("PCI: unable to scan bus!"); + + busnr = sys->bus->busn_res.end + 1; + + list_add(&sys->node, head); + } else { + kfree(sys); + if (ret < 0) + break; + } + } +} + +void pci_common_init_dev(struct device *parent, struct hw_pci *hw) +{ + struct pci_sys_data *sys; + LIST_HEAD(head); + + pci_add_flags(PCI_REASSIGN_ALL_RSRC); + if (hw->preinit) + hw->preinit(); + pcibios_init_hw(parent, hw, &head); + if (hw->postinit) + hw->postinit(); + + pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq); + + list_for_each_entry(sys, &head, node) { + struct pci_bus *bus = sys->bus; + + if (!pci_has_flag(PCI_PROBE_ONLY)) { + /* + * Size the bridge windows. + */ + pci_bus_size_bridges(bus); + + /* + * Assign resources. + */ + pci_bus_assign_resources(bus); + } + + /* + * Tell drivers about devices found. 
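/*
 * [Editorial sketch, not part of the patch] pcibios_init_resources() above
 * carves one 64K I/O window per controller: controller N gets
 * [N*64K, (N+1)*64K - 1], except that controller 0 falls back to
 * pcibios_min_io via the GNU "?:" shorthand. Quick arithmetic check; the
 * pcibios_min_io value below is an invented placeholder.
 */
#include <stdio.h>

#define SZ_64K 0x10000UL

int main(void)
{
	unsigned long pcibios_min_io = 0x1000;	/* platform-defined in reality */
	int busnr;

	for (busnr = 0; busnr < 3; busnr++) {
		unsigned long start = (busnr * SZ_64K) ? : pcibios_min_io;
		unsigned long end = (busnr + 1) * SZ_64K - 1;

		printf("controller %d: I/O %#lx-%#lx\n", busnr, start, end);
	}
	return 0;
}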
+ */ + pci_bus_add_devices(bus); + } +} + +#ifndef CONFIG_PCI_HOST_ITE8152 +void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} +#endif + +char *__init pcibios_setup(char *str) +{ + if (!strcmp(str, "debug")) { + debug_pci = 1; + return NULL; + } else if (!strcmp(str, "firmware")) { + pci_add_flags(PCI_PROBE_ONLY); + return NULL; + } + return str; +} + +/* + * From arch/i386/kernel/pci-i386.c: + * + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + * + * Why? Because some silly external IO cards only decode + * the low 10 bits of the IO address. The 0x00-0xff region + * is reserved for motherboard devices that decode all 16 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, + * but we want to try to avoid allocating at 0x2900-0x2bff + * which might be mirrored at 0x0100-0x03ff.. + */ +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, + resource_size_t align) +{ + struct pci_dev *dev = data; + struct pci_sys_data *sys = dev->sysdata; + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO && start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + + start = (start + align - 1) & ~(align - 1); + + if (sys->align_resource) + return sys->align_resource(dev, res, start, size, align); + + return start; +} + +/** + * pcibios_enable_device - Enable I/O and memory. + * @dev: PCI device to be enabled + */ +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, old_cmd; + int idx; + struct resource *r; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + old_cmd = cmd; + for (idx = 0; idx < 6; idx++) { + /* Only set up the requested stuff */ + if (!(mask & (1 << idx))) + continue; + + r = dev->resource + idx; + if (!r->start && r->end) { + printk(KERN_ERR "PCI: Device %s not available because" + " of resource collisions\n", pci_name(dev)); + return -EINVAL; + } + if (r->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (r->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + + /* + * Bridges (eg, cardbus bridges) need to be fully enabled + */ + if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) + cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY; + + if (cmd != old_cmd) { + printk("PCI: enabling device %s (%04x -> %04x)\n", + pci_name(dev), old_cmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + struct pci_sys_data *root = dev->sysdata; + unsigned long phys; + + if (mmap_state == pci_mmap_io) + return -EINVAL; + else + phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT); + + /* + * Mark this as IO + */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (remap_pfn_range(vma, vma->vm_start, phys, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} --- linux-3.13.0.orig/arch/arm64/kernel/perf_event.c +++ linux-3.13.0/arch/arm64/kernel/perf_event.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -363,26 +364,53 @@ } static void +armpmu_disable_percpu_irq(void *data) +{ + unsigned int irq = *(unsigned int *)data; + disable_percpu_irq(irq); +} + +static void armpmu_release_hardware(struct arm_pmu *armpmu) { - int i, irq, irqs; + int irq; + unsigned int i, irqs; struct platform_device *pmu_device = 
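/*
 * [Editorial sketch, not part of the patch] The rule explained in the
 * pcibios_align_resource() comment above: an I/O BAR whose start lands in
 * the 0x100-0x3ff part of a 1K block is bumped to the next 1K boundary, so
 * allocations stay in the 0x000-0x0ff region modulo 0x400. Standalone check
 * with the addresses the comment itself uses:
 */
#include <stdio.h>

static unsigned long io_align(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ff;
	return start;
}

int main(void)
{
	/* 0x2800 is already fine; 0x2900 is pushed up to 0x2c00 */
	printf("%#lx -> %#lx\n", 0x2800UL, io_align(0x2800));
	printf("%#lx -> %#lx\n", 0x2900UL, io_align(0x2900));
	return 0;
}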
armpmu->plat_device; irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (!irqs) + return; - for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) - continue; - irq = platform_get_irq(pmu_device, i); - if (irq >= 0) - free_irq(irq, armpmu); + irq = platform_get_irq(pmu_device, 0); + if (irq <= 0) + return; + + if (irq_is_percpu(irq)) { + on_each_cpu(armpmu_disable_percpu_irq, &irq, 1); + free_percpu_irq(irq, &cpu_hw_events); + } else { + for (i = 0; i < irqs; ++i) { + if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) + continue; + irq = platform_get_irq(pmu_device, i); + if (irq > 0) + free_irq(irq, armpmu); + } } } +static void +armpmu_enable_percpu_irq(void *data) +{ + unsigned int irq = *(unsigned int *)data; + enable_percpu_irq(irq, IRQ_TYPE_NONE); +} + static int armpmu_reserve_hardware(struct arm_pmu *armpmu) { - int i, err, irq, irqs; + int err, irq; + unsigned int i, irqs; struct platform_device *pmu_device = armpmu->plat_device; if (!pmu_device) { @@ -391,39 +419,59 @@ } irqs = min(pmu_device->num_resources, num_possible_cpus()); - if (irqs < 1) { + if (!irqs) { pr_err("no irqs for PMUs defined\n"); return -ENODEV; } - for (i = 0; i < irqs; ++i) { - err = 0; - irq = platform_get_irq(pmu_device, i); - if (irq < 0) - continue; - - /* - * If we have a single PMU interrupt that we can't shift, - * assume that we're running on a uniprocessor machine and - * continue. Otherwise, continue without this interrupt. - */ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { - pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); - continue; - } + irq = platform_get_irq(pmu_device, 0); + if (irq <= 0) { + pr_err("failed to get valid irq for PMU device\n"); + return -ENODEV; + } + + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, armpmu->handle_irq, + "arm-pmu", &cpu_hw_events); - err = request_irq(irq, armpmu->handle_irq, - IRQF_NOBALANCING, - "arm-pmu", armpmu); if (err) { - pr_err("unable to request IRQ%d for ARM PMU counters\n", - irq); + pr_err("unable to request percpu IRQ%d for ARM PMU counters\n", + irq); armpmu_release_hardware(armpmu); return err; } - cpumask_set_cpu(i, &armpmu->active_irqs); + on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); + } else { + for (i = 0; i < irqs; ++i) { + err = 0; + irq = platform_get_irq(pmu_device, i); + if (irq <= 0) + continue; + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. + */ + if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); + continue; + } + + err = request_irq(irq, armpmu->handle_irq, + IRQF_NOBALANCING, + "arm-pmu", armpmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + armpmu_release_hardware(armpmu); + return err; + } + + cpumask_set_cpu(i, &armpmu->active_irqs); + } } return 0; --- linux-3.13.0.orig/arch/arm64/kernel/process.c +++ linux-3.13.0/arch/arm64/kernel/process.c @@ -109,33 +109,63 @@ } #endif +/* + * Called by kexec, immediately prior to machine_kexec(). + * + * This must completely disable all secondary CPUs; simply causing those CPUs + * to execute e.g. a RAM-based pin loop is not sufficient. This allows the + * kexec'd kernel to use any and all RAM as it sees fit, without having to + * avoid any code or data used by any SW CPU pin loop. 
The CPU hotplug + * functionality embodied in disable_nonboot_cpus() to achieve this. + */ void machine_shutdown(void) { -#ifdef CONFIG_SMP - smp_send_stop(); -#endif + disable_nonboot_cpus(); } +/* + * Halting simply requires that the secondary CPUs stop performing any + * activity (executing tasks, handling interrupts). smp_send_stop() + * achieves this. + */ void machine_halt(void) { - machine_shutdown(); + local_irq_disable(); + smp_send_stop(); while (1); } +/* + * Power-off simply requires that the secondary CPUs stop performing any + * activity (executing tasks, handling interrupts). smp_send_stop() + * achieves this. When the system power is turned off, it will take all CPUs + * with it. + */ void machine_power_off(void) { - machine_shutdown(); + local_irq_disable(); + smp_send_stop(); if (pm_power_off) pm_power_off(); } +/* + * Restart requires that the secondary CPUs stop performing any activity + * while the primary CPU resets the system. Systems with a single CPU can + * use soft_restart() as their machine descriptor's .restart hook, since that + * will cause the only available CPU to reset. Systems with multiple CPUs must + * provide a HW restart implementation, to ensure that all CPUs reset at once. + * This is required so that any code running after reset on the primary CPU + * doesn't have to co-ordinate with other CPUs to ensure they aren't still + * executing pre-reset code, and using RAM that the primary CPU's code wishes + * to use. Implementing such co-ordination would be essentially impossible. + */ void machine_restart(char *cmd) { - machine_shutdown(); - /* Disable interrupts first */ local_irq_disable(); local_fiq_disable(); + smp_send_stop(); /* Now call the architecture specific reboot code. */ if (arm_pm_restart) @@ -190,9 +220,27 @@ { } +static void tls_thread_flush(void) +{ + asm ("msr tpidr_el0, xzr"); + + if (is_compat_task()) { + current->thread.tp_value = 0; + + /* + * We need to ensure ordering between the shadow state and the + * hardware state, so that we don't corrupt the hardware state + * with a stale shadow state during context switch. 
+ */ + barrier(); + asm ("msr tpidrro_el0, xzr"); + } +} + void flush_thread(void) { fpsimd_flush_thread(); + tls_thread_flush(); flush_ptrace_hw_breakpoint(current); } --- linux-3.13.0.orig/arch/arm64/kernel/psci.c +++ linux-3.13.0/arch/arm64/kernel/psci.c @@ -18,12 +18,17 @@ #include #include #include +#include +#include +#include +#include #include #include #include #include #include +#include #define PSCI_POWER_STATE_TYPE_STANDBY 0 #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 @@ -40,58 +45,52 @@ int (*cpu_off)(struct psci_power_state state); int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); int (*migrate)(unsigned long cpuid); + int (*affinity_info)(unsigned long target_affinity, + unsigned long lowest_affinity_level); + int (*migrate_info_type)(void); }; static struct psci_operations psci_ops; static int (*invoke_psci_fn)(u64, u64, u64, u64); +typedef int (*psci_initcall_t)(const struct device_node *); enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, PSCI_FN_CPU_OFF, PSCI_FN_MIGRATE, + PSCI_FN_AFFINITY_INFO, + PSCI_FN_MIGRATE_INFO_TYPE, PSCI_FN_MAX, }; static u32 psci_function_id[PSCI_FN_MAX]; -#define PSCI_RET_SUCCESS 0 -#define PSCI_RET_EOPNOTSUPP -1 -#define PSCI_RET_EINVAL -2 -#define PSCI_RET_EPERM -3 - static int psci_to_linux_errno(int errno) { switch (errno) { case PSCI_RET_SUCCESS: return 0; - case PSCI_RET_EOPNOTSUPP: + case PSCI_RET_NOT_SUPPORTED: return -EOPNOTSUPP; - case PSCI_RET_EINVAL: + case PSCI_RET_INVALID_PARAMS: return -EINVAL; - case PSCI_RET_EPERM: + case PSCI_RET_DENIED: return -EPERM; }; return -EINVAL; } -#define PSCI_POWER_STATE_ID_MASK 0xffff -#define PSCI_POWER_STATE_ID_SHIFT 0 -#define PSCI_POWER_STATE_TYPE_MASK 0x1 -#define PSCI_POWER_STATE_TYPE_SHIFT 16 -#define PSCI_POWER_STATE_AFFL_MASK 0x3 -#define PSCI_POWER_STATE_AFFL_SHIFT 24 - static u32 psci_power_state_pack(struct psci_power_state state) { - return ((state.id & PSCI_POWER_STATE_ID_MASK) - << PSCI_POWER_STATE_ID_SHIFT) | - ((state.type & PSCI_POWER_STATE_TYPE_MASK) - << PSCI_POWER_STATE_TYPE_SHIFT) | - ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) - << PSCI_POWER_STATE_AFFL_SHIFT); + return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT) + & PSCI_0_2_POWER_STATE_ID_MASK) | + ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) + & PSCI_0_2_POWER_STATE_TYPE_MASK) | + ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) + & PSCI_0_2_POWER_STATE_AFFL_MASK); } /* @@ -128,6 +127,14 @@ return function_id; } +static int psci_get_version(void) +{ + int err; + + err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); + return err; +} + static int psci_cpu_suspend(struct psci_power_state state, unsigned long entry_point) { @@ -171,28 +178,36 @@ return psci_to_linux_errno(err); } -static const struct of_device_id psci_of_match[] __initconst = { - { .compatible = "arm,psci", }, - {}, -}; +static int psci_affinity_info(unsigned long target_affinity, + unsigned long lowest_affinity_level) +{ + int err; + u32 fn; -int __init psci_init(void) + fn = psci_function_id[PSCI_FN_AFFINITY_INFO]; + err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0); + return err; +} + +static int psci_migrate_info_type(void) { - struct device_node *np; - const char *method; - u32 id; - int err = 0; + int err; + u32 fn; - np = of_find_matching_node(NULL, psci_of_match); - if (!np) - return -ENODEV; + fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; + err = invoke_psci_fn(fn, 0, 0, 0); + return err; +} + +static int get_set_conduit_method(struct device_node *np) +{ + const char *method; - 
pr_info("probing function IDs from device-tree\n"); + pr_info("probing for conduit method from DT.\n"); if (of_property_read_string(np, "method", &method)) { - pr_warning("missing \"method\" property\n"); - err = -ENXIO; - goto out_put_node; + pr_warn("missing \"method\" property\n"); + return -ENXIO; } if (!strcmp("hvc", method)) { @@ -200,11 +215,99 @@ } else if (!strcmp("smc", method)) { invoke_psci_fn = __invoke_psci_fn_smc; } else { - pr_warning("invalid \"method\" property: %s\n", method); - err = -EINVAL; + pr_warn("invalid \"method\" property: %s\n", method); + return -EINVAL; + } + return 0; +} + +static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); +} + +static void psci_sys_poweroff(void) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); +} + +/* + * PSCI Function IDs for v0.2+ are well defined so use + * standard values. + */ +static int psci_0_2_init(struct device_node *np) +{ + int err, ver; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + + ver = psci_get_version(); + + if (ver == PSCI_RET_NOT_SUPPORTED) { + /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */ + pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); + err = -EOPNOTSUPP; goto out_put_node; + } else { + pr_info("PSCIv%d.%d detected in firmware.\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + if (PSCI_VERSION_MAJOR(ver) == 0 && + PSCI_VERSION_MINOR(ver) < 2) { + err = -EINVAL; + pr_err("Conflicting PSCI version detected.\n"); + goto out_put_node; + } } + pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND; + psci_ops.cpu_suspend = psci_cpu_suspend; + + psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; + psci_ops.cpu_off = psci_cpu_off; + + psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON; + psci_ops.cpu_on = psci_cpu_on; + + psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE; + psci_ops.migrate = psci_migrate; + + psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO; + psci_ops.affinity_info = psci_affinity_info; + + psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] = + PSCI_0_2_FN_MIGRATE_INFO_TYPE; + psci_ops.migrate_info_type = psci_migrate_info_type; + + arm_pm_restart = psci_sys_reset; + + pm_power_off = psci_sys_poweroff; + +out_put_node: + of_node_put(np); + return err; +} + +/* + * PSCI < v0.2 get PSCI Function IDs via DT. 
+ */ +static int psci_0_1_init(struct device_node *np) +{ + u32 id; + int err; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + + pr_info("Using PSCI v0.1 Function IDs from DT\n"); + if (!of_property_read_u32(np, "cpu_suspend", &id)) { psci_function_id[PSCI_FN_CPU_SUSPEND] = id; psci_ops.cpu_suspend = psci_cpu_suspend; @@ -230,6 +333,27 @@ return err; } +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", .data = psci_0_1_init}, + { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, + {}, +}; + +int __init psci_init(void) +{ + struct device_node *np; + const struct of_device_id *matched_np; + psci_initcall_t init_fn; + + np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); + + if (!np) + return -ENODEV; + + init_fn = (psci_initcall_t)matched_np->data; + return init_fn(np); +} + #ifdef CONFIG_SMP static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu) @@ -251,7 +375,7 @@ { int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry)); if (err) - pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); + pr_err("failed to boot CPU%d (%d)\n", cpu, err); return err; } @@ -278,7 +402,36 @@ ret = psci_ops.cpu_off(state); - pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret); + pr_crit("unable to power off CPU%u (%d)\n", cpu, ret); +} + +static int cpu_psci_cpu_kill(unsigned int cpu) +{ + int err, i; + + if (!psci_ops.affinity_info) + return 1; + /* + * cpu_kill could race with cpu_die and we can + * potentially end up declaring this cpu undead + * while it is dying. So, try again a few times. + */ + + for (i = 0; i < 10; i++) { + err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); + if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { + pr_info("CPU%d killed.\n", cpu); + return 1; + } + + msleep(10); + pr_info("Retrying again to check for CPU kill\n"); + } + + pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", + cpu, err); + /* Make op_cpu_kill() fail. 
*/ + return 0; } #endif @@ -290,6 +443,7 @@ #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = cpu_psci_cpu_disable, .cpu_die = cpu_psci_cpu_die, + .cpu_kill = cpu_psci_cpu_kill, #endif }; --- linux-3.13.0.orig/arch/arm64/kernel/ptrace.c +++ linux-3.13.0/arch/arm64/kernel/ptrace.c @@ -81,7 +81,8 @@ break; } } - for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) { + + for (i = 0; i < ARM_MAX_WRP; ++i) { if (current->thread.debug.hbp_watch[i] == bp) { info.si_errno = -((i << 1) + 1); break; @@ -650,11 +651,16 @@ reg = task_pt_regs(target)->regs[idx]; } - ret = copy_to_user(ubuf, ®, sizeof(reg)); - if (ret) - break; + if (kbuf) { + memcpy(kbuf, ®, sizeof(reg)); + kbuf += sizeof(reg); + } else { + ret = copy_to_user(ubuf, ®, sizeof(reg)); + if (ret) + break; - ubuf += sizeof(reg); + ubuf += sizeof(reg); + } } return ret; @@ -684,11 +690,16 @@ unsigned int idx = start + i; compat_ulong_t reg; - ret = copy_from_user(®, ubuf, sizeof(reg)); - if (ret) - return ret; + if (kbuf) { + memcpy(®, kbuf, sizeof(reg)); + kbuf += sizeof(reg); + } else { + ret = copy_from_user(®, ubuf, sizeof(reg)); + if (ret) + return ret; - ubuf += sizeof(reg); + ubuf += sizeof(reg); + } switch (idx) { case 15: @@ -821,6 +832,7 @@ compat_ulong_t val) { int ret; + mm_segment_t old_fs = get_fs(); if (off & 3 || off >= COMPAT_USER_SZ) return -EIO; @@ -828,10 +840,13 @@ if (off >= sizeof(compat_elf_gregset_t)) return 0; + set_fs(KERNEL_DS); ret = copy_regset_from_user(tsk, &user_aarch32_view, REGSET_COMPAT_GPR, off, sizeof(compat_ulong_t), &val); + set_fs(old_fs); + return ret; } --- linux-3.13.0.orig/arch/arm64/kernel/setup.c +++ linux-3.13.0/arch/arm64/kernel/setup.c @@ -41,7 +41,9 @@ #include #include #include +#include +#include #include #include #include @@ -54,6 +56,7 @@ #include #include #include +#include unsigned int processor_id; EXPORT_SYMBOL(processor_id); @@ -67,7 +70,8 @@ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ - COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) + COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_LPAE) unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; #endif @@ -221,13 +225,19 @@ *cmdline_p = boot_command_line; + init_mem_pgprot(); + early_ioremap_init(); + parse_early_param(); + efi_init(); arm64_memblock_init(); paging_init(); request_standard_resources(); + efi_idmap_init(); + unflatten_device_tree(); psci_init(); --- linux-3.13.0.orig/arch/arm64/kernel/signal32.c +++ linux-3.13.0/arch/arm64/kernel/signal32.c @@ -151,8 +151,7 @@ case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, - &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); @@ -181,7 +180,7 @@ case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; default: /* this is just in case for now ... */ err |= __put_user(from->si_pid, &to->si_pid); --- linux-3.13.0.orig/arch/arm64/kernel/smp.c +++ linux-3.13.0/arch/arm64/kernel/smp.c @@ -219,6 +219,19 @@ return 0; } +static int op_cpu_kill(unsigned int cpu) +{ + /* + * If we have no means of synchronising with the dying CPU, then assume + * that it is really dead. 
We can only wait for an arbitrary length of + * time and hope that it's dead, so let's skip the wait and just hope. + */ + if (!cpu_ops[cpu]->cpu_kill) + return 1; + + return cpu_ops[cpu]->cpu_kill(cpu); +} + static DECLARE_COMPLETION(cpu_died); /* @@ -232,6 +245,15 @@ return; } pr_notice("CPU%u: shutdown\n", cpu); + + /* + * Now that the dying CPU is beyond the point of no return w.r.t. + * in-kernel synchronisation, try to get the firwmare to help us to + * verify that it has really left the kernel before we consider + * clobbering anything it might still be using. + */ + if (!op_cpu_kill(cpu)) + pr_warn("CPU%d may not have shut down cleanly\n", cpu); } /* --- linux-3.13.0.orig/arch/arm64/kernel/stacktrace.c +++ linux-3.13.0/arch/arm64/kernel/stacktrace.c @@ -48,7 +48,11 @@ frame->sp = fp + 0x10; frame->fp = *(unsigned long *)(fp); - frame->pc = *(unsigned long *)(fp + 8); + /* + * -4 here because we care about the PC at time of bl, + * not where the return will go. + */ + frame->pc = *(unsigned long *)(fp + 8) - 4; return 0; } --- linux-3.13.0.orig/arch/arm64/kernel/sys_compat.c +++ linux-3.13.0/arch/arm64/kernel/sys_compat.c @@ -79,6 +79,12 @@ case __ARM_NR_compat_set_tls: current->thread.tp_value = regs->regs[0]; + + /* + * Protect against register corruption from context switch. + * See comment in tls_thread_flush. + */ + barrier(); asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); return 0; --- linux-3.13.0.orig/arch/arm64/kernel/vdso.c +++ linux-3.13.0/arch/arm64/kernel/vdso.c @@ -238,6 +238,8 @@ vdso_data->use_syscall = use_syscall; vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { vdso_data->cs_cycle_last = tk->clock->cycle_last; @@ -245,8 +247,6 @@ vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->cs_mult = tk->mult; vdso_data->cs_shift = tk->shift; - vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; - vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; } smp_wmb(); --- linux-3.13.0.orig/arch/arm64/kernel/vdso/Makefile +++ linux-3.13.0/arch/arm64/kernel/vdso/Makefile @@ -48,7 +48,7 @@ # Actual build commands quiet_cmd_vdsold = VDSOL $@ - cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@ + cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< --- linux-3.13.0.orig/arch/arm64/kernel/vdso/gettimeofday.S +++ linux-3.13.0/arch/arm64/kernel/vdso/gettimeofday.S @@ -103,6 +103,8 @@ bl __do_get_tspec seqcnt_check w9, 1b + mov x30, x2 + cmp w0, #CLOCK_MONOTONIC b.ne 6f @@ -118,6 +120,9 @@ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne b.ne 8f + /* xtime_coarse_nsec is already right-shifted */ + mov x12, #0 + /* Get coarse timespec. */ adr vdso_data, _vdso_data 3: seqcnt_acquire @@ -156,7 +161,7 @@ lsr x11, x11, x12 stp x10, x11, [x1, #TSPEC_TV_SEC] mov x0, xzr - ret x2 + ret 7: mov x30, x2 8: /* Syscall fallback. */ --- linux-3.13.0.orig/arch/arm64/kvm/Kconfig +++ linux-3.13.0/arch/arm64/kvm/Kconfig @@ -36,6 +36,17 @@ ---help--- Provides host support for ARM processors. +config KVM_ARM_MAX_VCPUS + int "Number maximum supported virtual CPUs per VM" + depends on KVM_ARM_HOST + default 4 + help + Static number of max supported virtual CPUs per VM. + + If you choose a high number, the vcpu structures will be quite + large, so only choose a reasonable number that you expect to + actually use. 
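[Editor's illustration, not part of the patch] As a hypothetical aside on the KVM_ARM_MAX_VCPUS help text above: a minimal C sketch of why the per-VM structures grow with this option. demo_vm, demo_vcpu and DEMO_MAX_VCPUS are invented stand-ins for the real kvm structures and CONFIG_KVM_ARM_MAX_VCPUS.

/* Illustration only: a compile-time VCPU cap sizes a per-VM array. */
#define DEMO_MAX_VCPUS	4			/* stand-in for CONFIG_KVM_ARM_MAX_VCPUS */

struct demo_vcpu { int id; };

struct demo_vm {
	struct demo_vcpu *vcpus[DEMO_MAX_VCPUS];	/* one slot per possible vcpu */
	int nr_vcpus;					/* vcpus actually created */
};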
+ config KVM_ARM_VGIC bool depends on KVM_ARM_HOST && OF --- linux-3.13.0.orig/arch/arm64/kvm/guest.c +++ linux-3.13.0/arch/arm64/kvm/guest.c @@ -207,20 +207,28 @@ unsigned long implementor = read_cpuid_implementor(); unsigned long part_number = read_cpuid_part_number(); - if (implementor != ARM_CPU_IMP_ARM) - return -EINVAL; + switch (implementor) { + case ARM_CPU_IMP_ARM: + switch (part_number) { + case ARM_CPU_PART_AEM_V8: + return KVM_ARM_TARGET_AEM_V8; + case ARM_CPU_PART_FOUNDATION: + return KVM_ARM_TARGET_FOUNDATION_V8; + case ARM_CPU_PART_CORTEX_A53: + return KVM_ARM_TARGET_CORTEX_A53; + case ARM_CPU_PART_CORTEX_A57: + return KVM_ARM_TARGET_CORTEX_A57; + }; + break; + case ARM_CPU_IMP_APM: + switch (part_number) { + case APM_CPU_PART_POTENZA: + return KVM_ARM_TARGET_XGENE_POTENZA; + }; + break; + }; - switch (part_number) { - case ARM_CPU_PART_AEM_V8: - return KVM_ARM_TARGET_AEM_V8; - case ARM_CPU_PART_FOUNDATION: - return KVM_ARM_TARGET_FOUNDATION_V8; - case ARM_CPU_PART_CORTEX_A57: - /* Currently handled by the generic backend */ - return KVM_ARM_TARGET_CORTEX_A57; - default: - return -EINVAL; - } + return -EINVAL; } int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, --- linux-3.13.0.orig/arch/arm64/kvm/handle_exit.c +++ linux-3.13.0/arch/arm64/kvm/handle_exit.c @@ -30,18 +30,19 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - if (kvm_psci_call(vcpu)) + int ret; + + ret = kvm_psci_call(vcpu); + if (ret < 0) { + kvm_inject_undefined(vcpu); return 1; + } - kvm_inject_undefined(vcpu); - return 1; + return ret; } static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) { - if (kvm_psci_call(vcpu)) - return 1; - kvm_inject_undefined(vcpu); return 1; } @@ -65,6 +66,8 @@ else kvm_vcpu_block(vcpu); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + return 1; } --- linux-3.13.0.orig/arch/arm64/kvm/hyp-init.S +++ linux-3.13.0/arch/arm64/kvm/hyp-init.S @@ -74,6 +74,10 @@ msr mair_el2, x4 isb + /* Invalidate the stale TLBs from Bootloader */ + tlbi alle2 + dsb sy + mrs x4, sctlr_el2 and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 ldr x5, =SCTLR_EL2_FLAGS --- linux-3.13.0.orig/arch/arm64/kvm/sys_regs.c +++ linux-3.13.0/arch/arm64/kvm/sys_regs.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -121,6 +122,48 @@ } /* + * Generic accessor for VM registers. Only called as long as HCR_TVM + * is set. + */ +static bool access_vm_reg(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + unsigned long val; + + BUG_ON(!p->is_write); + + val = *vcpu_reg(vcpu, p->Rt); + if (!p->is_aarch32) { + vcpu_sys_reg(vcpu, r->reg) = val; + } else { + vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL; + if (!p->is_32bit) + vcpu_cp15(vcpu, r->reg + 1) = val >> 32; + } + return true; +} + +/* + * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the + * guest enables the MMU, we stop trapping the VM sys_regs and leave + * it in complete control of the caches. + */ +static bool access_sctlr(struct kvm_vcpu *vcpu, + const struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + access_vm_reg(vcpu, p, r); + + if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ + vcpu->arch.hcr_el2 &= ~HCR_TVM; + stage2_flush_vm(vcpu->kvm); + } + + return true; +} + +/* * We could trap ID_DFR0 and tell the guest we don't support performance * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was * NAKed, so it will read the PMCR anyway. 
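[Editor's illustration, not part of the patch] Before the next hunk, a minimal stand-alone C sketch of the pattern access_sctlr implements above: keep trapping guest writes to the VM control registers while HCR_EL2.TVM is set, then drop the trap once the guest turns its MMU and caches on. vcpu_state, handle_sctlr_write() and the simplified bit masks are assumptions made for illustration, not the kernel's definitions.

#include <stdint.h>

#define HCR_TVM		(1ULL << 26)	/* trap virtual-memory register writes */
#define SCTLR_M		(1ULL << 0)	/* MMU enable */
#define SCTLR_C		(1ULL << 2)	/* data-cache enable */

struct vcpu_state {
	uint64_t sctlr;			/* shadow of the guest's SCTLR_EL1 */
	uint64_t hcr;			/* shadow of HCR_EL2 for this vcpu */
};

/* Called for every trapped SCTLR_EL1 write while HCR_TVM is set. */
static void handle_sctlr_write(struct vcpu_state *v, uint64_t val)
{
	v->sctlr = val;				/* emulate the register write */
	if ((val & (SCTLR_M | SCTLR_C)) == (SCTLR_M | SCTLR_C)) {
		v->hcr &= ~HCR_TVM;		/* caches on: stop trapping VM registers */
		/* ...and flush stage-2, as stage2_flush_vm() does in the hunk above */
	}
}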
@@ -185,32 +228,32 @@ NULL, reset_mpidr, MPIDR_EL1 }, /* SCTLR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), - NULL, reset_val, SCTLR_EL1, 0x00C50078 }, + access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, /* CPACR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), NULL, reset_val, CPACR_EL1, 0 }, /* TTBR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), - NULL, reset_unknown, TTBR0_EL1 }, + access_vm_reg, reset_unknown, TTBR0_EL1 }, /* TTBR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), - NULL, reset_unknown, TTBR1_EL1 }, + access_vm_reg, reset_unknown, TTBR1_EL1 }, /* TCR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), - NULL, reset_val, TCR_EL1, 0 }, + access_vm_reg, reset_val, TCR_EL1, 0 }, /* AFSR0_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), - NULL, reset_unknown, AFSR0_EL1 }, + access_vm_reg, reset_unknown, AFSR0_EL1 }, /* AFSR1_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), - NULL, reset_unknown, AFSR1_EL1 }, + access_vm_reg, reset_unknown, AFSR1_EL1 }, /* ESR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), - NULL, reset_unknown, ESR_EL1 }, + access_vm_reg, reset_unknown, ESR_EL1 }, /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), - NULL, reset_unknown, FAR_EL1 }, + access_vm_reg, reset_unknown, FAR_EL1 }, /* PAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), NULL, reset_unknown, PAR_EL1 }, @@ -224,17 +267,17 @@ /* MAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), - NULL, reset_unknown, MAIR_EL1 }, + access_vm_reg, reset_unknown, MAIR_EL1 }, /* AMAIR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), - NULL, reset_amair_el1, AMAIR_EL1 }, + access_vm_reg, reset_amair_el1, AMAIR_EL1 }, /* VBAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), NULL, reset_val, VBAR_EL1, 0 }, /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), - NULL, reset_val, CONTEXTIDR_EL1, 0 }, + access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, /* TPIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), NULL, reset_unknown, TPIDR_EL1 }, @@ -305,14 +348,32 @@ NULL, reset_val, FPEXC32_EL2, 0x70 }, }; -/* Trapped cp15 registers */ +/* + * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, + * depending on the way they are accessed (as a 32bit or a 64bit + * register). 
+ */ static const struct sys_reg_desc cp15_regs[] = { + { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, + { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, + { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, + { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, + { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, + { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, + { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, + { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, + /* * DC{C,I,CI}SW operations: */ { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, + { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, @@ -326,6 +387,14 @@ { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, + + { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, + { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, + { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, + { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, + { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, + + { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, }; /* Target specific emulation tables */ @@ -437,6 +506,8 @@ u32 hsr = kvm_vcpu_get_hsr(vcpu); int Rt2 = (hsr >> 10) & 0xf; + params.is_aarch32 = true; + params.is_32bit = false; params.CRm = (hsr >> 1) & 0xf; params.Rt = (hsr >> 5) & 0xf; params.is_write = ((hsr & 1) == 0); @@ -480,6 +551,8 @@ struct sys_reg_params params; u32 hsr = kvm_vcpu_get_hsr(vcpu); + params.is_aarch32 = true; + params.is_32bit = true; params.CRm = (hsr >> 1) & 0xf; params.Rt = (hsr >> 5) & 0xf; params.is_write = ((hsr & 1) == 0); @@ -549,6 +622,8 @@ struct sys_reg_params params; unsigned long esr = kvm_vcpu_get_hsr(vcpu); + params.is_aarch32 = false; + params.is_32bit = false; params.Op0 = (esr >> 20) & 3; params.Op1 = (esr >> 14) & 0x7; params.CRn = (esr >> 10) & 0xf; --- linux-3.13.0.orig/arch/arm64/kvm/sys_regs.h +++ linux-3.13.0/arch/arm64/kvm/sys_regs.h @@ -30,6 +30,8 @@ u8 Op2; u8 Rt; bool is_write; + bool is_aarch32; + bool is_32bit; /* Only valid if is_aarch32 is true */ }; struct sys_reg_desc { --- linux-3.13.0.orig/arch/arm64/kvm/sys_regs_generic_v8.c +++ linux-3.13.0/arch/arm64/kvm/sys_regs_generic_v8.c @@ -88,8 +88,13 @@ &genericv8_target_table); kvm_register_target_sys_reg_table(KVM_ARM_TARGET_FOUNDATION_V8, &genericv8_target_table); + kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A53, + &genericv8_target_table); kvm_register_target_sys_reg_table(KVM_ARM_TARGET_CORTEX_A57, &genericv8_target_table); + kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA, + &genericv8_target_table); + return 0; } late_initcall(sys_reg_genericv8_init); --- linux-3.13.0.orig/arch/arm64/lib/bitops.S +++ linux-3.13.0/arch/arm64/lib/bitops.S @@ -46,11 +46,12 @@ 
mov x2, #1 add x1, x1, x0, lsr #3 // Get word offset lsl x4, x2, x3 // Create mask -1: ldaxr x2, [x1] +1: ldxr x2, [x1] lsr x0, x2, x3 // Save old value of bit \instr x2, x2, x4 // toggle bit stlxr w5, x2, [x1] cbnz w5, 1b + dmb ish and x0, x0, #1 3: ret ENDPROC(\name ) --- linux-3.13.0.orig/arch/arm64/lib/clear_user.S +++ linux-3.13.0/arch/arm64/lib/clear_user.S @@ -46,7 +46,7 @@ sub x1, x1, #2 4: adds x1, x1, #1 b.mi 5f - strb wzr, [x0] +USER(9f, strb wzr, [x0] ) 5: mov x0, #0 ret ENDPROC(__clear_user) --- linux-3.13.0.orig/arch/arm64/lib/copy_from_user.S +++ linux-3.13.0/arch/arm64/lib/copy_from_user.S @@ -15,7 +15,6 @@ */ #include -#include /* * Copy from user space to a kernel buffer (alignment handled by the hardware) @@ -28,39 +27,63 @@ * x0 - bytes not copied */ ENTRY(__copy_from_user) - add x4, x1, x2 // upper user buffer boundary - subs x2, x2, #8 - b.mi 2f -1: -USER(9f, ldr x3, [x1], #8 ) - subs x2, x2, #8 - str x3, [x0], #8 - b.pl 1b -2: adds x2, x2, #4 - b.mi 3f -USER(9f, ldr w3, [x1], #4 ) - sub x2, x2, #4 - str w3, [x0], #4 -3: adds x2, x2, #2 - b.mi 4f -USER(9f, ldrh w3, [x1], #2 ) - sub x2, x2, #2 - strh w3, [x0], #2 -4: adds x2, x2, #1 - b.mi 5f -USER(9f, ldrb w3, [x1] ) - strb w3, [x0] -5: mov x0, #0 - ret +#include "copy_template.S" ENDPROC(__copy_from_user) .section .fixup,"ax" - .align 2 -9: sub x2, x4, x1 - mov x3, x2 -10: strb wzr, [x0], #1 // zero remaining buffer space - subs x3, x3, #1 - b.ne 10b - mov x0, x2 // bytes not copied + .align 2 +8: + /* + * Count bytes remain + * dst points to (dst + tmp1) + */ + mov x0, count + sub dst, dst, tmp1 + b .Lfinalize +9: + /* + * 16 bytes remain + * dst is accurate + */ + mov x0, #16 + b .Lfinalize +10: + /* + * count is accurate + * dst is accurate + */ + mov x0, count + b .Lfinalize +11: + /* + *(count + tmp2) bytes remain + * dst points to the start of the remaining bytes + */ + add x0, count, tmp2 + b .Lfinalize +12: + /* + * (count + 128) bytes remain + * dst is accurate + */ + add x0, count, #128 + b .Lfinalize +13: + /* + * (count + 128) bytes remain + * dst is pre-biased to (dst + 16) + */ + add x0, count, #128 + add dst, dst, #16 +.Lfinalize: + /* + * Zeroize remaining destination-buffer + */ + mov count, x0 +20: + /* Zero remaining buffer space */ + strb wzr, [dst], #1 + subs count, count, #1 + b.ne 20b ret .previous --- linux-3.13.0.orig/arch/arm64/lib/copy_template.S +++ linux-3.13.0/arch/arm64/lib/copy_template.S @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2013, Applied Micro Circuits Corporation + * Copyright (c) 2012-2013, Linaro Limited + * + * Author: Feng Kan + * Author: Philipp Tomsich + * + * The code is adopted from the memcpy routine by Linaro Limited. + * + * This file is free software: you may copy, redistribute and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation, either version 2 of the License, or (at your + * option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * This file incorporates work covered by the following copyright and + * permission notice: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * 1 Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2 Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3 Neither the name of the Linaro nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include + +dstin .req x0 +src .req x1 +count .req x2 +tmp1 .req x3 +tmp1w .req w3 +tmp2 .req x4 +tmp2w .req w4 +tmp3 .req x5 +tmp3w .req w5 +dst .req x6 + +A_l .req x7 +A_h .req x8 +B_l .req x9 +B_h .req x10 +C_l .req x11 +C_h .req x12 +D_l .req x13 +D_h .req x14 + + mov dst, dstin + cmp count, #64 + b.ge .Lcpy_not_short + cmp count, #15 + b.le .Ltail15tiny + + /* + * Deal with small copies quickly by dropping straight into the + * exit block. + */ +.Ltail63: + /* + * Copy up to 48 bytes of data. At this point we only need the + * bottom 6 bits of count to be accurate. + */ + ands tmp1, count, #0x30 + b.eq .Ltail15 + add dst, dst, tmp1 + add src, src, tmp1 + cmp tmp1w, #0x20 + b.eq 1f + b.lt 2f + USER(8f, ldp A_l, A_h, [src, #-48]) + USER(8f, stp A_l, A_h, [dst, #-48]) +1: + USER(8f, ldp A_l, A_h, [src, #-32]) + USER(8f, stp A_l, A_h, [dst, #-32]) +2: + USER(8f, ldp A_l, A_h, [src, #-16]) + USER(8f, stp A_l, A_h, [dst, #-16]) + +.Ltail15: + ands count, count, #15 + beq 1f + add src, src, count + USER(9f, ldp A_l, A_h, [src, #-16]) + add dst, dst, count + USER(9f, stp A_l, A_h, [dst, #-16]) +1: + b .Lsuccess + +.Ltail15tiny: + /* + * Copy up to 15 bytes of data. Does not assume additional data + * being copied. + */ + tbz count, #3, 1f + USER(10f, ldr tmp1, [src], #8) + USER(10f, str tmp1, [dst], #8) +1: + tbz count, #2, 1f + USER(10f, ldr tmp1w, [src], #4) + USER(10f, str tmp1w, [dst], #4) +1: + tbz count, #1, 1f + USER(10f, ldrh tmp1w, [src], #2) + USER(10f, strh tmp1w, [dst], #2) +1: + tbz count, #0, 1f + USER(10f, ldrb tmp1w, [src]) + USER(10f, strb tmp1w, [dst]) +1: + b .Lsuccess + +.Lcpy_not_short: + /* + * We don't much care about the alignment of DST, but we want SRC + * to be 128-bit (16 byte) aligned so that we don't cross cache line + * boundaries on both loads and stores. + */ + neg tmp2, src + ands tmp2, tmp2, #15 /* Bytes to reach alignment. 
*/ + b.eq 2f + sub count, count, tmp2 + /* + * Copy more data than needed; it's faster than jumping + * around copying sub-Quadword quantities. We know that + * it can't overrun. + */ + USER(11f, ldp A_l, A_h, [src]) + add src, src, tmp2 + USER(11f, stp A_l, A_h, [dst]) + add dst, dst, tmp2 + /* There may be less than 63 bytes to go now. */ + cmp count, #63 + b.le .Ltail63 +2: + subs count, count, #128 + b.ge .Lcpy_body_large + /* + * Less than 128 bytes to copy, so handle 64 here and then jump + * to the tail. + */ + USER(12f, ldp A_l, A_h, [src]) + USER(12f, ldp B_l, B_h, [src, #16]) + USER(12f, ldp C_l, C_h, [src, #32]) + USER(12f, ldp D_l, D_h, [src, #48]) + USER(12f, stp A_l, A_h, [dst]) + USER(12f, stp B_l, B_h, [dst, #16]) + USER(12f, stp C_l, C_h, [dst, #32]) + USER(12f, stp D_l, D_h, [dst, #48]) + tst count, #0x3f + add src, src, #64 + add dst, dst, #64 + b.ne .Ltail63 + b .Lsuccess + + /* + * Critical loop. Start at a new cache line boundary. Assuming + * 64 bytes per line this ensures the entire loop is in one line. + */ + .p2align 6 +.Lcpy_body_large: + /* There are at least 128 bytes to copy. */ + USER(12f, ldp A_l, A_h, [src, #0]) + sub dst, dst, #16 /* Pre-bias. */ + USER(13f, ldp B_l, B_h, [src, #16]) + USER(13f, ldp C_l, C_h, [src, #32]) + USER(13f, ldp D_l, D_h, [src, #48]!) /* src += 64 - Pre-bias. */ +1: + USER(13f, stp A_l, A_h, [dst, #16]) + USER(13f, ldp A_l, A_h, [src, #16]) + USER(13f, stp B_l, B_h, [dst, #32]) + USER(13f, ldp B_l, B_h, [src, #32]) + USER(13f, stp C_l, C_h, [dst, #48]) + USER(13f, ldp C_l, C_h, [src, #48]) + USER(13f, stp D_l, D_h, [dst, #64]!) + USER(13f, ldp D_l, D_h, [src, #64]!) + subs count, count, #64 + b.ge 1b + USER(13f, stp A_l, A_h, [dst, #16]) + USER(13f, stp B_l, B_h, [dst, #32]) + USER(13f, stp C_l, C_h, [dst, #48]) + USER(13f, stp D_l, D_h, [dst, #64]) + add src, src, #16 + add dst, dst, #64 + 16 + tst count, #0x3f + b.ne .Ltail63 +.Lsuccess: + /* Nothing left to copy */ + mov x0, #0 + ret --- linux-3.13.0.orig/arch/arm64/lib/copy_to_user.S +++ linux-3.13.0/arch/arm64/lib/copy_to_user.S @@ -15,7 +15,6 @@ */ #include -#include /* * Copy to user space from a kernel buffer (alignment handled by the hardware) @@ -28,34 +27,38 @@ * x0 - bytes not copied */ ENTRY(__copy_to_user) - add x4, x0, x2 // upper user buffer boundary - subs x2, x2, #8 - b.mi 2f -1: - ldr x3, [x1], #8 - subs x2, x2, #8 -USER(9f, str x3, [x0], #8 ) - b.pl 1b -2: adds x2, x2, #4 - b.mi 3f - ldr w3, [x1], #4 - sub x2, x2, #4 -USER(9f, str w3, [x0], #4 ) -3: adds x2, x2, #2 - b.mi 4f - ldrh w3, [x1], #2 - sub x2, x2, #2 -USER(9f, strh w3, [x0], #2 ) -4: adds x2, x2, #1 - b.mi 5f - ldrb w3, [x1] -USER(9f, strb w3, [x0] ) -5: mov x0, #0 - ret +#include "copy_template.S" ENDPROC(__copy_to_user) .section .fixup,"ax" - .align 2 -9: sub x0, x4, x0 // bytes not copied + .align 2 +8: +10: + /* + * count is accurate + */ + mov x0, count + b .Lfinalize +9: + /* + * 16 bytes remain + */ + mov x0, #16 + b .Lfinalize +11: + /* + *(count + tmp2) bytes remain + * dst points to the start of the remaining bytes + */ + add x0, count, tmp2 + b .Lfinalize +12: +13: + /* + * (count + 128) bytes remain + */ + add x0, count, #128 + b .Lfinalize +.Lfinalize: ret .previous --- linux-3.13.0.orig/arch/arm64/mm/Makefile +++ linux-3.13.0/arch/arm64/mm/Makefile @@ -1,5 +1,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ - ioremap.o mmap.o pgd.o mmu.o \ + ioremap.o iomap.o mmap.o pgd.o mmu.o \ context.o tlb.o proc.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o --- 
linux-3.13.0.orig/arch/arm64/mm/copypage.c +++ linux-3.13.0/arch/arm64/mm/copypage.c @@ -32,3 +32,4 @@ { clear_page(kaddr); } +EXPORT_SYMBOL(__cpu_clear_user_page); --- linux-3.13.0.orig/arch/arm64/mm/flush.c +++ linux-3.13.0/arch/arm64/mm/flush.c @@ -79,7 +79,8 @@ return; if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { - __flush_dcache_area(page_address(page), PAGE_SIZE); + __flush_dcache_area(page_address(page), + PAGE_SIZE << compound_order(page)); __flush_icache_all(); } else if (icache_is_aivivt()) { __flush_icache_all(); --- linux-3.13.0.orig/arch/arm64/mm/hugetlbpage.c +++ linux-3.13.0/arch/arm64/mm/hugetlbpage.c @@ -51,12 +51,11 @@ int pud_huge(pud_t pud) { +#ifndef __PAGETABLE_PMD_FOLDED return !(pud_val(pud) & PUD_TABLE_BIT); -} - -int pmd_huge_support(void) -{ - return 1; +#else + return 0; +#endif } static __init int setup_hugepagesz(char *opt) --- linux-3.13.0.orig/arch/arm64/mm/iomap.c +++ linux-3.13.0/arch/arm64/mm/iomap.c @@ -0,0 +1,31 @@ +/* + * Based on linux/arch/arm/mm/iomap.c + * + * Map IO port and PCI memory spaces so that {read,write}[bwl] can + * be used to access this memory. + */ +#include +#include +#include +#include + +#ifdef __io +void __iomem *ioport_map(unsigned long port, unsigned int nr) +{ + return __io(port); +} +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ +} +EXPORT_SYMBOL(ioport_unmap); +#endif + +#ifdef CONFIG_PCI +unsigned long pcibios_min_io = 0x1000; +EXPORT_SYMBOL(pcibios_min_io); + +unsigned long pcibios_min_mem = 0x01000000; +EXPORT_SYMBOL(pcibios_min_mem); +#endif --- linux-3.13.0.orig/arch/arm64/mm/ioremap.c +++ linux-3.13.0/arch/arm64/mm/ioremap.c @@ -25,6 +25,10 @@ #include #include +#include +#include +#include + static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, pgprot_t prot, void *caller) { @@ -98,3 +102,97 @@ __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_cache); + +#ifndef CONFIG_ARM64_64K_PAGES +static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; +#endif + +static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + + pgd = pgd_offset_k(addr); + BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); + + pud = pud_offset(pgd, addr); + BUG_ON(pud_none(*pud) || pud_bad(*pud)); + + return pmd_offset(pud, addr); +} + +static inline pte_t * __init early_ioremap_pte(unsigned long addr) +{ + pmd_t *pmd = early_ioremap_pmd(addr); + + BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd)); + + return pte_offset_kernel(pmd, addr); +} + +void __init early_ioremap_init(void) +{ + pmd_t *pmd; + + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); +#ifndef CONFIG_ARM64_64K_PAGES + /* need to populate pmd for 4k pagesize only */ + pmd_populate_kernel(&init_mm, pmd, bm_pte); +#endif + /* + * The boot-ioremap range spans multiple pmds, for which + * we are not prepared: + */ + BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) + != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); + + if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { + WARN_ON(1); + pr_warn("pmd %p != %p\n", + pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))); + pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", + fix_to_virt(FIX_BTMAP_BEGIN)); + pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", + fix_to_virt(FIX_BTMAP_END)); + + pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); + pr_warn("FIX_BTMAP_BEGIN: %d\n", + FIX_BTMAP_BEGIN); + } + + early_ioremap_setup(); +} + +void __init __early_set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + unsigned long addr = 
__fix_to_virt(idx); + pte_t *pte; + + if (idx >= __end_of_fixed_addresses) { + BUG(); + return; + } + + pte = early_ioremap_pte(addr); + + if (pgprot_val(flags)) + set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); + else { + pte_clear(&init_mm, addr, pte); + flush_tlb_kernel_range(addr, addr+PAGE_SIZE); + } +} + +#ifdef CONFIG_PCI +int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) +{ + BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); + + return ioremap_page_range((unsigned long)PCI_IOBASE + offset, + (unsigned long)PCI_IOBASE + offset + SZ_64K, + phys_addr, + __pgprot(PROT_NORMAL_NC)); +} +EXPORT_SYMBOL_GPL(pci_ioremap_io); +#endif --- linux-3.13.0.orig/arch/arm64/mm/mmu.c +++ linux-3.13.0/arch/arm64/mm/mmu.c @@ -125,7 +125,7 @@ /* * Adjust the PMD section entries according to the CPU in use. */ -static void __init init_mem_pgprot(void) +void __init init_mem_pgprot(void) { pteval_t default_pgprot; int i; @@ -168,7 +168,8 @@ } static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, - unsigned long end, unsigned long pfn) + unsigned long end, unsigned long pfn, + pgprot_t prot) { pte_t *pte; @@ -180,16 +181,28 @@ pte = pte_offset_kernel(pmd, addr); do { - set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); + set_pte(pte, pfn_pte(pfn, prot)); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); } static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, - unsigned long end, phys_addr_t phys) + unsigned long end, phys_addr_t phys, + int map_io) { pmd_t *pmd; unsigned long next; + pmdval_t prot_sect; + pgprot_t prot_pte; + + if (map_io) { + prot_sect = PMD_TYPE_SECT | PMD_SECT_AF | + PMD_ATTRINDX(MT_DEVICE_nGnRE); + prot_pte = __pgprot(PROT_DEVICE_nGnRE); + } else { + prot_sect = prot_sect_kernel; + prot_pte = PAGE_KERNEL_EXEC; + } /* * Check for initial section mappings in the pgd/pud and remove them. @@ -203,23 +216,33 @@ do { next = pmd_addr_end(addr, end); /* try section mapping first */ - if (((addr | next | phys) & ~SECTION_MASK) == 0) - set_pmd(pmd, __pmd(phys | prot_sect_kernel)); - else - alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys)); + if (((addr | next | phys) & ~SECTION_MASK) == 0) { + pmd_t old_pmd =*pmd; + set_pmd(pmd, __pmd(phys | prot_sect)); + /* + * Check for previous table entries created during + * boot (__create_page_tables) and flush them. + */ + if (!pmd_none(old_pmd)) + flush_tlb_all(); + } else { + alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys), + prot_pte); + } phys += next - addr; } while (pmd++, addr = next, addr != end); } static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys) + unsigned long end, unsigned long phys, + int map_io) { pud_t *pud = pud_offset(pgd, addr); unsigned long next; do { next = pud_addr_end(addr, end); - alloc_init_pmd(pud, addr, next, phys); + alloc_init_pmd(pud, addr, next, phys, map_io); phys += next - addr; } while (pud++, addr = next, addr != end); } @@ -228,70 +251,43 @@ * Create the page directory entries and any necessary page tables for the * mapping specified by 'md'. 
*/ -static void __init create_mapping(phys_addr_t phys, unsigned long virt, - phys_addr_t size) +static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + int map_io) { unsigned long addr, length, end, next; - pgd_t *pgd; - - if (virt < VMALLOC_START) { - pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n", - phys, virt); - return; - } addr = virt & PAGE_MASK; length = PAGE_ALIGN(size + (virt & ~PAGE_MASK)); - pgd = pgd_offset_k(addr); end = addr + length; do { next = pgd_addr_end(addr, end); - alloc_init_pud(pgd, addr, next, phys); + alloc_init_pud(pgd, addr, next, phys, map_io); phys += next - addr; } while (pgd++, addr = next, addr != end); } -#ifdef CONFIG_EARLY_PRINTK -/* - * Create an early I/O mapping using the pgd/pmd entries already populated - * in head.S as this function is called too early to allocated any memory. The - * mapping size is 2MB with 4KB pages or 64KB or 64KB pages. - */ -void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) +static void __init create_mapping(phys_addr_t phys, unsigned long virt, + phys_addr_t size) { - unsigned long size, mask; - bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - /* - * No early pte entries with !ARM64_64K_PAGES configuration, so using - * sections (pmd). - */ - size = page64k ? PAGE_SIZE : SECTION_SIZE; - mask = ~(size - 1); - - pgd = pgd_offset_k(virt); - pud = pud_offset(pgd, virt); - if (pud_none(*pud)) - return NULL; - pmd = pmd_offset(pud, virt); - - if (page64k) { - if (pmd_none(*pmd)) - return NULL; - pte = pte_offset_kernel(pmd, virt); - set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE)); - } else { - set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE)); + if (virt < VMALLOC_START) { + pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", + &phys, virt); + return; } + __create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0); +} - return (void __iomem *)((virt & mask) + (phys & ~mask)); +void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io) +{ + if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) { + pr_warn("BUG: not creating id mapping for %pa\n", &addr); + return; + } + __create_mapping(&idmap_pg_dir[pgd_index(addr)], + addr, addr, size, map_io); } -#endif static void __init map_mem(void) { @@ -349,7 +345,6 @@ { void *zero_page; - init_mem_pgprot(); map_mem(); /* --- linux-3.13.0.orig/arch/avr32/Makefile +++ linux-3.13.0/arch/avr32/Makefile @@ -11,7 +11,7 @@ KBUILD_DEFCONFIG := atstk1002_defconfig -KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic +KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__ KBUILD_AFLAGS += -mrelax -mno-pic KBUILD_CFLAGS_MODULE += -mno-relax LDFLAGS_vmlinux += --relax --- linux-3.13.0.orig/arch/avr32/boards/mimc200/fram.c +++ linux-3.13.0/arch/avr32/boards/mimc200/fram.c @@ -11,6 +11,7 @@ #define FRAM_VERSION "1.0" #include +#include #include #include #include --- linux-3.13.0.orig/arch/avr32/mm/fault.c +++ linux-3.13.0/arch/avr32/mm/fault.c @@ -142,6 +142,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/cris/mm/fault.c +++ linux-3.13.0/arch/cris/mm/fault.c @@ -176,6 +176,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if 
(fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/frv/mm/fault.c +++ linux-3.13.0/arch/frv/mm/fault.c @@ -168,6 +168,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/ia64/configs/generic_defconfig +++ linux-3.13.0/arch/ia64/configs/generic_defconfig @@ -25,7 +25,6 @@ CONFIG_CRASH_DUMP=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m -CONFIG_ACPI_PROCFS=y CONFIG_ACPI_BUTTON=m CONFIG_ACPI_FAN=m CONFIG_ACPI_DOCK=y --- linux-3.13.0.orig/arch/ia64/configs/tiger_defconfig +++ linux-3.13.0/arch/ia64/configs/tiger_defconfig @@ -26,7 +26,6 @@ CONFIG_KEXEC=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m -CONFIG_ACPI_PROCFS=y CONFIG_ACPI_BUTTON=m CONFIG_ACPI_FAN=m CONFIG_ACPI_PROCESSOR=m --- linux-3.13.0.orig/arch/ia64/configs/zx1_defconfig +++ linux-3.13.0/arch/ia64/configs/zx1_defconfig @@ -16,7 +16,6 @@ CONFIG_CRASH_DUMP=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=y -CONFIG_ACPI_PROCFS=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_PACKET=y --- linux-3.13.0.orig/arch/ia64/hp/common/sba_iommu.c +++ linux-3.13.0/arch/ia64/hp/common/sba_iommu.c @@ -242,7 +242,7 @@ struct pci_dev *sac_only_dev; }; -static struct ioc *ioc_list; +static struct ioc *ioc_list, *ioc_found; static int reserve_sba_gart = 1; static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); @@ -1807,20 +1807,13 @@ { SX2000_IOC_ID, "sx2000", NULL }, }; -static struct ioc * __init -ioc_init(unsigned long hpa, void *handle) +static void ioc_init(unsigned long hpa, struct ioc *ioc) { - struct ioc *ioc; struct ioc_iommu *info; - ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); - if (!ioc) - return NULL; - ioc->next = ioc_list; ioc_list = ioc; - ioc->handle = handle; ioc->ioc_hpa = ioremap(hpa, 0x1000); ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); @@ -1861,8 +1854,6 @@ "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, hpa, ioc->iov_size >> 20, ioc->ibase); - - return ioc; } @@ -2041,22 +2032,21 @@ #define sba_map_ioc_to_node(ioc, handle) #endif -static int __init -acpi_sba_ioc_add(struct acpi_device *device, - const struct acpi_device_id *not_used) +static void acpi_sba_ioc_add(struct ioc *ioc) { - struct ioc *ioc; + acpi_handle handle = ioc->handle; acpi_status status; u64 hpa, length; struct acpi_device_info *adi; - status = hp_acpi_csr_space(device->handle, &hpa, &length); + ioc_found = ioc->next; + status = hp_acpi_csr_space(handle, &hpa, &length); if (ACPI_FAILURE(status)) - return 1; + goto err; - status = acpi_get_object_info(device->handle, &adi); + status = acpi_get_object_info(handle, &adi); if (ACPI_FAILURE(status)) - return 1; + goto err; /* * For HWP0001, only SBA appears in ACPI namespace. 
It encloses the PCI @@ -2077,13 +2067,13 @@ if (!iovp_shift) iovp_shift = 12; - ioc = ioc_init(hpa, device->handle); - if (!ioc) - return 1; - + ioc_init(hpa, ioc); /* setup NUMA node association */ - sba_map_ioc_to_node(ioc, device->handle); - return 0; + sba_map_ioc_to_node(ioc, handle); + return; + + err: + kfree(ioc); } static const struct acpi_device_id hp_ioc_iommu_device_ids[] = { @@ -2091,9 +2081,26 @@ {"HWP0004", 0}, {"", 0}, }; + +static int acpi_sba_ioc_attach(struct acpi_device *device, + const struct acpi_device_id *not_used) +{ + struct ioc *ioc; + + ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); + if (!ioc) + return -ENOMEM; + + ioc->next = ioc_found; + ioc_found = ioc; + ioc->handle = device->handle; + return 1; +} + + static struct acpi_scan_handler acpi_sba_ioc_handler = { .ids = hp_ioc_iommu_device_ids, - .attach = acpi_sba_ioc_add, + .attach = acpi_sba_ioc_attach, }; static int __init acpi_sba_ioc_init_acpi(void) @@ -2128,9 +2135,12 @@ #endif /* - * ioc_list should be populated by the acpi_sba_ioc_handler's .attach() + * ioc_found should be populated by the acpi_sba_ioc_handler's .attach() * routine, but that only happens if acpi_scan_init() has already run. */ + while (ioc_found) + acpi_sba_ioc_add(ioc_found); + if (!ioc_list) { #ifdef CONFIG_IA64_GENERIC /* --- linux-3.13.0.orig/arch/ia64/kernel/time.c +++ linux-3.13.0/arch/ia64/kernel/time.c @@ -384,21 +384,6 @@ .name = "timer" }; -static struct platform_device rtc_efi_dev = { - .name = "rtc-efi", - .id = -1, -}; - -static int __init rtc_init(void) -{ - if (platform_device_register(&rtc_efi_dev) < 0) - printk(KERN_ERR "unable to register rtc device...\n"); - - /* not necessarily an error */ - return 0; -} -module_init(rtc_init); - void read_persistent_clock(struct timespec *ts) { efi_gettimeofday(ts); --- linux-3.13.0.orig/arch/ia64/mm/fault.c +++ linux-3.13.0/arch/ia64/mm/fault.c @@ -172,6 +172,8 @@ */ if (fault & VM_FAULT_OOM) { goto out_of_memory; + } else if (fault & VM_FAULT_SIGSEGV) { + goto bad_area; } else if (fault & VM_FAULT_SIGBUS) { signal = SIGBUS; goto bad_area; --- linux-3.13.0.orig/arch/ia64/mm/hugetlbpage.c +++ linux-3.13.0/arch/ia64/mm/hugetlbpage.c @@ -114,11 +114,6 @@ return 0; } -int pmd_huge_support(void) -{ - return 0; -} - struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { --- linux-3.13.0.orig/arch/ia64/pci/fixup.c +++ linux-3.13.0/arch/ia64/pci/fixup.c @@ -5,6 +5,7 @@ #include #include +#include #include @@ -63,7 +64,7 @@ pci_read_config_word(pdev, PCI_COMMAND, &config); if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; - dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); + dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); --- linux-3.13.0.orig/arch/m32r/mm/fault.c +++ linux-3.13.0/arch/m32r/mm/fault.c @@ -200,6 +200,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/m68k/Kconfig +++ linux-3.13.0/arch/m68k/Kconfig @@ -17,6 +17,7 @@ select FPU if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE + select HAVE_FUTEX_CMPXCHG if MMU && FUTEX select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_REL select MODULES_USE_ELF_RELA --- linux-3.13.0.orig/arch/m68k/mm/fault.c +++ 
linux-3.13.0/arch/m68k/mm/fault.c @@ -153,6 +153,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto map_err; else if (fault & VM_FAULT_SIGBUS) goto bus_err; BUG(); --- linux-3.13.0.orig/arch/m68k/mm/hwtest.c +++ linux-3.13.0/arch/m68k/mm/hwtest.c @@ -28,9 +28,11 @@ int hwreg_present( volatile void *regp ) { int ret = 0; + unsigned long flags; long save_sp, save_vbr; long tmp_vectors[3]; + local_irq_save(flags); __asm__ __volatile__ ( "movec %/vbr,%2\n\t" "movel #Lberr1,%4@(8)\n\t" @@ -46,6 +48,7 @@ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr) : "a" (regp), "a" (tmp_vectors) ); + local_irq_restore(flags); return( ret ); } @@ -58,9 +61,11 @@ int hwreg_write( volatile void *regp, unsigned short val ) { int ret; + unsigned long flags; long save_sp, save_vbr; long tmp_vectors[3]; + local_irq_save(flags); __asm__ __volatile__ ( "movec %/vbr,%2\n\t" "movel #Lberr2,%4@(8)\n\t" @@ -78,6 +83,7 @@ : "=&d" (ret), "=&r" (save_sp), "=&r" (save_vbr) : "a" (regp), "a" (tmp_vectors), "g" (val) ); + local_irq_restore(flags); return( ret ); } --- linux-3.13.0.orig/arch/metag/include/asm/barrier.h +++ linux-3.13.0/arch/metag/include/asm/barrier.h @@ -15,6 +15,7 @@ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE; barrier(); *flushptr = 0; + barrier(); } #else /* CONFIG_METAG_META21 */ @@ -35,6 +36,7 @@ *flushptr = 0; *flushptr = 0; *flushptr = 0; + barrier(); } #endif /* !CONFIG_METAG_META21 */ @@ -68,6 +70,7 @@ volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; barrier(); *flushptr = 0; + barrier(); } #define smp_mb() fence() #define smp_rmb() fence() --- linux-3.13.0.orig/arch/metag/include/asm/processor.h +++ linux-3.13.0/arch/metag/include/asm/processor.h @@ -22,6 +22,8 @@ /* Add an extra page of padding at the top of the stack for the guard page. */ #define STACK_TOP (TASK_SIZE - PAGE_SIZE) #define STACK_TOP_MAX STACK_TOP +/* Maximum virtual space for stack */ +#define STACK_SIZE_MAX (1 << 28) /* 256 MB */ /* This decides where the kernel will search for a free chunk of vm * space during mmap's. 
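[Editor's illustration, not part of the patch] Relating to the metag barrier.h change above, which adds a trailing barrier() after the flush write: a minimal stand-alone C sketch of the compiler-barrier bracketing idiom it relies on. write_fence() and DEMO_FENCE_ADDR are invented names, and the address is purely illustrative.

#define barrier()	__asm__ __volatile__("" ::: "memory")	/* compiler-only barrier */

/* DEMO_FENCE_ADDR stands in for a real write-fence MMIO location. */
#define DEMO_FENCE_ADDR	((volatile int *)0x04000000)

static inline void write_fence(void)
{
	barrier();		/* earlier accesses must be emitted before the flush */
	*DEMO_FENCE_ADDR = 0;	/* the volatile write that triggers the fence */
	barrier();		/* later accesses must not be hoisted above it */
}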
@@ -147,8 +149,8 @@ unsigned long get_wchan(struct task_struct *p); -#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) -#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) --- linux-3.13.0.orig/arch/metag/mm/fault.c +++ linux-3.13.0/arch/metag/mm/fault.c @@ -141,6 +141,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/metag/mm/hugetlbpage.c +++ linux-3.13.0/arch/metag/mm/hugetlbpage.c @@ -110,11 +110,6 @@ return 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { --- linux-3.13.0.orig/arch/microblaze/mm/fault.c +++ linux-3.13.0/arch/microblaze/mm/fault.c @@ -224,6 +224,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/mips/boot/compressed/decompress.c +++ linux-3.13.0/arch/mips/boot/compressed/decompress.c @@ -13,6 +13,7 @@ #include #include +#include #include --- linux-3.13.0.orig/arch/mips/cavium-octeon/octeon-irq.c +++ linux-3.13.0/arch/mips/cavium-octeon/octeon-irq.c @@ -635,7 +635,7 @@ cpumask_clear(&new_affinity); cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); } - __irq_set_affinity_locked(data, &new_affinity); + irq_set_affinity_locked(data, &new_affinity, false); } static int octeon_irq_ciu_set_affinity(struct irq_data *data, --- linux-3.13.0.orig/arch/mips/cavium-octeon/setup.c +++ linux-3.13.0/arch/mips/cavium-octeon/setup.c @@ -458,6 +458,18 @@ octeon_kill_core(NULL); } +static char __read_mostly octeon_system_type[80]; + +static int __init init_octeon_system_type(void) +{ + snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)", + cvmx_board_type_to_string(octeon_bootinfo->board_type), + octeon_model_get_string(read_c0_prid())); + + return 0; +} +early_initcall(init_octeon_system_type); + /** * Return a string representing the system type * @@ -465,11 +477,7 @@ */ const char *octeon_board_type_string(void) { - static char name[80]; - sprintf(name, "%s (%s)", - cvmx_board_type_to_string(octeon_bootinfo->board_type), - octeon_model_get_string(read_c0_prid())); - return name; + return octeon_system_type; } const char *get_system_type(void) --- linux-3.13.0.orig/arch/mips/include/asm/ftrace.h +++ linux-3.13.0/arch/mips/include/asm/ftrace.h @@ -24,7 +24,7 @@ asm volatile ( \ "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\ " li %[" STR(error) "], 0\n" \ - "2:\n" \ + "2: .insn\n" \ \ ".section .fixup, \"ax\"\n" \ "3: li %[" STR(error) "], 1\n" \ @@ -46,7 +46,7 @@ asm volatile ( \ "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\ " li %[" STR(error) "], 0\n" \ - "2:\n" \ + "2: .insn\n" \ \ ".section .fixup, \"ax\"\n" \ "3: li %[" STR(error) "], 1\n" \ --- linux-3.13.0.orig/arch/mips/include/asm/mipsregs.h +++ linux-3.13.0/arch/mips/include/asm/mipsregs.h @@ -14,6 +14,7 @@ #define _ASM_MIPSREGS_H #include +#include #include #include --- linux-3.13.0.orig/arch/mips/include/asm/ptrace.h +++ linux-3.13.0/arch/mips/include/asm/ptrace.h @@ -23,7 +23,7 @@ struct pt_regs { #ifdef CONFIG_32BIT /* Pad bytes for argument 
save space on the stack. */ - unsigned long pad0[6]; + unsigned long pad0[8]; #endif /* Saved main processor registers. */ --- linux-3.13.0.orig/arch/mips/include/asm/reg.h +++ linux-3.13.0/arch/mips/include/asm/reg.h @@ -12,116 +12,194 @@ #ifndef __ASM_MIPS_REG_H #define __ASM_MIPS_REG_H - -#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H) - -#define EF_R0 6 -#define EF_R1 7 -#define EF_R2 8 -#define EF_R3 9 -#define EF_R4 10 -#define EF_R5 11 -#define EF_R6 12 -#define EF_R7 13 -#define EF_R8 14 -#define EF_R9 15 -#define EF_R10 16 -#define EF_R11 17 -#define EF_R12 18 -#define EF_R13 19 -#define EF_R14 20 -#define EF_R15 21 -#define EF_R16 22 -#define EF_R17 23 -#define EF_R18 24 -#define EF_R19 25 -#define EF_R20 26 -#define EF_R21 27 -#define EF_R22 28 -#define EF_R23 29 -#define EF_R24 30 -#define EF_R25 31 +#define MIPS32_EF_R0 6 +#define MIPS32_EF_R1 7 +#define MIPS32_EF_R2 8 +#define MIPS32_EF_R3 9 +#define MIPS32_EF_R4 10 +#define MIPS32_EF_R5 11 +#define MIPS32_EF_R6 12 +#define MIPS32_EF_R7 13 +#define MIPS32_EF_R8 14 +#define MIPS32_EF_R9 15 +#define MIPS32_EF_R10 16 +#define MIPS32_EF_R11 17 +#define MIPS32_EF_R12 18 +#define MIPS32_EF_R13 19 +#define MIPS32_EF_R14 20 +#define MIPS32_EF_R15 21 +#define MIPS32_EF_R16 22 +#define MIPS32_EF_R17 23 +#define MIPS32_EF_R18 24 +#define MIPS32_EF_R19 25 +#define MIPS32_EF_R20 26 +#define MIPS32_EF_R21 27 +#define MIPS32_EF_R22 28 +#define MIPS32_EF_R23 29 +#define MIPS32_EF_R24 30 +#define MIPS32_EF_R25 31 /* * k0/k1 unsaved */ -#define EF_R26 32 -#define EF_R27 33 +#define MIPS32_EF_R26 32 +#define MIPS32_EF_R27 33 -#define EF_R28 34 -#define EF_R29 35 -#define EF_R30 36 -#define EF_R31 37 +#define MIPS32_EF_R28 34 +#define MIPS32_EF_R29 35 +#define MIPS32_EF_R30 36 +#define MIPS32_EF_R31 37 /* * Saved special registers */ -#define EF_LO 38 -#define EF_HI 39 +#define MIPS32_EF_LO 38 +#define MIPS32_EF_HI 39 -#define EF_CP0_EPC 40 -#define EF_CP0_BADVADDR 41 -#define EF_CP0_STATUS 42 -#define EF_CP0_CAUSE 43 -#define EF_UNUSED0 44 - -#define EF_SIZE 180 - -#endif - -#if defined(CONFIG_64BIT) && !defined(WANT_COMPAT_REG_H) - -#define EF_R0 0 -#define EF_R1 1 -#define EF_R2 2 -#define EF_R3 3 -#define EF_R4 4 -#define EF_R5 5 -#define EF_R6 6 -#define EF_R7 7 -#define EF_R8 8 -#define EF_R9 9 -#define EF_R10 10 -#define EF_R11 11 -#define EF_R12 12 -#define EF_R13 13 -#define EF_R14 14 -#define EF_R15 15 -#define EF_R16 16 -#define EF_R17 17 -#define EF_R18 18 -#define EF_R19 19 -#define EF_R20 20 -#define EF_R21 21 -#define EF_R22 22 -#define EF_R23 23 -#define EF_R24 24 -#define EF_R25 25 +#define MIPS32_EF_CP0_EPC 40 +#define MIPS32_EF_CP0_BADVADDR 41 +#define MIPS32_EF_CP0_STATUS 42 +#define MIPS32_EF_CP0_CAUSE 43 +#define MIPS32_EF_UNUSED0 44 + +#define MIPS32_EF_SIZE 180 + +#define MIPS64_EF_R0 0 +#define MIPS64_EF_R1 1 +#define MIPS64_EF_R2 2 +#define MIPS64_EF_R3 3 +#define MIPS64_EF_R4 4 +#define MIPS64_EF_R5 5 +#define MIPS64_EF_R6 6 +#define MIPS64_EF_R7 7 +#define MIPS64_EF_R8 8 +#define MIPS64_EF_R9 9 +#define MIPS64_EF_R10 10 +#define MIPS64_EF_R11 11 +#define MIPS64_EF_R12 12 +#define MIPS64_EF_R13 13 +#define MIPS64_EF_R14 14 +#define MIPS64_EF_R15 15 +#define MIPS64_EF_R16 16 +#define MIPS64_EF_R17 17 +#define MIPS64_EF_R18 18 +#define MIPS64_EF_R19 19 +#define MIPS64_EF_R20 20 +#define MIPS64_EF_R21 21 +#define MIPS64_EF_R22 22 +#define MIPS64_EF_R23 23 +#define MIPS64_EF_R24 24 +#define MIPS64_EF_R25 25 /* * k0/k1 unsaved */ -#define EF_R26 26 -#define EF_R27 27 +#define MIPS64_EF_R26 26 +#define MIPS64_EF_R27 
27 -#define EF_R28 28 -#define EF_R29 29 -#define EF_R30 30 -#define EF_R31 31 +#define MIPS64_EF_R28 28 +#define MIPS64_EF_R29 29 +#define MIPS64_EF_R30 30 +#define MIPS64_EF_R31 31 /* * Saved special registers */ -#define EF_LO 32 -#define EF_HI 33 - -#define EF_CP0_EPC 34 -#define EF_CP0_BADVADDR 35 -#define EF_CP0_STATUS 36 -#define EF_CP0_CAUSE 37 +#define MIPS64_EF_LO 32 +#define MIPS64_EF_HI 33 -#define EF_SIZE 304 /* size in bytes */ +#define MIPS64_EF_CP0_EPC 34 +#define MIPS64_EF_CP0_BADVADDR 35 +#define MIPS64_EF_CP0_STATUS 36 +#define MIPS64_EF_CP0_CAUSE 37 + +#define MIPS64_EF_SIZE 304 /* size in bytes */ + +#if defined(CONFIG_32BIT) + +#define EF_R0 MIPS32_EF_R0 +#define EF_R1 MIPS32_EF_R1 +#define EF_R2 MIPS32_EF_R2 +#define EF_R3 MIPS32_EF_R3 +#define EF_R4 MIPS32_EF_R4 +#define EF_R5 MIPS32_EF_R5 +#define EF_R6 MIPS32_EF_R6 +#define EF_R7 MIPS32_EF_R7 +#define EF_R8 MIPS32_EF_R8 +#define EF_R9 MIPS32_EF_R9 +#define EF_R10 MIPS32_EF_R10 +#define EF_R11 MIPS32_EF_R11 +#define EF_R12 MIPS32_EF_R12 +#define EF_R13 MIPS32_EF_R13 +#define EF_R14 MIPS32_EF_R14 +#define EF_R15 MIPS32_EF_R15 +#define EF_R16 MIPS32_EF_R16 +#define EF_R17 MIPS32_EF_R17 +#define EF_R18 MIPS32_EF_R18 +#define EF_R19 MIPS32_EF_R19 +#define EF_R20 MIPS32_EF_R20 +#define EF_R21 MIPS32_EF_R21 +#define EF_R22 MIPS32_EF_R22 +#define EF_R23 MIPS32_EF_R23 +#define EF_R24 MIPS32_EF_R24 +#define EF_R25 MIPS32_EF_R25 +#define EF_R26 MIPS32_EF_R26 +#define EF_R27 MIPS32_EF_R27 +#define EF_R28 MIPS32_EF_R28 +#define EF_R29 MIPS32_EF_R29 +#define EF_R30 MIPS32_EF_R30 +#define EF_R31 MIPS32_EF_R31 +#define EF_LO MIPS32_EF_LO +#define EF_HI MIPS32_EF_HI +#define EF_CP0_EPC MIPS32_EF_CP0_EPC +#define EF_CP0_BADVADDR MIPS32_EF_CP0_BADVADDR +#define EF_CP0_STATUS MIPS32_EF_CP0_STATUS +#define EF_CP0_CAUSE MIPS32_EF_CP0_CAUSE +#define EF_UNUSED0 MIPS32_EF_UNUSED0 +#define EF_SIZE MIPS32_EF_SIZE + +#elif defined(CONFIG_64BIT) + +#define EF_R0 MIPS64_EF_R0 +#define EF_R1 MIPS64_EF_R1 +#define EF_R2 MIPS64_EF_R2 +#define EF_R3 MIPS64_EF_R3 +#define EF_R4 MIPS64_EF_R4 +#define EF_R5 MIPS64_EF_R5 +#define EF_R6 MIPS64_EF_R6 +#define EF_R7 MIPS64_EF_R7 +#define EF_R8 MIPS64_EF_R8 +#define EF_R9 MIPS64_EF_R9 +#define EF_R10 MIPS64_EF_R10 +#define EF_R11 MIPS64_EF_R11 +#define EF_R12 MIPS64_EF_R12 +#define EF_R13 MIPS64_EF_R13 +#define EF_R14 MIPS64_EF_R14 +#define EF_R15 MIPS64_EF_R15 +#define EF_R16 MIPS64_EF_R16 +#define EF_R17 MIPS64_EF_R17 +#define EF_R18 MIPS64_EF_R18 +#define EF_R19 MIPS64_EF_R19 +#define EF_R20 MIPS64_EF_R20 +#define EF_R21 MIPS64_EF_R21 +#define EF_R22 MIPS64_EF_R22 +#define EF_R23 MIPS64_EF_R23 +#define EF_R24 MIPS64_EF_R24 +#define EF_R25 MIPS64_EF_R25 +#define EF_R26 MIPS64_EF_R26 +#define EF_R27 MIPS64_EF_R27 +#define EF_R28 MIPS64_EF_R28 +#define EF_R29 MIPS64_EF_R29 +#define EF_R30 MIPS64_EF_R30 +#define EF_R31 MIPS64_EF_R31 +#define EF_LO MIPS64_EF_LO +#define EF_HI MIPS64_EF_HI +#define EF_CP0_EPC MIPS64_EF_CP0_EPC +#define EF_CP0_BADVADDR MIPS64_EF_CP0_BADVADDR +#define EF_CP0_STATUS MIPS64_EF_CP0_STATUS +#define EF_CP0_CAUSE MIPS64_EF_CP0_CAUSE +#define EF_SIZE MIPS64_EF_SIZE #endif /* CONFIG_64BIT */ --- linux-3.13.0.orig/arch/mips/include/asm/syscall.h +++ linux-3.13.0/arch/mips/include/asm/syscall.h @@ -29,7 +29,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, struct task_struct *task, struct pt_regs *regs, unsigned int n) { - unsigned long usp = regs->regs[29]; + unsigned long usp __maybe_unused = regs->regs[29]; switch (n) { case 0: case 1: case 2: case 3: --- 
linux-3.13.0.orig/arch/mips/include/uapi/asm/unistd.h +++ linux-3.13.0/arch/mips/include/uapi/asm/unistd.h @@ -369,16 +369,20 @@ #define __NR_process_vm_writev (__NR_Linux + 346) #define __NR_kcmp (__NR_Linux + 347) #define __NR_finit_module (__NR_Linux + 348) +#define __NR_sched_setattr (__NR_Linux + 349) +#define __NR_sched_getattr (__NR_Linux + 350) +#define __NR_renameat2 (__NR_Linux + 351) +#define __NR_seccomp (__NR_Linux + 352) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 348 +#define __NR_Linux_syscalls 352 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 348 +#define __NR_O32_Linux_syscalls 352 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -695,16 +699,20 @@ #define __NR_kcmp (__NR_Linux + 306) #define __NR_finit_module (__NR_Linux + 307) #define __NR_getdents64 (__NR_Linux + 308) +#define __NR_sched_setattr (__NR_Linux + 309) +#define __NR_sched_getattr (__NR_Linux + 310) +#define __NR_renameat2 (__NR_Linux + 311) +#define __NR_seccomp (__NR_Linux + 312) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 308 +#define __NR_Linux_syscalls 312 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 308 +#define __NR_64_Linux_syscalls 312 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1025,15 +1033,19 @@ #define __NR_process_vm_writev (__NR_Linux + 310) #define __NR_kcmp (__NR_Linux + 311) #define __NR_finit_module (__NR_Linux + 312) +#define __NR_sched_setattr (__NR_Linux + 313) +#define __NR_sched_getattr (__NR_Linux + 314) +#define __NR_renameat2 (__NR_Linux + 315) +#define __NR_seccomp (__NR_Linux + 316) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 312 +#define __NR_Linux_syscalls 316 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 312 +#define __NR_N32_Linux_syscalls 316 #endif /* _UAPI_ASM_UNISTD_H */ --- linux-3.13.0.orig/arch/mips/kernel/binfmt_elfo32.c +++ linux-3.13.0/arch/mips/kernel/binfmt_elfo32.c @@ -58,12 +58,6 @@ #include -/* - * When this file is selected, we are definitely running a 64bit kernel. 
- * So using the right regs define in asm/reg.h - */ -#define WANT_COMPAT_REG_H - /* These MUST be defined before elf.h gets included */ extern void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs); #define ELF_CORE_COPY_REGS(_dest, _regs) elf32_core_copy_regs(_dest, _regs); @@ -135,21 +129,21 @@ { int i; - for (i = 0; i < EF_R0; i++) + for (i = 0; i < MIPS32_EF_R0; i++) grp[i] = 0; - grp[EF_R0] = 0; + grp[MIPS32_EF_R0] = 0; for (i = 1; i <= 31; i++) - grp[EF_R0 + i] = (elf_greg_t) regs->regs[i]; - grp[EF_R26] = 0; - grp[EF_R27] = 0; - grp[EF_LO] = (elf_greg_t) regs->lo; - grp[EF_HI] = (elf_greg_t) regs->hi; - grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; - grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; - grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; - grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; -#ifdef EF_UNUSED0 - grp[EF_UNUSED0] = 0; + grp[MIPS32_EF_R0 + i] = (elf_greg_t) regs->regs[i]; + grp[MIPS32_EF_R26] = 0; + grp[MIPS32_EF_R27] = 0; + grp[MIPS32_EF_LO] = (elf_greg_t) regs->lo; + grp[MIPS32_EF_HI] = (elf_greg_t) regs->hi; + grp[MIPS32_EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc; + grp[MIPS32_EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr; + grp[MIPS32_EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status; + grp[MIPS32_EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause; +#ifdef MIPS32_EF_UNUSED0 + grp[MIPS32_EF_UNUSED0] = 0; #endif } --- linux-3.13.0.orig/arch/mips/kernel/irq-gic.c +++ linux-3.13.0/arch/mips/kernel/irq-gic.c @@ -255,11 +255,13 @@ /* Setup Intr to Pin mapping */ if (pin & GIC_MAP_TO_NMI_MSK) { + int i; + GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin); /* FIXME: hack to route NMI to all cpu's */ - for (cpu = 0; cpu < NR_CPUS; cpu += 32) { + for (i = 0; i < NR_CPUS; i += 32) { GICWRITE(GIC_REG_ADDR(SHARED, - GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)), + GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)), 0xffffffff); } } else { --- linux-3.13.0.orig/arch/mips/kernel/irq-msc01.c +++ linux-3.13.0/arch/mips/kernel/irq-msc01.c @@ -131,7 +131,7 @@ board_bind_eic_interrupt = &msc_bind_eic_interrupt; - for (; nirq >= 0; nirq--, imp++) { + for (; nirq > 0; nirq--, imp++) { int n = imp->im_irq; switch (imp->im_type) { --- linux-3.13.0.orig/arch/mips/kernel/irq_cpu.c +++ linux-3.13.0/arch/mips/kernel/irq_cpu.c @@ -56,6 +56,8 @@ .irq_mask_ack = mask_mips_irq, .irq_unmask = unmask_mips_irq, .irq_eoi = unmask_mips_irq, + .irq_disable = mask_mips_irq, + .irq_enable = unmask_mips_irq, }; /* @@ -92,6 +94,8 @@ .irq_mask_ack = mips_mt_cpu_irq_ack, .irq_unmask = unmask_mips_irq, .irq_eoi = unmask_mips_irq, + .irq_disable = mask_mips_irq, + .irq_enable = unmask_mips_irq, }; void __init mips_cpu_irq_init(void) --- linux-3.13.0.orig/arch/mips/kernel/mcount.S +++ linux-3.13.0/arch/mips/kernel/mcount.S @@ -123,7 +123,11 @@ nop #endif b ftrace_stub +#ifdef CONFIG_32BIT + addiu sp, sp, 8 +#else nop +#endif static_trace: MCOUNT_SAVE_REGS @@ -133,6 +137,9 @@ move a1, AT /* arg2: parent's return address */ MCOUNT_RESTORE_REGS +#ifdef CONFIG_32BIT + addiu sp, sp, 8 +#endif .globl ftrace_stub ftrace_stub: RETURN_BACK @@ -177,6 +184,11 @@ jal prepare_ftrace_return nop MCOUNT_RESTORE_REGS +#ifndef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_32BIT + addiu sp, sp, 8 +#endif +#endif RETURN_BACK END(ftrace_graph_caller) --- linux-3.13.0.orig/arch/mips/kernel/mips_ksyms.c +++ linux-3.13.0/arch/mips/kernel/mips_ksyms.c @@ -14,6 +14,7 @@ #include #include #include +#include extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_user_nocheck_asm(char *__to, @@ -26,6 +27,13 @@ 
extern long __strnlen_user_asm(const char *s); /* + * Core architecture code + */ +#ifdef CONFIG_CPU_R4K_FPU +EXPORT_SYMBOL_GPL(_save_fp); +#endif + +/* * String functions */ EXPORT_SYMBOL(memset); --- linux-3.13.0.orig/arch/mips/kernel/ptrace.c +++ linux-3.13.0/arch/mips/kernel/ptrace.c @@ -170,6 +170,7 @@ __get_user(fregs[i], i + (__u64 __user *) data); __get_user(child->thread.fpu.fcr31, data + 64); + child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* FIR may not be written. */ @@ -265,36 +266,160 @@ /* regset get/set implementations */ -static int gpr_get(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) +#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) + +static int gpr32_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) { struct pt_regs *regs = task_pt_regs(target); + u32 uregs[ELF_NGREG] = {}; + unsigned i; - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - regs, 0, sizeof(*regs)); + for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) { + /* k0/k1 are copied as zero. */ + if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27) + continue; + + uregs[i] = regs->regs[i - MIPS32_EF_R0]; + } + + uregs[MIPS32_EF_LO] = regs->lo; + uregs[MIPS32_EF_HI] = regs->hi; + uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc; + uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr; + uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status; + uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); } -static int gpr_set(struct task_struct *target, - const struct user_regset *regset, - unsigned int pos, unsigned int count, - const void *kbuf, const void __user *ubuf) +static int gpr32_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - struct pt_regs newregs; - int ret; + struct pt_regs *regs = task_pt_regs(target); + u32 uregs[ELF_NGREG]; + unsigned start, num_regs, i; + int err; - ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &newregs, - 0, sizeof(newregs)); - if (ret) - return ret; + start = pos / sizeof(u32); + num_regs = count / sizeof(u32); + + if (start + num_regs > ELF_NGREG) + return -EIO; - *task_pt_regs(target) = newregs; + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); + if (err) + return err; + + for (i = start; i < num_regs; i++) { + /* + * Cast all values to signed here so that if this is a 64-bit + * kernel, the supplied 32-bit values will be sign extended. + */ + switch (i) { + case MIPS32_EF_R1 ... MIPS32_EF_R25: + /* k0/k1 are ignored. */ + case MIPS32_EF_R28 ... MIPS32_EF_R31: + regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i]; + break; + case MIPS32_EF_LO: + regs->lo = (s32)uregs[i]; + break; + case MIPS32_EF_HI: + regs->hi = (s32)uregs[i]; + break; + case MIPS32_EF_CP0_EPC: + regs->cp0_epc = (s32)uregs[i]; + break; + } + } return 0; } +#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ + +#ifdef CONFIG_64BIT + +static int gpr64_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + u64 uregs[ELF_NGREG] = {}; + unsigned i; + + for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) { + /* k0/k1 are copied as zero. 
*/ + if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27) + continue; + + uregs[i] = regs->regs[i - MIPS64_EF_R0]; + } + + uregs[MIPS64_EF_LO] = regs->lo; + uregs[MIPS64_EF_HI] = regs->hi; + uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc; + uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr; + uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status; + uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); +} + +static int gpr64_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + u64 uregs[ELF_NGREG]; + unsigned start, num_regs, i; + int err; + + start = pos / sizeof(u64); + num_regs = count / sizeof(u64); + + if (start + num_regs > ELF_NGREG) + return -EIO; + + err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, + sizeof(uregs)); + if (err) + return err; + + for (i = start; i < num_regs; i++) { + switch (i) { + case MIPS64_EF_R1 ... MIPS64_EF_R25: + /* k0/k1 are ignored. */ + case MIPS64_EF_R28 ... MIPS64_EF_R31: + regs->regs[i - MIPS64_EF_R0] = uregs[i]; + break; + case MIPS64_EF_LO: + regs->lo = uregs[i]; + break; + case MIPS64_EF_HI: + regs->hi = uregs[i]; + break; + case MIPS64_EF_CP0_EPC: + regs->cp0_epc = uregs[i]; + break; + } + } + + return 0; +} + +#endif /* CONFIG_64BIT */ + static int fpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, @@ -322,14 +447,16 @@ REGSET_FPR, }; +#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) + static const struct user_regset mips_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(unsigned int), .align = sizeof(unsigned int), - .get = gpr_get, - .set = gpr_set, + .get = gpr32_get, + .set = gpr32_set, }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, @@ -349,14 +476,18 @@ .n = ARRAY_SIZE(mips_regsets), }; +#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ + +#ifdef CONFIG_64BIT + static const struct user_regset mips64_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(unsigned long), .align = sizeof(unsigned long), - .get = gpr_get, - .set = gpr_set, + .get = gpr64_get, + .set = gpr64_set, }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, @@ -369,25 +500,26 @@ }; static const struct user_regset_view user_mips64_view = { - .name = "mips", + .name = "mips64", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, .regsets = mips64_regsets, - .n = ARRAY_SIZE(mips_regsets), + .n = ARRAY_SIZE(mips64_regsets), }; +#endif /* CONFIG_64BIT */ + const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_32BIT return &user_mips_view; -#endif - +#else #ifdef CONFIG_MIPS32_O32 - if (test_thread_flag(TIF_32BIT_REGS)) - return &user_mips_view; + if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) + return &user_mips_view; #endif - return &user_mips64_view; +#endif } long arch_ptrace(struct task_struct *child, long request, @@ -587,7 +719,7 @@ break; #endif case FPC_CSR: - child->thread.fpu.fcr31 = data; + child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X; break; case DSP_BASE ... 
DSP_BASE + 5: { dspreg_t *dregs; --- linux-3.13.0.orig/arch/mips/kernel/scall32-o32.S +++ linux-3.13.0/arch/mips/kernel/scall32-o32.S @@ -563,3 +563,7 @@ PTR sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_ni_syscall /* sched_setattr */ + PTR sys_ni_syscall /* 4350 */ /* sched_getattr */ + PTR sys_ni_syscall /* renameat2 */ + PTR sys_seccomp --- linux-3.13.0.orig/arch/mips/kernel/scall64-64.S +++ linux-3.13.0/arch/mips/kernel/scall64-64.S @@ -425,4 +425,8 @@ PTR sys_kcmp PTR sys_finit_module PTR sys_getdents64 + PTR sys_sched_setattr /* sched_setattr */ + PTR sys_sched_getattr /* 5310 */ /* sched_getattr */ + PTR sys_ni_syscall /* renameat2 */ + PTR sys_seccomp .size sys_call_table,.-sys_call_table --- linux-3.13.0.orig/arch/mips/kernel/scall64-n32.S +++ linux-3.13.0/arch/mips/kernel/scall64-n32.S @@ -418,4 +418,8 @@ PTR compat_sys_process_vm_writev /* 6310 */ PTR sys_kcmp PTR sys_finit_module + PTR sys_ni_syscall /* sched_setattr */ + PTR sys_ni_syscall /* sched_getattr */ + PTR sys_ni_syscall /* 6315 */ /* renameat2 */ + PTR sys_seccomp .size sysn32_call_table,.-sysn32_call_table --- linux-3.13.0.orig/arch/mips/kernel/scall64-o32.S +++ linux-3.13.0/arch/mips/kernel/scall64-o32.S @@ -541,4 +541,8 @@ PTR compat_sys_process_vm_writev PTR sys_kcmp PTR sys_finit_module + PTR sys_ni_syscall /* sched_setattr */ + PTR sys_ni_syscall /* 4350 */ /* sched_getattr */ + PTR sys_ni_syscall /* renameat2 */ + PTR sys_seccomp .size sys32_call_table,.-sys32_call_table --- linux-3.13.0.orig/arch/mips/kernel/smp.c +++ linux-3.13.0/arch/mips/kernel/smp.c @@ -109,10 +109,10 @@ else #endif /* CONFIG_MIPS_MT_SMTC */ cpu_probe(); - cpu_report(); per_cpu_trap_init(false); mips_clockevent_init(); mp_ops->init_secondary(); + cpu_report(); /* * XXX parity protection should be folded in here when it's converted --- linux-3.13.0.orig/arch/mips/kernel/unaligned.c +++ linux-3.13.0/arch/mips/kernel/unaligned.c @@ -605,7 +605,6 @@ case sdc1_op: die_if_kernel("Unaligned FP access in kernel code", regs); BUG_ON(!used_math()); - BUG_ON(!is_fpu_owner()); lose_fpu(1); /* Save FPU state for the emulator. 
*/ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, --- linux-3.13.0.orig/arch/mips/kvm/kvm_locore.S +++ linux-3.13.0/arch/mips/kvm/kvm_locore.S @@ -428,7 +428,7 @@ /* Setup status register for running guest in UM */ .set at or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) - and v1, v1, ~ST0_CU0 + and v1, v1, ~(ST0_CU0 | ST0_MX) .set noat mtc0 v1, CP0_STATUS ehb --- linux-3.13.0.orig/arch/mips/kvm/kvm_mips.c +++ linux-3.13.0/arch/mips/kvm/kvm_mips.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -304,7 +305,7 @@ if (cpu_has_veic || cpu_has_vint) { size = 0x200 + VECTORSPACING * 64; } else { - size = 0x200; + size = 0x4000; } /* Save Linux EBASE */ @@ -395,6 +396,7 @@ if (vcpu->arch.kseg0_commpage) kfree(vcpu->arch.kseg0_commpage); + kfree(vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) @@ -423,11 +425,13 @@ vcpu->mmio_needed = 0; } + lose_fpu(1); + + local_irq_disable(); /* Check if we have any exceptions/interrupts pending */ kvm_mips_deliver_interrupts(vcpu, kvm_read_c0_guest_cause(vcpu->arch.cop0)); - local_irq_disable(); kvm_guest_enter(); r = __kvm_mips_vcpu_run(run, vcpu); @@ -1027,9 +1031,6 @@ { uint32_t status = read_c0_status(); - if (cpu_has_fpu) - status |= (ST0_CU1); - if (cpu_has_dsp) status |= (ST0_MX); --- linux-3.13.0.orig/arch/mips/kvm/kvm_mips_emul.c +++ linux-3.13.0/arch/mips/kvm/kvm_mips_emul.c @@ -1571,17 +1571,17 @@ arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); #else /* UserLocal not implemented */ - er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); + er = EMULATE_FAIL; #endif break; default: - printk("RDHWR not supported\n"); + kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); er = EMULATE_FAIL; break; } } else { - printk("Emulate RI not supported @ %p: %#x\n", opc, inst); + kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); er = EMULATE_FAIL; } @@ -1590,6 +1590,7 @@ */ if (er == EMULATE_FAIL) { vcpu->arch.pc = curr_pc; + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); } return er; } --- linux-3.13.0.orig/arch/mips/kvm/trace.h +++ linux-3.13.0/arch/mips/kvm/trace.h @@ -26,18 +26,18 @@ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), TP_ARGS(vcpu, reason), TP_STRUCT__entry( - __field(struct kvm_vcpu *, vcpu) + __field(unsigned long, pc) __field(unsigned int, reason) ), TP_fast_assign( - __entry->vcpu = vcpu; + __entry->pc = vcpu->arch.pc; __entry->reason = reason; ), TP_printk("[%s]PC: 0x%08lx", kvm_mips_exit_types_str[__entry->reason], - __entry->vcpu->arch.pc) + __entry->pc) ); #endif /* _TRACE_KVM_H */ --- linux-3.13.0.orig/arch/mips/lantiq/dts/easy50712.dts +++ linux-3.13.0/arch/mips/lantiq/dts/easy50712.dts @@ -8,6 +8,7 @@ }; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; --- linux-3.13.0.orig/arch/mips/loongson/common/Makefile +++ linux-3.13.0/arch/mips/loongson/common/Makefile @@ -11,7 +11,8 @@ # Serial port support # obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_SERIAL_8250) += serial.o +loongson-serial-$(CONFIG_SERIAL_8250) := serial.o +obj-y += $(loongson-serial-m) $(loongson-serial-y) obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o obj-$(CONFIG_LOONGSON_MC146818) += rtc.o --- linux-3.13.0.orig/arch/mips/mm/fault.c +++ linux-3.13.0/arch/mips/mm/fault.c @@ -158,6 +158,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/mips/mm/hugetlbpage.c +++
linux-3.13.0/arch/mips/mm/hugetlbpage.c @@ -85,11 +85,6 @@ return (pud_val(pud) & _PAGE_HUGE) != 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page * follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) --- linux-3.13.0.orig/arch/mips/mm/tlbex.c +++ linux-3.13.0/arch/mips/mm/tlbex.c @@ -1057,6 +1057,7 @@ struct mips_huge_tlb_info { int huge_pte; int restore_scratch; + bool need_reload_pte; }; static struct mips_huge_tlb_info @@ -1071,6 +1072,7 @@ rv.huge_pte = scratch; rv.restore_scratch = 0; + rv.need_reload_pte = false; if (check_for_high_segbits) { UASM_i_MFC0(p, tmp, C0_BADVADDR); @@ -1259,6 +1261,7 @@ } else { htlb_info.huge_pte = K0; htlb_info.restore_scratch = 0; + htlb_info.need_reload_pte = true; vmalloc_mode = refill_noscratch; /* * create the plain linear handler @@ -1295,6 +1298,8 @@ } #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT uasm_l_tlb_huge_update(&l, p); + if (htlb_info.need_reload_pte) + UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); build_huge_update_entries(&p, htlb_info.huge_pte, K1); build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, htlb_info.restore_scratch); --- linux-3.13.0.orig/arch/mips/oprofile/backtrace.c +++ linux-3.13.0/arch/mips/oprofile/backtrace.c @@ -92,7 +92,7 @@ /* This marks the end of the previous function, which means we overran. */ break; - stack_size = (unsigned) stack_adjustment; + stack_size = (unsigned long) stack_adjustment; } else if (is_ra_save_ins(&ip)) { int ra_slot = ip.i_format.simmediate; if (ra_slot < 0) --- linux-3.13.0.orig/arch/mips/power/hibernate.S +++ linux-3.13.0/arch/mips/power/hibernate.S @@ -43,6 +43,7 @@ bne t1, t3, 1b PTR_L t0, PBE_NEXT(t0) bnez t0, 0b + jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */ PTR_LA t0, saved_regs PTR_L ra, PT_R31(t0) PTR_L sp, PT_R29(t0) --- linux-3.13.0.orig/arch/mips/ralink/dts/mt7620a_eval.dts +++ linux-3.13.0/arch/mips/ralink/dts/mt7620a_eval.dts @@ -7,6 +7,7 @@ model = "Ralink MT7620A evaluation board"; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; --- linux-3.13.0.orig/arch/mips/ralink/dts/rt2880_eval.dts +++ linux-3.13.0/arch/mips/ralink/dts/rt2880_eval.dts @@ -7,6 +7,7 @@ model = "Ralink RT2880 evaluation board"; memory@0 { + device_type = "memory"; reg = <0x8000000 0x2000000>; }; --- linux-3.13.0.orig/arch/mips/ralink/dts/rt3052_eval.dts +++ linux-3.13.0/arch/mips/ralink/dts/rt3052_eval.dts @@ -7,6 +7,7 @@ model = "Ralink RT3052 evaluation board"; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; --- linux-3.13.0.orig/arch/mips/ralink/dts/rt3883_eval.dts +++ linux-3.13.0/arch/mips/ralink/dts/rt3883_eval.dts @@ -7,6 +7,7 @@ model = "Ralink RT3883 evaluation board"; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; --- linux-3.13.0.orig/arch/mn10300/mm/fault.c +++ linux-3.13.0/arch/mn10300/mm/fault.c @@ -262,6 +262,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/openrisc/mm/fault.c +++ linux-3.13.0/arch/openrisc/mm/fault.c @@ -171,6 +171,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/parisc/Kconfig +++ linux-3.13.0/arch/parisc/Kconfig @@ -22,6 +22,7 @@ select GENERIC_SMP_IDLE_THREAD select GENERIC_STRNCPY_FROM_USER select 
SYSCTL_ARCH_UNALIGN_ALLOW + select SYSCTL_EXCEPTION_TRACE select HAVE_MOD_ARCH_SPECIFIC select VIRT_TO_BUS select MODULES_USE_ELF_RELA --- linux-3.13.0.orig/arch/parisc/Makefile +++ linux-3.13.0/arch/parisc/Makefile @@ -48,7 +48,12 @@ # These flags should be implied by an hppa-linux configuration, but they # are not in gcc 3.2. -cflags-y += -mno-space-regs -mfast-indirect-calls +cflags-y += -mno-space-regs + +# -mfast-indirect-calls is only relevant for 32-bit kernels. +ifndef CONFIG_64BIT +cflags-y += -mfast-indirect-calls +endif # Currently we save and restore fpregs on all kernel entry/interruption paths. # If that gets optimized, we might need to disable the use of fpregs in the --- linux-3.13.0.orig/arch/parisc/include/asm/cacheflush.h +++ linux-3.13.0/arch/parisc/include/asm/cacheflush.h @@ -132,7 +132,6 @@ static inline void *kmap(struct page *page) { might_sleep(); - flush_dcache_page(page); return page_address(page); } @@ -144,7 +143,6 @@ static inline void *kmap_atomic(struct page *page) { pagefault_disable(); - flush_dcache_page(page); return page_address(page); } --- linux-3.13.0.orig/arch/parisc/include/asm/ldcw.h +++ linux-3.13.0/arch/parisc/include/asm/ldcw.h @@ -33,11 +33,18 @@ #endif /*!CONFIG_PA20*/ -/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */ +/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. + We don't explicitly expose that "*a" may be written as reload + fails to find a register in class R1_REGS when "a" needs to be + reloaded when generating 64-bit PIC code. Instead, we clobber + memory to indicate to the compiler that the assembly code reads + or writes to items other than those listed in the input and output + operands. This may pessimize the code somewhat but __ldcw is + usually used within code blocks surrounded by memory barriors. */ #define __ldcw(a) ({ \ unsigned __ret; \ - __asm__ __volatile__(__LDCW " 0(%2),%0" \ - : "=r" (__ret), "+m" (*(a)) : "r" (a)); \ + __asm__ __volatile__(__LDCW " 0(%1),%0" \ + : "=r" (__ret) : "r" (a) : "memory"); \ __ret; \ }) --- linux-3.13.0.orig/arch/parisc/include/asm/page.h +++ linux-3.13.0/arch/parisc/include/asm/page.h @@ -29,7 +29,8 @@ void clear_page_asm(void *page); void copy_page_asm(void *to, void *from); #define clear_user_page(vto, vaddr, page) clear_page_asm(vto) -#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom) +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *pg); /* #define CONFIG_PARISC_TMPALIAS */ --- linux-3.13.0.orig/arch/parisc/include/asm/processor.h +++ linux-3.13.0/arch/parisc/include/asm/processor.h @@ -53,6 +53,8 @@ #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX DEFAULT_TASK_SIZE +#define STACK_SIZE_MAX (1 << 30) /* 1 GB */ + #endif #ifndef __ASSEMBLY__ --- linux-3.13.0.orig/arch/parisc/include/uapi/asm/shmbuf.h +++ linux-3.13.0/arch/parisc/include/uapi/asm/shmbuf.h @@ -36,23 +36,16 @@ unsigned int __unused2; }; -#ifdef CONFIG_64BIT -/* The 'unsigned int' (formerly 'unsigned long') data types below will - * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on - * a wide kernel, but if some of these values are meant to contain pointers - * they may need to be 'long long' instead. 
-PB XXX FIXME - */ -#endif struct shminfo64 { - unsigned int shmmax; - unsigned int shmmin; - unsigned int shmmni; - unsigned int shmseg; - unsigned int shmall; - unsigned int __unused1; - unsigned int __unused2; - unsigned int __unused3; - unsigned int __unused4; + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; }; #endif /* _PARISC_SHMBUF_H */ --- linux-3.13.0.orig/arch/parisc/include/uapi/asm/signal.h +++ linux-3.13.0/arch/parisc/include/uapi/asm/signal.h @@ -69,8 +69,6 @@ #define SA_NOMASK SA_NODEFER #define SA_ONESHOT SA_RESETHAND -#define SA_RESTORER 0x04000000 /* obsolete -- ignored */ - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 --- linux-3.13.0.orig/arch/parisc/kernel/cache.c +++ linux-3.13.0/arch/parisc/kernel/cache.c @@ -388,6 +388,20 @@ } EXPORT_SYMBOL(flush_kernel_dcache_page_addr); +void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, + struct page *pg) +{ + /* Copy using kernel mapping. No coherency is needed (all in + kunmap) for the `to' page. However, the `from' page needs to + be flushed through a mapping equivalent to the user mapping + before it can be accessed through the kernel mapping. */ + preempt_disable(); + flush_dcache_page_asm(__pa(vfrom), vaddr); + preempt_enable(); + copy_page_asm(vto, vfrom); +} +EXPORT_SYMBOL(copy_user_page); + void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) { unsigned long flags; --- linux-3.13.0.orig/arch/parisc/kernel/hardware.c +++ linux-3.13.0/arch/parisc/kernel/hardware.c @@ -1210,7 +1210,8 @@ {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, - {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, + {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"}, + {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"}, {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, --- linux-3.13.0.orig/arch/parisc/kernel/sys_parisc32.c +++ linux-3.13.0/arch/parisc/kernel/sys_parisc32.c @@ -4,6 +4,7 @@ * Copyright (C) 2000-2001 Hewlett Packard Company * Copyright (C) 2000 John Marvin * Copyright (C) 2001 Matthew Wilcox + * Copyright (C) 2014 Helge Deller * * These routines maintain argument size conversion between 32bit and 64bit * environment. Based heavily on sys_ia32.c and sys_sparc32.c. 
@@ -11,44 +12,8 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include - -#undef DEBUG - -#ifdef DEBUG -#define DBG(x) printk x -#else -#define DBG(x) -#endif asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, int r22, int r21, int r20) @@ -57,3 +22,12 @@ current->comm, current->pid, r20); return -ENOSYS; } + +asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags, + compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd, + const char __user * pathname) +{ + return sys_fanotify_mark(fanotify_fd, flags, + ((__u64)mask1 << 32) | mask0, + dfd, pathname); +} --- linux-3.13.0.orig/arch/parisc/kernel/syscall.S +++ linux-3.13.0/arch/parisc/kernel/syscall.S @@ -74,7 +74,7 @@ /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ /* Light-weight-syscall entry must always be located at 0xb0 */ /* WARNING: Keep this number updated with table size changes */ -#define __NR_lws_entries (2) +#define __NR_lws_entries (3) lws_entry: gate lws_start, %r0 /* increase privilege */ @@ -502,7 +502,7 @@ /*************************************************** - Implementing CAS as an atomic operation: + Implementing 32bit CAS as an atomic operation: %r26 - Address to examine %r25 - Old value to check (old) @@ -589,10 +589,13 @@ # endif /* ENABLE_LWS_DEBUG */ + rsm PSW_SM_I, %r0 /* Disable interrupts */ + /* COW breaks can cause contention on UP systems */ LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */ cas_wouldblock: ldo 2(%r0), %r28 /* 2nd case */ + ssm PSW_SM_I, %r0 b lws_exit /* Contended... */ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ @@ -619,15 +622,17 @@ stw %r1, 4(%sr2,%r20) #endif /* The load and store could fail */ -1: ldw 0(%sr3,%r26), %r28 +1: ldw,ma 0(%sr3,%r26), %r28 sub,<> %r28, %r25, %r0 -2: stw %r24, 0(%sr3,%r26) +2: stw,ma %r24, 0(%sr3,%r26) /* Free lock */ - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) #endif + /* Enable interrupts */ + ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ b lws_exit copy %r0, %r21 @@ -639,6 +644,7 @@ #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) #endif + ssm PSW_SM_I, %r0 b lws_exit ldo -EFAULT(%r0),%r21 /* set errno */ nop @@ -653,6 +659,230 @@ ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) + /*************************************************** + New CAS implementation which uses pointers and variable size + information. The value pointed by old and new MUST NOT change + while performing CAS. The lock only protect the value at %r26. + + %r26 - Address to examine + %r25 - Pointer to the value to check (old) + %r24 - Pointer to the value to set (new) + %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) + %r28 - Return non-zero on failure + %r21 - Kernel error code + + %r21 has the following meanings: + + EAGAIN - CAS is busy, ldcw failed, try again. + EFAULT - Read or write failed. 
+ + Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only) + + ****************************************************/ + + /* ELF32 Process entry path */ +lws_compare_and_swap_2: +#ifdef CONFIG_64BIT + /* Clip the input registers */ + depdi 0, 31, 32, %r26 + depdi 0, 31, 32, %r25 + depdi 0, 31, 32, %r24 + depdi 0, 31, 32, %r23 +#endif + + /* Check the validity of the size pointer */ + subi,>>= 4, %r23, %r0 + b,n lws_exit_nosys + + /* Jump to the functions which will load the old and new values into + registers depending on the their size */ + shlw %r23, 2, %r29 + blr %r29, %r0 + nop + + /* 8bit load */ +4: ldb 0(%sr3,%r25), %r25 + b cas2_lock_start +5: ldb 0(%sr3,%r24), %r24 + nop + nop + nop + nop + nop + + /* 16bit load */ +6: ldh 0(%sr3,%r25), %r25 + b cas2_lock_start +7: ldh 0(%sr3,%r24), %r24 + nop + nop + nop + nop + nop + + /* 32bit load */ +8: ldw 0(%sr3,%r25), %r25 + b cas2_lock_start +9: ldw 0(%sr3,%r24), %r24 + nop + nop + nop + nop + nop + + /* 64bit load */ +#ifdef CONFIG_64BIT +10: ldd 0(%sr3,%r25), %r25 +11: ldd 0(%sr3,%r24), %r24 +#else + /* Load new value into r22/r23 - high/low */ +10: ldw 0(%sr3,%r25), %r22 +11: ldw 4(%sr3,%r25), %r23 + /* Load new value into fr4 for atomic store later */ +12: flddx 0(%sr3,%r24), %fr4 +#endif + +cas2_lock_start: + /* Load start of lock table */ + ldil L%lws_lock_start, %r20 + ldo R%lws_lock_start(%r20), %r28 + + /* Extract four bits from r26 and hash lock (Bits 4-7) */ + extru %r26, 27, 4, %r20 + + /* Find lock to use, the hash is either one of 0 to + 15, multiplied by 16 (keep it 16-byte aligned) + and add to the lock table offset. */ + shlw %r20, 4, %r20 + add %r20, %r28, %r20 + + rsm PSW_SM_I, %r0 /* Disable interrupts */ + /* COW breaks can cause contention on UP systems */ + LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ + cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */ +cas2_wouldblock: + ldo 2(%r0), %r28 /* 2nd case */ + ssm PSW_SM_I, %r0 + b lws_exit /* Contended... 
*/ + ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ + + /* + prev = *addr; + if ( prev == old ) + *addr = new; + return prev; + */ + + /* NOTES: + This all works becuse intr_do_signal + and schedule both check the return iasq + and see that we are on the kernel page + so this process is never scheduled off + or is ever sent any signal of any sort, + thus it is wholly atomic from usrspaces + perspective + */ +cas2_action: + /* Jump to the correct function */ + blr %r29, %r0 + /* Set %r28 as non-zero for now */ + ldo 1(%r0),%r28 + + /* 8bit CAS */ +13: ldb,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +14: stb,ma %r24, 0(%sr3,%r26) + b cas2_end + copy %r0, %r28 + nop + nop + + /* 16bit CAS */ +15: ldh,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +16: sth,ma %r24, 0(%sr3,%r26) + b cas2_end + copy %r0, %r28 + nop + nop + + /* 32bit CAS */ +17: ldw,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +18: stw,ma %r24, 0(%sr3,%r26) + b cas2_end + copy %r0, %r28 + nop + nop + + /* 64bit CAS */ +#ifdef CONFIG_64BIT +19: ldd,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +20: std,ma %r24, 0(%sr3,%r26) + copy %r0, %r28 +#else + /* Compare first word */ +19: ldw,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r22, %r0 + b,n cas2_end + /* Compare second word */ +20: ldw,ma 4(%sr3,%r26), %r29 + sub,= %r29, %r23, %r0 + b,n cas2_end + /* Perform the store */ +21: fstdx %fr4, 0(%sr3,%r26) + copy %r0, %r28 +#endif + +cas2_end: + /* Free lock */ + stw,ma %r20, 0(%sr2,%r20) + /* Enable interrupts */ + ssm PSW_SM_I, %r0 + /* Return to userspace, set no error */ + b lws_exit + copy %r0, %r21 + +22: + /* Error occurred on load or store */ + /* Free lock */ + stw %r20, 0(%sr2,%r20) + ssm PSW_SM_I, %r0 + ldo 1(%r0),%r28 + b lws_exit + ldo -EFAULT(%r0),%r21 /* set errno */ + nop + nop + nop + + /* Exception table entries, for the load and store, return EFAULT. + Each of the entries must be relocated. */ + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page) +#ifndef CONFIG_64BIT + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page) +#endif + /* Make sure nothing else is placed on this page */ .align PAGE_SIZE END(linux_gateway_page) @@ -669,8 +899,9 @@ /* Light-weight-syscall table */ /* Start of lws table. 
*/ ENTRY(lws_table) - LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */ - LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ + LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */ + LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */ + LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */ END(lws_table) /* End of lws table */ --- linux-3.13.0.orig/arch/parisc/kernel/syscall_table.S +++ linux-3.13.0/arch/parisc/kernel/syscall_table.S @@ -286,11 +286,11 @@ ENTRY_COMP(msgsnd) ENTRY_COMP(msgrcv) ENTRY_SAME(msgget) /* 190 */ - ENTRY_SAME(msgctl) - ENTRY_SAME(shmat) + ENTRY_COMP(msgctl) + ENTRY_COMP(shmat) ENTRY_SAME(shmdt) ENTRY_SAME(shmget) - ENTRY_SAME(shmctl) /* 195 */ + ENTRY_COMP(shmctl) /* 195 */ ENTRY_SAME(ni_syscall) /* streams1 */ ENTRY_SAME(ni_syscall) /* streams2 */ ENTRY_SAME(lstat64) @@ -323,7 +323,7 @@ ENTRY_SAME(epoll_ctl) /* 225 */ ENTRY_SAME(epoll_wait) ENTRY_SAME(remap_file_pages) - ENTRY_SAME(semtimedop) + ENTRY_COMP(semtimedop) ENTRY_COMP(mq_open) ENTRY_SAME(mq_unlink) /* 230 */ ENTRY_COMP(mq_timedsend) @@ -392,7 +392,7 @@ ENTRY_COMP(vmsplice) ENTRY_COMP(move_pages) /* 295 */ ENTRY_SAME(getcpu) - ENTRY_SAME(epoll_pwait) + ENTRY_COMP(epoll_pwait) ENTRY_COMP(statfs64) ENTRY_COMP(fstatfs64) ENTRY_COMP(kexec_load) /* 300 */ @@ -418,7 +418,7 @@ ENTRY_SAME(accept4) /* 320 */ ENTRY_SAME(prlimit64) ENTRY_SAME(fanotify_init) - ENTRY_COMP(fanotify_mark) + ENTRY_DIFF(fanotify_mark) ENTRY_COMP(clock_adjtime) ENTRY_SAME(name_to_handle_at) /* 325 */ ENTRY_COMP(open_by_handle_at) --- linux-3.13.0.orig/arch/parisc/kernel/traps.c +++ linux-3.13.0/arch/parisc/kernel/traps.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -42,9 +43,6 @@ #include "../math-emu/math-emu.h" /* for handle_fpe() */ -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ - /* dumped to the console via printk) */ - #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) DEFINE_SPINLOCK(pa_dbit_lock); #endif @@ -160,6 +158,17 @@ } } +static DEFINE_RATELIMIT_STATE(_hppa_rs, + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); + +#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \ + if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \ + printk(fmt, ##__VA_ARGS__); \ + show_regs(regs); \ + } \ +} + + static void do_show_stack(struct unwind_frame_info *info) { int i = 1; @@ -229,12 +238,10 @@ if (err == 0) return; /* STFU */ - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", + parisc_printk_ratelimited(1, regs, + KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); -#ifdef PRINT_USER_FAULTS - /* XXX for debugging only */ - show_regs(regs); -#endif + return; } @@ -321,14 +328,11 @@ (tt == BUG_TRAP_TYPE_NONE) ? 
9 : 0); } -#ifdef PRINT_USER_FAULTS - if (unlikely(iir != GDB_BREAK_INSN)) { - printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", + if (unlikely(iir != GDB_BREAK_INSN)) + parisc_printk_ratelimited(0, regs, + KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", iir & 31, (iir>>13) & ((1<<13)-1), task_pid_nr(current), current->comm); - show_regs(regs); - } -#endif /* send standard GDB signal */ handle_gdb_break(regs, TRAP_BRKPT); @@ -758,11 +762,9 @@ default: if (user_mode(regs)) { -#ifdef PRINT_USER_FAULTS - printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", - task_pid_nr(current), current->comm); - show_regs(regs); -#endif + parisc_printk_ratelimited(0, regs, KERN_DEBUG + "handle_interruption() pid=%d command='%s'\n", + task_pid_nr(current), current->comm); /* SIGBUS, for lack of a better one. */ si.si_signo = SIGBUS; si.si_code = BUS_OBJERR; @@ -779,16 +781,10 @@ if (user_mode(regs)) { if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { -#ifdef PRINT_USER_FAULTS - if (fault_space == 0) - printk(KERN_DEBUG "User Fault on Kernel Space "); - else - printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", - code); - printk(KERN_CONT "pid=%d command='%s'\n", - task_pid_nr(current), current->comm); - show_regs(regs); -#endif + parisc_printk_ratelimited(0, regs, KERN_DEBUG + "User fault %d on space 0x%08lx, pid=%d command='%s'\n", + code, fault_space, + task_pid_nr(current), current->comm); si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SEGV_MAPERR; --- linux-3.13.0.orig/arch/parisc/mm/fault.c +++ linux-3.13.0/arch/parisc/mm/fault.c @@ -19,10 +19,6 @@ #include #include -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ - /* dumped to the console via printk) */ - - /* Various important other fields */ #define bit22set(x) (x & 0x00000200) #define bits23_25set(x) (x & 0x000001c0) @@ -34,6 +30,8 @@ DEFINE_PER_CPU(struct exception_data, exception_data); +int show_unhandled_signals = 1; + /* * parisc_acctyp(unsigned int inst) -- * Given a PA-RISC memory access instruction, determine if the @@ -173,6 +171,32 @@ return 0; } +/* + * Print out info about fatal segfaults, if the show_unhandled_signals + * sysctl is set: + */ +static inline void +show_signal_msg(struct pt_regs *regs, unsigned long code, + unsigned long address, struct task_struct *tsk, + struct vm_area_struct *vma) +{ + if (!unhandled_signal(tsk, SIGSEGV)) + return; + + if (!printk_ratelimit()) + return; + + pr_warn("\n"); + pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx", + tsk->comm, code, address); + print_vma_addr(KERN_CONT " in ", regs->iaoq[0]); + if (vma) + pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n", + vma->vm_start, vma->vm_end); + + show_regs(regs); +} + void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address) { @@ -232,6 +256,8 @@ */ if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto bad_area; BUG(); @@ -270,16 +296,8 @@ if (user_mode(regs)) { struct siginfo si; -#ifdef PRINT_USER_FAULTS - printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", - task_pid_nr(tsk), tsk->comm, code, address); - if (vma) { - printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", - vma->vm_start, vma->vm_end); - } - show_regs(regs); -#endif + show_signal_msg(regs, code, address, tsk, vma); + switch (code) { case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside 
of vma */ --- linux-3.13.0.orig/arch/powerpc/Kconfig +++ linux-3.13.0/arch/powerpc/Kconfig @@ -139,6 +139,7 @@ select OLD_SIGACTION if PPC32 select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK + select ARCH_SUPPORTS_ATOMIC_RMW config GENERIC_CSUM def_bool CPU_LITTLE_ENDIAN @@ -381,7 +382,7 @@ config KEXEC bool "kexec system call" - depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) + depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot @@ -439,6 +440,14 @@ default "4" depends on NEED_MULTIPLE_NODES +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config HAVE_MEMORYLESS_NODES + def_bool y + depends on NUMA + config ARCH_SELECT_MEMORY_MODEL def_bool y depends on PPC64 @@ -735,6 +744,16 @@ help Freescale General-purpose Timers support +config HAS_FSL_QBMAN + bool "Datapath Acceleration Queue and Buffer management" + help + Datapath Acceleration Queue and Buffer management + +config HAS_FSL_PME + bool + depends on HAS_FSL_QBMAN + default n + # Yes MCA RS/6000s exist but Linux-PPC does not currently support any config MCA bool @@ -1020,6 +1039,8 @@ source "drivers/Kconfig" +source "ubuntu/Kconfig" + source "fs/Kconfig" source "arch/powerpc/sysdev/qe_lib/Kconfig" --- linux-3.13.0.orig/arch/powerpc/Makefile +++ linux-3.13.0/arch/powerpc/Makefile @@ -149,7 +149,9 @@ CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) -KBUILD_CPPFLAGS += -Iarch/$(ARCH) +asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) + +KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr) KBUILD_AFLAGS += -Iarch/$(ARCH) KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y) CPP = $(CC) -E $(KBUILD_CFLAGS) @@ -225,7 +227,7 @@ all: zImage # With make 3.82 we cannot mix normal and wildcard targets -BOOT_TARGETS1 := zImage zImage.initrd uImage +BOOT_TARGETS1 := zImage zImage.initrd uImage vmlinux.strip BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% uImage.% PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) --- linux-3.13.0.orig/arch/powerpc/configs/pseries_defconfig +++ linux-3.13.0/arch/powerpc/configs/pseries_defconfig @@ -353,3 +353,4 @@ CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y --- linux-3.13.0.orig/arch/powerpc/configs/pseries_le_defconfig +++ linux-3.13.0/arch/powerpc/configs/pseries_le_defconfig @@ -350,3 +350,4 @@ # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_DEV_NX=y CONFIG_CRYPTO_DEV_NX_ENCRYPT=m +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y --- linux-3.13.0.orig/arch/powerpc/crypto/sha1.c +++ linux-3.13.0/arch/powerpc/crypto/sha1.c @@ -154,4 +154,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); -MODULE_ALIAS("sha1-powerpc"); +MODULE_ALIAS_CRYPTO("sha1"); +MODULE_ALIAS_CRYPTO("sha1-powerpc"); --- linux-3.13.0.orig/arch/powerpc/include/asm/bitops.h +++ linux-3.13.0/arch/powerpc/include/asm/bitops.h @@ -46,6 +46,11 @@ #include #include +/* PPC bit number conversion */ +#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be)) +#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) +#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) + /* * clear_bit doesn't imply a memory barrier */ --- linux-3.13.0.orig/arch/powerpc/include/asm/compat.h +++ linux-3.13.0/arch/powerpc/include/asm/compat.h @@ -8,7 +8,11 @@ #include #define COMPAT_USER_HZ 100 +#ifdef __BIG_ENDIAN__ #define COMPAT_UTS_MACHINE 
"ppc\0\0" +#else +#define COMPAT_UTS_MACHINE "ppcle\0\0" +#endif typedef u32 compat_size_t; typedef s32 compat_ssize_t; @@ -200,10 +204,11 @@ /* * We can't access below the stack pointer in the 32bit ABI and - * can access 288 bytes in the 64bit ABI + * can access 288 bytes in the 64bit big-endian ABI, + * or 512 bytes with the new ELFv2 little-endian ABI. */ if (!is_32bit_task()) - usp -= 288; + usp -= USER_REDZONE_SIZE; return (void __user *) (usp - len); } --- linux-3.13.0.orig/arch/powerpc/include/asm/cputable.h +++ linux-3.13.0/arch/powerpc/include/asm/cputable.h @@ -90,6 +90,18 @@ * if the error is fatal, 1 if it was fully recovered and 0 to * pass up (not CPU originated) */ int (*machine_check)(struct pt_regs *regs); + + /* + * Processor specific early machine check handler which is + * called in real mode to handle SLB and TLB errors. + */ + long (*machine_check_early)(struct pt_regs *regs); + + /* + * Processor specific routine to flush tlbs. + */ + void (*flush_tlb)(unsigned long inval_selector); + }; extern struct cpu_spec *cur_cpu_spec; @@ -177,6 +189,7 @@ #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) #define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) +#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000) #ifndef __ASSEMBLY__ @@ -433,6 +446,7 @@ CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) +#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ @@ -454,8 +468,8 @@ #define CPU_FTRS_POSSIBLE \ (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ - CPU_FTRS_POWER7 | CPU_FTRS_POWER8 | CPU_FTRS_CELL | \ - CPU_FTRS_PA6T | CPU_FTR_VSX) + CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \ + CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_VSX) #endif #else enum { --- linux-3.13.0.orig/arch/powerpc/include/asm/dma-mapping.h +++ linux-3.13.0/arch/powerpc/include/asm/dma-mapping.h @@ -134,6 +134,7 @@ } extern int dma_set_mask(struct device *dev, u64 dma_mask); +extern int __dma_set_mask(struct device *dev, u64 dma_mask); #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) --- linux-3.13.0.orig/arch/powerpc/include/asm/eeh.h +++ linux-3.13.0/arch/powerpc/include/asm/eeh.h @@ -117,6 +117,16 @@ return edev ? edev->pdev : NULL; } +/* Return values from eeh_ops::next_error */ +enum { + EEH_NEXT_ERR_NONE = 0, + EEH_NEXT_ERR_INF, + EEH_NEXT_ERR_FROZEN_PE, + EEH_NEXT_ERR_FENCED_PHB, + EEH_NEXT_ERR_DEAD_PHB, + EEH_NEXT_ERR_DEAD_IOC +}; + /* * The struct is used to trace the registered EEH operation * callback functions. Actually, those operation callback --- linux-3.13.0.orig/arch/powerpc/include/asm/exception-64e.h +++ linux-3.13.0/arch/powerpc/include/asm/exception-64e.h @@ -214,10 +214,21 @@ #define TLB_MISS_STATS_SAVE_INFO_BOLTED #endif +#ifndef CONFIG_RELOCATABLE #define SET_IVOR(vector_number, vector_offset) \ li r3,vector_offset@l; \ ori r3,r3,interrupt_base_book3e@l; \ mtspr SPRN_IVOR##vector_number,r3; +#else /* !CONFIG_RELOCATABLE */ +/* In relocatable case the value of the constant expression 'expr' is only + * offset. So instead, we should loads the address of label 'name'. 
+ */ +#define SET_IVOR(vector_number, vector_offset) \ + LOAD_REG_ADDR(r3,interrupt_base_book3e);\ + rlwinm r3,r3,0,15,0; \ + ori r3,r3,vector_offset@l; \ + mtspr SPRN_IVOR##vector_number,r3; +#endif /* CONFIG_RELOCATABLE */ #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ --- linux-3.13.0.orig/arch/powerpc/include/asm/exception-64s.h +++ linux-3.13.0/arch/powerpc/include/asm/exception-64s.h @@ -147,6 +147,14 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) /* + * Set an SPR from a register if the CPU has the given feature + */ +#define OPT_SET_SPR(ra, spr, ftr) \ +BEGIN_FTR_SECTION_NESTED(943) \ + mtspr spr,ra; \ +END_FTR_SECTION_NESTED(ftr,ftr,943) + +/* * Save a register to the PACA if the CPU has the given feature */ #define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \ @@ -301,9 +309,12 @@ beq 4f; /* if from kernel mode */ \ ACCOUNT_CPU_USER_ENTRY(r9, r10); \ SAVE_PPR(area, r9, r10); \ -4: std r2,GPR2(r1); /* save r2 in stackframe */ \ - SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ - SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ +4: EXCEPTION_PROLOG_COMMON_2(area) \ + EXCEPTION_PROLOG_COMMON_3(n) \ + ACCOUNT_STOLEN_TIME + +/* Save original regs values from save area to stack frame. */ +#define EXCEPTION_PROLOG_COMMON_2(area) \ ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ ld r10,area+EX_R10(r13); \ std r9,GPR9(r1); \ @@ -318,11 +329,16 @@ ld r10,area+EX_CFAR(r13); \ std r10,ORIG_GPR3(r1); \ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ + GET_CTR(r10, area); \ + std r10,_CTR(r1); + +#define EXCEPTION_PROLOG_COMMON_3(n) \ + std r2,GPR2(r1); /* save r2 in stackframe */ \ + SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ + SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ mflr r9; /* Get LR, later save to stack */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ std r9,_LINK(r1); \ - GET_CTR(r10, area); \ - std r10,_CTR(r1); \ lbz r10,PACASOFTIRQEN(r13); \ mfspr r11,SPRN_XER; /* save XER in stackframe */ \ std r10,SOFTE(r1); \ @@ -332,8 +348,7 @@ li r10,0; \ ld r11,exception_marker@toc(r2); \ std r10,RESULT(r1); /* clear regs->result */ \ - std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ - ACCOUNT_STOLEN_TIME + std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ /* * Exception vectors. 
--- linux-3.13.0.orig/arch/powerpc/include/asm/hugetlb.h +++ linux-3.13.0/arch/powerpc/include/asm/hugetlb.h @@ -127,7 +127,7 @@ unsigned long addr, pte_t *ptep) { #ifdef CONFIG_PPC64 - return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); + return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); #else return __pte(pte_update(ptep, ~0UL, 0)); #endif --- linux-3.13.0.orig/arch/powerpc/include/asm/hvcall.h +++ linux-3.13.0/arch/powerpc/include/asm/hvcall.h @@ -274,6 +274,12 @@ /* Platform specific hcalls, used by KVM */ #define H_RTAS 0xf000 +/* Values for 2nd argument to H_SET_MODE */ +#define H_SET_MODE_RESOURCE_SET_CIABR 1 +#define H_SET_MODE_RESOURCE_SET_DAWR 2 +#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3 +#define H_SET_MODE_RESOURCE_LE 4 + #ifndef __ASSEMBLY__ /** --- linux-3.13.0.orig/arch/powerpc/include/asm/iommu.h +++ linux-3.13.0/arch/powerpc/include/asm/iommu.h @@ -79,6 +79,7 @@ #ifdef CONFIG_IOMMU_API struct iommu_group *it_group; #endif + void (*set_bypass)(struct iommu_table *tbl, bool enable); }; struct scatterlist; --- linux-3.13.0.orig/arch/powerpc/include/asm/kvm_book3s.h +++ linux-3.13.0/arch/powerpc/include/asm/kvm_book3s.h @@ -271,6 +271,29 @@ return vcpu->arch.pc; } +static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.shared->msr & MSR_LE; +} + +static inline bool kvmppc_is_bigendian(struct kvm_vcpu *vcpu) +{ + return !kvmppc_need_byteswap(vcpu); +} + +static inline int kvmppc_ld32(struct kvm_vcpu *vcpu, ulong *eaddr, + u32 *ptr, bool data) +{ + int ret; + + ret = kvmppc_ld(vcpu, eaddr, sizeof(u32), ptr, data); + + if (kvmppc_need_byteswap(vcpu)) + *ptr = swab32(*ptr); + + return ret; +} + static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) { ulong pc = kvmppc_get_pc(vcpu); @@ -278,7 +301,7 @@ /* Load the instruction manually if it failed to do so in the * exit path */ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) - kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); + kvmppc_ld32(vcpu, &pc, &vcpu->arch.last_inst, false); return vcpu->arch.last_inst; } @@ -295,7 +318,7 @@ /* Load the instruction manually if it failed to do so in the * exit path */ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) - kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); + kvmppc_ld32(vcpu, &pc, &vcpu->arch.last_inst, false); return vcpu->arch.last_inst; } --- linux-3.13.0.orig/arch/powerpc/include/asm/kvm_booke.h +++ linux-3.13.0/arch/powerpc/include/asm/kvm_booke.h @@ -98,6 +98,11 @@ return vcpu->arch.pc; } +static inline bool kvmppc_is_bigendian(struct kvm_vcpu *vcpu) +{ + return 1; +} + static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) { return vcpu->arch.fault_dear; --- linux-3.13.0.orig/arch/powerpc/include/asm/kvm_host.h +++ linux-3.13.0/arch/powerpc/include/asm/kvm_host.h @@ -616,6 +616,7 @@ spinlock_t tbacct_lock; u64 busy_stolen; u64 busy_preempt; + unsigned long intr_msr; #endif }; --- linux-3.13.0.orig/arch/powerpc/include/asm/kvm_ppc.h +++ linux-3.13.0/arch/powerpc/include/asm/kvm_ppc.h @@ -53,13 +53,13 @@ extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, unsigned int bytes, - int is_bigendian); + unsigned int rt, unsigned int bytes, + int not_reverse); extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, unsigned int bytes, - int is_bigendian); + unsigned int rt, unsigned int bytes, + int not_reverse); extern int kvmppc_handle_store(struct kvm_run *run, 
struct kvm_vcpu *vcpu, - u64 val, unsigned int bytes, int is_bigendian); + u64 val, unsigned int bytes, int not_reverse); extern int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu); --- linux-3.13.0.orig/arch/powerpc/include/asm/machdep.h +++ linux-3.13.0/arch/powerpc/include/asm/machdep.h @@ -57,10 +57,10 @@ void (*hpte_removebolted)(unsigned long ea, int psize, int ssize); void (*flush_hash_range)(unsigned long number, int local); - void (*hugepage_invalidate)(struct mm_struct *mm, + void (*hugepage_invalidate)(unsigned long vsid, + unsigned long addr, unsigned char *hpte_slot_array, - unsigned long addr, int psize); - + int psize, int ssize); /* special for kexec, to be called in real mode, linear mapping is * destroyed as well */ void (*hpte_clear_all)(void); @@ -170,6 +170,9 @@ int (*system_reset_exception)(struct pt_regs *regs); int (*machine_check_exception)(struct pt_regs *regs); + /* Called during machine check exception to retrieve fixup address. */ + bool (*mce_check_early_recovery)(struct pt_regs *regs); + /* Motherboard/chipset features. This is a kind of general purpose * hook used to control some machine specific features (like reset * lines, chip power control, etc...). --- linux-3.13.0.orig/arch/powerpc/include/asm/mce.h +++ linux-3.13.0/arch/powerpc/include/asm/mce.h @@ -0,0 +1,198 @@ +/* + * Machine check exception header file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright 2013 IBM Corporation + * Author: Mahesh Salgaonkar + */ + +#ifndef __ASM_PPC64_MCE_H__ +#define __ASM_PPC64_MCE_H__ + +#include + +/* + * Machine Check bits on power7 and power8 + */ +#define P7_SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42)) /* P8 too */ + +/* SRR1 bits for machine check (On Power7 and Power8) */ +#define P7_SRR1_MC_IFETCH(srr1) ((srr1) & PPC_BITMASK(43, 45)) /* P8 too */ + +#define P7_SRR1_MC_IFETCH_UE (0x1 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_PARITY (0x2 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_MULTIHIT (0x3 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_SLB_BOTH (0x4 << PPC_BITLSHIFT(45)) +#define P7_SRR1_MC_IFETCH_TLB_MULTIHIT (0x5 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_UE_TLB_RELOAD (0x6 << PPC_BITLSHIFT(45)) /* P8 too */ +#define P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL (0x7 << PPC_BITLSHIFT(45)) + +/* SRR1 bits for machine check (On Power8) */ +#define P8_SRR1_MC_IFETCH_ERAT_MULTIHIT (0x4 << PPC_BITLSHIFT(45)) + +/* DSISR bits for machine check (On Power7 and Power8) */ +#define P7_DSISR_MC_UE (PPC_BIT(48)) /* P8 too */ +#define P7_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) /* P8 too */ +#define P7_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) /* P8 too */ +#define P7_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) /* P8 too */ +#define P7_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) /* P8 too */ +#define P7_DSISR_MC_SLB_MULTIHIT (PPC_BIT(56)) /* P8 too */ +#define P7_DSISR_MC_SLB_MULTIHIT_PARITY (PPC_BIT(57)) /* P8 too */ + +/* + * DSISR bits for machine check (Power8) in addition to above. + * Secondary DERAT Multihit + */ +#define P8_DSISR_MC_ERAT_MULTIHIT_SEC (PPC_BIT(54)) + +/* SLB error bits */ +#define P7_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_ERAT_MULTIHIT | \ + P7_DSISR_MC_SLB_PARITY_MFSLB | \ + P7_DSISR_MC_SLB_MULTIHIT | \ + P7_DSISR_MC_SLB_MULTIHIT_PARITY) + +#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \ + P8_DSISR_MC_ERAT_MULTIHIT_SEC) +enum MCE_Version { + MCE_V1 = 1, +}; + +enum MCE_Severity { + MCE_SEV_NO_ERROR = 0, + MCE_SEV_WARNING = 1, + MCE_SEV_ERROR_SYNC = 2, + MCE_SEV_FATAL = 3, +}; + +enum MCE_Disposition { + MCE_DISPOSITION_RECOVERED = 0, + MCE_DISPOSITION_NOT_RECOVERED = 1, +}; + +enum MCE_Initiator { + MCE_INITIATOR_UNKNOWN = 0, + MCE_INITIATOR_CPU = 1, +}; + +enum MCE_ErrorType { + MCE_ERROR_TYPE_UNKNOWN = 0, + MCE_ERROR_TYPE_UE = 1, + MCE_ERROR_TYPE_SLB = 2, + MCE_ERROR_TYPE_ERAT = 3, + MCE_ERROR_TYPE_TLB = 4, +}; + +enum MCE_UeErrorType { + MCE_UE_ERROR_INDETERMINATE = 0, + MCE_UE_ERROR_IFETCH = 1, + MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2, + MCE_UE_ERROR_LOAD_STORE = 3, + MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4, +}; + +enum MCE_SlbErrorType { + MCE_SLB_ERROR_INDETERMINATE = 0, + MCE_SLB_ERROR_PARITY = 1, + MCE_SLB_ERROR_MULTIHIT = 2, +}; + +enum MCE_EratErrorType { + MCE_ERAT_ERROR_INDETERMINATE = 0, + MCE_ERAT_ERROR_PARITY = 1, + MCE_ERAT_ERROR_MULTIHIT = 2, +}; + +enum MCE_TlbErrorType { + MCE_TLB_ERROR_INDETERMINATE = 0, + MCE_TLB_ERROR_PARITY = 1, + MCE_TLB_ERROR_MULTIHIT = 2, +}; + +struct machine_check_event { + enum MCE_Version version:8; /* 0x00 */ + uint8_t in_use; /* 0x01 */ + enum MCE_Severity severity:8; /* 0x02 */ + enum MCE_Initiator initiator:8; /* 0x03 */ + enum MCE_ErrorType error_type:8; /* 0x04 */ + enum MCE_Disposition disposition:8; /* 0x05 */ + uint8_t reserved_1[2]; /* 0x06 */ + uint64_t gpr3; /* 0x08 */ + uint64_t srr0; /* 0x10 */ + uint64_t srr1; /* 0x18 */ + union { /* 0x20 */ + struct { + enum MCE_UeErrorType ue_error_type:8; 
+ uint8_t effective_address_provided; + uint8_t physical_address_provided; + uint8_t reserved_1[5]; + uint64_t effective_address; + uint64_t physical_address; + uint8_t reserved_2[8]; + } ue_error; + + struct { + enum MCE_SlbErrorType slb_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } slb_error; + + struct { + enum MCE_EratErrorType erat_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } erat_error; + + struct { + enum MCE_TlbErrorType tlb_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } tlb_error; + } u; +}; + +struct mce_error_info { + enum MCE_ErrorType error_type:8; + union { + enum MCE_UeErrorType ue_error_type:8; + enum MCE_SlbErrorType slb_error_type:8; + enum MCE_EratErrorType erat_error_type:8; + enum MCE_TlbErrorType tlb_error_type:8; + } u; + uint8_t reserved[2]; +}; + +#define MAX_MC_EVT 100 + +/* Release flags for get_mce_event() */ +#define MCE_EVENT_RELEASE true +#define MCE_EVENT_DONTRELEASE false + +extern void save_mce_event(struct pt_regs *regs, long handled, + struct mce_error_info *mce_err, uint64_t nip, + uint64_t addr); +extern int get_mce_event(struct machine_check_event *mce, bool release); +extern void release_mce_event(void); +extern void machine_check_queue_event(void); +extern void machine_check_print_event_info(struct machine_check_event *evt); +extern uint64_t get_mce_fault_addr(struct machine_check_event *evt); + +#endif /* __ASM_PPC64_MCE_H__ */ --- linux-3.13.0.orig/arch/powerpc/include/asm/mmu-hash64.h +++ linux-3.13.0/arch/powerpc/include/asm/mmu-hash64.h @@ -22,6 +22,7 @@ */ #include #include +#include /* * Segment table @@ -496,7 +497,7 @@ */ struct subpage_prot_table { unsigned long maxaddr; /* only addresses < this are protected */ - unsigned int **protptrs[2]; + unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)]; unsigned int *low_prot[4]; }; --- linux-3.13.0.orig/arch/powerpc/include/asm/opal.h +++ linux-3.13.0/arch/powerpc/include/asm/opal.h @@ -33,6 +33,28 @@ u64 rd_loc; /* r11 */ }; +/* + * SG entry + * + * WARNING: The current implementation requires each entry + * to represent a block that is 4k aligned *and* each block + * size except the last one in the list to be as well. 
+ */ +struct opal_sg_entry { + __be64 data; + __be64 length; +}; + +/* SG list */ +struct opal_sg_list { + __be64 length; + __be64 next; + struct opal_sg_entry entry[]; +}; + +/* We calculate number of sg entries based on PAGE_SIZE */ +#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) + extern long opal_query_takeover(u64 *hal_size, u64 *hal_align); extern long opal_do_takeover(struct opal_takeover_args *args); @@ -61,6 +83,8 @@ #define OPAL_INTERNAL_ERROR -11 #define OPAL_BUSY_EVENT -12 #define OPAL_HARDWARE_FROZEN -13 +#define OPAL_WRONG_STATE -14 +#define OPAL_ASYNC_COMPLETION -15 /* API Tokens (in r0) */ #define OPAL_CONSOLE_WRITE 1 @@ -129,9 +153,28 @@ #define OPAL_LPC_READ 67 #define OPAL_LPC_WRITE 68 #define OPAL_RETURN_CPU 69 +#define OPAL_REINIT_CPUS 70 +#define OPAL_ELOG_READ 71 +#define OPAL_ELOG_WRITE 72 +#define OPAL_ELOG_ACK 73 +#define OPAL_ELOG_RESEND 74 +#define OPAL_ELOG_SIZE 75 #define OPAL_FLASH_VALIDATE 76 #define OPAL_FLASH_MANAGE 77 #define OPAL_FLASH_UPDATE 78 +#define OPAL_DUMP_INIT 81 +#define OPAL_DUMP_INFO 82 +#define OPAL_DUMP_READ 83 +#define OPAL_DUMP_ACK 84 +#define OPAL_GET_MSG 85 +#define OPAL_CHECK_ASYNC_COMPLETION 86 +#define OPAL_SYNC_HOST_REBOOT 87 +#define OPAL_SENSOR_READ 88 +#define OPAL_GET_PARAM 89 +#define OPAL_SET_PARAM 90 +#define OPAL_DUMP_RESEND 91 +#define OPAL_SYNC_HOST_REBOOT 87 +#define OPAL_DUMP_INFO2 94 #ifndef __ASSEMBLY__ @@ -211,7 +254,19 @@ OPAL_EVENT_ERROR_LOG = 0x40, OPAL_EVENT_EPOW = 0x80, OPAL_EVENT_LED_STATUS = 0x100, - OPAL_EVENT_PCI_ERROR = 0x200 + OPAL_EVENT_PCI_ERROR = 0x200, + OPAL_EVENT_DUMP_AVAIL = 0x400, + OPAL_EVENT_MSG_PENDING = 0x800, +}; + +enum OpalMessageType { + OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc, + * additional params function-specific + */ + OPAL_MSG_MEM_ERR, + OPAL_MSG_EPOW, + OPAL_MSG_SHUTDOWN, + OPAL_MSG_TYPE_MAX, }; /* Machine check related definitions */ @@ -356,6 +411,12 @@ OPAL_LPC_FW = 2, }; +struct opal_msg { + uint32_t msg_type; + uint32_t reserved; + uint64_t params[8]; +}; + struct opal_machine_check_event { enum OpalMCE_Version version:8; /* 0x00 */ uint8_t in_use; /* 0x01 */ @@ -472,9 +533,9 @@ }; struct OpalIoPhbErrorCommon { - uint32_t version; - uint32_t ioType; - uint32_t len; + __be32 version; + __be32 ioType; + __be32 len; }; struct OpalIoP7IOCPhbErrorData { @@ -539,64 +600,69 @@ struct OpalIoPhb3ErrorData { struct OpalIoPhbErrorCommon common; - uint32_t brdgCtl; + __be32 brdgCtl; /* PHB3 UTL regs */ - uint32_t portStatusReg; - uint32_t rootCmplxStatus; - uint32_t busAgentStatus; + __be32 portStatusReg; + __be32 rootCmplxStatus; + __be32 busAgentStatus; /* PHB3 cfg regs */ - uint32_t deviceStatus; - uint32_t slotStatus; - uint32_t linkStatus; - uint32_t devCmdStatus; - uint32_t devSecStatus; + __be32 deviceStatus; + __be32 slotStatus; + __be32 linkStatus; + __be32 devCmdStatus; + __be32 devSecStatus; /* cfg AER regs */ - uint32_t rootErrorStatus; - uint32_t uncorrErrorStatus; - uint32_t corrErrorStatus; - uint32_t tlpHdr1; - uint32_t tlpHdr2; - uint32_t tlpHdr3; - uint32_t tlpHdr4; - uint32_t sourceId; + __be32 rootErrorStatus; + __be32 uncorrErrorStatus; + __be32 corrErrorStatus; + __be32 tlpHdr1; + __be32 tlpHdr2; + __be32 tlpHdr3; + __be32 tlpHdr4; + __be32 sourceId; - uint32_t rsv3; + __be32 rsv3; /* Record data about the call to allocate a buffer */ - uint64_t errorClass; - uint64_t correlator; + __be64 errorClass; + __be64 correlator; - uint64_t nFir; /* 000 */ - uint64_t nFirMask; /* 003 */ - uint64_t nFirWOF; /* 008 */ + __be64 
nFir; /* 000 */ + __be64 nFirMask; /* 003 */ + __be64 nFirWOF; /* 008 */ /* PHB3 MMIO Error Regs */ - uint64_t phbPlssr; /* 120 */ - uint64_t phbCsr; /* 110 */ - uint64_t lemFir; /* C00 */ - uint64_t lemErrorMask; /* C18 */ - uint64_t lemWOF; /* C40 */ - uint64_t phbErrorStatus; /* C80 */ - uint64_t phbFirstErrorStatus; /* C88 */ - uint64_t phbErrorLog0; /* CC0 */ - uint64_t phbErrorLog1; /* CC8 */ - uint64_t mmioErrorStatus; /* D00 */ - uint64_t mmioFirstErrorStatus; /* D08 */ - uint64_t mmioErrorLog0; /* D40 */ - uint64_t mmioErrorLog1; /* D48 */ - uint64_t dma0ErrorStatus; /* D80 */ - uint64_t dma0FirstErrorStatus; /* D88 */ - uint64_t dma0ErrorLog0; /* DC0 */ - uint64_t dma0ErrorLog1; /* DC8 */ - uint64_t dma1ErrorStatus; /* E00 */ - uint64_t dma1FirstErrorStatus; /* E08 */ - uint64_t dma1ErrorLog0; /* E40 */ - uint64_t dma1ErrorLog1; /* E48 */ - uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS]; - uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS]; + __be64 phbPlssr; /* 120 */ + __be64 phbCsr; /* 110 */ + __be64 lemFir; /* C00 */ + __be64 lemErrorMask; /* C18 */ + __be64 lemWOF; /* C40 */ + __be64 phbErrorStatus; /* C80 */ + __be64 phbFirstErrorStatus; /* C88 */ + __be64 phbErrorLog0; /* CC0 */ + __be64 phbErrorLog1; /* CC8 */ + __be64 mmioErrorStatus; /* D00 */ + __be64 mmioFirstErrorStatus; /* D08 */ + __be64 mmioErrorLog0; /* D40 */ + __be64 mmioErrorLog1; /* D48 */ + __be64 dma0ErrorStatus; /* D80 */ + __be64 dma0FirstErrorStatus; /* D88 */ + __be64 dma0ErrorLog0; /* DC0 */ + __be64 dma0ErrorLog1; /* DC8 */ + __be64 dma1ErrorStatus; /* E00 */ + __be64 dma1FirstErrorStatus; /* E08 */ + __be64 dma1ErrorLog0; /* E40 */ + __be64 dma1ErrorLog1; /* E48 */ + __be64 pestA[OPAL_PHB3_NUM_PEST_REGS]; + __be64 pestB[OPAL_PHB3_NUM_PEST_REGS]; +}; + +enum { + OPAL_REINIT_CPUS_HILE_BE = (1 << 0), + OPAL_REINIT_CPUS_HILE_LE = (1 << 1), }; typedef struct oppanel_line { @@ -607,6 +673,9 @@ /* /sys/firmware/opal */ extern struct kobject *opal_kobj; +/* /ibm,opal */ +extern struct device_node *opal_node; + /* API functions */ int64_t opal_console_write(int64_t term_number, __be64 *length, const uint8_t *buffer); @@ -715,24 +784,49 @@ int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); int64_t opal_get_epow_status(__be64 *status); int64_t opal_set_system_attention_led(uint8_t led_action); -int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, - uint16_t *pci_error_type, uint16_t *severity); +int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe, + __be16 *pci_error_type, __be16 *severity); int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); +int64_t opal_reinit_cpus(uint64_t flags); -int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); -int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); +int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); +int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val); int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, __be32 *data, uint32_t sz); + +int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id); +int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type); +int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset); +int64_t opal_send_ack_elog(uint64_t log_id); +void opal_resend_pending_logs(void); + 
int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); int64_t opal_manage_flash(uint8_t op); int64_t opal_update_flash(uint64_t blk_list); +int64_t opal_dump_init(uint8_t dump_type); +int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size); +int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type); +int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer); +int64_t opal_dump_ack(uint32_t dump_id); +int64_t opal_dump_resend_notification(void); + +int64_t opal_get_msg(uint64_t buffer, size_t size); +int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token); +int64_t opal_sync_host_reboot(void); +int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer, + size_t length); +int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer, + size_t length); +int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); +extern int early_init_dt_scan_recoverable_ranges(unsigned long node, + const char *uname, int depth, void *data); extern int opal_get_chars(uint32_t vtermno, char *buf, int count); extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); @@ -744,6 +838,8 @@ int depth, void *data); extern int opal_notifier_register(struct notifier_block *nb); +extern int opal_message_notifier_register(enum OpalMessageType msg_type, + struct notifier_block *nb); extern void opal_notifier_enable(void); extern void opal_notifier_disable(void); extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); @@ -751,6 +847,13 @@ extern int opal_get_chars(uint32_t vtermno, char *buf, int count); extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len); +extern int __opal_async_get_token(void); +extern int opal_async_get_token_interruptible(void); +extern int __opal_async_release_token(int token); +extern int opal_async_release_token(int token); +extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg); +extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); + extern void hvc_opal_init_early(void); struct rtc_time; @@ -759,13 +862,22 @@ extern unsigned long opal_get_boot_time(void); extern void opal_nvram_init(void); extern void opal_flash_init(void); +extern void opal_flash_term_callback(void); +extern int opal_elog_init(void); +extern void opal_platform_dump_init(void); +extern void opal_msglog_init(void); extern int opal_machine_check(struct pt_regs *regs); +extern bool opal_mce_check_early_recovery(struct pt_regs *regs); extern void opal_shutdown(void); extern void opal_lpc_init(void); +struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, + unsigned long vmalloc_size); +void opal_free_sg_list(struct opal_sg_list *sg); + #endif /* __ASSEMBLY__ */ #endif /* __OPAL_H */ --- linux-3.13.0.orig/arch/powerpc/include/asm/paca.h +++ linux-3.13.0/arch/powerpc/include/asm/paca.h @@ -152,6 +152,15 @@ */ struct opal_machine_check_event *opal_mc_evt; #endif +#ifdef CONFIG_PPC_BOOK3S_64 + /* Exclusive emergency stack pointer for machine check exception. */ + void *mc_emergency_sp; + /* + * Flag to check whether we are in machine check early handler + * and already using emergency stack. 
+ */ + u16 in_mce; +#endif /* Stuff for accurate time accounting */ u64 user_time; /* accumulated usermode TB ticks */ --- linux-3.13.0.orig/arch/powerpc/include/asm/page.h +++ linux-3.13.0/arch/powerpc/include/asm/page.h @@ -112,6 +112,8 @@ /* See Description below for VIRT_PHYS_OFFSET */ #ifdef CONFIG_RELOCATABLE_PPC32 #define VIRT_PHYS_OFFSET virt_phys_offset +#elif defined(CONFIG_PPC_BOOK3E_64) +#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START) #else #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START) #endif --- linux-3.13.0.orig/arch/powerpc/include/asm/perf_event_server.h +++ linux-3.13.0/arch/powerpc/include/asm/perf_event_server.h @@ -14,6 +14,7 @@ #include #include +/* Update perf_event_print_debug() if this changes */ #define MAX_HWEVENTS 8 #define MAX_EVENT_ALTERNATIVES 8 #define MAX_LIMITED_HWCOUNTERS 2 @@ -60,8 +61,7 @@ #define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ #define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ #define PPMU_HAS_SIER 0x00000040 /* Has SIER */ -#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ -#define PPMU_EBB 0x00000100 /* supports event based branch */ +#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ /* * Values for flags to get_alternatives() --- linux-3.13.0.orig/arch/powerpc/include/asm/pgtable-ppc64.h +++ linux-3.13.0/arch/powerpc/include/asm/pgtable-ppc64.h @@ -195,6 +195,7 @@ static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, + unsigned long set, int huge) { #ifdef PTE_ATOMIC_UPDATES @@ -205,14 +206,15 @@ andi. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ + or %1,%1,%7\n\ stdcx. %1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*ptep) - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) : "cc" ); #else unsigned long old = pte_val(*ptep); - *ptep = __pte(old & ~clr); + *ptep = __pte((old & ~clr) | set); #endif /* huge pages use the old page table lock */ if (!huge) @@ -231,9 +233,9 @@ { unsigned long old; - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); return (old & _PAGE_ACCESSED) != 0; } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -252,7 +254,7 @@ if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - pte_update(mm, addr, ptep, _PAGE_RW, 0); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); } static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, @@ -261,7 +263,7 @@ if ((pte_val(*ptep) & _PAGE_RW) == 0) return; - pte_update(mm, addr, ptep, _PAGE_RW, 1); + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); } /* @@ -284,14 +286,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); return __pte(old); } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep) { - pte_update(mm, addr, ptep, ~0UL, 0); + pte_update(mm, addr, ptep, ~0UL, 0, 0); } @@ -411,7 +413,7 @@ } extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp); + pmd_t *pmdp, unsigned long old_pmd); #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); @@ -506,7 +508,9 
@@ extern unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long clr); + pmd_t *pmdp, + unsigned long clr, + unsigned long set); static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -515,7 +519,7 @@ if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) return 0; - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); return ((old & _PAGE_ACCESSED) != 0); } @@ -542,7 +546,7 @@ if ((pmd_val(*pmdp) & _PAGE_RW) == 0) return; - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); } #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH @@ -558,5 +562,19 @@ #define __HAVE_ARCH_PMDP_INVALIDATE extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); + +#define pmd_move_must_withdraw pmd_move_must_withdraw +struct spinlock; +static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl) +{ + /* + * Archs like ppc64 use pgtable to store per pmd + * specific information. So when we switch the pmd, + * we should also withdraw and deposit the pgtable + */ + return true; +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ --- linux-3.13.0.orig/arch/powerpc/include/asm/ppc_asm.h +++ linux-3.13.0/arch/powerpc/include/asm/ppc_asm.h @@ -306,11 +306,16 @@ * ld rY,ADDROFF(name)(rX) */ #ifdef __powerpc64__ +#ifdef HAVE_AS_ATHIGH +#define __AS_ATHIGH high +#else +#define __AS_ATHIGH h +#endif #define LOAD_REG_IMMEDIATE(reg,expr) \ lis reg,(expr)@highest; \ ori reg,reg,(expr)@higher; \ rldicr reg,reg,32,31; \ - oris reg,reg,(expr)@h; \ + oris reg,reg,(expr)@__AS_ATHIGH; \ ori reg,reg,(expr)@l; #define LOAD_REG_ADDR(reg,name) \ --- linux-3.13.0.orig/arch/powerpc/include/asm/processor.h +++ linux-3.13.0/arch/powerpc/include/asm/processor.h @@ -373,6 +373,8 @@ extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); +extern void fp_enable(void); +extern void vec_enable(void); extern void load_fp_state(struct thread_fp_state *fp); extern void store_fp_state(struct thread_fp_state *fp); extern void load_vr_state(struct thread_vr_state *vr); --- linux-3.13.0.orig/arch/powerpc/include/asm/pte-hash64-64k.h +++ linux-3.13.0/arch/powerpc/include/asm/pte-hash64-64k.h @@ -46,11 +46,31 @@ * in order to deal with 64K made of 4K HW pages. Thus we override the * generic accessors and iterators here */ -#define __real_pte(e,p) ((real_pte_t) { \ - (e), (pte_val(e) & _PAGE_COMBO) ? \ - (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) -#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ - (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) +#define __real_pte __real_pte +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep) +{ + real_pte_t rpte; + + rpte.pte = pte; + rpte.hidx = 0; + if (pte_val(pte) & _PAGE_COMBO) { + /* + * Make sure we order the hidx load against the _PAGE_COMBO + * check. 
The store side ordering is done in __hash_page_4K + */ + smp_rmb(); + rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE)); + } + return rpte; +} + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + if ((pte_val(rpte.pte) & _PAGE_COMBO)) + return (rpte.hidx >> (index<<2)) & 0xf; + return (pte_val(rpte.pte) >> 12) & 0xf; +} + #define __rpte_to_pte(r) ((r).pte) #define __rpte_sub_valid(rpte, index) \ (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index))) --- linux-3.13.0.orig/arch/powerpc/include/asm/ptrace.h +++ linux-3.13.0/arch/powerpc/include/asm/ptrace.h @@ -28,11 +28,23 @@ #ifdef __powerpc64__ +/* + * Size of redzone that userspace is allowed to use below the stack + * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in + * the new ELFv2 little-endian ABI, so we allow the larger amount. + * + * For kernel code we allow a 288-byte redzone, in order to conserve + * kernel stack space; gcc currently only uses 288 bytes, and will + * hopefully allow explicit control of the redzone size in future. + */ +#define USER_REDZONE_SIZE 512 +#define KERNEL_REDZONE_SIZE 288 + #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ - STACK_FRAME_OVERHEAD + 288) + STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) #define STACK_FRAME_MARKER 12 /* Size of dummy stack frame allocated when calling signal handler. */ @@ -41,6 +53,8 @@ #else /* __powerpc64__ */ +#define USER_REDZONE_SIZE 0 +#define KERNEL_REDZONE_SIZE 0 #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) --- linux-3.13.0.orig/arch/powerpc/include/asm/reg.h +++ linux-3.13.0/arch/powerpc/include/asm/reg.h @@ -118,8 +118,10 @@ #define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) #ifdef __BIG_ENDIAN__ #define MSR_ __MSR +#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV) #else #define MSR_ (__MSR | MSR_LE) +#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV | MSR_LE) #endif #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) @@ -213,6 +215,7 @@ #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */ #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */ +#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */ #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */ #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */ #define SPRN_CTRLF 0x088 @@ -260,6 +263,10 @@ #define SPRN_HRMOR 0x139 /* Real mode offset register */ #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ +#define SPRN_PMICR 0x354 /* Power Management Idle Control Reg */ +#define SPRN_PMSR 0x355 /* Power Management Status Reg */ +#define SPRN_PMCR 0x374 /* Power Management Control Register */ + /* HFSCR and FSCR bit numbers are the same */ #define FSCR_TAR_LG 8 /* Enable Target Address Register */ #define FSCR_EBB_LG 7 /* Enable Event Based Branching */ @@ -643,12 +650,14 @@ #define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */ #define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */ #define MMCR0_TBEE 0x00400000UL /* time base exception enable */ +#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in 
userspace */ #define MMCR0_EBE 0x00100000UL /* Event based branch enable */ #define MMCR0_PMCC 0x000c0000UL /* PMC control */ #define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ #define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/ #define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */ +#define MMCR0_PMAO_SYNC 0x00000800UL /* PMU interrupt is synchronous */ #define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */ #define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */ #define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */ @@ -682,6 +691,7 @@ #define SPRN_EBBHR 804 /* Event based branch handler register */ #define SPRN_EBBRR 805 /* Event based branch return register */ #define SPRN_BESCR 806 /* Branch event status and control register */ +#define BESCR_GE 0x8000000000000000ULL /* Global Enable */ #define SPRN_PMC1 787 #define SPRN_PMC2 788 --- linux-3.13.0.orig/arch/powerpc/include/asm/rtas.h +++ linux-3.13.0/arch/powerpc/include/asm/rtas.h @@ -150,19 +150,53 @@ #define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500 struct rtas_error_log { - unsigned long version:8; /* Architectural version */ - unsigned long severity:3; /* Severity level of error */ - unsigned long disposition:2; /* Degree of recovery */ - unsigned long extended:1; /* extended log present? */ - unsigned long /* reserved */ :2; /* Reserved for future use */ - unsigned long initiator:4; /* Initiator of event */ - unsigned long target:4; /* Target of failed operation */ - unsigned long type:8; /* General event or error*/ - unsigned long extended_log_length:32; /* length in bytes */ - unsigned char buffer[1]; /* Start of extended log */ + /* Byte 0 */ + uint8_t byte0; /* Architectural version */ + + /* Byte 1 */ + uint8_t byte1; + /* XXXXXXXX + * XXX 3: Severity level of error + * XX 2: Degree of recovery + * X 1: Extended log present? + * XX 2: Reserved + */ + + /* Byte 2 */ + uint8_t byte2; + /* XXXXXXXX + * XXXX 4: Initiator of event + * XXXX 4: Target of failed operation + */ + uint8_t byte3; /* General event or error*/ + __be32 extended_log_length; /* length in bytes */ + unsigned char buffer[1]; /* Start of extended log */ /* Variable length. */ }; +static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog) +{ + return (elog->byte1 & 0xE0) >> 5; +} + +static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog) +{ + return (elog->byte1 & 0x18) >> 3; +} + +static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog) +{ + return (elog->byte1 & 0x04) >> 2; +} + +#define rtas_error_type(x) ((x)->byte3) + +static inline +uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog) +{ + return be32_to_cpu(elog->extended_log_length); +} + #define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14 #define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8)) @@ -172,32 +206,35 @@ */ struct rtas_ext_event_log_v6 { /* Byte 0 */ - uint32_t log_valid:1; /* 1:Log valid */ - uint32_t unrecoverable_error:1; /* 1:Unrecoverable error */ - uint32_t recoverable_error:1; /* 1:recoverable (correctable */ - /* or successfully retried) */ - uint32_t degraded_operation:1; /* 1:Unrecoverable err, bypassed*/ - /* - degraded operation (e.g. 
*/ - /* CPU or mem taken off-line) */ - uint32_t predictive_error:1; - uint32_t new_log:1; /* 1:"New" log (Always 1 for */ - /* data returned from RTAS */ - uint32_t big_endian:1; /* 1: Big endian */ - uint32_t :1; /* reserved */ + uint8_t byte0; + /* XXXXXXXX + * X 1: Log valid + * X 1: Unrecoverable error + * X 1: Recoverable (correctable or successfully retried) + * X 1: Bypassed unrecoverable error (degraded operation) + * X 1: Predictive error + * X 1: "New" log (always 1 for data returned from RTAS) + * X 1: Big Endian + * X 1: Reserved + */ + /* Byte 1 */ - uint32_t :8; /* reserved */ + uint8_t byte1; /* reserved */ + /* Byte 2 */ - uint32_t powerpc_format:1; /* Set to 1 (indicating log is */ - /* in PowerPC format */ - uint32_t :3; /* reserved */ - uint32_t log_format:4; /* Log format indicator. Define */ - /* format used for byte 12-2047 */ + uint8_t byte2; + /* XXXXXXXX + * X 1: Set to 1 (indicating log is in PowerPC format) + * XXX 3: Reserved + * XXXX 4: Log format used for bytes 12-2047 + */ + /* Byte 3 */ - uint32_t :8; /* reserved */ + uint8_t byte3; /* reserved */ /* Byte 4-11 */ uint8_t reserved[8]; /* reserved */ /* Byte 12-15 */ - uint32_t company_id; /* Company ID of the company */ + __be32 company_id; /* Company ID of the company */ /* that defines the format for */ /* the vendor specific log type */ /* Byte 16-end of log */ @@ -205,6 +242,18 @@ /* Variable length. */ }; +static +inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log) +{ + return ext_log->byte2 & 0x0F; +} + +static +inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log) +{ + return be32_to_cpu(ext_log->company_id); +} + /* pSeries event log format */ /* Two bytes ASCII section IDs */ @@ -227,14 +276,26 @@ /* Vendor specific Platform Event Log Format, Version 6, section header */ struct pseries_errorlog { - uint16_t id; /* 0x00 2-byte ASCII section ID */ - uint16_t length; /* 0x02 Section length in bytes */ + __be16 id; /* 0x00 2-byte ASCII section ID */ + __be16 length; /* 0x02 Section length in bytes */ uint8_t version; /* 0x04 Section version */ uint8_t subtype; /* 0x05 Section subtype */ - uint16_t creator_component; /* 0x06 Creator component ID */ + __be16 creator_component; /* 0x06 Creator component ID */ uint8_t data[]; /* 0x08 Start of section data */ }; +static +inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect) +{ + return be16_to_cpu(sect->id); +} + +static +inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect) +{ + return be16_to_cpu(sect->length); +} + struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, uint16_t section_id); --- linux-3.13.0.orig/arch/powerpc/include/asm/sections.h +++ linux-3.13.0/arch/powerpc/include/asm/sections.h @@ -8,6 +8,7 @@ #ifdef __powerpc64__ +extern char __start_interrupts[]; extern char __end_interrupts[]; extern char __prom_init_toc_start[]; @@ -21,6 +22,17 @@ return 0; } +static inline int overlaps_interrupt_vector_text(unsigned long start, + unsigned long end) +{ + unsigned long real_start, real_end; + real_start = __start_interrupts - _stext; + real_end = __end_interrupts - _stext; + + return start < (unsigned long)__va(real_end) && + (unsigned long)__va(real_start) < end; +} + static inline int overlaps_kernel_text(unsigned long start, unsigned long end) { return start < (unsigned long)__init_end && --- linux-3.13.0.orig/arch/powerpc/include/asm/smp.h +++ linux-3.13.0/arch/powerpc/include/asm/smp.h @@ -207,6 +207,7 @@ extern unsigned long 
__secondary_hold_spinloop; extern unsigned long __secondary_hold_acknowledge; extern char __secondary_hold; +extern unsigned long __run_at_kexec; extern void __early_start(void); #endif /* __ASSEMBLY__ */ --- linux-3.13.0.orig/arch/powerpc/include/asm/switch_to.h +++ linux-3.13.0/arch/powerpc/include/asm/switch_to.h @@ -16,13 +16,15 @@ extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); #ifdef CONFIG_PPC_BOOK3S_64 -static inline void save_tar(struct thread_struct *prev) +static inline void save_early_sprs(struct thread_struct *prev) { if (cpu_has_feature(CPU_FTR_ARCH_207S)) prev->tar = mfspr(SPRN_TAR); + if (cpu_has_feature(CPU_FTR_DSCR)) + prev->dscr = mfspr(SPRN_DSCR); } #else -static inline void save_tar(struct thread_struct *prev) {} +static inline void save_early_sprs(struct thread_struct *prev) {} #endif extern void load_up_fpu(void); @@ -86,6 +88,8 @@ { #ifdef CONFIG_PPC_BOOK3S_64 /* EBB perf events are not inherited, so clear all EBB state. */ + t->thread.ebbrr = 0; + t->thread.ebbhr = 0; t->thread.bescr = 0; t->thread.mmcr2 = 0; t->thread.mmcr0 = 0; --- linux-3.13.0.orig/arch/powerpc/include/asm/systbl.h +++ linux-3.13.0/arch/powerpc/include/asm/systbl.h @@ -190,7 +190,7 @@ SYSCALL_SPU(capget) SYSCALL_SPU(capset) COMPAT_SYS(sigaltstack) -COMPAT_SYS_SPU(sendfile) +SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile) SYSCALL(ni_syscall) SYSCALL(ni_syscall) PPC_SYS(vfork) --- linux-3.13.0.orig/arch/powerpc/include/asm/thread_info.h +++ linux-3.13.0/arch/powerpc/include/asm/thread_info.h @@ -91,8 +91,7 @@ #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_32BIT 4 /* 32 bit binary */ -#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */ -#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ +#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ #define TIF_NOHZ 9 /* in adaptive nohz mode */ @@ -115,8 +114,7 @@ #define _TIF_NEED_RESCHED (1< -static inline int cpu_to_node(int cpu) -{ - return numa_cpu_lookup_table[cpu]; -} - #define parent_node(node) (node) #define cpumask_of_node(node) ((node) == -1 ? 
\ --- linux-3.13.0.orig/arch/powerpc/include/uapi/asm/cputable.h +++ linux-3.13.0/arch/powerpc/include/uapi/asm/cputable.h @@ -41,5 +41,6 @@ #define PPC_FEATURE2_EBB 0x10000000 #define PPC_FEATURE2_ISEL 0x08000000 #define PPC_FEATURE2_TAR 0x04000000 +#define PPC_FEATURE2_VEC_CRYPTO 0x02000000 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */ --- linux-3.13.0.orig/arch/powerpc/kernel/Makefile +++ linux-3.13.0/arch/powerpc/kernel/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o +obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj64-$(CONFIG_RELOCATABLE) += reloc_64.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o --- linux-3.13.0.orig/arch/powerpc/kernel/asm-offsets.c +++ linux-3.13.0/arch/powerpc/kernel/asm-offsets.c @@ -232,6 +232,10 @@ DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); #endif /* CONFIG_PPC_STD_MMU_64 */ DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); +#ifdef CONFIG_PPC_BOOK3S_64 + DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp)); + DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce)); +#endif DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); @@ -484,6 +488,7 @@ DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); + DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); #endif #ifdef CONFIG_PPC_BOOK3S DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); --- linux-3.13.0.orig/arch/powerpc/kernel/cacheinfo.c +++ linux-3.13.0/arch/powerpc/kernel/cacheinfo.c @@ -794,6 +794,9 @@ { remove_index_dirs(cache_dir); + /* Remove cache dir from sysfs */ + kobject_del(cache_dir->kobj); + kobject_put(cache_dir->kobj); kfree(cache_dir); --- linux-3.13.0.orig/arch/powerpc/kernel/cpu_setup_power.S +++ linux-3.13.0/arch/powerpc/kernel/cpu_setup_power.S @@ -29,7 +29,7 @@ mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR bl __init_LPCR - bl __init_TLB + bl __init_tlb_power7 mtlr r11 blr @@ -42,7 +42,7 @@ mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR bl __init_LPCR - bl __init_TLB + bl __init_tlb_power7 mtlr r11 blr @@ -59,7 +59,7 @@ oris r3, r3, LPCR_AIL_3@h bl __init_LPCR bl __init_HFSCR - bl __init_TLB + bl __init_tlb_power8 bl __init_PMU_HV mtlr r11 blr @@ -78,7 +78,7 @@ oris r3, r3, LPCR_AIL_3@h bl __init_LPCR bl __init_HFSCR - bl __init_TLB + bl __init_tlb_power8 bl __init_PMU_HV mtlr r11 blr @@ -134,15 +134,31 @@ mtspr SPRN_HFSCR,r3 blr -__init_TLB: - /* - * Clear the TLB using the "IS 3" form of tlbiel instruction - * (invalidate by congruence class). P7 has 128 CCs, P8 has 512 - * so we just always do 512 - */ +/* + * Clear the TLB using the specified IS form of tlbiel instruction + * (invalidate by congruence class). P7 has 128 CCs., P8 has 512. 
+ * + * r3 = IS field + */ +__init_tlb_power7: + li r3,0xc00 /* IS field = 0b11 */ +_GLOBAL(__flush_tlb_power7) + li r6,128 + mtctr r6 + mr r7,r3 /* IS field */ + ptesync +2: tlbiel r7 + addi r7,r7,0x1000 + bdnz 2b + ptesync +1: blr + +__init_tlb_power8: + li r3,0xc00 /* IS field = 0b11 */ +_GLOBAL(__flush_tlb_power8) li r6,512 mtctr r6 - li r7,0xc00 /* IS field = 0b11 */ + mr r7,r3 /* IS field */ ptesync 2: tlbiel r7 addi r7,r7,0x1000 --- linux-3.13.0.orig/arch/powerpc/kernel/cputable.c +++ linux-3.13.0/arch/powerpc/kernel/cputable.c @@ -71,6 +71,10 @@ extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_power8(void); extern void __restore_cpu_a2(void); +extern void __flush_tlb_power7(unsigned long inval_selector); +extern void __flush_tlb_power8(unsigned long inval_selector); +extern long __machine_check_early_realmode_p7(struct pt_regs *regs); +extern long __machine_check_early_realmode_p8(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); @@ -105,7 +109,8 @@ PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \ PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \ - PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR) + PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ + PPC_FEATURE2_VEC_CRYPTO) #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_HAS_ALTIVEC_COMP) @@ -440,6 +445,8 @@ .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, + .flush_tlb = __flush_tlb_power7, + .machine_check_early = __machine_check_early_realmode_p7, .platform = "power7", }, { /* 2.07-compliant processor, i.e. 
Power8 "architected" mode */ @@ -456,6 +463,8 @@ .oprofile_cpu_type = "ppc64/ibm-compat-v1", .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, + .flush_tlb = __flush_tlb_power8, + .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Power7 */ @@ -474,6 +483,8 @@ .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, + .flush_tlb = __flush_tlb_power7, + .machine_check_early = __machine_check_early_realmode_p7, .platform = "power7", }, { /* Power7+ */ @@ -492,13 +503,15 @@ .oprofile_type = PPC_OPROFILE_POWER4, .cpu_setup = __setup_cpu_power7, .cpu_restore = __restore_cpu_power7, + .flush_tlb = __flush_tlb_power7, + .machine_check_early = __machine_check_early_realmode_p7, .platform = "power7+", }, { /* Power8E */ .pvr_mask = 0xffff0000, .pvr_value = 0x004b0000, .cpu_name = "POWER8E (raw)", - .cpu_features = CPU_FTRS_POWER8, + .cpu_features = CPU_FTRS_POWER8E, .cpu_user_features = COMMON_USER_POWER8, .cpu_user_features2 = COMMON_USER2_POWER8, .mmu_features = MMU_FTRS_POWER8, @@ -510,6 +523,8 @@ .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, + .flush_tlb = __flush_tlb_power8, + .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Power8 */ @@ -528,6 +543,8 @@ .oprofile_type = PPC_OPROFILE_INVALID, .cpu_setup = __setup_cpu_power8, .cpu_restore = __restore_cpu_power8, + .flush_tlb = __flush_tlb_power8, + .machine_check_early = __machine_check_early_realmode_p8, .platform = "power8", }, { /* Cell Broadband Engine */ --- linux-3.13.0.orig/arch/powerpc/kernel/crash_dump.c +++ linux-3.13.0/arch/powerpc/kernel/crash_dump.c @@ -98,17 +98,19 @@ size_t csize, unsigned long offset, int userbuf) { void *vaddr; + phys_addr_t paddr; if (!csize) return 0; csize = min_t(size_t, csize, PAGE_SIZE); + paddr = pfn << PAGE_SHIFT; - if ((min_low_pfn < pfn) && (pfn < max_pfn)) { - vaddr = __va(pfn << PAGE_SHIFT); + if (memblock_is_region_memory(paddr, csize)) { + vaddr = __va(paddr); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); } else { - vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); + vaddr = __ioremap(paddr, PAGE_SIZE, 0); csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); iounmap(vaddr); } --- linux-3.13.0.orig/arch/powerpc/kernel/dma.c +++ linux-3.13.0/arch/powerpc/kernel/dma.c @@ -191,12 +191,10 @@ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) -int dma_set_mask(struct device *dev, u64 dma_mask) +int __dma_set_mask(struct device *dev, u64 dma_mask) { struct dma_map_ops *dma_ops = get_dma_ops(dev); - if (ppc_md.dma_set_mask) - return ppc_md.dma_set_mask(dev, dma_mask); if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); if (!dev->dma_mask || !dma_supported(dev, dma_mask)) @@ -204,6 +202,12 @@ *dev->dma_mask = dma_mask; return 0; } +int dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (ppc_md.dma_set_mask) + return ppc_md.dma_set_mask(dev, dma_mask); + return __dma_set_mask(dev, dma_mask); +} EXPORT_SYMBOL(dma_set_mask); u64 dma_get_required_mask(struct device *dev) --- linux-3.13.0.orig/arch/powerpc/kernel/eeh_driver.c +++ linux-3.13.0/arch/powerpc/kernel/eeh_driver.c @@ -626,84 +626,90 @@ { struct eeh_pe *pe, *phb_pe; struct pci_bus *bus; - struct pci_controller *hose, *tmp; + struct pci_controller *hose; unsigned long flags; - int rc = 0; + int rc; - /* - * The return value from next_error() has been classified as 
follows. - * It might be good to enumerate them. However, next_error() is only - * supported by PowerNV platform for now. So it would be fine to use - * integer directly: - * - * 4 - Dead IOC 3 - Dead PHB - * 2 - Fenced PHB 1 - Frozen PE - * 0 - No error found - * - */ - rc = eeh_ops->next_error(&pe); - if (rc <= 0) - return; - - switch (rc) { - case 4: - /* Mark all PHBs in dead state */ - eeh_serialize_lock(&flags); - list_for_each_entry_safe(hose, tmp, - &hose_list, list_node) { - phb_pe = eeh_phb_pe_get(hose); - if (!phb_pe) continue; - eeh_pe_state_mark(phb_pe, - EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); + do { + rc = eeh_ops->next_error(&pe); + + switch (rc) { + case EEH_NEXT_ERR_DEAD_IOC: + /* Mark all PHBs in dead state */ + eeh_serialize_lock(&flags); + + /* Purge all events */ + eeh_remove_event(NULL); + + list_for_each_entry(hose, &hose_list, list_node) { + phb_pe = eeh_phb_pe_get(hose); + if (!phb_pe) continue; + + eeh_pe_state_mark(phb_pe, + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); + } + + eeh_serialize_unlock(flags); + + break; + case EEH_NEXT_ERR_FROZEN_PE: + case EEH_NEXT_ERR_FENCED_PHB: + case EEH_NEXT_ERR_DEAD_PHB: + /* Mark the PE in fenced state */ + eeh_serialize_lock(&flags); + + /* Purge all events of the PHB */ + eeh_remove_event(pe); + + if (rc == EEH_NEXT_ERR_DEAD_PHB) + eeh_pe_state_mark(pe, + EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); + else + eeh_pe_state_mark(pe, + EEH_PE_ISOLATED | EEH_PE_RECOVERING); + + eeh_serialize_unlock(flags); + + break; + case EEH_NEXT_ERR_NONE: + return; + default: + pr_warn("%s: Invalid value %d from next_error()\n", + __func__, rc); + return; } - eeh_serialize_unlock(flags); - /* Purge all events */ - eeh_remove_event(NULL); - break; - case 3: - case 2: - case 1: - /* Mark the PE in fenced state */ - eeh_serialize_lock(&flags); - if (rc == 3) - eeh_pe_state_mark(pe, - EEH_PE_ISOLATED | EEH_PE_PHB_DEAD); - else - eeh_pe_state_mark(pe, - EEH_PE_ISOLATED | EEH_PE_RECOVERING); - eeh_serialize_unlock(flags); - - /* Purge all events of the PHB */ - eeh_remove_event(pe); - break; - default: - pr_err("%s: Invalid value %d from next_error()\n", - __func__, rc); - return; - } - - /* - * For fenced PHB and frozen PE, it's handled as normal - * event. We have to remove the affected PHBs for dead - * PHB and IOC - */ - if (rc == 2 || rc == 1) - eeh_handle_normal_event(pe); - else { - list_for_each_entry_safe(hose, tmp, - &hose_list, list_node) { - phb_pe = eeh_phb_pe_get(hose); - if (!phb_pe || !(phb_pe->state & EEH_PE_PHB_DEAD)) - continue; - - bus = eeh_pe_bus_get(phb_pe); - /* Notify all devices that they're about to go down. */ - eeh_pe_dev_traverse(pe, eeh_report_failure, NULL); - pcibios_remove_pci_devices(bus); + /* + * For fenced PHB and frozen PE, it's handled as normal + * event. 
We have to remove the affected PHBs for dead + * PHB and IOC + */ + if (rc == EEH_NEXT_ERR_FROZEN_PE || + rc == EEH_NEXT_ERR_FENCED_PHB) { + eeh_handle_normal_event(pe); + } else { + list_for_each_entry(hose, &hose_list, list_node) { + phb_pe = eeh_phb_pe_get(hose); + if (!phb_pe || + !(phb_pe->state & EEH_PE_PHB_DEAD)) + continue; + + /* Notify all devices to be down */ + bus = eeh_pe_bus_get(phb_pe); + eeh_pe_dev_traverse(pe, + eeh_report_failure, NULL); + pcibios_remove_pci_devices(bus); + } } - } + + /* + * If we have detected dead IOC, we needn't proceed + * any more since all PHBs would have been removed + */ + if (rc == EEH_NEXT_ERR_DEAD_IOC) + break; + } while (rc != EEH_NEXT_ERR_NONE); } /** --- linux-3.13.0.orig/arch/powerpc/kernel/entry_64.S +++ linux-3.13.0/arch/powerpc/kernel/entry_64.S @@ -432,12 +432,6 @@ std r24,THREAD_VRSAVE(r3) END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_PPC64 -BEGIN_FTR_SECTION - mfspr r25,SPRN_DSCR - std r25,THREAD_DSCR(r3) -END_FTR_SECTION_IFSET(CPU_FTR_DSCR) -#endif and. r0,r0,r22 beq+ 1f andc r22,r22,r0 @@ -664,9 +658,23 @@ bl .restore_interrupts SCHEDULE_USER b .ret_from_except_lite - -2: bl .save_nvgprs +2: +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM + bne 3f /* only restore TM if nothing else to do */ + addi r3,r1,STACK_FRAME_OVERHEAD + bl .restore_tm_state + b restore +3: +#endif + bl .save_nvgprs + /* + * Use a non volatile GPR to save and restore our thread_info flags + * across the call to restore_interrupts. + */ + mr r30,r4 bl .restore_interrupts + mr r4,r30 addi r3,r1,STACK_FRAME_OVERHEAD bl .do_notify_resume b .ret_from_except --- linux-3.13.0.orig/arch/powerpc/kernel/exceptions-64e.S +++ linux-3.13.0/arch/powerpc/kernel/exceptions-64e.S @@ -309,8 +309,8 @@ EXCEPTION_STUB(0x300, hypercall) EXCEPTION_STUB(0x320, ehpriv) - .globl interrupt_end_book3e -interrupt_end_book3e: + .globl __end_interrupts +__end_interrupts: /* Critical Input Interrupt */ START_EXCEPTION(critical_input); @@ -494,7 +494,7 @@ beq+ 1f LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) - LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) + LOAD_REG_IMMEDIATE(r15,__end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 blt+ cr0,1f @@ -560,7 +560,7 @@ beq+ 1f LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) - LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) + LOAD_REG_IMMEDIATE(r15,__end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 blt+ cr0,1f @@ -1099,7 +1099,15 @@ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping */ /* Now we branch the new virtual address mapped by this entry */ +#ifdef CONFIG_RELOCATABLE + /* We have to find out the address from lr. */ + bl 1f /* Find our address */ +1: mflr r6 + addi r6,r6,(2f - 1b) + tovirt(r6,r6) +#else LOAD_REG_IMMEDIATE(r6,2f) +#endif lis r7,MSR_KERNEL@h ori r7,r7,MSR_KERNEL@l mtspr SPRN_SRR0,r6 @@ -1350,9 +1358,17 @@ mflr r28 b 3b -_STATIC(init_core_book3e) +_GLOBAL(init_core_book3e) /* Establish the interrupt vector base */ +#ifdef CONFIG_RELOCATABLE +/* In the relocatable case the value of the constant expression 'expr' is only + * an offset. So instead, we should load the address of label 'name'. 
+ */ + tovirt(r2,r2) + LOAD_REG_ADDR(r3, interrupt_base_book3e) +#else LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) +#endif mtspr SPRN_IVPR,r3 sync blr --- linux-3.13.0.orig/arch/powerpc/kernel/exceptions-64s.S +++ linux-3.13.0/arch/powerpc/kernel/exceptions-64s.S @@ -155,8 +155,35 @@ */ HMT_MEDIUM_PPR_DISCARD SET_SCRATCH0(r13) /* save r13 */ +#ifdef CONFIG_PPC_P7_NAP +BEGIN_FTR_SECTION + /* Running native on arch 2.06 or later, check if we are + * waking up from nap. We only handle no state loss and + * supervisor state loss. We do -not- handle hypervisor + * state loss at this time. + */ + mfspr r13,SPRN_SRR1 + rlwinm. r13,r13,47-31,30,31 + OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) + beq 9f + + mfspr r13,SPRN_SRR1 + rlwinm. r13,r13,47-31,30,31 + /* waking up from powersave (nap) state */ + cmpwi cr1,r13,2 + /* Total loss of HV state is fatal. let's just stay stuck here */ + OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) + bgt cr1,. +9: + OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR) +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) +#endif /* CONFIG_PPC_P7_NAP */ EXCEPTION_PROLOG_0(PACA_EXMC) +BEGIN_FTR_SECTION + b machine_check_pSeries_early +FTR_SECTION_ELSE b machine_check_pSeries_0 +ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE) . = 0x300 .globl data_access_pSeries @@ -405,6 +432,64 @@ .align 7 /* moved from 0x200 */ +machine_check_pSeries_early: +BEGIN_FTR_SECTION + EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200) + /* + * Register contents: + * R13 = PACA + * R9 = CR + * Original R9 to R13 is saved on PACA_EXMC + * + * Switch to mc_emergency stack and handle re-entrancy (though we + * currently don't test for overflow). Save MCE registers srr1, + * srr0, dar and dsisr and then set ME=1 + * + * We use paca->in_mce to check whether this is the first entry or + * nested machine check. We increment paca->in_mce to track nested + * machine checks. + * + * If this is the first entry then set stack pointer to + * paca->mc_emergency_sp, otherwise r1 is already pointing to + * stack frame on mc_emergency stack. + * + * NOTE: We are here with MSR_ME=0 (off), which means we risk a + * checkstop if we get another machine check exception before we do + * rfid with MSR_ME=1. + */ + mr r11,r1 /* Save r1 */ + lhz r10,PACA_IN_MCE(r13) + cmpwi r10,0 /* Are we in nested machine check */ + bne 0f /* Yes, we are. */ + /* First machine check entry */ + ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */ +0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ + addi r10,r10,1 /* increment paca->in_mce */ + sth r10,PACA_IN_MCE(r13) + std r11,GPR1(r1) /* Save r1 on the stack. */ + std r11,0(r1) /* make stack chain pointer */ + mfspr r11,SPRN_SRR0 /* Save SRR0 */ + std r11,_NIP(r1) + mfspr r11,SPRN_SRR1 /* Save SRR1 */ + std r11,_MSR(r1) + mfspr r11,SPRN_DAR /* Save DAR */ + std r11,_DAR(r1) + mfspr r11,SPRN_DSISR /* Save DSISR */ + std r11,_DSISR(r1) + std r9,_CCR(r1) /* Save CR in stackframe */ + /* Save r9 through r13 from EXMC save area to stack frame. */ + EXCEPTION_PROLOG_COMMON_2(PACA_EXMC) + mfmsr r11 /* get MSR value */ + ori r11,r11,MSR_ME /* turn on ME bit */ + ori r11,r11,MSR_RI /* turn on RI bit */ + ld r12,PACAKBASE(r13) /* get high part of &label */ + LOAD_HANDLER(r12, machine_check_handle_early) + mtspr SPRN_SRR0,r12 + mtspr SPRN_SRR1,r11 + rfid + b . 
/* prevent speculative execution */ +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) + machine_check_pSeries: .globl machine_check_fwnmi machine_check_fwnmi: @@ -688,30 +773,6 @@ STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) - /* - * Machine check is different because we use a different - * save area: PACA_EXMC instead of PACA_EXGEN. - */ - .align 7 - .globl machine_check_common -machine_check_common: - - mfspr r10,SPRN_DAR - std r10,PACA_EXGEN+EX_DAR(r13) - mfspr r10,SPRN_DSISR - stw r10,PACA_EXGEN+EX_DSISR(r13) - EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) - FINISH_NAP - DISABLE_INTS - ld r3,PACA_EXGEN+EX_DAR(r13) - lwz r4,PACA_EXGEN+EX_DSISR(r13) - std r3,_DAR(r1) - std r4,_DSISR(r1) - bl .save_nvgprs - addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception - b .ret_from_except - STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) @@ -1080,6 +1141,30 @@ #endif /* __DISABLED__ */ + /* + * Machine check is different because we use a different + * save area: PACA_EXMC instead of PACA_EXGEN. + */ + .align 7 + .globl machine_check_common +machine_check_common: + + mfspr r10,SPRN_DAR + std r10,PACA_EXGEN+EX_DAR(r13) + mfspr r10,SPRN_DSISR + stw r10,PACA_EXGEN+EX_DSISR(r13) + EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) + FINISH_NAP + DISABLE_INTS + ld r3,PACA_EXGEN+EX_DAR(r13) + lwz r4,PACA_EXGEN+EX_DSISR(r13) + std r3,_DAR(r1) + std r4,_DSISR(r1) + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + bl .machine_check_exception + b .ret_from_except + .align 7 .globl alignment_common alignment_common: @@ -1263,6 +1348,120 @@ #endif /* CONFIG_PPC_POWERNV */ +#define MACHINE_CHECK_HANDLER_WINDUP \ + /* Clear MSR_RI before setting SRR0 and SRR1. */\ + li r0,MSR_RI; \ + mfmsr r9; /* get MSR value */ \ + andc r9,r9,r0; \ + mtmsrd r9,1; /* Clear MSR_RI */ \ + /* Move original SRR0 and SRR1 into the respective regs */ \ + ld r9,_MSR(r1); \ + mtspr SPRN_SRR1,r9; \ + ld r3,_NIP(r1); \ + mtspr SPRN_SRR0,r3; \ + ld r9,_CTR(r1); \ + mtctr r9; \ + ld r9,_XER(r1); \ + mtxer r9; \ + ld r9,_LINK(r1); \ + mtlr r9; \ + REST_GPR(0, r1); \ + REST_8GPRS(2, r1); \ + REST_GPR(10, r1); \ + ld r11,_CCR(r1); \ + mtcr r11; \ + /* Decrement paca->in_mce. */ \ + lhz r12,PACA_IN_MCE(r13); \ + subi r12,r12,1; \ + sth r12,PACA_IN_MCE(r13); \ + REST_GPR(11, r1); \ + REST_2GPRS(12, r1); \ + /* restore original r1. */ \ + ld r1,GPR1(r1) + + /* + * Handle machine check early in real mode. We come here with + * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack. + */ + .align 7 + .globl machine_check_handle_early +machine_check_handle_early: + std r0,GPR0(r1) /* Save r0 */ + EXCEPTION_PROLOG_COMMON_3(0x200) + bl .save_nvgprs + addi r3,r1,STACK_FRAME_OVERHEAD + bl .machine_check_early + ld r12,_MSR(r1) +#ifdef CONFIG_PPC_P7_NAP + /* + * Check if thread was in power saving mode. We come here when any + * of the following is true: + * a. thread wasn't in power saving mode + * b. thread was in power saving mode with no state loss or + * supervisor state loss + * + * Go back to nap again if (b) is true. + */ + rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */ + beq 4f /* No, it wasn;t */ + /* Thread was in power saving mode. Go back to nap again. 
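The machine_check_pSeries_early prologue above uses paca->in_mce both as a nesting counter and as the cue for which stack to run on: the outermost machine check switches to the dedicated mc_emergency stack, a nested one keeps whatever r1 already points at, and the windup path decrements the counter again. The C sketch below models just that bookkeeping for a single CPU; the names mc_depth, mc_emergency_stack and pick_stack() are invented for the illustration and are not kernel symbols.

#include <stdint.h>
#include <stdio.h>

#define MC_STACK_SIZE 4096

/* Per-CPU state, simplified to a single CPU for the sketch. */
static uint8_t  mc_emergency_stack[MC_STACK_SIZE];
static unsigned mc_depth;			/* plays the role of paca->in_mce */

/* Returns the stack pointer the handler should run on. */
static uint8_t *pick_stack(uint8_t *current_sp)
{
	uint8_t *sp = current_sp;

	if (mc_depth == 0)			/* outermost machine check */
		sp = mc_emergency_stack + MC_STACK_SIZE;
	mc_depth++;				/* nested entries reuse current_sp */
	return sp - 256;			/* carve a fresh frame */
}

static void mc_done(void) { mc_depth--; }	/* mirrored by the windup path */

int main(void)
{
	uint8_t regular[512];
	uint8_t *outer  = pick_stack(regular + sizeof(regular));
	uint8_t *nested = pick_stack(outer);	/* a nested MC stays on the MC stack */

	printf("outer on MC stack: %d, nested below outer: %d\n",
	       outer >= mc_emergency_stack &&
	       outer <  mc_emergency_stack + MC_STACK_SIZE,
	       nested < outer);
	mc_done();
	mc_done();
	return 0;
}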
*/ + cmpwi r11,2 + bne 3f + /* Supervisor state loss */ + li r0,1 + stb r0,PACA_NAPSTATELOST(r13) +3: bl .machine_check_queue_event + MACHINE_CHECK_HANDLER_WINDUP + GET_PACA(r13) + ld r1,PACAR1(r13) + b .power7_enter_nap_mode +4: +#endif + /* + * Check if we are coming from hypervisor userspace. If yes then we + * continue in host kernel in V mode to deliver the MC event. + */ + rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */ + beq 5f + andi. r11,r12,MSR_PR /* See if coming from user. */ + bne 9f /* continue in V mode if we are. */ + +5: +#ifdef CONFIG_KVM_BOOK3S_64_HV + /* + * We are coming from kernel context. Check if we are coming from + * guest. if yes, then we can continue. We will fall through + * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest. + */ + lbz r11,HSTATE_IN_GUEST(r13) + cmpwi r11,0 /* Check if coming from guest */ + bne 9f /* continue if we are. */ +#endif + /* + * At this point we are not sure about what context we come from. + * Queue up the MCE event and return from the interrupt. + * But before that, check if this is an un-recoverable exception. + * If yes, then stay on emergency stack and panic. + */ + andi. r11,r12,MSR_RI + bne 2f +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b +2: + /* + * Return from MC interrupt. + * Queue up the MCE event so that we can log it later, while + * returning from kernel or opal call. + */ + bl .machine_check_queue_event + MACHINE_CHECK_HANDLER_WINDUP + rfid +9: + /* Deliver the machine check to host kernel in V mode. */ + MACHINE_CHECK_HANDLER_WINDUP + b machine_check_pSeries + /* * r13 points to the PACA, r9 contains the saved CR, * r12 contain the saved SRR1, SRR0 is still ready for return --- linux-3.13.0.orig/arch/powerpc/kernel/fpu.S +++ linux-3.13.0/arch/powerpc/kernel/fpu.S @@ -81,6 +81,22 @@ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* + * Enable use of the FPU, and VSX if possible, for the caller. + */ +_GLOBAL(fp_enable) + mfmsr r3 + ori r3,r3,MSR_FP +#ifdef CONFIG_VSX +BEGIN_FTR_SECTION + oris r3,r3,MSR_VSX@h +END_FTR_SECTION_IFSET(CPU_FTR_VSX) +#endif + SYNC + MTMSRD(r3) + isync /* (not necessary for arch 2.02 and later) */ + blr + +/* * Load state from memory into FP registers including FPSCR. * Assumes the caller has enabled FP in the MSR. */ --- linux-3.13.0.orig/arch/powerpc/kernel/head_64.S +++ linux-3.13.0/arch/powerpc/kernel/head_64.S @@ -91,6 +91,10 @@ __secondary_hold_acknowledge: .llong 0x0 + .globl __run_at_kexec +__run_at_kexec: + .llong 0x0 /* Flag for the secondary kernel from kexec. */ + #ifdef CONFIG_RELOCATABLE /* This flag is set to 1 by a loader if the kernel should run * at the loaded address instead of the linked address. This @@ -126,6 +130,10 @@ /* Grab our physical cpu number */ mr r24,r3 /* stash r4 for book3e */ +#ifdef CONFIG_PPC_FSL_BOOK3E + /* we need to setup initial TLB entry. */ + li r4,0 +#endif mr r25,r4 /* Tell the master cpu we're here */ @@ -418,12 +426,31 @@ /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ sldi r25,r25,32 +#if defined(CONFIG_PPC_BOOK3E) + tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ +#endif +#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP) + /* If relocated we need to restore this flag on that relocated address. */ + ld r7,__run_at_kexec-_stext(r26) + std r7,__run_at_kexec-_stext(r26) +#endif + lwz r7,__run_at_load-_stext(r26) +#if defined(CONFIG_PPC_BOOK3E) + tophys(r26,r26) /* Restore for the remains. 
*/ +#endif cmplwi cr0,r7,1 /* flagged to stay where we are ? */ bne 1f add r25,r25,r26 1: mr r3,r25 bl .relocate +#if defined(CONFIG_PPC_BOOK3E) + /* In relocatable case we always have to load the address of label 'name' + * to set IVPR. So after .relocate we have to update IVPR with current + * address of label. + */ + bl .init_core_book3e +#endif #endif /* @@ -438,12 +465,12 @@ tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */ #endif mr. r4,r26 /* In some cases the loader may */ +#if defined(CONFIG_PPC_BOOK3E) + tovirt(r4,r4) +#endif beq 9f /* have already put us at zero */ li r6,0x100 /* Start offset, the first 0x100 */ /* bytes were copied earlier. */ -#ifdef CONFIG_PPC_BOOK3E - tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */ -#endif #ifdef CONFIG_RELOCATABLE /* @@ -451,12 +478,24 @@ * variable __run_at_load, if it is set the kernel is treated as relocatable * kernel, otherwise it will be moved to PHYSICAL_START */ +#if defined(CONFIG_PPC_BOOK3E) + tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ +#endif lwz r7,__run_at_load-_stext(r26) +#if defined(CONFIG_PPC_BOOK3E) + tophys(r26,r26) /* Restore for the remains. */ +#endif cmplwi cr0,r7,1 bne 3f +#ifdef CONFIG_PPC_BOOK3E + LOAD_REG_ADDR(r5, __end_interrupts) + LOAD_REG_ADDR(r11, _stext) + sub r5,r5,r11 +#else /* just copy interrupts */ LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext) +#endif b 5f 3: #endif @@ -475,6 +514,9 @@ p_end: .llong _end - _stext 4: /* Now copy the rest of the kernel up to _end */ +#if defined(CONFIG_PPC_BOOK3E) + tovirt(r26,r26) +#endif addis r5,r26,(p_end - _stext)@ha ld r5,(p_end - _stext)@l(r5) /* get _end */ 5: bl .copy_and_flush /* copy the rest */ --- linux-3.13.0.orig/arch/powerpc/kernel/idle_power7.S +++ linux-3.13.0/arch/powerpc/kernel/idle_power7.S @@ -84,6 +84,22 @@ std r9,_MSR(r1) std r1,PACAR1(r13) + /* + * Go to real mode to do the nap, as required by the architecture. + * Also, we need to be in real mode before setting hwthread_state, + * because as soon as we do that, another thread can switch + * the MMU context to the guest. 
+ */ + LOAD_REG_IMMEDIATE(r5, MSR_IDLE) + li r6, MSR_RI + andc r6, r9, r6 + LOAD_REG_ADDR(r7, power7_enter_nap_mode) + mtmsrd r6, 1 /* clear RI before setting SRR0/1 */ + mtspr SPRN_SRR0, r7 + mtspr SPRN_SRR1, r5 + rfid + +_GLOBAL(power7_enter_nap_mode) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* Tell KVM we're napping */ li r4,KVM_HWTHREAD_IN_NAP --- linux-3.13.0.orig/arch/powerpc/kernel/iommu.c +++ linux-3.13.0/arch/powerpc/kernel/iommu.c @@ -1088,6 +1088,14 @@ memset(tbl->it_map, 0xff, sz); iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); + /* + * Disable iommu bypass, otherwise the user can DMA to all of + * our physical memory via the bypass window instead of just + * the pages that has been explicitly mapped into the iommu + */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, false); + return 0; } EXPORT_SYMBOL_GPL(iommu_take_ownership); @@ -1102,6 +1110,10 @@ /* Restore bit#0 set by iommu_init_table() */ if (tbl->it_offset == 0) set_bit(0, tbl->it_map); + + /* The kernel owns the device now, we can restore the iommu bypass */ + if (tbl->set_bypass) + tbl->set_bypass(tbl, true); } EXPORT_SYMBOL_GPL(iommu_release_ownership); --- linux-3.13.0.orig/arch/powerpc/kernel/legacy_serial.c +++ linux-3.13.0/arch/powerpc/kernel/legacy_serial.c @@ -48,6 +48,9 @@ static unsigned int legacy_serial_count; static int legacy_serial_console = -1; +static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | + UPF_SHARE_IRQ | UPF_FIXED_PORT; + static unsigned int tsi_serial_in(struct uart_port *p, int offset) { unsigned int tmp; @@ -153,8 +156,6 @@ { u64 addr; const __be32 *addrp; - upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ - | UPF_FIXED_PORT; struct device_node *tsi = of_get_parent(np); /* We only support ports that have a clock frequency properly @@ -185,9 +186,11 @@ * IO port value. It will be fixed up later along with the irq */ if (tsi && !strcmp(tsi->type, "tsi-bridge")) - return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0); + return add_legacy_port(np, -1, UPIO_TSI, addr, addr, + NO_IRQ, legacy_port_flags, 0); else - return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); + return add_legacy_port(np, -1, UPIO_MEM, addr, addr, + NO_IRQ, legacy_port_flags, 0); } static int __init add_legacy_isa_port(struct device_node *np, @@ -233,7 +236,7 @@ /* Add port, irq will be dealt with later */ return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), - taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0); + taddr, NO_IRQ, legacy_port_flags, 0); } @@ -306,7 +309,7 @@ * IO port value. It will be fixed up later along with the irq */ return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, - UPF_BOOT_AUTOCONF, np != pci_dev); + legacy_port_flags, np != pci_dev); } #endif --- linux-3.13.0.orig/arch/powerpc/kernel/machine_kexec.c +++ linux-3.13.0/arch/powerpc/kernel/machine_kexec.c @@ -196,7 +196,9 @@ /* Values we need to export to the second kernel via the device tree. 
*/ static phys_addr_t kernel_end; +static phys_addr_t crashk_base; static phys_addr_t crashk_size; +static unsigned long long mem_limit; static struct property kernel_end_prop = { .name = "linux,kernel-end", @@ -207,7 +209,7 @@ static struct property crashk_base_prop = { .name = "linux,crashkernel-base", .length = sizeof(phys_addr_t), - .value = &crashk_res.start, + .value = &crashk_base }; static struct property crashk_size_prop = { @@ -219,9 +221,11 @@ static struct property memory_limit_prop = { .name = "linux,memory-limit", .length = sizeof(unsigned long long), - .value = &memory_limit, + .value = &mem_limit, }; +#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG) + static void __init export_crashk_values(struct device_node *node) { struct property *prop; @@ -237,8 +241,9 @@ of_remove_property(node, prop); if (crashk_res.start != 0) { + crashk_base = cpu_to_be_ulong(crashk_res.start), of_add_property(node, &crashk_base_prop); - crashk_size = resource_size(&crashk_res); + crashk_size = cpu_to_be_ulong(resource_size(&crashk_res)); of_add_property(node, &crashk_size_prop); } @@ -246,6 +251,7 @@ * memory_limit is required by the kexec-tools to limit the * crash regions to the actual memory used. */ + mem_limit = cpu_to_be_ulong(memory_limit); of_update_property(node, &memory_limit_prop); } @@ -264,7 +270,7 @@ of_remove_property(node, prop); /* information needed by userspace when using default_machine_kexec */ - kernel_end = __pa(_end); + kernel_end = cpu_to_be_ulong(__pa(_end)); of_add_property(node, &kernel_end_prop); export_crashk_values(node); --- linux-3.13.0.orig/arch/powerpc/kernel/machine_kexec_64.c +++ linux-3.13.0/arch/powerpc/kernel/machine_kexec_64.c @@ -30,72 +30,6 @@ #include #include -int default_machine_kexec_prepare(struct kimage *image) -{ - int i; - unsigned long begin, end; /* limits of segment */ - unsigned long low, high; /* limits of blocked memory range */ - struct device_node *node; - const unsigned long *basep; - const unsigned int *sizep; - - if (!ppc_md.hpte_clear_all) - return -ENOENT; - - /* - * Since we use the kernel fault handlers and paging code to - * handle the virtual mode, we must make sure no destination - * overlaps kernel static data or bss. - */ - for (i = 0; i < image->nr_segments; i++) - if (image->segment[i].mem < __pa(_end)) - return -ETXTBSY; - - /* - * For non-LPAR, we absolutely can not overwrite the mmu hash - * table, since we are still using the bolted entries in it to - * do the copy. Check that here. - * - * It is safe if the end is below the start of the blocked - * region (end <= low), or if the beginning is after the - * end of the blocked region (begin >= high). Use the - * boolean identity !(a || b) === (!a && !b). 
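The removed (and, further down, re-added) default_machine_kexec_prepare() leans on the identity quoted in this comment: a kexec segment [begin, end) is safe only if it misses the blocked range [low, high), i.e. unless each range starts before the other one ends. A tiny standalone check of that predicate, with made-up range values:

#include <assert.h>
#include <stdbool.h>

/* True when [begin, end) intersects the blocked range [low, high). */
static bool overlaps(unsigned long begin, unsigned long end,
		     unsigned long low, unsigned long high)
{
	return (begin < high) && (end > low);
}

int main(void)
{
	/* Hypothetical blocked window (say, a hash table) at [0x1000, 0x2000). */
	assert(!overlaps(0x0000, 0x1000, 0x1000, 0x2000));	/* ends exactly at low    */
	assert( overlaps(0x0800, 0x1800, 0x1000, 0x2000));	/* straddles the start    */
	assert( overlaps(0x1800, 0x3000, 0x1000, 0x2000));	/* straddles the end      */
	assert(!overlaps(0x2000, 0x3000, 0x1000, 0x2000));	/* starts exactly at high */
	return 0;
}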
- */ - if (htab_address) { - low = __pa(htab_address); - high = low + htab_size_bytes; - - for (i = 0; i < image->nr_segments; i++) { - begin = image->segment[i].mem; - end = begin + image->segment[i].memsz; - - if ((begin < high) && (end > low)) - return -ETXTBSY; - } - } - - /* We also should not overwrite the tce tables */ - for_each_node_by_type(node, "pci") { - basep = of_get_property(node, "linux,tce-base", NULL); - sizep = of_get_property(node, "linux,tce-size", NULL); - if (basep == NULL || sizep == NULL) - continue; - - low = *basep; - high = low + (*sizep); - - for (i = 0; i < image->nr_segments; i++) { - begin = image->segment[i].mem; - end = begin + image->segment[i].memsz; - - if ((begin < high) && (end > low)) - return -ETXTBSY; - } - } - - return 0; -} - #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE) static void copy_segments(unsigned long ind) @@ -237,7 +171,7 @@ if (!cpu_online(cpu)) { printk(KERN_INFO "kexec: Waking offline cpu %d.\n", cpu); - cpu_up(cpu); + WARN_ON(cpu_up(cpu)); } } } @@ -367,8 +301,90 @@ /* NOTREACHED */ } +#ifdef CONFIG_PPC_BOOK3E +int default_machine_kexec_prepare(struct kimage *image) +{ + int i; + /* + * Since we use the kernel fault handlers and paging code to + * handle the virtual mode, we must make sure no destination + * overlaps kernel static data or bss. + */ + for (i = 0; i < image->nr_segments; i++) + if (image->segment[i].mem < __pa(_end)) + return -ETXTBSY; + return 0; +} +#else /* CONFIG_PPC_BOOK3E */ +int default_machine_kexec_prepare(struct kimage *image) +{ + int i; + unsigned long begin, end; /* limits of segment */ + unsigned long low, high; /* limits of blocked memory range */ + struct device_node *node; + const unsigned long *basep; + const unsigned int *sizep; + + if (!ppc_md.hpte_clear_all) + return -ENOENT; + + /* + * Since we use the kernel fault handlers and paging code to + * handle the virtual mode, we must make sure no destination + * overlaps kernel static data or bss. + */ + for (i = 0; i < image->nr_segments; i++) + if (image->segment[i].mem < __pa(_end)) + return -ETXTBSY; + + /* + * For non-LPAR, we absolutely can not overwrite the mmu hash + * table, since we are still using the bolted entries in it to + * do the copy. Check that here. + * + * It is safe if the end is below the start of the blocked + * region (end <= low), or if the beginning is after the + * end of the blocked region (begin >= high). Use the + * boolean identity !(a || b) === (!a && !b). + */ + if (htab_address) { + low = __pa(htab_address); + high = low + htab_size_bytes; + + for (i = 0; i < image->nr_segments; i++) { + begin = image->segment[i].mem; + end = begin + image->segment[i].memsz; + + if ((begin < high) && (end > low)) + return -ETXTBSY; + } + } + + /* We also should not overwrite the tce tables */ + for_each_node_by_type(node, "pci") { + basep = of_get_property(node, "linux,tce-base", NULL); + sizep = of_get_property(node, "linux,tce-size", NULL); + if (basep == NULL || sizep == NULL) + continue; + + low = *basep; + high = low + (*sizep); + + for (i = 0; i < image->nr_segments; i++) { + begin = image->segment[i].mem; + end = begin + image->segment[i].memsz; + + if ((begin < high) && (end > low)) + return -ETXTBSY; + } + } + + return 0; +} + /* Values we need to export to the second kernel via the device tree. 
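The values exported to the second kernel here, like the crashkernel and memory-limit properties in machine_kexec.c earlier in this series, are byte-swapped to big endian before being placed in the device tree, because the flattened device tree is defined as big endian regardless of the kernel's own endianness. A userspace-flavoured sketch of the same conversion using htobe64() from <endian.h>; the dt_prop structure and the address value are simplified stand-ins, not the kernel's property machinery.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A device-tree property value is just a big-endian byte blob. */
struct dt_prop {
	const char *name;
	uint8_t     value[8];
	size_t      len;
};

static void set_prop_u64(struct dt_prop *p, const char *name, uint64_t host_val)
{
	uint64_t be = htobe64(host_val);	/* FDT values are always big endian */

	p->name = name;
	memcpy(p->value, &be, sizeof(be));
	p->len = sizeof(be);
}

int main(void)
{
	struct dt_prop htab_base;

	/* Invented address standing in for the exported physical address. */
	set_prop_u64(&htab_base, "linux,htab-base", 0x000000003f000000ULL);

	printf("%s =", htab_base.name);
	for (size_t i = 0; i < htab_base.len; i++)
		printf(" %02x", htab_base.value[i]);
	printf("\n");
	return 0;
}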
*/ static unsigned long htab_base; +static unsigned long htab_size; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -379,7 +395,7 @@ static struct property htab_size_prop = { .name = "linux,htab-size", .length = sizeof(unsigned long), - .value = &htab_size_bytes, + .value = &htab_size, }; static int __init export_htab_values(void) @@ -403,11 +419,13 @@ if (prop) of_remove_property(node, prop); - htab_base = __pa(htab_address); + htab_base = cpu_to_be64(__pa(htab_address)); of_add_property(node, &htab_base_prop); + htab_size = cpu_to_be64(htab_size_bytes); of_add_property(node, &htab_size_prop); of_node_put(node); return 0; } late_initcall(export_htab_values); +#endif /* !CONFIG_PPC_BOOK3E */ --- linux-3.13.0.orig/arch/powerpc/kernel/mce.c +++ linux-3.13.0/arch/powerpc/kernel/mce.c @@ -0,0 +1,352 @@ +/* + * Machine check exception handling. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright 2013 IBM Corporation + * Author: Mahesh Salgaonkar + */ + +#undef DEBUG +#define pr_fmt(fmt) "mce: " fmt + +#include +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU(int, mce_nest_count); +static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event); + +/* Queue for delayed MCE events. */ +static DEFINE_PER_CPU(int, mce_queue_count); +static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue); + +static void machine_check_process_queued_event(struct irq_work *work); +struct irq_work mce_event_process_work = { + .func = machine_check_process_queued_event, +}; + +static void mce_set_error_info(struct machine_check_event *mce, + struct mce_error_info *mce_err) +{ + mce->error_type = mce_err->error_type; + switch (mce_err->error_type) { + case MCE_ERROR_TYPE_UE: + mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type; + break; + case MCE_ERROR_TYPE_SLB: + mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type; + break; + case MCE_ERROR_TYPE_ERAT: + mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type; + break; + case MCE_ERROR_TYPE_TLB: + mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; + break; + case MCE_ERROR_TYPE_UNKNOWN: + default: + break; + } +} + +/* + * Decode and save high level MCE information into per cpu buffer which + * is an array of machine_check_event structure. + */ +void save_mce_event(struct pt_regs *regs, long handled, + struct mce_error_info *mce_err, + uint64_t nip, uint64_t addr) +{ + uint64_t srr1; + int index = __get_cpu_var(mce_nest_count)++; + struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); + + /* + * Return if we don't have enough space to log mce event. + * mce_nest_count may go beyond MAX_MC_EVT but that's ok, + * the check below will stop buffer overrun. 
+ */ + if (index >= MAX_MC_EVT) + return; + + /* Populate generic machine check info */ + mce->version = MCE_V1; + mce->srr0 = nip; + mce->srr1 = regs->msr; + mce->gpr3 = regs->gpr[3]; + mce->in_use = 1; + + mce->initiator = MCE_INITIATOR_CPU; + if (handled) + mce->disposition = MCE_DISPOSITION_RECOVERED; + else + mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; + mce->severity = MCE_SEV_ERROR_SYNC; + + srr1 = regs->msr; + + /* + * Populate the mce error_type and type-specific error_type. + */ + mce_set_error_info(mce, mce_err); + + if (!addr) + return; + + if (mce->error_type == MCE_ERROR_TYPE_TLB) { + mce->u.tlb_error.effective_address_provided = true; + mce->u.tlb_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_SLB) { + mce->u.slb_error.effective_address_provided = true; + mce->u.slb_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { + mce->u.erat_error.effective_address_provided = true; + mce->u.erat_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_UE) { + mce->u.ue_error.effective_address_provided = true; + mce->u.ue_error.effective_address = addr; + } + return; +} + +/* + * get_mce_event: + * mce Pointer to machine_check_event structure to be filled. + * release Flag to indicate whether to free the event slot or not. + * 0 <= do not release the mce event. Caller will invoke + * release_mce_event() once event has been consumed. + * 1 <= release the slot. + * + * return 1 = success + * 0 = failure + * + * get_mce_event() will be called by platform specific machine check + * handle routine and in KVM. + * When we call get_mce_event(), we are still in interrupt context and + * preemption will not be scheduled until ret_from_expect() routine + * is called. + */ +int get_mce_event(struct machine_check_event *mce, bool release) +{ + int index = __get_cpu_var(mce_nest_count) - 1; + struct machine_check_event *mc_evt; + int ret = 0; + + /* Sanity check */ + if (index < 0) + return ret; + + /* Check if we have MCE info to process. */ + if (index < MAX_MC_EVT) { + mc_evt = &__get_cpu_var(mce_event[index]); + /* Copy the event structure and release the original */ + if (mce) + *mce = *mc_evt; + if (release) + mc_evt->in_use = 0; + ret = 1; + } + /* Decrement the count to free the slot. */ + if (release) + __get_cpu_var(mce_nest_count)--; + + return ret; +} + +void release_mce_event(void) +{ + get_mce_event(NULL, true); +} + +/* + * Queue up the MCE event which then can be handled later. + */ +void machine_check_queue_event(void) +{ + int index; + struct machine_check_event evt; + + if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) + return; + + index = __get_cpu_var(mce_queue_count)++; + /* If queue is full, just return for now. */ + if (index >= MAX_MC_EVT) { + __get_cpu_var(mce_queue_count)--; + return; + } + __get_cpu_var(mce_event_queue[index]) = evt; + + /* Queue irq work to process this event later. */ + irq_work_queue(&mce_event_process_work); +} + +/* + * process pending MCE event from the mce event queue. This function will be + * called during syscall exit. + */ +static void machine_check_process_queued_event(struct irq_work *work) +{ + int index; + + /* + * For now just print it to console. + * TODO: log this error event to FSP or nvram. 
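machine_check_queue_event() above and machine_check_process_queued_event(), whose body continues just below, implement a classic split: the real-mode side may only stash the event in a small per-CPU array and schedule irq_work, and the logging happens later from a safe context. The sketch below reproduces that bounded-queue shape in plain C for one CPU; the queue bound stands in for MAX_MC_EVT (whose value lives in a header, not in this hunk) and the remaining names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_MAX 100			/* stand-in for MAX_MC_EVT */

struct mc_event { unsigned long addr; };

static struct mc_event queue[QUEUE_MAX];
static int queue_count;

/* Called from the "real mode" side: no printing, just remember the event. */
static bool queue_event(struct mc_event evt)
{
	if (queue_count >= QUEUE_MAX)
		return false;		/* queue full, drop it for now */
	queue[queue_count++] = evt;
	return true;			/* irq_work would be scheduled here */
}

/* Called later from a safe context (the irq_work callback in the patch). */
static void process_queued_events(void)
{
	while (queue_count > 0) {
		queue_count--;
		printf("machine check at 0x%lx\n", queue[queue_count].addr);
	}
}

int main(void)
{
	queue_event((struct mc_event){ .addr = 0x1000 });
	queue_event((struct mc_event){ .addr = 0x2000 });
	process_queued_events();
	return 0;
}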
+ */ + while (__get_cpu_var(mce_queue_count) > 0) { + index = __get_cpu_var(mce_queue_count) - 1; + machine_check_print_event_info( + &__get_cpu_var(mce_event_queue[index])); + __get_cpu_var(mce_queue_count)--; + } +} + +void machine_check_print_event_info(struct machine_check_event *evt) +{ + const char *level, *sevstr, *subtype; + static const char *mc_ue_types[] = { + "Indeterminate", + "Instruction fetch", + "Page table walk ifetch", + "Load/Store", + "Page table walk Load/Store", + }; + static const char *mc_slb_types[] = { + "Indeterminate", + "Parity", + "Multihit", + }; + static const char *mc_erat_types[] = { + "Indeterminate", + "Parity", + "Multihit", + }; + static const char *mc_tlb_types[] = { + "Indeterminate", + "Parity", + "Multihit", + }; + + /* Print things out */ + if (evt->version != MCE_V1) { + pr_err("Machine Check Exception, Unknown event version %d !\n", + evt->version); + return; + } + switch (evt->severity) { + case MCE_SEV_NO_ERROR: + level = KERN_INFO; + sevstr = "Harmless"; + break; + case MCE_SEV_WARNING: + level = KERN_WARNING; + sevstr = ""; + break; + case MCE_SEV_ERROR_SYNC: + level = KERN_ERR; + sevstr = "Severe"; + break; + case MCE_SEV_FATAL: + default: + level = KERN_ERR; + sevstr = "Fatal"; + break; + } + + printk("%s%s Machine check interrupt [%s]\n", level, sevstr, + evt->disposition == MCE_DISPOSITION_RECOVERED ? + "Recovered" : "[Not recovered"); + printk("%s Initiator: %s\n", level, + evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown"); + switch (evt->error_type) { + case MCE_ERROR_TYPE_UE: + subtype = evt->u.ue_error.ue_error_type < + ARRAY_SIZE(mc_ue_types) ? + mc_ue_types[evt->u.ue_error.ue_error_type] + : "Unknown"; + printk("%s Error type: UE [%s]\n", level, subtype); + if (evt->u.ue_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.ue_error.effective_address); + if (evt->u.ue_error.physical_address_provided) + printk("%s Physial address: %016llx\n", + level, evt->u.ue_error.physical_address); + break; + case MCE_ERROR_TYPE_SLB: + subtype = evt->u.slb_error.slb_error_type < + ARRAY_SIZE(mc_slb_types) ? + mc_slb_types[evt->u.slb_error.slb_error_type] + : "Unknown"; + printk("%s Error type: SLB [%s]\n", level, subtype); + if (evt->u.slb_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.slb_error.effective_address); + break; + case MCE_ERROR_TYPE_ERAT: + subtype = evt->u.erat_error.erat_error_type < + ARRAY_SIZE(mc_erat_types) ? + mc_erat_types[evt->u.erat_error.erat_error_type] + : "Unknown"; + printk("%s Error type: ERAT [%s]\n", level, subtype); + if (evt->u.erat_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.erat_error.effective_address); + break; + case MCE_ERROR_TYPE_TLB: + subtype = evt->u.tlb_error.tlb_error_type < + ARRAY_SIZE(mc_tlb_types) ? 
+ mc_tlb_types[evt->u.tlb_error.tlb_error_type] + : "Unknown"; + printk("%s Error type: TLB [%s]\n", level, subtype); + if (evt->u.tlb_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.tlb_error.effective_address); + break; + default: + case MCE_ERROR_TYPE_UNKNOWN: + printk("%s Error type: Unknown\n", level); + break; + } +} + +uint64_t get_mce_fault_addr(struct machine_check_event *evt) +{ + switch (evt->error_type) { + case MCE_ERROR_TYPE_UE: + if (evt->u.ue_error.effective_address_provided) + return evt->u.ue_error.effective_address; + break; + case MCE_ERROR_TYPE_SLB: + if (evt->u.slb_error.effective_address_provided) + return evt->u.slb_error.effective_address; + break; + case MCE_ERROR_TYPE_ERAT: + if (evt->u.erat_error.effective_address_provided) + return evt->u.erat_error.effective_address; + break; + case MCE_ERROR_TYPE_TLB: + if (evt->u.tlb_error.effective_address_provided) + return evt->u.tlb_error.effective_address; + break; + default: + case MCE_ERROR_TYPE_UNKNOWN: + break; + } + return 0; +} +EXPORT_SYMBOL(get_mce_fault_addr); --- linux-3.13.0.orig/arch/powerpc/kernel/mce_power.c +++ linux-3.13.0/arch/powerpc/kernel/mce_power.c @@ -0,0 +1,313 @@ +/* + * Machine check exception handling CPU-side for power7 and power8 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright 2013 IBM Corporation + * Author: Mahesh Salgaonkar + */ + +#undef DEBUG +#define pr_fmt(fmt) "mce_power: " fmt + +#include +#include +#include +#include +#include + +/* flush SLBs and reload */ +static void flush_and_reload_slb(void) +{ + struct slb_shadow *slb; + unsigned long i, n; + + /* Invalidate all SLBs */ + asm volatile("slbmte %0,%0; slbia" : : "r" (0)); + +#ifdef CONFIG_KVM_BOOK3S_HANDLER + /* + * If machine check is hit when in guest or in transition, we will + * only flush the SLBs and continue. + */ + if (get_paca()->kvm_hstate.in_guest) + return; +#endif + + /* For host kernel, reload the SLBs from shadow SLB buffer. */ + slb = get_slb_shadow(); + if (!slb) + return; + + n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE); + + /* Load up the SLB entries from shadow SLB */ + for (i = 0; i < n; i++) { + unsigned long rb = be64_to_cpu(slb->save_area[i].esid); + unsigned long rs = be64_to_cpu(slb->save_area[i].vsid); + + rb = (rb & ~0xFFFul) | i; + asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb)); + } +} + +static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) +{ + long handled = 1; + + /* + * flush and reload SLBs for SLB errors and flush TLBs for TLB errors. + * reset the error bits whenever we handle them so that at the end + * we can check whether we handled all of them or not. 
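mce_handle_derror(), whose body continues just below, follows a simple discipline: for every DSISR error bit it knows how to recover (flush and reload the SLBs, flush the TLB), it clears that bit, and whatever is still set at the end marks the event as unhandled. The same pattern in a standalone sketch, with invented bit names and recovery stubs rather than the real DSISR layout:

#include <stdint.h>
#include <stdio.h>

#define ERR_SLB_MULTIHIT  0x0001u	/* invented bit layout, for illustration */
#define ERR_TLB_MULTIHIT  0x0002u
#define ERR_UNKNOWN_STUFF 0x8000u

static void flush_and_reload_slb(void) { puts("SLBs flushed and reloaded"); }
static void flush_tlb(void)            { puts("TLB flushed"); }

/* Returns 1 when every reported error bit was recovered, 0 otherwise. */
static int handle_derror(uint32_t dsisr)
{
	if (dsisr & ERR_SLB_MULTIHIT) {
		flush_and_reload_slb();
		dsisr &= ~ERR_SLB_MULTIHIT;	/* clear what we just handled */
	}
	if (dsisr & ERR_TLB_MULTIHIT) {
		flush_tlb();
		dsisr &= ~ERR_TLB_MULTIHIT;
	}
	return dsisr == 0;			/* leftovers mean "not handled" */
}

int main(void)
{
	printf("recoverable: %d\n", handle_derror(ERR_SLB_MULTIHIT | ERR_TLB_MULTIHIT));
	printf("recoverable: %d\n", handle_derror(ERR_SLB_MULTIHIT | ERR_UNKNOWN_STUFF));
	return 0;
}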
+ * */ + if (dsisr & slb_error_bits) { + flush_and_reload_slb(); + /* reset error bits */ + dsisr &= ~(slb_error_bits); + } + if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); + /* reset error bits */ + dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; + } + /* Any other errors we don't understand? */ + if (dsisr & 0xffffffffUL) + handled = 0; + + return handled; +} + +static long mce_handle_derror_p7(uint64_t dsisr) +{ + return mce_handle_derror(dsisr, P7_DSISR_MC_SLB_ERRORS); +} + +static long mce_handle_common_ierror(uint64_t srr1) +{ + long handled = 0; + + switch (P7_SRR1_MC_IFETCH(srr1)) { + case 0: + break; + case P7_SRR1_MC_IFETCH_SLB_PARITY: + case P7_SRR1_MC_IFETCH_SLB_MULTIHIT: + /* flush and reload SLBs for SLB errors. */ + flush_and_reload_slb(); + handled = 1; + break; + case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); + handled = 1; + } + break; + default: + break; + } + + return handled; +} + +static long mce_handle_ierror_p7(uint64_t srr1) +{ + long handled = 0; + + handled = mce_handle_common_ierror(srr1); + + if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) { + flush_and_reload_slb(); + handled = 1; + } + return handled; +} + +static void mce_get_common_ierror(struct mce_error_info *mce_err, uint64_t srr1) +{ + switch (P7_SRR1_MC_IFETCH(srr1)) { + case P7_SRR1_MC_IFETCH_SLB_PARITY: + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; + break; + case P7_SRR1_MC_IFETCH_SLB_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + break; + case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_TLB; + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + break; + case P7_SRR1_MC_IFETCH_UE: + case P7_SRR1_MC_IFETCH_UE_IFU_INTERNAL: + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH; + break; + case P7_SRR1_MC_IFETCH_UE_TLB_RELOAD: + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = + MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH; + break; + } +} + +static void mce_get_ierror_p7(struct mce_error_info *mce_err, uint64_t srr1) +{ + mce_get_common_ierror(mce_err, srr1); + if (P7_SRR1_MC_IFETCH(srr1) == P7_SRR1_MC_IFETCH_SLB_BOTH) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE; + } +} + +static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr) +{ + if (dsisr & P7_DSISR_MC_UE) { + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE; + } else if (dsisr & P7_DSISR_MC_UE_TABLEWALK) { + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = + MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE; + } else if (dsisr & P7_DSISR_MC_ERAT_MULTIHIT) { + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + } else if (dsisr & P7_DSISR_MC_SLB_MULTIHIT) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + } else if (dsisr & P7_DSISR_MC_SLB_PARITY_MFSLB) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; + } else if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { + mce_err->error_type = MCE_ERROR_TYPE_TLB; + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + } else if (dsisr & 
P7_DSISR_MC_SLB_MULTIHIT_PARITY) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE; + } +} + +static long mce_handle_ue_error(struct pt_regs *regs) +{ + long handled = 0; + + /* + * On specific SCOM read via MMIO we may get a machine check + * exception with SRR0 pointing inside opal. If that is the + * case OPAL may have recovery address to re-read SCOM data in + * different way and hence we can recover from this MC. + */ + + if (ppc_md.mce_check_early_recovery) { + if (ppc_md.mce_check_early_recovery(regs)) + handled = 1; + } + return handled; +} + +long __machine_check_early_realmode_p7(struct pt_regs *regs) +{ + uint64_t srr1, nip, addr; + long handled = 1; + struct mce_error_info mce_error_info = { 0 }; + + srr1 = regs->msr; + nip = regs->nip; + + /* + * Handle memory errors depending whether this was a load/store or + * ifetch exception. Also, populate the mce error_type and + * type-specific error_type from either SRR1 or DSISR, depending + * whether this was a load/store or ifetch exception + */ + if (P7_SRR1_MC_LOADSTORE(srr1)) { + handled = mce_handle_derror_p7(regs->dsisr); + mce_get_derror_p7(&mce_error_info, regs->dsisr); + addr = regs->dar; + } else { + handled = mce_handle_ierror_p7(srr1); + mce_get_ierror_p7(&mce_error_info, srr1); + addr = regs->nip; + } + + /* Handle UE error. */ + if (mce_error_info.error_type == MCE_ERROR_TYPE_UE) + handled = mce_handle_ue_error(regs); + + save_mce_event(regs, handled, &mce_error_info, nip, addr); + return handled; +} + +static void mce_get_ierror_p8(struct mce_error_info *mce_err, uint64_t srr1) +{ + mce_get_common_ierror(mce_err, srr1); + if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) { + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + } +} + +static void mce_get_derror_p8(struct mce_error_info *mce_err, uint64_t dsisr) +{ + mce_get_derror_p7(mce_err, dsisr); + if (dsisr & P8_DSISR_MC_ERAT_MULTIHIT_SEC) { + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + } +} + +static long mce_handle_ierror_p8(uint64_t srr1) +{ + long handled = 0; + + handled = mce_handle_common_ierror(srr1); + + if (P7_SRR1_MC_IFETCH(srr1) == P8_SRR1_MC_IFETCH_ERAT_MULTIHIT) { + flush_and_reload_slb(); + handled = 1; + } + return handled; +} + +static long mce_handle_derror_p8(uint64_t dsisr) +{ + return mce_handle_derror(dsisr, P8_DSISR_MC_SLB_ERRORS); +} + +long __machine_check_early_realmode_p8(struct pt_regs *regs) +{ + uint64_t srr1, nip, addr; + long handled = 1; + struct mce_error_info mce_error_info = { 0 }; + + srr1 = regs->msr; + nip = regs->nip; + + if (P7_SRR1_MC_LOADSTORE(srr1)) { + handled = mce_handle_derror_p8(regs->dsisr); + mce_get_derror_p8(&mce_error_info, regs->dsisr); + addr = regs->dar; + } else { + handled = mce_handle_ierror_p8(srr1); + mce_get_ierror_p8(&mce_error_info, srr1); + addr = regs->nip; + } + + /* Handle UE error. 
*/ + if (mce_error_info.error_type == MCE_ERROR_TYPE_UE) + handled = mce_handle_ue_error(regs); + + save_mce_event(regs, handled, &mce_error_info, nip, addr); + return handled; +} --- linux-3.13.0.orig/arch/powerpc/kernel/misc_32.S +++ linux-3.13.0/arch/powerpc/kernel/misc_32.S @@ -57,11 +57,14 @@ mtlr r0 blr +/* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); + */ _GLOBAL(call_do_irq) mflr r0 stw r0,4(r1) lwz r10,THREAD+KSP_LIMIT(r2) - addi r11,r3,THREAD_INFO_GAP + addi r11,r4,THREAD_INFO_GAP stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 stw r10,8(r1) --- linux-3.13.0.orig/arch/powerpc/kernel/misc_64.S +++ linux-3.13.0/arch/powerpc/kernel/misc_64.S @@ -481,6 +481,49 @@ #ifdef CONFIG_KEXEC +#ifdef CONFIG_PPC_BOOK3E +/* BOOK3E have no a real MMU mode so we have to setup the initial TLB + * for a core to map v:0 to p:0 as 1:1. This current implementation + * assume that 1G is enough for kexec. + */ +#include +kexec_create_tlb: + /* Invalidate all TLBs to avoid any TLB conflict. */ + PPC_TLBILX_ALL(0,R0) + sync + isync + + mfspr r10,SPRN_TLB1CFG + andi. r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */ + subi r10,r10,1 /* Often its always safe to use last */ + lis r9,MAS0_TLBSEL(1)@h + rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r9) */ + +/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it. + */ +#ifdef CONFIG_SMP +#define M_IF_SMP MAS2_M +#else +#define M_IF_SMP 0 +#endif + mtspr SPRN_MAS0,r9 + + lis r9,(MAS1_VALID|MAS1_IPROT)@h + ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l + mtspr SPRN_MAS1,r9 + + LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP) + mtspr SPRN_MAS2,r9 + + LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX) + mtspr SPRN_MAS3,r9 + li r9,0 + mtspr SPRN_MAS7,r9 + + tlbwe + isync + blr +#endif /* kexec_smp_wait(void) * @@ -494,6 +537,10 @@ */ _GLOBAL(kexec_smp_wait) lhz r3,PACAHWCPUID(r13) +#ifdef CONFIG_PPC_BOOK3E + /* Create a 1:1 mapping. */ + bl kexec_create_tlb +#endif bl real_mode li r4,KEXEC_STATE_REAL_MODE @@ -510,6 +557,7 @@ * don't overwrite r3 here, it is live for kexec_wait above. */ real_mode: /* assume normal blr return */ +#ifndef CONFIG_PPC_BOOK3E 1: li r9,MSR_RI li r10,MSR_DR|MSR_IR mflr r11 /* return address to SRR0 */ @@ -521,7 +569,10 @@ mtspr SPRN_SRR1,r10 mtspr SPRN_SRR0,r11 rfid - +#else + /* the real mode is nothing for book3e. */ + blr +#endif /* * kexec_sequence(newstack, start, image, control, clear_all()) @@ -564,9 +615,15 @@ lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ /* disable interrupts, we are overwriting kernel data next */ +#ifndef CONFIG_PPC_BOOK3E mfmsr r3 rlwinm r3,r3,0,17,15 mtmsrd r3,1 +#else + wrteei 0 + /* Create a 1:1 mapping. */ + bl kexec_create_tlb +#endif /* copy dest pages, flush whole dest image */ mr r3,r29 @@ -583,15 +640,23 @@ bl .copy_and_flush /* (dest, src, copy limit, start offset) */ 1: /* assume normal blr return */ + /* notify we're going into kexec kernel for SMP. 
*/ + LOAD_REG_ADDR(r3,__run_at_kexec) + li r4,1 + std r4,0(r3) + sync + /* release other cpus to the new kernel secondary start at 0x60 */ mflr r5 li r6,1 stw r6,kexec_flag-1b(5) +#ifndef CONFIG_PPC_BOOK3E /* clear out hardware hash page table and tlb */ ld r5,0(r27) /* deref function descriptor */ mtctr r5 bctrl /* ppc_md.hpte_clear_all(void); */ +#endif /* * kexec image calling is: --- linux-3.13.0.orig/arch/powerpc/kernel/pci-common.c +++ linux-3.13.0/arch/powerpc/kernel/pci-common.c @@ -1483,9 +1483,15 @@ return pci_enable_resources(dev, mask); } +/* Before assuming too much here, take care to realize that we need sign + * extension from 32-bit pointers to 64-bit resource addresses to work. + */ resource_size_t pcibios_io_space_offset(struct pci_controller *hose) { - return (unsigned long) hose->io_base_virt - _IO_BASE; + long vbase = (long)hose->io_base_virt; + long io_base = _IO_BASE; + + return (resource_size_t)(vbase - io_base); } static void pcibios_setup_phb_resources(struct pci_controller *hose, --- linux-3.13.0.orig/arch/powerpc/kernel/process.c +++ linux-3.13.0/arch/powerpc/kernel/process.c @@ -74,6 +74,48 @@ struct task_struct *last_task_used_spe = NULL; #endif +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +void giveup_fpu_maybe_transactional(struct task_struct *tsk) +{ + /* + * If we are saving the current thread's registers, and the + * thread is in a transactional state, set the TIF_RESTORE_TM + * bit so that we know to restore the registers before + * returning to userspace. + */ + if (tsk == current && tsk->thread.regs && + MSR_TM_ACTIVE(tsk->thread.regs->msr) && + !test_thread_flag(TIF_RESTORE_TM)) { + tsk->thread.tm_orig_msr = tsk->thread.regs->msr; + set_thread_flag(TIF_RESTORE_TM); + } + + giveup_fpu(tsk); +} + +void giveup_altivec_maybe_transactional(struct task_struct *tsk) +{ + /* + * If we are saving the current thread's registers, and the + * thread is in a transactional state, set the TIF_RESTORE_TM + * bit so that we know to restore the registers before + * returning to userspace. 
+ */ + if (tsk == current && tsk->thread.regs && + MSR_TM_ACTIVE(tsk->thread.regs->msr) && + !test_thread_flag(TIF_RESTORE_TM)) { + tsk->thread.tm_orig_msr = tsk->thread.regs->msr; + set_thread_flag(TIF_RESTORE_TM); + } + + giveup_altivec(tsk); +} + +#else +#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk) +#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk) +#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ + #ifdef CONFIG_PPC_FPU /* * Make sure the floating-point register state in the @@ -102,13 +144,13 @@ */ BUG_ON(tsk != current); #endif - giveup_fpu(tsk); + giveup_fpu_maybe_transactional(tsk); } preempt_enable(); } } EXPORT_SYMBOL_GPL(flush_fp_to_thread); -#endif +#endif /* CONFIG_PPC_FPU */ void enable_kernel_fp(void) { @@ -116,11 +158,11 @@ #ifdef CONFIG_SMP if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) - giveup_fpu(current); + giveup_fpu_maybe_transactional(current); else giveup_fpu(NULL); /* just enables FP for kernel */ #else - giveup_fpu(last_task_used_math); + giveup_fpu_maybe_transactional(last_task_used_math); #endif /* CONFIG_SMP */ } EXPORT_SYMBOL(enable_kernel_fp); @@ -132,11 +174,11 @@ #ifdef CONFIG_SMP if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) - giveup_altivec(current); + giveup_altivec_maybe_transactional(current); else giveup_altivec_notask(); #else - giveup_altivec(last_task_used_altivec); + giveup_altivec_maybe_transactional(last_task_used_altivec); #endif /* CONFIG_SMP */ } EXPORT_SYMBOL(enable_kernel_altivec); @@ -153,7 +195,7 @@ #ifdef CONFIG_SMP BUG_ON(tsk != current); #endif - giveup_altivec(tsk); + giveup_altivec_maybe_transactional(tsk); } preempt_enable(); } @@ -182,8 +224,8 @@ void giveup_vsx(struct task_struct *tsk) { - giveup_fpu(tsk); - giveup_altivec(tsk); + giveup_fpu_maybe_transactional(tsk); + giveup_altivec_maybe_transactional(tsk); __giveup_vsx(tsk); } @@ -479,7 +521,48 @@ return false; return true; } + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM +static void tm_reclaim_thread(struct thread_struct *thr, + struct thread_info *ti, uint8_t cause) +{ + unsigned long msr_diff = 0; + + /* + * If FP/VSX registers have been already saved to the + * thread_struct, move them to the transact_fp array. + * We clear the TIF_RESTORE_TM bit since after the reclaim + * the thread will no longer be transactional. + */ + if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) { + msr_diff = thr->tm_orig_msr & ~thr->regs->msr; + if (msr_diff & MSR_FP) + memcpy(&thr->transact_fp, &thr->fp_state, + sizeof(struct thread_fp_state)); + if (msr_diff & MSR_VEC) + memcpy(&thr->transact_vr, &thr->vr_state, + sizeof(struct thread_vr_state)); + clear_ti_thread_flag(ti, TIF_RESTORE_TM); + msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; + } + + tm_reclaim(thr, thr->regs->msr, cause); + + /* Having done the reclaim, we now have the checkpointed + * FP/VSX values in the registers. These might be valid + * even if we have previously called enable_kernel_fp() or + * flush_fp_to_thread(), so update thr->regs->msr to + * indicate their current validity. + */ + thr->regs->msr |= msr_diff; +} + +void tm_reclaim_current(uint8_t cause) +{ + tm_enable(); + tm_reclaim_thread(¤t->thread, current_thread_info(), cause); +} + static inline void tm_reclaim_task(struct task_struct *tsk) { /* We have to work out if we're switching from/to a task that's in the @@ -502,9 +585,11 @@ /* Stash the original thread MSR, as giveup_fpu et al will * modify it. We hold onto it to see whether the task used - * FP & vector regs. + * FP & vector regs. 
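Several of the TM changes in process.c revolve around one comparison: tm_orig_msr captures the MSR as it was when the facility state was flushed, and diffing it against the live MSR tells the kernel which FP/VEC/VSX state has since been turned off and must be copied or reloaded; restore_tm_state(), added a little further down, repeats the same comparison on the way back to userspace. A small sketch of that bit arithmetic, with illustrative flag values rather than the real MSR bit positions:

#include <stdio.h>

/* Illustrative stand-ins for the MSR facility bits. */
#define MSR_FP_BIT  0x1ul
#define MSR_VEC_BIT 0x2ul
#define MSR_VSX_BIT 0x4ul

int main(void)
{
	/* The transaction started with FP and VEC live... */
	unsigned long tm_orig_msr = MSR_FP_BIT | MSR_VEC_BIT;
	/* ...but by now only FP is still enabled in the live MSR. */
	unsigned long live_msr = MSR_FP_BIT;

	/* Facilities that were on at checkpoint time but have been turned off
	 * since: their register state must be brought back before returning. */
	unsigned long msr_diff = tm_orig_msr & ~live_msr;

	if (msr_diff & MSR_FP_BIT)
		puts("reload FP state");
	if (msr_diff & MSR_VEC_BIT)
		puts("reload VEC state");

	live_msr |= msr_diff;		/* hand the facilities back to the task */
	printf("final msr 0x%lx\n", live_msr);
	return 0;
}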
If the TIF_RESTORE_TM flag is set, + * tm_orig_msr is already set. */ - thr->tm_orig_msr = thr->regs->msr; + if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM)) + thr->tm_orig_msr = thr->regs->msr; TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, " "ccr=%lx, msr=%lx, trap=%lx)\n", @@ -512,7 +597,7 @@ thr->regs->ccr, thr->regs->msr, thr->regs->trap); - tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED); + tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED); TM_DEBUG("--- tm_reclaim on pid %d complete\n", tsk->pid); @@ -588,6 +673,43 @@ tm_reclaim_task(prev); } } + +/* + * This is called if we are on the way out to userspace and the + * TIF_RESTORE_TM flag is set. It checks if we need to reload + * FP and/or vector state and does so if necessary. + * If userspace is inside a transaction (whether active or + * suspended) and FP/VMX/VSX instructions have ever been enabled + * inside that transaction, then we have to keep them enabled + * and keep the FP/VMX/VSX state loaded while ever the transaction + * continues. The reason is that if we didn't, and subsequently + * got a FP/VMX/VSX unavailable interrupt inside a transaction, + * we don't know whether it's the same transaction, and thus we + * don't know which of the checkpointed state and the transactional + * state to use. + */ +void restore_tm_state(struct pt_regs *regs) +{ + unsigned long msr_diff; + + clear_thread_flag(TIF_RESTORE_TM); + if (!MSR_TM_ACTIVE(regs->msr)) + return; + + msr_diff = current->thread.tm_orig_msr & ~regs->msr; + msr_diff &= MSR_FP | MSR_VEC | MSR_VSX; + if (msr_diff & MSR_FP) { + fp_enable(); + load_fp_state(¤t->thread.fp_state); + regs->msr |= current->thread.fpexc_mode; + } + if (msr_diff & MSR_VEC) { + vec_enable(); + load_vr_state(¤t->thread.vr_state); + } + regs->msr |= msr_diff; +} + #else #define tm_recheckpoint_new_task(new) #define __switch_to_tm(prev) @@ -604,15 +726,15 @@ WARN_ON(!irqs_disabled()); - /* Back up the TAR across context switches. + /* Back up the TAR and DSCR across context switches. * Note that the TAR is not available for use in the kernel. (To * provide this, the TAR should be backed up/restored on exception * entry/exit instead, and be in pt_regs. FIXME, this should be in * pt_regs anyway (for debug).) - * Save the TAR here before we do treclaim/trecheckpoint as these - * will change the TAR. + * Save the TAR and DSCR here before we do treclaim/trecheckpoint as + * these will change them. */ - save_tar(&prev->thread); + save_early_sprs(&prev->thread); __switch_to_tm(prev); @@ -927,6 +1049,15 @@ flush_altivec_to_thread(src); flush_vsx_to_thread(src); flush_spe_to_thread(src); + /* + * Flush TM state out so we can copy it. __switch_to_tm() does this + * flush but it removes the checkpointed state from the current CPU and + * transitions the CPU out of TM mode. Hence we need to call + * tm_recheckpoint_new_task() (on the same task) to restore the + * checkpointed state back and the TM mode. + */ + __switch_to_tm(src); + tm_recheckpoint_new_task(src); *dst = *src; --- linux-3.13.0.orig/arch/powerpc/kernel/prom.c +++ linux-3.13.0/arch/powerpc/kernel/prom.c @@ -737,6 +737,11 @@ spinning_secondaries = boot_cpu_count - 1; #endif +#ifdef CONFIG_PPC_POWERNV + /* Scan and build the list of machine check recoverable ranges */ + of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL); +#endif + DBG(" <- early_init_devtree()\n"); } --- linux-3.13.0.orig/arch/powerpc/kernel/reloc_64.S +++ linux-3.13.0/arch/powerpc/kernel/reloc_64.S @@ -69,8 +69,8 @@ * R_PPC64_RELATIVE ones. 
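The reloc_64.S fix that follows replaces a 32-bit lwz at offset 12 with a full 64-bit load of r_info, because the old form only picked out the type field correctly on big-endian layouts. The Elf64_Rela record it walks (r_offset at 0, r_info at 8, r_addend at 16) and the type extraction look like this in C, using the standard definitions from <elf.h>:

#include <elf.h>
#include <stdio.h>

int main(void)
{
	/* One illustrative relocation entry: symbol index 0, type R_PPC64_RELATIVE. */
	Elf64_Rela rela = {
		.r_offset = 0x10000,
		.r_info   = ELF64_R_INFO(0, R_PPC64_RELATIVE),
		.r_addend = 0x400,
	};

	/* r_info packs the symbol index in the high 32 bits and the type in the
	 * low 32 bits, which is why the assembly now loads all 8 bytes at offset 8. */
	printf("type=%lu sym=%lu addend=0x%llx\n",
	       (unsigned long)ELF64_R_TYPE(rela.r_info),
	       (unsigned long)ELF64_R_SYM(rela.r_info),
	       (unsigned long long)rela.r_addend);

	return (ELF64_R_TYPE(rela.r_info) == R_PPC64_RELATIVE) ? 0 : 1;
}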
*/ mtctr r8 -5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */ - cmpwi r0,R_PPC64_RELATIVE +5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */ + cmpdi r0,R_PPC64_RELATIVE bne 6f ld r6,0(r9) /* reloc->r_offset */ ld r0,16(r9) /* reloc->r_addend */ @@ -81,6 +81,7 @@ 6: blr +.balign 8 p_dyn: .llong __dynamic_start - 0b p_rela: .llong __rela_dyn_start - 0b p_st: .llong _stext - 0b --- linux-3.13.0.orig/arch/powerpc/kernel/rtas.c +++ linux-3.13.0/arch/powerpc/kernel/rtas.c @@ -993,32 +993,36 @@ (struct rtas_ext_event_log_v6 *)log->buffer; struct pseries_errorlog *sect; unsigned char *p, *log_end; + uint32_t ext_log_length = rtas_error_extended_log_length(log); + uint8_t log_format = rtas_ext_event_log_format(ext_log); + uint32_t company_id = rtas_ext_event_company_id(ext_log); /* Check that we understand the format */ - if (log->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || - ext_log->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || - ext_log->company_id != RTAS_V6EXT_COMPANY_ID_IBM) + if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) || + log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || + company_id != RTAS_V6EXT_COMPANY_ID_IBM) return NULL; - log_end = log->buffer + log->extended_log_length; + log_end = log->buffer + ext_log_length; p = ext_log->vendor_log; while (p < log_end) { sect = (struct pseries_errorlog *)p; - if (sect->id == section_id) + if (pseries_errorlog_id(sect) == section_id) return sect; - p += sect->length; + p += pseries_errorlog_length(sect); } return NULL; } +/* We assume to be passed big endian arguments */ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) { struct rtas_args args; unsigned long flags; char *buff_copy, *errbuf = NULL; - int nargs; + int nargs, nret, token; int rc; if (!capable(CAP_SYS_ADMIN)) @@ -1027,10 +1031,13 @@ if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0) return -EFAULT; - nargs = args.nargs; + nargs = be32_to_cpu(args.nargs); + nret = be32_to_cpu(args.nret); + token = be32_to_cpu(args.token); + if (nargs > ARRAY_SIZE(args.args) - || args.nret > ARRAY_SIZE(args.args) - || nargs + args.nret > ARRAY_SIZE(args.args)) + || nret > ARRAY_SIZE(args.args) + || nargs + nret > ARRAY_SIZE(args.args)) return -EINVAL; /* Copy in args. */ @@ -1038,14 +1045,14 @@ nargs * sizeof(rtas_arg_t)) != 0) return -EFAULT; - if (args.token == RTAS_UNKNOWN_SERVICE) + if (token == RTAS_UNKNOWN_SERVICE) return -EINVAL; args.rets = &args.args[nargs]; - memset(args.rets, 0, args.nret * sizeof(rtas_arg_t)); + memset(args.rets, 0, nret * sizeof(rtas_arg_t)); /* Need to handle ibm,suspend_me call specially */ - if (args.token == ibm_suspend_me_token) { + if (token == ibm_suspend_me_token) { rc = rtas_ibm_suspend_me(&args); if (rc) return rc; @@ -1062,7 +1069,7 @@ /* A -1 return code indicates that the last command couldn't be completed due to a hardware error. */ - if (args.rets[0] == -1) + if (be32_to_cpu(args.rets[0]) == -1) errbuf = __fetch_rtas_last_error(buff_copy); unlock_rtas(flags); @@ -1077,7 +1084,7 @@ /* Copy out args. 
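The ppc_rtas() changes above treat the user-supplied rtas_args as what the firmware interface defines it to be, a big-endian structure, and byte-swap nargs, nret and token before doing any bounds checking. The same validate-after-swap pattern in a standalone sketch, using be32toh() from <endian.h>; the structure here is a simplified stand-in, not the kernel's rtas_args.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ARGS 16			/* stand-in for ARRAY_SIZE(args.args) */

struct fake_rtas_args {			/* all fields stored big endian */
	uint32_t token;
	uint32_t nargs;
	uint32_t nret;
	uint32_t args[MAX_ARGS];
};

static int check_args(const struct fake_rtas_args *a)
{
	uint32_t nargs = be32toh(a->nargs);	/* swap before comparing */
	uint32_t nret  = be32toh(a->nret);

	if (nargs > MAX_ARGS || nret > MAX_ARGS || nargs + nret > MAX_ARGS)
		return -1;			/* would be -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	struct fake_rtas_args ok  = { .nargs = htobe32(2),  .nret = htobe32(1)  };
	struct fake_rtas_args bad = { .nargs = htobe32(12), .nret = htobe32(12) };

	printf("ok: %d, bad: %d\n", check_args(&ok), check_args(&bad));
	return 0;
}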
*/ if (copy_to_user(uargs->args + nargs, args.args + nargs, - args.nret * sizeof(rtas_arg_t)) != 0) + nret * sizeof(rtas_arg_t)) != 0) return -EFAULT; return 0; --- linux-3.13.0.orig/arch/powerpc/kernel/rtas_flash.c +++ linux-3.13.0/arch/powerpc/kernel/rtas_flash.c @@ -705,7 +705,7 @@ if (rtas_token("ibm,update-flash-64-and-reboot") == RTAS_UNKNOWN_SERVICE) { pr_info("rtas_flash: no firmware flash support\n"); - return 1; + return -EINVAL; } rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); --- linux-3.13.0.orig/arch/powerpc/kernel/rtasd.c +++ linux-3.13.0/arch/powerpc/kernel/rtasd.c @@ -150,8 +150,8 @@ struct rtas_error_log *errlog = (struct rtas_error_log *)buf; printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n", - error_log_cnt, rtas_event_type(errlog->type), - errlog->severity); + error_log_cnt, rtas_event_type(rtas_error_type(errlog)), + rtas_error_severity(errlog)); } } @@ -159,14 +159,16 @@ { int len; struct rtas_error_log *err; + uint32_t extended_log_length; /* rtas fixed header */ len = 8; err = (struct rtas_error_log *)buf; - if (err->extended && err->extended_log_length) { + extended_log_length = rtas_error_extended_log_length(err); + if (rtas_error_extended(err) && extended_log_length) { /* extended header */ - len += err->extended_log_length; + len += extended_log_length; } if (rtas_error_log_max == 0) @@ -293,15 +295,13 @@ static void handle_rtas_event(const struct rtas_error_log *log) { - if (log->type == RTAS_TYPE_PRRN) { - /* For PRRN Events the extended log length is used to denote - * the scope for calling rtas update-nodes. - */ - if (prrn_is_enabled()) - prrn_schedule_update(log->extended_log_length); - } + if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled()) + return; - return; + /* For PRRN Events the extended log length is used to denote + * the scope for calling rtas update-nodes. + */ + prrn_schedule_update(rtas_error_extended_log_length(log)); } #else --- linux-3.13.0.orig/arch/powerpc/kernel/setup-common.c +++ linux-3.13.0/arch/powerpc/kernel/setup-common.c @@ -456,9 +456,17 @@ } for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { + bool avail; + DBG(" thread %d -> cpu %d (hard id %d)\n", j, cpu, be32_to_cpu(intserv[j])); - set_cpu_present(cpu, true); + + avail = of_device_is_available(dn); + if (!avail) + avail = !of_property_match_string(dn, + "enable-method", "spin-table"); + + set_cpu_present(cpu, avail); set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); set_cpu_possible(cpu, true); cpu++; --- linux-3.13.0.orig/arch/powerpc/kernel/setup_64.c +++ linux-3.13.0/arch/powerpc/kernel/setup_64.c @@ -544,7 +544,8 @@ /* * Stack space used when we detect a bad kernel stack pointer, and - * early in SMP boots before relocation is enabled. + * early in SMP boots before relocation is enabled. Exclusive emergency + * stack for machine checks. */ static void __init emergency_stack_init(void) { @@ -567,6 +568,13 @@ sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); sp += THREAD_SIZE; paca[i].emergency_sp = __va(sp); + +#ifdef CONFIG_PPC_BOOK3S_64 + /* emergency stack for machine check exception handling. 
*/ + sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); + sp += THREAD_SIZE; + paca[i].mc_emergency_sp = __va(sp); +#endif } } --- linux-3.13.0.orig/arch/powerpc/kernel/signal.c +++ linux-3.13.0/arch/powerpc/kernel/signal.c @@ -203,8 +203,7 @@ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (MSR_TM_ACTIVE(regs->msr)) { - tm_enable(); - tm_reclaim(¤t->thread, regs->msr, TM_CAUSE_SIGNAL); + tm_reclaim_current(TM_CAUSE_SIGNAL); if (MSR_TM_TRANSACTIONAL(regs->msr)) return current->thread.ckpt_regs.gpr[1]; } --- linux-3.13.0.orig/arch/powerpc/kernel/signal_32.c +++ linux-3.13.0/arch/powerpc/kernel/signal_32.c @@ -519,6 +519,13 @@ { unsigned long msr = regs->msr; + /* Remove TM bits from thread's MSR. The MSR in the sigcontext + * just indicates to userland that we were doing a transaction, but we + * don't want to return in transactional state. This also ensures + * that flush_fp_to_thread won't set TIF_RESTORE_TM again. + */ + regs->msr &= ~MSR_TS_MASK; + /* Make sure floating point registers are stored in regs */ flush_fp_to_thread(current); @@ -1056,13 +1063,6 @@ /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; regs->msr |= (MSR_KERNEL & MSR_LE); -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state: - */ - regs->msr &= ~MSR_TS_MASK; -#endif return 1; badframe: @@ -1484,13 +1484,6 @@ regs->nip = (unsigned long) ka->sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state: - */ - regs->msr &= ~MSR_TS_MASK; -#endif return 1; badframe: --- linux-3.13.0.orig/arch/powerpc/kernel/signal_64.c +++ linux-3.13.0/arch/powerpc/kernel/signal_64.c @@ -65,8 +65,8 @@ struct siginfo __user *pinfo; void __user *puc; struct siginfo info; - /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ - char abigap[288]; + /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */ + char abigap[USER_REDZONE_SIZE]; } __attribute__ ((aligned (16))); static const char fmt32[] = KERN_INFO \ @@ -192,6 +192,13 @@ BUG_ON(!MSR_TM_ACTIVE(regs->msr)); + /* Remove TM bits from thread's MSR. The MSR in the sigcontext + * just indicates to userland that we were doing a transaction, but we + * don't want to return in transactional state. This also ensures + * that flush_fp_to_thread won't set TIF_RESTORE_TM again. + */ + regs->msr &= ~MSR_TS_MASK; + flush_fp_to_thread(current); #ifdef CONFIG_ALTIVEC @@ -749,13 +756,6 @@ /* Make sure signal handler doesn't get spurious FP exceptions */ current->thread.fp_state.fpscr = 0; -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM - /* Remove TM bits from thread's MSR. The MSR in the sigcontext - * just indicates to userland that we were doing a transaction, but we - * don't want to return in transactional state: - */ - regs->msr &= ~MSR_TS_MASK; -#endif /* Set up to return from userspace. 
*/ if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { --- linux-3.13.0.orig/arch/powerpc/kernel/smp.c +++ linux-3.13.0/arch/powerpc/kernel/smp.c @@ -384,6 +384,7 @@ #ifdef CONFIG_PPC64 paca[boot_cpuid].__current = current; #endif + set_numa_node(numa_cpu_lookup_table[boot_cpuid]); current_set[boot_cpuid] = task_thread_info(current); } @@ -744,6 +745,12 @@ } traverse_core_siblings(cpu, true); + /* + * numa_node_id() works after this. + */ + set_numa_node(numa_cpu_lookup_table[cpu]); + set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); + smp_wmb(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); --- linux-3.13.0.orig/arch/powerpc/kernel/traps.c +++ linux-3.13.0/arch/powerpc/kernel/traps.c @@ -285,6 +285,21 @@ /* What should we do here? We could issue a shutdown or hard reset. */ } + +/* + * This function is called in real mode. Strictly no printk's please. + * + * regs->nip and regs->msr contains srr0 and ssr1. + */ +long machine_check_early(struct pt_regs *regs) +{ + long handled = 0; + + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) + handled = cur_cpu_spec->machine_check_early(regs); + return handled; +} + #endif /* @@ -1384,7 +1399,6 @@ TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n", regs->nip, regs->msr); - tm_enable(); /* We can only have got here if the task started using FP after * beginning the transaction. So, the transactional regs are just a @@ -1393,8 +1407,7 @@ * transaction, and probably retry but now with FP enabled. So the * checkpointed FP registers need to be loaded. */ - tm_reclaim(&current->thread, current->thread.regs->msr, - TM_CAUSE_FAC_UNAV); + tm_reclaim_current(TM_CAUSE_FAC_UNAV); /* Reclaim didn't save out any FPRs to transact_fprs. */ /* Enable FP for the task: */ @@ -1403,11 +1416,19 @@ /* This loads and recheckpoints the FP registers from * thread.fpr[]. They will remain in registers after the * checkpoint so we don't need to reload them after. + * If VMX is in use, the VRs now hold checkpointed values, + * so we don't want to load the VRs from the thread_struct. */ - tm_recheckpoint(&current->thread, regs->msr); + tm_recheckpoint(&current->thread, MSR_FP); + + /* If VMX is in use, get the transactional values back */ + if (regs->msr & MSR_VEC) { + do_load_up_transact_altivec(&current->thread); + /* At this point all the VSX state is loaded, so enable it */ + regs->msr |= MSR_VSX; + } } -#ifdef CONFIG_ALTIVEC void altivec_unavailable_tm(struct pt_regs *regs) { /* See the comments in fp_unavailable_tm(). This function operates @@ -1417,18 +1438,21 @@ TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx," "MSR=%lx\n", regs->nip, regs->msr); - tm_enable(); - tm_reclaim(&current->thread, current->thread.regs->msr, - TM_CAUSE_FAC_UNAV); + tm_reclaim_current(TM_CAUSE_FAC_UNAV); regs->msr |= MSR_VEC; - tm_recheckpoint(&current->thread, regs->msr); + tm_recheckpoint(&current->thread, MSR_VEC); current->thread.used_vr = 1; + + if (regs->msr & MSR_FP) { + do_load_up_transact_fpu(&current->thread); + regs->msr |= MSR_VSX; + } } -#endif -#ifdef CONFIG_VSX void vsx_unavailable_tm(struct pt_regs *regs) { + unsigned long orig_msr = regs->msr; + /* See the comments in fp_unavailable_tm(). This works similarly, * though we're loading both FP and VEC registers in here. 
* @@ -1440,18 +1464,30 @@ "MSR=%lx\n", regs->nip, regs->msr); - tm_enable(); + current->thread.used_vsr = 1; + + /* If FP and VMX are already loaded, we have all the state we need */ + if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) { + regs->msr |= MSR_VSX; + return; + } + /* This reclaims FP and/or VR regs if they're already enabled */ - tm_reclaim(&current->thread, current->thread.regs->msr, - TM_CAUSE_FAC_UNAV); + tm_reclaim_current(TM_CAUSE_FAC_UNAV); regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode | MSR_VSX; - /* This loads & recheckpoints FP and VRs. */ - tm_recheckpoint(&current->thread, regs->msr); - current->thread.used_vsr = 1; + + /* This loads & recheckpoints FP and VRs; but we have + * to be sure not to overwrite previously-valid state. + */ + tm_recheckpoint(&current->thread, regs->msr & ~orig_msr); + + if (orig_msr & MSR_FP) + do_load_up_transact_fpu(&current->thread); + if (orig_msr & MSR_VEC) + do_load_up_transact_altivec(&current->thread); } -#endif #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ void performance_monitor_exception(struct pt_regs *regs) --- linux-3.13.0.orig/arch/powerpc/kernel/vdso32/getcpu.S +++ linux-3.13.0/arch/powerpc/kernel/vdso32/getcpu.S @@ -30,8 +30,8 @@ V_FUNCTION_BEGIN(__kernel_getcpu) .cfi_startproc mfspr r5,SPRN_USPRG3 - cmpdi cr0,r3,0 - cmpdi cr1,r4,0 + cmpwi cr0,r3,0 + cmpwi cr1,r4,0 clrlwi r6,r5,16 rlwinm r7,r5,16,31-15,31-0 beq cr0,1f --- linux-3.13.0.orig/arch/powerpc/kernel/vector.S +++ linux-3.13.0/arch/powerpc/kernel/vector.S @@ -37,6 +37,16 @@ #endif /* + * Enable use of VMX/Altivec for the caller. + */ +_GLOBAL(vec_enable) + mfmsr r3 + oris r3,r3,MSR_VEC@h + MTMSRD(r3) + isync + blr + +/* * Load state from memory into VMX registers including VSCR. * Assumes the caller has enabled VMX in the MSR. */ --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ linux-3.13.0/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -262,7 +262,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) { - kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); + kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); } /* @@ -544,7 +544,7 @@ * If we fail, we just return to the guest and try executing it again. 
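/*
 * The __kernel_getcpu change above switches to 32-bit compares (cmpwi) for
 * the user pointer arguments; the value it then splits comes from
 * SPRN_USPRG3, which in this scheme carries the cpu number in the low 16
 * bits and the node in the high 16 bits (that is what clrlwi/rlwinm
 * extract).  A C equivalent of the unpacking, with that packing assumed:
 */
#include <stdint.h>

static inline int demo_getcpu(uint32_t sprg3, unsigned int *cpu,
			      unsigned int *node)
{
	if (cpu)
		*cpu = sprg3 & 0xffffu;   /* clrlwi r6,r5,16       */
	if (node)
		*node = sprg3 >> 16;      /* rlwinm r7,r5,16,...   */
	return 0;
}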
*/ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) { - ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); + ret = kvmppc_ld32(vcpu, &srr0, &last_inst, false); if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED) return RESUME_GUEST; vcpu->arch.last_inst = last_inst; --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_hv.c +++ linux-3.13.0/arch/powerpc/kvm/book3s_hv.c @@ -85,10 +85,13 @@ /* CPU points to the first thread of the core */ if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { +#ifdef CONFIG_PPC_ICP_NATIVE int real_cpu = cpu + vcpu->arch.ptid; if (paca[real_cpu].kvm_hstate.xics_phys) xics_wake_cpu(real_cpu); - else if (cpu_online(cpu)) + else +#endif + if (cpu_online(cpu)) smp_send_reschedule(cpu); } put_cpu(); @@ -537,6 +540,48 @@ vcpu->arch.dtl.dirty = true; } +static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, + unsigned long resource, unsigned long value1, + unsigned long value2) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *v; + int n; + + switch (resource) { + case H_SET_MODE_RESOURCE_LE: + if (value1) + return H_P3; + if (value2) + return H_P4; + + switch (mflags) { + case 0: + mutex_lock(&kvm->lock); + kvm->arch.lpcr &= ~LPCR_ILE; + kvm_for_each_vcpu(n, v, kvm) + v->arch.intr_msr &= ~MSR_LE; + mutex_unlock(&kvm->lock); + kick_all_cpus_sync(); + return H_SUCCESS; + + case 1: + mutex_lock(&kvm->lock); + kvm->arch.lpcr |= LPCR_ILE; + kvm_for_each_vcpu(n, v, kvm) + v->arch.intr_msr |= MSR_LE; + mutex_unlock(&kvm->lock); + kick_all_cpus_sync(); + return H_SUCCESS; + + default: + return H_UNSUPPORTED_FLAG_START; + } + default: + return H_P2; + } +} + int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) { unsigned long req = kvmppc_get_gpr(vcpu, 3); @@ -602,6 +647,12 @@ /* Send the error out to userspace via KVM_RUN */ return rc; + case H_SET_MODE: + ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6), + kvmppc_get_gpr(vcpu, 7)); + break; case H_XIRR: case H_CPPR: @@ -1017,6 +1068,7 @@ spin_lock_init(&vcpu->arch.vpa_update_lock); spin_lock_init(&vcpu->arch.tbacct_lock); vcpu->arch.busy_preempt = TB_NIL; + vcpu->arch.intr_msr = MSR_SF | MSR_ME; kvmppc_mmu_book3s_hv_init(vcpu); --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_hv_builtin.c +++ linux-3.13.0/arch/powerpc/kvm/book3s_hv_builtin.c @@ -156,6 +156,12 @@ unsigned long align_size; struct memblock_region *reg; phys_addr_t selected_size = 0; + + /* + * We need CMA reservation only when we are in HV mode + */ + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; /* * We cannot use memblock_phys_mem_size() here, because * memblock_analyze() has not been called yet. --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_hv_ras.c +++ linux-3.13.0/arch/powerpc/kvm/book3s_hv_ras.c @@ -12,6 +12,7 @@ #include #include #include +#include /* SRR1 bits for machine check on POWER7 */ #define SRR1_MC_LDSTERR (1ul << (63-42)) @@ -58,18 +59,6 @@ } } -/* POWER7 TLB flush */ -static void flush_tlb_power7(struct kvm_vcpu *vcpu) -{ - unsigned long i, rb; - - rb = TLBIEL_INVAL_SET_LPID; - for (i = 0; i < POWER7_TLB_SETS; ++i) { - asm volatile("tlbiel %0" : : "r" (rb)); - rb += 1 << TLBIEL_INVAL_SET_SHIFT; - } -} - /* * On POWER7, see if we can handle a machine check that occurred inside * the guest in real mode, without switching to the host partition. 
@@ -79,9 +68,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) { unsigned long srr1 = vcpu->arch.shregs.msr; -#ifdef CONFIG_PPC_POWERNV - struct opal_machine_check_event *opal_evt; -#endif + struct machine_check_event mce_evt; long handled = 1; if (srr1 & SRR1_MC_LDSTERR) { @@ -96,7 +83,8 @@ DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI); } if (dsisr & DSISR_MC_TLB_MULTI) { - flush_tlb_power7(vcpu); + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID); dsisr &= ~DSISR_MC_TLB_MULTI; } /* Any other errors we don't understand? */ @@ -113,28 +101,38 @@ reload_slb(vcpu); break; case SRR1_MC_IFETCH_TLBMULTI: - flush_tlb_power7(vcpu); + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID); break; default: handled = 0; } -#ifdef CONFIG_PPC_POWERNV /* - * See if OPAL has already handled the condition. - * We assume that if the condition is recovered then OPAL + * See if we have already handled the condition in the linux host. + * We assume that if the condition is recovered then linux host * will have generated an error log event that we will pick * up and log later. + * Don't release mce event now. In case if condition is not + * recovered we do guest exit and go back to linux host machine + * check handler. Hence we need make sure that current mce event + * is available for linux host to consume. */ - opal_evt = local_paca->opal_mc_evt; - if (opal_evt->version == OpalMCE_V1 && - (opal_evt->severity == OpalMCE_SEV_NO_ERROR || - opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED)) + if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE)) + goto out; + + if (mce_evt.version == MCE_V1 && + (mce_evt.severity == MCE_SEV_NO_ERROR || + mce_evt.disposition == MCE_DISPOSITION_RECOVERED)) handled = 1; +out: + /* + * If we have handled the error, then release the mce event because + * we will be delivering machine check to guest. + */ if (handled) - opal_evt->in_use = 0; -#endif + release_mce_event(); return handled; } --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ linux-3.13.0/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -676,8 +676,7 @@ 12: mr r6,r10 mr r10,r0 mr r7,r11 - li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11,r11,63 + ld r11,VCPU_INTR_MSR(r4) b 5f 11: beq 5f mfspr r0,SPRN_DEC @@ -896,8 +895,7 @@ mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 li r10, BOOK3S_INTERRUPT_EXTERNAL - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11,VCPU_INTR_MSR(r9) 2: mr r4, r9 mtspr SPRN_LPCR, r8 b fast_guest_return @@ -1386,8 +1384,7 @@ mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 li r10, BOOK3S_INTERRUPT_DATA_STORAGE - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11,VCPU_INTR_MSR(r9) fast_interrupt_c_return: 6: ld r7, VCPU_CTR(r9) lwz r8, VCPU_XER(r9) @@ -1415,10 +1412,19 @@ lwz r8, 0(r10) mtmsrd r3 + andi. r0, r11, MSR_LE + /* Store the result */ stw r8, VCPU_LAST_INST(r9) + beq after_inst_store + + /* Swap and store the result */ + addi r4, r9, VCPU_LAST_INST + stwbrx r8, 0, r4 + /* Unset guest mode. 
*/ +after_inst_store: li r0, KVM_GUEST_MODE_HOST_HV stb r0, HSTATE_IN_GUEST(r13) b guest_exit_cont @@ -1456,8 +1462,7 @@ 1: mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 li r10, BOOK3S_INTERRUPT_INST_STORAGE - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11,VCPU_INTR_MSR(r9) b fast_interrupt_c_return 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ @@ -1783,8 +1788,7 @@ beq mc_cont /* If not, deliver a machine check. SRR0/1 are already set */ li r10, BOOK3S_INTERRUPT_MACHINE_CHECK - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11,VCPU_INTR_MSR(r9) b fast_interrupt_c_return /* --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_pr.c +++ linux-3.13.0/arch/powerpc/kvm/book3s_pr.c @@ -627,7 +627,7 @@ u32 last_inst = kvmppc_get_last_inst(vcpu); int ret; - ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); + ret = kvmppc_ld32(vcpu, &srr0, &last_inst, false); if (ret == -ENOENT) { ulong msr = vcpu->arch.shared->msr; --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_pr_papr.c +++ linux-3.13.0/arch/powerpc/kvm/book3s_pr_papr.c @@ -258,6 +258,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) { + int rc, idx; + switch (cmd) { case H_ENTER: return kvmppc_h_pr_enter(vcpu); @@ -286,8 +288,11 @@ break; case H_RTAS: if (list_empty(&vcpu->kvm->arch.rtas_tokens)) - return RESUME_HOST; - if (kvmppc_rtas_hcall(vcpu)) + break; + idx = srcu_read_lock(&vcpu->kvm->srcu); + rc = kvmppc_rtas_hcall(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + if (rc) break; kvmppc_set_gpr(vcpu, 3, 0); return EMULATE_DONE; --- linux-3.13.0.orig/arch/powerpc/kvm/book3s_segment.S +++ linux-3.13.0/arch/powerpc/kvm/book3s_segment.S @@ -289,6 +289,15 @@ #endif stw r0, SVCPU_LAST_INST(r13) +#ifdef CONFIG_PPC64 + andi. r9, r4, MSR_LE + beq no_ld_last_inst + + /* swap and store the result */ + addi r9, r13, SVCPU_LAST_INST + stwbrx r0, 0, r9 +#endif + no_ld_last_inst: /* Unset guest mode */ --- linux-3.13.0.orig/arch/powerpc/kvm/e500_mmu.c +++ linux-3.13.0/arch/powerpc/kvm/e500_mmu.c @@ -127,7 +127,7 @@ } static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, - unsigned int eaddr, int as) + gva_t eaddr, int as) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int victim, tsized; --- linux-3.13.0.orig/arch/powerpc/kvm/emulate.c +++ linux-3.13.0/arch/powerpc/kvm/emulate.c @@ -219,7 +219,6 @@ * lmw * stmw * - * XXX is_bigendian should depend on MMU mapping or MSR[LE] */ /* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. 
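/*
 * Sketch of the endian handling added above: book3s_hv_rmhandlers.S and
 * book3s_segment.S store the last-fetched instruction with stwbrx when the
 * guest MSR has LE set, and the MMIO emulation path flips its effective
 * "is_bigendian" flag for a little-endian guest.  The helpers below only
 * illustrate both ideas under the assumption of a big-endian host view;
 * they are not the kernel's implementation.
 */
#include <stdint.h>
#include <stdbool.h>

/* C analogue of "beq; stwbrx" on a big-endian hypervisor: if the guest is
 * little-endian, the fetched word is byte-reversed before being stored. */
static inline void demo_store_last_inst(uint32_t *slot, uint32_t inst,
					bool guest_msr_le)
{
	*slot = guest_msr_le ? __builtin_bswap32(inst) : inst;
}

/* MMIO flavour: 'not_reverse' means "keep host byte order"; the effective
 * big-endian flag flips when the guest is little-endian. */
static inline bool demo_mmio_is_bigendian(bool guest_is_bigendian,
					  bool not_reverse)
{
	return guest_is_bigendian ? not_reverse : !not_reverse;
}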
*/ --- linux-3.13.0.orig/arch/powerpc/kvm/powerpc.c +++ linux-3.13.0/arch/powerpc/kvm/powerpc.c @@ -673,9 +673,13 @@ } int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, unsigned int bytes, int is_bigendian) + unsigned int rt, unsigned int bytes, int not_reverse) { int idx, ret; + int is_bigendian = not_reverse; + + if (!kvmppc_is_bigendian(vcpu)) + is_bigendian = !not_reverse; if (bytes > sizeof(run->mmio.data)) { printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, @@ -711,21 +715,25 @@ /* Same as above, but sign extends */ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, unsigned int bytes, int is_bigendian) + unsigned int rt, unsigned int bytes, int not_reverse) { int r; vcpu->arch.mmio_sign_extend = 1; - r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); + r = kvmppc_handle_load(run, vcpu, rt, bytes, not_reverse); return r; } int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, - u64 val, unsigned int bytes, int is_bigendian) + u64 val, unsigned int bytes, int not_reverse) { void *data = run->mmio.data; int idx, ret; + int is_bigendian = not_reverse; + + if (!kvmppc_is_bigendian(vcpu)) + is_bigendian = !not_reverse; if (bytes > sizeof(run->mmio.data)) { printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, --- linux-3.13.0.orig/arch/powerpc/mm/fault.c +++ linux-3.13.0/arch/powerpc/mm/fault.c @@ -432,6 +432,8 @@ */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { + if (fault & VM_FAULT_SIGSEGV) + goto bad_area; rc = mm_fault_error(regs, address, fault); if (rc >= MM_FAULT_RETURN) goto bail; --- linux-3.13.0.orig/arch/powerpc/mm/hash_native_64.c +++ linux-3.13.0/arch/powerpc/mm/hash_native_64.c @@ -418,18 +418,18 @@ local_irq_restore(flags); } -static void native_hugepage_invalidate(struct mm_struct *mm, +static void native_hugepage_invalidate(unsigned long vsid, + unsigned long addr, unsigned char *hpte_slot_array, - unsigned long addr, int psize) + int psize, int ssize) { - int ssize = 0, i; - int lock_tlbie; + int i; struct hash_pte *hptep; int actual_psize = MMU_PAGE_16M; unsigned int max_hpte_count, valid; unsigned long flags, s_addr = addr; unsigned long hpte_v, want_v, shift; - unsigned long hidx, vpn = 0, vsid, hash, slot; + unsigned long hidx, vpn = 0, hash, slot; shift = mmu_psize_defs[psize].shift; max_hpte_count = 1U << (PMD_SHIFT - shift); @@ -443,15 +443,6 @@ /* get the vpn */ addr = s_addr + (i * (1ul << shift)); - if (!is_kernel_addr(addr)) { - ssize = user_segment_size(addr); - vsid = get_vsid(mm->context.id, addr, ssize); - WARN_ON(vsid == 0); - } else { - vsid = get_kernel_vsid(addr, mmu_kernel_ssize); - ssize = mmu_kernel_ssize; - } - vpn = hpt_vpn(addr, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); if (hidx & _PTEIDX_SECONDARY) @@ -471,22 +462,13 @@ else /* Invalidate the hpte. NOTE: this also unlocks it */ hptep->v = 0; + /* + * We need to do tlb invalidate for all the address, tlbie + * instruction compares entry_VA in tlb with the VA specified + * here + */ + tlbie(vpn, psize, actual_psize, ssize, 0); } - /* - * Since this is a hugepage, we just need a single tlbie. - * use the last vpn. 
- */ - lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); - if (lock_tlbie) - raw_spin_lock(&native_tlbie_lock); - - asm volatile("ptesync":::"memory"); - __tlbie(vpn, psize, actual_psize, ssize); - asm volatile("eieio; tlbsync; ptesync":::"memory"); - - if (lock_tlbie) - raw_spin_unlock(&native_tlbie_lock); - local_irq_restore(flags); } --- linux-3.13.0.orig/arch/powerpc/mm/hash_utils_64.c +++ linux-3.13.0/arch/powerpc/mm/hash_utils_64.c @@ -206,6 +206,20 @@ if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; + /* + * If relocatable, check if it overlaps interrupt vectors that + * are copied down to real 0. For relocatable kernel + * (e.g. kdump case) we copy interrupt vectors down to real + * address 0. Mark that region as executable. This is + * because on p8 system with relocation on exception feature + * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence + * in order to execute the interrupt handlers in virtual + * mode the vector region need to be marked as executable. + */ + if ((PHYSICAL_START > MEMORY_START) && + overlaps_interrupt_vector_text(vaddr, vaddr + step)) + tprot &= ~HPTE_R_N; + hash = hpt_hash(vpn, shift, ssize); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); @@ -945,6 +959,22 @@ trap, vsid, ssize, psize, lpsize, pte); } +static void check_paca_psize(unsigned long ea, struct mm_struct *mm, + int psize, bool user_region) +{ + if (user_region) { + if (psize != get_paca_psize(ea)) { + get_paca()->context = mm->context; + slb_flush_and_rebolt(); + } + } else if (get_paca()->vmalloc_sllp != + mmu_psize_defs[mmu_vmalloc_psize].sllp) { + get_paca()->vmalloc_sllp = + mmu_psize_defs[mmu_vmalloc_psize].sllp; + slb_vmalloc_update(); + } +} + /* Result code is: * 0 - handled * 1 - normal page fault @@ -1066,6 +1096,8 @@ WARN_ON(1); } #endif + check_paca_psize(ea, mm, psize, user_region); + goto bail; } @@ -1106,17 +1138,8 @@ #endif } } - if (user_region) { - if (psize != get_paca_psize(ea)) { - get_paca()->context = mm->context; - slb_flush_and_rebolt(); - } - } else if (get_paca()->vmalloc_sllp != - mmu_psize_defs[mmu_vmalloc_psize].sllp) { - get_paca()->vmalloc_sllp = - mmu_psize_defs[mmu_vmalloc_psize].sllp; - slb_vmalloc_update(); - } + + check_paca_psize(ea, mm, psize, user_region); #endif /* CONFIG_PPC_64K_PAGES */ #ifdef CONFIG_PPC_HAS_HASH_64K --- linux-3.13.0.orig/arch/powerpc/mm/hugepage-hash64.c +++ linux-3.13.0/arch/powerpc/mm/hugepage-hash64.c @@ -18,6 +18,57 @@ #include #include +static void invalidate_old_hpte(unsigned long vsid, unsigned long addr, + pmd_t *pmdp, unsigned int psize, int ssize) +{ + int i, max_hpte_count, valid; + unsigned long s_addr; + unsigned char *hpte_slot_array; + unsigned long hidx, shift, vpn, hash, slot; + + s_addr = addr & HPAGE_PMD_MASK; + hpte_slot_array = get_hpte_slot_array(pmdp); + /* + * IF we try to do a HUGE PTE update after a withdraw is done. + * we will find the below NULL. 
This happens when we do + * split_huge_page_pmd + */ + if (!hpte_slot_array) + return; + + if (ppc_md.hugepage_invalidate) + return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array, + psize, ssize); + /* + * No bluk hpte removal support, invalidate each entry + */ + shift = mmu_psize_defs[psize].shift; + max_hpte_count = HPAGE_PMD_SIZE >> shift; + for (i = 0; i < max_hpte_count; i++) { + /* + * 8 bits per each hpte entries + * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit] + */ + valid = hpte_valid(hpte_slot_array, i); + if (!valid) + continue; + hidx = hpte_hash_index(hpte_slot_array, i); + + /* get the vpn */ + addr = s_addr + (i * (1ul << shift)); + vpn = hpt_vpn(addr, vsid, ssize); + hash = hpt_hash(vpn, shift, ssize); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; + + slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; + slot += hidx & _PTEIDX_GROUP_IX; + ppc_md.hpte_invalidate(slot, vpn, psize, + MMU_PAGE_16M, ssize, 0); + } +} + + int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, pmd_t *pmdp, unsigned long trap, int local, int ssize, unsigned int psize) @@ -33,7 +84,9 @@ * atomically mark the linux large page PMD busy and dirty */ do { - old_pmd = pmd_val(*pmdp); + pmd_t pmd = ACCESS_ONCE(*pmdp); + + old_pmd = pmd_val(pmd); /* If PMD busy, retry the access */ if (unlikely(old_pmd & _PAGE_BUSY)) return 0; @@ -85,6 +138,15 @@ vpn = hpt_vpn(ea, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); hpte_slot_array = get_hpte_slot_array(pmdp); + if (psize == MMU_PAGE_4K) { + /* + * invalidate the old hpte entry if we have that mapped via 64K + * base page size. This is because demote_segment won't flush + * hash page table entries. + */ + if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) + invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize); + } valid = hpte_valid(hpte_slot_array, index); if (valid) { @@ -107,11 +169,8 @@ * safely update this here. */ valid = 0; - new_pmd &= ~_PAGE_HPTEFLAGS; hpte_slot_array[index] = 0; - } else - /* clear the busy bits and set the hash pte bits */ - new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; + } } if (!valid) { @@ -119,15 +178,17 @@ /* insert new entry */ pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; -repeat: - hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; - - /* clear the busy bits and set the hash pte bits */ - new_pmd = (new_pmd & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE; + new_pmd |= _PAGE_HASHPTE; /* Add in WIMG bits */ rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | - _PAGE_COHERENT | _PAGE_GUARDED)); + _PAGE_GUARDED)); + /* + * enable the memory coherence always + */ + rflags |= HPTE_R_M; +repeat: + hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; /* Insert into the hash table, primary slot */ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, @@ -168,8 +229,17 @@ mark_hpte_slot_valid(hpte_slot_array, index, slot); } /* - * No need to use ldarx/stdcx here + * Mark the pte with _PAGE_COMBO, if we are trying to hash it with + * base page size 4k. + */ + if (psize == MMU_PAGE_4K) + new_pmd |= _PAGE_COMBO; + /* + * The hpte valid is stored in the pgtable whose address is in the + * second half of the PMD. Order this against clearing of the busy bit in + * huge pmd. 
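/*
 * Sketch of the per-HPTE bookkeeping byte walked by the loops above.  Per
 * the comment, each byte holds [ secondary group (1 bit) | hidx (3 bits) |
 * valid bit ], and an entry is turned into an absolute hash-table slot by
 * flipping the hash for secondary-group entries and then indexing into the
 * group.  The DEMO_* constants stand in for the kernel's _PTEIDX_* and
 * HPTES_PER_GROUP definitions and are illustrative only.
 */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_PTEIDX_SECONDARY  0x8u
#define DEMO_PTEIDX_GROUP_IX   0x7u
#define DEMO_HPTES_PER_GROUP   8u

static inline bool demo_hpte_valid(const uint8_t *slot_array, unsigned int i)
{
	return slot_array[i] & 0x1;
}

static inline unsigned int demo_hpte_hash_index(const uint8_t *slot_array,
						unsigned int i)
{
	return slot_array[i] >> 1;   /* secondary bit + 3-bit group index */
}

/* Turn (hash, hidx) into an absolute slot number, as the invalidation
 * loops do. */
static inline unsigned long demo_hpte_slot(unsigned long hash,
					   unsigned long htab_hash_mask,
					   unsigned int hidx)
{
	if (hidx & DEMO_PTEIDX_SECONDARY)
		hash = ~hash;

	return (hash & htab_hash_mask) * DEMO_HPTES_PER_GROUP +
	       (hidx & DEMO_PTEIDX_GROUP_IX);
}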
*/ + smp_wmb(); *pmdp = __pmd(new_pmd & ~_PAGE_BUSY); return 0; } --- linux-3.13.0.orig/arch/powerpc/mm/hugetlbpage.c +++ linux-3.13.0/arch/powerpc/mm/hugetlbpage.c @@ -86,11 +86,6 @@ */ return ((pgd_val(pgd) & 0x3) != 0x0); } - -int pmd_huge_support(void) -{ - return 1; -} #else int pmd_huge(pmd_t pmd) { @@ -106,11 +101,6 @@ { return 0; } - -int pmd_huge_support(void) -{ - return 0; -} #endif pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) --- linux-3.13.0.orig/arch/powerpc/mm/numa.c +++ linux-3.13.0/arch/powerpc/mm/numa.c @@ -31,6 +31,8 @@ #include #include #include +#include +#include #include #include #include @@ -152,9 +154,22 @@ } } -static void map_cpu_to_node(int cpu, int node) +static void reset_numa_cpu_lookup_table(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + numa_cpu_lookup_table[cpu] = -1; +} + +static void update_numa_cpu_lookup_table(unsigned int cpu, int node) { numa_cpu_lookup_table[cpu] = node; +} + +static void map_cpu_to_node(int cpu, int node) +{ + update_numa_cpu_lookup_table(cpu, node); dbg("adding cpu %d to node %d\n", cpu, node); @@ -522,11 +537,24 @@ */ static int numa_setup_cpu(unsigned long lcpu) { - int nid = 0; - struct device_node *cpu = of_get_cpu_node(lcpu, NULL); + int nid; + struct device_node *cpu; + + /* + * If a valid cpu-to-node mapping is already available, use it + * directly instead of querying the firmware, since it represents + * the most recent mapping notified to us by the platform (eg: VPHN). + */ + if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) { + map_cpu_to_node(lcpu, nid); + return nid; + } + + cpu = of_get_cpu_node(lcpu, NULL); if (!cpu) { WARN_ON(1); + nid = 0; goto out; } @@ -560,8 +588,8 @@ case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: unmap_cpu_from_node(lcpu); - break; ret = NOTIFY_OK; + break; #endif } return ret; @@ -1067,6 +1095,7 @@ */ setup_node_to_cpumask_map(); + reset_numa_cpu_lookup_table(); register_cpu_notifier(&ppc64_numa_nb); cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE, (void *)(unsigned long)boot_cpuid); @@ -1388,8 +1417,11 @@ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; u64 flags = 1; int hwcpu = get_hard_smp_processor_id(cpu); + int i; rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); + for (i = 0; i < 6; i++) + retbuf[i] = cpu_to_be64(retbuf[i]); vphn_unpack_associativity(retbuf, associativity); return rc; @@ -1445,6 +1477,33 @@ return 0; } +static int update_lookup_table(void *data) +{ + struct topology_update_data *update; + + if (!data) + return -EINVAL; + + /* + * Upon topology update, the numa-cpu lookup table needs to be updated + * for all threads in the core, including offline CPUs, to ensure that + * future hotplug operations respect the cpu-to-node associativity + * properly. + */ + for (update = data; update; update = update->next) { + int nid, base, j; + + nid = update->new_nid; + base = cpu_first_thread_sibling(update->cpu); + + for (j = 0; j < threads_per_core; j++) { + update_numa_cpu_lookup_table(base + j, nid); + } + } + + return 0; +} + /* * Update the node maps and sysfs entries for each cpu whose home node * has changed. Returns 1 when the topology has changed, and 0 otherwise. @@ -1513,6 +1572,14 @@ stop_machine(update_cpu_topology, &updates[0], &updated_cpus); + /* + * Update the numa-cpu lookup table with the new mappings, even for + * offline CPUs. It is best to perform this update from the stop- + * machine context. 
+ */ + stop_machine(update_lookup_table, &updates[0], + cpumask_of(raw_smp_processor_id())); + for (ud = &updates[0]; ud; ud = ud->next) { unregister_cpu_under_node(ud->cpu, ud->old_nid); register_cpu_under_node(ud->cpu, ud->new_nid); --- linux-3.13.0.orig/arch/powerpc/mm/pgtable_64.c +++ linux-3.13.0/arch/powerpc/mm/pgtable_64.c @@ -55,6 +55,9 @@ #include "mmu_decl.h" +#define CREATE_TRACE_POINTS +#include + /* Some sanity checking */ #if TASK_SIZE_USER64 > PGTABLE_RANGE #error TASK_SIZE_USER64 exceeds pagetable range @@ -499,7 +502,8 @@ } unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp, unsigned long clr) + pmd_t *pmdp, unsigned long clr, + unsigned long set) { unsigned long old, tmp; @@ -515,17 +519,19 @@ andi. %1,%0,%6\n\ bne- 1b \n\ andc %1,%0,%4 \n\ + or %1,%1,%7\n\ stdcx. %1,0,%3 \n\ bne- 1b" : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) - : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) : "cc" ); #else old = pmd_val(*pmdp); - *pmdp = __pmd(old & ~clr); + *pmdp = __pmd((old & ~clr) | set); #endif + trace_hugepage_update(addr, old, clr, set); if (old & _PAGE_HASHPTE) - hpte_do_hugepage_flush(mm, addr, pmdp); + hpte_do_hugepage_flush(mm, addr, pmdp, old); return old; } @@ -629,10 +635,11 @@ * If we didn't had the splitting flag set, go and flush the * HPTE entries. */ + trace_hugepage_splitting(address, old); if (!(old & _PAGE_SPLITTING)) { /* We need to flush the hpte */ if (old & _PAGE_HASHPTE) - hpte_do_hugepage_flush(vma->vm_mm, address, pmdp); + hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old); } } @@ -691,13 +698,14 @@ assert_spin_locked(&mm->page_table_lock); WARN_ON(!pmd_trans_huge(pmd)); #endif + trace_hugepage_set_pmd(addr, pmd); return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); } void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); } /* @@ -705,7 +713,7 @@ * neesd to be flushed. 
*/ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, - pmd_t *pmdp) + pmd_t *pmdp, unsigned long old_pmd) { int ssize, i; unsigned long s_addr; @@ -727,12 +735,29 @@ if (!hpte_slot_array) return; - /* get the base page size */ + /* get the base page size,vsid and segment size */ +#ifdef CONFIG_DEBUG_VM psize = get_slice_psize(mm, s_addr); + BUG_ON(psize == MMU_PAGE_16M); +#endif + if (old_pmd & _PAGE_COMBO) + psize = MMU_PAGE_4K; + else + psize = MMU_PAGE_64K; + + if (!is_kernel_addr(s_addr)) { + ssize = user_segment_size(s_addr); + vsid = get_vsid(mm->context.id, s_addr, ssize); + WARN_ON(vsid == 0); + } else { + vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize); + ssize = mmu_kernel_ssize; + } if (ppc_md.hugepage_invalidate) - return ppc_md.hugepage_invalidate(mm, hpte_slot_array, - s_addr, psize); + return ppc_md.hugepage_invalidate(vsid, s_addr, + hpte_slot_array, + psize, ssize); /* * No bluk hpte removal support, invalidate each entry */ @@ -750,15 +775,6 @@ /* get the vpn */ addr = s_addr + (i * (1ul << shift)); - if (!is_kernel_addr(addr)) { - ssize = user_segment_size(addr); - vsid = get_vsid(mm->context.id, addr, ssize); - WARN_ON(vsid == 0); - } else { - vsid = get_kernel_vsid(addr, mmu_kernel_ssize); - ssize = mmu_kernel_ssize; - } - vpn = hpt_vpn(addr, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); if (hidx & _PTEIDX_SECONDARY) @@ -824,7 +840,7 @@ unsigned long old; pgtable_t *pgtable_slot; - old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); old_pmd = __pmd(old); /* * We have pmd == none and we are holding page_table_lock. --- linux-3.13.0.orig/arch/powerpc/mm/subpage-prot.c +++ linux-3.13.0/arch/powerpc/mm/subpage-prot.c @@ -78,7 +78,7 @@ pte = pte_offset_map_lock(mm, pmd, addr, &ptl); arch_enter_lazy_mmu_mode(); for (; npages > 0; --npages) { - pte_update(mm, addr, pte, 0, 0); + pte_update(mm, addr, pte, 0, 0, 0); addr += PAGE_SIZE; ++pte; } --- linux-3.13.0.orig/arch/powerpc/mm/tlb_hash64.c +++ linux-3.13.0/arch/powerpc/mm/tlb_hash64.c @@ -31,6 +31,8 @@ #include #include +#include + DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); /* @@ -214,10 +216,12 @@ if (ptep == NULL) continue; pte = pte_val(*ptep); + if (hugepage_shift) + trace_hugepage_invalidate(start, pte_val(pte)); if (!(pte & _PAGE_HASHPTE)) continue; if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte))) - hpte_do_hugepage_flush(mm, start, (pmd_t *)pte); + hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte); else hpte_need_flush(mm, start, ptep, pte, 0); } --- linux-3.13.0.orig/arch/powerpc/perf/callchain.c +++ linux-3.13.0/arch/powerpc/perf/callchain.c @@ -243,7 +243,7 @@ sp = regs->gpr[1]; perf_callchain_store(entry, next_ip); - for (;;) { + while (entry->nr < PERF_MAX_STACK_DEPTH) { fp = (unsigned long __user *) sp; if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) return; --- linux-3.13.0.orig/arch/powerpc/perf/core-book3s.c +++ linux-3.13.0/arch/powerpc/perf/core-book3s.c @@ -78,6 +78,7 @@ #define MMCR0_FC56 0 #define MMCR0_PMAO 0 #define MMCR0_EBE 0 +#define MMCR0_BHRBA 0 #define MMCR0_PMCC 0 #define MMCR0_PMCC_U6 0 @@ -120,6 +121,7 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {} void power_pmu_flush_branch_stack(void) {} static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} +static void pmao_restore_workaround(bool ebb) { } #endif /* CONFIG_PPC32 */ static bool regs_use_siar(struct pt_regs *regs) @@ -483,7 +485,7 @@ * check that the PMU supports EBB, meaning 
those that don't can still * use bit 63 of the event code for something else if they wish. */ - return (ppmu->flags & PPMU_EBB) && + return (ppmu->flags & PPMU_ARCH_207S) && ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); } @@ -502,8 +504,11 @@ if (!leader->attr.pinned || !leader->attr.exclusive) return -EINVAL; - if (event->attr.inherit || event->attr.sample_period || - event->attr.enable_on_exec || event->attr.freq) + if (event->attr.freq || + event->attr.inherit || + event->attr.sample_type || + event->attr.sample_period || + event->attr.enable_on_exec) return -EINVAL; } @@ -542,13 +547,21 @@ if (!ebb) goto out; - /* Enable EBB and read/write to all 6 PMCs for userspace */ - mmcr0 |= MMCR0_EBE | MMCR0_PMCC_U6; + /* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */ + mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6; - /* Add any bits from the user reg, FC or PMAO */ + /* + * Add any bits from the user MMCR0, FC or PMAO. This is compatible + * with pmao_restore_workaround() because we may add PMAO but we never + * clear it here. + */ mmcr0 |= current->thread.mmcr0; - /* Be careful not to set PMXE if userspace had it cleared */ + /* + * Be careful not to set PMXE if userspace had it cleared. This is also + * compatible with pmao_restore_workaround() because it has already + * cleared PMXE and we leave PMAO alone. + */ if (!(current->thread.mmcr0 & MMCR0_PMXE)) mmcr0 &= ~MMCR0_PMXE; @@ -559,13 +572,94 @@ out: return mmcr0; } -#endif /* CONFIG_PPC64 */ -static void perf_event_interrupt(struct pt_regs *regs); - -void perf_event_print_debug(void) +static void pmao_restore_workaround(bool ebb) { + unsigned pmcs[6]; + + if (!cpu_has_feature(CPU_FTR_PMAO_BUG)) + return; + + /* + * On POWER8E there is a hardware defect which affects the PMU context + * switch logic, ie. power_pmu_disable/enable(). + * + * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0 + * by the hardware. Sometime later the actual PMU exception is + * delivered. + * + * If we context switch, or simply disable/enable, the PMU prior to the + * exception arriving, the exception will be lost when we clear PMAO. + * + * When we reenable the PMU, we will write the saved MMCR0 with PMAO + * set, and this _should_ generate an exception. However because of the + * defect no exception is generated when we write PMAO, and we get + * stuck with no counters counting but no exception delivered. + * + * The workaround is to detect this case and tweak the hardware to + * create another pending PMU exception. + * + * We do that by setting up PMC6 (cycles) for an imminent overflow and + * enabling the PMU. That causes a new exception to be generated in the + * chip, but we don't take it yet because we have interrupts hard + * disabled. We then write back the PMU state as we want it to be seen + * by the exception handler. When we reenable interrupts the exception + * handler will be called and see the correct state. + * + * The logic is the same for EBB, except that the exception is gated by + * us having interrupts hard disabled as well as the fact that we are + * not in userspace. The exception is finally delivered when we return + * to userspace. + */ + + /* Only if PMAO is set and PMAO_SYNC is clear */ + if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO) + return; + + /* If we're doing EBB, only if BESCR[GE] is set */ + if (ebb && !(current->thread.bescr & BESCR_GE)) + return; + + /* + * We are already soft-disabled in power_pmu_enable(). 
We need to hard + * enable to actually prevent the PMU exception from firing. + */ + hard_irq_disable(); + + /* + * This is a bit gross, but we know we're on POWER8E and have 6 PMCs. + * Using read/write_pmc() in a for loop adds 12 function calls and + * almost doubles our code size. + */ + pmcs[0] = mfspr(SPRN_PMC1); + pmcs[1] = mfspr(SPRN_PMC2); + pmcs[2] = mfspr(SPRN_PMC3); + pmcs[3] = mfspr(SPRN_PMC4); + pmcs[4] = mfspr(SPRN_PMC5); + pmcs[5] = mfspr(SPRN_PMC6); + + /* Ensure all freeze bits are unset */ + mtspr(SPRN_MMCR2, 0); + + /* Set up PMC6 to overflow in one cycle */ + mtspr(SPRN_PMC6, 0x7FFFFFFE); + + /* Enable exceptions and unfreeze PMC6 */ + mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO); + + /* Now we need to refreeze and restore the PMCs */ + mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO); + + mtspr(SPRN_PMC1, pmcs[0]); + mtspr(SPRN_PMC2, pmcs[1]); + mtspr(SPRN_PMC3, pmcs[2]); + mtspr(SPRN_PMC4, pmcs[3]); + mtspr(SPRN_PMC5, pmcs[4]); + mtspr(SPRN_PMC6, pmcs[5]); } +#endif /* CONFIG_PPC64 */ + +static void perf_event_interrupt(struct pt_regs *regs); /* * Read one performance monitor counter (PMC). @@ -645,6 +739,57 @@ } } +/* Called from sysrq_handle_showregs() */ +void perf_event_print_debug(void) +{ + unsigned long sdar, sier, flags; + u32 pmcs[MAX_HWEVENTS]; + int i; + + if (!ppmu->n_counter) + return; + + local_irq_save(flags); + + pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d", + smp_processor_id(), ppmu->name, ppmu->n_counter); + + for (i = 0; i < ppmu->n_counter; i++) + pmcs[i] = read_pmc(i + 1); + + for (; i < MAX_HWEVENTS; i++) + pmcs[i] = 0xdeadbeef; + + pr_info("PMC1: %08x PMC2: %08x PMC3: %08x PMC4: %08x\n", + pmcs[0], pmcs[1], pmcs[2], pmcs[3]); + + if (ppmu->n_counter > 4) + pr_info("PMC5: %08x PMC6: %08x PMC7: %08x PMC8: %08x\n", + pmcs[4], pmcs[5], pmcs[6], pmcs[7]); + + pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n", + mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA)); + + sdar = sier = 0; +#ifdef CONFIG_PPC64 + sdar = mfspr(SPRN_SDAR); + + if (ppmu->flags & PPMU_HAS_SIER) + sier = mfspr(SPRN_SIER); + + if (ppmu->flags & PPMU_ARCH_207S) { + pr_info("MMCR2: %016lx EBBHR: %016lx\n", + mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR)); + pr_info("EBBRR: %016lx BESCR: %016lx\n", + mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR)); + } +#endif + pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n", + mfspr(SPRN_SIAR), sdar, sier); + + local_irq_restore(flags); +} + /* * Check if a set of events can all go on the PMU at once. * If they can't, this will look at alternative codes for the events @@ -851,7 +996,22 @@ } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); local64_add(delta, &event->count); - local64_sub(delta, &event->hw.period_left); + + /* + * A number of places program the PMC with (0x80000000 - period_left). + * We never want period_left to be less than 1 because we will program + * the PMC with a value >= 0x800000000 and an edge detected PMC will + * roll around to 0 before taking an exception. We have seen this + * on POWER8. + * + * To fix this, clamp the minimum value of period_left to 1. + */ + do { + prev = local64_read(&event->hw.period_left); + val = prev - delta; + if (val < 1) + val = 1; + } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); } /* @@ -973,11 +1133,12 @@ } /* - * Set the 'freeze counters' bit, clear EBE/PMCC/PMAO/FC56. 
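/*
 * The power_pmu_read() change above updates period_left with a
 * compare-and-swap retry loop so the value can be clamped to a minimum of 1
 * without losing a concurrent update.  A standalone C11 sketch of the same
 * pattern (local64_cmpxchg plays the role of the compare-exchange here):
 */
#include <stdatomic.h>
#include <stdint.h>

/* Subtract 'delta' from *left but never let the result drop below 1,
 * retrying if another thread changed the value in between. */
static void demo_sub_and_clamp(_Atomic int64_t *left, int64_t delta)
{
	int64_t prev = atomic_load(left);
	int64_t next;

	do {
		next = prev - delta;
		if (next < 1)
			next = 1;
		/* On failure, 'prev' is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(left, &prev, next));
}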
+ * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56 */ val = mmcr0 = mfspr(SPRN_MMCR0); val |= MMCR0_FC; - val &= ~(MMCR0_EBE | MMCR0_PMCC | MMCR0_PMAO | MMCR0_FC56); + val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO | + MMCR0_FC56); /* * The barrier is to make sure the mtspr has been @@ -1144,9 +1305,17 @@ cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: + pmao_restore_workaround(ebb); + + if (ppmu->flags & PPMU_ARCH_207S) + mtspr(SPRN_MMCR2, 0); + mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); mb(); + if (cpuhw->bhrb_users) + ppmu->config_bhrb(cpuhw->bhrb_filter); + write_mmcr0(cpuhw, mmcr0); /* @@ -1158,8 +1327,6 @@ } out: - if (cpuhw->bhrb_users) - ppmu->config_bhrb(cpuhw->bhrb_filter); local_irq_restore(flags); } @@ -1547,7 +1714,7 @@ if (has_branch_stack(event)) { /* PMU has BHRB enabled */ - if (!(ppmu->flags & PPMU_BHRB)) + if (!(ppmu->flags & PPMU_ARCH_207S)) return -EOPNOTSUPP; } --- linux-3.13.0.orig/arch/powerpc/perf/power8-pmu.c +++ linux-3.13.0/arch/powerpc/perf/power8-pmu.c @@ -10,6 +10,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) "power8-pmu: " fmt + #include #include #include @@ -25,15 +27,48 @@ #define PM_BRU_FIN 0x10068 #define PM_BR_MPRED_CMPL 0x400f6 +/* All L1 D cache load references counted at finish, gated by reject */ +#define PM_LD_REF_L1 0x100ee +/* Load Missed L1 */ +#define PM_LD_MISS_L1 0x3e054 +/* Store Missed L1 */ +#define PM_ST_MISS_L1 0x300f0 +/* L1 cache data prefetches */ +#define PM_L1_PREF 0x0d8b8 +/* Instruction fetches from L1 */ +#define PM_INST_FROM_L1 0x04080 +/* Demand iCache Miss */ +#define PM_L1_ICACHE_MISS 0x200fd +/* Instruction Demand sectors wriittent into IL1 */ +#define PM_L1_DEMAND_WRITE 0x0408c +/* Instruction prefetch written into IL1 */ +#define PM_IC_PREF_WRITE 0x0408e +/* The data cache was reloaded from local core's L3 due to a demand load */ +#define PM_DATA_FROM_L3 0x4c042 +/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */ +#define PM_DATA_FROM_L3MISS 0x300fe +/* All successful D-side store dispatches for this thread */ +#define PM_L2_ST 0x17080 +/* All successful D-side store dispatches for this thread that were L2 Miss */ +#define PM_L2_ST_MISS 0x17082 +/* Total HW L3 prefetches(Load+store) */ +#define PM_L3_PREF_ALL 0x4e052 +/* Data PTEG reload */ +#define PM_DTLB_MISS 0x300fc +/* ITLB Reloaded */ +#define PM_ITLB_MISS 0x400fc + /* * Raw event encoding for POWER8: * * 60 56 52 48 44 40 36 32 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - * | [ thresh_cmp ] [ thresh_ctl ] - * | | - * *- EBB (Linux) thresh start/stop OR FAB match -* + * | | [ ] [ thresh_cmp ] [ thresh_ctl ] + * | | | | + * | | *- IFM (Linux) thresh start/stop OR FAB match -* + * | *- BHRB (Linux) + * *- EBB (Linux) * * 28 24 20 16 12 8 4 0 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | @@ -83,9 +118,18 @@ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)  * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE) * + * if EBB and BHRB: + * MMCRA[32:33] = IFM + * */ #define EVENT_EBB_MASK 1ull +#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT +#define EVENT_BHRB_MASK 1ull +#define EVENT_BHRB_SHIFT 62 +#define EVENT_WANTS_BHRB (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) +#define EVENT_IFM_MASK 3ull +#define EVENT_IFM_SHIFT 60 #define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */ #define EVENT_THR_CMP_MASK 0x3ff #define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */ @@ -110,6 +154,12 @@ #define EVENT_IS_MARKED 
(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) #define EVENT_PSEL_MASK 0xff /* PMCxSEL value */ +/* Bits defined by Linux */ +#define EVENT_LINUX_MASK \ + ((EVENT_EBB_MASK << EVENT_EBB_SHIFT) | \ + (EVENT_BHRB_MASK << EVENT_BHRB_SHIFT) | \ + (EVENT_IFM_MASK << EVENT_IFM_SHIFT)) + #define EVENT_VALID_MASK \ ((EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \ @@ -118,7 +168,7 @@ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \ (EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \ - (EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT) | \ + EVENT_LINUX_MASK | \ EVENT_PSEL_MASK) /* MMCRA IFM bits - POWER8 */ @@ -142,10 +192,11 @@ * * 28 24 20 16 12 8 4 0 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - * | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1] - * EBB -* | | - * | | Count of events for each PMC. - * L1 I/D qualifier -* | p1, p2, p3, p4, p5, p6. + * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1] + * | | | | + * BHRB IFM -* | | | Count of events for each PMC. + * EBB -* | | p1, p2, p3, p4, p5, p6. + * L1 I/D qualifier -* | * nc - number of counters -* * * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints @@ -164,6 +215,9 @@ #define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24) #define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK) +#define CNST_IFM_VAL(v) (((v) & EVENT_IFM_MASK) << 25) +#define CNST_IFM_MASK CNST_IFM_VAL(EVENT_IFM_MASK) + #define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22) #define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3) @@ -210,6 +264,7 @@ #define MMCRA_THR_SEL_SHIFT 16 #define MMCRA_THR_CMP_SHIFT 32 #define MMCRA_SDAR_MODE_TLB (1ull << 42) +#define MMCRA_IFM_SHIFT 30 static inline bool event_is_fab_match(u64 event) @@ -234,20 +289,22 @@ pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK; unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK; cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; - ebb = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK; - - /* Clear the EBB bit in the event, so event checks work below */ - event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT); + ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK; if (pmc) { + u64 base_event; + if (pmc > 6) return -1; - mask |= CNST_PMC_MASK(pmc); - value |= CNST_PMC_VAL(pmc); + /* Ignore Linux defined bits when checking event below */ + base_event = event & ~EVENT_LINUX_MASK; - if (pmc >= 5 && event != 0x500fa && event != 0x600f4) + if (pmc >= 5 && base_event != 0x500fa && base_event != 0x600f4) return -1; + + mask |= CNST_PMC_MASK(pmc); + value |= CNST_PMC_VAL(pmc); } if (pmc <= 4) { @@ -268,9 +325,10 @@ * HV writable, and there is no API for guest kernels to modify * it. The solution is for the hypervisor to initialise the * field to zeroes, and for us to only ever allow events that - * have a cache selector of zero. + * have a cache selector of zero. The bank selector (bit 3) is + * irrelevant, as long as the rest of the value is 0. */ - if (cache) + if (cache & 0x7) return -1; } else if (event & EVENT_IS_L1) { @@ -311,6 +369,15 @@ /* EBB events must specify the PMC */ return -1; + if (event & EVENT_WANTS_BHRB) { + if (!ebb) + /* Only EBB events can request BHRB */ + return -1; + + mask |= CNST_IFM_MASK; + value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT); + } + /* * All events must agree on EBB, either all request it or none. 
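/*
 * The encoding comment and EVENT_* macros above pack the Linux-defined
 * selectors into the top bits of the raw event code: EBB at the top bit per
 * the layout diagram, BHRB at bit 62, a 2-bit IFM field at bits 60-61, with
 * threshold control/compare lower down and PMCxSEL in the low byte.  A
 * sketch of pulling such a code apart; the DEMO_* values mirror the shifts
 * shown above and are for illustration only.
 */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_EVENT_EBB_SHIFT      63
#define DEMO_EVENT_BHRB_SHIFT     62
#define DEMO_EVENT_IFM_SHIFT      60
#define DEMO_EVENT_IFM_MASK       0x3ull
#define DEMO_EVENT_THR_CMP_SHIFT  40
#define DEMO_EVENT_THR_CMP_MASK   0x3ffull
#define DEMO_EVENT_THR_CTL_SHIFT  32
#define DEMO_EVENT_THR_CTL_MASK   0xffull
#define DEMO_EVENT_PSEL_MASK      0xffull

struct demo_p8_event {
	bool     ebb;
	bool     bhrb;
	unsigned ifm;       /* branch filter mode, only meaningful with bhrb */
	unsigned thr_cmp;
	unsigned thr_ctl;
	unsigned psel;      /* PMCxSEL, the low byte */
};

static struct demo_p8_event demo_decode_event(uint64_t event)
{
	struct demo_p8_event e = {
		.ebb     = (event >> DEMO_EVENT_EBB_SHIFT) & 1,
		.bhrb    = (event >> DEMO_EVENT_BHRB_SHIFT) & 1,
		.ifm     = (event >> DEMO_EVENT_IFM_SHIFT) & DEMO_EVENT_IFM_MASK,
		.thr_cmp = (event >> DEMO_EVENT_THR_CMP_SHIFT) & DEMO_EVENT_THR_CMP_MASK,
		.thr_ctl = (event >> DEMO_EVENT_THR_CTL_SHIFT) & DEMO_EVENT_THR_CTL_MASK,
		.psel    = event & DEMO_EVENT_PSEL_MASK,
	};
	return e;
}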
* EBB events are pinned & exclusive, so this should never actually @@ -400,6 +467,11 @@ mmcra |= val << MMCRA_THR_CMP_SHIFT; } + if (event[i] & EVENT_WANTS_BHRB) { + val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK; + mmcra |= val << MMCRA_IFM_SHIFT; + } + hwc[i] = pmc - 1; } @@ -557,6 +629,8 @@ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN, [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL, + [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1, + [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1, }; static u64 power8_bhrb_filter_map(u64 branch_sample_type) @@ -596,6 +670,116 @@ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter)); } +#define C(x) PERF_COUNT_HW_CACHE_##x + +/* + * Table of generalized cache-related events. + * 0 means not supported, -1 means nonsensical, other values + * are event codes. + */ +static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_LD_REF_L1, + [ C(RESULT_MISS) ] = PM_LD_MISS_L1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ST_MISS_L1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_PREF, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1, + [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3, + [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = PM_L2_ST, + [ C(RESULT_MISS) ] = PM_L2_ST_MISS, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL, + [ C(RESULT_MISS) ] = 0, + }, + }, + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_DTLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = PM_ITLB_MISS, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = PM_BRU_FIN, + [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +#undef C + static struct power_pmu power8_pmu = { .name = "POWER8", .n_counter = 6, @@ -608,9 +792,10 @@ .get_constraint = power8_get_constraint, .get_alternatives = power8_get_alternatives, .disable_pmc = power8_disable_pmc, - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, + .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, + .cache_events = &power8_cache_events, .attr_groups = 
power8_pmu_attr_groups, .bhrb_nr = 32, }; @@ -630,6 +815,9 @@ /* Tell userspace that EBB is supported */ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB; + if (cpu_has_feature(CPU_FTR_PMAO_BUG)) + pr_info("PMAO restore workaround active.\n"); + return 0; } early_initcall(init_power8_pmu); --- linux-3.13.0.orig/arch/powerpc/platforms/85xx/Kconfig +++ linux-3.13.0/arch/powerpc/platforms/85xx/Kconfig @@ -253,6 +253,8 @@ select ARCH_REQUIRE_GPIOLIB select GPIO_MPC8XXX select HAS_RAPIDIO + select HAS_FSL_QBMAN + select HAS_FSL_PME select PPC_EPAPR_HV_PIC help This option enables support for the FSL CoreNet based boards. --- linux-3.13.0.orig/arch/powerpc/platforms/85xx/corenet_generic.c +++ linux-3.13.0/arch/powerpc/platforms/85xx/corenet_generic.c @@ -64,6 +64,9 @@ .compatible = "simple-bus" }, { + .compatible = "fsl,dpaa" + }, + { .compatible = "fsl,srio", }, { @@ -96,6 +99,32 @@ return of_platform_bus_probe(NULL, of_device_ids, NULL); } +/* Early setup is required for large chunks of contiguous (and coarsely-aligned) + * memory. The following shoe-horns Qman/Bman "init_early" calls into the + * platform setup to let them parse their CCSR nodes early on. */ +#ifdef CONFIG_FSL_QMAN_CONFIG +void __init qman_init_early(void); +#endif +#ifdef CONFIG_FSL_BMAN_CONFIG +void __init bman_init_early(void); +#endif +#ifdef CONFIG_FSL_PME2_CTRL +void __init pme2_init_early(void); +#endif + +__init void corenet_ds_init_early(void) +{ +#ifdef CONFIG_FSL_QMAN_CONFIG + qman_init_early(); +#endif +#ifdef CONFIG_FSL_BMAN_CONFIG + bman_init_early(); +#endif +#ifdef CONFIG_FSL_PME2_CTRL + pme2_init_early(); +#endif +} + static const char * const boards[] __initconst = { "fsl,P2041RDB", "fsl,P3041DS", @@ -103,6 +132,7 @@ "fsl,P5020DS", "fsl,P5040DS", "fsl,T4240QDS", + "fsl,T4240RDB", "fsl,B4860QDS", "fsl,B4420QDS", "fsl,B4220QDS", @@ -116,6 +146,7 @@ "fsl,P5020DS-hv", "fsl,P5040DS-hv", "fsl,T4240QDS-hv", + "fsl,T4240RDB-hv", "fsl,B4860QDS-hv", "fsl,B4420QDS-hv", "fsl,B4220QDS-hv", @@ -173,6 +204,7 @@ #else .power_save = e500_idle, #endif + .init_early = corenet_ds_init_early, }; machine_arch_initcall(corenet_generic, corenet_gen_publish_devices); --- linux-3.13.0.orig/arch/powerpc/platforms/85xx/smp.c +++ linux-3.13.0/arch/powerpc/platforms/85xx/smp.c @@ -176,6 +176,9 @@ int hw_cpu = get_hard_smp_processor_id(nr); int ioremappable; int ret = 0; +#ifdef CONFIG_PPC64 + unsigned long *ptr = NULL; +#endif WARN_ON(nr < 0 || nr >= NR_CPUS); WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS); @@ -264,11 +267,18 @@ #else smp_generic_kick_cpu(nr); - flush_spin_table(spin_table); - out_be32(&spin_table->pir, hw_cpu); - out_be64((u64 *)(&spin_table->addr_h), - __pa((u64)*((unsigned long long *)generic_secondary_smp_init))); - flush_spin_table(spin_table); + ptr = (unsigned long *)((unsigned long)&__run_at_kexec); + /* We shouldn't access spin_table from the bootloader to up any + * secondary cpu for kexec kernel, and kexec kernel already + * know how to jump to generic_secondary_smp_init. 
+ */ + if (!*ptr) { + flush_spin_table(spin_table); + out_be32(&spin_table->pir, hw_cpu); + out_be64((u64 *)(&spin_table->addr_h), + __pa((u64)*((unsigned long long *)generic_secondary_smp_init))); + flush_spin_table(spin_table); + } #endif local_irq_restore(flags); @@ -293,6 +303,7 @@ }; #ifdef CONFIG_KEXEC +#ifdef CONFIG_PPC32 atomic_t kexec_down_cpus = ATOMIC_INIT(0); void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) @@ -311,6 +322,14 @@ if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0,1); } +#else +void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) +{ + local_irq_disable(); + hard_irq_disable(); + mpic_teardown_this_cpu(secondary); +} +#endif static void map_and_flush(unsigned long paddr) { @@ -362,11 +381,14 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image) { +#ifdef CONFIG_PPC32 int timeout = INT_MAX; int i, num_cpus = num_present_cpus(); +#endif mpc85xx_smp_flush_dcache_kexec(image); +#ifdef CONFIG_PPC32 if (image->type == KEXEC_TYPE_DEFAULT) smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); @@ -384,6 +406,7 @@ if ( i == smp_processor_id() ) continue; mpic_reset_core(i); } +#endif default_machine_kexec(image); } --- linux-3.13.0.orig/arch/powerpc/platforms/cell/spufs/inode.c +++ linux-3.13.0/arch/powerpc/platforms/cell/spufs/inode.c @@ -164,7 +164,7 @@ struct dentry *dentry, *tmp; mutex_lock(&dir->d_inode->i_mutex); - list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { + list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { spin_lock(&dentry->d_lock); if (!(d_unhashed(dentry)) && dentry->d_inode) { dget_dlock(dentry); --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/Kconfig +++ linux-3.13.0/arch/powerpc/platforms/powernv/Kconfig @@ -11,6 +11,12 @@ select PPC_UDBG_16550 select PPC_SCOM select ARCH_RANDOM + select CPU_FREQ + select CPU_FREQ_GOV_PERFORMANCE + select CPU_FREQ_GOV_POWERSAVE + select CPU_FREQ_GOV_USERSPACE + select CPU_FREQ_GOV_ONDEMAND + select CPU_FREQ_GOV_CONSERVATIVE default y config POWERNV_MSI --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/Makefile +++ linux-3.13.0/arch/powerpc/platforms/powernv/Makefile @@ -1,6 +1,7 @@ -obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o +obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o -obj-y += rng.o +obj-y += rng.o opal-elog.o opal-dump.o opal-sensor.o +obj-y += opal-msglog.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/eeh-ioda.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -114,6 +114,7 @@ ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); #endif /* CONFIG_DEBUG_FS */ + /** * ioda_eeh_post_init - Chip dependent post initialization * @hose: PCI controller @@ -221,6 +222,22 @@ return ret; } +static void ioda_eeh_phb_diag(struct pci_controller *hose) +{ + struct pnv_phb *phb = hose->private_data; + long rc; + + rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, + PNV_PCI_DIAG_BUF_SIZE); + if (rc != OPAL_SUCCESS) { + pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", + __func__, hose->global_number, rc); + return; + } + + pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); +} + /** * ioda_eeh_get_state - Retrieve the state of PE * @pe: EEH PE @@ -233,7 +250,7 @@ { s64 ret = 0; u8 fstate; - u16 pcierr; + __be16 pcierr; u32 pe_no; int result; struct pci_controller *hose = pe->phb; @@ -267,11 +284,14 @@ result = 0; result &= ~EEH_STATE_RESET_ACTIVE; - if 
(pcierr != OPAL_EEH_PHB_ERROR) { + if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { result |= EEH_STATE_MMIO_ACTIVE; result |= EEH_STATE_DMA_ACTIVE; result |= EEH_STATE_MMIO_ENABLED; result |= EEH_STATE_DMA_ENABLED; + } else if (!(pe->state & EEH_PE_ISOLATED)) { + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + ioda_eeh_phb_diag(hose); } return result; @@ -315,6 +335,15 @@ __func__, fstate, hose->global_number, pe_no); } + /* Dump PHB diag-data for frozen PE */ + if (result != EEH_STATE_NOT_SUPPORT && + (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) != + (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) && + !(pe->state & EEH_PE_ISOLATED)) { + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + ioda_eeh_phb_diag(hose); + } + return result; } @@ -490,8 +519,7 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) { struct pci_controller *hose = pe->phb; - struct eeh_dev *edev; - struct pci_dev *dev; + struct pci_bus *bus; int ret; /* @@ -520,76 +548,18 @@ if (pe->type & EEH_PE_PHB) { ret = ioda_eeh_phb_reset(hose, option); } else { - if (pe->type & EEH_PE_DEVICE) { - /* - * If it's device PE, we didn't refer to the parent - * PCI bus yet. So we have to figure it out indirectly. - */ - edev = list_first_entry(&pe->edevs, - struct eeh_dev, list); - dev = eeh_dev_to_pci_dev(edev); - dev = dev->bus->self; - } else { - /* - * If it's bus PE, the parent PCI bus is already there - * and just pick it up. - */ - dev = pe->bus->self; - } - - /* - * Do reset based on the fact that the direct upstream bridge - * is root bridge (port) or not. - */ - if (dev->bus->number == 0) + bus = eeh_pe_bus_get(pe); + if (pci_is_root_bus(bus) || + pci_is_root_bus(bus->parent)) ret = ioda_eeh_root_reset(hose, option); else - ret = ioda_eeh_bridge_reset(hose, dev, option); + ret = ioda_eeh_bridge_reset(hose, bus->self, option); } return ret; } /** - * ioda_eeh_get_log - Retrieve error log - * @pe: EEH PE - * @severity: Severity level of the log - * @drv_log: buffer to store the log - * @len: space of the log buffer - * - * The function is used to retrieve error log from P7IOC. - */ -static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, - char *drv_log, unsigned long len) -{ - s64 ret; - unsigned long flags; - struct pci_controller *hose = pe->phb; - struct pnv_phb *phb = hose->private_data; - - spin_lock_irqsave(&phb->lock, flags); - - ret = opal_pci_get_phb_diag_data2(phb->opal_id, - phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); - if (ret) { - spin_unlock_irqrestore(&phb->lock, flags); - pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n", - __func__, hose->global_number, pe->addr, ret); - return -EIO; - } - - /* - * FIXME: We probably need log the error in somewhere. - * Lets make it up in future. 
- */ - /* pr_info("%s", phb->diag.blob); */ - - spin_unlock_irqrestore(&phb->lock, flags); - - return 0; -} - -/** * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE * @pe: EEH PE * @@ -670,167 +640,6 @@ } } -static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose, - struct OpalIoPhbErrorCommon *common) -{ - struct OpalIoP7IOCPhbErrorData *data; - int i; - - data = (struct OpalIoP7IOCPhbErrorData *)common; - - pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n\n", - hose->global_number, common->version); - - pr_info(" brdgCtl: %08x\n", data->brdgCtl); - - pr_info(" portStatusReg: %08x\n", data->portStatusReg); - pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); - - pr_info(" deviceStatus: %08x\n", data->deviceStatus); - pr_info(" slotStatus: %08x\n", data->slotStatus); - pr_info(" linkStatus: %08x\n", data->linkStatus); - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); - pr_info(" devSecStatus: %08x\n", data->devSecStatus); - - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); - pr_info(" sourceId: %08x\n", data->sourceId); - - pr_info(" errorClass: %016llx\n", data->errorClass); - pr_info(" correlator: %016llx\n", data->correlator); - pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); - pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); - pr_info(" lemFir: %016llx\n", data->lemFir); - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); - pr_info(" lemWOF: %016llx\n", data->lemWOF); - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); - - for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { - if ((data->pestA[i] >> 63) == 0 && - (data->pestB[i] >> 63) == 0) - continue; - - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); - pr_info(" PESTB: %016llx\n", data->pestB[i]); - } -} - -static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose, - struct OpalIoPhbErrorCommon *common) -{ - struct OpalIoPhb3ErrorData *data; - int i; - - data = (struct OpalIoPhb3ErrorData*)common; - pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n", - hose->global_number, common->version); - - pr_info(" brdgCtl: %08x\n", data->brdgCtl); - - pr_info(" portStatusReg: %08x\n", data->portStatusReg); - pr_info(" 
rootCmplxStatus: %08x\n", data->rootCmplxStatus); - pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); - - pr_info(" deviceStatus: %08x\n", data->deviceStatus); - pr_info(" slotStatus: %08x\n", data->slotStatus); - pr_info(" linkStatus: %08x\n", data->linkStatus); - pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); - pr_info(" devSecStatus: %08x\n", data->devSecStatus); - - pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); - pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); - pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); - pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); - pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); - pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); - pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); - pr_info(" sourceId: %08x\n", data->sourceId); - pr_info(" errorClass: %016llx\n", data->errorClass); - pr_info(" correlator: %016llx\n", data->correlator); - pr_info(" nFir: %016llx\n", data->nFir); - pr_info(" nFirMask: %016llx\n", data->nFirMask); - pr_info(" nFirWOF: %016llx\n", data->nFirWOF); - pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); - pr_info(" PhbCsr: %016llx\n", data->phbCsr); - pr_info(" lemFir: %016llx\n", data->lemFir); - pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); - pr_info(" lemWOF: %016llx\n", data->lemWOF); - pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); - pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); - pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); - pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); - pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); - pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); - pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); - pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); - pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); - pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); - pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); - pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); - pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); - pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); - pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); - pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); - - for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { - if ((data->pestA[i] >> 63) == 0 && - (data->pestB[i] >> 63) == 0) - continue; - - pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); - pr_info(" PESTB: %016llx\n", data->pestB[i]); - } -} - -static void ioda_eeh_phb_diag(struct pci_controller *hose) -{ - struct pnv_phb *phb = hose->private_data; - struct OpalIoPhbErrorCommon *common; - long rc; - - rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, - PNV_PCI_DIAG_BUF_SIZE); - if (rc != OPAL_SUCCESS) { - pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", - __func__, hose->global_number, rc); - return; - } - - common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; - switch (common->ioType) { - case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: - ioda_eeh_p7ioc_phb_diag(hose, common); - break; - case OPAL_PHB_ERROR_DATA_TYPE_PHB3: - ioda_eeh_phb3_phb_diag(hose, common); - break; - default: - pr_warning("%s: Unrecognized I/O chip %d\n", - __func__, common->ioType); - } -} - static int ioda_eeh_get_phb_pe(struct pci_controller *hose, struct eeh_pe **pe) { @@ -884,12 +693,12 @@ */ static int ioda_eeh_next_error(struct eeh_pe **pe) { - struct pci_controller *hose, *tmp; + 
struct pci_controller *hose; struct pnv_phb *phb; - u64 frozen_pe_no; - u16 err_type, severity; + __be64 frozen_pe_no; + __be16 err_type, severity; long rc; - int ret = 1; + int ret = EEH_NEXT_ERR_NONE; /* * While running here, it's safe to purge the event queue. @@ -899,7 +708,7 @@ eeh_remove_event(NULL); opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + list_for_each_entry(hose, &hose_list, list_node) { /* * If the subordinate PCI buses of the PHB has been * removed, we needn't take care of it any more. @@ -920,8 +729,8 @@ } /* If the PHB doesn't have error, stop processing */ - if (err_type == OPAL_EEH_NO_ERROR || - severity == OPAL_EEH_SEV_NO_ERROR) { + if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || + be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { pr_devel("%s: No error found on PHB#%x\n", __func__, hose->global_number); continue; @@ -933,66 +742,91 @@ * specific PHB. */ pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", - __func__, err_type, severity, - frozen_pe_no, hose->global_number); - switch (err_type) { + __func__, be16_to_cpu(err_type), be16_to_cpu(severity), + be64_to_cpu(frozen_pe_no), hose->global_number); + switch (be16_to_cpu(err_type)) { case OPAL_EEH_IOC_ERROR: - if (severity == OPAL_EEH_SEV_IOC_DEAD) { - list_for_each_entry_safe(hose, tmp, - &hose_list, list_node) { + if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { + list_for_each_entry(hose, &hose_list, + list_node) { phb = hose->private_data; phb->eeh_state |= PNV_EEH_STATE_REMOVED; } pr_err("EEH: dead IOC detected\n"); - ret = 4; - goto out; - } else if (severity == OPAL_EEH_SEV_INF) { + ret = EEH_NEXT_ERR_DEAD_IOC; + } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: IOC informative error " "detected\n"); ioda_eeh_hub_diag(hose); + ret = EEH_NEXT_ERR_NONE; } break; case OPAL_EEH_PHB_ERROR: - if (severity == OPAL_EEH_SEV_PHB_DEAD) { + if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { if (ioda_eeh_get_phb_pe(hose, pe)) break; pr_err("EEH: dead PHB#%x detected\n", hose->global_number); phb->eeh_state |= PNV_EEH_STATE_REMOVED; - ret = 3; - goto out; - } else if (severity == OPAL_EEH_SEV_PHB_FENCED) { + ret = EEH_NEXT_ERR_DEAD_PHB; + } else if (be16_to_cpu(severity) == + OPAL_EEH_SEV_PHB_FENCED) { if (ioda_eeh_get_phb_pe(hose, pe)) break; pr_err("EEH: fenced PHB#%x detected\n", hose->global_number); - ret = 2; - goto out; - } else if (severity == OPAL_EEH_SEV_INF) { + ret = EEH_NEXT_ERR_FENCED_PHB; + } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: PHB#%x informative error " "detected\n", hose->global_number); ioda_eeh_phb_diag(hose); + ret = EEH_NEXT_ERR_NONE; } break; case OPAL_EEH_PE_ERROR: - if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) + if (ioda_eeh_get_pe(hose, be64_to_cpu(frozen_pe_no), + pe)) break; pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", (*pe)->addr, (*pe)->phb->global_number); - ret = 1; - goto out; + ret = EEH_NEXT_ERR_FROZEN_PE; + break; + default: + pr_warn("%s: Unexpected error type %d\n", + __func__, be16_to_cpu(err_type)); + } + + /* + * EEH core will try recover from fenced PHB or + * frozen PE. In the time for frozen PE, EEH core + * enable IO path for that before collecting logs, + * but it ruins the site. So we have to dump the + * log in advance here. 
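+ * (Re-enabling the IO path destroys the very error state the diag-data
+ * is built from, hence the dump must happen before recovery begins.)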
+ */ + if ((ret == EEH_NEXT_ERR_FROZEN_PE || + ret == EEH_NEXT_ERR_FENCED_PHB) && + !((*pe)->state & EEH_PE_ISOLATED)) { + eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); + ioda_eeh_phb_diag(hose); } + + /* + * If we have no errors on the specific PHB or only + * informative error there, we continue poking it. + * Otherwise, we need actions to be taken by upper + * layer. + */ + if (ret > EEH_NEXT_ERR_INF) + break; } - ret = 0; -out: return ret; } @@ -1001,7 +835,6 @@ .set_option = ioda_eeh_set_option, .get_state = ioda_eeh_get_state, .reset = ioda_eeh_reset, - .get_log = ioda_eeh_get_log, .configure_bridge = ioda_eeh_configure_bridge, .next_error = ioda_eeh_next_error }; --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-async.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-async.c @@ -0,0 +1,203 @@ +/* + * PowerNV OPAL asynchronous completion interfaces + * + * Copyright 2013 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define N_ASYNC_COMPLETIONS 64 + +static DECLARE_BITMAP(opal_async_complete_map, N_ASYNC_COMPLETIONS) = {~0UL}; +static DECLARE_BITMAP(opal_async_token_map, N_ASYNC_COMPLETIONS); +static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait); +static DEFINE_SPINLOCK(opal_async_comp_lock); +static struct semaphore opal_async_sem; +static struct opal_msg *opal_async_responses; +static unsigned int opal_max_async_tokens; + +int __opal_async_get_token(void) +{ + unsigned long flags; + int token; + + spin_lock_irqsave(&opal_async_comp_lock, flags); + token = find_first_bit(opal_async_complete_map, opal_max_async_tokens); + if (token >= opal_max_async_tokens) { + token = -EBUSY; + goto out; + } + + if (__test_and_set_bit(token, opal_async_token_map)) { + token = -EBUSY; + goto out; + } + + __clear_bit(token, opal_async_complete_map); + +out: + spin_unlock_irqrestore(&opal_async_comp_lock, flags); + return token; +} + +int opal_async_get_token_interruptible(void) +{ + int token; + + /* Wait until a token is available */ + if (down_interruptible(&opal_async_sem)) + return -ERESTARTSYS; + + token = __opal_async_get_token(); + if (token < 0) + up(&opal_async_sem); + + return token; +} + +int __opal_async_release_token(int token) +{ + unsigned long flags; + + if (token < 0 || token >= opal_max_async_tokens) { + pr_err("%s: Passed token is out of range, token %d\n", + __func__, token); + return -EINVAL; + } + + spin_lock_irqsave(&opal_async_comp_lock, flags); + __set_bit(token, opal_async_complete_map); + __clear_bit(token, opal_async_token_map); + spin_unlock_irqrestore(&opal_async_comp_lock, flags); + + return 0; +} + +int opal_async_release_token(int token) +{ + int ret; + + ret = __opal_async_release_token(token); + if (ret) + return ret; + + up(&opal_async_sem); + + return 0; +} + +int opal_async_wait_response(uint64_t token, struct opal_msg *msg) +{ + if (token >= opal_max_async_tokens) { + pr_err("%s: Invalid token passed\n", __func__); + return -EINVAL; + } + + if (!msg) { + pr_err("%s: Invalid message pointer passed\n", __func__); + return -EINVAL; + } + + wait_event(opal_async_wait, test_bit(token, opal_async_complete_map)); + memcpy(msg, &opal_async_responses[token], sizeof(*msg)); + + return 0; +} + +static int 
opal_async_comp_event(struct notifier_block *nb, + unsigned long msg_type, void *msg) +{ + struct opal_msg *comp_msg = msg; + unsigned long flags; + + if (msg_type != OPAL_MSG_ASYNC_COMP) + return 0; + + memcpy(&opal_async_responses[comp_msg->params[0]], comp_msg, + sizeof(*comp_msg)); + spin_lock_irqsave(&opal_async_comp_lock, flags); + __set_bit(comp_msg->params[0], opal_async_complete_map); + spin_unlock_irqrestore(&opal_async_comp_lock, flags); + + wake_up(&opal_async_wait); + + return 0; +} + +static struct notifier_block opal_async_comp_nb = { + .notifier_call = opal_async_comp_event, + .next = NULL, + .priority = 0, +}; + +static int __init opal_async_comp_init(void) +{ + struct device_node *opal_node; + const __be32 *async; + int err; + + opal_node = of_find_node_by_path("/ibm,opal"); + if (!opal_node) { + pr_err("%s: Opal node not found\n", __func__); + err = -ENOENT; + goto out; + } + + async = of_get_property(opal_node, "opal-msg-async-num", NULL); + if (!async) { + pr_err("%s: %s has no opal-msg-async-num\n", + __func__, opal_node->full_name); + err = -ENOENT; + goto out_opal_node; + } + + opal_max_async_tokens = be32_to_cpup(async); + if (opal_max_async_tokens > N_ASYNC_COMPLETIONS) + opal_max_async_tokens = N_ASYNC_COMPLETIONS; + + err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP, + &opal_async_comp_nb); + if (err) { + pr_err("%s: Can't register OPAL event notifier (%d)\n", + __func__, err); + goto out_opal_node; + } + + opal_async_responses = kzalloc( + sizeof(*opal_async_responses) * opal_max_async_tokens, + GFP_KERNEL); + if (!opal_async_responses) { + pr_err("%s: Out of memory, failed to do asynchronous " + "completion init\n", __func__); + err = -ENOMEM; + goto out_opal_node; + } + + /* Initialize to 1 less than the maximum tokens available, as we may + * require to pop one during emergency through synchronous call to + * __opal_async_get_token() + */ + sema_init(&opal_async_sem, opal_max_async_tokens - 1); + +out_opal_node: + of_node_put(opal_node); +out: + return err; +} +subsys_initcall(opal_async_comp_init); --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-dump.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-dump.c @@ -0,0 +1,453 @@ +/* + * PowerNV OPAL Dump Interface + * + * Copyright 2013,2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#define DUMP_TYPE_FSP 0x01 + +struct dump_obj { + struct kobject kobj; + struct bin_attribute dump_attr; + uint32_t id; /* becomes object name */ + uint32_t type; + uint32_t size; + char *buffer; +}; +#define to_dump_obj(x) container_of(x, struct dump_obj, kobj) + +struct dump_attribute { + struct attribute attr; + ssize_t (*show)(struct dump_obj *dump, struct dump_attribute *attr, + char *buf); + ssize_t (*store)(struct dump_obj *dump, struct dump_attribute *attr, + const char *buf, size_t count); +}; +#define to_dump_attr(x) container_of(x, struct dump_attribute, attr) + +static ssize_t dump_id_show(struct dump_obj *dump_obj, + struct dump_attribute *attr, + char *buf) +{ + return sprintf(buf, "0x%x\n", dump_obj->id); +} + +static const char* dump_type_to_string(uint32_t type) +{ + switch (type) { + case 0x01: return "SP Dump"; + case 0x02: return "System/Platform Dump"; + case 0x03: return "SMA Dump"; + default: return "unknown"; + } +} + +static ssize_t dump_type_show(struct dump_obj *dump_obj, + struct dump_attribute *attr, + char *buf) +{ + + return sprintf(buf, "0x%x %s\n", dump_obj->type, + dump_type_to_string(dump_obj->type)); +} + +static ssize_t dump_ack_show(struct dump_obj *dump_obj, + struct dump_attribute *attr, + char *buf) +{ + return sprintf(buf, "ack - acknowledge dump\n"); +} + +/* + * Send acknowledgement to OPAL + */ +static int64_t dump_send_ack(uint32_t dump_id) +{ + int rc; + + rc = opal_dump_ack(dump_id); + if (rc) + pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n", + __func__, dump_id, rc); + return rc; +} + +static void delay_release_kobj(void *kobj) +{ + kobject_put((struct kobject *)kobj); +} + +static ssize_t dump_ack_store(struct dump_obj *dump_obj, + struct dump_attribute *attr, + const char *buf, + size_t count) +{ + dump_send_ack(dump_obj->id); + sysfs_schedule_callback(&dump_obj->kobj, delay_release_kobj, + &dump_obj->kobj, THIS_MODULE); + return count; +} + +/* Attributes of a dump + * The binary attribute of the dump itself is dynamic + * due to the dynamic size of the dump + */ +static struct dump_attribute id_attribute = + __ATTR(id, 0666, dump_id_show, NULL); +static struct dump_attribute type_attribute = + __ATTR(type, 0666, dump_type_show, NULL); +static struct dump_attribute ack_attribute = + __ATTR(acknowledge, 0660, dump_ack_show, dump_ack_store); + +static ssize_t init_dump_show(struct dump_obj *dump_obj, + struct dump_attribute *attr, + char *buf) +{ + return sprintf(buf, "1 - initiate dump\n"); +} + +static int64_t dump_fips_init(uint8_t type) +{ + int rc; + + rc = opal_dump_init(type); + if (rc) + pr_warn("%s: Failed to initiate FipS dump (%d)\n", + __func__, rc); + return rc; +} + +static ssize_t init_dump_store(struct dump_obj *dump_obj, + struct dump_attribute *attr, + const char *buf, + size_t count) +{ + dump_fips_init(DUMP_TYPE_FSP); + pr_info("%s: Initiated FSP dump\n", __func__); + return count; +} + +static struct dump_attribute initiate_attribute = + __ATTR(initiate_dump, 0600, init_dump_show, init_dump_store); + +static struct attribute *initiate_attrs[] = { + &initiate_attribute.attr, + NULL, +}; + +static struct attribute_group initiate_attr_group = { + .attrs = initiate_attrs, +}; + +static struct kset *dump_kset; + +static ssize_t dump_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct dump_attribute *attribute; + struct dump_obj *dump; + + attribute = to_dump_attr(attr); + dump = to_dump_obj(kobj); + + if 
(!attribute->show) + return -EIO; + + return attribute->show(dump, attribute, buf); +} + +static ssize_t dump_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct dump_attribute *attribute; + struct dump_obj *dump; + + attribute = to_dump_attr(attr); + dump = to_dump_obj(kobj); + + if (!attribute->store) + return -EIO; + + return attribute->store(dump, attribute, buf, len); +} + +static const struct sysfs_ops dump_sysfs_ops = { + .show = dump_attr_show, + .store = dump_attr_store, +}; + +static void dump_release(struct kobject *kobj) +{ + struct dump_obj *dump; + + dump = to_dump_obj(kobj); + vfree(dump->buffer); + kfree(dump); +} + +static struct attribute *dump_default_attrs[] = { + &id_attribute.attr, + &type_attribute.attr, + &ack_attribute.attr, + NULL, +}; + +static struct kobj_type dump_ktype = { + .sysfs_ops = &dump_sysfs_ops, + .release = &dump_release, + .default_attrs = dump_default_attrs, +}; + +static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type) +{ + __be32 id, size, type; + int rc; + + type = cpu_to_be32(0xffffffff); + + rc = opal_dump_info2(&id, &size, &type); + if (rc == OPAL_PARAMETER) + rc = opal_dump_info(&id, &size); + + *dump_id = be32_to_cpu(id); + *dump_size = be32_to_cpu(size); + *dump_type = be32_to_cpu(type); + + if (rc) + pr_warn("%s: Failed to get dump info (%d)\n", + __func__, rc); + return rc; +} + +static int64_t dump_read_data(struct dump_obj *dump) +{ + struct opal_sg_list *list; + uint64_t addr; + int64_t rc; + + /* Allocate memory */ + dump->buffer = vzalloc(PAGE_ALIGN(dump->size)); + if (!dump->buffer) { + pr_err("%s : Failed to allocate memory\n", __func__); + rc = -ENOMEM; + goto out; + } + + /* Generate SG list */ + list = opal_vmalloc_to_sg_list(dump->buffer, dump->size); + if (!list) { + rc = -ENOMEM; + goto out; + } + + /* First entry address */ + addr = __pa(list); + + /* Fetch data */ + rc = OPAL_BUSY_EVENT; + while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { + rc = opal_dump_read(dump->id, addr); + if (rc == OPAL_BUSY_EVENT) { + opal_poll_events(NULL); + msleep(20); + } + } + + if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) + pr_warn("%s: Extract dump failed for ID 0x%x\n", + __func__, dump->id); + + /* Free SG list */ + opal_free_sg_list(list); + +out: + return rc; +} + +static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t pos, size_t count) +{ + ssize_t rc; + + struct dump_obj *dump = to_dump_obj(kobj); + + if (!dump->buffer) { + rc = dump_read_data(dump); + + if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) { + vfree(dump->buffer); + dump->buffer = NULL; + + return -EIO; + } + if (rc == OPAL_PARTIAL) { + /* On a partial read, we just return EIO + * and rely on userspace to ask us to try + * again. + */ + pr_info("%s: Platform dump partially read.ID = 0x%x\n", + __func__, dump->id); + return -EIO; + } + } + + memcpy(buffer, dump->buffer + pos, count); + + /* You may think we could free the dump buffer now and retrieve + * it again later if needed, but due to current firmware limitation, + * that's not the case. So, once read into userspace once, + * we keep the dump around until it's acknowledged by userspace. 
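+ * (The buffer is eventually freed by dump_release() when the acknowledged
+ * dump object's kobject is dropped.)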
+ */ + + return count; +} + +static struct dump_obj *create_dump_obj(uint32_t id, size_t size, + uint32_t type) +{ + struct dump_obj *dump; + int rc; + + dump = kzalloc(sizeof(*dump), GFP_KERNEL); + if (!dump) + return NULL; + + dump->kobj.kset = dump_kset; + + kobject_init(&dump->kobj, &dump_ktype); + + sysfs_bin_attr_init(&dump->dump_attr); + + dump->dump_attr.attr.name = "dump"; + dump->dump_attr.attr.mode = 0400; + dump->dump_attr.size = size; + dump->dump_attr.read = dump_attr_read; + + dump->id = id; + dump->size = size; + dump->type = type; + + rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id); + if (rc) { + kobject_put(&dump->kobj); + return NULL; + } + + rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr); + if (rc) { + kobject_put(&dump->kobj); + return NULL; + } + + pr_info("%s: New platform dump. ID = 0x%x Size %u\n", + __func__, dump->id, dump->size); + + kobject_uevent(&dump->kobj, KOBJ_ADD); + + return dump; +} + +static int process_dump(void) +{ + int rc; + uint32_t dump_id, dump_size, dump_type; + struct dump_obj *dump; + char name[22]; + + rc = dump_read_info(&dump_id, &dump_size, &dump_type); + if (rc != OPAL_SUCCESS) + return rc; + + sprintf(name, "0x%x-0x%x", dump_type, dump_id); + + /* we may get notified twice, let's handle + * that gracefully and not create two conflicting + * entries. + */ + if (kset_find_obj(dump_kset, name)) + return 0; + + dump = create_dump_obj(dump_id, dump_size, dump_type); + if (!dump) + return -1; + + return 0; +} + +static void dump_work_fn(struct work_struct *work) +{ + process_dump(); +} + +static DECLARE_WORK(dump_work, dump_work_fn); + +static void schedule_process_dump(void) +{ + schedule_work(&dump_work); +} + +/* + * New dump available notification + * + * Once we get notification, we add sysfs entries for it. + * We only fetch the dump on demand, and create sysfs asynchronously. + */ +static int dump_event(struct notifier_block *nb, + unsigned long events, void *change) +{ + if (events & OPAL_EVENT_DUMP_AVAIL) + schedule_process_dump(); + + return 0; +} + +static struct notifier_block dump_nb = { + .notifier_call = dump_event, + .next = NULL, + .priority = 0 +}; + +void __init opal_platform_dump_init(void) +{ + int rc; + + dump_kset = kset_create_and_add("dump", NULL, opal_kobj); + if (!dump_kset) { + pr_warn("%s: Failed to create dump kset\n", __func__); + return; + } + + rc = sysfs_create_group(&dump_kset->kobj, &initiate_attr_group); + if (rc) { + pr_warn("%s: Failed to create initiate dump attr group\n", + __func__); + kobject_put(&dump_kset->kobj); + return; + } + + rc = opal_notifier_register(&dump_nb); + if (rc) { + pr_warn("%s: Can't register OPAL event notifier (%d)\n", + __func__, rc); + return; + } + + opal_dump_resend_notification(); +} --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-elog.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-elog.c @@ -0,0 +1,320 @@ +/* + * Error log support on PowerNV. + * + * Copyright 2013,2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct elog_obj { + struct kobject kobj; + struct bin_attribute raw_attr; + uint64_t id; + uint64_t type; + size_t size; + char *buffer; +}; +#define to_elog_obj(x) container_of(x, struct elog_obj, kobj) + +struct elog_attribute { + struct attribute attr; + ssize_t (*show)(struct elog_obj *elog, struct elog_attribute *attr, + char *buf); + ssize_t (*store)(struct elog_obj *elog, struct elog_attribute *attr, + const char *buf, size_t count); +}; +#define to_elog_attr(x) container_of(x, struct elog_attribute, attr) + +static ssize_t elog_id_show(struct elog_obj *elog_obj, + struct elog_attribute *attr, + char *buf) +{ + return sprintf(buf, "0x%llx\n", elog_obj->id); +} + +static const char *elog_type_to_string(uint64_t type) +{ + switch (type) { + case 0: return "PEL"; + default: return "unknown"; + } +} + +static ssize_t elog_type_show(struct elog_obj *elog_obj, + struct elog_attribute *attr, + char *buf) +{ + return sprintf(buf, "0x%llx %s\n", + elog_obj->type, + elog_type_to_string(elog_obj->type)); +} + +static ssize_t elog_ack_show(struct elog_obj *elog_obj, + struct elog_attribute *attr, + char *buf) +{ + return sprintf(buf, "ack - acknowledge log message\n"); +} + +static void delay_release_kobj(void *kobj) +{ + kobject_put((struct kobject *)kobj); +} + +static ssize_t elog_ack_store(struct elog_obj *elog_obj, + struct elog_attribute *attr, + const char *buf, + size_t count) +{ + opal_send_ack_elog(elog_obj->id); + sysfs_schedule_callback(&elog_obj->kobj, delay_release_kobj, + &elog_obj->kobj, THIS_MODULE); + return count; +} + +static struct elog_attribute id_attribute = + __ATTR(id, 0666, elog_id_show, NULL); +static struct elog_attribute type_attribute = + __ATTR(type, 0666, elog_type_show, NULL); +static struct elog_attribute ack_attribute = + __ATTR(acknowledge, 0660, elog_ack_show, elog_ack_store); + +static struct kset *elog_kset; + +static ssize_t elog_attr_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct elog_attribute *attribute; + struct elog_obj *elog; + + attribute = to_elog_attr(attr); + elog = to_elog_obj(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(elog, attribute, buf); +} + +static ssize_t elog_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t len) +{ + struct elog_attribute *attribute; + struct elog_obj *elog; + + attribute = to_elog_attr(attr); + elog = to_elog_obj(kobj); + + if (!attribute->store) + return -EIO; + + return attribute->store(elog, attribute, buf, len); +} + +static const struct sysfs_ops elog_sysfs_ops = { + .show = elog_attr_show, + .store = elog_attr_store, +}; + +static void elog_release(struct kobject *kobj) +{ + struct elog_obj *elog; + + elog = to_elog_obj(kobj); + kfree(elog->buffer); + kfree(elog); +} + +static struct attribute *elog_default_attrs[] = { + &id_attribute.attr, + &type_attribute.attr, + &ack_attribute.attr, + NULL, +}; + +static struct kobj_type elog_ktype = { + .sysfs_ops = &elog_sysfs_ops, + .release = &elog_release, + .default_attrs = elog_default_attrs, +}; + +/* Maximum size of a single log on FSP is 16KB */ +#define OPAL_MAX_ERRLOG_SIZE 16384 + +static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t pos, size_t count) +{ + int opal_rc; + + struct elog_obj *elog = to_elog_obj(kobj); + + /* We may have had an error reading before, so let's 
retry */ + if (!elog->buffer) { + elog->buffer = kzalloc(elog->size, GFP_KERNEL); + if (!elog->buffer) + return -EIO; + + opal_rc = opal_read_elog(__pa(elog->buffer), + elog->size, elog->id); + if (opal_rc != OPAL_SUCCESS) { + pr_err("ELOG: log read failed for log-id=%llx\n", + elog->id); + kfree(elog->buffer); + elog->buffer = NULL; + return -EIO; + } + } + + memcpy(buffer, elog->buffer + pos, count); + + return count; +} + +static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type) +{ + struct elog_obj *elog; + int rc; + + elog = kzalloc(sizeof(*elog), GFP_KERNEL); + if (!elog) + return NULL; + + elog->kobj.kset = elog_kset; + + kobject_init(&elog->kobj, &elog_ktype); + + sysfs_bin_attr_init(&elog->raw_attr); + + elog->raw_attr.attr.name = "raw"; + elog->raw_attr.attr.mode = 0400; + elog->raw_attr.size = size; + elog->raw_attr.read = raw_attr_read; + + elog->id = id; + elog->size = size; + elog->type = type; + + elog->buffer = kzalloc(elog->size, GFP_KERNEL); + + if (elog->buffer) { + rc = opal_read_elog(__pa(elog->buffer), + elog->size, elog->id); + if (rc != OPAL_SUCCESS) { + pr_err("ELOG: log read failed for log-id=%llx\n", + elog->id); + kfree(elog->buffer); + elog->buffer = NULL; + } + } + + rc = kobject_add(&elog->kobj, NULL, "0x%llx", id); + if (rc) { + kobject_put(&elog->kobj); + return NULL; + } + + rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr); + if (rc) { + kobject_put(&elog->kobj); + return NULL; + } + + kobject_uevent(&elog->kobj, KOBJ_ADD); + + return elog; +} + +static void elog_work_fn(struct work_struct *work) +{ + __be64 size; + __be64 id; + __be64 type; + uint64_t elog_size; + uint64_t log_id; + uint64_t elog_type; + int rc; + char name[2+16+1]; + + rc = opal_get_elog_size(&id, &size, &type); + if (rc != OPAL_SUCCESS) { + pr_err("ELOG: Opal log read failed\n"); + return; + } + + elog_size = be64_to_cpu(size); + log_id = be64_to_cpu(id); + elog_type = be64_to_cpu(type); + + BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); + + if (elog_size >= OPAL_MAX_ERRLOG_SIZE) + elog_size = OPAL_MAX_ERRLOG_SIZE; + + sprintf(name, "0x%llx", log_id); + + /* we may get notified twice, let's handle + * that gracefully and not create two conflicting + * entries. + */ + if (kset_find_obj(elog_kset, name)) + return; + + create_elog_obj(log_id, elog_size, elog_type); +} + +static DECLARE_WORK(elog_work, elog_work_fn); + +static int elog_event(struct notifier_block *nb, + unsigned long events, void *change) +{ + /* check for error log event */ + if (events & OPAL_EVENT_ERROR_LOG_AVAIL) + schedule_work(&elog_work); + return 0; +} + +static struct notifier_block elog_nb = { + .notifier_call = elog_event, + .next = NULL, + .priority = 0 +}; + +int __init opal_elog_init(void) +{ + int rc = 0; + + elog_kset = kset_create_and_add("elog", NULL, opal_kobj); + if (!elog_kset) { + pr_warn("%s: failed to create elog kset\n", __func__); + return -1; + } + + rc = opal_notifier_register(&elog_nb); + if (rc) { + pr_err("%s: Can't register OPAL event notifier (%d)\n", + __func__, rc); + return rc; + } + + /* We are now ready to pull error logs from opal. 
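The opal_resend_pending_logs() call below asks firmware to notify us again about any logs that were already outstanding. 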
*/ + opal_resend_pending_logs(); + + return 0; +} --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-flash.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-flash.c @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -76,11 +77,8 @@ /* Validate buffer size */ #define VALIDATE_BUF_SIZE 4096 -/* XXX: Assume candidate image size is <= 256MB */ -#define MAX_IMAGE_SIZE 0x10000000 - -/* Flash sg list version */ -#define SG_LIST_VERSION (1UL) +/* XXX: Assume candidate image size is <= 1GB */ +#define MAX_IMAGE_SIZE 0x40000000 /* Image status */ enum { @@ -103,27 +101,6 @@ uint32_t size; }; -/* Scatter/gather entry */ -struct opal_sg_entry { - void *data; - long length; -}; - -/* We calculate number of entries based on PAGE_SIZE */ -#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) - -/* - * This struct is very similar but not identical to that - * needed by the opal flash update. All we need to do for - * opal is rewrite num_entries into a version/length and - * translate the pointers to absolute. - */ -struct opal_sg_list { - unsigned long num_entries; - struct opal_sg_list *next; - struct opal_sg_entry entry[SG_ENTRIES_PER_NODE]; -}; - struct validate_flash_t { int status; /* Return status */ void *buf; /* Candiate image buffer */ @@ -152,11 +129,16 @@ */ static inline void opal_flash_validate(void) { - struct validate_flash_t *args_buf = &validate_flash_data; - - args_buf->status = opal_validate_flash(__pa(args_buf->buf), - &(args_buf->buf_size), - &(args_buf->result)); + long ret; + void *buf = validate_flash_data.buf; + __be32 size = cpu_to_be32(validate_flash_data.buf_size); + __be32 result; + + ret = opal_validate_flash(__pa(buf), &size, &result); + + validate_flash_data.status = ret; + validate_flash_data.buf_size = be32_to_cpu(size); + validate_flash_data.result = be32_to_cpu(result); } /* @@ -289,94 +271,11 @@ } /* - * Free sg list - */ -static void free_sg_list(struct opal_sg_list *list) -{ - struct opal_sg_list *sg1; - while (list) { - sg1 = list->next; - kfree(list); - list = sg1; - } - list = NULL; -} - -/* - * Build candidate image scatter gather list - * - * list format: - * ----------------------------------- - * | VER (8) | Entry length in bytes | - * ----------------------------------- - * | Pointer to next entry | - * ----------------------------------- - * | Address of memory area 1 | - * ----------------------------------- - * | Length of memory area 1 | - * ----------------------------------- - * | ......... | - * ----------------------------------- - * | ......... 
| - * ----------------------------------- - * | Address of memory area N | - * ----------------------------------- - * | Length of memory area N | - * ----------------------------------- - */ -static struct opal_sg_list *image_data_to_sglist(void) -{ - struct opal_sg_list *sg1, *list = NULL; - void *addr; - int size; - - addr = image_data.data; - size = image_data.size; - - sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL); - if (!sg1) - return NULL; - - list = sg1; - sg1->num_entries = 0; - while (size > 0) { - /* Translate virtual address to physical address */ - sg1->entry[sg1->num_entries].data = - (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT); - - if (size > PAGE_SIZE) - sg1->entry[sg1->num_entries].length = PAGE_SIZE; - else - sg1->entry[sg1->num_entries].length = size; - - sg1->num_entries++; - if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { - sg1->next = kzalloc((sizeof(struct opal_sg_list)), - GFP_KERNEL); - if (!sg1->next) { - pr_err("%s : Failed to allocate memory\n", - __func__); - goto nomem; - } - - sg1 = sg1->next; - sg1->num_entries = 0; - } - addr += PAGE_SIZE; - size -= PAGE_SIZE; - } - return list; -nomem: - free_sg_list(list); - return NULL; -} - -/* * OPAL update flash */ static int opal_flash_update(int op) { - struct opal_sg_list *sg, *list, *next; + struct opal_sg_list *list; unsigned long addr; int64_t rc = OPAL_PARAMETER; @@ -386,32 +285,13 @@ goto flash; } - list = image_data_to_sglist(); + list = opal_vmalloc_to_sg_list(image_data.data, image_data.size); if (!list) goto invalid_img; /* First entry address */ addr = __pa(list); - /* Translate sg list address to absolute */ - for (sg = list; sg; sg = next) { - next = sg->next; - /* Don't translate NULL pointer for last entry */ - if (sg->next) - sg->next = (struct opal_sg_list *)__pa(sg->next); - else - sg->next = NULL; - - /* Make num_entries into the version/length field */ - sg->num_entries = (SG_LIST_VERSION << 56) | - (sg->num_entries * sizeof(struct opal_sg_entry) + 16); - } - - pr_alert("FLASH: Image is %u bytes\n", image_data.size); - pr_alert("FLASH: Image update requested\n"); - pr_alert("FLASH: Image will be updated during system reboot\n"); - pr_alert("FLASH: This will take several minutes. Do not power off!\n"); - flash: rc = opal_update_flash(addr); @@ -419,6 +299,47 @@ return rc; } +/* Return CPUs to OPAL before starting FW update */ +static void flash_return_cpu(void *info) +{ + int cpu = smp_processor_id(); + + if (!cpu_online(cpu)) + return; + + /* Disable IRQ */ + hard_irq_disable(); + + /* Return the CPU to OPAL */ + opal_return_cpu(); +} + +/* This gets called just before system reboots */ +void opal_flash_term_callback(void) +{ + struct cpumask mask; + + if (update_flash_data.status != FLASH_IMG_READY) + return; + + pr_alert("FLASH: Flashing new firmware\n"); + pr_alert("FLASH: Image is %u bytes\n", image_data.size); + pr_alert("FLASH: Performing flash and reboot/shutdown\n"); + pr_alert("FLASH: This will take several minutes. 
Do not power off!\n"); + + /* Small delay to help getting the above message out */ + msleep(500); + + /* Return secondary CPUs to firmware */ + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + if (!cpumask_empty(&mask)) + smp_call_function_many(&mask, + flash_return_cpu, NULL, false); + /* Hard disable interrupts */ + hard_irq_disable(); +} + /* * Show candidate image status */ --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-msglog.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-msglog.c @@ -0,0 +1,120 @@ +/* + * PowerNV OPAL in-memory console interface + * + * Copyright 2014 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +/* OPAL in-memory console. Defined in OPAL source at core/console.c */ +struct memcons { + __be64 magic; +#define MEMCONS_MAGIC 0x6630696567726173L + __be64 obuf_phys; + __be64 ibuf_phys; + __be32 obuf_size; + __be32 ibuf_size; + __be32 out_pos; +#define MEMCONS_OUT_POS_WRAP 0x80000000u +#define MEMCONS_OUT_POS_MASK 0x00ffffffu + __be32 in_prod; + __be32 in_cons; +}; + +static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, + struct bin_attribute *bin_attr, char *to, + loff_t pos, size_t count) +{ + struct memcons *mc = bin_attr->private; + const char *conbuf; + size_t ret, first_read = 0; + uint32_t out_pos, avail; + + if (!mc) + return -ENODEV; + + out_pos = be32_to_cpu(ACCESS_ONCE(mc->out_pos)); + + /* Now we've read out_pos, put a barrier in before reading the new + * data it points to in conbuf. */ + smp_rmb(); + + conbuf = phys_to_virt(be64_to_cpu(mc->obuf_phys)); + + /* When the buffer has wrapped, read from the out_pos marker to the end + * of the buffer, and then read the remaining data as in the un-wrapped + * case. */ + if (out_pos & MEMCONS_OUT_POS_WRAP) { + + out_pos &= MEMCONS_OUT_POS_MASK; + avail = be32_to_cpu(mc->obuf_size) - out_pos; + + ret = memory_read_from_buffer(to, count, &pos, + conbuf + out_pos, avail); + + if (ret < 0) + goto out; + + first_read = ret; + to += first_read; + count -= first_read; + pos -= avail; + } + + /* Sanity check. The firmware should not do this to us. */ + if (out_pos > be32_to_cpu(mc->obuf_size)) { + pr_err("OPAL: memory console corruption. 
Aborting read.\n"); + return -EINVAL; + } + + ret = memory_read_from_buffer(to, count, &pos, conbuf, out_pos); + + if (ret < 0) + goto out; + + ret += first_read; +out: + return ret; +} + +static struct bin_attribute opal_msglog_attr = { + .attr = {.name = "msglog", .mode = 0444}, + .read = opal_msglog_read +}; + +void __init opal_msglog_init(void) +{ + u64 mcaddr; + struct memcons *mc; + + if (of_property_read_u64(opal_node, "ibm,opal-memcons", &mcaddr)) { + pr_warn("OPAL: Property ibm,opal-memcons not found, no message log\n"); + return; + } + + mc = phys_to_virt(mcaddr); + if (!mc) { + pr_warn("OPAL: memory console address is invalid\n"); + return; + } + + if (be64_to_cpu(mc->magic) != MEMCONS_MAGIC) { + pr_warn("OPAL: memory console version is invalid\n"); + return; + } + + opal_msglog_attr.private = mc; + + if (sysfs_create_bin_file(opal_kobj, &opal_msglog_attr) != 0) + pr_warn("OPAL: sysfs file creation failed\n"); +} --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-sensor.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-sensor.c @@ -0,0 +1,66 @@ +/* + * PowerNV sensor code + * + * Copyright (C) 2013 IBM + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include + +static DEFINE_MUTEX(opal_sensor_mutex); + +/* + * This will return sensor information to driver based on the requested sensor + * handle. A handle is an opaque id for the powernv, read by the driver from the + * device tree.. 
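+ * The read completes asynchronously: an async token is taken, the
+ * OPAL_SENSOR_READ call is issued, and we block on the matching
+ * completion message before returning the data.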
+ */ +int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data) +{ + int ret, token; + struct opal_msg msg; + __be32 data; + + token = opal_async_get_token_interruptible(); + if (token < 0) { + pr_err("%s: Couldn't get the token, returning\n", __func__); + ret = token; + goto out; + } + + mutex_lock(&opal_sensor_mutex); + ret = opal_sensor_read(sensor_hndl, token, &data); + if (ret != OPAL_ASYNC_COMPLETION) + goto out_token; + + ret = opal_async_wait_response(token, &msg); + if (ret) { + pr_err("%s: Failed to wait for the async response, %d\n", + __func__, ret); + goto out_token; + } + + *sensor_data = be32_to_cpu(data); + ret = be64_to_cpu(msg.params[1]); + +out_token: + mutex_unlock(&opal_sensor_mutex); + opal_async_release_token(token); +out: + return ret; +} +EXPORT_SYMBOL_GPL(opal_get_sensor_data); --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-wrappers.S +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -123,6 +123,24 @@ OPAL_CALL(opal_lpc_read, OPAL_LPC_READ); OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE); OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU); +OPAL_CALL(opal_reinit_cpus, OPAL_REINIT_CPUS); +OPAL_CALL(opal_read_elog, OPAL_ELOG_READ); +OPAL_CALL(opal_send_ack_elog, OPAL_ELOG_ACK); +OPAL_CALL(opal_get_elog_size, OPAL_ELOG_SIZE); +OPAL_CALL(opal_resend_pending_logs, OPAL_ELOG_RESEND); +OPAL_CALL(opal_write_elog, OPAL_ELOG_WRITE); OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); +OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); +OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO); +OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2); +OPAL_CALL(opal_dump_read, OPAL_DUMP_READ); +OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK); +OPAL_CALL(opal_get_msg, OPAL_GET_MSG); +OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION); +OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND); +OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT); +OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ); +OPAL_CALL(opal_get_param, OPAL_GET_PARAM); +OPAL_CALL(opal_set_param, OPAL_SET_PARAM); --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal-xscom.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal-xscom.c @@ -71,11 +71,11 @@ } } -static u64 opal_scom_unmangle(u64 reg) +static u64 opal_scom_unmangle(u64 addr) { /* * XSCOM indirect addresses have the top bit set. Additionally - * the reset of the top 3 nibbles is always 0. + * the rest of the top 3 nibbles is always 0. * * Because the debugfs interface uses signed offsets and shifts * the address left by 3, we basically cannot use the top 4 bits @@ -86,10 +86,13 @@ * conversion here. 
To leave room for further xscom address * expansion, we only clear out the top byte * + * For in-kernel use, we also support the real indirect bit, so + * we test for any of the top 5 bits + * */ - if (reg & (1ull << 59)) - reg = (reg & ~(0xffull << 56)) | (1ull << 63); - return reg; + if (addr & (0x1full << 59)) + addr = (addr & ~(0xffull << 56)) | (1ull << 63); + return addr; } static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) @@ -98,8 +101,8 @@ int64_t rc; __be64 v; - reg = opal_scom_unmangle(reg); - rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); + reg = opal_scom_unmangle(m->addr + reg); + rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v)); *value = be64_to_cpu(v); return opal_xscom_err_xlate(rc); } @@ -109,8 +112,8 @@ struct opal_scom_map *m = map; int64_t rc; - reg = opal_scom_unmangle(reg); - rc = opal_xscom_write(m->chip, m->addr + reg, value); + reg = opal_scom_unmangle(m->addr + reg); + rc = opal_xscom_write(m->chip, reg, value); return opal_xscom_err_xlate(rc); } --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/opal.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/opal.c @@ -18,9 +18,12 @@ #include #include #include +#include #include +#include #include #include +#include #include "powernv.h" @@ -30,40 +33,70 @@ struct opal { u64 base; u64 entry; + u64 size; } opal; -static struct device_node *opal_node; +struct mcheck_recoverable_range { + u64 start_addr; + u64 end_addr; + u64 recover_addr; +}; + +static struct mcheck_recoverable_range *mc_recoverable_range; +static int mc_recoverable_range_len; + +struct device_node *opal_node; static DEFINE_SPINLOCK(opal_write_lock); extern u64 opal_mc_secondary_handler[]; static unsigned int *opal_irqs; static unsigned int opal_irq_count; static ATOMIC_NOTIFIER_HEAD(opal_notifier_head); +static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX]; static DEFINE_SPINLOCK(opal_notifier_lock); static uint64_t last_notified_mask = 0x0ul; static atomic_t opal_notifier_hold = ATOMIC_INIT(0); +static void opal_reinit_cores(void) +{ + /* Do the actual re-init, This will clobber all FPRs, VRs, etc... + * + * It will preserve non volatile GPRs and HSPRG0/1. It will + * also restore HIDs and other SPRs to their original value + * but it might clobber a bunch. 
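+ * The OPAL_REINIT_CPUS_HILE_* flag passed below simply selects the
+ * interrupt endianness matching the endian this kernel was built for.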
+ */ +#ifdef __BIG_ENDIAN__ + opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE); +#else + opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_LE); +#endif +} + int __init early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data) { - const void *basep, *entryp; - unsigned long basesz, entrysz; + const void *basep, *entryp, *sizep; + unsigned long basesz, entrysz, runtimesz; if (depth != 1 || strcmp(uname, "ibm,opal") != 0) return 0; basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz); entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz); + sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz); - if (!basep || !entryp) + if (!basep || !entryp || !sizep) return 1; opal.base = of_read_number(basep, basesz/4); opal.entry = of_read_number(entryp, entrysz/4); + opal.size = of_read_number(sizep, runtimesz/4); pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n", opal.base, basep, basesz); pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n", opal.entry, entryp, entrysz); + pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%ld)\n", + opal.size, sizep, runtimesz); powerpc_firmware_features |= FW_FEATURE_OPAL; if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { @@ -77,6 +110,72 @@ printk("OPAL V1 detected !\n"); } + /* Reinit all cores with the right endian */ + opal_reinit_cores(); + + /* Restore some bits */ + if (cur_cpu_spec->cpu_restore) + cur_cpu_spec->cpu_restore(); + + return 1; +} + +int __init early_init_dt_scan_recoverable_ranges(unsigned long node, + const char *uname, int depth, void *data) +{ + unsigned long i, psize, size; + const __be32 *prop; + + if (depth != 1 || strcmp(uname, "ibm,opal") != 0) + return 0; + + prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize); + + if (!prop) + return 1; + + pr_debug("Found machine check recoverable ranges.\n"); + + /* + * Calculate number of available entries. + * + * Each recoverable address range entry is (start address, len, + * recovery address), 2 cells each for start and recovery address, + * 1 cell for len, totalling 5 cells per entry. + */ + mc_recoverable_range_len = psize / (sizeof(*prop) * 5); + + /* Sanity check */ + if (!mc_recoverable_range_len) + return 1; + + /* Size required to hold all the entries. */ + size = mc_recoverable_range_len * + sizeof(struct mcheck_recoverable_range); + + /* + * Allocate a buffer to hold the MC recoverable ranges. We would be + * accessing them in real mode, hence it needs to be within + * RMO region. + */ + mc_recoverable_range =__va(memblock_alloc_base(size, __alignof__(u64), + ppc64_rma_size)); + memset(mc_recoverable_range, 0, size); + + for (i = 0; i < mc_recoverable_range_len; i++) { + mc_recoverable_range[i].start_addr = + of_read_number(prop + (i * 5) + 0, 2); + mc_recoverable_range[i].end_addr = + mc_recoverable_range[i].start_addr + + of_read_number(prop + (i * 5) + 2, 1); + mc_recoverable_range[i].recover_addr = + of_read_number(prop + (i * 5) + 3, 2); + + pr_debug("Machine check recoverable range: %llx..%llx: %llx\n", + mc_recoverable_range[i].start_addr, + mc_recoverable_range[i].end_addr, + mc_recoverable_range[i].recover_addr); + } return 1; } @@ -88,14 +187,10 @@ if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) return -ENODEV; - /* Hookup some exception handlers. We use the fwnmi area at 0x7000 - * to provide the glue space to OPAL + /* Hookup some exception handlers except machine check. 
We use the + * fwnmi area at 0x7000 to provide the glue space to OPAL */ glue = 0x7000; - opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER, - __pa(opal_mc_secondary_handler[0]), - glue); - glue += 128; opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 0, glue); glue += 128; @@ -154,14 +249,14 @@ void opal_notifier_enable(void) { int64_t rc; - uint64_t evt = 0; + __be64 evt = 0; atomic_set(&opal_notifier_hold, 0); /* Process pending events */ rc = opal_poll_events(&evt); if (rc == OPAL_SUCCESS && evt) - opal_do_notifier(evt); + opal_do_notifier(be64_to_cpu(evt)); } void opal_notifier_disable(void) @@ -169,6 +264,95 @@ atomic_set(&opal_notifier_hold, 1); } +/* + * Opal message notifier based on message type. Allow subscribers to get + * notified for a specific message type. + */ +int opal_message_notifier_register(enum OpalMessageType msg_type, + struct notifier_block *nb) +{ + if (!nb) { + pr_warning("%s: Invalid argument (%p)\n", + __func__, nb); + return -EINVAL; + } + if (msg_type > OPAL_MSG_TYPE_MAX) { + pr_warning("%s: Invalid message type argument (%d)\n", + __func__, msg_type); + return -EINVAL; + } + return atomic_notifier_chain_register( + &opal_msg_notifier_head[msg_type], nb); + } + +static void opal_message_do_notify(uint32_t msg_type, void *msg) +{ + /* notify subscribers */ + atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type], + msg_type, msg); +} + +static void opal_handle_message(void) +{ + s64 ret; + /* + * TODO: pre-allocate a message buffer depending on opal-msg-size + * value in /proc/device-tree. + */ + static struct opal_msg msg; + + ret = opal_get_msg(__pa(&msg), sizeof(msg)); + /* No opal message pending. */ + if (ret == OPAL_RESOURCE) + return; + + /* check for errors. */ + if (ret) { + pr_warning("%s: Failed to retrieve opal message, err=%lld\n", + __func__, ret); + return; + } + + /* Sanity check */ + if (msg.msg_type > OPAL_MSG_TYPE_MAX) { + pr_warning("%s: Unknown message type: %u\n", + __func__, msg.msg_type); + return; + } + opal_message_do_notify(msg.msg_type, (void *)&msg); +} + +static int opal_message_notify(struct notifier_block *nb, + unsigned long events, void *change) +{ + if (events & OPAL_EVENT_MSG_PENDING) + opal_handle_message(); + return 0; +} + +static struct notifier_block opal_message_nb = { + .notifier_call = opal_message_notify, + .next = NULL, + .priority = 0, +}; + +static int __init opal_message_init(void) +{ + int ret, i; + + for (i = 0; i < OPAL_MSG_TYPE_MAX; i++) + ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]); + + ret = opal_notifier_register(&opal_message_nb); + if (ret) { + pr_err("%s: Can't register OPAL event notifier (%d)\n", + __func__, ret); + return ret; + } + return 0; +} +early_initcall(opal_message_init); + int opal_get_chars(uint32_t vtermno, char *buf, int count) { s64 rc; @@ -254,119 +438,94 @@ return written; } +static int opal_recover_mce(struct pt_regs *regs, + struct machine_check_event *evt) +{ + int recovered = 0; + uint64_t ea = get_mce_fault_addr(evt); + + if (!(regs->msr & MSR_RI)) { + /* If MSR_RI isn't set, we cannot recover */ + recovered = 0; + } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { + /* Platform corrected itself */ + recovered = 1; + } else if (ea && !is_kernel_addr(ea)) { + /* + * Faulting address is not in kernel text. We should be fine. + * We need to find which process uses this address. + * For now, kill the task if we received the exception while + * in userspace. + * + * TODO: Queue up this address for hwpoisoning later. 
+ */ + if (user_mode(regs) && !is_global_init(current)) { + _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); + recovered = 1; + } else + recovered = 0; + } else if (user_mode(regs) && !is_global_init(current) && + evt->severity == MCE_SEV_ERROR_SYNC) { + /* + * If we have received a synchronous error when in userspace + * kill the task. + */ + _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); + recovered = 1; + } + return recovered; +} + int opal_machine_check(struct pt_regs *regs) { - struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt; - struct opal_machine_check_event evt; - const char *level, *sevstr, *subtype; - static const char *opal_mc_ue_types[] = { - "Indeterminate", - "Instruction fetch", - "Page table walk ifetch", - "Load/Store", - "Page table walk Load/Store", - }; - static const char *opal_mc_slb_types[] = { - "Indeterminate", - "Parity", - "Multihit", - }; - static const char *opal_mc_erat_types[] = { - "Indeterminate", - "Parity", - "Multihit", - }; - static const char *opal_mc_tlb_types[] = { - "Indeterminate", - "Parity", - "Multihit", - }; - - /* Copy the event structure and release the original */ - evt = *opal_evt; - opal_evt->in_use = 0; + struct machine_check_event evt; + + if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) + return 0; /* Print things out */ - if (evt.version != OpalMCE_V1) { + if (evt.version != MCE_V1) { pr_err("Machine Check Exception, Unknown event version %d !\n", evt.version); return 0; } - switch(evt.severity) { - case OpalMCE_SEV_NO_ERROR: - level = KERN_INFO; - sevstr = "Harmless"; - break; - case OpalMCE_SEV_WARNING: - level = KERN_WARNING; - sevstr = ""; - break; - case OpalMCE_SEV_ERROR_SYNC: - level = KERN_ERR; - sevstr = "Severe"; - break; - case OpalMCE_SEV_FATAL: - default: - level = KERN_ERR; - sevstr = "Fatal"; - break; - } - - printk("%s%s Machine check interrupt [%s]\n", level, sevstr, - evt.disposition == OpalMCE_DISPOSITION_RECOVERED ? - "Recovered" : "[Not recovered"); - printk("%s Initiator: %s\n", level, - evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown"); - switch(evt.error_type) { - case OpalMCE_ERROR_TYPE_UE: - subtype = evt.u.ue_error.ue_error_type < - ARRAY_SIZE(opal_mc_ue_types) ? - opal_mc_ue_types[evt.u.ue_error.ue_error_type] - : "Unknown"; - printk("%s Error type: UE [%s]\n", level, subtype); - if (evt.u.ue_error.effective_address_provided) - printk("%s Effective address: %016llx\n", - level, evt.u.ue_error.effective_address); - if (evt.u.ue_error.physical_address_provided) - printk("%s Physial address: %016llx\n", - level, evt.u.ue_error.physical_address); - break; - case OpalMCE_ERROR_TYPE_SLB: - subtype = evt.u.slb_error.slb_error_type < - ARRAY_SIZE(opal_mc_slb_types) ? - opal_mc_slb_types[evt.u.slb_error.slb_error_type] - : "Unknown"; - printk("%s Error type: SLB [%s]\n", level, subtype); - if (evt.u.slb_error.effective_address_provided) - printk("%s Effective address: %016llx\n", - level, evt.u.slb_error.effective_address); - break; - case OpalMCE_ERROR_TYPE_ERAT: - subtype = evt.u.erat_error.erat_error_type < - ARRAY_SIZE(opal_mc_erat_types) ? - opal_mc_erat_types[evt.u.erat_error.erat_error_type] - : "Unknown"; - printk("%s Error type: ERAT [%s]\n", level, subtype); - if (evt.u.erat_error.effective_address_provided) - printk("%s Effective address: %016llx\n", - level, evt.u.erat_error.effective_address); - break; - case OpalMCE_ERROR_TYPE_TLB: - subtype = evt.u.tlb_error.tlb_error_type < - ARRAY_SIZE(opal_mc_tlb_types) ? 
- opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type] - : "Unknown"; - printk("%s Error type: TLB [%s]\n", level, subtype); - if (evt.u.tlb_error.effective_address_provided) - printk("%s Effective address: %016llx\n", - level, evt.u.tlb_error.effective_address); - break; - default: - case OpalMCE_ERROR_TYPE_UNKNOWN: - printk("%s Error type: Unknown\n", level); - break; - } - return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1; + machine_check_print_event_info(&evt); + + if (opal_recover_mce(regs, &evt)) + return 1; + return 0; +} + +static uint64_t find_recovery_address(uint64_t nip) +{ + int i; + + for (i = 0; i < mc_recoverable_range_len; i++) + if ((nip >= mc_recoverable_range[i].start_addr) && + (nip < mc_recoverable_range[i].end_addr)) + return mc_recoverable_range[i].recover_addr; + return 0; +} + +bool opal_mce_check_early_recovery(struct pt_regs *regs) +{ + uint64_t recover_addr = 0; + + if (!opal.base || !opal.size) + goto out; + + if ((regs->nip >= opal.base) && + (regs->nip <= (opal.base + opal.size))) + recover_addr = find_recovery_address(regs->nip); + + /* + * Setup regs->nip to rfi into fixup address. + */ + if (recover_addr) + regs->nip = recover_addr; + +out: + return !!recover_addr; } static irqreturn_t opal_interrupt(int irq, void *data) @@ -375,7 +534,7 @@ opal_handle_interrupt(virq_to_hw(irq), &events); - opal_do_notifier(events); + opal_do_notifier(be64_to_cpu(events)); return IRQ_HANDLED; } @@ -440,8 +599,14 @@ /* Create "opal" kobject under /sys/firmware */ rc = opal_sysfs_init(); if (rc == 0) { + /* Setup error log interface */ + rc = opal_elog_init(); /* Setup code update interface */ opal_flash_init(); + /* Setup platform dump extract interface */ + opal_platform_dump_init(); + /* Setup message log interface. */ + opal_msglog_init(); } return 0; @@ -458,3 +623,66 @@ opal_irqs[i] = 0; } } + +/* Convert a region of vmalloc memory to an opal sg list */ +struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, + unsigned long vmalloc_size) +{ + struct opal_sg_list *sg, *first = NULL; + unsigned long i = 0; + + sg = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!sg) + goto nomem; + + first = sg; + + while (vmalloc_size > 0) { + uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT; + uint64_t length = min(vmalloc_size, PAGE_SIZE); + + sg->entry[i].data = cpu_to_be64(data); + sg->entry[i].length = cpu_to_be64(length); + i++; + + if (i >= SG_ENTRIES_PER_NODE) { + struct opal_sg_list *next; + + next = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!next) + goto nomem; + + sg->length = cpu_to_be64( + i * sizeof(struct opal_sg_entry) + 16); + i = 0; + sg->next = cpu_to_be64(__pa(next)); + sg = next; + } + + vmalloc_addr += length; + vmalloc_size -= length; + } + + sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16); + + return first; + +nomem: + pr_err("%s : Failed to allocate memory\n", __func__); + opal_free_sg_list(first); + return NULL; +} + +void opal_free_sg_list(struct opal_sg_list *sg) +{ + while (sg) { + uint64_t next = be64_to_cpu(sg->next); + + kfree(sg); + + if (next) + sg = __va(next); + else + sg = NULL; + } +} --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/pci-ioda.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/pci-ioda.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -460,9 +461,39 @@ return; pe = &phb->ioda.pe_array[pdn->pe_number]; + WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); set_iommu_table_base(&pdev->dev, &pe->tce32_table); } +static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, + struct pci_dev 
*pdev, u64 dma_mask) +{ + struct pci_dn *pdn = pci_get_pdn(pdev); + struct pnv_ioda_pe *pe; + uint64_t top; + bool bypass = false; + + if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) + return -ENODEV;; + + pe = &phb->ioda.pe_array[pdn->pe_number]; + if (pe->tce_bypass_enabled) { + top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; + bypass = (dma_mask >= top); + } + + if (bypass) { + dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); + set_dma_ops(&pdev->dev, &dma_direct_ops); + set_dma_offset(&pdev->dev, pe->tce_bypass_base); + } else { + dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); + set_dma_ops(&pdev->dev, &dma_iommu_ops); + set_iommu_table_base(&pdev->dev, &pe->tce32_table); + } + return 0; +} + static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; @@ -657,6 +688,56 @@ __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); } +static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) +{ + struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, + tce32_table); + uint16_t window_id = (pe->pe_number << 1 ) + 1; + int64_t rc; + + pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); + if (enable) { + phys_addr_t top = memblock_end_of_DRAM(); + + top = roundup_pow_of_two(top); + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + top); + } else { + rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, + pe->pe_number, + window_id, + pe->tce_bypass_base, + 0); + + /* + * We might want to reset the DMA ops of all devices on + * this PE. However in theory, that shouldn't be necessary + * as this is used for VFIO/KVM pass-through and the device + * hasn't yet been returned to its kernel driver + */ + } + if (rc) + pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); + else + pe->tce_bypass_enabled = enable; +} + +static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, + struct pnv_ioda_pe *pe) +{ + /* TVE #1 is selected by PCI address bit 59 */ + pe->tce_bypass_base = 1ull << 59; + + /* Install set_bypass callback for VFIO */ + pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass; + + /* Enable bypass by default */ + pnv_pci_ioda2_set_bypass(&pe->tce32_table, true); +} + static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { @@ -727,6 +808,8 @@ else pnv_ioda_setup_bus_dma(pe, pe->pbus); + /* Also create a bypass window */ + pnv_pci_ioda2_setup_bypass_pe(phb, pe); return; fail: if (pe->tce32_seg >= 0) @@ -818,7 +901,6 @@ unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); - struct pci_dn *pdn = pci_get_pdn(dev); struct irq_data *idata; struct irq_chip *ichip; unsigned int xive_num = hwirq - phb->msi_base; @@ -834,7 +916,7 @@ return -ENXIO; /* Force 32-bit MSI on some broken devices */ - if (pdn && pdn->force_32bit_msi) + if (dev->no_64bit_msi) is_64 = 0; /* Assign XIVE to PE */ @@ -1287,6 +1369,7 @@ /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; + phb->dma_set_mask = pnv_pci_ioda_dma_set_mask; /* Setup shutdown function for kexec */ phb->shutdown = pnv_pci_ioda_shutdown; --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/pci.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/pci.c @@ -50,9 +50,8 @@ { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(pdev); - if (pdn && pdn->force_32bit_msi && !phb->msi32_support) + if 
(pdev->no_64bit_msi && !phb->msi32_support) return -ENODEV; return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV; @@ -124,77 +123,220 @@ } #endif /* CONFIG_PCI_MSI */ -static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb) +static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, + struct OpalIoPhbErrorCommon *common) { - struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc; + struct OpalIoP7IOCPhbErrorData *data; int i; - pr_info("PHB %d diagnostic data:\n", phb->hose->global_number); + data = (struct OpalIoP7IOCPhbErrorData *)common; + pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", + hose->global_number, be32_to_cpu(common->version)); + + if (data->brdgCtl) + pr_info(" brdgCtl: %08x\n", + be32_to_cpu(data->brdgCtl)); + if (data->portStatusReg || data->rootCmplxStatus || + data->busAgentStatus) + pr_info(" UtlSts: %08x %08x %08x\n", + be32_to_cpu(data->portStatusReg), + be32_to_cpu(data->rootCmplxStatus), + be32_to_cpu(data->busAgentStatus)); + if (data->deviceStatus || data->slotStatus || + data->linkStatus || data->devCmdStatus || + data->devSecStatus) + pr_info(" RootSts: %08x %08x %08x %08x %08x\n", + be32_to_cpu(data->deviceStatus), + be32_to_cpu(data->slotStatus), + be32_to_cpu(data->linkStatus), + be32_to_cpu(data->devCmdStatus), + be32_to_cpu(data->devSecStatus)); + if (data->rootErrorStatus || data->uncorrErrorStatus || + data->corrErrorStatus) + pr_info(" RootErrSts: %08x %08x %08x\n", + be32_to_cpu(data->rootErrorStatus), + be32_to_cpu(data->uncorrErrorStatus), + be32_to_cpu(data->corrErrorStatus)); + if (data->tlpHdr1 || data->tlpHdr2 || + data->tlpHdr3 || data->tlpHdr4) + pr_info(" RootErrLog: %08x %08x %08x %08x\n", + be32_to_cpu(data->tlpHdr1), + be32_to_cpu(data->tlpHdr2), + be32_to_cpu(data->tlpHdr3), + be32_to_cpu(data->tlpHdr4)); + if (data->sourceId || data->errorClass || + data->correlator) + pr_info(" RootErrLog1: %08x %016llx %016llx\n", + be32_to_cpu(data->sourceId), + be64_to_cpu(data->errorClass), + be64_to_cpu(data->correlator)); + if (data->p7iocPlssr || data->p7iocCsr) + pr_info(" PhbSts: %016llx %016llx\n", + data->p7iocPlssr, data->p7iocCsr); + if (data->lemFir || data->lemErrorMask || + data->lemWOF) + pr_info(" Lem: %016llx %016llx %016llx\n", + be64_to_cpu(data->lemFir), + be64_to_cpu(data->lemErrorMask), + be64_to_cpu(data->lemWOF)); + if (data->phbErrorStatus || data->phbFirstErrorStatus || + data->phbErrorLog0 || data->phbErrorLog1) + pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->phbErrorStatus), + be64_to_cpu(data->phbFirstErrorStatus), + be64_to_cpu(data->phbErrorLog0), + be64_to_cpu(data->phbErrorLog1)); + if (data->mmioErrorStatus || data->mmioFirstErrorStatus || + data->mmioErrorLog0 || data->mmioErrorLog1) + pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->mmioErrorStatus), + be64_to_cpu(data->mmioFirstErrorStatus), + be64_to_cpu(data->mmioErrorLog0), + be64_to_cpu(data->mmioErrorLog1)); + if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || + data->dma0ErrorLog0 || data->dma0ErrorLog1) + pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->dma0ErrorStatus), + be64_to_cpu(data->dma0FirstErrorStatus), + be64_to_cpu(data->dma0ErrorLog0), + be64_to_cpu(data->dma0ErrorLog1)); + if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || + data->dma1ErrorLog0 || data->dma1ErrorLog1) + pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->dma1ErrorStatus), + be64_to_cpu(data->dma1FirstErrorStatus), + 
be64_to_cpu(data->dma1ErrorLog0), + be64_to_cpu(data->dma1ErrorLog1)); - pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl); + for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { + if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && + (be64_to_cpu(data->pestB[i]) >> 63) == 0) + continue; - pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg); - pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus); - pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus); - - pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus); - pr_info(" slotStatus = 0x%08x\n", data->slotStatus); - pr_info(" linkStatus = 0x%08x\n", data->linkStatus); - pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus); - pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus); - - pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus); - pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus); - pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus); - pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1); - pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2); - pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3); - pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4); - pr_info(" sourceId = 0x%08x\n", data->sourceId); - - pr_info(" errorClass = 0x%016llx\n", data->errorClass); - pr_info(" correlator = 0x%016llx\n", data->correlator); - - pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr); - pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr); - pr_info(" lemFir = 0x%016llx\n", data->lemFir); - pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask); - pr_info(" lemWOF = 0x%016llx\n", data->lemWOF); - pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus); - pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus); - pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0); - pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1); - pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus); - pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus); - pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0); - pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1); - pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus); - pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus); - pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0); - pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1); - pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus); - pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus); - pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0); - pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1); + pr_info(" PE[%3d] A/B: %016llx %016llx\n", + i, be64_to_cpu(data->pestA[i]), + be64_to_cpu(data->pestB[i])); + } +} - for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { - if ((data->pestA[i] >> 63) == 0 && - (data->pestB[i] >> 63) == 0) +static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, + struct OpalIoPhbErrorCommon *common) +{ + struct OpalIoPhb3ErrorData *data; + int i; + + data = (struct OpalIoPhb3ErrorData*)common; + pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", + hose->global_number, be32_to_cpu(common->version)); + if (data->brdgCtl) + pr_info(" brdgCtl: %08x\n", + be32_to_cpu(data->brdgCtl)); + if (data->portStatusReg || data->rootCmplxStatus || + data->busAgentStatus) + pr_info(" UtlSts: %08x %08x %08x\n", + be32_to_cpu(data->portStatusReg), + be32_to_cpu(data->rootCmplxStatus), + be32_to_cpu(data->busAgentStatus)); + if 
(data->deviceStatus || data->slotStatus || + data->linkStatus || data->devCmdStatus || + data->devSecStatus) + pr_info(" RootSts: %08x %08x %08x %08x %08x\n", + be32_to_cpu(data->deviceStatus), + be32_to_cpu(data->slotStatus), + be32_to_cpu(data->linkStatus), + be32_to_cpu(data->devCmdStatus), + be32_to_cpu(data->devSecStatus)); + if (data->rootErrorStatus || data->uncorrErrorStatus || + data->corrErrorStatus) + pr_info(" RootErrSts: %08x %08x %08x\n", + be32_to_cpu(data->rootErrorStatus), + be32_to_cpu(data->uncorrErrorStatus), + be32_to_cpu(data->corrErrorStatus)); + if (data->tlpHdr1 || data->tlpHdr2 || + data->tlpHdr3 || data->tlpHdr4) + pr_info(" RootErrLog: %08x %08x %08x %08x\n", + be32_to_cpu(data->tlpHdr1), be32_to_cpu(data->tlpHdr2), + be32_to_cpu(data->tlpHdr3), be32_to_cpu(data->tlpHdr4)); + if (data->sourceId || data->errorClass || + data->correlator) + pr_info(" RootErrLog1: %08x %016llx %016llx\n", + be32_to_cpu(data->sourceId), + be64_to_cpu(data->errorClass), + be64_to_cpu(data->correlator)); + if (data->nFir || data->nFirMask || + data->nFirWOF) + pr_info(" nFir: %016llx %016llx %016llx\n", + be64_to_cpu(data->nFir), + be64_to_cpu(data->nFirMask), + be64_to_cpu(data->nFirWOF)); + if (data->phbPlssr || data->phbCsr) + pr_info(" PhbSts: %016llx %016llx\n", + be64_to_cpu(data->phbPlssr), be64_to_cpu(data->phbCsr)); + if (data->lemFir || data->lemErrorMask || + data->lemWOF) + pr_info(" Lem: %016llx %016llx %016llx\n", + be64_to_cpu(data->lemFir), + be64_to_cpu(data->lemErrorMask), + be64_to_cpu(data->lemWOF)); + if (data->phbErrorStatus || data->phbFirstErrorStatus || + data->phbErrorLog0 || data->phbErrorLog1) + pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->phbErrorStatus), + be64_to_cpu(data->phbFirstErrorStatus), + be64_to_cpu(data->phbErrorLog0), + be64_to_cpu(data->phbErrorLog1)); + if (data->mmioErrorStatus || data->mmioFirstErrorStatus || + data->mmioErrorLog0 || data->mmioErrorLog1) + pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->mmioErrorStatus), + be64_to_cpu(data->mmioFirstErrorStatus), + be64_to_cpu(data->mmioErrorLog0), + be64_to_cpu(data->mmioErrorLog1)); + if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || + data->dma0ErrorLog0 || data->dma0ErrorLog1) + pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->dma0ErrorStatus), + be64_to_cpu(data->dma0FirstErrorStatus), + be64_to_cpu(data->dma0ErrorLog0), + be64_to_cpu(data->dma0ErrorLog1)); + if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || + data->dma1ErrorLog0 || data->dma1ErrorLog1) + pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", + be64_to_cpu(data->dma1ErrorStatus), + be64_to_cpu(data->dma1FirstErrorStatus), + be64_to_cpu(data->dma1ErrorLog0), + be64_to_cpu(data->dma1ErrorLog1)); + + for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { + if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && + (be64_to_cpu(data->pestB[i]) >> 63) == 0) continue; - pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]); - pr_info(" PESTB = 0x%016llx\n", data->pestB[i]); + + pr_info(" PE[%3d] A/B: %016llx %016llx\n", + i, be64_to_cpu(data->pestA[i]), + be64_to_cpu(data->pestB[i])); } } -static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb) +void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, + unsigned char *log_buff) { - switch(phb->model) { - case PNV_PHB_MODEL_P7IOC: - pnv_pci_dump_p7ioc_diag_data(phb); + struct OpalIoPhbErrorCommon *common; + + if (!hose || !log_buff) + return; + + common = (struct 
OpalIoPhbErrorCommon *)log_buff; + switch (be32_to_cpu(common->ioType)) { + case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: + pnv_pci_dump_p7ioc_diag_data(hose, common); + break; + case OPAL_PHB_ERROR_DATA_TYPE_PHB3: + pnv_pci_dump_phb3_diag_data(hose, common); break; default: - pr_warning("PCI %d: Can't decode this PHB diag data\n", - phb->hose->global_number); + pr_warn("%s: Unrecognized ioType %d\n", + __func__, be32_to_cpu(common->ioType)); } } @@ -222,7 +364,7 @@ * with the normal errors generated when probing empty slots */ if (has_diag) - pnv_pci_dump_phb_diag_data(phb); + pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob); else pr_warning("PCI %d: No diag data available\n", phb->hose->global_number); @@ -553,6 +695,16 @@ pnv_pci_dma_fallback_setup(hose, pdev); } +int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + struct pnv_phb *phb = hose->private_data; + + if (phb && phb->dma_set_mask) + return phb->dma_set_mask(phb, pdev, dma_mask); + return __dma_set_mask(&pdev->dev, dma_mask); +} + void pnv_pci_shutdown(void) { struct pci_controller *hose; --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/pci.h +++ linux-3.13.0/arch/powerpc/platforms/powernv/pci.h @@ -54,7 +54,9 @@ struct iommu_table tce32_table; phys_addr_t tce_inval_reg_phys; - /* XXX TODO: Add support for additional 64-bit iommus */ + /* 64-bit TCE bypass region */ + bool tce_bypass_enabled; + uint64_t tce_bypass_base; /* MSIs. MVE index is identical for for 32 and 64 bit MSI * and -1 if not supported. (It's actually identical to the @@ -113,6 +115,8 @@ unsigned int hwirq, unsigned int virq, unsigned int is_64, struct msi_msg *msg); void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); + int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev, + u64 dma_mask); void (*fixup_phb)(struct pci_controller *hose); u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); void (*shutdown)(struct pnv_phb *phb); @@ -177,6 +181,7 @@ unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; struct OpalIoP7IOCPhbErrorData p7ioc; struct OpalIoP7IOCErrorData hub_diag; + struct OpalIoPhb3ErrorData phb3; } diag; }; @@ -186,6 +191,8 @@ extern struct pnv_eeh_ops ioda_eeh_ops; #endif +void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, + unsigned char *log_buff); int pnv_pci_cfg_read(struct device_node *dn, int where, int size, u32 *val); int pnv_pci_cfg_write(struct device_node *dn, --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/powernv.h +++ linux-3.13.0/arch/powerpc/platforms/powernv/powernv.h @@ -7,12 +7,20 @@ static inline void pnv_smp_init(void) { } #endif +struct pci_dev; + #ifdef CONFIG_PCI extern void pnv_pci_init(void); extern void pnv_pci_shutdown(void); +extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask); #else static inline void pnv_pci_init(void) { } static inline void pnv_pci_shutdown(void) { } + +static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +{ + return -ENODEV; +} #endif extern void pnv_lpc_init(void); --- linux-3.13.0.orig/arch/powerpc/platforms/powernv/setup.c +++ linux-3.13.0/arch/powerpc/platforms/powernv/setup.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -97,11 +98,32 @@ of_node_put(root); } +static void pnv_prepare_going_down(void) +{ + /* + * Disable all notifiers from OPAL, we can't + * service interrupts anymore anyway + */ + opal_notifier_disable(); + + /* Soft disable interrupts */ + local_irq_disable(); + + /* + * Return secondary CPUs 
to firmware if a flash update
+	 * is pending, otherwise we will get all sorts of error
+	 * messages about CPU being stuck etc. This will also
+	 * have the side effect of hard disabling interrupts so
+	 * past this point, the kernel is effectively dead.
+	 */
+	opal_flash_term_callback();
+}
+
 static void __noreturn pnv_restart(char *cmd)
 {
 	long rc = OPAL_BUSY;
 
-	opal_notifier_disable();
+	pnv_prepare_going_down();
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_cec_reboot();
@@ -118,7 +140,7 @@
 {
 	long rc = OPAL_BUSY;
 
-	opal_notifier_disable();
+	pnv_prepare_going_down();
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_cec_power_down(0);
@@ -140,6 +162,13 @@
 {
 }
 
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev_is_pci(dev))
+		return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+	return __dma_set_mask(dev, dma_mask);
+}
+
 static void pnv_shutdown(void)
 {
 	/* Let the PCI code clear up IODA tables */
@@ -152,18 +181,62 @@
 }
 
 #ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+	int my_cpu, i, notified = -1;
+
+	my_cpu = get_cpu();
+
+	for_each_online_cpu(i) {
+		uint8_t status;
+		int64_t rc;
+
+		if (i == my_cpu)
+			continue;
+
+		for (;;) {
+			rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
						   &status);
+			if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+				break;
+			barrier();
+			if (i != notified) {
+				printk(KERN_INFO "kexec: waiting for cpu %d "
+				       "(physical %d) to enter OPAL\n",
+				       i, paca[i].hw_cpu_id);
+				notified = i;
+			}
+		}
+	}
+}
+
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
 	xics_kexec_teardown_cpu(secondary);
 
-	/* Return secondary CPUs to firmware on OPAL v3 */
-	if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+	/* On OPAL v3, we return all CPUs to firmware */
+
+	if (!firmware_has_feature(FW_FEATURE_OPALv3))
+		return;
+
+	if (secondary) {
+		/* Return secondary CPUs to firmware on OPAL v3 */
 		mb();
 		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
 		mb();
 
 		/* Return the CPU to OPAL */
 		opal_return_cpu();
+	} else if (crash_shutdown) {
+		/*
+		 * On crash, we don't wait for secondaries to go
+		 * down as they might be unreachable or hung, so
+		 * instead we just wait a bit and move on.
+ */ + mdelay(1); + } else { + /* Primary waits for the secondaries to have reached OPAL */ + pnv_kexec_wait_secondaries_down(); } } #endif /* CONFIG_KEXEC */ @@ -177,6 +250,7 @@ ppc_md.power_off = pnv_power_off; ppc_md.halt = pnv_halt; ppc_md.machine_check_exception = opal_machine_check; + ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery; } #ifdef CONFIG_PPC_POWERNV_RTAS @@ -225,6 +299,7 @@ .machine_shutdown = pnv_shutdown, .power_save = power7_idle, .calibrate_decr = generic_calibrate_decr, + .dma_set_mask = pnv_dma_set_mask, #ifdef CONFIG_KEXEC .kexec_cpu_down = pnv_kexec_cpu_down, #endif --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/eeh_pseries.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -464,6 +464,7 @@ } else { result = EEH_STATE_NOT_SUPPORT; } + break; default: result = EEH_STATE_NOT_SUPPORT; } --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -35,12 +35,7 @@ #include "offline_states.h" /* This version can't take the spinlock, because it never returns */ -static struct rtas_args rtas_stop_self_args = { - .token = RTAS_UNKNOWN_SERVICE, - .nargs = 0, - .nret = 1, - .rets = &rtas_stop_self_args.args[0], -}; +static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = CPU_STATE_OFFLINE; @@ -93,15 +88,20 @@ static void rtas_stop_self(void) { - struct rtas_args *args = &rtas_stop_self_args; + struct rtas_args args = { + .token = cpu_to_be32(rtas_stop_self_token), + .nargs = 0, + .nret = 1, + .rets = &args.args[0], + }; local_irq_disable(); - BUG_ON(args->token == RTAS_UNKNOWN_SERVICE); + BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); printk("cpu %u (hwid %u) Ready to die...\n", smp_processor_id(), hard_smp_processor_id()); - enter_rtas(__pa(args)); + enter_rtas(__pa(&args)); panic("Alas, I survived.\n"); } @@ -392,10 +392,10 @@ } } - rtas_stop_self_args.token = rtas_token("stop-self"); + rtas_stop_self_token = rtas_token("stop-self"); qcss_tok = rtas_token("query-cpu-stopped-state"); - if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE || + if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || qcss_tok == RTAS_UNKNOWN_SERVICE) { printk(KERN_INFO "CPU Hotplug not supported by firmware " "- disabling.\n"); --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/hotplug-memory.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -164,7 +164,7 @@ } static inline int pseries_remove_memory(struct device_node *np) { - return -EOPNOTSUPP; + return 0; } #endif /* CONFIG_MEMORY_HOTREMOVE */ --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/io_event_irq.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/io_event_irq.c @@ -82,9 +82,9 @@ * RTAS_TYPE_IO only exists in extended event log version 6 or later. * No need to check event log version. 
*/ - if (unlikely(elog->type != RTAS_TYPE_IO)) { - printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d", - elog->type); + if (unlikely(rtas_error_type(elog) != RTAS_TYPE_IO)) { + printk_once(KERN_WARNING"io_event_irq: Unexpected event type %d", + rtas_error_type(elog)); return NULL; } --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/lpar.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/lpar.c @@ -429,16 +429,17 @@ spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); } -static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm, - unsigned char *hpte_slot_array, - unsigned long addr, int psize) +static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, + unsigned long addr, + unsigned char *hpte_slot_array, + int psize, int ssize) { - int ssize = 0, i, index = 0; + int i, index = 0; unsigned long s_addr = addr; unsigned int max_hpte_count, valid; unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH]; unsigned long slot_array[PPC64_HUGE_HPTE_BATCH]; - unsigned long shift, hidx, vpn = 0, vsid, hash, slot; + unsigned long shift, hidx, vpn = 0, hash, slot; shift = mmu_psize_defs[psize].shift; max_hpte_count = 1U << (PMD_SHIFT - shift); @@ -451,15 +452,6 @@ /* get the vpn */ addr = s_addr + (i * (1ul << shift)); - if (!is_kernel_addr(addr)) { - ssize = user_segment_size(addr); - vsid = get_vsid(mm->context.id, addr, ssize); - WARN_ON(vsid == 0); - } else { - vsid = get_kernel_vsid(addr, mmu_kernel_ssize); - ssize = mmu_kernel_ssize; - } - vpn = hpt_vpn(addr, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); if (hidx & _PTEIDX_SECONDARY) --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/mobility.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/mobility.c @@ -24,10 +24,10 @@ static struct kobject *mobility_kobj; struct update_props_workarea { - u32 phandle; - u32 state; - u64 reserved; - u32 nprops; + __be32 phandle; + __be32 state; + __be64 reserved; + __be32 nprops; } __packed; #define NODE_ACTION_MASK 0xff000000 @@ -53,11 +53,11 @@ return rc; } -static int delete_dt_node(u32 phandle) +static int delete_dt_node(__be32 phandle) { struct device_node *dn; - dn = of_find_node_by_phandle(phandle); + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); if (!dn) return -ENOENT; @@ -126,7 +126,7 @@ return 0; } -static int update_dt_node(u32 phandle, s32 scope) +static int update_dt_node(__be32 phandle, s32 scope) { struct update_props_workarea *upwa; struct device_node *dn; @@ -135,6 +135,7 @@ char *prop_data; char *rtas_buf; int update_properties_token; + u32 nprops; u32 vd; update_properties_token = rtas_token("ibm,update-properties"); @@ -145,7 +146,7 @@ if (!rtas_buf) return -ENOMEM; - dn = of_find_node_by_phandle(phandle); + dn = of_find_node_by_phandle(be32_to_cpu(phandle)); if (!dn) { kfree(rtas_buf); return -ENOENT; @@ -161,6 +162,7 @@ break; prop_data = rtas_buf + sizeof(*upwa); + nprops = be32_to_cpu(upwa->nprops); /* On the first call to ibm,update-properties for a node the * the first property value descriptor contains an empty @@ -169,17 +171,17 @@ */ if (*prop_data == 0) { prop_data++; - vd = *(u32 *)prop_data; + vd = be32_to_cpu(*(__be32 *)prop_data); prop_data += vd + sizeof(vd); - upwa->nprops--; + nprops--; } - for (i = 0; i < upwa->nprops; i++) { + for (i = 0; i < nprops; i++) { char *prop_name; prop_name = prop_data; prop_data += strlen(prop_name) + 1; - vd = *(u32 *)prop_data; + vd = be32_to_cpu(*(__be32 *)prop_data); prop_data += sizeof(vd); switch (vd) { @@ -211,13 +213,13 @@ return 0; } -static int add_dt_node(u32 parent_phandle, u32 drc_index) 
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index) { struct device_node *dn; struct device_node *parent_dn; int rc; - parent_dn = of_find_node_by_phandle(parent_phandle); + parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle)); if (!parent_dn) return -ENOENT; @@ -236,7 +238,7 @@ int pseries_devicetree_update(s32 scope) { char *rtas_buf; - u32 *data; + __be32 *data; int update_nodes_token; int rc; @@ -253,17 +255,17 @@ if (rc && rc != 1) break; - data = (u32 *)rtas_buf + 4; - while (*data & NODE_ACTION_MASK) { + data = (__be32 *)rtas_buf + 4; + while (be32_to_cpu(*data) & NODE_ACTION_MASK) { int i; - u32 action = *data & NODE_ACTION_MASK; - int node_count = *data & NODE_COUNT_MASK; + u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK; + u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK; data++; for (i = 0; i < node_count; i++) { - u32 phandle = *data++; - u32 drc_index; + __be32 phandle = *data++; + __be32 drc_index; switch (action) { case DELETE_DT_NODE: --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/msi.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/msi.c @@ -428,7 +428,7 @@ */ again: if (type == PCI_CAP_ID_MSI) { - if (pdn->force_32bit_msi) { + if (pdev->no_64bit_msi) { rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); if (rc < 0) { /* --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/pci_dlpar.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/pci_dlpar.c @@ -118,10 +118,10 @@ } } - /* Unregister the bridge device from sysfs and remove the PCI bus */ - device_unregister(b->bridge); + /* Remove the PCI bus and unregister the bridge device from sysfs */ phb->bus = NULL; pci_remove_bus(b); + device_unregister(b->bridge); /* Now release the IO resource */ if (res->flags & IORESOURCE_IO) --- linux-3.13.0.orig/arch/powerpc/platforms/pseries/ras.c +++ linux-3.13.0/arch/powerpc/platforms/pseries/ras.c @@ -236,7 +236,8 @@ rtas_elog = (struct rtas_error_log *)ras_log_buf; - if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC)) + if (status == 0 && + rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC) fatal = 1; else fatal = 0; @@ -300,13 +301,14 @@ /* If it isn't an extended log we can use the per cpu 64bit buffer */ h = (struct rtas_error_log *)&savep[1]; - if (!h->extended) { + if (!rtas_error_extended(h)) { memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); } else { - int len; + int len, error_log_length; - len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX); + error_log_length = 8 + rtas_error_extended_log_length(h); + len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX); memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX); memcpy(global_mce_data_buf, h, len); errhdr = (struct rtas_error_log *)global_mce_data_buf; @@ -350,23 +352,24 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err) { int recovered = 0; + int disposition = rtas_error_disposition(err); if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ recovered = 0; - } else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) { + } else if (disposition == RTAS_DISP_FULLY_RECOVERED) { /* Platform corrected itself */ recovered = 1; - } else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) { + } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) { /* Platform corrected itself but could be degraded */ printk(KERN_ERR "MCE: limited recovery, system may " "be degraded\n"); recovered = 1; } else if (user_mode(regs) && !is_global_init(current) && - 
err->severity == RTAS_SEVERITY_ERROR_SYNC) { + rtas_error_severity(err) == RTAS_SEVERITY_ERROR_SYNC) { /* * If we received a synchronous error when in userspace --- linux-3.13.0.orig/arch/powerpc/sysdev/axonram.c +++ linux-3.13.0/arch/powerpc/sysdev/axonram.c @@ -155,7 +155,7 @@ } *kaddr = (void *)(bank->ph_addr + offset); - *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; + *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; return 0; } --- linux-3.13.0.orig/arch/powerpc/xmon/xmon.c +++ linux-3.13.0/arch/powerpc/xmon/xmon.c @@ -171,7 +171,11 @@ #define REG "%.8lx" #endif +#ifdef __LITTLE_ENDIAN__ +#define GETWORD(v) (((v)[3] << 24) + ((v)[2] << 16) + ((v)[1] << 8) + (v)[0]) +#else #define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3]) +#endif #define isxdigit(c) (('0' <= (c) && (c) <= '9') \ || ('a' <= (c) && (c) <= 'f') \ @@ -288,10 +292,11 @@ args.token = rtas_token("set-indicator"); if (args.token == RTAS_UNKNOWN_SERVICE) return; - args.nargs = 3; - args.nret = 1; + args.token = cpu_to_be32(args.token); + args.nargs = cpu_to_be32(3); + args.nret = cpu_to_be32(1); args.rets = &args.args[3]; - args.args[0] = SURVEILLANCE_TOKEN; + args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN); args.args[1] = 0; args.args[2] = 0; enter_rtas(__pa(&args)); @@ -2051,6 +2056,10 @@ DUMP(p, stab_addr, "lx"); #endif DUMP(p, emergency_sp, "p"); +#ifdef CONFIG_PPC_BOOK3S_64 + DUMP(p, mc_emergency_sp, "p"); + DUMP(p, in_mce, "x"); +#endif DUMP(p, data_offset, "lx"); DUMP(p, hw_cpu_id, "x"); DUMP(p, cpu_start, "x"); --- linux-3.13.0.orig/arch/s390/Kconfig +++ linux-3.13.0/arch/s390/Kconfig @@ -93,6 +93,7 @@ select ARCH_INLINE_WRITE_UNLOCK_IRQ select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE select ARCH_SAVE_PAGE_KEYS if HIBERNATION + select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT @@ -117,6 +118,7 @@ select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZ4 --- linux-3.13.0.orig/arch/s390/crypto/aes_s390.c +++ linux-3.13.0/arch/s390/crypto/aes_s390.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "crypt_s390.h" #define AES_KEYLEN_128 1 @@ -32,6 +33,7 @@ #define AES_KEYLEN_256 4 static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); static char keylen_flag; struct s390_aes_ctx { @@ -758,43 +760,67 @@ return aes_set_key(tfm, in_key, key_len); } +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(ctrptr + i, AES_BLOCK_SIZE); + } + return n; +} + static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[AES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; if (!walk->nbytes) return ret; - memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= AES_BLOCK_SIZE) { - /* only use complete blocks, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : - nbytes & ~(AES_BLOCK_SIZE - 1); - for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE, - AES_BLOCK_SIZE); - crypto_inc(ctrblk + i, AES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = AES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, sctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > AES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrptr, AES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ @@ -802,14 +828,15 @@ out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, sctx->key, buf, in, - AES_BLOCK_SIZE, ctrblk); + AES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != AES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrbuf, AES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE); + return ret; } @@ -949,7 +976,7 @@ module_init(aes_s390_init); module_exit(aes_s390_fini); -MODULE_ALIAS("aes-all"); +MODULE_ALIAS_CRYPTO("aes-all"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("GPL"); --- linux-3.13.0.orig/arch/s390/crypto/des_s390.c +++ linux-3.13.0/arch/s390/crypto/des_s390.c @@ -25,6 +25,7 @@ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); struct s390_des_ctx { u8 iv[DES_BLOCK_SIZE]; @@ -105,29 +106,35 @@ } static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, - u8 *iv, struct blkcipher_walk *walk) + struct blkcipher_walk *walk) { + struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes = walk->nbytes; + struct { + u8 iv[DES_BLOCK_SIZE]; + u8 key[DES3_KEY_SIZE]; + } param; if (!nbytes) goto out; - memcpy(iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.key, ctx->key, 
DES3_KEY_SIZE); do { /* only use complete blocks */ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; - ret = crypt_s390_kmc(func, iv, out, in, n); + ret = crypt_s390_kmc(func, ¶m, out, in, n); if (ret < 0 || ret != n) return -EIO; nbytes &= DES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } while ((nbytes = walk->nbytes)); - memcpy(walk->iv, iv, DES_BLOCK_SIZE); + memcpy(walk->iv, param.iv, DES_BLOCK_SIZE); out: return ret; @@ -179,22 +186,20 @@ struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk); } static int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk); } static struct crypto_alg cbc_des_alg = { @@ -327,22 +332,20 @@ struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk); } static int cbc_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk); } static struct crypto_alg cbc_des3_alg = { @@ -366,54 +369,80 @@ } }; +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* align to block size, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); + for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE); + crypto_inc(ctrptr + i, DES_BLOCK_SIZE); + } + return n; +} + static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, - struct s390_des_ctx *ctx, struct blkcipher_walk *walk) + struct s390_des_ctx *ctx, + struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[DES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; + + if (!walk->nbytes) + return ret; - memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= DES_BLOCK_SIZE) { - /* align to block size, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : - nbytes & ~(DES_BLOCK_SIZE - 1); - for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE, - DES_BLOCK_SIZE); - crypto_inc(ctrblk + i, DES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = DES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, ctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > DES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE, DES_BLOCK_SIZE); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrptr, DES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } - + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, ctx->key, buf, in, - DES_BLOCK_SIZE, ctrblk); + DES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != DES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrbuf, DES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE); return ret; } @@ -587,8 +616,8 @@ module_init(des_s390_init); module_exit(des_s390_exit); -MODULE_ALIAS("des"); -MODULE_ALIAS("des3_ede"); +MODULE_ALIAS_CRYPTO("des"); +MODULE_ALIAS_CRYPTO("des3_ede"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); --- linux-3.13.0.orig/arch/s390/crypto/ghash_s390.c +++ linux-3.13.0/arch/s390/crypto/ghash_s390.c @@ -160,7 +160,7 @@ module_init(ghash_mod_init); module_exit(ghash_mod_exit); -MODULE_ALIAS("ghash"); +MODULE_ALIAS_CRYPTO("ghash"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation"); --- linux-3.13.0.orig/arch/s390/crypto/sha1_s390.c +++ linux-3.13.0/arch/s390/crypto/sha1_s390.c @@ -103,6 +103,6 @@ module_init(sha1_s390_init); module_exit(sha1_s390_fini); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); --- linux-3.13.0.orig/arch/s390/crypto/sha256_s390.c +++ linux-3.13.0/arch/s390/crypto/sha256_s390.c @@ -143,7 +143,7 @@ module_init(sha256_s390_init); module_exit(sha256_s390_fini); -MODULE_ALIAS("sha256"); -MODULE_ALIAS("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm"); --- linux-3.13.0.orig/arch/s390/crypto/sha512_s390.c +++ linux-3.13.0/arch/s390/crypto/sha512_s390.c @@ -86,7 +86,7 @@ } }; -MODULE_ALIAS("sha512"); +MODULE_ALIAS_CRYPTO("sha512"); static int sha384_init(struct shash_desc *desc) { @@ -126,7 +126,7 @@ } }; -MODULE_ALIAS("sha384"); +MODULE_ALIAS_CRYPTO("sha384"); static int __init init(void) { --- linux-3.13.0.orig/arch/s390/include/asm/atomic.h +++ linux-3.13.0/arch/s390/include/asm/atomic.h @@ -15,23 +15,29 @@ #include #include +#include #include #define ATOMIC_INIT(i) { (i) } +#define __ATOMIC_NO_BARRIER "\n" + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES #define __ATOMIC_OR "lao" #define __ATOMIC_AND "lan" #define __ATOMIC_ADD "laa" +#define __ATOMIC_BARRIER "bcr 
14,0\n" -#define __ATOMIC_LOOP(ptr, op_val, op_string) \ +#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ ({ \ int old_val; \ \ typecheck(atomic_t *, ptr); \ asm volatile( \ + __barrier \ op_string " %0,%2,%1\n" \ + __barrier \ : "=d" (old_val), "+Q" ((ptr)->counter) \ : "d" (op_val) \ : "cc", "memory"); \ @@ -43,8 +49,9 @@ #define __ATOMIC_OR "or" #define __ATOMIC_AND "nr" #define __ATOMIC_ADD "ar" +#define __ATOMIC_BARRIER "\n" -#define __ATOMIC_LOOP(ptr, op_val, op_string) \ +#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ ({ \ int old_val, new_val; \ \ @@ -82,7 +89,7 @@ static inline int atomic_add_return(int i, atomic_t *v) { - return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i; + return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i; } static inline void atomic_add(int i, atomic_t *v) @@ -94,12 +101,10 @@ : "+Q" (v->counter) : "i" (i) : "cc", "memory"); - } else { - atomic_add_return(i, v); + return; } -#else - atomic_add_return(i, v); #endif + __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER); } #define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0) @@ -115,12 +120,12 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { - __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND); + __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER); } static inline void atomic_set_mask(unsigned int mask, atomic_t *v) { - __ATOMIC_LOOP(v, mask, __ATOMIC_OR); + __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER); } #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) @@ -157,19 +162,24 @@ #ifdef CONFIG_64BIT +#define __ATOMIC64_NO_BARRIER "\n" + #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES #define __ATOMIC64_OR "laog" #define __ATOMIC64_AND "lang" #define __ATOMIC64_ADD "laag" +#define __ATOMIC64_BARRIER "bcr 14,0\n" -#define __ATOMIC64_LOOP(ptr, op_val, op_string) \ +#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ ({ \ long long old_val; \ \ typecheck(atomic64_t *, ptr); \ asm volatile( \ + __barrier \ op_string " %0,%2,%1\n" \ + __barrier \ : "=d" (old_val), "+Q" ((ptr)->counter) \ : "d" (op_val) \ : "cc", "memory"); \ @@ -181,8 +191,9 @@ #define __ATOMIC64_OR "ogr" #define __ATOMIC64_AND "ngr" #define __ATOMIC64_ADD "agr" +#define __ATOMIC64_BARRIER "\n" -#define __ATOMIC64_LOOP(ptr, op_val, op_string) \ +#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ ({ \ long long old_val, new_val; \ \ @@ -220,17 +231,32 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) { - return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i; + return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i; +} + +static inline void atomic64_add(long long i, atomic64_t *v) +{ +#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES + if (__builtin_constant_p(i) && (i > -129) && (i < 128)) { + asm volatile( + "agsi %0,%1\n" + : "+Q" (v->counter) + : "i" (i) + : "cc", "memory"); + return; + } +#endif + __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER); } static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) { - __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND); + __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER); } static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) { - __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR); + __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER); } #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) @@ -334,25 +360,13 @@ } while (atomic64_cmpxchg(v, old, new) != old); } -#endif /* CONFIG_64BIT */ - static inline void atomic64_add(long long i, 
atomic64_t *v) { -#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES - if (__builtin_constant_p(i) && (i > -129) && (i < 128)) { - asm volatile( - "agsi %0,%1\n" - : "+Q" (v->counter) - : "i" (i) - : "cc", "memory"); - } else { - atomic64_add_return(i, v); - } -#else atomic64_add_return(i, v); -#endif } +#endif /* CONFIG_64BIT */ + static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) { long long c, old; --- linux-3.13.0.orig/arch/s390/include/asm/bitops.h +++ linux-3.13.0/arch/s390/include/asm/bitops.h @@ -47,14 +47,18 @@ #include #include +#include + +#define __BITOPS_NO_BARRIER "\n" #ifndef CONFIG_64BIT #define __BITOPS_OR "or" #define __BITOPS_AND "nr" #define __BITOPS_XOR "xr" +#define __BITOPS_BARRIER "\n" -#define __BITOPS_LOOP(__addr, __val, __op_string) \ +#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ ({ \ unsigned long __old, __new; \ \ @@ -67,7 +71,7 @@ " jl 0b" \ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ : "d" (__val) \ - : "cc"); \ + : "cc", "memory"); \ __old; \ }) @@ -78,17 +82,20 @@ #define __BITOPS_OR "laog" #define __BITOPS_AND "lang" #define __BITOPS_XOR "laxg" +#define __BITOPS_BARRIER "bcr 14,0\n" -#define __BITOPS_LOOP(__addr, __val, __op_string) \ +#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ ({ \ unsigned long __old; \ \ typecheck(unsigned long *, (__addr)); \ asm volatile( \ + __barrier \ __op_string " %0,%2,%1\n" \ + __barrier \ : "=d" (__old), "+Q" (*(__addr)) \ : "d" (__val) \ - : "cc"); \ + : "cc", "memory"); \ __old; \ }) @@ -97,8 +104,9 @@ #define __BITOPS_OR "ogr" #define __BITOPS_AND "ngr" #define __BITOPS_XOR "xgr" +#define __BITOPS_BARRIER "\n" -#define __BITOPS_LOOP(__addr, __val, __op_string) \ +#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ ({ \ unsigned long __old, __new; \ \ @@ -111,7 +119,7 @@ " jl 0b" \ : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ : "d" (__val) \ - : "cc"); \ + : "cc", "memory"); \ __old; \ }) @@ -149,12 +157,12 @@ "oi %0,%b1\n" : "+Q" (*caddr) : "i" (1 << (nr & 7)) - : "cc"); + : "cc", "memory"); return; } #endif mask = 1UL << (nr & (BITS_PER_LONG - 1)); - __BITOPS_LOOP(addr, mask, __BITOPS_OR); + __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER); } static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr) @@ -170,12 +178,12 @@ "ni %0,%b1\n" : "+Q" (*caddr) : "i" (~(1 << (nr & 7))) - : "cc"); + : "cc", "memory"); return; } #endif mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); - __BITOPS_LOOP(addr, mask, __BITOPS_AND); + __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER); } static inline void change_bit(unsigned long nr, volatile unsigned long *ptr) @@ -191,12 +199,12 @@ "xi %0,%b1\n" : "+Q" (*caddr) : "i" (1 << (nr & 7)) - : "cc"); + : "cc", "memory"); return; } #endif mask = 1UL << (nr & (BITS_PER_LONG - 1)); - __BITOPS_LOOP(addr, mask, __BITOPS_XOR); + __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER); } static inline int @@ -206,8 +214,7 @@ unsigned long old, mask; mask = 1UL << (nr & (BITS_PER_LONG - 1)); - old = __BITOPS_LOOP(addr, mask, __BITOPS_OR); - barrier(); + old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER); return (old & mask) != 0; } @@ -218,8 +225,7 @@ unsigned long old, mask; mask = ~(1UL << (nr & (BITS_PER_LONG - 1))); - old = __BITOPS_LOOP(addr, mask, __BITOPS_AND); - barrier(); + old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER); return (old & ~mask) != 0; } @@ -230,8 +236,7 @@ unsigned long old, mask; mask = 1UL << (nr & (BITS_PER_LONG - 1)); - old = 
__BITOPS_LOOP(addr, mask, __BITOPS_XOR); - barrier(); + old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER); return (old & mask) != 0; } --- linux-3.13.0.orig/arch/s390/include/asm/ccwdev.h +++ linux-3.13.0/arch/s390/include/asm/ccwdev.h @@ -219,7 +219,7 @@ #define to_ccwdev(n) container_of(n, struct ccw_device, dev) #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) -extern struct ccw_device *ccw_device_probe_console(void); +extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *); extern void ccw_device_wait_idle(struct ccw_device *); extern int ccw_device_force_console(struct ccw_device *); --- linux-3.13.0.orig/arch/s390/include/asm/compat.h +++ linux-3.13.0/arch/s390/include/asm/compat.h @@ -38,7 +38,8 @@ #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \ PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \ - PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME) + PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \ + PSW32_ASC_PRIMARY) #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "s390\0\0\0\0" --- linux-3.13.0.orig/arch/s390/include/asm/lowcore.h +++ linux-3.13.0/arch/s390/include/asm/lowcore.h @@ -142,9 +142,9 @@ __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */ /* Interrupt response block */ - __u8 irb[64]; /* 0x0300 */ + __u8 irb[96]; /* 0x0300 */ - __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */ + __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */ /* * 0xe00 contains the address of the IPL Parameter Information @@ -288,12 +288,13 @@ __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */ /* Interrupt response block. */ - __u8 irb[64]; /* 0x0400 */ + __u8 irb[96]; /* 0x0400 */ + __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */ /* Per cpu primary space access list */ - __u32 paste[16]; /* 0x0440 */ + __u32 paste[16]; /* 0x0480 */ - __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */ + __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */ /* * 0xe00 contains the address of the IPL Parameter Information --- linux-3.13.0.orig/arch/s390/include/uapi/asm/statfs.h +++ linux-3.13.0/arch/s390/include/uapi/asm/statfs.h @@ -35,11 +35,11 @@ struct statfs64 { unsigned int f_type; unsigned int f_bsize; - unsigned long f_blocks; - unsigned long f_bfree; - unsigned long f_bavail; - unsigned long f_files; - unsigned long f_ffree; + unsigned long long f_blocks; + unsigned long long f_bfree; + unsigned long long f_bavail; + unsigned long long f_files; + unsigned long long f_ffree; __kernel_fsid_t f_fsid; unsigned int f_namelen; unsigned int f_frsize; --- linux-3.13.0.orig/arch/s390/kernel/compat_linux.c +++ linux-3.13.0/arch/s390/kernel/compat_linux.c @@ -245,7 +245,7 @@ struct group_info *group_info; int retval; - if (!capable(CAP_SETGID)) + if (!may_setgroups()) return -EPERM; if ((unsigned)gidsetsize > NGROUPS_MAX) return -EINVAL; --- linux-3.13.0.orig/arch/s390/kernel/head64.S +++ linux-3.13.0/arch/s390/kernel/head64.S @@ -59,7 +59,7 @@ .quad 0 # cr12: tracing off .quad 0 # cr13: home space segment table .quad 0xc0000000 # cr14: machine check handling off - .quad 0 # cr15: linkage stack operations + .quad .Llinkage_stack # cr15: linkage stack operations .Lpcmsk:.quad 0x0000000180000000 .L4malign:.quad 0xffffffffffc00000 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 @@ -67,12 +67,15 @@ .Lparmaddr: .quad PARMAREA .align 64 -.Lduct: .long 0,0,0,0,.Lduald,0,0,0 +.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0 .long 0,0,0,0,0,0,0,0 +.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0 .align 128 .Lduald:.rept 8 .long 0x80000000,0,0,0 # invalid access-list entries .endr +.Llinkage_stack: + 
.long 0,0,0x89000000,0,0,0,0x8a000000,0 ENTRY(_ehead) --- linux-3.13.0.orig/arch/s390/kernel/ptrace.c +++ linux-3.13.0/arch/s390/kernel/ptrace.c @@ -326,9 +326,14 @@ unsigned long mask = PSW_MASK_USER; mask |= is_ri_task(child) ? PSW_MASK_RI : 0; - if ((data & ~mask) != PSW_USER_BITS) + if ((data ^ PSW_USER_BITS) & ~mask) + /* Invalid psw mask. */ + return -EINVAL; + if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) + /* Invalid address-space-control bits */ return -EINVAL; if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) + /* Invalid addressing mode bits */ return -EINVAL; } *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; @@ -664,9 +669,12 @@ mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; /* Build a 64 bit psw mask from 31 bit mask. */ - if ((tmp & ~mask) != PSW32_USER_BITS) + if ((tmp ^ PSW32_USER_BITS) & ~mask) /* Invalid psw mask. */ return -EINVAL; + if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) + /* Invalid address-space-control bits */ + return -EINVAL; regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | (regs->psw.mask & PSW_MASK_BA) | (__u64)(tmp & mask) << 32; --- linux-3.13.0.orig/arch/s390/kernel/time.c +++ linux-3.13.0/arch/s390/kernel/time.c @@ -226,7 +226,7 @@ vdso_data->wtom_clock_sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; vdso_data->wtom_clock_nsec = tk->xtime_nsec + - + (tk->wall_to_monotonic.tv_nsec << tk->shift); + + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift); nsecps = (u64) NSEC_PER_SEC << tk->shift; while (vdso_data->wtom_clock_nsec >= nsecps) { vdso_data->wtom_clock_nsec -= nsecps; --- linux-3.13.0.orig/arch/s390/kvm/diag.c +++ linux-3.13.0/arch/s390/kvm/diag.c @@ -121,7 +121,7 @@ * - gpr 4 contains the index on the bus (optionally) */ ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, - vcpu->run->s.regs.gprs[2], + vcpu->run->s.regs.gprs[2] & 0xffffffff, 8, &vcpu->run->s.regs.gprs[3], vcpu->run->s.regs.gprs[4]); @@ -137,7 +137,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) { - int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; + int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); --- linux-3.13.0.orig/arch/s390/kvm/interrupt.c +++ linux-3.13.0/arch/s390/kvm/interrupt.c @@ -71,6 +71,7 @@ return 0; if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) return 1; + return 0; case KVM_S390_INT_EMERGENCY: if (psw_extint_disabled(vcpu)) return 0; --- linux-3.13.0.orig/arch/s390/kvm/kvm-s390.c +++ linux-3.13.0/arch/s390/kvm/kvm-s390.c @@ -398,7 +398,7 @@ vcpu->arch.sie_block->ecb2 = 8; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) vfacilities; - hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); + hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, (unsigned long) vcpu); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; @@ -746,7 +746,8 @@ if (rc == 0) { if (kvm_is_ucontrol(vcpu->kvm)) - rc = -EOPNOTSUPP; + /* Don't exit for host interrupts. */ + rc = vcpu->arch.sie_block->icptcode ? 
-EOPNOTSUPP : 0; else rc = kvm_handle_sie_intercept(vcpu); } @@ -801,18 +802,6 @@ BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL); - switch (kvm_run->exit_reason) { - case KVM_EXIT_S390_SIEIC: - case KVM_EXIT_UNKNOWN: - case KVM_EXIT_INTR: - case KVM_EXIT_S390_RESET: - case KVM_EXIT_S390_UCONTROL: - case KVM_EXIT_S390_TSCH: - break; - default: - BUG(); - } - vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { @@ -885,7 +874,7 @@ * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit * KVM_S390_STORE_STATUS_PREFIXED: -> prefix */ -int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) +int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr) { unsigned char archmode = 1; int prefix; @@ -903,15 +892,6 @@ } else prefix = 0; - /* - * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy - * copying in vcpu load/put. Lets update our copies before we save - * it into the save area - */ - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); - save_access_regs(vcpu->run->s.regs.acrs); - if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs), vcpu->arch.guest_fpregs.fprs, 128, prefix)) return -EFAULT; @@ -956,6 +936,20 @@ return 0; } +int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) +{ + /* + * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy + * copying in vcpu load/put. Lets update our copies before we save + * it into the save area + */ + save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); + save_fp_regs(vcpu->arch.guest_fpregs.fprs); + save_access_regs(vcpu->run->s.regs.acrs); + + return kvm_s390_store_status_unloaded(vcpu, addr); +} + static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { --- linux-3.13.0.orig/arch/s390/kvm/kvm-s390.h +++ linux-3.13.0/arch/s390/kvm/kvm-s390.h @@ -150,8 +150,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ -int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, - unsigned long addr); +int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); +int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); void s390_vcpu_block(struct kvm_vcpu *vcpu); void s390_vcpu_unblock(struct kvm_vcpu *vcpu); void exit_sie(struct kvm_vcpu *vcpu); --- linux-3.13.0.orig/arch/s390/kvm/priv.c +++ linux-3.13.0/arch/s390/kvm/priv.c @@ -688,7 +688,7 @@ break; reg = (reg + 1) % 16; } while (1); - + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } @@ -725,7 +725,7 @@ break; reg = (reg + 1) % 16; } while (1); - + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); return 0; } --- linux-3.13.0.orig/arch/s390/kvm/sigp.c +++ linux-3.13.0/arch/s390/kvm/sigp.c @@ -130,6 +130,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) { struct kvm_s390_interrupt_info *inti; + int rc = SIGP_CC_ORDER_CODE_ACCEPTED; inti = kzalloc(sizeof(*inti), GFP_ATOMIC); if (!inti) @@ -139,6 +140,8 @@ spin_lock_bh(&li->lock); if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { kfree(inti); + if ((action & ACTION_STORE_ON_STOP) != 0) + rc = -ESHUTDOWN; goto out; } list_add_tail(&inti->list, &li->list); @@ -150,7 +153,7 @@ out: spin_unlock_bh(&li->lock); - return SIGP_CC_ORDER_CODE_ACCEPTED; + return rc; } static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) @@ -174,6 +177,16 @@ unlock: spin_unlock(&fi->lock); 
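A note on the kvm-s390.c rework above: storing guest status is split into kvm_s390_store_status_unloaded(), which only writes the architected save area, and a wrapper that first syncs the lazily switched floating-point and access registers. A minimal sketch of the wrapper, mirroring the hunks above (the save helpers are the usual s390 ones):

	int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
	{
		/* FPRS/ACRS still live in the host registers because of the
		 * lazy switching done in vcpu load/put, so refresh our
		 * copies before dumping them into the save area.
		 */
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
		save_access_regs(vcpu->run->s.regs.acrs);

		return kvm_s390_store_status_unloaded(vcpu, addr);
	}

Callers that act on a vcpu that is not currently loaded, such as the SIGP stop-and-store path below, use the _unloaded variant directly.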
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); + + if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) { + /* If the CPU has already been stopped, we still have + * to save the status when doing stop-and-store. This + * has to be done after unlocking all spinlocks. */ + struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); + rc = kvm_s390_store_status_unloaded(dst_vcpu, + KVM_S390_STORE_STATUS_NOADDR); + } + return rc; } --- linux-3.13.0.orig/arch/s390/mm/fault.c +++ linux-3.13.0/arch/s390/mm/fault.c @@ -239,6 +239,12 @@ do_no_context(regs); else pagefault_out_of_memory(); + } else if (fault & VM_FAULT_SIGSEGV) { + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + do_no_context(regs); + else + do_sigsegv(regs, SEGV_MAPERR); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) --- linux-3.13.0.orig/arch/s390/mm/hugetlbpage.c +++ linux-3.13.0/arch/s390/mm/hugetlbpage.c @@ -223,11 +223,6 @@ return 0; } -int pmd_huge_support(void) -{ - return 1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmdp, int write) { --- linux-3.13.0.orig/arch/s390/mm/page-states.c +++ linux-3.13.0/arch/s390/mm/page-states.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #define ESSA_SET_STABLE 1 #define ESSA_SET_UNUSED 2 @@ -41,6 +43,14 @@ if (!cmma_flag) return; + /* + * Disable CMM for dump, otherwise the tprot based memory + * detection can fail because of unstable pages. + */ + if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) { + cmma_flag = 0; + return; + } asm volatile( " .insn rrf,0xb9ab0000,%1,%1,0,0\n" "0: la %0,0\n" --- linux-3.13.0.orig/arch/s390/mm/pgtable.c +++ linux-3.13.0/arch/s390/mm/pgtable.c @@ -810,11 +810,21 @@ pte_t *ptep; down_read(&mm->mmap_sem); +retry: ptep = get_locked_pte(current->mm, addr, &ptl); if (unlikely(!ptep)) { up_read(&mm->mmap_sem); return -EFAULT; } + if (!(pte_val(*ptep) & _PAGE_INVALID) && + (pte_val(*ptep) & _PAGE_PROTECT)) { + pte_unmap_unlock(*ptep, ptl); + if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { + up_read(&mm->mmap_sem); + return -EFAULT; + } + goto retry; + } new = old = pgste_get_lock(ptep); pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | --- linux-3.13.0.orig/arch/s390/net/bpf_jit_comp.c +++ linux-3.13.0/arch/s390/net/bpf_jit_comp.c @@ -276,7 +276,6 @@ case BPF_S_LD_W_IND: case BPF_S_LD_H_IND: case BPF_S_LD_B_IND: - case BPF_S_LDX_B_MSH: case BPF_S_LD_IMM: case BPF_S_LD_MEM: case BPF_S_MISC_TXA: @@ -812,7 +811,7 @@ return NULL; memset(header, 0, sz); header->pages = sz / PAGE_SIZE; - hole = sz - (bpfsize + sizeof(*header)); + hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header)); /* Insert random number of illegal instructions before BPF code * and make sure the first instruction starts at an even address. 
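The s390 fault.c hunk above and the score/sh hunks that follow all add the same branch; the common shape of the dispatch after handle_mm_fault() is roughly the following (sketch only, the goto labels stand for the usual arch-local helpers):

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

Before the new branch, a VM_FAULT_SIGSEGV return was either ignored or fell through to the SIGBUS/BUG() handling, depending on the architecture.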
*/ --- linux-3.13.0.orig/arch/score/mm/fault.c +++ linux-3.13.0/arch/score/mm/fault.c @@ -114,6 +114,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/sh/kernel/dumpstack.c +++ linux-3.13.0/arch/sh/kernel/dumpstack.c @@ -115,7 +115,7 @@ */ static void print_trace_address(void *data, unsigned long addr, int reliable) { - printk(data); + printk("%s", (char *)data); printk_address(addr, reliable); } --- linux-3.13.0.orig/arch/sh/kernel/kgdb.c +++ linux-3.13.0/arch/sh/kernel/kgdb.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include --- linux-3.13.0.orig/arch/sh/mm/fault.c +++ linux-3.13.0/arch/sh/mm/fault.c @@ -353,6 +353,8 @@ } else { if (fault & VM_FAULT_SIGBUS) do_sigbus(regs, error_code, address); + else if (fault & VM_FAULT_SIGSEGV) + bad_area(regs, error_code, address); else BUG(); } --- linux-3.13.0.orig/arch/sh/mm/hugetlbpage.c +++ linux-3.13.0/arch/sh/mm/hugetlbpage.c @@ -83,11 +83,6 @@ return 0; } -int pmd_huge_support(void) -{ - return 0; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { --- linux-3.13.0.orig/arch/sparc/Kconfig +++ linux-3.13.0/arch/sparc/Kconfig @@ -26,7 +26,7 @@ select RTC_DRV_M48T59 select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG - select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_JUMP_LABEL if SPARC64 select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_PCI_IOMAP @@ -66,6 +66,7 @@ select HAVE_SYSCALL_TRACEPOINTS select HAVE_CONTEXT_TRACKING select HAVE_DEBUG_KMEMLEAK + select SPARSE_IRQ select RTC_DRV_CMOS select RTC_DRV_BQ4802 select RTC_DRV_SUN4V @@ -76,6 +77,7 @@ select ARCH_HAVE_NMI_SAFE_CMPXCHG select HAVE_C_RECORDMCOUNT select NO_BOOTMEM + select ARCH_SUPPORTS_ATOMIC_RMW config ARCH_DEFCONFIG string --- linux-3.13.0.orig/arch/sparc/crypto/aes_glue.c +++ linux-3.13.0/arch/sparc/crypto/aes_glue.c @@ -499,6 +499,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/camellia_glue.c +++ linux-3.13.0/arch/sparc/crypto/camellia_glue.c @@ -322,6 +322,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/crc32c_glue.c +++ linux-3.13.0/arch/sparc/crypto/crc32c_glue.c @@ -176,6 +176,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); -MODULE_ALIAS("crc32c"); +MODULE_ALIAS_CRYPTO("crc32c"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/des_glue.c +++ linux-3.13.0/arch/sparc/crypto/des_glue.c @@ -532,6 +532,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); -MODULE_ALIAS("des"); +MODULE_ALIAS_CRYPTO("des"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/md5_glue.c +++ linux-3.13.0/arch/sparc/crypto/md5_glue.c @@ -185,6 +185,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); -MODULE_ALIAS("md5"); +MODULE_ALIAS_CRYPTO("md5"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/sha1_glue.c +++ linux-3.13.0/arch/sparc/crypto/sha1_glue.c @@ 
-180,6 +180,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/sha256_glue.c +++ linux-3.13.0/arch/sparc/crypto/sha256_glue.c @@ -237,7 +237,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); -MODULE_ALIAS("sha224"); -MODULE_ALIAS("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/crypto/sha512_glue.c +++ linux-3.13.0/arch/sparc/crypto/sha512_glue.c @@ -222,7 +222,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated"); -MODULE_ALIAS("sha384"); -MODULE_ALIAS("sha512"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); #include "crop_devid.c" --- linux-3.13.0.orig/arch/sparc/include/asm/atomic_32.h +++ linux-3.13.0/arch/sparc/include/asm/atomic_32.h @@ -21,7 +21,7 @@ extern int __atomic_add_return(int, atomic_t *); extern int atomic_cmpxchg(atomic_t *, int, int); -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +extern int atomic_xchg(atomic_t *, int); extern int __atomic_add_unless(atomic_t *, int, int); extern void atomic_set(atomic_t *, int); --- linux-3.13.0.orig/arch/sparc/include/asm/cmpxchg_32.h +++ linux-3.13.0/arch/sparc/include/asm/cmpxchg_32.h @@ -11,22 +11,14 @@ #ifndef __ARCH_SPARC_CMPXCHG__ #define __ARCH_SPARC_CMPXCHG__ -static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) -{ - __asm__ __volatile__("swap [%2], %0" - : "=&r" (val) - : "0" (val), "r" (m) - : "memory"); - return val; -} - +extern unsigned long __xchg_u32(volatile u32 *m, u32 new); extern void __xchg_called_with_bad_pointer(void); static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) { switch (size) { case 4: - return xchg_u32(ptr, x); + return __xchg_u32(ptr, x); } __xchg_called_with_bad_pointer(); return x; --- linux-3.13.0.orig/arch/sparc/include/asm/hypervisor.h +++ linux-3.13.0/arch/sparc/include/asm/hypervisor.h @@ -2944,6 +2944,16 @@ unsigned long reg_val); #endif +#define HV_FAST_T5_GET_PERFREG 0x1a8 +#define HV_FAST_T5_SET_PERFREG 0x1a9 + +#ifndef __ASSEMBLY__ +unsigned long sun4v_t5_get_perfreg(unsigned long reg_num, + unsigned long *reg_val); +unsigned long sun4v_t5_set_perfreg(unsigned long reg_num, + unsigned long reg_val); +#endif + /* Function numbers for HV_CORE_TRAP. */ #define HV_CORE_SET_VER 0x00 #define HV_CORE_PUTCHAR 0x01 @@ -2975,6 +2985,7 @@ #define HV_GRP_VF_CPU 0x0205 #define HV_GRP_KT_CPU 0x0209 #define HV_GRP_VT_CPU 0x020c +#define HV_GRP_T5_CPU 0x0211 #define HV_GRP_DIAG 0x0300 #ifndef __ASSEMBLY__ --- linux-3.13.0.orig/arch/sparc/include/asm/irq_64.h +++ linux-3.13.0/arch/sparc/include/asm/irq_64.h @@ -37,7 +37,7 @@ * * ino_bucket->irq allocation is made during {sun4v_,}build_irq(). 
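The sh dumpstack change a few hunks back, printk(data) becoming printk("%s", (char *)data), closes a classic format-string hazard; a minimal illustration:

	static void print_prefix(const char *prefix)
	{
		printk(prefix);		/* wrong: any '%' in 'prefix' is parsed
					 * as a conversion and reads bogus
					 * varargs */
		printk("%s", prefix);	/* right: 'prefix' is printed verbatim */
	}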
*/ -#define NR_IRQS 255 +#define NR_IRQS (2048) extern void irq_install_pre_handler(int irq, void (*func)(unsigned int, void *, void *), @@ -57,11 +57,8 @@ unsigned long iclr_base); extern void sun4u_destroy_msi(unsigned int irq); -extern unsigned char irq_alloc(unsigned int dev_handle, - unsigned int dev_ino); -#ifdef CONFIG_PCI_MSI -extern void irq_free(unsigned int irq); -#endif +unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino); +void irq_free(unsigned int irq); extern void __init init_IRQ(void); extern void fixup_irqs(void); --- linux-3.13.0.orig/arch/sparc/include/asm/ldc.h +++ linux-3.13.0/arch/sparc/include/asm/ldc.h @@ -53,13 +53,14 @@ /* Allocate state for a channel. */ extern struct ldc_channel *ldc_alloc(unsigned long id, const struct ldc_channel_config *cfgp, - void *event_arg); + void *event_arg, + const char *name); /* Shut down and free state for a channel. */ extern void ldc_free(struct ldc_channel *lp); /* Register TX and RX queues of the link with the hypervisor. */ -extern int ldc_bind(struct ldc_channel *lp, const char *name); +extern int ldc_bind(struct ldc_channel *lp); /* For non-RAW protocols we need to complete a handshake before * communication can proceed. ldc_connect() does that, if the --- linux-3.13.0.orig/arch/sparc/include/asm/oplib_64.h +++ linux-3.13.0/arch/sparc/include/asm/oplib_64.h @@ -62,7 +62,8 @@ /* You must call prom_init() before using any of the library services, * preferably as early as possible. Pass it the romvec pointer. */ -extern void prom_init(void *cif_handler, void *cif_stack); +extern void prom_init(void *cif_handler); +extern void prom_init_report(void); /* Boot argument acquisition, returns the boot command line string. */ extern char *prom_getbootargs(void); --- linux-3.13.0.orig/arch/sparc/include/asm/page_64.h +++ linux-3.13.0/arch/sparc/include/asm/page_64.h @@ -57,18 +57,21 @@ typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long iopte; } iopte_t; typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pud; } pud_t; typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define pte_val(x) ((x).pte) #define iopte_val(x) ((x).iopte) #define pmd_val(x) ((x).pmd) +#define pud_val(x) ((x).pud) #define pgd_val(x) ((x).pgd) #define pgprot_val(x) ((x).pgprot) #define __pte(x) ((pte_t) { (x) } ) #define __iopte(x) ((iopte_t) { (x) } ) #define __pmd(x) ((pmd_t) { (x) } ) +#define __pud(x) ((pud_t) { (x) } ) #define __pgd(x) ((pgd_t) { (x) } ) #define __pgprot(x) ((pgprot_t) { (x) } ) @@ -77,18 +80,21 @@ typedef unsigned long pte_t; typedef unsigned long iopte_t; typedef unsigned long pmd_t; +typedef unsigned long pud_t; typedef unsigned long pgd_t; typedef unsigned long pgprot_t; #define pte_val(x) (x) #define iopte_val(x) (x) #define pmd_val(x) (x) +#define pud_val(x) (x) #define pgd_val(x) (x) #define pgprot_val(x) (x) #define __pte(x) (x) #define __iopte(x) (x) #define __pmd(x) (x) +#define __pud(x) (x) #define __pgd(x) (x) #define __pgprot(x) (x) @@ -96,21 +102,14 @@ typedef pte_t *pgtable_t; -/* These two values define the virtual address space range in which we - * must forbid 64-bit user processes from making mappings. It used to - * represent precisely the virtual address space hole present in most - * early sparc64 chips including UltraSPARC-I. But now it also is - * further constrained by the limits of our page tables, which is - * 43-bits of virtual address. 
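For reference, the compile-time hole bounds removed here become per-boot variables, so the exclusion window is now computed at run time. A sketch of the new arrangement (the 47-bit values are only an illustrative assumption, not taken from the patch):

	extern unsigned long sparc64_va_hole_top;
	extern unsigned long sparc64_va_hole_bottom;

	/* 4GB red zone on either side of the hole, as before */
	#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL))
	#define VA_EXCLUDE_END   (sparc64_va_hole_top + (1UL << 32UL))

	/* e.g. on a chip with 47 implemented VA bits the boot code would set
	 *	sparc64_va_hole_bottom = 1UL << 46;
	 *	sparc64_va_hole_top    = -(1UL << 46);
	 */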
- */ -#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL) -#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL) +extern unsigned long sparc64_va_hole_top; +extern unsigned long sparc64_va_hole_bottom; /* The next two defines specify the actual exclusion region we * enforce, wherein we use a 4GB red zone on each side of the VA hole. */ -#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL)) -#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL)) +#define VA_EXCLUDE_START (sparc64_va_hole_bottom - (1UL << 32UL)) +#define VA_EXCLUDE_END (sparc64_va_hole_top + (1UL << 32UL)) #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ _AC(0x0000000070000000,UL) : \ @@ -118,20 +117,16 @@ #include -#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X))) extern unsigned long PAGE_OFFSET; #endif /* !(__ASSEMBLY__) */ -/* The maximum number of physical memory address bits we support, this - * is used to size various tables used to manage kernel TLB misses and - * also the sparsemem code. +/* The maximum number of physical memory address bits we support. The + * largest value we can support is whatever "KPGD_SHIFT + KPTE_BITS" + * evaluates to. */ -#define MAX_PHYS_ADDRESS_BITS 47 +#define MAX_PHYS_ADDRESS_BITS 53 -/* These two shift counts are used when indexing sparc64_valid_addr_bitmap - * and kpte_linear_bitmap. - */ #define ILOG2_4MB 22 #define ILOG2_256MB 28 --- linux-3.13.0.orig/arch/sparc/include/asm/pgalloc_64.h +++ linux-3.13.0/arch/sparc/include/asm/pgalloc_64.h @@ -15,6 +15,13 @@ extern struct kmem_cache *pgtable_cache; +static inline void __pgd_populate(pgd_t *pgd, pud_t *pud) +{ + pgd_set(pgd, pud); +} + +#define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD) + static inline pgd_t *pgd_alloc(struct mm_struct *mm) { return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); @@ -25,7 +32,23 @@ kmem_cache_free(pgtable_cache, pgd); } -#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) +static inline void __pud_populate(pud_t *pud, pmd_t *pmd) +{ + pud_set(pud, pmd); +} + +#define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD) + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(pgtable_cache, + GFP_KERNEL|__GFP_REPEAT); +} + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + kmem_cache_free(pgtable_cache, pud); +} static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { @@ -91,4 +114,7 @@ #define __pmd_free_tlb(tlb, pmd, addr) \ pgtable_free_tlb(tlb, pmd, false) +#define __pud_free_tlb(tlb, pud, addr) \ + pgtable_free_tlb(tlb, pud, false) + #endif /* _SPARC64_PGALLOC_H */ --- linux-3.13.0.orig/arch/sparc/include/asm/pgtable_64.h +++ linux-3.13.0/arch/sparc/include/asm/pgtable_64.h @@ -20,11 +20,10 @@ #include #include -#include - /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB). * The page copy blockops can use 0x6000000 to 0x8000000. - * The TSB is mapped in the 0x8000000 to 0xa000000 range. + * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range. + * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range. * The PROM resides in an area spanning 0xf0000000 to 0x100000000. * The vmalloc area spans 0x100000000 to 0x200000000. * Since modules need to be in the lowest 32-bits of the address space, @@ -33,17 +32,15 @@ * 0x400000000. 
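With pud_alloc_one()/pud_free() and the populate helpers above, building a branch of the page-table tree gains one level. A rough sketch, kernel context assumed and the sketch_ name purely hypothetical:

	static int sketch_build_branch(struct mm_struct *mm, pgd_t *pgd,
				       unsigned long addr)
	{
		pud_t *pud = pud_alloc_one(mm, addr);
		pmd_t *pmd = pud ? pmd_alloc_one(mm, addr) : NULL;

		if (!pud || !pmd) {
			if (pud)
				pud_free(mm, pud);
			return -ENOMEM;
		}
		pud_populate(mm, pud, pmd);	/* pud entry := __pa(pmd table) */
		pgd_populate(mm, pgd, pud);	/* pgd entry := __pa(pud table) */
		return 0;
	}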
*/ #define TLBTEMP_BASE _AC(0x0000000006000000,UL) -#define TSBMAP_BASE _AC(0x0000000008000000,UL) +#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL) +#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL) #define MODULES_VADDR _AC(0x0000000010000000,UL) #define MODULES_LEN _AC(0x00000000e0000000,UL) #define MODULES_END _AC(0x00000000f0000000,UL) #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) #define VMALLOC_START _AC(0x0000000100000000,UL) -#define VMALLOC_END _AC(0x0000010000000000,UL) -#define VMEMMAP_BASE _AC(0x0000010000000000,UL) - -#define vmemmap ((struct page *)VMEMMAP_BASE) +#define VMEMMAP_BASE VMALLOC_END /* PMD_SHIFT determines the size of the area a second-level page * table can map @@ -53,13 +50,25 @@ #define PMD_MASK (~(PMD_SIZE-1)) #define PMD_BITS (PAGE_SHIFT - 3) -/* PGDIR_SHIFT determines what a third-level page table entry can map */ -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS) +/* PUD_SHIFT determines the size of the area a third-level page + * table can map + */ +#define PUD_SHIFT (PMD_SHIFT + PMD_BITS) +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) +#define PUD_BITS (PAGE_SHIFT - 3) + +/* PGDIR_SHIFT determines what a fourth-level page table entry can map */ +#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS) #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PGDIR_BITS (PAGE_SHIFT - 3) -#if (PGDIR_SHIFT + PGDIR_BITS) != 43 +#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS) +#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support +#endif + +#if (PGDIR_SHIFT + PGDIR_BITS) != 53 #error Page table parameters do not cover virtual address space properly. #endif @@ -69,19 +78,32 @@ #ifndef __ASSEMBLY__ +extern unsigned long VMALLOC_END; + +#define vmemmap ((struct page *)VMEMMAP_BASE) + #include +bool kern_addr_valid(unsigned long addr); + /* Entries per page directory level. */ #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) #define PTRS_PER_PMD (1UL << PMD_BITS) +#define PTRS_PER_PUD (1UL << PUD_BITS) #define PTRS_PER_PGD (1UL << PGDIR_BITS) /* Kernel has a separate 44bit address space. 
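The resulting geometry is worth spelling out once, since both the '!= 53' check above and the new MAX_PHYS_ADDRESS_BITS depend on it:

	/* With 8K base pages (PAGE_SHIFT == 13) every table level indexes
	 * PAGE_SHIFT - 3 == 10 bits worth of entries, so:
	 *
	 *	PMD_SHIFT   = 13 + 10 = 23	(one PMD entry maps 8MB)
	 *	PUD_SHIFT   = 23 + 10 = 33	(one PUD entry maps 8GB)
	 *	PGDIR_SHIFT = 33 + 10 = 43
	 *	PGDIR_SHIFT + PGDIR_BITS = 53 bits of virtual address covered,
	 *
	 * which is why MAX_PHYS_ADDRESS_BITS can now be as large as 53 and
	 * why the sanity check insists on exactly 53.
	 */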
*/ #define FIRST_USER_ADDRESS 0 -#define pte_ERROR(e) __builtin_trap() -#define pmd_ERROR(e) __builtin_trap() -#define pgd_ERROR(e) __builtin_trap() +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \ + __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0)) +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \ + __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \ + __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0)) #endif /* !(__ASSEMBLY__) */ @@ -90,6 +112,7 @@ #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ #define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */ #define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */ +#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE /* Advertise support for _PAGE_SPECIAL */ #define __HAVE_ARCH_PTE_SPECIAL @@ -258,8 +281,8 @@ { unsigned long mask, tmp; - /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) - * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8) + /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7) + * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8) * * Even if we use negation tricks the result is still a 6 * instruction sequence, so don't try to play fancy and just @@ -289,10 +312,10 @@ " .previous\n" : "=r" (mask), "=r" (tmp) : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | - _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | + _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | - _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); @@ -633,29 +656,29 @@ { pte_t pte = __pte(pmd_val(pmd)); - return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte); + return pte_val(pte) & _PAGE_PMD_HUGE; } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline unsigned long pmd_young(pmd_t pmd) +static inline unsigned long pmd_pfn(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_young(pte); + return pte_pfn(pte); } -static inline unsigned long pmd_write(pmd_t pmd) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline unsigned long pmd_young(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_write(pte); + return pte_young(pte); } -static inline unsigned long pmd_pfn(pmd_t pmd) +static inline unsigned long pmd_write(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_pfn(pte); + return pte_write(pte); } static inline unsigned long pmd_trans_huge(pmd_t pmd) @@ -719,20 +742,6 @@ return __pmd(pte_val(pte)); } -static inline pmd_t pmd_mknotpresent(pmd_t pmd) -{ - unsigned long mask; - - if (tlb_type == hypervisor) - mask = _PAGE_PRESENT_4V; - else - mask = _PAGE_PRESENT_4U; - - pmd_val(pmd) &= ~mask; - - return pmd; -} - static inline pmd_t pmd_mksplitting(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); @@ -757,6 +766,22 @@ #define pmd_none(pmd) (!pmd_val(pmd)) +/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is + * very simple, it's just the physical address. PTE tables are of + * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and + * the top bits outside of the range of any physical address size we + * support are clear as well. We also validate the physical itself. 
+ */ +#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK) + +#define pud_none(pud) (!pud_val(pud)) + +#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK) + +#define pgd_none(pgd) (!pgd_val(pgd)) + +#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd); @@ -790,16 +815,34 @@ #define pud_page_vaddr(pud) \ ((unsigned long) __va(pud_val(pud))) #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) -#define pmd_bad(pmd) (0) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) -#define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) (0) #define pud_present(pud) (pud_val(pud) != 0U) #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) +#define pgd_page_vaddr(pgd) \ + ((unsigned long) __va(pgd_val(pgd))) +#define pgd_present(pgd) (pgd_val(pgd) != 0U) +#define pgd_clear(pgdp) (pgd_val(*(pgd)) = 0UL) + +static inline unsigned long pud_large(pud_t pud) +{ + pte_t pte = __pte(pud_val(pud)); + + return pte_val(pte) & _PAGE_PMD_HUGE; +} + +static inline unsigned long pud_pfn(pud_t pud) +{ + pte_t pte = __pte(pud_val(pud)); + + return pte_pfn(pte); +} /* Same in both SUN4V and SUN4U. */ #define pte_none(pte) (!pte_val(pte)) +#define pgd_set(pgdp, pudp) \ + (pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp)))) + /* to find an entry in a page-table-directory. */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) @@ -807,6 +850,11 @@ /* to find an entry in a kernel page-table-directory */ #define pgd_offset_k(address) pgd_offset(&init_mm, address) +/* Find an entry in the third-level page table.. */ +#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) +#define pud_offset(pgdp, address) \ + ((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address)) + /* Find an entry in the second-level page table.. 
*/ #define pmd_offset(pudp, address) \ ((pmd_t *) pud_page_vaddr(*(pudp)) + \ @@ -879,7 +927,6 @@ #endif extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; -extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD]; extern void paging_init(void); extern unsigned long find_ecache_flush_span(unsigned long size); @@ -893,6 +940,10 @@ extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd); +#define __HAVE_ARCH_PMDP_INVALIDATE +extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); + #define __HAVE_ARCH_PGTABLE_DEPOSIT extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable); @@ -919,18 +970,6 @@ extern pte_t pgoff_to_pte(unsigned long); #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) -extern unsigned long sparc64_valid_addr_bitmap[]; - -/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ -static inline bool kern_addr_valid(unsigned long addr) -{ - unsigned long paddr = __pa(addr); - - if ((paddr >> 41UL) != 0UL) - return false; - return test_bit(paddr >> 22, sparc64_valid_addr_bitmap); -} - extern int page_in_phys_avail(unsigned long paddr); /* --- linux-3.13.0.orig/arch/sparc/include/asm/setup.h +++ linux-3.13.0/arch/sparc/include/asm/setup.h @@ -24,6 +24,10 @@ } #endif +#ifdef CONFIG_SPARC64 +extern void __init start_early_boot(void); +#endif + extern void sun_do_break(void); extern int stop_a_enabled; extern int scons_pwroff; --- linux-3.13.0.orig/arch/sparc/include/asm/spitfire.h +++ linux-3.13.0/arch/sparc/include/asm/spitfire.h @@ -45,6 +45,8 @@ #define SUN4V_CHIP_NIAGARA3 0x03 #define SUN4V_CHIP_NIAGARA4 0x04 #define SUN4V_CHIP_NIAGARA5 0x05 +#define SUN4V_CHIP_SPARC_M6 0x06 +#define SUN4V_CHIP_SPARC_M7 0x07 #define SUN4V_CHIP_SPARC64X 0x8a #define SUN4V_CHIP_UNKNOWN 0xff --- linux-3.13.0.orig/arch/sparc/include/asm/thread_info_64.h +++ linux-3.13.0/arch/sparc/include/asm/thread_info_64.h @@ -63,7 +63,8 @@ struct pt_regs *kern_una_regs; unsigned int kern_una_insn; - unsigned long fpregs[0] __attribute__ ((aligned(64))); + unsigned long fpregs[(7 * 256) / sizeof(unsigned long)] + __attribute__ ((aligned(64))); }; #endif /* !(__ASSEMBLY__) */ @@ -102,6 +103,7 @@ #define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */ #define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */ #define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */ +#define FAULT_CODE_BAD_RA 0x20 /* Bad RA for sun4v */ #if PAGE_SHIFT == 13 #define THREAD_SIZE (2*PAGE_SIZE) --- linux-3.13.0.orig/arch/sparc/include/asm/tlbflush_64.h +++ linux-3.13.0/arch/sparc/include/asm/tlbflush_64.h @@ -34,6 +34,8 @@ { } +void flush_tlb_kernel_range(unsigned long start, unsigned long end); + #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE extern void flush_tlb_pending(void); @@ -48,11 +50,6 @@ #ifndef CONFIG_SMP -#define flush_tlb_kernel_range(start,end) \ -do { flush_tsb_kernel_range(start,end); \ - __flush_tlb_kernel_range(start,end); \ -} while (0) - static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) { __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); @@ -63,11 +60,6 @@ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); -#define flush_tlb_kernel_range(start, end) \ -do { flush_tsb_kernel_range(start,end); \ - smp_flush_tlb_kernel_range(start, end); \ -} while (0) - #define global_flush_tlb_page(mm, vaddr) \ smp_flush_tlb_page(mm, vaddr) --- 
linux-3.13.0.orig/arch/sparc/include/asm/tsb.h +++ linux-3.13.0/arch/sparc/include/asm/tsb.h @@ -133,9 +133,24 @@ sub TSB, 0x8, TSB; \ TSB_STORE(TSB, TAG); - /* Do a kernel page table walk. Leaves physical PTE pointer in - * REG1. Jumps to FAIL_LABEL on early page table walk termination. - * VADDR will not be clobbered, but REG2 will. + /* Do a kernel page table walk. Leaves valid PTE value in + * REG1. Jumps to FAIL_LABEL on early page table walk + * termination. VADDR will not be clobbered, but REG2 will. + * + * There are two masks we must apply to propagate bits from + * the virtual address into the PTE physical address field + * when dealing with huge pages. This is because the page + * table boundaries do not match the huge page size(s) the + * hardware supports. + * + * In these cases we propagate the bits that are below the + * page table level where we saw the huge page mapping, but + * are still within the relevant physical bits for the huge + * page size in question. So for PMD mappings (which fall on + * bit 23, for 8MB per PMD) we must propagate bit 22 for a + * 4MB huge page. For huge PUDs (which fall on bit 33, for + * 8GB per PUD), we have to accomodate 256MB and 2GB huge + * pages. So for those we propagate bits 32 to 28. */ #define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \ sethi %hi(swapper_pg_dir), REG1; \ @@ -145,15 +160,40 @@ andn REG2, 0x7, REG2; \ ldx [REG1 + REG2], REG1; \ brz,pn REG1, FAIL_LABEL; \ - sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ + sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + andn REG2, 0x7, REG2; \ + ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + brz,pn REG1, FAIL_LABEL; \ + sethi %uhi(_PAGE_PUD_HUGE), REG2; \ + brz,pn REG1, FAIL_LABEL; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + sethi %hi(0xf8000000), REG2; \ + bne,pt %xcc, 697f; \ + sllx REG2, 1, REG2; \ + sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + sethi %uhi(_PAGE_PMD_HUGE), REG2; \ brz,pn REG1, FAIL_LABEL; \ - sllx VADDR, 64 - PMD_SHIFT, REG2; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + be,pn %xcc, 698f; \ + sethi %hi(0x400000), REG2; \ +697: brgez,pn REG1, FAIL_LABEL; \ + andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + ba,pt %xcc, 699f; \ + or REG1, REG2, REG1; \ +698: sllx VADDR, 64 - PMD_SHIFT, REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ - add REG1, REG2, REG1; + ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + brgez,pn REG1, FAIL_LABEL; \ + nop; \ +699: /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. 
If it is not valid @@ -171,7 +211,8 @@ andcc REG1, REG2, %g0; \ be,pt %xcc, 700f; \ sethi %hi(4 * 1024 * 1024), REG2; \ - andn REG1, REG2, REG1; \ + brgez,pn REG1, FAIL_LABEL; \ + andn REG1, REG2, REG1; \ and VADDR, REG2, REG2; \ brlz,pt REG1, PTE_LABEL; \ or REG1, REG2, REG1; \ @@ -197,6 +238,11 @@ andn REG2, 0x7, REG2; \ ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ brz,pn REG1, FAIL_LABEL; \ + sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ + andn REG2, 0x7, REG2; \ + ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ @@ -245,8 +291,6 @@ (KERNEL_TSB_SIZE_BYTES / 16) #define KERNEL_TSB4M_NENTRIES 4096 -#define KTSB_PHYS_SHIFT 15 - /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries * and the found TTE will be left in REG1. REG3 and REG4 must @@ -255,17 +299,15 @@ * VADDR and TAG will be preserved and not clobbered by this macro. */ #define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ -661: sethi %hi(swapper_tsb), REG1; \ - or REG1, %lo(swapper_tsb), REG1; \ +661: sethi %uhi(swapper_tsb), REG1; \ + sethi %hi(swapper_tsb), REG2; \ + or REG1, %ulo(swapper_tsb), REG1; \ + or REG2, %lo(swapper_tsb), REG2; \ .section .swapper_tsb_phys_patch, "ax"; \ .word 661b; \ .previous; \ -661: nop; \ - .section .tsb_ldquad_phys_patch, "ax"; \ - .word 661b; \ - sllx REG1, KTSB_PHYS_SHIFT, REG1; \ - sllx REG1, KTSB_PHYS_SHIFT, REG1; \ - .previous; \ + sllx REG1, 32, REG1; \ + or REG1, REG2, REG1; \ srlx VADDR, PAGE_SHIFT, REG2; \ and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \ sllx REG2, 4, REG2; \ @@ -280,17 +322,15 @@ * we can make use of that for the index computation. 
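A rough C rendering of KERN_PGTABLE_WALK above may make the assembler easier to follow; the masks come straight from the sethi constants (0xf8000000 << 1 covers vaddr bits 32..28, 0x400000 is bit 22), the validity-bit tests are omitted, and the real macro of course walks physical addresses via ASI_PHYS_USE_EC loads. The sketch_ name is hypothetical:

	static unsigned long sketch_kern_pgtable_walk(unsigned long vaddr)
	{
		pgd_t *pgd = pgd_offset_k(vaddr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return 0;			/* FAIL_LABEL */
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			return 0;
		if (pud_large(*pud))	/* 256MB/2GB page under an 8GB PUD */
			return (pud_val(*pud) & ~0x1f0000000UL) |
			       (vaddr & 0x1f0000000UL);
		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd))
			return 0;
		if (pmd_large(*pmd))	/* 4MB page under an 8MB PMD */
			return (pmd_val(*pmd) & ~0x400000UL) |
			       (vaddr & 0x400000UL);
		return pte_val(*pte_offset_kernel(pmd, vaddr));
	}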
*/ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ -661: sethi %hi(swapper_4m_tsb), REG1; \ - or REG1, %lo(swapper_4m_tsb), REG1; \ +661: sethi %uhi(swapper_4m_tsb), REG1; \ + sethi %hi(swapper_4m_tsb), REG2; \ + or REG1, %ulo(swapper_4m_tsb), REG1; \ + or REG2, %lo(swapper_4m_tsb), REG2; \ .section .swapper_4m_tsb_phys_patch, "ax"; \ .word 661b; \ .previous; \ -661: nop; \ - .section .tsb_ldquad_phys_patch, "ax"; \ - .word 661b; \ - sllx REG1, KTSB_PHYS_SHIFT, REG1; \ - sllx REG1, KTSB_PHYS_SHIFT, REG1; \ - .previous; \ + sllx REG1, 32, REG1; \ + or REG1, REG2, REG1; \ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \ sllx REG2, 4, REG2; \ add REG1, REG2, REG2; \ --- linux-3.13.0.orig/arch/sparc/include/asm/vio.h +++ linux-3.13.0/arch/sparc/include/asm/vio.h @@ -118,12 +118,18 @@ u8 vdisk_type; #define VD_DISK_TYPE_SLICE 0x01 /* Slice in block device */ #define VD_DISK_TYPE_DISK 0x02 /* Entire block device */ - u16 resv1; + u8 vdisk_mtype; /* v1.1 */ +#define VD_MEDIA_TYPE_FIXED 0x01 /* Fixed device */ +#define VD_MEDIA_TYPE_CD 0x02 /* CD Device */ +#define VD_MEDIA_TYPE_DVD 0x03 /* DVD Device */ + u8 resv1; u32 vdisk_block_size; u64 operations; - u64 vdisk_size; + u64 vdisk_size; /* v1.1 */ u64 max_xfer_size; - u64 resv2[2]; + u32 phys_block_size; /* v1.2 */ + u32 resv2; + u64 resv3[1]; }; struct vio_disk_desc { @@ -259,7 +265,7 @@ unsigned int ring_size) { return (dr->pending - - ((dr->prod - dr->cons) & (ring_size - 1))); + ((dr->prod - dr->cons) & (ring_size - 1)) - 1); } #define VIO_MAX_TYPE_LEN 32 --- linux-3.13.0.orig/arch/sparc/include/asm/visasm.h +++ linux-3.13.0/arch/sparc/include/asm/visasm.h @@ -39,6 +39,14 @@ 297: wr %o5, FPRS_FEF, %fprs; \ 298: +#define VISEntryHalfFast(fail_label) \ + rd %fprs, %o5; \ + andcc %o5, FPRS_FEF, %g0; \ + be,pt %icc, 297f; \ + nop; \ + ba,a,pt %xcc, fail_label; \ +297: wr %o5, FPRS_FEF, %fprs; + #define VISExitHalf \ wr %o5, 0, %fprs; --- linux-3.13.0.orig/arch/sparc/include/uapi/asm/swab.h +++ linux-3.13.0/arch/sparc/include/uapi/asm/swab.h @@ -9,9 +9,9 @@ { __u16 ret; - __asm__ __volatile__ ("lduha [%1] %2, %0" + __asm__ __volatile__ ("lduha [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab16p __arch_swab16p @@ -20,9 +20,9 @@ { __u32 ret; - __asm__ __volatile__ ("lduwa [%1] %2, %0" + __asm__ __volatile__ ("lduwa [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab32p __arch_swab32p @@ -31,9 +31,9 @@ { __u64 ret; - __asm__ __volatile__ ("ldxa [%1] %2, %0" + __asm__ __volatile__ ("ldxa [%2] %3, %0" : "=r" (ret) - : "r" (addr), "i" (ASI_PL)); + : "m" (*addr), "r" (addr), "i" (ASI_PL)); return ret; } #define __arch_swab64p __arch_swab64p --- linux-3.13.0.orig/arch/sparc/kernel/cpu.c +++ linux-3.13.0/arch/sparc/kernel/cpu.c @@ -493,6 +493,18 @@ sparc_pmu_type = "niagara5"; break; + case SUN4V_CHIP_SPARC_M6: + sparc_cpu_type = "SPARC-M6"; + sparc_fpu_type = "SPARC-M6 integrated FPU"; + sparc_pmu_type = "sparc-m6"; + break; + + case SUN4V_CHIP_SPARC_M7: + sparc_cpu_type = "SPARC-M7"; + sparc_fpu_type = "SPARC-M7 integrated FPU"; + sparc_pmu_type = "sparc-m7"; + break; + case SUN4V_CHIP_SPARC64X: sparc_cpu_type = "SPARC64-X"; sparc_fpu_type = "SPARC64-X integrated FPU"; --- linux-3.13.0.orig/arch/sparc/kernel/cpumap.c +++ linux-3.13.0/arch/sparc/kernel/cpumap.c @@ -327,6 +327,8 @@ case SUN4V_CHIP_NIAGARA3: case SUN4V_CHIP_NIAGARA4: case SUN4V_CHIP_NIAGARA5: + case 
SUN4V_CHIP_SPARC_M6: + case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC64X: rover_inc_table = niagara_iterate_method; break; --- linux-3.13.0.orig/arch/sparc/kernel/ds.c +++ linux-3.13.0/arch/sparc/kernel/ds.c @@ -1200,14 +1200,14 @@ ds_cfg.tx_irq = vdev->tx_irq; ds_cfg.rx_irq = vdev->rx_irq; - lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); + lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS"); if (IS_ERR(lp)) { err = PTR_ERR(lp); goto out_free_ds_states; } dp->lp = lp; - err = ldc_bind(lp, "DS"); + err = ldc_bind(lp); if (err) goto out_free_ldc; --- linux-3.13.0.orig/arch/sparc/kernel/dtlb_prot.S +++ linux-3.13.0/arch/sparc/kernel/dtlb_prot.S @@ -24,11 +24,11 @@ mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr /* PROT ** ICACHE line 2: More real fault processing */ + ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5 bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup - ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5 - ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4 - nop + ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault + nop nop nop nop --- linux-3.13.0.orig/arch/sparc/kernel/entry.h +++ linux-3.13.0/arch/sparc/kernel/entry.h @@ -66,13 +66,10 @@ extern struct pause_patch_entry __pause_3insn_patch, __pause_3insn_patch_end; -extern void __init per_cpu_patch(void); extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, struct sun4v_1insn_patch_entry *); extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, struct sun4v_2insn_patch_entry *); -extern void __init sun4v_patch(void); -extern void __init boot_cpu_id_too_large(int cpu); extern unsigned int dcache_parity_tl1_occurred; extern unsigned int icache_parity_tl1_occurred; --- linux-3.13.0.orig/arch/sparc/kernel/head_64.S +++ linux-3.13.0/arch/sparc/kernel/head_64.S @@ -282,8 +282,8 @@ stx %l2, [%l4 + 0x0] ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low /* 4MB align */ - srlx %l3, 22, %l3 - sllx %l3, 22, %l3 + srlx %l3, ILOG2_4MB, %l3 + sllx %l3, ILOG2_4MB, %l3 stx %l3, [%l4 + 0x8] /* Leave service as-is, "call-method" */ @@ -427,6 +427,12 @@ cmp %g2, '5' be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA5, %g4 + cmp %g2, '6' + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_M6, %g4 + cmp %g2, '7' + be,pt %xcc, 5f + mov SUN4V_CHIP_SPARC_M7, %g4 ba,pt %xcc, 49f nop @@ -585,6 +591,12 @@ cmp %g1, SUN4V_CHIP_NIAGARA5 be,pt %xcc, niagara4_patch nop + cmp %g1, SUN4V_CHIP_SPARC_M6 + be,pt %xcc, niagara4_patch + nop + cmp %g1, SUN4V_CHIP_SPARC_M7 + be,pt %xcc, niagara4_patch + nop call generic_patch_copyops nop @@ -660,14 +672,12 @@ sethi %hi(init_thread_union), %g6 or %g6, %lo(init_thread_union), %g6 ldx [%g6 + TI_TASK], %g4 - mov %sp, %l6 wr %g0, ASI_P, %asi mov 1, %g1 sllx %g1, THREAD_SHIFT, %g1 sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 add %g6, %g1, %sp - mov 0, %fp /* Set per-cpu pointer initially to zero, this makes * the boot-cpu use the in-kernel-image per-cpu areas @@ -694,44 +704,14 @@ nop #endif - mov %l6, %o1 ! OpenPROM stack call prom_init mov %l7, %o0 ! OpenPROM cif handler - /* Initialize current_thread_info()->cpu as early as possible. - * In order to do that accurately we have to patch up the get_cpuid() - * assembler sequences. And that, in turn, requires that we know - * if we are on a Starfire box or not. While we're here, patch up - * the sun4v sequences as well. + /* To create a one-register-window buffer between the kernel's + * initial stack and the last stack frame we use from the firmware, + * do the rest of the boot from a C helper function. 
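The ds.c hunk above shows the direction of the LDC API change: the consumer name moves from ldc_bind() to ldc_alloc(). A hypothetical client written against the new prototypes (the sketch_ name and the "SKETCH" string are illustrative only):

	static int sketch_ldc_setup(unsigned long channel_id,
				    const struct ldc_channel_config *cfg,
				    void *arg)
	{
		struct ldc_channel *lp;
		int err;

		lp = ldc_alloc(channel_id, cfg, arg, "SKETCH");
		if (IS_ERR(lp))
			return PTR_ERR(lp);

		err = ldc_bind(lp);
		if (err) {
			ldc_free(lp);
			return err;
		}
		return 0;
	}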
*/ - call check_if_starfire - nop - call per_cpu_patch - nop - call sun4v_patch - nop - -#ifdef CONFIG_SMP - call hard_smp_processor_id - nop - cmp %o0, NR_CPUS - blu,pt %xcc, 1f - nop - call boot_cpu_id_too_large - nop - /* Not reached... */ - -1: -#else - mov 0, %o0 -#endif - sth %o0, [%g6 + TI_CPU] - - call prom_init_report - nop - - /* Off we go.... */ - call start_kernel + call start_early_boot nop /* Not reached... */ --- linux-3.13.0.orig/arch/sparc/kernel/hvapi.c +++ linux-3.13.0/arch/sparc/kernel/hvapi.c @@ -46,6 +46,7 @@ { .group = HV_GRP_VF_CPU, }, { .group = HV_GRP_KT_CPU, }, { .group = HV_GRP_VT_CPU, }, + { .group = HV_GRP_T5_CPU, }, { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, }; --- linux-3.13.0.orig/arch/sparc/kernel/hvcalls.S +++ linux-3.13.0/arch/sparc/kernel/hvcalls.S @@ -821,3 +821,19 @@ retl nop ENDPROC(sun4v_vt_set_perfreg) + +ENTRY(sun4v_t5_get_perfreg) + mov %o1, %o4 + mov HV_FAST_T5_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_t5_get_perfreg) + +ENTRY(sun4v_t5_set_perfreg) + mov HV_FAST_T5_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_t5_set_perfreg) --- linux-3.13.0.orig/arch/sparc/kernel/hvtramp.S +++ linux-3.13.0/arch/sparc/kernel/hvtramp.S @@ -110,7 +110,6 @@ sllx %g5, THREAD_SHIFT, %g5 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 add %g6, %g5, %sp - mov 0, %fp call init_irqwork_curcpu nop --- linux-3.13.0.orig/arch/sparc/kernel/ioport.c +++ linux-3.13.0/arch/sparc/kernel/ioport.c @@ -278,7 +278,8 @@ } order = get_order(len_total); - if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0) + va = __get_free_pages(gfp, order); + if (va == 0) goto err_nopages; if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) @@ -443,7 +444,7 @@ } order = get_order(len_total); - va = (void *) __get_free_pages(GFP_KERNEL, order); + va = (void *) __get_free_pages(gfp, order); if (va == NULL) { printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT); goto err_nopages; --- linux-3.13.0.orig/arch/sparc/kernel/irq_64.c +++ linux-3.13.0/arch/sparc/kernel/irq_64.c @@ -47,8 +47,6 @@ #include "cpumap.h" #include "kstack.h" -#define NUM_IVECS (IMAP_INR + 1) - struct ino_bucket *ivector_table; unsigned long ivector_table_pa; @@ -107,55 +105,196 @@ #define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa) -static struct { - unsigned int dev_handle; - unsigned int dev_ino; - unsigned int in_use; -} irq_table[NR_IRQS]; -static DEFINE_SPINLOCK(irq_alloc_lock); +static unsigned long hvirq_major __initdata; +static int __init early_hvirq_major(char *p) +{ + int rc = kstrtoul(p, 10, &hvirq_major); + + return rc; +} +early_param("hvirq", early_hvirq_major); + +static int hv_irq_version; + +/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie + * based interfaces, but: + * + * 1) Several OSs, Solaris and Linux included, use them even when only + * negotiating version 1.0 (or failing to negotiate at all). So the + * hypervisor has a workaround that provides the VIRQ interfaces even + * when only verion 1.0 of the API is in use. + * + * 2) Second, and more importantly, with major version 2.0 these VIRQ + * interfaces only were actually hooked up for LDC interrupts, even + * though the Hypervisor specification clearly stated: + * + * The new interrupt API functions will be available to a guest + * when it negotiates version 2.0 in the interrupt API group 0x2. 
When + * a guest negotiates version 2.0, all interrupt sources will only + * support using the cookie interface, and any attempt to use the + * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the + * ENOTSUPPORTED error being returned. + * + * with an emphasis on "all interrupt sources". + * + * To correct this, major version 3.0 was created which does actually + * support VIRQs for all interrupt sources (not just LDC devices). So + * if we want to move completely over the cookie based VIRQs we must + * negotiate major version 3.0 or later of HV_GRP_INTR. + */ +static bool sun4v_cookie_only_virqs(void) +{ + if (hv_irq_version >= 3) + return true; + return false; +} -unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino) +static void __init irq_init_hv(void) { - unsigned long flags; - unsigned char ent; + unsigned long hv_error, major, minor = 0; + + if (tlb_type != hypervisor) + return; - BUILD_BUG_ON(NR_IRQS >= 256); + if (hvirq_major) + major = hvirq_major; + else + major = 3; - spin_lock_irqsave(&irq_alloc_lock, flags); + hv_error = sun4v_hvapi_register(HV_GRP_INTR, major, &minor); + if (!hv_error) + hv_irq_version = major; + else + hv_irq_version = 1; - for (ent = 1; ent < NR_IRQS; ent++) { - if (!irq_table[ent].in_use) + pr_info("SUN4V: Using IRQ API major %d, cookie only virqs %s\n", + hv_irq_version, + sun4v_cookie_only_virqs() ? "enabled" : "disabled"); +} + +/* This function is for the timer interrupt.*/ +int __init arch_probe_nr_irqs(void) +{ + return 1; +} + +#define DEFAULT_NUM_IVECS (0xfffU) +static unsigned int nr_ivec = DEFAULT_NUM_IVECS; +#define NUM_IVECS (nr_ivec) + +static unsigned int __init size_nr_ivec(void) +{ + if (tlb_type == hypervisor) { + switch (sun4v_chip_type) { + /* Athena's devhandle|devino is large.*/ + case SUN4V_CHIP_SPARC64X: + nr_ivec = 0xffff; break; + } } - if (ent >= NR_IRQS) { - printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); - ent = 0; - } else { - irq_table[ent].dev_handle = dev_handle; - irq_table[ent].dev_ino = dev_ino; - irq_table[ent].in_use = 1; - } + return nr_ivec; +} + +struct irq_handler_data { + union { + struct { + unsigned int dev_handle; + unsigned int dev_ino; + }; + unsigned long sysino; + }; + struct ino_bucket bucket; + unsigned long iclr; + unsigned long imap; +}; + +static inline unsigned int irq_data_to_handle(struct irq_data *data) +{ + struct irq_handler_data *ihd = data->handler_data; + + return ihd->dev_handle; +} + +static inline unsigned int irq_data_to_ino(struct irq_data *data) +{ + struct irq_handler_data *ihd = data->handler_data; + + return ihd->dev_ino; +} - spin_unlock_irqrestore(&irq_alloc_lock, flags); +static inline unsigned long irq_data_to_sysino(struct irq_data *data) +{ + struct irq_handler_data *ihd = data->handler_data; - return ent; + return ihd->sysino; } -#ifdef CONFIG_PCI_MSI void irq_free(unsigned int irq) { - unsigned long flags; + void *data = irq_get_handler_data(irq); - if (irq >= NR_IRQS) - return; + kfree(data); + irq_set_handler_data(irq, NULL); + irq_free_descs(irq, 1); +} - spin_lock_irqsave(&irq_alloc_lock, flags); +unsigned int irq_alloc(unsigned int dev_handle, unsigned int dev_ino) +{ + int irq; - irq_table[irq].in_use = 0; + irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL); + if (irq <= 0) + goto out; - spin_unlock_irqrestore(&irq_alloc_lock, flags); + return irq; +out: + return 0; +} + +static unsigned int cookie_exists(u32 devhandle, unsigned int devino) +{ + unsigned long hv_err, cookie; + struct ino_bucket *bucket; + unsigned int irq = 0U; + + 
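The bit-63 test in cookie_exists() just below relies on how cookie_assign() later in this hunk builds the cookie: it is the complemented physical address of the per-IRQ ino_bucket, and since sparc64 physical addresses stay far below 2^63 the complement always has bit 63 set, which lets the mondo path tell a VIRQ cookie from a plain sysino. In sketch form (the sketch_ names are hypothetical):

	static bool sketch_is_virq_cookie(unsigned long mondo_data)
	{
		return (mondo_data & (1UL << 63)) != 0;
	}

	static unsigned int sketch_cookie_to_irq(unsigned long cookie)
	{
		struct ino_bucket *bucket = (struct ino_bucket *)__va(~cookie);

		return bucket->__irq;
	}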
hv_err = sun4v_vintr_get_cookie(devhandle, devino, &cookie); + if (hv_err) { + pr_err("HV get cookie failed hv_err = %ld\n", hv_err); + goto out; + } + + if (cookie & ((1UL << 63UL))) { + cookie = ~cookie; + bucket = (struct ino_bucket *) __va(cookie); + irq = bucket->__irq; + } +out: + return irq; +} + +static unsigned int sysino_exists(u32 devhandle, unsigned int devino) +{ + unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); + struct ino_bucket *bucket; + unsigned int irq; + + bucket = &ivector_table[sysino]; + irq = bucket_get_irq(__pa(bucket)); + + return irq; +} + +void ack_bad_irq(unsigned int irq) +{ + pr_crit("BAD IRQ ack %d\n", irq); +} + +void irq_install_pre_handler(int irq, + void (*func)(unsigned int, void *, void *), + void *arg1, void *arg2) +{ + pr_warn("IRQ pre handler NOT supported.\n"); } -#endif /* * /proc/interrupts printing: @@ -206,15 +345,6 @@ return tid; } -struct irq_handler_data { - unsigned long iclr; - unsigned long imap; - - void (*pre_handler)(unsigned int, void *, void *); - void *arg1; - void *arg2; -}; - #ifdef CONFIG_SMP static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) { @@ -316,8 +446,8 @@ static void sun4v_irq_enable(struct irq_data *data) { - unsigned int ino = irq_table[data->irq].dev_ino; unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity); + unsigned int ino = irq_data_to_sysino(data); int err; err = sun4v_intr_settarget(ino, cpuid); @@ -337,8 +467,8 @@ static int sun4v_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - unsigned int ino = irq_table[data->irq].dev_ino; unsigned long cpuid = irq_choose_cpu(data->irq, mask); + unsigned int ino = irq_data_to_sysino(data); int err; err = sun4v_intr_settarget(ino, cpuid); @@ -351,7 +481,7 @@ static void sun4v_irq_disable(struct irq_data *data) { - unsigned int ino = irq_table[data->irq].dev_ino; + unsigned int ino = irq_data_to_sysino(data); int err; err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); @@ -362,7 +492,7 @@ static void sun4v_irq_eoi(struct irq_data *data) { - unsigned int ino = irq_table[data->irq].dev_ino; + unsigned int ino = irq_data_to_sysino(data); int err; err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); @@ -373,14 +503,13 @@ static void sun4v_virq_enable(struct irq_data *data) { - unsigned long cpuid, dev_handle, dev_ino; + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); + unsigned long cpuid; int err; cpuid = irq_choose_cpu(data->irq, data->affinity); - dev_handle = irq_table[data->irq].dev_handle; - dev_ino = irq_table[data->irq].dev_ino; - err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " @@ -403,14 +532,13 @@ static int sun4v_virt_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - unsigned long cpuid, dev_handle, dev_ino; + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); + unsigned long cpuid; int err; cpuid = irq_choose_cpu(data->irq, mask); - dev_handle = irq_table[data->irq].dev_handle; - dev_ino = irq_table[data->irq].dev_ino; - err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); if (err != HV_EOK) printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " @@ -422,11 +550,10 @@ static void sun4v_virq_disable(struct irq_data *data) { - unsigned long dev_handle, dev_ino; + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); 
int err; - dev_handle = irq_table[data->irq].dev_handle; - dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_valid(dev_handle, dev_ino, HV_INTR_DISABLED); @@ -438,12 +565,10 @@ static void sun4v_virq_eoi(struct irq_data *data) { - unsigned long dev_handle, dev_ino; + unsigned long dev_handle = irq_data_to_handle(data); + unsigned long dev_ino = irq_data_to_ino(data); int err; - dev_handle = irq_table[data->irq].dev_handle; - dev_ino = irq_table[data->irq].dev_ino; - err = sun4v_vintr_set_state(dev_handle, dev_ino, HV_INTR_STATE_IDLE); if (err != HV_EOK) @@ -479,31 +604,10 @@ .flags = IRQCHIP_EOI_IF_HANDLED, }; -static void pre_flow_handler(struct irq_data *d) -{ - struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d); - unsigned int ino = irq_table[d->irq].dev_ino; - - handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); -} - -void irq_install_pre_handler(int irq, - void (*func)(unsigned int, void *, void *), - void *arg1, void *arg2) -{ - struct irq_handler_data *handler_data = irq_get_handler_data(irq); - - handler_data->pre_handler = func; - handler_data->arg1 = arg1; - handler_data->arg2 = arg2; - - __irq_set_preflow_handler(irq, pre_flow_handler); -} - unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) { - struct ino_bucket *bucket; struct irq_handler_data *handler_data; + struct ino_bucket *bucket; unsigned int irq; int ino; @@ -537,119 +641,166 @@ return irq; } -static unsigned int sun4v_build_common(unsigned long sysino, - struct irq_chip *chip) +static unsigned int sun4v_build_common(u32 devhandle, unsigned int devino, + void (*handler_data_init)(struct irq_handler_data *data, + u32 devhandle, unsigned int devino), + struct irq_chip *chip) { - struct ino_bucket *bucket; - struct irq_handler_data *handler_data; + struct irq_handler_data *data; unsigned int irq; - BUG_ON(tlb_type != hypervisor); + irq = irq_alloc(devhandle, devino); + if (!irq) + goto out; - bucket = &ivector_table[sysino]; - irq = bucket_get_irq(__pa(bucket)); - if (!irq) { - irq = irq_alloc(0, sysino); - bucket_set_irq(__pa(bucket), irq); - irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, - "IVEC"); + data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); + if (unlikely(!data)) { + pr_err("IRQ handler data allocation failed.\n"); + irq_free(irq); + irq = 0; + goto out; } - handler_data = irq_get_handler_data(irq); - if (unlikely(handler_data)) - goto out; + irq_set_handler_data(irq, data); + handler_data_init(data, devhandle, devino); + irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC"); + data->imap = ~0UL; + data->iclr = ~0UL; +out: + return irq; +} - handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); - if (unlikely(!handler_data)) { - prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); - prom_halt(); - } - irq_set_handler_data(irq, handler_data); +static unsigned long cookie_assign(unsigned int irq, u32 devhandle, + unsigned int devino) +{ + struct irq_handler_data *ihd = irq_get_handler_data(irq); + unsigned long hv_error, cookie; - /* Catch accidental accesses to these things. IMAP/ICLR handling - * is done by hypervisor calls on sun4v platforms, not by direct - * register accesses. + /* handler_irq needs to find the irq. cookie is seen signed in + * sun4v_dev_mondo and treated as a non ivector_table delivery. 
*/ - handler_data->imap = ~0UL; - handler_data->iclr = ~0UL; + ihd->bucket.__irq = irq; + cookie = ~__pa(&ihd->bucket); -out: - return irq; + hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie); + if (hv_error) + pr_err("HV vintr set cookie failed = %ld\n", hv_error); + + return hv_error; } -unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) +static void cookie_handler_data(struct irq_handler_data *data, + u32 devhandle, unsigned int devino) { - unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino); + data->dev_handle = devhandle; + data->dev_ino = devino; +} - return sun4v_build_common(sysino, &sun4v_irq); +static unsigned int cookie_build_irq(u32 devhandle, unsigned int devino, + struct irq_chip *chip) +{ + unsigned long hv_error; + unsigned int irq; + + irq = sun4v_build_common(devhandle, devino, cookie_handler_data, chip); + + hv_error = cookie_assign(irq, devhandle, devino); + if (hv_error) { + irq_free(irq); + irq = 0; + } + + return irq; } -unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) +static unsigned int sun4v_build_cookie(u32 devhandle, unsigned int devino) { - struct irq_handler_data *handler_data; - unsigned long hv_err, cookie; - struct ino_bucket *bucket; unsigned int irq; - bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); - if (unlikely(!bucket)) - return 0; - - /* The only reference we store to the IRQ bucket is - * by physical address which kmemleak can't see, tell - * it that this object explicitly is not a leak and - * should be scanned. - */ - kmemleak_not_leak(bucket); + irq = cookie_exists(devhandle, devino); + if (irq) + goto out; - __flush_dcache_range((unsigned long) bucket, - ((unsigned long) bucket + - sizeof(struct ino_bucket))); + irq = cookie_build_irq(devhandle, devino, &sun4v_virq); - irq = irq_alloc(devhandle, devino); +out: + return irq; +} + +static void sysino_set_bucket(unsigned int irq) +{ + struct irq_handler_data *ihd = irq_get_handler_data(irq); + struct ino_bucket *bucket; + unsigned long sysino; + + sysino = sun4v_devino_to_sysino(ihd->dev_handle, ihd->dev_ino); + BUG_ON(sysino >= nr_ivec); + bucket = &ivector_table[sysino]; bucket_set_irq(__pa(bucket), irq); +} - irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq, - "IVEC"); +static void sysino_handler_data(struct irq_handler_data *data, + u32 devhandle, unsigned int devino) +{ + unsigned long sysino; - handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); - if (unlikely(!handler_data)) - return 0; + sysino = sun4v_devino_to_sysino(devhandle, devino); + data->sysino = sysino; +} - /* In order to make the LDC channel startup sequence easier, - * especially wrt. locking, we do not let request_irq() enable - * the interrupt. - */ - irq_set_status_flags(irq, IRQ_NOAUTOEN); - irq_set_handler_data(irq, handler_data); +static unsigned int sysino_build_irq(u32 devhandle, unsigned int devino, + struct irq_chip *chip) +{ + unsigned int irq; - /* Catch accidental accesses to these things. IMAP/ICLR handling - * is done by hypervisor calls on sun4v platforms, not by direct - * register accesses. 
- */ - handler_data->imap = ~0UL; - handler_data->iclr = ~0UL; + irq = sun4v_build_common(devhandle, devino, sysino_handler_data, chip); + if (!irq) + goto out; - cookie = ~__pa(bucket); - hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); - if (hv_err) { - prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " - "err=%lu\n", devhandle, devino, hv_err); - prom_halt(); - } + sysino_set_bucket(irq); +out: + return irq; +} +static int sun4v_build_sysino(u32 devhandle, unsigned int devino) +{ + int irq; + + irq = sysino_exists(devhandle, devino); + if (irq) + goto out; + + irq = sysino_build_irq(devhandle, devino, &sun4v_irq); +out: return irq; } -void ack_bad_irq(unsigned int irq) +unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) { - unsigned int ino = irq_table[irq].dev_ino; + unsigned int irq; - if (!ino) - ino = 0xdeadbeef; + if (sun4v_cookie_only_virqs()) + irq = sun4v_build_cookie(devhandle, devino); + else + irq = sun4v_build_sysino(devhandle, devino); - printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n", - ino, irq); + return irq; +} + +unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) +{ + int irq; + + irq = cookie_build_irq(devhandle, devino, &sun4v_virq); + if (!irq) + goto out; + + /* This is borrowed from the original function. + */ + irq_set_status_flags(irq, IRQ_NOAUTOEN); + +out: + return irq; } void *hardirq_stack[NR_CPUS]; @@ -720,9 +871,12 @@ for (irq = 0; irq < NR_IRQS; irq++) { struct irq_desc *desc = irq_to_desc(irq); - struct irq_data *data = irq_desc_get_irq_data(desc); + struct irq_data *data; unsigned long flags; + if (!desc) + continue; + data = irq_desc_get_irq_data(desc); raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && !irqd_is_per_cpu(data)) { if (data->chip->irq_set_affinity) @@ -922,16 +1076,22 @@ .name = "timer", }; -/* Only invoked on boot processor. */ -void __init init_IRQ(void) +static void __init irq_ivector_init(void) { - unsigned long size; + unsigned long size, order; + unsigned int ivecs; - map_prom_timers(); - kill_prom_timer(); + /* If we are doing cookie only VIRQs then we do not need the ivector + * table to process interrupts. + */ + if (sun4v_cookie_only_virqs()) + return; - size = sizeof(struct ino_bucket) * NUM_IVECS; - ivector_table = kzalloc(size, GFP_KERNEL); + ivecs = size_nr_ivec(); + size = sizeof(struct ino_bucket) * ivecs; + order = get_order(size); + ivector_table = (struct ino_bucket *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!ivector_table) { prom_printf("Fatal error, cannot allocate ivector_table\n"); prom_halt(); @@ -940,6 +1100,15 @@ ((unsigned long) ivector_table) + size); ivector_table_pa = __pa(ivector_table); +} + +/* Only invoked on boot processor.*/ +void __init init_IRQ(void) +{ + irq_init_hv(); + irq_ivector_init(); + map_prom_timers(); + kill_prom_timer(); if (tlb_type == hypervisor) sun4v_init_mondo_queues(); --- linux-3.13.0.orig/arch/sparc/kernel/ktlb.S +++ linux-3.13.0/arch/sparc/kernel/ktlb.S @@ -47,14 +47,6 @@ KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) TSB_LOCK_TAG(%g1, %g2, %g7) - - /* Load and check PTE. 
*/ - ldxa [%g5] ASI_PHYS_USE_EC, %g5 - mov 1, %g7 - sllx %g7, TSB_TAG_INVALID_BIT, %g7 - brgez,a,pn %g5, kvmap_itlb_longpath - TSB_STORE(%g1, %g7) - TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -118,6 +110,12 @@ ba,pt %xcc, kvmap_dtlb_load nop +kvmap_linear_early: + sethi %hi(kern_linear_pte_xor), %g7 + ldx [%g7 + %lo(kern_linear_pte_xor)], %g2 + ba,pt %xcc, kvmap_dtlb_tsb4m_load + xor %g2, %g4, %g5 + .align 32 kvmap_dtlb_tsb4m_load: TSB_LOCK_TAG(%g1, %g2, %g7) @@ -146,105 +144,17 @@ /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */ KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load) #endif - /* TSB entry address left in %g1, lookup linear PTE. - * Must preserve %g1 and %g6 (TAG). - */ -kvmap_dtlb_tsb4m_miss: - /* Clear the PAGE_OFFSET top virtual bits, shift - * down to get PFN, and make sure PFN is in range. - */ -661: sllx %g4, 0, %g5 - .section .page_offset_shift_patch, "ax" - .word 661b - .previous - - /* Check to see if we know about valid memory at the 4MB - * chunk this physical address will reside within. + /* Linear mapping TSB lookup failed. Fallthrough to kernel + * page table based lookup. */ -661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2 - .section .page_offset_shift_patch, "ax" - .word 661b - .previous - - brnz,pn %g2, kvmap_dtlb_longpath - nop - - /* This unconditional branch and delay-slot nop gets patched - * by the sethi sequence once the bitmap is properly setup. - */ - .globl valid_addr_bitmap_insn -valid_addr_bitmap_insn: - ba,pt %xcc, 2f - nop - .subsection 2 - .globl valid_addr_bitmap_patch -valid_addr_bitmap_patch: - sethi %hi(sparc64_valid_addr_bitmap), %g7 - or %g7, %lo(sparc64_valid_addr_bitmap), %g7 - .previous - -661: srlx %g5, ILOG2_4MB, %g2 - .section .page_offset_shift_patch, "ax" - .word 661b - .previous - - srlx %g2, 6, %g5 - and %g2, 63, %g2 - sllx %g5, 3, %g5 - ldx [%g7 + %g5], %g5 - mov 1, %g7 - sllx %g7, %g2, %g7 - andcc %g5, %g7, %g0 - be,pn %xcc, kvmap_dtlb_longpath - -2: sethi %hi(kpte_linear_bitmap), %g2 - - /* Get the 256MB physical address index. */ -661: sllx %g4, 0, %g5 - .section .page_offset_shift_patch, "ax" - .word 661b - .previous - - or %g2, %lo(kpte_linear_bitmap), %g2 - -661: srlx %g5, ILOG2_256MB, %g5 - .section .page_offset_shift_patch, "ax" - .word 661b - .previous - - and %g5, (32 - 1), %g7 - - /* Divide by 32 to get the offset into the bitmask. */ - srlx %g5, 5, %g5 - add %g7, %g7, %g7 - sllx %g5, 3, %g5 - - /* kern_linear_pte_xor[(mask >> shift) & 3)] */ - ldx [%g2 + %g5], %g2 - srlx %g2, %g7, %g7 - sethi %hi(kern_linear_pte_xor), %g5 - and %g7, 3, %g7 - or %g5, %lo(kern_linear_pte_xor), %g5 - sllx %g7, 3, %g7 - ldx [%g5 + %g7], %g2 - .globl kvmap_linear_patch kvmap_linear_patch: - ba,pt %xcc, kvmap_dtlb_tsb4m_load - xor %g2, %g4, %g5 + ba,a,pt %xcc, kvmap_linear_early kvmap_dtlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) TSB_LOCK_TAG(%g1, %g2, %g7) - - /* Load and check PTE. 
*/ - ldxa [%g5] ASI_PHYS_USE_EC, %g5 - mov 1, %g7 - sllx %g7, TSB_TAG_INVALID_BIT, %g7 - brgez,a,pn %g5, kvmap_dtlb_longpath - TSB_STORE(%g1, %g7) - TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -276,13 +186,8 @@ #ifdef CONFIG_SPARSEMEM_VMEMMAP kvmap_vmemmap: - sub %g4, %g5, %g5 - srlx %g5, 22, %g5 - sethi %hi(vmemmap_table), %g1 - sllx %g5, 3, %g5 - or %g1, %lo(vmemmap_table), %g1 - ba,pt %xcc, kvmap_dtlb_load - ldx [%g1 + %g5], %g5 + KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) + ba,a,pt %xcc, kvmap_dtlb_load #endif kvmap_dtlb_nonlinear: @@ -294,8 +199,8 @@ #ifdef CONFIG_SPARSEMEM_VMEMMAP /* Do not use the TSB for vmemmap. */ - mov (VMEMMAP_BASE >> 40), %g5 - sllx %g5, 40, %g5 + sethi %hi(VMEMMAP_BASE), %g5 + ldx [%g5 + %lo(VMEMMAP_BASE)], %g5 cmp %g4,%g5 bgeu,pn %xcc, kvmap_vmemmap nop @@ -307,8 +212,8 @@ sethi %hi(MODULES_VADDR), %g5 cmp %g4, %g5 blu,pn %xcc, kvmap_dtlb_longpath - mov (VMALLOC_END >> 40), %g5 - sllx %g5, 40, %g5 + sethi %hi(VMALLOC_END), %g5 + ldx [%g5 + %lo(VMALLOC_END)], %g5 cmp %g4, %g5 bgeu,pn %xcc, kvmap_dtlb_longpath nop --- linux-3.13.0.orig/arch/sparc/kernel/ldc.c +++ linux-3.13.0/arch/sparc/kernel/ldc.c @@ -1078,7 +1078,8 @@ struct ldc_channel *ldc_alloc(unsigned long id, const struct ldc_channel_config *cfgp, - void *event_arg) + void *event_arg, + const char *name) { struct ldc_channel *lp; const struct ldc_mode_ops *mops; @@ -1093,6 +1094,8 @@ err = -EINVAL; if (!cfgp) goto out_err; + if (!name) + goto out_err; switch (cfgp->mode) { case LDC_MODE_RAW: @@ -1185,6 +1188,21 @@ INIT_HLIST_HEAD(&lp->mh_list); + snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); + snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); + + err = request_irq(lp->cfg.rx_irq, ldc_rx, 0, + lp->rx_irq_name, lp); + if (err) + goto out_free_txq; + + err = request_irq(lp->cfg.tx_irq, ldc_tx, 0, + lp->tx_irq_name, lp); + if (err) { + free_irq(lp->cfg.rx_irq, lp); + goto out_free_txq; + } + return lp; out_free_txq: @@ -1237,31 +1255,14 @@ * state. This does not initiate a handshake, ldc_connect() does * that. */ -int ldc_bind(struct ldc_channel *lp, const char *name) +int ldc_bind(struct ldc_channel *lp) { unsigned long hv_err, flags; int err = -EINVAL; - if (!name || - (lp->state != LDC_STATE_INIT)) + if (lp->state != LDC_STATE_INIT) return -EINVAL; - snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); - snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); - - err = request_irq(lp->cfg.rx_irq, ldc_rx, 0, - lp->rx_irq_name, lp); - if (err) - return err; - - err = request_irq(lp->cfg.tx_irq, ldc_tx, 0, - lp->tx_irq_name, lp); - if (err) { - free_irq(lp->cfg.rx_irq, lp); - return err; - } - - spin_lock_irqsave(&lp->lock, flags); enable_irq(lp->cfg.rx_irq); @@ -1336,7 +1337,7 @@ if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) || lp->hs_state != LDC_HS_OPEN) - err = -EINVAL; + err = ((lp->hs_state > LDC_HS_OPEN) ? 
0 : -EINVAL); else err = start_handshake(lp); --- linux-3.13.0.orig/arch/sparc/kernel/nmi.c +++ linux-3.13.0/arch/sparc/kernel/nmi.c @@ -141,7 +141,6 @@ static __init void nmi_cpu_busy(void *data) { - local_irq_enable_in_hardirq(); while (endflag == 0) mb(); } --- linux-3.13.0.orig/arch/sparc/kernel/pci_schizo.c +++ linux-3.13.0/arch/sparc/kernel/pci_schizo.c @@ -581,7 +581,7 @@ { unsigned long csr_reg, csr, csr_error_bits; irqreturn_t ret = IRQ_NONE; - u16 stat; + u32 stat; csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; csr = upa_readq(csr_reg); @@ -617,7 +617,7 @@ pbm->name); ret = IRQ_HANDLED; } - pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); + pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat); if (stat & (PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT | @@ -625,7 +625,7 @@ PCI_STATUS_SIG_SYSTEM_ERROR)) { printk("%s: PCI bus error, PCI_STATUS[%04x]\n", pbm->name, stat); - pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); + pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff); ret = IRQ_HANDLED; } return ret; --- linux-3.13.0.orig/arch/sparc/kernel/pcr.c +++ linux-3.13.0/arch/sparc/kernel/pcr.c @@ -191,12 +191,41 @@ .pcr_nmi_disable = PCR_N4_PICNPT, }; +static u64 n5_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_t5_get_perfreg(reg_num, &val); + + return val; +} + +static void n5_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_t5_set_perfreg(reg_num, val); +} + +static const struct pcr_ops n5_pcr_ops = { + .read_pcr = n5_pcr_read, + .write_pcr = n5_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, +}; + + static unsigned long perf_hsvc_group; static unsigned long perf_hsvc_major; static unsigned long perf_hsvc_minor; static int __init register_perf_hsvc(void) { + unsigned long hverror; + if (tlb_type == hypervisor) { switch (sun4v_chip_type) { case SUN4V_CHIP_NIAGARA1: @@ -215,6 +244,10 @@ perf_hsvc_group = HV_GRP_VT_CPU; break; + case SUN4V_CHIP_NIAGARA5: + perf_hsvc_group = HV_GRP_T5_CPU; + break; + default: return -ENODEV; } @@ -222,10 +255,12 @@ perf_hsvc_major = 1; perf_hsvc_minor = 0; - if (sun4v_hvapi_register(perf_hsvc_group, - perf_hsvc_major, - &perf_hsvc_minor)) { - printk("perfmon: Could not register hvapi.\n"); + hverror = sun4v_hvapi_register(perf_hsvc_group, + perf_hsvc_major, + &perf_hsvc_minor); + if (hverror) { + pr_err("perfmon: Could not register hvapi(0x%lx).\n", + hverror); return -ENODEV; } } @@ -254,6 +289,10 @@ pcr_ops = &n4_pcr_ops; break; + case SUN4V_CHIP_NIAGARA5: + pcr_ops = &n5_pcr_ops; + break; + default: ret = -ENODEV; break; --- linux-3.13.0.orig/arch/sparc/kernel/perf_event.c +++ linux-3.13.0/arch/sparc/kernel/perf_event.c @@ -960,6 +960,8 @@ cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; } +static void sparc_pmu_start(struct perf_event *event, int flags); + /* On this PMU each PIC has it's own PCR control register. 
*/ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) { @@ -972,20 +974,13 @@ struct perf_event *cp = cpuc->event[i]; struct hw_perf_event *hwc = &cp->hw; int idx = hwc->idx; - u64 enc; if (cpuc->current_idx[i] != PIC_NO_INDEX) continue; - sparc_perf_event_set_period(cp, hwc, idx); cpuc->current_idx[i] = idx; - enc = perf_event_get_enc(cpuc->events[i]); - cpuc->pcr[idx] &= ~mask_for_index(idx); - if (hwc->state & PERF_HES_STOPPED) - cpuc->pcr[idx] |= nop_for_index(idx); - else - cpuc->pcr[idx] |= event_encoding(enc, idx); + sparc_pmu_start(cp, PERF_EF_RELOAD); } out: for (i = 0; i < cpuc->n_events; i++) { @@ -1101,7 +1096,6 @@ int i; local_irq_save(flags); - perf_pmu_disable(event->pmu); for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event[i]) { @@ -1127,7 +1121,6 @@ } } - perf_pmu_enable(event->pmu); local_irq_restore(flags); } @@ -1361,7 +1354,6 @@ unsigned long flags; local_irq_save(flags); - perf_pmu_disable(event->pmu); n0 = cpuc->n_events; if (n0 >= sparc_pmu->max_hw_events) @@ -1394,7 +1386,6 @@ ret = 0; out: - perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; } @@ -1662,7 +1653,8 @@ sparc_pmu = &niagara2_pmu; return true; } - if (!strcmp(sparc_pmu_type, "niagara4")) { + if (!strcmp(sparc_pmu_type, "niagara4") || + !strcmp(sparc_pmu_type, "niagara5")) { sparc_pmu = &niagara4_pmu; return true; } @@ -1671,9 +1663,12 @@ int __init init_hw_perf_events(void) { + int err; + pr_info("Performance events: "); - if (!supported_pmu()) { + err = pcr_arch_init(); + if (err || !supported_pmu()) { pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); return 0; } @@ -1685,7 +1680,7 @@ return 0; } -early_initcall(init_hw_perf_events); +pure_initcall(init_hw_perf_events); void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) --- linux-3.13.0.orig/arch/sparc/kernel/process_64.c +++ linux-3.13.0/arch/sparc/kernel/process_64.c @@ -58,9 +58,12 @@ { if (tlb_type != hypervisor) { touch_nmi_watchdog(); + local_irq_enable(); } else { unsigned long pstate; + local_irq_enable(); + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over * the cpu sleep hypervisor call. */ @@ -82,7 +85,6 @@ : "=&r" (pstate) : "i" (PSTATE_IE)); } - local_irq_enable(); } #ifdef CONFIG_HOTPLUG_CPU @@ -279,6 +281,8 @@ printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n", gp->tpc, gp->o7, gp->i7, gp->rpc); } + + touch_nmi_watchdog(); } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); @@ -304,6 +308,9 @@ struct global_pmu_snapshot *pp; int i, num; + if (!pcr_ops) + return; + pp = &global_cpu_snapshot[this_cpu].pmu; num = 1; @@ -351,6 +358,8 @@ (cpu == this_cpu ? 
'*' : ' '), cpu, pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); + + touch_nmi_watchdog(); } memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); --- linux-3.13.0.orig/arch/sparc/kernel/setup_64.c +++ linux-3.13.0/arch/sparc/kernel/setup_64.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -174,7 +175,7 @@ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; -void __init per_cpu_patch(void) +static void __init per_cpu_patch(void) { struct cpuid_patch_entry *p; unsigned long ver; @@ -266,7 +267,7 @@ } } -void __init sun4v_patch(void) +static void __init sun4v_patch(void) { extern void sun4v_hvapi_init(void); @@ -335,14 +336,25 @@ } } -#ifdef CONFIG_SMP -void __init boot_cpu_id_too_large(int cpu) +void __init start_early_boot(void) { - prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", - cpu, NR_CPUS); - prom_halt(); + int cpu; + + check_if_starfire(); + per_cpu_patch(); + sun4v_patch(); + + cpu = hard_smp_processor_id(); + if (cpu >= NR_CPUS) { + prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", + cpu, NR_CPUS); + prom_halt(); + } + current_thread_info()->cpu = cpu; + + prom_init_report(); + start_kernel(); } -#endif /* On Ultra, we support all of the v8 capabilities. */ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | @@ -500,12 +512,16 @@ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= HWCAP_SPARC_BLKINIT; if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= HWCAP_SPARC_N2; } @@ -533,6 +549,8 @@ sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | AV_SPARC_ASI_BLK_INIT | @@ -540,6 +558,8 @@ if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || + sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || sun4v_chip_type == SUN4V_CHIP_SPARC64X) cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | AV_SPARC_FMAF); --- linux-3.13.0.orig/arch/sparc/kernel/smp_64.c +++ linux-3.13.0/arch/sparc/kernel/smp_64.c @@ -151,7 +151,7 @@ #define NUM_ROUNDS 64 /* magic value */ #define NUM_ITERS 5 /* likewise */ -static DEFINE_SPINLOCK(itc_sync_lock); +static DEFINE_RAW_SPINLOCK(itc_sync_lock); static unsigned long go[SLAVE + 1]; #define DEBUG_TICK_SYNC 0 @@ -259,7 +259,7 @@ go[MASTER] = 0; membar_safe("#StoreLoad"); - spin_lock_irqsave(&itc_sync_lock, flags); + raw_spin_lock_irqsave(&itc_sync_lock, flags); { for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { while (!go[MASTER]) @@ -270,7 +270,7 @@ membar_safe("#StoreLoad"); } } - spin_unlock_irqrestore(&itc_sync_lock, flags); + raw_spin_unlock_irqrestore(&itc_sync_lock, flags); } #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) @@ -823,13 +823,17 @@ void __irq_entry smp_call_function_client(int irq, struct pt_regs 
*regs) { clear_softint(1 << irq); + irq_enter(); generic_smp_call_function_interrupt(); + irq_exit(); } void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + irq_enter(); generic_smp_call_function_single_interrupt(); + irq_exit(); } static void tsb_sync(void *info) @@ -1395,7 +1399,6 @@ void __init smp_cpus_done(unsigned int max_cpus) { - pcr_arch_init(); } void smp_send_reschedule(int cpu) @@ -1480,6 +1483,13 @@ pud_t *pud; pmd_t *pmd; + if (pgd_none(*pgd)) { + pud_t *new; + + new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + pgd_populate(&init_mm, pgd, new); + } + pud = pud_offset(pgd, addr); if (pud_none(*pud)) { pmd_t *new; --- linux-3.13.0.orig/arch/sparc/kernel/sun4v_tlb_miss.S +++ linux-3.13.0/arch/sparc/kernel/sun4v_tlb_miss.S @@ -195,6 +195,11 @@ ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7 sun4v_itlb_error: + rdpr %tl, %g1 + cmp %g1, 1 + ble,pt %icc, sun4v_bad_ra + or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_ITLB, %g1 + sethi %hi(sun4v_err_itlb_vaddr), %g1 stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)] sethi %hi(sun4v_err_itlb_ctx), %g1 @@ -206,15 +211,10 @@ sethi %hi(sun4v_err_itlb_error), %g1 stx %o0, [%g1 + %lo(sun4v_err_itlb_error)] + sethi %hi(1f), %g7 rdpr %tl, %g4 - cmp %g4, 1 - ble,pt %icc, 1f - sethi %hi(2f), %g7 ba,pt %xcc, etraptl1 - or %g7, %lo(2f), %g7 - -1: ba,pt %xcc, etrap -2: or %g7, %lo(2b), %g7 +1: or %g7, %lo(1f), %g7 mov %l4, %o1 call sun4v_itlb_error_report add %sp, PTREGS_OFF, %o0 @@ -222,6 +222,11 @@ /* NOTREACHED */ sun4v_dtlb_error: + rdpr %tl, %g1 + cmp %g1, 1 + ble,pt %icc, sun4v_bad_ra + or %g0, FAULT_CODE_BAD_RA | FAULT_CODE_DTLB, %g1 + sethi %hi(sun4v_err_dtlb_vaddr), %g1 stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)] sethi %hi(sun4v_err_dtlb_ctx), %g1 @@ -233,21 +238,23 @@ sethi %hi(sun4v_err_dtlb_error), %g1 stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)] + sethi %hi(1f), %g7 rdpr %tl, %g4 - cmp %g4, 1 - ble,pt %icc, 1f - sethi %hi(2f), %g7 ba,pt %xcc, etraptl1 - or %g7, %lo(2f), %g7 - -1: ba,pt %xcc, etrap -2: or %g7, %lo(2b), %g7 +1: or %g7, %lo(1f), %g7 mov %l4, %o1 call sun4v_dtlb_error_report add %sp, PTREGS_OFF, %o0 /* NOTREACHED */ +sun4v_bad_ra: + or %g0, %g4, %g5 + ba,pt %xcc, sparc64_realfault_common + or %g1, %g0, %g4 + + /* NOTREACHED */ + /* Instruction Access Exception, tl0. */ sun4v_iacc: ldxa [%g0] ASI_SCRATCHPAD, %g2 --- linux-3.13.0.orig/arch/sparc/kernel/sys32.S +++ linux-3.13.0/arch/sparc/kernel/sys32.S @@ -44,7 +44,7 @@ SIGN1(sys32_io_submit, compat_sys_io_submit, %o1) SIGN1(sys32_mq_open, compat_sys_mq_open, %o1) SIGN1(sys32_select, compat_sys_select, %o0) -SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5) +SIGN1(sys32_futex, compat_sys_futex, %o1) SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) --- linux-3.13.0.orig/arch/sparc/kernel/sys_sparc_64.c +++ linux-3.13.0/arch/sparc/kernel/sys_sparc_64.c @@ -332,7 +332,7 @@ long err; /* No need for backward compatibility. We can start fresh... */ - if (call <= SEMCTL) { + if (call <= SEMTIMEDOP) { switch (call) { case SEMOP: err = sys_semtimedop(first, ptr, --- linux-3.13.0.orig/arch/sparc/kernel/syscalls.S +++ linux-3.13.0/arch/sparc/kernel/syscalls.S @@ -189,7 +189,8 @@ mov %i0, %l5 ! IEU1 5: call %l7 ! CTI Group brk forced srl %i5, 0, %o5 ! IEU1 - ba,a,pt %xcc, 3f + ba,pt %xcc, 3f + sra %o0, 0, %o0 /* Linux native system calls enter here... 
*/ .align 32 @@ -217,7 +218,6 @@ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 --- linux-3.13.0.orig/arch/sparc/kernel/trampoline_64.S +++ linux-3.13.0/arch/sparc/kernel/trampoline_64.S @@ -110,10 +110,13 @@ brnz,pn %g1, 1b nop - sethi %hi(p1275buf), %g2 - or %g2, %lo(p1275buf), %g2 - ldx [%g2 + 0x10], %l2 - add %l2, -(192 + 128), %sp + /* Get onto temporary stack which will be in the locked + * kernel image. + */ + sethi %hi(tramp_stack), %g1 + or %g1, %lo(tramp_stack), %g1 + add %g1, TRAMP_STACK_SIZE, %g1 + sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp flushw /* Setup the loop variables: @@ -395,7 +398,6 @@ sllx %g5, THREAD_SHIFT, %g5 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 add %g6, %g5, %sp - mov 0, %fp rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 --- linux-3.13.0.orig/arch/sparc/kernel/traps_64.c +++ linux-3.13.0/arch/sparc/kernel/traps_64.c @@ -2102,6 +2102,11 @@ atomic_inc(&sun4v_nonresum_oflow_cnt); } +static void sun4v_tlb_error(struct pt_regs *regs) +{ + die_if_kernel("TLB/TSB error", regs); +} + unsigned long sun4v_err_itlb_vaddr; unsigned long sun4v_err_itlb_ctx; unsigned long sun4v_err_itlb_pte; @@ -2109,8 +2114,7 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl) { - if (tl > 1) - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); @@ -2123,7 +2127,7 @@ sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx, sun4v_err_itlb_pte, sun4v_err_itlb_error); - prom_halt(); + sun4v_tlb_error(regs); } unsigned long sun4v_err_dtlb_vaddr; @@ -2133,8 +2137,7 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl) { - if (tl > 1) - dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); @@ -2147,7 +2150,7 @@ sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx, sun4v_err_dtlb_pte, sun4v_err_dtlb_error); - prom_halt(); + sun4v_tlb_error(regs); } void hypervisor_tlbop_error(unsigned long err, unsigned long op) --- linux-3.13.0.orig/arch/sparc/kernel/tsb.S +++ linux-3.13.0/arch/sparc/kernel/tsb.S @@ -162,10 +162,10 @@ nop .previous - rdpr %tl, %g3 - cmp %g3, 1 + rdpr %tl, %g7 + cmp %g7, 1 bne,pn %xcc, winfix_trampoline - nop + mov %g3, %g4 ba,pt %xcc, etrap rd %pc, %g7 call hugetlb_setup --- linux-3.13.0.orig/arch/sparc/kernel/unaligned_64.c +++ linux-3.13.0/arch/sparc/kernel/unaligned_64.c @@ -166,17 +166,23 @@ unsigned long compute_effective_address(struct pt_regs *regs, unsigned int insn, unsigned int rd) { + int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; - int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + unsigned long addr; if (insn & 0x2000) { maybe_flush_windows(rs1, 0, rd, from_kernel); - return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); + addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd, from_kernel); - return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); + addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); } + + if (!from_kernel && test_thread_flag(TIF_32BIT)) + addr &= 0xffffffff; + + return addr; } /* This is just to make gcc think die_if_kernel does return... 
*/ --- linux-3.13.0.orig/arch/sparc/kernel/viohs.c +++ linux-3.13.0/arch/sparc/kernel/viohs.c @@ -714,7 +714,7 @@ cfg.tx_irq = vio->vdev->tx_irq; cfg.rx_irq = vio->vdev->rx_irq; - lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg); + lp = ldc_alloc(vio->vdev->channel_id, &cfg, event_arg, vio->name); if (IS_ERR(lp)) return PTR_ERR(lp); @@ -746,7 +746,7 @@ err = 0; if (state == LDC_STATE_INIT) { - err = ldc_bind(vio->lp, vio->name); + err = ldc_bind(vio->lp); if (err) printk(KERN_WARNING "%s: Port %lu bind failed, " "err=%d\n", --- linux-3.13.0.orig/arch/sparc/kernel/vmlinux.lds.S +++ linux-3.13.0/arch/sparc/kernel/vmlinux.lds.S @@ -35,8 +35,9 @@ SECTIONS { - /* swapper_low_pmd_dir is sparc64 only */ - swapper_low_pmd_dir = 0x0000000000402000; +#ifdef CONFIG_SPARC64 + swapper_pg_dir = 0x0000000000402000; +#endif . = INITIAL_ADDRESS; .text TEXTSTART : { @@ -122,11 +123,6 @@ *(.swapper_4m_tsb_phys_patch) __swapper_4m_tsb_phys_patch_end = .; } - .page_offset_shift_patch : { - __page_offset_shift_patch = .; - *(.page_offset_shift_patch) - __page_offset_shift_patch_end = .; - } .popc_3insn_patch : { __popc_3insn_patch = .; *(.popc_3insn_patch) --- linux-3.13.0.orig/arch/sparc/lib/NG2memcpy.S +++ linux-3.13.0/arch/sparc/lib/NG2memcpy.S @@ -236,6 +236,7 @@ */ VISEntryHalf + membar #Sync alignaddr %o1, %g0, %g0 add %o1, (64 - 1), %o4 --- linux-3.13.0.orig/arch/sparc/lib/NG4memcpy.S +++ linux-3.13.0/arch/sparc/lib/NG4memcpy.S @@ -41,6 +41,10 @@ #endif #endif +#if !defined(EX_LD) && !defined(EX_ST) +#define NON_USER_COPY +#endif + #ifndef EX_LD #define EX_LD(x) x #endif @@ -197,9 +201,13 @@ mov EX_RETVAL(%o3), %o0 .Llarge_src_unaligned: +#ifdef NON_USER_COPY + VISEntryHalfFast(.Lmedium_vis_entry_fail) +#else + VISEntryHalf +#endif andn %o2, 0x3f, %o4 sub %o2, %o4, %o2 - VISEntryHalf alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 EX_LD(LOAD(ldd, %g1 + 0x00, %f0)) @@ -240,6 +248,10 @@ nop ba,a,pt %icc, .Lmedium_unaligned +#ifdef NON_USER_COPY +.Lmedium_vis_entry_fail: + or %o0, %o1, %g2 +#endif .Lmedium: LOAD(prefetch, %o1 + 0x40, #n_reads_strong) andcc %g2, 0x7, %g0 --- linux-3.13.0.orig/arch/sparc/lib/atomic32.c +++ linux-3.13.0/arch/sparc/lib/atomic32.c @@ -40,6 +40,19 @@ } EXPORT_SYMBOL(__atomic_add_return); +int atomic_xchg(atomic_t *v, int new) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(ATOMIC_HASH(v), flags); + ret = v->counter; + v->counter = new; + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); + return ret; +} +EXPORT_SYMBOL(atomic_xchg); + int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -132,3 +145,17 @@ return (unsigned long)prev; } EXPORT_SYMBOL(__cmpxchg_u32); + +unsigned long __xchg_u32(volatile u32 *ptr, u32 new) +{ + unsigned long flags; + u32 prev; + + spin_lock_irqsave(ATOMIC_HASH(ptr), flags); + prev = *ptr; + *ptr = new; + spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); + + return (unsigned long)prev; +} +EXPORT_SYMBOL(__xchg_u32); --- linux-3.13.0.orig/arch/sparc/lib/memmove.S +++ linux-3.13.0/arch/sparc/lib/memmove.S @@ -8,9 +8,11 @@ .text ENTRY(memmove) /* o0=dst o1=src o2=len */ - mov %o0, %g1 + brz,pn %o2, 99f + mov %o0, %g1 + cmp %o0, %o1 - bleu,pt %xcc, memcpy + bleu,pt %xcc, 2f add %o1, %o2, %g7 cmp %g7, %o0 bleu,pt %xcc, memcpy @@ -24,7 +26,34 @@ stb %g7, [%o0] bne,pt %icc, 1b sub %o0, 1, %o0 - +99: retl mov %g1, %o0 + + /* We can't just call memcpy for these memmove cases. On some + * chips the memcpy uses cache initializing stores and when dst + * and src are close enough, those can clobber the source data + * before we've loaded it in. 
+ */ +2: or %o0, %o1, %g7 + or %o2, %g7, %g7 + andcc %g7, 0x7, %g0 + bne,pn %xcc, 4f + nop + +3: ldx [%o1], %g7 + add %o1, 8, %o1 + subcc %o2, 8, %o2 + add %o0, 8, %o0 + bne,pt %icc, 3b + stx %g7, [%o0 - 0x8] + ba,a,pt %xcc, 99b + +4: ldub [%o1], %g7 + add %o1, 1, %o1 + subcc %o2, 1, %o2 + add %o0, 1, %o0 + bne,pt %icc, 4b + stb %g7, [%o0 - 0x1] + ba,a,pt %xcc, 99b ENDPROC(memmove) --- linux-3.13.0.orig/arch/sparc/lib/memset.S +++ linux-3.13.0/arch/sparc/lib/memset.S @@ -3,8 +3,9 @@ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * - * Returns 0, if ok, and number of bytes not yet set if exception - * occurs and we were called as clear_user. + * Calls to memset returns initial %o0. Calls to bzero returns 0, if ok, and + * number of bytes not yet set if exception occurs and we were called as + * clear_user. */ #include @@ -65,6 +66,8 @@ .globl __memset_start, __memset_end __memset_start: memset: + mov %o0, %g1 + mov 1, %g4 and %o1, 0xff, %g3 sll %g3, 8, %g2 or %g3, %g2, %g3 @@ -89,6 +92,7 @@ sub %o0, %o2, %o0 __bzero: + clr %g4 mov %g0, %g3 1: cmp %o1, 7 @@ -151,8 +155,8 @@ bne,a 8f EX(stb %g3, [%o0], and %o1, 1) 8: - retl - clr %o0 + b 0f + nop 7: be 13b orcc %o1, 0, %g0 @@ -164,6 +168,12 @@ bne 8b EX(stb %g3, [%o0 - 1], add %o1, 1) 0: + andcc %g4, 1, %g0 + be 5f + nop + retl + mov %g1, %o0 +5: retl clr %o0 __memset_end: --- linux-3.13.0.orig/arch/sparc/math-emu/math_32.c +++ linux-3.13.0/arch/sparc/math-emu/math_32.c @@ -499,7 +499,7 @@ case 0: fsr = *pfsr; if (IR == -1) IR = 2; /* fcc is always fcc0 */ - fsr &= ~0xc00; fsr |= (IR << 10); break; + fsr &= ~0xc00; fsr |= (IR << 10); *pfsr = fsr; break; case 1: rd->s = IR; break; --- linux-3.13.0.orig/arch/sparc/mm/fault_32.c +++ linux-3.13.0/arch/sparc/mm/fault_32.c @@ -252,6 +252,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/sparc/mm/fault_64.c +++ linux-3.13.0/arch/sparc/mm/fault_64.c @@ -96,38 +96,51 @@ pte_t *ptep, pte; unsigned long pa; u32 insn = 0; - unsigned long pstate; - if (pgd_none(*pgdp)) - goto outret; + if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) + goto out; pudp = pud_offset(pgdp, tpc); - if (pud_none(*pudp)) - goto outret; - pmdp = pmd_offset(pudp, tpc); - if (pmd_none(*pmdp)) - goto outret; - - /* This disables preemption for us as well. */ - __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); - __asm__ __volatile__("wrpr %0, %1, %%pstate" - : : "r" (pstate), "i" (PSTATE_IE)); - ptep = pte_offset_map(pmdp, tpc); - pte = *ptep; - if (!pte_present(pte)) + if (pud_none(*pudp) || unlikely(pud_bad(*pudp))) goto out; - pa = (pte_pfn(pte) << PAGE_SHIFT); - pa += (tpc & ~PAGE_MASK); + /* This disables preemption for us as well. */ + local_irq_disable(); - /* Use phys bypass so we don't pollute dtlb/dcache. */ - __asm__ __volatile__("lduwa [%1] %2, %0" - : "=r" (insn) - : "r" (pa), "i" (ASI_PHYS_USE_EC)); + pmdp = pmd_offset(pudp, tpc); + if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) + goto out_irq_enable; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (pmd_trans_huge(*pmdp)) { + if (pmd_trans_splitting(*pmdp)) + goto out_irq_enable; + + pa = pmd_pfn(*pmdp) << PAGE_SHIFT; + pa += tpc & ~HPAGE_MASK; + + /* Use phys bypass so we don't pollute dtlb/dcache. 
*/ + __asm__ __volatile__("lduwa [%1] %2, %0" + : "=r" (insn) + : "r" (pa), "i" (ASI_PHYS_USE_EC)); + } else +#endif + { + ptep = pte_offset_map(pmdp, tpc); + pte = *ptep; + if (pte_present(pte)) { + pa = (pte_pfn(pte) << PAGE_SHIFT); + pa += (tpc & ~PAGE_MASK); + + /* Use phys bypass so we don't pollute dtlb/dcache. */ + __asm__ __volatile__("lduwa [%1] %2, %0" + : "=r" (insn) + : "r" (pa), "i" (ASI_PHYS_USE_EC)); + } + pte_unmap(ptep); + } +out_irq_enable: + local_irq_enable(); out: - pte_unmap(ptep); - __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); -outret: return insn; } @@ -153,7 +166,8 @@ } static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, - unsigned int insn, int fault_code) + unsigned long fault_addr, unsigned int insn, + int fault_code) { unsigned long addr; siginfo_t info; @@ -161,10 +175,18 @@ info.si_code = code; info.si_signo = sig; info.si_errno = 0; - if (fault_code & FAULT_CODE_ITLB) + if (fault_code & FAULT_CODE_ITLB) { addr = regs->tpc; - else - addr = compute_effective_address(regs, insn, 0); + } else { + /* If we were able to probe the faulting instruction, use it + * to compute a precise fault address. Otherwise use the fault + * time provided address which may only have page granularity. + */ + if (insn) + addr = compute_effective_address(regs, insn, 0); + else + addr = fault_addr; + } info.si_addr = (void __user *) addr; info.si_trapno = 0; @@ -239,7 +261,7 @@ /* The si_code was set to make clear whether * this was a SEGV_MAPERR or SEGV_ACCERR fault. */ - do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code); + do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code); return; } @@ -259,18 +281,6 @@ show_regs(regs); } -static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, - unsigned long addr) -{ - static int times; - - if (times++ < 10) - printk(KERN_ERR "FAULT[%s:%d]: 32-bit process " - "reports 64-bit fault address [%lx]\n", - current->comm, current->pid, addr); - show_regs(regs); -} - asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); @@ -300,10 +310,8 @@ goto intr_or_no_mm; } } - if (unlikely((address >> 32) != 0)) { - bogus_32bit_fault_address(regs, address); + if (unlikely((address >> 32) != 0)) goto intr_or_no_mm; - } } if (regs->tstate & TSTATE_PRIV) { @@ -340,6 +348,9 @@ down_read(&mm->mmap_sem); } + if (fault_code & FAULT_CODE_BAD_RA) + goto do_sigbus; + vma = find_vma(mm, address); if (!vma) goto bad_area; @@ -437,6 +448,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); @@ -525,7 +538,7 @@ * Send a sigbus, regardless of whether we were in kernel * or user mode. */ - do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code); + do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code); /* Kernel mode? 
Handle exceptions or die */ if (regs->tstate & TSTATE_PRIV) --- linux-3.13.0.orig/arch/sparc/mm/gup.c +++ linux-3.13.0/arch/sparc/mm/gup.c @@ -73,7 +73,7 @@ struct page *head, *page, *tail; int refs; - if (!pmd_large(pmd)) + if (!(pmd_val(pmd) & _PAGE_VALID)) return 0; if (write && !pmd_write(pmd)) @@ -160,6 +160,36 @@ return 1; } +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages) +{ + struct mm_struct *mm = current->mm; + unsigned long addr, len, end; + unsigned long next, flags; + pgd_t *pgdp; + int nr = 0; + + start &= PAGE_MASK; + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; + + local_irq_save(flags); + pgdp = pgd_offset(mm, addr); + do { + pgd_t pgd = *pgdp; + + next = pgd_addr_end(addr, end); + if (pgd_none(pgd)) + break; + if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) + break; + } while (pgdp++, addr = next, addr != end); + local_irq_restore(flags); + + return nr; +} + int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { --- linux-3.13.0.orig/arch/sparc/mm/hugetlbpage.c +++ linux-3.13.0/arch/sparc/mm/hugetlbpage.c @@ -232,11 +232,6 @@ return 0; } -int pmd_huge_support(void) -{ - return 0; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { --- linux-3.13.0.orig/arch/sparc/mm/init_64.c +++ linux-3.13.0/arch/sparc/mm/init_64.c @@ -73,7 +73,6 @@ * 'cpu' properties, but we need to have this table setup before the * MDESC is initialized. */ -unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; #ifndef CONFIG_DEBUG_PAGEALLOC /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings. @@ -82,10 +81,11 @@ */ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; #endif +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static unsigned long cpu_pgsz_mask; -#define MAX_BANKS 32 +#define MAX_BANKS 1024 static struct linux_prom64_registers pavail[MAX_BANKS]; static int pavail_ents; @@ -163,10 +163,6 @@ cmp_p64, NULL); } -unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES / - sizeof(unsigned long)]; -EXPORT_SYMBOL(sparc64_valid_addr_bitmap); - /* Kernel physical address base and size in bytes. */ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; @@ -350,6 +346,10 @@ mm = vma->vm_mm; + /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ + if (!pte_accessible(mm, pte)) + return; + spin_lock_irqsave(&mm->context.lock, flags); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -588,7 +588,7 @@ int i, tlb_ent = sparc64_highest_locked_tlbent(); tte_vaddr = (unsigned long) KERNBASE; - phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; @@ -834,7 +834,10 @@ if ((addr & p->mask) == p->val) return i; } - return -1; + /* The following condition has been observed on LDOM guests.*/ + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" + " rule. 
Some physical memory will be owned by node 0."); + return 0; } static u64 memblock_nid_range(u64 start, u64 end, int *nid) @@ -1355,9 +1358,144 @@ static struct linux_prom64_registers pall[MAX_BANKS] __initdata; static int pall_ents __initdata; -#ifdef CONFIG_DEBUG_PAGEALLOC +static unsigned long max_phys_bits = 40; + +bool kern_addr_valid(unsigned long addr) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if ((long)addr < 0L) { + unsigned long pa = __pa(addr); + + if ((addr >> max_phys_bits) != 0UL) + return false; + + return pfn_valid(pa >> PAGE_SHIFT); + } + + if (addr >= (unsigned long) KERNBASE && + addr < (unsigned long)&_end) + return true; + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return 0; + + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return 0; + + if (pud_large(*pud)) + return pfn_valid(pud_pfn(*pud)); + + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return 0; + + if (pmd_large(*pmd)) + return pfn_valid(pmd_pfn(*pmd)); + + pte = pte_offset_kernel(pmd, addr); + if (pte_none(*pte)) + return 0; + + return pfn_valid(pte_pfn(*pte)); +} +EXPORT_SYMBOL(kern_addr_valid); + +static unsigned long __ref kernel_map_hugepud(unsigned long vstart, + unsigned long vend, + pud_t *pud) +{ + const unsigned long mask16gb = (1UL << 34) - 1UL; + u64 pte_val = vstart; + + /* Each PUD is 8GB */ + if ((vstart & mask16gb) || + (vend - vstart <= mask16gb)) { + pte_val ^= kern_linear_pte_xor[2]; + pud_val(*pud) = pte_val | _PAGE_PUD_HUGE; + + return vstart + PUD_SIZE; + } + + pte_val ^= kern_linear_pte_xor[3]; + pte_val |= _PAGE_PUD_HUGE; + + vend = vstart + mask16gb + 1UL; + while (vstart < vend) { + pud_val(*pud) = pte_val; + + pte_val += PUD_SIZE; + vstart += PUD_SIZE; + pud++; + } + return vstart; +} + +static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend, + bool guard) +{ + if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) + return true; + + return false; +} + +static unsigned long __ref kernel_map_hugepmd(unsigned long vstart, + unsigned long vend, + pmd_t *pmd) +{ + const unsigned long mask256mb = (1UL << 28) - 1UL; + const unsigned long mask2gb = (1UL << 31) - 1UL; + u64 pte_val = vstart; + + /* Each PMD is 8MB */ + if ((vstart & mask256mb) || + (vend - vstart <= mask256mb)) { + pte_val ^= kern_linear_pte_xor[0]; + pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; + + return vstart + PMD_SIZE; + } + + if ((vstart & mask2gb) || + (vend - vstart <= mask2gb)) { + pte_val ^= kern_linear_pte_xor[1]; + pte_val |= _PAGE_PMD_HUGE; + vend = vstart + mask256mb + 1UL; + } else { + pte_val ^= kern_linear_pte_xor[2]; + pte_val |= _PAGE_PMD_HUGE; + vend = vstart + mask2gb + 1UL; + } + + while (vstart < vend) { + pmd_val(*pmd) = pte_val; + + pte_val += PMD_SIZE; + vstart += PMD_SIZE; + pmd++; + } + + return vstart; +} + +static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend, + bool guard) +{ + if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) + return true; + + return false; +} + static unsigned long __ref kernel_map_range(unsigned long pstart, - unsigned long pend, pgprot_t prot) + unsigned long pend, pgprot_t prot, + bool use_huge) { unsigned long vstart = PAGE_OFFSET + pstart; unsigned long vend = PAGE_OFFSET + pend; @@ -1376,19 +1514,34 @@ pmd_t *pmd; pte_t *pte; + if (pgd_none(*pgd)) { + pud_t *new; + + new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + alloc_bytes += PAGE_SIZE; + pgd_populate(&init_mm, pgd, new); + } pud = pud_offset(pgd, vstart); if (pud_none(*pud)) { pmd_t *new; + if 
(kernel_can_map_hugepud(vstart, vend, use_huge)) { + vstart = kernel_map_hugepud(vstart, vend, pud); + continue; + } new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pud_populate(&init_mm, pud, new); } pmd = pmd_offset(pud, vstart); - if (!pmd_present(*pmd)) { + if (pmd_none(*pmd)) { pte_t *new; + if (kernel_can_map_hugepmd(vstart, vend, use_huge)) { + vstart = kernel_map_hugepmd(vstart, vend, pmd); + continue; + } new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); alloc_bytes += PAGE_SIZE; pmd_populate_kernel(&init_mm, pmd, new); @@ -1411,100 +1564,34 @@ return alloc_bytes; } -extern unsigned int kvmap_linear_patch[1]; -#endif /* CONFIG_DEBUG_PAGEALLOC */ - -static void __init kpte_set_val(unsigned long index, unsigned long val) -{ - unsigned long *ptr = kpte_linear_bitmap; - - val <<= ((index % (BITS_PER_LONG / 2)) * 2); - ptr += (index / (BITS_PER_LONG / 2)); - - *ptr |= val; -} - -static const unsigned long kpte_shift_min = 28; /* 256MB */ -static const unsigned long kpte_shift_max = 34; /* 16GB */ -static const unsigned long kpte_shift_incr = 3; - -static unsigned long kpte_mark_using_shift(unsigned long start, unsigned long end, - unsigned long shift) +static void __init flush_all_kernel_tsbs(void) { - unsigned long size = (1UL << shift); - unsigned long mask = (size - 1UL); - unsigned long remains = end - start; - unsigned long val; - - if (remains < size || (start & mask)) - return start; - - /* VAL maps: - * - * shift 28 --> kern_linear_pte_xor index 1 - * shift 31 --> kern_linear_pte_xor index 2 - * shift 34 --> kern_linear_pte_xor index 3 - */ - val = ((shift - kpte_shift_min) / kpte_shift_incr) + 1; - - remains &= ~mask; - if (shift != kpte_shift_max) - remains = size; - - while (remains) { - unsigned long index = start >> kpte_shift_min; + int i; - kpte_set_val(index, val); + for (i = 0; i < KERNEL_TSB_NENTRIES; i++) { + struct tsb *ent = &swapper_tsb[i]; - start += 1UL << kpte_shift_min; - remains -= 1UL << kpte_shift_min; + ent->tag = (1UL << TSB_TAG_INVALID_BIT); } +#ifndef CONFIG_DEBUG_PAGEALLOC + for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) { + struct tsb *ent = &swapper_4m_tsb[i]; - return start; -} - -static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) -{ - unsigned long smallest_size, smallest_mask; - unsigned long s; - - smallest_size = (1UL << kpte_shift_min); - smallest_mask = (smallest_size - 1UL); - - while (start < end) { - unsigned long orig_start = start; - - for (s = kpte_shift_max; s >= kpte_shift_min; s -= kpte_shift_incr) { - start = kpte_mark_using_shift(start, end, s); - - if (start != orig_start) - break; - } - - if (start == orig_start) - start = (start + smallest_size) & ~smallest_mask; + ent->tag = (1UL << TSB_TAG_INVALID_BIT); } +#endif } -static void __init init_kpte_bitmap(void) -{ - unsigned long i; - - for (i = 0; i < pall_ents; i++) { - unsigned long phys_start, phys_end; - - phys_start = pall[i].phys_addr; - phys_end = phys_start + pall[i].reg_size; - - mark_kpte_bitmap(phys_start, phys_end); - } -} +extern unsigned int kvmap_linear_patch[1]; static void __init kernel_physical_mapping_init(void) { -#ifdef CONFIG_DEBUG_PAGEALLOC unsigned long i, mem_alloced = 0UL; + bool use_huge = true; +#ifdef CONFIG_DEBUG_PAGEALLOC + use_huge = false; +#endif for (i = 0; i < pall_ents; i++) { unsigned long phys_start, phys_end; @@ -1512,7 +1599,7 @@ phys_end = phys_start + pall[i].reg_size; mem_alloced += kernel_map_range(phys_start, phys_end, - PAGE_KERNEL); + PAGE_KERNEL, use_huge); } 
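/* Editorial aside, not part of the patch: kernel_map_hugepmd() above picks a
 * kern_linear_pte_xor[] entry from the alignment of vstart and the size left
 * to map (256MB and 2GB masks); kernel_map_hugepud() does the same with a
 * 16GB mask to choose between xor[2] and xor[3].  The stand-alone function
 * below reproduces only the PMD-level cascade so the selection can be checked
 * in isolation; it does not model the page-table walk or the TTE bits, the
 * sample ranges in main() are invented, and it assumes an LP64 host (64-bit
 * unsigned long).
 */
#include <stdio.h>

/* Returns the kern_linear_pte_xor index the PMD path would use next. */
static int pmd_xor_index(unsigned long vstart, unsigned long vend)
{
	const unsigned long mask256mb = (1UL << 28) - 1UL;
	const unsigned long mask2gb = (1UL << 31) - 1UL;

	if ((vstart & mask256mb) || (vend - vstart <= mask256mb))
		return 0;	/* unaligned or under 256MB left: one PMD at a time */
	if ((vstart & mask2gb) || (vend - vstart <= mask2gb))
		return 1;	/* map the next 256MB chunk */
	return 2;		/* map the next 2GB chunk */
}

int main(void)
{
	unsigned long base = 0x80000000UL;	/* hypothetical 2GB-aligned start */

	printf("  64MB left -> xor[%d]\n", pmd_xor_index(base, base + (64UL << 20)));
	printf(" 512MB left -> xor[%d]\n", pmd_xor_index(base, base + (512UL << 20)));
	printf("   4GB left -> xor[%d]\n", pmd_xor_index(base, base + (4UL << 30)));
	return 0;
}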
printk("Allocated %ld bytes for kernel page tables.\n", @@ -1521,8 +1608,9 @@ kvmap_linear_patch[0] = 0x01000000; /* nop */ flushi(&kvmap_linear_patch[0]); + flush_all_kernel_tsbs(); + __flush_tlb_all(); -#endif } #ifdef CONFIG_DEBUG_PAGEALLOC @@ -1532,7 +1620,7 @@ unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); kernel_map_range(phys_start, phys_end, - (enable ? PAGE_KERNEL : __pgprot(0))); + (enable ? PAGE_KERNEL : __pgprot(0)), false); flush_tsb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); @@ -1560,76 +1648,56 @@ unsigned long PAGE_OFFSET; EXPORT_SYMBOL(PAGE_OFFSET); -static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits) -{ - unsigned long final_shift; - unsigned int val = *insn; - unsigned int cnt; - - /* We are patching in ilog2(max_supported_phys_address), and - * we are doing so in a manner similar to a relocation addend. - * That is, we are adding the shift value to whatever value - * is in the shift instruction count field already. - */ - cnt = (val & 0x3f); - val &= ~0x3f; - - /* If we are trying to shift >= 64 bits, clear the destination - * register. This can happen when phys_bits ends up being equal - * to MAX_PHYS_ADDRESS_BITS. - */ - final_shift = (cnt + (64 - phys_bits)); - if (final_shift >= 64) { - unsigned int rd = (val >> 25) & 0x1f; +unsigned long VMALLOC_END = 0x0000010000000000UL; +EXPORT_SYMBOL(VMALLOC_END); - val = 0x80100000 | (rd << 25); - } else { - val |= final_shift; - } - *insn = val; - - __asm__ __volatile__("flush %0" - : /* no outputs */ - : "r" (insn)); -} - -static void __init page_offset_shift_patch(unsigned long phys_bits) -{ - extern unsigned int __page_offset_shift_patch; - extern unsigned int __page_offset_shift_patch_end; - unsigned int *p; - - p = &__page_offset_shift_patch; - while (p < &__page_offset_shift_patch_end) { - unsigned int *insn = (unsigned int *)(unsigned long)*p; - - page_offset_shift_patch_one(insn, phys_bits); - - p++; - } -} +unsigned long sparc64_va_hole_top = 0xfffff80000000000UL; +unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL; static void __init setup_page_offset(void) { - unsigned long max_phys_bits = 40; - if (tlb_type == cheetah || tlb_type == cheetah_plus) { + /* Cheetah/Panther support a full 64-bit virtual + * address, so we can use all that our page tables + * support. + */ + sparc64_va_hole_top = 0xfff0000000000000UL; + sparc64_va_hole_bottom = 0x0010000000000000UL; + max_phys_bits = 42; } else if (tlb_type == hypervisor) { switch (sun4v_chip_type) { case SUN4V_CHIP_NIAGARA1: case SUN4V_CHIP_NIAGARA2: + /* T1 and T2 support 48-bit virtual addresses. */ + sparc64_va_hole_top = 0xffff800000000000UL; + sparc64_va_hole_bottom = 0x0000800000000000UL; + max_phys_bits = 39; break; case SUN4V_CHIP_NIAGARA3: + /* T3 supports 48-bit virtual addresses. */ + sparc64_va_hole_top = 0xffff800000000000UL; + sparc64_va_hole_bottom = 0x0000800000000000UL; + max_phys_bits = 43; break; case SUN4V_CHIP_NIAGARA4: case SUN4V_CHIP_NIAGARA5: case SUN4V_CHIP_SPARC64X: - default: + case SUN4V_CHIP_SPARC_M6: + /* T4 and later support 52-bit virtual addresses. */ + sparc64_va_hole_top = 0xfff8000000000000UL; + sparc64_va_hole_bottom = 0x0008000000000000UL; max_phys_bits = 47; break; + case SUN4V_CHIP_SPARC_M7: + default: + /* M7 and later support 52-bit virtual addresses. 
*/ + sparc64_va_hole_top = 0xfff8000000000000UL; + sparc64_va_hole_bottom = 0x0008000000000000UL; + max_phys_bits = 49; + break; } } @@ -1639,12 +1707,16 @@ prom_halt(); } - PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits); + PAGE_OFFSET = sparc64_va_hole_top; + VMALLOC_END = ((sparc64_va_hole_bottom >> 1) + + (sparc64_va_hole_bottom >> 2)); - pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", + pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", PAGE_OFFSET, max_phys_bits); - - page_offset_shift_patch(max_phys_bits); + pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", + VMALLOC_START, VMALLOC_END); + pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", + VMEMMAP_BASE, VMEMMAP_BASE << 1); } static void __init tsb_phys_patch(void) @@ -1689,21 +1761,42 @@ #define NUM_KTSB_DESCR 1 #endif static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; -extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +/* The swapper TSBs are loaded with a base sequence of: + * + * sethi %uhi(SYMBOL), REG1 + * sethi %hi(SYMBOL), REG2 + * or REG1, %ulo(SYMBOL), REG1 + * or REG2, %lo(SYMBOL), REG2 + * sllx REG1, 32, REG1 + * or REG1, REG2, REG1 + * + * When we use physical addressing for the TSB accesses, we patch the + * first four instructions in the above sequence. + */ static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) { - pa >>= KTSB_PHYS_SHIFT; + unsigned long high_bits, low_bits; + + high_bits = (pa >> 32) & 0xffffffff; + low_bits = (pa >> 0) & 0xffffffff; while (start < end) { unsigned int *ia = (unsigned int *)(unsigned long)*start; - ia[0] = (ia[0] & ~0x3fffff) | (pa >> 10); + ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10); __asm__ __volatile__("flush %0" : : "r" (ia)); - ia[1] = (ia[1] & ~0x3ff) | (pa & 0x3ff); + ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10); __asm__ __volatile__("flush %0" : : "r" (ia + 1)); + ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff); + __asm__ __volatile__("flush %0" : : "r" (ia + 2)); + + ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff); + __asm__ __volatile__("flush %0" : : "r" (ia + 3)); + start++; } } @@ -1842,7 +1935,6 @@ /* paging_init() sets up the page tables */ static unsigned long last_valid_pfn; -pgd_t swapper_pg_dir[PTRS_PER_PGD]; static void sun4u_pgprot_init(void); static void sun4v_pgprot_init(void); @@ -1880,7 +1972,7 @@ BUILD_BUG_ON(NR_CPUS > 4096); - kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; /* Invalidate both kernel TSBs. */ @@ -1936,7 +2028,7 @@ shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); real_end = (unsigned long)_end; - num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); + num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); printk("Kernel: Using %d locked TLB entries for main kernel image.\n", num_kernel_image_mappings); @@ -1945,16 +2037,10 @@ */ init_mm.pgd += ((shift) / (sizeof(pgd_t))); - memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); + memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); - /* Now can init the kernel/bad page tables. */ - pud_set(pud_offset(&swapper_pg_dir[0], 0), - swapper_low_pmd_dir + (shift / sizeof(pgd_t))); - inherit_prom_mappings(); - init_kpte_bitmap(); - /* Ok, we can use our TLB miss and window trap handlers safely. 
*/ setup_tba(); @@ -2061,70 +2147,6 @@ return 0; } -static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; -static int pavail_rescan_ents __initdata; - -/* Certain OBP calls, such as fetching "available" properties, can - * claim physical memory. So, along with initializing the valid - * address bitmap, what we do here is refetch the physical available - * memory list again, and make sure it provides at least as much - * memory as 'pavail' does. - */ -static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) -{ - int i; - - read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); - - for (i = 0; i < pavail_ents; i++) { - unsigned long old_start, old_end; - - old_start = pavail[i].phys_addr; - old_end = old_start + pavail[i].reg_size; - while (old_start < old_end) { - int n; - - for (n = 0; n < pavail_rescan_ents; n++) { - unsigned long new_start, new_end; - - new_start = pavail_rescan[n].phys_addr; - new_end = new_start + - pavail_rescan[n].reg_size; - - if (new_start <= old_start && - new_end >= (old_start + PAGE_SIZE)) { - set_bit(old_start >> 22, bitmap); - goto do_next_page; - } - } - - prom_printf("mem_init: Lost memory in pavail\n"); - prom_printf("mem_init: OLD start[%lx] size[%lx]\n", - pavail[i].phys_addr, - pavail[i].reg_size); - prom_printf("mem_init: NEW start[%lx] size[%lx]\n", - pavail_rescan[i].phys_addr, - pavail_rescan[i].reg_size); - prom_printf("mem_init: Cannot continue, aborting.\n"); - prom_halt(); - - do_next_page: - old_start += PAGE_SIZE; - } - } -} - -static void __init patch_tlb_miss_handler_bitmap(void) -{ - extern unsigned int valid_addr_bitmap_insn[]; - extern unsigned int valid_addr_bitmap_patch[]; - - valid_addr_bitmap_insn[1] = valid_addr_bitmap_patch[1]; - mb(); - valid_addr_bitmap_insn[0] = valid_addr_bitmap_patch[0]; - flushi(&valid_addr_bitmap_insn[0]); -} - static void __init register_page_bootmem_info(void) { #ifdef CONFIG_NEED_MULTIPLE_NODES @@ -2137,18 +2159,6 @@ } void __init mem_init(void) { - unsigned long addr, last; - - addr = PAGE_OFFSET + kern_base; - last = PAGE_ALIGN(kern_size) + addr; - while (addr < last) { - set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); - addr += PAGE_SIZE; - } - - setup_valid_addr_bitmap_from_pavail(sparc64_valid_addr_bitmap); - patch_tlb_miss_handler_bitmap(); - high_memory = __va(last_valid_pfn << PAGE_SHIFT); register_page_bootmem_info(); @@ -2238,18 +2248,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP -unsigned long vmemmap_table[VMEMMAP_SIZE]; - -static long __meminitdata addr_start, addr_end; -static int __meminitdata node_start; - int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, int node) { - unsigned long phys_start = (vstart - VMEMMAP_BASE); - unsigned long phys_end = (vend - VMEMMAP_BASE); - unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; - unsigned long end = VMEMMAP_ALIGN(phys_end); unsigned long pte_base; pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | @@ -2260,47 +2261,52 @@ _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); - for (; addr < end; addr += VMEMMAP_CHUNK) { - unsigned long *vmem_pp = - vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); - void *block; + pte_base |= _PAGE_PMD_HUGE; - if (!(*vmem_pp & _PAGE_VALID)) { - block = vmemmap_alloc_block(1UL << 22, node); - if (!block) + vstart = vstart & PMD_MASK; + vend = ALIGN(vend, PMD_SIZE); + for (; vstart < vend; vstart += PMD_SIZE) { + pgd_t *pgd = pgd_offset_k(vstart); + unsigned long pte; + pud_t *pud; + pmd_t *pmd; + + if (pgd_none(*pgd)) { + 
pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node); + + if (!new) return -ENOMEM; + pgd_populate(&init_mm, pgd, new); + } - *vmem_pp = pte_base | __pa(block); + pud = pud_offset(pgd, vstart); + if (pud_none(*pud)) { + pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node); - /* check to see if we have contiguous blocks */ - if (addr_end != addr || node_start != node) { - if (addr_start) - printk(KERN_DEBUG " [%lx-%lx] on node %d\n", - addr_start, addr_end-1, node_start); - addr_start = addr; - node_start = node; - } - addr_end = addr + VMEMMAP_CHUNK; + if (!new) + return -ENOMEM; + pud_populate(&init_mm, pud, new); } - } - return 0; -} -void __meminit vmemmap_populate_print_last(void) -{ - if (addr_start) { - printk(KERN_DEBUG " [%lx-%lx] on node %d\n", - addr_start, addr_end-1, node_start); - addr_start = 0; - addr_end = 0; - node_start = 0; + pmd = pmd_offset(pud, vstart); + + pte = pmd_val(*pmd); + if (!(pte & _PAGE_VALID)) { + void *block = vmemmap_alloc_block(PMD_SIZE, node); + + if (!block) + return -ENOMEM; + + pmd_val(*pmd) = pte_base | __pa(block); + } } + + return 0; } void vmemmap_free(unsigned long start, unsigned long end) { } - #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, @@ -2613,6 +2619,10 @@ pte = pmd_val(entry); + /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ + if (!(pte & _PAGE_VALID)) + return; + /* We are fabricating 8MB pages using 4MB real hw pages. */ pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); @@ -2693,3 +2703,26 @@ } } #endif + +#ifdef CONFIG_SMP +#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range +#else +#define do_flush_tlb_kernel_range __flush_tlb_kernel_range +#endif + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { + if (start < LOW_OBP_ADDRESS) { + flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); + do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); + } + if (end > HI_OBP_ADDRESS) { + flush_tsb_kernel_range(HI_OBP_ADDRESS, end); + do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); + } + } else { + flush_tsb_kernel_range(start, end); + do_flush_tlb_kernel_range(start, end); + } +} --- linux-3.13.0.orig/arch/sparc/mm/init_64.h +++ linux-3.13.0/arch/sparc/mm/init_64.h @@ -8,15 +8,8 @@ */ #define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS) -#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) -#define KPTE_BITMAP_BYTES \ - ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4) -#define VALID_ADDR_BITMAP_CHUNK_SZ (4UL * 1024UL * 1024UL) -#define VALID_ADDR_BITMAP_BYTES \ - ((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8) extern unsigned long kern_linear_pte_xor[4]; -extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; extern unsigned int sparc64_highest_unlocked_tlb_ent; extern unsigned long sparc64_kern_pri_context; extern unsigned long sparc64_kern_pri_nuc_bits; @@ -38,15 +31,4 @@ extern void prom_world(int enter); -#ifdef CONFIG_SPARSEMEM_VMEMMAP -#define VMEMMAP_CHUNK_SHIFT 22 -#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT) -#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL) -#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK) - -#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \ - sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT) -extern unsigned long vmemmap_table[VMEMMAP_SIZE]; -#endif - #endif /* _SPARC64_MM_INIT_H */ --- linux-3.13.0.orig/arch/sparc/mm/srmmu.c +++ linux-3.13.0/arch/sparc/mm/srmmu.c @@ -458,10 +458,12 @@ void switch_mm(struct 
mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) { + unsigned long flags; + if (mm->context == NO_CONTEXT) { - spin_lock(&srmmu_context_spinlock); + spin_lock_irqsave(&srmmu_context_spinlock, flags); alloc_context(old_mm, mm); - spin_unlock(&srmmu_context_spinlock); + spin_unlock_irqrestore(&srmmu_context_spinlock, flags); srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); } @@ -986,14 +988,15 @@ void destroy_context(struct mm_struct *mm) { + unsigned long flags; if (mm->context != NO_CONTEXT) { flush_cache_mm(mm); srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); flush_tlb_mm(mm); - spin_lock(&srmmu_context_spinlock); + spin_lock_irqsave(&srmmu_context_spinlock, flags); free_context(mm->context); - spin_unlock(&srmmu_context_spinlock); + spin_unlock_irqrestore(&srmmu_context_spinlock, flags); mm->context = NO_CONTEXT; } } --- linux-3.13.0.orig/arch/sparc/mm/tlb.c +++ linux-3.13.0/arch/sparc/mm/tlb.c @@ -135,7 +135,7 @@ #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, - pmd_t pmd, bool exec) + pmd_t pmd) { unsigned long end; pte_t *pte; @@ -143,8 +143,11 @@ pte = pte_offset_map(&pmd, vaddr); end = vaddr + HPAGE_SIZE; while (vaddr < end) { - if (pte_val(*pte) & _PAGE_VALID) + if (pte_val(*pte) & _PAGE_VALID) { + bool exec = pte_exec(*pte); + tlb_batch_add_one(mm, vaddr, exec); + } pte++; vaddr += PAGE_SIZE; } @@ -178,19 +181,30 @@ } if (!pmd_none(orig)) { - pte_t orig_pte = __pte(pmd_val(orig)); - bool exec = pte_exec(orig_pte); - addr &= HPAGE_MASK; if (pmd_trans_huge(orig)) { + pte_t orig_pte = __pte(pmd_val(orig)); + bool exec = pte_exec(orig_pte); + tlb_batch_add_one(mm, addr, exec); tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec); } else { - tlb_batch_pmd_scan(mm, addr, orig, exec); + tlb_batch_pmd_scan(mm, addr, orig); } } } +void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp) +{ + pmd_t entry = *pmdp; + + pmd_val(entry) &= ~_PAGE_VALID; + + set_pmd_at(vma->vm_mm, address, pmdp, entry); + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); +} + void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { --- linux-3.13.0.orig/arch/sparc/mm/tsb.c +++ linux-3.13.0/arch/sparc/mm/tsb.c @@ -133,7 +133,19 @@ mm->context.tsb_block[tsb_idx].tsb_nentries = tsb_bytes / sizeof(struct tsb); - base = TSBMAP_BASE; + switch (tsb_idx) { + case MM_TSB_BASE: + base = TSBMAP_8K_BASE; + break; +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + case MM_TSB_HUGE: + base = TSBMAP_4M_BASE; + break; +#endif + default: + BUG(); + } + tte = pgprot_val(PAGE_KERNEL_LOCKED); tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); --- linux-3.13.0.orig/arch/sparc/net/bpf_jit_comp.c +++ linux-3.13.0/arch/sparc/net/bpf_jit_comp.c @@ -83,9 +83,9 @@ #define BNE (F2(0, 2) | CONDNE) #ifdef CONFIG_SPARC64 -#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20)) +#define BE_PTR (F2(0, 1) | CONDE | (2 << 20)) #else -#define BNE_PTR BNE +#define BE_PTR BE #endif #define SETHI(K, REG) \ @@ -600,7 +600,7 @@ case BPF_S_ANC_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); - emit_branch(BNE_PTR, cleanup_addr + 4); + emit_branch(BE_PTR, cleanup_addr + 4); emit_nop(); emit_load32(r_A, struct net_device, ifindex, r_A); break; @@ -613,7 +613,7 @@ case BPF_S_ANC_HATYPE: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); - emit_branch(BNE_PTR, cleanup_addr + 4); + emit_branch(BE_PTR, cleanup_addr + 4); 
emit_nop(); emit_load16(r_A, struct net_device, type, r_A); break; --- linux-3.13.0.orig/arch/sparc/power/hibernate_asm.S +++ linux-3.13.0/arch/sparc/power/hibernate_asm.S @@ -54,8 +54,8 @@ nop /* Write PAGE_OFFSET to %g7 */ - sethi %uhi(PAGE_OFFSET), %g7 - sllx %g7, 32, %g7 + sethi %hi(PAGE_OFFSET), %g7 + ldx [%g7 + %lo(PAGE_OFFSET)], %g7 setuw (PAGE_SIZE-8), %g3 --- linux-3.13.0.orig/arch/sparc/prom/bootstr_64.c +++ linux-3.13.0/arch/sparc/prom/bootstr_64.c @@ -14,7 +14,10 @@ * the .bss section or it will break things. */ -#define BARG_LEN 256 +/* We limit BARG_LEN to 1024 because this is the size of the + * 'barg_out' command line buffer in the SILO bootloader. + */ +#define BARG_LEN 1024 struct { int bootstr_len; int bootstr_valid; --- linux-3.13.0.orig/arch/sparc/prom/cif.S +++ linux-3.13.0/arch/sparc/prom/cif.S @@ -11,11 +11,10 @@ .text .globl prom_cif_direct prom_cif_direct: + save %sp, -192, %sp sethi %hi(p1275buf), %o1 or %o1, %lo(p1275buf), %o1 - ldx [%o1 + 0x0010], %o2 ! prom_cif_stack - save %o2, -192, %sp - ldx [%i1 + 0x0008], %l2 ! prom_cif_handler + ldx [%o1 + 0x0008], %l2 ! prom_cif_handler mov %g4, %l0 mov %g5, %l1 mov %g6, %l3 --- linux-3.13.0.orig/arch/sparc/prom/init_64.c +++ linux-3.13.0/arch/sparc/prom/init_64.c @@ -26,13 +26,13 @@ * It gets passed the pointer to the PROM vector. */ -extern void prom_cif_init(void *, void *); +extern void prom_cif_init(void *); -void __init prom_init(void *cif_handler, void *cif_stack) +void __init prom_init(void *cif_handler) { phandle node; - prom_cif_init(cif_handler, cif_stack); + prom_cif_init(cif_handler); prom_chosen_node = prom_finddevice(prom_chosen_path); if (!prom_chosen_node || (s32)prom_chosen_node == -1) --- linux-3.13.0.orig/arch/sparc/prom/p1275.c +++ linux-3.13.0/arch/sparc/prom/p1275.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -20,7 +21,6 @@ struct { long prom_callback; /* 0x00 */ void (*prom_cif_handler)(long *); /* 0x08 */ - unsigned long prom_cif_stack; /* 0x10 */ } p1275buf; extern void prom_world(int); @@ -37,8 +37,8 @@ { unsigned long flags; - raw_local_save_flags(flags); - raw_local_irq_restore((unsigned long)PIL_NMI); + local_save_flags(flags); + local_irq_restore((unsigned long)PIL_NMI); raw_spin_lock(&prom_entry_lock); prom_world(1); @@ -46,11 +46,10 @@ prom_world(0); raw_spin_unlock(&prom_entry_lock); - raw_local_irq_restore(flags); + local_irq_restore(flags); } void prom_cif_init(void *cif_handler, void *cif_stack) { p1275buf.prom_cif_handler = (void (*)(long *))cif_handler; - p1275buf.prom_cif_stack = (unsigned long)cif_stack; } --- linux-3.13.0.orig/arch/tile/include/asm/compat.h +++ linux-3.13.0/arch/tile/include/asm/compat.h @@ -281,7 +281,6 @@ u32 dummy, u32 low, u32 high); long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, u32 dummy, u32 low, u32 high); -long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len); long compat_sys_sync_file_range2(int fd, unsigned int flags, u32 offset_lo, u32 offset_hi, u32 nbytes_lo, u32 nbytes_hi); --- linux-3.13.0.orig/arch/tile/mm/fault.c +++ linux-3.13.0/arch/tile/mm/fault.c @@ -444,6 +444,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/tile/mm/hugetlbpage.c +++ linux-3.13.0/arch/tile/mm/hugetlbpage.c @@ -166,11 +166,6 @@ return !!(pud_val(pud) & _PAGE_HUGE_PAGE); } -int pmd_huge_support(void) -{ - return 
1; -} - struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write) { --- linux-3.13.0.orig/arch/um/drivers/ubd_kern.c +++ linux-3.13.0/arch/um/drivers/ubd_kern.c @@ -1277,7 +1277,7 @@ while(1){ struct ubd *dev = q->queuedata; - if(dev->end_sg == 0){ + if(dev->request == NULL){ struct request *req = blk_fetch_request(q); if(req == NULL) return; @@ -1299,7 +1299,8 @@ return; } prepare_flush_request(req, io_req); - submit_request(io_req, dev); + if (submit_request(io_req, dev) == false) + return; } while(dev->start_sg < dev->end_sg){ --- linux-3.13.0.orig/arch/um/kernel/trap.c +++ linux-3.13.0/arch/um/kernel/trap.c @@ -80,6 +80,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { goto out_of_memory; + } else if (fault & VM_FAULT_SIGSEGV) { + goto out; } else if (fault & VM_FAULT_SIGBUS) { err = -EACCES; goto out; --- linux-3.13.0.orig/arch/x86/Kconfig +++ linux-3.13.0/arch/x86/Kconfig @@ -125,6 +125,7 @@ select RTC_LIB select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 + select ARCH_SUPPORTS_ATOMIC_RMW config INSTRUCTION_DECODER def_bool y @@ -865,7 +866,7 @@ config X86_UP_APIC bool "Local APIC support on uniprocessors" - depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI + depends on X86_32 && !SMP && !X86_32_NON_STANDARD ---help--- A local APIC (Advanced Programmable Interrupt Controller) is an integrated interrupt controller in the CPU. If you have a single-CPU @@ -876,6 +877,10 @@ performance counters), and the NMI watchdog which detects hard lockups. +config X86_UP_APIC_MSI + def_bool y + select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI + config X86_UP_IOAPIC bool "IO-APIC support on uniprocessors" depends on X86_UP_APIC @@ -977,10 +982,27 @@ default y depends on X86_32 ---help--- - This option is required by programs like DOSEMU to run 16-bit legacy - code on X86 processors. It also may be needed by software like - XFree86 to initialize some video cards via BIOS. Disabling this - option saves about 6k. + This option is required by programs like DOSEMU to run + 16-bit real mode legacy code on x86 processors. It also may + be needed by software like XFree86 to initialize some video + cards via BIOS. Disabling this option saves about 6K. + +config X86_16BIT + bool "Enable support for 16-bit segments" if EXPERT + default y + ---help--- + This option is required by programs like Wine to run 16-bit + protected mode legacy code on x86 processors. Disabling + this option saves about 300 bytes on i386, or around 6K text + plus 16K runtime memory on x86-64, + +config X86_ESPFIX32 + def_bool y + depends on X86_16BIT && X86_32 + +config X86_ESPFIX64 + def_bool y + depends on X86_16BIT && X86_64 config TOSHIBA tristate "Toshiba Laptop support" @@ -1595,6 +1617,7 @@ config EFI_STUB bool "EFI stub support" depends on EFI + select RELOCATABLE ---help--- This kernel feature allows a bzImage to be loaded directly by EFI firmware without the use of a bootloader. 
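
[Editor's note - illustrative snippet, not part of the patch.] The arch/tile/mm/fault.c and arch/um/kernel/trap.c hunks above add a VM_FAULT_SIGSEGV branch to their fault-return dispatch. The following minimal, standalone C sketch shows the dispatch order those hunks adopt; the bit values, names and strings here are placeholders for illustration only (the real definitions live in include/linux/mm.h and the respective arch handlers).

#include <stdio.h>

/* Placeholder bit values, illustration only -- not the kernel's definitions. */
#define VM_FAULT_OOM     0x0001u
#define VM_FAULT_SIGBUS  0x0002u
#define VM_FAULT_SIGSEGV 0x0040u
#define VM_FAULT_ERROR   (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)

/* Dispatch order mirrored from the hunks above: OOM first, then the
 * newly handled SIGSEGV case, then SIGBUS. */
static const char *dispatch(unsigned int fault)
{
	if (!(fault & VM_FAULT_ERROR))
		return "handled";
	if (fault & VM_FAULT_OOM)
		return "out_of_memory";
	if (fault & VM_FAULT_SIGSEGV)
		return "bad_area (deliver SIGSEGV)";
	if (fault & VM_FAULT_SIGBUS)
		return "do_sigbus (deliver SIGBUS)";
	return "BUG";
}

int main(void)
{
	printf("%s\n", dispatch(VM_FAULT_SIGSEGV));
	printf("%s\n", dispatch(VM_FAULT_OOM | VM_FAULT_SIGBUS));
	return 0;
}

The point of the sketch is only the ordering: without the new branch, a VM_FAULT_SIGSEGV result would fall through to the SIGBUS (or BUG) path.
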
@@ -1890,6 +1913,10 @@ def_bool y depends on X86_64 || X86_PAE +config ARCH_ENABLE_HUGEPAGE_MIGRATION + def_bool y + depends on X86_64 && HUGETLB_PAGE && MIGRATION + menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER @@ -2397,6 +2424,8 @@ source "drivers/Kconfig" +source "ubuntu/Kconfig" + source "drivers/firmware/Kconfig" source "fs/Kconfig" --- linux-3.13.0.orig/arch/x86/boot/compressed/misc.c +++ linux-3.13.0/arch/x86/boot/compressed/misc.c @@ -401,6 +401,8 @@ unsigned char *output, unsigned long output_len) { + unsigned char *output_orig = output; + real_mode = rmode; sanitize_boot_params(real_mode); @@ -439,7 +441,12 @@ debug_putstr("\nDecompressing Linux... "); decompress(input_data, input_len, NULL, NULL, output, NULL, error); parse_elf(output); - handle_relocations(output, output_len); + /* + * 32-bit always performs relocations. 64-bit relocations are only + * needed if kASLR has chosen a different load address. + */ + if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig) + handle_relocations(output, output_len); debug_putstr("done.\nBooting the kernel.\n"); return; } --- linux-3.13.0.orig/arch/x86/boot/cpucheck.c +++ linux-3.13.0/arch/x86/boot/cpucheck.c @@ -69,6 +69,13 @@ cpu_vendor[2] == A32('M', 'x', '8', '6'); } +static int is_intel(void) +{ + return cpu_vendor[0] == A32('G', 'e', 'n', 'u') && + cpu_vendor[1] == A32('i', 'n', 'e', 'I') && + cpu_vendor[2] == A32('n', 't', 'e', 'l'); +} + static int has_fpu(void) { u16 fcw = -1, fsw = -1; @@ -239,6 +246,24 @@ asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); err = check_flags(); + } else if (err == 0x01 && + !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) && + is_intel() && cpu.level == 6 && + (cpu.model == 9 || cpu.model == 13)) { + /* PAE is disabled on this Pentium M but can be forced */ + if (cmdline_find_option_bool("forcepae")) { + puts("WARNING: Forcing PAE in CPU flags\n"); + set_bit(X86_FEATURE_PAE, cpu.flags); + err = check_flags(); + } + else { + puts("ERROR: PAE is disabled on this Pentium M\n" + "(PAE can potentially be enabled with " + "kernel parameter\n" + "\"forcepae\" - this is unsupported, may " + "cause unknown\n" + "problems, and will taint the kernel)\n"); + } } if (err_flags_ptr) --- linux-3.13.0.orig/arch/x86/boot/header.S +++ linux-3.13.0/arch/x86/boot/header.S @@ -91,10 +91,9 @@ .section ".bsdata", "a" bugger_off_msg: - .ascii "Direct floppy boot is not supported. " - .ascii "Use a boot loader program instead.\r\n" + .ascii "Use a boot loader.\r\n" .ascii "\n" - .ascii "Remove disk and press any key to reboot ...\r\n" + .ascii "Remove disk and press any key to reboot...\r\n" .byte 0 #ifdef CONFIG_EFI_STUB @@ -108,7 +107,7 @@ #else .word 0x8664 # x86-64 #endif - .word 3 # nr_sections + .word 4 # nr_sections .long 0 # TimeDateStamp .long 0 # PointerToSymbolTable .long 1 # NumberOfSymbols @@ -250,6 +249,25 @@ .word 0 # NumberOfLineNumbers .long 0x60500020 # Characteristics (section flags) + # + # The offset & size fields are filled in by build.c. + # + .ascii ".bss" + .byte 0 + .byte 0 + .byte 0 + .byte 0 + .long 0 + .long 0x0 + .long 0 # Size of initialized data + # on disk + .long 0x0 + .long 0 # PointerToRelocations + .long 0 # PointerToLineNumbers + .word 0 # NumberOfRelocations + .word 0 # NumberOfLineNumbers + .long 0xc8000080 # Characteristics (section flags) + #endif /* CONFIG_EFI_STUB */ # Kernel attributes; used by setup. 
This is part 1 of the --- linux-3.13.0.orig/arch/x86/boot/tools/build.c +++ linux-3.13.0/arch/x86/boot/tools/build.c @@ -142,7 +142,7 @@ #ifdef CONFIG_EFI_STUB -static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) +static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset) { unsigned int pe_header; unsigned short num_sections; @@ -163,10 +163,10 @@ put_unaligned_le32(size, section + 0x8); /* section header vma field */ - put_unaligned_le32(offset, section + 0xc); + put_unaligned_le32(vma, section + 0xc); /* section header 'size of initialised data' field */ - put_unaligned_le32(size, section + 0x10); + put_unaligned_le32(datasz, section + 0x10); /* section header 'file offset' field */ put_unaligned_le32(offset, section + 0x14); @@ -178,6 +178,11 @@ } } +static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) +{ + update_pecoff_section_header_fields(section_name, offset, size, size, offset); +} + static void update_pecoff_setup_and_reloc(unsigned int size) { u32 setup_offset = 0x200; @@ -202,9 +207,6 @@ pe_header = get_unaligned_le32(&buf[0x3c]); - /* Size of image */ - put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); - /* * Size of code: Subtract the size of the first sector (512 bytes) * which includes the header. @@ -219,6 +221,22 @@ update_pecoff_section_header(".text", text_start, text_sz); } +static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz) +{ + unsigned int pe_header; + unsigned int bss_sz = init_sz - file_sz; + + pe_header = get_unaligned_le32(&buf[0x3c]); + + /* Size of uninitialized data */ + put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]); + + /* Size of image */ + put_unaligned_le32(init_sz, &buf[pe_header + 0x50]); + + update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0); +} + #endif /* CONFIG_EFI_STUB */ @@ -270,6 +288,9 @@ int fd; void *kernel; u32 crc = 0xffffffffUL; +#ifdef CONFIG_EFI_STUB + unsigned int init_sz; +#endif /* Defaults for old kernel */ #ifdef CONFIG_X86_32 @@ -343,7 +364,9 @@ put_unaligned_le32(sys_size, &buf[0x1f4]); #ifdef CONFIG_EFI_STUB - update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); + update_pecoff_text(setup_sectors * 512, i + (sys_size * 16)); + init_sz = get_unaligned_le32(&buf[0x260]); + update_pecoff_bss(i + (sys_size * 16), init_sz); #ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */ efi_stub_entry -= 0x200; --- linux-3.13.0.orig/arch/x86/boot/video.h +++ linux-3.13.0/arch/x86/boot/video.h @@ -80,7 +80,7 @@ u16 xmode_n; /* Size of unprobed mode range */ }; -#define __videocard struct card_info __attribute__((section(".videocards"))) +#define __videocard struct card_info __attribute__((used,section(".videocards"))) extern struct card_info video_cards[], video_cards_end[]; int mode_defined(u16 mode); /* video.c */ --- linux-3.13.0.orig/arch/x86/configs/i386_defconfig +++ linux-3.13.0/arch/x86/configs/i386_defconfig @@ -60,7 +60,6 @@ CONFIG_HIBERNATION=y CONFIG_PM_DEBUG=y CONFIG_PM_TRACE_RTC=y -CONFIG_ACPI_PROCFS=y CONFIG_ACPI_DOCK=y CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set --- linux-3.13.0.orig/arch/x86/configs/x86_64_defconfig +++ linux-3.13.0/arch/x86/configs/x86_64_defconfig @@ -58,7 +58,6 @@ CONFIG_HIBERNATION=y CONFIG_PM_DEBUG=y CONFIG_PM_TRACE_RTC=y -CONFIG_ACPI_PROCFS=y CONFIG_ACPI_DOCK=y CONFIG_CPU_FREQ=y # CONFIG_CPU_FREQ_STAT is not set --- linux-3.13.0.orig/arch/x86/crypto/aes_glue.c +++ linux-3.13.0/arch/x86/crypto/aes_glue.c @@ 
-66,5 +66,5 @@ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); -MODULE_ALIAS("aes-asm"); +MODULE_ALIAS_CRYPTO("aes"); +MODULE_ALIAS_CRYPTO("aes-asm"); --- linux-3.13.0.orig/arch/x86/crypto/aesni-intel_glue.c +++ linux-3.13.0/arch/x86/crypto/aesni-intel_glue.c @@ -989,7 +989,7 @@ src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC); if (!src) return -ENOMEM; - assoc = (src + req->cryptlen + auth_tag_len); + assoc = (src + req->cryptlen); scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0); scatterwalk_map_and_copy(assoc, req->assoc, 0, req->assoclen, 0); @@ -1014,7 +1014,7 @@ scatterwalk_done(&src_sg_walk, 0, 0); scatterwalk_done(&assoc_sg_walk, 0, 0); } else { - scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1); + scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1); kfree(src); } return retval; @@ -1373,4 +1373,4 @@ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); --- linux-3.13.0.orig/arch/x86/crypto/blowfish_glue.c +++ linux-3.13.0/arch/x86/crypto/blowfish_glue.c @@ -481,5 +481,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized"); -MODULE_ALIAS("blowfish"); -MODULE_ALIAS("blowfish-asm"); +MODULE_ALIAS_CRYPTO("blowfish"); +MODULE_ALIAS_CRYPTO("blowfish-asm"); --- linux-3.13.0.orig/arch/x86/crypto/camellia_aesni_avx2_glue.c +++ linux-3.13.0/arch/x86/crypto/camellia_aesni_avx2_glue.c @@ -582,5 +582,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized"); -MODULE_ALIAS("camellia"); -MODULE_ALIAS("camellia-asm"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-asm"); --- linux-3.13.0.orig/arch/x86/crypto/camellia_aesni_avx_glue.c +++ linux-3.13.0/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -574,5 +574,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized"); -MODULE_ALIAS("camellia"); -MODULE_ALIAS("camellia-asm"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-asm"); --- linux-3.13.0.orig/arch/x86/crypto/camellia_glue.c +++ linux-3.13.0/arch/x86/crypto/camellia_glue.c @@ -1725,5 +1725,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized"); -MODULE_ALIAS("camellia"); -MODULE_ALIAS("camellia-asm"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-asm"); --- linux-3.13.0.orig/arch/x86/crypto/cast5_avx_glue.c +++ linux-3.13.0/arch/x86/crypto/cast5_avx_glue.c @@ -494,4 +494,4 @@ MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("cast5"); +MODULE_ALIAS_CRYPTO("cast5"); --- linux-3.13.0.orig/arch/x86/crypto/cast6_avx_glue.c +++ linux-3.13.0/arch/x86/crypto/cast6_avx_glue.c @@ -611,4 +611,4 @@ MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("cast6"); +MODULE_ALIAS_CRYPTO("cast6"); --- linux-3.13.0.orig/arch/x86/crypto/crc32-pclmul_glue.c +++ linux-3.13.0/arch/x86/crypto/crc32-pclmul_glue.c @@ -197,5 +197,5 @@ MODULE_AUTHOR("Alexander Boyko "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crc32"); -MODULE_ALIAS("crc32-pclmul"); +MODULE_ALIAS_CRYPTO("crc32"); +MODULE_ALIAS_CRYPTO("crc32-pclmul"); --- linux-3.13.0.orig/arch/x86/crypto/crc32c-intel_glue.c +++ linux-3.13.0/arch/x86/crypto/crc32c-intel_glue.c @@ -280,5 +280,5 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel 
Hardware."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crc32c"); -MODULE_ALIAS("crc32c-intel"); +MODULE_ALIAS_CRYPTO("crc32c"); +MODULE_ALIAS_CRYPTO("crc32c-intel"); --- linux-3.13.0.orig/arch/x86/crypto/crct10dif-pclmul_glue.c +++ linux-3.13.0/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -147,5 +147,5 @@ MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crct10dif"); -MODULE_ALIAS("crct10dif-pclmul"); +MODULE_ALIAS_CRYPTO("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif-pclmul"); --- linux-3.13.0.orig/arch/x86/crypto/fpu.c +++ linux-3.13.0/arch/x86/crypto/fpu.c @@ -17,6 +17,7 @@ #include #include #include +#include #include struct crypto_fpu_ctx { @@ -159,3 +160,5 @@ { crypto_unregister_template(&crypto_fpu_tmpl); } + +MODULE_ALIAS_CRYPTO("fpu"); --- linux-3.13.0.orig/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ linux-3.13.0/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -24,10 +24,6 @@ .align 16 .Lbswap_mask: .octa 0x000102030405060708090a0b0c0d0e0f -.Lpoly: - .octa 0xc2000000000000000000000000000001 -.Ltwo_one: - .octa 0x00000001000000000000000000000001 #define DATA %xmm0 #define SHASH %xmm1 @@ -134,28 +130,3 @@ .Lupdate_just_ret: ret ENDPROC(clmul_ghash_update) - -/* - * void clmul_ghash_setkey(be128 *shash, const u8 *key); - * - * Calculate hash_key << 1 mod poly - */ -ENTRY(clmul_ghash_setkey) - movaps .Lbswap_mask, BSWAP - movups (%rsi), %xmm0 - PSHUFB_XMM BSWAP %xmm0 - movaps %xmm0, %xmm1 - psllq $1, %xmm0 - psrlq $63, %xmm1 - movaps %xmm1, %xmm2 - pslldq $8, %xmm1 - psrldq $8, %xmm2 - por %xmm1, %xmm0 - # reduction - pshufd $0b00100100, %xmm2, %xmm1 - pcmpeqd .Ltwo_one, %xmm1 - pand .Lpoly, %xmm1 - pxor %xmm1, %xmm0 - movups %xmm0, (%rdi) - ret -ENDPROC(clmul_ghash_setkey) --- linux-3.13.0.orig/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ linux-3.13.0/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -30,8 +30,6 @@ void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, const be128 *shash); -void clmul_ghash_setkey(be128 *shash, const u8 *key); - struct ghash_async_ctx { struct cryptd_ahash *cryptd_tfm; }; @@ -58,13 +56,23 @@ const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); + be128 *x = (be128 *)key; + u64 a, b; if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } - clmul_ghash_setkey(&ctx->shash, key); + /* perform multiplication by 'x' in GF(2^128) */ + a = be64_to_cpu(x->a); + b = be64_to_cpu(x->b); + + ctx->shash.a = (__be64)((b << 1) | (a >> 63)); + ctx->shash.b = (__be64)((a << 1) | (b >> 63)); + + if (a >> 63) + ctx->shash.b ^= cpu_to_be64(0xc2); return 0; } @@ -333,4 +341,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm, " "acclerated by PCLMULQDQ-NI"); -MODULE_ALIAS("ghash"); +MODULE_ALIAS_CRYPTO("ghash"); --- linux-3.13.0.orig/arch/x86/crypto/salsa20_glue.c +++ linux-3.13.0/arch/x86/crypto/salsa20_glue.c @@ -119,5 +119,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); -MODULE_ALIAS("salsa20"); -MODULE_ALIAS("salsa20-asm"); +MODULE_ALIAS_CRYPTO("salsa20"); +MODULE_ALIAS_CRYPTO("salsa20-asm"); --- linux-3.13.0.orig/arch/x86/crypto/serpent_avx2_glue.c +++ linux-3.13.0/arch/x86/crypto/serpent_avx2_glue.c @@ -558,5 +558,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized"); -MODULE_ALIAS("serpent"); -MODULE_ALIAS("serpent-asm"); +MODULE_ALIAS_CRYPTO("serpent"); 
+MODULE_ALIAS_CRYPTO("serpent-asm"); --- linux-3.13.0.orig/arch/x86/crypto/serpent_avx_glue.c +++ linux-3.13.0/arch/x86/crypto/serpent_avx_glue.c @@ -617,4 +617,4 @@ MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("serpent"); +MODULE_ALIAS_CRYPTO("serpent"); --- linux-3.13.0.orig/arch/x86/crypto/serpent_sse2_glue.c +++ linux-3.13.0/arch/x86/crypto/serpent_sse2_glue.c @@ -618,4 +618,4 @@ MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("serpent"); +MODULE_ALIAS_CRYPTO("serpent"); --- linux-3.13.0.orig/arch/x86/crypto/sha1_ssse3_glue.c +++ linux-3.13.0/arch/x86/crypto/sha1_ssse3_glue.c @@ -237,4 +237,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); --- linux-3.13.0.orig/arch/x86/crypto/sha256_ssse3_glue.c +++ linux-3.13.0/arch/x86/crypto/sha256_ssse3_glue.c @@ -318,5 +318,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); -MODULE_ALIAS("sha256"); -MODULE_ALIAS("sha224"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); --- linux-3.13.0.orig/arch/x86/crypto/sha512_ssse3_glue.c +++ linux-3.13.0/arch/x86/crypto/sha512_ssse3_glue.c @@ -141,7 +141,7 @@ /* save number of bits */ bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; + bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); /* Pad out to 112 mod 128 and append length */ index = sctx->count[0] & 0x7f; @@ -326,5 +326,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated"); -MODULE_ALIAS("sha512"); -MODULE_ALIAS("sha384"); +MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha384"); --- linux-3.13.0.orig/arch/x86/crypto/twofish_avx_glue.c +++ linux-3.13.0/arch/x86/crypto/twofish_avx_glue.c @@ -579,4 +579,4 @@ MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("twofish"); +MODULE_ALIAS_CRYPTO("twofish"); --- linux-3.13.0.orig/arch/x86/crypto/twofish_glue.c +++ linux-3.13.0/arch/x86/crypto/twofish_glue.c @@ -96,5 +96,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized"); -MODULE_ALIAS("twofish"); -MODULE_ALIAS("twofish-asm"); +MODULE_ALIAS_CRYPTO("twofish"); +MODULE_ALIAS_CRYPTO("twofish-asm"); --- linux-3.13.0.orig/arch/x86/crypto/twofish_glue_3way.c +++ linux-3.13.0/arch/x86/crypto/twofish_glue_3way.c @@ -495,5 +495,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized"); -MODULE_ALIAS("twofish"); -MODULE_ALIAS("twofish-asm"); +MODULE_ALIAS_CRYPTO("twofish"); +MODULE_ALIAS_CRYPTO("twofish-asm"); --- linux-3.13.0.orig/arch/x86/ia32/ia32entry.S +++ linux-3.13.0/arch/x86/ia32/ia32entry.S @@ -151,6 +151,16 @@ 1: movl (%rbp),%ebp _ASM_EXTABLE(1b,ia32_badarg) ASM_CLAC + + /* + * Sysenter doesn't filter flags, so we need to clear NT + * ourselves. To save a few cycles, we can check whether + * NT was set instead of doing an unconditional popfq. 
+ */ + testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp) + jnz sysenter_fix_flags +sysenter_flags_fixed: + orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) CFI_REMEMBER_STATE @@ -184,6 +194,8 @@ TRACE_IRQS_ON ENABLE_INTERRUPTS_SYSEXIT32 + CFI_RESTORE_STATE + #ifdef CONFIG_AUDITSYSCALL .macro auditsys_entry_common movl %esi,%r9d /* 6th arg: 4th syscall arg */ @@ -226,7 +238,6 @@ .endm sysenter_auditsys: - CFI_RESTORE_STATE auditsys_entry_common movl %ebp,%r9d /* reload 6th syscall arg */ jmp sysenter_dispatch @@ -235,6 +246,11 @@ auditsys_exit sysexit_from_sys_call #endif +sysenter_fix_flags: + pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) + popfq_cfi + jmp sysenter_flags_fixed + sysenter_tracesys: #ifdef CONFIG_AUDITSYSCALL testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) --- linux-3.13.0.orig/arch/x86/include/asm/cpufeature.h +++ linux-3.13.0/arch/x86/include/asm/cpufeature.h @@ -203,6 +203,7 @@ #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ @@ -216,9 +217,13 @@ #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */ #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ +#define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */ +#define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */ +#define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */ /* * BUG word(s) --- linux-3.13.0.orig/arch/x86/include/asm/desc.h +++ linux-3.13.0/arch/x86/include/asm/desc.h @@ -251,7 +251,8 @@ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; } -#define _LDT_empty(info) \ +/* This intentionally ignores lm, since 32-bit apps don't have that field. */ +#define LDT_empty(info) \ ((info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ @@ -261,11 +262,18 @@ (info)->seg_not_present == 1 && \ (info)->useable == 0) -#ifdef CONFIG_X86_64 -#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0)) -#else -#define LDT_empty(info) (_LDT_empty(info)) -#endif +/* Lots of programs expect an all-zero user_desc to mean "no segment at all". 
*/ +static inline bool LDT_zero(const struct user_desc *info) +{ + return (info->base_addr == 0 && + info->limit == 0 && + info->contents == 0 && + info->read_exec_only == 0 && + info->seg_32bit == 0 && + info->limit_in_pages == 0 && + info->seg_not_present == 0 && + info->useable == 0); +} static inline void clear_LDT(void) { --- linux-3.13.0.orig/arch/x86/include/asm/dma-mapping.h +++ linux-3.13.0/arch/x86/include/asm/dma-mapping.h @@ -163,8 +163,6 @@ { struct dma_map_ops *ops = get_dma_ops(dev); - WARN_ON(irqs_disabled()); /* for portability */ - if (dma_release_from_coherent(dev, get_order(size), vaddr)) return; --- linux-3.13.0.orig/arch/x86/include/asm/efi.h +++ linux-3.13.0/arch/x86/include/asm/efi.h @@ -1,6 +1,24 @@ #ifndef _ASM_X86_EFI_H #define _ASM_X86_EFI_H +/* + * We map the EFI regions needed for runtime services non-contiguously, + * with preserved alignment on virtual addresses starting from -4G down + * for a total max space of 64G. This way, we provide for stable runtime + * services addresses across kernels so that a kexec'd kernel can still + * use them. + * + * This is the main reason why we're doing stable VA mappings for RT + * services. + * + * This flag is used in conjuction with a chicken bit called + * "efi=old_map" which can be used as a fallback to the old runtime + * services mapping method in case there's some b0rkage with a + * particular EFI implementation (haha, it is hard to hold up the + * sarcasm here...). + */ +#define EFI_OLD_MEMMAP EFI_ARCH_1 + #ifdef CONFIG_X86_32 #define EFI_LOADER_SIGNATURE "EL32" @@ -69,24 +87,31 @@ efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \ (u64)(a4), (u64)(a5), (u64)(a6)) +#define _efi_call_virtX(x, f, ...) \ +({ \ + efi_status_t __s; \ + \ + efi_sync_low_kernel_mappings(); \ + preempt_disable(); \ + __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \ + preempt_enable(); \ + __s; \ +}) + #define efi_call_virt0(f) \ - efi_call0((efi.systab->runtime->f)) -#define efi_call_virt1(f, a1) \ - efi_call1((efi.systab->runtime->f), (u64)(a1)) -#define efi_call_virt2(f, a1, a2) \ - efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2)) -#define efi_call_virt3(f, a1, a2, a3) \ - efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3)) -#define efi_call_virt4(f, a1, a2, a3, a4) \ - efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4)) -#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ - efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4), (u64)(a5)) -#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ - efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ - (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) + _efi_call_virtX(0, f) +#define efi_call_virt1(f, a1) \ + _efi_call_virtX(1, f, (u64)(a1)) +#define efi_call_virt2(f, a1, a2) \ + _efi_call_virtX(2, f, (u64)(a1), (u64)(a2)) +#define efi_call_virt3(f, a1, a2, a3) \ + _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3)) +#define efi_call_virt4(f, a1, a2, a3, a4) \ + _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4)) +#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ + _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5)) +#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ + _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); @@ -95,12 +120,19 @@ extern int add_efi_memmap; extern unsigned long x86_efi_facility; +extern 
struct efi_scratch efi_scratch; extern void efi_set_executable(efi_memory_desc_t *md, bool executable); extern int efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); extern void efi_unmap_memmap(void); extern void efi_memory_uc(u64 addr, unsigned long size); +extern void __init efi_map_region(efi_memory_desc_t *md); +extern void efi_sync_low_kernel_mappings(void); +extern void efi_setup_page_tables(void); +extern void __init old_map_region(efi_memory_desc_t *md); +extern void __init runtime_code_page_mkexec(void); +extern void __init efi_runtime_mkexec(void); #ifdef CONFIG_EFI --- linux-3.13.0.orig/arch/x86/include/asm/elf.h +++ linux-3.13.0/arch/x86/include/asm/elf.h @@ -155,8 +155,9 @@ #define elf_check_arch(x) \ ((x)->e_machine == EM_X86_64) -#define compat_elf_check_arch(x) \ - (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64) +#define compat_elf_check_arch(x) \ + (elf_check_arch_ia32(x) || \ + (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64)) #if __USER32_DS != __USER_DS # error "The following code assumes __USER32_DS == __USER_DS" --- linux-3.13.0.orig/arch/x86/include/asm/espfix.h +++ linux-3.13.0/arch/x86/include/asm/espfix.h @@ -0,0 +1,16 @@ +#ifndef _ASM_X86_ESPFIX_H +#define _ASM_X86_ESPFIX_H + +#ifdef CONFIG_X86_64 + +#include + +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); +DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); + +extern void init_espfix_bsp(void); +extern void init_espfix_ap(void); + +#endif /* CONFIG_X86_64 */ + +#endif /* _ASM_X86_ESPFIX_H */ --- linux-3.13.0.orig/arch/x86/include/asm/fpu-internal.h +++ linux-3.13.0/arch/x86/include/asm/fpu-internal.h @@ -368,7 +368,7 @@ preempt_disable(); tsk->thread.fpu_counter = 0; __drop_fpu(tsk); - clear_used_math(); + clear_stopped_child_used_math(tsk); preempt_enable(); } --- linux-3.13.0.orig/arch/x86/include/asm/hugetlb.h +++ linux-3.13.0/arch/x86/include/asm/hugetlb.h @@ -52,6 +52,7 @@ static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + ptep_clear_flush(vma, addr, ptep); } static inline int huge_pte_none(pte_t pte) --- linux-3.13.0.orig/arch/x86/include/asm/irqflags.h +++ linux-3.13.0/arch/x86/include/asm/irqflags.h @@ -129,7 +129,7 @@ #define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ -#define INTERRUPT_RETURN iretq +#define INTERRUPT_RETURN jmp native_iret #define USERGS_SYSRET64 \ swapgs; \ sysretq; --- linux-3.13.0.orig/arch/x86/include/asm/kvm_host.h +++ linux-3.13.0/arch/x86/include/asm/kvm_host.h @@ -462,7 +462,7 @@ bool nmi_injected; /* Trying to inject an NMI this entry */ struct mtrr_state_type mtrr_state; - u32 pat; + u64 pat; int switch_db_regs; unsigned long db[KVM_NR_DB_REGS]; @@ -480,6 +480,7 @@ u64 mmio_gva; unsigned access; gfn_t mmio_gfn; + u64 mmio_gen; struct kvm_pmu pmu; @@ -762,6 +763,8 @@ struct x86_instruction_info *info, enum x86_intercept_stage stage); void (*handle_external_intr)(struct kvm_vcpu *vcpu); + + int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); }; struct kvm_arch_async_pf { @@ -980,6 +983,20 @@ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); } +static inline u64 get_canonical(u64 la) +{ + return ((int64_t)la << 16) >> 16; +} + +static inline bool is_noncanonical_address(u64 la) +{ +#ifdef CONFIG_X86_64 + return get_canonical(la) != la; +#else + return false; +#endif +} + #define TSS_IOPB_BASE_OFFSET 0x66 #define TSS_BASE_SIZE 0x68 #define TSS_IOPB_SIZE (65536 / 8) @@ -1038,7 +1055,7 @@ void 
kvm_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_define_shared_msr(unsigned index, u32 msr); -void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); +int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); --- linux-3.13.0.orig/arch/x86/include/asm/kvm_para.h +++ linux-3.13.0/arch/x86/include/asm/kvm_para.h @@ -2,6 +2,7 @@ #define _ASM_X86_KVM_PARA_H #include +#include #include extern void kvmclock_init(void); @@ -16,10 +17,15 @@ } #endif /* CONFIG_KVM_GUEST */ -/* This instruction is vmcall. On non-VT architectures, it will generate a - * trap that we will then rewrite to the appropriate instruction. +#ifdef CONFIG_DEBUG_RODATA +#define KVM_HYPERCALL \ + ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL) +#else +/* On AMD processors, vmcall will generate a trap that we will + * then rewrite to the appropriate instruction. */ #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" +#endif /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall * instruction. The hypervisor may replace it with something else but only the @@ -85,28 +91,9 @@ return ret; } -static inline uint32_t kvm_cpuid_base(void) -{ - if (boot_cpu_data.cpuid_level < 0) - return 0; /* So we don't blow up on old processors */ - - if (cpu_has_hypervisor) - return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); - - return 0; -} - -static inline bool kvm_para_available(void) -{ - return kvm_cpuid_base() != 0; -} - -static inline unsigned int kvm_arch_para_features(void) -{ - return cpuid_eax(KVM_CPUID_FEATURES); -} - #ifdef CONFIG_KVM_GUEST +bool kvm_para_available(void); +unsigned int kvm_arch_para_features(void); void __init kvm_guest_init(void); void kvm_async_pf_task_wait(u32 token); void kvm_async_pf_task_wake(u32 token); @@ -126,6 +113,16 @@ #define kvm_async_pf_task_wait(T) do {} while(0) #define kvm_async_pf_task_wake(T) do {} while(0) +static inline bool kvm_para_available(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + static inline u32 kvm_read_and_reset_pf_reason(void) { return 0; --- linux-3.13.0.orig/arch/x86/include/asm/page_32_types.h +++ linux-3.13.0/arch/x86/include/asm/page_32_types.h @@ -20,7 +20,6 @@ #define THREAD_SIZE_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) -#define STACKFAULT_STACK 0 #define DOUBLEFAULT_STACK 1 #define NMI_STACK 0 #define DEBUG_STACK 0 --- linux-3.13.0.orig/arch/x86/include/asm/page_64_types.h +++ linux-3.13.0/arch/x86/include/asm/page_64_types.h @@ -14,12 +14,11 @@ #define IRQ_STACK_ORDER 2 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) -#define STACKFAULT_STACK 1 -#define DOUBLEFAULT_STACK 2 -#define NMI_STACK 3 -#define DEBUG_STACK 4 -#define MCE_STACK 5 -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ +#define DOUBLEFAULT_STACK 1 +#define NMI_STACK 2 +#define DEBUG_STACK 3 +#define MCE_STACK 4 +#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) --- linux-3.13.0.orig/arch/x86/include/asm/pgtable_64_types.h +++ linux-3.13.0/arch/x86/include/asm/pgtable_64_types.h @@ -61,6 +61,8 @@ #define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) +#define ESPFIX_PGD_ENTRY _AC(-2, UL) +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT) #define EARLY_DYNAMIC_PAGE_TABLES 64 --- 
linux-3.13.0.orig/arch/x86/include/asm/pgtable_types.h +++ linux-3.13.0/arch/x86/include/asm/pgtable_types.h @@ -121,7 +121,8 @@ /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ - _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY) + _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ + _PAGE_SOFT_DIRTY) #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) #define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT) @@ -382,7 +383,8 @@ */ extern pte_t *lookup_address(unsigned long address, unsigned int *level); extern phys_addr_t slow_virt_to_phys(void *__address); - +extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags); #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_DEFS_H */ --- linux-3.13.0.orig/arch/x86/include/asm/ptrace.h +++ linux-3.13.0/arch/x86/include/asm/ptrace.h @@ -232,6 +232,22 @@ #define ARCH_HAS_USER_SINGLE_STEP_INFO +/* + * When hitting ptrace_stop(), we cannot return using SYSRET because + * that does not restore the full CPU state, only a minimal set. The + * ptracer can change arbitrary register values, which is usually okay + * because the usual ptrace stops run off the signal delivery path which + * forces IRET; however, ptrace_event() stops happen in arbitrary places + * in the kernel and don't force IRET path. + * + * So force IRET path after a ptrace stop. + */ +#define arch_ptrace_stop_needed(code, info) \ +({ \ + set_thread_flag(TIF_NOTIFY_RESUME); \ + false; \ +}) + struct user_desc; extern int do_get_thread_area(struct task_struct *p, int idx, struct user_desc __user *info); --- linux-3.13.0.orig/arch/x86/include/asm/pvclock.h +++ linux-3.13.0/arch/x86/include/asm/pvclock.h @@ -95,6 +95,7 @@ struct pvclock_vsyscall_time_info { struct pvclock_vcpu_time_info pvti; + u32 migrate_count; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) --- linux-3.13.0.orig/arch/x86/include/asm/setup.h +++ linux-3.13.0/arch/x86/include/asm/setup.h @@ -64,6 +64,8 @@ #ifndef _SETUP +#include + /* * This is set up by the setup-routine at boot-time */ --- linux-3.13.0.orig/arch/x86/include/asm/topology.h +++ linux-3.13.0/arch/x86/include/asm/topology.h @@ -119,9 +119,10 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); -#ifdef ENABLE_TOPO_DEFINES #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) + +#ifdef ENABLE_TOPO_DEFINES #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) #endif --- linux-3.13.0.orig/arch/x86/include/asm/traps.h +++ linux-3.13.0/arch/x86/include/asm/traps.h @@ -39,6 +39,7 @@ #ifdef CONFIG_TRACING asmlinkage void trace_page_fault(void); +#define trace_stack_segment stack_segment #define trace_divide_error divide_error #define trace_bounds bounds #define trace_invalid_op invalid_op --- linux-3.13.0.orig/arch/x86/include/asm/vga.h +++ linux-3.13.0/arch/x86/include/asm/vga.h @@ -17,10 +17,4 @@ #define vga_readb(x) (*(x)) #define vga_writeb(x, y) (*(y) = (x)) -#ifdef CONFIG_FB_EFI -#define __ARCH_HAS_VGA_DEFAULT_DEVICE -extern struct pci_dev *vga_default_device(void); -extern void vga_set_default_device(struct pci_dev *pdev); -#endif - #endif /* _ASM_X86_VGA_H */ --- linux-3.13.0.orig/arch/x86/include/asm/vsyscall.h +++ linux-3.13.0/arch/x86/include/asm/vsyscall.h @@ -34,7 +34,7 @@ native_read_tscp(&p); } else { /* Load per CPU 
data from GDT */ - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); + asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); } return p; --- linux-3.13.0.orig/arch/x86/include/asm/xsave.h +++ linux-3.13.0/arch/x86/include/asm/xsave.h @@ -6,9 +6,14 @@ #define XSTATE_CPUID 0x0000000d -#define XSTATE_FP 0x1 -#define XSTATE_SSE 0x2 -#define XSTATE_YMM 0x4 +#define XSTATE_FP 0x1 +#define XSTATE_SSE 0x2 +#define XSTATE_YMM 0x4 +#define XSTATE_BNDREGS 0x8 +#define XSTATE_BNDCSR 0x10 +#define XSTATE_OPMASK 0x20 +#define XSTATE_ZMM_Hi256 0x40 +#define XSTATE_Hi16_ZMM 0x80 #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) @@ -23,7 +28,7 @@ /* * These are the features that the OS can handle currently. */ -#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) +#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM) #ifdef CONFIG_X86_64 #define REX_PREFIX "0x48, " --- linux-3.13.0.orig/arch/x86/include/uapi/asm/ldt.h +++ linux-3.13.0/arch/x86/include/uapi/asm/ldt.h @@ -28,6 +28,13 @@ unsigned int seg_not_present:1; unsigned int useable:1; #ifdef __x86_64__ + /* + * Because this bit is not present in 32-bit user code, user + * programs can pass uninitialized values here. Therefore, in + * any context in which a user_desc comes from a 32-bit program, + * the kernel must act as though lm == 0, regardless of the + * actual value. + */ unsigned int lm:1; #endif }; --- linux-3.13.0.orig/arch/x86/include/uapi/asm/msr-index.h +++ linux-3.13.0/arch/x86/include/uapi/asm/msr-index.h @@ -184,6 +184,7 @@ #define MSR_AMD64_PATCH_LOADER 0xc0010020 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 #define MSR_AMD64_OSVW_STATUS 0xc0010141 +#define MSR_AMD64_LS_CFG 0xc0011020 #define MSR_AMD64_DC_CFG 0xc0011022 #define MSR_AMD64_BU_CFG2 0xc001102a #define MSR_AMD64_IBSFETCHCTL 0xc0011030 --- linux-3.13.0.orig/arch/x86/include/uapi/asm/vmx.h +++ linux-3.13.0/arch/x86/include/uapi/asm/vmx.h @@ -67,6 +67,7 @@ #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_INVEPT 50 #define EXIT_REASON_PREEMPTION_TIMER 52 +#define EXIT_REASON_INVVPID 53 #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 #define EXIT_REASON_APIC_WRITE 56 @@ -114,6 +115,7 @@ { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ { EXIT_REASON_INVD, "INVD" }, \ + { EXIT_REASON_INVVPID, "INVVPID" }, \ { EXIT_REASON_INVPCID, "INVPCID" } #endif /* _UAPIVMX_H */ --- linux-3.13.0.orig/arch/x86/kernel/Makefile +++ linux-3.13.0/arch/x86/kernel/Makefile @@ -29,6 +29,7 @@ obj-y += syscall_$(BITS).o obj-$(CONFIG_X86_64) += vsyscall_64.o obj-$(CONFIG_X86_64) += vsyscall_emu_64.o +obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o topology.o kdebugfs.o obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o --- linux-3.13.0.orig/arch/x86/kernel/apic/apic.c +++ linux-3.13.0/arch/x86/kernel/apic/apic.c @@ -1283,7 +1283,7 @@ unsigned int value, queued; int i, j, acked = 0; unsigned long long tsc = 0, ntsc; - long long max_loops = cpu_khz; + long long max_loops = cpu_khz ? 
cpu_khz : 1000000; if (cpu_has_tsc) rdtscll(tsc); @@ -1380,7 +1380,7 @@ break; } if (queued) { - if (cpu_has_tsc) { + if (cpu_has_tsc && cpu_khz) { rdtscll(ntsc); max_loops = (cpu_khz << 10) - (ntsc - tsc); } else --- linux-3.13.0.orig/arch/x86/kernel/cpu/amd.c +++ linux-3.13.0/arch/x86/kernel/cpu/amd.c @@ -219,7 +219,7 @@ */ WARN_ONCE(1, "WARNING: This combination of AMD" " processors is not suitable for SMP.\n"); - add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); } static void init_amd_k7(struct cpuinfo_x86 *c) @@ -508,6 +508,23 @@ set_cpu_cap(c, X86_FEATURE_EXTD_APICID); } #endif + + /* + * This is only needed to tell the kernel whether to use VMCALL + * and VMMCALL. VMMCALL is never executed except under virt, so + * we can set it unconditionally. + */ + set_cpu_cap(c, X86_FEATURE_VMMCALL); + + /* F16h erratum 793, CVE-2013-6885 */ + if (c->x86 == 0x16 && c->x86_model <= 0xf) { + u64 val; + + rdmsrl(MSR_AMD64_LS_CFG, val); + if (!(val & BIT(15))) + wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15)); + } + } static const int amd_erratum_383[]; --- linux-3.13.0.orig/arch/x86/kernel/cpu/common.c +++ linux-3.13.0/arch/x86/kernel/cpu/common.c @@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s) { + if (strlen(s)) + return 0; setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); setup_clear_cpu_cap(X86_FEATURE_AVX); @@ -284,8 +286,13 @@ raw_local_save_flags(eflags); BUG_ON(eflags & X86_EFLAGS_AC); - if (cpu_has(c, X86_FEATURE_SMAP)) + if (cpu_has(c, X86_FEATURE_SMAP)) { +#ifdef CONFIG_X86_SMAP set_in_cr4(X86_CR4_SMAP); +#else + clear_in_cr4(X86_CR4_SMAP); +#endif + } } /* @@ -1135,7 +1142,7 @@ /* Flags to clear on syscall */ wrmsrl(MSR_SYSCALL_MASK, X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| - X86_EFLAGS_IOPL|X86_EFLAGS_AC); + X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); } /* --- linux-3.13.0.orig/arch/x86/kernel/cpu/intel.c +++ linux-3.13.0/arch/x86/kernel/cpu/intel.c @@ -154,6 +154,21 @@ setup_clear_cpu_cap(X86_FEATURE_ERMS); } } + + /* + * Intel Quark Core DevMan_001.pdf section 6.4.11 + * "The operating system also is required to invalidate (i.e., flush) + * the TLB when any changes are made to any of the page table entries. + * The operating system must reload CR3 to cause the TLB to be flushed" + * + * As a result cpu_has_pge() in arch/x86/include/asm/tlbflush.h should + * be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE + * to be modified + */ + if (c->x86 == 5 && c->x86_model == 9) { + pr_info("Disabling PGE capability bit\n"); + setup_clear_cpu_cap(X86_FEATURE_PGE); + } } #ifdef CONFIG_X86_32 @@ -196,6 +211,14 @@ } } +static int forcepae; +static int __init forcepae_setup(char *__unused) +{ + forcepae = 1; + return 1; +} +__setup("forcepae", forcepae_setup); + static void intel_workarounds(struct cpuinfo_x86 *c) { unsigned long lo, hi; @@ -226,6 +249,17 @@ clear_cpu_cap(c, X86_FEATURE_SEP); /* + * PAE CPUID issue: many Pentium M report no PAE but may have a + * functionally usable PAE implementation. + * Forcefully enable PAE if kernel parameter "forcepae" is present. + */ + if (forcepae) { + printk(KERN_WARNING "PAE forced!\n"); + set_cpu_cap(c, X86_FEATURE_PAE); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); + } + + /* * P4 Xeon errata 037 workaround. * Hardware prefetcher may cause stale data to be loaded into the cache. 
*/ @@ -628,7 +662,7 @@ tlb_flushall_shift = 5; break; case 0x63a: /* Ivybridge */ - tlb_flushall_shift = 1; + tlb_flushall_shift = 2; break; default: tlb_flushall_shift = 6; --- linux-3.13.0.orig/arch/x86/kernel/cpu/mshyperv.c +++ linux-3.13.0/arch/x86/kernel/cpu/mshyperv.c @@ -26,6 +26,7 @@ #include #include #include +#include struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); @@ -66,6 +67,7 @@ .rating = 400, /* use this when running on Hyperv*/ .read = read_hv_clock, .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static void __init ms_hyperv_init_platform(void) @@ -91,20 +93,16 @@ lapic_timer_frequency = hv_lapic_frequency; printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n", lapic_timer_frequency); - - /* - * On Hyper-V, when we are booting off an EFI firmware stack, - * we do not have many legacy devices including PIC, PIT etc. - */ - if (efi_enabled(EFI_BOOT)) { - printk(KERN_INFO "HyperV: Using null_legacy_pic\n"); - legacy_pic = &null_legacy_pic; - } } #endif if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); + +#ifdef CONFIG_X86_IO_APIC + no_timer_check = 1; +#endif + } const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { --- linux-3.13.0.orig/arch/x86/kernel/cpu/perf_event.c +++ linux-3.13.0/arch/x86/kernel/cpu/perf_event.c @@ -1192,6 +1192,9 @@ for (i = 0; i < cpuc->n_events; i++) { if (event == cpuc->event_list[i]) { + if (i >= cpuc->n_events - cpuc->n_added) + --cpuc->n_added; + if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(cpuc, event); --- linux-3.13.0.orig/arch/x86/kernel/cpu/perf_event_intel.c +++ linux-3.13.0/arch/x86/kernel/cpu/perf_event_intel.c @@ -1385,6 +1385,15 @@ intel_pmu_lbr_read(); /* + * CondChgd bit 63 doesn't mean any overflow status. Ignore + * and clear the bit. + */ + if (__test_and_clear_bit(63, (unsigned long *)&status)) { + if (!status) + goto done; + } + + /* * PEBS overflow sets bit 62 in the global status register */ if (__test_and_clear_bit(62, (unsigned long *)&status)) { --- linux-3.13.0.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ linux-3.13.0/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -2878,6 +2878,17 @@ return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id()); } +/* + * Using uncore_pmu_event_init pmu event_init callback + * as a detection point for uncore events. + */ +static int uncore_pmu_event_init(struct perf_event *event); + +static bool is_uncore_event(struct perf_event *event) +{ + return event->pmu->event_init == uncore_pmu_event_init; +} + static int uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) { @@ -2892,13 +2903,18 @@ return -EINVAL; n = box->n_events; - box->event_list[n] = leader; - n++; + + if (is_uncore_event(leader)) { + box->event_list[n] = leader; + n++; + } + if (!dogrp) return n; list_for_each_entry(event, &leader->sibling_list, group_entry) { - if (event->state <= PERF_EVENT_STATE_OFF) + if (!is_uncore_event(event) || + event->state <= PERF_EVENT_STATE_OFF) continue; if (n >= max_count) --- linux-3.13.0.orig/arch/x86/kernel/dumpstack_64.c +++ linux-3.13.0/arch/x86/kernel/dumpstack_64.c @@ -24,7 +24,6 @@ [ DEBUG_STACK-1 ] = "#DB", [ NMI_STACK-1 ] = "NMI", [ DOUBLEFAULT_STACK-1 ] = "#DF", - [ STACKFAULT_STACK-1 ] = "#SS", [ MCE_STACK-1 ] = "#MC", #if DEBUG_STKSZ > EXCEPTION_STKSZ [ N_EXCEPTION_STACKS ... 
--- linux-3.13.0.orig/arch/x86/kernel/early-quirks.c +++ linux-3.13.0/arch/x86/kernel/early-quirks.c @@ -203,18 +203,15 @@ revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); /* - * Revision 13 of all triggering devices id in this quirk have - * a problem draining interrupts when irq remapping is enabled, - * and should be flagged as broken. Additionally revisions 0x12 - * and 0x22 of device id 0x3405 has this problem. + * Revision <= 13 of all triggering devices id in this quirk + * have a problem draining interrupts when irq remapping is + * enabled, and should be flagged as broken. Additionally + * revision 0x22 of device id 0x3405 has this problem. */ - if (revision == 0x13) + if (revision <= 0x13) set_irq_remapping_broken(); - else if ((device == 0x3405) && - ((revision == 0x12) || - (revision == 0x22))) + else if (device == 0x3405 && revision == 0x22) set_irq_remapping_broken(); - } /* --- linux-3.13.0.orig/arch/x86/kernel/entry_32.S +++ linux-3.13.0/arch/x86/kernel/entry_32.S @@ -431,8 +431,9 @@ jnz sysenter_audit sysenter_do_call: cmpl $(NR_syscalls), %eax - jae syscall_badsys + jae sysenter_badsys call *sys_call_table(,%eax,4) +sysenter_after_call: movl %eax,PT_EAX(%esp) LOCKDEP_SYS_EXIT DISABLE_INTERRUPTS(CLBR_ANY) @@ -513,6 +514,7 @@ jae syscall_badsys syscall_call: call *sys_call_table(,%eax,4) +syscall_after_call: movl %eax,PT_EAX(%esp) # store the return value syscall_exit: LOCKDEP_SYS_EXIT @@ -527,6 +529,7 @@ restore_all: TRACE_IRQS_IRET restore_all_notrace: +#ifdef CONFIG_X86_ESPFIX32 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS # Warning: PT_OLDSS(%esp) contains the wrong/random values if we # are returning to the kernel. @@ -537,6 +540,7 @@ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax CFI_REMEMBER_STATE je ldt_ss # returning to user-space with LDT SS +#endif restore_nocheck: RESTORE_REGS 4 # skip orig_eax/error_code irq_return: @@ -549,13 +553,9 @@ .previous _ASM_EXTABLE(irq_return,iret_exc) +#ifdef CONFIG_X86_ESPFIX32 CFI_RESTORE_STATE ldt_ss: - larl PT_OLDSS(%esp), %eax - jnz restore_nocheck - testl $0x00400000, %eax # returning to 32bit stack? - jnz restore_nocheck # allright, normal return - #ifdef CONFIG_PARAVIRT /* * The kernel can't run on a non-flat stack if paravirt mode @@ -597,6 +597,7 @@ lss (%esp), %esp /* switch to espfix segment */ CFI_ADJUST_CFA_OFFSET -8 jmp restore_nocheck +#endif CFI_ENDPROC ENDPROC(system_call) @@ -687,8 +688,13 @@ END(syscall_fault) syscall_badsys: - movl $-ENOSYS,PT_EAX(%esp) - jmp resume_userspace + movl $-ENOSYS,%eax + jmp syscall_after_call +END(syscall_badsys) + +sysenter_badsys: + movl $-ENOSYS,%eax + jmp sysenter_after_call END(syscall_badsys) CFI_ENDPROC /* @@ -704,6 +710,7 @@ * the high word of the segment base from the GDT and swiches to the * normal stack and adjusts ESP with the matching offset. 
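The reworked early quirk above flags interrupt remapping as broken for any triggering device at revision 0x13 or below, plus device 0x3405 at revision 0x22; the old explicit test for 0x3405 revision 0x12 is now covered by the first comparison. A small stand-alone model of that decision (the device/revision values in main() are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* True means interrupt remapping should be flagged as broken. */
static bool remapping_broken(unsigned int device, unsigned int revision)
{
    if (revision <= 0x13)
        return true;
    if (device == 0x3405 && revision == 0x22)
        return true;
    return false;
}

int main(void)
{
    printf("dev 0x3403 rev 0x12 -> %d\n", remapping_broken(0x3403, 0x12)); /* 1 */
    printf("dev 0x3405 rev 0x22 -> %d\n", remapping_broken(0x3405, 0x22)); /* 1 */
    printf("dev 0x3405 rev 0x30 -> %d\n", remapping_broken(0x3405, 0x30)); /* 0 */
    return 0;
}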
*/ +#ifdef CONFIG_X86_ESPFIX32 /* fixup the stack */ mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ @@ -713,8 +720,10 @@ pushl_cfi %eax lss (%esp), %esp /* switch to the normal stack segment */ CFI_ADJUST_CFA_OFFSET -8 +#endif .endm .macro UNWIND_ESPFIX_STACK +#ifdef CONFIG_X86_ESPFIX32 movl %ss, %eax /* see if on espfix stack */ cmpw $__ESPFIX_SS, %ax @@ -725,6 +734,7 @@ /* switch to normal stack */ FIXUP_ESPFIX_STACK 27: +#endif .endm /* @@ -1355,11 +1365,13 @@ ENTRY(nmi) RING0_INT_FRAME ASM_CLAC +#ifdef CONFIG_X86_ESPFIX32 pushl_cfi %eax movl %ss, %eax cmpw $__ESPFIX_SS, %ax popl_cfi %eax je nmi_espfix_stack +#endif cmpl $ia32_sysenter_target,(%esp) je nmi_stack_fixup pushl_cfi %eax @@ -1399,6 +1411,7 @@ FIX_STACK 24, nmi_stack_correct, 1 jmp nmi_stack_correct +#ifdef CONFIG_X86_ESPFIX32 nmi_espfix_stack: /* We have a RING0_INT_FRAME here. * @@ -1420,6 +1433,7 @@ lss 12+4(%esp), %esp # back to espfix stack CFI_ADJUST_CFA_OFFSET -24 jmp irq_return +#endif CFI_ENDPROC END(nmi) --- linux-3.13.0.orig/arch/x86/kernel/entry_64.S +++ linux-3.13.0/arch/x86/kernel/entry_64.S @@ -58,6 +58,7 @@ #include #include #include +#include #include /* Avoid __ASSEMBLER__'ifying just for this. */ @@ -101,6 +102,39 @@ #endif .endm +#ifdef CONFIG_FRAME_POINTER +/* + * Stack traces will stop at the ftrace trampoline if the frame pointer + * is not set up properly. If fentry is used, we need to save a frame + * pointer for the parent as well as the function traced, because the + * fentry is called before the stack frame is set up, where as mcount + * is called afterward. + */ +.macro create_frame parent rip +#ifdef CC_USING_FENTRY + pushq \parent + pushq %rbp + movq %rsp, %rbp +#endif + pushq \rip + pushq %rbp + movq %rsp, %rbp +.endm + +.macro restore_frame +#ifdef CC_USING_FENTRY + addq $16, %rsp +#endif + popq %rbp + addq $8, %rsp +.endm +#else +.macro create_frame parent rip +.endm +.macro restore_frame +.endm +#endif /* CONFIG_FRAME_POINTER */ + ENTRY(ftrace_caller) /* Check if tracing was disabled (quick check) */ cmpl $0, function_trace_stop @@ -160,9 +194,13 @@ /* regs go into 4th parameter */ leaq (%rsp), %rcx + create_frame %rsi, %rdi + GLOBAL(ftrace_regs_call) call ftrace_stub + restore_frame + /* Copy flags back to SS, to restore them */ movq EFLAGS(%rsp), %rax movq %rax, SS(%rsp) @@ -541,11 +579,14 @@ testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? jz 1f - testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET - jnz int_ret_from_sys_call - - RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET - jmp ret_from_sys_call # go to the SYSRET fastpath + /* + * By the time we get here, we have no idea whether our pt_regs, + * ti flags, and ti status came from the 64-bit SYSCALL fast path, + * the slow path, or one of the ia32entry paths. + * Use int_ret_from_sys_call to return, since it can safely handle + * all of the above. + */ + jmp int_ret_from_sys_call 1: subq $REST_SKIP, %rsp # leave space for volatiles @@ -1041,32 +1082,52 @@ irq_return: INTERRUPT_RETURN - _ASM_EXTABLE(irq_return, bad_iret) -#ifdef CONFIG_PARAVIRT ENTRY(native_iret) - iretq - _ASM_EXTABLE(native_iret, bad_iret) + /* + * Are we returning to a stack segment from the LDT? Note: in + * 64-bit mode SS:RSP on the exception stack is always valid. + */ +#ifdef CONFIG_X86_ESPFIX64 + testb $4,(SS-RIP)(%rsp) + jnz native_irq_return_ldt #endif - .section .fixup,"ax" -bad_iret: +.global native_irq_return_iret +native_irq_return_iret: /* - * The iret traps when the %cs or %ss being restored is bogus. 
- * We've lost the original trap vector and error code. - * #GPF is the most likely one to get for an invalid selector. - * So pretend we completed the iret and took the #GPF in user mode. - * - * We are now running with the kernel GS after exception recovery. - * But error_entry expects us to have user GS to match the user %cs, - * so swap back. + * This may fault. Non-paranoid faults on return to userspace are + * handled by fixup_bad_iret. These include #SS, #GP, and #NP. + * Double-faults due to espfix64 are handled in do_double_fault. + * Other faults here are fatal. */ - pushq $0 + iretq +#ifdef CONFIG_X86_ESPFIX64 +native_irq_return_ldt: + pushq_cfi %rax + pushq_cfi %rdi SWAPGS - jmp general_protection - - .previous + movq PER_CPU_VAR(espfix_waddr),%rdi + movq %rax,(0*8)(%rdi) /* RAX */ + movq (2*8)(%rsp),%rax /* RIP */ + movq %rax,(1*8)(%rdi) + movq (3*8)(%rsp),%rax /* CS */ + movq %rax,(2*8)(%rdi) + movq (4*8)(%rsp),%rax /* RFLAGS */ + movq %rax,(3*8)(%rdi) + movq (6*8)(%rsp),%rax /* SS */ + movq %rax,(5*8)(%rdi) + movq (5*8)(%rsp),%rax /* RSP */ + movq %rax,(4*8)(%rdi) + andl $0xffff0000,%eax + popq_cfi %rdi + orq PER_CPU_VAR(espfix_stack),%rax + SWAPGS + movq %rax,%rsp + popq_cfi %rax + jmp native_irq_return_iret +#endif /* edi: workmask, edx: work */ retint_careful: @@ -1110,9 +1171,9 @@ call preempt_schedule_irq jmp exit_intr #endif - CFI_ENDPROC END(common_interrupt) + /* * End of kprobes section */ @@ -1484,7 +1545,7 @@ paranoidzeroentry_ist debug do_debug DEBUG_STACK paranoidzeroentry_ist int3 do_int3 DEBUG_STACK -paranoiderrorentry stack_segment do_stack_segment +errorentry stack_segment do_stack_segment #ifdef CONFIG_XEN zeroentry xen_debug do_debug zeroentry xen_int3 do_int3 @@ -1594,16 +1655,15 @@ /* * There are two places in the kernel that can potentially fault with - * usergs. Handle them here. The exception handlers after iret run with - * kernel gs again, so don't set the user space flag. B stepping K8s - * sometimes report an truncated RIP for IRET exceptions returning to - * compat mode. Check for these here too. + * usergs. Handle them here. B stepping K8s sometimes report a + * truncated RIP for IRET exceptions returning to compat mode. Check + * for these here too. */ error_kernelspace: incl %ebx - leaq irq_return(%rip),%rcx + leaq native_irq_return_iret(%rip),%rcx cmpq %rcx,RIP+8(%rsp) - je error_swapgs + je error_bad_iret movl %ecx,%eax /* zero extend */ cmpq %rax,RIP+8(%rsp) je bstep_iret @@ -1614,7 +1674,15 @@ bstep_iret: /* Fix truncated RIP */ movq %rcx,RIP+8(%rsp) - jmp error_swapgs + /* fall through */ + +error_bad_iret: + SWAPGS + mov %rsp,%rdi + call fixup_bad_iret + mov %rax,%rsp + decl %ebx /* Return to usergs */ + jmp error_sti CFI_ENDPROC END(error_entry) --- linux-3.13.0.orig/arch/x86/kernel/espfix_64.c +++ linux-3.13.0/arch/x86/kernel/espfix_64.c @@ -0,0 +1,208 @@ +/* ----------------------------------------------------------------------- * + * + * Copyright 2014 Intel Corporation; author: H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * ----------------------------------------------------------------------- */ + +/* + * The IRET instruction, when returning to a 16-bit segment, only + * restores the bottom 16 bits of the user space stack pointer. This + * causes some 16-bit software to break, but it also leaks kernel state + * to user space. + * + * This works around this by creating percpu "ministacks", each of which + * is mapped 2^16 times 64K apart. When we detect that the return SS is + * on the LDT, we copy the IRET frame to the ministack and use the + * relevant alias to return to userspace. The ministacks are mapped + * readonly, so if the IRET fault we promote #GP to #DF which is an IST + * vector and thus has its own stack; we then do the fixup in the #DF + * handler. + * + * This file sets up the ministacks and the related page tables. The + * actual ministack invocation is in entry_64.S. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Note: we only need 6*8 = 48 bytes for the espfix stack, but round + * it up to a cache line to avoid unnecessary sharing. + */ +#define ESPFIX_STACK_SIZE (8*8UL) +#define ESPFIX_STACKS_PER_PAGE (PAGE_SIZE/ESPFIX_STACK_SIZE) + +/* There is address space for how many espfix pages? */ +#define ESPFIX_PAGE_SPACE (1UL << (PGDIR_SHIFT-PAGE_SHIFT-16)) + +#define ESPFIX_MAX_CPUS (ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE) +#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS +# error "Need more than one PGD for the ESPFIX hack" +#endif + +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) + +/* This contains the *bottom* address of the espfix stack */ +DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); +DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); + +/* Initialization mutex - should this be a spinlock? */ +static DEFINE_MUTEX(espfix_init_mutex); + +/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */ +#define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE) +static void *espfix_pages[ESPFIX_MAX_PAGES]; + +static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD] + __aligned(PAGE_SIZE); + +static unsigned int page_random, slot_random; + +/* + * This returns the bottom address of the espfix stack for a specific CPU. + * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case + * we have to account for some amount of padding at the end of each page. + */ +static inline unsigned long espfix_base_addr(unsigned int cpu) +{ + unsigned long page, slot; + unsigned long addr; + + page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random; + slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE; + addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE); + addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16); + addr += ESPFIX_BASE_ADDR; + return addr; +} + +#define PTE_STRIDE (65536/PAGE_SIZE) +#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE) +#define ESPFIX_PMD_CLONES PTRS_PER_PMD +#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES)) + +#define PGTABLE_PROT ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX) + +static void init_espfix_random(void) +{ + unsigned long rand; + + /* + * This is run before the entropy pools are initialized, + * but this is hopefully better than nothing. 
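espfix_base_addr() above keeps the low 16 bits of the per-cpu linear offset in place and shifts everything above them up by 16, so each 64-byte ministack ends up aliased at 64K strides inside the espfix region. A user-space sketch of the same arithmetic; ESPFIX_BASE_ADDR, page_random and slot_random are stand-in values here (the kernel randomizes the latter two at boot):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT              12
#define ESPFIX_STACK_SIZE       (8 * 8ULL)                   /* 64 bytes per ministack */
#define ESPFIX_STACKS_PER_PAGE  (4096 / ESPFIX_STACK_SIZE)   /* 64 ministacks per page */

/* Placeholder constants: the kernel takes the base from its address map and
 * picks page_random/slot_random in init_espfix_random(). */
#define ESPFIX_BASE_ADDR        0xffffff0000000000ULL
static uint64_t page_random = 3, slot_random = 17;

static uint64_t model_espfix_base_addr(unsigned int cpu)
{
    uint64_t page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
    uint64_t slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
    uint64_t addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);

    /* Keep the low 16 bits, move the rest up by 16 so the alias repeats
     * every 64K, then offset into the espfix region. */
    addr = (addr & 0xffffULL) | ((addr & ~0xffffULL) << 16);
    return addr + ESPFIX_BASE_ADDR;
}

int main(void)
{
    for (unsigned int cpu = 0; cpu < 4; cpu++)
        printf("cpu %u -> espfix stack base %#llx\n",
               cpu, (unsigned long long)model_espfix_base_addr(cpu));
    return 0;
}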
+ */ + if (!arch_get_random_long(&rand)) { + /* The constant is an arbitrary large prime */ + rdtscll(rand); + rand *= 0xc345c6b72fd16123UL; + } + + slot_random = rand % ESPFIX_STACKS_PER_PAGE; + page_random = (rand / ESPFIX_STACKS_PER_PAGE) + & (ESPFIX_PAGE_SPACE - 1); +} + +void __init init_espfix_bsp(void) +{ + pgd_t *pgd_p; + pteval_t ptemask; + + ptemask = __supported_pte_mask; + + /* Install the espfix pud into the kernel page directory */ + pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)]; + pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page); + + /* Randomize the locations */ + init_espfix_random(); + + /* The rest is the same as for any other processor */ + init_espfix_ap(); +} + +void init_espfix_ap(void) +{ + unsigned int cpu, page; + unsigned long addr; + pud_t pud, *pud_p; + pmd_t pmd, *pmd_p; + pte_t pte, *pte_p; + int n; + void *stack_page; + pteval_t ptemask; + + /* We only have to do this once... */ + if (likely(this_cpu_read(espfix_stack))) + return; /* Already initialized */ + + cpu = smp_processor_id(); + addr = espfix_base_addr(cpu); + page = cpu/ESPFIX_STACKS_PER_PAGE; + + /* Did another CPU already set this up? */ + stack_page = ACCESS_ONCE(espfix_pages[page]); + if (likely(stack_page)) + goto done; + + mutex_lock(&espfix_init_mutex); + + /* Did we race on the lock? */ + stack_page = ACCESS_ONCE(espfix_pages[page]); + if (stack_page) + goto unlock_done; + + ptemask = __supported_pte_mask; + + pud_p = &espfix_pud_page[pud_index(addr)]; + pud = *pud_p; + if (!pud_present(pud)) { + pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); + pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); + paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); + for (n = 0; n < ESPFIX_PUD_CLONES; n++) + set_pud(&pud_p[n], pud); + } + + pmd_p = pmd_offset(&pud, addr); + pmd = *pmd_p; + if (!pmd_present(pmd)) { + pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); + pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); + paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); + for (n = 0; n < ESPFIX_PMD_CLONES; n++) + set_pmd(&pmd_p[n], pmd); + } + + pte_p = pte_offset_kernel(&pmd, addr); + stack_page = (void *)__get_free_page(GFP_KERNEL); + pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); + for (n = 0; n < ESPFIX_PTE_CLONES; n++) + set_pte(&pte_p[n*PTE_STRIDE], pte); + + /* Job is done for this CPU and any CPU which shares this page */ + ACCESS_ONCE(espfix_pages[page]) = stack_page; + +unlock_done: + mutex_unlock(&espfix_init_mutex); +done: + this_cpu_write(espfix_stack, addr); + this_cpu_write(espfix_waddr, (unsigned long)stack_page + + (addr & ~PAGE_MASK)); +} --- linux-3.13.0.orig/arch/x86/kernel/ftrace.c +++ linux-3.13.0/arch/x86/kernel/ftrace.c @@ -77,8 +77,7 @@ return addr >= start && addr < end; } -static int -do_ftrace_mod_code(unsigned long ip, const void *new_code) +static unsigned long text_ip_addr(unsigned long ip) { /* * On x86_64, kernel text mappings are mapped read-only with @@ -91,7 +90,7 @@ if (within(ip, (unsigned long)_text, (unsigned long)_etext)) ip = (unsigned long)__va(__pa_symbol(ip)); - return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE); + return ip; } static const unsigned char *ftrace_nop_replace(void) @@ -123,8 +122,10 @@ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; + ip = text_ip_addr(ip); + /* replace the text with the new text */ - if (do_ftrace_mod_code(ip, new_code)) + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) return -EPERM; sync_core(); @@ -221,37 +222,51 @@ return -EINVAL; } 
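init_espfix_ap() above uses a check / lock / re-check sequence so that only the first CPU to touch a given espfix page builds the page tables, while every later CPU sharing that page takes the lock-free fast path. A hedged pthread sketch of that publication pattern, with the page-table work reduced to a plain allocation:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NPAGES 4

static void *pages[NPAGES];                 /* published once initialized */
static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *get_espfix_page(unsigned int idx)
{
    /* Fast path: someone already published this page. */
    void *p = __atomic_load_n(&pages[idx], __ATOMIC_ACQUIRE);
    if (p)
        return p;

    pthread_mutex_lock(&init_mutex);
    /* Did another thread win the race while we waited for the lock? */
    p = pages[idx];
    if (!p) {
        p = calloc(1, 4096);                /* stands in for the page-table setup */
        __atomic_store_n(&pages[idx], p, __ATOMIC_RELEASE);
    }
    pthread_mutex_unlock(&init_mutex);
    return p;
}

int main(void)
{
    void *first = get_espfix_page(0);
    void *again = get_espfix_page(0);       /* served from the fast path */

    printf("page 0 at %p (again: %p)\n", first, again);
    return 0;
}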
-int ftrace_update_ftrace_func(ftrace_func_t func) +static unsigned long ftrace_update_func; + +static int update_ftrace_func(unsigned long ip, void *new) { - unsigned long ip = (unsigned long)(&ftrace_call); - unsigned char old[MCOUNT_INSN_SIZE], *new; + unsigned char old[MCOUNT_INSN_SIZE]; int ret; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); - new = ftrace_call_replace(ip, (unsigned long)func); + memcpy(old, (void *)ip, MCOUNT_INSN_SIZE); + + ftrace_update_func = ip; + /* Make sure the breakpoints see the ftrace_update_func update */ + smp_wmb(); /* See comment above by declaration of modifying_ftrace_code */ atomic_inc(&modifying_ftrace_code); ret = ftrace_modify_code(ip, old, new); + atomic_dec(&modifying_ftrace_code); + + return ret; +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long ip = (unsigned long)(&ftrace_call); + unsigned char *new; + int ret; + + new = ftrace_call_replace(ip, (unsigned long)func); + ret = update_ftrace_func(ip, new); + /* Also update the regs callback function */ if (!ret) { ip = (unsigned long)(&ftrace_regs_call); - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); - ret = ftrace_modify_code(ip, old, new); + ret = update_ftrace_func(ip, new); } - atomic_dec(&modifying_ftrace_code); - return ret; } static int is_ftrace_caller(unsigned long ip) { - if (ip == (unsigned long)(&ftrace_call) || - ip == (unsigned long)(&ftrace_regs_call)) + if (ip == ftrace_update_func) return 1; return 0; @@ -677,45 +692,41 @@ #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); -static int ftrace_mod_jmp(unsigned long ip, - int old_offset, int new_offset) +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) { - unsigned char code[MCOUNT_INSN_SIZE]; + static union ftrace_code_union calc; - if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) - return -EFAULT; + /* Jmp not a call (ignore the .e8) */ + calc.e8 = 0xe9; + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - if (code[0] != 0xe9 || old_offset != *(int *)(&code[1])) - return -EINVAL; + /* + * ftrace external locks synchronize the access to the static variable. 
+ */ + return calc.code; +} - *(int *)(&code[1]) = new_offset; +static int ftrace_mod_jmp(unsigned long ip, void *func) +{ + unsigned char *new; - if (do_ftrace_mod_code(ip, &code)) - return -EPERM; + new = ftrace_jmp_replace(ip, (unsigned long)func); - return 0; + return update_ftrace_func(ip, new); } int ftrace_enable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - - old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_graph_caller); } int ftrace_disable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); - int old_offset, new_offset; - - old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE); - new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE); - return ftrace_mod_jmp(ip, old_offset, new_offset); + return ftrace_mod_jmp(ip, &ftrace_stub); } #endif /* !CONFIG_DYNAMIC_FTRACE */ --- linux-3.13.0.orig/arch/x86/kernel/head_32.S +++ linux-3.13.0/arch/x86/kernel/head_32.S @@ -544,6 +544,10 @@ /* This is global to keep gas from relaxing the jumps */ ENTRY(early_idt_handler) cld + + cmpl $2,(%esp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + cmpl $2,%ss:early_recursion_flag je hlt_loop incl %ss:early_recursion_flag @@ -594,8 +598,9 @@ pop %edx pop %ecx pop %eax - addl $8,%esp /* drop vector number and error code */ decl %ss:early_recursion_flag +is_nmi: + addl $8,%esp /* drop vector number and error code */ iret ENDPROC(early_idt_handler) --- linux-3.13.0.orig/arch/x86/kernel/head_64.S +++ linux-3.13.0/arch/x86/kernel/head_64.S @@ -343,6 +343,9 @@ ENTRY(early_idt_handler) cld + cmpl $2,(%rsp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + cmpl $2,early_recursion_flag(%rip) jz 1f incl early_recursion_flag(%rip) @@ -405,8 +408,9 @@ popq %rdx popq %rcx popq %rax - addq $16,%rsp # drop vector number and error code decl early_recursion_flag(%rip) +is_nmi: + addq $16,%rsp # drop vector number and error code INTERRUPT_RETURN ENDPROC(early_idt_handler) --- linux-3.13.0.orig/arch/x86/kernel/i387.c +++ linux-3.13.0/arch/x86/kernel/i387.c @@ -86,10 +86,19 @@ void __kernel_fpu_end(void) { - if (use_eager_fpu()) - math_state_restore(); - else + if (use_eager_fpu()) { + /* + * For eager fpu, most the time, tsk_used_math() is true. + * Restore the user math as we are done with the kernel usage. + * At few instances during thread exit, signal handling etc, + * tsk_used_math() is false. Those few places will take proper + * actions, so we don't need to restore the math here. + */ + if (likely(tsk_used_math(current))) + math_state_restore(); + } else { stts(); + } } EXPORT_SYMBOL(__kernel_fpu_end); --- linux-3.13.0.orig/arch/x86/kernel/i8259.c +++ linux-3.13.0/arch/x86/kernel/i8259.c @@ -299,13 +299,31 @@ static void init_8259A(int auto_eoi) { unsigned long flags; + unsigned char probe_val = ~(1 << PIC_CASCADE_IR); + unsigned char new_val; i8259A_auto_eoi = auto_eoi; raw_spin_lock_irqsave(&i8259A_lock, flags); - outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ + /* + * Check to see if we have a PIC. + * Mask all except the cascade and read + * back the value we just wrote. If we don't + * have a PIC, we will read 0xff as opposed to the + * value we wrote. 
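ftrace_jmp_replace() above builds a 5-byte near jump: the 0xe9 opcode followed by a 32-bit displacement measured from the end of the instruction. A stand-alone model of that encoding which only fills a buffer (the addresses are illustrative and no live kernel text is patched):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define INSN_SIZE 5    /* 0xe9 + rel32, the same length as the mcount call site */

/* Encode "jmp target" as it would appear at address ip. */
static void encode_jmp(uint8_t buf[INSN_SIZE], uint64_t ip, uint64_t target)
{
    int32_t rel = (int32_t)(target - (ip + INSN_SIZE)); /* relative to the next insn */

    buf[0] = 0xe9;
    memcpy(&buf[1], &rel, sizeof(rel));
}

int main(void)
{
    uint8_t buf[INSN_SIZE];
    uint64_t ip     = 0xffffffff81000000ULL;
    uint64_t target = 0xffffffff81000020ULL;

    encode_jmp(buf, ip, target);
    for (int i = 0; i < INSN_SIZE; i++)
        printf("%02x ", buf[i]);            /* e9 1b 00 00 00 */
    printf("\n");
    return 0;
}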
+ */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ + outb(probe_val, PIC_MASTER_IMR); + new_val = inb(PIC_MASTER_IMR); + if (new_val != probe_val) { + printk(KERN_INFO "Using NULL legacy PIC\n"); + legacy_pic = &null_legacy_pic; + raw_spin_unlock_irqrestore(&i8259A_lock, flags); + return; + } + + outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ /* * outb_pic - this has to work on a wide range of PC hardware. --- linux-3.13.0.orig/arch/x86/kernel/kprobes/core.c +++ linux-3.13.0/arch/x86/kernel/kprobes/core.c @@ -1017,6 +1017,15 @@ regs->flags &= ~X86_EFLAGS_IF; trace_hardirqs_off(); regs->ip = (unsigned long)(jp->entry); + + /* + * jprobes use jprobe_return() which skips the normal return + * path of the function, and this messes up the accounting of the + * function graph tracer to get messed up. + * + * Pause function graph tracing while performing the jprobe function. + */ + pause_graph_tracing(); return 1; } @@ -1042,24 +1051,25 @@ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); u8 *addr = (u8 *) (regs->ip - 1); struct jprobe *jp = container_of(p, struct jprobe, kp); + void *saved_sp = kcb->jprobe_saved_sp; if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { - if (stack_addr(regs) != kcb->jprobe_saved_sp) { + if (stack_addr(regs) != saved_sp) { struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; printk(KERN_ERR "current sp %p does not match saved sp %p\n", - stack_addr(regs), kcb->jprobe_saved_sp); + stack_addr(regs), saved_sp); printk(KERN_ERR "Saved registers for jprobe %p\n", jp); show_regs(saved_regs); printk(KERN_ERR "Current registers\n"); show_regs(regs); BUG(); } + /* It's OK to start function graph tracing again */ + unpause_graph_tracing(); *regs = kcb->jprobe_saved_regs; - memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), - kcb->jprobes_stack, - MIN_STACK_SIZE(kcb->jprobe_saved_sp)); + memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); preempt_enable_no_resched(); return 1; } --- linux-3.13.0.orig/arch/x86/kernel/kvm.c +++ linux-3.13.0/arch/x86/kernel/kvm.c @@ -280,7 +280,14 @@ static void __init paravirt_ops_setup(void) { pv_info.name = "KVM"; - pv_info.paravirt_enabled = 1; + + /* + * KVM isn't paravirt in the sense of paravirt_enabled. A KVM + * guest kernel works like a bare metal kernel with additional + * features, and paravirt_enabled is about features that are + * missing. 
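The probe above masks everything except the cascade line on the master 8259 and reads the mask register back; on hardware without a legacy PIC the read floats to 0xff, so the kernel switches to the null legacy PIC instead of programming a device that is not there. A user-space model of that detection step, with the I/O port replaced by a variable (set pic_present to false to exercise the no-PIC path):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PIC_CASCADE_IR 2

/* Pretend hardware: with no PIC present, reads of the IMR return 0xff. */
static bool pic_present = true;
static uint8_t imr_latch;

static void    model_outb(uint8_t val) { if (pic_present) imr_latch = val; }
static uint8_t model_inb(void)         { return pic_present ? imr_latch : 0xff; }

int main(void)
{
    uint8_t probe_val = (uint8_t)~(1 << PIC_CASCADE_IR);

    model_outb(probe_val);
    if (model_inb() != probe_val)
        printf("no 8259 found, using the null legacy PIC\n");
    else
        printf("8259 present, IMR reads back %#x\n", model_inb());
    return 0;
}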
+ */ + pv_info.paravirt_enabled = 0; if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY)) pv_cpu_ops.io_delay = kvm_io_delay; @@ -500,6 +507,38 @@ #endif } +static noinline uint32_t __kvm_cpuid_base(void) +{ + if (boot_cpu_data.cpuid_level < 0) + return 0; /* So we don't blow up on old processors */ + + if (cpu_has_hypervisor) + return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); + + return 0; +} + +static inline uint32_t kvm_cpuid_base(void) +{ + static int kvm_cpuid_base = -1; + + if (kvm_cpuid_base == -1) + kvm_cpuid_base = __kvm_cpuid_base(); + + return kvm_cpuid_base; +} + +bool kvm_para_available(void) +{ + return kvm_cpuid_base() != 0; +} +EXPORT_SYMBOL_GPL(kvm_para_available); + +unsigned int kvm_arch_para_features(void) +{ + return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES); +} + static uint32_t __init kvm_detect(void) { return kvm_cpuid_base(); --- linux-3.13.0.orig/arch/x86/kernel/kvmclock.c +++ linux-3.13.0/arch/x86/kernel/kvmclock.c @@ -263,8 +263,6 @@ #endif kvm_get_preset_lpj(); clocksource_register_hz(&kvm_clock, NSEC_PER_SEC); - pv_info.paravirt_enabled = 1; - pv_info.name = "KVM"; if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT)) pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); --- linux-3.13.0.orig/arch/x86/kernel/ldt.c +++ linux-3.13.0/arch/x86/kernel/ldt.c @@ -229,6 +229,11 @@ } } + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out_unlock; + } + fill_ldt(&ldt, &ldt_info); if (oldmode) ldt.avl = 0; --- linux-3.13.0.orig/arch/x86/kernel/microcode_intel_early.c +++ linux-3.13.0/arch/x86/kernel/microcode_intel_early.c @@ -321,7 +321,7 @@ unsigned int mc_saved_count = mc_saved_data->mc_saved_count; int i; - while (leftover) { + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { mc_header = (struct microcode_header_intel *)ucode_ptr; mc_size = get_totalsize(mc_header); --- linux-3.13.0.orig/arch/x86/kernel/paravirt_patch_64.c +++ linux-3.13.0/arch/x86/kernel/paravirt_patch_64.c @@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); -DEF_NATIVE(pv_cpu_ops, iret, "iretq"); DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); @@ -50,7 +49,6 @@ PATCH_SITE(pv_irq_ops, save_fl); PATCH_SITE(pv_irq_ops, irq_enable); PATCH_SITE(pv_irq_ops, irq_disable); - PATCH_SITE(pv_cpu_ops, iret); PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); PATCH_SITE(pv_cpu_ops, usergs_sysret32); PATCH_SITE(pv_cpu_ops, usergs_sysret64); --- linux-3.13.0.orig/arch/x86/kernel/pci-dma.c +++ linux-3.13.0/arch/x86/kernel/pci-dma.c @@ -100,8 +100,10 @@ flag |= __GFP_ZERO; again: page = NULL; - if (!(flag & GFP_ATOMIC)) + /* CMA can be used only in the context which permits sleeping */ + if (flag & __GFP_WAIT) page = dma_alloc_from_contiguous(dev, count, get_order(size)); + /* fallback */ if (!page) page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); if (!page) --- linux-3.13.0.orig/arch/x86/kernel/process_64.c +++ linux-3.13.0/arch/x86/kernel/process_64.c @@ -286,24 +286,9 @@ fpu = switch_fpu_prepare(prev_p, next_p, cpu); - /* - * Reload esp0, LDT and the page table pointer: - */ + /* Reload esp0 and ss1. */ load_sp0(tss, next); - /* - * Switch DS and ES. - * This won't pick up thread selector changes, but I guess that is ok. 
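kvm_cpuid_base() above probes for the hypervisor CPUID leaf once and caches the result in a static variable initialised to -1, so kvm_para_available() and kvm_arch_para_features() can call it repeatedly without re-running CPUID. A minimal sketch of that memoisation pattern, with the real probe replaced by a stub returning an illustrative base:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real probe (hypervisor_cpuid_base() in the kernel). */
static uint32_t probe_cpuid_base(void)
{
    printf("probing for the hypervisor leaf...\n");   /* printed only once */
    return 0x40000000;                                /* illustrative base */
}

static uint32_t cpuid_base(void)
{
    static int32_t cached = -1;         /* -1 means "not probed yet" */

    if (cached == -1)
        cached = (int32_t)probe_cpuid_base();
    return (uint32_t)cached;
}

int main(void)
{
    printf("base = %#x\n", cpuid_base());
    printf("base = %#x\n", cpuid_base());   /* second call hits the cache */
    return 0;
}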
- */ - savesegment(es, prev->es); - if (unlikely(next->es | prev->es)) - loadsegment(es, next->es); - - savesegment(ds, prev->ds); - if (unlikely(next->ds | prev->ds)) - loadsegment(ds, next->ds); - - /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * @@ -312,41 +297,101 @@ savesegment(fs, fsindex); savesegment(gs, gsindex); + /* + * Load TLS before restoring any segments so that segment loads + * reference the correct GDT entries. + */ load_TLS(next, cpu); /* - * Leave lazy mode, flushing any hypercalls made here. - * This must be done before restoring TLS segments so - * the GDT and LDT are properly updated, and must be - * done before math_state_restore, so the TS bit is up - * to date. + * Leave lazy mode, flushing any hypercalls made here. This + * must be done after loading TLS entries in the GDT but before + * loading segments that might reference them, and and it must + * be done before math_state_restore, so the TS bit is up to + * date. */ arch_end_context_switch(next_p); + /* Switch DS and ES. + * + * Reading them only returns the selectors, but writing them (if + * nonzero) loads the full descriptor from the GDT or LDT. The + * LDT for next is loaded in switch_mm, and the GDT is loaded + * above. + * + * We therefore need to write new values to the segment + * registers on every context switch unless both the new and old + * values are zero. + * + * Note that we don't need to do anything for CS and SS, as + * those are saved and restored as part of pt_regs. + */ + savesegment(es, prev->es); + if (unlikely(next->es | prev->es)) + loadsegment(es, next->es); + + savesegment(ds, prev->ds); + if (unlikely(next->ds | prev->ds)) + loadsegment(ds, next->ds); + /* * Switch FS and GS. * - * Segment register != 0 always requires a reload. Also - * reload when it has changed. When prev process used 64bit - * base always reload to avoid an information leak. + * These are even more complicated than FS and GS: they have + * 64-bit bases are that controlled by arch_prctl. Those bases + * only differ from the values in the GDT or LDT if the selector + * is 0. + * + * Loading the segment register resets the hidden base part of + * the register to 0 or the value from the GDT / LDT. If the + * next base address zero, writing 0 to the segment register is + * much faster than using wrmsr to explicitly zero the base. + * + * The thread_struct.fs and thread_struct.gs values are 0 + * if the fs and gs bases respectively are not overridden + * from the values implied by fsindex and gsindex. They + * are nonzero, and store the nonzero base addresses, if + * the bases are overridden. + * + * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should + * be impossible. + * + * Therefore we need to reload the segment registers if either + * the old or new selector is nonzero, and we need to override + * the base address if next thread expects it to be overridden. + * + * This code is unnecessarily slow in the case where the old and + * new indexes are zero and the new base is nonzero -- it will + * unnecessarily write 0 to the selector before writing the new + * base address. + * + * Note: This all depends on arch_prctl being the only way that + * user code can override the segment base. Once wrfsbase and + * wrgsbase are enabled, most of this code will need to change. 
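The DS/ES handling above skips the segment register write entirely when both the outgoing and the incoming selector are zero, because writing a selector (even an unchanged one) refetches the descriptor. A tiny model of the "reload only if old | new is nonzero" rule; the selector values in main() are hypothetical:

#include <stdio.h>

static unsigned int loads;      /* counts the modelled segment loads */

static void switch_selector(unsigned short prev, unsigned short next)
{
    if (prev | next) {
        loads++;
        printf("loadsegment(%#x)\n", next);
    } else {
        printf("both selectors zero, nothing to reload\n");
    }
}

int main(void)
{
    switch_selector(0x00, 0x00);    /* common 64-bit case: skip the reload */
    switch_selector(0x2b, 0x00);    /* old was nonzero: must write 0 to clear it */
    switch_selector(0x00, 0x2b);    /* new is nonzero: must load the descriptor */
    printf("%u loads performed\n", loads);
    return 0;
}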
*/ if (unlikely(fsindex | next->fsindex | prev->fs)) { loadsegment(fs, next->fsindex); + /* - * Check if the user used a selector != 0; if yes - * clear 64bit base, since overloaded base is always - * mapped to the Null selector + * If user code wrote a nonzero value to FS, then it also + * cleared the overridden base address. + * + * XXX: if user code wrote 0 to FS and cleared the base + * address itself, we won't notice and we'll incorrectly + * restore the prior base address next time we reschdule + * the process. */ if (fsindex) prev->fs = 0; } - /* when next process has a 64bit base use it */ if (next->fs) wrmsrl(MSR_FS_BASE, next->fs); prev->fsindex = fsindex; if (unlikely(gsindex | next->gsindex | prev->gs)) { load_gs_index(next->gsindex); + + /* This works (and fails) the same way as fsindex above. */ if (gsindex) prev->gs = 0; } --- linux-3.13.0.orig/arch/x86/kernel/pvclock.c +++ linux-3.13.0/arch/x86/kernel/pvclock.c @@ -141,7 +141,46 @@ set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); } +static struct pvclock_vsyscall_time_info *pvclock_vdso_info; + +static struct pvclock_vsyscall_time_info * +pvclock_get_vsyscall_user_time_info(int cpu) +{ + if (!pvclock_vdso_info) { + BUG(); + return NULL; + } + + return &pvclock_vdso_info[cpu]; +} + +struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) +{ + return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; +} + #ifdef CONFIG_X86_64 +static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, + void *v) +{ + struct task_migration_notifier *mn = v; + struct pvclock_vsyscall_time_info *pvti; + + pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); + + /* this is NULL when pvclock vsyscall is not initialized */ + if (unlikely(pvti == NULL)) + return NOTIFY_DONE; + + pvti->migrate_count++; + + return NOTIFY_DONE; +} + +static struct notifier_block pvclock_migrate = { + .notifier_call = pvclock_task_migrate, +}; + /* * Initialize the generic pvclock vsyscall state. This will allocate * a/some page(s) for the per-vcpu pvclock information, set up a @@ -155,12 +194,17 @@ WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); + pvclock_vdso_info = i; + for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, __pa(i) + (idx*PAGE_SIZE), PAGE_KERNEL_VVAR); } + + register_task_migration_notifier(&pvclock_migrate); + return 0; } #endif --- linux-3.13.0.orig/arch/x86/kernel/quirks.c +++ linux-3.13.0/arch/x86/kernel/quirks.c @@ -529,7 +529,7 @@ return; pci_read_config_dword(nb_ht, 0x60, &val); - node = val & 7; + node = pcibus_to_node(dev->bus) | (val & 7); /* * Some hardware may return an invalid node ID, * so check it first: --- linux-3.13.0.orig/arch/x86/kernel/reboot.c +++ linux-3.13.0/arch/x86/kernel/reboot.c @@ -181,6 +181,16 @@ }, }, + /* ASRock */ + { /* Handle problems with rebooting on ASRock Q1900DC-ITX */ + .callback = set_pci_reboot, + .ident = "ASRock Q1900DC-ITX", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"), + DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"), + }, + }, + /* ASUS */ { /* Handle problems with rebooting on ASUS P4S800 */ .callback = set_bios_reboot, @@ -385,7 +395,46 @@ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"), }, }, - + { /* Handle problems with rebooting on the Latitude E6520. */ + .callback = set_pci_reboot, + .ident = "Dell Latitude E6520", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6520"), + }, + }, + { /* Handle problems with rebooting on the OptiPlex 790. 
*/ + .callback = set_pci_reboot, + .ident = "Dell OptiPlex 790", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 790"), + }, + }, + { /* Handle problems with rebooting on the OptiPlex 990. */ + .callback = set_pci_reboot, + .ident = "Dell OptiPlex 990", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"), + }, + }, + { /* Handle problems with rebooting on the Latitude E6220. */ + .callback = set_pci_reboot, + .ident = "Dell Latitude E6220", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6220"), + }, + }, + { /* Handle problems with rebooting on the OptiPlex 390. */ + .callback = set_pci_reboot, + .ident = "Dell OptiPlex 390", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 390"), + }, + }, { } }; --- linux-3.13.0.orig/arch/x86/kernel/resource.c +++ linux-3.13.0/arch/x86/kernel/resource.c @@ -37,10 +37,12 @@ void arch_remove_reservations(struct resource *avail) { - /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */ + /* + * Trim out BIOS area (high 2MB) and E820 regions. We do not remove + * the low 1MB unconditionally, as this area is needed for some ISA + * cards requiring a memory range, e.g. the i82365 PCMCIA controller. + */ if (avail->flags & IORESOURCE_MEM) { - if (avail->start < BIOS_END) - avail->start = BIOS_END; resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); remove_e820_regions(avail); --- linux-3.13.0.orig/arch/x86/kernel/signal.c +++ linux-3.13.0/arch/x86/kernel/signal.c @@ -673,6 +673,11 @@ * handler too. */ regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF); + /* + * Ensure the signal handler starts with the new fpu state. + */ + if (used_math()) + drop_init_fpu(current); } signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP)); } --- linux-3.13.0.orig/arch/x86/kernel/smpboot.c +++ linux-3.13.0/arch/x86/kernel/smpboot.c @@ -243,6 +243,13 @@ check_tsc_sync_target(); /* + * Enable the espfix hack for this CPU + */ +#ifdef CONFIG_X86_ESPFIX64 + init_espfix_ap(); +#endif + + /* * We need to hold vector_lock so there the set of online cpus * does not change while we are assigning vectors to cpus. Holding * this lock ensures we don't half assign or remove an irq from a cpu. @@ -1280,6 +1287,9 @@ for_each_cpu(sibling, cpu_sibling_mask(cpu)) cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); + for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) + cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); + cpumask_clear(cpu_llc_shared_mask(cpu)); cpumask_clear(cpu_sibling_mask(cpu)); cpumask_clear(cpu_core_mask(cpu)); c->phys_proc_id = 0; --- linux-3.13.0.orig/arch/x86/kernel/tls.c +++ linux-3.13.0/arch/x86/kernel/tls.c @@ -27,6 +27,58 @@ return -ESRCH; } +static bool tls_desc_okay(const struct user_desc *info) +{ + /* + * For historical reasons (i.e. no one ever documented how any + * of the segmentation APIs work), user programs can and do + * assume that a struct user_desc that's all zeros except for + * entry_number means "no segment at all". This never actually + * worked. In fact, up to Linux 3.19, a struct user_desc like + * this would create a 16-bit read-write segment with base and + * limit both equal to zero. + * + * That was close enough to "no segment at all" until we + * hardened this function to disallow 16-bit TLS segments. 
Fix + * it up by interpreting these zeroed segments the way that they + * were almost certainly intended to be interpreted. + * + * The correct way to ask for "no segment at all" is to specify + * a user_desc that satisfies LDT_empty. To keep everything + * working, we accept both. + * + * Note that there's a similar kludge in modify_ldt -- look at + * the distinction between modes 1 and 0x11. + */ + if (LDT_empty(info) || LDT_zero(info)) + return true; + + /* + * espfix is required for 16-bit data segments, but espfix + * only works for LDT segments. + */ + if (!info->seg_32bit) + return false; + + /* Only allow data segments in the TLS array. */ + if (info->contents > 1) + return false; + + /* + * Non-present segments with DPL 3 present an interesting attack + * surface. The kernel should handle such segments correctly, + * but TLS is very difficult to protect in a sandbox, so prevent + * such segments from being created. + * + * If userspace needs to remove a TLS entry, it can still delete + * it outright. + */ + if (info->seg_not_present) + return false; + + return true; +} + static void set_tls_desc(struct task_struct *p, int idx, const struct user_desc *info, int n) { @@ -40,7 +92,7 @@ cpu = get_cpu(); while (n-- > 0) { - if (LDT_empty(info)) + if (LDT_empty(info) || LDT_zero(info)) desc->a = desc->b = 0; else fill_ldt(desc, info); @@ -66,6 +118,9 @@ if (copy_from_user(&info, u_info, sizeof(info))) return -EFAULT; + if (!tls_desc_okay(&info)) + return -EINVAL; + if (idx == -1) idx = info.entry_number; @@ -192,6 +247,7 @@ { struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; const struct user_desc *info; + int i; if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || (pos % sizeof(struct user_desc)) != 0 || @@ -205,6 +261,10 @@ else info = infobuf; + for (i = 0; i < count / sizeof(struct user_desc); i++) + if (!tls_desc_okay(info + i)) + return -EINVAL; + set_tls_desc(target, GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)), info, count / sizeof(struct user_desc)); --- linux-3.13.0.orig/arch/x86/kernel/traps.c +++ linux-3.13.0/arch/x86/kernel/traps.c @@ -221,33 +221,41 @@ coprocessor_segment_overrun) DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) -#ifdef CONFIG_X86_32 DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) -#endif DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) #ifdef CONFIG_X86_64 /* Runs on IST stack */ -dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) -{ - enum ctx_state prev_state; - - prev_state = exception_enter(); - if (notify_die(DIE_TRAP, "stack segment", regs, error_code, - X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { - preempt_conditional_sti(regs); - do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); - preempt_conditional_cli(regs); - } - exception_exit(prev_state); -} - dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) { static const char str[] = "double fault"; struct task_struct *tsk = current; +#ifdef CONFIG_X86_ESPFIX64 + extern unsigned char native_irq_return_iret[]; + + /* + * If IRET takes a non-IST fault on the espfix64 stack, then we + * end up promoting it to a doublefault. In that case, modify + * the stack to make it look like we just entered the #GP + * handler from user space, similar to bad_iret. 
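tls_desc_okay() above accepts the all-zero "no segment" descriptor and otherwise rejects 16-bit segments (espfix only covers LDT segments), non-data segments, and not-present segments. A condensed user-space model of those checks against an abbreviated user_desc that keeps only the fields the checks consult:

#include <stdbool.h>
#include <stdio.h>

/* Abbreviated stand-in for struct user_desc. */
struct tls_desc {
    unsigned int base_addr;
    unsigned int limit;
    unsigned int seg_32bit:1;
    unsigned int contents:2;         /* 0/1 = data, 2/3 = code */
    unsigned int seg_not_present:1;
};

static bool desc_all_zero(const struct tls_desc *d)
{
    return !d->base_addr && !d->limit && !d->seg_32bit &&
           !d->contents && !d->seg_not_present;
}

static bool model_tls_desc_okay(const struct tls_desc *d)
{
    if (desc_all_zero(d))            /* "no segment at all" is always allowed */
        return true;
    if (!d->seg_32bit)               /* espfix cannot protect 16-bit GDT segments */
        return false;
    if (d->contents > 1)             /* only data segments in the TLS slots */
        return false;
    if (d->seg_not_present)          /* not-present segments are refused */
        return false;
    return true;
}

int main(void)
{
    struct tls_desc ok   = { .base_addr = 0x1000, .limit = 0xfffff, .seg_32bit = 1 };
    struct tls_desc bad  = { .base_addr = 0x1000, .limit = 0xffff };  /* 16-bit */
    struct tls_desc none = { 0 };

    printf("%d %d %d\n", model_tls_desc_okay(&ok),
           model_tls_desc_okay(&bad), model_tls_desc_okay(&none));    /* 1 0 1 */
    return 0;
}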
+ */ + if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + regs->cs == __KERNEL_CS && + regs->ip == (unsigned long)native_irq_return_iret) + { + struct pt_regs *normal_regs = task_pt_regs(current); + + /* Fake a #GP(0) from userspace. */ + memmove(&normal_regs->ip, (void *)regs->sp, 5*8); + normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ + regs->ip = (unsigned long)general_protection; + regs->sp = (unsigned long)&normal_regs->orig_ax; + return; + } +#endif + exception_enter(); /* Return not checked because double check cannot be ignored */ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); @@ -361,7 +369,7 @@ * for scheduling or signal handling. The actual stack switch is done in * entry.S */ -asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) +asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs) { struct pt_regs *regs = eregs; /* Did already sync */ @@ -380,6 +388,35 @@ *regs = *eregs; return regs; } + +struct bad_iret_stack { + void *error_entry_ret; + struct pt_regs regs; +}; + +asmlinkage __visible notrace +struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) +{ + /* + * This is called from entry_64.S early in handling a fault + * caused by a bad iret to user mode. To handle the fault + * correctly, we want move our stack frame to task_pt_regs + * and we want to pretend that the exception came from the + * iret target. + */ + struct bad_iret_stack *new_stack = + container_of(task_pt_regs(current), + struct bad_iret_stack, regs); + + /* Copy the IRET target to the new stack. */ + memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); + + /* Copy the remainder of the stack from the current stack. */ + memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); + + BUG_ON(!user_mode_vm(&new_stack->regs)); + return new_stack; +} #endif /* @@ -426,7 +463,7 @@ * then it's very likely the result of an icebp/int01 trap. * User wants a sigtrap for that. */ - if (!dr6 && user_mode(regs)) + if (!dr6 && user_mode_vm(regs)) user_icebp = 1; /* Catch kmemcheck conditions first of all! 
*/ @@ -752,7 +789,7 @@ set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); set_intr_gate(X86_TRAP_TS, invalid_TSS); set_intr_gate(X86_TRAP_NP, segment_not_present); - set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); + set_intr_gate(X86_TRAP_SS, stack_segment); set_intr_gate(X86_TRAP_GP, general_protection); set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); set_intr_gate(X86_TRAP_MF, coprocessor_error); --- linux-3.13.0.orig/arch/x86/kernel/tsc.c +++ linux-3.13.0/arch/x86/kernel/tsc.c @@ -386,7 +386,7 @@ goto success; } } - pr_err("Fast TSC calibration failed\n"); + pr_info("Fast TSC calibration failed\n"); return 0; success: @@ -722,9 +722,9 @@ tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); if (!(freq->flags & CPUFREQ_CONST_LOOPS)) mark_tsc_unstable("cpufreq changes"); - } - set_cyc2ns_scale(tsc_khz, freq->cpu); + set_cyc2ns_scale(tsc_khz, freq->cpu); + } return 0; } @@ -974,14 +974,17 @@ x86_init.timers.tsc_pre_init(); - if (!cpu_has_tsc) + if (!cpu_has_tsc) { + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; + } tsc_khz = x86_platform.calibrate_tsc(); cpu_khz = tsc_khz; if (!tsc_khz) { mark_tsc_unstable("could not calculate TSC khz"); + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); return; } --- linux-3.13.0.orig/arch/x86/kernel/vsyscall_64.c +++ linux-3.13.0/arch/x86/kernel/vsyscall_64.c @@ -125,10 +125,10 @@ if (!show_unhandled_signals) return; - pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", - level, current->comm, task_pid_nr(current), - message, regs->ip, regs->cs, - regs->sp, regs->ax, regs->si, regs->di); + printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", + level, current->comm, task_pid_nr(current), + message, regs->ip, regs->cs, + regs->sp, regs->ax, regs->si, regs->di); } static int addr_to_vsyscall_nr(unsigned long addr) --- linux-3.13.0.orig/arch/x86/kernel/xsave.c +++ linux-3.13.0/arch/x86/kernel/xsave.c @@ -268,8 +268,6 @@ if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate)) return -1; - drop_init_fpu(tsk); /* trigger finit */ - return 0; } @@ -377,7 +375,7 @@ * thread's fpu state, reconstruct fxstate from the fsave * header. Sanitize the copied state etc. */ - struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; + struct fpu *fpu = &tsk->thread.fpu; struct user_i387_ia32_struct env; int err = 0; @@ -391,16 +389,20 @@ */ drop_fpu(tsk); - if (__copy_from_user(xsave, buf_fx, state_size) || + if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) || __copy_from_user(&env, buf, sizeof(env))) { + fpu_finit(fpu); err = -1; } else { sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); - set_used_math(); } - if (use_eager_fpu()) + set_used_math(); + if (use_eager_fpu()) { + preempt_disable(); math_state_restore(); + preempt_enable(); + } return err; } else { --- linux-3.13.0.orig/arch/x86/kvm/cpuid.c +++ linux-3.13.0/arch/x86/kvm/cpuid.c @@ -43,14 +43,14 @@ return ret; } -void kvm_update_cpuid(struct kvm_vcpu *vcpu) +int kvm_update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; struct kvm_lapic *apic = vcpu->arch.apic; best = kvm_find_cpuid_entry(vcpu, 1, 0); if (!best) - return; + return 0; /* Update OSXSAVE bit */ if (cpu_has_xsave && best->function == 0x1) { @@ -78,7 +78,17 @@ xstate_required_size(vcpu->arch.guest_supported_xcr0); } + /* + * The existing code assumes virtual address is 48-bit in the canonical + * address checks; exit if it is ever changed. 
+ */ + best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0); + if (best && ((best->eax & 0xff00) >> 8) != 48 && + ((best->eax & 0xff00) >> 8) != 0) + return -EINVAL; + kvm_pmu_cpuid_update(vcpu); + return 0; } static int is_efer_nx(void) @@ -141,10 +151,9 @@ } vcpu->arch.cpuid_nent = cpuid->nent; cpuid_fix_nx_cap(vcpu); - r = 0; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); - kvm_update_cpuid(vcpu); + r = kvm_update_cpuid(vcpu); out_free: vfree(cpuid_entries); @@ -168,9 +177,7 @@ vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); kvm_x86_ops->cpuid_update(vcpu); - kvm_update_cpuid(vcpu); - return 0; - + r = kvm_update_cpuid(vcpu); out: return r; } --- linux-3.13.0.orig/arch/x86/kvm/cpuid.h +++ linux-3.13.0/arch/x86/kvm/cpuid.h @@ -3,7 +3,7 @@ #include "x86.h" -void kvm_update_cpuid(struct kvm_vcpu *vcpu); +int kvm_update_cpuid(struct kvm_vcpu *vcpu); struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index); int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, --- linux-3.13.0.orig/arch/x86/kvm/emulate.c +++ linux-3.13.0/arch/x86/kvm/emulate.c @@ -498,11 +498,6 @@ masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); } -static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) -{ - register_address_increment(ctxt, &ctxt->_eip, rel); -} - static u32 desc_limit_scaled(struct desc_struct *desc) { u32 limit = get_desc_limit(desc); @@ -576,6 +571,40 @@ return emulate_exception(ctxt, NM_VECTOR, 0, false); } +static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, + int cs_l) +{ + switch (ctxt->op_bytes) { + case 2: + ctxt->_eip = (u16)dst; + break; + case 4: + ctxt->_eip = (u32)dst; + break; +#ifdef CONFIG_X86_64 + case 8: + if ((cs_l && is_noncanonical_address(dst)) || + (!cs_l && (dst >> 32) != 0)) + return emulate_gp(ctxt, 0); + ctxt->_eip = dst; + break; +#endif + default: + WARN(1, "unsupported eip assignment size\n"); + } + return X86EMUL_CONTINUE; +} + +static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) +{ + return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64); +} + +static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) +{ + return assign_eip_near(ctxt, ctxt->_eip + rel); +} + static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) { u16 selector; @@ -1409,11 +1438,12 @@ } /* Does not support long mode */ -static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, - u16 selector, int seg) +static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + u16 selector, int seg, u8 cpl, + struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; - u8 dpl, rpl, cpl; + u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ @@ -1441,7 +1471,6 @@ } rpl = selector & 3; - cpl = ctxt->ops->cpl(ctxt); /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS @@ -1498,6 +1527,15 @@ if (rpl > cpl || dpl != cpl) goto exception; } + /* in long-mode d/b must be clear if l is set */ + if (seg_desc.d && seg_desc.l) { + u64 efer = 0; + + ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); + if (efer & EFER_LMA) + goto exception; + } + /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; @@ -1537,12 +1575,21 @@ } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg); + if (desc) + *desc = seg_desc; return X86EMUL_CONTINUE; exception: emulate_exception(ctxt, err_vec, err_code, true); 
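kvm_update_cpuid() above reads CPUID leaf 0x80000008 and fails with -EINVAL when the guest's advertised virtual-address width is neither 0 nor 48 bits, since the emulator's canonical-address checks assume 48. A small model of extracting and validating that field (the sample EAX values are made up):

#include <stdint.h>
#include <stdio.h>

/* EAX of CPUID 0x80000008: bits 7:0 = physical bits, bits 15:8 = virtual bits. */
static int check_virt_addr_bits(uint32_t eax)
{
    unsigned int virt_bits = (eax & 0xff00) >> 8;

    if (virt_bits != 0 && virt_bits != 48)
        return -1;                  /* the kernel returns -EINVAL here */
    return 0;
}

int main(void)
{
    printf("%d\n", check_virt_addr_bits(0x00003028));   /* 48-bit virtual: accepted */
    printf("%d\n", check_virt_addr_bits(0x00003930));   /* 57-bit virtual: rejected */
    return 0;
}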
return X86EMUL_PROPAGATE_FAULT; } +static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + u16 selector, int seg) +{ + u8 cpl = ctxt->ops->cpl(ctxt); + return __load_segment_descriptor(ctxt, selector, seg, cpl, NULL); +} + static void write_register_operand(struct operand *op) { /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ @@ -1937,17 +1984,31 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned short sel; + unsigned short sel, old_sel; + struct desc_struct old_desc, new_desc; + const struct x86_emulate_ops *ops = ctxt->ops; + u8 cpl = ctxt->ops->cpl(ctxt); + + /* Assignment of RIP may only fail in 64-bit mode */ + if (ctxt->mode == X86EMUL_MODE_PROT64) + ops->get_segment(ctxt, &old_sel, &old_desc, NULL, + VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); - rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, + &new_desc); if (rc != X86EMUL_CONTINUE) return rc; - ctxt->_eip = 0; - memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); - return X86EMUL_CONTINUE; + rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); + if (rc != X86EMUL_CONTINUE) { + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); + /* assigning eip failed; restore the old cs */ + ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); + return rc; + } + return rc; } static int em_grp45(struct x86_emulate_ctxt *ctxt) @@ -1958,13 +2019,15 @@ case 2: /* call near abs */ { long int old_eip; old_eip = ctxt->_eip; - ctxt->_eip = ctxt->src.val; + rc = assign_eip_near(ctxt, ctxt->src.val); + if (rc != X86EMUL_CONTINUE) + break; ctxt->src.val = old_eip; rc = em_push(ctxt); break; } case 4: /* jmp abs */ - ctxt->_eip = ctxt->src.val; + rc = assign_eip_near(ctxt, ctxt->src.val); break; case 5: /* jmp far */ rc = em_jmp_far(ctxt); @@ -1996,26 +2059,47 @@ static int em_ret(struct x86_emulate_ctxt *ctxt) { - ctxt->dst.type = OP_REG; - ctxt->dst.addr.reg = &ctxt->_eip; - ctxt->dst.bytes = ctxt->op_bytes; - return em_pop(ctxt); + int rc; + unsigned long eip; + + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); + if (rc != X86EMUL_CONTINUE) + return rc; + + return assign_eip_near(ctxt, eip); } static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned long cs; + unsigned long eip, cs; + u16 old_cs; + int cpl = ctxt->ops->cpl(ctxt); + struct desc_struct old_desc, new_desc; + const struct x86_emulate_ops *ops = ctxt->ops; + + if (ctxt->mode == X86EMUL_MODE_PROT64) + ops->get_segment(ctxt, &old_cs, &old_desc, NULL, + VCPU_SREG_CS); - rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - if (ctxt->op_bytes == 4) - ctxt->_eip = (u32)ctxt->_eip; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); + /* Outer-privilege level return is not implemented */ + if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) + return X86EMUL_UNHANDLEABLE; + rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, + &new_desc); + if (rc != X86EMUL_CONTINUE) + return rc; + rc = assign_eip_far(ctxt, eip, new_desc.l); + if (rc != X86EMUL_CONTINUE) { + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); + ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); + } return rc; } @@ -2221,7 +2305,7 @@ * Not recognized on AMD in compat mode (but is recognized in legacy * mode). 
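The far control-transfer paths above all funnel the new RIP through assign_eip_far()/assign_eip_near(), which truncate 16- and 32-bit targets but must reject a non-canonical 64-bit target with #GP. A user-space sketch of that check, assuming 48 implemented virtual-address bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Canonical for 48 bits: bits 63:47 are all copies of bit 47. */
static bool noncanonical(uint64_t addr)
{
    uint64_t upper = addr >> 47;
    return upper != 0 && upper != 0x1ffff;
}

/* Returns 0 on success, -1 where the emulator would inject #GP(0). */
static int assign_rip(uint64_t *rip, uint64_t dst, int op_bytes, bool long_mode_cs)
{
    if (op_bytes == 2) {
        *rip = (uint16_t)dst;
    } else if (op_bytes == 4) {
        *rip = (uint32_t)dst;
    } else {                         /* 8-byte operand */
        if ((long_mode_cs && noncanonical(dst)) ||
            (!long_mode_cs && (dst >> 32)))
            return -1;
        *rip = dst;
    }
    return 0;
}

int main(void)
{
    uint64_t rip = 0;

    printf("%d\n", assign_rip(&rip, 0x00007fffffffe000ULL, 8, true));   /* 0 */
    printf("%d\n", assign_rip(&rip, 0x0000800000000000ULL, 8, true));   /* -1 */
    return 0;
}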
*/ - if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) + if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); @@ -2234,25 +2318,13 @@ setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); - switch (ctxt->mode) { - case X86EMUL_MODE_PROT32: - if ((msr_data & 0xfffc) == 0x0) - return emulate_gp(ctxt, 0); - break; - case X86EMUL_MODE_PROT64: - if (msr_data == 0x0) - return emulate_gp(ctxt, 0); - break; - default: - break; - } + if ((msr_data & 0xfffc) == 0x0) + return emulate_gp(ctxt, 0); ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); - cs_sel = (u16)msr_data; - cs_sel &= ~SELECTOR_RPL_MASK; + cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; - ss_sel &= ~SELECTOR_RPL_MASK; - if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { + if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } @@ -2261,10 +2333,11 @@ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); - ctxt->_eip = msr_data; + ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); - *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; + *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data : + (u32)msr_data; return X86EMUL_CONTINUE; } @@ -2273,7 +2346,7 @@ { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; - u64 msr_data; + u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; @@ -2289,6 +2362,9 @@ else usermode = X86EMUL_MODE_PROT32; + rcx = reg_read(ctxt, VCPU_REGS_RCX); + rdx = reg_read(ctxt, VCPU_REGS_RDX); + cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); @@ -2306,6 +2382,9 @@ ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; + if (is_noncanonical_address(rcx) || + is_noncanonical_address(rdx)) + return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; @@ -2314,8 +2393,8 @@ ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); - ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); - *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); + ctxt->_eip = rdx; + *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; } @@ -2404,6 +2483,7 @@ struct tss_segment_16 *tss) { int ret; + u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; @@ -2426,23 +2506,30 @@ set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); + cpl = tss->cs & 3; + /* * Now load segment descriptors. 
If fault happens at this stage * it is handled in a context of new task */ - ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR); + ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); + ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); + ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); + ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); + ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; @@ -2521,6 +2608,7 @@ struct tss_segment_32 *tss) { int ret; + u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); @@ -2539,7 +2627,8 @@ /* * SDM says that segment selectors are loaded before segment - * descriptors + * descriptors. This is important because CPL checks will + * use CS.RPL. */ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); @@ -2553,43 +2642,45 @@ * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. - * - * Need to get rflags to the vcpu struct immediately because it - * influences the CPL which is checked at least when loading the segment - * descriptors and when pushing an error code to the new kernel stack. - * - * TODO Introduce a separate ctxt->ops->set_cpl callback */ - if (ctxt->eflags & X86_EFLAGS_VM) + if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; - else + cpl = 3; + } else { ctxt->mode = X86EMUL_MODE_PROT32; - - ctxt->ops->set_rflags(ctxt, ctxt->eflags); + cpl = tss->cs & 3; + } /* * Now load segment descriptors. 
If fault happenes at this stage * it is handled in a context of new task */ - ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); + ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, + cpl, NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES); + ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS); + ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS); + ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS); + ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS); + ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; - ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS); + ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, + NULL); if (ret != X86EMUL_CONTINUE) return ret; @@ -2854,10 +2945,13 @@ static int em_call(struct x86_emulate_ctxt *ctxt) { + int rc; long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; - jmp_rel(ctxt, rel); + rc = jmp_rel(ctxt, rel); + if (rc != X86EMUL_CONTINUE) + return rc; return em_push(ctxt); } @@ -2866,34 +2960,50 @@ u16 sel, old_cs; ulong old_eip; int rc; + struct desc_struct old_desc, new_desc; + const struct x86_emulate_ops *ops = ctxt->ops; + int cpl = ctxt->ops->cpl(ctxt); - old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); old_eip = ctxt->_eip; + ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); - if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, + &new_desc); + if (rc != X86EMUL_CONTINUE) return X86EMUL_CONTINUE; - ctxt->_eip = 0; - memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); + rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l); + if (rc != X86EMUL_CONTINUE) + goto fail; ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) - return rc; + goto fail; ctxt->src.val = old_eip; - return em_push(ctxt); + rc = em_push(ctxt); + /* If we failed, we tainted the memory, but the very least we should + restore cs */ + if (rc != X86EMUL_CONTINUE) + goto fail; + return rc; +fail: + ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); + return rc; + } static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) { int rc; + unsigned long eip; - ctxt->dst.type = OP_REG; - ctxt->dst.addr.reg = &ctxt->_eip; - ctxt->dst.bytes = ctxt->op_bytes; - rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); + if (rc != X86EMUL_CONTINUE) + return rc; + rc = assign_eip_near(ctxt, eip); if (rc != X86EMUL_CONTINUE) return rc; rsp_increment(ctxt, ctxt->src.val); @@ -3223,20 +3333,24 @@ static int em_loop(struct x86_emulate_ctxt *ctxt) { + int rc = X86EMUL_CONTINUE; + register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) - jmp_rel(ctxt, ctxt->src.val); + rc 
= jmp_rel(ctxt, ctxt->src.val); - return X86EMUL_CONTINUE; + return rc; } static int em_jcxz(struct x86_emulate_ctxt *ctxt) { + int rc = X86EMUL_CONTINUE; + if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); - return X86EMUL_CONTINUE; + return rc; } static int em_in(struct x86_emulate_ctxt *ctxt) @@ -4092,6 +4206,7 @@ fetch_register_operand(op); break; case OpCL: + op->type = OP_IMM; op->bytes = 1; op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; break; @@ -4099,6 +4214,7 @@ rc = decode_imm(ctxt, op, 1, true); break; case OpOne: + op->type = OP_IMM; op->bytes = 1; op->val = 1; break; @@ -4157,21 +4273,27 @@ ctxt->memop.bytes = ctxt->op_bytes + 2; goto mem_common; case OpES: + op->type = OP_IMM; op->val = VCPU_SREG_ES; break; case OpCS: + op->type = OP_IMM; op->val = VCPU_SREG_CS; break; case OpSS: + op->type = OP_IMM; op->val = VCPU_SREG_SS; break; case OpDS: + op->type = OP_IMM; op->val = VCPU_SREG_DS; break; case OpFS: + op->type = OP_IMM; op->val = VCPU_SREG_FS; break; case OpGS: + op->type = OP_IMM; op->val = VCPU_SREG_GS; break; case OpImplicit: @@ -4595,7 +4717,8 @@ if (rc != X86EMUL_CONTINUE) goto done; } - ctxt->dst.orig_val = ctxt->dst.val; + /* Copy full 64-bit value for CMPXCHG8B. */ + ctxt->dst.orig_val64 = ctxt->dst.val64; special_insn: @@ -4633,7 +4756,7 @@ break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; @@ -4662,7 +4785,7 @@ break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ @@ -4782,7 +4905,7 @@ break; case 0x80 ... 0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) - jmp_rel(ctxt, ctxt->src.val); + rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); --- linux-3.13.0.orig/arch/x86/kvm/i8254.c +++ linux-3.13.0/arch/x86/kvm/i8254.c @@ -37,6 +37,7 @@ #include "irq.h" #include "i8254.h" +#include "x86.h" #ifndef CONFIG_X86_64 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) @@ -261,8 +262,10 @@ return; timer = &pit->pit_state.timer; + mutex_lock(&pit->pit_state.lock); if (hrtimer_cancel(timer)) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + mutex_unlock(&pit->pit_state.lock); } static void destroy_pit_timer(struct kvm_pit *pit) @@ -349,6 +352,23 @@ atomic_set(&ps->pending, 0); ps->irq_ack = 1; + /* + * Do not allow the guest to program periodic timers with small + * interval, since the hrtimers are not throttled by the host + * scheduler. 
+ */ + if (ps->is_periodic) { + s64 min_period = min_timer_period_us * 1000LL; + + if (ps->period < min_period) { + pr_info_ratelimited( + "kvm: requested %lld ns " + "i8254 timer period limited to %lld ns\n", + ps->period, min_period); + ps->period = min_period; + } + } + hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval), HRTIMER_MODE_ABS); } --- linux-3.13.0.orig/arch/x86/kvm/irq.c +++ linux-3.13.0/arch/x86/kvm/irq.c @@ -108,7 +108,7 @@ vector = kvm_cpu_get_extint(v); - if (kvm_apic_vid_enabled(v->kvm) || vector != -1) + if (vector != -1) return vector; /* PIC */ return kvm_get_apic_interrupt(v); /* APIC */ --- linux-3.13.0.orig/arch/x86/kvm/lapic.c +++ linux-3.13.0/arch/x86/kvm/lapic.c @@ -71,9 +71,6 @@ #define VEC_POS(v) ((v) & (32 - 1)) #define REG_POS(v) (((v) >> 5) << 4) -static unsigned int min_timer_period_us = 500; -module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); - static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val) { *((u32 *) (apic->regs + reg_off)) = val; @@ -326,8 +323,12 @@ static inline void apic_set_irr(int vec, struct kvm_lapic *apic) { - apic->irr_pending = true; apic_set_vector(vec, apic->regs + APIC_IRR); + /* + * irr_pending must be true if any interrupt is pending; set it after + * APIC_IRR to avoid race with apic_clear_irr + */ + apic->irr_pending = true; } static inline int apic_search_irr(struct kvm_lapic *apic) @@ -355,31 +356,92 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) { - apic->irr_pending = false; - apic_clear_vector(vec, apic->regs + APIC_IRR); - if (apic_search_irr(apic) != -1) - apic->irr_pending = true; + struct kvm_vcpu *vcpu; + + vcpu = apic->vcpu; + + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) { + /* try to update RVI */ + apic_clear_vector(vec, apic->regs + APIC_IRR); + kvm_make_request(KVM_REQ_EVENT, vcpu); + } else { + apic->irr_pending = false; + apic_clear_vector(vec, apic->regs + APIC_IRR); + if (apic_search_irr(apic) != -1) + apic->irr_pending = true; + } } static inline void apic_set_isr(int vec, struct kvm_lapic *apic) { - if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) + struct kvm_vcpu *vcpu; + + if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) + return; + + vcpu = apic->vcpu; + + /* + * With APIC virtualization enabled, all caching is disabled + * because the processor can modify ISR under the hood. Instead + * just set SVI. + */ + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) + kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec); + else { ++apic->isr_count; - BUG_ON(apic->isr_count > MAX_APIC_VECTOR); + BUG_ON(apic->isr_count > MAX_APIC_VECTOR); + /* + * ISR (in service register) bit is set when injecting an interrupt. + * The highest vector is injected. Thus the latest bit set matches + * the highest bit in ISR. + */ + apic->highest_isr_cache = vec; + } +} + +static inline int apic_find_highest_isr(struct kvm_lapic *apic) +{ + int result; + /* - * ISR (in service register) bit is set when injecting an interrupt. - * The highest vector is injected. Thus the latest bit set matches - * the highest bit in ISR. + * Note that isr_count is always 1, and highest_isr_cache + * is always -1, with APIC virtualization enabled. 
*/ - apic->highest_isr_cache = vec; + if (!apic->isr_count) + return -1; + if (likely(apic->highest_isr_cache != -1)) + return apic->highest_isr_cache; + + result = find_highest_vector(apic->regs + APIC_ISR); + ASSERT(result == -1 || result >= 16); + + return result; } static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) { - if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) + struct kvm_vcpu *vcpu; + if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) + return; + + vcpu = apic->vcpu; + + /* + * We do get here for APIC virtualization enabled if the guest + * uses the Hyper-V APIC enlightenment. In this case we may need + * to trigger a new interrupt delivery by writing the SVI field; + * on the other hand isr_count and highest_isr_cache are unused + * and must be left alone. + */ + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) + kvm_x86_ops->hwapic_isr_update(vcpu->kvm, + apic_find_highest_isr(apic)); + else { --apic->isr_count; - BUG_ON(apic->isr_count < 0); - apic->highest_isr_cache = -1; + BUG_ON(apic->isr_count < 0); + apic->highest_isr_cache = -1; + } } int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) @@ -459,22 +521,6 @@ __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); } -static inline int apic_find_highest_isr(struct kvm_lapic *apic) -{ - int result; - - /* Note that isr_count is always 1 with vid enabled */ - if (!apic->isr_count) - return -1; - if (likely(apic->highest_isr_cache != -1)) - return apic->highest_isr_cache; - - result = find_highest_vector(apic->regs + APIC_ISR); - ASSERT(result == -1 || result >= 16); - - return result; -} - void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr) { struct kvm_lapic *apic = vcpu->arch.apic; @@ -1611,6 +1657,13 @@ if (vector == -1) return -1; + /* + * We get here even with APIC virtualization enabled, if doing + * nested virtualization and L1 runs with the "acknowledge interrupt + * on exit" mode. Then we cannot inject the interrupt via RVI, + * because the process would deliver it through the IDT. + */ + apic_set_isr(vector, apic); apic_update_ppr(apic); apic_clear_irr(vector, apic); --- linux-3.13.0.orig/arch/x86/kvm/mmu.c +++ linux-3.13.0/arch/x86/kvm/mmu.c @@ -198,16 +198,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); /* - * spte bits of bit 3 ~ bit 11 are used as low 9 bits of generation number, - * the bits of bits 52 ~ bit 61 are used as high 10 bits of generation - * number. + * the low bit of the generation number is always presumed to be zero. + * This disables mmio caching during memslot updates. The concept is + * similar to a seqcount but instead of retrying the access we just punt + * and ignore the cache. + * + * spte bits 3-11 are used as bits 1-9 of the generation number, + * the bits 52-61 are used as bits 10-19 of the generation number. */ -#define MMIO_SPTE_GEN_LOW_SHIFT 3 +#define MMIO_SPTE_GEN_LOW_SHIFT 2 #define MMIO_SPTE_GEN_HIGH_SHIFT 52 -#define MMIO_GEN_SHIFT 19 -#define MMIO_GEN_LOW_SHIFT 9 -#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 1) +#define MMIO_GEN_SHIFT 20 +#define MMIO_GEN_LOW_SHIFT 10 +#define MMIO_GEN_LOW_MASK ((1 << MMIO_GEN_LOW_SHIFT) - 2) #define MMIO_GEN_MASK ((1 << MMIO_GEN_SHIFT) - 1) #define MMIO_MAX_GEN ((1 << MMIO_GEN_SHIFT) - 1) @@ -235,12 +239,7 @@ static unsigned int kvm_current_mmio_generation(struct kvm *kvm) { - /* - * Init kvm generation close to MMIO_MAX_GEN to easily test the - * code of handling generation number wrap-around. 
- */ - return (kvm_memslots(kvm)->generation + - MMIO_MAX_GEN - 150) & MMIO_GEN_MASK; + return kvm_memslots(kvm)->generation & MMIO_GEN_MASK; } static void mark_mmio_spte(struct kvm *kvm, u64 *sptep, u64 gfn, @@ -2659,6 +2658,9 @@ int emulate = 0; gfn_t pseudo_gfn; + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + return 0; + for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { if (iterator.level == level) { mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, @@ -2829,6 +2831,9 @@ bool ret = false; u64 spte = 0ull; + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + return false; + if (!page_fault_can_be_fast(error_code)) return false; @@ -3150,7 +3155,7 @@ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) return; - vcpu_clear_mmio_info(vcpu, ~0ul); + vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { hpa_t root = vcpu->arch.mmu.root_hpa; @@ -3224,6 +3229,9 @@ struct kvm_shadow_walk_iterator iterator; u64 spte = 0ull; + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + return spte; + walk_shadow_page_lockless_begin(vcpu); for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) if (!is_shadow_present_pte(spte)) @@ -4369,8 +4377,8 @@ * The very rare case: if the generation-number is round, * zap all shadow pages. */ - if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) { - printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n"); + if (unlikely(kvm_current_mmio_generation(kvm) == 0)) { + printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n"); kvm_mmu_invalidate_zap_all_pages(kvm); } } @@ -4510,6 +4518,9 @@ u64 spte; int nr_sptes = 0; + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + return nr_sptes; + walk_shadow_page_lockless_begin(vcpu); for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { sptes[iterator.level-1] = spte; --- linux-3.13.0.orig/arch/x86/kvm/paging_tmpl.h +++ linux-3.13.0/arch/x86/kvm/paging_tmpl.h @@ -569,6 +569,9 @@ if (FNAME(gpte_changed)(vcpu, gw, top_level)) goto out_gpte_changed; + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) + goto out_gpte_changed; + for (shadow_walk_init(&it, vcpu, addr); shadow_walk_okay(&it) && it.level > gw->level; shadow_walk_next(&it)) { @@ -820,6 +823,11 @@ */ mmu_topup_memory_caches(vcpu); + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) { + WARN_ON(1); + return; + } + spin_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry(vcpu, gva, iterator) { level = iterator.level; --- linux-3.13.0.orig/arch/x86/kvm/svm.c +++ linux-3.13.0/arch/x86/kvm/svm.c @@ -2989,10 +2989,8 @@ u8 cr8_prev = kvm_get_cr8(&svm->vcpu); /* instruction emulation calls kvm_set_cr8() */ r = cr_interception(svm); - if (irqchip_in_kernel(svm->vcpu.kvm)) { - clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); + if (irqchip_in_kernel(svm->vcpu.kvm)) return r; - } if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) return r; kvm_run->exit_reason = KVM_EXIT_SET_TPR; @@ -3202,7 +3200,7 @@ msr.host_initiated = false; svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; - if (svm_set_msr(&svm->vcpu, &msr)) { + if (kvm_set_msr(&svm->vcpu, &msr)) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(&svm->vcpu, 0); } else { @@ -3484,9 +3482,9 @@ if (exit_code >= ARRAY_SIZE(svm_exit_handlers) || !svm_exit_handlers[exit_code]) { - kvm_run->exit_reason = KVM_EXIT_UNKNOWN; - kvm_run->hw.hardware_exit_reason = exit_code; - return 0; + WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code); + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; } return 
svm_exit_handlers[exit_code](svm); @@ -3554,6 +3552,8 @@ if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) return; + clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); + if (irr == -1) return; --- linux-3.13.0.orig/arch/x86/kvm/vmx.c +++ linux-3.13.0/arch/x86/kvm/vmx.c @@ -439,6 +439,7 @@ #endif int gs_ldt_reload_needed; int fs_reload_needed; + unsigned long vmcs_host_cr4; /* May not match real cr4 */ } host_state; struct { int vm86_active; @@ -1056,7 +1057,9 @@ == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); } -static void nested_vmx_vmexit(struct kvm_vcpu *vcpu); +static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + u32 exit_intr_info, + unsigned long exit_qualification); static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, u32 reason, unsigned long qualification); @@ -1906,7 +1909,9 @@ if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; - nested_vmx_vmexit(vcpu); + nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, + vmcs_read32(VM_EXIT_INTR_INFO), + vmcs_readl(EXIT_QUALIFICATION)); return 1; } @@ -2255,12 +2260,12 @@ nested_vmx_secondary_ctls_low = 0; nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | - SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ - nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; + nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_EPT; nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | VMX_EPT_INVEPT_BIT; @@ -2275,6 +2280,10 @@ } else nested_vmx_ept_caps = 0; + if (enable_unrestricted_guest) + nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_UNRESTRICTED_GUEST; + /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK | @@ -2545,12 +2554,15 @@ break; msr = find_msr_entry(vmx, msr_index); if (msr) { + u64 old_msr_data = msr->data; msr->data = data; if (msr - vmx->guest_msrs < vmx->save_nmsrs) { preempt_disable(); - kvm_set_shared_msr(msr->index, msr->data, - msr->mask); + ret = kvm_set_shared_msr(msr->index, msr->data, + msr->mask); preempt_enable(); + if (ret) + msr->data = old_msr_data; } break; } @@ -4131,11 +4143,16 @@ u32 low32, high32; unsigned long tmpl; struct desc_ptr dt; + unsigned long cr4; vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ - vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ + /* Save the most likely value for this task's CR4 in the VMCS. */ + cr4 = read_cr4(); + vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ + vmx->host_state.vmcs_host_cr4 = cr4; + vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ #ifdef CONFIG_X86_64 /* @@ -4587,25 +4604,8 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - if (to_vmx(vcpu)->nested.nested_run_pending) - return 0; - if (nested_exit_on_nmi(vcpu)) { - nested_vmx_vmexit(vcpu); - vmcs12->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI; - vmcs12->vm_exit_intr_info = NMI_VECTOR | - INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK; - /* - * The NMI-triggered VM exit counts as injection: - * clear this one and block further NMIs. 
- */ - vcpu->arch.nmi_pending = 0; - vmx_set_nmi_mask(vcpu, true); - return 0; - } - } + if (to_vmx(vcpu)->nested.nested_run_pending) + return 0; if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) return 0; @@ -4617,23 +4617,8 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) { - struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - - if (to_vmx(vcpu)->nested.nested_run_pending) - return 0; - if (nested_exit_on_intr(vcpu)) { - nested_vmx_vmexit(vcpu); - vmcs12->vm_exit_reason = - EXIT_REASON_EXTERNAL_INTERRUPT; - vmcs12->vm_exit_intr_info = 0; - /* - * fall through to normal code, but now in L1, not L2 - */ - } - } - - return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && + return (!to_vmx(vcpu)->nested.nested_run_pending && + vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); } @@ -5129,7 +5114,7 @@ msr.data = data; msr.index = ecx; msr.host_initiated = false; - if (vmx_set_msr(vcpu, &msr) != 0) { + if (kvm_set_msr(vcpu, &msr) != 0) { trace_kvm_msr_write_ex(ecx, data); kvm_inject_gp(vcpu, 0); return 1; @@ -6401,6 +6386,12 @@ return 1; } +static int handle_invvpid(struct kvm_vcpu *vcpu) +{ + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; +} + /* * The exit handlers return 1 if the exit was handled fully and guest execution * may resume. Otherwise they set the kvm_run parameter to indicate what needs @@ -6446,6 +6437,7 @@ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op, [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op, [EXIT_REASON_INVEPT] = handle_invept, + [EXIT_REASON_INVVPID] = handle_invvpid, }; static const int kvm_vmx_max_exit_handlers = @@ -6644,7 +6636,7 @@ else if (is_page_fault(intr_info)) return enable_ept; else if (is_no_device(intr_info) && - !(nested_read_cr0(vmcs12) & X86_CR0_TS)) + !(vmcs12->guest_cr0 & X86_CR0_TS)) return 0; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); @@ -6675,7 +6667,7 @@ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: - case EXIT_REASON_INVEPT: + case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: /* * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! @@ -6777,7 +6769,9 @@ return handle_invalid_guest_state(vcpu); if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { - nested_vmx_vmexit(vcpu); + nested_vmx_vmexit(vcpu, exit_reason, + vmcs_read32(VM_EXIT_INTR_INFO), + vmcs_readl(EXIT_QUALIFICATION)); return 1; } @@ -6838,10 +6832,10 @@ && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { - vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; - vcpu->run->hw.hardware_exit_reason = exit_reason; + WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; } - return 0; } static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) @@ -7140,7 +7134,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - unsigned long debugctlmsr; + unsigned long debugctlmsr, cr4; /* Record the guest's net vcpu time for enforced NMI injections. 
*/ if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) @@ -7161,6 +7155,12 @@ if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); + cr4 = read_cr4(); + if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { + vmcs_writel(HOST_CR4, cr4); + vmx->host_state.vmcs_host_cr4 = cr4; + } + /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug @@ -7332,8 +7332,8 @@ struct vcpu_vmx *vmx = to_vmx(vcpu); free_vpid(vmx); - free_nested(vmx); free_loaded_vmcs(vmx->loaded_vmcs); + free_nested(vmx); kfree(vmx->guest_msrs); kvm_vcpu_uninit(vcpu); kmem_cache_free(kvm_vcpu_cache, vmx); @@ -7518,15 +7518,14 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) { - struct vmcs12 *vmcs12; - nested_vmx_vmexit(vcpu); - vmcs12 = get_vmcs12(vcpu); + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + u32 exit_reason; if (fault->error_code & PFERR_RSVD_MASK) - vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; + exit_reason = EXIT_REASON_EPT_MISCONFIG; else - vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION; - vmcs12->exit_qualification = vcpu->arch.exit_qualification; + exit_reason = EXIT_REASON_EPT_VIOLATION; + nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification); vmcs12->guest_physical_address = fault->address; } @@ -7564,7 +7563,9 @@ /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */ if (vmcs12->exception_bitmap & (1u << PF_VECTOR)) - nested_vmx_vmexit(vcpu); + nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, + vmcs_read32(VM_EXIT_INTR_INFO), + vmcs_readl(EXIT_QUALIFICATION)); else kvm_inject_page_fault(vcpu, fault); } @@ -7640,9 +7641,11 @@ vmcs_write64(VMCS_LINK_POINTER, -1ull); - vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, - (vmcs_config.pin_based_exec_ctrl | - vmcs12->pin_based_vm_exec_control)); + exec_control = vmcs12->pin_based_vm_exec_control; + exec_control |= vmcs_config.pin_based_exec_ctrl; + exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER | + PIN_BASED_POSTED_INTR); + vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, @@ -7678,7 +7681,9 @@ if (!vmx->rdtscp_enabled) exec_control &= ~SECONDARY_EXEC_RDTSCP; /* Take the following fields only from vmcs12 */ - exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + SECONDARY_EXEC_APIC_REGISTER_VIRT); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; @@ -8099,6 +8104,35 @@ } } +static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { + if (vmx->nested.nested_run_pending) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, + NMI_VECTOR | INTR_TYPE_NMI_INTR | + INTR_INFO_VALID_MASK, 0); + /* + * The NMI-triggered VM exit counts as injection: + * clear this one and block further NMIs. 
+ */ + vcpu->arch.nmi_pending = 0; + vmx_set_nmi_mask(vcpu, true); + return 0; + } + + if ((kvm_cpu_has_interrupt(vcpu) || external_intr) && + nested_exit_on_intr(vcpu)) { + if (vmx->nested.nested_run_pending) + return -EBUSY; + nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); + } + + return 0; +} + /* * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), @@ -8110,7 +8144,9 @@ * exit-information fields only. Other fields are modified by L1 with VMWRITE, * which already writes to vmcs12 directly. */ -static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, + u32 exit_reason, u32 exit_intr_info, + unsigned long exit_qualification) { /* update guest state fields: */ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); @@ -8201,10 +8237,10 @@ /* update exit information fields: */ - vmcs12->vm_exit_reason = to_vmx(vcpu)->exit_reason; - vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION); + vmcs12->vm_exit_reason = exit_reason; + vmcs12->exit_qualification = exit_qualification; - vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); + vmcs12->vm_exit_intr_info = exit_intr_info; if ((vmcs12->vm_exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) @@ -8370,7 +8406,9 @@ * and modify vmcs12 to make it see what it would expect to see there if * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) */ -static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) +static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, + u32 exit_intr_info, + unsigned long exit_qualification) { struct vcpu_vmx *vmx = to_vmx(vcpu); int cpu; @@ -8380,7 +8418,8 @@ WARN_ON_ONCE(vmx->nested.nested_run_pending); leave_guest_mode(vcpu); - prepare_vmcs12(vcpu, vmcs12); + prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, + exit_qualification); cpu = get_cpu(); vmx->loaded_vmcs = &vmx->vmcs01; @@ -8421,6 +8460,9 @@ nested_vmx_succeed(vcpu); if (enable_shadow_vmcs) vmx->nested.sync_shadow_vmcs = true; + + /* in case we halted in L2 */ + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } /* @@ -8548,6 +8590,8 @@ .check_intercept = vmx_check_intercept, .handle_external_intr = vmx_handle_external_intr, + + .check_nested_events = vmx_check_nested_events, }; static int __init vmx_init(void) --- linux-3.13.0.orig/arch/x86/kvm/x86.c +++ linux-3.13.0/arch/x86/kvm/x86.c @@ -94,6 +94,9 @@ static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); +unsigned int min_timer_period_us = 500; +module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR); + bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; @@ -222,20 +225,25 @@ shared_msr_update(i, shared_msrs_global.msrs[i]); } -void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) +int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); + int err; if (((value ^ smsr->values[slot].curr) & mask) == 0) - return; + return 0; smsr->values[slot].curr = value; - wrmsrl(shared_msrs_global.msrs[slot], value); + err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); + if (err) + return 1; + if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); 
smsr->registered = true; } + return 0; } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); @@ -916,7 +924,6 @@ } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); - /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. @@ -924,8 +931,34 @@ */ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { + switch (msr->index) { + case MSR_FS_BASE: + case MSR_GS_BASE: + case MSR_KERNEL_GS_BASE: + case MSR_CSTAR: + case MSR_LSTAR: + if (is_noncanonical_address(msr->data)) + return 1; + break; + case MSR_IA32_SYSENTER_EIP: + case MSR_IA32_SYSENTER_ESP: + /* + * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if + * non-canonical address is written on Intel but not on + * AMD (which ignores the top 32-bits, because it does + * not implement 64-bit SYSENTER). + * + * 64-bit code should hence be able to write a non-canonical + * value on AMD. Making the address canonical ensures that + * vmentry does not fail on Intel after writing a non-canonical + * value, and that something deterministic happens if the guest + * invokes 64-bit SYSENTER. + */ + msr->data = get_canonical(msr->data); + } return kvm_x86_ops->set_msr(vcpu, msr); } +EXPORT_SYMBOL_GPL(kvm_set_msr); /* * Adapt set_msr() to msr_io()'s calling convention @@ -4861,7 +4894,7 @@ ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); - if (!is_guest_mode(vcpu)) { + if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; @@ -5499,7 +5532,7 @@ goto out; } if (ops->disabled_by_bios()) { - printk(KERN_ERR "kvm: disabled by bios\n"); + printk(KERN_WARNING "kvm: disabled by bios\n"); r = -EOPNOTSUPP; goto out; } @@ -5761,8 +5794,10 @@ kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); } -static void inject_pending_event(struct kvm_vcpu *vcpu) +static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) { + int r; + /* try to reinject previous events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, @@ -5772,17 +5807,23 @@ vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); - return; + return 0; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); - return; + return 0; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); - return; + return 0; + } + + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { + r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + if (r != 0) + return r; } /* try to inject new event if pending */ @@ -5793,12 +5834,25 @@ kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { + /* + * Because interrupts can be injected asynchronously, we are + * calling check_nested_events again here to avoid a race condition. + * See https://lkml.org/lkml/2014/7/2/60 for discussion about this + * proposal and current concerns. Perhaps we should be setting + * KVM_REQ_EVENT only on certain events and not unconditionally? 
+ */ + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { + r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); + if (r != 0) + return r; + } if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } + return 0; } static void process_nmi(struct kvm_vcpu *vcpu) @@ -5898,10 +5952,10 @@ goto out; } - inject_pending_event(vcpu); - + if (inject_pending_event(vcpu, req_int_win) != 0) + req_immediate_exit = true; /* enable NMI/IRQ window open exits if needed */ - if (vcpu->arch.nmi_pending) + else if (vcpu->arch.nmi_pending) req_immediate_exit = kvm_x86_ops->enable_nmi_window(vcpu) != 0; else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) @@ -6089,6 +6143,20 @@ } if (need_resched()) { srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); +#ifdef CONFIG_X86_32 + /* + * This is a hack around a current bug on i386 + * that causes TIF_NEED_RESCHED not being propagated + * into the per-cpu counter. This causes cond_resched() + * which is what kvm_resched() calls to exit without + * actually rescheduling. We continue immediately with + * the loop but vcpu_enter_guest checks for the process + * flag and immediately exits and this loops busily + * runs forever. + */ + if (tif_need_resched()) + set_preempt_need_resched(); +#endif kvm_resched(vcpu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } @@ -6160,7 +6228,7 @@ frag->len -= len; } - if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { + if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. */ @@ -7218,6 +7286,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { + if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) + kvm_x86_ops->check_nested_events(vcpu, false); + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && !vcpu->arch.apf.halted) || !list_empty_careful(&vcpu->async_pf.done) --- linux-3.13.0.orig/arch/x86/kvm/x86.h +++ linux-3.13.0/arch/x86/kvm/x86.h @@ -78,15 +78,23 @@ vcpu->arch.mmio_gva = gva & PAGE_MASK; vcpu->arch.access = access; vcpu->arch.mmio_gfn = gfn; + vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation; +} + +static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation; } /* - * Clear the mmio cache info for the given gva, - * specially, if gva is ~0ul, we clear all mmio cache info. + * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we + * clear all mmio cache info. 
*/ +#define MMIO_GVA_ANY (~(gva_t)0) + static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) { - if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) + if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) return; vcpu->arch.mmio_gva = 0; @@ -94,7 +102,8 @@ static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) { - if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK)) + if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && + vcpu->arch.mmio_gva == (gva & PAGE_MASK)) return true; return false; @@ -102,7 +111,8 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) { - if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) + if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && + vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) return true; return false; @@ -125,5 +135,7 @@ #define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) extern u64 host_xcr0; +extern unsigned int min_timer_period_us; + extern struct static_key kvm_no_apic_vcpu; #endif --- linux-3.13.0.orig/arch/x86/mm/dump_pagetables.c +++ linux-3.13.0/arch/x86/mm/dump_pagetables.c @@ -30,11 +30,13 @@ unsigned long start_address; unsigned long current_address; const struct addr_marker *marker; + unsigned long lines; }; struct addr_marker { unsigned long start_address; const char *name; + unsigned long max_lines; }; /* indices for address_markers; keep sync'd w/ address_markers below */ @@ -45,6 +47,7 @@ LOW_KERNEL_NR, VMALLOC_START_NR, VMEMMAP_START_NR, + ESPFIX_START_NR, HIGH_KERNEL_NR, MODULES_VADDR_NR, MODULES_END_NR, @@ -67,6 +70,7 @@ { PAGE_OFFSET, "Low Kernel Mapping" }, { VMALLOC_START, "vmalloc() Area" }, { VMEMMAP_START, "Vmemmap" }, + { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, { __START_KERNEL_map, "High Kernel Mapping" }, { MODULES_VADDR, "Modules" }, { MODULES_END, "End Modules" }, @@ -163,7 +167,7 @@ pgprot_t new_prot, int level) { pgprotval_t prot, cur; - static const char units[] = "KMGTPE"; + static const char units[] = "BKMGTPE"; /* * If we have a "break" in the series, we need to flush the state that @@ -178,6 +182,7 @@ st->current_prot = new_prot; st->level = level; st->marker = address_markers; + st->lines = 0; seq_printf(m, "---[ %s ]---\n", st->marker->name); } else if (prot != cur || level != st->level || st->current_address >= st->marker[1].start_address) { @@ -188,17 +193,21 @@ /* * Now print the actual finished series */ - seq_printf(m, "0x%0*lx-0x%0*lx ", - width, st->start_address, - width, st->current_address); - - delta = (st->current_address - st->start_address) >> 10; - while (!(delta & 1023) && unit[1]) { - delta >>= 10; - unit++; + if (!st->marker->max_lines || + st->lines < st->marker->max_lines) { + seq_printf(m, "0x%0*lx-0x%0*lx ", + width, st->start_address, + width, st->current_address); + + delta = st->current_address - st->start_address; + while (!(delta & 1023) && unit[1]) { + delta >>= 10; + unit++; + } + seq_printf(m, "%9lu%c ", delta, *unit); + printk_prot(m, st->current_prot, st->level); } - seq_printf(m, "%9lu%c ", delta, *unit); - printk_prot(m, st->current_prot, st->level); + st->lines++; /* * We print markers for special areas of address space, @@ -206,7 +215,16 @@ * This helps in the interpretation. */ if (st->current_address >= st->marker[1].start_address) { + if (st->marker->max_lines && + st->lines > st->marker->max_lines) { + unsigned long nskip = + st->lines - st->marker->max_lines; + seq_printf(m, "... %lu entr%s skipped ... \n", + nskip, + nskip == 1 ? 
"y" : "ies"); + } st->marker++; + st->lines = 0; seq_printf(m, "---[ %s ]---\n", st->marker->name); } --- linux-3.13.0.orig/arch/x86/mm/fault.c +++ linux-3.13.0/arch/x86/mm/fault.c @@ -833,11 +833,8 @@ unsigned int fault) { struct task_struct *tsk = current; - struct mm_struct *mm = tsk->mm; int code = BUS_ADRERR; - up_read(&mm->mmap_sem); - /* Kernel mode? Handle exceptions or die: */ if (!(error_code & PF_USER)) { no_context(regs, error_code, address, SIGBUS, BUS_ADRERR); @@ -868,7 +865,6 @@ unsigned long address, unsigned int fault) { if (fatal_signal_pending(current) && !(error_code & PF_USER)) { - up_read(¤t->mm->mmap_sem); no_context(regs, error_code, address, 0, 0); return; } @@ -876,14 +872,11 @@ if (fault & VM_FAULT_OOM) { /* Kernel mode? Handle exceptions or die: */ if (!(error_code & PF_USER)) { - up_read(¤t->mm->mmap_sem); no_context(regs, error_code, address, SIGSEGV, SEGV_MAPERR); return; } - up_read(¤t->mm->mmap_sem); - /* * We ran out of memory, call the OOM killer, and return the * userspace (which will retry the fault, or kill us if we got @@ -894,6 +887,8 @@ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| VM_FAULT_HWPOISON_LARGE)) do_sigbus(regs, error_code, address, fault); + else if (fault & VM_FAULT_SIGSEGV) + bad_area_nosemaphore(regs, error_code, address); else BUG(); } @@ -1001,6 +996,12 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) { + if (!IS_ENABLED(CONFIG_X86_SMAP)) + return false; + + if (!static_cpu_has(X86_FEATURE_SMAP)) + return false; + if (error_code & PF_USER) return false; @@ -1087,11 +1088,9 @@ if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - if (static_cpu_has(X86_FEATURE_SMAP)) { - if (unlikely(smap_violation(error_code, regs))) { - bad_area_nosemaphore(regs, error_code, address); - return; - } + if (unlikely(smap_violation(error_code, regs))) { + bad_area_nosemaphore(regs, error_code, address); + return; } /* @@ -1211,6 +1210,7 @@ return; if (unlikely(fault & VM_FAULT_ERROR)) { + up_read(&mm->mmap_sem); mm_fault_error(regs, error_code, address, fault); return; } --- linux-3.13.0.orig/arch/x86/mm/gup.c +++ linux-3.13.0/arch/x86/mm/gup.c @@ -172,7 +172,7 @@ */ if (pmd_none(pmd) || pmd_trans_splitting(pmd)) return 0; - if (unlikely(pmd_large(pmd))) { + if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they --- linux-3.13.0.orig/arch/x86/mm/hugetlbpage.c +++ linux-3.13.0/arch/x86/mm/hugetlbpage.c @@ -58,11 +58,6 @@ { return NULL; } - -int pmd_huge_support(void) -{ - return 0; -} #else struct page * @@ -71,20 +66,21 @@ return ERR_PTR(-EINVAL); } +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
+ */ int pmd_huge(pmd_t pmd) { - return !!(pmd_val(pmd) & _PAGE_PSE); + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT; } int pud_huge(pud_t pud) { return !!(pud_val(pud) & _PAGE_PSE); } - -int pmd_huge_support(void) -{ - return 1; -} #endif /* x86_64 also uses this file */ --- linux-3.13.0.orig/arch/x86/mm/init_64.c +++ linux-3.13.0/arch/x86/mm/init_64.c @@ -1110,7 +1110,7 @@ unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PFN_ALIGN(&__stop___ex_table); unsigned long rodata_end = PFN_ALIGN(&__end_rodata); - unsigned long all_end = PFN_ALIGN(&_end); + unsigned long all_end; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); @@ -1121,7 +1121,16 @@ /* * The rodata/data/bss/brk section (but not the kernel text!) * should also be not-executable. + * + * We align all_end to PMD_SIZE because the existing mapping + * is a full PMD. If we would align _brk_end to PAGE_SIZE we + * split the PMD and the reminder between _brk_end and the end + * of the PMD will remain mapped executable. + * + * Any PMD which was setup after the one which covers _brk_end + * has been zapped already via cleanup_highmem(). */ + all_end = roundup((unsigned long)_brk_end, PMD_SIZE); set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); rodata_test(); --- linux-3.13.0.orig/arch/x86/mm/mmap.c +++ linux-3.13.0/arch/x86/mm/mmap.c @@ -35,12 +35,12 @@ .flags = -1, }; -static unsigned int stack_maxrandom_size(void) +static unsigned long stack_maxrandom_size(void) { - unsigned int max = 0; + unsigned long max = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { - max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; + max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; } return max; --- linux-3.13.0.orig/arch/x86/mm/pageattr.c +++ linux-3.13.0/arch/x86/mm/pageattr.c @@ -30,6 +30,7 @@ */ struct cpa_data { unsigned long *vaddr; + pgd_t *pgd; pgprot_t mask_set; pgprot_t mask_clr; int numpages; @@ -322,17 +323,9 @@ return prot; } -/* - * Lookup the page table entry for a virtual address. Return a pointer - * to the entry and the level of the mapping. - * - * Note: We return pud and pmd either when the entry is marked large - * or when the present bit is not set. Otherwise we would return a - * pointer to a nonexisting mapping. - */ -pte_t *lookup_address(unsigned long address, unsigned int *level) +static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address, + unsigned int *level) { - pgd_t *pgd = pgd_offset_k(address); pud_t *pud; pmd_t *pmd; @@ -361,8 +354,31 @@ return pte_offset_kernel(pmd, address); } + +/* + * Lookup the page table entry for a virtual address. Return a pointer + * to the entry and the level of the mapping. + * + * Note: We return pud and pmd either when the entry is marked large + * or when the present bit is not set. Otherwise we would return a + * pointer to a nonexisting mapping. 
+ */ +pte_t *lookup_address(unsigned long address, unsigned int *level) +{ + return __lookup_address_in_pgd(pgd_offset_k(address), address, level); +} EXPORT_SYMBOL_GPL(lookup_address); +static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, + unsigned int *level) +{ + if (cpa->pgd) + return __lookup_address_in_pgd(cpa->pgd + pgd_index(address), + address, level); + + return lookup_address(address, level); +} + /* * This is necessary because __pa() does not work on some * kinds of memory, like vmalloc() or the alloc_remap() @@ -389,7 +405,7 @@ psize = page_level_size(level); pmask = page_level_mask(level); offset = virt_addr & ~pmask; - phys_addr = pte_pfn(*pte) << PAGE_SHIFT; + phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; return (phys_addr | offset); } EXPORT_SYMBOL_GPL(slow_virt_to_phys); @@ -437,7 +453,7 @@ * Check for races, another CPU might have split this page * up already: */ - tmp = lookup_address(address, &level); + tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) goto out_unlock; @@ -543,7 +559,8 @@ } static int -__split_large_page(pte_t *kpte, unsigned long address, struct page *base) +__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, + struct page *base) { pte_t *pbase = (pte_t *)page_address(base); unsigned long pfn, pfninc = 1; @@ -556,7 +573,7 @@ * Check for races, another CPU might have split this page * up for us already: */ - tmp = lookup_address(address, &level); + tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) { spin_unlock(&pgd_lock); return 1; @@ -632,7 +649,8 @@ return 0; } -static int split_large_page(pte_t *kpte, unsigned long address) +static int split_large_page(struct cpa_data *cpa, pte_t *kpte, + unsigned long address) { struct page *base; @@ -644,15 +662,390 @@ if (!base) return -ENOMEM; - if (__split_large_page(kpte, address, base)) + if (__split_large_page(cpa, kpte, address, base)) __free_page(base); return 0; } +static bool try_to_free_pte_page(pte_t *pte) +{ + int i; + + for (i = 0; i < PTRS_PER_PTE; i++) + if (!pte_none(pte[i])) + return false; + + free_page((unsigned long)pte); + return true; +} + +static bool try_to_free_pmd_page(pmd_t *pmd) +{ + int i; + + for (i = 0; i < PTRS_PER_PMD; i++) + if (!pmd_none(pmd[i])) + return false; + + free_page((unsigned long)pmd); + return true; +} + +static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end) +{ + pte_t *pte = pte_offset_kernel(pmd, start); + + while (start < end) { + set_pte(pte, __pte(0)); + + start += PAGE_SIZE; + pte++; + } + + if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) { + pmd_clear(pmd); + return true; + } + return false; +} + +static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + if (unmap_pte_range(pmd, start, end)) + if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + pud_clear(pud); +} + +static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end) +{ + pmd_t *pmd = pmd_offset(pud, start); + + /* + * Not on a 2MB page boundary? + */ + if (start & (PMD_SIZE - 1)) { + unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; + unsigned long pre_end = min_t(unsigned long, end, next_page); + + __unmap_pmd_range(pud, pmd, start, pre_end); + + start = pre_end; + pmd++; + } + + /* + * Try to unmap in 2M chunks. 
+ */ + while (end - start >= PMD_SIZE) { + if (pmd_large(*pmd)) + pmd_clear(pmd); + else + __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE); + + start += PMD_SIZE; + pmd++; + } + + /* + * 4K leftovers? + */ + if (start < end) + return __unmap_pmd_range(pud, pmd, start, end); + + /* + * Try again to free the PMD page if haven't succeeded above. + */ + if (!pud_none(*pud)) + if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud))) + pud_clear(pud); +} + +static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end) +{ + pud_t *pud = pud_offset(pgd, start); + + /* + * Not on a GB page boundary? + */ + if (start & (PUD_SIZE - 1)) { + unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; + unsigned long pre_end = min_t(unsigned long, end, next_page); + + unmap_pmd_range(pud, start, pre_end); + + start = pre_end; + pud++; + } + + /* + * Try to unmap in 1G chunks? + */ + while (end - start >= PUD_SIZE) { + + if (pud_large(*pud)) + pud_clear(pud); + else + unmap_pmd_range(pud, start, start + PUD_SIZE); + + start += PUD_SIZE; + pud++; + } + + /* + * 2M leftovers? + */ + if (start < end) + unmap_pmd_range(pud, start, end); + + /* + * No need to try to free the PUD page because we'll free it in + * populate_pgd's error path + */ +} + +static int alloc_pte_page(pmd_t *pmd) +{ + pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pte) + return -1; + + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); + return 0; +} + +static int alloc_pmd_page(pud_t *pud) +{ + pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pmd) + return -1; + + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + return 0; +} + +static void populate_pte(struct cpa_data *cpa, + unsigned long start, unsigned long end, + unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, start); + + while (num_pages-- && start < end) { + + /* deal with the NX bit */ + if (!(pgprot_val(pgprot) & _PAGE_NX)) + cpa->pfn &= ~_PAGE_NX; + + set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot)); + + start += PAGE_SIZE; + cpa->pfn += PAGE_SIZE; + pte++; + } +} + +static int populate_pmd(struct cpa_data *cpa, + unsigned long start, unsigned long end, + unsigned num_pages, pud_t *pud, pgprot_t pgprot) +{ + unsigned int cur_pages = 0; + pmd_t *pmd; + + /* + * Not on a 2M boundary? + */ + if (start & (PMD_SIZE - 1)) { + unsigned long pre_end = start + (num_pages << PAGE_SHIFT); + unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; + + pre_end = min_t(unsigned long, pre_end, next_page); + cur_pages = (pre_end - start) >> PAGE_SHIFT; + cur_pages = min_t(unsigned int, num_pages, cur_pages); + + /* + * Need a PTE page? + */ + pmd = pmd_offset(pud, start); + if (pmd_none(*pmd)) + if (alloc_pte_page(pmd)) + return -1; + + populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); + + start = pre_end; + } + + /* + * We mapped them all? + */ + if (num_pages == cur_pages) + return cur_pages; + + while (end - start >= PMD_SIZE) { + + /* + * We cannot use a 1G page so allocate a PMD page if needed. + */ + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + pmd = pmd_offset(pud, start); + + set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + + start += PMD_SIZE; + cpa->pfn += PMD_SIZE; + cur_pages += PMD_SIZE >> PAGE_SHIFT; + } + + /* + * Map trailing 4K pages. 
+ */ + if (start < end) { + pmd = pmd_offset(pud, start); + if (pmd_none(*pmd)) + if (alloc_pte_page(pmd)) + return -1; + + populate_pte(cpa, start, end, num_pages - cur_pages, + pmd, pgprot); + } + return num_pages; +} + +static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, + pgprot_t pgprot) +{ + pud_t *pud; + unsigned long end; + int cur_pages = 0; + + end = start + (cpa->numpages << PAGE_SHIFT); + + /* + * Not on a Gb page boundary? => map everything up to it with + * smaller pages. + */ + if (start & (PUD_SIZE - 1)) { + unsigned long pre_end; + unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; + + pre_end = min_t(unsigned long, end, next_page); + cur_pages = (pre_end - start) >> PAGE_SHIFT; + cur_pages = min_t(int, (int)cpa->numpages, cur_pages); + + pud = pud_offset(pgd, start); + + /* + * Need a PMD page? + */ + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, + pud, pgprot); + if (cur_pages < 0) + return cur_pages; + + start = pre_end; + } + + /* We mapped them all? */ + if (cpa->numpages == cur_pages) + return cur_pages; + + pud = pud_offset(pgd, start); + + /* + * Map everything starting from the Gb boundary, possibly with 1G pages + */ + while (end - start >= PUD_SIZE) { + set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot))); + + start += PUD_SIZE; + cpa->pfn += PUD_SIZE; + cur_pages += PUD_SIZE >> PAGE_SHIFT; + pud++; + } + + /* Map trailing leftover */ + if (start < end) { + int tmp; + + pud = pud_offset(pgd, start); + if (pud_none(*pud)) + if (alloc_pmd_page(pud)) + return -1; + + tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, + pud, pgprot); + if (tmp < 0) + return cur_pages; + + cur_pages += tmp; + } + return cur_pages; +} + +/* + * Restrictions for kernel page table do not necessarily apply when mapping in + * an alternate PGD. + */ +static int populate_pgd(struct cpa_data *cpa, unsigned long addr) +{ + pgprot_t pgprot = __pgprot(_KERNPG_TABLE); + bool allocd_pgd = false; + pgd_t *pgd_entry; + pud_t *pud = NULL; /* shut up gcc */ + int ret; + + pgd_entry = cpa->pgd + pgd_index(addr); + + /* + * Allocate a PUD page and hand it down for mapping. + */ + if (pgd_none(*pgd_entry)) { + pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); + if (!pud) + return -1; + + set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE)); + allocd_pgd = true; + } + + pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); + pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); + + ret = populate_pud(cpa, addr, pgd_entry, pgprot); + if (ret < 0) { + unmap_pud_range(pgd_entry, addr, + addr + (cpa->numpages << PAGE_SHIFT)); + + if (allocd_pgd) { + /* + * If I allocated this PUD page, I can just as well + * free it in this error path. + */ + pgd_clear(pgd_entry); + free_page((unsigned long)pud); + } + return ret; + } + cpa->numpages = ret; + return 0; +} + static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, int primary) { + if (cpa->pgd) + return populate_pgd(cpa, vaddr); + /* * Ignore all non primary paths. 
*/ @@ -697,7 +1090,7 @@ else address = *cpa->vaddr; repeat: - kpte = lookup_address(address, &level); + kpte = _lookup_address_cpa(cpa, address, &level); if (!kpte) return __cpa_process_fault(cpa, address, primary); @@ -761,7 +1154,7 @@ /* * We have to split the large page: */ - err = split_large_page(kpte, address); + err = split_large_page(cpa, kpte, address); if (!err) { /* * Do a global flush tlb after splitting the large page @@ -910,6 +1303,8 @@ int ret, cache, checkalias; unsigned long baddr = 0; + memset(&cpa, 0, sizeof(cpa)); + /* * Check, if we are requested to change a not supported * feature: @@ -1356,6 +1751,7 @@ { unsigned long tempaddr = (unsigned long) page_address(page); struct cpa_data cpa = { .vaddr = &tempaddr, + .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), .mask_clr = __pgprot(0), @@ -1374,6 +1770,7 @@ { unsigned long tempaddr = (unsigned long) page_address(page); struct cpa_data cpa = { .vaddr = &tempaddr, + .pgd = NULL, .numpages = numpages, .mask_set = __pgprot(0), .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), @@ -1434,6 +1831,36 @@ #endif /* CONFIG_DEBUG_PAGEALLOC */ +int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, + unsigned numpages, unsigned long page_flags) +{ + int retval = -EINVAL; + + struct cpa_data cpa = { + .vaddr = &address, + .pfn = pfn, + .pgd = pgd, + .numpages = numpages, + .mask_set = __pgprot(0), + .mask_clr = __pgprot(0), + .flags = 0, + }; + + if (!(__supported_pte_mask & _PAGE_NX)) + goto out; + + if (!(page_flags & _PAGE_NX)) + cpa.mask_clr = __pgprot(_PAGE_NX); + + cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags); + + retval = __change_page_attr_set_clr(&cpa, 0); + __flush_tlb_all(); + +out: + return retval; +} + /* * The testcases use internal knowledge of the implementation that shouldn't * be exposed to the rest of the kernel. Include these directly here. 
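The pageattr.c hunks above add kernel_map_pages_in_pgd(), a wrapper around
__change_page_attr_set_clr() that lets a caller populate mappings in a page
table other than init_mm's; the EFI changes further down in this series use it
to build the runtime-services mappings in the trampoline PGD. The sketch below
is only an illustration of how such a caller might drive the helper: the
function name, the flag choice and the extern declaration standing in for the
real header are assumptions, not part of this patch. Note that, as in the
efi_64.c caller later in this series, the second argument is passed as a
physical address even though the parameter is named 'pfn';
populate_pte()/populate_pmd() consume it that way in this version of the code.

/*
 * Minimal usage sketch (not part of the patch): map 'npages' pages of
 * physical memory starting at 'phys' at virtual address 'va' inside a
 * private page table 'pgd'.
 */
#include <linux/kernel.h>
#include <asm/pgtable_types.h>

/* Signature as added by the pageattr.c hunk above. */
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags);

static int example_map_into_pgd(pgd_t *pgd, u64 phys, unsigned long va,
				unsigned int npages, bool executable)
{
	unsigned long flags = _PAGE_RW;		/* writable mapping */

	if (!executable)
		flags |= _PAGE_NX;		/* request a no-execute mapping */

	/*
	 * kernel_map_pages_in_pgd() ORs in _PAGE_PRESENT itself and clears
	 * _PAGE_NX again when the caller did not ask for it; in this version
	 * it returns -EINVAL when the CPU does not support NX at all.
	 */
	return kernel_map_pages_in_pgd(pgd, phys, va, npages, flags);
}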
--- linux-3.13.0.orig/arch/x86/net/bpf_jit.S +++ linux-3.13.0/arch/x86/net/bpf_jit.S @@ -140,7 +140,7 @@ push %r9; \ push SKBDATA; \ /* rsi already has offset */ \ - mov $SIZE,%ecx; /* size */ \ + mov $SIZE,%edx; /* size */ \ call bpf_internal_load_pointer_neg_helper; \ test %rax,%rax; \ pop SKBDATA; \ --- linux-3.13.0.orig/arch/x86/net/bpf_jit_comp.c +++ linux-3.13.0/arch/x86/net/bpf_jit_comp.c @@ -171,7 +171,7 @@ memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ header->pages = sz / PAGE_SIZE; - hole = sz - (proglen + sizeof(*header)); + hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); /* insert a random number of int3 instructions before BPF code */ *image_ptr = &header->image[prandom_u32() % hole]; --- linux-3.13.0.orig/arch/x86/pci/common.c +++ linux-3.13.0/arch/x86/pci/common.c @@ -448,6 +448,22 @@ DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"), }, }, + { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"), + }, + }, + { + .callback = set_scan_all, + .ident = "Stratus/NEC ftServer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"), + }, + }, {} }; --- linux-3.13.0.orig/arch/x86/pci/fixup.c +++ linux-3.13.0/arch/x86/pci/fixup.c @@ -350,9 +350,7 @@ pci_read_config_word(pdev, PCI_COMMAND, &config); if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; - dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); - if (!vga_default_device()) - vga_set_default_device(pdev); + dev_printk(KERN_DEBUG, &pdev->dev, "Video device with shadowed ROM\n"); } } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, --- linux-3.13.0.orig/arch/x86/pci/i386.c +++ linux-3.13.0/arch/x86/pci/i386.c @@ -162,6 +162,10 @@ return start; if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; + } else if (res->flags & IORESOURCE_MEM) { + /* The low 1MB range is reserved for ISA cards */ + if (start < BIOS_END) + start = BIOS_END; } return start; } --- linux-3.13.0.orig/arch/x86/platform/efi/efi.c +++ linux-3.13.0/arch/x86/platform/efi/efi.c @@ -12,6 +12,8 @@ * Bibo Mao * Chandramouli Narayanan * Huang Ying + * Copyright (C) 2013 SuSE Labs + * Borislav Petkov - runtime services VA mapping * * Copied from efi_32.c to eliminate the duplicated code between EFI * 32/64 support code. 
--ying 2007-10-26 @@ -436,7 +438,7 @@ * - Not within any part of the kernel * - Not the bios reserved area */ - if ((start+size >= __pa_symbol(_text) + if ((start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) || !e820_all_mapped(start, start+size, E820_RAM) || memblock_is_region_reserved(start, size)) { @@ -715,7 +717,7 @@ set_memory_nx(addr, npages); } -static void __init runtime_code_page_mkexec(void) +void __init runtime_code_page_mkexec(void) { efi_memory_desc_t *md; void *p; @@ -741,21 +743,56 @@ set_memory_uc(addr, npages); } +void __init old_map_region(efi_memory_desc_t *md) +{ + u64 start_pfn, end_pfn, end; + unsigned long size; + void *va; + + start_pfn = PFN_DOWN(md->phys_addr); + size = md->num_pages << PAGE_SHIFT; + end = md->phys_addr + size; + end_pfn = PFN_UP(end); + + if (pfn_range_is_mapped(start_pfn, end_pfn)) { + va = __va(md->phys_addr); + + if (!(md->attribute & EFI_MEMORY_WB)) + efi_memory_uc((u64)(unsigned long)va, size); + } else + va = efi_ioremap(md->phys_addr, size, + md->type, md->attribute); + + md->virt_addr = (u64) (unsigned long) va; + if (!va) + pr_err("ioremap of 0x%llX failed!\n", + (unsigned long long)md->phys_addr); +} + /* * This function will switch the EFI runtime services to virtual mode. - * Essentially, look through the EFI memmap and map every region that - * has the runtime attribute bit set in its memory descriptor and update - * that memory descriptor with the virtual address obtained from ioremap(). - * This enables the runtime services to be called without having to + * Essentially, we look through the EFI memmap and map every region that + * has the runtime attribute bit set in its memory descriptor into the + * ->trampoline_pgd page table using a top-down VA allocation scheme. + * + * The old method which used to update that memory descriptor with the + * virtual address obtained from ioremap() is still supported when the + * kernel is booted with efi=old_map on its command line. Same old + * method enabled the runtime services to be called without having to * thunk back into physical mode for every invocation. + * + * The new method does a pagetable switch in a preemption-safe manner + * so that we're in a different address space when calling a runtime + * function. For function arguments passing we do copy the PGDs of the + * kernel page table into ->trampoline_pgd prior to each call. 
*/ void __init efi_enter_virtual_mode(void) { efi_memory_desc_t *md, *prev_md = NULL; - efi_status_t status; + void *p, *new_memmap = NULL; unsigned long size; - u64 end, systab, start_pfn, end_pfn; - void *p, *va, *new_memmap = NULL; + efi_status_t status; + u64 end, systab; int count = 0; efi.systab = NULL; @@ -764,7 +801,6 @@ * We don't do virtual mode, since we don't do runtime services, on * non-native EFI */ - if (!efi_is_native()) { efi_unmap_memmap(); return; @@ -795,6 +831,7 @@ continue; } prev_md = md; + } for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { @@ -807,36 +844,24 @@ continue; } + efi_map_region(md); + size = md->num_pages << EFI_PAGE_SHIFT; end = md->phys_addr + size; - start_pfn = PFN_DOWN(md->phys_addr); - end_pfn = PFN_UP(end); - if (pfn_range_is_mapped(start_pfn, end_pfn)) { - va = __va(md->phys_addr); - - if (!(md->attribute & EFI_MEMORY_WB)) - efi_memory_uc((u64)(unsigned long)va, size); - } else - va = efi_ioremap(md->phys_addr, size, - md->type, md->attribute); - - md->virt_addr = (u64) (unsigned long) va; - - if (!va) { - pr_err("ioremap of 0x%llX failed!\n", - (unsigned long long)md->phys_addr); - continue; - } - systab = (u64) (unsigned long) efi_phys.systab; if (md->phys_addr <= systab && systab < end) { systab += md->virt_addr - md->phys_addr; + efi.systab = (efi_system_table_t *) (unsigned long) systab; } + new_memmap = krealloc(new_memmap, (count + 1) * memmap.desc_size, GFP_KERNEL); + if (!new_memmap) + goto err_out; + memcpy(new_memmap + (count * memmap.desc_size), md, memmap.desc_size); count++; @@ -844,6 +869,9 @@ BUG_ON(!efi.systab); + efi_setup_page_tables(); + efi_sync_low_kernel_mappings(); + status = phys_efi_set_virtual_address_map( memmap.desc_size * count, memmap.desc_size, @@ -876,8 +904,8 @@ efi.query_variable_info = virt_efi_query_variable_info; efi.update_capsule = virt_efi_update_capsule; efi.query_capsule_caps = virt_efi_query_capsule_caps; - if (__supported_pte_mask & _PAGE_NX) - runtime_code_page_mkexec(); + + efi_runtime_mkexec(); kfree(new_memmap); @@ -887,6 +915,11 @@ EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL); + + return; + + err_out: + pr_err("Error reallocating memory, EFI runtime non-functional!\n"); } /* @@ -1006,3 +1039,15 @@ return EFI_SUCCESS; } EXPORT_SYMBOL_GPL(efi_query_variable_store); + +static int __init parse_efi_cmdline(char *str) +{ + if (*str == '=') + str++; + + if (!strncmp(str, "old_map", 7)) + set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); + + return 0; +} +early_param("efi", parse_efi_cmdline); --- linux-3.13.0.orig/arch/x86/platform/efi/efi_32.c +++ linux-3.13.0/arch/x86/platform/efi/efi_32.c @@ -37,9 +37,16 @@ * claim EFI runtime service handler exclusively and to duplicate a memory in * low memory space say 0 - 3G. */ - static unsigned long efi_rt_eflags; +void efi_sync_low_kernel_mappings(void) {} +void efi_setup_page_tables(void) {} + +void __init efi_map_region(efi_memory_desc_t *md) +{ + old_map_region(md); +} + void efi_call_phys_prelog(void) { struct desc_ptr gdt_descr; @@ -67,3 +74,9 @@ local_irq_restore(efi_rt_eflags); } + +void __init efi_runtime_mkexec(void) +{ + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} --- linux-3.13.0.orig/arch/x86/platform/efi/efi_64.c +++ linux-3.13.0/arch/x86/platform/efi/efi_64.c @@ -38,10 +38,28 @@ #include #include #include +#include static pgd_t *save_pgd __initdata; static unsigned long efi_flags __initdata; +/* + * We allocate runtime services regions bottom-up, starting from -4G, i.e. 
+ * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G. + */ +static u64 efi_va = -4 * (1UL << 30); +#define EFI_VA_END (-68 * (1UL << 30)) + +/* + * Scratch space used for switching the pagetable in the EFI stub + */ +struct efi_scratch { + u64 r15; + u64 prev_cr3; + pgd_t *efi_pgt; + bool use_pgd; +}; + static void __init early_code_mapping_set_exec(int executable) { efi_memory_desc_t *md; @@ -65,6 +83,9 @@ int pgd; int n_pgds; + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + early_code_mapping_set_exec(1); local_irq_save(efi_flags); @@ -86,6 +107,10 @@ */ int pgd; int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); + + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + for (pgd = 0; pgd < n_pgds; pgd++) set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]); kfree(save_pgd); @@ -94,6 +119,90 @@ early_code_mapping_set_exec(0); } +/* + * Add low kernel mappings for passing arguments to EFI functions. + */ +void efi_sync_low_kernel_mappings(void) +{ + unsigned num_pgds; + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + + if (efi_enabled(EFI_OLD_MEMMAP)) + return; + + num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET); + + memcpy(pgd + pgd_index(PAGE_OFFSET), + init_mm.pgd + pgd_index(PAGE_OFFSET), + sizeof(pgd_t) * num_pgds); +} + +void efi_setup_page_tables(void) +{ + efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd; + + if (!efi_enabled(EFI_OLD_MEMMAP)) + efi_scratch.use_pgd = true; +} + +static void __init __map_region(efi_memory_desc_t *md, u64 va) +{ + pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); + unsigned long pf = 0, size; + u64 end; + + if (!(md->attribute & EFI_MEMORY_WB)) + pf |= _PAGE_PCD; + + size = md->num_pages << PAGE_SHIFT; + end = va + size; + + if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) + pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", + md->phys_addr, va); +} + +void __init efi_map_region(efi_memory_desc_t *md) +{ + unsigned long size = md->num_pages << PAGE_SHIFT; + u64 pa = md->phys_addr; + + if (efi_enabled(EFI_OLD_MEMMAP)) + return old_map_region(md); + + /* + * Make sure the 1:1 mappings are present as a catch-all for b0rked + * firmware which doesn't update all internal pointers after switching + * to virtual mode and would otherwise crap on us. + */ + __map_region(md, md->phys_addr); + + efi_va -= size; + + /* Is PA 2M-aligned? 
*/ + if (!(pa & (PMD_SIZE - 1))) { + efi_va &= PMD_MASK; + } else { + u64 pa_offset = pa & (PMD_SIZE - 1); + u64 prev_va = efi_va; + + /* get us the same offset within this 2M page */ + efi_va = (efi_va & PMD_MASK) + pa_offset; + + if (efi_va > prev_va) + efi_va -= PMD_SIZE; + } + + if (efi_va < EFI_VA_END) { + pr_warn(FW_WARN "VA address range overflow!\n"); + return; + } + + /* Do the VA map */ + __map_region(md, efi_va); + md->virt_addr = efi_va; +} + void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size, u32 type, u64 attribute) { @@ -113,3 +222,12 @@ return (void __iomem *)__va(phys_addr); } + +void __init efi_runtime_mkexec(void) +{ + if (!efi_enabled(EFI_OLD_MEMMAP)) + return; + + if (__supported_pte_mask & _PAGE_NX) + runtime_code_page_mkexec(); +} --- linux-3.13.0.orig/arch/x86/platform/efi/efi_stub_64.S +++ linux-3.13.0/arch/x86/platform/efi/efi_stub_64.S @@ -34,10 +34,47 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp + /* stolen from gcc */ + .macro FLUSH_TLB_ALL + movq %r15, efi_scratch(%rip) + movq %r14, efi_scratch+8(%rip) + movq %cr4, %r15 + movq %r15, %r14 + andb $0x7f, %r14b + movq %r14, %cr4 + movq %r15, %cr4 + movq efi_scratch+8(%rip), %r14 + movq efi_scratch(%rip), %r15 + .endm + + .macro SWITCH_PGT + cmpb $0, efi_scratch+24(%rip) + je 1f + movq %r15, efi_scratch(%rip) # r15 + # save previous CR3 + movq %cr3, %r15 + movq %r15, efi_scratch+8(%rip) # prev_cr3 + movq efi_scratch+16(%rip), %r15 # EFI pgt + movq %r15, %cr3 + 1: + .endm + + .macro RESTORE_PGT + cmpb $0, efi_scratch+24(%rip) + je 2f + movq efi_scratch+8(%rip), %r15 + movq %r15, %cr3 + movq efi_scratch(%rip), %r15 + FLUSH_TLB_ALL + 2: + .endm + ENTRY(efi_call0) SAVE_XMM subq $32, %rsp + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -47,7 +84,9 @@ SAVE_XMM subq $32, %rsp mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -57,7 +96,9 @@ SAVE_XMM subq $32, %rsp mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -68,7 +109,9 @@ subq $32, %rsp mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -80,7 +123,9 @@ mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $32, %rsp RESTORE_XMM ret @@ -93,7 +138,9 @@ mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $48, %rsp RESTORE_XMM ret @@ -109,8 +156,15 @@ mov %r8, %r9 mov %rcx, %r8 mov %rsi, %rcx + SWITCH_PGT call *%rdi + RESTORE_PGT addq $48, %rsp RESTORE_XMM ret ENDPROC(efi_call6) + + .data +ENTRY(efi_scratch) + .fill 3,8,0 + .byte 0 --- linux-3.13.0.orig/arch/x86/syscalls/syscall_32.tbl +++ linux-3.13.0/arch/x86/syscalls/syscall_32.tbl @@ -357,3 +357,7 @@ 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 349 i386 kcmp sys_kcmp 350 i386 finit_module sys_finit_module +351 i386 sched_setattr sys_ni_syscall +352 i386 sched_getattr sys_ni_syscall +353 i386 renameat2 sys_ni_syscall +354 i386 seccomp sys_seccomp --- linux-3.13.0.orig/arch/x86/syscalls/syscall_64.tbl +++ linux-3.13.0/arch/x86/syscalls/syscall_64.tbl @@ -212,10 +212,10 @@ 203 common sched_setaffinity sys_sched_setaffinity 204 common sched_getaffinity sys_sched_getaffinity 205 64 set_thread_area -206 common io_setup sys_io_setup +206 64 io_setup sys_io_setup 207 common io_destroy sys_io_destroy 208 common io_getevents sys_io_getevents -209 common io_submit sys_io_submit +209 64 io_submit sys_io_submit 210 common io_cancel sys_io_cancel 211 64 get_thread_area 
212 common lookup_dcookie sys_lookup_dcookie @@ -320,6 +320,10 @@ 311 64 process_vm_writev sys_process_vm_writev 312 common kcmp sys_kcmp 313 common finit_module sys_finit_module +314 common sched_setattr sys_ni_syscall +315 common sched_getattr sys_ni_syscall +316 common renameat2 sys_ni_syscall +317 common seccomp sys_seccomp # # x32-specific system call numbers start at 512 to avoid cache impact @@ -356,3 +360,5 @@ 540 x32 process_vm_writev compat_sys_process_vm_writev 541 x32 setsockopt compat_sys_setsockopt 542 x32 getsockopt compat_sys_getsockopt +543 x32 io_setup compat_sys_io_setup +544 x32 io_submit compat_sys_io_submit --- linux-3.13.0.orig/arch/x86/vdso/vclock_gettime.c +++ linux-3.13.0/arch/x86/vdso/vclock_gettime.c @@ -85,18 +85,15 @@ cycle_t ret; u64 last; u32 version; + u32 migrate_count; u8 flags; unsigned cpu, cpu1; /* - * Note: hypervisor must guarantee that: - * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. - * 2. that per-CPU pvclock time info is updated if the - * underlying CPU changes. - * 3. that version is increased whenever underlying CPU - * changes. - * + * When looping to get a consistent (time-info, tsc) pair, we + * also need to deal with the possibility we can switch vcpus, + * so make sure we always re-fetch time-info for the current vcpu. */ do { cpu = __getcpu() & VGETCPU_CPU_MASK; @@ -105,20 +102,27 @@ * __getcpu() calls (Gleb). */ - pvti = get_pvti(cpu); + /* Make sure migrate_count will change if we leave the VCPU. */ + do { + pvti = get_pvti(cpu); + migrate_count = pvti->migrate_count; + + cpu1 = cpu; + cpu = __getcpu() & VGETCPU_CPU_MASK; + } while (unlikely(cpu != cpu1)); version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); /* * Test we're still on the cpu as well as the version. - * We could have been migrated just after the first - * vgetcpu but before fetching the version, so we - * wouldn't notice a version change. + * - We must read TSC of pvti's VCPU. + * - KVM doesn't follow the versioning protocol, so data could + * change before version if we left the VCPU. */ - cpu1 = __getcpu() & VGETCPU_CPU_MASK; - } while (unlikely(cpu != cpu1 || - (pvti->pvti.version & 1) || - pvti->pvti.version != version)); + smp_rmb(); + } while (unlikely((pvti->pvti.version & 1) || + pvti->pvti.version != version || + pvti->migrate_count != migrate_count)); if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) *mode = VCLOCK_NONE; --- linux-3.13.0.orig/arch/x86/vdso/vdso32/sigreturn.S +++ linux-3.13.0/arch/x86/vdso/vdso32/sigreturn.S @@ -17,6 +17,7 @@ .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function + nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: .LSTART_sigreturn: --- linux-3.13.0.orig/arch/x86/vdso/vma.c +++ linux-3.13.0/arch/x86/vdso/vma.c @@ -117,30 +117,45 @@ struct linux_binprm; -/* Put the vdso above the (randomized) stack with another randomized offset. - This way there is no hole in the middle of address space. - To save memory make sure it is still in the same PTE as the stack top. - This doesn't give that many random bits */ +/* + * Put the vdso above the (randomized) stack with another randomized + * offset. This way there is no hole in the middle of address space. + * To save memory make sure it is still in the same PTE as the stack + * top. This doesn't give that many random bits. + * + * Note that this algorithm is imperfect: the distribution of the vdso + * start address within a PMD is biased toward the end. + * + * Only used for the 64-bit and x32 vdsos. 
+ */ static unsigned long vdso_addr(unsigned long start, unsigned len) { unsigned long addr, end; unsigned offset; - end = (start + PMD_SIZE - 1) & PMD_MASK; + + /* + * Round up the start address. It can start out unaligned as a result + * of stack start randomization. + */ + start = PAGE_ALIGN(start); + + /* Round the lowest possible end address up to a PMD boundary. */ + end = (start + len + PMD_SIZE - 1) & PMD_MASK; if (end >= TASK_SIZE_MAX) end = TASK_SIZE_MAX; end -= len; - /* This loses some more bits than a modulo, but is cheaper */ - offset = get_random_int() & (PTRS_PER_PTE - 1); - addr = start + (offset << PAGE_SHIFT); - if (addr >= end) - addr = end; + + if (end > start) { + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); + addr = start + (offset << PAGE_SHIFT); + } else { + addr = start; + } /* - * page-align it here so that get_unmapped_area doesn't - * align it wrongfully again to the next page. addr can come in 4K - * unaligned here as a result of stack start randomization. + * Forcibly align the final address in case we have a hardware + * issue that requires alignment for performance reasons. */ - addr = PAGE_ALIGN(addr); addr = align_vdso_addr(addr); return addr; --- linux-3.13.0.orig/arch/x86/xen/grant-table.c +++ linux-3.13.0/arch/x86/xen/grant-table.c @@ -36,92 +36,126 @@ #include #include +#include #include #include #include #include +#include #include -static int map_pte_fn(pte_t *pte, struct page *pmd_page, - unsigned long addr, void *data) -{ - unsigned long **frames = (unsigned long **)data; - - set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); - (*frames)++; - return 0; -} - -/* - * This function is used to map shared frames to store grant status. It is - * different from map_pte_fn above, the frames type here is uint64_t. 
- */ -static int map_pte_fn_status(pte_t *pte, struct page *pmd_page, - unsigned long addr, void *data) -{ - uint64_t **frames = (uint64_t **)data; - - set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); - (*frames)++; - return 0; -} - -static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, - unsigned long addr, void *data) -{ - - set_pte_at(&init_mm, addr, pte, __pte(0)); - return 0; -} +static struct gnttab_vm_area { + struct vm_struct *area; + pte_t **ptes; +} gnttab_shared_vm_area, gnttab_status_vm_area; int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, void **__shared) { - int rc; void *shared = *__shared; + unsigned long addr; + unsigned long i; + + if (shared == NULL) + *__shared = shared = gnttab_shared_vm_area.area->addr; - if (shared == NULL) { - struct vm_struct *area = - alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); - BUG_ON(area == NULL); - shared = area->addr; - *__shared = shared; + addr = (unsigned long)shared; + + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], + mfn_pte(frames[i], PAGE_KERNEL)); + addr += PAGE_SIZE; } - rc = apply_to_page_range(&init_mm, (unsigned long)shared, - PAGE_SIZE * nr_gframes, - map_pte_fn, &frames); - return rc; + return 0; } int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, grant_status_t **__shared) { - int rc; grant_status_t *shared = *__shared; + unsigned long addr; + unsigned long i; + + if (shared == NULL) + *__shared = shared = gnttab_status_vm_area.area->addr; + + addr = (unsigned long)shared; - if (shared == NULL) { - /* No need to pass in PTE as we are going to do it - * in apply_to_page_range anyhow. */ - struct vm_struct *area = - alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); - BUG_ON(area == NULL); - shared = area->addr; - *__shared = shared; + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i], + mfn_pte(frames[i], PAGE_KERNEL)); + addr += PAGE_SIZE; } - rc = apply_to_page_range(&init_mm, (unsigned long)shared, - PAGE_SIZE * nr_gframes, - map_pte_fn_status, &frames); - return rc; + return 0; } void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) { - apply_to_page_range(&init_mm, (unsigned long)shared, - PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); + pte_t **ptes; + unsigned long addr; + unsigned long i; + + if (shared == gnttab_status_vm_area.area->addr) + ptes = gnttab_status_vm_area.ptes; + else + ptes = gnttab_shared_vm_area.ptes; + + addr = (unsigned long)shared; + + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, ptes[i], __pte(0)); + addr += PAGE_SIZE; + } +} + +static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames) +{ + area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL); + if (area->ptes == NULL) + return -ENOMEM; + + area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes); + if (area->area == NULL) { + kfree(area->ptes); + return -ENOMEM; + } + + return 0; +} + +static void arch_gnttab_vfree(struct gnttab_vm_area *area) +{ + free_vm_area(area->area); + kfree(area->ptes); +} + +int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status) +{ + int ret; + + if (!xen_pv_domain()) + return 0; + + ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared); + if (ret < 0) + return ret; + + /* + * Always allocate the space for the status frames in case + * we're migrated to a host with V2 support. 
+ */ + ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status); + if (ret < 0) + goto err; + + return 0; + err: + arch_gnttab_vfree(&gnttab_shared_vm_area); + return -ENOMEM; } --- linux-3.13.0.orig/arch/x86/xen/platform-pci-unplug.c +++ linux-3.13.0/arch/x86/xen/platform-pci-unplug.c @@ -69,6 +69,80 @@ return 0; } +bool xen_has_pv_devices() +{ + if (!xen_domain()) + return false; + + /* PV domains always have them. */ + if (xen_pv_domain()) + return true; + + /* And user has xen_platform_pci=0 set in guest config as + * driver did not modify the value. */ + if (xen_platform_pci_unplug == 0) + return false; + + if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER) + return false; + + if (xen_platform_pci_unplug & XEN_UNPLUG_ALL) + return true; + + /* This is an odd one - we are going to run legacy + * and PV drivers at the same time. */ + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) + return true; + + /* And the caller has to follow with xen_pv_{disk,nic}_devices + * to be certain which driver can load. */ + return false; +} +EXPORT_SYMBOL_GPL(xen_has_pv_devices); + +static bool __xen_has_pv_device(int state) +{ + /* HVM domains might or might not */ + if (xen_hvm_domain() && (xen_platform_pci_unplug & state)) + return true; + + return xen_has_pv_devices(); +} + +bool xen_has_pv_nic_devices(void) +{ + return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL); +} +EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices); + +bool xen_has_pv_disk_devices(void) +{ + return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS | + XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL); +} +EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices); + +/* + * This one is odd - it determines whether you want to run PV _and_ + * legacy (IDE) drivers together. This combination is only possible + * under HVM. + */ +bool xen_has_pv_and_legacy_disk_devices(void) +{ + if (!xen_domain()) + return false; + + /* N.B. 
This is only ever used in HVM mode */ + if (xen_pv_domain()) + return false; + + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices); + void xen_unplug_emulated_devices(void) { int r; --- linux-3.13.0.orig/arch/x86/xen/setup.c +++ linux-3.13.0/arch/x86/xen/setup.c @@ -556,13 +556,7 @@ } #endif /* CONFIG_X86_64 */ } -void xen_enable_nmi(void) -{ -#ifdef CONFIG_X86_64 - if (register_callback(CALLBACKTYPE_nmi, nmi)) - BUG(); -#endif -} + void __init xen_arch_setup(void) { xen_panic_handler_init(); @@ -580,7 +574,6 @@ xen_enable_sysenter(); xen_enable_syscall(); - xen_enable_nmi(); #ifdef CONFIG_ACPI if (!(xen_start_info->flags & SIF_INITDOMAIN)) { printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); --- linux-3.13.0.orig/arch/x86/xen/spinlock.c +++ linux-3.13.0/arch/x86/xen/spinlock.c @@ -274,7 +274,7 @@ printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); return; } - + printk(KERN_DEBUG "xen: PV spinlocks enabled\n"); pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning); pv_lock_ops.unlock_kick = xen_unlock_kick; } @@ -290,6 +290,9 @@ if (!xen_pvspin) return 0; + if (!xen_domain()) + return 0; + static_key_slow_inc(¶virt_ticketlocks_enabled); return 0; } --- linux-3.13.0.orig/arch/x86/xen/time.c +++ linux-3.13.0/arch/x86/xen/time.c @@ -444,7 +444,7 @@ irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| - IRQF_FORCE_RESUME, + IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, name, NULL); memcpy(evt, xen_clockevent, sizeof(*evt)); --- linux-3.13.0.orig/arch/xtensa/include/asm/pgtable.h +++ linux-3.13.0/arch/xtensa/include/asm/pgtable.h @@ -67,7 +67,12 @@ #define VMALLOC_START 0xC0000000 #define VMALLOC_END 0xC7FEFFFF #define TLBTEMP_BASE_1 0xC7FF0000 -#define TLBTEMP_BASE_2 0xC7FF8000 +#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) +#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE +#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) +#else +#define TLBTEMP_SIZE ICACHE_WAY_SIZE +#endif /* * For the Xtensa architecture, the PTE layout is as follows: --- linux-3.13.0.orig/arch/xtensa/include/asm/traps.h +++ linux-3.13.0/arch/xtensa/include/asm/traps.h @@ -22,25 +22,37 @@ static inline void spill_registers(void) { - +#if XCHAL_NUM_AREGS > 16 __asm__ __volatile__ ( - "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" - "mov a12, a0\n\t" - "rsr a13, sar\n\t" - "xsr a14, ps\n\t" - "movi a0, _spill_registers\n\t" - "rsync\n\t" - "callx0 a0\n\t" - "mov a0, a12\n\t" - "wsr a13, sar\n\t" - "wsr a14, ps\n\t" - : : -#if defined(CONFIG_FRAME_POINTER) - : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", + " call12 1f\n" + " _j 2f\n" + " retw\n" + " .align 4\n" + "1:\n" + " _entry a1, 48\n" + " addi a12, a0, 3\n" +#if XCHAL_NUM_AREGS > 32 + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" + " _entry a1, 48\n" + " mov a12, a0\n" + " .endr\n" +#endif + " _entry a1, 48\n" +#if XCHAL_NUM_AREGS % 12 == 0 + " mov a8, a8\n" +#elif XCHAL_NUM_AREGS % 12 == 4 + " mov a12, a12\n" +#elif XCHAL_NUM_AREGS % 12 == 8 + " mov a4, a4\n" +#endif + " retw\n" + "2:\n" + : : : "a12", "a13", "memory"); #else - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", + __asm__ __volatile__ ( + " mov a12, a12\n" + : : : "memory"); #endif - "memory"); } #endif /* _XTENSA_TRAPS_H */ --- linux-3.13.0.orig/arch/xtensa/include/asm/uaccess.h +++ linux-3.13.0/arch/xtensa/include/asm/uaccess.h @@ -52,7 +52,12 @@ */ .macro get_fs ad, sp GET_CURRENT(\ad,\sp) +#if 
THREAD_CURRENT_DS > 1020 + addi \ad, \ad, TASK_THREAD + l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD +#else l32i \ad, \ad, THREAD_CURRENT_DS +#endif .endm /* --- linux-3.13.0.orig/arch/xtensa/include/uapi/asm/ioctls.h +++ linux-3.13.0/arch/xtensa/include/uapi/asm/ioctls.h @@ -28,17 +28,17 @@ #define TCSETSW 0x5403 #define TCSETSF 0x5404 -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ +#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ +#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ +#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) #define TCFLSH _IO('t', 31) -#define TIOCSWINSZ _IOW('t', 103, struct winsize) -#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ +#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ @@ -88,7 +88,6 @@ #define TIOCSETD _IOW('T', 35, int) #define TIOCGETD _IOR('T', 36, int) #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ -#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ @@ -114,8 +113,10 @@ #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ -#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ -#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ +#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ + /* _IOR('T', 90, struct serial_multiport_struct) */ +#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ + /* _IOW('T', 91, struct serial_multiport_struct) */ #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ --- linux-3.13.0.orig/arch/xtensa/include/uapi/asm/unistd.h +++ linux-3.13.0/arch/xtensa/include/uapi/asm/unistd.h @@ -384,7 +384,8 @@ #define __NR_pivot_root 175 __SYSCALL(175, sys_pivot_root, 2) #define __NR_umount 176 -__SYSCALL(176, sys_umount, 2) +__SYSCALL(176, sys_oldumount, 1) +#define __ARCH_WANT_SYS_OLDUMOUNT #define __NR_swapoff 177 __SYSCALL(177, sys_swapoff, 1) #define __NR_sync 178 --- linux-3.13.0.orig/arch/xtensa/kernel/entry.S +++ linux-3.13.0/arch/xtensa/kernel/entry.S @@ -1001,9 +1001,8 @@ movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp + _bgeui a6, SYS_XTENSA_COUNT, .Lill + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp /* Fall through for ATOMIC_CMP_SWP. */ @@ -1015,27 +1014,26 @@ l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 - addi a6, a6, 1 # restore a6 (really necessary?) 
rfe 1: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 0 # return 0 (note that we cannot set - addi a6, a6, 1 # restore a6 (really necessary?) rfe .Lnswp: /* Atomic set, add, and exg_add. */ TRY l32i a7, a3, 0 # orig + addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set + addi a6, a6, SYS_XTENSA_ATOMIC_SET TRY s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 l32i a7, a0, PT_AREG7 # restore a7 l32i a0, a0, PT_AREG0 # restore a0 - addi a6, a6, 1 # restore a6 (really necessary?) rfe CATCH @@ -1044,7 +1042,7 @@ movi a2, -EFAULT rfe -.Lill: l32i a7, a2, PT_AREG0 # restore a7 +.Lill: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EINVAL rfe @@ -1117,6 +1115,13 @@ * We basically restore WINDOWBASE and WINDOWSTART to the condition when * we entered the spill routine and jump to the user exception handler. * + * Note that we only need to restore the bits in windowstart that have not + * been spilled yet by the _spill_register routine. Luckily, a3 contains a + * rotated windowstart with only those bits set for frames that haven't been + * spilled yet. Because a3 is rotated such that bit 0 represents the register + * frame for the current windowbase - 1, we need to rotate a3 left by the + * value of the current windowbase + 1 and move it to windowstart. + * * a0: value of depc, original value in depc * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE * a3: exctable, original value in excsave1 @@ -1131,10 +1136,15 @@ /* We need to make sure the current registers (a0-a3) are preserved. * To do this, we simply set the bit for the current window frame * in WS, so that the exception handlers save them to the task stack. + * + * Note: we use a3 to set the windowbase, so we take a special care + * of it, saving it in the original _spill_registers frame across + * the exception handler call. */ xsr a3, excsave1 # get spill-mask slli a3, a3, 1 # shift left by one + addi a3, a3, 1 # set the bit for the current window frame slli a2, a3, 32-WSBITS src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... @@ -1588,7 +1598,7 @@ rsr a0, excvaddr bltu a0, a3, 2f - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) + addi a1, a0, -TLBTEMP_SIZE bgeu a1, a3, 2f /* Check if we have to restore an ITLB mapping. */ @@ -1794,6 +1804,43 @@ ENDPROC(system_call) +/* + * Spill live registers on the kernel stack macro. + * + * Entry condition: ps.woe is set, ps.excm is cleared + * Exit condition: windowstart has single bit set + * May clobber: a12, a13 + */ + .macro spill_registers_kernel + +#if XCHAL_NUM_AREGS > 16 + call12 1f + _j 2f + retw + .align 4 +1: + _entry a1, 48 + addi a12, a0, 3 +#if XCHAL_NUM_AREGS > 32 + .rept (XCHAL_NUM_AREGS - 32) / 12 + _entry a1, 48 + mov a12, a0 + .endr +#endif + _entry a1, 48 +#if XCHAL_NUM_AREGS % 12 == 0 + mov a8, a8 +#elif XCHAL_NUM_AREGS % 12 == 4 + mov a12, a12 +#elif XCHAL_NUM_AREGS % 12 == 8 + mov a4, a4 +#endif + retw +2: +#else + mov a12, a12 +#endif + .endm /* * Task switch. 
@@ -1806,21 +1853,25 @@ entry a1, 16 - mov a12, a2 # preserve 'prev' (a2) - mov a13, a3 # and 'next' (a3) + mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO l32i a5, a3, TASK_THREAD_INFO - save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER - s32i a0, a12, THREAD_RA # save return address - s32i a1, a12, THREAD_SP # save stack pointer +#if THREAD_RA > 1020 || THREAD_SP > 1020 + addi a10, a2, TASK_THREAD + s32i a0, a10, THREAD_RA - TASK_THREAD # save return address + s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer +#else + s32i a0, a2, THREAD_RA # save return address + s32i a1, a2, THREAD_SP # save stack pointer +#endif /* Disable ints while we manipulate the stack pointer. */ - movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL - xsr a14, ps + rsil a14, LOCKLEVEL rsr a3, excsave1 rsync s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ @@ -1835,7 +1886,7 @@ /* Flush register file. */ - call0 _spill_registers # destroys a3, a4, and SAR + spill_registers_kernel /* Set kernel stack (and leave critical section) * Note: It's save to set it here. The stack will not be overwritten @@ -1851,13 +1902,12 @@ /* restore context of the task 'next' */ - l32i a0, a13, THREAD_RA # restore return address - l32i a1, a13, THREAD_SP # restore stack pointer + l32i a0, a11, THREAD_RA # restore return address + l32i a1, a11, THREAD_SP # restore stack pointer - load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps - mov a2, a12 # return 'prev' rsync retw --- linux-3.13.0.orig/arch/xtensa/kernel/pci-dma.c +++ linux-3.13.0/arch/xtensa/kernel/pci-dma.c @@ -49,9 +49,8 @@ /* We currently don't support coherent memory outside KSEG */ - if (ret < XCHAL_KSEG_CACHED_VADDR - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); if (ret != 0) { @@ -68,10 +67,11 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; + unsigned long addr = (unsigned long)vaddr + + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; - if (addr < 0 || addr >= XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); free_pages(addr, get_order(size)); } --- linux-3.13.0.orig/arch/xtensa/kernel/vectors.S +++ linux-3.13.0/arch/xtensa/kernel/vectors.S @@ -376,38 +376,42 @@ beqz a2, 1f # if at start of vector, don't restore addi a0, a0, -128 - bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 - bbsi a0, 7, 2f + bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12 + + /* + * This fixup handler is for the extremely unlikely case where the + * overflow handler's reference thru a0 gets a hardware TLB refill + * that bumps out the (distinct, aliasing) TLB entry that mapped its + * prior references thru a9/a13, and where our reference now thru + * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill). + */ + movi a2, window_overflow_restore_a0_fixup + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + + bbsi.l a0, 7, 2f /* * Restore a0 as saved by _WindowOverflow8(). 
- * - * FIXME: we really need a fixup handler for this L32E, - * for the extremely unlikely case where the overflow handler's - * reference thru a0 gets a hardware TLB refill that bumps out - * the (distinct, aliasing) TLB entry that mapped its prior - * references thru a9, and where our reference now thru a9 - * gets a 2nd-level miss exception (not hardware TLB refill). */ - l32e a2, a9, -16 - wsr a2, depc # replace the saved a0 - j 1f + l32e a0, a9, -16 + wsr a0, depc # replace the saved a0 + j 3f 2: /* * Restore a0 as saved by _WindowOverflow12(). - * - * FIXME: we really need a fixup handler for this L32E, - * for the extremely unlikely case where the overflow handler's - * reference thru a0 gets a hardware TLB refill that bumps out - * the (distinct, aliasing) TLB entry that mapped its prior - * references thru a13, and where our reference now thru a13 - * gets a 2nd-level miss exception (not hardware TLB refill). */ - l32e a2, a13, -16 - wsr a2, depc # replace the saved a0 + l32e a0, a13, -16 + wsr a0, depc # replace the saved a0 +3: + xsr a3, excsave1 + movi a0, 0 + s32i a0, a3, EXC_TABLE_FIXUP + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE 1: /* * Restore WindowBase while leaving all address registers restored. @@ -449,6 +453,7 @@ s32i a0, a2, PT_DEPC +_DoubleExceptionVector_handle_exception: addx4 a0, a0, a3 l32i a0, a0, EXC_TABLE_FAST_USER xsr a3, excsave1 @@ -464,11 +469,120 @@ rotw -3 j 1b - .end literal_prefix ENDPROC(_DoubleExceptionVector) /* + * Fixup handler for TLB miss in double exception handler for window owerflow. + * We get here with windowbase set to the window that was being spilled and + * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12 + * (bit set) window. + * + * We do the following here: + * - go to the original window retaining a0 value; + * - set up exception stack to return back to appropriate a0 restore code + * (we'll need to rotate window back and there's no place to save this + * information, use different return address for that); + * - handle the exception; + * - go to the window that was being spilled; + * - set up window_overflow_restore_a0_fixup as a fixup routine; + * - reload a0; + * - restore the original window; + * - reset the default fixup routine; + * - return to user. By the time we get to this fixup handler all information + * about the conditions of the original double exception that happened in + * the window overflow handler is lost, so we just return to userspace to + * retry overflow from start. + * + * a0: value of depc, original value in depc + * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE + * a3: exctable, original value in excsave1 + */ + +ENTRY(window_overflow_restore_a0_fixup) + + rsr a0, ps + extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH + rsr a2, windowbase + sub a0, a2, a0 + extui a0, a0, 0, 3 + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + + _beqi a0, 1, .Lhandle_1 + _beqi a0, 3, .Lhandle_3 + + .macro overflow_fixup_handle_exception_pane n + + rsr a0, depc + rotw -\n + + xsr a3, excsave1 + wsr a2, depc + l32i a2, a3, EXC_TABLE_KSTK + s32i a0, a2, PT_AREG0 + + movi a0, .Lrestore_\n + s32i a0, a2, PT_DEPC + rsr a0, exccause + j _DoubleExceptionVector_handle_exception + + .endm + + overflow_fixup_handle_exception_pane 2 +.Lhandle_1: + overflow_fixup_handle_exception_pane 1 +.Lhandle_3: + overflow_fixup_handle_exception_pane 3 + + .macro overflow_fixup_restore_a0_pane n + + rotw \n + /* Need to preserve a0 value here to be able to handle exception + * that may occur on a0 reload from stack. 
It may occur because + * TLB miss handler may not be atomic and pointer to page table + * may be lost before we get here. There are no free registers, + * so we need to use EXC_TABLE_DOUBLE_SAVE area. + */ + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE + movi a2, window_overflow_restore_a0_fixup + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + bbsi.l a0, 7, 1f + l32e a0, a9, -16 + j 2f +1: + l32e a0, a13, -16 +2: + rotw -\n + + .endm + +.Lrestore_2: + overflow_fixup_restore_a0_pane 2 + +.Lset_default_fixup: + xsr a3, excsave1 + s32i a2, a3, EXC_TABLE_DOUBLE_SAVE + movi a2, 0 + s32i a2, a3, EXC_TABLE_FIXUP + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + xsr a3, excsave1 + rfe + +.Lrestore_1: + overflow_fixup_restore_a0_pane 1 + j .Lset_default_fixup +.Lrestore_3: + overflow_fixup_restore_a0_pane 3 + j .Lset_default_fixup + +ENDPROC(window_overflow_restore_a0_fixup) + + .end literal_prefix +/* * Debug interrupt vector * * There is not much space here, so simply jump to another handler. --- linux-3.13.0.orig/arch/xtensa/kernel/vmlinux.lds.S +++ linux-3.13.0/arch/xtensa/kernel/vmlinux.lds.S @@ -262,13 +262,13 @@ .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 16, + DOUBLEEXC_VECTOR_VADDR - 40, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 32, + 40, .DoubleExceptionVector.literal) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; --- linux-3.13.0.orig/arch/xtensa/mm/fault.c +++ linux-3.13.0/arch/xtensa/mm/fault.c @@ -117,6 +117,8 @@ if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); --- linux-3.13.0.orig/arch/xtensa/platforms/xtfpga/setup.c +++ linux-3.13.0/arch/xtensa/platforms/xtfpga/setup.c @@ -194,7 +194,7 @@ * Ethernet -- OpenCores Ethernet MAC (ethoc driver) */ -static struct resource ethoc_res[] __initdata = { +static struct resource ethoc_res[] = { [0] = { /* register space */ .start = OETH_REGS_PADDR, .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1, @@ -212,7 +212,7 @@ }, }; -static struct ethoc_platform_data ethoc_pdata __initdata = { +static struct ethoc_platform_data ethoc_pdata = { /* * The MAC address for these boards is 00:50:c2:13:6f:xx. 
* The last byte (here as zero) is read from the DIP switches on the @@ -222,7 +222,7 @@ .phy_id = -1, }; -static struct platform_device ethoc_device __initdata = { +static struct platform_device ethoc_device = { .name = "ethoc", .id = -1, .num_resources = ARRAY_SIZE(ethoc_res), @@ -236,13 +236,13 @@ * UART */ -static struct resource serial_resource __initdata = { +static struct resource serial_resource = { .start = DUART16552_PADDR, .end = DUART16552_PADDR + 0x1f, .flags = IORESOURCE_MEM, }; -static struct plat_serial8250_port serial_platform_data[] __initdata = { +static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = DUART16552_PADDR, .irq = DUART16552_INTNUM, @@ -255,7 +255,7 @@ { }, }; -static struct platform_device xtavnet_uart __initdata = { +static struct platform_device xtavnet_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { --- linux-3.13.0.orig/block/blk-cgroup.c +++ linux-3.13.0/block/blk-cgroup.c @@ -80,7 +80,7 @@ blkg->q = q; INIT_LIST_HEAD(&blkg->q_node); blkg->blkcg = blkcg; - blkg->refcnt = 1; + atomic_set(&blkg->refcnt, 1); /* root blkg uses @q->root_rl, init rl only for !root blkgs */ if (blkcg != &blkcg_root) { @@ -399,11 +399,8 @@ /* release the blkcg and parent blkg refs this blkg has been holding */ css_put(&blkg->blkcg->css); - if (blkg->parent) { - spin_lock_irq(blkg->q->queue_lock); + if (blkg->parent) blkg_put(blkg->parent); - spin_unlock_irq(blkg->q->queue_lock); - } blkg_free(blkg); } @@ -862,6 +859,13 @@ { lockdep_assert_held(q->queue_lock); + /* + * @q could be exiting and already have destroyed all blkgs as + * indicated by NULL root_blkg. If so, don't confuse policies. + */ + if (!q->root_blkg) + return; + blk_throtl_drain(q); } --- linux-3.13.0.orig/block/blk-cgroup.h +++ linux-3.13.0/block/blk-cgroup.h @@ -18,6 +18,7 @@ #include #include #include +#include /* Max limits for throttle policy */ #define THROTL_IOPS_MAX UINT_MAX @@ -104,7 +105,7 @@ struct request_list rl; /* reference count */ - int refcnt; + atomic_t refcnt; /* is this blkg online? protected by both blkcg and q locks */ bool online; @@ -253,13 +254,12 @@ * blkg_get - get a blkg reference * @blkg: blkg to get * - * The caller should be holding queue_lock and an existing reference. + * The caller should be holding an existing reference. */ static inline void blkg_get(struct blkcg_gq *blkg) { - lockdep_assert_held(blkg->q->queue_lock); - WARN_ON_ONCE(!blkg->refcnt); - blkg->refcnt++; + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + atomic_inc(&blkg->refcnt); } void __blkg_release_rcu(struct rcu_head *rcu); @@ -267,14 +267,11 @@ /** * blkg_put - put a blkg reference * @blkg: blkg to put - * - * The caller should be holding queue_lock. 
*/ static inline void blkg_put(struct blkcg_gq *blkg) { - lockdep_assert_held(blkg->q->queue_lock); - WARN_ON_ONCE(blkg->refcnt <= 0); - if (!--blkg->refcnt) + WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); + if (atomic_dec_and_test(&blkg->refcnt)) call_rcu(&blkg->rcu_head, __blkg_release_rcu); } @@ -435,9 +432,9 @@ uint64_t v; do { - start = u64_stats_fetch_begin_bh(&stat->syncp); + start = u64_stats_fetch_begin_irq(&stat->syncp); v = stat->cnt; - } while (u64_stats_fetch_retry_bh(&stat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&stat->syncp, start)); return v; } @@ -508,9 +505,9 @@ struct blkg_rwstat tmp; do { - start = u64_stats_fetch_begin_bh(&rwstat->syncp); + start = u64_stats_fetch_begin_irq(&rwstat->syncp); tmp = *rwstat; - } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start)); return tmp; } --- linux-3.13.0.orig/block/blk-core.c +++ linux-3.13.0/block/blk-core.c @@ -506,6 +506,9 @@ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); blk_sync_queue(q); + if (q->mq_ops) + blk_mq_free_queue(q); + spin_lock_irq(lock); if (q->queue_lock != &q->__queue_lock) q->queue_lock = &q->__queue_lock; @@ -2325,7 +2328,7 @@ if (!req->bio) return false; - trace_block_rq_complete(req->q, req); + trace_block_rq_complete(req->q, req, nr_bytes); /* * For fs requests, rq is just carrier of independent bio's --- linux-3.13.0.orig/block/blk-iopoll.c +++ linux-3.13.0/block/blk-iopoll.c @@ -14,9 +14,6 @@ #include "blk.h" -int blk_iopoll_enabled = 1; -EXPORT_SYMBOL(blk_iopoll_enabled); - static unsigned int blk_iopoll_budget __read_mostly = 256; static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); --- linux-3.13.0.orig/block/blk-lib.c +++ linux-3.13.0/block/blk-lib.c @@ -119,6 +119,14 @@ atomic_inc(&bb.done); submit_bio(type, bio); + + /* + * We can loop for a long time in here, if someone does + * full device discards (like mkfs). Be nice and allow + * us to schedule out to avoid softlocking if preempt + * is disabled. + */ + cond_resched(); } blk_finish_plug(&plug); --- linux-3.13.0.orig/block/blk-mq-cpumap.c +++ linux-3.13.0/block/blk-mq-cpumap.c @@ -95,7 +95,7 @@ unsigned int *map; /* If cpus are offline, map them to first hctx */ - map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, + map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, reg->numa_node); if (!map) return NULL; --- linux-3.13.0.orig/block/blk-mq-tag.c +++ linux-3.13.0/block/blk-mq-tag.c @@ -36,7 +36,8 @@ { int tag; - tag = percpu_ida_alloc(&tags->free_tags, gfp); + tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? + TASK_UNINTERRUPTIBLE : TASK_RUNNING); if (tag < 0) return BLK_MQ_TAG_FAIL; return tag + tags->nr_reserved_tags; @@ -52,7 +53,8 @@ return BLK_MQ_TAG_FAIL; } - tag = percpu_ida_alloc(&tags->reserved_tags, gfp); + tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 
+ TASK_UNINTERRUPTIBLE : TASK_RUNNING); if (tag < 0) return BLK_MQ_TAG_FAIL; return tag; --- linux-3.13.0.orig/block/blk-mq.c +++ linux-3.13.0/block/blk-mq.c @@ -179,6 +179,8 @@ rq->mq_ctx = ctx; rq->cmd_flags = rw_flags; + rq->start_time = jiffies; + set_start_time_ns(rq); ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } @@ -299,7 +301,7 @@ struct bio *bio = rq->bio; unsigned int bytes = 0; - trace_block_rq_complete(rq->q, rq); + trace_block_rq_complete(rq->q, rq, blk_rq_bytes(rq)); while (bio) { struct bio *next = bio->bi_next; --- linux-3.13.0.orig/block/blk-settings.c +++ linux-3.13.0/block/blk-settings.c @@ -553,7 +553,7 @@ bottom = max(b->physical_block_size, b->io_min) + alignment; /* Verify that top and bottom intervals line up */ - if (max(top, bottom) & (min(top, bottom) - 1)) { + if (max(top, bottom) % min(top, bottom)) { t->misaligned = 1; ret = -1; } @@ -594,7 +594,7 @@ /* Find lowest common alignment_offset */ t->alignment_offset = lcm(t->alignment_offset, alignment) - & (max(t->physical_block_size, t->io_min) - 1); + % max(t->physical_block_size, t->io_min); /* Verify that new alignment_offset is on a logical block boundary */ if (t->alignment_offset & (t->logical_block_size - 1)) { --- linux-3.13.0.orig/block/blk-sysfs.c +++ linux-3.13.0/block/blk-sysfs.c @@ -518,17 +518,15 @@ * Currently, its primary task it to free all the &struct request * structures that were allocated to the queue and the queue itself. * - * Caveat: - * Hopefully the low level driver will have finished any - * outstanding requests first... + * Note: + * The low level driver must have finished any outstanding requests first + * via blk_cleanup_queue(). **/ static void blk_release_queue(struct kobject *kobj) { struct request_queue *q = container_of(kobj, struct request_queue, kobj); - blk_sync_queue(q); - blkcg_exit_queue(q); if (q->elevator) { @@ -545,9 +543,6 @@ percpu_counter_destroy(&q->mq_usage_counter); - if (q->mq_ops) - blk_mq_free_queue(q); - blk_trace_shutdown(q); bdi_destroy(&q->backing_dev_info); --- linux-3.13.0.orig/block/blk-tag.c +++ linux-3.13.0/block/blk-tag.c @@ -27,18 +27,15 @@ EXPORT_SYMBOL(blk_queue_find_tag); /** - * __blk_free_tags - release a given set of tag maintenance info + * blk_free_tags - release a given set of tag maintenance info * @bqt: the tag map to free * - * Tries to free the specified @bqt. Returns true if it was - * actually freed and false if there are still references using it + * Drop the reference count on @bqt and frees it when the last reference + * is dropped. */ -static int __blk_free_tags(struct blk_queue_tag *bqt) +void blk_free_tags(struct blk_queue_tag *bqt) { - int retval; - - retval = atomic_dec_and_test(&bqt->refcnt); - if (retval) { + if (atomic_dec_and_test(&bqt->refcnt)) { BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < bqt->max_depth); @@ -50,9 +47,8 @@ kfree(bqt); } - - return retval; } +EXPORT_SYMBOL(blk_free_tags); /** * __blk_queue_free_tags - release tag maintenance info @@ -69,28 +65,13 @@ if (!bqt) return; - __blk_free_tags(bqt); + blk_free_tags(bqt); q->queue_tags = NULL; queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); } /** - * blk_free_tags - release a given set of tag maintenance info - * @bqt: the tag map to free - * - * For externally managed @bqt frees the map. Callers of this - * function must guarantee to have released all the queues that - * might have been using this tag map. 
- */ -void blk_free_tags(struct blk_queue_tag *bqt) -{ - if (unlikely(!__blk_free_tags(bqt))) - BUG(); -} -EXPORT_SYMBOL(blk_free_tags); - -/** * blk_queue_free_tags - release tag maintenance info * @q: the request queue for the device * --- linux-3.13.0.orig/block/blk-throttle.c +++ linux-3.13.0/block/blk-throttle.c @@ -1292,6 +1292,9 @@ struct blkg_rwstat rwstat = { }, tmp; int i, cpu; + if (tg->stats_cpu == NULL) + return 0; + for_each_possible_cpu(cpu) { struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); --- linux-3.13.0.orig/block/blk.h +++ linux-3.13.0/block/blk.h @@ -113,7 +113,7 @@ q->flush_queue_delayed = 1; return NULL; } - if (unlikely(blk_queue_dying(q)) || + if (unlikely(blk_queue_bypass(q)) || !q->elevator->type->ops.elevator_dispatch_fn(q, 0)) return NULL; } --- linux-3.13.0.orig/block/cfq-iosched.c +++ linux-3.13.0/block/cfq-iosched.c @@ -1275,12 +1275,16 @@ static void cfq_update_group_weight(struct cfq_group *cfqg) { - BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); - if (cfqg->new_weight) { cfqg->weight = cfqg->new_weight; cfqg->new_weight = 0; } +} + +static void +cfq_update_group_leaf_weight(struct cfq_group *cfqg) +{ + BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); if (cfqg->new_leaf_weight) { cfqg->leaf_weight = cfqg->new_leaf_weight; @@ -1299,7 +1303,7 @@ /* add to the service tree */ BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); - cfq_update_group_weight(cfqg); + cfq_update_group_leaf_weight(cfqg); __cfq_group_service_tree_add(st, cfqg); /* @@ -1323,6 +1327,7 @@ */ while ((parent = cfqg_parent(pos))) { if (propagate) { + cfq_update_group_weight(pos); propagate = !parent->nr_active++; parent->children_weight += pos->weight; } @@ -3595,6 +3600,11 @@ blkcg = bio_blkcg(bio); cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); + if (!cfqg) { + cfqq = &cfqd->oom_cfqq; + goto out; + } + cfqq = cic_to_cfqq(cic, is_sync); /* @@ -3631,7 +3641,7 @@ } else cfqq = &cfqd->oom_cfqq; } - +out: if (new_cfqq) kmem_cache_free(cfq_pool, new_cfqq); @@ -3661,12 +3671,17 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, struct bio *bio, gfp_t gfp_mask) { - const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); - const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); + int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); + int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); struct cfq_queue **async_cfqq = NULL; struct cfq_queue *cfqq = NULL; if (!is_sync) { + if (!ioprio_valid(cic->ioprio)) { + struct task_struct *tsk = current; + ioprio = task_nice_ioprio(tsk); + ioprio_class = task_nice_ioclass(tsk); + } async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); cfqq = *async_cfqq; } --- linux-3.13.0.orig/block/compat_ioctl.c +++ linux-3.13.0/block/compat_ioctl.c @@ -690,6 +690,7 @@ case BLKROSET: case BLKDISCARD: case BLKSECDISCARD: + case BLKZEROOUT: /* * the ones below are implemented in blkdev_locked_ioctl, * but we call blkdev_ioctl, which gets the lock for us --- linux-3.13.0.orig/block/genhd.c +++ linux-3.13.0/block/genhd.c @@ -28,10 +28,10 @@ /* for extended dynamic devt allocation, currently only one major is used */ #define NR_EXT_DEVT (1 << MINORBITS) -/* For extended devt allocation. ext_devt_mutex prevents look up +/* For extended devt allocation. ext_devt_lock prevents look up * results from going away underneath its user. 
*/ -static DEFINE_MUTEX(ext_devt_mutex); +static DEFINE_SPINLOCK(ext_devt_lock); static DEFINE_IDR(ext_devt_idr); static struct device_type disk_type; @@ -420,9 +420,13 @@ } /* allocate ext devt */ - mutex_lock(&ext_devt_mutex); - idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL); - mutex_unlock(&ext_devt_mutex); + idr_preload(GFP_KERNEL); + + spin_lock(&ext_devt_lock); + idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); + spin_unlock(&ext_devt_lock); + + idr_preload_end(); if (idx < 0) return idx == -ENOSPC ? -EBUSY : idx; @@ -441,15 +445,13 @@ */ void blk_free_devt(dev_t devt) { - might_sleep(); - if (devt == MKDEV(0, 0)) return; if (MAJOR(devt) == BLOCK_EXT_MAJOR) { - mutex_lock(&ext_devt_mutex); + spin_lock(&ext_devt_lock); idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); - mutex_unlock(&ext_devt_mutex); + spin_unlock(&ext_devt_lock); } } @@ -665,7 +667,6 @@ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); device_del(disk_to_dev(disk)); - blk_free_devt(disk_to_dev(disk)->devt); } EXPORT_SYMBOL(del_gendisk); @@ -690,13 +691,13 @@ } else { struct hd_struct *part; - mutex_lock(&ext_devt_mutex); + spin_lock(&ext_devt_lock); part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); if (part && get_disk(part_to_disk(part))) { *partno = part->partno; disk = part_to_disk(part); } - mutex_unlock(&ext_devt_mutex); + spin_unlock(&ext_devt_lock); } return disk; @@ -1069,9 +1070,16 @@ struct disk_part_tbl *old_ptbl = disk->part_tbl; struct disk_part_tbl *new_ptbl; int len = old_ptbl ? old_ptbl->len : 0; - int target = partno + 1; + int i, target; size_t size; - int i; + + /* + * check for int overflow, since we can get here from blkpg_ioctl() + * with a user passed 'partno'. + */ + target = partno + 1; + if (target < 0) + return -EINVAL; /* disk_max_parts() is zero during initialization, ignore if so */ if (disk_max_parts(disk) && target > disk_max_parts(disk)) @@ -1098,6 +1106,7 @@ { struct gendisk *disk = dev_to_disk(dev); + blk_free_devt(dev->devt); disk_release_events(disk); kfree(disk->random); disk_replace_part_tbl(disk, NULL); --- linux-3.13.0.orig/block/partition-generic.c +++ linux-3.13.0/block/partition-generic.c @@ -211,6 +211,7 @@ static void part_release(struct device *dev) { struct hd_struct *p = dev_to_part(dev); + blk_free_devt(dev->devt); free_part_stats(p); free_part_info(p); kfree(p); @@ -253,7 +254,6 @@ rcu_assign_pointer(ptbl->last_lookup, NULL); kobject_put(part->holder_dir); device_del(part_to_dev(part)); - blk_free_devt(part_devt(part)); hd_struct_put(part); } --- linux-3.13.0.orig/block/scsi_ioctl.c +++ linux-3.13.0/block/scsi_ioctl.c @@ -28,6 +28,9 @@ #include #include #include +#include +#include +#include #include #include @@ -487,7 +490,7 @@ if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { err = DRIVER_ERROR << 24; - goto out; + goto error; } memset(sense, 0, sizeof(sense)); @@ -497,7 +500,6 @@ blk_execute_rq(q, disk, rq, 0); -out: err = rq->errors & 0xff; /* only 8 bit SCSI status */ if (err) { if (rq->sense_len && rq->sense) { @@ -692,8 +694,17 @@ case SG_GET_RESERVED_SIZE: case SG_SET_RESERVED_SIZE: case SG_EMULATED_HOST: + case BLKFLSBUF: + case BLKROSET: return 0; case CDROM_GET_CAPABILITY: + case CDROM_DRIVE_STATUS: + case FDGETPRM: + case RAID_VERSION: + case MTIOCGET: +#ifdef CONFIG_COMPAT + case 0x801c6d02: /* MTIOCGET32 */ +#endif /* Keep this until we remove the printk below. 
udev sends it * and we do not want to spam dmesg about it. CD-ROMs do * not have partitions, so we get here only for disks. --- linux-3.13.0.orig/crypto/842.c +++ linux-3.13.0/crypto/842.c @@ -180,3 +180,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("842 Compression Algorithm"); +MODULE_ALIAS_CRYPTO("842"); --- linux-3.13.0.orig/crypto/aes_generic.c +++ linux-3.13.0/crypto/aes_generic.c @@ -1474,4 +1474,5 @@ MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); +MODULE_ALIAS_CRYPTO("aes-generic"); --- linux-3.13.0.orig/crypto/af_alg.c +++ linux-3.13.0/crypto/af_alg.c @@ -21,6 +21,7 @@ #include #include #include +#include struct alg_type_list { const struct af_alg_type *type; @@ -243,6 +244,7 @@ sock_init_data(newsock, sk2); sock_graft(sk2, newsock); + security_sk_clone(sk, sk2); err = type->accept(ask->private, sk2); if (err) { @@ -447,6 +449,9 @@ { struct af_alg_completion *completion = req->data; + if (err == -EINPROGRESS) + return; + completion->err = err; complete(&completion->completion); } --- linux-3.13.0.orig/crypto/algapi.c +++ linux-3.13.0/crypto/algapi.c @@ -495,8 +495,8 @@ struct crypto_template *crypto_lookup_template(const char *name) { - return try_then_request_module(__crypto_lookup_template(name), "%s", - name); + return try_then_request_module(__crypto_lookup_template(name), + "crypto-%s", name); } EXPORT_SYMBOL_GPL(crypto_lookup_template); --- linux-3.13.0.orig/crypto/ansi_cprng.c +++ linux-3.13.0/crypto/ansi_cprng.c @@ -476,4 +476,5 @@ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); module_init(prng_mod_init); module_exit(prng_mod_fini); -MODULE_ALIAS("stdrng"); +MODULE_ALIAS_CRYPTO("stdrng"); +MODULE_ALIAS_CRYPTO("ansi_cprng"); --- linux-3.13.0.orig/crypto/anubis.c +++ linux-3.13.0/crypto/anubis.c @@ -704,3 +704,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Anubis Cryptographic Algorithm"); +MODULE_ALIAS_CRYPTO("anubis"); --- linux-3.13.0.orig/crypto/api.c +++ linux-3.13.0/crypto/api.c @@ -216,11 +216,11 @@ alg = crypto_alg_lookup(name, type, mask); if (!alg) { - request_module("%s", name); + request_module("crypto-%s", name); if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask & CRYPTO_ALG_NEED_FALLBACK)) - request_module("%s-all", name); + request_module("crypto-%s-all", name); alg = crypto_alg_lookup(name, type, mask); } --- linux-3.13.0.orig/crypto/arc4.c +++ linux-3.13.0/crypto/arc4.c @@ -166,3 +166,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); MODULE_AUTHOR("Jon Oberheide "); +MODULE_ALIAS_CRYPTO("arc4"); --- linux-3.13.0.orig/crypto/async_tx/async_xor.c +++ linux-3.13.0/crypto/async_tx/async_xor.c @@ -78,8 +78,6 @@ tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, xor_src_cnt, unmap->len, dma_flags); - src_list[0] = tmp; - if (unlikely(!tx)) async_tx_quiesce(&submit->depend_tx); @@ -92,6 +90,7 @@ xor_src_cnt, unmap->len, dma_flags); } + src_list[0] = tmp; dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); --- linux-3.13.0.orig/crypto/authenc.c +++ linux-3.13.0/crypto/authenc.c @@ -721,3 +721,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec"); +MODULE_ALIAS_CRYPTO("authenc"); --- linux-3.13.0.orig/crypto/authencesn.c +++ linux-3.13.0/crypto/authencesn.c @@ -814,3 +814,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert "); MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers"); +MODULE_ALIAS_CRYPTO("authencesn"); --- linux-3.13.0.orig/crypto/blowfish_generic.c 
+++ linux-3.13.0/crypto/blowfish_generic.c @@ -138,4 +138,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); -MODULE_ALIAS("blowfish"); +MODULE_ALIAS_CRYPTO("blowfish"); +MODULE_ALIAS_CRYPTO("blowfish-generic"); --- linux-3.13.0.orig/crypto/camellia_generic.c +++ linux-3.13.0/crypto/camellia_generic.c @@ -1098,4 +1098,5 @@ MODULE_DESCRIPTION("Camellia Cipher Algorithm"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("camellia"); +MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-generic"); --- linux-3.13.0.orig/crypto/cast5_generic.c +++ linux-3.13.0/crypto/cast5_generic.c @@ -549,4 +549,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); -MODULE_ALIAS("cast5"); +MODULE_ALIAS_CRYPTO("cast5"); +MODULE_ALIAS_CRYPTO("cast5-generic"); --- linux-3.13.0.orig/crypto/cast6_generic.c +++ linux-3.13.0/crypto/cast6_generic.c @@ -291,4 +291,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); -MODULE_ALIAS("cast6"); +MODULE_ALIAS_CRYPTO("cast6"); +MODULE_ALIAS_CRYPTO("cast6-generic"); --- linux-3.13.0.orig/crypto/cbc.c +++ linux-3.13.0/crypto/cbc.c @@ -289,3 +289,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CBC block cipher algorithm"); +MODULE_ALIAS_CRYPTO("cbc"); --- linux-3.13.0.orig/crypto/ccm.c +++ linux-3.13.0/crypto/ccm.c @@ -879,5 +879,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Counter with CBC MAC"); -MODULE_ALIAS("ccm_base"); -MODULE_ALIAS("rfc4309"); +MODULE_ALIAS_CRYPTO("ccm_base"); +MODULE_ALIAS_CRYPTO("rfc4309"); +MODULE_ALIAS_CRYPTO("ccm"); --- linux-3.13.0.orig/crypto/chainiv.c +++ linux-3.13.0/crypto/chainiv.c @@ -359,3 +359,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Chain IV Generator"); +MODULE_ALIAS_CRYPTO("chainiv"); --- linux-3.13.0.orig/crypto/cmac.c +++ linux-3.13.0/crypto/cmac.c @@ -313,3 +313,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CMAC keyed hash algorithm"); +MODULE_ALIAS_CRYPTO("cmac"); --- linux-3.13.0.orig/crypto/crc32.c +++ linux-3.13.0/crypto/crc32.c @@ -156,3 +156,4 @@ MODULE_AUTHOR("Alexander Boyko "); MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_CRYPTO("crc32"); --- linux-3.13.0.orig/crypto/crc32c.c +++ linux-3.13.0/crypto/crc32c.c @@ -170,3 +170,4 @@ MODULE_AUTHOR("Clay Haapala "); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_CRYPTO("crc32c"); --- linux-3.13.0.orig/crypto/crct10dif_generic.c +++ linux-3.13.0/crypto/crct10dif_generic.c @@ -124,4 +124,5 @@ MODULE_AUTHOR("Tim Chen "); MODULE_DESCRIPTION("T10 DIF CRC calculation."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif-generic"); --- linux-3.13.0.orig/crypto/cryptd.c +++ linux-3.13.0/crypto/cryptd.c @@ -955,3 +955,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Software async crypto daemon"); +MODULE_ALIAS_CRYPTO("cryptd"); --- linux-3.13.0.orig/crypto/crypto_null.c +++ linux-3.13.0/crypto/crypto_null.c @@ -149,9 +149,9 @@ .coa_decompress = null_compress } } } }; -MODULE_ALIAS("compress_null"); -MODULE_ALIAS("digest_null"); -MODULE_ALIAS("cipher_null"); +MODULE_ALIAS_CRYPTO("compress_null"); +MODULE_ALIAS_CRYPTO("digest_null"); +MODULE_ALIAS_CRYPTO("cipher_null"); static int __init crypto_null_mod_init(void) { --- linux-3.13.0.orig/crypto/crypto_user.c +++ linux-3.13.0/crypto/crypto_user.c @@ -466,7 +466,7 @@ type -= CRYPTO_MSG_BASE; link = &crypto_dispatch[type]; - if (!capable(CAP_NET_ADMIN)) + if (!netlink_capable(skb, 
CAP_NET_ADMIN)) return -EPERM; if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) && --- linux-3.13.0.orig/crypto/ctr.c +++ linux-3.13.0/crypto/ctr.c @@ -466,4 +466,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CTR Counter block mode"); -MODULE_ALIAS("rfc3686"); +MODULE_ALIAS_CRYPTO("rfc3686"); +MODULE_ALIAS_CRYPTO("ctr"); --- linux-3.13.0.orig/crypto/cts.c +++ linux-3.13.0/crypto/cts.c @@ -350,3 +350,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC"); +MODULE_ALIAS_CRYPTO("cts"); --- linux-3.13.0.orig/crypto/deflate.c +++ linux-3.13.0/crypto/deflate.c @@ -222,4 +222,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); MODULE_AUTHOR("James Morris "); - +MODULE_ALIAS_CRYPTO("deflate"); --- linux-3.13.0.orig/crypto/des_generic.c +++ linux-3.13.0/crypto/des_generic.c @@ -971,8 +971,6 @@ .cia_decrypt = des3_ede_decrypt } } } }; -MODULE_ALIAS("des3_ede"); - static int __init des_generic_mod_init(void) { return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs)); @@ -989,4 +987,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); MODULE_AUTHOR("Dag Arne Osvik "); -MODULE_ALIAS("des"); +MODULE_ALIAS_CRYPTO("des"); +MODULE_ALIAS_CRYPTO("des-generic"); +MODULE_ALIAS_CRYPTO("des3_ede"); +MODULE_ALIAS_CRYPTO("des3_ede-generic"); --- linux-3.13.0.orig/crypto/ecb.c +++ linux-3.13.0/crypto/ecb.c @@ -185,3 +185,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ECB block cipher algorithm"); +MODULE_ALIAS_CRYPTO("ecb"); --- linux-3.13.0.orig/crypto/eseqiv.c +++ linux-3.13.0/crypto/eseqiv.c @@ -267,3 +267,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator"); +MODULE_ALIAS_CRYPTO("eseqiv"); --- linux-3.13.0.orig/crypto/fcrypt.c +++ linux-3.13.0/crypto/fcrypt.c @@ -420,3 +420,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("FCrypt Cipher Algorithm"); MODULE_AUTHOR("David Howells "); +MODULE_ALIAS_CRYPTO("fcrypt"); --- linux-3.13.0.orig/crypto/gcm.c +++ linux-3.13.0/crypto/gcm.c @@ -1441,6 +1441,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Galois/Counter Mode"); MODULE_AUTHOR("Mikko Herranen "); -MODULE_ALIAS("gcm_base"); -MODULE_ALIAS("rfc4106"); -MODULE_ALIAS("rfc4543"); +MODULE_ALIAS_CRYPTO("gcm_base"); +MODULE_ALIAS_CRYPTO("rfc4106"); +MODULE_ALIAS_CRYPTO("rfc4543"); +MODULE_ALIAS_CRYPTO("gcm"); --- linux-3.13.0.orig/crypto/ghash-generic.c +++ linux-3.13.0/crypto/ghash-generic.c @@ -172,4 +172,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); -MODULE_ALIAS("ghash"); +MODULE_ALIAS_CRYPTO("ghash"); +MODULE_ALIAS_CRYPTO("ghash-generic"); --- linux-3.13.0.orig/crypto/hmac.c +++ linux-3.13.0/crypto/hmac.c @@ -271,3 +271,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HMAC hash algorithm"); +MODULE_ALIAS_CRYPTO("hmac"); --- linux-3.13.0.orig/crypto/khazad.c +++ linux-3.13.0/crypto/khazad.c @@ -880,3 +880,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Khazad Cryptographic Algorithm"); +MODULE_ALIAS_CRYPTO("khazad"); --- linux-3.13.0.orig/crypto/krng.c +++ linux-3.13.0/crypto/krng.c @@ -62,4 +62,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Kernel Random Number Generator"); -MODULE_ALIAS("stdrng"); +MODULE_ALIAS_CRYPTO("stdrng"); +MODULE_ALIAS_CRYPTO("krng"); --- linux-3.13.0.orig/crypto/lrw.c +++ linux-3.13.0/crypto/lrw.c @@ -400,3 +400,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); +MODULE_ALIAS_CRYPTO("lrw"); --- linux-3.13.0.orig/crypto/lz4.c +++ linux-3.13.0/crypto/lz4.c @@ -104,3 
+104,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZ4 Compression Algorithm"); +MODULE_ALIAS_CRYPTO("lz4"); --- linux-3.13.0.orig/crypto/lz4hc.c +++ linux-3.13.0/crypto/lz4hc.c @@ -104,3 +104,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZ4HC Compression Algorithm"); +MODULE_ALIAS_CRYPTO("lz4hc"); --- linux-3.13.0.orig/crypto/lzo.c +++ linux-3.13.0/crypto/lzo.c @@ -103,3 +103,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO Compression Algorithm"); +MODULE_ALIAS_CRYPTO("lzo"); --- linux-3.13.0.orig/crypto/md4.c +++ linux-3.13.0/crypto/md4.c @@ -255,4 +255,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD4 Message Digest Algorithm"); - +MODULE_ALIAS_CRYPTO("md4"); --- linux-3.13.0.orig/crypto/md5.c +++ linux-3.13.0/crypto/md5.c @@ -168,3 +168,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Message Digest Algorithm"); +MODULE_ALIAS_CRYPTO("md5"); --- linux-3.13.0.orig/crypto/michael_mic.c +++ linux-3.13.0/crypto/michael_mic.c @@ -184,3 +184,4 @@ MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Michael MIC"); MODULE_AUTHOR("Jouni Malinen "); +MODULE_ALIAS_CRYPTO("michael_mic"); --- linux-3.13.0.orig/crypto/pcbc.c +++ linux-3.13.0/crypto/pcbc.c @@ -295,3 +295,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCBC block cipher algorithm"); +MODULE_ALIAS_CRYPTO("pcbc"); --- linux-3.13.0.orig/crypto/pcrypt.c +++ linux-3.13.0/crypto/pcrypt.c @@ -565,3 +565,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert "); MODULE_DESCRIPTION("Parallel crypto wrapper"); +MODULE_ALIAS_CRYPTO("pcrypt"); --- linux-3.13.0.orig/crypto/rmd128.c +++ linux-3.13.0/crypto/rmd128.c @@ -327,3 +327,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-128 Message Digest"); +MODULE_ALIAS_CRYPTO("rmd128"); --- linux-3.13.0.orig/crypto/rmd160.c +++ linux-3.13.0/crypto/rmd160.c @@ -371,3 +371,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); +MODULE_ALIAS_CRYPTO("rmd160"); --- linux-3.13.0.orig/crypto/rmd256.c +++ linux-3.13.0/crypto/rmd256.c @@ -346,3 +346,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-256 Message Digest"); +MODULE_ALIAS_CRYPTO("rmd256"); --- linux-3.13.0.orig/crypto/rmd320.c +++ linux-3.13.0/crypto/rmd320.c @@ -395,3 +395,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger "); MODULE_DESCRIPTION("RIPEMD-320 Message Digest"); +MODULE_ALIAS_CRYPTO("rmd320"); --- linux-3.13.0.orig/crypto/salsa20_generic.c +++ linux-3.13.0/crypto/salsa20_generic.c @@ -248,4 +248,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); -MODULE_ALIAS("salsa20"); +MODULE_ALIAS_CRYPTO("salsa20"); +MODULE_ALIAS_CRYPTO("salsa20-generic"); --- linux-3.13.0.orig/crypto/seed.c +++ linux-3.13.0/crypto/seed.c @@ -476,3 +476,4 @@ MODULE_DESCRIPTION("SEED Cipher Algorithm"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Hye-Shik Chang , Kim Hyun "); +MODULE_ALIAS_CRYPTO("seed"); --- linux-3.13.0.orig/crypto/seqiv.c +++ linux-3.13.0/crypto/seqiv.c @@ -362,3 +362,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Sequence Number IV Generator"); +MODULE_ALIAS_CRYPTO("seqiv"); --- linux-3.13.0.orig/crypto/serpent_generic.c +++ linux-3.13.0/crypto/serpent_generic.c @@ -665,5 +665,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm"); MODULE_AUTHOR("Dag Arne Osvik "); -MODULE_ALIAS("tnepres"); -MODULE_ALIAS("serpent"); +MODULE_ALIAS_CRYPTO("tnepres"); 
+MODULE_ALIAS_CRYPTO("serpent"); +MODULE_ALIAS_CRYPTO("serpent-generic"); --- linux-3.13.0.orig/crypto/sha1_generic.c +++ linux-3.13.0/crypto/sha1_generic.c @@ -153,4 +153,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); +MODULE_ALIAS_CRYPTO("sha1-generic"); --- linux-3.13.0.orig/crypto/sha256_generic.c +++ linux-3.13.0/crypto/sha256_generic.c @@ -384,5 +384,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); -MODULE_ALIAS("sha224"); -MODULE_ALIAS("sha256"); +MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha224-generic"); +MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha256-generic"); --- linux-3.13.0.orig/crypto/sha512_generic.c +++ linux-3.13.0/crypto/sha512_generic.c @@ -287,5 +287,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms"); -MODULE_ALIAS("sha384"); -MODULE_ALIAS("sha512"); +MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha384-generic"); +MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha512-generic"); --- linux-3.13.0.orig/crypto/tea.c +++ linux-3.13.0/crypto/tea.c @@ -270,8 +270,9 @@ crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs)); } -MODULE_ALIAS("xtea"); -MODULE_ALIAS("xeta"); +MODULE_ALIAS_CRYPTO("tea"); +MODULE_ALIAS_CRYPTO("xtea"); +MODULE_ALIAS_CRYPTO("xeta"); module_init(tea_mod_init); module_exit(tea_mod_fini); --- linux-3.13.0.orig/crypto/tgr192.c +++ linux-3.13.0/crypto/tgr192.c @@ -676,8 +676,9 @@ crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs)); } -MODULE_ALIAS("tgr160"); -MODULE_ALIAS("tgr128"); +MODULE_ALIAS_CRYPTO("tgr192"); +MODULE_ALIAS_CRYPTO("tgr160"); +MODULE_ALIAS_CRYPTO("tgr128"); module_init(tgr192_mod_init); module_exit(tgr192_mod_fini); --- linux-3.13.0.orig/crypto/twofish_generic.c +++ linux-3.13.0/crypto/twofish_generic.c @@ -211,4 +211,5 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); -MODULE_ALIAS("twofish"); +MODULE_ALIAS_CRYPTO("twofish"); +MODULE_ALIAS_CRYPTO("twofish-generic"); --- linux-3.13.0.orig/crypto/vmac.c +++ linux-3.13.0/crypto/vmac.c @@ -713,3 +713,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("VMAC hash algorithm"); +MODULE_ALIAS_CRYPTO("vmac"); --- linux-3.13.0.orig/crypto/wp512.c +++ linux-3.13.0/crypto/wp512.c @@ -1167,8 +1167,9 @@ crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs)); } -MODULE_ALIAS("wp384"); -MODULE_ALIAS("wp256"); +MODULE_ALIAS_CRYPTO("wp512"); +MODULE_ALIAS_CRYPTO("wp384"); +MODULE_ALIAS_CRYPTO("wp256"); module_init(wp512_mod_init); module_exit(wp512_mod_fini); --- linux-3.13.0.orig/crypto/xcbc.c +++ linux-3.13.0/crypto/xcbc.c @@ -286,3 +286,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XCBC keyed hash algorithm"); +MODULE_ALIAS_CRYPTO("xcbc"); --- linux-3.13.0.orig/crypto/xts.c +++ linux-3.13.0/crypto/xts.c @@ -362,3 +362,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XTS block cipher mode"); +MODULE_ALIAS_CRYPTO("xts"); --- linux-3.13.0.orig/crypto/zlib.c +++ linux-3.13.0/crypto/zlib.c @@ -378,3 +378,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Zlib Compression Algorithm"); MODULE_AUTHOR("Sony Corporation"); +MODULE_ALIAS_CRYPTO("zlib"); --- linux-3.13.0.orig/debian.master/NOTES +++ linux-3.13.0/debian.master/NOTES @@ -0,0 +1,4 @@ +eSCO patch removed. Replaced upstream with a disable_esco module parm. +airprime: Module gone, use option driver instead +AppArmor: Patch is all there and ported. 
Ooops when enabled, so default + off (still can be enabled apparmor=1) --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/abiname +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/abiname @@ -0,0 +1 @@ +54 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/amd64/ignore +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/amd64/ignore @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/amd64/ignore.modules +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/amd64/ignore.modules @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/arm64/ignore +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/arm64/ignore @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/arm64/ignore.modules +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/arm64/ignore.modules @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/armhf/ignore +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/armhf/ignore @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/armhf/ignore.modules +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/armhf/ignore.modules @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/i386/ignore +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/i386/ignore @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/i386/ignore.modules +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/i386/ignore.modules @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/powerpc/ignore +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/powerpc/ignore @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/powerpc/ignore.modules +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/powerpc/ignore.modules @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/ppc64el/ignore +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/ppc64el/ignore @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/abi/3.13.0-54.90/ppc64el/ignore.modules +++ linux-3.13.0/debian.master/abi/3.13.0-54.90/ppc64el/ignore.modules @@ -0,0 +1 @@ +1 --- linux-3.13.0.orig/debian.master/changelog +++ linux-3.13.0/debian.master/changelog @@ -0,0 +1,19693 @@ +linux (3.13.0-54.91) trusty; urgency=medium + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1458618 + + [ Upstream Kernel Changes ] + + * [3.13-stable only] Revert "gianfar: Carefully free skbs in functions + called by netpoll." 
+ - LP: #1454746 + + -- Luis Henriques Tue, 26 May 2015 17:19:30 +0100 + +linux (3.13.0-54.90) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1458618 + + [ Andy Whitcroft ] + + * [Config] push off linux-lts-{utopic, vivid}-tools-common + - LP: #1405807 + + [ Brad Figg ] + + * hyper-v -- add hid and fb drivers to linux-virtual + - LP: #1444179 + + [ Upstream Kernel Changes ] + + * DT doc: net: cpsw mac-address is optional + - LP: #1452628 + * net: cpsw: Add missing return value + - LP: #1452628 + * net: cpsw: header, Add missing include + - LP: #1452628 + * net: cpsw: Add am33xx MACID readout + - LP: #1452628 + * am33xx: define syscon control module device node + - LP: #1452628 + * arm: dts: am33xx, Add syscon phandle to cpsw node + - LP: #1452628 + * net: cpsw: do not register cpts twice + - LP: #1452620 + * x86: kvm: Revert "remove sched notifier for cross-cpu migrations" + - LP: #1450584 + * x86: vdso: fix pvclock races with task migration + - LP: #1450584 + * n_tty: Fix read buffer overwrite when no newline + - LP: #1381005, #1454746 + * KVM: x86: Fix lost interrupt on irr_pending race + - LP: #1454746 + * writeback: add missing INITIAL_JIFFIES init in + global_update_bandwidth() + - LP: #1454746 + * nbd: fix possible memory leak + - LP: #1454746 + * mfd: kempld-core: Fix callback return value check + - LP: #1454746 + * KVM: nVMX: mask unrestricted_guest if disabled on L0 + - LP: #1454746 + * spi: trigger trace event for message-done before mesg->complete + - LP: #1454746 + * powerpc/pseries: Little endian fixes for post mobility device tree + update + - LP: #1454746 + * net: ethernet: pcnet32: Setup the SRAM and NOUFLO on Am79C97{3, 5} + - LP: #1454746 + * perf: Fix irq_work 'tail' recursion + - LP: #1454746 + * arm64: Use the reserved TTBR0 if context switching to the init_mm + - LP: #1454746 + * selinux: fix sel_write_enforce broken return value + - LP: #1454746 + * mm: fix anon_vma->degree underflow in anon_vma endless growing + prevention + - LP: #1454746 + * mm/memory hotplug: postpone the reset of obsolete pgdat + - LP: #1454746 + * hfsplus: fix B-tree corruption after insertion at position 0 + - LP: #1454746 + * ARC: SA_SIGINFO ucontext regs off-by-one + - LP: #1454746 + * writeback: fix possible underflow in write bandwidth calculation + - LP: #1454746 + * iio: fix drivers that check buffer->scan_mask + - LP: #1454746 + * iio: inv_mpu6050: Clear timestamps fifo while resetting hardware fifo + - LP: #1454746 + * iio: core: Fix double free. + - LP: #1454746 + * USB: ftdi_sio: Added custom PID for Synapse Wireless product + - LP: #1454746 + * iwlwifi: dvm: run INIT firmware again upon .start() + - LP: #1454746 + * USB: keyspan_pda: add new device id + - LP: #1454746 + * cifs: smb2_clone_range() - exit on unhandled error + - LP: #1454746 + * cifs: fix use-after-free bug in find_writable_file + - LP: #1454746 + * can: flexcan: Deferred on Regulator return EPROBE_DEFER + - LP: #1454746 + * usb: xhci: handle Config Error Change (CEC) in xhci driver + - LP: #1454746 + * usb: xhci: apply XHCI_AVOID_BEI quirk to all Intel xHCI controllers + - LP: #1454746 + * USB: ftdi_sio: Use jtag quirk for SNAP Connect E10 + - LP: #1454746 + * tty: serial: fsl_lpuart: clear receive flag on FIFO flush + - LP: #1454746 + * radeon: Do not directly dereference pointers to BIOS area. 
+ - LP: #1454746 + * iio: imu: Use iio_trigger_get for indio_dev->trig assignment + - LP: #1454746 + * dmaengine: edma: fix memory leak when terminating running transfers + - LP: #1454746 + * dmaengine: omap-dma: Fix memory leak when terminating running transfer + - LP: #1454746 + * x86/reboot: Add ASRock Q1900DC-ITX mainboard reboot quirk + - LP: #1454746 + * mac80211: fix RX A-MPDU session reorder timer deletion + - LP: #1454746 + * tcp: prevent fetching dst twice in early demux code + - LP: #1454746 + * net: use for_each_netdev_safe() in rtnl_group_changelink() + - LP: #1454746 + * xen-netfront: transmit fully GSO-sized packets + - LP: #1454746 + * tcp: fix FRTO undo on cumulative ACK of SACKed range + - LP: #1454746 + * PCI: cpcihp: Add missing curly braces in cpci_configure_slot() + - LP: #1454746 + * sh_veu: v4l2_dev wasn't set + - LP: #1454746 + * media: s5p-mfc: fix mmap support for 64bit arch + - LP: #1454746 + * cpuidle: ACPI: do not overwrite name and description of C0 + - LP: #1454746 + * ioctx_alloc(): fix vma (and file) leak on failure + - LP: #1454746 + * ALSA: hda/realtek - Make more stable to get pin sense for ALC283 + - LP: #1454746 + * be2iscsi: Fix kernel panic when device initialization fails + - LP: #1454746 + * Defer processing of REQ_PREEMPT requests for blocked devices + - LP: #1454746 + * ALSA: hda - Fix headphone pin config for Lifebook T731 + - LP: #1454746 + * ocfs2: _really_ sync the right range + - LP: #1454746 + * ALSA: usb - Creative USB X-Fi Pro SB1095 volume knob support + - LP: #1454746 + * iscsi target: fix oops when adding reject pdu + - LP: #1454746 + * net/mlx4_en: Call register_netdevice in the proper location + - LP: #1454746 + * ipv6: protect skb->sk accesses from recursive dereference inside the + stack + - LP: #1454746 + * tcp: tcp_make_synack() should clear skb->tstamp + - LP: #1454746 + * 8139cp: Call dev_kfree_skby_any instead of kfree_skb. + - LP: #1454746 + * 8139too: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * r8169: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * bonding: Call dev_kfree_skby_any instead of kfree_skb. + - LP: #1454746 + * bnx2: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * tg3: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * ixgb: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * benet: Call dev_kfree_skby_any instead of kfree_skb. + - LP: #1454746 + * gianfar: Carefully free skbs in functions called by netpoll. + - LP: #1454746 + * ip_forward: Drop frames with attached skb->sk + - LP: #1454746 + * tcp: fix possible deadlock in tcp_send_fin() + - LP: #1454746 + * tcp: avoid looping in tcp_send_fin() + - LP: #1454746 + * net: do not deplete pfmemalloc reserve + - LP: #1454746 + * net: fix crash in build_skb() + - LP: #1454746 + * ipv4: Missing sk_nulls_node_init() in ping_unhash(). 
+ - LP: #1454746 + * Linux 3.13.11-ckt20 + - LP: #1454746 + * of: Add support for ePAPR "stdout-path" property + - LP: #1438585 + * lib: add glibc style strchrnul() variant + - LP: #1438585 + * of: Create unlocked version of for_each_child_of_node() + - LP: #1438585 + * of: Make of_find_node_by_path() handle /aliases + - LP: #1438585 + * of: Create of_console_check() for selecting a console specified in + /chosen + - LP: #1438585 + * of: Enable console on serial ports specified by /chosen/stdout-path + - LP: #1438585 + * of: correct of_console_check()'s return value + - LP: #1438585 + * of: Add bindings for chosen node, stdout-path + - LP: #1438585 + * of: add optional options parameter to of_find_node_by_path() + - LP: #1438585 + * of: support passing console options with stdout-path + - LP: #1438585 + * (upstream) net/mlx4_core: Adjust command timeouts to conform to the + firmware spec + - LP: #1455121 + * arm64: kernel: add MPIDR_EL1 accessors macros + - LP: #1455372 + * of: reimplement the matching method for __of_match_node() + - LP: #1455372 + * arm64: remove redundant "psci:" prefixes + - LP: #1455372 + * arm64: remove return value form psci_init() + - LP: #1455372 + * arm: KVM: Don't return PSCI_INVAL if waitqueue is inactive + - LP: #1455372 + * KVM: Add capability to advertise PSCI v0.2 support + - LP: #1455372 + * ARM/ARM64: KVM: Add common header for PSCI related defines + - LP: #1455372 + * ARM/ARM64: KVM: Add base for PSCI v0.2 emulation + - LP: #1455372 + * KVM: Documentation: Add info regarding KVM_ARM_VCPU_PSCI_0_2 feature + - LP: #1455372 + * ARM/ARM64: KVM: Make kvm_psci_call() return convention more flexible + - LP: #1455372 + * KVM: Add KVM_EXIT_SYSTEM_EVENT to user space API header + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 SYSTEM_OFF and SYSTEM_RESET + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 AFFINITY_INFO + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 MIGRATE_INFO_TYPE and related + functions + - LP: #1455372 + * ARM/ARM64: KVM: Fix CPU_ON emulation for PSCI v0.2 + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 CPU_SUSPEND + - LP: #1455372 + * ARM/ARM64: KVM: Advertise KVM_CAP_ARM_PSCI_0_2 to user space + - LP: #1455372 + * PSCI: Add initial support for PSCIv0.2 functions + - LP: #1455372 + * Documentation: devicetree: Add new binding for PSCIv0.2 + - LP: #1455372 + * ARM: Check if a CPU has gone offline + - LP: #1455372 + * arm64: KVM: Enable minimalistic support for Cortex-A53 + - LP: #1455372 + * HID: multitouch: add support of clickpads + - LP: #1456881 + * vhost/scsi: potential memory corruption + - LP: #1457807 + - CVE-2015-4036 + + -- Luis Henriques Tue, 26 May 2015 11:21:53 +0100 + +linux (3.13.0-53.89) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1456976 + + [ Upstream Kernel Changes ] + + * tcp: Fix crash in TCP Fast Open + - LP: #1447371 + - CVE-2015-3332 + + -- Luis Henriques Wed, 20 May 2015 11:00:58 +0100 + +linux (3.13.0-53.88) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1454785 + + [ Upstream Kernel Changes ] + + * mmc: card: Don't access RPMB partitions for normal read/write + - LP: #1454013 + + -- Brad Figg Wed, 13 May 2015 10:15:29 -0700 + +linux (3.13.0-53.87) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1452736 + + [ dann frazier ] + + * [Config] CONFIG_{EFI_PARAMS_FROM_FDT,GENERIC_EARLY_IOREMAP,LIBFDT}=y + - LP: #1441876 + * Move get_dram_base to arm private file + - LP: #1441876 + * arm64: Implement 
efi_enabled() + - LP: #1441876 + * [Config] CONFIG_RTC_DRV_EFI=y on arm64 + - LP: #1441291 + + [ Kamal Mostafa ] + + * Fix "mei: me: release hw from reset only during the reset flow" + - LP: #1450813 + + [ Stefan Bader ] + + * SAUCE: vesafb: Set mtrr:3 (write-combining) as default + - LP: #1434581 + + [ Upstream Kernel Changes ] + + * Revert "net: cx82310_eth: use common match macro" + - LP: #1451900 + * netfilter: nf_conntrack: reserve two bytes for nf_ct_ext->len + - LP: #1442080 + - CVE-2014-9715 + * add generic fixmap.h + - LP: #1441876 + * mm: create generic early_ioremap() support + - LP: #1441876 + * arm64: initialize pgprot info earlier in boot + - LP: #1441876 + * arm64: add early_ioremap support + - LP: #1441876 + * arm64: fixmap: fix missing sub-page offset for earlyprintk + - LP: #1441876 + * efi: create memory map iteration helper + - LP: #1441876 + * efi: Add get_dram_base() helper function + - LP: #1441876 + * lib: add fdt_empty_tree.c + - LP: #1441876 + * doc: efi-stub.txt updates for ARM + - LP: #1441876 + * efi: add helper function to get UEFI params from FDT + - LP: #1441876 + * arm64: Add function to create identity mappings + - LP: #1441876 + * efi: Add shared FDT related functions for ARM/ARM64 + - LP: #1441876 + * arm64: add EFI runtime services + - LP: #1441876 + * doc: arm: add UEFI support documentation + - LP: #1441876 + * arm64: efi: add EFI stub + - LP: #1441876 + * doc: arm64: add description of EFI stub support + - LP: #1441876 + * efi/arm64: ignore dtb= when UEFI SecureBoot is enabled + - LP: #1441876 + * arm64: efi: only attempt efi map setup if booting via EFI + - LP: #1441876 + * PCI: Don't clear ASPM bits when the FADT declares it's unsupported + - LP: #1441335 + * regmap: Skip read-only registers in regcache_sync() + - LP: #1448830 + * rtc: ia64: allow other architectures to use EFI RTC + - LP: #1441291 + * rtc: Disable EFI rtc for x86 + - LP: #1441291 + * mei: me: fix hw ready reset flow + - LP: #1450813 + * Input: serio - add firmware_id sysfs attribute + - LP: #1414930 + * Input: i8042 - add firmware_id support + - LP: #1414930 + * Input: Add INPUT_PROP_TOPBUTTONPAD device property + - LP: #1414930 + * Input: synaptics - report INPUT_PROP_TOPBUTTONPAD property + - LP: #1414930 + * Input: synaptics - add a matches_pnp_id helper function + - LP: #1414930 + * Input: synaptics - change min/max quirk table to pnp-id matching + - LP: #1414930 + * Input: psmouse - add psmouse_matches_pnp_id helper function + - LP: #1414930 + * Input: synaptics - split synaptics_resolution(), query first + - LP: #1414930 + * Input: synaptics - log queried and quirked dimension values + - LP: #1414930 + * Input: synaptics - remove obsolete min/max quirk for X240 + - LP: #1414930 + * Input: synaptics - add min/max quirk for pnp-id LEN2002 (Edge E531) + - LP: #1414930 + * Input: synaptics - add min/max quirk for Lenovo T440s + - LP: #1414930 + * Input: synaptics - adjust min/max for Lenovo ThinkPad X1 Carbon 2nd + - LP: #1414930 + * Input: synaptics - adjust min/max on Thinkpad E540 + - LP: #1414930 + * Input: synaptics - support min/max board id in min_max_pnpid_table + - LP: #1414930 + * Input: synaptics - skip quirks when post-2013 dimensions + - LP: #1414930 + * Input: synaptics - query min dimensions for fw v8.1 + - LP: #1414930 + * Input: synaptics - fix middle button on Lenovo 2015 products + - LP: #1414930 + * Input: synaptics - handle spurious release of trackstick buttons + - LP: #1414930 + * Input: synaptics - do not retrieve the board id on old firmwares + - LP: 
#1414930 + * Input: synaptics - retrieve the extended capabilities in query $10 + - LP: #1414930 + * Input: synaptics - remove TOPBUTTONPAD property for Lenovos 2015 + - LP: #1414930 + * Input: synaptics - re-route tracksticks buttons on the Lenovo 2015 + series + - LP: #1414930 + * Input: synaptics - remove X1 Carbon 3rd gen from the topbuttonpad list + - LP: #1414930 + * Input: synaptics - remove X250 from the topbuttonpad list + - LP: #1414930 + * drm/dp_helper: don't return EPROTO for defers (v2) + - LP: #1450322 + * iio: mxs-lradc: separate touchscreen and buffer virtual channels + - LP: #1451900 + * iio: mxs-lradc: make ADC reads not disable touchscreen interrupts + - LP: #1451900 + * iio: mxs-lradc: make ADC reads not unschedule touchscreen conversions + - LP: #1451900 + * iio: mxs-lradc: only update the buffer when its conversions have + finished + - LP: #1451900 + * iio: imu: adis16400: Fix sign extension + - LP: #1451900 + * iio:adc:mcp3422 Fix incorrect scales table + - LP: #1451900 + * iio: ad5686: fix optional reference voltage declaration + - LP: #1451900 + * usb: dwc3: dwc3-omap: Fix disable IRQ + - LP: #1451900 + * KVM: emulate: fix CMPXCHG8B on 32-bit hosts + - LP: #1451900 + * xhci: Allocate correct amount of scratchpad buffers + - LP: #1451900 + * USB: usbfs: don't leak kernel data in siginfo + - LP: #1451900 + * efi/libstub: Fix boundary checking in efi_high_alloc() + - LP: #1451900 + * USB: ftdi_sio: add PIDs for Actisense USB devices + - LP: #1451900 + * USB: serial: fix potential use-after-free after failed probe + - LP: #1451900 + * USB: serial: fix tty-device error handling at probe + - LP: #1451900 + * mac80211: Send EAPOL frames at lowest rate + - LP: #1451900 + * ARC: Fix KSTK_ESP() + - LP: #1451900 + * USB: serial: cp210x: Adding Seletek device id's + - LP: #1451900 + * mei: make device disabled on stop unconditionally + - LP: #1451900 + * NFSv4: Don't call put_rpccred() under the rcu_read_lock() + - LP: #1451900 + * btrfs: fix lost return value due to variable shadowing + - LP: #1451900 + * KVM: MIPS: Fix trace event to save PC directly + - LP: #1451900 + * usb: ftdi_sio: Add jtag quirk support for Cyber Cortex AV boards + - LP: #1451900 + * eCryptfs: don't pass fs-specific ioctl commands through + - LP: #1451900 + * drm/radeon: do a posting read in r100_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in rs600_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in r600_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in evergreen_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in si_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in cik_set_irq + - LP: #1451900 + * drm/radeon: fix DRM_IOCTL_RADEON_CS oops + - LP: #1451900 + * drm/radeon: fix interlaced modes on DCE8 + - LP: #1451900 + * ACPI / video: Load the module even if ACPI is disabled + - LP: #1451900 + * ASoC: omap-pcm: Correct dma mask + - LP: #1451900 + * x86/asm/entry/64: Remove a bogus 'ret_from_fork' optimization + - LP: #1451900 + * Btrfs: fix data loss in the fast fsync path + - LP: #1451900 + * Btrfs:__add_inode_ref: out of bounds memory read when looking for + extended ref. 
+ - LP: #1451900 + * xhci: fix reporting of 0-sized URBs in control endpoint + - LP: #1451900 + * xhci: Workaround for PME stuck issues in Intel xhci + - LP: #1451900 + * Change email address for 8250_pci + - LP: #1451900 + * tty: fix up atime/mtime mess, take four + - LP: #1451900 + * console: Fix console name size mismatch + - LP: #1451900 + * net: irda: fix wait_until_sent poll timeout + - LP: #1451900 + * USB: serial: fix infinite wait_until_sent timeout + - LP: #1451900 + * TTY: fix tty_wait_until_sent on 64-bit machines + - LP: #1451900 + * sunrpc: fix braino in ->poll() + - LP: #1451900 + * netfilter: xt_socket: fix a stack corruption bug + - LP: #1451900 + * svcrpc: fix memory leak in gssp_accept_sec_context_upcall + - LP: #1451900 + * ipv4: ip_check_defrag should correctly check return value of + skb_copy_bits + - LP: #1451900 + * net: phy: Fix verification of EEE support in phy_init_eee + - LP: #1451900 + * openvswitch: Fix net exit. + - LP: #1451900 + * team: fix possible null pointer dereference in team_handle_frame + - LP: #1451900 + * net: compat: Ignore MSG_CMSG_COMPAT in compat_sys_{send, recv}msg + - LP: #1451900 + * rtnetlink: ifla_vf_policy: fix misuses of NLA_BINARY + - LP: #1451900 + * rtnetlink: call ->dellink on failure when ->newlink exists + - LP: #1451900 + * gen_stats.c: Duplicate xstats buffer for later use + - LP: #1451900 + * ipv4: ip_check_defrag should not assume that skb_network_offset is zero + - LP: #1451900 + * ematch: Fix auto-loading of ematch modules. + - LP: #1451900 + * net: reject creation of netdev names with colons + - LP: #1451900 + * macvtap: make sure neighbour code can push ethernet header + - LP: #1451900 + * usb: plusb: Add support for National Instruments host-to-host cable + - LP: #1451900 + * udp: only allow UFO for packets from SOCK_DGRAM sockets + - LP: #1451900 + * net: ping: Return EAFNOSUPPORT when appropriate. 
+ - LP: #1451900 + * team: don't traverse port list using rcu in team_set_mac_address + - LP: #1451900 + * cpuset: Fix cpuset sched_relax_domain_level + - LP: #1451900 + * workqueue: fix hang involving racing cancel[_delayed]_work_sync()'s for + PREEMPT_NONE + - LP: #1451900 + * bnx2x: Force fundamental reset for EEH recovery + - LP: #1451900 + * spi: pl022: Fix race in giveback() leading to driver lock-up + - LP: #1451900 + * tpm/ibmvtpm: Additional LE support for tpm_ibmvtpm_send + - LP: #1451900 + * libsas: Fix Kernel Crash in smp_execute_task + - LP: #1451900 + * can: add missing initialisations in CAN related skbuffs + - LP: #1451900 + * can: kvaser_usb: Avoid double free on URB submission failures + - LP: #1451900 + * can: kvaser_usb: Read all messages in a bulk-in URB buffer + - LP: #1451900 + * ftrace: Fix en(dis)able graph caller when en(dis)abling record via + sysctl + - LP: #1451900 + * ftrace: Fix ftrace enable ordering of sysctl ftrace_enabled + - LP: #1451900 + * drm/radeon: drop setting UPLL to sleep mode + - LP: #1451900 + * xen-pciback: limit guest control of command register + - LP: #1451900 + * ALSA: hda - Fix built-in mic on Compaq Presario CQ60 + - LP: #1451900 + * ALSA: control: Add sanity checks for user ctl id name string + - LP: #1451900 + * ALSA: snd-usb: add quirks for Roland UA-22 + - LP: #1451900 + * ALSA: hda - Set single_adc_amp flag for CS420x codecs + - LP: #1451900 + * ALSA: hda - Add workaround for MacBook Air 5,2 built-in mic + - LP: #1451900 + * nilfs2: fix deadlock of segment constructor during recovery + - LP: #1451900 + * ALSA: hda - Don't access stereo amps for mono channel widgets + - LP: #1451900 + * ipvs: add missing ip_vs_pe_put in sync code + - LP: #1451900 + * ARM: at91: pm: fix at91rm9200 standby + - LP: #1451900 + * rbd: drop an unsafe assertion + - LP: #1451900 + * fuse: notify: don't move pages + - LP: #1451900 + * fuse: set stolen page uptodate + - LP: #1451900 + * dm thin: fix to consistently zero-fill reads to unprovisioned blocks + - LP: #1451900 + * dm: hold suspend_lock while suspending device during device deletion + - LP: #1451900 + * dm io: deal with wandering queue limits when handling REQ_DISCARD and + REQ_WRITE_SAME + - LP: #1451900 + * crypto: arm/aes update NEON AES module to latest OpenSSL version + - LP: #1451900 + * mac80211: drop unencrypted frames in mesh fwding + - LP: #1451900 + * mac80211: disable u-APSD queues by default + - LP: #1451900 + * ASoC: ak4671: Fix control-less DAPM routes + - LP: #1451900 + * ASoC: da732x: Fix control-less DAPM routes + - LP: #1451900 + * ASoC: sn95031: Fix control-less DAPM routes + - LP: #1451900 + * virtio_console: init work unconditionally + - LP: #1451900 + * virtio_console: avoid config access from irq + - LP: #1451900 + * clocksource: efm32: Fix a NULL pointer dereference + - LP: #1451900 + * x86/vdso: Fix the build on GCC5 + - LP: #1451900 + * ASoC: sgtl5000: remove useless register write clearing CHRGPUMP_POWERUP + - LP: #1451900 + * regmap: regcache-rbtree: Fix present bitmap resize + - LP: #1451900 + * regulator: Only enable disabled regulators on resume + - LP: #1451900 + * regulator: core: Fix enable GPIO reference counting + - LP: #1451900 + * vt6655: RFbSetPower fix missing rate RATE_12M + - LP: #1451900 + * x86/asm/entry/32: Fix user_mode() misuses + - LP: #1451900 + * ASoC: adav80x: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: ak4641: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: cs4271: Fix wrong value references for 
boolean kctl + - LP: #1451900 + * ASoC: pcm1681: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: tas5086: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm2000: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8731: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8903: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8904: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8955: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8960: Fix wrong value references for boolean kctl + - LP: #1451900 + * crypto: aesni - fix memory usage in GCM decryption + - LP: #1451900 + * phy: Find the right match in devm_phy_destroy() + - LP: #1451900 + * x86/fpu: Avoid math_state_restore() without used_math() in + __restore_xstate_sig() + - LP: #1451900 + * x86/fpu: Drop_fpu() should not assume that tsk equals current + - LP: #1451900 + * can: kvaser_usb: Fix tx queue start/stop race conditions + - LP: #1451900 + * nl80211: ignore HT/VHT capabilities without QoS/WMM + - LP: #1451900 + * ALSA: hda - Treat stereo-to-mono mix properly + - LP: #1451900 + * pagemap: do not leak physical addresses to non-privileged userspace + - LP: #1451900 + * of/irq: Fix of_irq_parse_one() returned error codes + - LP: #1451900 + * iscsi-target: Avoid early conn_logout_comp for iser connections + - LP: #1451900 + * tcm_qla2xxx: Fix incorrect use of __transport_register_session + - LP: #1451900 + * target: Fix reference leak in target_get_sess_cmd() error path + - LP: #1451900 + * tcm_fc: missing curly braces in ft_invl_hw_context() + - LP: #1451900 + * target/pscsi: Fix NULL pointer dereference in get_device_type + - LP: #1451900 + * target: Fix virtual LUN=0 target_configure_device failure OOPs + - LP: #1451900 + * xfrm: release dst_orig in case of error in xfrm_lookup() + - LP: #1451900 + * dmaengine: dw: append MODULE_ALIAS for platform driver + - LP: #1451900 + * sparc32: destroy_context() and switch_mm() needs to disable interrupts. + - LP: #1451900 + * sparc: semtimedop() unreachable due to comparison error + - LP: #1451900 + * sparc: perf: Remove redundant perf_pmu_{en|dis}able calls + - LP: #1451900 + * sparc: perf: Make counting mode actually work + - LP: #1451900 + * sparc: Touch NMI watchdog when walking cpus and calling printk + - LP: #1451900 + * sparc64: Fix several bugs in memmove(). 
+ - LP: #1451900 + * net: sysctl_net_core: check SNDBUF and RCVBUF for min length + - LP: #1451900 + * rds: avoid potential stack overflow + - LP: #1451900 + * inet_diag: fix possible overflow in inet_diag_dump_one_icsk() + - LP: #1451900 + * caif: fix MSG_OOB test in caif_seqpkt_recvmsg() + - LP: #1451900 + * rxrpc: bogus MSG_PEEK test in rxrpc_recvmsg() + - LP: #1451900 + * ipv6: fix backtracking for throw routes + - LP: #1451900 + * tcp: fix tcp fin memory accounting + - LP: #1451900 + * net: compat: Update get_compat_msghdr() to match + copy_msghdr_from_user() behaviour + - LP: #1451900 + * tcp: make connect() mem charging friendly + - LP: #1451900 + * Linux 3.13.11-ckt19 + - LP: #1451900 + + -- Luis Henriques Thu, 07 May 2015 14:47:02 +0100 + +linux (3.13.0-52.86) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1451288 + + [ Upstream Kernel Changes ] + + * audit: create private file name copies when auditing inodes + - LP: #1450442 + + -- Brad Figg Sun, 03 May 2015 18:36:19 -0700 + +linux (3.13.0-52.85) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1450101 + + [ Upstream Kernel Changes ] + + * fs: take i_mutex during prepare_binprm for set[ug]id executables + - LP: #1447373 + - CVE-2015-3339 + + -- Luis Henriques Fri, 24 Apr 2015 17:26:50 +0100 + +linux (3.13.0-51.84) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1444141 + * Merged back Ubuntu-3.13.0-49.83 security release + + -- Luis Henriques Tue, 14 Apr 2015 21:38:57 +0100 + +linux (3.13.0-50.82) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1442285 + + [ Andy Whitcroft ] + + * [Config] CONFIG_DEFAULT_MMAP_MIN_ADDR needs to match on armhf and arm64 + - LP: #1418140 + + [ Chris J Arges ] + + * [Config] CONFIG_PCIEASPM_DEBUG=y + - LP: #1398544 + + [ Upstream Kernel Changes ] + + * KEYS: request_key() should reget expired keys rather than give + EKEYEXPIRED + - LP: #1124250 + * audit: correctly record file names with different path name types + - LP: #1439441 + * KVM: x86: Check for nested events if there is an injectable interrupt + - LP: #1413540 + * be2iscsi: fix memory leak in error path + - LP: #1440156 + * block: remove old blk_iopoll_enabled variable + - LP: #1440156 + * be2iscsi: Fix handling timed out MBX completion from FW + - LP: #1440156 + * be2iscsi: Fix doorbell format for EQ/CQ/RQ s per SLI spec. + - LP: #1440156 + * be2iscsi: Fix the session cleanup when reboot/shutdown happens + - LP: #1440156 + * be2iscsi: Fix scsi_cmnd leakage in driver. + - LP: #1440156 + * be2iscsi : Fix DMA Out of SW-IOMMU space error + - LP: #1440156 + * be2iscsi: Fix retrieving MCCQ_WRB in non-embedded Mbox path + - LP: #1440156 + * be2iscsi: Fix exposing Host in sysfs after adapter initialization is + complete + - LP: #1440156 + * be2iscsi: Fix interrupt Coalescing mechanism. + - LP: #1440156 + * be2iscsi: Fix TCP parameters while connection offloading. 
+ - LP: #1440156 + * be2iscsi: Fix memory corruption in MBX path + - LP: #1440156 + * be2iscsi: Fix destroy MCC-CQ before MCC-EQ is destroyed + - LP: #1440156 + * be2iscsi: add an missing goto in error path + - LP: #1440156 + * be2iscsi: remove potential junk pointer free + - LP: #1440156 + * be2iscsi: Fix memory leak in mgmt_set_ip() + - LP: #1440156 + * be2iscsi: Fix the sparse warning introduced in previous submission + - LP: #1440156 + * be2iscsi: Fix updating the boot enteries in sysfs + - LP: #1440156 + * be2iscsi: Fix processing CQE before connection resources are freed + - LP: #1440156 + * be2iscsi : Fix kernel panic during reboot/shutdown + - LP: #1440156 + * fixed invalid assignment of 64bit mask to host dma_boundary for scatter + gather segment boundary limit. + - LP: #1440156 + * quota: Store maximum space limit in bytes + - LP: #1441284 + * ip: zero sockaddr returned on error queue + - LP: #1441284 + * net: rps: fix cpu unplug + - LP: #1441284 + * ipv6: stop sending PTB packets for MTU < 1280 + - LP: #1441284 + * netxen: fix netxen_nic_poll() logic + - LP: #1441284 + * udp_diag: Fix socket skipping within chain + - LP: #1441284 + * ping: Fix race in free in receive path + - LP: #1441284 + * bnx2x: fix napi poll return value for repoll + - LP: #1441284 + * net: don't OOPS on socket aio + - LP: #1441284 + * bridge: dont send notification when skb->len == 0 in rtnl_bridge_notify + - LP: #1441284 + * ipv4: tcp: get rid of ugly unicast_sock + - LP: #1441284 + * ppp: deflate: never return len larger than output buffer + - LP: #1441284 + * net: sctp: fix passing wrong parameter header to param_type2af in + sctp_process_param + - LP: #1441284 + * ARM: pxa: add regulator_has_full_constraints to corgi board file + - LP: #1441284 + * ARM: pxa: add regulator_has_full_constraints to poodle board file + - LP: #1441284 + * ARM: pxa: add regulator_has_full_constraints to spitz board file + - LP: #1441284 + * hx4700: regulator: declare full constraints + - LP: #1441284 + * HID: input: fix confusion on conflicting mappings + - LP: #1441284 + * HID: fixup the conflicting keyboard mappings quirk + - LP: #1441284 + * megaraid_sas: disable interrupt_mask before enabling hardware + interrupts + - LP: #1441284 + * PCI: Generate uppercase hex for modalias var in uevent + - LP: #1441284 + * usb: core: buffer: smallest buffer should start at ARCH_DMA_MINALIGN + - LP: #1441284 + * tty/serial: at91: enable peripheral clock before accessing I/O + registers + - LP: #1441284 + * tty/serial: at91: fix error handling in atmel_serial_probe() + - LP: #1441284 + * axonram: Fix bug in direct_access + - LP: #1441284 + * ksoftirqd: Enable IRQs and call cond_resched() before poking RCU + - LP: #1441284 + * TPM: Add new TPMs to the tail of the list to prevent inadvertent change + of dev + - LP: #1441284 + * char: tpm: Add missing error check for devm_kzalloc + - LP: #1441284 + * tpm_tis: verify interrupt during init + - LP: #1441284 + * tpm: Fix NULL return in tpm_ibmvtpm_get_desired_dma + - LP: #1441284 + * tpm/tpm_i2c_stm_st33: Fix potential bug in tpm_stm_i2c_send + - LP: #1441284 + * tpm/tpm_i2c_stm_st33: Add status check when reading data on the FIFO + - LP: #1441284 + * mmc: sdhci-pxav3: fix unbalanced clock issues during probe + - LP: #1441284 + * iwlwifi: mvm: validate tid and sta_id in ba_notif + - LP: #1441284 + * power: bq24190: Fix ignored supplicants + - LP: #1441284 + * ARM: DRA7: hwmod: Fix boot crash with DEBUG_LL enabled on UART3 + - LP: #1441284 + * Bluetooth: ath3k: Add support of AR3012 bluetooth 
13d3:3423 device + - LP: #1411193, #1441284 + * cfq-iosched: fix incorrect filing of rt async cfqq + - LP: #1441284 + * smack: fix possible use after frees in task_security() callers + - LP: #1441284 + * xfs: ensure buffer types are set correctly + - LP: #1441284 + * xfs: inode unlink does not set AGI buffer type + - LP: #1441284 + * xfs: set buf types when converting extent formats + - LP: #1441284 + * xfs: set superblock buffer type correctly + - LP: #1441284 + * btrfs: set proper message level for skinny metadata + - LP: #1441284 + * KVM: s390: base hrtimer on a monotonic clock + - LP: #1441284 + * PCI: Fix infinite loop with ROM image of size 0 + - LP: #1441284 + * USB: cp210x: add ID for RUGGEDCOM USB Serial Console + - LP: #1441284 + * clk: zynq: Force CPU_2X clock to be ungated + - LP: #1441284 + * mmc: sdhci-pxav3: Remove checks for mandatory host clock + - LP: #1441284 + * mmc: sdhci-pxav3: fix race between runtime pm and irq + - LP: #1441284 + * power_supply: 88pm860x: Fix leaked power supply on probe fail + - LP: #1441284 + * staging: comedi: comedi_compat32.c: fix COMEDI_CMD copy back + - LP: #1441284 + * mmc: sdhci-pxav3: fix setting of pdata->clk_delay_cycles + - LP: #1441284 + * ARM: 8284/1: sa1100: clear RCSR_SMR on resume + - LP: #1441284 + * usb: musb: omap2plus bus glue needs USB host support + - LP: #1441284 + * USB: add flag for HCDs that can't receive wakeup requests (isp1760-hcd) + - LP: #1441284 + * USB: fix use-after-free bug in usb_hcd_unlink_urb() + - LP: #1441284 + * iwlwifi: mvm: always use mac color zero + - LP: #1441284 + * iwlwifi: pcie: disable the SCD_BASE_ADDR when we resume from WoWLAN + - LP: #1441284 + * vt: provide notifications on selection changes + - LP: #1441284 + * tty: Prevent untrappable signals from malicious program + - LP: #1441284 + * cpufreq: Set cpufreq_cpu_data to NULL before putting kobject + - LP: #1441284 + * lmedm04: Fix usb_submit_urb BOGUS urb xfer, pipe 1 != type 3 in + interrupt urb + - LP: #1441284 + * mei: mask interrupt set bit on clean reset bit + - LP: #1441284 + * mei: me: release hw from reset only during the reset flow + - LP: #1441284 + * MIPS: KVM: Deliver guest interrupts after local_irq_disable() + - LP: #1441284 + * KVM: MIPS: Don't leak FPU/DSP to guest + - LP: #1441284 + * ALSA: hda - Add the pin fixup for HP Envy TS bass speaker + - LP: #1441284 + * ALSA: hda - Set up GPIO for Toshiba Satellite S50D + - LP: #1441284 + * xen/manage: Fix USB interaction issues when resuming + - LP: #1441284 + * drm/i915: Correct the IOSF Dev_FN field for IOSF transfers + - LP: #1441284 + * cfq-iosched: handle failure of cfq group allocation + - LP: #1441284 + * tracing: Fix unmapping loop in tracing_mark_write + - LP: #1441284 + * fsnotify: fix handling of renames in audit + - LP: #1441284 + * drm/radeon: workaround for CP HW bug on CIK + - LP: #1441284 + * drm/radeon: only enable kv/kb dpm interrupts once v3 + - LP: #1441284 + * NFSv4.1: Fix a kfree() of uninitialised pointers in + decode_cb_sequence_args + - LP: #1441284 + * cpufreq: speedstep-smi: enable interrupts when waiting + - LP: #1441284 + * mm/hugetlb: pmd_huge() returns true for non-present hugepage + - LP: #1441284 + * mm: cleanup follow_page_mask() + - LP: #1441284 + * mm/hugetlb: take page table lock in follow_huge_pmd() + - LP: #1441284 + * mm/hugetlb: fix getting refcount 0 page in hugetlb_fault() + - LP: #1441284 + * mm/hugetlb: add migration/hwpoisoned entry check in + hugetlb_change_protection + - LP: #1441284 + * mm/hugetlb: add migration entry check in 
__unmap_hugepage_range + - LP: #1441284 + * mm: softdirty: unmapped addresses between VMAs are clean + - LP: #1441284 + * proc/pagemap: walk page tables under pte lock + - LP: #1441284 + * mm: when stealing freepages, also take pages created by splitting buddy + page + - LP: #1441284 + * mm/mmap.c: fix arithmetic overflow in __vm_enough_memory() + - LP: #1441284 + * mm/nommu.c: fix arithmetic overflow in __vm_enough_memory() + - LP: #1441284 + * iscsi-target: Drop problematic active_ts_list usage + - LP: #1441284 + * target: Fix PR_APTPL_BUF_LEN buffer size limitation + - LP: #1441284 + * mm/compaction: fix wrong order check in compact_finished() + - LP: #1441284 + * mm/memory.c: actually remap enough memory + - LP: #1441284 + * mm: hwpoison: drop lru_add_drain_all() in __soft_offline_page() + - LP: #1441284 + * ARC: fix page address calculation if PAGE_OFFSET != LINUX_LINK_BASE + - LP: #1441284 + * drm/radeon/dp: Set EDP_CONFIGURATION_SET for bridge chips if necessary + - LP: #1441284 + * drm/radeon: fix voltage setup on hawaii + - LP: #1441284 + * ALSA: hdspm - Constrain periods to 2 on older cards + - LP: #1441284 + * jffs2: fix handling of corrupted summary length + - LP: #1441284 + * dm mirror: do not degrade the mirror on discard error + - LP: #1441284 + * dm io: reject unsupported DISCARD requests with EOPNOTSUPP + - LP: #1441284 + * target: Add missing WRITE_SAME end-of-device sanity check + - LP: #1441284 + * target: Check for LBA + sectors wrap-around in sbc_parse_cdb + - LP: #1441284 + * Btrfs: fix fsync data loss after adding hard link to inode + - LP: #1441284 + * Added Little Endian support to vtpm module + - LP: #1441284 + * sg: fix read() error reporting + - LP: #1441284 + * IB/qib: Do not write EEPROM + - LP: #1441284 + * md/raid5: Fix livelock when array is both resyncing and degraded. + - LP: #1441284 + * dm: fix a race condition in dm_get_md + - LP: #1441284 + * dm snapshot: fix a possible invalid memory access on unload + - LP: #1441284 + * cpufreq: s3c: remove incorrect __init annotations + - LP: #1441284 + * libceph: assert both regular and lingering lists in __remove_osd() + - LP: #1441284 + * libceph: change from BUG to WARN for __remove_osd() asserts + - LP: #1441284 + * libceph: fix double __remove_osd() problem + - LP: #1441284 + * MIPS: Export FP functions used by lose_fpu(1) for KVM + - LP: #1441284 + * kdb: fix incorrect counts in KDB summary command output + - LP: #1441284 + * blk-throttle: check stats_cpu before reading it from sysfs + - LP: #1441284 + * procfs: fix race between symlink removals and traversals + - LP: #1441284 + * autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for + allocation + - LP: #1441284 + * pktgen: fix UDP checksum computation + - LP: #1441284 + * ipv6: fix ipv6_cow_metrics for non DST_HOST case + - LP: #1441284 + * clk-gate: fix bit # check in clk_register_gate() + - LP: #1441284 + * ALSA: off by one bug in snd_riptide_joystick_probe() + - LP: #1441284 + * ath5k: fix spontaneus AR5312 freezes + - LP: #1441284 + * pinctrl: pinctrl-imx: don't use invalid value of conf_reg + - LP: #1441284 + * ALSA: hda - Add one more node in the EAPD supporting candidate list + - LP: #1436745, #1441284 + * ALSA: hda - Add pin configs for ASUS mobo with IDT 92HD73XX codec + - LP: #1441284 + * drm/i915/bdw: PCI IDs ending in 0xb are ULT. 
+ - LP: #1441284 + * xfs: Fix quota type in quota structures when reusing quota file + - LP: #1441284 + * gpiolib: of: allow of_gpiochip_find_and_xlate to find more than one + chip per node + - LP: #1441284 + * gpio: tps65912: fix wrong container_of arguments + - LP: #1441284 + * ALSA: pcm: Don't leave PREPARED state after draining + - LP: #1441284 + * metag: Fix KSTK_EIP() and KSTK_ESP() macros + - LP: #1441284 + * md/raid1: fix read balance when a drive is write-mostly. + - LP: #1441284 + * drm/radeon: use drm_mode_vrefresh() rather than mode->vrefresh + - LP: #1441284 + * drm/radeon: fix 1 RB harvest config setup for TN/RL + - LP: #1441284 + * arm64: compat Fix siginfo_t -> compat_siginfo_t conversion on big + endian + - LP: #1441284 + * nilfs2: fix potential memory overrun on inode + - LP: #1441284 + * HID: i2c-hid: Limit reads to wMaxInputLength bytes for input events + - LP: #1441284 + * Linux 3.13.11-ckt18 + - LP: #1441284 + * ipv6: Don't reduce hop limit for an interface + - LP: #1441103 + - CVE-2015-2922 + * x86/microcode/intel: Guard against stack overflow in the loader + - LP: #1438504 + - CVE-2015-2666 + + -- Brad Figg Thu, 09 Apr 2015 10:06:28 -0700 + +linux (3.13.0-49.83) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH + - LP: #1442180 + + -- Luis Henriques Fri, 10 Apr 2015 18:46:02 +0100 + +linux (3.13.0-49.81) trusty; urgency=low + + [ Kamal Mostafa ] + + * Release Tracking Bug + - LP: #1436016 + + [ Alex Hung ] + + * SAUCE: ACPI / blacklist: blacklist Win8 OSI for HP Pavilion dv6 + - LP: #1416940 + + [ Andy Whitcroft ] + + * [Packaging] generate live watchdog blacklists + - LP: #1432837 + + [ Ben Widawsky ] + + * SAUCE: i915_bdw: drm/i915/bdw: enable eDRAM. 
+ - LP: #1430855 + + [ Chris J Arges ] + + * [Config] Add ibmvfc to d-i + - LP: #1416001 + + [ Seth Forshee ] + + * [Config] updateconfigs - enable X86_UP_APIC_MSI + + [ Upstream Kernel Changes ] + + * net: add sysfs helpers for netdev_adjacent logic + - LP: #1410852 + * net: Mark functions as static in core/dev.c + - LP: #1410852 + * net: rename sysfs symlinks on device name change + - LP: #1410852 + * btrfs: fix null pointer dereference in clone_fs_devices when name is + null + - LP: #1429804 + * cdc-acm: add sanity checks + - LP: #1413992 + * x86: thinkpad_acpi.c: fixed spacing coding style issue + - LP: #1417915 + * thinkpad_acpi: support new BIOS version string pattern + - LP: #1417915 + * net: sctp: fix slab corruption from use after free on INIT collisions + - LP: #1416506 + - CVE-2015-1421 + * ipv4: try to cache dst_entries which would cause a redirect + - LP: #1420027 + - CVE-2015-1465 + * x86, mm/ASLR: Fix stack randomization on 64-bit systems + - LP: #1423757 + - CVE-2015-1593 + * net: llc: use correct size for sysctl timeout entries + - LP: #1425271 + - CVE-2015-2041 + * net: rds: use correct size for max unacked packets and bytes + - LP: #1425274 + - CVE-2015-2042 + * Btrfs: clear compress-force when remounting with compress option + - LP: #1434183 + * ext4: merge uninitialized extents + - LP: #1430184 + * btrfs: filter invalid arg for btrfs resize + - LP: #1435441 + * Bluetooth: Add firmware update for Atheros 0cf3:311f + * Bluetooth: btusb: Add IMC Networks (Broadcom based) + * Bluetooth: sort the list of IDs in the source code + * Bluetooth: append new supported device to the list [0b05:17d0] + * Bluetooth: Add support for Intel bootloader devices + * Bluetooth: Ignore isochronous endpoints for Intel USB bootloader + * Bluetooth: Add support for Acer [13D3:3432] + * Bluetooth: Add support for Broadcom device of Asus Z97-DELUXE + motherboard + * Add a new PID/VID 0227/0930 for AR3012. + * Bluetooth: Add support for Acer [0489:e078] + * Bluetooth: Add USB device 04ca:3010 as Atheros AR3012 + * x86: mm: move mmap_sem unlock from mm_fault_error() to caller + * vm: add VM_FAULT_SIGSEGV handling support + * vm: make stack guard page errors return VM_FAULT_SIGSEGV rather than + SIGBUS + * spi/pxa2xx: Clear cur_chip pointer before starting next message + * spi: dw: Fix detecting FIFO depth + * spi: dw-mid: fix FIFO size + * ASoC: wm8960: Fix capture sample rate from 11250 to 11025 + * regulator: core: fix race condition in regulator_put() + * ASoC: omap-mcbsp: Correct CBM_CFS dai format configuration + * can: c_can: end pending transmission on network stop (ifdown) + * nfs: fix dio deadlock when O_DIRECT flag is flipped + * NFSv4.1: Fix an Oops in nfs41_walk_client_list + * Input: i8042 - add noloop quirk for Medion Akoya E7225 (MD98857) + * mac80211: properly set CCK flag in radiotap + * nl80211: fix per-station group key get/del and memory leak + * i2c: s3c2410: fix ABBA deadlock by keeping clock prepared + * usb-storage/SCSI: blacklist FUA on JMicron 152d:2566 USB-SATA + controller + * drm/i915: Only fence tiled region of object. + * drm/i915: Fix and clean BDW PCH identification + * drm/i915: BDW Fix Halo PCI IDs marked as ULT. 
+ * ALSA: seq-dummy: remove deadlock-causing events on close + * drivers/rtc/rtc-s5m.c: terminate s5m_rtc_id array with empty element + * drivers: net: cpsw: discard dual emac default vlan configuration + * can: kvaser_usb: Do not sleep in atomic context + * can: kvaser_usb: Send correct context to URB completion + * can: kvaser_usb: Retry the first bulk transfer on -ETIMEDOUT + * can: kvaser_usb: Fix state handling upon BUS_ERROR events + * quota: Switch ->get_dqblk() and ->set_dqblk() to use bytes as space + units + * rbd: fix rbd_dev_parent_get() when parent_overlap == 0 + * rbd: drop parent_ref in rbd_dev_unprobe() unconditionally + * dm cache: fix missing ERR_PTR returns and handling + * dm thin: don't allow messages to be sent to a pool target in READ_ONLY + or FAIL mode + * net: cls_bpf: fix size mismatch on filter preparation + * net: cls_bpf: fix auto generation of per list handles + * ipv6: replacing a rt6_info needs to purge possible propagated rt6_infos + too + * perf: Tighten (and fix) the grouping condition + * arc: mm: Fix build failure + * MIPS: IRQ: Fix disable_irq on CPU IRQs + * Complete oplock break jobs before closing file handle + * smpboot: Add missing get_online_cpus() in + smpboot_register_percpu_thread() + * ASoC: atmel_ssc_dai: fix start event for I2S mode + * spi: fsl-dspi: Fix memory leak + * spi: spi-fsl-dspi: Remove usage of devm_kzalloc + * ALSA: ak411x: Fix stall in work callback + * lib/checksum.c: fix carry in csum_tcpudp_nofold + * MIPS: Fix kernel lockup or crash after CPU offline/online + * gpio: sysfs: fix memory leak in gpiod_export_link + * gpio: sysfs: fix memory leak in gpiod_sysfs_set_active_low + * PCI: Add NEC variants to Stratus ftServer PCIe DMI check + * ASoC: sgtl5000: add delay before first I2C access + * PCI: Handle read-only BARs on AMD CS553x devices + * mm: pagewalk: call pte_hole() for VM_PFNMAP during walk_page_range + * nilfs2: fix deadlock of segment constructor over I_SYNC flag + * tcp: ipv4: initialize unicast_sock sk_pacing_rate + * caif: remove wrong dev_net_set() call + * qlge: Fix qlge_update_hw_vlan_features to handle if interface is down + * ip6_gre: fix endianness errors in ip6gre_err + * spi: dw: revisit FIFO size detection again + * Linux 3.13.11-ckt17 + + -- Kamal Mostafa Tue, 24 Mar 2015 11:58:44 -0700 + +linux (3.13.0-48.80) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1431263 + * Merged back all changes that were in Ubuntu-3.13.0-47.78 + + [ Upstream Kernel Changes ] + + * xfs: remote attribute overwrite causes transaction overrun + - LP: #1429821 + - CVE-2015-0274 + + -- Luis Henriques Thu, 12 Mar 2015 10:21:27 +0000 + +linux (3.13.0-47.78) trusty; urgency=low + + [ Seth Forshee ] + + * Release Tracking Bug + - LP: #1427733 + + [ Rodrigo Vivi ] + + * SAUCE: drm/i915: Fix and clean BDW PCH identification + - LP: #1423292 + * SAUCE: drm/i915: BDW Fix Halo PCI IDs marked as ULT. + - LP: #1423292 + + [ Upstream Kernel Changes ] + + * ext4: prevent bugon on race between write/fcntl + * Bluetooth: ath3k: workaround the compatibility issue with xHCI + controller + - LP: #1400215 + * openvswitch: Silence RCU lockdep checks from flow lookup. + - LP: #1408972 + * openvswitch: Use exact lookup for flow_get and flow_del. + - LP: #1408972 + * splice: Apply generic position and size checks to each write + - LP: #1416498 + - CVE-2014-7822 + * ALSA: hda - enable mute led quirk for one more hp machine. 
+ - LP: #1410704 + * crypto: prefix module autoloading with "crypto-" + - LP: #1427438 + * crypto: add missing crypto module aliases + - LP: #1427438 + * crypto: include crypto- module prefix in template + - LP: #1427438 + * crypto: crc32c - add missing crypto module alias + - LP: #1427438 + * drm/i915: Invalidate media caches on gen7 + - LP: #1427438 + * drm/i915: Force the CS stall for invalidate flushes + - LP: #1427438 + * audit: restore AUDIT_LOGINUID unset ABI + - LP: #1427438 + * parisc: fix out-of-register compiler error in ldcw inline assembler + function + - LP: #1427438 + * kvm: x86: drop severity of "generation wraparound" message + - LP: #1427438 + * udf: Verify i_size when loading inode + - LP: #1427438 + * udf: Verify symlink size before loading it + - LP: #1427438 + * udf: Check path length when reading symlink + - LP: #1427438 + * udf: Check component length before reading it + - LP: #1427438 + * crypto: af_alg - fix backlog handling + - LP: #1427438 + * ASoC: dwc: Ensure FIFOs are flushed to prevent channel swap + - LP: #1427438 + * video/logo: prevent use of logos after they have been freed + - LP: #1427438 + * video/fbdev: fix defio's fsync + - LP: #1427438 + * Add USB_EHCI_EXYNOS to multi_v7_defconfig + - LP: #1427438 + * drm/i915: Swap primary planes on gen2 for FBC + - LP: #1427438 + * drm/i915: Don't swap planes on 830M + - LP: #1427438 + * drm/i915: Don't call intel_prepare_page_flip() multiple times on gen2-4 + - LP: #1427438 + * x86, vdso: Use asm volatile in __getcpu + - LP: #1427438 + * drivers: net: cpsw: enable interrupts after napi enable and clearing + previous interrupts + - LP: #1427438 + * net: ethernet: cpsw: fix hangs with interrupts + - LP: #1427438 + * ALSA: hda - Fix wrong gpio_dir & gpio_mask hint setups for IDT/STAC + codecs + - LP: #1427438 + * drm/radeon: KV has three PPLLs (v2) + - LP: #1427438 + * drm/radeon: properly filter DP1.2 4k modes on non-DP1.2 hw + - LP: #1427438 + * virtio_pci: defer kfree until release callback + - LP: #1427438 + * virtio_pci: document why we defer kfree + - LP: #1427438 + * mm: propagate error from stack expansion even for guard page + - LP: #1427438 + * ALSA: hda - Add new GPU codec ID to snd-hda + - LP: #1427438 + * ALSA: hda - Add new GPU codec ID 0x10de0070 to snd-hda + - LP: #1427438 + * ALSA: hda - Add new GPU codec ID 0x10de0072 to snd-hda + - LP: #1427438 + * vfio-pci: Fix the check on pci device type in vfio_pci_probe() + - LP: #1427438 + * mm: prevent endless growth of anon_vma hierarchy + - LP: #1427438 + * mm: protect set_page_dirty() from ongoing truncation + - LP: #1427438 + * mm, vmscan: prevent kswapd livelock due to pfmemalloc-throttled process + being killed + - LP: #1427438 + * HID: roccat: potential out of bounds in pyra_sysfs_write_settings() + - LP: #1427438 + * mm: Don't count the stack guard page towards RLIMIT_STACK + - LP: #1427438 + * mm: fix corner case in anon_vma endless growing prevention + - LP: #1427438 + * usb: musb: stuff leak of struct usb_hcd + - LP: #1427438 + * usb: gadget: udc: atmel: change setting for DMA + - LP: #1427438 + * usb: gadget: udc: atmel: fix possible IN hang issue + - LP: #1427438 + * ARM: clk-imx6q: fix video divider for rev T0 1.0 + - LP: #1427438 + * ARM: dts: imx25: Fix the SPI1 clocks + - LP: #1427438 + * USB: cp210x: fix ID for production CEL MeshConnect USB Stick + - LP: #1427438 + * USB: keyspan: fix null-deref at probe + - LP: #1427438 + * ARM: omap5/dra7xx: Fix frequency typos + - LP: #1427438 + * LOCKD: Fix a race when initialising nlmsvc_timeout + - 
LP: #1427438 + * NFSv4.1: Fix client id trunking on Linux + - LP: #1427438 + * USB: cp210x: add IDs for CEL USB sticks and MeshWorks devices + - LP: #1427438 + * USB: qcserial/option: make AT URCs work for Sierra Wireless MC73xx + - LP: #1427438 + * USB: EHCI: fix initialization bug in iso_stream_schedule() + - LP: #1427438 + * OHCI: add a quirk for ULi M5237 blocking on reset + - LP: #1427438 + * mei: clean reset bit before reset + - LP: #1427438 + * target: Drop arbitrary maximum I/O size limit + - LP: #1427438 + * usb: gadget: udc: atmel: fix possible oops when unloading module + - LP: #1427438 + * USB: console: fix uninitialised ldisc semaphore + - LP: #1427438 + * USB: console: fix potential use after free + - LP: #1427438 + * mmc: sdhci: Fix sleep in atomic after inserting SD card + - LP: #1427438 + * usb: dwc3: gadget: Fix TRB preparation during SG + - LP: #1427438 + * usb: dwc3: gadget: Stop TRB preparation after limit is reached + - LP: #1427438 + * ftrace/jprobes/x86: Fix conflict between jprobes and function graph + tracing + - LP: #1427438 + * clocksource: exynos_mct: Fix bitmask regression for exynos4_mct_write + - LP: #1427438 + * time: settimeofday: Validate the values of tv from user + - LP: #1427438 + * Input: i8042 - reset keyboard to fix Elantech touchpad detection + - LP: #1427438 + * drm/radeon: fix VM flush on cayman/aruba (v3) + - LP: #1427438 + * drm/radeon: fix VM flush on SI (v3) + - LP: #1427438 + * drm/radeon: fix VM flush on CIK (v3) + - LP: #1427438 + * drm/radeon: add a dpm quirk list + - LP: #1427438 + * Input: elantech - support new ICs types for version 4 + - LP: #1427438 + * Input: I8042 - add Acer Aspire 7738 to the nomux list + - LP: #1427438 + * drm/i915: Fix mutex->owner inspection race under DEBUG_MUTEXES + - LP: #1427438 + * drm/radeon: add si dpm quirk list + - LP: #1427438 + * pinctrl: Fix two deadlocks + - LP: #1427438 + * gpio / ACPI: register to ACPI events automatically + - LP: #1427438 + * gpio: fix memory and reference leaks in gpiochip_add error path + - LP: #1427438 + * gpio: fix sleep-while-atomic in gpiochip_remove + - LP: #1427438 + * can: dev: fix crtlmode_supported check + - LP: #1427438 + * can: kvaser_usb: Don't free packets when tight on URBs + - LP: #1427438 + * can: kvaser_usb: Reset all URB tx contexts upon channel close + - LP: #1427438 + * can: kvaser_usb: Don't send a RESET_CHIP for non-existing channels + - LP: #1427438 + * gpio: sysfs: fix gpio-chip device-attribute leak + - LP: #1427438 + * gpio: sysfs: fix gpio device-attribute leak + - LP: #1427438 + * gpiolib: of: Correct error handling in of_get_named_gpiod_flags + - LP: #1427438 + * ALSA: usb-audio: Add mic volume fix quirk for Logitech Webcam C210 + - LP: #1427438 + * fix deadlock in cifs_ioctl_clone() + - LP: #1427438 + * ipr: wait for aborted command responses + - LP: #1427438 + * libata: allow sata_sil24 to opt-out of tag ordered submission + - LP: #1427438 + * scripts/recordmcount.pl: There is no -m32 gcc option on Super-H anymore + - LP: #1427438 + * libata: prevent HSM state change race between ISR and PIO + - LP: #1427438 + * bus: mvebu-mbus: fix support of MBus window 13 + - LP: #1427438 + * ARM: dts: imx25: Fix PWM "per" clocks + - LP: #1427438 + * x86, boot: Skip relocs when load address unchanged + - LP: #1427438 + * x86, hyperv: Mark the Hyper-V clocksource as being continuous + - LP: #1427438 + * x86, tls, ldt: Stop checking lm in LDT_empty + - LP: #1427438 + * x86, tls: Interpret an all-zero struct user_desc as "no segment" + - LP: #1427438 + * 
x86/apic: Re-enable PCI_MSI support for non-SMP X86_32 + - LP: #1427438 + * x86/tsc: Change Fast TSC calibration failed from error to info + - LP: #1427438 + * dm cache: share cache-metadata object across inactive and active DM + tables + - LP: #1427438 + * dm cache: fix problematic dual use of a single migration count variable + - LP: #1427438 + * time: adjtimex: Validate the ADJ_FREQUENCY values + - LP: #1427438 + * ntp: Fixup adjtimex freq validation on 32-bit systems + - LP: #1427438 + * Linux 3.13.11-ckt16 + - LP: #1427438 + + -- Seth Forshee Tue, 03 Mar 2015 09:33:08 -0600 + +linux (3.13.0-46.79) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * IB/core: Prevent integer overflow in ib_umem_get address arithmetic + - LP: #1413741 + - CVE-2015-8159 + + -- Luis Henriques Mon, 09 Mar 2015 17:42:23 +0000 + +linux (3.13.0-46.77) trusty; urgency=low + + [ Seth Forshee ] + + * Revert "ipv6: fix swapped ipv4/ipv6 mtu_reduced callbacks" + - LP: #1404558 + * Release Tracking Bug + - LP: #1427292 + + -- Seth Forshee Mon, 02 Mar 2015 11:33:20 -0600 + +linux (3.13.0-46.76) trusty; urgency=low + + [ Kamal Mostafa ] + + * Release Tracking Bug + - LP: #1426060 + + [ Upstream Kernel Changes ] + + * clocksource: arch_timer: Only use the virtual counter (CNTVCT) on arm64 + - LP: #1426043 + + -- Kamal Mostafa Thu, 26 Feb 2015 09:53:44 -0800 + +linux (3.13.0-46.75) trusty; urgency=low + + [ Seth Forshee ] + + * Release Tracking Bug + - LP: #1419963 + + [ Andy Whitcroft ] + + * [Debian] arm64 -- build ubuntu drivers + - LP: #1411284 + * hyper-v -- fix comment handing in /etc/network/interfaces + - LP: #1413020 + + [ Kamal Mostafa ] + + * [Packaging] force "dpkg-source -I -i" behavior + + [ Upstream Kernel Changes ] + + * Revert "[SCSI] mpt2sas: Remove phys on topology change." + - LP: #1419838 + * Revert "[SCSI] mpt3sas: Remove phys on topology change" + - LP: #1419838 + * Btrfs: fix transaction abortion when remounting btrfs from RW to RO + - LP: #1411320 + * Btrfs: fix a crash of clone with inline extents's split + - LP: #1413129 + * net/mlx4_en: Add VXLAN ndo calls to the PF net device ops too + - LP: #1407760 + * KVM: x86: SYSENTER emulation is broken + - LP: #1414651 + - CVE-2015-0239 + * powerpc/xmon: Fix another endiannes issue in RTAS call from xmon + - LP: #1415919 + * ipv6: fix swapped ipv4/ipv6 mtu_reduced callbacks + - LP: #1404558, #1419837 + * usb: gadget: at91_udc: move prepare clk into process context + - LP: #1419837 + * KVM: x86: Fix far-jump to non-canonical check + - LP: #1419837 + * x86/tls: Validate TLS entries to protect espfix + - LP: #1419837 + * userns: Check euid no fsuid when establishing an unprivileged uid + mapping + - LP: #1419837 + * userns: Document what the invariant required for safe unprivileged + mappings. 
+ - LP: #1419837 + * userns: Only allow the creator of the userns unprivileged mappings + - LP: #1419837 + * x86_64, switch_to(): Load TLS descriptors before switching DS and ES + - LP: #1419837 + * isofs: Fix infinite looping over CE entries + - LP: #1419837 + * batman-adv: Calculate extra tail size based on queued fragments + - LP: #1419837 + * KEYS: close race between key lookup and freeing + - LP: #1419837 + * isofs: Fix unchecked printing of ER records + - LP: #1419837 + * x86_64, vdso: Fix the vdso address randomization algorithm + - LP: #1419837 + * groups: Consolidate the setgroups permission checks + - LP: #1419837 + * userns: Don't allow setgroups until a gid mapping has been setablished + - LP: #1419837 + * userns: Don't allow unprivileged creation of gid mappings + - LP: #1419837 + * move d_rcu from overlapping d_child to overlapping d_alias + - LP: #1419837 + * deal with deadlock in d_walk() + - LP: #1419837 + * Linux 3.13.11-ckt14 + - LP: #1419837 + * gre: fix the inner mac header in nbma tunnel xmit path + - LP: #1419838 + * netlink: Always copy on mmap TX. + - LP: #1419838 + * netlink: Don't reorder loads/stores before marking mmap netlink frame + as available + - LP: #1419838 + * in6: fix conflict with glibc + - LP: #1419838 + * tg3: tg3_disable_ints using uninitialized mailbox value to disable + interrupts + - LP: #1419838 + * batman-adv: Unify fragment size calculation + - LP: #1419838 + * batman-adv: avoid NULL dereferences and fix if check + - LP: #1419838 + * net: Fix stacked vlan offload features computation + - LP: #1419838 + * net: Reset secmark when scrubbing packet + - LP: #1419838 + * tcp: Do not apply TSO segment limit to non-TSO packets + - LP: #1419838 + * alx: fix alx_poll() + - LP: #1419838 + * team: avoid possible underflow of count_pending value for notify_peers + and mcast_rejoin + - LP: #1419838 + * enic: fix rx skb checksum + - LP: #1419838 + * net/core: Handle csum for CHECKSUM_COMPLETE VXLAN forwarding + - LP: #1419838 + * macvlan: unregister net device when netdev_upper_dev_link() fails + - LP: #1419838 + * netfilter: conntrack: disable generic tracking for known protocols + - LP: #1419838 + * xen-netfront: Fix handling packets on compound pages with skb_linearize + - LP: #1317811, #1419838 + * xen-netfront: use correct linear area after linearizing an skb + - LP: #1317811, #1419838 + * eCryptfs: Force RO mount when encrypted view is enabled + - LP: #1419838 + * smiapp: Take mutex during PLL update in sensor initialisation + - LP: #1419838 + * smiapp-pll: Correct clock debug prints + - LP: #1419838 + * sound: simplify au0828 quirk table + - LP: #1419838 + * sound: Update au0828 quirks table + - LP: #1419838 + * af9005: fix kernel panic on init if compiled without IR + - LP: #1419838 + * writeback: fix a subtle race condition in I_DIRTY clearing + - LP: #1419838 + * usb: renesas_usbhs: gadget: fix NULL pointer dereference in + ep_disable() + - LP: #1419838 + * KVM: s390: flush CPU on load control + - LP: #1419838 + * UBI: Fix double free after do_sync_erase() + - LP: #1419838 + * UBI: Fix invalid vfree() + - LP: #1419838 + * Drivers: hv: vmbus: Fix a race condition when unregistering a device + - LP: #1419838 + * driver core: Fix unbalanced device reference in drivers_probe + - LP: #1419838 + * PCI: Restore detection of read-only BARs + - LP: #1419838 + * scsi: correct return values for .eh_abort_handler implementations + - LP: #1419838 + * drm/radeon: fix typo in CI dpm disable + - LP: #1419838 + * ARM: tegra: Re-add removed SoC id macro to 
tegra_resume() + - LP: #1419838 + * arm64: Add COMPAT_HWCAP_LPAE + - LP: #1419838 + * genhd: check for int overflow in disk_expand_part_tbl() + - LP: #1419838 + * ftrace/x86: Add frames pointers to trampoline as necessary + - LP: #1419838 + * drm/ttm: Avoid memory allocation from shrinker functions. + - LP: #1419838 + * ASoC: sigmadsp: Refuse to load firmware files with a non-supported + version + - LP: #1419838 + * drm/radeon: work around a hw bug in MGCG on CIK + - LP: #1419838 + * Btrfs: make sure we wait on logged extents when fsycning two subvols + - LP: #1419838 + * Btrfs: do not move em to modified list when unpinning + - LP: #1419838 + * megaraid_sas: corrected return of wait_event from abort frame path + - LP: #1419838 + * ASoC: max98090: Fix ill-defined sidetone route + - LP: #1419838 + * blk-mq: use 'nr_cpu_ids' as highest CPU ID count for hwq <-> cpu map + - LP: #1419838 + * nfs41: fix nfs4_proc_layoutget error handling + - LP: #1419838 + * cdc-acm: memory leak in error case + - LP: #1419838 + * USB: cdc-acm: check for valid interfaces + - LP: #1419838 + * x86/asm/traps: Disable tracing and kprobes in fixup_bad_iret and + sync_regs + - LP: #1419838 + * uvcvideo: Fix destruction order in uvc_delete() + - LP: #1419838 + * HID: i2c-hid: fix race condition reading reports + - LP: #1419838 + * mfd: tc6393xb: Fail ohci suspend if full state restore is required + - LP: #1419838 + * serial: samsung: wait for transfer completion before clock disable + - LP: #1419838 + * mmc: dw_mmc: avoid write to CDTHRCTL on older versions + - LP: #1419838 + * Bluetooth: ath3k: Add support of MCI 13d3:3408 bt device + - LP: #1419838 + * eCryptfs: Remove buggy and unnecessary write in file name decode + routine + - LP: #1419838 + * n_tty: Fix read_buf race condition, increment read_head after pushing + data + - LP: #1419838 + * dm cache: only use overwrite optimisation for promotion when in + writeback mode + - LP: #1419838 + * dm cache: dirty flag was mistakenly being cleared when promoting via + overwrite + - LP: #1419838 + * dm bufio: fix memleak when using a dm_buffer's inline bio + - LP: #1419838 + * ath9k_hw: fix hardware queue allocation + - LP: #1419838 + * ath9k: fix BE/BK queue order + - LP: #1419838 + * ath5k: fix hardware queue index assignment + - LP: #1419838 + * tcm_loop: Fix wrong I_T nexus association + - LP: #1419838 + * iwlwifi: dvm: fix flush support for old firmware + - LP: #1419838 + * iommu/vt-d: Fix an off-by-one bug in __domain_mapping() + - LP: #1419838 + * dm crypt: use memzero_explicit for on-stack buffer + - LP: #1419838 + * mnt: Implicitly add MNT_NODEV on remount when it was implicitly added + by mount + - LP: #1419838 + * mnt: Update unprivileged remount test + - LP: #1419838 + * umount: Disallow unprivileged mount force + - LP: #1419838 + * md/raid56: Don't perform reads to support writes until stripe is ready. + - LP: #1419838 + * md/raid5: avoid livelock caused by non-aligned writes. + - LP: #1419838 + * md/raid5: fetch_block must fetch all the blocks handle_stripe_dirtying + wants. 
+ - LP: #1419838 + * drm/i915: Disallow pin ioctl completely for kms drivers + - LP: #1419838 + * drm/vmwgfx: Don't use memory accounting for kernel-side fence objects + - LP: #1419838 + * drm/vmwgfx: Fix fence event code + - LP: #1419838 + * hp_accel: Add support for HP ZBook 15 + - LP: #1419838 + * drm/radeon: check the right ring in radeon_evict_flags() + - LP: #1419838 + * swiotlb-xen: pass dev_addr to xen_dma_unmap_page and + xen_dma_sync_single_for_cpu + - LP: #1419838 + * swiotlb-xen: call xen_dma_sync_single_for_device when appropriate + - LP: #1419838 + * clocksource: arch_timer: Fix code to use physical timers when requested + - LP: #1419838 + * ALSA: hda - Fix built-in mic at resume on Lenovo Ideapad S210 + - LP: #1419838 + * can: peak_usb: fix memset() usage + - LP: #1419838 + * can: peak_usb: fix cleanup sequence order in case of error during init + - LP: #1419838 + * ALSA: usb-audio: Don't resubmit pending URBs at MIDI error recovery + - LP: #1419838 + * KEYS: Fix stale key registration at error path + - LP: #1419838 + * thermal: Fix error path in thermal_init() + - LP: #1419838 + * blk-mq: Fix a use-after-free + - LP: #1419838 + * fs: nfsd: Fix signedness bug in compare_blob + - LP: #1419838 + * nfsd4: fix xdr4 inclusion of escaped char + - LP: #1419838 + * userns: Rename id_map_mutex to userns_state_mutex + - LP: #1419838 + * drm/i915: Don't complain about stolen conflicts on gen3 + - LP: #1419838 + * ALSA: hda - Add EAPD fixup for ASUS Z99He laptop + - LP: #1419838 + * Btrfs: fix fs corruption on transaction abort if device supports + discard + - LP: #1419838 + * ncpfs: return proper error from NCP_IOC_SETROOT ioctl + - LP: #1419838 + * drivers/rtc/rtc-sirfsoc.c: move hardware initilization earlier in probe + - LP: #1419838 + * rtc: omap: fix missing wakealarm attribute + - LP: #1419838 + * exit: pidns: alloc_pid() leaks pid_namespace if child_reaper is exiting + - LP: #1419838 + * perf/x86/intel/uncore: Make sure only uncore events are collected + - LP: #1419838 + * perf: Fix events installation during moving group + - LP: #1419838 + * KVM: nVMX: Disable unrestricted mode if ept=0 + - LP: #1419838 + * drm/i915: save/restore GMBUS freq across suspend/resume on gen4 + - LP: #1419838 + * pstore-ram: Fix hangs by using write-combine mappings + - LP: #1419838 + * pstore-ram: Allow optional mapping with pgprot_noncached + - LP: #1419838 + * userns: Add a knob to disable setgroups on a per user namespace basis + - LP: #1419838 + * userns: Allow setting gid_maps without privilege when setgroups is + disabled + - LP: #1419838 + * userns: Unbreak the unprivileged remount tests + - LP: #1419838 + * HID: i2c-hid: prevent buffer overflow in early IRQ + - LP: #1419838 + * mac80211: fix multicast LED blinking and counter + - LP: #1419838 + * cfg80211: avoid mem leak on driver hint set + - LP: #1419838 + * mtd: tests: abort torturetest on erase errors + - LP: #1419838 + * tracing/sched: Check preempt_count() for current when reading + task->state + - LP: #1419838 + * iscsi,iser-target: Initiate termination only once + - LP: #1419838 + * iser-target: Fix flush + disconnect completion handling + - LP: #1419838 + * iser-target: Parallelize CM connection establishment + - LP: #1419838 + * iser-target: Fix connected_handler + teardown flow race + - LP: #1419838 + * iser-target: Handle ADDR_CHANGE event for listener cm_id + - LP: #1419838 + * iser-target: Fix implicit termination of connections + - LP: #1419838 + * genirq: Prevent proc race against freeing of irq descriptors + - LP: #1419838 + 
* x86/tls: Disallow unusual TLS segments + - LP: #1419838 + * powerpc/powernv: Switch off MMU before entering nap/sleep/rvwinkle mode + - LP: #1419838 + * ARC: [nsimosci] move peripherals to match model to FPGA + - LP: #1419838 + * scsi: blacklist RSOC for Microsoft iSCSI target devices + - LP: #1419838 + * rtlwifi: rtl8192ce: Set fw_ready flag + - LP: #1419838 + * iscsi-target: Fail connection on short sendmsg writes + - LP: #1419838 + * mac80211: free management frame keys when removing station + - LP: #1419838 + * ceph: do_sync is never initialized + - LP: #1419838 + * x86/tls: Don't validate lm in set_thread_area() after all + - LP: #1419838 + * ALSA: usb-audio: extend KEF X300A FU 10 tweak to Arcam rPAC + - LP: #1419838 + * mnt: Fix a memory stomp in umount + - LP: #1419838 + * ocfs2: fix journal commit deadlock + - LP: #1419838 + * tick/powerclamp: Remove tick_nohz_idle abuse + - LP: #1419838 + * Linux 3.13.11-ckt15 + - LP: #1419838 + + [ Xiong Zhang ] + + * SAUCE: ubuntu/i915: power on sink if dpcd read fail + - LP: #1416451 + + -- Seth Forshee Mon, 09 Feb 2015 14:08:50 -0600 + +linux (3.13.0-45.74) trusty; urgency=low + + [ Seth Forshee ] + + * Release Tracking Bug + - LP: #1410384 + + [ Jesse Barnes ] + + * SAUCE: drm/i915/vlv: assert and de-assert sideband reset at boot and + resume v3 + - LP: #1401963 + + [ K. Y. Srinivasan ] + + * SAUCE: storvsc: force SPC-3 compliance on win8 and win8 r2 hosts + - LP: #1406867 + + [ Timo Aaltonen ] + + * SAUCE: Switch VLV/BYT to use i915_bdw. + - LP: #1401963 + + [ Upstream Kernel Changes ] + + * Revert "xhci: clear root port wake on bits if controller isn't wake-up + capable" + - LP: #1408779 + * KVM: PPC: BOOK3S: HV: CMA: Reserve cma region only in hypervisor mode + - LP: #1400209 + * e1000e: Fix no connectivity when driver loaded with cable out + - LP: #1400365 + * net/mlx4_core: Enable CQE/EQE stride support + - LP: #1400127 + * net/mlx4_core: Cache line EQE size support + - LP: #1400127 + * net/mlx4_en: Add mlx4_en_get_cqe helper + - LP: #1400127 + * net/mlx4_core: Introduce mlx4_get_module_info for cable module info + reading + - LP: #1400127 + * ethtool, net/mlx4_en: Cable info, get_module_info/eeprom ethtool + support + - LP: #1400127 + * net/mlx4_core: Introduce ACCESS_REG CMD and eth_prot_ctrl dev cap + - LP: #1400127 + * net/mlx4_core: Add ethernet backplane autoneg device capability + - LP: #1400127 + * ethtool, net/mlx4_en: Add 100M, 20G, 56G speeds ethtool reporting + support + - LP: #1400127 + * net/mlx4_en: Use PTYS register to query ethtool settings + - LP: #1400127 + * net/mlx4_en: Use PTYS register to set ethtool settings (Speed) + - LP: #1400127 + * net/mlx4_en: Add support for setting rxvlan offload OFF/ON + - LP: #1400127 + * net/mlx4_en: Add ethtool support for [rx|tx]vlan offload set to OFF/ON + - LP: #1400127 + * net/mlx4_core: Prevent VF from changing port configuration + - LP: #1400127 + * net/mlx4_en: mlx4_en_set_settings() always fails when autoneg is set + - LP: #1400127 + * ipv4: fix nexthop attlen check in fib_nh_match + - LP: #1408779 + * vxlan: fix a use after free in vxlan_encap_bypass + - LP: #1408779 + * vxlan: using pskb_may_pull as early as possible + - LP: #1408779 + * vxlan: fix a free after use + - LP: #1408779 + * ipv4: fix a potential use after free in ip_tunnel_core.c + - LP: #1408779 + * ax88179_178a: fix bonding failure + - LP: #1408779 + * tcp: md5: do not use alloc_percpu() + - LP: #1408779 + * ipv4: dst_entry leak in ip_send_unicast_reply() + - LP: #1408779 + * drivers/net, ipv6: Select IPv6 
fragment idents for virtio UFO packets + - LP: #1408779 + * drivers/net: macvtap and tun depend on INET + - LP: #1408779 + * ip6_tunnel: Use ip6_tnl_dev_init as the ndo_init function. + - LP: #1408779 + * vti6: Use vti6_dev_init as the ndo_init function. + - LP: #1408779 + * sit: Use ipip6_tunnel_init as the ndo_init function. + - LP: #1408779 + * gre6: Move the setting of dev->iflink into the ndo_init functions. + - LP: #1408779 + * vxlan: Do not reuse sockets for a different address family + - LP: #1408779 + * net: sctp: fix memory leak in auth key management + - LP: #1408779 + * smsc911x: power-up phydev before doing a software reset. + - LP: #1408779 + * sunvdc: add cdrom and v1.1 protocol support + - LP: #1408779 + * sunvdc: compute vdisk geometry from capacity + - LP: #1408779 + * sunvdc: limit each sg segment to a page + - LP: #1408779 + * vio: fix reuse of vio_dring slot + - LP: #1408779 + * sunvdc: don't call VD_OP_GET_VTOC + - LP: #1408779 + * sparc64: Fix crashes in schizo_pcierr_intr_other(). + - LP: #1408779 + * sparc64: Do irq_{enter,exit}() around generic_smp_call_function*(). + - LP: #1408779 + * sparc32: Implement xchg and atomic_xchg using ATOMIC_HASH locks + - LP: #1408779 + * sparc64: Fix constraints on swab helpers. + - LP: #1408779 + * inetdevice: fixed signed integer overflow + - LP: #1408779 + * ipv4: Fix incorrect error code when adding an unreachable route + - LP: #1408779 + * ieee802154: fix error handling in ieee802154fake_probe() + - LP: #1408779 + * qmi_wwan: Add support for HP lt4112 LTE/HSPA+ Gobi 4G Modem + - LP: #1408779 + * pptp: fix stack info leak in pptp_getname() + - LP: #1408779 + * ipx: fix locking regression in ipx_sendmsg and ipx_recvmsg + - LP: #1408779 + * aio: fix uncorrent dirty pages accouting when truncating AIO ring + buffer + - LP: #1408779 + * spi: dw: Fix dynamic speed change. + - LP: #1408779 + * USB: serial: cp210x: add IDs for CEL MeshConnect USB Stick + - LP: #1408779 + * iio: Fix IIO_EVENT_CODE_EXTRACT_DIR bit mask + - LP: #1408779 + * usb: serial: ftdi_sio: add PIDs for Matrix Orbital products + - LP: #1408779 + * USB: keyspan: fix tty line-status reporting + - LP: #1408779 + * USB: keyspan: fix overrun-error reporting + - LP: #1408779 + * USB: ssu100: fix overrun-error reporting + - LP: #1408779 + * nfsd: correctly define v4.2 support attributes + - LP: #1408779 + * SUNRPC: Fix locking around callback channel reply receive + - LP: #1408779 + * nfsd: Fix slot wake up race in the nfsv4.1 callback code + - LP: #1408779 + * bnx2fc: do not add shared skbs to the fcoe_rx_list + - LP: #1408779 + * scsi: add Intel Multi-Flex to scsi scan blacklist + - LP: #1408779 + * ARM: 8216/1: xscale: correct auxiliary register in suspend/resume + - LP: #1408779 + * USB: xhci: don't start a halted endpoint before its new dequeue is set + - LP: #1408779 + * USB: xhci: Reset a halted endpoint immediately when we encounter a + stall. 
+ - LP: #1408779 + * usb: xhci: rework root port wake bits if controller isn't allowed to + wakeup + - LP: #1408779 + * ALSA: hda - Limit 40bit DMA for AMD HDMI controllers + - LP: #1408779 + * PCI/MSI: Add device flag indicating that 64-bit MSIs don't work + - LP: #1408779 + * gpu/radeon: Set flag to indicate broken 64-bit MSI + - LP: #1408779 + * sound/radeon: Move 64-bit MSI quirk from arch to driver + - LP: #1408779 + * powerpc/powernv: Honor the generic "no_64bit_msi" flag + - LP: #1408779 + * powerpc/pseries: Honor the generic "no_64bit_msi" flag + - LP: #1408779 + * MIPS: Loongson: Make platform serial setup always built-in. + - LP: #1408779 + * net/ping: handle protocol mismatching scenario + - LP: #1408779 + * usb-quirks: Add reset-resume quirk for MS Wireless Laser Mouse 6000 + - LP: #1408779 + * Input: xpad - use proper endpoint type + - LP: #1408779 + * powerpc: 32 bit getcpu VDSO function uses 64 bit instructions + - LP: #1408779 + * ARM: 8222/1: mvebu: enable strex backoff delay + - LP: #1408779 + * ARM: 8226/1: cacheflush: get rid of restarting block + - LP: #1408779 + * staging: r8188eu: Add new device ID for DLink GO-USB-N150 + - LP: #1408779 + * btrfs: zero out left over bytes after processing compression streams + - LP: #1408779 + * smiapp: Only some selection targets are settable + - LP: #1408779 + * i2c: omap: fix NACK and Arbitration Lost irq handling + - LP: #1408779 + * drm/nouveau/gf116: remove copy1 engine + - LP: #1408779 + * drm/i915: More cautious with pch fifo underruns + - LP: #1408779 + * drm/i915: Unlock panel even when LVDS is disabled + - LP: #1408779 + * AHCI: Add DeviceIDs for Sunrise Point-LP SATA controller + - LP: #1408779 + * sata_fsl: fix error handling of irq_of_parse_and_map + - LP: #1408779 + * drm/radeon: kernel panic in drm_calc_vbltimestamp_from_scanoutpos with + 3.18.0-rc6 + - LP: #1408779 + * mm: frontswap: invalidate expired data on a dup-store failure + - LP: #1408779 + * mm/vmpressure.c: fix race in vmpressure_work_fn() + - LP: #1408779 + * drivers/input/evdev.c: don't kfree() a vmalloc address + - LP: #1408779 + * mm: fix swapoff hang after page migration and fork + - LP: #1408779 + * mm: fix anon_vma_clone() error treatment + - LP: #1408779 + * slab: fix nodeid bounds check for non-contiguous node IDs + - LP: #1408779 + * ahci: disable MSI on SAMSUNG 0xa800 SSD + - LP: #1408779 + * i2c: davinci: generate STP always when NACK is received + - LP: #1408779 + * ip_tunnel: the lack of vti_link_ops' dellink() cause kernel panic + - LP: #1408779 + * ipv6: gre: fix wrong skb->protocol in WCCP + - LP: #1408779 + * Fix race condition between vxlan_sock_add and vxlan_sock_release + - LP: #1408779 + * tg3: fix ring init when there are more TX than RX channels + - LP: #1408779 + * net/mlx4_core: Limit count field to 24 bits in qp_alloc_res + - LP: #1408779 + * rtnetlink: release net refcnt on error in do_setlink() + - LP: #1408779 + * net: mvneta: fix Tx interrupt delay + - LP: #1408779 + * net: mvneta: fix race condition in mvneta_tx() + - LP: #1408779 + * net: sctp: use MAX_HEADER for headroom reserve in output path + - LP: #1408779 + * Linux 3.13.11-ckt13 + - LP: #1408779 + * ipv6: fix swapped ipv4/ipv6 mtu_reduced callbacks + - LP: #1404558 + * arm64: Fix machine_shutdown() definition + - LP: #1404335 + * arm64: Fix deadlock scenario with smp_send_stop() + - LP: #1404335 + * iwlwifi: mvm: a few more SKUs for 7260 and 3160 + * iwlwifi: fix and add 7265 series HW IDs + - LP: #1408222 + + -- Seth Forshee Tue, 13 Jan 2015 11:37:56 -0600 + +linux 
(3.13.0-44.73) trusty; urgency=low + + [ Kamal Mostafa ] + + * Release Tracking Bug + - LP: #1402872 + + [ AceLan Kao ] + + * SAUCE: Add use_native_backlight quirk for HP ProBook 6570b + - LP: #1359010 + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) arm64: optimized copy_to_user and copy_from_user + assembly code" + - LP: #1398596 + * [Config] updateconfigs to balance CONFIG_SCOM_DEBUGFS + + [ Upstream Kernel Changes ] + + * iwlwifi: mvm: fix merge damage + - LP: #1393317 + * iwlwifi: remove IWL_UCODE_TLV_FLAGS_SCHED_SCAN flag + - LP: #1393317 + * iwlwifi: mvm: disable scheduled scan to prevent firmware crash + - LP: #1393317 + * iwlwifi: mvm: enable scheduled scan on newest firmware + - LP: #1393317 + * x86: kvm: use alternatives for VMCALL vs. VMMCALL if kernel text is + read-only + - LP: #1379340 + * phylib: introduce PHY_INTERFACE_MODE_XGMII for 10G PHY + - LP: #1381084 + * of: make of_get_phy_mode parse 'phy-connection-type' + - LP: #1381084 + * xen-netfront: Remove BUGs on paged skb data which crosses a page + boundary + - LP: #1275879 + * ACPI / blacklist: blacklist Win8 OSI for Dell Vostro 3546 + - LP: #1383589 + * powerpc/pseries: Fix endiannes issue in RTAS call from xmon + - LP: #1396235 + * mmc: sdhci-pci-o2micro: Fix Dell E5440 issue + - LP: #1346067 + * mfd: rtsx: Fix PM suspend for 5227 & 5249 + - LP: #1359052 + * drivers:scsi:storvsc: Fix a bug in handling ring buffer failures that + may result in I/O freeze + - LP: #1400289 + * arm64: optimized copy_to_user and copy_from_user assembly code + - LP: #1400349 + * net:socket: set msg_namelen to 0 if msg_name is passed as NULL in + msghdr struct from userland. + - LP: #1335478 + * drm/radeon: initialize sadb to NULL in the audio code + - LP: #1402714 + * powerpc/vphn: NUMA node code expects big-endian + - LP: #1401150 + * ALSA: usb-audio: Fix device_del() sysfs warnings at disconnect + - LP: #1402853 + * ALSA: hda - Add mute LED pin quirk for HP 15 touchsmart + - LP: #1334950, #1402853 + * rcu: Make callers awaken grace-period kthread + - LP: #1402853 + * rcu: Use rcu_gp_kthread_wake() to wake up grace period kthreads + - LP: #1402853 + * net: sctp: fix NULL pointer dereference in af->from_addr_param on + malformed packet + - LP: #1402853 + * KVM: x86: Don't report guest userspace emulation error to userspace + - LP: #1402853 + * [media] ttusb-dec: buffer overflow in ioctl + - LP: #1402853 + * arm64: __clear_user: handle exceptions on strb + - LP: #1402853 + * ARM: pxa: fix hang on startup with DEBUG_LL + - LP: #1402853 + * samsung-laptop: Add broken-acpi-video quirk for NC210/NC110 + - LP: #1402853 + * acer-wmi: Add Aspire 5741 to video_vendor_dmi_table + - LP: #1402853 + * acer-wmi: Add acpi_backlight=video quirk for the Acer KAV80 + - LP: #1402853 + * rbd: Fix error recovery in rbd_obj_read_sync() + - LP: #1402853 + * [media] ds3000: fix LNB supply voltage on Tevii S480 on initialization + - LP: #1402853 + * powerpc: do_notify_resume can be called with bad thread_info flags + argument + - LP: #1402853 + * USB: kobil_sct: fix non-atomic allocation in write path + - LP: #1402853 + * USB: opticon: fix non-atomic allocation in write path + - LP: #1402853 + * regulator: max77693: Fix use of uninitialized regulator config + - LP: #1402853 + * USB: cdc-acm: add device id for GW Instek AFG-2225 + - LP: #1402853 + * usb: Do not allow usb_alloc_streams on unconfigured devices + - LP: #1402853 + * usb-storage: handle a skipped data phase + - LP: #1402853 + * xhci: Switch only Intel Lynx Point-LP ports to EHCI on shutdown. 
+ - LP: #1402853 + * xhci: no switching back on non-ULT Haswell + - LP: #1402853 + * of: Fix overflow bug in string property parsing functions + - LP: #1402853 + * spi: fsl-dspi: Fix CTAR selection + - LP: #1402853 + * Btrfs: fix kfree on list_head in btrfs_lookup_csums_range error cleanup + - LP: #1402853 + * staging:iio:ade7758: Fix NULL pointer deref when enabling buffer + - LP: #1402853 + * staging:iio:ade7758: Fix check if channels are enabled in prenable + - LP: #1402853 + * staging:iio:ade7758: Remove "raw" from channel name + - LP: #1402853 + * USB: cdc-acm: only raise DTR on transitions from B0 + - LP: #1402853 + * serial: Fix divide-by-zero fault in uart_get_divisor() + - LP: #1402853 + * tty: Fix high cpu load if tty is unreleaseable + - LP: #1402853 + * tty: Prevent "read/write wait queue active!" log flooding + - LP: #1402853 + * tty/vt: don't set font mappings on vc not supporting this + - LP: #1402853 + * spi: pxa2xx: toggle clocks on suspend if not disabled by runtime PM + - LP: #1402853 + * sysfs: driver core: Fix glue dir race condition by gdp_mutex + - LP: #1402853 + * i2c: at91: don't account as iowait + - LP: #1402853 + * nfsd: don't try to reuse an expired DRC entry off the list + - LP: #1402853 + * nfsd: don't halt scanning the DRC LRU list when there's an RC_INPROG + entry + - LP: #1402853 + * dm bufio: change __GFP_IO to __GFP_FS in shrinker callbacks + - LP: #1402853 + * xtensa: re-wire umount syscall to sys_oldumount + - LP: #1402853 + * dm raid: ensure superblock's size matches device's logical block size + - LP: #1402853 + * ahci: disable MSI instead of NCQ on Samsung pci-e SSDs on macbooks + - LP: #1402853 + * ahci: Add Device IDs for Intel Sunrise Point PCH + - LP: #1402853 + * power: charger-manager: Fix accessing invalidated power supply after + charger unbind + - LP: #1402853 + * mac80211: use secondary channel offset IE also beacons during CSA + - LP: #1402853 + * mac80211: schedule the actual switch of the station before CSA count 0 + - LP: #1402853 + * mac80211: properly flush delayed scan work on interface removal + - LP: #1402853 + * mac80211: fix use-after-free in defragmentation + - LP: #1402853 + * tun: Fix csum_start with VLAN acceleration + - LP: #1402853 + * macvtap: Fix csum_start when VLAN tags are present + - LP: #1402853 + * dm thin: grab a virtual cell before looking up the mapping + - LP: #1402853 + * KVM: x86: Fix uninitialized op->type for some immediate values + - LP: #1402853 + * crypto: caam - fix missing dma unmap on error path + - LP: #1402853 + * hwrng: pseries - port to new read API and fix stack corruption + - LP: #1402853 + * drm/radeon: set correct CE ram size for CIK + - LP: #1402853 + * drm/radeon: make sure mode init is complete in bandwidth_update + - LP: #1402853 + * drm/radeon: use gart for DMA IB tests + - LP: #1402853 + * drm/radeon: add missing crtc unlock when setting up the MC + - LP: #1402853 + * ALSA: hda_intel: Add Device IDs for Intel Sunrise Point PCH + - LP: #1402853 + * ALSA: hda_intel: Add DeviceIDs for Sunrise Point-LP + - LP: #1402853 + * Input: alps - ignore potential bare packets when device is out of sync + - LP: #1402853 + * Input: alps - allow up to 2 invalid packets without resetting device + - LP: #1402853 + * scsi: only re-lock door after EH on devices that were reset + - LP: #1402853 + * dm btree: fix a recursion depth bug in btree walking code + - LP: #1402853 + * parisc: Use compat layer for msgctl, shmat, shmctl and semtimedop + syscalls + - LP: #1402853 + * ALSA: usb-audio: Fix memory leak in 
FTU quirk + - LP: #1402853 + * audit: keep inode pinned + - LP: #1402853 + * nfs: fix pnfs direct write memory leak + - LP: #1402853 + * nfs: Fix use of uninitialized variable in nfs_getattr() + - LP: #1402853 + * NFSv4: Ensure that we remove NFSv4.0 delegations when state has expired + - LP: #1402853 + * NFSv4.1: nfs41_clear_delegation_stateid shouldn't trust + NFS_DELEGATED_STATE + - LP: #1402853 + * NFSv4: Fix races between nfs_remove_bad_delegation() and delegation + return + - LP: #1402853 + * NFSv4: Ensure that we call FREE_STATEID when NFSv4.x stateids are + revoked + - LP: #1402853 + * NFS: Don't try to reclaim delegation open state if recovery failed + - LP: #1402853 + * libceph: do not crash on large auth tickets + - LP: #1402853 + * ARM: 8191/1: decompressor: ensure I-side picks up relocated code + - LP: #1402853 + * ARM: 8198/1: make kuser helpers depend on MMU + - LP: #1402853 + * zram: avoid kunmap_atomic() of a NULL pointer + - LP: #1402853 + * Input: alps - ignore bad data on Dell Latitudes E6440 and E7440 + - LP: #1402853 + * firewire: cdev: prevent kernel stack leaking into ioctl arguments + - LP: #1402853 + * md: Always set RECOVERY_NEEDED when clearing RECOVERY_FROZEN + - LP: #1402853 + * nfs: Don't busy-wait on SIGKILL in __nfs_iocounter_wait + - LP: #1402853 + * target: Don't call TFO->write_pending if data_length == 0 + - LP: #1402853 + * vhost-scsi: Take configfs group dependency during + VHOST_SCSI_SET_ENDPOINT + - LP: #1402853 + * srp-target: Retry when QP creation fails with ENOMEM + - LP: #1402853 + * ASoC: fsi: remove unsupported PAUSE flag + - LP: #1402853 + * ASoC: rsnd: remove unsupported PAUSE flag + - LP: #1402853 + * ib_isert: Add max_send_sge=2 minimum for control PDU responses + - LP: #1402853 + * iser-target: Handle DEVICE_REMOVAL event on network portal listener + correctly + - LP: #1402853 + * ASoC: dpcm: Fix race between FE/BE updates and trigger + - LP: #1402853 + * mac80211: Fix regression that triggers a kernel BUG with CCMP + - LP: #1402853 + * rt2x00: do not align payload on modern H/W + - LP: #1402853 + * ath9k: Add version/revision macros for QCA9531 + - LP: #1402853 + * ath9k: Fix RTC_DERIVED_CLK usage + - LP: #1402853 + * ASoC: sgtl5000: Fix SMALL_POP bit definition + - LP: #1402853 + * ALSA: usb-audio: Add ctrl message delay quirk for Marantz/Denon devices + - LP: #1402853 + * bitops: Fix shift overflow in GENMASK macros + - LP: #1402853 + * x86: Require exact match for 'noxsave' command line option + - LP: #1402853 + * drm/i915: drop WaSetupGtModeTdRowDispatch:snb + - LP: #1402853 + * ASoC: wm_adsp: Avoid attempt to free buffers that might still be in use + - LP: #1402853 + * can: dev: avoid calling kfree_skb() from interrupt context + - LP: #1402853 + * can: esd_usb2: fix memory leak on disconnect + - LP: #1402853 + * x86, mm: Set NX across entire PMD at boot + - LP: #1402853 + * of/irq: Drop obsolete 'interrupts' vs 'interrupts-extended' text + - LP: #1402853 + * of/base: Fix PowerPC address parsing hack + - LP: #1402853 + * clockevent: sun4i: Fix race condition in the probe code + - LP: #1402853 + * MIPS: oprofile: Fix backtrace on 64-bit kernel + - LP: #1402853 + * ACPI / PM: Ignore wakeup setting if the ACPI companion can't wake up + - LP: #1402853 + * IB/isert: Adjust CQ size to HW limits + - LP: #1402853 + * drm/radeon: fix endian swapping in vbios fetch for tdp table + - LP: #1402853 + * Linux 3.13.11-ckt12 + - LP: #1402853 + * mm: Remove false WARN_ON from pagecache_isize_extended() + - LP: #1402764 + + -- Kamal Mostafa Mon, 15 
Dec 2014 16:00:52 -0800 + +linux (3.13.0-43.72) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1400408 + + [ Upstream Kernel Changes ] + + * x86_64, traps: Fix the espfix64 #DF fixup and rewrite it in C + - LP: #1398795 + - CVE-2014-9090 + * x86_64, traps: Rework bad_iret + - LP: #1398795 + - CVE-2014-9090 + * x86, kvm: Clear paravirt_enabled on KVM guests for espfix32's benefit + - LP: #1400314 + - CVE-2014-8134 + + -- Luis Henriques Mon, 08 Dec 2014 17:27:21 +0000 + +linux (3.13.0-42.71) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1399663 + + [ Upstream Kernel Changes ] + + * x86_64, traps: Stop using IST for #SS + - LP: #1398795 + - CVE-2014-9090 + + -- Luis Henriques Fri, 05 Dec 2014 14:29:10 +0000 + +linux (3.13.0-41.70) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1396112 + + [ Chris J Arges ] + + * [Config] CONFIG_SCOM_DEBUGFS=y for powerpc/powerpc64-smp + ppc64el/generic + - LP: #1395855 + + [ Upstream Kernel Changes ] + + * Revert "KVM: x86: Handle errors when RIP is set during far jumps" + - LP: #1393477 + * Revert "net/macb: add pinctrl consumer support" + - LP: #1393477 + * Revert "iwlwifi: mvm: treat EAPOLs like mgmt frames wrt rate" + - LP: #1393477 + * Revert "ipmi: simplify locking" + - LP: #1383921 + * ACPI / blacklist: add Win8 OSI quirks for some Dell laptop models + - LP: #1339456 + * ACPI / battery: Accelerate battery resume callback + - LP: #838543 + * tools: cpu-hotplug fix unexpected operator error + * netlink: reset network header before passing to taps + - LP: #1393477 + * rtnetlink: fix VF info size + - LP: #1393477 + * myri10ge: check for DMA mapping errors + - LP: #1393477 + * tcp: don't use timestamp from repaired skb-s to calculate RTT (v2) + - LP: #1393477 + * sit: Fix ipip6_tunnel_lookup device matching criteria + - LP: #1393477 + * tcp: fix tcp_release_cb() to dispatch via address family for + mtu_reduced() + - LP: #1393477 + * tcp: fix ssthresh and undo for consecutive short FRTO episodes + - LP: #1393477 + * packet: handle too big packets for PACKET_V3 + - LP: #1393477 + * openvswitch: fix panic with multiple vlan headers + - LP: #1393477 + * vxlan: fix incorrect initializer in union vxlan_addr + - LP: #1393477 + * l2tp: fix race while getting PMTU on PPP pseudo-wire + - LP: #1393477 + * bonding: fix div by zero while enslaving and transmitting + - LP: #1393477 + * bridge: Check if vlan filtering is enabled only once. + - LP: #1393477 + * bridge: Fix br_should_learn to check vlan_enabled + - LP: #1393477 + * net: allow macvlans to move to net namespace + - LP: #1393477 + * tg3: Work around HW/FW limitations with vlan encapsulated frames + - LP: #1393477 + * tg3: Allow for recieve of full-size 8021AD frames + - LP: #1393477 + * xfrm: Generate blackhole routes only from route lookup functions + - LP: #1393477 + * xfrm: Generate queueing routes only from route lookup functions + - LP: #1393477 + * macvtap: Fix race between device delete and open. + - LP: #1393477 + * gro: fix aggregation for skb using frag_list + - LP: #1393477 + * hyperv: Fix a bug in netvsc_start_xmit() + - LP: #1393477 + * ip6_gre: fix flowi6_proto value in xmit path + - LP: #1393477 + * team: avoid race condition in scheduling delayed work + - LP: #1393477 + * sctp: handle association restarts when the socket is closed. 
+ - LP: #1393477 + * tcp: fixing TLP's FIN recovery + - LP: #1393477 + * sparc64: Do not disable interrupts in nmi_cpu_busy() + - LP: #1393477 + * sparc64: Fix pcr_ops initialization and usage bugs. + - LP: #1393477 + * sparc32: dma_alloc_coherent must honour gfp flags + - LP: #1393477 + * sparc64: sun4v TLB error power off events + - LP: #1393477 + * sparc64: Fix corrupted thread fault code. + - LP: #1393477 + * sparc64: find_node adjustment + - LP: #1393477 + * sparc64: Move request_irq() from ldc_bind() to ldc_alloc() + - LP: #1393477 + * sparc: Let memset return the address argument + - LP: #1393477 + * sparc64: Fix reversed start/end in flush_tlb_kernel_range() + - LP: #1393477 + * sparc64: Fix lockdep warnings on reboot on Ultra-5 + - LP: #1393477 + * sparc64: Fix FPU register corruption with AES crypto offload. + - LP: #1393477 + * sparc64: Do not define thread fpregs save area as zero-length array. + - LP: #1393477 + * sparc64: Fix hibernation code refrence to PAGE_OFFSET. + - LP: #1393477 + * sparc64: correctly recognise M6 and M7 cpu type + - LP: #1393477 + * sparc64: support M6 and M7 for building CPU distribution map + - LP: #1393477 + * sparc64: cpu hardware caps support for sparc M6 and M7 + - LP: #1393477 + * sparc64: T5 PMU + - LP: #1393477 + * sparc64: Switch to 4-level page tables. + - LP: #1393477 + * sparc64: Define VA hole at run time, rather than at compile time. + - LP: #1393477 + * sparc64: Adjust KTSB assembler to support larger physical addresses. + - LP: #1393477 + * sparc64: Fix physical memory management regressions with large + max_phys_bits. + - LP: #1393477 + * sparc64: Use kernel page tables for vmemmap. + - LP: #1393477 + * sparc64: Increase MAX_PHYS_ADDRESS_BITS to 53. + - LP: #1393477 + * sparc64: Adjust vmalloc region size based upon available virtual + address bits. + - LP: #1393477 + * sparc64: sparse irq + - LP: #1393477 + * sparc64: Kill unnecessary tables and increase MAX_BANKS. + - LP: #1393477 + * sparc64: Increase size of boot string to 1024 bytes + - LP: #1393477 + * sparc64: Fix register corruption in top-most kernel stack frame during + boot. + - LP: #1393477 + * sparc64: Implement __get_user_pages_fast(). 
+ - LP: #1393477 + * ext4: check EA value offset when loading + - LP: #1393477 + * jbd2: free bh when descriptor block checksum fails + - LP: #1393477 + * ext4: don't check quota format when there are no quota files + - LP: #1393477 + * target: Fix queue full status NULL pointer for SCF_TRANSPORT_TASK_SENSE + - LP: #1393477 + * vfs: fix data corruption when blocksize < pagesize for mmaped data + - LP: #1393477 + * ext4: fix mmap data corruption when blocksize < pagesize + - LP: #1393477 + * ext4: grab missed write_count for EXT4_IOC_SWAP_BOOT + - LP: #1393477 + * qla_target: don't delete changed nacls + - LP: #1393477 + * target: Fix APTPL metadata handling for dynamic MappedLUNs + - LP: #1393477 + * iser-target: Disable TX completion interrupt coalescing + - LP: #1393477 + * ext4: don't orphan or truncate the boot loader inode + - LP: #1393477 + * ext4: add ext4_iget_normal() which is to be used for dir tree lookups + - LP: #1393477 + * ext4: fix reservation overflow in ext4_da_write_begin + - LP: #1393477 + * ext4: Replace open coded mdata csum feature to helper function + - LP: #1393477 + * ext4: move error report out of atomic context in + ext4_init_block_bitmap() + - LP: #1393477 + * ARC: [nsimosci] Allow "headless" models to boot + - LP: #1393477 + * ARC: Update order of registers in KGDB to match GDB 7.5 + - LP: #1393477 + * ext4: check s_chksum_driver when looking for bg csum presence + - LP: #1393477 + * drm/radeon: fix speaker allocation setup + - LP: #1393477 + * drm/radeon: use gart memory for DMA ring tests + - LP: #1393477 + * compiler: define OPTIMIZER_HIDE_VAR() macro + - LP: #1393477 + * random: add and use memzero_explicit() for clearing data + - LP: #1393477 + * ALSA: pcm: use the same dma mmap codepath both for arm and arm64 + - LP: #1393477 + * ALSA: ALC283 codec - Avoid pop noise on headphones during + suspend/resume + - LP: #1393477 + * ALSA: usb-audio: Add support for Steinberg UR22 USB interface + - LP: #1393477 + * ALSA: hda - hdmi: Fix missing ELD change event on plug/unplug + - LP: #1393477 + * arm64: compat: fix compat types affecting struct compat_elf_prpsinfo + - LP: #1393477 + * freezer: Do not freeze tasks killed by OOM killer + - LP: #1393477 + * OOM, PM: OOM killed task shouldn't escape PM suspend + - LP: #1393477 + * qxl: don't create too large primary surface + - LP: #1393477 + * MIPS: tlbex: Properly fix HUGE TLB Refill exception handler + - LP: #1393477 + * drm/cirrus: bind also to qemu-xen-traditional + - LP: #1393477 + * cpufreq: intel_pstate: Fix setting max_perf_pct in performance policy + - LP: #1393477 + * cpufreq: expose scaling_cur_freq sysfs file for set_policy() drivers + - LP: #1393477 + * cpufreq: intel_pstate: Reflect current no_turbo state correctly + - LP: #1393477 + * intel_pstate: Don't lose sysfs settings during cpu offline + - LP: #1393477 + * intel_pstate: Fix BYT frequency reporting + - LP: #1393477 + * intel_pstate: Correct BYT VID values. + - LP: #1393477 + * MIPS: ftrace: Fix a microMIPS build problem + - LP: #1393477 + * kvm: x86: don't kill guest on unknown exit reason + - LP: #1393477 + * kvm: fix excessive pages un-pinning in kvm_iommu_map error path. 
+ - LP: #1393477 + * KVM: x86: use new CS.RPL as CPL during task switch + - LP: #1393477 + * KVM: x86: Handle errors when RIP is set during far jumps + - LP: #1393477 + * KVM: x86: Fix far-jump to non-canonical check + - LP: #1393477 + * staging:iio:ad5933: Fix NULL pointer deref when enabling buffer + - LP: #1393477 + * staging:iio:ad5933: Drop "raw" from channel names + - LP: #1393477 + * iio: st_sensors: Fix buffer copy + - LP: #1393477 + * iio: mxs-lradc: Propagate the real error code on platform_get_irq() + failure + - LP: #1393477 + * iio: adc: mxs-lradc: Disable the clock on probe failure + - LP: #1393477 + * spi: pl022: Fix incorrect dma_unmap_sg + - LP: #1393477 + * mac80211: fix typo in starting baserate for rts_cts_rate_idx + - LP: #1393477 + * usb: dwc3: gadget: fix set_halt() bug with pending transfers + - LP: #1393477 + * usb: gadget: function: acm: make f_acm pass USB20CV Chapter9 + - LP: #1393477 + * ext3: Don't check quota format when there are no quota files + - LP: #1393477 + * quota: Properly return errors from dquot_writeback_dquots() + - LP: #1393477 + * USB: serial: cp210x: add Silicon Labs 358x VID and PID + - LP: #1393477 + * usb: serial: ftdi_sio: add Awinda Station and Dongle products + - LP: #1393477 + * usb: option: add support for Telit LE910 + - LP: #1393477 + * USB: option: add Haier CE81B CDMA modem + - LP: #1393477 + * x86, apic: Handle a bad TSC more gracefully + - LP: #1393477 + * i3200_edac: Report CE events properly + - LP: #1393477 + * i82860_edac: Report CE events properly + - LP: #1393477 + * cpc925_edac: Report UE events properly + - LP: #1393477 + * e7xxx_edac: Report CE events properly + - LP: #1393477 + * scsi: Fix error handling in SCSI_IOCTL_SEND_COMMAND + - LP: #1393477 + * usb: serial: ftdi_sio: add "bricked" FTDI device PID + - LP: #1393477 + * usb: musb: cppi41: restart hrtimer only if not yet done + - LP: #1393477 + * usb: gadget: udc: core: fix kernel oops with soft-connect + - LP: #1393477 + * nfsd4: fix crash on unknown operation number + - LP: #1393477 + * iwlwifi: configure the LTR + - LP: #1393477 + * mac80211: add vif to flush call + - LP: #1393477 + * iwlwifi: dvm: drop non VO frames when flushing + - LP: #1393477 + * usb: dwc3: gadget: Properly initialize LINK TRB + - LP: #1393477 + * Input: i8042 - quirks for Fujitsu Lifebook A544 and Lifebook AH544 + - LP: #1393477 + * posix-timers: Fix stack info leak in timer_create() + - LP: #1393477 + * futex: Fix a race condition between REQUEUE_PI and task death + - LP: #1393477 + * PM / Sleep: fix recovery during resuming from hibernation + - LP: #1393477 + * ALSA: pcm: Zero-clear reserved fields of PCM status ioctl in compat + mode + - LP: #1393477 + * ima: check xattr value length and type in the ima_inode_setxattr() + - LP: #1393477 + * evm: check xattr value length and type in evm_inode_setxattr() + - LP: #1393477 + * drm/radeon/dpm: disable ulv support on SI + - LP: #1393477 + * drm/radeon: dpm fixes for asrock systems + - LP: #1393477 + * drm/radeon: remove invalid pci id + - LP: #1393477 + * x86, pageattr: Prevent overflow in slow_virt_to_phys() for X86_PAE + - LP: #1393477 + * cgroup/kmemleak: add kmemleak_free() for cgroup deallocations. 
+ - LP: #1393477 + * mm: free compound page with correct order + - LP: #1393477 + * mm, thp: fix collapsing of hugepages on madvise + - LP: #1393477 + * lib/bitmap.c: fix undefined shift in __bitmap_shift_{left|right}() + - LP: #1393477 + * ext4: fix overflow when updating superblock backups after resize + - LP: #1393477 + * ext4: fix oops when loading block bitmap failed + - LP: #1393477 + * ext4: enable journal checksum when metadata checksum feature enabled + - LP: #1393477 + * ext4: bail out from make_indexed_dir() on first error + - LP: #1393477 + * PCI: Rename sysfs 'enabled' file back to 'enable' + - LP: #1393477 + * wireless: rt2x00: add new rt2800usb device + - LP: #1393477 + * fs: allow open(dir, O_TMPFILE|..., 0) with mode 0 + - LP: #1393477 + * tracing/syscalls: Ignore numbers outside NR_syscalls' range + - LP: #1393477 + * x86_64, entry: Fix out of bounds read on sysenter + - LP: #1393477 + * ACPI / EC: Add support to disallow QR_EC to be issued when SCI_EVT + isn't set + - LP: #1393477 + * ACPI / EC: Fix regression due to conflicting firmware behavior between + Samsung and Acer. + - LP: #1393477 + * net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks + - LP: #1393477 + * net: sctp: fix panic on duplicate ASCONF chunks + - LP: #1393477 + * net: sctp: fix remote memory pressure from excessive queueing + - LP: #1393477 + * Linux 3.13.11.11 + - LP: #1393477 + * MAINTAINERS: Update APM X-Gene section + - LP: #1381084 + * Documentation: dts: Update section header for APM X-Gene + - LP: #1381084 + * dtb: Add 10GbE node to APM X-Gene SoC device tree + - LP: #1381084 + * drivers: net: xgene: Preparing for adding 10GbE support + - LP: #1381084 + * drivers: net: xgene: Add 10GbE support + - LP: #1381084 + * drivers: net: xgene: Add 10GbE ethtool support + - LP: #1381084 + * dtb: Add SGMII based 1GbE node to APM X-Gene SoC device tree + - LP: #1381216 + * drivers: net: xgene: Preparing for adding SGMII based 1GbE + - LP: #1381216 + * drivers: net: xgene: Add SGMII based 1GbE support + - LP: #1381216 + * drivers: net: xgene: Add SGMII based 1GbE ethtool support + - LP: #1381216 + * drivers: net: xgene: Rewrite buggy loop in xgene_enet_ecc_init() + - LP: #1381216 + * dtb: xgene: fix: Backward compatibility with older firmware + - LP: #1381084, #1381216 + * drivers: net: xgene: Backward compatibility with older firmware + - LP: #1381084, #1381216 + * drivers: net: xgene: fix: Use separate resources + - LP: #1381216 + * HID: Add the transport-driver functions to the HIDP driver. + - LP: #1393764 + * ipc: fix compat msgrcv with negative msgtyp + - LP: #1393355 + + -- Luis Henriques Tue, 25 Nov 2014 12:07:01 +0000 + +linux (3.13.0-40.69) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - re-used previous tracking bug + + [ Upstream Kernel Changes ] + + * regmap: fix kernel hang on regmap_bulk_write with zero val_count. + + -- Luis Henriques Thu, 13 Nov 2014 17:13:01 +0000 + +linux (3.13.0-40.68) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1388943 + * SAUCE: DEP8 test to run our regression tests + - LP: #1385330 + * SAUCE: The very first thing we should do when testing is make sure we + are testing the correct kernel + - LP: #1385330 + + [ dann frazier ] + + * [Config] Disable CONFIG_IPMI_SI_PROBE_DEFAULTS on armhf and arm64 + - LP: #1388952 + + [ Duc Dang ] + + * SAUCE: (no-up) [PCIE] APM X-Gene: Remove debug messages in MSI + interrupt handler path. 
+ - LP: #1382244 + * SAUCE: (no-up) PCI: X-Gene: Fix max payload size and phantom function + configuration + - LP: #1386261 + + [ McAulay, Alistair ] + + * SAUCE: drm/i915: Rework GPU reset sequence to match driver load & thaw + - LP: #1384469 + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Fix cherry-pick typo + - LP: #1384469 + + [ Upstream Kernel Changes ] + + * Revert "mac80211: disable uAPSD if all ACs are under ACM" + - LP: #1381234 + * Revert "iwlwifi: dvm: don't enable CTS to self" + - LP: #1381234 + * Revert "lzo: properly check for overruns" + - LP: #1387886 + * drm/i915: provide interface for audio driver to query cdclk + - LP: #1381168 + * regulatory: add NUL to alpha2 + - LP: #1381234 + * percpu: fix pcpu_alloc_pages() failure path + - LP: #1381234 + * percpu: perform tlb flush after pcpu_map_pages() failure + - LP: #1381234 + * cgroup: reject cgroup names with '\n' + - LP: #1381234 + * vfs: add d_is_dir() + - LP: #1381234 + * CIFS: Fix directory rename error + - LP: #1381234 + * usb: phy: twl4030-usb: Fix lost interrupts after ID pin goes down + - LP: #1381234 + * rtlwifi: rtl8192cu: Add new ID + - LP: #1381234 + * CIFS: Fix wrong restart readdir for SMB1 + - LP: #1381234 + * CIFS: Fix wrong filename length for SMB2 + - LP: #1381234 + * ahci: Add Device IDs for Intel 9 Series PCH + - LP: #1381234 + * ata_piix: Add Device IDs for Intel 9 Series PCH + - LP: #1381234 + * USB: zte_ev: fix removed PIDs + - LP: #1381234 + * USB: ftdi_sio: add support for NOVITUS Bono E thermal printer + - LP: #1381234 + * USB: sierra: avoid CDC class functions on "68A3" devices + - LP: #1381234 + * USB: sierra: add 1199:68AA device ID + - LP: #1381234 + * iommu/arm-smmu: fix programming of SMMU_CBn_TCR for stage 1 + - LP: #1381234 + * iommu/arm-smmu: remove pgtable_page_{c,d}tor() + - LP: #1381234 + * usb: gadget: fusb300_udc.h: Fix typo in include guard + - LP: #1381234 + * usb: phy: tegra: Avoid use of sizeof(void) + - LP: #1381234 + * arm64: use irq_set_affinity with force=false when migrating irqs + - LP: #1381234 + * block: Fix dev_t minor allocation lifetime + - LP: #1381234 + * usb: dwc3: core: fix order of PM runtime calls + - LP: #1381234 + * usb: dwc3: core: fix ordering for PHY suspend + - LP: #1381234 + * usb: dwc3: omap: fix ordering for runtime pm calls + - LP: #1381234 + * iommu/fsl: Fix warning resulting from adding PCI device twice + - LP: #1381234 + * ahci: add pcid for Marvel 0x9182 controller + - LP: #1381234 + * drm/i915: Fix EIO/wedged handling in gem fault handler + - LP: #1381234 + * ACPI / RTC: Fix CMOS RTC opregion handler accesses to wrong addresses + - LP: #1381234 + * drm/i915: Evict CS TLBs between batches + - LP: #1381234 + * drm/i915: Wait for vblank before enabling the TV encoder + - LP: #1381234 + * lockd: fix rpcbind crash on lockd startup failure + - LP: #1381234 + * drm/radeon: fix semaphore value init + - LP: #1381234 + * drm/radeon: add connector quirk for fujitsu board + - LP: #1381234 + * imx-drm: ipuv3-plane: fix ipu_plane_dpms() + - LP: #1381234 + * usb: host: xhci: fix compliance mode workaround + - LP: #1381234 + * Input: elantech - fix detection of touchpad on ASUS s301l + - LP: #1381234 + * USB: ftdi_sio: Add support for GE Healthcare Nemo Tracker device + - LP: #1381234 + * uwb: init beacon cache entry before registering uwb device + - LP: #1381234 + * nfs: fix kernel warning when removing proc entry + - LP: #1381234 + * drm/radeon/dpm: set the thermal type properly for special configs + - LP: #1381234 + * dm cache: fix race causing dirty blocks to be 
marked as clean + - LP: #1381234 + * libceph: gracefully handle large reply messages from the mon + - LP: #1381234 + * Input: serport - add compat handling for SPIOCSTYPE ioctl + - LP: #1381234 + * usb: hub: take hub->hdev reference when processing from eventlist + - LP: #1381234 + * eventpoll: fix uninitialized variable in epoll_ctl + - LP: #1381234 + * kcmp: fix standard comparison bug + - LP: #1381234 + * fsnotify/fdinfo: use named constants instead of hardcoded values + - LP: #1381234 + * fs/notify: don't show f_handle if exportfs_encode_inode_fh failed + - LP: #1381234 + * arm64: flush TLS registers during exec + - LP: #1381234 + * storage: Add single-LUN quirk for Jaz USB Adapter + - LP: #1381234 + * xhci: Fix null pointer dereference if xhci initialization fails + - LP: #1381234 + * usb: xhci: Fix OOPS in xhci error handling code + - LP: #1381234 + * xhci: fix oops when xhci resumes from hibernate with hw lpm capable + devices + - LP: #1381234 + * drm/ast: open key before detect chips + - LP: #1381234 + * drm/ast: AST2000 cannot be detected correctly + - LP: #1381234 + * futex: Unlock hb->lock in futex_wait_requeue_pi() error path + - LP: #1381234 + * jiffies: Fix timeval conversion to jiffies + - LP: #1381234 + * alarmtimer: Return relative times in timer_gettime + - LP: #1381234 + * alarmtimer: Do not signal SIGEV_NONE timers + - LP: #1381234 + * alarmtimer: Lock k_itimer during timer callback + - LP: #1381234 + * parisc: Implement new LWS CAS supporting 64 bit operations. + - LP: #1381234 + * don't bugger nd->seq on set_root_rcu() from follow_dotdot_rcu() + - LP: #1381234 + * be careful with nd->inode in path_init() and follow_dotdot_rcu() + - LP: #1381234 + * iio:inkern: fix overwritten -EPROBE_DEFER in of_iio_channel_get_by_name + - LP: #1381234 + * iio:trigger: modify return value for iio_trigger_get + - LP: #1381234 + * iio: accel: bma180: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: adc: ad_sigma_delta: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: hid_sensor_hub: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: st_sensors: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: gyro: itg3200: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: inv_mpu6050: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: meter: ade7758: Fix indio_dev->trig assignment + - LP: #1381234 + * MIPS: ZBOOT: add missing include + - LP: #1381234 + * spi: fsl: Don't use devm_kzalloc in master->setup callback + - LP: #1381234 + * spi: dw: Don't use devm_kzalloc in master->setup callback + - LP: #1381234 + * ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs + - LP: #1381234 + * cx18: fix kernel oops with tda8290 tuner + - LP: #1381234 + * ASoC: davinci-mcasp: Correct rx format unit configuration + - LP: #1381234 + * spi: sirf: enable RX_IO_DMA_INT interrupt + - LP: #1381234 + * perf: Fix a race condition in perf_remove_from_context() + - LP: #1381234 + * ASoC: samsung-i2s: Maintain CDCLK settings across + i2s_{shutdown/startup} + - LP: #1381234 + * ASoC: samsung-i2s: Check secondary DAI exists before referencing + - LP: #1381234 + * ALSA: hda - Fix invalid pin powermap without jack detection + - LP: #1381234 + * Input: atkbd - do not try 'deactivate' keyboard on any LG laptops + - LP: #1381234 + * Input: i8042 - add Fujitsu U574 to no_timeout dmi table + - LP: #1381234 + * Input: i8042 - add nomux quirk for Avatar AVIU-145A6 + - LP: #1381234 + * iio: adc: at91: don't use the last converted data register + - LP: #1381234 + * 
iio:magnetometer: bugfix magnetometers gain values + - LP: #1381234 + * drm/i915: Fix SRC_COPY width on 830/845g + - LP: #1381234 + * Target/iser: Get isert_conn reference once got to connected_handler + - LP: #1381234 + * Target/iser: Don't put isert_conn inside disconnected handler + - LP: #1381234 + * ARM: 8148/1: flush TLS and thumbee register state during exec + - LP: #1381234 + * x86, ia64: Move EFI_FB vga_default_device() initialization to + pci_vga_fixup() + - LP: #1381234 + * vgaarb: Don't default exclusively to first video device with mem+io + - LP: #1381234 + * iscsi-target: Fix memory corruption in + iscsit_logout_post_handler_diffcid + - LP: #1381234 + * iscsi-target: avoid NULL pointer in iscsi_copy_param_list failure + - LP: #1381234 + * NFSv4: nfs4_state_manager() vs. nfs_server_remove_lists() + - LP: #1381234 + * NFSv4: Fix another bug in the close/open_downgrade code + - LP: #1381234 + * drm/radeon: don't reset dma on NI/SI init + - LP: #1381234 + * drm/radeon: don't reset sdma on CIK init + - LP: #1381234 + * drm/radeon: don't reset dma on r6xx-evergreen init + - LP: #1381234 + * vgaswitcheroo: add vga_switcheroo_fini_domain_pm_ops + - LP: #1381234 + * drm/radeon/px: fix module unload + - LP: #1381234 + * drm/nouveau/runpm: fix module unload + - LP: #1381234 + * libiscsi: fix potential buffer overrun in __iscsi_conn_send_pdu + - LP: #1381234 + * USB: EHCI: unlink QHs even after the controller has stopped + - LP: #1381234 + * USB: storage: Add quirk for Adaptec USBConnect 2000 USB-to-SCSI Adapter + - LP: #1381234 + * USB: storage: Add quirk for Ariston Technologies iConnect USB to SCSI + adapter + - LP: #1381234 + * USB: storage: Add quirks for Entrega/Xircom USB to SCSI converters + - LP: #1381234 + * drm/nouveau: ltc/gf100-: fix cbc issues on certain boards + - LP: #1381234 + * iwlwifi: increase DEFAULT_MAX_TX_POWER + - LP: #1381234 + * iwlwifi: mvm: treat EAPOLs like mgmt frames wrt rate + - LP: #1381234 + * workqueue: apply __WQ_ORDERED to create_singlethread_workqueue() + - LP: #1381234 + * can: flexcan: mark TX mailbox as TX_INACTIVE + - LP: #1381234 + * can: flexcan: correctly initialize mailboxes + - LP: #1381234 + * can: flexcan: implement workaround for errata ERR005829 + - LP: #1381234 + * can: flexcan: put TX mailbox into TX_INACTIVE mode after tx-complete + - LP: #1381234 + * can: at91_can: add missing prepare and unprepare of the clock + - LP: #1381234 + * IB/qib: Correct reference counting in debugfs qp_stats + - LP: #1381234 + * videobuf2-dma-sg: fix for wrong GFP mask to sg_alloc_table_from_pages + - LP: #1381234 + * vb2: fix plane index sanity check in vb2_plane_cookie() + - LP: #1381234 + * adv7604: fix inverted condition + - LP: #1381234 + * md/raid1: intialise start_next_window for READ case to avoid hang + - LP: #1381234 + * md/raid1: be more cautious where we read-balance during resync. + - LP: #1381234 + * md/raid1: clean up request counts properly in close_sync() + - LP: #1381234 + * md/raid1: make sure resync waits for conflicting writes to complete. + - LP: #1381234 + * md/raid1: Don't use next_resync to determine how far resync has + progressed + - LP: #1381234 + * md/raid1: update next_resync under resync_lock. + - LP: #1381234 + * md/raid1: count resync requests in nr_pending. + - LP: #1381234 + * md/raid1: fix_read_error should act on all non-faulty devices. + - LP: #1381234 + * ALSA: pcm: fix fifo_size frame calculation + - LP: #1381234 + * Fix nasty 32-bit overflow bug in buffer i/o code. 
+ - LP: #1381234 + * drm/radeon/cik: use a separate counter for CP init timeout + - LP: #1381234 + * parisc: Only use -mfast-indirect-calls option for 32-bit kernel builds + - LP: #1381234 + * sched: Fix unreleased llc_shared_mask bit during CPU hotplug + - LP: #1381234 + * MIPS: mcount: Adjust stack pointer for static trace in MIPS32 + - LP: #1381234 + * nilfs2: fix data loss with mmap() + - LP: #1381234 + * ocfs2/dlm: do not get resource spinlock if lockres is new + - LP: #1381234 + * mm, slab: initialize object alignment on cache creation + - LP: #1381234 + * mm: softdirty: keep bit when zapping file pte + - LP: #1381234 + * shmem: fix nlink for rename overwrite directory + - LP: #1381234 + * ARM: 8165/1: alignment: don't break misaligned NEON load/store + - LP: #1381234 + * ASoC: core: fix possible ZERO_SIZE_PTR pointer dereferencing error. + - LP: #1381234 + * cpufreq: integrator: fix integrator_cpufreq_remove return type + - LP: #1381234 + * drm/i915: Flush the PTEs after updating them before suspend + - LP: #1381234 + * ARM: 8178/1: fix set_tls for !CONFIG_KUSER_HELPERS + - LP: #1381234 + * md/raid5: disable 'DISCARD' by default due to safety concerns. + - LP: #1381234 + * mm: migrate: Close race between migration completion and mprotect + - LP: #1381234 + * mm: numa: Do not mark PTEs pte_numa when splitting huge pages + - LP: #1381234 + * Fix problem recognizing symlinks + - LP: #1381234 + * mm: memcontrol: do not iterate uninitialized memcgs + - LP: #1381234 + * perf: fix perf bug in fork() + - LP: #1381234 + * mm: page_alloc: fix zone allocation fairness on UP + - LP: #1381234 + * init/Kconfig: Hide printk log config if CONFIG_PRINTK=n + - LP: #1381234 + * init/Kconfig: Fix HAVE_FUTEX_CMPXCHG to not break up the EXPERT menu + - LP: #1381234 + * genhd: fix leftover might_sleep() in blk_free_devt() + - LP: #1381234 + * Linux 3.13.11.9 + - LP: #1381234 + * NVMe: Initialize device reference count earlier + - LP: #1382221 + * nfs: fix duplicate proc entries + - LP: #1376245 + * xfs: xfs_dir2_block_to_sf temp buffer allocation fails + - LP: #1382333 + * UPSTREAM: kernel: Mark function as static in kernel/seccomp.c + - LP: #1379020 + * ARM: 8087/1: ptrace: reload syscall number after secure_computing() + check + - LP: #1379020 + * seccomp: create internal mode-setting function + - LP: #1379020 + * seccomp: extract check/assign mode helpers + - LP: #1379020 + * seccomp: split mode setting routines + - LP: #1379020 + * seccomp: add "seccomp" syscall + - LP: #1379020 + * ARM: add seccomp syscall + - LP: #1379020 + * MIPS: add seccomp syscall + - LP: #1379020 + * sched: move no_new_privs into new atomic flags + - LP: #1379020 + * seccomp: split filter prep from check and apply + - LP: #1379020 + * seccomp: introduce writer locking + - LP: #1379020 + * seccomp: allow mode setting across threads + - LP: #1379020 + * seccomp: implement SECCOMP_FILTER_FLAG_TSYNC + - LP: #1379020 + * seccomp: Replace BUG(!spin_is_locked()) with assert_spin_lock + - LP: #1379020 + * fs: Add a missing permission check to do_umount + - LP: #1383358 + - CVE-2014-7975 + * mfd: rtsx_pcr: Fix MSI enable error handling + - LP: #1366841 + * xen/balloon: Don't continue ballooning when BP_ECANCELED is encountered + - LP: #1304001 + * Bluetooth: Fix HCI H5 corrupted ack value + - LP: #1387886 + * dmaengine: fix xor sources continuation + - LP: #1387886 + * siano: add support for PCTV 77e + - LP: #1387886 + * em28xx-v4l: give back all active video buffers to the vb2 core properly + on streaming stop + - LP: #1387886 + * 
em28xx-v4l: fix video buffer field order reporting in progressive mode + - LP: #1387886 + * crypto: caam - fix addressing of struct member + - LP: #1387886 + * x86, fpu: shift drop_init_fpu() from save_xstate_sig() to + handle_signal() + - LP: #1387886 + * x86, fpu: __restore_xstate_sig()->math_state_restore() needs + preempt_disable() + - LP: #1387886 + * KVM: do not bias the generation number in kvm_current_mmio_generation + - LP: #1387886 + * kvm: fix potentially corrupt mmio cache + - LP: #1387886 + * kvm: x86: fix stale mmio cache bug + - LP: #1387886 + * UBIFS: fix free log space calculation + - LP: #1387886 + * Bluetooth: Fix issue with USB suspend in btusb driver + - LP: #1387886 + * mmc: rtsx_pci_sdmmc: fix incorrect last byte in R2 response + - LP: #1387886 + * KVM: s390: unintended fallthrough for external call + - LP: #1387886 + * UBI: add missing kmem_cache_free() in process_pool_aeb error path + - LP: #1387886 + * PCI: Increase IBM ipr SAS Crocodile BARs to at least system page size + - LP: #1387886 + * drbd: compute the end before rb_insert_augmented() + - LP: #1387886 + * Bluetooth: Fix setting correct security level when initiating SMP + - LP: #1387886 + * mmc: tmio: prevent endless loop in tmio_mmc_set_clock() + - LP: #1387886 + * iwlwifi: Add missing PCI IDs for the 7260 series + - LP: #1387886 + * media: usb: uvc: add a quirk for Dell XPS M1330 webcam + - LP: #1387886 + * USB: serial: cp210x: added Ketra N1 wireless interface support + - LP: #1387886 + * USB: cp210x: add support for Seluxit USB dongle + - LP: #1387886 + * PCI: Generate uppercase hex for modalias interface class + - LP: #1387886 + * PCI: mvebu: Fix uninitialized variable in mvebu_get_tgt_attr() + - LP: #1387886 + * xfs: ensure WB_SYNC_ALL writeback handles partial pages correctly + - LP: #1387886 + * v4l2-common: fix overflow in v4l_bound_align_image() + - LP: #1387886 + * USB: Add device quirk for ASUS T100 Base Station keyboard + - LP: #1387886 + * mei: bus: fix possible boundaries violation + - LP: #1387886 + * firmware_class: make sure fw requests contain a name + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_post_msg() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_teardown_gpadl() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_establish_gpadl() + - LP: #1387886 + * Drivers: hv: vmbus: Fix a bug in vmbus_open() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_close_internal() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup hv_post_message() + - LP: #1387886 + * spi: dw-mid: respect 8 bit mode + - LP: #1387886 + * spi: dw-mid: terminate ongoing transfers at exit + - LP: #1387886 + * kvm: don't take vcpu mutex for obviously invalid vcpu ioctls + - LP: #1387886 + * x86/intel/quark: Switch off CR4.PGE so TLB flush uses CR3 instead + - LP: #1387886 + * ARM: at91: fix at91sam9263ek DT mmc pinmuxing settings + - LP: #1387886 + * ARM: at91/PMC: don't forget to write PMC_PCDR register to disable + clocks + - LP: #1387886 + * Fixing lease renewal + - LP: #1387886 + * lockd: Try to reconnect if statd has moved + - LP: #1387886 + * qla2xxx: Use correct offset to req-q-out for reserve calculation + - LP: #1387886 + * power: charger-manager: Fix NULL pointer exception with missing + cm-fuel-gauge + - LP: #1387886 + * rt2800: correct BBP1_TX_POWER_CTRL mask + - LP: #1387886 + * regmap: fix NULL pointer dereference in _regmap_write/read + - LP: #1387886 + * Documentation: lzo: document part of the encoding + - LP: #1387886 + * lzo: check for length overrun in variable length encoding. 
+ - LP: #1387886 + * regmap: debugfs: fix possbile NULL pointer dereference + - LP: #1387886 + * regmap: fix possible ZERO_SIZE_PTR pointer dereferencing error. + - LP: #1387886 + * net_dma: simple removal + - LP: #1387886 + * libata-sff: Fix controllers with no ctl port + - LP: #1387886 + * NFSv4: Fix lock recovery when CREATE_SESSION/SETCLIENTID_CONFIRM fails + - LP: #1387886 + * NFSv4: fix open/lock state recovery error handling + - LP: #1387886 + * tty: omap-serial: fix division by zero + - LP: #1387886 + * serial: 8250: Add Quark X1000 to 8250_pci.c + - LP: #1387886 + * missing data dependency barrier in prepend_name() + - LP: #1387886 + * be2iscsi: check ip buffer before copying + - LP: #1387886 + * framebuffer: fix border color + - LP: #1387886 + * framebuffer: fix screen corruption when copying + - LP: #1387886 + * mpc85xx_edac: Make L2 interrupt shared too + - LP: #1387886 + * NFSv4.1: Fix an NFSv4.1 state renewal regression + - LP: #1387886 + * xen-blkback: fix leak on grant map error path + - LP: #1387886 + * m68k: Disable/restore interrupts in hwreg_present()/hwreg_write() + - LP: #1387886 + * ASoC: tlv320aic3x: fix PLL D configuration + - LP: #1387886 + * dm bufio: update last_accessed when relinking a buffer + - LP: #1387886 + * dm bufio: when done scanning return from __scan immediately + - LP: #1387886 + * dm log userspace: fix memory leak in dm_ulog_tfr_init failure path + - LP: #1387886 + * ecryptfs: avoid to access NULL pointer when write metadata in xattr + - LP: #1387886 + * x86_64, entry: Filter RFLAGS.NT on entry from userspace + - LP: #1387886 + * ASoC: soc-dapm: fix use after free + - LP: #1387886 + * pata_serverworks: disable 64-KB DMA transfers on Broadcom OSB4 IDE + Controller + - LP: #1387886 + * drm/ast: Fix HW cursor image + - LP: #1387886 + * x86: Reject x32 executables if x32 ABI not supported + - LP: #1387886 + * kill wbuf_queued/wbuf_dwork_lock + - LP: #1387886 + * fs: Fix theoretical division by 0 in super_cache_scan(). 
+ - LP: #1387886 + * fs: make cont_expand_zero interruptible + - LP: #1387886 + * fix misuses of f_count() in ppp and netlink + - LP: #1387886 + * block: fix alignment_offset math that assumes io_min is a power-of-2 + - LP: #1387886 + * fanotify: enable close-on-exec on events' fd when requested in + fanotify_init() + - LP: #1387886 + * mm: clear __GFP_FS when PF_MEMALLOC_NOIO is set + - LP: #1387886 + * Input: i8042 - add noloop quirk for Asus X750LN + - LP: #1387886 + * um: ubd: Fix for processes stuck in D state forever + - LP: #1387886 + * kernel: add support for gcc 5 + - LP: #1387886 + * ALSA: emu10k1: Fix deadlock in synth voice lookup + - LP: #1387886 + * libceph: ceph-msgr workqueue needs a resque worker + - LP: #1387886 + * mnt: Prevent pivot_root from creating a loop in the mount tree + - LP: #1387886 + * modules, lock around setting of MODULE_STATE_UNFORMED + - LP: #1387886 + * virtio_pci: fix virtio spec compliance on restore + - LP: #1387886 + * selinux: fix inode security list corruption + - LP: #1387886 + * pstore: Fix duplicate {console,ftrace}-efi entries + - LP: #1387886 + * futex: Ensure get_futex_key_refs() always implies a barrier + - LP: #1387886 + * x86,kvm,vmx: Preserve CR4 across VM entry + - LP: #1387886 + * crypto: caam - remove duplicated sg copy functions + - LP: #1387886 + * Linux 3.13.11.10 + - LP: #1387886 + * ipmi: Turn off default probing of interfaces + - LP: #1388952 + + -- Tim Gardner Wed, 29 Oct 2014 14:39:14 -0400 + +linux (3.13.0-39.66) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1386629 + + [ Upstream Kernel Changes ] + + * KVM: x86: Check non-canonical addresses upon WRMSR + - LP: #1384539 + - CVE-2014-3610 + * KVM: x86: Prevent host from panicking on shared MSR writes. + - LP: #1384539 + - CVE-2014-3610 + * KVM: x86: Improve thread safety in pit + - LP: #1384540 + - CVE-2014-3611 + * KVM: x86: Fix wrong masking on relative jump/call + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: Warn if guest virtual address space is not 48-bits + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: Emulator fixes for eip canonical checks on near branches + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: emulating descriptor load misses long-mode case + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: Handle errors when RIP is set during far jumps + - LP: #1384545 + - CVE-2014-3647 + * kvm: vmx: handle invvpid vm exit gracefully + - LP: #1384544 + - CVE-2014-3646 + * Input: synaptics - gate forcepad support by DMI check + - LP: #1381815 + + -- Luis Henriques Tue, 28 Oct 2014 10:29:51 +0000 + +linux (3.13.0-38.65) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1379244 + + [ Andy Whitcroft ] + + * Revert "SAUCE: scsi: hyper-v storsvc switch up to SPC-3" + - LP: #1354397 + * [Config] linux-image-extra is additive to linux-image + - LP: #1375310 + * [Config] linux-image-extra postrm is not needed on purge + - LP: #1375310 + + [ Upstream Kernel Changes ] + + * Revert "KVM: x86: Increase the number of fixed MTRR regs to 10" + - LP: #1377564 + * Revert "USB: option,zte_ev: move most ZTE CDMA devices to zte_ev" + - LP: #1377564 + * aufs: bugfix, stop calling security_mmap_file() again + - LP: #1371316 + * ipvs: fix ipv6 hook registration for local replies + - LP: #1349768 + * Drivers: add blist flags + - LP: #1354397 + * sd: fix a bug in deriving the FLUSH_TIMEOUT from the basic I/O timeout + - LP: #1354397 + * drm/i915/bdw: Add 42ms delay for IPS disable + - LP: #1374389 + * drm/i915: add null render states for 
gen6, gen7 and gen8 + - LP: #1374389 + * drm/i915/bdw: 3D_CHICKEN3 has write mask bits + - LP: #1374389 + * drm/i915/bdw: Disable idle DOP clock gating + - LP: #1374389 + * drm/i915: call lpt_init_clock_gating on BDW too + - LP: #1374389 + * drm/i915: shuffle panel code + - LP: #1374389 + * drm/i915: extract backlight minimum brightness from VBT + - LP: #1374389 + * drm/i915: respect the VBT minimum backlight brightness + - LP: #1374389 + * drm/i915/bdw: Apply workarounds in render ring init function + - LP: #1374389 + * drm/i915/bdw: Cleanup pre prod workarounds + - LP: #1374389 + * drm/i915: Replace hardcoded cacheline size with macro + - LP: #1374389 + * drm/i915: Refactor Broadwell PIPE_CONTROL emission into a helper. + - LP: #1374389 + * drm/i915: Add the WaCsStallBeforeStateCacheInvalidate:bdw workaround. + - LP: #1374389 + * drm/i915/bdw: Remove BDW preproduction W/As until C stepping. + - LP: #1374389 + * mptfusion: enable no_write_same for vmware scsi disks + - LP: #1371591 + * iommu/amd: Fix cleanup_domain for mass device removal + - LP: #1375266 + * cifs: mask off top byte in get_rfc1002_length() + - LP: #1372482 + * Input: synaptics - add support for ForcePads + - LP: #1377564 + * ASoC: pxa-ssp: drop SNDRV_PCM_FMTBIT_S24_LE + - LP: #1377564 + * drm/radeon: add bapm module parameter + - LP: #1377564 + * drm/radeon: Add missing lines to ci_set_thermal_temperature_range + - LP: #1377564 + * drm/radeon: Add ability to get and change dpm state when radeon PX card + is turned off + - LP: #1377564 + * ALSA: hda/realtek - Avoid setting wrong COEF on ALC269 & co + - LP: #1377564 + * of/irq: Fix lookup to use 'interrupts-extended' property first + - LP: #1377564 + * Possible null ptr deref in SMB2_tcon + - LP: #1377564 + * CIFS: Fix SMB2 readdir error handling + - LP: #1377564 + * CIFS: Fix wrong directory attributes after rename + - LP: #1377564 + * md/raid6: avoid data corruption during recovery of double-degraded + RAID6 + - LP: #1377564 + * ARM: dts: i.MX53: fix apparent bug in VPU clks + - LP: #1377564 + * pata_scc: propagate return value of scc_wait_after_reset + - LP: #1377564 + * libata: widen Crucial M550 blacklist matching + - LP: #1377564 + * ALSA: hda - restore the gpio led after resume + - LP: #1358116, #1377564 + * md/raid10: fix memory leak when reshaping a RAID10. + - LP: #1377564 + * md/raid10: Fix memory leak when raid10 reshape completes. 
+ - LP: #1377564 + * MIPS: OCTEON: make get_system_type() thread-safe + - LP: #1377564 + * can: c_can: checking IS_ERR() instead of NULL + - LP: #1377564 + * HID: logitech: perform bounds checking on device_id early enough + - LP: #1377564 + * firmware: Do not use WARN_ON(!spin_is_locked()) + - LP: #1377564 + * drm/radeon: add new KV pci id + - LP: #1377564 + * drm/radeon: add new bonaire pci ids + - LP: #1377564 + * drm/radeon: add additional SI pci ids + - LP: #1377564 + * ibmveth: Fix endian issues with rx_no_buffer statistic + - LP: #1377564 + * spi/omap-mcspi: Fix the spi task hangs waiting dma_rx + - LP: #1377564 + * xtensa: replace IOCTL code definitions with constants + - LP: #1377564 + * xtensa: fix address checks in dma_{alloc,free}_coherent + - LP: #1377564 + * xtensa: fix access to THREAD_RA/THREAD_SP/THREAD_DS + - LP: #1377564 + * xtensa: fix TLBTEMP_BASE_2 region handling in fast_second_level_miss + - LP: #1377564 + * xtensa: fix a6 and a7 handling in fast_syscall_xtensa + - LP: #1377564 + * staging: lustre: Remove circular dependency on header + - LP: #1377564 + * USB: option: reduce interrupt-urb logging verbosity + - LP: #1377564 + * USB: option: add VIA Telecom CDS7 chipset device id + - LP: #1377564 + * USB: zte_ev: remove duplicate Gobi PID + - LP: #1377564 + * USB: zte_ev: remove duplicate Qualcom PID + - LP: #1377564 + * USB: ftdi_sio: add Basic Micro ATOM Nano USB2Serial PID + - LP: #1377564 + * USB: serial: pl2303: add device id for ztek device + - LP: #1377564 + * USB: ftdi_sio: Added PID for new ekey device + - LP: #1377564 + * xhci: Treat not finding the event_seg on COMP_STOP the same as + COMP_STOP_INVAL + - LP: #1377564 + * usb: xhci: amd chipset also needs short TX quirk + - LP: #1377564 + * xhci: rework cycle bit checking for new dequeue pointers + - LP: #1377564 + * spi/pxa2xx: Add ACPI ID for Intel Braswell + - LP: #1377564 + * ALSA: core: fix buffer overflow in snd_info_get_line() + - LP: #1377564 + * HID: logitech-dj: prevent false errors to be shown + - LP: #1377564 + * usb: ehci: using wIndex + 1 for hub port + - LP: #1377564 + * staging/rtl8188eu: add 0df6:0076 Sitecom Europe B.V. + - LP: #1377564 + * staging: r8188eu: Add new USB ID + - LP: #1377564 + * mtd: nand: omap: Fix 1-bit Hamming code scheme, omap_calculate_ecc() + - LP: #1377564 + * trace: Fix epoll hang when we race with new entries + - LP: #1377564 + * cfq-iosched: Fix wrong children_weight calculation + - LP: #1377564 + * USB: sisusb: add device id for Magic Control USB video + - LP: #1377564 + * NFSv4: Fix problems with close in the presence of a delegation + - LP: #1377564 + * usb: hub: Prevent hub autosuspend if usbcore.autosuspend is -1 + - LP: #1377564 + * ARM: 8128/1: abort: don't clear the exclusive monitors + - LP: #1377564 + * ARM: 8129/1: errata: work around Cortex-A15 erratum 830321 using dummy + strex + - LP: #1377564 + * USB: serial: fix potential stack buffer overflow + - LP: #1377564 + * USB: serial: fix potential heap buffer overflow + - LP: #1377564 + * ext4: update i_disksize coherently with block allocation on error path + - LP: #1377564 + * jbd2: fix infinite loop when recovering corrupt journal blocks + - LP: #1377564 + * jbd2: fix descriptor block size handling errors with journal_csum + - LP: #1377564 + * memblock, memhotplug: fix wrong type in memblock_find_in_range_node(). 
+ - LP: #1377564 + * xattr: fix check for simultaneous glibc header inclusion + - LP: #1377564 + * KVM: s390: Fix user triggerable bug in dead code + - LP: #1377564 + * KVM: s390/mm: try a cow on read only pages for key ops + - LP: #1377564 + * regmap: Fix regcache debugfs initialization + - LP: #1377564 + * regmap: Fix handling of volatile registers for format_write() chips + - LP: #1377564 + * ASoC: rt5640: Do not allow regmap to use bulk read-write operations + - LP: #1377564 + * drm/i915: Remove bogus __init annotation from DMI callbacks + - LP: #1377564 + * hwmon: (ds1621) Update zbits after conversion rate change + - LP: #1377564 + * arm64: ptrace: fix compat hardware watchpoint reporting + - LP: #1377564 + * ARM/ARM64: KVM: Nuke Hyp-mode tlbs before enabling MMU + - LP: #1377564 + * arm/arm64: KVM: Complete WFI/WFE instructions + - LP: #1377564 + * get rid of propagate_umount() mistakenly treating slaves as busy. + - LP: #1377564 + * fix EBUSY on umount() from MNT_SHRINKABLE + - LP: #1377564 + * regmap: Don't attempt block writes when syncing cache on single_rw + devices + - LP: #1377564 + * drm/vmwgfx: Fix a potential infinite spin waiting for fifo idle + - LP: #1377564 + * ALSA: hda - Fix digital mic on Acer Aspire 3830TG + - LP: #1377564 + * xfs: don't dirty buffers beyond EOF + - LP: #1377564 + * xfs: don't zero partial page cache pages during O_DIRECT writes + - LP: #1377564 + * xfs: don't zero partial page cache pages during O_DIRECT writes + - LP: #1377564 + * ALSA: hda - Fix COEF setups for ALC1150 codec + - LP: #1377564 + * i2c: rcar: fix MNR interrupt handling + - LP: #1377564 + * i2c: mv64xxx: continue probe when clock-frequency is missing + - LP: #1377564 + * i2c: at91: Fix a race condition during signal handling in + at91_do_twi_xfer. + - LP: #1377564 + * i2c: at91: add bound checking on SMBus block length bytes + - LP: #1377564 + * aio: add missing smp_rmb() in read_events_ring + - LP: #1377564 + * KEYS: Fix use-after-free in assoc_array_gc() + - LP: #1377564 + * ACPI / cpuidle: fix deadlock between cpuidle_lock and cpu_hotplug.lock + - LP: #1377564 + * USB: fix build error with CONFIG_PM_RUNTIME disabled + - LP: #1377564 + * Linux 3.13.11.8 + - LP: #1377564 + * powerpc: Fix kdump hang issue on p8 with relocation on exception + enabled. 
+ - LP: #1352056 + * net-gre-gro: Fix a bug that breaks the forwarding path + - LP: #1377851 + + -- Luis Henriques Thu, 09 Oct 2014 10:31:36 +0100 + +linux (3.13.0-37.64) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1372576 + + [ dann frazier ] + + * [Config] CONFIG_HW_RANDOM_XGENE=m on arm64 + + [ Edward Lin ] + + * SAUCE: Add use_native_backlight quirk for Dell Inspiron 5721/3521 + - LP: #1354253, #1354313 + + [ Tim Gardner ] + + * SAUCE: Fix nfs oops stable regression + - LP: #1348670 + * [Config] Add mpt3sas to d-i + - LP: #1368907 + * [Config] CONFIG_X86_16BIT=y + - LP: #1371601 + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Rebase to v3.15.8 + - LP: #1359213 + + [ Upstream Kernel Changes ] + + * Revert "x86-64, modify_ldt: Make support for 16-bit segments a runtime + option" + - LP: #1371601 + * mmc: rtsx: add R1-no-CRC mmc command type handle + - LP: #1365378 + * rpc_pipe: remove the clntXX dir if creating the pipe fails + - LP: #1365869 + * sunrpc: add an "info" file for the dummy gssd pipe + - LP: #1365869 + * rpc_pipe: fix cleanup of dummy gssd directory when notification fails + - LP: #1365869 + * hwrng: xgene - add support for APM X-Gene SoC RNG support + - LP: #1365593 + * Documentation: rng: Add X-Gene SoC RNG driver documentation + - LP: #1365593 + * arm64: dts: add random number generator dts node to APM X-Gene + platform. + - LP: #1365593 + * xen/balloon: cancel ballooning if adding new memory failed + - LP: #1304001 + * x86/xen: resume timer irqs early + - LP: #1368724 + * xen/manage: Always freeze/thaw processes when suspend/resuming + - LP: #1368724 + * scsi_transport_sas: move bsg destructor into sas_rphy_remove + - LP: #1368991 + * drm/i915: Enable 5.4Ghz (HBR2) link rate for Displayport 1.2-capable + devices + - LP: #1369633 + * bnx2x: Fix link for KR with swapped polarity lane + - LP: #1370716 + * drm: add DRM_CAPs for cursor size + - LP: #1359213 + * drm/dp: Add AUX channel infrastructure + - LP: #1359213 + * drm/dp: Add drm_dp_dpcd_read_link_status() + - LP: #1359213 + * drm/dp: Add DisplayPort link helpers + - LP: #1359213 + * drm/dp: Allow registering AUX channels as I2C busses + - LP: #1359213 + * drm/dp: let drivers specify the name of the I2C-over-AUX adapter + - LP: #1359213 + * drm/dp: make aux retries less chatty + - LP: #1359213 + * Bluetooth: Enable Atheros 0cf3:311e for firmware upload + - LP: #1371477 + * bnx2x: fix crash during TSO tunneling + - LP: #1371601 + * inetpeer: get rid of ip_id_count + - LP: #1371601 + * ip: make IP identifiers less predictable + - LP: #1371601 + * tcp: Fix integer-overflows in TCP veno + - LP: #1371601 + * tcp: Fix integer-overflow in TCP vegas + - LP: #1371601 + * macvlan: Initialize vlan_features to turn on offload support. + - LP: #1371601 + * net: Correctly set segment mac_len in skb_segment(). + - LP: #1371601 + * iovec: make sure the caller actually wants anything in + memcpy_fromiovecend + - LP: #1371601 + * batman-adv: Fix out-of-order fragmentation support + - LP: #1371601 + * sctp: fix possible seqlock seadlock in sctp_packet_transmit() + - LP: #1371601 + * sparc64: Fix argument sign extension for compat_sys_futex(). + - LP: #1371601 + * sparc64: Make itc_sync_lock raw + - LP: #1371601 + * sparc64: Fix executable bit testing in set_pmd_at() paths. + - LP: #1371601 + * sparc64: Fix huge PMD invalidation. + - LP: #1371601 + * sparc64: Fix bugs in get_user_pages_fast() wrt. THP. + - LP: #1371601 + * sparc64: Fix hex values in comment above pte_modify(). 
+ - LP: #1371601 + * sparc64: Don't use _PAGE_PRESENT in pte_modify() mask. + - LP: #1371601 + * sparc64: Handle 32-bit tasks properly in compute_effective_address(). + - LP: #1371601 + * sparc64: Fix top-level fault handling bugs. + - LP: #1371601 + * sparc64: Fix range check in kern_addr_valid(). + - LP: #1371601 + * sparc64: Use 'ILOG2_4MB' instead of constant '22'. + - LP: #1371601 + * sparc64: Add basic validations to {pud,pmd}_bad(). + - LP: #1371601 + * sparc64: Give more detailed information in {pgd,pmd}_ERROR() and kill + pte_ERROR(). + - LP: #1371601 + * sparc64: Don't bark so loudly about 32-bit tasks generating 64-bit + fault addresses. + - LP: #1371601 + * sparc64: Fix huge TSB mapping on pre-UltraSPARC-III cpus. + - LP: #1371601 + * sparc64: Add membar to Niagara2 memcpy code. + - LP: #1371601 + * sparc64: Do not insert non-valid PTEs into the TSB hash table. + - LP: #1371601 + * sparc64: Guard against flushing openfirmware mappings. + - LP: #1371601 + * bbc-i2c: Fix BBC I2C envctrl on SunBlade 2000 + - LP: #1371601 + * sunsab: Fix detection of BREAK on sunsab serial console + - LP: #1371601 + * sparc64: ldc_connect() should not return EINVAL when handshake is in + progress. + - LP: #1371601 + * arch/sparc/math-emu/math_32.c: drop stray break operator + - LP: #1371601 + * x86-64, espfix: Don't leak bits 31:16 of %esp returning to 16-bit stack + - LP: #1371601 + * x86, espfix: Move espfix definitions into a separate header file + - LP: #1371601 + * x86, espfix: Fix broken header guard + - LP: #1371601 + * x86, espfix: Make espfix64 a Kconfig option, fix UML + - LP: #1371601 + * x86, espfix: Make it possible to disable 16-bit support + - LP: #1371601 + * x86_64/entry/xen: Do not invoke espfix64 on Xen + - LP: #1371601 + * ALSA: usb-audio: fix BOSS ME-25 MIDI regression + - LP: #1371601 + * ASoC: wm8994: Prevent double lock of accdet_lock mutex on wm1811 + - LP: #1371601 + * v4l: vsp1: Remove the unneeded vsp1_video_buffer video field + - LP: #1371601 + * ASoC: max98090: Fix missing free_irq + - LP: #1371601 + * KVM: x86: Inter-privilege level ret emulation is not implemeneted + - LP: #1371601 + * au0828: Only alt setting logic when needed + - LP: #1371601 + * ASoC: pcm: fix dpcm_path_put in dpcm runtime update + - LP: #1371601 + * crypto: ux500 - make interrupt mode plausible + - LP: #1371601 + * Bluetooth: btmrvl: wait for HOST_SLEEP_ENABLE event in suspend + - LP: #1371601 + * ASoC: adau1701: fix adau1701_reg_read() + - LP: #1371601 + * ASoC: wm_adsp: Add missing MODULE_LICENSE + - LP: #1371601 + * regulator: arizona-ldo1: remove bypass functionality + - LP: #1371601 + * ASoC: samsung: Correct I2S DAI suspend/resume ops + - LP: #1371601 + * drm/tilcdc: panel: fix dangling sysfs connector node + - LP: #1371601 + * drm/tilcdc: slave: fix dangling sysfs connector node + - LP: #1371601 + * drm/tilcdc: tfp410: fix dangling sysfs connector node + - LP: #1371601 + * drm/tilcdc: panel: fix leak when unloading the module + - LP: #1371601 + * drm/tilcdc: fix release order on exit + - LP: #1371601 + * drm/tilcdc: fix double kfree + - LP: #1371601 + * ACPICA: Utilities: Fix memory leak in acpi_ut_copy_iobject_to_iobject + - LP: #1371601 + * stable_kernel_rules: Add pointer to netdev-FAQ for network patches + - LP: #1371601 + * USB: ehci-pci: USB host controller support for Intel Quark X1000 + - LP: #1371601 + * debugfs: Fix corrupted loop in debugfs_remove_recursive + - LP: #1371601 + * serial: core: Preserve termios c_cflag for console resume + - LP: #1371601 + * mtd/ftl: fix the double 
free of the buffers allocated in build_maps() + - LP: #1371601 + * ext4: Fix block zeroing when punching holes in indirect block files + - LP: #1371601 + * ext4: fix punch hole on files with indirect mapping + - LP: #1371601 + * x86: don't exclude low BIOS area when allocating address space for + non-PCI cards + - LP: #1371601 + * PCI: Configure ASPM when enabling device + - LP: #1371601 + * Bluetooth: never linger on process exit + - LP: #1371601 + * ASoC: blackfin: use samples to set silence + - LP: #1371601 + * USB: OHCI: fix bugs in debug routines + - LP: #1371601 + * USB: OHCI: don't lose track of EDs when a controller dies + - LP: #1371601 + * mei: start disconnect request timer consistently + - LP: #1371601 + * mei: fix return value on disconnect timeout + - LP: #1371601 + * USB: Fix persist resume of some SS USB devices + - LP: #1371601 + * media-device: Remove duplicated memset() in media_enum_entities() + - LP: #1371601 + * Bluetooth: Avoid use of session socket after the session gets freed + - LP: #1371601 + * xc5000: Fix get_frequency() + - LP: #1371601 + * xc4000: Fix get_frequency() + - LP: #1371601 + * CAPABILITIES: remove undefined caps from all processes + - LP: #1371601 + * scsi: add a blacklist flag which enables VPD page inquiries + - LP: #1371601 + * bfa: Fix undefined bit shift on big-endian architectures with 32-bit + DMA address + - LP: #1371601 + * hpsa: fix bad -ENOMEM return value in hpsa_big_passthru_ioctl + - LP: #1371601 + * Drivers: scsi: storvsc: Change the limits to reflect the values on the + host + - LP: #1371601 + * Drivers: scsi: storvsc: Set cmd_per_lun to reflect value supported by + the Host + - LP: #1371601 + * Drivers: scsi: storvsc: Filter commands based on the storage protocol + version + - LP: #1371601 + * Drivers: scsi: storvsc: Fix a bug in handling VMBUS protocol version + - LP: #1371601 + * Drivers: scsi: storvsc: Implement a eh_timed_out handler + - LP: #1371601 + * drivers: scsi: storvsc: Set srb_flags in all cases + - LP: #1371601 + * drivers: scsi: storvsc: Correctly handle TEST_UNIT_READY failure + - LP: #1371601 + * x86_64/vsyscall: Fix warn_bad_vsyscall log output + - LP: #1371601 + * KVM: PPC: Book3S PR: Take SRCU read lock around RTAS kvm_read_guest() + call + - LP: #1371601 + * spi: orion: fix incorrect handling of cell-index DT property + - LP: #1371601 + * mfd: omap-usb-host: Fix improper mask use. + - LP: #1371601 + * tpm: Add missing tpm_do_selftest to ST33 I2C driver + - LP: #1371601 + * tpm: missing tpm_chip_put in tpm_get_random() + - LP: #1371601 + * scsi: do not issue SCSI RSOC command to Promise Vtrak E610f + - LP: #1371601 + * hwmon: (ads1015) Fix off-by-one for valid channel index checking + - LP: #1371601 + * ALSA: hda - fix an external mic jack problem on a HP machine + - LP: #1350148, #1371601 + * MIPS: tlbex: Fix a missing statement for HUGETLB + - LP: #1371601 + * MIPS: Prevent user from setting FCSR cause bits + - LP: #1371601 + * KVM: x86: always exit on EOIs for interrupts listed in the IOAPIC redir + table + - LP: #1371601 + * MIPS: Remove BUG_ON(!is_fpu_owner()) in do_ade() + - LP: #1371601 + * MIPS: ptrace: Test correct task's flags in task_user_regset_view() + - LP: #1371601 + * MIPS: asm/reg.h: Make 32- and 64-bit definitions available at the same + time + - LP: #1371601 + * MIPS: ptrace: Change GP regset to use correct core dump register layout + - LP: #1371601 + * md/raid1,raid10: always abort recover on write error. 
+ - LP: #1371601 + * ext4: fix ext4_discard_allocated_blocks() if we can't allocate the pa + struct + - LP: #1371601 + * hwmon: (lm85) Fix various errors on attribute writes + - LP: #1371601 + * hwmon: (lm78) Fix overflow problems seen when writing large temperature + limits + - LP: #1371601 + * hwmon: (amc6821) Fix possible race condition bug + - LP: #1371601 + * MIPS: GIC: Prevent array overrun + - LP: #1371601 + * mnt: Add tests for unprivileged remount cases that have found to be + faulty + - LP: #1371601 + * ARM: OMAP3: Fix choice of omap3_restore_es function in OMAP34XX + rev3.1.2 case. + - LP: #1371601 + * netlabel: fix a problem when setting bits below the previously lowest + bit + - LP: #1371601 + * netlabel: fix the horribly broken catmap functions + - LP: #1371601 + * netlabel: fix the catmap walking functions + - LP: #1371601 + * drivers/i2c/busses: use correct type for dma_map/unmap + - LP: #1371601 + * NFSD: Decrease nfsd_users in nfsd_startup_generic fail + - LP: #1371601 + * MIPS: O32/32-bit: Fix bug which can cause incorrect system call + restarts + - LP: #1371601 + * IB/srp: Fix deadlock between host removal and multipathd + - LP: #1371601 + * USB: serial: ftdi_sio: Annotate the current Xsens PID assignments + - LP: #1371601 + * USB: serial: ftdi_sio: Add support for new Xsens devices + - LP: #1371601 + * USB: devio: fix issue with log flooding + - LP: #1371601 + * CIFS: Fix async reading on reconnects + - LP: #1371601 + * CIFS: Fix STATUS_CANNOT_DELETE error mapping for SMB2 + - LP: #1371601 + * xfs: ensure verifiers are attached to recovered buffers + - LP: #1371601 + * drm/tegra: add MODULE_DEVICE_TABLEs + - LP: #1371601 + * ALSA: virtuoso: add Xonar Essence STX II support + - LP: #1371601 + * hwmon: (gpio-fan) Prevent overflow problem when writing large limits + - LP: #1371601 + * hwmon: (sis5595) Prevent overflow problem when writing large limits + - LP: #1371601 + * NFS: Fix /proc/fs/nfsfs/servers and /proc/fs/nfsfs/volumes + - LP: #1371601 + * drm/ttm: Fix possible division by 0 in ttm_dma_pool_shrink_scan(). + - LP: #1371601 + * drm/ttm: Choose a pool to shrink correctly in + ttm_dma_pool_shrink_scan(). + - LP: #1371601 + * drm/ttm: Use mutex_trylock() to avoid deadlock inside shrinker + functions. + - LP: #1371601 + * drm/ttm: Fix possible stack overflow by recursive shrinker calls. + - LP: #1371601 + * drm/ttm: Pass GFP flags in order to avoid deadlock. + - LP: #1371601 + * powerpc/mm/numa: Fix break placement + - LP: #1371601 + * powerpc/pci: Reorder pci bus/bridge unregistration during PHB removal + - LP: #1371601 + * drm/radeon: load the lm63 driver for an lm64 thermal chip. 
+ - LP: #1371601 + * drm/radeon: set VM base addr using the PFP v2 + - LP: #1371601 + * drm/radeon/atom: add new voltage fetch function for hawaii + - LP: #1371601 + * drm/radeon/dpm: handle voltage info fetching on hawaii + - LP: #1371601 + * drm/radeon: re-enable dpm by default on cayman + - LP: #1371601 + * drm/radeon: re-enable dpm by default on BTC + - LP: #1371601 + * drm/radeon: use packet2 for nop on hawaii with old firmware + - LP: #1371601 + * drm/radeon: tweak ACCEL_WORKING2 query for hawaii + - LP: #1371601 + * KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use + - LP: #1371601 + * RDMA/iwcm: Use a default listen backlog if needed + - LP: #1371601 + * x86/efi: Enforce CONFIG_RELOCATABLE for EFI boot stub + - LP: #1371601 + * net: sun4i-emac: fix memory leak on bad packet + - LP: #1371601 + * hwmon: (ads1015) Fix out-of-bounds array access + - LP: #1371601 + * hwmon: (dme1737) Prevent overflow problem when writing large limits + - LP: #1371601 + * s390/locking: Reenable optimistic spinning + - LP: #1371601 + * ring-buffer: Up rb_iter_peek() loop count to 3 + - LP: #1371601 + * ring-buffer: Always reset iterator to reader page + - LP: #1371601 + * kernel/smp.c:on_each_cpu_cond(): fix warning in fallback path + - LP: #1371601 + * drm/i915: read HEAD register back in init_ring_common() to enforce + ordering + - LP: #1371601 + * vm_is_stack: use for_each_thread() rather then buggy + while_each_thread() + - LP: #1371601 + * libceph: set last_piece in ceph_msg_data_pages_cursor_init() correctly + - LP: #1371601 + * drm/nouveau: Bump version from 1.1.1 to 1.1.2 + - LP: #1371601 + * ALSA: usb-audio: fix BOSS ME-25 MIDI regression + - LP: #1371601 + * ALSA: hda/ca0132 - Don't try loading firmware at resume when already + failed + - LP: #1371601 + * carl9170: fix sending URBs with wrong type when using full-speed + - LP: #1371601 + * powerpc/pseries: Failure on removing device node + - LP: #1371601 + * Btrfs: Fix memory corruption by ulist_add_merge() on 32bit arch + - LP: #1371601 + * Btrfs: fix csum tree corruption, duplicate and outdated checksums + - LP: #1371601 + * ext4: fix BUG_ON in mb_free_blocks() + - LP: #1371601 + * x86/espfix/xen: Fix allocation of pages for paravirt page tables + - LP: #1371601 + * Linux 3.13.11.7 + - LP: #1371601 + * HID: magicmouse: sanity check report size in raw_event() callback + - LP: #1370025 + - CVE-2014-3181 + * HID: fix a couple of off-by-ones + - LP: #1370035 + - CVE-2014-3184 + * USB: whiteheat: Added bounds checking for bulk command response + - LP: #1370036 + - CVE-2014-3185 + * HID: picolcd: sanity check report size in raw_event() callback + - LP: #1370038 + - CVE-2014-3186 + * KEYS: Fix termination condition in assoc array garbage collection + - LP: #1370041 + - CVE-2014-3631 + * udf: Fold udf_fill_inode() into __udf_read_inode() + - LP: #1370042 + - CVE-2014-6410 + * udf: Avoid infinite loop when processing indirect ICBs + - LP: #1370042 + - CVE-2014-6410 + * libceph: add process_one_ticket() helper + - LP: #1370044, #1370046, #1370047 + - CVE-2014-6418 + * libceph: do not hard code max auth ticket len + - LP: #1370044, #1370046, #1370047 + - CVE-2014-6418 + + -- Joseph Salisbury Mon, 22 Sep 2014 15:51:07 -0400 + +linux (3.13.0-36.63) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1365052 + + [ Feng Kan ] + + * SAUCE: (no-up) irqchip:gic: change access of gicc_ctrl register to read + modify write. 
+ - LP: #1357527 + * SAUCE: (no-up) arm64: optimized copy_to_user and copy_from_user + assembly code + - LP: #1358949 + + [ Ming Lei ] + + * SAUCE: (no-up) Drop APM X-Gene SoC Ethernet driver + - LP: #1360140 + * [Config] Drop XGENE entries + - LP: #1360140 + * [Config] CONFIG_NET_XGENE=m for arm64 + - LP: #1360140 + + [ Stefan Bader ] + + * SAUCE: Add compat macro for skb_get_hash + - LP: #1358162 + * SAUCE: bcache: prevent crash on changing writeback_running + - LP: #1357295 + + [ Suman Tripathi ] + + * SAUCE: (no-up) arm64: Fix the csr-mask for APM X-Gene SoC AHCI SATA PHY + clock DTS node. + - LP: #1359489 + * SAUCE: (no-up) ahci_xgene: Skip the PHY and clock initialization if + already configured by the firmware. + - LP: #1359501 + * SAUCE: (no-up) ahci_xgene: Fix the link down in first attempt for the + APM X-Gene SoC AHCI SATA host controller driver. + - LP: #1359507 + + [ Tuan Phan ] + + * SAUCE: (no-up) pci-xgene-msi: fixed deadlock in irq_set_affinity + - LP: #1359514 + + [ Upstream Kernel Changes ] + + * iwlwifi: mvm: Add a missed beacons threshold + - LP: #1349572 + * mac80211: reset probe_send_count also in HW_CONNECTION_MONITOR case + - LP: #1349572 + * genirq: Add an accessor for IRQ_PER_CPU flag + - LP: #1357527 + * arm64: perf: add support for percpu pmu interrupt + - LP: #1357527 + * cifs: sanity check length of data to send before sending + - LP: #1283101 + * KVM: nVMX: Pass vmexit parameters to nested_vmx_vmexit + - LP: #1329434 + * KVM: nVMX: Rework interception of IRQs and NMIs + - LP: #1329434 + * KVM: vmx: disable APIC virtualization in nested guests + - LP: #1329434 + * HID: Add transport-driver functions to the USB HID interface. + - LP: #1353021 + * ahci_xgene: Removing NCQ support from the APM X-Gene SoC AHCI SATA Host + Controller driver. 
+ - LP: #1358498 + * fold d_kill() and d_free() + - LP: #1354234 + * fold try_prune_one_dentry() + - LP: #1354234 + * new helper: dentry_free() + - LP: #1354234 + * expand the call of dentry_lru_del() in dentry_kill() + - LP: #1354234 + * dentry_kill(): don't try to remove from shrink list + - LP: #1354234 + * don't remove from shrink list in select_collect() + - LP: #1354234 + * more graceful recovery in umount_collect() + - LP: #1354234 + * dcache: don't need rcu in shrink_dentry_list() + - LP: #1354234 + * lift the "already marked killed" case into shrink_dentry_list() + * split dentry_kill() + - LP: #1354234 + * expand dentry_kill(dentry, 0) in shrink_dentry_list() + - LP: #1354234 + * shrink_dentry_list(): take parent's ->d_lock earlier + - LP: #1354234 + * dealing with the rest of shrink_dentry_list() livelock + - LP: #1354234 + * dentry_kill() doesn't need the second argument now + - LP: #1354234 + * dcache: add missing lockdep annotation + - LP: #1354234 + * fs: convert use of typedef ctl_table to struct ctl_table + - LP: #1354234 + * lock_parent: don't step on stale ->d_parent of all-but-freed one + - LP: #1354234 + * tools/testing/selftests/ptrace/peeksiginfo.c: add PAGE_SIZE definition + - LP: #1358855 + * x86, irq, pic: Probe for legacy PIC and set legacy_pic appropriately + - LP: #1317697 + * bnx2x: Fix kernel crash and data miscompare after EEH recovery + - LP: #1353105 + * bnx2x: Adapter not recovery from EEH error injection + - LP: #1353105 + * Fix: module signature vs tracepoints: add new TAINT_UNSIGNED_MODULE + - LP: #1359670 + * bcache: fix crash on shutdown in passthrough mode + - LP: #1357295 + * bcache: fix uninterruptible sleep in writeback thread + - LP: #1357295 + * namespaces: Use task_lock and not rcu to protect nsproxy + - LP: #1328088 + * MAINTAINERS: Add entry for APM X-Gene SoC ethernet driver + - LP: #1360140 + * Documentation: dts: Add bindings for APM X-Gene SoC ethernet driver + - LP: #1360140 + * dts: Add bindings for APM X-Gene SoC ethernet driver + - LP: #1360140 + * drivers: net: Add APM X-Gene SoC ethernet driver support. 
+ - LP: #1360140 + * powerpc/mm: Add new "set" flag argument to pte/pmd update function + - LP: #1357014 + * powerpc/thp: Add write barrier after updating the valid bit + - LP: #1357014 + * powerpc/thp: Don't recompute vsid and ssize in loop on invalidate + - LP: #1357014 + * powerpc/thp: Invalidate old 64K based hash page mapping before insert + of 4k pte + - LP: #1357014 + * powerpc/thp: Handle combo pages in invalidate + - LP: #1357014 + * powerpc/thp: Invalidate with vpn in loop + - LP: #1357014 + * powerpc/thp: Use ACCESS_ONCE when loading pmdp + - LP: #1357014 + * powerpc/mm: Use read barrier when creating real_pte + - LP: #1357014 + * powerpc/thp: Add tracepoints to track hugepage invalidate + - LP: #1357014 + * powerpc: subpage_protect: Increase the array size to take care of 64TB + - LP: #1357014 + * mfd: rtsx: Add set pull control macro and simplify rtl8411 + - LP: #1361086 + * mfd: rtsx: Add support for card reader rtl8402 + - LP: #1361086 + * kvm: iommu: fix the third parameter of kvm_iommu_put_pages + (CVE-2014-3601) + - LP: #1362443 + - CVE-2014-3601 + * isofs: Fix unbounded recursion when processing relocated directories + - LP: #1362447, #1362448 + - CVE-2014-5472 + * net: sctp: inherit auth_capable on INIT collisions + - LP: #1349804 + - CVE-2014-5077 + * blk-mq: fix initializing request's start time + - LP: #1297522 + + [ Vinayak Kale ] + + * SAUCE: (no-up) dt-bindings: Add Potenza PMU binding + - LP: #1357527 + * SAUCE: (no-up) arm64: dts: Add PMU node for APM X-Gene Storm SOC + - LP: #1357527 + + -- Joseph Salisbury Wed, 03 Sep 2014 12:13:43 -0400 + +linux (3.13.0-35.62) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1357148 + + [ Brad Figg ] + + * Start new release + + [ dann frazier ] + + * SAUCE: (no-up) Fix build failure on arm64 + - LP: #1353657 + * [debian] Allow for package revisions condusive for branching + + [ David Henningsson ] + + * SAUCE: Call broadwell specific functions from the hda driver + - LP: #1317865 + + [ Edward Lin ] + + * SAUCE: (no-up) Add use native backlight quirk for Dell Inspiron + 5547/5447 + - LP: #1332437 + + [ Imre Deak ] + + * SAUCE: drm/i915: move power domain init earlier during system resume + - LP: #1353405 + + [ Jani Nikula ] + + * SAUCE: drm/i915: use lane count and link rate from VBT as minimums for + eDP + - LP: #1338582 + * SAUCE: drm/i915/dp: force eDP lane count to max available lanes on BDW + - LP: #1338582 + * SAUCE: drm/i915: provide interface for audio driver to query cdclk + - LP: #1188091 + * SAUCE: drm/i915: demote opregion excessive timeout WARN_ONCE to + DRM_INFO_ONCE + - LP: #1351014 + + [ Joseph Salisbury ] + + * [Config] updateconfigs after Linux 3.13.11.6 updates + + [ Luis Henriques ] + + * Revert "[Packaging] linux-udeb-flavour -- standardise on linux prefix" + + [ Ming Lei ] + + * Revert "SAUCE: (no-up) ata: Fix the dma state machine lockup for the + IDENTIFY DEVICE PIO mode command." + - LP: #1335645 + + [ Paulo Zanoni ] + + * SAUCE: drm/i915: consider the source max DP lane count too + - LP: #1338582 + + [ Tim Gardner ] + + * [Config] CONFIG_GPIO_SYSFS=y + - LP: #1342153 + * [Config] CONFIG_KEYS_DEBUG_PROC_KEYS=y + - LP: #1344405 + * [Config] updateconfigs + * [Config] CONFIG_SCSI_IPR_TRACE=y, CONFIG_SCSI_IPR_DUMP=y + - LP: #1343109 + * [Config] CONFIG_CONTEXT_TRACKING_FORCE=n + - LP: #1349028 + + [ Timo Aaltonen ] + + * SAUCE: Fix a typo in hda i915_bdw support. 
+ - LP: #1343140 + + [ Upstream Kernel Changes ] + + * Revert "net/mlx4_en: Fix bad use of dev_id" + - LP: #1347012 + * Revert "ACPI / AC: Remove AC's proc directory." + - LP: #1356913 + * Revert "mac80211: move "bufferable MMPDU" check to fix AP mode scan" + - LP: #1356913 + * mm, pcp: allow restoring percpu_pagelist_fraction default + - LP: #1347088 + * net: Fix permission check in netlink_connect() + - LP: #1312989 + * netlink: Rename netlink_capable netlink_allowed + - LP: #1312989 + * net: Move the permission check in sock_diag_put_filterinfo to + packet_diag_dump + - LP: #1312989 + * net: Add variants of capable for use on on sockets + - LP: #1312989 + * net: Add variants of capable for use on netlink messages + - LP: #1312989 + * net: Use netlink_ns_capable to verify the permisions of netlink + messages + - LP: #1312989 + * netlink: Only check file credentials for implicit destinations + - LP: #1312989 + * igb: fix stats for i210 rx_fifo_errors + - LP: #1338893 + * HID: use multi input quirk for 22b9:2968 + - LP: #1339567 + * crypto/nx: disable NX on little endian builds + - LP: #1338666 + * ACPI / video: Add Dell Inspiron 5737 to the blacklist + - LP: #1250401 + * Input: elantech - deal with clickpads reporting right button events + - LP: #1188025 + * net/mlx4_core: Enforce irq affinity changes immediatly + - LP: #1326108 + * cpumask: Utility function to set n'th cpu - local cpu first + - LP: #1326108 + * net/mlx4_en: Use affinity hint + - LP: #1326108 + * net/mlx4_en: Don't use irq_affinity_notifier to track changes in IRQ + affinity map + - LP: #1326108 + * net/mlx4_en: IRQ affinity hint is not cleared on port down + - LP: #1326108 + * Subject: net: Allow tc changes in user namespaces + - LP: #1344049 + * net-gro: restore frag0 optimization + - LP: #1344323 + * Bluetooth: Fix redundant encryption request for reauthentication + - LP: #1347088 + * Bluetooth: Fix check for connection encryption + - LP: #1347088 + * introduce for_each_thread() to replace the buggy while_each_thread() + - LP: #1347088 + * NFS: Don't declare inode uptodate unless all attributes were checked + - LP: #1347088 + * usb: dwc3: gadget: clear stall when disabling endpoint + - LP: #1347088 + * ACPICA: utstring: Check array index bound before use. 
+ - LP: #1347088 + * mtip32xx: Increase timeout for STANDBY IMMEDIATE command + - LP: #1347088 + * mtip32xx: Remove dfs_parent after pci unregister + - LP: #1347088 + * mtip32xx: Fix ERO and NoSnoop values in PCIe upstream on AMD systems + - LP: #1347088 + * extcon: max77693: Fix two NULL pointer exceptions on missing pdata + - LP: #1347088 + * extcon: max8997: Fix NULL pointer exception on missing pdata + - LP: #1347088 + * builddeb: use $OBJCOPY variable instead of objcopy + - LP: #1347088 + * bluetooth: hci_ldisc: fix deadlock condition + - LP: #1347088 + * powerpc/pseries: Fix overwritten PE state + - LP: #1347088 + * PCI: Add new ID for Intel GPU "spurious interrupt" quirk + - LP: #1347088 + * x86-32, espfix: Remove filter for espfix32 due to race + - LP: #1347088 + * genirq: Sanitize spurious interrupt detection of threaded irqs + - LP: #1347088 + * Drivers: hv: balloon: Ensure pressure reports are posted regularly + - LP: #1347088 + * x86, x32: Use compat shims for io_{setup,submit} + - LP: #1347088 + * iwlwifi: pcie: try to get ownership several times + - LP: #1347088 + * ext4: fix data integrity sync in ordered mode + - LP: #1347088 + * UBIFS: fix an mmap and fsync race condition + - LP: #1347088 + * [media] rtl28xxu: add USB ID for Genius TVGo DVB-T03 + - LP: #1347088 + * [media] rtl28xxu: add 1b80:d395 Peak DVB-T USB + - LP: #1347088 + * [media] rtl28xxu: add [1b80:d39d] Sveon STV20 + - LP: #1347088 + * [media] rtl28xxu: add [1b80:d3af] Sveon STV27 + - LP: #1347088 + * ASoC: max98090: Fix reset at resume time + - LP: #1347088 + * ACPI: Fix conflict between customized DSDT and DSDT local copy + - LP: #1347088 + * PM / OPP: fix incorrect OPP count handling in of_init_opp_table + - LP: #1347088 + * Target/iser: Bail from accept_np if np_thread is trying to close + - LP: #1347088 + * Target/iser: Fix hangs in connection teardown + - LP: #1347088 + * HID: core: fix validation of report id 0 + - LP: #1347088 + * IB/srp: Fix a sporadic crash triggered by cable pulling + - LP: #1347088 + * Target/iser: Improve cm events handling + - LP: #1347088 + * Target/iser: Wait for proper cleanup before unloading + - LP: #1347088 + * mtd: nand: omap: fix BCHx ecc.correct to return detected bit-flips in + erased-page + - LP: #1347088 + * mtd: eLBC NAND: fix subpage write support + - LP: #1347088 + * reiserfs: call truncate_setsize under tailpack mutex + - LP: #1347088 + * ARM: stacktrace: avoid listing stacktrace functions in stacktrace + - LP: #1347088 + * SUNRPC: Fix a module reference leak in svc_handle_xprt + - LP: #1347088 + * [media] uvcvideo: Fix clock param realtime setting + - LP: #1347088 + * [media] ivtv: Fix Oops when no firmware is loaded + - LP: #1347088 + * CIFS: Fix memory leaks in SMB2_open + - LP: #1347088 + * iio:adc:max1363 incorrect resolutions for max11604, max11605, max11610 + and max11611. 
+ - LP: #1347088 + * staging/mt29f_spinand: Terminate of match table + - LP: #1347088 + * mac80211: fix IBSS join by initializing last_scan_completed + - LP: #1347088 + * KVM: lapic: sync highest ISR to hardware apic on EOI + - LP: #1347088 + * s390/time: cast tv_nsec to u64 prior to shift in update_vsyscall + - LP: #1347088 + * ahci: add PCI ID for Marvell 88SE91A0 SATA Controller + - LP: #1347088 + * ext4: fix zeroing of page during writeback + - LP: #1347088 + * ext4: fix wrong assert in ext4_mb_normalize_request() + - LP: #1347088 + * IB/mlx5: add missing padding at end of struct mlx5_ib_create_cq + - LP: #1347088 + * IB/mlx5: add missing padding at end of struct mlx5_ib_create_srq + - LP: #1347088 + * IB/qib: Fix port in pkey change event + - LP: #1347088 + * IB/ipath: Translate legacy diagpkt into newer extended diagpkt + - LP: #1347088 + * mei: me: drop harmful wait optimization + - LP: #1347088 + * mei: me: read H_CSR after asserting reset + - LP: #1347088 + * usb: usbtest: fix unlink write error with pattern 1 + - LP: #1347088 + * s390/lowcore: reserve 96 bytes for IRB in lowcore + - LP: #1347088 + * mac80211: fix a memory leak on sta rate selection table + - LP: #1347088 + * mac80211: don't check netdev state for debugfs read/write + - LP: #1347088 + * mtd: pxa3xx_nand: make the driver work on big-endian systems + - LP: #1347088 + * hv: use correct order when freeing monitor_pages + - LP: #1347088 + * usb: qcserial: add Netgear AirCard 341U + - LP: #1347088 + * usb: qcserial: add additional Sierra Wireless QMI devices + - LP: #1347088 + * IB/umad: Fix error handling + - LP: #1347088 + * RDMA/cxgb4: Add missing padding at end of struct c4iw_create_cq_resp + - LP: #1347088 + * MIPS: KVM: Allocate at least 16KB for exception handlers + - LP: #1347088 + * block: virtio_blk: don't hold spin lock during world switch + - LP: #1347088 + * nfsd: getattr for FATTR4_WORD0_FILES_AVAIL needs the statfs buffer + - LP: #1347088 + * ASoC: tlv320aci3x: Fix custom snd_soc_dapm_put_volsw_aic3x() function + - LP: #1347088 + * UBIFS: Remove incorrect assertion in shrink_tnc() + - LP: #1347088 + * Bluetooth: Fix L2CAP deadlock + - LP: #1347088 + * vgaswitcheroo: switch the mux to the igp on power down when runpm is + enabled + - LP: #1347088 + * drm/radeon: fix typo in radeon_connector_is_dp12_capable() + - LP: #1347088 + * drm/radeon/dp: fix lane/clock setup for dp 1.2 capable devices + - LP: #1347088 + * drm/radeon/atom: fix dithering on certain panels + - LP: #1347088 + * drm/radeon: only apply hdmi bpc pll flags when encoder mode is hdmi + - LP: #1347088 + * ahci: Add Device ID for HighPoint RocketRaid 642L + - LP: #1347088 + * mm: fix sleeping function warning from __put_anon_vma + - LP: #1347088 + * hugetlb: restrict hugepage_migration_support() to x86_64 + - LP: #1347088 + * kthread: fix return value of kthread_create() upon SIGKILL. 
+ - LP: #1347088 + * mm: vmscan: do not throttle based on pfmemalloc reserves if node has no + ZONE_NORMAL + - LP: #1347088 + * memcg: do not hang on OOM when killed by userspace OOM access to memory + reserves + - LP: #1347088 + * mm: page_alloc: use word-based accesses for get/set pageblock bitmaps + - LP: #1347088 + * mm/memory-failure.c-failure: send right signal code to correct thread + - LP: #1347088 + * mm/memory-failure.c: don't let collect_procs() skip over processes for + MF_ACTION_REQUIRED + - LP: #1347088 + * mm/memory-failure.c: support use of a dedicated thread to handle + SIGBUS(BUS_MCEERR_AO) + - LP: #1347088 + * powerpc/serial: Use saner flags when creating legacy ports + - LP: #1347088 + * ALSA: hda/realtek - Add support of ALC891 codec + - LP: #1347088 + * rbd: use reference counts for image requests + - LP: #1347088 + * iscsi-target: Reject mutual authentication with reflected CHAP_C + - LP: #1347088 + * powerpc/mm: Check paca psize is up to date for huge mappings + - LP: #1347088 + * IB/umad: Fix use-after-free on close + - LP: #1347088 + * mm: vmscan: clear kswapd's special reclaim powers before exiting + - LP: #1347088 + * rtc: rtc-at91rm9200: fix infinite wait for ACKUPD irq + - LP: #1347088 + * ptrace: fix fork event messages across pid namespaces + - LP: #1347088 + * idr: fix overflow bug during maximum ID calculation at maximum height + - LP: #1347088 + * Input: elantech - don't set bit 1 of reg_10 when the no_hw_res quirk is + set + - LP: #1347088 + * nfsd4: fix FREE_STATEID lockowner leak + - LP: #1347088 + * Btrfs: fix double free in find_lock_delalloc_range + - LP: #1347088 + * target: Set CMD_T_ACTIVE bit for Task Management Requests + - LP: #1347088 + * target: Use complete_all for se_cmd->t_transport_stop_comp + - LP: #1347088 + * iscsi-target: Fix ABORT_TASK + connection reset iscsi_queue_req memory + leak + - LP: #1347088 + * drm/nv50-/mc: fix kms pageflip events by reordering irq handling order. + - LP: #1347088 + * drm/nouveau/kms/nv04-nv40: fix pageflip events via special case. + - LP: #1347088 + * NFS: populate ->net in mount data when remounting + - LP: #1347088 + * watchdog: kempld-wdt: Use the correct value when configuring the + prescaler with the watchdog + - LP: #1347088 + * watchdog: ath79_wdt: avoid spurious restarts on AR934x + - LP: #1347088 + * watchdog: sp805: Set watchdog_device->timeout from ->set_timeout() + - LP: #1347088 + * fs,userns: Change inode_capable to capable_wrt_inode_uidgid + - LP: #1347088 + * powerpc: Add AT_HWCAP2 to indicate V.CRYPTO category support + - LP: #1347088 + * powerpc: Correct DSCR during TM context switch + - LP: #1347088 + * powerpc: Don't setup CPUs with bad status + - LP: #1347088 + * Target/iscsi: Fix sendtargets response pdu for iser transport + - LP: #1347088 + * target: Report correct response length for some commands + - LP: #1347088 + * dm thin: update discard_granularity to reflect the thin-pool blocksize + - LP: #1347088 + * ALSA: compress: Cancel the optimization of compiler and fix the size of + struct for all platform. 
+ - LP: #1347088 + * hwmon: (ina2xx) Cast to s16 on shunt and current regs + - LP: #1347088 + * evm: prohibit userspace writing 'security.evm' HMAC value + - LP: #1347088 + * ALSA: hda - Add quirk for external mic on Lifebook U904 + - LP: #1328587, #1347088 + * ALSA: hda/realtek - Add more entry for enable HP mute led + - LP: #1347088 + * ALSA: hda/realtek - Add more entry for enable HP mute led + - LP: #1347088 + * staging: iio: tsl2x7x_core: fix proximity treshold + - LP: #1347088 + * iio: Fix endianness issue in ak8975_read_axis() + - LP: #1347088 + * rtmutex: Handle deadlock detection smarter + - LP: #1347088 + * rtmutex: Detect changes in the pi lock chain + - LP: #1347088 + * drm/i915: Disable FBC by default also on Haswell and later + - LP: #1347088 + * drm/i915: Avoid div-by-zero when pixel_multiplier is zero + - LP: #1347088 + * drm/i915: Reorder semaphore deadlock check + - LP: #1347088 + * iio: adc: at91: signedness bug in at91_adc_get_trigger_value_by_name() + - LP: #1347088 + * rtmutex: Plug slow unlock race + - LP: #1347088 + * ACPI / ia64 / sba_iommu: Restore the working initialization ordering + - LP: #1347088 + * epoll: fix use-after-free in eventpoll_release_file + - LP: #1347088 + * drm/nouveau/kms: reference vblank for crtc during pageflip. + - LP: #1347088 + * ARM: mvebu: DT: fix OpenBlocks AX3-4 RAM size + - LP: #1347088 + * USB: EHCI: avoid BIOS handover on the HASEE E200 + - LP: #1347088 + * arm64: Bug fix in stack alignment exception + - LP: #1347088 + * arm64: ptrace: change fs when passing kernel pointer to regset code + - LP: #1347088 + * arm64: uid16: fix __kernel_old_{gid,uid}_t definitions + - LP: #1347088 + * arm64: ptrace: fix empty registers set in prstatus of aarch32 process + core + - LP: #1347088 + * ALSA: control: Protect user controls against concurrent access + - LP: #1347088 + * ALSA: control: Fix replacing user controls + - LP: #1347088 + * ALSA: control: Don't access controls outside of protected regions + - LP: #1347088 + * ALSA: control: Handle numid overflow + - LP: #1347088 + * ALSA: control: Make sure that id->index does not overflow + - LP: #1347088 + * Bluetooth: Fix SSP acceptor just-works confirmation without MITM + - LP: #1347088 + * Bluetooth: Fix setting correct authentication information for SMP STK + - LP: #1347088 + * Bluetooth: Fix indicating discovery state when canceling inquiry + - LP: #1347088 + * Bluetooth: Fix locking of hdev when calling into SMP code + - LP: #1347088 + * Bluetooth: Allow change security level on ATT_CID in slave role + - LP: #1347088 + * rt2x00: disable TKIP on USB + - LP: #1347088 + * b43: fix frequency reported on G-PHY with /new/ firmware + - LP: #1347088 + * rt2x00: fix rfkill regression on rt2500pci + - LP: #1347088 + * blkcg: fix use-after-free in __blkg_release_rcu() by making blkcg_gq + refcnt an atomic_t + - LP: #1347088 + * rbd: handle parent_overlap on writes correctly + - LP: #1347088 + * ALSA: hda - hdmi: call overridden init on resume + - LP: #1347088 + * x86_32, entry: Do syscall exit work on badsys (CVE-2014-4508) + - LP: #1347088 + * hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned + entry + - LP: #1347088 + * kernel/watchdog.c: remove preemption restrictions when restarting + lockup detector + - LP: #1347088 + * DMA, CMA: fix possible memory leak + - LP: #1347088 + * mm: fix crashes from mbind() merging vmas + - LP: #1347088 + * drm/i915: Hold the table lock whilst walking the file's idr and + counting the objects in debugfs + - LP: #1347088 + * [CIFS] fix mount 
failure with broken pathnames when smb3 mount with + mapchars option + - LP: #1347088 + * aio: fix aio request leak when events are reaped by userspace + - LP: #1347088 + * aio: fix kernel memory disclosure in io_getevents() introduced in v3.10 + - LP: #1347088 + * nfs: Fix cache_validity check in nfs_write_pageuptodate() + - LP: #1347088 + * powerpc: Don't skip ePAPR spin-table CPUs + - LP: #1347088 + * net: allwinner: emac: Add missing free_irq + - LP: #1347088 + * ALSA: usb-audio: Fix races at disconnection and PCM closing + - LP: #1347088 + * recordmcount/MIPS: Fix possible incorrect mcount_loc table entries in + modules + - LP: #1347088 + * MIPS: MSC: Prevent out-of-bounds writes to MIPS SC ioremap'd region + - LP: #1347088 + * ALSA: hda - restore BCLK M/N values when resuming HSW/BDW display + controller + - LP: #1347088 + * target: Fix left-over se_lun->lun_sep pointer OOPs + - LP: #1347088 + * iscsi-target: Explicily clear login response PDU in exception path + - LP: #1347088 + * efi-pstore: Fix an overflow on 32-bit builds + - LP: #1347088 + * lz4: fix another possible overrun + - LP: #1347088 + * iscsi-target: Avoid rejecting incorrect ITT for Data-Out + - LP: #1347088 + * iscsi-target: fix iscsit_del_np deadlock on unload + - LP: #1347088 + * Linux 3.13.11.5 + - LP: #1347088 + * powerpc/powernv: Fix endianness problems in EEH + - LP: #1340200 + * libahci: export ahci_qc_issue() and ahci_start_fix_rx() + - LP: #1335645 + * ahci_xgene: fix the dma state machine lockup for the IDENTIFY DEVICE + PIO mode command. + - LP: #1335645 + * fix build error in gpio-dwapb patch + - LP: #1348808 + * usb: Check if port status is equal to RxDetect + - LP: #1322409 + * net/mlx4_en: Protect MAC address modification with the state_lock mutex + - LP: #1347012 + * net/mlx4_en: Fix errors in MAC address changing when port is down + - LP: #1347012 + * bonding: Advertize vxlan offload features when supported + - LP: #1347012 + * net/mlx4_core: Fix the error flow when probing with invalid VF + configuration + - LP: #1347012 + * net/mlx4_en: Don't configure the HW vxlan parser when vxlan offloading + isn't set + - LP: #1347012 + * net/mlx4_core: Keep only one driver entry release mlx4_priv + - LP: #1347012 + * net/mlx4_core: Preserve pci_dev_data after __mlx4_remove_one() + - LP: #1347012 + * net/mlx4_core: Defer VF initialization till PF is fully initialized + - LP: #1347012 + * net/mlx4_core: Adjust port number in qp_attach wrapper when detaching + - LP: #1347012 + * net/mlx4_core: Fix slave id computation for single port VF + - LP: #1347012 + * net/mlx4_core: Load the Eth driver first + - LP: #1347012 + * net/mlx4_core: Don't issue PCIe speed/width checks for VFs + - LP: #1347012 + * net/mlx4_core: Add UPDATE_QP SRIOV wrapper support + - LP: #1347012 + * net/mlx4_core: Reset RoCE VF gids when guest driver goes down + - LP: #1347012 + * net/mlx4_en: Reduce memory consumption on kdump kernel + - LP: #1347012 + * net/mlx4_core: Use low memory profile on kdump kernel + - LP: #1347012 + * net/mlx4_en: current_mac isn't updated in port up + - LP: #1347012 + * net/mlx4_en: Disable blueflame using ethtool private flags + - LP: #1347012 + * net/mlx4_en: Fix mac_hash database inconsistency + - LP: #1347012 + * ext4: handle symlink properly with inline_data + - LP: #1349020 + * net/mlx4_en: cq->irq_desc wasn't set in legacy EQ's + - LP: #1354242 + * rtl8821ae: fixup staging driver for revised + ieee80211_is_robust_mgmt_frame + - LP: #1354469 + * ahci_xgene: Fix the watermark threshold for the APM X-Gene SATA 
host + controller driver. + - LP: #1350087 + * ahci_xgene: Use correct OOB tunning parameters for APM X-Gene SoC AHCI + SATA Host controller driver. + - LP: #1350087 + * sunrpc: create a new dummy pipe for gssd to hold open + - LP: #1327563 + * sunrpc: replace sunrpc_net->gssd_running flag with a more reliable + check + - LP: #1327563 + * nfs: check if gssd is running before attempting to use krb5i auth in + SETCLIENTID call + - LP: #1327563 + * ACPI / PAD: call schedule() when need_resched() is true + - LP: #1356913 + * KVM: ioapic: fix assignment of ioapic->rtc_status.pending_eoi + (CVE-2014-0155) + - LP: #1356913 + * target: Explicitly clear ramdisk_mcp backend pages + - LP: #1356913 + * sctp: Fix sk_ack_backlog wrap-around problem + - LP: #1356913 + * mm: hugetlb: fix copy_hugetlb_page_range() + - LP: #1356913 + * x86_32, entry: Store badsys error code in %eax + - LP: #1356913 + * shmem: fix faulting into a hole while it's punched + - LP: #1356913 + * shmem: fix faulting into a hole, not taking i_mutex + - LP: #1356913 + * shmem: fix splicing from a hole while it's punched + - LP: #1356913 + * ipvs: Fix panic due to non-linear skb + - LP: #1356913 + * ALSA: hda - verify pin:converter connection on unsol event for HSW and + VLV + - LP: #1356913 + * ALSA: hda - verify pin:cvt connection on preparing a stream for Intel + HDMI codec + - LP: #1356913 + * x86/xen: safely map and unmap grant frames when in atomic context + - LP: #1356913 + * ext4: Fix buffer double free in ext4_alloc_branch() + - LP: #1356913 + * ARM: OMAP2+: Fix parser-bug in platform muxing code + - LP: #1356913 + * KVM: x86: Increase the number of fixed MTRR regs to 10 + - LP: #1356913 + * KVM: x86: preserve the high 32-bits of the PAT register + - LP: #1356913 + * usb: musb: ux500: don't propagate the OF node + - LP: #1356913 + * usb: gadget: f_fs: fix NULL pointer dereference when there are no + strings + - LP: #1356913 + * staging: iio/ad7291: fix error code in ad7291_probe() + - LP: #1356913 + * iio: of_iio_channel_get_by_name() returns non-null pointers for error + legs + - LP: #1356913 + * irqchip: spear_shirq: Fix interrupt offset + - LP: #1356913 + * USB: option: add device ID for SpeedUp SU9800 usb 3g modem + - LP: #1356913 + * USB: ftdi_sio: fix null deref at port probe + - LP: #1356913 + * usb: option: add/modify Olivetti Olicard modems + - LP: #1356913 + * scsi_error: fix invalid setting of host byte + - LP: #1356913 + * xhci: Use correct SLOT ID when handling a reset device command + - LP: #1356913 + * xhci: correct burst count field for isoc transfers on 1.0 xhci hosts + - LP: #1356913 + * xhci: clear root port wake on bits if controller isn't wake-up capable + - LP: #1356913 + * xhci: Fix runtime suspended xhci from blocking system suspend. 
+ - LP: #1356913 + * ibmvscsi: Abort init sequence during error recovery + - LP: #1356913 + * ibmvscsi: Add memory barriers for send / receive + - LP: #1356913 + * virtio-scsi: avoid cancelling uninitialized work items + - LP: #1356913 + * virtio-scsi: fix various bad behavior on aborted requests + - LP: #1356913 + * MIPS: KVM: Fix memory leak on VCPU + - LP: #1356913 + * ext4: Fix hole punching for files with indirect blocks + - LP: #1356913 + * usb: musb: Fix panic upon musb_am335x module removal + - LP: #1356913 + * usb: musb: Ensure that cppi41 timer gets armed on premature DMA TX irq + - LP: #1356913 + * nfsd: fix rare symlink decoding bug + - LP: #1356913 + * tools: ffs-test: fix header values endianess + - LP: #1356913 + * usb-storage/SCSI: Add broken_fua blacklist flag + - LP: #1356913 + * drm/radeon/dpm: fix typo in vddci setup for eg/btc + - LP: #1356913 + * drm/radeon/dpm: fix vddci setup typo on cayman + - LP: #1356913 + * tracing: Remove ftrace_stop/start() from reading the trace file + - LP: #1356913 + * usb: chipidea: udc: delete td from req's td list at ep_dequeue + - LP: #1356913 + * drm/radeon/cik: fix typo in EOP packet + - LP: #1356913 + * md: flush writes before starting a recovery. + - LP: #1356913 + * drm/vmwgfx: Fix incorrect write to read-only register v2: + - LP: #1356913 + * mm: page_alloc: fix CMA area initialisation when pageblock > MAX_ORDER + - LP: #1356913 + * /proc/stat: convert to single_open_size() + - LP: #1356913 + * nick kvfree() from apparmor + - LP: #1356913 + * fs/seq_file: fallback to vmalloc allocation + - LP: #1356913 + * lz4: add overrun checks to lz4_uncompress_unknownoutputsize() + - LP: #1356913 + * arm64: mm: Make icache synchronisation logic huge page aware + - LP: #1356913 + * workqueue: fix dev_set_uevent_suppress() imbalance + - LP: #1356913 + * cpuset,mempolicy: fix sleeping function called from invalid context + - LP: #1356913 + * crypto: sha512_ssse3 - fix byte count to bit count conversion + - LP: #1356913 + * thermal: hwmon: Make the check for critical temp valid consistent + - LP: #1356913 + * clk: s2mps11: Fix double free corruption during driver unbind + - LP: #1356913 + * hwmon: (amc6821) Fix permissions for temp2_input + - LP: #1356913 + * hwmon: (adm1029) Ensure the fan_div cache is updated in set_fan_div + - LP: #1356913 + * hwmon: (adm1021) Fix cache problem when writing temperature limits + - LP: #1356913 + * ext4: fix unjournalled bg descriptor while initializing inode bitmap + - LP: #1356913 + * ext4: clarify error count warning messages + - LP: #1356913 + * ext4: clarify ext4_error message in ext4_mb_generate_buddy_error() + - LP: #1356913 + * ext4: disable synchronous transaction batching if max_batch_time==0 + - LP: #1356913 + * intel_pstate: Fix setting VID + - LP: #1356913 + * intel_pstate: don't touch turbo bit if turbo disabled or unavailable. 
+ - LP: #1356913 + * intel_pstate: Set CPU number before accessing MSRs + - LP: #1356913 + * USB: cp210x: add support for Corsair usb dongle + - LP: #1356913 + * usb: option: Add ID for Telewell TW-LTE 4G v2 + - LP: #1356913 + * ACPI / EC: Avoid race condition related to advance_transaction() + - LP: #1356913 + * ACPI / EC: Add asynchronous command byte write support + - LP: #1356913 + * ACPI / EC: Remove duplicated ec_wait_ibf0() waiter + - LP: #1356913 + * ACPI / EC: Fix race condition in ec_transaction_completed() + - LP: #1356913 + * ACPI / battery: Retry to get battery information if failed during + probing + - LP: #1356913 + * hwmon: (adm1031) Fix writes to limit registers + - LP: #1356913 + * workqueue: zero cpumask of wq_numa_possible_cpumask on init + - LP: #1356913 + * hwmon: (emc2103) Clamp limits instead of bailing out + - LP: #1356913 + * arm64: implement TASK_SIZE_OF + - LP: #1356913 + * iio: ti_am335x_adc: Fix: Use same step id at FIFOs both ends + - LP: #1356913 + * cpufreq: Makefile: fix compilation for davinci platform + - LP: #1356913 + * drm/i915: Don't clobber the GTT when it's within stolen memory + - LP: #1356913 + * Drivers: hv: vmbus: Fix a bug in the channel callback dispatch code + - LP: #1356913 + * USB: ftdi_sio: Add extra PID. + - LP: #1356913 + * crypto: caam - fix memleak in caam_jr module + - LP: #1356913 + * dm: allocate a special workqueue for deferred device removal + - LP: #1356913 + * dm io: fix a race condition in the wake up code for sync_io + - LP: #1356913 + * drm/radeon/dp: return -EIO for flags not zero case + - LP: #1356913 + * drm/radeon: fix typo in golden register setup on evergreen + - LP: #1356913 + * drm/radeon: fix typo in ci_stop_dpm() + - LP: #1356913 + * drm/radeon/dpm: Reenabling SS on Cayman + - LP: #1356913 + * powerpc/perf: Add PPMU_ARCH_207S define + - LP: #1356913 + * powerpc/perf: Clear MMCR2 when enabling PMU + - LP: #1356913 + * powerpc/perf: Never program book3s PMCs with values >= 0x80000000 + - LP: #1356913 + * USB: serial: ftdi_sio: Add Infineon Triboard + - LP: #1356913 + * phy: core: Fix error path in phy_create() + - LP: #1356913 + * ext4: fix a potential deadlock in __ext4_es_shrink() + - LP: #1356913 + * parisc: add serial ports of C8000/1GHz machine to hardware database + - LP: #1356913 + * parisc: fix fanotify_mark() syscall on 32bit compat kernel + - LP: #1356913 + * parisc: drop unused defines and header includes + - LP: #1356913 + * clk: spear3xx: Use proper control register offset + - LP: #1356913 + * Bluetooth: Ignore H5 non-link packets in non-active state + - LP: #1356913 + * iwlwifi: update the 7265 series HW IDs + - LP: #1356913 + * mwifiex: fix Tx timeout issue + - LP: #1356913 + * x86, tsc: Fix cpufreq lockup + - LP: #1356913 + * perf/x86/intel: ignore CondChgd bit to avoid false NMI handling + - LP: #1356913 + * perf: Do not allow optimized switch for non-cloned events + - LP: #1356913 + * xen/manage: fix potential deadlock when resuming the console + - LP: #1356913 + * iwlwifi: dvm: don't enable CTS to self + - LP: #1356913 + * iwlwifi: mvm: disable CTS to Self + - LP: #1356913 + * xen/balloon: set ballooned out pages as invalid in p2m + - LP: #1356913 + * mtd: devices: elm: fix elm_context_save() and elm_context_restore() + functions + - LP: #1356913 + * fuse: timeout comparison fix + - LP: #1356913 + * fuse: ignore entry-timeout on LOOKUP_REVAL + - LP: #1356913 + * fuse: handle large user and group ID + - LP: #1356913 + * alarmtimer: Fix bug where relative alarm timers were treated as + absolute + - 
LP: #1356913 + * irqchip: gic: Add support for cortex a7 compatible string + - LP: #1356913 + * net: mvneta: fix operation in 10 Mbit/s mode + - LP: #1356913 + * net: mvneta: Fix big endian issue in mvneta_txq_desc_csum() + - LP: #1356913 + * igb: Workaround for i210 Errata 25: Slow System Clock + - LP: #1356913 + * x86/efi: Include a .bss section within the PE/COFF headers + - LP: #1356913 + * igb: do a reset on SR-IOV re-init if device is down + - LP: #1356913 + * iio:core: Handle error when mask type is not separate + - LP: #1356913 + * of/irq: do irq resolution in platform_get_irq_byname() + - LP: #1356913 + * platform_get_irq: Revert to platform_get_resource if of_irq_get fails + - LP: #1356913 + * aio: protect reqs_available updates from changes in interrupt handlers + - LP: #1356913 + * hwmon: (da9052) Don't use dash in the name attribute + - LP: #1356913 + * hwmon: (da9055) Don't use dash in the name attribute + - LP: #1356913 + * PM / sleep: Fix request_firmware() error at resume + - LP: #1356913 + * ALSA: hda - Fix broken PM due to incomplete i915 initialization + - LP: #1356913 + * tracing: Add ftrace_trace_stack into __trace_puts/__trace_bputs + - LP: #1356913 + * tracing: Fix graph tracer with stack tracer on other archs + - LP: #1356913 + * tracing: Add TRACE_ITER_PRINTK flag check in __trace_puts/__trace_bputs + - LP: #1356913 + * dm thin metadata: do not allow the data block size to change + - LP: #1356913 + * dm cache metadata: do not allow the data block size to change + - LP: #1356913 + * quota: missing lock in dqcache_shrink_scan() + - LP: #1356913 + * ring-buffer: Fix polling on trace_pipe + - LP: #1356913 + * sched: Fix possible divide by zero in avg_atom() calculation + - LP: #1356913 + * locking/mutex: Disable optimistic spinning on some architectures + - LP: #1356913 + * drm/qxl: return IRQ_NONE if it was not our irq + - LP: #1356913 + * hwmon: (adt7470) Fix writes to temperature limit registers + - LP: #1356913 + * cpufreq: move policy kobj to policy->cpu at resume + - LP: #1356913 + * drm/radeon: avoid leaking edid data + - LP: #1356913 + * drm/radeon: set default bl level to something reasonable + - LP: #1356913 + * usb: chipidea: udc: Disable auto ZLP generation on ep0 + - LP: #1356913 + * irqchip: gic: Fix core ID calculation when topology is read from DT + - LP: #1356913 + * slab_common: fix the check for duplicate slab names + - LP: #1356913 + * xtensa: add fixup for double exception raised in window overflow + - LP: #1356913 + * [media] media: v4l2-core: v4l2-dv-timings.c: Cleaning up code wrong + value used in aspect ratio + - LP: #1356913 + * [media] hdpvr: fix two audio bugs + - LP: #1356913 + * block: don't assume last put of shared tags is for the host + - LP: #1356913 + * blkcg: don't call into policy draining if root_blkg is already gone + - LP: #1356913 + * block: provide compat ioctl for BLKZEROOUT + - LP: #1356913 + * libata: support the ata host which implements a queue depth less than + 32 + - LP: #1356913 + * [media] tda10071: force modulation to QPSK on DVB-S + - LP: #1356913 + * [media] gspca_pac7302: Add new usb-id for Genius i-Look 317 + - LP: #1356913 + * s390/ptrace: fix PSW mask check + - LP: #1356913 + * ahci: add support for the Promise FastTrak TX8660 SATA HBA (ahci mode) + - LP: #1356913 + * Input: fix defuzzing logic + - LP: #1356913 + * tracing: Fix wraparound problems in "uptime" trace clock + - LP: #1356913 + * drm/i915: Reorder the semaphore deadlock check, again + - LP: #1356913 + * libata: introduce ata_host->n_tags to avoid 
oops on SAS controllers + - LP: #1356913 + * drm/radeon: fix irq ring buffer overflow handling + - LP: #1356913 + * coredump: fix the setting of PF_DUMPCORE + - LP: #1356913 + * fs: umount on symlink leaks mnt count + - LP: #1356913 + * hwmon: (smsc47m192) Fix temperature limit and vrm write operations + - LP: #1356913 + * parisc: Remove SA_RESTORER define + - LP: #1356913 + * drm/radeon: fix cut and paste issue for hawaii. + - LP: #1356913 + * parport: fix menu breakage + - LP: #1356913 + * Fix gcc-4.9.0 miscompilation of load_balance() in scheduler + - LP: #1356913 + * scsi: handle flush errors properly + - LP: #1356913 + * cfg80211: fix mic_failure tracing + - LP: #1356913 + * iio: buffer: Fix demux table creation + - LP: #1356913 + * iio:bma180: Fix scale factors to report correct acceleration units + - LP: #1356913 + * iio:bma180: Missing check for frequency fractional part + - LP: #1356913 + * powerpc/perf: Fix MMCR2 handling for EBB + - LP: #1356913 + * ath9k: fix aggregation session lockup + - LP: #1356913 + * sched_clock: Avoid corrupting hrtimer tree during suspend + - LP: #1356913 + * staging: vt6655: Fix Warning on boot handle_irq_event_percpu. + - LP: #1356913 + * staging: vt6655: Fix disassociated messages every 10 seconds + - LP: #1356913 + * can: c_can_platform: Fix raminit, use devm_ioremap() instead of + devm_ioremap_resource() + - LP: #1356913 + * crypto: arm-aes - fix encryption of unaligned data + - LP: #1356913 + * ARM: fix alignment of keystone page table fixup + - LP: #1356913 + * net: sendmsg: fix NULL pointer dereference + - LP: #1356913 + * mm/page-writeback.c: fix divide by zero in bdi_dirty_limits() + - LP: #1356913 + * mm, thp: do not allow thp faults to avoid cpuset restrictions + - LP: #1356913 + * rapidio/tsi721_dma: fix failure to obtain transaction descriptor + - LP: #1356913 + * memcg: oom_notify use-after-free fix + - LP: #1356913 + * crypto: af_alg - properly label AF_ALG socket + - LP: #1356913 + * printk: rename printk_sched to printk_deferred + - LP: #1356913 + * timer: Fix lock inversion between hrtimer_bases.lock and scheduler + locks + - LP: #1356913 + * dm bufio: fully initialize shrinker + - LP: #1356913 + * dm cache: fix race affecting dirty block count + - LP: #1356913 + * qlcnic: info leak in qlcnic_dcb_peer_app_info() + - LP: #1356913 + * netlink: rate-limit leftover bytes warning and print process name + - LP: #1356913 + * bridge: Prevent insertion of FDB entry with disallowed vlan + - LP: #1356913 + * net: tunnels - enable module autoloading + - LP: #1356913 + * net: fix inet_getid() and ipv6_select_ident() bugs + - LP: #1356913 + * team: fix mtu setting + - LP: #1356913 + * tcp: fix cwnd undo on DSACK in F-RTO + - LP: #1356913 + * sh_eth: use RNC mode for packet reception + - LP: #1356913 + * sh_eth: fix SH7619/771x support + - LP: #1356913 + * net: filter: fix typo in sparc BPF JIT + - LP: #1356913 + * net: filter: fix sparc32 typo + - LP: #1356913 + * net: qmi_wwan: add Olivetti Olicard modems + - LP: #1356913 + * net: force a list_del() in unregister_netdevice_many() + - LP: #1356913 + * ipip, sit: fix ipv4_{update_pmtu,redirect} calls + - LP: #1356913 + * sfc: PIO:Restrict to 64bit arch and use 64-bit writes. 
+ - LP: #1356913 + * ipv4: fix a race in ip4_datagram_release_cb() + - LP: #1356913 + * rtnetlink: fix userspace API breakage for iproute2 < v3.9.0 + - LP: #1356913 + * vxlan: use dev->needed_headroom instead of dev->hard_header_len + - LP: #1356913 + * udp: ipv4: do not waste time in __udp4_lib_mcast_demux_lookup + - LP: #1356913 + * ip_tunnel: fix ip_tunnel_lookup + - LP: #1356913 + * slip: Fix deadlock in write_wakeup + - LP: #1356913 + * slcan: Port write_wakeup deadlock fix from slip + - LP: #1356913 + * net: sctp: propagate sysctl errors from proc_do* properly + - LP: #1356913 + * tcp: fix tcp_match_skb_to_sack() for unaligned SACK at end of an skb + - LP: #1356913 + * net: sctp: check proc_dointvec result in proc_sctp_do_auth + - LP: #1356913 + * 8021q: fix a potential memory leak + - LP: #1356913 + * net: huawei_cdc_ncm: increase command buffer size + - LP: #1356913 + * ipv4: fix dst race in sk_dst_get() + - LP: #1356913 + * ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix + - LP: #1356913 + * net: fix sparse warning in sk_dst_set() + - LP: #1356913 + * vlan: free percpu stats in device destructor + - LP: #1356913 + * bnx2x: fix possible panic under memory stress + - LP: #1356913 + * tcp: Fix divide by zero when pushing during tcp-repair + - LP: #1356913 + * ipv4: icmp: Fix pMTU handling for rare case + - LP: #1356913 + * net: qmi_wwan: Add ID for Telewell TW-LTE 4G v2 + - LP: #1356913 + * net: qmi_wwan: add two Sierra Wireless/Netgear devices + - LP: #1356913 + * net: Fix NETDEV_CHANGE notifier usage causing spurious arp flush + - LP: #1356913 + * igmp: fix the problem when mc leave group + - LP: #1356913 + * tcp: fix false undo corner cases + - LP: #1356913 + * appletalk: Fix socket referencing in skb + - LP: #1356913 + * netlink: Fix handling of error from netlink_dump(). 
+ - LP: #1356913 + * be2net: set EQ DB clear-intr bit in be_open() + - LP: #1356913 + * tipc: clear 'next'-pointer of message fragments before reassembly + - LP: #1356913 + * net: sctp: fix information leaks in ulpevent layer + - LP: #1356913 + * net: pppoe: use correct channel MTU when using Multilink PPP + - LP: #1356913 + * sunvnet: clean up objects created in vnet_new() on vnet_exit() + - LP: #1356913 + * net: huawei_cdc_ncm: add "subclass 3" devices + - LP: #1356913 + * dns_resolver: assure that dns_query() result is null-terminated + - LP: #1356913 + * dns_resolver: Null-terminate the right string + - LP: #1356913 + * ipv4: fix buffer overflow in ip_options_compile() + - LP: #1356913 + * x86/xen: no need to explicitly register an NMI callback + - LP: #1356913 + * Linux 3.13.11.6 + - LP: #1356913 + + -- Joseph Salisbury Thu, 14 Aug 2014 17:12:19 -0400 + +linux (3.13.0-34.60) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1356396 + + [ Upstream Kernel Changes ] + + * mnt: Only change user settable mount flags in remount + - CVE-2014-5206 + * mnt: Move the test for MNT_LOCK_READONLY from change_mount_flags into + do_remount + - CVE-2014-5206 + * mnt: Correct permission checks in do_remount + - CVE-2014-5207 + * mnt: Change the default remount atime from relatime to the existing + value + - CVE-2014-5207 + + -- Brad Figg Tue, 29 Jul 2014 08:25:07 -0700 + +linux (3.13.0-33.58) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1349897 + + [ Upstream Kernel Changes ] + + * mm: numa: do not automatically migrate KSM pages + - LP: #1346917 + * net: fix UDP tunnel GSO of frag_list GRO packets + - LP: #1331219 + * auditsc: audit_krule mask accesses need bounds checking + - LP: #1347088 + * n_tty: Fix buffer overruns with larger-than-4k pastes + - LP: #1208740 + + -- Tim Gardner Fri, 18 Jul 2014 14:57:50 +0000 + +linux (3.13.0-32.57) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * l2tp: Privilege escalation in ppp over l2tp sockets + - LP: #1341472 + - CVE-2014-4943 + + -- Luis Henriques Mon, 14 Jul 2014 10:03:44 +0100 + +linux (3.13.0-32.56) trusty; urgency=low + + [ Luis Henriques ] + + * Merged back Ubuntu-3.13.0-30.55 security release + * Revert "x86_64,ptrace: Enforce RIP <= TASK_SIZE_MAX (CVE-2014-4699)" + - LP: #1337339 + * Release Tracking Bug + - LP: #1338524 + + [ Upstream Kernel Changes ] + + * ptrace,x86: force IRET path after a ptrace_stop() + - LP: #1337339 + - CVE-2014-4699 + * hpsa: add new Smart Array PCI IDs (May 2014) + - LP: #1337516 + + -- Luis Henriques Mon, 07 Jul 2014 11:38:37 +0100 + +linux (3.13.0-31.55) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1336278 + + [ Andy Whitcroft ] + + * [Config] switch hyper-keyboard to virtual + - LP: #1325306 + * [Packaging] linux-udeb-flavour -- standardise on linux prefix + + [ dann frazier ] + + * [Config] CONFIG_GPIO_DWAPB=m + - LP: #1334823 + + [ Feng Kan ] + + * SAUCE: (no-up) arm64: dts: Add Designware GPIO dts binding to APM + X-Gene platform + - LP: #1334823 + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: fix apparmor spams log with warning message + - LP: #1308761 + + [ Kamal Mostafa ] + + * [Config] updateconfigs ACPI_PROCFS_POWER=y after v3.13.11.4 rebase + + [ Loc Ho ] + + * SAUCE: (no-up) phy-xgene: Use correct tuning for Mustang + - LP: #1335636 + + [ Michael Ellerman ] + + * SAUCE: (no-up) powerpc/perf: Ensure all EBB register state is cleared + on fork() + - LP: #1328914 + + [ Ming Lei ] + + * Revert "SAUCE: (no-up) rtc: 
Add X-Gene SoC Real Time Clock Driver" + - LP: #1274305 + + [ Suman Tripathi ] + + * SAUCE: (no-up) libahci: Implement the function ahci_restart_engine to + restart the port dma engine. + - LP: #1335645 + * SAUCE: (no-up) ata: Fix the dma state machine lockup for the IDENTIFY + DEVICE PIO mode command. + - LP: #1335645 + + [ Tim Gardner ] + + * [Config] CONFIG_POWERNV_CPUFREQ=y for powerpc, ppc64el + - LP: #1324571 + * [Debian] Add UTS_UBUNTU_RELEASE_ABI to utsrelease.h + - LP: #1327619 + * [Config] CONFIG_HAVE_MEMORYLESS_NODES=y + - LP: #1332063 + * [Config] CONFIG_HID_RMI=m + - LP: #1305522 + + [ Upstream Kernel Changes ] + + * Revert "offb: Add palette hack for little endian" + - LP: #1333430 + * Revert "net: mvneta: fix usage as a module on RGMII configurations" + - LP: #1333837 + * Revert "USB: serial: add usbid for dell wwan card to sierra.c" + - LP: #1333837 + * Revert "macvlan : fix checksums error when we are in bridge mode" + - LP: #1333838 + * serial: uart: add hw flow control support configuration + - LP: #1328295 + * mm/numa: Remove BUG_ON() in __handle_mm_fault() + - LP: #1323165 + * Tools: hv: Handle the case when the target file exists correctly + - LP: #1306215 + * Documentation/devicetree/bindings: add documentation for the APM X-Gene + SoC RTC DTS binding + - LP: #1274305 + * drivers/rtc: add APM X-Gene SoC RTC driver + - LP: #1274305 + * arm64: add APM X-Gene SoC RTC DTS entry + - LP: #1274305 + * powerpc/perf: Add Power8 cache & TLB events + - LP: #1328914 + * powerpc/perf: Configure BHRB filter before enabling PMU interrupts + - LP: #1328914 + * powerpc/perf: Define perf_event_print_debug() to print PMU register + values + - LP: #1328914 + * powerpc: Add a cpu feature CPU_FTR_PMAO_BUG + - LP: #1328914 + * powerpc/perf: Add lost exception workaround + - LP: #1328914 + * powerpc/perf: Reject EBB events which specify a sample_type + - LP: #1328914 + * powerpc/perf: Clean up the EBB hash defines a little + - LP: #1328914 + * powerpc/perf: Avoid mutating event in power8_get_constraint() + - LP: #1328914 + * powerpc/perf: Add BHRB constraint and IFM MMCRA handling for EBB + - LP: #1328914 + * powerpc/perf: Enable BHRB access for EBB events + - LP: #1328914 + * powerpc/perf: Fix handling of L3 events with bank == 1 + - LP: #1328914 + * Bluetooth: Add support for Intel Bluetooth device [8087:0a2a] + - LP: #1329184 + * iwlwifi: mvm: disable beacon filtering + - LP: #1293569 + * SUNRPC: Ensure that call_connect times out correctly + - LP: #1322407 + * SUNRPC: Ensure call_connect_status() deals correctly with SOFTCONN + tasks + - LP: #1322407 + * bitops: Fix signedness of compile-time hweight implementations + - LP: #1321791 + * cpumask.h: silence warning with -Wsign-compare + - LP: #1321791 + * fbdev/fb.h: silence warning with -Wsign-compare + - LP: #1321791 + * rtlwifi: make MSI support a module parameter + - LP: #1296591 + * rtlwifi: rtl8188ee: add msi module parameter + - LP: #1296591 + * rtlwifi: rtl8723be: add msi module parameter + - LP: #1296591 + * net: avoid dependency of net_get_random_once on nop patching + - LP: #1330671 + * x86-64, modify_ldt: Make support for 16-bit segments a runtime option + - LP: #1328965 + * ALSA: usb-audio: Prevent printk ratelimiting from spamming kernel log + while DEBUG not defined + - LP: #1319457 + * btrfs: fix defrag 32-bit integer overflow + - LP: #1324953 + * dell-laptop: Only install the i8042 filter when rfkill is active + - LP: #1289238 + * kthread: ensure locality of task_struct allocations + - LP: #1332063 + * slub: search 
partial list on numa_mem_id(), instead of numa_node_id() + - LP: #1332063 + * powerpc/numa: Enable USE_PERCPU_NUMA_NODE_ID + - LP: #1332063 + * powerpc/numa: Enable CONFIG_HAVE_MEMORYLESS_NODES + - LP: #1332063 + * drm/i915: Allow user modes to exceed DVI 165MHz limit + - LP: #1332220 + * HID: rmi: introduce RMI driver for Synaptics touchpads + - LP: #1305522 + * HID: rmi: do not stop the device at the end of probe + - LP: #1305522 + * HID: rmi: check for the existence of some optional queries before + reading query 12 + - LP: #1305522 + * HID: rmi: do not fetch more than 16 bytes in a query + - LP: #1305522 + * HID: rmi: fix wrong struct field name + - LP: #1305522 + * HID: rmi: fix masks for x and w_x data + - LP: #1305522 + * HID: rmi: do not handle touchscreens through hid-rmi + - LP: #1305522 + * ipv6: Fix regression caused by efe4208 in udp_v6_mcast_next() + - LP: #1332420 + * HID: core: do not scan constant input report + - LP: #1333837 + * drm/radeon: fix audio pin counts for DCE6+ (v2) + - LP: #1333837 + * mac80211: fix software remain-on-channel implementation + - LP: #1333837 + * mac80211: exclude AP_VLAN interfaces from tx power calculation + - LP: #1333837 + * iwlwifi: add new 7265 HW IDs + - LP: #1333837 + * parisc: fix epoll_pwait syscall on compat kernel + - LP: #1333837 + * iwlwifi: add MODULE_FIRMWARE for 7265 + - LP: #1333837 + * dma: edma: fix incorrect SG list handling + - LP: #1333837 + * ALSA: hda/realtek - Add support of ALC288 codec + - LP: #1333837 + * xen/spinlock: Don't enable them unconditionally. + - LP: #1333837 + * tick-common: Fix wrong check in tick_check_replacement() + - LP: #1333837 + * tick-sched: Check tick_nohz_enabled in tick_nohz_switch_to_nohz() + - LP: #1333837 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1333837 + * ALSA: hda/realtek - Add headset Mic support for Dell machine + - LP: #1333837 + * staging: r8188eu: Calling rtw_get_stainfo() with a NULL sta_addr will + return NULL + - LP: #1333837 + * cifs: Wait for writebacks to complete before attempting write. 
+ - LP: #1333837 + * mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll + - LP: #1333837 + * mei: ignore client writing state during cb completion + - LP: #1333837 + * staging: r8712u: Fix case where ethtype was never obtained and always + be checked against 0 + - LP: #1333837 + * staging: r8188eu: Fix case where ethtype was never obtained and always + be checked against 0 + - LP: #1333837 + * USB: serial: ftdi_sio: add id for Brainboxes serial cards + - LP: #1333837 + * usb: option driver, add support for Telit UE910v2 + - LP: #1333837 + * USB: cp210x: Add 8281 (Nanotec Plug & Drive) + - LP: #1333837 + * USB: pl2303: add ids for Hewlett-Packard HP POS pole displays + - LP: #1333837 + * USB: usb_wwan: fix handling of missing bulk endpoints + - LP: #1333837 + * USB: fix crash during hotplug of PCI USB controller card + - LP: #1333837 + * USB: cdc-acm: Remove Motorola/Telit H24 serial interfaces from ACM + driver + - LP: #1333837 + * Drivers: hv: vmbus: Negotiate version 3.0 when running on ws2012r2 + hosts + - LP: #1333837 + * serial: omap: Fix missing pm_runtime_resume handling by simplifying + code + - LP: #1333837 + * drm/radeon: disable mclk dpm on R7 260X + - LP: #1333837 + * drm/radeon: fix runpm handling on APUs (v4) + - LP: #1333837 + * drm/radeon: add support for newer mc ucode on SI (v2) + - LP: #1333837 + * drm/radeon: add support for newer mc ucode on CI (v2) + - LP: #1333837 + * drm/radeon: re-enable mclk dpm on R7 260X asics + - LP: #1333837 + * drm/radeon: memory leak on bo reservation failure. v2 + - LP: #1333837 + * drm/radeon/si: make sure mc ucode is loaded before checking the size + - LP: #1333837 + * drm/radeon/ci: make sure mc ucode is loaded before checking the size + - LP: #1333837 + * init/Kconfig: move the trusted keyring config option to general setup + - LP: #1333837 + * mm/hugetlb.c: add cond_resched_lock() in return_unused_surplus_pages() + - LP: #1333837 + * thp: close race between split and zap huge pages + - LP: #1333837 + * coredump: fix va_list corruption + - LP: #1333837 + * powerpc/tm: Disable IRQ in tm_recheckpoint + - LP: #1333837 + * ACPI / EC: Process rather than discard events in acpi_ec_clear + - LP: #1333837 + * ath9k: Fix sequence number assignment for non-data frames + - LP: #1333837 + * xhci: Switch Intel Lynx Point ports to EHCI on shutdown. 
+ - LP: #1333837 + * iio: adc: at91_adc: Repair broken platform_data support + - LP: #1333837 + * iio: querying buffer scan_mask should return 0/1 + - LP: #1333837 + * iio: cm36651: Fix i2c client leak and possible NULL pointer dereference + - LP: #1333837 + * libata: Update queued trim blacklist for M5x0 drives + - LP: #1333837 + * pata_at91: fix ata_host_activate() failure handling + - LP: #1333837 + * ext4: avoid possible overflow in ext4_map_blocks() + - LP: #1333837 + * ext4: FIBMAP ioctl causes BUG_ON due to handle EXT_MAX_BLOCKS + - LP: #1333837 + * ext4: note the error in ext4_end_bio() + - LP: #1333837 + * ext4: fix jbd2 warning under heavy xattr load + - LP: #1333837 + * ext4: move ext4_update_i_disksize() into mpage_map_and_submit_extent() + - LP: #1333837 + * ext4: use i_size_read in ext4_unaligned_aio() + - LP: #1333837 + * locks: allow __break_lease to sleep even when break_time is 0 + - LP: #1333837 + * usb: gadget: zero: Fix SuperSpeed enumeration for alternate setting 1 + - LP: #1333837 + * ahci: do not request irq for dummy port + - LP: #1333837 + * genirq: Allow forcing cpu affinity of interrupts + - LP: #1333837 + * irqchip: Gic: Support forced affinity setting + - LP: #1333837 + * clocksource: Exynos_mct: Use irq_force_affinity() in cpu bringup + - LP: #1333837 + * clocksource: Exynos_mct: Register clock event after request_irq() + - LP: #1333837 + * nfsd: set timeparms.to_maxval in setup_callback_client + - LP: #1333837 + * ahci: Do not receive interrupts sent by dummy ports + - LP: #1333837 + * libata/ahci: accommodate tag ordered controllers + - LP: #1333837 + * drm/radeon: disable dpm on rv770 by default + - LP: #1333837 + * Input: synaptics - add min/max quirk for ThinkPad T431s, L440, L540, S1 + Yoga and X1 + - LP: #1333837 + * drm/radeon: fix count in cik_sdma_ring_test() + - LP: #1333837 + * drm/radeon: properly unregister hwmon interface (v2) + - LP: #1333837 + * drm/radeon/pm: don't walk the crtc list before it has been initialized + (v2) + - LP: #1333837 + * drm/radeon: fix ATPX detection on non-VGA GPUs + - LP: #1333837 + * drm/radeon: don't allow runpm=1 on systems with out ATPX + - LP: #1333837 + * mm: make fixup_user_fault() check the vma access rights too + - LP: #1333837 + * ARM: 8027/1: fix do_div() bug in big-endian systems + - LP: #1333837 + * ARM: 8030/1: ARM : kdump : add arch_crash_save_vmcoreinfo + - LP: #1333837 + * ARM: pxa: hx4700.h: include "irqs.h" for PXA_NR_BUILTIN_GPIO + - LP: #1333837 + * ARM: tegra: remove UART5/UARTE from tegra124.dtsi + - LP: #1333837 + * USB: serial: fix sysfs-attribute removal deadlock + - LP: #1333837 + * 8250_core: Fix unwanted TX chars write + - LP: #1333837 + * serial: 8250: Fix thread unsafe __dma_tx_complete function + - LP: #1333837 + * Btrfs: fix inode caching vs tree log + - LP: #1333837 + * xhci: For streams the css flag most be read from the stream-ctx on ep + stop + - LP: #1333837 + * usb: xhci: Prefer endpoint context dequeue pointer over stopped_trb + - LP: #1333837 + * USB: io_ti: fix firmware download on big-endian machines + - LP: #1333837 + * usb: qcserial: add Sierra Wireless EM7355 + - LP: #1333837 + * usb: qcserial: add Sierra Wireless MC73xx + - LP: #1333837 + * usb: qcserial: add Sierra Wireless MC7305/MC7355 + - LP: #1333837 + * usb: option: add Olivetti Olicard 500 + - LP: #1333837 + * usb: option: add Alcatel L800MA + - LP: #1333837 + * usb: option: add and update a number of CMOTech devices + - LP: #1333837 + * word-at-a-time: avoid undefined behaviour in zero_bytemask macro + - LP: 
#1333837 + * s390/chsc: fix SEI usage on old FW levels + - LP: #1333837 + * irqchip: armada-370-xp: fix invalid cast of signed value into unsigned + variable + - LP: #1333837 + * irqchip: armada-370-xp: implement the ->check_device() msi_chip + operation + - LP: #1333837 + * irqchip: armada-370-xp: Fix releasing of MSIs + - LP: #1333837 + * ASoC: dapm: Fix widget double free with auto-disable DAPM kcontrol + - LP: #1333837 + * drm/i915: Don't check gmch state on inherited configs + - LP: #1333837 + * drm/vmwgfx: Make sure user-space can't DMA across buffer object + boundaries v2 + - LP: #1333837 + * of/irq: do irq resolution in platform_get_irq + - LP: #1333837 + * s390/bpf,jit: initialize A register if 1st insn is BPF_S_LDX_B_MSH + - LP: #1333837 + * drm/i915: Don't WARN nor handle unexpected hpd interrupts on gmch + platforms + - LP: #1333837 + * module: remove warning about waiting module removal. + - LP: #1333837 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1333837 + * arm: KVM: fix possible misalignment of PGDs and bounce page + - LP: #1333837 + * KVM: ARM: vgic: Fix sgi dispatch problem + - LP: #1333837 + * KVM: async_pf: mm->mm_users can not pin apf->mm + - LP: #1333837 + * ftrace/module: Hardcode ftrace_module_init() call into load_module() + - LP: #1333837 + * [SCSI] mpt2sas: Don't disable device twice at suspend. + - LP: #1333837 + * [SCSI] virtio-scsi: Skip setting affinity on uninitialized vq + - LP: #1333837 + * drivercore: deferral race condition fix + - LP: #1333837 + * hrtimer: Prevent all reprogramming if hang detected + - LP: #1333837 + * hrtimer: Prevent remote enqueue of leftmost timers + - LP: #1333837 + * timer: Prevent overflow in apply_slack + - LP: #1333837 + * ARC: !PREEMPT: Ensure Return to kernel mode is IRQ safe + - LP: #1333837 + * aio: fix potential leak in aio_run_iocb(). 
+ - LP: #1333837 + * dm cache: fix writethrough mode quiescing in cache_map + - LP: #1333837 + * fix races between __d_instantiate() and checks of dentry flags + - LP: #1333837 + * net: Fix ns_capable check in sock_diag_put_filterinfo + - LP: #1333837 + * rt2x00: fix beaconing on USB + - LP: #1333837 + * rtlwifi: rtl8188ee: initialize packet_beacon + - LP: #1333837 + * Input: synaptics - add min/max quirk for ThinkPad Edge E431 + - LP: #1333837 + * Input: atkbd - fix keyboard not working on some LG laptops + - LP: #1333837 + * Bluetooth: Fix triggering BR/EDR L2CAP Connect too early + - LP: #1333837 + * drm/i915: Break encoder->crtc link separately in intel_sanitize_crtc() + - LP: #1333837 + * iio:imu:mpu6050: Fixed segfault in Invensens MPU driver due to null + dereference + - LP: #1333837 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1333837 + * rtlwifi: rtl8192se: Fix regression due to commit 1bf4bbb + - LP: #1333837 + * rtl8192cu: Fix unbalanced irq enable in error path of rtl92cu_hw_init() + - LP: #1333837 + * drm/radeon/uvd: use lower clocks on old UVD to boot v2 + - LP: #1333837 + * drm/radeon: use pflip irq on R600+ v2 + - LP: #1333837 + * drm/radeon: check buffer relocation offset + - LP: #1333837 + * drm/nouveau/acpi: allow non-optimus setups to load vbios from acpi + - LP: #1333837 + * drm/nouveau: fix another lock unbalance in nouveau_crtc_page_flip + - LP: #1333837 + * ALSA: usb-audio: work around corrupted TEAC UD-H01 feedback data + - LP: #1333837 + * USB: OHCI: fix problem with global suspend on ATI controllers + - LP: #1333837 + * usb: qcserial: add a number of Dell devices + - LP: #1333837 + * usb: storage: shuttle_usbat: fix discs being detected twice + - LP: #1333837 + * fsl-usb: do not test for PHY_CLK_VALID bit on controller version 1.6 + - LP: #1333837 + * tty: serial: 8250_core.c Bug fix for Exar chips. 
+ - LP: #1333837 + * drivers/tty/hvc: don't free hvc_console_setup after init + - LP: #1333837 + * tty: Fix lockless tty buffer race + - LP: #1333837 + * USB: Nokia 305 should be treated as unusual dev + - LP: #1333837 + * USB: Nokia 5300 should be treated as unusual dev + - LP: #1333837 + * HID: add NO_INIT_REPORTS quirk for Synaptics Touch Pad V 103S + - LP: #1333837 + * ALSA: hda - hdmi: Set converter channel count even without sink + - LP: #1333837 + * Input: elantech - fix touchpad initialization on Gigabyte U2442 + - LP: #1333837 + * posix_acl: handle NULL ACL in posix_acl_equiv_mode + - LP: #1333837 + * mm/page-writeback.c: fix divide by zero in pos_ratio_polynom + - LP: #1333837 + * mm: compaction: detect when scanners meet in isolate_freepages + - LP: #1333837 + * mm/compaction: make isolate_freepages start at pageblock boundary + - LP: #1333837 + * autofs: fix lockref lookup + - LP: #1333837 + * libata: Blacklist queued trim for Crucial M500 + - LP: #1333837 + * Linux 3.13.11.3 + - LP: #1333837 + * net: sctp: wake up all assocs if sndbuf policy is per socket + - LP: #1333838 + * net: sctp: test if association is dead in sctp_wake_up_waiters + - LP: #1333838 + * l2tp: take PMTU from tunnel UDP socket + - LP: #1333838 + * net: core: don't account for udp header size when computing seglen + - LP: #1333838 + * bonding: Remove debug_fs files when module init fails + - LP: #1333838 + * bridge: Fix double free and memory leak around br_allowed_ingress + - LP: #1333838 + * ipv6: Limit mtu to 65575 bytes + - LP: #1333838 + * gre: don't allow to add the same tunnel twice + - LP: #1333838 + * vti: don't allow to add the same tunnel twice + - LP: #1333838 + * ipv4: return valid RTA_IIF on ip route get + - LP: #1333838 + * filter: prevent nla extensions to peek beyond the end of the message + - LP: #1333838 + * ip6_gre: don't allow to remove the fb_tunnel_dev + - LP: #1333838 + * vlan: Fix lockdep warning when vlan dev handle notification + - LP: #1333838 + * net: Find the nesting level of a given device by type. + - LP: #1333838 + * net: Allow for more then a single subclass for netif_addr_lock + - LP: #1333838 + * vlan: Fix lockdep warning with stacked vlan devices. + - LP: #1333838 + * macvlan: Fix lockdep warnings with stacked macvlan devices + - LP: #1333838 + * tg3: update rx_jumbo_pending ring param only when jumbo frames are + enabled + - LP: #1333838 + * net: sctp: cache auth_enable per endpoint + - LP: #1333838 + * rtnetlink: Warn when interface's information won't fit in our packet + - LP: #1333838 + * rtnetlink: Only supply IFLA_VF_PORTS information when RTEXT_FILTER_VF + is set + - LP: #1333838 + * ipv6: fib: fix fib dump restart + - LP: #1333838 + * bridge: Handle IFLA_ADDRESS correctly when creating bridge device + - LP: #1333838 + * sctp: reset flowi4_oif parameter on route lookup + - LP: #1333838 + * net: qmi_wwan: add Sierra Wireless EM7355 + - LP: #1333838 + * net: qmi_wwan: add Sierra Wireless MC73xx + - LP: #1333838 + * net: qmi_wwan: add Sierra Wireless MC7305/MC7355 + - LP: #1333838 + * net: qmi_wwan: add Olivetti Olicard 500 + - LP: #1333838 + * net: qmi_wwan: add Alcatel L800MA + - LP: #1333838 + * net: qmi_wwan: add a number of CMOTech devices + - LP: #1333838 + * net: qmi_wwan: add a number of Dell devices + - LP: #1333838 + * slip: fix spinlock variant + - LP: #1333838 + * net: sctp: Potentially-Failed state should not be reached from + unconfirmed state + - LP: #1333838 + * net: sctp: Don't transition to PF state when transport has exhausted + 'Path.Max.Retrans'. 
+ - LP: #1333838 + * mactap: Fix checksum errors for non-gso packets in bridge mode + - LP: #1333838 + * tcp_cubic: fix the range of delayed_ack + - LP: #1333838 + * vsock: Make transport the proto owner + - LP: #1333838 + * net: cdc_ncm: fix buffer overflow + - LP: #1333838 + * ip_tunnel: Set network header properly for IP_ECN_decapsulate() + - LP: #1333838 + * net: cdc_mbim: __vlan_find_dev_deep need rcu_read_lock + - LP: #1333838 + * net: ipv4: ip_forward: fix inverted local_df test + - LP: #1333838 + * net: ipv6: send pkttoobig immediately if orig frag size > mtu + - LP: #1333838 + * ipv4: fib_semantics: increment fib_info_cnt after fib_info allocation + - LP: #1333838 + * net: cdc_mbim: handle unaccelerated VLAN tagged frames + - LP: #1333838 + * macvlan: Don't propagate IFF_ALLMULTI changes on down interfaces. + - LP: #1333838 + * sfc: fix calling of free_irq with already free vector + - LP: #1333838 + * ip6_tunnel: fix potential NULL pointer dereference + - LP: #1333838 + * net: filter: x86: fix JIT address randomization + - LP: #1333838 + * net: filter: s390: fix JIT address randomization + - LP: #1333838 + * ipv6: fix calculation of option len in ip6_append_data + - LP: #1333838 + * rtnetlink: wait for unregistering devices in rtnl_link_unregister() + - LP: #1333838 + * net: gro: make sure skb->cb[] initial content has not to be zero + - LP: #1333838 + * batman-adv: fix reference counting imbalance while sending fragment + - LP: #1333838 + * batman-adv: increase orig refcount when storing ref in gw_node + - LP: #1333838 + * batman-adv: fix local TT check for outgoing arp requests in DAT + - LP: #1333838 + * ip_tunnel: Initialize the fallback device properly + - LP: #1333838 + * ipv4: initialise the itag variable in __mkroute_input + - LP: #1333838 + * net-gro: reset skb->truesize in napi_reuse_skb() + - LP: #1333838 + * netfilter: ipv4: defrag: set local_df flag on defragmented skb + - LP: #1333838 + * ima: introduce ima_kernel_read() + - LP: #1333838 + * ima: audit log files opened with O_DIRECT flag + - LP: #1333838 + * percpu: make pcpu_alloc_chunk() use pcpu_mem_free() instead of kfree() + - LP: #1333838 + * workqueue: fix bugs in wq_update_unbound_numa() failure path + - LP: #1333838 + * [media] fc2580: fix tuning failure on 32-bit arch + - LP: #1333838 + * memory: mvebu-devbus: fix the conversion of the bus width + - LP: #1333838 + * ARM: orion5x: fix target ID for crypto SRAM window + - LP: #1333838 + * workqueue: make rescuer_thread() empty wq->maydays list before exiting + - LP: #1333838 + * workqueue: fix a possible race condition between rescuer and + pwq-release + - LP: #1333838 + * spi: core: Ignore unsupported Dual/Quad Transfer Mode bits + - LP: #1333838 + * device_cgroup: rework device access check and exception checking + - LP: #1333838 + * PCI: mvebu: fix off-by-one in the computed size of the mbus windows + - LP: #1333838 + * bus: mvebu-mbus: allow several windows with the same target/attribute + - LP: #1333838 + * PCI: mvebu: split PCIe BARs into multiple MBus windows when needed + - LP: #1333838 + * ARM: mvebu: fix NOR bus-width in Armada XP GP Device Tree + - LP: #1333838 + * ARM: mvebu: fix NOR bus-width in Armada XP DB Device Tree + - LP: #1333838 + * ARM: mvebu: fix NOR bus-width in Armada XP OpenBlocks AX3 Device Tree + - LP: #1333838 + * crypto: caam - add allocation failure handling in SPRINTFCAT macro + - LP: #1333838 + * ARM: common: edma: Fix xbar mapping + - LP: #1333838 + * clk: Fix double free due to devm_clk_register() + - LP: #1333838 + * 
[media] media-device: fix infoleak in ioctl media_enum_entities() + - LP: #1333838 + * ARM: dts: kirkwood: fix mislocated pcie-controller nodes + - LP: #1333838 + * device_cgroup: check if exception removal is allowed + - LP: #1333838 + * md: avoid possible spinning md thread at shutdown. + - LP: #1333838 + * ACPI: Remove Kconfig symbol ACPI_PROCFS + - LP: #1333838 + * ACPI: Revert "ACPI: Remove CONFIG_ACPI_PROCFS_POWER and cm_sbsc.c" + - LP: #1333838 + * ACPI: Revert "ACPI / Battery: Remove battery's proc directory" + - LP: #1333838 + * NFSd: Move default initialisers from create_client() to alloc_client() + - LP: #1333838 + * NFSd: call rpc_destroy_wait_queue() from free_client() + - LP: #1333838 + * genirq: Provide irq_force_affinity fallback for non-SMP + - LP: #1333838 + * libata: clean up ZPODD when a port is detached + - LP: #1333838 + * ACPI / blacklist: Add dmi_enable_osi_linux quirk for Asus EEE PC 1015PX + - LP: #1333838 + * ACPI: Revert "ACPI / AC: convert ACPI ac driver to platform bus" + - LP: #1333838 + * ACPI / processor: do not mark present at boot but not onlined CPU as + onlined + - LP: #1333838 + * NFSD: Call ->set_acl with a NULL ACL structure if no entries + - LP: #1333838 + * ALSA: hda - add headset mic detect quirks for three Dell laptops + - LP: #1297581, #1333838 + * gpio: mcp23s08: Bug fix of SPI device tree registration. + - LP: #1333838 + * drm/i915/vlv: reset VLV media force wake request register + - LP: #1333838 + * ARM: dts: i.MX53: Fix ipu register space size + - LP: #1333838 + * mm, thp: close race between mremap() and split_huge_page() + - LP: #1333838 + * intel_pstate: Set turbo VID for BayTrail + - LP: #1333838 + * powerpc/powernv: Reset root port in firmware + - LP: #1333838 + * hrtimer: Set expiry time before switch_hrtimer_base() + - LP: #1333838 + * hwmon: (emc1403) fix inverted store_hyst() + - LP: #1333838 + * hwmon: (emc1403) Fix resource leak on module unload + - LP: #1333838 + * hwmon: (emc1403) Support full range of known chip revision numbers + - LP: #1333838 + * iommu/amd: Fix interrupt remapping for aliased devices + - LP: #1333838 + * ASoC: wm8962: Update register CLASS_D_CONTROL_1 to be non-volatile + - LP: #1333838 + * [media] V4L2: ov7670: fix a wrong index, potentially Oopsing the kernel + from user-space + - LP: #1333838 + * [media] V4L2: fix VIDIOC_CREATE_BUFS in 64- / 32-bit compatibility mode + - LP: #1333838 + * x86, mm, hugetlb: Add missing TLB page invalidation for hugetlb_cow() + - LP: #1333838 + * i2c: designware: Mask all interrupts during i2c controller enable + - LP: #1333838 + * i2c: s3c2410: resume race fix + - LP: #1333838 + * i2c: rcar: bail out on zero length transfers + - LP: #1333838 + * dm crypt: fix cpu hotplug crash by removing per-cpu structure + - LP: #1333838 + * metag: fix memory barriers + - LP: #1333838 + * metag: Reduce maximum stack size to 256MB + - LP: #1333838 + * drm/i915: restore backlight precision when converting from ACPI + - LP: #1333838 + * drm/i915: Increase WM memory latency values on SNB + - LP: #1333838 + * PCI: shpchp: Check bridge's secondary (not primary) bus speed + - LP: #1333838 + * parisc: ratelimit userspace segfault printing + - LP: #1333838 + * parisc: Improve LWS-CAS performance + - LP: #1333838 + * Target/iser: Fix wrong connection requests list addition + - LP: #1333838 + * Target/iser: Fix iscsit_accept_np and rdma_cm racy flow + - LP: #1333838 + * iscsi-target: Change BUG_ON to REJECT in iscsit_process_nop_out + - LP: #1333838 + * tcm_fc: Fix free-after-use regression in 
ft_free_cmd + - LP: #1333838 + * target: Don't allow setting WC emulation if device doesn't support + - LP: #1333838 + * arm: dts: Fix missing device_type="memory" for ste-ccu8540 + - LP: #1333838 + * mips: dts: Fix missing device_type="memory" property in memory nodes + - LP: #1333838 + * arm64: fix pud_huge() for 2-level pagetables + - LP: #1333838 + * libceph: fix corruption when using page_count 0 page in rbd + - LP: #1333838 + * clk: tegra: use pll_ref as the pll_e parent + - LP: #1333838 + * clk: tegra: Fix wrong value written to PLLE_AUX + - LP: #1333838 + * target: fix memory leak on XCOPY + - LP: #1333838 + * sysfs: make sure read buffer is zeroed + - LP: #1333838 + * cfg80211: free sme on connection failures + - LP: #1333838 + * sched: Sanitize irq accounting madness + - LP: #1333838 + * sched: Use CPUPRI_NR_PRIORITIES instead of MAX_RT_PRIO in cpupri check + - LP: #1333838 + * mac80211: fix suspend vs. association race + - LP: #1333838 + * mac80211: fix on-channel remain-on-channel + - LP: #1333838 + * af_iucv: wrong mapping of sent and confirmed skbs + - LP: #1333838 + * perf: Limit perf_event_attr::sample_period to 63 bits + - LP: #1333838 + * perf: Prevent false warning in perf_swevent_add + - LP: #1333838 + * drm/gf119-/disp: fix nasty bug which can clobber SOR0's clock setup + - LP: #1333838 + * drm/radeon: also try GART for CPU accessed buffers + - LP: #1333838 + * drm/radeon: handle non-VGA class pci devices with ATRM + - LP: #1333838 + * drm/radeon: fix register typo on si + - LP: #1333838 + * drm/radeon: avoid segfault on device open when accel is not working. + - LP: #1333838 + * drm/radeon/pm: don't allow debugfs/sysfs access when PX card is off + (v2) + - LP: #1333838 + * can: peak_pci: prevent use after free at netdev removal + - LP: #1333838 + * nfsd4: remove lockowner when removing lock stateid + - LP: #1333838 + * nfsd4: warn on finding lockowner without stateid's + - LP: #1333838 + * dma: mv_xor: Flush descriptors before activating a channel + - LP: #1333838 + * dmaengine: fix dmaengine_unmap failure + - LP: #1333838 + * hwpoison, hugetlb: lock_page/unlock_page does not match for handling a + free hugepage + - LP: #1333838 + * mm/memory-failure.c: fix memory leak by race between poison and + unpoison + - LP: #1333838 + * ARM: OMAP3: clock: Back-propagate rate change from cam_mclk to dpll4_m5 + on all OMAP3 platforms + - LP: #1333838 + * dmaengine: dw: went back to plain {request,free}_irq() calls + - LP: #1333838 + * ARM: omap5: hwmod_data: Correct IDLEMODE for McPDM + - LP: #1333838 + * Input: synaptics - add min/max quirk for the ThinkPad W540 + - LP: #1333838 + * ARM: OMAP2+: nand: Fix NAND on OMAP2 and OMAP3 boards + - LP: #1333838 + * futex: Add another early deadlock detection check + - LP: #1333838 + * futex: Prevent attaching to kernel threads + - LP: #1333838 + * ARM: OMAP4: Fix the boot regression with CPU_IDLE enabled + - LP: #1333838 + * cpufreq: remove race while accessing cur_policy + - LP: #1333838 + * cpufreq: cpu0: drop wrong devm usage + - LP: #1333838 + * ARM: imx: fix error handling in ipu device registration + - LP: #1333838 + * ALSA: hda - Fix onboard audio on Intel H97/Z97 chipsets + - LP: #1333838 + * ARM: 8051/1: put_user: fix possible data corruption in put_user + - LP: #1333838 + * ARM: 8064/1: fix v7-M signal return + - LP: #1333838 + * Input: synaptics - T540p - unify with other LEN0034 models + - LP: #1333838 + * drm/i915: Only copy back the modified fields to userspace from + execbuffer + - LP: #1333838 + * dm cache: always split 
discards on cache block boundaries + - LP: #1333838 + * virtio_blk: don't crash, report error if virtqueue is broken. + - LP: #1333838 + * virtio_blk: fix race between start and stop queue + - LP: #1333838 + * powerpc: Fix 64 bit builds with binutils 2.24 + - LP: #1333838 + * powerpc, kexec: Fix "Processor X is stuck" issue during kexec from ST + mode + - LP: #1333838 + * rtmutex: Fix deadlock detector for real + - LP: #1333838 + * drm/radeon: avoid crash if VM command submission isn't available + - LP: #1333838 + * drm/radeon: don't allow RADEON_GEM_DOMAIN_CPU for command submission + - LP: #1333838 + * iwlwifi: mvm: fix setting channel in monitor mode + - LP: #1333838 + * Staging: speakup: Move pasting into a work item + - LP: #1333838 + * USB: Avoid runtime suspend loops for HCDs that can't handle + suspend/resume + - LP: #1333838 + * can: only rename enabled led triggers when changing the netdev name + - LP: #1333838 + * USB: io_ti: fix firmware download on big-endian machines (part 2) + - LP: #1333838 + * USB: ftdi_sio: add NovaTech OrionLXm product ID + - LP: #1333838 + * USB: serial: option: add support for Novatel E371 PCIe card + - LP: #1333838 + * USB: cdc-wdm: properly include types.h + - LP: #1333838 + * md: always set MD_RECOVERY_INTR when aborting a reshape or other + "resync". + - LP: #1333838 + * xhci: delete endpoints from bandwidth list before freeing whole device + - LP: #1333838 + * md: always set MD_RECOVERY_INTR when interrupting a reshape thread. + - LP: #1333838 + * ALSA: hda/analog - Fix silent output on ASUS A8JN + - LP: #1333838 + * drm/radeon/dpm: resume fixes for some systems + - LP: #1333838 + * drm/radeon: use the CP DMA on CIK + - LP: #1333838 + * ALSA: hda/realtek - Correction of fixup codes for PB V7900 laptop + - LP: #1333838 + * ALSA: hda/realtek - Fix COEF widget NID for ALC260 replacer fixup + - LP: #1333838 + * iser-target: Add missing target_put_sess_cmd for ImmedateData failure + - LP: #1333838 + * iscsi-target: Fix wrong buffer / buffer overrun in + iscsi_change_param_value() + - LP: #1333838 + * percpu-refcount: fix usage of this_cpu_ops + - LP: #1333838 + * target: Fix alua_access_state attribute OOPs for un-configured devices + - LP: #1333838 + * mm: rmap: fix use-after-free in __put_anon_vma + - LP: #1333838 + * mm: add !pte_present() check on existing hugetlb_entry callbacks + - LP: #1333838 + * target: Fix NULL pointer dereference for XCOPY in target_put_sess_cmd + - LP: #1333838 + * Linux 3.13.11.4 + - LP: #1333838 + * powerpc/powernv: Infrastructure to read opal messages in generic + format. 
+ - LP: #1334268 + * powerpc/powernv: Infrastructure to support OPAL async completion + - LP: #1334268 + * powerpc/powernv: Enable fetching of platform sensor data + - LP: #1334268 + * powerpc/powernv: Fix endian issues with sensor code + - LP: #1334268 + * powerpc/powernv: Add OPAL message log interface + - LP: #1334268 + * powerpc/powernv: Fix kexec races going back to OPAL + - LP: #1334268 + * powerpc/powernv: Fix little endian issues in OPAL flash code + - LP: #1334268 + * powerpc/powernv: Fix little endian issues with opal_do_notifier calls + - LP: #1334268 + * powerpc/powernv: Fix little endian issues in OPAL error log code + - LP: #1334268 + * powerpc/powernv: Create OPAL sglist helper functions and fix endian + issues + - LP: #1334268 + * powerpc/powernv: Fix little endian issues in OPAL dump code + - LP: #1334268 + * powerpc: Fix error return in rtas_flash module init + - LP: #1334268 + * powerpc/powernv: Increase candidate fw image size + - LP: #1334268 + * powerpc/powernv: Return secondary CPUs to firmware before FW update + - LP: #1334268 + * powerpc/powernv: Pass buffer size to OPAL validate flash call + - LP: #1334268 + * gpio: add a driver for the Synopsys DesignWare APB GPIO block + - LP: #1334823 + * gpio: dwapb: drop irq_setup_generic_chip() + - LP: #1334823 + * gpio: dwapb: use a second irq chip + - LP: #1334823 + * lzo: properly check for overruns + - LP: #1335313 + - CVE-2014-4608 + * lz4: ensure length does not wrap + - LP: #1335314 + - CVE-2014-4611 + * netfilter: nf_nat: fix oops on netns removal + - LP: #1314274 + * ALSA: hda - add device ID for Broadwell display audio controller + - LP: #1188091 + * ALSA: hda - add codec ID for Broadwell display audio codec + - LP: #1188091 + * ALSA: hda/hdmi - apply all Haswell fix-ups to Broadwell display codec + - LP: #1188091 + * ALSA: hda - using POS_FIX_LPIB on Broadwell HDMI Audio + - LP: #1188091 + + -- Luis Henriques Tue, 01 Jul 2014 15:10:43 +0100 + +linux (3.13.0-30.55) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * x86_64,ptrace: Enforce RIP <= TASK_SIZE_MAX (CVE-2014-4699) + - LP: #1337339 + - CVE-2014-4699 + + -- Luis Henriques Thu, 03 Jul 2014 16:15:57 +0100 + +linux (3.13.0-30.54) trusty; urgency=low + + [ Adam Conrad ] + + * [Config] Enable building the sata-modules udeb on ppc64el. 
+ - LP: #1323980 + + [ Anton Blanchard ] + + * SAUCE: (no-up) powerpc: 64bit sendfile is capped at 2GB + - LP: #1328230 + + [ Ben Widawsky ] + + * SAUCE: i915_bdw: drm/i915: Fix PSR programming + - LP: #1321729 + * SAUCE: i915_bdw: drm/i915: Correct PPGTT total size + - LP: #1321729 + + [ Chris Wilson ] + + * SAUCE: i915_bdw: drm/i915: Broadwell expands ACTHD to 64bit + - LP: #1321729 + + [ Damien Lespiau ] + + * SAUCE: i915_bdw: drm/i915/bdw: Implement + Wa4x4STCOptimizationDisable:bdw + - LP: #1321729 + + [ Dave Chiluk ] + + * [Config] Enable CONFIG_IP_VS_IPV6=y + - LP: #1300739 + + [ Kamal Mostafa ] + + * [Config] add debian/gbp.conf + * Release Tracking Bug + - LP: #1328286 + + [ Mika Kuoppala ] + + * SAUCE: i915_bdw: drm/i915: add render state initialization + - LP: #1321729 + + [ Paulo Zanoni ] + + * SAUCE: i915_bdw: drm/i915: fix assert_cursor on BDW + - LP: #1321729 + + [ Steven Rostedt ] + + * SAUCE: i915_bdw: drm/i915: Do not dereference pointers from ring buffer + in evict event + - LP: #1321729 + + [ Tim Gardner ] + + * [Config] CONFIG_POWERNV_CPUFREQ=y for ppc64el + - LP: #1324571 + * [Debian] Treat vdso install as an environment variable + * [Config] Treat vdso install as an environment variable + * [Config] CONFIG_MLX4_DEBUG=y + - LP: #1328256 + * [Config] CONFIG_I40EVF=m, CONFIG_I40E_DCB=y, CONFIG_I40E_VXLAN=y + - LP: #1328037 + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Rebase to drm-intel-next-2014-03-07 + fixes + - LP: #1321729 + * SAUCE: i915_bdw: Add BDW specific power well calls + - LP: #1317865 + + [ Upstream Kernel Changes ] + + * drm: expose subpixel order name routine v3 + * drm: dp helper: Add DP test sink CRC definition. + * drm: export cmdline and preferred mode functions from fb helper + * hugetlb: ensure hugepage access is denied if hugepages are not + supported + - LP: #1328251 + * powerpc/powernv: Move SG list structure to header file + - LP: #1326015 + * powerpc/powernv: Read OPAL error log and export it through sysfs + - LP: #1326015 + * powerpc/powernv Platform dump interface + - LP: #1326015 + * pci_regs.h: Add PCI bus link speed and width defines + - LP: #1328037 + * net_tstamp: Add SIOCGHWTSTAMP ioctl to match SIOCSHWTSTAMP + - LP: #1328037 + * PCI/MSI: Add pci_enable_msi_range() and pci_enable_msix_range() + - LP: #1328037 + * net: Change skb_get_rxhash to skb_get_hash + - LP: #1328037 + * net: Add utility functions to clear rxhash + - LP: #1328037 + * net: Add function to set the rxhash + - LP: #1328037 + * i40e: set pf_id based on device and function numbers + - LP: #1328037 + * i40e: register file updates + - LP: #1328037 + * i40e: clear AQ head and tail registers + - LP: #1328037 + * i40e: simplify aq head-tail-len setups + - LP: #1328037 + * i40e: firmware version fields offsets update + - LP: #1328037 + * i40e: allow one more vector for VFs + - LP: #1328037 + * i40e: select reset counters correctly + - LP: #1328037 + * i40e: retry call on timeout + - LP: #1328037 + * i40e: properly add VF MAC addresses + - LP: #1328037 + * i40e: fix debugging messages + - LP: #1328037 + * i40e: default debug mask setting + - LP: #1328037 + * i40e: add interrupt test + - LP: #1328037 + * i40e: add support for triggering EMPR + - LP: #1328037 + * i40e: restrict diag test length + - LP: #1328037 + * i40e: sync header files with hardware + - LP: #1328037 + * i40e: separate TSYNVALID and TSYNINDX fields in Rx descriptor + - LP: #1328037 + * i40e: check multi-bit state correctly + - LP: #1328037 + * i40e: get media type during link info + - LP: #1328037 + * i40e: Add 
flag for L2 VEB filtering + - LP: #1328037 + * i40e: enable early hardware support + - LP: #1328037 + * i40e: whitespace + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40e: refactor reset code + - LP: #1328037 + * i40e: Enable all PCTYPEs except FCOE for RSS. + - LP: #1328037 + * i40e: only set up the rings to be used + - LP: #1328037 + * i40e: clear test state bit after all ethtool tests + - LP: #1328037 + * i40e: refactor ethtool tests + - LP: #1328037 + * i40e: add num_VFs message + - LP: #1328037 + * i40e: Add a new variable to track number of pf instances + - LP: #1328037 + * i40e: restrict diag test messages + - LP: #1328037 + * i40e: loopback info and set loopback fix + - LP: #1328037 + * i40e: complain about out-of-range descriptor request + - LP: #1328037 + * i40e: remove and fix confusing define name + - LP: #1328037 + * i40e: Bump version number + - LP: #1328037 + * i40e: fix up some of the ethtool connection reporting + - LP: #1328037 + * i40e: fix pf reset after offline test + - LP: #1328037 + * i40e: Tell the stack about our actual number of queues + - LP: #1328037 + * i40e: init flow control settings to disabled + - LP: #1328037 + * i40e: trivial fixes + - LP: #1328037 + * i40e: use same number of queues as CPUs + - LP: #1328037 + * i40e: reinit flow for the main VSI + - LP: #1328037 + * i40e: function to reconfigure RSS queues and rebuild + - LP: #1328037 + * i40e: Add basic support for get/set channels for RSS + - LP: #1328037 + * i40e: rtnl_lock in reset path fixes + - LP: #1328037 + * i40e: support for suspend and resume + - LP: #1328037 + * i40e: Remove FCoE in i40e_virtchnl_pf.c code + - LP: #1328037 + * i40e: Fix dump output from debugfs calls + - LP: #1328037 + * i40e: prevent null pointer exception in dump descriptor + - LP: #1328037 + * i40e: simplify error messages for dump descriptor + - LP: #1328037 + * i40e: fix up scanf decoders + - LP: #1328037 + * i40e: more print_hex_dump use + - LP: #1328037 + * i40e: Fix wrong mask bits being used in misc interrupt + - LP: #1328037 + * i40e: Bump version number + - LP: #1328037 + * i40e: Fix off by one in i40e_dbg_command_write + - LP: #1328037 + * i40e: make functions static and remove dead code + - LP: #1328037 + * i40evf: main driver core + - LP: #1328037 + * i40evf: transmit and receive functionality + - LP: #1328037 + * i40evf: core ethtool functionality + - LP: #1328037 + * i40evf: virtual channel interface + - LP: #1328037 + * i40evf: driver core headers + - LP: #1328037 + * i40evf: init code and hardware support + - LP: #1328037 + * i40evf: add driver to kernel build system + - LP: #1328037 + * i40evf: A0 silicon specific + - LP: #1328037 + * i40e: using for_each_set_bit to simplify the code + - LP: #1328037 + * i40e: Suppress HMC error to Interrupt message level + - LP: #1328037 + * i40e: Populate and check pci bus speed and width + - LP: #1328037 + * i40e: add wake-on-lan support + - LP: #1328037 + * i40e: fix curly brace use and return type + - LP: #1328037 + * i40e: Implementation of VXLAN ndo's + - LP: #1328037 + * i40e: Rx checksum offload for VXLAN + - LP: #1328037 + * i40e: move i40e_reset_vf + - LP: #1328037 + * i40e: refactor VF reset flow + - LP: #1328037 + * i40e: remove redundant code + - LP: #1328037 + * i40e: remove chatty log messages + - LP: #1328037 + * i40e: fix error return + - LP: #1328037 + * i40e: be more informative + - LP: #1328037 + * i40e: make a define from a large constant + - LP: #1328037 + * i40e: update led set args + - LP: #1328037 + * i40e: report VF MAC addresses 
correctly + - LP: #1328037 + * i40e: Dump the whole NVM, not half + - LP: #1328037 + * i40e: fix mac address checking + - LP: #1328037 + * i40e: Change the ethtool NVM read method to use AQ + - LP: #1328037 + * i40e: fix constant cast issues + - LP: #1328037 + * i40e: guard against vf message races + - LP: #1328037 + * i40e: add header file flag _I40E_TXRX_H_ + - LP: #1328037 + * i40e: use functions to enable and disable icr 0 + - LP: #1328037 + * i40e: reinit buffer size each time + - LP: #1328037 + * i40e: fix error handling when alloc of vsi array fails + - LP: #1328037 + * i40e: keep allocated memory in structs + - LP: #1328037 + * i40e: catch unset q_vector + - LP: #1328037 + * i40e: Fix ring allocation + - LP: #1328037 + * i40e: I40E_FLAG_MQ_ENABLED is not used + - LP: #1328037 + * i40e: Remove unnecessary prototypes + - LP: #1328037 + * i40e: remove un-necessary io-write + - LP: #1328037 + * i40e: Record dma buffer info for dummy packets + - LP: #1328037 + * i40e: Fix SR-IOV VF port VLAN + - LP: #1328037 + * i40e: fix whitespace + - LP: #1328037 + * i40e: avoid unnecessary register read + - LP: #1328037 + * i40e: Do not enable default port on the VEB + - LP: #1328037 + * i40e: use struct assign instead of memcpy + - LP: #1328037 + * i40e: don't allocate zero size + - LP: #1328037 + * i40e: acknowledge VFLR when disabling SR-IOV + - LP: #1328037 + * i40e: support VFs on PFs other than 0 + - LP: #1328037 + * i40e: Fix VF driver MAC address configuration + - LP: #1328037 + * i40e: use correct struct for get and update vsi params + - LP: #1328037 + * i40e: Hide the Port VLAN VLAN ID + - LP: #1328037 + * i40e: Admin queue shutdown fixes + - LP: #1328037 + * i40e: check asq alive before notify + - LP: #1328037 + * i40e: Do not allow AQ calls from ndo-ops + - LP: #1328037 + * i40e: Expose AQ debugfs hooks + - LP: #1328037 + * i40e: Do not enable broadcast promiscuous by default + - LP: #1328037 + * i40e: Stop accepting any VLAN tag on VLAN 0 filter set + - LP: #1328037 + * i40e: Allow VF to set already assigned MAC address + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40e: Add code to wait for FW to complete in reset path + - LP: #1328037 + * i40e: update firmware api to 1.1 + - LP: #1328037 + * i40e: Reduce range of interrupt reg in reg test + - LP: #1328037 + * i40e: move PF ID init from PF reset to SC init + - LP: #1328037 + * i40e: check MAC type before any REG access + - LP: #1328037 + * i40e: rework shadow ram read functions + - LP: #1328037 + * i40e: whitespace paren and comment tweaks + - LP: #1328037 + * i40e: Enable/Disable PF switch LB on SR-IOV configure changes + - LP: #1328037 + * i40e: remove redundant AQ enable + - LP: #1328037 + * i40e: correctly setup ARQ descriptors + - LP: #1328037 + * i40e: Re-enable interrupt on ICR0 + - LP: #1328037 + * i40e: use kernel specific defines + - LP: #1328037 + * i40e: Fix GPL header + - LP: #1328037 + * i40e: Fix MAC format in Write MAC address AQ cmd + - LP: #1328037 + * i40e: add a comment on barrier and fix panic on reset + - LP: #1328037 + * i40e: disable packet split + - LP: #1328037 + * i40e: Cleanup reconfig rss path + - LP: #1328037 + * i40e: release NVM resource reservation on startup + - LP: #1328037 + * i40e: remove interrupt on AQ error + - LP: #1328037 + * i40e: accept pf to pf adminq messages + - LP: #1328037 + * i40e: shorten wordy fields + - LP: #1328037 + * i40e: trivial: formatting and checkpatch fixes + - LP: #1328037 + * i40e: fix spelling errors + - LP: #1328037 + * i40e: Add a dummy packet template + 
- LP: #1328037 + * i40e: Turn flow director off in MFP mode + - LP: #1328037 + * i40e: use assignment instead of memcpy + - LP: #1328037 + * i40e: drop unused macros + - LP: #1328037 + * i40e: Update the Current NVM version Low value + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40e: fix long lines + - LP: #1328037 + * i40e: Cleanup Doxygen warnings + - LP: #1328037 + * i40e: Setting queue count to 1 using ethtool is valid + - LP: #1328037 + * i40e: do not bail when disabling if Tx queue disable fails + - LP: #1328037 + * i40e: allow VF to remove any MAC filter + - LP: #1328037 + * i40e: check for possible incorrect ipv6 checksum + - LP: #1328037 + * i40e: adjust ITR max and min values + - LP: #1328037 + * i40e: clear qtx_head before enabling Tx queue + - LP: #1328037 + * i40e: call clear_pxe after adminq is initialized + - LP: #1328037 + * i40e: enable PTP + - LP: #1328037 + * i40e: fix log message wording + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40evf: fix s390 build failure due to implicit prefetch.h + - LP: #1328037 + * i40e: remove extra register write + - LP: #1328037 + * i40e: associate VMDq queue with VM type + - LP: #1328037 + * i40e: make message meaningful + - LP: #1328037 + * i40e: whitespace fixes + - LP: #1328037 + * i40e: trivial cleanup + - LP: #1328037 + * i40e: Bump version number + - LP: #1328037 + * i40e: Warn admin to reload VF driver on port VLAN configuration + - LP: #1328037 + * i40e: Retain MAC filters on port VLAN deletion + - LP: #1328037 + * i40e: Remove autogenerated Module.symvers file. + - LP: #1328037 + * i40e: check desc pointer before printing + - LP: #1328037 + * i40e: updates to AdminQ interface + - LP: #1328037 + * i40e: fix compile warning on checksum_local + - LP: #1328037 + * i40e: Change firmware workaround + - LP: #1328037 + * i40e: whitespace fixes + - LP: #1328037 + * i40e: rename defines + - LP: #1328037 + * i40e: refactor flow director + - LP: #1328037 + * i40e: implement DCB support infastructure + - LP: #1328037 + * i40e: add DCB and DCBNL support + - LP: #1328037 + * i40e: add DCB option to Kconfig + - LP: #1328037 + * i40e: Fix device ID define names to align to standard + - LP: #1328037 + * i40e: Add missing braces to i40e_dcb_need_reconfig() + - LP: #1328037 + * i40e: spelling error + - LP: #1328037 + * i40e: bump driver version + - LP: #1328037 + * i40evf: trivial fixes + - LP: #1328037 + * i40evf: clean up memsets + - LP: #1328037 + * i40e: Setting i40e_down bit for tx_timeout + - LP: #1328037 + * i40e: remove dead code + - LP: #1328037 + * i40e: set VF state to active when reset is complete + - LP: #1328037 + * i40e: reset VFs after PF reset + - LP: #1328037 + * i40e: enable extant VFs + - LP: #1328037 + * i40e: don't handle VF reset on unload + - LP: #1328037 + * i40evf: clean up adapter struct + - LP: #1328037 + * i40evf: fix bogus comment + - LP: #1328037 + * i40evf: don't store unnecessary array of strings + - LP: #1328037 + * i40evf: change type of flags variable + - LP: #1328037 + * i40evf: refactor reset handling + - LP: #1328037 + * net: i40evf: Remove duplicate include + - LP: #1328037 + * i40e: Use pci_enable_msix_range() instead of pci_enable_msix() + - LP: #1328037 + * i40evf: request reset on tx hang + - LP: #1328037 + * i40evf: remove VLAN filters on close + - LP: #1328037 + * i40evf: fix multiple crashes on remove + - LP: #1328037 + * i40evf: get rid of pci_using_dac + - LP: #1328037 + * i40evf: fix up strings in init task + - LP: #1328037 + * i40evf: remove bogus comment + - LP: 
#1328037 + * i40evf: don't guess device name + - LP: #1328037 + * i40evf: store ring size in ring structs + - LP: #1328037 + * i40evf: update version and copyright date + - LP: #1328037 + * i40evf: remove errant space + - LP: #1328037 + * i40e: remove unnecessary delay + - LP: #1328037 + * i40e: tighten up ring enable/disable flow + - LP: #1328037 + * i40e: Change MSIX to MSI-X + - LP: #1328037 + * i40e and i40evf: Bump driver versions + - LP: #1328037 + * i40evf: Enable the ndo_set_features netdev op + - LP: #1328037 + * i40e: Flow Director sideband accounting + - LP: #1328037 + * i40e: Prevent overflow due to kzalloc + - LP: #1328037 + * i40e/i40evf: i40e implementation for skb_set_hash + - LP: #1328037 + * i40e: clean up comment style + - LP: #1328037 + * i40e: Remove a FW workaround for Number of MSIX vectors + - LP: #1328037 + * i40e: count timeout events + - LP: #1328037 + * i40e: Remove a redundant filter addition + - LP: #1328037 + * i40e: Fix static checker warning + - LP: #1328037 + * i40e: fix nvm version and remove firmware report + - LP: #1328037 + * i40e/i40evf: carefully fill tx ring + - LP: #1328037 + * i40e/i40evf: Bump pf&vf build versions + - LP: #1328037 + * i40e: delete netdev after deleting napi and vectors + - LP: #1328037 + * i40e: Fix a bug in the update logic for FDIR SB filter. + - LP: #1328037 + * i40e/i40evf: Some flow director HW definition fixes + - LP: #1328037 + * i40e: make string references to q be queue + - LP: #1328037 + * i40e: cleanup strings + - LP: #1328037 + * i40e: simplified init string + - LP: #1328037 + * i40e: Fix function comments + - LP: #1328037 + * i40e: Define a new state variable to keep track of feature auto disable + - LP: #1328037 + * i40e: Add code to handle FD table full condition + - LP: #1328037 + * i40e: Bug fix for FDIR replay logic + - LP: #1328037 + * i40e: Let MDD events be handled by MDD handler + - LP: #1328037 + * i40e/i40evf: Use correct number of VF vectors + - LP: #1328037 + * i40e/i40evf: Use dma_set_mask_and_coherent + - LP: #1328037 + * net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq + - LP: #1328037 + * i40e: Don't receive packets when the napi budget == 0 + - LP: #1328037 + * i40evf: Rename i40e_ptype_lookup i40evf_ptype_lookup + - LP: #1328037 + * net/i40e: Avoid double setting of NETIF_F_SG for the HW encapsulation + feature mask + - LP: #1328037 + * i40e: support VF link state ndo + - LP: #1328037 + * i40evf: correctly program RSS HLUT table + - LP: #1328037 + * i40evf: use min_t + - LP: #1328037 + * i40e: Patch to enable Ethtool/netdev feature flag for NTUPLE control + - LP: #1328037 + * i40e: Refactor and cleanup i40e_open(), adding i40e_vsi_open() + - LP: #1328037 + * i40e/i40evf: enable hardware feature head write back + - LP: #1328037 + * i40e/i40evf: reduce context descriptors + - LP: #1328037 + * i40e: potential array underflow in i40e_vc_process_vf_msg() + - LP: #1328037 + * i40e/i40evf: Bump build versions + - LP: #1328037 + * i40e/i40evf: Add EEE LPI stats + - LP: #1328037 + * i40e: Fix a message string + - LP: #1328037 + * i40evf: don't shut down admin queue on error + - LP: #1328037 + * i40evf: clean up init error messages + - LP: #1328037 + * i40e: Delete ATR filter on RST + - LP: #1328037 + * i40evf: fix oops in watchdog handler + - LP: #1328037 + * i40e: Make the alloc and free queue vector calls orthogonal + - LP: #1328037 + * i40e: eeprom integrity check on load and empr + - LP: #1328037 + * i40e: Cleanup in FDIR SB ethtool code + - LP: #1328037 + * i40e: Add functionality for 
FD SB to drop packets + - LP: #1328037 + * i40evf: remove double space after return + - LP: #1328037 + * i40e: check for netdev before debugfs use + - LP: #1328037 + * i40e/i40evf: Add an FD message level + - LP: #1328037 + * i40e: Use DEBUG_FD message level for an FD message + - LP: #1328037 + * i40e: fix function kernel doc description + - LP: #1328037 + * i40e/i40evf: fix error checking path + - LP: #1328037 + * i40e/i40evf: Remove addressof casts to same type + - LP: #1328037 + * i40e: Remove casts of pointer to same type + - LP: #1328037 + * i40evf: remove open-coded skb_cow_head + - LP: #1328037 + * i40evf: program RSS LUT correctly + - LP: #1328037 + * i40e: remove open-coded skb_cow_head + - LP: #1328037 + * i40e: fix TCP flag replication for hardware offload + - LP: #1328037 + * e1000e/igb/ixgbe/i40e: fix message terminations + - LP: #1328037 + * i40e: fix Timesync Tx interrupt handler code + - LP: #1328037 + * mm: use paravirt friendly ops for NUMA hinting ptes + - LP: #1313450 + + [ Ville Syrjälä ] + + * SAUCE: i915_bdw: drm/i915: Fix scanline counter fixup on BDW + - LP: #1321729 + + -- Kamal Mostafa Mon, 09 Jun 2014 15:09:43 -0700 + +linux (3.13.0-29.53) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * futex: Forbid uaddr == + uaddr2 in futex_requeue(..., requeue_pi=1) + - LP: #1326367 + - CVE-2014-3153 + * futex: Validate atomic acquisition in futex_lock_pi_atomic() + - LP: #1326367 + - CVE-2014-3153 + * futex: Always cleanup owner tid in unlock_pi + - LP: #1326367 + - CVE-2014-3153 + * futex: Make lookup_pi_state more robust + - LP: #1326367 + - CVE-2014-3153 + + -- Brad Figg Wed, 04 Jun 2014 08:25:41 -0700 + +linux (3.13.0-29.52) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - re-used previous tracking bug + + [ Adam Conrad ] + + * [Config] Normalize AHCI configs on powerpc/ppc64el with the rest of the + architectures + - LP: #1323980 + + [ Tanmay Inamdar ] + + * SAUCE: Add MSI/MSI-X driver for APM PCI bus + - LP: #1318977 + + -- Luis Henriques Wed, 28 May 2014 09:38:41 +0100 + +linux (3.13.0-28.51) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1322112 + + [ Adam Lee ] + + * SAUCE: (no-up) rtlwifi: rtl8723be: disable MSI interrupts mode + - LP: #1310512, #1320070 + + [ Andy Whitcroft ] + + * [Packaging] ppc64el is a powerpc kernel arch and needs its quirks + - LP: #1318848 + + [ Ben Collins ] + + * [Config] Switch to grub-ieee1275 as recommended on book3e systems + - LP: #1318629 + + [ Upstream Kernel Changes ] + + * Revert "PCI: Enable INTx in pci_reenable_device() only when MSI/MSI-X + not enabled" + - LP: #1320946 + * Bluetooth: allocate static minor for vhci + - LP: #1317336 + * core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle + errors + - LP: #1320946 + * __dentry_path() fixes + - LP: #1320946 + * drm/i915: quirk invert brightness for Acer Aspire 5336 + - LP: #1320946 + * w1: fix w1_send_slave dropping a slave id + - LP: #1320946 + * ARM: 7954/1: mm: remove remaining domain support from ARMv6 + - LP: #1320946 + * matroxfb: restore the registers M_ACCESS and M_PITCH + - LP: #1320946 + * framebuffer: fix cfb_copyarea + - LP: #1320946 + * mach64: use unaligned access + - LP: #1320946 + * mach64: fix cursor when character width is not a multiple of 8 pixels + - LP: #1320946 + * tgafb: fix mode setting with fbset + - LP: #1320946 + * tgafb: fix data copying + - LP: #1320946 + * hvc: ensure hvc_init is only ever called once in hvc_console.c + -
LP: #1320946 + * ARM: dts: Keep G3D regulator always on for exynos5250-arndale + - LP: #1320946 + * PCI: mvebu: Fix potential issue in range parsing + - LP: #1320946 + * usb: dwc3: fix wrong bit mask in dwc3_event_devt + - LP: #1320946 + * x86, AVX-512: AVX-512 Feature Detection + - LP: #1320946 + * x86, AVX-512: Enable AVX-512 States Context Switch + - LP: #1320946 + * s390/cio: fix driver callback initialization for ccw consoles + - LP: #1320946 + * ARM: Fix default CPU selection for ARCH_MULTI_V5 + - LP: #1320946 + * omap3isp: preview: Fix the crop margins + - LP: #1320946 + * ACPICA: Restore code that repairs NULL package elements in return + values. + - LP: #1320946 + * media: gspca: sn9c20x: add ID for Genius Look 1320 V2 + - LP: #1320946 + * m88rs2000: add caps FE_CAN_INVERSION_AUTO + - LP: #1320946 + * m88rs2000: prevent frontend crash on continuous transponder scans + - LP: #1320946 + * mmc: sdhci-bcm-kona: fix build errors when built-in + - LP: #1320946 + * usb: musb: avoid NULL pointer dereference + - LP: #1320946 + * uvcvideo: Do not use usb_set_interface on bulk EP + - LP: #1320946 + * drm/i915: Don't clobber CHICKEN_PIPESL_1 on BDW + - LP: #1320946 + * usb: dwc3: fix randconfig build errors + - LP: #1320946 + * usb: gadget: atmel_usba: fix crashed during stopping when DEBUG is + enabled + - LP: #1320946 + * blktrace: fix accounting of partially completed requests + - LP: #1320946 + * rtlwifi: rtl8192cu: Fix too long disable of IRQs + - LP: #1320946 + * rtlwifi: rtl8192se: Fix too long disable of IRQs + - LP: #1320946 + * rtlwifi: rtl8188ee: Fix too long disable of IRQs + - LP: #1320946 + * rtlwifi: rtl8723ae: Fix too long disable of IRQs + - LP: #1320946 + * xhci: Prevent runtime pm from autosuspending during initialization + - LP: #1320946 + * staging:serqt_usb2: Fix sparse warning restricted __le16 degrades to + integer + - LP: #1320946 + * mtd: atmel_nand: Disable subpage NAND write when using Atmel PMECC + - LP: #1320946 + * iwlwifi: dvm: take mutex when sending SYNC BT config command + - LP: #1320946 + * virtio_balloon: don't softlockup on huge balloon changes. 
+ - LP: #1320946 + * arm64: Make DMA coherent and strongly ordered mappings not executable + - LP: #1320946 + * arm64: Do not synchronise I and D caches for special ptes + - LP: #1320946 + * ARM: OMAP2+: INTC: Acknowledge stuck active interrupts + - LP: #1320946 + * ARM: dts: am33xx: correcting dt node unit address for usb + - LP: #1320946 + * mtip32xx: Set queue bounce limit + - LP: #1320946 + * mtip32xx: Unmap the DMA segments before completing the IO request + - LP: #1320946 + * mtip32xx: mtip_async_complete() bug fixes + - LP: #1320946 + * ath9k: fix ready time of the multicast buffer queue + - LP: #1320946 + * KVM: s390: Optimize ucontrol path + - LP: #1320946 + * mei: fix memory leak of pending write cb objects + - LP: #1320946 + * usb: gadget: tcm_usb_gadget: stop format strings + - LP: #1320946 + * usb: phy: Add ulpi IDs for SMSC USB3320 and TI TUSB1210 + - LP: #1320946 + * USB: unbind all interfaces before rebinding any + - LP: #1320946 + * IB/ipath: Fix potential buffer overrun in sending diag packet routine + - LP: #1320946 + * IB/qib: Fix debugfs ordering issue with multiple HCAs + - LP: #1320946 + * IB/qib: add missing braces in do_qib_user_sdma_queue_create() + - LP: #1320946 + * IB/nes: Return an error on ib_copy_from_udata() failure instead of NULL + - LP: #1320946 + * ALSA: hda/realtek - Restore default value for ALC283 + - LP: #1320946 + * mfd: sec-core: Fix possible NULL pointer dereference when i2c_new_dummy + error + - LP: #1320946 + * regulator: arizona-ldo1: Correct default regulator init_data + - LP: #1320946 + * ASoC: cs42l73: Fix mask bits for SOC_VALUE_ENUM_SINGLE + - LP: #1320946 + * ASoC: cs42l52: Fix mask bits for SOC_VALUE_ENUM_SINGLE + - LP: #1320946 + * drm/i915: Do not dereference pointers from ring buffer in evict event + - LP: #1320946 + * mfd: Include all drivers in subsystem menu + - LP: #1320946 + * mfd: max8997: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max77686: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max8998: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max8925: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: 88pm860x: Fix I2C device resource leak on regmap init fail + - LP: #1320946 + * mfd: 88pm860x: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max77693: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: 88pm800: Fix I2C device resource leak if probe fails + - LP: #1320946 + * mfd: tps65910: Fix possible invalid pointer dereference on + regmap_add_irq_chip fail + - LP: #1320946 + * MIPS: KVM: Pass reserved instruction exceptions to guest + - LP: #1320946 + * ASoC: cs42l51: Fix SOC_DOUBLE_R_SX_TLV shift values for ADC, PCM, and + Analog kcontrols + - LP: #1320946 + * mac80211: fix potential use-after-free + - LP: #1320946 + * mac80211: fix suspend vs. authentication race + - LP: #1320946 + * mac80211: fix WPA with VLAN on AP side with ps-sta again + - LP: #1320946 + * pid: get pid_t ppid of task in init_pid_ns + - LP: #1320946 + * audit: restore order of tty and ses fields in log output + - LP: #1320946 + * audit: convert PPIDs to the inital PID namespace. 
+ - LP: #1320946 + * mfd: kempld-core: Fix potential hang-up during boot + - LP: #1320946 + * drm/i915: Fix unsafe loop iteration over vma whilst unbinding them + - LP: #1320946 + * powerpc/compat: 32-bit little endian machine name is ppcle, not ppc + - LP: #1320946 + * clk: s2mps11: Fix possible NULL pointer dereference + - LP: #1320946 + * spi: efm32: use $vendor,$device scheme for compatible string + - LP: #1320946 + * ALSA: hda - add headset mic detect quirks for three Dell laptops + - LP: #1297581, #1320946 + * KVM: PPC: Book3S HV: Fix KVM hang with CONFIG_KVM_XICS=n + - LP: #1320946 + * gpio: mxs: Allow for recursive enable_irq_wake() call + - LP: #1320946 + * nfsd4: buffer-length check for SUPPATTR_EXCLCREAT + - LP: #1320946 + * nfsd4: session needs room for following op to error out + - LP: #1320946 + * nfsd4: leave reply buffer space for failed setattr + - LP: #1320946 + * nfsd4: fix test_stateid error reply encoding + - LP: #1320946 + * nfsd: notify_change needs elevated write count + - LP: #1320946 + * dm cache: prevent corruption caused by discard_block_size > + cache_block_size + - LP: #1320946 + * dm transaction manager: fix corruption due to non-atomic transaction + commit + - LP: #1320946 + * dm: take care to copy the space map roots before locking the superblock + - LP: #1320946 + * aio: v4 ensure access to ctx->ring_pages is correctly serialised for + migration + - LP: #1320946 + * NFSD: Traverse unconfirmed client through hash-table + - LP: #1320946 + * lockd: ensure we tear down any live sockets when socket creation fails + during lockd_up + - LP: #1320946 + * drm/i915/tv: fix gen4 composite s-video tv-out + - LP: #1320946 + * dm thin: fix dangling bio in process_deferred_bios error path + - LP: #1320946 + * em28xx: fix PCTV 290e LNA oops + - LP: #1320946 + * NFSv4: Fix a use-after-free problem in open() + - LP: #1320946 + * nfsd4: fix setclientid encode size + - LP: #1320946 + * MIPS: Hibernate: Flush TLB entries in swsusp_arch_resume() + - LP: #1320946 + * ALSA: hda - Enable beep for ASUS 1015E + - LP: #1320946 + * nfsd: check passed socket's net matches NFSd superblock's one + - LP: #1320946 + * s390/bitops,atomic: add missing memory barriers + - LP: #1320946 + * IB/mthca: Return an error on ib_copy_to_udata() failure + - LP: #1320946 + * IB/ehca: Returns an error on ib_copy_to_udata() failure + - LP: #1320946 + * drm/qxl: unset a pointer in sync_obj_unref + - LP: #1320946 + * smarter propagate_mnt() + - LP: #1320946 + * don't bother with {get,put}_write_access() on non-regular files + - LP: #1320946 + * reiserfs: fix race in readdir + - LP: #1320946 + * drm/vmwgfx: correct fb_fix_screeninfo.line_length + - LP: #1320946 + * ALSA: hda - Fix silent speaker output due to mute LED fixup + - LP: #1320946 + * drm/radeon: clear needs_reset flag if IB test fails + - LP: #1320946 + * drm/radeon: call drm_edid_to_eld when we update the edid + - LP: #1320946 + * drm/radeon: fix endian swap on hawaii clear state buffer setup + - LP: #1320946 + * drm/radeon: fix typo in spectre_golden_registers + - LP: #1320946 + * sh: fix format string bug in stack tracer + - LP: #1320946 + * ocfs2: dlm: fix lock migration crash + - LP: #1320946 + * ocfs2: dlm: fix recovery hung + - LP: #1320946 + * ocfs2: do not put bh when buffer_uptodate failed + - LP: #1320946 + * ocfs2: fix panic on kfree(xattr->name) + - LP: #1320946 + * xattr: guard against simultaneous glibc header inclusion + - LP: #1320946 + * drm/i915: move power domain init earlier during system resume + - LP: #1320946 + * Skip 
intel_crt_init for Dell XPS 8700 + - LP: #1320946 + * dm cache: fix a lock-inversion + - LP: #1320946 + * thinkpad_acpi: Fix inconsistent mute LED after resume + - LP: #1320946 + * iser-target: Add missing se_cmd put for WRITE_PENDING in tx_comp_err + - LP: #1320946 + * iscsi-target: Fix ERL=2 ASYNC_EVENT connection pointer bug + - LP: #1320946 + * Target/sbc: Initialize COMPARE_AND_WRITE write_sg scatterlist + - LP: #1320946 + * mm: page_alloc: spill to remote nodes before waking kswapd + - LP: #1320946 + * mm: try_to_unmap_cluster() should lock_page() before mlocking + - LP: #1320946 + * mm: hugetlb: fix softlockup when a large number of hugepages are freed. + - LP: #1320946 + * hung_task: check the value of "sysctl_hung_task_timeout_sec" + - LP: #1320946 + * DRM: armada: fix corruption while loading cursors + - LP: #1320946 + * ALSA: ice1712: Fix boundary checks in PCM pointer ops + - LP: #1320946 + * lib/percpu_counter.c: fix bad percpu counter state during suspend + - LP: #1320946 + * md/raid1: r1buf_pool_alloc: free allocate pages when subsequent + allocation fails. + - LP: #1320946 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1320946 + * b43: Fix machine check error due to improper access of + B43_MMIO_PSM_PHY_HDR + - LP: #1320946 + * x86-64, modify_ldt: Ban 16-bit segments on 64-bit kernels + - LP: #1320946 + * target/tcm_fc: Fix use-after-free of ft_tpg + - LP: #1320946 + * ib_srpt: Use correct ib_sg_dma primitives + - LP: #1320946 + * Linux 3.13.11.1 + - LP: #1320946 + * Linux 3.13.11.2 + - LP: #1320946 + * hp-wireless: new driver for hp wireless button for Windows 8 + - LP: #1303737 + * x86, platform: Make HP_WIRELESS option text more descriptive + - LP: #1303737 + * ACPI: blacklist win8 OSI for Dell Inspiron 7737 + - LP: #1288161 + + -- Luis Henriques Thu, 22 May 2014 12:20:44 +0100 + +linux (3.13.0-27.50) trusty; urgency=low + + [ Brad Figg ] + + * Revert "rtlwifi: Set the link state" + + -- Brad Figg Thu, 15 May 2014 10:21:43 -0700 + +linux (3.13.0-27.49) trusty; urgency=low + + [ Brad Figg ] + + * Revert "SAUCE: (no-up) HID: rmi: do not stop the device at the end of + probe" + * Revert "SAUCE: (no-up) HID: rmi: introduce RMI driver for Synaptics + touchpads" + * Revert "[Config] CONFIG_HID_RMI=m" + + -- Brad Figg Wed, 14 May 2014 10:29:07 -0700 + +linux (3.13.0-26.48) trusty; urgency=low + + [ Benjamin Tissoires ] + + * SAUCE: (no-up) HID: rmi: introduce RMI driver for Synaptics touchpads + - LP: #1305522 + * SAUCE: (no-up) HID: rmi: do not stop the device at the end of probe + - LP: #1305522 + + [ Kamal Mostafa ] + + * Merged back Ubuntu-3.13.0-24.47 security release + * Revert "n_tty: Fix n_tty_write crash when echoing in raw mode" + - LP: #1314762 + * Release Tracking Bug + - LP: #1316835 + + [ Tim Gardner ] + + * [Config] CONFIG_HID_RMI=m + - LP: #1305522 + * [Config] CONFIG_CRYPTO_DEV_NX=n for ppc64el + - LP: #1314625 + * [Config] CONFIG_ZSWAP=y + - LP: #1315203 + * Add rpcsec_gss_krb5 to generic inclusion list + - LP: #769527 + + [ Upstream Kernel Changes ] + + * HID: hidraw: make comment more accurate and nicer + - LP: #1305522 + * HID: remove hid_get_raw_report in struct hid_device + - LP: #1305522 + * HID: i2c-hid: implement ll_driver transport-layer callbacks + - LP: #1305522 + * HID: add inliners for ll_driver transport-layer callbacks + - LP: #1305522 + * HID: Add transport-driver callbacks to the hid_ll_driver struct + - LP: #1305522 + * drm/nouveau: fail runtime pm properly. 
+ - LP: #1313986 + * drm/nouveau: don't suspend/resume display on runtime s/r + - LP: #1313986 + * n_tty: Fix n_tty_write crash when echoing in raw mode + - LP: #1314762 + - CVE-2014-0196 + * floppy: ignore kernel-only members in FDRAWCMD ioctl input + - LP: #1316729 + - CVE-2014-1737 + * floppy: don't write kernel-only members to FDRAWCMD ioctl output + - LP: #1316735 + - CVE-2014-1738 + + -- Kamal Mostafa Tue, 06 May 2014 15:30:29 -0700 + +linux (3.13.0-25.47) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1313868 + + [ Adam Lee ] + + * [Config] CONFIG_RTL8723BE=m, CONFIG_RTL8723_COMMON=m + - LP: #1240940 + + [ Alex Hung ] + + * SAUCE: (no-up) dell-led: add mic mute led interface + - LP: #1308297 + + [ Andy Whitcroft ] + + * SAUCE: (no-up) powerpc: Increase COMMAND_LINE_SIZE to 2048 from 512. + - LP: #1306677 + + [ Ben Collins ] + + * [Config] Disable PAMU on Freescale kernels + - LP: #1311738 + + [ Tim Gardner ] + + * Revert "SAUCE: x86, hyperv: bypass the timer_irq_works() check" + - LP: #1311683 + * SAUCE: (no-up) ALSA: usb-audio: Suppress repetitive debug messages from + retire_playback_urb() + - LP: #1305133 + * SAUCE: (no-up) 'BUG:' message unnecessarily triggers kerneloops + - LP: #1305480 + * [Config] CONFIG_POWERNV_CPUFREQ=m + - LP: #1309576 + * [Config] CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y for ppc64el + - LP: #1309576 + * [Config] CONFIG_TRANSPARENT_HUGEPAGE=n for arm64 + - LP: #1309221 + * [Config] CONFIG_MEMCG_KMEM=y + - LP: #1309586 + * [Config] CONFIG_CRASH_DUMP=y for ppc64el + - LP: #1312783 + + [ Upstream Kernel Changes ] + + * Revert "rtlwifi: rtl8188ee: enable MSI interrupts mode" + - LP: #1310512 + * mac80211: add length check in ieee80211_is_robust_mgmt_frame() + - LP: #1240940 + * rtlwifi: rtl8723ae: rtl8723-common: Create new driver for common code + - LP: #1240940 + * rtlwifi: rtl8723ae: rtl8723-common: Copy common firmware code + - LP: #1240940 + * rtlwifi: rtl8723ae: rtl8723-common: Copy common dynamic power + management code + - LP: #1240940 + * rtlwifi: rtl8723be: Add new driver + - LP: #1240940 + * selinux: correctly label /proc inodes in use before the policy is + loaded + - LP: #1309007 + * net: sctp: fix skb leakage in COOKIE ECHO path of chunk->auth_chunk + - LP: #1309007 + * bridge: multicast: add sanity check for query source addresses + - LP: #1309007 + * tipc: allow connection shutdown callback to be invoked in advance + - LP: #1309007 + * tipc: fix connection refcount leak + - LP: #1309007 + * tipc: drop subscriber connection id invalidation + - LP: #1309007 + * tipc: fix memory leak during module removal + - LP: #1309007 + * tipc: don't log disabled tasklet handler errors + - LP: #1309007 + * inet: frag: make sure forced eviction removes all frags + - LP: #1309007 + * net: unix: non blocking recvmsg() should not return -EINTR + - LP: #1309007 + * ipv6: Fix exthdrs offload registration. 
+ - LP: #1309007 + * bnx2: Fix shutdown sequence + - LP: #1309007 + * pkt_sched: fq: do not hold qdisc lock while allocating memory + - LP: #1309007 + * Xen-netback: Fix issue caused by using gso_type wrongly + - LP: #1309007 + * vlan: Set correct source MAC address with TX VLAN offload enabled + - LP: #1309007 + * tcp: tcp_release_cb() should release socket ownership + - LP: #1309007 + * bridge: multicast: add sanity check for general query destination + - LP: #1309007 + * bridge: multicast: enable snooping on general queries only + - LP: #1309007 + * net: socket: error on a negative msg_namelen + - LP: #1309007 + * bonding: set correct vlan id for alb xmit path + - LP: #1309007 + * eth: fec: Fix lost promiscuous mode after reconnecting cable + - LP: #1309007 + * ipv6: Avoid unnecessary temporary addresses being generated + - LP: #1309007 + * ipv6: ip6_append_data_mtu do not handle the mtu of the second fragment + properly + - LP: #1309007 + * net: cdc_ncm: fix control message ordering + - LP: #1309007 + * vxlan: fix potential NULL dereference in arp_reduce() + - LP: #1309007 + * vxlan: fix nonfunctional neigh_reduce() + - LP: #1309007 + * tcp: syncookies: do not use getnstimeofday() + - LP: #1309007 + * rtnetlink: fix fdb notification flags + - LP: #1309007 + * ipmr: fix mfc notification flags + - LP: #1309007 + * ip6mr: fix mfc notification flags + - LP: #1309007 + * net: micrel : ks8851-ml: add vdd-supply support + - LP: #1309007 + * netpoll: fix the skb check in pkt_is_ns + - LP: #1309007 + * tipc: fix spinlock recursion bug for failed subscriptions + - LP: #1309007 + * ip_tunnel: Fix dst ref-count. + - LP: #1309007 + * tg3: Do not include vlan acceleration features in vlan_features + - LP: #1309007 + * virtio-net: correct error handling of virtqueue_kick() + - LP: #1309007 + * usbnet: include wait queue head in device structure + - LP: #1309007 + * vlan: Set hard_header_len according to available acceleration + - LP: #1309007 + * vhost: fix total length when packets are too short + - LP: #1309007 + - CVE-2014-0077 + * tcp: fix get_timewait4_sock() delay computation on 64bit + - LP: #1309007 + * xen-netback: remove pointless clause from if statement + - LP: #1309007 + * ipv6: some ipv6 statistic counters failed to disable bh + - LP: #1309007 + * netlink: don't compare the nul-termination in nla_strcmp + - LP: #1309007 + * xen-netback: disable rogue vif in kthread context + - LP: #1309007 + * Call efx_set_channels() before efx->type->dimension_resources() + - LP: #1309007 + * net: vxlan: fix crash when interface is created with no group + - LP: #1309007 + * isdnloop: Validate NUL-terminated strings from user. 
+ - LP: #1309007 + * isdnloop: several buffer overflows + - LP: #1309007 + * powernow-k6: disable cache when changing frequency + - LP: #1309007 + * powernow-k6: correctly initialize default parameters + - LP: #1309007 + * powernow-k6: reorder frequencies + - LP: #1309007 + * ARC: [nsimosci] Change .dts to use generic 8250 UART + - LP: #1309007 + * ARC: [nsimosci] Unbork console + - LP: #1309007 + * futex: Allow architectures to skip futex_atomic_cmpxchg_inatomic() test + - LP: #1309007 + * m68k: Skip futex_atomic_cmpxchg_inatomic() test + - LP: #1309007 + * crypto: ghash-clmulni-intel - use C implementation for setkey() + - LP: #1309007 + * Linux 3.13.10 + - LP: #1309007 + * cpufreq: powernv: cpufreq driver for powernv platform + - LP: #1309576 + * cpufreq: powernv: Use cpufreq_frequency_table.driver_data to store + pstate ids + - LP: #1309576 + * cpufreq: powernv: Select CPUFreq related Kconfig options for powernv + - LP: #1309576 + * support Thinkpad X1 Carbon 2nd generation's adaptive keyboard + - LP: #1309609 + * save and restore adaptive keyboard mode for suspend and,resume + - LP: #1309609 + * user namespace: fix incorrect memory barriers + - LP: #1311683 + * Char: ipmi_bt_sm, fix infinite loop + - LP: #1311683 + * x86, hyperv: Bypass the timer_irq_works() check + - LP: #1311683 + * x86: Adjust irq remapping quirk for older revisions of 5500/5520 + chipsets + - LP: #1311683 + * PCI: designware: Fix RC BAR to be single 64-bit non-prefetchable memory + BAR + - LP: #1311683 + * PCI: designware: Fix iATU programming for cfg1, io and mem viewport + - LP: #1311683 + * ACPI / button: Add ACPI Button event via netlink routine + - LP: #1311683 + * PCI: Enable INTx in pci_reenable_device() only when MSI/MSI-X not + enabled + - LP: #1311683 + * staging: comedi: 8255_pci: initialize MITE data window + - LP: #1311683 + * tty: Set correct tty name in 'active' sysfs attribute + - LP: #1311683 + * tty: Fix low_latency BUG + - LP: #1311683 + * SCSI: sd: don't fail if the device doesn't recognize SYNCHRONIZE CACHE + - LP: #1311683 + * pid_namespace: pidns_get() should check task_active_pid_ns() != NULL + - LP: #1311683 + * Bluetooth: Fix removing Long Term Key + - LP: #1311683 + * ima: restore the original behavior for sending data with ima template + - LP: #1311683 + * backing_dev: fix hung task on sync + - LP: #1311683 + * bdi: avoid oops on device removal + - LP: #1311683 + * xfs: fix directory hash ordering bug + - LP: #1311683 + * Btrfs: skip submitting barrier for missing device + - LP: #1311683 + * Btrfs: fix deadlock with nested trans handles + - LP: #1311683 + * ext4: fix error return from ext4_ext_handle_uninitialized_extents() + - LP: #1311683 + * ext4: fix partial cluster handling for bigalloc file systems + - LP: #1311683 + * ext4: fix premature freeing of partial clusters split across leaf + blocks + - LP: #1311683 + * jffs2: Fix segmentation fault found in stress test + - LP: #1311683 + * jffs2: Fix crash due to truncation of csize + - LP: #1311683 + * jffs2: avoid soft-lockup in jffs2_reserve_space_gc() + - LP: #1311683 + * jffs2: remove from wait queue after schedule() + - LP: #1311683 + * sparc32: fix build failure for arch_jump_label_transform + - LP: #1311683 + * sparc64: don't treat 64-bit syscall return codes as 32-bit + - LP: #1311683 + * sparc64: Make sure %pil interrupts are enabled during hypervisor yield. 
+ - LP: #1311683 + * wait: fix reparent_leader() vs EXIT_DEAD->EXIT_ZOMBIE race + - LP: #1311683 + * exit: call disassociate_ctty() before exit_task_namespaces() + - LP: #1311683 + * Linux 3.13.11 + - LP: #1311683 + * powerpc/le: Enable RTAS events support + - LP: #1312230 + * net: ipv4: current group_info should be put after using. + - CVE-2014-2851 + * powerpc/relocate fix relocate processing in LE mode + - LP: #1312783 + + -- Joseph Salisbury Mon, 28 Apr 2014 15:02:45 -0400 + +linux (3.13.0-24.47) trusty; urgency=low + + [ Peter Hurley ] + + * n_tty: Fix n_tty_write crash when echoing in raw mode + + -- Brad Figg Wed, 30 Apr 2014 15:08:31 -0700 + +linux (3.13.0-24.46) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i -- add nvme devices to block-modules udeb + - LP: #1303710 + + [ Paolo Pisati ] + + * [Config] build vexpress a9 dtb + - LP: #1303657 + * [Config] disable HVC_DCC + - LP: #1303657 + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1305158 + * rebase to v3.13.9 + * CONFIG_RTLBTCOEXIST=m + - LP: #1296591 + + [ Upstream Kernel Changes ] + + * HID: Bluetooth: hidp: make sure input buffers are big enough + - LP: #1252874 + * ACPI / video: Add systems that should favour native backlight interface + - LP: #1303419 + * rds: prevent dereference of a NULL device in rds_iw_laddr_check + - LP: #1302222 + - CVE-2014-2678 + * x86/efi: Fix 32-bit fallout + - LP: #1301590 + * drm/nouveau/devinit: tidy up the subdev class definition + - LP: #1158689 + * drm/nouveau/device: provide a way for devinit to mark engines as + disabled + - LP: #1158689 + * drm/nv50-/devinit: prevent use of engines marked as disabled by + hw/vbios + - LP: #1158689 + * rtlwifi: btcoexist: Add new mini driver + - LP: #1296591 + * rtlwifi: Prepare existing drivers for new driver + - LP: #1296591 + * rtlwifi: add MSI interrupts mode support + - LP: #1296591 + * rtlwifi: rtl8188ee: enable MSI interrupts mode + - LP: #1296591 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.9 + + -- Tim Gardner Fri, 04 Apr 2014 09:26:27 -0400 + +linux (3.13.0-23.45) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1302083 + + [ Emil Goode ] + + * SAUCE: (no-up) brcmsmac: fix deadlock on missing firmware + - LP: #1300416 + + [ Moni Shoua ] + + * SAUCE: (no-up) IB/core: Don't resolve passive side RoCE L2 address in cma req handler + Merged at the request of Mellanox engineers. It should be making its way upstream. + http://www.spinics.net/lists/linux-rdma/msg19417.html + + [ Or Gerlitz ] + + * [Config] CONFIG_INFINIBAND_USNIC=m + + [ Tim Gardner ] + + * [Config] CONFIG_MLX4_EN_VXLAN=y + + [ Upstream Kernel Changes ] + + * This set of patches essentially updates the infiniband subsystem to 3.14 plus some + bug fixes from 3.15. The initial set of commits was determined using: + + git log --pretty=oneline --reverse v3.13..cd6362befe4cc7bf589a5236d2a780af2d47bcc9 -- drivers/infiniband drivers/net/ethernet/mellanox include/linux/mlx* include/rdma + + A few tree-wide patches were dropped since they were not essential. + This is the list of commits that were actually applied. As you can see, there were + a few backports, but all were simple context collisions and easily resolved.
+ + (cherry picked from commit 7c6d74d23a33a946bcf08ba2d3e52d31943b7342) + (cherry picked from commit 7b25d81b7ff03bb1893a9c7f97797b891a772deb) + (cherry picked from commit 73e74ab4e0d030f28d640507998156a22d4211f8) + (cherry picked from commit eb17711bc1d6611e934af5b6dabc225936084128) + (cherry picked from commit 8e1a28e8e6797449dfdfa4739002d1e5939355a8) + (cherry picked from commit 84c864038d6d991be81344fc3168ec2c2f7a8d06) + (cherry picked from commit d03a68f8217ea03492e4f7928db222dc6544792a) + (cherry picked from commit be902ab122fcc59ba6a8588e820c31861d75a5a4) + (cherry picked from commit e4b59a1cb6f8feb03f356b0abfd20451f05d7470) + (cherry picked from commit 0276a330617a0cf380f09e5065299078d3d45886) + (cherry picked from commit 982290a7fe36e528af292d3e3b61939b1900bfc6) + (cherry picked from commit c5266d40b0a26546d0ebedb44dd4145088b85cb8) + (cherry picked from commit 7ffdf726cfe0d188907bdbb0e7729fb35a69c219) + (cherry picked from commit 837052d0ccc5a789a578f8b628ba154b63bd51ea) + (cherry picked from commit c0623e587d869b4b18e077d64a8524ea364b5b77) + (cherry picked from commit ad7d4eaed995d76fb24a18e202fdf5072197ff0a) + (cherry picked from commit 2156d9a8ac0202f0158d407063cb850afffd3f56) + (cherry picked from commit b912b2f8fc71df4c3ffa7a9fe2c2227e8bcdaa07) + (cherry picked from commit 9ba75fb0c4b92416b94640b5a043c323a457f14a) + (cherry picked from commit 74b9c3ea847f060c784e86453f1ad77dd05a7a8f) + (cherry picked from commit e6a767582942d6fd9da0ddea673f5a7017a365c7) + (cherry picked from commit fe5e8a1acc7fd877b6706053cf88c418c33fe7a3) + (cherry picked from commit be8348df6efac6b602f2ad3210139bccf0dbe3d7) + (cherry picked from commit e3cf00d0a87f025db5855a43a67c67a41fa79fef) + (cherry picked from commit 8192d4acb5c5376c0f6756f2106ab243036c8c7d) + (cherry picked from commit 301a0dd68e5ddd22d992a58f466b621987d9df3b) + (cherry picked from commit 2183b990b67b761f81c68a18f60df028e080cf05) + (cherry picked from commit 8af94ac66a4d53a96278ecbb9ef2e8592f0d9ba3) + (cherry picked from commit 256d6a6ac52ee02e897cec88ecc96c3ae7f9cb88) + (cherry picked from commit 60b215e8b267f911751a043de63181dab1b69706) + (cherry picked from commit b85caf479b577f000067002259539ad4341d4530) + (cherry picked from commit 3f92bed3d6c073f41efc0777ecd3442aa1f03d20) + (cherry picked from commit 6214105460842759020bdd7f4dbb50afa1be9d17) + (cherry picked from commit c7845bcafe4d2ecd5c479fa64d1b425c21dde17c) + (cherry picked from commit e45e614e4015a489d2f8013eaed45d498d884e86) + (cherry picked from commit 248567f79304b953ea492fb92ade097b62ed09b2) + (cherry picked from commit 6a54d9f9a04ed35e6615a47974c1ef02ff3a62cb) + (cherry picked from commit c5f855e08a97edc107c4a3b73809ed629c1dcac1) + (cherry picked from commit 9f637f7936025aef57f247b11036bad18bb87c06) + (cherry picked from commit c30392ab5bb536fef268c22804dafded15170d14) + (cherry picked from commit 3108bccb3d9afbd32931d775f5dd5ee157eaa5a9) + (cherry picked from commit d9fe40916387bab884e458c99399c149b033506c) + (cherry picked from commit 24e42754f676d34e5c26d6b7b30f36df8004ec08) + (cherry picked from commit c1be5232d21de68f46637e617225b9b7c586451a) + (cherry picked from commit 90f1d1b41b70474bf73d07d4300196901cd81718) + (cherry picked from commit 240ae00e4d834e387f4f09e236130f520e357a70) + (back ported from commit 4de6580360867d44adecb2d05febed1c8d186c82) + (cherry picked from commit 0a9b7d59d5a8e2b97406a29a8a807bbc5ce7092e) + (cherry picked from commit a37a1a428431d3e7e9f53530b5c56ff7867bd487) + (cherry picked from commit c1c98501121eefa0888a42566ec7233a1626f678) + 
(back ported from commit dd5f03beb4f76ae65d76d8c22a8815e424fc607c) + (cherry picked from commit c9218a9e677856d6647ea82d821f22ccbffc988c) + (cherry picked from commit f088cbb8d8547a89af258a3223657f9a69b811e4) + (cherry picked from commit 4942c0b4b64478ff45c3bbf4d40aebd66de0bcc5) + (cherry picked from commit 2d97436f5b06217beb6c91a0cd0ae0f0d79b61cc) + (cherry picked from commit 5db5765e255de4072eb0e35facfeafce53af001b) + (cherry picked from commit 61f78268936e781a104b4ac06b7e47d760800c40) + (cherry picked from commit 6dcebe614c667fca73aaf0cfbd1e70bc9179538e) + (cherry picked from commit d1db47c5eed89a1c8b60e780aeadd870bb0d3894) + (cherry picked from commit af2e2e35a23e4aeecfe4332a7140c81e0f09b7e3) + (cherry picked from commit 7b85627b9f02f9b0fb2ef5f021807f4251135857) + (back ported from commit d487ee77740ccf79d7dc1935d4daa77887283028) + (cherry picked from commit 297e0dad720664dad44baa2cdd13f871979fb58c) + (cherry picked from commit eb6ab13267be87dcad3e611500b7cb404ed4479c) + (cherry picked from commit a3a5a82627492c8947d8866ddf421e9248088466) + (cherry picked from commit dc87a90f5d61d7f01cfb63d92d5eaa27718e5b19) + (cherry picked from commit 9bd626e79df67b2ba3b0c91a4640ab7bca1af04d) + (cherry picked from commit 40aca6ffcac57dd9c65877a59a8bbb210c4691ca) + (cherry picked from commit 37721d8501a9a1fbe87527c24d127a914b29fd7f) + (cherry picked from commit ed4c54e5b4baf55a7a67a80fa766334855c94854) + (cherry picked from commit 2f85d24e604c1532723c4b5791816b533baed2c3) + (cherry picked from commit 990acea616e99355703b503c1e50fb9c7ddff6b9) + (cherry picked from commit f282651de676d10e395bc7923f0087fbbba12ed7) + (cherry picked from commit 31ab8acbf6618c89fec77f7706df7daaa319feb5) + (cherry picked from commit 9392fa06411cf93885c4cafc8058085d98f52fec) + (cherry picked from commit 27cdef637c25705b433d5c4deeef4cf8dcb75d6a) + (cherry picked from commit 8ce96afa8239f13bdf5ab35839bac46c103bbedc) + (cherry picked from commit 6cd28f044b47aeeba91807d97d6f3ea5a048e88d) + (cherry picked from commit 5462eddd7a78131ccb514d52473625d99769215e) + (cherry picked from commit 05633102d85b50f35325dfbedafcedd6c5b3264c) + (cherry picked from commit 437708c44395a11e474fb33b4fd7f29483118e51) + (cherry picked from commit d9d5713ca628dc211d8b4a1da5fb9e0cfe592b92) + (cherry picked from commit a384b20e417ae0f5f1f359600b4bdcc34265b256) + (cherry picked from commit 298589b1cb626adf4beba6dd8e3cd4b64e8799be) + (cherry picked from commit e08a8761d89b7625144c3fbf0ff9643159135c96) + (cherry picked from commit 0b6e81b91070bdbe0defb9101384ebb26835e401) + (cherry picked from commit 042b9adae899e1b497282d92205d3fef42d5ca8d) + (cherry picked from commit ada388f7afad1e2e87acbfe30600fdaff9bd6327) + (cherry picked from commit 3bdb31f688276505ede23280885948e934304674) + (cherry picked from commit bde51583f49bd87e452e9504d489926638046b11) + (cherry picked from commit db81a5c374b5bd650c5e6ae85d026709751db103) + (cherry picked from commit 05bdb2ab6b09f2306f0afe0f60f4b9abffa7aba4) + (cherry picked from commit 9e9c47d07d447e09a66ee528c3ebad9ba359af6a) + (cherry picked from commit 1bde6e301cf6217da9238086c958f532b16e504d) + (cherry picked from commit 8c8a49148b95c4d7c5f58a6866a30ea02485d7a3) + (cherry picked from commit 57761d8df8efc7cc1227f9bc22e0dda01b0dd91b) + (cherry picked from commit 676687c69697d2081d25afd14ee90937d1fb0c8e) + (cherry picked from commit 9e65dc371b5c8d7476c81353137efc13cc1bdabd) + (cherry picked from commit 78c0f98cc9dd46824fa66f35f14ea24ba733d145) + (cherry picked from commit 1a4c3a3dc5fdeef2a7bdf4ac7d81df58c3c0a51e) + (cherry 
picked from commit d07875bd0d1517185534b5f9eef469426ba42cb9) + (cherry picked from commit ab576627c8f97c08297d81537be17df161171923) + (cherry picked from commit a80e21b3b2d151d79bb9be42334ab10d40195324) + (cherry picked from commit 9d8abf45944e4f1c18a04070fc3ed2f3ffcbbcb6) + (cherry picked from commit 4196670be786d529ab7f6c18f5077141ce1b787e) + (cherry picked from commit acc4fccf4eff5b29e545995b75de77e60ea44aae) + (cherry picked from commit 4ce5a5744a2f5479e58c6788cbe3987b8071b62e) + (cherry picked from commit ddf8bd349115c2bc85a62e3d94018c9976ac72f7) + (cherry picked from commit 5071456fe244e409adcda7226e5f0b5d1b879fd3) + (cherry picked from commit ad4885d279b63c65347220236d07669a2f59634b) + (cherry picked from commit b4a26a27287a7f81933ba016aeed6c69dd155323) + (cherry picked from commit 0f0132001fd239bb67c1f68436b95cc79de89736) + (cherry picked from commit 6ecde51dd7894ffe2f959cca1fea3ea2b9ee2394) + (cherry picked from commit 0861565f501ce3fcea9394d4b98c02b1f6de6b9e) + (cherry picked from commit f809309a251a13bd97cc189c3fa428782aab9716) + (cherry picked from commit 7d9eacf9457efc6b614665e1095336c11ad83f0d) + (cherry picked from commit fd8b48b22a2b7cdf21f15b01cae379e6159a7eea) + (cherry picked from commit a61d93d92f5c9533898098abb5f187840900aeb5) + (cherry picked from commit 09de3f1313a30d8a22e488c9a5b96a9560cae96d) + (cherry picked from commit 99932d4fc03a13bb3e94938fe25458fabc8f2fc3) + (cherry picked from commit 169a1d85d084edeb0736ad80fe439639ac938dcd) + (cherry picked from commit 367d56f7b4d5ce61e883c64f81786c7a3ae88eea) + (cherry picked from commit 57352ef4f5f19969a50d42e84b274287993b576f) + (cherry picked from commit 97989356af0ec8b1b1658d804892abb354127330) + (cherry picked from commit 56cb456746a15c1025a178466492ca4c373b1a63) + (cherry picked from commit 2a2083f7f3568c0192daa6ac0e6fa35d953f47bd) + (cherry picked from commit 7855bff42ea9938a0853321256f4c8ce3628aa73) + (cherry picked from commit de123268300fd33b7f7668fda3264059daffa6ef) + (cherry picked from commit 299603e8370a93dd5d8e8d800f0dff1ce2c53d36) + (cherry picked from commit bf5a755f5e9186406bbf50f4087100af5bd68e40) + (cherry picked from commit 600adc18eba823f9fd8ed5fec8b04f11dddf3884) + (back ported from commit b582ef0990d457f7ce8ccf827af51a575ca0b4a6) + (cherry picked from commit e27a2f839598e48bb345f9fc86d4d54c3944db50) + (back ported from commit dc01e7d3447793fd9e4090aa9d50c549848b5a18) + (cherry picked from commit b5aaab12b2b4bc4acab7384c17a87f3406e5047d) + (cherry picked from commit 920a0fde5a32fd40b4d3c94ad72f7fc7039db8e3) + (cherry picked from commit 438e38fadca2f6e57eeecc08326c8a95758594d4) + (cherry picked from commit 97a5221f56bad2e1c7e8ab55da4ac4748ef59c64) + (cherry picked from commit 02512482321c531df4abf73943529f8b44d869e2) + (cherry picked from commit bb2146bc883e86b835e30644757a6d4a649a7ce8) + (back ported from commit ca9f9f703950e5cb300526549b4f1b0a6605a5c5) + (cherry picked from commit fd8daa45f2bd9b876e0dbb9503ccc5a5252844f2) + (cherry picked from commit b97b33a3df0439401f80f041eda507d4fffa0dbf) + (cherry picked from commit 93591aaa62f89820f4ae0558f01eaf9a359738da) + (cherry picked from commit 15bffdffccb3204eb1e993f60eee65c439a03136) + (cherry picked from commit 9813337a4b16ea5b1701b1d00f7e410f5decdfa5) + (cherry picked from commit 313c2d375b1c9b648d9d4b96ec1b8185ac6a78c5) + (cherry picked from commit 28d222bbaa5122fb4bb0e607e39ab149a010e587) + (cherry picked from commit ec5709403e6893acb4f7ca40514ebd29c3116836) + (cherry picked from commit 9717218bb2982f5f214d84473c70542f1e42bfd7) + (cherry picked 
from commit d0ceebd7508d5bf6e81367640959aef7e0de4947) + (cherry picked from commit c120e9e03090b4f9578ca38ef4250ff3805b6e3f) + (cherry picked from commit 6ee51a4e866bbb0921180b457ed16cd172859346) + (cherry picked from commit 9cd593529c8652785bc9962acc79b6b176741f99) + (cherry picked from commit b6ffaeffaea4d92f05f5ba1ef54df407cb7c8517) + (cherry picked from commit 2f5bb473681b88819a9de28ac3a47e7737815a92) + (cherry picked from commit 5ea8bbfc49291b7e23161fe4de0bf3e4a4e34b18) + (cherry picked from commit ceb5433b3a54979216d794e45147d25c24c94999) + (cherry picked from commit aa9a2d51a3e70b15a898bec7dde3ce5726fec641) + (cherry picked from commit e81f44b66b456a7dcfbdeffeb355458cd6a58973) + (cherry picked from commit 7a2cea2aaae2d5eb5c00c49c52180c7c2c66130a) + (cherry picked from commit 05eb23893c2cf9502a9cec0c32e7f1d1ed2895c8) + (cherry picked from commit 38be0a347c91133843474e12baacd252d0fd1c30) + (cherry picked from commit 82373701be26b893eaf7372db0af84235a51998a) + (cherry picked from commit 1ab95d37bcc3ff2d69e3871e4f056bab7aed0b85) + (cherry picked from commit f74462acf8f390528c8b7937f227c6c90d017f3b) + (cherry picked from commit 449fc48866f7d84b0d9a19201de18a4dd4d3488c) + (cherry picked from commit dd41cc3bb90efd455df514899a5d3cf245182eb1) + (cherry picked from commit e471b40321a94f07d13b8a9e4b064885cf08835d) + (cherry picked from commit bfd2793c9559ae73ae021797f1d4b097c27f24be) + (cherry picked from commit b74757944d69f8cd7de5284fc7e8649d965361ab) + (cherry picked from commit d18f141a1a7cfa5ffad8433e43062b05a8d1b82a) + (cherry picked from commit 1b136de120dda625109f2afe1e3d04e256be9ec1) + (cherry picked from commit a66132f3eb514f42c49a3e8f57aab2ccd0360f06) + + * mlx4_core: Roll back round robin bitmap allocation commit for CQs, SRQs, and MPTs + * net/mlx4_core: Remove zeroed out of explicit QUERY_FUNC_CAP fields + * net/mlx4_core: Rename QUERY_FUNC_CAP fields + * net/mlx4_core: Introduce nic_info new flag in QUERY_FUNC_CAP + * net/mlx4_core: Expose physical port id as PF/VF capability + * net/mlx4_en: Implement ndo_get_phys_port_id + * net/mlx4_en: Configure the XPS queue mapping on driver load + * net/mlx4_core: Set CQE/EQE size to 64B by default + * net/mlx4_en: Ignore irrelevant hypervisor events + * net/mlx4_en: Add NAPI support for transmit side + * net/mlx4_core: Check port number for validity before accessing data + * infiniband: slight optimization of addr compare + * net/mlx4_core: Add basic support for TCP/IP offloads under tunneling + * net/mlx4_en: Add netdev support for TCP/IP offloads of vxlan tunneling + * net: mlx4: slight optimization of addr compare + * mlx4_en: Add PTP hardware clock + * mlx4_en: Only cycle port if HW timestamp config changes + * net/mlx4_core: Warn if device doesn't have enough PCI bandwidth + * net/mlx4_en: fix error return code in mlx4_en_get_qp() + * mlx4_en: Select PTP_1588_CLOCK + * net/mlx4_en: call gro handler for encapsulated frames + * RDMA/ocrdma: Fix AV_VALID bit position + * RDMA/ocrdma: Fix OCRDMA_GEN2_FAMILY macro definition + * IB/usnic: Add Cisco VIC low-level hardware driver + * IB/usnic: Change WARN_ON to lockdep_assert_held + * IB/usnic: Add struct usnic_transport_spec + * IB/usnic: Push all forwarding state to usnic_fwd.[hc] + * IB/usnic: Port over main.c and verbs.c to the usnic_fwd.h + * IB/usnic: Port over usnic_ib_qp_grp.[hc] to new usnic_fwd.h + * IB/usnic: Port over sysfs to new usnic_fwd.h + * IB/usnic: Update ABI and Version file for UDP support + * IB/usnic: Add UDP support to usnic_fwd.[hc] + * IB:usnic: Add UDP support 
to usnic_transport.[hc] + * IB/usnic: Add UDP support in u*verbs.c, u*main.c and u*util.h + * IB/usnic: Add UDP support in usnic_ib_qp_grp.[hc] + * IB/core: Add RDMA_TRANSPORT_USNIC_UDP + * IB/usnic: Remove superflous parentheses + * IB/usnic: Use for_each_sg instead of a for-loop + * IB/usnic: Expose flows via debugfs + * IB/usnic: Fix typo "Ignorning" -> "Ignoring" + * IB/usnic: Append documentation to usnic_transport.h and cleanup + * IB/mlx5: Remove unused code in mr.c + * mlx5_core: Remove dead code + * IB/mlx5: Fix micro UAR allocator + * IB/core: Add flow steering support for IPoIB UD traffic + * IB/core: Add support for IB L2 device-managed steering + * mlx4_core: Add support for steerable IB UD QPs + * IB/mlx4: Enable device-managed steering support for IB ports too + * IB/mlx4: Add mechanism to support flow steering over IB links + * IB/mlx4: Add support for steerable IB UD QPs + * IB/core: Ethernet L2 attributes in verbs/cm structures + * net/mlx4_core: clean up cq_res_start_move_to() + * net/mlx4_core: clean up srq_res_start_move_to() + * IB/usnic: Fix endianness-related warnings + * IB/usnic: Add dependency on CONFIG_INET + * IB/core: Add support for RDMA_NODE_USNIC_UDP + * IB/usnic: Advertise usNIC devices as RDMA_NODE_USNIC_UDP + * IB/usnic: Set userspace/kernel ABI ver to 4 + * IB/usnic: Remove unused variable in usnic_debugfs_exit() + * IB/mlx4: Fix error return code + * IB/cma: IBoE (RoCE) IP-based GID addressing + * IB/mlx4: Use IBoE (RoCE) IP based GIDs in the port GID table + * IB/mlx4: Handle Ethernet L2 parameters for IP based GID addressing + * IB/isert: seperate connection protection domains and dma MRs + * IB/isert: Avoid frwr notation, user fastreg + * IB/isert: Move fastreg descriptor creation to a function + * IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine + * RDMA/ocrdma: Handle Ethernet L2 parameters for IP based GID addressing + * RDMA/ocrdma: Populate GID table with IP based gids + * IB/core: Resolve Ethernet L2 addresses when modifying QP + * IB/core: Make ib_addr a core IB module + * IB/cm: Fix missing unlock on error in cm_init_qp_rtr_attr() + * IB/mlx4: Add dependency INET + * RDMA/ocrdma: Move ocrdma_inetaddr_event outside of "#if CONFIG_IPV6" + * RDMA/ocrdma: Add dependency on INET + * IB/mlx4: Use IS_ENABLED(CONFIG_IPV6) + * IB/usnic: Use GFP_ATOMIC under spinlock + * net/mlx4_core: Remove unnecessary validation for port number + * RDMA/cma: Handle global/non-linklocal IPv6 addresses in cma_check_linklocal() + * IB/core: Fix unused variable warning + * IPoIB: Report operstate consistently when brought up without a link + * RDMA/amso1100: Add check if cache memory was allocated before freeing it + * IB/usnic: Remove unused includes of + * RDMA/cxgb4: Fix gcc warning on 32-bit arch + * mlx5_core: Fix out arg size in access_register command + * IB/mlx5: Clear out struct before create QP command + * mlx5_core: Use mlx5 core style warning + * IB/mlx5: Make sure doorbell record is visible before doorbell + * IB/mlx5: Implement modify CQ + * IB/mlx5: Add support for resize CQ + * mlx5_core: Improve debugfs readability + * mlx5_core: Fix PowerPC support + * IB/mlx5: Allow creation of QPs with zero-length work queues + * IB/mlx5: Abort driver cleanup if teardown hca fails + * IB/mlx5: Remove old field for create mkey mailbox + * IB/mlx5: Verify reserved fields are cleared + * iscsi-target: Convert gfp_t parameter to task state bitmask + * IB/mlx5: Fix RC transport send queue overhead computation + * IB/mlx5: Fix binary compatibility with 
libmlx5 + * IB/mlx5: Don't set "block multicast loopback" capability + * RDMA/nes: Fix error return code + * RDMA/amso1100: Fix error return code + * iser-target: Fix leak on failure in isert_conn_create_fastreg_pool + * IB/srpt: replace strict_strtoul() with kstrtoul() + * IB/mlx4: Don't allocate range of steerable UD QPs for Ethernet-only device + * IB/mlx4: Make sure GID index 0 is always occupied + * IB/mlx4: Move rtnl locking to the right place + * IB/mlx4: Do IBoE locking earlier when initializing the GID table + * IB/mlx4: Do IBoE GID table resets per-port + * IB/mlx4: Build the port IBoE GID table properly under bonding + * IB: Report using RoCE IP based gids in port caps + * RDMA/cxgb4: Add missing neigh_release in LE-Workaround path + * mlx5: Add include of because of kzalloc()/kfree() use + * IB/mlx5: Remove dependency on X86 + * IB/usnic: Fix smatch endianness error + * IB/iser: Avoid dereferencing iscsi_iser conn object when not bound to iser connection + * IB/iser: Fix use after free in iser_snd_completion() + * RDMA/ocrdma: Fix traffic class shift + * RDMA/ocrdma: Fix load time panic during GID table init + * netdevice: add queue selection fallback handler for ndo_select_queue + * net,IB/mlx: Bump all Mellanox driver versions + * net/mlx4: Support shutdown() interface + * net/mlx4_core: Fix memory access error in mlx4_QUERY_DEV_CAP_wrapper() + * net/mlx4_core: mlx4_init_slave() shouldn't access comm channel before PF is ready + * net/mlx4_core: Fix wrong dump of the vxlan offloads device capability + * net/mlx4_en: Handle vxlan steering rules for mac address changes + * net/mlx4_core: Load the IB driver when the device supports IBoE + * net/mlx4_en: Deregister multicast vxlan steering rules when going down + * net-gro: Prepare GRO stack for the upcoming tunneling support + * net-gre-gro: Add GRE support to the GRO stack + * net: gro: change GRO overflow strategy + * net: Add GRO support for UDP encapsulating protocols + * net: Export gro_find_by_type helpers + * net: Add GRO support for vxlan traffic + * net/udp_offload: Handle static checker complaints + * net/ipv4: Use non-atomic allocation of udp offloads structure instance + * net/vxlan: Go over all candidate streams for GRO matching + * gre_offload: statically build GRE offloading support + * net/mlx4_core: pass pci_device_id.driver_data to __mlx4_init_one during reset + * net/mlx4: Set number of RX rings in a utility function + * net/mlx4: Fix limiting number of IRQ's instead of RSS queues + * net/mlx4_en: Fix bad use of dev_id + * net/mlx4_en: Fix UP limit in ieee_ets->prio_tc + * net/mlx4_en: Verify mlx4_en module parameters + * net/mlx4_en: Pad ethernet packets smaller than 17 bytes + * net/mlx4_en: Move queue stopped/waked counters to be per ring + * net/mlx4: Replace mlx4_en_mac_to_u64() with mlx4_mac_to_u64() + * net/mlx4_en: Fix selftest failing on non 10G link speed + * net/mlx4_core: Fix sparse warning + * net/mlx4_en: Use union for BlueFlame WQE + * net/mlx4_en: Change Connect-X description in kconfig + * net/mlx4_en: mlx4_en_verify_params() can be static + * IB/mlx5_core: remove unreachable function call in module init + * mlx4: Adjust QP1 multiplexing for RoCE/SRIOV + * mlx4_core: For RoCE, allow slaves to set the GID entry at that slave's index + * mlx4: In RoCE allow guests to have multiple GIDS + * mlx4: Add ref counting to port MAC table for RoCE + * mlx4: Implement IP based gids support for RoCE/SRIOV + * mlx4_ib: Fix SIDR support of for UD QPs under SRIOV/RoCE + * mlx4: Activate RoCE/SRIOV + * mlx4: 
Call dev_kfree_skby_any instead of dev_kfree_skb. + * cxgb4/iw_cxgb4: Treat CPL_ERR_KEEPALV_NEG_ADVICE as negative advice + * cxgb4/iw_cxgb4: Doorbell Drop Avoidance Bug Fixes + * mlx4: Don't receive packets when the napi budget == 0 + * IB/mlx4_ib: Adapt code to use caps.num_ports instead of a constant + * net/mlx4: Add data structures to support N-Ports per VF + * net/mlx4: Add utils for N-Port VFs + * net/mlx4: Adapt code for N-Port VF + * net/mlx4: Adapt num_vfs/probed_vf params for single port VF + * mlx4: Use actual number of PCI functions (PF + VFs) for alias GUID logic + * RDMA/cxgb4: set error code on kmalloc() failure + * net/mlx4: USe one wrapper that returns -EPERM + * mlx4: Add support for CONFIG_DEV command + * net/mlx4: Implement vxlan ndo calls + * net/mlx4: Set proper build dependancy with vxlan + + [ Wen-chien Jesse Sung ] + + * SAUCE: Bluetooth: Give restart command more time to complete its job + - LP: #1301908 + + -- Tim Gardner Thu, 03 Apr 2014 06:06:15 -0600 + +linux (3.13.0-22.44) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1301562 + + [ dann frazier ] + + * [Config] enable linux-tools on arm64 + https://lists.ubuntu.com/archives/kernel-team/2014-April/041332.html + + [ Greg Kurz ] + + * SAUCE: powerpc/le: Big endian arguments for ppc_rtas() + - LP: #1289518 + + [ Mahesh Salgaonkar ] + + * SAUCE: powerpc/book3s: Fix CFAR clobbering issue in machine check + handler. + - LP: #1301424 + * SAUCE: powerpc/book3s: Recover from MC in sapphire on SCOM read via + MMIO. + - LP: #1301424 + * SAUCE: powerpc/book3s: Fix mc_recoverable_range buffer overrun issue. + - LP: #1301424 + + [ Paolo Pisati ] + + * [Config] armhf: USB_STORAGE=y + https://lists.ubuntu.com/archives/kernel-team/2014-April/041349.html + + [ Stefan Bader ] + + * SAUCE: kvm: Force preempt folding in kvm on i386 + - LP: #1268906 + + [ Tim Gardner ] + + * SAUCE: Drop lttng in favor of lttng-modules + The kernel version was down rev on an rc release. + + [ Tomas Winkler ] + + * SAUCE: (no-up) mei: me: do not load the driver if the FW doesn't + support MEI interface + - LP: #1301118 + + [ Upstream Kernel Changes ] + + * drm/i915: Deprecated UMS support + - LP: #1284816 + * powerpc/book3s: Split the common exception prolog logic into two + section. + - LP: #1301424 + * powerpc/book3s: Introduce exclusive emergency stack for machine check + exception. + - LP: #1301424 + * powerpc/book3s: handle machine check in Linux host. + - LP: #1301424 + * powerpc/book3s: Return from interrupt if coming from evil context. + - LP: #1301424 + * powerpc/book3s: Introduce a early machine check hook in cpu_spec. + - LP: #1301424 + * powerpc/book3s: Add flush_tlb operation in cpu_spec. + - LP: #1301424 + * powerpc/book3s: Flush SLB/TLBs if we get SLB/TLB machine check errors + on power7. + - LP: #1301424 + * powerpc/book3s: Flush SLB/TLBs if we get SLB/TLB machine check errors + on power8. + - LP: #1301424 + * powerpc/book3s: Decode and save machine check event. + - LP: #1301424 + * powerpc/book3s: Queue up and process delayed MCE events. + - LP: #1301424 + * powerpc/powernv: Remove machine check handling in OPAL. + - LP: #1301424 + * powerpc/powernv: Machine check exception handling. + - LP: #1301424 + * powerpc: Fix "attempt to move .org backwards" error + - LP: #1301424 + * powerpc: Fix endian issues in power7/8 machine check handler + - LP: #1301424 + * Move precessing of MCE queued event out from syscall exit path. 
+ - LP: #1301424 + + -- Andy Whitcroft Wed, 02 Apr 2014 15:58:48 +0100 + +linux (3.13.0-21.43) trusty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: kvm: BIOS disabled kvm support should be a warning + - LP: #1300247 + * SAUCE: nouveau: missing outputs should be warnings + - LP: #1300244 + + [ John Johansen ] + + * Revert "SAUCE: Add config option to disable new apparmor 3 semantics" + * Revert "SAUCE: apparmor: fix uninitialized lsm_audit membe" + * Revert "SAUCE: (no-up) apparmor: Fix tasks not subject to, reloaded + policy" + * Revert "SAUCE: apparmor: allocate path lookup buffers during init" + * Revert "SAUCE: apparmor: fix unix domain sockets to be mediated on + connection" + * Revert "SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot" + * SAUCE: (no-up) apparmor: Sync to apparmor3 - alpha6 snapshot + - LP: #1298611 + + [ Tetsuo Handa ] + + * SAUCE: kthread: Do not leave kthread_create() immediately upon SIGKILL. + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1300412 + * [Config] updateconfigs after AA patch set + * [Config] CONFIG_ZSWAP=n, CONFIG_ZBUD=n for all arches + * [Config] CONFIG_XILINX_LL_TEMAC=m for powerpc + * [Config] CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y for ppc64el + * [Config] CONFIG_WLAN=y for arm64 + * [Config] CONFIG_VORTEX=m for ppc64el + * [Config] CONFIG_WIMAX=m for ppc64el + * [Config] CONFIG_WATCHDOG=y for ppc64el + * [Config] CONFIG_VME_BUS=m for ppc64el + * [Config] CONFIG_VIRT_DRIVERS=y for ppc64el + * [Config] CONFIG_VIDEO_OUTPUT_CONTROL=m for ppc64el + * [Config] CONFIG_VERSION_SIGNATURE="" for powerpc64-emb + * [Config] CONFIG_UWB=m for ppc64el + + [ Upstream Kernel Changes ] + + * vhost: validate vhost_get_vq_desc return value + - CVE-2014-0055 + * net: use kfree_skb_list() helper + * skbuff: skb_segment: s/frag/nskb_frag/ + * skbuff: skb_segment: s/skb_frag/frag/ + * skbuff: skb_segment: s/skb/head_skb/ + * skbuff: skb_segment: s/fskb/list_skb/ + * skbuff: skb_segment: orphan frags before copying + - CVE-2014-0131 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.8 + + -- Tim Gardner Mon, 31 Mar 2014 12:38:11 -0600 + +linux (3.13.0-20.42) trusty; urgency=low + + [ Adam Conrad ] + + * [Packaging] Set bootloader and loader on ppc64el to grub + + [ Andy Whitcroft ] + + * rebase to v3.13.7 + * [Config] updateconfigs following rebase to v3.13.7 + * cloud-tools -- pull in init scripts for Hyper-V daemons + * cloud-tools -- detect Hyper-V VM to avoid starting + * cloud-tools -- update IF_NAME to DEVICE in hv_* scripts + - LP: #1295401 + * [Config] cloud-tools -- ensure we force older hv-kvp-daemon-init off + * [Config] fix up Breaks/Replaces on linux-cloud-tools-common to fix upgrades + + [ Emmanuel Grumbach ] + + * SAUCE: (no-up) iwlwifi: mvm: disable uAPSD due to bugs in the firmware + + [ James Bottomley ] + + * SAUCE: (no-up) fix our current target reap infrastructure + - LP: #1283604 + * SAUCE: (no-up) dual scan thread bug fix + - LP: #1283604 + + [ K. Y. 
Srinivasan ] + + * SAUCE: (no-up) Tools: hv: vssdaemon: Ignore VFAT mounts during the + Freeze operation + - LP: #1298192 + + [ Paolo Pisati ] + + * [Config] disable HW_RANDOM_EXYNOS, USB_DWC3_EXYNOS, + PHY_EXYNOS_MIPI_VIDEO + - LP: #1294353 + * [Config] armhf: generic: AHCI_IMX=y, SERIAL_FSL_LPUART=m + - LP: #1294951 + * [Config] armhf: generic: MFD_TI_AM335X_TSCADC and USB_DWC3_OMAP = m + - LP: #1294962 + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1298585 + * [Config] ignore.modules + * SAUCE: i2c-cpm: Add missing includes for powerpc + * [Config] CONFIG_ABX500_CORE=y for ppc64el + * [Config] CONFIG_ALX=m for powerpc + * [Config] CONFIG_ACENIC_OMIT_TIGON_I=n for ppc64el + * [Config] CONFIG_ACORN_PARTITION_*=n for ppc64el + * [Config] CONFIG_ANDROID=n for powerpc + * [Config] CONFIG_ASYNC_RAID6_TEST=m for ppc64el + * [Config] CONFIG_BIG_KEYS=y for ppc64el + * [Config] CONFIG_BLK_DEV_INTEGRITY=y for ppc64el + * [Config] CONFIG_BSD_PROCESS_ACCT=y for ppc64el + * [Config] CONFIG_BLK_DEV_SR_VENDOR=n for ppc64el + * [Config] CONFIG_CFG80211=m for ppc64el + * [Config] CONFIG_CHARGER_BQ24190=m for powerpc + * [Config] CONFIG_CHARGER_BQ24735=m for powerpc + * [Config] CONFIG_EXPERT=y for ppc64el + * [Config] CONFIG_ATA_SFF=y on ppc64el + * [Config] CONFIG_ATA_GENERIC=y for ppc64el, powerpc + * [Config] CONFIG_CHR_DEV_ST=m for ppc64el + * [Config] CONFIG_CHECKPOINT_RESTORE=y for ppc64el + * [Config] CONFIG_CHELSIO_T1_1G=y for ppc64el + * [Config] CONFIG_CHR_DEV_OSST=m for ppc64el + * [Config] CONFIG_CHR_DEV_SCH=m for ppc64el + * [Config] CONFIG_CPU_FREQ_STAT=y for powerpc + * [Config] CONFIG_DDR=y for ppc64el + * [Config] CONFIG_DEBUG_BUGVERBOSE=y for powerpc + * [Config] CONFIG_EXT4_USE_FOR_EXT23=y for powerpc, ppc64el + * [Config] CONFIG_E100=m, CONFIG_E1000=m, CONFIG_E1000E=m for ppc64el + * [Config] CONFIG_EZX_PCAP=n for all arches + * [Config] CONFIG_DYNAMIC_DEBUG=y for powerpc + * [Config] CONFIG_ENABLE_MUST_CHECK=n for ppc64el + * [Config] CONFIG_ENABLE_WARN_DEPRECATED=n for ppc64el + * [Config] CONFIG_FB_3DFX=m for all arches + * [Config] CONFIG_FB_MATROX=m for ppc64el + * [Config] CONFIG_FB_RADEON=m for ppc64el + * [Config] CONFIG_FB_SAVAGE_I2C=y for all arches + * [Config] CONFIG_FIREWIRE=m for ppc64el + * [Config] CONFIG_FTR_FIXUP_SELFTEST=n for ppc64el + * [Config] CONFIG_HAMRADIO=y for ppc64el + * [Config] CONFIG_I2C_CHARDEV=m for ppc64el + * [Config] CONFIG_I2C_MUX=m for ppc64el + * [Config] CONFIG_I2C_STUB=m for ppc64el + * [Config] CONFIG_I2O=m for ppc64el + * [Config] CONFIG_INET_XFRM_MODE_BEET=m, CONFIG_INET_XFRM_MODE_TRANSPORT=m, CONFIG_INET_XFRM_MODE_TUNNEL=m for ppc64el + * [Config] CONFIG_INFINIBAND_IPOIB_DEBUG=n, CONFIG_INFINIBAND_MTHCA_DEBUG=n for ppc64el + * [Config] CONFIG_INFINIBAND_NES=m, CONFIG_INFINIBAND_OCRDMA=m, CONFIG_INFINIBAND_QIB=m for ppc64el + * [Config] CONFIG_INPUT_FF_MEMLESS=m for ppc64el + * [Config] CONFIG_INTERVAL_TREE_TEST=m for ppc64el + * [Config] CONFIG_IPACK_BUS=m for ppc64el + * [Config] CONFIG_ISDN=y for ppc64el + * [Config] CONFIG_ISO9660_FS=m for ppc64el + * [Config] CONFIG_KGDB=y for ppc64el + * [Config] CONFIG_KVM_GUEST=y for ppc64el + * [Config] CONFIG_L2TP_V3=y for powerpc + * [Config] CONFIG_MAILBOX=y for ppc64el + * [Config] CONFIG_MD_LINEAR=m, CONFIG_MD_RAID0=m, CONFIG_MD_RAID1=m for ppc64el + * [Config] CONFIG_MEDIA_SUPPORT=m for ppc64el + * [Config] CONFIG_MEMORY=y for ppc64el + * [Config] CONFIG_MEMSTICK=m for ppc64el + * [Config] CONFIG_MFD_SM501_GPIO=n for ppc64el + * [Config] CONFIG_MLX4_DEBUG=n for ppc64el + * [Config] 
CONFIG_MMC_BLOCK=m for ppc64el + * [Config] CONFIG_MOUSE_PS2=m for ppc64el + * [Config] CONFIG_NET_9P=m for ppc64el + * [Config] CONFIG_MSDOS_FS=m for ppc64el + * [Config] CONFIG_MSI_BITMAP_SELFTEST=n for ppc64el + * [Config] CONFIG_MTD=m for arm64 + * [Config] CONFIG_NETCONSOLE=m for ppc64el + * [Config] CONFIG_NETFILTER_XT_TARGET_NOTRACK=m for ppc64el + * [Config] CONFIG_NETPOLL_TRAP=n for ppc64el + * [Config] CONFIG_NET_IPIP=m for ppc64el + * [Config] CONFIG_NET_TEAM=m for all arches + * [Config] CONFIG_NFC=m for ppc64el + * [Config] CONFIG_NL80211_TESTMODE=n for all arches + * [Config] CONFIG_NLS_CODEPAGE_437=y for powerpc + * [Config] CONFIG_NLS_ASCII=m, CONFIG_NLS_ISO8859_1=m, CONFIG_NLS_UTF8=m for ppc64el + * [Config] CONFIG_NOP_USB_XCEIV=m for ppc64el + * [Config] CONFIG_NOTIFIER_ERROR_INJECTION=m for ppc64el + * [Config] CONFIG_OPROFILE=m for ppc64el + * [Config] CONFIG_PARPORT_1284=y for ppc64el + * [Config] CONFIG_PARPORT_AX88796=m, CONFIG_PARPORT_PC_FIFO=y, CONFIG_PARPORT_SERIAL=m for ppc64el + * [Config] CONFIG_PCI_IOV=y, CONFIG_PCI_PASID=y, CONFIG_PCI_PRI=y, CONFIG_PCI_REALLOC_ENABLE_AUTO=y, CONFIG_PCI_STUB=m for ppc64el + * [Config] CONFIG_PCNET32=m for ppc64el + * [Config] CONFIG_SCSI_DH_EMC=m for ppc64el + * [Config] CONFIG_SCSI_DH_HP_SW=m for ppc64el + * [Config] CONFIG_SCSI_FC_ATTRS=m for ppc64el + * [Config] CONFIG_SCSI_IPR=m for ppc64el + * [Config] CONFIG_SCSI_LOGGING=y for ppc64el + * [Config] CONFIG_SCSI_OSD_INITIATOR=m for ppc64el + * [Config] CONFIG_SCSI_SCAN_ASYNC=y for ppc64el + * [Config] CONFIG_SCSI_SYM53C8XX_2=m for ppc64el + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Provide an ubuntu/i915 driver for Broadwell graphics + - LP: #1294144 + * SAUCE: i915_bdw: Add DP_AUX definitions + - LP: #1294144 + * SAUCE: i915_bdw: Update intel_ips.h file location + - LP: #1294144 + * SAUCE: i915_bdw: Revert "ACPI / i915: replace open-coded _DSM code with + helper functions" + - LP: #1294144 + * SAUCE: i915_bdw: Add an include back to intel_opregion.c + - LP: #1294144 + * SAUCE: i915_bdw: Only support Broadwell with ubuntu/i915 driver + - LP: #1294144 + * SAUCE: i915_bdw: Add i915_bdw_gpu_*() calls for ubuntu/i915 + - LP: #1294144 + * i915_bdw: [Config] Enable CONFIG_DRM_I915_BDW=m, and disable UMS + - LP: #1294144 + * SAUCE: i915_bdw: Rename ubuntu/i915 driver to i915_bdw + - LP: #1294144 + + [ Upstream Kernel Changes ] + + * netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages + - CVE-2014-2523 + * Input: ALPS - add support for "Dolphin" devices + - LP: #1190867 + * x86/mm/pageattr: Lookup address in an arbitrary PGD + - LP: #1297658 + * x86/mm/pageattr: Add a PGD pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PUD pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PMD pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PTE pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PUD error unwinding path + - LP: #1297658 + * x86/mm/pageattr: Add last levels of error path + - LP: #1297658 + * x86/mm/cpa: Map in an arbitrary pgd + - LP: #1297658 + * x86/efi: Runtime services virtual mapping + - LP: #1297658 + * x86/efi: Check krealloc return value + - LP: #1297658 + * Include apm-mustang.dtb in kernel-image udeb + * drm/i915: add i915_reset_count + - LP: #1294144 + * drm/i915: add i915_get_reset_stats_ioctl + - LP: #1294144 + * drm: add DRM_INFO_ONCE() to print a one-time DRM_INFO() message + - LP: #1294144 + * drm: provide a helper for the encoder possible_crtcs mask + - LP: 
#1294144 + * drm: Move drm_encoder_crtc_ok() to core + - LP: #1294144 + * drm: Pass the display mode to drm_calc_timestamping_constants() + - LP: #1294144 + * drm: Pass the display mode to drm_calc_vbltimestamp_from_scanoutpos() + - LP: #1294144 + * drm: Pass 'flags' from the caller to .get_scanout_position() + - LP: #1294144 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.7 + - LP: #1268468 + + -- Andy Whitcroft Mon, 24 Mar 2014 10:41:31 +0000 + +linux (3.13.0-19.40) trusty; urgency=low + + [ Tim Gardner ] + + * [Config] Add new mlx modules to d-i + * [Config] Fix d-i spelling error: ahxi_xgene --> ahci_xgene + * [Config] Added Multi-Arch support for linux-headers-PKGVER-ABINUM, + linux-tools-common, and linux-cloud-tools-common + - LP: #1295112 + * Release Tracking Bug + - LP: #1296484 + + [ Upstream Kernel Changes ] + + * Drivers: hv: vmbus: Specify the target CPU that should receive + notification + - LP: #1295813 + + -- Tim Gardner Sun, 23 Mar 2014 19:54:50 -0600 + +linux (3.13.0-19.39) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1295462 + + [ Andy Whitcroft ] + + * ubuntu: aufs3 -- update update scripting + * ubuntu: AUFS -- update to 75dbb997b5812e16771bec20e92449ba0b1705d9 + + [ Anup Patel ] + + * SAUCE: (no-up) KVM: Documentation: Fix typo for KVM_ARM_VCPU_INIT ioctl + * SAUCE: (no-up) arm64: KVM: Force undefined exception for Guest SMC + intructions + + [ dann frazier ] + + * SAUCE: (no-up) Fix pcie-xgene build failure + + [ Emmanuel Grumbach ] + + * SAUCE: iwlwifi: mvm: send udev event upon firmware error to dump logs + + [ Feng Kan ] + + * SAUCE: (no-up) power: reset: Add generic SYSCON register mapped reset + - LP: #1284433 + * SAUCE: (no-up) arm64: dts: Add X-Gene reboot driver dts node + - LP: #1284433 + + [ Hans de Goede ] + + * SAUCE: (no-up) libahci: Allow drivers to override start_engine + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: Add support for devices with more then 1 + clock + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: Add support for an optional regulator for + sata-target power + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: Add enable_ / disable_resources helper + functions + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: "Library-ise" ahci_probe functionality + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: "Library-ise" suspend / resume + functionality + - LP: #1282920 + + [ Iyappan Subramanian ] + + * SAUCE: (no-up) Documentation: APM X-Gene SoC Ethernet DTS binding + documentation + * SAUCE: (no-up) arm64: dts: APM X-Gene SoC Ethernet device tree nodes + * SAUCE: (no-up) drivers: net: APM X-Gene SoC Ethernet base driver + * SAUCE: (no-up) drivers: net: APM X-Gene SoC Ethernet driver error + handling + * SAUCE: (no-up) drivers: net: APM X-Gene SoC Ethernet driver ethtool + support + + [ Loc Ho ] + + * SAUCE: (no-up) Documentation: Add APM X-Gene SoC 15Gbps Multi-purpose + PHY driver binding documentation + - LP: #1282920 + * SAUCE: (no-up) PHY: add APM X-Gene SoC 15Gbps Multi-purpose PHY driver + - LP: #1282920 + * SAUCE: (no-up) arm64: Add APM X-Gene SoC 15Gbps Multi-purpose PHY DTS + entries + - LP: #1282920 + * SAUCE: (no-up) Documentation: Add documentation for the APM X-Gene SoC + SATA host controller DTS binding + * SAUCE: (no-up) ata: Add APM X-Gene SoC AHCI SATA host controller driver + * SAUCE: (no-up) ata: Fix compiler warning with APM X-Gene host + controller driver + * SAUCE: (no-up) arm64: Add APM X-Gene SoC AHCI SATA host controller DTS + entries + * SAUCE: (no-up) clk: arm64: Fix the 
clock-names property for pcppll, + socpll, and socplldiv2 + + [ Marc Zyngier ] + + * SAUCE: (no-up) arm64: KVM: force cache clean on page fault when caches + are off + * SAUCE: (no-up) arm64: KVM: allows discrimination of AArch32 sysreg + access + * SAUCE: (no-up) arm64: KVM: trap VM system registers until MMU and + caches are ON + * SAUCE: (no-up) ARM: KVM: introduce kvm_p*d_addr_end + * SAUCE: (no-up) arm64: KVM: flush VM pages before letting the guest + enable caches + * SAUCE: (no-up) ARM: KVM: force cache clean on page fault when caches + are off + + [ Ming Lei ] + + * SAUCE: (no-up) arm64: apm-storm: support ttyS1 + * SAUCE: (no-up) apm: pcie: fix hang when no card connected + + [ Paolo Pisati ] + + * [Config] armhf: generic: disable CPU_IDLE + * [Config] armhf: CPU_FREQ=y + * [Config] armhf: TI_THERMAL=y + + [ Rameshwar Prasad Sahu ] + + * SAUCE: (no-up) rtc: Add X-Gene SoC Real Time Clock Driver + + [ Ravi Patel ] + + * SAUCE: (no-up) Documentation: misc-devices: APM X-Gene SoC QMTM + * SAUCE: (no-up) Documentation: devicetree: bindings for APM X-Gene SoC + QMTM + * SAUCE: (no-up) misc: xgene: base driver for APM X-Gene SoC QMTM + * SAUCE: (no-up) arm64: boot: dts: entries for APM X-Gene SoC QMTM + * SAUCE: (no-up) misc: xgene: error handling for APM X-Gene SoC QMTM + + [ Roger Quadros ] + + * SAUCE: (no-up) ata: ahci_platform: Manage SATA PHY + - LP: #1282920 + * SAUCE: (no-up) ata: ahci_platform: runtime resume the device before use + - LP: #1282920 + + [ Seth Forshee ] + + * SAUCE: iwlwifi: mvm: Free sram dump immediately after using it + * SAUCE: iwlwifi: mvm: Only notify userspace of fw error dump when one is + created + * SAUCE: iwlwifi: mvm: Don't create fw error dump if there's nothing to + dump + + [ Tanmay Inamdar ] + + * SAUCE: (no-up) arm64: PCI(e) arch support + * SAUCE: (no-up) pci: APM X-Gene PCIe controller driver + * SAUCE: (no-up) arm64: dts: APM X-Gene PCIe device tree nodes + * SAUCE: (no-up) dt-bindings: pci: xgene pcie device tree bindings + * SAUCE: (no-up) MAINTAINERS: entry for APM X-Gene PCIe host driver + + [ Tim Gardner ] + + * [Config] CONFIG_POWER_RESET_SYSCON=y for arm64 + * SAUCE: (no-up) Restrict CONFIG_POWER_RESET_SYSCON to arm64 only + * SAUCE: iwlwifi: Fix FTBS for armhf + * [Config] CONFIG_PHY_XGENE=m for arm64 + * [Config] CONFIG_AHCI_XGENE=m for arm64 + * [Config] CONFIG_NET_XGENE=m for arm64 + * [Config] CONFIG_RTC_DRV_XGENE=m for arm64 + * SAUCE: (no-up) Add drivers/phy/phy-core.ko to generic inclusion list + * [Config] Enable PCI for arm64 + * SAUCE: arm64: export __cpu_clear_user_page for modules + * SAUCE: ARCH_HAS_DMA_GET_REQUIRED_MASK=n for arm64 + * [Config] CONFIG_PCI_XGENE=y + * [Config] CONFIG_PHY_XGENE=y for arm64 + * [Config] CONFIG_RTC_DRV_XGENE=y for arm64 + * d-i: Add ahxi_xgene and xgene-enet + + [ Upstream Kernel Changes ] + + * Staging: rtl8821ae: rc.c: fix up function prototypes + * Staging: rtl8821ae: removed unused functions and variables + * Staging: rtl8821ae: add TODO file + * Staging: rtl8812ae: disable due to build errors + * staging: r8821ae: Fix build problems + * staging: r8821ae: Enable build by reverting BROKEN marking + * staging: rtl8821ae: Fixed the size of array to macro as discussed by + Linus + * staging/rtl8821ae: fix build, depends on MAC80211 + * powerpc: Reclaim two unused thread_info flag bits + * powerpc: Don't corrupt transactional state when using FP/VMX in kernel + * powerpc: Fix transactional FP/VMX/VSX unavailable handlers + * Drivers: hv: Ballon: Make pressure posting thread sleep interruptibly 
+ - LP: #1294253 + * mac80211: don't validate unchanged AP bandwidth while tracking + - LP: #1294558 + * phy-core: Don't propagate -ENOSUPP from phy_pm_runtime_get_sync to + caller + * CONFIG_XGENE_QMTM=m for arm64 + + -- Tim Gardner Thu, 20 Mar 2014 21:51:11 -0400 + +linux (3.13.0-18.38) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1293725 + * [Config] Add hv_balloon to d-i virtio-modules + - LP: #1292216 + + [ Andy Whitcroft ] + + * [Config] d-i -- add virtio_scsi to virtio-modules + - LP: #1288607 + + [ Colin Ian King ] + + * SAUCE: intel_pstate: inform user that thermald is worth considering + + [ dann frazier ] + + * [Config] arm64: KVM=y + + [ Gerd Hoffmann ] + + * SAUCE: vmbus: add missing breaks + - LP: #1287398 + * SAUCE: vmbus: use resource for hyperv mmio region + - LP: #1287398 + * SAUCE: hyperv-fb: add support for generation 2 virtual machines. + - LP: #1287398 + * SAUCE: hyperv-fb: kick off efifb early + - LP: #1287398 + + [ Haiyang Zhang ] + + * SAUCE: hyperv: Change the receive buffer size for legacy hosts + - LP: #1290151 + + [ K. Y. Srinivasan ] + + * SAUCE: Drivers: hv: vmbus: Extract the mmio information from DSDT + - LP: #1287398 + + [ Paolo Pisati ] + + * SAUCE: leds-gpio: of: introduce MODULE_DEVICE_TABLE for module + autoloading + * [Config] amhf: LEDS_TRIGGER_HEARTBEAT=y + + [ Upstream Kernel Changes ] + + * Revert "xhci 1.0: Limit arbitrarily-aligned scatter gather." + - LP: #1293361 + * Revert "USBNET: ax88179_178a: enable tso if usb host supports sg dma" + - LP: #1293361 + * powerpc/tm: Fix crash when forking inside a transaction + * AX88179_178A: Add VID:DID for Lenovo OneLinkDock Gigabit LAN + - LP: #1291890 + * drm/vmwgfx: Fix a surface reference corner-case in legacy emulation + mode + * Input: wacom - scale up touch width and height values for Intuos Pro + * Input: wacom - make sure touch_max is set for touch devices + * Input: wacom - add support for three new Intuos devices + * Input: wacom - add reporting of SW_MUTE_DEVICE events + * Input: wacom - fix wacom->shared guards for dual input devices + * Input: wacom - add support for DTU-1031 + * net: fix for a race condition in the inet frag code + - CVE-2014-0100 + * net: sctp: fix sctp_sf_do_5_1D_ce to verify if we/peer is AUTH capable + - CVE-2014-0101 + * KEYS: Make the keyring cycle detector ignore other keyrings of the same + name + - CVE-2014-0102 + * ipv6: don't set DST_NOCOUNT for remotely added routes + - CVE-2014-2309 + + -- Andy Whitcroft Thu, 13 Mar 2014 10:06:05 +0000 + +linux (3.13.0-17.37) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1290484 + * Revert "SAUCE: hv: Add vss daemon to Makefile" + * Revert "SAUCE: (no-up) tools/hv: add basic Makefile" + * Revert "SAUCE: (no-up) hv -- bodge hv_kvp_daemon so it can use the + local linux/hyperv.h" + * Revert "SAUCE: (no-up) hv -- bodge hv_vss_daemon so it can use the + local linux/hyperv.h" + * Revert "SAUCE: SELinux: security_load_policy: Silence frame-larger-than + warning" + * Revert "SAUCE: ARM: OMAP4460: cpuidle: Extend + PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD on cpuidle" + * ubuntu: overlayfs -- use kernel service credentials for copy up and + xattr manipulations + * [Packaging] tools -- hv tools build correctly against the built headers + * [Packaging] cloud-tools -- add the hv_fcopy_daemon to the package + * rebase to v3.13.6 + + [ Anton Blanchard ] + + * SAUCE: ibmveth: Fix endian issues with MAC addresses + + [ Bjarke Istrup Pedersen ] + + * SAUCE: hv: Add hyperv.h to uapi 
headers + - LP: #1282700 + + [ Fengguang Wu ] + + * SAUCE: Drivers: hv: fcopy_open() can be static + - LP: #1282700 + + [ K. Y. Srinivasan ] + + * SAUCE: Drivers: hv: Implement the file copy service + - LP: #1282700 + + [ Tim Gardner ] + + * SAUCE: (no-up) mei: Fix stable update misapplication + * SAUCE: (no-up) mei_me: Add module parameter to disable MSI + + [ Upstream Kernel Changes ] + + * arm64: KVM: Add Kconfig option for max VCPUs per-Guest + * arm64: KVM: Support X-Gene guest VCPU on APM X-Gene host + * ACPI / EC: Clear stale EC events on Samsung systems + * drm/ttm: Fix TTM object open regression + * SELinux: security_load_policy: Silence frame-larger-than warning + * ARM: OMAP4460: cpuidle: Extend PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD on + cpuidle + + [ Upstream Kernel Changes ] + + * rebase to v3.13.6 + - LP: #1282369 + - LP: #1260303 + + -- Tim Gardner Wed, 05 Mar 2014 06:52:34 -0700 + +linux (3.13.0-16.36) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1287903 + + [ Andy Whitcroft ] + + * Revert "[Config] lowlatency -- turn CONFIG_IRQ_FORCED_THREADING_DEFAULT + off temporarily" + + [ Chris Bainbridge ] + + * SAUCE: x86: set Pentium M as PAE capable + - LP: #930447 + + [ Dave Jones ] + + * SAUCE: taint: repurpose TAINT_UNSAFE_SMP to TAINT_CPU_OUT_OF_SPEC + - LP: #930447 + + [ Paolo Pisati ] + + * [Config] SND_DAVINCI_SOC && SND_AM33XX_SOC_EVM =m + * [Config] armhf: DRM_TILCDC=m + + [ Philippe Bergheaud ] + + * SAUCE: powerpc: fix xmon disassembler for little-endian + - LP: #1286255 + + [ Tim Gardner ] + + * [Config] CONFIG_MICROCODE_EARLY=y + * [Config] CONFIG_R8821AE=m + * [Config] Add some virtio drivers to -virtual + - LP: #1287401 + * [Config] inclusion-list: vesafb and virtio_balloon are built-in + * SAUCE: vmwgfx: Expose U32_MAX + + [ Upstream Kernel Changes ] + + * Revert "drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm + reservation calls behave like reservation calls"" + * Revert "drm/vmwgfx: Fix the driver for large dma addresses" + * usb: ehci: fix deadlock when threadirqs option is used + - LP: #1274987, #1279081 + * Staging: rtl8812ae: Add Realtek 8821 PCI WIFI driver + - LP: #1287298 + * intel_pstate: Remove periodic P state boost + * intel_pstate: Add trace point to report internal state. + * intel_pstate: Take core C0 time into account for core busy calculation + * intel_pstate: Use LFM bus ratio as min ratio/P state + * intel_pstate: Add support for Baytrail turbo P states + * intel_pstate: Change busy calculation to use fixed point math. 
+ * PM / hibernate: Fix restore hang in freeze_processes() + * ipmi: remove deprecated IRQF_DISABLED + * ipmi: use USEC_PER_SEC instead of 1000000 for more meaningful + * ipmi: fix timeout calculation when bmc is disconnected + * ipmi: Cleanup error return + * ipmi: Add missing rv in ipmi_parisc_probe() + * drm/ttm: ttm object security fixes for render nodes + * drivers: gpu: Mark functions as static in vmwgfx_kms.c + * drivers: gpu: Mark functions as static in vmwgfx_buffer.c + * drivers: gpu: Mark functions as static in vmwgfx_fence.c + * drm/vmwgfx: Fix the driver for large dma addresses + * drm/vmwgfx: Update the svga3d register header file for new device + version + * drm/vmwgfx: Update the driver user-space interface for guest-backed + objects + * drm/vmwgfx: Replace vram_size with prim_bb_mem for calculation of max + resolution + * drm/vmwgfx: Update the svga register definition + * drm/vmwgfx: Adapt capability reporting to new hardware version + * drm/vmwgfx: Add MOB management + * drm/vmwgfx: Hook up MOBs to TTM as a separate memory type + * drm/vmwgfx: Read bounding box memory from the appropriate register + * drm/vmwgfx: Add the possibility to validate a buffer as a MOB + * drm/vmwgfx: Hook up guest-backed queries + * drm/vmwgfx: Detach backing store from its resources when it is evicted + * drm/vmwgfx: Hook up guest-backed contexts + * drm/vmwgfx: Hook up guest-backed surfaces + * drm/vmwgfx: Add guest-backed shaders + * drm/vmwgfx: Validate guest-backed shader const commands + * drm/vmwgfx: Add new unused (by user-space) commands to the verifier + * drm/vmwgfx: Enable 3D for new hardware version + * drm/vmwgfx: Fix up the vmwgfx_drv.h header for new files + * drm/vmwgfx: Extend the command verifier to handle guest-backed on / off + * drm/vmwgfx: Make sure that the multisampling is off + * drm/vmwgfx: Implement a buffer object synccpu ioctl. 
+ * drm/vmwgfx: Add a parameter to get max MOB memory size + * drm/vmwgfx: Block the BIND_SHADERCONSTS command + * drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf + * drm/vmwgfx: Persistent tracking of context bindings + * drm/vmwgfx: Ditch the vmw_dummy_query_bo_prepare function + * drm/vmwgfx: Use the linux DMA api also for MOBs + * drm/vmwgfx: Update otable definitions + * drm/vmwgfx: Fix surface framebuffer check for guest-backed surfaces + * drm/vmwgfx: Implement 64-bit Otable- and MOB binding v2 + * drm/vmwgfx: Silence the device command verifier + * drm/vmwgfx: Invalidate surface on non-readback unbind + * drm/vmwgfx: Fix recently introduced sparse / smatch warnings and errors + * drm/vmwgfx: Don't commit staged bindings if execbuf fails + * drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm reservation + calls behave like reservation calls" + * drm/vmwgfx: Fix SET_SHADER_CONST emulation on guest-backed devices + * drm/vmwgfx: Fix legacy surface reference size copyback + * drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2 + * drm/vmwgfx: Detect old user-space drivers and set up legacy emulation + v2 + * drm/vmwgfx: Reemit context bindings when necessary v2 + * vmwgfx: Fix unitialized stack read in vmw_setup_otable_base + * drm/vmwgfx: Fix a couple of sparse warnings and errors + * drm/vmwgfx: Get maximum mob size from register SVGA_REG_MOB_MAX_SIZE + * drm/vmwgfx: unlock on error path in vmw_execbuf_process() + * drm/vmwgfx: Remove stray const + * drm/vmwgfx: Fix possible integer overflow + * drm/vmwgfx: Fix command defines and checks + * drm/vmwgfx: Remove some unused surface formats + * drm/vmwgfx: Make sure backing mobs are cleared when allocated. Update + driver date. + * drm/vmwgfx: avoid null pointer dereference at failure paths + + -- Tim Gardner Mon, 03 Mar 2014 13:04:10 -0700 + +linux (3.13.0-15.35) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1287175 + + [ Andy Whitcroft ] + + * [Config] tools -- enable cpupower on ppc64el + * [Config] ppc64el -- enable perf tools + * [Config] powerpc -- enable perf tools + * [Config] ppc64el -- reduce MAX_ORDER with 64k pages + * [Config] ppc64el -- switch to 64K system pages + + [ Benjamin Herrenschmidt ] + + * SAUCE: powerpc/powernv: Add iommu DMA bypass support for IODA2 + + [ Paul Mackerras ] + + * SAUCE: powerpc: Increase stack redzone for 64-bit userspace to 512 + bytes + + [ Upstream Kernel Changes ] + + * perf trace: Fix ioctl 'request' beautifier build problems on !(i386 || + x86_64) arches + * kvm, vmx: Really fix lazy FPU on nested guest + - LP: #1278531 + + -- Tim Gardner Mon, 03 Mar 2014 13:22:56 +0000 + +linux (3.13.0-14.34) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1285851 + + [ Andy Whitcroft ] + + * [Config] d-i -- add hyperv_keyboard to serial-modules udeb + - LP: #1285434 + + [ Hannes Frederic Sowa ] + + * SAUCE: ipv6: honor IPV6_PKTINFO with v4 mapped addresses on sendmsg + - LP: #1284535 + + [ Jason Wang ] + + * SAUCE: x86, hyperv: bypass the timer_irq_works() check + - LP: #1282693 + + [ Paolo Pisati ] + + * [Config] disable FB_OMAP2, DRM_OMAP=m + + [ Upstream Kernel Changes ] + + * ipv6: make IPV6_RECVPKTINFO work for ipv4 datagrams + - LP: #1284535 + * mei: remove flash_work_queue + * mei: drop redundant list_del_init + * mei: cleanup mei_irq_read_handler + * mei: enable marking internal commands + * mei: me: set dma mask using DMA mapping API + * Documentation/misc-devices/mei/mei-amt-version.c: remove unneeded 
call + of mei_deinit() + * mei: do not run reset flow from the interrupt thread + * mei: nfc: mei_nfc_free has to be called under lock + * mei: fix syntax in comments and debug output + * mei: revamp mei reset state machine + * mei: limit the number of consecutive resets + * mei: set client's read_cb to NULL when flow control fails + + -- Tim Gardner Wed, 26 Feb 2014 08:43:20 -0500 + +linux (3.13.0-13.33) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1284614 + * [Config] powerpc -- CONFIG_SCSI_IBMVSCSI=y + * [Config] CONFIG_RT_GROUP_SCHED=n + - LP: #1284731 + + [ Stefan Bader ] + + * [Config] Revert back to build IPMI as a module on all arches + + [ Tim Gardner ] + + * rebase to v3.13.5 + + [ Upstream Kernel Changes ] + + * cifs: ensure that uncached writes handle unmapped areas correctly + * rebase to v3.13.5 + + -- Andy Whitcroft Tue, 25 Feb 2014 12:15:09 +0000 + +linux (3.13.0-12.32) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1283074 + + * Revert "SAUCE: Drivers: hv: vmbus: Specify the target CPU that should + receive notification" + * [Packaging] tools -- clean up generic so it gives more targeted hints + * [Packaging] tools -- fix relative links + * rebase to v3.13.4 + + [ Colin Ian King ] + + * [Config][v2] armhf, arm64, powerpc, ppc64el: IPMI_SI=m + + [ Paolo Pisati ] + + * [Config] armhf: SOC_AM33XX=y + * [Config] armhf: TI_CPSW, TI_CPTS and TI_DAVINCI_[CPDMA|MDIO] = y + * [Config] armhf: RTC_DRV_OMAP=y + * [Config] build beaglebone and beaglebone black dtbs + + [ Tony Breeds ] + + * SAUCE: powerpc/le: Ensure that the 'stop-self' RTAS token is handled + correctly + - LP: #1282712 + + [ Upstream Kernel Changes ] + + * Drivers: hv: vmbus: Specify the target CPU that should receive + notification + - LP: #1282694 + * Drivers: hv: vmbus: Don't timeout during the initial connection with + host + - LP: #1282694 + * hyperv: Add support for physically discontinuous receive buffer + - LP: #1282695 + * Input: hyperv-keyboard - pass through 0xE1 prefix + - LP: #1282699 + * rebase to v3.13.4 + + -- Andy Whitcroft Fri, 21 Feb 2014 12:51:17 +0000 + +linux (3.13.0-11.31) trusty; urgency=low + + [ Colin Ian King ] + + * [config] Set IPMI support default to "y" + + [ Tim Gardner ] + + * [Config] CONFIG_USER_NS=y for ppc64el + * [Config] CONFIG_RESOURCE_COUNTERS=y for ppc64el + * Release Tracking Bug + - LP: #1282243 + + -- Tim Gardner Wed, 19 Feb 2014 12:04:43 -0500 + +linux (3.13.0-10.30) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] tools -- fix up do_tools interface which is assumed by + external tooling + + [ Tim Gardner ] + + * [Config] CONFIG_CGROUP_SCHED=y for ppc64el + * [Config] CONFIG_BLK_CGROUP=y for ppc64el + * [Config] CONFIG_USB_XHCI_HCD=y for ppc64el + * [Config] CONFIG_CGROUP_PERF=y for ppc64el + * Release Tracking Bug + - LP: #1281783 + + [ Upstream Kernel Changes ] + + * be2net: Use MCC_CREATE_EXT_V1 cmd for Skyhawk-R + - LP: #1281446 + * be2net: don't set "pport" field when querying "pvid" + - LP: #1281446 + * be2net: Log the profile-id used by FW during driver initialization + - LP: #1281446 + * be2net: do not call be_set/get_fw_log_level() on Skyhawk-R + - LP: #1281446 + * be2net: ignore mac-addr set call for an already programmed mac-addr + - LP: #1281446 + * be2net: fix incorrect setting of cmd_privileges for VFs + - LP: #1281446 + * be2net: Remove "10Gbps" from driver description string + - LP: #1281446 + * be2net: do not use frag index in the RX-compl entry + - LP: #1281446 + * be2net: 
use GET_MAC_LIST cmd to query mac-address from a pmac-id + - LP: #1281446 + * be2net: cleanup wake-on-lan code + - LP: #1281446 + * be2net: update driver version to 10.0.x + - LP: #1281446 + * be2net: Fix be_vlan_add/rem_vid() routines + - LP: #1281446 + + -- Tim Gardner Tue, 18 Feb 2014 13:13:24 -0700 + +linux (3.13.0-9.29) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1280291 + * [Config] lowlatency -- turn CONFIG_IRQ_FORCED_THREADING_DEFAULT off + temporarily + * [Packaging] tools -- split out "cloud only" tools into a cloud tools + package + + [ Paolo Pisati ] + + * [Config] armhf: DISPLAY_CONNECTOR_HDMI && DISPLAY_ENCODER_TPD12S015 =y + + [ Tim Gardner ] + + * rebase to v3.13.3 + + [ Upstream Kernel Changes ] + + * Revert "mmc: sdhci-pci: Fix possibility of chip->fixes being null" + * Revert "mmc: sdhci-pci: Fix BYT sd card getting stuck in runtime + suspend" + * NVMe: Avoid shift operation when writing cq head doorbell + - LP: #1256155 + * NVMe: remove deprecated IRQF_DISABLED + - LP: #1256155 + * NVMe: compat SG_IO ioctl + - LP: #1256155 + * NVMe: Fix lockdep warnings + - LP: #1256155 + * NVMe: Cache dev->pci_dev in a local pointer + - LP: #1256155 + * NVMe: Device resume error handling + - LP: #1256155 + * NVMe: Schedule reset for failed controllers + - LP: #1256155 + * NVMe: Abort timed out commands + - LP: #1256155 + * NVMe: Surprise removal handling + - LP: #1256155 + * NVMe: Async IO queue deletion + - LP: #1256155 + * NVMe: Dynamically allocate partition numbers + - LP: #1256155 + * NVMe: Disable admin queue on init failure + - LP: #1256155 + * NVMe: Add a pci_driver shutdown method + - LP: #1256155 + * NVMe: Include device and queue numbers in interrupt name + - LP: #1256155 + * NVMe: Correct uses of INIT_WORK + - LP: #1256155 + * NVMe: Namespace use after free on surprise removal + - LP: #1256155 + * intel_pstate: Add setting voltage value for baytrail P states. + - LP: #1270736 + * mmc: sdhci-pci: break out definitions to header file + * mmc: sdhci-pci: add support of O2Micro/BayHubTech SD hosts + * mmc: sdhci-pci: Fix BYT sd card getting stuck in runtime suspend + * mmc: sdhci-pci: Fix possibility of chip->fixes being null + + [ Upstream Kernel Changes ] + + * rebase to v3.13.3 + + -- Tim Gardner Thu, 13 Feb 2014 06:08:09 -0700 + +linux (3.13.0-8.28) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1278963 + + [ Paolo Pisati ] + + * [Config] armhf: RTC_DRV_PL031=y + + [ Serge Hallyn ] + + * SAUCE: Overlayfs: allow unprivileged mounts + + [ Upstream Kernel Changes ] + + * kexec: add sysctl to disable kexec_load + - LP: #1259570 + * SELinux: Fix kernel BUG on empty security contexts. 
+ - CVE-2014-1874 + + -- Tim Gardner Tue, 11 Feb 2014 08:35:39 -0500 + +linux (3.13.0-8.27) trusty; urgency=low + + [ John Johansen ] + + * SAUCE: Add config option to disable new apparmor 3 semantics + -LP: #1270215 + + [ Tim Gardner ] + + * [debian] Fix indep_hdrs_pkg_name + - LP: #1134441 + * Update lttng to 00808267d3ba7cdcddfed7bec7e62a40463c1307 Version 2.4.0-rc3 + * Enabled lttng build + * Don't build lttng for armhf + lttng hates gcc-4.8 for armhf + * Release Tracking Bug + - LP: #1277309 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.2 + - LP: #1260303 + - LP: #1260303 + - LP: #1268468 + + -- Tim Gardner Thu, 06 Feb 2014 09:25:51 -0700 + +linux (3.13.0-7.26) trusty; urgency=low + + [ John Johansen ] + + * SAUCE: apparmor: fix uninitialized lsm_audit membe + - LP: #1268727 + * Add config option to optionally enable new apparmor 3 semantics + + [ Tim Gardner ] + + * [Config] Add lowlatency to getabis + * [Config] CONFIG_SECURITY_APPARMOR_AA3_SEMANTICS=y + - LP: #1270215 + * Release Tracking Bug + - LP: #1276810 + + [ Upstream Kernel Changes ] + + * x86, x32: Correct invalid use of user timespec in the kernel + - LP: #1274349 + - CVE-2014-0038 + + -- Tim Gardner Wed, 05 Feb 2014 15:49:44 -0500 + +linux (3.13.0-7.25) trusty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: fix fmd headers" + * SAUCE: fix fmd headers + + -- Andy Whitcroft Tue, 04 Feb 2014 09:36:09 +0000 + +linux (3.13.0-7.24) trusty; urgency=low + + [ Stefan Bader ] + + * [Config] Make vmwgfx driver enable the framebuffer device + + [ Tim Gardner ] + + * rebase to v3.13.1 + * [Config] CONFIG_NFS_FS=m for ppc64el + * [Config] CONFIG_X86_SYSFB=n + https://lists.ubuntu.com/archives/kernel-team/2014-February/038166.html + * Release Tracking Bug + - LP: #1275898 + + [ Upstream Kernel Changes ] + + * i2c: piix4: Add support for AMD ML and CZ SMBus changes + - LP: #1272525 + * i2c: piix4: Use different message for AMD Auxiliary SMBus Controller + - LP: #1272525 + * mm: ignore VM_SOFTDIRTY on VMA merging + - LP: #1274917 + * drm/radeon: disable dpm on BTC + - LP: #1266984 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.1 + + -- Tim Gardner Thu, 30 Jan 2014 15:24:48 +0000 + +linux (3.13.0-6.23) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] fix up architecture for linux-tools + + -- Andy Whitcroft Thu, 30 Jan 2014 09:00:41 +0000 + +linux (3.13.0-6.22) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] limit linux-udebs- to matching arch + * [Config] powerpc -- disable perf to fix FTBFS + * [Config] ppc64el -- fix up missing udebs + + -- Andy Whitcroft Wed, 29 Jan 2014 16:00:28 +0000 + +linux (3.13.0-6.21) trusty; urgency=low + + [ Andy Fleming ] + + * SAUCE: net: Add support for handling queueing in hardware + * SAUCE: of_mdio: Add of_phy_attach function + * SAUCE: phylib: Add generic 10G driver + * SAUCE: phylib: Support attaching to gen10g_driver + * SAUCE: phylib: Add Clause 45 read/write functions + + [ Andy Whitcroft ] + + * SAUCE: fix fmd headers + * [Packaging] lowlatency -- merge out of tree flavours + * SAUCE: allow IRQs to be irq-threaded by default via config + * [Config]: enable CONFIG_IRQ_FORCED_THREADING_DEFAULT for lowlatency + * [Config] powerpc -- fix up Build-depends: + * Release Tracking Bug + - LP: #1273747 + + [ Ben Collins ] + + * SAUCE: PPC: PCI: Fix pcibios_io_space_offset() so it works for 32-bit + ptr/64-bit rsrcs + * SAUCE: Revert "phy: vitesse make vsc824x_add_skew static" + * SAUCE: Fixup freescale usb phy driver to work on ppc64 + * SAUCE: xgmac_mdio: 
Silence read errors + * SAUCE: Provide booke stub for kvmppc_is_bigendian() + * SAUCE: Fix stack overflow on ppc32 + * SAUCE: Use resource_size_t instead of long for PCI resource address + * SAUCE: net/phy: Export function for use by dpaa_eth + * [Packaging] powerpc -- merge out of tree powerpc arch + + [ Bjorn Helgaas ] + + * SAUCE: Revert "EISA: Log device resources in dmesg" + - LP: #1251816 + * SAUCE: Revert "EISA: Initialize device before its resources" + - LP: #1251816 + + [ Emil Medve ] + + * SAUCE: phylib: Minimum hack to get the generic 10G PHY driver to work + with 10G "fixed-link"s + + [ Kumar Gala ] + + * SAUCE: fsl_qbman: Add drivers for Freescale DPAA Qman & Bman + * SAUCE: fsl_pme2: Add support for DPAA PME + * SAUCE: fmd: FMD14 integration + * SAUCE: dpaa_eth: Ethernet driver for Freescale QorIQ DPA Architecture + * SAUCE: powerpc/85xx: Add DPAA/networking support for CoreNet + + [ Madalin Bucur ] + + * SAUCE: net/flow: remove sleeping and deferral mechanism from + flow_cache_flush + * SAUCE: net/phy: abort genphy_read_status when link changes during speed + and duplex reading + + [ Stefan Bader ] + + * [Config] move some VMWare related modules into main package + - LP: #1271669 + + [ Tim Gardner ] + + * [Config] Add r815x to nic-modules + - LP: #1273735 + * [Config] CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y + - LP: #239479 + + [ Upstream Kernel Changes ] + + * mmc: sdhci-pci: break out definitions to header file + - LP: #1239938 + * mmc: sdhci-pci: add support of O2Micro/BayHubTech SD hosts + - LP: #1239938 + * powerpc/book3e: rename interrupt_end_book3e with __end_interrupts + * powerpc/book3e: support CONFIG_RELOCATABLE + * book3e/kexec/kdump: enable kexec for kernel + * book3e/kexec/kdump: create a 1:1 TLB mapping + * book3e/kexec/kdump: introduce a kexec kernel flag + * book3e/kexec/kdump: implement ppc64 kexec specfic + * book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET + * book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB + + -- Andy Whitcroft Tue, 28 Jan 2014 22:59:46 +0000 + +linux (3.13.0-5.20) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] tools -- do not make symlinks when not making packages + * [Packaging] tools -- tidy up control.stub.in ordering + * [Packaging] tools -- tools-common is shared and not conditional + * rebase to v3.13 + + [ Dirk Brandewie ] + + * SAUCE: intel_pstate: Add setting voltage value for baytrail P states. 
+ + [ KY Srinivasan ] + + * SAUCE: Drivers: hv: vmbus: Specify the target CPU that should receive + notification + + [ Upstream Kernel Changes ] + + * rebase to v3.13 + - LP: #1270603 + + -- Andy Whitcroft Fri, 17 Jan 2014 15:45:31 +0000 + +linux (3.13.0-4.19) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] libunwind8-dev is now available for ppc64el + * [Packaging] tools -- make cpupower optional + * [Packaging] tools -- enable correctly for x86 + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1269872 + + [ Upstream Kernel Changes ] + + * SAUCE: ARM: OMAP: hwmod: Add SYSC offsets for AES IP + * SAUCE: ARM: OMAP4: hwmod: Add hwmod data for AES IP + * SAUCE: OMAP: AM33xx: hwmod: Correct AES module SYSC type + * SAUCE: crypto: omap-aes: add error check for pm_runtime_get_sync + + [ Upstream Kernel Changes ] + + * rebase to 85ce70fdf48aa290b4845311c2dd815d7f8d1fa5 + + -- Tim Gardner Wed, 15 Jan 2014 13:23:05 +0000 + +linux (3.13.0-3.18) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.13-rc8 + * [Packaging] efi -- allow EFI signatures on any arch + + [ Tim Gardner ] + + * [Config] Fix vcs-git path + * Release Tracking Bug + - LP: #1268683 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc8 + + -- Andy Whitcroft Sun, 12 Jan 2014 11:58:01 +0000 + +linux (3.13.0-2.17) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1267809 + * [Config] apply Platform support>>CPUIdle driver>>CPU Idle config + defaults + * [Config] apply Platform support>>CPU Frequency scaling config defaults + * [Config] CONFIG_PARIDE_EPATC8=y + * [Config] apply Device Drivers >> Broadcom specific AMBA config defaults + * [Config] apply Bus options >> PCI support >> RapidIO support >> RapidIO + Switch drivers config defaults + * [Config] apply Cryptographic API config defaults + * [Config] apply Device Drivers >> Common Clock Framework config defaults + * [Config] apply Device Drivers >> Distributed Switch Architecture + drivers config defaults + * [Config] apply Device Drivers >> Graphics support >> Backlight & LCD + device support config defaults + * [Config] apply Device Drivers >> Graphics support >> Support for frame + buffer devices >> Bootup logo config defaults + * annotations -- update in tree annotations + * [Config] apply Bus options >> PCI support >> RapidIO support config + defaults + * [Config] CONFIG_POWER_AVS=y CONFIG_RESET_CONTROLLER=y + * [Config] apply Device Drivers >> 1-wire Bus Masters config defaults + * [Config] apply CAN Device Drivers >> Platform CAN drivers with Netlink + support config defaults + * [Config] apply Device Drivers >> Character devices >> Serial drivers + config defaults + * [Config] apply Device Drivers >> Generic Thermal sysfs driver config + defaults + * [Config] apply Device Drivers >> Character devices >> TPM Hardware + Support config defaults + * [Config] apply Device Drivers >> Character devices config defaults + * [Config] apply Device Drivers >> HID support >> USB HID support >> USB + HID transport layer config defaults + * [Config] apply Device Drivers >> HID support >> HID bus support config + defaults + * [Config] apply Device Drivers >> HID support >> USB HID support config + defaults + * annotations -- update in tree annotations + * [Config] apply Device Drivers >> GPIO Support config defaults + * [Config] update configs for apparmour update + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot + * SAUCE: apparmor: fix unix domain sockets to be mediated on connection + - 
LP: #1208988 + * SAUCE: apparmor: allocate path lookup buffers during init + - LP: #1208988 + * SAUCE: (no-up) apparmor: Fix tasks not subject to, reloaded policy + - LP: #1236455 + + [ Tim Gardner ] + + * Revert "[Debian] getabis: Preface module with package name" + * [Config] Added ppc64el to getabis + * [packaging] Bump ABI for every new release + + -- Andy Whitcroft Fri, 10 Jan 2014 11:48:39 +0000 + +linux (3.13.0-1.16) trusty; urgency=low + + * First 3.13 upload. + * Release tracker + - LP: #1266852 + + -- Tim Gardner Tue, 07 Jan 2014 09:21:26 -0700 + +linux (3.13.0-0.15) trusty; urgency=low + + [ Tim Gardner ] + + * rebase to v3.13-rc7 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc7 + + -- Tim Gardner Sun, 05 Jan 2014 06:13:33 -0700 + +linux (3.13.0-0.14) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to 7a262d2ed9fa42fad8c4f243f8025580b58cf2f6 + + [ Tim Gardner ] + + * Remove ubuntu/dm-raid4-5 in favor of CONFIG_MD_RAID456 + * Update lttng to Version 2.4.0-rc2 + * lttng: Disabled trace_kvm_async_pf_completed + * [Config] CONFIG_IMA=y + - LP: #1244627 + + [ Upstream Kernel Changes ] + + * rebase to 7a262d2ed9fa42fad8c4f243f8025580b58cf2f6 + + -- Tim Gardner Thu, 02 Jan 2014 12:57:13 -0700 + +linux (3.13.0-0.13) trusty; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: aufs3 -- (no-up) aufs3-base.patch + * ubuntu: aufs3 -- (no-up) aufs3-mmap.patch + * ubuntu: aufs3 -- (no-up) aufs3-standalone.patch + * ubuntu: AUFS (no-squash): basic framework and update machinary + * ubuntu: AUFS -- update to 7b136a27b021da9010d8b6c101939dd298e46be7 + * ubuntu: aufs3 -- enable + * ubuntu: aufs3 -- update configs + + -- Andy Whitcroft Thu, 02 Jan 2014 09:41:02 +0000 + +linux (3.13.0-0.12) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc6 + + -- Tim Gardner Tue, 31 Dec 2013 06:16:03 -0700 + +linux (3.13.0-0.11) trusty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: suspicious unlocked ->status reading and writing in ipc/sem.c + * [Config] ppc64el -- initial defconfig based -generic flavour + * [Config] initial defconfig for ppc64el + * [Config] ubuntuise ppc64el config + * [Config] ubuntuise ppc64el config part 2 + * [Config] d-i -- update empty udebs list + * [Config] ppc64el -- split extras package + + [ Anton Blanchard ] + + * SAUCE: KVM: PPC: Book3S HV: Add little-endian guest support + + [ Benjamin Herrenschmidt ] + + * SAUCE: powerpc/powernv: Add calls to support little endian + + [ Cédric Le Goater ] + + * SAUCE: KVM: PPC: Book3S: add helper routine to load guest instructions + * SAUCE: KVM: PPC: Book3S: add helper routines to detect endian order + * SAUCE: KVM: PPC: Book3S: MMIO emulation support for little endian + guests + + [ Paul E. 
McKenney ] + + * SAUCE: powerpc: Make 64-bit non-VMX copy_tofrom_user() bi-endian + + -- Andy Whitcroft Fri, 27 Dec 2013 16:48:55 +0000 + +linux (3.13.0-0.10) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.13-rc5 + * [Config] updateconfigs following rebase to v3.13-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc5 + - LP: #1260303 + - LP: #1260303 + - LP: #1260225 + + -- Andy Whitcroft Mon, 23 Dec 2013 12:48:28 +0000 + +linux (3.13.0-0.9) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i -- allow missing firmware + + -- Andy Whitcroft Fri, 20 Dec 2013 17:57:06 +0000 + +linux (3.13.0-0.8) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] annotations -- first pass over entire config + * [Config] drop libunwind8-dev from Build-Depends for ppc64el + + [ Tim Gardner ] + + * [Config] Add arm64 device tree files + - LP: #1262901 + + -- Andy Whitcroft Thu, 19 Dec 2013 18:36:43 +0000 + +linux (3.13.0-0.7) trusty; urgency=low + + [ Rajesh B Prathipati ] + + * SAUCE: powerpc: Make unaligned accesses endian-safe for powerpc + + [ Tim Gardner ] + + * [Config] CONFIG_REGULATOR_S2MPS11=n for FTBS + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc4 + - LP: #1259790 + - LP: #1259437 + - LP: #1259435 + + -- Tim Gardner Fri, 13 Dec 2013 07:56:34 -0700 + +linux (3.13.0-0.6) trusty; urgency=low + + [ Paolo Pisati ] + + * [Config] armhf: arm64: VIRTIO_[BLK|MMIO|NET|CONSOLE|BALLOON]=y + * [Config] i386: amd64: VIRTIO_CONSOLE=y + + [ Tim Gardner ] + + * [Config] CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y + + -- Tim Gardner Sun, 08 Dec 2013 09:22:01 -0700 + +linux (3.13.0-0.5) trusty; urgency=low + + [ Andy Whitcroft ] + + * correct bug listing for v3.13-rc2 rebase + * [Config] ppc64el -- create linux-libc-dev + * [Debian] Improve tools version message + - LP: #1257715 + + [ Serge Hallyn ] + + * SAUCE: fork: Allow CLONE_PARENT after setns(CLONE_NEWPID)] + - LP: #1248590 + * SAUCE: vfs: Fix a regression in mounting proc + + [ Tim Gardner ] + + * [Config] Build-in ohci-pci + - LP: #1244176 + * Rebase to v3.13-rc3 + + [ Upstream Kernel Changes ] + + * Revert "Revert "fork: unify and tighten up CLONE_NEWUSER/CLONE_NEWPID + checks"" + - LP: #1248590 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc3 + - LP: #1256840 + - LP: #1256212 + + -- Tim Gardner Sat, 07 Dec 2013 07:55:39 -0700 + +linux (3.13.0-0.4) trusty; urgency=low + + [ Tim Gardner ] + + * Rebase to v3.13-rc2 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc2 + + -- Tim Gardner Fri, 29 Nov 2013 23:54:05 -0500 + +linux (3.13.0-0.3) trusty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: Fix DocBook FTBS" + + [ Tim Gardner ] + + * [Debian] Re-sign modules after debug objcopy + - LP: #1253155 + * [Config] CONFIG_EXT4_USE_FOR_EXT23=y + + [ Upstream Kernel Changes ] + + * doc: fix generation of device-drivers + * rebase to b975dc3689fc6a3718ad288ce080924f9cb7e176 + + -- Tim Gardner Tue, 26 Nov 2013 12:24:42 -0700 + +linux (3.13.0-0.2) trusty; urgency=low + + [ Tim Gardner ] + + * SAUCE: Fix DocBook FTBS + + -- Tim Gardner Mon, 25 Nov 2013 13:24:15 -0700 + +linux (3.13.0-0.1) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to 7e3528c3660a2e8602abc7858b0994d611f74bc3 + + -- Tim Gardner Tue, 12 Nov 2013 07:28:53 -0700 + +linux (3.13.0-0.0) trusty; urgency=low + + * Major release bump. + + -- Andy Whitcroft Tue, 12 Nov 2013 21:37:52 +0000 + +linux (3.12.0-2.7) trusty; urgency=low + + * Fixed armhf ABI build failure. 
+ * Release tracker + - LP: #1249477 + + -- Tim Gardner Fri, 08 Nov 2013 16:22:45 -0700 + +linux (3.12.0-2.6) trusty; urgency=low + + [ Joseph Salisbury ] + + * SAUCE: tg3: Add support for new 57786 device id. + - LP: #1242610 + + [ Tim Gardner ] + + * [Config] CONFIG_OABI_COMPAT=n + * [Config] add the wandboard to shipped dtb + - LP: #1249421 + * Release tracker + - LP: #1249477 + + -- Tim Gardner Fri, 08 Nov 2013 12:23:18 -0700 + +linux (3.12.0-2.5) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to mainline v3.12 + * [Config] updateconfigs following rebase to v3.12 + * postinst -- improve relative symlink detection with missing files + - LP: #1248053 + * postinst -- fix unchanged link detection + * [Config] update configs following addition of apparmor fixes + + [ Anthony Wong ] + + * SAUCE: Work around broken ACPI backlight on Dell Inspiron 5537 + - LP: #1231305 + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot + * SAUCE: apparmor: fix unix domain sockets to be mediated on connection + - LP: #1208988 + * SAUCE: apparmor: allocate path lookup buffers during init + - LP: #1208988 + + [ Tim Gardner ] + + * [Config] Remove superfluous ubuntu/lttng-modules + + [ Upstream Kernel Changes ] + + * Revert "fork: unify and tighten up CLONE_NEWUSER/CLONE_NEWPID checks" + - LP: #1248590 + + [ Upstream Kernel Changes ] + + * rebase to v3.12 + - LP: #1222850 + + [ Adam Conrad ] + * etc/getabis: Fetch arm64/generic abis as well + + -- Andy Whitcroft Wed, 06 Nov 2013 21:00:21 +0000 + +linux (3.12.0-1.3) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] arm64 -- add arch to the configuration handlers + * [Config] arm64 -- add generic flavour + * [Config] arm64 -- default config + * [Config] arm64 -- fix up various FTBFS config options + * SAUCE: arm64: export __copy_in_user to modules + * [Config] arm64 -- disable ABI/module checks + * [Config] arm64 -- enforcer -- add arm64 to the enforcer + * [Config] arm64 -- enable udebs for arm64 + + [ Colin Watson ] + + * [Config] Clean up various udeb Provides + + [ Paolo Pisati ] + + * [Config] AHCI_IMX=y + * [Config] build imx*-wandboard dtbs + + [ Serge Hallyn ] + + * SAUCE: device_cgroup: remove can_attach + + [ Tim Gardner ] + + * rebase to v3.12-rc7 + * SAUCE: KVM: Fix modprobe failure for kvm_intel/kvm_amd + * Release tracker + - LP: #1245932 + + [ Upstream Kernel Changes ] + + * rebase to v3.12-rc7 + - LP: #1180881 + - LP: #1180881 + - LP: #1217957 + + -- Tim Gardner Sun, 27 Oct 2013 22:08:55 -0600 + +linux (3.12.0-0.2) trusty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) scsi: hyper-v storage -- mark as preferring READ + CAPACITY (16) at SPC-2" + * Revert "SAUCE: (no-up) scsi: hyper-v storage -- mark as VPD capable at + SPC-2" + * Revert "SAUCE: (no-up) scsi: add scsi device flag to request READ + CAPACITY (16) be preferred" + * Revert "SAUCE: (no-up) scsi: add scsi device flag to request VPD pages + be used at SPC-2" + * Revert "overlayfs: Update to v19" + * Revert "ubuntu: overlayfs v18 -- -- overlayfs: implement show_options" + * Revert "ubuntu: overlayfs v18 -- -- overlayfs: add statfs support" + * Revert "ubuntu: overlayfs v18 -- -- overlay filesystem" + * Revert "ubuntu: overlayfs v18 -- -- vfs: introduce + clone_private_mount()" + * Revert "ubuntu: overlayfs v18 -- -- vfs: export do_splice_direct() to + modules" + * Revert "ubuntu: overlayfs v18 -- -- overlay: overlay filesystem + documentation" + * ubuntu: overlayfs v20 -- overlayfs: add statfs support + * [Config] fix 
linux-libc-dev generation for arm64 + * [Config] fix linux-libc-dev generation for x32 + * [Config] add linux-libc-dev generation for ppc64el + + [ Erez Zadok ] + + * ubuntu: overlayfs v20 -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs v20 -- vfs: add i_op->dentry_open() + * ubuntu: overlayfs v20 -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs v20 -- vfs: export __inode_permission() to modules + * ubuntu: overlayfs v20 -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs v20 -- overlay filesystem + * ubuntu: overlayfs v20 -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs v20 -- overlay: overlay filesystem documentation + + [ Paolo Pisati ] + + * [Config] arm: VIRTIO_[BLK|NET|MMIO]=y + + [ Seth Forshee ] + + * SAUCE: (no-up) ACPI: Disable Windows 8 compatibility for some Lenovo + ThinkPads + - LP: #1183856 + + [ Tim Gardner ] + + * [Config] CONFIG_CRYPTO_CRCT10DIF=y, CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m + * rebase to v3.12-rc2 + * updateconfigs + * [Config] disable CONFIG_FB_VESA enforcer check + * [Config] Disable lttng for FTBS + * rebase to v3.12-rc3 + * rebase to v3.12-rc4 + * [Config] CONFIG_ANDROID=n + - LP: #1235161 + * [Config] CONFIG_L2TP_V3=y + - LP: #1235914 + * [Config] CONFIG_USB_OTG=n for all arches + * Release tracker + - LP: #1242811 + + [ Upstream Kernel Changes ] + + * scsi: hyper-v storsvc switch up to SPC-3 + + * rebase to v3.12-rc6 + - LP: #1235977 + - LP: #1235523 + - LP: #1239392 + - LP: #1227491 + + * rebase to v3.12-rc3 + - LP: #1231931 + + * rebase to v3.12-rc2 + - LP: #1213820 + - LP: #1213055 + - LP: #1198030 + + -- Tim Gardner Mon, 23 Sep 2013 07:41:07 -0600 + +linux (3.11.0-8.15) saucy; urgency=low + + [ Tim Gardner ] + + * Release tracker + - LP: #1227969 + + * Update lttng + Updated to git://git.lttng.org/lttng-modules.git 9998f5216f4641a79e158135 + Version 2.3.0+ + + [ Upstream Kernel Changes ] + + * igb: Add additional get_phy_id call for i354 devices + - LP: #1219619 + * igb: Read flow control for i350 from correct EEPROM section + - LP: #1219619 + * timekeeping: Fix HRTICK related deadlock from ntp lock changes + Required for lttng update. + + -- Tim Gardner Thu, 19 Sep 2013 07:41:49 -0600 + +linux (3.11.0-7.14) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] sort out linux-tools naming + - LP: #1205284 + * [Packaging] linux-tools: switch to common generic version helper + + [ Paolo Pisati ] + + * [Config] highbank: ecx1000: CPU_IDLE causes instabilities, disable it + + [ Tim Gardner ] + + * Release tracker + - LP: #1226160 + + [ Tony Lindgren ] + + * SAUCE: ARM: dts: Fix muxing and regulator for wl12xx on the SDIO bus + for pandaboard + + [ Upstream Kernel Changes ] + + * USB: handle LPM errors during device suspend correctly + - LP: #1011415 + * usb: don't check pm qos NO_POWER_OFF flag in usb_port_suspend() + - LP: #1011415 + * usb: Don't fail port power resume on device disconnect. 
+ - LP: #1011415 + + [ Upstream Kernel Changes ] + + * rebase to v3.11.1 + + -- Tim Gardner Wed, 11 Sep 2013 07:30:17 -0600 + +linux (3.11.0-7.13) saucy; urgency=low + + * Release tracker + - LP: #1223545 + + [ Andy Whitcroft ] + + * SAUCE: (no-up) scsi: add scsi device flag to request VPD pages be used at SPC-2 + - LP: #1223499 + * SAUCE: (no-up) scsi: add scsi device flag to request READ CAPACITY (16) be preferred + - LP: #1223499 + * SAUCE: (no-up) scsi: hyper-v storage -- mark as VPD capable at SPC-2 + - LP: #1223499 + * SAUCE: (no-up) scsi: hyper-v storage -- mark as preferring READ CAPACITY (16) at SPC-2 + - LP: #1223499 + + [ Maximiliano Curia ] + + * SAUCE: (no-up) Only let characters through when there are active readers. + - LP: #1208740 + + [ Tim Gardner ] + + * [Debian] getabis: Commit new ABI directory, remove the old + * [Config] CONFIG_EFIVAR_FS=y + - LP: #1223195 + * [Config] CONFIG_EFI_VARS_PSTORE=m, + CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=n + * SAUCE: (no-up) USB: input: cm109.c: Convert high volume dev_err() to dev_err_ratelimited() + - LP: #1222850 + + [ Upstream Kernel Changes ] + + * Intel xhci: refactor EHCI/xHCI port switching + - LP: #1210858 + + -- Tim Gardner Tue, 10 Sep 2013 09:00:19 -0600 + +linux (3.11.0-6.12) saucy; urgency=low + + * Release tracker + - LP: #1222893 + + [ Andy Whitcroft ] + + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch" + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-base.patch" + * ubuntu: (no-squash) AUFS3 -- aufs3-base.patch + * ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch + - LP: #1222407 + + [ Paolo Pisati ] + + * [Config] restore mmc boot on panda + + [ Tyler Hicks ] + + * SAUCE: apparmor: Use shash crypto API interface for profile hashes + - LP: #1216294 + + [ Upstream Kernel Changes ] + + * net: calxedaxgmac: remove NETIF_F_FRAGLIST setting + * net: calxedaxgmac: read correct field in xgmac_desc_get_buf_len + * net: calxedaxgmac: fix race between xgmac_tx_complete and xgmac_tx_err + * net: calxedaxgmac: fix possible skb free before tx complete + * net: calxedaxgmac: update ring buffer tx_head after barriers + * net: calxedaxgmac: fix race with tx queue stop/wake + * net: calxedaxgmac: enable interrupts after napi_enable + * net: calxedaxgmac: fix various errors in xgmac_set_rx_mode + * net: calxedaxgmac: remove some unused statistic counters + * net: calxedaxgmac: fix rx DMA mapping API size mismatches + * net: calxedaxgmac: fix xgmac_xmit DMA mapping error handling + * mfd: rtsx: Read vendor setting from config space + - LP: #1201698 + + -- Tim Gardner Mon, 09 Sep 2013 07:21:06 -0600 + +linux (3.11.0-5.11) saucy; urgency=low + + * Release tracker + - LP: #1221886 + + [ Adam Lee ] + + * SAUCE: Bluetooth: Add support for 04ca:2007 + - LP: #1153448 + * SAUCE: Bluetooth: Add support for 105b:e065 + - LP: #1161261 + + [ Gavin Guo ] + + * SAUCE: Bluetooth: Add support for Broadcom 413c:8143 + - LP: #1166113 + + [ Upstream Kernel Changes ] + + * igb: Reset the link when EEE setting changed + - LP: #1219619 + * igb: Read register for latch_on without return value + - LP: #1219619 + * igb: Added rcu_lock to avoid race + - LP: #1219619 + * igb: don't allow SR-IOV without MSI-X + - LP: #1219619 + * igb: Update MTU so that it is always at least a standard frame size + - LP: #1219619 + * igb: Refactor of init_nvm_params + - LP: #1219619 + * igb: Refactor NVM read functions to accommodate devices with no flash + - LP: #1219619 + * igb: Add device support for flashless SKU of i210 device + - LP: #1219619 + * igb: Fix 
get_fw_version function for all parts + - LP: #1219619 + * igb: Add macro for size of RETA indirection table + - LP: #1219619 + * igb: Expose RSS indirection table for ethtool + - LP: #1219619 + * igb: Don't look for a PBA in the iNVM when flashless + - LP: #1219619 + * igb: Implementation of 1-sec delay for i210 devices + - LP: #1219619 + * igb: New PHY_ID for i354 device + - LP: #1219619 + * igb: M88E1543 PHY downshift implementation + - LP: #1219619 + * igb: No PHPM support in i354 devices + - LP: #1219619 + * igb: Support to get 2_5G link status for appropriate media type + - LP: #1219619 + * igb: Get speed and duplex for 1G non_copper devices + - LP: #1219619 + * igb: Implementation to report advertised/supported link on i354 devices + - LP: #1219619 + * igb: Update version number + - LP: #1219619 + * Bluetooth: Take proper tty_struct references + - LP: #1189998 + * Bluetooth: Remove the device from the list in the destructor + - LP: #1189998 + * Bluetooth: Move the tty initialization and cleanup out of open/close + - LP: #1189998 + * Bluetooth: Implement .activate, .shutdown and .carrier_raised methods + - LP: #1189998 + * Bluetooth: Fix the reference counting of tty_port + - LP: #1189998 + * Bluetooth: Purge the dlc->tx_queue to avoid circular dependency + - LP: #1189998 + + [ Wen-chien Jesse Sung ] + + * SAUCE: Bluetooth: Support for loading broadcom patchram firmware + - LP: #1065400 + * SAUCE: Bluetooth: Add support for 13d3:3388 and 13d3:3389 + - LP: #1065400 + + -- Tim Gardner Thu, 05 Sep 2013 08:06:17 -0600 + +linux (3.11.0-5.10) saucy; urgency=low + + [ Andy Whitcroft ] + + * Release tracker + - LP: #1220222 + * Revert "[Config] Fix ubuntu directoy Kbuilds" + * Revert "aufs update dropped some Kbuild files" + * Revert "ubuntu: AUFS -- follow rename of loop.h into drivers/block" + * Revert "ubuntu: AUFS -- update to + 8e503d4142c189ed6c47a2177ad2cd058e8d340e" + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch" + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-base.patch" + * rebase to v3.11 final + * [Config] clean up ubuntu/Kconfig and ubuntu/Makefile + * ubuntu: AUFS (no-squash): basic framework and update machinary + * ubuntu: (no-squash) AUFS3 -- aufs3-base.patch + * ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch + * ubuntu: AUFS -- update to 5ac5fe26a90a818218310e208d17688fddb07622 + * ubuntu: (no-squash) AUFS -- enable aufs + * ubuntu: AUFS -- fix remaining d_count references to use accessor + * ubuntu: lttng -- follow rename of pid_ns + * SAUCE: disable stack-protector for ARM compressed bootloader + + [ Paolo Pisati ] + + * [Config] ARM_ATAG_DTB_COMPAT=y + + [ Rob Herring ] + + * [Config] Enable KVM and virtio for armhf generic-lpae + + [ Tim Gardner ] + + * [Config] CONFIG_ARPD=y + * [Config] CONFIG_ZSWAP=y + - LP: #1215379 + + [ Upstream Kernel Changes ] + + * uvcvideo: quirk PROBE_DEF for Dell SP2008WFP monitor. 
+ - LP: #1217957 + * ARM: use phys_addr_t for DMA zone sizes + * ARM: highbank: enable DMA zone for LPAE + * ARM: highbank: select ARCH_HAS_HOLES_MEMORYMODEL + * ARM: highbank: select required errata work-arounds + * DMA: fix AMBA PL08x compilation issue with 64bit DMA address type + * DMA: fix printk warning in AMBA PL08x DMA driver + * ARM: highbank: select ARCH_DMA_ADDR_T_64BIT for LPAE + * ARM: move outer_cache declaration out of ifdef + * ARM: highbank: avoid L2 cache smc calls when PL310 is not present + * ARM: highbank: clean-up some unused includes + * ARM: xen: only set pm function ptrs for Xen guests + + [ Upstream Kernel Changes ] + + * rebase to v3.11 + + -- Andy Whitcroft Tue, 03 Sep 2013 17:08:06 +0100 + +linux (3.11.0-4.9) saucy; urgency=low + + [ Tim Gardner ] + + * rebase to v3.11-rc7 + * Release tracker + - LP: #1216962 + + [ Upstream Kernel Changes ] + + * mwifiex: do not create AP and P2P interfaces upon driver loading + - LP: #1212720 + + -- Tim Gardner Mon, 26 Aug 2013 06:25:35 -0600 + +linux (3.11.0-3.8) saucy; urgency=low + + [ Johannes Berg ] + + * SAUCE: mac80211: ignore (E)CSA in probe response frames + - LP: #1201470 + + -- Tim Gardner Fri, 23 Aug 2013 09:47:36 -0600 + +linux (3.11.0-3.7) saucy; urgency=low + + [ Tim Gardner ] + + * SAUCE: (no-up) hv_vss_daemon -- prevent self-daemonising to allow + upstart to track + * SAUCE: (no-up) hv -- bodge hv_vss_daemon so it can use the local + linux/hyperv.h + * SAUCE: hv: Add vss daemon to Makefile + * [Debian] Add hv_vss_daemon to tools package + - LP: #1213282 + * [Config] Fix ubuntu directoy Kbuilds + - LP: #1181755 + + -- Tim Gardner Tue, 20 Aug 2013 08:34:05 -0600 + +linux (3.11.0-3.6) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] tools: conditionalise x86 and hyper-v tools sensibly + * [Config] tools: enable x86 and hyper-v + + [ John Johansen ] + + * Revert "SAUCE: (no-up) apparmor: Sync to apparmor 3 dev stable + snapshot" + * Revert "SAUCE: (no-up) apparmor: fix apparmor module status for none + root users" + * SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot + + [ Joseph Salisbury ] + + * SAUCE: (no-up) intel_ips: blacklist ASUSTek G60JX laptops + - LP: #1210848 + + [ Kamal Mostafa ] + + * [debian] tools: ship 'cpupower' in linux-tools + - LP: #1158668 + * [Config] Build-dep on libpci-dev for cpu tools + - LP: #1158668 + + [ Tim Gardner ] + + * rebase to v3.11-rc6 + * Release tracker + - LP: #1213941 + + -- Tim Gardner Fri, 16 Aug 2013 07:02:07 -0600 + +linux (3.11.0-2.5) saucy; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_PM_DEBUG=y + - LP: #1210539 + * rebase to v3.11-rc5 + * Release tracker + - LP: #1211378 + + -- Tim Gardner Mon, 12 Aug 2013 06:10:39 -0600 + +linux (3.11.0-1.4) saucy; urgency=low + + [ Tim Gardner ] + + * Bump ABI when making changes to the inclusion list lest + you cause conflicts with existing installed kernel + packages. 
+ - LP: #1210331 + + -- Tim Gardner Fri, 09 Aug 2013 03:03:51 +0100 + +linux (3.11.0-0.3) saucy; urgency=low + + [ Tim Gardner ] + + * [Config] Include rbd and kvm in the virtual inclusion list + - LP: #1206961 + * [Config] Removed obsolete inclusion list entries + + -- Tim Gardner Tue, 06 Aug 2013 08:52:14 +0100 + +linux (3.11.0-0.2) saucy; urgency=low + + [ Bruce Allan ] + + * SAUCE: (no-up) e1000e: fix I217/I218 PHY initialization flow + - LP: #1206757 + * SAUCE: (no-up) e1000e: enable support for new device IDs + - LP: #1206757 + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: Sync to apparmor 3 dev stable snapshot + + [ Paolo Pisati ] + + * build vexpress a15 dtb + * [Config] disable Broadcom bcm support (ARCH_BCM) + * [Config] disable Allwinner a1x support (ARCH_SUNXI) + * [Config] disable WonderMedia WM8850 support (ARCH_WM8850) + * [Config] disable Rockchip support (ARCH_ROCKCHIP) + * [Config] disable STMicroelectronics STiH41x SOCs (ARCH_STI) + * [Config] disable TI Keystone, AM43xx and OMAP5 support + * [Config] ARM_APPENDED_DTB=y + + [ Tim Gardner ] + + * rebase to v3.11-rc4 + * overlayfs: Update to v19 + * [Config] Enable overlayfs + * SAUCE: Fix lttng compile errors + + [ Upstream Kernel Changes ] + + * rebase to v3.11-rc4 + - LP: #1163720 + - LP: #1162026 + - LP: #1195636 + - LP: #1195597 + - LP: #1180409 + - LP: #1168430 + + -- Tim Gardner Sun, 04 Aug 2013 03:45:31 -0600 + +linux (3.11.0-0.1) saucy; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to 64ccccf8525fee499625b517c0faadf784c79e93 + - LP: #1163720 + - LP: #1162026 + - LP: #1195636 + - LP: #1195597 + - LP: #1180409 + - LP: #1168430 + + -- Tim Gardner Mon, 08 Jul 2013 08:50:46 -0600 + +linux (3.10.0-2.10) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_INTEL_MEI*=m + - LP: #1196155 + * [Config] CONFIG_DEBUG_INFO=y + + [ Stephen Warren ] + + * [Config] fix Calxeda xgmac module filename + + [ Upstream Kernel Changes ] + + * Revert "serial: 8250_pci: add support for another kind of NetMos + Technology PCI 9835 Multi-I/O Controller" + - LP: #1190967 + * mfd: lpc_ich: Add support for Intel Avoton SoC + - LP: #1196658 + + -- Andy Whitcroft Fri, 05 Jul 2013 18:08:02 +0100 + +linux (3.10.0-2.9) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10 + + [ John Johansen ] + + * Revert "SAUCE: (no-up) apparmor: Fix quieting of audit messages for + network mediation" + * Revert "SAUCE: (no-up) apparmor: Fix compile warnings" + * Revert "SAUCE: (no-up) AppArmor: basic networking rules" + * Revert "SAUCE: (no-up) apparmor: Add the ability to mediate mount" + * Revert "SAUCE: (no-up) AppArmor: Add profile introspection file to + interface" + * Revert "SAUCE: (no-up) AppArmor: Disable Add PR_{GET,SET}_NO_NEW_PRIVS + to prevent execve from granting privs" + * SAUCE: (no-up) apparmor: Sync to apparmor 3 dev stable snapshot + + [ Upstream Kernel Changes ] + + * rebase to v3.10 + + -- Andy Whitcroft Mon, 01 Jul 2013 17:42:29 +0100 + +linux (3.10.0-1.8) saucy; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1195717 + + [ Andy Whitcroft ] + + * Revert "ubuntu: overlayfs -- follow change to do_splice_direct + interface" + * Revert "ubuntu: overlayfs -- expose do_splice_direct prototype" + * Revert "SAUCE: ubuntu: overlayfs -- ovl_path_open should not take path + reference" + * Revert "ubuntu: overlayfs -- add FS_ALIAS" + * Revert "ubuntu: overlayfs -- + overlayfs-copy-up-i_uid-i_gid-from-the-underlying-inode" + * Revert "ubuntu: overlayfs -- ovl-switch-to-inode_permission" + * 
Revert "ubuntu: overlayfs -- vfs-export-inode_permission-to-modules" + * Revert "ubuntu: overlayfs -- overlayfs-create-new-inode-in-ovl_link" + * Revert "ubuntu: overlayfs -- + overlayfs-fix-possible-leak-in-ovl_new_inode" + * Revert "ubuntu: overlayfs -- fs-limit-filesystem-stacking-depth" + * Revert "ubuntu: overlayfs -- overlay-overlay-filesystem-documentation" + * Revert "ubuntu: overlayfs -- overlayfs-implement-show_options" + * Revert "ubuntu: overlayfs -- overlayfs-add-statfs-support" + * Revert "ubuntu: overlayfs -- overlay filesystem" + * Revert "ubuntu: overlayfs -- vfs-introduce-clone_private_mount" + * Revert "ubuntu: overlayfs -- vfs-export-do_splice_direct-to-modules" + * Revert "ubuntu: overlayfs -- vfs-add-i_op-dentry_open" + * ubuntu: overlayfs v18 -- -- overlayfs: add statfs support + + [ Erez Zadok ] + + * ubuntu: overlayfs v18 -- -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs v18 -- -- vfs: add i_op->dentry_open() + * ubuntu: overlayfs v18 -- -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs v18 -- -- vfs: export __inode_permission() to modules + * ubuntu: overlayfs v18 -- -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs v18 -- -- overlay filesystem + * ubuntu: overlayfs v18 -- -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs v18 -- -- overlay: overlay filesystem documentation + + [ Tim Gardner ] + + * [Config] CONFIG_SUNRPC_DEBUG=y + - LP: #1127319 + + -- Andy Whitcroft Fri, 28 Jun 2013 10:26:52 +0100 + +linux (3.10.0-0.7) saucy; urgency=low + + [ Andy Whitcroft ] + + * autopkgtest: switch Depends: to build-essential + + -- Andy Whitcroft Tue, 25 Jun 2013 08:40:55 +0100 + +linux (3.10.0-0.6) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_ARCH_TEGRA to fix FTBFS on armhf + * SAUCE: ubuntu: overlayfs -- ovl_path_open should not take path reference + - LP: #1098378 + * ubuntu: AUFS -- update to 4f14cef47eb7c23eda7198931fbab1040866b6ee + * ubuntu: overlayfs -- expose do_splice_direct prototype + * ubuntu: overlayfs -- follow change to do_splice_direct interface + * [Config] flip CONFIG_NO_HZ_FULL_ALL off as it is overheating machines + - LP: #1192691 + + [ Stefan Bader ] + + * (d-i) Add dm-snapshot to md-modules + - LP: #1191726 + + [ Tim Gardner ] + + * Release tracker + - LP: #1194149 + * [Config] CONFIG_WIL6210=n for armhf + * [Config] d-i: Add calxedaxgmac to nic-modules + - LP: #1192358 + * [debian] Use dh_strip + - LP: #1192759 + * [Config] Enable perf for armhf + * do_tools=false when cross compiling + * [Config] CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y + - LP: #1108082 + + [ Upstream Kernel Changes ] + + * nsp32: switch reset delay to msleep() as it is tooo long + * alx: add a simple AR816x/AR817x device driver + Plucked from linux-next. replaces ubuntu/alx in favor of + 'to be merged' version in 3.11. 
+ * rebase to v3.10-rc7 + - LP: #1189363 + + -- Tim Gardner Sat, 22 Jun 2013 18:10:31 -0600 + +linux (3.10.0-0.5) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10-rc6 + * [Config] updateconfigs following rebase to v3.10-rc6 + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc6 + + -- Andy Whitcroft Mon, 17 Jun 2013 11:12:39 +0100 + +linux (3.10.0-0.4) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Config] updateconfigs following rebase to v3.10-rc4 + + -- Andy Whitcroft Mon, 10 Jun 2013 11:42:28 +0100 + +linux (3.10.0-0.3) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc5 + - LP: #1186170 + + -- Andy Whitcroft Mon, 10 Jun 2013 09:23:31 +0100 + +linux (3.10.0-0.2) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10-rc4 + * [Config] CONFIG_BINFMT_SCRIPT=y to fix booting initramfs scripts + * [Config] enable SND_PCM_DEBUG SND_PCM_XRUN_DEBUG + - LP: #1187744 + * [Config] enforce CONFIG_BINFMT_SCRIPT=y + + [ Dave Chiluk ] + + * SAUCE: ncpfs: fix rmdir returns Device or resource busy + - LP: #1035226 + + [ Tim Gardner ] + + * rebase to v3.10-rc3 + * [Config] sparc be gone + * [Config] ia64 be gone + * d-i: block-modules provides nbd-modules + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc4 + * rebase to v3.10-rc3 + + -- Tim Gardner Tue, 28 May 2013 06:16:46 -0600 + +linux (3.10.0-0.1) saucy; urgency=low + + [ Tim Gardner ] + + * UBUNTU: Disabled lttng + * UBUNTU: Disable aufs for FTBS + * UBUNTU: Disabled alx + * UBUNTU: alx: rename NETIF_F_HW_VLAN_* feature flags to NETIF_F_HW_VLAN_CTAG_* + * UBUNTU: rebase to v3.10-rc2 + * UBUNTU: SAUCE: uvcvideo: quirk PROBE_DEF for Alienware X51 OmniVision webcam + + [ Andy Whitcroft ] + + * UBUNTU: [Config] update standards version to 3.9.4.0 + * UBUNTU: [Config] squash duplicate package description (long and short) + * UBUNTU: [Config] fix up Vcs-git: to point to saucy + * UBUNTU: [Config] drop dependency on util-linux as is Essential + * UBUNTU: [Config] drop redundant Build-Conflicts: + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc2 + - LP: #1180351 + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc1 + - LP: #1172151 + - LP: #1089795 + - LP: #1167270 + - LP: #1128840 + + -- Tim Gardner Tue, 14 May 2013 13:41:07 -0600 + +linux (3.10.0-0.0) saucy; urgency=low + + * Dummy + + -- Tim Gardner Thu, 09 May 2013 20:30:40 +0100 + +linux (3.9.0-2.6) saucy; urgency=low + + [ Tim Gardner ] + + * rebase to v3.9.2 + + -- Tim Gardner Thu, 09 May 2013 20:30:40 +0100 + +linux (3.9.0-1.5) saucy; urgency=low + + [ Tim Gardner ] + + * rebase to v3.9.1 + + -- Tim Gardner Wed, 08 May 2013 12:49:45 -0400 + +linux (3.9.0-0.4) saucy; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: overlayfs -- add FS_ALIAS + + [ Tim Gardner ] + + * Added lttng + - LP: #1175784 + + -- Tim Gardner Thu, 02 May 2013 17:17:13 -0400 + +linux (3.9.0-0.3) saucy; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to v3.9 + + -- Tim Gardner Mon, 29 Apr 2013 18:20:00 -0400 + +linux (3.9.0-0.2) saucy; urgency=low + + [ Tim Gardner ] + + * Enable extras packaging for amd64/i386. + Fixes build dependencies with brittany and linux-meta. 
+ + -- Tim Gardner Mon, 29 Apr 2013 05:37:01 -0600 + +linux (3.9.0-0.1) saucy; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc8 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc7 + - LP: #1128840 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc6 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc4 + - LP: #1095315 + - LP: #886975 + - LP: #1086921 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc3 + - LP: #1155016 + - LP: #1103594 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc2 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc1 + - LP: #901105 + - LP: #961286 + - LP: #1011792 + - LP: #1128934 + - LP: #886975 + - LP: #978807 + + -- Tim Gardner Wed, 20 Feb 2013 09:12:39 -0700 + +linux (3.8.0-7.14) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_RCU_USER_QS=n + * [Config] CONFIG_MTD_ONENAND_SIM=n + * annotations: add annotations for CONFIG_CC_STACKPROTECTOR + + [ Upstream Kernel Changes ] + + * rebase to v3.8 + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1130111 + * UBUNTU: SAUCE: rt2x00: rt2x00pci_regbusy_read() - only print register access failure once + - LP: #1128840 + + -- Tim Gardner Mon, 18 Feb 2013 09:25:56 -0700 + +linux (3.8.0-6.13) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1125364 + * Add ahci modules to d-i + - LP: #1124415 + + + [ Chris Wilson ] + + * SAUCE: drm/i915: Wait for pending flips to complete before tearing down + the encoders + - LP: #1097315 + + -- Tim Gardner Wed, 13 Feb 2013 12:16:48 -0700 + +linux (3.8.0-6.12) raring; urgency=low + + [Tim Gardner] + + * perf: NO_LIBPERL=1 + * Fix linux-headers dependency + * Release Tracking Bug + - LP: #1124362 + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_AUFS_EXPORT to allow nfs exports + - LP: #1121699 + + [ Daniel Vetter ] + + * SAUCE: drm/i915: write backlight harder + - LP: #954661 + + -- Tim Gardner Wed, 13 Feb 2013 10:25:11 -0700 + +linux (3.8.0-6.11) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1122071 + * rebase to v3.8-rc7 + * Add libaudit-dev as a build dependency + * Build perf with NO_LIBPYTHON=1 to avoid a python build dependency. + + [ Leann Ogasawara ] + + * [Config] Remove CONFIG_SATA_AHCI annotation + + -- Tim Gardner Fri, 08 Feb 2013 07:41:13 -0500 + +linux (3.8.0-5.10) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1118568 + + * Bump ABI to fix install issue with 3.8.0-4.8. + Moving drivers/ata/*ahci* to linux-image caused an + install conflict with linux-image-extras without an + ABI bump. 
+ + [ Jan Beulich ] + + * SAUCE: xen-pciback: rate limit error messages from + xen_pcibk_enable_msi{, x}() + - LP: #1117336 + - CVE-2013-0231 + + -- Tim Gardner Thu, 07 Feb 2013 05:38:12 -0700 + +linux (3.8.0-4.9) raring; urgency=low + + [ Herton Ronaldo Krzesinski ] + + * d-i: Add mellanox ethernet drivers to nic-modules + - LP: #1015339 + + [ Joseph Salisbury ] + + * SAUCE: ACPI: Add DMI entry for Sony VGN-FW41E_H + - LP: #1113547 + + [ Kamal Mostafa ] + + * SAUCE: alx driver import script + + [ Qualcomm Atheros, Inc ] + + * SAUCE: alx: Update to heads/master + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1117673 + + * [debian] Remove dangling symlink from headers package + - LP: #1112442 + * [config] CONFIG_ALX=m + * [Config] Add alx to d-i nic-modules + * [Config] CONFIG_SATA_AHCI=m + - LP: #1056563 + + -- Leann Ogasawara Tue, 05 Feb 2013 05:54:32 -0800 + +linux (3.8.0-4.8) raring; urgency=low + + [ Allen Ibara ] + + * SAUCE: imx6: dts: Add IMX6Q AHCI support + + [ Andy Whitcroft ] + + * rebase to v3.8-rc6 + * updateconfigs following rebase to v3.8-rc6 + + [Leann Ogasawara] + + * Release Tracking Bug + - LP: #1112573 + + [ Paolo Pisati ] + + * SAUCE: imx6: enable sata clk if SATA_AHCI_PLATFORM + * [Config] SERIAL_AMBA_PL011=y (vexpress serial console) + * [Config] MMC_ARMMMCI=y (vexpress mmc) + * [Config] FB_ARMCLCD=y (vexpress framebuffer) + + [ Seth Forshee ] + + * [Config] CONFIG_MAC80211_MESSAGE_TRACING=y + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc6 + - LP: #1107477 + + -- Leann Ogasawara Fri, 01 Feb 2013 07:20:59 -0800 + +linux (3.8.0-3.7) raring; urgency=low + + [ Andy Green ] + + * SAUCE: ARM: OMAP2+: add cpu id register to MAC address helper + * SAUCE: ARM: omap2 add mac address allocation register api + * SAUCE: ARM: omap2 panda register ethernet and wlan for automatic mac + allocation + + [ Leann Ogasawara ] + + * rebase to v3.8-rc5 + * Release Tracking Bug + - LP: #1111486 + + [ Paolo Pisati ] + + * SAUCE: davinci: vpss: compilation fix + * [Config] enable TI OMAP4 support (Pandaboard/ES) + * [Config] OMAP_USB2=y (since TWL6030_USB depends on it) + * [Config] enable Freescale IMX6 support (SabreLite) + * [Config] SERIAL_IMX_CONSOLE=y + * [Config] MMC_*_IMX=y + * [Config] disable USB_SUSPEND + * [Config] USB_MXS_PHY=y + * [Config] USB_CHIPIDEA=y + * SAUCE: DTB: add support for multiple DTBs + * SAUCE: DTB: build imx6q-sabrelite + * SAUCE: DTB: build beaglexm + * SAUCE: DTB: build panda/panda es + * [Config] disable CPU_FREQ + * [Config] PANEL_TFP410=y (video DVI output) + * [Config] SND_OMAP_SOC*=y + * [Config] SND_IMX_SOC*=y + * [Config] I2C_IMX=y + * [Config] SPI_IMX=m + + [ Stefan Bader ] + + * [Config] Move 9p modules into generic package + - LP: #1107658 + + [ Tony Lindgren ] + + * SAUCE: ARM: OMAP2+: Limit omap initcalls to omap only on multiplatform + kernels + * SAUCE: ARM: OMAP2+: Use omap initcalls + * SAUCE: ARM: OMAP: Fix i2c cmdline initcall for multiplatform + * SAUCE: ARM: OMAP: Fix dmaengine init for multiplatform + * SAUCE: ARM: OMAP2+: Add multiplatform debug_ll support + * SAUCE: ARM: OMAP2+: Disable code that currently does not work with + multiplaform + * SAUCE: ARM: OMAP2+: Enable ARCH_MULTIPLATFORM support + * SAUCE: ARM: OMAP2+: Add minimal support for booting vexpress + * SAUCE: ARM: OMAP2+: Remove now obsolete uncompress.h and debug-macro.S + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc5 + - LP: #1096789 + + -- Leann Ogasawara Thu, 31 Jan 2013 06:44:52 -0800 + +linux (3.8.0-2.6) raring; urgency=low + + [ Adam Conrad ] + 
+ * Fix up linux-tools -> SRCPKGNAME-tools rename + + [ Andy Whitcroft ] + + * [Config] re-disable CONFIG_SOUND_OSS + - LP: #1105230 + + [ Arend van Spriel ] + + * SAUCE: brcmsmac: fix tx status processing + + [Leann Ogasawara] + + * Release Tracking Bug + - LP: #1105104 + + -- Leann Ogasawara Fri, 25 Jan 2013 11:56:30 -0800 + +linux (3.8.0-1.5) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1101235 + + [ Dudley Du ] + + * SAUCE: Input: add support for Cypress PS/2 Trackpads + - LP: #978807 + + [ Kamal Mostafa ] + + * SAUCE: Input: increase struct ps2dev cmdbuf[] to 8 bytes + * SAUCE: Input: Cypress PS/2 Trackpad simulated multitouch + * [Config] Add CONFIG_PS2_CYPRESS + + [ Tim Gardner ] + + * rebase to v3.8-rc4 + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc4 + - LP: #1095315 + - LP: #886975 + - LP: #1086921 + + -- Leann Ogasawara Thu, 17 Jan 2013 10:50:22 -0800 + +linux (3.8.0-0.4) raring; urgency=low + + [ Leann Ogasawara ] + + * [Config] Update CONFIG_TOUCHSCREEN_EGALAX build annotation + * [Config] Update CONFIG_IIO build annotation + * [Config] Update CONFIG_TOUCHSCREEN_EETI annotation + * [Config] Remove CONFIG_SPI_DW_MMIO annotation + * [Config] Remove CONFIG_SPI_PL022 annotation + * [Config] Update CONFIG_EZX_PCAP annotation + * [Config] Update CONFIG_SENSORS_AK8975 annotation + * [Config] Disable CONFIG_DRM_MGAG200 + - LP: #1042903 + + -- Leann Ogasawara Mon, 14 Jan 2013 10:01:50 -0800 + +linux (3.8.0-0.3) raring; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) trace: add trace events for open(), exec() and + uselib()" + + [ Scott James Remnant ] + + * SAUCE: (no-up) trace: add trace events for open(), exec() and uselib() + (for v3.7+) + - LP: #1085766, #462111 + + -- Andy Whitcroft Fri, 11 Jan 2013 16:57:27 +0000 + +linux (3.8.0-0.2) raring; urgency=low + + [ Tim Gardner ] + + * [packaging] Add macro to selectively disable building perf + * [packaging] Cannot depend on universe package libaudit-dev + + -- Tim Gardner Thu, 10 Jan 2013 12:43:24 -0700 + +linux (3.8.0-0.1) raring; urgency=low + + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc3 + - LP: #1096789 + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc2 + - LP: #1082357 + - LP: #1075882 + + -- Andy Whitcroft Mon, 17 Dec 2012 10:35:09 +0000 + +linux (3.7.0-7.15) raring; urgency=low + + [ Chris J Arges ] + + * SAUCE: add eeprom_bad_csum_allow module parameter + - LP: #1070182 + + [ Leann Ogasawara ] + + * Add ceph to linux-image for virtual instances + - LP: #1063784 + + [ Serge Hallyn ] + + * SAUCE: net: dev_change_net_namespace: send a KOBJ_REMOVED/KOBJ_ADD + + [ Tim Gardner ] + + * [Config] CONFIG_SLUB_DEBUG=y + - LP: #1090308 + + [ Upstream Kernel Changes ] + + * Revert "[SCSI] sd: Implement support for WRITE SAME" + - LP: #1089818 + + -- Leann Ogasawara Wed, 12 Dec 2012 06:50:20 -0800 + +linux (3.7.0-6.14) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] annotations: all new annotations scheme including defaults + * [Configs] apply annotation updates to main configs (top section) + + [ Leann Ogasawara ] + + * Revert "SAUCE: include and for mmc_core arm + build" + * Revert "SAUCE: [arm] fixup __aeabi_uldivmod undefined build error" + * Temporarily disable module check for build + + [ Stefan Bader ] + + * ubuntu: dm-raid45: Adapt to upstream interface changes + * Re-enable build of dm-raid45 + + [ Tim Gardner ] + + * SAUCE: Moved scripts/fw-to-ihex.sh to debian/scripts/misc + * SAUCE: ACPICA: Fix ACPI mutex object allocation memory leak on error + * 
SAUCE: drm: Fix possible EDID memory allocation oops + * SAUCE: ttm: Fix possible _manager memory allocation oops + * SAUCE: iwlwifi: iwlagn_request_scan: Fix check for priv->scan_request + * SAUCE: i915: intel_set_mode: Reduce stack allocation from 500 bytes to + 2 pointers + + [ Tomas Hozza ] + + * SAUCE: tools: hv: Netlink source address validation allows DoS + - LP: #1084777 + - CVE-2012-5532 + + [ Upstream Kernel Changes ] + + * rebase to v3.7 + + -- Leann Ogasawara Wed, 05 Dec 2012 14:11:12 -0800 + +linux (3.7.0-5.13) raring; urgency=low + + [ Lino Sanfilippo ] + + * SAUCE: inotify, fanotify: replace fsnotify_put_group() with + fsnotify_destroy_group() + - LP: #922906 + * SAUCE: fsnotify: introduce fsnotify_get_group() + - LP: #922906 + * SAUCE: fsnotify: use reference counting for groups + - LP: #922906 + * SAUCE: fsnotify: take groups mark_lock before mark lock + - LP: #922906 + * SAUCE: fanotify: add an extra flag to mark_remove_from_mask that + indicates wheather a mark should be destroyed + - LP: #922906 + * SAUCE: fsnotify: use a mutex instead of a spinlock to protect a groups + mark list + - LP: #922906 + * SAUCE: fsnotify: pass group to fsnotify_destroy_mark() + - LP: #922906 + * SAUCE: fsnotify: introduce locked versions of fsnotify_add_mark() and + fsnotify_remove_mark() + - LP: #922906 + * SAUCE: fsnotify: dont put marks on temporary list when clearing marks + by group + - LP: #922906 + * SAUCE: fsnotify: change locking order + - LP: #922906 + + [ Tim Gardner ] + + * [Config] CONFIG_NFC_LLCP=y + * [Config] get-firmware: Filter new files through fwinfo + * [Config] CONFIG_MTD_NAND_DOCG4=m for all arches + * [Config] CONFIG_DRM_EXYNOS_HDMI=y + * [Config] CONFIG_XEN=y for all arches + * [Config] CONFIG_SND_OMAP_SOC_ZOOM2=m + * [Config] CONFIG_MMC_DW_EXYNOS=m + * [Config] CONFIG_GPIO_ADNP=m + * [Config] find-obsolete-firmware: Use correct path + * rebase to v3.7-rc8 + - LP: #1084640 + + [ Upstream Kernel Changes ] + + * Revert "VFS: don't do protected {sym,hard}links by default" + - LP: #1084192 + + -- Tim Gardner Wed, 28 Nov 2012 16:07:08 +0000 + +linux (3.7.0-4.12) raring; urgency=low + + [ Tim Gardner ] + + * Revert "[Config] Use -j1 for headers_install" + * Revert "[Config] install-arch-headers needs a valid config" + Strayed into the weeds in search of the root cause of the periodic + build failure. + Fixes powerpc FTBS introduced in -4.11. + * [Config] hmake -j1 + The kernel makefile appears to have parallel dependency + problems for the install_headers target. This appears to be root + cause for a periodic build failure on N-way machines. + + -- Leann Ogasawara Tue, 27 Nov 2012 12:33:06 -0800 + +linux (3.7.0-4.11) raring; urgency=low + + [ Tim Gardner ] + + * [Config] Use -j1 for headers_install + Also fixes a powerpc FTBS introduced by + "[Config] install-arch-headers needs a valid config". 
+ + -- Tim Gardner Tue, 27 Nov 2012 10:19:30 -0700 + +linux (3.7.0-4.10) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] add rebuild-test support for autopkgtest + - LP: #1081500 + * [tests] move build tests out of the way + - LP: #1081500 + * [tests] add an autopkgtest rebuild test + - LP: #1081500 + + [ Tim Gardner ] + + * rebase to v3.7-rc7 + * SAUCE: Remove emi62 files duplicated in linux-firmware + * SAUCE: Remove sb16 files duplicated in linux-firmware + * SAUCE: Remove whiteheat files duplicated in linux-firmware + * SAUCE: Remove yamaha files duplicated in linux-firmware + * SAUCE: Remove dsp56k files used only by m68k + * SAUCE: firmware: Remove last vestiges of dabusb + * SAUCE: Remove vicam files duplicated in linux-firmware + * [Config] install-arch-headers needs a valid config + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc7 + - LP: #1076840 + - LP: #1081466 + + -- Leann Ogasawara Wed, 21 Nov 2012 06:07:23 -0800 + +linux (3.7.0-3.9) raring; urgency=low + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_X86_CPUFREQ_NFORCE2=y + - LP: #1079900 + * Add nfsv3 to nfs-modules udeb + + [ Paolo Pisati ] + + * [Config] SND_OMAP_SOC*=y + - LP: #1019321 + + [ Stefan Bader ] + + * SAUCE: (no-up) xen/netfront: handle compound page fragments on transmit + - LP: #1078926 + + [ Tim Gardner ] + + * Revert "SAUCE: SECCOMP: audit: always report seccomp violations" + - LP: #1079469 + * Revert "SAUCE: omap3 clocks .dev_id = NULL" + * rebase to v3.7-rc6 + * SAUCE: script to detect obsolete firmware + * SAUCE: Remove yam files duplicated in linux-firmware + * SAUCE: Remove tehuti files duplicated in linux-firmware + * SAUCE: Remove matrox files duplicated in linux-firmware + * SAUCE: Remove cxgb3 files duplicated in linux-firmware + * SAUCE: Remove r128 files duplicated in linux-firmware + * SAUCE: Remove acenic files duplicated in linux-firmware + * SAUCE: Remove keyspan files duplicated in linux-firmware + * SAUCE: Remove sun files duplicated in linux-firmware + * SAUCE: Remove radeon files duplicated in linux-firmware + * SAUCE: Update bnx2x firmware to 7.8.2.0 + * [Config] generic.inclusion-list: econet has disappeared + + [ Upstream Kernel Changes ] + + * seccomp: forcing auditing of kill condition + - LP: #1079469 + * rebase to v3.7-rc6 + + -- Leann Ogasawara Tue, 20 Nov 2012 12:28:55 -0800 + +linux (3.7.0-2.8) raring; urgency=low + + [ Andy Whitcroft ] + + * Revert "overlayfs: disable until FTBS is fixed" + * Revert "ubuntu: overlayfs" + * Revert "ubuntu: AUFS" + * ubuntu: overlayfs -- overlayfs: add statfs support + * ubuntu: overlayfs -- ovl: switch to __inode_permission() + * ubuntu: overlayfs -- overlayfs: copy up i_uid/i_gid from the underlying + inode + - LP: #944386 + * ubuntu: AUFS (no-squash): basic framework and update machinary + * ubuntu: AUFS (no-squash) -- aufs3-base.patch + * ubuntu: AUFS (no-squash) -- aufs3-standalone.patch + * ubuntu: AUFS: aufs-update -- follow the uapi header changes + * ubuntu: AUFS -- update to f2873474324d0a31af4340554b9715f51331bc7f + * ubuntu: AUFS (no-squash) -- reenable + - LP: #1079193 + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: add i_op->dentry_open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + * ubuntu: overlayfs -- vfs: export __inode_permission() 
to modules + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Robin Dong ] + + * ubuntu: overlayfs -- overlayfs: fix possible leak in ovl_new_inode + * ubuntu: overlayfs -- overlayfs: create new inode in ovl_link + + -- Andy Whitcroft Thu, 15 Nov 2012 13:35:12 +0000 + +linux (3.7.0-1.7) raring; urgency=low + + [ Tim Gardner ] + + * [Config] Drop dependency on libaudit-dev + Its a universe package which causes an FTBS on the builders. + libaudit-dev is not strictly required for the perf tools build. + + -- Tim Gardner Wed, 14 Nov 2012 10:08:13 -0700 + +linux (3.7.0-1.6) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] enforce -- switch CONFIG_NVRAM to more readable form + * [Config] better encode the CONFIG_NVRAM constaint + * enforcer -- fix debugging output + + [ Ben Collins ] + + * [Config] Add custom_override rule to allow for alternate kernel + file/install + * [Config] Use SRCPKGNAME as prefix for indep linux headers package + + [ Tim Gardner ] + + * [Config] Dropped armel + * Drop highbank from ABI fetch list + * [Config] Use dh_prep instead of 'dh_clean -k' + * [Config] Build depend on libaudit-dev, libunwind8-dev for tools + * [Config] Document binary-indep dependency chain + * rebase to v3.7-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc5 + + -- Tim Gardner Tue, 13 Nov 2012 07:13:37 -0500 + +linux (3.7.0-0.5) raring; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_AMD_IOMMU_V2=m + - LP: #1071520 + * [Config] CONFIG_MTD_ONENAND_SIM=n for armel + Fixes FTBS + + -- Tim Gardner Thu, 08 Nov 2012 15:45:39 -0500 + +linux (3.7.0-0.4) raring; urgency=low + + [ Ben Collins ] + + * [Config] Update enforce rule for CONFIG_NVRAM to better suit flavours + + [ Tim Gardner ] + + * [Config] do_tools=false for arm + + -- Tim Gardner Thu, 08 Nov 2012 05:39:51 -0700 + +linux (3.7.0-0.3) raring; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_DRM_EXYNOS_HDMI=n for armhf + * [Config] CONFIG_MTD_NAND_DOCG4=n for armel/armhf + * [Config] Drop highbank harder + + -- Tim Gardner Wed, 07 Nov 2012 18:11:45 +0000 + +linux (3.7.0-0.2) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] add fs/udf to linux-image to support DVD/CD formats in virtual + instances + - LP: #1066921 + * [Config] drop highbank builds + + [ Jeremy Kerr ] + + * SAUCE: efivarfs: Implement exclusive access for {get, set}_variable + - LP: #1063061 + + [ Leann Ogasawara ] + + * Reinstate dropped.txt from Ubuntu-3.7.0-0.1-rc1 + + [ Tim Gardner ] + + * [Config] Dropped powerpc/ppc64 in favour of the community kernel + * [Config] CONFIG_MODULE_SIG=y for amd64,i386, and highbank + * rebase to v3.7-rc4 + * SAUCE: MODSIGN: Emit error for incorrectly signed module + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc4 + + -- Tim Gardner Mon, 05 Nov 2012 05:35:41 -0700 + +linux (3.7.0-0.1) raring; urgency=low + + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc3 + - LP: #1056078 + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc2 + - LP: #1060729 + - LP: #1059523 + - LP: #1006690 + - LP: #1049623 + - LP: #1046512 + - LP: #1052499 + - LP: #1037642 + - LP: #559939 + - LP: #1052460 + - LP: #939161 + - LP: #1046734 + + -- Tim Gardner Tue, 02 Oct 2012 08:13:07 -0600 + +linux (3.6.0-0.1) UNRELEASED; urgency=low + + + [ Upstream Kernel Changes ] + + * rebase to v3.6 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc7 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc6 + - LP: #1000424 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc5 + - LP: 
#1040077 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc4 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc3 + - LP: #1038651 + - LP: #1034779 + + -- Leann Ogasawara Tue, 24 Jul 2012 06:37:09 -0700 + +linux (3.5.0-6.6) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION enable IPv6 + experimental features + * SAUCE: highbank -- export clock functions for modules + * [Config] highbank -- reenable CONFIG_TOUCHSCREEN_W90X900 + * [Config] highbank -- renenable CONFIG_SERIO_AMBAKMI + * [Config] highbank -- reenable CONFIG_RFKILL_GPIO + * [Config] highbank -- reenable CONFIG_MMC_SDHCI_PXAV3 + * [Config] highbank -- reenable CONFIG_MMC_SDHCI_PXAV3 + * [Config] highbank -- reenable CONFIG_KEYBOARD_SAMSUNG + * [Config] highbank -- reenable CONFIG_FB_ARMCLCD + * [Config] highbank -- reenable CONFIG_DW_DMAC + * [Config] highbank -- reenable CONFIG_USB_R8A66597_HCD + * [Config] highbank -- reenable CONFIG_USB_MV_UDC + * [Config] highbank -- reenable CONFIG_USB_DWC3 + * [Config] highbank -- reenable CONFIG_SATA_MV + * [Config] highbank -- reenable CONFIG_PATA_ARASAN_CF + * [Config] highbank -- CONFIG_CAN_C_CAN_PLATFORM + * [Config] highbank -- reenable CONFIG_MMC_ARMMMCI + * [Config] highbank -- reenable CONFIG_SERIAL_AMBA_PL010 + * [Config] highbank -- reenable CONFIG_ATMEL_PWM + * [Config] highbank -- enable CONFIG_CHECKPOINT_RESTORE + * [Config] highbank -- enable CONFIG_EXPERT + * [Config] highbank -- enable CONFIG_CHECKPOINT_RESTORE + * [Config] enable CONFIG_USB_DYNAMIC_MINORS + * [Config] enable CONFIG_USB_EHCI_TT_NEWSCHED + * [Config] enable CONFIG_USB_ETH_EEM + * [Config] enable CONFIG_USB_HCD_BCMA/CONFIG_USB_HCD_SSB + * [Config] disable CONFIG_USB_M66592 + * [Config] enable CONFIG_USB_NET2272 + * [Config] enable CONFIG_USB_R8A66597 + * [Config] annotate: CONFIG_USB_OMAP not required for our h/w + * [Config] set CONFIG_USB_MUSB_HDRC=m for omap + * [Config] annotate: CONFIG_USB_G_MULTI fix rule + * [Config] CONFIG_USB_GPIO_VBUS=m for OMAP + * [Config] Enable CONFIG_DRM_AST/_CIRRUS_QEMU/_MGAG200 + * [Config] sync configuration armhf omap -> armel omap + * [Config] annotate: CONFIG_IIO triggers build failures on OMAP4 + * [Config] disable CONFIG_OMAP_IOVMM is deprecated + + [ Bryan Wu ] + + * [Config] change default IO scheduler from CFQ to Deadline + + [ Leann Ogasawara ] + + * Revert "[Config] Temporarily disable CONFIG_MV643XX_ETH on powerpc" + * [Config] Disable CONFIG_MOUSE_INPORT + + [ Tim Gardner ] + + * SAUCE: firmware: Update bnx2x to current firmware version 7.2.51 + * [Config] Add bnx2x firmware to nic-modules udeb + * SAUCE: Add script to convert firmware to ihex format + * SAUCE: firmware: Upgrade bnx2 to current versions + * [Config] Add tigon firmware to nic-modules udeb + * [Config] CONFIG_EARLY_PRINTK_DBGP=y + - LP: #1026761 + * SAUCE: Remove redundant cis firmware + * SAUCE: Remove redundant emi26 firmware + * SAUCE: Remove redundant ttusb-budget firmware + * SAUCE: Remove redundant sun/cassini firmware + * SAUCE: Remove redundant ositech/Xilinx7OD firmware + * SAUCE: Remove redundant 3com/typhoon.bin firmware + * SAUCE: Remove redundant yamaha/ds1 firmware + * SAUCE: Remove redundant keyspan_pda firmware + * rebase to v3.5 + + [ Upstream Kernel Changes ] + + * rebase to v3.5 + - LP: #1027828 + + -- Leann Ogasawara Mon, 23 Jul 2012 05:57:04 -0700 + +linux (3.5.0-5.5) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] annotations: initial import of configuration annotations + + [ Bryan Wu ] + 
+ * [Config] enforcer -- add CONFIG_I2C_DESIGNWARE_PLATFORM enforce checker + + [ Leann Ogasawara ] + + * Rebase to v3.5-rc7 + + [ Manoj Iyer ] + + * SAUCE: Bluetooth: btusb: Add vendor specific ID (0a5c:21f4) BCM20702A0 + - LP: #1010281 + + [ Tim Gardner ] + + * [Config] enable CONFIG_I2C_HELPER_AUTO for all flavours as policy + expects + * [Config] CONFIG_I2O_CONFIG_OLD_IOCTL=n + * [Config] CONFIG_BRIDGE_EBT_ULOG=n + * [Config] CONFIG_IP_NF_QUEUE=n + * [Config] CONFIG_MTD_DOC2000=n + * [Config] CONFIG_PRINT_QUOTA_WARNING=n + * [Config] CONFIG_PRISM54=n + * [Config] CONFIG_SCx200_I2C=n + * [Config] CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc7 + + -- Leann Ogasawara Mon, 16 Jul 2012 15:38:41 -0700 + +linux (3.5.0-4.4) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] getabis should be extracting all packages + - LP: #1021174 + * [Config] getabis -- series uses linux-image-extra + - LP: #1021174 + * rebase to v3.5-rc6 + + [ Bryan Wu ] + + * [Config] built-in CONFIG_MICREL_PHY as other PHY drivers for all + flavours + * [Config] sync CONFIG_MOUSE_PS2_ config for all flavours + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_RT2800USB_RT35XX and CONFIG_RT2800USB_RT53XX + - LP: #1019561 + + [ Paolo Pisati ] + + * [Config] SND_OMAP_SOC, SND_OMAP_SOC_MCBSP and SND_OMAP_SOC_OMAP3_BEAGLE =y + - LP: #1019321 + + [ Stefan Bader ] + + * SAUCE: (pre-up) net: dont use __netdev_alloc_skb for bounce buffer + - LP: #1018456 + * (config) Disable ACPI_PROCFS_POWER + + [ Tim Gardner ] + + * [Config] CONFIG_ACPI_BGRT=y + * Extract firmware module info during getabi + - LP: #1021174 + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc6 + + -- Leann Ogasawara Mon, 09 Jul 2012 08:50:20 -0700 + +linux (3.5.0-3.3) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_MEMTEST=y + - LP: #1004535 + * [Config] config-check: add support for a cut operation + * [Config] enforcer -- switch to cut where appropriate + + [ Leann Ogasawara ] + + * Rebase to v3.5-rc5 + * [Config] Updateconfigs after rebase to v3.5-rc5 + + [ Luis Henriques ] + + * SAUCE: ocfs2: Fix NULL pointer dereferrence in + __ocfs2_change_file_space + - LP: #1006012 + + [ Seth Forshee ] + + * SAUCE: (drop after 3.5) drm/i915: ignore pipe select bit when checking + for LVDS register initialization + - LP: #1012800 + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc5 + - LP: #1013183 + - LP: #1017017 + - LP: #884652 + + -- Leann Ogasawara Mon, 02 Jul 2012 06:41:58 -0700 + +linux (3.5.0-2.2) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.5-rc4 + + [ Arend van Spriel ] + + * SAUCE: (drop after 3.5) brcmsmac: fix NULL pointer crash in + brcms_c_regd_init() + - LP: #950320 + + [ Bryan Wu ] + + * [Config] Sync CONFIG_CGROUP_MEM_RES_CTLR_SWAP for ARM + + [ Chris J Arges ] + + * PACKAGING: add .gnu_debuglink sections to .ko files + - LP: #669641 + + [ Leann Ogasawara ] + + * d-i: Add hid-generic to input-modules + - LP: #1017879 + + [ Ming Lei ] + + * SAUCE: Revert "mmc: omap_hsmmc: Enable Auto CMD12" + - LP: #1017717, #225 + + [ Paolo Pisati ] + + * SAUCE: Revert "Fix OMAP EHCI suspend/resume failure (i693)" + - LP: #1017718 + * [Config] Disable generic USB_EHCI_HCD_PLATFORM on omap3 + + [ Seth Forshee ] + + * SAUCE: (drop after 3.5) brcm80211: smac: don't set up tx power limits + during initialization + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: always set channel specified + by mac80211 + - LP: #950320 + * SAUCE: 
(drop after 3.5) brcm80211: smac: remove unused code for 40MHz + channels + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: clean up channel.c + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: inform mac80211 of the X2 + regulatory domain + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: enable/disable radio on + regulatory updates + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: use mac80211 channel data for + tx power limits + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: don't validate channels + against internal regulatory data + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: use current regulatory domain + when checking whether OFDM is allowed + - LP: #950320 + + [ Tim Gardner ] + + * [Config] Enable CONFIG_CGROUPS for highbank + - LP: #1014692 + * [Config] FB_OMAP*=y and PANEL_TFP410=y + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc4 + + -- Leann Ogasawara Tue, 26 Jun 2012 06:21:05 -0700 + +linux (3.5.0-1.1) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] highbank -- enable CONFIG_RFKILL=y and CONFIG_CAN=m + + [ Leann Ogasawara ] + + * Rebase to v3.5-rc1 + * [Config] Remove USB_DEVICEFS from the config enforcer + * [Config] Updateconfigs after rebase to v3.5-rc1 + * [Config] Temporarily disable CONFIG_MACH_NOKIA_RX51 on arm + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EETI on arm + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EGALAX on arm + * [Config] Temporarily disable CONFIG_EZX_PCAP on arm + * [Config] Temporarily disable CONFIG_LIS3L02DQ on arm + * [Config] Temporarily disable CONFIG_TI_CPSW on arm + * [Config] Temporarily disable CONFIG_GPIO_EM on arm + * [Config] Temporarily disable CONFIG_SERIAL_8250_EM on armhf + * [Config] Temporarily disable CONFIG_STMMAC_ETH on armhf + * [Config] Temporarily disable CONFIG_HW_RANDOM_ATMEL on armhf + * Rebase to v3.5-rc2 + * [Config] Updateconfigs after rebase to v3.5-rc2 + * [Config] Temporarily disable CONFIG_MV643XX_ETH on powerpc + * Rebase to v3.5-rc3 + * [Config] Updateconfigs after rebase to v3.5-rc3 + + [ Paul Mundt ] + + * SAUCE: fix bug.h's inclusion of kernel.h + + [ Stefan Bader ] + + * SAUCE: Fix compile failures of dm-raid45 + * [Config] Enable dm-raid45 + * Move dependency on crda to extra package + - LP: #657901 + * SAUCE: Mask CR4 writes on older Xen hypervisors + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc3 + - LP: #993162 + - LP: #925577 + * rebase to v3.5-rc2 + * rebase to v3.5-rc1 + - LP: #955892 + - LP: #978038 + - LP: #987371 + - LP: #929545 + - LP: #942316 + - LP: #903853 + + -- Leann Ogasawara Fri, 08 Jun 2012 14:28:46 -0700 + +linux (3.4.0-5.11) quantal-proposed; urgency=low + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_ARM_LPAE + - LP: #1009061 + + [ Oleksij Rempel ] + + * SAUCE: b43: do not call ieee80211_unregister_hw if we are not registred + - LP: #1008905 + + [ Paolo Pisati ] + + * [Config] omap3: MFD_OMAP_USB_HOST is usb host in omap2+. 
+ - LP: #1009061 + + -- Leann Ogasawara Tue, 05 Jun 2012 08:06:28 -0700 + +linux (3.4.0-4.10) quantal; urgency=low + + [ Leann Ogasawara ] + + * Temporarily disable ABI and module check + + -- Leann Ogasawara Mon, 04 Jun 2012 20:27:31 -0700 + +linux (3.4.0-4.9) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] fix config split to avoid the shared config + * [Config] updateconfigs following split config fix + * [Config] linux-image-extras needs full postinst + * [Config] CONFIG_BLK_DEV_NVME commonise across architectures + * [Config] CONFIG_HP_WATCHDOG enable as module + * [Config] CONFIG_PDC_ADMA is not boot essential + * [Config] CONFIG_XEN_ACPI_PROCESSOR should be enabled on x86 + * [Config] CONFIG_VT6655/CONFIG_VT6656=m + * [Config] CONFIG_TRANZPORT=m commonise + * [Config] CONFIG_R3964=m commonise + * [Config] CONFIG_SCSI_DH=m commonise + * [Config] CONFIG_SCSI_IBMVSCSIS=m commonise + * [Config] CONFIG_AMD_PHY=y phys are not autoloadable + * [Config] CONFIG_SCSI_QLA_ISCSI=m commonise + * [Config] CONFIG_SCSI_SPI_ATTR=m commonise + * [Config] CONFIG_USB_SN9C102 is deprecated disable + * [Config] CONFIG_USB_SI470X=m commonise + * [Config] CONFIG_USB_ET61X251=m commonise + * [Config] CONFIG_RTS_PSTOR=m commonise + * [Config] CONFIG_SCANLOG=m commonise + * [Config] CONFIG_SCSI_SYM53C8XX_2=m commonise + * [Config] CONFIG_SM_FTL=m commonise + * [Config] CONFIG_SOLO6X10=m commonise + * [Config] CONFIG_SND_PCM_OSS=n using pulseaudio emulation instead + * [Config] CONFIG_SPI_DESIGNWARE=m commonise + * [Config] CONFIG_SPI_SPIDEV=m commonise + * [Config] CONFIG_TABLET_USB_WACOM=m commonise + * [Config] CONFIG_TPS65010=m commonise + * [Config] CONFIG_STE10XP=y commonise + * [Config] CONFIG_X25_ASY=m commonise + * [Config] CONFIG_USB_MON=m commonise + * [Config] CONFIG_VME_BUS=m commonise + * [Config] CONFIG_W35UND=m commonise + * [Config] -CONFIG_TCG_TPM=y commonise + * [Config] highbank -- commonise filesystems + * [Config] highbank -- commonise subsystems + * [Config] highbank -- commonise network protocols + * [Config] highbank -- commonise input drivers + * [Config] highbank -- commonise CRYPTO options + * [Config] highbank -- commonise HID options + * [Config] highbank -- commonise sensors options + * [Config] highbank -- commonise EXPORTFS/FHANDLE + * [Config] highbank -- commonise CONFIG_CRYPTO_LZO + * [Config] highbank -- commonise ENCRYPTED_KEYS + * [Config] highbank -- commonise CONFIG_ATALK + * [Config] highbank -- commonise INET/INET6 + * [Config] highbank -- commonise NLS + * [Config] highbank -- commonise BLK/CHR + * [Config] highbank -- CONFIG_EXT2_FS=y boot essential on highbank + * [Config] highbank -- commonise INET/INET6 part 2 + * [Config] highbank -- commonise PHY settings + * [Config] highbank -- commonise CRC settings + * [Config] highbank -- commonise BINFMT settings + * [Config] highbank -- commonise DM settings + * [Config] highbank -- commonise RTC_DRV settings + * [Config] highbank -- commonise KEYBOARD/MOUSE settings + * [Config] highbank -- commonise USB settings + * [Config] highbank -- commonise GPIO settings + * [Config] highbank -- commonise I2C settings + * [Config] highbank -- commonise numerous subsystem selectors + * [Config] highbank -- commonise A-C modules missmatches + * [Config] highbank -- commonise D-F modules missmatches + * [Config] CONFIG_AUDIT_LOGINUID_IMMUTABLE incompatible with upstart + * [Config] highbank -- commonise G-I modules missmatches + * [Config] highbank -- commonise J-L modules missmatches + * [Config] highbank -- commonise M 
modules missmatches + * [Config] highbank -- commonise N-P modules missmatches + * [Config] highbank -- commonise Q-R modules missmatches + * [Config] highbank -- commonise S modules missmatches -- part 1 + * [Config] highbank -- commonise S modules missmatches -- part 2 + * [Config] highbank -- commonise T modules missmatches + * [Config] highbank -- commonise U-Z modules missmatches + + [ Ike Panhc ] + + * [Config] add highbank flavour + - LP: #1000831 + + [ Mark Langsdorf ] + + * SAUCE: arm highbank: add support for pl320-ipc driver + - LP: #1000831 + + [ Rob Herring ] + + * SAUCE: input: add a key driver for highbank + - LP: #1000831 + * SAUCE: ARM: highbank: Add smc calls to enable/disable the L2 + - LP: #1000831 + * SAUCE: force DMA buffers to non-bufferable on highbank + - LP: #1000831 + * SAUCE: net: calxedaxgmac: fix net timeout recovery + - LP: #1000831 + + [ Tim Gardner ] + + * [Config] CONFIG_IWLWIFI_EXPERIMENTAL_MFP=n + * [Config] CONFIG_PCI_REALLOC_ENABLE_AUTO=y + * [Config] CONFIG_CIFS_EXPERIMENTAL has disappeared + * [Config] Homogenize CIFS configs across all arches + * [Config] armhf should not be skipabi or skipmodules + - LP: #1006913 + + -- Leann Ogasawara Mon, 04 Jun 2012 05:52:49 -0700 + +linux (3.4.0-3.8) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] include include/generated/compile.h + - LP: #942569 + * [Config] fix up postinst to ensure we know which error is which + - LP: #1002388 + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: async_populate_rootfs: fix build warnings + - LP: #1003417 + + [ John Johansen ] + + * Revert "SAUCE: AppArmor: Add the ability to mediate mount" + * SAUCE: apparmor: Add the ability to mediate mount + * SAUCE: AppArmor: basic networking rules + * SAUCE: apparmor: fix profile lookup for unconfined + - LP: #978038, #987371 + * SAUCE: apparmor: fix long path failure due to disconnected path + - LP: #955892 + + [ Mario Limonciello ] + + * SAUCE: dell-laptop: rfkill blacklist Dell XPS 13z, 15 + - LP: #901410 + + [ Stefan Bader ] + + * (config) Built-in xen-acpi-processor + + [ Tim Gardner ] + + * [Config] CONFIG_NET_DSA=m + - LP: #1004148 + * [Config] Ensure CONFIG_XEN_ACPI_PROCESSOR=y for amd64 + + -- Leann Ogasawara Fri, 25 May 2012 11:38:33 -0700 + +linux (3.4.0-3.7) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] drop the virtual flavour in favour of a split generic et al + * [Config] enforcer -- drop IDLE enforcement + * [Config] enable CONFIG_SCSI_VIRTIO=m for amd64 + * [Config] updateconfigs following removal of -virtual + + [ Leann Ogasawara ] + + * Rebase to v3.4 + + [ Seth Forshee ] + + * [Config] disable CONFIG_B43_BCMA_EXTRA + + [ Tim Gardner ] + + * [Config] Check for extras when building udebs + * [Config] Collapsed generic-pae into generic [i386] + + [ Upstream Kernel Changes ] + + * rebase to v3.4 + + -- Leann Ogasawara Mon, 21 May 2012 07:23:47 -0700 + +linux (3.4.0-2.6) quantal; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: overlayfs -- overlayfs: update touch_atime() usage" + * Revert "ubuntu: overlayfs -- overlayfs: switch from d_alloc_root() to + d_make_root()" + * Revert "ubuntu: overlayfs -- overlayfs: follow header cleanup" + * Revert "ubuntu: overlayfs -- overlayfs: apply device cgroup and + security permissions to overlay files" + * Revert "ubuntu: overlayfs -- fs: limit filesystem stacking depth" + * Revert "ubuntu: overlayfs -- overlay: overlay filesystem documentation" + * Revert "ubuntu: overlayfs -- overlayfs: implement show_options" + * Revert "ubuntu: overlayfs -- overlayfs: 
add statfs support" + * Revert "ubuntu: overlayfs -- overlay filesystem" + * Revert "ubuntu: overlayfs -- vfs: introduce clone_private_mount()" + * Revert "ubuntu: overlayfs -- vfs: export do_splice_direct() to modules" + * Revert "ubuntu: overlayfs -- vfs: add i_op->open()" + * Revert "ubuntu: overlayfs -- vfs: pass struct path to __dentry_open()" + * ubuntu: overlayfs -- overlayfs: add statfs support + * ubuntu: overlayfs -- inode_only_permission: export inode level + permissions checks + * ubuntu: overlayfs -- overlayfs: switch to use inode_only_permissions + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: pass struct path to __dentry_open() + * ubuntu: overlayfs -- vfs: add i_op->open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Robin Dong ] + + * ubuntu: overlayfs -- overlayfs: fix possible leak in ovl_new_inode + * ubuntu: overlayfs -- overlayfs: create new inode in ovl_link + + [ Tim Gardner ] + + * [Config] perarch and indep tools builds need separate build directories + * Prevent upgrading a non-PAE CPU + * perf is not parallel build safe + + -- Leann Ogasawara Wed, 16 May 2012 08:43:18 -0700 + +linux (3.4.0-2.5) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] perarch and indep tools builds need separate build directories + + [ Tim Gardner ] + + * Prevent upgrading a non-PAE CPU + * [Config] build debug + * [Config] perf tools are not parallel build safe + + -- Leann Ogasawara Tue, 15 May 2012 11:37:53 -0700 + +linux (3.4.0-2.4) quantal; urgency=low + + [ Leann Ogasawara ] + + * Revert "SAUCE: fsam7400: use UMH_WAIT_PROC consistently" + * Revert "ubuntu: fsam7400 select CHECK_SIGNATURE and depend on X86" + * Revert "ubuntu: fsam7400: Depend on CHECK_SIGNATURE" + * Revert "ubuntu: fsam7400 -- Cleanup Makefile" + * Revert "ubuntu: fsam7400 -- kill switch for Fujitsu Siemens Amilo M + 7400" + * Revert "ubuntu: omnibook: fix source file newline" + * Revert "ubuntu: omnibook -- update BOM" + * Revert "SAUCE: Make CONFIG_{OMNIBOOK, AVERATEC_5100P, PACKARDBELL_E5} + depend on X86" + * Revert "ubuntu: omnibook -- Added missing BOM file" + * Revert "ubuntu: omnibook -- support Toshiba (HP) netbooks" + * Revert "ubuntu: nx-emu - i386: mmap randomization for executable + mappings" + * Revert "SAUCE: disable_nx should not be in __cpuinitdata section for + X86_32" + * Revert "ubuntu: nx-emu - i386: NX emulation" + * Revert "ubuntu: rfkill drivers -- version 1.3" + * Temporarily disable module check + * [Config] Remove CONFIG_FSAM7400 + * [Config] Remove CONFIG_OMNIBOOK + * [Config] Update configs + * Rebase to v3.4-rc7 + * SAUCE: genirq: export handle_edge_irq() and irq_to_desc() + + [ Tim Gardner ] + + * Updated generic-pae description + * Rebase to v3.4-rc6 + * install-tools depends on build targets + + [ Upstream Kernel Changes ] + + * kconfig: in debug mode some 0 length message prints occur + * rebase to v3.4-rc7 + * rebase to v3.3-rc6 + + -- Leann Ogasawara Mon, 14 May 2012 08:22:56 -0700 + +linux (3.4.0-1.3) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] control.stub is an intermediate product not a dependancy + - LP: #992414 + + [ Leann Ogasawara ] + + * remove i386 generic from getabis + + [ 
Upstream Kernel Changes ] + + * (pre-stable) b43: only reload config after successful initialization + - LP: #950295 + + -- Leann Ogasawara Wed, 02 May 2012 09:48:14 -0700 + +linux (3.4.0-1.2) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] add build depends for flex, bison and pkg-config + + -- Andy Whitcroft Tue, 01 May 2012 13:15:41 +0100 + +linux (3.4.0-1.1) quantal; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: overlayfs -- overlayfs: apply device cgroup and security + permissions to overlay files + - LP: #915941, #918212 + - CVE-2012-0055 + + [ Leann Ogasawara ] + + * Open Q + * Rebase to v3.4-rc5 + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EETI on arm + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EGALAX on arm + * [Config] Temporarily disable CONFIG_EZX_PCAP on arm + * [Config] Temporarily disable CONFIG_MFD_OMAP_USB_HOST on arm + * [Config] Temporarily disable CONFIG_LIS3L02DQ on arm + * [Config] Temporarily disable CONFIG_USB_EHCI_HCD_PLATFORM on arm + * [Config] Temporarily disable CONFIG_TI_CPSW on arm + * [Config] Temporarily disable CONFIG_AX88796 on arm + + [ Upstream Kernel Changes ] + + * vfs: pass struct path to __dentry_open() + * vfs: add i_op->open() + * vfs: export do_splice_direct() to modules + * vfs: introduce clone_private_mount() + * overlay filesystem + * overlayfs: add statfs support + * overlayfs: implement show_options + * overlay: overlay filesystem documentation + * fs: limit filesystem stacking depth + * overlayfs: follow header cleanup + * overlayfs: switch from d_alloc_root() to d_make_root() + * overlayfs: update touch_atime() usage + * rebase to v3.4-rc5 + - LP: #950490 + * rebase to v3.4-rc4 + * rebase to v3.4-rc3 + * rebase to v3.4-rc2 + * rebase to v3.4-rc1 + * rebase to v3.3 + * rebase to v3.3-rc7 + * rebase to v3.3-rc6 + * rebase to v3.3-rc5 + * rebase to v3.3-rc4 + - LP: #900802 + - LP: #930842 + * rebase to v3.3-rc3 + - LP: #924320 + - LP: #923316 + - LP: #923409 + - LP: #918254 + * rebase to v3.3-rc2 + * rebase to v3.3-rc1 + - LP: #795823 + - LP: #909419 + - LP: #910792 + - LP: #878701 + - LP: #724831 + + -- Leann Ogasawara Wed, 25 Jan 2012 06:50:04 -0800 + +linux (3.2.0-10.18) precise; urgency=low + + [ Tim Gardner ] + + * SAUCE: ecryptfs: Print inode on metadata error + + [ Upstream Kernel Changes ] + + * Revert "proc: enable writing to /proc/pid/mem" + - LP: #919115 + - CVE-2012-0056 + * (pre-stable) ALSA: HDA: Use LPIB position fix for Macbook Pro 7, 1 + - LP: #909419 + + -- Andy Whitcroft Tue, 24 Jan 2012 10:15:12 +0000 + +linux (3.2.0-10.17) precise; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: overlayfs -- fs: limit filesystem stacking depth" + * Revert "SAUCE: overlayfs -- overlay: overlay filesystem documentation" + * Revert "SAUCE: overlayfs -- overlayfs: implement show_options" + * Revert "SAUCE: overlayfs -- overlayfs: add statfs support" + * Revert "SAUCE: overlayfs -- overlay filesystem" + * Revert "SAUCE: overlayfs -- vfs: introduce clone_private_mount()" + * Revert "SAUCE: overlayfs -- vfs: export do_splice_direct() to modules" + * Revert "SAUCE: overlayfs -- vfs: add i_op->open()" + * ensure debian/ is not excluded from git by default + * add new scripting to handle buglinks in rebases + * ubuntu: overlayfs -- overlayfs: add statfs support + * ubuntu: overlayfs -- overlayfs: apply device cgroup and security + permissions to overlay files + - LP: #915941, #918212 + - CVE-2012-0055 + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Leann Ogasawara ] + + * 
Revert "SAUCE: dmar: disable if ricoh multifunction detected" + * [Config] Disable CONFIG_INTEL_IOMMU_DEFAULT_ON + - LP: #907377, #911236 + * [Config] Enable CONFIG_IRQ_REMAP + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: pass struct path to __dentry_open() + * ubuntu: overlayfs -- vfs: add i_op->open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Upstream Kernel Changes ] + + * (pre-stable) x86/PCI: amd: factor out MMCONFIG discovery + - LP: #647043 + * (pre-stable) PNP: work around Dell 1536/1546 BIOS MMCONFIG bug that + breaks USB + - LP: #647043 + + -- Leann Ogasawara Mon, 16 Jan 2012 07:10:08 -0800 + +linux (3.2.0-9.16) precise; urgency=low + + [ Andy Whitcroft ] + + * [Config] Enable numerous CONFIG_VIDEO_* cards on ARM + * [Config] pull ARM sound modules =m + * [Config] CONFIG_RTC_DRV_TEST is for testing only + * [Config] CONFIG_USB_DUMMY_HCD is testing only + * [Config] CONFIG_USB_FILE_STORAGE is deprecated + + [ Leann Ogasawara ] + + * Revert "[Config] Temporarily disable CONFIG_CAN_TI_HECC on armel" + * [Config] Enable CONFIG_HW_RANDOM_PASEMI=m + * [Config] Enable CONFIG_MMC_TMIO=m + * [Config] Enable CONFIG_MTD_NAND_FSL_ELBC=m + * [Config] Enable CONFIG_ISI=m + * [Config] Enable CONFIG_MMC=y + * [Config] Enable CONFIG_LIRC_PARALLEL=m + * [Config] Enable CONFIG_MAC_EMUMOUSEBTN=m + * [Config] Enable CONFIG_CHR_DEV_SG=y + * [Config] Enable CONFIG_GPIO_PCA953X=m + * [Config] Enable CONFIG_GPIO_TWL4030=m + * [Config] Enable CONFIG_INET_DIAG=m + * [Config] Enable CONFIG_NLS_ISO8859_1=m + * [Config] Enable CONFIG_NVRAM=m + * [Config] Enable CONFIG_SLIP=m + * [Config] Enable CONFIG_PC300TOO=m + * [Config] Enable CONFIG_TUN=y + * [Config] Enable CONFIG_NET_CLS_CGROUP=m + * [Config] Enable CONFIG_THERMAL=y + * [Config] Enable CONFIG_PPP=y + * [Config] Enable CONFIG_PCI_STUB=m + * Rebase to v3.2.1 + * [Config] Enable CONFIG_RTL8192E=m + * [Config] Enable CONFIG_RTS5139=m + + [ Stefan Bader ] + + * [Config] Make CONFIG_VIRTIO_(NET|BLK)=y + + [ Upstream Kernel Changes ] + + * ARM: restart: add restart hook to machine_desc record + * ARM: restart: allow platforms more flexibility specifying restart mode + * ARM: restart: move reboot failure handing into machine_restart() + * ARM: restart: remove argument to setup_mm_for_reboot() + * ARM: 7159/1: OMAP: Introduce local common.h files + * ARM: restart: only perform setup for restart when soft-restarting + * ARM: 7189/1: OMAP3: Fix build break in cpuidle34xx.c because of irq + function + * ARM: idmap: populate identity map pgd at init time using .init.text + * ARM: suspend: use idmap_pgd instead of suspend_pgd + * ARM: proc-*.S: place cpu_reset functions into .idmap.text section + * ARM: idmap: use idmap_pgd when setting up mm for reboot + * ARM: head.S: only include __turn_mmu_on in the initial identity mapping + * ARM: SMP: use idmap_pgd for mapping MMU enable during secondary booting + * ARM: 7194/1: OMAP: Fix build after a merge between v3.2-rc4 and ARM + restart changes + * ARM: lib: add call_with_stack function for safely changing stack + * ARM: reset: implement soft_restart for jumping to a physical address + * ARM: stop: execute platform callback from cpu_stop code + * ARM: kexec: use soft_restart for branching to the reboot buffer + * ARM: restart: 
omap: use new restart hook + * topdown mmap support + - LP: #861296 + + [ Upstream Kernel Changes ] + + * Rebase to v3.2.1 + + -- Leann Ogasawara Fri, 13 Jan 2012 20:32:08 +0100 + +linux (3.2.0-8.15) precise; urgency=low + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_ACPI_PROCFS + * Remove server from getabis + * Temporarily disable module check + * [Config] Disable CONFIG_MTD_TESTS + * [Config] Disable CONFIG_X86_E_POWERSAVER + * [Config] Set CONFIG_ARCNET=m + * [Config] Enable CONFIG_ATM_DUMMY=m + * [Config] Enable CONFIG_BLK_DEV_MD=y + * ubuntu: fsam7400 select CHECK_SIGNATURE and depend on X86 + * [Config] Enable CONFIG_BLK_DEV_SD=y + * [Config] Enable CONFIG_BLK_DEV_SR=y + * [Config] Enable CONFIG_BLK_DEV_UB=m + * [Config] Enable CONFIG_COPS=m + * [Config] Enable CONFIG_DVB_USB_EC168=m + * [Config] Enable CONFIG_ENC28J60=m + * [Config] Enable CONFIG_FB_UVESA=m + * [Config] Enable CONFIG_FB_ATY=m + * [Config] Enable CONFIG_BROADCOM_PHY=y + * [Config] Enable CONFIG_CICADA_PHY=y + * [Config] Enable CONFIG_DAVICOM_PHY=y + * [Config] Enable CONFIG_ICPLUS_PHY=y + * [Config] Enable CONFIG_LSI_ET1011C_PHY=y + * [Config] Enable CONFIG_LXT_PHY=y + * [Config] Enable CONFIG_MARVELL_PHY=y + * [Config] Enable CONFIG_NATIONAL_PHY=y + * [Config] Enable CONFIG_QSEMI_PHY=y + * [Config] Enable CONFIG_SMSC_PHY=y + * [Config] Enable CONFIG_VITESSE_PHY=y + * Add 3w-sas to scsi-modules + - LP: #776542 + + [ Mathieu Trudel-Lapierre ] + + * SAUCE: ipv6: make the net.ipv6.conf.all.use_tempaddr sysctl propagate + to interface settings + + [ Paolo Pisati ] + + * Revert "SAUCE: omap3: beagle: if rev unknown, assume xM revision C" + - LP: #912199 + * Revert "SAUCE: omap3: beagle: detect new xM revision B" + - LP: #912199 + * Revert "SAUCE: omap3: beaglexm: fix DVI initialization" + - LP: #912199 + + [ Upstream Kernel Changes ] + + * Bluetooth: Add support for BCM20702A0 [0a5c:21e3] + - LP: #906832 + + -- Leann Ogasawara Fri, 06 Jan 2012 10:02:03 -0800 + +linux (3.2.0-8.14) precise; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_SND_USB_6FIRE + - LP: #912197 + * rebase to mainline v3.2 final release + * updateconfigs following rebase to v3.2 final + * ubuntu: AUFS -- add BOM and automated update script + * ubuntu: AUFS -- include the aufs_types.h file in linux-libc-headers + - LP: #684666 + * ubuntu: AUFS -- update aufs-update to track new locations of headers + * ubuntu: AUFS -- clean up the aufs updater and BOM + * ubuntu: AUFS -- documentation on updating aufs2 + * ubuntu: AUFS -- aufs3-base.patch + * ubuntu: AUFS -- aufs3-standalone.patch + * ubuntu: AUFS -- fix undefined __devcgroup_inode_permission + * ubuntu: AUFS -- fix undefined security_path_link + * ubuntu: AUFS -- update to 4cf5db36bcd9748e8e7270022f295f84d1fc2245 + * ubuntu: AUFS -- updateconfigs following update + * ubuntu: AUFS -- suppress benign plink warning messages + - LP: #621195 + * ubuntu: AUFS -- enable in config and makefile + * ubuntu: AUFS -- disable in favor of overlayfs + * [Config] linux-virtual -- should include the extX modules + - LP: #912308 + + [ Tyler Hicks ] + + * SAUCE: eCryptfs: Improve statfs reporting + - LP: #885744 + + [ Upstream Kernel Changes ] + + * rebase to upstream v3.2 + + -- Leann Ogasawara Mon, 26 Dec 2011 20:24:30 -0800 + +linux (3.2.0-7.13) precise; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to upstream 3.2-rc7 + + -- Leann Ogasawara Mon, 19 Dec 2011 09:14:34 -0800 + +linux (3.2.0-6.12) precise; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to upstream v3.2-rc6 + + 
-- Leann Ogasawara Fri, 16 Dec 2011 10:19:02 -0800 + +linux (3.2.0-5.11) precise; urgency=low + + [ Andy Whitcroft ] + + * enforcer -- allow arch and flavour predicates to take lists + * enforcer -- simplify armel/armhf specific options + * enforcer -- fix incorrectly specified flavour matches + + [ Leann Ogasawara ] + + * [Config] Disable IRQ_REMAP + * [Config] Enable CONFIG_SENSORS_LM95245=m + * [Config] Enable CONFIG_SENSORS_MAX1668=m + * [Config] Enable CONFIG_SENSORS_NTC_THERMISTOR=m + * [Config] Enable CONFIG_SENSORS_MAX6639=m + * [Config] Enable CONFIG_SENSORS_MAX6642=m + * [Config] Enable CONFIG_SENSORS_LINEAGE=m + * [Config] Enable CONFIG_CRYPTO_SALSA20=m + * [Config] Enable CONFIG_PATA_TOSHIBA=m + * [Config] Enable CONFIG_POHMELFS=m + * [Config] Enable CONFIG_NET_PACKET_ENGINE=y + * [Config] Enable CONFIG_PATA_OPTI=m + * add overlayfs to virtual inclusion list + - LP: #903897 + * add veth to virtual inclusion list + - LP: #903897 + * SAUCE: resolve WARNING: at drivers/block/floppy.c:2929 do_fd_request + + [ Paolo Pisati ] + + * [Config] DEFAULT_MMAP_MIN_ADDR=32k on arm + - LP: #903346 + + [ Tim Gardner ] + + * [Config] CONFIG_LOCKUP_DETECTOR=y + - LP: #903615 + + [ Upstream Kernel Changes ] + + * rebase to upstream 55b02d2f + + -- Leann Ogasawara Mon, 12 Dec 2011 07:08:10 -0800 + +linux (3.2.0-4.10) precise; urgency=low + + [ Kyle McMartin ] + + * SAUCE: dmar: disable if ricoh multifunction detected + - LP: #894070 + + [ Seth Forshee ] + + * SAUCE: dell-wmi: Demote unknown WMI event message to pr_debug + - LP: #581312 + + [ Tim Gardner ] + + * Start new release, Bump ABI, rebase to 3.2-rc5 + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_SENSORS_AK8975=m + + -- Tim Gardner Sat, 10 Dec 2011 08:57:04 -0700 + +linux (3.2.0-3.9) precise; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: ext4: correct partial write discard size calculation + - LP: #894768 + + [ Leann Ogasawara ] + + * Revert "SAUCE: x86, microcode, AMD: Restrict microcode reporting" + - LP: #892615 + + [ Matthew Garrett ] + + * SAUCE: pci: Rework ASPM disable code + + [ Upstream Kernel Changes ] + + * x86: Fix boot failures on older AMD CPU's + - LP: #892615 + * EHCI : Fix a regression in the ISO scheduler + - LP: #899165 + + -- Leann Ogasawara Mon, 05 Dec 2011 10:37:36 -0800 + +linux (3.2.0-3.8) precise; urgency=low + + [ Andy Whitcroft ] + + * armhf -- add d-i configuration + * armhf -- disable ABI checks for armhf + * armhf -- add arch to getabis config + + -- Andy Whitcroft Sat, 03 Dec 2011 14:22:52 +0000 + +linux (3.2.0-3.7) precise; urgency=low + + [ Stefan Bader ] + + * SAUCE: x86/paravirt: PTE updates in k(un)map_atomic need to be + synchronous, regardless of lazy_mmu mode + - LP: #854050 + + [ Tim Gardner ] + + * rebase to v3.2-rc4 + + -- Leann Ogasawara Fri, 02 Dec 2011 11:53:56 -0800 + +linux (3.2.0-2.6) precise; urgency=low + + [ Andy Whitcroft ] + + * armhf -- fix omap flavour to build on armhf + * [Config] CONFIG_PATA_MACIO=y to fix MAC qemu boot + + [ Borislav Petkov ] + + * SAUCE: x86, microcode, AMD: Restrict microcode reporting + - LP: #892615 + + [ Colin Watson ] + + * Add pata_macio to pata-modules + + [ Tim Gardner ] + + * [Config] Prefer crda over wireless-crda + * [Config] Fix virtual inclusion list. 
+ - LP: #897795 + + -- Leann Ogasawara Wed, 30 Nov 2011 06:09:35 -0800 + +linux (3.2.0-2.5) precise; urgency=low + + [ Paolo Pisati ] + + * [Config] PANEL_DVI=y + + -- Leann Ogasawara Mon, 28 Nov 2011 09:13:24 -0800 + +linux (3.2.0-2.4) precise; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.2-rc3 + + [ Leann Ogasawara ] + + * Revert "SAUCE: xen: Do not use pv spinlocks on HVM" + * Revert "fix ERROR: __devcgroup_inode_permission undefined" + * Revert "olpc_dcon_xo_1_5 needs delay.h" + * Revert "olpc_dcon_xo_1 needs delay.h" + * rebase to 6fe4c6d4 + * [Config] updateconfigs after rebase to 6fe4c6d4 + + [ Tim Gardner ] + + * [Config] Replace wireless-crda with crda,wireless-regdb + - LP: #856421 + * [Config] Relax the dependencies on crda + + [ Upstream Kernel Changes ] + + * (pre-stable) HID: bump maximum global item tag report size to 96 bytes + - LP: #724831 + * Ubuntu: remove coreutils|fileutils package dependency + - LP: #892814 + * iio: iio_event_getfd -- fix ev_int build failure + + [ Upstream Kernel Changes ] + + * Rebase to v3.2-rc3 + + -- Andy Whitcroft Thu, 24 Nov 2011 16:20:45 +0000 + +linux (3.2.0-1.3) precise; urgency=low + + [ Upstream Kernel Changes ] + + * Ubuntu: Add ext2 to fs-core-modules + - LP: #893395 + + -- Leann Ogasawara Mon, 21 Nov 2011 20:42:33 -0800 + +linux (3.2.0-1.2) precise; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_NFC and associated devices + * SAUCE: allow brcmsmac and b43 to both build + + [ Soren Hansen ] + + * Add ixgbe driver to d-i + - LP: #891969 + + -- Leann Ogasawara Mon, 21 Nov 2011 08:33:46 -0800 + +linux (3.2.0-1.1) precise; urgency=low + [ Andy Whitcroft ] + + * armhf -- enable armhf and create the first flavours + * SAUCE: ensure root is ready before running usermodehelpers in it + * [Config] enforcer -- ensure CONFIG_FAT_FS is built-in on arm + + [ Leann Ogasawara ] + + * Temporarily ignore module check + * [Config] Enable PCI_IOV on powerpc + * [Config] Temporarily disable CONFIG_PASEMI_MAC on powerpc + * rebase to v3.2-rc2 + * SAUCE: include for cpuidle34xx arm build + * SAUCE: include for linux/mtd/map.h arm build + * SAUCE: include and for mmc_core arm build + * SAUCE: select ARM_AMBA if OMAP3_EMU + * [Config] updateconfigs after select ARM_AMBA + * [Config] Temporarily disable CONFIG_KVM_BOOK3S_32 on powerpc + * [Config] Enable CONFIG_EXT2_FS=m + * [Config] Build in CONFIG_SATA_AHCI=y + * Resolve linux-image-extra's install dependency + + [ Seth Forshee ] + + * [Config] Enable EVENT_POWER_TRACING_DEPRECATED=y for powertop + * SAUCE: (drop after 3.2) Input: ALPS - move protocol information to + Documentation + * SAUCE: (drop after 3.2) Input: ALPS - add protocol version field in + alps_model_info + * SAUCE: (drop after 3.2) Input: ALPS - remove assumptions about packet + size + * SAUCE: (drop after 3.2) Input: ALPS - add support for protocol versions + 3 and 4 + * SAUCE: (drop after 3.2) Input: ALPS - add semi-MT support for v3 + protocol + * SAUCE: (drop after 3.2) Input: ALPS - add documentation for protocol + versions 3 and 4 + + [ Stefan Bader ] + + * [Config] Built-in xen-netfront and xen-blkfront + * Fix build of dm-raid45 and re-enable it + + [ Tim Gardner ] + + * [Config] CONFIG_USB_XHCI_HCD=y + - LP: #886167 + * [Config] CONFIG_R6040=m + - LP: #650899 + * SAUCE: Add a new entry (413c:8197) to Bluetooth USB device ID table + - LP: #854399 + * [Config] Consolidated amd64 server flavour into generic + * [Config] updateconfigs after rebase to 3.2-rc1 + * [Config] Disabled dm-raid4-5 + * [Config] Disabled 
ndiswrapper + * [Config] Disable vt6656 + * [Config] exclude ppp-modules for virtual flavour + * [Config] CONFIG_MEMSTICK_R592=m + - LP: #238208 + + [ Upstream Kernel Changes ] + + * CHROMIUM: seccomp_filter: new mode with configurable syscall filters + - LP: #887780 + * CHROMIUM: seccomp_filter: add process state reporting + - LP: #887780 + * CHROMIUM: seccomp_filter: Document what seccomp_filter is and how it + works. + - LP: #887780 + * CHROMIUM: x86: add HAVE_SECCOMP_FILTER and seccomp_execve + - LP: #887780 + * CHROMIUM: arm: select HAVE_SECCOMP_FILTER + - LP: #887780 + * CHROMIUM: seccomp_filters: move to btrees + * CHROMIUM: enable CONFIG_BTREE + * CHROMIUM: seccomp_filter: kill NR_syscall references + * CHROMIUM: seccomp_filters: guard all ftrace wrapper code + * CHROMIUM: seccomp_filters: clean up warnings; kref mistake + * CHROMIUM: seccomp_filter: remove "skip" from copy and add drop helper + * CHROMIUM: seccomp_filter: allow CAP_SYS_ADMIN management of execve + * CHROMIUM: seccomp_filter: inheritance documentation + * CHROMIUM: seccomp_filter: make inherited filters composable + * CHROMIUM: Fix seccomp_t compile error + - LP: #887780 + * CHROMIUM: Fix kref usage + - LP: #887780 + * CHROMIUM: enable CONFIG_SECCOMP_FILTER and CONFIG_HAVE_SECCOMP_FILTER + * rebase to v3.2-rc2 + + -- Leann Ogasawara Mon, 31 Oct 2011 09:24:39 -0400 + +linux (3.1.0-2.3) precise; urgency=low + + [ Tim Gardner ] + + * Add postinit and postrm scripts to the extras package + - LP: #882120 + + -- Leann Ogasawara Fri, 28 Oct 2011 12:48:33 -0700 + +linux (3.1.0-2.2) precise; urgency=low + + [ Andy Whitcroft ] + + * debian: add locking to protect debian/files from parallel update + + [ Leann Ogasawara ] + + * rebase to v3.1 + + [ Upstream Kernel Changes ] + + * rebase to v3.1 + + -- Leann Ogasawara Wed, 19 Oct 2011 07:12:38 -0700 + +linux (3.1.0-1.1) precise; urgency=low + + [ Andiry Xu ] + + * SAUCE: (drop during 3.2 merge) xHCI: AMD isoc link TRB chain bit quirk + - LP: #872811 + + [ Andy Whitcroft ] + + * Revert "ubuntu: compcache -- follow changes to bd_claim/bd_release" + - LP: #832694 + * Revert "ubuntu: compcache -- version 0.5.3" + - LP: #832694 + * [Config] standardise CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + * [Config] Enable CONFIG_MACVTAP=m + - LP: #822601 + * record the compiler in the ABI and check for inconsistant builds + * [Config] move ECRYPT_FS back to =y for all architectures + - LP: #827197 + * [Config] enable CONFIG_DRM_VMWGFX=m + - LP: #698009 + * [Config] re-fix ECRYPT_FS=y + - LP: #827197 + * enforcer -- ensure we have CONFIG_ECRYPT_FS=y + - LP: #827197 + * [Config] dropping compcache configuration options + * [Config] standardise on HZ=250 + * SAUCE: headers_install: fix #include "..." usage for userspace + - LP: #824377 + * make module-inclusion selection retain the left overs + * add a new linux-image-extras package for virtual + + [ Colin Watson ] + + * Deliver more Atheros, Ralink, and iwlagn NIC drivers to d-i + + [ edwin_rong ] + + * SAUCE: Staging: add driver for Realtek RTS5139 cardreader + - LP: #824273 + + [ Greg Kroah-Hartman ] + + * SAUCE: staging: rts5139: add vmalloc.h to some files to fix the build. + - LP: #824273 + + [ Jesse Sung ] + + * SAUCE: Unregister input device only if it is registered + - LP: #839238 + + [ Jiri Kosina ] + + * SAUCE: HID: add MacBookAir4, 2 to hid_have_special_driver[] + + [ Joshua V. Dillon ] + + * SAUCE: HID: add support for MacBookAir4,2 keyboard. 
+ + [ Kees Cook ] + + * [Config] enable and enforce SECCOMP_FILTER on x86 + + [ Keng-Yu Lin ] + + * [Config] Enable CONFIG_RTS5139=m on i386/amd64 + - LP: #824273 + + [ Leann Ogasawara ] + + * Revert "ubuntu: overlayfs -- ovl: make lower mount read-only" + * Revert "ubuntu: overlayfs -- fs: limit filesystem stacking depth" + * Revert "ubuntu: overlayfs -- ovl: improve stack use of lookup and + readdir" + * Revert "ubuntu: overlayfs -- ovl: fix overlayfs over overlayfs" + * Revert "ubuntu: overlayfs -- overlayfs: implement show_options" + * Revert "ubuntu: overlayfs -- overlayfs: add statfs support" + * Revert "ubuntu: overlayfs -- overlay filesystem" + * Revert "ubuntu: overlayfs -- overlay: overlay filesystem documentation" + * Revert "SAUCE: ARM: OMAP: Add macros for comparing silicon revision" + * Revert "SAUCE: OMAP: DSS2: check for both cpu type and revision, rather + than just revision" + * Revert "SAUCE: OMAP: DSS2: enable hsclk in dsi_pll_init for OMAP36XX" + * Revert "ubuntu: fsam7400 disable driver" + - LP: #876030 + * rebase to v3.1-rc1 + * [Config] updateconfigs after rebase to v3.1-rc1 + * rebase to v3.1-rc2 + * [Config] Updateconfigs after rebase to v3.1-rc2 + * ubuntu: Yama - update calls to generic_permission() and + inode->i_op->permission() + * ubuntu: ndiswrapper -- remove netdev_priv macro + * ubuntu: aufs -- Temporarily disable due to build failure + * [Config] Diable INTEL_MID_PTI on armel + * [Config] Temporarily disable CONFIG_FTMAC100 on armel + * [Config] Temporarily disable CONFIG_FTGMAC100 on armel + * [Config] Temporarily disable CONFIG_CAN_TI_HECC on armel + * [Config] Temporarily disable CONFIG_VIDEO_OMAP2_VOUT on armel + * [Config] Set CONFIG_DM_MIRROR=m on amd64, i386, and arm + * [Config] Set CONFIG_DM_MULTIPATH=m on amd64, i386, and arm + * [Config] Set CONFIG_DM_SNAPSHOT=m on amd64, i386, and arm + * [Config] Enable CONFIG_EDAC_AMD8111=m on powerpc + * [Config] Enable CONFIG_EDAC_AMD8131=m on powerpc + * [Config] Enable CONFIG_EDAC_CPC925=m on powerpc + * [Config] Enable CONFIG_EDAC_PASEMI=m on powerpc + * [Config] Enable CONFIG_ECHO=m on powerpc + * [Config] Enable CONFIG_ET131X=m on powerpc + * [Config] Set CONFIG_FB_MATROX=m + * [Config] Enable CONFIG_FB_UDL=m on powerpc + * [Config] Set CONFIG_FB_VIRTUAL=n + * [Config] Enable CONFIG_FB_VGA16=m on powerpc + * [Config] Enable CONFIG_GPIO_MAX732X=m on arm + * [Config] Enable CONFIG_GPIO_PCF857X=m on arm + * [Config] Set CONFIG_HOTPLUG_PCI_FAKE=m + * [Config] Enable CONFIG_HOTPLUG_PCI=y on powerpc + * [Config] Enable CONFIG_HOTPLUG_PCI_CPCI=y on powerpc + * [Config] Enable CONFIG_HP_ILO=m on powerpc-smp + * [Config] Enable CONFIG_I2C_PASEMI=m on powerpc + * [Config] Enable CONFIG_IBM_BSR=m on powerpc + * [Config] Enable CONFIG_IBMVETH=m on powerpc + * [Config] Enable CONFIG_IDE_PHISON=m on powerpc + * [Config] Enable CONFIG_IGB=m on powerpc + * [Config] Enable CONFIG_IIO=m on powerpc + * [Config] Enable CONFIG_INFINIBAND_NES=m + * [Config] Enable CONFIG_IPMI_HANDLER=m on arm + * [Config] Enable CONFIG_IWL3945=m on powerpc + * [Config] Disable CONFIG_KVM_BOOK3S_64 + * [Config] Enable CONFIG_LAPBETHER=m on arm + * [Config] Enable CONFIG_LEDS_GPIO=m on powerpc + * [Config] Enable CONFIG_LEDS_CLEVO_MAIL=m all arch's + * [Config] Enable CONFIG_LEDS_PCA9532=m on powerpc + * [Config] Enable CONFIG_LEDS_PCA955X=m on powerpc + * [Config] Enable CONFIG_LEDS_TRIGGER_DEFAULT_ON=m on powerpc + * [Config] Set CONFIG_LEDS_TRIGGER_HEARTBEAT=m on arm and powerpc + * [Config] Set CONFIG_LEDS_TRIGGER_TIMER=m on powerpc + * 
[Config] Enable CONFIG_LINE6_USB=m on arm and powerpc + * [Config] Enable CONFIG_MEMSTICK=m on arm + * [Config] Enable CONFIG_MTD_AFS_PARTS=m on arm + * [Config] Enable CONFIG_MTD_ALAUDA=m on arm + * [Config] Enable CONFIG_MTD_AR7_PARTS=m on arm + * [Config] Enable CONFIG_MTD_ARM_INTEGRATOR=m on arm + * [Config] Enable CONFIG_MOXA_SMARTIO=m on powerpc + * [Config] Enable CONFIG_MTD_DATAFLASH=m on arm + * [Config] Enable CONFIG_MTD_GPIO_ADDR=m on arm + * [Config] Enable CONFIG_MTD_IMPA7=m on arm + * [Config] Enable CONFIG_MTD_NAND_GPIO=m on arm + * [Config] Enable CONFIG_MTD_NAND_NANDSIM=m on arm + * [Config] Enable CONFIG_MTD_NAND_PASEMI=m on powerpc + * [Config] Enable CONFIG_MTD_NAND_PLATFORM=m on arm + * [Config] Enable CONFIG_MTD_NAND_TMIO=m on arm + * [Config] Enable CONFIG_MTD_SST25L=m on arm + * [Config] Enable CONFIG_NET_CLS_CGROUP=y on arm + * [Config] Enable CONFIG_NET_CLS_FLOW=m on arm + * [Config] Enable CONFIG_NET_CLS_U32=m on arm + * [Config] Enable CONFIG_NET_DCCPPROBE=m on arm + * [Config] Enable CONFIG_NET_SCH_INGRESS=m on arm + * [Config] Enable CONFIG_NET_TCPPROBE=m on arm + * [Config] Enable CONFIG_PASEMI_MAC=m on powerpc + * [Config] Enable CONFIG_PATA_NS87410=m on powerpc + * [Config] Enable CONFIG_I2C_GPIO=m on powerpc64-smp + * [Config] Enable CONFIG_PANEL=m on powerpc + * [Config] Enable CONFIG_PATA_CMD640_PCI=m on powerpc + * SAUCE: x86: reboot: Make Dell Latitude E6520 use reboot=pci + - LP: #833705 + * [Config] Add CONFIG_EFI_VARS=y to the enforcer + - LP: #837332 + * [Config] Update CONFIG_EFI_VARS enforcer check + * [Config] Add aufs to virtual flavor inclusion list + - LP: #844159 + * SAUCE: x86: reboot: Make Dell Optiplex 790 use reboot=pci + - LP: #818933 + * SAUCE: x86: reboot: Make Dell Optiplex 990 use reboot=pci + - LP: #768039 + * SAUCE: x86: reboot: Make Dell Latitude E6220 use reboot=pci + - LP: #838402 + * [Config] Add igbvf to the virtual flavor inclusion list + - LP: #794570 + * [Config] Add ixgbevf to the virtual inclusion list + - LP: #872411 + * [Config] Transition -generic and -server to be identical + * rebase to v3.1-rc10 + + [ Luke Yelavich ] + + * [Config] Disable legacy IDE drivers on powerpc + + [ Ming Lei ] + + * SAUCE: fireware: add NO_MSI quirks for o2micro controller + - LP: #801719 + * SAUCE: ata_piix: make DVD Drive recognisable on systems with Intel + Sandybridge chipsets(v2) + - LP: #737388, #782389, #794642 + + [ Paolo Pisati ] + + * [Config] Compile-in vfat support for armel + - LP: #853783 + + [ Randy Dunlap ] + + * SAUCE: staging: fix rts5139 depends & build + - LP: #824273 + + [ Rene Bolldorf ] + + * SAUCE: (drop after 3.0) ideapad: Check if acpi already handle backlight + power in 'ideapad_backlight_notify_power' to avoid a page fault + + [ Seth Forshee ] + + * SAUCE: (no-up) Input: elantech - Add v3 hardware support + - LP: #681904 + * SAUCE: (drop after 3.1) usb_storage: Don't freeze in usb-stor-scan + - LP: #810020 + + [ Stefan Bader ] + + * (config) Package macvlan and macvtap for virtual + * [Config] Force perf to use libiberty for demangling + - LP: #783660 + * SAUCE: xen: Do not use pv spinlocks on HVM + - LP: #838026 + + [ Tim Gardner ] + + * [Config] Clean up tools rules + * [Config] Package x86_energy_perf_policy and turbostat + - LP: #797556 + * rebase to v3.1-rc3 + * [Config] Simplify binary-udebs dependencies + * [Config] kernel preparation cannot be parallelized + * [Config] Linearize module/abi checks + * [Config] Linearize and simplify tree preparation rules + * [Config] Build kernel image in parallel with 
modules + * [Config] Set concurrency for kmake invocations + * [Config] Improve install-arch-headers speed + * [Config] Fix binary-perarch dependencies + * [Config] Removed stamp-flavours target + * [Config] Serialize binary indep targets + * [Config] Use build stamp directly + * [Config] Restore prepare-% target + * rebase to v3.1-rc4 + * rebase to v3.1-rc5 + * [Config] Disable makedumpfile for i386/amd64 + * rebase to v3.1-rc6 + * [Config] Fix binary-% build target + * rebase to v3.1-rc7 + * rebase to v3.1-rc8 + * SAUCE: Add a new entry (413c:8197) to Bluetooth USB device ID table + - LP: #854399 + * [Config] Enable ftrace support in the mac80211 layer + - LP: #865171 + * rebase to v3.1-rc9 + * SAUCE: usb/core/devio.c: Check for printer class specific request + - LP: #872711 + + [ Upstream Kernel Changes ] + + * overlay filesystem + * overlayfs: add statfs support + * overlayfs: implement show_options + * overlay: overlay filesystem documentation + * fs: limit filesystem stacking depth + + [ Will Drewry ] + + * SAUCE: seccomp_filter: new mode with configurable syscall filters + * SAUCE: seccomp_filter: add process state reporting + * SAUCE: seccomp_filter: Document what seccomp_filter is and how it + works. + * SAUCE: seccomp_filter: add HAVE_SECCOMP_FILTER and seccomp_execve + + [ Upstream Kernel Changes ] + + * rebase to v3.1-rc1 + * rebase to v3.1-rc2 + * rebase to v3.1-rc3 + +CONFIG_BLK_DEV_BSGLIB=y + +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 + -CONFIG_VIDEO_OMAP2_VOUT_VRFB=y + * rebase to v3.1-rc4 + * rebase to v3.1-rc5 + * rebase to v3.1-rc6 + * rebase to v3.1-rc7 + * rebase to v3.1-rc8 + * rebase to v3.1-rc9 + * rebase to v3.1-rc10 + + -- Leann Ogasawara Wed, 10 Aug 2011 15:43:38 -0700 + +linux (3.1.0-1.0) oneiric; urgency=low + + [ Leann Ogasawara ] + + * Open P-series + + -- Leann Ogasawara Wed, 10 Aug 2011 13:42:40 -0700 + +linux (3.0.0-8.10) oneiric; urgency=low + + [ Adam Jackson ] + + * SAUCE: drm/i915/pch: Fix integer math bugs in panel fitting + - LP: #753994 + + [ John Johansen ] + + * [Config] Enable missing IPv6 options + + [ Leann Ogasawara ] + + * [Config] Disable config IWLWIFI_DEVICE_SVTOOL + - LP: #819925 + * Rebase to 3.0.1 + + [ Upstream Kernel Changes ] + + * x86, intel, power: Correct the MSR_IA32_ENERGY_PERF_BIAS message + * ALSA: hda - Turn on extra EAPDs on Conexant codecs + - LP: #783582 + * KVM: Remove SMEP bit from CR4_RESERVED_BITS + - LP: #796476 + * KVM: Add SMEP support when setting CR4 + - LP: #796476 + * KVM: Mask function7 ebx against host capability word9 + - LP: #796476 + * KVM: Add instruction fetch checking when walking guest page table + - LP: #796476 + + [ Upstream Kernel Changes ] + + * rebase to v3.0.1 + + -- Leann Ogasawara Fri, 05 Aug 2011 11:32:25 -0700 + +linux (3.0.0-7.9) oneiric; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] add local prefix to oss local change_bits" + * Revert "SAUCE: add tracing for user initiated readahead requests" + * Revert "SAUCE: vfs: Add a trace point in the mark_inode_dirty function" + * Revert "SAUCE: Input: ALPS - Enable Intellimouse mode for Lenovo + Zhaoyang E47" + * Revert "SAUCE: fix documentation strings for struct input_keymap_entry" + * Revert "SAUCE: vt -- fix handoff numbering to 1..n and add range checks + (grub)" + * Revert "SAUCE: vt -- fix handoff numbering to 1..n and add range + checks" + * Revert "SAUCE: vt -- allow grub to request automatic vt_handoff" + * Revert "SAUCE: vt -- maintain bootloader screen mode and content until + vt switch" + * [Config] enable 
CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 + - LP: #816035 + * ubuntu: Yama: if an underlying filesystem provides a permissions op use + it + * SAUCE: (no-up) add tracing for user initiated readahead requests + * SAUCE: vt -- maintain bootloader screen mode and content until vt + switch + * SAUCE: vt -- allow grub to request automatic vt_handoff + + [ Arjan van de Ven ] + + * SAUCE: (no-up) vfs: Add a trace point in the mark_inode_dirty function + + [ Kees Cook ] + + * Revert "SAUCE: (no-up) Disable building the ACPI debugfs source" + * [Config] enforce ACPI_CUSTOM_METHOD disabled + + [ Keng-Yu Lin ] + + * SAUCE: (no-up) Input: ALPS - Enable Intellimouse mode for Lenovo + Zhaoyang E47 + - LP: #632884, #803005 + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_BLK_DEV_CMD64X=m on powerpc + - LP: #513131 + * [Config] Enable CONFIG_RT2800PCI_RT53XX=y + - LP: #815064 + + [ Rezwanul Kabir ] + + * SAUCE: (no-up) Add support for Intellimouse Mode in ALPS touchpad on + Dell E2 series Laptops + - LP: #632884 + + [ Upstream Kernel Changes ] + + * Revert "yama: if an underlying filesystem provides a permissions op use + it" + * Revert "Add support for Intellimouse Mode in ALPS touchpad on Dell E2 + series Laptops" + * Revert "tty: include linux/slab.h for kfree" + * Revert "gpio/ml_ioh_gpio: include linux/slab.h for kfree" + * Revert "pch_dma: add include/slab.h for kfree" + * mmc: Added quirks for Ricoh 1180:e823 lower base clock frequency + - LP: #773524 + * oss: rename local change_bits to avoid powerpc bitsops.h definition + + -- Leann Ogasawara Mon, 25 Jul 2011 09:08:01 -0700 + +linux (3.0.0-7.8) oneiric; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: overlayfs -- overlayfs: add statfs support + * [Config] enable CONFIG_OVERLAYFS + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_ALIM7101_WDT=m on powerpc + * [Config] Enable CONFIG_ASUS_OLED=m on powerpc + * [Config] Disable CONFIG_ATM_DUMMY on arm + * [Config] Enable CONFIG_BLK_DEV_DRBD=m on powerpc + * Temporarily disable module check on arm + * Rebase to 3.0 final + * [Config] Enable CONFIG_CAN_TI_HECC=m on arm + * [Config] Set CONFIG_CDROM_PKTCDVD=m on amd64 and i386 + * [Config] Enable CONFIG_CRYPTO_CCM=m on powerpc + * [Config] Enable CONFIG_CRYPTO_DEV_HIFN_795X=m on powerpc + * [Config] Enable CONFIG_CRYPTO_GCM=m on powerpc + * [Config] Set CRYPTO_LZO=m on powerpc64-smp + * [Config] Enable CONFIG_DM9000=m on arm + * [Config] Set CONFIG_DISPLAY_SUPPORT=m on arm + * [Config] Enable CONFIG_DL2K=m on amd64 and i386 + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: add i_op->open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- ovl: fix overlayfs over overlayfs + * ubuntu: overlayfs -- ovl: improve stack use of lookup and readdir + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + * ubuntu: overlayfs -- ovl: make lower mount read-only + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Tim Gardner ] + + * [Config] Add enic/fnic to udebs + - LP: #801610 + + [ Upstream Kernel Changes ] + + * yama: if an underlying filesystem provides a permissions op use it + + [ Major Kernel Changes ] + + * Rebase to 3.0 final + + -- Leann Ogasawara Thu, 21 Jul 2011 07:01:32 -0700 + +linux (3.0.0-6.7) oneiric; urgency=low + + [ Eagon Yager ] + + * [Config] Fix misspelled 
'skipmodule' in arm makefile. + + [ Keng-Yu Lin ] + + * SAUCE: Input: ALPS - Enable Intellimouse mode for Lenovo Zhaoyang E47 + - LP: #632884, #803005 + + [ Leann Ogasawara ] + + * Revert "[Config] Temporarily disable CONFIG_SMC91X on armel-omap" + * Revert "[Config] Temporarily Disable CONFIG_BRCMSMAC on arm" + * Revert "[Config] Temporarily Disable CONFIG_RTL8192SE on powerpc" + * Revert "[Config] Temporarily Disable CONFIG_RTL8192SE on arm" + * Revert "[Config] Temporarily disable CONFIG_BRCMSMAC on powerpc" + * [Config] Set CONFIG_ACPI_PCI_SLOT=m + * [Config] Set CONFIG_ACPI_SBS=m + * [Config] Set CONFIG_ACPI_WMI=m + * [Config] Set CONFIG_AD7150=m on arm + * [Config] Set CONFIG_AD7152=m on arm + * [Config] Drop CONFIG_GPIO_S5PV210 + * [Config] Drop CONFIG_GPIO_S5PC100 + * [Config] Drop CONFIG_GPIO_PLAT_SAMSUNG + * [Config] Drop CONFIG_GPIO_EXYNOS4 + + [ Stefan Bader ] + + * SAUCE: Re-enable RODATA for i386 virtual + - LP: #809838 + + [ Upstream Kernel Changes ] + + * Revert "Quirk to fix suspend/resume on Lenovo Edge 11,13,14,15" + * (drop after 3.0.0) acer-wmi: Add support for Aspire 1830 wlan hotkey + - LP: #771758 + + -- Leann Ogasawara Wed, 20 Jul 2011 06:36:02 -0700 + +linux (3.0.0-5.6) oneiric; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_RTL8192CU=m + * Rebase to -rc7 + + -- Tim Gardner Mon, 11 Jul 2011 22:13:50 +0100 + +linux (3.0.0-4.5) oneiric; urgency=low + + [ Ming Lei ] + + * SAUCE: fix yama_ptracer_del lockdep warning + - LP: #791019 + + [ Seth Forshee ] + + * SAUCE: (drop after 3.0) asus-wmi: Add callback for hotkey filtering + * SAUCE: (drop after 3.0) eeepc-wmi: Add support for T101MT Home/Express Gate key + * SAUCE: (drop after 3.0) asus-wmi: Enable autorepeat for hotkey input device + * [Config] CONFIG_{ASUS,ASUS_NB,EEEPC}_WMI=m + - LP: #805218 + + [ Tim Gardner ] + + * [Config] updateconfigs after rebase to -rc6+ + Rebased against 4dd1b49c6d215dc41ce50c80b4868388b93f31a3 + * Adopt a 3 digit version, e.g., 3.0.0-x.x + * Revert "UBUNTU: add dependancies for module-init-tools" + This dependency is no longer required for a 3 digit version.
+ + -- Tim Gardner Tue, 05 Jul 2011 14:03:04 +0100 + +linux (3.0-3.4) oneiric; urgency=low + + [ Keng-Yu Lin ] + + * SAUCE: Revert: "dell-laptop: Toggle the unsupported hardware + killswitch" + - LP: #775281 + + [ Leann Ogasawara ] + + * rebase to v3.0-rc5 + * [Config] updateconfigs after rebase to 3.0-rc5 + + [ Tim Gardner ] + + * [Config] Remove ubuntu/rtl8192se + * [Config] Added armel ABI files + * [Config] Removed armel versatile flavour + * [Config] CONFIG_INTEL_MEI=m + - LP: #716867 + + [ Upstream Kernel Changes ] + + * ALSA: hda - Enable auto-parser as default for Conexant codecs + + [ Upstream Kernel Changes ] + + * rebase to v3.0-rc5 + + -- Leann Ogasawara Thu, 30 Jun 2011 14:27:10 +0100 + +linux (3.0-2.3) oneiric; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update to 0e2bafab74f0d1463383faeb93f9fc5eb8c2c54e + + [ Leann Ogasawara ] + + * rebase to v3.0-rc4 + * [Config] updateconfigs after rebase to 3.0-rc4 + * fix ERROR: __devcgroup_inode_permission undefined + + [ Stefan Bader ] + + * SAUCE: iscsitarget: Remove driver from the kernel + + [ Tim Gardner ] + + * SAUCE: rtl8192se: Force a build for a 2.6/3.0 kernel + * [Config] Add grub-efi as a recommended bootloader for server and + generic + - LP: #800910 + + [ Upstream Kernel Changes ] + + * Fix node_start/end_pfn() definition for mm/page_cgroup.c + + [ Leann Ogasawara ] + + * rebase to v3.0-rc4 + + -- Leann Ogasawara Fri, 24 Jun 2011 11:51:12 -0700 + +linux (3.0-1.2) oneiric; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_CAN_CALC_BITTIMING + + [ Leann Ogasawara ] + + * rebase to v3.0-rc3 + * [Config] updateconfigs after rebase to 3.0-rc3 + + [ Upstream Kernel Changes ] + + * perf: clear out make flags when calling kernel make kernelver + + [ Leann Ogasawara ] + + * rebase to v3.0-rc3 + + -- Leann Ogasawara Tue, 14 Jun 2011 07:25:35 -0700 + +linux (3.0-0.1) oneiric; urgency=low + + [ Andy Whitcroft ] + + * use the packaging version in the kernel + * use the kernels idea of its version for version_signature + * add dependancies for module-init-tools + * update control files to version 3 + * printchanges/insertchanges allow override of prev_release + * correct Vcs-Git: to point to oneiric + + [ Leann Ogasawara ] + + * rebase to v3.0-rc1 + * [Config] updateconfigs after rebase to 3.0-rc1 + * ubuntu: dm-raid4-5 fix up build failure + * [Config] Temporarily Disable CONFIG_GPIO_EXYNOS4 on arm + * [Config] Temporarily Disable CONFIG_GPIO_PLAT_SAMSUNG on arm + * [Config] Temporarily Disable CONFIG_GPIO_S5PC100 on arm + * [Config] Temporarily Disable CONFIG_GPIO_S5PV210 on arm + * [Config] Temporarily disable CONFIG_BRCMSMAC on powerpc + * [Config] Temporarily Disable CONFIG_BRCMSMAC on arm + * [Config] Temporarily Disable CONFIG_RTL8192SE on arm + * [Config] Temporarily Disable CONFIG_RTL8192SE on powerpc + * [Config] Temporarily disable CONFIG_SMC91X on armel-omap + * rebase to v3.0-rc2 + + [ Manoj Iyer ] + + * SAUCE: mmc: Enable MMC card reader for RICOH [1180:e823] + - LP: #790754 + + [ Upstream Kernel Changes ] + + * Revert "x86 idle: EXPORT_SYMBOL(default_idle, pm_idle) only when APM + demands it" + * drm/i915: fix regression after clock gating init split + + [ Major Kernel Changes ] + + * rebase from v2.6.39 to v3.0-rc1 + * rebase from v3.0-rc1 to v3.0-rc2 + + -- Andy Whitcroft Thu, 09 Jun 2011 15:18:33 +0100 + +linux (2.6.39-3.10) oneiric; urgency=low + + [ Colin Ian King ] + + * SAUCE: S3 early resume debug via keyboard LEDs + + [ Ingo Molnar ] + + * ubuntu: nx-emu - i386: NX emulation + * ubuntu: 
nx-emu - i386: mmap randomization for executable mappings + + [ Leann Ogasawara ] + + * Revert "[Config] Disable CONFIG_FT1000 on powerpc64-smp" + * Revert "[Config] Disable CONFIG_DM_RAID45" + * [Config] enable CONFIG_BRCMFMAC=y + * [Config] enable CONFIG_MDIO_BITBANG=m across all arch's and flavors + * [Config] enable CONFIG_VIDEO_OUTPUT_CONTROL=m on armel-omap + + [ Robert Nelson ] + + * SAUCE: omap3: beagle: detect new xM revision B + - LP: #770679 + * SAUCE: omap3: beagle: detect new xM revision C + - LP: #770679 + * SAUCE: omap3: beagle: if rev unknown, assume xM revision C + - LP: #770679 + + [ Stefan Bader ] + + * SAUCE: Convert dm-raid45 to new block plugging + + -- Leann Ogasawara Mon, 23 May 2011 11:46:43 -0700 + +linux (2.6.39-3.9) oneiric; urgency=low + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_SCSI_LPFC_DEBUG_FS + * rebase to v2.6.39 + * [Config] enable CONFIG_LLC2=m across all arch's and flavours + * [Config] enable CONFIG_INPUT_APANEL=m + + [ Thomas Schlichter ] + + * SAUCE: vesafb: mtrr module parameter is uint, not bool + - LP: #778043 + * SAUCE: vesafb: enable mtrr WC by default + - LP: #778043 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc7 to v2.6.39 + + -- Andy Whitcroft Fri, 20 May 2011 09:52:32 +0100 + +linux (2.6.39-2.8) oneiric; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-37" + * Revert "ubuntu: AUFS -- aufs2-base.patch aufs2.1-37" + * Revert "[Config] Disable CONFIG_AUFS_FS" + * ubuntu: AUFS -- aufs2-base.patch aufs2.1-39 + * ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-39 + * ubuntu: AUFS -- update to c6b76974311efc5bf3eddf921cd015b6aae46935 + * ubuntu: AUFS -- clean up the aufs updater and BOM + * ubuntu: AUFS -- documentation on updating aufs2 + + [ Kees Cook ] + + * ubuntu: Yama - LSM hooks + * ubuntu: Yama - create task_free security callback + * ubuntu: Yama - add ptrace relationship tracking interface + * ubuntu: Yama - unconditionally chain to Yama LSM + + [ Leann Ogasawara ] + + * Revert "SAUCE: Fix drivers/staging/easycap FTBS" + * Revert "[Config] Disable CONFIG_EASYCAP" + * ubuntu: fsam7400 disable driver + * ubuntu: omnibook disable driver + * ubuntu: rfkill disable driver + + [ Tim Gardner ] + + * SAUCE: Fix extra reference in fb_open() + + -- Leann Ogasawara Mon, 16 May 2011 09:23:56 -0700 + +linux (2.6.39-2.7) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc7 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc6 to v2.6.39-rc7 + + -- Leann Ogasawara Tue, 10 May 2011 10:18:28 +0200 + +linux (2.6.39-1.6) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc6 + * SAUCE: [arm] fixup __aeabi_uldivmod undefined build error + + [ Tim Gardner ] + + * [Config] updateconfigs after rebase to 2.6.39-rc6 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc5 to v2.6.39-rc6 + - LP: #740126 + + -- Leann Ogasawara Thu, 05 May 2011 09:46:12 -0700 + +linux (2.6.39-0.5) oneiric; urgency=low + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: Revert "x86, hibernate: Initialize mmu_cr4_features during boot" + - LP: #764758 + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc5 + * [Config] updateconfigs following rebase to v2.6.39-rc5 + + [ Paolo Pisati ] + + * [Config] s/USB_MUSB_TUSB6010/USB_MUSB_OMAP2PLUS/ on omap3 to get musb + - LP: #759913 + + [ Stefan Bader ] + + * Include nls_iso8859-1 for virtual images + - LP: #732046 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc4 to v2.6.39-rc5 + + -- Leann Ogasawara Wed, 27 Apr 2011 06:39:42 
-0700 + +linux (2.6.39-0.4) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc4 + * [Config] updateconfigs following rebase to v2.6.39-rc4 + * fixup powerpc implicit declaration of function + 'crash_kexec_wait_realmode' + * [Config] Disable CONFIG_FT1000 on powerpc64-smp + + [ Tim Gardner ] + + * [Config] CONFIG_TRANSPARENT_HUGEPAGE=y + - LP: #769503 + * [Config] Add cachefiles.ko to virtual flavour + - LP: #770430 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc3 to v2.6.39-rc4 + + -- Leann Ogasawara Tue, 19 Apr 2011 06:25:20 -0700 + +linux (2.6.39-0.3) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc3 + * crash_kexec_wait_realmode() undefined when !SMP + + [ Tim Gardner ] + + * [Config] CONFIG_PM_ADVANCED_DEBUG=y for i386/amd64 + - LP: #632327 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc2 to v2.6.39-rc3 + + -- Leann Ogasawara Tue, 12 Apr 2011 06:52:24 -0700 + +linux (2.6.39-0.2) oneiric; urgency=low + + [ Gustavo F. Padovan ] + + * SAUCE: Revert "Bluetooth: Add new PID for Atheros 3011" + - LP: #720949 + + [ John Johansen ] + + * AppArmor: Fix masking of capabilities in complain mode + - LP: #748656 + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc2 + * [Config] updateconfigs following rebase to v2.6.39-rc2 + * hv_mouse needs delay.h + * olpc_dcon_xo_1 needs delay.h + * olpc_dcon_xo_1_5 needs delay.h + * Update dropped.txt for Oneiric + + [ Steve Langasek ] + + * [Config] Make linux-libc-dev coinstallable under multiarch + - LP: #750585 + + [ Upstream Kernel Changes ] + + * x86, hibernate: Initialize mmu_cr4_features during boot + - LP: #752870 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc1 to v2.6.39-rc2 + + -- Leann Ogasawara Wed, 06 Apr 2011 11:04:15 -0700 + +linux (2.6.39-0.1) oneiric; urgency=low + + [ Brad Figg ] + + * [Config] Set CONFIG_NR_CPUS=256 for amd64 generic + - LP: #737124 + + [ Henrik Rydberg ] + + * SAUCE: HID: hid-ntrig: add support for 1b96:0006 model + * SAUCE: HID: ntrig: fix suspend/resume on recent models + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: (drop after 2.6.39) v4l: make sure drivers supply a zeroed + struct v4l2_subdev + - LP: #745213 + + [ Kees Cook ] + + * [Config] packaging: adjust perms on vmlinuz as well + * SAUCE: nx-emu: further clarify dmesg reporting + - LP: #745181 + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc1 + * [Config] updateconfigs following rebase to v2.6.39-rc1 + * [Config] Disable CONFIG_DM_RAID45 + * [Config] Disable CONFIG_SCSI_ISCSITARGET + * [Config] Disable CONFIG_EASYCAP + * [Config] Disable CONFIG_AUFS_FS + * update bnx2 firmware files in d-i/firmware/nic-modules + * xhci-pci.c resolve implicit declaration of kzalloc + * [Config] Enable CONFIG_DRM_PSB for only x86 + * [Config] Enable CONFIG_RTS_PSTOR for only x86 + * mfd/asic3: Fix typo, s/irq_data/data/ + + [ Luke Yelavich ] + + * [Config] Disable CONFIG_CRASH_DUMP on 32-bit powerpc kernels + - LP: #745358 + * [Config] Disable CONFIG_DRM_RADEON_KMS on powerpc kernels + * [Config] Build some framebuffer drivers as modules for powerpc kernels. + + [ Manoj Iyer ] + + * SAUCE: thinkpad-acpi: module autoloading for newer Lenovo ThinkPads. 
+ - LP: #745217 + + [ Tim Gardner ] + + * SAUCE: INR_OPEN=4096 + - LP: #663090 + * SAUCE: Increase the default hard limit for open FDs to 4096 + - LP: #663090 + + [ Upstream Kernel Changes ] + + * (drop after 2.6.39-rc1) arm: versatile : Fix typo introduced in irq + namespace cleanup + * (drop after 2.6.39-rc1) [media] staging: altera-jtag needs delay.h + * ALSA: pcm: fix infinite loop in snd_pcm_update_hw_ptr0() + + [ Major Kernel Changes ] + + * rebase from v2.6.38 to v2.6.39-rc1 + + -- Leann Ogasawara Thu, 31 Mar 2011 12:50:10 -0700 + +linux (2.6.39-0.0) oneiric; urgency=low + + [ Leann Ogasawara ] + + * Open Oneiric + + -- Leann Ogasawara Thu, 31 Mar 2011 12:29:23 -0700 + +linux (2.6.38-7.39) natty; urgency=low + + [ Leann Ogasawara ] + + * No change upload. This is just to rebuild with gcc-4.5.2-7ubuntu1. + + -- Leann Ogasawara Thu, 24 Mar 2011 09:27:45 -0700 + +linux (2.6.38-7.38) natty; urgency=low + + [ Leann Ogasawara ] + + * No change upload take 2. 2.6.38-7.37 was accidentally uploaded before + gcc-4.5.2-6ubuntu5 finished building on all arches. + + -- Leann Ogasawara Tue, 22 Mar 2011 06:12:47 -0700 + +linux (2.6.38-7.37) natty; urgency=low + + [ Leann Ogasawara ] + + * No change upload. This is just to rebuild with gcc-4.5.2-6ubuntu5. + + -- Leann Ogasawara Sun, 20 Mar 2011 16:02:48 -0700 + +linux (2.6.38-7.36) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: KLUDGE: work around failed 'shrink-wrap' compiler + optimisation" + * purge last vestiges of maverick + * [Config] switch CONFIG_FB_VESA back to module + + [ Chris Wilson ] + + * SAUCE: drm/i915: Fix pipelined fencing + - LP: #717114 + + [ Loïc Minier ] + + * Include nls_cp437 module in virtual for fat + - LP: #732046 + * Support arch= cross-compilation for any arch + * Fix couple of typos in 0-common-vars.mk + * Enforce DEFAULT_MMAP_MIN_ADDR on armhf + * Add armhf to Debian -> Linux arch map + * Add initial armhf.mk + * Enable common packages for armhf + + [ Upstream Kernel Changes ] + + * Yama: fix default relationship to check thread group + - LP: #737676 + + -- Andy Whitcroft Fri, 18 Mar 2011 18:18:02 +0000 + +linux (2.6.38-7.35) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to 2fbfac4e053861925fa3fffcdc327649b09af54c + * rebase fixes bug #715330 + * [Config] disable CONFIG_SCSI_QLA_ISCSI for powerpc 32bit to fix FTBS + * rebase to v2.6.38 final + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: Apply OPTION_BLACKLIST_SENDSETUP also for ZTE MF626 + - LP: #636091 + + [ Tim Gardner ] + + * [Confg] CONFIG_BOOT_PRINTK_DELAY=y + + [ Upstream Kernel Changes ] + + * Yama: use thread group leader when creating match + - LP: #729839 + * (drop after 2.6.38) ahci: AHCI mode SATA patch for Intel Patsburg SATA + RAID controller + - LP: #735240 + * (drop after v2.6.38) x86, quirk: Fix SB600 revision check + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc8 to v2.6.38 final + - LP: #715330 + + -- Andy Whitcroft Tue, 15 Mar 2011 19:04:19 +0000 + +linux (2.6.38-6.34) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] normalise CONFIG_INTEL_TXT + * SAUCE: KLUDGE: work around failed 'shrink-wrap' compiler optimisation + - LP: #730860 + * rebase to mainline v2.6.38-rc8 + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc7 + fb62c00a6d8942775abc23d1621db1252e2d93d1 + to v2.6.38-rc8 + + -- Andy Whitcroft Tue, 08 Mar 2011 11:54:48 +0000 + +linux (2.6.38-6.33) natty; urgency=low + + [ Andy Whitcroft ] + + * d-i -- enable speakup-modules udeb + - LP: #672699 + * rebase to 
493f3358cb289ccf716c5a14fa5bb52ab75943e5 + * [Config] debian PPC64 configuration + * [Config] cleanup powerpc config fixing unexpected inconsistencies + * [Config] resync ppc64 configuration + * SAUCE: match up ENTRY/END naming for 32/64 bit + * rebase to fb62c00a6d8942775abc23d1621db1252e2d93d1 + * [Config] update configs after rebase to + fb62c00a6d8942775abc23d1621db1252e2d93d1 + * [Config] pps_gen_parport no longer built + + [ Corentin Chary ] + + * SAUCE: (drop after 2.6.38) eeepc-wmi: reorder keymap + - LP: #689393 + * SAUCE: (drop after 2.6.38) eeepc-wmi: add wlan key found on 1015P + - LP: #689393 + + [ John Johansen ] + + * SAUCE: Fix aufs calling of security_path_mknod + - LP: #724456 + + [ Kees Cook ] + + * SAUCE: proc: hide kernel addresses via %pK in /proc//stack + + [ Tim Gardner ] + + * rebase to 2.6.38-rc7 + + [ Upstream Kernel Changes ] + + * Revert "drm/i915: fix corruptions on i8xx due to relaxed fencing" + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc6 to v2.6.38-rc7 + + fb62c00a6d8942775abc23d1621db1252e2d93d1 + - LP: #721389 + - LP: #722925 + - LP: #723672 + - LP: #723676 + - LP: #715318 + + -- Andy Whitcroft Mon, 07 Mar 2011 15:33:17 +0000 + +linux (2.6.38-5.32) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to mainline 6f576d57f1fa0d6026b495d8746d56d949989161 + * [Config] updateconfigs following rebase to v2.6.38-rc6 + * [Config] enable CONFIG_DMAR + - LP: #552311 + + [ Upstream Kernel Changes ] + + * drm/i915: skip FDI & PCH enabling for DP_A + - LP: #561802, #600453, #681877 + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc5 to v2.6.38-rc6 + - LP: #718402 + - LP: #719524 + - LP: #721126 + - LP: #719691 + - LP: #722689 + - LP: #722310 + + -- Andy Whitcroft Tue, 22 Feb 2011 13:28:39 +0000 + +linux (2.6.38-4.31) natty; urgency=low + + [ Andy Whitcroft ] + + * add in bugs closed by upstream patches pulled in by rebases + * rebase to 795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 + * [Config] enable CONFIG_VSX to allow use of vector instructions + * resync with maverick 98defa1c5773a3d7e4c524967eb01d5bae035816 + * rebase to mainline v2.6.38-rc5 + * SAUCE: ecryptfs: read on a directory should return EISDIR if not + supported + - LP: #719691 + + [ Colin Ian King ] + + * SAUCE: Dell All-In-One: Remove need for Dell module alias + + [ Manoj Iyer ] + + * SAUCE: (drop after 2.6.38) add ricoh 0xe823 pci id. + - LP: #717435 + + [ Tim Gardner ] + + * [Config] CONFIG_CRYPTO_CRC32C_INTEL=y + + [ Upstream Kernel Changes ] + + * Quirk to fix suspend/resume on Lenovo Edge 11,13,14,15 + - LP: #702434 + * vfs: fix BUG_ON() in fs/namei.c:1461 + + [ Vladislav P ] + + * SAUCE: Release BTM while sleeping to avoid deadlock. + - LP: #713837 + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc4 to v2.6.38-rc5 + - LP: #579276 + - LP: #715877 + - LP: #713769 + - LP: #716811 + * resync with Maverick Ubuntu-2.6.35-27.47 + + -- Andy Whitcroft Fri, 11 Feb 2011 17:24:09 +0000 + +linux (2.6.38-3.30) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.38-rc4 + * ppc64 -- add basic architecture + * ubuntu: AUFS -- update to 65835da20b77c98fb538c9114fc31f5de1328230 + + [ Colin Ian King ] + + * SAUCE: Add WMI hotkeys support for Dell All-In-One series + - LP: #676997 + * SAUCE: Add WMI hotkeys support for another Dell All-In-One series + - LP: #701530 + * SAUCE: Dell WMI: Use sparse keymaps and tidy up code.
+ - LP: #701530 + + [ Dan Rosenberg ] + + * SAUCE: (drop after 2.6.38) Convert net %p usage %pK + + [ Kees Cook ] + + * Revert "SAUCE: kernel: make /proc/kallsyms mode 400 to reduce ease of + attacking" + * SAUCE: (drop after 2.6.38) use %pK for /proc/kallsyms and /proc/modules + + [ Tim Gardner ] + + * [Config] CONFIG_BLK_CGROUP=y + - LP: #706394 + * [Config] CONFIG_DELL_WMI_AIO=m + + [ Upstream Kernel Changes ] + + * drm/i915/lvds: Restore dithering on native modes for gen2/3 + - LP: #711568 + + [ Upstream Kernel Changes ] + + * rebase from v2.6.38-rc3 to v2.6.38-rc4. + - LP: #701271 + - LP: #708521 + - LP: #710371 + + -- Andy Whitcroft Tue, 08 Feb 2011 02:07:18 +0000 + +linux (2.6.38-2.29) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to 1f0324caefd39985e9fe052fac97da31694db31e + * [Config] updateconfigs following rebase to + 1f0324caefd39985e9fe052fac97da31694db31e + * rebase to 70d1f365568e0cdbc9f4ab92428e1830fdb09ab0 + * [Config] reenable HIBERNATE + - LP: #710877 + * rebase to v2.6.38-rc3 + * [Config] reenable CONFIG_CRASH_DUMP + + [ Kamal Mostafa ] + + * SAUCE: rtl8192se: fix source file perms + * SAUCE: rtl8192se: fix source file newline + * SAUCE: omnibook: fix source file newline + + [ Kees Cook ] + + * [Config] packaging: really make System.map mode 0600 + + [ Ricardo Salveti de Araujo ] + + * SAUCE: OMAP3630: PM: don't warn the user with a trace in case of + PM34XX_ERRATUM + + [ Soren Hansen ] + + * SAUCE: nbd: Remove module-level ioctl mutex + + [ Tim Gardner ] + + * SAUCE: Disable building the ACPI debugfs source + + [ Upstream Kernel Changes ] + + * Set physical start and alignment 1M for virtual i386 + - LP: #710754 + + [ Upstream Kernel Changes ] + + * rebase from v2.6.38-rc2 + c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + to v2.6.38-rc3 + - LP: #707902 + + -- Andy Whitcroft Fri, 28 Jan 2011 16:30:32 +0000 + +linux (2.6.38-1.28) natty; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update to b1cee06249dfa0ab30951e7f06490a75c155b620 + + [ Ricardo Salveti de Araujo ] + + * SAUCE: omap3: beaglexm: fix DVI initialization + * [Config] omap: move CONFIG_PANEL_GENERIC_DPI to build in to make + display work at Beagle + + -- Andy Whitcroft Fri, 28 Jan 2011 10:51:57 +0000 + +linux (2.6.38-1.27) natty; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update aufs-update to track new locations of headers + * ubuntu: AUFS -- update to c5021514085a5d96364e096dbd34cadb2251abfd + * SAUCE: ensure root is ready before running usermodehelpers in it + * correct the Vcs linkage to point to natty + * rebase to linux tip e78bf5e6cbe837daa6ab628a5f679548742994d3 + * [Config] update configs following rebase + e78bf5e6cbe837daa6ab628a5f679548742994d3 + * SAUCE: Yama: follow changes to generic_permission + * ubuntu: compcache -- follow changes to bd_claim/bd_release + * ubuntu: iscsitarget -- follow changes to open_bdev_exclusive + * ubuntu: ndiswrapper -- fix interaction between __packed and packed + * ubuntu: AUFS -- update to 806051bcbeec27748aae2b7957726a4e63ff308e + * update package version to match payload version + * rebase to e6f597a1425b5af64917be3448b29e2d5a585ac8 + * rebase to v2.6.38-rc1 + * [Config] updateconfigs following rebase to v2.6.38-rc1 + * SAUCE: x86 fix up jiffies/jiffies_64 handling + * rebase to linus tip 2b1caf6ed7b888c95a1909d343799672731651a5 + * [Config] updateconfigs following rebase to + 2b1caf6ed7b888c95a1909d343799672731651a5 + * [Config] disable CONFIG_TRANSPARENT_HUGEPAGE to fix i386 boot crashes + * ubuntu: AUFS -- suppress benign plink 
warning messages + - LP: #621195 + * [Config] CONFIG_NR_CPUS=256 for amd64 -server flavour + * rebase to v2.6.38-rc2 + * rebase to mainline d315777b32a4696feb86f2a0c9e9f39c94683649 + * rebase to c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + * [Config] update configs following rebase to + c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + * [Config] disable CONFIG_AD7152 to fix FTBS on armel versatile + * [Config] disable CONFIG_AD7150 to fix FTBS on armel versatile + * [Config] disable CONFIG_RTL8192CE to fix FTBS on armel omap + * [Config] disable CONFIG_MANTIS_CORE to fix FTBS on armel versatile + + [ Kees Cook ] + + * SAUCE: kernel: make /proc/kallsyms mode 400 to reduce ease of attacking + + [ Stefan Bader ] + + * Temporarily disable RODATA for virtual i386 + - LP: #699828 + + [ Tim Gardner ] + + * [Config] CONFIG_NLS_DEFAULT=utf8 + - LP: #683690 + * [Config] CONFIG_HIBERNATION=n + * update bnx2 firmware files in d-i/firmware/nic-modules + + [ Upstream Kernel Changes ] + + * Revert "drm/radeon/bo: add some fallback placements for VRAM only + objects." + * packaging: make System.map mode 0600 + * thinkpad_acpi: Always report scancodes for hotkeys + - LP: #702407 + * sched: tg->se->load should be initialised to tg->shares + * Input: sysrq -- ensure sysrq_enabled and __sysrq_enabled are consistent + * brcm80211: include linux/slab.h for kfree + * pch_dma: add include/slab.h for kfree + * i2c-eg20t: include linux/slab.h for kfree + * gpio/ml_ioh_gpio: include linux/slab.h for kfree + * tty: include linux/slab.h for kfree + * winbond: include linux/delay.h for mdelay et al + + [ Upstream Kernel Changes ] + + * mark the start of v2.6.38 versioning + * rebase v2.6.37 to v2.6.38-rc2 + c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + - LP: #689886 + - LP: #702125 + - LP: #608775 + - LP: #215802 + - LP: #686333 + - LP: #677830 + - LP: #677652 + - LP: #696493 + - LP: #697240 + - LP: #689036 + - LP: #705323 + - LP: #686692 + + -- Andy Whitcroft Sun, 09 Jan 2011 13:44:52 +0000 + +linux (2.6.37-12.26) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.37-rc8 + * [Config] armel -- reenable omap flavour + * [Config] disable CONFIG_MACH_OMAP3517EVM to fix FTBS on armel omap + * [Config] disable CONFIG_GPIO_VX855 to fix FTBS on omap armel + * [Config] disable CONFIG_WESTBRIDGE_ASTORIA to fix FTBS on omap armel + * [Config] disable CONFIG_TI_DAVINCI_EMAC to fix FTBS on omap armel + * rebase to mainline 989d873fc5b6a96695b97738dea8d9f02a60f8ab + * [Config] track missing modules + * rebase to v2.6.37 final + + [ Chase Douglas ] + + * SAUCE: (drop after 2.6.37) HID: magicmouse: Don't report REL_{X, Y} for + Magic Trackpad + + [ Stefan Bader ] + + * Revert "SAUCE: blkfront: default to sd devices" + - LP: #684875 + + [ Tim Gardner ] + + * Revert "SAUCE: (no-up) libata: Ignore HPA by default." 
+ - LP: #380138 + * [Config] Added autofs4.ko to -virtual flavour + - LP: #692917 + + [ Upstream Kernel Changes ] + + * Add support for Intellimouse Mode in ALPS touchpad on Dell E2 series + Laptops + - LP: #632884 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc8 + * rebase to mainline 989d873fc5b6a96695b97738dea8d9f02a60f8ab + * rebase to v2.6.37 final + + -- Andy Whitcroft Thu, 23 Dec 2010 18:34:13 +0000 + +linux (2.6.37-11.25) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i -- add hpsa to the list of block devices + - LP: #684304 + * [Config] add vmw-balloon driver to -virtual flavour + - LP: #592039 + * rebase to v2.6.37-rc7 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc7 + + -- Andy Whitcroft Tue, 21 Dec 2010 13:35:28 +0000 + +linux (2.6.37-10.24) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.37-rc6 + * updateconfigs following rebase to v2.6.37-rc6 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc6 + + -- Andy Whitcroft Thu, 16 Dec 2010 12:34:19 +0000 + +linux (2.6.37-9.23) natty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: vt -- fix handoff numbering to 1..n and add range checks + - LP: #689606 + * SAUCE: vt -- fix handoff numbering to 1..n and add range checks (grub) + - LP: #689606 + + [ Kees Cook ] + + * SAUCE: RO/NX protection for loadable kernel, fix ftrace + - LP: #690190 + + -- Andy Whitcroft Wed, 15 Dec 2010 19:29:57 +0000 + +linux (2.6.37-9.22) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.35-rc5 + * [Config] updateconfigs following rebase to v2.6.37-rc5 + * (no-up) add support for installed header files to ubuntu directory + - LP: #684666 + * ubuntu: AUFS -- include the aufs_types.h file in linux-libc-headers + - LP: #684666 + * ubuntu: dm-raid4-5 -- follow changes to bio flags + * ubuntu: dm-raid4-5 -- re-enable + * ubuntu: omnibook -- update BOM + * ubuntu: ndiswrapper -- update BOM to match actual version + * ubuntu: ndiswrapper -- follow removal of the BKL and locked ioctl + * ubuntu: ndiswrapper -- re-enable + * ubuntu: iscsitarget -- re-instate copy_io_context + * ubuntu: iscsitarget -- follow changes to semaphore initialisation + * ubuntu: iscsitarget -- convert NIPQUAD to %pI4 + * ubuntu: iscsitarget -- re-enable + + [ Kees Cook ] + + * [Config] update config for CONFIG_DEBUG_SET_MODULE_RONX + + [ Manoj Iyer ] + + * SAUCE: Enable jack sense for Thinkpad Edge 13 + - LP: #685015 + + [ Tim Gardner ] + + * [Config] CONFIG_9P_FSCACHE=y,CONFIG_9P_FS_POSIX_ACL=y + * [Config] CONFIG_CRYPTO_CRC32C=y + - LP: #681819 + * [Config] CONFIG_9P_FSCACHE=n + * [Config] Add nfsd modules to -virtual flavour + - LP: #688070 + + [ Upstream Kernel Changes ] + + * Revert "Staging: zram: work around oops due to startup ordering snafu" + * NFS: Fix panic after nfs_umount() + - LP: #683938 + * x86: Add NX protection for kernel data + * x86: Add RO/NX protection for loadable kernel modules + * x86: Resume trampoline must be executable + * x86: RO/NX protection for loadable kernel, jump_table fix + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc5 + + -- Andy Whitcroft Thu, 09 Dec 2010 18:15:35 +0000 + +linux (2.6.37-8.21) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- include the aufs_types.h file in + linux-libc-headers" + * Revert "(no-up) add support for installed header files to ubuntu + directory" + + -- Andy Whitcroft Sun, 05 Dec 2010 17:33:28 +0000 + +linux (2.6.37-8.20) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] drivers/serial/mfd.c: Fix ARM 
compile error" + * Revert "SAUCE: Nouveau: Disable acceleration on MacBook Pros" + * Revert "SAUCE: Nouveau: Add quirk framework to disable acceleration" + * Revert "SAUCE: i915 -- disable powersave by default" + * SAUCE: enable Marvell 9128 PCIe SATA controller + - LP: #658521 + * [Config] evtchn has been renamed + * (no-up) add support for installed header files to ubuntu directory + - LP: #684666 + * ubuntu: AUFS -- include the aufs_types.h file in linux-libc-headers + - LP: #684666 + + [ Tim Gardner ] + + * [Config] MISS: evtchn, NEW : xen-evtchn + * rebase to v2.6.37-rc4 + + [ Upstream Kernel Changes ] + + * drm/i915: Clean conflicting modesetting registers upon init + - LP: #683775 + * rebase to v2.6.37-rc4 + + -- Andy Whitcroft Fri, 03 Dec 2010 18:42:07 +0000 + +linux (2.6.37-7.19) natty; urgency=low + + [ Tim Gardner ] + + * [Config] Add bnx2 firmware to nic-modules udeb + - LP: #676245 + + -- Andy Whitcroft Fri, 26 Nov 2010 17:53:45 +0000 + +linux (2.6.37-7.18) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] USB: option: Remove duplicate AMOI_VENDOR_ID" + * Revert "SAUCE: Add extra headers to linux-libc-dev" + * Revert "SAUCE: Enable speedstep for sonoma processors." + * [Config] enable CONFIG_BT_HCIUART_ATH3K + * [Config] enable CONFIG_IWLWIFI_DEBUGFS + * [Config] standardise CONFIG_MII + * [Config] standardise CONFIG_PRISM2_USB + * [Config] standardise CONFIG_SCSI_QLA_ISCSI + * [Config] build in CONFIG_AGP + * [Config] build in CONFIG_AGP_INTEL + * [Config] build in CONFIG_AGP_AMD + * [Config] build in CONFIG_AGP_AMD64 + * [Config] build in CONFIG_AGP_NVIDIA + * [Config] build in CONFIG_AGP_VIA + * [Config] disable CONFIG_SCSI_QLA_ISCSI for FTBS (arm) + * (no-up): document the new ## scheme + * [Config] harmonise CONFIG_SERIAL_8250_NR_UARTS + * [Config] update CONFIG_SERIAL_8250_RUNTIME_UARTS=32 + - LP: #675453 + + [ Mathieu J. 
Poirier ] + + * SAUCE: ARM: Adding vdd_sdi regulator supply to OMAP3EVM + + [ Upstream Kernel Changes ] + + * nx-emu: fix inverted report of disable_nx + + -- Andy Whitcroft Tue, 23 Nov 2010 21:00:39 +0000 + +linux (2.6.37-6.17) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- aufs2-standalone.patch + aufs2.1-36-UNRELEASED-20101103" + * Revert "ubuntu: AUFS -- aufs2-base.patch + aufs2.1-36-UNRELEASED-20101103" + * [Config] standardise CONFIG_BT + * [Config] standardise CONFIG_IRDA + * [Config] standardise CONFIG_LAPB + * [Config] standardise CONFIG_RDS + * [Config] standardise CONFIG_RFKILL + * [Config] standardise CONFIG_TIPC + * [Config] standardise CONFIG_X25 + * [Config] standardise CONFIG_INPUT_EVDEV + * [Config] standardise CONFIG_INPUT_JOYDEV + * [Config] standardise CONFIG_INPUT_JOYSTICK + * [Config] standardise CONFIG_INPUT_TOUCHSCREEN + * [Config] CONFIG_INPUT_TOUCHSCREEN=n for FTBS (arm) + * [Config] CONFIG_IRDA=n for FTBS (arm) + * ubuntu: AUFS -- aufs2-base.patch aufs2.1-37 + * ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-37 + * ubuntu: AUFS -- update to 097bf62d6f49619359d34bf17f242df38562489a + + [ Tim Gardner ] + + * SAUCE: Fix drivers/staging/easycap FTBS + * [Config] CONFIG_EASYCAP=m after fixing FTBS + + [ Upstream Kernel Changes ] + + * Revert "x86: Add NX protection for kernel data" + + -- Andy Whitcroft Mon, 22 Nov 2010 18:09:10 +0000 + +linux (2.6.37-6.16) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Config] update config for CONFIG_DEBUG_SET_MODULE_RONX" + * rebase to v2.6.37-rc3 + + [ Tim Gardner ] + + * [Config] CONFIG_SCHED_AUTOGROUP=y + + [ Upstream Kernel Changes ] + + * Revert "x86: Add RO/NX protection for loadable kernel modules" + * sched: automated per session task groups + * rebase to v2.6.37-rc3 + + -- Andy Whitcroft Mon, 22 Nov 2010 10:11:13 +0000 + +linux (2.6.37-6.15) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] standardise CONFIG_CEPH_FS + * [Config] standardise CONFIG_SCSI_LPFC_DEBUG_FS + * [Config] standardise CONFIG_SCSI_PROC_FS + * [Config] standardise CONFIG_UBIFS_FS + * [Config] standardise CONFIG_USB_GADGET_DEBUG_FS + + [ Kees Cook ] + + * [Config] update config for CONFIG_DEBUG_SET_MODULE_RONX + + [ Manoj Iyer ] + + * SAUCE: Enable jack sense for Thinkpad Edge 11 + - LP: #677210 + * SAUCE: enable rfkill for rtl8192se driver + - LP: #640992 + + [ Tim Gardner ] + + * [Config] CONFIG_EASYCAP=n for FTBS + * Rebase to v2.6.32-rc2+git + + [ Upstream Kernel Changes ] + + * x86: Fix improper large page preservation + * x86: Add NX protection for kernel data + * x86: Add RO/NX protection for loadable kernel modules + + [ Upstream Kernel Changes ] + + * Rebase to Linus 2.6.37-rc2+git + + -- Andy Whitcroft Sat, 20 Nov 2010 11:40:00 +0000 + +linux (2.6.37-5.14) natty; urgency=low + + [ Upstream Kernel Changes ] + + * PCI: fix offset check for sysfs mmapped files + - LP: #676963 + + -- Andy Whitcroft Thu, 18 Nov 2010 18:12:27 +0000 + +linux (2.6.37-5.13) natty; urgency=low + + [ Andy Whitcroft ] + + * rebased to v2.6.37-rc2 + * updateconfigs following rebase to v2.6.37-rc2 + + [ Tim Gardner ] + + * [Config] Added NFS and related modules to virtual flavour + - LP: #659084 + + [ Upstream Kernel Changes ] + + * x86, cpu: Rename verify_cpu_64.S to verify_cpu.S + * x86, cpu: Clear XD_DISABLED flag on Intel to regain NX + * x86, cpu: Call verify_cpu during 32bit CPU startup + * x86, cpu: Only CPU features determine NX capabilities + + [ Upstream Changes ] + + * rebased to v2.6.37-rc2 + + -- Andy Whitcroft Tue,
16 Nov 2010 13:13:29 +0000 + +linux (2.6.37-4.12) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] HID: magicmouse: add param for scroll speed" + * Revert "[Upstream] HID: magicmouse: properly account for scroll + movement in state" + * Revert "[Upstream] HID: magicmouse: disable and add module param for + scroll acceleration" + * Revert "[Upstream] HID: magicmouse: scroll on entire surface, not just + middle of mouse" + + [ Henrik Rydberg ] + + * SAUCE: hid: ntrig: remove sysfs nodes + * SAUCE: hid: ntrig: Setup input filtering manually + * SAUCE: hid: ntrig: New ghost-filtering event logic + + [ Manoj Iyer ] + + * SAUCE: Added quirk to recognize GE0301 3G modem as an interface. + - LP: #348861 + + [ Upstream Kernel Changes ] + + * Revert "mmc: fix all hangs related to mmc/sd card insert/removal during + suspend/resume" + * Revert "[ARM] implement arch_randomize_brk()" + * Revert "ARM: stack protector: change the canary value per task" + * Revert "ARM: initial stack protector (-fstack-protector) support" + * Revert "ALSA: hda - Handle pin NID 0x1a on ALC259/269" + * Revert "ALSA: hda - Handle missing NID 0x1b on ALC259 codec" + * Revert "perf probe: Add kernel source path option" + * hid: ntrig: Support single-touch devices + * hid: ntrig: Mask pen switch events + * net: rtnetlink.h -- only include linux/netdevice.h when used by the + kernel + - LP: #673073 + * Fix userspace build of linux/fs.h + + -- Andy Whitcroft Mon, 15 Nov 2010 19:31:44 +0000 + +linux (2.6.37-3.11) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- update to + b37c575759dc4535ccc03241c584ad5fe69e3b25" + * Revert "ubuntu: AUFS -- track changes to the arguements to fop fsync()" + * Revert "ubuntu: AUFS -- update to standalone 2.6.35-rcN as at 20100601" + * Revert "ubuntu: AUFS -- update to standalone 2.6.34 as at 20100601" + * Revert "ubuntu: AUFS -- aufs2 base patch for linux-2.6.34" + * [Config] Disable intel_idle for -virtual kernels + - LP: #651370 + * [Config] enforcer -- ensure we never enable CONFIG_IMA + * debian -- pass the correct flavour name when checking configs + * [Config] enforcer -- ensure CONFIG_INTEL_IDLE is off for -virtual + * [Config] ensure CONFIG_IPV6=y for powerpc + * [Config] enforcer -- ensure CONFIG_IPV6=y + * ubuntu: AUFS -- aufs2-base.patch aufs2.1-36-UNRELEASED-20101103 + * ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-36-UNRELEASED-20101103 + * ubuntu: AUFS -- update to aufs2.1-36-UNRELEASED-20101103 + * ubuntu: AUFS -- re-enable + * ubuntu: AUFS -- track changes to work queue initialisation + * ubuntu: AUFS -- track changes to llseek in v2.6.37-rc1 + * SAUCE: fbcon -- fix race between open and removal of framebuffers + * SAUCE: fbcon -- fix OOPs triggered by race prevention fixes + - LP: #614008 + * SAUCE: drm -- stop early access to drm devices + + [ Jeremy Kerr ] + + * [Config] Build-in powermac ZILOG serial driver + - LP: #673346 + + [ Kees Cook ] + + * SAUCE: nx-emu: use upstream ASLR when possible + + [ Tim Gardner ] + + * [Config] Use correct be2iscsi module name in d-i/modules/scsi-modules + - LP: #628776 + + [ Upstream Kernel Changes ] + + * i386: NX emulation + * nx-emu: drop exec-shield sysctl, merge with disable_nx + * nx-emu: standardize boottime message prefix + * mmap randomization for executable mappings on 32-bit + * exec-randomization: brk away from exec rand area + + -- Andy Whitcroft Thu, 11 Nov 2010 23:46:37 +0000 + +linux (2.6.37-2.10) natty; urgency=low + + [ Andy Whitcroft ] + + * reinstate armel config changes: + * [Config] 
CONFIG_GPIO_PCH=n for armel FTBS + * [Config] CONFIG_GPIO_VX855=n for armel FTBS + + -- Andy Whitcroft Wed, 03 Nov 2010 22:20:35 +0000 + +linux (2.6.37-2.9) natty; urgency=low + + [ Andy Whitcroft ] + + * config -- fix genportsconfig + * [Config] move powerpc over from ports to distro + * bump master version number to match contained kernel + * SAUCE: fix documentation strings for struct input_keymap_entry + * usb: gadget: goku_udc: add registered flag bit + + -- Andy Whitcroft Tue, 02 Nov 2010 15:14:11 +0000 + +linux (2.6.36-2.8) natty; urgency=low + + [ Tim Gardner ] + + * [Config]: fix changed CONFIG_SYSFS_DEPRECATED_V2 enforcement rules + * [Config]: TWL4030_CORE=n for FTBS + * [Config]: CONFIG_ATH6K_LEGACY=n for FTBS + * [Config]: CONFIG_SOLO6X10=n for FTBS + * [Config]: CONFIG_GPIO_PCH=n for armel FTBS + * [Config]: CONFIG_GPIO_VX855=n for armel FTBS + * [Config]: CONFIG_DRM_NOUVEAU=n for armel FTBS + * [Config]: CONFIG_LINE6_USB=n for armel FTBS + * [Config]: CONFIG_SENSORS_AK8975=n for armel FTBS + * [Config]: CONFIG_I2C_I801=n for armel FTBS + * UBUNTU: SAUCE: AppArmor: Fix unpack of network tables. + * AppArmor: compatibility patch for v5 interface + * AppArmor: compatibility patch for v5 network controll + * Dropped (pre-stable): input: Support Clickpad devices in ClickZone mode + * Dropped: UBUNTU: SAUCE: libata: Add ALPM power state accounting to the AHCI driver + * Dropped: UBUNTU: SAUCE: Added quirk to recognize GE0301 3G modem as an interface. + * Dropped: hid: 3m: Convert to MT slots + * Dropped: HID: magicmouse: don't allow hidinput to initialize the device + * Dropped: HID: magicmouse: simplify touch data bit manipulation + * Dropped: HID: magicmouse: simplify touch down logic + * Dropped: HID: magicmouse: enable Magic Trackpad support + * Dropped: UBUNTU: SAUCE: hid: ntrig: remove sysfs nodes + * Dropped: UBUNTU: SAUCE: hid: ntrig: Setup input filtering manually + * Dropped: UBUNTU: SAUCE: hid: ntrig: New ghost-filtering event logic + * Dropped: UBUNTU: SAUCE: hid: ntrig: identify firmware version (wiggled) + * Dropped: UBUNTU: (pre-stable): input: Support Clickpad devices in ClickZone mode + * Dropped: UBUNTU: SAUCE: KMS: cache the EDID information of the LVDS + * Dropped: UBUNTU: SAUCE: fbcon -- fix race between open and removal of framebuffers + * Dropped: UBUNTU: SAUCE: fbcon -- fix OOPs triggered by race prevention fixes + * Dropped: UBUNTU: SAUCE: x86: implement cs-limit nx-emulation for ia32 + * Dropped: UBUNTU: SAUCE: x86: more tightly confine cs-limit nx-emulation to ia32 only + * Dropped: UBUNTU: SAUCE: [um] Don't use nx_enabled under UML + * Dropped: UBUNTU: SAUCE: x86: brk away from exec rand area + + [ Upstream Kernel Changes ] + + * rebased against 2.6.27-rc1 + + -- Tim Gardner Fri, 22 Oct 2010 19:35:05 -0600 + +linux (2.6.36-1.7) natty; urgency=low + + [ Andy Whitcroft ] + + * rebased to v2.6.36 final + * [Config] update configs following rebase to v2.6.36 final + * [Config] update ports configs following rebase to v2.6.36 final + + [ Upstream Kernel Changes ] + + * rebased to v2.6.36 final + + -- Andy Whitcroft Thu, 21 Oct 2010 14:28:57 +0100 + +linux (2.6.36-1.6) natty; urgency=low + + [ Upstream Kernel Changes ] + + * drop broadcom staging driver preview: + * Revert "Staging: Add initial release of brcm80211 - Broadcom 802.11n + wireless LAN driver." 
+ + -- Andy Whitcroft Wed, 20 Oct 2010 10:41:25 +0100 + +linux (2.6.36-1.5) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.36-rc8 + * updateconfigs following rebase to v2.6.36-rc8 + * updateportsconfigs following rebase to v2.6.36-rc8 + * config -- simplify the kernelconfig interface + * config -- add new config mode 'dumpconfigs' + + [ Tim Gardner ] + + * Simplify the use of CROSS_COMPILER + + [ Upstream Kernel Changes ] + + * drop broadcom staging driver preview: + * Revert "staging: brcm80211: Make compiling of brcm80211.ko and + brcmfmac.ko mutually exclusive." + * Revert "staging: brcm80211: Fix compile issue when BRCM80211_PCI is not + set." + * Revert "Staging: brcm80211: remove driver specific -W options" + * Revert "Staging: brcm80211: clean up makefile cflag lines" + * Revert "staging: brcm80211: add fullmac driver" + * Revert "staging: brcm80211: use string native library" + * Revert "staging: brcm80211: use native ctype library" + * Revert "staging: brcm80211: fix remaining checkpatch errors." + * Revert "staging: brcm80211: fix "ERROR: trailing whitespace."" + * Revert "staging: brcm80211: fix "ERROR: spaces required around that + ..."" + * Revert "staging: brcm80211: fix "ERROR: spaces prohibited around that + ':' ..."" + * Revert "staging: brcm80211: fix "ERROR: space required before that + ..."" + * Revert "staging: brcm80211: fix "ERROR: space required after that ..."" + * Revert "staging: brcm80211: fix "ERROR: space required after that close + brace"" + * Revert "staging: brcm80211: fix "ERROR: space prohibited before + ...close square bracket"" + * Revert "staging: brcm80211: fix "ERROR: space prohibited after that + ..."" + * Revert "staging: brcm80211: fix "ERROR: need consistent spacing around + '*'"" + * Revert "staging: brcm80211: fix 'ERROR: "(foo*)" should be "(foo *)"'" + * Revert "staging: brcm80211: fix "ERROR: Macros w/ mult. statements ... + do - while loop"" + * Revert "staging: brcm80211: fix "ERROR: Macros w/ complex values ... + parenthesis"" + * Revert "staging: brcm80211: fix "ERROR: do not initialise statics to 0 + or NULL"" + * Revert "staging: brcm80211: fix "ERROR: do not initialise globals to 0 + or NULL"" + * Revert "staging: brcm80211: fix "ERROR: while should follow close brace + '}'"" + * Revert "staging: brcm80211: fix "ERROR: that open brace { ... prev + line"" + * Revert "staging: brcm80211: fix "ERROR: trailing statements should be + on next line"" + * Revert "staging: brcm80211: fix "ERROR: do not use assignment in if + condition"" + * Revert "staging: brcm80211: fix "ERROR: return is not a function, + paren..."" + * Revert "staging: brcm80211: fix "ERROR: open brace '{' following + function dec..."" + * Revert "staging: brcm80211: fix 'ERROR: "foo * bar" should be "foo + *bar"'" + * Revert "staging: brcm80211: Fix URLs for firmware files." + * Revert "staging: brcm80211: use '%pM' format to print MAC address" + * Revert "staging: brcm80211: Add contact info to TODO list." + * Revert "staging: brcm80211: Fix some initialisation failure paths" + * Export dump_{write,seek} to binary loader modules + * rebase to v2.6.36-rc8. 
+ + -- Andy Whitcroft Tue, 19 Oct 2010 18:58:11 +0100 + +linux (2.6.36-0.4) natty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: perf: increase stack footprint to avoid stack-protector warning + (fixes FTBS on powerpc) + + -- Andy Whitcroft Thu, 14 Oct 2010 13:16:16 +0100 + +linux (2.6.36-0.3) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] disable CONFIG_SCSI_QLA_ISCSI to fix FTBS on powerpc + + -- Andy Whitcroft Thu, 14 Oct 2010 03:01:30 +0100 + +linux (2.6.36-0.2) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] updateportsconfigs following rebase to 2.6.36-rc7 + (fix FTBS on powerpc) + + -- Andy Whitcroft Wed, 13 Oct 2010 23:25:12 +0100 + +linux (2.6.36-0.1) natty; urgency=low + + [ Andy Whitcroft ] + + * reduce disk usage during buildd builds + - LP: #645653 + * [Config] enforcer -- ensure CONFIG_INIT_PASS_ALL_PARAMS is y + * [Config] armel -- drop omap flavour + + [ Tim Gardner ] + + * Added dropped patch list + * more dropped patches + * [Config] Disable aufs, dmraid-4.5, ndis-wrapper + * [Config] Add support for cross compiling armel + * [Config] CONFIG_SCSI_QLA_ISCSI=n for armel + * [Upstream] drivers/serial/mfd.c: Fix ARM compile error + * [Config]: updateconfigs after adding brcm80211 + * staging: brcm80211: Fix Makefile syntax error + * rebased to v2.6.36-rc7 + + [ Upstream Kernel Changes ] + + * (upstream) IPS driver: don't toggle CPU turbo on unsupported CPUs + * (upstream) IPS driver: verify BIOS provided limits + * intel_ips: Print MCP limit exceeded values. + * Staging: Add initial release of brcm80211 - Broadcom 802.11n wireless + LAN driver. + * staging: brcm80211: Fix some initialisation failure paths + * staging: brcm80211: Add contact info to TODO list. + * staging: brcm80211: use '%pM' format to print MAC address + * staging: brcm80211: Fix URLs for firmware files. + * staging: brcm80211: fix 'ERROR: "foo * bar" should be "foo *bar"' + * staging: brcm80211: fix "ERROR: open brace '{' following function + dec..." + * staging: brcm80211: fix "ERROR: return is not a function, paren..." + * staging: brcm80211: fix "ERROR: do not use assignment in if condition" + * staging: brcm80211: fix "ERROR: trailing statements should be on next + line" + * staging: brcm80211: fix "ERROR: that open brace { ... prev line" + * staging: brcm80211: fix "ERROR: while should follow close brace '}'" + * staging: brcm80211: fix "ERROR: do not initialise globals to 0 or NULL" + * staging: brcm80211: fix "ERROR: do not initialise statics to 0 or NULL" + * staging: brcm80211: fix "ERROR: Macros w/ complex values ... + parenthesis" + * staging: brcm80211: fix "ERROR: Macros w/ mult. statements ... do - + while loop" + * staging: brcm80211: fix 'ERROR: "(foo*)" should be "(foo *)"' + * staging: brcm80211: fix "ERROR: need consistent spacing around '*'" + * staging: brcm80211: fix "ERROR: space prohibited after that ..." + * staging: brcm80211: fix "ERROR: space prohibited before ...close square + bracket" + * staging: brcm80211: fix "ERROR: space required after that close brace" + * staging: brcm80211: fix "ERROR: space required after that ..." + * staging: brcm80211: fix "ERROR: space required before that ..." + * staging: brcm80211: fix "ERROR: spaces prohibited around that ':' ..." + * staging: brcm80211: fix "ERROR: spaces required around that ..." + * staging: brcm80211: fix "ERROR: trailing whitespace." + * staging: brcm80211: fix remaining checkpatch errors. 
+ * staging: brcm80211: use native ctype library + * staging: brcm80211: use string native library + * staging: brcm80211: add fullmac driver + * Staging: brcm80211: clean up makefile cflag lines + * Staging: brcm80211: remove driver specific -W options + * staging: brcm80211: Fix compile issue when BRCM80211_PCI is not set. + * staging: brcm80211: Make compiling of brcm80211.ko and brcmfmac.ko + mutually exclusive. + + -- Andy Whitcroft Tue, 12 Oct 2010 16:00:27 +0100 + +linux (2.6.35-22.33) maverick; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: Add support for Intellimouse Mode in ALPS touchpad on + Dell E2 series Laptops" + - LP: #641320 + + [ Brian Rogers ] + + * SAUCE: ir-core: Fix null dereferences in the protocols sysfs interface + - LP: #624701 + + [ Christopher James Halse Rogers ] + + * SAUCE: Nouveau: Add quirk framework to disable acceleration + - LP: #544088, #546393 + * SAUCE: Nouveau: Disable acceleration on MacBook Pros + - LP: #546393 + + [ John Johansen ] + + * Revert "SAUCE: AppArmor: allow newer tools to load policy on older + kernels" + * SAUCE: AppArmor: allow newer tools to load policy on older kernels + - LP: #639758 + + [ Mathieu J. Poirier ] + + * SAUCE: Adding vdd_sdi regulator supply to OMAP3EVM + + [ Upstream Kernel Changes ] + + * ALSA: HDA: Enable internal speaker on Dell M101z + - LP: #640254 + + -- Leann Ogasawara Fri, 17 Sep 2010 13:21:28 -0700 + +linux (2.6.35-22.32) maverick; urgency=low + + [ Arjan van de Ven ] + + * SAUCE: libata: Add ALPM power state accounting to the AHCI driver + + [ David Henningsson ] + + * SAUCE: ALSA: HDA: Enable internal mic on Dell E6410 and Dell E6510 + - LP: #605047, #628961 + + [ John Johansen ] + + * [Upstream] AppArmor: Fix splitting an fqname into separate namespace + and profile names + - LP: #615947 + * [Upstream] AppArmor: Fix locking from removal of profile namespace + - LP: #615947 + * SAUCE: AppArmor: allow newer tools to load policy on older kernels + - LP: #639758 + * SAUCE: Improve Amazon EBS performance for EC2 + - LP: #634316 + + [ Leann Ogasawara ] + + * Revert "SAUCE: i915 KMS -- blacklist i855" + * Revert "SAUCE: i915 KMS -- blacklist i845g" + * Revert "SAUCE: i915 KMS -- blacklist i830" + * Revert "SAUCE: i915 KMS -- support disabling KMS for known broken + devices" + * execute module-inclusion within a subshell + - LP: #621175 + + [ Upstream Kernel Changes ] + + * (pre-stable) bounce: call flush_dcache_page() after bounce_copy_vec() + - LP: #633227 + * (pre-stable) drm/i915: don't enable self-refresh on Ironlake + - LP: #629711 + * (pre-stable) mm: Move vma_stack_continue into mm.h + * x86, hwmon: Fix unsafe smp_processor_id() in thermal_throttle_add_dev + - LP: #601073 + * PM / Runtime: Make runtime_status attribute not debug-only (v. 
2) + * PM / Runtime: Add runtime PM statistics (v3) + * compat: Make compat_alloc_user_space() incorporate the access_ok() + - CVE-2010-3081 + * x86-64, compat: Test %rax for the syscall number, not %eax + - CVE-2010-3301 + * x86-64, compat: Retruncate rax after ia32 syscall entry tracing + - CVE-2010-3301 + + -- Leann Ogasawara Tue, 14 Sep 2010 08:46:49 -0700 + +linux (2.6.35-21.31) maverick; urgency=low + + [ Andy Whitcroft ] + + * bodge linux-libc-dev package version due to ti-omap4 error + * linux-libc-dev -- ensure we can only build this on debian.master + + -- Leann Ogasawara Mon, 13 Sep 2010 09:54:31 -0700 + +linux (2.6.35-21.30) maverick; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: fbcon -- fix OOPs triggered by race prevention fixes + - LP: #614008 + + [ Daniel Lezcano ] + + * SAUCE: fix compilation warning when CONFIG_SECURITY is not set + + [ Henrik Rydberg ] + + * SAUCE: Input: wacom - add fuzz parameters to features + * SAUCE: Input: wacom - collect device quirks into single function + * SAUCE: Input: wacom - add support for the Bamboo Touch trackpad + * SAUCE: Input: wacom - add a quirk for low resolution Bamboo devices + * SAUCE: hid: ntrig: Remove unused device ids + * SAUCE: hid: ntrig: remove sysfs nodes + * SAUCE: hid: ntrig: Correct logic for quirks + * SAUCE: hid: ntrig: zero-initialize ntrig struct + * SAUCE: hid: ntrig: Setup input filtering manually + * SAUCE: hid: ntrig: New ghost-filtering event logic + + [ Leann Ogasawara ] + + * SAUCE: ndiswrapper: Initialize buffer index and check its value + - LP: #613796 + + [ Manoj Iyer ] + + * SAUCE: Add support for Intellimouse Mode in ALPS touchpad on Dell E2 + series Laptops + - LP: #632884 + + [ Ping Cheng ] + + * SAUCE: Input: wacom - parse the Bamboo device family + + [ Rafi Rubin ] + + * SAUCE: hid: ntrig: identify firmware version (wiggled) + + [ Tim Gardner ] + + * [Config] CONFIG_NL80211_TESTMODE=n + + [ Upstream Kernel Changes ] + + * Revert "input: mt: Add support for the Bamboo Touch trackpad" + * e1000e: initial support for 82579 LOMs + * e1000e: correct MAC-PHY interconnect register offset for 82579 + * (pre-stable) ALSA: hda - Add a new hp-laptop model for Conexant 5066, + tested on HP G60 + - LP: #587388 + * DSS2: Don't power off a panel twice + - LP: #588243 + * mmc: build fix: mmc_pm_notify is only available with CONFIG_PM=y + * Input: i8042 - reset keyboard controller wehen resuming from S2R + - LP: #86820 + * ALSA: hda - Fix beep frequency on IDT 92HD73xx and 92HD71Bxx codecs + - LP: #414795 + * agp/intel: Support the extended physical addressing bits on + Sandybridge. + - LP: #632488 + * drm/i915,intel_agp: Add support for Sandybridge D0 + - LP: #632488 + * (pre-stable) intel_agp,i915: Add more sandybridge graphics device ids + - LP: #632488 + * mmc: omap: fix for bus width which improves SD card's peformance. 
+ + -- Leann Ogasawara Tue, 07 Sep 2010 09:58:52 -0700 + +linux (2.6.35-20.29) maverick; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: i915 KMS -- support disabling KMS for known broken devices + - LP: #563277 + * SAUCE: i915 KMS -- blacklist i830 + - LP: #542208, #563277 + * SAUCE: i915 KMS -- blacklist i845g + - LP: #541492, #563277 + * SAUCE: i915 KMS -- blacklist i855 + - LP: #511001, #541511, #563277 + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_SENSORS_PKGTEMP=m + - LP: #601073 + * ARM: Temporarily disable module check for armel + * rebase to v2.6.35.4 + * [Config] update configs following rebase to v2.6.35.4 + + [ Ricardo Salveti de Araujo ] + + * [Config] Change CONFIG_LEDS_TRIGGER_HEARTBEAT from module to built-in + in Omap + + [ Tim Gardner ] + + * [Config] Added be2net, be2scsi to udebs + - LP: #628776 + + [ Upstream Kernel Changes ] + + * x86, cpu: Package Level Thermal Control, Power Limit Notification + definitions + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: pkgtemp hwmon driver + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: thermal throttling handler + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: power limit + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: pkgtemp documentation + - LP: #601073 + * hid: 3m: Adjust to sequential MT HID protocol + * hid: 3m: Convert to MT slots + * hid: 3m: Correct touchscreen emulation + * hid: 3m: Adjust major / minor axes to scale + * input: bcm5974: Adjust major / minor to scale + * HID: magicmouse: don't allow hidinput to initialize the device + * HID: magicmouse: simplify multitouch feature request + * HID: magicmouse: simplify touch data bit manipulation + * HID: magicmouse: simplify touch down logic + * HID: magicmouse: remove timestamp logic + * HID: magicmouse: enable Magic Trackpad support + * HID: magicmouse: Adjust major / minor axes to scale + * mmc: fix all hangs related to mmc/sd card insert/removal during + suspend/resume + - LP: #477106 + * drm/i915: fix VGA plane disable for Ironlake+ + - LP: #602281 + + -- Leann Ogasawara Mon, 30 Aug 2010 08:38:01 -0700 + +linux (2.6.35-19.28) maverick; urgency=low + + [ Leann Ogasawara ] + + * No changes from 2.6.35-19.27. Some armel udebs were accidentally deleted + from the archive and a no-change rebuild was attempted. However, the ABI + did not get bumped and resulted in build failures for 2.6.35-19.27. Fix + up the ABI and re-upload. + + -- Leann Ogasawara Sat, 28 Aug 2010 16:42:27 -0700 + +linux (2.6.35-19.27) maverick; urgency=low + + [ Leann Ogasawara ] + + * No changes from 2.6.35-19.26. Some armel udebs were accidentally deleted + from the archive. + + -- Leann Ogasawara Fri, 27 Aug 2010 08:58:35 -0700 + +linux (2.6.35-19.26) maverick; urgency=low + + [ Upstream Kernel Changes ] + + * ARM: OMAP: Beagle: revision detection + * ARM: OMAP: Beagle: only Cx boards use pin 23 for write protect + * ARM: OMAP: Beagle: no gpio_wp pin connection on xM + + -- Leann Ogasawara Thu, 26 Aug 2010 09:15:09 -0700 + +linux (2.6.35-19.25) maverick; urgency=low + + [ Jarod Wilson ] + + * SAUCE: Bring in staging/lirc from 2.6.36 + - LP: #609234 + * SAUCE: Update ir-core to linuxtv/other which should be merged for + 2.6.36. + - LP: #609234 + * SAUCE: Fix memleaks in imon and mceusb drivers + - LP: #609234 + * SAUCE: Bring in streamzap support from linuxtv/other + - LP: #609234 + + [ Mario Limonciello ] + + * Remove ubuntu/lirc in favor of staging/lirc from 2.6.36 + - LP: #609234 + + [ Mathieu J. 
Poirier ] + + * SAUCE: ARM: adding i2c eeprom driver to read EDID + - LP: #608279 + + [ Upstream Kernel Changes ] + + * intel_idle: disable module support + - LP: #615265 + * (pre-stable) ALSA: hda - Ensure codec patch files are checked for the + correct codec ID + * (pre-stable) ALSA: hda - Rename iMic to Int Mic on Lenovo NB0763 + - LP: #605101 + * (pre-stable) ALSA: HDA: Use model=auto for LG R510 + - LP: #495134 + * (pre-stable) ALSA: HDA: Add Sony VAIO quirk for ALC269 + - LP: #519066 + * (pre-stable) ALSA: HDA: Fix front mic on Dell Precision M6500 + - LP: #519066 + * input: mt: Initialize slots to unused (rev2) + * input: mt: Add support for the Bamboo Touch trackpad + * hid: Add a hid quirk for input sync override + + -- Leann Ogasawara Mon, 23 Aug 2010 12:42:52 -0700 + +linux (2.6.35-18.24) maverick; urgency=low + + [ Colin Watson ] + + * Pass DEB_MAINT_PARAMS to hook scripts + + [ Leann Ogasawara ] + + * [Config] Add CONFIG_INPUT_UINPUT=y to config enforcer + - LP: #584812 + * rebase to v2.6.35.3 + + [ Upstream Kernel Changes ] + + * (pre-stable) dell-wmi: Add support for eject key on Dell Studio 1555 + - LP: #609234 + * can: add limit for nframes and clean up signed/unsigned variables + - CVE-2010-2959 + * drm: Initialize ioctl struct when no user data is present + - CVE-2010-2803 + * ARM: initial stack protector (-fstack-protector) support + * ARM: stack protector: change the canary value per task + * [ARM] implement arch_randomize_brk() + * [ARM] add address randomization to mmap() + * ARM: fix ASLR of PIE executables + + -- Leann Ogasawara Sun, 22 Aug 2010 19:22:04 -0700 + +linux (2.6.35-17.23) maverick; urgency=low + + [ Jeremy Kerr ] + + * [Config] build-in uinput module + - LP: #584812 + + [ Leann Ogasawara ] + + * Revert "[Config] [FTBS] ia64: Temporarily disable CONFIG_CEPH_FS" + * Revert "[Config] [FTBS] ia64: Temporarily disable gpiolib" + * Revert "[Config] [FTBS] sparc: Temporarily disable + CONFIG_MTD_NAND_DENALI" + * Revert "[Config] [FTBS] sparc: Temporarily disable + CONFIG_MFD_JANZ_CMODIO" + * Revert "[Config] [FTBS] sparc: Temporarily disable + CONFIG_INFINIBAND_QIB" + * [Config] Enable INTEL_IPS + - LP: #601057 + * Remove ia64 support + * [Config] Update portsconfigs after removing ia64 support + * Remove sparc support + * [Config] Update portsconfigs after removing sparc support + + [ Linus Torvalds ] + + * (pre-stable) mm: fix page table unmap for stack guard page properly + + [ Mathieu J. Poirier ] + + * SAUCE: (no-up) ARM: Resetting power_mode to its original value. 
+ - LP: #591941 + + [ Upstream Kernel Changes ] + + * timer: add on-stack deferrable timer interfaces + - LP: #601057 + * x86 platform driver: intelligent power sharing driver + - LP: #601057 + * IPS driver: add GPU busy and turbo checking + - LP: #601057 + * X86: intel_ips, check for kzalloc properly + - LP: #601057 + * ips driver: make it less chatty + - LP: #601057 + + -- Leann Ogasawara Tue, 17 Aug 2010 09:38:08 -0700 + +linux (2.6.35-16.22) maverick; urgency=low + + [ Andy Whitcroft ] + + * debian -- more agressivly clean up after depmod on purge + - LP: #618591 + + [ Henrik Rydberg ] + + * SAUCE: hid: 3m: Simplify touchsreen emulation logic + + [ Leann Ogasawara ] + + * ubuntu: iscsitarget -- version 1.4.20.2 + * ubuntu: rtl8192se -- update to version 0017.0507.2010 + * rebase to v2.6.35.2 + * [Config] update configs following rebase to v2.6.35.2 + * [Config] update ports configs following rebase to v2.6.35.2 + + [ Luke Yelavich ] + + * [Config] Enable new firewire stack on powerpc + + [ Mathieu J. Poirier ] + + * SAUCE: (drop after 2.6.35) ARM: Using gpmc function to init nand flash. + - LP: #608266 + + -- Leann Ogasawara Thu, 12 Aug 2010 09:58:01 -0700 + +linux (2.6.35-15.21) maverick; urgency=low + + [ Luke Yelavich ] + + * [Config] CONFIG_SND_USB_UA101=m for all architectures + + [ Upstream Kernel Changes ] + + * Input: introduce MT event slots + * Input: document the MT event slot protocol + * (pre-stable) sched: Revert nohz_ratelimit() for now + * (pre-stable) drm/radeon/kms: add missing copy from user + - LP: #606081 + + [ Leann Ogasawara ] + + * rebase to v2.6.35.1 + + -- Leann Ogasawara Mon, 09 Aug 2010 09:24:04 -0700 + +linux (2.6.35-14.20) maverick; urgency=low + + [ Andy Whitcroft ] + + * update Vcs-Git to point to maverick repo + * debian -- include the debian packaging in the -source package + - LP: #608674 + * select debian source format 1.0 + * add support for building selected stages of kernel + - LP: #603087 + * cleanup conditional dependancy handling + - LP: #603087 + + [ Upstream Kernel Changes ] + + * ALSA: hda - Handle missing NID 0x1b on ALC259 codec + - LP: #582199, #586418, #588031 + * ALSA: hda - Handle pin NID 0x1a on ALC259/269 + - LP: #582199, #586418, #588031 + * sched: Revert nohz_ratelimit() for now + + -- Leann Ogasawara Tue, 03 Aug 2010 08:46:47 -0700 + +linux (2.6.35-14.19) maverick; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.35 + + -- Leann Ogasawara Sun, 01 Aug 2010 10:35:56 -0700 + +linux (2.6.35-13.18) maverick; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: (no-up) Modularize vesafb -- fix initialisation + * SAUCE: add tracing for user initiated readahead requests + * SAUCE: vt -- maintain bootloader screen mode and content until vt + switch + * SAUCE: vt -- allow grub to request automatic vt_handoff + * SAUCE: fbcon -- fix race between open and removal of framebuffers + * SAUCE: drm -- stop early access to drm devices + + [ Bryan Wu ] + + * CONFIG: compile in OTG driver and Transceiver driver + - LP: #566645 + * remove OTG modules from modules list file + + [ John Johansen ] + + * SAUCE: AppArmor: -- sync to AppArmor mainline 2010-07-27 + - LP: #581525, #599450 + * SAUCE: AppArmor: -- sync to AppArmor mainline 2010-07-29 + * SAUCE: AppArmor 2.4 compatibility patch + * SAUCE: AppArmor: Allow dfa backward compatibility with broken userspace + * SAUCE: fix pv-ops for legacy Xen + * SAUCE: blkfront: default to sd devices + * [Config] Build in drivers required for Xen pv-ops + + [ Leann Ogasawara ] + + * Revert "[Upstream] i915: Use 
the correct mask to detect i830 aperture + size." + + [ Lee Jones ] + + * SAUCE: ARM: OMAP: Add macros for comparing silicon revision + - LP: #608095 + * SAUCE: OMAP: DSS2: check for both cpu type and revision, rather than + just revision + - LP: #608095 + * SAUCE: OMAP: DSS2: enable hsclk in dsi_pll_init for OMAP36XX + - LP: #608095 + * SAUCE: ARM: OMAP: Beagle: support twl gpio differences on xM + - LP: #608095 + + [ Upstream Kernel Changes ] + + * agp/intel: Use the correct mask to detect i830 aperture size. + - LP: #597075 + + -- Leann Ogasawara Fri, 30 Jul 2010 15:46:59 -0700 + +linux (2.6.35-12.17) maverick; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc6 + * [Config] update configs following rebase to v2.6.35-rc6 + * [Config] update ports configs following rebase to v2.6.35-rc6 + * SAUCE: [FTBS] armel: define KEY_F10 and KEYF11 + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc6 + + -- Leann Ogasawara Fri, 23 Jul 2010 16:16:38 +0200 + +linux (2.6.35-11.16) maverick; urgency=low + + [ Leann Ogasawara ] + + * Bump ABI for new compiler update + + -- Leann Ogasawara Fri, 23 Jul 2010 10:24:58 +0200 + +linux (2.6.35-10.15) maverick; urgency=low + + [ Leann Ogasawara ] + + * Revert "SAUCE: ensure vga16fb loads if no other driver claims the VGA + device" + * [Config] Enable CONFIG_M686=y + - LP: #592495 + + [ Upstream Kernel Changes ] + + * tracing: Add alignment to syscall metadata declarations + + -- Leann Ogasawara Tue, 20 Jul 2010 18:18:49 +0200 + +linux (2.6.35-9.14) maverick; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- add BOM and automated update script + * ubuntu: AUFS -- update to b37c575759dc4535ccc03241c584ad5fe69e3b25 + + [ John Johansen ] + + * [Config] Enable DRBD as a module + + [ Kees Cook ] + + * SAUCE: Yama: verify inode is symlink to avoid bind mounts + - LP: #604407 + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_DRM_VMWGFX (staging driver) + - LP: #606139 + * [Config] ports: Disable CONFIG_DRM_VMWGFX (staging driver) + - LP: #606139 + * [Config] Enable CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y + * [Config] ports: Enable CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y + + [ Lee Jones ] + + * Stop ARM boards crashing when CUPS is loaded + - LP: #601226 + + [ Upstream Kernel Changes ] + + * perf probe: Support tracing an entry of array + * perf probe: Support static and global variables + + -- Leann Ogasawara Fri, 16 Jul 2010 14:38:17 -0700 + +linux (2.6.35-8.13) maverick; urgency=low + + [ Kees Cook ] + + * SAUCE: Yama: check PTRACE using thread group leader + * SAUCE: Yama: search for PTRACE exceptions via thread group leader + - LP: #603716 + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc5 + * [Config] update configs following rebase to v2.6.35-rc5 + + [ Nicolas Pitre ] + + * SAUCE: make ndiswrapper available on X86 only + + [ Tim Gardner ] + + * [Config] Added ums-cypress to udeb + - LP: #576066 + * SAUCE: fix build error with CONFIG_BLK_DEV_INITRD=n + * [Config] CONFIG_NDISWRAPPER=m across all configs + + [ Upstream Kernel Changes ] + + * HID: magicmouse: report last touch up + * rebase to 2.6.35-rc5 + + -- Leann Ogasawara Tue, 13 Jul 2010 18:57:59 -0700 + +linux (2.6.35-7.12) maverick; urgency=low + + [ Tim Gardner ] + + * [Upstream] i915: Use the correct mask to detect i830 aperture size. 
+ - LP: #597075 + + [ Upstream Kernel Changes ] + + * (drop after 2.6.35) drm/radeon/kms: add ioport register access + (squashed) + + -- Tim Gardner Thu, 08 Jul 2010 09:53:13 -0600 + +linux (2.6.35-7.11) maverick; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_X86_MRST=n + + [ Upstream Kernel Changes ] + + * (drop after 2.6.35-rc5) writeback: remove writeback_inodes_wbc + * (drop after 2.6.35-rc5) writeback: split writeback_inodes_wb + * (drop after 2.6.35-rc5) writeback: simplify the write back thread queue + + -- Tim Gardner Tue, 06 Jul 2010 18:39:08 -0600 + +linux (2.6.35-7.10) maverick; urgency=low + + [ Kees Cook ] + + * SAUCE: security: create task_free security callback + * SAUCE: Yama: add PTRACE exception tracking and interface + * SAUCE: security: unconditionally chain to Yama LSM + * Revert "SAUCE: ptrace: restrict ptrace scope to children" + * Revert "SAUCE: fs: block hardlinks to non-accessible sources" + * Revert "SAUCE: fs: block cross-uid sticky symlinks" + * [Upstream] security: Yama LSM + * [Config] Enable CONFIG_SECURITY_YAMA=y + + [ Tim Gardner ] + + * [Config] updateconfigs/updateportsconfigs after rebase to 2.6.35-rc4 + + [ Upstream Kernel Changes ] + + * rebase to 2.6.35-rc4 + + -- Leann Ogasawara Thu, 01 Jul 2010 08:55:57 -0700 + +linux (2.6.35-6.9) maverick; urgency=low + + [ Tim Gardner ] + + * [Upstream] direct_splice_actor() should not use pos in sd + - LP: #588861 + + -- Leann Ogasawara Mon, 28 Jun 2010 12:35:49 -0700 + +linux (2.6.35-6.8) maverick; urgency=low + + [ Mathieu J. Poirier ] + + * ARM: Adding regulator supply for vdds_sdi. + - LP: #597904 + + -- Leann Ogasawara Sun, 27 Jun 2010 16:34:43 -0700 + +linux (2.6.35-6.7) maverick; urgency=low + + [ Alberto Milone ] + + * [Upstream] Add support for the ATIF ACPI method to the radeon driver + + [ Chase Douglas ] + + * [Upstream] HID: magicmouse: scroll on entire surface, not just middle + of mouse + * [Upstream] HID: magicmouse: disable and add module param for scroll + acceleration + * [Upstream] HID: magicmouse: properly account for scroll movement in + state + * [Upstream] HID: magicmouse: add param for scroll speed + * [Upstream] HID: magicmouse: enable horizontal scrolling + + [ Henrik Rydberg ] + + * [Upstream] Input: evdev - convert to dynamic event buffer + * [Upstream] Input: evdev - use driver hint to compute size of event + buffer + * [Upstream] Input: bcm5974 - set the average number of events per MT + event packet + * [Upstream] Input: hid-input - use a larger event buffer for MT devices + * [Upstream] Input: evdev - never leave the client buffer empty after + write + + [ John Johansen ] + + * SAUCE: AppArmor: -- mainline 2010-06-23 + * SAUCE: AppArmor 2.4 compatibility patch + * SAUCE: fs: block hardlinks to non-accessible sources AppArmor portion + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_INTR_REMAP=y + - LP: #597091 + * [Config] Enable CONFIG_X86_X2APIC + - LP: #597091 + + [ Mathieu J. 
Poirier ] + + * [Config] ARM: Turning off CONFIG_CPU_IDLE on omap + - LP: #594382 + + -- Leann Ogasawara Thu, 24 Jun 2010 12:19:48 -0700 + +linux (2.6.35-5.6) maverick; urgency=low + + [ Amit Kucheria ] + + * [Config] update omap flavour description + + [ Andy Whitcroft ] + + * update to ubuntu-debian:508b7aa34b578c0d1e51bfb571f2bfb824dc65ac + - LP: #570500, #576274 + * SAUCE: add option to hand off all kernel parameters to init + - LP: #586386 + * [Config] enable passing all kernel command line to init + - LP: #586386 + * [Config] disable CONFIG_VMI + - LP: #537601 + * [Config] enable CONFIG_IPV6_SIT_6RD + - LP: #591869 + * [Config] enable CONFIG_VMWARE_BALOON as module + - LP: #592039 + + [ Leann Ogasawara ] + + * Revert "SAUCE: pm: Config option to disable handling of console during + suspend/resume" + - LP: #594885 + * [Config] Remove CONFIG_PM_DISABLE_CONSOLE + * [Config] ports: enable passing all kernel command line to init + - LP: #586386 + * [Config] Enable CONFIG_FB_VESA=y for x86 + * [Config] Add CONFIG_FRAMEBUFFER_CONSOLE=y to config enforcer + * [Config] Add CONFIG_FB_VESA=y for x86 to config enforcer + * [Config] Enable CONFIG_TASK_DELAY_ACCT=y + - LP: #493156 + + [ Mathieu Poirier ] + + * ARM: Adding MosChip MCS7830 to nic-usb + - LP: #584920 + + [ Upstream Kernel Changes ] + + * Revert "[Upstream] docbook: need xmldoclinks for all doc types" + * docbook: need xmldoclinks for all doc types + * perf probe: Add kernel source path option + + -- Leann Ogasawara Thu, 17 Jun 2010 08:05:29 -0700 + +linux (2.6.35-4.5) maverick; urgency=low + + [ Leann Ogasawara ] + + * Revert "[Upstream] (evdev) Use driver hint to compute the evdev buffer + size (rev2)" + * Revert "[Upstream] (evdev) Convert to dynamic event buffer (rev4)" + * Revert "[Upstream] (evdev) Use multi-reader buffer to save space + (rev4)" + * Revert "SAUCE: drivers: Remove some duplicate device entries in various + modules" + * [Upstream] USB: option: Remove duplicate AMOI_VENDOR_ID + * [Upstream] Revert "USB: Adding support for HTC Smartphones to ipaq" + * [Upstream] p54usb: Comment out duplicate Medion MD40900 device id + + [ Tim Gardner ] + + * [Config] CONFIG_NFS_FSCACHE=y + - LP: #440522 + * [Config] CONFIG_FSCACHE_STATS=y, CONFIG_FSCACHE_HISTOGRAM=y + - LP: #440522 + + -- Leann Ogasawara Wed, 16 Jun 2010 08:43:07 -0700 + +linux (2.6.35-3.4) maverick; urgency=low + + [ Andy Whitcroft ] + + * debian -- ensure the version number is clean + + [ Henrik Rydberg ] + + * [Upstream] Introduce MT event slots (rev 5) + * [Upstream] Document the MT event slot protocol (rev5) + * [Upstream] (evdev) Use multi-reader buffer to save space (rev4) + * [Upstream] (evdev) Convert to dynamic event buffer (rev4) + * [Upstream] (evdev) Use driver hint to compute the evdev buffer size + (rev2) + + [ Leann Ogasawara ] + + * Revert "SAUCE: Add MODULE_ALIAS for Dell WMI module" + * Revert "SAUCE: hostap: send events on data interface as well as master + interface" + * Revert "Fix webcam having USB ID 0ac8:303b" + * Revert "SAUCE: toshiba_acpi -- pull in current -dev version of driver" + * rebase to v2.6.35-rc3 + + [ Maxim Levitsky ] + + * [Config] Enable new Smartmedia/xD translation layer + - LP: #202490 + + [ Upstream Kernel Changes ] + + * net: fix deliver_no_wcard regression on loopback device + + [ Upstream changes ] + + * rebased to v2.6.35-rc3 + + -- Leann Ogasawara Thu, 10 Jun 2010 16:15:22 -0700 + +linux (2.6.35-2.3) maverick; urgency=low + + [ Bryan Wu ] + + * CONFIG: enforce -- make sure we disable CONFIG_LOCALVERSION_AUTO + + [ 
Leann Ogasawara ] + + * [Config] armel: Enable CONFIG_BNX2=m + * [Config] ports: Enable CONFIG_BNX2X=m + * SAUCE: armel: define get_dma_ops to fix FTBS + + [ Tim Gardner ] + + * [Upstream] net: Print num_rx_queues imbalance warning only when there + are allocated queues + - LP: #591416 + + -- Leann Ogasawara Wed, 09 Jun 2010 08:27:41 -0700 + +linux (2.6.35-2.2) maverick; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i: make armel configuration versatile flavour specific + - LP: #588805 + * [Config] d-i: enable .udebs for omap flavour + - LP: #588805 + + [ Kees Cook ] + + * ptrace: limit scope to attach only (allow read) + - LP: #589656 + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc2 + * [Config] update configs following rebase to v2.6.35-rc2 + * [Config] update port configs following rebase to v2.6.35-rc2 + + [ Lee Jones ] + + * Enable perf to be more helpful when perf_ does not exist. + - LP: #570500 + * 'fdr editconfig' modification. Easily skip over unwanted menuconfigs. + + [ Tim Gardner ] + + * [Config] Update bnx2 udeb firmware files + - LP: #589304 + + [ Upstream changes ] + + * rebased to v2.6.35-rc2 + + -- Leann Ogasawara Mon, 07 Jun 2010 09:45:04 -0700 + +linux (2.6.35-1.1) maverick; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update to standalone 2.6.35-rcN as at 20100601 + - LP: #587888 + * ubuntu: AUFS -- track changes to the arguements to fop fsync() + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc1 + * [Config] update configs following rebase to v2.6.35-rc1 + * [Config] update port configs following rebase to v2.6.35-rc1 + * SAUCE: lirc: rename usb_buffer_alloc() and usb_buffer_free() + * SAUCE: ndiswrapper: rename usb_buffer_alloc() and usb_buffer_free() + * SAUCE: ndiswrapper: convert multicast list to list_head + * [Config] [FTBS] armel: Temporarily disable CONFIG_GPIO_JANZ_TTL + * [Config] [FTBS] ia64: Temporarily disable gpiolib + * [Config] [FTBS] ia64: Temporarily disable CONFIG_CEPH_FS + * [Config] [FTBS] sparc: Temporarily disable CONFIG_INFINIBAND_QIB + * [Config] [FTBS] sparc: Temporarily disable CONFIG_MFD_JANZ_CMODIO + * [Config] [FTBS] armel: Temporarily disable CONFIG_MFD_JANZ_CMODIO + * [Config] [FTBS] armel: Temporarily disable CONFIG_DT3155 + * [Config] [FTBS] sparc: Temporarily disable CONFIG_MTD_NAND_DENALI + * [Config] [FTBS] armel: Temporarily disable bnx2 + * [Config] [FTBS] armel: Temporarily disable CONFIG_SERIAL_UARTLITE + * SAUCE: [FTBS] armel: Don't include asm/agp.h for ttm + * SAUCE: [FTBS] armel: include linux/dma-mapping.h + * SAUCE: [FTBS] armel: replace omap_set_gpio_debounce with + gpio_set_debounce + + [ Upstream Kernel Changes ] + + * of/usb: fix build error due to of_node pointer move + * n2_crypto: Fix build after of_device/of_platform_driver changes. + * powerpc/fsl-booke: fix the case where we are not in the first page + * powerpc/fsl-booke: Move the entry setup code into a seperate file + * powerpc/kexec: Add support for FSL-BookE + * greth: Fix build after OF device conversions. + + [ Upstream changes ] + + * rebased to v2.6.35-rc1 + + -- Leann Ogasawara Fri, 04 Jun 2010 23:01:52 -0700 + +linux (2.6.35-1.0) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * Null entry. 
+ + -- Leann Ogasawara Wed, 02 Jun 2010 15:17:41 -0700 + +linux (2.6.34-5.14) maverick; urgency=low + + [ Tim Gardner ] + + * [Config] Added module inclusion support + * [Config] Added virtual flavour module inclusion list and d-i package + definitions + + -- Leann Ogasawara Wed, 02 Jun 2010 12:58:14 -0700 + +linux (2.6.34-5.13) maverick; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- aufs2 20091209" + * Revert "ubuntu: AUFS -- export various core functions + (aufs2-standalone.patch)" + * Revert "ubuntu: AUFS -- export various core functions + (aufs2-base.patch)" + * ubuntu: AUFS -- aufs2 base patch for linux-2.6.34 + - LP: #587888 + * ubuntu: AUFS -- aufs2 standalone patch for linux-2.6.34 + - LP: #587888 + * ubuntu: AUFS -- update to standalone 2.6.34 as at 20100601 + - LP: #587888 + * [Config] AUFS -- enable aufs options + - LP: #587888 + + -- Leann Ogasawara Tue, 01 Jun 2010 08:56:43 -0700 + +linux (2.6.34-5.12) maverick; urgency=low + + [ Andy Whitcroft ] + + * enforce -- ensure SYSFS compatibility is disabled + + [ Chase Douglas ] + + * build with libdw-dev for perf probe symbol support + * maverick ftrace configuration changes + + [ Kees Cook ] + + * Revert "SAUCE: x86: brk away from exec rand area" + * Revert "SAUCE: [um] Don't use nx_enabled under UML" + * Revert "SAUCE: [x86] implement cs-limit nx-emulation for ia32" + * SAUCE: x86: implement cs-limit nx-emulation for ia32 + - LP: #369978 + * SAUCE: x86: more tightly confine cs-limit nx-emulation to ia32 only + * SAUCE: x86: brk away from exec rand area + - LP: #452175 + * SAUCE: ptrace: restrict ptrace scope to children + + [ Leann Ogasawara ] + + * Add new omap flavour to getabis + * [Config] Enable CONFIG_FRAMEBUFFER_CONSOLE=y for all archs + - LP: #585490 + * build/modules: Temorarily add ignore.modules + * ubuntu: iscsitarget -- version 1.4.20.1 + + [ Loïc Minier ] + + * SAUCE: [um] Don't use nx_enabled under UML + - LP: #524849 + + -- Leann Ogasawara Fri, 28 May 2010 08:27:17 -0700 + +linux (2.6.34-4.11) maverick; urgency=low + + [ Amit Kucheria ] + + * SAUCE: omap: remove calls to usb_nop_xceiv_register from board files + * [Config] Add support for OMAP-mainline flavour + + [ Andy Whitcroft ] + + * SAUCE: powerpc: fix compile error when ptrace.h is included from + userspace + - LP: #583733 + + [ Chase Douglas ] + + * Revert "SAUCE: Don't register vga16fb framebuffer if other framebuffers + are present" + * Revert "SAUCE: Disable function tracing after hitting __schedule_bug" + * Revert "SAUCE: drm/i915: don't change DRM configuration when releasing + load detect pipe" + + [ Kees Cook ] + + * SAUCE: fs: block cross-uid sticky symlinks + * SAUCE: fs: block hardlinks to non-accessible sources + + [ Koen Kooi ] + + * SAUCE: board-omap3-beagle: add DSS2 support + + [ Leann Ogasawara ] + + * Revert "staging/go7007 -- disable" + * Revert "[Config] staging/winbond -- disable" + * Revert "Disable 4MB page tables for Atom, work around errata AAE44" + * Revert "SAUCE: sync before umount to reduce time taken by ext4 umount" + * Revert "SAUCE: Enable an e1000e Intel Corporation 82567 Gigabit + controller" + * Revert "SAUCE: Fix MODULE_IMPORT/MODULE_EXPORT" + * Revert "SAUCE: Created MODULE_EXPORT/MODULE_IMPORT macros" + * Revert "SAUCE: input/mouse/alps: Do not call psmouse_reset() for alps" + * Revert "SAUCE: r8169: disable TSO by default for RTL8111/8168B + chipsets." + * Revert "[Upstream] b43: Declare all possible firmware files." 
+ * Revert "add Breaks: against hardy lvm2" + * Revert "SAUCE: Guest OS does not recognize a lun with non zero target + id on Vmware ESX Server" + * Revert "SAUCE: Catch nonsense keycodes and silently ignore" + * [Config] Enable CONFIG_ECRYPT_FS=y for ports + * [Config] Enable CONFIG_USB=y for armel and sparc + * [Config] Enable CONFIG_SCSI=y for ia64 and sparc + * [Config] Enable CONFIG_RFKILL=y for ports + * [Config] Enable CONFIG_ATH9K_DEBUGFS=y + * [Config] Enable CONFIG_IWMC3200TOP_DEBUGFS=y + * [Config] Enable CONFIG_RCU_FAST_NO_HZ=y + * [Config] Enable CONFIG_IWLWIFI_DEVICE_TRACING=y + * [Config] Enable CONFIG_LIBERTAS_MESH=y + * [Config] Enable CONFIG_MMC_RICOH_MMC=y + * [Config] CONFIG_RT2800USB_UNKNOWN=y + * [Config] Enable CONFIG_VGA_SWITCHEROO=y + * [Config] Enable CONFIG_CEPH_FS=m + * [Config] Enable CONFIG_CRYPTO_PCRYPT=m + * [Config] Enable CONFIG_EEEPC_WMI=m + * [Config] Enable CONFIG_RT2800PCI=m + * [Config] Enable CONFIG_SCSI_HPSA=m + * [Config] Enable CONFIG_VHOST_NET=m + * [Config] Disable CONFIG_SND_HDA_INPUT_BEEP_MODE by default + - LP: #582350 + * [Config] Disable CONFIG_SOUND_OSS* and CONFIG_SND_*OSS + - LP: #579300 + * [Config] Enable CONFIG_PCIEASPM=y + - LP: #333990 + * [Config] updateconfigs for OMAP flavour + + [ Loïc Minier ] + + * Enable perf tools on armel + + [ Tim Gardner ] + + * SAUCE: Updated ndiswrapper to 1.56 + - LP: #582555 + * [Config] Added virtual flavour + * [Config] Remove support for sub-flavours + * [Config] Removed amd64 preempt flavour + * [Config] updateconfigs, updateportsconfigs after flavour munging + + -- Leann Ogasawara Tue, 25 May 2010 09:34:55 -0700 + +linux (2.6.34-3.10) maverick; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34 + + [ Upstream changes ] + + * rebased to v2.6.34 + + -- Leann Ogasawara Tue, 18 May 2010 17:35:35 -0700 + +linux (2.6.34-2.9) maverick; urgency=low + + [ Leann Ogasawara ] + + * [Config] [FTBS] Disable comedi for armel + + -- Leann Ogasawara Thu, 13 May 2010 23:20:55 +0200 + +linux (2.6.34-2.8) maverick; urgency=low + + [ Leann Ogasawara ] + + * Drop lpia + * [Config] [FTBS] disable KVM + * [Config] [FTBS] disable ipr for armel + + -- Leann Ogasawara Thu, 13 May 2010 16:07:52 +0200 + +linux (2.6.34-2.7) maverick; urgency=low + + [ Leann Ogasawara ] + + * [Config] disable CONFIG_SCSI_IPR on powerpc + * [Config] Remove 386 flavour per UDS discussion + + -- Leann Ogasawara Wed, 12 May 2010 18:26:43 +0200 + +linux (2.6.34-1.6) maverick; urgency=low + + [ Chase Douglas ] + + * enforce CONFIG_TMPFS_POSIX_ACL=y + - LP: #575940 + * don't force module dependency checking + - LP: #577029 + + [ Kees Cook ] + + * SAUCE: mmap_min_addr check CAP_SYS_RAWIO only for write + - LP: #568844 + + [ Leann Ogasawara ] + + * Revert "SAUCE: ata: blacklist FUJITSU MHW2160BH PL" + * rebase to v2.6.34-rc7 + * [Config] update configs following rebase to v2.6.34-rc7 + * [Config] update port configs following rebase to v2.6.34-rc7 + * Add btrfs to the udebs + + [ Tim Gardner ] + + * [Config] Add atl1c to nic-modules udeb + - LP: #557130 + + [ Upstream changes ] + + * rebased to v2.6.34-rc7 + + -- Leann Ogasawara Tue, 11 May 2010 11:29:08 +0200 + +linux (2.6.34-1.5) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34-rc6 + * [Config] update configs following rebase to v2.6.34-rc6 + * [Config] update port configs following rebase to v2.6.34-rc6 + + [ Upstream changes ] + + * rebased to v2.6.34-rc6 + + -- Leann Ogasawara Fri, 30 Apr 2010 15:54:05 +0100 + +linux (2.6.34-1.4) UNRELEASED; urgency=low + + [ Leann 
Ogasawara ] + + * rebase to v2.6.34-rc5 + * [Config] update ports configs following rebase to v2.6.34-rc5 + + [ Upstream changes ] + + * rebased to v2.6.34-rc5 + + -- Leann Ogasawara Thu, 22 Apr 2010 15:36:12 -0700 + +linux (2.6.34-1.3) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34-rc4 + * [Config] update configs following rebase to v2.6.34-rc4 + * [Config] update port configs following rebase to v2.6.34-rc4 + * ubuntu: dm-raid4-5 -- update to compile with 2.6.34-rc4 + + [ Upstream changes ] + + * rebased to v2.6.34-rc4 + + -- Leann Ogasawara Tue, 13 Apr 2010 18:33:44 -0700 + +linux (2.6.34-1.2) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * Temorarily disable building linux-doc + * rebase to v2.6.34-rc3 + * [Config] update configs following rebase to v2.6.34-rc3 + * [Config] update port configs following rebase to v2.6.34-rc3 + + [ Upstream changes ] + + * rebased to v2.6.34-rc3 + + -- Leann Ogasawara Tue, 30 Mar 2010 16:55:44 -0700 + +linux (2.6.34-1.1) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34-rc2 + * ubuntu: dm-raid4-5 -- update to compile with 2.6.34-rc2 + * [Config] update port configs following rebase to v2.6.34-rc2 + * [Config] update configs following rebase to v2.6.34-rc2 + + [ Upstream changes ] + + * rebased to v2.6.34-rc2 + + -- Leann Ogasawara Wed, 24 Mar 2010 23:00:39 -0700 + +linux (2.6.33-1.1) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * ubuntu: dm-raid4-5 -- update to compile with 2.6.33 + * ubuntu: lirc -- drop explicit include of linux/autoconf.h + * ubuntu: lirc -- pass kfifo to kfifo_alloc and move spinlock + * ubuntu: lirc -- rename kfifo_put and kfifo_get + * ubuntu: iscsitarget -- rename daddr inet_sock field + * rebased to v2.6.33 + * [Config] update configs following rebase to v2.6.33 + * [Config] update ports configs following rebase to v2.6.33 + + [ Upstream changes ] + + * rebased to v2.6.33 + + -- Leann Ogasawara Tue, 23 Mar 2010 03:55:46 -0700 + +linux (2.6.33-0.0) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * Null entry. 
+ + -- Leann Ogasawara Wed, 17 Mar 2010 07:48:56 -0700 + +linux (2.6.32-16.25) lucid; urgency=low + + [ Andy Whitcroft ] + + * linux-tools -- move to Suggests: with explicit seeding + - LP: #534635 + + [ Tim Gardner ] + + * [Config] CONFIG_HID=m + + [ Upstream Kernel Changes ] + + * (pre-stable) sched: Fix SMT scheduler regression in + find_busiest_queue() + * KVM: introduce kvm_vcpu_on_spin + * KVM: VMX: Add support for Pause-Loop Exiting + + -- Andy Whitcroft Tue, 09 Mar 2010 14:13:51 +0000 + +linux (2.6.32-16.24) lucid; urgency=low + + [ Andy Whitcroft ] + + * armel -- perf userspace does not support arm + * ia64 -- libelf-dev/binutils-dev to not provide necessary libraries + + -- Andy Whitcroft Sat, 06 Mar 2010 11:42:12 +0000 + +linux (2.6.32-16.23) lucid; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: PM report driver and device suspend/resume times -- move config + * update to standards version 3.8.4.0 + * printenv -- expose all of the package selectors + * source package -- cleanup source content control + * doc package -- ensure we do build package content on buildd + * lintian -- correct the address in the debian/copyright + * lintian -- update debhelper package version dependancy + * lintian -- fix ghostscript dependancy + * lintian -- add required misc:Depends + * lintian -- move our debhelper compat level to debian/compat + * perf -- build the kernel carried tools + * perf -- add linux-tools carrying the version switches and manuals + * SAUCE: fix up Kconfig for staging drivers + * [Config] enable NOUVEAU etc following drm backport + * update DRM to mainline v2.6.33 + * [Config] Remove AppArmor config options that no longer exist (ports) + * [Config] updateportsconfigs following drm update + + [ John Johansen ] + + * ubuntu: AppArmor -- update to mainline 2010-03-04 + * SAUCE: AppArmor: Reintroduce AppArmor 2.4 compatibility + * SAUCE: AppArmor: replace strim with strstrip for 2.6.32 kernels + * [Config] Remove AppArmor config options that no longer exist + + [ Manoj Iyer ] + + * ubuntu: rtl8192se -- version 2010-0115,0014 + - LP: #530275 + * [Config] added CONFIG_RTL8192SE module. + - LP: #530275 + + [ Tim Gardner ] + + * [Config] Added vmw_pvscsi to d-i/scsi-modules + - LP: #531017 + * [Upstream] netfilter: xt_recent: Add an entry reaper + + [ Upstream Kernel Changes ] + + * Revert "KVM: x86 emulator: Check CPL level during privilege instruction + emulation" + * Revert "KVM: x86 emulator: Fix popf emulation" + * Revert "KVM: x86 emulator: Check IOPL level during io instruction + emulation" + * Revert "KVM: x86 emulator: Add Virtual-8086 mode of emulation" + * Revert "KVM: fix memory access during x86 emulation." + * Add vlan (8021.Q) module package for d-i. + * (pre-stable) drm/i915: blacklist lid status: Sony VGN-BX196VP, Dell + Inspiron 700m + - LP: #515246 + * [Upstream] docbook: need xmldoclinks for all doc types + * x86: set_personality_ia32() misses force_personality32 + * lib: Introduce generic list_sort function + * drm/nv50: Implement ctxprog/state generation. + * drm/nv50: Remove redundant/incorrect ctxvals initialisation. + * (pre-stable) drm/i915: blacklist lid status: Sony VGN-BX196VP, Dell + Inspiron 700m + - LP: #515246 + + -- Andy Whitcroft Fri, 05 Mar 2010 15:40:38 +0000 + +linux (2.6.32-15.22) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Config] added new config option CONFIG_SR_REPORT_TIME_LIMIT" + * Revert "SAUCE: PM report driver and device suspend/resume times." 
+ * [Config] set CONFIG_SR_REPORT_TIME_LIMIT + + [ Manoj Iyer ] + + * SAUCE: PM report driver and device suspend/resume times. + + -- Andy Whitcroft Tue, 02 Mar 2010 01:35:37 +0000 + +linux (2.6.32-15.21) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "(pre-stable) drm/i915: Increase fb alignment to 64k" + * Revert "[Config] lenovo-sl-laptop -- enable" + * Revert "ubuntu: lenovo-sl-laptop -- git tip (b19a08f81f)" + * armel -- cramfs module will no longer be built + * d-i -- make all modules optional + * rename the debug packages to match archive standard + - LP: #527837 + * lenovo-sl-laptop is no longer built + + [ Colin Ian King ] + + * Disable 4MB page tables for Atom, work around errata AAE44 + - LP: #523112 + + [ Colin Watson ] + + * ubuntu: dm-raid4-5: Depend on XOR_BLOCKS + * ubuntu: fsam7400: Depend on CHECK_SIGNATURE + + [ Jesse Barnes ] + + * SAUCE: drm/i915: don't change DRM configuration when releasing load + detect pipe + - LP: #488328 + + [ Loïc Minier ] + + * [Config] armel Update versatile initrd configs + - LP: #524893 + * SAUCE: [um] Don't use nx_enabled under UML + - LP: #524849 + + [ Manoj Iyer ] + + * [Config] added new config option CONFIG_SR_REPORT_TIME_LIMIT + + [ Mario Limonciello ] + + * SAUCE: v3 - Add Dell Business Class Netbook LED driver + + [ Rafael J. Wysocki ] + + * SAUCE: PM report driver and device suspend/resume times. + + [ Surbhi Palande ] + + * Revert "[Upstream] e1000e: enhance frame fragment detection" + - CVE-2009-4538 + * Revert "[Upstream] e1000: enhance frame fragment detection" + - CVE-2009-4536 + + [ Tim Gardner ] + + * [Config] Enabled CONFIG_LEDS_DELL_NETBOOKS=m + * SAUCE: (pre-stable) netfilter: xt_recent: fix buffer overflow + * SAUCE: (pre-stable) netfilter: xt_recent: fix false match + + [ Upstream Kernel Changes ] + + * Revert "(pre-stable) eCryptfs: Add getattr function" + * Fix potential crash with sys_move_pages + * futex_lock_pi() key refcnt fix + * futex: Handle user space corruption gracefully + * futex: Handle futex value corruption gracefully + * Fix race in tty_fasync() properly + * hwmon: (w83781d) Request I/O ports individually for probing + * hwmon: (lm78) Request I/O ports individually for probing + * hwmon: (adt7462) Wrong ADT7462_VOLT_COUNT + * ALSA: ctxfi - fix PTP address initialization + * drm/i915: disable hotplug detect before Ironlake CRT detect + * drm/i915: enable self-refresh on 965 + * drm/i915: Disable SR when more than one pipe is enabled + * drm/i915: Fix DDC on some systems by clearing BIOS GMBUS setup. + * drm/i915: Add HP nx9020/SamsungSX20S to ACPI LID quirk list + * drm/i915: Fix the incorrect DMI string for Samsung SX20S laptop + * drm/i915: Add MALATA PC-81005 to ACPI LID quirk list + * usb: r8a66597-hcd: Flush the D-cache for the pipe-in transfer buffers. + * i2c-tiny-usb: Fix on big-endian systems + * drm/i915: handle FBC and self-refresh better + * drm/i915: Increase fb alignment to 64k + * drm/i915: Update write_domains on active list after flush. 
+ * regulator: Fix display of null constraints for regulators + * ALSA: hda-intel: Avoid divide by zero crash + * CPUFREQ: Fix use after free of struct powernow_k8_data + * freeze_bdev: don't deactivate successfully frozen MS_RDONLY sb + * cciss: Make cciss_seq_show handle holes in the h->drv[] array + * ioat: fix infinite timeout checking in ioat2_quiesce + * resource: add helpers for fetching rlimits + * fs/exec.c: restrict initial stack space expansion to rlimit + * cifs: fix length calculation for converted unicode readdir names + * NFS: Fix a reference leak in nfs_wb_cancel_page() + * NFS: Try to commit unstable writes in nfs_release_page() + * NFSv4: Don't allow posix locking against servers that don't support it + * NFSv4: Ensure that the NFSv4 locking can recover from stateid errors + * NFS: Fix an Oops when truncating a file + * NFS: Fix a umount race + * NFS: Fix a bug in nfs_fscache_release_page() + * NFS: Fix the mapping of the NFSERR_SERVERFAULT error + * md: fix 'degraded' calculation when starting a reshape. + * V4L/DVB: dvb-core: fix initialization of feeds list in demux filter + * Export the symbol of getboottime and mmonotonic_to_bootbased + * kvmclock: count total_sleep_time when updating guest clock + * KVM: PIT: control word is write-only + * tpm_infineon: fix suspend/resume handler for pnp_driver + * amd64_edac: Do not falsely trigger kerneloops + * netfilter: nf_conntrack: fix memory corruption with multiple namespaces + * netfilter: nf_conntrack: per netns nf_conntrack_cachep + * netfilter: nf_conntrack: restrict runtime expect hashsize modifications + * netfilter: xtables: compat out of scope fix + * netfilter: nf_conntrack: fix hash resizing with namespaces + * drm/i915: remove full registers dump debug + * drm/i915: add i915_lp_ring_sync helper + * drm/i915: Don't wait interruptible for possible plane buffer flush + * dasd: remove strings from s390dbf + * crypto: padlock-sha - Add import/export support + * wmi: Free the allocated acpi objects through wmi_get_event_data + * dell-wmi, hp-wmi, msi-wmi: check wmi_get_event_data() return value + * /dev/mem: introduce size_inside_page() + * devmem: check vmalloc address on kmem read/write + * devmem: fix kmem write bug on memory holes + * SCSI: mptfusion : mptscsih_abort return value should be SUCCESS instead + of value 0. 
+ * sh: Couple kernel and user write page perm bits for CONFIG_X2TLB + * ALSA: hda - use WARN_ON_ONCE() for zero-division detection + * dst: call cond_resched() in dst_gc_task() + * ALSA: hda - Improved MacBook (Pro) 5,1 / 5,2 support + * befs: fix leak + * rtc-fm3130: add missing braces + * Call flush_dcache_page after PIO data transfers in libata-sff.c + * ahci: add Acer G725 to broken suspend list + * pktgen: Fix freezing problem + * x86/amd-iommu: Fix IOMMU-API initialization for iommu=pt + * x86/amd-iommu: Fix deassignment of a device from the pt_domain + * x86: Re-get cfg_new in case reuse/move irq_desc + * Staging: fix rtl8187se compilation errors with mac80211 + * ALSA: usb-audio - Avoid Oops after disconnect + * serial: 8250: add serial transmitter fully empty test + * sysfs: sysfs_sd_setattr set iattrs unconditionally + * class: Free the class private data in class_release + * USB: usbfs: only copy the actual data received + * USB: usbfs: properly clean up the as structure on error paths + * rtl8187: Add new device ID + * ACPI: Add NULL pointer check in acpi_bus_start + * ACPI: fix High cpu temperature with 2.6.32 + * drm/radeon/kms: use udelay for short delays + * NFS: Too many GETATTR and ACCESS calls after direct I/O + * eCryptfs: Add getattr function + * b43: Fix throughput regression + * ath9k: Fix sequence numbers for PAE frames + * mac80211: Fix probe request filtering in IBSS mode + * iwlwifi: Fix to set correct ht configuration + * dm stripe: avoid divide by zero with invalid stripe count + * dm log: userspace fix overhead_size calcuations + * Linux 2.6.32.9 + * sfc: Fix SFE4002 initialisation + * sfc: Fix sign of efx_mcdi_poll_reboot() error in efx_mcdi_poll() + * sfc: SFE4002/SFN4112F: Widen temperature and voltage tolerances + * (pre-stable) HID: handle joysticks with large number of buttons + - LP: #492056 + * (pre-stable) HID: extend mask for BUTTON usage page + - LP: #492056 + * PM: Measure device suspend and resume times + * e1000: enhance frame fragment detection + - CVE-2009-4536 + * e1000e: enhance frame fragment detection + - CVE-2009-4538 + * KVM: fix memory access during x86 emulation. 
+ - CVE-2010-0306 + * KVM: x86 emulator: Add Virtual-8086 mode of emulation + - CVE-2010-0306 + * KVM: x86 emulator: Check IOPL level during io instruction emulation + - CVE-2010-0306 + * KVM: x86 emulator: Fix popf emulation + - CVE-2010-0306 + * KVM: x86 emulator: Check CPL level during privilege instruction + emulation + - CVE-2010-0306 + * Input: wacom - ensure the device is initialized properly upon resume + * Input: wacom - add defines for packet lengths of various devices + * Input: wacom - add support for new LCD tablets + - LP: #516777 + + -- Andy Whitcroft Mon, 01 Mar 2010 22:56:28 +0000 + +linux (2.6.32-14.20) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebuild following the GCC update to match compiler for out of tree modules + * Revert "[Config] drbd -- enable" + * Revert "ubuntu: drbd -- version 8.3.1" + * SAUCE: khubd -- switch USB product/manufacturer/serial handling to RCU + - LP: #510937 + + -- Andy Whitcroft Fri, 19 Feb 2010 18:47:18 +0000 + +linux (2.6.32-14.19) lucid; urgency=low + + [ Andy Whitcroft ] + + * ensure we build the source package contents when enabled + - LP: #522308 + * [Config] enable CONFIG_X86_MCE_XEON75XX + * SAUCE: AppArmor -- add linux/kref.h for struct kref + * [Config] enable CONFIG_HID_ORTEK + * enable udeb generation for arm versatile flavour + - LP: #522515 + + [ John Johansen ] + + * ubuntu: AppArmor -- update to mainline 2010-02-18 + - LP: #439560, #496110, #507069 + + [ Johnathon Harris ] + + * SAUCE: HID: add support for Ortek WKB-2000 + - LP: #405390 + + [ Upstream Kernel Changes ] + + * tpm_tis: TPM_STS_DATA_EXPECT workaround + - LP: #490487 + * x86, mce: Xeon75xx specific interface to get corrected memory error + information + * x86, mce: Rename cpu_specific_poll to mce_cpu_specific_poll + * x86, mce: Make xeon75xx memory driver dependent on PCI + * drm/edid: Unify detailed block parsing between base and extension + blocks + - LP: #500999 + * (pre-stable) eCryptfs: Add getattr function + - LP: #390833 + + -- Andy Whitcroft Thu, 18 Feb 2010 19:22:02 +0000 + +linux (2.6.32-13.18) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "enforcer -- make the enforcement configuration common" + * Revert "(pre-stable) Input: ALPS - add interleaved protocol support + (Dell E6x00 series)" + * Revert "(pre-stable) driver-core: fix devtmpfs crash on s390" + * Revert "(pre-stable) Driver-Core: devtmpfs - set root directory mode to + 0755" + * Revert "SAUCE: Adds support for COMPAL JHL90 webcam" + * Revert "SAUCE: fix kernel oops in VirtualBox during paravirt patching" + * Revert "SAUCE: make fc transport removal of target configurable" + * enforcer -- make the enforcement configuration common + * getabis -- add preempt flavour to the list + * [Config] enforce DEVTMPFS options + * [Config] armel -- cleanup to-be builtin modules + * [Config] cleanup ports configs + * [Config] enable CRYPTO_GHASH_CLMUL_NI_INTEL + - LP: #485536 + * add printdebian target to find branch target + * distclean -- do not remove debian.env + * [Config] generic-pae switch to M586TSC + - LP: #519448 + * git-ubuntu-log -- commonise duplicated log handling + * git-ubuntu-log -- tighten up Bug: NNNN matching + * git-ubuntu-log -- sort the bug numbers + + [ Chris Wilson ] + + * (pre-stable) drm/i915: Increase fb alignment to 64k + - LP: #404064 + + [ Eric Miao ] + + * arm -- enable ubuntu/ directory + + [ Huang Ying ] + + * SAUCE: crypto: ghash - Add PCLMULQDQ accelerated implementation + * SAUCE: crypto: ghash-intel - Fix building failure on x86_32 + + [ Loïc Minier ] + + * 
[Config] cleanup preempt configuration + * [Config] versatile: Fix video output + - LP: #517594 + * [Config] armel DEFAULT_MMAP_MIN_ADDR=32768 + * [Config] Large update to armel/versatile + * [Config] versatile: Add RTC support + * [Config] armel: Enable NEON + * [Config] versatile: Builtin MMC support + * [Config] versatile Builtin SCSI controller + * [Config] armel Disable dma_cache_sync callers + * [Config] armel Disable asm/time.h users + * [Config] armel Disable out of range udelay() + * [Config] armel Disable flush_cache_range() users + * [Config] armel -- Enable ubuntu/ drivers + + [ Steve Conklin ] + + * SAUCE: drm/i915: Add display hotplug event on Ironlake + * SAUCE: drm/i915: Add ACPI OpRegion support for Ironlake + + [ Upstream Kernel Changes ] + + * Revert "[Upstream]: oprofile/x86: add Xeon 7500 series support" + * Revert "Revert "[Bluetooth] Eliminate checks for impossible conditions + in IRQ handler"" + * clockevent: Don't remove broadcast device when cpu is dead + * clockevents: Add missing include to pacify sparse + * ACPI: don't cond_resched if irq is disabled + * be2net: Add support for next generation of BladeEngine device. + * be2net: Add the new PCI IDs to PCI_DEVICE_TABLE. + * mpt2sas: New device SAS2208 support is added + * ar9170: Add support for D-Link DWA 160 A2 + * powerpc/fsl: Add PCI device ids for new QoirQ chips + * davinci: dm646x: Add support for 3.x silicon revision + * Input: ALPS - add interleaved protocol support (Dell E6x00 series) + * Driver-Core: devtmpfs - set root directory mode to 0755 + * driver-core: fix devtmpfs crash on s390 + * vfs: get_sb_single() - do not pass options twice + * ALSA: hda - Add PCI IDs for Nvidia G2xx-series + * V4L/DVB (13569): smsusb: add autodetection support for five additional + Hauppauge USB IDs + * USB: mos7840: add device IDs for B&B electronics devices + * USB: ftdi_sio: add USB device ID's for B&B Electronics line + * V4L/DVB (13168): Add support for Asus Europa Hybrid DVB-T card (SAA7134 + SubVendor ID: 0x1043 Device ID: 0x4847) + * iTCO_wdt: Add support for Intel Ibex Peak + * atl1c:use common_task instead of reset_task and link_chg_task + * atl1e:disable NETIF_F_TSO6 for hardware limit + * V4L/DVB (13680a): DocBook/media: copy images after building HTML + * V4L/DVB (13680b): DocBook/media: create links for included sources + * netfilter: xtables: fix conntrack match v1 ipt-save output + * partitions: read whole sector with EFI GPT header + * partitions: use sector size for EFI GPT + * ALSA: ice1724 - Patch for suspend/resume for ESI Juli@ + * sched: Fix isolcpus boot option + * sched: Fix missing sched tunable recalculation on cpu add/remove + * nohz: Prevent clocksource wrapping during idle + * nfsd: Fix sort_pacl in fs/nfsd/nf4acl.c to actually sort groups + * timers, init: Limit the number of per cpu calibration bootup messages + * PCI: Always set prefetchable base/limit upper32 registers + * iscsi class: modify handling of replacement timeout + * NFS: Revert default r/wsize behavior + * HID: fixup quirk for NCR devices + * scsi_devinfo: update Hitachi entries (v2) + * scsi_dh: create sysfs file, dh_state for all SCSI disk devices + * scsi_transport_fc: remove invalid BUG_ON + * lpfc: fix hang on SGI ia64 platform + * libfc: fix typo in retry check on received PRLI + * libfc: fix ddp in fc_fcp for 0 xid + * fcoe: remove redundant checking of netdev->netdev_ops + * libfc: Fix wrong scsi return status under FC_DATA_UNDRUN + * libfc: lport: fix minor documentation errors + * libfc: don't WARN_ON in 
lport_timeout for RESET state + * fcoe: initialize return value in fcoe_destroy + * libfc: Fix frags in frame exceeding SKB_MAX_FRAGS in fc_fcp_send_data + * libfc: fix memory corruption caused by double frees and bad error + handling + * libfc: fix free of fc_rport_priv with timer pending + * libfc: remote port gets stuck in restart state without really + restarting + * fcoe, libfc: fix an libfc issue with queue ramp down in libfc + * fcoe: Fix checking san mac address + * fcoe: Fix getting san mac for VLAN interface + * qlge: Remove explicit setting of PCI Dev CTL reg. + * qlge: Set PCIE max read request size. + * qlge: Don't fail open when port is not initialized. + * qlge: Add handler for DCBX firmware event. + * qlge: Bonding fix for mode 6. + * PCI: AER: fix aer inject result in kernel oops + * DMI: allow omitting ident strings in DMI tables + * Input: i8042 - remove identification strings from DMI tables + * Input: i8042 - add Gigabyte M1022M to the noloop list + * Input: i8042 - add Dritek quirk for Acer Aspire 5610. + * ALSA: hda - select IbexPeak handler for Calpella + * ALSA: hda - Fix quirk for Maxdata obook4-1 + * ALSA: hda - Add missing Line-Out and PCM switches as slave + * iTCO_wdt.c - cleanup chipset documentation + * iTCO_wdt: add PCI ID for the Intel EP80579 (Tolapai) SoC + * iTCO_wdt: Add Intel Cougar Point and PCH DeviceIDs + * ahci: disable SNotification capability for ich8 + * ata_piix: fix MWDMA handling on PIIX3 + * md: fix small irregularity with start_ro module parameter + * V4L/DVB (13826): uvcvideo: Fix controls blacklisting + * cio: fix double free in case of probe failure + * cio: dont panic in non-fatal conditions + * netiucv: displayed TX bytes value much too high + * ipc ns: fix memory leak (idr) + * ALSA: hda - Fix HP T5735 automute + * hwmon: (fschmd) Fix a memleak on multiple opens of /dev/watchdog + * UBI: fix memory leak in update path + * UBI: initialise update marker + * ASoC: fix a memory-leak in wm8903 + * mac80211: check that ieee80211_set_power_mgmt only handles STA + interfaces. + * cfg80211: fix channel setting for wext + * KVM: S390: fix potential array overrun in intercept handling + * KVM: only allow one gsi per fd + * KVM: Fix race between APIC TMR and IRR + * KVM: MMU: bail out pagewalk on kvm_read_guest error + * KVM: x86: Fix host_mapping_level() + * KVM: x86: Fix probable memory leak of vcpu->arch.mce_banks + * KVM: x86: Fix leak of free lapic date in kvm_arch_vcpu_init() + * KVM: fix lock imbalance in kvm_*_irq_source_id() + * KVM: only clear irq_source_id if irqchip is present + * IPoIB: Clear ipoib_neigh.dgid in ipoib_neigh_alloc() + * x86: Reenable TSC sync check at boot, even with NONSTOP_TSC + * ACPI: enable C2 and Turbo-mode on Nehalem notebooks on A/C + - LP: #516325 + * iwlwifi: Fix throughput stall issue in HT mode for 5000 + * fnctl: f_modown should call write_lock_irqsave/restore + * x86, msr/cpuid: Pass the number of minors when unregistering MSR and + CPUID drivers. 
+ * Linux 2.6.32.7 + * scsi_lib: Fix bug in completion of bidi commands + * mptsas: Fix issue with chain pools allocation on katmai + * mm: add new 'read_cache_page_gfp()' helper function + * drm/i915: Selectively enable self-reclaim + * firewire: ohci: fix crashes with TSB43AB23 on 64bit systems + * S390: fix single stepped svcs with TRACE_IRQFLAGS=y + * x86: Set hotpluggable nodes in nodes_possible_map + * x86: Remove "x86 CPU features in debugfs" (CONFIG_X86_CPU_DEBUG) + * libata: retry FS IOs even if it has failed with AC_ERR_INVALID + * zcrypt: Do not remove coprocessor for error 8/72 + * dasd: fix possible NULL pointer errors + * ACPI: Add a generic API for _OSC -v2 + * ACPI: Add platform-wide _OSC support. + * ACPI: fix OSC regression that caused aer and pciehp not to load + * ACPI: Advertise to BIOS in _OSC: _OST on _PPC changes + * UBI: fix volume creation input checking + * e1000/e1000e: don't use small hardware rx buffers + * drm/i915: Reload hangcheck timer too for Ironlake + * Fix a leak in affs_fill_super() + * Fix failure exits in bfs_fill_super() + * fix oops in fs/9p late mount failure + * fix leak in romfs_fill_super() + * Fix remount races with symlink handling in affs + * fix affs parse_options() + * Fix failure exit in ipathfs + * mm: fix migratetype bug which slowed swapping + * FDPIC: Respect PT_GNU_STACK exec protection markings when creating + NOMMU stack + * Split 'flush_old_exec' into two functions + * sparc: TIF_ABI_PENDING bit removal + * x86: get rid of the insane TIF_ABI_PENDING bit + * Input: winbond-cir - remove dmesg spam + * x86: Disable HPET MSI on ATI SB700/SB800 + * iwlwifi: set default aggregation frame count limit to 31 + * drm/i915: only enable hotplug for detected outputs + * firewire: core: add_descriptor size check + * SECURITY: selinux, fix update_rlimit_cpu parameter + * regulator: Specify REGULATOR_CHANGE_STATUS for WM835x LED constraints + * x86: Add Dell OptiPlex 760 reboot quirk + - LP: #488319 + * x86: Add quirk for Intel DG45FC board to avoid low memory corruption + * x86/amd-iommu: Fix possible integer overflow + * clocksource: fix compilation if no GENERIC_TIME + * tcp: update the netstamp_needed counter when cloning sockets + * sky2: Fix oops in sky2_xmit_frame() after TX timeout + * net: restore ip source validation + * af_packet: Don't use skb after dev_queue_xmit() + * ax25: netrom: rose: Fix timer oopses + * KVM: allow userspace to adjust kvmclock offset + * oprofile/x86: add Xeon 7500 series support + * oprofile/x86: fix crash when profiling more than 28 events + * libata: retry link resume if necessary + * mm: percpu-vmap fix RCU list walking + * mm: purge fragmented percpu vmap blocks + * block: fix bio_add_page for non trivial merge_bvec_fn case + * Fix 'flush_old_exec()/setup_new_exec()' split + * random: drop weird m_time/a_time manipulation + * random: Remove unused inode variable + * block: fix bugs in bio-integrity mempool usage + * usb: r8a66597-hdc disable interrupts fix + * connector: Delete buggy notification code. + * be2net: Bug fix to support newer generation of BE ASIC + * be2net: Fix memset() arg ordering. 
+ * mm: flush dcache before writing into page to avoid alias + * mac80211: fix NULL pointer dereference when ftrace is enabled + * imxfb: correct location of callbacks in suspend and resume + * mx3fb: some debug and initialisation fixes + * starfire: clean up properly if firmware loading fails + * kernel/cred.c: use kmem_cache_free + * uartlite: fix crash when using as console + * pktcdvd: removing device does not remove its sysfs dir + * ath9k: fix eeprom INI values override for 2GHz-only cards + * ath9k: fix beacon slot/buffer leak + * powerpc: TIF_ABI_PENDING bit removal + * NET: fix oops at bootime in sysctl code + * Linux 2.6.32.8 + + -- Andy Whitcroft Wed, 10 Feb 2010 18:56:52 +0000 + +linux (2.6.32-12.17) lucid; urgency=low + + [ Andy Whitcroft ] + + * restore linux-image prefix -- master + * enforce -- we require SELINUX enabled -- master + * enforce -- ensure APPARMOR is our default LSM -- master + * make doc package completely optional -- master + * make source package completely optional -- master + * make linux-libc-dev completly optional -- master + * convert package disable to a deps list -- master + * allow common headers to switch from indep to arch -- master + * convert binary package disable to a deps list -- master + * add configuration option for a full source build tree -- master + * add support for uImage kernels in package control scripts + * getabis -- cleanup and parameterise repository list -- master + * getabis -- move configuration to etc/getabi -- master + * kernelconfig -- move configuration to etc -- master + * rules -- make debian/debian.env master for branch name + * set the current branch name -- master + * pull back common debian.master files into debian -- master + * enforcer -- make the enforcement configuration common + * insert-changes -- correctly link to debian/rules in DROOT + + [ Colin Watson ] + + * future-proof ddeb handling against buildd changes + + [ Eric Miao ] + + * SAUCE: Make CONFIG_{OMNIBOOK, AVERATEC_5100P, PACKARDBELL_E5} depend on + X86 + + [ Loïc Minier ] + + * Add modules.builtin.bin to prerm rm list + - LP: #516584 + + [ Tim Gardner ] + + * [Config] Implement the amd64 preempt flavour + + [ Upstream Kernel Changes ] + + * syslog: distinguish between /proc/kmsg and syscalls + - LP: #515623 + * sfc: Fix polling for slow MCDI operations + * sfc: Fix conditions for MDIO self-test + * sfc: QT202x: Remove unreliable MMD check at initialisation + * sfc: Add workspace for GMAC bug workaround to MCDI MAC_STATS buffer + * sfc: Use fixed-size buffers for MCDI NVRAM requests + + -- Andy Whitcroft Fri, 05 Feb 2010 07:09:31 +0000 + +linux (2.6.32-12.16) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: acpi battery -- delay first lookup of the battery until + first use" + * SAUCE: acpi battery -- move first lookup asynchronous + - LP: #507211 + * [Config] update configs to cleanup generic configs + * [Config] disable CONFIG_X86_CPU_DEBUG for amd64 + * [Config] enable USER_NS + - LP: #480739, #509808 + + [ Heiko Carstens ] + + * (pre-stable) driver-core: fix devtmpfs crash on s390 + - LP: #512370 + + [ John Johansen ] + + * [Config] for server and virtual flavours make CONFIG_SCSI_SYM53C8XX_2=y + - LP: #494565 + * [Config] VIRTIO=y for server/virtual flavours + - LP: #494565 + + [ Kay Sievers ] + + * (pre-stable) Driver-Core: devtmpfs - set root directory mode to 0755 + - LP: #512370 + + [ Kees Cook ] + + * SAUCE: x86: brk away from exec rand area + - LP: #452175 + + [ Leann Ogasawara ] + + * [Upstream] e1000: enhance frame fragment 
detection + - CVE-2009-4536 + * [Upstream] e1000e: enhance frame fragment detection + - CVE-2009-4538 + + [ Sebastian Kapfer ] + + * (pre-stable) Input: ALPS - add interleaved protocol support (Dell E6x00 + series) + - LP: #296610 + + [ Upstream Kernel Changes ] + + * inotify: do not reuse watch descriptors + - LP: #485556 + * inotify: only warn once for inotify problems + * revert "drivers/video/s3c-fb.c: fix clock setting for Samsung SoC + Framebuffer" + * memcg: ensure list is empty at rmdir + * drm/i915: remove loop in Ironlake interrupt handler + * block: Fix incorrect reporting of partition alignment + * x86, mce: Thermal monitoring depends on APIC being enabled + * futexes: Remove rw parameter from get_futex_key() + * page allocator: update NR_FREE_PAGES only when necessary + * x86, apic: use physical mode for IBM summit platforms + * edac: i5000_edac critical fix panic out of bounds + * x86: SGI UV: Fix mapping of MMIO registers + * mfd: WM835x GPIO direction register is not locked + * mfd: Correct WM835x ISINK ramp time defines + * ALSA: hda - Fix missing capture mixer for ALC861/660 codecs + * V4L/DVB (13868): gspca - sn9c20x: Fix test of unsigned. + * reiserfs: truncate blocks not used by a write + * HID: add device IDs for new model of Apple Wireless Keyboard + * PCI/cardbus: Add a fixup hook and fix powerpc + * Input: pmouse - move Sentelic probe down the list + * asus-laptop: add Lenovo SL hotkey support + * sched: Fix cpu_clock() in NMIs, on !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK + * sparc64: Fix NMI programming when perf events are active. + * sparc64: Fix Niagara2 perf event handling. + * i2c: Do not use device name after device_unregister + * i2c/pca: Don't use *_interruptible + * serial/8250_pnp: add a new Fujitsu Wacom Tablet PC device + * sched: Fix task priority bug + * vfs: Fix vmtruncate() regression + * Linux 2.6.32.5 + * x86, msr/cpuid: Register enough minors for the MSR and CPUID drivers + * V4L/DVB (13900): gspca - sunplus: Fix bridge exchanges. 
+ * Staging: asus_oled: fix oops in 2.6.32.2 + * Staging: hv: fix smp problems in the hyperv core code + * tty: fix race in tty_fasync + * ecryptfs: use after free + * ecryptfs: initialize private persistent file before dereferencing + pointer + * nozomi: quick fix for the close/close bug + * serial: 8250_pnp: use wildcard for serial Wacom tablets + * usb: serial: fix memory leak in generic driver + * USB: fix bitmask merge error + * USB: Don't use GFP_KERNEL while we cannot reset a storage device + * USB: EHCI: fix handling of unusual interrupt intervals + * USB: EHCI & UHCI: fix race between root-hub suspend and port resume + * USB: add missing delay during remote wakeup + * USB: add speed values for USB 3.0 and wireless controllers + * ACPI: EC: Accelerate query execution + * ACPI: EC: Add wait for irq storm + * SCSI: enclosure: fix oops while iterating enclosure_status array + * drm/i915: Read the response after issuing DDC bus switch command + * drm/i915: try another possible DDC bus for the SDVO device with + multiple outputs + * block: bdev_stack_limits wrapper + * DM: Fix device mapper topology stacking + * x86/PCI/PAT: return EINVAL for pci mmap WC request for !pat_enabled + * USB: fix usbstorage for 2770:915d delivers no FAT + * vmalloc: remove BUG_ON due to racy counting of VM_LAZY_FREE + * perf timechart: Use tid not pid for COMM change + * perf events: Dont report side-band events on each cpu for + per-task-per-cpu events + * perf: Honour event state for aux stream data + * Linux 2.6.32.6 + + -- Andy Whitcroft Wed, 27 Jan 2010 16:40:23 +0000 + +linux (2.6.32-11.15) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "(pre-stable) drm/radeon/kms: fix crtc vblank update for r600" + * Revert "(pre-stable) sched: Fix balance vs hotplug race" + * Revert "[Upstream] acerhdf: Limit modalias matching to supported + boards" + * Revert "[Upstream] mmc: prevent dangling block device from accessing + stale queues" + * Revert "SAUCE: Fix nx_enable reporting" + * Revert "SAUCE: [x86] fix report of cs-limit nx-emulation" + * Revert "SAUCE: [x86] implement cs-limit nx-emulation for ia32" + * SAUCE: i915 -- disable powersave by default + - LP: #492392 + + [ Kees Cook ] + + * SAUCE: [x86] implement cs-limit nx-emulation for ia32 + - LP: #369978 + * SAUCE: [x86] fix report of cs-limit nx-emulation + - LP: #454285 + * SAUCE: Fix nx_enable reporting + - LP: #454285 + + [ Tim Gardner ] + + * [Upstream] b43: Declare all possible firmware files. 
+ - LP: #488636 + * [Config] updateconfigs after adding pvscsi + - LP: #497156 + * [Config] CONFIG_BT=m + + [ Upstream Kernel Changes ] + + * Revert "x86: Side-step lguest problem by only building cmpxchg8b_emu + for pre-Pentium" + * SCSI: ipr: fix EEH recovery + * SCSI: qla2xxx: dpc thread can execute before scsi host has been added + * SCSI: st: fix mdata->page_order handling + * SCSI: fc class: fix fc_transport_init error handling + * sched: Fix task_hot() test order + * x86, cpuid: Add "volatile" to asm in native_cpuid() + * sched: Select_task_rq_fair() must honour SD_LOAD_BALANCE + * clockevents: Prevent clockevent_devices list corruption on cpu hotplug + * pata_hpt3x2n: fix clock turnaround + * pata_cmd64x: fix overclocking of UDMA0-2 modes + * ASoC: wm8974: fix a wrong bit definition + * sound: sgio2audio/pdaudiocf/usb-audio: initialize PCM buffer + * ALSA: hda - Fix missing capsrc_nids for ALC88x + * acerhdf: limit modalias matching to supported + - LP: #435958 + * ACPI: EC: Fix MSI DMI detection + * ACPI: Use the return result of ACPI lid notifier chain correctly + * powerpc: Handle VSX alignment faults correctly in little-endian mode + * ASoC: Do not write to invalid registers on the wm9712. + * drm/radeon: fix build on 64-bit with some compilers. + * USB: emi62: fix crash when trying to load EMI 6|2 firmware + * USB: option: support hi speed for modem Haier CE100 + * USB: Fix a bug on appledisplay.c regarding signedness + * USB: musb: gadget_ep0: avoid SetupEnd interrupt + * Bluetooth: Prevent ill-timed autosuspend in USB driver + * USB: rename usb_configure_device + * USB: fix bugs in usb_(de)authorize_device + * drivers/net/usb: Correct code taking the size of a pointer + * x86: SGI UV: Fix writes to led registers on remote uv hubs + * md: Fix unfortunate interaction with evms + * dma: at_hdmac: correct incompatible type for argument 1 of + 'spin_lock_bh' + * dma-debug: Do not add notifier when dma debugging is disabled. 
+ * dma-debug: Fix bug causing build warning + * cifs: NULL out tcon, pSesInfo, and srvTcp pointers when chasing DFS + referrals + * x86/amd-iommu: Fix initialization failure panic + * ioat3: fix p-disabled q-continuation + * ioat2,3: put channel hardware in known state at init + * KVM: MMU: remove prefault from invlpg handler + * KVM: LAPIC: make sure IRR bitmap is scanned after vm load + * Libertas: fix buffer overflow in lbs_get_essid() + * iwmc3200wifi: fix array out-of-boundary access + * mac80211: fix propagation of failed hardware reconfigurations + * mac80211: fix WMM AP settings application + * mac80211: Fix IBSS merge + * cfg80211: fix race between deauth and assoc response + * ath5k: fix SWI calibration interrupt storm + * ath9k: wake hardware for interface IBSS/AP/Mesh removal + * ath9k: Fix TX queue draining + * ath9k: fix missed error codes in the tx status check + * ath9k: wake hardware during AMPDU TX actions + * ath9k: fix suspend by waking device prior to stop + * ath9k_hw: Fix possible OOB array indexing in gen_timer_index[] on + 64-bit + * ath9k_hw: Fix AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB and its shift value + in 0x4054 + * iwl3945: disable power save + * iwl3945: fix panic in iwl3945 driver + * iwlwifi: fix EEPROM/OTP reading endian annotations and a bug + * iwlwifi: fix more eeprom endian bugs + * iwlwifi: fix 40MHz operation setting on cards that do not allow it + * mac80211: fix race with suspend and dynamic_ps_disable_work + * NOMMU: Optimise away the {dac_,}mmap_min_addr tests + * 'sysctl_max_map_count' should be non-negative + * kernel/sysctl.c: fix the incomplete part of + sysctl_max_map_count-should-be-non-negative.patch + * V4L/DVB (13596): ov511.c typo: lock => unlock + * x86/ptrace: make genregs[32]_get/set more robust + * memcg: avoid oom-killing innocent task in case of use_hierarchy + * e100: Fix broken cbs accounting due to missing memset. + * ipv6: reassembly: use seperate reassembly queues for conntrack and + local delivery + * netfilter: fix crashes in bridge netfilter caused by fragment jumps + * hwmon: (sht15) Off-by-one error in array index + incorrect constants + * b43: avoid PPC fault during resume + * Keys: KEYCTL_SESSION_TO_PARENT needs TIF_NOTIFY_RESUME architecture + support + * sched: Fix balance vs hotplug race + * drm/radeon/kms: fix crtc vblank update for r600 + * drm: disable all the possible outputs/crtcs before entering KMS mode + * S390: dasd: support DIAG access for read-only devices + * xen: fix is_disconnected_device/exists_disconnected_device + * xen: improvement to wait_for_devices() + * xen: wait up to 5 minutes for device connetion + * orinoco: fix GFP_KERNEL in orinoco_set_key with interrupts disabled + * udf: Try harder when looking for VAT inode + * Add unlocked version of inode_add_bytes() function + * quota: decouple fs reserved space from quota reservation + * ext4: Convert to generic reserved quota's space management. 
+ * ext4: fix sleep inside spinlock issue with quota and dealloc (#14739) + * x86, msr: Unify rdmsr_on_cpus/wrmsr_on_cpus + * cpumask: use modern cpumask style in drivers/edac/amd64_edac.c + * amd64_edac: unify MCGCTL ECC switching + * x86, msr: Add support for non-contiguous cpumasks + * x86, msr: msrs_alloc/free for CONFIG_SMP=n + * amd64_edac: fix driver instance freeing + * amd64_edac: make driver loading more robust + * amd64_edac: fix forcing module load/unload + * sched: Sched_rt_periodic_timer vs cpu hotplug + * ext4: Update documentation to correct the inode_readahead_blks option + name + * lguest: fix bug in setting guest GDT entry + * vmscan: do not evict inactive pages when skipping an active list scan + * ksm: fix mlockfreed to munlocked + * rt2x00: Disable powersaving for rt61pci and rt2800pci. + * generic_permission: MAY_OPEN is not write access + * Linux 2.6.32.3 + * untangle the do_mremap() mess + * fasync: split 'fasync_helper()' into separate add/remove functions + * ASoC: fix params_rate() macro use in several codecs + * modules: Skip empty sections when exporting section notes + * exofs: simple_write_end does not mark_inode_dirty + * nfsd: make sure data is on disk before calling ->fsync + * sunrpc: fix peername failed on closed listener + * SUNRPC: Fix up an error return value in + gss_import_sec_context_kerberos() + * SUNRPC: Fix the return value in gss_import_sec_context() + * sunrpc: on successful gss error pipe write, don't return error + * drm/i915: Update LVDS connector status when receiving ACPI LID event + * drm/i915: fix order of fence release wrt flushing + * drm/i915: Permit pinning whilst the device is 'suspended' + * drm: remove address mask param for drm_pci_alloc() + * drm/i915: Enable/disable the dithering for LVDS based on VBT setting + * drm/i915: Make the BPC in FDI rx/transcoder be consistent with that in + pipeconf on Ironlake + * drm/i915: Select the correct BPC for LVDS on Ironlake + * drm/i915: fix unused var + * rtc_cmos: convert shutdown to new pnp_driver->shutdown + * drivers/cpuidle/governors/menu.c: fix undefined reference to + `__udivdi3' + * cgroups: fix 2.6.32 regression causing BUG_ON() in cgroup_diput() + * lib/rational.c needs module.h + * dma-debug: allow DMA_BIDIRECTIONAL mappings to be synced with + DMA_FROM_DEVICE and + * kernel/signal.c: fix kernel information leak with print-fatal-signals=1 + * mmc_block: add dev_t initialization check + * mmc_block: fix probe error cleanup bug + * mmc_block: fix queue cleanup + * ALSA: hda - Fix ALC861-VD capture source mixer + * ALSA: ac97: Add Dell Dimension 2400 to Headphone/Line Jack Sense + blacklist + * ALSA: atiixp: Specify codec for Foxconn RC4107MA-RS2 + - LP: #498863 + * ASoC: Fix WM8350 DSP mode B configuration + * netfilter: ebtables: enforce CAP_NET_ADMIN + * netfilter: nf_ct_ftp: fix out of bounds read in update_nl_seq() + * hwmon: (coretemp) Fix TjMax for Atom N450/D410/D510 CPUs + * hwmon: (adt7462) Fix pin 28 monitoring + * quota: Fix dquot_transfer for filesystems different from ext4 + * xen: fix hang on suspend. 
+ * iwlwifi: fix iwl_queue_used bug when read_ptr == write_ptr + * ath5k: Fix eeprom checksum check for custom sized eeproms + * cfg80211: fix syntax error on user regulatory hints + * iwl: off by one bug + * mac80211: add missing sanity checks for action frames + * drm/i915: remove render reclock support + * libertas: Remove carrier signaling from the scan code + * kernel/sysctl.c: fix stable merge error in NOMMU mmap_min_addr + * mac80211: fix skb buffering issue (and fixes to that) + * fix braindamage in audit_tree.c untag_chunk() + * fix more leaks in audit_tree.c tag_chunk() + * module: handle ppc64 relocating kcrctabs when CONFIG_RELOCATABLE=y + * ipv6: skb_dst() can be NULL in ipv6_hop_jumbo(). + * agp/intel-agp: Clear entire GTT on startup + * Linux 2.6.32.4 + * ethtool: Add reset operation + * gro: Name the GRO result enumeration type + * gro: Change all receive functions to return GRO result codes + * sfc: 10Xpress: Initialise pause advertising flags + * sfc: 10Xpress: Report support for pause frames + * sfc: Remove redundant header gmii.h + * sfc: Remove redundant hardware initialisation + * sfc: Rename Falcon-specific board code and types + * sfc: Remove boards.h, moving last remaining declaration to falcon.h + * sfc: Remove versioned bitfield macros + * sfc: Move RX data FIFO thresholds out of struct efx_nic_type + * sfc: Update hardware definitions for Siena + * sfc: Rename register I/O header and functions used by both Falcon and + Siena + * sfc: Eliminate indirect lookups of queue size constants + * sfc: Define DMA address mask explicitly in terms of descriptor field + width + * sfc: Move all TX DMA length limiting into tx.c + * sfc: Change order of device removal to reverse of probe order + * sfc: Remove declarations of nonexistent functions + * sfc: Move efx_xmit_done() declaration into correct stanza + * sfc: Move shared members of struct falcon_nic_data into struct efx_nic + * sfc: Maintain interrupt moderation values in ticks, not microseconds + * sfc: Removed kernel-doc for nonexistent member of efx_phy_operations + * sfc: Remove pointless abstraction of memory BAR number + * sfc: Remove incorrect assertion from efx_pci_remove_main() + * sfc: Remove unnecessary tests of efx->membase + * sfc: Move MTD probe after netdev registration and name allocation + * sfc: Remove unused code for non-autoneg speed/duplex switching + * sfc: Rename 'xfp' file and functions to reflect reality + * sfc: Really allow RX checksum offload to be disabled + * sfc: Feed GRO result into RX allocation policy and interrupt moderation + * sfc: Enable heuristic selection between page and skb RX buffers + * sfc: Remove pointless abstraction of memory BAR number (2) + * sfc: Remove redundant gotos from __efx_rx_packet() + * sfc: Remove ridiculously paranoid assertions + * sfc: Move assertions and buffer cleanup earlier in efx_rx_packet_lro() + * sfc: Record RX queue number on GRO path + * sfc: SFT9001: Reset LED configuration correctly after blinking + * sfc: Use a single blink implementation + * sfc: Rename efx_board::init_leds to init_phy and use for SFN4111T + * sfc: Make board information explicitly Falcon-specific + * sfc: Move definition of struct falcon_nic_data into falcon.h + * sfc: Move struct falcon_board into struct falcon_nic_data + * sfc: Move all I2C stuff into struct falcon_board + * sfc: Gather link state fields in struct efx_nic into new struct + efx_link_state + * sfc: Remove unnecessary casts to struct sk_buff * + * sfc: Remove redundant efx_xmit() function + * sfc: Combine 
high-level header files + * sfc: Log interrupt and reset type names, not numbers + * sfc: Fix descriptor cache sizes + * sfc: Treat all MAC registers as 128-bit + * sfc: Strengthen EFX_ASSERT_RESET_SERIALISED + * sfc: Comment corrections + * sfc: Remove unused constant + * sfc: Clean up struct falcon_board and struct falcon_board_data + * sfc: Fix bugs in RX queue flushing + * sfc: Remove unused function efx_flush_queues() + * sfc: Only switch Falcon MAC clocks as necessary + * sfc: Hold MAC lock for longer in efx_init_port() + * sfc: Split MAC stats DMA initiation and completion + * sfc: Move Falcon board/PHY/MAC monitoring code to falcon.c + * sfc: Simplify XMAC link polling + * sfc: Change MAC promiscuity and multicast hash at the same time + * sfc: Move inline comment into kernel-doc + * sfc: Do not set net_device::trans_start in self-test + * sfc: Simplify PHY polling + * sfc: QT202x: Reset before reading PHY id + * sfc: Replace MDIO spinlock with mutex + * sfc: Always start Falcon using the XMAC + * sfc: Limit some hardware workarounds to Falcon + * sfc: Remove EFX_WORKAROUND_9141 macro + * sfc: Remove another unused workaround macro + * sfc: Remove some redundant whitespace + * sfc: Decouple NIC revision number from Falcon PCI revision number + * sfc: Move descriptor cache base addresses to struct efx_nic_type + * sfc: Clean up RX event handling + * sfc: Remove redundant writes to INT_ADR_KER + * sfc: Remove duplicate hardware structure definitions + * sfc: Turn pause frame generation on and off at the MAC, not the RX FIFO + * sfc: Move Falcon NIC operations to efx_nic_type + * sfc: Refactor link configuration + * sfc: Generalise link state monitoring + * sfc: Add power-management and wake-on-LAN support + * sfc: Implement ethtool reset operation + * sfc: Add efx_nic_type operation for register self-test + * sfc: Add efx_nic_type operation for NVRAM self-test + * sfc: Add efx_nic_type operation for identity LED control + * sfc: Separate shared NIC code from Falcon-specific and rename + accordingly + * sfc: Fold falcon_probe_nic_variant() into falcon_probe_nic() + * sfc: Extend loopback mode enumeration + * sfc: Remove static PHY data and enumerations + * sfc: Extend MTD driver for use with new NICs + * sfc: Allow for additional checksum offload features + * sfc: Rename falcon.h to nic.h + * sfc: Move shared NIC code from falcon.c to new source file nic.c + * sfc: Add firmware protocol definitions (MCDI) + * sfc: Add support for SFC9000 family (1) + * sfc: Add support for SFC9000 family (2) + * sfc: Implement TSO for TCP/IPv6 + * sfc: Update version, copyright dates, authors + * drivers/net/sfc: Correct code taking the size of a pointer + * sfc: Move PHY software state initialisation from init() into probe() + * sfc: Include XGXS in XMAC link status check except in XGMII loopback + * sfc: Fix DMA mapping cleanup in case of an error in TSO + * sfc: QT2025C: Work around PHY bug + * sfc: QT2025C: Switch into self-configure mode when not in loopback + * sfc: QT2025C: Work around PHY firmware initialisation bug + * sfc: QT2025C: Add error message for suspected bad SFP+ cables + * sfc: Disable TX descriptor prefetch watchdog + * [SCSI] vmw_pvscsi: SCSI driver for VMware's virtual HBA. 
+ - LP: #497156 + + -- Andy Whitcroft Tue, 19 Jan 2010 16:12:47 +0000 + +linux (2.6.32-10.14) lucid; urgency=low + + [ Alex Deucher ] + + * SAUCE: drm/radeon/kms: fix LVDS setup on r4xx + - LP: #493795 + + [ Andy Whitcroft ] + + * Revert "(pre-stable) acpi: Use the ARB_DISABLE for the CPU which model + id is less than 0x0f." + * config-check -- ensure the checks get run at build time + * config-check -- check the processed config during updateconfigs + * config-check -- CONFIG_SECCOMP may not be present + * TUN is now built in ignore + * SAUCE: acpi battery -- delay first lookup of the battery until first + use + * SAUCE: async_populate_rootfs: move rootfs init earlier + * ubuntu: AppArmor -- update to mainline 2010-01-06 + * SAUCE: move RLIMIT_CORE pipe dumper marker to 1 + - LP: #498525 + + [ Dave Airlie ] + + * (pre-stable) drm/radeon/kms: fix crtc vblank update for r600 + + [ Leann Ogasawara ] + + * Add asix to nic-usb-modules file + - LP: #499785 + + [ Peter Zijlstra ] + + * (pre-stable) sched: Fix balance vs hotplug race + + [ Tim Gardner ] + + * [Config] Enable CONFIG_FUNCTION_TRACER + - LP: #497989 + * [Config] Drop lpia from getabis + * [Config] Build in TUN/TAP driver + - LP: #499491 + * [Config] DH_COMPAT=5 + + [ Upstream Kernel Changes ] + + * Revert "(pre-stable) drm/i915: Avoid NULL dereference with + component_only tv_modes" + * Revert "(pre-stable) drm/i915: Fix sync to vblank when VGA output is + turned off" + * USB: usb-storage: fix bug in fill_inquiry + * USB: option: add pid for ZTE + * firewire: ohci: handle receive packets with a data length of zero + * rcu: Prepare for synchronization fixes: clean up for non-NO_HZ handling + of ->completed counter + * rcu: Fix synchronization for rcu_process_gp_end() uses of ->completed + counter + * rcu: Fix note_new_gpnum() uses of ->gpnum + * rcu: Remove inline from forward-referenced functions + * perf_event: Fix invalid type in ioctl definition + * perf_event: Initialize data.period in perf_swevent_hrtimer() + * perf: Don't free perf_mmap_data until work has been done + * PM / Runtime: Fix lockdep warning in __pm_runtime_set_status() + * sched: Check for an idle shared cache in select_task_rq_fair() + * sched: Fix affinity logic in select_task_rq_fair() + * sched: Rate-limit newidle + * sched: Fix and clean up rate-limit newidle code + * x86/amd-iommu: attach devices to pre-allocated domains early + * x86/amd-iommu: un__init iommu_setup_msi + * x86, Calgary IOMMU quirk: Find nearest matching Calgary while walking + up the PCI tree + * x86: Fix iommu=nodac parameter handling + * x86: GART: pci-gart_64.c: Use correct length in strncmp + * x86: ASUS P4S800 reboot=bios quirk + - LP: #366682 + * x86, apic: Enable lapic nmi watchdog on AMD Family 11h + * ssb: Fix range check in sprom write + * ath5k: allow setting txpower to 0 + * ath5k: enable EEPROM checksum check + * hrtimer: Fix /proc/timer_list regression + * ALSA: hrtimer - Fix lock-up + * ALSA: hda - Terradici HDA controllers does not support 64-bit mode + * KVM: x86 emulator: limit instructions to 15 bytes + * KVM: s390: Fix prefix register checking in arch/s390/kvm/sigp.c + * KVM: s390: Make psw available on all exits, not just a subset + * KVM: fix irq_source_id size verification + * KVM: x86: include pvclock MSRs in msrs_to_save + * x86: Prevent GCC 4.4.x (pentium-mmx et al) function prologue wreckage + * x86: Use -maccumulate-outgoing-args for sane mcount prologues + * x86, mce: don't restart timer if disabled + * x86/mce: Set up timer unconditionally + * x86: SGI UV: Fix 
BAU initialization + * x86: Fix duplicated UV BAU interrupt vector + * x86: Add new Intel CPU cache size descriptors + * x86: Fix typo in Intel CPU cache size descriptor + * pata_hpt{37x|3x2n}: fix timing register masks (take 2) + * s390: clear high-order bits of registers after sam64 + * V4L/DVB: Fix test in copy_reg_bits() + * bsdacct: fix uid/gid misreporting + * UBI: flush wl before clearing update marker + * jbd2: don't wipe the journal on a failed journal checksum + * USB: xhci: Add correct email and files to MAINTAINERS entry. + * USB: musb_gadget_ep0: fix unhandled endpoint 0 IRQs, again + * USB: option.c: add support for D-Link DWM-162-U5 + * USB: usbtmc: repeat usb_bulk_msg until whole message is transfered + * USB: usb-storage: add BAD_SENSE flag + * USB: Close usb_find_interface race v3 + * pxa/em-x270: fix usb hub power up/reset sequence + * hfs: fix a potential buffer overflow + * SUNRPC: IS_ERR/PTR_ERR confusion + * NFS: Fix nfs_migrate_page() + * md/bitmap: protect against bitmap removal while being updated. + * futex: Take mmap_sem for get_user_pages in fault_in_user_writeable + * devpts_get_tty() should validate inode + * debugfs: fix create mutex racy fops and private data + * Driver core: fix race in dev_driver_string + * Serial: Do not read IIR in serial8250_start_tx when UART_BUG_TXEN + * mac80211: Fix bug in computing crc over dynamic IEs in beacon + * mac80211: Fixed bug in mesh portal paths + * mac80211: Revert 'Use correct sign for mesh active path refresh' + * mac80211: fix scan abort sanity checks + * wireless: correctly report signal value for IEEE80211_HW_SIGNAL_UNSPEC + * rtl8187: Fix wrong rfkill switch mask for some models + * x86: Fix bogus warning in apic_noop.apic_write() + * mm: hugetlb: fix hugepage memory leak in mincore() + * mm: hugetlb: fix hugepage memory leak in walk_page_range() + * powerpc/windfarm: Add detection for second cpu pump + * powerpc/therm_adt746x: Record pwm invert bit at module load time] + * powerpc: Fix usage of 64-bit instruction in 32-bit altivec code + * drm/radeon/kms: Add quirk for HIS X1300 board + * drm/radeon/kms: handle vblanks properly with dpms on + * drm/radeon/kms: fix legacy crtc2 dpms + * drm/radeon/kms: fix vram setup on rs600 + * drm/radeon/kms: rs6xx/rs740: clamp vram to aperture size + * drm/ttm: Fix build failure due to missing struct page + * drm/i915: Set the error code after failing to insert new offset into mm + ht. + * drm/i915: Add the missing clonemask for display port on Ironlake + * xen/xenbus: make DEVICE_ATTR()s static + * xen: re-register runstate area earlier on resume. + * xen: restore runstate_info even if !have_vcpu_info_placement + * xen: correctly restore pfn_to_mfn_list_list after resume + * xen: register timer interrupt with IRQF_TIMER + * xen: register runstate on secondary CPUs + * xen: don't call dpm_resume_noirq() with interrupts disabled. + * xen: register runstate info for boot CPU early + * xen: call clock resume notifier on all CPUs + * xen: improve error handling in do_suspend. + * xen: don't leak IRQs over suspend/resume. + * xen: use iret for return from 64b kernel to 32b usermode + * xen: explicitly create/destroy stop_machine workqueues outside + suspend/resume region. + * Xen balloon: fix totalram_pages counting. + * xen: try harder to balloon up under memory pressure. 
+ * dm exception store: free tmp_store on persistent flag error + * dm snapshot: only take lock for statustype info not table + * dm crypt: move private iv fields to structs + * dm crypt: restructure essiv error path + * dm: avoid _hash_lock deadlock + * dm snapshot: cope with chunk size larger than origin + * dm crypt: separate essiv allocation from initialisation + * dm crypt: make wipe message also wipe essiv key + * slc90e66: fix UDMA handling + * tcp: Stalling connections: Fix timeout calculation routine + * ip_fragment: also adjust skb->truesize for packets not owned by a + socket + * b44 WOL setup: one-bit-off stack corruption kernel panic fix + * sparc64: Don't specify IRQF_SHARED for LDC interrupts. + * sparc64: Fix overly strict range type matching for PCI devices. + * sparc64: Fix stack debugging IRQ stack regression. + * sparc: Set UTS_MACHINE correctly. + * b43legacy: avoid PPC fault during resume + * tracing: Fix event format export + * ath9k: Fix TX hang poll routine + * ath9k: fix processing of TX PS null data frames + * ath9k: Fix maximum tx fifo settings for single stream devices + * ath9k: fix tx status reporting + * mac80211: Fix dynamic power save for scanning. + * drm/i915: Fix sync to vblank when VGA output is turned off + * memcg: fix memory.memsw.usage_in_bytes for root cgroup + * thinkpad-acpi: fix default brightness_mode for R50e/R51 + * thinkpad-acpi: preserve rfkill state across suspend/resume + * ipw2100: fix rebooting hang with driver loaded + * matroxfb: fix problems with display stability + * acerhdf: add new BIOS versions + * asus-laptop: change light sens default values. + * vmalloc: conditionalize build of pcpu_get_vm_areas() + * ACPI: Use the ARB_DISABLE for the CPU which model id is less than 0x0f. + * net: Fix userspace RTM_NEWLINK notifications. + * ext3: Fix data / filesystem corruption when write fails to copy data + * V4L/DVB (13116): gspca - ov519: Webcam 041e:4067 added. + * bcm63xx_enet: fix compilation failure after get_stats_count removal + * x86: Under BIOS control, restore AP's APIC_LVTTHMR to the BSP value + * drm/i915: Avoid NULL dereference with component_only tv_modes + * drm/i915: PineView only has LVDS and CRT ports + * drm/i915: Fix LVDS stability issue on Ironlake + * mm: sigbus instead of abusing oom + * ipvs: zero usvc and udest + * jffs2: Fix long-standing bug with symlink garbage collection. + * intel-iommu: Detect DMAR in hyperspace at probe time. + * intel-iommu: Apply BIOS sanity checks for interrupt remapping too. + * intel-iommu: Check for an RMRR which ends before it starts. 
+ * intel-iommu: Fix oops with intel_iommu=igfx_off + * intel-iommu: ignore page table validation in pass through mode + * netfilter: xtables: document minimal required version + * perf_event: Fix incorrect range check on cpu number + * implement early_io{re,un}map for ia64 + * Linux 2.6.32.2 + + -- Andy Whitcroft Thu, 07 Jan 2010 15:28:43 +0000 + +linux (2.6.32-9.13) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_B43_PHY_LP + - LP: #493059 + * include modules.builtin in the binary debs + * config-check -- add a configuration enforcer + * config-check -- add a unit-test suite to the checker + * [Config] Enable CONFIG_SYN_COOKIES for versatile + * [Config] Enable CONFIG_SECURITY_SMACK for ports + * [Config] Enable CONFIG_SECURITY_FILE_CAPABILITIES for ports + * [Config] Disable CONFIG_COMPAT_BRK for ports + * getabis -- add armel versatile to the list + + [ Brad Figg ] + + * SAUCE: Increase the default prealloc buffer for HDA audio devices + (non-modem) + + [ Manoj Iyer ] + + * ubuntu: onmibook -- Added missing BOM file + + [ Tim Gardner ] + + * ubuntu: fsam7400 -- Cleanup Makefile + + [ Upstream Kernel Changes ] + + * Revert "ext4: Fix insufficient checks in EXT4_IOC_MOVE_EXT" + * signal: Fix alternate signal stack check + * SCSI: scsi_lib_dma: fix bug with dma maps on nested scsi objects + * SCSI: osd_protocol.h: Add missing #include + * SCSI: megaraid_sas: fix 64 bit sense pointer truncation + * ext4: fix potential buffer head leak when add_dirent_to_buf() returns + ENOSPC + * ext4: avoid divide by zero when trying to mount a corrupted file system + * ext4: fix the returned block count if EXT4_IOC_MOVE_EXT fails + * ext4: fix lock order problem in ext4_move_extents() + * ext4: fix possible recursive locking warning in EXT4_IOC_MOVE_EXT + * ext4: plug a buffer_head leak in an error path of ext4_iget() + * ext4: make sure directory and symlink blocks are revoked + * ext4: fix i_flags access in ext4_da_writepages_trans_blocks() + * ext4: journal all modifications in ext4_xattr_set_handle + * ext4: don't update the superblock in ext4_statfs() + * ext4: fix uninit block bitmap initialization when s_meta_first_bg is + non-zero + * ext4: fix block validity checks so they work correctly with meta_bg + * ext4: avoid issuing unnecessary barriers + * ext4: fix error handling in ext4_ind_get_blocks() + * ext4: make trim/discard optional (and off by default) + * ext4: make "norecovery" an alias for "noload" + * ext4: Fix double-free of blocks with EXT4_IOC_MOVE_EXT + * ext4: initialize moved_len before calling ext4_move_extents() + * ext4: move_extent_per_page() cleanup + * jbd2: Add ENOMEM checking in and for + jbd2_journal_write_metadata_buffer() + * ext4: Return the PTR_ERR of the correct pointer in + setup_new_group_blocks() + * ext4: Avoid data / filesystem corruption when write fails to copy data + * ext4: wait for log to commit when umounting + * ext4: remove blocks from inode prealloc list on failure + * ext4: ext4_get_reserved_space() must return bytes instead of blocks + * ext4: quota macros cleanup + * ext4: fix incorrect block reservation on quota transfer. + * ext4: Wait for proper transaction commit on fsync + * ext4: Fix insufficient checks in EXT4_IOC_MOVE_EXT + * ext4: Fix potential fiemap deadlock (mmap_sem vs. 
i_data_sem) + * Linux 2.6.32.1 + * kbuild: generate modules.builtin + * (pre-stable) drm/i915: Fix sync to vblank when VGA output is turned off + - LP: #494461 + * (pre-stable) drm/i915: Avoid NULL dereference with component_only + tv_modes + - LP: #494045 + + [ Zhao Yakui ] + + * (pre-stable) acpi: Use the ARB_DISABLE for the CPU which model id is + less than 0x0f. + - LP: #481765 + + -- Andy Whitcroft Thu, 17 Dec 2009 15:41:21 +0000 + +linux (2.6.32-8.12) lucid; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: AppArmor -- add linux/err.h for ERR_PTR + + -- Andy Whitcroft Sat, 12 Dec 2009 10:56:16 +0000 + +linux (2.6.32-8.11) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: default ATI Radeon KMS to off until userspace catches + up" + * Revert "SAUCE: AppArmor: Fix oops there is no tracer and doing unsafe + transition." + * Revert "SAUCE: AppArmor: Fix refcounting bug causing leak of creds" + * Revert "SAUCE: AppArmor: Fix cap audit_caching preemption disabling" + * Revert "SAUCE: AppArmor: Fix Oops when in apparmor_bprm_set_creds" + * Revert "SAUCE: AppArmor: Fix oops after profile removal" + * Revert "SAUCE: AppArmor: AppArmor disallows truncate of deleted files." + * Revert "SAUCE: AppArmor: AppArmor fails to audit change_hat correctly" + * Revert "SAUCE: AppArmor: Policy load and replacement can fail to alloc + mem" + * Revert "SAUCE: AppArmor: AppArmor wrongly reports allow perms as + denied" + * Revert "SAUCE: AppArmor: Fix mediation of "deleted" paths" + * Revert "SAUCE: AppArmor: Fix off by 2 error in getprocattr mem + allocation" + * Revert "SAUCE: AppArmor: Set error code after structure + initialization." + * Revert "AppArmor -- fix pstrace_may_access rename" + * Revert "ubuntu: AppArmor security module" + * Revert "SAUCE: Add config option to set a default LSM" + * Revert "ubuntu: fsam7400 -- sw kill switch driver" + * Revert "[Config] fsam7400 -- enable" + * Revert "[Config] AUFS -- enable" + * Revert "ubuntu: AUFS -- aufs2-30 20090727" + * Revert "ubuntu: AUFS -- export various core functions -- fixes" + * Revert "ubuntu: AUFS -- export various core functions" + * Revert "[Config] ubuntu/iscsitarget -- disable" + * Revert "[Config] iscsitarget -- enable" + * Revert "ubuntu: iscsitarget -- SVN revision r214" + * update Vcs-Git to point to the correct repository + - LP: #493589 + * update build environment overrides to lucid + - LP: #493589 + * [Config] enable CONFIG_DEVTMPFS + * [Config] update all configs following AppArmor 2009-12-08 update + * SAUCE: isapnp_init: make isa PNP scans occur async + * [Config] fsam7400 -- enable + * [Config] omnibook -- enable + * [Config] cleanup CONFIG_AUDIT + * ubuntu: AUFS -- export various core functions (aufs2-base.patch) + * ubuntu: AUFS -- export various core functions (aufs2-standalone.patch) + * ubuntu: AUFS -- aufs2 20091209 + * [Config] AUFS -- enable + * [Config] iscsitarget -- enable + + [ Arjan van de Ven ] + + * SAUCE: KMS: cache the EDID information of the LVDS + + [ Colin Watson ] + + * bnx2: update d-i firmware filenames + - LP: #494052 + * add cdc_ether to nic-usb-modules udeb + - LP: #495060 + + [ John Johansen ] + + * ubuntu: AppArmor -- mainline 2009-10-08 + + [ Manoj Iyer ] + + * ubuntu: fsam7400 -- kill switch for Fujitsu Siemens Amilo M 7400 + * ubuntu: omnibook -- support Toshiba (HP) netbooks + * ubuntu: iscsitarget --- version 1.4.19 + - LP: #494693 + + [ Surbhi Palande ] + + * SAUCE: Make populate_rootfs asynchronous + + [ Tim Gardner ] + + * Parallelize flavour builds and packaging + * [Config] 
Enable CONFIG_KSM + + [ Upstream Kernel Changes ] + + * Config option to set a default LSM + * LSM: Add security_path_chroot(). + * LSM: Add security_path_chroot(). + * LSM: Move security_path_chmod()/security_path_chown() to after + mutex_lock(). + * ext4: Fix insufficient checks in EXT4_IOC_MOVE_EXT + + -- Andy Whitcroft Fri, 11 Dec 2009 17:45:19 +0000 + +linux (2.6.32-7.10) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] disable CONFIG_THUMB2_KERNEL to fix arm FTBFS + + -- Andy Whitcroft Sun, 06 Dec 2009 12:56:48 +0000 + +linux (2.6.32-7.9) lucid; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: set /proc/acpi/video/*/DOS to 4 by default + - LP: #458982 + * SAUCE: ensure vga16fb loads if no other driver claims the VGA device + * [Config] update configs following versatile switch to V7 + * rebased to v2.6.32 + * [Config] update configs following rebase to v2.6.32 + * [Config] update ports configs following rebase to v2.6.32 + * SAUCE: default ATI Radeon KMS to off until userspace catches up + + [ Arjan van de Ven ] + + * SAUCE: vfs: Add a trace point in the mark_inode_dirty function + + [ Leann Ogasawara ] + + * [SCSI] megaraid_sas: remove sysfs poll_mode_io world writeable + permissions + - CVE-2009-3939 + + [ Loic Minier ] + + * SAUCE: select a v7 CPU for versatile + + [ Takashi Iwai ] + + * SAUCE: ALSA: hda - Add power on/off counter + + [ Upstream changes ] + + * rebased to v2.6.32 + + -- Andy Whitcroft Fri, 04 Dec 2009 10:44:50 +0000 + +linux (2.6.32-6.8) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] disable SSB devices for armel + + -- Andy Whitcroft Sat, 28 Nov 2009 12:16:40 +0000 + +linux (2.6.32-6.7) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: default ATI Radeon KMS to off until userspace catches up" + * Revert "SAUCE: Dell XPS710 reboot quirk" + * Revert "SAUCE: Link acpi-cpufreq.o first" + * Revert "SAUCE: LPIA Logical reset of USB port on resume" + * Revert "SAUCE: LPIA Reboot fix for Intel Crownbeach development boards" + * Revert "SAUCE: Enable HDMI audio codec on Studio XPS 1340" + * Revert "SAUCE: Dell laptop digital mic does not work, PCI 1028:0271" + * Revert "Add Dell Dimension 9200 reboot quirk" + * Revert "SAUCE: Correctly blacklist Thinkpad r40e in ACPI" + * Revert "SAUCE: tulip: Define ULI PCI ID's" + * Revert "SAUCE: Lower warning level of some PCI messages" + * Revert "mac80211: fix two issues in debugfs" + Drop a number of known redundant commits as identified in the Ubuntu + delta review blueprint. 
+ + * reenable armel versatile flavour + * [Config] disable CONFIG_USB_DEVICEFS + + [ Tim Gardner ] + + * [Config] udeb: Add squashfs to fs-core-modules + - LP: #352615 + * [Config] Create a real squashfs udeb + - LP: #352615 + + + -- Andy Whitcroft Fri, 27 Nov 2009 17:31:16 +0000 + +linux (2.6.32-5.6) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.32-rc8 + * update configs following rebase to v2.6.32-rc8 + * update ports configs since rebase to v2.6.32-rc8 + * [Config] enable cgroup options + - LP: #480739 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc8 + + -- Andy Whitcroft Mon, 23 Nov 2009 11:16:14 +0000 + +linux (2.6.32-4.5) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] SERIO_LIBPS2 and SERIO_I8042 must match + * rebase to v2.6.32-rc7 + * resync with Karmic proposed + + [ John Johansen ] + + * SAUCE: AppArmor: Fix oops after profile removal + - LP: #475619 + * SAUCE: AppArmor: Fix Oops when in apparmor_bprm_set_creds + - LP: #437258 + * SAUCE: AppArmor: Fix cap audit_caching preemption disabling + - LP: #479102 + * SAUCE: AppArmor: Fix refcounting bug causing leak of creds + - LP: #479115 + * SAUCE: AppArmor: Fix oops there is no tracer and doing unsafe + transition. + - LP: #480112 + + [ Ubuntu Changes ] + + * resync with Karmic proposed (ddbc670a86a3dee18541a3734149f250ff307adf) + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc7 + + -- Andy Whitcroft Fri, 13 Nov 2009 11:35:13 +0000 + +linux (2.6.32-3.4) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] SERIO_LIBPS2 and SERIO_I8042 must match + * [Upstream] add local prefix to oss local change_bits + + [ Upstream Kernel Changes ] + + * mtd/maps: gpio-addr-flash: pull in linux/ headers rather than asm/ + * mtd/maps: gpio-addr-flash: depend on GPIO arch support + + -- Andy Whitcroft Wed, 11 Nov 2009 14:47:04 +0000 + +linux (2.6.32-3.3) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.32-rc6 + * [Config] update configs following rebase to v2.6.32-rc6 + * [Config] update ports configs following rebase to v2.6.32-rc6 + * resync with Karmic Ubuntu-2.6.31-15.49 + * [Config] add module ignores for broken drivers + + [ John Johansen ] + + * SAUCE: AppArmor: AppArmor wrongly reports allow perms as denied + - LP: #453335 + * SAUCE: AppArmor: Policy load and replacement can fail to alloc mem + - LP: #458299 + * SAUCE: AppArmor: AppArmor fails to audit change_hat correctly + - LP: #462824 + * SAUCE: AppArmor: AppArmor disallows truncate of deleted files. 
+ - LP: #451375 + + [ Kees Cook ] + + * SAUCE: Fix nx_enable reporting + - LP: #454285 + + [ Scott James Remnant ] + + * Revert "SAUCE: trace: add trace_event for the open() syscall" + * SAUCE: trace: add trace events for open(), exec() and uselib() + - LP: #462111 + + [ Stefan Bader ] + + * SAUCE: Fix sub-flavour script to not stop on missing directories + - LP: #453073 + + [ Ubuntu Changes ] + + * resync with Karmic Ubuntu-2.6.31-15.49 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc6 + - LP: #464552 + + -- Andy Whitcroft Tue, 10 Nov 2009 15:00:57 +0000 + +linux (2.6.32-2.2) lucid; urgency=low + + [ Andy Whitcroft ] + + * install the full changelog with the binary package + * changelog -- explicitly note rebases and clean history + * reinstate armel.mk with no flavours + - LP: #449637 + * [Upstream] block: silently error unsupported empty barriers too + - LP: #420423 + * [Config] udate configs following karmic resync + * [Config] update ports configs following karmic resync + * [Upstream] lirc -- follow removal of .id element + + [ Colin Watson ] + + * Use section 'admin' rather than 'base' + * Add more e100 firmware to nic-modules + - LP: #451872 + * Add qla1280 firmware to scsi-modules + - LP: #381037 + + [ John Johansen ] + + * SAUCE: AppArmor: Set error code after structure initialization. + - LP: #427948 + * SAUCE: AppArmor: Fix off by 2 error in getprocattr mem allocation + - LP: #446595 + * SAUCE: AppArmor: Fix mediation of "deleted" paths + + [ Kees Cook ] + + * SAUCE: [x86] fix report of cs-limit nx-emulation + - LP: #454285 + + [ Leann Ogasawara ] + + * SAUCE: (drop after 2.6.31) input: Add support for filtering input + events + - LP: #430809 + * SAUCE: (drop after 2.6.31) dell-laptop: Trigger rfkill updates on wifi + toggle switch press + - LP: #430809 + + [ Luke Yelavich ] + + * SAUCE: Add sr_mod to the scsi-modules udeb for powerpc + * [Config] Add sd_mod to scsi-modules udeb for powerpc + + [ Mario Limonciello ] + + * SAUCE: Update to LIRC 0.8.6 + - LP: #432678 + * SAUCE: dell-laptop: Store the HW switch status internally rather than + requerying every time + - LP: #430809 + * SAUCE: dell-laptop: Blacklist machines not supporting dell-laptop + - LP: #430809 + + [ Stefan Bader ] + + * [Upstream] acerhdf: Limit modalias matching to supported boards + - LP: #435958 + + [ Tim Gardner ] + + * [Upstream] i915: Fix i2c init message + - LP: #409361 + * [Config] Add sym53c8xx.ko to virtual sub-flavour + - LP: #439415 + * [Config] Add d101m_ucode.bin to d-i/firmware/nic-modules + - LP: #439456 + * [Config] Set default I/O scheduler back to CFQ for desktop flavours + - LP: #381300 + * SAUCE: Created MODULE_EXPORT/MODULE_IMPORT macros + - LP: #430694 + * SAUCE: Use MODULE_IMPORT macro to tie intel_agp to i915 + - LP: #430694 + * [Config] CONFIG_GFS2_FS_LOCKING_DLM=y + - LP: #416325 + * SAUCE: Fix MODULE_IMPORT/MODULE_EXPORT + - LP: #430694 + * SAUCE: Raise the default console 'quiet' level to 2 + * [Config] CONFIG_X86_PAT=y + * [Config] Add armel arch to linux-libc-dev arches. 
+ - LP: #449637 + * [Config] CONFIG_X86_MCE + * [Upstream] (drop after 2.6.31) Input: synaptics - add another Protege + M300 to rate blacklist + - LP: #433801 + + [ Upstream Kernel Changes ] + + * sgi-gru: Fix kernel stack buffer overrun, CVE-2009-2584 + * drm/i915: Fix FDI M/N setting according with correct color depth + - LP: #416792 + + -- Andy Whitcroft Thu, 22 Oct 2009 16:53:33 +0100 + +linux (2.6.32-1.1) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.32-rc3 + * [Config] update configs following rebase to 2.6.32-rc3 + * [Config] update ports configs following rebase to 2.6.32-rc3 + * AppArmor -- fix pstrace_may_access rename + * staging/android -- disable + * ubuntu: dm-raid-45 -- update to compile with 2.6.32 + * ubuntu: drbd -- disable + * staging/comdi -- disable + * staging/go7007 -- disable + * [Config] staging/winbond -- disable + * [Config] ubuntu/iscsitarget -- disable + * [d-i] cbc and ecb are builtin make them optional in udebs + * rebase to v2.6.32-rc5 + * [Config] update configs following rebase to v2.6.32-rc5 + * [Config] update ports configs following rebase to v2.6.31-rc5 + + [ Tim Gardner ] + + * [Config] Add cpio as a build dependency. + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc3 + * rebase to v2.6.32-rc5 + + -- Andy Whitcroft Mon, 05 Oct 2009 15:48:58 +0100 + +linux (2.6.31-11.37) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] Increase kernel log buffer to 256K for amd64 flavours + - LP: #424810 + * [Config] Set HZ=100 for amd64 flavours + - LP: #438234 + * [Upstream] e1000e: Emit notice instead of an error when + pci_enable_pcie_error_reporting() fails + - LP: #436370 + + [ Upstream Kernel Changes ] + + * n_tty: honor opost flag for echoes + * n_tty: move echoctl check and clean up logic + - LP: #438310 + + * Revert "[Upstream] drm/i915: Check that the relocation points to within + the target" - Use upstream cherry-pick. + * drm/i915: Check that the relocation points to within the target + - LP: #429241 + + * drm/i915: fix tiling on IGDNG + * drm/i915: add B43 chipset support + * agp/intel: Add B43 chipset support + Intel request from kernel team mailing list. + + * HID: completely remove apple mightymouse from blacklist + - LP: #428111 + + -- Tim Gardner Mon, 28 Sep 2009 11:47:29 -0600 + +linux (2.6.31-11.36) karmic; urgency=low + + [ Brian Rogers ] + + * SAUCE: (drop after 2.6.31) em28xx: ir-kbd-i2c init data needs a + persistent object + * SAUCE: (drop after 2.6.31) saa7134: ir-kbd-i2c init data needs a + persistent object + + [ Takashi Iwai ] + + * [Upstream] ALSA: hda - Add another entry for Nvidia HDMI device + - LP: #416482 + + [ Tyler Hicks ] + + * SAUCE: (drop after 2.6.31) eCryptfs: Prevent lower dentry from going + negative during unlink + + [ Upstream Kernel Changes ] + + * sg: fix oops in the error path in sg_build_indirect() + * mpt2sas : Rescan topology from Interrupt context instead of work thread + * mpt2sas: Prevent sending command to FW while Host Reset + * mpt2sas: setting SDEV into RUNNING state from Interrupt context + * mpt2sas: Raid 10 Volume is showing as Raid 1E in dmesg + * SCSI: fix oops during scsi scanning + * SCSI: libsrp: fix memory leak in srp_ring_free() + * cfg80211: fix looping soft lockup in find_ie() + * ath5k: write PCU registers on initial reset + * binfmt_elf: fix PT_INTERP bss handling + * TPM: Fixup boot probe timeout for tpm_tis driver + * md: Fix "strchr" [drivers/md/dm-log-userspace.ko] undefined! 
+ * x86/amd-iommu: fix broken check in amd_iommu_flush_all_devices + * fix undefined reference to user_shm_unlock + * perf_counter: Fix buffer overflow in perf_copy_attr() + * perf_counter: Start counting time enabled when group leader gets + enabled + * powerpc/perf_counters: Reduce stack usage of power_check_constraints + * powerpc: Fix bug where perf_counters breaks oprofile + * powerpc/ps3: Workaround for flash memory I/O error + * block: don't assume device has a request list backing in nr_requests + store + * agp/intel: remove restore in resume + * ALSA: cs46xx - Fix minimum period size + * ASoC: Fix WM835x Out4 capture enumeration + * sound: oxygen: work around MCE when changing volume + * mlx4_core: Allocate and map sufficient ICM memory for EQ context + * perf stat: Change noise calculation to use stddev + * x86: Fix x86_model test in es7000_apic_is_cluster() + * x86/i386: Make sure stack-protector segment base is cache aligned + * PCI: apply nv_msi_ht_cap_quirk on resume too + * x86, pat: Fix cacheflush address in change_page_attr_set_clr() + * ARM: 5691/1: fix cache aliasing issues between kmap() and kmap_atomic() + with highmem + * KVM guest: do not batch pte updates from interrupt context + * KVM: Fix coalesced interrupt reporting in IOAPIC + * KVM: VMX: Check cpl before emulating debug register access + * KVM guest: fix bogus wallclock physical address calculation + * KVM: x86: Disallow hypercalls for guest callers in rings > 0 + * KVM: VMX: Fix cr8 exiting control clobbering by EPT + * KVM: x86 emulator: Implement zero-extended immediate decoding + * KVM: MMU: make __kvm_mmu_free_some_pages handle empty list + * KVM: x86 emulator: fix jmp far decoding (opcode 0xea) + * KVM: limit lapic periodic timer frequency + * libata: fix off-by-one error in ata_tf_read_block() + * PCI quirk: update 82576 device ids in SR-IOV quirks list + * PCI: Unhide the SMBus on the Compaq Evo D510 USDT + * powerpc/pseries: Fix to handle slb resize across migration + * Linux 2.6.31.1 + + -- Tim Gardner Thu, 24 Sep 2009 13:04:28 -0600 + +linux (2.6.31-10.35) karmic; urgency=low + + [ Amit Kucheria ] + + * Disable CONFIG_UEVENT_HELPER_PATH + + [ Andy Whitcroft ] + + * [Config] Enable CONFIG_USB_GADGET_DUMMY_HCD + * remove the tlsup driver + * remove lmpcm logitech driver support + + [ Bryan Wu ] + + * Add 3 missing files to prerm remove file list + - LP: #345623, #415832 + + [ Chris Wilson ] + + * [Upstream] drm/i915: Check that the relocation points to within the + target + - LP: #429241 + + [ Luke Yelavich ] + + * [Config] Set CONFIG_EXT4_FS=y on ports architectures + + [ Manoj Iyer ] + + * SAUCE: Added quirk to recognize GE0301 3G modem as an interface. + - LP: #348861 + + [ Tim Gardner ] + + * Revert "[Upstream] ACPI: Add Thinkpad W500, W700, & W700ds to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad R400 & Thinkpad R500 to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad X300 & Thinkpad X301 to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad X200, X200s, X200t to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad T400 & Thinkpad T500 to OSI(Linux) white-list" + Upstream suggests that this is not the right approach. + + * [Config] Set default I/O scheduler to DEADLINE + CFQ seems to have some load related problems which are often exacerbated by sreadahead. 
+ - LP: #381300 + + [ ubuntu@tjworld.net ] + + * SAUCE: ipw2200: Enable LED by default + - LP: #21367 + + [ Upstream Kernel Changes ] + + * ALSA: hda - Add support for new AMD HD audio devices + - LP: #430564 + + -- Andy Whitcroft Wed, 16 Sep 2009 15:37:49 +0100 + +linux (2.6.31-10.34) karmic; urgency=low + + [ Ted Tso ] + + * [Upstream] ext3: Don't update superblock write time when filesystem is + read-only + - LP: #427822 + + -- Tim Gardner Tue, 15 Sep 2009 16:00:45 -0600 + +linux (2.6.31-10.33) karmic; urgency=low + + [ Leann Ogasawara ] + + * [Upstream] dvb-usb: fix tuning with Cinergy T2 + - LP: #421258 + + [ Tim Gardner ] + + * [Config] Unconditionally copy files from sub-flavours lists. + (really, really fix it this time) + - LP: #423426 + * [Config] Set CONFIG_CACHEFILES=m for all flavours + + [ Upstream Kernel Changes ] + + * ext4: Don't update superblock write time when filesystem is read-only + - LP: #427822 + + -- Tim Gardner Tue, 15 Sep 2009 07:50:21 -0600 + +linux (2.6.31-10.32) karmic; urgency=low + + [ Eric Miao ] + + * [Config] enable module support for memory stick + - LP: #159951 + + [ Tim Gardner ] + + * [Config] Unconditionally copy files from sub-flavours lists. + - LP: #423426 + + -- Tim Gardner Thu, 10 Sep 2009 15:57:55 -0600 + +linux (2.6.31-10.31) karmic; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.31 final + + [ Colin Watson ] + + * [Config] Recommend grub-pc in linux-image + - LP: #385741 + + [ Ike Panhc ] + + * [Upstream] Pull latest update of lenovo-sl-laptop + + [ Peter Feuerer ] + + * [Upstream] (drop after 2.6.31) acerhdf: fix fan control for AOA150 + model + - LP: #426691 + + [ Tim Gardner ] + + * [Config] De-macro some package names. + + [ Upstream Changes ] + + * rebase to 2.6.31 final. + + -- Andy Whitcroft Thu, 10 Sep 2009 09:38:10 +0100 + +linux (2.6.31-10.30) karmic; urgency=low + + [ Amit Kucheria ] + + * [Config] Enable CONFIG_USB_DEVICEFS + - LP: #417748 + * [Config] Populate the config-update template a bit more + + [ Andy Whitcroft ] + + * rebase to v2.6.31-rc9 + * [Config] update configs following rebase to v2.6.31-rc9 + * [Config] update ports configs following rebase to v2.6.31-rc9 + + [ Colin Ian King ] + + * SAUCE: wireless: hostap, fix oops due to early probing interrupt + - LP: #254837 + + [ Jerone Young ] + + * [Upstream] ACPI: Add Thinkpad T400 & Thinkpad T500 to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad X200, X200s, X200t to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad X300 & Thinkpad X301 to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad R400 & Thinkpad R500 to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad W500, W700, & W700ds to OSI(Linux) + white-list + - LP: #281732 + + [ John Johansen ] + + * SAUCE: AppArmor: Fix profile attachment for regexp based profile names + - LP: #419308 + * SAUCE: AppArmor: Return the correct error codes on profile + addition/removal + - LP: #408473 + * SAUCE: AppArmor: Fix OOPS in profile listing, and display full list + - LP: #408454 + * SAUCE: AppArmor: Fix mapping of pux to new internal permission format + - LP: #419222 + * SAUCE: AppArmor: Fix change_profile failure + - LP: #401931 + * SAUCE: AppArmor: Tell git to ignore generated include files + - LP: #419505 + + [ Stefan Bader ] + + * [Upstream] acpi: video: Loosen strictness of video bus detection code + - LP: #333386 + * SAUCE: Remove ov511 driver from ubuntu subdirectory + + [ Tim Gardner ] + + * [Config] Exclude 
char-modules from non-x86 udeb creation + * SAUCE: Notify the ACPI call chain of AC events + * [Config] CONFIG_SATA_VIA=m + - LP: #403385 + * [Config] Build in all phylib support modules. + * [Config] Don't fail when sub-flavour files are missing + - LP: #423426 + * [Config] Set CONFIG_LSM_MMAP_MIN_ADDR=0 + - LP: #423513 + + [ Upstream ] + + * Rebased against v2.6.31-rc9 + + -- Andy Whitcroft Mon, 07 Sep 2009 11:33:45 +0100 + +linux (2.6.31-9.29) karmic; urgency=low + + [ Leann Ogasawara ] + + * [Upstream] agp/intel: support for new chip variant of IGDNG mobile + - LP: #419993 + * [Config] d-i/modules: Add new char-modules file, initialize with + intel-agp + - LP: #420605 + + [ Upstream ] + + * Rebased against 2.6.31-rc8 plus some inotify regression patches: + up through git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git + adda766193ea1cf3137484a9521972d080d0b7af. + + -- Tim Gardner Fri, 28 Aug 2009 06:31:30 -0600 + +linux (2.6.31-8.28) karmic; urgency=low + + [ Ike Panhc ] + + * [Config] Let nic-shared-modules depends on crypto-modules + - LP: #360966 + + [ Leann Ogasawara ] + + * [Upstream] (drop after 2.6.31) drm/i915: increase default latency + constant + - LP: #412492 + + [ Mario Limonciello ] + + * [Upstream]: (drop after 2.6.31) dell-laptop: don't change softblock + status if HW switch is disabled + - LP: #418721 + * [Upstream]: (drop after 2.6.31) compal-laptop: Add support for known + Compal made Dell laptops + * [Upstream]: (drop after 2.6.31) compal-laptop: Replace sysfs support + with rfkill support + + [ Tim Gardner ] + + * [Config] Add acpiphp to virtual sub-flavour + - LP: #364916 + * Drop KSM patch set for now because of instabilities with encrypted swap. + - LP: #418781 + + -- Tim Gardner Wed, 26 Aug 2009 08:14:26 -0600 + +linux (2.6.31-7.27) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] updateconfigs updateportsconfigs after 2.6.31-rc7 rebase + * SAUCE: (drop after 2.6.31) Added KSM from mmotm-2009-08-20-19-18 + Replaces previous ksm patches from 2.6.31-6.25 + * [Config] KSM=y + + [ Upstream ] + + * Rebased against v2.6.31-rc7 + + -- Tim Gardner Sat, 22 Aug 2009 20:32:11 -0600 + +linux (2.6.31-6.26) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_AUFS_BR_RAMFS + - LP: #414738 + * split out debian directory ready for abstraction + * add printdebian target to find branch target + * abstracted debian -- debian/files is not abstracted + * abstracted debian -- packages must be built in debian/ + * abstracted debian -- kernel-wedge needs to work in debian/ + * abstracted debian -- ensure we install the copyright file + * abstracted-debian -- drop the debian directories from headers + * abstracted-debian -- drop the debian directories from headers part 2 + * SAUCE: ubuntu-insert-changes -- follow abstracted debian + * [Upstream] aoe: ensure we initialise the request_queue correctly V2 + - LP: #410198 + + [ Luke Yelavich ] + + * [Config] Ports: Disable CONFIG_CPU_FREQ_DEBUG on powerpc-smp + * [Config] Ports: Re-enable windfarm modules on powerpc64-smp + - LP: #413150 + * [Config] Ports: Build all cpu frequency scaling governors into ports + kernels + * [Config] Ports: Build ext2 and ext3 modules into ports kernels + * [Config] Ports: CONFIG_PACKET=y for all ports kernels + * [Config] Ports: Enable PS3 network driver + + [ Stefan Bader ] + + * abstracted debian -- call $(DEBIAN)/rules using make + + [ Tim Gardner ] + + * [Config] Abstract the debian directory + * SAUCE: Improve error reporting in postinst + - LP: #358564 + + -- 
Tim Gardner Sun, 16 Aug 2009 20:33:28 -0600 + +linux (2.6.31-6.25) karmic; urgency=low + + [ Andy Whitcroft ] + + * script to generate Ubuntu changes from changelog + * [Config] standardise ANDROID options + * [Config] standardise CONFIG_ATM as module + * [Config] standardise CONFIG_LIB80211 as module + * [Config] disable CONFIG_PRINT_QUOTA_WARNING + * [Config] set CONFIG_CRAMFS as module + * [Config] enable CONFIG_DAB and modules + * [Config] set CONFIG_MAC80211_HWSIM as module + * [Config] set CONFIG_NET_CLS_FLOW as module + * [Config] set CONFIG_NF_CONNTRACK_SANE as module + * [Config] set CONFIG_NF_CT_PROTO_DCCP as module + * [Config] set CONFIG_RTC_DRV_DS1511 as module + * [Config] set CONFIG_RTC_DRV_R9701 as module + * [Config] set CONFIG_RTC_DRV_S35390A as module + * [Config] set CONFIG_TOIM3232_DONGLE as module + * [Config] standardise CONFIG_USB_MIDI_GADGET as module + * [Config] standardise CONFIG_USB_G_PRINTER as module + * [Config] standardise CONFIG_USB_SERIAL_IR as module + * [Config] set CONFIG_USB_SERIAL_IUU as module + * [Config] standardise CONFIG_USB_STORAGE_CYPRESS_ATACB as module + * [Config] standardise CONFIG_USB_STORAGE_ONETOUCH as module + * cleanup remains of dm-loop + * drop thinkpad ec and smapi support + * drop appleir + * [Config] update configs following rebase to v2.6.31-rc6 + * rebase to v2.6.31-rc6 + + [ Hugh Dickins ] + + * SAUCE: ksm patch 1, drop after 2.6.31 + * SAUCE: ksm patch 2, drop after 2.6.31 + * SAUCE: ksm patch 3, drop after 2.6.31 + * SAUCE: ksm patch 4, drop after 2.6.31 + * SAUCE: ksm patch 5, drop after 2.6.31 + * SAUCE: ksm patch 7, drop after 2.6.31 + + [ Izik Eidus ] + + * SAUCE: ksm patch 0, drop after 2.6.31 + * SAUCE: ksm patch 6, drop after 2.6.31 + * SAUCE: ksm patch 8, drop after 2.6.31 + * SAUCE: ksm patch 9, drop after 2.6.31 + + [ Luke Yelavich ] + + * [Config] Ports: Re-add PS3 modules to udebs + + [ Michael Casadevall ] + + * [Config] Update SPARC config and d-i files to reflect what can be built + + [ Tim Gardner ] + + * [Config] Removed armel package support + * [Config] Enabled CONFIG_KSM=y + + [ Upstream Kernel Changes ] + + * Rebased against v2.6.31-rc6 + * ARM: Cleanup: Revert "ARM: Add more cache memory types macros" + * ARM: Cleanup: Revert "Do not use OOB with MLC NAND" + * ARM: Cleanup: Revert "ARM: Make ARM arch aware of ubuntu/ drivers" + * ARM: Cleanup: Revert "ARM: IMX51: Make video capture drivers compile" + * ARM: Cleanup: Revert "ARM: IMX51: Fix isl29003 HWMON driver for i2c + changes" + * ARM: Cleanup: Revert "ARM: IMX51: IPU irq handler deadlock fix" + * ARM: Cleanup: Revert "ARM: IMX51: Babbage 2.5 needs a different system + revision" + * ARM: Cleanup: Revert "ARM: IMX51: Compile-in the IMX51 cpufreq driver + by default" + * ARM: Cleanup: Revert "ARM: IMX51: Enable ZONE_DMA for ARCH_MXC" + * ARM: Cleanup: Revert "ARM: IMX51: Make ARCH_MXC auto-enable + ARCH_MXC_CANONICAL" + * ARM: Cleanup: Revert "ARM: IMX51: Unconditionally disable + CONFIG_GPIOLIB" + * ARM: Cleanup: Revert "ARM: IMX51: Minimal changes for USB to work on + 2.6.31" + * ARM: Cleanup: Revert "ARM: IMX51: Fix plat-mxc/timer.c to handle imx51" + * ARM: Cleanup: Revert "ARM: IMX51: Make it compile." 
+ * ARM: Cleanup: Revert "ARM: IMX51: Clean-up the craziness of including + mxc_uart.h _everywhere_" + * ARM: Cleanup: Revert "ARM: IMX51: Move board-mx51* header files to the + correct location" + * ARM: Cleanup: Revert "ARM: IMX51: Changed from snd_card_new to + snd_card_create" + * ARM: Cleanup: Revert "ARM: IMX51: Fix up merge error in Kconfig" + * ARM: Cleanup: Revert "ARM: IMX51: mxc_timer_init prototype" + * ARM: Cleanup: Revert "ARM: IMX51: Removed the mxc_gpio_port structure." + * ARM: Cleanup: Revert "ARM: IMX51: Added external declaration for + mxc_map_io." + * ARM: Cleanup: Revert "ARM: IMX51: Get to bus_id by calling dev_name." + * ARM: Cleanup: Revert "ARM: IMX51: Get to bus_id by calling dev_name." + * ARM: Cleanup: Revert "ARM: IMX51: snd_soc_machine structure replaced + with snd_soc_card." + * ARM: Cleanup: Revert "ARM: IMX51: codec structure was moved to the card + structure" + * ARM: Cleanup: Revert "ARM: IMX51: Hack to add defines for + DMA_MODE_READ/WRITE/MASK" + * ARM: Cleanup: Revert "ARM: IMX51: Add SoC and board support for + Freescale mx51 platform" + * Driver core: add new device to bus's list before probing + * [Upstream] (drop after 2.6.31) ALSA: hda - Reduce click noise at + power-saving + - LP: #381693, #399750, #380892 + + -- Andy Whitcroft Fri, 14 Aug 2009 11:32:23 +0100 + +linux (2.6.31-5.24) karmic; urgency=low + + [ Amit Kucheria ] + + * ARM: IMX51: Make video capture drivers compile + * [Config] IMX51: Config updates + + [ Andy Whitcroft ] + + * remove leftovers of dm-bbr + + [ Leann Ogasawara ] + + * Add pata_cs5535 to pata-modules + - LP: #318805 + + [ Luke Yelavich ] + + * [Config] CONFIG_PPC64=y for powerpc64-smp + * [Config] Set the maximum number of CPUs to 1024 for powerpc64-smp + * [Config] CONFIG_PPC_PS3=y for powerpc64-smp + * [Config] CONFIG_PPC_MAPLE=y on powerpc64-smp + * [Config] CONFIG_PPC_PASEMI=y on powerpc64-smp + * [Config] CONFIG_CPU_FREQ_PMAC64=y on powerpc64-smp + * [Config] Enable all PS3 drivers in powerpc64-smp + + [ Mario Limonciello ] + + * LIRC -- fix lirc-i2c 2.6.31 compilation + + [ Matthew Garrett ] + + * [Upstream] dell-laptop: Fix rfkill state queries + + [ Tim Gardner ] + + * [Config] Ignore armel ABI and module changes + * [Config] Update configs after rebase against 2.6.31-rc5 + + [ Upstream ] + + * Rebased to 2.6.31-rc5 + + -- Andy Whitcroft Tue, 28 Jul 2009 10:10:09 +0100 + +linux (2.6.31-4.23) karmic; urgency=low + + [ Andy Whitcroft ] + + * AUFS -- update to aufs2-30 20090727 + * [Config] enable AUFS FUSE support + + [ Luke Yelavich ] + + * [Config] CONFIG_JFS_FS=m on sparc + + [ Tim Gardner ] + + * [Upstream] dell-laptop: Fix rfkill state setting. + + -- Andy Whitcroft Mon, 27 Jul 2009 11:11:47 +0100 + +linux (2.6.31-4.22) karmic; urgency=low + + [ Amit Kucheria ] + + * ARM: IMX51: Add SoC and board support for Freescale mx51 platform + * ARM: IMX51: Move board-mx51* header files to the correct location + * ARM: IMX51: Clean-up the craziness of including mxc_uart.h _everywhere_ + * ARM: IMX51: Make it compile. 
+ * ARM: IMX51: Unconditionally disable CONFIG_GPIOLIB + * ARM: IMX51: Make ARCH_MXC auto-enable ARCH_MXC_CANONICAL + * ARM: IMX51: Enable ZONE_DMA for ARCH_MXC + * ARM: IMX51: Compile-in the IMX51 cpufreq driver by default + * ARM: IMX51: Fix isl29003 HWMON driver for i2c changes + * ARM: USB: musb: Refer to musb_otg_timer_func under correct #ifdef + * ARM: staging: udlfb: Add vmalloc.h include + * UBUNTU [Config]: Bring imx51 config upto date with other flavours + + [ Brad Figg ] + + * ARM: IMX51: Hack to add defines for DMA_MODE_READ/WRITE/MASK + * ARM: IMX51: codec structure was moved to the card structure + * ARM: IMX51: snd_soc_machine structure replaced with snd_soc_card. + * ARM: IMX51: Get to bus_id by calling dev_name. + * ARM: IMX51: Get to bus_id by calling dev_name. + * ARM: IMX51: Added external declaration for mxc_map_io. + * ARM: IMX51: Removed the mxc_gpio_port structure. + * ARM: IMX51: mxc_timer_init prototype + * ARM: IMX51: Fix up merge error in Kconfig + * ARM: IMX51: Changed from snd_card_new to snd_card_create + + [ Dinh Nguyen ] + + * ARM: IMX51: Fix plat-mxc/timer.c to handle imx51 + * ARM: IMX51: Minimal changes for USB to work on 2.6.31 + * ARM: IMX51: Babbage 2.5 needs a different system revision + * ARM: IMX51: IPU irq handler deadlock fix + + [ Tim Gardner ] + + * [Config] Enabled CONFIG_CAN=m + - LP: #327243 + * [Config] Enabled CONFIG_SERIAL=m + - LP: #397189 + + -- Tim Gardner Fri, 24 Jul 2009 06:19:10 -0600 + +linux (2.6.31-4.21) karmic; urgency=low + + [ Amit Kucheria ] + + * dm-raid-4-5: Add missing brackets around test_bit() + + [ John Johansen ] + + * AppArmor: Fix change_profile failing lpn401931 + * AppArmor: Fix determination of forced AUDIT messages. + * AppArmor: Fix oops in auditing of the policy interface offset + + -- Andy Whitcroft Thu, 23 Jul 2009 19:18:30 +0100 + +linux (2.6.31-4.20) karmic; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: iscsitarget -- update to SVN revision r214 + * SAUCE: iscsitarget -- renable driver + * [Config] consolidate lpia/lpia and i386/generic configs + * [Config] enable CRYPTO modules for all architectures + * [Config] enable cryptoloop + * [Config] enable various filesystems for armel + * [Config] sync i386 generic and generic-pae + * [Config] add the 386 (486 processors and above) flavour + * [Config] re-set DEFAULT_MMAP_MIN_ADDR + - LP: #399914 + * add genconfigs/genportsconfigs to extract the built configs + * updateconfigs -- alter concatenation order allow easier updates + * intelfb -- INTELFB now conflicts with DRM_I915 + * printchanges -- rebase tree does not have stable tags use changelog + * AppArmor: fix argument size missmatch on 64 bit builds + + [ Ike Panhc ] + + * Ship bnx2x firmware in nic-modules udeb + - LP: #360966 + + [ Jeff Mahoney ] + + * AppArmor: fix build failure on ia64 + + [ John Johansen ] + + * AppArmour: ensure apparmor enabled parmater is off if AppArmor fails to + initialize. + * AppArmour: fix auditing of domain transitions to include target profile + information + * AppArmor: fix C99 violation + * AppArmor: revert reporting of create to write permission. + * SAUCE: Add config option to set a default LSM + * [Config] enable AppArmor by default + * AppArmor: Fix NULL pointer dereference oops in profile attachment. 
+ + [ Keith Packard ] + + * SAUCE: drm/i915: Allow frame buffers up to 4096x4096 on 915/945 class + hardware + - LP: #351756 + + [ Luke Yelavich ] + + * [Config] add .o files found in arch/powerpc/lib to all powerpc kernel + header packages + - LP: #355344 + + [ Michael Casadevall ] + + * [Config] update SPARC config files to allow success build + + [ Scott James Remnant ] + + * SAUCE: trace: add trace_event for the open() syscall + + [ Stefan Bader ] + + * SAUCE: jfs: Fix early release of acl in jfs_get_acl + - LP: #396780 + + [ Tim Gardner ] + + * [Upstream] Fix Soltech TA12 volume hotkeys not sending key release + - LP: #397499 + * [Upstream] USB Option driver - Add USB ID for Novatel MC727/U727/USB727 + refresh + - LP: #365291 + * [Config] SSB/B44 are common across all arches/flavours. + + [ Upstream ] + + * Rebased to 2.6.31-rc4 + + -- Andy Whitcroft Thu, 23 Jul 2009 08:41:39 +0100 + +linux (2.6.31-3.19) karmic; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Config] Disabled NDISWRAPPER" + * ndiswrapper -- fix i386 compilation failures on cmpxchg8b + * AUFS -- export various core functions + * AUFS -- export various core functions -- fixes + * AUFS -- core filesystem + * AUFS -- track changes in v2.6.31 + * [Config] Enable AUFS + * droppped 'iwl3945: do not send scan command if channel count zero' as it + is already upstream but failed to auto-drop on rebase. + + [ Eric Paris ] + + * SAUCE: fsnotify: use def_bool in kconfig instead of letting the user + choose + * SAUCE: inotify: check filename before dropping repeat events + * SAUCE: fsnotify: fix inotify tail drop check with path entries + + -- Andy Whitcroft Tue, 14 Jul 2009 12:52:55 +0100 + +linux (2.6.31-3.18) karmic; urgency=low + + [ Andy Whitcroft ] + + * Revert "Add splice-2.6.23.patch from AUFS to export a symbol needed by + AUFS" + * Revert "Add put_filp.patch from AUFS to export a symbol needed by AUFS" + * Revert "Add sec_perm-2.6.24.patch from AUFS - export + security_inode_permission" + * clear out left over AUFS files and modifications + + [ Luke Yelavich ] + + * [Config] Enable CONFIG_USB_ISP116X_HCD on sparc + * SAUCE: Explicitly include header files to allow apparmor to build on + powerpc + * [Config] Enable CONFIG_BLK_DEV_IDECD on powerpc + + [ Tim Gardner ] + + * [Config] Dropped ubuntu/misc/wireless/acx + * [Config] Disabled NDISWRAPPER until the compile issues are fixed. + + [ Upstream ] + + * Rebased to 2.6.31-rc3 + + -- Andy Whitcroft Fri, 10 Jul 2009 18:59:33 +0100 + +linux (2.6.31-2.17) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_BLK_DEV_CRYPTOLOOP=m for sparc + * compcache -- remove redundant Kconfig entries part 2 + * compcache -- clean up CCFLAGS declarations + * [Config] enable AppArmor + * AppArmor: fix operator precidence issue in as_path_link + + [ John Johansen ] + + * AppArmor security module + * AppArmor: Correct mapping of file permissions. + * AppArmor: Turn auditing of ptrace on + + [ Luke Yelavich ] + + * [Config] disable CONFIG_DM_RAID45 on powerpc + + -- Andy Whitcroft Fri, 10 Jul 2009 15:02:05 +0100 + +linux (2.6.31-2.16) karmic; urgency=low + + [ Andy Whitcroft ] + + * compcache -- remove redundant Kconfig entries + added ignore and ignore.modules for all arches since the compcache update + changes the modules names as well as some compcache ABI values. 
+ + [ Manoj Iyer ] + + * SAUCE: updated dm-raid45 module version to 2009.04.24 (2.6.30-rc3) + * SAUCE: update compcache version to 0.5.3 + + [ Tim Gardner ] + + * [Config]: Fix sparc FTBS by adding ignore.modules + + -- Tim Gardner Mon, 06 Jul 2009 13:35:29 -0600 + +linux (2.6.31-2.15) karmic; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: default ATI Radeon KMS to off until userspace catches up + * [Config] Update configs following rebase to 2.6.31-rc2 + * [Config] update ports configs following update to 2.6.31-rc2 + + [ Luke Yelavich ] + + * [Config] powerpc - Disable CONFIG_RDS + + [ Matt Zimmerman ] + + * Rename linux-doc-PKGVER to linux-doc and clean up its description + - LP: #382115 + + [ Upstream Kernel Changes ] + + * rebased to mainline 2.6.31-rc2 + + -- Andy Whitcroft Sat, 04 Jul 2009 17:39:13 +0100 + +linux (2.6.31-1.14) karmic; urgency=low + + [ Andy Whitcroft ] + + * update ndiswrapper to 1.55 + * remove leftovers of gfs + * [Config] powerpc: enable CONFIG_PPC_DISABLE_WERROR + + [ Luke Yelavich ] + + * [Config] re-enable and build the ide-pmac driver into powerpc kernels + * [Config] Build the ServerWorks Frodo / Apple K2 SATA driver into the + kernel + + [ Manoj Iyer ] + + * Remove snd-bt-sco ubuntu driver + + [ Michael Casadevall ] + + * [Config] updates ia64 config and d-i folders to allow succesful build + * [Config] Update powerpc and sparc for 2.6.31 + + [ Upstream Kernel Changes ] + + * intel-iommu: fix Identity Mapping to be arch independent + - LP: #384695 + * ACPI: video: prevent NULL deref in acpi_get_pci_dev() + + -- Andy Whitcroft Tue, 30 Jun 2009 17:47:32 +0100 + +linux (2.6.31-1.13) karmic; urgency=low + + [ Andy Whitcroft ] + + * REBASE: rebased to mainline 2.6.31-rc1 + - "UBUNTU: SAUCE: UHCI USB quirk for resume" + no longer applies, using deprecated interfaces, LPIA only, dropped + - "UBUNTU: SAUCE: Mask off garbage in Dell WMI scan code data" + changes now upstream, dropped + * [Config] Update configs following rebase to 2.6.31-rc1 + * [Config] update ports configs following update to 2.6.31-rc1 + + * [Config] disable broken staging driver CONFIG_STLC45XX + * SAUCE: fix compcache to use updates accessors + * [Config] disable staging driver CONFIG_VT6655 + * SAUCE: fix DRDB to use updates accessors + * [Disable] ndiswrapper needs update + * [Disable] LIRC I2C needs update + * [Disable] CONFIG_LENOVO_SL_LAPTOP needs update + * [Config] disable I2C_DESIGNWARE does not compile + * [Config] disable CONFIG_TLSUP for lpia + * [Config] disable CONFIG_FB_UDL for arm + * SAUCE: disable adding scsi headers to linux-libc-dev + + [ Mario Limonciello ] + + * SAUCE: Add LIRC drivers + + -- Andy Whitcroft Thu, 25 Jun 2009 12:06:22 +0100 + +linux (2.6.30-10.12) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] split out the ports configs into their own family + * [Config] update configs following introduction of ports family + + [ Upstream Kernel Changes ] + + * Revert "Rename linux-doc-PKGVER to linux-doc and clean up its + description". Fixes linux-doc package name conflicts for now. 
+ - LP: #382115 + + -- Tim Gardner Mon, 22 Jun 2009 09:17:14 -0600 + +linux (2.6.30-10.11) karmic; urgency=low + + [ Amit Kucheria ] + + * [Config] Comment splitconfig.pl and misc cleanup + * [Config] Rename all configs to the new naming scheme + * [Config] Splitconfig rework + * [Config] Rename scripts/misc/oldconfig to kernelconfig + * [Config] Fix build system for new config split + * [Config] Run updateconfigs after the splitconfig rework + + [ Andy Whitcroft ] + + * Revert "SAUCE: Default to i915.modeset=0 if CONFIG_DRM_I915_KMS=y" + * [Config] standardise CONFIG_STAGING=y + * [Config] standardise CONFIG_RD_LZMA=y + * [Config] CONFIG_PCI_IOV=y + * [Config] CONFIG_PCI_STUB=m + * [Config] merge kernel configs more agressively + + [ Colin Watson ] + + * [Config] Run kernel-wedge in $(builddir) rather than at the top level + * [Config] Add support for including firmware in udebs + * [Config] Ship bnx2 firmware in nic-modules udeb + - LP: #384861 + + [ Luke Yelavich ] + + * [Config] ports - Import of ports architectures into kernel packaging + infrastructure + * [Config] ports - Do not update ports kernel configurations by default + * [Config] ports - Disable ABI checking for ports architectures + * [Config] ports - Build drivers in ubuntu sub-directory on powerpc + * [Config] ports - Add control.d/vars.* files for ports architectures + * [Config] ports - Add ports architectures for linux-libc-dev + * [Config] ports - Create powerpc specific message-modules and + block-modules udebs + * [Config] ports - Add configuration files for ports architectures + + [ Manoj Iyer ] + + * [Config] Enable CONFIG_BLK_DEV_AEC62XX=m for amd64 and i386 + - LP: #329864 + + [ Michael Casadevall ] + + * [Config] ports - Fix compression of kernels + + [ Stefan Bader ] + + * [Upstream] mmc: prevent dangling block device from accessing stale + queues + - LP: #383668 + + [ Tim Gardner ] + + * [Config] Recommend grub-pc in linux-image + - LP: #385741 + * [Config] Implement i386 generic and generic-pae flavours + * [Config] ports - Add control info after integrating ports arches + * [Config] Removed auto-generated files from git + * [Config] Added netxen_nic to nic-modules + - LP: #389603 + + [ Matt Zimmerman ] + + * Rename linux-doc-PKGVER to linux-doc and clean up its description + - LP: #382115 + + -- Tim Gardner Mon, 15 Jun 2009 14:38:26 -0600 + +linux (2.6.30-9.10) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_SECURITY_TOMOYO=y (amd64, i386, lpia) + * [Config] CONFIG_KEXEC_JUMP=y (amd64, lpia) + * [Config] CONFIG_LENOVO_SL_LAPTOP=m (amd64, lpia) + * [Config] CONFIG_POHMELFS_CRYPTO=y (i386, amd64) + * [Config] CONFIG_SERIAL_MAX3100=m (i386, amd64, lpia) + * [Config] CONFIG_VIDEO_GO7007=m (amd64, i386) + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30 final + + -- Andy Whitcroft Fri, 05 Jun 2009 11:42:53 +0100 + +linux (2.6.30-8.9) karmic; urgency=low + + [ Andy Whitcroft ] + + * Config update removed the following options: + CONFIG_EDAC_AMD8111=m + CONFIG_EDAC_AMD8131=m + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc8 + + -- Andy Whitcroft Wed, 03 Jun 2009 09:21:13 +0100 + +linux (2.6.30-7.8) karmic; urgency=low + + [ Andy Whitcroft ] + + * Enabled NEW configration options: + Paravirtualization layer for spinlocks (PARAVIRT_SPINLOCKS) [N/y/?] Y + Cisco FNIC Driver (FCOE_FNIC) [N/m/y/?] 
M + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc7 + + -- Andy Whitcroft Sat, 23 May 2009 23:47:24 +0100 + +linux (2.6.30-6.7) karmic; urgency=low + + [ Andy Whitcroft ] + + * Dropped: UBUNTU: SAUCE: input: Blacklist digitizers from joydev.c (now + upstream) + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc6 + + -- Andy Whitcroft Mon, 18 May 2009 18:05:54 +0100 + +linux (2.6.30-5.6) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] Enable Keyspan USB serial device firmware in kernel module + - LP: #334285 + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc5 + + -- Tim Gardner Mon, 11 May 2009 12:02:16 -0600 + +linux (2.6.30-4.5) karmic; urgency=low + + [ Colin Watson ] + + * Build-Conflict with findutils (= 4.4.1-1ubuntu1), to avoid + /usr/include/asm/* going missing + - LP: #373214 + + -- Stefan Bader Fri, 08 May 2009 11:09:08 +0200 + +linux (2.6.30-3.4) karmic; urgency=low + + [ Kees Cook ] + + * SAUCE: [x86] implement cs-limit nx-emulation for ia32 + - LP: #369978 + + [ Stefan Bader ] + + * SAUCE: input: Blacklist digitizers from joydev.c + - LP: #300143 + + -- Tim Gardner Fri, 01 May 2009 14:00:42 -0600 + +linux (2.6.30-2.3) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] Enabled CC_STACKPROTECTOR=y for all x86en + - LP: #369152 + * SAUCE: Default to i915_modeset=0 if CONFIG_DRM_I915_KMS=y + * [Config] CONFIG_DRM_I915_KMS=y + * [Config] Set CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR to appropriate ARCH + minimums + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc4 + + -- Tim Gardner Thu, 30 Apr 2009 09:17:05 -0600 + +linux (2.6.30-1.2) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] armel: disable staging drivers, fixes FTBS + * [Config] armel imx51: Disable CONFIG_MTD_NAND_MXC, fixes FTBS + + [ Upstream Kernel Changes ] + + * mpt2sas: Change reset_type enum to avoid namespace collision. + Submitted upstream. + + -- Tim Gardner Tue, 28 Apr 2009 16:54:41 -0600 + +linux (2.6.30-1.1) karmic; urgency=low + + * Initial release after rebasing against v2.6.30-rc3 + + -- Tim Gardner Thu, 12 Mar 2009 19:16:07 -0600 --- linux-3.13.0.orig/debian.master/changelog.historical +++ linux-3.13.0/debian.master/changelog.historical @@ -0,0 +1,4171 @@ + +linux (2.6.28-9.31) jaunty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: cpufreq-nforce2: probe failures are not errors + - LP: #332170 + * SAUCE: mmc: add MODALIAS linkage for MMC/SD devices + - LP: #30335 + * remove test-suspend script + - LP: #333856 + + [ Kees Cook ] + + * handle relative paths in modules.dep + Fixes 2.6.28-9.30 FTBS. 
+ + [ Upstream Kernel Changes ] + + * ricoh_mmc: Handle newer models of Ricoh controllers + + -- Tim Gardner Wed, 11 Mar 2009 08:19:24 -0600 + +linux (2.6.28-9.30) jaunty; urgency=low + + [ Amit Kucheria ] + + * ARM:mx51 Add SoC and board support for mx51 platforms + * ARM:mx51 Add CONFIG_ARCH_MXC_CANONICAL to disable parts of Freescale's + code + * MMC: Add support for 8-bit cards + * Add ARM:MX51 SoC support to the build system + * ARM: Make ARM arch aware of ubuntu/ drivers + * ARM: Add imx51 configuration + * Disable d-i modules for imx51 and mv78xx0 + * Disable Apparmor on boot for ARM + * Updating imx51 config + + [ Jason Liu ] + + * Do not use OOB with MLC NAND + + [ Richard Zhu ] + + * Support the eMMC4.3 card + + [ Rob Herring ] + + * ARM: Add more cache memory types macros + + [ Tim Gardner ] + + * Set CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y for i386/amd64/lpia + + [ Manoj Iyer ] + + * Enable CONFIG_RTL8187SE=m + + [ Upstream Kernel Changes ] + + * USB: EHCI: slow down ITD reuse + - LP: #329437 + + -- Tim Gardner Sun, 08 Mar 2009 14:14:15 -0600 + +linux (2.6.28-9.29) jaunty; urgency=low + + [ Andy Whitcroft ] + + * link-headers -- only link directories which do not already exist + - LP: #315252 + + [ Daniel Marjamäki ] + + * SAUCE: (drop after 2.6.28) netxen: fix memory leak in + drivers/net/netxen_nic_init.c + - LP: #330813 + + [ Dhananjay Phadke ] + + * SAUCE: (drop after 2.6.28) netxen: fix endianness in firmware commands + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: fix ipv6 offload and tx cleanup + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: fix link speed reporting for some + boards + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: firmware init fix + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: cleanup mac list on driver unload + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: hold tx lock while sending firmware + commands + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: handle dma mapping failures + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: avoid invalid iounmap + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: include ipv6.h (fixes build failure) + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: fix vlan tso/checksum offload + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: reduce memory footprint + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: revert jumbo ringsize + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: fix msi-x interrupt handling + - LP: #330813 + * SAUCE: (drop after 2.6.28) netxen: remove pcie workaround + - LP: #330813 + + [ Hannes Eder ] + + * SAUCE: (drop after 2.6.28) drivers/net/netxen: fix sparse warnings: use + NULL pointer instead of plain integer + - LP: #330813 + + [ Huaxu Wan ] + + * SAUCE: report rfkill changes event if interface is down + - LP: #193970 + + [ Tim Gardner ] + + * MV78XX0 must specify a target in the vars definition. 
+ + [ Upstream Kernel Changes ] + + * Revert "ext4: wait on all pending commits in ext4_sync_fs()" + * jbd2: Fix return value of jbd2_journal_start_commit() + * jbd2: Avoid possible NULL dereference in + jbd2_journal_begin_ordered_truncate() + * ext4: Fix to read empty directory blocks correctly in 64k + * ext4: Fix lockdep warning + * ext4: Initialize preallocation list_head's properly + * ext4: Implement range_cyclic in ext4_da_writepages instead of + write_cache_pages + * ext4: Fix NULL dereference in ext4_ext_migrate()'s error handling + * ext4: Add fallback for find_group_flex + * ext4: Fix deadlock in ext4_write_begin() and ext4_da_write_begin() + * Added mv78xx0 flavor + + -- Tim Gardner Fri, 06 Mar 2009 06:13:31 -0700 + +linux (2.6.28-8.28) jaunty; urgency=low + + [ Alexey Starikovskiy ] + + * SAUCE: ACPI: EC: Limit workaround for ASUS notebooks even more + - LP: #288385 + + [ Scott James Remnant ] + + * SAUCE: Auto-load esp module when device opened. + * SAUCE: Auto-load bridge module when socket opened. + * SAUCE: Auto-load af_netlink module when socket opened. + * SAUCE: Auto-load wanrouter module when socket opened. + * SAUCE: Auto-load ip_queue module when socket opened. + * SAUCE: Auto-load ip6_queue module when socket opened. + * SAUCE: Auto-load cn module when socket opened. + * SAUCE: Auto-load scsi_transport_iscsi module when socket opened. + * SAUCE: Auto-load ftl module when device opened. + * SAUCE: Auto-load pcd module when device opened. + * SAUCE: Auto-load pf module when device opened. + * SAUCE: Auto-load nftl module when device opened. + * SAUCE: Auto-load mousedev module when psaux device opened. + * SAUCE: Auto-load mousedev module when /dev/input/mice opened. + * SAUCE: Auto-load rng-core module when device opened. + * SAUCE: Auto-load openprom module when device opened. + * SAUCE: Auto-load applicom module when device opened. + * SAUCE: Auto-load toshiba module when device opened. + * SAUCE: Auto-load cyclades module when device opened. + * SAUCE: Auto-load riscom8 module when device opened. + * SAUCE: Auto-load specialix module when device opened. + * SAUCE: Auto-load videodev module when device opened. + * SAUCE: Auto-load i2c_dev module when device opened. + * SAUCE: Auto-load mtdchar module when device opened. + * SAUCE: Auto-load pt module when device opened. + * SAUCE: Auto-load pg module when device opened. + * SAUCE: Auto-load cdc_acm module when device opened. + * SAUCE: Auto-load msr module when device opened. + * SAUCE: Auto-load cpuid module when device opened. + * SAUCE: quickcam: Enable double-buffering by default + * SAUCE: libata: Ignore HPA by default. + * SAUCE: hostap: Change initial operation mode to managed (infra) + * SAUCE: floppy: Provide a PnP device table in the module. + - LP: #255651 + * SAUCE: Auto-load mwave module when device opened. + * Build CONFIG_FUSE_FS into kernel, not as module. 
+ + [ Stefan Bader ] + + * Enable build of ext4 as a module on LPIA + - LP: #331848 + + [ Tim Gardner ] + + * Update configs to fix LPIA FTBS + + -- Tim Gardner Thu, 05 Mar 2009 10:43:24 -0700 + +linux (2.6.28-8.27) jaunty; urgency=low + + [ Amit Kucheria ] + + * Updating configs (arm:ixp4xx) + + [ Andy Whitcroft ] + + * SAUCE: enable Intel HDMI output + + [ Manoj Iyer ] + + * SAUCE: Added quirk for Linksys WUSB600N USB wifi-n networking adapter + - LP: #323473 + + [ Steve Beattie ] + + * fix apparmor memory leak on unlinked file ops + - LP: #329489 + + [ Tim Gardner ] + + * SAUCE: Dell XPS710 reboot quirk + - LP: #323592 + * SAUCE: (drop after 2.6.28) ieee80211: Add infrastructure to obsolete + scan results + - LP: #336055 + * Add modules.order to the linux-image package. + + [ Upstream Kernel Changes ] + + * iwlwifi: fix time interval misuse in iwl_poll_{direct_}bit + * x86: only scan the root bus in early PCI quirks + - LP: #267295 + * ALSA: hda - Intel HDMI audio support + * ALSA: hda - Fix unused function in patch_intelhdmi.c + * ALSA: handle SiI1392 HDMI codec in patch_intelhdmi.c + * ALSA: hda-intel: reorder HDMI audio enabling sequence + * ALSA: introduce snd_print_pcm_rates() + * ALSA: create hda_eld.c for ELD routines and proc interface + * ALSA: ELD proc interface for HDMI sinks + * ALSA: hda: make standalone hdmi_fill_audio_infoframe() + * ALSA: hda: make global snd_print_channel_allocation() + * ALSA: hda: HDMI channel allocations for audio infoframe + * ALSA: hda: HDMI channel mapping cleanups + * ALSA: hda: minor code cleanups + * ALSA: hda: rename sink_eld to hdmi_eld + * ALSA: hda - Release ELD proc file + * ALSA: hda - minor HDMI code cleanups + * ALSA: hda - report selected CA index for Audio InfoFrame + * ALSA: hda - Add Intel vendor id string + + -- Tim Gardner Wed, 25 Feb 2009 14:23:46 -0700 + +linux (2.6.28-8.26) jaunty; urgency=low + + [ Amit Kucheria ] + + * Updating configs (armel:ixp4xx) + - LP: #331510 + + [ Tim Gardner ] + + * Add more missing modules + + -- Tim Gardner Tue, 24 Feb 2009 06:58:53 -0700 + +linux (2.6.28-8.25) jaunty; urgency=low + + [ Scott James Remnant ] + + * SAUCE: Prefer powernow-k8 to acpi-cpufreq + * Change CONFIG_X86_P4_CLOCKMOD to be a module again. 
+ + [ Tim Gardner ] + + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Initialize the new + group descriptor when resizing the filesystem" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Add sanity check + to make_indexed_dir" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: only use + i_size_high for regular files" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Add sanity checks + for the superblock before mounting the filesystem" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Fix + s_dirty_blocks_counter if block allocation failed with nodelalloc" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Init the complete + page while building buddy cache" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Don't allow new + groups to be added during block allocation" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: mark the + blocks/inode bitmap beyond end of group as used" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Use new + buffer_head flag to check uninit group bitmaps initialization" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Fix the race + between read_inode_bitmap() and ext4_new_inode()" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Fix race between + read_block_bitmap() and mark_diskspace_used()" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: don't use blocks + freed but not yet committed in buddy cache init" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: cleanup mballoc + header files" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Use + EXT4_GROUP_INFO_NEED_INIT_BIT during resize" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Add blocks added + during resize to bitmap" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Don't overwrite + allocation_context ac_status" + * Revert "SAUCE: (revert before 2.6.28.y update) jbd2: Add barrier not + supported test to journal_wait_on_commit_record" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Widen type of + ext4_sb_info.s_mb_maxs[]" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: avoid ext4_error + when mounting a fs with a single bg" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Fix the delalloc + writepages to allocate blocks at the right offset." 
+ * Revert "SAUCE: (revert before 2.6.28.y update) ext4: tone down + ext4_da_writepages warnings" + * Revert "SAUCE: (revert before 2.6.28.y update) ext4: Add support for + non-native signed/unsigned htree hash algorithms" + * Enabled X86_ACPI_CPUFREQ=y + + [ Upstream Kernel Changes ] + + * ath9k: quiet harmless ForceXPAon messages + - LP: #321474 + * [WATCHDOG] iTCO_wdt: fix SMI_EN regression 2 + - LP: #314050 + * pid: implement ns_of_pid + * mqueue: fix si_pid value in mqueue do_notify() + * powerpc/vsx: Fix VSX alignment handler for regs 32-63 + * sata_nv: give up hardreset on nf2 + * Fix Intel IOMMU write-buffer flushing + * SCSI: libiscsi: fix iscsi pool leak + * x86/cpa: make sure cpa is safe to call in lazy mmu mode + * sched: SCHED_OTHER vs SCHED_IDLE isolation + * x86, vm86: fix preemption bug + * Add support for VT6415 PCIE PATA IDE Host Controller + * ext2/xip: refuse to change xip flag during remount with busy inodes + * 3c505: do not set pcb->data.raw beyond its size + * Bluetooth: Fix TX error path in btsdio driver + * ext4: Add support for non-native signed/unsigned htree hash algorithms + * ext4: tone down ext4_da_writepages warnings + * ext4: Fix the delalloc writepages to allocate blocks at the right + offset. + * ext4: avoid ext4_error when mounting a fs with a single bg + * ext4: Widen type of ext4_sb_info.s_mb_maxs[] + * jbd2: Add barrier not supported test to journal_wait_on_commit_record + * ext4: Don't overwrite allocation_context ac_status + * ext4: Add blocks added during resize to bitmap + * ext4: Use EXT4_GROUP_INFO_NEED_INIT_BIT during resize + * ext4: cleanup mballoc header files + * ext4: don't use blocks freed but not yet committed in buddy cache init + * ext4: Fix race between read_block_bitmap() and mark_diskspace_used() + * ext4: Fix the race between read_inode_bitmap() and ext4_new_inode() + * ext4: Use new buffer_head flag to check uninit group bitmaps + initialization + * ext4: mark the blocks/inode bitmap beyond end of group as used + * ext4: Don't allow new groups to be added during block allocation + * ext4: Init the complete page while building buddy cache + * ext4: Fix s_dirty_blocks_counter if block allocation failed with + nodelalloc + * ext4: Add sanity checks for the superblock before mounting the + filesystem + * ext4: only use i_size_high for regular files + * ext4: Add sanity check to make_indexed_dir + * ext4: Initialize the new group descriptor when resizing the filesystem + * Fix longstanding "error: storage size of '__mod_dmi_device_table' isn't + known" + * Linux 2.6.28.7 + + -- Tim Gardner Thu, 19 Feb 2009 06:45:55 -0700 + +linux (2.6.28-8.24) jaunty; urgency=low + + [ Scott James Remnant ] + + * Change CPU_FREQ_DEFAULT_GOV_ONDEMAND to y + * SAUCE: Link acpi-cpufreq.o first + + [ Tim Gardner ] + + * Build in CPU Frequency scaling drivers + + -- Tim Gardner Wed, 18 Feb 2009 06:12:24 -0700 + +linux (2.6.28-8.23) jaunty; urgency=low + + [ Andy Whitcroft ] + + * include the kernel configuration in the sub-flavour images + - LP: #328859 + + [ Tim Gardner ] + + * Revert "SAUCE: (drop after 2.6.28) [eCryptfs] Regression in unencrypted + filename symlinks" in favor of upstream commit. 
+ * Fix compile issues with qc-usb + * SAUCE: (remove after 2.6.28) V4L/DVB (10216): saa7127: fix broken + S-Video with saa7129 + - LP: #329267 + + [ Upstream Kernel Changes ] + + * Subject:SAUCE: LP#193970 iwlagn: fix hw-rfkill while the interface is + down + - LP: #193970 + * x86, vmi: put a missing paravirt_release_pmd in pgd_dtor + * nbd: fix I/O hang on disconnected nbds + * mac80211: restrict to AP in outgoing interface heuristic + * w1: w1 temp calculation overflow fix + * zd1211rw: adding 0ace:0xa211 as a ZD1211 device + * zd1211rw: treat MAXIM_NEW_RF(0x08) as UW2453_RF(0x09) for TP-Link + WN322/422G + * parport: parport_serial, don't bind netmos ibm 0299 + * syscall define: fix uml compile bug + * kernel-doc: fix syscall wrapper processing + * Fix page writeback thinko, causing Berkeley DB slowdown + * write-back: fix nr_to_write counter + * writeback: fix break condition + * mm: rearrange exit_mmap() to unlock before arch_exit_mmap + * powerpc/fsl-booke: Fix mapping functions to use phys_addr_t + * lockd: fix regression in lockd's handling of blocked locks + * sctp: Fix crc32c calculations on big-endian arhes. + * sctp: Correctly start rtx timer on new packet transmissions. + * sctp: Properly timestamp outgoing data chunks for rtx purposes + * net: Fix frag_list handling in skb_seq_read + * net: Fix OOPS in skb_seq_read(). + * drivers/net/skfp: if !capable(CAP_NET_ADMIN): inverted logic + * ipv4: fix infinite retry loop in IP-Config + * net: Fix userland breakage wrt. linux/if_tunnel.h + * net: packet socket packet_lookup_frame fix + * packet: Avoid lock_sock in mmap handler + * sungem: Soft lockup in sungem on Netra AC200 when switching interface + up + * udp: Fix UDP short packet false positive + * udp: increments sk_drops in __udp_queue_rcv_skb() + * ipv6: Disallow rediculious flowlabel option sizes. + * ipv6: Copy cork options in ip6_append_data + * net: 4 bytes kernel memory disclosure in SO_BSDCOMPAT gsopt try #2 + * sky2: fix hard hang with netconsoling and iface going up + * tun: Add some missing TUN compat ioctl translations. + * tun: Fix unicast filter overflow + * virtio_net: Fix MAX_PACKET_LEN to support 802.1Q VLANs + * tcp: splice as many packets as possible at once + * tcp: Fix length tcp_splice_data_recv passes to skb_splice_bits. + * sparc: Enable syscall wrappers for 64-bit (CVE-2009-0029) + * sparc64: Annotate sparc64 specific syscalls with SYSCALL_DEFINEx() + * ALSA: hda - Add missing terminator in slave dig-out array + * ALSA: mtpav - Fix initial value for input hwport + * HID: adjust report descriptor fixup for MS 1028 receiver + * ide/libata: fix ata_id_is_cfa() (take 4) + * libata: fix EH device failure handling + * netfilter: fix tuple inversion for Node information request + * netfilter: xt_sctp: sctp chunk mapping doesn't work + * x86: microcode_amd: fix wrong handling of equivalent CPU id + * ide-cd: fix DMA for non bio-backed requests + * net: Fix data corruption when splicing from sockets. 
+ * Linux 2.6.28.6 + * eCryptfs: Regression in unencrypted filename symlinks + + -- Tim Gardner Mon, 16 Feb 2009 06:43:51 -0700 + +linux (2.6.28-8.22) jaunty; urgency=low + + [ Amit Kucheria ] + + * Remove perm-blacklist + + [ Andy Whitcroft ] + + * SAUCE: psmouse/synaptics: ensure we reset the device on resume + - LP: #317270 + + [ Tim Gardner ] + + * Add lpia to getabi script + * SAUCE: tracer for sreadahead + + -- Amit Kucheria Fri, 13 Feb 2009 15:23:21 +0200 + +linux (2.6.28-8.21) jaunty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: switch the Asus Pundit P1-AH2 to old acpi sleep ordering + - LP: #327267 + + [ Tim Gardner ] + + * Added LPIA arch support + * Added libdrm-dev as a 'Replaces' to linux-libc-dev + * SAUCE: LPIA support for 9202 HDA Sigmatel codec + * SAUCE: Add an X86_LPIA Kconfig option + * SAUCE: UHCI USB quirk for resume + * SAUCE: LPIA Reboot fix for Intel Crownbeach development boards + * SAUCE: LPIA Logical reset of USB port on resume + * Set CONFIG_WIRELESS_OLD_REGULATORY=n, added wireless-crda + as an install dependency. + + [ Upstream Kernel Changes ] + + * Revert "Revert "x86, early_ioremap: fix fencepost error"" + - LP: #312554 + * drm/i915: capture last_vblank count at IRQ uninstall time too + - LP: #320813 + * drm/i915: add get_vblank_counter function for GM45 + - LP: #320813 + * Staging: comedi: fix Kbuild + * Staging: meilhaus: fix Kbuild + * Staging: android: binder: fix arm build errors + * Staging: android: timed_gpio: Fix build to build on kernels after + 2.6.25. + * Staging: android: fix build error on 64bit boxes + * Staging: android: Add lowmemorykiller documentation. + * Staging: android: task_get_unused_fd_flags: fix the wrong usage of + tsk->signal + * staging: agnx: drivers/staging/agnx/agnx.h needs + * Staging: usbip: usbip_start_threads(): handle kernel_thread failure + * Staging: poch: fix verification of memory area + * Documentation: move DMA-mapping.txt to Doc/PCI/ + * sgi-xp: fix writing past the end of kzalloc()'d space + * do_wp_page: fix regression with execute in place + * wait: prevent exclusive waiter starvation + * shm: fix shmctl(SHM_INFO) lockup with !CONFIG_SHMEM + * revert "rlimit: permit setting RLIMIT_NOFILE to RLIM_INFINITY" + * prevent kprobes from catching spurious page faults + * sound: usb-audio: handle wMaxPacketSize for FIXED_ENDPOINT devices + * md: Ensure an md array never has too many devices. + * md: Fix a bug in linear.c causing which_dev() to return the wrong + device. + * ACPI: Enable bit 11 in _PDC to advertise hw coord + * ACPI: dock: Don't eval _STA on every show_docked sysfs read + * ieee1394: ohci1394: increase AT req. retries, fix ack_busy_X from + Panasonic camcorders and others + * firewire: ohci: increase AT req. retries, fix ack_busy_X from Panasonic + camcorders and others + * firewire: sbp2: fix DMA mapping leak on the failure path + * firewire: sbp2: add workarounds for 2nd and 3rd generation iPods + * ieee1394: sbp2: add workarounds for 2nd and 3rd generation iPods + * module: remove over-zealous check in __module_get() + * x86: APIC: enable workaround on AMD Fam10h CPUs + * eeepc-laptop: fix oops when changing backlight brightness during + eeepc-laptop init + * eeepc-laptop: Add support for extended hotkeys + * e1000: fix bug with shared interrupt during reset + * e1000: Fix PCI enable to honor the need_ioport flag + * agp/intel: Fix broken ® symbol in device name. 
+ * ALSA: hda - Add quirk for FSC Amilo Xi2550 + * ALSA: hda - Add missing COEF initialization for ALC887 + * ALSA: hda - Add missing initialization for ALC272 + * asus_acpi: Add R1F support + * panasonic-laptop: fix X[ ARRAY_SIZE(X) ] + * ACPI: Skip the first two elements in the _BCL package + * ACPI: proc_dir_entry 'video/VGA' already registered + * ACPI: disable ACPI cleanly when bad RSDP found + * ACPICA: Fix table entry truncation calculation + * PCI: properly clean up ASPM link state on device remove + * PCI: return error on failure to read PCI ROMs + * seq_file: move traverse so it can be used from seq_read + * seq_file: fix big-enough lseek() + read() + * serial: set correct baud_base for Oxford Semiconductor Ltd EXSYS + EX-41092 Dual 16950 Serial adapter + * Add support for '8-port RS-232 MIC-3620 from advantech' + * mm: fix error case in mlock downgrade reversion + * elf core dump: fix get_user use + * ACPI: video: Fix reversed brightness behavior on ThinkPad SL series + * ipw2200: fix scanning while associated + * XFS: set b_error from bio error in xfs_buf_bio_end_io + * Revert USB: option: add Pantech cards + * USB: option: New mobile broadband modems to be supported + * USB: new id for ti_usb_3410_5052 driver + * USB: two more usb ids for ti_usb_3410_5052 + * USB: usb-storage: add Pentax to the bad-vendor list + * sata_via: Add VT8261 support + * nbd: do not allow two clients at the same time + * sctp: Fix another socket race during accept/peeloff + * Linux 2.6.28.5 + + -- Tim Gardner Mon, 09 Feb 2009 16:11:28 -0700 + +linux (2.6.28-7.20) jaunty; urgency=low + + [ Tim Gardner ] + + * SAUCE: Input: atkbd - Samsung NC10 key repeat fix + + [ Upstream Kernel Changes ] + + * Manually revert "mlock: downgrade mmap sem while populating mlocked + regions" + * xen: make sysfs files behave as their names suggest + * sata_mv: fix 8-port timeouts on 508x/6081 chips + * m68knommu: set NO_DMA + * PCI/MSI: bugfix/utilize for msi_capability_init() + * x86: use early clobbers in usercopy*.c + * netfilter: ctnetlink: fix scheduling while atomic + * orinoco: move kmalloc(..., GFP_KERNEL) outside spinlock in + orinoco_ioctl_set_genie + * fbdev/atyfb: Fix DSP config on some PowerMacs & PowerBooks + * kmalloc: return NULL instead of link failure + * sata_nv: rename nv_nf2_hardreset() + * sata_nv: fix MCP5x reset + * sata_nv: ck804 has borked hardreset too + * Fix memory corruption in console selection + * Add enable_ms to jsm driver + * nfsd: only set file_lock.fl_lmops in nfsd4_lockt if a stateowner is + found + * nfsd: Ensure nfsv4 calls the underlying filesystem on LOCKT + * iwlwifi: fix rs_get_rate WARN_ON() + * p54: fix lm87 checksum endianness + * p54: fix p54_read_eeprom to cope with tx_hdr_len + * p54usb: rewriting rx/tx routines to make use of usb_anchor's facilities + * minstrel: fix warning if lowest supported rate index is not 0 + * PCI: irq and pci_ids patch for Intel Tigerpoint DeviceIDs + * cpuidle: Add decaying history logic to menu idle predictor + * ACPI: Avoid array address overflow when _CST MWAIT hint bits are set + * video: always update the brightness when poking "brightness" + * Newly inserted battery might differ from one just removed, so update of + battery info fields is required. 
+ * ACPI: Do not modify SCI_EN directly + * dlm: initialize file_lock struct in GETLK before copying conflicting + lock + * sata_mv: Fix chip type for Hightpoint RocketRaid 1740/1742 + * ACPICA: Allow multiple backslash prefix in namepaths + * Linux 2.6.28.4 + + -- Tim Gardner Sat, 07 Feb 2009 18:53:42 -0700 + +linux (2.6.28-7.19) jaunty; urgency=low + + * Fix missing modules FTBS + + -- Tim Gardner Thu, 05 Feb 2009 15:28:15 -0700 + +linux (2.6.28-7.18) jaunty; urgency=low + + [ Alok Kataria ] + + * SAUCE: (drop after 2.6.29) x86: add a synthetic TSC_RELIABLE feature + bit + - LP: #319945 + * SAUCE: (drop after 2.6.29) x86: add X86_FEATURE_HYPERVISOR feature bit + - LP: #319945 + * SAUCE: (drop after 2.6.29) x86: Hypervisor detection and get tsc_freq + from hypervisor + - LP: #319945 + * SAUCE: (drop after 2.6.29) x86: Add a synthetic TSC_RELIABLE feature + bit. + - LP: #319945 + * SAUCE: (drop after 2.6.29) x86: Skip verification by the watchdog for + TSC clocksource. + - LP: #319945 + * SAUCE: (drop after 2.6.29) x86: VMware: Fix vmware_get_tsc code + - LP: #319945 + * SAUCE: (drop after 2.6.29) x86: vmware: look for DMI string in the + product serial key + - LP: #319945 + + [ Andy Whitcroft ] + + * SAUCE: toshiba_acpi -- pull in current -dev version of driver + - LP: #269831 + * SAUCE: toshiba_acpi -- add acpi hotkey kernel thread + - LP: #269831 + * move toshiba laptops back from tlsup to toshiba_acpi + - LP: #269831 + + [ Aneesh Kumar K.V ] + + * SAUCE: (revert before 2.6.28.y update) ext4: Fix the delalloc + writepages to allocate blocks at the right offset. + * SAUCE: (revert before 2.6.28.y update) ext4: avoid ext4_error when + mounting a fs with a single bg + * SAUCE: (revert before 2.6.28.y update) ext4: Don't overwrite + allocation_context ac_status + * SAUCE: (revert before 2.6.28.y update) ext4: Add blocks added during + resize to bitmap + * SAUCE: (revert before 2.6.28.y update) ext4: Use + EXT4_GROUP_INFO_NEED_INIT_BIT during resize + * SAUCE: (revert before 2.6.28.y update) ext4: cleanup mballoc header + files + * SAUCE: (revert before 2.6.28.y update) ext4: don't use blocks freed but + not yet committed in buddy cache init + * SAUCE: (revert before 2.6.28.y update) ext4: Fix race between + read_block_bitmap() and mark_diskspace_used() + * SAUCE: (revert before 2.6.28.y update) ext4: Fix the race between + read_inode_bitmap() and ext4_new_inode() + * SAUCE: (revert before 2.6.28.y update) ext4: Use new buffer_head flag + to check uninit group bitmaps initialization + * SAUCE: (revert before 2.6.28.y update) ext4: mark the blocks/inode + bitmap beyond end of group as used + * SAUCE: (revert before 2.6.28.y update) ext4: Don't allow new groups to + be added during block allocation + * SAUCE: (revert before 2.6.28.y update) ext4: Init the complete page + while building buddy cache + * SAUCE: (revert before 2.6.28.y update) ext4: Fix s_dirty_blocks_counter + if block allocation failed with nodelalloc + + [ Hannes Eder ] + + * SAUCE: (drop after 2.6.29) x86: vmware - fix sparse warnings + - LP: #319945 + + [ Luke Yelavich ] + + * hid modules have hyphens instead of underscores in their names + + [ Mark Fasheh ] + + * SAUCE: (revert before 2.6.28.y update) jbd2: Add BH_JBDPrivateStart + + [ Theodore Ts'o ] + + * SAUCE: (revert before 2.6.28.y update) ext4: Add support for non-native + signed/unsigned htree hash algorithms + * SAUCE: (revert before 2.6.28.y update) ext4: tone down + ext4_da_writepages warnings + * SAUCE: (revert before 2.6.28.y update) jbd2: Add barrier not 
supported + test to journal_wait_on_commit_record + * SAUCE: (revert before 2.6.28.y update) ext4: Add sanity checks for the + superblock before mounting the filesystem + * SAUCE: (revert before 2.6.28.y update) ext4: only use i_size_high for + regular files + * SAUCE: (revert before 2.6.28.y update) ext4: Add sanity check to + make_indexed_dir + * SAUCE: (revert before 2.6.28.y update) jbd2: On a __journal_expect() + assertion failure printk "JBD2", not "EXT3-fs" + * SAUCE: (revert before 2.6.28.y update) ext4: Initialize the new group + descriptor when resizing the filesystem + + [ Tyler Hicks ] + + * SAUCE: (drop after 2.6.28) [eCryptfs] Regression in unencrypted + filename symlinks + - LP: #322532 + + [ Upstream Kernel Changes ] + + * Input: atkbd - broaden the Dell DMI signatures + - LP: #261721 + * ti_usb_3410_5052: support alternate firmware + * ath5k: fix mesh point operation + * mac80211: decrement ref count to netdev after launching mesh discovery + * inotify: clean up inotify_read and fix locking problems + * fuse: destroy bdi on umount + * fuse: fix missing fput on error + * fuse: fix NULL deref in fuse_file_alloc() + * x86, mm: fix pte_free() + * klist.c: bit 0 in pointer can't be used as flag + * sysfs: fix problems with binary files + * x86: fix page attribute corruption with cpa() + * USB: fix toggle mismatch in disable_endpoint paths + * sound: virtuoso: enable UART on Xonar HDAV1.3 + * USB: usbmon: Implement compat_ioctl + * USB: fix char-device disconnect handling + * USB: storage: add unusual devs entry + * alpha: nautilus - fix compile failure with gcc-4.3 + * alpha: fix vmalloc breakage + * resources: skip sanity check of busy resources + * rtl8187: Add termination packet to prevent stall + * it821x: Add ultra_mask quirk for Vortex86SX + * libata: pata_via: support VX855, future chips whose IDE controller use + 0x0571 + * serial_8250: support for Sealevel Systems Model 7803 COMM+8 + * drm: stash AGP include under the do-we-have-AGP ifdef + * Fix OOPS in mmap_region() when merging adjacent VM_LOCKED file segments + * bnx2x: Block nvram access when the device is inactive + * ext3: Add sanity check to make_indexed_dir + * rtl8187: Fix error in setting OFDM power settings for RTL8187L + * epoll: drop max_user_instances and rely only on max_user_watches + * gpiolib: fix request related issue + * sgi-xpc: Remove NULL pointer dereference. 
+ * sgi-xpc: ensure flags are updated before bte_copy
+ * include/linux: Add bsg.h to the Kernel exported headers
+ * ALSA: hda - Fix PCM reference NID for STAC/IDT analog outputs
+ * ALSA: hda - add another MacBook Pro 4, 1 subsystem ID
+ * ALSA: hda - Add quirk for HP DV6700 laptop
+ * crypto: authenc - Fix zero-length IV crash
+ * crypto: ccm - Fix handling of null assoc data
+ * x86, pat: fix reserve_memtype() for legacy 1MB range
+ * x86, pat: fix PTE corruption issue while mapping RAM using /dev/mem
+ * PCI hotplug: fix lock imbalance in pciehp
+ * dmaengine: fix dependency chaining
+ * NET: net_namespace, fix lock imbalance
+ * relay: fix lock imbalance in relay_late_setup_files
+ * Linux 2.6.28.3
+ * ALSA: Enable SPDIF output on ALC655
+ * ALSA: hda - Add ASUS V1Sn support
+ * ALSA: hda - support detecting HD Audio devices with PCI class code
+ * ALSA: hda: alc883 model for ASUS P5Q-EM boards
+ * ALSA: hda - Add quirk for MSI 7260 mobo
+ * ALSA: hda - Add quirk for Sony VAIO VGN-SR19XN
+ * ALSA: oxygen: add Claro halo support
+ * ALSA: hda - Add a new function to seek for a codec ID
+ * ALSA: patch_sigmatel: Add missing Gateway entries and autodetection
+ * ALSA: hda - More fixes on Gateway entries
+ * ALSA: hda - Add MCP67 HDMI support
+ * ALSA: hda - fix name for ALC1200
+ * ALSA: hda - Add HP Acacia detection
+ * ALSA: hda - Add quirk for HP 2230s
+ * ALSA: hda - Add quirk for Dell Inspiron Mini9
+ * ALSA: hda - add support for Intel DX58SO board
+ * ALSA: hda - Fix silent headphone output on Panasonic CF-74
+ * ALSA: USB quirk for Logitech Quickcam Pro 9000 name
+ * ALSA: hda - add quirks for some 82801H variants to use ALC883_MITAC
+
+ [ Yasunori Goto ]
+
+ * SAUCE: (revert before 2.6.28.y update) ext4: Widen type of
+ ext4_sb_info.s_mb_maxs[]
+
+ -- Tim Gardner Mon, 02 Feb 2009 23:07:13 -0700
+
+linux (2.6.28-6.17) jaunty; urgency=low
+
+ [ Amit Kucheria ]
+
+ * Updating configs: ARMEL/versatile
+
+ -- Amit Kucheria Fri, 30 Jan 2009 13:36:59 +0200
+
+linux (2.6.28-6.16) jaunty; urgency=low
+
+ [ Luke Yelavich ]
+
+ * Add hid quirks to input-modules udeb
+
+ [ Tim Gardner ]
+
+ * Revert "[arm] Fix kexec on ARM by properly calling the relocation
+ function". This patch was deemed 'bogus' by Russell King on the
+ ARM mailing list.
+
+ [ Upstream Kernel Changes ]
+
+ * PCI: keep ASPM link state consistent throughout PCIe hierarchy
+ * security: introduce missing kfree
+ * rt2x00: add USB ID for the Linksys WUSB200.
+ * p54usb: Add USB ID for Thomson Speedtouch 121g + * lib/idr.c: use kmem_cache_zalloc() for the idr_layer cache + * sgi-xp: eliminate false detection of no heartbeat + * sched: fix update_min_vruntime + * IA64: Turn on CONFIG_HAVE_UNSTABLE_CLOCK + * sound: virtuoso: do not overwrite EEPROM on Xonar D2/D2X + * ALSA: hda - Add quirk for another HP dv5 + * ALSA: hda - Fix HP dv5 mic input + * ALSA: hda - Don't reset HP pinctl in patch_sigmatel.c + * ALSA: hda - make laptop-eapd model back for AD1986A + * drivers/net/irda/irda-usb.c: fix buffer overflow + * usb-storage: add last-sector hacks + * usb-storage: set CAPACITY_HEURISTICS flag for bad vendors + * pkt_sched: sch_htb: Fix deadlock in hrtimers triggered by HTB + * ipv6: Fix fib6_dump_table walker leak + * sctp: Avoid memory overflow while FWD-TSN chunk is received with bad + stream ID + * pkt_sched: cls_u32: Fix locking in u32_change() + * r6040: fix wrong logic in mdio code + * r6040: save and restore MIER correctly in the interrupt routine + * r6040: bump release number to 0.19 + * tcp: don't mask EOF and socket errors on nonblocking splice receive + * p54usb: fix traffic stalls / packet drop + * netfilter: x_tables: fix match/target revision lookup + * netfilter: ebtables: fix inversion in match code + * netfilter: nf_conntrack: fix ICMP/ICMPv6 timeout sysctls on big-endian + * dell_rbu: use scnprintf() instead of less secure sprintf() + * powerpc: is_hugepage_only_range() must account for both 4kB and 64kB + slices + * hwmon: (abituguru3) Fix CONFIG_DMI=n fallback to probe + * mm: write_cache_pages cyclic fix + * mm: write_cache_pages early loop termination + * mm: write_cache_pages writepage error fix + * mm: write_cache_pages integrity fix + * mm: write_cache_pages cleanups + * mm: write_cache_pages optimise page cleaning + * mm: write_cache_pages terminate quickly + * mm: write_cache_pages more terminate quickly + * mm: do_sync_mapping_range integrity fix + * mm: direct IO starvation improvement + * fs: remove WB_SYNC_HOLD + * fs: sync_sb_inodes fix + * fs: sys_sync fix + * Linux 2.6.28.2 + + -- Tim Gardner Sun, 25 Jan 2009 13:36:16 -0700 + +linux (2.6.28-5.15) jaunty; urgency=low + + [ Tim Gardner ] + + * Revert "Enabled CONFIG_PID_NS=y for i386/amd64" + Somehow this commit also reverted the 7 prior commits (which is bad). + * Enabled CONFIG_PID_NS=y for i386/amd64 (version 2) + + -- Tim Gardner Thu, 22 Jan 2009 13:48:34 -0700 + +linux (2.6.28-5.14) jaunty; urgency=low + + [ Ben Collins ] + + * lirc_gpio: Forward ported to current kernel (jaunty) + * configs: Enable LIRC_GPIO on 64-bit/32-bit x86 + - LP: #298791 + + [ Jeff Layton ] + + * SAUCE: cifs: make sure we allocate enough storage for socket address + - LP: #318565 + + [ Tim Gardner ] + + * check-abi: Return success when ABI skip is requested and no ABI files exist. + This ought to fix the armel FTBS. + + -- Tim Gardner Thu, 22 Jan 2009 06:42:49 -0700 + +linux (2.6.28-5.13) jaunty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: don't use buggy _BCL/_BCM/_BQC for backlight control" + + [ Tim Gardner ] + + * Fix udeb generation breakage caused by the previous armel versatile + flavour config update. 
+ + -- Tim Gardner Wed, 21 Jan 2009 12:38:35 -0700 + +linux (2.6.28-5.12) jaunty; urgency=low + + [ Ante ] + + * Update drbd to 8.3.0 + + [ Dave Airlie ] + + * i915/drm: provide compat defines for userspace for certain struct + + [ Eric Anholt ] + + * drm/i915: Don't double-unpin buffers if we take a signal in + * drm/i915: Don't complain when interrupted while pinning in execbuffers. + * drm/i915: Don't allow objects to get bound while VT switched. + + [ Jani Monoses ] + + * Fix webcam having USB ID 0ac8:303b + - LP: #292086 + + [ Jesse Barnes ] + + * drm/i915: set vblank enabled flag correctly across IRQ + * drm/i915: don't enable vblanks on disabled pipes + + [ Michael Casadevall ] + + * [arm] Fix kexec on ARM by properly calling the relocation function + + [ Tim Gardner ] + + * Enabled CONFIG_PID_NS=y for i386/amd64 + * SAUCE: Increase ATA_TMOUT_PMP_SRST_WAIT to 5 seconds. + - LP: #318978 + * Update armel versatile config + - LP: #314789 + * Enabled CONFIG_RT2860=m for i386/amd64 + * Enabled CONFIG_RT2870=m for i386/amd64 + + [ Upstream Kernel Changes ] + + * Input: atkbd - add keyboard quirk for HP Pavilion ZV6100 laptop + - LP: #291878 + * ALSA: hda - Add quirk for another HP dv7 + * ALSA: hda - Add quirk for HP6730B laptop + * ALSA: caiaq - Fix Oops with MIDI + * ALSA: hda - Fix typos for AD1882 codecs + * x86: fix intel x86_64 llc_shared_map/cpu_llc_id anomolies + * x86: default to SWIOTLB=y on x86_64 + * CIFS: make sure that DFS pathnames are properly formed + * ring-buffer: prevent false positive warning + * ring-buffer: fix dangling commit race + * iwlwifi: use GFP_KERNEL to allocate Rx SKB memory + * tx493[89]ide: Fix length for __ide_flush_dcache_range + * tx4939ide: Do not use zero count PRD entry + * SCSI: eata: fix the data buffer accessors conversion regression + * USB: emi26: fix oops on load + * x86, UV: remove erroneous BAU initialization + * x86: fix incorrect __read_mostly on _boot_cpu_pda + * vmalloc.c: fix flushing in vmap_page_range() + * fs: symlink write_begin allocation context fix + * cgroups: fix a race between cgroup_clone and umount + * dm raid1: fix error count + * dm log: fix dm_io_client leak on error paths + * minix: fix add link's wrong position calculation + * md: fix bitmap-on-external-file bug. 
+ * sched_clock: prevent scd->clock from moving backwards, take #2 + * devices cgroup: allow mkfifo + * SCSI: aha152x_cs: Fix regression that keeps driver from using shared + interrupts + * ioat: fix self test for multi-channel case + * USB: isp1760: use a specific PLX bridge instead of any bdridge + * USB: isp1760: Fix probe in PCI glue code + * USB: unusual_devs.h additions for Pentax K10D + * inotify: fix type errors in interfaces + * Move compat system call declarations to compat header file + * Convert all system calls to return a long + * Rename old_readdir to sys_old_readdir + * Remove __attribute__((weak)) from sys_pipe/sys_pipe2 + * Make sys_pselect7 static + * Make sys_syslog a conditional system call + * System call wrapper infrastructure + * powerpc: Enable syscall wrappers for 64-bit + * s390: enable system call wrappers + * System call wrapper special cases + * System call wrappers part 01 + * System call wrappers part 02 + * System call wrappers part 03 + * System call wrappers part 04 + * System call wrappers part 05 + * System call wrappers part 06 + * System call wrappers part 07 + * System call wrappers part 08 + * System call wrappers part 09 + * System call wrappers part 10 + * System call wrappers part 11 + * System call wrappers part 12 + * System call wrappers part 13 + * System call wrappers part 14 + * System call wrappers part 15 + * System call wrappers part 16 + * System call wrappers part 17 + * System call wrappers part 18 + * System call wrappers part 19 + * System call wrappers part 20 + * System call wrappers part 21 + * System call wrappers part 22 + * System call wrappers part 23 + * System call wrappers part 24 + * System call wrappers part 25 + * System call wrappers part 26 + * System call wrappers part 27 + * System call wrappers part 28 + * System call wrappers part 29 + * System call wrappers part 30 + * System call wrappers part 31 + * System call wrappers part 32 + * System call wrappers part 33 + * s390 specific system call wrappers + * x86: fix RIP printout in early_idt_handler + * Fix timeouts in sys_pselect7 + * USB: another unusual_devs entry for another bad Argosy storage device + * USB: storage: extend unusual range for 067b:3507 + * USB: storage: recognizing and enabling Nokia 5200 cell phoes + * HID: fix error condition propagation in hid-sony driver + * fix switch_names() breakage in short-to-short case + * nfs: remove redundant tests on reading new pages + * eCryptfs: check readlink result was not an error before using it + * mvsas: increase port type detection delay to suit Seagate's 10k6 drive ST3450856SS 0003 + * x86: avoid theoretical vmalloc fault loop + * ath9k: enable RXing of beacons on STA/IBSS + * mm lockless pagecache barrier fix + * powerpc: Disable Collaborative Memory Manager for kdump + * ibmvfc: Delay NPIV login retry and add retries + * ibmvfc: Improve async event handling + * getrusage: RUSAGE_THREAD should return ru_utime and ru_stime + * ath5k: ignore the return value of ath5k_hw_noise_floor_calibration + * mm: fix assertion + * XFS: truncate readdir offsets to signed 32 bit values + * Linux 2.6.28.1 + * eCryptfs: Filename Encryption: Tag 70 packets + * eCryptfs: Filename Encryption: Header updates + * eCryptfs: Filename Encryption: Encoding and encryption functions + * eCryptfs: Filename Encryption: filldir, lookup, and readlink + * eCryptfs: Filename Encryption: mount option + * eCryptfs: Replace %Z with %z + * eCryptfs: Fix data types (int/size_t) + * eCryptfs: kerneldoc for ecryptfs_parse_tag_70_packet() + * 
eCryptfs: Clean up ecryptfs_decode_from_filename() + * fs/ecryptfs/inode.c: cleanup kerneldoc + * staging-p80211: Kill directly reference of netdev->priv + * staging-slicoss: Kill directly reference of netdev->priv + * staging-winbond: Kill directly reference of netdev->priv + * Staging: go7007: fixes due to video_usercopy api change + * Staging: go7007: fixes due v4l2_file_operations api change + * staging: correct dubious use of !x & y + * Staging: w35und: make wb35_probe() and wb35_disconnect() funtions static + * Staging: w35und: remove unused wb35_open() and wb35_close() functions + * Staging: w35und: use msleep() and udelay() + * Staging: w35und: remove the no-op pa_stall_execution macro + * Staging: w35und: purb typedef removal + * Staging: w35und: reg queue struct typedef removal + * Staging: w35und: wb35reg struct typedef removal + * Staging: w35und: padapter struct typedef removal + * Staging: w35und: merge wblinux struct to adapter + * Staging: w35und: wb35_probe() cleanup + * Staging: w35und: remove usb_submit_urb wrapper function + * Staging: w35und: remove usb_alloc_urb wrapper function + * w35und: remove dead code from wbusb_f.h + * Staging: w35und: remove true/false boolean macros + * Staging: w35und: OS_MEMORY_ALLOC wrapper removal + * Staging: w35und: usb_put_dev() is missing from wb35_disconnect() + * Staging: w35und: remove macro magic from MLME_GetNextPacket() + * Staging: w35und: plug memory leak in wbsoft_tx() + * Staging: w35und: move supported band initialization out of wb35_probe() + * Staging: w35und: remove timer wrappers + * Staging: w35und: remove atomic op wrappers + * Staging: w35und: remove memcpy/memcmp wrappers + * Staging: w35und: remove abs() and BIT() macros + * Staging: w35und: remove unused macros from common.h + * Staging: w35und: remove unused link status code + * Staging: w35und: #include cleanup + * Staging: w35und: remove some dead code + * Staging: w35und: move source files to one directory + * Staging: w35und: move struct wbsoft_priv to core.h and use it + * Staging: w35und: remove ->adapter from struct _HW_DATA_T + * Staging: w35und: clean up adapter.h a bit + * Staging: w35und: merge struct wb35_adapter to struct wbsoft_priv + * Staging: w35und: remove global struct ieee80211_hw + * Staging: w35und: inline DRIVER_AUTHOR and DRIVER_DESC macros + * Staging: w35und: clean up wblinux.c a bit + * Staging: w35und: remove unused ->ShutDowned member from struct + LOCAL_PARA + * Staging: w35und: move global wbsoft_enabled to struct wbsoft_priv + * Staging: w35und: move packet_came() to wb35rx.c + * Staging: w35und: remove ->skb_array from struct wbsoft_priv + * Staging: w35und: remove ->shutdown from struct wbsoft_priv + * Staging: w35und: make functions local to mds.c static + * Staging: w35und: make functions local to mlmetxrx.c static + * Staging: w35und: remove dead code from mto.c + * Staging: w35und: make functions local to wb35rx.c static + * Staging: w35und: make functions local to wb35tx.c static + * Staging: w35und: remove dead code from wbhal.c + * Staging: w35und: remove rxisr.c as dead code + * Staging: w35und: fix Kconfig + * Staging: w35und: fix config build warnings + * Staging: wlan-ng: Remove PCI/PLX/PCMCIA files. + * Staging: wlan-ng: Update Help text to mention prism3 devices. + * Staging: wlan-ng: Delete PCI/PLX/PCMCIA-specific code. + * Staging: wlan-ng: Make wlan-ng use WEXT mode by default. + * Staging: wlan-ng: Eliminate more <2.6 kernel support. + * Staging: wlan-ng: Eliminate all backwards-compatibility for <2.6.13 kernels. 
+ * Staging: wlan-ng: Eliminate a boatload of tertiaryAP-only code. + * Staging: wlan-ng: Remove AP-only code from MLME functions. + * Staging: wlan-ng: Get rid of the MTU tests in the rx conversion path. + * Staging: wlan-ng: Eliminate one more rx mtu test. + * Staging: wlan-ng: Eliminate local 'version.h' + * Staging: wlan-ng: Eliminate usage of procfs. + * Staging: wlan-ng: Use standard kernel integer (u32/s32/etc) types. + * Staging: wlan-ng: Eliminate all backwards-compatible kernel code. + * Staging: wlan-ng: Wireless Extension support is mandatory. + * Staging: wlan-ng: use WIRELESS_EXT, not CONFIG_WIRELESS_EXT + * Staging: wlan-ng: Delete a large pile of now-unused code. + * Staging: wlan-ng: Delete a pile of unused mibs. And fix WEXT SET_TXPOWER. + * Staging: wlan-ng: Consolidate wlan-ng into a single module. + * Staging: wlan-ng: Purge all MIBs not used internally. + * Staging: wlan-ng: p80211netdev.c fix netdev alloc to prevent oops on device start + * Staging: wlan-ng: prism2_usb.c always enable the card in probe_usb + * Staging: wlan-ng: hfa384x_usb.c use newest version of 384x_drvr_start + * Staging: wlan-ng: p80211wext.c add latest changes & remove extra nulls from wext_handlers + * Staging: wlan-ng: p80211wext don't set default key id twice + * Staging: wlan-ng: hfa384x_usbin_callback: check for hardware removed + * Staging: wlan-ng: p80211conv.c copy code from wlan-ng-devel branch to not drop packets + * Staging: wlan-ng: remove unused #include + * Staging: wlan-ng: p80211wext.c: use ARRAY_SIZE + * Staging: wlan-ng: fix compiler warnings + * Staging: wlan-ng: skb_p80211_to_ether() - payload_length is unsigned, check before subtraction + * Staging: at76_usb: update drivers/staging/at76_usb w/ mac80211 port + * Staging: at76_usb: fix build breakage + * Staging: at76_usb: remove compiler warnings + * Staging: at76_usb: fix up all remaining checkpatch.pl warnings + * Staging: at76_usb: cleanup dma on stack issues + * Staging: poch: Block size bug fix + * Staging: poch: Update TODO list + * Staging: poch: Correct pages from bytes. 
+ * Staging: poch: minor fixes + * Staging: poch: Fix build warnings + * Staging: poch: Rx control register init + * Staging: poch: Fix user space protocol syncing + * Staging: poch: Fine grained locking + * Staging: sxg: remove typedefs + * Staging: sxg: break the build in a cleaner way when !x86 + * Staging: sxg: update README + * staging: struct device - replace bus_id with dev_name(), dev_set_name() + * Staging: echo: remove typedefs + * Staging: echo: Lindent drivers/staging/echo + * Staging: go7007: saa7134 updates + * Staging: go7007: add sensoray 2250/2251 support + * Staging: go7007: Convert driver to use video_ioctl2 + * Staging: go7007: annotate code pointers + * Staging: go7007: fix minor build warnings + * Staging: go7007: small cleanup + * Staging: go7007: add some more v4l2 ioctls + * Staging: et131x: Cleanup et131x_debug.h defines + * Staging: et131x: fix build failure + * Staging: et131x: remove unused variable in et1310_tx.c + * Staging: usbip: cleanup kerneldoc + * Staging: slicoss: use kzalloc + * Staging: slicoss: use correct type for memory allcations + * Staging: slicoss: use request_firmware + * Staging: add agnx wireless driver + * Staging: agnx: fix build errors due to ssid removal + * Staging: agnx: fix build errors due to rate control API changes + * Staging: agnx: fix build warnings + * Staging: add otus Atheros wireless network driver + * Staging: otus: fix netdev->priv usage + * Staging: otus: fix name clash + * Staging: otus: fix urb callback function type + * Staging: otus: remove dependence on kernel version + * Staging: add rt2860 wireless driver + * Staging: rt2860: disable root hack for reading files + * Staging: rt2860: fix up netdev->priv usage + * Staging: rt2860: use standard bit-reverse function + * Staging: rt2860: Fix minor compiler warnings + * Staging: rt2860: enable WPA_SUPPLICANT support + * Staging: Add ServerEngines benet 10Gb ethernet driver + * Staging: benet: fix netif api breakage + * Staging: benet: fix up netdev->priv change + * Staging: benet: build is broken unless CONFIG_NETPOLL is enabled + * Staging: benet: patch to remove subdirectories + * Staging: benet: fix build errors when CONFIG_NETPOLL is off + * Staging: benet: fix build error. 
+ * Staging: benet: patch to use offsetof() instead of AMAP_BYTE_OFFSET() + * Staging: benet: fix problems reported by checkpatch + * Staging: benet: cleanup a check while posting rx buffers + * Staging: add comedi core + * Staging: comedi: fix up a lot of checkpatch.pl warnings + * Staging: comedi: fix checkpatch.pl errors in comedi_fops.c + * Staging: comedi: fix build error in comedilib.h + * Staging: comedi: add kcomedilib to the tree + * Staging: comedi: set up infrastructure for individual drivers + * Staging: comedi: add local copy of interrupt.h + * Staging: comedi: add pci and usb wrapper header files + * Staging: comedi: comedi driver common function module + * Staging: comedi: add mite comedi pci driver + * Staging: comedi: add usb usbdux driver + * Staging: comedi: add usb usbduxfast driver + * Staging: comedi: add usb dt9812 driver + * Staging: comedi: add comedi_bond driver + * Staging: comedi: add comedi_test driver + * Staging: comedi: add comedi_parport driver + * Staging: comedi: dt9812: fix up a lot of coding style issues + * Staging: comedi: dt9812: remove dt9812.h + * Staging: comedi: dt9812: remove typedefs + * Staging: comedi: dt9812: fix sparse warnings + * Staging: comedi: usbdux: remove kernel version checks + * Staging: comedi: usbdux: code style cleanups + * Staging: comedi: usbdux: remove // comments + * Staging: comedi: usbdux: fix up printk calls + * Staging: comedi: usbdux: remove checkpatch.pl warnings + * Staging: comedi: usbdux: remove typedef + * Staging: comedi: usbdux: remove comedi usb wrappers + * Staging: comedi: usbduxfast: remove comedi usb wrappers + * Staging: comedi: dt9812: remove #ifdef that is not needed + * Staging: comedi: remove usb wrappers + * Staging: comedi: remove PCI wrappers + * Staging: comedi: add icp_multi driver + * Staging: comedi: add me4000 driver + * Staging: comedi: fix checkpatch.pl issues in comedi_bond.c + * Staging: comedi: fix checkpatch.pl issues in comedi_fc.c + * Staging: comedi: remove typedefs from comedi_bond.c + * Staging: comedi: fix sparse issues in comedi_bond.c + * Staging: comedi: fix checkpatch.pl issues in comedi_test.c + * Staging: comedi: fix sparse issues in comedi_test.c + * Staging: comedi: remove typedefs from comedi_test.c + * Staging: comedi: fix comedi_parport.c checkpatch.pl issues. + * Staging: comedi: fix comedi_fc.h checkpatch.pl issues. + * Staging: comedi: fix comedi_pci.h checkpatch.pl issues. 
+ * Staging: comedi: comedi_pci.h: remove unneeded wrapper + * Staging: comedi: comedi_pci.h: remove comedi_pci_enable_no_regions + * Staging: comedi: comedi_pci.h: remove comedi_pci_disable_no_regions + * Staging: comedi: add s626 driver + * Staging: comedi: add rtd520 driver + * Staging: comedi: add me_daq driver + * Staging: comedi: me_daq: fix checkpatch.pl issues + * Staging: comedi: me_daq: remove typedefs + * Staging: comedi: me_daq: fix sparse issues + * Staging: comedi: fix checkpatch.pl warning in interrupt.h + * Staging: comedi: fix build if CONFIG_PROC_FS is not set + * Staging: add asus_oled driver + * Staging: asus_oled: fix build dependancy + * Staging: Add the Meilhaus ME-IDS driver package + * Staging: meilhaus: fix __symbol_get problems + * Staging: add lcd-panel driver + * Staging: panel: major checkpatch cleanup + * Staging: panel: remove ifdefs and code for pre-2.6 kernels + * Staging: panel: remove support for smartcards + * Staging: add Driver for Altera PCI Express Chaining DMA reference design + * Staging: add rtl8187se driver + * Staging: rtl8187se: remove unneeded files + * Staging: rtl8187se: make the built module be the proper name + * Staging: rtl8187se: remove duplicate pci ids + * Staging: me4000: switch to list_for_each*() + * Staging: usbip: switch to list_for_each_entry() + * Staging: add princeton instruments usb camera driver + * Staging: add mimio xi driver + * Staging: add rt2870 wireless driver + * Staging: rt2870: disable root hack for reading files + * Staging: rt2870: fix up netdev->priv usage + * Staging: add frontier tranzport and alphatrack drivers + * Staging: frontier: remove unused alphatrack_sysfs.c file + * Staging: frontier: fix compiler warnings + * Staging: add epl stack + * Staging: epl: run Lindent on all kernel/*.h files + * Staging: epl: run Lindent on all user/*.h files + * Staging: epl: run Lindent on *.h files + * Staging: epl: run Lindent on *.c files + * Staging: epl: hr timers all run in hard irq context now + * Staging: epl: fix netdev->priv b0rkage + * Staging: add android framework + * Staging: android: add binder driver + * Staging: android: binder: Fix gcc warnings about improper format specifiers for size_t in printk + * staging: android: binder: Fix use of euid + * Staging: android: add logging driver + * Staging: android: add ram_console driver + * Staging: android: add timed_gpio driver + * Staging: android: timed_gpio: Rename android_timed_gpio to timed_gpio + * Staging: android: remove dummy android.c driver + * Staging: android: add lowmemorykiller driver + * Staging: android: binder: fix build errors + * staging: __FUNCTION__ is gcc-specific, use __func__ + * V4L/DVB (10176a): Switch remaining clear_user_page users over to + clear_user_highpage + + [ Zhenyu Wang ] + + * agp/intel: add support for G41 chipset + + -- Tim Gardner Sun, 18 Jan 2009 20:22:54 -0700 + +linux (2.6.28-4.11) jaunty; urgency=low + + [ Mario Limonciello ] + + * SAUCE: Enable HDMI audio codec on Studio XPS 1340 + - LP: #309508 + + [ Tim Gardner ] + + * Fix armel d-i FTBSs + + [ Upstream Kernel Changes ] + + * USB: re-enable interface after driver unbinds + + -- Tim Gardner Tue, 13 Jan 2009 16:33:08 -0700 + +linux (2.6.28-4.10) jaunty; urgency=low + + [ Andy Whitcroft ] + + * update kernel bootloader recommends: to prefer grub + - LP: #314004 + * SAUCE: don't use buggy _BCL/_BCM/_BQC for backlight control + - LP: #311716 + * SAUCE: test-suspend -- add the suspend test scripts + - LP: #316419 + + [ Colin Watson ] + + * Enable udebs for armel + + 
[ Tim Gardner ] + + * SAUCE: Dell laptop digital mic does not work, PCI 1028:0271 + - LP: #309508 + * Enable CIFS_XATTR=y and CONFIG_CIFS_POSIX=y + - LP: #220658 + + -- Tim Gardner Thu, 08 Jan 2009 10:38:22 -0700 + +linux (2.6.28-4.9) jaunty; urgency=low + + [ Tim Gardner ] + + * Restore DM_CRYPT, AES, ECB, and CBC as modules. This fixes + some installer issues with encrypted /home and Private directories. + * Take one more stab at building armel without module or ABI errors. + + -- Tim Gardner Tue, 06 Jan 2009 08:38:23 -0700 + +linux (2.6.28-4.8) jaunty; urgency=low + + * Fix i386/amd64 FTBS by ignoring all module and ABI changes, + not something you would normally do, but I'm sure the ABI + has not changed. This will probably also allow the ARM builds to complete. + + -- Tim Gardner Mon, 05 Jan 2009 14:42:58 -0700 + +linux (2.6.28-4.7) jaunty; urgency=low + + [ Tim Gardner ] + + * Enable CONFIG_ATH5K=m for i386/amd64 + - LP: #306719 + * Build all i386/amd64 AGP/DRM components as modules. + - LP: #312721 + * git commands are now installed outside the default $PATH + Use 'git CMD' instead of 'git-CMD'. + * Build in most PATA/SATA drivers. This should allow most i386/amd64 systems to boot + without an initramfs, though some support work is still required in initramfs-tools + and grub. + - LP: #311730 + + -- Tim Gardner Fri, 02 Jan 2009 07:33:09 -0700 + +linux (2.6.28-4.6) jaunty; urgency=low + + [ Tim Gardner ] + + * Enable CONFIG_X86_E_POWERSAVER=m for i386 generic + - LP: #237405 + * Build i386 AGP drivers as modules + - LP: #312721 + * Build i386 DRM as a module + - LP: #312721 + + [ Upstream Kernel Changes ] + + * drm/i915: Add missing userland definitions for gem init/execbuffer. + - LP: #308387 + + -- Tim Gardner Mon, 29 Dec 2008 09:16:47 -0700 + +linux (2.6.28-4.5) jaunty; urgency=low + + [ Andy Whitcroft ] + + * clean up module dependancy information on package removal/purge + - LP: #300773 + + [ Tim Gardner ] + + * Update iscsitarget to 0.4.17 + * Build in ext{234} + * Build in Crypto modules AES, CBC, ECB + * Build in ACPI AC,BATTERY,BUTTON,FAN,PCI_SLOT,PROCESSOR,SBS,THERMAL,WMI + * Build in AGP intel,via,sis,ali,amd,amd64,efficeon,nvidia,sworks + * Build in ata,dev_dm,dev_loop,dev_md,dev_sd,dev_sr + * Build in BT l2cap,rfcomm,sco + * Reduce CONFIG_LEGACY_PTY_COUNT to 0 + * Build in CDROM_PKTCDVD and CHR_DEV_SG + * Build in CPU_FREQ + GOV_CONSERVATIVE,GOV_ONDEMAND,GOV_POWERSAVE,GOV_USERSPACE,STAT,TABLE + * Build in DM CRYPT,MIRROR,MULTIPATH,SNAPSHOT + * Build in DRM + * Build in HID + * Build in HOTPLUG PCI,PCIE + * Build in I2C + * Build in IEEE1394 OHCI1394 + * Build in INPUT EVDEV + * Build in IPV6 + * Build in MMC + * Build in PACKET + * Enable both IEEE1394 (Firewire) stacks as modules + - LP: #276463 + * Disable SUNRPC_REGISTER_V4 + - LP: #306016 + * Enable dm-raid4-5 + - LP: #309378 + * Build in PPP + * Build in RFKILL + * Build in USB SERIAL + + [ Upstream Kernel Changes ] + + * Rebased to v2.6.28 + + -- Tim Gardner Thu, 18 Dec 2008 21:18:44 -0700 + +linux (2.6.28-3.4) jaunty; urgency=low + + [ Tim Gardner ] + + * Build ecryptfs into the kernel + - LP: #302870 + * Deprecated gnbd + + [ Upstream Kernel Changes ] + + * Rebased to v2.6.28-rc8 + + -- Tim Gardner Wed, 10 Dec 2008 22:45:13 -0700 + +linux (2.6.28-2.3) jaunty; urgency=low + + [ Andy Whitcroft ] + + * update the templates so that we have spaces following the title line + + [ Tim Gardner ] + + * Add upload number to kernel version signature. 
This has the side effect + of renaming kernel packages back to the original way, e.g., without '-ub' + in the name. + + -- Tim Gardner Thu, 04 Dec 2008 12:18:31 -0700 + +linux (2.6.28-2.2) jaunty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) version: Implement version_signature proc file." + * SAUCE: (no-up) version: Implement version_signature proc file. + * SAUCE: serial: RS485 ioctl structure uses __u32 include linux/types.h + - LP: #303711 + + [ Tim Gardner ] + + * UBUNTU: Removed CONFIG_DRM_VIA_CHROME9 since it is upstream. + * UBUNTU: Removed ubuntu/via_chrome9 + + [ Upstream Kernel Changes ] + + * Rebased to v2.6.28-rc7 + + -- Tim Gardner Tue, 02 Dec 2008 07:33:32 -0700 + +linux (2.6.28-1.1) jaunty; urgency=low + + [ Amit Kucheria ] + + * SAUCE: make fc transport removal of target configurable + * SAUCE: pm: Config option to disable handling of console during + suspend/resume + * SAUCE: Adds support for COMPAL JHL90 webcam + * Map armel to arm to all editconfigs to work correctly + * Add armel to getabis for completeness sake + * Add -ub to our versioning to allow kerneloops.org to identify us + + [ Andy Whitcroft ] + + * Fix Vcs-Git path for the kernel repository. + - LP: #296915 + + [ Ben Collins ] + + * SAUCE: Lower warning level of some PCI messages + - LP: #159241 + * SAUCE: input/mouse/alps: Do not call psmouse_reset() for alps + * SAUCE: tulip: Let dmfe handle davicom on non-sparc + * SAUCE: tulip: Define ULI PCI ID's + * SAUCE: (no-up) version: Implement version_signature proc file. + * SAUCE: (no-up) connector.h: Add idx/val for drbd + * SAUCE: (no-up) swap: Add notify_swap_entry_free callback for compcache + * SAUCE: drivers: Remove some duplicate device entries in various modules + * SAUCE: (no-up) [AppArmor] merge with upstream subversion r1291 + * SAUCE: (no-up) Enable ubuntu extra subdirectory + * SAUCE: (no-up) ACPI: initramfs DSDT override support + * ubuntu: Add drbd module + * ubuntu: Add iscsitarget module + * ubuntu: Add BOM for iscsitarget + * ubuntu: Add squashfs driver + * SAUCE: (no-up) Check for squashfs superblock in initramfs mounting. + * ubuntu: Add aufs module + * ubuntu: Added atl2 driver + * ubuntu: Added et131x driver + * ubuntu: Add dm-raid4-5 driver + * ubuntu: Add ndiswrapper driver + * ubuntu: Added ram backed compressed swap module (compcache) + * ubuntu: Add misc drivers from hardy lum + * ubuntu: Add heci driver 3.2.0.24 + * ubuntu: Add ov511 and bt-sco drivers + * ubuntu: Add acx, prism2_usb wireless drivers + * ubuntu: Add at76 driver to build + * ubuntu: Add fsam7400 sw kill switch driver + * ubuntu: Added qc-usb driver + * ubuntu: e1000e: Upgraded module to 0.4.1.7 + * ubuntu: Added rfkill drivers + * ubuntu: VIA - Add VIA DRM Chrome9 3D engine + * ubuntu: unionfs: Added v1.4 module from hardy + * ubuntu: Add LIRC driver + * ubuntu: Add GFS driver + * ubuntu: New tlsup driver for toshiba laptops + * SAUCE: (no-up) Export lookup_has for aufs + * SAUCE: (no-up) Modularize vesafb + * ubuntu: Config files + * Disable some modules that need porting to 2.6.28 + * ubuntu: Fixup headers creation to include arch/*/include + * ubuntu/module-check: Ignore comment lines + + [ Chuck Short ] + + * SAUCE: ata: blacklist FUJITSU MHW2160BH PL + + [ cking ] + + * SAUCE: Enable speedstep for sonoma processors. + + [ Colin Ian King ] + + * ubuntu: Add dm-loop + * SAUCE: cx88: Support Leadtek WinFast DTV2000 H version J. 
+ * SAUCE: fix kernel oops in VirtualBox during paravirt patching + * SAUCE: qc-usb: Enable Logitech QuickCam Messenger + * SAUCE: appleir: Enable driver for new MacBook Pro + + [ Colin Watson ] + + * Enable configfs, fuse, jfs, reiserfs, and xfs for armel + * Extend debian/d-i/ modules handling to make armel easier to support + * Create udebs for armel + + [ Fabio M. Di Nitto ] + + * ubuntu: update GFS Cluster File System + + [ Kees Cook ] + + * SAUCE: AppArmor: update to upstream subversion r1302 + + [ Leann Ogasawara ] + + * Add automatic model setting for Samsung Q45 + * Add Dell Dimension 9200 reboot quirk + + [ Mackenzie Morgan ] + + * SAUCE: Add quirk for ASUS Z37E to make sound audible after resume + + [ Matthew Garrett ] + + * SAUCE: hostap: send events on data interface as well as master + interface + + [ Michael Frey (Senior Manager, MID ] + + * SAUCE: Send HCI_RESET for Broadcomm 2046 + + [ Michael Haas ] + + * add proper aufs source tree from 20080922 + * Fix AUFS compilation in vfsub.c + * Add splice-2.6.23.patch from AUFS to export a symbol needed by AUFS + * Add put_filp.patch from AUFS to export a symbol needed by AUFS + * Add deny_write_access.patch from AUFS - export deny_write_access + * Add sec_perm-2.6.24.patch from AUFS - export security_inode_permission + * make sure TMPFS_MAGIC is defined in AUFS Makefile + * SAUCE: Revert aufs changes from AppArmor merge + + [ Mohamed Abbas ] + + * SAUCE: iwlagn -- fix rfkill when on when driver loaded + + [ Phillip Lougher ] + + * SAUCE: r8169: disable TSO by default for RTL8111/8168B chipsets. + + [ Stefan Bader ] + + * SAUCE: (no-up) Export dm_disk function of device-mapper + * SAUCE: Restore VT fonts on switch + * SAUCE: mmc: Increase power_up deleay to fix TI readers + * gfs1: GFS1 can't create more than 4kb file + * uvcvideo: Commit streaming parameters when enabling the video stream. + + [ Tim Gardner ] + + * SAUCE: Add extra headers to linux-libc-dev + * SAUCE: Catch nonsense keycodes and silently ignore + * SAUCE: Added support for HDAPS on various ThinkPads from Lenovo and IBM + * SAUCE: Guest OS does not recognize a lun with non zero target id on + Vmware ESX Server + * SAUCE: (no-up) Take care of orinoco_cs overlap with hostap_cs + * ubuntu: Add GNBD driver + * SAUCE: e1000e: Map NV RAM dynamically only when needed. + * SAUCE: Correctly blacklist Thinkpad r40e in ACPI + * SAUCE: Update Wacom tablet driver to 1.49 + * SAUCE: Fix Wacom tablet 1.49 porting errors + * SAUCE: Enable an e1000e Intel Corporation 82567 Gigabit controller + * SAUCE: Fix Oops in wlan_setup + * SAUCE: ipw2200: change default policy for auto-associate + * Dell Wireless 365 needs BTUSB_RESET quirk. + * ndiswrapper remote buffer overflows on long ESSIDs (CVE 2008-4395) + * Disabled ubuntu/e1000e config + + [ Upstream Kernel Changes ] + + * Revert "[Bluetooth] Eliminate checks for impossible conditions in IRQ + handler" + * Revert "x86, early_ioremap: fix fencepost error" + * mac80211: fix two issues in debugfs + * iwl3945: do not send scan command if channel count zero + + -- Ben Collins Fri, 07 Nov 2008 09:37:42 -0700 + +linux (2.6.27-8.17) intrepid-proposed; urgency=low + + [ John W. Linville ] + + * SAUCE: iwlagn: avoid sleep in softirq context + -LP: #286285 + + [ Tim Gardner ] + + * Dell Wireless 365 needs BTUSB_RESET quirk. 
+ - LP: #293670 + * SAUCE: ALSA: hda: make a STAC_DELL_EQ option (version 2) + - LP: #293271 + + [ Upstream Kernel Changes ] + + * iwlagn: downgrade BUG_ON in interrupt + * Input: atkbd - expand Latitude's force release quirk to other Dells + * fbcon_set_all_vcs: fix kernel crash when switching the rotated consoles + * modules: fix module "notes" kobject leak + * Driver core: Fix cleanup in device_create_vargs(). + * Driver core: Clarify device cleanup. + * ath9k/mac80211: disallow fragmentation in ath9k, report to userspace + * md: Fix rdev_size_store with size == 0 + * xfs: fix remount rw with unrecognized options + * OHCI: Allow broken controllers to auto-stop + * USB: OHCI: fix endless polling behavior + * USB: Fix s3c2410_udc usb speed handling + * USB: EHCI: log a warning if ehci-hcd is not loaded first + * usb gadget: cdc ethernet notification bugfix + * usb: musb_hdrc build fixes + * drm/i915: fix ioremap of a user address for non-root (CVE-2008-3831) + * DVB: au0828: add support for another USB id for Hauppauge HVR950Q + * DVB: sms1xxx: support two new revisions of the Hauppauge WinTV + MiniStick + * security: avoid calling a NULL function pointer in + drivers/video/tvaudio.c + * Linux 2.6.27.3 + -LP: #294152 + + * gpiolib: fix oops in gpio_get_value_cansleep() + * edac cell: fix incorrect edac_mode + * x86 ACPI: fix breakage of resume on 64-bit UP systems with SMP kernel + * sched: fix the wrong mask_len + * USB: cdc-wdm: make module autoload work + * USB: don't rebind drivers after failed resume or reset + * USB: fix memory leak in cdc-acm + * USB: Speedtouch: add pre_reset and post_reset routines + * dm kcopyd: avoid queue shuffle + * dm snapshot: fix primary_pe race + * amd_iommu: fix nasty bug that caused ILLEGAL_DEVICE_TABLE_ENTRY errors + * CIFS: fix saving of resume key before CIFSFindNext + * netfilter: xt_iprange: fix range inversion match + * netfilter: snmp nat leaks memory in case of failure + * netfilter: restore lost ifdef guarding defrag exception + * anon_vma_prepare: properly lock even newly allocated entries + * hvc_console: Fix free_irq in spinlocked section + * ACPI Suspend: Enable ACPI during resume if SCI_EN is not set + * ACPI suspend: Blacklist HP xw4600 Workstation for old code ordering + * ACPI suspend: Always use the 32-bit waking vector + * proc: fix vma display mismatch between /proc/pid/{maps,smaps} + * SCSI: scsi_dh: add Dell product information into rdac device handler + * PCI hotplug: cpqphp: fix kernel NULL pointer dereference + * V4L/DVB (9300): pvrusb2: Fix deadlock problem + * Linux 2.6.27.4 + -LP: #294155 + + -- Tim Gardner Tue, 04 Nov 2008 12:16:07 -0700 + +linux (2.6.27-7.16) intrepid-security; urgency=low + + [ Tim Gardner ] + + * ndiswrapper remote buffer overflows on long ESSIDs (CVE 2008-4395) + - LP: #275860 + + [ Upstream Kernel Changes ] + + * ext[234]: Avoid printk floods in the face of directory corruption + (CVE-2008-3528) + + -- Tim Gardner Mon, 03 Nov 2008 13:34:42 -0700 + +linux (2.6.27-7.15) intrepid-security; urgency=low + + [ Upstream Kernel Changes ] + + * tcp: Restore ordering of TCP options for the sake of inter-operability + - LP: #264019 + + -- Tim Gardner Mon, 27 Oct 2008 19:28:06 -0600 + +linux (2.6.27-7.14) intrepid; urgency=low + + [ Tim Gardner ] + + * Disable ath5k in 2.6.27 + - LP: #288148 + + -- Tim Gardner Thu, 23 Oct 2008 07:40:43 -0600 + +linux (2.6.27-7.13) intrepid; urgency=low + + [ Stefan Bader ] + + * gfs1: GFS1 can't create more than 4kb file + + [ Tim Gardner ] + + * Revert "SAUCE: x86: Reserve 
FIRST_DEVICE_VECTOR in used_vectors + bitmap.". Use upstream commit to avoid future conflicts. + * Revert "STABLE queue: mac80211: fix two issues in debugfs". + Use upstream commit to avoid future conflicts. + * Revert "x86, early_ioremap: fix fencepost error" + Use upstream commit to avoid future conflicts. + + [ Upstream Kernel Changes ] + + * sched_rt.c: resch needed in rt_rq_enqueue() for the root rt_rq + * x86: Reserve FIRST_DEVICE_VECTOR in used_vectors bitmap. + * mac80211: fix two issues in debugfs + * Fix barrier fail detection in XFS + * tty: Termios locking - sort out real_tty confusions and lock reads + * CIFS: make sure we have the right resume info before calling + CIFSFindNext + * rfkill: update LEDs for all state changes + * libertas: clear current command on card removal + * b43legacy: Fix failure in rate-adjustment mechanism + * x86, early_ioremap: fix fencepost error + * x86: SB450: skip IRQ0 override if it is not routed to INT2 of IOAPIC + * x86: improve UP kernel when CPU-hotplug and SMP is enabled + * sky2: Fix WOL regression + * netdrvr: atl1e: Don't take the mdio_lock in atl1e_probe + * Linux 2.6.27.2 + + [ Amit Kucheria ] + + * Ubuntu: agp: Fix stolen memory counting on G4X. + -LP: 285572 + + [ Scott Remnant ] + + * add MODULE_ALIAS to load ipmi_devintf with ipmi_si + + -- Tim Gardner Sun, 19 Oct 2008 10:06:21 -0600 + +linux (2.6.27-7.12) intrepid; urgency=low + + [ Chuck Short ] + + * xen: Add xen modules to virtual flavours. + + [ Mario Limonciello ] + + * SAUCE: Add back in lost commit for Apple BT Wireless Keyboard + - LP: #162083 + + [ Tim Gardner ] + + * Remove depmod created files from packages. + - LP: #250511 + * Changed default TCP congestion algorithm to 'cubic' (again) + - LP: #278801 + * Update configs for 'disable CONFIG_DYNAMIC_FTRACE' + - LP: #263555 + + [ Upstream Kernel Changes ] + + * x86: register a platform RTC device if PNP doesn't describe it + * disable CONFIG_DYNAMIC_FTRACE due to possible memory corruption on + module unload + + -- Tim Gardner Fri, 17 Oct 2008 11:25:39 -0600 + +linux (2.6.27-7.11) intrepid; urgency=low + + [ Amit Kucheria ] + + * STABLE queue: mac80211: fix two issues in debugfs + - LP: #275227 + * SAUCE: Adds support for COMPAL JHL90 webcam + + [ Ben Collins ] + + * SAUCE: (no-up) x86: Quiet "Kernel alive" messages + - LP: #39985 + * SAUCE: (no-up) Modularize vesafb + * build/config: Enable vesafb module + * build: Switch to vesafb as preferred. 
+ + [ Leann Ogasawara ] + + * Add Dell Dimension 9200 reboot quirk + - LP: #271370 + + [ Michael Haas ] + + * SAUCE: Revert aufs changes from AppArmor merge + + [ Tim Gardner ] + + * fix virtio udeb layout + - LP: #257739 + * Enabled CONFIG_EXT4DEV_FS=m + * Changed default TCP congestion algorithm to 'cubic' + - LP: #278801 + * SAUCE: ipw2200: change default policy for auto-associate + - LP: #264104 + + [ Upstream Kernel Changes ] + + * x86, early_ioremap: fix fencepost error + - LP: #263543 + + -- Tim Gardner Sat, 11 Oct 2008 08:07:42 -0600 + +linux (2.6.27-7.10) intrepid; urgency=low + + [ Alexey Starikovskiy ] + + * SAUCE: ACPI: EC: do transaction from interrupt context + - LP: #277802 + + [ Ben Collins ] + + * build/d-i: Change virtio-modules udeb to prio standard + + [ Colin Ian King ] + + * SAUCE: Blacklist IBM 2656 in serio/i8042 + - LP: #21558 + + [ Henrik Rydberg ] + + * Revert "SAUCE: applesmc: Add MacBookAir" + * SAUCE: [PATCH 1/5] hwmon: applesmc: Specified number of bytes to read + should match actual + * SAUCE: [PATCH 2/5] hwmon: applesmc: Fix the 'wait status failed: c != + 8' problem + * SAUCE: [PATCH 3/5] hwmon: applesmc: Prolong status wait + * SAUCE: [PATCH 4/5] hwmon: applesmc: Allow for variable ALV0 and ALV1 + package length + * SAUCE: [PATCH 5/5] hwmon: applesmc: Add support for Macbook Air + * SAUCE: hwmon: applesmc: Add support for Macbook Pro 4 + * SAUCE: hwmon: applesmc: Add support for Macbook Pro 3 + * SAUCE: hwmon: applesmc: Lighter wait mechanism, drastic improvement + + [ Leann Ogasawara ] + + * Add automatic model setting for Samsung Q45 + - LP: #200210 + + [ Tim Gardner ] + + * SAUCE: Correctly blacklist Thinkpad r40e in ACPI + - LP: #278794 + * SAUCE: Update Wacom tablet driver to 1.49 + - LP: #260675 + * SAUCE: ALPS touchpad for Dell Latitude E6500/E6400 + - LP: #270643 + * SAUCE: Fix Wacom tablet 1.49 porting errors + * SAUCE: Enable an e1000e Intel Corporation 82567 Gigabit controller + * SAUCE: Fix Oops in wlan_setup + - LP: #263309 + + [ Upstream Kernel Changes ] + + * ath9k: fix oops on trying to hold the wrong spinlock + * [Bluetooth] Fix double frees on error paths of btusb and bpa10x drivers + * [Bluetooth] Add reset quirk for new Targus and Belkin dongles + * [Bluetooth] Add reset quirk for A-Link BlueUSB21 dongle + * Revert "ax25: Fix std timer socket destroy handling." + * ax25: Quick fix for making sure unaccepted sockets get destroyed. + * netrom: Fix sock_orphan() use in nr_release + * Revert "V4L/DVB (8904): cx88: add missing unlock_kernel" + * SLOB: fix bogus ksize calculation + * net: only invoke dev->change_rx_flags when device is UP + * tcp: Fix possible double-ack w/ user dma + * net: Fix netdev_run_todo dead-lock + * tcp: Fix tcp_hybla zero congestion window growth with small rho and large cwnd. 
+ * [MIPS] Sibyte: Register PIO PATA device only for Swarm and Litte Sur + * eeepc-laptop: Fix hwmon interface + * hwmon: (it87) Prevent power-off on Shuttle SN68PT + * hwmon: Define sysfs interface for energy consumption register + * hwmon: (adt7473) Fix some bogosity in documentation file + * hwmon: (abituguru3) Enable reading from AUX3 fan on Abit AT8 32X + * hwmon: (abituguru3) Enable DMI probing feature on Abit AT8 32X + * [CPUFREQ] correct broken links and email addresses + * SLOB: fix bogus ksize calculation fix + * Don't allow splice() to files opened with O_APPEND + * Linux 2.6.27 + + -- Tim Gardner Wed, 08 Oct 2008 21:19:34 -0600 + +linux (2.6.27-6.9) intrepid; urgency=low + + [ Kees Cook ] + + * SAUCE: AppArmor: update to upstream subversion r1302 + - LP: #269921 + + [ Stefan Bader ] + + * Update configuration files to be compliant to desktop specs + - LP: #279019 + + [ Tim Gardner ] + + * Add support in e1000e for a couple of ICH10 PCI IDs + * Enable CONFIG_INPUT_PCSPKR=m + - LP: #275453 + + [ Upstream Kernel Changes ] + + * V4L/DVB (8559a): Fix a merge conflict at gspca/sonixb + * V4L/DVB (8789): wm8739: remove wrong kfree + * V4L/DVB (8883): w9968cf: Fix order of usb_alloc_urb validation + * V4L/DVB (8884): em28xx-audio: fix memory leak + * V4L/DVB (8885): cpia2_usb: fix memory leak + * V4L/DVB (8886): ov511: fix memory leak + * V4L/DVB (8887): gspca: fix memory leak + * V4L/DVB (8892): pvrusb2: Handle USB ID 2040:2950 same as 2040:2900 + * V4L/DVB (8904): cx88: add missing unlock_kernel + * V4L/DVB (8905): ov511: fix exposure sysfs attribute bug + * V4L/DVB (8909): gspca: PAC 7302 webcam 093a:262a added. + * hrtimer: migrate pending list on cpu offline + * hrtimer: fix migration of CB_IRQSAFE_NO_SOFTIRQ hrtimers + * hrtimer: mark migration state + * hrtimer: prevent migration of per CPU hrtimers + * [IA64] Put the space for cpu0 per-cpu area into .data section + * powerpc: Fix PCI in Holly device tree + * powerpc: Fix failure to shutdown with CPU hotplug + * mfd: Fix Kconfig accroding to the new gpiolib symbols + * mfd: Fix asic3 compilation + * x86: fix typo in enable_mtrr_cleanup early parameter + * ipsec: Fix pskb_expand_head corruption in xfrm_state_check_space + * iucv: Fix mismerge again. 
+ * ALSA: ASoC: Fix cs4270 error path + * ALSA: hda - Fix model for Dell Inspiron 1525 + * sctp: Fix kernel panic while process protocol violation parameter + * x86: Fix broken LDT access in VMI + * x86, vmi: fix broken LDT access + * tcp: Fix NULL dereference in tcp_4_send_ack() + * ipv6: NULL pointer dereferrence in tcp_v6_send_ack + * XFRM,IPv6: initialize ip6_dst_blackhole_ops.kmem_cachep + * af_key: Free dumping state on socket close + * dm: always allow one page in dm_merge_bvec + * dm: cope with access beyond end of device in dm_merge_bvec + * dm mpath: add missing path switching locking + * MN10300: Fix IRQ handling + * pxa2xx_spi: fix build breakage + * e1000e: write protect ICHx NVM to prevent malicious write/erase + * powerpc: Fix boot hang regression on MPC8544DS + * ASoC: Set correct name for WM8753 rec mixer output + * ALSA: snd-powermac: mixers for PowerMac G4 AGP + * ALSA: snd-powermac: HP detection for 1st iMac G3 SL + * fbcon: fix monochrome color value calculation + * inotify: fix lock ordering wrt do_page_fault's mmap_sem + * braille_console: only register notifiers when the braille console is used + * fix error-path NULL deref in alloc_posix_timer() + * memory hotplug: missing zone->lock in test_pages_isolated() + * mm: tiny-shmem nommu fix + * mm: handle initialising compound pages at orders greater than MAX_ORDER + * e1000e: reset swflag after resetting hardware + * e1000e: do not ever sleep in interrupt context + * e1000e: remove phy read from inside spinlock + * e1000e: drop stats lock + * e1000e: debug contention on NVM SWFLAG + * e1000e: update version from k4 to k6 + * Check mapped ranges on sysfs resource files + * e1000e: Fix incorrect debug warning + * [MIPS] Build fix: Fix irq flags type + * [MIPS] SMTC: Build fix: Fix filename in Makefile + * [MIPS] SMTC: Fix holes in SMTC and FPU affinity support. + * [MIPS] SMTC: Close tiny holes in the SMTC IPI replay system. + * [MIPS] SMTC: Fix SMTC dyntick support. + * [S390] nohz: Fix __udelay. + * [S390] qdio: prevent stack clobber + * Fix init/main.c to use regular printk with '%pF' for initcall fn + * x86 setup: correct segfault in generation of 32-bit reloc kernel + * selinux: Fix an uninitialized variable BUG/panic in selinux_secattr_to_sid() + * rtc: fix kernel panic on second use of SIGIO nofitication + * fbdev: fix recursive notifier and locking when fbdev console is blanked + * orion_spi: fix handling of default transfer speed + * include/linux/stacktrace.h: declare struct task_struct + * cpusets: remove pj from cpuset maintainers + * MAINTAINERS: add mailing list for man-pages + * SubmitChecklist: interfaces changes should CC linux-api@ + * Documentation/HOWTO: info about interface changes should CC linux-api@vger + * dw_dmac: fix copy/paste bug in tasklet + * leds-fsg: change order of initialization and deinitialization + * leds-pca955x: add proper error handling and fix bogus memory handling + * ACPI: Make /proc/acpi/wakeup interface handle PCI devices (again) + * clockevents: check broadcast tick device not the clock events device + * V4L/DVB (8919): cx18: Fix tuner audio input for Compro H900 cards + * V4L/DVB (8926): gspca: Bad fix of leak memory (changeset 43d2ead315b1). + * V4L/DVB (8933): gspca: Disable light frquency for zc3xx cs2102 Kokom. 
+ * V4L/DVB (8935): em28xx-cards: Remove duplicate entry (EM2800_BOARD_KWORLD_USB2800) + * V4L/DVB (8955): bttv: Prevent NULL pointer dereference in radio_open + * V4L/DVB (8957): zr36067: Restore the default pixel format + * V4L/DVB (8958): zr36067: Return proper bytes-per-line value + * V4L/DVB (8960): drivers/media/video/cafe_ccic.c needs mm.h + * V4L/DVB (8961): zr36067: Fix RGBR pixel format + * V4L/DVB (8963): s2255drv field count fix + * V4L/DVB (8967): Use correct XC3028L firmware for AMD ATI TV Wonder 600 + * V4L/DVB (8978): sms1xxx: fix product name for Hauppauge WinTV MiniStick + * V4L/DVB (8979): sms1xxx: Add new USB product ID for Hauppauge WinTV MiniStick + * V4L/DVB (9029): Fix deadlock in demux code + * V4L/DVB (9037): Fix support for Hauppauge Nova-S SE + * V4L/DVB (9043): S5H1420: Fix size of shadow-array to avoid overflow + * V4L/DVB (9053): fix buffer overflow in uvc-video + * V4L/DVB (9075): gspca: Bad check of returned status in i2c_read() spca561. + * V4L/DVB (9080): gspca: Add a delay after writing to the sonixj sensors. + * V4L/DVB (9092): gspca: Bad init values for sonixj ov7660. + * V4L/DVB (9099): em28xx: Add detection for K-WORLD DVB-T 310U + * V4L/DVB (9103): em28xx: HVR-900 B3C0 - fix audio clicking issue + * x86: gart iommu have direct mapping when agp is present too + * ide-cd: temporary tray close fix + * ide-dma: fix ide_build_dmatable() for TRM290 + * IDE: Fix platform device registration in Swarm IDE driver (v2) + * ide-cd: Optiarc DVD RW AD-7200A does play audio + * ide: workaround for bogus gcc warning in ide_sysfs_register_port() + * [MIPS] Fix CMP Kconfig configuration and mark as broken. + * [MIPS] IP27: Fix build errors if CONFIG_MAPPED_KERNEL=y + * x86 ACPI: Blacklist two HP machines with buggy BIOSes + * kgdb, x86: Avoid invoking kgdb_nmicallback twice per NMI + * kgdb: call touch_softlockup_watchdog on resume + * atmel-mci: Initialize BLKR before sending data transfer command + * Marker depmod fix core kernel list + * Linux 2.6.27-rc9 + + -- Tim Gardner Sun, 05 Oct 2008 21:27:49 -0600 + +linux (2.6.27-5.8) intrepid; urgency=low + + [ Amit Kucheria ] + + * Update AUFS-related Kconfig + - LP: #264048 + + [ Michael Haas ] + + * add proper aufs source tree from 20080922 + * Fix AUFS compilation in vfsub.c + * Add splice-2.6.23.patch from AUFS to export a symbol needed by AUFS + * Add put_filp.patch from AUFS to export a symbol needed by AUFS + * apply (modified) lhash.patch from AUFS to export __lookup_hash() + * Add deny_write_access.patch from AUFS - export deny_write_access + * Add sec_perm-2.6.24.patch from AUFS - export security_inode_permission + * make sure TMPFS_MAGIC is defined in AUFS Makefile + + [ Tim Gardner ] + + * Enabled CONFIG_IPWIRELESS + - LP: #274748 + * Enabled CONFIG_E1000E, disabled CONFIG_E1000E_NEW + This takes advantage of the upstream NVM protection fix in + commit 4a7703582836f55a1cbad0e2c1c6ebbee3f9b3a7. + + [ Upstream Kernel Changes ] + + * Revert "[Bluetooth] Eliminate checks for impossible conditions in IRQ + handler" + * [SCSI] qla2xxx: Defer enablement of RISC interrupts until ISP + initialization completes. 
+ * PCI: Fix pcie_aspm=force + * PCI: fix compiler warnings in pci_get_subsys() + * UBIFS: create the name of the background thread in every case + * UBIFS: TNC / GC race fixes + * UBIFS: remove incorrect assert + * UBIFS: fix printk format warnings + * AMD IOMMU: set iommu sunc flag after command queuing + * AMD IOMMU: protect completion wait loop with iommu lock + * sparc64: Fix disappearing PCI devices on e3500. + * x86, oprofile: BUG scheduling while atomic + * ALSA: ASoC: Fix at32-pcm build breakage with PM enabled + * ath9k: connectivity is lost after Group rekeying is done + * wireless: zd1211rw: add device ID fix wifi dongle "trust nw-3100" + * [IA64] Ski simulator doesn't need check_sal_cache_flush + * [IA64] kexec fails on systems with blocks of uncached memory + * ath9k: Fix IRQ nobody cared issue with ath9k + * [Bluetooth] Fix I/O errors on MacBooks with Broadcom chips + * [Bluetooth] Fix wrong URB handling of btusb driver + * [Bluetooth] Fix USB disconnect handling of btusb driver + * sparc64: Fix missing devices due to PCI bridge test in + of_create_pci_dev(). + * [WATCHDOG] ibmasr: remove unnecessary spin_unlock() + * [WATCHDOG] wdt285: fix sparse warnings + * [WATCHDOG] unlocked_ioctl changes + * x86: fix 27-rc crash on vsmp due to paravirt during module load + * sched: fix init_hrtick() section mismatch warning + * clockevents: prevent cpu online to interfere with nohz + * x86: prevent stale state of c1e_mask across CPU offline/online + * clockevents: prevent stale tick_next_period for onlining CPUs + * clockevents: check broadcast device not tick device + * clockevents: prevent mode mismatch on cpu online + * x86: prevent C-states hang on AMD C1E enabled machines + * x86: c1e_idle: don't mark TSC unstable if CPU has invariant TSC + * timers: fix build error in !oneshot case + * ALSA: ASoC: maintainers - update email address for Liam Girdwood + * ibmasr: remove unnecessary spin_unlock() + * smb.h: do not include linux/time.h in userspace + * kernel-doc: allow structs whose members are all private + * kexec: fix segmentation fault in kimage_add_entry + * Documentation/DMA-mapping.txt: update for pci_dma_mapping_error() + changes + * sys_paccept: disable paccept() until API design is resolved + * mm: tiny-shmem fix lock ordering: mmap_sem vs i_mutex + * Documentation/sysctl/kernel.txt: fix softlockup_thresh description + * memcg: check under limit at shrink_usage + * atmel_serial: update the powersave handler to match serial core + * [SCSI] Fix hang with split requests + * USB Storage: Sierra: Non-configurable TRU-Install + * USB Serial: Sierra: Device addition & version rev + * USB: ehci: fix some ehci hangs and crashes + * USB: Fix the Nokia 6300 storage-mode. 
+ * USB: Correct Sierra Wireless USB EVDO Modem Device ID + * USB: fix hcd interrupt disabling + * USB: update of Documentation/usb/anchors.txt + * usb gadget: fix omap_udc DMA regression + * USB: Fixing Nokia 3310c in storage mode + * usb: musb: fix include path + * USB: fix EHCI periodic transfers + * usb-serial: Add Siemens EF81 to PL-2303 hack triggers + * USB: SERIAL CP2101 add device IDs + * USB: unusual_devs addition for RockChip MP3 player + * USB: fsl_usb2_udc: fix VDBG() format string + * usb serial: ti_usb_3410_5052 obviously broken by firmware changes + * USB: ftdi_sio: Add 0x5050/0x0900 USB IDs (Papouch Quido USB 4/4) + * USB: serial: add ZTE CDMA Tech id to option driver + * USB Serial: Sierra: Add MC8785 VID/PID + * USB: drivers/usb/musb/: disable it on SuperH + * usb: ftdi_sio: add support for Domintell devices + * usb: unusual devs patch for Nokia 5310 Music Xpress + * USB: revert recovery from transient errors + * [MIPS] au1000: Fix gpio direction + * [MIPS] Fixe the definition of PTRS_PER_PGD + * x86: prevent stale state of c1e_mask across CPU offline/online, fix + * x86: disable apm on the olpc + * i2c-powermac: Fix section for probe and remove functions + * i2c-dev: Return correct error code on class_create() failure + * i2c: Fix mailing lists in two MAINTAINERS entries + * ath9k: disable MIB interrupts to fix interrupt storm + * 9p: implement proper trans module refcounting and unregistration + * 9p-trans_fd: fix trans_fd::p9_conn_destroy() + * 9p-trans_fd: clean up p9_conn_create() + * 9p-trans_fd: don't do fs segment mangling in p9_fd_poll() + * 9p-trans_fd: fix and clean up module init/exit paths + * 9p: introduce missing kfree + * 9p: use an IS_ERR test rather than a NULL test + * 9p: fix put_data error handling + * netfilter: ip6t_{hbh,dst}: Rejects not-strict mode on rule insertion + * MN10300: Move asm-arm/cnt32_to_63.h to include/linux/ + * MN10300: Make sched_clock() report time since boot + * ALSA: fix locking in snd_pcm_open*() and snd_rawmidi_open*() + * ALSA: remove unneeded power_mutex lock in snd_pcm_drop + * IPoIB: Fix crash when path record fails after path flush + * [XFS] Fix extent list corruption in xfs_iext_irec_compact_full(). + * [XFS] Remove xfs_iext_irec_compact_full() + * kgdb: could not write to the last of valid memory with kgdb + * kgdb, x86, arm, mips, powerpc: ignore user space single stepping + * kgdb, x86_64: gdb serial has BX and DX reversed + * kgdb, x86_64: fix PS CS SS registers in gdb serial + * kgdboc,tty: Fix tty polling search to use name correctly + * ARM: Delete ARM's own cnt32_to_63.h + * m32r: remove the unused NOHIGHMEM option + * m32r: don't offer CONFIG_ISA + * m32r: export empty_zero_page + * m32r: export __ndelay + * m32r/kernel/: cleanups + * [MIPS] au1000: Make sure GPIO value is zero or one + * [MIPS] IP27: Switch to dynamic interrupt routing avoding panic on + error. 
+ * [MIPS] BCM47xx: Fix build error due to missing PCI functions + * [SSB] Initialise dma_mask for SSB_BUSTYPE_SSB devices + * Swarm: Fix crash due to missing initialization + * ide-tape: fix vendor strings + * ide: note that IDE generic may prevent other drivers from attaching + * cdrom: update ioctl documentation + * [SCSI] qlogicpti: fix sg list traversal error in continuation entries + * sata_nv: reinstate nv_hardreset() for non generic controllers + * scsi: fix fall out of sg-chaining patch in qlogicpti + * ALSA: make the CS4270 driver a new-style I2C driver + * ALSA: ASoC: Fix another cs4270 error path + * Fix NULL pointer dereference in proc_sys_compare + * kconfig: fix silentoldconfig + * kconfig: readd lost change count + * mm owner: fix race between swapoff and exit + * Linux 2.6.27-rc8 + * e1000e: write protect ICHx NVM to prevent malicious write/erase + + -- Amit Kucheria Tue, 30 Sep 2008 18:22:35 +0300 + +linux (2.6.27-4.7) intrepid; urgency=low + + [ Ben Collins ] + + * build/abi: Add gfs1 to perm blacklist + * build/abi: Ignored changes in gfs2 symbols + + [ Fabio M. Di Nitto ] + + * Revert "SAUCE: Export gfs2 symbols required for gfs1 kernel module" + * ubuntu: update GFS Cluster File System + + [ Stefan Bader ] + + * SAUCE: x86: Reserve FIRST_DEVICE_VECTOR in used_vectors bitmap. + - LP: #276334 + + [ Tim Gardner ] + + * Revert "Disable e1000e until the NVRAM corruption problem is found." + * Add atl1e and atl2 to Debian installer bits + - LP: #273904 + * SAUCE: e1000e: Map NV RAM dynamically only when needed. + - LP: #263555 + + -- Tim Gardner Fri, 26 Sep 2008 20:51:22 -0600 + +linux (2.6.27-4.6) intrepid; urgency=low + + [ Tim Gardner ] + + * Disable e1000e until the NVRAM corruption problem is found. + - LP: #263555 + + [ Upstream Kernel Changes ] + + * Revert "[Bluetooth] Eliminate checks for impossible conditions in IRQ + handler" + + -- Ben Collins Tue, 23 Sep 2008 09:53:57 -0400 + +linux (2.6.27-4.5) intrepid; urgency=low + + [ Upstream Kernel Changes ] + + * Revert "b43/b43legacy: add RFKILL_STATE_HARD_BLOCKED support" + * udf: Fix lock inversion between iprune_mutex and alloc_mutex (v2) + * udf: Fix error paths in udf_new_inode() + * [SCSI] sd: select CRC_T10DIF only when necessary + * [SCSI] zfcp: Fix request queue locking + * [SCSI] zfcp: Correctly query end flag in gpn_ft response + * [SCSI] zfcp: Simplify ccw notify handler + * [SCSI] zfcp: Fix reference counter for remote ports + * [SCSI] zfcp: channel cannot be detached due to refcount imbalance + * [SCSI] zfcp: Remove duplicated unlikely() macros. + * [SCSI] scsi_dh: make check_sense return ADD_TO_MLQUEUE + * [SCSI] make scsi_check_sense HARDWARE_ERROR return ADD_TO_MLQUEUE on + retry + * [SCSI] fix check of PQ and PDT bits for WLUNs + * pcm037: add rts/cts support for serial port + * i.MX serial: fix init failure + * imx serial: set RXD mux bit on i.MX27 and i.MX31 + * imx serial: fix rts handling for non imx1 based hardware + * mlx4_core: Set RAE and init mtt_sz field in FRMR MPT entries + * udf: add llseek method + * PCI/iommu: blacklist DMAR on Intel G31/G33 chipsets + * PCI: Fix printk warnings in probe.c + * PCI: Fix printk warnings in setup-bus.c + * PCI Hotplug: fakephp: fix deadlock... again + * clockevents: remove WARN_ON which was used to gather information + * ocfs2: Fix a bug in direct IO read. 
+ * arch/x86/kernel/kdebugfs.c: introduce missing kfree + * [IA64] fix compile failure with non modular builds + * [IA64] fix up bte.h + * [IA64] arch/ia64/sn/pci/tioca_provider.c: introduce missing kfree + * PCI: fix pciehp_free_irq() + * [IA64] prevent ia64 from invoking irq handlers on offline CPUs + * ide: Fix pointer arithmetic in hpt3xx driver code (3rd try) + * add deprecated ide-scsi to feature-removal-schedule.txt + * swiotlb: fix back-off path when memory allocation fails + * sparc64: Fix interrupt register calculations on Psycho and Sabre. + * VIDEO_SH_MOBILE_CEU should depend on HAS_DMA + * m68k: Update defconfigs for 2.6.27-rc6 + * sparc32: Fix function signature of of_bus_sbus_get_flags(). + * sched: fix 2.6.27-rc5 couldn't boot on tulsa machine randomly + * sched: fix deadlock in setting scheduler parameter to zero + * KVM: SVM: fix random segfaults with NPT enabled + * KVM: SVM: fix guest global tlb flushes with NPT + * KVM: VMX: Always return old for clear_flush_young() when using EPT + * clocksource, acpi_pm.c: fix check for monotonicity + * [ARM] OMAP: Fix MMC device data + * block: disable sysfs parts of the disk command filter + * ath9k: Assign seq# when mac80211 requests this + * sg: disable interrupts inside sg_copy_buffer + * MN10300: Change the fault handler to check in_atomic() not + in_interrupt() + * [Bluetooth] Fix regression from using default link policy + * netlink: fix overrun in attribute iteration + * x86: fix possible x86_64 and EFI regression + * sparc64: Fix PCI error interrupt registry on PSYCHO. + * sparc: Fix user_regset 'n' field values. + * niu: panic on reset + * PCI: re-add debug prints for unmodified BARs + * [ARM] 5245/1: Fix warning about unused return value in drivers/pcmcia + * [ARM] 5246/1: tosa: add proper clock alias for tc6393xb clock + * [ARM] 5247/1: tosa: SW_EAR_IN support + * [ARM] Fix PCI_DMA_BUS_IS_PHYS for ARM + * ata: duplicate variable sparse warning + * sata_inic162x: enable LED blinking + * [libata] LBA28/LBA48 off-by-one bug in ata.h + * proc: more debugging for "already registered" case + * include/linux/ioport.h: add missing macro argument for devm_release_* + family + * cpuset: avoid changing cpuset's cpus when -errno returned + * cpuset: hotplug documentation fix + * coredump_filter: add description of bit 4 + * bfs: fix Lockdep warning + * mm: ifdef Quicklists in /proc/meminfo + * spi_mpc83xx: fix clockrate calculation for low speed + * spi_mpc83xx: reject invalid transfer sizes + * pxa2xx_spi: chipselect bugfixes + * pxa2xx_spi: dma bugfixes + * mm: mark the correct zone as full when scanning zonelists + * Documentation/ABI: /sys/class/gpio + * MAINTAINERS: fix USB VIDEO CLASS mail list address + * ia64: fix panic during `modprobe -r xpc' + * atmel_lcdfb: disable LCD and DMA engines when suspending + * spi_s3c24xx: fix section warning + * rescan_partitions(): make device capacity errors non-fatal + * memstick: fix MSProHG 8-bit interface mode support + * Add Uwe Kleine-König to .mailmap + * xen: fix for xen guest with mem > 3.7G + * x86/paravirt: Remove duplicate paravirt_pagetable_setup_{start, done}() + * crypto: talitos - Avoid consecutive packets going out with same IV + * slub: fixed uninitialized counter in struct kmem_cache_node + * udp: Fix rcv socket locking + * IB/mlx4: Fix up fast register page list format + * [MIPS] VR41xx: unsigned irq cannot be negative + * x86: completely disable NOPL on 32 bits + * [S390] cio: Fix driver_data handling for ccwgroup devices. 
+ * [S390] cio: fix orb initialization in cio_start_key + * sparc64: Fix OOPS in psycho_pcierr_intr_other(). + * sparc64: Fix SMP bootup with CONFIG_STACK_DEBUG or ftrace. + * RDMA/nes: Fix client side QP destroy + * IPoIB: Fix deadlock on RTNL between bcast join comp and ipoib_stop() + * clockevents: make device shutdown robust + * powerpc: Fix interrupt values for DMA2 in MPC8610 HPCD device tree + * hpplus: fix build regression + * Fix PNP build failure, bugzilla #11276 + * warn: Turn the netdev timeout WARN_ON() into a WARN() + * [XFS] Move memory allocations for log tracing out of the critical path + * [XFS] Fix regression introduced by remount fixup + * [XFS] Prevent direct I/O from mapping extents beyond eof + * [XFS] Fix barrier status change detection. + * [XFS] Prevent lockdep false positives when locking two inodes. + * [XFS] Fix use-after-free with buffers + * [XFS] Don't do I/O beyond eof when unreserving space + * powerpc: Holly board needs dtbImage target + * Fix compile failure with non modular builds + * [ARM] 5249/1: davinci: remove redundant check in davinci_psc_config() + * [ARM] omap: back out 'internal_clock' support + * sctp: set the skb->ip_summed correctly when sending over loopback. + * [ARM] 5255/1: Update jornada ssp to remove build errors/warnings + * sctp: do not enable peer features if we can't do them. + * sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH + * bnx2: Promote vector field in bnx2_irq structure from u16 to unsigned + int + * forcedeth: call restore mac addr in nv_shutdown path + * e1000: prevent corruption of EEPROM/NVM + * e100: Use pci_pme_active to clear PME_Status and disable PME# + * md: Don't wait UNINTERRUPTIBLE for other resync to finish + * atstk1000: fix build breakage with BOARD_ATSTK100X_SW2_CUSTOM=y + * avr32: add .gitignore files + * avr32: add generic_find_next_le_bit bit function + * avr32: fix sys_sync_file_range() call convention + * avr32: nmi_enter() without nmi_exit() + * KVM: ia64: 'struct fdesc' build fix + * hwmon: (atxp1) Fix device detection logic + * hwmon: (it87) Fix fan tachometer reading in IT8712F rev 0x7 (I) + * hwmon: (ad7414) Make ad7414_update_device() static + * tmio_mmc: fix compilation with debug enabled + * atmel-mci: debugfs: enable clock before dumping regs + * atmel-mci: Fix memory leak in atmci_regs_show + * atmel-mci: Fix bogus debugfs file size + * atmel-mci: Set MMC_CAP_NEEDS_POLL if no detect_pin + * mmc_block: handle error from mmc_register_driver() + * mmc_test: initialize mmc_test_lock statically + * [MIPS] Fix 64-bit IP checksum code + * [MIPS] SMTC: Clear TIF_FPUBOUND on clone / fork. + * [MIPS] Fix potential latency problem due to non-atomic cpu_wait. 
+ * [MIPS] vmlinux.lds.S: handle .text.* + * MAINTAINERS: Trivial whitespace cleanups + * MAINTAINERS: Various fixes + * Linux 2.6.27-rc7 + + -- Tim Gardner Sun, 21 Sep 2008 21:49:28 -0600 + +linux (2.6.27-3.4) intrepid; urgency=low + + [ Colin Ian King ] + + * SAUCE: fix kernel oops in VirtualBox during paravirt patching + - LP: #246067 + * SAUCE: qc-usb: Enable Logitech QuickCam Messenger + - LP: #209901 + * SAUCE: appleir: Enable driver for new MacBook Pro + - LP: #157919 + + [ Tim Gardner ] + + * Enabled CONFIG_DEBUG_RODATA=y + + [ Upstream Kernel Changes ] + + * Revert "ALSA: hda - Added model selection for iMac 24"" + * Revert "x86: fix HPET regression in 2.6.26 versus 2.6.25, check hpet + against BAR, v3" + * Revert "[ARM] use the new byteorder headers" + * Revert "mac80211: Use IWEVASSOCREQIE instead of IWEVCUSTOM" + * Revert "crypto: camellia - Use kernel-provided bitops, unaligned access + helpers" + * svcrdma: Fix race between svc_rdma_recvfrom thread and the dto_tasklet + * sched, cpuset: rework sched domains and CPU hotplug handling (v4) + * ACPI: Fix now signed module parameter. + * ACPI: Change package length error to warning + * ACPI: Fix now signed module parameter. + * ACPI: Fix typo in "Disable MWAIT via DMI on broken Compal board" + * acpi: add checking for NULL early param + * UBIFS: fix zero-length truncations + * Input: bcm5974 - add maintainer entry + * sh64: re-add the __strnlen_user() prototype + * sh: fix ptrace_64.c:user_disable_single_step() + * PNPACPI: ignore the producer/consumer bit for extended IRQ descriptors + * UBIFS: always read hashed-key nodes under TNC mutex + * UBIFS: allow for racing between GC and TNC + * [CIFS] Fix plaintext authentication + * sparc32: Implement smp_call_function_single(). + * sh: crash kernel resource fix + * sh: fix kexec entry point for crash kernels + * sh: fix platform_resource_setup_memory() section mismatch + * sh: update Migo-R defconfig + * sh: update AP325RXA defconfig + * sh: fix semtimedop syscall + * cifs: fix O_APPEND on directio mounts + * [CIFS] update cifs change log + * [CIFS] Turn off Unicode during session establishment for plaintext + authentication + * ACPI: thinkpad-acpi: wan radio control is not experimental + * sparc: Fix resource flags for PCI children in OF device tree. + * remove blk_register_filter and blk_unregister_filter in gendisk + * ALSA: oxygen: fix distorted output on AK4396-based cards + * ipv6: When we droped a packet, we should return NET_RX_DROP instead of + 0 + * pkt_sched: Fix locking of qdisc_root with qdisc_root_sleeping_lock() + * net: Unbreak userspace usage of linux/mroute.h + * Don't trigger softlockup detector on network fs blocked tasks + * Resource handling: add 'insert_resource_expand_to_fit()' function + * sparc64: setup_valid_addr_bitmap_from_pavail() should be __init + * UBIFS: do not update min_idx_lebs in stafs + * UBIFS: push empty flash hack down + * UBIFS: remove incorrect index space check + * UBIFS: improve statfs reporting + * UBIFS: fix assertion + * UBIFS: add forgotten gc_idx_lebs component + * UBIFS: introduce LEB overhead + * UBIFS: improve statfs reporting even more + * UBIFS: fill f_fsid + * drm/radeon: downgrade debug message from info to debug. + * Remove invalidate_partition call from do_md_stop. 
+ * Fix problem with waiting while holding rcu read lock in md/bitmap.c
+ * ALSA: hda: Distortion fix for dell_m6_core_init
+ * ALSA: ASoC: fix pxa2xx-i2s clk_get call
+ * block: restore original behavior of /proc/partition when there's no
+ partition
+ * debugobjects: fix lockdep warning
+ * avr32: Fix lockup after Java stack underflow in user mode
+ * avr32: pm_standby low-power ram bug fix
+ * nfsd: fix compound state allocation error handling
+ * sunrpc: fix possible overrun on read of /proc/sys/sunrpc/transports
+ * nfsd: fix buffer overrun decoding NFSv4 acl
+ * audit: Moved variable declaration to beginning of function
+ * Fix modules_install on RO nfs-exported trees.
+ * Remove '#include ' from mm/page_isolation.c
+ * dabusb_fpga_download(): fix a memory leak
+ * [MTD] mtdchar.c: Fix regression in MEMGETREGIONINFO ioctl()
+ * ALSA: hda - Fix ALC663 auto-probe
+ * ALSA: hda - Add mic-boost controls to ALC662/663 auto configuration
+ * Un-break printk strings in x86 PCI probing code
+ * kernel/resource.c: fix new kernel-doc warning
+ * softlockup: minor cleanup, don't check task->state twice
+ * fix typo in arch/parisc/hpux/fs.c
+ * m68k: atari_keyb_init operator precedence fix
+ * ACPI: Fix typo in "Disable MWAIT via DMI on broken Compal board"
+ * don't diff generated firmware files
+ * IDE: compile fix for sff_dma_ops
+ * IDE: palm_bk3710: fix compile warning for unused variable
+ * ide: fix hwif_to_node()
+ * palm_bk3710: improve IDE registration
+ * ide-disk: remove stale init_idedisk_capacity() documentation
+ * ide/Kconfig: mark ide-scsi as deprecated
+ * net/wireless/Kconfig: clarify the description for
+ CONFIG_WIRELESS_EXT_SYSFS
+ * iwlwifi: do not use GFP_DMA in iwl_tx_queue_init
+ * iwlwifi: workaround interrupt handling on some platforms
+ * iwlwifi: fix apm_stop (wrong bit polarity for FLAG_INIT_DONE)
+ * iwlwifi: fix 64bit platform firmware loading
+ * orinoco: Multicast to the specified addresses
+ * wireless/libertas/if_cs.c: fix memory leaks
+ * mac80211: Fix debugfs union misuse and pointer corruption
+ * rt2x00: Compiler warning unmasked by fix of BUILD_BUG_ON
+ * ath9k: Incorrect key used when group and pairwise ciphers are
+ different.
+ * ath9: Fix ath_rx_flush_tid() for IRQs disabled kernel warning message.
+ * net/xfrm: Use an IS_ERR test rather than a NULL test + * ipv: Re-enable IP when MTU > 68 + * NTFS: update homepage + * mm: make setup_zone_migrate_reserve() aware of overlapping nodes + * VFS: fix dio write returning EIO when try_to_release_page fails + * acer-wmi: remove debugfs entries upon unloading + * mm/bootmem: silence section mismatch warning - + contig_page_data/bootmem_node_data + * MAINTAINERS: add a maintainer for the BCM5974 multitouch driver + * 8250: improve workaround for UARTs that don't re-assert THRE correctly + * mmc: at91_mci: don't use coherent dma buffers + * pid_ns: zap_pid_ns_processes: fix the ->child_reaper changing + * pid_ns: (BUG 11391) change ->child_reaper when init->group_leader exits + * cirrusfb: check_par fixes + * devcgroup: fix race against rmdir() + * mm: show quicklist usage in /proc/meminfo + * mm: size of quicklists shouldn't be proportional to the number of CPUs + * ipc: document the new auto_msgmni proc file + * hp-wmi: update to match current rfkill semantics + * hp-wmi: add proper hotkey support + * tdfxfb: fix SDRAM memory size detection + * tdfxfb: fix frame buffer name overrun + * rtc_time_to_tm: fix signed/unsigned arithmetic + * ibft: fix target info parsing in ibft module + * sysfs: document files in /sys/firmware/sgi_uv/ + * rtc-cmos: wake again from S5 + * pm_qos_requirement might sleep + * drivers/char/random.c: fix a race which can lead to a bogus BUG() + * ipsec: Fix deadlock in xfrm_state management. + * [x86] Fix TSC calibration issues + * tipc: Don't use structure names which easily globally conflict. + * sparc64: Fix IPI call locking. + * [ARM] omap: fix gpio.c build error + * sparc64: Prevent sparc64 from invoking irq handlers on offline CPUs + * powerpc: Fix uninitialised variable in VSX alignment code + * powerpc: Only make kernel text pages of linear mapping executable + * powerpc: Make sure _etext is after all kernel text + * powerpc: Work around gcc's -fno-omit-frame-pointer bug + * powerpc: Fix build error with 64K pages and !hugetlbfs + * powerpc: Fix for getting CPU number in power_save_ppc32_restore() + * UBIFS: amend f_fsid + * net/usb/pegasus: avoid hundreds of diagnostics + * ixgbe: initialize interrupt throttle rate + * pcnet-cs, axnet_cs: add new IDs, remove dup ID with less info + * netxen: Remove workaround for chipset quirk + * Split up PIT part of TSC calibration from native_calibrate_tsc + * iwlwifi: W/A for the TSF correction in IBSS + * iwlwifi: fix hidden ssid discovery in passive channels + * iwlwifi: remove false rxon if rx chain changes + * iwlwifi: fix station mimo power save values + * iwlwifi: fix rx_chain computation + * iwlwifi: fix Tx cmd memory allocation failure handling + * iwlwifi: call apm stop on exit + * iwlwifi: fix STATUS_EXIT_PENDING is not set on pci_remove + * ath9k: Fix TX status reporting + * ath9k: Fix TX control flag use for no ACK and RTS/CTS + * V4L/DVB (8555): au8522: add mechanism to configure IF frequency for vsb + and qam + * V4L/DVB (8556): au0828: add support for Hauppauge Woodbury + * V4L/DVB (8598): au8522: clean up function au8522_set_if + * V4L/DVB (8599): au8522: remove if frequency settings from vsb/qam + modulation tables + * V4L/DVB (8600): au0828: explicitly set 6 MHz IF frequency in + hauppauge_hvr950q_config + * V4L/DVB (8629): v4l2-ioctl: do not try to handle private V4L1 ioctls + * V4L/DVB (8633): ivtv: update ivtv version number + * V4L/DVB (8648): ivtv: improve CC support + * V4L/DVB (8660): gspca: Simplify the scan of URB packets in pac7311. 
+ * V4L/DVB (8661): gspca: Bug in the previous changeset about pac7311. + * V4L/DVB (8663): gspca: Webcam 0c45:6128 added in sonixj. + * V4L/DVB (8664): gspca: The bridge/sensor of the webcam 093a:2621 is a + PAC 7302. + * V4L/DVB (8665): gspca: Fix the 640x480 resolution of the webcam + 093a:2621. + * V4L/DVB (8666): gspca: Bad scanning of frames in pac7311. + * V4L/DVB (8667): gspca: Bad probe of Z-Star/Vimicro webcams with pas106 + sensor. + * V4L/DVB (8668): gspca: Conflict GSPCA / ET61X251 for the webcam + 102c:6251. + * V4L/DVB (8669): gspca: Add white balance control for spca561 rev 012A. + * V4L/DVB (8671): gspca: Remove the unused field 'dev_name' of the device + structure. + * V4L/DVB (8672): gspca: Big rewrite of spca561. + * V4L/DVB (8673): gspca: Bad frame scanning again and bad init in + pac7311. + * V4L/DVB (8674): gspca: Webcam 0c45:612e added in sonixj. + * V4L/DVB (8675): gspca: Pixmap PJPG (Pixart 73xx JPEG) added, generated + by pac7311. + * V4L/DVB (8678): Remove the dead CONFIG_RADIO_MIROPCM20{,_RDS} code + * V4L/DVB (8681): v4l2-ioctl.c: fix warning + * V4L/DVB (8682): V4L: fix return value of register video func + * V4L/DVB (8701): cx18: Add missing lock for when the irq handler + manipulates the queues + * V4L/DVB (8703): gspca: Do controls work for spca561 revision 12a. + * V4L/DVB (8705): gspca: Adjust some control limits in spca561. + * V4L/DVB (8706): Make contrast and brightness work for pac7302. + * V4L/DVB (8707): gspca: Colors, hflip and vflip controls added for + pac7302. + * V4L/DVB (8709): gspca: Fix initialization and controls of sn9x110 - + ov7630. + * V4L/DVB (8710): gspca: Bad color control in sonixj. + * V4L/DVB (8711): gspca: Bad controls and quantization table of pac7311. + * V4L/DVB (8712): gspca: Bad start of sonixj webcams since changeset + a8779025e7e8. + * V4L/DVB (8713): gspca: Bad color control again in sonixj. + * V4L/DVB (8714): gspca: Bad start of sn9c110 and sensor om6802. + * V4L/DVB (8715): gspca: Change the name of some webcam in the gspca doc. + * V4L/DVB (8716): gspca: Bad start of sn9c110 and sensor ov7630. + * V4L/DVB (8717): gspca: Frame buffer too small for small resolutions + (sonixj and t613). + * V4L/DVB (8718): gspca: suspend/resume added. + * V4L/DVB (8719): gspca: Have VIDIOC_QUERYCTRL more compliant to the + spec. + * V4L/DVB (8720): gspca: V4L2_CAP_SENSOR_UPSIDE_DOWN added as a cap for + some webcams. + * V4L/DVB (8722): sms1xxx: fix typo in license header + * V4L/DVB (8726): link tuner before saa7134 + * V4L/DVB (8727): V4L1: make PMS not autoprobe when builtin. + * V4L/DVB (8728): 1-make-pms-not-autoprobe-when-builtin update + * V4L/DVB (8749): Fix error code, when camera is not turned on by sonypi + * V4L/DVB (8750): V4L: check inval in video_register_device_index() + * V4L/DVB (8751): vivi: Fix some issues at vivi register routine + * V4L/DVB (8757): v4l-dvb: fix a bunch of sparse warnings + * V4L/DVB (8769): cx18: Simplify queue flush logic to prevent oops in + cx18_flush_queues() + * V4L/DVB (8778): radio: fix incorrect video_register_device result check + * V4L/DVB (8779): v4l: fix more incorrect video_register_device result + checks + * V4L/DVB (8790): saa7115: call i2c_set_clientdata only when state != + NULL + * V4L/DVB (8803): s5h1409: Enable QAM_AUTO mode + * V4L/DVB (8804): s5h1411: Enable QAM_AUTO mode + * V4L/DVB (8805): Steven Toth email address change + * V4L/DVB (8809): gspca: Revert commit + 9a9335776548d01525141c6e8f0c12e86bbde982 + * V4L/DVB (8810): gspca: Compile error when CONFIG_PM not defined. 
+ * V4L/DVB (8812): gspca: Do pac73xx webcams work. + * V4L/DVB (8813): gspca: Adjust SOF detection for pac73xx. + * V4L/DVB (8814): gspca: Set DISABLED the disabled controls at query + control time. + * V4L/DVB (8815): gspca: Fix problems with disabled controls. + * V4L/DVB (8816): gspca: Set disabled ctrls and fix a register pb with + ovxxxx in sonixb. + * V4L/DVB (8817): gspca: LED and proble changes in sonixb. + * V4L/DVB (8818): gspca: Reinitialize the device on resume. + * V4L/DVB (8819): gspca: Initialize the ov519 at open time and source + cleanup. + * V4L/DVB (8820): gspca: Change initialization and gamma of zc3xx - + pas106. + * V4L/DVB (8822): gspca: Change some subdriver functions for + suspend/resume. + * V4L/DVB (8823): gspca: H and V flips work for ov7670 only in ov519. + * V4L/DVB (8824): gspca: Too much code removed in the suspend/resume + changeset. + * V4L/DVB (8825): gspca: More controls for pac73xx and new webcam + 093a:2624. + * V4L/DVB (8826): gspca: Webcam Labtec 2200 (093a:2626) added in pac7311. + * V4L/DVB (8827): gspca: Stop pac7302 autogain oscillation. + * V4L/DVB (8828): gspca: Set the clock at the end of initialization in + sonixj. + * V4L/DVB (8829): gspca: Have a clean kmalloc-ated buffer for USB + exchanges. + * V4L/DVB (8830): gspca: Move some probe code to the new init function. + * V4L/DVB (8831): gspca: Resolve webcam conflicts between some drivers. + * V4L/DVB (8832): gspca: Bad pixelformat of vc0321 webcams. + * V4L/DVB (8833): gspca: Cleanup the sonixb code. + * V4L/DVB (8834): gspca: Have a bigger buffer for sn9c10x compressed + images. + * V4L/DVB (8835): gspca: Same pixfmt as the sn9c102 driver and raw Bayer + added in sonixb. + * V4L/DVB (8837): dvb: fix I2C adapters name size + * V4L/DVB (8839): dib0700: add comment to identify 35th USB id pair + * V4L/DVB (8840): dib0700: add basic support for Hauppauge Nova-TD-500 + (84xxx) + * V4L/DVB (8842): vivi_release(): fix use-after-free + * V4L/DVB (8843): tda10048_firmware_upload(): fix a memory leak + * V4L/DVB (8844): dabusb_fpga_download(): fix a memory leak + * bnx2x: Accessing un-mapped page + * SELinux: memory leak in security_context_to_sid_core + * x86: add io delay quirk for Presario F700 + * mmap: fix petty bug in anonymous shared mmap offset handling + * x86: Change warning message in TSC calibration. + * PCI: fix pbus_size_mem() resource alignment for CardBus controllers + * [ARM] omap: fix build error in ohci-omap.c + * [ARM] remove unused #include + * ACPI: Make Len Brown the ACPI maintainer again + * fujitsu-laptop: fix regression for P8010 in 2.6.27-rc + * ACPI: Avoid bogus timeout about SMbus check + * acer-wmi: remove debugfs entries upon unloading + * forgotten refcount on sysctl root table + * V4L/DVB (8868): gspca: Support for vga modes with sif sensors in + sonixb. + * V4L/DVB (8869): gspca: Move the Sonix webcams with TAS5110C1B from + sn9c102 to gspca. + * V4L/DVB (8870): gspca: Fix dark room problem with sonixb. + * V4L/DVB (8872): gspca: Bad image format and offset with rev072a of + spca561. + * V4L/DVB (8873): gspca: Bad image offset with rev012a of spca561 and + adjust exposure. + * V4L/DVB (8874): gspca: Adjust hstart for sn9c103/ov7630 and update + usb-id's. 
+ * [ARM] omap: fix virtual vs physical address space confusions + * V4L/DVB (8876): budget: udelay changed to mdelay + * V4L/DVB (8877): b2c2 and bt8xx: udelay to mdelay + * V4L/DVB (8880): PATCH: Fix parents on some webcam drivers + * V4L/DVB (8881): gspca: After 'while (retry--) {...}', retry will be -1 + but not 0. + * powerpc/spufs: Fix multiple get_spu_context() + * powerpc/spufs: Fix race for a free SPU + * Input: bcm5974 - small formatting cleanup + * Input: bcm5974 - improve finger tracking and counting + * Input: bcm5974 - add BTN_TOUCH event for mousedev benefit + * Input: i8042 - make Lenovo 3000 N100 blacklist entry more specific + * sh: resume_kernel fix for kernel oops built with CONFIG_BKL_PREEMPT=y. + * sh64: resume_kernel fix for kernel oops built with + CONFIG_BKL_PREEMPT=y. + * i2c: fix i2c-sh_mobile timing issues + * clockevents: prevent clockevent event_handler ending up handler_noop + * clockevents: prevent endless loop in periodic broadcast handler + * clockevents: enforce reprogram in oneshot setup + * clockevents: prevent multiple init/shutdown + * clockevents: prevent endless loop lockup + * HPET: make minimum reprogramming delta useful + * [MTD] [NAND] tmio_nand: fix base address programming + * Fix conditional export of kvh.h and a.out.h to userspace. + * async_tx: fix the bug in async_tx_run_dependencies + * sched_clock: fix NOHZ interaction + * sched: fix process time monotonicity + * UBIFS: fix division by zero + * UBIFS: make minimum fanout 3 + * [MIPS] Fix data bus error recovery + * [MIPS] Fix WARNING: at kernel/smp.c:290 + * [MIPS] TXx9: Fix txx9_pcode initialization + * [MIPS] TX39xx: Add missing local_flush_icache_range initialization + * [MIPS] Probe initrd header only if explicitly specified + * res_counter: fix off-by-one bug in setting limit + * forcedeth: fix kexec regression + * atmel_lcdfb: fix oops in rmmod when framebuffer fails to register + * tracehook: comment pasto fixes + * drivers/mmc/card/block.c: fix refcount leak in mmc_block_open() + * x86: boot: stub out unimplemented CPU feature words + * x86: add NOPL as a synthetic CPU feature bit + * x86: use X86_FEATURE_NOPL in alternatives + * clockevents: broadcast fixup possible waiters + * x86: HPET fix moronic 32/64bit thinko + * x86: HPET: read back compare register before reading counter + * Fix CONFIG_AC97_BUS dependency + * [ARM] 5241/1: provide ioremap_wc() + * ntp: fix calculation of the next jiffie to trigger RTC sync + * clocksource, acpi_pm.c: use proper read function also in errata mode + * clocksource, acpi_pm.c: check for monotonicity + * x86: delay early cpu initialization until cpuid is done + * x86: move mtrr cpu cap setting early in early_init_xxxx + * sched: arch_reinit_sched_domains() must destroy domains to force + rebuild + * x86, xen: Use native_pte_flags instead of native_pte_val for .pte_flags + * x86: pda_init(): fix memory leak when using CPU hotplug + * x86: cpu_init(): fix memory leak when using CPU hotplug + * powerpc/spufs: Fix possible scheduling of a context to multiple SPEs + * netfilter: nf_conntrack_sip: de-static helper pointers + * netfilter: nf_conntrack_gre: more locking around keymap list + * netfilter: nf_conntrack_gre: nf_ct_gre_keymap_flush() fixlet + * netfilter: nf_conntrack_irc: make sure string is terminated before + calling simple_strtoul + * pkt_sched: Fix qdisc state in net_tx_action() + * powerpc: Fix rare boot build breakage + * ahci, pata_marvell: play nicely together + * sata_mv: add RocketRaid 1720 PCI ID to driver + * ahci: disable PMP 
for marvell ahcis + * sata_nv: disable hardreset for generic + * libata-sff: kill spurious WARN_ON() in ata_hsm_move() + * pata_sil680: remove duplicate pcim_enable_device + * ahci: RAID mode SATA patch for Intel Ibex Peak DeviceIDs + * [MIPS] IP22: Fix detection of second HPC3 on Challenge S + * xen: fix 2.6.27-rc5 xen balloon driver warnings + * x86: disable static NOPLs on 32 bits + * netns : fix kernel panic in timewait socket destruction + * bridge: don't allow setting hello time to zero + * NFS: Restore missing hunk in NFS mount option parser + * usb: fix null deferences in low level usb serial + * Fix format of MAINTAINERS + * sparc64: Disable timer interrupts in fixup_irqs(). + * [Bluetooth] Fix reference counting during ACL config stage + * [Bluetooth] Enforce correct authentication requirements + * [Bluetooth] Reject L2CAP connections on an insecure ACL link + * [S390] CVE-2008-1514: prevent ptrace padding area read/write in 31-bit + mode + * [S390] cio: Correct cleanup on error. + * [S390] cio: handle ssch() return codes correctly. + * [S390] cio: allow offline processing for disconnected devices + * ipsec: Restore larval states and socket policies in dump + * update Documentation/filesystems/Locking for 2.6.27 changes + * MAINTAINERS: add Atheros maintainer for atlx + * lib: Correct printk %pF to work on all architectures + * x86: fix memmap=exactmap boot argument + * clockevents: remove WARN_ON which was used to gather information + * ipv6: Fix OOPS in ip6_dst_lookup_tail(). + * Linux 2.6.27-rc6 + + -- Ben Collins Tue, 02 Sep 2008 12:45:56 -0400 + +linux (2.6.27-2.3) intrepid; urgency=low + + [ Ben Collins ] + + * build/retag: Make script save .orig of tags for later use + * ubuntu/lirc: Fix device_create call + * build/firmware: Put in-kernel firmware into version specific subdir + - LP: #262115 + * Rebase on linux-2.6 git. + * ABI bump + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: (no-up) Apparmor warning fixes + + [ John Johansen ] + + * SAUCE: (no-up) Proper AppArmor ptrace updates for newer lsm API + + [ Mackenzie Morgan ] + + * SAUCE: Add quirk for ASUS Z37E to make sound audible after resume + - LP: #25896 + + -- Ben Collins Wed, 27 Aug 2008 14:03:05 -0400 + +linux (2.6.27-1.2) intrepid; urgency=low + + [ Amit Kucheria ] + + * SAUCE: make fc transport removal of target configurable + * SAUCE: pm: Config option to disable handling of console during + suspend/resume + + [ Ben Collins ] + + * SAUCE: Lower warning level of some PCI messages + * SAUCE: input/mouse/alps: Do not call psmouse_reset() for alps + * SAUCE: tulip: Let dmfe handle davicom on non-sparc + * SAUCE: tulip: Define ULI PCI ID's + * SAUCE: (no-up) version: Implement version_signature proc file. + * SAUCE: (no-up) connector.h: Add idx/val for drbd + * SAUCE: (no-up) swap: Add notify_swap_entry_free callback for compcache + * SAUCE: drivers: Remove some duplicate device entries in various modules + * SAUCE: (no-up) [AppArmor] merge with upstream subversion r1291 + * SAUCE: apparmor: Update for changes to ptrace lsm hooks + * SAUCE: (no-up) Enable ubuntu extra subdirectory + * SAUCE: applesmc: Add MacBookAir + * SAUCE: (no-up) ACPI: initramfs DSDT override support + * ubuntu: Add drbd module + * ubuntu: Add iscsitarget module + * ubuntu: Add BOM for iscsitarget + * ubuntu: Add squashfs driver + * SAUCE: (no-up) Check for squashfs superblock in initramfs mounting. 
+ * ubuntu: Add aufs module + * ubuntu: Added atl2 driver + * ubuntu: Added et131x driver + * ubuntu: Add dm-raid4-5 driver + * ubuntu: Add ndiswrapper driver + * ubuntu: Added ram backed compressed swap module (compcache) + * ubuntu: Add misc drivers from hardy lum + * ubuntu: Add heci driver 3.2.0.24 + * ubuntu: Add ov511 and bt-sco drivers + * ubuntu: Add acx, prism2_usb wireless drivers + * ubuntu: Add at76 driver to build + * ubuntu: Add fsam7400 sw kill switch driver + * ubuntu: Added qc-usb driver + * ubuntu: e1000e: Upgraded module to 0.4.1.7 + * ubuntu: Added rfkill drivers + * ubuntu: VIA - Add VIA DRM Chrome9 3D engine + * ubuntu: unionfs: Added v1.4 module from hardy + * ubuntu: Add LIRC driver + * ubuntu: Add GFS driver + * ubuntu: New tlsup driver for toshiba laptops + * Update config files + * build/d-i: Remove obsolete dm modules + + [ Chuck Short ] + + * SAUCE: ata: blacklist FUJITSU MHW2160BH PL + + [ Colin Ian King ] + + * ubuntu: Add dm-loop + * SAUCE: Enable speedstep for sonoma processors. + + [ Dennis Noordsij ] + + * SAUCE: Work around ACPI corruption upon suspend on some Dell machines. + + [ Fabio M. Di Nitto ] + + * SAUCE: Export gfs2 symbols required for gfs1 kernel module + + [ Matthew Garrett ] + + * SAUCE: hostap: send events on data interface as well as master + interface + + [ Michael Frey (Senior Manager, MID ] + + * SAUCE: Send HCI_RESET for Broadcomm 2046 + + [ Phillip Lougher ] + + * SAUCE: r8169: disable TSO by default for RTL8111/8168B chipsets. + + [ Stefan Bader ] + + * SAUCE: (no-up) Export dm_disk function of device-mapper + * SAUCE: Restore VT fonts on switch + * SAUCE: mmc: Increase power_up deleay to fix TI readers + + [ Tim Gardner ] + + * SAUCE: Add extra headers to linux-libc-dev + * SAUCE: Catch nonsense keycodes and silently ignore + * SAUCE: Added support for HDAPS on various ThinkPads from Lenovo and IBM + * SAUCE: Guest OS does not recognize a lun with non zero target id on + Vmware ESX Server + * SAUCE: (no-up) Take care of orinoco_cs overlap with hostap_cs + * ubuntu: Add GNBD driver + + -- Ben Collins Sat, 23 Aug 2008 15:48:35 -0400 + +linux (2.6.27-0.0) intrepid; urgency=low + + * Not uploaded, placeholder for new release + + -- Ben Collins Sat, 23 Aug 2008 15:48:35 -0400 + +linux (2.6.26-5.17) intrepid; urgency=low + + [ Ben Collins ] + + * build/abi: Add tosh_smm symbol to blacklist + + -- Ben Collins Fri, 15 Aug 2008 09:29:34 -0400 + +linux (2.6.26-5.16) intrepid; urgency=low + + [ Ben Collins ] + + * Revert "SAUCE: toshiba_acpi: Rewrote most of the proc entry bits." + * Revert "SAUCE: Update toshiba_acpi.c to version 0.19a" + * build/config: Disable in-kernel toshiba driver(s) + * ubuntu/tlsup: New driver for toshiba laptops + * build/config: Enable TLSUP driver + * SAUCE: e1000e: Fix E1000E_ENABLED logic to check for our E1000E_NEW + driver as well + * ubuntu/e1000e: Remove E1000E_ENABLED option in local config + * build/config: Update configs to have E1000E_ENABLED set + * ubuntu/prism2: Remove duplicate device + + [ Fabio M. 
Di Nitto ] + + * SAUCE: Export gfs2 symbols required for gfs1 kernel module + + [ Stefan Bader ] + + * SAUCE: x86: HPET rework for SB700 + - LP: #255910 + + [ Tim Gardner ] + + * Add GNBD driver + * Enable GNBD driver + * SAUCE: Add GFS driver + * SAUCE: Enable gfs driver configs + * b43: Linksys WMP54G (BCM4306/3) card in a PCI format has an SPROM + coding + + [ Upstream Kernel Changes ] + + * KVM: x86 emulator: emulate clflush + * USB: quirk PLL power down mode + + -- Ben Collins Mon, 11 Aug 2008 13:19:28 -0400 + +linux (2.6.26-5.15) intrepid; urgency=low + + [ Ben Collins ] + + * Revert "SAUCE: Add blacklist support to fix Belkin bluetooth dongle." + - Superceded by upstream changes. + * build/config: New option enabled for uvcvideo + * build/control: Add Vcs-Git meta data to control file + * SAUCE: toshiba_acpi: Rewrote most of the new code + * abi/perm-blacklist: Add emu10k1 driver to blacklist + + [ Upstream Kernel Changes ] + + * pxamci: trivial fix of DMA alignment register bit clearing + * udplite: Protection against coverage value wrap-around + * ipv6: use timer pending + * ipv6: __KERNEL__ ifdef struct ipv6_devconf + * hdlcdrv: Fix CRC calculation. + * quota: fix possible infinite loop in quota code + * isofs: fix minor filesystem corruption + * KVM: VMX: Fix a wrong usage of vmcs_config + * KVM: SVM: fix suspend/resume support + * KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held + * KVM: VMX: Add ept_sync_context in flush_tlb + * KVM: x86 emulator: Fix HLT instruction + * KVM: MMU: nuke shadowed pgtable pages and ptes on memslot destruction + * KVM: MMU: Fix potential race setting upper shadow ptes on nonpae hosts + * Patch Upstream: x86 ptrace: fix PTRACE_GETFPXREGS error + * rcu: fix rcu_try_flip_waitack_needed() to prevent grace-period stall + * Fix typos from signal_32/64.h merge + * x86 reboot quirks: add Dell Precision WorkStation T5400 + * USB: fix usb serial pm counter decrement for disconnected interfaces + * x86, suspend, acpi: enter Big Real Mode + * markers: fix duplicate modpost entry + * Fix build on COMPAT platforms when CONFIG_EPOLL is disabled + * proc: fix /proc/*/pagemap some more + * cpusets: fix wrong domain attr updates + * x86: fix crash due to missing debugctlmsr on AMD K6-3 + * ide-cd: fix oops when using growisofs + * rtc-at91rm9200: avoid spurious irqs + * vmlinux.lds: move __attribute__((__cold__)) functions back into final + .text section + * ARM: fix fls() for 64-bit arguments + * tcp: Clear probes_out more aggressively in tcp_ack(). + * sparc64: Fix lockdep issues in LDC protocol layer. + * sparc64: Fix cpufreq notifier registry. + * sparc64: Do not define BIO_VMERGE_BOUNDARY. 
+ * iop-adma: fix platform driver hotplug/coldplug + * myri10ge: do not forget to setup the single slice pointers + * myri10ge: do not use mgp->max_intr_slots before loading the firmware + * ALSA: trident - pause s/pdif output + * V4L: cx18: Upgrade to newer firmware & update documentation + * DVB: dib0700: add support for Hauppauge Nova-TD Stick 52009 + * V4L: uvcvideo: Fix a buffer overflow in format descriptor parsing + * V4L: uvcvideo: Use GFP_NOIO when allocating memory during resume + * V4L: uvcvideo: Don't free URB buffers on suspend + * V4L: uvcvideo: Make input device support optional + * V4L: uvcvideo: Add support for Medion Akoya Mini E1210 integrated + webcam + * V4L: saa7134: Copy tuner data earlier to avoid overwriting manual tuner + type + * V4L: cx23885: Bugfix for concurrent use of /dev/video0 and /dev/video1 + * DVB: cx23885: Ensure PAD_CTRL is always reset to a sensible default + * DVB: cx23885: DVB Transport cards using DVB port VIDB/TS1 did not + stream + * DVB: cx23885: Reallocated the sram to avoid concurrent VIDB/C issues + * DVB: cx23885: SRAM changes for the 885 and 887 silicon parts + * x86: fix kernel_physical_mapping_init() for large x86 systems + * eCryptfs: use page_alloc not kmalloc to get a page of memory + * UML - Fix boot crash + * ixgbe: remove device ID for unsupported device + * mpc52xx_psc_spi: fix block transfer + * tmpfs: fix kernel BUG in shmem_delete_inode + * markers: fix markers read barrier for multiple probes + * VFS: increase pseudo-filesystem block size to PAGE_SIZE + * cpufreq acpi: only call _PPC after cpufreq ACPI init funcs got called + already + * b43legacy: Release mutex in error handling code + * ath5k: don't enable MSI, we cannot handle it yet + * Fix off-by-one error in iov_iter_advance() + * Linux 2.6.26.1 + * ftrace: remove unneeded documentation + * romfs_readpage: don't report errors for pages beyond i_size + * netfilter: nf_nat_sip: c= is optional for session + * SCSI: bsg: fix bsg_mutex hang with device removal + * x86: idle process - add checking for NULL early param + * x86: io delay - add checking for NULL early param + * Close race in md_probe + * Kprobe smoke test lockdep warning + * netfilter: xt_time: fix time's time_mt()'s use of do_div() + * linear: correct disk numbering error check + * SCSI: ch: fix ch_remove oops + * NFS: Ensure we zap only the access and acl caches when setting new acls + * jbd: fix race between free buffer and commit transaction + * Input: i8042 - add Intel D845PESV to nopnp list + * Input: i8042 - add Gericom Bellagio to nomux blacklist + * Input: i8042 - add Acer Aspire 1360 to nomux blacklist + * Bluetooth: Signal user-space for HIDP and BNEP socket errors + * Add compat handler for PTRACE_GETSIGINFO + * ALSA: hda - Fix wrong volumes in AD1988 auto-probe mode + * ALSA: hda - Fix DMA position inaccuracy + * ALSA: hda - Add missing Thinkpad Z60m support + * ALSA: emu10k1 - Fix inverted Analog/Digital mixer switch on Audigy2 + * vfs: fix lookup on deleted directory + * Ath5k: fix memory corruption + * Ath5k: kill tasklets on shutdown + * sound: ensure device number is valid in snd_seq_oss_synth_make_info + * Linux 2.6.26.2 + + -- Ben Collins Sun, 03 Aug 2008 13:25:02 -0400 + +linux (2.6.26-5.14) intrepid; urgency=low + + [ Ben Collins ] + + * SAUCE: applesmc: Add MacBookAir + * build: Do not build ddeb unless we are on the buildd + * build: control: Consistency in arch fields. 
+ * SAUCE: Update toshiba_acpi.c to version 0.19a + - LP: #77026 + * build: Added perm blacklist support and per-module support to abi-check + - Blacklist p80211 module from abi checks + * ubuntu/lirc: Get rid of drivers symlink and use real include stuff + + + [ Colin Ian King ] + + * SAUCE: acerhk module - add support for Amilo A1650g keyboard + - LP: #84159 + * SAUCE: rt2x00: Fix OOPS on failed creation of rt2x00lib workqueue + - LP: #249242 + + [ Mario Limonciello ] + + * Add LIRC back in + + [ Tim Gardner ] + + * Makefile race condition can lead to ndiswrapper build failure + - LP: #241547 + * update linux-wlan-ng (prism2_usb) to upstream version 1861 + - LP: #245026 + + [ Upstream Kernel Changes ] + + * Fix typos from signal_32/64.h merge + + -- Ben Collins Fri, 01 Aug 2008 00:05:01 -0400 + +linux (2.6.26-5.13) intrepid; urgency=low + + [ Ben Collins ] + + * build: Make makedumpfile an amd64/i386 only build-dep + * ubuntu/acerhk: Fixup assembly to compile with newer binutils + + -- Ben Collins Sat, 26 Jul 2008 16:41:50 -0400 + +linux (2.6.26-4.12) intrepid; urgency=low + + [ Ben Collins ] + + * e1000e: Upgraded module to 0.4.1.7 upstream. Placed in ubuntu/, + in-kernel driver disabled + * config: Disable e1000e in-kernel, and enable newer driver in ubuntu/ + * rfkill: Update to 1.3 drivers, and move to common location + * ubuntu: Actually link kconfig/kbuild into rfkill subdir + * config: Enable loading dsdt from initramfs + - LP: #246222 + * ubuntu: [compcache] Update to fix crashes in improper BUG() + * build: Create a retag scripts to recover tags from rebases + * build: Updates for dbg pkg + * build: Make sure no empty lines show up in debian/files + * ubuntu: atl1e: Add new driver from 2.6.27-pre-rc1 + - LP: #243894 + * sys_getcwd: Fix some brokeness introduced by AppArmor __d_path + changes + - LP: #251223 + * ubuntu: unionfs: Added v1.4 module from hardy + * build: Add sub-flavour infrastructure, and virtual subflav + + [ Eric Piel ] + + * ACPI: Allow custom DSDT tables to be loaded from initramfs + + [ Kees Cook ] + + * AppArmor: Smack VFS patches + + [ Mario Limonciello ] + + * Work around ACPI corruption upon suspend on some Dell machines. + - LP: #183033 + + [ Tim Gardner ] + + * Export usbhid_modify_dquirk for LBM module bcm5974 + - LP: #250838 + * VIA - Add VIA DRM Chrome9 3D engine + - LP: #251862 + * Define TRUE/FALSE for VIA DRM driver. + + -- Ben Collins Tue, 15 Jul 2008 12:51:39 -0400 + +linux (2.6.26-4.11) intrepid; urgency=low + + [ Ben Collins ] + + * config: Enable bcm5974 driver in all configs + + [ 2.6.26-4.10 ] + + [ Amit Kucheria ] + + * Fix typo in GSPCA Makefile and make it compile + + [ Ben Collins ] + + * ubuntu: Remove UVC driver in favor of in-kernel one (-rc9) + * config: Updates for -rc9 + * ubuntu: Add acx, prism2_usb wireless drivers + * config: Enable prism2_usb and acx drivers. + * ubuntu: Add at76 driver to build + * config: Enable at76_usb driver. + * iscsitarget: Fix prototype for bi_end_io callback. 
+ * acx: Fix section type mismatch warnings + * fsam7400: Add sw kill switch driver + * config: Enable fsam7400 driver + * qc-usb: Added new driver + * config: Enable qc-usb driver + * drbd: Remove built-in connector usage + * drbd: Do not define idx/val for connector here + * connector.h: Add idx/val for drbd + * bcm5974: Added new driver + + [ Kees Cook ] + + * SAUCE: [AppArmor] merge with upstream subversion r1291 + * SAUCE: [AppArmor] fix typo in selinux_inode_link + * SAUCE: [AppArmor] aufs patches + + [ Michael Frey (Senior Manager, MID ] + + * SAUCE: Send HCI_RESET for Broadcomm 2046 + - LP: #241749 + + [ Tim Gardner ] + + * SAUCE: Medion Akoya Mini E1210 + + [ Upstream Kernel Changes ] + + * Revert "BAST: Remove old IDE driver" + * ARM: OMAP: DMA: Don't mark channel active in omap_enable_channel_irq + * ARM: OMAP: Correcting the gpmc prefetch control register address + * debugobjects: fix lockdep warning + * [ARM] 5115/1: pxafb: fix ifdef for command line option handling + * [ARM] 5116/1: pxafb: cleanup and fix order of failure handling + * [ARM] 5109/1: Mark rtc sa1100 driver as wakeup source before + registering it + * [ARM] Export dma_sync_sg_for_device() + * fix cgroup-inflicted breakage in block_dev.c + * [patch for 2.6.26 2/4] vfs: utimensat(): be consistent with utime() for + immutable and append-only files + * [patch for 2.6.26 1/4] vfs: utimensat(): ignore tv_sec if tv_nsec == + UTIME_OMIT or UTIME_NOW + * [patch for 2.6.26 3/4] vfs: utimensat(): fix error checking for + {UTIME_NOW,UTIME_OMIT} case + * [patch for 2.6.26 4/4] vfs: utimensat(): fix write access check for + futimens() + * [patch 1/4] vfs: path_{get,put}() cleanups + * [patch 2/4] fs: make struct file arg to d_path const + * [patch 3/4] vfs: fix ERR_PTR abuse in generic_readlink + * [patch 4/4] flock: remove unused fields from file_lock_operations + * [patch 3/3] vfs: make d_path() consistent across mount operations + * [patch 1/3] vfs: dcache sparse fixes + * [patch 2/3] vfs: dcache cleanups + * udf: Fix regression in UDF anchor block detection + * [SCSI] ses: Fix timeout + * netfilter: ip6table_mangle: don't reroute in LOCAL_IN + * [SCSI] esp: Fix OOPS in esp_reset_cleanup(). + * kernel/audit.c: nlh->nlmsg_type is gotten more than once + * audit: fix kernel-doc parameter notation + * remove useless argument type in audit_filter_user() + * Blackfin arch: fix bug - kernel boot fails when Spinlock and rw-lock + debugging enabled + * Blackfin arch: fix up section mismatch warning + * mac80211: implement EU regulatory domain + * b43: Do not return TX_BUSY from op_tx + * b43legacy: Do not return TX_BUSY from op_tx + * b43: Fix possible MMIO access while device is down + * b43legacy: Fix possible NULL pointer dereference in DMA code + * rt2x00: Fix unbalanced mutex locking + * iwlwifi: improve scanning band selection management + * [SCSI] esp: tidy up target reference counting + * [ARM] 5117/1: pxafb: fix __devinit/exit annotations + * thermal: Create CONFIG_THERMAL_HWMON=n + * ACPI: don't walk tables if ACPI was disabled + * dock: bay: Don't call acpi_walk_namespace() when ACPI is disabled. 
+ * x86: shift bits the right way in native_read_tscp + * x86: section/warning fixes + * V4L/DVB (8004): Fix INPUT dependency at budget-ci + * V4L/DVB (8005): Fix OOPS if frontend is null + * V4L/DVB (8007): cx18/cx25840: the S-Video LUMA input can use all + In1-In8 inputs + * V4L/DVB (8008): cx18: remove duplicate audio and video input enums + * V4L/DVB (8010): em28xx: Properly register extensions for already + attached devices + * V4L/DVB (8011): em28xx: enable DVB for HVR-900 + * V4L/DVB (8012): gl861: sleep a little to avoid I2C errors + * V4L/DVB (8013): gl861: remove useless identify_state + * V4L/DVB (8015): gl861: replace non critical msleep(0) with msleep(1) to + be on the safe side + * V4L/DVB (8017): Ensure em28xx extensions only get run against devs that + support them + * V4L/DVB (8018): Add em2860 chip ID + * V4L/DVB (8020): Fix callbacks functions of saa7134_empress + * V4L/DVB (8022): saa7134: fix race between opening and closing the + device + * V4L/DVB (8026): Avoids an OOPS if dev struct can't be successfully + recovered + * V4L/DVB (8027): saa7134: Avermedia A700: only s-video and composite + input are working + * V4L/DVB (8028): Improve error messages for tda1004x attach + * V4L/DVB (8029): Improve error message at tda1004x_attach + * V4L/DVB (8034): tda18271: fix IF notch frequency handling + * V4L/DVB (8035): tda18271: dont touch EB14 if rf_cal lookup is out of + range + * V4L/DVB (8036): tda18271: toggle rf agc speed mode on TDA18271HD/C2 + only + * V4L/DVB (8037): tda18271: ensure that the thermometer is off during + channel configuration + * V4L/DVB (8039): pxa-camera: fix platform_get_irq() error handling. + * V4L/DVB (8040): soc-camera: remove soc_camera_host_class class + * V4L/DVB (8042): DVB-USB UMT-010 channel scan oops + * V4L/DVB (8043): au0828: add support for additional USB device id's + * V4L/DVB (8044): au8522: tuning optimizations + * V4L/DVB (8048): saa7134: Fix entries for Avermedia A16d and Avermedia + E506 + * V4L/DVB (8061): cx18: only select tuner / frontend modules if + !DVB_FE_CUSTOMISE + * V4L/DVB (8063): cx18: Fix unintended auto configurations in + cx18-av-core + * V4L/DVB (8066): cx18: Fix audio mux input definitions for HVR-1600 Line + In 2 and FM radio + * V4L/DVB (8067): cx18: Fix firmware load for case when digital capture + happens first + * V4L/DVB (8068): cx18: Add I2C slave reset via GPIO upon initialization + * V4L/DVB (8069): cx18: Fix S-Video and Compsite inputs for the Yuan + MPC718 and enable card entry + * V4L/DVB (8071): tda10023: Fix possible kernel oops during + initialisation + * V4L/DVB (8073): av7110: Catch another type of ARM crash + * V4L/DVB (8074): av7110: OSD transfers should not be interrupted + * V4L/DVB (8075): stv0299: Uncorrected block count and bit error rate + fixed + * V4L/DVB (8092): videodev: simplify and fix standard enumeration + * V4L/DVB (8096): au8522: prevent false-positive lock status + * V4L/DVB (8097): xc5000: check device hardware state to determine if + firmware download is needed + * V4L/DVB (8100): V4L/vivi: fix possible memory leak in vivi_fillbuff + * V4L/DVB (8108): Fix open/close race in saa7134 + * s2io: fix documentation about intr_type + * tc35815: Mark carrier-off before starting PHY + * tc35815: Fix receiver hangup on Rx FIFO overflow + * ixgbe: fix EEH recovery during reset on PPC + * igb: fix EEH recovery during reset on PPC + * e1000e: fix EEH recovery during reset on PPC + * pcnet_cs, axnet_cs: clear bogus interrupt before request_irq + * drivers/net/r6040.c: Eliminate double 
sizeof + * ipg: fix jumbo frame compilation + * ipg: use NULL, not zero, for pointers + * [netdrvr] 3c59x: remove irqs_disabled warning from local_bh_enable + * [netdrvr] netxen: fix netxen_pci_tbl[] breakage + * e100: Do pci_dma_sync after skb_alloc for proper operation on ixp4xx + * e1000: only enable TSO6 via ethtool when using correct hardware + * [netdrvr] Fix IOMMU overflow checking in s2io.c + * qla3xxx: Hold RTNL while calling dev_close() + * Hold RTNL while calling dev_close() + * sata_uli: hardreset is broken + * rt2x00: Fix lock dependency errror + * prism: islpci_eth.c endianness fix + * mac80211: fix an oops in several failure paths in key allocation + * firewire: fw-sbp2: fix parsing of logical unit directories + * kbuild: fix a.out.h export to userspace with O= build. + * Ensure interrupted recovery completed properly (v1 metadata plus + bitmap) + * Don't acknowlege that stripe-expand is complete until it really is. + * Fix error paths if md_probe fails. + * hamradio: remove unused variable + * tcp: calculate tcp_mem based on low memory instead of all memory + * tcp: fix for splice receive when used with software LRO + * af_unix: fix 'poll for write'/connected DGRAM sockets + * netdevice: Fix typo of dev_unicast_add() comment + * pkt_sched: ERR_PTR() ususally encodes an negative errno, not positive. + * pkt_sched: Remove CONFIG_NET_SCH_RR + * include/linux/netdevice.h: don't export MAX_HEADER to userspace + * tcp: /proc/net/tcp rto,ato values not scaled properly (v2) + * netlink: Fix some doc comments in net/netlink/attr.c + * CONNECTOR: add a proc entry to list connectors + * inet fragments: fix race between inet_frag_find and + inet_frag_secret_rebuild + * net/inet_lro: remove setting skb->ip_summed when not LRO-able + * netlabel: Fix a problem when dumping the default IPv6 static labels + * ipv6 route: Convert rt6_device_match() to use RT6_LOOKUP_F_xxx flags. + * sched: fix cpu hotplug + * Fix and clean top .gitignore + * x86: fix cpu hotplug crash + * ptrace GET/SET FPXREGS broken + * Input: add KEY_MEDIA_REPEAT definition + * Input: fix locking in force-feedback core + * [ARM] 5131/1: Annotate platform_secondary_init with trace_hardirqs_off + * ide: fix /proc/ide/ide?/mate reporting + * netfilter: nf_conntrack_tcp: fixing to check the lower bound of valid + ACK + * textsearch: fix Boyer-Moore text search bug + * hostap: don't report useless WDS frames by default + * hostap: fix sparse warnings + * mac80211: don't accept WEP keys other than WEP40 and WEP104 + * V4L/DVB (8145a): USB Video Class driver + * [IA64] Bugfix for system with 32 cpus + * [IA64] export account_system_vtime + * sched: fix divide error when trying to configure rt_period to zero + * x86: fix NODES_SHIFT Kconfig range + * block: Fix the starving writes bug in the anticipatory IO scheduler + * Properly notify block layer of sync writes + * rcu: fix hotplug vs rcu race + * I2C: S3C2410: Check ACK on byte transmission + * I2C: S3C2410: Fixup error codes returned rom a transfer. + * I2C: S3C2410: Add MODULE_ALIAS() for s3c2440 device. 
+ * PCI: Restrict VPD read permission to root + * powerpc/bootwrapper: update for initrd with simpleImage + * i2c: Documentation: fix device matching description + * i2c: Fix bad hint about irqs in i2c.h + * powerpc/legacy_serial: Bail if reg-offset/shift properties are present + * powerpc/mpc5200: Fix lite5200b suspend/resume + * ipv4: fix sysctl documentation of time related values + * net-sched: change tcf_destroy_chain() to clear start of filter list + * net-sched: fix filter destruction in atm/hfsc qdisc destruction + * netlink: Unneeded local variable + * net: Tyop of sk_filter() comment + * netdevice: Fix wrong string handle in kernel command line parsing + * net: fib_rules: fix error code for unsupported families + * dm crypt: use cond_resched + * V4L/DVB (8178): uvc: Fix compilation breakage for the other drivers, if + uvc is selected + * PCI: Limit VPD read/write lengths for Broadcom 5706, 5708, 5709 rev. + * PCI: acpiphp: cleanup notify handler on all root bridges + * drivers/input/ff-core.c needs + * DRM/i915: only use tiled blits on 965+ + * tty: Fix inverted logic in send_break + * x86: fix Intel Mac booting with EFI + * arch/x86/mm/init_64.c: early_memtest(): fix types + * 9p: fix O_APPEND in legacy mode + * slub: Do not use 192 byte sized cache if minimum alignment is 128 byte + * Do not overwrite nr_zones on !NUMA when initialising zlcache_ptr + * [MIPS] IP32: Fix unexpected irq 71 + * [MIPS] IP22: Fix crashes due to wrong L1_CACHE_BYTES + * [MIPS] cevt-txx9: Reset timer counter on initialization + * hrtimer: prevent migration for raising softirq + * svcrpc: fix handling of garbage args + * OHCI: Fix problem if SM501 and another platform driver is selected + * USB: fix cdc-acm resume() + * USB: ehci - fix timer regression + * USB: ohci - record data toggle after unlink + * USB: mass storage: new id for US_SC_CYP_ATACB + * sisusbvga: Fix oops on disconnect. + * USB: New device ID for ftdi_sio driver + * USB: fix interrupt disabling for HCDs with shared interrupt handlers + * USB: don't lose disconnections during suspend + * USB: another option device id + * USB: add a pl2303 device id + * USB: fix Oops on loading ipaq module since 2.6.26 + * USB: adding comment for ipaq forcing number of ports + * [MIPS] Fix bug in atomic_sub_if_positive. + * xen: fix address truncation in pte mfn<->pfn conversion + * sata_sil24: add DID for another adaptec flavor + * ahci: always clear all bits in irq_stat + * libata-sff: improve HSM violation reporting + * sata_mv: safer logic for limit_warnings + * Update maintainers for powerpc + * Christoph has moved + * mm: dirty page accounting vs VM_MIXEDMAP + * rtc: rtc_read_alarm() handles wraparound + * firmware: fix the request_firmware() dummy + * serial: fix serial_match_port() for dynamic major tty-device numbers + * get_user_pages(): fix possible page leak on oom + * rtc-x1205: Fix alarm set + * rtc: fix CMOS time error after writing /proc/acpi/alarm + * pci: VT3336 can't do MSI either + * Miguel Ojeda has moved + * ext3: add missing unlock to error path in ext3_quota_write() + * ext4: add missing unlock to an error path in ext4_quota_write() + * reiserfs: add missing unlock to an error path in reiserfs_quota_write() + * ecryptfs: remove unnecessary mux from ecryptfs_init_ecryptfs_miscdev() + * lib: taint kernel in common report_bug() WARN path. 
+ * gpio: pca953x (i2c) handles max7310 too + * fsl_diu_fb: fix build with CONFIG_PM=y, plus fix some warnings + * Update taskstats-struct document for scaled time accounting + * cciss: fix regression that no device nodes are created if no logical + drives are configured. + * delay accounting: maintainer update + * Doc*/kernel-parameters.txt: fix stale references + * hdaps: add support for various newer Lenovo thinkpads + * mn10300: export certain arch symbols required to build allmodconfig + * mn10300: provide __ucmpdi2() for MN10300 + * Introduce rculist.h + * man-pages is supported + * ntfs: update help text + * add kernel-doc for simple_read_from_buffer and memory_read_from_buffer + * w100fb: do not depend on SHARPSL + * w100fb: add 80 MHz modeline + * MFD maintainer + * cgroups: document the effect of attaching PID 0 to a cgroup + * spi: fix the read path in spidev + * doc: doc maintainers + * security: filesystem capabilities: fix fragile setuid fixup code + * security: filesystem capabilities: fix CAP_SETPCAP handling + * Alpha Linux kernel fails with inconsistent kallsyms data + * cpusets: document proc status cpus and mems allowed lists + * MAINTAINERS: update the email address of Andreas Dilger + * cciss: read config to obtain max outstanding commands per controller + * olpc: sdhci: add quirk for the Marvell CaFe's vdd/powerup issue + * olpc: sdhci: add quirk for the Marvell CaFe's interrupt timeout + * cpumask: introduce new APIs + * mm: switch node meminfo Active & Inactive pages to Kbytes + * Update MAINTAINERS file for the TPM device driver + * devcgroup: fix odd behaviour when writing 'a' to devices.allow + * doc: document the relax_domain_level kernel boot argument + * mmc: don't use DMA on newer ENE controllers + * mempolicy: mask off internal flags for userspace API + * x86 ACPI: normalize segment descriptor register on resume + * x86 ACPI: fix resume from suspend to RAM on uniprocessor x86-64 + * softlockup: print a module list on being stuck + * ide: fix hwif->gendev refcounting + * ide: ide_unregister() warm-plug bugfix + * ide: ide_unregister() locking bugfix + * ahci: give another shot at clearing all bits in irq_stat + * Fix clear_refs_write() use of struct mm_walk + * Move _RET_IP_ and _THIS_IP_ to include/linux/kernel.h + * Fix pagemap_read() use of struct mm_walk + * Linux 2.6.26-rc9 + * Revert "USB: don't explicitly reenable root-hub status interrupts" + * Revert "PCI: Correct last two HP entries in the bfsort whitelist" + * iwlwifi: fix incorrect 5GHz rates reported in monitor mode + * iwlwifi: drop skb silently for Tx request in monitor mode + * libertas: support USB persistence on suspend/resume (resend) + * tcp: net/ipv4/tcp.c needs linux/scatterlist.h + * tcp: fix a size_t < 0 comparison in tcp_read_sock + * bridge: fix use-after-free in br_cleanup_bridges() + * Add missing skb->dev assignment in Frame Relay RX code + * forcedeth: fix lockdep warning on ethtool -s + * ehea: fix might sleep problem + * ehea: add MODULE_DEVICE_TABLE + * ehea: fix race condition + * ehea: Access iph->tot_len with correct endianness + * pasemi_mac: Access iph->tot_len with correct endianness + * ibm_newemac: Fixes kernel crashes when speed of cable connected changes + * ibm_newemac: Fixes entry of short packets + * fs_enet: restore promiscuous and multicast settings in restart() + * can: add sanity checks + * x86: KVM guest: Add memory clobber to hypercalls + * KVM: IOAPIC: Fix level-triggered irq injection hang + * [SCSI] erase invalid data returned by device + * pxamci: fix 
byte aligned DMA transfers + * vsprintf: split out '%s' handling logic + * vsprintf: split out '%p' handling logic + * vsprintf: add infrastructure support for extended '%p' specifiers + * vsprintf: add support for '%pS' and '%pF' pointer formats + * powerpc: Fix unterminated of_device_id array in legacy_serial.c + * [UML] fix gcc ICEs and unresolved externs + * ocfs2/dlm: Fixes oops in dlm_new_lockres() + * hostap_cs: correct poor NULL checks in suspend/resume routines + * drivers/net/wireless/iwlwifi/iwl-3945.c Fix type issue on 64bit + * mac80211: move netif_carrier_on to after + ieee80211_bss_info_change_notify + * mac80211: Only flush workqueue when last interface was removed + * zd1211rw: add ID for AirTies WUS-201 + * ssb-pcicore: Fix IRQ-vector init on embedded devices + * mac80211: don't report selected IBSS when not found + * crypto: tcrypt - Fix memory leak in test_cipher + * sctp: Mark the tsn as received after all allocations finish + * [S390] protect _PAGE_SPECIAL bit against mprotect + * irda: via-ircc proper dma freeing + * irda: New device ID for nsc-ircc + * irda: Fix netlink error path return value + * [SCSI] mptspi: fix oops in mptspi_dv_renegotiate_work() + * Correct hash flushing from huge_ptep_set_wrprotect() + * ide: add __ide_default_irq() inline helper + * palm_bk3710: fix IDECLK period calculation + * it8213: fix return value in it8213_init_one() + * [MIPS] Atlas, decstation: Fix section mismatches triggered by + defconfigs + * [MIPS] Fix 32bit kernels on R4k with 128 byte cache line size + * NFS: Fix readdir cache invalidation + * SUNRPC: Fix a double-free in rpcbind + * SUNRPC: Fix an rpcbind breakage for the case of IPv6 lookups + * reiserfs: discard prealloc in reiserfs_delete_inode + * Fix broken fix for fsl-diu-db + * RDMA/cxgb3: Fix regression caused by class_device -> device conversion + * ipv6: fix race between ipv6_del_addr and DAD timer + * sctp: Add documentation for sctp sysctl variable + * kernel/printk.c: Made printk_recursion_bug_msg static. + * powerpc: Add missing reference to coherent_dma_mask + * rc80211_pid: Fix fast_start parameter handling + * rt2x00: Disable synchronization during initialization + * zd1211rw: stop beacons on remove_interface + * libertas: fix memory alignment problems on the blackfin + * netfilter: nf_conntrack_tcp: fix endless loop + * netfilter: nf_nat_snmp_basic: fix a range check in NAT for SNMP + * md: ensure all blocks are uptodate or locked when syncing + * sched: fix cpu hotplug + * x86: fix /dev/mem compatibility under PAT + * crypto: chainiv - Invoke completion function + * ocfs2: Fix flags in ocfs2_file_lock + * kernel/kprobes.c: Made kprobe_blacklist static. + * arch/x86/kernel/.gitignore: Added vmlinux.lds to .gitignore file + because it shouldn't be tracked. 
+ * ftrace: Documentation + * Fix PREEMPT_RCU without HOTPLUG_CPU + * sched: fix cpu hotplug, cleanup + * exec: fix stack excutability without PT_GNU_STACK + * slub: Fix use-after-preempt of per-CPU data structure + * Documentation: clarify tcp_{r,w}mem sysctl docs + * ip: sysctl documentation cleanup + * tcp: correct kcalloc usage + * ipv4: fib_trie: Fix lookup error return + * netlabel: netlink_unicast calls kfree_skb on error path by itself + * ipv6: missed namespace context in ipv6_rthdr_rcv + * xfrm: Add a XFRM_STATE_AF_UNSPEC flag to xfrm_usersa_info + * tun: Persistent devices can get stuck in xoff state + * tpm: add Intel TPM TIS device HID + * rapidio: fix device reference counting + * Fix name of Russell King in various comments + * rtc: fix reported IRQ rate for when HPET is enabled + * libata-acpi: filter out DIPM enable + * Added Targa Visionary 1000 IDE adapter to pata_sis.c + * libata-acpi: don't call sleeping function from invalid context + * Fix reference counting race on log buffers + * [SCSI] ipr: Fix HDIO_GET_IDENTITY oops for SATA devices + * IPMI: return correct value from ipmi_write + * x86: fix ldt limit for 64 bit + * [SCSI] fusion: default MSI to disabled for SPI and FC controllers + * [SCSI] bsg: fix oops on remove + * drivers/char/pcmcia/ipwireless/hardware.c fix resource leak + * drivers/isdn/i4l/isdn_common.c fix small resource leak + * fbdev: bugfix for multiprocess defio + * serial8250: sanity check nr_uarts on all paths. + * ov7670: clean up ov7670_read semantics + * rtc-fm3130: fix chip naming + * rtc-pcf8563: add chip id + * OProfile kernel maintainership changes + * frv: fix irqs_disabled() to return an int, not an unsigned long + * cifs: fix inode leak in cifs_get_inode_info_unix + * cifs: fix wksidarr declaration to be big-endian friendly + * cpusets, hotplug, scheduler: fix scheduler domain breakage + * Documentation/HOWTO: correct wrong kernel bugzilla FAQ URL + * devcgroup: always show positive major/minor num + * devcgroup: fix permission check when adding entry to child cgroup + * Linux 2.6.26 + + -- Ben Collins Mon, 14 Jul 2008 13:41:50 -0400 + +linux (2.6.26-3.9) intrepid; urgency=low + + * abi: Add dca and ioatdma to modules.ignore + + [ 2.6.26-3.8 ] + + [ Ben Collins ] + + * ubuntu: Add heci driver 3.2.0.24 + * ubuntu: Add heci to kconfig/kbuild + * config: Enable heci module on all flavours + * dm-bbr: Update to get it to compile with 2.6.26 + * config: Enable dm-bbr + * ubuntu: Add some media drivers + * config: Enable misc media drivers + * udeb: Switch to uvesafb in fb-modules + * abi: Add more modules to ignore (known) + + [ 2.6.26-3.7 ] + + [Amit Kucheria] + + * SAUCE: make fc transport removal of target configurable + - LP: #163075 + * SAUCE: pm: Config option to disable handling of console during + suspend/resume + + [Ben Collins] + + * SAUCE: input/mouse/alps: Do not call psmouse_reset() for alps + * SAUCE: irda: Default to dongle type 9 on IBM hardware + * SAUCE: tulip: Let dmfe handle davicom on non-sparc + * SAUCE: tulip: Define ULI PCI ID's + * SAUCE: version: Implement version_signature proc file. 
+ * build: Cleanup arches + * build: Remove remnants of unused binary-custom infrastructure + * build: Remove disable_d_i (not needed) and cleanup ppa build stuff + * ubuntu: New modules, acer-acpi + * build: Remove -virtual, and rebuild configs + * ubuntu: Add drbd module + * acer-acpi: Fix makefile + * x86/Kconfig: Fix missing quote for ubuntu Kconfig source + * ubuntu: Add iscsitarget module + * ubuntu: Added Amiga FS driver + * ubuntu: Add squashfs driver + * ubuntu: Remove asfs (Amiga FS). Need to be in linux-ports instead + * squashfs: Move headers to real include directory + * build/configs: The Great Config Consistency Check of 2008 + * ubuntu: Move third-party includes to ubuntu/include + * ubuntu: Add aufs module + * ubuntu: Added atl2 driver + * ubuntu: Add dm-radi4-5 driver + * build: Add CONFIG_DEBUG_SECTION_MISMATCH=y to get old style warnings + from build + * ubuntu/Makefile: Fixup dm-raid4-5 and add kludge for kbuild + * squashfs: Fixes for VFS changes + * ubuntu/dm-raid4-5: Fixups for moved/renamed headers/functions in core + md + * ubuntu: Add ndiswrapper driver + * d-i: Update module listings + * build: Disable xd block device (ancient) + * ndiswrapper: Fixup makefile + * d-i: Remove efi-modules. The only module, efivars, is built-in + * build: Remove install-source, obsolete and caused build failure + * Ubuntu-2.6.26-1.3 + * build: linux-doc rules got broken when disabling html side. Fixed now. + * Ubuntu-2.6.26-1.4 + * x86: Update to -rc6 allows CONFIG_PCI_OLPC to work with PCI_GOANY + * d-i: Make virtio-ring optional (it's built-in on i386) + * Ubuntu-2.6.26-1.4 + * Ubuntu-2.6.26-1.5 + * config: Enable DVB devices + * ubuntu/aufs: Make aufs a bool config, since it needs to be built-in + * config: Build aufs into the kernels + * build: Fix arguments passed to link-headers script + * config: Disable early printk + * d-i: Move isofs to storage-core and kill st (scsi tape) from list + * config: Enable non-promiscuous access to /dev/mem + * x86: Add option to disable decompression info messages + * config: Enable no-bz-chatter config options + * build: Re-add linux-source package + * d-i: Re-add socket-modules. Accidentally removed + - LP: #241295 + * Ubuntu-2.6.26-2.6 + * Use makedumpfile to generate a vmcoreinfo file. + * build: Build-Depend on makedumpfile for vmcoreinfo generation + * build: Remove debug print from git-ubuntu-log + * Updated configs for -rc7 + * build: postinst, do not call depmod with -F + * config: Enable rtc-cmos as a built-in driver. + * control: Provide ndiswrapper-modules-1.9 + * build: Generate vmcoreinfo in image build for crashdumps without debug + image + * config: Disable vesafb, since we'll prefer uvesafb + * build: Copy uvesafb module to initrd mod directory + * abi-check: New, more robust script + * config: Enable heap randomization by default + * abi-check: Cleanup output and call with perl (not $SHELL) + * abi: Ignore missing vesafb (known) + * config: Disable pcspkr (in favor of snd-pcsp) + * swap: Add notify_swap_entry_free callback for compcache + * compcache: Added ram backed compressed swap module + * ubuntu: Enable kbuild and kconfig for compcache + * config: Enable compcache and tlsf allocator as modules + * config: Updated for -rc8. 
Disables XEN on i386 + * config: Switch i386-server to 64G, enable PAE, 64-bit res, and XEN + * ubuntu: Add misc drivers from hardy lum + * ubuntu: Enable build of misc/ subdir + * config: Enable misc drivers + * aufs: Fix warning about single non-string-literal arg to printf style + function + * drivers: Remove some duplicate device entries in various modules + * config: Disable some duplicate drivers + * keyspan: Remove duplicate device ID's + * check-aliases: Cleanup output, and fix rolling checks + * ubuntu: Disable dm-bbr for now + * dm-bbr: First cut at forward portiong. Still needs work. + * ubuntu: Disable dm-bbr in kbuild/kconfig + + [Chuck Short] + + * SAUCE: ata: blacklist FUJITSU MHW2160BH PL + - LP: #175834 + * SAUCE: [USB]: add ASUS LCM to the blacklist + + [Colin Ian King] + + * SAUCE: airprime.c supports more devices + - LP: #208250 + * SAUCE: Enable speedstep for sonoma processors. + - LP: #132271 + * Add dm-loop + * Add dm-loop BOM + + [Kyle McMartin] + + * SAUCE: fix orinoco_cs oops + + [Mario Limonciello] + + * SAUCE: Enable Reset and SCO workaround on Dell 410 BT adapter + + [Matthew Garrett] + + * SAUCE: hostap: send events on data interface as well as master + interface + + [Phillip Lougher] + + * SAUCE: r8169: disable TSO by default for RTL8111/8168B chipsets. + + [Stefan Bader] + + * SAUCE: Export dm_disk function of device-mapper + * SAUCE: Restore VT fonts on switch + * SAUCE: Always use SCO protocol (disable eSCO support) Bug: #39414 + * SAUCE: mmc: Increase power_up deleay to fix TI readers OriginalAuthor: + Pascal Terjan Bug: #137686 + * SAUCE: Add blacklist support to fix Belkin bluetooth dongle. Bug: + #140511 + * SAUCE: Lower warning level of pci resource allocation messages. Bug: + 159241 + * SAUCE: Lower message level for PCI memory and I/O allocation. + - LP: #159241 + * Modify log generation to catch bug numbers when adding with git-am. + + [Tim Gardner] + + * Added the debian directory. Ignore: yes + * Add support for UBUNTUINCLUDE Ignore: yes + * LUM headers go in /usr/src Ignore: yes + * First pass at 2.6.25 configs Ignore: yes + * i386 -generic builds. Ignore: yes + * SAUCE: Increase CONFIG_IDE_MAX_HWIFS to 8 (from 4) + * SAUCE: Add extra headers to linux-libc-dev OriginalAuthor: Soren Hansen + OriginalLocation: + https://lists.ubuntu.com/archives/kernel-team/2007-November/001891.html + * Set CONFIG_DEVKMEM=n Ignore: yes + * Enabled ALSA and CGROUPS for i386 Ignore: yes + * Enabled amd64 configs. Ignore: yes + * CONFIG_STANDALONE=n Ignore: yes + * CONFIG_BLK_DEV_4DRIVES=n for i386 Ignore: yes + * CONFIG: CONFIG_DEFAULT_RELATIME=y for all flavours. Ignore: yes + * Set CONFIG_EDD_OFF=y Ignore: yes + * SAUCE: Blacklist Bluetooth Dell Wireless 370 for SCO MTU + OriginalAuthor: Mario Limonciello Bug: + #209715 + * SAUCE: Catch nonsense keycodes and silently ignore + * SAUCE: frame buffer regression - screen blank except for blinking + cursor after fbcon vtswitch OriginalAuthor: Matthew Garrett + Bug: #201591 + * SAUCE: Added support for HDAPS on various ThinkPads from Lenovo and IBM + OriginalAuthor: Klaus S. Madsen + OriginalAuthor: Chuck Short + * SAUCE: Guest OS does not recognize a lun with non zero target id on + Vmware ESX Server + * SAUCE: orinoco_cs.ko missing + * Set CONFIG_FB_VESA=m for i386/amd64 Ignore: yes + * Set CONFIG_PM_DISABLE_CONSOLE=y for all flavours Ignore: yes + * Thorough review of amd64 -generic config Ignore: yes + * Build PPA packages for Hardy until the Intrepid archive is opened. 
+ * Deleted obsolete flavours Ignore: yes + * Don't build docs for PPA Ignore: yes + * Build all standard packages in PPA. Ignore: yes + * Remove duplicate USB ids + * SAUCE: DVB-USB UMT-010 driver oops on install Bug: #115284 + * Update configs after rebase to 2.6.26-rc1 Ignore: yes + * Update configs after rebase Ignore: yes + * Disable V4L until the build issues get ironed out. Ignore: yes + * Update configs after rebase. Ignore: yes + * Another device enable pass Ignore: yes + * Update configs after merge. Ignore: yes + * SAUCE: fn key doesn't work in hardy with macbook pro fourth generation + (4,1) + - LP: #207127 + * Enabled CONFIG_CIFS_DFS_UPCALL=y and CONFIG_CIFS_UPCALL=y + - LP: #236830 + + [Upstream Kernel Changes] + + * Revert "[WATCHDOG] hpwdt: Add CFLAGS to get driver working" + * mac80211: detect driver tx bugs + * hwmon: (lm85) Fix function RANGE_TO_REG() + * hwmon: (adt7473) Initialize max_duty_at_overheat before use + * hwmon: Update the sysfs interface documentation + * hwmon: (abituguru3) Identify Abit AW8D board as such + * hwmon: (w83791d) new maintainer + * hwmon: (abituguru3) update driver detection + * hwmon: (lm75) sensor reading bugfix + * ipv6: Remove options header when setsockopt's optlen is 0 + * ipv6: Drop packets for loopback address from outside of the box. + * sched: rt: dont stop the period timer when there are tasks wanting to + run + * sched: fix wait_for_completion_timeout() spurious failure under heavy + load + * x86: fix NULL pointer deref in __switch_to + * xen: Use wmb instead of rmb in xen_evtchn_do_upcall(). + * xen: mask unwanted pte bits in __supported_pte_mask + * xen: don't drop NX bit + * sched: refactor wait_for_completion_timeout() + * Ext4: Fix online resize block group descriptor corruption + * [IA64] SN2: security hole in sn2_ptc_proc_write + * alpha: fix module load failures on smp (bug #10926) + * alpha: link failure fix + * alpha: fix compile failures with gcc-4.3 (bug #10438) + * alpha: resurrect Cypress IDE quirk + * pppoe: warning fix + * sctp: Make sure N * sizeof(union sctp_addr) does not overflow. + * netns: Don't receive new packets in a dead network namespace. + * Add return value to reserve_bootmem_node() + * Slab: Fix memory leak in fallback_alloc() + * Fix performance regression on lmbench select benchmark + * ALSA: aw2 - Fix Oops at initialization + * ALSA: sb - Fix wrong assertions + * futexes: fix fault handling in futex_lock_pi + * IB/mthca: Clear ICM pages before handing to FW + * tty_driver: Update required method documentation + * removed unused var real_tty on n_tty_ioctl() + * Fix ZERO_PAGE breakage with vmware + * mm: fix race in COW logic + * NFS: Reduce the NFS mount code stack usage. 
+ * NFS: Fix filehandle size comparisons in the mount code + * NFS: nfs_updatepage(): don't mark page as dirty if an error occurred + * alpha: fix compile error in arch/alpha/mm/init.c + * KVM: Fix race between timer migration and vcpu migration + * KVM: close timer injection race window in __vcpu_run + * KVM: MMU: Fix rmap_write_protect() hugepage iteration bug + * KVM: MMU: large page update_pte issue with non-PAE 32-bit guests + (resend) + * KVM: MMU: Fix oops on guest userspace access to guest pagetable + * KVM: ioapic: fix lost interrupt when changing a device's irq + * KVM: VMX: Fix host msr corruption with preemption enabled + * [GFS2] BUG: unable to handle kernel paging request at ffff81002690e000 + * xen: remove support for non-PAE 32-bit + * kgdb: documentation update - remove kgdboe + * kgdb: sparse fix + * [IA64] Fix boot failure on ia64/sn2 + * [IA64] Handle count==0 in sn2_ptc_proc_write() + * [IA64] Eliminate NULL test after alloc_bootmem in iosapic_alloc_rte() + * [GFS2] fix gfs2 block allocation (cleaned up) + * x86: Add structs and functions for paravirt clocksource + * x86: Make xen use the paravirt clocksource structs and functions + * KVM: Make kvm host use the paravirt clocksource structs + * x86: KVM guest: Use the paravirt clocksource structs and functions + * KVM: Remove now unused structs from kvm_para.h + * enable bus mastering on i915 at resume time + * Linux 2.6.26-rc8 + * # Ubuntu external driver commit. + * # Ubuntu commit template. + + -- Ben Collins Sat, 21 Jun 2008 09:05:15 -0400 + +linux (2.6.26-2.6) intrepid; urgency=low + + [Ben Collins] + + * Revert "SAUCE: Export symbols for aufs (in lum) (not needed) + * config: Enable DVB devices + * ubuntu/aufs: Make aufs a bool config, since it needs to be built-in + * config: Build aufs into the kernels + * build: Fix arguments passed to link-headers script + * config: Disable early printk + * d-i: Move isofs to storage-core and kill st (scsi tape) from list + * config: Enable non-promiscuous access to /dev/mem + * x86: Add option to disable decompression info messages + * config: Enable no-bz-chatter config options + * build: Re-add linux-source package + * d-i: Re-add socket-modules. Accidentally removed + - LP: #241295 + + [Colin Ian King] + + * Add dm-loop + + [Tim Gardner] + + * Revert "SAUCE: USB bluetooth device 0x0e5e:0x6622 floods errors to + syslog (merged upstream) + + -- Ben Collins Mon, 16 Jun 2008 10:56:01 -0400 + +linux (2.6.26-1.5) intrepid; urgency=low + + * d-i: Make virtio-ring optional (it's built-in on i386) + * Rebased on 2.6.26-rc6 + + [Ubuntu-2.6.26-1.4 Changes below] + + * build: linux-doc rules got broken when disabling html side. Fixed now. + + [Ubuntu-2.6.26-1.3 Changes below] + + * build: Remove install-source, obsolete and caused build failure + + [Ubuntu-2.6.26-1.2 Changes below] + + * Remove efi-modules from d-i module list (efivars is built-in). Caused a + build failure. + * Patch to arch/x86/xen/time.c to remove __divdi3 usage (build failure on + i386). 
+ + [Ubuntu-2.6.26-1.1 Changes below] + + [Amit Kucheria] + + * SAUCE: make fc transport removal of target configurable + * SAUCE: Add AGP support for Radeon Mobility 9000 chipset + * SAUCE: pm: Config option to disable handling of console during + suspend/resume + + [Ben Collins] + + * SAUCE: input/mouse/alps: Do not call psmouse_reset() for alps + * SAUCE: irda: Default to dongle type 9 on IBM hardware + * SAUCE: tulip: Let dmfe handle davicom on non-sparc + * SAUCE: tulip: Define ULI PCI ID's + * SAUCE: version: Implement version_signature proc file. + * build: Remove remnants of unused binary-custom infrastructure + * mmc_block: Fix bad allocation on 64-bit (zero len array) + * ubuntu: New modules, acer-acpi + * build: Remove -virtual, and rebuild configs + * ubuntu: Add drbd module + * ubuntu: Add iscsitarget module + * ubuntu: Add squashfs driver + * build/configs: The Great Config Consistency Check of 2008 + * ubuntu: Add aufs module + * ubuntu: Added atl2 driver + * ubuntu: Add dm-radi4-5 driver + * build: Add CONFIG_DEBUG_SECTION_MISMATCH=y to get old style warnings + from build + * squashfs: Fixes for VFS changes + * ubuntu/dm-raid4-5: Fixups for moved/renamed headers/functions in core + md + * ubuntu: Add ndiswrapper driver + * d-i: Update module listings + + [Chuck Short] + + * SAUCE: ata: blacklist FUJITSU MHW2160BH PL + * SAUCE: [USB]: add ASUS LCM to the blacklist + + [Colin Ian King] + + * SAUCE: Enable speedstep for sonoma processors. + * SAUCE: airprime.c supports more devices + + [Kyle McMartin] + + * SAUCE: fix orinoco_cs oops + + [Mario Limonciello] + + * SAUCE: Enable Reset and SCO workaround on Dell 410 BT adapter + + [Matthew Garrett] + + * SAUCE: hostap: send events on data interface as well as master + interface + + [Phillip Lougher] + + * SAUCE: r8169: disable TSO by default for RTL8111/8168B chipsets. + + [Stefan Bader] + + * SAUCE: Export dm_disk function of device-mapper + * SAUCE: Restore VT fonts on switch + * SAUCE: Always use SCO protocol (disable eSCO support) Bug: #39414 + * SAUCE: mmc: Increase power_up deleay to fix TI readers + * SAUCE: Add blacklist support to fix Belkin bluetooth dongle. + * SAUCE: Lower warning level of pci resource allocation messages. + * SAUCE: Lower message level for PCI memory and I/O allocation. + - LP: #159241 + * Modify log generation to catch bug numbers when adding with git-am. + + [Tim Gardner] + + * SAUCE: hdaps module does not load on Thinkpad T61P + * SAUCE: Add extra headers to linux-libc-dev + * SAUCE: Export symbols for aufs (in lum). 
+ * SAUCE: USB bluetooth device 0x0e5e:0x6622 floods errors to syslog + * SAUCE: Blacklist Bluetooth Dell Wireless 370 for SCO MTU + * SAUCE: Catch nonsense keycodes and silently ignore + * SAUCE: frame buffer regression - screen blank except for blinking + cursor after fbcon vtswitch + * SAUCE: Added support for HDAPS on various ThinkPads from Lenovo and IBM + * SAUCE: Guest OS does not recognize a lun with non zero target id on + Vmware ESX Server + * SAUCE: Modualrize vesafb + * SAUCE: DVB-USB UMT-010 driver oops on install + * SAUCE: fn key doesn't work in hardy with macbook pro fourth generation + (4,1) + - LP: #207127 + + -- Ben Collins Wed, 11 Jun 2008 05:28:35 -0400 --- linux-3.13.0.orig/debian.master/config/amd64/config.common.amd64 +++ linux-3.13.0/debian.master/config/amd64/config.common.amd64 @@ -0,0 +1,318 @@ +# +# Config options for config.common.amd64 automatically generated by splitconfig.pl +# +CONFIG_64BIT=y +CONFIG_AC97_BUS=m +# CONFIG_AHCI_IMX is not set +CONFIG_AIC79XX_RESET_DELAY_MS=5000 +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_RANDOM=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_PIIX=y +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_AUDIT_ARCH=y +CONFIG_BCH=m +CONFIG_COMEDI_ISA_DRIVERS=y +CONFIG_COMPACTION=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_IDLE=y +CONFIG_CRASH_DUMP=y +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_DEADLINE=y +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_EXYNOS_VIDEO=y +CONFIG_EZX_PCAP=y +CONFIG_FB_ATY128=m +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_DDC=m +# CONFIG_FB_MACMODES is not set +CONFIG_FB_RADEON=m +CONFIG_FB_SVGALIB=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_UDL=m +CONFIG_FB_VOODOO1=m +# CONFIG_FMAN_P1023 is not set +CONFIG_FMAN_P3040_P4080_P5020=y +# CONFIG_FONTS is not set +CONFIG_FRAME_WARN=1024 +CONFIG_FSL_FMAN=y +CONFIG_FUNCTION_TRACER=y +CONFIG_GENERIC_PHY=m +CONFIG_GPIO_GENERIC=m +CONFIG_GPIO_TWL6040=m +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_AOUT is not set +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_HIBERNATION=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=m +CONFIG_HUGETLB_PAGE=y +# CONFIG_HW_RANDOM_ATMEL is not set +# CONFIG_HZ_100 is not set +CONFIG_I2C_ALGOBIT=m +# CONFIG_IDE is not set +# CONFIG_IKCONFIG is not set +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_IMA=y +CONFIG_INFINIBAND_AMSO1100=m +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m +CONFIG_IOMMU_SUPPORT=y +CONFIG_IPMI_SI_PROBE_DEFAULTS=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_KSM=y +CONFIG_KVM=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LIBERTAS_MESH=y +CONFIG_LIS3L02DQ=m +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MEMORY_HOTREMOVE=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TMIO is not set +CONFIG_MII=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_SDHCI=m 
+CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MTD=m +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CMDLINE_PARTS=m +CONFIG_MTD_NAND=m +CONFIG_MTD_NAND_BCH=m +CONFIG_MTD_NAND_ECC=m +CONFIG_MTD_NAND_IDS=m +CONFIG_MTD_SM_COMMON=m +CONFIG_MUSB_PIO_ONLY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NET_TULIP=y +CONFIG_NODES_SHIFT=6 +CONFIG_NOP_USB_XCEIV=m +CONFIG_NO_HZ_FULL=y +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_NVRAM=m +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_PARIDE_EPATC8=y +CONFIG_PATA_SIS=y +CONFIG_PCCARD=m +CONFIG_PCIEPORTBUS=y +CONFIG_PERCPU_TEST=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_PMIC_ADP5520=y +CONFIG_PMIC_DA903X=y +CONFIG_PM_DEBUG=y +CONFIG_PM_RUNTIME=y +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PPS=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PROBE_EVENTS=y +# CONFIG_PSTORE_CONSOLE is not set +CONFIG_PSTORE_RAM=m +CONFIG_PTP_1588_CLOCK=m +CONFIG_RAPIDIO_CPS_GEN2=m +CONFIG_RAPIDIO_CPS_XX=m +CONFIG_RAPIDIO_TSI568=m +CONFIG_RAPIDIO_TSI57X=m +CONFIG_RAPIDIO_TSI721=m +CONFIG_RBTREE_TEST=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_USER_QS=y +CONFIG_REED_SOLOMON=m +CONFIG_REGULATOR_FIXED_VOLTAGE=m +# CONFIG_REGULATOR_S2MPS11 is not set +CONFIG_REISERFS_FS=m +CONFIG_RFKILL=y +CONFIG_RT2800USB_UNKNOWN=y +# CONFIG_RT2X00_LIB_DEBUGFS is not set +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS3232=m +CONFIG_SAMSUNG_USB2PHY=m +CONFIG_SAMSUNG_USB3PHY=m +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_HIGHBANK is not set +CONFIG_SATA_SVW=m +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SERIAL_8250_PCI=y +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_I8042=y +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_RAW=m +CONFIG_SND=m +CONFIG_SND_COMPRESS_OFFLOAD=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +# CONFIG_SND_OPL4_LIB_SEQ is not set +CONFIG_SND_PCM=m +# CONFIG_SND_SBAWE_SEQ is not set +CONFIG_SND_SOC=m +CONFIG_SND_SOC_I2C_AND_SPI=m +CONFIG_SND_TIMER=m +CONFIG_SOUND=m +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPI_PXA2XX_PCI=m +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SRAM=y +CONFIG_STACK_TRACER=y +CONFIG_STAGING=y +# CONFIG_STANDALONE is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SWIOTLB=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_SYS_HYPERVISOR=y +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TIGON3=m +CONFIG_TIMER_STATS=y +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TWL4030_CORE is not set +CONFIG_TWL6040_CORE=y +CONFIG_UIO_AEC=m +CONFIG_UIO_CIF=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_MF624=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_SERCOS3=m +CONFIG_UNUSED_SYMBOLS=y +CONFIG_USB_ADUTUX=m +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_C67X00_HCD=m +CONFIG_USB_CATC=m +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_DWC3=m +# CONFIG_USB_DWC3_OMAP is not set +CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_EMI26=m +CONFIG_USB_EMI62=m 
+CONFIG_USB_EZUSB_FX2=m +CONFIG_USB_FOTG210_HCD=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_FUSBH200_HCD=m +CONFIG_USB_GADGET=m +# CONFIG_USB_G_MULTI is not set +CONFIG_USB_HCD_BCMA=m +CONFIG_USB_HCD_SSB=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_IPHETH=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_ISP116X_HCD=m +CONFIG_USB_ISP1301=m +CONFIG_USB_ISP1362_HCD=m +CONFIG_USB_ISP1760_HCD=m +CONFIG_USB_KAWETH=m +CONFIG_USB_LCD=m +CONFIG_USB_LD=m +CONFIG_USB_LED=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_MUSB_HDRC=m +CONFIG_USB_MUSB_TUSB6010=m +CONFIG_USB_OHCI_HCD_PLATFORM=y +# CONFIG_USB_OTG is not set +CONFIG_USB_OXU210HP_HCD=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_PRINTER=m +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RIO500=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SL811_HCD=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_TEST=m +CONFIG_USB_TMC=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_USS720=m +CONFIG_USB_WUSB_CBAF=m +CONFIG_USB_YUREX=m +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VGASTATE=m +CONFIG_VGA_CONSOLE=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_WIL6210=m +# CONFIG_WIMAX_GDM72XX_SDIO is not set +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_SPEEDSTEP_LIB=m +CONFIG_XEN=y +CONFIG_XEN_MAX_DOMAIN_MEMORY=500 +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_TEST=m +CONFIG_XZ_DEC_X86=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DMA_FLAG=1 --- linux-3.13.0.orig/debian.master/config/amd64/config.flavour.generic +++ linux-3.13.0/debian.master/config/amd64/config.flavour.generic @@ -0,0 +1,10 @@ +# +# Config options for config.flavour.generic automatically generated by splitconfig.pl +# +CONFIG_HZ=250 +# CONFIG_HZ_1000 is not set +CONFIG_HZ_250=y +# CONFIG_IRQ_FORCED_THREADING_DEFAULT is not set +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RCU is not set +CONFIG_PREEMPT_VOLUNTARY=y --- linux-3.13.0.orig/debian.master/config/amd64/config.flavour.lowlatency +++ linux-3.13.0/debian.master/config/amd64/config.flavour.lowlatency @@ -0,0 +1,10 @@ +# +# Config options for config.flavour.lowlatency automatically generated by splitconfig.pl +# +CONFIG_HZ=1000 +CONFIG_HZ_1000=y +# CONFIG_HZ_250 is not set +CONFIG_IRQ_FORCED_THREADING_DEFAULT=y +CONFIG_PREEMPT=y +CONFIG_PREEMPT_RCU=y +# CONFIG_PREEMPT_VOLUNTARY is not set --- linux-3.13.0.orig/debian.master/config/annotations +++ linux-3.13.0/debian.master/config/annotations @@ -0,0 +1,2147 @@ +# ARCH: x86 arm powerpc + +# not a thing +CONFIG_USB_OHCI_HCD_PLATFORM - flag +CONFIG_USB_EHCI_HCD_PLATFORM - flag +CONFIG_USB_OHCI_HCD_PPC_OF_LE - flag + +CONFIG_64BIT - flag + +# Overall defaults +set +all !flag EXPERIMENTAL !flag IGNORE !flag DEPRECATED ?type tristate = m + +# Mark debugging symbols. 
+# default all debug symbols off +set +pattern '(^|_)DEBUG(_|$)' ?type bool = n flag +set +pattern '(^|_)DEBUG(_|$)' ?type tristate = n flag +# exceptions +CONFIG_DEBUG_FS y note +CONFIG_DEBUG_KERNEL y note +CONFIG_DEBUG_RODATA y mark +CONFIG_DEBUG_SET_MODULE_RONX y mark +CONFIG_SLUB_DEBUG y +CONFIG_SCHED_DEBUG y + +# Menu: ROOT +CONFIG_SYSTEM_TRUSTED_KEYRING y + +# Menu: ROOT (arm) +CONFIG_ARM_DMA_IOMMU_ALIGNMENT 8 +CONFIG_ARM_PATCH_PHYS_VIRT y + +# Menu: ROOT (powerpc) +CONFIG_RELOCATABLE y +CONFIG_CPU_BIG_ENDIAN n +CONFIG_PPC64 y +CONFIG_CPU_LITTLE_ENDIAN y +CONFIG_NR_IRQS 512 +CONFIG_SCOM_DEBUGFS n + +# Menu: ROOT (x86) + +# Menu: Advanced setup (powerpc) +CONFIG_RELOCATABLE y + +# Menu: Boot options (arm) +CONFIG_KEXEC y +CONFIG_CRASH_DUMP y + +# Menu: Bus options (arm) + +# Menu: Bus options (powerpc) +CONFIG_FSL_LBC y + +# Menu: Bus options (x86) +CONFIG_ISA_DMA_API y +CONFIG_X86_SYSFB y +CONFIG_ISA y +CONFIG_EISA y +CONFIG_ALIX y +CONFIG_NET5501 y +CONFIG_GEOS y + +# Menu: Bus options >> PCCard (PCMCIA/CardBus) support +CONFIG_PCCARD p policy<(arch armel armhf ppc64el &/ value n) | value m> note +CONFIG_PCMCIA_LOAD_CIS y +CONFIG_CARDBUS y +CONFIG_YENTA_O2 y +CONFIG_YENTA_RICOH y +CONFIG_YENTA_TI y +CONFIG_YENTA_ENE_TUNE y +CONFIG_YENTA_TOSHIBA y + +# Menu: Bus options >> PCI support +CONFIG_PCI y +CONFIG_PCIEPORTBUS y +CONFIG_HOTPLUG_PCI_PCIE y +CONFIG_PCIEAER_INJECT n flag +CONFIG_PCI_IOAPIC p policy<(arch i386 amd64 &/ value y) | value m> note + +# Menu: Bus options >> PCI support (arm) + +# Menu: Bus options >> PCI support (powerpc) + +# Menu: Bus options >> PCI support (x86) +CONFIG_PCI_MMCONFIG y +CONFIG_PCI_GOANY y + +# Menu: Bus options >> PCI support >> PCI host controller drivers +CONFIG_PCI_IMX6 y +CONFIG_PCI_TEGRA y + +# Menu: Bus options >> PCI support >> RapidIO support +CONFIG_RAPIDIO y +CONFIG_RAPIDIO_DISC_TIMEOUT 30 +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS n +CONFIG_RAPIDIO_DMA_ENGINE y + +# Menu: Bus options >> PCI support >> RapidIO support (powerpc) + +# Menu: Bus options >> PCI support >> RapidIO support (x86) + +# Menu: Bus options >> PCI support >> RapidIO support >> RapidIO Switch drivers + +# Menu: Bus options >> PCI support >> Support for PCI Hotplug +CONFIG_HOTPLUG_PCI y +CONFIG_HOTPLUG_PCI_ACPI y +CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM y + +# Menu: CAN Device Drivers + +# Menu: CAN Device Drivers >> CAN USB interfaces + +# Menu: CAN Device Drivers >> Platform CAN drivers with Netlink support +CONFIG_CAN_CALC_BITTIMING y +CONFIG_CAN_LEDS y + +# Menu: CAN Device Drivers >> Platform CAN drivers with Netlink support >> Bosch CC770 and Intel AN82527 devices + +# Menu: CAN Device Drivers >> Platform CAN drivers with Netlink support >> Bosch C_CAN/D_CAN devices + +# Menu: CAN Device Drivers >> Platform CAN drivers with Netlink support >> Philips/NXP SJA1000 devices +CONFIG_CAN_PEAK_PCIEC y + +# Menu: CPU Power Management >> CPU Frequency scaling +CONFIG_CPU_FREQ y +CONFIG_CPU_FREQ_STAT y note +CONFIG_CPU_FREQ_STAT_DETAILS y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE y note +CONFIG_CPU_FREQ_GOV_PERFORMANCE y note +CONFIG_CPU_FREQ_GOV_POWERSAVE y note +CONFIG_CPU_FREQ_GOV_USERSPACE y note +CONFIG_CPU_FREQ_GOV_ONDEMAND y note +CONFIG_CPU_FREQ_GOV_CONSERVATIVE y note +CONFIG_CRYPTO_DEV_NX y + +# Menu: CPU Power Management >> CPU Frequency scaling >> ARM CPU frequency scaling drivers + +# Menu: CPU Power Management >> CPU Frequency scaling >> AVR32 CPU frequency scaling drivers + +# Menu: CPU Power Management >> CPU Frequency scaling >> CPUFreq processor drivers + +# Menu: CPU 
Power Management >> CPU Frequency scaling >> MIPS CPUFreq processor drivers + +# Menu: CPU Power Management >> CPU Frequency scaling >> PowerPC CPU frequency scaling drivers + +# Menu: CPU Power Management >> CPU Frequency scaling >> SH CPU Frequency scaling + +# Menu: CPU Power Management >> CPU Frequency scaling >> SPARC CPU frequency scaling drivers + +# Menu: CPU Power Management >> CPU Frequency scaling >> x86 CPU frequency scaling drivers +CONFIG_X86_INTEL_PSTATE y +CONFIG_X86_PCC_CPUFREQ y note +CONFIG_X86_ACPI_CPUFREQ y note +CONFIG_X86_ACPI_CPUFREQ_CPB y +CONFIG_X86_POWERNOW_K8 y note +CONFIG_X86_SPEEDSTEP_CENTRINO y note +CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE y +CONFIG_X86_SPEEDSTEP_ICH y note +CONFIG_X86_SPEEDSTEP_SMI y note +CONFIG_X86_CPUFREQ_NFORCE2 y note +CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK y + +# Menu: CPU Power Management >> CPU Idle +CONFIG_CPU_IDLE y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS y +CONFIG_CPU_IDLE_GOV_LADDER y +CONFIG_CPU_IDLE_GOV_MENU y + +# Menu: CPU Power Management >> CPU Idle >> ARM CPU Idle Drivers +CONFIG_ARM_BIG_LITTLE_CPUIDLE y +CONFIG_ARM_HIGHBANK_CPUIDLE p policy<(arch armhf & flavour generic &/ value n) | value y> note + +# Menu: Cryptographic API +set +tree-menu 'Cryptographic API' ?flag EXPERIMENTAL ?type bool = n +set +tree-menu 'Cryptographic API' ?flag EXPERIMENTAL ?type tristate = n +set +tree-menu 'Cryptographic API' !flag EXPERIMENTAL ?type tristate = m + +CONFIG_CRYPTO y mark +CONFIG_CRYPTO_MANAGER y note +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS y flag +CONFIG_CRYPTO_TEST m flag +CONFIG_CRYPTO_CBC y note +CONFIG_CRYPTO_ECB y note +CONFIG_CRYPTO_HMAC y note +CONFIG_CRYPTO_CRC32C y note +CONFIG_CRYPTO_CRC32C_INTEL y note +CONFIG_CRYPTO_CRCT10DIF y note +CONFIG_CRYPTO_MD5 y note +CONFIG_CRYPTO_SHA1 y note +CONFIG_CRYPTO_SHA256 y note +CONFIG_CRYPTO_SHA512 y note +CONFIG_CRYPTO_AES y note +CONFIG_CRYPTO_LZO y note + +# Menu: Cryptographic API >> Asymmetric (public-key cryptographic) key type +CONFIG_ASYMMETRIC_KEY_TYPE y note +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE y note +CONFIG_PUBLIC_KEY_ALGO_RSA y note +CONFIG_X509_CERTIFICATE_PARSER y note + +# Menu: Cryptographic API >> Hardware crypto devices +CONFIG_CRYPTO_HW y +CONFIG_CRYPTO_DEV_PADLOCK y note +CONFIG_CRYPTO_DEV_HIFN_795X_RNG y +CONFIG_CRYPTO_DEV_NX_COMPRESS y note + +# Menu: Device Drivers +CONFIG_POWER_AVS y +CONFIG_RESET_CONTROLLER y +CONFIG_VEXPRESS_CONFIG y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM y + +# Menu: Device Drivers >> 1-wire Bus Masters + +# Menu: Device Drivers >> 1-wire Slaves +CONFIG_W1_SLAVE_DS2408_READBACK y +CONFIG_W1_SLAVE_DS2433_CRC n + +# Menu: Device Drivers >> ATA/ATAPI/MFM/RLL support (DEPRECATED) +set +tree 'Device Drivers>>ATA/ATAPI/MFM/RLL support (DEPRECATED)' = n +# +CONFIG_IDE p policy<(arch powerpc ppc64el &/ value y) | value n> note +CONFIG_IDE_GD y +CONFIG_IDE_GD_ATA y +CONFIG_BLK_DEV_IDECD y +CONFIG_IDEPCI_PCIBUS_ORDER y +CONFIG_BLK_DEV_GENERIC y +CONFIG_BLK_DEV_AMD74XX y +CONFIG_IDE_PROC_FS y + +# Menu: Device Drivers >> Accelerometers + +# Menu: Device Drivers >> Accessibility support + +# Menu: Device Drivers >> Active cards + +# Menu: Device Drivers >> Amplifiers + +# Menu: Device Drivers >> Analog to digital converters +CONFIG_LP8788_ADC y +CONFIG_EXYNOS_ADC y + +# Menu: Device Drivers >> Android +CONFIG_ANDROID n + +# Menu: Device Drivers >> Atmel devices (AVR32 and AT91) + +# Menu: Device Drivers >> Auxiliary Display support +CONFIG_AUXDISPLAY y +CONFIG_KS0108_DELAY 2 +CONFIG_CFAG12864B_RATE 20 + +# Menu: Device Drivers >> Block devices +set +tree-menu 
'Device Drivers>>Block devices' ?flag EXPERIMENTAL ?type bool = n +set +tree-menu 'Device Drivers>>Block devices' ?flag EXPERIMENTAL ?type tristate = n +set +tree-menu 'Device Drivers>>Block devices' !flag EXPERIMENTAL ?type tristate = m +# +CONFIG_BLK_DEV y +CONFIG_PARIDE_EPATC8 y +CONFIG_CISS_SCSI_TAPE y +CONFIG_BLK_DEV_UMEM m note +CONFIG_BLK_DEV_LOOP y note +CONFIG_BLK_DEV_LOOP_MIN_COUNT 8 +CONFIG_DRBD_FAULT_INJECTION n +CONFIG_BLK_DEV_RAM y note +CONFIG_BLK_DEV_RAM_COUNT 16 +CONFIG_BLK_DEV_RAM_SIZE 65536 +CONFIG_BLK_DEV_XIP n note +CONFIG_CDROM_PKTCDVD_BUFFERS 8 +CONFIG_CDROM_PKTCDVD_WCACHE n mark +CONFIG_XEN_BLKDEV_FRONTEND y note +CONFIG_VIRTIO_BLK y note +CONFIG_BLK_DEV_HD n note mark +CONFIG_BLK_DEV_RBD m note +CONFIG_MG_DISK_RES 0 + +# Menu: Device Drivers >> Broadcom specific AMBA +CONFIG_BCMA_HOST_PCI y +CONFIG_BCMA_HOST_SOC y +CONFIG_BCMA_DRIVER_GMAC_CMN y +CONFIG_BCMA_DRIVER_GPIO y + +# Menu: Device Drivers >> Bus devices +CONFIG_IMX_WEIM y +CONFIG_OMAP_INTERCONNECT y note +CONFIG_ARM_CCI y + +# Menu: Device Drivers >> Character devices +set +tree 'Device Drivers>>Character devices' ?flag EXPERIMENTAL ?type bool = n +set +tree 'Device Drivers>>Character devices' ?flag EXPERIMENTAL ?type tristate = n +set +tree 'Device Drivers>>Character devices' !flag EXPERIMENTAL ?type tristate = m +# +CONFIG_TTY y +CONFIG_VT y +CONFIG_CONSOLE_TRANSLATIONS y +CONFIG_VT_CONSOLE y +CONFIG_VT_HW_CONSOLE_BINDING y +CONFIG_UNIX98_PTYS y +CONFIG_DEVPTS_MULTIPLE_INSTANCES y +CONFIG_LEGACY_PTYS y +CONFIG_LEGACY_PTY_COUNT 0 +CONFIG_SERIAL_NONSTANDARD y +CONFIG_CYZ_INTR n +CONFIG_DEVKMEM n mark +CONFIG_TTY_PRINTK y +CONFIG_LP_CONSOLE n +CONFIG_HVC_XEN y +CONFIG_HVC_XEN_FRONTEND y +CONFIG_HW_RANDOM y +CONFIG_HW_RANDOM_ATMEL n note +CONFIG_NVRAM p policy<(arch powerpc &/ value y) | value m> note +CONFIG_MAX_RAW_DEVS 256 +CONFIG_HPET y +CONFIG_HPET_MMAP y +CONFIG_HPET_MMAP_DEFAULT y +CONFIG_HVC_DCC y +CONFIG_VIRTIO_CONSOLE y +CONFIG_HVC_CONSOLE y +CONFIG_HVC_OLD_HVSI n +CONFIG_HVC_OPAL y +CONFIG_HVC_RTAS y +CONFIG_HVC_UDBG n + +# Menu: Device Drivers >> Character devices >> IPMI top-level message handler +CONFIG_IPMI_PANIC_EVENT n + +# Menu: Device Drivers >> Character devices >> PCMCIA character devices + +# Menu: Device Drivers >> Character devices >> Serial drivers +CONFIG_SERIAL_8250 y note +CONFIG_SERIAL_8250_PNP y +CONFIG_SERIAL_8250_CONSOLE y +CONFIG_SERIAL_8250_DMA y +CONFIG_SERIAL_8250_PCI y +CONFIG_SERIAL_8250_NR_UARTS 48 +CONFIG_SERIAL_8250_RUNTIME_UARTS 32 +CONFIG_SERIAL_8250_EXTENDED y +CONFIG_SERIAL_8250_MANY_PORTS y +CONFIG_SERIAL_8250_SHARE_IRQ y +CONFIG_SERIAL_8250_DETECT_IRQ n +CONFIG_SERIAL_8250_RSA y +CONFIG_SERIAL_KGDB_NMI y +CONFIG_SERIAL_MAX310X y +CONFIG_SERIAL_SCCNXP y +CONFIG_SERIAL_SCCNXP_CONSOLE y +CONFIG_SERIAL_ALTERA_UART_MAXPORTS 4 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE 115200 +CONFIG_SERIAL_ARC_NR_PORTS 1 +CONFIG_SERIAL_RP2_NR_UARTS 32 +CONFIG_SERIAL_AMBA_PL011 y +CONFIG_SERIAL_AMBA_PL011_CONSOLE y +CONFIG_SERIAL_OF_PLATFORM y +CONFIG_SERIAL_8250_EM n +CONFIG_SERIAL_IMX y +CONFIG_SERIAL_IMX_CONSOLE y +CONFIG_SERIAL_SH_SCI_NR_UARTS 2 +CONFIG_SERIAL_OMAP p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_SERIAL_OMAP_CONSOLE y + +# Menu: Device Drivers >> Character devices >> TPM Hardware Support +CONFIG_TCG_TPM y note +CONFIG_TCG_TIS y +CONFIG_TCG_IBMVTPM y + +# Menu: Device Drivers >> Common Clock Framework +CONFIG_COMMON_CLK_VERSATILE y +CONFIG_COMMON_CLK_XGENE y + +# Menu: Device Drivers >> Connector - unified userspace <-> kernelspace linker +CONFIG_CONNECTOR y 
+CONFIG_PROC_EVENTS y + +# Menu: Device Drivers >> Customise DVB Frontends +CONFIG_DVB_DUMMY_FE n note + +# Menu: Device Drivers >> DMA Engine support +CONFIG_DMADEVICES y +CONFIG_NET_DMA y note +CONFIG_ASYNC_TX_DMA y +CONFIG_DMATEST n flag +CONFIG_AMBA_PL08X y +CONFIG_DMA_OMAP y + +# Menu: Device Drivers >> Dallas's 1-wire support +CONFIG_W1_CON y + +# Menu: Device Drivers >> Device Tree and Open Firmware support +CONFIG_PROC_DEVICETREE y +CONFIG_OF_SELFTEST n flag + +# Menu: Device Drivers >> Digital gyroscope sensors + +# Menu: Device Drivers >> Digital to analog converters + +# Menu: Device Drivers >> Distributed Switch Architecture drivers + +# Menu: Device Drivers >> EDAC (Error Detection And Correction) reporting +CONFIG_EDAC y +CONFIG_EDAC_AMD64_ERROR_INJECTION n +CONFIG_EDAC_SBRIDGE m note + +# Menu: Device Drivers >> Encoders, decoders, sensors and other helper chips + +# Menu: Device Drivers >> External Connector Class (extcon) support +set +tree 'Device Drivers >> External Connector Class (extcon) support' ?flag EXPERIMENTAL ?type bool = n +set +tree 'Device Drivers >> External Connector Class (extcon) support' ?flag EXPERIMENTAL ?type tristate = n +set +tree 'Device Drivers >> External Connector Class (extcon) support' !flag EXPERIMENTAL ?type tristate = m +# +CONFIG_EXTCON y + +# Menu: Device Drivers >> FMC support + +# Menu: Device Drivers >> Frequency Synthesizers DDS/PLL >> Clock Generator/Distribution + +# Menu: Device Drivers >> Frequency Synthesizers DDS/PLL >> Phase-Locked Loop (PLL) frequency synthesizers + +# Menu: Device Drivers >> Fusion MPT device support +CONFIG_FUSION y +CONFIG_FUSION_MAX_SGE 128 +CONFIG_FUSION_LOGGING y + +# Menu: Device Drivers >> GPIO Support +set +tree 'Device Drivers>>GPIO Support' = - mark +set +all ?mark 'GPIO' ?flag EXPERIMENTAL ?type bool = n +set +all ?mark 'GPIO' ?flag EXPERIMENTAL ?type tristate = n +set +all ?mark 'GPIO' !flag EXPERIMENTAL ?type tristate = m +# standard forms: +set +pattern ^GPIO_ ?mark 'GPIO' ?type bool !flag EXPERIMENTAL = y +# +CONFIG_GPIO_SYSFS y +CONFIG_GPIOLIB y +CONFIG_GPIO_EM n note +CONFIG_GPIO_TWL4030 y note + +# Menu: Device Drivers >> Generic Driver Options +CONFIG_UEVENT_HELPER_PATH "" +CONFIG_DEVTMPFS y +CONFIG_DEVTMPFS_MOUNT y +CONFIG_STANDALONE p policy<(arch i386 amd64 &/ value n) | value y> +CONFIG_PREVENT_FIRMWARE_BUILD y +CONFIG_FW_LOADER y note +CONFIG_FIRMWARE_IN_KERNEL y +CONFIG_EXTRA_FIRMWARE "" +CONFIG_FW_LOADER_USER_HELPER y +CONFIG_DMA_CMA y +CONFIG_CMA_SIZE_MBYTES 16 +CONFIG_CMA_SIZE_SEL_MBYTES y +CONFIG_CMA_ALIGNMENT 8 +CONFIG_CMA_AREAS 7 + +# Menu: Device Drivers >> Generic Dynamic Voltage and Frequency Scaling (DVFS) support +CONFIG_PM_DEVFREQ y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND y +CONFIG_DEVFREQ_GOV_PERFORMANCE y +CONFIG_DEVFREQ_GOV_POWERSAVE y +CONFIG_DEVFREQ_GOV_USERSPACE y + +# Menu: Device Drivers >> Generic Target Core Mod (TCM) and ConfigFS Infrastructure + +# Menu: Device Drivers >> Generic Thermal sysfs driver +CONFIG_THERMAL y +CONFIG_THERMAL_HWMON y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE y +CONFIG_THERMAL_GOV_FAIR_SHARE y +CONFIG_THERMAL_GOV_STEP_WISE y +CONFIG_THERMAL_GOV_USER_SPACE y +CONFIG_CPU_THERMAL y +CONFIG_THERMAL_EMULATION y + +# Menu: Device Drivers >> Generic powercap sysfs driver +CONFIG_POWERCAP y + +# Menu: Device Drivers >> Graphics support +set +tree 'Device Drivers>>Graphics support' = - mark +set +all ?mark 'Graphics' ?flag EXPERIMENTAL ?type bool = n +set +all ?mark 'Graphics' ?flag EXPERIMENTAL ?type tristate = m note +set +all ?mark 'Graphics' !flag 
EXPERIMENTAL ?type tristate = m +set +pattern ^FB_[^_]+$ ?mark Graphics ?type bool !flag EXPERIMENTAL = y +set +pattern ^FB_[^_]+$ ?mark Graphics ?type bool ?flag EXPERIMENTAL = y note +set +pattern _(KMS|BACKLIGHT|I2C)$ ?mark Graphics ?type bool !flag EXPERIMENTAL = y +# +CONFIG_VGA_ARB y +CONFIG_VGA_ARB_MAX_GPUS 16 +CONFIG_VGA_SWITCHEROO y +CONFIG_DRM_LOAD_EDID_FIRMWARE y + +# Menu: Device Drivers >> Graphics support >> /dev/agpgart (AGP Support) +CONFIG_AGP y note mark +CONFIG_AGP_AMD64 y note +CONFIG_AGP_INTEL y note +CONFIG_AGP_VIA y note +CONFIG_AGP_AMD y note +CONFIG_AGP_NVIDIA y note + +# Menu: Device Drivers >> Graphics support >> Backlight & LCD device support +CONFIG_BACKLIGHT_LCD_SUPPORT y +CONFIG_BACKLIGHT_CLASS_DEVICE y + +# Menu: Device Drivers >> Graphics support >> Console display driver support +CONFIG_VGA_CONSOLE y +CONFIG_VGACON_SOFT_SCROLLBACK n +CONFIG_FRAMEBUFFER_CONSOLE y note +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION n + +# Menu: Device Drivers >> Graphics support >> Direct Rendering Manager (XFree86 4.1.0 and higher DRI support) +CONFIG_DRM p mark policy<(arch armel armhf &/ value y) | value m> note +CONFIG_NOUVEAU_DEBUG 5 +CONFIG_NOUVEAU_DEBUG_DEFAULT 3 +CONFIG_DRM_I915_FBDEV y +CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT y +CONFIG_DRM_VMWGFX_FBCON n +CONFIG_DRM_GMA600 y +CONFIG_DRM_GMA3600 y +CONFIG_DRM_EXYNOS n flag +CONFIG_DRM_RCAR_LVDS y +CONFIG_DRM_OMAP n + +# Menu: Device Drivers >> Graphics support >> Direct Rendering Manager (XFree86 4.1.0 and higher DRI support) >> I2C encoder or helper chips + +# Menu: Device Drivers >> Graphics support >> Exynos Video driver support +CONFIG_EXYNOS_VIDEO y + +# Menu: Device Drivers >> Graphics support >> Marvell MMP Display Subsystem support + +# Menu: Device Drivers >> Graphics support >> OMAP Display Device Drivers (new device model) + +# Menu: Device Drivers >> Graphics support >> OMAP2+ Display Subsystem support +CONFIG_OMAP2_DSS p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_OMAP2_DSS_DEBUGFS n +CONFIG_OMAP2_DSS_DPI y +CONFIG_OMAP2_DSS_VENC y +CONFIG_OMAP4_DSS_HDMI y +CONFIG_OMAP2_DSS_SDI y +CONFIG_OMAP2_DSS_DSI n +CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK 0 +CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET y + +# Menu: Device Drivers >> Graphics support >> Support for frame buffer devices +CONFIG_FB y +CONFIG_FIRMWARE_EDID y +CONFIG_FB_MODE_HELPERS y +CONFIG_FB_TILEBLITTING y +CONFIG_FB_PM2_FIFO_DISCONNECT y +CONFIG_FB_CYBER2000_DDC y +CONFIG_FB_MATROX_MILLENIUM y +CONFIG_FB_MATROX_MYSTIQUE y +CONFIG_FB_MATROX_G y +CONFIG_FB_RADEON p policy<(arch powerpc &/ value y) | value m> note +CONFIG_FB_ATY128 p policy<(arch powerpc &/ value y) | value m> note +CONFIG_FB_ATY_CT y +CONFIG_FB_ATY_GX y +CONFIG_FB_S3_DDC y +CONFIG_FB_SIS_300 y +CONFIG_FB_SIS_315 y +CONFIG_FB_VIA_X_COMPATIBILITY y +CONFIG_FB_3DFX p policy<(arch powerpc &/ value y) | value m> note +CONFIG_FB_3DFX_ACCEL y +CONFIG_FB_VOODOO1 p policy<(arch powerpc &/ value y) | value m> note +CONFIG_FB_CARMINE_DRAM_EVAL y +CONFIG_FB_TMIO_ACCELL y +CONFIG_FB_VIRTUAL n flag +CONFIG_FB_MB862XX_PCI_GDC y + +# Menu: Device Drivers >> Graphics support >> Support for frame buffer devices >> Bootup logo +CONFIG_LOGO n + +# Menu: Device Drivers >> Graphics support >> Support for frame buffer devices >> Framebuffer foreign endianness support +CONFIG_FB_FOREIGN_ENDIAN n + +# Menu: Device Drivers >> Graphics support >> Support for frame buffer devices >> OMAP2+ frame buffer support +CONFIG_FB_OMAP2 p policy<(arch armel armhf &/ value y) | value 
m> note +CONFIG_FB_OMAP2_NUM_FBS 3 + +# Menu: Device Drivers >> HID support +set +tree 'Device Drivers>>HID support' = - mark +# enable modular HID drivers by default +set +all ?mark 'HID' ?flag EXPERIMENTAL ?type bool = n +set +all ?mark 'HID' ?flag EXPERIMENTAL ?type tristate = m note` +set +all ?mark 'HID' !flag EXPERIMENTAL ?type tristate = m +# standard forms +set +pattern _FF$ ?mark HID ?type bool !flag EXPERIMENTAL = y + +# Menu: Device Drivers >> HID support >> HID bus support +CONFIG_HID_BATTERY_STRENGTH y +CONFIG_HIDRAW y + +# Menu: Device Drivers >> HID support >> HID bus support >> Special HID drivers +CONFIG_HID_PICOLCD_FB y +CONFIG_HID_PICOLCD_BACKLIGHT y +CONFIG_HID_PICOLCD_LCD y +CONFIG_HID_PICOLCD_LEDS y +CONFIG_HID_PICOLCD_CIR y + +# Menu: Device Drivers >> HID support >> I2C HID support + +# Menu: Device Drivers >> HID support >> USB HID support +CONFIG_HID_PID y + +# Menu: Device Drivers >> HID support >> USB HID support >> USB HID transport layer +CONFIG_USB_HIDDEV y + +# Menu: Device Drivers >> HID support >> USB HID support >> USB HID transport layer >> USB HID Boot Protocol drivers + +# Menu: Device Drivers >> HSI support + +# Menu: Device Drivers >> Hardware Monitoring support +CONFIG_HWMON y mark +CONFIG_HWMON y mark mark +CONFIG_HWMON y + +# Menu: Device Drivers >> Hardware Monitoring support >> PMBus support + +# Menu: Device Drivers >> Hardware Spinlock drivers +CONFIG_HWSPINLOCK_OMAP y note + +# Menu: Device Drivers >> Hid Sensor IIO Common + +# Menu: Device Drivers >> I2C Algorithms +CONFIG_I2C_ALGOBIT p policy<(arch powerpc &/ value y) | value m> note + +# Menu: Device Drivers >> I2C Hardware Bus support +CONFIG_I2C_IMX y +CONFIG_I2C_OMAP p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> I2C support +CONFIG_I2C y +CONFIG_I2C_COMPAT y mark +CONFIG_I2C_HELPER_AUTO y + +# Menu: Device Drivers >> I2C support >> I2C bus multiplexing support + +# Menu: Device Drivers >> I2C support >> I2C bus multiplexing support >> Multiplexer I2C Chip support + +# Menu: Device Drivers >> I2O device support +CONFIG_I2O_CONFIG_OLD_IOCTL n + +# Menu: Device Drivers >> IEEE 1394 (FireWire) support + +# Menu: Device Drivers >> IIO staging drivers +CONFIG_IIO_SIMPLE_DUMMY_EVENTS n note +CONFIG_IIO_SIMPLE_DUMMY_BUFFER n note + +# Menu: Device Drivers >> IIO staging drivers >> Accelerometers +CONFIG_LIS3L02DQ p policy<(arch armhf &/ value n) | value m> flag + +# Menu: Device Drivers >> IIO staging drivers >> Active energy metering IC + +# Menu: Device Drivers >> IIO staging drivers >> Analog digital bi-direction converters + +# Menu: Device Drivers >> IIO staging drivers >> Analog to digital converters +CONFIG_AD799X_RING_BUFFER y + +# Menu: Device Drivers >> IIO staging drivers >> Capacitance to digital converters + +# Menu: Device Drivers >> IIO staging drivers >> Digital gyroscope sensors + +# Menu: Device Drivers >> IIO staging drivers >> Direct Digital Synthesis + +# Menu: Device Drivers >> IIO staging drivers >> Light sensors + +# Menu: Device Drivers >> IIO staging drivers >> Magnetometer sensors + +# Menu: Device Drivers >> IIO staging drivers >> Network Analyzer, Impedance Converters + +# Menu: Device Drivers >> IIO staging drivers >> Resolver to digital converters + +# Menu: Device Drivers >> IOMMU Hardware Support +set +tree 'Device Drivers>>IOMMU Hardware Support' ?pattern _IOMMU$ ?type bool !flag EXPERIMENTAL = y +# +CONFIG_IOMMU_SUPPORT y +CONFIG_INTEL_IOMMU_DEFAULT_ON n note +CONFIG_OMAP_IOVMM n note +CONFIG_SHMOBILE_IOMMU_ADDRSIZE_2048MB 
y + +# Menu: Device Drivers >> ISDN feature submodules + +# Menu: Device Drivers >> ISDN support +CONFIG_ISDN y +CONFIG_HYSDN_CAPI y + +# Menu: Device Drivers >> ISDN support >> CAPI 2.0 subsystem + +# Menu: Device Drivers >> ISDN support >> CAPI 2.0 subsystem >> Active AVM cards +CONFIG_CAPI_AVM y + +# Menu: Device Drivers >> ISDN support >> CAPI 2.0 subsystem >> Active Eicon DIVA Server cards +CONFIG_CAPI_EICON y + +# Menu: Device Drivers >> ISDN support >> Modular ISDN driver + +# Menu: Device Drivers >> ISDN support >> Old ISDN4Linux (deprecated) + +# Menu: Device Drivers >> ISDN support >> Siemens Gigaset support + +# Menu: Device Drivers >> Industrial I/O support + +# Menu: Device Drivers >> IndustryPack bus support + +# Menu: Device Drivers >> Inertial measurement units + +# Menu: Device Drivers >> InfiniBand support + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) +set +tree 'Device Drivers,Input device support,INPUT' = - mark +set +all ?mark 'INPUT' ?flag EXPERIMENTAL ?type bool = n +set +all ?mark 'INPUT' ?flag EXPERIMENTAL ?type tristate = m note +set +all ?mark 'INPUT' !flag EXPERIMENTAL ?type tristate = m +set +pattern ^MOUSE_PS2_ ?mark INPUT ?type bool !flag EXPERIMENTAL = y +set +pattern ^TOUCHSCREEN_ ?mark INPUT ?type bool !flag EXPERIMENTAL = y +set +pattern _FF$ ?mark INPUT ?type bool !flag EXPERIMENTAL = y +# +CONFIG_INPUT y +CONFIG_INPUT_MOUSEDEV y +CONFIG_INPUT_MOUSEDEV_PSAUX y +CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768 +CONFIG_INPUT_EVDEV y + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) >> Joysticks/Gamepads +CONFIG_INPUT_JOYSTICK y +CONFIG_JOYSTICK_IFORCE_USB y +CONFIG_JOYSTICK_IFORCE_232 y +CONFIG_JOYSTICK_XPAD_LEDS y + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) >> Keyboards +CONFIG_INPUT_KEYBOARD y +CONFIG_KEYBOARD_ATKBD y note +CONFIG_KEYBOARD_IMX n +CONFIG_KEYBOARD_OMAP4 p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_KEYBOARD_TWL4030 p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) >> Mice +CONFIG_INPUT_MOUSE y +CONFIG_MOUSE_INPORT n note + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) >> Miscellaneous devices +CONFIG_INPUT_MISC y +CONFIG_INPUT_KXTJ9_POLLED_MODE n +CONFIG_INPUT_UINPUT y + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) >> Tablets +CONFIG_INPUT_TABLET y + +# Menu: Device Drivers >> Input device support >> Generic input layer (needed for keyboard, mouse, ...) 
>> Touchscreens +CONFIG_INPUT_TOUCHSCREEN y + +# Menu: Device Drivers >> Input device support >> Hardware I/O ports +CONFIG_SERIO y note +CONFIG_SERIO_I8042 y note +CONFIG_SERIO_SERPORT p policy<(arch armhf &/ value y) | value m> note +CONFIG_SERIO_LIBPS2 y note + +# Menu: Device Drivers >> LED Support +CONFIG_NEW_LEDS y + +# Menu: Device Drivers >> LED Support >> LED Class Support +CONFIG_LEDS_CLASS y +CONFIG_LEDS_PCA9532_GPIO y +CONFIG_LEDS_GPIO p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_LEDS_PWM p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_LEDS_ASIC3 y + +# Menu: Device Drivers >> LED Support >> LED Class Support >> LED Trigger support +CONFIG_LEDS_TRIGGERS y +CONFIG_LEDS_TRIGGER_HEARTBEAT p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_LEDS_TRIGGER_CPU y + +# Menu: Device Drivers >> Light sensors + +# Menu: Device Drivers >> MMC/SD/SDIO card support +CONFIG_MMC y +CONFIG_MMC_BLOCK p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MMC_TEST n flag +CONFIG_MMC_SDHCI p policy<(arch armel armhf highbank &/ value y) | value m> note +CONFIG_MMC_SDHCI_PLTFM p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MMC_OMAP p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MMC_OMAP_HS p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Macintosh device drivers +CONFIG_MACINTOSH_DRIVERS y + +# Menu: Device Drivers >> Magnetometer sensors + +# Menu: Device Drivers >> Mailbox Hardware Support +CONFIG_MAILBOX y +CONFIG_PL320_MBOX y +CONFIG_OMAP_MBOX_KFIFO_SIZE 256 + +# Menu: Device Drivers >> Memory Controller drivers +CONFIG_MEMORY y +CONFIG_TEGRA20_MC y +CONFIG_TEGRA30_MC y + +# Menu: Device Drivers >> Memory Technology Device (MTD) support +CONFIG_MTD p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MTD_BLKDEVS p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MTD_BLOCK p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_SM_FTL m note +CONFIG_MTD_OF_PARTS p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> Enable UBI - Unsorted block images + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> LPDDR flash memory drivers + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> Mapping drivers for chip access +CONFIG_MTD_COMPLEX_MAPPINGS y +CONFIG_MTD_PHYSMAP_COMPAT n + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> NAND Device Support +CONFIG_MTD_NAND p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MTD_NAND_OMAP2 p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MTD_NAND_OMAP_BCH p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> OneNAND Device Support + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> RAM/ROM/Flash chip drivers + +# Menu: Device Drivers >> Memory Technology Device (MTD) support >> Self-contained MTD device drivers + +# Menu: Device Drivers >> Microsoft Hyper-V guest support + +# Menu: Device Drivers >> Misc devices +CONFIG_CS5535_MFGPT n note +CONFIG_SRAM y + +# Menu: Device Drivers >> Misc devices >> EEPROM support + +# Menu: Device Drivers >> Misc devices >> Silicon Labs C2 port support + +# Menu: Device Drivers >> Misc devices >> Texas Instruments shared transport line discipline + +# Menu: Device Drivers >> Multifunction device drivers +set +tree 'Device Drivers>>Multifunction 
device drivers' ?pattern ^MFD_ ?type bool !flag EXPERIMENTAL = y +CONFIG_EZX_PCAP p policy<(arch armhf &/ value n) | value m> flag +CONFIG_MFD_SM501 p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MFD_OMAP_USB_HOST y + +# Menu: Device Drivers >> Multifunction device drivers >> STMicroelectronics STMPE +CONFIG_MFD_STMPE y + +# Menu: Device Drivers >> Multifunction device drivers >> STMicroelectronics STMPE >> STMicroelectronics STMPE Interface Drivers +CONFIG_STMPE_I2C y +CONFIG_STMPE_SPI y + +# Menu: Device Drivers >> Multimedia Capabilities Port drivers + +# Menu: Device Drivers >> Multimedia support +CONFIG_MEDIA_SUPPORT p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_MEDIA_ANALOG_TV_SUPPORT y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT y +CONFIG_MEDIA_RC_SUPPORT y +CONFIG_VIDEO_FIXED_MINOR_RANGES n +CONFIG_DVB_NET y +CONFIG_DVB_MAX_ADAPTERS 8 +CONFIG_DVB_DYNAMIC_MINORS y +CONFIG_SMS_SIANO_RC y +CONFIG_MEDIA_SUBDRV_AUTOSELECT y + +# Menu: Device Drivers >> Multimedia support >> AM/FM radio receivers/transmitters support +CONFIG_MEDIA_RADIO_SUPPORT y + +# Menu: Device Drivers >> Multimedia support >> AM/FM radio receivers/transmitters support >> Customize TV tuners + +# Menu: Device Drivers >> Multimedia support >> AM/FM radio receivers/transmitters support >> Radio Adapters +CONFIG_RADIO_ADAPTERS y +CONFIG_RADIO_SI470X y + +# Menu: Device Drivers >> Multimedia support >> AM/FM radio receivers/transmitters support >> Radio Adapters >> ISA radio devices +CONFIG_V4L_RADIO_ISA_DRIVERS y + +# Menu: Device Drivers >> Multimedia support >> Cameras/video grabbers support +CONFIG_MEDIA_CAMERA_SUPPORT y + +# Menu: Device Drivers >> Multimedia support >> Cameras/video grabbers support >> ISA and parallel port devices +CONFIG_MEDIA_PARPORT_SUPPORT y + +# Menu: Device Drivers >> Multimedia support >> Cameras/video grabbers support >> Media test drivers +CONFIG_V4L_TEST_DRIVERS y + +# Menu: Device Drivers >> Multimedia support >> Cameras/video grabbers support >> Memory-to-memory multimedia devices +CONFIG_V4L_MEM2MEM_DRIVERS y + +# Menu: Device Drivers >> Multimedia support >> Cameras/video grabbers support >> V4L platform devices +CONFIG_V4L_PLATFORM_DRIVERS y + +# Menu: Device Drivers >> Multimedia support >> Media PCI Adapters +CONFIG_MEDIA_PCI_SUPPORT y +CONFIG_VIDEO_SAA7134_RC y +CONFIG_DVB_AV7110_OSD y + +# Menu: Device Drivers >> Multimedia support >> Media USB Adapters +CONFIG_MEDIA_USB_SUPPORT y +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV y +CONFIG_USB_PWC_INPUT_EVDEV y +CONFIG_VIDEO_PVRUSB2_SYSFS y +CONFIG_VIDEO_PVRUSB2_DVB y +CONFIG_VIDEO_STK1160_AC97 y +CONFIG_VIDEO_CX231XX_RC y +CONFIG_DVB_USB_DIBUSB_MB_FAULTY n + +# Menu: Device Drivers >> Multimedia support >> Media USB Adapters >> GSPCA based webcams + +# Menu: Device Drivers >> Multimedia support >> Remote Controller devices +CONFIG_RC_DEVICES y + +# Menu: Device Drivers >> Multimedia support >> Remote controller decoders +CONFIG_RC_DECODERS y + +# Menu: Device Drivers >> Multiple devices driver support (RAID and LVM) +set +tree 'Device Drivers>>MD' ?flag EXPERIMENTAL ?type bool = n +set +tree 'Device Drivers>>MD' ?flag EXPERIMENTAL ?type tristate = m note +set +tree 'Device Drivers>>MD' !flag EXPERIMENTAL ?type tristate = m +# +CONFIG_MD y +CONFIG_BLK_DEV_MD y +CONFIG_MD_AUTODETECT y +CONFIG_BLK_DEV_DM y +CONFIG_DM_UEVENT y + +# Menu: Device Drivers >> Network device support +set +pattern '^NET_VENDOR_' = y +# +CONFIG_NETDEVICES y +CONFIG_FDDI y +CONFIG_HIPPI n note +CONFIG_PPP y +CONFIG_PPP_FILTER y +CONFIG_PPP_MULTILINK 
y +CONFIG_SLIP_COMPRESSED y +CONFIG_SLIP_SMART y +CONFIG_SLIP_MODE_SLIP6 y +CONFIG_XEN_NETDEV_FRONTEND y note + +# Menu: Device Drivers >> Network device support >> ARCnet support + +# Menu: Device Drivers >> Network device support >> ATM drivers +CONFIG_ATM_DRIVERS y + +# Menu: Device Drivers >> Network device support >> Ethernet driver support +CONFIG_ETHERNET y +CONFIG_NET_CALXEDA_XGMAC p policy<(arch armhf &/ value y) | value m> note +CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL n + +# Menu: Device Drivers >> Network device support >> IEEE 802.15.4 drivers +CONFIG_IEEE802154_FAKEHARD n flag + +# Menu: Device Drivers >> Network device support >> Network core driver support +CONFIG_NET_CORE y +CONFIG_NET_FC y +CONFIG_NETCONSOLE_DYNAMIC y +CONFIG_NETPOLL_TRAP n +CONFIG_TUN y +CONFIG_VIRTIO_NET p policy<(arch i386 amd64 &/ value y) | (arch armel armhf &/ value n) | value m> note + +# Menu: Device Drivers >> Network device support >> Network core driver support >> Ethernet team driver support + +# Menu: Device Drivers >> Network device support >> PHY Device support and infrastructure +set +tree 'Device Drivers>>Network device support>>PHY Device support and infrastructure' ?flag EXPERIMENTAL ?type bool = n +set +tree 'Device Drivers>>Network device support>>PHY Device support and infrastructure' ?flag EXPERIMENTAL ?type tristate = y note +set +tree 'Device Drivers>>Network device support>>PHY Device support and infrastructure' !flag EXPERIMENTAL ?type tristate = y note +# +CONFIG_PHYLIB y +CONFIG_STE10XP y +CONFIG_FIXED_PHY y + +# Menu: Device Drivers >> Network device support >> S/390 network device drivers + +# Menu: Device Drivers >> Network device support >> Wan interfaces support +CONFIG_WAN y +CONFIG_DSCC4_PCISYNC y +CONFIG_DSCC4_PCI_RST y +CONFIG_DLCI_MAX 8 + +# Menu: Device Drivers >> Network device support >> Wireless LAN +CONFIG_WLAN y +CONFIG_B43_BCMA y +CONFIG_B43_PCMCIA n +CONFIG_B43_SDIO n +CONFIG_B43_PHY_N y +CONFIG_B43_PHY_LP y +CONFIG_B43_PHY_HT y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE y +CONFIG_BRCMFMAC_USB y +CONFIG_BRCMDBG - flag + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> Atheros Wireless Cards +CONFIG_ATH6KL_USB m note + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) >> Debugging Options +CONFIG_IWLWIFI_DEBUGFS y +CONFIG_IWLWIFI_DEVICE_TRACING y + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> Ralink driver support +CONFIG_RT2800PCI_RT33XX y +CONFIG_RT2800PCI_RT35XX y +CONFIG_RT2800PCI_RT53XX y +CONFIG_RT2800PCI_RT3290 y +CONFIG_RT2800USB_RT33XX y +CONFIG_RT2800USB_RT35XX y +CONFIG_RT2800USB_RT3573 y +CONFIG_RT2800USB_RT53XX y +CONFIG_RT2800USB_RT55XX y + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> Realtek rtlwifi family of devices +CONFIG_RTL8192DE m note + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> TI Wireless LAN support +CONFIG_WL_TI y + +# Menu: Device Drivers >> Network device support >> Wireless LAN >> TI Wireless LAN support >> TI wl1251 driver support + +# Menu: Device Drivers >> PHY Subsystem +CONFIG_TWL4030_USB y + +# Menu: Device Drivers >> PPS support +CONFIG_PPS_CLIENT_KTIMER n flag + +# Menu: Device Drivers >> PTP clock support + +# Menu: Device Drivers >> Parallel port support +CONFIG_PARPORT_PC_FIFO y 
+CONFIG_PARPORT_PC_SUPERIO n +CONFIG_PARPORT_1284 y + +# Menu: Device Drivers >> Passive cards +set +tree 'Device Drivers>>Passive cards' ?pattern '^HISAX_' !pattern '^HISAX_NO_' ?type bool = y + +# Menu: Device Drivers >> Pin controllers +set +tree 'Device Drivers>>Pin controllers' ?pattern '^PINCTRL_' ?type bool = y +CONFIG_PINMUX y +CONFIG_PINCONF y + +# Menu: Device Drivers >> Platform support for Chrome hardware +CONFIG_CHROME_PLATFORMS y + +# Menu: Device Drivers >> Plug and Play support +CONFIG_PNP y +CONFIG_ISAPNP y +CONFIG_PNPBIOS y +CONFIG_PNPBIOS_PROC_FS y + +# Menu: Device Drivers >> Power supply class support +CONFIG_POWER_SUPPLY y +CONFIG_BATTERY_BQ27X00_I2C y +CONFIG_BATTERY_BQ27X00_PLATFORM y +CONFIG_CHARGER_MANAGER y +CONFIG_POWER_RESET_RESTART y + +# Menu: Device Drivers >> Power supply class support >> Board level reset or power off +CONFIG_POWER_RESET y +CONFIG_POWER_RESET_GPIO y +CONFIG_POWER_RESET_VEXPRESS y + +# Menu: Device Drivers >> Pressure sensors + +# Menu: Device Drivers >> Pulse-Width Modulation (PWM) Support +CONFIG_PWM y + +# Menu: Device Drivers >> Real Time Clock +CONFIG_RTC_CLASS y +CONFIG_RTC_HCTOSYS y +CONFIG_RTC_SYSTOHC y +CONFIG_RTC_HCTOSYS_DEVICE "rtc0" +CONFIG_RTC_INTF_SYSFS y +CONFIG_RTC_INTF_PROC y +CONFIG_RTC_INTF_DEV y +CONFIG_RTC_INTF_DEV_UIE_EMUL n +CONFIG_RTC_DRV_TEST n flag +CONFIG_RTC_DRV_M41T80_WDT y +CONFIG_RTC_DRV_CMOS p policy<(arch i386 amd64 &/ value y) | value m> note +CONFIG_RTC_DRV_TWL4030 p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Remoteproc drivers + +# Menu: Device Drivers >> SCSI device support + +# Menu: Device Drivers >> SCSI device support >> SCSI device support +CONFIG_SCSI y +CONFIG_SCSI_PROC_FS y +CONFIG_BLK_DEV_SD y note +CONFIG_BLK_DEV_SR y note +CONFIG_BLK_DEV_SR_VENDOR n note +CONFIG_CHR_DEV_SG y note +CONFIG_SCSI_MULTI_LUN y +CONFIG_SCSI_CONSTANTS y +CONFIG_SCSI_LOGGING y +CONFIG_SCSI_SCAN_ASYNC y +CONFIG_SCSI_OSD_DPRINT_SENSE 1 + +# Menu: Device Drivers >> SCSI device support >> SCSI device support >> PCMCIA SCSI adapter support +CONFIG_SCSI_LOWLEVEL_PCMCIA y + +# Menu: Device Drivers >> SCSI device support >> SCSI device support >> SCSI Device Handlers + +# Menu: Device Drivers >> SCSI device support >> SCSI device support >> SCSI Transports +CONFIG_SCSI_SPI_ATTRS p policy<(arch i386 amd64 &/ value y) | value m> note +CONFIG_SCSI_FC_TGT_ATTRS y +CONFIG_SCSI_SAS_ATA y +CONFIG_SCSI_SAS_HOST_SMP y +CONFIG_SCSI_SRP_TGT_ATTRS y + +# Menu: Device Drivers >> SCSI device support >> SCSI device support >> SCSI low-level drivers +CONFIG_SCSI_LOWLEVEL y +CONFIG_AIC7XXX_CMDS_PER_DEVICE 8 +CONFIG_AIC7XXX_RESET_DELAY_MS 15000 +CONFIG_AIC7XXX_DEBUG_MASK 0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT y +CONFIG_SCSI_AIC7XXX_OLD n note +CONFIG_AIC79XX_CMDS_PER_DEVICE 32 +CONFIG_AIC79XX_RESET_DELAY_MS 5000 +CONFIG_AIC79XX_DEBUG_MASK 0 +CONFIG_AIC79XX_REG_PRETTY_PRINT y +CONFIG_SCSI_MVSAS_TASKLET n +CONFIG_MEGARAID_NEWGEN y +CONFIG_SCSI_MPT2SAS_MAX_SGE 128 +CONFIG_SCSI_MPT2SAS_LOGGING n +CONFIG_SCSI_MPT3SAS_MAX_SGE 128 +CONFIG_SCSI_MPT3SAS_LOGGING n +CONFIG_SCSI_FLASHPOINT y +CONFIG_SCSI_EATA_TAGGED_QUEUE y +CONFIG_SCSI_EATA_LINKED_COMMANDS y +CONFIG_SCSI_EATA_MAX_TAGS 16 +CONFIG_SCSI_IZIP_EPP16 n +CONFIG_SCSI_IZIP_SLOW_CTR n +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE 1 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS 16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS 64 +CONFIG_SCSI_SYM53C8XX_MMIO y +CONFIG_SCSI_IPR_TRACE y +CONFIG_SCSI_IPR_DUMP y +CONFIG_SCSI_DEBUG m +CONFIG_SCSI_VIRTIO m note +CONFIG_SCSI_GENERIC_NCR53C400 y 
+CONFIG_SCSI_U14_34F_TAGGED_QUEUE y +CONFIG_SCSI_U14_34F_LINKED_COMMANDS y +CONFIG_SCSI_U14_34F_MAX_TAGS 8 + +# Menu: Device Drivers >> SPI support +CONFIG_SPI y +CONFIG_SPI_XILINX n +CONFIG_SPI_SPIDEV m note +CONFIG_SPI_OMAP24XX y + +# Menu: Device Drivers >> Samsung thermal drivers + +# Menu: Device Drivers >> Sensors used on soc_camera driver + +# Menu: Device Drivers >> Serial ATA and Parallel ATA drivers +CONFIG_ATA y mark +CONFIG_ATA_VERBOSE_ERROR y +CONFIG_ATA_ACPI y +CONFIG_SATA_PMP y +CONFIG_SATA_AHCI_PLATFORM p policy<(arch armhf &/ value y) | value m> note +CONFIG_ATA_SFF y +CONFIG_ATA_BMDMA y +CONFIG_ATA_PIIX p policy<(arch i386 amd64 &/ value y) | value m> note +CONFIG_SATA_SVW p policy<(arch powerpc &/ value y) | value m> note +CONFIG_PATA_HPT3X3_DMA n note +CONFIG_PATA_SIS p policy<(arch i386 amd64 &/ value y) | value m> note +CONFIG_PATA_ACPI p policy<(arch i386 amd64 &/ value m)> note +CONFIG_ATA_GENERIC p policy<(arch i386 amd64 &/ value y) | value m> note + +# Menu: Device Drivers >> SoC Audio support for SuperH + +# Menu: Device Drivers >> Sonics Silicon Backplane + +# Menu: Device Drivers >> Sony MemoryStick card support + +# Menu: Device Drivers >> Sound card support +CONFIG_SOUND p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture +CONFIG_SND p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_SND_PCM_OSS n note + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ALSA for SoC audio support +CONFIG_SND_SOC p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_SND_OMAP_SOC p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_SND_OMAP_SOC_OMAP_TWL4030 p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ALSA for SoC audio support >> SoC Audio for Freescale MXS CPUs + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ALSA for SoC audio support >> SoC Audio for Freescale PowerPC CPUs + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ALSA for SoC audio support >> SoC Audio for Freescale i.MX CPUs + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ALSA for SoC audio support >> SoC Audio support for Ux500 platform + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ARM sound devices + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> Apple Onboard Audio driver + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> FireWire sound devices +CONFIG_SND_FIREWIRE y + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> GSC sound devices + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> Generic sound devices +CONFIG_SND_DRIVERS y + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> ISA sound devices +CONFIG_SND_ISA y + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> MIPS sound devices + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> PCI sound devices +CONFIG_SND_PCI y + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> PCI sound devices >> Intel HD Audio + +# Menu: Device Drivers >> Sound card support >> Advanced 
Linux Sound Architecture >> PCMCIA sound devices +CONFIG_SND_PCMCIA y + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> PowerPC sound devices + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> SPI sound devices +CONFIG_SND_SPI y + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> SUPERH sound devices + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> Sparc sound devices + +# Menu: Device Drivers >> Sound card support >> Advanced Linux Sound Architecture >> USB sound devices +CONFIG_SND_USB y +CONFIG_SND_USB_CAIAQ_INPUT y + +# Menu: Device Drivers >> Sound card support >> Open Sound System (DEPRECATED) + +# Menu: Device Drivers >> Sound card support >> Open Sound System (DEPRECATED) >> OSS sound modules + +# Menu: Device Drivers >> Speakup console speech + +# Menu: Device Drivers >> Staging drivers +CONFIG_STAGING y +CONFIG_PANEL_PARPORT 0 +CONFIG_PANEL_PROFILE 5 +CONFIG_PANEL_CHANGE_MESSAGE n +CONFIG_ZSMALLOC y note +CONFIG_ZRAM y mark note +CONFIG_SBE_PMCC4_NCOMM y + +# Menu: Device Drivers >> Staging drivers >> DSP Bridge driver + +# Menu: Device Drivers >> Staging drivers >> Data acquisition support (comedi) +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB 2048 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB 20480 + +# Menu: Device Drivers >> Staging drivers >> Data acquisition support (comedi) >> Comedi ISA and PC/104 drivers +CONFIG_COMEDI_ISA_DRIVERS y + +# Menu: Device Drivers >> Staging drivers >> Data acquisition support (comedi) >> Comedi PCI drivers +CONFIG_COMEDI_PCI_DRIVERS y + +# Menu: Device Drivers >> Staging drivers >> Data acquisition support (comedi) >> Comedi PCMCIA drivers +CONFIG_COMEDI_PCMCIA_DRIVERS y + +# Menu: Device Drivers >> Staging drivers >> Data acquisition support (comedi) >> Comedi USB drivers +CONFIG_COMEDI_USB_DRIVERS y + +# Menu: Device Drivers >> Staging drivers >> Data acquisition support (comedi) >> Comedi misc drivers +CONFIG_COMEDI_MISC_DRIVERS y + +# Menu: Device Drivers >> Staging drivers >> GCT GDM72xx WiMAX support +CONFIG_WIMAX_GDM72XX_QOS y +CONFIG_WIMAX_GDM72XX_K_MODE y +CONFIG_WIMAX_GDM72XX_WIMAX2 y +CONFIG_WIMAX_GDM72XX_USB y +CONFIG_WIMAX_GDM72XX_USB_PM y + +# Menu: Device Drivers >> Staging drivers >> Line6 USB support +CONFIG_LINE6_USB_IMPULSE_RESPONSE n + +# Menu: Device Drivers >> Staging drivers >> Media staging drivers +CONFIG_STAGING_MEDIA y +CONFIG_DT3155_CCIR n note +CONFIG_DT3155_STREAMING y + +# Menu: Device Drivers >> Staging drivers >> Media staging drivers >> Linux Infrared Remote Control IR receiver/transmitter drivers +CONFIG_LIRC_STAGING y +CONFIG_LIRC_SERIAL_TRANSMITTER y + +# Menu: Device Drivers >> TI VLYNQ + +# Menu: Device Drivers >> Temperature sensors + +# Menu: Device Drivers >> Texas Instruments WL128x FM driver (ST based) + +# Menu: Device Drivers >> Texas Instruments thermal drivers +CONFIG_OMAP4_THERMAL y +CONFIG_DRA752_THERMAL y + +# Menu: Device Drivers >> Triggers - standalone + +# Menu: Device Drivers >> USB Network Adapters +CONFIG_USB_USBNET p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_USB_NET_SMSC95XX p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_USB_KC2190 y note + +# Menu: Device Drivers >> USB Peripheral Controller +CONFIG_USB_M66592 n note +CONFIG_USB_DUMMY_HCD n flag + +# Menu: Device Drivers >> USB Physical Layer drivers +CONFIG_NOP_USB_XCEIV p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> USB support 
+CONFIG_USB_SUPPORT y + +# Menu: Device Drivers >> USB support >> Support for Host-side USB +CONFIG_USB y +CONFIG_USB_OTG n note mark +CONFIG_USB_OTG_WHITELIST n note +CONFIG_USB_OTG_BLACKLIST_HUB n note +CONFIG_USB_XHCI_HCD y note +CONFIG_USB_EHCI_HCD y note +CONFIG_USB_OHCI_HCD y note +CONFIG_USB_UHCI_HCD y note +CONFIG_USB_HCD_BCMA p policy<(arch armel armhf &/ value n) | value m> note +CONFIG_USB_HCD_SSB p policy<(arch armel armhf &/ value n) | value m> note +CONFIG_USB_MUSB_HDRC p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_USB_MUSB_TUSB6010 p policy<(arch armel armhf &/ value n) | value m> note +CONFIG_USB_SISUSBVGA_CON n note +CONFIG_USB_MUSB_OMAP2PLUS y +CONFIG_USB_INVENTRA_DMA p policy<(arch armel armhf &/ value y) | value n> note + +# Menu: Device Drivers >> USB support >> Support for Host-side USB >> USB DSL modem support + +# Menu: Device Drivers >> USB support >> Support for Host-side USB >> USB Serial Converter support +CONFIG_USB_SERIAL_GENERIC y +CONFIG_USB_SERIAL_SAFE_PADDED n +CONFIG_USB_SERIAL_DEBUG m note + +# Menu: Device Drivers >> USB support >> USB Gadget Support +CONFIG_USB_GADGET p policy<(arch armel armhf &/ value y) | value m> note +CONFIG_USB_GADGETFS m note +CONFIG_USB_FUNCTIONFS m note +CONFIG_USB_G_MULTI p policy<(arch armel armhf &/ value m) | value n> note + +# Menu: Device Drivers >> Ultra Wideband devices + +# Menu: Device Drivers >> Userspace I/O drivers + +# Menu: Device Drivers >> VFIO Non-Privileged userspace driver framework + +# Menu: Device Drivers >> VME bridge support + +# Menu: Device Drivers >> Virtio drivers +CONFIG_VIRTIO_PCI p policy<(arch i386 amd64 &/ value y) | (arch armel armhf &/ value n) | value m> note +CONFIG_VIRTIO_BALLOON p policy<(arch armel armhf &/ value n) | value m> note +CONFIG_VIRTIO_MMIO p policy<(arch armel armhf &/ value n) | value m> note +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES y + +# Menu: Device Drivers >> Virtualization drivers +CONFIG_VIRT_DRIVERS y + +# Menu: Device Drivers >> Voltage and Current Regulator Support +CONFIG_REGULATOR y +CONFIG_REGULATOR_FIXED_VOLTAGE p policy<(arch armel armhf &/ value y) | value m> note + +# Menu: Device Drivers >> Watchdog Timer Support +CONFIG_WATCHDOG y +CONFIG_WATCHDOG_CORE y + +# Menu: Device Drivers >> WiMAX Wireless Broadband devices + +# Menu: Device Drivers >> X86 Platform Specific Device Drivers +CONFIG_X86_PLATFORM_DEVICES y + +# Menu: Device Drivers >> Xen driver support +CONFIG_XEN_BALLOON y +CONFIG_XEN_SELFBALLOONING y +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG y +CONFIG_XEN_SCRUB_PAGES y +CONFIG_XEN_BACKEND y +CONFIG_XEN_COMPAT_XENFS y +CONFIG_XEN_SYS_HYPERVISOR y +CONFIG_XEN_ACPI_PROCESSOR p policy<(arch i386 amd64 &/ value y) | value m> +CONFIG_XEN_MCE_LOG y + +# Menu: Device Drivers >> iwl3945 / iwl4965 Debugging Options + +# Menu: Enable loadable module support +CONFIG_MODULES y +CONFIG_MODULE_FORCE_LOAD n +CONFIG_MODULE_UNLOAD y +CONFIG_MODULE_FORCE_UNLOAD n +CONFIG_MODVERSIONS y +CONFIG_MODULE_SRCVERSION_ALL y +CONFIG_MODULE_SIG y +CONFIG_MODULE_SIG_FORCE n +CONFIG_MODULE_SIG_ALL y +CONFIG_MODULE_SIG_SHA512 y + +# Menu: Enable the block layer +CONFIG_BLOCK y +CONFIG_BLK_DEV_THROTTLING y note + +# Menu: Executable file formats / Emulations +CONFIG_BINFMT_ELF y + +# Menu: Executable file formats / Emulations (x86) +CONFIG_IA32_EMULATION y +CONFIG_IA32_AOUT n note mark +CONFIG_X86_X32 y + +# Menu: File systems +set +tree 'File systems' ?flag EXPERIMENTAL ?type bool = n +set +tree 'File systems' ?flag EXPERIMENTAL ?type tristate = m note +set +tree 'File 
systems' !flag EXPERIMENTAL ?type tristate = m +set +tree 'File systems' ?pattern _POSIX_ACL$ !flag EXPERIMENTAL = y +set +tree 'File systems' ?pattern _SECURITY$ !flag EXPERIMENTAL = y +set +tree 'File systems' ?pattern _XATTR$ !flag EXPERIMENTAL = y +# +CONFIG_EXT2_FS n note +CONFIG_EXT3_FS n note +CONFIG_EXT4_FS y note +CONFIG_EXT4_USE_FOR_EXT23 y +CONFIG_REISERFS_CHECK n +CONFIG_REISERFS_PROC_INFO n +CONFIG_JFS_STATISTICS y +CONFIG_XFS_QUOTA y +CONFIG_XFS_RT y +CONFIG_GFS2_FS_LOCKING_DLM y +CONFIG_OCFS2_FS_STATS y +CONFIG_OCFS2_DEBUG_MASKLOG y +CONFIG_BTRFS_FS_POSIX_ACL y +CONFIG_FILE_LOCKING y +CONFIG_DNOTIFY y +CONFIG_INOTIFY_USER y +CONFIG_FANOTIFY y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS y +CONFIG_QUOTA y +CONFIG_QUOTA_NETLINK_INTERFACE y +CONFIG_PRINT_QUOTA_WARNING n +CONFIG_FUSE_FS y note + +# Menu: File systems >> CD-ROM/DVD Filesystems +CONFIG_JOLIET y +CONFIG_ZISOFS y + +# Menu: File systems >> Caches +CONFIG_FSCACHE_STATS y +CONFIG_FSCACHE_HISTOGRAM n +CONFIG_FSCACHE_OBJECT_LIST n +CONFIG_CACHEFILES_HISTOGRAM n + +# Menu: File systems >> DOS/FAT/NT Filesystems +CONFIG_VFAT_FS y +CONFIG_FAT_DEFAULT_CODEPAGE 437 +CONFIG_FAT_DEFAULT_IOCHARSET "iso8859-1" +CONFIG_NTFS_RW n + +# Menu: File systems >> Distributed Lock Manager (DLM) + +# Menu: File systems >> Miscellaneous filesystems +CONFIG_MISC_FILESYSTEMS y +CONFIG_ECRYPT_FS y note +CONFIG_JFFS2_FS_DEBUG 0 +CONFIG_JFFS2_FS_WRITEBUFFER y +CONFIG_JFFS2_FS_WBUF_VERIFY n +CONFIG_JFFS2_SUMMARY n +CONFIG_JFFS2_COMPRESSION_OPTIONS y +CONFIG_JFFS2_ZLIB y +CONFIG_JFFS2_LZO y +CONFIG_JFFS2_RTIME y +CONFIG_JFFS2_RUBIN n +CONFIG_JFFS2_CMODE_FAVOURLZO y +CONFIG_UBIFS_FS_ADVANCED_COMPR n +CONFIG_UBIFS_FS_LZO y +CONFIG_UBIFS_FS_ZLIB y +CONFIG_LOGFS n note +CONFIG_SQUASHFS_ZLIB y +CONFIG_SQUASHFS_LZO y +CONFIG_SQUASHFS_XZ y +CONFIG_SQUASHFS_4K_DEVBLK_SIZE n note +CONFIG_SQUASHFS_EMBEDDED n +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE 3 +CONFIG_ROMFS_BACKED_BY_BLOCK y +CONFIG_PSTORE y +CONFIG_PSTORE_CONSOLE n note +CONFIG_PSTORE_FTRACE n +CONFIG_F2FS_STAT_FS y + +# Menu: File systems >> Native language support +CONFIG_NLS y note +CONFIG_NLS_CODEPAGE_437 y + +# Menu: File systems >> Network File Systems +CONFIG_NETWORK_FILESYSTEMS y +CONFIG_NFS_V3_ACL y +CONFIG_NFS_SWAP y +CONFIG_NFS_V4_1 y +CONFIG_NFS_V4_2 y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN "kernel.org" +CONFIG_NFS_FSCACHE y +CONFIG_NFS_USE_LEGACY_DNS n +CONFIG_NFSD_V3 y +CONFIG_NFSD_V3_ACL y +CONFIG_NFSD_V4 y note +CONFIG_CIFS_STATS y +CONFIG_CIFS_STATS2 n +CONFIG_CIFS_WEAK_PW_HASH y +CONFIG_CIFS_UPCALL y +CONFIG_CIFS_POSIX y +CONFIG_CIFS_ACL y +CONFIG_CIFS_DEBUG y +CONFIG_CIFS_DEBUG2 n +CONFIG_CIFS_DFS_UPCALL y +CONFIG_CIFS_SMB2 y +CONFIG_CIFS_FSCACHE y +CONFIG_NCPFS_PACKET_SIGNING y +CONFIG_NCPFS_IOCTL_LOCKING y +CONFIG_NCPFS_STRONG y +CONFIG_NCPFS_NFS_NS y +CONFIG_NCPFS_OS2_NS y +CONFIG_NCPFS_SMALLDOS n +CONFIG_NCPFS_NLS y +CONFIG_NCPFS_EXTRAS y + +# Menu: File systems >> Pseudo filesystems +CONFIG_PROC_FS y +CONFIG_PROC_KCORE y +CONFIG_PROC_VMCORE y +CONFIG_PROC_SYSCTL y +CONFIG_PROC_PAGE_MONITOR y +CONFIG_SYSFS y +CONFIG_TMPFS y +CONFIG_HUGETLBFS y + +# Menu: Firmware Drivers +CONFIG_EDD y note + +# Menu: Firmware Drivers >> EFI (Extensible Firmware Interface) Support +CONFIG_EFI_VARS y note + +# Menu: Firmware Drivers >> Google Firmware Drivers + +# Menu: Firmware Drivers >> Google Firmware Drivers >> Google Firmware Drivers + +# Menu: Floating point emulation (arm) + +# Menu: General 
setup +CONFIG_KERNEL_GZIP y +CONFIG_IKCONFIG n note +CONFIG_SLUB_DEBUG y +CONFIG_RD_GZIP y +CONFIG_RD_BZIP2 y +CONFIG_RD_LZMA y +CONFIG_RD_XZ y +CONFIG_RD_LZO y +CONFIG_RD_LZ4 y +CONFIG_SLUB y + +# Menu: General setup (arm) +# Menu: General setup (powerpc) +# Menu: General setup (x86) +CONFIG_OPROFILE_EVENT_MULTIPLEX n +CONFIG_KPROBES y +CONFIG_JUMP_LABEL y +CONFIG_UPROBES y + +# Menu: General setup >> CPU/Task time and stats accounting + +# Menu: General setup >> Configure standard kernel features (expert users) +CONFIG_EXPERT y + +# Menu: General setup >> Control Group support +CONFIG_CGROUPS y +CONFIG_CGROUP_HUGETLB y +CONFIG_BLK_CGROUP y note + +# Menu: General setup >> Control Group support >> Group CPU scheduler +CONFIG_CGROUP_SCHED y +CONFIG_FAIR_GROUP_SCHED y +CONFIG_CFS_BANDWIDTH y note +CONFIG_RT_GROUP_SCHED y + +# Menu: General setup >> GCOV-based kernel profiling +CONFIG_GCOV_KERNEL n + +# Menu: General setup >> IRQ subsystem + +# Menu: General setup >> Kernel Performance Events And Counters + +# Menu: General setup >> Namespaces support +CONFIG_NAMESPACES y +CONFIG_UTS_NS y +CONFIG_IPC_NS y +CONFIG_USER_NS y +CONFIG_PID_NS y +CONFIG_NET_NS y + +# Menu: General setup >> RCU Subsystem +CONFIG_TREE_RCU y + +# Menu: General setup >> Timers subsystem + +# Menu: IO Schedulers +CONFIG_IOSCHED_DEADLINE y +CONFIG_IOSCHED_CFQ y +CONFIG_CFQ_GROUP_IOSCHED y +CONFIG_DEFAULT_DEADLINE y + +# Menu: Kernel Features +# Menu: Kernel Features (arm) +CONFIG_PREEMPT_VOLUNTARY y +CONFIG_COMPACTION y +CONFIG_MIGRATION y +CONFIG_KSM y +CONFIG_TRANSPARENT_HUGEPAGE y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS y + +CONFIG_CC_STACKPROTECTOR y mark + +# Menu: Kernel hacking +CONFIG_MAGIC_SYSRQ y +CONFIG_PANIC_ON_OOPS n note + +# Menu: Kernel hacking (arm) +# Menu: Kernel hacking (powerpc) +# Menu: Kernel hacking (x86) +CONFIG_STRICT_DEVMEM y +CONFIG_DEBUG_RODATA y +CONFIG_DEBUG_SET_MODULE_RONX y +CONFIG_EARLY_PRINTK y +CONFIG_DEBUG_NX_TEST n flag + +# Menu: Kernel hacking >> Compile-time checks and compiler options +CONFIG_DEBUG_FS y note +CONFIG_DEBUG_INFO y note +CONFIG_DEBUG_INFO_REDUCED n + +# Menu: Kernel hacking >> Debug Lockups and Hangs +CONFIG_LOCKUP_DETECTOR y + +# Menu: Kernel hacking >> Kernel debugging +CONFIG_DEBUG_KERNEL y note +CONFIG_SCHED_DEBUG y + +# Menu: Kernel hacking >> Kernel debugging >> KGDB: kernel debugger +CONFIG_KGDB y +CONFIG_KGDB_SERIAL_CONSOLE y note + +# Menu: Kernel hacking >> Lock Debugging (spinlocks, mutexes, etc...) 
+ +# Menu: Kernel hacking >> Memory Debugging + +# Menu: Kernel hacking >> Memory Debugging >> kmemcheck: trap use of uninitialized memory + +# Menu: Kernel hacking >> RCU Debugging +CONFIG_RCU_TORTURE_TEST n flag + +# Menu: Kernel hacking >> Runtime Testing +CONFIG_LKDTM n flag +CONFIG_BACKTRACE_SELF_TEST n flag + +# Menu: Kernel hacking >> Sample kernel code +CONFIG_SAMPLES n + +# Menu: Kernel hacking >> Tracers +CONFIG_FTRACE y +CONFIG_FUNCTION_TRACER y +CONFIG_FUNCTION_GRAPH_TRACER y +CONFIG_SCHED_TRACER y +CONFIG_FTRACE_SYSCALLS y +CONFIG_BRANCH_PROFILE_NONE y +CONFIG_STACK_TRACER y +CONFIG_BLK_DEV_IO_TRACE y +CONFIG_KPROBE_EVENT y +CONFIG_UPROBE_EVENT y +CONFIG_DYNAMIC_FTRACE y +CONFIG_FUNCTION_PROFILER y +CONFIG_FTRACE_STARTUP_TEST n flag +CONFIG_MMIOTRACE y +CONFIG_MMIOTRACE_TEST n flag +CONFIG_RING_BUFFER_BENCHMARK n flag +CONFIG_RING_BUFFER_STARTUP_TEST n flag + +# Menu: Kernel hacking >> printk and dmesg options +CONFIG_PRINTK_TIME y +CONFIG_DYNAMIC_DEBUG y + +# Menu: Kernel options +CONFIG_APM_EMULATION p policy<(arch armel armhf &/ value n) | value m> note + +# Menu: Kernel options (powerpc) +CONFIG_KEXEC y +CONFIG_CRASH_DUMP y + +# Menu: Library routines +CONFIG_CRC16 y note +CONFIG_CRC_T10DIF y note +CONFIG_CRC32 y note +CONFIG_CRC32_SELFTEST n flag +CONFIG_XZ_DEC y note +CONFIG_FONTS p policy<(arch armel armhf &/ value y) | value n> +CONFIG_FONT_8x8 y +CONFIG_FONT_8x16 y +CONFIG_FONT_6x11 n +CONFIG_FONT_7x14 n +CONFIG_FONT_PEARL_8x8 n +CONFIG_FONT_ACORN_8x8 y +CONFIG_FONT_MINI_4x6 n +CONFIG_FONT_SUN8x16 n +CONFIG_FONT_SUN12x22 n +CONFIG_FONT_10x18 n + +# Menu: Networking options +CONFIG_PACKET y note +CONFIG_UNIX y note +CONFIG_NETWORK_SECMARK y +CONFIG_ATM_CLIP_NO_ICMP n +CONFIG_ATM_BR2684_IPFILTER n +CONFIG_BRIDGE_IGMP_SNOOPING y +CONFIG_VLAN_8021Q_GVRP y +CONFIG_IPX_INTERN n +CONFIG_IPDDP_ENCAP y +CONFIG_X25 m note +CONFIG_LAPB m note +CONFIG_IEEE802154 m note +CONFIG_MAC802154 - note +CONFIG_DCB y +CONFIG_DNS_RESOLVER y note +CONFIG_BATMAN_ADV_BLA y +CONFIG_BATMAN_ADV_DAT y +CONFIG_BPF_JIT y note +CONFIG_COPS_DAYNA y +CONFIG_COPS_TANGENT y + +# Menu: Networking options >> DCCP CCIDs Configuration + +# Menu: Networking options >> DCCP Kernel Hacking + +# Menu: Networking options >> Network packet filtering framework (Netfilter) +CONFIG_NETFILTER y mark + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> Advanced netfilter configuration +CONFIG_NETFILTER_ADVANCED y +CONFIG_BRIDGE_NETFILTER y + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> Advanced netfilter configuration >> DECnet: Netfilter Configuration + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> Core Netfilter Configuration +CONFIG_NF_CONNTRACK_MARK y +CONFIG_NF_CONNTRACK_SECMARK y +CONFIG_NF_CONNTRACK_ZONES y +CONFIG_NF_CONNTRACK_PROCFS n flag +CONFIG_NF_CONNTRACK_EVENTS y +CONFIG_NF_CONNTRACK_TIMEOUT y +CONFIG_NF_CONNTRACK_TIMESTAMP y +CONFIG_NETFILTER_NETLINK_QUEUE_CT y + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> Ethernet Bridge tables (ebtables) support +CONFIG_BRIDGE_EBT_ULOG n flag + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> IP set support +CONFIG_IP_SET_MAX 256 + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> IP virtual server support +CONFIG_IP_VS_IPV6 y +CONFIG_IP_VS_TAB_BITS 12 +CONFIG_IP_VS_PROTO_TCP y +CONFIG_IP_VS_PROTO_UDP y +CONFIG_IP_VS_PROTO_ESP y +CONFIG_IP_VS_PROTO_AH y 
+CONFIG_IP_VS_PROTO_SCTP y +CONFIG_IP_VS_SH_TAB_BITS 8 +CONFIG_IP_VS_NFCT y + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> IP: Netfilter Configuration + +# Menu: Networking options >> Network packet filtering framework (Netfilter) >> IPv6: Netfilter Configuration + +# Menu: Networking options >> Network testing +CONFIG_NET_DROP_MONITOR n + +# Menu: Networking options >> QoS and/or fair queueing +CONFIG_NET_SCHED y mark +CONFIG_CLS_U32_PERF n +CONFIG_CLS_U32_MARK y +CONFIG_NET_EMATCH y +CONFIG_NET_EMATCH_STACK 32 +CONFIG_NET_CLS_ACT y +CONFIG_GACT_PROB y +CONFIG_NET_CLS_IND n flag + +# Menu: Networking options >> TCP/IP networking +CONFIG_INET y note +CONFIG_IP_MULTICAST y +CONFIG_IP_ADVANCED_ROUTER y +CONFIG_IP_FIB_TRIE_STATS y +CONFIG_IP_MULTIPLE_TABLES y +CONFIG_IP_ROUTE_MULTIPATH y +CONFIG_IP_ROUTE_VERBOSE y +CONFIG_IP_PNP y +CONFIG_IP_PNP_DHCP y +CONFIG_IP_PNP_BOOTP n +CONFIG_IP_PNP_RARP n +CONFIG_NET_IPGRE_BROADCAST y note +CONFIG_IP_MROUTE y +CONFIG_IP_MROUTE_MULTIPLE_TABLES n +CONFIG_IP_PIMSM_V1 y +CONFIG_IP_PIMSM_V2 y +CONFIG_SYN_COOKIES y mark +CONFIG_INET_LRO y note +CONFIG_TCP_MD5SIG y mark +CONFIG_NETLABEL y mark +CONFIG_RDS m note + +# Menu: Networking options >> TCP/IP networking >> Layer Two Tunneling Protocol (L2TP) +CONFIG_L2TP_V3 y + +# Menu: Networking options >> TCP/IP networking >> TCP: advanced congestion control +CONFIG_TCP_CONG_ADVANCED y +CONFIG_TCP_CONG_CUBIC y note +CONFIG_DEFAULT_CUBIC y + +# Menu: Networking options >> TCP/IP networking >> The IPv6 protocol +CONFIG_IPV6 y note +CONFIG_IPV6_ROUTER_PREF y +CONFIG_IPV6_ROUTE_INFO y +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION m note +CONFIG_IPV6_SIT_6RD y +CONFIG_IPV6_MULTIPLE_TABLES y +CONFIG_IPV6_SUBTREES y +CONFIG_IPV6_MROUTE y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES y +CONFIG_IPV6_PIMSM_V2 y + +# Menu: Networking options >> TCP/IP networking >> The SCTP Protocol +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 y +CONFIG_SCTP_COOKIE_HMAC_MD5 y +CONFIG_SCTP_COOKIE_HMAC_SHA1 y + +# Menu: Networking options >> TCP/IP networking >> The TIPC Protocol +CONFIG_TIPC m note +CONFIG_TIPC_PORTS 8191 + +# Menu: Networking support +CONFIG_NET y + +# Menu: Networking support >> Amateur Radio support +CONFIG_HAMRADIO y + +# Menu: Networking support >> Amateur Radio support >> Amateur Radio AX.25 Level 2 protocol + +# Menu: Networking support >> Amateur Radio support >> Amateur Radio AX.25 Level 2 protocol >> AX.25 network device drivers + +# Menu: Networking support >> Bluetooth subsystem support +CONFIG_BT m + +# Menu: Networking support >> Bluetooth subsystem support >> Bluetooth device drivers + +# Menu: Networking support >> CAIF support + +# Menu: Networking support >> CAN bus subsystem support + +# Menu: Networking support >> IrDA (infrared) subsystem support + +# Menu: Networking support >> IrDA (infrared) subsystem support >> Infrared-port device drivers +CONFIG_DONGLE y + +# Menu: Networking support >> NFC subsystem support +CONFIG_NFC m note mark +CONFIG_NFC_NCI m note + +# Menu: Networking support >> NFC subsystem support >> Near Field Communication (NFC) devices + +# Menu: Networking support >> Plan 9 Resource Sharing Support (9P2000) + +# Menu: Networking support >> RF switch subsystem support +CONFIG_RFKILL y + +# Menu: Networking support >> WiMAX Wireless Broadband support + +# Menu: Networking support >> Wireless +CONFIG_WIRELESS y + +# Menu: Networking support >> Wireless >> Generic IEEE 802.11 Networking Stack (mac80211) + +# Menu: Networking support >> Wireless >> Generic IEEE 802.11 
Networking Stack (mac80211) >> Select mac80211 debugging features + +# Menu: Partition Types +set +tree 'Partition Types' ?pattern _PARTITION$ !flag EXPERIMENTAL = y +set +tree 'Partition Types' ?pattern _DISKLABEL$ !flag EXPERIMENTAL = y + +# Menu: Platform support (powerpc) +CONFIG_KVM_GUEST y + +# Menu: Platform support >> 82xx-based boards (PQ II) (powerpc) + +# Menu: Platform support >> 83xx-based boards (powerpc) + +# Menu: Platform support >> 86xx-based boards (powerpc) + +# Menu: Platform support >> Cell Broadband Engine options (powerpc) + +# Menu: Platform support >> Freescale Book-E Machine Type (powerpc) + +# Menu: Platform support >> Freescale Ethernet driver platform-specific options (powerpc) + +# Menu: Platform support >> MPC8xx CPM Options (powerpc) + +# Menu: Platform support >> PA Semi SoC-based platforms (powerpc) + +# Menu: Platform support >> PA Semi SoC-based platforms >> PA Semi PWRficient options (powerpc) + +# Menu: Platform support >> Sony PS3 (powerpc) + +# Menu: Platform support >> Sony PS3 >> PS3 Platform Options (powerpc) + +# Menu: Platform support >> WSP platform selection (powerpc) + +# Menu: Power management and ACPI options +CONFIG_SUSPEND y +CONFIG_SUSPEND_FREEZER y +CONFIG_HIBERNATION y +CONFIG_APM_EMULATION p policy<(arch armel armhf &/ value n) | value m> note + +# Menu: Power management and ACPI options >> ACPI (Advanced Configuration and Power Interface) Support +CONFIG_ACPI y mark +CONFIG_ACPI_AC y note +CONFIG_ACPI_BATTERY y note +CONFIG_ACPI_BUTTON y note +CONFIG_ACPI_FAN y note +CONFIG_ACPI_PROCESSOR y note +CONFIG_ACPI_THERMAL y note +CONFIG_ACPI_CONTAINER y note +CONFIG_ACPI_HED y note +CONFIG_ACPI_CUSTOM_METHOD n note + +# Menu: Power management and ACPI options >> APM (Advanced Power Management) BIOS support (x86) +CONFIG_APM m mark + +# Menu: Power management and ACPI options >> Memory power savings + +# Menu: Power management options +CONFIG_APM_EMULATION p policy<(arch armel armhf &/ value n) | value m> note + +# Menu: Processor support (powerpc) +CONFIG_SMP y + +# Menu: Processor type and features + +# Menu: Processor type and features (x86) +CONFIG_CC_STACKPROTECTOR y mark +CONFIG_KEXEC y +CONFIG_CRASH_DUMP y +CONFIG_RELOCATABLE y +CONFIG_TOSHIBA n note + +# Menu: Processor type and features >> Linux guest support (x86) +CONFIG_HYPERVISOR_GUEST y +CONFIG_PARAVIRT y +CONFIG_KVM_GUEST y + +# Menu: Security options +CONFIG_KEYS y +CONFIG_PERSISTENT_KEYRINGS y +CONFIG_TRUSTED_KEYS y note +CONFIG_ENCRYPTED_KEYS y note +CONFIG_SECURITY_DMESG_RESTRICT n +CONFIG_SECURITY y mark +CONFIG_SECURITYFS y +CONFIG_SECURITY_NETWORK y +CONFIG_SECURITY_NETWORK_XFRM n +CONFIG_SECURITY_PATH y +CONFIG_INTEL_TXT p policy<(arch i386 amd64 &/ value y) | value n> +CONFIG_LSM_MMAP_MIN_ADDR 0 +CONFIG_SECURITY_SELINUX y +CONFIG_SECURITY_SELINUX_BOOTPARAM y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE 0 +CONFIG_SECURITY_SELINUX_DISABLE y note +CONFIG_SECURITY_SELINUX_DEVELOP y +CONFIG_SECURITY_SELINUX_AVC_STATS y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE 1 +CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX n +CONFIG_SECURITY_SMACK y +CONFIG_SECURITY_TOMOYO y +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY 2048 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG 1024 +CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER n +CONFIG_SECURITY_TOMOYO_POLICY_LOADER "/sbin/tomoyo-init" +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER "/sbin/init" +CONFIG_SECURITY_APPARMOR y +CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE 1 +CONFIG_SECURITY_YAMA y +CONFIG_SECURITY_YAMA_STACKED y +CONFIG_INTEGRITY_SIGNATURE y +CONFIG_IMA
n note +CONFIG_EVM y +CONFIG_DEFAULT_SECURITY_APPARMOR y + +# Menu: System Type (arm) +CONFIG_ARCH_MULTIPLATFORM p policy<(arch armhf &/ value y) | value n> + +# Menu: System Type >> ARM Ltd. Versatile Express family (arm) + +# Menu: System Type >> ARM Ltd. Versatile Express family >> Versatile Express platform type (arm) + +# Menu: System Type >> Atmel AT91 System-on-Chip (arm) + +# Menu: System Type >> Atmel AT91 System-on-Chip >> Atmel Non-DT world (arm) + +# Menu: System Type >> Broadcom SoC Selection (arm) + +# Menu: System Type >> CLPS711X/EP721X/EP731X Implementations (arm) + +# Menu: System Type >> CSR SiRF atlas6/primaII/Marco/Polo Specific Features (arm) + +# Menu: System Type >> Cavium Networks CNS3XXX family (arm) + +# Menu: System Type >> Cavium Networks CNS3XXX family >> CNS3XXX platform type (arm) + +# Menu: System Type >> Cirrus EP93xx Implementation Options (arm) + +# Menu: System Type >> Cortina Systems Gemini Implementations (arm) + +# Menu: System Type >> Footbridge Implementations (arm) + +# Menu: System Type >> Freescale i.MX family (arm) + +# Menu: System Type >> Freescale i.MX family >> Freescale i.MX support (arm) + +# Menu: System Type >> IOP13XX Implementation Options (arm) + +# Menu: System Type >> IOP32x Implementation Options (arm) + +# Menu: System Type >> IOP33x Implementation Options (arm) + +# Menu: System Type >> Integrator Options (arm) + +# Menu: System Type >> Intel IXP4xx Implementation Options (arm) + +# Menu: System Type >> Intel PXA2xx/PXA3xx Implementations (arm) + +# Menu: System Type >> Kendin/Micrel KS8695 Implementations (arm) + +# Menu: System Type >> Marvell Dove Implementations (arm) + +# Menu: System Type >> Marvell Kirkwood Implementations (arm) + +# Menu: System Type >> Marvell MV78xx0 Implementations (arm) + +# Menu: System Type >> Marvell PXA168/910/MMP2 Implmentations (arm) + +# Menu: System Type >> Marvell SOC with device tree (arm) + +# Menu: System Type >> Multiple platform selection (arm) + +# Menu: System Type >> NUC950 Machines (arm) + +# Menu: System Type >> NUC960 Machines (arm) + +# Menu: System Type >> NVIDIA Tegra (arm) + +# Menu: System Type >> NVIDIA Tegra >> NVIDIA Tegra options (arm) + +# Menu: System Type >> NetX Implementations (arm) + +# Menu: System Type >> Nomadik boards (arm) + +# Menu: System Type >> Orion Implementations (arm) + +# Menu: System Type >> Qualcomm MSM Board Type (arm) + +# Menu: System Type >> RealView platform type (arm) + +# Menu: System Type >> S5PC110 Machines (arm) + +# Menu: System Type >> S5PV210 Machines (arm) + +# Menu: System Type >> SA11x0 Implementations (arm) + +# Menu: System Type >> SAMSUNG EXYNOS SoCs Support (arm) + +# Menu: System Type >> SAMSUNG S3C24XX SoCs Support (arm) + +# Menu: System Type >> ST SPEAr Family (arm) + +# Menu: System Type >> ST-Ericsson AB U300/U335 Platform (arm) + +# Menu: System Type >> STMicroelectronics Consumer Electronics SOCs with Device Trees (arm) + +# Menu: System Type >> SuperH / SH-Mobile Driver Options + +# Menu: System Type >> TI DaVinci Implementations (arm) + +# Menu: System Type >> TI OMAP Common Features (arm) + +# Menu: System Type >> TI OMAP1 specific features (arm) + +# Menu: System Type >> TI OMAP2/3/4 Specific Features (arm) + +# Menu: System Type >> Timer and clock configuration (arm) + +# Menu: System Type >> Use 8-bit SDHCI bus width (arm) + +# Menu: System Type >> Ux500 target platform (boards) (arm) + +# Menu: System Type >> Versatile platform type (arm) + +# Menu: System Type >> W90P910 Machines (arm) + +# Menu: Ubuntu Supplied 
Third-Party Device Drivers + +# Menu: Userspace binary formats +CONFIG_BINFMT_ELF y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS y +CONFIG_BINFMT_SCRIPT y +CONFIG_BINFMT_MISC y +CONFIG_COREDUMP y + +# Menu: Userspace binary formats (arm) + +# Menu: Virtualization +# Menu: Virtualization (arm) +# Menu: Virtualization (powerpc) +# Menu: Virtualization (x86) +CONFIG_VIRTUALIZATION y +CONFIG_KVM p policy<(arch powerpc &/ value y) | value m> +CONFIG_KVM_MMU_AUDIT n +CONFIG_KVM_DEVICE_ASSIGNMENT y +CONFIG_LGUEST n + +# temporarily disabled options -- build failures. +#CONFIG_TI_CPSW p policy<(arch armel armhf &/ value n) | value m> flag +#CONFIG_USB_MUSB_DSPS p policy<(arch armel armhf &/ value n) | value m> flag +CONFIG_LIS3L02DQ p policy<(arch armhf &/ value n) | value m> flag +CONFIG_EZX_PCAP p policy<(arch armhf &/ value n) | value m> flag +#CONFIG_TOUCHSCREEN_EGALAX p policy<(arch armel armhf &/ value n) | value m> flag note +#CONFIG_TOUCHSCREEN_EETI p policy<(arch armel armhf &/ value n) | value m> flag note +#CONFIG_SENSORS_AK8975 p policy<(arch armel armhf &/ value n) | value m> flag note +CONFIG_PPC_EPAPR_HV_BYTECHAN n flag note + +# Ensure DEPRECATED options are off. +set +all ?flag DEPRECATED !mark annotated ?type bool = n +set +all ?flag DEPRECATED !mark annotated ?type tristate = n + +# Ensure DANGEROUS things are turned off. +set +all ?flag DANGEROUS = n --- linux-3.13.0.orig/debian.master/config/arm64/config.common.arm64 +++ linux-3.13.0/debian.master/config/arm64/config.common.arm64 @@ -0,0 +1,194 @@ +# +# Config options for config.common.arm64 automatically generated by splitconfig.pl +# +CONFIG_64BIT=y +# CONFIG_AHCI_IMX is not set +CONFIG_AIC79XX_RESET_DELAY_MS=5000 +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_PIIX=m +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_BCH=m +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_COMEDI_ISA_DRIVERS=y +CONFIG_COMPACTION=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_DEADLINE=y +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_EXYNOS_VIDEO=y +# CONFIG_EZX_PCAP is not set +CONFIG_FB_ATY128=m +CONFIG_FB_BACKLIGHT=y +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_DDC=m +# CONFIG_FB_MACMODES is not set +CONFIG_FB_RADEON=m +CONFIG_FB_SVGALIB=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_VOODOO1=m +# CONFIG_FMAN_P1023 is not set +CONFIG_FMAN_P3040_P4080_P5020=y +# CONFIG_FONTS is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_FRAME_WARN=1024 +CONFIG_FSL_FMAN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_PHY=y +CONFIG_GPIO_GENERIC=m +# CONFIG_GPIO_TWL6040 is not set +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +# CONFIG_HAVE_AOUT is not set +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HUGETLB_PAGE=y +# CONFIG_HW_RANDOM_ATMEL is not set +CONFIG_HZ=100 +CONFIG_I2C_ALGOBIT=m +# CONFIG_IKCONFIG is not set +CONFIG_IMA=y +CONFIG_INFINIBAND_AMSO1100=m +# CONFIG_IOMMU_SUPPORT is not set +# CONFIG_IPMI_SI_PROBE_DEFAULTS is not set +# CONFIG_IRQ_DOMAIN_DEBUG is not set +# CONFIG_KSM is not set +CONFIG_KVM=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LIBERTAS_MESH=y +# CONFIG_LIS3L02DQ is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# 
CONFIG_MFD_TMIO is not set +CONFIG_MII=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MTD=m +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CMDLINE_PARTS=m +CONFIG_MTD_NAND=m +CONFIG_MTD_NAND_BCH=m +CONFIG_MTD_NAND_ECC=m +CONFIG_MTD_NAND_IDS=m +CONFIG_MTD_OF_PARTS=m +CONFIG_MTD_SM_COMMON=m +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NET_TULIP=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=8 +CONFIG_PATA_SIS=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_SYSCALL=y +CONFIG_PERCPU_TEST=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_PHY_EXYNOS_DP_VIDEO=m +CONFIG_PMIC_ADP5520=y +CONFIG_PMIC_DA903X=y +CONFIG_PPS=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RCU is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PROBE_EVENTS is not set +# CONFIG_PSTORE_CONSOLE is not set +CONFIG_PSTORE_RAM=m +CONFIG_PTP_1588_CLOCK=m +CONFIG_RBTREE_TEST=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_FAST_NO_HZ=y +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_REED_SOLOMON=m +CONFIG_REGULATOR_FIXED_VOLTAGE=m +# CONFIG_REGULATOR_S2MPS11 is not set +CONFIG_REISERFS_FS=m +CONFIG_RFKILL=y +# CONFIG_RT2X00_LIB_DEBUGFS is not set +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_PL031=m +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_HIGHBANK is not set +CONFIG_SATA_SVW=m +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SERIAL_8250_PCI=m +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_APBPS2=m +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_OLPC_APSP=m +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_RAW=m +CONFIG_SMC91X=y +# CONFIG_SOUND is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPI_PXA2XX_PCI=m +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SRAM=y +CONFIG_STAGING=y +CONFIG_STANDALONE=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_SWIOTLB=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_SYS_HYPERVISOR=y +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TIGON3=m +CONFIG_TIMER_STATS=y +# CONFIG_TRANSPARENT_HUGEPAGE is not set +# CONFIG_TWL4030_CORE is not set +CONFIG_TWL6040_CORE=y +CONFIG_UIO_AEC=m +CONFIG_UIO_CIF=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_MF624=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_SERCOS3=m +CONFIG_UNUSED_SYMBOLS=y +# CONFIG_USB_SUPPORT is not set +CONFIG_VGASTATE=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_PCI=m +CONFIG_WIL6210=m +CONFIG_WIMAX_GDM72XX_SDIO=y +CONFIG_XEN=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_TEST=m +CONFIG_XZ_DEC_X86=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DMA_FLAG=0 --- linux-3.13.0.orig/debian.master/config/arm64/config.flavour.generic +++ linux-3.13.0/debian.master/config/arm64/config.flavour.generic @@ -0,0 +1,3 @@ +# +# Config options for config.flavour.generic automatically generated by splitconfig.pl +# --- linux-3.13.0.orig/debian.master/config/armhf/config.common.armhf +++ linux-3.13.0/debian.master/config/armhf/config.common.armhf @@ -0,0 +1,287 @@ +# +# Config options for config.common.armhf 
automatically generated by splitconfig.pl +# +CONFIG_AIC79XX_RESET_DELAY_MS=5000 +# CONFIG_APM_EMULATION is not set +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +CONFIG_ASYNC_TX_DMA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_PIIX=y +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_BATTERY_TWL4030_MADC=m +CONFIG_BCH=y +CONFIG_CMDLINE="" +CONFIG_COMEDI_ISA_DRIVERS=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CRASH_DUMP=y +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_DEADLINE=y +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_MMAP_MIN_ADDR=32768 +CONFIG_EXYNOS_VIDEO=y +# CONFIG_EZX_PCAP is not set +CONFIG_FB_ATY128=m +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +# CONFIG_FB_MACMODES is not set +CONFIG_FB_RADEON=m +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=y +CONFIG_FB_UDL=m +CONFIG_FB_VOODOO1=m +# CONFIG_FMAN_P1023 is not set +CONFIG_FMAN_P3040_P4080_P5020=y +CONFIG_FONTS=y +CONFIG_FRAME_WARN=1024 +CONFIG_FSL_FMAN=y +# CONFIG_FUNCTION_TRACER is not set +CONFIG_GPIO_TWL4030=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +# CONFIG_HW_RANDOM_ATMEL is not set +CONFIG_HZ=250 +# CONFIG_HZ_100 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ_250=y +CONFIG_I2C_ALGOBIT=m +# CONFIG_IDE is not set +# CONFIG_IKCONFIG is not set +CONFIG_IMA=y +CONFIG_INFINIBAND_AMSO1100=m +CONFIG_IOMMU_SUPPORT=y +# CONFIG_IPMI_SI_PROBE_DEFAULTS is not set +# CONFIG_IRQ_DOMAIN_DEBUG is not set +# CONFIG_IRQ_FORCED_THREADING_DEFAULT is not set +# CONFIG_KSM is not set +CONFIG_KVM=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LIBERTAS_MESH is not set +# CONFIG_LIS3L02DQ is not set +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_MFD_TMIO=y +CONFIG_MII=m +CONFIG_MMC_BLOCK=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MTD=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_BCH=y +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_NAND_IDS=y +CONFIG_MTD_OF_PARTS=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NET_TULIP=y +CONFIG_NOP_USB_XCEIV=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=4 +CONFIG_NVRAM=m +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PARIDE_EPATC8=y +CONFIG_PATA_SIS=y +# CONFIG_PCCARD is not set +CONFIG_PCIEPORTBUS=y +CONFIG_PERCPU_TEST=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_PHY_EXYNOS_DP_VIDEO=m +CONFIG_PMIC_ADP5520=y +CONFIG_PMIC_DA903X=y +CONFIG_PM_DEBUG=y +CONFIG_PM_RUNTIME=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PPS=y +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RCU is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_PROBE_EVENTS=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_PTP_1588_CLOCK=y +CONFIG_RBTREE_TEST=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_FANOUT=32 +CONFIG_RCU_FAST_NO_HZ=y +# CONFIG_RCU_NOCB_CPU is not set +# CONFIG_RCU_USER_QS is not set +CONFIG_REED_SOLOMON=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +# CONFIG_REGULATOR_S2MPS11 is not set +CONFIG_REISERFS_FS=m +CONFIG_RFKILL=y +# CONFIG_RT2800USB_UNKNOWN is not set +CONFIG_RT2X00_LIB_DEBUGFS=y +CONFIG_RTC_DRV_CMOS=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_PL031=y +CONFIG_SAMSUNG_USB2PHY=m 
+CONFIG_SAMSUNG_USB3PHY=m +CONFIG_SATA_HIGHBANK=y +CONFIG_SATA_SVW=m +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SCHED_SMT is not set +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_APBPS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_OLPC_APSP=m +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_RAW=m +CONFIG_SMC91X=m +CONFIG_SND=y +CONFIG_SND_COMPRESS_OFFLOAD=y +# CONFIG_SND_OPL4_LIB_SEQ is not set +CONFIG_SND_PCM=y +# CONFIG_SND_SBAWE_SEQ is not set +CONFIG_SND_SOC=y +CONFIG_SND_SOC_FSL_SSI=y +CONFIG_SND_SOC_I2C_AND_SPI=y +CONFIG_SND_TIMER=y +CONFIG_SOUND=y +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SRAM=y +# CONFIG_STACK_TRACER is not set +CONFIG_STAGING=y +CONFIG_STANDALONE=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_SWIOTLB=y +CONFIG_SYSCTL_SYSCALL=y +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_STRING_HELPERS=m +# CONFIG_TICK_CPU_ACCOUNTING is not set +CONFIG_TIGON3=m +CONFIG_TIMER_STATS=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TWL4030_CORE=y +CONFIG_TWL6040_CORE=y +CONFIG_UIO_AEC=m +CONFIG_UIO_CIF=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_MF624=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_SERCOS3=m +CONFIG_UNUSED_SYMBOLS=y +CONFIG_USB_ADUTUX=m +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_C67X00_HCD=m +CONFIG_USB_CATC=m +CONFIG_USB_CHIPIDEA=y +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_DWC3=m +CONFIG_USB_DYNAMIC_MINORS=y +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_EMI26=m +CONFIG_USB_EMI62=m +CONFIG_USB_EZUSB_FX2=m +CONFIG_USB_FOTG210_HCD=m +# CONFIG_USB_FSL_USB2 is not set +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_FUSBH200_HCD=m +CONFIG_USB_GADGET=y +CONFIG_USB_G_MULTI=m +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_SSB is not set +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_IPHETH=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_ISP116X_HCD=m +CONFIG_USB_ISP1301=m +CONFIG_USB_ISP1362_HCD=m +CONFIG_USB_ISP1760_HCD=m +CONFIG_USB_KAWETH=m +CONFIG_USB_LCD=m +CONFIG_USB_LD=m +CONFIG_USB_LED=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USB_MUSB_DSPS is not set +CONFIG_USB_MUSB_HDRC=y +# CONFIG_USB_MUSB_TUSB6010 is not set +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_OTG is not set +CONFIG_USB_OXU210HP_HCD=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_PRINTER=m +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RIO500=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SL811_HCD=m +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_TEST=m +CONFIG_USB_TMC=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_USS720=m +CONFIG_USB_WUSB_CBAF=m +CONFIG_USB_YUREX=m +# CONFIG_VFIO_IOMMU_TYPE1 is not set 
+CONFIG_VIRTIO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_WIL6210 is not set +# CONFIG_WIMAX_GDM72XX_SDIO is not set +# CONFIG_XEN is not set +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_TEST=m +CONFIG_XZ_DEC_X86=y +CONFIG_ZONE_DMA_FLAG=1 --- linux-3.13.0.orig/debian.master/config/armhf/config.flavour.generic +++ linux-3.13.0/debian.master/config/armhf/config.flavour.generic @@ -0,0 +1,38 @@ +# +# Config options for config.flavour.generic automatically generated by splitconfig.pl +# +CONFIG_AC97_BUS=y +CONFIG_AHCI_IMX=y +CONFIG_ARCH_MXC=y +CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED=y +CONFIG_ARCH_NR_GPIO=1024 +CONFIG_ARCH_OMAP3=y +CONFIG_ARCH_OMAP4=y +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +CONFIG_ARCH_TEGRA=y +CONFIG_ARM_ATAG_DTB_COMPAT=y +# CONFIG_ARM_LPAE is not set +# CONFIG_COMPACTION is not set +# CONFIG_CPU_IDLE is not set +CONFIG_DRM_TILCDC=m +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_DDC=m +CONFIG_FB_SVGALIB=m +CONFIG_FORCE_MAX_ZONEORDER=12 +CONFIG_GENERIC_PHY=y +CONFIG_GPIO_GENERIC=y +CONFIG_GPIO_TWL6040=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_MFD_TI_AM335X_TSCADC=m +CONFIG_MTD_SM_COMMON=m +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_PCI_SYSCALL=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_SERIAL_FSL_LPUART=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SOC_AM33XX=y +CONFIG_SPI_PXA2XX_PCI=m +CONFIG_USB_DWC3_OMAP=m +CONFIG_VGASTATE=m --- linux-3.13.0.orig/debian.master/config/armhf/config.flavour.generic-lpae +++ linux-3.13.0/debian.master/config/armhf/config.flavour.generic-lpae @@ -0,0 +1,38 @@ +# +# Config options for config.flavour.generic-lpae automatically generated by splitconfig.pl +# +CONFIG_AC97_BUS=m +# CONFIG_AHCI_IMX is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_ARCH_NR_GPIO=0 +# CONFIG_ARCH_OMAP3 is not set +# CONFIG_ARCH_OMAP4 is not set +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARM_ATAG_DTB_COMPAT is not set +CONFIG_ARM_LPAE=y +CONFIG_COMPACTION=y +CONFIG_CPU_IDLE=y +# CONFIG_DRM_TILCDC is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_SVGALIB is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_GENERIC_PHY=m +CONFIG_GPIO_GENERIC=m +CONFIG_GPIO_TWL6040=m +CONFIG_HUGETLB_PAGE=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MTD_SM_COMMON is not set +CONFIG_MUSB_PIO_ONLY=y +# CONFIG_PCI_SYSCALL is not set +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SND_EMU10K1_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SOC_AM33XX is not set +# CONFIG_SPI_PXA2XX_PCI is not set +# CONFIG_USB_DWC3_OMAP is not set +# CONFIG_VGASTATE is not set --- linux-3.13.0.orig/debian.master/config/config.common.ports +++ linux-3.13.0/debian.master/config/config.common.ports @@ -0,0 +1,3 @@ +# +# Common config options automatically generated by splitconfig.pl +# --- linux-3.13.0.orig/debian.master/config/config.common.ubuntu +++ linux-3.13.0/debian.master/config/config.common.ubuntu @@ -0,0 +1,7502 @@ +# +# Common config options automatically generated by splitconfig.pl +# +CONFIG_32BIT=y +CONFIG_3C515=m +# CONFIG_40x is not set +# CONFIG_44x is not set +CONFIG_60XX_WDT=m +CONFIG_6PACK=m +CONFIG_6xx=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +CONFIG_8139TOO_8129=y 
+CONFIG_8139TOO_PIO=y +# CONFIG_8139TOO_TUNE_TWISTER is not set +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_88EU_AP_MODE=y +CONFIG_88EU_P2P=y +CONFIG_9P_FS=m +# CONFIG_9P_FSCACHE is not set +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_AB3100_CORE=y +CONFIG_AB3100_OTP=m +CONFIG_ABX500_CORE=y +# CONFIG_ACCESSIBILITY is not set +CONFIG_ACENIC=m +# CONFIG_ACENIC_OMIT_TIGON_I is not set +CONFIG_ACERHDF=m +CONFIG_ACER_WMI=m +CONFIG_ACORN_PARTITION=y +# CONFIG_ACORN_PARTITION_ADFS is not set +# CONFIG_ACORN_PARTITION_CUMANA is not set +# CONFIG_ACORN_PARTITION_EESOX is not set +CONFIG_ACORN_PARTITION_ICS=y +# CONFIG_ACORN_PARTITION_POWERTEC is not set +CONFIG_ACORN_PARTITION_RISCIX=y +CONFIG_ACPI=y +CONFIG_ACPI_AC=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_CMPC=m +CONFIG_ACPI_CONTAINER=y +# CONFIG_ACPI_CUSTOM_DSDT is not set +CONFIG_ACPI_CUSTOM_DSDT_FILE="" +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_PROCFS_POWER=y +CONFIG_ACPI_QUICKSTART=m +CONFIG_ACPI_SBS=m +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_TOSHIBA=m +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_WMI=m +CONFIG_ACQUIRE_WDT=m +CONFIG_ACT200L_DONGLE=m +CONFIG_ACTISYS_DONGLE=m +CONFIG_AD2S1200=m +CONFIG_AD2S1210=m +CONFIG_AD2S90=m +CONFIG_AD5064=m +CONFIG_AD525X_DPOT=m +CONFIG_AD525X_DPOT_I2C=m +CONFIG_AD525X_DPOT_SPI=m +CONFIG_AD5360=m +CONFIG_AD5380=m +CONFIG_AD5421=m +CONFIG_AD5446=m +CONFIG_AD5449=m +CONFIG_AD5504=m +CONFIG_AD5624R_SPI=m +CONFIG_AD5686=m +CONFIG_AD5755=m +CONFIG_AD5764=m +CONFIG_AD5791=m +CONFIG_AD5930=m +CONFIG_AD5933=m +CONFIG_AD7150=m +CONFIG_AD7152=m +CONFIG_AD7192=m +CONFIG_AD7266=m +CONFIG_AD7280=m +CONFIG_AD7291=m +CONFIG_AD7298=m +CONFIG_AD7303=m +CONFIG_AD7476=m +CONFIG_AD7606=m +CONFIG_AD7606_IFACE_PARALLEL=m +CONFIG_AD7606_IFACE_SPI=m +CONFIG_AD7746=m +CONFIG_AD7780=m +CONFIG_AD7791=m +CONFIG_AD7793=m +CONFIG_AD7816=m +CONFIG_AD7887=m +CONFIG_AD7923=m +CONFIG_AD799X=m +CONFIG_AD799X_RING_BUFFER=y +CONFIG_AD8366=m +CONFIG_AD9523=m +CONFIG_AD9832=m +CONFIG_AD9834=m +CONFIG_AD9850=m +CONFIG_AD9852=m +CONFIG_AD9910=m +CONFIG_AD9951=m +CONFIG_ADAPTEC_STARFIRE=m +CONFIG_ADB=y +CONFIG_ADB_CUDA=y +CONFIG_ADB_MACIO=y +CONFIG_ADB_PMU=y +CONFIG_ADB_PMU_LED=y +CONFIG_ADE7753=m +CONFIG_ADE7754=m +CONFIG_ADE7758=m +CONFIG_ADE7759=m +CONFIG_ADE7854=m +CONFIG_ADE7854_I2C=m +CONFIG_ADE7854_SPI=m +CONFIG_ADF4350=m +CONFIG_ADFS_FS=m +# CONFIG_ADFS_FS_RW is not set +CONFIG_ADIS16060=m +CONFIG_ADIS16080=m +CONFIG_ADIS16130=m +CONFIG_ADIS16136=m +CONFIG_ADIS16201=m +CONFIG_ADIS16203=m +CONFIG_ADIS16204=m +CONFIG_ADIS16209=m +CONFIG_ADIS16220=m +CONFIG_ADIS16240=m +CONFIG_ADIS16260=m +CONFIG_ADIS16400=m +CONFIG_ADIS16480=m +CONFIG_ADJD_S311=m +CONFIG_ADM8211=m +CONFIG_ADT7316=m +CONFIG_ADT7316_I2C=m +CONFIG_ADT7316_SPI=m +# CONFIG_ADVANCED_OPTIONS is not set +CONFIG_ADVANTECH_WDT=m +CONFIG_ADXRS450=m +CONFIG_AD_SIGMA_DELTA=m +CONFIG_AEABI=y +CONFIG_AFFS_FS=m +# CONFIG_AFS_DEBUG is not set +CONFIG_AFS_FS=m +# CONFIG_AFS_FSCACHE is not 
set +CONFIG_AF_RXRPC=m +# CONFIG_AF_RXRPC_DEBUG is not set +CONFIG_AGP=y +CONFIG_AGP_ALI=m +CONFIG_AGP_AMD=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_ATI=m +CONFIG_AGP_EFFICEON=m +CONFIG_AGP_INTEL=y +CONFIG_AGP_NVIDIA=y +CONFIG_AGP_SIS=m +CONFIG_AGP_SWORKS=m +CONFIG_AGP_UNINORTH=m +CONFIG_AGP_VIA=y +CONFIG_AHCI_XGENE=m +CONFIG_AIC79XX_CMDS_PER_DEVICE=32 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_AIC79XX_DEBUG_MASK=0 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y +CONFIG_AIC7XXX_CMDS_PER_DEVICE=8 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_AIC7XXX_DEBUG_MASK=0 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +# CONFIG_AIC94XX_DEBUG is not set +CONFIG_AIO=y +CONFIG_AIRO=m +CONFIG_AIRO_CS=m +CONFIG_AIX_PARTITION=y +CONFIG_AK8975=m +CONFIG_ALIGNMENT_TRAP=y +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_ALIX=y +CONFIG_ALI_FIR=m +CONFIG_ALTERA_STAPL=m +CONFIG_ALTIVEC=y +CONFIG_ALX=m +CONFIG_AM335X_CONTROL_USB=m +CONFIG_AM335X_PHY_USB=m +CONFIG_AMBA_PL08X=y +CONFIG_AMD8111_ETH=m +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_STATS=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_AMD_NB=y +CONFIG_AMD_NUMA=y +CONFIG_AMD_PHY=y +CONFIG_AMIGA_PARTITION=y +CONFIG_AMILO_RFKILL=m +# CONFIG_ANDROID is not set +CONFIG_ANON_INODES=y +CONFIG_ANSLCD=m +CONFIG_APB_TIMER=y +CONFIG_APDS9300=m +CONFIG_APDS9802ALS=m +CONFIG_APM=m +# CONFIG_APM_ALLOW_INTS is not set +# CONFIG_APM_CPU_IDLE is not set +# CONFIG_APM_DISPLAY_BLANK is not set +# CONFIG_APM_DO_ENABLE is not set +# CONFIG_APM_IGNORE_USER_SUSPEND is not set +CONFIG_APM_POWER=m +CONFIG_APPLE_AIRPORT=m +CONFIG_APPLE_GMUX=m +CONFIG_APPLICOM=m +CONFIG_AR5523=m +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCM is not set +CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +# CONFIG_ARCH_CLPS711X is not set +CONFIG_ARCH_CPU_PROBE_RELEASE=y +# CONFIG_ARCH_DAVINCI is not set +CONFIG_ARCH_DISCARD_MEMBLOCK=y +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_EBSA110 is not set +CONFIG_ARCH_EMEV2=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_EXYNOS is not set +CONFIG_ARCH_FLATMEM_ENABLE=y +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_GEMINI is not set +CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_ARCH_HAS_BANDGAP=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CPUFREQ=y +CONFIG_ARCH_HAS_CPU_AUTOPROBE=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y +CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y +CONFIG_ARCH_HAS_ILOG2_U32=y +CONFIG_ARCH_HAS_ILOG2_U64=y +CONFIG_ARCH_HAS_OPP=y +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_ARCH_HAS_WALK_MEMORY=y +CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIGHBANK=y +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_KEYSTONE is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_LPC32XX is not set +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_MSM is not set +CONFIG_ARCH_MULTIPLATFORM=y +# CONFIG_ARCH_MULTI_CPU_AUTO is not set +# CONFIG_ARCH_MULTI_V6 is not set +CONFIG_ARCH_MULTI_V6_V7=y +CONFIG_ARCH_MULTI_V7=y +# 
CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NETX is not set +CONFIG_ARCH_OMAP=y +# CONFIG_ARCH_OMAP1 is not set +CONFIG_ARCH_OMAP2PLUS=y +CONFIG_ARCH_OMAP2PLUS_TYPICAL=y +# CONFIG_ARCH_ORION5X is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_REALVIEW is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_S3C24XX is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P64X0 is not set +# CONFIG_ARCH_S5PC100 is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHMOBILE is not set +CONFIG_ARCH_SHMOBILE_MULTI=y +# CONFIG_ARCH_SIRF is not set +# CONFIG_ARCH_SOCFPGA is not set +CONFIG_ARCH_SPARSEMEM_ENABLE=y +# CONFIG_ARCH_STI is not set +# CONFIG_ARCH_SUNXI is not set +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_TEGRA_114_SOC=y +CONFIG_ARCH_TEGRA_124_SOC=y +CONFIG_ARCH_TEGRA_2x_SOC=y +CONFIG_ARCH_TEGRA_3x_SOC=y +# CONFIG_ARCH_U8500 is not set +CONFIG_ARCH_USES_NUMA_PROT_NONE=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +# CONFIG_ARCH_VERSATILE is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_ARCH_VEXPRESS_CA9X4=y +CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA=y +CONFIG_ARCH_VEXPRESS_DCSCB=y +CONFIG_ARCH_VEXPRESS_SPC=y +CONFIG_ARCH_VEXPRESS_TC2_PM=y +CONFIG_ARCH_VIRT=y +# CONFIG_ARCH_W90X900 is not set +CONFIG_ARCH_WANTS_FREEZER_CONTROL=y +CONFIG_ARCH_WANTS_PROT_NUMA_PROT_NONE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +# CONFIG_ARCH_WM8850 is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQ is not set +CONFIG_ARCNET=m +CONFIG_ARCNET_1051=m +CONFIG_ARCNET_1201=m +CONFIG_ARCNET_CAP=m +CONFIG_ARCNET_COM20020=m +CONFIG_ARCNET_COM20020_CS=m +CONFIG_ARCNET_COM20020_ISA=m +CONFIG_ARCNET_COM20020_PCI=m +CONFIG_ARCNET_COM90xx=m +CONFIG_ARCNET_COM90xxIO=m +CONFIG_ARCNET_RAW=m +CONFIG_ARCNET_RIM_I=m +CONFIG_ARC_EMAC=m +CONFIG_ARM=y +CONFIG_ARM64=y +# CONFIG_ARM64_64K_PAGES is not set +CONFIG_ARM_AMBA=y +CONFIG_ARM_APPENDED_DTB=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_AT91_ETHER=m +CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND=y +# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_ARM_BIG_LITTLE_CPUFREQ=m +CONFIG_ARM_BIG_LITTLE_CPUIDLE=y +CONFIG_ARM_CCI=y +CONFIG_ARM_CHARLCD=y +CONFIG_ARM_CPU_SUSPEND=y +CONFIG_ARM_CPU_TOPOLOGY=y +CONFIG_ARM_DMA_IOMMU_ALIGNMENT=8 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_ARM_DMA_USE_IOMMU=y +CONFIG_ARM_DT_BL_CPUFREQ=m +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_643719=y +CONFIG_ARM_ERRATA_720789=y +CONFIG_ARM_ERRATA_754322=y +CONFIG_ARM_ERRATA_754327=y +CONFIG_ARM_ERRATA_764369=y +CONFIG_ARM_ERRATA_773022=y +CONFIG_ARM_ERRATA_775420=y +CONFIG_ARM_ERRATA_798181=y +CONFIG_ARM_GIC=y +CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_ARM_HIGHBANK_CPUFREQ=y +CONFIG_ARM_HIGHBANK_CPUIDLE=y +CONFIG_ARM_IMX6Q_CPUFREQ=m +# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set +CONFIG_ARM_KPROBES_TEST=m +CONFIG_ARM_L1_CACHE_SHIFT=6 
+CONFIG_ARM_L1_CACHE_SHIFT_6=y +CONFIG_ARM_NR_BANKS=8 +CONFIG_ARM_OMAP2PLUS_CPUFREQ=y +CONFIG_ARM_PATCH_PHYS_VIRT=y +CONFIG_ARM_PSCI=y +# CONFIG_ARM_SMMU is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_TEGRA_CPUFREQ=y +CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_ARM_TIMER_SP804=y +CONFIG_ARM_UNWIND=y +CONFIG_ARM_VEXPRESS_SPC_CPUFREQ=m +CONFIG_ARM_VIRT_EXT=y +CONFIG_ASN1=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_ASUS_LAPTOP=m +CONFIG_ASUS_NB_WMI=m +CONFIG_ASUS_WMI=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y +CONFIG_ASYNC_XOR=m +CONFIG_AT76C50X_USB=m +CONFIG_AT803X_PHY=y +CONFIG_ATA=y +CONFIG_ATAGS=y +CONFIG_ATAGS_PROC=y +CONFIG_ATALK=m +CONFIG_ATARI_PARTITION=y +CONFIG_ATA_ACPI=y +CONFIG_ATA_BMDMA=y +CONFIG_ATA_GENERIC=y +CONFIG_ATA_OVER_ETH=m +CONFIG_ATA_SFF=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATH10K=m +# CONFIG_ATH10K_DEBUG is not set +CONFIG_ATH10K_DEBUGFS=y +CONFIG_ATH10K_PCI=m +CONFIG_ATH10K_TRACING=y +CONFIG_ATH5K=m +# CONFIG_ATH5K_DEBUG is not set +CONFIG_ATH5K_PCI=y +# CONFIG_ATH5K_TRACER is not set +CONFIG_ATH6KL=m +# CONFIG_ATH6KL_DEBUG is not set +CONFIG_ATH6KL_SDIO=m +# CONFIG_ATH6KL_TRACING is not set +CONFIG_ATH6KL_USB=m +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_DEBUGFS=y +CONFIG_ATH9K_HTC=m +CONFIG_ATH9K_HTC_DEBUGFS=y +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_LEGACY_RATE_CONTROL=y +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_RFKILL=y +CONFIG_ATH_CARDS=m +CONFIG_ATH_COMMON=m +# CONFIG_ATH_DEBUG is not set +CONFIG_ATL1=m +CONFIG_ATL1C=m +CONFIG_ATL1E=m +CONFIG_ATL2=m +CONFIG_ATM=m +CONFIG_ATMEL=m +CONFIG_ATMEL_PWM=m +CONFIG_ATMEL_SSC=m +CONFIG_ATM_AMBASSADOR=m +# CONFIG_ATM_AMBASSADOR_DEBUG is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_DRIVERS=y +CONFIG_ATM_DUMMY=m +CONFIG_ATM_ENI=m +# CONFIG_ATM_ENI_DEBUG is not set +# CONFIG_ATM_ENI_TUNE_BURST is not set +CONFIG_ATM_FIRESTREAM=m +CONFIG_ATM_FORE200E=m +CONFIG_ATM_FORE200E_DEBUG=0 +CONFIG_ATM_FORE200E_TX_RETRY=16 +# CONFIG_ATM_FORE200E_USE_TASKLET is not set +CONFIG_ATM_HE=m +CONFIG_ATM_HE_USE_SUNI=y +CONFIG_ATM_HORIZON=m +# CONFIG_ATM_HORIZON_DEBUG is not set +CONFIG_ATM_IA=m +# CONFIG_ATM_IA_DEBUG is not set +CONFIG_ATM_IDT77252=m +# CONFIG_ATM_IDT77252_DEBUG is not set +# CONFIG_ATM_IDT77252_RCV_ALL is not set +CONFIG_ATM_IDT77252_USE_SUNI=y +CONFIG_ATM_LANAI=m +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_NICSTAR=m +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set +# CONFIG_ATM_NICSTAR_USE_SUNI is not set +CONFIG_ATM_SOLOS=m +CONFIG_ATM_TCP=m +CONFIG_ATM_ZATM=m +# CONFIG_ATM_ZATM_DEBUG is not set +CONFIG_ATP=m +CONFIG_AUDIT=y +CONFIG_AUDITSYSCALL=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_TREE=y +CONFIG_AUDIT_WATCH=y +CONFIG_AUFS_BDEV_LOOP=y +# CONFIG_AUFS_BRANCH_MAX_1023 is not set +CONFIG_AUFS_BRANCH_MAX_127=y +# CONFIG_AUFS_BRANCH_MAX_32767 is not set +# CONFIG_AUFS_BRANCH_MAX_511 is not set +CONFIG_AUFS_BR_FUSE=y +CONFIG_AUFS_BR_HFSPLUS=y +CONFIG_AUFS_BR_RAMFS=y +# CONFIG_AUFS_DEBUG is not set +CONFIG_AUFS_EXPORT=y +CONFIG_AUFS_FS=m +# CONFIG_AUFS_HNOTIFY is not set +CONFIG_AUFS_INO_T_64=y +CONFIG_AUFS_POLL=y +# CONFIG_AUFS_RDU is not set +CONFIG_AUFS_SBILIST=y +# CONFIG_AUFS_SHWH is not set +# CONFIG_AUFS_SP_IATTR is not set +CONFIG_AUTOFS4_FS=m +CONFIG_AUTO_ZRELADDR=y 
+CONFIG_AUXDISPLAY=y +CONFIG_AVERAGE=y +CONFIG_AX25=m +CONFIG_AX25_DAMA_SLAVE=y +CONFIG_AX88796=m +# CONFIG_AX88796_93CX6 is not set +CONFIG_B43=m +CONFIG_B43LEGACY=m +# CONFIG_B43LEGACY_DEBUG is not set +CONFIG_B43LEGACY_DMA=y +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y +# CONFIG_B43LEGACY_DMA_MODE is not set +CONFIG_B43LEGACY_HWRNG=y +CONFIG_B43LEGACY_LEDS=y +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y +CONFIG_B43LEGACY_PCI_AUTOSELECT=y +CONFIG_B43LEGACY_PIO=y +# CONFIG_B43LEGACY_PIO_MODE is not set +CONFIG_B43_BCMA=y +CONFIG_B43_BCMA_PIO=y +# CONFIG_B43_DEBUG is not set +CONFIG_B43_HWRNG=y +CONFIG_B43_LEDS=y +CONFIG_B43_PCICORE_AUTOSELECT=y +CONFIG_B43_PCI_AUTOSELECT=y +# CONFIG_B43_PCMCIA is not set +CONFIG_B43_PHY_HT=y +CONFIG_B43_PHY_LP=y +CONFIG_B43_PHY_N=y +CONFIG_B43_PIO=y +# CONFIG_B43_SDIO is not set +CONFIG_B43_SSB=y +CONFIG_B44=m +CONFIG_B44_PCI=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_BACKLIGHT_88PM860X=m +CONFIG_BACKLIGHT_AAT2870=m +CONFIG_BACKLIGHT_ADP5520=m +CONFIG_BACKLIGHT_ADP8860=m +CONFIG_BACKLIGHT_ADP8870=m +CONFIG_BACKLIGHT_APPLE=m +CONFIG_BACKLIGHT_AS3711=m +CONFIG_BACKLIGHT_ATMEL_PWM=m +CONFIG_BACKLIGHT_BD6107=m +CONFIG_BACKLIGHT_CARILLO_RANCH=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_DA903X=m +CONFIG_BACKLIGHT_DA9052=m +CONFIG_BACKLIGHT_GENERIC=m +CONFIG_BACKLIGHT_GPIO=m +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_LM3533=m +CONFIG_BACKLIGHT_LM3630A=m +CONFIG_BACKLIGHT_LM3639=m +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_LP8788=m +CONFIG_BACKLIGHT_LV5207LP=m +CONFIG_BACKLIGHT_MAX8925=m +CONFIG_BACKLIGHT_PANDORA=m +CONFIG_BACKLIGHT_PCF50633=m +CONFIG_BACKLIGHT_PWM=m +CONFIG_BACKLIGHT_SAHARA=m +CONFIG_BACKLIGHT_TPS65217=m +CONFIG_BACKLIGHT_WM831X=m +# CONFIG_BACKTRACE_SELF_TEST is not set +CONFIG_BALLOON_COMPACTION=y +CONFIG_BASE_FULL=y +CONFIG_BASE_SMALL=0 +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +# CONFIG_BATMAN_ADV_DEBUG is not set +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATTERY_88PM860X=m +CONFIG_BATTERY_BQ27X00_I2C=y +CONFIG_BATTERY_BQ27X00_PLATFORM=y +CONFIG_BATTERY_BQ27x00=m +CONFIG_BATTERY_DA9030=m +CONFIG_BATTERY_DA9052=m +CONFIG_BATTERY_DS2760=m +CONFIG_BATTERY_DS2780=m +CONFIG_BATTERY_DS2781=m +CONFIG_BATTERY_DS2782=m +CONFIG_BATTERY_INTEL_MID=m +CONFIG_BATTERY_MAX17040=m +CONFIG_BATTERY_MAX17042=m +CONFIG_BATTERY_PMU=m +CONFIG_BATTERY_RX51=m +CONFIG_BATTERY_SBS=m +CONFIG_BAYCOM_EPP=m +CONFIG_BAYCOM_PAR=m +CONFIG_BAYCOM_SER_FDX=m +CONFIG_BAYCOM_SER_HDX=m +CONFIG_BCACHE=m +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +# CONFIG_BCACHE_DEBUG is not set +CONFIG_BCH_CONST_M=14 +CONFIG_BCH_CONST_PARAMS=y +CONFIG_BCH_CONST_T=4 +CONFIG_BCM87XX_PHY=y +CONFIG_BCMA=m +CONFIG_BCMA_BLOCKIO=y +# CONFIG_BCMA_DEBUG is not set +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_BCMA_HOST_PCI=y +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_SOC=y +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCM_WIMAX=m +# CONFIG_BDI_SWITCH is not set +CONFIG_BE2ISCSI=m +CONFIG_BE2NET=m +# CONFIG_BEFS_DEBUG is not set +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_BIG_KEYS=y +CONFIG_BIG_LITTLE=y +CONFIG_BINARY_PRINTF=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_MISC=m +CONFIG_BINFMT_SCRIPT=y +CONFIG_BITREVERSE=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_CMDLINE_PARSER=y +CONFIG_BLK_CPQ_CISS_DA=m +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_3W_XXXX_RAID=m +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +CONFIG_BLK_DEV_AMD74XX=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +# CONFIG_BLK_DEV_CMD64X is not set +# 
CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_CRYPTOLOOP=m +# CONFIG_BLK_DEV_CS5520 is not set +# CONFIG_BLK_DEV_CS5530 is not set +CONFIG_BLK_DEV_DAC960=m +CONFIG_BLK_DEV_DM=y +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_GENERIC=y +# CONFIG_BLK_DEV_HD is not set +# CONFIG_BLK_DEV_HPT366 is not set +CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y +CONFIG_BLK_DEV_IDEDMA=y +CONFIG_BLK_DEV_IDEDMA_PCI=y +CONFIG_BLK_DEV_IDEDMA_SFF=y +CONFIG_BLK_DEV_IDEPCI=y +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_IO_TRACE=y +# CONFIG_BLK_DEV_IT8172 is not set +# CONFIG_BLK_DEV_IT8213 is not set +# CONFIG_BLK_DEV_IT821X is not set +# CONFIG_BLK_DEV_JMICRON is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_NS87415 is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_BLK_DEV_NVME=m +# CONFIG_BLK_DEV_OFFBOARD is not set +# CONFIG_BLK_DEV_OPTI621 is not set +CONFIG_BLK_DEV_OSD=m +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m +# CONFIG_BLK_DEV_PDC202XX_NEW is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +# CONFIG_BLK_DEV_PIIX is not set +# CONFIG_BLK_DEV_PLATFORM is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_RSXX=m +# CONFIG_BLK_DEV_SC1200 is not set +CONFIG_BLK_DEV_SD=y +# CONFIG_BLK_DEV_SIIMAGE is not set +CONFIG_BLK_DEV_SKD=m +# CONFIG_BLK_DEV_SL82C105 is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +# CONFIG_BLK_DEV_SVWKS is not set +CONFIG_BLK_DEV_SX8=m +# CONFIG_BLK_DEV_TC86C001 is not set +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_TRM290 is not set +CONFIG_BLK_DEV_UMEM=m +# CONFIG_BLK_DEV_VIA82CXXX is not set +# CONFIG_BLK_DEV_XIP is not set +CONFIG_BLOCK=y +CONFIG_BLOCK_COMPAT=y +CONFIG_BL_SWITCHER=y +CONFIG_BL_SWITCHER_DUMMY_IF=m +CONFIG_BMA180=m +CONFIG_BMAC=m +CONFIG_BMP085=y +CONFIG_BMP085_I2C=m +CONFIG_BMP085_SPI=m +CONFIG_BNA=m +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +CONFIG_BOARD_TPCI200=m +CONFIG_BONDING=m +CONFIG_BOOKE=y +CONFIG_BOOKE_WDT=m +CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT=38 +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +# CONFIG_BOOTX_TEXT is not set +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_BOUNCE=y +CONFIG_BPCTL=m +CONFIG_BPF_JIT=y +CONFIG_BPQETHER=m +CONFIG_BQL=y +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_BRCMDBG is not set +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMSMAC=m +CONFIG_BRCMUTIL=m +CONFIG_BRCM_TRACING=y +CONFIG_BRIDGE=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +# CONFIG_BRIDGE_EBT_ULOG is not set 
+CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_NETFILTER=y +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BROADCOM_PHY=y +CONFIG_BSD_DISKLABEL=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_BT=m +CONFIG_BTREE=y +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_DEBUG is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +CONFIG_BT_ATH3K=m +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIBTUART=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_LL=y +CONFIG_BT_HCIVHCI=m +CONFIG_BT_HIDP=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_WILINK=m +CONFIG_BUG=y +CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_C101=m +CONFIG_C293_PCIE=y +CONFIG_C2PORT=m +CONFIG_C2PORT_DURAMAR_2150=m +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_HISTOGRAM is not set +CONFIG_CACHE_L2X0=y +CONFIG_CACHE_PL310=y +CONFIG_CAIF=m +# CONFIG_CAIF_DEBUG is not set +CONFIG_CAIF_HSI=m +CONFIG_CAIF_NETDEV=m +CONFIG_CAIF_SPI_SLAVE=m +# CONFIG_CAIF_SPI_SYNC is not set +CONFIG_CAIF_TTY=m +CONFIG_CAIF_USB=m +CONFIG_CAIF_VIRTIO=m +CONFIG_CALGARY_IOMMU=y +CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y +CONFIG_CAN=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_AT91=m +CONFIG_CAN_BCM=m +CONFIG_CAN_CALC_BITTIMING=y +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_ISA=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_C_CAN_PLATFORM=m +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_CAN_DEV=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_EMS_PCMCIA=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_ESD_USB2=m +CONFIG_CAN_FLEXCAN=m +CONFIG_CAN_GRCAN=m +CONFIG_CAN_GW=m +CONFIG_CAN_JANZ_ICAN3=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_LEDS=y +CONFIG_CAN_MCP251X=m +CONFIG_CAN_MSCAN=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PEAK_PCMCIA=m +CONFIG_CAN_PEAK_USB=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_RAW=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_SJA1000_ISA=m +CONFIG_CAN_SJA1000_OF_PLATFORM=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_SOFTING_CS=m +CONFIG_CAN_TI_HECC=m +CONFIG_CAN_TSCAN1=m +CONFIG_CAN_VCAN=m +CONFIG_CAPI_AVM=y +CONFIG_CAPI_EICON=y +CONFIG_CAPI_TRACE=y +CONFIG_CARDBUS=y +CONFIG_CARDMAN_4000=m +CONFIG_CARDMAN_4040=m +CONFIG_CARL9170=m +# CONFIG_CARL9170_DEBUGFS is not set +CONFIG_CARL9170_HWRNG=y +CONFIG_CARL9170_LEDS=y +CONFIG_CARL9170_WPC=y +# CONFIG_CARMINE_DRAM_CUSTOM is not set +CONFIG_CASSINI=m +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y +CONFIG_CBE_CPUFREQ_SPU_GOVERNOR=m +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_CC_STACKPROTECTOR=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +CONFIG_CED1401=m +# CONFIG_CELL_CPU is not set +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_CFAG12864B=m +CONFIG_CFAG12864B_RATE=20 +CONFIG_CFG80211=m +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_DEBUGFS=y 
+CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_INTERNAL_REGDB is not set +# CONFIG_CFG80211_REG_DEBUG is not set +CONFIG_CFG80211_WEXT=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_CPUACCT=y +# CONFIG_CGROUP_DEBUG is not set +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_SCHED=y +CONFIG_CHARGER_88PM860X=m +CONFIG_CHARGER_BQ2415X=m +CONFIG_CHARGER_BQ24190=m +CONFIG_CHARGER_BQ24735=m +CONFIG_CHARGER_GPIO=m +CONFIG_CHARGER_ISP1704=m +CONFIG_CHARGER_LP8727=m +CONFIG_CHARGER_LP8788=m +CONFIG_CHARGER_MANAGER=y +CONFIG_CHARGER_MAX8903=m +CONFIG_CHARGER_MAX8997=m +CONFIG_CHARGER_MAX8998=m +CONFIG_CHARGER_PCF50633=m +CONFIG_CHARGER_SMB347=m +CONFIG_CHARGER_TPS65090=m +CONFIG_CHARGER_TWL4030=m +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHROMEOS_LAPTOP=m +CONFIG_CHROME_PLATFORMS=y +CONFIG_CHR_DEV_OSST=m +CONFIG_CHR_DEV_SCH=m +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_ST=m +CONFIG_CICADA_PHY=y +CONFIG_CIFS=m +CONFIG_CIFS_ACL=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_FSCACHE=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_SMB2=y +CONFIG_CIFS_STATS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CISS_SCSI_TAPE=y +CONFIG_CLEANCACHE=y +CONFIG_CLKBLD_I8253=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKEVT_I8253=y +CONFIG_CLKSRC_I8253=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLKSRC_OF=y +CONFIG_CLK_PPC_CORENET=y +CONFIG_CLK_TWL6040=m +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CLS_U32_MARK=y +# CONFIG_CLS_U32_PERF is not set +CONFIG_CLZ_TAB=y +CONFIG_CM36651=m +CONFIG_CMA=y +CONFIG_CMA_ALIGNMENT=8 +CONFIG_CMA_AREAS=7 +# CONFIG_CMA_DEBUG is not set +CONFIG_CMA_SIZE_MBYTES=16 +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMDLINE_BOOL is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_CMDLINE_PARTITION=y +CONFIG_CMM=y +CONFIG_CNIC=m +CONFIG_CODA_FS=m +CONFIG_COMEDI=m +CONFIG_COMEDI_8255=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADDI_APCI_035=m +CONFIG_COMEDI_ADDI_APCI_1032=m +CONFIG_COMEDI_ADDI_APCI_1500=m +CONFIG_COMEDI_ADDI_APCI_1516=m +CONFIG_COMEDI_ADDI_APCI_1564=m +CONFIG_COMEDI_ADDI_APCI_16XX=m +CONFIG_COMEDI_ADDI_APCI_2032=m +CONFIG_COMEDI_ADDI_APCI_2200=m +CONFIG_COMEDI_ADDI_APCI_3120=m +CONFIG_COMEDI_ADDI_APCI_3501=m +CONFIG_COMEDI_ADDI_APCI_3XXX=m +CONFIG_COMEDI_ADDI_WATCHDOG=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADQ12B=m +CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_AIO_AIO12_8=m +CONFIG_COMEDI_AIO_IIRO_16=m +CONFIG_COMEDI_AMPLC_DIO200=m +CONFIG_COMEDI_AMPLC_DIO200_ISA=m +CONFIG_COMEDI_AMPLC_DIO200_PCI=m +CONFIG_COMEDI_AMPLC_PC236=m +CONFIG_COMEDI_AMPLC_PC236_ISA=m +CONFIG_COMEDI_AMPLC_PC236_PCI=m +CONFIG_COMEDI_AMPLC_PC263_ISA=m +CONFIG_COMEDI_AMPLC_PC263_PCI=m +CONFIG_COMEDI_AMPLC_PCI224=m +CONFIG_COMEDI_AMPLC_PCI230=m +CONFIG_COMEDI_BOND=m +CONFIG_COMEDI_C6XDIGIO=m +CONFIG_COMEDI_CB_DAS16_CS=m +CONFIG_COMEDI_CB_PCIDAS=m +CONFIG_COMEDI_CB_PCIDAS64=m +CONFIG_COMEDI_CB_PCIDDA=m 
+CONFIG_COMEDI_CB_PCIMDAS=m +CONFIG_COMEDI_CB_PCIMDDA=m +CONFIG_COMEDI_CONTEC_PCI_DIO=m +CONFIG_COMEDI_DAQBOARD2000=m +CONFIG_COMEDI_DAS08=m +CONFIG_COMEDI_DAS08_CS=m +CONFIG_COMEDI_DAS08_ISA=m +CONFIG_COMEDI_DAS08_PCI=m +CONFIG_COMEDI_DAS16=m +CONFIG_COMEDI_DAS16M1=m +CONFIG_COMEDI_DAS1800=m +CONFIG_COMEDI_DAS6402=m +CONFIG_COMEDI_DAS800=m +# CONFIG_COMEDI_DEBUG is not set +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 +CONFIG_COMEDI_DMM32AT=m +CONFIG_COMEDI_DT2801=m +CONFIG_COMEDI_DT2811=m +CONFIG_COMEDI_DT2814=m +CONFIG_COMEDI_DT2815=m +CONFIG_COMEDI_DT2817=m +CONFIG_COMEDI_DT282X=m +CONFIG_COMEDI_DT3000=m +CONFIG_COMEDI_DT9812=m +CONFIG_COMEDI_DYNA_PCI10XX=m +CONFIG_COMEDI_FC=m +CONFIG_COMEDI_FL512=m +CONFIG_COMEDI_GSC_HPDI=m +CONFIG_COMEDI_ICP_MULTI=m +CONFIG_COMEDI_II_PCI20KC=m +CONFIG_COMEDI_JR3_PCI=m +CONFIG_COMEDI_KCOMEDILIB=m +CONFIG_COMEDI_KE_COUNTER=m +CONFIG_COMEDI_ME4000=m +CONFIG_COMEDI_ME_DAQ=m +CONFIG_COMEDI_MISC_DRIVERS=y +CONFIG_COMEDI_MITE=m +CONFIG_COMEDI_MPC624=m +CONFIG_COMEDI_MULTIQ3=m +CONFIG_COMEDI_NI_6527=m +CONFIG_COMEDI_NI_65XX=m +CONFIG_COMEDI_NI_660X=m +CONFIG_COMEDI_NI_670X=m +CONFIG_COMEDI_NI_ATMIO=m +CONFIG_COMEDI_NI_ATMIO16D=m +CONFIG_COMEDI_NI_AT_A2150=m +CONFIG_COMEDI_NI_AT_AO=m +CONFIG_COMEDI_NI_DAQ_700_CS=m +CONFIG_COMEDI_NI_DAQ_DIO24_CS=m +CONFIG_COMEDI_NI_LABPC=m +CONFIG_COMEDI_NI_LABPC_CS=m +CONFIG_COMEDI_NI_LABPC_ISA=m +CONFIG_COMEDI_NI_LABPC_ISADMA=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_MIO_CS=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_TIOCMD=m +CONFIG_COMEDI_PARPORT=m +CONFIG_COMEDI_PCI_DRIVERS=y +CONFIG_COMEDI_PCL711=m +CONFIG_COMEDI_PCL724=m +CONFIG_COMEDI_PCL726=m +CONFIG_COMEDI_PCL730=m +CONFIG_COMEDI_PCL812=m +CONFIG_COMEDI_PCL816=m +CONFIG_COMEDI_PCL818=m +CONFIG_COMEDI_PCM3724=m +CONFIG_COMEDI_PCMAD=m +CONFIG_COMEDI_PCMCIA_DRIVERS=y +CONFIG_COMEDI_PCMDA12=m +CONFIG_COMEDI_PCMMIO=m +CONFIG_COMEDI_PCMUIO=m +CONFIG_COMEDI_POC=m +CONFIG_COMEDI_QUATECH_DAQP_CS=m +CONFIG_COMEDI_RTD520=m +CONFIG_COMEDI_RTI800=m +CONFIG_COMEDI_RTI802=m +CONFIG_COMEDI_S526=m +CONFIG_COMEDI_S626=m +CONFIG_COMEDI_SERIAL2002=m +CONFIG_COMEDI_SKEL=m +CONFIG_COMEDI_SSV_DNP=m +CONFIG_COMEDI_TEST=m +CONFIG_COMEDI_UNIOXX5=m +CONFIG_COMEDI_USBDUX=m +CONFIG_COMEDI_USBDUXFAST=m +CONFIG_COMEDI_USBDUXSIGMA=m +CONFIG_COMEDI_USB_DRIVERS=y +CONFIG_COMEDI_VMK80XX=m +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_DEBUG is not set +CONFIG_COMMON_CLK_MAX77686=m +CONFIG_COMMON_CLK_S2MPS11=m +CONFIG_COMMON_CLK_SI5351=m +CONFIG_COMMON_CLK_VERSATILE=y +CONFIG_COMMON_CLK_WM831X=m +CONFIG_COMMON_CLK_XGENE=y +CONFIG_COMPAL_LAPTOP=m +CONFIG_COMPAT=y +CONFIG_COMPAT_BINFMT_ELF=y +# CONFIG_COMPAT_BRK is not set +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +CONFIG_COMPAT_NETLINK_MESSAGES=y +CONFIG_COMPAT_OLD_SIGACTION=y +# CONFIG_COMPAT_VDSO is not set +# CONFIG_COMPILE_TEST is not set +CONFIG_CONFIGFS_FS=m +CONFIG_CONNECTOR=y +CONFIG_CONSOLE_POLL=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_CONTEXT_TRACKING=y +# CONFIG_CONTEXT_TRACKING_FORCE is not set +CONFIG_COPS=m +CONFIG_COPS_DAYNA=y +CONFIG_COPS_TANGENT=y +CONFIG_CORDIC=m +CONFIG_COREDUMP=y +CONFIG_CORENET_GENERIC=y +CONFIG_COSA=m +# CONFIG_CPA_DEBUG is not set +CONFIG_CPM=y +CONFIG_CPM2=y +CONFIG_CPU5_WDT=m +CONFIG_CPUSETS=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_CP15=y 
+CONFIG_CPU_CP15_MMU=y +# CONFIG_CPU_DCACHE_DISABLE is not set +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_MAPLE=y +CONFIG_CPU_FREQ_PMAC=y +CONFIG_CPU_FREQ_PMAC64=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_HAS_ASID=y +# CONFIG_CPU_ICACHE_DISABLE is not set +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +CONFIG_CPU_NOTIFIER_ERROR_INJECT=m +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_PM=y +CONFIG_CPU_RMAP=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_CYRIX_32=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_TRANSMETA_32=y +CONFIG_CPU_SUP_UMC_32=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_V7=y +CONFIG_CRAMFS=m +CONFIG_CRC16=y +CONFIG_CRC32=y +# CONFIG_CRC32_BIT is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_SELFTEST is not set +# CONFIG_CRC32_SLICEBY4 is not set +CONFIG_CRC32_SLICEBY8=y +CONFIG_CRC7=m +CONFIG_CRC8=m +CONFIG_CRC_CCITT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC_T10DIF=y +CONFIG_CROSS_COMPILE="" +CONFIG_CROSS_MEMORY_ATTACH=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_ABLK_HELPER=m +CONFIG_CRYPTO_AEAD=m +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AES_586=m +CONFIG_CRYPTO_AES_ARM=m +CONFIG_CRYPTO_AES_ARM_BS=m +CONFIG_CRYPTO_AES_NI_INTEL=m +CONFIG_CRYPTO_AES_X86_64=m +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32C_INTEL=y +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_CTR=m +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_DEV_FSL_CAAM=m +CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API=m +CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API=m +# CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG is not set +CONFIG_CRYPTO_DEV_FSL_CAAM_INTC=y +CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD=255 +CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD=2048 +CONFIG_CRYPTO_DEV_FSL_CAAM_JR=m +CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE=9 +CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API=m +CONFIG_CRYPTO_DEV_GEODE=m +CONFIG_CRYPTO_DEV_HIFN_795X=m +CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y +CONFIG_CRYPTO_DEV_NX=y +CONFIG_CRYPTO_DEV_NX_COMPRESS=m +CONFIG_CRYPTO_DEV_NX_ENCRYPT=m +CONFIG_CRYPTO_DEV_OMAP_AES=m +CONFIG_CRYPTO_DEV_OMAP_SHAM=m +CONFIG_CRYPTO_DEV_PADLOCK=y +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +CONFIG_CRYPTO_DEV_SAHARA=m +CONFIG_CRYPTO_DEV_TALITOS=m +CONFIG_CRYPTO_DEV_TEGRA_AES=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_GCM=m +CONFIG_CRYPTO_GF128MUL=m +CONFIG_CRYPTO_GHASH=m 
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m +CONFIG_CRYPTO_GLUE_HELPER_X86=m +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_PCOMP=m +CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_RMD128=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_RMD256=m +CONFIG_CRYPTO_RMD320=m +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_SALSA20=m +CONFIG_CRYPTO_SALSA20_586=m +CONFIG_CRYPTO_SALSA20_X86_64=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_SSE2_586=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA1_ARM=m +CONFIG_CRYPTO_SHA1_PPC=m +CONFIG_CRYPTO_SHA1_SSSE3=m +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA256_SSSE3=m +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA512_SSSE3=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_586=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_USER=m +CONFIG_CRYPTO_USER_API=m +CONFIG_CRYPTO_USER_API_HASH=m +CONFIG_CRYPTO_USER_API_SKCIPHER=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XTS=m +CONFIG_CRYPTO_ZLIB=m +CONFIG_CRYSTALHD=m +# CONFIG_CS5535_MFGPT is not set +CONFIG_CS89x0=m +CONFIG_CS89x0_PLATFORM=y +CONFIG_CUSE=m +CONFIG_CW1200=m +CONFIG_CW1200_WLAN_SDIO=m +CONFIG_CW1200_WLAN_SPI=m +CONFIG_CXT1E1=m +CONFIG_CYCLADES=m +CONFIG_CYPRESS_FIRMWARE=m +# CONFIG_CYZ_INTR is not set +CONFIG_DA9052_WATCHDOG=m +CONFIG_DA9055_WATCHDOG=m +CONFIG_DAVICOM_PHY=y +CONFIG_DCA=m +CONFIG_DCACHE_WORD_ACCESS=y +CONFIG_DCB=y +CONFIG_DCDBAS=m +CONFIG_DDR=y +CONFIG_DE2104X=m +CONFIG_DE2104X_DSL=0 +CONFIG_DE4X5=m +CONFIG_DEBUGGER=y +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_BLK_CGROUP is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_BOOT_PARAMS is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +CONFIG_DEBUG_FS=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_DEBUG_HIGHMEM is not set +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set +CONFIG_DEBUG_IMX_UART_PORT=1 +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_LL is not set +CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_NX_TEST is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_RODATA=y +# CONFIG_DEBUG_RODATA_TEST is not set +# CONFIG_DEBUG_RT_MUTEXES is not 
set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_DEBUG_SET_MODULE_RONX=y +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_SHIRQ is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set +# CONFIG_DEBUG_TLBFLUSH is not set +# CONFIG_DEBUG_UART_8250 is not set +# CONFIG_DEBUG_UART_PL01X is not set +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_VIRTUAL is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +CONFIG_DECNET=m +CONFIG_DECNET_NF_GRABULATOR=m +# CONFIG_DECNET_ROUTER is not set +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DEFAULT_CUBIC=y +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +CONFIG_DEFAULT_IO_DELAY_TYPE=1 +CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4 +# CONFIG_DEFAULT_NOOP is not set +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_SECURITY="apparmor" +CONFIG_DEFAULT_SECURITY_APPARMOR=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +# CONFIG_DEFAULT_SECURITY_YAMA is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_DEFXX=m +# CONFIG_DEFXX_MMIO is not set +CONFIG_DELL_LAPTOP=m +CONFIG_DELL_RBU=m +CONFIG_DELL_WMI=m +CONFIG_DELL_WMI_AIO=m +# CONFIG_DEPRECATED_PARAM_STRUCT is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +# CONFIG_DEVKMEM is not set +CONFIG_DEVPORT=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEV_APPLETALK=m +CONFIG_DE_AOC=y +CONFIG_DGAP=m +CONFIG_DGNC=m +CONFIG_DGRP=m +CONFIG_DIRECT_GBPAGES=y +CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m +CONFIG_DISPLAY_CONNECTOR_DVI=m +CONFIG_DISPLAY_CONNECTOR_HDMI=y +CONFIG_DISPLAY_ENCODER_TFP410=m +CONFIG_DISPLAY_ENCODER_TPD12S015=y +CONFIG_DISPLAY_PANEL_DPI=m +CONFIG_DISPLAY_PANEL_DSI_CM=m +CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m +CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m +CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m +CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m +CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m +CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m +CONFIG_DL2K=m +CONFIG_DLCI=m +CONFIG_DLCI_MAX=8 +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +CONFIG_DM9000=m +# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set +CONFIG_DM9102=m +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set +CONFIG_DMAR_TABLE=y +# CONFIG_DMATEST is not set +CONFIG_DMA_ACPI=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_DMA_CMA=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ENGINE_RAID=y +CONFIG_DMA_OF=y +CONFIG_DMA_OMAP=y +CONFIG_DMA_SHARED_BUFFER=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMI=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_BUFIO=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_CLEANER=m +CONFIG_DM_CACHE_MQ=m +CONFIG_DM_CRYPT=m +# CONFIG_DM_DEBUG is not set +# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set +CONFIG_DM_DELAY=m +CONFIG_DM_FLAKEY=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_MIRROR=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_RAID=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_SWITCH=m +CONFIG_DM_THIN_PROVISIONING=m 
+CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=m +CONFIG_DM_ZERO=m +CONFIG_DNET=m +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=y +CONFIG_DONGLE=y +CONFIG_DOUBLEFAULT=y +CONFIG_DPA=y +# CONFIG_DPAA_ETH_UNIT_TESTS is not set +CONFIG_DPA_MAX_FRM_SIZE=1522 +CONFIG_DPA_OFFLINE_PORTS=y +# CONFIG_DPM_WATCHDOG is not set +CONFIG_DQL=y +CONFIG_DRA752_THERMAL=y +CONFIG_DRAGONRISE_FF=y +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_DRM=m +CONFIG_DRM_ARMADA=m +CONFIG_DRM_AST=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_EXYNOS is not set +CONFIG_DRM_GEM_CMA_HELPER=y +CONFIG_DRM_GMA3600=y +CONFIG_DRM_GMA500=m +CONFIG_DRM_GMA600=y +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_NXP_TDA998X=m +CONFIG_DRM_I2C_SIL164=m +CONFIG_DRM_I810=m +CONFIG_DRM_I915=m +CONFIG_DRM_I915_BDW=m +CONFIG_DRM_I915_FBDEV=y +CONFIG_DRM_I915_KMS=y +CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT=y +# CONFIG_DRM_I915_UMS is not set +# CONFIG_DRM_IMX is not set +CONFIG_DRM_KMS_CMA_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_MEDFIELD is not set +CONFIG_DRM_MGA=m +# CONFIG_DRM_MGAG200 is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_OMAP=m +CONFIG_DRM_OMAP_NUM_CRTCS=1 +CONFIG_DRM_QXL=m +CONFIG_DRM_R128=m +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_UMS=y +CONFIG_DRM_RCAR_DU=m +CONFIG_DRM_RCAR_LVDS=y +CONFIG_DRM_SAVAGE=m +CONFIG_DRM_SHMOBILE=m +CONFIG_DRM_SIS=m +CONFIG_DRM_TDFX=m +# CONFIG_DRM_TEGRA is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_UDL=m +CONFIG_DRM_USB=m +CONFIG_DRM_VIA=m +CONFIG_DRM_VMWGFX=m +CONFIG_DRM_VMWGFX_FBCON=y +CONFIG_DS1682=m +CONFIG_DSCC4=m +CONFIG_DSCC4_PCISYNC=y +CONFIG_DSCC4_PCI_RST=y +# CONFIG_DT3155_CCIR is not set +CONFIG_DT3155_STREAMING=y +CONFIG_DTC=y +CONFIG_DTL=y +CONFIG_DTLK=m +CONFIG_DUMMY=m +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_IRQ=m +CONFIG_DVB_A8293=m +CONFIG_DVB_AF9013=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_AS102=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_AV7110=m +CONFIG_DVB_AV7110_OSD=y +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +CONFIG_DVB_BCM3510=m +CONFIG_DVB_BT8XX=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET_PATCH=m +CONFIG_DVB_CORE=m +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_CXD2099=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB8000=m +CONFIG_DVB_DM1105=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_DRXK=m +CONFIG_DVB_DS3000=m +# CONFIG_DVB_DUMMY_FE is not set +CONFIG_DVB_DYNAMIC_MINORS=y +CONFIG_DVB_EC100=m +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_DVB_HOPPER=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IT913X_FE=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_L64781=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_MT312=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NET=y +CONFIG_DVB_NGENE=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_PLL=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m 
+CONFIG_DVB_RTL2830=m +CONFIG_DVB_RTL2832=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_S921=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_SP8870=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0297=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA18271C2DD=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AF9015=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_FRIIO=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_IT913X=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_RTL28XXU=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_DVB_VES1820=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m +CONFIG_DVB_ZL10353=m +CONFIG_DW_APB_TIMER=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC_PCI=m +CONFIG_DW_WATCHDOG=m +CONFIG_DX_SEP=m +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_E100=m +CONFIG_E1000=m +CONFIG_E1000E=m +# CONFIG_E200 is not set +CONFIG_E500=y +# CONFIG_E5500_CPU is not set +# CONFIG_E6500_CPU is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_EFI=y +CONFIG_EARLY_PRINTK_INTEL_MID=y +CONFIG_ECHO=m +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_EDAC=y +CONFIG_EDAC_AMD64=m +# CONFIG_EDAC_AMD64_ERROR_INJECTION is not set +CONFIG_EDAC_AMD76X=m +CONFIG_EDAC_AMD8111=m +CONFIG_EDAC_AMD8131=m +CONFIG_EDAC_CPC925=m +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_E7XXX=m +CONFIG_EDAC_HIGHBANK_L2=m +CONFIG_EDAC_HIGHBANK_MC=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_I5000=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I82860=m +CONFIG_EDAC_I82875P=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_MCE_INJ=m +CONFIG_EDAC_MM_EDAC=m +CONFIG_EDAC_MPC85XX=m +CONFIG_EDAC_PASEMI=m +CONFIG_EDAC_R82600=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_X38=m +CONFIG_EDD=y +CONFIG_EDD_OFF=y +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +CONFIG_EEH=y +CONFIG_EEPROM_93CX6=m +CONFIG_EEPROM_93XX46=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_AT25=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m 
+CONFIG_EFI=y +CONFIG_EFIVAR_FS=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_PARTITION=y +CONFIG_EFI_STUB=y +CONFIG_EFI_VARS=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFS_FS=m +CONFIG_EHEA=y +CONFIG_EISA=y +CONFIG_EISA_NAMES=y +CONFIG_EISA_PCI_EISA=y +CONFIG_EISA_VIRTUAL_ROOT=y +CONFIG_EISA_VLB_PRIMING=y +CONFIG_EL3=m +CONFIG_ELECTRA_CF=m +CONFIG_ELF_CORE=y +# CONFIG_EMBEDDED is not set +CONFIG_EM_TIMER_STI=y +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_ENC28J60=m +# CONFIG_ENC28J60_WRITEVERIFY is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_ENCRYPTED_KEYS=y +CONFIG_ENIC=m +CONFIG_EPAPR_PARAVIRT=y +CONFIG_EPIC100=m +CONFIG_EPOLL=y +CONFIG_EQUALIZER=m +CONFIG_ESI_DONGLE=m +CONFIG_ET131X=m +CONFIG_ETHERNET=y +CONFIG_ETHOC=m +CONFIG_EUROTECH_WDT=m +CONFIG_EVENTFD=y +CONFIG_EVENT_TRACING=y +CONFIG_EVM=y +CONFIG_EVM_HMAC_VERSION=2 +# CONFIG_EXOFS_DEBUG is not set +CONFIG_EXOFS_FS=m +CONFIG_EXPERT=y +CONFIG_EXPORTFS=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_DEBUG is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_USE_FOR_EXT23=y +CONFIG_EXTCON=y +CONFIG_EXTCON_ADC_JACK=m +CONFIG_EXTCON_ARIZONA=m +CONFIG_EXTCON_GPIO=m +CONFIG_EXTCON_MAX77693=m +CONFIG_EXTCON_MAX8997=m +CONFIG_EXTCON_PALMAS=m +CONFIG_EXTRA_FIRMWARE="" +CONFIG_EXTRA_TARGETS="" +CONFIG_EXYNOS_ADC=y +# CONFIG_F2FS_CHECK_FS is not set +CONFIG_F2FS_FS=m +CONFIG_F2FS_FS_POSIX_ACL=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_XATTR=y +CONFIG_F2FS_STAT_FS=y +CONFIG_F71808E_WDT=m +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_FARSYNC=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +CONFIG_FAT_FS=y +# CONFIG_FAULT_INJECTION is not set +# CONFIG_FA_DUMP is not set +CONFIG_FB=y +CONFIG_FB_3DFX=m +CONFIG_FB_3DFX_ACCEL=y +CONFIG_FB_3DFX_I2C=y +CONFIG_FB_ARC=m +CONFIG_FB_ARK=m +CONFIG_FB_ARMCLCD=y +CONFIG_FB_ASILIANT=y +CONFIG_FB_ATY=m +CONFIG_FB_ATY128_BACKLIGHT=y +CONFIG_FB_ATY_BACKLIGHT=y +CONFIG_FB_ATY_CT=y +# CONFIG_FB_ATY_GENERIC_LCD is not set +CONFIG_FB_ATY_GX=y +CONFIG_FB_AUO_K1900=m +CONFIG_FB_AUO_K1901=m +CONFIG_FB_AUO_K190X=m +CONFIG_FB_BROADSHEET=m +CONFIG_FB_CARILLO_RANCH=m +CONFIG_FB_CARMINE=m +CONFIG_FB_CARMINE_DRAM_EVAL=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +CONFIG_FB_CIRRUS=m +CONFIG_FB_CONTROL=y +CONFIG_FB_CT65550=y +CONFIG_FB_CYBER2000=m +CONFIG_FB_CYBER2000_DDC=y +# CONFIG_FB_DA8XX is not set +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_EFI=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_FSL_DIU=m +CONFIG_FB_GEODE=y +CONFIG_FB_GEODE_GX=m +CONFIG_FB_GEODE_GX1=m +CONFIG_FB_GEODE_LX=m +CONFIG_FB_GOLDFISH=m +CONFIG_FB_HECUBA=m +CONFIG_FB_HGA=m +CONFIG_FB_HYPERV=m +CONFIG_FB_I740=m +CONFIG_FB_I810=m +# CONFIG_FB_I810_GTF is not set +CONFIG_FB_IMSTT=y +CONFIG_FB_INTEL=m +# CONFIG_FB_INTEL_DEBUG is not set +# CONFIG_FB_INTEL_I2C is not set +CONFIG_FB_KYRO=m +CONFIG_FB_LE80578=m +CONFIG_FB_MATROX=m +CONFIG_FB_MATROX_G=y +CONFIG_FB_MATROX_I2C=m +CONFIG_FB_MATROX_MAVEN=m +CONFIG_FB_MATROX_MILLENIUM=y +CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_FB_MB862XX=m +CONFIG_FB_MB862XX_I2C=y +# CONFIG_FB_MB862XX_LIME is not set +CONFIG_FB_MB862XX_PCI_GDC=y +CONFIG_FB_METRONOME=m +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_MX3=y +CONFIG_FB_N411=m +CONFIG_FB_NEOMAGIC=m +CONFIG_FB_NVIDIA=m +CONFIG_FB_NVIDIA_BACKLIGHT=y +# 
CONFIG_FB_NVIDIA_DEBUG is not set +CONFIG_FB_NVIDIA_I2C=y +CONFIG_FB_OF=y +# CONFIG_FB_OMAP2 is not set +CONFIG_FB_PLATINUM=y +CONFIG_FB_PM2=m +CONFIG_FB_PM2_FIFO_DISCONNECT=y +CONFIG_FB_PM3=m +CONFIG_FB_PS3=y +CONFIG_FB_PS3_DEFAULT_SIZE_M=9 +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RIVA=m +CONFIG_FB_RIVA_BACKLIGHT=y +# CONFIG_FB_RIVA_DEBUG is not set +CONFIG_FB_RIVA_I2C=y +CONFIG_FB_S1D13XXX=m +CONFIG_FB_S3=m +CONFIG_FB_S3_DDC=y +CONFIG_FB_SAVAGE=m +# CONFIG_FB_SAVAGE_ACCEL is not set +CONFIG_FB_SAVAGE_I2C=y +CONFIG_FB_SIMPLE=y +CONFIG_FB_SIS=m +CONFIG_FB_SIS_300=y +CONFIG_FB_SIS_315=y +CONFIG_FB_SM501=m +CONFIG_FB_SM7XX=m +CONFIG_FB_SMSCUFX=m +CONFIG_FB_SSD1307=m +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_TMIO=m +CONFIG_FB_TMIO_ACCELL=y +CONFIG_FB_TRIDENT=m +CONFIG_FB_UVESA=m +CONFIG_FB_VALKYRIE=y +CONFIG_FB_VESA=y +CONFIG_FB_VGA16=m +CONFIG_FB_VIA=m +# CONFIG_FB_VIA_DIRECT_PROCFS is not set +CONFIG_FB_VIA_X_COMPATIBILITY=y +# CONFIG_FB_VIRTUAL is not set +CONFIG_FB_VT8623=m +CONFIG_FB_XGI=m +CONFIG_FCOE=m +CONFIG_FCOE_FNIC=m +CONFIG_FDDI=y +CONFIG_FEALNX=m +CONFIG_FEC=y +CONFIG_FHANDLE=y +CONFIG_FIB_RULES=y +CONFIG_FILE_LOCKING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_NET=m +CONFIG_FIREWIRE_NOSY=m +CONFIG_FIREWIRE_OHCI=m +# CONFIG_FIREWIRE_OHCI_REMOTE_DMA is not set +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_SERIAL=m +CONFIG_FIRMWARE_EDID=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_FIXED_PHY=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_FLATMEM=y +CONFIG_FLATMEM_MANUAL=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN is not set +# CONFIG_FMAN_RESOURCE_ALLOCATION_ALGORITHM is not set +CONFIG_FMC=m +CONFIG_FMC_CHARDEV=m +CONFIG_FMC_FAKEDEV=m +CONFIG_FMC_TRIVIAL=m +CONFIG_FMC_WRITE_EEPROM=m +# CONFIG_FONT_10x18 is not set +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +CONFIG_FONT_8x16=y +CONFIG_FONT_8x8=y +CONFIG_FONT_ACORN_8x8=y +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_SUN8x16 is not set +CONFIG_FONT_SUPPORT=y +CONFIG_FORCEDETH=m +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_FRAME_POINTER=y +CONFIG_FREEZER=y +CONFIG_FRONTSWAP=y +CONFIG_FSCACHE=m +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +CONFIG_FSCACHE_STATS=y +CONFIG_FSL_BMAN=y +CONFIG_FSL_BMAN_CONFIG=y +CONFIG_FSL_BMAN_DEBUGFS=m +CONFIG_FSL_BMAN_PORTAL=y +# CONFIG_FSL_BMAN_TEST is not set +CONFIG_FSL_BOOKE=y +CONFIG_FSL_DMA=m +CONFIG_FSL_DPA=y +CONFIG_FSL_DPA_1588=m +CONFIG_FSL_DPA_CAN_WAIT=y +CONFIG_FSL_DPA_CAN_WAIT_SYNC=y +# CONFIG_FSL_DPA_CHECKING is not set +CONFIG_FSL_DPA_HAVE_IRQ=y +CONFIG_FSL_DPA_PIRQ_FAST=y +CONFIG_FSL_DPA_PIRQ_SLOW=y +CONFIG_FSL_DPA_PORTAL_SHARE=y +CONFIG_FSL_DPA_UIO=m +CONFIG_FSL_EMB_PERFMON=y +CONFIG_FSL_EMB_PERF_EVENT=y +CONFIG_FSL_EMB_PERF_EVENT_E500=y +# CONFIG_FSL_FMAN_TEST is not set +CONFIG_FSL_HV_MANAGER=m +CONFIG_FSL_IFC=y +CONFIG_FSL_LBC=y +# CONFIG_FSL_PAMU is not set +CONFIG_FSL_PCI=y +CONFIG_FSL_PME2=y +CONFIG_FSL_PME2_CTRL=y +CONFIG_FSL_PME2_DB=y +CONFIG_FSL_PME2_DB_QOSOUT_PRIORITY=2 +CONFIG_FSL_PME2_PDSRSIZE=131072 +CONFIG_FSL_PME2_PORTAL=y +CONFIG_FSL_PME2_SCAN=y +# CONFIG_FSL_PME2_SCAN_DEBUG is not set +CONFIG_FSL_PME2_SRESIZE=327680 +# CONFIG_FSL_PME2_SRE_AIM is not set +CONFIG_FSL_PME2_SRE_CNR=128 +CONFIG_FSL_PME2_SRE_CTX_SIZE_PER_SESSION=17 +# CONFIG_FSL_PME2_SRE_ESR is not 
set +CONFIG_FSL_PME2_SRE_MAX_BLOCK_NUMBER=32767 +CONFIG_FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT=65535 +CONFIG_FSL_PME2_STAT_ACCUMULATOR_UPDATE_INTERVAL=3400 +# CONFIG_FSL_PME2_TEST_HIGH is not set +# CONFIG_FSL_PME2_TEST_SCAN is not set +CONFIG_FSL_PQ_MDIO=m +CONFIG_FSL_QMAN=y +CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1=y +CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W=2 +CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W=2 +CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV=4 +CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W=3 +CONFIG_FSL_QMAN_CONFIG=y +CONFIG_FSL_QMAN_DEBUGFS=m +CONFIG_FSL_QMAN_DQRR_PREFETCHING=y +CONFIG_FSL_QMAN_FQD_SZ=9 +CONFIG_FSL_QMAN_NULL_FQ_DEMUX=y +CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH=12 +CONFIG_FSL_QMAN_PIRQ_IPERIOD=100 +CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH=4 +CONFIG_FSL_QMAN_POLL_LIMIT=32 +CONFIG_FSL_QMAN_PORTAL=y +# CONFIG_FSL_QMAN_PORTAL_DISABLEAUTO_DCA is not set +# CONFIG_FSL_QMAN_TEST is not set +CONFIG_FSL_SOC=y +CONFIG_FSL_SOC_BOOKE=y +CONFIG_FSL_USB2_OTG=m +CONFIG_FSL_XGMAC_MDIO=y +CONFIG_FSNOTIFY=y +CONFIG_FS_ENET=m +CONFIG_FS_ENET_HAS_FCC=y +CONFIG_FS_ENET_HAS_SCC=y +CONFIG_FS_ENET_MDIO_FCC=m +CONFIG_FS_MBCACHE=y +CONFIG_FS_POSIX_ACL=y +CONFIG_FT1000=m +CONFIG_FT1000_PCMCIA=m +CONFIG_FT1000_USB=m +CONFIG_FTGMAC100=m +CONFIG_FTL=m +CONFIG_FTMAC100=m +CONFIG_FTRACE=y +CONFIG_FTRACE_MCOUNT_RECORD=y +# CONFIG_FTRACE_STARTUP_TEST is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_FTR_FIXUP_SELFTEST is not set +CONFIG_FUJITSU_LAPTOP=m +# CONFIG_FUJITSU_LAPTOP_DEBUG is not set +CONFIG_FUJITSU_TABLET=m +CONFIG_FUNCTION_GRAPH_TRACER=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_FUSE_FS=y +CONFIG_FUSION=y +CONFIG_FUSION_CTL=m +CONFIG_FUSION_FC=m +CONFIG_FUSION_LAN=m +CONFIG_FUSION_LOGGING=y +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_SAS=m +CONFIG_FUSION_SPI=m +CONFIG_FUTEX=y +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_USER_HELPER=y +CONFIG_GACT_PROB=y +CONFIG_GADGET_UAC1=y +CONFIG_GAMEPORT=m +CONFIG_GAMEPORT_EMU10K1=m +CONFIG_GAMEPORT_FM801=m +CONFIG_GAMEPORT_L4=m +CONFIG_GAMEPORT_NS558=m +CONFIG_GARP=m +CONFIG_GART_IOMMU=y +# CONFIG_GCOV_KERNEL is not set +# CONFIG_GEF_WDT is not set +CONFIG_GELIC_NET=m +CONFIG_GELIC_WIRELESS=y +CONFIG_GENERIC_ACL=y +CONFIG_GENERIC_ADC_BATTERY=m +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_CPU=y +CONFIG_GENERIC_CPUFREQ_CPU0=y +# CONFIG_GENERIC_CPU_DEVICES is not set +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_GENERIC_FIND_FIRST_BIT=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IOMAP=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_GENERIC_NVRAM=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_PINCONF=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_TIME_VSYSCALL_OLD=y +CONFIG_GENERIC_TRACER=y +CONFIG_GEOS=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_GIANFAR=m +CONFIG_GIGASET_BASE=m +# CONFIG_GIGASET_CAPI is not set +# CONFIG_GIGASET_DEBUG is not set +# CONFIG_GIGASET_DUMMYLL is not set +CONFIG_GIGASET_I4L=y +CONFIG_GIGASET_M101=m +CONFIG_GIGASET_M105=m +CONFIG_GIRBIL_DONGLE=m +# 
CONFIG_GOOGLE_FIRMWARE is not set +CONFIG_GP2AP020A00F=m +CONFIG_GPIOLIB=y +CONFIG_GPIO_74X164=m +CONFIG_GPIO_ACPI=y +CONFIG_GPIO_ADNP=m +CONFIG_GPIO_ADP5520=m +CONFIG_GPIO_ADP5588=m +CONFIG_GPIO_AMD8111=m +CONFIG_GPIO_ARIZONA=m +CONFIG_GPIO_BCM_KONA=y +CONFIG_GPIO_CS5535=m +CONFIG_GPIO_DA9052=m +CONFIG_GPIO_DA9055=m +CONFIG_GPIO_DEVRES=y +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EM is not set +CONFIG_GPIO_F7188X=m +CONFIG_GPIO_GENERIC_PLATFORM=m +CONFIG_GPIO_GE_FPGA=y +CONFIG_GPIO_GRGPIO=m +CONFIG_GPIO_ICH=m +CONFIG_GPIO_INTEL_MID=y +CONFIG_GPIO_INTEL_PMIC=y +CONFIG_GPIO_IT8761E=m +CONFIG_GPIO_JANZ_TTL=m +CONFIG_GPIO_KEMPLD=m +CONFIG_GPIO_LYNXPOINT=y +CONFIG_GPIO_MAX7300=m +CONFIG_GPIO_MAX7301=m +CONFIG_GPIO_MAX730X=m +CONFIG_GPIO_MAX732X=m +CONFIG_GPIO_MC33880=m +CONFIG_GPIO_MCP23S08=m +CONFIG_GPIO_ML_IOH=m +CONFIG_GPIO_MPC8XXX=y +CONFIG_GPIO_MSIC=y +CONFIG_GPIO_MXC=y +CONFIG_GPIO_PALMAS=y +CONFIG_GPIO_PCA953X=m +CONFIG_GPIO_PCF857X=m +CONFIG_GPIO_PCH=m +CONFIG_GPIO_PL061=y +CONFIG_GPIO_RC5T583=y +CONFIG_GPIO_RCAR=m +CONFIG_GPIO_RDC321X=m +CONFIG_GPIO_SCH=m +CONFIG_GPIO_STMPE=y +CONFIG_GPIO_SX150X=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_TC3589X=y +CONFIG_GPIO_TIMBERDALE=y +CONFIG_GPIO_TPS6586X=y +CONFIG_GPIO_TPS65910=y +CONFIG_GPIO_TPS65912=m +CONFIG_GPIO_TS5500=m +CONFIG_GPIO_UCB1400=m +CONFIG_GPIO_VIPERBOARD=m +CONFIG_GPIO_VX855=m +CONFIG_GPIO_WM831X=m +CONFIG_GPIO_WM8350=m +CONFIG_GPIO_WM8994=m +CONFIG_GPIO_XILINX=y +CONFIG_GREENASIA_FF=y +CONFIG_HAMACHI=m +CONFIG_HAMRADIO=y +CONFIG_HANGCHECK_TIMER=m +CONFIG_HAPPYMEAL=m +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HAS_DMA=y +CONFIG_HAS_FSL_PME=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_KMEMCHECK=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARM_ARCH_TIMER=y +CONFIG_HAVE_ARM_SCU=y +CONFIG_HAVE_ARM_TWD=y +CONFIG_HAVE_ATOMIC_IOMAP=y +CONFIG_HAVE_BPF_JIT=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_BUGVERBOSE=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IMX_ANATOP=y +CONFIG_HAVE_IMX_GPC=y +CONFIG_HAVE_IMX_MMDC=y +CONFIG_HAVE_IMX_SRC=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y 
+CONFIG_HAVE_MEMORYLESS_NODES=y +CONFIG_HAVE_MEMORY_PRESENT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_PATA_PLATFORM=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_HAVE_RCU_TABLE_FREE=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SMP=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_UID16=y +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_HCALL_STATS is not set +CONFIG_HDLC=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_RAW_ETH=m +CONFIG_HDLC_X25=m +CONFIG_HDMI=y +CONFIG_HDQ_MASTER_OMAP=m +# CONFIG_HEADERS_CHECK is not set +CONFIG_HERMES=m +CONFIG_HERMES_CACHE_FW_ON_INIT=y +# CONFIG_HERMES_PRISM is not set +CONFIG_HFSPLUS_FS=m +CONFIG_HFSPLUS_FS_POSIX_ACL=y +CONFIG_HFS_FS=m +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HID=m +CONFIG_HIDRAW=y +CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HID_BELKIN=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EMS_FF=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GENERIC=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_GYRATION=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_HUION=m +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_ICADE=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO_TPKBD=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTRIG=m +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PID=y +CONFIG_HID_PRIMAX=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_RMI=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUB=m +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_SONY=m +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WACOM=m +CONFIG_HID_WALTOP=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HIGHMEM=y +# CONFIG_HIGHMEM4G is not set +CONFIG_HIGHMEM64G=y +CONFIG_HIGHPTE=y +CONFIG_HIGH_RES_TIMERS=y +# CONFIG_HIPPI is not set +CONFIG_HISAX_16_0=y +CONFIG_HISAX_16_3=y +CONFIG_HISAX_1TR6=y +CONFIG_HISAX_ASUSCOM=y +CONFIG_HISAX_AVM_A1=y +CONFIG_HISAX_AVM_A1_CS=m +CONFIG_HISAX_AVM_A1_PCMCIA=y +CONFIG_HISAX_BKM_A4T=y +# CONFIG_HISAX_DEBUG is not set +CONFIG_HISAX_DIEHLDIVA=y +CONFIG_HISAX_ELSA=y +CONFIG_HISAX_ELSA_CS=m +CONFIG_HISAX_ENTERNOW_PCI=y +CONFIG_HISAX_EURO=y +CONFIG_HISAX_FRITZPCI=y +CONFIG_HISAX_FRITZ_PCIPNP=m +CONFIG_HISAX_GAZEL=y +CONFIG_HISAX_HFC4S8S=m +CONFIG_HISAX_HFCS=y 
+CONFIG_HISAX_HFCUSB=m +CONFIG_HISAX_HFC_PCI=y +CONFIG_HISAX_HFC_SX=y +CONFIG_HISAX_HSTSAPHIR=y +CONFIG_HISAX_ISURF=y +CONFIG_HISAX_IX1MICROR2=y +CONFIG_HISAX_MAX_CARDS=8 +CONFIG_HISAX_MIC=y +CONFIG_HISAX_NETJET=y +CONFIG_HISAX_NETJET_U=y +CONFIG_HISAX_NI1=y +CONFIG_HISAX_NICCY=y +# CONFIG_HISAX_NO_KEYPAD is not set +# CONFIG_HISAX_NO_LLC is not set +# CONFIG_HISAX_NO_SENDCOMPLETE is not set +CONFIG_HISAX_S0BOX=y +CONFIG_HISAX_SCT_QUADRO=y +CONFIG_HISAX_SEDLBAUER=y +CONFIG_HISAX_SEDLBAUER_CS=m +CONFIG_HISAX_SPORTSTER=y +CONFIG_HISAX_ST5481=m +CONFIG_HISAX_TELEINT=y +CONFIG_HISAX_TELESPCI=y +CONFIG_HISAX_TELES_CS=m +CONFIG_HISAX_W6692=y +CONFIG_HMC6352=m +CONFIG_HOLTEK_FF=y +CONFIG_HOSTAP=m +CONFIG_HOSTAP_CS=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PCI=m +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTESS_SV11=m +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +CONFIG_HOTPLUG_PCI_COMPAQ=m +CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM=y +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m +CONFIG_HOTPLUG_PCI_IBM=m +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HOTPLUG_PCI_RPA=m +CONFIG_HOTPLUG_PCI_RPA_DLPAR=m +CONFIG_HP100=m +CONFIG_HPET=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_HPET_MMAP=y +CONFIG_HPET_MMAP_DEFAULT=y +CONFIG_HPET_TIMER=y +CONFIG_HPFS_FS=m +CONFIG_HPWDT_NMI_DECODING=y +CONFIG_HP_ACCEL=m +CONFIG_HP_ILO=m +CONFIG_HP_WATCHDOG=m +CONFIG_HP_WIRELESS=m +CONFIG_HP_WMI=m +CONFIG_HSI=m +CONFIG_HSI_BOARDINFO=y +CONFIG_HSI_CHAR=m +CONFIG_HSR=m +CONFIG_HTC_EGPIO=y +CONFIG_HTC_I2CPLD=y +CONFIG_HTC_PASIC3=m +CONFIG_HT_IRQ=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE_SIZE_VARIABLE=y +CONFIG_HVC_CONSOLE=y +# CONFIG_HVC_DCC is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_OPAL=y +CONFIG_HVC_RTAS=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set +CONFIG_HWMON_VID=m +CONFIG_HWPOISON_INJECT=m +# CONFIG_HWSPINLOCK_OMAP is not set +CONFIG_HW_CONSOLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_AMD=m +# CONFIG_HW_RANDOM_EXYNOS is not set +CONFIG_HW_RANDOM_GEODE=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_OMAP=m +CONFIG_HW_RANDOM_OMAP3_ROM=m +CONFIG_HW_RANDOM_PASEMI=y +CONFIG_HW_RANDOM_PSERIES=m +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_TPM=m +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HYPERV=m +CONFIG_HYPERVISOR_GUEST=y +CONFIG_HYPERV_BALLOON=m +CONFIG_HYPERV_KEYBOARD=m +CONFIG_HYPERV_NET=m +CONFIG_HYPERV_STORAGE=m +CONFIG_HYPERV_UTILS=m +CONFIG_HYSDN=m +CONFIG_HYSDN_CAPI=y +# CONFIG_HZ_200 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_500 is not set +CONFIG_HZ_FIXED=0 +# CONFIG_HZ_PERIODIC is not set +CONFIG_I2C=y +CONFIG_I2C_ALGOPCA=m +CONFIG_I2C_ALI1535=m +CONFIG_I2C_ALI1563=m +CONFIG_I2C_ALI15X3=m +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CBUS_GPIO=m +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CPM=m +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CORE is not set +CONFIG_I2C_DESIGNWARE_CORE=m +CONFIG_I2C_DESIGNWARE_PCI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_EG20T=m +CONFIG_I2C_GPIO=m +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_HID=m +CONFIG_I2C_HYDRA=m +CONFIG_I2C_I801=m +CONFIG_I2C_IMX=y +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_KEMPLD=m +CONFIG_I2C_MPC=m +CONFIG_I2C_MUX=m +CONFIG_I2C_MUX_GPIO=m 
+CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +CONFIG_I2C_NOMADIK=m +CONFIG_I2C_OCORES=m +CONFIG_I2C_OMAP=y +CONFIG_I2C_PARPORT=m +CONFIG_I2C_PARPORT_LIGHT=m +CONFIG_I2C_PASEMI=m +CONFIG_I2C_PCA_ISA=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_POWERMAC=y +# CONFIG_I2C_PXA_PCI is not set +CONFIG_I2C_RCAR=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_SH_MOBILE=m +CONFIG_I2C_SI470X=m +CONFIG_I2C_SI4713=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_SIS5595=m +CONFIG_I2C_SIS630=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_SMBUS=m +CONFIG_I2C_STUB=m +CONFIG_I2C_TAOS_EVM=m +CONFIG_I2C_TEGRA=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_XILINX=m +CONFIG_I2O=m +CONFIG_I2O_BLOCK=m +CONFIG_I2O_BUS=m +CONFIG_I2O_CONFIG=m +# CONFIG_I2O_CONFIG_OLD_IOCTL is not set +CONFIG_I2O_EXT_ADAPTEC=y +CONFIG_I2O_EXT_ADAPTEC_DMA64=y +CONFIG_I2O_LCT_NOTIFY_ON_CHANGES=y +CONFIG_I2O_PROC=m +CONFIG_I2O_SCSI=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_I40E_DCB=y +CONFIG_I40E_VXLAN=y +CONFIG_I6300ESB_WDT=m +CONFIG_I7300_IDLE=m +CONFIG_I7300_IDLE_IOAT_CHANNEL=y +CONFIG_I82092=m +CONFIG_I82365=m +CONFIG_I8253_LOCK=y +CONFIG_I8K=m +# CONFIG_IA32_AOUT is not set +CONFIG_IA32_EMULATION=y +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +CONFIG_IBMVIO=y +CONFIG_IBM_ASM=m +CONFIG_IBM_BSR=m +# CONFIG_IBM_EMAC_EMAC4 is not set +# CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_EMAC_MAL_COMMON_ERR is not set +# CONFIG_IBM_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_EMAC_RGMII is not set +# CONFIG_IBM_EMAC_TAH is not set +# CONFIG_IBM_EMAC_ZMII is not set +CONFIG_IBM_RTL=m +CONFIG_ICPLUS_PHY=y +CONFIG_ICS932S401=m +CONFIG_ICST=y +CONFIG_IDEAPAD_LAPTOP=m +CONFIG_IDEPCI_PCIBUS_ORDER=y +CONFIG_IDE_ATAPI=y +CONFIG_IDE_GD=y +CONFIG_IDE_GD_ATA=y +# CONFIG_IDE_GD_ATAPI is not set +CONFIG_IDE_PHISON=m +CONFIG_IDE_PROC_FS=y +# CONFIG_IDE_TASK_IOCTL is not set +CONFIG_IDE_TIMINGS=y +CONFIG_IDE_XFER_MODE=y +CONFIG_IE6XX_WDT=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_IEEE802154_AT86RF230=m +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKEHARD is not set +CONFIG_IEEE802154_FAKELB=m +CONFIG_IEEE802154_MRF24J40=m +CONFIG_IFB=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IGB_DCA=y +CONFIG_IGB_HWMON=y +CONFIG_IIO=m +CONFIG_IIO_ADIS_LIB=m +CONFIG_IIO_ADIS_LIB_BUFFER=y +CONFIG_IIO_BUFFER=y +CONFIG_IIO_BUFFER_CB=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +CONFIG_IIO_INTERRUPT_TRIGGER=m +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_PERIODIC_RTC_TRIGGER=m +CONFIG_IIO_SIMPLE_DUMMY=m +# CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set +# CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set +CONFIG_IIO_ST_ACCEL_3AXIS=m +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m +CONFIG_IIO_ST_GYRO_3AXIS=m +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m +CONFIG_IIO_ST_MAGN_3AXIS=m +CONFIG_IIO_ST_MAGN_I2C_3AXIS=m +CONFIG_IIO_ST_MAGN_SPI_3AXIS=m +CONFIG_IIO_ST_PRESS=m +CONFIG_IIO_ST_PRESS_I2C=m +CONFIG_IIO_ST_PRESS_SPI=m +CONFIG_IIO_ST_SENSORS_CORE=m +CONFIG_IIO_ST_SENSORS_I2C=m +CONFIG_IIO_ST_SENSORS_SPI=m +CONFIG_IIO_SYSFS_TRIGGER=m +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_TRIGGERED_BUFFER=m +CONFIG_IKCONFIG_PROC=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_DEFAULT_HASH="sha1" +CONFIG_IMA_DEFAULT_HASH_SHA1=y +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_WP512 is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +CONFIG_IMA_LSM_RULES=y +CONFIG_IMA_MEASURE_PCR_IDX=10 
+CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +# CONFIG_IMA_TEMPLATE is not set +# CONFIG_IMX2_WDT is not set +# CONFIG_IMX_DMA is not set +# CONFIG_IMX_SDMA is not set +CONFIG_IMX_THERMAL=m +CONFIG_IMX_WEIM=y +CONFIG_INET=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_INET6_TUNNEL=m +CONFIG_INET6_XFRM_MODE_BEET=m +CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m +CONFIG_INET6_XFRM_MODE_TRANSPORT=m +CONFIG_INET6_XFRM_MODE_TUNNEL=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET_AH=m +CONFIG_INET_DCCP_DIAG=m +CONFIG_INET_DIAG=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_LRO=y +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_XFRM_MODE_BEET=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_ADDR_TRANS=y +# CONFIG_INFINIBAND_AMSO1100_DEBUG is not set +CONFIG_INFINIBAND_CXGB3=m +# CONFIG_INFINIBAND_CXGB3_DEBUG is not set +CONFIG_INFINIBAND_CXGB4=m +CONFIG_INFINIBAND_EHCA=m +CONFIG_INFINIBAND_IPATH=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_NES=m +# CONFIG_INFINIBAND_NES_DEBUG is not set +CONFIG_INFINIBAND_OCRDMA=m +CONFIG_INFINIBAND_QIB=m +CONFIG_INFINIBAND_QIB_DCA=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFTL=m +CONFIG_INITRAMFS_SOURCE="" +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INOTIFY_USER=y +CONFIG_INPUT=y +CONFIG_INPUT_88PM80X_ONKEY=m +CONFIG_INPUT_88PM860X_ONKEY=m +CONFIG_INPUT_AD714X=m +CONFIG_INPUT_AD714X_I2C=m +CONFIG_INPUT_AD714X_SPI=m +CONFIG_INPUT_ADBHID=y +CONFIG_INPUT_ADXL34X=m +CONFIG_INPUT_ADXL34X_I2C=m +CONFIG_INPUT_ADXL34X_SPI=m +CONFIG_INPUT_APANEL=m +CONFIG_INPUT_APMPOWER=m +CONFIG_INPUT_ARIZONA_HAPTICS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_BMA150=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_CMA3000=m +CONFIG_INPUT_CMA3000_I2C=m +CONFIG_INPUT_DA9052_ONKEY=m +CONFIG_INPUT_DA9055_ONKEY=m +CONFIG_INPUT_EVBUG=m +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_GP2A=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_INPUT_GPIO_TILT_POLLED=m +CONFIG_INPUT_IMS_PCU=m +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_KXTJ9=m +# CONFIG_INPUT_KXTJ9_POLLED_MODE is not set +CONFIG_INPUT_MATRIXKMAP=m +CONFIG_INPUT_MAX8925_ONKEY=m +CONFIG_INPUT_MC13783_PWRBUTTON=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_MMA8450=m +CONFIG_INPUT_MOUSE=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_MPU3050=m +CONFIG_INPUT_PCAP=m +CONFIG_INPUT_PCF50633_PMU=m +CONFIG_INPUT_PCF8574=m +CONFIG_INPUT_PCSPKR=m +CONFIG_INPUT_POLLDEV=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_RETU_PWRBUTTON=m +CONFIG_INPUT_SPARSEKMAP=m +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_TWL4030_PWRBUTTON=m +CONFIG_INPUT_TWL4030_VIBRA=m +CONFIG_INPUT_TWL6040_VIBRA=m +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_WISTRON_BTNS=m +CONFIG_INPUT_WM831X_ON=m +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m 
+CONFIG_INPUT_YEALINK=m +CONFIG_INSTRUCTION_DECODER=y +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEL_IDLE=y +CONFIG_INTEL_IOATDMA=m +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_INTEL_IPS=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +CONFIG_INTEL_MENLOW=m +CONFIG_INTEL_MFLD_THERMAL=m +CONFIG_INTEL_MIC_CARD=m +CONFIG_INTEL_MIC_HOST=m +CONFIG_INTEL_MID_DMAC=m +CONFIG_INTEL_MID_POWER_BUTTON=m +CONFIG_INTEL_MID_PTI=m +CONFIG_INTEL_OAKTRAIL=m +CONFIG_INTEL_POWERCLAMP=m +CONFIG_INTEL_RAPL=m +CONFIG_INTEL_RST=m +CONFIG_INTEL_SCU_IPC=y +CONFIG_INTEL_SCU_IPC_UTIL=m +CONFIG_INTEL_SCU_WATCHDOG=y +CONFIG_INTEL_SMARTCONNECT=m +CONFIG_INTEL_TXT=y +CONFIG_INTERVAL_TREE_TEST=m +CONFIG_INV_MPU6050_IIO=m +CONFIG_IOMMU_API=y +# CONFIG_IOMMU_DEBUG is not set +CONFIG_IOMMU_HELPER=y +# CONFIG_IOMMU_STRESS is not set +CONFIG_IOSCHED_CFQ=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_NOOP=y +# CONFIG_IO_DELAY_0X80 is not set +CONFIG_IO_DELAY_0XED=y +# CONFIG_IO_DELAY_NONE is not set +CONFIG_IO_DELAY_TYPE_0X80=0 +CONFIG_IO_DELAY_TYPE_0XED=1 +CONFIG_IO_DELAY_TYPE_NONE=3 +CONFIG_IO_DELAY_TYPE_UDELAY=2 +# CONFIG_IO_DELAY_UDELAY is not set +CONFIG_IO_EVENT_IRQ=y +CONFIG_IP1000=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IPACK_BUS=m +CONFIG_IPC_NS=y +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +# CONFIG_IPIC is not set +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_HANDLER=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_POWEROFF=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPPP_FILTER=y +CONFIG_IPV6=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_NDISC_NODETYPE=y +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPW2100=m +# CONFIG_IPW2100_DEBUG is not set +CONFIG_IPW2100_MONITOR=y +CONFIG_IPW2200=m +# CONFIG_IPW2200_DEBUG is not set +CONFIG_IPW2200_MONITOR=y +CONFIG_IPW2200_PROMISCUOUS=y +CONFIG_IPW2200_QOS=y +CONFIG_IPW2200_RADIOTAP=y +CONFIG_IPWIRELESS=m +CONFIG_IPX=m +# CONFIG_IPX_INTERN is not set +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_DCCP=m +# CONFIG_IP_DCCP_CCID2_DEBUG is not set +# CONFIG_IP_DCCP_CCID3 is not set +# CONFIG_IP_DCCP_DEBUG is not set +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set +CONFIG_IP_MULTICAST=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m 
+CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_BOOTP is not set +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_RARP is not set +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_SCTP=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_VS=m +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SH_TAB_BITS=8 +CONFIG_IP_VS_TAB_BITS=12 +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_WRR=m +CONFIG_IRCOMM=m +CONFIG_IRDA=m +CONFIG_IRDA_CACHE_LAST_LSAP=y +# CONFIG_IRDA_DEBUG is not set +CONFIG_IRDA_FAST_RR=y +CONFIG_IRDA_ULTRA=y +CONFIG_IRLAN=m +CONFIG_IRNET=m +CONFIG_IRQCHIP=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_REMAP=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_IRQ_WORK=y +CONFIG_IRTTY_SIR=m +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_GPIO_CIR=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_LIRC_CODEC=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC5_SZ_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_ISAPNP=y +CONFIG_ISA_DMA_API=y +CONFIG_ISCSI_BOOT_SYSFS=m +CONFIG_ISCSI_IBFT=m +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TCP=m +CONFIG_ISDN=y +CONFIG_ISDN_AUDIO=y +CONFIG_ISDN_CAPI=m +CONFIG_ISDN_CAPI_CAPI20=m +CONFIG_ISDN_CAPI_CAPIDRV=m +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_ISDN_DIVAS=m +CONFIG_ISDN_DIVAS_BRIPCI=y +CONFIG_ISDN_DIVAS_DIVACAPI=m +CONFIG_ISDN_DIVAS_MAINT=m +CONFIG_ISDN_DIVAS_PRIPCI=y +CONFIG_ISDN_DIVAS_USERIDI=m +CONFIG_ISDN_DIVERSION=m +CONFIG_ISDN_DRV_ACT2000=m +CONFIG_ISDN_DRV_AVMB1_AVM_CS=m +CONFIG_ISDN_DRV_AVMB1_B1ISA=m +CONFIG_ISDN_DRV_AVMB1_B1PCI=m +CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y +CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m +CONFIG_ISDN_DRV_AVMB1_C4=m +CONFIG_ISDN_DRV_AVMB1_T1ISA=m +CONFIG_ISDN_DRV_AVMB1_T1PCI=m +CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON=y +CONFIG_ISDN_DRV_GIGASET=m +CONFIG_ISDN_DRV_HISAX=m +CONFIG_ISDN_DRV_ICN=m +CONFIG_ISDN_DRV_PCBIT=m +CONFIG_ISDN_DRV_SC=m +CONFIG_ISDN_HDLC=m +CONFIG_ISDN_I4L=m +CONFIG_ISDN_MPP=y +CONFIG_ISDN_PPP=y +CONFIG_ISDN_PPP_BSDCOMP=m +CONFIG_ISDN_PPP_VJ=y +CONFIG_ISDN_TTY_FAX=y +CONFIG_ISDN_X25=y +CONFIG_ISI=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_ISO9660_FS=m +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_ITCO_WDT=m +CONFIG_ITG3200=m +CONFIG_IWL3945=m +CONFIG_IWL4965=m +CONFIG_IWLDVM=m 
+CONFIG_IWLEGACY=m +# CONFIG_IWLEGACY_DEBUG is not set +CONFIG_IWLEGACY_DEBUGFS=y +CONFIG_IWLMVM=m +CONFIG_IWLWIFI=m +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEBUGFS=y +CONFIG_IWLWIFI_DEVICE_TRACING=y +CONFIG_IWLWIFI_OPMODE_MODULAR=y +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_HWMON=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_JFFS2_CMODE_FAVOURLZO=y +# CONFIG_JFFS2_CMODE_NONE is not set +# CONFIG_JFFS2_CMODE_PRIORITY is not set +# CONFIG_JFFS2_CMODE_SIZE is not set +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_XATTR is not set +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_JFFS2_SUMMARY is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFS_DEBUG is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_JFS_STATISTICS=y +CONFIG_JME=m +CONFIG_JOLIET=y +CONFIG_JOYSTICK_A3D=m +CONFIG_JOYSTICK_ADI=m +CONFIG_JOYSTICK_ANALOG=m +CONFIG_JOYSTICK_AS5011=m +CONFIG_JOYSTICK_COBRA=m +CONFIG_JOYSTICK_DB9=m +CONFIG_JOYSTICK_GAMECON=m +CONFIG_JOYSTICK_GF2K=m +CONFIG_JOYSTICK_GRIP=m +CONFIG_JOYSTICK_GRIP_MP=m +CONFIG_JOYSTICK_GUILLEMOT=m +CONFIG_JOYSTICK_IFORCE=m +CONFIG_JOYSTICK_IFORCE_232=y +CONFIG_JOYSTICK_IFORCE_USB=y +CONFIG_JOYSTICK_INTERACT=m +CONFIG_JOYSTICK_JOYDUMP=m +CONFIG_JOYSTICK_MAGELLAN=m +CONFIG_JOYSTICK_SIDEWINDER=m +CONFIG_JOYSTICK_SPACEBALL=m +CONFIG_JOYSTICK_SPACEORB=m +CONFIG_JOYSTICK_STINGER=m +CONFIG_JOYSTICK_TMDC=m +CONFIG_JOYSTICK_TURBOGRAFX=m +CONFIG_JOYSTICK_TWIDJOY=m +CONFIG_JOYSTICK_WALKERA0701=m +CONFIG_JOYSTICK_WARRIOR=m +CONFIG_JOYSTICK_XPAD=m +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_JOYSTICK_ZHENHUA=m +CONFIG_JUMP_LABEL=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KARMA_PARTITION=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KEMPLD_WDT=m +# CONFIG_KERNEL_BZIP2 is not set +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_LZO is not set +CONFIG_KERNEL_MODE_NEON=y +# CONFIG_KERNEL_XZ is not set +CONFIG_KEXEC=y +CONFIG_KEXEC_JUMP=y +CONFIG_KEYBOARD_ADP5520=m +CONFIG_KEYBOARD_ADP5588=m +CONFIG_KEYBOARD_ADP5589=m +CONFIG_KEYBOARD_ATKBD=y +CONFIG_KEYBOARD_CROS_EC=m +CONFIG_KEYBOARD_GPIO=m +CONFIG_KEYBOARD_GPIO_POLLED=m +# CONFIG_KEYBOARD_IMX is not set +CONFIG_KEYBOARD_LKKBD=m +CONFIG_KEYBOARD_LM8323=m +CONFIG_KEYBOARD_LM8333=m +CONFIG_KEYBOARD_MATRIX=m +CONFIG_KEYBOARD_MAX7359=m +CONFIG_KEYBOARD_MCS=m +CONFIG_KEYBOARD_MPR121=m +CONFIG_KEYBOARD_NEWTON=m +CONFIG_KEYBOARD_NVEC=m +CONFIG_KEYBOARD_OMAP4=m +CONFIG_KEYBOARD_OPENCORES=m +CONFIG_KEYBOARD_QT1070=m +CONFIG_KEYBOARD_QT2160=m +CONFIG_KEYBOARD_SAMSUNG=m +CONFIG_KEYBOARD_SH_KEYSC=m +CONFIG_KEYBOARD_STMPE=m +CONFIG_KEYBOARD_STOWAWAY=m +CONFIG_KEYBOARD_SUNKBD=m +CONFIG_KEYBOARD_TC3589X=m +CONFIG_KEYBOARD_TCA6416=m +CONFIG_KEYBOARD_TCA8418=m +CONFIG_KEYBOARD_TEGRA=m +CONFIG_KEYBOARD_TWL4030=m +CONFIG_KEYBOARD_XTKBD=m +CONFIG_KEYS=y +CONFIG_KEYS_COMPAT=y +CONFIG_KEYS_DEBUG_PROC_KEYS=y +CONFIG_KGDB=y +CONFIG_KGDB_KDB=y +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_SERIAL_CONSOLE=y +# CONFIG_KGDB_TESTS is not set +CONFIG_KINGSUN_DONGLE=m +CONFIG_KPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +# CONFIG_KPROBES_SANITY_TEST is not set +CONFIG_KPROBE_EVENT=y +CONFIG_KRETPROBES=y +CONFIG_KS0108=m +CONFIG_KS0108_DELAY=2 +CONFIG_KS0108_PORT=0x378 +CONFIG_KS8842=m +CONFIG_KS8851=m 
+CONFIG_KS8851_MLL=m +CONFIG_KS959_DONGLE=m +CONFIG_KSDAZZLE_DONGLE=m +CONFIG_KSZ884X_PCI=m +CONFIG_KTIME_SCALAR=y +CONFIG_KUSER_HELPERS=y +CONFIG_KVM_AMD=m +CONFIG_KVM_APIC_ARCHITECTURE=y +CONFIG_KVM_ARM_HOST=y +CONFIG_KVM_ARM_MAX_VCPUS=4 +CONFIG_KVM_ARM_TIMER=y +CONFIG_KVM_ARM_VGIC=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_KVM_BOOK3S_64_HANDLER=y +# CONFIG_KVM_BOOK3S_64_HV is not set +CONFIG_KVM_BOOK3S_64_PR=m +CONFIG_KVM_BOOK3S_HANDLER=y +CONFIG_KVM_BOOK3S_PR_POSSIBLE=y +CONFIG_KVM_BOOKE_HV=y +CONFIG_KVM_DEBUG_FS=y +CONFIG_KVM_DEVICE_ASSIGNMENT=y +CONFIG_KVM_E500MC=y +# CONFIG_KVM_EXIT_TIMING is not set +CONFIG_KVM_GUEST=y +CONFIG_KVM_INTEL=m +CONFIG_KVM_MMIO=y +# CONFIG_KVM_MMU_AUDIT is not set +CONFIG_KVM_MPIC=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_XICS=y +CONFIG_KXSD9=m +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_ETH=m +CONFIG_L2TP_IP=m +CONFIG_L2TP_V3=y +CONFIG_LANCE=m +CONFIG_LANMEDIA=m +CONFIG_LAPB=m +CONFIG_LAPBETHER=m +CONFIG_LATENCYTOP=y +CONFIG_LATTICE_ECP3_CONFIG=m +CONFIG_LBDAF=y +CONFIG_LCD_AMS369FG06=m +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_HX8357=m +CONFIG_LCD_ILI922X=m +CONFIG_LCD_ILI9320=m +CONFIG_LCD_L4F00242T03=m +CONFIG_LCD_LD9040=m +CONFIG_LCD_LMS283GF05=m +CONFIG_LCD_LMS501KF03=m +CONFIG_LCD_LTV350QV=m +CONFIG_LCD_PLATFORM=m +CONFIG_LCD_S6E63M0=m +CONFIG_LCD_TDO24M=m +CONFIG_LCD_VGG2432A4=m +# CONFIG_LDM_DEBUG is not set +CONFIG_LDM_PARTITION=y +CONFIG_LEDS_88PM860X=m +CONFIG_LEDS_ADP5520=m +CONFIG_LEDS_ASIC3=y +CONFIG_LEDS_ATMEL_PWM=m +CONFIG_LEDS_BD2802=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLEVO_MAIL=m +CONFIG_LEDS_DA903X=m +CONFIG_LEDS_DA9052=m +CONFIG_LEDS_DAC124S085=m +CONFIG_LEDS_DELL_NETBOOKS=m +CONFIG_LEDS_GPIO=m +CONFIG_LEDS_INTEL_SS4200=m +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LM3533=m +CONFIG_LEDS_LM355x=m +CONFIG_LEDS_LM3642=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_LP5521=m +CONFIG_LEDS_LP5523=m +CONFIG_LEDS_LP5562=m +CONFIG_LEDS_LP55XX_COMMON=m +CONFIG_LEDS_LP8501=m +CONFIG_LEDS_LP8788=m +CONFIG_LEDS_LT3593=m +CONFIG_LEDS_MAX8997=m +CONFIG_LEDS_MC13783=m +CONFIG_LEDS_NET48XX=m +CONFIG_LEDS_OT200=m +CONFIG_LEDS_PCA9532=m +CONFIG_LEDS_PCA9532_GPIO=y +CONFIG_LEDS_PCA955X=m +CONFIG_LEDS_PCA963X=m +CONFIG_LEDS_PCA9685=m +CONFIG_LEDS_PWM=m +CONFIG_LEDS_REGULATOR=m +CONFIG_LEDS_TCA6507=m +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_GPIO=m +# CONFIG_LEDS_TRIGGER_IDE_DISK is not set +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_WM831X_STATUS=m +CONFIG_LEDS_WM8350=m +CONFIG_LEDS_WRAP=m +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=0 +# CONFIG_LGUEST is not set +# CONFIG_LGUEST_GUEST is not set +CONFIG_LIB80211=m +CONFIG_LIB80211_CRYPT_CCMP=m +CONFIG_LIB80211_CRYPT_TKIP=m +CONFIG_LIB80211_CRYPT_WEP=m +# CONFIG_LIB80211_DEBUG is not set +CONFIG_LIBCRC32C=m +CONFIG_LIBERTAS=m +CONFIG_LIBERTAS_CS=m +# CONFIG_LIBERTAS_DEBUG is not set +CONFIG_LIBERTAS_SDIO=m +CONFIG_LIBERTAS_SPI=m +CONFIG_LIBERTAS_THINFIRM=m +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set +CONFIG_LIBERTAS_THINFIRM_USB=m +CONFIG_LIBERTAS_USB=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_LIBFDT=y +CONFIG_LIBIPW=m +# CONFIG_LIBIPW_DEBUG is not set +CONFIG_LINE6_USB=m +# CONFIG_LINE6_USB_IMPULSE_RESPONSE is not set +CONFIG_LIRC=m +CONFIG_LIRC_BT829=m +CONFIG_LIRC_IGORPLUGUSB=m +CONFIG_LIRC_IMON=m +CONFIG_LIRC_PARALLEL=m +CONFIG_LIRC_SASEM=m +CONFIG_LIRC_SERIAL=m +CONFIG_LIRC_SERIAL_TRANSMITTER=y +CONFIG_LIRC_SIR=m 
+CONFIG_LIRC_STAGING=y +CONFIG_LIRC_ZILOG=m +CONFIG_LITELINK_DONGLE=m +# CONFIG_LKDTM is not set +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_LNET=m +CONFIG_LNET_MAX_PAYLOAD=1048576 +CONFIG_LNET_SELFTEST=m +CONFIG_LNET_XPRT_IB=m +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_LOCKD=m +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +# CONFIG_LOCK_STAT is not set +# CONFIG_LOGFS is not set +CONFIG_LOGIG940_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIWHEELS_FF=y +# CONFIG_LOGO is not set +CONFIG_LOOPBACK_TARGET=m +CONFIG_LOWMEM_CAM_NUM=3 +CONFIG_LOWMEM_SIZE=0x30000000 +CONFIG_LP8788_ADC=y +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +# CONFIG_LP_CONSOLE is not set +CONFIG_LRU_CACHE=m +CONFIG_LSI_ET1011C_PHY=y +CONFIG_LSM_MMAP_MIN_ADDR=0 +CONFIG_LTE_GDM724X=m +CONFIG_LTPC=m +# CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK is not set +CONFIG_LUSTRE_FS=m +CONFIG_LUSTRE_LLITE_LLOOP=y +CONFIG_LUSTRE_OBD_MAX_IOCTL_BUFFER=8192 +CONFIG_LUSTRE_TRANSLATE_ERRNOS=y +CONFIG_LXT_PHY=y +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +# CONFIG_M486 is not set +# CONFIG_M586 is not set +# CONFIG_M586MMX is not set +# CONFIG_M586TSC is not set +CONFIG_M686=y +CONFIG_MA600_DONGLE=m +CONFIG_MAC80211=m +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_HWSIM=m +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_MESH=y +CONFIG_MAC80211_MESSAGE_TRACING=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +# CONFIG_MAC80211_RC_DEFAULT_PID is not set +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_MINSTREL_HT=y +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC802154=m +CONFIG_MACB=m +CONFIG_MACE=m +# CONFIG_MACE_AAUI_PORT is not set +CONFIG_MACHZ_WDT=m +# CONFIG_MACH_CM_T35 is not set +CONFIG_MACH_CM_T3517=y +# CONFIG_MACH_CRANEBOARD is not set +# CONFIG_MACH_DEVKIT8000 is not set +# CONFIG_MACH_EUKREA_CPUIMX51SD is not set +# CONFIG_MACH_IMX51_DT is not set +CONFIG_MACH_KZM9D=y +# CONFIG_MACH_MX51_BABBAGE is not set +# CONFIG_MACH_NOKIA_RX51 is not set +# CONFIG_MACH_OMAP3517EVM is not set +CONFIG_MACH_OMAP3530_LV_SOM=y +CONFIG_MACH_OMAP3_BEAGLE=y +CONFIG_MACH_OMAP3_PANDORA=y +CONFIG_MACH_OMAP3_TORPEDO=y +CONFIG_MACH_OMAP_3430SDP=y +CONFIG_MACH_OMAP_GENERIC=y +CONFIG_MACH_OMAP_LDP=y +CONFIG_MACH_OVERO=y +CONFIG_MACH_SBC3530=y +CONFIG_MACH_TI8148EVM=y +CONFIG_MACH_TI8168EVM=y +CONFIG_MACH_TOUCHBOOK=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_MAC_EMUMOUSEBTN=m +CONFIG_MAC_FLOPPY=m +CONFIG_MAC_PARTITION=y +CONFIG_MAG3110=m +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAILBOX=y +CONFIG_MANTIS_CORE=m +CONFIG_MARVELL_PHY=y +# CONFIG_MATH_EMULATION_FULL is not set +CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y +# CONFIG_MATOM is not set +CONFIG_MAX1363=m +CONFIG_MAX517=m +CONFIG_MAX63XX_WATCHDOG=m +CONFIG_MAX8925_POWER=m +# CONFIG_MAXSMP is not set +CONFIG_MAX_RAW_DEVS=256 +# CONFIG_MCORE2 is not set +CONFIG_MCP2120_DONGLE=m +CONFIG_MCP320X=m +CONFIG_MCP3422=m +CONFIG_MCP4725=m +CONFIG_MCPM=y +# CONFIG_MCRUSOE is not set +CONFIG_MCS_FIR=m +# CONFIG_MCYRIXIII is not set +CONFIG_MD=y +CONFIG_MDA_CONSOLE=m +CONFIG_MDIO=m +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_BUS_MUX=y +CONFIG_MDIO_BUS_MUX_GPIO=y +CONFIG_MDIO_BUS_MUX_MMIOREG=y +CONFIG_MDIO_GPIO=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_FAULTY=m +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m 
+CONFIG_MEDIA_ALTERA_CI=m +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_ATTACH=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_COMMON_OPTIONS=y +# CONFIG_MEDIA_CONTROLLER is not set +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_PARPORT_SUPPORT=y +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_RC_SUPPORT=y +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_TUNER=m +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_XC5000=m +CONFIG_MEDIA_USB_SUPPORT=y +# CONFIG_MEFFICEON is not set +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_SAS=m +# CONFIG_MELAN is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_MEMCG_SWAP=y +# CONFIG_MEMCG_SWAP_ENABLED is not set +CONFIG_MEMORY=y +CONFIG_MEMORY_FAILURE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_SPARSE=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_TIFM_MS=m +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MEMTEST=y +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_MEN_A21_WDT=m +CONFIG_MFD_88PM800=m +CONFIG_MFD_88PM805=m +CONFIG_MFD_88PM860X=y +CONFIG_MFD_AAT2870_CORE=y +CONFIG_MFD_ARIZONA=y +CONFIG_MFD_ARIZONA_I2C=m +CONFIG_MFD_ARIZONA_SPI=m +CONFIG_MFD_AS3711=y +CONFIG_MFD_AS3722=y +CONFIG_MFD_ASIC3=y +CONFIG_MFD_CORE=y +CONFIG_MFD_CROS_EC=m +CONFIG_MFD_CROS_EC_I2C=m +CONFIG_MFD_CROS_EC_SPI=m +CONFIG_MFD_CS5535=m +CONFIG_MFD_DA9052_I2C=y +CONFIG_MFD_DA9052_SPI=y +CONFIG_MFD_DA9055=y +CONFIG_MFD_DA9063=y +CONFIG_MFD_INTEL_MSIC=y +CONFIG_MFD_JANZ_CMODIO=m +CONFIG_MFD_KEMPLD=m +CONFIG_MFD_LM3533=m +CONFIG_MFD_LP8788=y +CONFIG_MFD_MAX77686=y +CONFIG_MFD_MAX77693=y +CONFIG_MFD_MAX8907=m +CONFIG_MFD_MAX8925=y +CONFIG_MFD_MAX8997=y +CONFIG_MFD_MAX8998=y +CONFIG_MFD_MC13783=m +CONFIG_MFD_MC13XXX=m +CONFIG_MFD_MC13XXX_I2C=m +CONFIG_MFD_MC13XXX_SPI=m +CONFIG_MFD_NVEC=m +CONFIG_MFD_OMAP_USB_HOST=y +CONFIG_MFD_PALMAS=y +CONFIG_MFD_PCF50633=m +CONFIG_MFD_RC5T583=y +CONFIG_MFD_RDC321X=m +CONFIG_MFD_RETU=m +CONFIG_MFD_RTSX_PCI=m +CONFIG_MFD_SEC_CORE=y +CONFIG_MFD_SI476X_CORE=m +CONFIG_MFD_SM501=m +# CONFIG_MFD_SM501_GPIO is not set +CONFIG_MFD_SMSC=y +CONFIG_MFD_STMPE=y +CONFIG_MFD_SYSCON=y +CONFIG_MFD_T7L66XB=y +CONFIG_MFD_TC3589X=y +CONFIG_MFD_TC6387XB=y +CONFIG_MFD_TC6393XB=y +CONFIG_MFD_TIMBERDALE=m +CONFIG_MFD_TPS65090=y +CONFIG_MFD_TPS65217=m +CONFIG_MFD_TPS6586X=y +CONFIG_MFD_TPS65910=y +CONFIG_MFD_TPS65912=y +CONFIG_MFD_TPS65912_I2C=y +CONFIG_MFD_TPS65912_SPI=y +CONFIG_MFD_TPS80031=y +CONFIG_MFD_TWL4030_AUDIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_VX855=m 
+CONFIG_MFD_WL1273_CORE=m +CONFIG_MFD_WM5102=y +CONFIG_MFD_WM5110=y +CONFIG_MFD_WM831X=y +CONFIG_MFD_WM831X_I2C=y +CONFIG_MFD_WM831X_SPI=y +CONFIG_MFD_WM8350=y +CONFIG_MFD_WM8350_I2C=y +CONFIG_MFD_WM8400=y +CONFIG_MFD_WM8994=y +CONFIG_MFD_WM8997=y +# CONFIG_MGEODEGX1 is not set +# CONFIG_MGEODE_LX is not set +CONFIG_MG_DISK=m +CONFIG_MG_DISK_RES=0 +CONFIG_MICREL_KS8995MA=m +CONFIG_MICREL_PHY=y +CONFIG_MICROCODE=y +CONFIG_MICROCODE_AMD=y +CONFIG_MICROCODE_AMD_EARLY=y +CONFIG_MICROCODE_EARLY=y +CONFIG_MICROCODE_INTEL=y +CONFIG_MICROCODE_INTEL_EARLY=y +CONFIG_MICROCODE_INTEL_LIB=y +CONFIG_MICROCODE_OLD_INTERFACE=y +CONFIG_MIGHT_HAVE_CACHE_L2X0=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_MIGRATION=y +CONFIG_MINIX_FS=m +CONFIG_MINIX_SUBPARTITION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MISDN=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_W6692=m +CONFIG_MIXCOMWD=m +# CONFIG_MK6 is not set +# CONFIG_MK7 is not set +# CONFIG_MK8 is not set +CONFIG_MKISS=m +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_EN_VXLAN=y +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_BLOCK_BOUNCE=y +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_MMC_CB710=m +# CONFIG_MMC_CLKGATE is not set +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_DW=m +CONFIG_MMC_DW_EXYNOS=m +# CONFIG_MMC_DW_IDMAC is not set +CONFIG_MMC_DW_PCI=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_SOCFPGA=m +# CONFIG_MMC_MXC is not set +CONFIG_MMC_OMAP=m +CONFIG_MMC_OMAP_HS=y +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER=y +CONFIG_MMC_SDHCI_ESDHC_IMX=y +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_OF_ESDHC=m +CONFIG_MMC_SDHCI_OF_HLWD=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_PXAV2=m +CONFIG_MMC_SDHCI_PXAV3=m +CONFIG_MMC_SDHCI_TEGRA=m +CONFIG_MMC_SDRICOH_CS=m +CONFIG_MMC_SPI=m +# CONFIG_MMC_TEST is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_TMIO=m +CONFIG_MMC_TMIO_CORE=m +# CONFIG_MMC_UNSAFE_RESUME is not set +CONFIG_MMC_USHC=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_WBSD=m +CONFIG_MMIOTRACE=y +# CONFIG_MMIOTRACE_TEST is not set +CONFIG_MMU=y +CONFIG_MMU_NOTIFIER=y +CONFIG_MM_OWNER=y +CONFIG_MODULES=y +CONFIG_MODULES_USE_ELF_REL=y +CONFIG_MODULES_USE_ELF_RELA=y +# CONFIG_MODULE_FORCE_LOAD is not set +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_HASH="sha512" +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +# CONFIG_MODULE_SIG_SHA256 is not set +# CONFIG_MODULE_SIG_SHA384 is not set +CONFIG_MODULE_SIG_SHA512=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_GPIO=m +# CONFIG_MOUSE_INPORT is not set +CONFIG_MOUSE_LOGIBM=m +CONFIG_MOUSE_PC110PAD=m +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_TOUCHKIT=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_MOUSE_VSXXXAA=m 
+CONFIG_MOVABLE_NODE=y +CONFIG_MOXA_INTELLIO=m +CONFIG_MOXA_SMARTIO=m +# CONFIG_MPENTIUM4 is not set +# CONFIG_MPENTIUMII is not set +# CONFIG_MPENTIUMIII is not set +# CONFIG_MPENTIUMM is not set +CONFIG_MPIC=y +CONFIG_MPIC_BROKEN_REGREAD=y +# CONFIG_MPIC_TIMER is not set +# CONFIG_MPIC_WEIRD is not set +CONFIG_MPILIB=y +# CONFIG_MPSC is not set +CONFIG_MRP=m +CONFIG_MSDOS_FS=m +CONFIG_MSDOS_PARTITION=y +# CONFIG_MSI_BITMAP_SELFTEST is not set +CONFIG_MSI_LAPTOP=m +CONFIG_MSI_WMI=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MS_BLOCK=m +CONFIG_MTDRAM_ERASE_SIZE=128 +CONFIG_MTDRAM_TOTAL_SIZE=4096 +CONFIG_MTD_ABSENT=m +CONFIG_MTD_AFS_PARTS=m +CONFIG_MTD_AMD76XROM=m +CONFIG_MTD_AR7_PARTS=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_BLOCK_RO=m +CONFIG_MTD_CFI=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +CONFIG_MTD_CK804XROM=m +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_DATAFLASH=m +CONFIG_MTD_DATAFLASH_OTP=y +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set +CONFIG_MTD_DOCG3=m +CONFIG_MTD_ESB2ROM=m +CONFIG_MTD_GEN_PROBE=m +CONFIG_MTD_GPIO_ADDR=m +CONFIG_MTD_ICHXROM=m +CONFIG_MTD_IMPA7=m +CONFIG_MTD_INTEL_VR_NOR=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_L440GX=m +CONFIG_MTD_LATCH_ADDR=m +CONFIG_MTD_LPDDR=m +CONFIG_MTD_M25P80=m +CONFIG_MTD_MAP_BANK_WIDTH_1=y +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +CONFIG_MTD_MAP_BANK_WIDTH_2=y +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +CONFIG_MTD_MTDRAM=m +CONFIG_MTD_NAND_CAFE=m +CONFIG_MTD_NAND_CS553X=m +CONFIG_MTD_NAND_DENALI=m +CONFIG_MTD_NAND_DENALI_DT=m +CONFIG_MTD_NAND_DENALI_PCI=m +CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018 +CONFIG_MTD_NAND_DISKONCHIP=m +# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set +CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0 +# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set +CONFIG_MTD_NAND_DOCG4=m +CONFIG_MTD_NAND_ECC_BCH=y +# CONFIG_MTD_NAND_ECC_SMC is not set +CONFIG_MTD_NAND_FSL_ELBC=m +CONFIG_MTD_NAND_FSL_IFC=m +CONFIG_MTD_NAND_FSL_UPM=m +CONFIG_MTD_NAND_GPIO=m +# CONFIG_MTD_NAND_MXC is not set +CONFIG_MTD_NAND_NANDSIM=m +CONFIG_MTD_NAND_OMAP2=y +CONFIG_MTD_NAND_OMAP_BCH=y +CONFIG_MTD_NAND_PASEMI=m +CONFIG_MTD_NAND_PLATFORM=m +CONFIG_MTD_NAND_RICOH=m +CONFIG_MTD_NAND_SOCRATES=m +CONFIG_MTD_NAND_TMIO=m +CONFIG_MTD_NETSC520=m +CONFIG_MTD_NETtel=m +CONFIG_MTD_ONENAND=m +CONFIG_MTD_ONENAND_2X_PROGRAM=y +CONFIG_MTD_ONENAND_GENERIC=m +CONFIG_MTD_ONENAND_OMAP2=m +# CONFIG_MTD_ONENAND_OTP is not set +CONFIG_MTD_ONENAND_VERIFY_WRITE=y +CONFIG_MTD_OOPS=m +CONFIG_MTD_PCI=m +CONFIG_MTD_PCMCIA=m +# CONFIG_MTD_PCMCIA_ANONYMOUS is not set +CONFIG_MTD_PHRAM=m +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=m +CONFIG_MTD_PLATRAM=m +CONFIG_MTD_PMC551=m +# CONFIG_MTD_PMC551_BUGFIX is not set +# CONFIG_MTD_PMC551_DEBUG is not set +CONFIG_MTD_QINFO_PROBE=m +CONFIG_MTD_RAM=m +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 +CONFIG_MTD_REDBOOT_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set +CONFIG_MTD_ROM=m +CONFIG_MTD_SBC_GXX=m +CONFIG_MTD_SC520CDP=m +CONFIG_MTD_SCB2_FLASH=m +CONFIG_MTD_SCx200_DOCFLASH=m +CONFIG_MTD_SLRAM=m +CONFIG_MTD_SPINAND_MT29F=m +CONFIG_MTD_SPINAND_ONDIEECC=y +CONFIG_MTD_SST25L=m +CONFIG_MTD_SWAP=m +# CONFIG_MTD_TESTS is not set +CONFIG_MTD_TS5500=m +CONFIG_MTD_UBI=m 
+CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_FASTMAP=y +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_MULTI_IRQ_HANDLER=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_MV643XX_ETH=m +# CONFIG_MVIAC3_2 is not set +# CONFIG_MVIAC7 is not set +CONFIG_MVMDIO=m +CONFIG_MWAVE=m +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWINCHIP3D is not set +# CONFIG_MWINCHIPC6 is not set +CONFIG_MWL8K=m +CONFIG_MX3_IPU=y +CONFIG_MX3_IPU_IRQS=4 +# CONFIG_MXC_DEBUG_BOARD is not set +# CONFIG_MXC_IRQ_PRIOR is not set +CONFIG_MXM_WMI=m +# CONFIG_MXS_DMA is not set +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +CONFIG_N2=m +CONFIG_NAMESPACES=y +CONFIG_NATIONAL_PHY=y +CONFIG_NATSEMI=m +CONFIG_NAU7802=m +CONFIG_NCPFS_EXTRAS=y +CONFIG_NCPFS_IOCTL_LOCKING=y +CONFIG_NCPFS_NFS_NS=y +CONFIG_NCPFS_NLS=y +CONFIG_NCPFS_OS2_NS=y +CONFIG_NCPFS_PACKET_SIGNING=y +# CONFIG_NCPFS_SMALLDOS is not set +CONFIG_NCPFS_STRONG=y +CONFIG_NCP_FS=m +CONFIG_NE2000=m +CONFIG_NE2K_PCI=m +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEON=y +CONFIG_NET=y +CONFIG_NET5501=y +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_QUEUE_CT=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NETFILTER_XTABLES=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m 
+CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETLABEL=y +CONFIG_NETLINK_DIAG=m +CONFIG_NETLINK_MMAP=y +CONFIG_NETPOLL=y +# CONFIG_NETPOLL_TRAP is not set +CONFIG_NETPRIO_CGROUP=m +CONFIG_NETROM=m +CONFIG_NETWORK_FILESYSTEMS=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETWORK_SECMARK=y +CONFIG_NETXEN_NIC=m +CONFIG_NET_9P=m +# CONFIG_NET_9P_DEBUG is not set +CONFIG_NET_9P_RDMA=m +CONFIG_NET_9P_VIRTIO=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_CADENCE=y +CONFIG_NET_CALXEDA_XGMAC=m +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_FW=m +# CONFIG_NET_CLS_IND is not set +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CORE=y +CONFIG_NET_DCCPPROBE=m +# CONFIG_NET_DROP_MONITOR is not set +CONFIG_NET_DSA=m +CONFIG_NET_DSA_MV88E6060=m +CONFIG_NET_DSA_MV88E6123_61_65=m +CONFIG_NET_DSA_MV88E6131=m +CONFIG_NET_DSA_MV88E6XXX=m +CONFIG_NET_DSA_MV88E6XXX_NEED_PPU=y +CONFIG_NET_DSA_TAG_DSA=y +CONFIG_NET_DSA_TAG_EDSA=y +CONFIG_NET_DSA_TAG_TRAILER=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CANID=m +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_FC=y +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPIP=m +CONFIG_NET_IPVTI=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_KEY=m +# CONFIG_NET_KEY_MIGRATE is not set +CONFIG_NET_MPLS_GSO=m +CONFIG_NET_NS=y +CONFIG_NET_PACKET_ENGINE=y +CONFIG_NET_PKTGEN=m +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_NET_SB1000=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_ATM=m +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_FIFO=y +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCTPPROBE=m +CONFIG_NET_TCPPROBE=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_VENDOR_3COM=y +CONFIG_NET_VENDOR_8390=y +CONFIG_NET_VENDOR_ADAPTEC=y +CONFIG_NET_VENDOR_ALTEON=y +CONFIG_NET_VENDOR_AMD=y 
+CONFIG_NET_VENDOR_APPLE=y +CONFIG_NET_VENDOR_ARC=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_NET_VENDOR_BROADCOM=y +CONFIG_NET_VENDOR_BROCADE=y +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_NET_VENDOR_CIRRUS=y +CONFIG_NET_VENDOR_CISCO=y +CONFIG_NET_VENDOR_DEC=y +CONFIG_NET_VENDOR_DLINK=y +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_NET_VENDOR_EXAR=y +CONFIG_NET_VENDOR_FARADAY=y +CONFIG_NET_VENDOR_FREESCALE=y +CONFIG_NET_VENDOR_FUJITSU=y +CONFIG_NET_VENDOR_HP=y +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_IBM=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_NET_VENDOR_MARVELL=y +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_NET_VENDOR_MICREL=y +CONFIG_NET_VENDOR_MICROCHIP=y +CONFIG_NET_VENDOR_MYRI=y +CONFIG_NET_VENDOR_NATSEMI=y +CONFIG_NET_VENDOR_NVIDIA=y +CONFIG_NET_VENDOR_OKI=y +CONFIG_NET_VENDOR_PASEMI=y +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_NET_VENDOR_RDC=y +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_NET_VENDOR_SEEQ=y +CONFIG_NET_VENDOR_SILAN=y +CONFIG_NET_VENDOR_SILICOM=y +CONFIG_NET_VENDOR_SIS=y +CONFIG_NET_VENDOR_SMSC=y +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_NET_VENDOR_SUN=y +CONFIG_NET_VENDOR_TEHUTI=y +CONFIG_NET_VENDOR_TI=y +CONFIG_NET_VENDOR_TOSHIBA=y +CONFIG_NET_VENDOR_VIA=y +CONFIG_NET_VENDOR_WIZNET=y +CONFIG_NET_VENDOR_XILINX=y +CONFIG_NET_VENDOR_XIRCOM=y +CONFIG_NET_XGENE=m +CONFIG_NEW_LEDS=y +CONFIG_NFC=m +CONFIG_NFC_DIGITAL=m +CONFIG_NFC_HCI=m +CONFIG_NFC_MEI_PHY=m +CONFIG_NFC_MICROREAD=m +CONFIG_NFC_MICROREAD_I2C=m +CONFIG_NFC_MICROREAD_MEI=m +CONFIG_NFC_NCI=m +CONFIG_NFC_NCI_SPI=y +CONFIG_NFC_PN533=m +CONFIG_NFC_PN544=m +CONFIG_NFC_PN544_I2C=m +CONFIG_NFC_PN544_MEI=m +CONFIG_NFC_PORT100=m +CONFIG_NFC_SHDLC=y +CONFIG_NFC_SIM=m +CONFIG_NFC_WILINK=m +CONFIG_NFSD=m +# CONFIG_NFSD_FAULT_INJECTION is not set +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_USE_KERNEL_DNS=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_V2=m +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFTL=m +CONFIG_NFTL_RW=y +CONFIG_NFT_CHAIN_NAT_IPV4=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CT=m +CONFIG_NFT_EXTHDR=m +CONFIG_NFT_HASH=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_META=m +CONFIG_NFT_NAT=m +CONFIG_NFT_RBTREE=m +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_PPTP=m +# CONFIG_NF_CONNTRACK_PROCFS is not set +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_GRE=m +CONFIG_NF_CT_PROTO_SCTP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m 
+CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_H323=m +CONFIG_NF_NAT_IPV4=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_PROTO_DCCP=m +CONFIG_NF_NAT_PROTO_GRE=m +CONFIG_NF_NAT_PROTO_SCTP=m +CONFIG_NF_NAT_PROTO_UDPLITE=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NI65=m +CONFIG_NILFS2_FS=m +CONFIG_NIU=m +# CONFIG_NL80211_TESTMODE is not set +CONFIG_NLATTR=y +CONFIG_NLMON=m +CONFIG_NLS=y +CONFIG_NLS_ASCII=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NODES_SPAN_OTHER_NODES=y +# CONFIG_NOHIGHMEM is not set +CONFIG_NOP_TRACER=y +CONFIG_NORTEL_HERMES=m +CONFIG_NOTIFIER_ERROR_INJECTION=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOZOMI=m +CONFIG_NO_BOOTMEM=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_NO_HZ_FULL_ALL is not set +CONFIG_NO_HZ_FULL_SYSIDLE=y +CONFIG_NO_HZ_FULL_SYSIDLE_SMALL=8 +CONFIG_NO_IOPORT=y +CONFIG_NR_IRQS=512 +CONFIG_NS83820=m +CONFIG_NSC_FIR=m +CONFIG_NSC_GPIO=m +CONFIG_NTB=m +CONFIG_NTB_NETDEV=m +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_FS=m +# CONFIG_NTFS_RW is not set +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +# CONFIG_NUMA_EMU is not set +CONFIG_NVEC_PAZ00=m +CONFIG_NVEC_POWER=m +CONFIG_NV_TCO=m +# CONFIG_N_GSM is not set +CONFIG_N_HDLC=m +# CONFIG_OABI_COMPAT is not set +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_OCFS2_DEBUG_MASKLOG=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OC_ETM=y +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_EXTCON=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IOMMU=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_MTD=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +# CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT is not set +# CONFIG_OF_SELFTEST is not set +CONFIG_OID_REGISTRY=y +CONFIG_OLD_BELKIN_DONGLE=m +CONFIG_OLD_SIGACTION=y +CONFIG_OLD_SIGSUSPEND=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_OMAP2PLUS_MBOX=m +CONFIG_OMAP2_DSS=y +# CONFIG_OMAP2_DSS_DEBUG is not set +# CONFIG_OMAP2_DSS_DEBUGFS is not set +CONFIG_OMAP2_DSS_DPI=y +# CONFIG_OMAP2_DSS_DSI is not set 
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0 +CONFIG_OMAP2_DSS_SDI=y +CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET=y +CONFIG_OMAP2_DSS_VENC=y +CONFIG_OMAP2_VRFB=y +CONFIG_OMAP3_EMU=y +# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set +# CONFIG_OMAP3_SDRC_AC_TIMING is not set +CONFIG_OMAP4_DSS_HDMI=y +CONFIG_OMAP4_DSS_HDMI_AUDIO=y +CONFIG_OMAP4_THERMAL=y +CONFIG_OMAP_32K_TIMER=y +CONFIG_OMAP_CONTROL_USB=y +CONFIG_OMAP_DM_TIMER=y +CONFIG_OMAP_INTERCONNECT=y +CONFIG_OMAP_IOMMU=y +# CONFIG_OMAP_IOVMM is not set +CONFIG_OMAP_MBOX=m +CONFIG_OMAP_MBOX_KFIFO_SIZE=256 +CONFIG_OMAP_MUX=y +# CONFIG_OMAP_MUX_DEBUG is not set +CONFIG_OMAP_MUX_WARNINGS=y +CONFIG_OMAP_OCP2SCP=m +CONFIG_OMAP_PACKAGE_CBB=y +CONFIG_OMAP_PACKAGE_CUS=y +CONFIG_OMAP_PM_NOOP=y +CONFIG_OMAP_REMOTEPROC=m +CONFIG_OMAP_RESET_CLOCKS=y +CONFIG_OMAP_USB2=y +CONFIG_OMAP_USB3=m +CONFIG_OMAP_WATCHDOG=m +CONFIG_OMFS_FS=m +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=y +CONFIG_OPENVSWITCH_VXLAN=y +CONFIG_OPROFILE=m +# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set +CONFIG_OPROFILE_NMI_TIMER=y +CONFIG_OPTIMIZE_INLINING=y +CONFIG_OPTPROBES=y +CONFIG_ORE=m +CONFIG_ORINOCO_USB=m +CONFIG_OSF_PARTITION=y +CONFIG_OUTER_CACHE=y +CONFIG_OUTER_CACHE_SYNC=y +CONFIG_OVERLAYFS_FS=m +CONFIG_P54_COMMON=m +CONFIG_P54_LEDS=y +CONFIG_P54_PCI=m +CONFIG_P54_SPI=m +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set +CONFIG_P54_USB=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PADATA=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_PANASONIC_LAPTOP=m +CONFIG_PANEL=m +# CONFIG_PANEL_CHANGE_MESSAGE is not set +CONFIG_PANEL_PARPORT=0 +CONFIG_PANEL_PROFILE=5 +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANTHERLORD_FF=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set +CONFIG_PARIDE=m +CONFIG_PARIDE_ATEN=m +CONFIG_PARIDE_BPCK=m +CONFIG_PARIDE_BPCK6=m +CONFIG_PARIDE_COMM=m +CONFIG_PARIDE_DSTR=m +CONFIG_PARIDE_EPAT=m +CONFIG_PARIDE_EPIA=m +CONFIG_PARIDE_FIT2=m +CONFIG_PARIDE_FIT3=m +CONFIG_PARIDE_FRIQ=m +CONFIG_PARIDE_FRPW=m +CONFIG_PARIDE_KBIC=m +CONFIG_PARIDE_KTTI=m +CONFIG_PARIDE_ON20=m +CONFIG_PARIDE_ON26=m +CONFIG_PARIDE_PCD=m +CONFIG_PARIDE_PD=m +CONFIG_PARIDE_PF=m +CONFIG_PARIDE_PG=m +CONFIG_PARIDE_PT=m +CONFIG_PARPORT=m +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_AX88796=m +# CONFIG_PARPORT_GSC is not set +CONFIG_PARPORT_NOT_PC=y +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_PC_PCMCIA=m +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_SERIAL=m +CONFIG_PARTITION_ADVANCED=y +# CONFIG_PASEMI_MAC is not set +CONFIG_PATA_ACPI=m +CONFIG_PATA_ALI=m +CONFIG_PATA_AMD=m +CONFIG_PATA_ARASAN_CF=m +CONFIG_PATA_ARTOP=m +CONFIG_PATA_ATIIXP=m +CONFIG_PATA_ATP867X=m +CONFIG_PATA_CMD640_PCI=m +CONFIG_PATA_CMD64X=m +CONFIG_PATA_CS5520=m +CONFIG_PATA_CS5530=m +CONFIG_PATA_CS5535=m +CONFIG_PATA_CS5536=m +CONFIG_PATA_CYPRESS=m +CONFIG_PATA_EFAR=m +CONFIG_PATA_HPT366=m +CONFIG_PATA_HPT37X=m +CONFIG_PATA_HPT3X2N=m +CONFIG_PATA_HPT3X3=m +# CONFIG_PATA_HPT3X3_DMA is not set +# CONFIG_PATA_IMX is not set +CONFIG_PATA_ISAPNP=m +CONFIG_PATA_IT8213=m +CONFIG_PATA_IT821X=m +CONFIG_PATA_JMICRON=m +CONFIG_PATA_LEGACY=m +CONFIG_PATA_MACIO=y +CONFIG_PATA_MARVELL=m +CONFIG_PATA_MPIIX=m +CONFIG_PATA_NETCELL=m +CONFIG_PATA_NINJA32=m +CONFIG_PATA_NS87410=m +CONFIG_PATA_NS87415=m +CONFIG_PATA_OF_PLATFORM=m +CONFIG_PATA_OLDPIIX=m +CONFIG_PATA_OPTI=m +CONFIG_PATA_OPTIDMA=m +CONFIG_PATA_PCMCIA=m +CONFIG_PATA_PDC2027X=m +CONFIG_PATA_PDC_OLD=m +CONFIG_PATA_PLATFORM=m 
+CONFIG_PATA_QDI=m +CONFIG_PATA_RADISYS=m +CONFIG_PATA_RDC=m +CONFIG_PATA_RZ1000=m +CONFIG_PATA_SC1200=m +CONFIG_PATA_SCH=m +CONFIG_PATA_SERVERWORKS=m +CONFIG_PATA_SIL680=m +CONFIG_PATA_TOSHIBA=m +CONFIG_PATA_TRIFLEX=m +CONFIG_PATA_VIA=m +CONFIG_PATA_WINBOND=m +CONFIG_PATA_WINBOND_VLB=m +CONFIG_PC300TOO=m +CONFIG_PC8736x_GPIO=m +CONFIG_PC87413_WDT=m +CONFIG_PCCARD_NONSTATIC=y +CONFIG_PCF50633_ADC=m +CONFIG_PCF50633_GPIO=m +CONFIG_PCH_CAN=m +CONFIG_PCH_DMA=m +CONFIG_PCH_GBE=m +CONFIG_PCH_PHUB=m +CONFIG_PCI=y +CONFIG_PCI200SYN=m +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEBUG=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_PERFORMANCE is not set +# CONFIG_PCIEASPM_POWERSAVE is not set +CONFIG_PCIE_DW=y +# CONFIG_PCIE_ECRC is not set +CONFIG_PCIE_PME=y +CONFIG_PCIPCWATCHDOG=m +CONFIG_PCI_ATMEL=m +CONFIG_PCI_ATS=y +CONFIG_PCI_BIOS=y +# CONFIG_PCI_CNB20LE_QUIRK is not set +# CONFIG_PCI_DEBUG is not set +CONFIG_PCI_DIRECT=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_GOANY=y +# CONFIG_PCI_GOBIOS is not set +# CONFIG_PCI_GODIRECT is not set +# CONFIG_PCI_GOMMCONFIG is not set +CONFIG_PCI_IMX6=y +CONFIG_PCI_IOAPIC=y +CONFIG_PCI_IOV=y +CONFIG_PCI_LABEL=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_MSI=y +CONFIG_PCI_PASID=y +CONFIG_PCI_PRI=y +CONFIG_PCI_QUIRKS=y +CONFIG_PCI_REALLOC_ENABLE_AUTO=y +CONFIG_PCI_STUB=m +CONFIG_PCI_TEGRA=y +CONFIG_PCI_XEN=y +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +CONFIG_PCMCIA=m +CONFIG_PCMCIA_3C574=m +CONFIG_PCMCIA_3C589=m +CONFIG_PCMCIA_AHA152X=m +CONFIG_PCMCIA_ATMEL=m +CONFIG_PCMCIA_AXNET=m +CONFIG_PCMCIA_FDOMAIN=m +CONFIG_PCMCIA_FMVJ18X=m +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_PCMCIA_NINJA_SCSI=m +CONFIG_PCMCIA_NMCLAN=m +CONFIG_PCMCIA_PCNET=m +CONFIG_PCMCIA_PROBE=y +CONFIG_PCMCIA_QLOGIC=m +CONFIG_PCMCIA_RAYCS=m +CONFIG_PCMCIA_SMC91C92=m +CONFIG_PCMCIA_SPECTRUM=m +CONFIG_PCMCIA_SYM53C500=m +CONFIG_PCMCIA_WL3501=m +CONFIG_PCMCIA_XIRC2PS=m +CONFIG_PCMCIA_XIRCOM=m +CONFIG_PCNET32=m +CONFIG_PCSPKR_PLATFORM=y +CONFIG_PCWATCHDOG=m +CONFIG_PD6729=m +CONFIG_PDA_POWER=m +CONFIG_PDC_ADMA=m +CONFIG_PERCPU_RWSEM=y +CONFIG_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PHANTOM=m +CONFIG_PHONET=m +CONFIG_PHYLIB=y +CONFIG_PHYS_64BIT=y +# CONFIG_PHY_EXYNOS_MIPI_VIDEO is not set +CONFIG_PHY_XGENE=y +# CONFIG_PID_IN_CONTEXTIDR is not set +CONFIG_PID_NS=y +CONFIG_PINCONF=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_AS3722=y +CONFIG_PINCTRL_BAYTRAIL=y +CONFIG_PINCTRL_IMX=y +CONFIG_PINCTRL_IMX6Q=y +CONFIG_PINCTRL_IMX6SL=y +CONFIG_PINCTRL_PALMAS=y +CONFIG_PINCTRL_SINGLE=y +CONFIG_PINCTRL_TEGRA=y +CONFIG_PINCTRL_TEGRA114=y +CONFIG_PINCTRL_TEGRA20=y +CONFIG_PINCTRL_TEGRA30=y +CONFIG_PINCTRL_VF610=y +CONFIG_PINMUX=y +CONFIG_PL310_ERRATA_588369=y +CONFIG_PL310_ERRATA_727915=y +CONFIG_PL310_ERRATA_753970=y +CONFIG_PL310_ERRATA_769419=y +CONFIG_PL320_MBOX=y +CONFIG_PL330_DMA=m +# CONFIG_PLAT_SPEAR is not set +CONFIG_PLAT_VERSATILE=y +CONFIG_PLAT_VERSATILE_CLCD=y +CONFIG_PLAT_VERSATILE_SCHED_CLOCK=y +CONFIG_PLIP=m +CONFIG_PLX_HERMES=m +CONFIG_PM=y +CONFIG_PMAC_APM_EMU=m +CONFIG_PMAC_BACKLIGHT=y +CONFIG_PMAC_BACKLIGHT_LEGACY=y +CONFIG_PMAC_MEDIABAY=y +CONFIG_PMAC_RACKMETER=m +CONFIG_PMAC_SMU=y +CONFIG_PMBUS=m +CONFIG_PMIC_DA9052=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_AUTOSLEEP is not set +CONFIG_PM_CLK=y +CONFIG_PM_DEVFREQ=y +CONFIG_PM_NOTIFIER_ERROR_INJECT=m +CONFIG_PM_OPP=y +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_PM_STD_PARTITION="" +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y 
+CONFIG_PM_WAKELOCKS_GC=y +CONFIG_PM_WAKELOCKS_LIMIT=100 +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_OBJLAYOUT=m +CONFIG_PNP=y +CONFIG_PNPACPI=y +CONFIG_PNPBIOS=y +CONFIG_PNPBIOS_PROC_FS=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POWER3=y +CONFIG_POWER4=y +# CONFIG_POWER4_CPU is not set +# CONFIG_POWER5_CPU is not set +# CONFIG_POWER6_CPU is not set +CONFIG_POWERCAP=y +CONFIG_POWERNV_CPUFREQ=y +CONFIG_POWERNV_MSI=y +CONFIG_POWER_AVS=y +CONFIG_POWER_AVS_OMAP=y +CONFIG_POWER_AVS_OMAP_CLASS3=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_RESTART=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_POWER_RESET_VEXPRESS=y +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_PPA8548=y +CONFIG_PPC=y +CONFIG_PPC32=y +# CONFIG_PPC601_SYNC_FIX is not set +# CONFIG_PPC_82xx is not set +# CONFIG_PPC_83xx is not set +# CONFIG_PPC_86xx is not set +# CONFIG_PPC_8xx is not set +CONFIG_PPC_A2=y +CONFIG_PPC_ADV_DEBUG_DACS=2 +CONFIG_PPC_ADV_DEBUG_DVCS=0 +CONFIG_PPC_ADV_DEBUG_IACS=2 +CONFIG_PPC_ADV_DEBUG_REGS=y +CONFIG_PPC_BOOK3E=y +CONFIG_PPC_BOOK3E_MMU=y +CONFIG_PPC_BOOK3S=y +# CONFIG_PPC_CELLEB is not set +# CONFIG_PPC_CELL_NATIVE is not set +# CONFIG_PPC_CELL_QPACE is not set +CONFIG_PPC_CHROMA=y +CONFIG_PPC_CHRP=y +# CONFIG_PPC_CLOCK is not set +CONFIG_PPC_CORENET_CPUFREQ=m +# CONFIG_PPC_DCR_MMIO is not set +# CONFIG_PPC_DCR_NATIVE is not set +CONFIG_PPC_E500MC=y +# CONFIG_PPC_EARLY_DEBUG is not set +# CONFIG_PPC_EARLY_DEBUG_EHV_BC is not set +# CONFIG_PPC_EMULATED_STATS is not set +CONFIG_PPC_EMULATE_SSTEP=y +CONFIG_PPC_FPU=y +CONFIG_PPC_FSL_BOOK3E=y +CONFIG_PPC_HAVE_PMU_SUPPORT=y +# CONFIG_PPC_IBM_CELL_BLADE is not set +CONFIG_PPC_ICSWX_PID=y +# CONFIG_PPC_ICSWX_USE_SIGILL is not set +CONFIG_PPC_INDIRECT_PIO=y +CONFIG_PPC_IO_WORKAROUNDS=y +CONFIG_PPC_LIB_RHEAP=y +CONFIG_PPC_MMU_NOHASH=y +# CONFIG_PPC_MPC512x is not set +# CONFIG_PPC_MPC52xx is not set +CONFIG_PPC_MSI_BITMAP=y +CONFIG_PPC_NATIVE=y +CONFIG_PPC_OF=y +# CONFIG_PPC_OF_PLATFORM_PCI is not set +CONFIG_PPC_PASEMI_CPUFREQ=y +CONFIG_PPC_PASEMI_IOMMU=y +# CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE is not set +CONFIG_PPC_PASEMI_MDIO=y +CONFIG_PPC_PCI_CHOICE=y +CONFIG_PPC_PERF_CTRS=y +CONFIG_PPC_PMAC32_PSURGE=y +CONFIG_PPC_PMAC64=y +CONFIG_PPC_POWERNV=y +CONFIG_PPC_POWERNV_RTAS=y +CONFIG_PPC_PSERIES=y +CONFIG_PPC_PSR2=y +CONFIG_PPC_QEMU_E500=y +CONFIG_PPC_RTAS_DAEMON=y +CONFIG_PPC_SCOM=y +CONFIG_PPC_SMP_MUXED_IPI=y +CONFIG_PPC_STD_MMU=y +CONFIG_PPC_STD_MMU_32=y +CONFIG_PPC_STD_MMU_64=y +CONFIG_PPC_SUBPAGE_PROT=y +CONFIG_PPC_TRANSACTIONAL_MEM=y +CONFIG_PPC_UDBG_16550=y +CONFIG_PPC_WERROR=y +CONFIG_PPDEV=m +CONFIG_PPP=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPP_SYNC_TTY=m +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_DEBUG is not set +CONFIG_PPTP=m +# CONFIG_PQ2ADS is not set +CONFIG_PREEMPT_COUNT=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_NOTIFIERS=y +# CONFIG_PREEMPT_TRACER is not set +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_PRINTER=m +CONFIG_PRINTK=y +CONFIG_PRINTK_TIME=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_PRINT_STACK_DEPTH=64 +CONFIG_PRISM2_USB=m +# CONFIG_PRISM54 is not set +CONFIG_PROCESSOR_SELECT=y +CONFIG_PROC_DEVICETREE=y +CONFIG_PROC_EVENTS=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_PAGE_MONITOR=y 
+CONFIG_PROC_PID_CPUSET=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_PROFILING=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_PROVE_RCU_DELAY is not set +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set +# CONFIG_PS3GELIC_UDBG is not set +# CONFIG_PS3_ADVANCED is not set +CONFIG_PS3_DISK=m +# CONFIG_PS3_DYNAMIC_DMA is not set +CONFIG_PS3_FLASH=m +CONFIG_PS3_HTAB_SIZE=20 +CONFIG_PS3_LPM=m +CONFIG_PS3_PS3AV=y +# CONFIG_PS3_REPOSITORY_WRITE is not set +CONFIG_PS3_ROM=m +CONFIG_PS3_STORAGE=m +CONFIG_PS3_SYS_MANAGER=y +CONFIG_PS3_VRAM=m +CONFIG_PS3_VUART=y +CONFIG_PSERIES_IDLE=y +CONFIG_PSERIES_MSI=y +CONFIG_PSTORE=y +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PTE_64BIT=y +CONFIG_PTP_1588_CLOCK_GIANFAR=m +CONFIG_PTP_1588_CLOCK_PCH=m +CONFIG_PUBLIC_KEY_ALGO_RSA=y +CONFIG_PVPANIC=m +CONFIG_PWM=y +# CONFIG_PWM_IMX is not set +CONFIG_PWM_PCA9685=m +CONFIG_PWM_SYSFS=y +CONFIG_PWM_TEGRA=m +# CONFIG_PWM_TIECAP is not set +# CONFIG_PWM_TIEHRPWM is not set +CONFIG_PWM_TWL=m +CONFIG_PWM_TWL_LED=m +CONFIG_QE_USB=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=m +CONFIG_QLA3XXX=m +CONFIG_QLCNIC=m +CONFIG_QLCNIC_DCB=y +CONFIG_QLCNIC_SRIOV=y +CONFIG_QLGE=m +CONFIG_QNX4FS_FS=m +# CONFIG_QNX6FS_DEBUG is not set +CONFIG_QNX6FS_FS=m +CONFIG_QSEMI_PHY=y +# CONFIG_QUICC_ENGINE is not set +CONFIG_QUOTA=y +CONFIG_QUOTACTL=y +CONFIG_QUOTACTL_COMPAT=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_TREE=m +CONFIG_R3964=m +CONFIG_R6040=m +CONFIG_R8169=m +CONFIG_R8187SE=m +CONFIG_R8188EU=m +CONFIG_R8712U=m +CONFIG_R8821AE=m +CONFIG_RADIO_ADAPTERS=y +CONFIG_RADIO_AZTECH=m +CONFIG_RADIO_CADET=m +CONFIG_RADIO_GEMTEK=m +CONFIG_RADIO_ISA=m +CONFIG_RADIO_MAXIRADIO=m +CONFIG_RADIO_MIROPCM20=m +CONFIG_RADIO_RTRACK=m +CONFIG_RADIO_RTRACK2=m +CONFIG_RADIO_SAA7706H=m +CONFIG_RADIO_SF16FMI=m +CONFIG_RADIO_SF16FMR2=m +CONFIG_RADIO_SHARK=m +CONFIG_RADIO_SHARK2=m +CONFIG_RADIO_SI470X=y +CONFIG_RADIO_SI4713=m +CONFIG_RADIO_SI476X=m +CONFIG_RADIO_TEA575X=m +CONFIG_RADIO_TEA5764=m +CONFIG_RADIO_TEF6862=m +CONFIG_RADIO_TERRATEC=m +CONFIG_RADIO_TIMBERDALE=m +CONFIG_RADIO_TRUST=m +CONFIG_RADIO_TYPHOON=m +CONFIG_RADIO_WL1273=m +CONFIG_RADIO_WL128X=m +CONFIG_RADIO_ZOLTRIX=m +CONFIG_RAID6_PQ=m +CONFIG_RAID_ATTRS=m +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_RAPIDIO=y +# CONFIG_RAPIDIO_DEBUG is not set +CONFIG_RAPIDIO_DISC_TIMEOUT=30 +CONFIG_RAPIDIO_DMA_ENGINE=y +# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RATIONAL=y +CONFIG_RAW_DRIVER=m +# CONFIG_RCU_BOOST is not set +# CONFIG_RCU_CPU_STALL_INFO is not set +# CONFIG_RCU_CPU_STALL_VERBOSE is not set +# CONFIG_RCU_FANOUT_EXACT is not set +CONFIG_RCU_FANOUT_LEAF=16 +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_RCU_STALL_COMMON=y +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RC_ATI_REMOTE=m +CONFIG_RC_CORE=m +CONFIG_RC_DECODERS=y +CONFIG_RC_DEVICES=y +CONFIG_RC_LOOPBACK=m +CONFIG_RC_MAP=m +CONFIG_RDS=m +# CONFIG_RDS_DEBUG is not set +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RD_BZIP2=y +CONFIG_RD_GZIP=y +CONFIG_RD_LZ4=y +CONFIG_RD_LZMA=y +CONFIG_RD_LZO=y +CONFIG_RD_XZ=y +# CONFIG_READABLE_ASM is not set +CONFIG_REALTEK_AUTOPM=y +CONFIG_REALTEK_PHY=y +CONFIG_REED_SOLOMON_DEC16=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_IRQ=y +CONFIG_REGMAP_MMIO=y +CONFIG_REGMAP_SPI=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_88PM800=m +CONFIG_REGULATOR_88PM8607=y 
+CONFIG_REGULATOR_AAT2870=m +CONFIG_REGULATOR_AB3100=m +CONFIG_REGULATOR_AD5398=m +CONFIG_REGULATOR_ANATOP=m +CONFIG_REGULATOR_ARIZONA=m +CONFIG_REGULATOR_AS3711=m +CONFIG_REGULATOR_AS3722=m +CONFIG_REGULATOR_DA903X=m +CONFIG_REGULATOR_DA9052=m +CONFIG_REGULATOR_DA9055=m +CONFIG_REGULATOR_DA9063=m +CONFIG_REGULATOR_DA9210=m +# CONFIG_REGULATOR_DEBUG is not set +CONFIG_REGULATOR_FAN53555=m +CONFIG_REGULATOR_GPIO=m +CONFIG_REGULATOR_ISL6271A=m +CONFIG_REGULATOR_LP3971=m +CONFIG_REGULATOR_LP3972=m +CONFIG_REGULATOR_LP872X=y +CONFIG_REGULATOR_LP8755=m +CONFIG_REGULATOR_LP8788=y +CONFIG_REGULATOR_MAX1586=m +CONFIG_REGULATOR_MAX77686=m +CONFIG_REGULATOR_MAX77693=m +CONFIG_REGULATOR_MAX8649=m +CONFIG_REGULATOR_MAX8660=m +CONFIG_REGULATOR_MAX8907=m +CONFIG_REGULATOR_MAX8925=m +CONFIG_REGULATOR_MAX8952=m +CONFIG_REGULATOR_MAX8973=m +CONFIG_REGULATOR_MAX8997=m +CONFIG_REGULATOR_MAX8998=m +CONFIG_REGULATOR_MC13783=m +CONFIG_REGULATOR_MC13892=m +CONFIG_REGULATOR_MC13XXX_CORE=m +CONFIG_REGULATOR_PALMAS=m +CONFIG_REGULATOR_PCAP=m +CONFIG_REGULATOR_PCF50633=m +CONFIG_REGULATOR_PFUZE100=m +CONFIG_REGULATOR_RC5T583=m +CONFIG_REGULATOR_S5M8767=m +CONFIG_REGULATOR_TI_ABB=y +CONFIG_REGULATOR_TPS51632=m +CONFIG_REGULATOR_TPS6105X=m +CONFIG_REGULATOR_TPS62360=m +CONFIG_REGULATOR_TPS65023=m +CONFIG_REGULATOR_TPS6507X=m +CONFIG_REGULATOR_TPS65090=m +CONFIG_REGULATOR_TPS65217=m +CONFIG_REGULATOR_TPS6524X=m +CONFIG_REGULATOR_TPS6586X=m +CONFIG_REGULATOR_TPS65910=m +CONFIG_REGULATOR_TPS65912=m +CONFIG_REGULATOR_TPS80031=m +CONFIG_REGULATOR_TWL4030=y +CONFIG_REGULATOR_USERSPACE_CONSUMER=m +CONFIG_REGULATOR_VEXPRESS=m +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m +CONFIG_REGULATOR_WM831X=m +CONFIG_REGULATOR_WM8350=m +CONFIG_REGULATOR_WM8400=m +CONFIG_REGULATOR_WM8994=m +# CONFIG_REISERFS_CHECK is not set +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_REISERFS_FS_XATTR=y +# CONFIG_REISERFS_PROC_INFO is not set +CONFIG_RELAY=y +CONFIG_RELOCATABLE=y +CONFIG_REMOTEPROC=m +CONFIG_RESET_CONTROLLER=y +CONFIG_RESOURCE_COUNTERS=y +CONFIG_RETU_WATCHDOG=m +CONFIG_RFD_FTL=m +CONFIG_RFKILL_GPIO=m +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_REGULATOR=m +CONFIG_RFS_ACCEL=y +CONFIG_RING_BUFFER=y +CONFIG_RING_BUFFER_ALLOW_SWAP=y +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +CONFIG_RIONET=m +CONFIG_RIONET_RX_SIZE=128 +CONFIG_RIONET_TX_SIZE=128 +CONFIG_ROCKETPORT=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +# CONFIG_ROMFS_BACKED_BY_MTD is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_ROSE=m +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPMSG=m +CONFIG_RPS=y +CONFIG_RT2400PCI=m +CONFIG_RT2500PCI=m +CONFIG_RT2500USB=m +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT3290=y +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00=m +# CONFIG_RT2X00_DEBUG is not set +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT61PCI=m +CONFIG_RT73USB=m +CONFIG_RTAS_ERROR_LOGGING=y +CONFIG_RTAS_PROC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_DRV_88PM80X=m +CONFIG_RTC_DRV_88PM860X=m +CONFIG_RTC_DRV_AB3100=m +CONFIG_RTC_DRV_AS3722=m +CONFIG_RTC_DRV_BQ32K=m 
+CONFIG_RTC_DRV_BQ4802=m +CONFIG_RTC_DRV_DA9052=m +CONFIG_RTC_DRV_DA9055=m +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1390=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_DS3234=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_GENERIC=m +CONFIG_RTC_DRV_HID_SENSOR_TIME=m +# CONFIG_RTC_DRV_IMXDI is not set +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_LP8788=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_M48T86=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_MAX77686=m +CONFIG_RTC_DRV_MAX8907=m +CONFIG_RTC_DRV_MAX8925=m +CONFIG_RTC_DRV_MAX8997=m +CONFIG_RTC_DRV_MAX8998=m +CONFIG_RTC_DRV_MC13XXX=m +CONFIG_RTC_DRV_MOXART=m +CONFIG_RTC_DRV_MSM6242=m +# CONFIG_RTC_DRV_MXC is not set +CONFIG_RTC_DRV_OMAP=y +CONFIG_RTC_DRV_PALMAS=m +CONFIG_RTC_DRV_PCAP=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_PCF50633=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_PL030=m +CONFIG_RTC_DRV_PS3=m +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RC5T583=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_RV3029C2=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_S35390A=m +CONFIG_RTC_DRV_S5M=m +CONFIG_RTC_DRV_SNVS=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_TEGRA=m +# CONFIG_RTC_DRV_TEST is not set +CONFIG_RTC_DRV_TPS6586X=m +CONFIG_RTC_DRV_TPS65910=m +CONFIG_RTC_DRV_TPS80031=m +CONFIG_RTC_DRV_TWL4030=y +CONFIG_RTC_DRV_V3020=m +CONFIG_RTC_DRV_VRTC=m +CONFIG_RTC_DRV_WM831X=m +CONFIG_RTC_DRV_WM8350=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_XGENE=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_LIB=y +CONFIG_RTC_SYSTOHC=y +CONFIG_RTL8180=m +CONFIG_RTL8187=m +CONFIG_RTL8187_LEDS=y +CONFIG_RTL8188EE=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192CU=m +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8192DE=m +CONFIG_RTL8192E=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192U=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTLLIB=m +CONFIG_RTLLIB_CRYPTO_CCMP=m +CONFIG_RTLLIB_CRYPTO_TKIP=m +CONFIG_RTLLIB_CRYPTO_WEP=m +CONFIG_RTLWIFI=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +CONFIG_RTL_CARDS=m +CONFIG_RTS5139=m +# CONFIG_RTS5139_DEBUG is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_RT_MUTEXES=y +# CONFIG_RT_MUTEX_TESTER is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_RXKAD=m +CONFIG_S2IO=m +# CONFIG_SAMPLES is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_SAMSUNG_Q10=m +CONFIG_SAMSUNG_USBPHY=m +CONFIG_SATA_ACARD_AHCI=m +CONFIG_SATA_AHCI=m +CONFIG_SATA_FSL=m +CONFIG_SATA_INIC162X=m +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_PMP=y +CONFIG_SATA_PROMISE=m +CONFIG_SATA_QSTOR=m +CONFIG_SATA_RCAR=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIL24=m +CONFIG_SATA_SIS=m +CONFIG_SATA_SX4=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_SATA_VITESSE=m +CONFIG_SATA_ZPODD=y +CONFIG_SBC7240_WDT=m +CONFIG_SBC8360_WDT=m +CONFIG_SBC_EPX_C3_WATCHDOG=m +CONFIG_SBC_FITPC2_WATCHDOG=m 
+CONFIG_SBE_2T3E3=m +CONFIG_SBE_PMCC4_NCOMM=y +CONFIG_SBNI=m +# CONFIG_SBNI_MULTILINE is not set +CONFIG_SBP_TARGET=m +CONFIG_SBYPASS=m +CONFIG_SC1200_WDT=m +CONFIG_SC520_WDT=m +CONFIG_SC92031=m +CONFIG_SCA3000=m +CONFIG_SCANLOG=m +CONFIG_SCC=m +# CONFIG_SCC_DELAY is not set +# CONFIG_SCC_TRXECHO is not set +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_HRTICK=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_SCHED_TRACER=y +CONFIG_SCSI=y +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_3W_SAS=m +CONFIG_SCSI_7000FASST=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_ADVANSYS=m +CONFIG_SCSI_AHA152X=m +CONFIG_SCSI_AHA1542=m +CONFIG_SCSI_AHA1740=m +CONFIG_SCSI_AIC79XX=m +CONFIG_SCSI_AIC7XXX=m +# CONFIG_SCSI_AIC7XXX_OLD is not set +CONFIG_SCSI_AIC94XX=m +CONFIG_SCSI_ARCMSR=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BUSLOGIC=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_DC390T=m +CONFIG_SCSI_DC395x=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_ALUA=m +CONFIG_SCSI_DH_EMC=m +CONFIG_SCSI_DH_HP_SW=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DMA=y +CONFIG_SCSI_DMX3191D=m +CONFIG_SCSI_DPT_I2O=m +CONFIG_SCSI_DTC3280=m +CONFIG_SCSI_EATA=m +CONFIG_SCSI_EATA_LINKED_COMMANDS=y +CONFIG_SCSI_EATA_MAX_TAGS=16 +CONFIG_SCSI_EATA_TAGGED_QUEUE=y +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_ESAS2R=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_FC_TGT_ATTRS=y +CONFIG_SCSI_FLASHPOINT=y +CONFIG_SCSI_FUTURE_DOMAIN=m +CONFIG_SCSI_GDTH=m +CONFIG_SCSI_GENERIC_NCR5380=m +CONFIG_SCSI_GENERIC_NCR5380_MMIO=m +CONFIG_SCSI_GENERIC_NCR53C400=y +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_HPTIOP=m +CONFIG_SCSI_IBMVFC=m +CONFIG_SCSI_IBMVFC_TRACE=y +CONFIG_SCSI_IBMVSCSI=y +CONFIG_SCSI_IMM=m +CONFIG_SCSI_IN2000=m +CONFIG_SCSI_INIA100=m +CONFIG_SCSI_INITIO=m +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_DUMP=y +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPS=m +CONFIG_SCSI_ISCI=m +CONFIG_SCSI_ISCSI_ATTRS=m +# CONFIG_SCSI_IZIP_EPP16 is not set +# CONFIG_SCSI_IZIP_SLOW_CTR is not set +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_LOWLEVEL=y +CONFIG_SCSI_LOWLEVEL_PCMCIA=y +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +CONFIG_SCSI_MAC53C94=m +CONFIG_SCSI_MESH=m +CONFIG_SCSI_MESH_RESET_DELAY_MS=4000 +CONFIG_SCSI_MESH_SYNC_RATE=5 +CONFIG_SCSI_MOD=y +CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPT2SAS_LOGGING is not set +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS=m +# CONFIG_SCSI_MPT3SAS_LOGGING is not set +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_MVSAS=m +# CONFIG_SCSI_MVSAS_DEBUG is not set +# CONFIG_SCSI_MVSAS_TASKLET is not set +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_NCR53C406A=m +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_NSP32=m +# CONFIG_SCSI_OSD_DEBUG is not set +CONFIG_SCSI_OSD_DPRINT_SENSE=1 +CONFIG_SCSI_OSD_INITIATOR=m +CONFIG_SCSI_OSD_ULD=m +CONFIG_SCSI_PAS16=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PPA=m +CONFIG_SCSI_PROC_FS=y +CONFIG_SCSI_QLA_FC=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLOGIC_FAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SIM710=m +CONFIG_SCSI_SRP=m +CONFIG_SCSI_SRP_TGT_ATTRS=y +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C416=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_MMIO=y +CONFIG_SCSI_T128=m +CONFIG_SCSI_TGT=m 
+CONFIG_SCSI_U14_34F=m +CONFIG_SCSI_U14_34F_LINKED_COMMANDS=y +CONFIG_SCSI_U14_34F_MAX_TAGS=8 +CONFIG_SCSI_U14_34F_TAGGED_QUEUE=y +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_UFSHCD_PCI=m +CONFIG_SCSI_UFSHCD_PLATFORM=m +CONFIG_SCSI_ULTRASTOR=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCx200=m +CONFIG_SCx200HR_TIMER=m +CONFIG_SCx200_ACB=m +CONFIG_SCx200_GPIO=m +# CONFIG_SCx200_I2C is not set +CONFIG_SCx200_WDT=m +CONFIG_SDIO_UART=m +CONFIG_SDLA=m +CONFIG_SEALEVEL_4021=m +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE=1 +CONFIG_SECURITY_APPARMOR_HASH=y +# CONFIG_SECURITY_APPARMOR_STATS is not set +CONFIG_SECURITY_APPARMOR_UNCONFINED_INIT=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY_NETWORK=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_DISABLE=y +# CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set +CONFIG_SECURITY_SMACK=y +CONFIG_SECURITY_TOMOYO=y +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init" +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init" +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_YAMA_STACKED=y +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_AD7314=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADCXX=m +CONFIG_SENSORS_ADM1021=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADS1015=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_ADS7871=m +CONFIG_SENSORS_ADT7310=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_AMS=m +# CONFIG_SENSORS_AMS_I2C is not set +# CONFIG_SENSORS_AMS_PMU is not set +CONFIG_SENSORS_APDS990X=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATK0110=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_BH1780=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_DA9052_ADC=m +CONFIG_SENSORS_DA9055=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC2103=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_FSCHMD=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_G762=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_GPIO_FAN=m +CONFIG_SENSORS_HDAPS=m +CONFIG_SENSORS_HIH6130=m +CONFIG_SENSORS_HMC5843=m +CONFIG_SENSORS_HTU21=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IIO_HWMON=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m 
+CONFIG_SENSORS_ISL29018=m +CONFIG_SENSORS_ISL29028=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LIS3LV02D=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_SENSORS_LIS3_SPI=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LM3533=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM70=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX1111=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6642=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_MC13783_ADC=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_PCF8591=m +CONFIG_SENSORS_PMBUS=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_SMM665=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_TSL2563=m +CONFIG_SENSORS_TWL4030_MADC=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_VEXPRESS=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_WM831X=m +CONFIG_SENSORS_WM8350=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_ACCENT=m +CONFIG_SERIAL_8250_BOCA=m +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_CS=m +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_DW=m +# CONFIG_SERIAL_8250_EM is not set +CONFIG_SERIAL_8250_EXAR_ST16C554=m +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_FOURPORT=m +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_HUB6=m +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_NR_UARTS=48 +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_RUNTIME_UARTS=32 +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_ALTERA_JTAGUART=m +CONFIG_SERIAL_ALTERA_UART=m +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200 +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4 +CONFIG_SERIAL_AMBA_PL010=m +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_CPM=m +CONFIG_SERIAL_ICOM=m +# CONFIG_SERIAL_IFX6X60 is not set +CONFIG_SERIAL_IMX=y +CONFIG_SERIAL_IMX_CONSOLE=y +CONFIG_SERIAL_IPOCTAL=m +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_KGDB_NMI=y +CONFIG_SERIAL_MAX3100=m +CONFIG_SERIAL_MAX310X=y 
+CONFIG_SERIAL_MFD_HSU=m +CONFIG_SERIAL_MRST_MAX3110=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y +CONFIG_SERIAL_PCH_UART=m +CONFIG_SERIAL_PMACZILOG=y +CONFIG_SERIAL_PMACZILOG_CONSOLE=y +# CONFIG_SERIAL_PMACZILOG_TTYS is not set +CONFIG_SERIAL_RP2=m +CONFIG_SERIAL_RP2_NR_UARTS=32 +CONFIG_SERIAL_SCCNXP=y +CONFIG_SERIAL_SCCNXP_CONSOLE=y +CONFIG_SERIAL_SH_SCI=m +CONFIG_SERIAL_SH_SCI_NR_UARTS=2 +CONFIG_SERIAL_ST_ASC=m +CONFIG_SERIAL_TEGRA=m +CONFIG_SERIAL_TIMBERDALE=m +CONFIG_SERIAL_UARTLITE=m +CONFIG_SERIAL_XILINX_PS_UART=m +CONFIG_SERIO=y +CONFIG_SERIO_AMBAKMI=m +CONFIG_SERIO_CT82C710=m +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_NVEC_PS2=m +CONFIG_SERIO_SERPORT=m +CONFIG_SFC=m +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_MTD=y +CONFIG_SFC_SRIOV=y +CONFIG_SFI=y +CONFIG_SGI_IOC4=m +CONFIG_SGI_PARTITION=y +CONFIG_SGY_CTS1000=y +CONFIG_SHMEM=y +CONFIG_SHMOBILE_IOMMU=y +# CONFIG_SHMOBILE_IOMMU_ADDRSIZE_1024MB is not set +# CONFIG_SHMOBILE_IOMMU_ADDRSIZE_128MB is not set +CONFIG_SHMOBILE_IOMMU_ADDRSIZE_2048MB=y +# CONFIG_SHMOBILE_IOMMU_ADDRSIZE_256MB is not set +# CONFIG_SHMOBILE_IOMMU_ADDRSIZE_32MB is not set +# CONFIG_SHMOBILE_IOMMU_ADDRSIZE_512MB is not set +# CONFIG_SHMOBILE_IOMMU_ADDRSIZE_64MB is not set +CONFIG_SHMOBILE_IOMMU_L1SIZE=8192 +CONFIG_SHMOBILE_IPMMU=y +CONFIG_SHMOBILE_IPMMU_TLB=y +CONFIG_SHMOBILE_TIMER_HZ=128 +CONFIG_SH_ETH=m +CONFIG_SH_TIMER_CMT=y +CONFIG_SH_TIMER_TMU=y +CONFIG_SIGMATEL_FIR=m +CONFIG_SIGNALFD=y +CONFIG_SIGNATURE=y +CONFIG_SIS190=m +CONFIG_SIS900=m +CONFIG_SKFP=m +CONFIG_SKGE=m +# CONFIG_SKGE_DEBUG is not set +CONFIG_SKGE_GENESIS=y +CONFIG_SKY2=m +# CONFIG_SKY2_DEBUG is not set +# CONFIG_SLAB is not set +CONFIG_SLABINFO=y +CONFIG_SLHC=y +CONFIG_SLICOSS=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_MODE_SLIP6=y +CONFIG_SLIP_SMART=y +# CONFIG_SLOB is not set +CONFIG_SLUB=y +CONFIG_SLUB_CPU_PARTIAL=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_SMC911X=m +CONFIG_SMC9194=m +CONFIG_SMC_IRCC_FIR=m +CONFIG_SMP=y +CONFIG_SMP_ON_UP=y +CONFIG_SMSC37B787_WDT=m +CONFIG_SMSC911X=m +# CONFIG_SMSC911X_ARCH_HOOKS is not set +CONFIG_SMSC9420=m +CONFIG_SMSC_PHY=y +CONFIG_SMSC_SCH311X_WDT=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +CONFIG_SMS_USB_DRV=m +CONFIG_SM_FTL=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0 +CONFIG_SND_AD1816A=m +CONFIG_SND_AD1848=m +CONFIG_SND_AD1889=m +CONFIG_SND_ADLIB=m +CONFIG_SND_ALI5451=m +CONFIG_SND_ALOOP=m +CONFIG_SND_ALS100=m +CONFIG_SND_ALS300=m +CONFIG_SND_ALS4000=m +CONFIG_SND_AM33XX_SOC_EVM=m +CONFIG_SND_AOA=m +CONFIG_SND_AOA_FABRIC_LAYOUT=m +CONFIG_SND_AOA_ONYX=m +CONFIG_SND_AOA_SOUNDBUS=m +CONFIG_SND_AOA_SOUNDBUS_I2S=m +CONFIG_SND_AOA_TAS=m +CONFIG_SND_AOA_TOONIE=m +CONFIG_SND_ARM=y +CONFIG_SND_ARMAACI=m +CONFIG_SND_ASIHPI=m +CONFIG_SND_AT73C213=m +CONFIG_SND_AT73C213_TARGET_BITRATE=48000 +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_ATMEL_SOC=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +CONFIG_SND_AW2=m +CONFIG_SND_AZT1605=m +CONFIG_SND_AZT2316=m +CONFIG_SND_AZT2320=m +CONFIG_SND_AZT3328=m +CONFIG_SND_BT87X=m +# CONFIG_SND_BT87X_OVERCLOCK is not set +CONFIG_SND_CA0106=m +CONFIG_SND_CMI8328=m +CONFIG_SND_CMI8330=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_CS4231=m +CONFIG_SND_CS4236=m +CONFIG_SND_CS4281=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CS5530=m +CONFIG_SND_CS5535AUDIO=m +CONFIG_SND_CTXFI=m 
+CONFIG_SND_DARLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_DAVINCI_SOC=m +CONFIG_SND_DAVINCI_SOC_MCASP=m +CONFIG_SND_DEBUG=y +# CONFIG_SND_DEBUG_VERBOSE is not set +CONFIG_SND_DESIGNWARE_I2S=m +CONFIG_SND_DICE=m +CONFIG_SND_DMAENGINE_PCM=y +CONFIG_SND_DMA_SGBUF=y +CONFIG_SND_DRIVERS=y +CONFIG_SND_DUMMY=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_ECHO3G=m +CONFIG_SND_EMU10K1=m +CONFIG_SND_EMU10K1X=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_ES1688=m +CONFIG_SND_ES18XX=m +CONFIG_SND_ES1938=m +CONFIG_SND_ES1968=m +CONFIG_SND_ES1968_INPUT=y +CONFIG_SND_ES1968_RADIO=y +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_FIREWIRE_SPEAKERS=m +CONFIG_SND_FM801=m +CONFIG_SND_FM801_TEA575X_BOOL=y +CONFIG_SND_GINA20=m +CONFIG_SND_GINA24=m +CONFIG_SND_GUSCLASSIC=m +CONFIG_SND_GUSEXTREME=m +CONFIG_SND_GUSMAX=m +CONFIG_SND_HDA_CODEC_ANALOG=y +CONFIG_SND_HDA_CODEC_CA0110=y +CONFIG_SND_HDA_CODEC_CA0132=y +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CIRRUS=y +CONFIG_SND_HDA_CODEC_CMEDIA=y +CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_HDMI=y +CONFIG_SND_HDA_CODEC_REALTEK=y +CONFIG_SND_HDA_CODEC_SI3054=y +CONFIG_SND_HDA_CODEC_SIGMATEL=y +CONFIG_SND_HDA_CODEC_VIA=y +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_GENERIC=y +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_I915=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_INPUT_JACK=y +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +CONFIG_SND_HDA_PREALLOC_SIZE=64 +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_HRTIMER=m +CONFIG_SND_HWDEP=m +CONFIG_SND_ICE1712=m +CONFIG_SND_ICE1724=m +CONFIG_SND_IMX_SOC=y +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_INTERWAVE=m +CONFIG_SND_INTERWAVE_STB=m +CONFIG_SND_ISA=y +CONFIG_SND_ISIGHT=m +CONFIG_SND_JACK=y +CONFIG_SND_JAZZ16=m +CONFIG_SND_KCTL_JACK=y +CONFIG_SND_KORG1212=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MAESTRO3=m +CONFIG_SND_MAESTRO3_INPUT=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_MFLD_MACHINE=m +CONFIG_SND_MIA=m +CONFIG_SND_MIRO=m +CONFIG_SND_MIXART=m +# CONFIG_SND_MIXER_OSS is not set +CONFIG_SND_MONA=m +CONFIG_SND_MPU401=m +CONFIG_SND_MPU401_UART=m +CONFIG_SND_MSND_CLASSIC=m +CONFIG_SND_MSND_PINNACLE=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MTS64=m +CONFIG_SND_NM256=m +CONFIG_SND_OMAP_SOC=y +CONFIG_SND_OMAP_SOC_DMIC=y +CONFIG_SND_OMAP_SOC_HDMI=y +CONFIG_SND_OMAP_SOC_MCBSP=y +CONFIG_SND_OMAP_SOC_MCPDM=y +CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m +CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=y +CONFIG_SND_OMAP_SOC_OMAP_HDMI=y +CONFIG_SND_OMAP_SOC_OMAP_TWL4030=y +CONFIG_SND_OPL3SA2=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL4_LIB=m +CONFIG_SND_OPTI92X_AD1848=m +CONFIG_SND_OPTI92X_CS4231=m +CONFIG_SND_OPTI93X=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_PCI=y +CONFIG_SND_PCMCIA=y +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_XRUN_DEBUG=y +CONFIG_SND_PCSP=m +CONFIG_SND_PCXHR=m +CONFIG_SND_PDAUDIOCF=m +CONFIG_SND_PORTMAN2X4=m +CONFIG_SND_POWERMAC=m +CONFIG_SND_POWERMAC_AUTO_DRC=y +CONFIG_SND_POWERPC_SOC=m +CONFIG_SND_PPC=y +CONFIG_SND_PS3=m +CONFIG_SND_PS3_DEFAULT_START_DELAY=2000 +CONFIG_SND_RAWMIDI=m +CONFIG_SND_RAWMIDI_SEQ=m +CONFIG_SND_RIPTIDE=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_SB16=m +CONFIG_SND_SB16_CSP=y +CONFIG_SND_SB16_DSP=m +CONFIG_SND_SB8=m 
+CONFIG_SND_SB8_DSP=m +CONFIG_SND_SBAWE=m +CONFIG_SND_SB_COMMON=m +CONFIG_SND_SC6000=m +CONFIG_SND_SCS1X=m +CONFIG_SND_SEQUENCER=m +# CONFIG_SND_SEQUENCER_OSS is not set +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SERIAL_U16550=m +CONFIG_SND_SIMPLE_CARD=m +CONFIG_SND_SIS7019=m +CONFIG_SND_SOC_AC97_BUS=y +CONFIG_SND_SOC_ALC5632=m +CONFIG_SND_SOC_DMIC=y +CONFIG_SND_SOC_FSL_SPDIF=m +CONFIG_SND_SOC_FSL_UTILS=m +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y +CONFIG_SND_SOC_HDMI_CODEC=y +CONFIG_SND_SOC_IMX_AUDMUX=y +CONFIG_SND_SOC_IMX_MC13783=m +CONFIG_SND_SOC_IMX_PCM_DMA=y +CONFIG_SND_SOC_IMX_SGTL5000=y +CONFIG_SND_SOC_IMX_SPDIF=m +CONFIG_SND_SOC_IMX_SSI=m +CONFIG_SND_SOC_IMX_WM8962=m +CONFIG_SND_SOC_MC13783=m +CONFIG_SND_SOC_P1022_DS=m +CONFIG_SND_SOC_P1022_RDK=m +CONFIG_SND_SOC_POWERPC_DMA=m +CONFIG_SND_SOC_RT5640=m +CONFIG_SND_SOC_SGTL5000=y +CONFIG_SND_SOC_SI476X=m +CONFIG_SND_SOC_SN95031=m +CONFIG_SND_SOC_SPDIF=m +CONFIG_SND_SOC_TEGRA=m +CONFIG_SND_SOC_TEGRA20_AC97=m +CONFIG_SND_SOC_TEGRA20_DAS=m +CONFIG_SND_SOC_TEGRA20_I2S=m +CONFIG_SND_SOC_TEGRA20_SPDIF=m +CONFIG_SND_SOC_TEGRA30_AHUB=m +CONFIG_SND_SOC_TEGRA30_I2S=m +CONFIG_SND_SOC_TEGRA_ALC5632=m +CONFIG_SND_SOC_TEGRA_RT5640=m +CONFIG_SND_SOC_TEGRA_TRIMSLICE=m +CONFIG_SND_SOC_TEGRA_WM8753=m +CONFIG_SND_SOC_TEGRA_WM8903=m +CONFIG_SND_SOC_TEGRA_WM9712=m +CONFIG_SND_SOC_TLV320AIC23=m +CONFIG_SND_SOC_TLV320AIC3X=m +CONFIG_SND_SOC_TWL4030=y +CONFIG_SND_SOC_TWL6040=y +CONFIG_SND_SOC_WM8753=m +CONFIG_SND_SOC_WM8776=m +CONFIG_SND_SOC_WM8903=m +CONFIG_SND_SOC_WM8960=m +CONFIG_SND_SOC_WM8962=m +CONFIG_SND_SOC_WM9712=m +CONFIG_SND_SONICVIBES=m +CONFIG_SND_SPI=y +CONFIG_SND_SSCAPE=m +CONFIG_SND_SST_PLATFORM=m +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_TRIDENT=m +CONFIG_SND_USB=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_US122L=m +CONFIG_SND_USB_USX2Y=m +# CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_VERBOSE_PROCFS=y +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VMASTER=y +CONFIG_SND_VX222=m +CONFIG_SND_VXPOCKET=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_WAVEFRONT=m +CONFIG_SND_WSS_LIB=m +CONFIG_SND_YMFPCI=m +# CONFIG_SOC_AM43XX is not set +CONFIG_SOC_BUS=y +CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_IMX074=m +CONFIG_SOC_CAMERA_MT9M001=m +CONFIG_SOC_CAMERA_MT9M111=m +CONFIG_SOC_CAMERA_MT9T031=m +CONFIG_SOC_CAMERA_MT9T112=m +CONFIG_SOC_CAMERA_MT9V022=m +CONFIG_SOC_CAMERA_OV2640=m +CONFIG_SOC_CAMERA_OV5642=m +CONFIG_SOC_CAMERA_OV6650=m +CONFIG_SOC_CAMERA_OV772X=m +CONFIG_SOC_CAMERA_OV9640=m +CONFIG_SOC_CAMERA_OV9740=m +CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_SOC_CAMERA_RJ54N1=m +CONFIG_SOC_CAMERA_SCALE_CROP=m +CONFIG_SOC_CAMERA_TW9910=m +CONFIG_SOC_DRA7XX=y +CONFIG_SOC_HAS_OMAP2_SDRC=y +CONFIG_SOC_HAS_REALTIME_COUNTER=y +# CONFIG_SOC_IMX53 is not set +CONFIG_SOC_IMX6Q=y +CONFIG_SOC_IMX6SL=y +CONFIG_SOC_OMAP3430=y +# CONFIG_SOC_OMAP5 is not set +CONFIG_SOC_TI81XX=y +CONFIG_SOC_VF610=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_SOLO6X10=m +CONFIG_SONYPI=m +CONFIG_SONYPI_COMPAT=y +CONFIG_SONY_FF=y +CONFIG_SONY_LAPTOP=m +# CONFIG_SOUND_OSS_CORE is not set +# CONFIG_SOUND_PRIME is not set +CONFIG_SP5100_TCO=m +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_STATIC=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSE_IRQ=y +# CONFIG_SPARSE_RCU_POINTER is not set 
+CONFIG_SPEAKUP=m +CONFIG_SPEAKUP_SYNTH_ACNTPC=m +CONFIG_SPEAKUP_SYNTH_ACNTSA=m +CONFIG_SPEAKUP_SYNTH_APOLLO=m +CONFIG_SPEAKUP_SYNTH_AUDPTR=m +CONFIG_SPEAKUP_SYNTH_BNS=m +CONFIG_SPEAKUP_SYNTH_DECEXT=m +CONFIG_SPEAKUP_SYNTH_DECPC=m +CONFIG_SPEAKUP_SYNTH_DECTLK=m +CONFIG_SPEAKUP_SYNTH_DTLK=m +CONFIG_SPEAKUP_SYNTH_DUMMY=m +CONFIG_SPEAKUP_SYNTH_KEYPC=m +CONFIG_SPEAKUP_SYNTH_LTLK=m +CONFIG_SPEAKUP_SYNTH_SOFT=m +CONFIG_SPEAKUP_SYNTH_SPKOUT=m +CONFIG_SPEAKUP_SYNTH_TXPRT=m +CONFIG_SPI=y +CONFIG_SPI_ALTERA=m +CONFIG_SPI_BITBANG=m +CONFIG_SPI_BUTTERFLY=m +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_DESIGNWARE=m +CONFIG_SPI_DW_MID_DMA=y +CONFIG_SPI_DW_MMIO=m +CONFIG_SPI_DW_PCI=m +CONFIG_SPI_FSL_CPM=y +CONFIG_SPI_FSL_DSPI=m +CONFIG_SPI_FSL_ESPI=y +CONFIG_SPI_FSL_LIB=y +CONFIG_SPI_FSL_SPI=y +CONFIG_SPI_GPIO=m +CONFIG_SPI_IMX=m +CONFIG_SPI_LM70_LLP=m +CONFIG_SPI_MASTER=y +CONFIG_SPI_OC_TINY=m +CONFIG_SPI_OMAP24XX=y +CONFIG_SPI_PL022=m +CONFIG_SPI_PXA2XX=m +CONFIG_SPI_PXA2XX_DMA=y +CONFIG_SPI_SC18IS602=m +CONFIG_SPI_SPIDEV=m +CONFIG_SPI_TEGRA114=m +CONFIG_SPI_TEGRA20_SFLASH=m +CONFIG_SPI_TEGRA20_SLINK=m +CONFIG_SPI_TI_QSPI=m +CONFIG_SPI_TLE62X0=m +CONFIG_SPI_TOPCLIFF_PCH=m +CONFIG_SPI_XCOMM=m +# CONFIG_SPI_XILINX is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_SPU_BASE=y +CONFIG_SPU_FS=m +CONFIG_SPU_FS_64K_LS=y +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_XZ=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SSB=m +CONFIG_SSB_B43_PCI_BRIDGE=y +CONFIG_SSB_BLOCKIO=y +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_GPIO=y +CONFIG_SSB_DRIVER_PCICORE=y +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +# CONFIG_SSB_PCMCIAHOST is not set +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y +CONFIG_SSB_POSSIBLE=y +CONFIG_SSB_SDIOHOST=y +CONFIG_SSB_SDIOHOST_POSSIBLE=y +# CONFIG_SSB_SILENT is not set +CONFIG_SSB_SPROM=y +CONFIG_SSFDC=m +CONFIG_STACKTRACE=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_STAGING_MEDIA=y +CONFIG_STE10XP=y +CONFIG_STE_MODEM_RPROC=m +# CONFIG_STMMAC_DA is not set +# CONFIG_STMMAC_DEBUG_FS is not set +CONFIG_STMMAC_ETH=m +# CONFIG_STMMAC_PCI is not set +CONFIG_STMMAC_PLATFORM=y +CONFIG_STMPE_I2C=y +CONFIG_STMPE_SPI=y +CONFIG_STMP_DEVICE=y +CONFIG_STOP_MACHINE=y +CONFIG_STP=m +CONFIG_STRICT_DEVMEM=y +# CONFIG_STRIP_ASM_SYMS is not set +CONFIG_SUNDANCE=m +# CONFIG_SUNDANCE_MMIO is not set +CONFIG_SUNGEM=m +CONFIG_SUNGEM_PHY=m +CONFIG_SUNRPC=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_SWAP=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_SUN_PARTITION=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_SWAP=y +CONFIG_SWIOTLB_XEN=y +CONFIG_SWP_EMULATE=y +CONFIG_SYNCLINK=m +CONFIG_SYNCLINKMP=m +CONFIG_SYNCLINK_CS=m +CONFIG_SYNCLINK_GT=m +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSFS=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSV68_PARTITION=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSV_FS=m +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_GTCO=m +CONFIG_TABLET_USB_HANWANG=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_USB_WACOM=m +CONFIG_TARGET_CORE=m +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_TASK_SIZE=0xc0000000 +CONFIG_TASK_XACCT=y 
+CONFIG_TAU=y +# CONFIG_TAU_AVERAGE is not set +# CONFIG_TAU_INT is not set +CONFIG_TC1100_WMI=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ST33_I2C=m +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_TPM=y +CONFIG_TCG_XEN=m +CONFIG_TCIC=m +CONFIG_TCM_FC=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_QLA2XXX=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_MD5SIG=y +CONFIG_TCS3472=m +CONFIG_TEGRA20_APB_DMA=y +CONFIG_TEGRA20_MC=y +CONFIG_TEGRA30_MC=y +CONFIG_TEGRA_AHB=y +CONFIG_TEGRA_EMC_SCALING_ENABLE=y +CONFIG_TEGRA_HOST1X=m +CONFIG_TEGRA_HOST1X_FIREWALL=y +CONFIG_TEGRA_IOMMU_GART=y +CONFIG_TEGRA_IOMMU_SMMU=y +CONFIG_TEHUTI=m +CONFIG_TEKRAM_DONGLE=m +CONFIG_TELCLOCK=m +# CONFIG_TEST_LIST_SORT is not set +CONFIG_TEST_POWER=m +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_TEXTSEARCH_KMP=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_EMULATION=y +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERM_ADT746X=y +CONFIG_THERM_PM72=m +CONFIG_THERM_WINDTUNNEL=m +CONFIG_THINKPAD_ACPI=m +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y +# CONFIG_THINKPAD_ACPI_DEBUG is not set +CONFIG_THINKPAD_ACPI_DEBUGFACILITIES=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THRUSTMASTER_FF=y +# CONFIG_THUMB2_KERNEL is not set +CONFIG_TICK_ONESHOT=y +CONFIG_TIFM_7XX1=m +CONFIG_TIFM_CORE=m +CONFIG_TIMB_DMA=m +CONFIG_TIMERFD=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_PORTS=8191 +CONFIG_TI_ADC081C=m +CONFIG_TI_AM335X_ADC=m +CONFIG_TI_CPPI41=m +CONFIG_TI_CPSW=y +CONFIG_TI_CPSW_PHY_SEL=y +CONFIG_TI_CPTS=y +CONFIG_TI_DAC7512=m +CONFIG_TI_DAVINCI_CPDMA=y +CONFIG_TI_DAVINCI_EMAC=m +CONFIG_TI_DAVINCI_MDIO=y +CONFIG_TI_EDMA=y +CONFIG_TI_EMIF=m +CONFIG_TI_PRIV_EDMA=y +CONFIG_TI_SOC_THERMAL=m +CONFIG_TI_ST=m +CONFIG_TI_THERMAL=y +CONFIG_TLAN=m +CONFIG_TMD_HERMES=m +CONFIG_TMP006=m +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +CONFIG_TOIM3232_DONGLE=m +CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_TOSHIBA is not set +CONFIG_TOSHIBA_BT_RFKILL=m +CONFIG_TOSHIBA_FIR=m +CONFIG_TOUCHSCREEN_88PM860X=m +CONFIG_TOUCHSCREEN_AD7877=m +CONFIG_TOUCHSCREEN_AD7879=m +CONFIG_TOUCHSCREEN_AD7879_I2C=m +CONFIG_TOUCHSCREEN_AD7879_SPI=m +CONFIG_TOUCHSCREEN_ADS7846=m +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m +CONFIG_TOUCHSCREEN_BU21013=m +CONFIG_TOUCHSCREEN_CLEARPAD_TM1217=m +CONFIG_TOUCHSCREEN_CY8CTMG110=m +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m +CONFIG_TOUCHSCREEN_DA9034=m +CONFIG_TOUCHSCREEN_DA9052=m +CONFIG_TOUCHSCREEN_DYNAPRO=m +CONFIG_TOUCHSCREEN_EDT_FT5X06=m +CONFIG_TOUCHSCREEN_EETI=m +CONFIG_TOUCHSCREEN_EGALAX=m +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_FUJITSU=m +CONFIG_TOUCHSCREEN_GUNZE=m 
+CONFIG_TOUCHSCREEN_HAMPSHIRE=m +CONFIG_TOUCHSCREEN_HTCPEN=m +CONFIG_TOUCHSCREEN_ILI210X=m +CONFIG_TOUCHSCREEN_INEXIO=m +CONFIG_TOUCHSCREEN_INTEL_MID=m +CONFIG_TOUCHSCREEN_MAX11801=m +CONFIG_TOUCHSCREEN_MC13783=m +CONFIG_TOUCHSCREEN_MCS5000=m +CONFIG_TOUCHSCREEN_MK712=m +CONFIG_TOUCHSCREEN_MMS114=m +CONFIG_TOUCHSCREEN_MTOUCH=m +CONFIG_TOUCHSCREEN_PCAP=m +CONFIG_TOUCHSCREEN_PENMOUNT=m +CONFIG_TOUCHSCREEN_PIXCIR=m +CONFIG_TOUCHSCREEN_ST1232=m +CONFIG_TOUCHSCREEN_STMPE=m +CONFIG_TOUCHSCREEN_SUR40=m +CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=m +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m +CONFIG_TOUCHSCREEN_TOUCHIT213=m +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m +CONFIG_TOUCHSCREEN_TOUCHWIN=m +CONFIG_TOUCHSCREEN_TPS6507X=m +CONFIG_TOUCHSCREEN_TSC2005=m +CONFIG_TOUCHSCREEN_TSC2007=m +CONFIG_TOUCHSCREEN_TSC_SERIO=m +CONFIG_TOUCHSCREEN_UCB1400=m +CONFIG_TOUCHSCREEN_USB_3M=y +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y +CONFIG_TOUCHSCREEN_USB_E2I=y +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y +CONFIG_TOUCHSCREEN_USB_EGALAX=y +CONFIG_TOUCHSCREEN_USB_ELO=y +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y +CONFIG_TOUCHSCREEN_USB_ETURBO=y +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y +CONFIG_TOUCHSCREEN_USB_GOTOP=y +CONFIG_TOUCHSCREEN_USB_GUNZE=y +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y +CONFIG_TOUCHSCREEN_USB_ITM=y +CONFIG_TOUCHSCREEN_USB_JASTEC=y +CONFIG_TOUCHSCREEN_USB_NEXIO=y +CONFIG_TOUCHSCREEN_USB_PANJIT=y +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y +CONFIG_TOUCHSCREEN_W90X900=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WM831X=m +CONFIG_TOUCHSCREEN_WM9705=y +CONFIG_TOUCHSCREEN_WM9712=y +CONFIG_TOUCHSCREEN_WM9713=y +CONFIG_TOUCHSCREEN_WM97XX=m +CONFIG_TOUCHSCREEN_ZFORCE=m +CONFIG_TPS6105X=m +CONFIG_TPS65010=m +CONFIG_TPS6507X=m +CONFIG_TQM85xx=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_TRACE_CLOCK=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_ROUTER=m +CONFIG_TRACE_SINK=m +CONFIG_TRACING=y +CONFIG_TRACING_SUPPORT=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANZPORT=m +CONFIG_TREE_PREEMPT_RCU=y +CONFIG_TREE_RCU=y +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_TRUSTED_KEYS=y +CONFIG_TSL2583=m +CONFIG_TSL2x7x=m +CONFIG_TSL4531=m +CONFIG_TTPCI_EEPROM=m +CONFIG_TTY=y +CONFIG_TTY_PRINTK=y +CONFIG_TULIP=m +# CONFIG_TULIP_MMIO is not set +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_NAPI is not set +CONFIG_TUN=y +# CONFIG_TUNE_CELL is not set +CONFIG_TWL4030_MADC=m +CONFIG_TWL4030_POWER=y +CONFIG_TWL4030_USB=m +CONFIG_TWL4030_WATCHDOG=m +CONFIG_TWL6030_GPADC=m +CONFIG_TWL6030_USB=m +CONFIG_TYPHOON=m +# CONFIG_UACCESS_WITH_MEMCPY is not set +CONFIG_UBIFS_FS=m +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UCB1400_CORE=m +CONFIG_UCS2_STRING=y +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y +CONFIG_UEFI_CPER=y +CONFIG_UEVENT_HELPER_PATH="" +# CONFIG_UFS_DEBUG is not set +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +CONFIG_UHID=m +CONFIG_UID16=y +CONFIG_UIDGID_STRICT_TYPE_CHECKS=y +CONFIG_UIO=m +CONFIG_ULI526X=m +CONFIG_ULTRA=m +CONFIG_ULTRIX_PARTITION=y +CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_UNIX=y +CONFIG_UNIX98_PTYS=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_UNIX_DIAG=m +CONFIG_UPROBES=y +CONFIG_UPROBE_EVENT=y +CONFIG_USB=y +CONFIG_USBIP_CORE=m +# CONFIG_USBIP_DEBUG is not set +CONFIG_USBIP_HOST=m +CONFIG_USBIP_VHCI_HCD=m 
+CONFIG_USBPCWATCHDOG=m +CONFIG_USB_ACM=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AMD5536UDC=m +CONFIG_USB_AN2720=y +CONFIG_USB_ARCH_HAS_EHCI=y +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_ATM=m +CONFIG_USB_AUDIO=m +CONFIG_USB_BELKIN=y +CONFIG_USB_BTMTK=m +CONFIG_USB_CDC_COMPOSITE=m +CONFIG_USB_CDC_PHONET=m +# CONFIG_USB_CHIPIDEA_DEBUG is not set +CONFIG_USB_CHIPIDEA_HOST=y +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_COMMON=y +CONFIG_USB_CONFIGFS=m +CONFIG_USB_CONFIGFS_ACM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_ECM_SUBSET=y +CONFIG_USB_CONFIGFS_EEM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_OBEX=y +CONFIG_USB_CONFIGFS_PHONET=y +CONFIG_USB_CONFIGFS_RNDIS=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CXACRU=m +# CONFIG_USB_DEBUG is not set +CONFIG_USB_DEFAULT_PERSIST=y +CONFIG_USB_DSBR=m +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_DWC2=m +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_DWC3_DEBUG is not set +CONFIG_USB_DWC3_DUAL_ROLE=y +# CONFIG_USB_DWC3_EXYNOS is not set +# CONFIG_USB_DWC3_GADGET is not set +# CONFIG_USB_DWC3_HOST is not set +CONFIG_USB_DWC3_PCI=m +CONFIG_USB_EG20T=m +CONFIG_USB_EHCI_BIG_ENDIAN_DESC=y +CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y +CONFIG_USB_EHCI_FSL=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_OMAP=y +# CONFIG_USB_EHCI_MXC is not set +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_TEGRA=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_ENESTORAGE=m +CONFIG_USB_EPSON2888=y +CONFIG_USB_ETH=m +CONFIG_USB_ETH_EEM=y +CONFIG_USB_ETH_RNDIS=y +CONFIG_USB_FOTG210_UDC=m +CONFIG_USB_FSL_MPH_DR_OF=y +CONFIG_USB_FSL_QE=m +CONFIG_USB_FUNCTIONFS=m +CONFIG_USB_FUNCTIONFS_ETH=y +CONFIG_USB_FUNCTIONFS_GENERIC=y +CONFIG_USB_FUNCTIONFS_RNDIS=y +CONFIG_USB_FUSB300=m +CONFIG_USB_F_ACM=m +CONFIG_USB_F_ECM=m +CONFIG_USB_F_EEM=m +CONFIG_USB_F_MASS_STORAGE=m +CONFIG_USB_F_NCM=m +CONFIG_USB_F_OBEX=m +CONFIG_USB_F_PHONET=m +CONFIG_USB_F_RNDIS=m +CONFIG_USB_F_SERIAL=m +CONFIG_USB_F_SS_LB=m +CONFIG_USB_F_SUBSET=m +CONFIG_USB_GADGETFS=m +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 +CONFIG_USB_GADGET_TARGET=m +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GL860=m +CONFIG_USB_GOKU=m +CONFIG_USB_GPIO_VBUS=m +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KINECT=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STK1135=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m 
+CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_G_ACM_MS=m +CONFIG_USB_G_DBGP=m +# CONFIG_USB_G_DBGP_PRINTK is not set +CONFIG_USB_G_DBGP_SERIAL=y +CONFIG_USB_G_HID=m +CONFIG_USB_G_MULTI_CDC=y +CONFIG_USB_G_MULTI_RNDIS=y +CONFIG_USB_G_NCM=m +CONFIG_USB_G_NOKIA=m +CONFIG_USB_G_PRINTER=m +CONFIG_USB_G_SERIAL=m +CONFIG_USB_G_WEBCAM=m +# CONFIG_USB_HCD_TEST_MODE is not set +CONFIG_USB_HID=m +CONFIG_USB_HIDDEV=y +CONFIG_USB_HSO=m +CONFIG_USB_HWA_HCD=m +# CONFIG_USB_IMX21_HCD is not set +CONFIG_USB_INVENTRA_DMA=y +CONFIG_USB_IRDA=m +CONFIG_USB_KBD=m +CONFIG_USB_KC2190=y +CONFIG_USB_KEENE=m +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_M5602=m +# CONFIG_USB_M66592 is not set +CONFIG_USB_MA901=m +CONFIG_USB_MASS_STORAGE=m +CONFIG_USB_MIDI_GADGET=m +CONFIG_USB_MON=m +CONFIG_USB_MOUSE=m +CONFIG_USB_MR800=m +CONFIG_USB_MSI3101=m +CONFIG_USB_MUSB_AM335X_CHILD=m +# CONFIG_USB_MUSB_AM35X is not set +CONFIG_USB_MUSB_DUAL_ROLE=y +# CONFIG_USB_MUSB_GADGET is not set +# CONFIG_USB_MUSB_HOST is not set +CONFIG_USB_MUSB_OMAP2PLUS=m +CONFIG_USB_MUSB_UX500=m +CONFIG_USB_MV_U3D=m +CONFIG_USB_MV_UDC=m +CONFIG_USB_MXS_PHY=y +CONFIG_USB_NET2272=m +CONFIG_USB_NET2272_DMA=y +CONFIG_USB_NET2280=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y +CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_OMAP3=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_PHY=y +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_USB_PXA27X=m +CONFIG_USB_R8A66597=m +CONFIG_USB_RENESAS_USBHS=m +CONFIG_USB_RENESAS_USBHS_HCD=m +CONFIG_USB_RENESAS_USBHS_UDC=m +CONFIG_USB_S2255=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KEYSPAN_MPR=y +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN_USA18X=y +CONFIG_USB_SERIAL_KEYSPAN_USA19=y +CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y +CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y +CONFIG_USB_SERIAL_KEYSPAN_USA19W=y +CONFIG_USB_SERIAL_KEYSPAN_USA28=y +CONFIG_USB_SERIAL_KEYSPAN_USA28X=y +CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y +CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y +CONFIG_USB_SERIAL_KEYSPAN_USA49W=y +CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m 
+CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_QUATECH2=m +CONFIG_USB_SERIAL_SAFE=m +# CONFIG_USB_SERIAL_SAFE_PADDED is not set +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_ZTE=m +CONFIG_USB_SI470X=m +CONFIG_USB_SIERRA_NET=m +# CONFIG_USB_SISUSBVGA_CON is not set +CONFIG_USB_SL811_CS=m +CONFIG_USB_SL811_HCD_ISO=y +# CONFIG_USB_SN9C102 is not set +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_STKWEBCAM=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STV06XX=m +CONFIG_USB_SWITCH_FSA9480=m +# CONFIG_USB_TI_CPPI41_DMA is not set +CONFIG_USB_U132_HCD=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_ULPI=y +CONFIG_USB_ULPI_VIEWPORT=y +CONFIG_USB_USBNET=m +# CONFIG_USB_UX500_DMA is not set +CONFIG_USB_U_ETHER=m +CONFIG_USB_U_RNDIS=m +CONFIG_USB_U_SERIAL=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y +CONFIG_USB_VL600=m +CONFIG_USB_WDM=m +CONFIG_USB_WHCI_HCD=m +CONFIG_USB_WPAN_HCD=m +CONFIG_USB_WUSB=m +# CONFIG_USB_WUSB_CBAF_DEBUG is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_XUSBATM=m +CONFIG_USB_ZD1201=m +CONFIG_USB_ZERO=m +CONFIG_USB_ZERO_HNPTEST=y +CONFIG_USB_ZR364XX=m +CONFIG_USER_NS=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_UTS_NS=y +CONFIG_UWB=m +CONFIG_UWB_HWA=m +CONFIG_UWB_I1480U=m +CONFIG_UWB_WHCI=m +CONFIG_V4L2_MEM2MEM_DEV=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_V4L_RADIO_ISA_DRIVERS=y +CONFIG_V4L_TEST_DRIVERS=y +CONFIG_VCNL4000=m +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_VERSION_SIGNATURE="" +CONFIG_VETH=m +CONFIG_VEXPRESS_CONFIG=y +CONFIG_VFAT_FS=y +CONFIG_VFIO=m +CONFIG_VFIO_IOMMU_SPAPR_TCE=m +CONFIG_VFIO_PCI=m +CONFIG_VFIO_PCI_VGA=y +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_VF_PIT_TIMER=y +# CONFIG_VGACON_SOFT_SCROLLBACK is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_VGA_SWITCHEROO=y +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_RING=m +CONFIG_VHOST_SCSI=m +CONFIG_VIA_FIR=m +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y +CONFIG_VIA_VELOCITY=m +CONFIG_VIA_WDT=m +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_DMA_CONTIG=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF_DMA_CONTIG=m +CONFIG_VIDEOBUF_DMA_SG=m +CONFIG_VIDEOBUF_DVB=m +CONFIG_VIDEOBUF_GEN=m +CONFIG_VIDEOBUF_VMALLOC=m +CONFIG_VIDEOMODE_HELPERS=y +CONFIG_VIDEO_ADV7170=m +CONFIG_VIDEO_ADV7175=m +CONFIG_VIDEO_ADV7180=m +# CONFIG_VIDEO_ADV_DEBUG is not set +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +CONFIG_VIDEO_BT819=m +CONFIG_VIDEO_BT848=m +CONFIG_VIDEO_BT856=m +CONFIG_VIDEO_BT866=m +CONFIG_VIDEO_BTCX=m +CONFIG_VIDEO_BWQCAM=m +CONFIG_VIDEO_CAFE_CCIC=m +# CONFIG_VIDEO_CODA is not set +CONFIG_VIDEO_CPIA2=m +CONFIG_VIDEO_CQCAM=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX18_ALSA=m +CONFIG_VIDEO_CX231XX=m 
+CONFIG_VIDEO_CX231XX_ALSA=m +CONFIG_VIDEO_CX231XX_DVB=m +CONFIG_VIDEO_CX231XX_RC=y +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_CX23885=m +CONFIG_VIDEO_CX25821=m +CONFIG_VIDEO_CX25821_ALSA=m +CONFIG_VIDEO_CX25840=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +CONFIG_VIDEO_CX88_ENABLE_VP3054=y +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_CX88_VP3054=m +CONFIG_VIDEO_DEV=m +CONFIG_VIDEO_DM6446_CCDC=m +CONFIG_VIDEO_DT3155=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m +CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_GO7007=m +CONFIG_VIDEO_GO7007_LOADER=m +CONFIG_VIDEO_GO7007_USB=m +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_HEXIUM_GEMINI=m +CONFIG_VIDEO_HEXIUM_ORION=m +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_IVTV_ALSA=m +CONFIG_VIDEO_KS0127=m +CONFIG_VIDEO_M52790=m +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m +CONFIG_VIDEO_MEM2MEM_TESTDEV=m +CONFIG_VIDEO_MEYE=m +CONFIG_VIDEO_MSP3400=m +CONFIG_VIDEO_MT9V011=m +# CONFIG_VIDEO_MX3 is not set +CONFIG_VIDEO_MXB=m +CONFIG_VIDEO_OMAP2_VOUT=m +CONFIG_VIDEO_OMAP2_VOUT_VRFB=y +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_VIDEO_OV7640=m +CONFIG_VIDEO_OV7670=m +CONFIG_VIDEO_PMS=m +CONFIG_VIDEO_PVRUSB2=m +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +CONFIG_VIDEO_PVRUSB2_DVB=y +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_VIDEO_SAA6588=m +CONFIG_VIDEO_SAA7110=m +CONFIG_VIDEO_SAA711X=m +CONFIG_VIDEO_SAA7127=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_VIDEO_SAA7164=m +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_SAA7185=m +CONFIG_VIDEO_SH_MOBILE_CEU=m +CONFIG_VIDEO_SH_MOBILE_CSI2=m +CONFIG_VIDEO_SH_VEU=m +CONFIG_VIDEO_SONY_BTF_MPX=m +CONFIG_VIDEO_STK1160=m +CONFIG_VIDEO_STK1160_AC97=y +CONFIG_VIDEO_STK1160_COMMON=m +CONFIG_VIDEO_TDA7432=m +CONFIG_VIDEO_TDA9840=m +CONFIG_VIDEO_TEA6415C=m +CONFIG_VIDEO_TEA6420=m +CONFIG_VIDEO_TIMBERDALE=m +CONFIG_VIDEO_TI_VPE=m +# CONFIG_VIDEO_TI_VPE_DEBUG is not set +CONFIG_VIDEO_TLG2300=m +CONFIG_VIDEO_TM6000=m +CONFIG_VIDEO_TM6000_ALSA=m +CONFIG_VIDEO_TM6000_DVB=m +CONFIG_VIDEO_TUNER=m +CONFIG_VIDEO_TVAUDIO=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_VIDEO_TVP5150=m +CONFIG_VIDEO_TW2804=m +CONFIG_VIDEO_TW9903=m +CONFIG_VIDEO_TW9906=m +CONFIG_VIDEO_UDA1342=m +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +CONFIG_VIDEO_USBTV=m +CONFIG_VIDEO_USBVISION=m +CONFIG_VIDEO_V4L2=m +CONFIG_VIDEO_V4L2_INT_DEVICE=m +CONFIG_VIDEO_VIA_CAMERA=m +CONFIG_VIDEO_VIVI=m +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_VPX3220=m +CONFIG_VIDEO_W9966=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +CONFIG_VIDEO_ZORAN=m +CONFIG_VIDEO_ZORAN_AVS6EYES=m +CONFIG_VIDEO_ZORAN_BUZ=m +CONFIG_VIDEO_ZORAN_DC10=m +CONFIG_VIDEO_ZORAN_DC30=m +CONFIG_VIDEO_ZORAN_LML33=m +CONFIG_VIDEO_ZORAN_LML33R10=m +CONFIG_VIDEO_ZORAN_ZR36060=m +CONFIG_VIPERBOARD_ADC=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTUALIZATION=y +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y +CONFIG_VIRT_DRIVERS=y +CONFIG_VIRT_TO_BUS=y +CONFIG_VITESSE_PHY=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_VLSI_FIR=m +CONFIG_VM86=y +CONFIG_VME_BUS=m +CONFIG_VME_CA91CX42=m +CONFIG_VME_PIO2=m +CONFIG_VME_TSI148=m +CONFIG_VME_USER=m +CONFIG_VMIVME_7805=m +# CONFIG_VMSPLIT_1G is not set +# CONFIG_VMSPLIT_2G is not set +CONFIG_VMSPLIT_3G=y 
+CONFIG_VMWARE_BALLOON=m +CONFIG_VMWARE_PVSCSI=m +CONFIG_VMWARE_VMCI=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VMXNET3=m +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VORTEX=m +CONFIG_VSOCKETS=m +CONFIG_VSX=y +CONFIG_VT=y +CONFIG_VT6655=m +CONFIG_VT6656=m +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_VXFS_FS=m +CONFIG_VXGE=m +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set +CONFIG_VXLAN=m +CONFIG_W1=m +CONFIG_W1_CON=y +CONFIG_W1_MASTER_DS1WM=m +CONFIG_W1_MASTER_DS2482=m +CONFIG_W1_MASTER_DS2490=m +CONFIG_W1_MASTER_GPIO=m +CONFIG_W1_MASTER_MATROX=m +CONFIG_W1_MASTER_MXC=m +CONFIG_W1_SLAVE_BQ27000=m +CONFIG_W1_SLAVE_DS2408=m +CONFIG_W1_SLAVE_DS2408_READBACK=y +CONFIG_W1_SLAVE_DS2413=m +CONFIG_W1_SLAVE_DS2423=m +CONFIG_W1_SLAVE_DS2431=m +CONFIG_W1_SLAVE_DS2433=m +# CONFIG_W1_SLAVE_DS2433_CRC is not set +CONFIG_W1_SLAVE_DS2760=m +CONFIG_W1_SLAVE_DS2780=m +CONFIG_W1_SLAVE_DS2781=m +CONFIG_W1_SLAVE_DS28E04=m +CONFIG_W1_SLAVE_SMEM=m +CONFIG_W1_SLAVE_THERM=m +CONFIG_W35UND=m +CONFIG_W83627HF_WDT=m +CONFIG_W83697HF_WDT=m +CONFIG_W83697UG_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_WAFER_WDT=m +CONFIG_WAN=y +CONFIG_WANXL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_RTAS=m +CONFIG_WCN36XX=m +# CONFIG_WCN36XX_DEBUGFS is not set +CONFIG_WD80x3=m +CONFIG_WDT=m +CONFIG_WDTPCI=m +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PRIV=y +CONFIG_WEXT_PROC=y +CONFIG_WEXT_SPY=y +CONFIG_WIL6210_ISR_COR=y +CONFIG_WIL6210_TRACING=y +CONFIG_WILINK_PLATFORM_DATA=y +CONFIG_WIMAX=m +CONFIG_WIMAX_DEBUG_LEVEL=8 +CONFIG_WIMAX_GDM72XX=m +CONFIG_WIMAX_GDM72XX_K_MODE=y +CONFIG_WIMAX_GDM72XX_QOS=y +CONFIG_WIMAX_GDM72XX_USB=y +CONFIG_WIMAX_GDM72XX_USB_PM=y +CONFIG_WIMAX_GDM72XX_WIMAX2=y +CONFIG_WIMAX_I2400M=m +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8 +CONFIG_WIMAX_I2400M_USB=m +CONFIG_WINBOND_840=m +CONFIG_WINBOND_FIR=m +CONFIG_WINDFARM=m +CONFIG_WINDFARM_PM112=m +CONFIG_WINDFARM_PM121=m +CONFIG_WINDFARM_PM72=m +CONFIG_WINDFARM_PM81=m +CONFIG_WINDFARM_PM91=m +CONFIG_WINDFARM_RM31=m +CONFIG_WIRELESS=y +CONFIG_WIRELESS_EXT=y +CONFIG_WIZNET_BUS_ANY=y +# CONFIG_WIZNET_BUS_DIRECT is not set +# CONFIG_WIZNET_BUS_INDIRECT is not set +CONFIG_WIZNET_W5100=m +CONFIG_WIZNET_W5300=m +CONFIG_WL1251=m +CONFIG_WL1251_SDIO=m +CONFIG_WL1251_SPI=m +CONFIG_WL12XX=m +CONFIG_WL18XX=m +CONFIG_WLAGS49_H2=m +CONFIG_WLAGS49_H25=m +CONFIG_WLAN=y +CONFIG_WLCORE=m +CONFIG_WLCORE_SDIO=m +CONFIG_WLCORE_SPI=m +CONFIG_WL_TI=y +CONFIG_WM831X_BACKUP=m +CONFIG_WM831X_POWER=m +CONFIG_WM831X_WATCHDOG=m +CONFIG_WM8350_POWER=m +CONFIG_WM8350_WATCHDOG=m +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_X25=m +CONFIG_X25_ASY=m +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_X86=y +CONFIG_X86_16BIT=y +CONFIG_X86_32=y +CONFIG_X86_32_IRIS=m +# CONFIG_X86_32_NON_STANDARD is not set +CONFIG_X86_32_SMP=y +CONFIG_X86_64=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_X86_64_SMP=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_ANCIENT_MCE is not set +CONFIG_X86_APM_BOOT=y +# CONFIG_X86_BIGSMP is not set +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_CMOV=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CPUFREQ_NFORCE2=y +CONFIG_X86_CPUID=m +CONFIG_X86_DEBUGCTLMSR=y +# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set +# CONFIG_X86_DECODER_SELFTEST is not set +CONFIG_X86_DEV_DMA_OPS=y +CONFIG_X86_ESPFIX32=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_E_POWERSAVER is not set +CONFIG_X86_GENERIC=y +# 
CONFIG_X86_GOLDFISH is not set +CONFIG_X86_GX_SUSPMOD=m +CONFIG_X86_HT=y +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_INTEL_MID=y +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_INTEL_USERCOPY=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_IO_APIC=y +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_LONGHAUL=m +CONFIG_X86_LONGRUN=m +CONFIG_X86_MCE=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_INJECT=m +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MDFLD=y +CONFIG_X86_MPPARSE=y +CONFIG_X86_MSR=m +CONFIG_X86_NEED_RELOCS=y +CONFIG_X86_NUMACHIP=y +CONFIG_X86_P4_CLOCKMOD=m +CONFIG_X86_PAE=y +CONFIG_X86_PAT=y +CONFIG_X86_PCC_CPUFREQ=y +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_X86_PM_TIMER=y +CONFIG_X86_POWERNOW_K6=m +CONFIG_X86_POWERNOW_K7=m +CONFIG_X86_POWERNOW_K7_ACPI=y +CONFIG_X86_POWERNOW_K8=y +CONFIG_X86_PPRO_FENCE=y +# CONFIG_X86_PTDUMP is not set +# CONFIG_X86_RDC321X is not set +CONFIG_X86_REBOOTFIXUPS=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_RESERVE_LOW=64 +CONFIG_X86_SMAP=y +CONFIG_X86_SPEEDSTEP_CENTRINO=y +CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE=y +CONFIG_X86_SPEEDSTEP_ICH=y +CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK=y +CONFIG_X86_SPEEDSTEP_SMI=y +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +# CONFIG_X86_SYSFB is not set +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_X86_TSC=y +CONFIG_X86_UP_APIC_MSI=y +CONFIG_X86_USE_PPRO_CHECKSUM=y +# CONFIG_X86_UV is not set +# CONFIG_X86_VERBOSE_BOOTUP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_WANT_INTEL_MID=y +CONFIG_X86_X2APIC=y +CONFIG_X86_X32=y +CONFIG_XENFS=m +CONFIG_XEN_ACPI_PROCESSOR=y +CONFIG_XEN_BACKEND=y +CONFIG_XEN_BALLOON=y +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y +CONFIG_XEN_BLKDEV_BACKEND=m +CONFIG_XEN_BLKDEV_FRONTEND=y +CONFIG_XEN_COMPAT_XENFS=y +# CONFIG_XEN_DEBUG_FS is not set +CONFIG_XEN_DEV_EVTCHN=m +CONFIG_XEN_DOM0=y +CONFIG_XEN_FBDEV_FRONTEND=m +CONFIG_XEN_GNTDEV=m +CONFIG_XEN_GRANT_DEV_ALLOC=m +CONFIG_XEN_HAVE_PVMMU=y +CONFIG_XEN_MCE_LOG=y +CONFIG_XEN_NETDEV_BACKEND=m +CONFIG_XEN_NETDEV_FRONTEND=y +CONFIG_XEN_PCIDEV_BACKEND=m +CONFIG_XEN_PCIDEV_FRONTEND=m +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_PRIVILEGED_GUEST=y +CONFIG_XEN_PVHVM=y +CONFIG_XEN_SAVE_RESTORE=y +CONFIG_XEN_SCRUB_PAGES=y +CONFIG_XEN_SELFBALLOONING=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_TMEM=m +CONFIG_XEN_WDT=m +CONFIG_XEN_XENBUS_FRONTEND=y +CONFIG_XFRM=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_IPCOMP=m +# CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set +# CONFIG_XFRM_SUB_POLICY is not set +CONFIG_XFRM_USER=m +# CONFIG_XFS_DEBUG is not set +CONFIG_XFS_FS=m +CONFIG_XFS_POSIX_ACL=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_RT=y +# CONFIG_XFS_WARN is not set +# CONFIG_XILINX_EMACLITE is not set +CONFIG_XILINX_LL_TEMAC=m +CONFIG_XILLYBUS=m +CONFIG_XILLYBUS_OF=m +CONFIG_XILLYBUS_PCIE=m +# CONFIG_XMON_DEFAULT is not set +CONFIG_XMON_DISASSEMBLY=y +CONFIG_XO15_EBOOK=m +CONFIG_XOR_BLOCKS=m +CONFIG_XPS=y +CONFIG_XPS_USB_HCD_XILINX=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_BCJ=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_YAM=m +CONFIG_YELLOWFIN=m +CONFIG_YENTA=m +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBUD=y +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_ZEROPLUS_FF=y +CONFIG_ZISOFS=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZONE_DMA=y +CONFIG_ZRAM=m +# CONFIG_ZRAM_DEBUG is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSWAP=y --- linux-3.13.0.orig/debian.master/config/enforce +++ 
linux-3.13.0/debian.master/config/enforce @@ -0,0 +1,130 @@
+#
+# SECURITY items
+#
+# Ensure this option is enabled.
+value CONFIG_COMPAT_BRK n
+value CONFIG_DEVKMEM n
+value CONFIG_LSM_MMAP_MIN_ADDR 0
+value CONFIG_SECURITY y
+!exists CONFIG_SECURITY_FILE_CAPABILITIES | value CONFIG_SECURITY_FILE_CAPABILITIES y
+value CONFIG_SECURITY_SELINUX y
+value CONFIG_SECURITY_SMACK y
+value CONFIG_SECURITY_YAMA y
+value CONFIG_SYN_COOKIES y
+value CONFIG_DEFAULT_SECURITY_APPARMOR y
+value CONFIG_DEBUG_INFO y
+# For architectures which support this option ensure it is enabled.
+!exists CONFIG_XEN_ACPI_PROCESSOR | value CONFIG_XEN_ACPI_PROCESSOR y
+!exists CONFIG_SECCOMP | value CONFIG_SECCOMP y
+!exists CONFIG_HAVE_ARCH_SECCOMP_FILTER | value CONFIG_SECCOMP_FILTER y
+!exists CONFIG_CC_STACKPROTECTOR | value CONFIG_CC_STACKPROTECTOR y
+!exists CONFIG_DEBUG_RODATA | value CONFIG_DEBUG_RODATA y
+!exists CONFIG_DEBUG_SET_MODULE_RONX | value CONFIG_DEBUG_SET_MODULE_RONX y
+!exists CONFIG_STRICT_DEVMEM | value CONFIG_STRICT_DEVMEM y
+# For architectures which support this option ensure it is disabled.
+!exists CONFIG_COMPAT_VDSO | value CONFIG_COMPAT_VDSO n
+!exists CONFIG_ACPI_CUSTOM_METHOD | value CONFIG_ACPI_CUSTOM_METHOD n
+# Default to 32768 on ARM, 65536 for everything else -- arm64 and armhf must match (LP:1418140)
+(arch armel armhf arm64 &/ value CONFIG_DEFAULT_MMAP_MIN_ADDR 32768) | \
+ value CONFIG_DEFAULT_MMAP_MIN_ADDR 65536
+
+# upstart requires DEVTMPFS be enabled and mounted by default.
+value CONFIG_DEVTMPFS y
+value CONFIG_DEVTMPFS_MOUNT y
+
+# some /dev nodes require POSIX ACLs, like /dev/dsp
+value CONFIG_TMPFS_POSIX_ACL y
+
+# Ramdisk size should be a minimum of 64M
+value CONFIG_BLK_DEV_RAM_SIZE 65536
+
+# LVM requires dm_mod built in to activate correctly (LP: #560717)
+value CONFIG_BLK_DEV_DM y
+
+# sysfs: ensure all DEPRECATED items are off
+!exists CONFIG_SYSFS_DEPRECATED_V2 | value CONFIG_SYSFS_DEPRECATED_V2 n
+!exists CONFIG_SYSFS_DEPRECATED | value CONFIG_SYSFS_DEPRECATED n
+
+# automatically adding the local version will cause a packaging failure
+value CONFIG_LOCALVERSION_AUTO n
+
+# provide framebuffer console from the start
+# UbuntuSpec:foundations-m-grub2-boot-framebuffer
+value CONFIG_FRAMEBUFFER_CONSOLE y
+
+# GRUB changes will rely on built in vesafb on x86,
+# UbuntuSpec:foundations-m-grub2-boot-framebuffer
+#(( arch i386 | arch amd64 ) & value CONFIG_FB_VESA y) | \
+# value CONFIG_FB_VESA m | !exists CONFIG_FB_VESA
+#value CONFIG_FB_VESA m | !exists CONFIG_FB_VESA
+
+# Build in uinput module so that it's always available (LP: 584812)
+value CONFIG_INPUT_UINPUT y
+
+# upstart relies on getting all of the kernel arguments
+#value CONFIG_INIT_PASS_ALL_PARAMS y
+
+# Ensure CONFIG_IPV6 is y; if this is a module we get a module load for
+# every ipv6 packet, bad.
+value CONFIG_IPV6 y
+
+# Ensure ECRYPT_FS is y as it cannot be autoloaded and it has complex
+# dependencies which can pull it =m at a whim.
+value CONFIG_ECRYPT_FS y
+
+# Ensure CONFIG_EFI_VARS is y as debian-installer relies on having
+# access to efivars when installing in EFI mode. See LP:837332
+value CONFIG_EFI_VARS y | !exists CONFIG_EFI_VARS
+
+# Ensure CONFIG_VFAT_FS is y for arm, needed to ensure we are able to replace
+# a kernel with the same version. Also needed for EFI based systems.
+#(arch armel armhf arm64 i386 amd64 &/ value CONFIG_VFAT_FS y) | \ +# value CONFIG_VFAT_FS m +value CONFIG_VFAT_FS y + +# Ensure CONFIG_GPIO_TWL4030 is y for arm, LP:921934 +(arch armel armhf &/ value CONFIG_GPIO_TWL4030 y) | \ + value CONFIG_GPIO_TWL4030 m | \ + !exists CONFIG_GPIO_TWL4030 + +# Ensure CONFIG_THERM_ADT746X is y for powerpc-smp flavours. +# See LP:923094 +(flavour powerpc-smp &/ value CONFIG_THERM_ADT746X y) | \ + !exists CONFIG_THERM_ADT746X + +# Ensure CONFIG_NVRAM is y for powerpc-smp, LP:942193 +(flavour powerpc-smp powerpc-e500 powerpc-e500mc &/ value CONFIG_NVRAM y) | \ + (flavour powerpc-e500 powerpc-e500mc) | \ + value CONFIG_NVRAM m | \ + !exists CONFIG_NVRAM + +# Ensure CONFIG_STUB_POULSBO is disabled if CONFIG_DRM_PSB is enabled +# See LP:899244 +(!exists CONFIG_DRM_PSB | value CONFIG_DRM_PSB n) | \ +((value CONFIG_DRM_PSB y | value CONFIG_DRM_PSB m) & (value CONFIG_STUB_POULSBO n | !exists CONFIG_STUB_POULSBO)) + +# Ensure CONFIG_B43_BCMA_EXTRA is disabled if CONFIG_BRCMSMAC is enabled. +# Otherwise b43 and brcmsmac will overlap in the hardware they claim to +# support. +(!exists CONFIG_BRCMSMAC | value CONFIG_BRCMSMAC n) | \ +((value CONFIG_BRCMSMAC y | value CONFIG_BRCMSMAC m) & (value CONFIG_B43_BCMA_EXTRA n | !exists CONFIG_B43_BCMA_EXTRA)) + +# CONFIG_I2C_DESIGNWARE_PLATFORM is required by Calxeda Highbank +(flavour highbank &/ value CONFIG_I2C_DESIGNWARE_PLATFORM y) | \ +value CONFIG_I2C_DESIGNWARE_PLATFORM m | \ +!exists CONFIG_I2C_DESIGNWARE_PLATFORM + +# Don't use the generic ehci/ohci code on omap, it doesn't work +((flavour generic & arch armhf &/ value CONFIG_USB_EHCI_HCD_PLATFORM n & value CONFIG_USB_OHCI_HCD_PLATFORM n) | \ + !exists MISSING) + +# CONFIG_DRM_MGAG200, experimental driver hangs boot on HP Proliant Gen8 +# See LP:1042903 +(!exists CONFIG_DRM_MGAG200 | value CONFIG_DRM_MGAG200 n) + +# Required if /init is a shell script. 
+value CONFIG_BINFMT_SCRIPT y + +# CPUIDLE is a source of instabilities on ECX-1000, leave it off +((flavour generic & arch armhf &/ value CONFIG_CPU_IDLE n) | \ + !exists MISSING) --- linux-3.13.0.orig/debian.master/config/i386/config.common.i386 +++ linux-3.13.0/debian.master/config/i386/config.common.i386 @@ -0,0 +1,315 @@ +# +# Config options for config.common.i386 automatically generated by splitconfig.pl +# +# CONFIG_64BIT is not set +CONFIG_AC97_BUS=m +# CONFIG_AHCI_IMX is not set +CONFIG_AIC79XX_RESET_DELAY_MS=5000 +CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-ecx -fcall-saved-edx" +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_RANDOM=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ASYNC_TX_DMA=y +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_PIIX=y +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_AUDIT_ARCH is not set +CONFIG_BCH=m +CONFIG_COMEDI_ISA_DRIVERS=y +CONFIG_COMPACTION=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_IDLE=y +CONFIG_CRASH_DUMP=y +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_DEADLINE=y +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_EXYNOS_VIDEO=y +# CONFIG_EZX_PCAP is not set +CONFIG_FB_ATY128=m +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_BOOT_VESA_SUPPORT=y +CONFIG_FB_DDC=m +# CONFIG_FB_MACMODES is not set +CONFIG_FB_RADEON=m +CONFIG_FB_SVGALIB=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_UDL=m +CONFIG_FB_VOODOO1=m +# CONFIG_FMAN_P1023 is not set +CONFIG_FMAN_P3040_P4080_P5020=y +# CONFIG_FONTS is not set +CONFIG_FRAME_WARN=1024 +CONFIG_FSL_FMAN=y +CONFIG_FUNCTION_TRACER=y +CONFIG_GENERIC_PHY=m +CONFIG_GPIO_GENERIC=m +CONFIG_GPIO_TWL6040=m +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_AOUT=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_HIBERNATION=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=m +CONFIG_HUGETLB_PAGE=y +# CONFIG_HW_RANDOM_ATMEL is not set +# CONFIG_HZ_100 is not set +CONFIG_I2C_ALGOBIT=m +# CONFIG_IDE is not set +# CONFIG_IKCONFIG is not set +CONFIG_ILLEGAL_POINTER_VALUE=0 +CONFIG_IMA=y +CONFIG_INFINIBAND_AMSO1100=m +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m +CONFIG_IOMMU_SUPPORT=y +CONFIG_IPMI_SI_PROBE_DEFAULTS=y +# CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_ISA=y +CONFIG_KSM=y +CONFIG_KVM=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LIBERTAS_MESH=y +CONFIG_LIS3L02DQ=m +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_MACINTOSH_DRIVERS=y +# CONFIG_MATH_EMULATION is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TMIO is not set +CONFIG_MII=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MTD=m +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CMDLINE_PARTS=m +CONFIG_MTD_NAND=m +CONFIG_MTD_NAND_BCH=m +CONFIG_MTD_NAND_ECC=m +CONFIG_MTD_NAND_IDS=m +CONFIG_MTD_SM_COMMON=m +CONFIG_MUSB_PIO_ONLY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NET_TULIP=y +CONFIG_NOP_USB_XCEIV=m +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=8 +CONFIG_NVRAM=m +CONFIG_OUTPUT_FORMAT="elf32-i386" +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PARIDE_EPATC8=y +CONFIG_PATA_SIS=y +CONFIG_PCCARD=m +CONFIG_PCIEPORTBUS=y +CONFIG_PERCPU_TEST=m +CONFIG_PERSISTENT_KEYRINGS=y 
+CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_PMIC_ADP5520=y +CONFIG_PMIC_DA903X=y +CONFIG_PM_DEBUG=y +CONFIG_PM_RUNTIME=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PPS=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PROBE_EVENTS=y +# CONFIG_PSTORE_CONSOLE is not set +CONFIG_PSTORE_RAM=m +CONFIG_PTP_1588_CLOCK=m +CONFIG_RAPIDIO_CPS_GEN2=m +CONFIG_RAPIDIO_CPS_XX=m +CONFIG_RAPIDIO_TSI568=m +CONFIG_RAPIDIO_TSI57X=m +CONFIG_RAPIDIO_TSI721=m +CONFIG_RBTREE_TEST=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_FANOUT=32 +CONFIG_RCU_FAST_NO_HZ=y +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_REED_SOLOMON=m +CONFIG_REGULATOR_FIXED_VOLTAGE=m +# CONFIG_REGULATOR_S2MPS11 is not set +CONFIG_REISERFS_FS=m +CONFIG_RFKILL=y +CONFIG_RT2800USB_UNKNOWN=y +# CONFIG_RT2X00_LIB_DEBUGFS is not set +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS3232=m +CONFIG_SAMSUNG_USB2PHY=m +CONFIG_SAMSUNG_USB3PHY=m +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_HIGHBANK is not set +CONFIG_SATA_SVW=m +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SERIAL_8250_PCI=y +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_I8042=y +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_RAW=m +CONFIG_SND=m +CONFIG_SND_COMPRESS_OFFLOAD=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_OPL4_LIB_SEQ=m +CONFIG_SND_PCM=m +CONFIG_SND_SBAWE_SEQ=m +CONFIG_SND_SOC=m +CONFIG_SND_SOC_I2C_AND_SPI=m +CONFIG_SND_TIMER=m +CONFIG_SOUND=m +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_SPI_PXA2XX_PCI=m +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SRAM=y +CONFIG_STACK_TRACER=y +CONFIG_STAGING=y +# CONFIG_STANDALONE is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SWIOTLB=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_SYS_HYPERVISOR=y +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TIGON3=m +CONFIG_TIMER_STATS=y +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TWL4030_CORE is not set +CONFIG_TWL6040_CORE=y +CONFIG_UIO_AEC=m +CONFIG_UIO_CIF=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_MF624=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_SERCOS3=m +CONFIG_UNUSED_SYMBOLS=y +CONFIG_USB_ADUTUX=m +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_C67X00_HCD=m +CONFIG_USB_CATC=m +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_DWC3=m +# CONFIG_USB_DWC3_OMAP is not set +CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_EMI26=m +CONFIG_USB_EMI62=m +CONFIG_USB_EZUSB_FX2=m +CONFIG_USB_FOTG210_HCD=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_FUSBH200_HCD=m +CONFIG_USB_GADGET=m +# CONFIG_USB_G_MULTI is not set +CONFIG_USB_HCD_BCMA=m +CONFIG_USB_HCD_SSB=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_IPHETH=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_ISP116X_HCD=m +CONFIG_USB_ISP1301=m +CONFIG_USB_ISP1362_HCD=m +CONFIG_USB_ISP1760_HCD=m +CONFIG_USB_KAWETH=m +CONFIG_USB_LCD=m +CONFIG_USB_LD=m +CONFIG_USB_LED=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_MUSB_HDRC=m +CONFIG_USB_MUSB_TUSB6010=m +CONFIG_USB_OHCI_HCD_PLATFORM=y +# CONFIG_USB_OTG is not 
set +CONFIG_USB_OXU210HP_HCD=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_PRINTER=m +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RIO500=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SL811_HCD=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_TEST=m +CONFIG_USB_TMC=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_USS720=m +CONFIG_USB_WUSB_CBAF=m +CONFIG_USB_YUREX=m +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VGASTATE=m +CONFIG_VGA_CONSOLE=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_PCI=y +CONFIG_WIL6210=m +# CONFIG_WIMAX_GDM72XX_SDIO is not set +CONFIG_X86_MINIMUM_CPU_FAMILY=5 +CONFIG_X86_SPEEDSTEP_LIB=y +CONFIG_XEN=y +CONFIG_XEN_MAX_DOMAIN_MEMORY=64 +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_TEST=m +CONFIG_XZ_DEC_X86=y +# CONFIG_ZONE_DMA32 is not set +CONFIG_ZONE_DMA_FLAG=1 --- linux-3.13.0.orig/debian.master/config/i386/config.flavour.generic +++ linux-3.13.0/debian.master/config/i386/config.flavour.generic @@ -0,0 +1,10 @@ +# +# Config options for config.flavour.generic automatically generated by splitconfig.pl +# +CONFIG_HZ=250 +# CONFIG_HZ_1000 is not set +CONFIG_HZ_250=y +# CONFIG_IRQ_FORCED_THREADING_DEFAULT is not set +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RCU is not set +CONFIG_PREEMPT_VOLUNTARY=y --- linux-3.13.0.orig/debian.master/config/i386/config.flavour.lowlatency +++ linux-3.13.0/debian.master/config/i386/config.flavour.lowlatency @@ -0,0 +1,10 @@ +# +# Config options for config.flavour.lowlatency automatically generated by splitconfig.pl +# +CONFIG_HZ=1000 +CONFIG_HZ_1000=y +# CONFIG_HZ_250 is not set +CONFIG_IRQ_FORCED_THREADING_DEFAULT=y +CONFIG_PREEMPT=y +CONFIG_PREEMPT_RCU=y +# CONFIG_PREEMPT_VOLUNTARY is not set --- linux-3.13.0.orig/debian.master/config/powerpc/config.common.powerpc +++ linux-3.13.0/debian.master/config/powerpc/config.common.powerpc @@ -0,0 +1,341 @@ +# +# Config options for config.common.powerpc automatically generated by splitconfig.pl +# +CONFIG_64BIT=y +CONFIG_AC97_BUS=m +# CONFIG_AHCI_IMX is not set +CONFIG_AIC79XX_RESET_DELAY_MS=15000 +CONFIG_APM_EMULATION=m +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ASYNC_TX_DMA is not set +CONFIG_ATA_PIIX=m +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_AUDIT_ARCH=y +# CONFIG_BATTERY_TWL4030_MADC is not set +CONFIG_BCH=m +# CONFIG_CODE_PATCHING_SELFTEST is not set +# CONFIG_COMEDI_ISA_DRIVERS is not set +CONFIG_COMPACTION=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_CPU_BIG_ENDIAN=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_IDLE=y +# CONFIG_CPU_LITTLE_ENDIAN is not set +# CONFIG_CRASH_DUMP is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_DEADLINE=y +CONFIG_DEFAULT_IOSCHED="deadline" +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_EXYNOS_VIDEO=y 
+# CONFIG_EZX_PCAP is not set +CONFIG_FB_ATY128=y +CONFIG_FB_BACKLIGHT=y +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_DDC=y +CONFIG_FB_IBM_GXT4500=m +CONFIG_FB_MACMODES=y +CONFIG_FB_RADEON=y +CONFIG_FB_SVGALIB=m +CONFIG_FB_VOODOO1=y +# CONFIG_FONTS is not set +CONFIG_FRAME_WARN=1024 +CONFIG_FUNCTION_TRACER=y +# CONFIG_GENERIC_CSUM is not set +CONFIG_GENERIC_PHY=m +CONFIG_GENERIC_TBSYNC=y +CONFIG_GPIO_GENERIC=m +CONFIG_GPIO_TWL4030=m +CONFIG_GPIO_TWL6040=m +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=m +CONFIG_HVCS=y +CONFIG_HVC_OLD_HVSI=y +CONFIG_HVC_UDBG=y +CONFIG_HW_RANDOM_ATMEL=m +CONFIG_HW_RANDOM_POWERNV=y +CONFIG_HZ=250 +# CONFIG_HZ_100 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ_250=y +CONFIG_I2C_ALGOBIT=y +# CONFIG_IBMEBUS is not set +CONFIG_IBMVETH=m +# CONFIG_IDE is not set +# CONFIG_IKCONFIG is not set +# CONFIG_IMA is not set +CONFIG_INFINIBAND_AMSO1100=m +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +CONFIG_IOMMU_SUPPORT=y +CONFIG_IPMI_SI_PROBE_DEFAULTS=y +# CONFIG_IRQ_ALL_CPUS is not set +# CONFIG_IRQ_DOMAIN_DEBUG is not set +# CONFIG_IRQ_FORCED_THREADING_DEFAULT is not set +# CONFIG_ISA is not set +CONFIG_KSM=y +CONFIG_KVM=y +CONFIG_KVM_BOOK3S_64=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LIBERTAS_MESH=y +CONFIG_LIS3L02DQ=m +CONFIG_LOG_BUF_SHIFT=17 +# CONFIG_LPARCFG is not set +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MATH_EMULATION=y +# CONFIG_MEMORY_HOTREMOVE is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TMIO is not set +CONFIG_MII=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MPIC_MSGR=y +CONFIG_MTD=m +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CMDLINE_PARTS=m +CONFIG_MTD_NAND=m +CONFIG_MTD_NAND_BCH=m +CONFIG_MTD_NAND_ECC=m +CONFIG_MTD_NAND_IDS=m +CONFIG_MTD_OF_PARTS=m +CONFIG_MTD_SM_COMMON=m +CONFIG_MUSB_PIO_ONLY=y +CONFIG_NET_TULIP=y +CONFIG_NODES_SHIFT=8 +CONFIG_NOP_USB_XCEIV=m +CONFIG_NO_HZ_FULL=y +CONFIG_NVRAM=y +# CONFIG_PARIDE_EPATC8 is not set +CONFIG_PATA_SIS=m +CONFIG_PCCARD=m +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_SYSCALL=y +CONFIG_PERCPU_TEST=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_PHYSICAL_ALIGN=0x04000000 +CONFIG_PHYSICAL_START=0x00000000 +CONFIG_PHY_EXYNOS_DP_VIDEO=m +CONFIG_PMIC_ADP5520=y +CONFIG_PMIC_DA903X=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_RUNTIME=y +CONFIG_PM_WAKELOCKS=y +# CONFIG_POWER7_CPU is not set +CONFIG_PPC_4K_PAGES=y +# CONFIG_PPC_64K_PAGES is not set +# CONFIG_PPC_DENORMALISATION is not set +CONFIG_PPC_DISABLE_WERROR=y +CONFIG_PPC_EPAPR_HV_BYTECHAN=y +CONFIG_PPC_ICSWX=y +CONFIG_PPC_MAPLE=y +CONFIG_PPC_PASEMI=y +CONFIG_PPC_PMAC=y +CONFIG_PPC_PS3=y +# CONFIG_PPC_SMLPAR is not set +# CONFIG_PPC_SPLPAR is not set +CONFIG_PPS=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RCU is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_PROBE_EVENTS=y +CONFIG_PSERIES_ENERGY=m +# CONFIG_PSTORE_CONSOLE is not set +CONFIG_PSTORE_RAM=m +CONFIG_PTP_1588_CLOCK=m +CONFIG_RAPIDIO_CPS_GEN2=y +CONFIG_RAPIDIO_CPS_XX=y +CONFIG_RAPIDIO_TSI568=y +CONFIG_RAPIDIO_TSI57X=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RBTREE_TEST=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_USER_QS=y +CONFIG_REED_SOLOMON=m +CONFIG_REGULATOR_FIXED_VOLTAGE=m +CONFIG_REGULATOR_S2MPS11=m +CONFIG_REISERFS_FS=m +CONFIG_RFKILL=y +CONFIG_RT2800USB_UNKNOWN=y +# 
CONFIG_RT2X00_LIB_DEBUGFS is not set +# CONFIG_RTAS_FLASH is not set +CONFIG_RTC_DRV_CMOS=m +CONFIG_SAMSUNG_USB2PHY=m +CONFIG_SAMSUNG_USB3PHY=m +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_HIGHBANK is not set +CONFIG_SATA_SVW=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCSI_SPI_ATTRS=m +# CONFIG_SENSORS_SCH56XX_COMMON is not set +CONFIG_SERIAL_8250_PCI=m +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIAL_OF_PLATFORM=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_APBPS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIO_I8042=y +CONFIG_SERIO_OLPC_APSP=m +CONFIG_SERIO_PARKBD=m +CONFIG_SERIO_PCIPS2=m +CONFIG_SERIO_PS2MULT=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_XILINX_XPS_PS2=m +CONFIG_SIMPLE_GPIO=y +CONFIG_SND=m +CONFIG_SND_COMPRESS_OFFLOAD=m +CONFIG_SND_EMU10K1_SEQ=m +CONFIG_SND_OPL3_LIB_SEQ=m +# CONFIG_SND_OPL4_LIB_SEQ is not set +CONFIG_SND_PCM=m +# CONFIG_SND_SBAWE_SEQ is not set +CONFIG_SND_SOC=m +CONFIG_SND_SOC_FSL_SSI=m +CONFIG_SND_SOC_I2C_AND_SPI=m +CONFIG_SND_TIMER=m +CONFIG_SOUND=m +CONFIG_SPAPR_TCE_IOMMU=y +CONFIG_SPI_PXA2XX_PCI=m +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y +# CONFIG_SQUASHFS_DECOMP_SINGLE is not set +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SRAM=y +CONFIG_STACK_TRACER=y +CONFIG_STAGING=y +CONFIG_STANDALONE=y +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_SWIOTLB=y +CONFIG_SYSCTL_SYSCALL=y +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_TCG_IBMVTPM=m +CONFIG_TEST_KSTRTOX=m +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TIGON3=m +CONFIG_TIMER_STATS=y +CONFIG_TWL4030_CORE=y +CONFIG_TWL6040_CORE=y +CONFIG_UDBG_RTAS_CONSOLE=y +CONFIG_UIO_AEC=m +CONFIG_UIO_CIF=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_MF624=m +CONFIG_UIO_NETX=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_SERCOS3=m +CONFIG_UNUSED_SYMBOLS=y +CONFIG_USB_ADUTUX=m +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_C67X00_HCD=m +CONFIG_USB_CATC=m +CONFIG_USB_CHIPIDEA=m +CONFIG_USB_CYPRESS_CY7C63=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_DWC3=m +# CONFIG_USB_DWC3_OMAP is not set +CONFIG_USB_DYNAMIC_MINORS=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_HCD_PPC_OF=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHSET_TEST_FIXTURE=m +CONFIG_USB_EMI26=m +CONFIG_USB_EMI62=m +CONFIG_USB_EZUSB_FX2=m +CONFIG_USB_FOTG210_HCD=m +CONFIG_USB_FSL_USB2=m +CONFIG_USB_FTDI_ELAN=m +CONFIG_USB_FUSBH200_HCD=m +CONFIG_USB_GADGET=m +# CONFIG_USB_G_MULTI is not set +CONFIG_USB_HCD_BCMA=m +CONFIG_USB_HCD_SSB=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_IPHETH=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_ISP116X_HCD=m +CONFIG_USB_ISP1301=m +CONFIG_USB_ISP1362_HCD=m +CONFIG_USB_ISP1760_HCD=m +CONFIG_USB_KAWETH=m +CONFIG_USB_LCD=m +CONFIG_USB_LD=m +CONFIG_USB_LED=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_MUSB_DSPS=m +CONFIG_USB_MUSB_HDRC=m +CONFIG_USB_MUSB_TUSB6010=m +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD_PPC_OF=y +CONFIG_USB_OHCI_HCD_PPC_OF_BE=y +CONFIG_USB_OXU210HP_HCD=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_PRINTER=m +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RIO500=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_SL811_HCD=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_ONETOUCH=m 
+CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_SUPPORT=y +CONFIG_USB_TEST=m +CONFIG_USB_TMC=m +CONFIG_USB_TRANCEVIBRATOR=m +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_USS720=m +CONFIG_USB_WUSB_CBAF=m +CONFIG_USB_YUREX=m +# CONFIG_VFIO_IOMMU_TYPE1 is not set +CONFIG_VGASTATE=m +CONFIG_VGA_CONSOLE=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=m +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_WIL6210=m +# CONFIG_WIMAX_GDM72XX_SDIO is not set +# CONFIG_XMON is not set +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_TEST=m +CONFIG_XZ_DEC_X86=y +CONFIG_ZONE_DMA_FLAG=1 --- linux-3.13.0.orig/debian.master/config/powerpc/config.flavour.powerpc-e500 +++ linux-3.13.0/debian.master/config/powerpc/config.flavour.powerpc-e500 @@ -0,0 +1,85 @@ +# +# Config options for config.flavour.powerpc-e500 automatically generated by splitconfig.pl +# +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +# CONFIG_ARCH_RANDOM is not set +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_BSC9131_RDB=y +CONFIG_DEFAULT_UIMAGE=y +# CONFIG_EPAPR_BOOT is not set +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_UDL=m +CONFIG_FMAN_P1023=y +# CONFIG_FMAN_P3040_P4080_P5020 is not set +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_FSL_FMAN=y +# CONFIG_FSL_QMAN_FQ_LOOKUP is not set +# CONFIG_FSL_RIO is not set +CONFIG_FSL_ULI1575=y +CONFIG_GE_FPGA=y +CONFIG_GE_IMP3A=y +CONFIG_HAS_FSL_QBMAN=y +CONFIG_HAS_RAPIDIO=y +# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set +# CONFIG_HIBERNATION is not set +CONFIG_HUGETLB_PAGE=y +CONFIG_KERNEL_START=0xc0000000 +CONFIG_KSI8560=y +CONFIG_MMIO_NVRAM=y +CONFIG_MPC8536_DS=y +CONFIG_MPC8540_ADS=y +CONFIG_MPC8560_ADS=y +CONFIG_MPC85xx_CDS=y +CONFIG_MPC85xx_DS=y +CONFIG_MPC85xx_MDS=y +CONFIG_MPC85xx_RDB=y +# CONFIG_MPIC_U3_HT_IRQS is not set +# CONFIG_NEED_DMA_MAP_STATE is not set +# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set +# CONFIG_NONSTATIC_KERNEL is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=8 +CONFIG_P1010_RDB=y +CONFIG_P1022_DS=y +CONFIG_P1022_RDK=y +CONFIG_P1023_RDS=y +CONFIG_PAGE_OFFSET=0xc0000000 +CONFIG_PHYS_ADDR_T_64BIT=y +# CONFIG_PPC64 is not set +CONFIG_PPC_85xx=y +# CONFIG_PPC_970_NAP is not set +# CONFIG_PPC_BOOK3S_32 is not set +# CONFIG_PPC_CELL is not set +CONFIG_PPC_DOORBELL=y +CONFIG_PPC_EPAPR_HV_PIC=y +CONFIG_PPC_I8259=y +# CONFIG_PPC_ICP_HV is not set +# CONFIG_PPC_ICP_NATIVE is not set +# CONFIG_PPC_ICS_RTAS is not set +CONFIG_PPC_INDIRECT_PCI=y +# CONFIG_PPC_MM_SLICES is not set +# CONFIG_PPC_MPC106 is not set +# CONFIG_PPC_P7_NAP is not set +# CONFIG_PPC_RTAS is not set +# CONFIG_PPC_WSP is not set +# CONFIG_PPC_XICS is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_RTC_DRV_DS3232=m +CONFIG_SBC8548=y +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_SOCRATES=y +CONFIG_STX_GP3=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TQM8540=y +CONFIG_TQM8541=y +CONFIG_TQM8548=y +CONFIG_TQM8555=y +CONFIG_TQM8560=y +CONFIG_USB_OTG=y +CONFIG_WORD_SIZE=32 +CONFIG_XES_MPC85xx=y --- linux-3.13.0.orig/debian.master/config/powerpc/config.flavour.powerpc-e500mc +++ linux-3.13.0/debian.master/config/powerpc/config.flavour.powerpc-e500mc @@ -0,0 +1,85 @@ +# +# Config options for config.flavour.powerpc-e500mc automatically generated by splitconfig.pl +# +CONFIG_ARCH_DMA_ADDR_T_64BIT=y 
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +# CONFIG_ARCH_RANDOM is not set +# CONFIG_ATA_NONSTANDARD is not set +# CONFIG_BSC9131_RDB is not set +CONFIG_DEFAULT_UIMAGE=y +# CONFIG_EPAPR_BOOT is not set +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_SYS_IMAGEBLIT=y +CONFIG_FB_UDL=y +# CONFIG_FMAN_P1023 is not set +CONFIG_FMAN_P3040_P4080_P5020=y +CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_FSL_FMAN=y +# CONFIG_FSL_QMAN_FQ_LOOKUP is not set +CONFIG_FSL_RIO=y +# CONFIG_FSL_ULI1575 is not set +# CONFIG_GE_FPGA is not set +# CONFIG_GE_IMP3A is not set +CONFIG_HAS_FSL_QBMAN=y +CONFIG_HAS_RAPIDIO=y +# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set +# CONFIG_HIBERNATION is not set +CONFIG_HUGETLB_PAGE=y +CONFIG_KERNEL_START=0xc0000000 +# CONFIG_KSI8560 is not set +# CONFIG_MMIO_NVRAM is not set +# CONFIG_MPC8536_DS is not set +# CONFIG_MPC8540_ADS is not set +# CONFIG_MPC8560_ADS is not set +# CONFIG_MPC85xx_CDS is not set +# CONFIG_MPC85xx_DS is not set +# CONFIG_MPC85xx_MDS is not set +# CONFIG_MPC85xx_RDB is not set +# CONFIG_MPIC_U3_HT_IRQS is not set +# CONFIG_NEED_DMA_MAP_STATE is not set +# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set +# CONFIG_NONSTATIC_KERNEL is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=8 +# CONFIG_P1010_RDB is not set +# CONFIG_P1022_DS is not set +# CONFIG_P1022_RDK is not set +# CONFIG_P1023_RDS is not set +CONFIG_PAGE_OFFSET=0xc0000000 +CONFIG_PHYS_ADDR_T_64BIT=y +# CONFIG_PPC64 is not set +CONFIG_PPC_85xx=y +# CONFIG_PPC_970_NAP is not set +# CONFIG_PPC_BOOK3S_32 is not set +# CONFIG_PPC_CELL is not set +CONFIG_PPC_DOORBELL=y +CONFIG_PPC_EPAPR_HV_PIC=y +# CONFIG_PPC_I8259 is not set +# CONFIG_PPC_ICP_HV is not set +# CONFIG_PPC_ICP_NATIVE is not set +# CONFIG_PPC_ICS_RTAS is not set +CONFIG_PPC_INDIRECT_PCI=y +# CONFIG_PPC_MM_SLICES is not set +# CONFIG_PPC_MPC106 is not set +# CONFIG_PPC_P7_NAP is not set +# CONFIG_PPC_RTAS is not set +# CONFIG_PPC_WSP is not set +# CONFIG_PPC_XICS is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_RTC_DRV_DS3232=y +# CONFIG_SBC8548 is not set +CONFIG_SCSI_SRP_ATTRS=m +# CONFIG_SOCRATES is not set +# CONFIG_STX_GP3 is not set +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_TQM8540 is not set +# CONFIG_TQM8541 is not set +# CONFIG_TQM8548 is not set +# CONFIG_TQM8555 is not set +# CONFIG_TQM8560 is not set +CONFIG_USB_OTG=y +CONFIG_WORD_SIZE=32 +# CONFIG_XES_MPC85xx is not set --- linux-3.13.0.orig/debian.master/config/powerpc/config.flavour.powerpc-smp +++ linux-3.13.0/debian.master/config/powerpc/config.flavour.powerpc-smp @@ -0,0 +1,59 @@ +# +# Config options for config.flavour.powerpc-smp automatically generated by splitconfig.pl +# +# CONFIG_ARCH_DMA_ADDR_T_64BIT is not set +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +# CONFIG_ARCH_RANDOM is not set +# CONFIG_ATA_NONSTANDARD is not set +# CONFIG_DEFAULT_UIMAGE is not set +# CONFIG_EPAPR_BOOT is not set +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_UDL=m +CONFIG_FORCE_MAX_ZONEORDER=11 +# CONFIG_FSL_FMAN is not set +# CONFIG_FSL_ULI1575 is not set +# CONFIG_GE_FPGA is not set +# CONFIG_HAS_FSL_QBMAN is not set +# CONFIG_HAS_RAPIDIO is not set +# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set +CONFIG_HIBERNATION=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_KERNEL_START=0xc0000000 +# CONFIG_MMIO_NVRAM is not set +# CONFIG_MPIC_U3_HT_IRQS is not set +# CONFIG_NEED_DMA_MAP_STATE is not set +# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set +# CONFIG_NONSTATIC_KERNEL is not set 
+CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=4 +CONFIG_PAGE_OFFSET=0xc0000000 +# CONFIG_PHYS_ADDR_T_64BIT is not set +# CONFIG_PPC64 is not set +# CONFIG_PPC_85xx is not set +# CONFIG_PPC_970_NAP is not set +CONFIG_PPC_BOOK3S_32=y +# CONFIG_PPC_CELL is not set +# CONFIG_PPC_DOORBELL is not set +# CONFIG_PPC_EPAPR_HV_PIC is not set +CONFIG_PPC_I8259=y +# CONFIG_PPC_ICP_HV is not set +# CONFIG_PPC_ICP_NATIVE is not set +# CONFIG_PPC_ICS_RTAS is not set +CONFIG_PPC_INDIRECT_PCI=y +# CONFIG_PPC_MM_SLICES is not set +CONFIG_PPC_MPC106=y +CONFIG_PPC_OF_BOOT_TRAMPOLINE=y +# CONFIG_PPC_P7_NAP is not set +CONFIG_PPC_RTAS=y +# CONFIG_PPC_WSP is not set +# CONFIG_PPC_XICS is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_RTC_DRV_DS3232=m +CONFIG_SCSI_SRP_ATTRS=m +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_USB_OTG is not set +CONFIG_WORD_SIZE=32 --- linux-3.13.0.orig/debian.master/config/powerpc/config.flavour.powerpc64-emb +++ linux-3.13.0/debian.master/config/powerpc/config.flavour.powerpc64-emb @@ -0,0 +1,67 @@ +# +# Config options for config.flavour.powerpc64-emb automatically generated by splitconfig.pl +# +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +# CONFIG_ARCH_RANDOM is not set +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_DEFAULT_UIMAGE=y +CONFIG_EPAPR_BOOT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_SYS_IMAGEBLIT=y +CONFIG_FB_UDL=y +CONFIG_FORCE_MAX_ZONEORDER=13 +# CONFIG_FSL_FMAN is not set +CONFIG_FSL_QMAN_FQ_LOOKUP=y +CONFIG_FSL_RIO=y +# CONFIG_FSL_ULI1575 is not set +# CONFIG_GE_FPGA is not set +CONFIG_HAS_FSL_QBMAN=y +CONFIG_HAS_RAPIDIO=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +# CONFIG_HIBERNATION is not set +CONFIG_HUGETLB_PAGE=y +CONFIG_KERNEL_START=0xc000000000000000 +# CONFIG_MMIO_NVRAM is not set +# CONFIG_MPIC_U3_HT_IRQS is not set +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NONSTATIC_KERNEL=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=8 +# CONFIG_NUMA is not set +CONFIG_PAGE_OFFSET=0xc000000000000000 +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_PPC64=y +# CONFIG_PPC_970_NAP is not set +CONFIG_PPC_BOOK3E_64=y +# CONFIG_PPC_BOOK3S_64 is not set +# CONFIG_PPC_CELL is not set +CONFIG_PPC_DOORBELL=y +CONFIG_PPC_EPAPR_HV_PIC=y +# CONFIG_PPC_HAS_HASH_64K is not set +# CONFIG_PPC_I8259 is not set +# CONFIG_PPC_ICP_HV is not set +CONFIG_PPC_ICP_NATIVE=y +# CONFIG_PPC_ICS_RTAS is not set +CONFIG_PPC_INDIRECT_PCI=y +# CONFIG_PPC_MM_SLICES is not set +# CONFIG_PPC_MPC106 is not set +# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set +# CONFIG_PPC_P7_NAP is not set +# CONFIG_PPC_RTAS is not set +CONFIG_PPC_WSP=y +CONFIG_PPC_XICS=y +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_NOCB_CPU is not set +CONFIG_RTC_DRV_DS3232=y +# CONFIG_SCHED_SMT is not set +# CONFIG_SCOM_DEBUGFS is not set +CONFIG_SCSI_SRP_ATTRS=m +# CONFIG_SPARSEMEM_MANUAL is not set +# CONFIG_TICK_CPU_ACCOUNTING is not set +# CONFIG_U3_DART is not set +CONFIG_USB_OTG=y +CONFIG_WORD_SIZE=64 --- linux-3.13.0.orig/debian.master/config/powerpc/config.flavour.powerpc64-smp +++ linux-3.13.0/debian.master/config/powerpc/config.flavour.powerpc64-smp @@ -0,0 +1,64 @@ +# +# Config options for config.flavour.powerpc64-smp automatically generated by splitconfig.pl +# +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_RANDOM=y +CONFIG_ATA_NONSTANDARD=y +# CONFIG_DEFAULT_UIMAGE is not set +CONFIG_EPAPR_BOOT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_SYS_IMAGEBLIT=y +CONFIG_FB_UDL=m 
+CONFIG_FORCE_MAX_ZONEORDER=13 +# CONFIG_FSL_FMAN is not set +# CONFIG_FSL_ULI1575 is not set +# CONFIG_GE_FPGA is not set +# CONFIG_HAS_FSL_QBMAN is not set +# CONFIG_HAS_RAPIDIO is not set +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_HIBERNATION=y +CONFIG_HUGETLB_PAGE=y +CONFIG_KERNEL_START=0xc000000000000000 +CONFIG_MMIO_NVRAM=y +CONFIG_MPIC_U3_HT_IRQS=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NONSTATIC_KERNEL=y +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NR_CPUS=1024 +CONFIG_NUMA=y +CONFIG_PAGE_OFFSET=0xc000000000000000 +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_PPC64=y +CONFIG_PPC_970_NAP=y +# CONFIG_PPC_BOOK3E_64 is not set +CONFIG_PPC_BOOK3S_64=y +CONFIG_PPC_CELL=y +CONFIG_PPC_DOORBELL=y +# CONFIG_PPC_EPAPR_HV_PIC is not set +CONFIG_PPC_HAS_HASH_64K=y +CONFIG_PPC_I8259=y +CONFIG_PPC_ICP_HV=y +CONFIG_PPC_ICP_NATIVE=y +CONFIG_PPC_ICS_RTAS=y +# CONFIG_PPC_INDIRECT_PCI is not set +CONFIG_PPC_MM_SLICES=y +# CONFIG_PPC_MPC106 is not set +CONFIG_PPC_OF_BOOT_TRAMPOLINE=y +CONFIG_PPC_P7_NAP=y +CONFIG_PPC_RTAS=y +# CONFIG_PPC_WSP is not set +CONFIG_PPC_XICS=y +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_NOCB_CPU=y +CONFIG_RTC_DRV_DS3232=m +CONFIG_SCHED_SMT=y +CONFIG_SCOM_DEBUGFS=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_U3_DART=y +# CONFIG_USB_OTG is not set +CONFIG_WORD_SIZE=64 --- linux-3.13.0.orig/debian.master/config/ppc64el/config.common.ppc64el +++ linux-3.13.0/debian.master/config/ppc64el/config.common.ppc64el @@ -0,0 +1,371 @@ +# +# Config options for config.common.ppc64el automatically generated by splitconfig.pl +# +CONFIG_64BIT=y +# CONFIG_AHCI_IMX is not set +CONFIG_AIC79XX_RESET_DELAY_MS=5000 +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set +CONFIG_ARCH_PHYS_ADDR_T_64BIT=y +CONFIG_ARCH_RANDOM=y +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_ATA_NONSTANDARD is not set +CONFIG_ATA_PIIX=m +# CONFIG_ATOMIC64_SELFTEST is not set +CONFIG_AUDIT_ARCH=y +CONFIG_BCH=m +CONFIG_CODE_PATCHING_SELFTEST=y +CONFIG_COMPACTION=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CRASH_DUMP=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_IOSCHED="cfq" +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +# CONFIG_DEFAULT_UIMAGE is not set +CONFIG_EPAPR_BOOT=y +# CONFIG_EXYNOS_VIDEO is not set +# CONFIG_EZX_PCAP is not set +CONFIG_FB_ATY128=m +CONFIG_FB_BACKLIGHT=y +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_DDC=m +CONFIG_FB_IBM_GXT4500=y +CONFIG_FB_MACMODES=y +CONFIG_FB_RADEON=m +CONFIG_FB_SVGALIB=m +CONFIG_FB_SYS_COPYAREA=m +CONFIG_FB_SYS_FILLRECT=m +CONFIG_FB_SYS_FOPS=m +CONFIG_FB_SYS_IMAGEBLIT=m +CONFIG_FB_UDL=m +CONFIG_FB_VOODOO1=m +# CONFIG_FMAN_P1023 is not set +CONFIG_FMAN_P3040_P4080_P5020=y +# CONFIG_FONTS is not set +CONFIG_FORCE_MAX_ZONEORDER=9 +CONFIG_FRAME_WARN=2048 +CONFIG_FSL_FMAN=y +# CONFIG_FSL_ULI1575 is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_GENERIC_CSUM=y +# CONFIG_GENERIC_PHY is not set +# CONFIG_GENERIC_TBSYNC is not set +# CONFIG_GE_FPGA is not set +CONFIG_GPIO_GENERIC=m +# CONFIG_HAS_FSL_QBMAN is not set +# CONFIG_HAS_RAPIDIO is not set +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +# CONFIG_HAVE_AOUT is not set +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +# 
CONFIG_HIBERNATION is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set +CONFIG_HUGETLB_PAGE=y +CONFIG_HVCS=m +# CONFIG_HVC_OLD_HVSI is not set +# CONFIG_HVC_UDBG is not set +CONFIG_HW_RANDOM_POWERNV=m +CONFIG_HZ=100 +CONFIG_HZ_100=y +# CONFIG_HZ_1000 is not set +# CONFIG_HZ_250 is not set +CONFIG_I2C_ALGOBIT=m +CONFIG_IBMEBUS=y +CONFIG_IBMVETH=y +CONFIG_IDE=y +CONFIG_IKCONFIG=y +CONFIG_IMA=y +# CONFIG_INFINIBAND_AMSO1100 is not set +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m +CONFIG_IOMMU_SUPPORT=y +CONFIG_IPMI_SI_PROBE_DEFAULTS=y +CONFIG_IRQ_ALL_CPUS=y +CONFIG_IRQ_DOMAIN_DEBUG=y +# CONFIG_IRQ_FORCED_THREADING_DEFAULT is not set +CONFIG_KERNEL_START=0xc000000000000000 +CONFIG_KSM=y +# CONFIG_KVM_BOOK3S_64 is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LIBERTAS_MESH=y +CONFIG_LOG_BUF_SHIFT=17 +CONFIG_LPARCFG=y +# CONFIG_MACINTOSH_DRIVERS is not set +CONFIG_MEMORY_HOTREMOVE=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_TMIO is not set +CONFIG_MII=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMIO_NVRAM is not set +# CONFIG_MPIC_MSGR is not set +# CONFIG_MPIC_U3_HT_IRQS is not set +CONFIG_MTD=m +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CMDLINE_PARTS=m +CONFIG_MTD_NAND=m +CONFIG_MTD_NAND_BCH=m +CONFIG_MTD_NAND_ECC=m +CONFIG_MTD_NAND_IDS=m +CONFIG_MTD_OF_PARTS=m +CONFIG_MTD_SM_COMMON=m +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +# CONFIG_NET_TULIP is not set +CONFIG_NODES_SHIFT=8 +CONFIG_NONSTATIC_KERNEL=y +CONFIG_NOP_USB_XCEIV=m +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=2048 +CONFIG_NUMA=y +CONFIG_PAGE_OFFSET=0xc000000000000000 +CONFIG_PARIDE_EPATC8=y +CONFIG_PATA_SIS=m +# CONFIG_PCCARD is not set +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCI_SYSCALL=y +# CONFIG_PERCPU_TEST is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +CONFIG_PHYSICAL_START=0x00000000 +CONFIG_PHYS_ADDR_T_64BIT=y +# CONFIG_PHY_EXYNOS_DP_VIDEO is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PM_DEBUG is not set +# CONFIG_PM_RUNTIME is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_POWER7_CPU=y +CONFIG_PPC64=y +# CONFIG_PPC_4K_PAGES is not set +CONFIG_PPC_64K_PAGES=y +# CONFIG_PPC_970_NAP is not set +# CONFIG_PPC_BOOK3E_64 is not set +CONFIG_PPC_BOOK3S_64=y +# CONFIG_PPC_CELL is not set +CONFIG_PPC_DENORMALISATION=y +# CONFIG_PPC_DISABLE_WERROR is not set +CONFIG_PPC_DOORBELL=y +# CONFIG_PPC_EPAPR_HV_BYTECHAN is not set +# CONFIG_PPC_EPAPR_HV_PIC is not set +CONFIG_PPC_HAS_HASH_64K=y +CONFIG_PPC_I8259=y +CONFIG_PPC_ICP_HV=y +CONFIG_PPC_ICP_NATIVE=y +# CONFIG_PPC_ICSWX is not set +CONFIG_PPC_ICS_RTAS=y +# CONFIG_PPC_INDIRECT_PCI is not set +# CONFIG_PPC_MAPLE is not set +CONFIG_PPC_MM_SLICES=y +# CONFIG_PPC_MPC106 is not set +CONFIG_PPC_OF_BOOT_TRAMPOLINE=y +CONFIG_PPC_P7_NAP=y +# CONFIG_PPC_PASEMI is not set +# CONFIG_PPC_PMAC is not set +# CONFIG_PPC_PS3 is not set +CONFIG_PPC_RTAS=y +CONFIG_PPC_SMLPAR=y +CONFIG_PPC_SPLPAR=y +# CONFIG_PPC_WSP is not set +CONFIG_PPC_XICS=y +CONFIG_PPS=y +# CONFIG_PPS_CLIENT_GPIO is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_PARPORT is not set +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_RCU is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_PROBE_EVENTS=y +CONFIG_PSERIES_ENERGY=y +# CONFIG_PSTORE_CONSOLE is not set +CONFIG_PSTORE_RAM=m +CONFIG_PTP_1588_CLOCK=y +CONFIG_RAPIDIO_CPS_GEN2=m +CONFIG_RAPIDIO_CPS_XX=m +CONFIG_RAPIDIO_TSI568=m +CONFIG_RAPIDIO_TSI57X=m +# CONFIG_RBTREE_TEST is not 
set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +CONFIG_RCU_FANOUT=64 +# CONFIG_RCU_FAST_NO_HZ is not set +# CONFIG_RCU_NOCB_CPU is not set +# CONFIG_RCU_USER_QS is not set +CONFIG_REED_SOLOMON=m +CONFIG_REGULATOR_FIXED_VOLTAGE=m +# CONFIG_REGULATOR_S2MPS11 is not set +CONFIG_REISERFS_FS=y +# CONFIG_RFKILL is not set +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2X00_LIB_DEBUGFS=y +CONFIG_RTAS_FLASH=m +CONFIG_RTC_DRV_CMOS=m +CONFIG_RTC_DRV_DS3232=m +# CONFIG_SAMSUNG_USB2PHY is not set +# CONFIG_SAMSUNG_USB3PHY is not set +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_SATA_HIGHBANK is not set +CONFIG_SATA_SVW=m +# CONFIG_SCHED_AUTOGROUP is not set +CONFIG_SCHED_SMT=y +CONFIG_SCOM_DEBUGFS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_SRP_ATTRS=y +# CONFIG_SENSORS_SCH56XX_COMMON is not set +CONFIG_SERIAL_8250_PCI=y +# CONFIG_SERIAL_FSL_LPUART is not set +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_ARC_PS2 is not set +CONFIG_SERIO_I8042=y +# CONFIG_SERIO_OLPC_APSP is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_XILINX_XPS_PS2 is not set +# CONFIG_SIMPLE_GPIO is not set +# CONFIG_SOUND is not set +# CONFIG_SPAPR_TCE_IOMMU is not set +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPI_PXA2XX_PCI=m +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_DECOMP_SINGLE=y +CONFIG_SQUASHFS_FILE_CACHE=y +# CONFIG_SQUASHFS_FILE_DIRECT is not set +# CONFIG_SRAM is not set +CONFIG_STACK_TRACER=y +# CONFIG_STAGING is not set +CONFIG_STANDALONE=y +# CONFIG_SUNRPC_DEBUG is not set +# CONFIG_SWIOTLB is not set +# CONFIG_SYSCTL_SYSCALL is not set +# CONFIG_SYS_HYPERVISOR is not set +CONFIG_TCG_IBMVTPM=y +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TICK_CPU_ACCOUNTING is not set +CONFIG_TIGON3=y +# CONFIG_TIMER_STATS is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_U3_DART is not set +# CONFIG_UDBG_RTAS_CONSOLE is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PCI_GENERIC is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_SERCOS3 is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_CATC is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_EHCI_HCD_PPC_OF is not set +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_FUSBH200_HCD is not set +# CONFIG_USB_GADGET is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# 
CONFIG_USB_KAWETH is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_OHCI_HCD_PPC_OF is not set +# CONFIG_USB_OHCI_HCD_PPC_OF_BE is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_RCAR_PHY is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_SERIAL is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_SL811_HCD is not set +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_USBAT is not set +CONFIG_USB_SUPPORT=y +# CONFIG_USB_TEST is not set +# CONFIG_USB_TMC is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_USS720 is not set +# CONFIG_USB_WUSB_CBAF is not set +# CONFIG_USB_YUREX is not set +CONFIG_VGASTATE=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_VIRTIO_MMIO is not set +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_WIL6210=m +CONFIG_WORD_SIZE=64 +CONFIG_XMON=y +# CONFIG_XZ_DEC_ARM is not set +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_IA64 is not set +# CONFIG_XZ_DEC_SPARC is not set +# CONFIG_XZ_DEC_TEST is not set +# CONFIG_XZ_DEC_X86 is not set +CONFIG_ZONE_DMA_FLAG=1 --- linux-3.13.0.orig/debian.master/config/ppc64el/config.flavour.generic +++ linux-3.13.0/debian.master/config/ppc64el/config.flavour.generic @@ -0,0 +1,3 @@ +# +# Config options for config.flavour.generic automatically generated by splitconfig.pl +# --- linux-3.13.0.orig/debian.master/config/x32/config.common.x32 +++ linux-3.13.0/debian.master/config/x32/config.common.x32 @@ -0,0 +1 @@ +# nothing here yet --- linux-3.13.0.orig/debian.master/config/x32/config.flavour.generic +++ linux-3.13.0/debian.master/config/x32/config.flavour.generic @@ -0,0 +1 @@ +# nothing here yet --- linux-3.13.0.orig/debian.master/control +++ linux-3.13.0/debian.master/control @@ -0,0 +1,977 @@ +Source: linux +Section: devel +Priority: optional +Maintainer: Ubuntu Kernel Team +Standards-Version: 3.9.4.0 +Build-Depends: debhelper (>= 5), cpio, module-init-tools, kernel-wedge (>= 2.24ubuntu1), makedumpfile [amd64 i386], libelf-dev, libnewt-dev, libiberty-dev, rsync, libdw-dev, libpci-dev, dpkg (>= 1.16.0~ubuntu4), pkg-config, flex, bison, libunwind8-dev, openssl, libaudit-dev, bc, python-dev, gawk, device-tree-compiler [powerpc], u-boot-tools [powerpc], libc6-dev-ppc64 [powerpc] +Build-Depends-Indep: xmlto, docbook-utils, ghostscript, transfig, bzip2, sharutils, asciidoc +Vcs-Git: http://kernel.ubuntu.com/git-repos/ubuntu/ubuntu-trusty.git +XS-Testsuite: autopkgtest +#XS-Testsuite-Depends: gcc-4.7 binutils + +Package: linux-source-3.13.0 +Architecture: 
all +Section: devel +Priority: optional +Provides: linux-source, linux-source-3 +Depends: ${misc:Depends}, binutils, bzip2, coreutils | fileutils (>= 4.0) +Recommends: libc-dev, gcc, make +Suggests: libncurses-dev | ncurses-dev, kernel-package, libqt3-dev +Description: Linux kernel source for version 3.13.0 with Ubuntu patches + This package provides the source code for the Linux kernel version + 3.13.0. + . + This package is mainly meant for other packages to use, in order to build + custom flavours. + . + If you wish to use this package to create a custom Linux kernel, then it + is suggested that you investigate the package kernel-package, which has + been designed to ease the task of creating kernel image packages. + . + If you are simply trying to build third-party modules for your kernel, + you do not want this package. Install the appropriate linux-headers + package instead. + +Package: linux-doc +Architecture: all +Section: doc +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-doc-3 +Replaces: linux-doc-3 +Description: Linux kernel specific documentation for version 3.13.0 + This package provides the various documents in the 3.13.0 kernel + Documentation/ subdirectory. These document kernel subsystems, APIs, device + drivers, and so on. See + /usr/share/doc/linux-doc/00-INDEX for a list of what is + contained in each file. + +Package: linux-headers-3.13.0-54 +Architecture: all +Multi-Arch: foreign +Section: devel +Priority: optional +Depends: ${misc:Depends}, coreutils | fileutils (>= 4.0) +Description: Header files related to Linux kernel version 3.13.0 + This package provides kernel header files for version 3.13.0, for sites + that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-libc-dev +Architecture: i386 amd64 armhf arm64 x32 powerpc ppc64el +Depends: ${misc:Depends} +Conflicts: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), amd64-libs-dev (<= 1.1), linux-kernel-headers +Replaces: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), linux-kernel-headers, libdrm-dev +Provides: linux-kernel-headers +Multi-Arch: same +Description: Linux Kernel Headers for development + This package provides headers from the Linux kernel. These headers + are used by the installed headers for GNU glibc and other system + libraries. They are NOT meant to be used to build third-party modules for + your kernel. Use linux-headers-* packages for that. + +Package: linux-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Replaces: linux-tools (<= 2.6.32-16.25), linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Description: Linux kernel version specific tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PKGVER. + +Package: linux-tools-3.13.0-54 +Architecture: i386 amd64 armhf arm64 powerpc ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-tools-common +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependent parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86.
+ You probably want to install linux-tools-3.13.0-54-. + +Package: linux-cloud-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Breaks: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13) +Conflicts: linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Replaces: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13), linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Depends: ${misc:Depends} +Description: Linux kernel version specific cloud tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools for cloud tools for version PKGVER. + +Package: linux-cloud-tools-3.13.0-54 +Architecture: i386 amd64 armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-cloud-tools-common +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependent parts for kernel + version locked tools for cloud tools for version 3.13.0-54 on + 64 bit x86. + You probably want to install linux-cloud-tools-3.13.0-54-. + + +Package: linux-image-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] | grub-ieee1275 [ppc64el] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed.
+ +Package: linux-headers-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-dbgsym +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 armhf arm64 ppc64el +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: flash-kernel [armhf] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic-lpae +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic-lpae, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-lpae-dbgsym +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic-lpae +XC-Package-Type: udeb +Section: debian-installer +Architecture: armhf +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-lowlatency +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-lowlatency, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-lowlatency-dbgsym +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-lowlatency +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500 +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500 +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500mc +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500mc, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500mc-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500mc +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-smp +Description: Linux kernel image for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-emb +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-emb, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-emb-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-emb +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-smp +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
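The per-flavour image, headers, and tools stanzas above are generated rather than hand-written: the flavour-control.stub template and the debian/control.d/vars.* fragments reproduced below are expanded by the debian/control.stub rule in debian/rules, which substitutes PKGVER, ABINUM, SRCPKGNAME and the per-flavour placeholders (FLAVOUR, ARCH, DESC, SUPPORTED, TARGET, BOOTLOADER, =PROVIDES=). Purely as an illustration of that substitution (not the actual packaging script), a minimal Python sketch might look like this, assuming the paths used in this patch and a simplified placeholder scheme in which optional markers such as =HUMAN= are left unresolved:

#!/usr/bin/env python3
"""Illustrative sketch only: expand a flavour-control.stub-style template
with one debian/control.d/vars.<flavour> file.  The real expansion is done
by the debian/control.stub rule in debian/rules and its helper scripts;
this merely mimics the placeholder substitution described in the stub's
header comments (PKGVER, ABINUM, SRCPKGNAME, FLAVOUR, DESC, ...)."""

import re
from pathlib import Path


def read_vars(path: Path) -> dict:
    """Parse shell-style assignments such as arch="i386 amd64" into a dict."""
    values = {}
    for line in path.read_text().splitlines():
        match = re.match(r'^(\w+)="(.*)"$', line.strip())
        if match:
            values[match.group(1)] = match.group(2)
    return values


def expand_stub(stub: str, flavour: str, flavour_vars: dict,
                pkgver: str = "3.13.0", abinum: str = "54",
                srcpkgname: str = "linux") -> str:
    """Replace the placeholders listed in the stub's header comments.
    Simplification: optional =FOO= markers such as =HUMAN= are not
    resolved here, unlike in the real packaging scripts."""
    replacements = {
        "PKGVER": pkgver,
        "ABINUM": abinum,
        "SRCPKGNAME": srcpkgname,
        "FLAVOUR": flavour,
        "ARCH": flavour_vars.get("arch", ""),
        "DESC": flavour_vars.get("desc", ""),
        "SUPPORTED": flavour_vars.get("supported", ""),
        "TARGET": flavour_vars.get("target", ""),
        "BOOTLOADER": flavour_vars.get("bootloader", ""),
        "=PROVIDES=": flavour_vars.get("provides", ""),
    }
    for placeholder, value in replacements.items():
        stub = stub.replace(placeholder, value)
    return stub


if __name__ == "__main__":
    base = Path("debian.master/control.d")   # paths as they appear in this patch
    flavour = "generic"                      # any flavour with a vars.* file
    expanded = expand_stub(
        (base / "flavour-control.stub").read_text(),
        flavour,
        read_vars(base / f"vars.{flavour}"),
    )
    print(expanded)

Run for the "generic" flavour, this would print stanzas matching the linux-image-3.13.0-54-generic block above, except that =HUMAN= in the description stays unexpanded; the real scripts appear to replace it with a per-architecture string such as "64 bit x86".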
+ --- linux-3.13.0.orig/debian.master/control.d/flavour-control.stub +++ linux-3.13.0/debian.master/control.d/flavour-control.stub @@ -0,0 +1,129 @@ +# Items that get replaced: +# FLAVOUR +# DESC +# ARCH +# SUPPORTED +# TARGET +# BOOTLOADER +# =PROVIDES= +# +# Items marked with =FOO= are optional +# +# This file describes the template for packages that are created for each flavour +# in debian/control.d/vars.* +# +# This file gets edited in a couple of places. See the debian/control.stub rule in +# debian/rules. PKGVER, ABINUM, and SRCPKGNAME are all converted in the +# process of creating debian/control. +# +# The flavour-specific strings (ARCH, DESC, etc.) are converted using values from the various +# flavour files in debian/control.d/vars.* +# +# XXX: Leave the blank line before the first package!! + +Package: linux-image-PKGVER-ABINUM-FLAVOUR +Architecture: ARCH +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, =PROVIDES= +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: BOOTLOADER +Suggests: fdutils, SRCPKGNAME-doc-PKGVER | SRCPKGNAME-source-PKGVER, SRCPKGNAME-tools, linux-headers-PKGVER-ABINUM-FLAVOUR +Description: Linux kernel image for version PKGVER on DESC + This package contains the Linux kernel image for version PKGVER on + DESC. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports SUPPORTED processors. + . + TARGET + . + You likely do not want to install this package directly. Instead, install + the linux-FLAVOUR meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-PKGVER-ABINUM-FLAVOUR +Architecture: ARCH +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-PKGVER-ABINUM-FLAVOUR, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version PKGVER on DESC + This package contains the Linux kernel extra modules for version PKGVER on + DESC. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports SUPPORTED processors. + . + TARGET + . + You likely do not want to install this package directly. Instead, install + the linux-FLAVOUR meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-PKGVER-ABINUM-FLAVOUR +Architecture: ARCH +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-PKGVER-ABINUM, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version PKGVER on DESC + This package provides kernel header files for version PKGVER on + DESC. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-PKGVER-ABINUM/debian.README.gz for details.
+ +Package: linux-image-PKGVER-ABINUM-FLAVOUR-dbgsym +Architecture: ARCH +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version PKGVER on DESC + This package provides a kernel debug image for version PKGVER on + DESC. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-PKGVER-ABINUM-FLAVOUR +Architecture: ARCH +Section: devel +Priority: optional +Depends: ${misc:Depends}, SRCPKGNAME-tools-PKGVER-ABINUM +Description: Linux kernel version specific tools for version PKGVER-ABINUM + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PKGVER-ABINUM on + =HUMAN=. + +Package: linux-cloud-tools-PKGVER-ABINUM-FLAVOUR +Architecture: ARCH +Section: devel +Priority: optional +Depends: ${misc:Depends}, SRCPKGNAME-cloud-tools-PKGVER-ABINUM +Description: Linux kernel version specific cloud tools for version PKGVER-ABINUM + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version PKGVER-ABINUM on + =HUMAN=. + +Package: SRCPKGNAME-udebs-FLAVOUR +XC-Package-Type: udeb +Section: debian-installer +Architecture: ARCH +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + --- linux-3.13.0.orig/debian.master/control.d/generic.inclusion-list +++ linux-3.13.0/debian.master/control.d/generic.inclusion-list @@ -0,0 +1,171 @@ +arch/*/{crypto,kernel,oprofile} +arch/*/kvm/kvm.ko +arch/x86/kvm/kvm-intel.ko +arch/x86/kvm/kvm-amd.ko +crypto/* +drivers/acpi/* +drivers/ata/ahci_platform.ko +drivers/ata/ahci.ko +drivers/ata/acard-ahci.ko +drivers/ata/libahci.ko +drivers/block/nbd.ko +drivers/block/floppy.ko +drivers/block/cryptoloop.ko +drivers/block/rbd.ko +drivers/char/hangcheck-timer.ko +drivers/char/hw_random/virtio-rng.ko +drivers/char/ipmi/ipmi_msghandler.ko +drivers/char/lp.ko +drivers/char/nvram.ko +drivers/char/ppdev.ko +drivers/char/raw.ko +drivers/hid/hid.ko +drivers/hid/hid-hyperv.ko +drivers/hv/* +drivers/infiniband/core/ib_core.ko +drivers/infiniband/core/ib_addr.ko +drivers/infiniband/core/ib_cm.ko +drivers/infiniband/core/ib_mad.ko +drivers/infiniband/core/ib_sa.ko +drivers/infiniband/core/iw_cm.ko +drivers/infiniband/core/rdma_cm.ko +drivers/input/evbug.ko +drivers/input/gameport/gameport.ko +drivers/input/mouse/psmouse.ko +drivers/input/serio/hyperv-keyboard.ko +drivers/input/serio/serio_raw.ko +drivers/input/serio/serport.ko +drivers/input/joydev.ko +drivers/input/touchscreen/usbtouchscreen.ko +drivers/input/misc/xen-kbdfront.ko +drivers/md/* +drivers/message/fusion* +drivers/misc/vmw_balloon.ko +drivers/misc/vmw_vmci/vmw_vmci.ko +drivers/net/caif/caif_virtio.ko +drivers/net/mii.ko +drivers/net/ethernet/8390/8390.ko +drivers/net/ethernet/realtek/8139too.ko +drivers/net/ethernet/realtek/8139cp.ko +drivers/net/appletalk/ipddp.ko +drivers/net/bonding/bonding.ko +drivers/net/ppp/bsd_comp.ko +drivers/net/dummy.ko +drivers/net/ethernet/intel/e1000/e1000.ko +drivers/net/ethernet/intel/e1000e/e1000e.ko +drivers/net/eql.ko +drivers/net/ifb.ko +drivers/net/ethernet/intel/igbvf/igbvf.ko +drivers/net/ethernet/intel/ixgbevf/ixgbevf.ko +drivers/net/macvlan.ko 
+drivers/net/macvtap.ko +drivers/net/ethernet/8390/ne2k-pci.ko +drivers/net/netconsole.ko +drivers/net/ethernet/amd/pcnet32.ko +drivers/net/hyperv/hv_netvsc.ko +drivers/net/ppp/* +drivers/net/slip/* +drivers/net/veth.ko +drivers/net/vmxnet3/vmxnet3.ko +drivers/net/vxlan.ko +drivers/net/xen-netback/* +drivers/parport/parport.ko +drivers/parport/parport_pc.ko +drivers/phy/phy-core.ko +drivers/pps/pps_core.ko +drivers/ptp/ptp.ko +drivers/net/ethernet/dec/tulip/* +drivers/scsi/BusLogic.ko +drivers/scsi/device_handler/scsi_dh.ko +drivers/scsi/iscsi_tcp.ko +drivers/scsi/libiscsi.ko +drivers/scsi/libiscsi_tcp.ko +drivers/scsi/libsas/* +drivers/scsi/osd/osd.ko +drivers/scsi/osd/libosd.ko +drivers/scsi/qla1280.ko +drivers/scsi/raid_class.ko +drivers/scsi/scsi_transport_fc.ko +drivers/scsi/scsi_transport_iscsi.ko +drivers/scsi/scsi_transport_sas.ko +drivers/scsi/scsi_tgt.ko +drivers/scsi/vmw_pvscsi.ko +drivers/scsi/hv_storvsc.ko +drivers/scsi/virtio_scsi.ko +drivers/usb/storage/usb-storage.ko +drivers/vhost/vringh.ko +drivers/video/cirrusfb.ko +drivers/video/output.ko +drivers/video/syscopyarea.ko +drivers/video/sysfillrect.ko +drivers/video/sysimgblt.ko +drivers/video/hyperv_fb.ko +drivers/video/vga16fb.ko +drivers/video/vgastate.ko +drivers/video/xen-fbfront.ko +drivers/video/fb_sys_fops.ko +drivers/watchdog/softdog.ko +drivers/xen/* +fs/9p/* +fs/binfmt_misc.ko +fs/btrfs/* +fs/cachefiles/cachefiles.ko +fs/ceph/* +fs/configfs/* +fs/exofs/libore.ko +fs/ext*/* +fs/fat/* +fs/nls/nls_iso8859-1.ko +fs/isofs/* +fs/cifs/* +fs/xfs/* +fs/ufs/* +fs/squashfs/* +fs/nfs/* +fs/nfsd/* +fs/nfs_common/* +fs/fscache/* +fs/lockd/* +fs/autofs4/autofs4.ko +fs/overlayfs/* +fs/udf/* +lib/* +net/802/* +net/8021q/* +net/9p/* +net/appletalk/* +net/atm/* +net/ax25/* +net/bridge/* +net/can/* +net/core/* +net/ceph/libceph.ko +net/dccp/* +net/decnet/* +net/ieee802154/* +net/ipv4/* +net/ipv6/* +net/ipx/* +net/irda/* +net/key/* +net/lapb/* +net/llc/* +net/netfilter/* +net/netrom/* +net/openvswitch/* +net/phonet/* +net/rose/* +net/rxrpc/* +net/sched/* +net/sctp/* +net/tipc/* +net/vmw_vsock/* +net/x25/* +net/xfrm/* +net/sunrpc/auth_gss/auth_rpcgss.ko +net/sunrpc/auth_gss/rpcsec_gss_krb5.ko +net/sunrpc/sunrpc.ko +sound/soundcore.ko +sound/core/* +sound/pci/snd-ens1370.ko +sound/drivers/pcsp/snd-pcsp.ko --- linux-3.13.0.orig/debian.master/control.d/vars.generic +++ linux-3.13.0/debian.master/control.d/vars.generic @@ -0,0 +1,6 @@ +arch="i386 amd64 armhf arm64 ppc64el" +supported="Generic" +target="Geared toward desktop and server systems." +desc="=HUMAN= SMP" +bootloader="grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] | grub-ieee1275 [ppc64el]" +provides="kvm-api-4, redhat-cluster-modules, ivtv-modules" --- linux-3.13.0.orig/debian.master/control.d/vars.generic-lpae +++ linux-3.13.0/debian.master/control.d/vars.generic-lpae @@ -0,0 +1,6 @@ +arch="armhf" +supported="Generic LPAE" +target="Geared toward desktop and server systems." +desc="=HUMAN= SMP" +bootloader="flash-kernel [armhf]" +provides="kvm-api-4, redhat-cluster-modules, ivtv-modules" --- linux-3.13.0.orig/debian.master/control.d/vars.lowlatency +++ linux-3.13.0/debian.master/control.d/vars.lowlatency @@ -0,0 +1,6 @@ +arch="i386 amd64" +supported="Lowlatency" +target="Geared toward desktop and server systems." 
+desc="=HUMAN= SMP" +bootloader="grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64]" +provides="kvm-api-4, redhat-cluster-modules, ivtv-modules" --- linux-3.13.0.orig/debian.master/control.d/vars.powerpc-e500 +++ linux-3.13.0/debian.master/control.d/vars.powerpc-e500 @@ -0,0 +1,6 @@ +supported="32-bit Freescale Power e500v1 and e500v2" +target="Geared toward server systems." +desc="32-bit Freescale Power e500v1 and e500v2" +bootloader="grub-ieee1275" +provides="redhat-cluster-modules, ivtv-modules" +arch="powerpc" --- linux-3.13.0.orig/debian.master/control.d/vars.powerpc-e500mc +++ linux-3.13.0/debian.master/control.d/vars.powerpc-e500mc @@ -0,0 +1,6 @@ +supported="32-bit Freescale Power e500mc" +target="Geared toward server systems." +desc="32-bit Freescale Power e500mc" +bootloader="grub-ieee1275" +provides="redhat-cluster-modules, ivtv-modules" +arch="powerpc" --- linux-3.13.0.orig/debian.master/control.d/vars.powerpc-smp +++ linux-3.13.0/debian.master/control.d/vars.powerpc-smp @@ -0,0 +1,6 @@ +supported="32-bit PowerPC SMP" +target="Geared toward desktop or server systems." +desc="32-bit PowerPC SMP" +bootloader="yaboot" +provides="redhat-cluster-modules, ivtv-modules" +arch="powerpc" --- linux-3.13.0.orig/debian.master/control.d/vars.powerpc64-emb +++ linux-3.13.0/debian.master/control.d/vars.powerpc64-emb @@ -0,0 +1,6 @@ +supported="64-bit PowerPC SMP Book3E" +target="Geared toward desktop or server systems." +desc="64-bit PowerPC SMP Book3E" +bootloader="grub-ieee1275" +provides="redhat-cluster-modules, ivtv-modules" +arch="powerpc" --- linux-3.13.0.orig/debian.master/control.d/vars.powerpc64-smp +++ linux-3.13.0/debian.master/control.d/vars.powerpc64-smp @@ -0,0 +1,6 @@ +supported="64-bit PowerPC SMP" +target="Geared toward desktop or server systems." +desc="64-bit PowerPC SMP" +bootloader="yaboot" +provides="redhat-cluster-modules, ivtv-modules" +arch="powerpc" --- linux-3.13.0.orig/debian.master/control.stub +++ linux-3.13.0/debian.master/control.stub @@ -0,0 +1,977 @@ +Source: linux +Section: devel +Priority: optional +Maintainer: Ubuntu Kernel Team +Standards-Version: 3.9.4.0 +Build-Depends: debhelper (>= 5), cpio, module-init-tools, kernel-wedge (>= 2.24ubuntu1), makedumpfile [amd64 i386], libelf-dev, libnewt-dev, libiberty-dev, rsync, libdw-dev, libpci-dev, dpkg (>= 1.16.0~ubuntu4), pkg-config, flex, bison, libunwind8-dev, openssl, libaudit-dev, bc, python-dev, gawk, device-tree-compiler [powerpc], u-boot-tools [powerpc], libc6-dev-ppc64 [powerpc] +Build-Depends-Indep: xmlto, docbook-utils, ghostscript, transfig, bzip2, sharutils, asciidoc +Vcs-Git: http://kernel.ubuntu.com/git-repos/ubuntu/ubuntu-trusty.git +XS-Testsuite: autopkgtest +#XS-Testsuite-Depends: gcc-4.7 binutils + +Package: linux-source-3.13.0 +Architecture: all +Section: devel +Priority: optional +Provides: linux-source, linux-source-3 +Depends: ${misc:Depends}, binutils, bzip2, coreutils | fileutils (>= 4.0) +Recommends: libc-dev, gcc, make +Suggests: libncurses-dev | ncurses-dev, kernel-package, libqt3-dev +Description: Linux kernel source for version 3.13.0 with Ubuntu patches + This package provides the source code for the Linux kernel version + 3.13.0. + . + This package is mainly meant for other packages to use, in order to build + custom flavours. + . 
+ If you wish to use this package to create a custom Linux kernel, then it + is suggested that you investigate the package kernel-package, which has + been designed to ease the task of creating kernel image packages. + . + If you are simply trying to build third-party modules for your kernel, + you do not want this package. Install the appropriate linux-headers + package instead. + +Package: linux-doc +Architecture: all +Section: doc +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-doc-3 +Replaces: linux-doc-3 +Description: Linux kernel specific documentation for version 3.13.0 + This package provides the various documents in the 3.13.0 kernel + Documentation/ subdirectory. These document kernel subsystems, APIs, device + drivers, and so on. See + /usr/share/doc/linux-doc/00-INDEX for a list of what is + contained in each file. + +Package: linux-headers-3.13.0-54 +Architecture: all +Multi-Arch: foreign +Section: devel +Priority: optional +Depends: ${misc:Depends}, coreutils | fileutils (>= 4.0) +Description: Header files related to Linux kernel version 3.13.0 + This package provides kernel header files for version 3.13.0, for sites + that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details + +Package: linux-libc-dev +Architecture: i386 amd64 armhf arm64 x32 powerpc ppc64el +Depends: ${misc:Depends} +Conflicts: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), amd64-libs-dev (<= 1.1), linux-kernel-headers +Replaces: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), linux-kernel-headers, libdrm-dev +Provides: linux-kernel-headers +Multi-Arch: same +Description: Linux Kernel Headers for development + This package provides headers from the Linux kernel. These headers + are used by the installed headers for GNU glibc and other system + libraries. They are NOT meant to be used to build third-party modules for + your kernel. Use linux-headers-* packages for that. + +Package: linux-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Replaces: linux-tools (<= 2.6.32-16.25), linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Description: Linux kernel version specific tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PGKVER. + +Package: linux-tools-3.13.0-54 +Architecture: i386 amd64 armhf arm64 powerpc ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-tools-common +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + You probabally want to install linux-tools-3.13.0-54-. 
+ +Package: linux-cloud-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Breaks: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13) +Conflicts: linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Replaces: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13), linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Depends: ${misc:Depends} +Description: Linux kernel version specific cloud tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools for cloud tools for version PGKVER. + +Package: linux-cloud-tools-3.13.0-54 +Architecture: i386 amd64 armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-cloud-tools-common +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud tools for version 3.13.0-54 on + 64 bit x86. + You probabally want to install linux-cloud-tools-3.13.0-54-. + + +Package: linux-image-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] | grub-ieee1275 [ppc64el] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-dbgsym +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 armhf arm64 ppc64el +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: flash-kernel [armhf] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic-lpae +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic-lpae, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-lpae-dbgsym +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic-lpae +XC-Package-Type: udeb +Section: debian-installer +Architecture: armhf +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-lowlatency +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-lowlatency, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-lowlatency-dbgsym +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-lowlatency +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500 +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500 +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500mc +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500mc, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500mc-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500mc +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-smp +Description: Linux kernel image for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-emb +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-emb, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-emb-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-emb +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-smp +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ --- linux-3.13.0.orig/debian.master/control.stub.in +++ linux-3.13.0/debian.master/control.stub.in @@ -0,0 +1,121 @@ +Source: linux +Section: devel +Priority: optional +Maintainer: Ubuntu Kernel Team +Standards-Version: 3.9.4.0 +Build-Depends: debhelper (>= 5), cpio, module-init-tools, kernel-wedge (>= 2.24ubuntu1), makedumpfile [amd64 i386], libelf-dev, libnewt-dev, libiberty-dev, rsync, libdw-dev, libpci-dev, dpkg (>= 1.16.0~ubuntu4), pkg-config, flex, bison, libunwind8-dev, openssl, libaudit-dev, bc, python-dev, gawk, device-tree-compiler [powerpc], u-boot-tools [powerpc], libc6-dev-ppc64 [powerpc] +Build-Depends-Indep: xmlto, docbook-utils, ghostscript, transfig, bzip2, sharutils, asciidoc +Vcs-Git: http://kernel.ubuntu.com/git-repos/ubuntu/ubuntu-trusty.git +XS-Testsuite: autopkgtest +#XS-Testsuite-Depends: gcc-4.7 binutils + +Package: SRCPKGNAME-source-PKGVER +Architecture: all +Section: devel +Priority: optional +Provides: SRCPKGNAME-source, SRCPKGNAME-source-3 +Depends: ${misc:Depends}, binutils, bzip2, coreutils | fileutils (>= 4.0) +Recommends: libc-dev, gcc, make +Suggests: libncurses-dev | ncurses-dev, kernel-package, libqt3-dev +Description: Linux kernel source for version PKGVER with Ubuntu patches + This package provides the source code for the Linux kernel version + PKGVER. + . + This package is mainly meant for other packages to use, in order to build + custom flavours. + . + If you wish to use this package to create a custom Linux kernel, then it + is suggested that you investigate the package kernel-package, which has + been designed to ease the task of creating kernel image packages. + . + If you are simply trying to build third-party modules for your kernel, + you do not want this package. Install the appropriate linux-headers + package instead. + +Package: SRCPKGNAME-doc +Architecture: all +Section: doc +Priority: optional +Depends: ${misc:Depends} +Conflicts: SRCPKGNAME-doc-3 +Replaces: SRCPKGNAME-doc-3 +Description: Linux kernel specific documentation for version PKGVER + This package provides the various documents in the PKGVER kernel + Documentation/ subdirectory. These document kernel subsystems, APIs, device + drivers, and so on. See + /usr/share/doc/SRCPKGNAME-doc/00-INDEX for a list of what is + contained in each file. + +Package: SRCPKGNAME-headers-PKGVER-ABINUM +Architecture: all +Multi-Arch: foreign +Section: devel +Priority: optional +Depends: ${misc:Depends}, coreutils | fileutils (>= 4.0) +Description: Header files related to Linux kernel version PKGVER + This package provides kernel header files for version PKGVER, for sites + that want the latest kernel headers. Please read + /usr/share/doc/SRCPKGNAME-headers-PKGVER-ABINUM/debian.README.gz for details + +Package: SRCPKGNAME-libc-dev +Architecture: i386 amd64 armhf arm64 x32 powerpc ppc64el +Depends: ${misc:Depends} +Conflicts: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), amd64-libs-dev (<= 1.1), SRCPKGNAME-kernel-headers +Replaces: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), SRCPKGNAME-kernel-headers, libdrm-dev +Provides: SRCPKGNAME-kernel-headers +Multi-Arch: same +Description: Linux Kernel Headers for development + This package provides headers from the Linux kernel. These headers + are used by the installed headers for GNU glibc and other system + libraries. They are NOT meant to be used to build third-party modules for + your kernel. Use SRCPKGNAME-headers-* packages for that. 
+ +Package: SRCPKGNAME-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Replaces: SRCPKGNAME-tools (<= 2.6.32-16.25), linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Description: Linux kernel version specific tools for version PKGVER + This package provides the architecture independent parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PGKVER. + +Package: SRCPKGNAME-tools-PKGVER-ABINUM +Architecture: i386 amd64 armhf arm64 powerpc ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, SRCPKGNAME-tools-common +Description: Linux kernel version specific tools for version PKGVER-ABINUM + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PKGVER-ABINUM on + =HUMAN=. + You probabally want to install linux-tools-PKGVER-ABINUM-. + +Package: SRCPKGNAME-cloud-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Breaks: SRCPKGNAME-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13) +Conflicts: linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Replaces: SRCPKGNAME-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13), linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Depends: ${misc:Depends} +Description: Linux kernel version specific cloud tools for version PKGVER + This package provides the architecture independent parts for kernel + version locked tools for cloud tools for version PGKVER. + +Package: SRCPKGNAME-cloud-tools-PKGVER-ABINUM +Architecture: i386 amd64 armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, SRCPKGNAME-cloud-tools-common +Description: Linux kernel version specific cloud tools for version PKGVER-ABINUM + This package provides the architecture dependant parts for kernel + version locked tools for cloud tools for version PKGVER-ABINUM on + =HUMAN=. + You probabally want to install linux-cloud-tools-PKGVER-ABINUM-. + --- linux-3.13.0.orig/debian.master/copyright +++ linux-3.13.0/debian.master/copyright @@ -0,0 +1,29 @@ +This is the Ubuntu prepackaged version of the Linux kernel. +Linux was written by Linus Torvalds +and others. + +This package was put together by the Ubuntu Kernel Team, from +sources retrieved from upstream linux git. +The sources may be found at most Linux ftp sites, including +ftp://ftp.kernel.org/pub/linux/kernel/ + +This package is currently maintained by the +Ubuntu Kernel Team + +Linux is copyrighted by Linus Torvalds and others. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 dated June, 1991. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +On Ubuntu Linux systems, the complete text of the GNU General +Public License v2 can be found in `/usr/share/common-licenses/GPL-2'. 
--- linux-3.13.0.orig/debian.master/d-i/exclude-firmware.armhf-generic +++ linux-3.13.0/debian.master/d-i/exclude-firmware.armhf-generic @@ -0,0 +1,2 @@ +nic-modules +scsi-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-firmware.armhf-generic-lpae +++ linux-3.13.0/debian.master/d-i/exclude-firmware.armhf-generic-lpae @@ -0,0 +1,2 @@ +nic-modules +scsi-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.amd64-virtual +++ linux-3.13.0/debian.master/d-i/exclude-modules.amd64-virtual @@ -0,0 +1,14 @@ +pata-modules +firewire-core-modules +plip-modules +usb-modules +pcmcia-storage-modules +input-modules +nic-pcmcia-modules +pcmcia-modules +nic-usb-modules +nfs-modules +speakup-modules +ppp-modules +sata-modules +ipmi-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.arm64-generic +++ linux-3.13.0/debian.master/d-i/exclude-modules.arm64-generic @@ -0,0 +1,11 @@ +serial-modules +pata-modules +firewire-core-modules +floppy-modules +usb-modules +pcmcia-storage-modules +fb-modules +nic-pcmcia-modules +pcmcia-modules +nic-usb-modules +message-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.armhf-generic +++ linux-3.13.0/debian.master/d-i/exclude-modules.armhf-generic @@ -0,0 +1,10 @@ +serial-modules +firewire-core-modules +floppy-modules +pcmcia-storage-modules +fb-modules +nic-pcmcia-modules +pcmcia-modules +pata-modules +message-modules +virtio-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.armhf-generic-lpae +++ linux-3.13.0/debian.master/d-i/exclude-modules.armhf-generic-lpae @@ -0,0 +1,10 @@ +serial-modules +firewire-core-modules +floppy-modules +pcmcia-storage-modules +fb-modules +nic-pcmcia-modules +pcmcia-modules +pata-modules +message-modules +virtio-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.i386-virtual +++ linux-3.13.0/debian.master/d-i/exclude-modules.i386-virtual @@ -0,0 +1,14 @@ +pata-modules +firewire-core-modules +plip-modules +usb-modules +pcmcia-storage-modules +input-modules +nic-pcmcia-modules +pcmcia-modules +nic-usb-modules +nfs-modules +speakup-modules +ppp-modules +sata-modules +ipmi-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.powerpc +++ linux-3.13.0/debian.master/d-i/exclude-modules.powerpc @@ -0,0 +1,7 @@ +efi-modules +fb-modules +acpi-modules +serial-modules +nic-pcmcia-modules +pcmcia-modules +pcmcia-storage-modules --- linux-3.13.0.orig/debian.master/d-i/exclude-modules.ppc64el-generic +++ linux-3.13.0/debian.master/d-i/exclude-modules.ppc64el-generic @@ -0,0 +1,11 @@ +serial-modules +pata-modules +fat-modules +pcmcia-storage-modules +nic-pcmcia-modules +pcmcia-modules +firewire-core-modules +usb-modules +mouse-modules +irda-modules +speakup-modules --- linux-3.13.0.orig/debian.master/d-i/firmware/README.txt +++ linux-3.13.0/debian.master/d-i/firmware/README.txt @@ -0,0 +1,4 @@ +# +# Place the names of udeb modules into this directory that require +# runtime firmware. +# --- linux-3.13.0.orig/debian.master/d-i/firmware/kernel-image +++ linux-3.13.0/debian.master/d-i/firmware/kernel-image @@ -0,0 +1,14 @@ +device-tree/apm-mustang.dtb ? +device-tree/highbank.dtb ? +device-tree/imx6dl-wandboard.dtb ? +device-tree/imx6q-sabrelite.dtb ? +device-tree/imx6q-wandboard.dtb ? +device-tree/omap3-beagle-xm.dtb ? +device-tree/omap4-panda.dtb ? +device-tree/omap4-panda-es.dtb ? +device-tree/tegra20-harmony.dtb ? +device-tree/tegra20-paz00.dtb ? +device-tree/tegra20-seaboard.dtb ? +device-tree/tegra20-trimslice.dtb ? +device-tree/tegra20-ventana.dtb ? 
+device-tree/tegra20-whistler.dtb ? --- linux-3.13.0.orig/debian.master/d-i/firmware/nic-modules +++ linux-3.13.0/debian.master/d-i/firmware/nic-modules @@ -0,0 +1,14 @@ +e100/d101m_ucode.bin ? +e100/d101s_ucode.bin ? +e100/d102e_ucode.bin ? +bnx2/bnx2-mips-09-6.2.1b.fw ? +bnx2/bnx2-rv2p-06-6.0.15.fw ? +bnx2/bnx2-mips-06-6.2.3.fw ? +bnx2/bnx2-rv2p-09-6.0.17.fw ? +bnx2/bnx2-rv2p-09ax-6.0.17.fw ? +bnx2x/bnx2x-e1h-7.8.17.0.fw ? +bnx2x/bnx2x-e1-7.8.17.0.fw ? +bnx2x/bnx2x-e2-7.8.17.0.fw ? +tigon/tg3_tso5.bin ? +tigon/tg3_tso.bin ? +tigon/tg3.bin ? --- linux-3.13.0.orig/debian.master/d-i/firmware/scsi-modules +++ linux-3.13.0/debian.master/d-i/firmware/scsi-modules @@ -0,0 +1,3 @@ +qlogic/1040.bin ? +qlogic/12160.bin ? +qlogic/1280.bin ? --- linux-3.13.0.orig/debian.master/d-i/kernel-versions +++ linux-3.13.0/debian.master/d-i/kernel-versions @@ -0,0 +1,19 @@ +# arch version flavour installedname suffix bdep +amd64 3.13.0-54 generic 3.13.0-54-generic - + +i386 3.13.0-54 generic 3.13.0-54-generic - + +armhf 3.13.0-54 generic 3.13.0-54-generic - + +armhf 3.13.0-54 generic-lpae 3.13.0-54-generic-lpae - + +arm64 3.13.0-54 generic 3.13.0-54-generic - + +ppc64el 3.13.0-54 generic 3.13.0-54-generic - + +# Ports +# arch version flavour installedname suffix bdep +powerpc 3.13.0-54 powerpc-smp 3.13.0-54-powerpc-smp - +powerpc 3.13.0-54 powerpc64-smp 3.13.0-54-powerpc64-smp - +powerpc 3.13.0-54 powerpc-e500 3.13.0-54-powerpc-e500 - +powerpc 3.13.0-54 powerpc-e500mc 3.13.0-54-powerpc-e500mc - --- linux-3.13.0.orig/debian.master/d-i/kernel-versions.in +++ linux-3.13.0/debian.master/d-i/kernel-versions.in @@ -0,0 +1,19 @@ +# arch version flavour installedname suffix bdep +amd64 PKGVER-ABINUM generic PKGVER-ABINUM-generic - + +i386 PKGVER-ABINUM generic PKGVER-ABINUM-generic - + +armhf PKGVER-ABINUM generic PKGVER-ABINUM-generic - + +armhf PKGVER-ABINUM generic-lpae PKGVER-ABINUM-generic-lpae - + +arm64 PKGVER-ABINUM generic PKGVER-ABINUM-generic - + +ppc64el PKGVER-ABINUM generic PKGVER-ABINUM-generic - + +# Ports +# arch version flavour installedname suffix bdep +powerpc PKGVER-ABINUM powerpc-smp PKGVER-ABINUM-powerpc-smp - +powerpc PKGVER-ABINUM powerpc64-smp PKGVER-ABINUM-powerpc64-smp - +powerpc PKGVER-ABINUM powerpc-e500 PKGVER-ABINUM-powerpc-e500 - +powerpc PKGVER-ABINUM powerpc-e500mc PKGVER-ABINUM-powerpc-e500mc - --- linux-3.13.0.orig/debian.master/d-i/modules-powerpc/block-modules +++ linux-3.13.0/debian.master/d-i/modules-powerpc/block-modules @@ -0,0 +1,31 @@ +aoe +aten +bpck +bpck6 ? +cciss +comm +cpqarray ? +DAC960 +dstr +epat +epia +fit2 +fit3 +friq +frpw +kbic +ktti +nbd +on20 +on26 +paride +pcd +pd +pf +pg +ps3disk ? +ps3vram ? +pt +sx8 +umem +virtio_blk ? --- linux-3.13.0.orig/debian.master/d-i/modules-powerpc/message-modules +++ linux-3.13.0/debian.master/d-i/modules-powerpc/message-modules @@ -0,0 +1,13 @@ +mptbase +mptctl +mptfc +mptlan +mptsas +mptscsih +mptspi +i2o_block +i2o_bus +i2o_config ? +i2o_core +i2o_proc +i2o_scsi --- linux-3.13.0.orig/debian.master/d-i/modules-powerpc/nic-modules +++ linux-3.13.0/debian.master/d-i/modules-powerpc/nic-modules @@ -0,0 +1,152 @@ +3c359 ? +3c501 ? +3c503 ? +3c505 ? +3c507 ? +3c509 ? +3c515 ? +3c523 ? +3c527 ? +3c59x ? +8139cp ? +8139too ? +82596 ? +abyss ? +ac3200 ? +adm8211 ? +airo ? +airport ? +amd8111e ? +arc4 ? +arcnet ? +arc-rawmode ? +arc-rimi ? +arlan ? +at1700 ? +atl1 ? +atl1e ? +atl2 ? +atmel ? +atmel_pci ? +b44 ? +bcm43xx ? +bcm43xx-mac80211 ? +bmac ? +bnx2 ? +bnx2x ? +bonding ? +cassini ? +com20020 ? +com20020-pci ? +com90io ? 
+com90xx ? +cs89x0 ? +de2104x ? +de4x5 ? +de600 ? +de620 ? +defxx ? +depca ? +dl2k ? +dmfe ? +dummy ? +e100 ? +e1000 ? +e1000e ? +e2100 ? +eepro ? +eepro100 ? +eexpress ? +epic100 ? +eql ? +es3210 ? +eth16i ? +ewrk3 ? +fealnx ? +forcedeth ? +igb ? +hamachi ? +hermes ? +hp ? +hp100 ? +hp-plus ? +ibmtr ? +ibmveth ? +ipddp ? +ipw2100 ? +ipw2200 ? +ipw3945 ? +ixgb ? +lance ? +lanstreamer ? +lasi_82596 ? +lne390 ? +lp486e ? +mace ? +mv643xx_eth ? +myri_sbus ? +natsemi ? +ne ? +ne2 ? +ne2k-pci ? +ne3210 ? +netconsole ? +netxen_nic ? +ni5010 ? +ni52 ? +ni65 ? +niu ? +ns83820 ? +olympic ? +orinoco ? +orinoco_pci ? +orinoco_plx ? +orinoco_tmd ? +pcnet32 ? +ps3_gelic ? +r8169 ? +rate_control ? +rfc1051 ? +rfc1201 ? +rrunner ? +rt2400 ? +rt2500 ? +rt61pci ? +s2io ? +shaper ? +sis190 ? +sis900 ? +spidernet ? +skfp ? +skge ? +sk98lin ? +sky2 ? +smc9194 ? +smc-ultra ? +smc-ultra32 ? +starfire ? +strip ? +sunbmac ? +sundance ? +sungem ? +sungem_phy ? +sunhme ? +sunlance ? +sunqe ? +sunvnet ? +tg3 ? +tlan ? +tms380tr ? +tmspci ? +tulip ? +tun ? +typhoon ? +uli526x ? +via-rhine ? +via-velocity ? +virtio_net ? +wavelan ? +wd ? +winbond-840 ? +yellowfin ? +znet ? --- linux-3.13.0.orig/debian.master/d-i/modules-powerpc/scsi-modules +++ linux-3.13.0/debian.master/d-i/modules-powerpc/scsi-modules @@ -0,0 +1,118 @@ +# SCSI +raid_class ? +scsi_transport_spi ? +scsi_transport_fc ? +scsi_transport_iscsi ? +scsi_transport_sas ? +iscsi_tcp ? +libiscsi ? +amiga7xx ? +a3000 ? +a2091 ? +gvp11 ? +mvme147 ? +sgiwd93 ? +cyberstorm ? +cyberstormII ? +blz2060 ? +blz1230 ? +fastlane ? +oktagon_esp_mod ? +atari_scsi ? +mac_scsi ? +mac_esp ? +sun3_scsi ? +mvme16x ? +bvme6000 ? +sim710 ? +advansys ? +psi240i ? +BusLogic ? +dpt_i2o ? +u14-34f ? +ultrastor ? +aha152x ? +aha1542 ? +aha1740 ? +aic7xxx_old ? +ips ? +fd_mcs ? +fdomain ? +in2000 ? +g_NCR5380 ? +g_NCR5380_mmio ? +NCR53c406a ? +NCR_D700 ? +NCR_Q720_mod ? +sym53c416 ? +qlogicfas408 ? +qla1280 ? +pas16 ? +seagate ? +seagate ? +t128 ? +dmx3191d ? +dtc ? +zalon7xx ? +eata_pio ? +wd7000 ? +mca_53c9x ? +ibmmca ? +ibmvfc ? +ibmvscsic ? +eata ? +dc395x ? +tmscsim ? +megaraid ? +atp870u ? +esp ? +gdth ? +initio ? +a100u2w ? +qlogicpti ? +ide-scsi ? +mesh ? +mac53c94 ? +pluto ? +dec_esp ? +3w-xxxx ? +3w-9xxx ? +ppa ? +imm ? +jazz_esp ? +sun3x_esp ? +fcal ? +lasi700 ? +nsp32 ? +ipr ? +hptiop ? +stex ? +osst ? +sg ? +ch ? +scsi_debug ? +aacraid ? +aic7xxx ? +aic79xx ? +aic94xx ? +arcmsr ? +acornscsi_mod ? +arxescsi ? +cumana_1 ? +cumana_2 ? +ecoscsi ? +oak ? +powertec ? +eesox ? +ibmvscsic ? +libsas ? +lpfc ? +megaraid_mm ? +megaraid_mbox ? +megaraid_sas ? +qla2xxx ? +sym53c8xx ? +qla4xxx ? +mvsas ? +sr_mod ? +sd_mod ? --- linux-3.13.0.orig/debian.master/d-i/modules-powerpc/storage-core-modules +++ linux-3.13.0/debian.master/d-i/modules-powerpc/storage-core-modules @@ -0,0 +1,13 @@ +# Core stacks +usb-storage ? + +# Block level + +# Loop modules +cryptoloop + +# Needs to be here for better cdrom initrd layout +isofs + +ps3stor_lib ? +ps3rom ? --- linux-3.13.0.orig/debian.master/d-i/modules/block-modules +++ linux-3.13.0/debian.master/d-i/modules/block-modules @@ -0,0 +1,34 @@ +aoe ? +aten ? +bpck ? +bpck6 ? +cciss ? +comm ? +cpqarray ? +DAC960 ? +dstr ? +epat ? +epia ? +fit2 ? +fit3 ? +friq ? +frpw ? +hpsa ? +kbic ? +ktti ? +nbd ? +nvme ? +on20 ? +on26 ? +paride ? +pcd ? +pd ? +pf ? +pg ? +pt ? +sdhci-tegra ? +sx8 ? +umem ? +virtio_blk ? +xen-blkfront ? +mtip32xx ? 
--- linux-3.13.0.orig/debian.master/d-i/modules/crypto-modules +++ linux-3.13.0/debian.master/d-i/modules/crypto-modules @@ -0,0 +1,70 @@ +aesni-intel ? +aes-x86_64 ? +af_alg ? +algif_hash ? +algif_skcipher ? +ansi_cprng ? +anubis ? +arc4 ? +async_memcpy ? +async_pq ? +async_raid6_recov ? +async_tx ? +async_xor ? +authenc ? +authencesn ? +blowfish_common ? +blowfish_generic ? +blowfish-x86_64 ? +camellia ? +cast5 ? +cast6 ? +ccm ? +cryptd ? +cryptoloop ? +crypto_null ? +crypto_user ? +ctr ? +cts ? +deflate ? +des_generic ? +fcrypt ? +gcm ? +gf128mul ? +ghash-clmulni-intel ? +ghash-generic ? +khazad ? +lrw ? +lzo ? +md4 ? +michael_mic ? +padlock-aes ? +padlock-sha ? +pcbc ? +pcrypt ? +raid6test ? +rmd128 ? +rmd160 ? +rmd256 ? +rmd320 ? +salsa20_generic ? +salsa20-x86_64 ? +seed ? +seqiv ? +serpent_generic ? +serpent-sse2-x86_64 ? +sha1-ssse3 ? +sha512_generic ? +tcrypt ? +tea ? +tgr192 ? +twofish_common ? +twofish_generic ? +twofish-x86_64 ? +twofish-x86_64-3way ? +vmac ? +wp512 ? +xcbc ? +xor ? +xts ? +zlib ? --- linux-3.13.0.orig/debian.master/d-i/modules/fat-modules +++ linux-3.13.0/debian.master/d-i/modules/fat-modules @@ -0,0 +1,7 @@ +fat ? +vfat ? + +# Supporting modules ? +nls_cp437 ? +nls_iso8859-1 ? +nls_utf8 ? --- linux-3.13.0.orig/debian.master/d-i/modules/fb-modules +++ linux-3.13.0/debian.master/d-i/modules/fb-modules @@ -0,0 +1,3 @@ +fbcon ? +vesafb ? +vga16fb ? --- linux-3.13.0.orig/debian.master/d-i/modules/firewire-core-modules +++ linux-3.13.0/debian.master/d-i/modules/firewire-core-modules @@ -0,0 +1,4 @@ +firewire-core ? +firewire-ohci ? +firewire-sbp2 ? +firewire-net ? --- linux-3.13.0.orig/debian.master/d-i/modules/floppy-modules +++ linux-3.13.0/debian.master/d-i/modules/floppy-modules @@ -0,0 +1 @@ +floppy ? --- linux-3.13.0.orig/debian.master/d-i/modules/fs-core-modules +++ linux-3.13.0/debian.master/d-i/modules/fs-core-modules @@ -0,0 +1,4 @@ +ext2 ? +jfs ? +reiserfs ? +xfs ? --- linux-3.13.0.orig/debian.master/d-i/modules/fs-secondary-modules +++ linux-3.13.0/debian.master/d-i/modules/fs-secondary-modules @@ -0,0 +1,5 @@ +btrfs ? +fuse ? +ntfs ? +hfs ? +hfsplus ? --- linux-3.13.0.orig/debian.master/d-i/modules/input-modules +++ linux-3.13.0/debian.master/d-i/modules/input-modules @@ -0,0 +1,23 @@ +hid-a4tech ? +hid-apple ? +hid-belkin ? +hid-bright ? +hid-cherry ? +hid-chicony ? +hid-cypress ? +hid-dell ? +hid-ezkey ? +hid-generic ? +hid-gyration ? +hid-logitech ? +hid-logitech-dj ? +hid-microsoft ? +hid-monterey ? +hid-petalynx ? +hid-pl ? +hid-samsung ? +hid-sony ? +hid-sunplus ? +hid-tmff ? +hid-zpff ? +usbhid ? --- linux-3.13.0.orig/debian.master/d-i/modules/ipmi-modules +++ linux-3.13.0/debian.master/d-i/modules/ipmi-modules @@ -0,0 +1,5 @@ +ipmi_devintf ? +ipmi_msghandler ? +ipmi_poweroff ? +ipmi_si ? +ipmi_watchdog ? --- linux-3.13.0.orig/debian.master/d-i/modules/irda-modules +++ linux-3.13.0/debian.master/d-i/modules/irda-modules @@ -0,0 +1,30 @@ +act200l-sir ? +actisys-sir ? +ali-ircc ? +donauboe ? +esi-sir ? +girbil-sir ? +ircomm ? +ircomm-tty ? +irda ? +irda-usb ? +irlan ? +irnet ? +irport ? +irtty-sir ? +kingsun-sir ? +ks959-sir ? +ksdazzle-sir ? +litelink-sir ? +ma600-sir ? +mcp2120-sir ? +mcs7780 ? +nsc-ircc ? +old_belkin-sir ? +sir-dev ? +smsc-ircc2 ? +stir4200 ? +tekram-sir ? +via-ircc ? +vlsi_ir ? +w83977af_ir ? --- linux-3.13.0.orig/debian.master/d-i/modules/kernel-image +++ linux-3.13.0/debian.master/d-i/modules/kernel-image @@ -0,0 +1,24 @@ +gpio-pca953x ? +gpio-regulator ? +i2c-mux ? +i2c-mux-pinctrl ? +i2c-tegra ? +max8907 ? 
+max8907-regulator ? +nvec ? +nvec_kbd ? +nvec_paz00 ? +nvec_power ? +nvec_ps2 ? +palmas-regulator ? +rtc-em3027 ? +rtc-max8907 ? +rtc-palmas ? +rtc-tps6586x ? +rtc-tps65910 ? +tps51632-regulator ? +tps62360-regulator ? +tps65090-charger ? +tps65090-regulator ? +tps6586x-regulator ? +tps65910-regulator ? --- linux-3.13.0.orig/debian.master/d-i/modules/md-modules +++ linux-3.13.0/debian.master/d-i/modules/md-modules @@ -0,0 +1,16 @@ +dm-crypt ? +dm-mirror ? +dm-raid ? +dm-snapshot ? +dm-zero ? +faulty ? +linear ? +multipath ? +raid0 ? +raid1 ? +raid10 ? +raid456 ? + +# Extras +dm-raid45 ? +dm-loop ? --- linux-3.13.0.orig/debian.master/d-i/modules/message-modules +++ linux-3.13.0/debian.master/d-i/modules/message-modules @@ -0,0 +1,15 @@ +mptbase ? +mptctl ? +mptfc ? +mptlan ? +mptsas ? +mpt2sas ? +mpt3sas ? +mptscsih ? +mptspi ? +i2o_block ? +i2o_bus ? +i2o_config ? +i2o_core ? +i2o_proc ? +i2o_scsi ? --- linux-3.13.0.orig/debian.master/d-i/modules/mouse-modules +++ linux-3.13.0/debian.master/d-i/modules/mouse-modules @@ -0,0 +1,2 @@ +psmouse ? +usbmouse ? --- linux-3.13.0.orig/debian.master/d-i/modules/multipath-modules +++ linux-3.13.0/debian.master/d-i/modules/multipath-modules @@ -0,0 +1,2 @@ +dm-multipath ? +dm-round-robin ? --- linux-3.13.0.orig/debian.master/d-i/modules/nfs-modules +++ linux-3.13.0/debian.master/d-i/modules/nfs-modules @@ -0,0 +1,6 @@ +nfs ? +nfs_acl ? +nfsv3 ? +lockd ? +sunrpc ? +cifs ? --- linux-3.13.0.orig/debian.master/d-i/modules/nic-modules +++ linux-3.13.0/debian.master/d-i/modules/nic-modules @@ -0,0 +1,175 @@ +3c359 ? +3c501 ? +3c503 ? +3c505 ? +3c507 ? +3c509 ? +3c515 ? +3c523 ? +3c527 ? +3c59x ? +8139cp ? +8139too ? +82596 ? +abyss ? +ac3200 ? +adm8211 ? +airo ? +airport ? +alx ? +amd8111e ? +arc4 ? +arcnet ? +arc-rawmode ? +arc-rimi ? +arlan ? +at1700 ? +ath5k ? +ath9k ? +ath9k_htc ? +atl1 ? +atl1c ? +atl1e ? +atl2 ? +atmel ? +atmel_pci ? +b44 ? +be2net ? +bmac ? +bnx2 ? +bnx2x ? +bonding ? +brcmfmac ? +brcmsmac ? +xgmac ? +cassini ? +com20020 ? +com20020-pci ? +com90io ? +com90xx ? +cs89x0 ? +de2104x ? +de4x5 ? +de600 ? +de620 ? +defxx ? +depca ? +dl2k ? +dmfe ? +dummy ? +e100 ? +e1000 ? +e1000e ? +e2100 ? +eepro ? +eepro100 ? +eexpress ? +enic ? +epic100 ? +eql ? +es3210 ? +eth16i ? +ewrk3 ? +fealnx ? +forcedeth ? +igb ? +ps3_gelic ? +hamachi ? +hermes ? +hp ? +hp100 ? +hp-plus ? +ibmtr ? +ipddp ? +ipw2100 ? +ipw2200 ? +iwl3945 ? +iwl4965 ? +iwl-legacy ? +iwldvm ? +iwlwifi ? +ixgb ? +ixgbe ? +lance ? +lanstreamer ? +lasi_82596 ? +lne390 ? +lp486e ? +mace ? +mlx4_core ? +mlx4_en ? +mlx5_core ? +mv643xx_eth ? +myri_sbus ? +natsemi ? +ne ? +ne2 ? +ne2k-pci ? +ne3210 ? +netconsole ? +netxen_nic ? +ni5010 ? +ni52 ? +ni65 ? +niu ? +ns83820 ? +olympic ? +orinoco ? +orinoco_pci ? +orinoco_plx ? +orinoco_tmd ? +pcnet32 ? +qlcnic ? +r815x ? +r8169 ? +rate_control ? +rfc1051 ? +rfc1201 ? +rrunner ? +rt2400 ? +rt2400pci ? +rt2500 ? +rt2500pci ? +rt2800pci ? +rt61pci ? +s2io ? +shaper ? +sis190 ? +sis900 ? +spidernet ? +skfp ? +skge ? +sk98lin ? +sky2 ? +smc9194 ? +smc-ultra ? +smc-ultra32 ? +starfire ? +strip ? +sunbmac ? +sundance ? +sungem ? +sungem_phy ? +sunhme ? +sunlance ? +sunqe ? +sunvnet ? +tg3 ? +tlan ? +tms380tr ? +tmspci ? +tulip ? +tun ? +typhoon ? +uli526x ? +via-rhine ? +via-velocity ? +virtio_net ? +wavelan ? +wd ? +winbond-840 ? +yellowfin ? +znet ? +vmxnet3 ? +xen-netfront ? +xgene-enet ? --- linux-3.13.0.orig/debian.master/d-i/modules/nic-pcmcia-modules +++ linux-3.13.0/debian.master/d-i/modules/nic-pcmcia-modules @@ -0,0 +1,19 @@ +3c574_cs ? 
+3c589_cs ? +airo_cs ? +atmel_cs ? +axnet_cs ? +com20020_cs ? +fmvj18x_cs ? +ibmtr_cs ? +netwave_cs ? +nmclan_cs ? +orinoco_cs ? +pcnet_cs ? +ray_cs ? +smc91c92_cs ? +wavelan_cs ? +wl3501_cs ? +xirc2ps_cs ? +xircom_cb ? +xircom_tulip_cb ? --- linux-3.13.0.orig/debian.master/d-i/modules/nic-shared-modules +++ linux-3.13.0/debian.master/d-i/modules/nic-shared-modules @@ -0,0 +1,26 @@ +# PHY +8390 ? +mii ? + +# CRC modules +crc-ccitt ? +crc-itu-t ? +libcrc32c ? + +# mac80211 stuff +mac80211 ? +cfg80211 ? + +# rt2x00 lib (since rt2x00 is split across usb/pci/cb +rt2x00lib ? +rt2800lib ? + +# Atheros library (since drivers are split across nic-modules/nic-usb-modules) +ath ? + +# Wireless 802.11 modules +lib80211 ? +cfg80211 ? +lib80211_crypt_ccmp ? +lib80211_crypt_tkip ? +lib80211_crypt_wep ? --- linux-3.13.0.orig/debian.master/d-i/modules/nic-usb-modules +++ linux-3.13.0/debian.master/d-i/modules/nic-usb-modules @@ -0,0 +1,32 @@ +catc ? +kaweth ? +pegasus ? +prism2_usb ? +rtl8150 ? +usbnet ? +zd1211rw ? +zd1201 ? +rt2500usb ? +rt73usb ? +rt2570 ? +rt2800usb ? +rt2x00usb ? +cdc_ether ? +asix ? +cdc_eem ? +cdc_ether ? +cdc-phonet ? +cdc_subset ? +dm9601 ? +gl620a ? +hso ? +int51x1 ? +mcs7830 ? +net1080 ? +plusb ? +rndis_host ? +smsc95xx ? +zaurus ? +carl9170 ? +smsc75xx ? +smsc95xx ? --- linux-3.13.0.orig/debian.master/d-i/modules/parport-modules +++ linux-3.13.0/debian.master/d-i/modules/parport-modules @@ -0,0 +1,2 @@ +parport ? +parport_pc ? --- linux-3.13.0.orig/debian.master/d-i/modules/pata-modules +++ linux-3.13.0/debian.master/d-i/modules/pata-modules @@ -0,0 +1,47 @@ +pata_ali.ko ? +pata_amd.ko ? +pata_artop.ko ? +pata_atiixp.ko ? +pata_atp867x.ko ? +pata_cmd640.ko ? +pata_cmd64x.ko ? +pata_cs5520.ko ? +pata_cs5530.ko ? +pata_cs5535.ko ? +pata_cs5536.ko ? +pata_cypress.ko ? +pata_efar.ko ? +pata_hpt366.ko ? +pata_hpt37x.ko ? +pata_hpt3x2n.ko ? +pata_hpt3x3.ko ? +pata_isapnp.ko ? +pata_it8213.ko ? +pata_it821x.ko ? +pata_jmicron.ko ? +pata_legacy.ko ? +pata_macio.ko ? +pata_marvell.ko ? +pata_mpiix.ko ? +pata_netcell.ko ? +pata_ninja32.ko ? +pata_ns87410.ko ? +pata_ns87415.ko ? +pata_oldpiix.ko ? +pata_optidma.ko ? +pata_opti.ko ? +pata_pcmcia.ko ? +pata_pdc2027x.ko ? +pata_pdc202xx_old.ko ? +pata_qdi.ko ? +pata_radisys.ko ? +pata_rdc.ko ? +pata_rz1000.ko ? +pata_sc1200.ko ? +pata_sch.ko ? +pata_serverworks.ko ? +pata_sil680.ko ? +pata_sl82c105.ko ? +pata_triflex.ko ? +pata_via.ko ? +pata_winbond.ko ? --- linux-3.13.0.orig/debian.master/d-i/modules/pcmcia-modules +++ linux-3.13.0/debian.master/d-i/modules/pcmcia-modules @@ -0,0 +1,8 @@ +i82092 ? +i82365 ? +pcmcia ? +pcmcia_core ? +pd6729 ? +rsrc_nonstatic ? +tcic ? +yenta_socket ? --- linux-3.13.0.orig/debian.master/d-i/modules/pcmcia-storage-modules +++ linux-3.13.0/debian.master/d-i/modules/pcmcia-storage-modules @@ -0,0 +1,6 @@ +pata_pcmcia ? +qlogic_cs ? +fdomain_cs ? +aha152x_cs ? +nsp_cs ? +sym53c500_cs ? --- linux-3.13.0.orig/debian.master/d-i/modules/plip-modules +++ linux-3.13.0/debian.master/d-i/modules/plip-modules @@ -0,0 +1 @@ +plip ? --- linux-3.13.0.orig/debian.master/d-i/modules/ppp-modules +++ linux-3.13.0/debian.master/d-i/modules/ppp-modules @@ -0,0 +1,6 @@ +ppp_async ? +ppp_deflate ? +ppp_mppe ? +pppoe ? +pppox ? +ppp_synctty ? --- linux-3.13.0.orig/debian.master/d-i/modules/sata-modules +++ linux-3.13.0/debian.master/d-i/modules/sata-modules @@ -0,0 +1,18 @@ +sata_inic162x.ko ? +sata_mv.ko ? +sata_nv.ko ? +sata_promise.ko ? +sata_qstor.ko ? +sata_sil24.ko ? +sata_sil.ko ? +sata_sis.ko ? +sata_svw.ko ? +sata_sx4.ko ? 
+sata_uli.ko ? +sata_via.ko ? +sata_vsc.ko ? +ahci_platform ? +ahci ? +ahci_xgene ? +acard-ahci ? +libahci ? --- linux-3.13.0.orig/debian.master/d-i/modules/scsi-modules +++ linux-3.13.0/debian.master/d-i/modules/scsi-modules @@ -0,0 +1,129 @@ +# SCSI +raid_class ? +scsi_transport_spi ? +scsi_transport_fc ? +scsi_transport_iscsi ? +scsi_transport_sas ? +iscsi_tcp ? +libiscsi ? +amiga7xx ? +a3000 ? +a2091 ? +gvp11 ? +mvme147 ? +sgiwd93 ? +cyberstorm ? +cyberstormII ? +blz2060 ? +blz1230 ? +fastlane ? +oktagon_esp_mod ? +atari_scsi ? +mac_scsi ? +mac_esp ? +sun3_scsi ? +mvme16x ? +bvme6000 ? +sim710 ? +advansys ? +psi240i ? +BusLogic ? +dpt_i2o ? +u14-34f ? +ultrastor ? +aha152x ? +aha1542 ? +aha1740 ? +aic7xxx_old ? +ips ? +fd_mcs ? +fdomain ? +fnic ? +in2000 ? +g_NCR5380 ? +g_NCR5380_mmio ? +NCR53c406a ? +NCR_D700 ? +NCR_Q720_mod ? +sym53c416 ? +qlogicfas408 ? +qla1280 ? +pas16 ? +seagate ? +seagate ? +t128 ? +dmx3191d ? +dtc ? +zalon7xx ? +eata_pio ? +wd7000 ? +mca_53c9x ? +ibmmca ? +eata ? +dc395x ? +tmscsim ? +megaraid ? +atp870u ? +esp ? +gdth ? +initio ? +a100u2w ? +qlogicpti ? +ide-scsi ? +mesh ? +mac53c94 ? +pluto ? +dec_esp ? +3w-xxxx ? +3w-9xxx ? +ppa ? +imm ? +jazz_esp ? +sun3x_esp ? +fcal ? +lasi700 ? +nsp32 ? +ipr ? +hptiop ? +stex ? +osst ? +sg ? +ch ? +scsi_debug ? +aacraid ? +aic7xxx ? +aic79xx ? +aic94xx ? +arcmsr ? +acornscsi_mod ? +arxescsi ? +cumana_1 ? +cumana_2 ? +ecoscsi ? +oak ? +powertec ? +eesox ? +ibmvscsic ? +ibmvfc ? +libsas ? +lpfc ? +megaraid_mm ? +megaraid_mbox ? +megaraid_sas ? +qla2xxx ? +sym53c8xx ? +qla4xxx ? +mvsas ? +vmw_pvscsi ? +ums-cypress ? +be2iscsi ? +3w-sas ? +isci ? +mlx4_ib ? +mlx5_ib ? + +# device handlers +scsi_dh_alua ? +scsi_dh_emc ? +scsi_dh_rdac ? +scsi_dh_hp_sw ? --- linux-3.13.0.orig/debian.master/d-i/modules/serial-modules +++ linux-3.13.0/debian.master/d-i/modules/serial-modules @@ -0,0 +1,4 @@ +generic_serial ? +serial_cs ? +synclink_cs ? +hyperv-keyboard ? --- linux-3.13.0.orig/debian.master/d-i/modules/speakup-modules +++ linux-3.13.0/debian.master/d-i/modules/speakup-modules @@ -0,0 +1,16 @@ +speakup ? +speakup_acntpc ? +speakup_acntsa ? +speakup_apollo ? +speakup_audptr ? +speakup_bns ? +speakup_decext ? +speakup_dectlk ? +speakup_dtlk ? +speakup_dummy ? +speakup_keypc ? +speakup_ltlk ? +speakup_soft ? +speakup_spkout ? +speakup_txprt ? +speakup_decpc ? --- linux-3.13.0.orig/debian.master/d-i/modules/squashfs-modules +++ linux-3.13.0/debian.master/d-i/modules/squashfs-modules @@ -0,0 +1 @@ +squashfs ? --- linux-3.13.0.orig/debian.master/d-i/modules/storage-core-modules +++ linux-3.13.0/debian.master/d-i/modules/storage-core-modules @@ -0,0 +1,10 @@ +# Core stacks +usb-storage ? + +# Block level + +# Loop modules +cryptoloop ? + +# Needs to be here for better cdrom initrd layout +isofs ? --- linux-3.13.0.orig/debian.master/d-i/modules/usb-modules +++ linux-3.13.0/debian.master/d-i/modules/usb-modules @@ -0,0 +1,10 @@ +ehci-hcd ? +isp116x-hcd ? +isp1760 ? +ohci-hcd ? +r8a66597-hcd ? +sl811_cs ? +sl811-hcd ? +u132-hcd ? +uhci-hcd ? +xhci-hcd ? --- linux-3.13.0.orig/debian.master/d-i/modules/virtio-modules +++ linux-3.13.0/debian.master/d-i/modules/virtio-modules @@ -0,0 +1,11 @@ +virtio_balloon ? +virtio_pci ? +virtio_ring ? +virtio-rng ? +virtio_scsi ? +hv_vmbus ? +hv_utils ? +hv_netvsc ? +hv_mouse ? +hv_storvsc ? +hv_balloon ? --- linux-3.13.0.orig/debian.master/d-i/modules/vlan-modules +++ linux-3.13.0/debian.master/d-i/modules/vlan-modules @@ -0,0 +1,3 @@ +slp ? +garp ? +8021q ? 
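The d-i module lists above use one convention throughout: one module name per line, with a trailing '?' marking an entry as optional, so a flavour that does not build that module does not fail udeb generation (the pata and sata lists also carry an explicit .ko suffix, the others do not). A minimal sketch of how such a list could be cross-checked against a built module tree follows; the script, the MODDIR default and the list path are illustrative assumptions, not part of this packaging.
#!/bin/sh
# Illustrative check, not part of the Ubuntu packaging: report which entries
# of a d-i module list are present in an assumed module build tree.
MODDIR=${1:-debian/build/build-generic}      # assumed location of built .ko files
LIST=${2:-debian.master/d-i/modules/block-modules}
grep -v '^#' "$LIST" | while read -r name flag; do
    [ -z "$name" ] && continue
    base=${name%.ko}                         # pata/sata lists carry a .ko suffix
    if find "$MODDIR" -name "${base}.ko*" 2>/dev/null | grep -q .; then
        echo "present : $base"
    elif [ "$flag" = "?" ]; then
        echo "optional: $base (missing, tolerated)"
    else
        echo "MISSING : $base"
    fi
done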
--- linux-3.13.0.orig/debian.master/d-i/package-list +++ linux-3.13.0/debian.master/d-i/package-list @@ -0,0 +1,202 @@ +Package: kernel-image +Provides: ext3-modules, ext4-modules +Provides_amd64: efi-modules, ext3-modules, ext4-modules +Provides_i386: efi-modules, ext3-modules, ext4-modules +Provides_ppc64el: ext3-modules, ext4-modules, fat-modules + +Package: fat-modules +Depends: kernel-image +Priority: standard +Description: FAT filesystem support + This includes Windows FAT and VFAT support. + +Package: fb-modules +Depends: kernel-image +Priority: standard +Description: Framebuffer modules + +Package: firewire-core-modules +Depends: kernel-image, storage-core-modules +Priority: standard +Description: Firewire (IEEE-1394) Support + +Package: floppy-modules +Depends: kernel-image +Priority: standard +Description: Floppy driver support + +Package: fs-core-modules +Depends: kernel-image +Priority: standard +Provides: ext2-modules, jfs-modules, reiserfs-modules, xfs-modules +Description: Base filesystem modules + This includes jfs, reiserfs and xfs. + +Package: fs-secondary-modules +Depends: kernel-image, fat-modules +Priority: standard +Provides: btrfs-modules, ntfs-modules, hfs-modules +Description: Extra filesystem modules + This includes support for Windows NTFS and MacOS HFS/HFSPlus + +Package: input-modules +Depends: kernel-image, usb-modules +Priority: standard +Description: Support for various input methods + +Package: irda-modules +Depends: kernel-image, nic-shared-modules +Priority: standard +Description: Support for Infrared protocols + +Package: md-modules +Depends: kernel-image +Priority: standard +Provides: crypto-dm-modules +Description: Multi-device support (raid, device-mapper, lvm) + +Package: nic-modules +Depends: kernel-image, nic-shared-modules, virtio-modules +Priority: standard +Description: Network interface support + +Package: nic-pcmcia-modules +Depends: kernel-image, nic-shared-modules, nic-modules +Priority: standard +Description: PCMCIA network interface support + +Package: nic-usb-modules +Depends: kernel-image, nic-shared-modules, usb-modules +Priority: standard +Description: USB network interface support + +Package: nic-shared-modules +Depends: kernel-image, crypto-modules +Priority: standard +Description: nic shared modules + This package contains modules which support nic modules + +Package: parport-modules +Depends: kernel-image +Priority: standard +Description: Parallel port support + +Package: pata-modules +Depends: kernel-image, storage-core-modules +Priority: standard +Description: PATA support modules + +Package: pcmcia-modules +Depends: kernel-image +Priority: standard +Description: PCMCIA Modules + +Package: pcmcia-storage-modules +Depends: kernel-image, scsi-modules +Priority: standard +Description: PCMCIA storage support + +Package: plip-modules +Depends: kernel-image, nic-shared-modules, parport-modules +Priority: standard +Description: PLIP (parallel port) networking support + +Package: ppp-modules +Depends: kernel-image, nic-shared-modules, serial-modules +Priority: standard +Description: PPP (serial port) networking support + +Package: sata-modules +Depends: kernel-image, storage-core-modules +Priority: standard +Description: SATA storage support + +Package: scsi-modules +Depends: kernel-image, storage-core-modules +Priority: standard +Description: SCSI storage support + +Package: serial-modules +Depends: kernel-image +Priority: standard +Description: Serial port support + +Package: storage-core-modules +Depends: kernel-image +Priority: 
standard +Provides: loop-modules +Description: Core storage support + Includes core SCSI, LibATA, USB-Storage. Also includes related block + devices for CD, Disk and Tape medium (and IDE Floppy). + +Package: usb-modules +Depends: kernel-image, storage-core-modules +Priority: standard +Description: Core USB support + +Package: nfs-modules +Priority: standard +Depends: kernel-image +Description: NFS filesystem drivers + Includes the NFS client driver, and supporting modules. + +Package: block-modules +Priority: standard +Provides: nbd-modules +Depends: kernel-image, storage-core-modules, parport-modules, virtio-modules +Description: Block storage devices + This package contains the block storage devices, including DAC960 and + paride. + +Package: message-modules +Priority: standard +Depends: kernel-image, storage-core-modules, scsi-modules +Description: Fusion and i2o storage modules + This package contains the fusion and i2o storage modules. + +Package: crypto-modules +Priority: extra +Depends: kernel-image +Description: crypto modules + This package contains crypto modules. + +Package: virtio-modules +Priority: standard +Depends: kernel-image +Description: VirtIO Modules + Includes modules for VirtIO (virtual machine, generally kvm guests) + +Package: socket-modules +Depends: kernel-image +Priority: standard +Description: Unix socket support + +Package: mouse-modules +Depends: kernel-image, input-modules, usb-modules +Priority: extra +Description: Mouse support + This package contains mouse drivers for the Linux kernel. + +Package: squashfs-modules +Depends: kernel-image +Priority: extra +Description: squashfs modules + This package contains squashfs modules. + +Package: vlan-modules +Depends: kernel-image +Priority: extra +Description: vlan modules + This package contains vlan (802.1Q) modules. + +Package: ipmi-modules +Depends: kernel-image +Priority: standard +Description: ipmi modules + +Package: multipath-modules +Depends: kernel-image +Priority: extra +Description: DM-Multipath support + This package contains modules for device-mapper multipath support. + --- linux-3.13.0.orig/debian.master/etc/getabis +++ linux-3.13.0/debian.master/etc/getabis @@ -0,0 +1,19 @@ +repo_list=( + "http://archive.ubuntu.com/ubuntu/pool/main/l/linux" + "http://ports.ubuntu.com/ubuntu-ports/pool/main/l/linux" + "http://archive.ubuntu.com/ubuntu/pool/universe/l/linux" + "http://ports.ubuntu.com/ubuntu-ports/pool/universe/l/linux" + "http://ppa.launchpad.net/canonical-kernel-team/ppa/ubuntu/pool/main/l/linux" +) + +package_prefixes linux-image linux-image-extra + +getall armhf generic +getall armhf generic-lpae +getall amd64 generic lowlatency +getall i386 generic lowlatency +getall arm64 generic +getall ppc64el generic + +# Ports arches and flavours.
+getall powerpc powerpc-smp powerpc64-smp powerpc-e500 powerpc-e500mc powerpc64-emb --- linux-3.13.0.orig/debian.master/etc/kernelconfig +++ linux-3.13.0/debian.master/etc/kernelconfig @@ -0,0 +1,7 @@ +if [ "$variant" = "ports" ]; then + archs="" + family='ports' +else + archs="amd64 i386 armhf arm64 ppc64el powerpc" + family='ubuntu' +fi --- linux-3.13.0.orig/debian.master/rules.d/amd64.mk +++ linux-3.13.0/debian.master/rules.d/amd64.mk @@ -0,0 +1,17 @@ +human_arch = 64 bit x86 +build_arch = x86_64 +header_arch = $(build_arch) +defconfig = defconfig +flavours = generic lowlatency +build_image = bzImage +kernel_file = arch/$(build_arch)/boot/bzImage +install_file = vmlinuz +loader = grub +vdso = vdso_install +no_dumpfile = true +uefi_signed = true +do_tools_cpupower = true +do_tools_perf = true +do_tools_x86 = true +do_tools_hyperv = true +do_extras_package = true --- linux-3.13.0.orig/debian.master/rules.d/arm64.mk +++ linux-3.13.0/debian.master/rules.d/arm64.mk @@ -0,0 +1,19 @@ +human_arch = ARMv8 +build_arch = arm64 +header_arch = arm64 +defconfig = defconfig +flavours = generic +build_image = Image +kernel_file = arch/$(build_arch)/boot/Image +install_file = vmlinuz +no_dumpfile = true + +loader = grub +vdso = vdso_install + +do_tools_cpupower = true +do_tools_perf = true + +dtb_files_generic += apm-mustang.dtb +dtb_files_generic += foundation-v8.dtb +dtb_files_generic += rtsm_ve-aemv8a.dtb --- linux-3.13.0.orig/debian.master/rules.d/armhf.mk +++ linux-3.13.0/debian.master/rules.d/armhf.mk @@ -0,0 +1,35 @@ +human_arch = ARM (hard float) +build_arch = arm +header_arch = arm +defconfig = defconfig +flavours = generic generic-lpae +build_image = zImage +kernel_file = arch/$(build_arch)/boot/zImage +install_file = vmlinuz +no_dumpfile = true + +loader = grub + +do_tools_cpupower = true +do_tools_perf = true + +# Flavour specific configuration. +dtb_files_generic += highbank.dtb +dtb_files_generic += imx6q-sabrelite.dtb +dtb_files_generic += imx6dl-wandboard.dtb +dtb_files_generic += imx6q-wandboard.dtb +dtb_files_generic += omap3-beagle-xm.dtb +dtb_files_generic += omap4-panda.dtb +dtb_files_generic += omap4-panda-es.dtb +dtb_files_generic += tegra20-harmony.dtb +dtb_files_generic += tegra20-paz00.dtb +dtb_files_generic += tegra20-seaboard.dtb +dtb_files_generic += tegra20-trimslice.dtb +dtb_files_generic += tegra20-ventana.dtb +dtb_files_generic += tegra20-whistler.dtb +dtb_files_generic += am335x-bone.dtb +dtb_files_generic += am335x-boneblack.dtb +dtb_files_generic += vexpress-v2p-ca9.dtb + +dtb_files_generic-lpae += highbank.dtb +dtb_files_generic-lpae += vexpress-v2p-ca15-tc1.dtb --- linux-3.13.0.orig/debian.master/rules.d/i386.mk +++ linux-3.13.0/debian.master/rules.d/i386.mk @@ -0,0 +1,15 @@ +human_arch = 32 bit x86 +build_arch = i386 +header_arch = x86_64 +defconfig = defconfig +flavours = generic lowlatency +build_image = bzImage +kernel_file = arch/$(build_arch)/boot/bzImage +install_file = vmlinuz +loader = grub +vdso = vdso_install +no_dumpfile = true +do_tools_cpupower = true +do_tools_perf = true +do_tools_x86 = true +do_extras_package = true --- linux-3.13.0.orig/debian.master/rules.d/powerpc.mk +++ linux-3.13.0/debian.master/rules.d/powerpc.mk @@ -0,0 +1,29 @@ +human_arch = PowerPC (32 bit userspace) +build_arch = powerpc +header_arch = $(build_arch) +defconfig = pmac32_defconfig +flavours = powerpc-smp powerpc64-smp powerpc-e500 powerpc-e500mc powerpc64-emb +build_image = zImage +kernel_file = $(shell if [ ! 
-f $(builddir)/build-$*/vmlinux.strip ] && \ + [ -f $(builddir)/build-$*/vmlinux.strip.gz ]; then \ + gunzip -c $(builddir)/build-$*/vmlinux.strip.gz \ + > $(builddir)/build-$*/vmlinux.strip; \ + fi && echo vmlinux.strip) +install_file = vmlinux + +# These flavours differ +build_image_powerpc-e500mc = uImage +kernel_file_powerpc-e500mc = arch/powerpc/boot/uImage + +build_image_powerpc-e500 = uImage +kernel_file_powerpc-e500 = arch/powerpc/boot/uImage + +loader = yaboot + +custom_flavours = + +no_dumpfile = true +do_tools_cpupower = true +do_tools_perf = true + +family = ubuntu --- linux-3.13.0.orig/debian.master/rules.d/ppc64el.mk +++ linux-3.13.0/debian.master/rules.d/ppc64el.mk @@ -0,0 +1,17 @@ +human_arch = PowerPC 64el +build_arch = powerpc +header_arch = $(build_arch) +defconfig = pseries_le_defconfig +flavours = generic +build_image = vmlinux.strip +kernel_file = arch/powerpc/boot/vmlinux.strip +install_file = vmlinux +no_dumpfile = true +loader = grub +vdso = vdso_install +do_extras_package = true + +do_tools_cpupower = true +do_tools_perf = true + +#do_flavour_image_package = false --- linux-3.13.0.orig/debian.master/rules.d/x32.mk +++ linux-3.13.0/debian.master/rules.d/x32.mk @@ -0,0 +1,14 @@ +human_arch = 64 bit x86 (32 bit userspace) +build_arch = x86_64 +header_arch = $(build_arch) +defconfig = defconfig +flavours = +build_image = bzImage +kernel_file = arch/$(build_arch)/boot/bzImage +install_file = vmlinuz +loader = grub +vdso = vdso_install +no_dumpfile = true +uefi_signed = true + +do_flavour_image_package = false --- linux-3.13.0.orig/debian/changelog +++ linux-3.13.0/debian/changelog @@ -0,0 +1,19693 @@ +linux (3.13.0-54.91) trusty; urgency=medium + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1458618 + + [ Upstream Kernel Changes ] + + * [3.13-stable only] Revert "gianfar: Carefully free skbs in functions + called by netpoll." 
+ - LP: #1454746 + + -- Luis Henriques Tue, 26 May 2015 17:19:30 +0100 + +linux (3.13.0-54.90) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1458618 + + [ Andy Whitcroft ] + + * [Config] push off linux-lts-{utopic, vivid}-tools-common + - LP: #1405807 + + [ Brad Figg ] + + * hyper-v -- add hid and fb drivers to linux-virtual + - LP: #1444179 + + [ Upstream Kernel Changes ] + + * DT doc: net: cpsw mac-address is optional + - LP: #1452628 + * net: cpsw: Add missing return value + - LP: #1452628 + * net: cpsw: header, Add missing include + - LP: #1452628 + * net: cpsw: Add am33xx MACID readout + - LP: #1452628 + * am33xx: define syscon control module device node + - LP: #1452628 + * arm: dts: am33xx, Add syscon phandle to cpsw node + - LP: #1452628 + * net: cpsw: do not register cpts twice + - LP: #1452620 + * x86: kvm: Revert "remove sched notifier for cross-cpu migrations" + - LP: #1450584 + * x86: vdso: fix pvclock races with task migration + - LP: #1450584 + * n_tty: Fix read buffer overwrite when no newline + - LP: #1381005, #1454746 + * KVM: x86: Fix lost interrupt on irr_pending race + - LP: #1454746 + * writeback: add missing INITIAL_JIFFIES init in + global_update_bandwidth() + - LP: #1454746 + * nbd: fix possible memory leak + - LP: #1454746 + * mfd: kempld-core: Fix callback return value check + - LP: #1454746 + * KVM: nVMX: mask unrestricted_guest if disabled on L0 + - LP: #1454746 + * spi: trigger trace event for message-done before mesg->complete + - LP: #1454746 + * powerpc/pseries: Little endian fixes for post mobility device tree + update + - LP: #1454746 + * net: ethernet: pcnet32: Setup the SRAM and NOUFLO on Am79C97{3, 5} + - LP: #1454746 + * perf: Fix irq_work 'tail' recursion + - LP: #1454746 + * arm64: Use the reserved TTBR0 if context switching to the init_mm + - LP: #1454746 + * selinux: fix sel_write_enforce broken return value + - LP: #1454746 + * mm: fix anon_vma->degree underflow in anon_vma endless growing + prevention + - LP: #1454746 + * mm/memory hotplug: postpone the reset of obsolete pgdat + - LP: #1454746 + * hfsplus: fix B-tree corruption after insertion at position 0 + - LP: #1454746 + * ARC: SA_SIGINFO ucontext regs off-by-one + - LP: #1454746 + * writeback: fix possible underflow in write bandwidth calculation + - LP: #1454746 + * iio: fix drivers that check buffer->scan_mask + - LP: #1454746 + * iio: inv_mpu6050: Clear timestamps fifo while resetting hardware fifo + - LP: #1454746 + * iio: core: Fix double free. + - LP: #1454746 + * USB: ftdi_sio: Added custom PID for Synapse Wireless product + - LP: #1454746 + * iwlwifi: dvm: run INIT firmware again upon .start() + - LP: #1454746 + * USB: keyspan_pda: add new device id + - LP: #1454746 + * cifs: smb2_clone_range() - exit on unhandled error + - LP: #1454746 + * cifs: fix use-after-free bug in find_writable_file + - LP: #1454746 + * can: flexcan: Deferred on Regulator return EPROBE_DEFER + - LP: #1454746 + * usb: xhci: handle Config Error Change (CEC) in xhci driver + - LP: #1454746 + * usb: xhci: apply XHCI_AVOID_BEI quirk to all Intel xHCI controllers + - LP: #1454746 + * USB: ftdi_sio: Use jtag quirk for SNAP Connect E10 + - LP: #1454746 + * tty: serial: fsl_lpuart: clear receive flag on FIFO flush + - LP: #1454746 + * radeon: Do not directly dereference pointers to BIOS area. 
+ - LP: #1454746 + * iio: imu: Use iio_trigger_get for indio_dev->trig assignment + - LP: #1454746 + * dmaengine: edma: fix memory leak when terminating running transfers + - LP: #1454746 + * dmaengine: omap-dma: Fix memory leak when terminating running transfer + - LP: #1454746 + * x86/reboot: Add ASRock Q1900DC-ITX mainboard reboot quirk + - LP: #1454746 + * mac80211: fix RX A-MPDU session reorder timer deletion + - LP: #1454746 + * tcp: prevent fetching dst twice in early demux code + - LP: #1454746 + * net: use for_each_netdev_safe() in rtnl_group_changelink() + - LP: #1454746 + * xen-netfront: transmit fully GSO-sized packets + - LP: #1454746 + * tcp: fix FRTO undo on cumulative ACK of SACKed range + - LP: #1454746 + * PCI: cpcihp: Add missing curly braces in cpci_configure_slot() + - LP: #1454746 + * sh_veu: v4l2_dev wasn't set + - LP: #1454746 + * media: s5p-mfc: fix mmap support for 64bit arch + - LP: #1454746 + * cpuidle: ACPI: do not overwrite name and description of C0 + - LP: #1454746 + * ioctx_alloc(): fix vma (and file) leak on failure + - LP: #1454746 + * ALSA: hda/realtek - Make more stable to get pin sense for ALC283 + - LP: #1454746 + * be2iscsi: Fix kernel panic when device initialization fails + - LP: #1454746 + * Defer processing of REQ_PREEMPT requests for blocked devices + - LP: #1454746 + * ALSA: hda - Fix headphone pin config for Lifebook T731 + - LP: #1454746 + * ocfs2: _really_ sync the right range + - LP: #1454746 + * ALSA: usb - Creative USB X-Fi Pro SB1095 volume knob support + - LP: #1454746 + * iscsi target: fix oops when adding reject pdu + - LP: #1454746 + * net/mlx4_en: Call register_netdevice in the proper location + - LP: #1454746 + * ipv6: protect skb->sk accesses from recursive dereference inside the + stack + - LP: #1454746 + * tcp: tcp_make_synack() should clear skb->tstamp + - LP: #1454746 + * 8139cp: Call dev_kfree_skby_any instead of kfree_skb. + - LP: #1454746 + * 8139too: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * r8169: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * bonding: Call dev_kfree_skby_any instead of kfree_skb. + - LP: #1454746 + * bnx2: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * tg3: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * ixgb: Call dev_kfree_skby_any instead of dev_kfree_skb. + - LP: #1454746 + * benet: Call dev_kfree_skby_any instead of kfree_skb. + - LP: #1454746 + * gianfar: Carefully free skbs in functions called by netpoll. + - LP: #1454746 + * ip_forward: Drop frames with attached skb->sk + - LP: #1454746 + * tcp: fix possible deadlock in tcp_send_fin() + - LP: #1454746 + * tcp: avoid looping in tcp_send_fin() + - LP: #1454746 + * net: do not deplete pfmemalloc reserve + - LP: #1454746 + * net: fix crash in build_skb() + - LP: #1454746 + * ipv4: Missing sk_nulls_node_init() in ping_unhash(). 
+ - LP: #1454746 + * Linux 3.13.11-ckt20 + - LP: #1454746 + * of: Add support for ePAPR "stdout-path" property + - LP: #1438585 + * lib: add glibc style strchrnul() variant + - LP: #1438585 + * of: Create unlocked version of for_each_child_of_node() + - LP: #1438585 + * of: Make of_find_node_by_path() handle /aliases + - LP: #1438585 + * of: Create of_console_check() for selecting a console specified in + /chosen + - LP: #1438585 + * of: Enable console on serial ports specified by /chosen/stdout-path + - LP: #1438585 + * of: correct of_console_check()'s return value + - LP: #1438585 + * of: Add bindings for chosen node, stdout-path + - LP: #1438585 + * of: add optional options parameter to of_find_node_by_path() + - LP: #1438585 + * of: support passing console options with stdout-path + - LP: #1438585 + * (upstream) net/mlx4_core: Adjust command timeouts to conform to the + firmware spec + - LP: #1455121 + * arm64: kernel: add MPIDR_EL1 accessors macros + - LP: #1455372 + * of: reimplement the matching method for __of_match_node() + - LP: #1455372 + * arm64: remove redundant "psci:" prefixes + - LP: #1455372 + * arm64: remove return value form psci_init() + - LP: #1455372 + * arm: KVM: Don't return PSCI_INVAL if waitqueue is inactive + - LP: #1455372 + * KVM: Add capability to advertise PSCI v0.2 support + - LP: #1455372 + * ARM/ARM64: KVM: Add common header for PSCI related defines + - LP: #1455372 + * ARM/ARM64: KVM: Add base for PSCI v0.2 emulation + - LP: #1455372 + * KVM: Documentation: Add info regarding KVM_ARM_VCPU_PSCI_0_2 feature + - LP: #1455372 + * ARM/ARM64: KVM: Make kvm_psci_call() return convention more flexible + - LP: #1455372 + * KVM: Add KVM_EXIT_SYSTEM_EVENT to user space API header + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 SYSTEM_OFF and SYSTEM_RESET + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 AFFINITY_INFO + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 MIGRATE_INFO_TYPE and related + functions + - LP: #1455372 + * ARM/ARM64: KVM: Fix CPU_ON emulation for PSCI v0.2 + - LP: #1455372 + * ARM/ARM64: KVM: Emulate PSCI v0.2 CPU_SUSPEND + - LP: #1455372 + * ARM/ARM64: KVM: Advertise KVM_CAP_ARM_PSCI_0_2 to user space + - LP: #1455372 + * PSCI: Add initial support for PSCIv0.2 functions + - LP: #1455372 + * Documentation: devicetree: Add new binding for PSCIv0.2 + - LP: #1455372 + * ARM: Check if a CPU has gone offline + - LP: #1455372 + * arm64: KVM: Enable minimalistic support for Cortex-A53 + - LP: #1455372 + * HID: multitouch: add support of clickpads + - LP: #1456881 + * vhost/scsi: potential memory corruption + - LP: #1457807 + - CVE-2015-4036 + + -- Luis Henriques Tue, 26 May 2015 11:21:53 +0100 + +linux (3.13.0-53.89) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1456976 + + [ Upstream Kernel Changes ] + + * tcp: Fix crash in TCP Fast Open + - LP: #1447371 + - CVE-2015-3332 + + -- Luis Henriques Wed, 20 May 2015 11:00:58 +0100 + +linux (3.13.0-53.88) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1454785 + + [ Upstream Kernel Changes ] + + * mmc: card: Don't access RPMB partitions for normal read/write + - LP: #1454013 + + -- Brad Figg Wed, 13 May 2015 10:15:29 -0700 + +linux (3.13.0-53.87) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1452736 + + [ dann frazier ] + + * [Config] CONFIG_{EFI_PARAMS_FROM_FDT,GENERIC_EARLY_IOREMAP,LIBFDT}=y + - LP: #1441876 + * Move get_dram_base to arm private file + - LP: #1441876 + * arm64: Implement 
efi_enabled() + - LP: #1441876 + * [Config] CONFIG_RTC_DRV_EFI=y on arm64 + - LP: #1441291 + + [ Kamal Mostafa ] + + * Fix "mei: me: release hw from reset only during the reset flow" + - LP: #1450813 + + [ Stefan Bader ] + + * SAUCE: vesafb: Set mtrr:3 (write-combining) as default + - LP: #1434581 + + [ Upstream Kernel Changes ] + + * Revert "net: cx82310_eth: use common match macro" + - LP: #1451900 + * netfilter: nf_conntrack: reserve two bytes for nf_ct_ext->len + - LP: #1442080 + - CVE-2014-9715 + * add generic fixmap.h + - LP: #1441876 + * mm: create generic early_ioremap() support + - LP: #1441876 + * arm64: initialize pgprot info earlier in boot + - LP: #1441876 + * arm64: add early_ioremap support + - LP: #1441876 + * arm64: fixmap: fix missing sub-page offset for earlyprintk + - LP: #1441876 + * efi: create memory map iteration helper + - LP: #1441876 + * efi: Add get_dram_base() helper function + - LP: #1441876 + * lib: add fdt_empty_tree.c + - LP: #1441876 + * doc: efi-stub.txt updates for ARM + - LP: #1441876 + * efi: add helper function to get UEFI params from FDT + - LP: #1441876 + * arm64: Add function to create identity mappings + - LP: #1441876 + * efi: Add shared FDT related functions for ARM/ARM64 + - LP: #1441876 + * arm64: add EFI runtime services + - LP: #1441876 + * doc: arm: add UEFI support documentation + - LP: #1441876 + * arm64: efi: add EFI stub + - LP: #1441876 + * doc: arm64: add description of EFI stub support + - LP: #1441876 + * efi/arm64: ignore dtb= when UEFI SecureBoot is enabled + - LP: #1441876 + * arm64: efi: only attempt efi map setup if booting via EFI + - LP: #1441876 + * PCI: Don't clear ASPM bits when the FADT declares it's unsupported + - LP: #1441335 + * regmap: Skip read-only registers in regcache_sync() + - LP: #1448830 + * rtc: ia64: allow other architectures to use EFI RTC + - LP: #1441291 + * rtc: Disable EFI rtc for x86 + - LP: #1441291 + * mei: me: fix hw ready reset flow + - LP: #1450813 + * Input: serio - add firmware_id sysfs attribute + - LP: #1414930 + * Input: i8042 - add firmware_id support + - LP: #1414930 + * Input: Add INPUT_PROP_TOPBUTTONPAD device property + - LP: #1414930 + * Input: synaptics - report INPUT_PROP_TOPBUTTONPAD property + - LP: #1414930 + * Input: synaptics - add a matches_pnp_id helper function + - LP: #1414930 + * Input: synaptics - change min/max quirk table to pnp-id matching + - LP: #1414930 + * Input: psmouse - add psmouse_matches_pnp_id helper function + - LP: #1414930 + * Input: synaptics - split synaptics_resolution(), query first + - LP: #1414930 + * Input: synaptics - log queried and quirked dimension values + - LP: #1414930 + * Input: synaptics - remove obsolete min/max quirk for X240 + - LP: #1414930 + * Input: synaptics - add min/max quirk for pnp-id LEN2002 (Edge E531) + - LP: #1414930 + * Input: synaptics - add min/max quirk for Lenovo T440s + - LP: #1414930 + * Input: synaptics - adjust min/max for Lenovo ThinkPad X1 Carbon 2nd + - LP: #1414930 + * Input: synaptics - adjust min/max on Thinkpad E540 + - LP: #1414930 + * Input: synaptics - support min/max board id in min_max_pnpid_table + - LP: #1414930 + * Input: synaptics - skip quirks when post-2013 dimensions + - LP: #1414930 + * Input: synaptics - query min dimensions for fw v8.1 + - LP: #1414930 + * Input: synaptics - fix middle button on Lenovo 2015 products + - LP: #1414930 + * Input: synaptics - handle spurious release of trackstick buttons + - LP: #1414930 + * Input: synaptics - do not retrieve the board id on old firmwares + - LP: 
#1414930 + * Input: synaptics - retrieve the extended capabilities in query $10 + - LP: #1414930 + * Input: synaptics - remove TOPBUTTONPAD property for Lenovos 2015 + - LP: #1414930 + * Input: synaptics - re-route tracksticks buttons on the Lenovo 2015 + series + - LP: #1414930 + * Input: synaptics - remove X1 Carbon 3rd gen from the topbuttonpad list + - LP: #1414930 + * Input: synaptics - remove X250 from the topbuttonpad list + - LP: #1414930 + * drm/dp_helper: don't return EPROTO for defers (v2) + - LP: #1450322 + * iio: mxs-lradc: separate touchscreen and buffer virtual channels + - LP: #1451900 + * iio: mxs-lradc: make ADC reads not disable touchscreen interrupts + - LP: #1451900 + * iio: mxs-lradc: make ADC reads not unschedule touchscreen conversions + - LP: #1451900 + * iio: mxs-lradc: only update the buffer when its conversions have + finished + - LP: #1451900 + * iio: imu: adis16400: Fix sign extension + - LP: #1451900 + * iio:adc:mcp3422 Fix incorrect scales table + - LP: #1451900 + * iio: ad5686: fix optional reference voltage declaration + - LP: #1451900 + * usb: dwc3: dwc3-omap: Fix disable IRQ + - LP: #1451900 + * KVM: emulate: fix CMPXCHG8B on 32-bit hosts + - LP: #1451900 + * xhci: Allocate correct amount of scratchpad buffers + - LP: #1451900 + * USB: usbfs: don't leak kernel data in siginfo + - LP: #1451900 + * efi/libstub: Fix boundary checking in efi_high_alloc() + - LP: #1451900 + * USB: ftdi_sio: add PIDs for Actisense USB devices + - LP: #1451900 + * USB: serial: fix potential use-after-free after failed probe + - LP: #1451900 + * USB: serial: fix tty-device error handling at probe + - LP: #1451900 + * mac80211: Send EAPOL frames at lowest rate + - LP: #1451900 + * ARC: Fix KSTK_ESP() + - LP: #1451900 + * USB: serial: cp210x: Adding Seletek device id's + - LP: #1451900 + * mei: make device disabled on stop unconditionally + - LP: #1451900 + * NFSv4: Don't call put_rpccred() under the rcu_read_lock() + - LP: #1451900 + * btrfs: fix lost return value due to variable shadowing + - LP: #1451900 + * KVM: MIPS: Fix trace event to save PC directly + - LP: #1451900 + * usb: ftdi_sio: Add jtag quirk support for Cyber Cortex AV boards + - LP: #1451900 + * eCryptfs: don't pass fs-specific ioctl commands through + - LP: #1451900 + * drm/radeon: do a posting read in r100_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in rs600_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in r600_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in evergreen_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in si_set_irq + - LP: #1451900 + * drm/radeon: do a posting read in cik_set_irq + - LP: #1451900 + * drm/radeon: fix DRM_IOCTL_RADEON_CS oops + - LP: #1451900 + * drm/radeon: fix interlaced modes on DCE8 + - LP: #1451900 + * ACPI / video: Load the module even if ACPI is disabled + - LP: #1451900 + * ASoC: omap-pcm: Correct dma mask + - LP: #1451900 + * x86/asm/entry/64: Remove a bogus 'ret_from_fork' optimization + - LP: #1451900 + * Btrfs: fix data loss in the fast fsync path + - LP: #1451900 + * Btrfs:__add_inode_ref: out of bounds memory read when looking for + extended ref. 
+ - LP: #1451900 + * xhci: fix reporting of 0-sized URBs in control endpoint + - LP: #1451900 + * xhci: Workaround for PME stuck issues in Intel xhci + - LP: #1451900 + * Change email address for 8250_pci + - LP: #1451900 + * tty: fix up atime/mtime mess, take four + - LP: #1451900 + * console: Fix console name size mismatch + - LP: #1451900 + * net: irda: fix wait_until_sent poll timeout + - LP: #1451900 + * USB: serial: fix infinite wait_until_sent timeout + - LP: #1451900 + * TTY: fix tty_wait_until_sent on 64-bit machines + - LP: #1451900 + * sunrpc: fix braino in ->poll() + - LP: #1451900 + * netfilter: xt_socket: fix a stack corruption bug + - LP: #1451900 + * svcrpc: fix memory leak in gssp_accept_sec_context_upcall + - LP: #1451900 + * ipv4: ip_check_defrag should correctly check return value of + skb_copy_bits + - LP: #1451900 + * net: phy: Fix verification of EEE support in phy_init_eee + - LP: #1451900 + * openvswitch: Fix net exit. + - LP: #1451900 + * team: fix possible null pointer dereference in team_handle_frame + - LP: #1451900 + * net: compat: Ignore MSG_CMSG_COMPAT in compat_sys_{send, recv}msg + - LP: #1451900 + * rtnetlink: ifla_vf_policy: fix misuses of NLA_BINARY + - LP: #1451900 + * rtnetlink: call ->dellink on failure when ->newlink exists + - LP: #1451900 + * gen_stats.c: Duplicate xstats buffer for later use + - LP: #1451900 + * ipv4: ip_check_defrag should not assume that skb_network_offset is zero + - LP: #1451900 + * ematch: Fix auto-loading of ematch modules. + - LP: #1451900 + * net: reject creation of netdev names with colons + - LP: #1451900 + * macvtap: make sure neighbour code can push ethernet header + - LP: #1451900 + * usb: plusb: Add support for National Instruments host-to-host cable + - LP: #1451900 + * udp: only allow UFO for packets from SOCK_DGRAM sockets + - LP: #1451900 + * net: ping: Return EAFNOSUPPORT when appropriate. 
+ - LP: #1451900 + * team: don't traverse port list using rcu in team_set_mac_address + - LP: #1451900 + * cpuset: Fix cpuset sched_relax_domain_level + - LP: #1451900 + * workqueue: fix hang involving racing cancel[_delayed]_work_sync()'s for + PREEMPT_NONE + - LP: #1451900 + * bnx2x: Force fundamental reset for EEH recovery + - LP: #1451900 + * spi: pl022: Fix race in giveback() leading to driver lock-up + - LP: #1451900 + * tpm/ibmvtpm: Additional LE support for tpm_ibmvtpm_send + - LP: #1451900 + * libsas: Fix Kernel Crash in smp_execute_task + - LP: #1451900 + * can: add missing initialisations in CAN related skbuffs + - LP: #1451900 + * can: kvaser_usb: Avoid double free on URB submission failures + - LP: #1451900 + * can: kvaser_usb: Read all messages in a bulk-in URB buffer + - LP: #1451900 + * ftrace: Fix en(dis)able graph caller when en(dis)abling record via + sysctl + - LP: #1451900 + * ftrace: Fix ftrace enable ordering of sysctl ftrace_enabled + - LP: #1451900 + * drm/radeon: drop setting UPLL to sleep mode + - LP: #1451900 + * xen-pciback: limit guest control of command register + - LP: #1451900 + * ALSA: hda - Fix built-in mic on Compaq Presario CQ60 + - LP: #1451900 + * ALSA: control: Add sanity checks for user ctl id name string + - LP: #1451900 + * ALSA: snd-usb: add quirks for Roland UA-22 + - LP: #1451900 + * ALSA: hda - Set single_adc_amp flag for CS420x codecs + - LP: #1451900 + * ALSA: hda - Add workaround for MacBook Air 5,2 built-in mic + - LP: #1451900 + * nilfs2: fix deadlock of segment constructor during recovery + - LP: #1451900 + * ALSA: hda - Don't access stereo amps for mono channel widgets + - LP: #1451900 + * ipvs: add missing ip_vs_pe_put in sync code + - LP: #1451900 + * ARM: at91: pm: fix at91rm9200 standby + - LP: #1451900 + * rbd: drop an unsafe assertion + - LP: #1451900 + * fuse: notify: don't move pages + - LP: #1451900 + * fuse: set stolen page uptodate + - LP: #1451900 + * dm thin: fix to consistently zero-fill reads to unprovisioned blocks + - LP: #1451900 + * dm: hold suspend_lock while suspending device during device deletion + - LP: #1451900 + * dm io: deal with wandering queue limits when handling REQ_DISCARD and + REQ_WRITE_SAME + - LP: #1451900 + * crypto: arm/aes update NEON AES module to latest OpenSSL version + - LP: #1451900 + * mac80211: drop unencrypted frames in mesh fwding + - LP: #1451900 + * mac80211: disable u-APSD queues by default + - LP: #1451900 + * ASoC: ak4671: Fix control-less DAPM routes + - LP: #1451900 + * ASoC: da732x: Fix control-less DAPM routes + - LP: #1451900 + * ASoC: sn95031: Fix control-less DAPM routes + - LP: #1451900 + * virtio_console: init work unconditionally + - LP: #1451900 + * virtio_console: avoid config access from irq + - LP: #1451900 + * clocksource: efm32: Fix a NULL pointer dereference + - LP: #1451900 + * x86/vdso: Fix the build on GCC5 + - LP: #1451900 + * ASoC: sgtl5000: remove useless register write clearing CHRGPUMP_POWERUP + - LP: #1451900 + * regmap: regcache-rbtree: Fix present bitmap resize + - LP: #1451900 + * regulator: Only enable disabled regulators on resume + - LP: #1451900 + * regulator: core: Fix enable GPIO reference counting + - LP: #1451900 + * vt6655: RFbSetPower fix missing rate RATE_12M + - LP: #1451900 + * x86/asm/entry/32: Fix user_mode() misuses + - LP: #1451900 + * ASoC: adav80x: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: ak4641: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: cs4271: Fix wrong value references for 
boolean kctl + - LP: #1451900 + * ASoC: pcm1681: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: tas5086: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm2000: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8731: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8903: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8904: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8955: Fix wrong value references for boolean kctl + - LP: #1451900 + * ASoC: wm8960: Fix wrong value references for boolean kctl + - LP: #1451900 + * crypto: aesni - fix memory usage in GCM decryption + - LP: #1451900 + * phy: Find the right match in devm_phy_destroy() + - LP: #1451900 + * x86/fpu: Avoid math_state_restore() without used_math() in + __restore_xstate_sig() + - LP: #1451900 + * x86/fpu: Drop_fpu() should not assume that tsk equals current + - LP: #1451900 + * can: kvaser_usb: Fix tx queue start/stop race conditions + - LP: #1451900 + * nl80211: ignore HT/VHT capabilities without QoS/WMM + - LP: #1451900 + * ALSA: hda - Treat stereo-to-mono mix properly + - LP: #1451900 + * pagemap: do not leak physical addresses to non-privileged userspace + - LP: #1451900 + * of/irq: Fix of_irq_parse_one() returned error codes + - LP: #1451900 + * iscsi-target: Avoid early conn_logout_comp for iser connections + - LP: #1451900 + * tcm_qla2xxx: Fix incorrect use of __transport_register_session + - LP: #1451900 + * target: Fix reference leak in target_get_sess_cmd() error path + - LP: #1451900 + * tcm_fc: missing curly braces in ft_invl_hw_context() + - LP: #1451900 + * target/pscsi: Fix NULL pointer dereference in get_device_type + - LP: #1451900 + * target: Fix virtual LUN=0 target_configure_device failure OOPs + - LP: #1451900 + * xfrm: release dst_orig in case of error in xfrm_lookup() + - LP: #1451900 + * dmaengine: dw: append MODULE_ALIAS for platform driver + - LP: #1451900 + * sparc32: destroy_context() and switch_mm() needs to disable interrupts. + - LP: #1451900 + * sparc: semtimedop() unreachable due to comparison error + - LP: #1451900 + * sparc: perf: Remove redundant perf_pmu_{en|dis}able calls + - LP: #1451900 + * sparc: perf: Make counting mode actually work + - LP: #1451900 + * sparc: Touch NMI watchdog when walking cpus and calling printk + - LP: #1451900 + * sparc64: Fix several bugs in memmove(). 
+ - LP: #1451900 + * net: sysctl_net_core: check SNDBUF and RCVBUF for min length + - LP: #1451900 + * rds: avoid potential stack overflow + - LP: #1451900 + * inet_diag: fix possible overflow in inet_diag_dump_one_icsk() + - LP: #1451900 + * caif: fix MSG_OOB test in caif_seqpkt_recvmsg() + - LP: #1451900 + * rxrpc: bogus MSG_PEEK test in rxrpc_recvmsg() + - LP: #1451900 + * ipv6: fix backtracking for throw routes + - LP: #1451900 + * tcp: fix tcp fin memory accounting + - LP: #1451900 + * net: compat: Update get_compat_msghdr() to match + copy_msghdr_from_user() behaviour + - LP: #1451900 + * tcp: make connect() mem charging friendly + - LP: #1451900 + * Linux 3.13.11-ckt19 + - LP: #1451900 + + -- Luis Henriques Thu, 07 May 2015 14:47:02 +0100 + +linux (3.13.0-52.86) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1451288 + + [ Upstream Kernel Changes ] + + * audit: create private file name copies when auditing inodes + - LP: #1450442 + + -- Brad Figg Sun, 03 May 2015 18:36:19 -0700 + +linux (3.13.0-52.85) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1450101 + + [ Upstream Kernel Changes ] + + * fs: take i_mutex during prepare_binprm for set[ug]id executables + - LP: #1447373 + - CVE-2015-3339 + + -- Luis Henriques Fri, 24 Apr 2015 17:26:50 +0100 + +linux (3.13.0-51.84) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1444141 + * Merged back Ubuntu-3.13.0-49.83 security release + + -- Luis Henriques Tue, 14 Apr 2015 21:38:57 +0100 + +linux (3.13.0-50.82) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1442285 + + [ Andy Whitcroft ] + + * [Config] CONFIG_DEFAULT_MMAP_MIN_ADDR needs to match on armhf and arm64 + - LP: #1418140 + + [ Chris J Arges ] + + * [Config] CONFIG_PCIEASPM_DEBUG=y + - LP: #1398544 + + [ Upstream Kernel Changes ] + + * KEYS: request_key() should reget expired keys rather than give + EKEYEXPIRED + - LP: #1124250 + * audit: correctly record file names with different path name types + - LP: #1439441 + * KVM: x86: Check for nested events if there is an injectable interrupt + - LP: #1413540 + * be2iscsi: fix memory leak in error path + - LP: #1440156 + * block: remove old blk_iopoll_enabled variable + - LP: #1440156 + * be2iscsi: Fix handling timed out MBX completion from FW + - LP: #1440156 + * be2iscsi: Fix doorbell format for EQ/CQ/RQ s per SLI spec. + - LP: #1440156 + * be2iscsi: Fix the session cleanup when reboot/shutdown happens + - LP: #1440156 + * be2iscsi: Fix scsi_cmnd leakage in driver. + - LP: #1440156 + * be2iscsi : Fix DMA Out of SW-IOMMU space error + - LP: #1440156 + * be2iscsi: Fix retrieving MCCQ_WRB in non-embedded Mbox path + - LP: #1440156 + * be2iscsi: Fix exposing Host in sysfs after adapter initialization is + complete + - LP: #1440156 + * be2iscsi: Fix interrupt Coalescing mechanism. + - LP: #1440156 + * be2iscsi: Fix TCP parameters while connection offloading. 
+ - LP: #1440156 + * be2iscsi: Fix memory corruption in MBX path + - LP: #1440156 + * be2iscsi: Fix destroy MCC-CQ before MCC-EQ is destroyed + - LP: #1440156 + * be2iscsi: add an missing goto in error path + - LP: #1440156 + * be2iscsi: remove potential junk pointer free + - LP: #1440156 + * be2iscsi: Fix memory leak in mgmt_set_ip() + - LP: #1440156 + * be2iscsi: Fix the sparse warning introduced in previous submission + - LP: #1440156 + * be2iscsi: Fix updating the boot enteries in sysfs + - LP: #1440156 + * be2iscsi: Fix processing CQE before connection resources are freed + - LP: #1440156 + * be2iscsi : Fix kernel panic during reboot/shutdown + - LP: #1440156 + * fixed invalid assignment of 64bit mask to host dma_boundary for scatter + gather segment boundary limit. + - LP: #1440156 + * quota: Store maximum space limit in bytes + - LP: #1441284 + * ip: zero sockaddr returned on error queue + - LP: #1441284 + * net: rps: fix cpu unplug + - LP: #1441284 + * ipv6: stop sending PTB packets for MTU < 1280 + - LP: #1441284 + * netxen: fix netxen_nic_poll() logic + - LP: #1441284 + * udp_diag: Fix socket skipping within chain + - LP: #1441284 + * ping: Fix race in free in receive path + - LP: #1441284 + * bnx2x: fix napi poll return value for repoll + - LP: #1441284 + * net: don't OOPS on socket aio + - LP: #1441284 + * bridge: dont send notification when skb->len == 0 in rtnl_bridge_notify + - LP: #1441284 + * ipv4: tcp: get rid of ugly unicast_sock + - LP: #1441284 + * ppp: deflate: never return len larger than output buffer + - LP: #1441284 + * net: sctp: fix passing wrong parameter header to param_type2af in + sctp_process_param + - LP: #1441284 + * ARM: pxa: add regulator_has_full_constraints to corgi board file + - LP: #1441284 + * ARM: pxa: add regulator_has_full_constraints to poodle board file + - LP: #1441284 + * ARM: pxa: add regulator_has_full_constraints to spitz board file + - LP: #1441284 + * hx4700: regulator: declare full constraints + - LP: #1441284 + * HID: input: fix confusion on conflicting mappings + - LP: #1441284 + * HID: fixup the conflicting keyboard mappings quirk + - LP: #1441284 + * megaraid_sas: disable interrupt_mask before enabling hardware + interrupts + - LP: #1441284 + * PCI: Generate uppercase hex for modalias var in uevent + - LP: #1441284 + * usb: core: buffer: smallest buffer should start at ARCH_DMA_MINALIGN + - LP: #1441284 + * tty/serial: at91: enable peripheral clock before accessing I/O + registers + - LP: #1441284 + * tty/serial: at91: fix error handling in atmel_serial_probe() + - LP: #1441284 + * axonram: Fix bug in direct_access + - LP: #1441284 + * ksoftirqd: Enable IRQs and call cond_resched() before poking RCU + - LP: #1441284 + * TPM: Add new TPMs to the tail of the list to prevent inadvertent change + of dev + - LP: #1441284 + * char: tpm: Add missing error check for devm_kzalloc + - LP: #1441284 + * tpm_tis: verify interrupt during init + - LP: #1441284 + * tpm: Fix NULL return in tpm_ibmvtpm_get_desired_dma + - LP: #1441284 + * tpm/tpm_i2c_stm_st33: Fix potential bug in tpm_stm_i2c_send + - LP: #1441284 + * tpm/tpm_i2c_stm_st33: Add status check when reading data on the FIFO + - LP: #1441284 + * mmc: sdhci-pxav3: fix unbalanced clock issues during probe + - LP: #1441284 + * iwlwifi: mvm: validate tid and sta_id in ba_notif + - LP: #1441284 + * power: bq24190: Fix ignored supplicants + - LP: #1441284 + * ARM: DRA7: hwmod: Fix boot crash with DEBUG_LL enabled on UART3 + - LP: #1441284 + * Bluetooth: ath3k: Add support of AR3012 bluetooth 
13d3:3423 device + - LP: #1411193, #1441284 + * cfq-iosched: fix incorrect filing of rt async cfqq + - LP: #1441284 + * smack: fix possible use after frees in task_security() callers + - LP: #1441284 + * xfs: ensure buffer types are set correctly + - LP: #1441284 + * xfs: inode unlink does not set AGI buffer type + - LP: #1441284 + * xfs: set buf types when converting extent formats + - LP: #1441284 + * xfs: set superblock buffer type correctly + - LP: #1441284 + * btrfs: set proper message level for skinny metadata + - LP: #1441284 + * KVM: s390: base hrtimer on a monotonic clock + - LP: #1441284 + * PCI: Fix infinite loop with ROM image of size 0 + - LP: #1441284 + * USB: cp210x: add ID for RUGGEDCOM USB Serial Console + - LP: #1441284 + * clk: zynq: Force CPU_2X clock to be ungated + - LP: #1441284 + * mmc: sdhci-pxav3: Remove checks for mandatory host clock + - LP: #1441284 + * mmc: sdhci-pxav3: fix race between runtime pm and irq + - LP: #1441284 + * power_supply: 88pm860x: Fix leaked power supply on probe fail + - LP: #1441284 + * staging: comedi: comedi_compat32.c: fix COMEDI_CMD copy back + - LP: #1441284 + * mmc: sdhci-pxav3: fix setting of pdata->clk_delay_cycles + - LP: #1441284 + * ARM: 8284/1: sa1100: clear RCSR_SMR on resume + - LP: #1441284 + * usb: musb: omap2plus bus glue needs USB host support + - LP: #1441284 + * USB: add flag for HCDs that can't receive wakeup requests (isp1760-hcd) + - LP: #1441284 + * USB: fix use-after-free bug in usb_hcd_unlink_urb() + - LP: #1441284 + * iwlwifi: mvm: always use mac color zero + - LP: #1441284 + * iwlwifi: pcie: disable the SCD_BASE_ADDR when we resume from WoWLAN + - LP: #1441284 + * vt: provide notifications on selection changes + - LP: #1441284 + * tty: Prevent untrappable signals from malicious program + - LP: #1441284 + * cpufreq: Set cpufreq_cpu_data to NULL before putting kobject + - LP: #1441284 + * lmedm04: Fix usb_submit_urb BOGUS urb xfer, pipe 1 != type 3 in + interrupt urb + - LP: #1441284 + * mei: mask interrupt set bit on clean reset bit + - LP: #1441284 + * mei: me: release hw from reset only during the reset flow + - LP: #1441284 + * MIPS: KVM: Deliver guest interrupts after local_irq_disable() + - LP: #1441284 + * KVM: MIPS: Don't leak FPU/DSP to guest + - LP: #1441284 + * ALSA: hda - Add the pin fixup for HP Envy TS bass speaker + - LP: #1441284 + * ALSA: hda - Set up GPIO for Toshiba Satellite S50D + - LP: #1441284 + * xen/manage: Fix USB interaction issues when resuming + - LP: #1441284 + * drm/i915: Correct the IOSF Dev_FN field for IOSF transfers + - LP: #1441284 + * cfq-iosched: handle failure of cfq group allocation + - LP: #1441284 + * tracing: Fix unmapping loop in tracing_mark_write + - LP: #1441284 + * fsnotify: fix handling of renames in audit + - LP: #1441284 + * drm/radeon: workaround for CP HW bug on CIK + - LP: #1441284 + * drm/radeon: only enable kv/kb dpm interrupts once v3 + - LP: #1441284 + * NFSv4.1: Fix a kfree() of uninitialised pointers in + decode_cb_sequence_args + - LP: #1441284 + * cpufreq: speedstep-smi: enable interrupts when waiting + - LP: #1441284 + * mm/hugetlb: pmd_huge() returns true for non-present hugepage + - LP: #1441284 + * mm: cleanup follow_page_mask() + - LP: #1441284 + * mm/hugetlb: take page table lock in follow_huge_pmd() + - LP: #1441284 + * mm/hugetlb: fix getting refcount 0 page in hugetlb_fault() + - LP: #1441284 + * mm/hugetlb: add migration/hwpoisoned entry check in + hugetlb_change_protection + - LP: #1441284 + * mm/hugetlb: add migration entry check in 
__unmap_hugepage_range + - LP: #1441284 + * mm: softdirty: unmapped addresses between VMAs are clean + - LP: #1441284 + * proc/pagemap: walk page tables under pte lock + - LP: #1441284 + * mm: when stealing freepages, also take pages created by splitting buddy + page + - LP: #1441284 + * mm/mmap.c: fix arithmetic overflow in __vm_enough_memory() + - LP: #1441284 + * mm/nommu.c: fix arithmetic overflow in __vm_enough_memory() + - LP: #1441284 + * iscsi-target: Drop problematic active_ts_list usage + - LP: #1441284 + * target: Fix PR_APTPL_BUF_LEN buffer size limitation + - LP: #1441284 + * mm/compaction: fix wrong order check in compact_finished() + - LP: #1441284 + * mm/memory.c: actually remap enough memory + - LP: #1441284 + * mm: hwpoison: drop lru_add_drain_all() in __soft_offline_page() + - LP: #1441284 + * ARC: fix page address calculation if PAGE_OFFSET != LINUX_LINK_BASE + - LP: #1441284 + * drm/radeon/dp: Set EDP_CONFIGURATION_SET for bridge chips if necessary + - LP: #1441284 + * drm/radeon: fix voltage setup on hawaii + - LP: #1441284 + * ALSA: hdspm - Constrain periods to 2 on older cards + - LP: #1441284 + * jffs2: fix handling of corrupted summary length + - LP: #1441284 + * dm mirror: do not degrade the mirror on discard error + - LP: #1441284 + * dm io: reject unsupported DISCARD requests with EOPNOTSUPP + - LP: #1441284 + * target: Add missing WRITE_SAME end-of-device sanity check + - LP: #1441284 + * target: Check for LBA + sectors wrap-around in sbc_parse_cdb + - LP: #1441284 + * Btrfs: fix fsync data loss after adding hard link to inode + - LP: #1441284 + * Added Little Endian support to vtpm module + - LP: #1441284 + * sg: fix read() error reporting + - LP: #1441284 + * IB/qib: Do not write EEPROM + - LP: #1441284 + * md/raid5: Fix livelock when array is both resyncing and degraded. + - LP: #1441284 + * dm: fix a race condition in dm_get_md + - LP: #1441284 + * dm snapshot: fix a possible invalid memory access on unload + - LP: #1441284 + * cpufreq: s3c: remove incorrect __init annotations + - LP: #1441284 + * libceph: assert both regular and lingering lists in __remove_osd() + - LP: #1441284 + * libceph: change from BUG to WARN for __remove_osd() asserts + - LP: #1441284 + * libceph: fix double __remove_osd() problem + - LP: #1441284 + * MIPS: Export FP functions used by lose_fpu(1) for KVM + - LP: #1441284 + * kdb: fix incorrect counts in KDB summary command output + - LP: #1441284 + * blk-throttle: check stats_cpu before reading it from sysfs + - LP: #1441284 + * procfs: fix race between symlink removals and traversals + - LP: #1441284 + * autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for + allocation + - LP: #1441284 + * pktgen: fix UDP checksum computation + - LP: #1441284 + * ipv6: fix ipv6_cow_metrics for non DST_HOST case + - LP: #1441284 + * clk-gate: fix bit # check in clk_register_gate() + - LP: #1441284 + * ALSA: off by one bug in snd_riptide_joystick_probe() + - LP: #1441284 + * ath5k: fix spontaneus AR5312 freezes + - LP: #1441284 + * pinctrl: pinctrl-imx: don't use invalid value of conf_reg + - LP: #1441284 + * ALSA: hda - Add one more node in the EAPD supporting candidate list + - LP: #1436745, #1441284 + * ALSA: hda - Add pin configs for ASUS mobo with IDT 92HD73XX codec + - LP: #1441284 + * drm/i915/bdw: PCI IDs ending in 0xb are ULT. 
+ - LP: #1441284 + * xfs: Fix quota type in quota structures when reusing quota file + - LP: #1441284 + * gpiolib: of: allow of_gpiochip_find_and_xlate to find more than one + chip per node + - LP: #1441284 + * gpio: tps65912: fix wrong container_of arguments + - LP: #1441284 + * ALSA: pcm: Don't leave PREPARED state after draining + - LP: #1441284 + * metag: Fix KSTK_EIP() and KSTK_ESP() macros + - LP: #1441284 + * md/raid1: fix read balance when a drive is write-mostly. + - LP: #1441284 + * drm/radeon: use drm_mode_vrefresh() rather than mode->vrefresh + - LP: #1441284 + * drm/radeon: fix 1 RB harvest config setup for TN/RL + - LP: #1441284 + * arm64: compat Fix siginfo_t -> compat_siginfo_t conversion on big + endian + - LP: #1441284 + * nilfs2: fix potential memory overrun on inode + - LP: #1441284 + * HID: i2c-hid: Limit reads to wMaxInputLength bytes for input events + - LP: #1441284 + * Linux 3.13.11-ckt18 + - LP: #1441284 + * ipv6: Don't reduce hop limit for an interface + - LP: #1441103 + - CVE-2015-2922 + * x86/microcode/intel: Guard against stack overflow in the loader + - LP: #1438504 + - CVE-2015-2666 + + -- Brad Figg Thu, 09 Apr 2015 10:06:28 -0700 + +linux (3.13.0-49.83) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH + - LP: #1442180 + + -- Luis Henriques Fri, 10 Apr 2015 18:46:02 +0100 + +linux (3.13.0-49.81) trusty; urgency=low + + [ Kamal Mostafa ] + + * Release Tracking Bug + - LP: #1436016 + + [ Alex Hung ] + + * SAUCE: ACPI / blacklist: blacklist Win8 OSI for HP Pavilion dv6 + - LP: #1416940 + + [ Andy Whitcroft ] + + * [Packaging] generate live watchdog blacklists + - LP: #1432837 + + [ Ben Widawsky ] + + * SAUCE: i915_bdw: drm/i915/bdw: enable eDRAM. 
+ - LP: #1430855 + + [ Chris J Arges ] + + * [Config] Add ibmvfc to d-i + - LP: #1416001 + + [ Seth Forshee ] + + * [Config] updateconfigs - enable X86_UP_APIC_MSI + + [ Upstream Kernel Changes ] + + * net: add sysfs helpers for netdev_adjacent logic + - LP: #1410852 + * net: Mark functions as static in core/dev.c + - LP: #1410852 + * net: rename sysfs symlinks on device name change + - LP: #1410852 + * btrfs: fix null pointer dereference in clone_fs_devices when name is + null + - LP: #1429804 + * cdc-acm: add sanity checks + - LP: #1413992 + * x86: thinkpad_acpi.c: fixed spacing coding style issue + - LP: #1417915 + * thinkpad_acpi: support new BIOS version string pattern + - LP: #1417915 + * net: sctp: fix slab corruption from use after free on INIT collisions + - LP: #1416506 + - CVE-2015-1421 + * ipv4: try to cache dst_entries which would cause a redirect + - LP: #1420027 + - CVE-2015-1465 + * x86, mm/ASLR: Fix stack randomization on 64-bit systems + - LP: #1423757 + - CVE-2015-1593 + * net: llc: use correct size for sysctl timeout entries + - LP: #1425271 + - CVE-2015-2041 + * net: rds: use correct size for max unacked packets and bytes + - LP: #1425274 + - CVE-2015-2042 + * Btrfs: clear compress-force when remounting with compress option + - LP: #1434183 + * ext4: merge uninitialized extents + - LP: #1430184 + * btrfs: filter invalid arg for btrfs resize + - LP: #1435441 + * Bluetooth: Add firmware update for Atheros 0cf3:311f + * Bluetooth: btusb: Add IMC Networks (Broadcom based) + * Bluetooth: sort the list of IDs in the source code + * Bluetooth: append new supported device to the list [0b05:17d0] + * Bluetooth: Add support for Intel bootloader devices + * Bluetooth: Ignore isochronous endpoints for Intel USB bootloader + * Bluetooth: Add support for Acer [13D3:3432] + * Bluetooth: Add support for Broadcom device of Asus Z97-DELUXE + motherboard + * Add a new PID/VID 0227/0930 for AR3012. + * Bluetooth: Add support for Acer [0489:e078] + * Bluetooth: Add USB device 04ca:3010 as Atheros AR3012 + * x86: mm: move mmap_sem unlock from mm_fault_error() to caller + * vm: add VM_FAULT_SIGSEGV handling support + * vm: make stack guard page errors return VM_FAULT_SIGSEGV rather than + SIGBUS + * spi/pxa2xx: Clear cur_chip pointer before starting next message + * spi: dw: Fix detecting FIFO depth + * spi: dw-mid: fix FIFO size + * ASoC: wm8960: Fix capture sample rate from 11250 to 11025 + * regulator: core: fix race condition in regulator_put() + * ASoC: omap-mcbsp: Correct CBM_CFS dai format configuration + * can: c_can: end pending transmission on network stop (ifdown) + * nfs: fix dio deadlock when O_DIRECT flag is flipped + * NFSv4.1: Fix an Oops in nfs41_walk_client_list + * Input: i8042 - add noloop quirk for Medion Akoya E7225 (MD98857) + * mac80211: properly set CCK flag in radiotap + * nl80211: fix per-station group key get/del and memory leak + * i2c: s3c2410: fix ABBA deadlock by keeping clock prepared + * usb-storage/SCSI: blacklist FUA on JMicron 152d:2566 USB-SATA + controller + * drm/i915: Only fence tiled region of object. + * drm/i915: Fix and clean BDW PCH identification + * drm/i915: BDW Fix Halo PCI IDs marked as ULT. 
+ * ALSA: seq-dummy: remove deadlock-causing events on close + * drivers/rtc/rtc-s5m.c: terminate s5m_rtc_id array with empty element + * drivers: net: cpsw: discard dual emac default vlan configuration + * can: kvaser_usb: Do not sleep in atomic context + * can: kvaser_usb: Send correct context to URB completion + * can: kvaser_usb: Retry the first bulk transfer on -ETIMEDOUT + * can: kvaser_usb: Fix state handling upon BUS_ERROR events + * quota: Switch ->get_dqblk() and ->set_dqblk() to use bytes as space + units + * rbd: fix rbd_dev_parent_get() when parent_overlap == 0 + * rbd: drop parent_ref in rbd_dev_unprobe() unconditionally + * dm cache: fix missing ERR_PTR returns and handling + * dm thin: don't allow messages to be sent to a pool target in READ_ONLY + or FAIL mode + * net: cls_bpf: fix size mismatch on filter preparation + * net: cls_bpf: fix auto generation of per list handles + * ipv6: replacing a rt6_info needs to purge possible propagated rt6_infos + too + * perf: Tighten (and fix) the grouping condition + * arc: mm: Fix build failure + * MIPS: IRQ: Fix disable_irq on CPU IRQs + * Complete oplock break jobs before closing file handle + * smpboot: Add missing get_online_cpus() in + smpboot_register_percpu_thread() + * ASoC: atmel_ssc_dai: fix start event for I2S mode + * spi: fsl-dspi: Fix memory leak + * spi: spi-fsl-dspi: Remove usage of devm_kzalloc + * ALSA: ak411x: Fix stall in work callback + * lib/checksum.c: fix carry in csum_tcpudp_nofold + * MIPS: Fix kernel lockup or crash after CPU offline/online + * gpio: sysfs: fix memory leak in gpiod_export_link + * gpio: sysfs: fix memory leak in gpiod_sysfs_set_active_low + * PCI: Add NEC variants to Stratus ftServer PCIe DMI check + * ASoC: sgtl5000: add delay before first I2C access + * PCI: Handle read-only BARs on AMD CS553x devices + * mm: pagewalk: call pte_hole() for VM_PFNMAP during walk_page_range + * nilfs2: fix deadlock of segment constructor over I_SYNC flag + * tcp: ipv4: initialize unicast_sock sk_pacing_rate + * caif: remove wrong dev_net_set() call + * qlge: Fix qlge_update_hw_vlan_features to handle if interface is down + * ip6_gre: fix endianness errors in ip6gre_err + * spi: dw: revisit FIFO size detection again + * Linux 3.13.11-ckt17 + + -- Kamal Mostafa Tue, 24 Mar 2015 11:58:44 -0700 + +linux (3.13.0-48.80) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1431263 + * Merged back all changes that were in Ubuntu-3.13.0-47.78 + + [ Upstream Kernel Changes ] + + * xfs: remote attribute overwrite causes transaction overrun + - LP: #1429821 + - CVE-2015-0274 + + -- Luis Henriques Thu, 12 Mar 2015 10:21:27 +0000 + +linux (3.13.0-47.78) trusty; urgency=low + + [ Seth Forshee ] + + * Release Tracking Bug + - LP: #1427733 + + [ Rodrigo Vivi ] + + * SAUCE: drm/i915: Fix and clean BDW PCH identification + - LP: #1423292 + * SAUCE: drm/i915: BDW Fix Halo PCI IDs marked as ULT. + - LP: #1423292 + + [ Upstream Kernel Changes ] + + * ext4: prevent bugon on race between write/fcntl + * Bluetooth: ath3k: workaround the compatibility issue with xHCI + controller + - LP: #1400215 + * openvswitch: Silence RCU lockdep checks from flow lookup. + - LP: #1408972 + * openvswitch: Use exact lookup for flow_get and flow_del. + - LP: #1408972 + * splice: Apply generic position and size checks to each write + - LP: #1416498 + - CVE-2014-7822 + * ALSA: hda - enable mute led quirk for one more hp machine. 
+ - LP: #1410704 + * crypto: prefix module autoloading with "crypto-" + - LP: #1427438 + * crypto: add missing crypto module aliases + - LP: #1427438 + * crypto: include crypto- module prefix in template + - LP: #1427438 + * crypto: crc32c - add missing crypto module alias + - LP: #1427438 + * drm/i915: Invalidate media caches on gen7 + - LP: #1427438 + * drm/i915: Force the CS stall for invalidate flushes + - LP: #1427438 + * audit: restore AUDIT_LOGINUID unset ABI + - LP: #1427438 + * parisc: fix out-of-register compiler error in ldcw inline assembler + function + - LP: #1427438 + * kvm: x86: drop severity of "generation wraparound" message + - LP: #1427438 + * udf: Verify i_size when loading inode + - LP: #1427438 + * udf: Verify symlink size before loading it + - LP: #1427438 + * udf: Check path length when reading symlink + - LP: #1427438 + * udf: Check component length before reading it + - LP: #1427438 + * crypto: af_alg - fix backlog handling + - LP: #1427438 + * ASoC: dwc: Ensure FIFOs are flushed to prevent channel swap + - LP: #1427438 + * video/logo: prevent use of logos after they have been freed + - LP: #1427438 + * video/fbdev: fix defio's fsync + - LP: #1427438 + * Add USB_EHCI_EXYNOS to multi_v7_defconfig + - LP: #1427438 + * drm/i915: Swap primary planes on gen2 for FBC + - LP: #1427438 + * drm/i915: Don't swap planes on 830M + - LP: #1427438 + * drm/i915: Don't call intel_prepare_page_flip() multiple times on gen2-4 + - LP: #1427438 + * x86, vdso: Use asm volatile in __getcpu + - LP: #1427438 + * drivers: net: cpsw: enable interrupts after napi enable and clearing + previous interrupts + - LP: #1427438 + * net: ethernet: cpsw: fix hangs with interrupts + - LP: #1427438 + * ALSA: hda - Fix wrong gpio_dir & gpio_mask hint setups for IDT/STAC + codecs + - LP: #1427438 + * drm/radeon: KV has three PPLLs (v2) + - LP: #1427438 + * drm/radeon: properly filter DP1.2 4k modes on non-DP1.2 hw + - LP: #1427438 + * virtio_pci: defer kfree until release callback + - LP: #1427438 + * virtio_pci: document why we defer kfree + - LP: #1427438 + * mm: propagate error from stack expansion even for guard page + - LP: #1427438 + * ALSA: hda - Add new GPU codec ID to snd-hda + - LP: #1427438 + * ALSA: hda - Add new GPU codec ID 0x10de0070 to snd-hda + - LP: #1427438 + * ALSA: hda - Add new GPU codec ID 0x10de0072 to snd-hda + - LP: #1427438 + * vfio-pci: Fix the check on pci device type in vfio_pci_probe() + - LP: #1427438 + * mm: prevent endless growth of anon_vma hierarchy + - LP: #1427438 + * mm: protect set_page_dirty() from ongoing truncation + - LP: #1427438 + * mm, vmscan: prevent kswapd livelock due to pfmemalloc-throttled process + being killed + - LP: #1427438 + * HID: roccat: potential out of bounds in pyra_sysfs_write_settings() + - LP: #1427438 + * mm: Don't count the stack guard page towards RLIMIT_STACK + - LP: #1427438 + * mm: fix corner case in anon_vma endless growing prevention + - LP: #1427438 + * usb: musb: stuff leak of struct usb_hcd + - LP: #1427438 + * usb: gadget: udc: atmel: change setting for DMA + - LP: #1427438 + * usb: gadget: udc: atmel: fix possible IN hang issue + - LP: #1427438 + * ARM: clk-imx6q: fix video divider for rev T0 1.0 + - LP: #1427438 + * ARM: dts: imx25: Fix the SPI1 clocks + - LP: #1427438 + * USB: cp210x: fix ID for production CEL MeshConnect USB Stick + - LP: #1427438 + * USB: keyspan: fix null-deref at probe + - LP: #1427438 + * ARM: omap5/dra7xx: Fix frequency typos + - LP: #1427438 + * LOCKD: Fix a race when initialising nlmsvc_timeout + - 
LP: #1427438 + * NFSv4.1: Fix client id trunking on Linux + - LP: #1427438 + * USB: cp210x: add IDs for CEL USB sticks and MeshWorks devices + - LP: #1427438 + * USB: qcserial/option: make AT URCs work for Sierra Wireless MC73xx + - LP: #1427438 + * USB: EHCI: fix initialization bug in iso_stream_schedule() + - LP: #1427438 + * OHCI: add a quirk for ULi M5237 blocking on reset + - LP: #1427438 + * mei: clean reset bit before reset + - LP: #1427438 + * target: Drop arbitrary maximum I/O size limit + - LP: #1427438 + * usb: gadget: udc: atmel: fix possible oops when unloading module + - LP: #1427438 + * USB: console: fix uninitialised ldisc semaphore + - LP: #1427438 + * USB: console: fix potential use after free + - LP: #1427438 + * mmc: sdhci: Fix sleep in atomic after inserting SD card + - LP: #1427438 + * usb: dwc3: gadget: Fix TRB preparation during SG + - LP: #1427438 + * usb: dwc3: gadget: Stop TRB preparation after limit is reached + - LP: #1427438 + * ftrace/jprobes/x86: Fix conflict between jprobes and function graph + tracing + - LP: #1427438 + * clocksource: exynos_mct: Fix bitmask regression for exynos4_mct_write + - LP: #1427438 + * time: settimeofday: Validate the values of tv from user + - LP: #1427438 + * Input: i8042 - reset keyboard to fix Elantech touchpad detection + - LP: #1427438 + * drm/radeon: fix VM flush on cayman/aruba (v3) + - LP: #1427438 + * drm/radeon: fix VM flush on SI (v3) + - LP: #1427438 + * drm/radeon: fix VM flush on CIK (v3) + - LP: #1427438 + * drm/radeon: add a dpm quirk list + - LP: #1427438 + * Input: elantech - support new ICs types for version 4 + - LP: #1427438 + * Input: I8042 - add Acer Aspire 7738 to the nomux list + - LP: #1427438 + * drm/i915: Fix mutex->owner inspection race under DEBUG_MUTEXES + - LP: #1427438 + * drm/radeon: add si dpm quirk list + - LP: #1427438 + * pinctrl: Fix two deadlocks + - LP: #1427438 + * gpio / ACPI: register to ACPI events automatically + - LP: #1427438 + * gpio: fix memory and reference leaks in gpiochip_add error path + - LP: #1427438 + * gpio: fix sleep-while-atomic in gpiochip_remove + - LP: #1427438 + * can: dev: fix crtlmode_supported check + - LP: #1427438 + * can: kvaser_usb: Don't free packets when tight on URBs + - LP: #1427438 + * can: kvaser_usb: Reset all URB tx contexts upon channel close + - LP: #1427438 + * can: kvaser_usb: Don't send a RESET_CHIP for non-existing channels + - LP: #1427438 + * gpio: sysfs: fix gpio-chip device-attribute leak + - LP: #1427438 + * gpio: sysfs: fix gpio device-attribute leak + - LP: #1427438 + * gpiolib: of: Correct error handling in of_get_named_gpiod_flags + - LP: #1427438 + * ALSA: usb-audio: Add mic volume fix quirk for Logitech Webcam C210 + - LP: #1427438 + * fix deadlock in cifs_ioctl_clone() + - LP: #1427438 + * ipr: wait for aborted command responses + - LP: #1427438 + * libata: allow sata_sil24 to opt-out of tag ordered submission + - LP: #1427438 + * scripts/recordmcount.pl: There is no -m32 gcc option on Super-H anymore + - LP: #1427438 + * libata: prevent HSM state change race between ISR and PIO + - LP: #1427438 + * bus: mvebu-mbus: fix support of MBus window 13 + - LP: #1427438 + * ARM: dts: imx25: Fix PWM "per" clocks + - LP: #1427438 + * x86, boot: Skip relocs when load address unchanged + - LP: #1427438 + * x86, hyperv: Mark the Hyper-V clocksource as being continuous + - LP: #1427438 + * x86, tls, ldt: Stop checking lm in LDT_empty + - LP: #1427438 + * x86, tls: Interpret an all-zero struct user_desc as "no segment" + - LP: #1427438 + * 
x86/apic: Re-enable PCI_MSI support for non-SMP X86_32 + - LP: #1427438 + * x86/tsc: Change Fast TSC calibration failed from error to info + - LP: #1427438 + * dm cache: share cache-metadata object across inactive and active DM + tables + - LP: #1427438 + * dm cache: fix problematic dual use of a single migration count variable + - LP: #1427438 + * time: adjtimex: Validate the ADJ_FREQUENCY values + - LP: #1427438 + * ntp: Fixup adjtimex freq validation on 32-bit systems + - LP: #1427438 + * Linux 3.13.11-ckt16 + - LP: #1427438 + + -- Seth Forshee Tue, 03 Mar 2015 09:33:08 -0600 + +linux (3.13.0-46.79) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * IB/core: Prevent integer overflow in ib_umem_get address arithmetic + - LP: #1413741 + - CVE-2015-8159 + + -- Luis Henriques Mon, 09 Mar 2015 17:42:23 +0000 + +linux (3.13.0-46.77) trusty; urgency=low + + [ Seth Forshee ] + + * Revert "ipv6: fix swapped ipv4/ipv6 mtu_reduced callbacks" + - LP: #1404558 + * Release Tracking Bug + - LP: #1427292 + + -- Seth Forshee Mon, 02 Mar 2015 11:33:20 -0600 + +linux (3.13.0-46.76) trusty; urgency=low + + [ Kamal Mostafa ] + + * Release Tracking Bug + - LP: #1426060 + + [ Upstream Kernel Changes ] + + * clocksource: arch_timer: Only use the virtual counter (CNTVCT) on arm64 + - LP: #1426043 + + -- Kamal Mostafa Thu, 26 Feb 2015 09:53:44 -0800 + +linux (3.13.0-46.75) trusty; urgency=low + + [ Seth Forshee ] + + * Release Tracking Bug + - LP: #1419963 + + [ Andy Whitcroft ] + + * [Debian] arm64 -- build ubuntu drivers + - LP: #1411284 + * hyper-v -- fix comment handing in /etc/network/interfaces + - LP: #1413020 + + [ Kamal Mostafa ] + + * [Packaging] force "dpkg-source -I -i" behavior + + [ Upstream Kernel Changes ] + + * Revert "[SCSI] mpt2sas: Remove phys on topology change." + - LP: #1419838 + * Revert "[SCSI] mpt3sas: Remove phys on topology change" + - LP: #1419838 + * Btrfs: fix transaction abortion when remounting btrfs from RW to RO + - LP: #1411320 + * Btrfs: fix a crash of clone with inline extents's split + - LP: #1413129 + * net/mlx4_en: Add VXLAN ndo calls to the PF net device ops too + - LP: #1407760 + * KVM: x86: SYSENTER emulation is broken + - LP: #1414651 + - CVE-2015-0239 + * powerpc/xmon: Fix another endiannes issue in RTAS call from xmon + - LP: #1415919 + * ipv6: fix swapped ipv4/ipv6 mtu_reduced callbacks + - LP: #1404558, #1419837 + * usb: gadget: at91_udc: move prepare clk into process context + - LP: #1419837 + * KVM: x86: Fix far-jump to non-canonical check + - LP: #1419837 + * x86/tls: Validate TLS entries to protect espfix + - LP: #1419837 + * userns: Check euid no fsuid when establishing an unprivileged uid + mapping + - LP: #1419837 + * userns: Document what the invariant required for safe unprivileged + mappings. 
+ - LP: #1419837 + * userns: Only allow the creator of the userns unprivileged mappings + - LP: #1419837 + * x86_64, switch_to(): Load TLS descriptors before switching DS and ES + - LP: #1419837 + * isofs: Fix infinite looping over CE entries + - LP: #1419837 + * batman-adv: Calculate extra tail size based on queued fragments + - LP: #1419837 + * KEYS: close race between key lookup and freeing + - LP: #1419837 + * isofs: Fix unchecked printing of ER records + - LP: #1419837 + * x86_64, vdso: Fix the vdso address randomization algorithm + - LP: #1419837 + * groups: Consolidate the setgroups permission checks + - LP: #1419837 + * userns: Don't allow setgroups until a gid mapping has been setablished + - LP: #1419837 + * userns: Don't allow unprivileged creation of gid mappings + - LP: #1419837 + * move d_rcu from overlapping d_child to overlapping d_alias + - LP: #1419837 + * deal with deadlock in d_walk() + - LP: #1419837 + * Linux 3.13.11-ckt14 + - LP: #1419837 + * gre: fix the inner mac header in nbma tunnel xmit path + - LP: #1419838 + * netlink: Always copy on mmap TX. + - LP: #1419838 + * netlink: Don't reorder loads/stores before marking mmap netlink frame + as available + - LP: #1419838 + * in6: fix conflict with glibc + - LP: #1419838 + * tg3: tg3_disable_ints using uninitialized mailbox value to disable + interrupts + - LP: #1419838 + * batman-adv: Unify fragment size calculation + - LP: #1419838 + * batman-adv: avoid NULL dereferences and fix if check + - LP: #1419838 + * net: Fix stacked vlan offload features computation + - LP: #1419838 + * net: Reset secmark when scrubbing packet + - LP: #1419838 + * tcp: Do not apply TSO segment limit to non-TSO packets + - LP: #1419838 + * alx: fix alx_poll() + - LP: #1419838 + * team: avoid possible underflow of count_pending value for notify_peers + and mcast_rejoin + - LP: #1419838 + * enic: fix rx skb checksum + - LP: #1419838 + * net/core: Handle csum for CHECKSUM_COMPLETE VXLAN forwarding + - LP: #1419838 + * macvlan: unregister net device when netdev_upper_dev_link() fails + - LP: #1419838 + * netfilter: conntrack: disable generic tracking for known protocols + - LP: #1419838 + * xen-netfront: Fix handling packets on compound pages with skb_linearize + - LP: #1317811, #1419838 + * xen-netfront: use correct linear area after linearizing an skb + - LP: #1317811, #1419838 + * eCryptfs: Force RO mount when encrypted view is enabled + - LP: #1419838 + * smiapp: Take mutex during PLL update in sensor initialisation + - LP: #1419838 + * smiapp-pll: Correct clock debug prints + - LP: #1419838 + * sound: simplify au0828 quirk table + - LP: #1419838 + * sound: Update au0828 quirks table + - LP: #1419838 + * af9005: fix kernel panic on init if compiled without IR + - LP: #1419838 + * writeback: fix a subtle race condition in I_DIRTY clearing + - LP: #1419838 + * usb: renesas_usbhs: gadget: fix NULL pointer dereference in + ep_disable() + - LP: #1419838 + * KVM: s390: flush CPU on load control + - LP: #1419838 + * UBI: Fix double free after do_sync_erase() + - LP: #1419838 + * UBI: Fix invalid vfree() + - LP: #1419838 + * Drivers: hv: vmbus: Fix a race condition when unregistering a device + - LP: #1419838 + * driver core: Fix unbalanced device reference in drivers_probe + - LP: #1419838 + * PCI: Restore detection of read-only BARs + - LP: #1419838 + * scsi: correct return values for .eh_abort_handler implementations + - LP: #1419838 + * drm/radeon: fix typo in CI dpm disable + - LP: #1419838 + * ARM: tegra: Re-add removed SoC id macro to 
tegra_resume() + - LP: #1419838 + * arm64: Add COMPAT_HWCAP_LPAE + - LP: #1419838 + * genhd: check for int overflow in disk_expand_part_tbl() + - LP: #1419838 + * ftrace/x86: Add frames pointers to trampoline as necessary + - LP: #1419838 + * drm/ttm: Avoid memory allocation from shrinker functions. + - LP: #1419838 + * ASoC: sigmadsp: Refuse to load firmware files with a non-supported + version + - LP: #1419838 + * drm/radeon: work around a hw bug in MGCG on CIK + - LP: #1419838 + * Btrfs: make sure we wait on logged extents when fsycning two subvols + - LP: #1419838 + * Btrfs: do not move em to modified list when unpinning + - LP: #1419838 + * megaraid_sas: corrected return of wait_event from abort frame path + - LP: #1419838 + * ASoC: max98090: Fix ill-defined sidetone route + - LP: #1419838 + * blk-mq: use 'nr_cpu_ids' as highest CPU ID count for hwq <-> cpu map + - LP: #1419838 + * nfs41: fix nfs4_proc_layoutget error handling + - LP: #1419838 + * cdc-acm: memory leak in error case + - LP: #1419838 + * USB: cdc-acm: check for valid interfaces + - LP: #1419838 + * x86/asm/traps: Disable tracing and kprobes in fixup_bad_iret and + sync_regs + - LP: #1419838 + * uvcvideo: Fix destruction order in uvc_delete() + - LP: #1419838 + * HID: i2c-hid: fix race condition reading reports + - LP: #1419838 + * mfd: tc6393xb: Fail ohci suspend if full state restore is required + - LP: #1419838 + * serial: samsung: wait for transfer completion before clock disable + - LP: #1419838 + * mmc: dw_mmc: avoid write to CDTHRCTL on older versions + - LP: #1419838 + * Bluetooth: ath3k: Add support of MCI 13d3:3408 bt device + - LP: #1419838 + * eCryptfs: Remove buggy and unnecessary write in file name decode + routine + - LP: #1419838 + * n_tty: Fix read_buf race condition, increment read_head after pushing + data + - LP: #1419838 + * dm cache: only use overwrite optimisation for promotion when in + writeback mode + - LP: #1419838 + * dm cache: dirty flag was mistakenly being cleared when promoting via + overwrite + - LP: #1419838 + * dm bufio: fix memleak when using a dm_buffer's inline bio + - LP: #1419838 + * ath9k_hw: fix hardware queue allocation + - LP: #1419838 + * ath9k: fix BE/BK queue order + - LP: #1419838 + * ath5k: fix hardware queue index assignment + - LP: #1419838 + * tcm_loop: Fix wrong I_T nexus association + - LP: #1419838 + * iwlwifi: dvm: fix flush support for old firmware + - LP: #1419838 + * iommu/vt-d: Fix an off-by-one bug in __domain_mapping() + - LP: #1419838 + * dm crypt: use memzero_explicit for on-stack buffer + - LP: #1419838 + * mnt: Implicitly add MNT_NODEV on remount when it was implicitly added + by mount + - LP: #1419838 + * mnt: Update unprivileged remount test + - LP: #1419838 + * umount: Disallow unprivileged mount force + - LP: #1419838 + * md/raid56: Don't perform reads to support writes until stripe is ready. + - LP: #1419838 + * md/raid5: avoid livelock caused by non-aligned writes. + - LP: #1419838 + * md/raid5: fetch_block must fetch all the blocks handle_stripe_dirtying + wants. 
+ - LP: #1419838 + * drm/i915: Disallow pin ioctl completely for kms drivers + - LP: #1419838 + * drm/vmwgfx: Don't use memory accounting for kernel-side fence objects + - LP: #1419838 + * drm/vmwgfx: Fix fence event code + - LP: #1419838 + * hp_accel: Add support for HP ZBook 15 + - LP: #1419838 + * drm/radeon: check the right ring in radeon_evict_flags() + - LP: #1419838 + * swiotlb-xen: pass dev_addr to xen_dma_unmap_page and + xen_dma_sync_single_for_cpu + - LP: #1419838 + * swiotlb-xen: call xen_dma_sync_single_for_device when appropriate + - LP: #1419838 + * clocksource: arch_timer: Fix code to use physical timers when requested + - LP: #1419838 + * ALSA: hda - Fix built-in mic at resume on Lenovo Ideapad S210 + - LP: #1419838 + * can: peak_usb: fix memset() usage + - LP: #1419838 + * can: peak_usb: fix cleanup sequence order in case of error during init + - LP: #1419838 + * ALSA: usb-audio: Don't resubmit pending URBs at MIDI error recovery + - LP: #1419838 + * KEYS: Fix stale key registration at error path + - LP: #1419838 + * thermal: Fix error path in thermal_init() + - LP: #1419838 + * blk-mq: Fix a use-after-free + - LP: #1419838 + * fs: nfsd: Fix signedness bug in compare_blob + - LP: #1419838 + * nfsd4: fix xdr4 inclusion of escaped char + - LP: #1419838 + * userns: Rename id_map_mutex to userns_state_mutex + - LP: #1419838 + * drm/i915: Don't complain about stolen conflicts on gen3 + - LP: #1419838 + * ALSA: hda - Add EAPD fixup for ASUS Z99He laptop + - LP: #1419838 + * Btrfs: fix fs corruption on transaction abort if device supports + discard + - LP: #1419838 + * ncpfs: return proper error from NCP_IOC_SETROOT ioctl + - LP: #1419838 + * drivers/rtc/rtc-sirfsoc.c: move hardware initilization earlier in probe + - LP: #1419838 + * rtc: omap: fix missing wakealarm attribute + - LP: #1419838 + * exit: pidns: alloc_pid() leaks pid_namespace if child_reaper is exiting + - LP: #1419838 + * perf/x86/intel/uncore: Make sure only uncore events are collected + - LP: #1419838 + * perf: Fix events installation during moving group + - LP: #1419838 + * KVM: nVMX: Disable unrestricted mode if ept=0 + - LP: #1419838 + * drm/i915: save/restore GMBUS freq across suspend/resume on gen4 + - LP: #1419838 + * pstore-ram: Fix hangs by using write-combine mappings + - LP: #1419838 + * pstore-ram: Allow optional mapping with pgprot_noncached + - LP: #1419838 + * userns: Add a knob to disable setgroups on a per user namespace basis + - LP: #1419838 + * userns: Allow setting gid_maps without privilege when setgroups is + disabled + - LP: #1419838 + * userns: Unbreak the unprivileged remount tests + - LP: #1419838 + * HID: i2c-hid: prevent buffer overflow in early IRQ + - LP: #1419838 + * mac80211: fix multicast LED blinking and counter + - LP: #1419838 + * cfg80211: avoid mem leak on driver hint set + - LP: #1419838 + * mtd: tests: abort torturetest on erase errors + - LP: #1419838 + * tracing/sched: Check preempt_count() for current when reading + task->state + - LP: #1419838 + * iscsi,iser-target: Initiate termination only once + - LP: #1419838 + * iser-target: Fix flush + disconnect completion handling + - LP: #1419838 + * iser-target: Parallelize CM connection establishment + - LP: #1419838 + * iser-target: Fix connected_handler + teardown flow race + - LP: #1419838 + * iser-target: Handle ADDR_CHANGE event for listener cm_id + - LP: #1419838 + * iser-target: Fix implicit termination of connections + - LP: #1419838 + * genirq: Prevent proc race against freeing of irq descriptors + - LP: #1419838 + 
* x86/tls: Disallow unusual TLS segments + - LP: #1419838 + * powerpc/powernv: Switch off MMU before entering nap/sleep/rvwinkle mode + - LP: #1419838 + * ARC: [nsimosci] move peripherals to match model to FPGA + - LP: #1419838 + * scsi: blacklist RSOC for Microsoft iSCSI target devices + - LP: #1419838 + * rtlwifi: rtl8192ce: Set fw_ready flag + - LP: #1419838 + * iscsi-target: Fail connection on short sendmsg writes + - LP: #1419838 + * mac80211: free management frame keys when removing station + - LP: #1419838 + * ceph: do_sync is never initialized + - LP: #1419838 + * x86/tls: Don't validate lm in set_thread_area() after all + - LP: #1419838 + * ALSA: usb-audio: extend KEF X300A FU 10 tweak to Arcam rPAC + - LP: #1419838 + * mnt: Fix a memory stomp in umount + - LP: #1419838 + * ocfs2: fix journal commit deadlock + - LP: #1419838 + * tick/powerclamp: Remove tick_nohz_idle abuse + - LP: #1419838 + * Linux 3.13.11-ckt15 + - LP: #1419838 + + [ Xiong Zhang ] + + * SAUCE: ubuntu/i915: power on sink if dpcd read fail + - LP: #1416451 + + -- Seth Forshee Mon, 09 Feb 2015 14:08:50 -0600 + +linux (3.13.0-45.74) trusty; urgency=low + + [ Seth Forshee ] + + * Release Tracking Bug + - LP: #1410384 + + [ Jesse Barnes ] + + * SAUCE: drm/i915/vlv: assert and de-assert sideband reset at boot and + resume v3 + - LP: #1401963 + + [ K. Y. Srinivasan ] + + * SAUCE: storvsc: force SPC-3 compliance on win8 and win8 r2 hosts + - LP: #1406867 + + [ Timo Aaltonen ] + + * SAUCE: Switch VLV/BYT to use i915_bdw. + - LP: #1401963 + + [ Upstream Kernel Changes ] + + * Revert "xhci: clear root port wake on bits if controller isn't wake-up + capable" + - LP: #1408779 + * KVM: PPC: BOOK3S: HV: CMA: Reserve cma region only in hypervisor mode + - LP: #1400209 + * e1000e: Fix no connectivity when driver loaded with cable out + - LP: #1400365 + * net/mlx4_core: Enable CQE/EQE stride support + - LP: #1400127 + * net/mlx4_core: Cache line EQE size support + - LP: #1400127 + * net/mlx4_en: Add mlx4_en_get_cqe helper + - LP: #1400127 + * net/mlx4_core: Introduce mlx4_get_module_info for cable module info + reading + - LP: #1400127 + * ethtool, net/mlx4_en: Cable info, get_module_info/eeprom ethtool + support + - LP: #1400127 + * net/mlx4_core: Introduce ACCESS_REG CMD and eth_prot_ctrl dev cap + - LP: #1400127 + * net/mlx4_core: Add ethernet backplane autoneg device capability + - LP: #1400127 + * ethtool, net/mlx4_en: Add 100M, 20G, 56G speeds ethtool reporting + support + - LP: #1400127 + * net/mlx4_en: Use PTYS register to query ethtool settings + - LP: #1400127 + * net/mlx4_en: Use PTYS register to set ethtool settings (Speed) + - LP: #1400127 + * net/mlx4_en: Add support for setting rxvlan offload OFF/ON + - LP: #1400127 + * net/mlx4_en: Add ethtool support for [rx|tx]vlan offload set to OFF/ON + - LP: #1400127 + * net/mlx4_core: Prevent VF from changing port configuration + - LP: #1400127 + * net/mlx4_en: mlx4_en_set_settings() always fails when autoneg is set + - LP: #1400127 + * ipv4: fix nexthop attlen check in fib_nh_match + - LP: #1408779 + * vxlan: fix a use after free in vxlan_encap_bypass + - LP: #1408779 + * vxlan: using pskb_may_pull as early as possible + - LP: #1408779 + * vxlan: fix a free after use + - LP: #1408779 + * ipv4: fix a potential use after free in ip_tunnel_core.c + - LP: #1408779 + * ax88179_178a: fix bonding failure + - LP: #1408779 + * tcp: md5: do not use alloc_percpu() + - LP: #1408779 + * ipv4: dst_entry leak in ip_send_unicast_reply() + - LP: #1408779 + * drivers/net, ipv6: Select IPv6 
fragment idents for virtio UFO packets + - LP: #1408779 + * drivers/net: macvtap and tun depend on INET + - LP: #1408779 + * ip6_tunnel: Use ip6_tnl_dev_init as the ndo_init function. + - LP: #1408779 + * vti6: Use vti6_dev_init as the ndo_init function. + - LP: #1408779 + * sit: Use ipip6_tunnel_init as the ndo_init function. + - LP: #1408779 + * gre6: Move the setting of dev->iflink into the ndo_init functions. + - LP: #1408779 + * vxlan: Do not reuse sockets for a different address family + - LP: #1408779 + * net: sctp: fix memory leak in auth key management + - LP: #1408779 + * smsc911x: power-up phydev before doing a software reset. + - LP: #1408779 + * sunvdc: add cdrom and v1.1 protocol support + - LP: #1408779 + * sunvdc: compute vdisk geometry from capacity + - LP: #1408779 + * sunvdc: limit each sg segment to a page + - LP: #1408779 + * vio: fix reuse of vio_dring slot + - LP: #1408779 + * sunvdc: don't call VD_OP_GET_VTOC + - LP: #1408779 + * sparc64: Fix crashes in schizo_pcierr_intr_other(). + - LP: #1408779 + * sparc64: Do irq_{enter,exit}() around generic_smp_call_function*(). + - LP: #1408779 + * sparc32: Implement xchg and atomic_xchg using ATOMIC_HASH locks + - LP: #1408779 + * sparc64: Fix constraints on swab helpers. + - LP: #1408779 + * inetdevice: fixed signed integer overflow + - LP: #1408779 + * ipv4: Fix incorrect error code when adding an unreachable route + - LP: #1408779 + * ieee802154: fix error handling in ieee802154fake_probe() + - LP: #1408779 + * qmi_wwan: Add support for HP lt4112 LTE/HSPA+ Gobi 4G Modem + - LP: #1408779 + * pptp: fix stack info leak in pptp_getname() + - LP: #1408779 + * ipx: fix locking regression in ipx_sendmsg and ipx_recvmsg + - LP: #1408779 + * aio: fix uncorrent dirty pages accouting when truncating AIO ring + buffer + - LP: #1408779 + * spi: dw: Fix dynamic speed change. + - LP: #1408779 + * USB: serial: cp210x: add IDs for CEL MeshConnect USB Stick + - LP: #1408779 + * iio: Fix IIO_EVENT_CODE_EXTRACT_DIR bit mask + - LP: #1408779 + * usb: serial: ftdi_sio: add PIDs for Matrix Orbital products + - LP: #1408779 + * USB: keyspan: fix tty line-status reporting + - LP: #1408779 + * USB: keyspan: fix overrun-error reporting + - LP: #1408779 + * USB: ssu100: fix overrun-error reporting + - LP: #1408779 + * nfsd: correctly define v4.2 support attributes + - LP: #1408779 + * SUNRPC: Fix locking around callback channel reply receive + - LP: #1408779 + * nfsd: Fix slot wake up race in the nfsv4.1 callback code + - LP: #1408779 + * bnx2fc: do not add shared skbs to the fcoe_rx_list + - LP: #1408779 + * scsi: add Intel Multi-Flex to scsi scan blacklist + - LP: #1408779 + * ARM: 8216/1: xscale: correct auxiliary register in suspend/resume + - LP: #1408779 + * USB: xhci: don't start a halted endpoint before its new dequeue is set + - LP: #1408779 + * USB: xhci: Reset a halted endpoint immediately when we encounter a + stall. 
+ - LP: #1408779 + * usb: xhci: rework root port wake bits if controller isn't allowed to + wakeup + - LP: #1408779 + * ALSA: hda - Limit 40bit DMA for AMD HDMI controllers + - LP: #1408779 + * PCI/MSI: Add device flag indicating that 64-bit MSIs don't work + - LP: #1408779 + * gpu/radeon: Set flag to indicate broken 64-bit MSI + - LP: #1408779 + * sound/radeon: Move 64-bit MSI quirk from arch to driver + - LP: #1408779 + * powerpc/powernv: Honor the generic "no_64bit_msi" flag + - LP: #1408779 + * powerpc/pseries: Honor the generic "no_64bit_msi" flag + - LP: #1408779 + * MIPS: Loongson: Make platform serial setup always built-in. + - LP: #1408779 + * net/ping: handle protocol mismatching scenario + - LP: #1408779 + * usb-quirks: Add reset-resume quirk for MS Wireless Laser Mouse 6000 + - LP: #1408779 + * Input: xpad - use proper endpoint type + - LP: #1408779 + * powerpc: 32 bit getcpu VDSO function uses 64 bit instructions + - LP: #1408779 + * ARM: 8222/1: mvebu: enable strex backoff delay + - LP: #1408779 + * ARM: 8226/1: cacheflush: get rid of restarting block + - LP: #1408779 + * staging: r8188eu: Add new device ID for DLink GO-USB-N150 + - LP: #1408779 + * btrfs: zero out left over bytes after processing compression streams + - LP: #1408779 + * smiapp: Only some selection targets are settable + - LP: #1408779 + * i2c: omap: fix NACK and Arbitration Lost irq handling + - LP: #1408779 + * drm/nouveau/gf116: remove copy1 engine + - LP: #1408779 + * drm/i915: More cautious with pch fifo underruns + - LP: #1408779 + * drm/i915: Unlock panel even when LVDS is disabled + - LP: #1408779 + * AHCI: Add DeviceIDs for Sunrise Point-LP SATA controller + - LP: #1408779 + * sata_fsl: fix error handling of irq_of_parse_and_map + - LP: #1408779 + * drm/radeon: kernel panic in drm_calc_vbltimestamp_from_scanoutpos with + 3.18.0-rc6 + - LP: #1408779 + * mm: frontswap: invalidate expired data on a dup-store failure + - LP: #1408779 + * mm/vmpressure.c: fix race in vmpressure_work_fn() + - LP: #1408779 + * drivers/input/evdev.c: don't kfree() a vmalloc address + - LP: #1408779 + * mm: fix swapoff hang after page migration and fork + - LP: #1408779 + * mm: fix anon_vma_clone() error treatment + - LP: #1408779 + * slab: fix nodeid bounds check for non-contiguous node IDs + - LP: #1408779 + * ahci: disable MSI on SAMSUNG 0xa800 SSD + - LP: #1408779 + * i2c: davinci: generate STP always when NACK is received + - LP: #1408779 + * ip_tunnel: the lack of vti_link_ops' dellink() cause kernel panic + - LP: #1408779 + * ipv6: gre: fix wrong skb->protocol in WCCP + - LP: #1408779 + * Fix race condition between vxlan_sock_add and vxlan_sock_release + - LP: #1408779 + * tg3: fix ring init when there are more TX than RX channels + - LP: #1408779 + * net/mlx4_core: Limit count field to 24 bits in qp_alloc_res + - LP: #1408779 + * rtnetlink: release net refcnt on error in do_setlink() + - LP: #1408779 + * net: mvneta: fix Tx interrupt delay + - LP: #1408779 + * net: mvneta: fix race condition in mvneta_tx() + - LP: #1408779 + * net: sctp: use MAX_HEADER for headroom reserve in output path + - LP: #1408779 + * Linux 3.13.11-ckt13 + - LP: #1408779 + * ipv6: fix swapped ipv4/ipv6 mtu_reduced callbacks + - LP: #1404558 + * arm64: Fix machine_shutdown() definition + - LP: #1404335 + * arm64: Fix deadlock scenario with smp_send_stop() + - LP: #1404335 + * iwlwifi: mvm: a few more SKUs for 7260 and 3160 + * iwlwifi: fix and add 7265 series HW IDs + - LP: #1408222 + + -- Seth Forshee Tue, 13 Jan 2015 11:37:56 -0600 + +linux 
(3.13.0-44.73) trusty; urgency=low + + [ Kamal Mostafa ] + + * Release Tracking Bug + - LP: #1402872 + + [ AceLan Kao ] + + * SAUCE: Add use_native_backlight quirk for HP ProBook 6570b + - LP: #1359010 + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) arm64: optimized copy_to_user and copy_from_user + assembly code" + - LP: #1398596 + * [Config] updateconfigs to balance CONFIG_SCOM_DEBUGFS + + [ Upstream Kernel Changes ] + + * iwlwifi: mvm: fix merge damage + - LP: #1393317 + * iwlwifi: remove IWL_UCODE_TLV_FLAGS_SCHED_SCAN flag + - LP: #1393317 + * iwlwifi: mvm: disable scheduled scan to prevent firmware crash + - LP: #1393317 + * iwlwifi: mvm: enable scheduled scan on newest firmware + - LP: #1393317 + * x86: kvm: use alternatives for VMCALL vs. VMMCALL if kernel text is + read-only + - LP: #1379340 + * phylib: introduce PHY_INTERFACE_MODE_XGMII for 10G PHY + - LP: #1381084 + * of: make of_get_phy_mode parse 'phy-connection-type' + - LP: #1381084 + * xen-netfront: Remove BUGs on paged skb data which crosses a page + boundary + - LP: #1275879 + * ACPI / blacklist: blacklist Win8 OSI for Dell Vostro 3546 + - LP: #1383589 + * powerpc/pseries: Fix endiannes issue in RTAS call from xmon + - LP: #1396235 + * mmc: sdhci-pci-o2micro: Fix Dell E5440 issue + - LP: #1346067 + * mfd: rtsx: Fix PM suspend for 5227 & 5249 + - LP: #1359052 + * drivers:scsi:storvsc: Fix a bug in handling ring buffer failures that + may result in I/O freeze + - LP: #1400289 + * arm64: optimized copy_to_user and copy_from_user assembly code + - LP: #1400349 + * net:socket: set msg_namelen to 0 if msg_name is passed as NULL in + msghdr struct from userland. + - LP: #1335478 + * drm/radeon: initialize sadb to NULL in the audio code + - LP: #1402714 + * powerpc/vphn: NUMA node code expects big-endian + - LP: #1401150 + * ALSA: usb-audio: Fix device_del() sysfs warnings at disconnect + - LP: #1402853 + * ALSA: hda - Add mute LED pin quirk for HP 15 touchsmart + - LP: #1334950, #1402853 + * rcu: Make callers awaken grace-period kthread + - LP: #1402853 + * rcu: Use rcu_gp_kthread_wake() to wake up grace period kthreads + - LP: #1402853 + * net: sctp: fix NULL pointer dereference in af->from_addr_param on + malformed packet + - LP: #1402853 + * KVM: x86: Don't report guest userspace emulation error to userspace + - LP: #1402853 + * [media] ttusb-dec: buffer overflow in ioctl + - LP: #1402853 + * arm64: __clear_user: handle exceptions on strb + - LP: #1402853 + * ARM: pxa: fix hang on startup with DEBUG_LL + - LP: #1402853 + * samsung-laptop: Add broken-acpi-video quirk for NC210/NC110 + - LP: #1402853 + * acer-wmi: Add Aspire 5741 to video_vendor_dmi_table + - LP: #1402853 + * acer-wmi: Add acpi_backlight=video quirk for the Acer KAV80 + - LP: #1402853 + * rbd: Fix error recovery in rbd_obj_read_sync() + - LP: #1402853 + * [media] ds3000: fix LNB supply voltage on Tevii S480 on initialization + - LP: #1402853 + * powerpc: do_notify_resume can be called with bad thread_info flags + argument + - LP: #1402853 + * USB: kobil_sct: fix non-atomic allocation in write path + - LP: #1402853 + * USB: opticon: fix non-atomic allocation in write path + - LP: #1402853 + * regulator: max77693: Fix use of uninitialized regulator config + - LP: #1402853 + * USB: cdc-acm: add device id for GW Instek AFG-2225 + - LP: #1402853 + * usb: Do not allow usb_alloc_streams on unconfigured devices + - LP: #1402853 + * usb-storage: handle a skipped data phase + - LP: #1402853 + * xhci: Switch only Intel Lynx Point-LP ports to EHCI on shutdown. 
+ - LP: #1402853 + * xhci: no switching back on non-ULT Haswell + - LP: #1402853 + * of: Fix overflow bug in string property parsing functions + - LP: #1402853 + * spi: fsl-dspi: Fix CTAR selection + - LP: #1402853 + * Btrfs: fix kfree on list_head in btrfs_lookup_csums_range error cleanup + - LP: #1402853 + * staging:iio:ade7758: Fix NULL pointer deref when enabling buffer + - LP: #1402853 + * staging:iio:ade7758: Fix check if channels are enabled in prenable + - LP: #1402853 + * staging:iio:ade7758: Remove "raw" from channel name + - LP: #1402853 + * USB: cdc-acm: only raise DTR on transitions from B0 + - LP: #1402853 + * serial: Fix divide-by-zero fault in uart_get_divisor() + - LP: #1402853 + * tty: Fix high cpu load if tty is unreleaseable + - LP: #1402853 + * tty: Prevent "read/write wait queue active!" log flooding + - LP: #1402853 + * tty/vt: don't set font mappings on vc not supporting this + - LP: #1402853 + * spi: pxa2xx: toggle clocks on suspend if not disabled by runtime PM + - LP: #1402853 + * sysfs: driver core: Fix glue dir race condition by gdp_mutex + - LP: #1402853 + * i2c: at91: don't account as iowait + - LP: #1402853 + * nfsd: don't try to reuse an expired DRC entry off the list + - LP: #1402853 + * nfsd: don't halt scanning the DRC LRU list when there's an RC_INPROG + entry + - LP: #1402853 + * dm bufio: change __GFP_IO to __GFP_FS in shrinker callbacks + - LP: #1402853 + * xtensa: re-wire umount syscall to sys_oldumount + - LP: #1402853 + * dm raid: ensure superblock's size matches device's logical block size + - LP: #1402853 + * ahci: disable MSI instead of NCQ on Samsung pci-e SSDs on macbooks + - LP: #1402853 + * ahci: Add Device IDs for Intel Sunrise Point PCH + - LP: #1402853 + * power: charger-manager: Fix accessing invalidated power supply after + charger unbind + - LP: #1402853 + * mac80211: use secondary channel offset IE also beacons during CSA + - LP: #1402853 + * mac80211: schedule the actual switch of the station before CSA count 0 + - LP: #1402853 + * mac80211: properly flush delayed scan work on interface removal + - LP: #1402853 + * mac80211: fix use-after-free in defragmentation + - LP: #1402853 + * tun: Fix csum_start with VLAN acceleration + - LP: #1402853 + * macvtap: Fix csum_start when VLAN tags are present + - LP: #1402853 + * dm thin: grab a virtual cell before looking up the mapping + - LP: #1402853 + * KVM: x86: Fix uninitialized op->type for some immediate values + - LP: #1402853 + * crypto: caam - fix missing dma unmap on error path + - LP: #1402853 + * hwrng: pseries - port to new read API and fix stack corruption + - LP: #1402853 + * drm/radeon: set correct CE ram size for CIK + - LP: #1402853 + * drm/radeon: make sure mode init is complete in bandwidth_update + - LP: #1402853 + * drm/radeon: use gart for DMA IB tests + - LP: #1402853 + * drm/radeon: add missing crtc unlock when setting up the MC + - LP: #1402853 + * ALSA: hda_intel: Add Device IDs for Intel Sunrise Point PCH + - LP: #1402853 + * ALSA: hda_intel: Add DeviceIDs for Sunrise Point-LP + - LP: #1402853 + * Input: alps - ignore potential bare packets when device is out of sync + - LP: #1402853 + * Input: alps - allow up to 2 invalid packets without resetting device + - LP: #1402853 + * scsi: only re-lock door after EH on devices that were reset + - LP: #1402853 + * dm btree: fix a recursion depth bug in btree walking code + - LP: #1402853 + * parisc: Use compat layer for msgctl, shmat, shmctl and semtimedop + syscalls + - LP: #1402853 + * ALSA: usb-audio: Fix memory leak in 
FTU quirk + - LP: #1402853 + * audit: keep inode pinned + - LP: #1402853 + * nfs: fix pnfs direct write memory leak + - LP: #1402853 + * nfs: Fix use of uninitialized variable in nfs_getattr() + - LP: #1402853 + * NFSv4: Ensure that we remove NFSv4.0 delegations when state has expired + - LP: #1402853 + * NFSv4.1: nfs41_clear_delegation_stateid shouldn't trust + NFS_DELEGATED_STATE + - LP: #1402853 + * NFSv4: Fix races between nfs_remove_bad_delegation() and delegation + return + - LP: #1402853 + * NFSv4: Ensure that we call FREE_STATEID when NFSv4.x stateids are + revoked + - LP: #1402853 + * NFS: Don't try to reclaim delegation open state if recovery failed + - LP: #1402853 + * libceph: do not crash on large auth tickets + - LP: #1402853 + * ARM: 8191/1: decompressor: ensure I-side picks up relocated code + - LP: #1402853 + * ARM: 8198/1: make kuser helpers depend on MMU + - LP: #1402853 + * zram: avoid kunmap_atomic() of a NULL pointer + - LP: #1402853 + * Input: alps - ignore bad data on Dell Latitudes E6440 and E7440 + - LP: #1402853 + * firewire: cdev: prevent kernel stack leaking into ioctl arguments + - LP: #1402853 + * md: Always set RECOVERY_NEEDED when clearing RECOVERY_FROZEN + - LP: #1402853 + * nfs: Don't busy-wait on SIGKILL in __nfs_iocounter_wait + - LP: #1402853 + * target: Don't call TFO->write_pending if data_length == 0 + - LP: #1402853 + * vhost-scsi: Take configfs group dependency during + VHOST_SCSI_SET_ENDPOINT + - LP: #1402853 + * srp-target: Retry when QP creation fails with ENOMEM + - LP: #1402853 + * ASoC: fsi: remove unsupported PAUSE flag + - LP: #1402853 + * ASoC: rsnd: remove unsupported PAUSE flag + - LP: #1402853 + * ib_isert: Add max_send_sge=2 minimum for control PDU responses + - LP: #1402853 + * iser-target: Handle DEVICE_REMOVAL event on network portal listener + correctly + - LP: #1402853 + * ASoC: dpcm: Fix race between FE/BE updates and trigger + - LP: #1402853 + * mac80211: Fix regression that triggers a kernel BUG with CCMP + - LP: #1402853 + * rt2x00: do not align payload on modern H/W + - LP: #1402853 + * ath9k: Add version/revision macros for QCA9531 + - LP: #1402853 + * ath9k: Fix RTC_DERIVED_CLK usage + - LP: #1402853 + * ASoC: sgtl5000: Fix SMALL_POP bit definition + - LP: #1402853 + * ALSA: usb-audio: Add ctrl message delay quirk for Marantz/Denon devices + - LP: #1402853 + * bitops: Fix shift overflow in GENMASK macros + - LP: #1402853 + * x86: Require exact match for 'noxsave' command line option + - LP: #1402853 + * drm/i915: drop WaSetupGtModeTdRowDispatch:snb + - LP: #1402853 + * ASoC: wm_adsp: Avoid attempt to free buffers that might still be in use + - LP: #1402853 + * can: dev: avoid calling kfree_skb() from interrupt context + - LP: #1402853 + * can: esd_usb2: fix memory leak on disconnect + - LP: #1402853 + * x86, mm: Set NX across entire PMD at boot + - LP: #1402853 + * of/irq: Drop obsolete 'interrupts' vs 'interrupts-extended' text + - LP: #1402853 + * of/base: Fix PowerPC address parsing hack + - LP: #1402853 + * clockevent: sun4i: Fix race condition in the probe code + - LP: #1402853 + * MIPS: oprofile: Fix backtrace on 64-bit kernel + - LP: #1402853 + * ACPI / PM: Ignore wakeup setting if the ACPI companion can't wake up + - LP: #1402853 + * IB/isert: Adjust CQ size to HW limits + - LP: #1402853 + * drm/radeon: fix endian swapping in vbios fetch for tdp table + - LP: #1402853 + * Linux 3.13.11-ckt12 + - LP: #1402853 + * mm: Remove false WARN_ON from pagecache_isize_extended() + - LP: #1402764 + + -- Kamal Mostafa Mon, 15 
Dec 2014 16:00:52 -0800 + +linux (3.13.0-43.72) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1400408 + + [ Upstream Kernel Changes ] + + * x86_64, traps: Fix the espfix64 #DF fixup and rewrite it in C + - LP: #1398795 + - CVE-2014-9090 + * x86_64, traps: Rework bad_iret + - LP: #1398795 + - CVE-2014-9090 + * x86, kvm: Clear paravirt_enabled on KVM guests for espfix32's benefit + - LP: #1400314 + - CVE-2014-8134 + + -- Luis Henriques Mon, 08 Dec 2014 17:27:21 +0000 + +linux (3.13.0-42.71) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1399663 + + [ Upstream Kernel Changes ] + + * x86_64, traps: Stop using IST for #SS + - LP: #1398795 + - CVE-2014-9090 + + -- Luis Henriques Fri, 05 Dec 2014 14:29:10 +0000 + +linux (3.13.0-41.70) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1396112 + + [ Chris J Arges ] + + * [Config] CONFIG_SCOM_DEBUGFS=y for powerpc/powerpc64-smp + ppc64el/generic + - LP: #1395855 + + [ Upstream Kernel Changes ] + + * Revert "KVM: x86: Handle errors when RIP is set during far jumps" + - LP: #1393477 + * Revert "net/macb: add pinctrl consumer support" + - LP: #1393477 + * Revert "iwlwifi: mvm: treat EAPOLs like mgmt frames wrt rate" + - LP: #1393477 + * Revert "ipmi: simplify locking" + - LP: #1383921 + * ACPI / blacklist: add Win8 OSI quirks for some Dell laptop models + - LP: #1339456 + * ACPI / battery: Accelerate battery resume callback + - LP: #838543 + * tools: cpu-hotplug fix unexpected operator error + * netlink: reset network header before passing to taps + - LP: #1393477 + * rtnetlink: fix VF info size + - LP: #1393477 + * myri10ge: check for DMA mapping errors + - LP: #1393477 + * tcp: don't use timestamp from repaired skb-s to calculate RTT (v2) + - LP: #1393477 + * sit: Fix ipip6_tunnel_lookup device matching criteria + - LP: #1393477 + * tcp: fix tcp_release_cb() to dispatch via address family for + mtu_reduced() + - LP: #1393477 + * tcp: fix ssthresh and undo for consecutive short FRTO episodes + - LP: #1393477 + * packet: handle too big packets for PACKET_V3 + - LP: #1393477 + * openvswitch: fix panic with multiple vlan headers + - LP: #1393477 + * vxlan: fix incorrect initializer in union vxlan_addr + - LP: #1393477 + * l2tp: fix race while getting PMTU on PPP pseudo-wire + - LP: #1393477 + * bonding: fix div by zero while enslaving and transmitting + - LP: #1393477 + * bridge: Check if vlan filtering is enabled only once. + - LP: #1393477 + * bridge: Fix br_should_learn to check vlan_enabled + - LP: #1393477 + * net: allow macvlans to move to net namespace + - LP: #1393477 + * tg3: Work around HW/FW limitations with vlan encapsulated frames + - LP: #1393477 + * tg3: Allow for recieve of full-size 8021AD frames + - LP: #1393477 + * xfrm: Generate blackhole routes only from route lookup functions + - LP: #1393477 + * xfrm: Generate queueing routes only from route lookup functions + - LP: #1393477 + * macvtap: Fix race between device delete and open. + - LP: #1393477 + * gro: fix aggregation for skb using frag_list + - LP: #1393477 + * hyperv: Fix a bug in netvsc_start_xmit() + - LP: #1393477 + * ip6_gre: fix flowi6_proto value in xmit path + - LP: #1393477 + * team: avoid race condition in scheduling delayed work + - LP: #1393477 + * sctp: handle association restarts when the socket is closed. 
+ - LP: #1393477 + * tcp: fixing TLP's FIN recovery + - LP: #1393477 + * sparc64: Do not disable interrupts in nmi_cpu_busy() + - LP: #1393477 + * sparc64: Fix pcr_ops initialization and usage bugs. + - LP: #1393477 + * sparc32: dma_alloc_coherent must honour gfp flags + - LP: #1393477 + * sparc64: sun4v TLB error power off events + - LP: #1393477 + * sparc64: Fix corrupted thread fault code. + - LP: #1393477 + * sparc64: find_node adjustment + - LP: #1393477 + * sparc64: Move request_irq() from ldc_bind() to ldc_alloc() + - LP: #1393477 + * sparc: Let memset return the address argument + - LP: #1393477 + * sparc64: Fix reversed start/end in flush_tlb_kernel_range() + - LP: #1393477 + * sparc64: Fix lockdep warnings on reboot on Ultra-5 + - LP: #1393477 + * sparc64: Fix FPU register corruption with AES crypto offload. + - LP: #1393477 + * sparc64: Do not define thread fpregs save area as zero-length array. + - LP: #1393477 + * sparc64: Fix hibernation code refrence to PAGE_OFFSET. + - LP: #1393477 + * sparc64: correctly recognise M6 and M7 cpu type + - LP: #1393477 + * sparc64: support M6 and M7 for building CPU distribution map + - LP: #1393477 + * sparc64: cpu hardware caps support for sparc M6 and M7 + - LP: #1393477 + * sparc64: T5 PMU + - LP: #1393477 + * sparc64: Switch to 4-level page tables. + - LP: #1393477 + * sparc64: Define VA hole at run time, rather than at compile time. + - LP: #1393477 + * sparc64: Adjust KTSB assembler to support larger physical addresses. + - LP: #1393477 + * sparc64: Fix physical memory management regressions with large + max_phys_bits. + - LP: #1393477 + * sparc64: Use kernel page tables for vmemmap. + - LP: #1393477 + * sparc64: Increase MAX_PHYS_ADDRESS_BITS to 53. + - LP: #1393477 + * sparc64: Adjust vmalloc region size based upon available virtual + address bits. + - LP: #1393477 + * sparc64: sparse irq + - LP: #1393477 + * sparc64: Kill unnecessary tables and increase MAX_BANKS. + - LP: #1393477 + * sparc64: Increase size of boot string to 1024 bytes + - LP: #1393477 + * sparc64: Fix register corruption in top-most kernel stack frame during + boot. + - LP: #1393477 + * sparc64: Implement __get_user_pages_fast(). 
+ - LP: #1393477 + * ext4: check EA value offset when loading + - LP: #1393477 + * jbd2: free bh when descriptor block checksum fails + - LP: #1393477 + * ext4: don't check quota format when there are no quota files + - LP: #1393477 + * target: Fix queue full status NULL pointer for SCF_TRANSPORT_TASK_SENSE + - LP: #1393477 + * vfs: fix data corruption when blocksize < pagesize for mmaped data + - LP: #1393477 + * ext4: fix mmap data corruption when blocksize < pagesize + - LP: #1393477 + * ext4: grab missed write_count for EXT4_IOC_SWAP_BOOT + - LP: #1393477 + * qla_target: don't delete changed nacls + - LP: #1393477 + * target: Fix APTPL metadata handling for dynamic MappedLUNs + - LP: #1393477 + * iser-target: Disable TX completion interrupt coalescing + - LP: #1393477 + * ext4: don't orphan or truncate the boot loader inode + - LP: #1393477 + * ext4: add ext4_iget_normal() which is to be used for dir tree lookups + - LP: #1393477 + * ext4: fix reservation overflow in ext4_da_write_begin + - LP: #1393477 + * ext4: Replace open coded mdata csum feature to helper function + - LP: #1393477 + * ext4: move error report out of atomic context in + ext4_init_block_bitmap() + - LP: #1393477 + * ARC: [nsimosci] Allow "headless" models to boot + - LP: #1393477 + * ARC: Update order of registers in KGDB to match GDB 7.5 + - LP: #1393477 + * ext4: check s_chksum_driver when looking for bg csum presence + - LP: #1393477 + * drm/radeon: fix speaker allocation setup + - LP: #1393477 + * drm/radeon: use gart memory for DMA ring tests + - LP: #1393477 + * compiler: define OPTIMIZER_HIDE_VAR() macro + - LP: #1393477 + * random: add and use memzero_explicit() for clearing data + - LP: #1393477 + * ALSA: pcm: use the same dma mmap codepath both for arm and arm64 + - LP: #1393477 + * ALSA: ALC283 codec - Avoid pop noise on headphones during + suspend/resume + - LP: #1393477 + * ALSA: usb-audio: Add support for Steinberg UR22 USB interface + - LP: #1393477 + * ALSA: hda - hdmi: Fix missing ELD change event on plug/unplug + - LP: #1393477 + * arm64: compat: fix compat types affecting struct compat_elf_prpsinfo + - LP: #1393477 + * freezer: Do not freeze tasks killed by OOM killer + - LP: #1393477 + * OOM, PM: OOM killed task shouldn't escape PM suspend + - LP: #1393477 + * qxl: don't create too large primary surface + - LP: #1393477 + * MIPS: tlbex: Properly fix HUGE TLB Refill exception handler + - LP: #1393477 + * drm/cirrus: bind also to qemu-xen-traditional + - LP: #1393477 + * cpufreq: intel_pstate: Fix setting max_perf_pct in performance policy + - LP: #1393477 + * cpufreq: expose scaling_cur_freq sysfs file for set_policy() drivers + - LP: #1393477 + * cpufreq: intel_pstate: Reflect current no_turbo state correctly + - LP: #1393477 + * intel_pstate: Don't lose sysfs settings during cpu offline + - LP: #1393477 + * intel_pstate: Fix BYT frequency reporting + - LP: #1393477 + * intel_pstate: Correct BYT VID values. + - LP: #1393477 + * MIPS: ftrace: Fix a microMIPS build problem + - LP: #1393477 + * kvm: x86: don't kill guest on unknown exit reason + - LP: #1393477 + * kvm: fix excessive pages un-pinning in kvm_iommu_map error path. 
+ - LP: #1393477 + * KVM: x86: use new CS.RPL as CPL during task switch + - LP: #1393477 + * KVM: x86: Handle errors when RIP is set during far jumps + - LP: #1393477 + * KVM: x86: Fix far-jump to non-canonical check + - LP: #1393477 + * staging:iio:ad5933: Fix NULL pointer deref when enabling buffer + - LP: #1393477 + * staging:iio:ad5933: Drop "raw" from channel names + - LP: #1393477 + * iio: st_sensors: Fix buffer copy + - LP: #1393477 + * iio: mxs-lradc: Propagate the real error code on platform_get_irq() + failure + - LP: #1393477 + * iio: adc: mxs-lradc: Disable the clock on probe failure + - LP: #1393477 + * spi: pl022: Fix incorrect dma_unmap_sg + - LP: #1393477 + * mac80211: fix typo in starting baserate for rts_cts_rate_idx + - LP: #1393477 + * usb: dwc3: gadget: fix set_halt() bug with pending transfers + - LP: #1393477 + * usb: gadget: function: acm: make f_acm pass USB20CV Chapter9 + - LP: #1393477 + * ext3: Don't check quota format when there are no quota files + - LP: #1393477 + * quota: Properly return errors from dquot_writeback_dquots() + - LP: #1393477 + * USB: serial: cp210x: add Silicon Labs 358x VID and PID + - LP: #1393477 + * usb: serial: ftdi_sio: add Awinda Station and Dongle products + - LP: #1393477 + * usb: option: add support for Telit LE910 + - LP: #1393477 + * USB: option: add Haier CE81B CDMA modem + - LP: #1393477 + * x86, apic: Handle a bad TSC more gracefully + - LP: #1393477 + * i3200_edac: Report CE events properly + - LP: #1393477 + * i82860_edac: Report CE events properly + - LP: #1393477 + * cpc925_edac: Report UE events properly + - LP: #1393477 + * e7xxx_edac: Report CE events properly + - LP: #1393477 + * scsi: Fix error handling in SCSI_IOCTL_SEND_COMMAND + - LP: #1393477 + * usb: serial: ftdi_sio: add "bricked" FTDI device PID + - LP: #1393477 + * usb: musb: cppi41: restart hrtimer only if not yet done + - LP: #1393477 + * usb: gadget: udc: core: fix kernel oops with soft-connect + - LP: #1393477 + * nfsd4: fix crash on unknown operation number + - LP: #1393477 + * iwlwifi: configure the LTR + - LP: #1393477 + * mac80211: add vif to flush call + - LP: #1393477 + * iwlwifi: dvm: drop non VO frames when flushing + - LP: #1393477 + * usb: dwc3: gadget: Properly initialize LINK TRB + - LP: #1393477 + * Input: i8042 - quirks for Fujitsu Lifebook A544 and Lifebook AH544 + - LP: #1393477 + * posix-timers: Fix stack info leak in timer_create() + - LP: #1393477 + * futex: Fix a race condition between REQUEUE_PI and task death + - LP: #1393477 + * PM / Sleep: fix recovery during resuming from hibernation + - LP: #1393477 + * ALSA: pcm: Zero-clear reserved fields of PCM status ioctl in compat + mode + - LP: #1393477 + * ima: check xattr value length and type in the ima_inode_setxattr() + - LP: #1393477 + * evm: check xattr value length and type in evm_inode_setxattr() + - LP: #1393477 + * drm/radeon/dpm: disable ulv support on SI + - LP: #1393477 + * drm/radeon: dpm fixes for asrock systems + - LP: #1393477 + * drm/radeon: remove invalid pci id + - LP: #1393477 + * x86, pageattr: Prevent overflow in slow_virt_to_phys() for X86_PAE + - LP: #1393477 + * cgroup/kmemleak: add kmemleak_free() for cgroup deallocations. 
+ - LP: #1393477 + * mm: free compound page with correct order + - LP: #1393477 + * mm, thp: fix collapsing of hugepages on madvise + - LP: #1393477 + * lib/bitmap.c: fix undefined shift in __bitmap_shift_{left|right}() + - LP: #1393477 + * ext4: fix overflow when updating superblock backups after resize + - LP: #1393477 + * ext4: fix oops when loading block bitmap failed + - LP: #1393477 + * ext4: enable journal checksum when metadata checksum feature enabled + - LP: #1393477 + * ext4: bail out from make_indexed_dir() on first error + - LP: #1393477 + * PCI: Rename sysfs 'enabled' file back to 'enable' + - LP: #1393477 + * wireless: rt2x00: add new rt2800usb device + - LP: #1393477 + * fs: allow open(dir, O_TMPFILE|..., 0) with mode 0 + - LP: #1393477 + * tracing/syscalls: Ignore numbers outside NR_syscalls' range + - LP: #1393477 + * x86_64, entry: Fix out of bounds read on sysenter + - LP: #1393477 + * ACPI / EC: Add support to disallow QR_EC to be issued when SCI_EVT + isn't set + - LP: #1393477 + * ACPI / EC: Fix regression due to conflicting firmware behavior between + Samsung and Acer. + - LP: #1393477 + * net: sctp: fix skb_over_panic when receiving malformed ASCONF chunks + - LP: #1393477 + * net: sctp: fix panic on duplicate ASCONF chunks + - LP: #1393477 + * net: sctp: fix remote memory pressure from excessive queueing + - LP: #1393477 + * Linux 3.13.11.11 + - LP: #1393477 + * MAINTAINERS: Update APM X-Gene section + - LP: #1381084 + * Documentation: dts: Update section header for APM X-Gene + - LP: #1381084 + * dtb: Add 10GbE node to APM X-Gene SoC device tree + - LP: #1381084 + * drivers: net: xgene: Preparing for adding 10GbE support + - LP: #1381084 + * drivers: net: xgene: Add 10GbE support + - LP: #1381084 + * drivers: net: xgene: Add 10GbE ethtool support + - LP: #1381084 + * dtb: Add SGMII based 1GbE node to APM X-Gene SoC device tree + - LP: #1381216 + * drivers: net: xgene: Preparing for adding SGMII based 1GbE + - LP: #1381216 + * drivers: net: xgene: Add SGMII based 1GbE support + - LP: #1381216 + * drivers: net: xgene: Add SGMII based 1GbE ethtool support + - LP: #1381216 + * drivers: net: xgene: Rewrite buggy loop in xgene_enet_ecc_init() + - LP: #1381216 + * dtb: xgene: fix: Backward compatibility with older firmware + - LP: #1381084, #1381216 + * drivers: net: xgene: Backward compatibility with older firmware + - LP: #1381084, #1381216 + * drivers: net: xgene: fix: Use separate resources + - LP: #1381216 + * HID: Add the transport-driver functions to the HIDP driver. + - LP: #1393764 + * ipc: fix compat msgrcv with negative msgtyp + - LP: #1393355 + + -- Luis Henriques Tue, 25 Nov 2014 12:07:01 +0000 + +linux (3.13.0-40.69) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - re-used previous tracking bug + + [ Upstream Kernel Changes ] + + * regmap: fix kernel hang on regmap_bulk_write with zero val_count. + + -- Luis Henriques Thu, 13 Nov 2014 17:13:01 +0000 + +linux (3.13.0-40.68) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1388943 + * SAUCE: DEP8 test to run our regression tests + - LP: #1385330 + * SAUCE: The very first thing we should do when testing is make sure we + are testing the correct kernel + - LP: #1385330 + + [ dann frazier ] + + * [Config] Disable CONFIG_IPMI_SI_PROBE_DEFAULTS on armhf and arm64 + - LP: #1388952 + + [ Duc Dang ] + + * SAUCE: (no-up) [PCIE] APM X-Gene: Remove debug messages in MSI + interrupt handler path. 
+ - LP: #1382244 + * SAUCE: (no-up) PCI: X-Gene: Fix max payload size and phantom function + configuration + - LP: #1386261 + + [ McAulay, Alistair ] + + * SAUCE: drm/i915: Rework GPU reset sequence to match driver load & thaw + - LP: #1384469 + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Fix cherry-pick typo + - LP: #1384469 + + [ Upstream Kernel Changes ] + + * Revert "mac80211: disable uAPSD if all ACs are under ACM" + - LP: #1381234 + * Revert "iwlwifi: dvm: don't enable CTS to self" + - LP: #1381234 + * Revert "lzo: properly check for overruns" + - LP: #1387886 + * drm/i915: provide interface for audio driver to query cdclk + - LP: #1381168 + * regulatory: add NUL to alpha2 + - LP: #1381234 + * percpu: fix pcpu_alloc_pages() failure path + - LP: #1381234 + * percpu: perform tlb flush after pcpu_map_pages() failure + - LP: #1381234 + * cgroup: reject cgroup names with '\n' + - LP: #1381234 + * vfs: add d_is_dir() + - LP: #1381234 + * CIFS: Fix directory rename error + - LP: #1381234 + * usb: phy: twl4030-usb: Fix lost interrupts after ID pin goes down + - LP: #1381234 + * rtlwifi: rtl8192cu: Add new ID + - LP: #1381234 + * CIFS: Fix wrong restart readdir for SMB1 + - LP: #1381234 + * CIFS: Fix wrong filename length for SMB2 + - LP: #1381234 + * ahci: Add Device IDs for Intel 9 Series PCH + - LP: #1381234 + * ata_piix: Add Device IDs for Intel 9 Series PCH + - LP: #1381234 + * USB: zte_ev: fix removed PIDs + - LP: #1381234 + * USB: ftdi_sio: add support for NOVITUS Bono E thermal printer + - LP: #1381234 + * USB: sierra: avoid CDC class functions on "68A3" devices + - LP: #1381234 + * USB: sierra: add 1199:68AA device ID + - LP: #1381234 + * iommu/arm-smmu: fix programming of SMMU_CBn_TCR for stage 1 + - LP: #1381234 + * iommu/arm-smmu: remove pgtable_page_{c,d}tor() + - LP: #1381234 + * usb: gadget: fusb300_udc.h: Fix typo in include guard + - LP: #1381234 + * usb: phy: tegra: Avoid use of sizeof(void) + - LP: #1381234 + * arm64: use irq_set_affinity with force=false when migrating irqs + - LP: #1381234 + * block: Fix dev_t minor allocation lifetime + - LP: #1381234 + * usb: dwc3: core: fix order of PM runtime calls + - LP: #1381234 + * usb: dwc3: core: fix ordering for PHY suspend + - LP: #1381234 + * usb: dwc3: omap: fix ordering for runtime pm calls + - LP: #1381234 + * iommu/fsl: Fix warning resulting from adding PCI device twice + - LP: #1381234 + * ahci: add pcid for Marvel 0x9182 controller + - LP: #1381234 + * drm/i915: Fix EIO/wedged handling in gem fault handler + - LP: #1381234 + * ACPI / RTC: Fix CMOS RTC opregion handler accesses to wrong addresses + - LP: #1381234 + * drm/i915: Evict CS TLBs between batches + - LP: #1381234 + * drm/i915: Wait for vblank before enabling the TV encoder + - LP: #1381234 + * lockd: fix rpcbind crash on lockd startup failure + - LP: #1381234 + * drm/radeon: fix semaphore value init + - LP: #1381234 + * drm/radeon: add connector quirk for fujitsu board + - LP: #1381234 + * imx-drm: ipuv3-plane: fix ipu_plane_dpms() + - LP: #1381234 + * usb: host: xhci: fix compliance mode workaround + - LP: #1381234 + * Input: elantech - fix detection of touchpad on ASUS s301l + - LP: #1381234 + * USB: ftdi_sio: Add support for GE Healthcare Nemo Tracker device + - LP: #1381234 + * uwb: init beacon cache entry before registering uwb device + - LP: #1381234 + * nfs: fix kernel warning when removing proc entry + - LP: #1381234 + * drm/radeon/dpm: set the thermal type properly for special configs + - LP: #1381234 + * dm cache: fix race causing dirty blocks to be 
marked as clean + - LP: #1381234 + * libceph: gracefully handle large reply messages from the mon + - LP: #1381234 + * Input: serport - add compat handling for SPIOCSTYPE ioctl + - LP: #1381234 + * usb: hub: take hub->hdev reference when processing from eventlist + - LP: #1381234 + * eventpoll: fix uninitialized variable in epoll_ctl + - LP: #1381234 + * kcmp: fix standard comparison bug + - LP: #1381234 + * fsnotify/fdinfo: use named constants instead of hardcoded values + - LP: #1381234 + * fs/notify: don't show f_handle if exportfs_encode_inode_fh failed + - LP: #1381234 + * arm64: flush TLS registers during exec + - LP: #1381234 + * storage: Add single-LUN quirk for Jaz USB Adapter + - LP: #1381234 + * xhci: Fix null pointer dereference if xhci initialization fails + - LP: #1381234 + * usb: xhci: Fix OOPS in xhci error handling code + - LP: #1381234 + * xhci: fix oops when xhci resumes from hibernate with hw lpm capable + devices + - LP: #1381234 + * drm/ast: open key before detect chips + - LP: #1381234 + * drm/ast: AST2000 cannot be detected correctly + - LP: #1381234 + * futex: Unlock hb->lock in futex_wait_requeue_pi() error path + - LP: #1381234 + * jiffies: Fix timeval conversion to jiffies + - LP: #1381234 + * alarmtimer: Return relative times in timer_gettime + - LP: #1381234 + * alarmtimer: Do not signal SIGEV_NONE timers + - LP: #1381234 + * alarmtimer: Lock k_itimer during timer callback + - LP: #1381234 + * parisc: Implement new LWS CAS supporting 64 bit operations. + - LP: #1381234 + * don't bugger nd->seq on set_root_rcu() from follow_dotdot_rcu() + - LP: #1381234 + * be careful with nd->inode in path_init() and follow_dotdot_rcu() + - LP: #1381234 + * iio:inkern: fix overwritten -EPROBE_DEFER in of_iio_channel_get_by_name + - LP: #1381234 + * iio:trigger: modify return value for iio_trigger_get + - LP: #1381234 + * iio: accel: bma180: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: adc: ad_sigma_delta: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: hid_sensor_hub: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: st_sensors: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: gyro: itg3200: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: inv_mpu6050: Fix indio_dev->trig assignment + - LP: #1381234 + * iio: meter: ade7758: Fix indio_dev->trig assignment + - LP: #1381234 + * MIPS: ZBOOT: add missing include + - LP: #1381234 + * spi: fsl: Don't use devm_kzalloc in master->setup callback + - LP: #1381234 + * spi: dw: Don't use devm_kzalloc in master->setup callback + - LP: #1381234 + * ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs + - LP: #1381234 + * cx18: fix kernel oops with tda8290 tuner + - LP: #1381234 + * ASoC: davinci-mcasp: Correct rx format unit configuration + - LP: #1381234 + * spi: sirf: enable RX_IO_DMA_INT interrupt + - LP: #1381234 + * perf: Fix a race condition in perf_remove_from_context() + - LP: #1381234 + * ASoC: samsung-i2s: Maintain CDCLK settings across + i2s_{shutdown/startup} + - LP: #1381234 + * ASoC: samsung-i2s: Check secondary DAI exists before referencing + - LP: #1381234 + * ALSA: hda - Fix invalid pin powermap without jack detection + - LP: #1381234 + * Input: atkbd - do not try 'deactivate' keyboard on any LG laptops + - LP: #1381234 + * Input: i8042 - add Fujitsu U574 to no_timeout dmi table + - LP: #1381234 + * Input: i8042 - add nomux quirk for Avatar AVIU-145A6 + - LP: #1381234 + * iio: adc: at91: don't use the last converted data register + - LP: #1381234 + * 
iio:magnetometer: bugfix magnetometers gain values + - LP: #1381234 + * drm/i915: Fix SRC_COPY width on 830/845g + - LP: #1381234 + * Target/iser: Get isert_conn reference once got to connected_handler + - LP: #1381234 + * Target/iser: Don't put isert_conn inside disconnected handler + - LP: #1381234 + * ARM: 8148/1: flush TLS and thumbee register state during exec + - LP: #1381234 + * x86, ia64: Move EFI_FB vga_default_device() initialization to + pci_vga_fixup() + - LP: #1381234 + * vgaarb: Don't default exclusively to first video device with mem+io + - LP: #1381234 + * iscsi-target: Fix memory corruption in + iscsit_logout_post_handler_diffcid + - LP: #1381234 + * iscsi-target: avoid NULL pointer in iscsi_copy_param_list failure + - LP: #1381234 + * NFSv4: nfs4_state_manager() vs. nfs_server_remove_lists() + - LP: #1381234 + * NFSv4: Fix another bug in the close/open_downgrade code + - LP: #1381234 + * drm/radeon: don't reset dma on NI/SI init + - LP: #1381234 + * drm/radeon: don't reset sdma on CIK init + - LP: #1381234 + * drm/radeon: don't reset dma on r6xx-evergreen init + - LP: #1381234 + * vgaswitcheroo: add vga_switcheroo_fini_domain_pm_ops + - LP: #1381234 + * drm/radeon/px: fix module unload + - LP: #1381234 + * drm/nouveau/runpm: fix module unload + - LP: #1381234 + * libiscsi: fix potential buffer overrun in __iscsi_conn_send_pdu + - LP: #1381234 + * USB: EHCI: unlink QHs even after the controller has stopped + - LP: #1381234 + * USB: storage: Add quirk for Adaptec USBConnect 2000 USB-to-SCSI Adapter + - LP: #1381234 + * USB: storage: Add quirk for Ariston Technologies iConnect USB to SCSI + adapter + - LP: #1381234 + * USB: storage: Add quirks for Entrega/Xircom USB to SCSI converters + - LP: #1381234 + * drm/nouveau: ltc/gf100-: fix cbc issues on certain boards + - LP: #1381234 + * iwlwifi: increase DEFAULT_MAX_TX_POWER + - LP: #1381234 + * iwlwifi: mvm: treat EAPOLs like mgmt frames wrt rate + - LP: #1381234 + * workqueue: apply __WQ_ORDERED to create_singlethread_workqueue() + - LP: #1381234 + * can: flexcan: mark TX mailbox as TX_INACTIVE + - LP: #1381234 + * can: flexcan: correctly initialize mailboxes + - LP: #1381234 + * can: flexcan: implement workaround for errata ERR005829 + - LP: #1381234 + * can: flexcan: put TX mailbox into TX_INACTIVE mode after tx-complete + - LP: #1381234 + * can: at91_can: add missing prepare and unprepare of the clock + - LP: #1381234 + * IB/qib: Correct reference counting in debugfs qp_stats + - LP: #1381234 + * videobuf2-dma-sg: fix for wrong GFP mask to sg_alloc_table_from_pages + - LP: #1381234 + * vb2: fix plane index sanity check in vb2_plane_cookie() + - LP: #1381234 + * adv7604: fix inverted condition + - LP: #1381234 + * md/raid1: intialise start_next_window for READ case to avoid hang + - LP: #1381234 + * md/raid1: be more cautious where we read-balance during resync. + - LP: #1381234 + * md/raid1: clean up request counts properly in close_sync() + - LP: #1381234 + * md/raid1: make sure resync waits for conflicting writes to complete. + - LP: #1381234 + * md/raid1: Don't use next_resync to determine how far resync has + progressed + - LP: #1381234 + * md/raid1: update next_resync under resync_lock. + - LP: #1381234 + * md/raid1: count resync requests in nr_pending. + - LP: #1381234 + * md/raid1: fix_read_error should act on all non-faulty devices. + - LP: #1381234 + * ALSA: pcm: fix fifo_size frame calculation + - LP: #1381234 + * Fix nasty 32-bit overflow bug in buffer i/o code. 
+ - LP: #1381234 + * drm/radeon/cik: use a separate counter for CP init timeout + - LP: #1381234 + * parisc: Only use -mfast-indirect-calls option for 32-bit kernel builds + - LP: #1381234 + * sched: Fix unreleased llc_shared_mask bit during CPU hotplug + - LP: #1381234 + * MIPS: mcount: Adjust stack pointer for static trace in MIPS32 + - LP: #1381234 + * nilfs2: fix data loss with mmap() + - LP: #1381234 + * ocfs2/dlm: do not get resource spinlock if lockres is new + - LP: #1381234 + * mm, slab: initialize object alignment on cache creation + - LP: #1381234 + * mm: softdirty: keep bit when zapping file pte + - LP: #1381234 + * shmem: fix nlink for rename overwrite directory + - LP: #1381234 + * ARM: 8165/1: alignment: don't break misaligned NEON load/store + - LP: #1381234 + * ASoC: core: fix possible ZERO_SIZE_PTR pointer dereferencing error. + - LP: #1381234 + * cpufreq: integrator: fix integrator_cpufreq_remove return type + - LP: #1381234 + * drm/i915: Flush the PTEs after updating them before suspend + - LP: #1381234 + * ARM: 8178/1: fix set_tls for !CONFIG_KUSER_HELPERS + - LP: #1381234 + * md/raid5: disable 'DISCARD' by default due to safety concerns. + - LP: #1381234 + * mm: migrate: Close race between migration completion and mprotect + - LP: #1381234 + * mm: numa: Do not mark PTEs pte_numa when splitting huge pages + - LP: #1381234 + * Fix problem recognizing symlinks + - LP: #1381234 + * mm: memcontrol: do not iterate uninitialized memcgs + - LP: #1381234 + * perf: fix perf bug in fork() + - LP: #1381234 + * mm: page_alloc: fix zone allocation fairness on UP + - LP: #1381234 + * init/Kconfig: Hide printk log config if CONFIG_PRINTK=n + - LP: #1381234 + * init/Kconfig: Fix HAVE_FUTEX_CMPXCHG to not break up the EXPERT menu + - LP: #1381234 + * genhd: fix leftover might_sleep() in blk_free_devt() + - LP: #1381234 + * Linux 3.13.11.9 + - LP: #1381234 + * NVMe: Initialize device reference count earlier + - LP: #1382221 + * nfs: fix duplicate proc entries + - LP: #1376245 + * xfs: xfs_dir2_block_to_sf temp buffer allocation fails + - LP: #1382333 + * UPSTREAM: kernel: Mark function as static in kernel/seccomp.c + - LP: #1379020 + * ARM: 8087/1: ptrace: reload syscall number after secure_computing() + check + - LP: #1379020 + * seccomp: create internal mode-setting function + - LP: #1379020 + * seccomp: extract check/assign mode helpers + - LP: #1379020 + * seccomp: split mode setting routines + - LP: #1379020 + * seccomp: add "seccomp" syscall + - LP: #1379020 + * ARM: add seccomp syscall + - LP: #1379020 + * MIPS: add seccomp syscall + - LP: #1379020 + * sched: move no_new_privs into new atomic flags + - LP: #1379020 + * seccomp: split filter prep from check and apply + - LP: #1379020 + * seccomp: introduce writer locking + - LP: #1379020 + * seccomp: allow mode setting across threads + - LP: #1379020 + * seccomp: implement SECCOMP_FILTER_FLAG_TSYNC + - LP: #1379020 + * seccomp: Replace BUG(!spin_is_locked()) with assert_spin_lock + - LP: #1379020 + * fs: Add a missing permission check to do_umount + - LP: #1383358 + - CVE-2014-7975 + * mfd: rtsx_pcr: Fix MSI enable error handling + - LP: #1366841 + * xen/balloon: Don't continue ballooning when BP_ECANCELED is encountered + - LP: #1304001 + * Bluetooth: Fix HCI H5 corrupted ack value + - LP: #1387886 + * dmaengine: fix xor sources continuation + - LP: #1387886 + * siano: add support for PCTV 77e + - LP: #1387886 + * em28xx-v4l: give back all active video buffers to the vb2 core properly + on streaming stop + - LP: #1387886 + * 
em28xx-v4l: fix video buffer field order reporting in progressive mode + - LP: #1387886 + * crypto: caam - fix addressing of struct member + - LP: #1387886 + * x86, fpu: shift drop_init_fpu() from save_xstate_sig() to + handle_signal() + - LP: #1387886 + * x86, fpu: __restore_xstate_sig()->math_state_restore() needs + preempt_disable() + - LP: #1387886 + * KVM: do not bias the generation number in kvm_current_mmio_generation + - LP: #1387886 + * kvm: fix potentially corrupt mmio cache + - LP: #1387886 + * kvm: x86: fix stale mmio cache bug + - LP: #1387886 + * UBIFS: fix free log space calculation + - LP: #1387886 + * Bluetooth: Fix issue with USB suspend in btusb driver + - LP: #1387886 + * mmc: rtsx_pci_sdmmc: fix incorrect last byte in R2 response + - LP: #1387886 + * KVM: s390: unintended fallthrough for external call + - LP: #1387886 + * UBI: add missing kmem_cache_free() in process_pool_aeb error path + - LP: #1387886 + * PCI: Increase IBM ipr SAS Crocodile BARs to at least system page size + - LP: #1387886 + * drbd: compute the end before rb_insert_augmented() + - LP: #1387886 + * Bluetooth: Fix setting correct security level when initiating SMP + - LP: #1387886 + * mmc: tmio: prevent endless loop in tmio_mmc_set_clock() + - LP: #1387886 + * iwlwifi: Add missing PCI IDs for the 7260 series + - LP: #1387886 + * media: usb: uvc: add a quirk for Dell XPS M1330 webcam + - LP: #1387886 + * USB: serial: cp210x: added Ketra N1 wireless interface support + - LP: #1387886 + * USB: cp210x: add support for Seluxit USB dongle + - LP: #1387886 + * PCI: Generate uppercase hex for modalias interface class + - LP: #1387886 + * PCI: mvebu: Fix uninitialized variable in mvebu_get_tgt_attr() + - LP: #1387886 + * xfs: ensure WB_SYNC_ALL writeback handles partial pages correctly + - LP: #1387886 + * v4l2-common: fix overflow in v4l_bound_align_image() + - LP: #1387886 + * USB: Add device quirk for ASUS T100 Base Station keyboard + - LP: #1387886 + * mei: bus: fix possible boundaries violation + - LP: #1387886 + * firmware_class: make sure fw requests contain a name + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_post_msg() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_teardown_gpadl() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_establish_gpadl() + - LP: #1387886 + * Drivers: hv: vmbus: Fix a bug in vmbus_open() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup vmbus_close_internal() + - LP: #1387886 + * Drivers: hv: vmbus: Cleanup hv_post_message() + - LP: #1387886 + * spi: dw-mid: respect 8 bit mode + - LP: #1387886 + * spi: dw-mid: terminate ongoing transfers at exit + - LP: #1387886 + * kvm: don't take vcpu mutex for obviously invalid vcpu ioctls + - LP: #1387886 + * x86/intel/quark: Switch off CR4.PGE so TLB flush uses CR3 instead + - LP: #1387886 + * ARM: at91: fix at91sam9263ek DT mmc pinmuxing settings + - LP: #1387886 + * ARM: at91/PMC: don't forget to write PMC_PCDR register to disable + clocks + - LP: #1387886 + * Fixing lease renewal + - LP: #1387886 + * lockd: Try to reconnect if statd has moved + - LP: #1387886 + * qla2xxx: Use correct offset to req-q-out for reserve calculation + - LP: #1387886 + * power: charger-manager: Fix NULL pointer exception with missing + cm-fuel-gauge + - LP: #1387886 + * rt2800: correct BBP1_TX_POWER_CTRL mask + - LP: #1387886 + * regmap: fix NULL pointer dereference in _regmap_write/read + - LP: #1387886 + * Documentation: lzo: document part of the encoding + - LP: #1387886 + * lzo: check for length overrun in variable length encoding. 
+ - LP: #1387886 + * regmap: debugfs: fix possbile NULL pointer dereference + - LP: #1387886 + * regmap: fix possible ZERO_SIZE_PTR pointer dereferencing error. + - LP: #1387886 + * net_dma: simple removal + - LP: #1387886 + * libata-sff: Fix controllers with no ctl port + - LP: #1387886 + * NFSv4: Fix lock recovery when CREATE_SESSION/SETCLIENTID_CONFIRM fails + - LP: #1387886 + * NFSv4: fix open/lock state recovery error handling + - LP: #1387886 + * tty: omap-serial: fix division by zero + - LP: #1387886 + * serial: 8250: Add Quark X1000 to 8250_pci.c + - LP: #1387886 + * missing data dependency barrier in prepend_name() + - LP: #1387886 + * be2iscsi: check ip buffer before copying + - LP: #1387886 + * framebuffer: fix border color + - LP: #1387886 + * framebuffer: fix screen corruption when copying + - LP: #1387886 + * mpc85xx_edac: Make L2 interrupt shared too + - LP: #1387886 + * NFSv4.1: Fix an NFSv4.1 state renewal regression + - LP: #1387886 + * xen-blkback: fix leak on grant map error path + - LP: #1387886 + * m68k: Disable/restore interrupts in hwreg_present()/hwreg_write() + - LP: #1387886 + * ASoC: tlv320aic3x: fix PLL D configuration + - LP: #1387886 + * dm bufio: update last_accessed when relinking a buffer + - LP: #1387886 + * dm bufio: when done scanning return from __scan immediately + - LP: #1387886 + * dm log userspace: fix memory leak in dm_ulog_tfr_init failure path + - LP: #1387886 + * ecryptfs: avoid to access NULL pointer when write metadata in xattr + - LP: #1387886 + * x86_64, entry: Filter RFLAGS.NT on entry from userspace + - LP: #1387886 + * ASoC: soc-dapm: fix use after free + - LP: #1387886 + * pata_serverworks: disable 64-KB DMA transfers on Broadcom OSB4 IDE + Controller + - LP: #1387886 + * drm/ast: Fix HW cursor image + - LP: #1387886 + * x86: Reject x32 executables if x32 ABI not supported + - LP: #1387886 + * kill wbuf_queued/wbuf_dwork_lock + - LP: #1387886 + * fs: Fix theoretical division by 0 in super_cache_scan(). 
+ - LP: #1387886 + * fs: make cont_expand_zero interruptible + - LP: #1387886 + * fix misuses of f_count() in ppp and netlink + - LP: #1387886 + * block: fix alignment_offset math that assumes io_min is a power-of-2 + - LP: #1387886 + * fanotify: enable close-on-exec on events' fd when requested in + fanotify_init() + - LP: #1387886 + * mm: clear __GFP_FS when PF_MEMALLOC_NOIO is set + - LP: #1387886 + * Input: i8042 - add noloop quirk for Asus X750LN + - LP: #1387886 + * um: ubd: Fix for processes stuck in D state forever + - LP: #1387886 + * kernel: add support for gcc 5 + - LP: #1387886 + * ALSA: emu10k1: Fix deadlock in synth voice lookup + - LP: #1387886 + * libceph: ceph-msgr workqueue needs a resque worker + - LP: #1387886 + * mnt: Prevent pivot_root from creating a loop in the mount tree + - LP: #1387886 + * modules, lock around setting of MODULE_STATE_UNFORMED + - LP: #1387886 + * virtio_pci: fix virtio spec compliance on restore + - LP: #1387886 + * selinux: fix inode security list corruption + - LP: #1387886 + * pstore: Fix duplicate {console,ftrace}-efi entries + - LP: #1387886 + * futex: Ensure get_futex_key_refs() always implies a barrier + - LP: #1387886 + * x86,kvm,vmx: Preserve CR4 across VM entry + - LP: #1387886 + * crypto: caam - remove duplicated sg copy functions + - LP: #1387886 + * Linux 3.13.11.10 + - LP: #1387886 + * ipmi: Turn off default probing of interfaces + - LP: #1388952 + + -- Tim Gardner Wed, 29 Oct 2014 14:39:14 -0400 + +linux (3.13.0-39.66) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1386629 + + [ Upstream Kernel Changes ] + + * KVM: x86: Check non-canonical addresses upon WRMSR + - LP: #1384539 + - CVE-2014-3610 + * KVM: x86: Prevent host from panicking on shared MSR writes. + - LP: #1384539 + - CVE-2014-3610 + * KVM: x86: Improve thread safety in pit + - LP: #1384540 + - CVE-2014-3611 + * KVM: x86: Fix wrong masking on relative jump/call + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: Warn if guest virtual address space is not 48-bits + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: Emulator fixes for eip canonical checks on near branches + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: emulating descriptor load misses long-mode case + - LP: #1384545 + - CVE-2014-3647 + * KVM: x86: Handle errors when RIP is set during far jumps + - LP: #1384545 + - CVE-2014-3647 + * kvm: vmx: handle invvpid vm exit gracefully + - LP: #1384544 + - CVE-2014-3646 + * Input: synaptics - gate forcepad support by DMI check + - LP: #1381815 + + -- Luis Henriques Tue, 28 Oct 2014 10:29:51 +0000 + +linux (3.13.0-38.65) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1379244 + + [ Andy Whitcroft ] + + * Revert "SAUCE: scsi: hyper-v storsvc switch up to SPC-3" + - LP: #1354397 + * [Config] linux-image-extra is additive to linux-image + - LP: #1375310 + * [Config] linux-image-extra postrm is not needed on purge + - LP: #1375310 + + [ Upstream Kernel Changes ] + + * Revert "KVM: x86: Increase the number of fixed MTRR regs to 10" + - LP: #1377564 + * Revert "USB: option,zte_ev: move most ZTE CDMA devices to zte_ev" + - LP: #1377564 + * aufs: bugfix, stop calling security_mmap_file() again + - LP: #1371316 + * ipvs: fix ipv6 hook registration for local replies + - LP: #1349768 + * Drivers: add blist flags + - LP: #1354397 + * sd: fix a bug in deriving the FLUSH_TIMEOUT from the basic I/O timeout + - LP: #1354397 + * drm/i915/bdw: Add 42ms delay for IPS disable + - LP: #1374389 + * drm/i915: add null render states for 
gen6, gen7 and gen8 + - LP: #1374389 + * drm/i915/bdw: 3D_CHICKEN3 has write mask bits + - LP: #1374389 + * drm/i915/bdw: Disable idle DOP clock gating + - LP: #1374389 + * drm/i915: call lpt_init_clock_gating on BDW too + - LP: #1374389 + * drm/i915: shuffle panel code + - LP: #1374389 + * drm/i915: extract backlight minimum brightness from VBT + - LP: #1374389 + * drm/i915: respect the VBT minimum backlight brightness + - LP: #1374389 + * drm/i915/bdw: Apply workarounds in render ring init function + - LP: #1374389 + * drm/i915/bdw: Cleanup pre prod workarounds + - LP: #1374389 + * drm/i915: Replace hardcoded cacheline size with macro + - LP: #1374389 + * drm/i915: Refactor Broadwell PIPE_CONTROL emission into a helper. + - LP: #1374389 + * drm/i915: Add the WaCsStallBeforeStateCacheInvalidate:bdw workaround. + - LP: #1374389 + * drm/i915/bdw: Remove BDW preproduction W/As until C stepping. + - LP: #1374389 + * mptfusion: enable no_write_same for vmware scsi disks + - LP: #1371591 + * iommu/amd: Fix cleanup_domain for mass device removal + - LP: #1375266 + * cifs: mask off top byte in get_rfc1002_length() + - LP: #1372482 + * Input: synaptics - add support for ForcePads + - LP: #1377564 + * ASoC: pxa-ssp: drop SNDRV_PCM_FMTBIT_S24_LE + - LP: #1377564 + * drm/radeon: add bapm module parameter + - LP: #1377564 + * drm/radeon: Add missing lines to ci_set_thermal_temperature_range + - LP: #1377564 + * drm/radeon: Add ability to get and change dpm state when radeon PX card + is turned off + - LP: #1377564 + * ALSA: hda/realtek - Avoid setting wrong COEF on ALC269 & co + - LP: #1377564 + * of/irq: Fix lookup to use 'interrupts-extended' property first + - LP: #1377564 + * Possible null ptr deref in SMB2_tcon + - LP: #1377564 + * CIFS: Fix SMB2 readdir error handling + - LP: #1377564 + * CIFS: Fix wrong directory attributes after rename + - LP: #1377564 + * md/raid6: avoid data corruption during recovery of double-degraded + RAID6 + - LP: #1377564 + * ARM: dts: i.MX53: fix apparent bug in VPU clks + - LP: #1377564 + * pata_scc: propagate return value of scc_wait_after_reset + - LP: #1377564 + * libata: widen Crucial M550 blacklist matching + - LP: #1377564 + * ALSA: hda - restore the gpio led after resume + - LP: #1358116, #1377564 + * md/raid10: fix memory leak when reshaping a RAID10. + - LP: #1377564 + * md/raid10: Fix memory leak when raid10 reshape completes. 
+ - LP: #1377564 + * MIPS: OCTEON: make get_system_type() thread-safe + - LP: #1377564 + * can: c_can: checking IS_ERR() instead of NULL + - LP: #1377564 + * HID: logitech: perform bounds checking on device_id early enough + - LP: #1377564 + * firmware: Do not use WARN_ON(!spin_is_locked()) + - LP: #1377564 + * drm/radeon: add new KV pci id + - LP: #1377564 + * drm/radeon: add new bonaire pci ids + - LP: #1377564 + * drm/radeon: add additional SI pci ids + - LP: #1377564 + * ibmveth: Fix endian issues with rx_no_buffer statistic + - LP: #1377564 + * spi/omap-mcspi: Fix the spi task hangs waiting dma_rx + - LP: #1377564 + * xtensa: replace IOCTL code definitions with constants + - LP: #1377564 + * xtensa: fix address checks in dma_{alloc,free}_coherent + - LP: #1377564 + * xtensa: fix access to THREAD_RA/THREAD_SP/THREAD_DS + - LP: #1377564 + * xtensa: fix TLBTEMP_BASE_2 region handling in fast_second_level_miss + - LP: #1377564 + * xtensa: fix a6 and a7 handling in fast_syscall_xtensa + - LP: #1377564 + * staging: lustre: Remove circular dependency on header + - LP: #1377564 + * USB: option: reduce interrupt-urb logging verbosity + - LP: #1377564 + * USB: option: add VIA Telecom CDS7 chipset device id + - LP: #1377564 + * USB: zte_ev: remove duplicate Gobi PID + - LP: #1377564 + * USB: zte_ev: remove duplicate Qualcom PID + - LP: #1377564 + * USB: ftdi_sio: add Basic Micro ATOM Nano USB2Serial PID + - LP: #1377564 + * USB: serial: pl2303: add device id for ztek device + - LP: #1377564 + * USB: ftdi_sio: Added PID for new ekey device + - LP: #1377564 + * xhci: Treat not finding the event_seg on COMP_STOP the same as + COMP_STOP_INVAL + - LP: #1377564 + * usb: xhci: amd chipset also needs short TX quirk + - LP: #1377564 + * xhci: rework cycle bit checking for new dequeue pointers + - LP: #1377564 + * spi/pxa2xx: Add ACPI ID for Intel Braswell + - LP: #1377564 + * ALSA: core: fix buffer overflow in snd_info_get_line() + - LP: #1377564 + * HID: logitech-dj: prevent false errors to be shown + - LP: #1377564 + * usb: ehci: using wIndex + 1 for hub port + - LP: #1377564 + * staging/rtl8188eu: add 0df6:0076 Sitecom Europe B.V. + - LP: #1377564 + * staging: r8188eu: Add new USB ID + - LP: #1377564 + * mtd: nand: omap: Fix 1-bit Hamming code scheme, omap_calculate_ecc() + - LP: #1377564 + * trace: Fix epoll hang when we race with new entries + - LP: #1377564 + * cfq-iosched: Fix wrong children_weight calculation + - LP: #1377564 + * USB: sisusb: add device id for Magic Control USB video + - LP: #1377564 + * NFSv4: Fix problems with close in the presence of a delegation + - LP: #1377564 + * usb: hub: Prevent hub autosuspend if usbcore.autosuspend is -1 + - LP: #1377564 + * ARM: 8128/1: abort: don't clear the exclusive monitors + - LP: #1377564 + * ARM: 8129/1: errata: work around Cortex-A15 erratum 830321 using dummy + strex + - LP: #1377564 + * USB: serial: fix potential stack buffer overflow + - LP: #1377564 + * USB: serial: fix potential heap buffer overflow + - LP: #1377564 + * ext4: update i_disksize coherently with block allocation on error path + - LP: #1377564 + * jbd2: fix infinite loop when recovering corrupt journal blocks + - LP: #1377564 + * jbd2: fix descriptor block size handling errors with journal_csum + - LP: #1377564 + * memblock, memhotplug: fix wrong type in memblock_find_in_range_node(). 
+ - LP: #1377564 + * xattr: fix check for simultaneous glibc header inclusion + - LP: #1377564 + * KVM: s390: Fix user triggerable bug in dead code + - LP: #1377564 + * KVM: s390/mm: try a cow on read only pages for key ops + - LP: #1377564 + * regmap: Fix regcache debugfs initialization + - LP: #1377564 + * regmap: Fix handling of volatile registers for format_write() chips + - LP: #1377564 + * ASoC: rt5640: Do not allow regmap to use bulk read-write operations + - LP: #1377564 + * drm/i915: Remove bogus __init annotation from DMI callbacks + - LP: #1377564 + * hwmon: (ds1621) Update zbits after conversion rate change + - LP: #1377564 + * arm64: ptrace: fix compat hardware watchpoint reporting + - LP: #1377564 + * ARM/ARM64: KVM: Nuke Hyp-mode tlbs before enabling MMU + - LP: #1377564 + * arm/arm64: KVM: Complete WFI/WFE instructions + - LP: #1377564 + * get rid of propagate_umount() mistakenly treating slaves as busy. + - LP: #1377564 + * fix EBUSY on umount() from MNT_SHRINKABLE + - LP: #1377564 + * regmap: Don't attempt block writes when syncing cache on single_rw + devices + - LP: #1377564 + * drm/vmwgfx: Fix a potential infinite spin waiting for fifo idle + - LP: #1377564 + * ALSA: hda - Fix digital mic on Acer Aspire 3830TG + - LP: #1377564 + * xfs: don't dirty buffers beyond EOF + - LP: #1377564 + * xfs: don't zero partial page cache pages during O_DIRECT writes + - LP: #1377564 + * xfs: don't zero partial page cache pages during O_DIRECT writes + - LP: #1377564 + * ALSA: hda - Fix COEF setups for ALC1150 codec + - LP: #1377564 + * i2c: rcar: fix MNR interrupt handling + - LP: #1377564 + * i2c: mv64xxx: continue probe when clock-frequency is missing + - LP: #1377564 + * i2c: at91: Fix a race condition during signal handling in + at91_do_twi_xfer. + - LP: #1377564 + * i2c: at91: add bound checking on SMBus block length bytes + - LP: #1377564 + * aio: add missing smp_rmb() in read_events_ring + - LP: #1377564 + * KEYS: Fix use-after-free in assoc_array_gc() + - LP: #1377564 + * ACPI / cpuidle: fix deadlock between cpuidle_lock and cpu_hotplug.lock + - LP: #1377564 + * USB: fix build error with CONFIG_PM_RUNTIME disabled + - LP: #1377564 + * Linux 3.13.11.8 + - LP: #1377564 + * powerpc: Fix kdump hang issue on p8 with relocation on exception + enabled. 
+ - LP: #1352056 + * net-gre-gro: Fix a bug that breaks the forwarding path + - LP: #1377851 + + -- Luis Henriques Thu, 09 Oct 2014 10:31:36 +0100 + +linux (3.13.0-37.64) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1372576 + + [ dann frazier ] + + * [Config] CONFIG_HW_RANDOM_XGENE=m on arm64 + + [ Edward Lin ] + + * SAUCE: Add use_native_backlight quirk for Dell Inspiron 5721/3521 + - LP: #1354253, #1354313 + + [ Tim Gardner ] + + * SAUCE: Fix nfs oops stable regression + - LP: #1348670 + * [Config] Add mpt3sas to d-i + - LP: #1368907 + * [Config] CONFIG_X86_16BIT=y + - LP: #1371601 + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Rebase to v3.15.8 + - LP: #1359213 + + [ Upstream Kernel Changes ] + + * Revert "x86-64, modify_ldt: Make support for 16-bit segments a runtime + option" + - LP: #1371601 + * mmc: rtsx: add R1-no-CRC mmc command type handle + - LP: #1365378 + * rpc_pipe: remove the clntXX dir if creating the pipe fails + - LP: #1365869 + * sunrpc: add an "info" file for the dummy gssd pipe + - LP: #1365869 + * rpc_pipe: fix cleanup of dummy gssd directory when notification fails + - LP: #1365869 + * hwrng: xgene - add support for APM X-Gene SoC RNG support + - LP: #1365593 + * Documentation: rng: Add X-Gene SoC RNG driver documentation + - LP: #1365593 + * arm64: dts: add random number generator dts node to APM X-Gene + platform. + - LP: #1365593 + * xen/balloon: cancel ballooning if adding new memory failed + - LP: #1304001 + * x86/xen: resume timer irqs early + - LP: #1368724 + * xen/manage: Always freeze/thaw processes when suspend/resuming + - LP: #1368724 + * scsi_transport_sas: move bsg destructor into sas_rphy_remove + - LP: #1368991 + * drm/i915: Enable 5.4Ghz (HBR2) link rate for Displayport 1.2-capable + devices + - LP: #1369633 + * bnx2x: Fix link for KR with swapped polarity lane + - LP: #1370716 + * drm: add DRM_CAPs for cursor size + - LP: #1359213 + * drm/dp: Add AUX channel infrastructure + - LP: #1359213 + * drm/dp: Add drm_dp_dpcd_read_link_status() + - LP: #1359213 + * drm/dp: Add DisplayPort link helpers + - LP: #1359213 + * drm/dp: Allow registering AUX channels as I2C busses + - LP: #1359213 + * drm/dp: let drivers specify the name of the I2C-over-AUX adapter + - LP: #1359213 + * drm/dp: make aux retries less chatty + - LP: #1359213 + * Bluetooth: Enable Atheros 0cf3:311e for firmware upload + - LP: #1371477 + * bnx2x: fix crash during TSO tunneling + - LP: #1371601 + * inetpeer: get rid of ip_id_count + - LP: #1371601 + * ip: make IP identifiers less predictable + - LP: #1371601 + * tcp: Fix integer-overflows in TCP veno + - LP: #1371601 + * tcp: Fix integer-overflow in TCP vegas + - LP: #1371601 + * macvlan: Initialize vlan_features to turn on offload support. + - LP: #1371601 + * net: Correctly set segment mac_len in skb_segment(). + - LP: #1371601 + * iovec: make sure the caller actually wants anything in + memcpy_fromiovecend + - LP: #1371601 + * batman-adv: Fix out-of-order fragmentation support + - LP: #1371601 + * sctp: fix possible seqlock seadlock in sctp_packet_transmit() + - LP: #1371601 + * sparc64: Fix argument sign extension for compat_sys_futex(). + - LP: #1371601 + * sparc64: Make itc_sync_lock raw + - LP: #1371601 + * sparc64: Fix executable bit testing in set_pmd_at() paths. + - LP: #1371601 + * sparc64: Fix huge PMD invalidation. + - LP: #1371601 + * sparc64: Fix bugs in get_user_pages_fast() wrt. THP. + - LP: #1371601 + * sparc64: Fix hex values in comment above pte_modify(). 
+ - LP: #1371601 + * sparc64: Don't use _PAGE_PRESENT in pte_modify() mask. + - LP: #1371601 + * sparc64: Handle 32-bit tasks properly in compute_effective_address(). + - LP: #1371601 + * sparc64: Fix top-level fault handling bugs. + - LP: #1371601 + * sparc64: Fix range check in kern_addr_valid(). + - LP: #1371601 + * sparc64: Use 'ILOG2_4MB' instead of constant '22'. + - LP: #1371601 + * sparc64: Add basic validations to {pud,pmd}_bad(). + - LP: #1371601 + * sparc64: Give more detailed information in {pgd,pmd}_ERROR() and kill + pte_ERROR(). + - LP: #1371601 + * sparc64: Don't bark so loudly about 32-bit tasks generating 64-bit + fault addresses. + - LP: #1371601 + * sparc64: Fix huge TSB mapping on pre-UltraSPARC-III cpus. + - LP: #1371601 + * sparc64: Add membar to Niagara2 memcpy code. + - LP: #1371601 + * sparc64: Do not insert non-valid PTEs into the TSB hash table. + - LP: #1371601 + * sparc64: Guard against flushing openfirmware mappings. + - LP: #1371601 + * bbc-i2c: Fix BBC I2C envctrl on SunBlade 2000 + - LP: #1371601 + * sunsab: Fix detection of BREAK on sunsab serial console + - LP: #1371601 + * sparc64: ldc_connect() should not return EINVAL when handshake is in + progress. + - LP: #1371601 + * arch/sparc/math-emu/math_32.c: drop stray break operator + - LP: #1371601 + * x86-64, espfix: Don't leak bits 31:16 of %esp returning to 16-bit stack + - LP: #1371601 + * x86, espfix: Move espfix definitions into a separate header file + - LP: #1371601 + * x86, espfix: Fix broken header guard + - LP: #1371601 + * x86, espfix: Make espfix64 a Kconfig option, fix UML + - LP: #1371601 + * x86, espfix: Make it possible to disable 16-bit support + - LP: #1371601 + * x86_64/entry/xen: Do not invoke espfix64 on Xen + - LP: #1371601 + * ALSA: usb-audio: fix BOSS ME-25 MIDI regression + - LP: #1371601 + * ASoC: wm8994: Prevent double lock of accdet_lock mutex on wm1811 + - LP: #1371601 + * v4l: vsp1: Remove the unneeded vsp1_video_buffer video field + - LP: #1371601 + * ASoC: max98090: Fix missing free_irq + - LP: #1371601 + * KVM: x86: Inter-privilege level ret emulation is not implemeneted + - LP: #1371601 + * au0828: Only alt setting logic when needed + - LP: #1371601 + * ASoC: pcm: fix dpcm_path_put in dpcm runtime update + - LP: #1371601 + * crypto: ux500 - make interrupt mode plausible + - LP: #1371601 + * Bluetooth: btmrvl: wait for HOST_SLEEP_ENABLE event in suspend + - LP: #1371601 + * ASoC: adau1701: fix adau1701_reg_read() + - LP: #1371601 + * ASoC: wm_adsp: Add missing MODULE_LICENSE + - LP: #1371601 + * regulator: arizona-ldo1: remove bypass functionality + - LP: #1371601 + * ASoC: samsung: Correct I2S DAI suspend/resume ops + - LP: #1371601 + * drm/tilcdc: panel: fix dangling sysfs connector node + - LP: #1371601 + * drm/tilcdc: slave: fix dangling sysfs connector node + - LP: #1371601 + * drm/tilcdc: tfp410: fix dangling sysfs connector node + - LP: #1371601 + * drm/tilcdc: panel: fix leak when unloading the module + - LP: #1371601 + * drm/tilcdc: fix release order on exit + - LP: #1371601 + * drm/tilcdc: fix double kfree + - LP: #1371601 + * ACPICA: Utilities: Fix memory leak in acpi_ut_copy_iobject_to_iobject + - LP: #1371601 + * stable_kernel_rules: Add pointer to netdev-FAQ for network patches + - LP: #1371601 + * USB: ehci-pci: USB host controller support for Intel Quark X1000 + - LP: #1371601 + * debugfs: Fix corrupted loop in debugfs_remove_recursive + - LP: #1371601 + * serial: core: Preserve termios c_cflag for console resume + - LP: #1371601 + * mtd/ftl: fix the double 
free of the buffers allocated in build_maps() + - LP: #1371601 + * ext4: Fix block zeroing when punching holes in indirect block files + - LP: #1371601 + * ext4: fix punch hole on files with indirect mapping + - LP: #1371601 + * x86: don't exclude low BIOS area when allocating address space for + non-PCI cards + - LP: #1371601 + * PCI: Configure ASPM when enabling device + - LP: #1371601 + * Bluetooth: never linger on process exit + - LP: #1371601 + * ASoC: blackfin: use samples to set silence + - LP: #1371601 + * USB: OHCI: fix bugs in debug routines + - LP: #1371601 + * USB: OHCI: don't lose track of EDs when a controller dies + - LP: #1371601 + * mei: start disconnect request timer consistently + - LP: #1371601 + * mei: fix return value on disconnect timeout + - LP: #1371601 + * USB: Fix persist resume of some SS USB devices + - LP: #1371601 + * media-device: Remove duplicated memset() in media_enum_entities() + - LP: #1371601 + * Bluetooth: Avoid use of session socket after the session gets freed + - LP: #1371601 + * xc5000: Fix get_frequency() + - LP: #1371601 + * xc4000: Fix get_frequency() + - LP: #1371601 + * CAPABILITIES: remove undefined caps from all processes + - LP: #1371601 + * scsi: add a blacklist flag which enables VPD page inquiries + - LP: #1371601 + * bfa: Fix undefined bit shift on big-endian architectures with 32-bit + DMA address + - LP: #1371601 + * hpsa: fix bad -ENOMEM return value in hpsa_big_passthru_ioctl + - LP: #1371601 + * Drivers: scsi: storvsc: Change the limits to reflect the values on the + host + - LP: #1371601 + * Drivers: scsi: storvsc: Set cmd_per_lun to reflect value supported by + the Host + - LP: #1371601 + * Drivers: scsi: storvsc: Filter commands based on the storage protocol + version + - LP: #1371601 + * Drivers: scsi: storvsc: Fix a bug in handling VMBUS protocol version + - LP: #1371601 + * Drivers: scsi: storvsc: Implement a eh_timed_out handler + - LP: #1371601 + * drivers: scsi: storvsc: Set srb_flags in all cases + - LP: #1371601 + * drivers: scsi: storvsc: Correctly handle TEST_UNIT_READY failure + - LP: #1371601 + * x86_64/vsyscall: Fix warn_bad_vsyscall log output + - LP: #1371601 + * KVM: PPC: Book3S PR: Take SRCU read lock around RTAS kvm_read_guest() + call + - LP: #1371601 + * spi: orion: fix incorrect handling of cell-index DT property + - LP: #1371601 + * mfd: omap-usb-host: Fix improper mask use. + - LP: #1371601 + * tpm: Add missing tpm_do_selftest to ST33 I2C driver + - LP: #1371601 + * tpm: missing tpm_chip_put in tpm_get_random() + - LP: #1371601 + * scsi: do not issue SCSI RSOC command to Promise Vtrak E610f + - LP: #1371601 + * hwmon: (ads1015) Fix off-by-one for valid channel index checking + - LP: #1371601 + * ALSA: hda - fix an external mic jack problem on a HP machine + - LP: #1350148, #1371601 + * MIPS: tlbex: Fix a missing statement for HUGETLB + - LP: #1371601 + * MIPS: Prevent user from setting FCSR cause bits + - LP: #1371601 + * KVM: x86: always exit on EOIs for interrupts listed in the IOAPIC redir + table + - LP: #1371601 + * MIPS: Remove BUG_ON(!is_fpu_owner()) in do_ade() + - LP: #1371601 + * MIPS: ptrace: Test correct task's flags in task_user_regset_view() + - LP: #1371601 + * MIPS: asm/reg.h: Make 32- and 64-bit definitions available at the same + time + - LP: #1371601 + * MIPS: ptrace: Change GP regset to use correct core dump register layout + - LP: #1371601 + * md/raid1,raid10: always abort recover on write error. 
+ - LP: #1371601 + * ext4: fix ext4_discard_allocated_blocks() if we can't allocate the pa + struct + - LP: #1371601 + * hwmon: (lm85) Fix various errors on attribute writes + - LP: #1371601 + * hwmon: (lm78) Fix overflow problems seen when writing large temperature + limits + - LP: #1371601 + * hwmon: (amc6821) Fix possible race condition bug + - LP: #1371601 + * MIPS: GIC: Prevent array overrun + - LP: #1371601 + * mnt: Add tests for unprivileged remount cases that have found to be + faulty + - LP: #1371601 + * ARM: OMAP3: Fix choice of omap3_restore_es function in OMAP34XX + rev3.1.2 case. + - LP: #1371601 + * netlabel: fix a problem when setting bits below the previously lowest + bit + - LP: #1371601 + * netlabel: fix the horribly broken catmap functions + - LP: #1371601 + * netlabel: fix the catmap walking functions + - LP: #1371601 + * drivers/i2c/busses: use correct type for dma_map/unmap + - LP: #1371601 + * NFSD: Decrease nfsd_users in nfsd_startup_generic fail + - LP: #1371601 + * MIPS: O32/32-bit: Fix bug which can cause incorrect system call + restarts + - LP: #1371601 + * IB/srp: Fix deadlock between host removal and multipathd + - LP: #1371601 + * USB: serial: ftdi_sio: Annotate the current Xsens PID assignments + - LP: #1371601 + * USB: serial: ftdi_sio: Add support for new Xsens devices + - LP: #1371601 + * USB: devio: fix issue with log flooding + - LP: #1371601 + * CIFS: Fix async reading on reconnects + - LP: #1371601 + * CIFS: Fix STATUS_CANNOT_DELETE error mapping for SMB2 + - LP: #1371601 + * xfs: ensure verifiers are attached to recovered buffers + - LP: #1371601 + * drm/tegra: add MODULE_DEVICE_TABLEs + - LP: #1371601 + * ALSA: virtuoso: add Xonar Essence STX II support + - LP: #1371601 + * hwmon: (gpio-fan) Prevent overflow problem when writing large limits + - LP: #1371601 + * hwmon: (sis5595) Prevent overflow problem when writing large limits + - LP: #1371601 + * NFS: Fix /proc/fs/nfsfs/servers and /proc/fs/nfsfs/volumes + - LP: #1371601 + * drm/ttm: Fix possible division by 0 in ttm_dma_pool_shrink_scan(). + - LP: #1371601 + * drm/ttm: Choose a pool to shrink correctly in + ttm_dma_pool_shrink_scan(). + - LP: #1371601 + * drm/ttm: Use mutex_trylock() to avoid deadlock inside shrinker + functions. + - LP: #1371601 + * drm/ttm: Fix possible stack overflow by recursive shrinker calls. + - LP: #1371601 + * drm/ttm: Pass GFP flags in order to avoid deadlock. + - LP: #1371601 + * powerpc/mm/numa: Fix break placement + - LP: #1371601 + * powerpc/pci: Reorder pci bus/bridge unregistration during PHB removal + - LP: #1371601 + * drm/radeon: load the lm63 driver for an lm64 thermal chip. 
+ - LP: #1371601 + * drm/radeon: set VM base addr using the PFP v2 + - LP: #1371601 + * drm/radeon/atom: add new voltage fetch function for hawaii + - LP: #1371601 + * drm/radeon/dpm: handle voltage info fetching on hawaii + - LP: #1371601 + * drm/radeon: re-enable dpm by default on cayman + - LP: #1371601 + * drm/radeon: re-enable dpm by default on BTC + - LP: #1371601 + * drm/radeon: use packet2 for nop on hawaii with old firmware + - LP: #1371601 + * drm/radeon: tweak ACCEL_WORKING2 query for hawaii + - LP: #1371601 + * KVM: nVMX: fix "acknowledge interrupt on exit" when APICv is in use + - LP: #1371601 + * RDMA/iwcm: Use a default listen backlog if needed + - LP: #1371601 + * x86/efi: Enforce CONFIG_RELOCATABLE for EFI boot stub + - LP: #1371601 + * net: sun4i-emac: fix memory leak on bad packet + - LP: #1371601 + * hwmon: (ads1015) Fix out-of-bounds array access + - LP: #1371601 + * hwmon: (dme1737) Prevent overflow problem when writing large limits + - LP: #1371601 + * s390/locking: Reenable optimistic spinning + - LP: #1371601 + * ring-buffer: Up rb_iter_peek() loop count to 3 + - LP: #1371601 + * ring-buffer: Always reset iterator to reader page + - LP: #1371601 + * kernel/smp.c:on_each_cpu_cond(): fix warning in fallback path + - LP: #1371601 + * drm/i915: read HEAD register back in init_ring_common() to enforce + ordering + - LP: #1371601 + * vm_is_stack: use for_each_thread() rather then buggy + while_each_thread() + - LP: #1371601 + * libceph: set last_piece in ceph_msg_data_pages_cursor_init() correctly + - LP: #1371601 + * drm/nouveau: Bump version from 1.1.1 to 1.1.2 + - LP: #1371601 + * ALSA: usb-audio: fix BOSS ME-25 MIDI regression + - LP: #1371601 + * ALSA: hda/ca0132 - Don't try loading firmware at resume when already + failed + - LP: #1371601 + * carl9170: fix sending URBs with wrong type when using full-speed + - LP: #1371601 + * powerpc/pseries: Failure on removing device node + - LP: #1371601 + * Btrfs: Fix memory corruption by ulist_add_merge() on 32bit arch + - LP: #1371601 + * Btrfs: fix csum tree corruption, duplicate and outdated checksums + - LP: #1371601 + * ext4: fix BUG_ON in mb_free_blocks() + - LP: #1371601 + * x86/espfix/xen: Fix allocation of pages for paravirt page tables + - LP: #1371601 + * Linux 3.13.11.7 + - LP: #1371601 + * HID: magicmouse: sanity check report size in raw_event() callback + - LP: #1370025 + - CVE-2014-3181 + * HID: fix a couple of off-by-ones + - LP: #1370035 + - CVE-2014-3184 + * USB: whiteheat: Added bounds checking for bulk command response + - LP: #1370036 + - CVE-2014-3185 + * HID: picolcd: sanity check report size in raw_event() callback + - LP: #1370038 + - CVE-2014-3186 + * KEYS: Fix termination condition in assoc array garbage collection + - LP: #1370041 + - CVE-2014-3631 + * udf: Fold udf_fill_inode() into __udf_read_inode() + - LP: #1370042 + - CVE-2014-6410 + * udf: Avoid infinite loop when processing indirect ICBs + - LP: #1370042 + - CVE-2014-6410 + * libceph: add process_one_ticket() helper + - LP: #1370044, #1370046, #1370047 + - CVE-2014-6418 + * libceph: do not hard code max auth ticket len + - LP: #1370044, #1370046, #1370047 + - CVE-2014-6418 + + -- Joseph Salisbury Mon, 22 Sep 2014 15:51:07 -0400 + +linux (3.13.0-36.63) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1365052 + + [ Feng Kan ] + + * SAUCE: (no-up) irqchip:gic: change access of gicc_ctrl register to read + modify write. 
+ - LP: #1357527 + * SAUCE: (no-up) arm64: optimized copy_to_user and copy_from_user + assembly code + - LP: #1358949 + + [ Ming Lei ] + + * SAUCE: (no-up) Drop APM X-Gene SoC Ethernet driver + - LP: #1360140 + * [Config] Drop XGENE entries + - LP: #1360140 + * [Config] CONFIG_NET_XGENE=m for arm64 + - LP: #1360140 + + [ Stefan Bader ] + + * SAUCE: Add compat macro for skb_get_hash + - LP: #1358162 + * SAUCE: bcache: prevent crash on changing writeback_running + - LP: #1357295 + + [ Suman Tripathi ] + + * SAUCE: (no-up) arm64: Fix the csr-mask for APM X-Gene SoC AHCI SATA PHY + clock DTS node. + - LP: #1359489 + * SAUCE: (no-up) ahci_xgene: Skip the PHY and clock initialization if + already configured by the firmware. + - LP: #1359501 + * SAUCE: (no-up) ahci_xgene: Fix the link down in first attempt for the + APM X-Gene SoC AHCI SATA host controller driver. + - LP: #1359507 + + [ Tuan Phan ] + + * SAUCE: (no-up) pci-xgene-msi: fixed deadlock in irq_set_affinity + - LP: #1359514 + + [ Upstream Kernel Changes ] + + * iwlwifi: mvm: Add a missed beacons threshold + - LP: #1349572 + * mac80211: reset probe_send_count also in HW_CONNECTION_MONITOR case + - LP: #1349572 + * genirq: Add an accessor for IRQ_PER_CPU flag + - LP: #1357527 + * arm64: perf: add support for percpu pmu interrupt + - LP: #1357527 + * cifs: sanity check length of data to send before sending + - LP: #1283101 + * KVM: nVMX: Pass vmexit parameters to nested_vmx_vmexit + - LP: #1329434 + * KVM: nVMX: Rework interception of IRQs and NMIs + - LP: #1329434 + * KVM: vmx: disable APIC virtualization in nested guests + - LP: #1329434 + * HID: Add transport-driver functions to the USB HID interface. + - LP: #1353021 + * ahci_xgene: Removing NCQ support from the APM X-Gene SoC AHCI SATA Host + Controller driver. 
+ - LP: #1358498 + * fold d_kill() and d_free() + - LP: #1354234 + * fold try_prune_one_dentry() + - LP: #1354234 + * new helper: dentry_free() + - LP: #1354234 + * expand the call of dentry_lru_del() in dentry_kill() + - LP: #1354234 + * dentry_kill(): don't try to remove from shrink list + - LP: #1354234 + * don't remove from shrink list in select_collect() + - LP: #1354234 + * more graceful recovery in umount_collect() + - LP: #1354234 + * dcache: don't need rcu in shrink_dentry_list() + - LP: #1354234 + * lift the "already marked killed" case into shrink_dentry_list() + * split dentry_kill() + - LP: #1354234 + * expand dentry_kill(dentry, 0) in shrink_dentry_list() + - LP: #1354234 + * shrink_dentry_list(): take parent's ->d_lock earlier + - LP: #1354234 + * dealing with the rest of shrink_dentry_list() livelock + - LP: #1354234 + * dentry_kill() doesn't need the second argument now + - LP: #1354234 + * dcache: add missing lockdep annotation + - LP: #1354234 + * fs: convert use of typedef ctl_table to struct ctl_table + - LP: #1354234 + * lock_parent: don't step on stale ->d_parent of all-but-freed one + - LP: #1354234 + * tools/testing/selftests/ptrace/peeksiginfo.c: add PAGE_SIZE definition + - LP: #1358855 + * x86, irq, pic: Probe for legacy PIC and set legacy_pic appropriately + - LP: #1317697 + * bnx2x: Fix kernel crash and data miscompare after EEH recovery + - LP: #1353105 + * bnx2x: Adapter not recovery from EEH error injection + - LP: #1353105 + * Fix: module signature vs tracepoints: add new TAINT_UNSIGNED_MODULE + - LP: #1359670 + * bcache: fix crash on shutdown in passthrough mode + - LP: #1357295 + * bcache: fix uninterruptible sleep in writeback thread + - LP: #1357295 + * namespaces: Use task_lock and not rcu to protect nsproxy + - LP: #1328088 + * MAINTAINERS: Add entry for APM X-Gene SoC ethernet driver + - LP: #1360140 + * Documentation: dts: Add bindings for APM X-Gene SoC ethernet driver + - LP: #1360140 + * dts: Add bindings for APM X-Gene SoC ethernet driver + - LP: #1360140 + * drivers: net: Add APM X-Gene SoC ethernet driver support. 
+ - LP: #1360140 + * powerpc/mm: Add new "set" flag argument to pte/pmd update function + - LP: #1357014 + * powerpc/thp: Add write barrier after updating the valid bit + - LP: #1357014 + * powerpc/thp: Don't recompute vsid and ssize in loop on invalidate + - LP: #1357014 + * powerpc/thp: Invalidate old 64K based hash page mapping before insert + of 4k pte + - LP: #1357014 + * powerpc/thp: Handle combo pages in invalidate + - LP: #1357014 + * powerpc/thp: Invalidate with vpn in loop + - LP: #1357014 + * powerpc/thp: Use ACCESS_ONCE when loading pmdp + - LP: #1357014 + * powerpc/mm: Use read barrier when creating real_pte + - LP: #1357014 + * powerpc/thp: Add tracepoints to track hugepage invalidate + - LP: #1357014 + * powerpc: subpage_protect: Increase the array size to take care of 64TB + - LP: #1357014 + * mfd: rtsx: Add set pull control macro and simplify rtl8411 + - LP: #1361086 + * mfd: rtsx: Add support for card reader rtl8402 + - LP: #1361086 + * kvm: iommu: fix the third parameter of kvm_iommu_put_pages + (CVE-2014-3601) + - LP: #1362443 + - CVE-2014-3601 + * isofs: Fix unbounded recursion when processing relocated directories + - LP: #1362447, #1362448 + - CVE-2014-5472 + * net: sctp: inherit auth_capable on INIT collisions + - LP: #1349804 + - CVE-2014-5077 + * blk-mq: fix initializing request's start time + - LP: #1297522 + + [ Vinayak Kale ] + + * SAUCE: (no-up) dt-bindings: Add Potenza PMU binding + - LP: #1357527 + * SAUCE: (no-up) arm64: dts: Add PMU node for APM X-Gene Storm SOC + - LP: #1357527 + + -- Joseph Salisbury Wed, 03 Sep 2014 12:13:43 -0400 + +linux (3.13.0-35.62) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1357148 + + [ Brad Figg ] + + * Start new release + + [ dann frazier ] + + * SAUCE: (no-up) Fix build failure on arm64 + - LP: #1353657 + * [debian] Allow for package revisions condusive for branching + + [ David Henningsson ] + + * SAUCE: Call broadwell specific functions from the hda driver + - LP: #1317865 + + [ Edward Lin ] + + * SAUCE: (no-up) Add use native backlight quirk for Dell Inspiron + 5547/5447 + - LP: #1332437 + + [ Imre Deak ] + + * SAUCE: drm/i915: move power domain init earlier during system resume + - LP: #1353405 + + [ Jani Nikula ] + + * SAUCE: drm/i915: use lane count and link rate from VBT as minimums for + eDP + - LP: #1338582 + * SAUCE: drm/i915/dp: force eDP lane count to max available lanes on BDW + - LP: #1338582 + * SAUCE: drm/i915: provide interface for audio driver to query cdclk + - LP: #1188091 + * SAUCE: drm/i915: demote opregion excessive timeout WARN_ONCE to + DRM_INFO_ONCE + - LP: #1351014 + + [ Joseph Salisbury ] + + * [Config] updateconfigs after Linux 3.13.11.6 updates + + [ Luis Henriques ] + + * Revert "[Packaging] linux-udeb-flavour -- standardise on linux prefix" + + [ Ming Lei ] + + * Revert "SAUCE: (no-up) ata: Fix the dma state machine lockup for the + IDENTIFY DEVICE PIO mode command." + - LP: #1335645 + + [ Paulo Zanoni ] + + * SAUCE: drm/i915: consider the source max DP lane count too + - LP: #1338582 + + [ Tim Gardner ] + + * [Config] CONFIG_GPIO_SYSFS=y + - LP: #1342153 + * [Config] CONFIG_KEYS_DEBUG_PROC_KEYS=y + - LP: #1344405 + * [Config] updateconfigs + * [Config] CONFIG_SCSI_IPR_TRACE=y, CONFIG_SCSI_IPR_DUMP=y + - LP: #1343109 + * [Config] CONFIG_CONTEXT_TRACKING_FORCE=n + - LP: #1349028 + + [ Timo Aaltonen ] + + * SAUCE: Fix a typo in hda i915_bdw support. 
+ - LP: #1343140 + + [ Upstream Kernel Changes ] + + * Revert "net/mlx4_en: Fix bad use of dev_id" + - LP: #1347012 + * Revert "ACPI / AC: Remove AC's proc directory." + - LP: #1356913 + * Revert "mac80211: move "bufferable MMPDU" check to fix AP mode scan" + - LP: #1356913 + * mm, pcp: allow restoring percpu_pagelist_fraction default + - LP: #1347088 + * net: Fix permission check in netlink_connect() + - LP: #1312989 + * netlink: Rename netlink_capable netlink_allowed + - LP: #1312989 + * net: Move the permission check in sock_diag_put_filterinfo to + packet_diag_dump + - LP: #1312989 + * net: Add variants of capable for use on on sockets + - LP: #1312989 + * net: Add variants of capable for use on netlink messages + - LP: #1312989 + * net: Use netlink_ns_capable to verify the permisions of netlink + messages + - LP: #1312989 + * netlink: Only check file credentials for implicit destinations + - LP: #1312989 + * igb: fix stats for i210 rx_fifo_errors + - LP: #1338893 + * HID: use multi input quirk for 22b9:2968 + - LP: #1339567 + * crypto/nx: disable NX on little endian builds + - LP: #1338666 + * ACPI / video: Add Dell Inspiron 5737 to the blacklist + - LP: #1250401 + * Input: elantech - deal with clickpads reporting right button events + - LP: #1188025 + * net/mlx4_core: Enforce irq affinity changes immediatly + - LP: #1326108 + * cpumask: Utility function to set n'th cpu - local cpu first + - LP: #1326108 + * net/mlx4_en: Use affinity hint + - LP: #1326108 + * net/mlx4_en: Don't use irq_affinity_notifier to track changes in IRQ + affinity map + - LP: #1326108 + * net/mlx4_en: IRQ affinity hint is not cleared on port down + - LP: #1326108 + * Subject: net: Allow tc changes in user namespaces + - LP: #1344049 + * net-gro: restore frag0 optimization + - LP: #1344323 + * Bluetooth: Fix redundant encryption request for reauthentication + - LP: #1347088 + * Bluetooth: Fix check for connection encryption + - LP: #1347088 + * introduce for_each_thread() to replace the buggy while_each_thread() + - LP: #1347088 + * NFS: Don't declare inode uptodate unless all attributes were checked + - LP: #1347088 + * usb: dwc3: gadget: clear stall when disabling endpoint + - LP: #1347088 + * ACPICA: utstring: Check array index bound before use. 
+ - LP: #1347088 + * mtip32xx: Increase timeout for STANDBY IMMEDIATE command + - LP: #1347088 + * mtip32xx: Remove dfs_parent after pci unregister + - LP: #1347088 + * mtip32xx: Fix ERO and NoSnoop values in PCIe upstream on AMD systems + - LP: #1347088 + * extcon: max77693: Fix two NULL pointer exceptions on missing pdata + - LP: #1347088 + * extcon: max8997: Fix NULL pointer exception on missing pdata + - LP: #1347088 + * builddeb: use $OBJCOPY variable instead of objcopy + - LP: #1347088 + * bluetooth: hci_ldisc: fix deadlock condition + - LP: #1347088 + * powerpc/pseries: Fix overwritten PE state + - LP: #1347088 + * PCI: Add new ID for Intel GPU "spurious interrupt" quirk + - LP: #1347088 + * x86-32, espfix: Remove filter for espfix32 due to race + - LP: #1347088 + * genirq: Sanitize spurious interrupt detection of threaded irqs + - LP: #1347088 + * Drivers: hv: balloon: Ensure pressure reports are posted regularly + - LP: #1347088 + * x86, x32: Use compat shims for io_{setup,submit} + - LP: #1347088 + * iwlwifi: pcie: try to get ownership several times + - LP: #1347088 + * ext4: fix data integrity sync in ordered mode + - LP: #1347088 + * UBIFS: fix an mmap and fsync race condition + - LP: #1347088 + * [media] rtl28xxu: add USB ID for Genius TVGo DVB-T03 + - LP: #1347088 + * [media] rtl28xxu: add 1b80:d395 Peak DVB-T USB + - LP: #1347088 + * [media] rtl28xxu: add [1b80:d39d] Sveon STV20 + - LP: #1347088 + * [media] rtl28xxu: add [1b80:d3af] Sveon STV27 + - LP: #1347088 + * ASoC: max98090: Fix reset at resume time + - LP: #1347088 + * ACPI: Fix conflict between customized DSDT and DSDT local copy + - LP: #1347088 + * PM / OPP: fix incorrect OPP count handling in of_init_opp_table + - LP: #1347088 + * Target/iser: Bail from accept_np if np_thread is trying to close + - LP: #1347088 + * Target/iser: Fix hangs in connection teardown + - LP: #1347088 + * HID: core: fix validation of report id 0 + - LP: #1347088 + * IB/srp: Fix a sporadic crash triggered by cable pulling + - LP: #1347088 + * Target/iser: Improve cm events handling + - LP: #1347088 + * Target/iser: Wait for proper cleanup before unloading + - LP: #1347088 + * mtd: nand: omap: fix BCHx ecc.correct to return detected bit-flips in + erased-page + - LP: #1347088 + * mtd: eLBC NAND: fix subpage write support + - LP: #1347088 + * reiserfs: call truncate_setsize under tailpack mutex + - LP: #1347088 + * ARM: stacktrace: avoid listing stacktrace functions in stacktrace + - LP: #1347088 + * SUNRPC: Fix a module reference leak in svc_handle_xprt + - LP: #1347088 + * [media] uvcvideo: Fix clock param realtime setting + - LP: #1347088 + * [media] ivtv: Fix Oops when no firmware is loaded + - LP: #1347088 + * CIFS: Fix memory leaks in SMB2_open + - LP: #1347088 + * iio:adc:max1363 incorrect resolutions for max11604, max11605, max11610 + and max11611. 
+ - LP: #1347088 + * staging/mt29f_spinand: Terminate of match table + - LP: #1347088 + * mac80211: fix IBSS join by initializing last_scan_completed + - LP: #1347088 + * KVM: lapic: sync highest ISR to hardware apic on EOI + - LP: #1347088 + * s390/time: cast tv_nsec to u64 prior to shift in update_vsyscall + - LP: #1347088 + * ahci: add PCI ID for Marvell 88SE91A0 SATA Controller + - LP: #1347088 + * ext4: fix zeroing of page during writeback + - LP: #1347088 + * ext4: fix wrong assert in ext4_mb_normalize_request() + - LP: #1347088 + * IB/mlx5: add missing padding at end of struct mlx5_ib_create_cq + - LP: #1347088 + * IB/mlx5: add missing padding at end of struct mlx5_ib_create_srq + - LP: #1347088 + * IB/qib: Fix port in pkey change event + - LP: #1347088 + * IB/ipath: Translate legacy diagpkt into newer extended diagpkt + - LP: #1347088 + * mei: me: drop harmful wait optimization + - LP: #1347088 + * mei: me: read H_CSR after asserting reset + - LP: #1347088 + * usb: usbtest: fix unlink write error with pattern 1 + - LP: #1347088 + * s390/lowcore: reserve 96 bytes for IRB in lowcore + - LP: #1347088 + * mac80211: fix a memory leak on sta rate selection table + - LP: #1347088 + * mac80211: don't check netdev state for debugfs read/write + - LP: #1347088 + * mtd: pxa3xx_nand: make the driver work on big-endian systems + - LP: #1347088 + * hv: use correct order when freeing monitor_pages + - LP: #1347088 + * usb: qcserial: add Netgear AirCard 341U + - LP: #1347088 + * usb: qcserial: add additional Sierra Wireless QMI devices + - LP: #1347088 + * IB/umad: Fix error handling + - LP: #1347088 + * RDMA/cxgb4: Add missing padding at end of struct c4iw_create_cq_resp + - LP: #1347088 + * MIPS: KVM: Allocate at least 16KB for exception handlers + - LP: #1347088 + * block: virtio_blk: don't hold spin lock during world switch + - LP: #1347088 + * nfsd: getattr for FATTR4_WORD0_FILES_AVAIL needs the statfs buffer + - LP: #1347088 + * ASoC: tlv320aci3x: Fix custom snd_soc_dapm_put_volsw_aic3x() function + - LP: #1347088 + * UBIFS: Remove incorrect assertion in shrink_tnc() + - LP: #1347088 + * Bluetooth: Fix L2CAP deadlock + - LP: #1347088 + * vgaswitcheroo: switch the mux to the igp on power down when runpm is + enabled + - LP: #1347088 + * drm/radeon: fix typo in radeon_connector_is_dp12_capable() + - LP: #1347088 + * drm/radeon/dp: fix lane/clock setup for dp 1.2 capable devices + - LP: #1347088 + * drm/radeon/atom: fix dithering on certain panels + - LP: #1347088 + * drm/radeon: only apply hdmi bpc pll flags when encoder mode is hdmi + - LP: #1347088 + * ahci: Add Device ID for HighPoint RocketRaid 642L + - LP: #1347088 + * mm: fix sleeping function warning from __put_anon_vma + - LP: #1347088 + * hugetlb: restrict hugepage_migration_support() to x86_64 + - LP: #1347088 + * kthread: fix return value of kthread_create() upon SIGKILL. 
+ - LP: #1347088 + * mm: vmscan: do not throttle based on pfmemalloc reserves if node has no + ZONE_NORMAL + - LP: #1347088 + * memcg: do not hang on OOM when killed by userspace OOM access to memory + reserves + - LP: #1347088 + * mm: page_alloc: use word-based accesses for get/set pageblock bitmaps + - LP: #1347088 + * mm/memory-failure.c-failure: send right signal code to correct thread + - LP: #1347088 + * mm/memory-failure.c: don't let collect_procs() skip over processes for + MF_ACTION_REQUIRED + - LP: #1347088 + * mm/memory-failure.c: support use of a dedicated thread to handle + SIGBUS(BUS_MCEERR_AO) + - LP: #1347088 + * powerpc/serial: Use saner flags when creating legacy ports + - LP: #1347088 + * ALSA: hda/realtek - Add support of ALC891 codec + - LP: #1347088 + * rbd: use reference counts for image requests + - LP: #1347088 + * iscsi-target: Reject mutual authentication with reflected CHAP_C + - LP: #1347088 + * powerpc/mm: Check paca psize is up to date for huge mappings + - LP: #1347088 + * IB/umad: Fix use-after-free on close + - LP: #1347088 + * mm: vmscan: clear kswapd's special reclaim powers before exiting + - LP: #1347088 + * rtc: rtc-at91rm9200: fix infinite wait for ACKUPD irq + - LP: #1347088 + * ptrace: fix fork event messages across pid namespaces + - LP: #1347088 + * idr: fix overflow bug during maximum ID calculation at maximum height + - LP: #1347088 + * Input: elantech - don't set bit 1 of reg_10 when the no_hw_res quirk is + set + - LP: #1347088 + * nfsd4: fix FREE_STATEID lockowner leak + - LP: #1347088 + * Btrfs: fix double free in find_lock_delalloc_range + - LP: #1347088 + * target: Set CMD_T_ACTIVE bit for Task Management Requests + - LP: #1347088 + * target: Use complete_all for se_cmd->t_transport_stop_comp + - LP: #1347088 + * iscsi-target: Fix ABORT_TASK + connection reset iscsi_queue_req memory + leak + - LP: #1347088 + * drm/nv50-/mc: fix kms pageflip events by reordering irq handling order. + - LP: #1347088 + * drm/nouveau/kms/nv04-nv40: fix pageflip events via special case. + - LP: #1347088 + * NFS: populate ->net in mount data when remounting + - LP: #1347088 + * watchdog: kempld-wdt: Use the correct value when configuring the + prescaler with the watchdog + - LP: #1347088 + * watchdog: ath79_wdt: avoid spurious restarts on AR934x + - LP: #1347088 + * watchdog: sp805: Set watchdog_device->timeout from ->set_timeout() + - LP: #1347088 + * fs,userns: Change inode_capable to capable_wrt_inode_uidgid + - LP: #1347088 + * powerpc: Add AT_HWCAP2 to indicate V.CRYPTO category support + - LP: #1347088 + * powerpc: Correct DSCR during TM context switch + - LP: #1347088 + * powerpc: Don't setup CPUs with bad status + - LP: #1347088 + * Target/iscsi: Fix sendtargets response pdu for iser transport + - LP: #1347088 + * target: Report correct response length for some commands + - LP: #1347088 + * dm thin: update discard_granularity to reflect the thin-pool blocksize + - LP: #1347088 + * ALSA: compress: Cancel the optimization of compiler and fix the size of + struct for all platform. 
+ - LP: #1347088 + * hwmon: (ina2xx) Cast to s16 on shunt and current regs + - LP: #1347088 + * evm: prohibit userspace writing 'security.evm' HMAC value + - LP: #1347088 + * ALSA: hda - Add quirk for external mic on Lifebook U904 + - LP: #1328587, #1347088 + * ALSA: hda/realtek - Add more entry for enable HP mute led + - LP: #1347088 + * ALSA: hda/realtek - Add more entry for enable HP mute led + - LP: #1347088 + * staging: iio: tsl2x7x_core: fix proximity treshold + - LP: #1347088 + * iio: Fix endianness issue in ak8975_read_axis() + - LP: #1347088 + * rtmutex: Handle deadlock detection smarter + - LP: #1347088 + * rtmutex: Detect changes in the pi lock chain + - LP: #1347088 + * drm/i915: Disable FBC by default also on Haswell and later + - LP: #1347088 + * drm/i915: Avoid div-by-zero when pixel_multiplier is zero + - LP: #1347088 + * drm/i915: Reorder semaphore deadlock check + - LP: #1347088 + * iio: adc: at91: signedness bug in at91_adc_get_trigger_value_by_name() + - LP: #1347088 + * rtmutex: Plug slow unlock race + - LP: #1347088 + * ACPI / ia64 / sba_iommu: Restore the working initialization ordering + - LP: #1347088 + * epoll: fix use-after-free in eventpoll_release_file + - LP: #1347088 + * drm/nouveau/kms: reference vblank for crtc during pageflip. + - LP: #1347088 + * ARM: mvebu: DT: fix OpenBlocks AX3-4 RAM size + - LP: #1347088 + * USB: EHCI: avoid BIOS handover on the HASEE E200 + - LP: #1347088 + * arm64: Bug fix in stack alignment exception + - LP: #1347088 + * arm64: ptrace: change fs when passing kernel pointer to regset code + - LP: #1347088 + * arm64: uid16: fix __kernel_old_{gid,uid}_t definitions + - LP: #1347088 + * arm64: ptrace: fix empty registers set in prstatus of aarch32 process + core + - LP: #1347088 + * ALSA: control: Protect user controls against concurrent access + - LP: #1347088 + * ALSA: control: Fix replacing user controls + - LP: #1347088 + * ALSA: control: Don't access controls outside of protected regions + - LP: #1347088 + * ALSA: control: Handle numid overflow + - LP: #1347088 + * ALSA: control: Make sure that id->index does not overflow + - LP: #1347088 + * Bluetooth: Fix SSP acceptor just-works confirmation without MITM + - LP: #1347088 + * Bluetooth: Fix setting correct authentication information for SMP STK + - LP: #1347088 + * Bluetooth: Fix indicating discovery state when canceling inquiry + - LP: #1347088 + * Bluetooth: Fix locking of hdev when calling into SMP code + - LP: #1347088 + * Bluetooth: Allow change security level on ATT_CID in slave role + - LP: #1347088 + * rt2x00: disable TKIP on USB + - LP: #1347088 + * b43: fix frequency reported on G-PHY with /new/ firmware + - LP: #1347088 + * rt2x00: fix rfkill regression on rt2500pci + - LP: #1347088 + * blkcg: fix use-after-free in __blkg_release_rcu() by making blkcg_gq + refcnt an atomic_t + - LP: #1347088 + * rbd: handle parent_overlap on writes correctly + - LP: #1347088 + * ALSA: hda - hdmi: call overridden init on resume + - LP: #1347088 + * x86_32, entry: Do syscall exit work on badsys (CVE-2014-4508) + - LP: #1347088 + * hugetlb: fix copy_hugetlb_page_range() to handle migration/hwpoisoned + entry + - LP: #1347088 + * kernel/watchdog.c: remove preemption restrictions when restarting + lockup detector + - LP: #1347088 + * DMA, CMA: fix possible memory leak + - LP: #1347088 + * mm: fix crashes from mbind() merging vmas + - LP: #1347088 + * drm/i915: Hold the table lock whilst walking the file's idr and + counting the objects in debugfs + - LP: #1347088 + * [CIFS] fix mount 
failure with broken pathnames when smb3 mount with + mapchars option + - LP: #1347088 + * aio: fix aio request leak when events are reaped by userspace + - LP: #1347088 + * aio: fix kernel memory disclosure in io_getevents() introduced in v3.10 + - LP: #1347088 + * nfs: Fix cache_validity check in nfs_write_pageuptodate() + - LP: #1347088 + * powerpc: Don't skip ePAPR spin-table CPUs + - LP: #1347088 + * net: allwinner: emac: Add missing free_irq + - LP: #1347088 + * ALSA: usb-audio: Fix races at disconnection and PCM closing + - LP: #1347088 + * recordmcount/MIPS: Fix possible incorrect mcount_loc table entries in + modules + - LP: #1347088 + * MIPS: MSC: Prevent out-of-bounds writes to MIPS SC ioremap'd region + - LP: #1347088 + * ALSA: hda - restore BCLK M/N values when resuming HSW/BDW display + controller + - LP: #1347088 + * target: Fix left-over se_lun->lun_sep pointer OOPs + - LP: #1347088 + * iscsi-target: Explicily clear login response PDU in exception path + - LP: #1347088 + * efi-pstore: Fix an overflow on 32-bit builds + - LP: #1347088 + * lz4: fix another possible overrun + - LP: #1347088 + * iscsi-target: Avoid rejecting incorrect ITT for Data-Out + - LP: #1347088 + * iscsi-target: fix iscsit_del_np deadlock on unload + - LP: #1347088 + * Linux 3.13.11.5 + - LP: #1347088 + * powerpc/powernv: Fix endianness problems in EEH + - LP: #1340200 + * libahci: export ahci_qc_issue() and ahci_start_fix_rx() + - LP: #1335645 + * ahci_xgene: fix the dma state machine lockup for the IDENTIFY DEVICE + PIO mode command. + - LP: #1335645 + * fix build error in gpio-dwapb patch + - LP: #1348808 + * usb: Check if port status is equal to RxDetect + - LP: #1322409 + * net/mlx4_en: Protect MAC address modification with the state_lock mutex + - LP: #1347012 + * net/mlx4_en: Fix errors in MAC address changing when port is down + - LP: #1347012 + * bonding: Advertize vxlan offload features when supported + - LP: #1347012 + * net/mlx4_core: Fix the error flow when probing with invalid VF + configuration + - LP: #1347012 + * net/mlx4_en: Don't configure the HW vxlan parser when vxlan offloading + isn't set + - LP: #1347012 + * net/mlx4_core: Keep only one driver entry release mlx4_priv + - LP: #1347012 + * net/mlx4_core: Preserve pci_dev_data after __mlx4_remove_one() + - LP: #1347012 + * net/mlx4_core: Defer VF initialization till PF is fully initialized + - LP: #1347012 + * net/mlx4_core: Adjust port number in qp_attach wrapper when detaching + - LP: #1347012 + * net/mlx4_core: Fix slave id computation for single port VF + - LP: #1347012 + * net/mlx4_core: Load the Eth driver first + - LP: #1347012 + * net/mlx4_core: Don't issue PCIe speed/width checks for VFs + - LP: #1347012 + * net/mlx4_core: Add UPDATE_QP SRIOV wrapper support + - LP: #1347012 + * net/mlx4_core: Reset RoCE VF gids when guest driver goes down + - LP: #1347012 + * net/mlx4_en: Reduce memory consumption on kdump kernel + - LP: #1347012 + * net/mlx4_core: Use low memory profile on kdump kernel + - LP: #1347012 + * net/mlx4_en: current_mac isn't updated in port up + - LP: #1347012 + * net/mlx4_en: Disable blueflame using ethtool private flags + - LP: #1347012 + * net/mlx4_en: Fix mac_hash database inconsistency + - LP: #1347012 + * ext4: handle symlink properly with inline_data + - LP: #1349020 + * net/mlx4_en: cq->irq_desc wasn't set in legacy EQ's + - LP: #1354242 + * rtl8821ae: fixup staging driver for revised + ieee80211_is_robust_mgmt_frame + - LP: #1354469 + * ahci_xgene: Fix the watermark threshold for the APM X-Gene SATA 
host + controller driver. + - LP: #1350087 + * ahci_xgene: Use correct OOB tunning parameters for APM X-Gene SoC AHCI + SATA Host controller driver. + - LP: #1350087 + * sunrpc: create a new dummy pipe for gssd to hold open + - LP: #1327563 + * sunrpc: replace sunrpc_net->gssd_running flag with a more reliable + check + - LP: #1327563 + * nfs: check if gssd is running before attempting to use krb5i auth in + SETCLIENTID call + - LP: #1327563 + * ACPI / PAD: call schedule() when need_resched() is true + - LP: #1356913 + * KVM: ioapic: fix assignment of ioapic->rtc_status.pending_eoi + (CVE-2014-0155) + - LP: #1356913 + * target: Explicitly clear ramdisk_mcp backend pages + - LP: #1356913 + * sctp: Fix sk_ack_backlog wrap-around problem + - LP: #1356913 + * mm: hugetlb: fix copy_hugetlb_page_range() + - LP: #1356913 + * x86_32, entry: Store badsys error code in %eax + - LP: #1356913 + * shmem: fix faulting into a hole while it's punched + - LP: #1356913 + * shmem: fix faulting into a hole, not taking i_mutex + - LP: #1356913 + * shmem: fix splicing from a hole while it's punched + - LP: #1356913 + * ipvs: Fix panic due to non-linear skb + - LP: #1356913 + * ALSA: hda - verify pin:converter connection on unsol event for HSW and + VLV + - LP: #1356913 + * ALSA: hda - verify pin:cvt connection on preparing a stream for Intel + HDMI codec + - LP: #1356913 + * x86/xen: safely map and unmap grant frames when in atomic context + - LP: #1356913 + * ext4: Fix buffer double free in ext4_alloc_branch() + - LP: #1356913 + * ARM: OMAP2+: Fix parser-bug in platform muxing code + - LP: #1356913 + * KVM: x86: Increase the number of fixed MTRR regs to 10 + - LP: #1356913 + * KVM: x86: preserve the high 32-bits of the PAT register + - LP: #1356913 + * usb: musb: ux500: don't propagate the OF node + - LP: #1356913 + * usb: gadget: f_fs: fix NULL pointer dereference when there are no + strings + - LP: #1356913 + * staging: iio/ad7291: fix error code in ad7291_probe() + - LP: #1356913 + * iio: of_iio_channel_get_by_name() returns non-null pointers for error + legs + - LP: #1356913 + * irqchip: spear_shirq: Fix interrupt offset + - LP: #1356913 + * USB: option: add device ID for SpeedUp SU9800 usb 3g modem + - LP: #1356913 + * USB: ftdi_sio: fix null deref at port probe + - LP: #1356913 + * usb: option: add/modify Olivetti Olicard modems + - LP: #1356913 + * scsi_error: fix invalid setting of host byte + - LP: #1356913 + * xhci: Use correct SLOT ID when handling a reset device command + - LP: #1356913 + * xhci: correct burst count field for isoc transfers on 1.0 xhci hosts + - LP: #1356913 + * xhci: clear root port wake on bits if controller isn't wake-up capable + - LP: #1356913 + * xhci: Fix runtime suspended xhci from blocking system suspend. 
+ - LP: #1356913 + * ibmvscsi: Abort init sequence during error recovery + - LP: #1356913 + * ibmvscsi: Add memory barriers for send / receive + - LP: #1356913 + * virtio-scsi: avoid cancelling uninitialized work items + - LP: #1356913 + * virtio-scsi: fix various bad behavior on aborted requests + - LP: #1356913 + * MIPS: KVM: Fix memory leak on VCPU + - LP: #1356913 + * ext4: Fix hole punching for files with indirect blocks + - LP: #1356913 + * usb: musb: Fix panic upon musb_am335x module removal + - LP: #1356913 + * usb: musb: Ensure that cppi41 timer gets armed on premature DMA TX irq + - LP: #1356913 + * nfsd: fix rare symlink decoding bug + - LP: #1356913 + * tools: ffs-test: fix header values endianess + - LP: #1356913 + * usb-storage/SCSI: Add broken_fua blacklist flag + - LP: #1356913 + * drm/radeon/dpm: fix typo in vddci setup for eg/btc + - LP: #1356913 + * drm/radeon/dpm: fix vddci setup typo on cayman + - LP: #1356913 + * tracing: Remove ftrace_stop/start() from reading the trace file + - LP: #1356913 + * usb: chipidea: udc: delete td from req's td list at ep_dequeue + - LP: #1356913 + * drm/radeon/cik: fix typo in EOP packet + - LP: #1356913 + * md: flush writes before starting a recovery. + - LP: #1356913 + * drm/vmwgfx: Fix incorrect write to read-only register v2: + - LP: #1356913 + * mm: page_alloc: fix CMA area initialisation when pageblock > MAX_ORDER + - LP: #1356913 + * /proc/stat: convert to single_open_size() + - LP: #1356913 + * nick kvfree() from apparmor + - LP: #1356913 + * fs/seq_file: fallback to vmalloc allocation + - LP: #1356913 + * lz4: add overrun checks to lz4_uncompress_unknownoutputsize() + - LP: #1356913 + * arm64: mm: Make icache synchronisation logic huge page aware + - LP: #1356913 + * workqueue: fix dev_set_uevent_suppress() imbalance + - LP: #1356913 + * cpuset,mempolicy: fix sleeping function called from invalid context + - LP: #1356913 + * crypto: sha512_ssse3 - fix byte count to bit count conversion + - LP: #1356913 + * thermal: hwmon: Make the check for critical temp valid consistent + - LP: #1356913 + * clk: s2mps11: Fix double free corruption during driver unbind + - LP: #1356913 + * hwmon: (amc6821) Fix permissions for temp2_input + - LP: #1356913 + * hwmon: (adm1029) Ensure the fan_div cache is updated in set_fan_div + - LP: #1356913 + * hwmon: (adm1021) Fix cache problem when writing temperature limits + - LP: #1356913 + * ext4: fix unjournalled bg descriptor while initializing inode bitmap + - LP: #1356913 + * ext4: clarify error count warning messages + - LP: #1356913 + * ext4: clarify ext4_error message in ext4_mb_generate_buddy_error() + - LP: #1356913 + * ext4: disable synchronous transaction batching if max_batch_time==0 + - LP: #1356913 + * intel_pstate: Fix setting VID + - LP: #1356913 + * intel_pstate: don't touch turbo bit if turbo disabled or unavailable. 
+ - LP: #1356913 + * intel_pstate: Set CPU number before accessing MSRs + - LP: #1356913 + * USB: cp210x: add support for Corsair usb dongle + - LP: #1356913 + * usb: option: Add ID for Telewell TW-LTE 4G v2 + - LP: #1356913 + * ACPI / EC: Avoid race condition related to advance_transaction() + - LP: #1356913 + * ACPI / EC: Add asynchronous command byte write support + - LP: #1356913 + * ACPI / EC: Remove duplicated ec_wait_ibf0() waiter + - LP: #1356913 + * ACPI / EC: Fix race condition in ec_transaction_completed() + - LP: #1356913 + * ACPI / battery: Retry to get battery information if failed during + probing + - LP: #1356913 + * hwmon: (adm1031) Fix writes to limit registers + - LP: #1356913 + * workqueue: zero cpumask of wq_numa_possible_cpumask on init + - LP: #1356913 + * hwmon: (emc2103) Clamp limits instead of bailing out + - LP: #1356913 + * arm64: implement TASK_SIZE_OF + - LP: #1356913 + * iio: ti_am335x_adc: Fix: Use same step id at FIFOs both ends + - LP: #1356913 + * cpufreq: Makefile: fix compilation for davinci platform + - LP: #1356913 + * drm/i915: Don't clobber the GTT when it's within stolen memory + - LP: #1356913 + * Drivers: hv: vmbus: Fix a bug in the channel callback dispatch code + - LP: #1356913 + * USB: ftdi_sio: Add extra PID. + - LP: #1356913 + * crypto: caam - fix memleak in caam_jr module + - LP: #1356913 + * dm: allocate a special workqueue for deferred device removal + - LP: #1356913 + * dm io: fix a race condition in the wake up code for sync_io + - LP: #1356913 + * drm/radeon/dp: return -EIO for flags not zero case + - LP: #1356913 + * drm/radeon: fix typo in golden register setup on evergreen + - LP: #1356913 + * drm/radeon: fix typo in ci_stop_dpm() + - LP: #1356913 + * drm/radeon/dpm: Reenabling SS on Cayman + - LP: #1356913 + * powerpc/perf: Add PPMU_ARCH_207S define + - LP: #1356913 + * powerpc/perf: Clear MMCR2 when enabling PMU + - LP: #1356913 + * powerpc/perf: Never program book3s PMCs with values >= 0x80000000 + - LP: #1356913 + * USB: serial: ftdi_sio: Add Infineon Triboard + - LP: #1356913 + * phy: core: Fix error path in phy_create() + - LP: #1356913 + * ext4: fix a potential deadlock in __ext4_es_shrink() + - LP: #1356913 + * parisc: add serial ports of C8000/1GHz machine to hardware database + - LP: #1356913 + * parisc: fix fanotify_mark() syscall on 32bit compat kernel + - LP: #1356913 + * parisc: drop unused defines and header includes + - LP: #1356913 + * clk: spear3xx: Use proper control register offset + - LP: #1356913 + * Bluetooth: Ignore H5 non-link packets in non-active state + - LP: #1356913 + * iwlwifi: update the 7265 series HW IDs + - LP: #1356913 + * mwifiex: fix Tx timeout issue + - LP: #1356913 + * x86, tsc: Fix cpufreq lockup + - LP: #1356913 + * perf/x86/intel: ignore CondChgd bit to avoid false NMI handling + - LP: #1356913 + * perf: Do not allow optimized switch for non-cloned events + - LP: #1356913 + * xen/manage: fix potential deadlock when resuming the console + - LP: #1356913 + * iwlwifi: dvm: don't enable CTS to self + - LP: #1356913 + * iwlwifi: mvm: disable CTS to Self + - LP: #1356913 + * xen/balloon: set ballooned out pages as invalid in p2m + - LP: #1356913 + * mtd: devices: elm: fix elm_context_save() and elm_context_restore() + functions + - LP: #1356913 + * fuse: timeout comparison fix + - LP: #1356913 + * fuse: ignore entry-timeout on LOOKUP_REVAL + - LP: #1356913 + * fuse: handle large user and group ID + - LP: #1356913 + * alarmtimer: Fix bug where relative alarm timers were treated as + absolute + - 
LP: #1356913 + * irqchip: gic: Add support for cortex a7 compatible string + - LP: #1356913 + * net: mvneta: fix operation in 10 Mbit/s mode + - LP: #1356913 + * net: mvneta: Fix big endian issue in mvneta_txq_desc_csum() + - LP: #1356913 + * igb: Workaround for i210 Errata 25: Slow System Clock + - LP: #1356913 + * x86/efi: Include a .bss section within the PE/COFF headers + - LP: #1356913 + * igb: do a reset on SR-IOV re-init if device is down + - LP: #1356913 + * iio:core: Handle error when mask type is not separate + - LP: #1356913 + * of/irq: do irq resolution in platform_get_irq_byname() + - LP: #1356913 + * platform_get_irq: Revert to platform_get_resource if of_irq_get fails + - LP: #1356913 + * aio: protect reqs_available updates from changes in interrupt handlers + - LP: #1356913 + * hwmon: (da9052) Don't use dash in the name attribute + - LP: #1356913 + * hwmon: (da9055) Don't use dash in the name attribute + - LP: #1356913 + * PM / sleep: Fix request_firmware() error at resume + - LP: #1356913 + * ALSA: hda - Fix broken PM due to incomplete i915 initialization + - LP: #1356913 + * tracing: Add ftrace_trace_stack into __trace_puts/__trace_bputs + - LP: #1356913 + * tracing: Fix graph tracer with stack tracer on other archs + - LP: #1356913 + * tracing: Add TRACE_ITER_PRINTK flag check in __trace_puts/__trace_bputs + - LP: #1356913 + * dm thin metadata: do not allow the data block size to change + - LP: #1356913 + * dm cache metadata: do not allow the data block size to change + - LP: #1356913 + * quota: missing lock in dqcache_shrink_scan() + - LP: #1356913 + * ring-buffer: Fix polling on trace_pipe + - LP: #1356913 + * sched: Fix possible divide by zero in avg_atom() calculation + - LP: #1356913 + * locking/mutex: Disable optimistic spinning on some architectures + - LP: #1356913 + * drm/qxl: return IRQ_NONE if it was not our irq + - LP: #1356913 + * hwmon: (adt7470) Fix writes to temperature limit registers + - LP: #1356913 + * cpufreq: move policy kobj to policy->cpu at resume + - LP: #1356913 + * drm/radeon: avoid leaking edid data + - LP: #1356913 + * drm/radeon: set default bl level to something reasonable + - LP: #1356913 + * usb: chipidea: udc: Disable auto ZLP generation on ep0 + - LP: #1356913 + * irqchip: gic: Fix core ID calculation when topology is read from DT + - LP: #1356913 + * slab_common: fix the check for duplicate slab names + - LP: #1356913 + * xtensa: add fixup for double exception raised in window overflow + - LP: #1356913 + * [media] media: v4l2-core: v4l2-dv-timings.c: Cleaning up code wrong + value used in aspect ratio + - LP: #1356913 + * [media] hdpvr: fix two audio bugs + - LP: #1356913 + * block: don't assume last put of shared tags is for the host + - LP: #1356913 + * blkcg: don't call into policy draining if root_blkg is already gone + - LP: #1356913 + * block: provide compat ioctl for BLKZEROOUT + - LP: #1356913 + * libata: support the ata host which implements a queue depth less than + 32 + - LP: #1356913 + * [media] tda10071: force modulation to QPSK on DVB-S + - LP: #1356913 + * [media] gspca_pac7302: Add new usb-id for Genius i-Look 317 + - LP: #1356913 + * s390/ptrace: fix PSW mask check + - LP: #1356913 + * ahci: add support for the Promise FastTrak TX8660 SATA HBA (ahci mode) + - LP: #1356913 + * Input: fix defuzzing logic + - LP: #1356913 + * tracing: Fix wraparound problems in "uptime" trace clock + - LP: #1356913 + * drm/i915: Reorder the semaphore deadlock check, again + - LP: #1356913 + * libata: introduce ata_host->n_tags to avoid 
oops on SAS controllers + - LP: #1356913 + * drm/radeon: fix irq ring buffer overflow handling + - LP: #1356913 + * coredump: fix the setting of PF_DUMPCORE + - LP: #1356913 + * fs: umount on symlink leaks mnt count + - LP: #1356913 + * hwmon: (smsc47m192) Fix temperature limit and vrm write operations + - LP: #1356913 + * parisc: Remove SA_RESTORER define + - LP: #1356913 + * drm/radeon: fix cut and paste issue for hawaii. + - LP: #1356913 + * parport: fix menu breakage + - LP: #1356913 + * Fix gcc-4.9.0 miscompilation of load_balance() in scheduler + - LP: #1356913 + * scsi: handle flush errors properly + - LP: #1356913 + * cfg80211: fix mic_failure tracing + - LP: #1356913 + * iio: buffer: Fix demux table creation + - LP: #1356913 + * iio:bma180: Fix scale factors to report correct acceleration units + - LP: #1356913 + * iio:bma180: Missing check for frequency fractional part + - LP: #1356913 + * powerpc/perf: Fix MMCR2 handling for EBB + - LP: #1356913 + * ath9k: fix aggregation session lockup + - LP: #1356913 + * sched_clock: Avoid corrupting hrtimer tree during suspend + - LP: #1356913 + * staging: vt6655: Fix Warning on boot handle_irq_event_percpu. + - LP: #1356913 + * staging: vt6655: Fix disassociated messages every 10 seconds + - LP: #1356913 + * can: c_can_platform: Fix raminit, use devm_ioremap() instead of + devm_ioremap_resource() + - LP: #1356913 + * crypto: arm-aes - fix encryption of unaligned data + - LP: #1356913 + * ARM: fix alignment of keystone page table fixup + - LP: #1356913 + * net: sendmsg: fix NULL pointer dereference + - LP: #1356913 + * mm/page-writeback.c: fix divide by zero in bdi_dirty_limits() + - LP: #1356913 + * mm, thp: do not allow thp faults to avoid cpuset restrictions + - LP: #1356913 + * rapidio/tsi721_dma: fix failure to obtain transaction descriptor + - LP: #1356913 + * memcg: oom_notify use-after-free fix + - LP: #1356913 + * crypto: af_alg - properly label AF_ALG socket + - LP: #1356913 + * printk: rename printk_sched to printk_deferred + - LP: #1356913 + * timer: Fix lock inversion between hrtimer_bases.lock and scheduler + locks + - LP: #1356913 + * dm bufio: fully initialize shrinker + - LP: #1356913 + * dm cache: fix race affecting dirty block count + - LP: #1356913 + * qlcnic: info leak in qlcnic_dcb_peer_app_info() + - LP: #1356913 + * netlink: rate-limit leftover bytes warning and print process name + - LP: #1356913 + * bridge: Prevent insertion of FDB entry with disallowed vlan + - LP: #1356913 + * net: tunnels - enable module autoloading + - LP: #1356913 + * net: fix inet_getid() and ipv6_select_ident() bugs + - LP: #1356913 + * team: fix mtu setting + - LP: #1356913 + * tcp: fix cwnd undo on DSACK in F-RTO + - LP: #1356913 + * sh_eth: use RNC mode for packet reception + - LP: #1356913 + * sh_eth: fix SH7619/771x support + - LP: #1356913 + * net: filter: fix typo in sparc BPF JIT + - LP: #1356913 + * net: filter: fix sparc32 typo + - LP: #1356913 + * net: qmi_wwan: add Olivetti Olicard modems + - LP: #1356913 + * net: force a list_del() in unregister_netdevice_many() + - LP: #1356913 + * ipip, sit: fix ipv4_{update_pmtu,redirect} calls + - LP: #1356913 + * sfc: PIO:Restrict to 64bit arch and use 64-bit writes. 
+ - LP: #1356913 + * ipv4: fix a race in ip4_datagram_release_cb() + - LP: #1356913 + * rtnetlink: fix userspace API breakage for iproute2 < v3.9.0 + - LP: #1356913 + * vxlan: use dev->needed_headroom instead of dev->hard_header_len + - LP: #1356913 + * udp: ipv4: do not waste time in __udp4_lib_mcast_demux_lookup + - LP: #1356913 + * ip_tunnel: fix ip_tunnel_lookup + - LP: #1356913 + * slip: Fix deadlock in write_wakeup + - LP: #1356913 + * slcan: Port write_wakeup deadlock fix from slip + - LP: #1356913 + * net: sctp: propagate sysctl errors from proc_do* properly + - LP: #1356913 + * tcp: fix tcp_match_skb_to_sack() for unaligned SACK at end of an skb + - LP: #1356913 + * net: sctp: check proc_dointvec result in proc_sctp_do_auth + - LP: #1356913 + * 8021q: fix a potential memory leak + - LP: #1356913 + * net: huawei_cdc_ncm: increase command buffer size + - LP: #1356913 + * ipv4: fix dst race in sk_dst_get() + - LP: #1356913 + * ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix + - LP: #1356913 + * net: fix sparse warning in sk_dst_set() + - LP: #1356913 + * vlan: free percpu stats in device destructor + - LP: #1356913 + * bnx2x: fix possible panic under memory stress + - LP: #1356913 + * tcp: Fix divide by zero when pushing during tcp-repair + - LP: #1356913 + * ipv4: icmp: Fix pMTU handling for rare case + - LP: #1356913 + * net: qmi_wwan: Add ID for Telewell TW-LTE 4G v2 + - LP: #1356913 + * net: qmi_wwan: add two Sierra Wireless/Netgear devices + - LP: #1356913 + * net: Fix NETDEV_CHANGE notifier usage causing spurious arp flush + - LP: #1356913 + * igmp: fix the problem when mc leave group + - LP: #1356913 + * tcp: fix false undo corner cases + - LP: #1356913 + * appletalk: Fix socket referencing in skb + - LP: #1356913 + * netlink: Fix handling of error from netlink_dump(). 
+ - LP: #1356913 + * be2net: set EQ DB clear-intr bit in be_open() + - LP: #1356913 + * tipc: clear 'next'-pointer of message fragments before reassembly + - LP: #1356913 + * net: sctp: fix information leaks in ulpevent layer + - LP: #1356913 + * net: pppoe: use correct channel MTU when using Multilink PPP + - LP: #1356913 + * sunvnet: clean up objects created in vnet_new() on vnet_exit() + - LP: #1356913 + * net: huawei_cdc_ncm: add "subclass 3" devices + - LP: #1356913 + * dns_resolver: assure that dns_query() result is null-terminated + - LP: #1356913 + * dns_resolver: Null-terminate the right string + - LP: #1356913 + * ipv4: fix buffer overflow in ip_options_compile() + - LP: #1356913 + * x86/xen: no need to explicitly register an NMI callback + - LP: #1356913 + * Linux 3.13.11.6 + - LP: #1356913 + + -- Joseph Salisbury Thu, 14 Aug 2014 17:12:19 -0400 + +linux (3.13.0-34.60) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1356396 + + [ Upstream Kernel Changes ] + + * mnt: Only change user settable mount flags in remount + - CVE-2014-5206 + * mnt: Move the test for MNT_LOCK_READONLY from change_mount_flags into + do_remount + - CVE-2014-5206 + * mnt: Correct permission checks in do_remount + - CVE-2014-5207 + * mnt: Change the default remount atime from relatime to the existing + value + - CVE-2014-5207 + + -- Brad Figg Tue, 29 Jul 2014 08:25:07 -0700 + +linux (3.13.0-33.58) trusty; urgency=low + + [ Brad Figg ] + + * Release Tracking Bug + - LP: #1349897 + + [ Upstream Kernel Changes ] + + * mm: numa: do not automatically migrate KSM pages + - LP: #1346917 + * net: fix UDP tunnel GSO of frag_list GRO packets + - LP: #1331219 + * auditsc: audit_krule mask accesses need bounds checking + - LP: #1347088 + * n_tty: Fix buffer overruns with larger-than-4k pastes + - LP: #1208740 + + -- Tim Gardner Fri, 18 Jul 2014 14:57:50 +0000 + +linux (3.13.0-32.57) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * l2tp: Privilege escalation in ppp over l2tp sockets + - LP: #1341472 + - CVE-2014-4943 + + -- Luis Henriques Mon, 14 Jul 2014 10:03:44 +0100 + +linux (3.13.0-32.56) trusty; urgency=low + + [ Luis Henriques ] + + * Merged back Ubuntu-3.13.0-30.55 security release + * Revert "x86_64,ptrace: Enforce RIP <= TASK_SIZE_MAX (CVE-2014-4699)" + - LP: #1337339 + * Release Tracking Bug + - LP: #1338524 + + [ Upstream Kernel Changes ] + + * ptrace,x86: force IRET path after a ptrace_stop() + - LP: #1337339 + - CVE-2014-4699 + * hpsa: add new Smart Array PCI IDs (May 2014) + - LP: #1337516 + + -- Luis Henriques Mon, 07 Jul 2014 11:38:37 +0100 + +linux (3.13.0-31.55) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1336278 + + [ Andy Whitcroft ] + + * [Config] switch hyper-keyboard to virtual + - LP: #1325306 + * [Packaging] linux-udeb-flavour -- standardise on linux prefix + + [ dann frazier ] + + * [Config] CONFIG_GPIO_DWAPB=m + - LP: #1334823 + + [ Feng Kan ] + + * SAUCE: (no-up) arm64: dts: Add Designware GPIO dts binding to APM + X-Gene platform + - LP: #1334823 + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: fix apparmor spams log with warning message + - LP: #1308761 + + [ Kamal Mostafa ] + + * [Config] updateconfigs ACPI_PROCFS_POWER=y after v3.13.11.4 rebase + + [ Loc Ho ] + + * SAUCE: (no-up) phy-xgene: Use correct tuning for Mustang + - LP: #1335636 + + [ Michael Ellerman ] + + * SAUCE: (no-up) powerpc/perf: Ensure all EBB register state is cleared + on fork() + - LP: #1328914 + + [ Ming Lei ] + + * Revert "SAUCE: (no-up) rtc: 
Add X-Gene SoC Real Time Clock Driver" + - LP: #1274305 + + [ Suman Tripathi ] + + * SAUCE: (no-up) libahci: Implement the function ahci_restart_engine to + restart the port dma engine. + - LP: #1335645 + * SAUCE: (no-up) ata: Fix the dma state machine lockup for the IDENTIFY + DEVICE PIO mode command. + - LP: #1335645 + + [ Tim Gardner ] + + * [Config] CONFIG_POWERNV_CPUFREQ=y for powerpc, ppc64el + - LP: #1324571 + * [Debian] Add UTS_UBUNTU_RELEASE_ABI to utsrelease.h + - LP: #1327619 + * [Config] CONFIG_HAVE_MEMORYLESS_NODES=y + - LP: #1332063 + * [Config] CONFIG_HID_RMI=m + - LP: #1305522 + + [ Upstream Kernel Changes ] + + * Revert "offb: Add palette hack for little endian" + - LP: #1333430 + * Revert "net: mvneta: fix usage as a module on RGMII configurations" + - LP: #1333837 + * Revert "USB: serial: add usbid for dell wwan card to sierra.c" + - LP: #1333837 + * Revert "macvlan : fix checksums error when we are in bridge mode" + - LP: #1333838 + * serial: uart: add hw flow control support configuration + - LP: #1328295 + * mm/numa: Remove BUG_ON() in __handle_mm_fault() + - LP: #1323165 + * Tools: hv: Handle the case when the target file exists correctly + - LP: #1306215 + * Documentation/devicetree/bindings: add documentation for the APM X-Gene + SoC RTC DTS binding + - LP: #1274305 + * drivers/rtc: add APM X-Gene SoC RTC driver + - LP: #1274305 + * arm64: add APM X-Gene SoC RTC DTS entry + - LP: #1274305 + * powerpc/perf: Add Power8 cache & TLB events + - LP: #1328914 + * powerpc/perf: Configure BHRB filter before enabling PMU interrupts + - LP: #1328914 + * powerpc/perf: Define perf_event_print_debug() to print PMU register + values + - LP: #1328914 + * powerpc: Add a cpu feature CPU_FTR_PMAO_BUG + - LP: #1328914 + * powerpc/perf: Add lost exception workaround + - LP: #1328914 + * powerpc/perf: Reject EBB events which specify a sample_type + - LP: #1328914 + * powerpc/perf: Clean up the EBB hash defines a little + - LP: #1328914 + * powerpc/perf: Avoid mutating event in power8_get_constraint() + - LP: #1328914 + * powerpc/perf: Add BHRB constraint and IFM MMCRA handling for EBB + - LP: #1328914 + * powerpc/perf: Enable BHRB access for EBB events + - LP: #1328914 + * powerpc/perf: Fix handling of L3 events with bank == 1 + - LP: #1328914 + * Bluetooth: Add support for Intel Bluetooth device [8087:0a2a] + - LP: #1329184 + * iwlwifi: mvm: disable beacon filtering + - LP: #1293569 + * SUNRPC: Ensure that call_connect times out correctly + - LP: #1322407 + * SUNRPC: Ensure call_connect_status() deals correctly with SOFTCONN + tasks + - LP: #1322407 + * bitops: Fix signedness of compile-time hweight implementations + - LP: #1321791 + * cpumask.h: silence warning with -Wsign-compare + - LP: #1321791 + * fbdev/fb.h: silence warning with -Wsign-compare + - LP: #1321791 + * rtlwifi: make MSI support a module parameter + - LP: #1296591 + * rtlwifi: rtl8188ee: add msi module parameter + - LP: #1296591 + * rtlwifi: rtl8723be: add msi module parameter + - LP: #1296591 + * net: avoid dependency of net_get_random_once on nop patching + - LP: #1330671 + * x86-64, modify_ldt: Make support for 16-bit segments a runtime option + - LP: #1328965 + * ALSA: usb-audio: Prevent printk ratelimiting from spamming kernel log + while DEBUG not defined + - LP: #1319457 + * btrfs: fix defrag 32-bit integer overflow + - LP: #1324953 + * dell-laptop: Only install the i8042 filter when rfkill is active + - LP: #1289238 + * kthread: ensure locality of task_struct allocations + - LP: #1332063 + * slub: search 
partial list on numa_mem_id(), instead of numa_node_id() + - LP: #1332063 + * powerpc/numa: Enable USE_PERCPU_NUMA_NODE_ID + - LP: #1332063 + * powerpc/numa: Enable CONFIG_HAVE_MEMORYLESS_NODES + - LP: #1332063 + * drm/i915: Allow user modes to exceed DVI 165MHz limit + - LP: #1332220 + * HID: rmi: introduce RMI driver for Synaptics touchpads + - LP: #1305522 + * HID: rmi: do not stop the device at the end of probe + - LP: #1305522 + * HID: rmi: check for the existence of some optional queries before + reading query 12 + - LP: #1305522 + * HID: rmi: do not fetch more than 16 bytes in a query + - LP: #1305522 + * HID: rmi: fix wrong struct field name + - LP: #1305522 + * HID: rmi: fix masks for x and w_x data + - LP: #1305522 + * HID: rmi: do not handle touchscreens through hid-rmi + - LP: #1305522 + * ipv6: Fix regression caused by efe4208 in udp_v6_mcast_next() + - LP: #1332420 + * HID: core: do not scan constant input report + - LP: #1333837 + * drm/radeon: fix audio pin counts for DCE6+ (v2) + - LP: #1333837 + * mac80211: fix software remain-on-channel implementation + - LP: #1333837 + * mac80211: exclude AP_VLAN interfaces from tx power calculation + - LP: #1333837 + * iwlwifi: add new 7265 HW IDs + - LP: #1333837 + * parisc: fix epoll_pwait syscall on compat kernel + - LP: #1333837 + * iwlwifi: add MODULE_FIRMWARE for 7265 + - LP: #1333837 + * dma: edma: fix incorrect SG list handling + - LP: #1333837 + * ALSA: hda/realtek - Add support of ALC288 codec + - LP: #1333837 + * xen/spinlock: Don't enable them unconditionally. + - LP: #1333837 + * tick-common: Fix wrong check in tick_check_replacement() + - LP: #1333837 + * tick-sched: Check tick_nohz_enabled in tick_nohz_switch_to_nohz() + - LP: #1333837 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1333837 + * ALSA: hda/realtek - Add headset Mic support for Dell machine + - LP: #1333837 + * staging: r8188eu: Calling rtw_get_stainfo() with a NULL sta_addr will + return NULL + - LP: #1333837 + * cifs: Wait for writebacks to complete before attempting write. 
+ - LP: #1333837 + * mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll + - LP: #1333837 + * mei: ignore client writing state during cb completion + - LP: #1333837 + * staging: r8712u: Fix case where ethtype was never obtained and always + be checked against 0 + - LP: #1333837 + * staging: r8188eu: Fix case where ethtype was never obtained and always + be checked against 0 + - LP: #1333837 + * USB: serial: ftdi_sio: add id for Brainboxes serial cards + - LP: #1333837 + * usb: option driver, add support for Telit UE910v2 + - LP: #1333837 + * USB: cp210x: Add 8281 (Nanotec Plug & Drive) + - LP: #1333837 + * USB: pl2303: add ids for Hewlett-Packard HP POS pole displays + - LP: #1333837 + * USB: usb_wwan: fix handling of missing bulk endpoints + - LP: #1333837 + * USB: fix crash during hotplug of PCI USB controller card + - LP: #1333837 + * USB: cdc-acm: Remove Motorola/Telit H24 serial interfaces from ACM + driver + - LP: #1333837 + * Drivers: hv: vmbus: Negotiate version 3.0 when running on ws2012r2 + hosts + - LP: #1333837 + * serial: omap: Fix missing pm_runtime_resume handling by simplifying + code + - LP: #1333837 + * drm/radeon: disable mclk dpm on R7 260X + - LP: #1333837 + * drm/radeon: fix runpm handling on APUs (v4) + - LP: #1333837 + * drm/radeon: add support for newer mc ucode on SI (v2) + - LP: #1333837 + * drm/radeon: add support for newer mc ucode on CI (v2) + - LP: #1333837 + * drm/radeon: re-enable mclk dpm on R7 260X asics + - LP: #1333837 + * drm/radeon: memory leak on bo reservation failure. v2 + - LP: #1333837 + * drm/radeon/si: make sure mc ucode is loaded before checking the size + - LP: #1333837 + * drm/radeon/ci: make sure mc ucode is loaded before checking the size + - LP: #1333837 + * init/Kconfig: move the trusted keyring config option to general setup + - LP: #1333837 + * mm/hugetlb.c: add cond_resched_lock() in return_unused_surplus_pages() + - LP: #1333837 + * thp: close race between split and zap huge pages + - LP: #1333837 + * coredump: fix va_list corruption + - LP: #1333837 + * powerpc/tm: Disable IRQ in tm_recheckpoint + - LP: #1333837 + * ACPI / EC: Process rather than discard events in acpi_ec_clear + - LP: #1333837 + * ath9k: Fix sequence number assignment for non-data frames + - LP: #1333837 + * xhci: Switch Intel Lynx Point ports to EHCI on shutdown. 
+ - LP: #1333837 + * iio: adc: at91_adc: Repair broken platform_data support + - LP: #1333837 + * iio: querying buffer scan_mask should return 0/1 + - LP: #1333837 + * iio: cm36651: Fix i2c client leak and possible NULL pointer dereference + - LP: #1333837 + * libata: Update queued trim blacklist for M5x0 drives + - LP: #1333837 + * pata_at91: fix ata_host_activate() failure handling + - LP: #1333837 + * ext4: avoid possible overflow in ext4_map_blocks() + - LP: #1333837 + * ext4: FIBMAP ioctl causes BUG_ON due to handle EXT_MAX_BLOCKS + - LP: #1333837 + * ext4: note the error in ext4_end_bio() + - LP: #1333837 + * ext4: fix jbd2 warning under heavy xattr load + - LP: #1333837 + * ext4: move ext4_update_i_disksize() into mpage_map_and_submit_extent() + - LP: #1333837 + * ext4: use i_size_read in ext4_unaligned_aio() + - LP: #1333837 + * locks: allow __break_lease to sleep even when break_time is 0 + - LP: #1333837 + * usb: gadget: zero: Fix SuperSpeed enumeration for alternate setting 1 + - LP: #1333837 + * ahci: do not request irq for dummy port + - LP: #1333837 + * genirq: Allow forcing cpu affinity of interrupts + - LP: #1333837 + * irqchip: Gic: Support forced affinity setting + - LP: #1333837 + * clocksource: Exynos_mct: Use irq_force_affinity() in cpu bringup + - LP: #1333837 + * clocksource: Exynos_mct: Register clock event after request_irq() + - LP: #1333837 + * nfsd: set timeparms.to_maxval in setup_callback_client + - LP: #1333837 + * ahci: Do not receive interrupts sent by dummy ports + - LP: #1333837 + * libata/ahci: accommodate tag ordered controllers + - LP: #1333837 + * drm/radeon: disable dpm on rv770 by default + - LP: #1333837 + * Input: synaptics - add min/max quirk for ThinkPad T431s, L440, L540, S1 + Yoga and X1 + - LP: #1333837 + * drm/radeon: fix count in cik_sdma_ring_test() + - LP: #1333837 + * drm/radeon: properly unregister hwmon interface (v2) + - LP: #1333837 + * drm/radeon/pm: don't walk the crtc list before it has been initialized + (v2) + - LP: #1333837 + * drm/radeon: fix ATPX detection on non-VGA GPUs + - LP: #1333837 + * drm/radeon: don't allow runpm=1 on systems with out ATPX + - LP: #1333837 + * mm: make fixup_user_fault() check the vma access rights too + - LP: #1333837 + * ARM: 8027/1: fix do_div() bug in big-endian systems + - LP: #1333837 + * ARM: 8030/1: ARM : kdump : add arch_crash_save_vmcoreinfo + - LP: #1333837 + * ARM: pxa: hx4700.h: include "irqs.h" for PXA_NR_BUILTIN_GPIO + - LP: #1333837 + * ARM: tegra: remove UART5/UARTE from tegra124.dtsi + - LP: #1333837 + * USB: serial: fix sysfs-attribute removal deadlock + - LP: #1333837 + * 8250_core: Fix unwanted TX chars write + - LP: #1333837 + * serial: 8250: Fix thread unsafe __dma_tx_complete function + - LP: #1333837 + * Btrfs: fix inode caching vs tree log + - LP: #1333837 + * xhci: For streams the css flag most be read from the stream-ctx on ep + stop + - LP: #1333837 + * usb: xhci: Prefer endpoint context dequeue pointer over stopped_trb + - LP: #1333837 + * USB: io_ti: fix firmware download on big-endian machines + - LP: #1333837 + * usb: qcserial: add Sierra Wireless EM7355 + - LP: #1333837 + * usb: qcserial: add Sierra Wireless MC73xx + - LP: #1333837 + * usb: qcserial: add Sierra Wireless MC7305/MC7355 + - LP: #1333837 + * usb: option: add Olivetti Olicard 500 + - LP: #1333837 + * usb: option: add Alcatel L800MA + - LP: #1333837 + * usb: option: add and update a number of CMOTech devices + - LP: #1333837 + * word-at-a-time: avoid undefined behaviour in zero_bytemask macro + - LP: 
#1333837 + * s390/chsc: fix SEI usage on old FW levels + - LP: #1333837 + * irqchip: armada-370-xp: fix invalid cast of signed value into unsigned + variable + - LP: #1333837 + * irqchip: armada-370-xp: implement the ->check_device() msi_chip + operation + - LP: #1333837 + * irqchip: armada-370-xp: Fix releasing of MSIs + - LP: #1333837 + * ASoC: dapm: Fix widget double free with auto-disable DAPM kcontrol + - LP: #1333837 + * drm/i915: Don't check gmch state on inherited configs + - LP: #1333837 + * drm/vmwgfx: Make sure user-space can't DMA across buffer object + boundaries v2 + - LP: #1333837 + * of/irq: do irq resolution in platform_get_irq + - LP: #1333837 + * s390/bpf,jit: initialize A register if 1st insn is BPF_S_LDX_B_MSH + - LP: #1333837 + * drm/i915: Don't WARN nor handle unexpected hpd interrupts on gmch + platforms + - LP: #1333837 + * module: remove warning about waiting module removal. + - LP: #1333837 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1333837 + * arm: KVM: fix possible misalignment of PGDs and bounce page + - LP: #1333837 + * KVM: ARM: vgic: Fix sgi dispatch problem + - LP: #1333837 + * KVM: async_pf: mm->mm_users can not pin apf->mm + - LP: #1333837 + * ftrace/module: Hardcode ftrace_module_init() call into load_module() + - LP: #1333837 + * [SCSI] mpt2sas: Don't disable device twice at suspend. + - LP: #1333837 + * [SCSI] virtio-scsi: Skip setting affinity on uninitialized vq + - LP: #1333837 + * drivercore: deferral race condition fix + - LP: #1333837 + * hrtimer: Prevent all reprogramming if hang detected + - LP: #1333837 + * hrtimer: Prevent remote enqueue of leftmost timers + - LP: #1333837 + * timer: Prevent overflow in apply_slack + - LP: #1333837 + * ARC: !PREEMPT: Ensure Return to kernel mode is IRQ safe + - LP: #1333837 + * aio: fix potential leak in aio_run_iocb(). 
+ - LP: #1333837 + * dm cache: fix writethrough mode quiescing in cache_map + - LP: #1333837 + * fix races between __d_instantiate() and checks of dentry flags + - LP: #1333837 + * net: Fix ns_capable check in sock_diag_put_filterinfo + - LP: #1333837 + * rt2x00: fix beaconing on USB + - LP: #1333837 + * rtlwifi: rtl8188ee: initialize packet_beacon + - LP: #1333837 + * Input: synaptics - add min/max quirk for ThinkPad Edge E431 + - LP: #1333837 + * Input: atkbd - fix keyboard not working on some LG laptops + - LP: #1333837 + * Bluetooth: Fix triggering BR/EDR L2CAP Connect too early + - LP: #1333837 + * drm/i915: Break encoder->crtc link separately in intel_sanitize_crtc() + - LP: #1333837 + * iio:imu:mpu6050: Fixed segfault in Invensens MPU driver due to null + dereference + - LP: #1333837 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1333837 + * rtlwifi: rtl8192se: Fix regression due to commit 1bf4bbb + - LP: #1333837 + * rtl8192cu: Fix unbalanced irq enable in error path of rtl92cu_hw_init() + - LP: #1333837 + * drm/radeon/uvd: use lower clocks on old UVD to boot v2 + - LP: #1333837 + * drm/radeon: use pflip irq on R600+ v2 + - LP: #1333837 + * drm/radeon: check buffer relocation offset + - LP: #1333837 + * drm/nouveau/acpi: allow non-optimus setups to load vbios from acpi + - LP: #1333837 + * drm/nouveau: fix another lock unbalance in nouveau_crtc_page_flip + - LP: #1333837 + * ALSA: usb-audio: work around corrupted TEAC UD-H01 feedback data + - LP: #1333837 + * USB: OHCI: fix problem with global suspend on ATI controllers + - LP: #1333837 + * usb: qcserial: add a number of Dell devices + - LP: #1333837 + * usb: storage: shuttle_usbat: fix discs being detected twice + - LP: #1333837 + * fsl-usb: do not test for PHY_CLK_VALID bit on controller version 1.6 + - LP: #1333837 + * tty: serial: 8250_core.c Bug fix for Exar chips. 
+ - LP: #1333837 + * drivers/tty/hvc: don't free hvc_console_setup after init + - LP: #1333837 + * tty: Fix lockless tty buffer race + - LP: #1333837 + * USB: Nokia 305 should be treated as unusual dev + - LP: #1333837 + * USB: Nokia 5300 should be treated as unusual dev + - LP: #1333837 + * HID: add NO_INIT_REPORTS quirk for Synaptics Touch Pad V 103S + - LP: #1333837 + * ALSA: hda - hdmi: Set converter channel count even without sink + - LP: #1333837 + * Input: elantech - fix touchpad initialization on Gigabyte U2442 + - LP: #1333837 + * posix_acl: handle NULL ACL in posix_acl_equiv_mode + - LP: #1333837 + * mm/page-writeback.c: fix divide by zero in pos_ratio_polynom + - LP: #1333837 + * mm: compaction: detect when scanners meet in isolate_freepages + - LP: #1333837 + * mm/compaction: make isolate_freepages start at pageblock boundary + - LP: #1333837 + * autofs: fix lockref lookup + - LP: #1333837 + * libata: Blacklist queued trim for Crucial M500 + - LP: #1333837 + * Linux 3.13.11.3 + - LP: #1333837 + * net: sctp: wake up all assocs if sndbuf policy is per socket + - LP: #1333838 + * net: sctp: test if association is dead in sctp_wake_up_waiters + - LP: #1333838 + * l2tp: take PMTU from tunnel UDP socket + - LP: #1333838 + * net: core: don't account for udp header size when computing seglen + - LP: #1333838 + * bonding: Remove debug_fs files when module init fails + - LP: #1333838 + * bridge: Fix double free and memory leak around br_allowed_ingress + - LP: #1333838 + * ipv6: Limit mtu to 65575 bytes + - LP: #1333838 + * gre: don't allow to add the same tunnel twice + - LP: #1333838 + * vti: don't allow to add the same tunnel twice + - LP: #1333838 + * ipv4: return valid RTA_IIF on ip route get + - LP: #1333838 + * filter: prevent nla extensions to peek beyond the end of the message + - LP: #1333838 + * ip6_gre: don't allow to remove the fb_tunnel_dev + - LP: #1333838 + * vlan: Fix lockdep warning when vlan dev handle notification + - LP: #1333838 + * net: Find the nesting level of a given device by type. + - LP: #1333838 + * net: Allow for more then a single subclass for netif_addr_lock + - LP: #1333838 + * vlan: Fix lockdep warning with stacked vlan devices. + - LP: #1333838 + * macvlan: Fix lockdep warnings with stacked macvlan devices + - LP: #1333838 + * tg3: update rx_jumbo_pending ring param only when jumbo frames are + enabled + - LP: #1333838 + * net: sctp: cache auth_enable per endpoint + - LP: #1333838 + * rtnetlink: Warn when interface's information won't fit in our packet + - LP: #1333838 + * rtnetlink: Only supply IFLA_VF_PORTS information when RTEXT_FILTER_VF + is set + - LP: #1333838 + * ipv6: fib: fix fib dump restart + - LP: #1333838 + * bridge: Handle IFLA_ADDRESS correctly when creating bridge device + - LP: #1333838 + * sctp: reset flowi4_oif parameter on route lookup + - LP: #1333838 + * net: qmi_wwan: add Sierra Wireless EM7355 + - LP: #1333838 + * net: qmi_wwan: add Sierra Wireless MC73xx + - LP: #1333838 + * net: qmi_wwan: add Sierra Wireless MC7305/MC7355 + - LP: #1333838 + * net: qmi_wwan: add Olivetti Olicard 500 + - LP: #1333838 + * net: qmi_wwan: add Alcatel L800MA + - LP: #1333838 + * net: qmi_wwan: add a number of CMOTech devices + - LP: #1333838 + * net: qmi_wwan: add a number of Dell devices + - LP: #1333838 + * slip: fix spinlock variant + - LP: #1333838 + * net: sctp: Potentially-Failed state should not be reached from + unconfirmed state + - LP: #1333838 + * net: sctp: Don't transition to PF state when transport has exhausted + 'Path.Max.Retrans'. 
+ - LP: #1333838 + * macvtap: Fix checksum errors for non-gso packets in bridge mode + - LP: #1333838 + * tcp_cubic: fix the range of delayed_ack + - LP: #1333838 + * vsock: Make transport the proto owner + - LP: #1333838 + * net: cdc_ncm: fix buffer overflow + - LP: #1333838 + * ip_tunnel: Set network header properly for IP_ECN_decapsulate() + - LP: #1333838 + * net: cdc_mbim: __vlan_find_dev_deep need rcu_read_lock + - LP: #1333838 + * net: ipv4: ip_forward: fix inverted local_df test + - LP: #1333838 + * net: ipv6: send pkttoobig immediately if orig frag size > mtu + - LP: #1333838 + * ipv4: fib_semantics: increment fib_info_cnt after fib_info allocation + - LP: #1333838 + * net: cdc_mbim: handle unaccelerated VLAN tagged frames + - LP: #1333838 + * macvlan: Don't propagate IFF_ALLMULTI changes on down interfaces. + - LP: #1333838 + * sfc: fix calling of free_irq with already free vector + - LP: #1333838 + * ip6_tunnel: fix potential NULL pointer dereference + - LP: #1333838 + * net: filter: x86: fix JIT address randomization + - LP: #1333838 + * net: filter: s390: fix JIT address randomization + - LP: #1333838 + * ipv6: fix calculation of option len in ip6_append_data + - LP: #1333838 + * rtnetlink: wait for unregistering devices in rtnl_link_unregister() + - LP: #1333838 + * net: gro: make sure skb->cb[] initial content has not to be zero + - LP: #1333838 + * batman-adv: fix reference counting imbalance while sending fragment + - LP: #1333838 + * batman-adv: increase orig refcount when storing ref in gw_node + - LP: #1333838 + * batman-adv: fix local TT check for outgoing arp requests in DAT + - LP: #1333838 + * ip_tunnel: Initialize the fallback device properly + - LP: #1333838 + * ipv4: initialise the itag variable in __mkroute_input + - LP: #1333838 + * net-gro: reset skb->truesize in napi_reuse_skb() + - LP: #1333838 + * netfilter: ipv4: defrag: set local_df flag on defragmented skb + - LP: #1333838 + * ima: introduce ima_kernel_read() + - LP: #1333838 + * ima: audit log files opened with O_DIRECT flag + - LP: #1333838 + * percpu: make pcpu_alloc_chunk() use pcpu_mem_free() instead of kfree() + - LP: #1333838 + * workqueue: fix bugs in wq_update_unbound_numa() failure path + - LP: #1333838 + * [media] fc2580: fix tuning failure on 32-bit arch + - LP: #1333838 + * memory: mvebu-devbus: fix the conversion of the bus width + - LP: #1333838 + * ARM: orion5x: fix target ID for crypto SRAM window + - LP: #1333838 + * workqueue: make rescuer_thread() empty wq->maydays list before exiting + - LP: #1333838 + * workqueue: fix a possible race condition between rescuer and + pwq-release + - LP: #1333838 + * spi: core: Ignore unsupported Dual/Quad Transfer Mode bits + - LP: #1333838 + * device_cgroup: rework device access check and exception checking + - LP: #1333838 + * PCI: mvebu: fix off-by-one in the computed size of the mbus windows + - LP: #1333838 + * bus: mvebu-mbus: allow several windows with the same target/attribute + - LP: #1333838 + * PCI: mvebu: split PCIe BARs into multiple MBus windows when needed + - LP: #1333838 + * ARM: mvebu: fix NOR bus-width in Armada XP GP Device Tree + - LP: #1333838 + * ARM: mvebu: fix NOR bus-width in Armada XP DB Device Tree + - LP: #1333838 + * ARM: mvebu: fix NOR bus-width in Armada XP OpenBlocks AX3 Device Tree + - LP: #1333838 + * crypto: caam - add allocation failure handling in SPRINTFCAT macro + - LP: #1333838 + * ARM: common: edma: Fix xbar mapping + - LP: #1333838 + * clk: Fix double free due to devm_clk_register() + - LP: #1333838 + * 
[media] media-device: fix infoleak in ioctl media_enum_entities() + - LP: #1333838 + * ARM: dts: kirkwood: fix mislocated pcie-controller nodes + - LP: #1333838 + * device_cgroup: check if exception removal is allowed + - LP: #1333838 + * md: avoid possible spinning md thread at shutdown. + - LP: #1333838 + * ACPI: Remove Kconfig symbol ACPI_PROCFS + - LP: #1333838 + * ACPI: Revert "ACPI: Remove CONFIG_ACPI_PROCFS_POWER and cm_sbsc.c" + - LP: #1333838 + * ACPI: Revert "ACPI / Battery: Remove battery's proc directory" + - LP: #1333838 + * NFSd: Move default initialisers from create_client() to alloc_client() + - LP: #1333838 + * NFSd: call rpc_destroy_wait_queue() from free_client() + - LP: #1333838 + * genirq: Provide irq_force_affinity fallback for non-SMP + - LP: #1333838 + * libata: clean up ZPODD when a port is detached + - LP: #1333838 + * ACPI / blacklist: Add dmi_enable_osi_linux quirk for Asus EEE PC 1015PX + - LP: #1333838 + * ACPI: Revert "ACPI / AC: convert ACPI ac driver to platform bus" + - LP: #1333838 + * ACPI / processor: do not mark present at boot but not onlined CPU as + onlined + - LP: #1333838 + * NFSD: Call ->set_acl with a NULL ACL structure if no entries + - LP: #1333838 + * ALSA: hda - add headset mic detect quirks for three Dell laptops + - LP: #1297581, #1333838 + * gpio: mcp23s08: Bug fix of SPI device tree registration. + - LP: #1333838 + * drm/i915/vlv: reset VLV media force wake request register + - LP: #1333838 + * ARM: dts: i.MX53: Fix ipu register space size + - LP: #1333838 + * mm, thp: close race between mremap() and split_huge_page() + - LP: #1333838 + * intel_pstate: Set turbo VID for BayTrail + - LP: #1333838 + * powerpc/powernv: Reset root port in firmware + - LP: #1333838 + * hrtimer: Set expiry time before switch_hrtimer_base() + - LP: #1333838 + * hwmon: (emc1403) fix inverted store_hyst() + - LP: #1333838 + * hwmon: (emc1403) Fix resource leak on module unload + - LP: #1333838 + * hwmon: (emc1403) Support full range of known chip revision numbers + - LP: #1333838 + * iommu/amd: Fix interrupt remapping for aliased devices + - LP: #1333838 + * ASoC: wm8962: Update register CLASS_D_CONTROL_1 to be non-volatile + - LP: #1333838 + * [media] V4L2: ov7670: fix a wrong index, potentially Oopsing the kernel + from user-space + - LP: #1333838 + * [media] V4L2: fix VIDIOC_CREATE_BUFS in 64- / 32-bit compatibility mode + - LP: #1333838 + * x86, mm, hugetlb: Add missing TLB page invalidation for hugetlb_cow() + - LP: #1333838 + * i2c: designware: Mask all interrupts during i2c controller enable + - LP: #1333838 + * i2c: s3c2410: resume race fix + - LP: #1333838 + * i2c: rcar: bail out on zero length transfers + - LP: #1333838 + * dm crypt: fix cpu hotplug crash by removing per-cpu structure + - LP: #1333838 + * metag: fix memory barriers + - LP: #1333838 + * metag: Reduce maximum stack size to 256MB + - LP: #1333838 + * drm/i915: restore backlight precision when converting from ACPI + - LP: #1333838 + * drm/i915: Increase WM memory latency values on SNB + - LP: #1333838 + * PCI: shpchp: Check bridge's secondary (not primary) bus speed + - LP: #1333838 + * parisc: ratelimit userspace segfault printing + - LP: #1333838 + * parisc: Improve LWS-CAS performance + - LP: #1333838 + * Target/iser: Fix wrong connection requests list addition + - LP: #1333838 + * Target/iser: Fix iscsit_accept_np and rdma_cm racy flow + - LP: #1333838 + * iscsi-target: Change BUG_ON to REJECT in iscsit_process_nop_out + - LP: #1333838 + * tcm_fc: Fix free-after-use regression in 
ft_free_cmd + - LP: #1333838 + * target: Don't allow setting WC emulation if device doesn't support + - LP: #1333838 + * arm: dts: Fix missing device_type="memory" for ste-ccu8540 + - LP: #1333838 + * mips: dts: Fix missing device_type="memory" property in memory nodes + - LP: #1333838 + * arm64: fix pud_huge() for 2-level pagetables + - LP: #1333838 + * libceph: fix corruption when using page_count 0 page in rbd + - LP: #1333838 + * clk: tegra: use pll_ref as the pll_e parent + - LP: #1333838 + * clk: tegra: Fix wrong value written to PLLE_AUX + - LP: #1333838 + * target: fix memory leak on XCOPY + - LP: #1333838 + * sysfs: make sure read buffer is zeroed + - LP: #1333838 + * cfg80211: free sme on connection failures + - LP: #1333838 + * sched: Sanitize irq accounting madness + - LP: #1333838 + * sched: Use CPUPRI_NR_PRIORITIES instead of MAX_RT_PRIO in cpupri check + - LP: #1333838 + * mac80211: fix suspend vs. association race + - LP: #1333838 + * mac80211: fix on-channel remain-on-channel + - LP: #1333838 + * af_iucv: wrong mapping of sent and confirmed skbs + - LP: #1333838 + * perf: Limit perf_event_attr::sample_period to 63 bits + - LP: #1333838 + * perf: Prevent false warning in perf_swevent_add + - LP: #1333838 + * drm/gf119-/disp: fix nasty bug which can clobber SOR0's clock setup + - LP: #1333838 + * drm/radeon: also try GART for CPU accessed buffers + - LP: #1333838 + * drm/radeon: handle non-VGA class pci devices with ATRM + - LP: #1333838 + * drm/radeon: fix register typo on si + - LP: #1333838 + * drm/radeon: avoid segfault on device open when accel is not working. + - LP: #1333838 + * drm/radeon/pm: don't allow debugfs/sysfs access when PX card is off + (v2) + - LP: #1333838 + * can: peak_pci: prevent use after free at netdev removal + - LP: #1333838 + * nfsd4: remove lockowner when removing lock stateid + - LP: #1333838 + * nfsd4: warn on finding lockowner without stateid's + - LP: #1333838 + * dma: mv_xor: Flush descriptors before activating a channel + - LP: #1333838 + * dmaengine: fix dmaengine_unmap failure + - LP: #1333838 + * hwpoison, hugetlb: lock_page/unlock_page does not match for handling a + free hugepage + - LP: #1333838 + * mm/memory-failure.c: fix memory leak by race between poison and + unpoison + - LP: #1333838 + * ARM: OMAP3: clock: Back-propagate rate change from cam_mclk to dpll4_m5 + on all OMAP3 platforms + - LP: #1333838 + * dmaengine: dw: went back to plain {request,free}_irq() calls + - LP: #1333838 + * ARM: omap5: hwmod_data: Correct IDLEMODE for McPDM + - LP: #1333838 + * Input: synaptics - add min/max quirk for the ThinkPad W540 + - LP: #1333838 + * ARM: OMAP2+: nand: Fix NAND on OMAP2 and OMAP3 boards + - LP: #1333838 + * futex: Add another early deadlock detection check + - LP: #1333838 + * futex: Prevent attaching to kernel threads + - LP: #1333838 + * ARM: OMAP4: Fix the boot regression with CPU_IDLE enabled + - LP: #1333838 + * cpufreq: remove race while accessing cur_policy + - LP: #1333838 + * cpufreq: cpu0: drop wrong devm usage + - LP: #1333838 + * ARM: imx: fix error handling in ipu device registration + - LP: #1333838 + * ALSA: hda - Fix onboard audio on Intel H97/Z97 chipsets + - LP: #1333838 + * ARM: 8051/1: put_user: fix possible data corruption in put_user + - LP: #1333838 + * ARM: 8064/1: fix v7-M signal return + - LP: #1333838 + * Input: synaptics - T540p - unify with other LEN0034 models + - LP: #1333838 + * drm/i915: Only copy back the modified fields to userspace from + execbuffer + - LP: #1333838 + * dm cache: always split 
discards on cache block boundaries + - LP: #1333838 + * virtio_blk: don't crash, report error if virtqueue is broken. + - LP: #1333838 + * virtio_blk: fix race between start and stop queue + - LP: #1333838 + * powerpc: Fix 64 bit builds with binutils 2.24 + - LP: #1333838 + * powerpc, kexec: Fix "Processor X is stuck" issue during kexec from ST + mode + - LP: #1333838 + * rtmutex: Fix deadlock detector for real + - LP: #1333838 + * drm/radeon: avoid crash if VM command submission isn't available + - LP: #1333838 + * drm/radeon: don't allow RADEON_GEM_DOMAIN_CPU for command submission + - LP: #1333838 + * iwlwifi: mvm: fix setting channel in monitor mode + - LP: #1333838 + * Staging: speakup: Move pasting into a work item + - LP: #1333838 + * USB: Avoid runtime suspend loops for HCDs that can't handle + suspend/resume + - LP: #1333838 + * can: only rename enabled led triggers when changing the netdev name + - LP: #1333838 + * USB: io_ti: fix firmware download on big-endian machines (part 2) + - LP: #1333838 + * USB: ftdi_sio: add NovaTech OrionLXm product ID + - LP: #1333838 + * USB: serial: option: add support for Novatel E371 PCIe card + - LP: #1333838 + * USB: cdc-wdm: properly include types.h + - LP: #1333838 + * md: always set MD_RECOVERY_INTR when aborting a reshape or other + "resync". + - LP: #1333838 + * xhci: delete endpoints from bandwidth list before freeing whole device + - LP: #1333838 + * md: always set MD_RECOVERY_INTR when interrupting a reshape thread. + - LP: #1333838 + * ALSA: hda/analog - Fix silent output on ASUS A8JN + - LP: #1333838 + * drm/radeon/dpm: resume fixes for some systems + - LP: #1333838 + * drm/radeon: use the CP DMA on CIK + - LP: #1333838 + * ALSA: hda/realtek - Correction of fixup codes for PB V7900 laptop + - LP: #1333838 + * ALSA: hda/realtek - Fix COEF widget NID for ALC260 replacer fixup + - LP: #1333838 + * iser-target: Add missing target_put_sess_cmd for ImmedateData failure + - LP: #1333838 + * iscsi-target: Fix wrong buffer / buffer overrun in + iscsi_change_param_value() + - LP: #1333838 + * percpu-refcount: fix usage of this_cpu_ops + - LP: #1333838 + * target: Fix alua_access_state attribute OOPs for un-configured devices + - LP: #1333838 + * mm: rmap: fix use-after-free in __put_anon_vma + - LP: #1333838 + * mm: add !pte_present() check on existing hugetlb_entry callbacks + - LP: #1333838 + * target: Fix NULL pointer dereference for XCOPY in target_put_sess_cmd + - LP: #1333838 + * Linux 3.13.11.4 + - LP: #1333838 + * powerpc/powernv: Infrastructure to read opal messages in generic + format. 
+ - LP: #1334268 + * powerpc/powernv: Infrastructure to support OPAL async completion + - LP: #1334268 + * powerpc/powernv: Enable fetching of platform sensor data + - LP: #1334268 + * powerpc/powernv: Fix endian issues with sensor code + - LP: #1334268 + * powerpc/powernv: Add OPAL message log interface + - LP: #1334268 + * powerpc/powernv: Fix kexec races going back to OPAL + - LP: #1334268 + * powerpc/powernv: Fix little endian issues in OPAL flash code + - LP: #1334268 + * powerpc/powernv: Fix little endian issues with opal_do_notifier calls + - LP: #1334268 + * powerpc/powernv: Fix little endian issues in OPAL error log code + - LP: #1334268 + * powerpc/powernv: Create OPAL sglist helper functions and fix endian + issues + - LP: #1334268 + * powerpc/powernv: Fix little endian issues in OPAL dump code + - LP: #1334268 + * powerpc: Fix error return in rtas_flash module init + - LP: #1334268 + * powerpc/powernv: Increase candidate fw image size + - LP: #1334268 + * powerpc/powernv: Return secondary CPUs to firmware before FW update + - LP: #1334268 + * powerpc/powernv: Pass buffer size to OPAL validate flash call + - LP: #1334268 + * gpio: add a driver for the Synopsys DesignWare APB GPIO block + - LP: #1334823 + * gpio: dwapb: drop irq_setup_generic_chip() + - LP: #1334823 + * gpio: dwapb: use a second irq chip + - LP: #1334823 + * lzo: properly check for overruns + - LP: #1335313 + - CVE-2014-4608 + * lz4: ensure length does not wrap + - LP: #1335314 + - CVE-2014-4611 + * netfilter: nf_nat: fix oops on netns removal + - LP: #1314274 + * ALSA: hda - add device ID for Broadwell display audio controller + - LP: #1188091 + * ALSA: hda - add codec ID for Broadwell display audio codec + - LP: #1188091 + * ALSA: hda/hdmi - apply all Haswell fix-ups to Broadwell display codec + - LP: #1188091 + * ALSA: hda - using POS_FIX_LPIB on Broadwell HDMI Audio + - LP: #1188091 + + -- Luis Henriques Tue, 01 Jul 2014 15:10:43 +0100 + +linux (3.13.0-30.55) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * x86_64,ptrace: Enforce RIP <= TASK_SIZE_MAX (CVE-2014-4699) + - LP: #1337339 + - CVE-2014-4699 + + -- Luis Henriques Thu, 03 Jul 2014 16:15:57 +0100 + +linux (3.13.0-30.54) trusty; urgency=low + + [ Adam Conrad ] + + * [Config] Enable building the sata-modules udeb on ppc64el. 
+ - LP: #1323980 + + [ Anton Blanchard ] + + * SAUCE: (no-up) powerpc: 64bit sendfile is capped at 2GB + - LP: #1328230 + + [ Ben Widawsky ] + + * SAUCE: i915_bdw: drm/i915: Fix PSR programming + - LP: #1321729 + * SAUCE: i915_bdw: drm/i915: Correct PPGTT total size + - LP: #1321729 + + [ Chris Wilson ] + + * SAUCE: i915_bdw: drm/i915: Broadwell expands ACTHD to 64bit + - LP: #1321729 + + [ Damien Lespiau ] + + * SAUCE: i915_bdw: drm/i915/bdw: Implement + Wa4x4STCOptimizationDisable:bdw + - LP: #1321729 + + [ Dave Chiluk ] + + * [Config] Enable CONFIG_IP_VS_IPV6=y + - LP: #1300739 + + [ Kamal Mostafa ] + + * [Config] add debian/gbp.conf + * Release Tracking Bug + - LP: #1328286 + + [ Mika Kuoppala ] + + * SAUCE: i915_bdw: drm/i915: add render state initialization + - LP: #1321729 + + [ Paulo Zanoni ] + + * SAUCE: i915_bdw: drm/i915: fix assert_cursor on BDW + - LP: #1321729 + + [ Steven Rostedt ] + + * SAUCE: i915_bdw: drm/i915: Do not dereference pointers from ring buffer + in evict event + - LP: #1321729 + + [ Tim Gardner ] + + * [Config] CONFIG_POWERNV_CPUFREQ=y for ppc64el + - LP: #1324571 + * [Debian] Treat vdso install as an environment variable + * [Config] Treat vdso install as an environment variable + * [Config] CONFIG_MLX4_DEBUG=y + - LP: #1328256 + * [Config] CONFIG_I40EVF=m, CONFIG_I40E_DCB=y, CONFIG_I40E_VXLAN=y + - LP: #1328037 + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Rebase to drm-intel-next-2014-03-07 + fixes + - LP: #1321729 + * SAUCE: i915_bdw: Add BDW specific power well calls + - LP: #1317865 + + [ Upstream Kernel Changes ] + + * drm: expose subpixel order name routine v3 + * drm: dp helper: Add DP test sink CRC definition. + * drm: export cmdline and preferred mode functions from fb helper + * hugetlb: ensure hugepage access is denied if hugepages are not + supported + - LP: #1328251 + * powerpc/powernv: Move SG list structure to header file + - LP: #1326015 + * powerpc/powernv: Read OPAL error log and export it through sysfs + - LP: #1326015 + * powerpc/powernv Platform dump interface + - LP: #1326015 + * pci_regs.h: Add PCI bus link speed and width defines + - LP: #1328037 + * net_tstamp: Add SIOCGHWTSTAMP ioctl to match SIOCSHWTSTAMP + - LP: #1328037 + * PCI/MSI: Add pci_enable_msi_range() and pci_enable_msix_range() + - LP: #1328037 + * net: Change skb_get_rxhash to skb_get_hash + - LP: #1328037 + * net: Add utility functions to clear rxhash + - LP: #1328037 + * net: Add function to set the rxhash + - LP: #1328037 + * i40e: set pf_id based on device and function numbers + - LP: #1328037 + * i40e: register file updates + - LP: #1328037 + * i40e: clear AQ head and tail registers + - LP: #1328037 + * i40e: simplify aq head-tail-len setups + - LP: #1328037 + * i40e: firmware version fields offsets update + - LP: #1328037 + * i40e: allow one more vector for VFs + - LP: #1328037 + * i40e: select reset counters correctly + - LP: #1328037 + * i40e: retry call on timeout + - LP: #1328037 + * i40e: properly add VF MAC addresses + - LP: #1328037 + * i40e: fix debugging messages + - LP: #1328037 + * i40e: default debug mask setting + - LP: #1328037 + * i40e: add interrupt test + - LP: #1328037 + * i40e: add support for triggering EMPR + - LP: #1328037 + * i40e: restrict diag test length + - LP: #1328037 + * i40e: sync header files with hardware + - LP: #1328037 + * i40e: separate TSYNVALID and TSYNINDX fields in Rx descriptor + - LP: #1328037 + * i40e: check multi-bit state correctly + - LP: #1328037 + * i40e: get media type during link info + - LP: #1328037 + * i40e: Add 
flag for L2 VEB filtering + - LP: #1328037 + * i40e: enable early hardware support + - LP: #1328037 + * i40e: whitespace + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40e: refactor reset code + - LP: #1328037 + * i40e: Enable all PCTYPEs except FCOE for RSS. + - LP: #1328037 + * i40e: only set up the rings to be used + - LP: #1328037 + * i40e: clear test state bit after all ethtool tests + - LP: #1328037 + * i40e: refactor ethtool tests + - LP: #1328037 + * i40e: add num_VFs message + - LP: #1328037 + * i40e: Add a new variable to track number of pf instances + - LP: #1328037 + * i40e: restrict diag test messages + - LP: #1328037 + * i40e: loopback info and set loopback fix + - LP: #1328037 + * i40e: complain about out-of-range descriptor request + - LP: #1328037 + * i40e: remove and fix confusing define name + - LP: #1328037 + * i40e: Bump version number + - LP: #1328037 + * i40e: fix up some of the ethtool connection reporting + - LP: #1328037 + * i40e: fix pf reset after offline test + - LP: #1328037 + * i40e: Tell the stack about our actual number of queues + - LP: #1328037 + * i40e: init flow control settings to disabled + - LP: #1328037 + * i40e: trivial fixes + - LP: #1328037 + * i40e: use same number of queues as CPUs + - LP: #1328037 + * i40e: reinit flow for the main VSI + - LP: #1328037 + * i40e: function to reconfigure RSS queues and rebuild + - LP: #1328037 + * i40e: Add basic support for get/set channels for RSS + - LP: #1328037 + * i40e: rtnl_lock in reset path fixes + - LP: #1328037 + * i40e: support for suspend and resume + - LP: #1328037 + * i40e: Remove FCoE in i40e_virtchnl_pf.c code + - LP: #1328037 + * i40e: Fix dump output from debugfs calls + - LP: #1328037 + * i40e: prevent null pointer exception in dump descriptor + - LP: #1328037 + * i40e: simplify error messages for dump descriptor + - LP: #1328037 + * i40e: fix up scanf decoders + - LP: #1328037 + * i40e: more print_hex_dump use + - LP: #1328037 + * i40e: Fix wrong mask bits being used in misc interrupt + - LP: #1328037 + * i40e: Bump version number + - LP: #1328037 + * i40e: Fix off by one in i40e_dbg_command_write + - LP: #1328037 + * i40e: make functions static and remove dead code + - LP: #1328037 + * i40evf: main driver core + - LP: #1328037 + * i40evf: transmit and receive functionality + - LP: #1328037 + * i40evf: core ethtool functionality + - LP: #1328037 + * i40evf: virtual channel interface + - LP: #1328037 + * i40evf: driver core headers + - LP: #1328037 + * i40evf: init code and hardware support + - LP: #1328037 + * i40evf: add driver to kernel build system + - LP: #1328037 + * i40evf: A0 silicon specific + - LP: #1328037 + * i40e: using for_each_set_bit to simplify the code + - LP: #1328037 + * i40e: Suppress HMC error to Interrupt message level + - LP: #1328037 + * i40e: Populate and check pci bus speed and width + - LP: #1328037 + * i40e: add wake-on-lan support + - LP: #1328037 + * i40e: fix curly brace use and return type + - LP: #1328037 + * i40e: Implementation of VXLAN ndo's + - LP: #1328037 + * i40e: Rx checksum offload for VXLAN + - LP: #1328037 + * i40e: move i40e_reset_vf + - LP: #1328037 + * i40e: refactor VF reset flow + - LP: #1328037 + * i40e: remove redundant code + - LP: #1328037 + * i40e: remove chatty log messages + - LP: #1328037 + * i40e: fix error return + - LP: #1328037 + * i40e: be more informative + - LP: #1328037 + * i40e: make a define from a large constant + - LP: #1328037 + * i40e: update led set args + - LP: #1328037 + * i40e: report VF MAC addresses 
correctly + - LP: #1328037 + * i40e: Dump the whole NVM, not half + - LP: #1328037 + * i40e: fix mac address checking + - LP: #1328037 + * i40e: Change the ethtool NVM read method to use AQ + - LP: #1328037 + * i40e: fix constant cast issues + - LP: #1328037 + * i40e: guard against vf message races + - LP: #1328037 + * i40e: add header file flag _I40E_TXRX_H_ + - LP: #1328037 + * i40e: use functions to enable and disable icr 0 + - LP: #1328037 + * i40e: reinit buffer size each time + - LP: #1328037 + * i40e: fix error handling when alloc of vsi array fails + - LP: #1328037 + * i40e: keep allocated memory in structs + - LP: #1328037 + * i40e: catch unset q_vector + - LP: #1328037 + * i40e: Fix ring allocation + - LP: #1328037 + * i40e: I40E_FLAG_MQ_ENABLED is not used + - LP: #1328037 + * i40e: Remove unnecessary prototypes + - LP: #1328037 + * i40e: remove un-necessary io-write + - LP: #1328037 + * i40e: Record dma buffer info for dummy packets + - LP: #1328037 + * i40e: Fix SR-IOV VF port VLAN + - LP: #1328037 + * i40e: fix whitespace + - LP: #1328037 + * i40e: avoid unnecessary register read + - LP: #1328037 + * i40e: Do not enable default port on the VEB + - LP: #1328037 + * i40e: use struct assign instead of memcpy + - LP: #1328037 + * i40e: don't allocate zero size + - LP: #1328037 + * i40e: acknowledge VFLR when disabling SR-IOV + - LP: #1328037 + * i40e: support VFs on PFs other than 0 + - LP: #1328037 + * i40e: Fix VF driver MAC address configuration + - LP: #1328037 + * i40e: use correct struct for get and update vsi params + - LP: #1328037 + * i40e: Hide the Port VLAN VLAN ID + - LP: #1328037 + * i40e: Admin queue shutdown fixes + - LP: #1328037 + * i40e: check asq alive before notify + - LP: #1328037 + * i40e: Do not allow AQ calls from ndo-ops + - LP: #1328037 + * i40e: Expose AQ debugfs hooks + - LP: #1328037 + * i40e: Do not enable broadcast promiscuous by default + - LP: #1328037 + * i40e: Stop accepting any VLAN tag on VLAN 0 filter set + - LP: #1328037 + * i40e: Allow VF to set already assigned MAC address + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40e: Add code to wait for FW to complete in reset path + - LP: #1328037 + * i40e: update firmware api to 1.1 + - LP: #1328037 + * i40e: Reduce range of interrupt reg in reg test + - LP: #1328037 + * i40e: move PF ID init from PF reset to SC init + - LP: #1328037 + * i40e: check MAC type before any REG access + - LP: #1328037 + * i40e: rework shadow ram read functions + - LP: #1328037 + * i40e: whitespace paren and comment tweaks + - LP: #1328037 + * i40e: Enable/Disable PF switch LB on SR-IOV configure changes + - LP: #1328037 + * i40e: remove redundant AQ enable + - LP: #1328037 + * i40e: correctly setup ARQ descriptors + - LP: #1328037 + * i40e: Re-enable interrupt on ICR0 + - LP: #1328037 + * i40e: use kernel specific defines + - LP: #1328037 + * i40e: Fix GPL header + - LP: #1328037 + * i40e: Fix MAC format in Write MAC address AQ cmd + - LP: #1328037 + * i40e: add a comment on barrier and fix panic on reset + - LP: #1328037 + * i40e: disable packet split + - LP: #1328037 + * i40e: Cleanup reconfig rss path + - LP: #1328037 + * i40e: release NVM resource reservation on startup + - LP: #1328037 + * i40e: remove interrupt on AQ error + - LP: #1328037 + * i40e: accept pf to pf adminq messages + - LP: #1328037 + * i40e: shorten wordy fields + - LP: #1328037 + * i40e: trivial: formatting and checkpatch fixes + - LP: #1328037 + * i40e: fix spelling errors + - LP: #1328037 + * i40e: Add a dummy packet template + 
- LP: #1328037 + * i40e: Turn flow director off in MFP mode + - LP: #1328037 + * i40e: use assignment instead of memcpy + - LP: #1328037 + * i40e: drop unused macros + - LP: #1328037 + * i40e: Update the Current NVM version Low value + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40e: fix long lines + - LP: #1328037 + * i40e: Cleanup Doxygen warnings + - LP: #1328037 + * i40e: Setting queue count to 1 using ethtool is valid + - LP: #1328037 + * i40e: do not bail when disabling if Tx queue disable fails + - LP: #1328037 + * i40e: allow VF to remove any MAC filter + - LP: #1328037 + * i40e: check for possible incorrect ipv6 checksum + - LP: #1328037 + * i40e: adjust ITR max and min values + - LP: #1328037 + * i40e: clear qtx_head before enabling Tx queue + - LP: #1328037 + * i40e: call clear_pxe after adminq is initialized + - LP: #1328037 + * i40e: enable PTP + - LP: #1328037 + * i40e: fix log message wording + - LP: #1328037 + * i40e: Bump version + - LP: #1328037 + * i40evf: fix s390 build failure due to implicit prefetch.h + - LP: #1328037 + * i40e: remove extra register write + - LP: #1328037 + * i40e: associate VMDq queue with VM type + - LP: #1328037 + * i40e: make message meaningful + - LP: #1328037 + * i40e: whitespace fixes + - LP: #1328037 + * i40e: trivial cleanup + - LP: #1328037 + * i40e: Bump version number + - LP: #1328037 + * i40e: Warn admin to reload VF driver on port VLAN configuration + - LP: #1328037 + * i40e: Retain MAC filters on port VLAN deletion + - LP: #1328037 + * i40e: Remove autogenerated Module.symvers file. + - LP: #1328037 + * i40e: check desc pointer before printing + - LP: #1328037 + * i40e: updates to AdminQ interface + - LP: #1328037 + * i40e: fix compile warning on checksum_local + - LP: #1328037 + * i40e: Change firmware workaround + - LP: #1328037 + * i40e: whitespace fixes + - LP: #1328037 + * i40e: rename defines + - LP: #1328037 + * i40e: refactor flow director + - LP: #1328037 + * i40e: implement DCB support infastructure + - LP: #1328037 + * i40e: add DCB and DCBNL support + - LP: #1328037 + * i40e: add DCB option to Kconfig + - LP: #1328037 + * i40e: Fix device ID define names to align to standard + - LP: #1328037 + * i40e: Add missing braces to i40e_dcb_need_reconfig() + - LP: #1328037 + * i40e: spelling error + - LP: #1328037 + * i40e: bump driver version + - LP: #1328037 + * i40evf: trivial fixes + - LP: #1328037 + * i40evf: clean up memsets + - LP: #1328037 + * i40e: Setting i40e_down bit for tx_timeout + - LP: #1328037 + * i40e: remove dead code + - LP: #1328037 + * i40e: set VF state to active when reset is complete + - LP: #1328037 + * i40e: reset VFs after PF reset + - LP: #1328037 + * i40e: enable extant VFs + - LP: #1328037 + * i40e: don't handle VF reset on unload + - LP: #1328037 + * i40evf: clean up adapter struct + - LP: #1328037 + * i40evf: fix bogus comment + - LP: #1328037 + * i40evf: don't store unnecessary array of strings + - LP: #1328037 + * i40evf: change type of flags variable + - LP: #1328037 + * i40evf: refactor reset handling + - LP: #1328037 + * net: i40evf: Remove duplicate include + - LP: #1328037 + * i40e: Use pci_enable_msix_range() instead of pci_enable_msix() + - LP: #1328037 + * i40evf: request reset on tx hang + - LP: #1328037 + * i40evf: remove VLAN filters on close + - LP: #1328037 + * i40evf: fix multiple crashes on remove + - LP: #1328037 + * i40evf: get rid of pci_using_dac + - LP: #1328037 + * i40evf: fix up strings in init task + - LP: #1328037 + * i40evf: remove bogus comment + - LP: 
#1328037 + * i40evf: don't guess device name + - LP: #1328037 + * i40evf: store ring size in ring structs + - LP: #1328037 + * i40evf: update version and copyright date + - LP: #1328037 + * i40evf: remove errant space + - LP: #1328037 + * i40e: remove unnecessary delay + - LP: #1328037 + * i40e: tighten up ring enable/disable flow + - LP: #1328037 + * i40e: Change MSIX to MSI-X + - LP: #1328037 + * i40e and i40evf: Bump driver versions + - LP: #1328037 + * i40evf: Enable the ndo_set_features netdev op + - LP: #1328037 + * i40e: Flow Director sideband accounting + - LP: #1328037 + * i40e: Prevent overflow due to kzalloc + - LP: #1328037 + * i40e/i40evf: i40e implementation for skb_set_hash + - LP: #1328037 + * i40e: clean up comment style + - LP: #1328037 + * i40e: Remove a FW workaround for Number of MSIX vectors + - LP: #1328037 + * i40e: count timeout events + - LP: #1328037 + * i40e: Remove a redundant filter addition + - LP: #1328037 + * i40e: Fix static checker warning + - LP: #1328037 + * i40e: fix nvm version and remove firmware report + - LP: #1328037 + * i40e/i40evf: carefully fill tx ring + - LP: #1328037 + * i40e/i40evf: Bump pf&vf build versions + - LP: #1328037 + * i40e: delete netdev after deleting napi and vectors + - LP: #1328037 + * i40e: Fix a bug in the update logic for FDIR SB filter. + - LP: #1328037 + * i40e/i40evf: Some flow director HW definition fixes + - LP: #1328037 + * i40e: make string references to q be queue + - LP: #1328037 + * i40e: cleanup strings + - LP: #1328037 + * i40e: simplified init string + - LP: #1328037 + * i40e: Fix function comments + - LP: #1328037 + * i40e: Define a new state variable to keep track of feature auto disable + - LP: #1328037 + * i40e: Add code to handle FD table full condition + - LP: #1328037 + * i40e: Bug fix for FDIR replay logic + - LP: #1328037 + * i40e: Let MDD events be handled by MDD handler + - LP: #1328037 + * i40e/i40evf: Use correct number of VF vectors + - LP: #1328037 + * i40e/i40evf: Use dma_set_mask_and_coherent + - LP: #1328037 + * net: Replace u64_stats_fetch_begin_bh to u64_stats_fetch_begin_irq + - LP: #1328037 + * i40e: Don't receive packets when the napi budget == 0 + - LP: #1328037 + * i40evf: Rename i40e_ptype_lookup i40evf_ptype_lookup + - LP: #1328037 + * net/i40e: Avoid double setting of NETIF_F_SG for the HW encapsulation + feature mask + - LP: #1328037 + * i40e: support VF link state ndo + - LP: #1328037 + * i40evf: correctly program RSS HLUT table + - LP: #1328037 + * i40evf: use min_t + - LP: #1328037 + * i40e: Patch to enable Ethtool/netdev feature flag for NTUPLE control + - LP: #1328037 + * i40e: Refactor and cleanup i40e_open(), adding i40e_vsi_open() + - LP: #1328037 + * i40e/i40evf: enable hardware feature head write back + - LP: #1328037 + * i40e/i40evf: reduce context descriptors + - LP: #1328037 + * i40e: potential array underflow in i40e_vc_process_vf_msg() + - LP: #1328037 + * i40e/i40evf: Bump build versions + - LP: #1328037 + * i40e/i40evf: Add EEE LPI stats + - LP: #1328037 + * i40e: Fix a message string + - LP: #1328037 + * i40evf: don't shut down admin queue on error + - LP: #1328037 + * i40evf: clean up init error messages + - LP: #1328037 + * i40e: Delete ATR filter on RST + - LP: #1328037 + * i40evf: fix oops in watchdog handler + - LP: #1328037 + * i40e: Make the alloc and free queue vector calls orthogonal + - LP: #1328037 + * i40e: eeprom integrity check on load and empr + - LP: #1328037 + * i40e: Cleanup in FDIR SB ethtool code + - LP: #1328037 + * i40e: Add functionality for 
FD SB to drop packets + - LP: #1328037 + * i40evf: remove double space after return + - LP: #1328037 + * i40e: check for netdev before debugfs use + - LP: #1328037 + * i40e/i40evf: Add an FD message level + - LP: #1328037 + * i40e: Use DEBUG_FD message level for an FD message + - LP: #1328037 + * i40e: fix function kernel doc description + - LP: #1328037 + * i40e/i40evf: fix error checking path + - LP: #1328037 + * i40e/i40evf: Remove addressof casts to same type + - LP: #1328037 + * i40e: Remove casts of pointer to same type + - LP: #1328037 + * i40evf: remove open-coded skb_cow_head + - LP: #1328037 + * i40evf: program RSS LUT correctly + - LP: #1328037 + * i40e: remove open-coded skb_cow_head + - LP: #1328037 + * i40e: fix TCP flag replication for hardware offload + - LP: #1328037 + * e1000e/igb/ixgbe/i40e: fix message terminations + - LP: #1328037 + * i40e: fix Timesync Tx interrupt handler code + - LP: #1328037 + * mm: use paravirt friendly ops for NUMA hinting ptes + - LP: #1313450 + + [ Ville Syrjälä ] + + * SAUCE: i915_bdw: drm/i915: Fix scanline counter fixup on BDW + - LP: #1321729 + + -- Kamal Mostafa Mon, 09 Jun 2014 15:09:43 -0700 + +linux (3.13.0-29.53) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * futex-prevent-requeue-pi-on-same-futex.patch futex: Forbid uaddr == + uaddr2 in futex_requeue(..., requeue_pi=1) + - LP: #1326367 + - CVE-2014-3153 + * futex: Validate atomic acquisition in futex_lock_pi_atomic() + - LP: #1326367 + - CVE-2014-3153 + * futex: Always cleanup owner tid in unlock_pi + - LP: #1326367 + - CVE-2014-3153 + * futex: Make lookup_pi_state more robust + - LP: #1326367 + - CVE-2014-3153 + + -- Brad Figg Wed, 04 Jun 2014 08:25:41 -0700 + +linux (3.13.0-29.52) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - re-used previous tracking bug + + [ Adam Conrad ] + + * [Config] Normalize AHCI configs on powerpc/ppc64el with the rest of the + architectures + - LP: #1323980 + + [ Tanmay Inamdar ] + + * SAUCE: Add MSI/MSI-X driver for APM PCI bus + - LP: #1318977 + + -- Luis Henriques Wed, 28 May 2014 09:38:41 +0100 + +linux (3.13.0-28.51) trusty; urgency=low + + [ Luis Henriques ] + + * Release Tracking Bug + - LP: #1322112 + + [ Adam Lee ] + + * SAUCE: (no-up) rtlwifi: rtl8723be: disable MSI interrupts mode + - LP: #1310512, #1320070 + + [ Andy Whitcroft ] + + * [Packaging] ppc64el is a powerpc kernel arch and needs its quirks + - LP: #1318848 + + [ Ben Collins ] + + * [Config] Switch to grub-ieee1275 as recommended on book3e systems + - LP: #1318629 + + [ Upstream Kernel Changes ] + + * Revert "PCI: Enable INTx in pci_reenable_device() only when MSI/MSI-X + not enabled" + - LP: #1320946 + * Bluetooth: allocate static minor for vhci + - LP: #1317336 + * core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle + errors + - LP: #1320946 + * __dentry_path() fixes + - LP: #1320946 + * drm/i915: quirk invert brightness for Acer Aspire 5336 + - LP: #1320946 + * w1: fix w1_send_slave dropping a slave id + - LP: #1320946 + * ARM: 7954/1: mm: remove remaining domain support from ARMv6 + - LP: #1320946 + * matroxfb: restore the registers M_ACCESS and M_PITCH + - LP: #1320946 + * framebuffer: fix cfb_copyarea + - LP: #1320946 + * mach64: use unaligned access + - LP: #1320946 + * mach64: fix cursor when character width is not a multiple of 8 pixels + - LP: #1320946 + * tgafb: fix mode setting with fbset + - LP: #1320946 + * tgafb: fix data copying + - LP: #1320946 + * hvc: ensure hvc_init is only ever called once in hvc_console.c + - 
LP: #1320946 + * ARM: dts: Keep G3D regulator always on for exynos5250-arndale + - LP: #1320946 + * PCI: mvebu: Fix potential issue in range parsing + - LP: #1320946 + * usb: dwc3: fix wrong bit mask in dwc3_event_devt + - LP: #1320946 + * x86, AVX-512: AVX-512 Feature Detection + - LP: #1320946 + * x86, AVX-512: Enable AVX-512 States Context Switch + - LP: #1320946 + * s390/cio: fix driver callback initialization for ccw consoles + - LP: #1320946 + * ARM: Fix default CPU selection for ARCH_MULTI_V5 + - LP: #1320946 + * omap3isp: preview: Fix the crop margins + - LP: #1320946 + * ACPICA: Restore code that repairs NULL package elements in return + values. + - LP: #1320946 + * media: gspca: sn9c20x: add ID for Genius Look 1320 V2 + - LP: #1320946 + * m88rs2000: add caps FE_CAN_INVERSION_AUTO + - LP: #1320946 + * m88rs2000: prevent frontend crash on continuous transponder scans + - LP: #1320946 + * mmc: sdhci-bcm-kona: fix build errors when built-in + - LP: #1320946 + * usb: musb: avoid NULL pointer dereference + - LP: #1320946 + * uvcvideo: Do not use usb_set_interface on bulk EP + - LP: #1320946 + * drm/i915: Don't clobber CHICKEN_PIPESL_1 on BDW + - LP: #1320946 + * usb: dwc3: fix randconfig build errors + - LP: #1320946 + * usb: gadget: atmel_usba: fix crashed during stopping when DEBUG is + enabled + - LP: #1320946 + * blktrace: fix accounting of partially completed requests + - LP: #1320946 + * rtlwifi: rtl8192cu: Fix too long disable of IRQs + - LP: #1320946 + * rtlwifi: rtl8192se: Fix too long disable of IRQs + - LP: #1320946 + * rtlwifi: rtl8188ee: Fix too long disable of IRQs + - LP: #1320946 + * rtlwifi: rtl8723ae: Fix too long disable of IRQs + - LP: #1320946 + * xhci: Prevent runtime pm from autosuspending during initialization + - LP: #1320946 + * staging:serqt_usb2: Fix sparse warning restricted __le16 degrades to + integer + - LP: #1320946 + * mtd: atmel_nand: Disable subpage NAND write when using Atmel PMECC + - LP: #1320946 + * iwlwifi: dvm: take mutex when sending SYNC BT config command + - LP: #1320946 + * virtio_balloon: don't softlockup on huge balloon changes. 
+ - LP: #1320946 + * arm64: Make DMA coherent and strongly ordered mappings not executable + - LP: #1320946 + * arm64: Do not synchronise I and D caches for special ptes + - LP: #1320946 + * ARM: OMAP2+: INTC: Acknowledge stuck active interrupts + - LP: #1320946 + * ARM: dts: am33xx: correcting dt node unit address for usb + - LP: #1320946 + * mtip32xx: Set queue bounce limit + - LP: #1320946 + * mtip32xx: Unmap the DMA segments before completing the IO request + - LP: #1320946 + * mtip32xx: mtip_async_complete() bug fixes + - LP: #1320946 + * ath9k: fix ready time of the multicast buffer queue + - LP: #1320946 + * KVM: s390: Optimize ucontrol path + - LP: #1320946 + * mei: fix memory leak of pending write cb objects + - LP: #1320946 + * usb: gadget: tcm_usb_gadget: stop format strings + - LP: #1320946 + * usb: phy: Add ulpi IDs for SMSC USB3320 and TI TUSB1210 + - LP: #1320946 + * USB: unbind all interfaces before rebinding any + - LP: #1320946 + * IB/ipath: Fix potential buffer overrun in sending diag packet routine + - LP: #1320946 + * IB/qib: Fix debugfs ordering issue with multiple HCAs + - LP: #1320946 + * IB/qib: add missing braces in do_qib_user_sdma_queue_create() + - LP: #1320946 + * IB/nes: Return an error on ib_copy_from_udata() failure instead of NULL + - LP: #1320946 + * ALSA: hda/realtek - Restore default value for ALC283 + - LP: #1320946 + * mfd: sec-core: Fix possible NULL pointer dereference when i2c_new_dummy + error + - LP: #1320946 + * regulator: arizona-ldo1: Correct default regulator init_data + - LP: #1320946 + * ASoC: cs42l73: Fix mask bits for SOC_VALUE_ENUM_SINGLE + - LP: #1320946 + * ASoC: cs42l52: Fix mask bits for SOC_VALUE_ENUM_SINGLE + - LP: #1320946 + * drm/i915: Do not dereference pointers from ring buffer in evict event + - LP: #1320946 + * mfd: Include all drivers in subsystem menu + - LP: #1320946 + * mfd: max8997: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max77686: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max8998: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max8925: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: 88pm860x: Fix I2C device resource leak on regmap init fail + - LP: #1320946 + * mfd: 88pm860x: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: max77693: Fix possible NULL pointer dereference on i2c_new_dummy + error + - LP: #1320946 + * mfd: 88pm800: Fix I2C device resource leak if probe fails + - LP: #1320946 + * mfd: tps65910: Fix possible invalid pointer dereference on + regmap_add_irq_chip fail + - LP: #1320946 + * MIPS: KVM: Pass reserved instruction exceptions to guest + - LP: #1320946 + * ASoC: cs42l51: Fix SOC_DOUBLE_R_SX_TLV shift values for ADC, PCM, and + Analog kcontrols + - LP: #1320946 + * mac80211: fix potential use-after-free + - LP: #1320946 + * mac80211: fix suspend vs. authentication race + - LP: #1320946 + * mac80211: fix WPA with VLAN on AP side with ps-sta again + - LP: #1320946 + * pid: get pid_t ppid of task in init_pid_ns + - LP: #1320946 + * audit: restore order of tty and ses fields in log output + - LP: #1320946 + * audit: convert PPIDs to the inital PID namespace. 
+ - LP: #1320946 + * mfd: kempld-core: Fix potential hang-up during boot + - LP: #1320946 + * drm/i915: Fix unsafe loop iteration over vma whilst unbinding them + - LP: #1320946 + * powerpc/compat: 32-bit little endian machine name is ppcle, not ppc + - LP: #1320946 + * clk: s2mps11: Fix possible NULL pointer dereference + - LP: #1320946 + * spi: efm32: use $vendor,$device scheme for compatible string + - LP: #1320946 + * ALSA: hda - add headset mic detect quirks for three Dell laptops + - LP: #1297581, #1320946 + * KVM: PPC: Book3S HV: Fix KVM hang with CONFIG_KVM_XICS=n + - LP: #1320946 + * gpio: mxs: Allow for recursive enable_irq_wake() call + - LP: #1320946 + * nfsd4: buffer-length check for SUPPATTR_EXCLCREAT + - LP: #1320946 + * nfsd4: session needs room for following op to error out + - LP: #1320946 + * nfsd4: leave reply buffer space for failed setattr + - LP: #1320946 + * nfsd4: fix test_stateid error reply encoding + - LP: #1320946 + * nfsd: notify_change needs elevated write count + - LP: #1320946 + * dm cache: prevent corruption caused by discard_block_size > + cache_block_size + - LP: #1320946 + * dm transaction manager: fix corruption due to non-atomic transaction + commit + - LP: #1320946 + * dm: take care to copy the space map roots before locking the superblock + - LP: #1320946 + * aio: v4 ensure access to ctx->ring_pages is correctly serialised for + migration + - LP: #1320946 + * NFSD: Traverse unconfirmed client through hash-table + - LP: #1320946 + * lockd: ensure we tear down any live sockets when socket creation fails + during lockd_up + - LP: #1320946 + * drm/i915/tv: fix gen4 composite s-video tv-out + - LP: #1320946 + * dm thin: fix dangling bio in process_deferred_bios error path + - LP: #1320946 + * em28xx: fix PCTV 290e LNA oops + - LP: #1320946 + * NFSv4: Fix a use-after-free problem in open() + - LP: #1320946 + * nfsd4: fix setclientid encode size + - LP: #1320946 + * MIPS: Hibernate: Flush TLB entries in swsusp_arch_resume() + - LP: #1320946 + * ALSA: hda - Enable beep for ASUS 1015E + - LP: #1320946 + * nfsd: check passed socket's net matches NFSd superblock's one + - LP: #1320946 + * s390/bitops,atomic: add missing memory barriers + - LP: #1320946 + * IB/mthca: Return an error on ib_copy_to_udata() failure + - LP: #1320946 + * IB/ehca: Returns an error on ib_copy_to_udata() failure + - LP: #1320946 + * drm/qxl: unset a pointer in sync_obj_unref + - LP: #1320946 + * smarter propagate_mnt() + - LP: #1320946 + * don't bother with {get,put}_write_access() on non-regular files + - LP: #1320946 + * reiserfs: fix race in readdir + - LP: #1320946 + * drm/vmwgfx: correct fb_fix_screeninfo.line_length + - LP: #1320946 + * ALSA: hda - Fix silent speaker output due to mute LED fixup + - LP: #1320946 + * drm/radeon: clear needs_reset flag if IB test fails + - LP: #1320946 + * drm/radeon: call drm_edid_to_eld when we update the edid + - LP: #1320946 + * drm/radeon: fix endian swap on hawaii clear state buffer setup + - LP: #1320946 + * drm/radeon: fix typo in spectre_golden_registers + - LP: #1320946 + * sh: fix format string bug in stack tracer + - LP: #1320946 + * ocfs2: dlm: fix lock migration crash + - LP: #1320946 + * ocfs2: dlm: fix recovery hung + - LP: #1320946 + * ocfs2: do not put bh when buffer_uptodate failed + - LP: #1320946 + * ocfs2: fix panic on kfree(xattr->name) + - LP: #1320946 + * xattr: guard against simultaneous glibc header inclusion + - LP: #1320946 + * drm/i915: move power domain init earlier during system resume + - LP: #1320946 + * Skip 
intel_crt_init for Dell XPS 8700 + - LP: #1320946 + * dm cache: fix a lock-inversion + - LP: #1320946 + * thinkpad_acpi: Fix inconsistent mute LED after resume + - LP: #1320946 + * iser-target: Add missing se_cmd put for WRITE_PENDING in tx_comp_err + - LP: #1320946 + * iscsi-target: Fix ERL=2 ASYNC_EVENT connection pointer bug + - LP: #1320946 + * Target/sbc: Initialize COMPARE_AND_WRITE write_sg scatterlist + - LP: #1320946 + * mm: page_alloc: spill to remote nodes before waking kswapd + - LP: #1320946 + * mm: try_to_unmap_cluster() should lock_page() before mlocking + - LP: #1320946 + * mm: hugetlb: fix softlockup when a large number of hugepages are freed. + - LP: #1320946 + * hung_task: check the value of "sysctl_hung_task_timeout_sec" + - LP: #1320946 + * DRM: armada: fix corruption while loading cursors + - LP: #1320946 + * ALSA: ice1712: Fix boundary checks in PCM pointer ops + - LP: #1320946 + * lib/percpu_counter.c: fix bad percpu counter state during suspend + - LP: #1320946 + * md/raid1: r1buf_pool_alloc: free allocate pages when subsequent + allocation fails. + - LP: #1320946 + * ALSA: hda - add headset mic detect quirk for a Dell laptop + - LP: #1297581, #1320946 + * b43: Fix machine check error due to improper access of + B43_MMIO_PSM_PHY_HDR + - LP: #1320946 + * x86-64, modify_ldt: Ban 16-bit segments on 64-bit kernels + - LP: #1320946 + * target/tcm_fc: Fix use-after-free of ft_tpg + - LP: #1320946 + * ib_srpt: Use correct ib_sg_dma primitives + - LP: #1320946 + * Linux 3.13.11.1 + - LP: #1320946 + * Linux 3.13.11.2 + - LP: #1320946 + * hp-wireless: new driver for hp wireless button for Windows 8 + - LP: #1303737 + * x86, platform: Make HP_WIRELESS option text more descriptive + - LP: #1303737 + * ACPI: blacklist win8 OSI for Dell Inspiron 7737 + - LP: #1288161 + + -- Luis Henriques Thu, 22 May 2014 12:20:44 +0100 + +linux (3.13.0-27.50) trusty; urgency=low + + [ Brad Figg ] + + * Revert "rtlwifi: Set the link state" + + -- Brad Figg Thu, 15 May 2014 10:21:43 -0700 + +linux (3.13.0-27.49) trusty; urgency=low + + [ Brad Figg ] + + * Revert "SAUCE: (no-up) HID: rmi: do not stop the device at the end of + probe" + * Revert "SAUCE: (no-up) HID: rmi: introduce RMI driver for Synaptics + touchpads" + * Revert "[Config] CONFIG_HID_RMI=m" + + -- Brad Figg Wed, 14 May 2014 10:29:07 -0700 + +linux (3.13.0-26.48) trusty; urgency=low + + [ Benjamin Tissoires ] + + * SAUCE: (no-up) HID: rmi: introduce RMI driver for Synaptics touchpads + - LP: #1305522 + * SAUCE: (no-up) HID: rmi: do not stop the device at the end of probe + - LP: #1305522 + + [ Kamal Mostafa ] + + * Merged back Ubuntu-3.13.0-24.47 security release + * Revert "n_tty: Fix n_tty_write crash when echoing in raw mode" + - LP: #1314762 + * Release Tracking Bug + - LP: #1316835 + + [ Tim Gardner ] + + * [Config] CONFIG_HID_RMI=m + - LP: #1305522 + * [Config] CONFIG_CRYPTO_DEV_NX=n for ppc64el + - LP: #1314625 + * [Config] CONFIG_ZSWAP=y + - LP: #1315203 + * Add rpcsec_gss_krb5 to generic inclusion list + - LP: #769527 + + [ Upstream Kernel Changes ] + + * HID: hidraw: make comment more accurate and nicer + - LP: #1305522 + * HID: remove hid_get_raw_report in struct hid_device + - LP: #1305522 + * HID: i2c-hid: implement ll_driver transport-layer callbacks + - LP: #1305522 + * HID: add inliners for ll_driver transport-layer callbacks + - LP: #1305522 + * HID: Add transport-driver callbacks to the hid_ll_driver struct + - LP: #1305522 + * drm/nouveau: fail runtime pm properly. 
+ - LP: #1313986 + * drm/nouveau: don't suspend/resume display on runtime s/r + - LP: #1313986 + * n_tty: Fix n_tty_write crash when echoing in raw mode + - LP: #1314762 + - CVE-2014-0196 + * floppy: ignore kernel-only members in FDRAWCMD ioctl input + - LP: #1316729 + - CVE-2014-1737 + * floppy: don't write kernel-only members to FDRAWCMD ioctl output + - LP: #1316735 + - CVE-2014-1738 + + -- Kamal Mostafa Tue, 06 May 2014 15:30:29 -0700 + +linux (3.13.0-25.47) trusty; urgency=low + + [ Joseph Salisbury ] + + * Release Tracking Bug + - LP: #1313868 + + [ Adam Lee ] + + * [Config] CONFIG_RTL8723BE=m, CONFIG_RTL8723_COMMON=m + - LP: #1240940 + + [ Alex Hung ] + + * SAUCE: (no-up) dell-led: add mic mute led interface + - LP: #1308297 + + [ Andy Whitcroft ] + + * SAUCE: (no-up) powerpc: Increase COMMAND_LINE_SIZE to 2048 from 512. + - LP: #1306677 + + [ Ben Collins ] + + * [Config] Disable PAMU on Freescale kernels + - LP: #1311738 + + [ Tim Gardner ] + + * Revert "SAUCE: x86, hyperv: bypass the timer_irq_works() check" + - LP: #1311683 + * SAUCE: (no-up) ALSA: usb-audio: Suppress repetitive debug messages from + retire_playback_urb() + - LP: #1305133 + * SAUCE: (no-up) 'BUG:' message unnecessarily triggers kerneloops + - LP: #1305480 + * [Config] CONFIG_POWERNV_CPUFREQ=m + - LP: #1309576 + * [Config] CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y for ppc64el + - LP: #1309576 + * [Config] CONFIG_TRANSPARENT_HUGEPAGE=n for arm64 + - LP: #1309221 + * [Config] CONFIG_MEMCG_KMEM=y + - LP: #1309586 + * [Config] CONFIG_CRASH_DUMP=y for ppc64el + - LP: #1312783 + + [ Upstream Kernel Changes ] + + * Revert "rtlwifi: rtl8188ee: enable MSI interrupts mode" + - LP: #1310512 + * mac80211: add length check in ieee80211_is_robust_mgmt_frame() + - LP: #1240940 + * rtlwifi: rtl8723ae: rtl8723-common: Create new driver for common code + - LP: #1240940 + * rtlwifi: rtl8723ae: rtl8723-common: Copy common firmware code + - LP: #1240940 + * rtlwifi: rtl8723ae: rtl8723-common: Copy common dynamic power + management code + - LP: #1240940 + * rtlwifi: rtl8723be: Add new driver + - LP: #1240940 + * selinux: correctly label /proc inodes in use before the policy is + loaded + - LP: #1309007 + * net: sctp: fix skb leakage in COOKIE ECHO path of chunk->auth_chunk + - LP: #1309007 + * bridge: multicast: add sanity check for query source addresses + - LP: #1309007 + * tipc: allow connection shutdown callback to be invoked in advance + - LP: #1309007 + * tipc: fix connection refcount leak + - LP: #1309007 + * tipc: drop subscriber connection id invalidation + - LP: #1309007 + * tipc: fix memory leak during module removal + - LP: #1309007 + * tipc: don't log disabled tasklet handler errors + - LP: #1309007 + * inet: frag: make sure forced eviction removes all frags + - LP: #1309007 + * net: unix: non blocking recvmsg() should not return -EINTR + - LP: #1309007 + * ipv6: Fix exthdrs offload registration. 
+ - LP: #1309007 + * bnx2: Fix shutdown sequence + - LP: #1309007 + * pkt_sched: fq: do not hold qdisc lock while allocating memory + - LP: #1309007 + * Xen-netback: Fix issue caused by using gso_type wrongly + - LP: #1309007 + * vlan: Set correct source MAC address with TX VLAN offload enabled + - LP: #1309007 + * tcp: tcp_release_cb() should release socket ownership + - LP: #1309007 + * bridge: multicast: add sanity check for general query destination + - LP: #1309007 + * bridge: multicast: enable snooping on general queries only + - LP: #1309007 + * net: socket: error on a negative msg_namelen + - LP: #1309007 + * bonding: set correct vlan id for alb xmit path + - LP: #1309007 + * eth: fec: Fix lost promiscuous mode after reconnecting cable + - LP: #1309007 + * ipv6: Avoid unnecessary temporary addresses being generated + - LP: #1309007 + * ipv6: ip6_append_data_mtu do not handle the mtu of the second fragment + properly + - LP: #1309007 + * net: cdc_ncm: fix control message ordering + - LP: #1309007 + * vxlan: fix potential NULL dereference in arp_reduce() + - LP: #1309007 + * vxlan: fix nonfunctional neigh_reduce() + - LP: #1309007 + * tcp: syncookies: do not use getnstimeofday() + - LP: #1309007 + * rtnetlink: fix fdb notification flags + - LP: #1309007 + * ipmr: fix mfc notification flags + - LP: #1309007 + * ip6mr: fix mfc notification flags + - LP: #1309007 + * net: micrel : ks8851-ml: add vdd-supply support + - LP: #1309007 + * netpoll: fix the skb check in pkt_is_ns + - LP: #1309007 + * tipc: fix spinlock recursion bug for failed subscriptions + - LP: #1309007 + * ip_tunnel: Fix dst ref-count. + - LP: #1309007 + * tg3: Do not include vlan acceleration features in vlan_features + - LP: #1309007 + * virtio-net: correct error handling of virtqueue_kick() + - LP: #1309007 + * usbnet: include wait queue head in device structure + - LP: #1309007 + * vlan: Set hard_header_len according to available acceleration + - LP: #1309007 + * vhost: fix total length when packets are too short + - LP: #1309007 + - CVE-2014-0077 + * tcp: fix get_timewait4_sock() delay computation on 64bit + - LP: #1309007 + * xen-netback: remove pointless clause from if statement + - LP: #1309007 + * ipv6: some ipv6 statistic counters failed to disable bh + - LP: #1309007 + * netlink: don't compare the nul-termination in nla_strcmp + - LP: #1309007 + * xen-netback: disable rogue vif in kthread context + - LP: #1309007 + * Call efx_set_channels() before efx->type->dimension_resources() + - LP: #1309007 + * net: vxlan: fix crash when interface is created with no group + - LP: #1309007 + * isdnloop: Validate NUL-terminated strings from user. 
+ - LP: #1309007 + * isdnloop: several buffer overflows + - LP: #1309007 + * powernow-k6: disable cache when changing frequency + - LP: #1309007 + * powernow-k6: correctly initialize default parameters + - LP: #1309007 + * powernow-k6: reorder frequencies + - LP: #1309007 + * ARC: [nsimosci] Change .dts to use generic 8250 UART + - LP: #1309007 + * ARC: [nsimosci] Unbork console + - LP: #1309007 + * futex: Allow architectures to skip futex_atomic_cmpxchg_inatomic() test + - LP: #1309007 + * m68k: Skip futex_atomic_cmpxchg_inatomic() test + - LP: #1309007 + * crypto: ghash-clmulni-intel - use C implementation for setkey() + - LP: #1309007 + * Linux 3.13.10 + - LP: #1309007 + * cpufreq: powernv: cpufreq driver for powernv platform + - LP: #1309576 + * cpufreq: powernv: Use cpufreq_frequency_table.driver_data to store + pstate ids + - LP: #1309576 + * cpufreq: powernv: Select CPUFreq related Kconfig options for powernv + - LP: #1309576 + * support Thinkpad X1 Carbon 2nd generation's adaptive keyboard + - LP: #1309609 + * save and restore adaptive keyboard mode for suspend and,resume + - LP: #1309609 + * user namespace: fix incorrect memory barriers + - LP: #1311683 + * Char: ipmi_bt_sm, fix infinite loop + - LP: #1311683 + * x86, hyperv: Bypass the timer_irq_works() check + - LP: #1311683 + * x86: Adjust irq remapping quirk for older revisions of 5500/5520 + chipsets + - LP: #1311683 + * PCI: designware: Fix RC BAR to be single 64-bit non-prefetchable memory + BAR + - LP: #1311683 + * PCI: designware: Fix iATU programming for cfg1, io and mem viewport + - LP: #1311683 + * ACPI / button: Add ACPI Button event via netlink routine + - LP: #1311683 + * PCI: Enable INTx in pci_reenable_device() only when MSI/MSI-X not + enabled + - LP: #1311683 + * staging: comedi: 8255_pci: initialize MITE data window + - LP: #1311683 + * tty: Set correct tty name in 'active' sysfs attribute + - LP: #1311683 + * tty: Fix low_latency BUG + - LP: #1311683 + * SCSI: sd: don't fail if the device doesn't recognize SYNCHRONIZE CACHE + - LP: #1311683 + * pid_namespace: pidns_get() should check task_active_pid_ns() != NULL + - LP: #1311683 + * Bluetooth: Fix removing Long Term Key + - LP: #1311683 + * ima: restore the original behavior for sending data with ima template + - LP: #1311683 + * backing_dev: fix hung task on sync + - LP: #1311683 + * bdi: avoid oops on device removal + - LP: #1311683 + * xfs: fix directory hash ordering bug + - LP: #1311683 + * Btrfs: skip submitting barrier for missing device + - LP: #1311683 + * Btrfs: fix deadlock with nested trans handles + - LP: #1311683 + * ext4: fix error return from ext4_ext_handle_uninitialized_extents() + - LP: #1311683 + * ext4: fix partial cluster handling for bigalloc file systems + - LP: #1311683 + * ext4: fix premature freeing of partial clusters split across leaf + blocks + - LP: #1311683 + * jffs2: Fix segmentation fault found in stress test + - LP: #1311683 + * jffs2: Fix crash due to truncation of csize + - LP: #1311683 + * jffs2: avoid soft-lockup in jffs2_reserve_space_gc() + - LP: #1311683 + * jffs2: remove from wait queue after schedule() + - LP: #1311683 + * sparc32: fix build failure for arch_jump_label_transform + - LP: #1311683 + * sparc64: don't treat 64-bit syscall return codes as 32-bit + - LP: #1311683 + * sparc64: Make sure %pil interrupts are enabled during hypervisor yield. 
+ - LP: #1311683 + * wait: fix reparent_leader() vs EXIT_DEAD->EXIT_ZOMBIE race + - LP: #1311683 + * exit: call disassociate_ctty() before exit_task_namespaces() + - LP: #1311683 + * Linux 3.13.11 + - LP: #1311683 + * powerpc/le: Enable RTAS events support + - LP: #1312230 + * net: ipv4: current group_info should be put after using. + - CVE-2014-2851 + * powerpc/relocate fix relocate processing in LE mode + - LP: #1312783 + + -- Joseph Salisbury Mon, 28 Apr 2014 15:02:45 -0400 + +linux (3.13.0-24.47) trusty; urgency=low + + [ Peter Hurley ] + + * n_tty: Fix n_tty_write crash when echoing in raw mode + + -- Brad Figg Wed, 30 Apr 2014 15:08:31 -0700 + +linux (3.13.0-24.46) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i -- add nvme devices to block-modules udeb + - LP: #1303710 + + [ Paolo Pisati ] + + * [Config] build vexpress a9 dtb + - LP: #1303657 + * [Config] disable HVC_DCC + - LP: #1303657 + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1305158 + * rebase to v3.13.9 + * CONFIG_RTLBTCOEXIST=m + - LP: #1296591 + + [ Upstream Kernel Changes ] + + * HID: Bluetooth: hidp: make sure input buffers are big enough + - LP: #1252874 + * ACPI / video: Add systems that should favour native backlight interface + - LP: #1303419 + * rds: prevent dereference of a NULL device in rds_iw_laddr_check + - LP: #1302222 + - CVE-2014-2678 + * x86/efi: Fix 32-bit fallout + - LP: #1301590 + * drm/nouveau/devinit: tidy up the subdev class definition + - LP: #1158689 + * drm/nouveau/device: provide a way for devinit to mark engines as + disabled + - LP: #1158689 + * drm/nv50-/devinit: prevent use of engines marked as disabled by + hw/vbios + - LP: #1158689 + * rtlwifi: btcoexist: Add new mini driver + - LP: #1296591 + * rtlwifi: Prepare existing drivers for new driver + - LP: #1296591 + * rtlwifi: add MSI interrupts mode support + - LP: #1296591 + * rtlwifi: rtl8188ee: enable MSI interrupts mode + - LP: #1296591 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.9 + + -- Tim Gardner Fri, 04 Apr 2014 09:26:27 -0400 + +linux (3.13.0-23.45) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1302083 + + [ Emil Goode ] + + * SAUCE: (no-up) brcmsmac: fix deadlock on missing firmware + - LP: #1300416 + + [ Moni Shoua ] + + * SAUCE: (no-up) IB/core: Don't resolve passive side RoCE L2 address in cma req handler + Merged at the request of Mellanox engineers. It should be making its way upstream. + http://www.spinics.net/lists/linux-rdma/msg19417.html + + [ Or Gerlitz ] + + * [Config] CONFIG_INFINIBAND_USNIC=m + + [ Tim Gardner ] + + * [Config] CONFIG_MLX4_EN_VXLAN=y + + [ Upstream Kernel Changes ] + + * This set of patches essentially updates the infiniband subsystem to 3.14 plus some + bug fixes from 3.15. The initial set of commits was determined using: + + git log --pretty=oneline --reverse v3.13..cd6362befe4cc7bf589a5236d2a780af2d47bcc9 -- drivers/infiniband drivers/net/ethernet/mellanox include/linux/mlx* include/rdma + + A few tree-wide patches were dropped since they were not essential. + This is the list of commits that were actually applied. As you can see, there were + a few back ports, but all were simple context collisions and easily resolved. 
+ + (cherry picked from commit 7c6d74d23a33a946bcf08ba2d3e52d31943b7342) + (cherry picked from commit 7b25d81b7ff03bb1893a9c7f97797b891a772deb) + (cherry picked from commit 73e74ab4e0d030f28d640507998156a22d4211f8) + (cherry picked from commit eb17711bc1d6611e934af5b6dabc225936084128) + (cherry picked from commit 8e1a28e8e6797449dfdfa4739002d1e5939355a8) + (cherry picked from commit 84c864038d6d991be81344fc3168ec2c2f7a8d06) + (cherry picked from commit d03a68f8217ea03492e4f7928db222dc6544792a) + (cherry picked from commit be902ab122fcc59ba6a8588e820c31861d75a5a4) + (cherry picked from commit e4b59a1cb6f8feb03f356b0abfd20451f05d7470) + (cherry picked from commit 0276a330617a0cf380f09e5065299078d3d45886) + (cherry picked from commit 982290a7fe36e528af292d3e3b61939b1900bfc6) + (cherry picked from commit c5266d40b0a26546d0ebedb44dd4145088b85cb8) + (cherry picked from commit 7ffdf726cfe0d188907bdbb0e7729fb35a69c219) + (cherry picked from commit 837052d0ccc5a789a578f8b628ba154b63bd51ea) + (cherry picked from commit c0623e587d869b4b18e077d64a8524ea364b5b77) + (cherry picked from commit ad7d4eaed995d76fb24a18e202fdf5072197ff0a) + (cherry picked from commit 2156d9a8ac0202f0158d407063cb850afffd3f56) + (cherry picked from commit b912b2f8fc71df4c3ffa7a9fe2c2227e8bcdaa07) + (cherry picked from commit 9ba75fb0c4b92416b94640b5a043c323a457f14a) + (cherry picked from commit 74b9c3ea847f060c784e86453f1ad77dd05a7a8f) + (cherry picked from commit e6a767582942d6fd9da0ddea673f5a7017a365c7) + (cherry picked from commit fe5e8a1acc7fd877b6706053cf88c418c33fe7a3) + (cherry picked from commit be8348df6efac6b602f2ad3210139bccf0dbe3d7) + (cherry picked from commit e3cf00d0a87f025db5855a43a67c67a41fa79fef) + (cherry picked from commit 8192d4acb5c5376c0f6756f2106ab243036c8c7d) + (cherry picked from commit 301a0dd68e5ddd22d992a58f466b621987d9df3b) + (cherry picked from commit 2183b990b67b761f81c68a18f60df028e080cf05) + (cherry picked from commit 8af94ac66a4d53a96278ecbb9ef2e8592f0d9ba3) + (cherry picked from commit 256d6a6ac52ee02e897cec88ecc96c3ae7f9cb88) + (cherry picked from commit 60b215e8b267f911751a043de63181dab1b69706) + (cherry picked from commit b85caf479b577f000067002259539ad4341d4530) + (cherry picked from commit 3f92bed3d6c073f41efc0777ecd3442aa1f03d20) + (cherry picked from commit 6214105460842759020bdd7f4dbb50afa1be9d17) + (cherry picked from commit c7845bcafe4d2ecd5c479fa64d1b425c21dde17c) + (cherry picked from commit e45e614e4015a489d2f8013eaed45d498d884e86) + (cherry picked from commit 248567f79304b953ea492fb92ade097b62ed09b2) + (cherry picked from commit 6a54d9f9a04ed35e6615a47974c1ef02ff3a62cb) + (cherry picked from commit c5f855e08a97edc107c4a3b73809ed629c1dcac1) + (cherry picked from commit 9f637f7936025aef57f247b11036bad18bb87c06) + (cherry picked from commit c30392ab5bb536fef268c22804dafded15170d14) + (cherry picked from commit 3108bccb3d9afbd32931d775f5dd5ee157eaa5a9) + (cherry picked from commit d9fe40916387bab884e458c99399c149b033506c) + (cherry picked from commit 24e42754f676d34e5c26d6b7b30f36df8004ec08) + (cherry picked from commit c1be5232d21de68f46637e617225b9b7c586451a) + (cherry picked from commit 90f1d1b41b70474bf73d07d4300196901cd81718) + (cherry picked from commit 240ae00e4d834e387f4f09e236130f520e357a70) + (back ported from commit 4de6580360867d44adecb2d05febed1c8d186c82) + (cherry picked from commit 0a9b7d59d5a8e2b97406a29a8a807bbc5ce7092e) + (cherry picked from commit a37a1a428431d3e7e9f53530b5c56ff7867bd487) + (cherry picked from commit c1c98501121eefa0888a42566ec7233a1626f678) + 
(back ported from commit dd5f03beb4f76ae65d76d8c22a8815e424fc607c) + (cherry picked from commit c9218a9e677856d6647ea82d821f22ccbffc988c) + (cherry picked from commit f088cbb8d8547a89af258a3223657f9a69b811e4) + (cherry picked from commit 4942c0b4b64478ff45c3bbf4d40aebd66de0bcc5) + (cherry picked from commit 2d97436f5b06217beb6c91a0cd0ae0f0d79b61cc) + (cherry picked from commit 5db5765e255de4072eb0e35facfeafce53af001b) + (cherry picked from commit 61f78268936e781a104b4ac06b7e47d760800c40) + (cherry picked from commit 6dcebe614c667fca73aaf0cfbd1e70bc9179538e) + (cherry picked from commit d1db47c5eed89a1c8b60e780aeadd870bb0d3894) + (cherry picked from commit af2e2e35a23e4aeecfe4332a7140c81e0f09b7e3) + (cherry picked from commit 7b85627b9f02f9b0fb2ef5f021807f4251135857) + (back ported from commit d487ee77740ccf79d7dc1935d4daa77887283028) + (cherry picked from commit 297e0dad720664dad44baa2cdd13f871979fb58c) + (cherry picked from commit eb6ab13267be87dcad3e611500b7cb404ed4479c) + (cherry picked from commit a3a5a82627492c8947d8866ddf421e9248088466) + (cherry picked from commit dc87a90f5d61d7f01cfb63d92d5eaa27718e5b19) + (cherry picked from commit 9bd626e79df67b2ba3b0c91a4640ab7bca1af04d) + (cherry picked from commit 40aca6ffcac57dd9c65877a59a8bbb210c4691ca) + (cherry picked from commit 37721d8501a9a1fbe87527c24d127a914b29fd7f) + (cherry picked from commit ed4c54e5b4baf55a7a67a80fa766334855c94854) + (cherry picked from commit 2f85d24e604c1532723c4b5791816b533baed2c3) + (cherry picked from commit 990acea616e99355703b503c1e50fb9c7ddff6b9) + (cherry picked from commit f282651de676d10e395bc7923f0087fbbba12ed7) + (cherry picked from commit 31ab8acbf6618c89fec77f7706df7daaa319feb5) + (cherry picked from commit 9392fa06411cf93885c4cafc8058085d98f52fec) + (cherry picked from commit 27cdef637c25705b433d5c4deeef4cf8dcb75d6a) + (cherry picked from commit 8ce96afa8239f13bdf5ab35839bac46c103bbedc) + (cherry picked from commit 6cd28f044b47aeeba91807d97d6f3ea5a048e88d) + (cherry picked from commit 5462eddd7a78131ccb514d52473625d99769215e) + (cherry picked from commit 05633102d85b50f35325dfbedafcedd6c5b3264c) + (cherry picked from commit 437708c44395a11e474fb33b4fd7f29483118e51) + (cherry picked from commit d9d5713ca628dc211d8b4a1da5fb9e0cfe592b92) + (cherry picked from commit a384b20e417ae0f5f1f359600b4bdcc34265b256) + (cherry picked from commit 298589b1cb626adf4beba6dd8e3cd4b64e8799be) + (cherry picked from commit e08a8761d89b7625144c3fbf0ff9643159135c96) + (cherry picked from commit 0b6e81b91070bdbe0defb9101384ebb26835e401) + (cherry picked from commit 042b9adae899e1b497282d92205d3fef42d5ca8d) + (cherry picked from commit ada388f7afad1e2e87acbfe30600fdaff9bd6327) + (cherry picked from commit 3bdb31f688276505ede23280885948e934304674) + (cherry picked from commit bde51583f49bd87e452e9504d489926638046b11) + (cherry picked from commit db81a5c374b5bd650c5e6ae85d026709751db103) + (cherry picked from commit 05bdb2ab6b09f2306f0afe0f60f4b9abffa7aba4) + (cherry picked from commit 9e9c47d07d447e09a66ee528c3ebad9ba359af6a) + (cherry picked from commit 1bde6e301cf6217da9238086c958f532b16e504d) + (cherry picked from commit 8c8a49148b95c4d7c5f58a6866a30ea02485d7a3) + (cherry picked from commit 57761d8df8efc7cc1227f9bc22e0dda01b0dd91b) + (cherry picked from commit 676687c69697d2081d25afd14ee90937d1fb0c8e) + (cherry picked from commit 9e65dc371b5c8d7476c81353137efc13cc1bdabd) + (cherry picked from commit 78c0f98cc9dd46824fa66f35f14ea24ba733d145) + (cherry picked from commit 1a4c3a3dc5fdeef2a7bdf4ac7d81df58c3c0a51e) + (cherry 
picked from commit d07875bd0d1517185534b5f9eef469426ba42cb9) + (cherry picked from commit ab576627c8f97c08297d81537be17df161171923) + (cherry picked from commit a80e21b3b2d151d79bb9be42334ab10d40195324) + (cherry picked from commit 9d8abf45944e4f1c18a04070fc3ed2f3ffcbbcb6) + (cherry picked from commit 4196670be786d529ab7f6c18f5077141ce1b787e) + (cherry picked from commit acc4fccf4eff5b29e545995b75de77e60ea44aae) + (cherry picked from commit 4ce5a5744a2f5479e58c6788cbe3987b8071b62e) + (cherry picked from commit ddf8bd349115c2bc85a62e3d94018c9976ac72f7) + (cherry picked from commit 5071456fe244e409adcda7226e5f0b5d1b879fd3) + (cherry picked from commit ad4885d279b63c65347220236d07669a2f59634b) + (cherry picked from commit b4a26a27287a7f81933ba016aeed6c69dd155323) + (cherry picked from commit 0f0132001fd239bb67c1f68436b95cc79de89736) + (cherry picked from commit 6ecde51dd7894ffe2f959cca1fea3ea2b9ee2394) + (cherry picked from commit 0861565f501ce3fcea9394d4b98c02b1f6de6b9e) + (cherry picked from commit f809309a251a13bd97cc189c3fa428782aab9716) + (cherry picked from commit 7d9eacf9457efc6b614665e1095336c11ad83f0d) + (cherry picked from commit fd8b48b22a2b7cdf21f15b01cae379e6159a7eea) + (cherry picked from commit a61d93d92f5c9533898098abb5f187840900aeb5) + (cherry picked from commit 09de3f1313a30d8a22e488c9a5b96a9560cae96d) + (cherry picked from commit 99932d4fc03a13bb3e94938fe25458fabc8f2fc3) + (cherry picked from commit 169a1d85d084edeb0736ad80fe439639ac938dcd) + (cherry picked from commit 367d56f7b4d5ce61e883c64f81786c7a3ae88eea) + (cherry picked from commit 57352ef4f5f19969a50d42e84b274287993b576f) + (cherry picked from commit 97989356af0ec8b1b1658d804892abb354127330) + (cherry picked from commit 56cb456746a15c1025a178466492ca4c373b1a63) + (cherry picked from commit 2a2083f7f3568c0192daa6ac0e6fa35d953f47bd) + (cherry picked from commit 7855bff42ea9938a0853321256f4c8ce3628aa73) + (cherry picked from commit de123268300fd33b7f7668fda3264059daffa6ef) + (cherry picked from commit 299603e8370a93dd5d8e8d800f0dff1ce2c53d36) + (cherry picked from commit bf5a755f5e9186406bbf50f4087100af5bd68e40) + (cherry picked from commit 600adc18eba823f9fd8ed5fec8b04f11dddf3884) + (back ported from commit b582ef0990d457f7ce8ccf827af51a575ca0b4a6) + (cherry picked from commit e27a2f839598e48bb345f9fc86d4d54c3944db50) + (back ported from commit dc01e7d3447793fd9e4090aa9d50c549848b5a18) + (cherry picked from commit b5aaab12b2b4bc4acab7384c17a87f3406e5047d) + (cherry picked from commit 920a0fde5a32fd40b4d3c94ad72f7fc7039db8e3) + (cherry picked from commit 438e38fadca2f6e57eeecc08326c8a95758594d4) + (cherry picked from commit 97a5221f56bad2e1c7e8ab55da4ac4748ef59c64) + (cherry picked from commit 02512482321c531df4abf73943529f8b44d869e2) + (cherry picked from commit bb2146bc883e86b835e30644757a6d4a649a7ce8) + (back ported from commit ca9f9f703950e5cb300526549b4f1b0a6605a5c5) + (cherry picked from commit fd8daa45f2bd9b876e0dbb9503ccc5a5252844f2) + (cherry picked from commit b97b33a3df0439401f80f041eda507d4fffa0dbf) + (cherry picked from commit 93591aaa62f89820f4ae0558f01eaf9a359738da) + (cherry picked from commit 15bffdffccb3204eb1e993f60eee65c439a03136) + (cherry picked from commit 9813337a4b16ea5b1701b1d00f7e410f5decdfa5) + (cherry picked from commit 313c2d375b1c9b648d9d4b96ec1b8185ac6a78c5) + (cherry picked from commit 28d222bbaa5122fb4bb0e607e39ab149a010e587) + (cherry picked from commit ec5709403e6893acb4f7ca40514ebd29c3116836) + (cherry picked from commit 9717218bb2982f5f214d84473c70542f1e42bfd7) + (cherry picked 
from commit d0ceebd7508d5bf6e81367640959aef7e0de4947) + (cherry picked from commit c120e9e03090b4f9578ca38ef4250ff3805b6e3f) + (cherry picked from commit 6ee51a4e866bbb0921180b457ed16cd172859346) + (cherry picked from commit 9cd593529c8652785bc9962acc79b6b176741f99) + (cherry picked from commit b6ffaeffaea4d92f05f5ba1ef54df407cb7c8517) + (cherry picked from commit 2f5bb473681b88819a9de28ac3a47e7737815a92) + (cherry picked from commit 5ea8bbfc49291b7e23161fe4de0bf3e4a4e34b18) + (cherry picked from commit ceb5433b3a54979216d794e45147d25c24c94999) + (cherry picked from commit aa9a2d51a3e70b15a898bec7dde3ce5726fec641) + (cherry picked from commit e81f44b66b456a7dcfbdeffeb355458cd6a58973) + (cherry picked from commit 7a2cea2aaae2d5eb5c00c49c52180c7c2c66130a) + (cherry picked from commit 05eb23893c2cf9502a9cec0c32e7f1d1ed2895c8) + (cherry picked from commit 38be0a347c91133843474e12baacd252d0fd1c30) + (cherry picked from commit 82373701be26b893eaf7372db0af84235a51998a) + (cherry picked from commit 1ab95d37bcc3ff2d69e3871e4f056bab7aed0b85) + (cherry picked from commit f74462acf8f390528c8b7937f227c6c90d017f3b) + (cherry picked from commit 449fc48866f7d84b0d9a19201de18a4dd4d3488c) + (cherry picked from commit dd41cc3bb90efd455df514899a5d3cf245182eb1) + (cherry picked from commit e471b40321a94f07d13b8a9e4b064885cf08835d) + (cherry picked from commit bfd2793c9559ae73ae021797f1d4b097c27f24be) + (cherry picked from commit b74757944d69f8cd7de5284fc7e8649d965361ab) + (cherry picked from commit d18f141a1a7cfa5ffad8433e43062b05a8d1b82a) + (cherry picked from commit 1b136de120dda625109f2afe1e3d04e256be9ec1) + (cherry picked from commit a66132f3eb514f42c49a3e8f57aab2ccd0360f06) + + * mlx4_core: Roll back round robin bitmap allocation commit for CQs, SRQs, and MPTs + * net/mlx4_core: Remove zeroed out of explicit QUERY_FUNC_CAP fields + * net/mlx4_core: Rename QUERY_FUNC_CAP fields + * net/mlx4_core: Introduce nic_info new flag in QUERY_FUNC_CAP + * net/mlx4_core: Expose physical port id as PF/VF capability + * net/mlx4_en: Implement ndo_get_phys_port_id + * net/mlx4_en: Configure the XPS queue mapping on driver load + * net/mlx4_core: Set CQE/EQE size to 64B by default + * net/mlx4_en: Ignore irrelevant hypervisor events + * net/mlx4_en: Add NAPI support for transmit side + * net/mlx4_core: Check port number for validity before accessing data + * infiniband: slight optimization of addr compare + * net/mlx4_core: Add basic support for TCP/IP offloads under tunneling + * net/mlx4_en: Add netdev support for TCP/IP offloads of vxlan tunneling + * net: mlx4: slight optimization of addr compare + * mlx4_en: Add PTP hardware clock + * mlx4_en: Only cycle port if HW timestamp config changes + * net/mlx4_core: Warn if device doesn't have enough PCI bandwidth + * net/mlx4_en: fix error return code in mlx4_en_get_qp() + * mlx4_en: Select PTP_1588_CLOCK + * net/mlx4_en: call gro handler for encapsulated frames + * RDMA/ocrdma: Fix AV_VALID bit position + * RDMA/ocrdma: Fix OCRDMA_GEN2_FAMILY macro definition + * IB/usnic: Add Cisco VIC low-level hardware driver + * IB/usnic: Change WARN_ON to lockdep_assert_held + * IB/usnic: Add struct usnic_transport_spec + * IB/usnic: Push all forwarding state to usnic_fwd.[hc] + * IB/usnic: Port over main.c and verbs.c to the usnic_fwd.h + * IB/usnic: Port over usnic_ib_qp_grp.[hc] to new usnic_fwd.h + * IB/usnic: Port over sysfs to new usnic_fwd.h + * IB/usnic: Update ABI and Version file for UDP support + * IB/usnic: Add UDP support to usnic_fwd.[hc] + * IB:usnic: Add UDP support 
to usnic_transport.[hc] + * IB/usnic: Add UDP support in u*verbs.c, u*main.c and u*util.h + * IB/usnic: Add UDP support in usnic_ib_qp_grp.[hc] + * IB/core: Add RDMA_TRANSPORT_USNIC_UDP + * IB/usnic: Remove superflous parentheses + * IB/usnic: Use for_each_sg instead of a for-loop + * IB/usnic: Expose flows via debugfs + * IB/usnic: Fix typo "Ignorning" -> "Ignoring" + * IB/usnic: Append documentation to usnic_transport.h and cleanup + * IB/mlx5: Remove unused code in mr.c + * mlx5_core: Remove dead code + * IB/mlx5: Fix micro UAR allocator + * IB/core: Add flow steering support for IPoIB UD traffic + * IB/core: Add support for IB L2 device-managed steering + * mlx4_core: Add support for steerable IB UD QPs + * IB/mlx4: Enable device-managed steering support for IB ports too + * IB/mlx4: Add mechanism to support flow steering over IB links + * IB/mlx4: Add support for steerable IB UD QPs + * IB/core: Ethernet L2 attributes in verbs/cm structures + * net/mlx4_core: clean up cq_res_start_move_to() + * net/mlx4_core: clean up srq_res_start_move_to() + * IB/usnic: Fix endianness-related warnings + * IB/usnic: Add dependency on CONFIG_INET + * IB/core: Add support for RDMA_NODE_USNIC_UDP + * IB/usnic: Advertise usNIC devices as RDMA_NODE_USNIC_UDP + * IB/usnic: Set userspace/kernel ABI ver to 4 + * IB/usnic: Remove unused variable in usnic_debugfs_exit() + * IB/mlx4: Fix error return code + * IB/cma: IBoE (RoCE) IP-based GID addressing + * IB/mlx4: Use IBoE (RoCE) IP based GIDs in the port GID table + * IB/mlx4: Handle Ethernet L2 parameters for IP based GID addressing + * IB/isert: seperate connection protection domains and dma MRs + * IB/isert: Avoid frwr notation, user fastreg + * IB/isert: Move fastreg descriptor creation to a function + * IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine + * RDMA/ocrdma: Handle Ethernet L2 parameters for IP based GID addressing + * RDMA/ocrdma: Populate GID table with IP based gids + * IB/core: Resolve Ethernet L2 addresses when modifying QP + * IB/core: Make ib_addr a core IB module + * IB/cm: Fix missing unlock on error in cm_init_qp_rtr_attr() + * IB/mlx4: Add dependency INET + * RDMA/ocrdma: Move ocrdma_inetaddr_event outside of "#if CONFIG_IPV6" + * RDMA/ocrdma: Add dependency on INET + * IB/mlx4: Use IS_ENABLED(CONFIG_IPV6) + * IB/usnic: Use GFP_ATOMIC under spinlock + * net/mlx4_core: Remove unnecessary validation for port number + * RDMA/cma: Handle global/non-linklocal IPv6 addresses in cma_check_linklocal() + * IB/core: Fix unused variable warning + * IPoIB: Report operstate consistently when brought up without a link + * RDMA/amso1100: Add check if cache memory was allocated before freeing it + * IB/usnic: Remove unused includes of + * RDMA/cxgb4: Fix gcc warning on 32-bit arch + * mlx5_core: Fix out arg size in access_register command + * IB/mlx5: Clear out struct before create QP command + * mlx5_core: Use mlx5 core style warning + * IB/mlx5: Make sure doorbell record is visible before doorbell + * IB/mlx5: Implement modify CQ + * IB/mlx5: Add support for resize CQ + * mlx5_core: Improve debugfs readability + * mlx5_core: Fix PowerPC support + * IB/mlx5: Allow creation of QPs with zero-length work queues + * IB/mlx5: Abort driver cleanup if teardown hca fails + * IB/mlx5: Remove old field for create mkey mailbox + * IB/mlx5: Verify reserved fields are cleared + * iscsi-target: Convert gfp_t parameter to task state bitmask + * IB/mlx5: Fix RC transport send queue overhead computation + * IB/mlx5: Fix binary compatibility with 
libmlx5 + * IB/mlx5: Don't set "block multicast loopback" capability + * RDMA/nes: Fix error return code + * RDMA/amso1100: Fix error return code + * iser-target: Fix leak on failure in isert_conn_create_fastreg_pool + * IB/srpt: replace strict_strtoul() with kstrtoul() + * IB/mlx4: Don't allocate range of steerable UD QPs for Ethernet-only device + * IB/mlx4: Make sure GID index 0 is always occupied + * IB/mlx4: Move rtnl locking to the right place + * IB/mlx4: Do IBoE locking earlier when initializing the GID table + * IB/mlx4: Do IBoE GID table resets per-port + * IB/mlx4: Build the port IBoE GID table properly under bonding + * IB: Report using RoCE IP based gids in port caps + * RDMA/cxgb4: Add missing neigh_release in LE-Workaround path + * mlx5: Add include of because of kzalloc()/kfree() use + * IB/mlx5: Remove dependency on X86 + * IB/usnic: Fix smatch endianness error + * IB/iser: Avoid dereferencing iscsi_iser conn object when not bound to iser connection + * IB/iser: Fix use after free in iser_snd_completion() + * RDMA/ocrdma: Fix traffic class shift + * RDMA/ocrdma: Fix load time panic during GID table init + * netdevice: add queue selection fallback handler for ndo_select_queue + * net,IB/mlx: Bump all Mellanox driver versions + * net/mlx4: Support shutdown() interface + * net/mlx4_core: Fix memory access error in mlx4_QUERY_DEV_CAP_wrapper() + * net/mlx4_core: mlx4_init_slave() shouldn't access comm channel before PF is ready + * net/mlx4_core: Fix wrong dump of the vxlan offloads device capability + * net/mlx4_en: Handle vxlan steering rules for mac address changes + * net/mlx4_core: Load the IB driver when the device supports IBoE + * net/mlx4_en: Deregister multicast vxlan steering rules when going down + * net-gro: Prepare GRO stack for the upcoming tunneling support + * net-gre-gro: Add GRE support to the GRO stack + * net: gro: change GRO overflow strategy + * net: Add GRO support for UDP encapsulating protocols + * net: Export gro_find_by_type helpers + * net: Add GRO support for vxlan traffic + * net/udp_offload: Handle static checker complaints + * net/ipv4: Use non-atomic allocation of udp offloads structure instance + * net/vxlan: Go over all candidate streams for GRO matching + * gre_offload: statically build GRE offloading support + * net/mlx4_core: pass pci_device_id.driver_data to __mlx4_init_one during reset + * net/mlx4: Set number of RX rings in a utility function + * net/mlx4: Fix limiting number of IRQ's instead of RSS queues + * net/mlx4_en: Fix bad use of dev_id + * net/mlx4_en: Fix UP limit in ieee_ets->prio_tc + * net/mlx4_en: Verify mlx4_en module parameters + * net/mlx4_en: Pad ethernet packets smaller than 17 bytes + * net/mlx4_en: Move queue stopped/waked counters to be per ring + * net/mlx4: Replace mlx4_en_mac_to_u64() with mlx4_mac_to_u64() + * net/mlx4_en: Fix selftest failing on non 10G link speed + * net/mlx4_core: Fix sparse warning + * net/mlx4_en: Use union for BlueFlame WQE + * net/mlx4_en: Change Connect-X description in kconfig + * net/mlx4_en: mlx4_en_verify_params() can be static + * IB/mlx5_core: remove unreachable function call in module init + * mlx4: Adjust QP1 multiplexing for RoCE/SRIOV + * mlx4_core: For RoCE, allow slaves to set the GID entry at that slave's index + * mlx4: In RoCE allow guests to have multiple GIDS + * mlx4: Add ref counting to port MAC table for RoCE + * mlx4: Implement IP based gids support for RoCE/SRIOV + * mlx4_ib: Fix SIDR support of for UD QPs under SRIOV/RoCE + * mlx4: Activate RoCE/SRIOV + * mlx4: 
Call dev_kfree_skby_any instead of dev_kfree_skb. + * cxgb4/iw_cxgb4: Treat CPL_ERR_KEEPALV_NEG_ADVICE as negative advice + * cxgb4/iw_cxgb4: Doorbell Drop Avoidance Bug Fixes + * mlx4: Don't receive packets when the napi budget == 0 + * IB/mlx4_ib: Adapt code to use caps.num_ports instead of a constant + * net/mlx4: Add data structures to support N-Ports per VF + * net/mlx4: Add utils for N-Port VFs + * net/mlx4: Adapt code for N-Port VF + * net/mlx4: Adapt num_vfs/probed_vf params for single port VF + * mlx4: Use actual number of PCI functions (PF + VFs) for alias GUID logic + * RDMA/cxgb4: set error code on kmalloc() failure + * net/mlx4: USe one wrapper that returns -EPERM + * mlx4: Add support for CONFIG_DEV command + * net/mlx4: Implement vxlan ndo calls + * net/mlx4: Set proper build dependancy with vxlan + + [ Wen-chien Jesse Sung ] + + * SAUCE: Bluetooth: Give restart command more time to complete its job + - LP: #1301908 + + -- Tim Gardner Thu, 03 Apr 2014 06:06:15 -0600 + +linux (3.13.0-22.44) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1301562 + + [ dann frazier ] + + * [Config] enable linux-tools on arm64 + https://lists.ubuntu.com/archives/kernel-team/2014-April/041332.html + + [ Greg Kurz ] + + * SAUCE: powerpc/le: Big endian arguments for ppc_rtas() + - LP: #1289518 + + [ Mahesh Salgaonkar ] + + * SAUCE: powerpc/book3s: Fix CFAR clobbering issue in machine check + handler. + - LP: #1301424 + * SAUCE: powerpc/book3s: Recover from MC in sapphire on SCOM read via + MMIO. + - LP: #1301424 + * SAUCE: powerpc/book3s: Fix mc_recoverable_range buffer overrun issue. + - LP: #1301424 + + [ Paolo Pisati ] + + * [Config] armhf: USB_STORAGE=y + https://lists.ubuntu.com/archives/kernel-team/2014-April/041349.html + + [ Stefan Bader ] + + * SAUCE: kvm: Force preempt folding in kvm on i386 + - LP: #1268906 + + [ Tim Gardner ] + + * SAUCE: Drop lttng in favor of lttng-modules + The kernel version was down rev on an rc release. + + [ Tomas Winkler ] + + * SAUCE: (no-up) mei: me: do not load the driver if the FW doesn't + support MEI interface + - LP: #1301118 + + [ Upstream Kernel Changes ] + + * drm/i915: Deprecated UMS support + - LP: #1284816 + * powerpc/book3s: Split the common exception prolog logic into two + section. + - LP: #1301424 + * powerpc/book3s: Introduce exclusive emergency stack for machine check + exception. + - LP: #1301424 + * powerpc/book3s: handle machine check in Linux host. + - LP: #1301424 + * powerpc/book3s: Return from interrupt if coming from evil context. + - LP: #1301424 + * powerpc/book3s: Introduce a early machine check hook in cpu_spec. + - LP: #1301424 + * powerpc/book3s: Add flush_tlb operation in cpu_spec. + - LP: #1301424 + * powerpc/book3s: Flush SLB/TLBs if we get SLB/TLB machine check errors + on power7. + - LP: #1301424 + * powerpc/book3s: Flush SLB/TLBs if we get SLB/TLB machine check errors + on power8. + - LP: #1301424 + * powerpc/book3s: Decode and save machine check event. + - LP: #1301424 + * powerpc/book3s: Queue up and process delayed MCE events. + - LP: #1301424 + * powerpc/powernv: Remove machine check handling in OPAL. + - LP: #1301424 + * powerpc/powernv: Machine check exception handling. + - LP: #1301424 + * powerpc: Fix "attempt to move .org backwards" error + - LP: #1301424 + * powerpc: Fix endian issues in power7/8 machine check handler + - LP: #1301424 + * Move precessing of MCE queued event out from syscall exit path. 
+ - LP: #1301424 + + -- Andy Whitcroft Wed, 02 Apr 2014 15:58:48 +0100 + +linux (3.13.0-21.43) trusty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: kvm: BIOS disabled kvm support should be a warning + - LP: #1300247 + * SAUCE: nouveau: missing outputs should be warnings + - LP: #1300244 + + [ John Johansen ] + + * Revert "SAUCE: Add config option to disable new apparmor 3 semantics" + * Revert "SAUCE: apparmor: fix uninitialized lsm_audit membe" + * Revert "SAUCE: (no-up) apparmor: Fix tasks not subject to, reloaded + policy" + * Revert "SAUCE: apparmor: allocate path lookup buffers during init" + * Revert "SAUCE: apparmor: fix unix domain sockets to be mediated on + connection" + * Revert "SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot" + * SAUCE: (no-up) apparmor: Sync to apparmor3 - alpha6 snapshot + - LP: #1298611 + + [ Tetsuo Handa ] + + * SAUCE: kthread: Do not leave kthread_create() immediately upon SIGKILL. + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1300412 + * [Config] updateconfigs after AA patch set + * [Config] CONFIG_ZSWAP=n, CONFIG_ZBUD=n for all arches + * [Config] CONFIG_XILINX_LL_TEMAC=m for powerpc + * [Config] CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y for ppc64el + * [Config] CONFIG_WLAN=y for arm64 + * [Config] CONFIG_VORTEX=m for ppc64el + * [Config] CONFIG_WIMAX=m for ppc64el + * [Config] CONFIG_WATCHDOG=y for ppc64el + * [Config] CONFIG_VME_BUS=m for ppc64el + * [Config] CONFIG_VIRT_DRIVERS=y for ppc64el + * [Config] CONFIG_VIDEO_OUTPUT_CONTROL=m for ppc64el + * [Config] CONFIG_VERSION_SIGNATURE="" for powerpc64-emb + * [Config] CONFIG_UWB=m for ppc64el + + [ Upstream Kernel Changes ] + + * vhost: validate vhost_get_vq_desc return value + - CVE-2014-0055 + * net: use kfree_skb_list() helper + * skbuff: skb_segment: s/frag/nskb_frag/ + * skbuff: skb_segment: s/skb_frag/frag/ + * skbuff: skb_segment: s/skb/head_skb/ + * skbuff: skb_segment: s/fskb/list_skb/ + * skbuff: skb_segment: orphan frags before copying + - CVE-2014-0131 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.8 + + -- Tim Gardner Mon, 31 Mar 2014 12:38:11 -0600 + +linux (3.13.0-20.42) trusty; urgency=low + + [ Adam Conrad ] + + * [Packaging] Set bootloader and loader on ppc64el to grub + + [ Andy Whitcroft ] + + * rebase to v3.13.7 + * [Config] updateconfigs following rebase to v3.13.7 + * cloud-tools -- pull in init scripts for Hyper-V daemons + * cloud-tools -- detect Hyper-V VM to avoid starting + * cloud-tools -- update IF_NAME to DEVICE in hv_* scripts + - LP: #1295401 + * [Config] cloud-tools -- ensure we force older hv-kvp-daemon-init off + * [Config] fix up Breaks/Replaces on linux-cloud-tools-common to fix upgrades + + [ Emmanuel Grumbach ] + + * SAUCE: (no-up) iwlwifi: mvm: disable uAPSD due to bugs in the firmware + + [ James Bottomley ] + + * SAUCE: (no-up) fix our current target reap infrastructure + - LP: #1283604 + * SAUCE: (no-up) dual scan thread bug fix + - LP: #1283604 + + [ K. Y. 
Srinivasan ] + + * SAUCE: (no-up) Tools: hv: vssdaemon: Ignore VFAT mounts during the + Freeze operation + - LP: #1298192 + + [ Paolo Pisati ] + + * [Config] disable HW_RANDOM_EXYNOS, USB_DWC3_EXYNOS, + PHY_EXYNOS_MIPI_VIDEO + - LP: #1294353 + * [Config] armhf: generic: AHCI_IMX=y, SERIAL_FSL_LPUART=m + - LP: #1294951 + * [Config] armhf: generic: MFD_TI_AM335X_TSCADC and USB_DWC3_OMAP = m + - LP: #1294962 + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1298585 + * [Config] ignore.modules + * SAUCE: i2c-cpm: Add missing includes for powerpc + * [Config] CONFIG_ABX500_CORE=y for ppc64el + * [Config] CONFIG_ALX=m for powerpc + * [Config] CONFIG_ACENIC_OMIT_TIGON_I=n for ppc64el + * [Config] CONFIG_ACORN_PARTITION_*=n for ppc64el + * [Config] CONFIG_ANDROID=n for powerpc + * [Config] CONFIG_ASYNC_RAID6_TEST=m for ppc64el + * [Config] CONFIG_BIG_KEYS=y for ppc64el + * [Config] CONFIG_BLK_DEV_INTEGRITY=y for ppc64el + * [Config] CONFIG_BSD_PROCESS_ACCT=y for ppc64el + * [Config] CONFIG_BLK_DEV_SR_VENDOR=n for ppc64el + * [Config] CONFIG_CFG80211=m for ppc64el + * [Config] CONFIG_CHARGER_BQ24190=m for powerpc + * [Config] CONFIG_CHARGER_BQ24735=m for powerpc + * [Config] CONFIG_EXPERT=y for ppc64el + * [Config] CONFIG_ATA_SFF=y on ppc64el + * [Config] CONFIG_ATA_GENERIC=y for ppc64el, powerpc + * [Config] CONFIG_CHR_DEV_ST=m for ppc64el + * [Config] CONFIG_CHECKPOINT_RESTORE=y for ppc64el + * [Config] CONFIG_CHELSIO_T1_1G=y for ppc64el + * [Config] CONFIG_CHR_DEV_OSST=m for ppc64el + * [Config] CONFIG_CHR_DEV_SCH=m for ppc64el + * [Config] CONFIG_CPU_FREQ_STAT=y for powerpc + * [Config] CONFIG_DDR=y for ppc64el + * [Config] CONFIG_DEBUG_BUGVERBOSE=y for powerpc + * [Config] CONFIG_EXT4_USE_FOR_EXT23=y for powerpc, ppc64el + * [Config] CONFIG_E100=m, CONFIG_E1000=m, CONFIG_E1000E=m for ppc64el + * [Config] CONFIG_EZX_PCAP=n for all arches + * [Config] CONFIG_DYNAMIC_DEBUG=y for powerpc + * [Config] CONFIG_ENABLE_MUST_CHECK=n for ppc64el + * [Config] CONFIG_ENABLE_WARN_DEPRECATED=n for ppc64el + * [Config] CONFIG_FB_3DFX=m for all arches + * [Config] CONFIG_FB_MATROX=m for ppc64el + * [Config] CONFIG_FB_RADEON=m for ppc64el + * [Config] CONFIG_FB_SAVAGE_I2C=y for all arches + * [Config] CONFIG_FIREWIRE=m for ppc64el + * [Config] CONFIG_FTR_FIXUP_SELFTEST=n for ppc64el + * [Config] CONFIG_HAMRADIO=y for ppc64el + * [Config] CONFIG_I2C_CHARDEV=m for ppc64el + * [Config] CONFIG_I2C_MUX=m for ppc64el + * [Config] CONFIG_I2C_STUB=m for ppc64el + * [Config] CONFIG_I2O=m for ppc64el + * [Config] CONFIG_INET_XFRM_MODE_BEET=m, CONFIG_INET_XFRM_MODE_TRANSPORT=m, CONFIG_INET_XFRM_MODE_TUNNEL=m for ppc64el + * [Config] CONFIG_INFINIBAND_IPOIB_DEBUG=n, CONFIG_INFINIBAND_MTHCA_DEBUG=n for ppc64el + * [Config] CONFIG_INFINIBAND_NES=m, CONFIG_INFINIBAND_OCRDMA=m, CONFIG_INFINIBAND_QIB=m for ppc64el + * [Config] CONFIG_INPUT_FF_MEMLESS=m for ppc64el + * [Config] CONFIG_INTERVAL_TREE_TEST=m for ppc64el + * [Config] CONFIG_IPACK_BUS=m for ppc64el + * [Config] CONFIG_ISDN=y for ppc64el + * [Config] CONFIG_ISO9660_FS=m for ppc64el + * [Config] CONFIG_KGDB=y for ppc64el + * [Config] CONFIG_KVM_GUEST=y for ppc64el + * [Config] CONFIG_L2TP_V3=y for powerpc + * [Config] CONFIG_MAILBOX=y for ppc64el + * [Config] CONFIG_MD_LINEAR=m, CONFIG_MD_RAID0=m, CONFIG_MD_RAID1=m for ppc64el + * [Config] CONFIG_MEDIA_SUPPORT=m for ppc64el + * [Config] CONFIG_MEMORY=y for ppc64el + * [Config] CONFIG_MEMSTICK=m for ppc64el + * [Config] CONFIG_MFD_SM501_GPIO=n for ppc64el + * [Config] CONFIG_MLX4_DEBUG=n for ppc64el + * [Config] 
CONFIG_MMC_BLOCK=m for ppc64el + * [Config] CONFIG_MOUSE_PS2=m for ppc64el + * [Config] CONFIG_NET_9P=m for ppc64el + * [Config] CONFIG_MSDOS_FS=m for ppc64el + * [Config] CONFIG_MSI_BITMAP_SELFTEST=n for ppc64el + * [Config] CONFIG_MTD=m for arm64 + * [Config] CONFIG_NETCONSOLE=m for ppc64el + * [Config] CONFIG_NETFILTER_XT_TARGET_NOTRACK=m for ppc64el + * [Config] CONFIG_NETPOLL_TRAP=n for ppc64el + * [Config] CONFIG_NET_IPIP=m for ppc64el + * [Config] CONFIG_NET_TEAM=m for all arches + * [Config] CONFIG_NFC=m for ppc64el + * [Config] CONFIG_NL80211_TESTMODE=n for all arches + * [Config] CONFIG_NLS_CODEPAGE_437=y for powerpc + * [Config] CONFIG_NLS_ASCII=m, CONFIG_NLS_ISO8859_1=m, CONFIG_NLS_UTF8=m for ppc64el + * [Config] CONFIG_NOP_USB_XCEIV=m for ppc64el + * [Config] CONFIG_NOTIFIER_ERROR_INJECTION=m for ppc64el + * [Config] CONFIG_OPROFILE=m for ppc64el + * [Config] CONFIG_PARPORT_1284=y for ppc64el + * [Config] CONFIG_PARPORT_AX88796=m, CONFIG_PARPORT_PC_FIFO=y, CONFIG_PARPORT_SERIAL=m for ppc64el + * [Config] CONFIG_PCI_IOV=y, CONFIG_PCI_PASID=y, CONFIG_PCI_PRI=y, CONFIG_PCI_REALLOC_ENABLE_AUTO=y, CONFIG_PCI_STUB=m for ppc64el + * [Config] CONFIG_PCNET32=m for ppc64el + * [Config] CONFIG_SCSI_DH_EMC=m for ppc64el + * [Config] CONFIG_SCSI_DH_HP_SW=m for ppc64el + * [Config] CONFIG_SCSI_FC_ATTRS=m for ppc64el + * [Config] CONFIG_SCSI_IPR=m for ppc64el + * [Config] CONFIG_SCSI_LOGGING=y for ppc64el + * [Config] CONFIG_SCSI_OSD_INITIATOR=m for ppc64el + * [Config] CONFIG_SCSI_SCAN_ASYNC=y for ppc64el + * [Config] CONFIG_SCSI_SYM53C8XX_2=m for ppc64el + + [ Timo Aaltonen ] + + * SAUCE: i915_bdw: Provide an ubuntu/i915 driver for Broadwell graphics + - LP: #1294144 + * SAUCE: i915_bdw: Add DP_AUX definitions + - LP: #1294144 + * SAUCE: i915_bdw: Update intel_ips.h file location + - LP: #1294144 + * SAUCE: i915_bdw: Revert "ACPI / i915: replace open-coded _DSM code with + helper functions" + - LP: #1294144 + * SAUCE: i915_bdw: Add an include back to intel_opregion.c + - LP: #1294144 + * SAUCE: i915_bdw: Only support Broadwell with ubuntu/i915 driver + - LP: #1294144 + * SAUCE: i915_bdw: Add i915_bdw_gpu_*() calls for ubuntu/i915 + - LP: #1294144 + * i915_bdw: [Config] Enable CONFIG_DRM_I915_BDW=m, and disable UMS + - LP: #1294144 + * SAUCE: i915_bdw: Rename ubuntu/i915 driver to i915_bdw + - LP: #1294144 + + [ Upstream Kernel Changes ] + + * netfilter: nf_conntrack_dccp: fix skb_header_pointer API usages + - CVE-2014-2523 + * Input: ALPS - add support for "Dolphin" devices + - LP: #1190867 + * x86/mm/pageattr: Lookup address in an arbitrary PGD + - LP: #1297658 + * x86/mm/pageattr: Add a PGD pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PUD pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PMD pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PTE pagetable populating function + - LP: #1297658 + * x86/mm/pageattr: Add a PUD error unwinding path + - LP: #1297658 + * x86/mm/pageattr: Add last levels of error path + - LP: #1297658 + * x86/mm/cpa: Map in an arbitrary pgd + - LP: #1297658 + * x86/efi: Runtime services virtual mapping + - LP: #1297658 + * x86/efi: Check krealloc return value + - LP: #1297658 + * Include apm-mustang.dtb in kernel-image udeb + * drm/i915: add i915_reset_count + - LP: #1294144 + * drm/i915: add i915_get_reset_stats_ioctl + - LP: #1294144 + * drm: add DRM_INFO_ONCE() to print a one-time DRM_INFO() message + - LP: #1294144 + * drm: provide a helper for the encoder possible_crtcs mask + - LP: 
#1294144 + * drm: Move drm_encoder_crtc_ok() to core + - LP: #1294144 + * drm: Pass the display mode to drm_calc_timestamping_constants() + - LP: #1294144 + * drm: Pass the display mode to drm_calc_vbltimestamp_from_scanoutpos() + - LP: #1294144 + * drm: Pass 'flags' from the caller to .get_scanout_position() + - LP: #1294144 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.7 + - LP: #1268468 + + -- Andy Whitcroft Mon, 24 Mar 2014 10:41:31 +0000 + +linux (3.13.0-19.40) trusty; urgency=low + + [ Tim Gardner ] + + * [Config] Add new mlx modules to d-i + * [Config] Fix d-i spelling error: ahxi_xgene --> ahci_xgene + * [Config] Added Muti-Arch support for linux-headers-PKGVER-ABINUM, + linux-tools-common, and linux-cloud-tools-common + - LP: #1295112 + * Release Tracking Bug + - LP: #1296484 + + [ Upstream Kernel Changes ] + + * Drivers: hv: vmbus: Specify the target CPU that should receive + notification + - LP: #1295813 + + -- Tim Gardner Sun, 23 Mar 2014 19:54:50 -0600 + +linux (3.13.0-19.39) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1295462 + + [ Andy Whitcroft ] + + * ubuntu: aufs3 -- update update scripting + * ubuntu: AUFS -- update to 75dbb997b5812e16771bec20e92449ba0b1705d9 + + [ Anup Patel ] + + * SAUCE: (no-up) KVM: Documentation: Fix typo for KVM_ARM_VCPU_INIT ioctl + * SAUCE: (no-up) arm64: KVM: Force undefined exception for Guest SMC + intructions + + [ dann frazier ] + + * SAUCE: (no-up) Fix pcie-xgene build failure + + [ Emmanuel Grumbach ] + + * SAUCE: iwlwifi: mvm: send udev event upon firmware error to dump logs + + [ Feng Kan ] + + * SAUCE: (no-up) power: reset: Add generic SYSCON register mapped reset + - LP: #1284433 + * SAUCE: (no-up) arm64: dts: Add X-Gene reboot driver dts node + - LP: #1284433 + + [ Hans de Goede ] + + * SAUCE: (no-up) libahci: Allow drivers to override start_engine + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: Add support for devices with more then 1 + clock + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: Add support for an optional regulator for + sata-target power + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: Add enable_ / disable_resources helper + functions + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: "Library-ise" ahci_probe functionality + - LP: #1282920 + * SAUCE: (no-up) ahci-platform: "Library-ise" suspend / resume + functionality + - LP: #1282920 + + [ Iyappan Subramanian ] + + * SAUCE: (no-up) Documentation: APM X-Gene SoC Ethernet DTS binding + documentation + * SAUCE: (no-up) arm64: dts: APM X-Gene SoC Ethernet device tree nodes + * SAUCE: (no-up) drivers: net: APM X-Gene SoC Ethernet base driver + * SAUCE: (no-up) drivers: net: APM X-Gene SoC Ethernet driver error + handling + * SAUCE: (no-up) drivers: net: APM X-Gene SoC Ethernet driver ethtool + support + + [ Loc Ho ] + + * SAUCE: (no-up) Documentation: Add APM X-Gene SoC 15Gbps Multi-purpose + PHY driver binding documentation + - LP: #1282920 + * SAUCE: (no-up) PHY: add APM X-Gene SoC 15Gbps Multi-purpose PHY driver + - LP: #1282920 + * SAUCE: (no-up) arm64: Add APM X-Gene SoC 15Gbps Multi-purpose PHY DTS + entries + - LP: #1282920 + * SAUCE: (no-up) Documentation: Add documentation for the APM X-Gene SoC + SATA host controller DTS binding + * SAUCE: (no-up) ata: Add APM X-Gene SoC AHCI SATA host controller driver + * SAUCE: (no-up) ata: Fix compiler warning with APM X-Gene host + controller driver + * SAUCE: (no-up) arm64: Add APM X-Gene SoC AHCI SATA host controller DTS + entries + * SAUCE: (no-up) clk: arm64: Fix the 
clock-names property for pcppll, + socpll, and socplldiv2 + + [ Marc Zyngier ] + + * SAUCE: (no-up) arm64: KVM: force cache clean on page fault when caches + are off + * SAUCE: (no-up) arm64: KVM: allows discrimination of AArch32 sysreg + access + * SAUCE: (no-up) arm64: KVM: trap VM system registers until MMU and + caches are ON + * SAUCE: (no-up) ARM: KVM: introduce kvm_p*d_addr_end + * SAUCE: (no-up) arm64: KVM: flush VM pages before letting the guest + enable caches + * SAUCE: (no-up) ARM: KVM: force cache clean on page fault when caches + are off + + [ Ming Lei ] + + * SAUCE: (no-up) arm64: apm-storm: support ttyS1 + * SAUCE: (no-up) apm: pcie: fix hang when no card connected + + [ Paolo Pisati ] + + * [Config] armhf: generic: disable CPU_IDLE + * [Config] armhf: CPU_FREQ=y + * [Config] armhf: TI_THERMAL=y + + [ Rameshwar Prasad Sahu ] + + * SAUCE: (no-up) rtc: Add X-Gene SoC Real Time Clock Driver + + [ Ravi Patel ] + + * SAUCE: (no-up) Documentation: misc-devices: APM X-Gene SoC QMTM + * SAUCE: (no-up) Documentation: devicetree: bindings for APM X-Gene SoC + QMTM + * SAUCE: (no-up) misc: xgene: base driver for APM X-Gene SoC QMTM + * SAUCE: (no-up) arm64: boot: dts: entries for APM X-Gene SoC QMTM + * SAUCE: (no-up) misc: xgene: error handling for APM X-Gene SoC QMTM + + [ Roger Quadros ] + + * SAUCE: (no-up) ata: ahci_platform: Manage SATA PHY + - LP: #1282920 + * SAUCE: (no-up) ata: ahci_platform: runtime resume the device before use + - LP: #1282920 + + [ Seth Forshee ] + + * SAUCE: iwlwifi: mvm: Free sram dump immediately after using it + * SAUCE: iwlwifi: mvm: Only notify userspace of fw error dump when one is + created + * SAUCE: iwlwifi: mvm: Don't create fw error dump if there's nothing to + dump + + [ Tanmay Inamdar ] + + * SAUCE: (no-up) arm64: PCI(e) arch support + * SAUCE: (no-up) pci: APM X-Gene PCIe controller driver + * SAUCE: (no-up) arm64: dts: APM X-Gene PCIe device tree nodes + * SAUCE: (no-up) dt-bindings: pci: xgene pcie device tree bindings + * SAUCE: (no-up) MAINTAINERS: entry for APM X-Gene PCIe host driver + + [ Tim Gardner ] + + * [Config] CONFIG_POWER_RESET_SYSCON=y for arm64 + * SAUCE: (no-up) Restrict CONFIG_POWER_RESET_SYSCON to arm64 only + * SAUCE: iwlwifi: Fix FTBS for armhf + * [Config] CONFIG_PHY_XGENE=m for arm64 + * [Config] CONFIG_AHCI_XGENE=m for arm64 + * [Config] CONFIG_NET_XGENE=m for arm64 + * [Config] CONFIG_RTC_DRV_XGENE=m for arm64 + * SAUCE: (no-up) Add drivers/phy/phy-core.ko to generic inclusion list + * [Config] Enable PCI for arm64 + * SAUCE: arm64: export __cpu_clear_user_page for modules + * SAUCE: ARCH_HAS_DMA_GET_REQUIRED_MASK=n for arm64 + * [Config] CONFIG_PCI_XGENE=y + * [Config] CONFIG_PHY_XGENE=y for arm64 + * [Config] CONFIG_RTC_DRV_XGENE=y for arm64 + * d-i: Add ahxi_xgene and xgene-enet + + [ Upstream Kernel Changes ] + + * Staging: rtl8821ae: rc.c: fix up function prototypes + * Staging: rtl8821ae: removed unused functions and variables + * Staging: rtl8821ae: add TODO file + * Staging: rtl8812ae: disable due to build errors + * staging: r8821ae: Fix build problems + * staging: r8821ae: Enable build by reverting BROKEN marking + * staging: rtl8821ae: Fixed the size of array to macro as discussed by + Linus + * staging/rtl8821ae: fix build, depends on MAC80211 + * powerpc: Reclaim two unused thread_info flag bits + * powerpc: Don't corrupt transactional state when using FP/VMX in kernel + * powerpc: Fix transactional FP/VMX/VSX unavailable handlers + * Drivers: hv: Ballon: Make pressure posting thread sleep interruptibly 
+ - LP: #1294253 + * mac80211: don't validate unchanged AP bandwidth while tracking + - LP: #1294558 + * phy-core: Don't propagate -ENOSUPP from phy_pm_runtime_get_sync to + caller + * CONFIG_XGENE_QMTM=m for arm64 + + -- Tim Gardner Thu, 20 Mar 2014 21:51:11 -0400 + +linux (3.13.0-18.38) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1293725 + * [Config] Add hv_balloon to d-i virtio-modules + - LP: #1292216 + + [ Andy Whitcroft ] + + * [Config] d-i -- add virtio_scsi to virtio-modules + - LP: #1288607 + + [ Colin Ian King ] + + * SAUCE: intel_pstate: inform user that thermald is worth considering + + [ dann frazier ] + + * [Config] arm64: KVM=y + + [ Gerd Hoffmann ] + + * SAUCE: vmbus: add missing breaks + - LP: #1287398 + * SAUCE: vmbus: use resource for hyperv mmio region + - LP: #1287398 + * SAUCE: hyperv-fb: add support for generation 2 virtual machines. + - LP: #1287398 + * SAUCE: hyperv-fb: kick off efifb early + - LP: #1287398 + + [ Haiyang Zhang ] + + * SAUCE: hyperv: Change the receive buffer size for legacy hosts + - LP: #1290151 + + [ K. Y. Srinivasan ] + + * SAUCE: Drivers: hv: vmbus: Extract the mmio information from DSDT + - LP: #1287398 + + [ Paolo Pisati ] + + * SAUCE: leds-gpio: of: introduce MODULE_DEVICE_TABLE for module + autoloading + * [Config] amhf: LEDS_TRIGGER_HEARTBEAT=y + + [ Upstream Kernel Changes ] + + * Revert "xhci 1.0: Limit arbitrarily-aligned scatter gather." + - LP: #1293361 + * Revert "USBNET: ax88179_178a: enable tso if usb host supports sg dma" + - LP: #1293361 + * powerpc/tm: Fix crash when forking inside a transaction + * AX88179_178A: Add VID:DID for Lenovo OneLinkDock Gigabit LAN + - LP: #1291890 + * drm/vmwgfx: Fix a surface reference corner-case in legacy emulation + mode + * Input: wacom - scale up touch width and height values for Intuos Pro + * Input: wacom - make sure touch_max is set for touch devices + * Input: wacom - add support for three new Intuos devices + * Input: wacom - add reporting of SW_MUTE_DEVICE events + * Input: wacom - fix wacom->shared guards for dual input devices + * Input: wacom - add support for DTU-1031 + * net: fix for a race condition in the inet frag code + - CVE-2014-0100 + * net: sctp: fix sctp_sf_do_5_1D_ce to verify if we/peer is AUTH capable + - CVE-2014-0101 + * KEYS: Make the keyring cycle detector ignore other keyrings of the same + name + - CVE-2014-0102 + * ipv6: don't set DST_NOCOUNT for remotely added routes + - CVE-2014-2309 + + -- Andy Whitcroft Thu, 13 Mar 2014 10:06:05 +0000 + +linux (3.13.0-17.37) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1290484 + * Revert "SAUCE: hv: Add vss daemon to Makefile" + * Revert "SAUCE: (no-up) tools/hv: add basic Makefile" + * Revert "SAUCE: (no-up) hv -- bodge hv_kvp_daemon so it can use the + local linux/hyperv.h" + * Revert "SAUCE: (no-up) hv -- bodge hv_vss_daemon so it can use the + local linux/hyperv.h" + * Revert "SAUCE: SELinux: security_load_policy: Silence frame-larger-than + warning" + * Revert "SAUCE: ARM: OMAP4460: cpuidle: Extend + PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD on cpuidle" + * ubuntu: overlayfs -- use kernel service credentials for copy up and + xattr manipulations + * [Packaging] tools -- hv tools build correctly against the built headers + * [Packaging] cloud-tools -- add the hv_fcopy_daemon to the package + * rebase to v3.13.6 + + [ Anton Blanchard ] + + * SAUCE: ibmveth: Fix endian issues with MAC addresses + + [ Bjarke Istrup Pedersen ] + + * SAUCE: hv: Add hyperv.h to uapi 
headers + - LP: #1282700 + + [ Fengguang Wu ] + + * SAUCE: Drivers: hv: fcopy_open() can be static + - LP: #1282700 + + [ K. Y. Srinivasan ] + + * SAUCE: Drivers: hv: Implement the file copy service + - LP: #1282700 + + [ Tim Gardner ] + + * SAUCE: (no-up) mei: Fix stable update misapplication + * SAUCE: (no-up) mei_me: Add module parameter to disable MSI + + [ Upstream Kernel Changes ] + + * arm64: KVM: Add Kconfig option for max VCPUs per-Guest + * arm64: KVM: Support X-Gene guest VCPU on APM X-Gene host + * ACPI / EC: Clear stale EC events on Samsung systems + * drm/ttm: Fix TTM object open regression + * SELinux: security_load_policy: Silence frame-larger-than warning + * ARM: OMAP4460: cpuidle: Extend PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD on + cpuidle + + [ Upstream Kernel Changes ] + + * rebase to v3.13.6 + - LP: #1282369 + - LP: #1260303 + + -- Tim Gardner Wed, 05 Mar 2014 06:52:34 -0700 + +linux (3.13.0-16.36) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1287903 + + [ Andy Whitcroft ] + + * Revert "[Config] lowlatency -- turn CONFIG_IRQ_FORCED_THREADING_DEFAULT + off temporarily" + + [ Chris Bainbridge ] + + * SAUCE: x86: set Pentium M as PAE capable + - LP: #930447 + + [ Dave Jones ] + + * SAUCE: taint: repurpose TAINT_UNSAFE_SMP to TAINT_CPU_OUT_OF_SPEC + - LP: #930447 + + [ Paolo Pisati ] + + * [Config] SND_DAVINCI_SOC && SND_AM33XX_SOC_EVM =m + * [Config] armhf: DRM_TILCDC=m + + [ Philippe Bergheaud ] + + * SAUCE: powerpc: fix xmon disassembler for little-endian + - LP: #1286255 + + [ Tim Gardner ] + + * [Config] CONFIG_MICROCODE_EARLY=y + * [Config] CONFIG_R8821AE=m + * [Config] Add some virtio drivers to -virtual + - LP: #1287401 + * [Config] inclusion-list: vesafb and virtio_balloon are built-in + * SAUCE: vmwgfx: Expose U32_MAX + + [ Upstream Kernel Changes ] + + * Revert "drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm + reservation calls behave like reservation calls"" + * Revert "drm/vmwgfx: Fix the driver for large dma addresses" + * usb: ehci: fix deadlock when threadirqs option is used + - LP: #1274987, #1279081 + * Staging: rtl8812ae: Add Realtek 8821 PCI WIFI driver + - LP: #1287298 + * intel_pstate: Remove periodic P state boost + * intel_pstate: Add trace point to report internal state. + * intel_pstate: Take core C0 time into account for core busy calculation + * intel_pstate: Use LFM bus ratio as min ratio/P state + * intel_pstate: Add support for Baytrail turbo P states + * intel_pstate: Change busy calculation to use fixed point math. 
+ * PM / hibernate: Fix restore hang in freeze_processes() + * ipmi: remove deprecated IRQF_DISABLED + * ipmi: use USEC_PER_SEC instead of 1000000 for more meaningful + * ipmi: fix timeout calculation when bmc is disconnected + * ipmi: Cleanup error return + * ipmi: Add missing rv in ipmi_parisc_probe() + * drm/ttm: ttm object security fixes for render nodes + * drivers: gpu: Mark functions as static in vmwgfx_kms.c + * drivers: gpu: Mark functions as static in vmwgfx_buffer.c + * drivers: gpu: Mark functions as static in vmwgfx_fence.c + * drm/vmwgfx: Fix the driver for large dma addresses + * drm/vmwgfx: Update the svga3d register header file for new device + version + * drm/vmwgfx: Update the driver user-space interface for guest-backed + objects + * drm/vmwgfx: Replace vram_size with prim_bb_mem for calculation of max + resolution + * drm/vmwgfx: Update the svga register definition + * drm/vmwgfx: Adapt capability reporting to new hardware version + * drm/vmwgfx: Add MOB management + * drm/vmwgfx: Hook up MOBs to TTM as a separate memory type + * drm/vmwgfx: Read bounding box memory from the appropriate register + * drm/vmwgfx: Add the possibility to validate a buffer as a MOB + * drm/vmwgfx: Hook up guest-backed queries + * drm/vmwgfx: Detach backing store from its resources when it is evicted + * drm/vmwgfx: Hook up guest-backed contexts + * drm/vmwgfx: Hook up guest-backed surfaces + * drm/vmwgfx: Add guest-backed shaders + * drm/vmwgfx: Validate guest-backed shader const commands + * drm/vmwgfx: Add new unused (by user-space) commands to the verifier + * drm/vmwgfx: Enable 3D for new hardware version + * drm/vmwgfx: Fix up the vmwgfx_drv.h header for new files + * drm/vmwgfx: Extend the command verifier to handle guest-backed on / off + * drm/vmwgfx: Make sure that the multisampling is off + * drm/vmwgfx: Implement a buffer object synccpu ioctl. 
+ * drm/vmwgfx: Add a parameter to get max MOB memory size + * drm/vmwgfx: Block the BIND_SHADERCONSTS command + * drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf + * drm/vmwgfx: Persistent tracking of context bindings + * drm/vmwgfx: Ditch the vmw_dummy_query_bo_prepare function + * drm/vmwgfx: Use the linux DMA api also for MOBs + * drm/vmwgfx: Update otable definitions + * drm/vmwgfx: Fix surface framebuffer check for guest-backed surfaces + * drm/vmwgfx: Implement 64-bit Otable- and MOB binding v2 + * drm/vmwgfx: Silence the device command verifier + * drm/vmwgfx: Invalidate surface on non-readback unbind + * drm/vmwgfx: Fix recently introduced sparse / smatch warnings and errors + * drm/vmwgfx: Don't commit staged bindings if execbuf fails + * drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm reservation + calls behave like reservation calls" + * drm/vmwgfx: Fix SET_SHADER_CONST emulation on guest-backed devices + * drm/vmwgfx: Fix legacy surface reference size copyback + * drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2 + * drm/vmwgfx: Detect old user-space drivers and set up legacy emulation + v2 + * drm/vmwgfx: Reemit context bindings when necessary v2 + * vmwgfx: Fix unitialized stack read in vmw_setup_otable_base + * drm/vmwgfx: Fix a couple of sparse warnings and errors + * drm/vmwgfx: Get maximum mob size from register SVGA_REG_MOB_MAX_SIZE + * drm/vmwgfx: unlock on error path in vmw_execbuf_process() + * drm/vmwgfx: Remove stray const + * drm/vmwgfx: Fix possible integer overflow + * drm/vmwgfx: Fix command defines and checks + * drm/vmwgfx: Remove some unused surface formats + * drm/vmwgfx: Make sure backing mobs are cleared when allocated. Update + driver date. + * drm/vmwgfx: avoid null pointer dereference at failure paths + + -- Tim Gardner Mon, 03 Mar 2014 13:04:10 -0700 + +linux (3.13.0-15.35) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1287175 + + [ Andy Whitcroft ] + + * [Config] tools -- enable cpupower on ppc64el + * [Config] ppc64el -- enable perf tools + * [Config] powerpc -- enable perf tools + * [Config] ppc64el -- reduce MAX_ORDER with 64k pages + * [Config] ppc64el -- switch to 64K system pages + + [ Benjamin Herrenschmidt ] + + * SAUCE: powerpc/powernv: Add iommu DMA bypass support for IODA2 + + [ Paul Mackerras ] + + * SAUCE: powerpc: Increase stack redzone for 64-bit userspace to 512 + bytes + + [ Upstream Kernel Changes ] + + * perf trace: Fix ioctl 'request' beautifier build problems on !(i386 || + x86_64) arches + * kvm, vmx: Really fix lazy FPU on nested guest + - LP: #1278531 + + -- Tim Gardner Mon, 03 Mar 2014 13:22:56 +0000 + +linux (3.13.0-14.34) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1285851 + + [ Andy Whitcroft ] + + * [Config] d-i -- add hyperv_keyboard to serial-modules udeb + - LP: #1285434 + + [ Hannes Frederic Sowa ] + + * SAUCE: ipv6: honor IPV6_PKTINFO with v4 mapped addresses on sendmsg + - LP: #1284535 + + [ Jason Wang ] + + * SAUCE: x86, hyperv: bypass the timer_irq_works() check + - LP: #1282693 + + [ Paolo Pisati ] + + * [Config] disable FB_OMAP2, DRM_OMAP=m + + [ Upstream Kernel Changes ] + + * ipv6: make IPV6_RECVPKTINFO work for ipv4 datagrams + - LP: #1284535 + * mei: remove flash_work_queue + * mei: drop redundant list_del_init + * mei: cleanup mei_irq_read_handler + * mei: enable marking internal commands + * mei: me: set dma mask using DMA mapping API + * Documentation/misc-devices/mei/mei-amt-version.c: remove unneeded 
call + of mei_deinit() + * mei: do not run reset flow from the interrupt thread + * mei: nfc: mei_nfc_free has to be called under lock + * mei: fix syntax in comments and debug output + * mei: revamp mei reset state machine + * mei: limit the number of consecutive resets + * mei: set client's read_cb to NULL when flow control fails + + -- Tim Gardner Wed, 26 Feb 2014 08:43:20 -0500 + +linux (3.13.0-13.33) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1284614 + * [Config] powerpc -- CONFIG_SCSI_IBMVSCSI=y + * [Config] CONFIG_RT_GROUP_SCHED=n + - LP: #1284731 + + [ Stefan Bader ] + + * [Config] Revert back to build IPMI as a module on all arches + + [ Tim Gardner ] + + * rebase to v3.13.5 + + [ Upstream Kernel Changes ] + + * cifs: ensure that uncached writes handle unmapped areas correctly + * rebase to v3.13.5 + + -- Andy Whitcroft Tue, 25 Feb 2014 12:15:09 +0000 + +linux (3.13.0-12.32) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1283074 + + * Revert "SAUCE: Drivers: hv: vmbus: Specify the target CPU that should + receive notification" + * [Packaging] tools -- clean up generic so it gives more targeted hints + * [Packaging] tools -- fix relative links + * rebase to v3.13.4 + + [ Colin Ian King ] + + * [Config][v2] armhf, arm64, powerpc, ppc64el: IPMI_SI=m + + [ Paolo Pisati ] + + * [Config] armhf: SOC_AM33XX=y + * [Config] armhf: TI_CPSW, TI_CPTS and TI_DAVINCI_[CPDMA|MDIO] = y + * [Config] armhf: RTC_DRV_OMAP=y + * [Config] build beaglebone and beaglebone black dtbs + + [ Tony Breeds ] + + * SAUCE: powerpc/le: Ensure that the 'stop-self' RTAS token is handled + correctly + - LP: #1282712 + + [ Upstream Kernel Changes ] + + * Drivers: hv: vmbus: Specify the target CPU that should receive + notification + - LP: #1282694 + * Drivers: hv: vmbus: Don't timeout during the initial connection with + host + - LP: #1282694 + * hyperv: Add support for physically discontinuous receive buffer + - LP: #1282695 + * Input: hyperv-keyboard - pass through 0xE1 prefix + - LP: #1282699 + * rebase to v3.13.4 + + -- Andy Whitcroft Fri, 21 Feb 2014 12:51:17 +0000 + +linux (3.13.0-11.31) trusty; urgency=low + + [ Colin Ian King ] + + * [config] Set IPMI support default to "y" + + [ Tim Gardner ] + + * [Config] CONFIG_USER_NS=y for ppc64el + * [Config] CONFIG_RESOURCE_COUNTERS=y for ppc64el + * Release Tracking Bug + - LP: #1282243 + + -- Tim Gardner Wed, 19 Feb 2014 12:04:43 -0500 + +linux (3.13.0-10.30) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] tools -- fix up do_tools interface which is assumed by + external tooling + + [ Tim Gardner ] + + * [Config] CONFIG_CGROUP_SCHED=y for ppc64el + * [Config] CONFIG_BLK_CGROUP=y for ppc64el + * [Config] CONFIG_USB_XHCI_HCD=y for ppc64el + * [Config] CONFIG_CGROUP_PERF=y for ppc64el + * Release Tracking Bug + - LP: #1281783 + + [ Upstream Kernel Changes ] + + * be2net: Use MCC_CREATE_EXT_V1 cmd for Skyhawk-R + - LP: #1281446 + * be2net: don't set "pport" field when querying "pvid" + - LP: #1281446 + * be2net: Log the profile-id used by FW during driver initialization + - LP: #1281446 + * be2net: do not call be_set/get_fw_log_level() on Skyhawk-R + - LP: #1281446 + * be2net: ignore mac-addr set call for an already programmed mac-addr + - LP: #1281446 + * be2net: fix incorrect setting of cmd_privileges for VFs + - LP: #1281446 + * be2net: Remove "10Gbps" from driver description string + - LP: #1281446 + * be2net: do not use frag index in the RX-compl entry + - LP: #1281446 + * be2net: 
use GET_MAC_LIST cmd to query mac-address from a pmac-id + - LP: #1281446 + * be2net: cleanup wake-on-lan code + - LP: #1281446 + * be2net: update driver version to 10.0.x + - LP: #1281446 + * be2net: Fix be_vlan_add/rem_vid() routines + - LP: #1281446 + + -- Tim Gardner Tue, 18 Feb 2014 13:13:24 -0700 + +linux (3.13.0-9.29) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1280291 + * [Config] lowlatency -- turn CONFIG_IRQ_FORCED_THREADING_DEFAULT off + temporarily + * [Packaging] tools -- split out "cloud only" tools into a cloud tools + package + + [ Paolo Pisati ] + + * [Config] armhf: DISPLAY_CONNECTOR_HDMI && DISPLAY_ENCODER_TPD12S015 =y + + [ Tim Gardner ] + + * rebase to v3.13.3 + + [ Upstream Kernel Changes ] + + * Revert "mmc: sdhci-pci: Fix possibility of chip->fixes being null" + * Revert "mmc: sdhci-pci: Fix BYT sd card getting stuck in runtime + suspend" + * NVMe: Avoid shift operation when writing cq head doorbell + - LP: #1256155 + * NVMe: remove deprecated IRQF_DISABLED + - LP: #1256155 + * NVMe: compat SG_IO ioctl + - LP: #1256155 + * NVMe: Fix lockdep warnings + - LP: #1256155 + * NVMe: Cache dev->pci_dev in a local pointer + - LP: #1256155 + * NVMe: Device resume error handling + - LP: #1256155 + * NVMe: Schedule reset for failed controllers + - LP: #1256155 + * NVMe: Abort timed out commands + - LP: #1256155 + * NVMe: Surprise removal handling + - LP: #1256155 + * NVMe: Async IO queue deletion + - LP: #1256155 + * NVMe: Dynamically allocate partition numbers + - LP: #1256155 + * NVMe: Disable admin queue on init failure + - LP: #1256155 + * NVMe: Add a pci_driver shutdown method + - LP: #1256155 + * NVMe: Include device and queue numbers in interrupt name + - LP: #1256155 + * NVMe: Correct uses of INIT_WORK + - LP: #1256155 + * NVMe: Namespace use after free on surprise removal + - LP: #1256155 + * intel_pstate: Add setting voltage value for baytrail P states. + - LP: #1270736 + * mmc: sdhci-pci: break out definitions to header file + * mmc: sdhci-pci: add support of O2Micro/BayHubTech SD hosts + * mmc: sdhci-pci: Fix BYT sd card getting stuck in runtime suspend + * mmc: sdhci-pci: Fix possibility of chip->fixes being null + + [ Upstream Kernel Changes ] + + * rebase to v3.13.3 + + -- Tim Gardner Thu, 13 Feb 2014 06:08:09 -0700 + +linux (3.13.0-8.28) trusty; urgency=low + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1278963 + + [ Paolo Pisati ] + + * [Config] armhf: RTC_DRV_PL031=y + + [ Serge Hallyn ] + + * SAUCE: Overlayfs: allow unprivileged mounts + + [ Upstream Kernel Changes ] + + * kexec: add sysctl to disable kexec_load + - LP: #1259570 + * SELinux: Fix kernel BUG on empty security contexts. 
+ - CVE-2014-1874 + + -- Tim Gardner Tue, 11 Feb 2014 08:35:39 -0500 + +linux (3.13.0-8.27) trusty; urgency=low + + [ John Johansen ] + + * SAUCE: Add config option to disable new apparmor 3 semantics + - LP: #1270215 + + [ Tim Gardner ] + + * [debian] Fix indep_hdrs_pkg_name + - LP: #1134441 + * Update lttng to 00808267d3ba7cdcddfed7bec7e62a40463c1307 Version 2.4.0-rc3 + * Enabled lttng build + * Don't build lttng for armhf + lttng hates gcc-4.8 for armhf + * Release Tracking Bug + - LP: #1277309 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.2 + - LP: #1260303 + - LP: #1260303 + - LP: #1268468 + + -- Tim Gardner Thu, 06 Feb 2014 09:25:51 -0700 + +linux (3.13.0-7.26) trusty; urgency=low + + [ John Johansen ] + + * SAUCE: apparmor: fix uninitialized lsm_audit member + - LP: #1268727 + * Add config option to optionally enable new apparmor 3 semantics + + [ Tim Gardner ] + + * [Config] Add lowlatency to getabis + * [Config] CONFIG_SECURITY_APPARMOR_AA3_SEMANTICS=y + - LP: #1270215 + * Release Tracking Bug + - LP: #1276810 + + [ Upstream Kernel Changes ] + + * x86, x32: Correct invalid use of user timespec in the kernel + - LP: #1274349 + - CVE-2014-0038 + + -- Tim Gardner Wed, 05 Feb 2014 15:49:44 -0500 + +linux (3.13.0-7.25) trusty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: fix fmd headers" + * SAUCE: fix fmd headers + + -- Andy Whitcroft Tue, 04 Feb 2014 09:36:09 +0000 + +linux (3.13.0-7.24) trusty; urgency=low + + [ Stefan Bader ] + + * [Config] Make vmwgfx driver enable the framebuffer device + + [ Tim Gardner ] + + * rebase to v3.13.1 + * [Config] CONFIG_NFS_FS=m for ppc64el + * [Config] CONFIG_X86_SYSFB=n + https://lists.ubuntu.com/archives/kernel-team/2014-February/038166.html + * Release Tracking Bug + - LP: #1275898 + + [ Upstream Kernel Changes ] + + * i2c: piix4: Add support for AMD ML and CZ SMBus changes + - LP: #1272525 + * i2c: piix4: Use different message for AMD Auxiliary SMBus Controller + - LP: #1272525 + * mm: ignore VM_SOFTDIRTY on VMA merging + - LP: #1274917 + * drm/radeon: disable dpm on BTC + - LP: #1266984 + + [ Upstream Kernel Changes ] + + * rebase to v3.13.1 + + -- Tim Gardner Thu, 30 Jan 2014 15:24:48 +0000 + +linux (3.13.0-6.23) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] fix up architecture for linux-tools + + -- Andy Whitcroft Thu, 30 Jan 2014 09:00:41 +0000 + +linux (3.13.0-6.22) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] limit linux-udebs- to matching arch + * [Config] powerpc -- disable perf to fix FTBFS + * [Config] ppc64el -- fix up missing udebs + + -- Andy Whitcroft Wed, 29 Jan 2014 16:00:28 +0000 + +linux (3.13.0-6.21) trusty; urgency=low + + [ Andy Fleming ] + + * SAUCE: net: Add support for handling queueing in hardware + * SAUCE: of_mdio: Add of_phy_attach function + * SAUCE: phylib: Add generic 10G driver + * SAUCE: phylib: Support attaching to gen10g_driver + * SAUCE: phylib: Add Clause 45 read/write functions + + [ Andy Whitcroft ] + + * SAUCE: fix fmd headers + * [Packaging] lowlatency -- merge out of tree flavours + * SAUCE: allow IRQs to be irq-threaded by default via config + * [Config]: enable CONFIG_IRQ_FORCED_THREADING_DEFAULT for lowlatency + * [Config] powerpc -- fix up Build-depends: + * Release Tracking Bug + - LP: #1273747 + + [ Ben Collins ] + + * SAUCE: PPC: PCI: Fix pcibios_io_space_offset() so it works for 32-bit + ptr/64-bit rsrcs + * SAUCE: Revert "phy: vitesse make vsc824x_add_skew static" + * SAUCE: Fixup freescale usb phy driver to work on ppc64 + * SAUCE: xgmac_mdio: 
Silence read errors + * SAUCE: Provide booke stub for kvmppc_is_bigendian() + * SAUCE: Fix stack overflow on ppc32 + * SAUCE: Use resource_size_t instead of long for PCI resource address + * SAUCE: net/phy: Export function for use by dpaa_eth + * [Packaging] powerpc -- merge out of tree powerpc arch + + [ Bjorn Helgaas ] + + * SAUCE: Revert "EISA: Log device resources in dmesg" + - LP: #1251816 + * SAUCE: Revert "EISA: Initialize device before its resources" + - LP: #1251816 + + [ Emil Medve ] + + * SAUCE: phylib: Minimum hack to get the generic 10G PHY driver to work + with 10G "fixed-link"s + + [ Kumar Gala ] + + * SAUCE: fsl_qbman: Add drivers for Freescale DPAA Qman & Bman + * SAUCE: fsl_pme2: Add support for DPAA PME + * SAUCE: fmd: FMD14 integration + * SAUCE: dpaa_eth: Ethernet driver for Freescale QorIQ DPA Architecture + * SAUCE: powerpc/85xx: Add DPAA/networking support for CoreNet + + [ Madalin Bucur ] + + * SAUCE: net/flow: remove sleeping and deferral mechanism from + flow_cache_flush + * SAUCE: net/phy: abort genphy_read_status when link changes during speed + and duplex reading + + [ Stefan Bader ] + + * [Config] move some VMWare related modules into main package + - LP: #1271669 + + [ Tim Gardner ] + + * [Config] Add r815x to nic-modules + - LP: #1273735 + * [Config] CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y + - LP: #239479 + + [ Upstream Kernel Changes ] + + * mmc: sdhci-pci: break out definitions to header file + - LP: #1239938 + * mmc: sdhci-pci: add support of O2Micro/BayHubTech SD hosts + - LP: #1239938 + * powerpc/book3e: rename interrupt_end_book3e with __end_interrupts + * powerpc/book3e: support CONFIG_RELOCATABLE + * book3e/kexec/kdump: enable kexec for kernel + * book3e/kexec/kdump: create a 1:1 TLB mapping + * book3e/kexec/kdump: introduce a kexec kernel flag + * book3e/kexec/kdump: implement ppc64 kexec specfic + * book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET + * book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB + + -- Andy Whitcroft Tue, 28 Jan 2014 22:59:46 +0000 + +linux (3.13.0-5.20) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] tools -- do not make symlinks when not making packages + * [Packaging] tools -- tidy up control.stub.in ordering + * [Packaging] tools -- tools-common is shared and not conditional + * rebase to v3.13 + + [ Dirk Brandewie ] + + * SAUCE: intel_pstate: Add setting voltage value for baytrail P states. 
+ + [ KY Srinivasan ] + + * SAUCE: Drivers: hv: vmbus: Specify the target CPU that should receive + notification + + [ Upstream Kernel Changes ] + + * rebase to v3.13 + - LP: #1270603 + + -- Andy Whitcroft Fri, 17 Jan 2014 15:45:31 +0000 + +linux (3.13.0-4.19) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] libunwind8-dev is now available for ppc64el + * [Packaging] tools -- make cpupower optional + * [Packaging] tools -- enable correctly for x86 + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1269872 + + [ Upstream Kernel Changes ] + + * SAUCE: ARM: OMAP: hwmod: Add SYSC offsets for AES IP + * SAUCE: ARM: OMAP4: hwmod: Add hwmod data for AES IP + * SAUCE: OMAP: AM33xx: hwmod: Correct AES module SYSC type + * SAUCE: crypto: omap-aes: add error check for pm_runtime_get_sync + + [ Upstream Kernel Changes ] + + * rebase to 85ce70fdf48aa290b4845311c2dd815d7f8d1fa5 + + -- Tim Gardner Wed, 15 Jan 2014 13:23:05 +0000 + +linux (3.13.0-3.18) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.13-rc8 + * [Packaging] efi -- allow EFI signatures on any arch + + [ Tim Gardner ] + + * [Config] Fix vcs-git path + * Release Tracking Bug + - LP: #1268683 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc8 + + -- Andy Whitcroft Sun, 12 Jan 2014 11:58:01 +0000 + +linux (3.13.0-2.17) trusty; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1267809 + * [Config] apply Platform support>>CPUIdle driver>>CPU Idle config + defaults + * [Config] apply Platform support>>CPU Frequency scaling config defaults + * [Config] CONFIG_PARIDE_EPATC8=y + * [Config] apply Device Drivers >> Broadcom specific AMBA config defaults + * [Config] apply Bus options >> PCI support >> RapidIO support >> RapidIO + Switch drivers config defaults + * [Config] apply Cryptographic API config defaults + * [Config] apply Device Drivers >> Common Clock Framework config defaults + * [Config] apply Device Drivers >> Distributed Switch Architecture + drivers config defaults + * [Config] apply Device Drivers >> Graphics support >> Backlight & LCD + device support config defaults + * [Config] apply Device Drivers >> Graphics support >> Support for frame + buffer devices >> Bootup logo config defaults + * annotations -- update in tree annotations + * [Config] apply Bus options >> PCI support >> RapidIO support config + defaults + * [Config] CONFIG_POWER_AVS=y CONFIG_RESET_CONTROLLER=y + * [Config] apply Device Drivers >> 1-wire Bus Masters config defaults + * [Config] apply CAN Device Drivers >> Platform CAN drivers with Netlink + support config defaults + * [Config] apply Device Drivers >> Character devices >> Serial drivers + config defaults + * [Config] apply Device Drivers >> Generic Thermal sysfs driver config + defaults + * [Config] apply Device Drivers >> Character devices >> TPM Hardware + Support config defaults + * [Config] apply Device Drivers >> Character devices config defaults + * [Config] apply Device Drivers >> HID support >> USB HID support >> USB + HID transport layer config defaults + * [Config] apply Device Drivers >> HID support >> HID bus support config + defaults + * [Config] apply Device Drivers >> HID support >> USB HID support config + defaults + * annotations -- update in tree annotations + * [Config] apply Device Drivers >> GPIO Support config defaults + * [Config] update configs for apparmour update + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot + * SAUCE: apparmor: fix unix domain sockets to be mediated on connection + - 
LP: #1208988 + * SAUCE: apparmor: allocate path lookup buffers during init + - LP: #1208988 + * SAUCE: (no-up) apparmor: Fix tasks not subject to, reloaded policy + - LP: #1236455 + + [ Tim Gardner ] + + * Revert "[Debian] getabis: Preface module with package name" + * [Config] Added ppc64el to getabis + * [packaging] Bump ABI for every new release + + -- Andy Whitcroft Fri, 10 Jan 2014 11:48:39 +0000 + +linux (3.13.0-1.16) trusty; urgency=low + + * First 3.13 upload. + * Release tracker + - LP: #1266852 + + -- Tim Gardner Tue, 07 Jan 2014 09:21:26 -0700 + +linux (3.13.0-0.15) trusty; urgency=low + + [ Tim Gardner ] + + * rebase to v3.13-rc7 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc7 + + -- Tim Gardner Sun, 05 Jan 2014 06:13:33 -0700 + +linux (3.13.0-0.14) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to 7a262d2ed9fa42fad8c4f243f8025580b58cf2f6 + + [ Tim Gardner ] + + * Remove ubuntu/dm-raid4-5 in favor of CONFIG_MD_RAID456 + * Update lttng to Version 2.4.0-rc2 + * lttng: Disabled trace_kvm_async_pf_completed + * [Config] CONFIG_IMA=y + - LP: #1244627 + + [ Upstream Kernel Changes ] + + * rebase to 7a262d2ed9fa42fad8c4f243f8025580b58cf2f6 + + -- Tim Gardner Thu, 02 Jan 2014 12:57:13 -0700 + +linux (3.13.0-0.13) trusty; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: aufs3 -- (no-up) aufs3-base.patch + * ubuntu: aufs3 -- (no-up) aufs3-mmap.patch + * ubuntu: aufs3 -- (no-up) aufs3-standalone.patch + * ubuntu: AUFS (no-squash): basic framework and update machinary + * ubuntu: AUFS -- update to 7b136a27b021da9010d8b6c101939dd298e46be7 + * ubuntu: aufs3 -- enable + * ubuntu: aufs3 -- update configs + + -- Andy Whitcroft Thu, 02 Jan 2014 09:41:02 +0000 + +linux (3.13.0-0.12) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc6 + + -- Tim Gardner Tue, 31 Dec 2013 06:16:03 -0700 + +linux (3.13.0-0.11) trusty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: suspicious unlocked ->status reading and writing in ipc/sem.c + * [Config] ppc64el -- initial defconfig based -generic flavour + * [Config] initial defconfig for ppc64el + * [Config] ubuntuise ppc64el config + * [Config] ubuntuise ppc64el config part 2 + * [Config] d-i -- update empty udebs list + * [Config] ppc64el -- split extras package + + [ Anton Blanchard ] + + * SAUCE: KVM: PPC: Book3S HV: Add little-endian guest support + + [ Benjamin Herrenschmidt ] + + * SAUCE: powerpc/powernv: Add calls to support little endian + + [ Cédric Le Goater ] + + * SAUCE: KVM: PPC: Book3S: add helper routine to load guest instructions + * SAUCE: KVM: PPC: Book3S: add helper routines to detect endian order + * SAUCE: KVM: PPC: Book3S: MMIO emulation support for little endian + guests + + [ Paul E. 
McKenney ] + + * SAUCE: powerpc: Make 64-bit non-VMX copy_tofrom_user() bi-endian + + -- Andy Whitcroft Fri, 27 Dec 2013 16:48:55 +0000 + +linux (3.13.0-0.10) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.13-rc5 + * [Config] updateconfigs following rebase to v3.13-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc5 + - LP: #1260303 + - LP: #1260303 + - LP: #1260225 + + -- Andy Whitcroft Mon, 23 Dec 2013 12:48:28 +0000 + +linux (3.13.0-0.9) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i -- allow missing firmware + + -- Andy Whitcroft Fri, 20 Dec 2013 17:57:06 +0000 + +linux (3.13.0-0.8) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] annotations -- first pass over entire config + * [Config] drop libunwind8-dev from Build-Depends for ppc64el + + [ Tim Gardner ] + + * [Config] Add arm64 device tree files + - LP: #1262901 + + -- Andy Whitcroft Thu, 19 Dec 2013 18:36:43 +0000 + +linux (3.13.0-0.7) trusty; urgency=low + + [ Rajesh B Prathipati ] + + * SAUCE: powerpc: Make unaligned accesses endian-safe for powerpc + + [ Tim Gardner ] + + * [Config] CONFIG_REGULATOR_S2MPS11=n for FTBS + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc4 + - LP: #1259790 + - LP: #1259437 + - LP: #1259435 + + -- Tim Gardner Fri, 13 Dec 2013 07:56:34 -0700 + +linux (3.13.0-0.6) trusty; urgency=low + + [ Paolo Pisati ] + + * [Config] armhf: arm64: VIRTIO_[BLK|MMIO|NET|CONSOLE|BALLOON]=y + * [Config] i386: amd64: VIRTIO_CONSOLE=y + + [ Tim Gardner ] + + * [Config] CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y + + -- Tim Gardner Sun, 08 Dec 2013 09:22:01 -0700 + +linux (3.13.0-0.5) trusty; urgency=low + + [ Andy Whitcroft ] + + * correct bug listing for v3.13-rc2 rebase + * [Config] ppc64el -- create linux-libc-dev + * [Debian] Improve tools version message + - LP: #1257715 + + [ Serge Hallyn ] + + * SAUCE: fork: Allow CLONE_PARENT after setns(CLONE_NEWPID)] + - LP: #1248590 + * SAUCE: vfs: Fix a regression in mounting proc + + [ Tim Gardner ] + + * [Config] Build-in ohci-pci + - LP: #1244176 + * Rebase to v3.13-rc3 + + [ Upstream Kernel Changes ] + + * Revert "Revert "fork: unify and tighten up CLONE_NEWUSER/CLONE_NEWPID + checks"" + - LP: #1248590 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc3 + - LP: #1256840 + - LP: #1256212 + + -- Tim Gardner Sat, 07 Dec 2013 07:55:39 -0700 + +linux (3.13.0-0.4) trusty; urgency=low + + [ Tim Gardner ] + + * Rebase to v3.13-rc2 + + [ Upstream Kernel Changes ] + + * rebase to v3.13-rc2 + + -- Tim Gardner Fri, 29 Nov 2013 23:54:05 -0500 + +linux (3.13.0-0.3) trusty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: Fix DocBook FTBS" + + [ Tim Gardner ] + + * [Debian] Re-sign modules after debug objcopy + - LP: #1253155 + * [Config] CONFIG_EXT4_USE_FOR_EXT23=y + + [ Upstream Kernel Changes ] + + * doc: fix generation of device-drivers + * rebase to b975dc3689fc6a3718ad288ce080924f9cb7e176 + + -- Tim Gardner Tue, 26 Nov 2013 12:24:42 -0700 + +linux (3.13.0-0.2) trusty; urgency=low + + [ Tim Gardner ] + + * SAUCE: Fix DocBook FTBS + + -- Tim Gardner Mon, 25 Nov 2013 13:24:15 -0700 + +linux (3.13.0-0.1) trusty; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to 7e3528c3660a2e8602abc7858b0994d611f74bc3 + + -- Tim Gardner Tue, 12 Nov 2013 07:28:53 -0700 + +linux (3.13.0-0.0) trusty; urgency=low + + * Major release bump. + + -- Andy Whitcroft Tue, 12 Nov 2013 21:37:52 +0000 + +linux (3.12.0-2.7) trusty; urgency=low + + * Fixed armhf ABI build failure. 
+ * Release tracker + - LP: #1249477 + + -- Tim Gardner Fri, 08 Nov 2013 16:22:45 -0700 + +linux (3.12.0-2.6) trusty; urgency=low + + [ Joseph Salisbury ] + + * SAUCE: tg3: Add support for new 57786 device id. + - LP: #1242610 + + [ Tim Gardner ] + + * [Config] CONFIG_OABI_COMPAT=n + * [Config] add the wandboard to shipped dtb + - LP: #1249421 + * Release tracker + - LP: #1249477 + + -- Tim Gardner Fri, 08 Nov 2013 12:23:18 -0700 + +linux (3.12.0-2.5) trusty; urgency=low + + [ Andy Whitcroft ] + + * rebase to mainline v3.12 + * [Config] updateconfigs following rebase to v3.12 + * postinst -- improve relative symlink detection with missing files + - LP: #1248053 + * postinst -- fix unchanged link detection + * [Config] update configs following addition of apparmor fixes + + [ Anthony Wong ] + + * SAUCE: Work around broken ACPI backlight on Dell Inspiron 5537 + - LP: #1231305 + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot + * SAUCE: apparmor: fix unix domain sockets to be mediated on connection + - LP: #1208988 + * SAUCE: apparmor: allocate path lookup buffers during init + - LP: #1208988 + + [ Tim Gardner ] + + * [Config] Remove superfluous ubuntu/lttng-modules + + [ Upstream Kernel Changes ] + + * Revert "fork: unify and tighten up CLONE_NEWUSER/CLONE_NEWPID checks" + - LP: #1248590 + + [ Upstream Kernel Changes ] + + * rebase to v3.12 + - LP: #1222850 + + [ Adam Conrad ] + * etc/getabis: Fetch arm64/generic abis as well + + -- Andy Whitcroft Wed, 06 Nov 2013 21:00:21 +0000 + +linux (3.12.0-1.3) trusty; urgency=low + + [ Andy Whitcroft ] + + * [Config] arm64 -- add arch to the configuration handlers + * [Config] arm64 -- add generic flavour + * [Config] arm64 -- default config + * [Config] arm64 -- fix up various FTBFS config options + * SAUCE: arm64: export __copy_in_user to modules + * [Config] arm64 -- disable ABI/module checks + * [Config] arm64 -- enforcer -- add arm64 to the enforcer + * [Config] arm64 -- enable udebs for arm64 + + [ Colin Watson ] + + * [Config] Clean up various udeb Provides + + [ Paolo Pisati ] + + * [Config] AHCI_IMX=y + * [Config] build imx*-wandboard dtbs + + [ Serge Hallyn ] + + * SAUCE: device_cgroup: remove can_attach + + [ Tim Gardner ] + + * rebase to v3.12-rc7 + * SAUCE: KVM: Fix modprobe failure for kvm_intel/kvm_amd + * Release tracker + - LP: #1245932 + + [ Upstream Kernel Changes ] + + * rebase to v3.12-rc7 + - LP: #1180881 + - LP: #1180881 + - LP: #1217957 + + -- Tim Gardner Sun, 27 Oct 2013 22:08:55 -0600 + +linux (3.12.0-0.2) trusty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) scsi: hyper-v storage -- mark as preferring READ + CAPACITY (16) at SPC-2" + * Revert "SAUCE: (no-up) scsi: hyper-v storage -- mark as VPD capable at + SPC-2" + * Revert "SAUCE: (no-up) scsi: add scsi device flag to request READ + CAPACITY (16) be preferred" + * Revert "SAUCE: (no-up) scsi: add scsi device flag to request VPD pages + be used at SPC-2" + * Revert "overlayfs: Update to v19" + * Revert "ubuntu: overlayfs v18 -- -- overlayfs: implement show_options" + * Revert "ubuntu: overlayfs v18 -- -- overlayfs: add statfs support" + * Revert "ubuntu: overlayfs v18 -- -- overlay filesystem" + * Revert "ubuntu: overlayfs v18 -- -- vfs: introduce + clone_private_mount()" + * Revert "ubuntu: overlayfs v18 -- -- vfs: export do_splice_direct() to + modules" + * Revert "ubuntu: overlayfs v18 -- -- overlay: overlay filesystem + documentation" + * ubuntu: overlayfs v20 -- overlayfs: add statfs support + * [Config] fix 
linux-libc-dev generation for arm64 + * [Config] fix linux-libc-dev generation for x32 + * [Config] add linux-libc-dev generation for ppc64el + + [ Erez Zadok ] + + * ubuntu: overlayfs v20 -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs v20 -- vfs: add i_op->dentry_open() + * ubuntu: overlayfs v20 -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs v20 -- vfs: export __inode_permission() to modules + * ubuntu: overlayfs v20 -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs v20 -- overlay filesystem + * ubuntu: overlayfs v20 -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs v20 -- overlay: overlay filesystem documentation + + [ Paolo Pisati ] + + * [Config] arm: VIRTIO_[BLK|NET|MMIO]=y + + [ Seth Forshee ] + + * SAUCE: (no-up) ACPI: Disable Windows 8 compatibility for some Lenovo + ThinkPads + - LP: #1183856 + + [ Tim Gardner ] + + * [Config] CONFIG_CRYPTO_CRCT10DIF=y, CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m + * rebase to v3.12-rc2 + * updateconfigs + * [Config] disable CONFIG_FB_VESA enforcer check + * [Config] Disable lttng for FTBS + * rebase to v3.12-rc3 + * rebase to v3.12-rc4 + * [Config] CONFIG_ANDROID=n + - LP: #1235161 + * [Config] CONFIG_L2TP_V3=y + - LP: #1235914 + * [Config] CONFIG_USB_OTG=n for all arches + * Release tracker + - LP: #1242811 + + [ Upstream Kernel Changes ] + + * scsi: hyper-v storsvc switch up to SPC-3 + + * rebase to v3.12-rc6 + - LP: #1235977 + - LP: #1235523 + - LP: #1239392 + - LP: #1227491 + + * rebase to v3.12-rc3 + - LP: #1231931 + + * rebase to v3.12-rc2 + - LP: #1213820 + - LP: #1213055 + - LP: #1198030 + + -- Tim Gardner Mon, 23 Sep 2013 07:41:07 -0600 + +linux (3.11.0-8.15) saucy; urgency=low + + [ Tim Gardner ] + + * Release tracker + - LP: #1227969 + + * Update lttng + Updated to git://git.lttng.org/lttng-modules.git 9998f5216f4641a79e158135 + Version 2.3.0+ + + [ Upstream Kernel Changes ] + + * igb: Add additional get_phy_id call for i354 devices + - LP: #1219619 + * igb: Read flow control for i350 from correct EEPROM section + - LP: #1219619 + * timekeeping: Fix HRTICK related deadlock from ntp lock changes + Required for lttng update. + + -- Tim Gardner Thu, 19 Sep 2013 07:41:49 -0600 + +linux (3.11.0-7.14) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] sort out linux-tools naming + - LP: #1205284 + * [Packaging] linux-tools: switch to common generic version helper + + [ Paolo Pisati ] + + * [Config] highbank: ecx1000: CPU_IDLE causes instabilities, disable it + + [ Tim Gardner ] + + * Release tracker + - LP: #1226160 + + [ Tony Lindgren ] + + * SAUCE: ARM: dts: Fix muxing and regulator for wl12xx on the SDIO bus + for pandaboard + + [ Upstream Kernel Changes ] + + * USB: handle LPM errors during device suspend correctly + - LP: #1011415 + * usb: don't check pm qos NO_POWER_OFF flag in usb_port_suspend() + - LP: #1011415 + * usb: Don't fail port power resume on device disconnect. 
+ - LP: #1011415 + + [ Upstream Kernel Changes ] + + * rebase to v3.11.1 + + -- Tim Gardner Wed, 11 Sep 2013 07:30:17 -0600 + +linux (3.11.0-7.13) saucy; urgency=low + + * Release tracker + - LP: #1223545 + + [ Andy Whitcroft ] + + * SAUCE: (no-up) scsi: add scsi device flag to request VPD pages be used at SPC-2 + - LP: #1223499 + * SAUCE: (no-up) scsi: add scsi device flag to request READ CAPACITY (16) be preferred + - LP: #1223499 + * SAUCE: (no-up) scsi: hyper-v storage -- mark as VPD capable at SPC-2 + - LP: #1223499 + * SAUCE: (no-up) scsi: hyper-v storage -- mark as preferring READ CAPACITY (16) at SPC-2 + - LP: #1223499 + + [ Maximiliano Curia ] + + * SAUCE: (no-up) Only let characters through when there are active readers. + - LP: #1208740 + + [ Tim Gardner ] + + * [Debian] getabis: Commit new ABI directory, remove the old + * [Config] CONFIG_EFIVAR_FS=y + - LP: #1223195 + * [Config] CONFIG_EFI_VARS_PSTORE=m, + CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=n + * SAUCE: (no-up) USB: input: cm109.c: Convert high volume dev_err() to dev_err_ratelimited() + - LP: #1222850 + + [ Upstream Kernel Changes ] + + * Intel xhci: refactor EHCI/xHCI port switching + - LP: #1210858 + + -- Tim Gardner Tue, 10 Sep 2013 09:00:19 -0600 + +linux (3.11.0-6.12) saucy; urgency=low + + * Release tracker + - LP: #1222893 + + [ Andy Whitcroft ] + + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch" + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-base.patch" + * ubuntu: (no-squash) AUFS3 -- aufs3-base.patch + * ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch + - LP: #1222407 + + [ Paolo Pisati ] + + * [Config] restore mmc boot on panda + + [ Tyler Hicks ] + + * SAUCE: apparmor: Use shash crypto API interface for profile hashes + - LP: #1216294 + + [ Upstream Kernel Changes ] + + * net: calxedaxgmac: remove NETIF_F_FRAGLIST setting + * net: calxedaxgmac: read correct field in xgmac_desc_get_buf_len + * net: calxedaxgmac: fix race between xgmac_tx_complete and xgmac_tx_err + * net: calxedaxgmac: fix possible skb free before tx complete + * net: calxedaxgmac: update ring buffer tx_head after barriers + * net: calxedaxgmac: fix race with tx queue stop/wake + * net: calxedaxgmac: enable interrupts after napi_enable + * net: calxedaxgmac: fix various errors in xgmac_set_rx_mode + * net: calxedaxgmac: remove some unused statistic counters + * net: calxedaxgmac: fix rx DMA mapping API size mismatches + * net: calxedaxgmac: fix xgmac_xmit DMA mapping error handling + * mfd: rtsx: Read vendor setting from config space + - LP: #1201698 + + -- Tim Gardner Mon, 09 Sep 2013 07:21:06 -0600 + +linux (3.11.0-5.11) saucy; urgency=low + + * Release tracker + - LP: #1221886 + + [ Adam Lee ] + + * SAUCE: Bluetooth: Add support for 04ca:2007 + - LP: #1153448 + * SAUCE: Bluetooth: Add support for 105b:e065 + - LP: #1161261 + + [ Gavin Guo ] + + * SAUCE: Bluetooth: Add support for Broadcom 413c:8143 + - LP: #1166113 + + [ Upstream Kernel Changes ] + + * igb: Reset the link when EEE setting changed + - LP: #1219619 + * igb: Read register for latch_on without return value + - LP: #1219619 + * igb: Added rcu_lock to avoid race + - LP: #1219619 + * igb: don't allow SR-IOV without MSI-X + - LP: #1219619 + * igb: Update MTU so that it is always at least a standard frame size + - LP: #1219619 + * igb: Refactor of init_nvm_params + - LP: #1219619 + * igb: Refactor NVM read functions to accommodate devices with no flash + - LP: #1219619 + * igb: Add device support for flashless SKU of i210 device + - LP: #1219619 + * igb: Fix 
get_fw_version function for all parts + - LP: #1219619 + * igb: Add macro for size of RETA indirection table + - LP: #1219619 + * igb: Expose RSS indirection table for ethtool + - LP: #1219619 + * igb: Don't look for a PBA in the iNVM when flashless + - LP: #1219619 + * igb: Implementation of 1-sec delay for i210 devices + - LP: #1219619 + * igb: New PHY_ID for i354 device + - LP: #1219619 + * igb: M88E1543 PHY downshift implementation + - LP: #1219619 + * igb: No PHPM support in i354 devices + - LP: #1219619 + * igb: Support to get 2_5G link status for appropriate media type + - LP: #1219619 + * igb: Get speed and duplex for 1G non_copper devices + - LP: #1219619 + * igb: Implementation to report advertised/supported link on i354 devices + - LP: #1219619 + * igb: Update version number + - LP: #1219619 + * Bluetooth: Take proper tty_struct references + - LP: #1189998 + * Bluetooth: Remove the device from the list in the destructor + - LP: #1189998 + * Bluetooth: Move the tty initialization and cleanup out of open/close + - LP: #1189998 + * Bluetooth: Implement .activate, .shutdown and .carrier_raised methods + - LP: #1189998 + * Bluetooth: Fix the reference counting of tty_port + - LP: #1189998 + * Bluetooth: Purge the dlc->tx_queue to avoid circular dependency + - LP: #1189998 + + [ Wen-chien Jesse Sung ] + + * SAUCE: Bluetooth: Support for loading broadcom patchram firmware + - LP: #1065400 + * SAUCE: Bluetooth: Add support for 13d3:3388 and 13d3:3389 + - LP: #1065400 + + -- Tim Gardner Thu, 05 Sep 2013 08:06:17 -0600 + +linux (3.11.0-5.10) saucy; urgency=low + + [ Andy Whitcroft ] + + * Release tracker + - LP: #1220222 + * Revert "[Config] Fix ubuntu directoy Kbuilds" + * Revert "aufs update dropped some Kbuild files" + * Revert "ubuntu: AUFS -- follow rename of loop.h into drivers/block" + * Revert "ubuntu: AUFS -- update to + 8e503d4142c189ed6c47a2177ad2cd058e8d340e" + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch" + * Revert "ubuntu: (no-squash) AUFS3 -- aufs3-base.patch" + * rebase to v3.11 final + * [Config] clean up ubuntu/Kconfig and ubuntu/Makefile + * ubuntu: AUFS (no-squash): basic framework and update machinary + * ubuntu: (no-squash) AUFS3 -- aufs3-base.patch + * ubuntu: (no-squash) AUFS3 -- aufs3-standalone.patch + * ubuntu: AUFS -- update to 5ac5fe26a90a818218310e208d17688fddb07622 + * ubuntu: (no-squash) AUFS -- enable aufs + * ubuntu: AUFS -- fix remaining d_count references to use accessor + * ubuntu: lttng -- follow rename of pid_ns + * SAUCE: disable stack-protector for ARM compressed bootloader + + [ Paolo Pisati ] + + * [Config] ARM_ATAG_DTB_COMPAT=y + + [ Rob Herring ] + + * [Config] Enable KVM and virtio for armhf generic-lpae + + [ Tim Gardner ] + + * [Config] CONFIG_ARPD=y + * [Config] CONFIG_ZSWAP=y + - LP: #1215379 + + [ Upstream Kernel Changes ] + + * uvcvideo: quirk PROBE_DEF for Dell SP2008WFP monitor. 
+ - LP: #1217957 + * ARM: use phys_addr_t for DMA zone sizes + * ARM: highbank: enable DMA zone for LPAE + * ARM: highbank: select ARCH_HAS_HOLES_MEMORYMODEL + * ARM: highbank: select required errata work-arounds + * DMA: fix AMBA PL08x compilation issue with 64bit DMA address type + * DMA: fix printk warning in AMBA PL08x DMA driver + * ARM: highbank: select ARCH_DMA_ADDR_T_64BIT for LPAE + * ARM: move outer_cache declaration out of ifdef + * ARM: highbank: avoid L2 cache smc calls when PL310 is not present + * ARM: highbank: clean-up some unused includes + * ARM: xen: only set pm function ptrs for Xen guests + + [ Upstream Kernel Changes ] + + * rebase to v3.11 + + -- Andy Whitcroft Tue, 03 Sep 2013 17:08:06 +0100 + +linux (3.11.0-4.9) saucy; urgency=low + + [ Tim Gardner ] + + * rebase to v3.11-rc7 + * Release tracker + - LP: #1216962 + + [ Upstream Kernel Changes ] + + * mwifiex: do not create AP and P2P interfaces upon driver loading + - LP: #1212720 + + -- Tim Gardner Mon, 26 Aug 2013 06:25:35 -0600 + +linux (3.11.0-3.8) saucy; urgency=low + + [ Johannes Berg ] + + * SAUCE: mac80211: ignore (E)CSA in probe response frames + - LP: #1201470 + + -- Tim Gardner Fri, 23 Aug 2013 09:47:36 -0600 + +linux (3.11.0-3.7) saucy; urgency=low + + [ Tim Gardner ] + + * SAUCE: (no-up) hv_vss_daemon -- prevent self-daemonising to allow + upstart to track + * SAUCE: (no-up) hv -- bodge hv_vss_daemon so it can use the local + linux/hyperv.h + * SAUCE: hv: Add vss daemon to Makefile + * [Debian] Add hv_vss_daemon to tools package + - LP: #1213282 + * [Config] Fix ubuntu directoy Kbuilds + - LP: #1181755 + + -- Tim Gardner Tue, 20 Aug 2013 08:34:05 -0600 + +linux (3.11.0-3.6) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] tools: conditionalise x86 and hyper-v tools sensibly + * [Config] tools: enable x86 and hyper-v + + [ John Johansen ] + + * Revert "SAUCE: (no-up) apparmor: Sync to apparmor 3 dev stable + snapshot" + * Revert "SAUCE: (no-up) apparmor: fix apparmor module status for none + root users" + * SAUCE: (no-up) apparmor: Sync to apparmor 3 - alpha 4 snapshot + + [ Joseph Salisbury ] + + * SAUCE: (no-up) intel_ips: blacklist ASUSTek G60JX laptops + - LP: #1210848 + + [ Kamal Mostafa ] + + * [debian] tools: ship 'cpupower' in linux-tools + - LP: #1158668 + * [Config] Build-dep on libpci-dev for cpu tools + - LP: #1158668 + + [ Tim Gardner ] + + * rebase to v3.11-rc6 + * Release tracker + - LP: #1213941 + + -- Tim Gardner Fri, 16 Aug 2013 07:02:07 -0600 + +linux (3.11.0-2.5) saucy; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_PM_DEBUG=y + - LP: #1210539 + * rebase to v3.11-rc5 + * Release tracker + - LP: #1211378 + + -- Tim Gardner Mon, 12 Aug 2013 06:10:39 -0600 + +linux (3.11.0-1.4) saucy; urgency=low + + [ Tim Gardner ] + + * Bump ABI when making changes to the inclusion list lest + you cause conflicts with existing installed kernel + packages. 
+ - LP: #1210331 + + -- Tim Gardner Fri, 09 Aug 2013 03:03:51 +0100 + +linux (3.11.0-0.3) saucy; urgency=low + + [ Tim Gardner ] + + * [Config] Include rbd and kvm in the virtual inclusion list + - LP: #1206961 + * [Config] Removed obsolete inclusion list entries + + -- Tim Gardner Tue, 06 Aug 2013 08:52:14 +0100 + +linux (3.11.0-0.2) saucy; urgency=low + + [ Bruce Allan ] + + * SAUCE: (no-up) e1000e: fix I217/I218 PHY initialization flow + - LP: #1206757 + * SAUCE: (no-up) e1000e: enable support for new device IDs + - LP: #1206757 + + [ John Johansen ] + + * SAUCE: (no-up) apparmor: Sync to apparmor 3 dev stable snapshot + + [ Paolo Pisati ] + + * build vexpress a15 dtb + * [Config] disable Broadcom bcm support (ARCH_BCM) + * [Config] disable Allwinner a1x support (ARCH_SUNXI) + * [Config] disable WonderMedia WM8850 support (ARCH_WM8850) + * [Config] disable Rockchip support (ARCH_ROCKCHIP) + * [Config] disable STMicroelectronics STiH41x SOCs (ARCH_STI) + * [Config] disable TI Keystone, AM43xx and OMAP5 support + * [Config] ARM_APPENDED_DTB=y + + [ Tim Gardner ] + + * rebase to v3.11-rc4 + * overlayfs: Update to v19 + * [Config] Enable overlayfs + * SAUCE: Fix lttng compile errors + + [ Upstream Kernel Changes ] + + * rebase to v3.11-rc4 + - LP: #1163720 + - LP: #1162026 + - LP: #1195636 + - LP: #1195597 + - LP: #1180409 + - LP: #1168430 + + -- Tim Gardner Sun, 04 Aug 2013 03:45:31 -0600 + +linux (3.11.0-0.1) saucy; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to 64ccccf8525fee499625b517c0faadf784c79e93 + - LP: #1163720 + - LP: #1162026 + - LP: #1195636 + - LP: #1195597 + - LP: #1180409 + - LP: #1168430 + + -- Tim Gardner Mon, 08 Jul 2013 08:50:46 -0600 + +linux (3.10.0-2.10) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_INTEL_MEI*=m + - LP: #1196155 + * [Config] CONFIG_DEBUG_INFO=y + + [ Stephen Warren ] + + * [Config] fix Calxeda xgmac module filename + + [ Upstream Kernel Changes ] + + * Revert "serial: 8250_pci: add support for another kind of NetMos + Technology PCI 9835 Multi-I/O Controller" + - LP: #1190967 + * mfd: lpc_ich: Add support for Intel Avoton SoC + - LP: #1196658 + + -- Andy Whitcroft Fri, 05 Jul 2013 18:08:02 +0100 + +linux (3.10.0-2.9) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10 + + [ John Johansen ] + + * Revert "SAUCE: (no-up) apparmor: Fix quieting of audit messages for + network mediation" + * Revert "SAUCE: (no-up) apparmor: Fix compile warnings" + * Revert "SAUCE: (no-up) AppArmor: basic networking rules" + * Revert "SAUCE: (no-up) apparmor: Add the ability to mediate mount" + * Revert "SAUCE: (no-up) AppArmor: Add profile introspection file to + interface" + * Revert "SAUCE: (no-up) AppArmor: Disable Add PR_{GET,SET}_NO_NEW_PRIVS + to prevent execve from granting privs" + * SAUCE: (no-up) apparmor: Sync to apparmor 3 dev stable snapshot + + [ Upstream Kernel Changes ] + + * rebase to v3.10 + + -- Andy Whitcroft Mon, 01 Jul 2013 17:42:29 +0100 + +linux (3.10.0-1.8) saucy; urgency=low + + [ Andy Whitcroft ] + + * Release Tracking Bug + - LP: #1195717 + + [ Andy Whitcroft ] + + * Revert "ubuntu: overlayfs -- follow change to do_splice_direct + interface" + * Revert "ubuntu: overlayfs -- expose do_splice_direct prototype" + * Revert "SAUCE: ubuntu: overlayfs -- ovl_path_open should not take path + reference" + * Revert "ubuntu: overlayfs -- add FS_ALIAS" + * Revert "ubuntu: overlayfs -- + overlayfs-copy-up-i_uid-i_gid-from-the-underlying-inode" + * Revert "ubuntu: overlayfs -- ovl-switch-to-inode_permission" + * 
Revert "ubuntu: overlayfs -- vfs-export-inode_permission-to-modules" + * Revert "ubuntu: overlayfs -- overlayfs-create-new-inode-in-ovl_link" + * Revert "ubuntu: overlayfs -- + overlayfs-fix-possible-leak-in-ovl_new_inode" + * Revert "ubuntu: overlayfs -- fs-limit-filesystem-stacking-depth" + * Revert "ubuntu: overlayfs -- overlay-overlay-filesystem-documentation" + * Revert "ubuntu: overlayfs -- overlayfs-implement-show_options" + * Revert "ubuntu: overlayfs -- overlayfs-add-statfs-support" + * Revert "ubuntu: overlayfs -- overlay filesystem" + * Revert "ubuntu: overlayfs -- vfs-introduce-clone_private_mount" + * Revert "ubuntu: overlayfs -- vfs-export-do_splice_direct-to-modules" + * Revert "ubuntu: overlayfs -- vfs-add-i_op-dentry_open" + * ubuntu: overlayfs v18 -- -- overlayfs: add statfs support + + [ Erez Zadok ] + + * ubuntu: overlayfs v18 -- -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs v18 -- -- vfs: add i_op->dentry_open() + * ubuntu: overlayfs v18 -- -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs v18 -- -- vfs: export __inode_permission() to modules + * ubuntu: overlayfs v18 -- -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs v18 -- -- overlay filesystem + * ubuntu: overlayfs v18 -- -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs v18 -- -- overlay: overlay filesystem documentation + + [ Tim Gardner ] + + * [Config] CONFIG_SUNRPC_DEBUG=y + - LP: #1127319 + + -- Andy Whitcroft Fri, 28 Jun 2013 10:26:52 +0100 + +linux (3.10.0-0.7) saucy; urgency=low + + [ Andy Whitcroft ] + + * autopkgtest: switch Depends: to build-essential + + -- Andy Whitcroft Tue, 25 Jun 2013 08:40:55 +0100 + +linux (3.10.0-0.6) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_ARCH_TEGRA to fix FTBFS on armhf + * SAUCE: ubuntu: overlayfs -- ovl_path_open should not take path reference + - LP: #1098378 + * ubuntu: AUFS -- update to 4f14cef47eb7c23eda7198931fbab1040866b6ee + * ubuntu: overlayfs -- expose do_splice_direct prototype + * ubuntu: overlayfs -- follow change to do_splice_direct interface + * [Config] flip CONFIG_NO_HZ_FULL_ALL off as it is overheating machines + - LP: #1192691 + + [ Stefan Bader ] + + * (d-i) Add dm-snapshot to md-modules + - LP: #1191726 + + [ Tim Gardner ] + + * Release tracker + - LP: #1194149 + * [Config] CONFIG_WIL6210=n for armhf + * [Config] d-i: Add calxedaxgmac to nic-modules + - LP: #1192358 + * [debian] Use dh_strip + - LP: #1192759 + * [Config] Enable perf for armhf + * do_tools=false when cross compiling + * [Config] CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y + - LP: #1108082 + + [ Upstream Kernel Changes ] + + * nsp32: switch reset delay to msleep() as it is tooo long + * alx: add a simple AR816x/AR817x device driver + Plucked from linux-next. replaces ubuntu/alx in favor of + 'to be merged' version in 3.11. 
+ * rebase to v3.10-rc7 + - LP: #1189363 + + -- Tim Gardner Sat, 22 Jun 2013 18:10:31 -0600 + +linux (3.10.0-0.5) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10-rc6 + * [Config] updateconfigs following rebase to v3.10-rc6 + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc6 + + -- Andy Whitcroft Mon, 17 Jun 2013 11:12:39 +0100 + +linux (3.10.0-0.4) saucy; urgency=low + + [ Andy Whitcroft ] + + * [Config] updateconfigs following rebase to v3.10-rc4 + + -- Andy Whitcroft Mon, 10 Jun 2013 11:42:28 +0100 + +linux (3.10.0-0.3) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc5 + - LP: #1186170 + + -- Andy Whitcroft Mon, 10 Jun 2013 09:23:31 +0100 + +linux (3.10.0-0.2) saucy; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.10-rc4 + * [Config] CONFIG_BINFMT_SCRIPT=y to fix booting initramfs scripts + * [Config] enable SND_PCM_DEBUG SND_PCM_XRUN_DEBUG + - LP: #1187744 + * [Config] enforce CONFIG_BINFMT_SCRIPT=y + + [ Dave Chiluk ] + + * SAUCE: ncpfs: fix rmdir returns Device or resource busy + - LP: #1035226 + + [ Tim Gardner ] + + * rebase to v3.10-rc3 + * [Config] sparc be gone + * [Config] ia64 be gone + * d-i: block-modules provides nbd-modules + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc4 + * rebase to v3.10-rc3 + + -- Tim Gardner Tue, 28 May 2013 06:16:46 -0600 + +linux (3.10.0-0.1) saucy; urgency=low + + [ Tim Gardner ] + + * UBUNTU: Disabled lttng + * UBUNTU: Disable aufs for FTBS + * UBUNTU: Disabled alx + * UBUNTU: alx: rename NETIF_F_HW_VLAN_* feature flags to NETIF_F_HW_VLAN_CTAG_* + * UBUNTU: rebase to v3.10-rc2 + * UBUNTU: SAUCE: uvcvideo: quirk PROBE_DEF for Alienware X51 OmniVision webcam + + [ Andy Whitcroft ] + + * UBUNTU: [Config] update standards version to 3.9.4.0 + * UBUNTU: [Config] squash duplicate package description (long and short) + * UBUNTU: [Config] fix up Vcs-git: to point to saucy + * UBUNTU: [Config] drop depenancy on util-linux as is Essential + * UBUNTU: [Config] drop redundant Build-Conficts: + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc2 + - LP: #1180351 + + [ Upstream Kernel Changes ] + + * rebase to v3.10-rc1 + - LP: #1172151 + - LP: #1089795 + - LP: #1167270 + - LP: #1128840 + + -- Tim Gardner Tue, 14 May 2013 13:41:07 -0600 + +linux (3.10.0-0.0) saucy; urgency=low + + * Dummy + + -- Tim Gardner Thu, 09 May 2013 20:30:40 +0100 + +linux (3.9.0-2.6) saucy; urgency=low + + [ Tim Gardner ] + + * rebase to v3.9.2 + + -- Tim Gardner Thu, 09 May 2013 20:30:40 +0100 + +linux (3.9.0-1.5) saucy; urgency=low + + [ Tim Gardner ] + + * rebase to v3.9.1 + + -- Tim Gardner Wed, 08 May 2013 12:49:45 -0400 + +linux (3.9.0-0.4) saucy; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: overlayfs -- add FS_ALIAS + + [ Tim Gardner ] + + * Added lttng + - LP: #1175784 + + -- Tim Gardner Thu, 02 May 2013 17:17:13 -0400 + +linux (3.9.0-0.3) saucy; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to v3.9 + + -- Tim Gardner Mon, 29 Apr 2013 18:20:00 -0400 + +linux (3.9.0-0.2) saucy; urgency=low + + [ Tim Gardner ] + + * Enable extras packaging for amd64/i386. + Fixes build depenencies with brittany and linux-meta. 
+ + -- Tim Gardner Mon, 29 Apr 2013 05:37:01 -0600 + +linux (3.9.0-0.1) saucy; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc8 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc7 + - LP: #1128840 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc6 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc4 + - LP: #1095315 + - LP: #886975 + - LP: #1086921 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc3 + - LP: #1155016 + - LP: #1103594 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc2 + + [ Upstream Kernel Changes ] + + * rebase to v3.9-rc1 + - LP: #901105 + - LP: #961286 + - LP: #1011792 + - LP: #1128934 + - LP: #886975 + - LP: #978807 + + -- Tim Gardner Wed, 20 Feb 2013 09:12:39 -0700 + +linux (3.8.0-7.14) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_RCU_USER_QS=n + * [Config] CONFIG_MTD_ONENAND_SIM=n + * annotations: add annotations for CONFIG_CC_STACKPROTECTOR + + [ Upstream Kernel Changes ] + + * rebase to v3.8 + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1130111 + * UBUNTU: SAUCE: rt2x00: rt2x00pci_regbusy_read() - only print register access failure once + - LP: #1128840 + + -- Tim Gardner Mon, 18 Feb 2013 09:25:56 -0700 + +linux (3.8.0-6.13) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1125364 + * Add ahci modules to d-i + - LP: #1124415 + + + [ Chris Wilson ] + + * SAUCE: drm/i915: Wait for pending flips to complete before tearing down + the encoders + - LP: #1097315 + + -- Tim Gardner Wed, 13 Feb 2013 12:16:48 -0700 + +linux (3.8.0-6.12) raring; urgency=low + + [Tim Gardner] + + * perf: NO_LIBPERL=1 + * Fix linux-headers dependency + * Release Tracking Bug + - LP: #1124362 + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_AUFS_EXPORT to allow nfs exports + - LP: #1121699 + + [ Daniel Vetter ] + + * SAUCE: drm/i915: write backlight harder + - LP: #954661 + + -- Tim Gardner Wed, 13 Feb 2013 10:25:11 -0700 + +linux (3.8.0-6.11) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1122071 + * rebase to v3.8-rc7 + * Add libaudit-dev as a build dependency + * Build perf with NO_LIBPYTHON=1 to avoid a python build dependency. + + [ Leann Ogasawara ] + + * [Config] Remove CONFIG_SATA_AHCI annotation + + -- Tim Gardner Fri, 08 Feb 2013 07:41:13 -0500 + +linux (3.8.0-5.10) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1118568 + + * Bump ABI to fix install issue with 3.8.0-4.8. + Moving drivers/ata/*ahci* to linux-image caused an + install conflict with linux-image-extras without an + ABI bump. 
+ + [ Jan Beulich ] + + * SAUCE: xen-pciback: rate limit error messages from + xen_pcibk_enable_msi{, x}() + - LP: #1117336 + - CVE-2013-0231 + + -- Tim Gardner Thu, 07 Feb 2013 05:38:12 -0700 + +linux (3.8.0-4.9) raring; urgency=low + + [ Herton Ronaldo Krzesinski ] + + * d-i: Add mellanox ethernet drivers to nic-modules + - LP: #1015339 + + [ Joseph Salisbury ] + + * SAUCE: ACPI: Add DMI entry for Sony VGN-FW41E_H + - LP: #1113547 + + [ Kamal Mostafa ] + + * SAUCE: alx driver import script + + [ Qualcomm Atheros, Inc ] + + * SAUCE: alx: Update to heads/master + + [ Tim Gardner ] + + * Release Tracking Bug + - LP: #1117673 + + * [debian] Remove dangling symlink from headers package + - LP: #1112442 + * [config] CONFIG_ALX=m + * [Config] Add alx to d-i nic-modules + * [Config] CONFIG_SATA_AHCI=m + - LP: #1056563 + + -- Leann Ogasawara Tue, 05 Feb 2013 05:54:32 -0800 + +linux (3.8.0-4.8) raring; urgency=low + + [ Allen Ibara ] + + * SAUCE: imx6: dts: Add IMX6Q AHCI support + + [ Andy Whitcroft ] + + * rebase to v3.8-rc6 + * updateconfigs following rebase to v3.8-rc6 + + [Leann Ogasawara] + + * Release Tracking Bug + - LP: #1112573 + + [ Paolo Pisati ] + + * SAUCE: imx6: enable sata clk if SATA_AHCI_PLATFORM + * [Config] SERIAL_AMBA_PL011=y (vexpress serial console) + * [Config] MMC_ARMMMCI=y (vexpress mmc) + * [Config] FB_ARMCLCD=y (vexpress framebuffer) + + [ Seth Forshee ] + + * [Config] CONFIG_MAC80211_MESSAGE_TRACING=y + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc6 + - LP: #1107477 + + -- Leann Ogasawara Fri, 01 Feb 2013 07:20:59 -0800 + +linux (3.8.0-3.7) raring; urgency=low + + [ Andy Green ] + + * SAUCE: ARM: OMAP2+: add cpu id register to MAC address helper + * SAUCE: ARM: omap2 add mac address allocation register api + * SAUCE: ARM: omap2 panda register ethernet and wlan for automatic mac + allocation + + [ Leann Ogasawara ] + + * rebase to v3.8-rc5 + * Release Tracking Bug + - LP: #1111486 + + [ Paolo Pisati ] + + * SAUCE: davinci: vpss: compilation fix + * [Config] enable TI OMAP4 support (Pandaboard/ES) + * [Config] OMAP_USB2=y (since TWL6030_USB depends on it) + * [Config] enable Freescale IMX6 support (SabreLite) + * [Config] SERIAL_IMX_CONSOLE=y + * [Config] MMC_*_IMX=y + * [Config] disable USB_SUSPEND + * [Config] USB_MXS_PHY=y + * [Config] USB_CHIPIDEA=y + * SAUCE: DTB: add support for multiple DTBs + * SAUCE: DTB: build imx6q-sabrelite + * SAUCE: DTB: build beaglexm + * SAUCE: DTB: build panda/panda es + * [Config] disable CPU_FREQ + * [Config] PANEL_TFP410=y (video DVI output) + * [Config] SND_OMAP_SOC*=y + * [Config] SND_IMX_SOC*=y + * [Config] I2C_IMX=y + * [Config] SPI_IMX=m + + [ Stefan Bader ] + + * [Config] Move 9p modules into generic package + - LP: #1107658 + + [ Tony Lindgren ] + + * SAUCE: ARM: OMAP2+: Limit omap initcalls to omap only on multiplatform + kernels + * SAUCE: ARM: OMAP2+: Use omap initcalls + * SAUCE: ARM: OMAP: Fix i2c cmdline initcall for multiplatform + * SAUCE: ARM: OMAP: Fix dmaengine init for multiplatform + * SAUCE: ARM: OMAP2+: Add multiplatform debug_ll support + * SAUCE: ARM: OMAP2+: Disable code that currently does not work with + multiplaform + * SAUCE: ARM: OMAP2+: Enable ARCH_MULTIPLATFORM support + * SAUCE: ARM: OMAP2+: Add minimal support for booting vexpress + * SAUCE: ARM: OMAP2+: Remove now obsolete uncompress.h and debug-macro.S + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc5 + - LP: #1096789 + + -- Leann Ogasawara Thu, 31 Jan 2013 06:44:52 -0800 + +linux (3.8.0-2.6) raring; urgency=low + + [ Adam Conrad ] + 
+ * Fix up linux-tools -> SRCPKGNAME-tools rename + + [ Andy Whitcroft ] + + * [Config] re-disable CONFIG_SOUND_OSS + - LP: #1105230 + + [ Arend van Spriel ] + + * SAUCE: brcmsmac: fix tx status processing + + [Leann Ogasawara] + + * Release Tracking Bug + - LP: #1105104 + + -- Leann Ogasawara Fri, 25 Jan 2013 11:56:30 -0800 + +linux (3.8.0-1.5) raring; urgency=low + + [Tim Gardner] + + * Release Tracking Bug + - LP: #1101235 + + [ Dudley Du ] + + * SAUCE: Input: add support for Cypress PS/2 Trackpads + - LP: #978807 + + [ Kamal Mostafa ] + + * SAUCE: Input: increase struct ps2dev cmdbuf[] to 8 bytes + * SAUCE: Input: Cypress PS/2 Trackpad simulated multitouch + * [Config] Add CONFIG_PS2_CYPRESS + + [ Tim Gardner ] + + * rebase to v3.8-rc4 + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc4 + - LP: #1095315 + - LP: #886975 + - LP: #1086921 + + -- Leann Ogasawara Thu, 17 Jan 2013 10:50:22 -0800 + +linux (3.8.0-0.4) raring; urgency=low + + [ Leann Ogasawara ] + + * [Config] Update CONFIG_TOUCHSCREEN_EGALAX build annotation + * [Config] Update CONFIG_IIO build annotation + * [Config] Update CONFIG_TOUCHSCREEN_EETI annotation + * [Config] Remove CONFIG_SPI_DW_MMIO annotation + * [Config] Remove CONFIG_SPI_PL022 annotation + * [Config] Update CONFIG_EZX_PCAP annotation + * [Config] Update CONFIG_SENSORS_AK8975 annotation + * [Config] Disable CONFIG_DRM_MGAG200 + - LP: #1042903 + + -- Leann Ogasawara Mon, 14 Jan 2013 10:01:50 -0800 + +linux (3.8.0-0.3) raring; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: (no-up) trace: add trace events for open(), exec() and + uselib()" + + [ Scott James Remnant ] + + * SAUCE: (no-up) trace: add trace events for open(), exec() and uselib() + (for v3.7+) + - LP: #1085766, #462111 + + -- Andy Whitcroft Fri, 11 Jan 2013 16:57:27 +0000 + +linux (3.8.0-0.2) raring; urgency=low + + [ Tim Gardner ] + + * [packaging] Add macro to selectively disable building perf + * [packaging] Cannot depend on universe package libaudit-dev + + -- Tim Gardner Thu, 10 Jan 2013 12:43:24 -0700 + +linux (3.8.0-0.1) raring; urgency=low + + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc3 + - LP: #1096789 + + [ Upstream Kernel Changes ] + + * rebase to v3.8-rc2 + - LP: #1082357 + - LP: #1075882 + + -- Andy Whitcroft Mon, 17 Dec 2012 10:35:09 +0000 + +linux (3.7.0-7.15) raring; urgency=low + + [ Chris J Arges ] + + * SAUCE: add eeprom_bad_csum_allow module parameter + - LP: #1070182 + + [ Leann Ogasawara ] + + * Add ceph to linux-image for virtual instances + - LP: #1063784 + + [ Serge Hallyn ] + + * SAUCE: net: dev_change_net_namespace: send a KOBJ_REMOVED/KOBJ_ADD + + [ Tim Gardner ] + + * [Config] CONFIG_SLUB_DEBUG=y + - LP: #1090308 + + [ Upstream Kernel Changes ] + + * Revert "[SCSI] sd: Implement support for WRITE SAME" + - LP: #1089818 + + -- Leann Ogasawara Wed, 12 Dec 2012 06:50:20 -0800 + +linux (3.7.0-6.14) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] annotations: all new annotations scheme including defaults + * [Configs] apply annotation updates to main configs (top section) + + [ Leann Ogasawara ] + + * Revert "SAUCE: include and for mmc_core arm + build" + * Revert "SAUCE: [arm] fixup __aeabi_uldivmod undefined build error" + * Temporarily disable module check for build + + [ Stefan Bader ] + + * ubuntu: dm-raid45: Adapt to upstream interface changes + * Re-enable build of dm-raid45 + + [ Tim Gardner ] + + * SAUCE: Moved scripts/fw-to-ihex.sh to debian/scripts/misc + * SAUCE: ACPICA: Fix ACPI mutex object allocation memory leak on error + * 
SAUCE: drm: Fix possible EDID memory allocation oops + * SAUCE: ttm: Fix possible _manager memory allocation oops + * SAUCE: iwlwifi: iwlagn_request_scan: Fix check for priv->scan_request + * SAUCE: i915: intel_set_mode: Reduce stack allocation from 500 bytes to + 2 pointers + + [ Tomas Hozza ] + + * SAUCE: tools: hv: Netlink source address validation allows DoS + - LP: #1084777 + - CVE-2012-5532 + + [ Upstream Kernel Changes ] + + * rebase to v3.7 + + -- Leann Ogasawara Wed, 05 Dec 2012 14:11:12 -0800 + +linux (3.7.0-5.13) raring; urgency=low + + [ Lino Sanfilippo ] + + * SAUCE: inotify, fanotify: replace fsnotify_put_group() with + fsnotify_destroy_group() + - LP: #922906 + * SAUCE: fsnotify: introduce fsnotify_get_group() + - LP: #922906 + * SAUCE: fsnotify: use reference counting for groups + - LP: #922906 + * SAUCE: fsnotify: take groups mark_lock before mark lock + - LP: #922906 + * SAUCE: fanotify: add an extra flag to mark_remove_from_mask that + indicates whether a mark should be destroyed + - LP: #922906 + * SAUCE: fsnotify: use a mutex instead of a spinlock to protect a groups + mark list + - LP: #922906 + * SAUCE: fsnotify: pass group to fsnotify_destroy_mark() + - LP: #922906 + * SAUCE: fsnotify: introduce locked versions of fsnotify_add_mark() and + fsnotify_remove_mark() + - LP: #922906 + * SAUCE: fsnotify: don't put marks on temporary list when clearing marks + by group + - LP: #922906 + * SAUCE: fsnotify: change locking order + - LP: #922906 + + [ Tim Gardner ] + + * [Config] CONFIG_NFC_LLCP=y + * [Config] get-firmware: Filter new files through fwinfo + * [Config] CONFIG_MTD_NAND_DOCG4=m for all arches + * [Config] CONFIG_DRM_EXYNOS_HDMI=y + * [Config] CONFIG_XEN=y for all arches + * [Config] CONFIG_SND_OMAP_SOC_ZOOM2=m + * [Config] CONFIG_MMC_DW_EXYNOS=m + * [Config] CONFIG_GPIO_ADNP=m + * [Config] find-obsolete-firmware: Use correct path + * rebase to v3.7-rc8 + - LP: #1084640 + + [ Upstream Kernel Changes ] + + * Revert "VFS: don't do protected {sym,hard}links by default" + - LP: #1084192 + + -- Tim Gardner Wed, 28 Nov 2012 16:07:08 +0000 + +linux (3.7.0-4.12) raring; urgency=low + + [ Tim Gardner ] + + * Revert "[Config] Use -j1 for headers_install" + * Revert "[Config] install-arch-headers needs a valid config" + Strayed into the weeds in search of the root cause of the periodic + build failure. + Fixes powerpc FTBS introduced in -4.11. + * [Config] hmake -j1 + The kernel makefile appears to have parallel dependency + problems for the install_headers target. This appears to be the root + cause of a periodic build failure on N-way machines (see the sketch below). + + -- Leann Ogasawara Tue, 27 Nov 2012 12:33:06 -0800 + +linux (3.7.0-4.11) raring; urgency=low + + [ Tim Gardner ] + + * [Config] Use -j1 for headers_install + Also fixes a powerpc FTBS introduced by + "[Config] install-arch-headers needs a valid config". 
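Editor's aside: to make the -j1 headers_install workaround above concrete, the following is a minimal sketch of the idea -- keep the kernel build itself fully parallel and serialize only the headers_install invocation. Python, the build driver shape, and the INSTALL_HDR_PATH value are illustrative assumptions; the real packaging drives this directly from make (the hmake variable mentioned above).

    #!/usr/bin/env python3
    # Minimal sketch of the "-j1 for headers_install" idea described in the
    # changelog entries above: build the kernel with full parallelism, but
    # run the headers_install target serially, since it showed intermittent
    # parallel-dependency failures on many-core builders.  The install path
    # below is a placeholder, not the one used by the real debian/rules.
    import multiprocessing
    import subprocess

    jobs = multiprocessing.cpu_count()

    # Parallel build of the kernel proper is fine under -jN.
    subprocess.run(["make", f"-j{jobs}"], check=True)

    # Serialize only the headers_install step.
    subprocess.run(
        ["make", "-j1", "INSTALL_HDR_PATH=debian/tmp-headers/usr",
         "headers_install"],
        check=True,
    )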
+ + -- Tim Gardner Tue, 27 Nov 2012 10:19:30 -0700 + +linux (3.7.0-4.10) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] add rebuild-test support for autopkgtest + - LP: #1081500 + * [tests] move build tests out of the way + - LP: #1081500 + * [tests] add an autopkgtest rebuild test + - LP: #1081500 + + [ Tim Gardner ] + + * rebase to v3.7-rc7 + * SAUCE: Remove emi62 files duplicated in linux-firmware + * SAUCE: Remove sb16 files duplicated in linux-firmware + * SAUCE: Remove whiteheat files duplicated in linux-firmware + * SAUCE: Remove yamaha files duplicated in linux-firmware + * SAUCE: Remove dsp56k files used only by m68k + * SAUCE: firmware: Remove last vestiges of dabusb + * SAUCE: Remove vicam files duplicated in linux-firmware + * [Config] install-arch-headers needs a valid config + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc7 + - LP: #1076840 + - LP: #1081466 + + -- Leann Ogasawara Wed, 21 Nov 2012 06:07:23 -0800 + +linux (3.7.0-3.9) raring; urgency=low + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_X86_CPUFREQ_NFORCE2=y + - LP: #1079900 + * Add nfsv3 to nfs-modules udeb + + [ Paolo Pisati ] + + * [Config] SND_OMAP_SOC*=y + - LP: #1019321 + + [ Stefan Bader ] + + * SAUCE: (no-up) xen/netfront: handle compound page fragments on transmit + - LP: #1078926 + + [ Tim Gardner ] + + * Revert "SAUCE: SECCOMP: audit: always report seccomp violations" + - LP: #1079469 + * Revert "SAUCE: omap3 clocks .dev_id = NULL" + * rebase to v3.7-rc6 + * SAUCE: script to detect obsolete firmware + * SAUCE: Remove yam files duplicated in linux-firmware + * SAUCE: Remove tehuti files duplicated in linux-firmware + * SAUCE: Remove matrox files duplicated in linux-firmware + * SAUCE: Remove cxgb3 files duplicated in linux-firmware + * SAUCE: Remove r128 files duplicated in linux-firmware + * SAUCE: Remove acenic files duplicated in linux-firmware + * SAUCE: Remove keyspan files duplicated in linux-firmware + * SAUCE: Remove sun files duplicated in linux-firmware + * SAUCE: Remove radeon files duplicated in linux-firmware + * SAUCE: Update bnx2x firmware to 7.8.2.0 + * [Config] generic.inclusion-list: econet has disappeared + + [ Upstream Kernel Changes ] + + * seccomp: forcing auditing of kill condition + - LP: #1079469 + * rebase to v3.7-rc6 + + -- Leann Ogasawara Tue, 20 Nov 2012 12:28:55 -0800 + +linux (3.7.0-2.8) raring; urgency=low + + [ Andy Whitcroft ] + + * Revert "overlayfs: disable until FTBS is fixed" + * Revert "ubuntu: overlayfs" + * Revert "ubuntu: AUFS" + * ubuntu: overlayfs -- overlayfs: add statfs support + * ubuntu: overlayfs -- ovl: switch to __inode_permission() + * ubuntu: overlayfs -- overlayfs: copy up i_uid/i_gid from the underlying + inode + - LP: #944386 + * ubuntu: AUFS (no-squash): basic framework and update machinary + * ubuntu: AUFS (no-squash) -- aufs3-base.patch + * ubuntu: AUFS (no-squash) -- aufs3-standalone.patch + * ubuntu: AUFS: aufs-update -- follow the uapi header changes + * ubuntu: AUFS -- update to f2873474324d0a31af4340554b9715f51331bc7f + * ubuntu: AUFS (no-squash) -- reenable + - LP: #1079193 + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: add i_op->dentry_open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + * ubuntu: overlayfs -- vfs: export __inode_permission() 
to modules + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Robin Dong ] + + * ubuntu: overlayfs -- overlayfs: fix possible leak in ovl_new_inode + * ubuntu: overlayfs -- overlayfs: create new inode in ovl_link + + -- Andy Whitcroft Thu, 15 Nov 2012 13:35:12 +0000 + +linux (3.7.0-1.7) raring; urgency=low + + [ Tim Gardner ] + + * [Config] Drop dependency on libaudit-dev + It's a universe package which causes an FTBS on the builders. + libaudit-dev is not strictly required for the perf tools build. + + -- Tim Gardner Wed, 14 Nov 2012 10:08:13 -0700 + +linux (3.7.0-1.6) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] enforce -- switch CONFIG_NVRAM to more readable form + * [Config] better encode the CONFIG_NVRAM constraint + * enforcer -- fix debugging output + + [ Ben Collins ] + + * [Config] Add custom_override rule to allow for alternate kernel + file/install + * [Config] Use SRCPKGNAME as prefix for indep linux headers package + + [ Tim Gardner ] + + * [Config] Dropped armel + * Drop highbank from ABI fetch list + * [Config] Use dh_prep instead of 'dh_clean -k' + * [Config] Build depend on libaudit-dev, libunwind8-dev for tools + * [Config] Document binary-indep dependency chain + * rebase to v3.7-rc5 + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc5 + + -- Tim Gardner Tue, 13 Nov 2012 07:13:37 -0500 + +linux (3.7.0-0.5) raring; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_AMD_IOMMU_V2=m + - LP: #1071520 + * [Config] CONFIG_MTD_ONENAND_SIM=n for armel + Fixes FTBS + + -- Tim Gardner Thu, 08 Nov 2012 15:45:39 -0500 + +linux (3.7.0-0.4) raring; urgency=low + + [ Ben Collins ] + + * [Config] Update enforce rule for CONFIG_NVRAM to better suit flavours + + [ Tim Gardner ] + + * [Config] do_tools=false for arm + + -- Tim Gardner Thu, 08 Nov 2012 05:39:51 -0700 + +linux (3.7.0-0.3) raring; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_DRM_EXYNOS_HDMI=n for armhf + * [Config] CONFIG_MTD_NAND_DOCG4=n for armel/armhf + * [Config] Drop highbank harder + + -- Tim Gardner Wed, 07 Nov 2012 18:11:45 +0000 + +linux (3.7.0-0.2) raring; urgency=low + + [ Andy Whitcroft ] + + * [Config] add fs/udf to linux-image to support DVD/CD formats in virtual + instances + - LP: #1066921 + * [Config] drop highbank builds + + [ Jeremy Kerr ] + + * SAUCE: efivarfs: Implement exclusive access for {get, set}_variable + - LP: #1063061 + + [ Leann Ogasawara ] + + * Reinstate dropped.txt from Ubuntu-3.7.0-0.1-rc1 + + [ Tim Gardner ] + + * [Config] Dropped powerpc/ppc64 in favour of the community kernel + * [Config] CONFIG_MODULE_SIG=y for amd64,i386, and highbank + * rebase to v3.7-rc4 + * SAUCE: MODSIGN: Emit error for incorrectly signed module + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc4 + + -- Tim Gardner Mon, 05 Nov 2012 05:35:41 -0700 + +linux (3.7.0-0.1) raring; urgency=low + + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc3 + - LP: #1056078 + + [ Upstream Kernel Changes ] + + * rebase to v3.7-rc2 + - LP: #1060729 + - LP: #1059523 + - LP: #1006690 + - LP: #1049623 + - LP: #1046512 + - LP: #1052499 + - LP: #1037642 + - LP: #559939 + - LP: #1052460 + - LP: #939161 + - LP: #1046734 + + -- Tim Gardner Tue, 02 Oct 2012 08:13:07 -0600 + +linux (3.6.0-0.1) UNRELEASED; urgency=low + + + [ Upstream Kernel Changes ] + + * rebase to v3.6 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc7 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc6 + - LP: #1000424 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc5 + - LP: 
#1040077 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc4 + + [ Upstream Kernel Changes ] + + * rebase to v3.6-rc3 + - LP: #1038651 + - LP: #1034779 + + -- Leann Ogasawara Tue, 24 Jul 2012 06:37:09 -0700 + +linux (3.5.0-6.6) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION enable IPv6 + experimental features + * SAUCE: highbank -- export clock functions for modules + * [Config] highbank -- reenable CONFIG_TOUCHSCREEN_W90X900 + * [Config] highbank -- renenable CONFIG_SERIO_AMBAKMI + * [Config] highbank -- reenable CONFIG_RFKILL_GPIO + * [Config] highbank -- reenable CONFIG_MMC_SDHCI_PXAV3 + * [Config] highbank -- reenable CONFIG_MMC_SDHCI_PXAV3 + * [Config] highbank -- reenable CONFIG_KEYBOARD_SAMSUNG + * [Config] highbank -- reenable CONFIG_FB_ARMCLCD + * [Config] highbank -- reenable CONFIG_DW_DMAC + * [Config] highbank -- reenable CONFIG_USB_R8A66597_HCD + * [Config] highbank -- reenable CONFIG_USB_MV_UDC + * [Config] highbank -- reenable CONFIG_USB_DWC3 + * [Config] highbank -- reenable CONFIG_SATA_MV + * [Config] highbank -- reenable CONFIG_PATA_ARASAN_CF + * [Config] highbank -- CONFIG_CAN_C_CAN_PLATFORM + * [Config] highbank -- reenable CONFIG_MMC_ARMMMCI + * [Config] highbank -- reenable CONFIG_SERIAL_AMBA_PL010 + * [Config] highbank -- reenable CONFIG_ATMEL_PWM + * [Config] highbank -- enable CONFIG_CHECKPOINT_RESTORE + * [Config] highbank -- enable CONFIG_EXPERT + * [Config] highbank -- enable CONFIG_CHECKPOINT_RESTORE + * [Config] enable CONFIG_USB_DYNAMIC_MINORS + * [Config] enable CONFIG_USB_EHCI_TT_NEWSCHED + * [Config] enable CONFIG_USB_ETH_EEM + * [Config] enable CONFIG_USB_HCD_BCMA/CONFIG_USB_HCD_SSB + * [Config] disable CONFIG_USB_M66592 + * [Config] enable CONFIG_USB_NET2272 + * [Config] enable CONFIG_USB_R8A66597 + * [Config] annotate: CONFIG_USB_OMAP not required for our h/w + * [Config] set CONFIG_USB_MUSB_HDRC=m for omap + * [Config] annotate: CONFIG_USB_G_MULTI fix rule + * [Config] CONFIG_USB_GPIO_VBUS=m for OMAP + * [Config] Enable CONFIG_DRM_AST/_CIRRUS_QEMU/_MGAG200 + * [Config] sync configuration armhf omap -> armel omap + * [Config] annotate: CONFIG_IIO triggers build failures on OMAP4 + * [Config] disable CONFIG_OMAP_IOVMM is deprecated + + [ Bryan Wu ] + + * [Config] change default IO scheduler from CFQ to Deadline + + [ Leann Ogasawara ] + + * Revert "[Config] Temporarily disable CONFIG_MV643XX_ETH on powerpc" + * [Config] Disable CONFIG_MOUSE_INPORT + + [ Tim Gardner ] + + * SAUCE: firmware: Update bnx2x to current firmware version 7.2.51 + * [Config] Add bnx2x firmware to nic-modules udeb + * SAUCE: Add script to convert firmware to ihex format + * SAUCE: firmware: Upgrade bnx2 to current versions + * [Config] Add tigon firmware to nic-modules udeb + * [Config] CONFIG_EARLY_PRINTK_DBGP=y + - LP: #1026761 + * SAUCE: Remove redundant cis firmware + * SAUCE: Remove redundant emi26 firmware + * SAUCE: Remove redundant ttusb-budget firmware + * SAUCE: Remove redundant sun/cassini firmware + * SAUCE: Remove redundant ositech/Xilinx7OD firmware + * SAUCE: Remove redundant 3com/typhoon.bin firmware + * SAUCE: Remove redundant yamaha/ds1 firmware + * SAUCE: Remove redundant keyspan_pda firmware + * rebase to v3.5 + + [ Upstream Kernel Changes ] + + * rebase to v3.5 + - LP: #1027828 + + -- Leann Ogasawara Mon, 23 Jul 2012 05:57:04 -0700 + +linux (3.5.0-5.5) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] annotations: initial import of configuration annotations + + [ Bryan Wu ] + 
+ * [Config] enforcer -- add CONFIG_I2C_DESIGNWARE_PLATFORM enforce checker + + [ Leann Ogasawara ] + + * Rebase to v3.5-rc7 + + [ Manoj Iyer ] + + * SAUCE: Bluetooth: btusb: Add vendor specific ID (0a5c:21f4) BCM20702A0 + - LP: #1010281 + + [ Tim Gardner ] + + * [Config] enable CONFIG_I2C_HELPER_AUTO for all flavours as policy + expects + * [Config] CONFIG_I2O_CONFIG_OLD_IOCTL=n + * [Config] CONFIG_BRIDGE_EBT_ULOG=n + * [Config] CONFIG_IP_NF_QUEUE=n + * [Config] CONFIG_MTD_DOC2000=n + * [Config] CONFIG_PRINT_QUOTA_WARNING=n + * [Config] CONFIG_PRISM54=n + * [Config] CONFIG_SCx200_I2C=n + * [Config] CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc7 + + -- Leann Ogasawara Mon, 16 Jul 2012 15:38:41 -0700 + +linux (3.5.0-4.4) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Packaging] getabis should be extracting all packages + - LP: #1021174 + * [Config] getabis -- series uses linux-image-extra + - LP: #1021174 + * rebase to v3.5-rc6 + + [ Bryan Wu ] + + * [Config] built-in CONFIG_MICREL_PHY as other PHY drivers for all + flavours + * [Config] sync CONFIG_MOUSE_PS2_ config for all flavours + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_RT2800USB_RT35XX and CONFIG_RT2800USB_RT53XX + - LP: #1019561 + + [ Paolo Pisati ] + + * [Config] SND_OMAP_SOC, SND_OMAP_SOC_MCBSP and SND_OMAP_SOC_OMAP3_BEAGLE =y + - LP: #1019321 + + [ Stefan Bader ] + + * SAUCE: (pre-up) net: dont use __netdev_alloc_skb for bounce buffer + - LP: #1018456 + * (config) Disable ACPI_PROCFS_POWER + + [ Tim Gardner ] + + * [Config] CONFIG_ACPI_BGRT=y + * Extract firmware module info during getabi + - LP: #1021174 + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc6 + + -- Leann Ogasawara Mon, 09 Jul 2012 08:50:20 -0700 + +linux (3.5.0-3.3) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_MEMTEST=y + - LP: #1004535 + * [Config] config-check: add support for a cut operation + * [Config] enforcer -- switch to cut where appropriate + + [ Leann Ogasawara ] + + * Rebase to v3.5-rc5 + * [Config] Updateconfigs after rebase to v3.5-rc5 + + [ Luis Henriques ] + + * SAUCE: ocfs2: Fix NULL pointer dereferrence in + __ocfs2_change_file_space + - LP: #1006012 + + [ Seth Forshee ] + + * SAUCE: (drop after 3.5) drm/i915: ignore pipe select bit when checking + for LVDS register initialization + - LP: #1012800 + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc5 + - LP: #1013183 + - LP: #1017017 + - LP: #884652 + + -- Leann Ogasawara Mon, 02 Jul 2012 06:41:58 -0700 + +linux (3.5.0-2.2) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.5-rc4 + + [ Arend van Spriel ] + + * SAUCE: (drop after 3.5) brcmsmac: fix NULL pointer crash in + brcms_c_regd_init() + - LP: #950320 + + [ Bryan Wu ] + + * [Config] Sync CONFIG_CGROUP_MEM_RES_CTLR_SWAP for ARM + + [ Chris J Arges ] + + * PACKAGING: add .gnu_debuglink sections to .ko files + - LP: #669641 + + [ Leann Ogasawara ] + + * d-i: Add hid-generic to input-modules + - LP: #1017879 + + [ Ming Lei ] + + * SAUCE: Revert "mmc: omap_hsmmc: Enable Auto CMD12" + - LP: #1017717, #225 + + [ Paolo Pisati ] + + * SAUCE: Revert "Fix OMAP EHCI suspend/resume failure (i693)" + - LP: #1017718 + * [Config] Disable generic USB_EHCI_HCD_PLATFORM on omap3 + + [ Seth Forshee ] + + * SAUCE: (drop after 3.5) brcm80211: smac: don't set up tx power limits + during initialization + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: always set channel specified + by mac80211 + - LP: #950320 + * SAUCE: 
(drop after 3.5) brcm80211: smac: remove unused code for 40MHz + channels + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: clean up channel.c + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: inform mac80211 of the X2 + regulatory domain + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: enable/disable radio on + regulatory updates + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: use mac80211 channel data for + tx power limits + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: don't validate channels + against internal regulatory data + - LP: #950320 + * SAUCE: (drop after 3.5) brcm80211: smac: use current regulatory domain + when checking whether OFDM is allowed + - LP: #950320 + + [ Tim Gardner ] + + * [Config] Enable CONFIG_CGROUPS for highbank + - LP: #1014692 + * [Config] FB_OMAP*=y and PANEL_TFP410=y + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc4 + + -- Leann Ogasawara Tue, 26 Jun 2012 06:21:05 -0700 + +linux (3.5.0-1.1) quantal-proposed; urgency=low + + [ Andy Whitcroft ] + + * [Config] highbank -- enable CONFIG_RFKILL=y and CONFIG_CAN=m + + [ Leann Ogasawara ] + + * Rebase to v3.5-rc1 + * [Config] Remove USB_DEVICEFS from the config enforcer + * [Config] Updateconfigs after rebase to v3.5-rc1 + * [Config] Temporarily disable CONFIG_MACH_NOKIA_RX51 on arm + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EETI on arm + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EGALAX on arm + * [Config] Temporarily disable CONFIG_EZX_PCAP on arm + * [Config] Temporarily disable CONFIG_LIS3L02DQ on arm + * [Config] Temporarily disable CONFIG_TI_CPSW on arm + * [Config] Temporarily disable CONFIG_GPIO_EM on arm + * [Config] Temporarily disable CONFIG_SERIAL_8250_EM on armhf + * [Config] Temporarily disable CONFIG_STMMAC_ETH on armhf + * [Config] Temporarily disable CONFIG_HW_RANDOM_ATMEL on armhf + * Rebase to v3.5-rc2 + * [Config] Updateconfigs after rebase to v3.5-rc2 + * [Config] Temporarily disable CONFIG_MV643XX_ETH on powerpc + * Rebase to v3.5-rc3 + * [Config] Updateconfigs after rebase to v3.5-rc3 + + [ Paul Mundt ] + + * SAUCE: fix bug.h's inclusion of kernel.h + + [ Stefan Bader ] + + * SAUCE: Fix compile failures of dm-raid45 + * [Config] Enable dm-raid45 + * Move dependency on crda to extra package + - LP: #657901 + * SAUCE: Mask CR4 writes on older Xen hypervisors + + [ Upstream Kernel Changes ] + + * rebase to v3.5-rc3 + - LP: #993162 + - LP: #925577 + * rebase to v3.5-rc2 + * rebase to v3.5-rc1 + - LP: #955892 + - LP: #978038 + - LP: #987371 + - LP: #929545 + - LP: #942316 + - LP: #903853 + + -- Leann Ogasawara Fri, 08 Jun 2012 14:28:46 -0700 + +linux (3.4.0-5.11) quantal-proposed; urgency=low + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_ARM_LPAE + - LP: #1009061 + + [ Oleksij Rempel ] + + * SAUCE: b43: do not call ieee80211_unregister_hw if we are not registred + - LP: #1008905 + + [ Paolo Pisati ] + + * [Config] omap3: MFD_OMAP_USB_HOST is usb host in omap2+. 
+ - LP: #1009061 + + -- Leann Ogasawara Tue, 05 Jun 2012 08:06:28 -0700 + +linux (3.4.0-4.10) quantal; urgency=low + + [ Leann Ogasawara ] + + * Temporarily disable ABI and module check + + -- Leann Ogasawara Mon, 04 Jun 2012 20:27:31 -0700 + +linux (3.4.0-4.9) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] fix config split to avoid the shared config + * [Config] updateconfigs following split config fix + * [Config] linux-image-extras needs full postinst + * [Config] CONFIG_BLK_DEV_NVME commonise across architectures + * [Config] CONFIG_HP_WATCHDOG enable as module + * [Config] CONFIG_PDC_ADMA is not boot essential + * [Config] CONFIG_XEN_ACPI_PROCESSOR should be enabled on x86 + * [Config] CONFIG_VT6655/CONFIG_VT6656=m + * [Config] CONFIG_TRANZPORT=m commonise + * [Config] CONFIG_R3964=m commonise + * [Config] CONFIG_SCSI_DH=m commonise + * [Config] CONFIG_SCSI_IBMVSCSIS=m commonise + * [Config] CONFIG_AMD_PHY=y phys are not autoloadable + * [Config] CONFIG_SCSI_QLA_ISCSI=m commonise + * [Config] CONFIG_SCSI_SPI_ATTR=m commonise + * [Config] CONFIG_USB_SN9C102 is deprecated disable + * [Config] CONFIG_USB_SI470X=m commonise + * [Config] CONFIG_USB_ET61X251=m commonise + * [Config] CONFIG_RTS_PSTOR=m commonise + * [Config] CONFIG_SCANLOG=m commonise + * [Config] CONFIG_SCSI_SYM53C8XX_2=m commonise + * [Config] CONFIG_SM_FTL=m commonise + * [Config] CONFIG_SOLO6X10=m commonise + * [Config] CONFIG_SND_PCM_OSS=n using pulseaudio emulation instead + * [Config] CONFIG_SPI_DESIGNWARE=m commonise + * [Config] CONFIG_SPI_SPIDEV=m commonise + * [Config] CONFIG_TABLET_USB_WACOM=m commonise + * [Config] CONFIG_TPS65010=m commonise + * [Config] CONFIG_STE10XP=y commonise + * [Config] CONFIG_X25_ASY=m commonise + * [Config] CONFIG_USB_MON=m commonise + * [Config] CONFIG_VME_BUS=m commonise + * [Config] CONFIG_W35UND=m commonise + * [Config] -CONFIG_TCG_TPM=y commonise + * [Config] highbank -- commonise filesystems + * [Config] highbank -- commonise subsystems + * [Config] highbank -- commonise network protocols + * [Config] highbank -- commonise input drivers + * [Config] highbank -- commonise CRYPTO options + * [Config] highbank -- commonise HID options + * [Config] highbank -- commonise sensors options + * [Config] highbank -- commonise EXPORTFS/FHANDLE + * [Config] highbank -- commonise CONFIG_CRYPTO_LZO + * [Config] highbank -- commonise ENCRYPTED_KEYS + * [Config] highbank -- commonise CONFIG_ATALK + * [Config] highbank -- commonise INET/INET6 + * [Config] highbank -- commonise NLS + * [Config] highbank -- commonise BLK/CHR + * [Config] highbank -- CONFIG_EXT2_FS=y boot essential on highbank + * [Config] highbank -- commonise INET/INET6 part 2 + * [Config] highbank -- commonise PHY settings + * [Config] highbank -- commonise CRC settings + * [Config] highbank -- commonise BINFMT settings + * [Config] highbank -- commonise DM settings + * [Config] highbank -- commonise RTC_DRV settings + * [Config] highbank -- commonise KEYBOARD/MOUSE settings + * [Config] highbank -- commonise USB settings + * [Config] highbank -- commonise GPIO settings + * [Config] highbank -- commonise I2C settings + * [Config] highbank -- commonise numerous subsystem selectors + * [Config] highbank -- commonise A-C modules missmatches + * [Config] highbank -- commonise D-F modules missmatches + * [Config] CONFIG_AUDIT_LOGINUID_IMMUTABLE incompatible with upstart + * [Config] highbank -- commonise G-I modules missmatches + * [Config] highbank -- commonise J-L modules missmatches + * [Config] highbank -- commonise M 
modules missmatches + * [Config] highbank -- commonise N-P modules missmatches + * [Config] highbank -- commonise Q-R modules missmatches + * [Config] highbank -- commonise S modules missmatches -- part 1 + * [Config] highbank -- commonise S modules missmatches -- part 2 + * [Config] highbank -- commonise T modules missmatches + * [Config] highbank -- commonise U-Z modules missmatches + + [ Ike Panhc ] + + * [Config] add highbank flavour + - LP: #1000831 + + [ Mark Langsdorf ] + + * SAUCE: arm highbank: add support for pl320-ipc driver + - LP: #1000831 + + [ Rob Herring ] + + * SAUCE: input: add a key driver for highbank + - LP: #1000831 + * SAUCE: ARM: highbank: Add smc calls to enable/disable the L2 + - LP: #1000831 + * SAUCE: force DMA buffers to non-bufferable on highbank + - LP: #1000831 + * SAUCE: net: calxedaxgmac: fix net timeout recovery + - LP: #1000831 + + [ Tim Gardner ] + + * [Config] CONFIG_IWLWIFI_EXPERIMENTAL_MFP=n + * [Config] CONFIG_PCI_REALLOC_ENABLE_AUTO=y + * [Config] CONFIG_CIFS_EXPERIMENTAL has disappeared + * [Config] Homogenize CIFS configs across all arches + * [Config] armhf should not be skipabi or skipmodules + - LP: #1006913 + + -- Leann Ogasawara Mon, 04 Jun 2012 05:52:49 -0700 + +linux (3.4.0-3.8) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] include include/generated/compile.h + - LP: #942569 + * [Config] fix up postinst to ensure we know which error is which + - LP: #1002388 + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: async_populate_rootfs: fix build warnings + - LP: #1003417 + + [ John Johansen ] + + * Revert "SAUCE: AppArmor: Add the ability to mediate mount" + * SAUCE: apparmor: Add the ability to mediate mount + * SAUCE: AppArmor: basic networking rules + * SAUCE: apparmor: fix profile lookup for unconfined + - LP: #978038, #987371 + * SAUCE: apparmor: fix long path failure due to disconnected path + - LP: #955892 + + [ Mario Limonciello ] + + * SAUCE: dell-laptop: rfkill blacklist Dell XPS 13z, 15 + - LP: #901410 + + [ Stefan Bader ] + + * (config) Built-in xen-acpi-processor + + [ Tim Gardner ] + + * [Config] CONFIG_NET_DSA=m + - LP: #1004148 + * [Config] Ensure CONFIG_XEN_ACPI_PROCESSOR=y for amd64 + + -- Leann Ogasawara Fri, 25 May 2012 11:38:33 -0700 + +linux (3.4.0-3.7) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] drop the virtual flavour in favour of a split generic et al + * [Config] enforcer -- drop IDLE enforcement + * [Config] enable CONFIG_SCSI_VIRTIO=m for amd64 + * [Config] updateconfigs following removal of -virtual + + [ Leann Ogasawara ] + + * Rebase to v3.4 + + [ Seth Forshee ] + + * [Config] disable CONFIG_B43_BCMA_EXTRA + + [ Tim Gardner ] + + * [Config] Check for extras when building udebs + * [Config] Collapsed generic-pae into generic [i386] + + [ Upstream Kernel Changes ] + + * rebase to v3.4 + + -- Leann Ogasawara Mon, 21 May 2012 07:23:47 -0700 + +linux (3.4.0-2.6) quantal; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: overlayfs -- overlayfs: update touch_atime() usage" + * Revert "ubuntu: overlayfs -- overlayfs: switch from d_alloc_root() to + d_make_root()" + * Revert "ubuntu: overlayfs -- overlayfs: follow header cleanup" + * Revert "ubuntu: overlayfs -- overlayfs: apply device cgroup and + security permissions to overlay files" + * Revert "ubuntu: overlayfs -- fs: limit filesystem stacking depth" + * Revert "ubuntu: overlayfs -- overlay: overlay filesystem documentation" + * Revert "ubuntu: overlayfs -- overlayfs: implement show_options" + * Revert "ubuntu: overlayfs -- overlayfs: 
add statfs support" + * Revert "ubuntu: overlayfs -- overlay filesystem" + * Revert "ubuntu: overlayfs -- vfs: introduce clone_private_mount()" + * Revert "ubuntu: overlayfs -- vfs: export do_splice_direct() to modules" + * Revert "ubuntu: overlayfs -- vfs: add i_op->open()" + * Revert "ubuntu: overlayfs -- vfs: pass struct path to __dentry_open()" + * ubuntu: overlayfs -- overlayfs: add statfs support + * ubuntu: overlayfs -- inode_only_permission: export inode level + permissions checks + * ubuntu: overlayfs -- overlayfs: switch to use inode_only_permissions + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: pass struct path to __dentry_open() + * ubuntu: overlayfs -- vfs: add i_op->open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Robin Dong ] + + * ubuntu: overlayfs -- overlayfs: fix possible leak in ovl_new_inode + * ubuntu: overlayfs -- overlayfs: create new inode in ovl_link + + [ Tim Gardner ] + + * [Config] perarch and indep tools builds need separate build directories + * Prevent upgrading a non-PAE CPU + * perf is not parallel build safe + + -- Leann Ogasawara Wed, 16 May 2012 08:43:18 -0700 + +linux (3.4.0-2.5) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] perarch and indep tools builds need separate build directories + + [ Tim Gardner ] + + * Prevent upgrading a non-PAE CPU + * [Config] build debug + * [Config] perf tools are not parallel build safe + + -- Leann Ogasawara Tue, 15 May 2012 11:37:53 -0700 + +linux (3.4.0-2.4) quantal; urgency=low + + [ Leann Ogasawara ] + + * Revert "SAUCE: fsam7400: use UMH_WAIT_PROC consistently" + * Revert "ubuntu: fsam7400 select CHECK_SIGNATURE and depend on X86" + * Revert "ubuntu: fsam7400: Depend on CHECK_SIGNATURE" + * Revert "ubuntu: fsam7400 -- Cleanup Makefile" + * Revert "ubuntu: fsam7400 -- kill switch for Fujitsu Siemens Amilo M + 7400" + * Revert "ubuntu: omnibook: fix source file newline" + * Revert "ubuntu: omnibook -- update BOM" + * Revert "SAUCE: Make CONFIG_{OMNIBOOK, AVERATEC_5100P, PACKARDBELL_E5} + depend on X86" + * Revert "ubuntu: omnibook -- Added missing BOM file" + * Revert "ubuntu: omnibook -- support Toshiba (HP) netbooks" + * Revert "ubuntu: nx-emu - i386: mmap randomization for executable + mappings" + * Revert "SAUCE: disable_nx should not be in __cpuinitdata section for + X86_32" + * Revert "ubuntu: nx-emu - i386: NX emulation" + * Revert "ubuntu: rfkill drivers -- version 1.3" + * Temporarily disable module check + * [Config] Remove CONFIG_FSAM7400 + * [Config] Remove CONFIG_OMNIBOOK + * [Config] Update configs + * Rebase to v3.4-rc7 + * SAUCE: genirq: export handle_edge_irq() and irq_to_desc() + + [ Tim Gardner ] + + * Updated generic-pae description + * Rebase to v3.4-rc6 + * install-tools depends on build targets + + [ Upstream Kernel Changes ] + + * kconfig: in debug mode some 0 length message prints occur + * rebase to v3.4-rc7 + * rebase to v3.3-rc6 + + -- Leann Ogasawara Mon, 14 May 2012 08:22:56 -0700 + +linux (3.4.0-1.3) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] control.stub is an intermediate product not a dependancy + - LP: #992414 + + [ Leann Ogasawara ] + + * remove i386 generic from getabis + + [ 
Upstream Kernel Changes ] + + * (pre-stable) b43: only reload config after successful initialization + - LP: #950295 + + -- Leann Ogasawara Wed, 02 May 2012 09:48:14 -0700 + +linux (3.4.0-1.2) quantal; urgency=low + + [ Andy Whitcroft ] + + * [Config] add build depends for flex, bison and pkg-config + + -- Andy Whitcroft Tue, 01 May 2012 13:15:41 +0100 + +linux (3.4.0-1.1) quantal; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: overlayfs -- overlayfs: apply device cgroup and security + permissions to overlay files + - LP: #915941, #918212 + - CVE-2012-0055 + + [ Leann Ogasawara ] + + * Open Q + * Rebase to v3.4-rc5 + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EETI on arm + * [Config] Temporarily disable CONFIG_TOUCHSCREEN_EGALAX on arm + * [Config] Temporarily disable CONFIG_EZX_PCAP on arm + * [Config] Temporarily disable CONFIG_MFD_OMAP_USB_HOST on arm + * [Config] Temporarily disable CONFIG_LIS3L02DQ on arm + * [Config] Temporarily disable CONFIG_USB_EHCI_HCD_PLATFORM on arm + * [Config] Temporarily disable CONFIG_TI_CPSW on arm + * [Config] Temporarily disable CONFIG_AX88796 on arm + + [ Upstream Kernel Changes ] + + * vfs: pass struct path to __dentry_open() + * vfs: add i_op->open() + * vfs: export do_splice_direct() to modules + * vfs: introduce clone_private_mount() + * overlay filesystem + * overlayfs: add statfs support + * overlayfs: implement show_options + * overlay: overlay filesystem documentation + * fs: limit filesystem stacking depth + * overlayfs: follow header cleanup + * overlayfs: switch from d_alloc_root() to d_make_root() + * overlayfs: update touch_atime() usage + * rebase to v3.4-rc5 + - LP: #950490 + * rebase to v3.4-rc4 + * rebase to v3.4-rc3 + * rebase to v3.4-rc2 + * rebase to v3.4-rc1 + * rebase to v3.3 + * rebase to v3.3-rc7 + * rebase to v3.3-rc6 + * rebase to v3.3-rc5 + * rebase to v3.3-rc4 + - LP: #900802 + - LP: #930842 + * rebase to v3.3-rc3 + - LP: #924320 + - LP: #923316 + - LP: #923409 + - LP: #918254 + * rebase to v3.3-rc2 + * rebase to v3.3-rc1 + - LP: #795823 + - LP: #909419 + - LP: #910792 + - LP: #878701 + - LP: #724831 + + -- Leann Ogasawara Wed, 25 Jan 2012 06:50:04 -0800 + +linux (3.2.0-10.18) precise; urgency=low + + [ Tim Gardner ] + + * SAUCE: ecryptfs: Print inode on metadata error + + [ Upstream Kernel Changes ] + + * Revert "proc: enable writing to /proc/pid/mem" + - LP: #919115 + - CVE-2012-0056 + * (pre-stable) ALSA: HDA: Use LPIB position fix for Macbook Pro 7, 1 + - LP: #909419 + + -- Andy Whitcroft Tue, 24 Jan 2012 10:15:12 +0000 + +linux (3.2.0-10.17) precise; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: overlayfs -- fs: limit filesystem stacking depth" + * Revert "SAUCE: overlayfs -- overlay: overlay filesystem documentation" + * Revert "SAUCE: overlayfs -- overlayfs: implement show_options" + * Revert "SAUCE: overlayfs -- overlayfs: add statfs support" + * Revert "SAUCE: overlayfs -- overlay filesystem" + * Revert "SAUCE: overlayfs -- vfs: introduce clone_private_mount()" + * Revert "SAUCE: overlayfs -- vfs: export do_splice_direct() to modules" + * Revert "SAUCE: overlayfs -- vfs: add i_op->open()" + * ensure debian/ is not excluded from git by default + * add new scripting to handle buglinks in rebases + * ubuntu: overlayfs -- overlayfs: add statfs support + * ubuntu: overlayfs -- overlayfs: apply device cgroup and security + permissions to overlay files + - LP: #915941, #918212 + - CVE-2012-0055 + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Leann Ogasawara ] + + * 
Revert "SAUCE: dmar: disable if ricoh multifunction detected" + * [Config] Disable CONFIG_INTEL_IOMMU_DEFAULT_ON + - LP: #907377, #911236 + * [Config] Enable CONFIG_IRQ_REMAP + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: pass struct path to __dentry_open() + * ubuntu: overlayfs -- vfs: add i_op->open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Upstream Kernel Changes ] + + * (pre-stable) x86/PCI: amd: factor out MMCONFIG discovery + - LP: #647043 + * (pre-stable) PNP: work around Dell 1536/1546 BIOS MMCONFIG bug that + breaks USB + - LP: #647043 + + -- Leann Ogasawara Mon, 16 Jan 2012 07:10:08 -0800 + +linux (3.2.0-9.16) precise; urgency=low + + [ Andy Whitcroft ] + + * [Config] Enable numerous CONFIG_VIDEO_* cards on ARM + * [Config] pull ARM sound modules =m + * [Config] CONFIG_RTC_DRV_TEST is for testing only + * [Config] CONFIG_USB_DUMMY_HCD is testing only + * [Config] CONFIG_USB_FILE_STORAGE is deprecated + + [ Leann Ogasawara ] + + * Revert "[Config] Temporarily disable CONFIG_CAN_TI_HECC on armel" + * [Config] Enable CONFIG_HW_RANDOM_PASEMI=m + * [Config] Enable CONFIG_MMC_TMIO=m + * [Config] Enable CONFIG_MTD_NAND_FSL_ELBC=m + * [Config] Enable CONFIG_ISI=m + * [Config] Enable CONFIG_MMC=y + * [Config] Enable CONFIG_LIRC_PARALLEL=m + * [Config] Enable CONFIG_MAC_EMUMOUSEBTN=m + * [Config] Enable CONFIG_CHR_DEV_SG=y + * [Config] Enable CONFIG_GPIO_PCA953X=m + * [Config] Enable CONFIG_GPIO_TWL4030=m + * [Config] Enable CONFIG_INET_DIAG=m + * [Config] Enable CONFIG_NLS_ISO8859_1=m + * [Config] Enable CONFIG_NVRAM=m + * [Config] Enable CONFIG_SLIP=m + * [Config] Enable CONFIG_PC300TOO=m + * [Config] Enable CONFIG_TUN=y + * [Config] Enable CONFIG_NET_CLS_CGROUP=m + * [Config] Enable CONFIG_THERMAL=y + * [Config] Enable CONFIG_PPP=y + * [Config] Enable CONFIG_PCI_STUB=m + * Rebase to v3.2.1 + * [Config] Enable CONFIG_RTL8192E=m + * [Config] Enable CONFIG_RTS5139=m + + [ Stefan Bader ] + + * [Config] Make CONFIG_VIRTIO_(NET|BLK)=y + + [ Upstream Kernel Changes ] + + * ARM: restart: add restart hook to machine_desc record + * ARM: restart: allow platforms more flexibility specifying restart mode + * ARM: restart: move reboot failure handing into machine_restart() + * ARM: restart: remove argument to setup_mm_for_reboot() + * ARM: 7159/1: OMAP: Introduce local common.h files + * ARM: restart: only perform setup for restart when soft-restarting + * ARM: 7189/1: OMAP3: Fix build break in cpuidle34xx.c because of irq + function + * ARM: idmap: populate identity map pgd at init time using .init.text + * ARM: suspend: use idmap_pgd instead of suspend_pgd + * ARM: proc-*.S: place cpu_reset functions into .idmap.text section + * ARM: idmap: use idmap_pgd when setting up mm for reboot + * ARM: head.S: only include __turn_mmu_on in the initial identity mapping + * ARM: SMP: use idmap_pgd for mapping MMU enable during secondary booting + * ARM: 7194/1: OMAP: Fix build after a merge between v3.2-rc4 and ARM + restart changes + * ARM: lib: add call_with_stack function for safely changing stack + * ARM: reset: implement soft_restart for jumping to a physical address + * ARM: stop: execute platform callback from cpu_stop code + * ARM: kexec: use soft_restart for branching to the reboot buffer + * ARM: restart: 
omap: use new restart hook + * topdown mmap support + - LP: #861296 + + [ Upstream Kernel Changes ] + + * Rebase to v3.2.1 + + -- Leann Ogasawara Fri, 13 Jan 2012 20:32:08 +0100 + +linux (3.2.0-8.15) precise; urgency=low + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_ACPI_PROCFS + * Remove server from getabis + * Temporarily disable module check + * [Config] Disable CONFIG_MTD_TESTS + * [Config] Disable CONFIG_X86_E_POWERSAVER + * [Config] Set CONFIG_ARCNET=m + * [Config] Enable CONFIG_ATM_DUMMY=m + * [Config] Enable CONFIG_BLK_DEV_MD=y + * ubuntu: fsam7400 select CHECK_SIGNATURE and depend on X86 + * [Config] Enable CONFIG_BLK_DEV_SD=y + * [Config] Enable CONFIG_BLK_DEV_SR=y + * [Config] Enable CONFIG_BLK_DEV_UB=m + * [Config] Enable CONFIG_COPS=m + * [Config] Enable CONFIG_DVB_USB_EC168=m + * [Config] Enable CONFIG_ENC28J60=m + * [Config] Enable CONFIG_FB_UVESA=m + * [Config] Enable CONFIG_FB_ATY=m + * [Config] Enable CONFIG_BROADCOM_PHY=y + * [Config] Enable CONFIG_CICADA_PHY=y + * [Config] Enable CONFIG_DAVICOM_PHY=y + * [Config] Enable CONFIG_ICPLUS_PHY=y + * [Config] Enable CONFIG_LSI_ET1011C_PHY=y + * [Config] Enable CONFIG_LXT_PHY=y + * [Config] Enable CONFIG_MARVELL_PHY=y + * [Config] Enable CONFIG_NATIONAL_PHY=y + * [Config] Enable CONFIG_QSEMI_PHY=y + * [Config] Enable CONFIG_SMSC_PHY=y + * [Config] Enable CONFIG_VITESSE_PHY=y + * Add 3w-sas to scsi-modules + - LP: #776542 + + [ Mathieu Trudel-Lapierre ] + + * SAUCE: ipv6: make the net.ipv6.conf.all.use_tempaddr sysctl propagate + to interface settings + + [ Paolo Pisati ] + + * Revert "SAUCE: omap3: beagle: if rev unknown, assume xM revision C" + - LP: #912199 + * Revert "SAUCE: omap3: beagle: detect new xM revision B" + - LP: #912199 + * Revert "SAUCE: omap3: beaglexm: fix DVI initialization" + - LP: #912199 + + [ Upstream Kernel Changes ] + + * Bluetooth: Add support for BCM20702A0 [0a5c:21e3] + - LP: #906832 + + -- Leann Ogasawara Fri, 06 Jan 2012 10:02:03 -0800 + +linux (3.2.0-8.14) precise; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_SND_USB_6FIRE + - LP: #912197 + * rebase to mainline v3.2 final release + * updateconfigs following rebase to v3.2 final + * ubuntu: AUFS -- add BOM and automated update script + * ubuntu: AUFS -- include the aufs_types.h file in linux-libc-headers + - LP: #684666 + * ubuntu: AUFS -- update aufs-update to track new locations of headers + * ubuntu: AUFS -- clean up the aufs updater and BOM + * ubuntu: AUFS -- documentation on updating aufs2 + * ubuntu: AUFS -- aufs3-base.patch + * ubuntu: AUFS -- aufs3-standalone.patch + * ubuntu: AUFS -- fix undefined __devcgroup_inode_permission + * ubuntu: AUFS -- fix undefined security_path_link + * ubuntu: AUFS -- update to 4cf5db36bcd9748e8e7270022f295f84d1fc2245 + * ubuntu: AUFS -- updateconfigs following update + * ubuntu: AUFS -- suppress benign plink warning messages + - LP: #621195 + * ubuntu: AUFS -- enable in config and makefile + * ubuntu: AUFS -- disable in favor of overlayfs + * [Config] linux-virtual -- should include the extX modules + - LP: #912308 + + [ Tyler Hicks ] + + * SAUCE: eCryptfs: Improve statfs reporting + - LP: #885744 + + [ Upstream Kernel Changes ] + + * rebase to upstream v3.2 + + -- Leann Ogasawara Mon, 26 Dec 2011 20:24:30 -0800 + +linux (3.2.0-7.13) precise; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to upstream 3.2-rc7 + + -- Leann Ogasawara Mon, 19 Dec 2011 09:14:34 -0800 + +linux (3.2.0-6.12) precise; urgency=low + + [ Upstream Kernel Changes ] + + * rebase to upstream v3.2-rc6 + + 
-- Leann Ogasawara Fri, 16 Dec 2011 10:19:02 -0800 + +linux (3.2.0-5.11) precise; urgency=low + + [ Andy Whitcroft ] + + * enforcer -- allow arch and flavour predicates to take lists + * enforcer -- simplify armel/armhf specific options + * enforcer -- fix incorrectly specified flavour matches + + [ Leann Ogasawara ] + + * [Config] Disable IRQ_REMAP + * [Config] Enable CONFIG_SENSORS_LM95245=m + * [Config] Enable CONFIG_SENSORS_MAX1668=m + * [Config] Enable CONFIG_SENSORS_NTC_THERMISTOR=m + * [Config] Enable CONFIG_SENSORS_MAX6639=m + * [Config] Enable CONFIG_SENSORS_MAX6642=m + * [Config] Enable CONFIG_SENSORS_LINEAGE=m + * [Config] Enable CONFIG_CRYPTO_SALSA20=m + * [Config] Enable CONFIG_PATA_TOSHIBA=m + * [Config] Enable CONFIG_POHMELFS=m + * [Config] Enable CONFIG_NET_PACKET_ENGINE=y + * [Config] Enable CONFIG_PATA_OPTI=m + * add overlayfs to virtual inclusion list + - LP: #903897 + * add veth to virtual inclusion list + - LP: #903897 + * SAUCE: resolve WARNING: at drivers/block/floppy.c:2929 do_fd_request + + [ Paolo Pisati ] + + * [Config] DEFAULT_MMAP_MIN_ADDR=32k on arm + - LP: #903346 + + [ Tim Gardner ] + + * [Config] CONFIG_LOCKUP_DETECTOR=y + - LP: #903615 + + [ Upstream Kernel Changes ] + + * rebase to upstream 55b02d2f + + -- Leann Ogasawara Mon, 12 Dec 2011 07:08:10 -0800 + +linux (3.2.0-4.10) precise; urgency=low + + [ Kyle McMartin ] + + * SAUCE: dmar: disable if ricoh multifunction detected + - LP: #894070 + + [ Seth Forshee ] + + * SAUCE: dell-wmi: Demote unknown WMI event message to pr_debug + - LP: #581312 + + [ Tim Gardner ] + + * Start new release, Bump ABI, rebase to 3.2-rc5 + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_SENSORS_AK8975=m + + -- Tim Gardner Sat, 10 Dec 2011 08:57:04 -0700 + +linux (3.2.0-3.9) precise; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: ext4: correct partial write discard size calculation + - LP: #894768 + + [ Leann Ogasawara ] + + * Revert "SAUCE: x86, microcode, AMD: Restrict microcode reporting" + - LP: #892615 + + [ Matthew Garrett ] + + * SAUCE: pci: Rework ASPM disable code + + [ Upstream Kernel Changes ] + + * x86: Fix boot failures on older AMD CPU's + - LP: #892615 + * EHCI : Fix a regression in the ISO scheduler + - LP: #899165 + + -- Leann Ogasawara Mon, 05 Dec 2011 10:37:36 -0800 + +linux (3.2.0-3.8) precise; urgency=low + + [ Andy Whitcroft ] + + * armhf -- add d-i configuration + * armhf -- disable ABI checks for armhf + * armhf -- add arch to getabis config + + -- Andy Whitcroft Sat, 03 Dec 2011 14:22:52 +0000 + +linux (3.2.0-3.7) precise; urgency=low + + [ Stefan Bader ] + + * SAUCE: x86/paravirt: PTE updates in k(un)map_atomic need to be + synchronous, regardless of lazy_mmu mode + - LP: #854050 + + [ Tim Gardner ] + + * rebase to v3.2-rc4 + + -- Leann Ogasawara Fri, 02 Dec 2011 11:53:56 -0800 + +linux (3.2.0-2.6) precise; urgency=low + + [ Andy Whitcroft ] + + * armhf -- fix omap flavour to build on armhf + * [Config] CONFIG_PATA_MACIO=y to fix MAC qemu boot + + [ Borislav Petkov ] + + * SAUCE: x86, microcode, AMD: Restrict microcode reporting + - LP: #892615 + + [ Colin Watson ] + + * Add pata_macio to pata-modules + + [ Tim Gardner ] + + * [Config] Prefer crda over wireless-crda + * [Config] Fix virtual inclusion list. 
+ - LP: #897795 + + -- Leann Ogasawara Wed, 30 Nov 2011 06:09:35 -0800 + +linux (3.2.0-2.5) precise; urgency=low + + [ Paolo Pisati ] + + * [Config] PANEL_DVI=y + + -- Leann Ogasawara Mon, 28 Nov 2011 09:13:24 -0800 + +linux (3.2.0-2.4) precise; urgency=low + + [ Andy Whitcroft ] + + * rebase to v3.2-rc3 + + [ Leann Ogasawara ] + + * Revert "SAUCE: xen: Do not use pv spinlocks on HVM" + * Revert "fix ERROR: __devcgroup_inode_permission undefined" + * Revert "olpc_dcon_xo_1_5 needs delay.h" + * Revert "olpc_dcon_xo_1 needs delay.h" + * rebase to 6fe4c6d4 + * [Config] updateconfigs after rebase to 6fe4c6d4 + + [ Tim Gardner ] + + * [Config] Replace wireless-crda with crda,wireless-regdb + - LP: #856421 + * [Config] Relax the dependencies on crda + + [ Upstream Kernel Changes ] + + * (pre-stable) HID: bump maximum global item tag report size to 96 bytes + - LP: #724831 + * Ubuntu: remove coreutils|fileutils package dependency + - LP: #892814 + * iio: iio_event_getfd -- fix ev_int build failure + + [ Upstream Kernel Changes ] + + * Rebase to v3.2-rc3 + + -- Andy Whitcroft Thu, 24 Nov 2011 16:20:45 +0000 + +linux (3.2.0-1.3) precise; urgency=low + + [ Upstream Kernel Changes ] + + * Ubuntu: Add ext2 to fs-core-modules + - LP: #893395 + + -- Leann Ogasawara Mon, 21 Nov 2011 20:42:33 -0800 + +linux (3.2.0-1.2) precise; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_NFC and associated devices + * SAUCE: allow brcmsmac and b43 to both build + + [ Soren Hansen ] + + * Add ixgbe driver to d-i + - LP: #891969 + + -- Leann Ogasawara Mon, 21 Nov 2011 08:33:46 -0800 + +linux (3.2.0-1.1) precise; urgency=low + [ Andy Whitcroft ] + + * armhf -- enable armhf and create the first flavours + * SAUCE: ensure root is ready before running usermodehelpers in it + * [Config] enforcer -- ensure CONFIG_FAT_FS is built-in on arm + + [ Leann Ogasawara ] + + * Temporarily ignore module check + * [Config] Enable PCI_IOV on powerpc + * [Config] Temporarily disable CONFIG_PASEMI_MAC on powerpc + * rebase to v3.2-rc2 + * SAUCE: include for cpuidle34xx arm build + * SAUCE: include for linux/mtd/map.h arm build + * SAUCE: include and for mmc_core arm build + * SAUCE: select ARM_AMBA if OMAP3_EMU + * [Config] updateconfigs after select ARM_AMBA + * [Config] Temporarily disable CONFIG_KVM_BOOK3S_32 on powerpc + * [Config] Enable CONFIG_EXT2_FS=m + * [Config] Build in CONFIG_SATA_AHCI=y + * Resolve linux-image-extra's install dependency + + [ Seth Forshee ] + + * [Config] Enable EVENT_POWER_TRACING_DEPRECATED=y for powertop + * SAUCE: (drop after 3.2) Input: ALPS - move protocol information to + Documentation + * SAUCE: (drop after 3.2) Input: ALPS - add protocol version field in + alps_model_info + * SAUCE: (drop after 3.2) Input: ALPS - remove assumptions about packet + size + * SAUCE: (drop after 3.2) Input: ALPS - add support for protocol versions + 3 and 4 + * SAUCE: (drop after 3.2) Input: ALPS - add semi-MT support for v3 + protocol + * SAUCE: (drop after 3.2) Input: ALPS - add documentation for protocol + versions 3 and 4 + + [ Stefan Bader ] + + * [Config] Built-in xen-netfront and xen-blkfront + * Fix build of dm-raid45 and re-enable it + + [ Tim Gardner ] + + * [Config] CONFIG_USB_XHCI_HCD=y + - LP: #886167 + * [Config] CONFIG_R6040=m + - LP: #650899 + * SAUCE: Add a new entry (413c:8197) to Bluetooth USB device ID table + - LP: #854399 + * [Config] Consolidated amd64 server flavour into generic + * [Config] updateconfigs after rebase to 3.2-rc1 + * [Config] Disabled dm-raid4-5 + * [Config] Disabled 
ndiswrapper + * [Config] Disable vt6656 + * [Config] exclude ppp-modules for virtual flavour + * [Config] CONFIG_MEMSTICK_R592=m + - LP: #238208 + + [ Upstream Kernel Changes ] + + * CHROMIUM: seccomp_filter: new mode with configurable syscall filters + - LP: #887780 + * CHROMIUM: seccomp_filter: add process state reporting + - LP: #887780 + * CHROMIUM: seccomp_filter: Document what seccomp_filter is and how it + works. + - LP: #887780 + * CHROMIUM: x86: add HAVE_SECCOMP_FILTER and seccomp_execve + - LP: #887780 + * CHROMIUM: arm: select HAVE_SECCOMP_FILTER + - LP: #887780 + * CHROMIUM: seccomp_filters: move to btrees + * CHROMIUM: enable CONFIG_BTREE + * CHROMIUM: seccomp_filter: kill NR_syscall references + * CHROMIUM: seccomp_filters: guard all ftrace wrapper code + * CHROMIUM: seccomp_filters: clean up warnings; kref mistake + * CHROMIUM: seccomp_filter: remove "skip" from copy and add drop helper + * CHROMIUM: seccomp_filter: allow CAP_SYS_ADMIN management of execve + * CHROMIUM: seccomp_filter: inheritance documentation + * CHROMIUM: seccomp_filter: make inherited filters composable + * CHROMIUM: Fix seccomp_t compile error + - LP: #887780 + * CHROMIUM: Fix kref usage + - LP: #887780 + * CHROMIUM: enable CONFIG_SECCOMP_FILTER and CONFIG_HAVE_SECCOMP_FILTER + * rebase to v3.2-rc2 + + -- Leann Ogasawara Mon, 31 Oct 2011 09:24:39 -0400 + +linux (3.1.0-2.3) precise; urgency=low + + [ Tim Gardner ] + + * Add postinit and postrm scripts to the extras package + - LP: #882120 + + -- Leann Ogasawara Fri, 28 Oct 2011 12:48:33 -0700 + +linux (3.1.0-2.2) precise; urgency=low + + [ Andy Whitcroft ] + + * debian: add locking to protect debian/files from parallel update + + [ Leann Ogasawara ] + + * rebase to v3.1 + + [ Upstream Kernel Changes ] + + * rebase to v3.1 + + -- Leann Ogasawara Wed, 19 Oct 2011 07:12:38 -0700 + +linux (3.1.0-1.1) precise; urgency=low + + [ Andiry Xu ] + + * SAUCE: (drop during 3.2 merge) xHCI: AMD isoc link TRB chain bit quirk + - LP: #872811 + + [ Andy Whitcroft ] + + * Revert "ubuntu: compcache -- follow changes to bd_claim/bd_release" + - LP: #832694 + * Revert "ubuntu: compcache -- version 0.5.3" + - LP: #832694 + * [Config] standardise CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + * [Config] Enable CONFIG_MACVTAP=m + - LP: #822601 + * record the compiler in the ABI and check for inconsistant builds + * [Config] move ECRYPT_FS back to =y for all architectures + - LP: #827197 + * [Config] enable CONFIG_DRM_VMWGFX=m + - LP: #698009 + * [Config] re-fix ECRYPT_FS=y + - LP: #827197 + * enforcer -- ensure we have CONFIG_ECRYPT_FS=y + - LP: #827197 + * [Config] dropping compcache configuration options + * [Config] standardise on HZ=250 + * SAUCE: headers_install: fix #include "..." usage for userspace + - LP: #824377 + * make module-inclusion selection retain the left overs + * add a new linux-image-extras package for virtual + + [ Colin Watson ] + + * Deliver more Atheros, Ralink, and iwlagn NIC drivers to d-i + + [ edwin_rong ] + + * SAUCE: Staging: add driver for Realtek RTS5139 cardreader + - LP: #824273 + + [ Greg Kroah-Hartman ] + + * SAUCE: staging: rts5139: add vmalloc.h to some files to fix the build. + - LP: #824273 + + [ Jesse Sung ] + + * SAUCE: Unregister input device only if it is registered + - LP: #839238 + + [ Jiri Kosina ] + + * SAUCE: HID: add MacBookAir4, 2 to hid_have_special_driver[] + + [ Joshua V. Dillon ] + + * SAUCE: HID: add support for MacBookAir4,2 keyboard. 
+ + [ Kees Cook ] + + * [Config] enable and enforce SECCOMP_FILTER on x86 + + [ Keng-Yu Lin ] + + * [Config] Enable CONFIG_RTS5139=m on i386/amd64 + - LP: #824273 + + [ Leann Ogasawara ] + + * Revert "ubuntu: overlayfs -- ovl: make lower mount read-only" + * Revert "ubuntu: overlayfs -- fs: limit filesystem stacking depth" + * Revert "ubuntu: overlayfs -- ovl: improve stack use of lookup and + readdir" + * Revert "ubuntu: overlayfs -- ovl: fix overlayfs over overlayfs" + * Revert "ubuntu: overlayfs -- overlayfs: implement show_options" + * Revert "ubuntu: overlayfs -- overlayfs: add statfs support" + * Revert "ubuntu: overlayfs -- overlay filesystem" + * Revert "ubuntu: overlayfs -- overlay: overlay filesystem documentation" + * Revert "SAUCE: ARM: OMAP: Add macros for comparing silicon revision" + * Revert "SAUCE: OMAP: DSS2: check for both cpu type and revision, rather + than just revision" + * Revert "SAUCE: OMAP: DSS2: enable hsclk in dsi_pll_init for OMAP36XX" + * Revert "ubuntu: fsam7400 disable driver" + - LP: #876030 + * rebase to v3.1-rc1 + * [Config] updateconfigs after rebase to v3.1-rc1 + * rebase to v3.1-rc2 + * [Config] Updateconfigs after rebase to v3.1-rc2 + * ubuntu: Yama - update calls to generic_permission() and + inode->i_op->permission() + * ubuntu: ndiswrapper -- remove netdev_priv macro + * ubuntu: aufs -- Temporarily disable due to build failure + * [Config] Diable INTEL_MID_PTI on armel + * [Config] Temporarily disable CONFIG_FTMAC100 on armel + * [Config] Temporarily disable CONFIG_FTGMAC100 on armel + * [Config] Temporarily disable CONFIG_CAN_TI_HECC on armel + * [Config] Temporarily disable CONFIG_VIDEO_OMAP2_VOUT on armel + * [Config] Set CONFIG_DM_MIRROR=m on amd64, i386, and arm + * [Config] Set CONFIG_DM_MULTIPATH=m on amd64, i386, and arm + * [Config] Set CONFIG_DM_SNAPSHOT=m on amd64, i386, and arm + * [Config] Enable CONFIG_EDAC_AMD8111=m on powerpc + * [Config] Enable CONFIG_EDAC_AMD8131=m on powerpc + * [Config] Enable CONFIG_EDAC_CPC925=m on powerpc + * [Config] Enable CONFIG_EDAC_PASEMI=m on powerpc + * [Config] Enable CONFIG_ECHO=m on powerpc + * [Config] Enable CONFIG_ET131X=m on powerpc + * [Config] Set CONFIG_FB_MATROX=m + * [Config] Enable CONFIG_FB_UDL=m on powerpc + * [Config] Set CONFIG_FB_VIRTUAL=n + * [Config] Enable CONFIG_FB_VGA16=m on powerpc + * [Config] Enable CONFIG_GPIO_MAX732X=m on arm + * [Config] Enable CONFIG_GPIO_PCF857X=m on arm + * [Config] Set CONFIG_HOTPLUG_PCI_FAKE=m + * [Config] Enable CONFIG_HOTPLUG_PCI=y on powerpc + * [Config] Enable CONFIG_HOTPLUG_PCI_CPCI=y on powerpc + * [Config] Enable CONFIG_HP_ILO=m on powerpc-smp + * [Config] Enable CONFIG_I2C_PASEMI=m on powerpc + * [Config] Enable CONFIG_IBM_BSR=m on powerpc + * [Config] Enable CONFIG_IBMVETH=m on powerpc + * [Config] Enable CONFIG_IDE_PHISON=m on powerpc + * [Config] Enable CONFIG_IGB=m on powerpc + * [Config] Enable CONFIG_IIO=m on powerpc + * [Config] Enable CONFIG_INFINIBAND_NES=m + * [Config] Enable CONFIG_IPMI_HANDLER=m on arm + * [Config] Enable CONFIG_IWL3945=m on powerpc + * [Config] Disable CONFIG_KVM_BOOK3S_64 + * [Config] Enable CONFIG_LAPBETHER=m on arm + * [Config] Enable CONFIG_LEDS_GPIO=m on powerpc + * [Config] Enable CONFIG_LEDS_CLEVO_MAIL=m all arch's + * [Config] Enable CONFIG_LEDS_PCA9532=m on powerpc + * [Config] Enable CONFIG_LEDS_PCA955X=m on powerpc + * [Config] Enable CONFIG_LEDS_TRIGGER_DEFAULT_ON=m on powerpc + * [Config] Set CONFIG_LEDS_TRIGGER_HEARTBEAT=m on arm and powerpc + * [Config] Set CONFIG_LEDS_TRIGGER_TIMER=m on powerpc + * 
[Config] Enable CONFIG_LINE6_USB=m on arm and powerpc + * [Config] Enable CONFIG_MEMSTICK=m on arm + * [Config] Enable CONFIG_MTD_AFS_PARTS=m on arm + * [Config] Enable CONFIG_MTD_ALAUDA=m on arm + * [Config] Enable CONFIG_MTD_AR7_PARTS=m on arm + * [Config] Enable CONFIG_MTD_ARM_INTEGRATOR=m on arm + * [Config] Enable CONFIG_MOXA_SMARTIO=m on powerpc + * [Config] Enable CONFIG_MTD_DATAFLASH=m on arm + * [Config] Enable CONFIG_MTD_GPIO_ADDR=m on arm + * [Config] Enable CONFIG_MTD_IMPA7=m on arm + * [Config] Enable CONFIG_MTD_NAND_GPIO=m on arm + * [Config] Enable CONFIG_MTD_NAND_NANDSIM=m on arm + * [Config] Enable CONFIG_MTD_NAND_PASEMI=m on powerpc + * [Config] Enable CONFIG_MTD_NAND_PLATFORM=m on arm + * [Config] Enable CONFIG_MTD_NAND_TMIO=m on arm + * [Config] Enable CONFIG_MTD_SST25L=m on arm + * [Config] Enable CONFIG_NET_CLS_CGROUP=y on arm + * [Config] Enable CONFIG_NET_CLS_FLOW=m on arm + * [Config] Enable CONFIG_NET_CLS_U32=m on arm + * [Config] Enable CONFIG_NET_DCCPPROBE=m on arm + * [Config] Enable CONFIG_NET_SCH_INGRESS=m on arm + * [Config] Enable CONFIG_NET_TCPPROBE=m on arm + * [Config] Enable CONFIG_PASEMI_MAC=m on powerpc + * [Config] Enable CONFIG_PATA_NS87410=m on powerpc + * [Config] Enable CONFIG_I2C_GPIO=m on powerpc64-smp + * [Config] Enable CONFIG_PANEL=m on powerpc + * [Config] Enable CONFIG_PATA_CMD640_PCI=m on powerpc + * SAUCE: x86: reboot: Make Dell Latitude E6520 use reboot=pci + - LP: #833705 + * [Config] Add CONFIG_EFI_VARS=y to the enforcer + - LP: #837332 + * [Config] Update CONFIG_EFI_VARS enforcer check + * [Config] Add aufs to virtual flavor inclusion list + - LP: #844159 + * SAUCE: x86: reboot: Make Dell Optiplex 790 use reboot=pci + - LP: #818933 + * SAUCE: x86: reboot: Make Dell Optiplex 990 use reboot=pci + - LP: #768039 + * SAUCE: x86: reboot: Make Dell Latitude E6220 use reboot=pci + - LP: #838402 + * [Config] Add igbvf to the virtual flavor inclusion list + - LP: #794570 + * [Config] Add ixgbevf to the virtual inclusion list + - LP: #872411 + * [Config] Transition -generic and -server to be identical + * rebase to v3.1-rc10 + + [ Luke Yelavich ] + + * [Config] Disable legacy IDE drivers on powerpc + + [ Ming Lei ] + + * SAUCE: fireware: add NO_MSI quirks for o2micro controller + - LP: #801719 + * SAUCE: ata_piix: make DVD Drive recognisable on systems with Intel + Sandybridge chipsets(v2) + - LP: #737388, #782389, #794642 + + [ Paolo Pisati ] + + * [Config] Compile-in vfat support for armel + - LP: #853783 + + [ Randy Dunlap ] + + * SAUCE: staging: fix rts5139 depends & build + - LP: #824273 + + [ Rene Bolldorf ] + + * SAUCE: (drop after 3.0) ideapad: Check if acpi already handle backlight + power in 'ideapad_backlight_notify_power' to avoid a page fault + + [ Seth Forshee ] + + * SAUCE: (no-up) Input: elantech - Add v3 hardware support + - LP: #681904 + * SAUCE: (drop after 3.1) usb_storage: Don't freeze in usb-stor-scan + - LP: #810020 + + [ Stefan Bader ] + + * (config) Package macvlan and macvtap for virtual + * [Config] Force perf to use libiberty for demangling + - LP: #783660 + * SAUCE: xen: Do not use pv spinlocks on HVM + - LP: #838026 + + [ Tim Gardner ] + + * [Config] Clean up tools rules + * [Config] Package x86_energy_perf_policy and turbostat + - LP: #797556 + * rebase to v3.1-rc3 + * [Config] Simplify binary-udebs dependencies + * [Config] kernel preparation cannot be parallelized + * [Config] Linearize module/abi checks + * [Config] Linearize and simplify tree preparation rules + * [Config] Build kernel image in parallel with 
modules + * [Config] Set concurrency for kmake invocations + * [Config] Improve install-arch-headers speed + * [Config] Fix binary-perarch dependencies + * [Config] Removed stamp-flavours target + * [Config] Serialize binary indep targets + * [Config] Use build stamp directly + * [Config] Restore prepare-% target + * rebase to v3.1-rc4 + * rebase to v3.1-rc5 + * [Config] Disable makedumpfile for i386/amd64 + * rebase to v3.1-rc6 + * [Config] Fix binary-% build target + * rebase to v3.1-rc7 + * rebase to v3.1-rc8 + * SAUCE: Add a new entry (413c:8197) to Bluetooth USB device ID table + - LP: #854399 + * [Config] Enable ftrace support in the mac80211 layer + - LP: #865171 + * rebase to v3.1-rc9 + * SAUCE: usb/core/devio.c: Check for printer class specific request + - LP: #872711 + + [ Upstream Kernel Changes ] + + * overlay filesystem + * overlayfs: add statfs support + * overlayfs: implement show_options + * overlay: overlay filesystem documentation + * fs: limit filesystem stacking depth + + [ Will Drewry ] + + * SAUCE: seccomp_filter: new mode with configurable syscall filters + * SAUCE: seccomp_filter: add process state reporting + * SAUCE: seccomp_filter: Document what seccomp_filter is and how it + works. + * SAUCE: seccomp_filter: add HAVE_SECCOMP_FILTER and seccomp_execve + + [ Upstream Kernel Changes ] + + * rebase to v3.1-rc1 + * rebase to v3.1-rc2 + * rebase to v3.1-rc3 + * rebase to v3.1-rc4 + * rebase to v3.1-rc5 + * rebase to v3.1-rc6 + * rebase to v3.1-rc7 + * rebase to v3.1-rc8 + * rebase to v3.1-rc9 + * rebase to v3.1-rc10 + + -- Leann Ogasawara Wed, 10 Aug 2011 15:43:38 -0700 + +linux (3.1.0-1.0) oneiric; urgency=low + + [ Leann Ogasawara ] + + * Open P-series + + -- Leann Ogasawara Wed, 10 Aug 2011 13:42:40 -0700 + +linux (3.0.0-8.10) oneiric; urgency=low + + [ Adam Jackson ] + + * SAUCE: drm/i915/pch: Fix integer math bugs in panel fitting + - LP: #753994 + + [ John Johansen ] + + * [Config] Enable missing IPv6 options + + [ Leann Ogasawara ] + + * [Config] Disable config IWLWIFI_DEVICE_SVTOOL + - LP: #819925 + * Rebase to 3.0.1 + + [ Upstream Kernel Changes ] + + * x86, intel, power: Correct the MSR_IA32_ENERGY_PERF_BIAS message + * ALSA: hda - Turn on extra EAPDs on Conexant codecs + - LP: #783582 + * KVM: Remove SMEP bit from CR4_RESERVED_BITS + - LP: #796476 + * KVM: Add SMEP support when setting CR4 + - LP: #796476 + * KVM: Mask function7 ebx against host capability word9 + - LP: #796476 + * KVM: Add instruction fetch checking when walking guest page table + - LP: #796476 + + [ Upstream Kernel Changes ] + + * rebase to v3.0.1 + + -- Leann Ogasawara Fri, 05 Aug 2011 11:32:25 -0700 + +linux (3.0.0-7.9) oneiric; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] add local prefix to oss local change_bits" + * Revert "SAUCE: add tracing for user initiated readahead requests" + * Revert "SAUCE: vfs: Add a trace point in the mark_inode_dirty function" + * Revert "SAUCE: Input: ALPS - Enable Intellimouse mode for Lenovo + Zhaoyang E47" + * Revert "SAUCE: fix documentation strings for struct input_keymap_entry" + * Revert "SAUCE: vt -- fix handoff numbering to 1..n and add range checks + (grub)" + * Revert "SAUCE: vt -- fix handoff numbering to 1..n and add range + checks" + * Revert "SAUCE: vt -- allow grub to request automatic vt_handoff" + * Revert "SAUCE: vt -- maintain bootloader screen mode and content until + vt switch" + * [Config] enable 
CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 + - LP: #816035 + * ubuntu: Yama: if an underlying filesystem provides a permissions op use + it + * SAUCE: (no-up) add tracing for user initiated readahead requests + * SAUCE: vt -- maintain bootloader screen mode and content until vt + switch + * SAUCE: vt -- allow grub to request automatic vt_handoff + + [ Arjan van de Ven ] + + * SAUCE: (no-up) vfs: Add a trace point in the mark_inode_dirty function + + [ Kees Cook ] + + * Revert "SAUCE: (no-up) Disable building the ACPI debugfs source" + * [Config] enforce ACPI_CUSTOM_METHOD disabled + + [ Keng-Yu Lin ] + + * SAUCE: (no-up) Input: ALPS - Enable Intellimouse mode for Lenovo + Zhaoyang E47 + - LP: #632884, #803005 + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_BLK_DEV_CMD64X=m on powerpc + - LP: #513131 + * [Config] Enable CONFIG_RT2800PCI_RT53XX=y + - LP: #815064 + + [ Rezwanul Kabir ] + + * SAUCE: (no-up) Add support for Intellimouse Mode in ALPS touchpad on + Dell E2 series Laptops + - LP: #632884 + + [ Upstream Kernel Changes ] + + * Revert "yama: if an underlying filesystem provides a permissions op use + it" + * Revert "Add support for Intellimouse Mode in ALPS touchpad on Dell E2 + series Laptops" + * Revert "tty: include linux/slab.h for kfree" + * Revert "gpio/ml_ioh_gpio: include linux/slab.h for kfree" + * Revert "pch_dma: add include/slab.h for kfree" + * mmc: Added quirks for Ricoh 1180:e823 lower base clock frequency + - LP: #773524 + * oss: rename local change_bits to avoid powerpc bitsops.h definition + + -- Leann Ogasawara Mon, 25 Jul 2011 09:08:01 -0700 + +linux (3.0.0-7.8) oneiric; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: overlayfs -- overlayfs: add statfs support + * [Config] enable CONFIG_OVERLAYFS + + [ Erez Zadok ] + + * ubuntu: overlayfs -- overlayfs: implement show_options + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_ALIM7101_WDT=m on powerpc + * [Config] Enable CONFIG_ASUS_OLED=m on powerpc + * [Config] Disable CONFIG_ATM_DUMMY on arm + * [Config] Enable CONFIG_BLK_DEV_DRBD=m on powerpc + * Temporarily disable module check on arm + * Rebase to 3.0 final + * [Config] Enable CONFIG_CAN_TI_HECC=m on arm + * [Config] Set CONFIG_CDROM_PKTCDVD=m on amd64 and i386 + * [Config] Enable CONFIG_CRYPTO_CCM=m on powerpc + * [Config] Enable CONFIG_CRYPTO_DEV_HIFN_795X=m on powerpc + * [Config] Enable CONFIG_CRYPTO_GCM=m on powerpc + * [Config] Set CRYPTO_LZO=m on powerpc64-smp + * [Config] Enable CONFIG_DM9000=m on arm + * [Config] Set CONFIG_DISPLAY_SUPPORT=m on arm + * [Config] Enable CONFIG_DL2K=m on amd64 and i386 + + [ Miklos Szeredi ] + + * ubuntu: overlayfs -- vfs: add i_op->open() + * ubuntu: overlayfs -- vfs: export do_splice_direct() to modules + * ubuntu: overlayfs -- vfs: introduce clone_private_mount() + * ubuntu: overlayfs -- overlay filesystem + * ubuntu: overlayfs -- ovl: fix overlayfs over overlayfs + * ubuntu: overlayfs -- ovl: improve stack use of lookup and readdir + * ubuntu: overlayfs -- fs: limit filesystem stacking depth + * ubuntu: overlayfs -- ovl: make lower mount read-only + + [ Neil Brown ] + + * ubuntu: overlayfs -- overlay: overlay filesystem documentation + + [ Tim Gardner ] + + * [Config] Add enic/fnic to udebs + - LP: #801610 + + [ Upstream Kernel Changes ] + + * yama: if an underlying filesystem provides a permissions op use it + + [ Major Kernel Changes ] + + * Rebase to 3.0 final + + -- Leann Ogasawara Thu, 21 Jul 2011 07:01:32 -0700 + +linux (3.0.0-6.7) oneiric; urgency=low + + [ Eagon Yager ] + + * [Config] Fix misspelled 
'skipmodule' in arm makefile. + + [ Keng-Yu Lin ] + + * SAUCE: Input: ALPS - Enable Intellimouse mode for Lenovo Zhaoyang E47 + - LP: #632884, #803005 + + [ Leann Ogasawara ] + + * Revert "[Config] Temporarily disable CONFIG_SMC91X on armel-omap" + * Revert "[Config] Temporarily Disable CONFIG_BRCMSMAC on arm" + * Revert "[Config] Temporarily Disable CONFIG_RTL8192SE on powerpc" + * Revert "[Config] Temporarily Disable CONFIG_RTL8192SE on arm" + * Revert "[Config] Temporarily disable CONFIG_BRCMSMAC on powerpc" + * [Config] Set CONFIG_ACPI_PCI_SLOT=m + * [Config] Set CONFIG_ACPI_SBS=m + * [Config] Set CONFIG_ACPI_WMI=m + * [Config] Set CONFIG_AD7150=m on arm + * [Config] Set CONFIG_AD7152=m on arm + * [Config] Drop CONFIG_GPIO_S5PV210 + * [Config] Drop CONFIG_GPIO_S5PC100 + * [Config] Drop CONFIG_GPIO_PLAT_SAMSUNG + * [Config] Drop CONFIG_GPIO_EXYNOS4 + + [ Stefan Bader ] + + * SAUCE: Re-enable RODATA for i386 virtual + - LP: #809838 + + [ Upstream Kernel Changes ] + + * Revert "Quirk to fix suspend/resume on Lenovo Edge 11,13,14,15" + * (drop after 3.0.0) acer-wmi: Add support for Aspire 1830 wlan hotkey + - LP: #771758 + + -- Leann Ogasawara Wed, 20 Jul 2011 06:36:02 -0700 + +linux (3.0.0-5.6) oneiric; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_RTL8192CU=m + * Rebase to -rc7 + + -- Tim Gardner Mon, 11 Jul 2011 22:13:50 +0100 + +linux (3.0.0-4.5) oneiric; urgency=low + + [ Ming Lei ] + + * SAUCE: fix yama_ptracer_del lockdep warning + - LP: #791019 + + [ Seth Forshee ] + + * SAUCE: (drop after 3.0) asus-wmi: Add callback for hotkey filtering + * SAUCE: (drop after 3.0) eeepc-wmi: Add support for T101MT Home/Express Gate key + * SAUCE: (drop after 3.0) asus-wmi: Enable autorepeat for hotkey input device + * [Config] CONFIG_{ASUS,ASUS_NB,EEEPC}_WMI=m + - LP: #805218 + + [ Tim Gardner ] + + * [Config] updateconfigs after rebase to -rc6+ + Rebased against 4dd1b49c6d215dc41ce50c80b4868388b93f31a3 + * Adopt a 3 digit verion, e.g., 3.0.0-x.x + * Revert "UBUNTU: add dependancies for module-init-tools" + This dependency is no longer required for a 3 digit version. 
+ + -- Tim Gardner Tue, 05 Jul 2011 14:03:04 +0100 + +linux (3.0-3.4) oneiric; urgency=low + + [ Keng-Yu Lin ] + + * SAUCE: Revert: "dell-laptop: Toggle the unsupported hardware + killswitch" + - LP: #775281 + + [ Leann Ogasawara ] + + * rebase to v3.0-rc5 + * [Config] updateconfigs after rebase to 3.0-rc5 + + [ Tim Gardner ] + + * [Config] Remove ubuntu/rtl8192se + * [Config] Added armel ABI files + * [Config] Removed armel versatile flavour + * [Config] CONFIG_INTEL_MEI=m + - LP: #716867 + + [ Upstream Kernel Changes ] + + * ALSA: hda - Enable auto-parser as default for Conexant codecs + + [ Upstream Kernel Changes ] + + * rebase to v3.0-rc5 + + -- Leann Ogasawara Thu, 30 Jun 2011 14:27:10 +0100 + +linux (3.0-2.3) oneiric; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update to 0e2bafab74f0d1463383faeb93f9fc5eb8c2c54e + + [ Leann Ogasawara ] + + * rebase to v3.0-rc4 + * [Config] updateconfigs after rebase to 3.0-rc4 + * fix ERROR: __devcgroup_inode_permission undefined + + [ Stefan Bader ] + + * SAUCE: iscsitarget: Remove driver from the kernel + + [ Tim Gardner ] + + * SAUCE: rtl8192se: Force a build for a 2.6/3.0 kernel + * [Config] Add grub-efi as a recommended bootloader for server and + generic + - LP: #800910 + + [ Upstream Kernel Changes ] + + * Fix node_start/end_pfn() definition for mm/page_cgroup.c + + [ Leann Ogasawara ] + + * rebase to v3.0-rc4 + + -- Leann Ogasawara Fri, 24 Jun 2011 11:51:12 -0700 + +linux (3.0-1.2) oneiric; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_CAN_CALC_BITTIMING + + [ Leann Ogasawara ] + + * rebase to v3.0-rc3 + * [Config] updateconfigs after rebase to 3.0-rc3 + + [ Upstream Kernel Changes ] + + * perf: clear out make flags when calling kernel make kernelver + + [ Leann Ogasawara ] + + * rebase to v3.0-rc3 + + -- Leann Ogasawara Tue, 14 Jun 2011 07:25:35 -0700 + +linux (3.0-0.1) oneiric; urgency=low + + [ Andy Whitcroft ] + + * use the packaging version in the kernel + * use the kernels idea of its version for version_signature + * add dependancies for module-init-tools + * update control files to version 3 + * printchanges/insertchanges allow override of prev_release + * correct Vcs-Git: to point to oneiric + + [ Leann Ogasawara ] + + * rebase to v3.0-rc1 + * [Config] updateconfigs after rebase to 3.0-rc1 + * ubuntu: dm-raid4-5 fix up build failure + * [Config] Temporarily Disable CONFIG_GPIO_EXYNOS4 on arm + * [Config] Temporarily Disable CONFIG_GPIO_PLAT_SAMSUNG on arm + * [Config] Temporarily Disable CONFIG_GPIO_S5PC100 on arm + * [Config] Temporarily Disable CONFIG_GPIO_S5PV210 on arm + * [Config] Temporarily disable CONFIG_BRCMSMAC on powerpc + * [Config] Temporarily Disable CONFIG_BRCMSMAC on arm + * [Config] Temporarily Disable CONFIG_RTL8192SE on arm + * [Config] Temporarily Disable CONFIG_RTL8192SE on powerpc + * [Config] Temporarily disable CONFIG_SMC91X on armel-omap + * rebase to v3.0-rc2 + + [ Manoj Iyer ] + + * SAUCE: mmc: Enable MMC card reader for RICOH [1180:e823] + - LP: #790754 + + [ Upstream Kernel Changes ] + + * Revert "x86 idle: EXPORT_SYMBOL(default_idle, pm_idle) only when APM + demands it" + * drm/i915: fix regression after clock gating init split + + [ Major Kernel Changes ] + + * rebase from v2.6.39 to v3.0-rc1 + * rebase from v3.0-rc1 to v3.0-rc2 + + -- Andy Whitcroft Thu, 09 Jun 2011 15:18:33 +0100 + +linux (2.6.39-3.10) oneiric; urgency=low + + [ Colin Ian King ] + + * SAUCE: S3 early resume debug via keyboard LEDs + + [ Ingo Molnar ] + + * ubuntu: nx-emu - i386: NX emulation + * ubuntu: 
nx-emu - i386: mmap randomization for executable mappings + + [ Leann Ogasawara ] + + * Revert "[Config] Disable CONFIG_FT1000 on powerpc64-smp" + * Revert "[Config] Disable CONFIG_DM_RAID45" + * [Config] enable CONFIG_BRCMFMAC=y + * [Config] enable CONFIG_MDIO_BITBANG=m across all arch's and flavors + * [Config] enable CONFIG_VIDEO_OUTPUT_CONTROL=m on armel-omap + + [ Robert Nelson ] + + * SAUCE: omap3: beagle: detect new xM revision B + - LP: #770679 + * SAUCE: omap3: beagle: detect new xM revision C + - LP: #770679 + * SAUCE: omap3: beagle: if rev unknown, assume xM revision C + - LP: #770679 + + [ Stefan Bader ] + + * SAUCE: Convert dm-raid45 to new block plugging + + -- Leann Ogasawara Mon, 23 May 2011 11:46:43 -0700 + +linux (2.6.39-3.9) oneiric; urgency=low + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_SCSI_LPFC_DEBUG_FS + * rebase to v2.6.39 + * [Config] enable CONFIG_LLC2=m across all arch's and flavours + * [Config] enable CONFIG_INPUT_APANEL=m + + [ Thomas Schlichter ] + + * SAUCE: vesafb: mtrr module parameter is uint, not bool + - LP: #778043 + * SAUCE: vesafb: enable mtrr WC by default + - LP: #778043 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc7 to v2.6.39 + + -- Andy Whitcroft Fri, 20 May 2011 09:52:32 +0100 + +linux (2.6.39-2.8) oneiric; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-37" + * Revert "ubuntu: AUFS -- aufs2-base.patch aufs2.1-37" + * Revert "[Config] Disable CONFIG_AUFS_FS" + * ubuntu: AUFS -- aufs2-base.patch aufs2.1-39 + * ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-39 + * ubuntu: AUFS -- update to c6b76974311efc5bf3eddf921cd015b6aae46935 + * ubuntu: AUFS -- clean up the aufs updater and BOM + * ubuntu: AUFS -- documentation on updating aufs2 + + [ Kees Cook ] + + * ubuntu: Yama - LSM hooks + * ubuntu: Yama - create task_free security callback + * ubuntu: Yama - add ptrace relationship tracking interface + * ubuntu: Yama - unconditionally chain to Yama LSM + + [ Leann Ogasawara ] + + * Revert "SAUCE: Fix drivers/staging/easycap FTBS" + * Revert "[Config] Disable CONFIG_EASYCAP" + * ubuntu: fsam7400 disable driver + * ubuntu: omnibook disable driver + * ubuntu: rfkill disable driver + + [ Tim Gardner ] + + * SAUCE: Fix extra reference in fb_open() + + -- Leann Ogasawara Mon, 16 May 2011 09:23:56 -0700 + +linux (2.6.39-2.7) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc7 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc6 to v2.6.39-rc7 + + -- Leann Ogasawara Tue, 10 May 2011 10:18:28 +0200 + +linux (2.6.39-1.6) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc6 + * SAUCE: [arm] fixup __aeabi_uldivmod undefined build error + + [ Tim Gardner ] + + * [Config] updateconfigs after rebase to 2.6.39-rc6 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc5 to v2.6.39-rc6 + - LP: #740126 + + -- Leann Ogasawara Thu, 05 May 2011 09:46:12 -0700 + +linux (2.6.39-0.5) oneiric; urgency=low + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: Revert "x86, hibernate: Initialize mmu_cr4_features during boot" + - LP: #764758 + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc5 + * [Config] updateconfigs following rebase to v2.6.39-rc5 + + [ Paolo Pisati ] + + * [Config] s/USB_MUSB_TUSB6010/USB_MUSB_OMAP2PLUS/ on omap3 to get musb + - LP: #759913 + + [ Stefan Bader ] + + * Include nls_iso8859-1 for virtual images + - LP: #732046 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc4 to v2.6.39-rc5 + + -- Leann Ogasawara Wed, 27 Apr 2011 06:39:42 
-0700 + +linux (2.6.39-0.4) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc4 + * [Config] updateconfigs following rebase to v2.6.39-rc4 + * fixup powerpc implicit declaration of function + 'crash_kexec_wait_realmode' + * [Config] Disable CONFIG_FT1000 on powerpc64-smp + + [ Tim Gardner ] + + * [Config] CONFIG_TRANSPARENT_HUGEPAGE=y + - LP: #769503 + * [Config] Add cachefiles.ko to virtual flavour + - LP: #770430 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc3 to v2.6.39-rc4 + + -- Leann Ogasawara Tue, 19 Apr 2011 06:25:20 -0700 + +linux (2.6.39-0.3) oneiric; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc3 + * crash_kexec_wait_realmode() undefined when !SMP + + [ Tim Gardner ] + + * [Config] CONFIG_PM_ADVANCED_DEBUG=y for i386/amd64 + - LP: #632327 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc2 to v2.6.39-rc3 + + -- Leann Ogasawara Tue, 12 Apr 2011 06:52:24 -0700 + +linux (2.6.39-0.2) oneiric; urgency=low + + [ Gustavo F. Padovan ] + + * SAUCE: Revert "Bluetooth: Add new PID for Atheros 3011" + - LP: #720949 + + [ John Johansen ] + + * AppArmor: Fix masking of capabilities in complain mode + - LP: #748656 + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc2 + * [Config] updateconfigs following rebase to v2.6.39-rc2 + * hv_mouse needs delay.h + * olpc_dcon_xo_1 needs delay.h + * olpc_dcon_xo_1_5 needs delay.h + * Update dropped.txt for Oneiric + + [ Steve Langasek ] + + * [Config] Make linux-libc-dev coinstallable under multiarch + - LP: #750585 + + [ Upstream Kernel Changes ] + + * x86, hibernate: Initialize mmu_cr4_features during boot + - LP: #752870 + + [ Major Kernel Changes ] + + * rebase from v2.6.39-rc1 to v2.6.39-rc2 + + -- Leann Ogasawara Wed, 06 Apr 2011 11:04:15 -0700 + +linux (2.6.39-0.1) oneiric; urgency=low + + [ Brad Figg ] + + * [Config] Set CONFIG_NR_CPUS=256 for amd64 generic + - LP: #737124 + + [ Henrik Rydberg ] + + * SAUCE: HID: hid-ntrig: add support for 1b96:0006 model + * SAUCE: HID: ntrig: fix suspend/resume on recent models + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: (drop after 2.6.39) v4l: make sure drivers supply a zeroed + struct v4l2_subdev + - LP: #745213 + + [ Kees Cook ] + + * [Config] packaging: adjust perms on vmlinuz as well + * SAUCE: nx-emu: further clarify dmesg reporting + - LP: #745181 + + [ Leann Ogasawara ] + + * rebase to v2.6.39-rc1 + * [Config] updateconfigs following rebase to v2.6.39-rc1 + * [Config] Disable CONFIG_DM_RAID45 + * [Config] Disable CONFIG_SCSI_ISCSITARGET + * [Config] Disable CONFIG_EASYCAP + * [Config] Disable CONFIG_AUFS_FS + * update bnx2 firmware files in d-i/firmware/nic-modules + * xhci-pci.c resolve implicit declaration of kzalloc + * [Config] Enable CONFIG_DRM_PSB for only x86 + * [Config] Enable CONFIG_RTS_PSTOR for only x86 + * mfd/asic3: Fix typo, s/irq_data/data/ + + [ Luke Yelavich ] + + * [Config] Disable CONFIG_CRASH_DUMP on 32-bit powerpc kernels + - LP: #745358 + * [Config] Disable CONFIG_DRM_RADEON_KMS on powerpc kernels + * [Config] Build some framebuffer drivers as modules for powerpc kernels. + + [ Manoj Iyer ] + + * SAUCE: thinkpad-acpi: module autoloading for newer Lenovo ThinkPads. 
+ - LP: #745217 + + [ Tim Gardner ] + + * SAUCE: INR_OPEN=4096 + - LP: #663090 + * SAUCE: Increase the default hard limit for open FDs to 4096 + - LP: #663090 + + [ Upstream Kernel Changes ] + + * (drop after 2.6.39-rc1) arm: versatile : Fix typo introduced in irq + namespace cleanup + * (drop after 2.6.39-rc1) [media] staging: altera-jtag needs delay.h + * ALSA: pcm: fix infinite loop in snd_pcm_update_hw_ptr0() + + [ Major Kernel Changes ] + + * rebase from v2.6.38 to v2.6.39-rc1 + + -- Leann Ogasawara Thu, 31 Mar 2011 12:50:10 -0700 + +linux (2.6.39-0.0) oneiric; urgency=low + + [ Leann Ogasawara ] + + * Open Oneiric + + -- Leann Ogasawara Thu, 31 Mar 2011 12:29:23 -0700 + +linux (2.6.38-7.39) natty; urgency=low + + [ Leann Ogasawara ] + + * No change upload. This is just to rebuild with gcc-4.5.2-7ubuntu1. + + -- Leann Ogasawara Thu, 24 Mar 2011 09:27:45 -0700 + +linux (2.6.38-7.38) natty; urgency=low + + [ Leann Ogasawara ] + + * No change upload take 2. 2.6.38-7.37 was accidentally uploaded before + gcc-4.5.2-6ubuntu5 finished building on all arches. + + -- Leann Ogasawara Tue, 22 Mar 2011 06:12:47 -0700 + +linux (2.6.38-7.37) natty; urgency=low + + [ Leann Ogasawara ] + + * No change upload. This is just to rebuild with gcc-4.5.2-6ubuntu5. + + -- Leann Ogasawara Sun, 20 Mar 2011 16:02:48 -0700 + +linux (2.6.38-7.36) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: KLUDGE: work around failed 'shrink-wrap' compiler + optimisation" + * purge last vestiges of maverick + * [Config] switch CONFIG_FB_VESA back to module + + [ Chris Wilson ] + + * SAUCE: drm/i915: Fix pipelined fencing + - LP: #717114 + + [ Loïc Minier ] + + * Include nls_cp437 module in virtual for fat + - LP: #732046 + * Support arch= cross-compilation for any arch + * Fix couple of typos in 0-common-vars.mk + * Enforce DEFAULT_MMAP_MIN_ADDR on armhf + * Add armhf to Debian -> Linux arch map + * Add initial armhf.mk + * Enable common packages for armhf + + [ Upstream Kernel Changes ] + + * Yama: fix default relationship to check thread group + - LP: #737676 + + -- Andy Whitcroft Fri, 18 Mar 2011 18:18:02 +0000 + +linux (2.6.38-7.35) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to 2fbfac4e053861925fa3fffcdc327649b09af54c + * rebase fixes bug #715330 + * [Config] disable CONFIG_SCSI_QLA_ISCSI for powerpc 32bit to fix FTBS + * rebase to v2.6.38 final + + [ Herton Ronaldo Krzesinski ] + + * SAUCE: Apply OPTION_BLACKLIST_SENDSETUP also for ZTE MF626 + - LP: #636091 + + [ Tim Gardner ] + + * [Confg] CONFIG_BOOT_PRINTK_DELAY=y + + [ Upstream Kernel Changes ] + + * Yama: use thread group leader when creating match + - LP: #729839 + * (drop after 2.6.38) ahci: AHCI mode SATA patch for Intel Patsburg SATA + RAID controller + - LP: #735240 + * (drop after v2.6.38) x86, quirk: Fix SB600 revision check + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc8 to v2.6.38 final + - LP: #715330 + + -- Andy Whitcroft Tue, 15 Mar 2011 19:04:19 +0000 + +linux (2.6.38-6.34) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] normalise CONFIG_INTEL_TXT + * SAUCE: KLUDGE: work around failed 'shrink-wrap' compiler optimisation + - LP: #730860 + * rebase to mainline v2.6.38-rc8 + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc7 + fb62c00a6d8942775abc23d1621db1252e2d93d1 + to v2.6.38-rc8 + + -- Andy Whitcroft Tue, 08 Mar 2011 11:54:48 +0000 + +linux (2.6.38-6.33) natty; urgency=low + + [ Andy Whitcroft ] + + * d-i -- enable speakup-modules udeb + - LP: #672699 + * rebase to 
493f3358cb289ccf716c5a14fa5bb52ab75943e5 + * [Config] debian PPC64 configuration + * [Config] cleanup powerpc config fixing unexpected inconsistancies + * [Config] resync ppc64 configuration + * SAUCE: match up ENTRY/END naming for 32/64 bit + * rebase to fb62c00a6d8942775abc23d1621db1252e2d93d1 + * [Config] update configs after rebase to + fb62c00a6d8942775abc23d1621db1252e2d93d1 + * [Config] pps_gen_parport no longer built + + [ Corentin Chary ] + + * SAUCE: (drop after 2.6.38) eeepc-wmi: reorder keymap + - LP: #689393 + * SAUCE: (drop after 2.6.38) eeepc-wmi: add wlan key found on 1015P + - LP: #689393 + + [ John Johansen ] + + * SAUCE: Fix aufs calling of security_path_mknod + - LP: #724456 + + [ Kees Cook ] + + * SAUCE: proc: hide kernel addresses via %pK in /proc//stack + + [ Tim Gardner ] + + * rebase to 2.6.38-rc7 + + [ Upstream Kernel Changes ] + + * Revert "drm/i915: fix corruptions on i8xx due to relaxed fencing" + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc6 to v2.6.38-rc7 + + fb62c00a6d8942775abc23d1621db1252e2d93d1 + - LP: #721389 + - LP: #722925 + - LP: #723672 + - LP: #723676 + - LP: #715318 + + -- Andy Whitcroft Mon, 07 Mar 2011 15:33:17 +0000 + +linux (2.6.38-5.32) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to mainline 6f576d57f1fa0d6026b495d8746d56d949989161 + * [Config] updateconfigs following rebase to v2.6.38-rc6 + * [Config] enable CONFIG_DMAR + - LP: #552311 + + [ Upstream Kernel Changes ] + + * drm/i915: skip FDI & PCH enabling for DP_A + - LP: #561802, #600453, #681877 + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc5 to v2.6.38-rc6 + - LP: #718402 + - LP: #719524 + - LP: #721126 + - LP: #719691 + - LP: #722689 + - LP: #722310 + + -- Andy Whitcroft Tue, 22 Feb 2011 13:28:39 +0000 + +linux (2.6.38-4.31) natty; urgency=low + + [ Andy Whitcroft ] + + * add in bugs closed by upstream patches pulled in by rebases + * rebase to 795abaf1e4e188c4171e3cd3dbb11a9fcacaf505 + * [Config] enable CONFIG_VSX to allow use of vector instuctions + * resync with maverick 98defa1c5773a3d7e4c524967eb01d5bae035816 + * rebase to mainline v2.6.38-rc5 + * SAUCE: ecryptfs: read on a directory should return EISDIR if not + supported + - LP: #719691 + + [ Colin Ian King ] + + * SAUCE: Dell All-In-One: Remove need for Dell module alias + + [ Manoj Iyer ] + + * SAUCE: (drop after 2.6.38) add ricoh 0xe823 pci id. + - LP: #717435 + + [ Tim Gardner ] + + * [Config] CONFIG_CRYPTO_CRC32C_INTEL=y + + [ Upstream Kernel Changes ] + + * Quirk to fix suspend/resume on Lenovo Edge 11,13,14,15 + - LP: #702434 + * vfs: fix BUG_ON() in fs/namei.c:1461 + + [ Vladislav P ] + + * SAUCE: Release BTM while sleeping to avoid deadlock. + - LP: #713837 + + [ Major Kernel Changes ] + + * rebase from v2.6.38-rc4 to v2.6.38-rc5 + - LP: #579276 + - LP: #715877 + - LP: #713769 + - LP: #716811 + * resync with Maverick Ubuntu-2.6.35-27.47 + + -- Andy Whitcroft Fri, 11 Feb 2011 17:24:09 +0000 + +linux (2.6.38-3.30) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.38-rc4 + * ppc64 -- add basic architecture + * ubuntu: AUFS -- update to 65835da20b77c98fb538c9114fc31f5de1328230 + + [ Colin Ian King ] + + * SAUCE: Add WMI hotkeys support for Dell All-In-One series + - LP: #676997 + * SAUCE: Add WMI hotkeys support for another Dell All-In-One series + - LP: #701530 + * SAUCE: Dell WMI: Use sparse keymaps and tidy up code. 
+ - LP: #701530 + + [ Dan Rosenberg ] + + * SAUCE: (drop after 2.6.38) Convert net %p usage %pK + + [ Kees Cook ] + + * Revert "SAUCE: kernel: make /proc/kallsyms mode 400 to reduce ease of + attacking" + * SAUCE: (drop after 2.6.38) use %pK for /proc/kallsyms and /proc/modules + + [ Tim Gardner ] + + * [Config] CONFIG_BLK_CGROUP=y + - LP: #706394 + * [Config] CONFIG_DELL_WMI_AIO=m + + [ Upstream Kernel Changes ] + + * drm/i915/lvds: Restore dithering on native modes for gen2/3 + - LP: #711568 + + [ Upstream Kernel Changes ] + + * rebase from v2.6.38-rc3 to v2.6.38-rc4. + - LP: #701271 + - LP: #708521 + - LP: #710371 + + -- Andy Whitcroft Tue, 08 Feb 2011 02:07:18 +0000 + +linux (2.6.38-2.29) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to 1f0324caefd39985e9fe052fac97da31694db31e + * [Config] updateconfigs following rebase to + 1f0324caefd39985e9fe052fac97da31694db31e + * rebase to 70d1f365568e0cdbc9f4ab92428e1830fdb09ab0 + * [Config] reenable HIBERNATE + - LP: #710877 + * rebase to v2.6.38-rc3 + * [Config] reenable CONFIG_CRASH_DUMP + + [ Kamal Mostafa ] + + * SAUCE: rtl8192se: fix source file perms + * SAUCE: rtl8192se: fix source file newline + * SAUCE: omnibook: fix source file newline + + [ Kees Cook ] + + * [Config] packaging: really make System.map mode 0600 + + [ Ricardo Salveti de Araujo ] + + * SAUCE: OMAP3630: PM: don't warn the user with a trace in case of + PM34XX_ERRATUM + + [ Soren Hansen ] + + * SAUCE: nbd: Remove module-level ioctl mutex + + [ Tim Gardner ] + + * SAUCE: Disable building the ACPI debugfs source + + [ Upstream Kernel Changes ] + + * Set physical start and alignment 1M for virtual i386 + - LP: #710754 + + [ Upstream Kernel Changes ] + + * rebase from v2.6.38-rc2 + c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + to v2.6.38-rc3 + - LP: #707902 + + -- Andy Whitcroft Fri, 28 Jan 2011 16:30:32 +0000 + +linux (2.6.38-1.28) natty; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update to b1cee06249dfa0ab30951e7f06490a75c155b620 + + [ Ricardo Salveti de Araujo ] + + * SAUCE: omap3: beaglexm: fix DVI initialization + * [Config] omap: move CONFIG_PANEL_GENERIC_DPI to build in to make + display work at Beagle + + -- Andy Whitcroft Fri, 28 Jan 2011 10:51:57 +0000 + +linux (2.6.38-1.27) natty; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update aufs-update to track new locations of headers + * ubuntu: AUFS -- update to c5021514085a5d96364e096dbd34cadb2251abfd + * SAUCE: ensure root is ready before running usermodehelpers in it + * correct the Vcs linkage to point to natty + * rebase to linux tip e78bf5e6cbe837daa6ab628a5f679548742994d3 + * [Config] update configs following rebase + e78bf5e6cbe837daa6ab628a5f679548742994d3 + * SAUCE: Yama: follow changes to generic_permission + * ubuntu: compcache -- follow changes to bd_claim/bd_release + * ubuntu: iscsitarget -- follow changes to open_bdev_exclusive + * ubuntu: ndiswrapper -- fix interaction between __packed and packed + * ubuntu: AUFS -- update to 806051bcbeec27748aae2b7957726a4e63ff308e + * update package version to match payload version + * rebase to e6f597a1425b5af64917be3448b29e2d5a585ac8 + * rebase to v2.6.38-rc1 + * [Config] updateconfigs following rebase to v2.6.38-rc1 + * SAUCE: x86 fix up jiffies/jiffies_64 handling + * rebase to linus tip 2b1caf6ed7b888c95a1909d343799672731651a5 + * [Config] updateconfigs following rebase to + 2b1caf6ed7b888c95a1909d343799672731651a5 + * [Config] disable CONFIG_TRANSPARENT_HUGEPAGE to fix i386 boot crashes + * ubuntu: AUFS -- suppress benign plink 
warning messages + - LP: #621195 + * [Config] CONFIG_NR_CPUS=256 for amd64 -server flavour + * rebase to v2.6.38-rc2 + * rebase to mainline d315777b32a4696feb86f2a0c9e9f39c94683649 + * rebase to c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + * [Config] update configs following rebase to + c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + * [Config] disable CONFIG_AD7152 to fix FTBS on armel versatile + * [Config] disable CONFIG_AD7150 to fix FTBS on armel versatile + * [Config] disable CONFIG_RTL8192CE to fix FTBS on armel omap + * [Config] disable CONFIG_MANTIS_CORE to fix FTBS on armel versatile + + [ Kees Cook ] + + * SAUCE: kernel: make /proc/kallsyms mode 400 to reduce ease of attacking + + [ Stefan Bader ] + + * Temporarily disable RODATA for virtual i386 + - LP: #699828 + + [ Tim Gardner ] + + * [Config] CONFIG_NLS_DEFAULT=utf8 + - LP: #683690 + * [Config] CONFIG_HIBERNATION=n + * update bnx2 firmware files in d-i/firmware/nic-modules + + [ Upstream Kernel Changes ] + + * Revert "drm/radeon/bo: add some fallback placements for VRAM only + objects." + * packaging: make System.map mode 0600 + * thinkpad_acpi: Always report scancodes for hotkeys + - LP: #702407 + * sched: tg->se->load should be initialised to tg->shares + * Input: sysrq -- ensure sysrq_enabled and __sysrq_enabled are consistent + * brcm80211: include linux/slab.h for kfree + * pch_dma: add include/slab.h for kfree + * i2c-eg20t: include linux/slab.h for kfree + * gpio/ml_ioh_gpio: include linux/slab.h for kfree + * tty: include linux/slab.h for kfree + * winbond: include linux/delay.h for mdelay et al + + [ Upstream Kernel Changes ] + + * mark the start of v2.6.38 versioning + * rebase v2.6.37 to v2.6.38-rc2 + c723fdab8aa728dc2bf0da6a0de8bb9c3f588d84 + - LP: #689886 + - LP: #702125 + - LP: #608775 + - LP: #215802 + - LP: #686333 + - LP: #677830 + - LP: #677652 + - LP: #696493 + - LP: #697240 + - LP: #689036 + - LP: #705323 + - LP: #686692 + + -- Andy Whitcroft Sun, 09 Jan 2011 13:44:52 +0000 + +linux (2.6.37-12.26) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.37-rc8 + * [Config] armel -- reenable omap flavour + * [Config] disable CONFIG_MACH_OMAP3517EVM to fix FTBS on armel omap + * [Config] disable CONFIG_GPIO_VX855 to fix FTBS on omap armel + * [Config] disable CONFIG_WESTBRIDGE_ASTORIA to fix FTBS on omap armel + * [Config] disable CONFIG_TI_DAVINCI_EMAC to fix FTBS on omap armel + * rebase to mainline 989d873fc5b6a96695b97738dea8d9f02a60f8ab + * [Config] track missing modules + * rebase to v2.6.37 final + + [ Chase Douglas ] + + * SAUCE: (drop after 2.6.37) HID: magicmouse: Don't report REL_{X, Y} for + Magic Trackpad + + [ Stefan Bader ] + + * Revert "SAUCE: blkfront: default to sd devices" + - LP: #684875 + + [ Tim Gardner ] + + * Revert "SAUCE: (no-up) libata: Ignore HPA by default." 
+ - LP: #380138 + * [Config] Added autofs4.ko to -virtual flavour + - LP: #692917 + + [ Upstream Kernel Changes ] + + * Add support for Intellimouse Mode in ALPS touchpad on Dell E2 series + Laptops + - LP: #632884 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc8 + * rebase to mainline 989d873fc5b6a96695b97738dea8d9f02a60f8ab + * rebase to v2.6.37 final + + -- Andy Whitcroft Thu, 23 Dec 2010 18:34:13 +0000 + +linux (2.6.37-11.25) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i -- add hpsa to the list of block devices + - LP: #684304 + * [Config] add vmw-balloon driver to -virtual flavour + - LP: #592039 + * rebase to v2.6.37-rc7 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc7 + + -- Andy Whitcroft Tue, 21 Dec 2010 13:35:28 +0000 + +linux (2.6.37-10.24) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.37-rc6 + * updateconfigs following rebase to v2.6.37-rc6 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc6 + + -- Andy Whitcroft Thu, 16 Dec 2010 12:34:19 +0000 + +linux (2.6.37-9.23) natty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: vt -- fix handoff numbering to 1..n and add range checks + - LP: #689606 + * SAUCE: vt -- fix handoff numbering to 1..n and add range checks (grub) + - LP: #689606 + + [ Kees Cook ] + + * SAUCE: RO/NX protection for loadable kernel, fix ftrace + - LP: #690190 + + -- Andy Whitcroft Wed, 15 Dec 2010 19:29:57 +0000 + +linux (2.6.37-9.22) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.35-rc5 + * [Config] updateconfigs following rebase to v2.6.37-rc5 + * (no-up) add support for installed header files to ubuntu directory + - LP: #684666 + * ubuntu: AUFS -- include the aufs_types.h file in linux-libc-headers + - LP: #684666 + * ubuntu: dm-raid4-5 -- follow changes to bio flags + * ubuntu: dm-raid4-5 -- re-enable + * ubuntu: omnibook -- update BOM + * ubuntu: ndiswrapper -- update BOM to match actual version + * ubuntu: ndiswrapper -- follow removal of the BKL and locked ioctl + * ubuntu: ndiswrapper -- re-enable + * ubuntu: iscsitarget -- re-instate copy_io_context + * ubuntu: iscsitarget -- follow changes to semaphore initialisation + * ubuntu: iscsitarget -- convert NIPQUAD to %pI4 + * ubuntu: iscsitarget -- re-enable + + [ Kees Cook ] + + * [Config] update config for CONFIG_DEBUG_SET_MODULE_RONX + + [ Manoj Iyer ] + + * SAUCE: Enable jack sense for Thinkpad Edge 13 + - LP: #685015 + + [ Tim Gardner ] + + * [Config] CONFIG_9P_FSCACHE=y,CONFIG_9P_FS_POSIX_ACL=y + * [Config] CONFIG_CRYPTO_CRC32C=y + - LP: #681819 + * [Config] CONFIG_9P_FSCACHE=n + * [Config] Add nfsd modules to -virtual flavour + - LP: #688070 + + [ Upstream Kernel Changes ] + + * Revert "Staging: zram: work around oops due to startup ordering snafu" + * NFS: Fix panic after nfs_umount() + - LP: #683938 + * x86: Add NX protection for kernel data + * x86: Add RO/NX protection for loadable kernel modules + * x86: Resume trampoline must be executable + * x86: RO/NX protection for loadable kernel, jump_table fix + + [ Upstream Kernel Changes ] + + * rebase to v2.6.37-rc5 + + -- Andy Whitcroft Thu, 09 Dec 2010 18:15:35 +0000 + +linux (2.6.37-8.21) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- include the aufs_types.h file in + linux-libc-headers" + * Revert "(no-up) add support for installed header files to ubuntu + directory" + + -- Andy Whitcroft Sun, 05 Dec 2010 17:33:28 +0000 + +linux (2.6.37-8.20) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] drivers/serial/mfd.c: Fix ARM 
compile error" + * Revert "SAUCE: Nouveau: Disable acceleration on MacBook Pros" + * Revert "SAUCE: Nouveau: Add quirk framework to disable acceleration" + * Revert "SAUCE: i915 -- disable powersave by default" + * SAUCE: enable Marvell 9128 PCIe SATA controller + - LP: #658521 + * [Config] evtchn has been renamed + * (no-up) add support for installed header files to ubuntu directory + - LP: #684666 + * ubuntu: AUFS -- include the aufs_types.h file in linux-libc-headers + - LP: #684666 + + [ Tim Gardner ] + + * [Config] MISS: evtchn, NEW : xen-evtchn + * rebase to v2.6.37-rc4 + + [ Upstream Kernel Changes ] + + * drm/i915: Clean conflicting modesetting registers upon init + - LP: #683775 + * rebase to v2.6.37-rc4 + + -- Andy Whitcroft Fri, 03 Dec 2010 18:42:07 +0000 + +linux (2.6.37-7.19) natty; urgency=low + + [ Tim Gardner ] + + * [Config] Add bnx2 firmware to nic-modules udeb + - LP: #676245 + + -- Andy Whitcroft Fri, 26 Nov 2010 17:53:45 +0000 + +linux (2.6.37-7.18) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] USB: option: Remove duplicate AMOI_VENDOR_ID" + * Revert "SAUCE: Add extra headers to linux-libc-dev" + * Revert "SAUCE: Enable speedstep for sonoma processors." + * [Config] enable CONFIG_BT_HCIUART_ATH3K + * [Config] enable CONFIG_IWLWIFI_DEBUGFS + * [Config] standardise CONFIG_MII + * [Config] standardise CONFIG_PRISM2_USB + * [Config] standardise CONFIG_SCSI_QLA_ISCSI + * [Config] build in CONFIG_AGP + * [Config] build in CONFIG_AGP_INTEL + * [Config] build in CONFIG_AGP_AMD + * [Config] build in CONFIG_AGP_AMD64 + * [Config] build in CONFIG_AGP_NVIDIA + * [Config] build in CONFIG_AGP_VIA + * [Config] disable CONFIG_SCSI_QLA_ISCSI for FTBS (arm) + * (no-up): document the new ## scheme + * [Config] harmonise CONFIG_SERIAL_8250_NR_UARTS + * [Config] update CONFIG_SERIAL_8250_RUNTIME_UARTS=32 + - LP: #675453 + + [ Mathieu J. 
Poirier ] + + * SAUCE: ARM: Adding vdd_sdi regulator supply to OMAP3EVM + + [ Upstream Kernel Changes ] + + * nx-emu: fix inverted report of disable_nx + + -- Andy Whitcroft Tue, 23 Nov 2010 21:00:39 +0000 + +linux (2.6.37-6.17) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- aufs2-standalone.patch + aufs2.1-36-UNRELEASED-20101103" + * Revert "ubuntu: AUFS -- aufs2-base.patch + aufs2.1-36-UNRELEASED-20101103" + * [Config] standardise CONFIG_BT + * [Config] standardise CONFIG_IRDA + * [Config] standardise CONFIG_LAPB + * [Config] standardise CONFIG_RDS + * [Config] standardise CONFIG_RFKILL + * [Config] standardise CONFIG_TIPC + * [Config] standardise CONFIG_X25 + * [Config] standardise CONFIG_INPUT_EVDEV + * [Config] standardise CONFIG_INPUT_JOYDEV + * [Config] standardise CONFIG_INPUT_JOYSTICK + * [Config] standardise CONFIG_INPUT_TOUCHSCREEN + * [Config] CONFIG_INPUT_TOUCHSCREEN=n for FTBS (arm) + * [Config] CONFIG_IRDA=n for FTBS (arm) + * ubuntu: AUFS -- aufs2-base.patch aufs2.1-37 + * ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-37 + * ubuntu: AUFS -- update to 097bf62d6f49619359d34bf17f242df38562489a + + [ Tim Gardner ] + + * SAUCE: Fix drivers/staging/easycap FTBS + * [Config] CONFIG_EASYCAP=m after fixing FTBS + + [ Upstream Kernel Changes ] + + * Revert "x86: Add NX protection for kernel data" + + -- Andy Whitcroft Mon, 22 Nov 2010 18:09:10 +0000 + +linux (2.6.37-6.16) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Config] update config for CONFIG_DEBUG_SET_MODULE_RONX" + * rebase to v2.6.37-rc3 + + [ Tim Gardner ] + + * [Config] CONFIG_SCHED_AUTOGROUP=y + + [ Upstream Kernel Changes ] + + * Revert "x86: Add RO/NX protection for loadable kernel modules" + * sched: automated per session task groups + * rebase to v2.6.37-rc3 + + -- Andy Whitcroft Mon, 22 Nov 2010 10:11:13 +0000 + +linux (2.6.37-6.15) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] standardise CONFIG_CEPH_FS + * [Config] standardise CONFIG_SCSI_LPFC_DEBUG_FS + * [Config] standardise CONFIG_SCSI_PROC_FS + * [Config] standardise CONFIG_UBIFS_FS + * [Config] standardise CONFIG_USB_GADGET_DEBUG_FS + + [ Kees Cook ] + + * [Config] update config for CONFIG_DEBUG_SET_MODULE_RONX + + [ Manoj Iyer ] + + * SAUCE: Enable jack sense for Thinkpad Edge 11 + - LP: #677210 + * SAUCE: enable rfkill for rtl8192se driver + - LP: #640992 + + [ Tim Gardner ] + + * [Config] CONFIG_EASYCAP=n for FTBS + * Rebase to v2.6.32-rc2+git + + [ Upstream Kernel Changes ] + + * x86: Fix improper large page preservation + * x86: Add NX protection for kernel data + * x86: Add RO/NX protection for loadable kernel modules + + [ Upstream Kernel Changes ] + + * Rebase to Linus 2.6.37-rc2+git + + -- Andy Whitcroft Sat, 20 Nov 2010 11:40:00 +0000 + +linux (2.6.37-5.14) natty; urgency=low + + [ Upstream Kernel Changes ] + + * PCI: fix offset check for sysfs mmapped files + - LP: #676963 + + -- Andy Whitcroft Thu, 18 Nov 2010 18:12:27 +0000 + +linux (2.6.37-5.13) natty; urgency=low + + [ Andy Whitcroft ] + + * rebased to v2.6.37-rc2 + * updateconfigs following rebase to v2.6.37-rc2 + + [ Tim Gardner ] + + * [Config] Added NFS and related modules to virtual flavour + - LP: #659084 + + [ Upstream Kernel Changes ] + + * x86, cpu: Rename verify_cpu_64.S to verify_cpu.S + * x86, cpu: Clear XD_DISABLED flag on Intel to regain NX + * x86, cpu: Call verify_cpu during 32bit CPU startup + * x86, cpu: Only CPU features determine NX capabilities + + [ Upstream Changes ] + + * rebased to v2.67.37-rc2 + + -- Andy Whitcroft Tue, 
16 Nov 2010 13:13:29 +0000 + +linux (2.6.37-4.12) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Upstream] HID: magicmouse: add param for scroll speed" + * Revert "[Upstream] HID: magicmouse: properly account for scroll + movement in state" + * Revert "[Upstream] HID: magicmouse: disable and add module param for + scroll acceleration" + * Revert "[Upstream] HID: magicmouse: scroll on entire surface, not just + middle of mouse" + + [ Henrik Rydberg ] + + * SAUCE: hid: ntrig: remove sysfs nodes + * SAUCE: hid: ntrig: Setup input filtering manually + * SAUCE: hid: ntrig: New ghost-filtering event logic + + [ Manoj Iyer ] + + * SAUCE: Added quirk to recognize GE0301 3G modem as an interface. + - LP: #348861 + + [ Upstream Kernel Changes ] + + * Revert "mmc: fix all hangs related to mmc/sd card insert/removal during + suspend/resume" + * Revert "[ARM] implement arch_randomize_brk()" + * Revert "ARM: stack protector: change the canary value per task" + * Revert "ARM: initial stack protector (-fstack-protector) support" + * Revert "ALSA: hda - Handle pin NID 0x1a on ALC259/269" + * Revert "ALSA: hda - Handle missing NID 0x1b on ALC259 codec" + * Revert "perf probe: Add kernel source path option" + * hid: ntrig: Support single-touch devices + * hid: ntrig: Mask pen switch events + * net: rtnetlink.h -- only include linux/netdevice.h when used by the + kernel + - LP: #673073 + * Fix userspace build of linux/fs.h + + -- Andy Whitcroft Mon, 15 Nov 2010 19:31:44 +0000 + +linux (2.6.37-3.11) natty; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- update to + b37c575759dc4535ccc03241c584ad5fe69e3b25" + * Revert "ubuntu: AUFS -- track changes to the arguements to fop fsync()" + * Revert "ubuntu: AUFS -- update to standalone 2.6.35-rcN as at 20100601" + * Revert "ubuntu: AUFS -- update to standalone 2.6.34 as at 20100601" + * Revert "ubuntu: AUFS -- aufs2 base patch for linux-2.6.34" + * [Config] Disable intel_idle for -virtual kernels + - LP: #651370 + * [Config] enforcer -- ensure we never enable CONFIG_IMA + * debian -- pass the correct flavour name when checking configs + * [Config] enforcer -- ensure CONFIG_INTEL_IDLE is off for -virtual + * [Config] ensure CONFIG_IPV6=y for powerpc + * [Config] enforcer -- ensure CONFIG_IPV6=y + * ubuntu: AUFS -- aufs2-base.patch aufs2.1-36-UNRELEASED-20101103 + * ubuntu: AUFS -- aufs2-standalone.patch aufs2.1-36-UNRELEASED-20101103 + * ubuntu: AUFS -- update to aufs2.1-36-UNRELEASED-20101103 + * ubuntu: AUFS -- re-enable + * ubuntu: AUFS -- track changes to work queue initialisation + * ubuntu: AUFS -- track changes to llseek in v2.6.37-rc1 + * SAUCE: fbcon -- fix race between open and removal of framebuffers + * SAUCE: fbcon -- fix OOPs triggered by race prevention fixes + - LP: #614008 + * SAUCE: drm -- stop early access to drm devices + + [ Jeremy Kerr ] + + * [Config] Build-in powermac ZILOG serial driver + - LP: #673346 + + [ Kees Cook ] + + * SAUCE: nx-emu: use upstream ASLR when possible + + [ Tim Gardner ] + + * [Config] Use correct be2iscsi module name in d-i/modules/scsi-modules + - LP: #628776 + + [ Upstream Kernel Changes ] + + * i386: NX emulation + * nx-emu: drop exec-shield sysctl, merge with disable_nx + * nx-emu: standardize boottime message prefix + * mmap randomization for executable mappings on 32-bit + * exec-randomization: brk away from exec rand area + + -- Andy Whitcroft Thu, 11 Nov 2010 23:46:37 +0000 + +linux (2.6.37-2.10) natty; urgency=low + + [ Andy Whitcroft ] + + * reinstate armel config changes: + * [Config] 
CONFIG_GPIO_PCH=n for armel FTBS + * [Config] CONFIG_GPIO_VX855=n for armel FTBS + + -- Andy Whitcroft Wed, 03 Nov 2010 22:20:35 +0000 + +linux (2.6.37-2.9) natty; urgency=low + + [ Andy Whitcroft ] + + * config -- fix genportsconfig + * [Config] move powerpc over from ports to distro + * bump master version number to match contained kernel + * SAUCE: fix documentation strings for struct input_keymap_entry + * usb: gadget: goku_udc: add registered flag bit + + -- Andy Whitcroft Tue, 02 Nov 2010 15:14:11 +0000 + +linux (2.6.36-2.8) natty; urgency=low + + [ Tim Gardner ] + + * [Config]: fix changed CONFIG_SYSFS_DEPRECATED_V2 enforcement rules + * [Config]: TWL4030_CORE=n for FTBS + * [Config]: CONFIG_ATH6K_LEGACY=n for FTBS + * [Config]: CONFIG_SOLO6X10=n for FTBS + * [Config]: CONFIG_GPIO_PCH=n for armel FTBS + * [Config]: CONFIG_GPIO_VX855=n for armel FTBS + * [Config]: CONFIG_DRM_NOUVEAU=n for armel FTBS + * [Config]: CONFIG_LINE6_USB=n for armel FTBS + * [Config]: CONFIG_SENSORS_AK8975=n for armel FTBS + * [Config]: CONFIG_I2C_I801=n for armel FTBS + * UBUNTU: SAUCE: AppArmor: Fix unpack of network tables. + * AppArmor: compatibility patch for v5 interface + * AppArmor: compatibility patch for v5 network controll + * Dropped (pre-stable): input: Support Clickpad devices in ClickZone mode + * Dropped: UBUNTU: SAUCE: libata: Add ALPM power state accounting to the AHCI driver + * Dropped: UBUNTU: SAUCE: Added quirk to recognize GE0301 3G modem as an interface. + * Dropped: hid: 3m: Convert to MT slots + * Dropped: HID: magicmouse: don't allow hidinput to initialize the device + * Dropped: HID: magicmouse: simplify touch data bit manipulation + * Dropped: HID: magicmouse: simplify touch down logic + * Dropped: HID: magicmouse: enable Magic Trackpad support + * Dropped: UBUNTU: SAUCE: hid: ntrig: remove sysfs nodes + * Dropped: UBUNTU: SAUCE: hid: ntrig: Setup input filtering manually + * Dropped: UBUNTU: SAUCE: hid: ntrig: New ghost-filtering event logic + * Dropped: UBUNTU: SAUCE: hid: ntrig: identify firmware version (wiggled) + * Dropped: UBUNTU: (pre-stable): input: Support Clickpad devices in ClickZone mode + * Dropped: UBUNTU: SAUCE: KMS: cache the EDID information of the LVDS + * Dropped: UBUNTU: SAUCE: fbcon -- fix race between open and removal of framebuffers + * Dropped: UBUNTU: SAUCE: fbcon -- fix OOPs triggered by race prevention fixes + * Dropped: UBUNTU: SAUCE: x86: implement cs-limit nx-emulation for ia32 + * Dropped: UBUNTU: SAUCE: x86: more tightly confine cs-limit nx-emulation to ia32 only + * Dropped: UBUNTU: SAUCE: [um] Don't use nx_enabled under UML + * Dropped: UBUNTU: SAUCE: x86: brk away from exec rand area + + [ Upstream Kernel Changes ] + + * rebased against 2.6.27-rc1 + + -- Tim Gardner Fri, 22 Oct 2010 19:35:05 -0600 + +linux (2.6.36-1.7) natty; urgency=low + + [ Andy Whitcroft ] + + * rebased to v2.6.36 final + * [Config] update configs following rebase to v2.6.36 final + * [Config] update ports configs following rebase to v2.6.36 final + + [ Upstream Kernel Changes ] + + * rebased to v2.6.36 final + + -- Andy Whitcroft Thu, 21 Oct 2010 14:28:57 +0100 + +linux (2.6.36-1.6) natty; urgency=low + + [ Upstream Kernel Changes ] + + * drop broadcom staging driver preview: + * Revert "Staging: Add initial release of brcm80211 - Broadcom 802.11n + wireless LAN driver." 
+ + -- Andy Whitcroft Wed, 20 Oct 2010 10:41:25 +0100 + +linux (2.6.36-1.5) natty; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.36-rc8 + * updateconfigs following rebase to v2.6.36-rc8 + * updateportsconfigs following rebase to v2.6.36-rc8 + * config -- simplify the kernelconfig interface + * config -- add new config mode 'dumpconfigs' + + [ Tim Gardner ] + + * Simplify the use of CROSS_COMPILER + + [ Upstream Kernel Changes ] + + * drop broadcom staging driver preview: + * Revert "staging: brcm80211: Make compiling of brcm80211.ko and + brcmfmac.ko mutually exclusive." + * Revert "staging: brcm80211: Fix compile issue when BRCM80211_PCI is not + set." + * Revert "Staging: brcm80211: remove driver specific -W options" + * Revert "Staging: brcm80211: clean up makefile cflag lines" + * Revert "staging: brcm80211: add fullmac driver" + * Revert "staging: brcm80211: use string native library" + * Revert "staging: brcm80211: use native ctype library" + * Revert "staging: brcm80211: fix remaining checkpatch errors." + * Revert "staging: brcm80211: fix "ERROR: trailing whitespace."" + * Revert "staging: brcm80211: fix "ERROR: spaces required around that + ..."" + * Revert "staging: brcm80211: fix "ERROR: spaces prohibited around that + ':' ..."" + * Revert "staging: brcm80211: fix "ERROR: space required before that + ..."" + * Revert "staging: brcm80211: fix "ERROR: space required after that ..."" + * Revert "staging: brcm80211: fix "ERROR: space required after that close + brace"" + * Revert "staging: brcm80211: fix "ERROR: space prohibited before + ...close square bracket"" + * Revert "staging: brcm80211: fix "ERROR: space prohibited after that + ..."" + * Revert "staging: brcm80211: fix "ERROR: need consistent spacing around + '*'"" + * Revert "staging: brcm80211: fix 'ERROR: "(foo*)" should be "(foo *)"'" + * Revert "staging: brcm80211: fix "ERROR: Macros w/ mult. statements ... + do - while loop"" + * Revert "staging: brcm80211: fix "ERROR: Macros w/ complex values ... + parenthesis"" + * Revert "staging: brcm80211: fix "ERROR: do not initialise statics to 0 + or NULL"" + * Revert "staging: brcm80211: fix "ERROR: do not initialise globals to 0 + or NULL"" + * Revert "staging: brcm80211: fix "ERROR: while should follow close brace + '}'"" + * Revert "staging: brcm80211: fix "ERROR: that open brace { ... prev + line"" + * Revert "staging: brcm80211: fix "ERROR: trailing statements should be + on next line"" + * Revert "staging: brcm80211: fix "ERROR: do not use assignment in if + condition"" + * Revert "staging: brcm80211: fix "ERROR: return is not a function, + paren..."" + * Revert "staging: brcm80211: fix "ERROR: open brace '{' following + function dec..."" + * Revert "staging: brcm80211: fix 'ERROR: "foo * bar" should be "foo + *bar"'" + * Revert "staging: brcm80211: Fix URLs for firmware files." + * Revert "staging: brcm80211: use '%pM' format to print MAC address" + * Revert "staging: brcm80211: Add contact info to TODO list." + * Revert "staging: brcm80211: Fix some initialisation failure paths" + * Export dump_{write,seek} to binary loader modules + * rebase to v2.6.36-rc8. 
+ + -- Andy Whitcroft Tue, 19 Oct 2010 18:58:11 +0100 + +linux (2.6.36-0.4) natty; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: perf: increase stack footprint to avoid stack-protector warning + (fixes FTBS on powerpc) + + -- Andy Whitcroft Thu, 14 Oct 2010 13:16:16 +0100 + +linux (2.6.36-0.3) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] disable CONFIG_SCSI_QLA_ISCSI to fix FTBS on powerpc + + -- Andy Whitcroft Thu, 14 Oct 2010 03:01:30 +0100 + +linux (2.6.36-0.2) natty; urgency=low + + [ Andy Whitcroft ] + + * [Config] updateportsconfigs following rebase to 2.6.36-rc7 + (fix FTBS on powerpc) + + -- Andy Whitcroft Wed, 13 Oct 2010 23:25:12 +0100 + +linux (2.6.36-0.1) natty; urgency=low + + [ Andy Whitcroft ] + + * reduce disk usage during buildd builds + - LP: #645653 + * [Config] enforcer -- ensure CONFIG_INIT_PASS_ALL_PARAMS is y + * [Config] armel -- drop omap flavour + + [ Tim Gardner ] + + * Added dropped patch list + * more dropped patches + * [Config] Disable aufs, dmraid-4.5, ndis-wrapper + * [Config] Add support for cross compiling armel + * [Config] CONFIG_SCSI_QLA_ISCSI=n for armel + * [Upstream] drivers/serial/mfd.c: Fix ARM compile error + * [Config]: updateconfigs after adding brcm80211 + * staging: brcm80211: Fix Makefile syntax error + * rebased to v2.6.36-rc7 + + [ Upstream Kernel Changes ] + + * (upstream) IPS driver: don't toggle CPU turbo on unsupported CPUs + * (upstream) IPS driver: verify BIOS provided limits + * intel_ips: Print MCP limit exceeded values. + * Staging: Add initial release of brcm80211 - Broadcom 802.11n wireless + LAN driver. + * staging: brcm80211: Fix some initialisation failure paths + * staging: brcm80211: Add contact info to TODO list. + * staging: brcm80211: use '%pM' format to print MAC address + * staging: brcm80211: Fix URLs for firmware files. + * staging: brcm80211: fix 'ERROR: "foo * bar" should be "foo *bar"' + * staging: brcm80211: fix "ERROR: open brace '{' following function + dec..." + * staging: brcm80211: fix "ERROR: return is not a function, paren..." + * staging: brcm80211: fix "ERROR: do not use assignment in if condition" + * staging: brcm80211: fix "ERROR: trailing statements should be on next + line" + * staging: brcm80211: fix "ERROR: that open brace { ... prev line" + * staging: brcm80211: fix "ERROR: while should follow close brace '}'" + * staging: brcm80211: fix "ERROR: do not initialise globals to 0 or NULL" + * staging: brcm80211: fix "ERROR: do not initialise statics to 0 or NULL" + * staging: brcm80211: fix "ERROR: Macros w/ complex values ... + parenthesis" + * staging: brcm80211: fix "ERROR: Macros w/ mult. statements ... do - + while loop" + * staging: brcm80211: fix 'ERROR: "(foo*)" should be "(foo *)"' + * staging: brcm80211: fix "ERROR: need consistent spacing around '*'" + * staging: brcm80211: fix "ERROR: space prohibited after that ..." + * staging: brcm80211: fix "ERROR: space prohibited before ...close square + bracket" + * staging: brcm80211: fix "ERROR: space required after that close brace" + * staging: brcm80211: fix "ERROR: space required after that ..." + * staging: brcm80211: fix "ERROR: space required before that ..." + * staging: brcm80211: fix "ERROR: spaces prohibited around that ':' ..." + * staging: brcm80211: fix "ERROR: spaces required around that ..." + * staging: brcm80211: fix "ERROR: trailing whitespace." + * staging: brcm80211: fix remaining checkpatch errors. 
+ * staging: brcm80211: use native ctype library + * staging: brcm80211: use string native library + * staging: brcm80211: add fullmac driver + * Staging: brcm80211: clean up makefile cflag lines + * Staging: brcm80211: remove driver specific -W options + * staging: brcm80211: Fix compile issue when BRCM80211_PCI is not set. + * staging: brcm80211: Make compiling of brcm80211.ko and brcmfmac.ko + mutually exclusive. + + -- Andy Whitcroft Tue, 12 Oct 2010 16:00:27 +0100 + +linux (2.6.35-22.33) maverick; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: Add support for Intellimouse Mode in ALPS touchpad on + Dell E2 series Laptops" + - LP: #641320 + + [ Brian Rogers ] + + * SAUCE: ir-core: Fix null dereferences in the protocols sysfs interface + - LP: #624701 + + [ Christopher James Halse Rogers ] + + * SAUCE: Nouveau: Add quirk framework to disable acceleration + - LP: #544088, #546393 + * SAUCE: Nouveau: Disable acceleration on MacBook Pros + - LP: #546393 + + [ John Johansen ] + + * Revert "SAUCE: AppArmor: allow newer tools to load policy on older + kernels" + * SAUCE: AppArmor: allow newer tools to load policy on older kernels + - LP: #639758 + + [ Mathieu J. Poirier ] + + * SAUCE: Adding vdd_sdi regulator supply to OMAP3EVM + + [ Upstream Kernel Changes ] + + * ALSA: HDA: Enable internal speaker on Dell M101z + - LP: #640254 + + -- Leann Ogasawara Fri, 17 Sep 2010 13:21:28 -0700 + +linux (2.6.35-22.32) maverick; urgency=low + + [ Arjan van de Ven ] + + * SAUCE: libata: Add ALPM power state accounting to the AHCI driver + + [ David Henningsson ] + + * SAUCE: ALSA: HDA: Enable internal mic on Dell E6410 and Dell E6510 + - LP: #605047, #628961 + + [ John Johansen ] + + * [Upstream] AppArmor: Fix splitting an fqname into separate namespace + and profile names + - LP: #615947 + * [Upstream] AppArmor: Fix locking from removal of profile namespace + - LP: #615947 + * SAUCE: AppArmor: allow newer tools to load policy on older kernels + - LP: #639758 + * SAUCE: Improve Amazon EBS performance for EC2 + - LP: #634316 + + [ Leann Ogasawara ] + + * Revert "SAUCE: i915 KMS -- blacklist i855" + * Revert "SAUCE: i915 KMS -- blacklist i845g" + * Revert "SAUCE: i915 KMS -- blacklist i830" + * Revert "SAUCE: i915 KMS -- support disabling KMS for known broken + devices" + * execute module-inclusion within a subshell + - LP: #621175 + + [ Upstream Kernel Changes ] + + * (pre-stable) bounce: call flush_dcache_page() after bounce_copy_vec() + - LP: #633227 + * (pre-stable) drm/i915: don't enable self-refresh on Ironlake + - LP: #629711 + * (pre-stable) mm: Move vma_stack_continue into mm.h + * x86, hwmon: Fix unsafe smp_processor_id() in thermal_throttle_add_dev + - LP: #601073 + * PM / Runtime: Make runtime_status attribute not debug-only (v. 
2) + * PM / Runtime: Add runtime PM statistics (v3) + * compat: Make compat_alloc_user_space() incorporate the access_ok() + - CVE-2010-3081 + * x86-64, compat: Test %rax for the syscall number, not %eax + - CVE-2010-3301 + * x86-64, compat: Retruncate rax after ia32 syscall entry tracing + - CVE-2010-3301 + + -- Leann Ogasawara Tue, 14 Sep 2010 08:46:49 -0700 + +linux (2.6.35-21.31) maverick; urgency=low + + [ Andy Whitcroft ] + + * bodge linux-libc-dev package version due to ti-omap4 error + * linux-libc-dev -- ensure we can only build this on debian.master + + -- Leann Ogasawara Mon, 13 Sep 2010 09:54:31 -0700 + +linux (2.6.35-21.30) maverick; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: fbcon -- fix OOPs triggered by race prevention fixes + - LP: #614008 + + [ Daniel Lezcano ] + + * SAUCE: fix compilation warning when CONFIG_SECURITY is not set + + [ Henrik Rydberg ] + + * SAUCE: Input: wacom - add fuzz parameters to features + * SAUCE: Input: wacom - collect device quirks into single function + * SAUCE: Input: wacom - add support for the Bamboo Touch trackpad + * SAUCE: Input: wacom - add a quirk for low resolution Bamboo devices + * SAUCE: hid: ntrig: Remove unused device ids + * SAUCE: hid: ntrig: remove sysfs nodes + * SAUCE: hid: ntrig: Correct logic for quirks + * SAUCE: hid: ntrig: zero-initialize ntrig struct + * SAUCE: hid: ntrig: Setup input filtering manually + * SAUCE: hid: ntrig: New ghost-filtering event logic + + [ Leann Ogasawara ] + + * SAUCE: ndiswrapper: Initialize buffer index and check its value + - LP: #613796 + + [ Manoj Iyer ] + + * SAUCE: Add support for Intellimouse Mode in ALPS touchpad on Dell E2 + series Laptops + - LP: #632884 + + [ Ping Cheng ] + + * SAUCE: Input: wacom - parse the Bamboo device family + + [ Rafi Rubin ] + + * SAUCE: hid: ntrig: identify firmware version (wiggled) + + [ Tim Gardner ] + + * [Config] CONFIG_NL80211_TESTMODE=n + + [ Upstream Kernel Changes ] + + * Revert "input: mt: Add support for the Bamboo Touch trackpad" + * e1000e: initial support for 82579 LOMs + * e1000e: correct MAC-PHY interconnect register offset for 82579 + * (pre-stable) ALSA: hda - Add a new hp-laptop model for Conexant 5066, + tested on HP G60 + - LP: #587388 + * DSS2: Don't power off a panel twice + - LP: #588243 + * mmc: build fix: mmc_pm_notify is only available with CONFIG_PM=y + * Input: i8042 - reset keyboard controller wehen resuming from S2R + - LP: #86820 + * ALSA: hda - Fix beep frequency on IDT 92HD73xx and 92HD71Bxx codecs + - LP: #414795 + * agp/intel: Support the extended physical addressing bits on + Sandybridge. + - LP: #632488 + * drm/i915,intel_agp: Add support for Sandybridge D0 + - LP: #632488 + * (pre-stable) intel_agp,i915: Add more sandybridge graphics device ids + - LP: #632488 + * mmc: omap: fix for bus width which improves SD card's peformance. 
+ + -- Leann Ogasawara Tue, 07 Sep 2010 09:58:52 -0700 + +linux (2.6.35-20.29) maverick; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: i915 KMS -- support disabling KMS for known broken devices + - LP: #563277 + * SAUCE: i915 KMS -- blacklist i830 + - LP: #542208, #563277 + * SAUCE: i915 KMS -- blacklist i845g + - LP: #541492, #563277 + * SAUCE: i915 KMS -- blacklist i855 + - LP: #511001, #541511, #563277 + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_SENSORS_PKGTEMP=m + - LP: #601073 + * ARM: Temporarily disable module check for armel + * rebase to v2.6.35.4 + * [Config] update configs following rebase to v2.6.35.4 + + [ Ricardo Salveti de Araujo ] + + * [Config] Change CONFIG_LEDS_TRIGGER_HEARTBEAT from module to built-in + in Omap + + [ Tim Gardner ] + + * [Config] Added be2net, be2scsi to udebs + - LP: #628776 + + [ Upstream Kernel Changes ] + + * x86, cpu: Package Level Thermal Control, Power Limit Notification + definitions + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: pkgtemp hwmon driver + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: thermal throttling handler + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: power limit + - LP: #601073 + * x86, hwmon: Package Level Thermal/Power: pkgtemp documentation + - LP: #601073 + * hid: 3m: Adjust to sequential MT HID protocol + * hid: 3m: Convert to MT slots + * hid: 3m: Correct touchscreen emulation + * hid: 3m: Adjust major / minor axes to scale + * input: bcm5974: Adjust major / minor to scale + * HID: magicmouse: don't allow hidinput to initialize the device + * HID: magicmouse: simplify multitouch feature request + * HID: magicmouse: simplify touch data bit manipulation + * HID: magicmouse: simplify touch down logic + * HID: magicmouse: remove timestamp logic + * HID: magicmouse: enable Magic Trackpad support + * HID: magicmouse: Adjust major / minor axes to scale + * mmc: fix all hangs related to mmc/sd card insert/removal during + suspend/resume + - LP: #477106 + * drm/i915: fix VGA plane disable for Ironlake+ + - LP: #602281 + + -- Leann Ogasawara Mon, 30 Aug 2010 08:38:01 -0700 + +linux (2.6.35-19.28) maverick; urgency=low + + [ Leann Ogasawara ] + + * No changes from 2.6.35-19.27. Some armel udebs were accidentally deleted + from the archive and a no-change rebuild was attempted. However, the ABI + did not get bumped and resulted in build failures for 2.6.35-19.27. Fix + up the ABI and re-upload. + + -- Leann Ogasawara Sat, 28 Aug 2010 16:42:27 -0700 + +linux (2.6.35-19.27) maverick; urgency=low + + [ Leann Ogasawara ] + + * No changes from 2.6.35-19.26. Some armel udebs were accidentally deleted + from the archive. + + -- Leann Ogasawara Fri, 27 Aug 2010 08:58:35 -0700 + +linux (2.6.35-19.26) maverick; urgency=low + + [ Upstream Kernel Changes ] + + * ARM: OMAP: Beagle: revision detection + * ARM: OMAP: Beagle: only Cx boards use pin 23 for write protect + * ARM: OMAP: Beagle: no gpio_wp pin connection on xM + + -- Leann Ogasawara Thu, 26 Aug 2010 09:15:09 -0700 + +linux (2.6.35-19.25) maverick; urgency=low + + [ Jarod Wilson ] + + * SAUCE: Bring in staging/lirc from 2.6.36 + - LP: #609234 + * SAUCE: Update ir-core to linuxtv/other which should be merged for + 2.6.36. + - LP: #609234 + * SAUCE: Fix memleaks in imon and mceusb drivers + - LP: #609234 + * SAUCE: Bring in streamzap support from linuxtv/other + - LP: #609234 + + [ Mario Limonciello ] + + * Remove ubuntu/lirc in favor of staging/lirc from 2.6.36 + - LP: #609234 + + [ Mathieu J. 
Poirier ] + + * SAUCE: ARM: adding i2c eeprom driver to read EDID + - LP: #608279 + + [ Upstream Kernel Changes ] + + * intel_idle: disable module support + - LP: #615265 + * (pre-stable) ALSA: hda - Ensure codec patch files are checked for the + correct codec ID + * (pre-stable) ALSA: hda - Rename iMic to Int Mic on Lenovo NB0763 + - LP: #605101 + * (pre-stable) ALSA: HDA: Use model=auto for LG R510 + - LP: #495134 + * (pre-stable) ALSA: HDA: Add Sony VAIO quirk for ALC269 + - LP: #519066 + * (pre-stable) ALSA: HDA: Fix front mic on Dell Precision M6500 + - LP: #519066 + * input: mt: Initialize slots to unused (rev2) + * input: mt: Add support for the Bamboo Touch trackpad + * hid: Add a hid quirk for input sync override + + -- Leann Ogasawara Mon, 23 Aug 2010 12:42:52 -0700 + +linux (2.6.35-18.24) maverick; urgency=low + + [ Colin Watson ] + + * Pass DEB_MAINT_PARAMS to hook scripts + + [ Leann Ogasawara ] + + * [Config] Add CONFIG_INPUT_UINPUT=y to config enforcer + - LP: #584812 + * rebase to v2.6.35.3 + + [ Upstream Kernel Changes ] + + * (pre-stable) dell-wmi: Add support for eject key on Dell Studio 1555 + - LP: #609234 + * can: add limit for nframes and clean up signed/unsigned variables + - CVE-2010-2959 + * drm: Initialize ioctl struct when no user data is present + - CVE-2010-2803 + * ARM: initial stack protector (-fstack-protector) support + * ARM: stack protector: change the canary value per task + * [ARM] implement arch_randomize_brk() + * [ARM] add address randomization to mmap() + * ARM: fix ASLR of PIE executables + + -- Leann Ogasawara Sun, 22 Aug 2010 19:22:04 -0700 + +linux (2.6.35-17.23) maverick; urgency=low + + [ Jeremy Kerr ] + + * [Config] build-in uinput module + - LP: #584812 + + [ Leann Ogasawara ] + + * Revert "[Config] [FTBS] ia64: Temporarily disable CONFIG_CEPH_FS" + * Revert "[Config] [FTBS] ia64: Temporarily disable gpiolib" + * Revert "[Config] [FTBS] sparc: Temporarily disable + CONFIG_MTD_NAND_DENALI" + * Revert "[Config] [FTBS] sparc: Temporarily disable + CONFIG_MFD_JANZ_CMODIO" + * Revert "[Config] [FTBS] sparc: Temporarily disable + CONFIG_INFINIBAND_QIB" + * [Config] Enable INTEL_IPS + - LP: #601057 + * Remove ia64 support + * [Config] Update portsconfigs after removing ia64 support + * Remove sparc support + * [Config] Update portsconfigs after removing sparc support + + [ Linus Torvalds ] + + * (pre-stable) mm: fix page table unmap for stack guard page properly + + [ Mathieu J. Poirier ] + + * SAUCE: (no-up) ARM: Resetting power_mode to its original value. 
+ - LP: #591941 + + [ Upstream Kernel Changes ] + + * timer: add on-stack deferrable timer interfaces + - LP: #601057 + * x86 platform driver: intelligent power sharing driver + - LP: #601057 + * IPS driver: add GPU busy and turbo checking + - LP: #601057 + * X86: intel_ips, check for kzalloc properly + - LP: #601057 + * ips driver: make it less chatty + - LP: #601057 + + -- Leann Ogasawara Tue, 17 Aug 2010 09:38:08 -0700 + +linux (2.6.35-16.22) maverick; urgency=low + + [ Andy Whitcroft ] + + * debian -- more aggressively clean up after depmod on purge + - LP: #618591 + + [ Henrik Rydberg ] + + * SAUCE: hid: 3m: Simplify touchscreen emulation logic + + [ Leann Ogasawara ] + + * ubuntu: iscsitarget -- version 1.4.20.2 + * ubuntu: rtl8192se -- update to version 0017.0507.2010 + * rebase to v2.6.35.2 + * [Config] update configs following rebase to v2.6.35.2 + * [Config] update ports configs following rebase to v2.6.35.2 + + [ Luke Yelavich ] + + * [Config] Enable new firewire stack on powerpc + + [ Mathieu J. Poirier ] + + * SAUCE: (drop after 2.6.35) ARM: Using gpmc function to init nand flash. + - LP: #608266 + + -- Leann Ogasawara Thu, 12 Aug 2010 09:58:01 -0700 + +linux (2.6.35-15.21) maverick; urgency=low + + [ Luke Yelavich ] + + * [Config] CONFIG_SND_USB_UA101=m for all architectures + + [ Upstream Kernel Changes ] + + * Input: introduce MT event slots + * Input: document the MT event slot protocol + * (pre-stable) sched: Revert nohz_ratelimit() for now + * (pre-stable) drm/radeon/kms: add missing copy from user + - LP: #606081 + + [ Leann Ogasawara ] + + * rebase to v2.6.35.1 + + -- Leann Ogasawara Mon, 09 Aug 2010 09:24:04 -0700 + +linux (2.6.35-14.20) maverick; urgency=low + + [ Andy Whitcroft ] + + * update Vcs-Git to point to maverick repo + * debian -- include the debian packaging in the -source package + - LP: #608674 + * select debian source format 1.0 + * add support for building selected stages of kernel + - LP: #603087 + * cleanup conditional dependency handling + - LP: #603087 + + [ Upstream Kernel Changes ] + + * ALSA: hda - Handle missing NID 0x1b on ALC259 codec + - LP: #582199, #586418, #588031 + * ALSA: hda - Handle pin NID 0x1a on ALC259/269 + - LP: #582199, #586418, #588031 + * sched: Revert nohz_ratelimit() for now + + -- Leann Ogasawara Tue, 03 Aug 2010 08:46:47 -0700 + +linux (2.6.35-14.19) maverick; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.35 + + -- Leann Ogasawara Sun, 01 Aug 2010 10:35:56 -0700 + +linux (2.6.35-13.18) maverick; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: (no-up) Modularize vesafb -- fix initialisation + * SAUCE: add tracing for user initiated readahead requests + * SAUCE: vt -- maintain bootloader screen mode and content until vt + switch + * SAUCE: vt -- allow grub to request automatic vt_handoff + * SAUCE: fbcon -- fix race between open and removal of framebuffers + * SAUCE: drm -- stop early access to drm devices + + [ Bryan Wu ] + + * CONFIG: compile in OTG driver and Transceiver driver + - LP: #566645 + * remove OTG modules from modules list file + + [ John Johansen ] + + * SAUCE: AppArmor: -- sync to AppArmor mainline 2010-07-27 + - LP: #581525, #599450 + * SAUCE: AppArmor: -- sync to AppArmor mainline 2010-07-29 + * SAUCE: AppArmor 2.4 compatibility patch + * SAUCE: AppArmor: Allow dfa backward compatibility with broken userspace + * SAUCE: fix pv-ops for legacy Xen + * SAUCE: blkfront: default to sd devices + * [Config] Build in drivers required for Xen pv-ops + + [ Leann Ogasawara ] + + * Revert "[Upstream] i915: Use
the correct mask to detect i830 aperture + size." + + [ Lee Jones ] + + * SAUCE: ARM: OMAP: Add macros for comparing silicon revision + - LP: #608095 + * SAUCE: OMAP: DSS2: check for both cpu type and revision, rather than + just revision + - LP: #608095 + * SAUCE: OMAP: DSS2: enable hsclk in dsi_pll_init for OMAP36XX + - LP: #608095 + * SAUCE: ARM: OMAP: Beagle: support twl gpio differences on xM + - LP: #608095 + + [ Upstream Kernel Changes ] + + * agp/intel: Use the correct mask to detect i830 aperture size. + - LP: #597075 + + -- Leann Ogasawara Fri, 30 Jul 2010 15:46:59 -0700 + +linux (2.6.35-12.17) maverick; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc6 + * [Config] update configs following rebase to v2.6.35-rc6 + * [Config] update ports configs following rebase to v2.6.35-rc6 + * SAUCE: [FTBS] armel: define KEY_F10 and KEYF11 + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc6 + + -- Leann Ogasawara Fri, 23 Jul 2010 16:16:38 +0200 + +linux (2.6.35-11.16) maverick; urgency=low + + [ Leann Ogasawara ] + + * Bump ABI for new compiler update + + -- Leann Ogasawara Fri, 23 Jul 2010 10:24:58 +0200 + +linux (2.6.35-10.15) maverick; urgency=low + + [ Leann Ogasawara ] + + * Revert "SAUCE: ensure vga16fb loads if no other driver claims the VGA + device" + * [Config] Enable CONFIG_M686=y + - LP: #592495 + + [ Upstream Kernel Changes ] + + * tracing: Add alignment to syscall metadata declarations + + -- Leann Ogasawara Tue, 20 Jul 2010 18:18:49 +0200 + +linux (2.6.35-9.14) maverick; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- add BOM and automated update script + * ubuntu: AUFS -- update to b37c575759dc4535ccc03241c584ad5fe69e3b25 + + [ John Johansen ] + + * [Config] Enable DRBD as a module + + [ Kees Cook ] + + * SAUCE: Yama: verify inode is symlink to avoid bind mounts + - LP: #604407 + + [ Leann Ogasawara ] + + * [Config] Disable CONFIG_DRM_VMWGFX (staging driver) + - LP: #606139 + * [Config] ports: Disable CONFIG_DRM_VMWGFX (staging driver) + - LP: #606139 + * [Config] Enable CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y + * [Config] ports: Enable CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y + + [ Lee Jones ] + + * Stop ARM boards crashing when CUPS is loaded + - LP: #601226 + + [ Upstream Kernel Changes ] + + * perf probe: Support tracing an entry of array + * perf probe: Support static and global variables + + -- Leann Ogasawara Fri, 16 Jul 2010 14:38:17 -0700 + +linux (2.6.35-8.13) maverick; urgency=low + + [ Kees Cook ] + + * SAUCE: Yama: check PTRACE using thread group leader + * SAUCE: Yama: search for PTRACE exceptions via thread group leader + - LP: #603716 + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc5 + * [Config] update configs following rebase to v2.6.35-rc5 + + [ Nicolas Pitre ] + + * SAUCE: make ndiswrapper available on X86 only + + [ Tim Gardner ] + + * [Config] Added ums-cypress to udeb + - LP: #576066 + * SAUCE: fix build error with CONFIG_BLK_DEV_INITRD=n + * [Config] CONFIG_NDISWRAPPER=m across all configs + + [ Upstream Kernel Changes ] + + * HID: magicmouse: report last touch up + * rebase to 2.6.35-rc5 + + -- Leann Ogasawara Tue, 13 Jul 2010 18:57:59 -0700 + +linux (2.6.35-7.12) maverick; urgency=low + + [ Tim Gardner ] + + * [Upstream] i915: Use the correct mask to detect i830 aperture size. 
+ - LP: #597075 + + [ Upstream Kernel Changes ] + + * (drop after 2.6.35) drm/radeon/kms: add ioport register access + (squashed) + + -- Tim Gardner Thu, 08 Jul 2010 09:53:13 -0600 + +linux (2.6.35-7.11) maverick; urgency=low + + [ Tim Gardner ] + + * [Config] CONFIG_X86_MRST=n + + [ Upstream Kernel Changes ] + + * (drop after 2.6.35-rc5) writeback: remove writeback_inodes_wbc + * (drop after 2.6.35-rc5) writeback: split writeback_inodes_wb + * (drop after 2.6.35-rc5) writeback: simplify the write back thread queue + + -- Tim Gardner Tue, 06 Jul 2010 18:39:08 -0600 + +linux (2.6.35-7.10) maverick; urgency=low + + [ Kees Cook ] + + * SAUCE: security: create task_free security callback + * SAUCE: Yama: add PTRACE exception tracking and interface + * SAUCE: security: unconditionally chain to Yama LSM + * Revert "SAUCE: ptrace: restrict ptrace scope to children" + * Revert "SAUCE: fs: block hardlinks to non-accessible sources" + * Revert "SAUCE: fs: block cross-uid sticky symlinks" + * [Upstream] security: Yama LSM + * [Config] Enable CONFIG_SECURITY_YAMA=y + + [ Tim Gardner ] + + * [Config] updateconfigs/updateportsconfigs after rebase to 2.6.35-rc4 + + [ Upstream Kernel Changes ] + + * rebase to 2.6.35-rc4 + + -- Leann Ogasawara Thu, 01 Jul 2010 08:55:57 -0700 + +linux (2.6.35-6.9) maverick; urgency=low + + [ Tim Gardner ] + + * [Upstream] direct_splice_actor() should not use pos in sd + - LP: #588861 + + -- Leann Ogasawara Mon, 28 Jun 2010 12:35:49 -0700 + +linux (2.6.35-6.8) maverick; urgency=low + + [ Mathieu J. Poirier ] + + * ARM: Adding regulator supply for vdds_sdi. + - LP: #597904 + + -- Leann Ogasawara Sun, 27 Jun 2010 16:34:43 -0700 + +linux (2.6.35-6.7) maverick; urgency=low + + [ Alberto Milone ] + + * [Upstream] Add support for the ATIF ACPI method to the radeon driver + + [ Chase Douglas ] + + * [Upstream] HID: magicmouse: scroll on entire surface, not just middle + of mouse + * [Upstream] HID: magicmouse: disable and add module param for scroll + acceleration + * [Upstream] HID: magicmouse: properly account for scroll movement in + state + * [Upstream] HID: magicmouse: add param for scroll speed + * [Upstream] HID: magicmouse: enable horizontal scrolling + + [ Henrik Rydberg ] + + * [Upstream] Input: evdev - convert to dynamic event buffer + * [Upstream] Input: evdev - use driver hint to compute size of event + buffer + * [Upstream] Input: bcm5974 - set the average number of events per MT + event packet + * [Upstream] Input: hid-input - use a larger event buffer for MT devices + * [Upstream] Input: evdev - never leave the client buffer empty after + write + + [ John Johansen ] + + * SAUCE: AppArmor: -- mainline 2010-06-23 + * SAUCE: AppArmor 2.4 compatibility patch + * SAUCE: fs: block hardlinks to non-accessible sources AppArmor portion + + [ Leann Ogasawara ] + + * [Config] Enable CONFIG_INTR_REMAP=y + - LP: #597091 + * [Config] Enable CONFIG_X86_X2APIC + - LP: #597091 + + [ Mathieu J. 
Poirier ] + + * [Config] ARM: Turning off CONFIG_CPU_IDLE on omap + - LP: #594382 + + -- Leann Ogasawara Thu, 24 Jun 2010 12:19:48 -0700 + +linux (2.6.35-5.6) maverick; urgency=low + + [ Amit Kucheria ] + + * [Config] update omap flavour description + + [ Andy Whitcroft ] + + * update to ubuntu-debian:508b7aa34b578c0d1e51bfb571f2bfb824dc65ac + - LP: #570500, #576274 + * SAUCE: add option to hand off all kernel parameters to init + - LP: #586386 + * [Config] enable passing all kernel command line to init + - LP: #586386 + * [Config] disable CONFIG_VMI + - LP: #537601 + * [Config] enable CONFIG_IPV6_SIT_6RD + - LP: #591869 + * [Config] enable CONFIG_VMWARE_BALOON as module + - LP: #592039 + + [ Leann Ogasawara ] + + * Revert "SAUCE: pm: Config option to disable handling of console during + suspend/resume" + - LP: #594885 + * [Config] Remove CONFIG_PM_DISABLE_CONSOLE + * [Config] ports: enable passing all kernel command line to init + - LP: #586386 + * [Config] Enable CONFIG_FB_VESA=y for x86 + * [Config] Add CONFIG_FRAMEBUFFER_CONSOLE=y to config enforcer + * [Config] Add CONFIG_FB_VESA=y for x86 to config enforcer + * [Config] Enable CONFIG_TASK_DELAY_ACCT=y + - LP: #493156 + + [ Mathieu Poirier ] + + * ARM: Adding MosChip MCS7830 to nic-usb + - LP: #584920 + + [ Upstream Kernel Changes ] + + * Revert "[Upstream] docbook: need xmldoclinks for all doc types" + * docbook: need xmldoclinks for all doc types + * perf probe: Add kernel source path option + + -- Leann Ogasawara Thu, 17 Jun 2010 08:05:29 -0700 + +linux (2.6.35-4.5) maverick; urgency=low + + [ Leann Ogasawara ] + + * Revert "[Upstream] (evdev) Use driver hint to compute the evdev buffer + size (rev2)" + * Revert "[Upstream] (evdev) Convert to dynamic event buffer (rev4)" + * Revert "[Upstream] (evdev) Use multi-reader buffer to save space + (rev4)" + * Revert "SAUCE: drivers: Remove some duplicate device entries in various + modules" + * [Upstream] USB: option: Remove duplicate AMOI_VENDOR_ID + * [Upstream] Revert "USB: Adding support for HTC Smartphones to ipaq" + * [Upstream] p54usb: Comment out duplicate Medion MD40900 device id + + [ Tim Gardner ] + + * [Config] CONFIG_NFS_FSCACHE=y + - LP: #440522 + * [Config] CONFIG_FSCACHE_STATS=y, CONFIG_FSCACHE_HISTOGRAM=y + - LP: #440522 + + -- Leann Ogasawara Wed, 16 Jun 2010 08:43:07 -0700 + +linux (2.6.35-3.4) maverick; urgency=low + + [ Andy Whitcroft ] + + * debian -- ensure the version number is clean + + [ Henrik Rydberg ] + + * [Upstream] Introduce MT event slots (rev 5) + * [Upstream] Document the MT event slot protocol (rev5) + * [Upstream] (evdev) Use multi-reader buffer to save space (rev4) + * [Upstream] (evdev) Convert to dynamic event buffer (rev4) + * [Upstream] (evdev) Use driver hint to compute the evdev buffer size + (rev2) + + [ Leann Ogasawara ] + + * Revert "SAUCE: Add MODULE_ALIAS for Dell WMI module" + * Revert "SAUCE: hostap: send events on data interface as well as master + interface" + * Revert "Fix webcam having USB ID 0ac8:303b" + * Revert "SAUCE: toshiba_acpi -- pull in current -dev version of driver" + * rebase to v2.6.35-rc3 + + [ Maxim Levitsky ] + + * [Config] Enable new Smartmedia/xD translation layer + - LP: #202490 + + [ Upstream Kernel Changes ] + + * net: fix deliver_no_wcard regression on loopback device + + [ Upstream changes ] + + * rebased to v2.6.35-rc3 + + -- Leann Ogasawara Thu, 10 Jun 2010 16:15:22 -0700 + +linux (2.6.35-2.3) maverick; urgency=low + + [ Bryan Wu ] + + * CONFIG: enforce -- make sure we disable CONFIG_LOCALVERSION_AUTO + + [ 
Leann Ogasawara ] + + * [Config] armel: Enable CONFIG_BNX2=m + * [Config] ports: Enable CONFIG_BNX2X=m + * SAUCE: armel: define get_dma_ops to fix FTBS + + [ Tim Gardner ] + + * [Upstream] net: Print num_rx_queues imbalance warning only when there + are allocated queues + - LP: #591416 + + -- Leann Ogasawara Wed, 09 Jun 2010 08:27:41 -0700 + +linux (2.6.35-2.2) maverick; urgency=low + + [ Andy Whitcroft ] + + * [Config] d-i: make armel configuration versatile flavour specific + - LP: #588805 + * [Config] d-i: enable .udebs for omap flavour + - LP: #588805 + + [ Kees Cook ] + + * ptrace: limit scope to attach only (allow read) + - LP: #589656 + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc2 + * [Config] update configs following rebase to v2.6.35-rc2 + * [Config] update port configs following rebase to v2.6.35-rc2 + + [ Lee Jones ] + + * Enable perf to be more helpful when perf_ does not exist. + - LP: #570500 + * 'fdr editconfig' modification. Easily skip over unwanted menuconfigs. + + [ Tim Gardner ] + + * [Config] Update bnx2 udeb firmware files + - LP: #589304 + + [ Upstream changes ] + + * rebased to v2.6.35-rc2 + + -- Leann Ogasawara Mon, 07 Jun 2010 09:45:04 -0700 + +linux (2.6.35-1.1) maverick; urgency=low + + [ Andy Whitcroft ] + + * ubuntu: AUFS -- update to standalone 2.6.35-rcN as at 20100601 + - LP: #587888 + * ubuntu: AUFS -- track changes to the arguments to fop fsync() + + [ Leann Ogasawara ] + + * rebase to v2.6.35-rc1 + * [Config] update configs following rebase to v2.6.35-rc1 + * [Config] update port configs following rebase to v2.6.35-rc1 + * SAUCE: lirc: rename usb_buffer_alloc() and usb_buffer_free() + * SAUCE: ndiswrapper: rename usb_buffer_alloc() and usb_buffer_free() + * SAUCE: ndiswrapper: convert multicast list to list_head + * [Config] [FTBS] armel: Temporarily disable CONFIG_GPIO_JANZ_TTL + * [Config] [FTBS] ia64: Temporarily disable gpiolib + * [Config] [FTBS] ia64: Temporarily disable CONFIG_CEPH_FS + * [Config] [FTBS] sparc: Temporarily disable CONFIG_INFINIBAND_QIB + * [Config] [FTBS] sparc: Temporarily disable CONFIG_MFD_JANZ_CMODIO + * [Config] [FTBS] armel: Temporarily disable CONFIG_MFD_JANZ_CMODIO + * [Config] [FTBS] armel: Temporarily disable CONFIG_DT3155 + * [Config] [FTBS] sparc: Temporarily disable CONFIG_MTD_NAND_DENALI + * [Config] [FTBS] armel: Temporarily disable bnx2 + * [Config] [FTBS] armel: Temporarily disable CONFIG_SERIAL_UARTLITE + * SAUCE: [FTBS] armel: Don't include asm/agp.h for ttm + * SAUCE: [FTBS] armel: include linux/dma-mapping.h + * SAUCE: [FTBS] armel: replace omap_set_gpio_debounce with + gpio_set_debounce + + [ Upstream Kernel Changes ] + + * of/usb: fix build error due to of_node pointer move + * n2_crypto: Fix build after of_device/of_platform_driver changes. + * powerpc/fsl-booke: fix the case where we are not in the first page + * powerpc/fsl-booke: Move the entry setup code into a seperate file + * powerpc/kexec: Add support for FSL-BookE + * greth: Fix build after OF device conversions. + + [ Upstream changes ] + + * rebased to v2.6.35-rc1 + + -- Leann Ogasawara Fri, 04 Jun 2010 23:01:52 -0700 + +linux (2.6.35-1.0) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * Null entry.
+ + -- Leann Ogasawara Wed, 02 Jun 2010 15:17:41 -0700 + +linux (2.6.34-5.14) maverick; urgency=low + + [ Tim Gardner ] + + * [Config] Added module inclusion support + * [Config] Added virtual flavour module inclusion list and d-i package + definitions + + -- Leann Ogasawara Wed, 02 Jun 2010 12:58:14 -0700 + +linux (2.6.34-5.13) maverick; urgency=low + + [ Andy Whitcroft ] + + * Revert "ubuntu: AUFS -- aufs2 20091209" + * Revert "ubuntu: AUFS -- export various core functions + (aufs2-standalone.patch)" + * Revert "ubuntu: AUFS -- export various core functions + (aufs2-base.patch)" + * ubuntu: AUFS -- aufs2 base patch for linux-2.6.34 + - LP: #587888 + * ubuntu: AUFS -- aufs2 standalone patch for linux-2.6.34 + - LP: #587888 + * ubuntu: AUFS -- update to standalone 2.6.34 as at 20100601 + - LP: #587888 + * [Config] AUFS -- enable aufs options + - LP: #587888 + + -- Leann Ogasawara Tue, 01 Jun 2010 08:56:43 -0700 + +linux (2.6.34-5.12) maverick; urgency=low + + [ Andy Whitcroft ] + + * enforce -- ensure SYSFS compatibility is disabled + + [ Chase Douglas ] + + * build with libdw-dev for perf probe symbol support + * maverick ftrace configuration changes + + [ Kees Cook ] + + * Revert "SAUCE: x86: brk away from exec rand area" + * Revert "SAUCE: [um] Don't use nx_enabled under UML" + * Revert "SAUCE: [x86] implement cs-limit nx-emulation for ia32" + * SAUCE: x86: implement cs-limit nx-emulation for ia32 + - LP: #369978 + * SAUCE: x86: more tightly confine cs-limit nx-emulation to ia32 only + * SAUCE: x86: brk away from exec rand area + - LP: #452175 + * SAUCE: ptrace: restrict ptrace scope to children + + [ Leann Ogasawara ] + + * Add new omap flavour to getabis + * [Config] Enable CONFIG_FRAMEBUFFER_CONSOLE=y for all archs + - LP: #585490 + * build/modules: Temporarily add ignore.modules + * ubuntu: iscsitarget -- version 1.4.20.1 + + [ Loïc Minier ] + + * SAUCE: [um] Don't use nx_enabled under UML + - LP: #524849 + + -- Leann Ogasawara Fri, 28 May 2010 08:27:17 -0700 + +linux (2.6.34-4.11) maverick; urgency=low + + [ Amit Kucheria ] + + * SAUCE: omap: remove calls to usb_nop_xceiv_register from board files + * [Config] Add support for OMAP-mainline flavour + + [ Andy Whitcroft ] + + * SAUCE: powerpc: fix compile error when ptrace.h is included from + userspace + - LP: #583733 + + [ Chase Douglas ] + + * Revert "SAUCE: Don't register vga16fb framebuffer if other framebuffers + are present" + * Revert "SAUCE: Disable function tracing after hitting __schedule_bug" + * Revert "SAUCE: drm/i915: don't change DRM configuration when releasing + load detect pipe" + + [ Kees Cook ] + + * SAUCE: fs: block cross-uid sticky symlinks + * SAUCE: fs: block hardlinks to non-accessible sources + + [ Koen Kooi ] + + * SAUCE: board-omap3-beagle: add DSS2 support + + [ Leann Ogasawara ] + + * Revert "staging/go7007 -- disable" + * Revert "[Config] staging/winbond -- disable" + * Revert "Disable 4MB page tables for Atom, work around errata AAE44" + * Revert "SAUCE: sync before umount to reduce time taken by ext4 umount" + * Revert "SAUCE: Enable an e1000e Intel Corporation 82567 Gigabit + controller" + * Revert "SAUCE: Fix MODULE_IMPORT/MODULE_EXPORT" + * Revert "SAUCE: Created MODULE_EXPORT/MODULE_IMPORT macros" + * Revert "SAUCE: input/mouse/alps: Do not call psmouse_reset() for alps" + * Revert "SAUCE: r8169: disable TSO by default for RTL8111/8168B + chipsets." + * Revert "[Upstream] b43: Declare all possible firmware files."
+ * Revert "add Breaks: against hardy lvm2" + * Revert "SAUCE: Guest OS does not recognize a lun with non zero target + id on Vmware ESX Server" + * Revert "SAUCE: Catch nonsense keycodes and silently ignore" + * [Config] Enable CONFIG_ECRYPT_FS=y for ports + * [Config] Enable CONFIG_USB=y for armel and sparc + * [Config] Enable CONFIG_SCSI=y for ia64 and sparc + * [Config] Enable CONFIG_RFKILL=y for ports + * [Config] Enable CONFIG_ATH9K_DEBUGFS=y + * [Config] Enable CONFIG_IWMC3200TOP_DEBUGFS=y + * [Config] Enable CONFIG_RCU_FAST_NO_HZ=y + * [Config] Enable CONFIG_IWLWIFI_DEVICE_TRACING=y + * [Config] Enable CONFIG_LIBERTAS_MESH=y + * [Config] Enable CONFIG_MMC_RICOH_MMC=y + * [Config] CONFIG_RT2800USB_UNKNOWN=y + * [Config] Enable CONFIG_VGA_SWITCHEROO=y + * [Config] Enable CONFIG_CEPH_FS=m + * [Config] Enable CONFIG_CRYPTO_PCRYPT=m + * [Config] Enable CONFIG_EEEPC_WMI=m + * [Config] Enable CONFIG_RT2800PCI=m + * [Config] Enable CONFIG_SCSI_HPSA=m + * [Config] Enable CONFIG_VHOST_NET=m + * [Config] Disable CONFIG_SND_HDA_INPUT_BEEP_MODE by default + - LP: #582350 + * [Config] Disable CONFIG_SOUND_OSS* and CONFIG_SND_*OSS + - LP: #579300 + * [Config] Enable CONFIG_PCIEASPM=y + - LP: #333990 + * [Config] updateconfigs for OMAP flavour + + [ Loïc Minier ] + + * Enable perf tools on armel + + [ Tim Gardner ] + + * SAUCE: Updated ndiswrapper to 1.56 + - LP: #582555 + * [Config] Added virtual flavour + * [Config] Remove support for sub-flavours + * [Config] Removed amd64 preempt flavour + * [Config] updateconfigs, updateportsconfigs after flavour munging + + -- Leann Ogasawara Tue, 25 May 2010 09:34:55 -0700 + +linux (2.6.34-3.10) maverick; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34 + + [ Upstream changes ] + + * rebased to v2.6.34 + + -- Leann Ogasawara Tue, 18 May 2010 17:35:35 -0700 + +linux (2.6.34-2.9) maverick; urgency=low + + [ Leann Ogasawara ] + + * [Config] [FTBS] Disable comedi for armel + + -- Leann Ogasawara Thu, 13 May 2010 23:20:55 +0200 + +linux (2.6.34-2.8) maverick; urgency=low + + [ Leann Ogasawara ] + + * Drop lpia + * [Config] [FTBS] disable KVM + * [Config] [FTBS] disable ipr for armel + + -- Leann Ogasawara Thu, 13 May 2010 16:07:52 +0200 + +linux (2.6.34-2.7) maverick; urgency=low + + [ Leann Ogasawara ] + + * [Config] disable CONFIG_SCSI_IPR on powerpc + * [Config] Remove 386 flavour per UDS discussion + + -- Leann Ogasawara Wed, 12 May 2010 18:26:43 +0200 + +linux (2.6.34-1.6) maverick; urgency=low + + [ Chase Douglas ] + + * enforce CONFIG_TMPFS_POSIX_ACL=y + - LP: #575940 + * don't force module dependency checking + - LP: #577029 + + [ Kees Cook ] + + * SAUCE: mmap_min_addr check CAP_SYS_RAWIO only for write + - LP: #568844 + + [ Leann Ogasawara ] + + * Revert "SAUCE: ata: blacklist FUJITSU MHW2160BH PL" + * rebase to v2.6.34-rc7 + * [Config] update configs following rebase to v2.6.34-rc7 + * [Config] update port configs following rebase to v2.6.34-rc7 + * Add btrfs to the udebs + + [ Tim Gardner ] + + * [Config] Add atl1c to nic-modules udeb + - LP: #557130 + + [ Upstream changes ] + + * rebased to v2.6.34-rc7 + + -- Leann Ogasawara Tue, 11 May 2010 11:29:08 +0200 + +linux (2.6.34-1.5) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34-rc6 + * [Config] update configs following rebase to v2.6.34-rc6 + * [Config] update port configs following rebase to v2.6.34-rc6 + + [ Upstream changes ] + + * rebased to v2.6.34-rc6 + + -- Leann Ogasawara Fri, 30 Apr 2010 15:54:05 +0100 + +linux (2.6.34-1.4) UNRELEASED; urgency=low + + [ Leann 
Ogasawara ] + + * rebase to v2.6.34-rc5 + * [Config] update ports configs following rebase to v2.6.34-rc5 + + [ Upstream changes ] + + * rebased to v2.6.34-rc5 + + -- Leann Ogasawara Thu, 22 Apr 2010 15:36:12 -0700 + +linux (2.6.34-1.3) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34-rc4 + * [Config] update configs following rebase to v2.6.34-rc4 + * [Config] update port configs following rebase to v2.6.34-rc4 + * ubuntu: dm-raid4-5 -- update to compile with 2.6.34-rc4 + + [ Upstream changes ] + + * rebased to v2.6.34-rc4 + + -- Leann Ogasawara Tue, 13 Apr 2010 18:33:44 -0700 + +linux (2.6.34-1.2) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * Temporarily disable building linux-doc + * rebase to v2.6.34-rc3 + * [Config] update configs following rebase to v2.6.34-rc3 + * [Config] update port configs following rebase to v2.6.34-rc3 + + [ Upstream changes ] + + * rebased to v2.6.34-rc3 + + -- Leann Ogasawara Tue, 30 Mar 2010 16:55:44 -0700 + +linux (2.6.34-1.1) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * rebase to v2.6.34-rc2 + * ubuntu: dm-raid4-5 -- update to compile with 2.6.34-rc2 + * [Config] update port configs following rebase to v2.6.34-rc2 + * [Config] update configs following rebase to v2.6.34-rc2 + + [ Upstream changes ] + + * rebased to v2.6.34-rc2 + + -- Leann Ogasawara Wed, 24 Mar 2010 23:00:39 -0700 + +linux (2.6.33-1.1) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * ubuntu: dm-raid4-5 -- update to compile with 2.6.33 + * ubuntu: lirc -- drop explicit include of linux/autoconf.h + * ubuntu: lirc -- pass kfifo to kfifo_alloc and move spinlock + * ubuntu: lirc -- rename kfifo_put and kfifo_get + * ubuntu: iscsitarget -- rename daddr inet_sock field + * rebased to v2.6.33 + * [Config] update configs following rebase to v2.6.33 + * [Config] update ports configs following rebase to v2.6.33 + + [ Upstream changes ] + + * rebased to v2.6.33 + + -- Leann Ogasawara Tue, 23 Mar 2010 03:55:46 -0700 + +linux (2.6.33-0.0) UNRELEASED; urgency=low + + [ Leann Ogasawara ] + + * Null entry.
+ + -- Leann Ogasawara Wed, 17 Mar 2010 07:48:56 -0700 + +linux (2.6.32-16.25) lucid; urgency=low + + [ Andy Whitcroft ] + + * linux-tools -- move to Suggests: with explicit seeding + - LP: #534635 + + [ Tim Gardner ] + + * [Config] CONFIG_HID=m + + [ Upstream Kernel Changes ] + + * (pre-stable) sched: Fix SMT scheduler regression in + find_busiest_queue() + * KVM: introduce kvm_vcpu_on_spin + * KVM: VMX: Add support for Pause-Loop Exiting + + -- Andy Whitcroft Tue, 09 Mar 2010 14:13:51 +0000 + +linux (2.6.32-16.24) lucid; urgency=low + + [ Andy Whitcroft ] + + * armel -- perf userspace does not support arm + * ia64 -- libelf-dev/binutils-dev to not provide necessary libraries + + -- Andy Whitcroft Sat, 06 Mar 2010 11:42:12 +0000 + +linux (2.6.32-16.23) lucid; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: PM report driver and device suspend/resume times -- move config + * update to standards version 3.8.4.0 + * printenv -- expose all of the package selectors + * source package -- cleanup source content control + * doc package -- ensure we do build package content on buildd + * lintian -- correct the address in the debian/copyright + * lintian -- update debhelper package version dependency + * lintian -- fix ghostscript dependency + * lintian -- add required misc:Depends + * lintian -- move our debhelper compat level to debian/compat + * perf -- build the kernel carried tools + * perf -- add linux-tools carrying the version switches and manuals + * SAUCE: fix up Kconfig for staging drivers + * [Config] enable NOUVEAU etc following drm backport + * update DRM to mainline v2.6.33 + * [Config] Remove AppArmor config options that no longer exist (ports) + * [Config] updateportsconfigs following drm update + + [ John Johansen ] + + * ubuntu: AppArmor -- update to mainline 2010-03-04 + * SAUCE: AppArmor: Reintroduce AppArmor 2.4 compatibility + * SAUCE: AppArmor: replace strim with strstrip for 2.6.32 kernels + * [Config] Remove AppArmor config options that no longer exist + + [ Manoj Iyer ] + + * ubuntu: rtl8192se -- version 2010-0115,0014 + - LP: #530275 + * [Config] added CONFIG_RTL8192SE module. + - LP: #530275 + + [ Tim Gardner ] + + * [Config] Added vmw_pvscsi to d-i/scsi-modules + - LP: #531017 + * [Upstream] netfilter: xt_recent: Add an entry reaper + + [ Upstream Kernel Changes ] + + * Revert "KVM: x86 emulator: Check CPL level during privilege instruction + emulation" + * Revert "KVM: x86 emulator: Fix popf emulation" + * Revert "KVM: x86 emulator: Check IOPL level during io instruction + emulation" + * Revert "KVM: x86 emulator: Add Virtual-8086 mode of emulation" + * Revert "KVM: fix memory access during x86 emulation." + * Add vlan (8021.Q) module package for d-i. + * (pre-stable) drm/i915: blacklist lid status: Sony VGN-BX196VP, Dell + Inspiron 700m + - LP: #515246 + * [Upstream] docbook: need xmldoclinks for all doc types + * x86: set_personality_ia32() misses force_personality32 + * lib: Introduce generic list_sort function + * drm/nv50: Implement ctxprog/state generation. + * drm/nv50: Remove redundant/incorrect ctxvals initialisation. + * (pre-stable) drm/i915: blacklist lid status: Sony VGN-BX196VP, Dell + Inspiron 700m + - LP: #515246 + + -- Andy Whitcroft Fri, 05 Mar 2010 15:40:38 +0000 + +linux (2.6.32-15.22) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Config] added new config option CONFIG_SR_REPORT_TIME_LIMIT" + * Revert "SAUCE: PM report driver and device suspend/resume times."
+ * [Config] set CONFIG_SR_REPORT_TIME_LIMIT + + [ Manoj Iyer ] + + * SAUCE: PM report driver and device suspend/resume times. + + -- Andy Whitcroft Tue, 02 Mar 2010 01:35:37 +0000 + +linux (2.6.32-15.21) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "(pre-stable) drm/i915: Increase fb alignment to 64k" + * Revert "[Config] lenovo-sl-laptop -- enable" + * Revert "ubuntu: lenovo-sl-laptop -- git tip (b19a08f81f)" + * armel -- cramfs module will no longer be built + * d-i -- make all modules optional + * rename the debug packages to match archive standard + - LP: #527837 + * lenovo-sl-laptop is no longer built + + [ Colin Ian King ] + + * Disable 4MB page tables for Atom, work around errata AAE44 + - LP: #523112 + + [ Colin Watson ] + + * ubuntu: dm-raid4-5: Depend on XOR_BLOCKS + * ubuntu: fsam7400: Depend on CHECK_SIGNATURE + + [ Jesse Barnes ] + + * SAUCE: drm/i915: don't change DRM configuration when releasing load + detect pipe + - LP: #488328 + + [ Loïc Minier ] + + * [Config] armel Update versatile initrd configs + - LP: #524893 + * SAUCE: [um] Don't use nx_enabled under UML + - LP: #524849 + + [ Manoj Iyer ] + + * [Config] added new config option CONFIG_SR_REPORT_TIME_LIMIT + + [ Mario Limonciello ] + + * SAUCE: v3 - Add Dell Business Class Netbook LED driver + + [ Rafael J. Wysocki ] + + * SAUCE: PM report driver and device suspend/resume times. + + [ Surbhi Palande ] + + * Revert "[Upstream] e1000e: enhance frame fragment detection" + - CVE-2009-4538 + * Revert "[Upstream] e1000: enhance frame fragment detection" + - CVE-2009-4536 + + [ Tim Gardner ] + + * [Config] Enabled CONFIG_LEDS_DELL_NETBOOKS=m + * SAUCE: (pre-stable) netfilter: xt_recent: fix buffer overflow + * SAUCE: (pre-stable) netfilter: xt_recent: fix false match + + [ Upstream Kernel Changes ] + + * Revert "(pre-stable) eCryptfs: Add getattr function" + * Fix potential crash with sys_move_pages + * futex_lock_pi() key refcnt fix + * futex: Handle user space corruption gracefully + * futex: Handle futex value corruption gracefully + * Fix race in tty_fasync() properly + * hwmon: (w83781d) Request I/O ports individually for probing + * hwmon: (lm78) Request I/O ports individually for probing + * hwmon: (adt7462) Wrong ADT7462_VOLT_COUNT + * ALSA: ctxfi - fix PTP address initialization + * drm/i915: disable hotplug detect before Ironlake CRT detect + * drm/i915: enable self-refresh on 965 + * drm/i915: Disable SR when more than one pipe is enabled + * drm/i915: Fix DDC on some systems by clearing BIOS GMBUS setup. + * drm/i915: Add HP nx9020/SamsungSX20S to ACPI LID quirk list + * drm/i915: Fix the incorrect DMI string for Samsung SX20S laptop + * drm/i915: Add MALATA PC-81005 to ACPI LID quirk list + * usb: r8a66597-hcd: Flush the D-cache for the pipe-in transfer buffers. + * i2c-tiny-usb: Fix on big-endian systems + * drm/i915: handle FBC and self-refresh better + * drm/i915: Increase fb alignment to 64k + * drm/i915: Update write_domains on active list after flush. 
+ * regulator: Fix display of null constraints for regulators + * ALSA: hda-intel: Avoid divide by zero crash + * CPUFREQ: Fix use after free of struct powernow_k8_data + * freeze_bdev: don't deactivate successfully frozen MS_RDONLY sb + * cciss: Make cciss_seq_show handle holes in the h->drv[] array + * ioat: fix infinite timeout checking in ioat2_quiesce + * resource: add helpers for fetching rlimits + * fs/exec.c: restrict initial stack space expansion to rlimit + * cifs: fix length calculation for converted unicode readdir names + * NFS: Fix a reference leak in nfs_wb_cancel_page() + * NFS: Try to commit unstable writes in nfs_release_page() + * NFSv4: Don't allow posix locking against servers that don't support it + * NFSv4: Ensure that the NFSv4 locking can recover from stateid errors + * NFS: Fix an Oops when truncating a file + * NFS: Fix a umount race + * NFS: Fix a bug in nfs_fscache_release_page() + * NFS: Fix the mapping of the NFSERR_SERVERFAULT error + * md: fix 'degraded' calculation when starting a reshape. + * V4L/DVB: dvb-core: fix initialization of feeds list in demux filter + * Export the symbol of getboottime and mmonotonic_to_bootbased + * kvmclock: count total_sleep_time when updating guest clock + * KVM: PIT: control word is write-only + * tpm_infineon: fix suspend/resume handler for pnp_driver + * amd64_edac: Do not falsely trigger kerneloops + * netfilter: nf_conntrack: fix memory corruption with multiple namespaces + * netfilter: nf_conntrack: per netns nf_conntrack_cachep + * netfilter: nf_conntrack: restrict runtime expect hashsize modifications + * netfilter: xtables: compat out of scope fix + * netfilter: nf_conntrack: fix hash resizing with namespaces + * drm/i915: remove full registers dump debug + * drm/i915: add i915_lp_ring_sync helper + * drm/i915: Don't wait interruptible for possible plane buffer flush + * dasd: remove strings from s390dbf + * crypto: padlock-sha - Add import/export support + * wmi: Free the allocated acpi objects through wmi_get_event_data + * dell-wmi, hp-wmi, msi-wmi: check wmi_get_event_data() return value + * /dev/mem: introduce size_inside_page() + * devmem: check vmalloc address on kmem read/write + * devmem: fix kmem write bug on memory holes + * SCSI: mptfusion : mptscsih_abort return value should be SUCCESS instead + of value 0. 
+ * sh: Couple kernel and user write page perm bits for CONFIG_X2TLB + * ALSA: hda - use WARN_ON_ONCE() for zero-division detection + * dst: call cond_resched() in dst_gc_task() + * ALSA: hda - Improved MacBook (Pro) 5,1 / 5,2 support + * befs: fix leak + * rtc-fm3130: add missing braces + * Call flush_dcache_page after PIO data transfers in libata-sff.c + * ahci: add Acer G725 to broken suspend list + * pktgen: Fix freezing problem + * x86/amd-iommu: Fix IOMMU-API initialization for iommu=pt + * x86/amd-iommu: Fix deassignment of a device from the pt_domain + * x86: Re-get cfg_new in case reuse/move irq_desc + * Staging: fix rtl8187se compilation errors with mac80211 + * ALSA: usb-audio - Avoid Oops after disconnect + * serial: 8250: add serial transmitter fully empty test + * sysfs: sysfs_sd_setattr set iattrs unconditionally + * class: Free the class private data in class_release + * USB: usbfs: only copy the actual data received + * USB: usbfs: properly clean up the as structure on error paths + * rtl8187: Add new device ID + * ACPI: Add NULL pointer check in acpi_bus_start + * ACPI: fix High cpu temperature with 2.6.32 + * drm/radeon/kms: use udelay for short delays + * NFS: Too many GETATTR and ACCESS calls after direct I/O + * eCryptfs: Add getattr function + * b43: Fix throughput regression + * ath9k: Fix sequence numbers for PAE frames + * mac80211: Fix probe request filtering in IBSS mode + * iwlwifi: Fix to set correct ht configuration + * dm stripe: avoid divide by zero with invalid stripe count + * dm log: userspace fix overhead_size calcuations + * Linux 2.6.32.9 + * sfc: Fix SFE4002 initialisation + * sfc: Fix sign of efx_mcdi_poll_reboot() error in efx_mcdi_poll() + * sfc: SFE4002/SFN4112F: Widen temperature and voltage tolerances + * (pre-stable) HID: handle joysticks with large number of buttons + - LP: #492056 + * (pre-stable) HID: extend mask for BUTTON usage page + - LP: #492056 + * PM: Measure device suspend and resume times + * e1000: enhance frame fragment detection + - CVE-2009-4536 + * e1000e: enhance frame fragment detection + - CVE-2009-4538 + * KVM: fix memory access during x86 emulation. 
+ - CVE-2010-0306 + * KVM: x86 emulator: Add Virtual-8086 mode of emulation + - CVE-2010-0306 + * KVM: x86 emulator: Check IOPL level during io instruction emulation + - CVE-2010-0306 + * KVM: x86 emulator: Fix popf emulation + - CVE-2010-0306 + * KVM: x86 emulator: Check CPL level during privilege instruction + emulation + - CVE-2010-0306 + * Input: wacom - ensure the device is initialized properly upon resume + * Input: wacom - add defines for packet lengths of various devices + * Input: wacom - add support for new LCD tablets + - LP: #516777 + + -- Andy Whitcroft Mon, 01 Mar 2010 22:56:28 +0000 + +linux (2.6.32-14.20) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebuild following the GCC update to match compiler for out of tree modules + * Revert "[Config] drbd -- enable" + * Revert "ubuntu: drbd -- version 8.3.1" + * SAUCE: khubd -- switch USB product/manufacturer/serial handling to RCU + - LP: #510937 + + -- Andy Whitcroft Fri, 19 Feb 2010 18:47:18 +0000 + +linux (2.6.32-14.19) lucid; urgency=low + + [ Andy Whitcroft ] + + * ensure we build the source package contents when enabled + - LP: #522308 + * [Config] enable CONFIG_X86_MCE_XEON75XX + * SAUCE: AppArmor -- add linux/kref.h for struct kref + * [Config] enable CONFIG_HID_ORTEK + * enable udeb generation for arm versatile flavour + - LP: #522515 + + [ John Johansen ] + + * ubuntu: AppArmor -- update to mainline 2010-02-18 + - LP: #439560, #496110, #507069 + + [ Johnathon Harris ] + + * SAUCE: HID: add support for Ortek WKB-2000 + - LP: #405390 + + [ Upstream Kernel Changes ] + + * tpm_tis: TPM_STS_DATA_EXPECT workaround + - LP: #490487 + * x86, mce: Xeon75xx specific interface to get corrected memory error + information + * x86, mce: Rename cpu_specific_poll to mce_cpu_specific_poll + * x86, mce: Make xeon75xx memory driver dependent on PCI + * drm/edid: Unify detailed block parsing between base and extension + blocks + - LP: #500999 + * (pre-stable) eCryptfs: Add getattr function + - LP: #390833 + + -- Andy Whitcroft Thu, 18 Feb 2010 19:22:02 +0000 + +linux (2.6.32-13.18) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "enforcer -- make the enforcement configuration common" + * Revert "(pre-stable) Input: ALPS - add interleaved protocol support + (Dell E6x00 series)" + * Revert "(pre-stable) driver-core: fix devtmpfs crash on s390" + * Revert "(pre-stable) Driver-Core: devtmpfs - set root directory mode to + 0755" + * Revert "SAUCE: Adds support for COMPAL JHL90 webcam" + * Revert "SAUCE: fix kernel oops in VirtualBox during paravirt patching" + * Revert "SAUCE: make fc transport removal of target configurable" + * enforcer -- make the enforcement configuration common + * getabis -- add preempt flavour to the list + * [Config] enforce DEVTMPFS options + * [Config] armel -- cleanup to-be builtin modules + * [Config] cleanup ports configs + * [Config] enable CRYPTO_GHASH_CLMUL_NI_INTEL + - LP: #485536 + * add printdebian target to find branch target + * distclean -- do not remove debian.env + * [Config] generic-pae switch to M586TSC + - LP: #519448 + * git-ubuntu-log -- commonise duplicated log handling + * git-ubuntu-log -- tighten up Bug: NNNN matching + * git-ubuntu-log -- sort the bug numbers + + [ Chris Wilson ] + + * (pre-stable) drm/i915: Increase fb alignment to 64k + - LP: #404064 + + [ Eric Miao ] + + * arm -- enable ubuntu/ directory + + [ Huang Ying ] + + * SAUCE: crypto: ghash - Add PCLMULQDQ accelerated implementation + * SAUCE: crypto: ghash-intel - Fix building failure on x86_32 + + [ Loïc Minier ] + + * 
[Config] cleanup preempt configuration + * [Config] versatile: Fix video output + - LP: #517594 + * [Config] armel DEFAULT_MMAP_MIN_ADDR=32768 + * [Config] Large update to armel/versatile + * [Config] versatile: Add RTC support + * [Config] armel: Enable NEON + * [Config] versatile: Builtin MMC support + * [Config] versatile Builtin SCSI controller + * [Config] armel Disable dma_cache_sync callers + * [Config] armel Disable asm/time.h users + * [Config] armel Disable out of range udelay() + * [Config] armel Disable flush_cache_range() users + * [Config] armel -- Enable ubuntu/ drivers + + [ Steve Conklin ] + + * SAUCE: drm/i915: Add display hotplug event on Ironlake + * SAUCE: drm/i915: Add ACPI OpRegion support for Ironlake + + [ Upstream Kernel Changes ] + + * Revert "[Upstream]: oprofile/x86: add Xeon 7500 series support" + * Revert "Revert "[Bluetooth] Eliminate checks for impossible conditions + in IRQ handler"" + * clockevent: Don't remove broadcast device when cpu is dead + * clockevents: Add missing include to pacify sparse + * ACPI: don't cond_resched if irq is disabled + * be2net: Add support for next generation of BladeEngine device. + * be2net: Add the new PCI IDs to PCI_DEVICE_TABLE. + * mpt2sas: New device SAS2208 support is added + * ar9170: Add support for D-Link DWA 160 A2 + * powerpc/fsl: Add PCI device ids for new QoirQ chips + * davinci: dm646x: Add support for 3.x silicon revision + * Input: ALPS - add interleaved protocol support (Dell E6x00 series) + * Driver-Core: devtmpfs - set root directory mode to 0755 + * driver-core: fix devtmpfs crash on s390 + * vfs: get_sb_single() - do not pass options twice + * ALSA: hda - Add PCI IDs for Nvidia G2xx-series + * V4L/DVB (13569): smsusb: add autodetection support for five additional + Hauppauge USB IDs + * USB: mos7840: add device IDs for B&B electronics devices + * USB: ftdi_sio: add USB device ID's for B&B Electronics line + * V4L/DVB (13168): Add support for Asus Europa Hybrid DVB-T card (SAA7134 + SubVendor ID: 0x1043 Device ID: 0x4847) + * iTCO_wdt: Add support for Intel Ibex Peak + * atl1c:use common_task instead of reset_task and link_chg_task + * atl1e:disable NETIF_F_TSO6 for hardware limit + * V4L/DVB (13680a): DocBook/media: copy images after building HTML + * V4L/DVB (13680b): DocBook/media: create links for included sources + * netfilter: xtables: fix conntrack match v1 ipt-save output + * partitions: read whole sector with EFI GPT header + * partitions: use sector size for EFI GPT + * ALSA: ice1724 - Patch for suspend/resume for ESI Juli@ + * sched: Fix isolcpus boot option + * sched: Fix missing sched tunable recalculation on cpu add/remove + * nohz: Prevent clocksource wrapping during idle + * nfsd: Fix sort_pacl in fs/nfsd/nf4acl.c to actually sort groups + * timers, init: Limit the number of per cpu calibration bootup messages + * PCI: Always set prefetchable base/limit upper32 registers + * iscsi class: modify handling of replacement timeout + * NFS: Revert default r/wsize behavior + * HID: fixup quirk for NCR devices + * scsi_devinfo: update Hitachi entries (v2) + * scsi_dh: create sysfs file, dh_state for all SCSI disk devices + * scsi_transport_fc: remove invalid BUG_ON + * lpfc: fix hang on SGI ia64 platform + * libfc: fix typo in retry check on received PRLI + * libfc: fix ddp in fc_fcp for 0 xid + * fcoe: remove redundant checking of netdev->netdev_ops + * libfc: Fix wrong scsi return status under FC_DATA_UNDRUN + * libfc: lport: fix minor documentation errors + * libfc: don't WARN_ON in 
lport_timeout for RESET state + * fcoe: initialize return value in fcoe_destroy + * libfc: Fix frags in frame exceeding SKB_MAX_FRAGS in fc_fcp_send_data + * libfc: fix memory corruption caused by double frees and bad error + handling + * libfc: fix free of fc_rport_priv with timer pending + * libfc: remote port gets stuck in restart state without really + restarting + * fcoe, libfc: fix an libfc issue with queue ramp down in libfc + * fcoe: Fix checking san mac address + * fcoe: Fix getting san mac for VLAN interface + * qlge: Remove explicit setting of PCI Dev CTL reg. + * qlge: Set PCIE max read request size. + * qlge: Don't fail open when port is not initialized. + * qlge: Add handler for DCBX firmware event. + * qlge: Bonding fix for mode 6. + * PCI: AER: fix aer inject result in kernel oops + * DMI: allow omitting ident strings in DMI tables + * Input: i8042 - remove identification strings from DMI tables + * Input: i8042 - add Gigabyte M1022M to the noloop list + * Input: i8042 - add Dritek quirk for Acer Aspire 5610. + * ALSA: hda - select IbexPeak handler for Calpella + * ALSA: hda - Fix quirk for Maxdata obook4-1 + * ALSA: hda - Add missing Line-Out and PCM switches as slave + * iTCO_wdt.c - cleanup chipset documentation + * iTCO_wdt: add PCI ID for the Intel EP80579 (Tolapai) SoC + * iTCO_wdt: Add Intel Cougar Point and PCH DeviceIDs + * ahci: disable SNotification capability for ich8 + * ata_piix: fix MWDMA handling on PIIX3 + * md: fix small irregularity with start_ro module parameter + * V4L/DVB (13826): uvcvideo: Fix controls blacklisting + * cio: fix double free in case of probe failure + * cio: dont panic in non-fatal conditions + * netiucv: displayed TX bytes value much too high + * ipc ns: fix memory leak (idr) + * ALSA: hda - Fix HP T5735 automute + * hwmon: (fschmd) Fix a memleak on multiple opens of /dev/watchdog + * UBI: fix memory leak in update path + * UBI: initialise update marker + * ASoC: fix a memory-leak in wm8903 + * mac80211: check that ieee80211_set_power_mgmt only handles STA + interfaces. + * cfg80211: fix channel setting for wext + * KVM: S390: fix potential array overrun in intercept handling + * KVM: only allow one gsi per fd + * KVM: Fix race between APIC TMR and IRR + * KVM: MMU: bail out pagewalk on kvm_read_guest error + * KVM: x86: Fix host_mapping_level() + * KVM: x86: Fix probable memory leak of vcpu->arch.mce_banks + * KVM: x86: Fix leak of free lapic date in kvm_arch_vcpu_init() + * KVM: fix lock imbalance in kvm_*_irq_source_id() + * KVM: only clear irq_source_id if irqchip is present + * IPoIB: Clear ipoib_neigh.dgid in ipoib_neigh_alloc() + * x86: Reenable TSC sync check at boot, even with NONSTOP_TSC + * ACPI: enable C2 and Turbo-mode on Nehalem notebooks on A/C + - LP: #516325 + * iwlwifi: Fix throughput stall issue in HT mode for 5000 + * fnctl: f_modown should call write_lock_irqsave/restore + * x86, msr/cpuid: Pass the number of minors when unregistering MSR and + CPUID drivers. 
+ * Linux 2.6.32.7 + * scsi_lib: Fix bug in completion of bidi commands + * mptsas: Fix issue with chain pools allocation on katmai + * mm: add new 'read_cache_page_gfp()' helper function + * drm/i915: Selectively enable self-reclaim + * firewire: ohci: fix crashes with TSB43AB23 on 64bit systems + * S390: fix single stepped svcs with TRACE_IRQFLAGS=y + * x86: Set hotpluggable nodes in nodes_possible_map + * x86: Remove "x86 CPU features in debugfs" (CONFIG_X86_CPU_DEBUG) + * libata: retry FS IOs even if it has failed with AC_ERR_INVALID + * zcrypt: Do not remove coprocessor for error 8/72 + * dasd: fix possible NULL pointer errors + * ACPI: Add a generic API for _OSC -v2 + * ACPI: Add platform-wide _OSC support. + * ACPI: fix OSC regression that caused aer and pciehp not to load + * ACPI: Advertise to BIOS in _OSC: _OST on _PPC changes + * UBI: fix volume creation input checking + * e1000/e1000e: don't use small hardware rx buffers + * drm/i915: Reload hangcheck timer too for Ironlake + * Fix a leak in affs_fill_super() + * Fix failure exits in bfs_fill_super() + * fix oops in fs/9p late mount failure + * fix leak in romfs_fill_super() + * Fix remount races with symlink handling in affs + * fix affs parse_options() + * Fix failure exit in ipathfs + * mm: fix migratetype bug which slowed swapping + * FDPIC: Respect PT_GNU_STACK exec protection markings when creating + NOMMU stack + * Split 'flush_old_exec' into two functions + * sparc: TIF_ABI_PENDING bit removal + * x86: get rid of the insane TIF_ABI_PENDING bit + * Input: winbond-cir - remove dmesg spam + * x86: Disable HPET MSI on ATI SB700/SB800 + * iwlwifi: set default aggregation frame count limit to 31 + * drm/i915: only enable hotplug for detected outputs + * firewire: core: add_descriptor size check + * SECURITY: selinux, fix update_rlimit_cpu parameter + * regulator: Specify REGULATOR_CHANGE_STATUS for WM835x LED constraints + * x86: Add Dell OptiPlex 760 reboot quirk + - LP: #488319 + * x86: Add quirk for Intel DG45FC board to avoid low memory corruption + * x86/amd-iommu: Fix possible integer overflow + * clocksource: fix compilation if no GENERIC_TIME + * tcp: update the netstamp_needed counter when cloning sockets + * sky2: Fix oops in sky2_xmit_frame() after TX timeout + * net: restore ip source validation + * af_packet: Don't use skb after dev_queue_xmit() + * ax25: netrom: rose: Fix timer oopses + * KVM: allow userspace to adjust kvmclock offset + * oprofile/x86: add Xeon 7500 series support + * oprofile/x86: fix crash when profiling more than 28 events + * libata: retry link resume if necessary + * mm: percpu-vmap fix RCU list walking + * mm: purge fragmented percpu vmap blocks + * block: fix bio_add_page for non trivial merge_bvec_fn case + * Fix 'flush_old_exec()/setup_new_exec()' split + * random: drop weird m_time/a_time manipulation + * random: Remove unused inode variable + * block: fix bugs in bio-integrity mempool usage + * usb: r8a66597-hdc disable interrupts fix + * connector: Delete buggy notification code. + * be2net: Bug fix to support newer generation of BE ASIC + * be2net: Fix memset() arg ordering. 
+ * mm: flush dcache before writing into page to avoid alias + * mac80211: fix NULL pointer dereference when ftrace is enabled + * imxfb: correct location of callbacks in suspend and resume + * mx3fb: some debug and initialisation fixes + * starfire: clean up properly if firmware loading fails + * kernel/cred.c: use kmem_cache_free + * uartlite: fix crash when using as console + * pktcdvd: removing device does not remove its sysfs dir + * ath9k: fix eeprom INI values override for 2GHz-only cards + * ath9k: fix beacon slot/buffer leak + * powerpc: TIF_ABI_PENDING bit removal + * NET: fix oops at bootime in sysctl code + * Linux 2.6.32.8 + + -- Andy Whitcroft Wed, 10 Feb 2010 18:56:52 +0000 + +linux (2.6.32-12.17) lucid; urgency=low + + [ Andy Whitcroft ] + + * restore linux-image prefix -- master + * enforce -- we require SELINUX enabled -- master + * enforce -- ensure APPARMOR is our default LSM -- master + * make doc package completely optional -- master + * make source package completely optional -- master + * make linux-libc-dev completly optional -- master + * convert package disable to a deps list -- master + * allow common headers to switch from indep to arch -- master + * convert binary package disable to a deps list -- master + * add configuration option for a full source build tree -- master + * add support for uImage kernels in package control scripts + * getabis -- cleanup and parameterise repository list -- master + * getabis -- move configuration to etc/getabi -- master + * kernelconfig -- move configuration to etc -- master + * rules -- make debian/debian.env master for branch name + * set the current branch name -- master + * pull back common debian.master files into debian -- master + * enforcer -- make the enforcement configuration common + * insert-changes -- correctly link to debian/rules in DROOT + + [ Colin Watson ] + + * future-proof ddeb handling against buildd changes + + [ Eric Miao ] + + * SAUCE: Make CONFIG_{OMNIBOOK, AVERATEC_5100P, PACKARDBELL_E5} depend on + X86 + + [ Loïc Minier ] + + * Add modules.builtin.bin to prerm rm list + - LP: #516584 + + [ Tim Gardner ] + + * [Config] Implement the amd64 preempt flavour + + [ Upstream Kernel Changes ] + + * syslog: distinguish between /proc/kmsg and syscalls + - LP: #515623 + * sfc: Fix polling for slow MCDI operations + * sfc: Fix conditions for MDIO self-test + * sfc: QT202x: Remove unreliable MMD check at initialisation + * sfc: Add workspace for GMAC bug workaround to MCDI MAC_STATS buffer + * sfc: Use fixed-size buffers for MCDI NVRAM requests + + -- Andy Whitcroft Fri, 05 Feb 2010 07:09:31 +0000 + +linux (2.6.32-12.16) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: acpi battery -- delay first lookup of the battery until + first use" + * SAUCE: acpi battery -- move first lookup asynchronous + - LP: #507211 + * [Config] update configs to cleanup generic configs + * [Config] disable CONFIG_X86_CPU_DEBUG for amd64 + * [Config] enable USER_NS + - LP: #480739, #509808 + + [ Heiko Carstens ] + + * (pre-stable) driver-core: fix devtmpfs crash on s390 + - LP: #512370 + + [ John Johansen ] + + * [Config] for server and virtual flavours make CONFIG_SCSI_SYM53C8XX_2=y + - LP: #494565 + * [Config] VIRTIO=y for server/virtual flavours + - LP: #494565 + + [ Kay Sievers ] + + * (pre-stable) Driver-Core: devtmpfs - set root directory mode to 0755 + - LP: #512370 + + [ Kees Cook ] + + * SAUCE: x86: brk away from exec rand area + - LP: #452175 + + [ Leann Ogasawara ] + + * [Upstream] e1000: enhance frame fragment 
detection + - CVE-2009-4536 + * [Upstream] e1000e: enhance frame fragment detection + - CVE-2009-4538 + + [ Sebastian Kapfer ] + + * (pre-stable) Input: ALPS - add interleaved protocol support (Dell E6x00 + series) + - LP: #296610 + + [ Upstream Kernel Changes ] + + * inotify: do not reuse watch descriptors + - LP: #485556 + * inotify: only warn once for inotify problems + * revert "drivers/video/s3c-fb.c: fix clock setting for Samsung SoC + Framebuffer" + * memcg: ensure list is empty at rmdir + * drm/i915: remove loop in Ironlake interrupt handler + * block: Fix incorrect reporting of partition alignment + * x86, mce: Thermal monitoring depends on APIC being enabled + * futexes: Remove rw parameter from get_futex_key() + * page allocator: update NR_FREE_PAGES only when necessary + * x86, apic: use physical mode for IBM summit platforms + * edac: i5000_edac critical fix panic out of bounds + * x86: SGI UV: Fix mapping of MMIO registers + * mfd: WM835x GPIO direction register is not locked + * mfd: Correct WM835x ISINK ramp time defines + * ALSA: hda - Fix missing capture mixer for ALC861/660 codecs + * V4L/DVB (13868): gspca - sn9c20x: Fix test of unsigned. + * reiserfs: truncate blocks not used by a write + * HID: add device IDs for new model of Apple Wireless Keyboard + * PCI/cardbus: Add a fixup hook and fix powerpc + * Input: pmouse - move Sentelic probe down the list + * asus-laptop: add Lenovo SL hotkey support + * sched: Fix cpu_clock() in NMIs, on !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK + * sparc64: Fix NMI programming when perf events are active. + * sparc64: Fix Niagara2 perf event handling. + * i2c: Do not use device name after device_unregister + * i2c/pca: Don't use *_interruptible + * serial/8250_pnp: add a new Fujitsu Wacom Tablet PC device + * sched: Fix task priority bug + * vfs: Fix vmtruncate() regression + * Linux 2.6.32.5 + * x86, msr/cpuid: Register enough minors for the MSR and CPUID drivers + * V4L/DVB (13900): gspca - sunplus: Fix bridge exchanges. 
+ * Staging: asus_oled: fix oops in 2.6.32.2 + * Staging: hv: fix smp problems in the hyperv core code + * tty: fix race in tty_fasync + * ecryptfs: use after free + * ecryptfs: initialize private persistent file before dereferencing + pointer + * nozomi: quick fix for the close/close bug + * serial: 8250_pnp: use wildcard for serial Wacom tablets + * usb: serial: fix memory leak in generic driver + * USB: fix bitmask merge error + * USB: Don't use GFP_KERNEL while we cannot reset a storage device + * USB: EHCI: fix handling of unusual interrupt intervals + * USB: EHCI & UHCI: fix race between root-hub suspend and port resume + * USB: add missing delay during remote wakeup + * USB: add speed values for USB 3.0 and wireless controllers + * ACPI: EC: Accelerate query execution + * ACPI: EC: Add wait for irq storm + * SCSI: enclosure: fix oops while iterating enclosure_status array + * drm/i915: Read the response after issuing DDC bus switch command + * drm/i915: try another possible DDC bus for the SDVO device with + multiple outputs + * block: bdev_stack_limits wrapper + * DM: Fix device mapper topology stacking + * x86/PCI/PAT: return EINVAL for pci mmap WC request for !pat_enabled + * USB: fix usbstorage for 2770:915d delivers no FAT + * vmalloc: remove BUG_ON due to racy counting of VM_LAZY_FREE + * perf timechart: Use tid not pid for COMM change + * perf events: Dont report side-band events on each cpu for + per-task-per-cpu events + * perf: Honour event state for aux stream data + * Linux 2.6.32.6 + + -- Andy Whitcroft Wed, 27 Jan 2010 16:40:23 +0000 + +linux (2.6.32-11.15) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "(pre-stable) drm/radeon/kms: fix crtc vblank update for r600" + * Revert "(pre-stable) sched: Fix balance vs hotplug race" + * Revert "[Upstream] acerhdf: Limit modalias matching to supported + boards" + * Revert "[Upstream] mmc: prevent dangling block device from accessing + stale queues" + * Revert "SAUCE: Fix nx_enable reporting" + * Revert "SAUCE: [x86] fix report of cs-limit nx-emulation" + * Revert "SAUCE: [x86] implement cs-limit nx-emulation for ia32" + * SAUCE: i915 -- disable powersave by default + - LP: #492392 + + [ Kees Cook ] + + * SAUCE: [x86] implement cs-limit nx-emulation for ia32 + - LP: #369978 + * SAUCE: [x86] fix report of cs-limit nx-emulation + - LP: #454285 + * SAUCE: Fix nx_enable reporting + - LP: #454285 + + [ Tim Gardner ] + + * [Upstream] b43: Declare all possible firmware files. 
+ - LP: #488636 + * [Config] updateconfigs after adding pvscsi + - LP: #497156 + * [Config] CONFIG_BT=m + + [ Upstream Kernel Changes ] + + * Revert "x86: Side-step lguest problem by only building cmpxchg8b_emu + for pre-Pentium" + * SCSI: ipr: fix EEH recovery + * SCSI: qla2xxx: dpc thread can execute before scsi host has been added + * SCSI: st: fix mdata->page_order handling + * SCSI: fc class: fix fc_transport_init error handling + * sched: Fix task_hot() test order + * x86, cpuid: Add "volatile" to asm in native_cpuid() + * sched: Select_task_rq_fair() must honour SD_LOAD_BALANCE + * clockevents: Prevent clockevent_devices list corruption on cpu hotplug + * pata_hpt3x2n: fix clock turnaround + * pata_cmd64x: fix overclocking of UDMA0-2 modes + * ASoC: wm8974: fix a wrong bit definition + * sound: sgio2audio/pdaudiocf/usb-audio: initialize PCM buffer + * ALSA: hda - Fix missing capsrc_nids for ALC88x + * acerhdf: limit modalias matching to supported + - LP: #435958 + * ACPI: EC: Fix MSI DMI detection + * ACPI: Use the return result of ACPI lid notifier chain correctly + * powerpc: Handle VSX alignment faults correctly in little-endian mode + * ASoC: Do not write to invalid registers on the wm9712. + * drm/radeon: fix build on 64-bit with some compilers. + * USB: emi62: fix crash when trying to load EMI 6|2 firmware + * USB: option: support hi speed for modem Haier CE100 + * USB: Fix a bug on appledisplay.c regarding signedness + * USB: musb: gadget_ep0: avoid SetupEnd interrupt + * Bluetooth: Prevent ill-timed autosuspend in USB driver + * USB: rename usb_configure_device + * USB: fix bugs in usb_(de)authorize_device + * drivers/net/usb: Correct code taking the size of a pointer + * x86: SGI UV: Fix writes to led registers on remote uv hubs + * md: Fix unfortunate interaction with evms + * dma: at_hdmac: correct incompatible type for argument 1 of + 'spin_lock_bh' + * dma-debug: Do not add notifier when dma debugging is disabled. 
+ * dma-debug: Fix bug causing build warning + * cifs: NULL out tcon, pSesInfo, and srvTcp pointers when chasing DFS + referrals + * x86/amd-iommu: Fix initialization failure panic + * ioat3: fix p-disabled q-continuation + * ioat2,3: put channel hardware in known state at init + * KVM: MMU: remove prefault from invlpg handler + * KVM: LAPIC: make sure IRR bitmap is scanned after vm load + * Libertas: fix buffer overflow in lbs_get_essid() + * iwmc3200wifi: fix array out-of-boundary access + * mac80211: fix propagation of failed hardware reconfigurations + * mac80211: fix WMM AP settings application + * mac80211: Fix IBSS merge + * cfg80211: fix race between deauth and assoc response + * ath5k: fix SWI calibration interrupt storm + * ath9k: wake hardware for interface IBSS/AP/Mesh removal + * ath9k: Fix TX queue draining + * ath9k: fix missed error codes in the tx status check + * ath9k: wake hardware during AMPDU TX actions + * ath9k: fix suspend by waking device prior to stop + * ath9k_hw: Fix possible OOB array indexing in gen_timer_index[] on + 64-bit + * ath9k_hw: Fix AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB and its shift value + in 0x4054 + * iwl3945: disable power save + * iwl3945: fix panic in iwl3945 driver + * iwlwifi: fix EEPROM/OTP reading endian annotations and a bug + * iwlwifi: fix more eeprom endian bugs + * iwlwifi: fix 40MHz operation setting on cards that do not allow it + * mac80211: fix race with suspend and dynamic_ps_disable_work + * NOMMU: Optimise away the {dac_,}mmap_min_addr tests + * 'sysctl_max_map_count' should be non-negative + * kernel/sysctl.c: fix the incomplete part of + sysctl_max_map_count-should-be-non-negative.patch + * V4L/DVB (13596): ov511.c typo: lock => unlock + * x86/ptrace: make genregs[32]_get/set more robust + * memcg: avoid oom-killing innocent task in case of use_hierarchy + * e100: Fix broken cbs accounting due to missing memset. + * ipv6: reassembly: use seperate reassembly queues for conntrack and + local delivery + * netfilter: fix crashes in bridge netfilter caused by fragment jumps + * hwmon: (sht15) Off-by-one error in array index + incorrect constants + * b43: avoid PPC fault during resume + * Keys: KEYCTL_SESSION_TO_PARENT needs TIF_NOTIFY_RESUME architecture + support + * sched: Fix balance vs hotplug race + * drm/radeon/kms: fix crtc vblank update for r600 + * drm: disable all the possible outputs/crtcs before entering KMS mode + * S390: dasd: support DIAG access for read-only devices + * xen: fix is_disconnected_device/exists_disconnected_device + * xen: improvement to wait_for_devices() + * xen: wait up to 5 minutes for device connetion + * orinoco: fix GFP_KERNEL in orinoco_set_key with interrupts disabled + * udf: Try harder when looking for VAT inode + * Add unlocked version of inode_add_bytes() function + * quota: decouple fs reserved space from quota reservation + * ext4: Convert to generic reserved quota's space management. 
+ * ext4: fix sleep inside spinlock issue with quota and dealloc (#14739) + * x86, msr: Unify rdmsr_on_cpus/wrmsr_on_cpus + * cpumask: use modern cpumask style in drivers/edac/amd64_edac.c + * amd64_edac: unify MCGCTL ECC switching + * x86, msr: Add support for non-contiguous cpumasks + * x86, msr: msrs_alloc/free for CONFIG_SMP=n + * amd64_edac: fix driver instance freeing + * amd64_edac: make driver loading more robust + * amd64_edac: fix forcing module load/unload + * sched: Sched_rt_periodic_timer vs cpu hotplug + * ext4: Update documentation to correct the inode_readahead_blks option + name + * lguest: fix bug in setting guest GDT entry + * vmscan: do not evict inactive pages when skipping an active list scan + * ksm: fix mlockfreed to munlocked + * rt2x00: Disable powersaving for rt61pci and rt2800pci. + * generic_permission: MAY_OPEN is not write access + * Linux 2.6.32.3 + * untangle the do_mremap() mess + * fasync: split 'fasync_helper()' into separate add/remove functions + * ASoC: fix params_rate() macro use in several codecs + * modules: Skip empty sections when exporting section notes + * exofs: simple_write_end does not mark_inode_dirty + * nfsd: make sure data is on disk before calling ->fsync + * sunrpc: fix peername failed on closed listener + * SUNRPC: Fix up an error return value in + gss_import_sec_context_kerberos() + * SUNRPC: Fix the return value in gss_import_sec_context() + * sunrpc: on successful gss error pipe write, don't return error + * drm/i915: Update LVDS connector status when receiving ACPI LID event + * drm/i915: fix order of fence release wrt flushing + * drm/i915: Permit pinning whilst the device is 'suspended' + * drm: remove address mask param for drm_pci_alloc() + * drm/i915: Enable/disable the dithering for LVDS based on VBT setting + * drm/i915: Make the BPC in FDI rx/transcoder be consistent with that in + pipeconf on Ironlake + * drm/i915: Select the correct BPC for LVDS on Ironlake + * drm/i915: fix unused var + * rtc_cmos: convert shutdown to new pnp_driver->shutdown + * drivers/cpuidle/governors/menu.c: fix undefined reference to + `__udivdi3' + * cgroups: fix 2.6.32 regression causing BUG_ON() in cgroup_diput() + * lib/rational.c needs module.h + * dma-debug: allow DMA_BIDIRECTIONAL mappings to be synced with + DMA_FROM_DEVICE and + * kernel/signal.c: fix kernel information leak with print-fatal-signals=1 + * mmc_block: add dev_t initialization check + * mmc_block: fix probe error cleanup bug + * mmc_block: fix queue cleanup + * ALSA: hda - Fix ALC861-VD capture source mixer + * ALSA: ac97: Add Dell Dimension 2400 to Headphone/Line Jack Sense + blacklist + * ALSA: atiixp: Specify codec for Foxconn RC4107MA-RS2 + - LP: #498863 + * ASoC: Fix WM8350 DSP mode B configuration + * netfilter: ebtables: enforce CAP_NET_ADMIN + * netfilter: nf_ct_ftp: fix out of bounds read in update_nl_seq() + * hwmon: (coretemp) Fix TjMax for Atom N450/D410/D510 CPUs + * hwmon: (adt7462) Fix pin 28 monitoring + * quota: Fix dquot_transfer for filesystems different from ext4 + * xen: fix hang on suspend. 
+ * iwlwifi: fix iwl_queue_used bug when read_ptr == write_ptr + * ath5k: Fix eeprom checksum check for custom sized eeproms + * cfg80211: fix syntax error on user regulatory hints + * iwl: off by one bug + * mac80211: add missing sanity checks for action frames + * drm/i915: remove render reclock support + * libertas: Remove carrier signaling from the scan code + * kernel/sysctl.c: fix stable merge error in NOMMU mmap_min_addr + * mac80211: fix skb buffering issue (and fixes to that) + * fix braindamage in audit_tree.c untag_chunk() + * fix more leaks in audit_tree.c tag_chunk() + * module: handle ppc64 relocating kcrctabs when CONFIG_RELOCATABLE=y + * ipv6: skb_dst() can be NULL in ipv6_hop_jumbo(). + * agp/intel-agp: Clear entire GTT on startup + * Linux 2.6.32.4 + * ethtool: Add reset operation + * gro: Name the GRO result enumeration type + * gro: Change all receive functions to return GRO result codes + * sfc: 10Xpress: Initialise pause advertising flags + * sfc: 10Xpress: Report support for pause frames + * sfc: Remove redundant header gmii.h + * sfc: Remove redundant hardware initialisation + * sfc: Rename Falcon-specific board code and types + * sfc: Remove boards.h, moving last remaining declaration to falcon.h + * sfc: Remove versioned bitfield macros + * sfc: Move RX data FIFO thresholds out of struct efx_nic_type + * sfc: Update hardware definitions for Siena + * sfc: Rename register I/O header and functions used by both Falcon and + Siena + * sfc: Eliminate indirect lookups of queue size constants + * sfc: Define DMA address mask explicitly in terms of descriptor field + width + * sfc: Move all TX DMA length limiting into tx.c + * sfc: Change order of device removal to reverse of probe order + * sfc: Remove declarations of nonexistent functions + * sfc: Move efx_xmit_done() declaration into correct stanza + * sfc: Move shared members of struct falcon_nic_data into struct efx_nic + * sfc: Maintain interrupt moderation values in ticks, not microseconds + * sfc: Removed kernel-doc for nonexistent member of efx_phy_operations + * sfc: Remove pointless abstraction of memory BAR number + * sfc: Remove incorrect assertion from efx_pci_remove_main() + * sfc: Remove unnecessary tests of efx->membase + * sfc: Move MTD probe after netdev registration and name allocation + * sfc: Remove unused code for non-autoneg speed/duplex switching + * sfc: Rename 'xfp' file and functions to reflect reality + * sfc: Really allow RX checksum offload to be disabled + * sfc: Feed GRO result into RX allocation policy and interrupt moderation + * sfc: Enable heuristic selection between page and skb RX buffers + * sfc: Remove pointless abstraction of memory BAR number (2) + * sfc: Remove redundant gotos from __efx_rx_packet() + * sfc: Remove ridiculously paranoid assertions + * sfc: Move assertions and buffer cleanup earlier in efx_rx_packet_lro() + * sfc: Record RX queue number on GRO path + * sfc: SFT9001: Reset LED configuration correctly after blinking + * sfc: Use a single blink implementation + * sfc: Rename efx_board::init_leds to init_phy and use for SFN4111T + * sfc: Make board information explicitly Falcon-specific + * sfc: Move definition of struct falcon_nic_data into falcon.h + * sfc: Move struct falcon_board into struct falcon_nic_data + * sfc: Move all I2C stuff into struct falcon_board + * sfc: Gather link state fields in struct efx_nic into new struct + efx_link_state + * sfc: Remove unnecessary casts to struct sk_buff * + * sfc: Remove redundant efx_xmit() function + * sfc: Combine 
high-level header files + * sfc: Log interrupt and reset type names, not numbers + * sfc: Fix descriptor cache sizes + * sfc: Treat all MAC registers as 128-bit + * sfc: Strengthen EFX_ASSERT_RESET_SERIALISED + * sfc: Comment corrections + * sfc: Remove unused constant + * sfc: Clean up struct falcon_board and struct falcon_board_data + * sfc: Fix bugs in RX queue flushing + * sfc: Remove unused function efx_flush_queues() + * sfc: Only switch Falcon MAC clocks as necessary + * sfc: Hold MAC lock for longer in efx_init_port() + * sfc: Split MAC stats DMA initiation and completion + * sfc: Move Falcon board/PHY/MAC monitoring code to falcon.c + * sfc: Simplify XMAC link polling + * sfc: Change MAC promiscuity and multicast hash at the same time + * sfc: Move inline comment into kernel-doc + * sfc: Do not set net_device::trans_start in self-test + * sfc: Simplify PHY polling + * sfc: QT202x: Reset before reading PHY id + * sfc: Replace MDIO spinlock with mutex + * sfc: Always start Falcon using the XMAC + * sfc: Limit some hardware workarounds to Falcon + * sfc: Remove EFX_WORKAROUND_9141 macro + * sfc: Remove another unused workaround macro + * sfc: Remove some redundant whitespace + * sfc: Decouple NIC revision number from Falcon PCI revision number + * sfc: Move descriptor cache base addresses to struct efx_nic_type + * sfc: Clean up RX event handling + * sfc: Remove redundant writes to INT_ADR_KER + * sfc: Remove duplicate hardware structure definitions + * sfc: Turn pause frame generation on and off at the MAC, not the RX FIFO + * sfc: Move Falcon NIC operations to efx_nic_type + * sfc: Refactor link configuration + * sfc: Generalise link state monitoring + * sfc: Add power-management and wake-on-LAN support + * sfc: Implement ethtool reset operation + * sfc: Add efx_nic_type operation for register self-test + * sfc: Add efx_nic_type operation for NVRAM self-test + * sfc: Add efx_nic_type operation for identity LED control + * sfc: Separate shared NIC code from Falcon-specific and rename + accordingly + * sfc: Fold falcon_probe_nic_variant() into falcon_probe_nic() + * sfc: Extend loopback mode enumeration + * sfc: Remove static PHY data and enumerations + * sfc: Extend MTD driver for use with new NICs + * sfc: Allow for additional checksum offload features + * sfc: Rename falcon.h to nic.h + * sfc: Move shared NIC code from falcon.c to new source file nic.c + * sfc: Add firmware protocol definitions (MCDI) + * sfc: Add support for SFC9000 family (1) + * sfc: Add support for SFC9000 family (2) + * sfc: Implement TSO for TCP/IPv6 + * sfc: Update version, copyright dates, authors + * drivers/net/sfc: Correct code taking the size of a pointer + * sfc: Move PHY software state initialisation from init() into probe() + * sfc: Include XGXS in XMAC link status check except in XGMII loopback + * sfc: Fix DMA mapping cleanup in case of an error in TSO + * sfc: QT2025C: Work around PHY bug + * sfc: QT2025C: Switch into self-configure mode when not in loopback + * sfc: QT2025C: Work around PHY firmware initialisation bug + * sfc: QT2025C: Add error message for suspected bad SFP+ cables + * sfc: Disable TX descriptor prefetch watchdog + * [SCSI] vmw_pvscsi: SCSI driver for VMware's virtual HBA. 
+ - LP: #497156 + + -- Andy Whitcroft Tue, 19 Jan 2010 16:12:47 +0000 + +linux (2.6.32-10.14) lucid; urgency=low + + [ Alex Deucher ] + + * SAUCE: drm/radeon/kms: fix LVDS setup on r4xx + - LP: #493795 + + [ Andy Whitcroft ] + + * Revert "(pre-stable) acpi: Use the ARB_DISABLE for the CPU which model + id is less than 0x0f." + * config-check -- ensure the checks get run at build time + * config-check -- check the processed config during updateconfigs + * config-check -- CONFIG_SECCOMP may not be present + * TUN is now built in ignore + * SAUCE: acpi battery -- delay first lookup of the battery until first + use + * SAUCE: async_populate_rootfs: move rootfs init earlier + * ubuntu: AppArmor -- update to mainline 2010-01-06 + * SAUCE: move RLIMIT_CORE pipe dumper marker to 1 + - LP: #498525 + + [ Dave Airlie ] + + * (pre-stable) drm/radeon/kms: fix crtc vblank update for r600 + + [ Leann Ogasawara ] + + * Add asix to nic-usb-modules file + - LP: #499785 + + [ Peter Zijlstra ] + + * (pre-stable) sched: Fix balance vs hotplug race + + [ Tim Gardner ] + + * [Config] Enable CONFIG_FUNCTION_TRACER + - LP: #497989 + * [Config] Drop lpia from getabis + * [Config] Build in TUN/TAP driver + - LP: #499491 + * [Config] DH_COMPAT=5 + + [ Upstream Kernel Changes ] + + * Revert "(pre-stable) drm/i915: Avoid NULL dereference with + component_only tv_modes" + * Revert "(pre-stable) drm/i915: Fix sync to vblank when VGA output is + turned off" + * USB: usb-storage: fix bug in fill_inquiry + * USB: option: add pid for ZTE + * firewire: ohci: handle receive packets with a data length of zero + * rcu: Prepare for synchronization fixes: clean up for non-NO_HZ handling + of ->completed counter + * rcu: Fix synchronization for rcu_process_gp_end() uses of ->completed + counter + * rcu: Fix note_new_gpnum() uses of ->gpnum + * rcu: Remove inline from forward-referenced functions + * perf_event: Fix invalid type in ioctl definition + * perf_event: Initialize data.period in perf_swevent_hrtimer() + * perf: Don't free perf_mmap_data until work has been done + * PM / Runtime: Fix lockdep warning in __pm_runtime_set_status() + * sched: Check for an idle shared cache in select_task_rq_fair() + * sched: Fix affinity logic in select_task_rq_fair() + * sched: Rate-limit newidle + * sched: Fix and clean up rate-limit newidle code + * x86/amd-iommu: attach devices to pre-allocated domains early + * x86/amd-iommu: un__init iommu_setup_msi + * x86, Calgary IOMMU quirk: Find nearest matching Calgary while walking + up the PCI tree + * x86: Fix iommu=nodac parameter handling + * x86: GART: pci-gart_64.c: Use correct length in strncmp + * x86: ASUS P4S800 reboot=bios quirk + - LP: #366682 + * x86, apic: Enable lapic nmi watchdog on AMD Family 11h + * ssb: Fix range check in sprom write + * ath5k: allow setting txpower to 0 + * ath5k: enable EEPROM checksum check + * hrtimer: Fix /proc/timer_list regression + * ALSA: hrtimer - Fix lock-up + * ALSA: hda - Terradici HDA controllers does not support 64-bit mode + * KVM: x86 emulator: limit instructions to 15 bytes + * KVM: s390: Fix prefix register checking in arch/s390/kvm/sigp.c + * KVM: s390: Make psw available on all exits, not just a subset + * KVM: fix irq_source_id size verification + * KVM: x86: include pvclock MSRs in msrs_to_save + * x86: Prevent GCC 4.4.x (pentium-mmx et al) function prologue wreckage + * x86: Use -maccumulate-outgoing-args for sane mcount prologues + * x86, mce: don't restart timer if disabled + * x86/mce: Set up timer unconditionally + * x86: SGI UV: Fix 
BAU initialization + * x86: Fix duplicated UV BAU interrupt vector + * x86: Add new Intel CPU cache size descriptors + * x86: Fix typo in Intel CPU cache size descriptor + * pata_hpt{37x|3x2n}: fix timing register masks (take 2) + * s390: clear high-order bits of registers after sam64 + * V4L/DVB: Fix test in copy_reg_bits() + * bsdacct: fix uid/gid misreporting + * UBI: flush wl before clearing update marker + * jbd2: don't wipe the journal on a failed journal checksum + * USB: xhci: Add correct email and files to MAINTAINERS entry. + * USB: musb_gadget_ep0: fix unhandled endpoint 0 IRQs, again + * USB: option.c: add support for D-Link DWM-162-U5 + * USB: usbtmc: repeat usb_bulk_msg until whole message is transfered + * USB: usb-storage: add BAD_SENSE flag + * USB: Close usb_find_interface race v3 + * pxa/em-x270: fix usb hub power up/reset sequence + * hfs: fix a potential buffer overflow + * SUNRPC: IS_ERR/PTR_ERR confusion + * NFS: Fix nfs_migrate_page() + * md/bitmap: protect against bitmap removal while being updated. + * futex: Take mmap_sem for get_user_pages in fault_in_user_writeable + * devpts_get_tty() should validate inode + * debugfs: fix create mutex racy fops and private data + * Driver core: fix race in dev_driver_string + * Serial: Do not read IIR in serial8250_start_tx when UART_BUG_TXEN + * mac80211: Fix bug in computing crc over dynamic IEs in beacon + * mac80211: Fixed bug in mesh portal paths + * mac80211: Revert 'Use correct sign for mesh active path refresh' + * mac80211: fix scan abort sanity checks + * wireless: correctly report signal value for IEEE80211_HW_SIGNAL_UNSPEC + * rtl8187: Fix wrong rfkill switch mask for some models + * x86: Fix bogus warning in apic_noop.apic_write() + * mm: hugetlb: fix hugepage memory leak in mincore() + * mm: hugetlb: fix hugepage memory leak in walk_page_range() + * powerpc/windfarm: Add detection for second cpu pump + * powerpc/therm_adt746x: Record pwm invert bit at module load time] + * powerpc: Fix usage of 64-bit instruction in 32-bit altivec code + * drm/radeon/kms: Add quirk for HIS X1300 board + * drm/radeon/kms: handle vblanks properly with dpms on + * drm/radeon/kms: fix legacy crtc2 dpms + * drm/radeon/kms: fix vram setup on rs600 + * drm/radeon/kms: rs6xx/rs740: clamp vram to aperture size + * drm/ttm: Fix build failure due to missing struct page + * drm/i915: Set the error code after failing to insert new offset into mm + ht. + * drm/i915: Add the missing clonemask for display port on Ironlake + * xen/xenbus: make DEVICE_ATTR()s static + * xen: re-register runstate area earlier on resume. + * xen: restore runstate_info even if !have_vcpu_info_placement + * xen: correctly restore pfn_to_mfn_list_list after resume + * xen: register timer interrupt with IRQF_TIMER + * xen: register runstate on secondary CPUs + * xen: don't call dpm_resume_noirq() with interrupts disabled. + * xen: register runstate info for boot CPU early + * xen: call clock resume notifier on all CPUs + * xen: improve error handling in do_suspend. + * xen: don't leak IRQs over suspend/resume. + * xen: use iret for return from 64b kernel to 32b usermode + * xen: explicitly create/destroy stop_machine workqueues outside + suspend/resume region. + * Xen balloon: fix totalram_pages counting. + * xen: try harder to balloon up under memory pressure. 
+ * dm exception store: free tmp_store on persistent flag error + * dm snapshot: only take lock for statustype info not table + * dm crypt: move private iv fields to structs + * dm crypt: restructure essiv error path + * dm: avoid _hash_lock deadlock + * dm snapshot: cope with chunk size larger than origin + * dm crypt: separate essiv allocation from initialisation + * dm crypt: make wipe message also wipe essiv key + * slc90e66: fix UDMA handling + * tcp: Stalling connections: Fix timeout calculation routine + * ip_fragment: also adjust skb->truesize for packets not owned by a + socket + * b44 WOL setup: one-bit-off stack corruption kernel panic fix + * sparc64: Don't specify IRQF_SHARED for LDC interrupts. + * sparc64: Fix overly strict range type matching for PCI devices. + * sparc64: Fix stack debugging IRQ stack regression. + * sparc: Set UTS_MACHINE correctly. + * b43legacy: avoid PPC fault during resume + * tracing: Fix event format export + * ath9k: Fix TX hang poll routine + * ath9k: fix processing of TX PS null data frames + * ath9k: Fix maximum tx fifo settings for single stream devices + * ath9k: fix tx status reporting + * mac80211: Fix dynamic power save for scanning. + * drm/i915: Fix sync to vblank when VGA output is turned off + * memcg: fix memory.memsw.usage_in_bytes for root cgroup + * thinkpad-acpi: fix default brightness_mode for R50e/R51 + * thinkpad-acpi: preserve rfkill state across suspend/resume + * ipw2100: fix rebooting hang with driver loaded + * matroxfb: fix problems with display stability + * acerhdf: add new BIOS versions + * asus-laptop: change light sens default values. + * vmalloc: conditionalize build of pcpu_get_vm_areas() + * ACPI: Use the ARB_DISABLE for the CPU which model id is less than 0x0f. + * net: Fix userspace RTM_NEWLINK notifications. + * ext3: Fix data / filesystem corruption when write fails to copy data + * V4L/DVB (13116): gspca - ov519: Webcam 041e:4067 added. + * bcm63xx_enet: fix compilation failure after get_stats_count removal + * x86: Under BIOS control, restore AP's APIC_LVTTHMR to the BSP value + * drm/i915: Avoid NULL dereference with component_only tv_modes + * drm/i915: PineView only has LVDS and CRT ports + * drm/i915: Fix LVDS stability issue on Ironlake + * mm: sigbus instead of abusing oom + * ipvs: zero usvc and udest + * jffs2: Fix long-standing bug with symlink garbage collection. + * intel-iommu: Detect DMAR in hyperspace at probe time. + * intel-iommu: Apply BIOS sanity checks for interrupt remapping too. + * intel-iommu: Check for an RMRR which ends before it starts. 
+ * intel-iommu: Fix oops with intel_iommu=igfx_off + * intel-iommu: ignore page table validation in pass through mode + * netfilter: xtables: document minimal required version + * perf_event: Fix incorrect range check on cpu number + * implement early_io{re,un}map for ia64 + * Linux 2.6.32.2 + + -- Andy Whitcroft Thu, 07 Jan 2010 15:28:43 +0000 + +linux (2.6.32-9.13) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_B43_PHY_LP + - LP: #493059 + * include modules.builtin in the binary debs + * config-check -- add a configuration enforcer + * config-check -- add a unit-test suite to the checker + * [Config] Enable CONFIG_SYN_COOKIES for versatile + * [Config] Enable CONFIG_SECURITY_SMACK for ports + * [Config] Enable CONFIG_SECURITY_FILE_CAPABILITIES for ports + * [Config] Disable CONFIG_COMPAT_BRK for ports + * getabis -- add armel versatile to the list + + [ Brad Figg ] + + * SAUCE: Increase the default prealloc buffer for HDA audio devices + (non-modem) + + [ Manoj Iyer ] + + * ubuntu: onmibook -- Added missing BOM file + + [ Tim Gardner ] + + * ubuntu: fsam7400 -- Cleanup Makefile + + [ Upstream Kernel Changes ] + + * Revert "ext4: Fix insufficient checks in EXT4_IOC_MOVE_EXT" + * signal: Fix alternate signal stack check + * SCSI: scsi_lib_dma: fix bug with dma maps on nested scsi objects + * SCSI: osd_protocol.h: Add missing #include + * SCSI: megaraid_sas: fix 64 bit sense pointer truncation + * ext4: fix potential buffer head leak when add_dirent_to_buf() returns + ENOSPC + * ext4: avoid divide by zero when trying to mount a corrupted file system + * ext4: fix the returned block count if EXT4_IOC_MOVE_EXT fails + * ext4: fix lock order problem in ext4_move_extents() + * ext4: fix possible recursive locking warning in EXT4_IOC_MOVE_EXT + * ext4: plug a buffer_head leak in an error path of ext4_iget() + * ext4: make sure directory and symlink blocks are revoked + * ext4: fix i_flags access in ext4_da_writepages_trans_blocks() + * ext4: journal all modifications in ext4_xattr_set_handle + * ext4: don't update the superblock in ext4_statfs() + * ext4: fix uninit block bitmap initialization when s_meta_first_bg is + non-zero + * ext4: fix block validity checks so they work correctly with meta_bg + * ext4: avoid issuing unnecessary barriers + * ext4: fix error handling in ext4_ind_get_blocks() + * ext4: make trim/discard optional (and off by default) + * ext4: make "norecovery" an alias for "noload" + * ext4: Fix double-free of blocks with EXT4_IOC_MOVE_EXT + * ext4: initialize moved_len before calling ext4_move_extents() + * ext4: move_extent_per_page() cleanup + * jbd2: Add ENOMEM checking in and for + jbd2_journal_write_metadata_buffer() + * ext4: Return the PTR_ERR of the correct pointer in + setup_new_group_blocks() + * ext4: Avoid data / filesystem corruption when write fails to copy data + * ext4: wait for log to commit when umounting + * ext4: remove blocks from inode prealloc list on failure + * ext4: ext4_get_reserved_space() must return bytes instead of blocks + * ext4: quota macros cleanup + * ext4: fix incorrect block reservation on quota transfer. + * ext4: Wait for proper transaction commit on fsync + * ext4: Fix insufficient checks in EXT4_IOC_MOVE_EXT + * ext4: Fix potential fiemap deadlock (mmap_sem vs. 
i_data_sem) + * Linux 2.6.32.1 + * kbuild: generate modules.builtin + * (pre-stable) drm/i915: Fix sync to vblank when VGA output is turned off + - LP: #494461 + * (pre-stable) drm/i915: Avoid NULL dereference with component_only + tv_modes + - LP: #494045 + + [ Zhao Yakui ] + + * (pre-stable) acpi: Use the ARB_DISABLE for the CPU which model id is + less than 0x0f. + - LP: #481765 + + -- Andy Whitcroft Thu, 17 Dec 2009 15:41:21 +0000 + +linux (2.6.32-8.12) lucid; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: AppArmor -- add linux/err.h for ERR_PTR + + -- Andy Whitcroft Sat, 12 Dec 2009 10:56:16 +0000 + +linux (2.6.32-8.11) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: default ATI Radeon KMS to off until userspace catches + up" + * Revert "SAUCE: AppArmor: Fix oops there is no tracer and doing unsafe + transition." + * Revert "SAUCE: AppArmor: Fix refcounting bug causing leak of creds" + * Revert "SAUCE: AppArmor: Fix cap audit_caching preemption disabling" + * Revert "SAUCE: AppArmor: Fix Oops when in apparmor_bprm_set_creds" + * Revert "SAUCE: AppArmor: Fix oops after profile removal" + * Revert "SAUCE: AppArmor: AppArmor disallows truncate of deleted files." + * Revert "SAUCE: AppArmor: AppArmor fails to audit change_hat correctly" + * Revert "SAUCE: AppArmor: Policy load and replacement can fail to alloc + mem" + * Revert "SAUCE: AppArmor: AppArmor wrongly reports allow perms as + denied" + * Revert "SAUCE: AppArmor: Fix mediation of "deleted" paths" + * Revert "SAUCE: AppArmor: Fix off by 2 error in getprocattr mem + allocation" + * Revert "SAUCE: AppArmor: Set error code after structure + initialization." + * Revert "AppArmor -- fix pstrace_may_access rename" + * Revert "ubuntu: AppArmor security module" + * Revert "SAUCE: Add config option to set a default LSM" + * Revert "ubuntu: fsam7400 -- sw kill switch driver" + * Revert "[Config] fsam7400 -- enable" + * Revert "[Config] AUFS -- enable" + * Revert "ubuntu: AUFS -- aufs2-30 20090727" + * Revert "ubuntu: AUFS -- export various core functions -- fixes" + * Revert "ubuntu: AUFS -- export various core functions" + * Revert "[Config] ubuntu/iscsitarget -- disable" + * Revert "[Config] iscsitarget -- enable" + * Revert "ubuntu: iscsitarget -- SVN revision r214" + * update Vcs-Git to point to the correct repository + - LP: #493589 + * update build environment overrides to lucid + - LP: #493589 + * [Config] enable CONFIG_DEVTMPFS + * [Config] update all configs following AppArmor 2009-12-08 update + * SAUCE: isapnp_init: make isa PNP scans occur async + * [Config] fsam7400 -- enable + * [Config] omnibook -- enable + * [Config] cleanup CONFIG_AUDIT + * ubuntu: AUFS -- export various core functions (aufs2-base.patch) + * ubuntu: AUFS -- export various core functions (aufs2-standalone.patch) + * ubuntu: AUFS -- aufs2 20091209 + * [Config] AUFS -- enable + * [Config] iscsitarget -- enable + + [ Arjan van de Ven ] + + * SAUCE: KMS: cache the EDID information of the LVDS + + [ Colin Watson ] + + * bnx2: update d-i firmware filenames + - LP: #494052 + * add cdc_ether to nic-usb-modules udeb + - LP: #495060 + + [ John Johansen ] + + * ubuntu: AppArmor -- mainline 2009-10-08 + + [ Manoj Iyer ] + + * ubuntu: fsam7400 -- kill switch for Fujitsu Siemens Amilo M 7400 + * ubuntu: omnibook -- support Toshiba (HP) netbooks + * ubuntu: iscsitarget --- version 1.4.19 + - LP: #494693 + + [ Surbhi Palande ] + + * SAUCE: Make populate_rootfs asynchronous + + [ Tim Gardner ] + + * Parallelize flavour builds and packaging + * [Config] 
Enable CONFIG_KSM + + [ Upstream Kernel Changes ] + + * Config option to set a default LSM + * LSM: Add security_path_chroot(). + * LSM: Add security_path_chroot(). + * LSM: Move security_path_chmod()/security_path_chown() to after + mutex_lock(). + * ext4: Fix insufficient checks in EXT4_IOC_MOVE_EXT + + -- Andy Whitcroft Fri, 11 Dec 2009 17:45:19 +0000 + +linux (2.6.32-7.10) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] disable CONFIG_THUMB2_KERNEL to fix arm FTBFS + + -- Andy Whitcroft Sun, 06 Dec 2009 12:56:48 +0000 + +linux (2.6.32-7.9) lucid; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: set /proc/acpi/video/*/DOS to 4 by default + - LP: #458982 + * SAUCE: ensure vga16fb loads if no other driver claims the VGA device + * [Config] update configs following versatile switch to V7 + * rebased to v2.6.32 + * [Config] update configs following rebase to v2.6.32 + * [Config] update ports configs following rebase to v2.6.32 + * SAUCE: default ATI Radeon KMS to off until userspace catches up + + [ Arjan van de Ven ] + + * SAUCE: vfs: Add a trace point in the mark_inode_dirty function + + [ Leann Ogasawara ] + + * [SCSI] megaraid_sas: remove sysfs poll_mode_io world writeable + permissions + - CVE-2009-3939 + + [ Loic Minier ] + + * SAUCE: select a v7 CPU for versatile + + [ Takashi Iwai ] + + * SAUCE: ALSA: hda - Add power on/off counter + + [ Upstream changes ] + + * rebased to v2.6.32 + + -- Andy Whitcroft Fri, 04 Dec 2009 10:44:50 +0000 + +linux (2.6.32-6.8) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] disable SSB devices for armel + + -- Andy Whitcroft Sat, 28 Nov 2009 12:16:40 +0000 + +linux (2.6.32-6.7) lucid; urgency=low + + [ Andy Whitcroft ] + + * Revert "SAUCE: default ATI Radeon KMS to off until userspace catches up" + * Revert "SAUCE: Dell XPS710 reboot quirk" + * Revert "SAUCE: Link acpi-cpufreq.o first" + * Revert "SAUCE: LPIA Logical reset of USB port on resume" + * Revert "SAUCE: LPIA Reboot fix for Intel Crownbeach development boards" + * Revert "SAUCE: Enable HDMI audio codec on Studio XPS 1340" + * Revert "SAUCE: Dell laptop digital mic does not work, PCI 1028:0271" + * Revert "Add Dell Dimension 9200 reboot quirk" + * Revert "SAUCE: Correctly blacklist Thinkpad r40e in ACPI" + * Revert "SAUCE: tulip: Define ULI PCI ID's" + * Revert "SAUCE: Lower warning level of some PCI messages" + * Revert "mac80211: fix two issues in debugfs" + Drop a number of known redundant commits as identified in the Ubuntu + delta review blueprint. 
+ + * reenable armel versatile flavour + * [Config] disable CONFIG_USB_DEVICEFS + + [ Tim Gardner ] + + * [Config] udeb: Add squashfs to fs-core-modules + - LP: #352615 + * [Config] Create a real squashfs udeb + - LP: #352615 + + + -- Andy Whitcroft Fri, 27 Nov 2009 17:31:16 +0000 + +linux (2.6.32-5.6) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.32-rc8 + * update configs following rebase to v2.6.32-rc8 + * update ports configs since rebase to v2.6.32-rc8 + * [Config] enable cgroup options + - LP: #480739 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc8 + + -- Andy Whitcroft Mon, 23 Nov 2009 11:16:14 +0000 + +linux (2.6.32-4.5) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] SERIO_LIBPS2 and SERIO_I8042 must match + * rebase to v2.6.32-rc7 + * resync with Karmic proposed + + [ John Johansen ] + + * SAUCE: AppArmor: Fix oops after profile removal + - LP: #475619 + * SAUCE: AppArmor: Fix Oops when in apparmor_bprm_set_creds + - LP: #437258 + * SAUCE: AppArmor: Fix cap audit_caching preemption disabling + - LP: #479102 + * SAUCE: AppArmor: Fix refcounting bug causing leak of creds + - LP: #479115 + * SAUCE: AppArmor: Fix oops there is no tracer and doing unsafe + transition. + - LP: #480112 + + [ Ubuntu Changes ] + + * resync with Karmic proposed (ddbc670a86a3dee18541a3734149f250ff307adf) + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc7 + + -- Andy Whitcroft Fri, 13 Nov 2009 11:35:13 +0000 + +linux (2.6.32-3.4) lucid; urgency=low + + [ Andy Whitcroft ] + + * [Config] SERIO_LIBPS2 and SERIO_I8042 must match + * [Upstream] add local prefix to oss local change_bits + + [ Upstream Kernel Changes ] + + * mtd/maps: gpio-addr-flash: pull in linux/ headers rather than asm/ + * mtd/maps: gpio-addr-flash: depend on GPIO arch support + + -- Andy Whitcroft Wed, 11 Nov 2009 14:47:04 +0000 + +linux (2.6.32-3.3) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.32-rc6 + * [Config] update configs following rebase to v2.6.32-rc6 + * [Config] update ports configs following rebase to v2.6.32-rc6 + * resync with Karmic Ubuntu-2.6.31-15.49 + * [Config] add module ignores for broken drivers + + [ John Johansen ] + + * SAUCE: AppArmor: AppArmor wrongly reports allow perms as denied + - LP: #453335 + * SAUCE: AppArmor: Policy load and replacement can fail to alloc mem + - LP: #458299 + * SAUCE: AppArmor: AppArmor fails to audit change_hat correctly + - LP: #462824 + * SAUCE: AppArmor: AppArmor disallows truncate of deleted files. 
+ - LP: #451375 + + [ Kees Cook ] + + * SAUCE: Fix nx_enable reporting + - LP: #454285 + + [ Scott James Remnant ] + + * Revert "SAUCE: trace: add trace_event for the open() syscall" + * SAUCE: trace: add trace events for open(), exec() and uselib() + - LP: #462111 + + [ Stefan Bader ] + + * SAUCE: Fix sub-flavour script to not stop on missing directories + - LP: #453073 + + [ Ubuntu Changes ] + + * resync with Karmic Ubuntu-2.6.31-15.49 + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc6 + - LP: #464552 + + -- Andy Whitcroft Tue, 10 Nov 2009 15:00:57 +0000 + +linux (2.6.32-2.2) lucid; urgency=low + + [ Andy Whitcroft ] + + * install the full changelog with the binary package + * changelog -- explicitly note rebases and clean history + * reinstate armel.mk with no flavours + - LP: #449637 + * [Upstream] block: silently error unsupported empty barriers too + - LP: #420423 + * [Config] udate configs following karmic resync + * [Config] update ports configs following karmic resync + * [Upstream] lirc -- follow removal of .id element + + [ Colin Watson ] + + * Use section 'admin' rather than 'base' + * Add more e100 firmware to nic-modules + - LP: #451872 + * Add qla1280 firmware to scsi-modules + - LP: #381037 + + [ John Johansen ] + + * SAUCE: AppArmor: Set error code after structure initialization. + - LP: #427948 + * SAUCE: AppArmor: Fix off by 2 error in getprocattr mem allocation + - LP: #446595 + * SAUCE: AppArmor: Fix mediation of "deleted" paths + + [ Kees Cook ] + + * SAUCE: [x86] fix report of cs-limit nx-emulation + - LP: #454285 + + [ Leann Ogasawara ] + + * SAUCE: (drop after 2.6.31) input: Add support for filtering input + events + - LP: #430809 + * SAUCE: (drop after 2.6.31) dell-laptop: Trigger rfkill updates on wifi + toggle switch press + - LP: #430809 + + [ Luke Yelavich ] + + * SAUCE: Add sr_mod to the scsi-modules udeb for powerpc + * [Config] Add sd_mod to scsi-modules udeb for powerpc + + [ Mario Limonciello ] + + * SAUCE: Update to LIRC 0.8.6 + - LP: #432678 + * SAUCE: dell-laptop: Store the HW switch status internally rather than + requerying every time + - LP: #430809 + * SAUCE: dell-laptop: Blacklist machines not supporting dell-laptop + - LP: #430809 + + [ Stefan Bader ] + + * [Upstream] acerhdf: Limit modalias matching to supported boards + - LP: #435958 + + [ Tim Gardner ] + + * [Upstream] i915: Fix i2c init message + - LP: #409361 + * [Config] Add sym53c8xx.ko to virtual sub-flavour + - LP: #439415 + * [Config] Add d101m_ucode.bin to d-i/firmware/nic-modules + - LP: #439456 + * [Config] Set default I/O scheduler back to CFQ for desktop flavours + - LP: #381300 + * SAUCE: Created MODULE_EXPORT/MODULE_IMPORT macros + - LP: #430694 + * SAUCE: Use MODULE_IMPORT macro to tie intel_agp to i915 + - LP: #430694 + * [Config] CONFIG_GFS2_FS_LOCKING_DLM=y + - LP: #416325 + * SAUCE: Fix MODULE_IMPORT/MODULE_EXPORT + - LP: #430694 + * SAUCE: Raise the default console 'quiet' level to 2 + * [Config] CONFIG_X86_PAT=y + * [Config] Add armel arch to linux-libc-dev arches. 
+ - LP: #449637 + * [Config] CONFIG_X86_MCE + * [Upstream] (drop after 2.6.31) Input: synaptics - add another Protege + M300 to rate blacklist + - LP: #433801 + + [ Upstream Kernel Changes ] + + * sgi-gru: Fix kernel stack buffer overrun, CVE-2009-2584 + * drm/i915: Fix FDI M/N setting according with correct color depth + - LP: #416792 + + -- Andy Whitcroft Thu, 22 Oct 2009 16:53:33 +0100 + +linux (2.6.32-1.1) lucid; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.32-rc3 + * [Config] update configs following rebase to 2.6.32-rc3 + * [Config] update ports configs following rebase to 2.6.32-rc3 + * AppArmor -- fix pstrace_may_access rename + * staging/android -- disable + * ubuntu: dm-raid-45 -- update to compile with 2.6.32 + * ubuntu: drbd -- disable + * staging/comdi -- disable + * staging/go7007 -- disable + * [Config] staging/winbond -- disable + * [Config] ubuntu/iscsitarget -- disable + * [d-i] cbc and ecb are builtin make them optional in udebs + * rebase to v2.6.32-rc5 + * [Config] update configs following rebase to v2.6.32-rc5 + * [Config] update ports configs following rebase to v2.6.31-rc5 + + [ Tim Gardner ] + + * [Config] Add cpio as a build dependency. + + [ Upstream Kernel Changes ] + + * rebase to v2.6.32-rc3 + * rebase to v2.6.32-rc5 + + -- Andy Whitcroft Mon, 05 Oct 2009 15:48:58 +0100 + +linux (2.6.31-11.37) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] Increase kernel log buffer to 256K for amd64 flavours + - LP: #424810 + * [Config] Set HZ=100 for amd64 flavours + - LP: #438234 + * [Upstream] e1000e: Emit notice instead of an error when + pci_enable_pcie_error_reporting() fails + - LP: #436370 + + [ Upstream Kernel Changes ] + + * n_tty: honor opost flag for echoes + * n_tty: move echoctl check and clean up logic + - LP: #438310 + + * Revert "[Upstream] drm/i915: Check that the relocation points to within + the target" - Use upstream cherry-pick. + * drm/i915: Check that the relocation points to within the target + - LP: #429241 + + * drm/i915: fix tiling on IGDNG + * drm/i915: add B43 chipset support + * agp/intel: Add B43 chipset support + Intel request from kernel team mailing list. + + * HID: completely remove apple mightymouse from blacklist + - LP: #428111 + + -- Tim Gardner Mon, 28 Sep 2009 11:47:29 -0600 + +linux (2.6.31-11.36) karmic; urgency=low + + [ Brian Rogers ] + + * SAUCE: (drop after 2.6.31) em28xx: ir-kbd-i2c init data needs a + persistent object + * SAUCE: (drop after 2.6.31) saa7134: ir-kbd-i2c init data needs a + persistent object + + [ Takashi Iwai ] + + * [Upstream] ALSA: hda - Add another entry for Nvidia HDMI device + - LP: #416482 + + [ Tyler Hicks ] + + * SAUCE: (drop after 2.6.31) eCryptfs: Prevent lower dentry from going + negative during unlink + + [ Upstream Kernel Changes ] + + * sg: fix oops in the error path in sg_build_indirect() + * mpt2sas : Rescan topology from Interrupt context instead of work thread + * mpt2sas: Prevent sending command to FW while Host Reset + * mpt2sas: setting SDEV into RUNNING state from Interrupt context + * mpt2sas: Raid 10 Volume is showing as Raid 1E in dmesg + * SCSI: fix oops during scsi scanning + * SCSI: libsrp: fix memory leak in srp_ring_free() + * cfg80211: fix looping soft lockup in find_ie() + * ath5k: write PCU registers on initial reset + * binfmt_elf: fix PT_INTERP bss handling + * TPM: Fixup boot probe timeout for tpm_tis driver + * md: Fix "strchr" [drivers/md/dm-log-userspace.ko] undefined! 
+ * x86/amd-iommu: fix broken check in amd_iommu_flush_all_devices + * fix undefined reference to user_shm_unlock + * perf_counter: Fix buffer overflow in perf_copy_attr() + * perf_counter: Start counting time enabled when group leader gets + enabled + * powerpc/perf_counters: Reduce stack usage of power_check_constraints + * powerpc: Fix bug where perf_counters breaks oprofile + * powerpc/ps3: Workaround for flash memory I/O error + * block: don't assume device has a request list backing in nr_requests + store + * agp/intel: remove restore in resume + * ALSA: cs46xx - Fix minimum period size + * ASoC: Fix WM835x Out4 capture enumeration + * sound: oxygen: work around MCE when changing volume + * mlx4_core: Allocate and map sufficient ICM memory for EQ context + * perf stat: Change noise calculation to use stddev + * x86: Fix x86_model test in es7000_apic_is_cluster() + * x86/i386: Make sure stack-protector segment base is cache aligned + * PCI: apply nv_msi_ht_cap_quirk on resume too + * x86, pat: Fix cacheflush address in change_page_attr_set_clr() + * ARM: 5691/1: fix cache aliasing issues between kmap() and kmap_atomic() + with highmem + * KVM guest: do not batch pte updates from interrupt context + * KVM: Fix coalesced interrupt reporting in IOAPIC + * KVM: VMX: Check cpl before emulating debug register access + * KVM guest: fix bogus wallclock physical address calculation + * KVM: x86: Disallow hypercalls for guest callers in rings > 0 + * KVM: VMX: Fix cr8 exiting control clobbering by EPT + * KVM: x86 emulator: Implement zero-extended immediate decoding + * KVM: MMU: make __kvm_mmu_free_some_pages handle empty list + * KVM: x86 emulator: fix jmp far decoding (opcode 0xea) + * KVM: limit lapic periodic timer frequency + * libata: fix off-by-one error in ata_tf_read_block() + * PCI quirk: update 82576 device ids in SR-IOV quirks list + * PCI: Unhide the SMBus on the Compaq Evo D510 USDT + * powerpc/pseries: Fix to handle slb resize across migration + * Linux 2.6.31.1 + + -- Tim Gardner Thu, 24 Sep 2009 13:04:28 -0600 + +linux (2.6.31-10.35) karmic; urgency=low + + [ Amit Kucheria ] + + * Disable CONFIG_UEVENT_HELPER_PATH + + [ Andy Whitcroft ] + + * [Config] Enable CONFIG_USB_GADGET_DUMMY_HCD + * remove the tlsup driver + * remove lmpcm logitech driver support + + [ Bryan Wu ] + + * Add 3 missing files to prerm remove file list + - LP: #345623, #415832 + + [ Chris Wilson ] + + * [Upstream] drm/i915: Check that the relocation points to within the + target + - LP: #429241 + + [ Luke Yelavich ] + + * [Config] Set CONFIG_EXT4_FS=y on ports architectures + + [ Manoj Iyer ] + + * SAUCE: Added quirk to recognize GE0301 3G modem as an interface. + - LP: #348861 + + [ Tim Gardner ] + + * Revert "[Upstream] ACPI: Add Thinkpad W500, W700, & W700ds to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad R400 & Thinkpad R500 to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad X300 & Thinkpad X301 to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad X200, X200s, X200t to OSI(Linux) white-list" + * Revert "[Upstream] ACPI: Add Thinkpad T400 & Thinkpad T500 to OSI(Linux) white-list" + Upstream suggests that this is not the right approach. + + * [Config] Set default I/O scheduler to DEADLINE + CFQ seems to have some load related problems which are often exacerbated by sreadahead. 
+ - LP: #381300 + + [ ubuntu@tjworld.net ] + + * SAUCE: ipw2200: Enable LED by default + - LP: #21367 + + [ Upstream Kernel Changes ] + + * ALSA: hda - Add support for new AMD HD audio devices + - LP: #430564 + + -- Andy Whitcroft Wed, 16 Sep 2009 15:37:49 +0100 + +linux (2.6.31-10.34) karmic; urgency=low + + [ Ted Tso ] + + * [Upstream] ext3: Don't update superblock write time when filesystem is + read-only + - LP: #427822 + + -- Tim Gardner Tue, 15 Sep 2009 16:00:45 -0600 + +linux (2.6.31-10.33) karmic; urgency=low + + [ Leann Ogasawara ] + + * [Upstream] dvb-usb: fix tuning with Cinergy T2 + - LP: #421258 + + [ Tim Gardner ] + + * [Config] Unconditionally copy files from sub-flavours lists. + (really, really fix it this time) + - LP: #423426 + * [Config] Set CONFIG_CACHEFILES=m for all flavours + + [ Upstream Kernel Changes ] + + * ext4: Don't update superblock write time when filesystem is read-only + - LP: #427822 + + -- Tim Gardner Tue, 15 Sep 2009 07:50:21 -0600 + +linux (2.6.31-10.32) karmic; urgency=low + + [ Eric Miao ] + + * [Config] enable module support for memory stick + - LP: #159951 + + [ Tim Gardner ] + + * [Config] Unconditionally copy files from sub-flavours lists. + - LP: #423426 + + -- Tim Gardner Thu, 10 Sep 2009 15:57:55 -0600 + +linux (2.6.31-10.31) karmic; urgency=low + + [ Andy Whitcroft ] + + * rebase to v2.6.31 final + + [ Colin Watson ] + + * [Config] Recommend grub-pc in linux-image + - LP: #385741 + + [ Ike Panhc ] + + * [Upstream] Pull latest update of lenovo-sl-laptop + + [ Peter Feuerer ] + + * [Upstream] (drop after 2.6.31) acerhdf: fix fan control for AOA150 + model + - LP: #426691 + + [ Tim Gardner ] + + * [Config] De-macro some package names. + + [ Upstream Changes ] + + * rebase to 2.6.31 final. + + -- Andy Whitcroft Thu, 10 Sep 2009 09:38:10 +0100 + +linux (2.6.31-10.30) karmic; urgency=low + + [ Amit Kucheria ] + + * [Config] Enable CONFIG_USB_DEVICEFS + - LP: #417748 + * [Config] Populate the config-update template a bit more + + [ Andy Whitcroft ] + + * rebase to v2.6.31-rc9 + * [Config] update configs following rebase to v2.6.31-rc9 + * [Config] update ports configs following rebase to v2.6.31-rc9 + + [ Colin Ian King ] + + * SAUCE: wireless: hostap, fix oops due to early probing interrupt + - LP: #254837 + + [ Jerone Young ] + + * [Upstream] ACPI: Add Thinkpad T400 & Thinkpad T500 to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad X200, X200s, X200t to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad X300 & Thinkpad X301 to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad R400 & Thinkpad R500 to OSI(Linux) + white-list + - LP: #281732 + * [Upstream] ACPI: Add Thinkpad W500, W700, & W700ds to OSI(Linux) + white-list + - LP: #281732 + + [ John Johansen ] + + * SAUCE: AppArmor: Fix profile attachment for regexp based profile names + - LP: #419308 + * SAUCE: AppArmor: Return the correct error codes on profile + addition/removal + - LP: #408473 + * SAUCE: AppArmor: Fix OOPS in profile listing, and display full list + - LP: #408454 + * SAUCE: AppArmor: Fix mapping of pux to new internal permission format + - LP: #419222 + * SAUCE: AppArmor: Fix change_profile failure + - LP: #401931 + * SAUCE: AppArmor: Tell git to ignore generated include files + - LP: #419505 + + [ Stefan Bader ] + + * [Upstream] acpi: video: Loosen strictness of video bus detection code + - LP: #333386 + * SAUCE: Remove ov511 driver from ubuntu subdirectory + + [ Tim Gardner ] + + * [Config] Exclude 
char-modules from non-x86 udeb creation + * SAUCE: Notify the ACPI call chain of AC events + * [Config] CONFIG_SATA_VIA=m + - LP: #403385 + * [Config] Build in all phylib support modules. + * [Config] Don't fail when sub-flavour files are missing + - LP: #423426 + * [Config] Set CONFIG_LSM_MMAP_MIN_ADDR=0 + - LP: #423513 + + [ Upstream ] + + * Rebased against v2.6.31-rc9 + + -- Andy Whitcroft Mon, 07 Sep 2009 11:33:45 +0100 + +linux (2.6.31-9.29) karmic; urgency=low + + [ Leann Ogasawara ] + + * [Upstream] agp/intel: support for new chip variant of IGDNG mobile + - LP: #419993 + * [Config] d-i/modules: Add new char-modules file, initialize with + intel-agp + - LP: #420605 + + [ Upstream ] + + * Rebased against 2.6.31-rc8 plus some inotify regression patches: + up through git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git + adda766193ea1cf3137484a9521972d080d0b7af. + + -- Tim Gardner Fri, 28 Aug 2009 06:31:30 -0600 + +linux (2.6.31-8.28) karmic; urgency=low + + [ Ike Panhc ] + + * [Config] Let nic-shared-modules depends on crypto-modules + - LP: #360966 + + [ Leann Ogasawara ] + + * [Upstream] (drop after 2.6.31) drm/i915: increase default latency + constant + - LP: #412492 + + [ Mario Limonciello ] + + * [Upstream]: (drop after 2.6.31) dell-laptop: don't change softblock + status if HW switch is disabled + - LP: #418721 + * [Upstream]: (drop after 2.6.31) compal-laptop: Add support for known + Compal made Dell laptops + * [Upstream]: (drop after 2.6.31) compal-laptop: Replace sysfs support + with rfkill support + + [ Tim Gardner ] + + * [Config] Add acpiphp to virtual sub-flavour + - LP: #364916 + * Drop KSM patch set for now because of instabilities with encrypted swap. + - LP: #418781 + + -- Tim Gardner Wed, 26 Aug 2009 08:14:26 -0600 + +linux (2.6.31-7.27) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] updateconfigs updateportsconfigs after 2.6.31-rc7 rebase + * SAUCE: (drop after 2.6.31) Added KSM from mmotm-2009-08-20-19-18 + Replaces previous ksm patches from 2.6.31-6.25 + * [Config] KSM=y + + [ Upstream ] + + * Rebased against v2.6.31-rc7 + + -- Tim Gardner Sat, 22 Aug 2009 20:32:11 -0600 + +linux (2.6.31-6.26) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] enable CONFIG_AUFS_BR_RAMFS + - LP: #414738 + * split out debian directory ready for abstraction + * add printdebian target to find branch target + * abstracted debian -- debian/files is not abstracted + * abstracted debian -- packages must be built in debian/ + * abstracted debian -- kernel-wedge needs to work in debian/ + * abstracted debian -- ensure we install the copyright file + * abstracted-debian -- drop the debian directories from headers + * abstracted-debian -- drop the debian directories from headers part 2 + * SAUCE: ubuntu-insert-changes -- follow abstracted debian + * [Upstream] aoe: ensure we initialise the request_queue correctly V2 + - LP: #410198 + + [ Luke Yelavich ] + + * [Config] Ports: Disable CONFIG_CPU_FREQ_DEBUG on powerpc-smp + * [Config] Ports: Re-enable windfarm modules on powerpc64-smp + - LP: #413150 + * [Config] Ports: Build all cpu frequency scaling governors into ports + kernels + * [Config] Ports: Build ext2 and ext3 modules into ports kernels + * [Config] Ports: CONFIG_PACKET=y for all ports kernels + * [Config] Ports: Enable PS3 network driver + + [ Stefan Bader ] + + * abstracted debian -- call $(DEBIAN)/rules using make + + [ Tim Gardner ] + + * [Config] Abstract the debian directory + * SAUCE: Improve error reporting in postinst + - LP: #358564 + + -- 
Tim Gardner Sun, 16 Aug 2009 20:33:28 -0600 + +linux (2.6.31-6.25) karmic; urgency=low + + [ Andy Whitcroft ] + + * script to generate Ubuntu changes from changelog + * [Config] standardise ANDROID options + * [Config] standardise CONFIG_ATM as module + * [Config] standardise CONFIG_LIB80211 as module + * [Config] disable CONFIG_PRINT_QUOTA_WARNING + * [Config] set CONFIG_CRAMFS as module + * [Config] enable CONFIG_DAB and modules + * [Config] set CONFIG_MAC80211_HWSIM as module + * [Config] set CONFIG_NET_CLS_FLOW as module + * [Config] set CONFIG_NF_CONNTRACK_SANE as module + * [Config] set CONFIG_NF_CT_PROTO_DCCP as module + * [Config] set CONFIG_RTC_DRV_DS1511 as module + * [Config] set CONFIG_RTC_DRV_R9701 as module + * [Config] set CONFIG_RTC_DRV_S35390A as module + * [Config] set CONFIG_TOIM3232_DONGLE as module + * [Config] standardise CONFIG_USB_MIDI_GADGET as module + * [Config] standardise CONFIG_USB_G_PRINTER as module + * [Config] standardise CONFIG_USB_SERIAL_IR as module + * [Config] set CONFIG_USB_SERIAL_IUU as module + * [Config] standardise CONFIG_USB_STORAGE_CYPRESS_ATACB as module + * [Config] standardise CONFIG_USB_STORAGE_ONETOUCH as module + * cleanup remains of dm-loop + * drop thinkpad ec and smapi support + * drop appleir + * [Config] update configs following rebase to v2.6.31-rc6 + * rebase to v2.6.31-rc6 + + [ Hugh Dickins ] + + * SAUCE: ksm patch 1, drop after 2.6.31 + * SAUCE: ksm patch 2, drop after 2.6.31 + * SAUCE: ksm patch 3, drop after 2.6.31 + * SAUCE: ksm patch 4, drop after 2.6.31 + * SAUCE: ksm patch 5, drop after 2.6.31 + * SAUCE: ksm patch 7, drop after 2.6.31 + + [ Izik Eidus ] + + * SAUCE: ksm patch 0, drop after 2.6.31 + * SAUCE: ksm patch 6, drop after 2.6.31 + * SAUCE: ksm patch 8, drop after 2.6.31 + * SAUCE: ksm patch 9, drop after 2.6.31 + + [ Luke Yelavich ] + + * [Config] Ports: Re-add PS3 modules to udebs + + [ Michael Casadevall ] + + * [Config] Update SPARC config and d-i files to reflect what can be built + + [ Tim Gardner ] + + * [Config] Removed armel package support + * [Config] Enabled CONFIG_KSM=y + + [ Upstream Kernel Changes ] + + * Rebased against v2.6.31-rc6 + * ARM: Cleanup: Revert "ARM: Add more cache memory types macros" + * ARM: Cleanup: Revert "Do not use OOB with MLC NAND" + * ARM: Cleanup: Revert "ARM: Make ARM arch aware of ubuntu/ drivers" + * ARM: Cleanup: Revert "ARM: IMX51: Make video capture drivers compile" + * ARM: Cleanup: Revert "ARM: IMX51: Fix isl29003 HWMON driver for i2c + changes" + * ARM: Cleanup: Revert "ARM: IMX51: IPU irq handler deadlock fix" + * ARM: Cleanup: Revert "ARM: IMX51: Babbage 2.5 needs a different system + revision" + * ARM: Cleanup: Revert "ARM: IMX51: Compile-in the IMX51 cpufreq driver + by default" + * ARM: Cleanup: Revert "ARM: IMX51: Enable ZONE_DMA for ARCH_MXC" + * ARM: Cleanup: Revert "ARM: IMX51: Make ARCH_MXC auto-enable + ARCH_MXC_CANONICAL" + * ARM: Cleanup: Revert "ARM: IMX51: Unconditionally disable + CONFIG_GPIOLIB" + * ARM: Cleanup: Revert "ARM: IMX51: Minimal changes for USB to work on + 2.6.31" + * ARM: Cleanup: Revert "ARM: IMX51: Fix plat-mxc/timer.c to handle imx51" + * ARM: Cleanup: Revert "ARM: IMX51: Make it compile." 
+ * ARM: Cleanup: Revert "ARM: IMX51: Clean-up the craziness of including + mxc_uart.h _everywhere_" + * ARM: Cleanup: Revert "ARM: IMX51: Move board-mx51* header files to the + correct location" + * ARM: Cleanup: Revert "ARM: IMX51: Changed from snd_card_new to + snd_card_create" + * ARM: Cleanup: Revert "ARM: IMX51: Fix up merge error in Kconfig" + * ARM: Cleanup: Revert "ARM: IMX51: mxc_timer_init prototype" + * ARM: Cleanup: Revert "ARM: IMX51: Removed the mxc_gpio_port structure." + * ARM: Cleanup: Revert "ARM: IMX51: Added external declaration for + mxc_map_io." + * ARM: Cleanup: Revert "ARM: IMX51: Get to bus_id by calling dev_name." + * ARM: Cleanup: Revert "ARM: IMX51: Get to bus_id by calling dev_name." + * ARM: Cleanup: Revert "ARM: IMX51: snd_soc_machine structure replaced + with snd_soc_card." + * ARM: Cleanup: Revert "ARM: IMX51: codec structure was moved to the card + structure" + * ARM: Cleanup: Revert "ARM: IMX51: Hack to add defines for + DMA_MODE_READ/WRITE/MASK" + * ARM: Cleanup: Revert "ARM: IMX51: Add SoC and board support for + Freescale mx51 platform" + * Driver core: add new device to bus's list before probing + * [Upstream] (drop after 2.6.31) ALSA: hda - Reduce click noise at + power-saving + - LP: #381693, #399750, #380892 + + -- Andy Whitcroft Fri, 14 Aug 2009 11:32:23 +0100 + +linux (2.6.31-5.24) karmic; urgency=low + + [ Amit Kucheria ] + + * ARM: IMX51: Make video capture drivers compile + * [Config] IMX51: Config updates + + [ Andy Whitcroft ] + + * remove leftovers of dm-bbr + + [ Leann Ogasawara ] + + * Add pata_cs5535 to pata-modules + - LP: #318805 + + [ Luke Yelavich ] + + * [Config] CONFIG_PPC64=y for powerpc64-smp + * [Config] Set the maximum number of CPUs to 1024 for powerpc64-smp + * [Config] CONFIG_PPC_PS3=y for powerpc64-smp + * [Config] CONFIG_PPC_MAPLE=y on powerpc64-smp + * [Config] CONFIG_PPC_PASEMI=y on powerpc64-smp + * [Config] CONFIG_CPU_FREQ_PMAC64=y on powerpc64-smp + * [Config] Enable all PS3 drivers in powerpc64-smp + + [ Mario Limonciello ] + + * LIRC -- fix lirc-i2c 2.6.31 compilation + + [ Matthew Garrett ] + + * [Upstream] dell-laptop: Fix rfkill state queries + + [ Tim Gardner ] + + * [Config] Ignore armel ABI and module changes + * [Config] Update configs after rebase against 2.6.31-rc5 + + [ Upstream ] + + * Rebased to 2.6.31-rc5 + + -- Andy Whitcroft Tue, 28 Jul 2009 10:10:09 +0100 + +linux (2.6.31-4.23) karmic; urgency=low + + [ Andy Whitcroft ] + + * AUFS -- update to aufs2-30 20090727 + * [Config] enable AUFS FUSE support + + [ Luke Yelavich ] + + * [Config] CONFIG_JFS_FS=m on sparc + + [ Tim Gardner ] + + * [Upstream] dell-laptop: Fix rfkill state setting. + + -- Andy Whitcroft Mon, 27 Jul 2009 11:11:47 +0100 + +linux (2.6.31-4.22) karmic; urgency=low + + [ Amit Kucheria ] + + * ARM: IMX51: Add SoC and board support for Freescale mx51 platform + * ARM: IMX51: Move board-mx51* header files to the correct location + * ARM: IMX51: Clean-up the craziness of including mxc_uart.h _everywhere_ + * ARM: IMX51: Make it compile. 
+ * ARM: IMX51: Unconditionally disable CONFIG_GPIOLIB + * ARM: IMX51: Make ARCH_MXC auto-enable ARCH_MXC_CANONICAL + * ARM: IMX51: Enable ZONE_DMA for ARCH_MXC + * ARM: IMX51: Compile-in the IMX51 cpufreq driver by default + * ARM: IMX51: Fix isl29003 HWMON driver for i2c changes + * ARM: USB: musb: Refer to musb_otg_timer_func under correct #ifdef + * ARM: staging: udlfb: Add vmalloc.h include + * UBUNTU [Config]: Bring imx51 config upto date with other flavours + + [ Brad Figg ] + + * ARM: IMX51: Hack to add defines for DMA_MODE_READ/WRITE/MASK + * ARM: IMX51: codec structure was moved to the card structure + * ARM: IMX51: snd_soc_machine structure replaced with snd_soc_card. + * ARM: IMX51: Get to bus_id by calling dev_name. + * ARM: IMX51: Get to bus_id by calling dev_name. + * ARM: IMX51: Added external declaration for mxc_map_io. + * ARM: IMX51: Removed the mxc_gpio_port structure. + * ARM: IMX51: mxc_timer_init prototype + * ARM: IMX51: Fix up merge error in Kconfig + * ARM: IMX51: Changed from snd_card_new to snd_card_create + + [ Dinh Nguyen ] + + * ARM: IMX51: Fix plat-mxc/timer.c to handle imx51 + * ARM: IMX51: Minimal changes for USB to work on 2.6.31 + * ARM: IMX51: Babbage 2.5 needs a different system revision + * ARM: IMX51: IPU irq handler deadlock fix + + [ Tim Gardner ] + + * [Config] Enabled CONFIG_CAN=m + - LP: #327243 + * [Config] Enabled CONFIG_SERIAL=m + - LP: #397189 + + -- Tim Gardner Fri, 24 Jul 2009 06:19:10 -0600 + +linux (2.6.31-4.21) karmic; urgency=low + + [ Amit Kucheria ] + + * dm-raid-4-5: Add missing brackets around test_bit() + + [ John Johansen ] + + * AppArmor: Fix change_profile failing lpn401931 + * AppArmor: Fix determination of forced AUDIT messages. + * AppArmor: Fix oops in auditing of the policy interface offset + + -- Andy Whitcroft Thu, 23 Jul 2009 19:18:30 +0100 + +linux (2.6.31-4.20) karmic; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: iscsitarget -- update to SVN revision r214 + * SAUCE: iscsitarget -- renable driver + * [Config] consolidate lpia/lpia and i386/generic configs + * [Config] enable CRYPTO modules for all architectures + * [Config] enable cryptoloop + * [Config] enable various filesystems for armel + * [Config] sync i386 generic and generic-pae + * [Config] add the 386 (486 processors and above) flavour + * [Config] re-set DEFAULT_MMAP_MIN_ADDR + - LP: #399914 + * add genconfigs/genportsconfigs to extract the built configs + * updateconfigs -- alter concatenation order allow easier updates + * intelfb -- INTELFB now conflicts with DRM_I915 + * printchanges -- rebase tree does not have stable tags use changelog + * AppArmor: fix argument size missmatch on 64 bit builds + + [ Ike Panhc ] + + * Ship bnx2x firmware in nic-modules udeb + - LP: #360966 + + [ Jeff Mahoney ] + + * AppArmor: fix build failure on ia64 + + [ John Johansen ] + + * AppArmour: ensure apparmor enabled parmater is off if AppArmor fails to + initialize. + * AppArmour: fix auditing of domain transitions to include target profile + information + * AppArmor: fix C99 violation + * AppArmor: revert reporting of create to write permission. + * SAUCE: Add config option to set a default LSM + * [Config] enable AppArmor by default + * AppArmor: Fix NULL pointer dereference oops in profile attachment. 
+ + [ Keith Packard ] + + * SAUCE: drm/i915: Allow frame buffers up to 4096x4096 on 915/945 class + hardware + - LP: #351756 + + [ Luke Yelavich ] + + * [Config] add .o files found in arch/powerpc/lib to all powerpc kernel + header packages + - LP: #355344 + + [ Michael Casadevall ] + + * [Config] update SPARC config files to allow success build + + [ Scott James Remnant ] + + * SAUCE: trace: add trace_event for the open() syscall + + [ Stefan Bader ] + + * SAUCE: jfs: Fix early release of acl in jfs_get_acl + - LP: #396780 + + [ Tim Gardner ] + + * [Upstream] Fix Soltech TA12 volume hotkeys not sending key release + - LP: #397499 + * [Upstream] USB Option driver - Add USB ID for Novatel MC727/U727/USB727 + refresh + - LP: #365291 + * [Config] SSB/B44 are common across all arches/flavours. + + [ Upstream ] + + * Rebased to 2.6.31-rc4 + + -- Andy Whitcroft Thu, 23 Jul 2009 08:41:39 +0100 + +linux (2.6.31-3.19) karmic; urgency=low + + [ Andy Whitcroft ] + + * Revert "[Config] Disabled NDISWRAPPER" + * ndiswrapper -- fix i386 compilation failures on cmpxchg8b + * AUFS -- export various core functions + * AUFS -- export various core functions -- fixes + * AUFS -- core filesystem + * AUFS -- track changes in v2.6.31 + * [Config] Enable AUFS + * droppped 'iwl3945: do not send scan command if channel count zero' as it + is already upstream but failed to auto-drop on rebase. + + [ Eric Paris ] + + * SAUCE: fsnotify: use def_bool in kconfig instead of letting the user + choose + * SAUCE: inotify: check filename before dropping repeat events + * SAUCE: fsnotify: fix inotify tail drop check with path entries + + -- Andy Whitcroft Tue, 14 Jul 2009 12:52:55 +0100 + +linux (2.6.31-3.18) karmic; urgency=low + + [ Andy Whitcroft ] + + * Revert "Add splice-2.6.23.patch from AUFS to export a symbol needed by + AUFS" + * Revert "Add put_filp.patch from AUFS to export a symbol needed by AUFS" + * Revert "Add sec_perm-2.6.24.patch from AUFS - export + security_inode_permission" + * clear out left over AUFS files and modifications + + [ Luke Yelavich ] + + * [Config] Enable CONFIG_USB_ISP116X_HCD on sparc + * SAUCE: Explicitly include header files to allow apparmor to build on + powerpc + * [Config] Enable CONFIG_BLK_DEV_IDECD on powerpc + + [ Tim Gardner ] + + * [Config] Dropped ubuntu/misc/wireless/acx + * [Config] Disabled NDISWRAPPER until the compile issues are fixed. + + [ Upstream ] + + * Rebased to 2.6.31-rc3 + + -- Andy Whitcroft Fri, 10 Jul 2009 18:59:33 +0100 + +linux (2.6.31-2.17) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_BLK_DEV_CRYPTOLOOP=m for sparc + * compcache -- remove redundant Kconfig entries part 2 + * compcache -- clean up CCFLAGS declarations + * [Config] enable AppArmor + * AppArmor: fix operator precidence issue in as_path_link + + [ John Johansen ] + + * AppArmor security module + * AppArmor: Correct mapping of file permissions. + * AppArmor: Turn auditing of ptrace on + + [ Luke Yelavich ] + + * [Config] disable CONFIG_DM_RAID45 on powerpc + + -- Andy Whitcroft Fri, 10 Jul 2009 15:02:05 +0100 + +linux (2.6.31-2.16) karmic; urgency=low + + [ Andy Whitcroft ] + + * compcache -- remove redundant Kconfig entries + added ignore and ignore.modules for all arches since the compcache update + changes the modules names as well as some compcache ABI values. 
+ + [ Manoj Iyer ] + + * SAUCE: updated dm-raid45 module version to 2009.04.24 (2.6.30-rc3) + * SAUCE: update compcache version to 0.5.3 + + [ Tim Gardner ] + + * [Config]: Fix sparc FTBS by adding ignore.modules + + -- Tim Gardner Mon, 06 Jul 2009 13:35:29 -0600 + +linux (2.6.31-2.15) karmic; urgency=low + + [ Andy Whitcroft ] + + * SAUCE: default ATI Radeon KMS to off until userspace catches up + * [Config] Update configs following rebase to 2.6.31-rc2 + * [Config] update ports configs following update to 2.6.31-rc2 + + [ Luke Yelavich ] + + * [Config] powerpc - Disable CONFIG_RDS + + [ Matt Zimmerman ] + + * Rename linux-doc-PKGVER to linux-doc and clean up its description + - LP: #382115 + + [ Upstream Kernel Changes ] + + * rebased to mainline 2.6.31-rc2 + + -- Andy Whitcroft Sat, 04 Jul 2009 17:39:13 +0100 + +linux (2.6.31-1.14) karmic; urgency=low + + [ Andy Whitcroft ] + + * update ndiswrapper to 1.55 + * remove leftovers of gfs + * [Config] powerpc: enable CONFIG_PPC_DISABLE_WERROR + + [ Luke Yelavich ] + + * [Config] re-enable and build the ide-pmac driver into powerpc kernels + * [Config] Build the ServerWorks Frodo / Apple K2 SATA driver into the + kernel + + [ Manoj Iyer ] + + * Remove snd-bt-sco ubuntu driver + + [ Michael Casadevall ] + + * [Config] updates ia64 config and d-i folders to allow succesful build + * [Config] Update powerpc and sparc for 2.6.31 + + [ Upstream Kernel Changes ] + + * intel-iommu: fix Identity Mapping to be arch independent + - LP: #384695 + * ACPI: video: prevent NULL deref in acpi_get_pci_dev() + + -- Andy Whitcroft Tue, 30 Jun 2009 17:47:32 +0100 + +linux (2.6.31-1.13) karmic; urgency=low + + [ Andy Whitcroft ] + + * REBASE: rebased to mainline 2.6.31-rc1 + - "UBUNTU: SAUCE: UHCI USB quirk for resume" + no longer applies, using deprecated interfaces, LPIA only, dropped + - "UBUNTU: SAUCE: Mask off garbage in Dell WMI scan code data" + changes now upstream, dropped + * [Config] Update configs following rebase to 2.6.31-rc1 + * [Config] update ports configs following update to 2.6.31-rc1 + + * [Config] disable broken staging driver CONFIG_STLC45XX + * SAUCE: fix compcache to use updates accessors + * [Config] disable staging driver CONFIG_VT6655 + * SAUCE: fix DRDB to use updates accessors + * [Disable] ndiswrapper needs update + * [Disable] LIRC I2C needs update + * [Disable] CONFIG_LENOVO_SL_LAPTOP needs update + * [Config] disable I2C_DESIGNWARE does not compile + * [Config] disable CONFIG_TLSUP for lpia + * [Config] disable CONFIG_FB_UDL for arm + * SAUCE: disable adding scsi headers to linux-libc-dev + + [ Mario Limonciello ] + + * SAUCE: Add LIRC drivers + + -- Andy Whitcroft Thu, 25 Jun 2009 12:06:22 +0100 + +linux (2.6.30-10.12) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] split out the ports configs into their own family + * [Config] update configs following introduction of ports family + + [ Upstream Kernel Changes ] + + * Revert "Rename linux-doc-PKGVER to linux-doc and clean up its + description". Fixes linux-doc package name conflicts for now. 
+ - LP: #382115 + + -- Tim Gardner Mon, 22 Jun 2009 09:17:14 -0600 + +linux (2.6.30-10.11) karmic; urgency=low + + [ Amit Kucheria ] + + * [Config] Comment splitconfig.pl and misc cleanup + * [Config] Rename all configs to the new naming scheme + * [Config] Splitconfig rework + * [Config] Rename scripts/misc/oldconfig to kernelconfig + * [Config] Fix build system for new config split + * [Config] Run updateconfigs after the splitconfig rework + + [ Andy Whitcroft ] + + * Revert "SAUCE: Default to i915.modeset=0 if CONFIG_DRM_I915_KMS=y" + * [Config] standardise CONFIG_STAGING=y + * [Config] standardise CONFIG_RD_LZMA=y + * [Config] CONFIG_PCI_IOV=y + * [Config] CONFIG_PCI_STUB=m + * [Config] merge kernel configs more agressively + + [ Colin Watson ] + + * [Config] Run kernel-wedge in $(builddir) rather than at the top level + * [Config] Add support for including firmware in udebs + * [Config] Ship bnx2 firmware in nic-modules udeb + - LP: #384861 + + [ Luke Yelavich ] + + * [Config] ports - Import of ports architectures into kernel packaging + infrastructure + * [Config] ports - Do not update ports kernel configurations by default + * [Config] ports - Disable ABI checking for ports architectures + * [Config] ports - Build drivers in ubuntu sub-directory on powerpc + * [Config] ports - Add control.d/vars.* files for ports architectures + * [Config] ports - Add ports architectures for linux-libc-dev + * [Config] ports - Create powerpc specific message-modules and + block-modules udebs + * [Config] ports - Add configuration files for ports architectures + + [ Manoj Iyer ] + + * [Config] Enable CONFIG_BLK_DEV_AEC62XX=m for amd64 and i386 + - LP: #329864 + + [ Michael Casadevall ] + + * [Config] ports - Fix compression of kernels + + [ Stefan Bader ] + + * [Upstream] mmc: prevent dangling block device from accessing stale + queues + - LP: #383668 + + [ Tim Gardner ] + + * [Config] Recommend grub-pc in linux-image + - LP: #385741 + * [Config] Implement i386 generic and generic-pae flavours + * [Config] ports - Add control info after integrating ports arches + * [Config] Removed auto-generated files from git + * [Config] Added netxen_nic to nic-modules + - LP: #389603 + + [ Matt Zimmerman ] + + * Rename linux-doc-PKGVER to linux-doc and clean up its description + - LP: #382115 + + -- Tim Gardner Mon, 15 Jun 2009 14:38:26 -0600 + +linux (2.6.30-9.10) karmic; urgency=low + + [ Andy Whitcroft ] + + * [Config] CONFIG_SECURITY_TOMOYO=y (amd64, i386, lpia) + * [Config] CONFIG_KEXEC_JUMP=y (amd64, lpia) + * [Config] CONFIG_LENOVO_SL_LAPTOP=m (amd64, lpia) + * [Config] CONFIG_POHMELFS_CRYPTO=y (i386, amd64) + * [Config] CONFIG_SERIAL_MAX3100=m (i386, amd64, lpia) + * [Config] CONFIG_VIDEO_GO7007=m (amd64, i386) + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30 final + + -- Andy Whitcroft Fri, 05 Jun 2009 11:42:53 +0100 + +linux (2.6.30-8.9) karmic; urgency=low + + [ Andy Whitcroft ] + + * Config update removed the following options: + CONFIG_EDAC_AMD8111=m + CONFIG_EDAC_AMD8131=m + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc8 + + -- Andy Whitcroft Wed, 03 Jun 2009 09:21:13 +0100 + +linux (2.6.30-7.8) karmic; urgency=low + + [ Andy Whitcroft ] + + * Enabled NEW configration options: + Paravirtualization layer for spinlocks (PARAVIRT_SPINLOCKS) [N/y/?] Y + Cisco FNIC Driver (FCOE_FNIC) [N/m/y/?] 
M + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc7 + + -- Andy Whitcroft Sat, 23 May 2009 23:47:24 +0100 + +linux (2.6.30-6.7) karmic; urgency=low + + [ Andy Whitcroft ] + + * Dropped: UBUNTU: SAUCE: input: Blacklist digitizers from joydev.c (now + upstream) + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc6 + + -- Andy Whitcroft Mon, 18 May 2009 18:05:54 +0100 + +linux (2.6.30-5.6) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] Enable Keyspan USB serial device firmware in kernel module + - LP: #334285 + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc5 + + -- Tim Gardner Mon, 11 May 2009 12:02:16 -0600 + +linux (2.6.30-4.5) karmic; urgency=low + + [ Colin Watson ] + + * Build-Conflict with findutils (= 4.4.1-1ubuntu1), to avoid + /usr/include/asm/* going missing + - LP: #373214 + + -- Stefan Bader Fri, 08 May 2009 11:09:08 +0200 + +linux (2.6.30-3.4) karmic; urgency=low + + [ Kees Cook ] + + * SAUCE: [x86] implement cs-limit nx-emulation for ia32 + - LP: #369978 + + [ Stefan Bader ] + + * SAUCE: input: Blacklist digitizers from joydev.c + - LP: #300143 + + -- Tim Gardner Fri, 01 May 2009 14:00:42 -0600 + +linux (2.6.30-2.3) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] Enabled CC_STACKPROTECTOR=y for all x86en + - LP: #369152 + * SAUCE: Default to i915_modeset=0 if CONFIG_DRM_I915_KMS=y + * [Config] CONFIG_DRM_I915_KMS=y + * [Config] Set CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR to appropriate ARCH + minimums + + [ Upstream Kernel Changes ] + + * rebased to 2.6.30-rc4 + + -- Tim Gardner Thu, 30 Apr 2009 09:17:05 -0600 + +linux (2.6.30-1.2) karmic; urgency=low + + [ Tim Gardner ] + + * [Config] armel: disable staging drivers, fixes FTBS + * [Config] armel imx51: Disable CONFIG_MTD_NAND_MXC, fixes FTBS + + [ Upstream Kernel Changes ] + + * mpt2sas: Change reset_type enum to avoid namespace collision. + Submitted upstream. + + -- Tim Gardner Tue, 28 Apr 2009 16:54:41 -0600 + +linux (2.6.30-1.1) karmic; urgency=low + + * Initial release after rebasing against v2.6.30-rc3 + + -- Tim Gardner Thu, 12 Mar 2009 19:16:07 -0600 --- linux-3.13.0.orig/debian/cloud-tools/hv_get_dhcp_info +++ linux-3.13.0/debian/cloud-tools/hv_get_dhcp_info @@ -0,0 +1,55 @@ +#!/bin/bash + +# This example script retrieves the DHCP state of a given interface. +# In the interest of keeping the KVP daemon code free of distro specific +# information; the kvp daemon code invokes this external script to gather +# DHCP setting for the specific interface. +# +# Input: Name of the interface +# +# Output: The script prints the string "Enabled" to stdout to indicate +# that DHCP is enabled on the interface. If DHCP is not enabled, +# the script prints the string "Disabled" to stdout. +# +# Each Distro is expected to implement this script in a distro specific +# fashion. 
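+#
+# Illustrative invocation (the interface name "eth0" below is only an
+# example and not something the KVP daemon is guaranteed to pass):
+#
+#   $ ./hv_get_dhcp_info eth0
+#   Enabled
+#
+# Per the contract above, the script writes the single word "Enabled" or
+# "Disabled" to stdout.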
+ +#set -x + +IF_FILE="/etc/network/interfaces" +NMCMD="nmcli" + +function checknetworkmanager { + #Assumes if $NMCMD exists, inteface exists and interface is not + # in $IF_FILE then dhcp is being used by NM + if hash $NMCMD >/dev/null 2>&1 ; then + if $NMCMD dev status |grep -q $1 ; then + echo "Enabled" + else + echo "Disabled" + fi + else + #Give up + echo "Disabled" + fi +} + +if [ -z $1 ] ; then echo "Disabled"; exit; fi + +if [ -e $IF_FILE ]; then + if grep -v -e "^#" $IF_FILE|grep -q $1 ; then + #interface exists so + if grep -q -e $1\.\*dhcp $IF_FILE; then + echo "Enabled"; exit; + else + echo "Disabled"; exit; + fi + else + checknetworkmanager $1 + exit + fi +else + checknetworkmanager $1 + exit +fi + --- linux-3.13.0.orig/debian/cloud-tools/hv_get_dns_info +++ linux-3.13.0/debian/cloud-tools/hv_get_dns_info @@ -0,0 +1,13 @@ +#!/bin/bash + +# This example script parses /etc/resolv.conf to retrive DNS information. +# In the interest of keeping the KVP daemon code free of distro specific +# information; the kvp daemon code invokes this external script to gather +# DNS information. +# This script is expected to print the nameserver values to stdout. +# Each Distro is expected to implement this script in a distro specific +# fashion. For instance on Distros that ship with Network Manager enabled, +# this script can be based on the Network Manager APIs for retrieving DNS +# entries. + +cat /etc/resolv.conf 2>/dev/null | awk '/^nameserver/ { print $2 }' --- linux-3.13.0.orig/debian/cloud-tools/hv_set_ifconfig +++ linux-3.13.0/debian/cloud-tools/hv_set_ifconfig @@ -0,0 +1,242 @@ +#! /usr/bin/env python + +# set interfaces in hv_kvp_daemon style +import fileinput +import sys +import errno +import os +import shutil +import tempfile +import subprocess + +if_filename="/etc/network/interfaces" + +'''Get quiet''' +sys.stdout = open(os.devnull, 'w') +sys.stderr = open(os.devnull, 'w') + +try: + if_file=open(if_filename,"r+") +except IOError as e: + exit(e.errno) +else: + if_file.close() + + +def kvp_dict(file): + return dict(line.strip().split("=") for line in file) + + +#setting the hwaddress to something azure is not expecting is fatal networking + +if len(sys.argv) != 2 : + exit(errno.EINVAL) + +kvp=dict(line.strip().split("=") for line in fileinput.input()) + +if not "HWADDR" in kvp : + exit(errno.EPROTO) + +if not "DEVICE" in kvp : + exit(1) + +output=[] +basename=kvp["DEVICE"] + +if "DHCP" in kvp and kvp["DHCP"]=="yes" : + output += ["auto " + basename] + output += ["iface " + basename + " inet dhcp"] + output += [""] +else: + ''' Matchup the interface specific lines ''' + + '''DNS entries will go with the first interface + and there can be a max of three''' + autolist=[] + dns=[] + if "DNS1" in kvp : + dns+=[kvp["DNS1"]] + if "DNS2" in kvp : + dns+=[kvp["DNS2"]] + if "DNS3" in kvp : + dns+=[kvp["DNS3"]] + + + ''' + No real max for the number of interface + aliases ... + only required is the address (but mate everything up that comes in. 
''' + + '''ipv4 first''' + + v4names=[name for name in kvp.keys() if name.startswith("IPADDR")] + v4names.sort() + + v6names=[name for name in kvp.keys() if name.startswith("IPV6ADDR")] + v6names.sort() + + '''IPV6 requires a netmask''' + '''If an ipv6 exists, you'll want to turn off /proc/sys/net/ipv6/conf/all/autoconf with + up echo 0 > /proc/sys/net/ipv6/conf/all/autoconf''' + + '''Counter needs to increment as soon as any interface is set.''' + + + if_count=0 + + for v4 in v4names: + ifname=basename + suffix="" + if if_count : + ifname+=":" + str(if_count) + suffix="_"+str(if_count) + if not ifname in autolist: + autolist += [ifname] + output += [ "iface " + ifname + " inet static"] + output += [ "\t" + "address " + kvp[v4]] + if "NETMASK"+suffix in kvp.keys(): + output += ["\tnetmask " + kvp["NETMASK"+suffix]] + if "GATEWAY"+suffix in kvp.keys(): + output += ["\tgateway " + kvp["GATEWAY"+suffix]] + if not if_count : + output += ["\tdns-nameservers " + ' '.join(dns)] + output += [""] + if_count+=1 + + if6_count=0 + if6_used=0 + for v6 in v6names: + ifname=basename + suffix="" + if if6_used : + ifname+=":" + str(if6_used) + if if6_count : + suffix="_" + str(if6_count) + if not ifname in autolist: + autolist += [ifname] + if "IPV6NETMASK"+suffix in kvp.keys(): + output += [ "iface " + ifname + " inet6 static"] + output += [ "\taddress " + kvp[v6]] + output += [ "\tnetmask " + kvp["IPV6NETMASK"+suffix]] + if "IPV6_DEFAULTGW"+suffix in kvp.keys(): + output += [ "\tgateway " + kvp["IPV6_DEFAULTGW"+suffix] ] + if not if_count : + output += ["\tdns-nameservers " + ' '.join(dns)] + output += [""] + if_count += 1 + if6_used += 1 + if6_count += 1 + + output = ["auto "+" ".join(autolist)] + output +print "===================================" +print output +print "===================================" + + +''' Time to clean out the existing interface file''' + +# Markers. +start_mark = "# The following stanza(s) added by hv_set_ifconfig" +end_mark = "#End of hv_set_ifconfig stanzas" + +f=open(if_filename,"r") +flines=f.readlines() +f.close() +newfile=[] +pitchstanza=0 +inastanza=0 +stanza=[] +prev_line=None +for line in flines: + if line.startswith("auto"): + if inastanza: + if not pitchstanza: + newfile.extend(stanza) + stanza=[] + inastanza=0 + newline="" + autoline=line.strip().split(" ") + for word in autoline: + if (not word == basename) and (not word.startswith(basename+":")): + newline+=word + " " + newline = newline.strip() + if not newline == "auto": + newfile += [newline.strip()] + elif line.startswith(("iface","mapping","source")): + '''Read a stanza''' + '''A Stanza can also start with allow- ie allow-hotplug''' + if inastanza: + if not pitchstanza: + newfile.extend(stanza) + stanza=[] + inastanza=1 + pitchstanza=0 + autoline=line.strip().split(" ") + for word in autoline: + if (word == basename) or (word.startswith(basename+":")): + pitchstanza=1 + if not pitchstanza: + stanza+=[line.strip()] + elif line.strip() in (start_mark, end_mark): + if inastanza: + if not pitchstanza: + newfile.extend(stanza) + stanza=[] + inastanza = 0 + pitchstanza = 0 + # Deduplicate markers. + if line != prev_line: + newfile += [line.strip()] + else: + if inastanza: + if not pitchstanza: + stanza+=[line.strip()] + else: + if not pitchstanza: + newfile += [line.strip()] + prev_line=line + + + +def emit(line): + print(line) + os.write(fd, line + "\n") + +# Insert the new output at the end and inside the existing markers if found. 
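+#
+# What follows streams the cleaned-up lines into a temporary file: if an end
+# marker left by a previous run is seen, the freshly generated stanzas are
+# written out just before it; otherwise a new start/end marker pair is
+# appended with the stanzas in between. The temporary file then replaces
+# /etc/network/interfaces, and the interface is bounced with ifdown/ifup so
+# the new configuration takes effect.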
+emitted = False +fd, path = tempfile.mkstemp() +for line in newfile: + if line == end_mark: + emit("\n".join(output)) + emitted = True + emit(line) +if not emitted: + emit(start_mark) + emit("\n".join(output)) + emit(end_mark) +os.close(fd) + +shutil.copy(path,if_filename) +os.chmod(if_filename,0644) +#print "TMPFILE is at: " + path +#print "Copied file is at: " + if_filename + + +try: + retcode = subprocess.call("ifdown "+basename , shell=True) + if retcode < 0: + print >>sys.stderr, "Child was terminated by signal", -retcode + else: + print >>sys.stderr, "Child returned", retcode +except OSError as e: + print >>sys.stderr, "Execution failed:", e + +try: + retcode = subprocess.call("ifup "+basename , shell=True) + if retcode < 0: + print >>sys.stderr, "Child was terminated by signal", -retcode + else: + print >>sys.stderr, "Child returned", retcode +except OSError as e: + print >>sys.stderr, "Execution failed:", e + + --- linux-3.13.0.orig/debian/commit-templates/bumpabi +++ linux-3.13.0/debian/commit-templates/bumpabi @@ -0,0 +1,3 @@ +UBUNTU: Bump ABI + +Ignore: yes --- linux-3.13.0.orig/debian/commit-templates/config-updates +++ linux-3.13.0/debian/commit-templates/config-updates @@ -0,0 +1,15 @@ +# +# This template is used for commit messages that don't need to +# show up in debian/changelog. Administrative stuff like config +# updates, ABI bumps, etc. Setting 'Ignore: yes' prevents +# 'debian/rules insertchanges' from inserting this commit meesage +# as a changelog entry. +# +# Please give a one-line description of the config change followed +# by a detailed explanation if necessary + +UBUNTU: [Config] XXXX + +# BugLink: http://bugs.launchpad.net/bugs/ +# Ignore: yes +# Other text below here. --- linux-3.13.0.orig/debian/commit-templates/external-driver +++ linux-3.13.0/debian/commit-templates/external-driver @@ -0,0 +1,20 @@ +# Ubuntu external driver commit. +# +# NOTE: This gets reformatted for README.Ubuntu-External-Drivers and +# debian/changelog. +# +# This is only needed when a driver is added, updated or removed. It is +# not needed when patches or fixes are applied to the driver. If the +# driver is being removed, add the line: +# +# Removing: yes +# +# to the commit, and you can remove all other tags (except UBUNTU:). +# +UBUNTU: + +ExternalDriver: +Description: +Url: +Mask: +Version: --- linux-3.13.0.orig/debian/commit-templates/missing-modules +++ linux-3.13.0/debian/commit-templates/missing-modules @@ -0,0 +1,3 @@ +UBUNTU: build/modules: Add modules that have intentionally gone missing + +Ignore: yes --- linux-3.13.0.orig/debian/commit-templates/newrelease +++ linux-3.13.0/debian/commit-templates/newrelease @@ -0,0 +1,3 @@ +UBUNTU: Start new release + +Ignore: yes --- linux-3.13.0.orig/debian/commit-templates/sauce-patch +++ linux-3.13.0/debian/commit-templates/sauce-patch @@ -0,0 +1,40 @@ +# Ubuntu commit template. +# +# NOTE: This gets reformatted for debian/changelog +# +# +# SAUCE refers to the fact that this patch might not go upstream, but we need to +# carry it to successive releases. In most cases you DONOT want to use this +# template. +# +# An example of a SAUCE patch is the ACPI DSDT-in-initramfs patch which has been +# refused upstream, but still provides useful functionality to users with broken +# BIOSes. +# +#------------------------------------------------------------------------- +# +# The initial UBUNTU is a flag that this is an Ubuntu commit. It will be +# referenced to the Author in the debian/changelog entry. 
+# +# The text following is the short message that will be placed in the +# changelog. Extra text on the following lines will be ignored, but left +# in the git commit. Lines with # will be ignored in the commit. +# +# OriginalAuthor allows for alternate attribution. +# +# OriginalLocation allows for a URL or description of where the patch came +# from. +# +# BugLink is a URL to a Malone bug. +# +# Ignore: yes will keep this commit from showing up in the changelog. +# +UBUNTU: SAUCE: + + + +# OriginalAuthor: +# OriginalLocation: +# BugLink: http://bugs.launchpad.net/bugs/ +# Ignore: yes +# Other text below here. --- linux-3.13.0.orig/debian/commit-templates/upstream-patch +++ linux-3.13.0/debian/commit-templates/upstream-patch @@ -0,0 +1,27 @@ +# Ubuntu commit template. +# +# NOTE: This gets reformatted for debian/changelog +# +# The initial UBUNTU is a flag that this is an Ubuntu commit. It will be +# referenced to the Author in the debian/changelog entry. +# +# The text following is the short message that will be placed in the +# changelog. Extra text on the following lines will be ignored, but left +# in the git commit. Lines with # will be ignored in the commit. +# +# OriginalAuthor allows for alternate attribution. +# +# OriginalLocation allows for a URL or description of where the patch came +# from. +# +# BugLink is a URL to a Malone bug. +# +# Ignore: yes will keep this commit from showing up in the changelog. +# +UBUNTU: [Upstream] + +# OriginalAuthor: +# OriginalLocation: +# BugLink: http://bugs.launchpad.net/bugs/ +# Ignore: yes +# Other text below here. --- linux-3.13.0.orig/debian/compat +++ linux-3.13.0/debian/compat @@ -0,0 +1 @@ +5 --- linux-3.13.0.orig/debian/control +++ linux-3.13.0/debian/control @@ -0,0 +1,1356 @@ +Source: linux +Section: devel +Priority: optional +Maintainer: Ubuntu Kernel Team +Standards-Version: 3.9.4.0 +Build-Depends: debhelper (>= 5), cpio, module-init-tools, kernel-wedge (>= 2.24ubuntu1), makedumpfile [amd64 i386], libelf-dev, libnewt-dev, libiberty-dev, rsync, libdw-dev, libpci-dev, dpkg (>= 1.16.0~ubuntu4), pkg-config, flex, bison, libunwind8-dev, openssl, libaudit-dev, bc, python-dev, gawk, device-tree-compiler [powerpc], u-boot-tools [powerpc], libc6-dev-ppc64 [powerpc] +Build-Depends-Indep: xmlto, docbook-utils, ghostscript, transfig, bzip2, sharutils, asciidoc +Vcs-Git: http://kernel.ubuntu.com/git-repos/ubuntu/ubuntu-trusty.git +XS-Testsuite: autopkgtest +#XS-Testsuite-Depends: gcc-4.7 binutils + +Package: linux-source-3.13.0 +Architecture: all +Section: devel +Priority: optional +Provides: linux-source, linux-source-3 +Depends: ${misc:Depends}, binutils, bzip2, coreutils | fileutils (>= 4.0) +Recommends: libc-dev, gcc, make +Suggests: libncurses-dev | ncurses-dev, kernel-package, libqt3-dev +Description: Linux kernel source for version 3.13.0 with Ubuntu patches + This package provides the source code for the Linux kernel version + 3.13.0. + . + This package is mainly meant for other packages to use, in order to build + custom flavours. + . + If you wish to use this package to create a custom Linux kernel, then it + is suggested that you investigate the package kernel-package, which has + been designed to ease the task of creating kernel image packages. + . + If you are simply trying to build third-party modules for your kernel, + you do not want this package. Install the appropriate linux-headers + package instead. 
+ +Package: linux-doc +Architecture: all +Section: doc +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-doc-3 +Replaces: linux-doc-3 +Description: Linux kernel specific documentation for version 3.13.0 + This package provides the various documents in the 3.13.0 kernel + Documentation/ subdirectory. These document kernel subsystems, APIs, device + drivers, and so on. See + /usr/share/doc/linux-doc/00-INDEX for a list of what is + contained in each file. + +Package: linux-headers-3.13.0-54 +Architecture: all +Multi-Arch: foreign +Section: devel +Priority: optional +Depends: ${misc:Depends}, coreutils | fileutils (>= 4.0) +Description: Header files related to Linux kernel version 3.13.0 + This package provides kernel header files for version 3.13.0, for sites + that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details + +Package: linux-libc-dev +Architecture: i386 amd64 armhf arm64 x32 powerpc ppc64el +Depends: ${misc:Depends} +Conflicts: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), amd64-libs-dev (<= 1.1), linux-kernel-headers +Replaces: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), linux-kernel-headers, libdrm-dev +Provides: linux-kernel-headers +Multi-Arch: same +Description: Linux Kernel Headers for development + This package provides headers from the Linux kernel. These headers + are used by the installed headers for GNU glibc and other system + libraries. They are NOT meant to be used to build third-party modules for + your kernel. Use linux-headers-* packages for that. + +Package: linux-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Replaces: linux-tools (<= 2.6.32-16.25), linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Description: Linux kernel version specific tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PGKVER. + +Package: linux-tools-3.13.0-54 +Architecture: i386 amd64 armhf arm64 powerpc ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-tools-common +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + You probabally want to install linux-tools-3.13.0-54-. + +Package: linux-cloud-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Breaks: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13) +Conflicts: linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Replaces: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13), linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Depends: ${misc:Depends} +Description: Linux kernel version specific cloud tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools for cloud tools for version PGKVER. 
+ +Package: linux-cloud-tools-3.13.0-54 +Architecture: i386 amd64 armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-cloud-tools-common +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud tools for version 3.13.0-54 on + 64 bit x86. + You probabally want to install linux-cloud-tools-3.13.0-54-. + + +Package: linux-image-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] | grub-ieee1275 [ppc64el] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. 
+ +Package: linux-image-3.13.0-54-generic-dbgsym +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 armhf arm64 ppc64el +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: flash-kernel [armhf] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic-lpae +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic-lpae, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . 
+ Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-lpae-dbgsym +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic-lpae +XC-Package-Type: udeb +Section: debian-installer +Architecture: armhf +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-lowlatency +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . 
+ You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-lowlatency, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-lowlatency-dbgsym +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-lowlatency +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500 +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500 +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500mc +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500mc, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500mc-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500mc +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-smp +Description: Linux kernel image for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-emb +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-emb, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-emb-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-emb +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-smp +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: kernel-image-3.13.0-54-generic-di +Package-Type: udeb +Provides: kernel-image, efi-modules, ext3-modules, ext4-modules +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: extra +Description: Linux kernel binary image for the Debian installer + This package contains the kernel image for the Debian installer + boot images. It does _not_ provide a usable kernel for your full + Debian system. 
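The packages that follow are udebs: minimal module packages consumed only by debian-installer, each tied to one kernel ABI through its Kernel-Version field and stitched together through virtual names such as nic-modules or scsi-modules. A small sketch of inspecting one of them with dpkg-deb (the .udeb filename here is hypothetical; substitute whatever the build actually produced):

  dpkg-deb --field nic-modules-3.13.0-54-generic-di_3.13.0-54.91_amd64.udeb \
      Package Provides Depends Kernel-Version
  dpkg-deb --contents nic-modules-3.13.0-54-generic-di_3.13.0-54.91_amd64.udeb | head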
+ +Package: nic-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: nic-modules +Depends: kernel-image-3.13.0-54-generic-di, nic-shared-modules-3.13.0-54-generic-di, virtio-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Network interface support + +Package: nic-shared-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: nic-shared-modules +Depends: kernel-image-3.13.0-54-generic-di, crypto-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: nic shared modules + This package contains modules which support nic modules + +Package: serial-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: serial-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Serial port support + +Package: ppp-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: ppp-modules +Depends: kernel-image-3.13.0-54-generic-di, nic-shared-modules-3.13.0-54-generic-di, serial-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: PPP (serial port) networking support + +Package: pata-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: pata-modules +Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: PATA support modules + +Package: firewire-core-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: firewire-core-modules +Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Firewire (IEEE-1394) Support + +Package: scsi-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: scsi-modules +Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: SCSI storage support + +Package: plip-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: plip-modules +Depends: kernel-image-3.13.0-54-generic-di, nic-shared-modules-3.13.0-54-generic-di, parport-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: PLIP (parallel port) networking support + +Package: floppy-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: floppy-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Floppy driver support + +Package: fat-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: fat-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: FAT filesystem support + This includes Windows FAT and VFAT support. 
+ +Package: nfs-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: nfs-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: NFS filesystem drivers + Includes the NFS client driver, and supporting modules. + +Package: md-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: md-modules, crypto-dm-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Multi-device support (raid, device-mapper, lvm) + +Package: multipath-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: multipath-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: extra +Description: DM-Multipath support + This package contains modules for device-mapper multipath support. + +Package: usb-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: usb-modules +Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Core USB support + +Package: pcmcia-storage-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: pcmcia-storage-modules +Depends: kernel-image-3.13.0-54-generic-di, scsi-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: PCMCIA storage support + +Package: fb-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: fb-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Framebuffer modules + +Package: input-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: input-modules +Depends: kernel-image-3.13.0-54-generic-di, usb-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Support for various input methods + +Package: mouse-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: mouse-modules +Depends: kernel-image-3.13.0-54-generic-di, input-modules-3.13.0-54-generic-di, usb-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: extra +Description: Mouse support + This package contains mouse drivers for the Linux kernel. 
+ +Package: irda-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: irda-modules +Depends: kernel-image-3.13.0-54-generic-di, nic-shared-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Support for Infrared protocols + +Package: parport-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: parport-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: Parallel port support + +Package: nic-pcmcia-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: nic-pcmcia-modules +Depends: kernel-image-3.13.0-54-generic-di, nic-shared-modules-3.13.0-54-generic-di, nic-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: PCMCIA network interface support + +Package: pcmcia-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: pcmcia-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: PCMCIA Modules + +Package: nic-usb-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: nic-usb-modules +Depends: kernel-image-3.13.0-54-generic-di, nic-shared-modules-3.13.0-54-generic-di, usb-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: USB network interface support + +Package: sata-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: sata-modules +Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: standard +Description: SATA storage support + +Package: crypto-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: crypto-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: extra +Description: crypto modules + This package contains crypto modules. + +Package: squashfs-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: squashfs-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: extra +Description: squashfs modules + This package contains squashfs modules. + +Package: speakup-modules-3.13.0-54-generic-di +Package-Type: udeb +Provides: speakup-modules +Depends: kernel-image-3.13.0-54-generic-di +Architecture: amd64 +Kernel-Version: 3.13.0-54-generic +Section: debian-installer +Priority: extra +Description: speakup modules + This package contains speakup modules. 
+
+Package: virtio-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: virtio-modules
+Depends: kernel-image-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: VirtIO Modules
+ Includes modules for VirtIO (virtual machine, generally kvm guests)
+
+Package: fs-core-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: fs-core-modules, ext2-modules, jfs-modules, reiserfs-modules, xfs-modules
+Depends: kernel-image-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: Base filesystem modules
+ This includes jfs, reiserfs and xfs.
+
+Package: fs-secondary-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: fs-secondary-modules, btrfs-modules, ntfs-modules, hfs-modules
+Depends: kernel-image-3.13.0-54-generic-di, fat-modules-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: Extra filesystem modules
+ This includes support for Windows NTFS and MacOS HFS/HFSPlus
+
+Package: storage-core-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: storage-core-modules, loop-modules
+Depends: kernel-image-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: Core storage support
+ Includes core SCSI, LibATA, USB-Storage. Also includes related block
+ devices for CD, Disk and Tape medium (and IDE Floppy).
+
+Package: block-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: block-modules, nbd-modules
+Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di, parport-modules-3.13.0-54-generic-di, virtio-modules-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: Block storage devices
+ This package contains the block storage devices, including DAC960 and
+ paride.
+
+Package: message-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: message-modules
+Depends: kernel-image-3.13.0-54-generic-di, storage-core-modules-3.13.0-54-generic-di, scsi-modules-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: Fusion and i2o storage modules
+ This package contains the fusion and i2o storage modules.
+
+Package: vlan-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: vlan-modules
+Depends: kernel-image-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: extra
+Description: vlan modules
+ This package contains vlan (802.1Q) modules.
+
+Package: ipmi-modules-3.13.0-54-generic-di
+Package-Type: udeb
+Provides: ipmi-modules
+Depends: kernel-image-3.13.0-54-generic-di
+Architecture: amd64
+Kernel-Version: 3.13.0-54-generic
+Section: debian-installer
+Priority: standard
+Description: ipmi modules
--- linux-3.13.0.orig/debian/control-scripts/extra-post
+++ linux-3.13.0/debian/control-scripts/extra-post
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -e
+
+case "$0::$1" in
+*.postinst::configure|*.postrm::remove)
+	depmod -a -F /boot/System.map-=V =V || true
+	for dir in "/etc/kernel/postinst.d" "/etc/kernel/postinst.d/=V"
+	do
+		if [ -d "$dir" ]; then
+			run-parts --verbose --exit-on-error --arg="=V" --arg="/boot/=K-=V" "$dir"
+		fi
+	done
+	;;
+esac
--- linux-3.13.0.orig/debian/control-scripts/headers-postinst
+++ linux-3.13.0/debian/control-scripts/headers-postinst
@@ -0,0 +1,126 @@
+#!/usr/bin/perl
+# -*- Mode: Cperl -*-
+# debian.postinst ---
+# Author : Manoj Srivastava ( srivasta@pilgrim.umass.edu )
+# Created On : Sat Apr 27 05:42:43 1996
+# Created On Node : melkor.pilgrim.umass.edu
+# Last Modified By : Manoj Srivastava
+# Last Modified On : Sat Aug 5 13:20:22 2006
+# Last Machine Used: glaurung.internal.golden-gryphon.com
+# Update Count : 45
+# Status : Unknown, Use with caution!
+# HISTORY :
+# Description :
+#
+#
+#
+# arch-tag: 1c716174-2f0a-476d-a626-a1322e62503a
+#
+
+
+$|=1;
+
+# Predefined values:
+my $version = "=V";
+my $kimage = "=K";
+my $package_name = "linux-image-$version";
+
+
+# Ignore all invocations except when called on to configure.
+exit 0 unless ($ARGV[0] && $ARGV[0] =~ /configure/);
+
+#known variables
+my $image_dest = "/";
+my $realimageloc = "/boot/";
+my $silent_modules = '';
+my $modules_base = '/lib/modules';
+my $CONF_LOC = '/etc/kernel-img.conf';
+# remove multiple leading slashes; make sure there is at least one.
+$realimageloc =~ s|^/*|/|o;
+$realimageloc =~ s|/+|/|o;
+
+chdir '/usr/src' or die "Could not chdir to /usr/src:$!";
+
+if (-r "$CONF_LOC" && -f "$CONF_LOC" ) {
+  if (open(CONF, "$CONF_LOC")) {
+    while (<CONF>) {
+      chomp;
+      s/\#.*$//g;
+      next if /^\s*$/;
+
+      $header_postinst_hook = "$1" if /^\s*header_postinst_hook\s*=\s*(\S+)/ig;
+    }
+    close CONF;
+  }
+}
+
+sub exec_script {
+  my $type = shift;
+  my $script = shift;
+  print STDERR "Running $type hook script $script.\n";
+  system ("$script $version $realimageloc$kimage-$version") &&
+    print STDERR "User $type hook script [$script] ";
+  if ($?) {
+    if ($? == -1) {
+      print STDERR "failed to execute: $!\n";
+    }
+    elsif ($? & 127) {
+      printf STDERR "died with signal %d, %s coredump\n",
+        ($? & 127), ($? & 128) ? 'with' : 'without';
+    }
+    else {
+      printf STDERR "exited with value %d\n", $? >> 8;
+    }
+    exit $?
>> 8; + } +} +sub run_hook { + my $type = shift; + my $script = shift; + if ($script =~ m,^/,) { + # Full path provided for the hook script + if (-x "$script") { + &exec_script($type,$script); + } + else { + die "The provided $type hook script [$script] could not be run.\n"; + } + } + else { + # Look for it in a safe path + for my $path ('/bin', '/sbin', '/usr/bin', '/usr/sbin') { + if (-x "$path/$script") { + &exec_script($type, "$path/$script"); + return 0; + } + } + # No luck + print STDERR "Could not find $type hook script [$script].\n"; + die "Looked in: '/bin', '/sbin', '/usr/bin', '/usr/sbin'\n"; + } +} + +## Run user hook script here, if any +if (-x "$header_postinst_hook") { + &run_hook("postinst", $header_postinst_hook); +} + +if (-d "/etc/kernel/header_postinst.d") { + print STDERR "Examining /etc/kernel/header_postinst.d.\n"; + system ("run-parts --verbose --exit-on-error --arg=$version " . + "--arg=$realimageloc$kimage-$version " . + "/etc/kernel/header_postinst.d") && + die "Failed to process /etc/kernel/header_postinst.d"; +} + +if (-d "/etc/kernel/header_postinst.d/$version") { + print STDERR "Examining /etc/kernel/header_postinst.d/$version.\n"; + system ("run-parts --verbose --exit-on-error --arg=$version " . + "--arg=$realimageloc$kimage-$version " . + "/etc/kernel/header_postinst.d/$version") && + die "Failed to process /etc/kernel/header_postinst.d/$version"; +} + +exit 0; + +__END__ --- linux-3.13.0.orig/debian/control-scripts/postinst +++ linux-3.13.0/debian/control-scripts/postinst @@ -0,0 +1,1110 @@ +#! /usr/bin/perl +# OriginalAuthor : Manoj Srivastava ( srivasta@pilgrim.umass.edu ) +# +# Customized for Ubuntu by: Ben Collins + +#use strict; #for debugging +use Cwd 'abs_path'; + +$|=1; + +# Predefined values: +my $version = "=V"; +my $link_in_boot = ""; # Should be empty, mostly +my $no_symlink = ""; # Should be empty, mostly +my $reverse_symlink = ""; # Should be empty, mostly +my $do_symlink = "Yes"; # target machine defined +my $do_boot_enable = "Yes"; # target machine defined +my $do_bootfloppy = "Yes"; # target machine defined +my $do_bootloader = "Yes"; # target machine defined +my $move_image = ''; # target machine defined +my $kimage = "=K"; # Should be empty, mostly +my $loader = "=L"; # lilo, silo, quik, palo, vmelilo, nettrom, arcboot or delo +my $image_dir = "/boot"; # where the image is located +my $clobber_modules = ''; # target machine defined +my $relative_links = ""; # target machine defined +my $initrd = "YES"; # initrd kernel +my $do_initrd = ''; # Normally we do not +my $use_hard_links = ''; # hardlinks do not work across fs boundaries +my $postinst_hook = ''; #Normally we do not +my $postrm_hook = ''; #Normally we do not +my $preinst_hook = ''; #Normally we do not +my $prerm_hook = ''; #Normally we do not +my $minimal_swap = ''; # Do not swap symlinks +my $ignore_depmod_err = ''; # normally we do not +my $kernel_arch = "=B"; +my $ramdisk = "/usr/sbin/update-initramfs"; # List of tools to create initial ram fs. 
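+# Note: the "=V", "=K", "=L" and "=B" tokens assigned above are
+# placeholders; the kernel packaging is assumed to substitute them with
+# the real version, image name, boot loader and architecture when the
+# maintainer script is generated for a particular flavour, roughly:
+#   sed -e 's/=V/3.13.0-54-generic/g' -e 's/=K/vmlinuz/g' \
+#       -e 's/=B/x86_64/g' postinst.in > postinst
+# (the command and file names above are illustrative, not taken from the
+# build rules).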
+my $notifier = "/usr/share/update-notifier/notify-reboot-required";
+my $package_name = "linux-image-$version";
+my $explicit_do_loader = 'Yes';
+
+my $Loader = "NoLOADER"; #
+$Loader = "LILO" if $loader =~ /^lilo/io;
+$Loader = "SILO" if $loader =~ /^silo/io;
+$Loader = "QUIK" if $loader =~ /^quik/io;
+$Loader = "yaboot" if $loader =~ /^yaboot/io;
+$Loader = "PALO" if $loader =~ /^palo/io;
+$Loader = "NETTROM" if $loader =~ /^nettrom/io;
+$Loader = "VMELILO" if $loader =~ /^vmelilo/io;
+$Loader = "ZIPL" if $loader =~ /^zipl/io;
+$Loader = "ELILO" if $loader =~ /^elilo/io;
+$Loader = "ARCBOOT" if $loader =~ /^arcboot/io;
+$Loader = "DELO" if $loader =~ /^delo/io;
+
+# This should not point to /tmp, because of security risks.
+my $temp_file_name = "/var/log/$loader" . "_log.$$";
+
+#known variables
+my $image_dest = "/";
+my $realimageloc = "/$image_dir/";
+my $have_conffile = "";
+my $silent_modules = '';
+my $silent_loader = '';
+my $warn_reboot = 'Yes'; # Warn that we are installing a version of
+                         # the kernel we are running
+
+my $modules_base = '/lib/modules';
+my $CONF_LOC = '/etc/kernel-img.conf';
+
+# Ignore all invocations except when called on to configure.
+exit 0 unless $ARGV[0] =~ /configure/;
+
+my $DEBUG = 0;
+
+# Do some preliminary sanity checks here to ensure we actually have a
+# valid image dir
+chdir('/') or die "could not chdir to /:$!\n";
+die "Internal Error: ($image_dir) is not a directory!\n"
+  unless -d $image_dir;
+
+# remove multiple leading slashes; make sure there is at least one.
+$realimageloc =~ s|^/*|/|o;
+$realimageloc =~ s|/+|/|o;
+die "Internal Error: ($realimageloc) is not a directory!\n"
+  unless -d $realimageloc;
+
+if (-r "$CONF_LOC" && -f "$CONF_LOC" ) {
+  if (open(CONF, "$CONF_LOC")) {
+    while (<CONF>) {
+      chomp;
+      s/\#.*$//g;
+      next if /^\s*$/;
+
+      $do_symlink = "" if /^\s*do_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $no_symlink = "" if /^\s*no_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $reverse_symlink = "" if /^\s*reverse_symlink\s*=\s*(no|false|0)\s*$/ig;
+      $link_in_boot = "" if /^\s*image_in_boot\s*=\s*(no|false|0)\s*$/ig;
+      $link_in_boot = "" if /^\s*link_in_boot\s*=\s*(no|false|0)\s*$/ig;
+      $move_image = "" if /^\s*move_image\s*=\s*(no|false|0)\s*$/ig;
+      $clobber_modules = '' if /^\s*clobber_modules\s*=\s*(no|false|0)\s*$/ig;
+      $do_boot_enable = '' if /^\s*do_boot_enable\s*=\s*(no|false|0)\s*$/ig;
+      $do_bootfloppy = '' if /^\s*do_bootfloppy\s*=\s*(no|false|0)\s*$/ig;
+      $relative_links = '' if /^\s*relative_links \s*=\s*(no|false|0)\s*$/ig;
+      $do_bootloader = '' if /^\s*do_bootloader\s*=\s*(no|false|0)\s*$/ig;
+      $explicit_do_loader = '' if /^\s*do_bootloader\s*=\s*(no|false|0)\s*$/ig;
+      $do_initrd = '' if /^\s*do_initrd\s*=\s*(no|false|0)\s*$/ig;
+      $use_hard_links = '' if /^\s*use_hard_links\s*=\s*(no|false|0)\s*$/ig;
+      $silent_modules = '' if /^\s*silent_modules\s*=\s*(no|false|0)\s*$/ig;
+      $silent_loader = '' if /^\s*silent_loader\s*=\s*(no|false|0)\s*$/ig;
+      $warn_reboot = '' if /^\s*warn_reboot\s*=\s*(no|false|0)\s*$/ig;
+      $minimal_swap = '' if /^\s*minimal_swap\s*=\s*(no|false|0)\s*$/ig;
+      $ignore_depmod_err = '' if /^\s*ignore_depmod_err\s*=\s*(no|false|0)\s*$/ig;
+
+      $do_symlink = "Yes" if /^\s*do_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $no_symlink = "Yes" if /^\s*no_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $reverse_symlink = "Yes" if /^\s*reverse_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $link_in_boot = "Yes" if /^\s*image_in_boot\s*=\s*(yes|true|1)\s*$/ig;
+      $link_in_boot = "Yes" if /^\s*link_in_boot\s*=\s*(yes|true|1)\s*$/ig;
+      $move_image = "Yes" if
/^\s*move_image\s*=\s*(yes|true|1)\s*$/ig; + $clobber_modules = "Yes" if /^\s*clobber_modules\s*=\s*(yes|true|1)\s*$/ig; + $do_boot_enable = "Yes" if /^\s*do_boot_enable\s*=\s*(yes|true|1)\s*$/ig; + $do_bootfloppy = "Yes" if /^\s*do_bootfloppy\s*=\s*(yes|true|1)\s*$/ig; + $do_bootloader = "Yes" if /^\s*do_bootloader\s*=\s*(yes|true|1)\s*$/ig; + $explicit_do_loader = "YES" if /^\s*do_bootloader\s*=\s*(yes|true|1)\s*$/ig; + $relative_links = "Yes" if /^\s*relative_links\s*=\s*(yes|true|1)\s*$/ig; + $do_initrd = "Yes" if /^\s*do_initrd\s*=\s*(yes|true|1)\s*$/ig; + $use_hard_links = "Yes" if /^\s*use_hard_links\s*=\s*(yes|true|1)\s*$/ig; + $silent_modules = 'Yes' if /^\s*silent_modules\s*=\s*(yes|true|1)\s*$/ig; + $silent_loader = 'Yes' if /^\s*silent_loader\s*=\s*(yes|true|1)\s*$/ig; + $warn_reboot = 'Yes' if /^\s*warn_reboot\s*=\s*(yes|true|1)\s*$/ig; + $minimal_swap = 'Yes' if /^\s*minimal_swap\s*=\s*(yes|true|1)\s*$/ig; + $ignore_depmod_err = 'Yes' if /^\s*ignore_depmod_err\s*=\s*(yes|true|1)\s*$/ig; + + $image_dest = "$1" if /^\s*image_dest\s*=\s*(\S+)/ig; + $postinst_hook = "$1" if /^\s*postinst_hook\s*=\s*(\S+)/ig; + $postrm_hook = "$1" if /^\s*postrm_hook\s*=\s*(\S+)/ig; + $preinst_hook = "$1" if /^\s*preinst_hook\s*=\s*(\S+)/ig; + $prerm_hook = "$1" if /^\s*prerm_hook\s*=\s*(\S+)/ig; + $ramdisk = "$1" if /^\s*ramdisk\s*=\s*(.+)$/ig; + } + close CONF; + $have_conffile = "Yes"; + } +} + + + +# For some versions of kernel-package, we had this warning in the +# postinst, but the rules did not really interpolate the value in. +# Here is a sanity check. +my $pattern = "=" . "I"; +$initrd=~ s/^$pattern$//; + +if ($link_in_boot) { + $image_dest = "/$image_dir/"; # same as realimageloc +} + +# Tack on at least one trainling / +$image_dest = "$image_dest/"; +$image_dest =~ s|^/*|/|o; +$image_dest =~ s|/+$|/|o; + +if (! -d "$image_dest") { + die "Expected Image Destination dir ($image_dest) to be a valid directory!\n"; +} + +# sanity +if (!($do_bootfloppy || $do_bootloader)) { + $do_boot_enable = ''; +} +if ($do_symlink && $no_symlink) { + warn "Both do_symlinks and no_symlinks options enabled; disabling no_symlinks\n"; + $no_symlink = 0; +} + +# most of our work is done in $image_dest (nominally /) +chdir("$image_dest") or die "could not chdir to $image_dest:$!\n"; + +# Paranoid check to make sure that the correct value is put in there +if (! $kimage) { $kimage = "vmlinuz"; } # Hmm. empty +elsif ($kimage =~ m/^b?uImage$/o) { $kimage = "vmlinuz"; } # these produce vmlinuz +elsif ($kimage =~ m/^b?zImage$/o) { $kimage = "vmlinuz"; } # these produce vmlinuz +elsif ($kimage =~ m/^[iI]mage$/o) { my $nop = $kimage; } +elsif ($kimage =~ m/^vmlinux$/o) { my $nop = $kimage; } +else { $kimage = "vmlinuz"; } # Default + +$ENV{KERNEL_ARCH}=$kernel_arch if $kernel_arch; + + +die "Internal Error: Could not find image (" . $realimageloc + . "$kimage-$version)\n" unless -e $realimageloc + . "$kimage-$version"; + +# search for the boot loader in the path +my $loader_exec; +($loader_exec = $loader) =~ s|.*/||; +my ($loaderloc) = grep -x, map "$_/$loader_exec", + map { length($_) ? $_ : "." 
} split /:/, $ENV{PATH}; + + +###################################################################### +###################################################################### +########### Test whether a relative symlinkwould be OK ####### +###################################################################### +###################################################################### +sub test_relative { + my %params = @_; + my $cwd; + + die "Internal Error: Missing Required paramater 'Old Dir' " + unless $params{'Old Dir'}; + die "Internal Error: Missing Required paramater New Dir' " + unless $params{'New Dir'}; + + + die "Internal Error: No such dir $params{'Old Dir'} " + unless -d $params{'Old Dir'}; + die "Internal Error: No such dir $params{'New Dir'} " + unless -d $params{'New Dir'}; + + # If the caller specified a test file, and it does not exist try and find + # another file to use in its stead. As we are installing kernels we can + # use any versioned file from this kernel which should be in the same place. + if (defined $params{'Test File'} and + ! -f $params{'Old Dir'} . '/' . $params{'Test File'}) { + + my @possible = glob($params{'Old Dir'} . "/*$version*"); + if ($#possible >= 0) { + $params{'Test File'} = `basename "$possible[0]"`; + chomp($params{'Test File'}); + warn "Test relative: selected $params{'Test File'}" + if $DEBUG; + } + } + + warn "Test relative: testing $params{'Old Dir'} -> $params{'New Dir'}" + if $DEBUG; + chomp($cwd = `pwd`); + chdir ($params{'New Dir'}) or die "Could not chdir to $params{'New Dir'}:$!"; + my $ok = 0; + $params{'Old Dir'} =~ s|^/*||o; + if (-d $params{'Old Dir'} ) { + if (defined $params{'Test File'}) { + if (-e $params{'Old Dir'} . $params{'Test File'}) { + $ok = 1; + } + } else { + $ok = 1; # well, backward compatibility + } + } + chdir ($cwd) or die "Could not chdir to $params{'New Dir'}:$!"; + return $ok; +} + +###################################################################### +###################################################################### +############ +###################################################################### +###################################################################### +# sub CanonicalizePath { +# my $path = join '/', @_; +# my @work = split '/', $path; +# my @out; +# my $is_absolute; + +# if (@work && $work[0] eq "") { +# $is_absolute = 1; shift @work; +# } + +# while (@work) { +# my $seg = shift @work; +# if ($seg eq "." || $seg eq "") { +# } +# elsif ($seg eq "..") { +# if (@out && $out[-1] ne "..") { +# pop @out; +# } +# else { +# # Leading "..", or "../..", etc. +# push @out, $seg; +# } +# } +# else { +# push @out, $seg; +# } +# } + +# unshift @out, "" if $is_absolute; +# return join('/', @out); +# } +###################################################################### +###################################################################### +############ +###################################################################### +###################################################################### + +sub spath { + my %params = @_; + + die "Missing Required paramater 'Old'" unless $params{'Old'}; + die "Missing Required paramater 'New'" unless $params{'New'}; + + my @olddir = split '/', `readlink -q -m $params{'Old'}`; + my @newdir = split '/', `readlink -q -m $params{'New'}`; + my @outdir = @olddir; + + my $out = ''; + my $i; + for ($i = 0; $i <= $#olddir && $i <= $#newdir; $i++) { + $out++ if ($olddir[$i] ne $newdir[$i]); + shift @outdir unless $out; + unshift @outdir, ".." 
if $out; + } + if ($#newdir > $#olddir) { + for ($i=0; $i < $#newdir; $i++) { + unshift @outdir, ".."; + } + } + return join ('/', @outdir); +} +###################################################################### +###################################################################### +############ +###################################################################### +###################################################################### + + +# This routine actually moves the kernel image +# From: $realimageloc/$kimage-$version (/boot/vmlinuz-2.6.12) +# To: $image_dest/$kimage-$version (/vmlinuz-2.6.12) +# Note that the image is moved to a versioned destination, but ordinary +# symlinks we create otherwise are not normally versioned +sub really_move_image { + my $src_dir = $_[0]; + my $target = $_[1]; + my $dest_dir = $_[2]; + + warn "Really move image: src_dir=$src_dir, target=$target,\n destdir=$dest_dir" + if $DEBUG; + if (-e "$target") { + # we should be in dir $dest_dir == $image_dest /, normally + rename("$target", "$target.$$") || + die "failed to move " . $dest_dir . "$target:$!"; + warn "mv $target $target.$$" if $DEBUG; + } + warn "mv -f $src_dir$target $target" if $DEBUG; + my $ret = system("mv -f " . $src_dir . "$target " . + " $target"); + if ($ret) { + die("Failed to move " . $src_dir . "$target to " + . $dest_dir . "$target"); + } + # Ok, now we may clobber the previous .old files + if (-e "$target.$$") { + rename("$target.$$", "$target.old") || + die "failed to move " . $dest_dir . "$target:$!"; + warn "mv $target.$$ $target " if $DEBUG; + } +} + +# Normally called after really_move_image; and only called if we asked for +# reversed link this routine reverses the symbolic link that is notmally +# created. Since the real kernel image has been moved over to +# $image_dest/$kimage-$version. So, this routine links +# From: $image_dest/$kimage-$version (/vmlinuz-2.6.12) +# To: $realimageloc/$kimage-$version (/boot/vmlinuz-2.6.12) +sub really_reverse_link { + my $src_dir = $_[0]; + my $link_name = $_[1]; + my $dest_dir = $_[2]; + warn "Really reverse link: src_dir=$src_dir, link name=$link_name\n" . + "\tdestdir=$dest_dir" if $DEBUG; + + my $Old = $dest_dir; + if (test_relative ('Old Dir' => $Old, 'New Dir' => $src_dir, + 'Test File' => "$link_name")) { + $Old =~ s|^/*||o; + } + # Special case is they are in the same dir + my $rel_path = spath('Old' => "$Old", 'New' => "$src_dir" ); + $Old ="" if $rel_path =~ m/^\s*$/o; + + if ($use_hard_links =~ m/YES/i) { + link($Old . "$link_name", $src_dir . "$link_name") || + die("Failed to link " . $dest_dir . "$link_name to " . $src_dir . + "$link_name:$!"); + warn "ln " . $Old . "$link_name " . $src_dir . "$link_name" if $DEBUG; + } + else { + symlink($Old . "$link_name", $src_dir . "$link_name") || + die("Failed to symbolic-link " . $dest_dir . "$link_name to " . $src_dir + . "$link_name:$!"); + warn "ln -s " . $Old . "$link_name " . $src_dir . "$link_name" if $DEBUG; + } +} + +# This routine is invoked if there is a symbolic link in place +# in $image_dest/$kimage -- so a symlink exists in the destination. +# What we are trying to determine is if we need to move the symbolic link over +# to the the .old location +sub move_p { + my $kimage = $_[0]; # Name of the symbolic link + my $image_dest = $_[1]; # The directory the links goes into + my $image_name = $_[2]; + my $src_dir = $_[3]; + my $force_move = 0; + warn "Move?: kimage=$kimage, image_dest=$image_dest, \n" . 
+ "\timage_name=$image_name, src_dir=$src_dir" if $DEBUG; + + if ($no_symlink || $reverse_symlink) { + # we do not want links, yet we have a symbolic link here! + warn "found a symbolic link in " . $image_dest . "$kimage \n" . + "even though no_symlink is defined\n" if $no_symlink; + warn "found a symbolic link in " . $image_dest . "$kimage \n" . + "even though reverse_symlink is defined\n" if $reverse_symlink; + # make sure we change this state of affairs + $force_move = 1; + return $force_move; + } + + warn "DEBUG: OK. We found symlink, and we should have a symlink here.\n" + if $DEBUG; + my $vmlinuz_target = readlink "$kimage"; + my $real_target = ''; + my $target = `readlink -q -m "${realimageloc}${kimage}-${version}"`; + $real_target = abs_path($vmlinuz_target) if defined($vmlinuz_target); + + if (!defined($vmlinuz_target) || ! -f "$real_target") { + # what, a dangling symlink? + warn "The link " . $image_dest . "$kimage is a dangling link" . + "to $real_target\n"; + $force_move = 1; + return $force_move; + } + + + warn "DEBUG: The link $kimage points to ($vmlinuz_target)\n" if $DEBUG; + warn "DEBUG: ($vmlinuz_target) is really ($real_target)\n" if $DEBUG; + my $cwd; + chomp ($cwd=`pwd`); + if ($vmlinuz_target !~ m|^/|o) { + $vmlinuz_target = $cwd . "/" . $vmlinuz_target; + $vmlinuz_target =~ s|/+|/|o; + } + $vmlinuz_target = `readlink -q -m $vmlinuz_target`; + + if ("$vmlinuz_target" ne "$target") { + warn "DEBUG: We need to handle this.\n" if $DEBUG; + if ($minimal_swap) { + warn "DEBUG: Minimal swap.\n" if $DEBUG; + if (-l "$kimage.old") { + warn "DEBUG: There is an old link at $kimage.old\n" if $DEBUG; + my $old_target = readlink "$kimage.old"; + my $real_old_target = ''; + $real_old_target=abs_path($old_target) if defined ($old_target); + + if ($real_old_target && -f "$real_old_target") { + if ($old_target !~ m|^/|o) { + $old_target = $cwd . "/" . $old_target; + $old_target =~ s|/+|/|o; + } + $old_target = `readlink -q -m $old_target`; + if ("$old_target" ne "$target") { + $force_move = 1; + warn "DEBUG: Old link ($old_target) does not point to us ($target)\n" + if $DEBUG; + } + else { # The .old points to the current + warn "$kimage.old --> $target -- doing nothing"; + $force_move = 0; + } + } + else { + warn "DEBUG: Well, the old link does not exist -- so we move\n" + if $DEBUG; + $force_move = 1; + } + } + else { + warn "DEBUG: No .old link -- OK to move\n" + if $DEBUG; + $force_move = 1; + } + } + else { + warn "DEBUG: ok, minimal swap is no-- so we move.\n" + if $DEBUG; + $force_move = 1; + } + } + else { # already have proper link + warn "$kimage($vmlinuz_target) points to $target ($real_target) -- doing nothing"; + $force_move = 0; + } + return $force_move; +} + + +# This routine moves the symbolic link around (/vmlinuz -> /vmlinuz.old) +# It pays attention to whether we should the fact whether we should be using +# hard links or not. +sub really_move_link { + my $kimage = $_[0]; # Name of the symbolic link + my $image_dest = $_[1]; # The directory the links goes into + my $image_name = $_[2]; + my $src_dir = $_[3]; + warn "really_move_link: kimage=$kimage, image_dest=$image_dest\n" . + "\t image_name=$image_name, src_dir=$src_dir" if $DEBUG; + + # don't clobber $kimage.old quite yet + rename("$kimage", "$kimage.$$") || + die "failed to move " . $image_dest . 
"$kimage:$!"; + warn "mv $kimage $kimage.$$" if $DEBUG; + my $Old = $src_dir; + my $cwd; + + chomp($cwd=`pwd`); + if (test_relative ('Old Dir' => $Old, 'New Dir' => $cwd, + 'Test File' => "$image_name")) { + $Old =~ s|^/*||o; + } + # Special case is they are in the same dir + my $rel_path = spath('Old' => "$Old", 'New' => "$cwd" ); + $Old ="" if $rel_path =~ m/^\s*$/o; + + if ($use_hard_links =~ m/YES/i) { + warn "ln ${Old}${image_name} $kimage" if $DEBUG; + if (! link("${Old}${image_name}", "$kimage")) { + rename("$kimage.$$", "$kimage"); + die("Failed to link ${Old}${image_name} to " . + "${image_dest}${kimage}:$!"); + } + } + else { + warn "ln -s ${Old}${image_name} $kimage" if $DEBUG; + if (! symlink("${Old}${image_name}", "$kimage")) { + rename("$kimage.$$", "$kimage"); + die("Failed to symbolic-link ${Old}${image_name} to " . + "${image_dest}${kimage}:$!"); + } + } + + # Ok, now we may clobber the previous .old file + if (-l "$kimage.old" || ! -e "$kimage.old" ) { + rename("$kimage.$$", "$kimage.old"); + warn "mv $kimage.$$ $kimage.old" if $DEBUG; + } + else { + warn "$kimage.old is not a symlink, not clobbering\n"; + warn "rm $kimage.$$"; + unlink "$kimage.$$" if $DEBUG; + } +} + +# This routine handles a request to do symlinks, but there is no +# symlink file already there. Either we are supposed to use copy, or we are +# installing on a pristine system, or the user does not want symbolic links at +# all. We use a configuration file to tell the last two cases apart, creating +# a config file if needed. +sub handle_missing_link { + my $kimage = $_[0]; # Name of the symbolic link + my $image_dest = $_[1]; # The directory the links goes into + my $image_name = $_[2]; + my $src_dir = $_[3]; + warn "handle_missing_link: kimage=$kimage, image_dest=$image_dest\n" . + "\t image_name=$image_name, src_dir=$src_dir" if $DEBUG; + + if ($no_symlink) { + warn "cp -a --backup=t $realimageloc$image_name $kimage" if $DEBUG; + my $ret = system("cp -a --backup=t " . $realimageloc . + "$image_name " . " $kimage"); + if ($ret) { + die("Failed to copy " . $realimageloc . "$image_name to " + . $image_dest . "$kimage"); + } + } + elsif ($reverse_symlink) { + warn "mv -f $realimageloc$image_name $kimage" if $DEBUG; + my $ret = system("mv -f " . $realimageloc . "$image_name " + . "$kimage"); + if ($ret) { + die("Failed to move " . $realimageloc . "$image_name to " + . $image_dest . "$kimage"); + } + } + else { + if (! $have_conffile) { + my $ret; + my $answer=''; + $do_symlink = "Yes"; + + if (open(CONF, ">$CONF_LOC")) { + print CONF "# Kernel Image management overrides\n"; + print CONF "# See kernel-img.conf(5) for details\n"; + if ($loader =~ /palo/i) { + print CONF "link_in_boot = Yes\n"; + print CONF "do_symlinks = Yes\n"; + print CONF "relative_links = Yes\n"; + print CONF "do_bootloader = No\n"; + } else { + print CONF "do_symlinks = $do_symlink\n"; + } + close CONF; + } + $have_conffile = "Yes"; + } + } + + if (! $no_symlink && $do_symlink =~ /Yes/i) { + my $Old = $realimageloc; + my $New = $image_dest; + my $Name = "$image_name"; + my $Link_Dest = "$kimage"; + + if ($reverse_symlink) { + $Old = $image_dest; + $New = $realimageloc; + $Name = "$kimage"; + $Link_Dest = $realimageloc . "$image_name"; + } + if (test_relative ('Old Dir' => $Old, + 'New Dir' => $New, + 'Test File' => $Name)) { + $Old =~ s|^/*||o; + } + # Special case is they are in the same dir + my $rel_path = spath('Old' => "$Old", 'New' => "$New" ); + $Old ="" if $rel_path =~ m/^\s*$/o; + + symlink($Old . 
"$Name", "$Link_Dest") || + die("Failed to symbolic-link ${Old}$Name to $Link_Dest:$!"); + warn "ln -s ${Old}$Name $Link_Dest" if $DEBUG; + + } +} + +# This routine handles the rest of the cases, where the user has requested +# non-traditional handling, like using cp, or reverse symlinks, or hard links. +sub handle_non_symlinks { + my $kimage = $_[0]; # Name of the symbolic link + my $image_dest = $_[1]; # The directory the links goes into + my $image_name = $_[2]; + my $src_dir = $_[3]; + warn "handle_non_link: kimage=$kimage, image_dest=$image_dest\n" . + "\t image_name=$image_name, src_dir=$src_dir" if $DEBUG; + + # Save the current image. We do this in all four cases + rename("$kimage", "$kimage.$$") || + die "failed to move " . $image_dest . "$kimage:$!"; + warn "mv $kimage $kimage.$$" if $DEBUG; + + ##,#### + # case One + #`#### + if ($no_symlink) { + # Maybe /$image_dest is on a dos system? + warn "cp -a --backup=t $realimageloc$image_name $kimage" if $DEBUG; + my $ret = system("cp -a --backup=t " . $realimageloc + . "$image_name " . "$kimage"); + if ($ret) { + if (-e "$kimage.$$") { + rename("$kimage.$$", "$kimage"); + warn "mv $kimage.$$ $kimage" if $DEBUG; + } + die("Failed to copy " . $realimageloc . "$image_name to " + . $image_dest . "$kimage"); + } + } + ##,#### + # case Two + #`#### + elsif ($reverse_symlink) { # Maybe /$image_dest is on a dos system? + warn "mv -f $realimageloc$image_name $kimage" if $DEBUG; + my $ret = system("mv -f " . $realimageloc . "$image_name " + . $image_dest . "$kimage"); + if ($ret) { + if (-e "$kimage.$$") { + rename("$kimage.$$", "$kimage"); + warn "mv $kimage.$$ $kimage" if $DEBUG; + } + die("Failed to move " . $realimageloc . "$image_name to " + . $image_dest . "$kimage"); + } + my $Old = $image_dest; + if (test_relative ('Old Dir' => $Old, 'New Dir' => $realimageloc, + 'Test File' => "$kimage")) { + $Old =~ s|^/*||o; + } + # Special case is they are in the same dir + my $rel_path = spath('Old' => "$Old", 'New' => "$realimageloc" ); + $Old ="" if $rel_path =~ m/^\s*$/o; + + if ($use_hard_links =~ m/YES/i) { + warn "ln " . $Old . "$kimage " . $realimageloc . "$image_name" if $DEBUG; + if (! link($Old . "$kimage", $realimageloc . "$image_name")) { + warn "Could not link " . $image_dest . + "$kimage to $image_name :$!"; + } + } + else { + warn "ln -s " . $Old . "$kimage " . $realimageloc . "$image_name" if $DEBUG; + if (! symlink($Old . "$kimage", $realimageloc . "$image_name")) { + warn "Could not symlink " . $image_dest . + "$kimage to $image_name :$!"; + } + } + } + ##,#### + # case Three + #`#### + elsif ($use_hard_links =~ m/YES/i ) { + # Ok then. this ought to be a hard link, and hence fair game + # don't clobber $kimage.old quite yet + my $Old = $realimageloc; + my $cwd; + chomp($cwd=`pwd`); + if (test_relative ('Old Dir' => $Old, 'New Dir' => $cwd, + 'Test File' => "$image_name")) { + $Old =~ s|^/*||o; + } + # Special case is they are in the same dir + my $rel_path = spath('Old' => "$Old", 'New' => "$cwd" ); + $Old ="" if $rel_path =~ m/^\s*$/o; + + warn "ln " . $Old . "$image_name " . "$kimage" if $DEBUG; + if (! link($Old . "$image_name", "$kimage")) { + warn "mv $kimage.$$ $kimage" if $DEBUG; + rename("$kimage.$$", "$kimage"); + die("Failed to link " . $realimageloc . "$image_name to " + . $image_dest . "$kimage"); + } + } + ##,#### + # case Four + #`#### + else { + # We just use cp + warn "cp -a --backup=t $realimageloc$image_name $kimage" if $DEBUG; + my $ret = system("cp -a --backup=t " . $realimageloc + . "$image_name " . 
"$kimage"); + if ($ret) { + if (-e "$kimage.$$") { + warn "mv $kimage.$$ $kimage" if $DEBUG; + rename("$kimage.$$", "$kimage"); + } + die("Failed to copy " . $realimageloc . "$image_name to " + . $image_dest . "$kimage"); + } + } + # Ok, now we may clobber the previous .old file + warn "mv $kimage.$$ $kimage.old if -e $kimage.$$" if $DEBUG; + rename("$kimage.$$", "$kimage.old") if -e "$kimage.$$"; +} + +# This routine is responsible for setting up the symbolic links +# So, the actual kernel image lives in +# $realimageloc/$image_name (/boot/vmlinuz-2.6.12). +# This routine creates symbolic links in $image_dest/$kimage (/vmlinuz) +sub image_magic { + my $kimage = $_[0]; # Name of the symbolic link + my $image_dest = $_[1]; # The directory the links goes into + my $image_name = "$kimage-$version"; + my $src_dir = $realimageloc; + warn "image_magic: kimage=$kimage, image_dest=$image_dest\n" . + "\t image_name=$image_name, src_dir=$src_dir" if $DEBUG; + + # Well, in any case, if the destination (the symlink we are trying + # to create) is a directory, we should do nothing, except throw a + # diagnostic. + if (-d "$kimage" ) { + die ("Hmm. $kimage is a directory, which I did not expect. I am\n" . + "trying to create a symbolic link with that name linked to \n" . + "$image_dest . Since a directory exists here, my assumptions \n" . + "are way off, and I am aborting.\n" ); + exit (3); + } + + if ($move_image) { # Maybe $image_dest is in on dos, or something? + # source dir, link name, dest dir + really_move_image( $realimageloc, $image_name, $image_dest); + really_reverse_link($realimageloc, $image_name, $image_dest) + if $reverse_symlink; + return; + } + + if (-l "$kimage") { # There is a symbolic link + warn "DEBUG: There is a symlink for $kimage\n" if $DEBUG; + my $force_move = move_p($kimage, $image_dest, $image_name, $src_dir); + + if ($force_move) { + really_move_link($kimage, $image_dest, $image_name, $src_dir); + } + } + elsif (! -e "$kimage") { + # Hmm. Pristine system? How can that be? Installing from scratch? + # Or maybe the user does not want a symbolic link here. + # Possibly they do not want a link here. (we should be in / + # here[$image_dest, really] + handle_missing_link($kimage, $image_dest, $image_name, $src_dir); + } + elsif (-e "$kimage" ) { + # OK, $kimage exists -- but is not a link + handle_non_symlinks($kimage, $image_dest, $image_name, $src_dir); + } +} + +###################################################################### +###################################################################### +###################################################################### +###################################################################### + +# We may not have any modules installed +if ( -d "$modules_base/$version" ) { + print STDERR "Running depmod.\n"; + my $ret = system("depmod -a $version"); + if ($ret) { + print STDERR "Failed to run depmod\n"; + exit(1); + } +} + + + +sub find_initrd_tool { + my $hostversion = shift; + my $version = shift; + print STDERR "Finding valid ramdisk creators.\n"; + my @ramdisks = + grep { + my $args = + "$_ " . + "--supported-host-version=$hostversion " . + "--supported-target-version=$version " . + "1>/dev/null 2>&1" + ; + system($args) == 0; + } + split (/[:,\s]+/, $ramdisk); +} + +# The initrd symlink should probably be in the same dir that the +# symlinks are in +if ($initrd) { + my $success = 0; + + # Update-initramfs is called slightly different than mkinitrd and + # mkinitramfs. 
XXX It should really be made compatible with this stuff + # some how. + my $upgrading = 1; + if (! defined $ARGV[1] || ! $ARGV[1] || $ARGV[1] =~ m//og) { + $upgrading = 0; + } + my $ret = system("$ramdisk " . ($upgrading ? "-u" : "-c") . " -k " . $version . " >&2"); + $success = 1 unless $ret; + die "Failed to create initrd image.\n" unless $success; + if (! defined $ARGV[1] || ! $ARGV[1] || $ARGV[1] =~ m//og) { + image_magic("initrd.img", $image_dest); + } + else { + if (! -e "initrd.img") { + handle_missing_link("initrd.img", $image_dest, "initrd.img-$version", + $realimageloc); + } + else { + print STDERR + "Not updating initrd symbolic links since we are being updated/reinstalled \n"; + print STDERR + "($ARGV[1] was configured last, according to dpkg)\n"; + } + } + + if ($initrd && -l "initrd" ) { + unlink "initrd"; + } + + if ($initrd && -l "$image_dir/initrd" && ! $link_in_boot) { + unlink "$image_dir/initrd"; + } +} +else { # Not making an initrd emage + if (-l "initrd.img") { + # Ooh, last image was an initrd image? in any case, we should move it. + my $target = readlink "initrd.img"; + my $real_target = ''; + $real_target = abs_path($target) if defined ($target); + + if (!defined($target) || ! -f "$real_target") { + # Eh. dangling link. can safely be removed. + unlink("initrd.img"); + } else { + if (-l "initrd.img.old" || ! -e "initrd.img.old" ) { + rename("initrd.img", "initrd.img.old"); + } else { + warn "initrd.img.old is not a symlink, not clobbering\n"; + unlink("initrd.img"); + } + } + } +} + +# Warn of a reboot +if (-x $notifier) { + system($notifier); +} + +# Let programs know not to hibernate if the kernel that would be used for +# resume-from-hibernate is likely to differ from the currently running kernel. +system("mountpoint -q /var/run"); +if ($? eq 0) { + system("touch /var/run/do-not-hibernate"); +} + +# Only change the symlinks if we are not being upgraded +if (! defined $ARGV[1] || ! $ARGV[1] || $ARGV[1] =~ m//og) { + image_magic($kimage, $image_dest); +} +else { + if (! -e "$kimage") { + handle_missing_link($kimage, $image_dest, "$kimage-$version", + $realimageloc); + } + else { + print STDERR + "Not updating image symbolic links since we are being updated/reinstalled \n"; + print STDERR + "($ARGV[1] was configured last, according to dpkg)\n"; + } +} + +# We used to have System.* files in / +if (-e "/System.map" || -e "/System.old") { + unlink '/System.map' if -e '/System.map'; + unlink '/System.old' if -e '/System.old'; +} + +# creating some info about kernel and initrd +if ($DEBUG) { + my $ksize=sprintf("%.0f",(stat($realimageloc . + "$kimage-$version"))[7]/1024)."kB"; + my $initrdsize=''; + if ($initrd) { + $initrdsize=sprintf("%.0f",(stat($realimageloc . + "initrd.img-$version"))[7]/1024)."kB"; + } + + print STDERR <<"EOMSG"; +A new kernel image has been installed at $realimageloc$kimage-$version + (Size: $ksize) + +Symbolic links, unless otherwise specified, can be found in $image_dest + +EOMSG + ; + + if ($initrd) { + print STDERR <<"EOMSGA"; + + Initial rootdisk image: ${realimageloc}initrd.img-$version (Size: $initrdsize) +EOMSGA + ; + } +} + +# set the env var stem +$ENV{'STEM'} = "linux"; +sub exec_script { + my $type = shift; + my $script = shift; + print STDERR "Running $type hook script $script.\n"; + system ("$script $version $realimageloc$kimage-$version") && + print STDERR "User $type hook script [$script] "; + if ($?) { + if ($? == -1) { + print STDERR "failed to execute: $!\n"; + } + elsif ($? 
& 127) { + printf STDERR "died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + } + else { + printf STDERR "exited with value %d\n", $? >> 8; + } + exit $? >> 8; + } +} +sub run_hook { + my $type = shift; + my $script = shift; + if ($script =~ m,^/,) { + # Full path provided for the hook script + if (-x "$script") { + &exec_script($type,$script); + } + else { + die "The provided $type hook script [$script] could not be run.\n"; + } + } + else { + # Look for it in a safe path + for my $path ('/bin', '/sbin', '/usr/bin', '/usr/sbin') { + if (-x "$path/$script") { + &exec_script($type, "$path/$script"); + return 0; + } + } + # No luck + print STDERR "Could not find $type hook script [$script].\n"; + die "Looked in: '/bin', '/sbin', '/usr/bin', '/usr/sbin'\n"; + } +} + +my $options; +for (@ARGV) { + s,','\\'',g; + $options .= " '$_'"; +} +$ENV{'DEB_MAINT_PARAMS'}="$options"; + +## Run user hook script here, if any +if ($postinst_hook) { + &run_hook("postinst", $postinst_hook); +} + +if (-d "/etc/kernel/postinst.d") { + print STDERR "Examining /etc/kernel/postinst.d.\n"; + system ("run-parts --verbose --exit-on-error --arg=$version " . + "--arg=$realimageloc$kimage-$version " . + "/etc/kernel/postinst.d") && + die "Failed to process /etc/kernel/postinst.d"; +} + +if (-d "/etc/kernel/postinst.d/$version") { + print STDERR "Examining /etc/kernel/postinst.d/$version.\n"; + system ("run-parts --verbose --exit-on-error --arg=$version " . + "--arg=$realimageloc$kimage-$version " . + "/etc/kernel/postinst.d/$version") && + die "Failed to process /etc/kernel/postinst.d/$version"; +} + +LOADER: { + last unless $do_boot_enable; # Exit if explicitly asked to + + last if $loader =~ /silo/i; # SILO does not have to be executed. + last if $loader =~ /yaboot/i; # yaboot does not have to be executed. + last if $loader =~ /milo/i; # MILO does not have to be executed. + last if $loader =~ /nettrom/i; # NETTROM does not have to be executed. + last if $loader =~ /arcboot/i; # ARCBOOT does not have to be executed. + last if $loader =~ /delo/i; # DELO does not have to be executed. + last if $loader =~ /quik/i; # maintainer asked quik invocation to be ignored + + last unless $loaderloc; + last unless -x $loaderloc; + last unless $do_bootloader; + + if (-T "/etc/$loader.conf") { + # Trust and use the existing lilo.conf. + print STDERR "You already have a $Loader configuration in /etc/$loader.conf\n"; + my $ret = &run_lilo(); + exit $ret if $ret; + } +} + + +sub run_lilo (){ + my $ret; + # Try and figure out if the user really wants lilo to be run -- + # since the default is to run the boot laoder, which is ! grub -- but + # the user may be using grub now, and not changed the default. + + # So, if the user has explicitly asked for the loader to be run, or + # if there is no postinst hook, or if there is no grub installed -- + # we are OK. Or else, we ask. + if ($explicit_do_loader || (! ($postinst_hook && -x '/usr/sbin/grub'))) { + print STDERR "Running boot loader as requested\n"; + } else { + print STDERR "Ok, not running $loader\n"; + } + if ($loader =~ /^lilo/io or $loader =~ /vmelilo/io) { + print STDERR "Testing $loader.conf ... 
\n"; + unlink $temp_file_name; # security + $ret = system("$loaderloc -t >$temp_file_name 2>&1"); + if ($ret) { + print STDERR "Boot loader test failed\n"; + return $ret; + } + unlink "$temp_file_name"; + print STDERR "Testing successful.\n"; + print STDERR "Installing the "; + print STDERR "partition " if $loader =~ /^lilo/io; + print STDERR "boot sector... \n"; + } + + print STDERR "Running $loaderloc ... \n"; + if ($loader =~ /^elilo/io) { + $ret = system("$loaderloc 2>&1 | tee $temp_file_name"); + } else { + $ret = system("$loaderloc >$temp_file_name 2>&1"); + } + if ($ret) { + print STDERR "Boot loader failed to run\n"; + return $ret; + } + unlink $temp_file_name; + print STDERR "Installation successful.\n"; + return 0; +} + +exit 0; + +__END__ + --- linux-3.13.0.orig/debian/control-scripts/postrm +++ linux-3.13.0/debian/control-scripts/postrm @@ -0,0 +1,361 @@ +#! /usr/bin/perl +# -*- Mode: Cperl -*- +# image.postrm --- +# Author : Manoj Srivastava ( srivasta@glaurung.green-gryphon.com ) +# Created On : Sat May 15 11:05:13 1999 +# Created On Node : glaurung.green-gryphon.com +# Last Modified By : Manoj Srivastava +# Last Modified On : Wed Sep 13 11:26:19 2006 +# Last Machine Used: glaurung.internal.golden-gryphon.com +# Update Count : 57 +# Status : Unknown, Use with caution! +# HISTORY : +# Description : +# +# $Id: image.postrm,v 1.31 2003/10/07 16:24:20 srivasta Exp $ +# + + +# +#use strict; #for debugging +use Cwd 'abs_path'; + +$|=1; + +# Predefined values: +my $version = "=V"; +my $link_in_boot = ""; # Should be empty, mostly +my $no_symlink = ""; # Should be empty, mostly +my $reverse_symlink = ""; # Should be empty, mostly +my $do_symlink = "Yes"; # target machine defined +my $do_boot_enable = "Yes"; # target machine defined +my $do_bootfloppy = "Yes"; # target machine defined +my $do_bootloader = "Yes"; # target machine defined +my $move_image = ''; # target machine defined +my $kimage = "=K"; # Should be empty, mostly +my $loader = "=L"; # lilo, silo, quik, palo, vmelilo, or nettrom +my $image_dir = "/boot"; # where the image is located +my $clobber_modules = ''; # target machine defined +my $initrd = "YES"; # initrd kernel +my $do_initrd = ''; # Normally, we don't +my $warn_initrd = 'YES'; # Normally we do +my $use_hard_links = ''; # hardlinks do not work across fs boundaries +my $postinst_hook = ''; #Normally we do not +my $postrm_hook = ''; #Normally we do not +my $preinst_hook = ''; #Normally we do not +my $prerm_hook = ''; #Normally we do not +my $minimal_swap = ''; # Do not swap symlinks +my $ignore_depmod_err = ''; # normally we do not +my $relink_build_link = 'YES'; # There is no harm in checking the link +my $force_build_link = ''; # we shall not create a dangling link +my $kernel_arch = "=B"; +my $ramdisk = "/usr/sbin/update-initramfs"; +my $package_name = "linux-image-$version"; + +my $Loader = "NoLOADER"; # +$Loader = "LILO" if $loader =~ /^lilo/io; +$Loader = "SILO" if $loader =~ /^silo/io; +$Loader = "QUIK" if $loader =~ /^quik/io; +$Loader = "yaboot" if $loader =~ /^yaboot/io; +$Loader = "PALO" if $loader =~ /^palo/io; +$Loader = "NETTROM" if $loader =~ /^nettrom/io; +$Loader = "VMELILO" if $loader =~ /^vmelilo/io; +$Loader = "ZIPL" if $loader =~ /^zipl/io; +$Loader = "ELILO" if $loader =~ /^elilo/io; + + +# This should not point to /tmp, because of security risks. +my $temp_file_name = "/var/log/$loader" . 
"_log.$$"; + +#known variables +my @boilerplate = (); +my @silotemplate = (); +my @quiktemplate = (); +my @palotemplate = (); +my @vmelilotemplate = (); +my $bootdevice = ''; +my $rootdevice = ''; +my $rootdisk = ''; +my $rootpartition = ''; +my $image_dest = "/"; +my $realimageloc = "/$image_dir/"; +my $have_conffile = ""; +my $CONF_LOC = '/etc/kernel-img.conf'; +my $relative_links = ''; +my $silent_modules = ''; +my $silent_loader = ''; +my $warn_reboot = 'Yes'; # Warn that we are installing a version of + # the kernel we are running + +chdir('/') or die "could not chdir to /:$!\n"; +# remove multiple leading slashes; make sure there is at least one. +$realimageloc =~ s|^/*|/|o; +$realimageloc =~ s|/+|/|o; + + +if (-r "$CONF_LOC" && -f "$CONF_LOC" ) { + if (open(CONF, "$CONF_LOC")) { + while () { + chomp; + s/\#.*$//g; + next if /^\s*$/; + + $do_symlink = "" if /^\s*do_symlinks\s*=\s*(no|false|0)\s*$/ig; + $no_symlink = "" if /^\s*no_symlinks\s*=\s*(no|false|0)\s*$/ig; + $reverse_symlink = "" if /^\s*reverse_symlinks\s*=\s*(no|false|0)\s*$/ig; + $link_in_boot = "" if /^\s*image_in_boot\s*=\s*(no|false|0)\s*$/ig; + $link_in_boot = "" if /^\s*link_in_boot\s*=\s*(no|false|0)\s*$/ig; + $move_image = "" if /^\s*move_image\s*=\s*(no|false|0)\s*$/ig; + $clobber_modules = '' if /^\s*clobber_modules\s*=\s*(no|false|0)\s*$/ig; + $do_boot_enable = '' if /^\s*do_boot_enable\s*=\s*(no|false|0)\s*$/ig; + $do_bootfloppy = '' if /^\s*do_bootfloppy\s*=\s*(no|false|0)\s*$/ig; + $relative_links = '' if /^\s*relative_links \s*=\s*(no|false|0)\s*$/ig; + $do_bootloader = '' if /^\s*do_bootloader\s*=\s*(no|false|0)\s*$/ig; + $do_initrd = '' if /^\s*do_initrd\s*=\s*(no|false|0)\s*$/ig; + $warn_initrd = '' if /^\s*warn_initrd\s*=\s*(no|false|0)\s*$/ig; + $use_hard_links = '' if /^\s*use_hard_links\s*=\s*(no|false|0)\s*$/ig; + $silent_modules = '' if /^\s*silent_modules\s*=\s*(no|false|0)\s*$/ig; + $silent_loader = '' if /^\s*silent_loader\s*=\s*(no|false|0)\s*$/ig; + $warn_reboot = '' if /^\s*warn_reboot\s*=\s*(no|false|0)\s*$/ig; + $minimal_swap = '' if /^\s*minimal_swap\s*=\s*(no|false|0)\s*$/ig; + $ignore_depmod_err = '' if /^\s*ignore_depmod_err\s*=\s*(no|false|0)\s*$/ig; + $relink_build_link = '' if /^\s*relink_build_link\s*=\s*(no|false|0)\s*$/ig; + $force_build_link = '' if /^\s*force_build_link\s*=\s*(no|false|0)\s*$/ig; + + $do_symlink = "Yes" if /^\s*do_symlinks\s*=\s*(yes|true|1)\s*$/ig; + $no_symlink = "Yes" if /^\s*no_symlinks\s*=\s*(yes|true|1)\s*$/ig; + $reverse_symlink = "Yes" if /^\s*reverse_symlinks\s*=\s*(yes|true|1)\s*$/ig; + $link_in_boot = "Yes" if /^\s*image_in_boot\s*=\s*(yes|true|1)\s*$/ig; + $link_in_boot = "Yes" if /^\s*link_in_boot\s*=\s*(yes|true|1)\s*$/ig; + $move_image = "Yes" if /^\s*move_image\s*=\s*(yes|true|1)\s*$/ig; + $clobber_modules = "Yes" if /^\s*clobber_modules\s*=\s*(yes|true|1)\s*$/ig; + $do_boot_enable = "Yes" if /^\s*do_boot_enable\s*=\s*(yes|true|1)\s*$/ig; + $do_bootfloppy = "Yes" if /^\s*do_bootfloppy\s*=\s*(yes|true|1)\s*$/ig; + $do_bootloader = "Yes" if /^\s*do_bootloader\s*=\s*(yes|true|1)\s*$/ig; + $relative_links = "Yes" if /^\s*relative_links\s*=\s*(yes|true|1)\s*$/ig; + $do_initrd = "Yes" if /^\s*do_initrd\s*=\s*(yes|true|1)\s*$/ig; + $warn_initrd = "Yes" if /^\s*warn_initrd\s*=\s*(yes|true|1)\s*$/ig; + $use_hard_links = "Yes" if /^\s*use_hard_links\s*=\s*(yes|true|1)\s*$/ig; + $silent_modules = 'Yes' if /^\s*silent_modules\s*=\s*(yes|true|1)\s*$/ig; + $silent_loader = 'Yes' if /^\s*silent_loader\s*=\s*(yes|true|1)\s*$/ig; + $warn_reboot = 'Yes' if 
/^\s*warn_reboot\s*=\s*(yes|true|1)\s*$/ig; + $minimal_swap = 'Yes' if /^\s*minimal_swap\s*=\s*(yes|true|1)\s*$/ig; + $ignore_depmod_err = 'Yes' if /^\s*ignore_depmod_err\s*=\s*(yes|true|1)\s*$/ig; + $relink_build_link = 'Yes' if /^\s*relink_build_link\s*=\s*(yes|true|1)\s*$/ig; + $force_build_link = 'Yes' if /^\s*force_build_link\s*=\s*(yes|true|1)\s*$/ig; + + $image_dest = "$1" if /^\s*image_dest\s*=\s*(\S+)/ig; + $postinst_hook = "$1" if /^\s*postinst_hook\s*=\s*(\S+)/ig; + $postrm_hook = "$1" if /^\s*postrm_hook\s*=\s*(\S+)/ig; + $preinst_hook = "$1" if /^\s*preinst_hook\s*=\s*(\S+)/ig; + $prerm_hook = "$1" if /^\s*prerm_hook\s*=\s*(\S+)/ig; + $ramdisk = "$1" if /^\s*ramdisk\s*=\s*(.+)$/ig; + } + close CONF; + $have_conffile = "Yes"; + } +} + +if ($link_in_boot) { + $image_dest = "/$image_dir/"; + $image_dest =~ s|^/*|/|o; +} + +$image_dest = "$image_dest/"; +$image_dest =~ s|/+$|/|o; + +# The destdir may be gone by now. +if (-d "$image_dest") { + chdir("$image_dest") or die "could not chdir to $image_dest:$!\n"; +} + +# Paranoid check to make sure that the correct value is put in there +if (! $kimage) {$kimage = "vmlinuz"} # Hmm. empty +elsif ($kimage =~ m/^b?uImage$/o) {$kimage = "vmlinuz"} # these produce vmlinuz +elsif ($kimage =~ m/^b?zImage$/o) {$kimage = "vmlinuz"} # these produce vmlinuz +elsif ($kimage =~ m/^[iI]mage$/o) { my $nop = $kimage;} +elsif ($kimage =~ m/^vmlinux$/o) { my $nop = $kimage;} +else {$kimage = "vmlinuz"} # default + +$ENV{KERNEL_ARCH}=$kernel_arch if $kernel_arch; + + +###################################################################### +###################################################################### +############ +###################################################################### +###################################################################### +sub remove_sym_link { + my $bad_image = $_[0]; + + warn "Removing symbolic link $bad_image \n"; + if ($loader =~ /lilo/i) + { + warn "Unless you used the optional flag in lilo, \n"; + } + warn " you may need to re-run your boot loader" . ($loader ? "[$loader]":"") + . "\n"; + # Remove the dangling link + unlink "$bad_image"; +} + +###################################################################### +###################################################################### +############ +###################################################################### +###################################################################### +sub CanonicalizePath { + my $path = join '/', @_; + my @work = split '/', $path; + my @out; + my $is_absolute; + + if (@work && $work[0] eq "") { $is_absolute = 1; shift @work; } + + while (@work) { + my $seg = shift @work; + if ($seg eq "." || $seg eq "") { + } elsif ($seg eq "..") { + if (@out && $out[-1] ne "..") { + pop @out; + } else { + # Leading "..", or "../..", etc. + push @out, $seg; + } + } else { + push @out, $seg; + } + } + + unshift @out, "" if $is_absolute; + return join('/', @out); +} + +###################################################################### +###################################################################### +############ +###################################################################### +###################################################################### +# This removes dangling symlinks. What do we do about hard links? Surely a +# something with the nane $image_dest . "$kimage" ought not to be left behind? 
+sub image_magic { + my $kimage = $_[0]; + my $image_dest = $_[1]; + + if (-l "$kimage") { + # There is a symbolic link + my $force_move = 0; + my $vmlinuz_target = readlink "$kimage"; + my $real_target = ''; + $real_target = abs_path($vmlinuz_target) if defined ($vmlinuz_target); + if (!defined($vmlinuz_target) || ! -f "$real_target") { + # what, a dangling symlink? + warn "The link " . $image_dest . "$kimage is a damaged link\n"; + # Remove the dangling link + &remove_sym_link("$kimage"); + } + else { + my $canonical_target = CanonicalizePath("$vmlinuz_target"); + if (! -e $canonical_target) { + warn "The link " . $image_dest . "$kimage is a dangling link\n"; + &remove_sym_link("$kimage"); + } + } + } +} + +# set the env var stem +$ENV{'STEM'} = "linux"; + +sub exec_script { + my $type = shift; + my $script = shift; + print STDERR "Running $type hook script $script.\n"; + system ("$script $version $realimageloc$kimage-$version") && + print STDERR "User $type hook script [$script] "; + if ($?) { + if ($? == -1) { + print STDERR "failed to execute: $!\n"; + } + elsif ($? & 127) { + printf STDERR "died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + } + else { + printf STDERR "exited with value %d\n", $? >> 8; + } + } +} +sub run_hook { + my $type = shift; + my $script = shift; + if ($script =~ m,^/,) { + # Full path provided for the hook script + if (-x "$script") { + &exec_script($type,$script); + } + else { + warn "The provided $type hook script [$script] could not be run.\n"; + } + } + else { + # Look for it in a safe path + for my $path ('/bin', '/sbin', '/usr/bin', '/usr/sbin') { + if (-x "$path/$script") { + &exec_script($type, "$path/$script"); + return 0; + } + } + # No luck + print STDERR "Could not find $type hook script [$script].\n"; + warn "Looked in: '/bin', '/sbin', '/usr/bin', '/usr/sbin'\n"; + } +} + +my $options; +for (@ARGV) { + s,','\\'',g; + $options .= " '$_'"; +} +$ENV{'DEB_MAINT_PARAMS'}="$options"; + +## Run user hook script here, if any +if ($postrm_hook) { + &run_hook("postrm", $postrm_hook); +} +if (-d "/etc/kernel/postrm.d") { + warn "Examining /etc/kernel/postrm.d .\n"; + system ("run-parts --verbose --exit-on-error --arg=$version " . + "--arg=$realimageloc$kimage-$version " . + "/etc/kernel/postrm.d") && + die "Failed to process /etc/kernel/postrm.d"; +} +if (-d "/etc/kernel/postrm.d/$version") { + warn "Examining /etc/kernel/postrm.d/$version .\n"; + system ("run-parts --verbose --exit-on-error --arg=$version " . + "--arg=$realimageloc$kimage-$version " . + "/etc/kernel/postrm.d/$version") && + die "Failed to process /etc/kernel/postrm.d/$version"; +} + +# check and remove damaged and dangling symlinks +if ($ARGV[0] !~ /upgrade/) { + system("$ramdisk -d -k " . $version . " > /dev/null 2>&1"); + if (-f $realimageloc . "initrd.img-$version.bak") { + unlink $realimageloc . "initrd.img-$version.bak"; + } + image_magic($kimage, $image_dest); + image_magic($kimage . ".old", $image_dest); + image_magic("initrd.img", $image_dest) if $initrd; + image_magic("initrd.img.old", $image_dest) if $initrd; +} + +exit 0; + +__END__ + + + + + + --- linux-3.13.0.orig/debian/control-scripts/preinst +++ linux-3.13.0/debian/control-scripts/preinst @@ -0,0 +1,315 @@ +#! 
/usr/bin/perl +# -*- Mode: Cperl -*- +# image.preinst --- +# Author : Manoj Srivastava ( srivasta@tiamat.datasync.com ) +# Created On : Sun Jun 14 03:38:02 1998 +# Created On Node : tiamat.datasync.com +# Last Modified By : Manoj Srivastava +# Last Modified On : Sun Sep 24 14:04:42 2006 +# Last Machine Used: glaurung.internal.golden-gryphon.com +# Update Count : 99 +# Status : Unknown, Use with caution! +# HISTORY : +# Description : +# +# + +# +#use strict; #for debugging + +use Debconf::Client::ConfModule qw(:all); +version('2.0'); +my $capb=capb("backup"); + +$|=1; + +# Predefined values: +my $version = "=V"; +my $link_in_boot = ""; # Should be empty, mostly +my $no_symlink = ""; # Should be empty, mostly +my $reverse_symlink = ""; # Should be empty, mostly +my $do_symlink = "Yes"; # target machine defined +my $do_boot_enable = "Yes"; # target machine defined +my $do_bootfloppy = "Yes"; # target machine defined +my $do_bootloader = "Yes"; # target machine defined +my $move_image = ''; # target machine defined +my $kimage = "=K"; # Should be empty, mostly +my $loader = "=L"; # lilo, silo, quik, palo, vmelilo, nettrom + # or elilo +my $image_dir = "/boot"; # where the image is located +my $initrd = "YES"; # initrd kernel +my $use_hard_links = ''; # hardlinks do not wirk across fs boundaries +my $postinst_hook = ''; #Normally we do not +my $postrm_hook = ''; #Normally we do not +my $preinst_hook = ''; #Normally we do not +my $prerm_hook = ''; #Normally we do not +my $minimal_swap = ''; # Do not swap symlinks +my $ignore_depmod_err = ''; # normally we do not +my $relink_src_link = 'YES'; # There is no harm in checking the link +my $relink_build_link = 'YES'; # There is no harm in checking the link +my $force_build_link = ''; # There is no harm in checking the link +my $kernel_arch = "=B"; +my $ramdisk = "/usr/sbin/update-initramfs"; # List of tools to create initial ram fs. +my $package_name = "linux-image-$version"; + +my $Loader = "NoLOADER"; # +$Loader = "LILO" if $loader =~ /^lilo/io; +$Loader = "SILO" if $loader =~ /^silo/io; +$Loader = "QUIK" if $loader =~ /^quik/io; +$Loader = "yaboot" if $loader =~ /^yaboot/io; +$Loader = "PALO" if $loader =~ /^palo/io; +$Loader = "NETTROM" if $loader =~ /^nettrom/io; +$Loader = "VMELILO" if $loader =~ /^vmelilo/io; +$Loader = "ZIPL" if $loader =~ /^zipl/io; +$Loader = "ELILO" if $loader =~ /^elilo/io; + + +#known variables +my @boilerplate = (); +my @silotemplate = (); +my @quiktemplate = (); +my @palotemplate = (); +my @vmelilotemplate = (); +my $bootdevice = ''; +my $rootdevice = ''; +my $rootdisk = ''; +my $rootpartition = ''; +my $image_dest = "/"; +my $realimageloc = "/$image_dir/"; +my $have_conffile = ""; +my $CONF_LOC = '/etc/kernel-img.conf'; +my $relative_links = ''; +my $silent_loader = ''; +my $warn_reboot = ''; # Warn that we are installing a version of + # the kernel we are running + +my $modules_base = '/lib/modules'; + +die "Pre inst Internal error. Aborting." unless $version; + +exit 0 if $ARGV[0] =~ /abort-upgrade/; +exit 1 unless $ARGV[0] =~ /(install|upgrade)/; + +$arch = `uname -i`; +if ($arch =~ m/86/) { + system ("grep -q ' pae ' /proc/cpuinfo"); + if ($?) { + print STDERR "This kernel does not support a non-PAE CPU.\n"; + exit 1; + } +} + +# remove multiple leading slashes; make sure there is at least one. 
+$realimageloc =~ s|^/*|/|o;
+$realimageloc =~ s|/+|/|o;
+
+if (-r "$CONF_LOC" && -f "$CONF_LOC" ) {
+  if (open(CONF, "$CONF_LOC")) {
+    while (<CONF>) {
+      chomp;
+      s/\#.*$//g;
+      next if /^\s*$/;
+
+      $do_symlink = "" if /^\s*do_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $no_symlink = "" if /^\s*no_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $reverse_symlink = "" if /^\s*reverse_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $link_in_boot = "" if /^\s*image_in_boot\s*=\s*(no|false|0)\s*$/ig;
+      $link_in_boot = "" if /^\s*link_in_boot\s*=\s*(no|false|0)\s*$/ig;
+      $move_image = "" if /^\s*move_image\s*=\s*(no|false|0)\s*$/ig;
+      $do_boot_enable = '' if /^\s*do_boot_enable\s*=\s*(no|false|0)\s*$/ig;
+      $do_bootfloppy = '' if /^\s*do_bootfloppy\s*=\s*(no|false|0)\s*$/ig;
+      $do_bootloader = '' if /^\s*do_bootloader\s*=\s*(no|false|0)\s*$/ig;
+      $relative_links = '' if /^\s*relative_links \s*=\s*(no|false|0)\s*$/ig;
+      $use_hard_links = '' if /^\s*use_hard_links\s*=\s*(no|false|0)\s*$/ig;
+      $silent_loader = '' if /^\s*silent_loader\s*=\s*(no|false|0)\s*$/ig;
+      $warn_reboot = '' if /^\s*warn_reboot\s*=\s*(no|false|0)\s*$/ig;
+      $minimal_swap = '' if /^\s*minimal_swap\s*=\s*(no|false|0)\s*$/ig;
+      $ignore_depmod_err = '' if /^\s*ignore_depmod_err\s*=\s*(no|false|0)\s*$/ig;
+      $relink_src_link = '' if /^\s*relink_src_link\s*=\s*(no|false|0)\s*$/ig;
+      $relink_build_link = '' if /^\s*relink_build_link\s*=\s*(no|false|0)\s*$/ig;
+      $force_build_link = '' if /^\s*force_build_link\s*=\s*(no|false|0)\s*$/ig;
+
+      $do_symlink = "Yes" if /^\s*do_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $no_symlink = "Yes" if /^\s*no_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $reverse_symlink = "Yes" if /^\s*reverse_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $link_in_boot = "Yes" if /^\s*image_in_boot\s*=\s*(yes|true|1)\s*$/ig;
+      $link_in_boot = "Yes" if /^\s*link_in_boot\s*=\s*(yes|true|1)\s*$/ig;
+      $move_image = "Yes" if /^\s*move_image\s*=\s*(yes|true|1)\s*$/ig;
+      $do_boot_enable = "Yes" if /^\s*do_boot_enable\s*=\s*(yes|true|1)\s*$/ig;
+      $do_bootfloppy = "Yes" if /^\s*do_bootfloppy\s*=\s*(yes|true|1)\s*$/ig;
+      $do_bootloader = "Yes" if /^\s*do_bootloader\s*=\s*(yes|true|1)\s*$/ig;
+      $relative_links = "Yes" if /^\s*relative_links\s*=\s*(yes|true|1)\s*$/ig;
+      $use_hard_links = "Yes" if /^\s*use_hard_links\s*=\s*(yes|true|1)\s*$/ig;
+      $silent_loader = 'Yes' if /^\s*silent_loader\s*=\s*(yes|true|1)\s*$/ig;
+      $warn_reboot = 'Yes' if /^\s*warn_reboot\s*=\s*(yes|true|1)\s*$/ig;
+      $minimal_swap = 'Yes' if /^\s*minimal_swap\s*=\s*(yes|true|1)\s*$/ig;
+      $ignore_depmod_err = 'Yes' if /^\s*ignore_depmod_err\s*=\s*(yes|true|1)\s*$/ig;
+      $relink_src_link = 'Yes' if /^\s*relink_src_link\s*=\s*(yes|true|1)\s*$/ig;
+      $relink_build_link = 'Yes' if /^\s*relink_build_link\s*=\s*(yes|true|1)\s*$/ig;
+      $force_build_link = 'Yes' if /^\s*force_build_link\s*=\s*(yes|true|1)\s*$/ig;
+
+      $image_dest = "$1" if /^\s*image_dest\s*=\s*(\S+)/ig;
+      $postinst_hook = "$1" if /^\s*postinst_hook\s*=\s*(\S+)/ig;
+      $postrm_hook = "$1" if /^\s*postrm_hook\s*=\s*(\S+)/ig;
+      $preinst_hook = "$1" if /^\s*preinst_hook\s*=\s*(\S+)/ig;
+      $prerm_hook = "$1" if /^\s*prerm_hook\s*=\s*(\S+)/ig;
+      $ramdisk = "$1" if /^\s*ramdisk\s*=\s*(.+)$/ig;
+    }
+    close CONF;
+    $have_conffile = "Yes";
+    $have_conffile = "Yes"; # stop perl complaining
+  }
+}
+
+$ENV{KERNEL_ARCH}=$kernel_arch if $kernel_arch;
+
+# About to upgrade this package from version $2 TO THIS VERSION.
+# "prerm upgrade" has already been called for the old version of
+# this package.
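+# --- Editor's illustrative aside (not part of the upstream script) ----------
+# The note above leans on dpkg's maintainer-script convention: the preinst
+# is invoked as "preinst install [<old-version>]" or
+# "preinst upgrade <old-version>", so $ARGV[0] names the action and
+# $ARGV[1], when present, is the version being replaced.  A minimal,
+# hedged sketch of reading those arguments (variable names here are
+# illustrative only) might look like:
+#
+#   my ($action, $old_version) = @ARGV;
+#   if (defined $old_version && $action eq 'upgrade') {
+#       print STDERR "Upgrading over previously configured $old_version\n";
+#   }
+#   elsif ($action eq 'install') {
+#       print STDERR "Fresh install; no earlier version to consider\n";
+#   }
+# ----------------------------------------------------------------------------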
+ +sub find_initrd_tool { + my $hostversion = shift; + my $version = shift; + my @ramdisks = + grep { + my $args = + "$_ " . + "--supported-host-version=$hostversion " . + "--supported-target-version=$version " . + "1>/dev/null 2>&1" + ; + system($args) == 0; + } + split (/[:,\s]+/, $ramdisk); +} + +sub check { + my $version = shift; + my $lib_modules="$modules_base/$version"; + my $message = ''; + + if (-d "$lib_modules") { + opendir(DIR, $lib_modules) || die "can’t opendir $lib_modules: $!"; + my @children = readdir(DIR); + if ($#children > 1) { + my @dirs = grep { -d "$lib_modules/$_" } @children; + if ($#dirs > 1) { # we have subdirs + my $dir_message=''; + for my $dir (@dirs) { + if ($dir =~/kernel$/) { + $dir_message="An older install was detected.\n"; + } + else { + $dir_message="Module sub-directories were detected.\n" + unless $dir_message; + } + } + $message += $dir_message if $dir_message; + } + + my @links = grep { -l "$lib_modules/$_" } @children; + if ($#links > -1) { + my $links_message = ''; + for my $link (@links) { + next if ($link =~ /^build$/); + next if ($link =~ /^source$/); + $links_message = "Symbolic links were detected in $modules_base/$version.\n"; + } + $message += $links_message if $links_message; + } + my @files = grep { -f "$lib_modules/$_" } @children; + $message += "Additional files also exist in $modules_base/$version.\n" + if ($#files > -1); + } + } + else { $message .= "$lib_modules does not exist. ";} + return $message; +} + +if (-d "$modules_base/$version") { + my $errors=check($version); + warn "Info:\n$errors\n" if $errors; +} + +# set the env var stem +$ENV{'STEM'} = "linux"; + +sub exec_script { + my $type = shift; + my $script = shift; + print STDERR "Running $type hook script $script.\n"; + system ("$script $version $realimageloc$kimage-$version") && + print STDERR "User $type hook script [$script] "; + if ($?) { + if ($? == -1) { + print STDERR "failed to execute: $!\n"; + } + elsif ($? & 127) { + printf STDERR "died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + } + else { + printf STDERR "exited with value %d\n", $? >> 8; + } + exit $? >> 8; + } +} +sub run_hook { + my $type = shift; + my $script = shift; + if ($script =~ m,^/,) { + # Full path provided for the hook script + if (-x "$script") { + &exec_script($type,$script); + } + else { + die "The provided $type hook script [$script] could not be run.\n"; + } + } + else { + # Look for it in a safe path + for my $path ('/bin', '/sbin', '/usr/bin', '/usr/sbin') { + if (-x "$path/$script") { + &exec_script($type, "$path/$script"); + return 0; + } + } + # No luck + print STDERR "Could not find $type hook script [$script].\n"; + die "Looked in: '/bin', '/sbin', '/usr/bin', '/usr/sbin'\n"; + } +} + + +my $options; +for (@ARGV) { + s,','\\'',g; + $options .= " '$_'"; +} +$ENV{'DEB_MAINT_PARAMS'}="$options"; + +## Run user hook script here, if any +if (-x "$preinst_hook") { + &run_hook("preinst", $preinst_hook); +} +if (-d "/etc/kernel/preinst.d") { + print STDERR "Examining /etc/kernel/preinst.d/\n"; + system ("run-parts --verbose --exit-on-error --arg=$version" . + " --arg=$realimageloc$kimage-$version" . + " /etc/kernel/preinst.d") && + die "Failed to process /etc/kernel/preinst.d"; +} +if (-d "/etc/kernel/preinst.d/$version") { + print STDERR "Examining /etc/kernel/preinst.d/$version.\n"; + system ("run-parts --verbose --exit-on-error --arg=$version" . + " --arg=$realimageloc$kimage-$version" . 
+ " /etc/kernel/preinst.d/$version") && + die "Failed to process /etc/kernel/preinst.d/$version"; +} +print STDERR "Done.\n"; + +exit 0; + +__END__ + + --- linux-3.13.0.orig/debian/control-scripts/prerm +++ linux-3.13.0/debian/control-scripts/prerm @@ -0,0 +1,312 @@ +#! /usr/bin/perl +# -*- Mode: Perl -*- +# image.prerm --- +# Author : root ( root@melkor.pilgrim.umass.edu ) +# Created On : Fri May 17 03:28:59 1996 +# Created On Node : melkor.pilgrim.umass.edu +# Last Modified By : Manoj Srivastava +# Last Modified On : Sat Aug 5 13:14:17 2006 +# Last Machine Used: glaurung.internal.golden-gryphon.com +# Update Count : 85 +# Status : Unknown, Use with caution! +# HISTORY : +# Description : +# +# +# $Id: image.prerm,v 1.22 2003/10/07 16:24:20 srivasta Exp $ +# +# +#use strict; + +$|=1; +# Predefined values: +my $version = "=V"; +my $link_in_boot = ""; # Should be empty, mostly +my $no_symlink = ""; # Should be empty, mostly +my $reverse_symlink = ""; # Should be empty, mostly +my $do_symlinks = "Yes"; # target machine defined +my $do_boot_enable = "Yes"; # target machine defined +my $do_bootfloppy = "Yes"; # target machine defined +my $do_bootloader = "Yes"; # target machine defined +my $move_image = ''; # target machine defined +my $kimage = "=K"; # Should be empty, mostly +my $loader = "=L"; # lilo, silo, quik, palo, vmelilo, or nettrom +my $image_dir = "/boot"; # where the image is located +my $clobber_modules = ''; # target machine defined +my $initrd = "YES"; # initrd kernel +my $use_hard_links = ''; # hardlinks do not wirk across fs boundaries +my $postinst_hook = ''; #Normally we do not +my $postrm_hook = ''; #Normally we do not +my $preinst_hook = ''; #Normally we do not +my $prerm_hook = ''; #Normally we do not +my $minimal_swap = ''; # Do not swap symlinks +my $ignore_depmod_err = ''; # normally we do not +my $relink_build_link = 'YES'; # There is no harm in checking the link +my $force_build_link = ''; # There is no harm in checking the link +my $kernel_arch = "=B"; +my $ramdisk = "/usr/sbin/update-initramfs"; +my $package_name = "linux-image-$version"; + +my $Loader = "NoLOADER"; # +$Loader = "LILO" if $loader =~ /^lilo/io; +$Loader = "SILO" if $loader =~ /^silo/io; +$Loader = "QUIK" if $loader =~ /^quik/io; +$Loader = "yaboot" if $loader =~ /^yaboot/io; +$Loader = "PALO" if $loader =~ /^palo/io; +$Loader = "NETTROM" if $loader =~ /^nettrom/io; +$Loader = "VMELILO" if $loader =~ /^vmelilo/io; +$Loader = "ZIPL" if $loader =~ /^zipl/io; +$Loader = "ELILO" if $loader =~ /^elilo/io; + + +# This should not point to /tmp, because of security risks. +my $temp_file_name = "/var/log/$loader" . "_log.$$"; + +#known variables +my $image_dest = "/"; +my $realimageloc = "/$image_dir/"; +my $have_conffile = ""; +my $CONF_LOC = '/etc/kernel-img.conf'; +my $relative_links = ''; +my $silent_loader = ''; +my $warn_reboot = 'Yes'; # Warn that we are installing a version of + # the kernel we are running + +# remove multiple leading slashes; make sure there is at least one. 
+$realimageloc =~ s|^/*|/|o;
+$realimageloc =~ s|/+|/|o;
+
+my $DEBUG = 0;
+
+# Variables used
+my $image='';
+my $ret=0;
+my $seen='';
+my $answer='';
+my $running = '';
+my $WouldInvalidate = 0;
+
+if ($ARGV[0] && ($ARGV[0] =~ /remove/ || $ARGV[0] =~ /upgrade/)) {
+  if (-l "/usr/doc/linux-image-$version") {
+    unlink "/usr/doc/linux-image-$version";
+  }
+}
+
+# Ignore all invocations except when called on to remove
+exit 0 unless ($ARGV[0] && $ARGV[0] =~ /remove/) ;
+
+# Paranoid check to make sure that the correct value is put in there
+if (! $kimage) { $kimage = "vmlinuz";} # Hmm. empty
+elsif ($kimage =~ m/^b?uImage$/o) { $kimage = "vmlinuz";} # these produce vmlinuz
+elsif ($kimage =~ m/^b?zImage$/o) { $kimage = "vmlinuz";} # these produce vmlinuz
+elsif ($kimage =~ m/^[iI]mage$/o) { my $nop = $kimage; }
+elsif ($kimage =~ m/^vmlinux$/o) { my $nop = $kimage; }
+else { $kimage = "vmlinuz";} # Default
+
+if (-r "$CONF_LOC" && -f "$CONF_LOC" ) {
+  if (open(CONF, "$CONF_LOC")) {
+    while (<CONF>) {
+      chomp;
+      s/\#.*$//g;
+      next if /^\s*$/;
+
+      $do_symlink = "" if /^\s*do_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $no_symlink = "" if /^\s*no_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $reverse_symlink = "" if /^\s*reverse_symlinks\s*=\s*(no|false|0)\s*$/ig;
+      $link_in_boot = "" if /^\s*image_in_boot\s*=\s*(no|false|0)\s*$/ig;
+      $link_in_boot = "" if /^\s*link_in_boot\s*=\s*(no|false|0)\s*$/ig;
+      $move_image = "" if /^\s*move_image\s*=\s*(no|false|0)\s*$/ig;
+      $clobber_modules = '' if /^\s*clobber_modules\s*=\s*(no|false|0)\s*$/ig;
+      $do_boot_enable = '' if /^\s*do_boot_enable\s*=\s*(no|false|0)\s*$/ig;
+      $do_bootfloppy = '' if /^\s*do_bootfloppy\s*=\s*(no|false|0)\s*$/ig;
+      $relative_links = '' if /^\s*relative_links \s*=\s*(no|false|0)\s*$/ig;
+      $do_bootloader = '' if /^\s*do_bootloader\s*=\s*(no|false|0)\s*$/ig;
+      $do_initrd = '' if /^\s*do_initrd\s*=\s*(no|false|0)\s*$/ig;
+      $use_hard_links = '' if /^\s*use_hard_links\s*=\s*(no|false|0)\s*$/ig;
+      $silent_loader = '' if /^\s*silent_loader\s*=\s*(no|false|0)\s*$/ig;
+      $warn_reboot = '' if /^\s*warn_reboot\s*=\s*(no|false|0)\s*$/ig;
+      $minimal_swap = '' if /^\s*minimal_swap\s*=\s*(no|false|0)\s*$/ig;
+      $ignore_depmod_err = '' if /^\s*ignore_depmod_err\s*=\s*(no|false|0)\s*$/ig;
+      $relink_build_link = '' if /^\s*relink_build_link\s*=\s*(no|false|0)\s*$/ig;
+      $force_build_link = '' if /^\s*force_build_link\s*=\s*(no|false|0)\s*$/ig;
+
+
+      $do_symlink = "Yes" if /^\s*do_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $no_symlink = "Yes" if /^\s*no_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $reverse_symlink = "Yes" if /^\s*reverse_symlinks\s*=\s*(yes|true|1)\s*$/ig;
+      $link_in_boot = "Yes" if /^\s*image_in_boot\s*=\s*(yes|true|1)\s*$/ig;
+      $link_in_boot = "Yes" if /^\s*link_in_boot\s*=\s*(yes|true|1)\s*$/ig;
+      $move_image = "Yes" if /^\s*move_image\s*=\s*(yes|true|1)\s*$/ig;
+      $clobber_modules = "Yes" if /^\s*clobber_modules\s*=\s*(yes|true|1)\s*$/ig;
+      $do_boot_enable = "Yes" if /^\s*do_boot_enable\s*=\s*(yes|true|1)\s*$/ig;
+      $do_bootfloppy = "Yes" if /^\s*do_bootfloppy\s*=\s*(yes|true|1)\s*$/ig;
+      $do_bootloader = "Yes" if /^\s*do_bootloader\s*=\s*(yes|true|1)\s*$/ig;
+      $relative_links = "Yes" if /^\s*relative_links\s*=\s*(yes|true|1)\s*$/ig;
+      $do_initrd = "Yes" if /^\s*do_initrd\s*=\s*(yes|true|1)\s*$/ig;
+      $use_hard_links = "Yes" if /^\s*use_hard_links\s*=\s*(yes|true|1)\s*$/ig;
+      $silent_loader = 'Yes' if /^\s*silent_loader\s*=\s*(yes|true|1)\s*$/ig;
+      $warn_reboot = 'Yes' if /^\s*warn_reboot\s*=\s*(yes|true|1)\s*$/ig;
+      $minimal_swap = 'Yes' if /^\s*minimal_swap\s*=\s*(yes|true|1)\s*$/ig;
+      $ignore_depmod_err = 'Yes' if /^\s*ignore_depmod_err\s*=\s*(yes|true|1)\s*$/ig;
+      $relink_build_link = 'Yes' if /^\s*relink_build_link\s*=\s*(yes|true|1)\s*$/ig;
+      $force_build_link = 'Yes' if /^\s*force_build_link\s*=\s*(yes|true|1)\s*$/ig;
+
+      $image_dest = "$1" if /^\s*image_dest\s*=\s*(\S+)/ig;
+      $postinst_hook = "$1" if /^\s*postinst_hook\s*=\s*(\S+)/ig;
+      $postrm_hook = "$1" if /^\s*postrm_hook\s*=\s*(\S+)/ig;
+      $preinst_hook = "$1" if /^\s*preinst_hook\s*=\s*(\S+)/ig;
+      $prerm_hook = "$1" if /^\s*prerm_hook\s*=\s*(\S+)/ig;
+      $ramdisk = "$1" if /^\s*ramdisk\s*=\s*(.+)$/ig;
+    }
+    close CONF;
+    $have_conffile = "Yes";
+  }
+}
+
+
+$ENV{KERNEL_ARCH}=$kernel_arch if $kernel_arch;
+
+# Check to see if we are trying to remove the running kernel;
+# if so, warn loudly before proceeding.
+chop($running=`uname -r`);
+if ($running eq $version) {
+  print STDERR "WARN: Proceeding with removing running kernel image.\n";
+}
+
+#Now, they have an alternate kernel which they are currently running
+
+# This is just us being nice to lilo users.
+
+chdir("/") or die "could not chdir to /:$!\n";
+
+if (-f "/etc/$loader.conf") { #I know, could be a link, but ..
+  open (LILO, "/etc/$loader.conf") || &success(); # this is not critical
+  while (<LILO>) {
+    chop;
+    s/\#.*//; # nix the comments
+    next unless /^\s*image\s*=\s(\S+)/o;
+    $image = $1;
+    if ($image && -e $image) {
+      while (defined($image) && -l $image) {
+        $image = readlink ($image);
+      }
+      if (defined($image) && -e $image) {
+        $WouldInvalidate |= $image =~ /$kimage-$version/;
+      }
+      else {
+        &success(); # invalid $loader.conf file
+      }
+    }
+    else {
+      &success(); # invalid $loader.conf file
+    }
+  }
+  close (LILO);
+  if ($WouldInvalidate) {
+    print STDERR "WARN: Proceeding with removing running kernel image.\n";
+    &success();
+  }
+}
+
+
+# set the env var stem
+$ENV{'STEM'} = "linux";
+
+sub exec_script {
+  my $type = shift;
+  my $script = shift;
+  print STDERR "Running $type hook script $script.\n";
+  system ("$script $version $realimageloc$kimage-$version") &&
+    print STDERR "User $type hook script [$script] ";
+  if ($?) {
+    if ($? == -1) {
+      print STDERR "failed to execute: $!\n";
+    }
+    elsif ($? & 127) {
+      printf STDERR "died with signal %d, %s coredump\n",
+        ($? & 127), ($? & 128) ? 'with' : 'without';
+    }
+    else {
+      printf STDERR "exited with value %d\n", $? >> 8;
+    }
+    exit $? >> 8;
+  }
+}
+sub run_hook {
+  my $type = shift;
+  my $script = shift;
+  if ($script =~ m,^/,) {
+    # Full path provided for the hook script
+    if (-x "$script") {
+      &exec_script($type,$script);
+    }
+    else {
+      die "The provided $type hook script [$script] could not be run.\n";
+    }
+  }
+  else {
+    # Look for it in a safe path
+    for my $path ('/bin', '/sbin', '/usr/bin', '/usr/sbin') {
+      if (-x "$path/$script") {
+        &exec_script($type, "$path/$script");
+        return 0;
+      }
+    }
+    # No luck
+    print STDERR "Could not find $type hook script [$script].\n";
+    die "Looked in: '/bin', '/sbin', '/usr/bin', '/usr/sbin'\n";
+  }
+}
+
+
+my $options;
+for (@ARGV) {
+  s,','\\'',g;
+  $options .= " '$_'";
+}
+$ENV{'DEB_MAINT_PARAMS'}="$options";
+
+## Run user hook script here, if any
+if (-x "$prerm_hook") {
+  &run_hook("prerm", $prerm_hook);
+}
+if (-d "/etc/kernel/prerm.d") {
+  print STDERR "Examining /etc/kernel/prerm.d.\n";
+  system ("run-parts --verbose --exit-on-error --arg=$version " .
+ "--arg=$realimageloc$kimage-$version /etc/kernel/prerm.d") && + die "Failed to process /etc/kernel/prerm.d"; +} +if (-d "/etc/kernel/prerm.d/$version") { + print STDERR "Examining /etc/kernel/prerm.d/$version.\n"; + system ("run-parts --verbose --exit-on-error --arg=$version" . + " --arg=$realimageloc$kimage-$version " . + "/etc/kernel/prerm.d/$version") && + die "Failed to process /etc/kernel/prerm.d/$version"; +} + +sub success () { + # NOTE: need to keep this list in sync with rules.d/2-binary-arch.mk + my %files_to_keep = ( + 'modules.builtin' => 1, + 'modules.order' => 1, + ); + my $short; + for my $file () { + $short = $file; $short =~ s,.*/,,; + if (!defined $files_to_keep{$short}) { + unlink "$file"; + } + } + exit 0; +} + + + +&success(); +exit 0; +__END__ + + + + + --- linux-3.13.0.orig/debian/control.stub +++ linux-3.13.0/debian/control.stub @@ -0,0 +1,977 @@ +Source: linux +Section: devel +Priority: optional +Maintainer: Ubuntu Kernel Team +Standards-Version: 3.9.4.0 +Build-Depends: debhelper (>= 5), cpio, module-init-tools, kernel-wedge (>= 2.24ubuntu1), makedumpfile [amd64 i386], libelf-dev, libnewt-dev, libiberty-dev, rsync, libdw-dev, libpci-dev, dpkg (>= 1.16.0~ubuntu4), pkg-config, flex, bison, libunwind8-dev, openssl, libaudit-dev, bc, python-dev, gawk, device-tree-compiler [powerpc], u-boot-tools [powerpc], libc6-dev-ppc64 [powerpc] +Build-Depends-Indep: xmlto, docbook-utils, ghostscript, transfig, bzip2, sharutils, asciidoc +Vcs-Git: http://kernel.ubuntu.com/git-repos/ubuntu/ubuntu-trusty.git +XS-Testsuite: autopkgtest +#XS-Testsuite-Depends: gcc-4.7 binutils + +Package: linux-source-3.13.0 +Architecture: all +Section: devel +Priority: optional +Provides: linux-source, linux-source-3 +Depends: ${misc:Depends}, binutils, bzip2, coreutils | fileutils (>= 4.0) +Recommends: libc-dev, gcc, make +Suggests: libncurses-dev | ncurses-dev, kernel-package, libqt3-dev +Description: Linux kernel source for version 3.13.0 with Ubuntu patches + This package provides the source code for the Linux kernel version + 3.13.0. + . + This package is mainly meant for other packages to use, in order to build + custom flavours. + . + If you wish to use this package to create a custom Linux kernel, then it + is suggested that you investigate the package kernel-package, which has + been designed to ease the task of creating kernel image packages. + . + If you are simply trying to build third-party modules for your kernel, + you do not want this package. Install the appropriate linux-headers + package instead. + +Package: linux-doc +Architecture: all +Section: doc +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-doc-3 +Replaces: linux-doc-3 +Description: Linux kernel specific documentation for version 3.13.0 + This package provides the various documents in the 3.13.0 kernel + Documentation/ subdirectory. These document kernel subsystems, APIs, device + drivers, and so on. See + /usr/share/doc/linux-doc/00-INDEX for a list of what is + contained in each file. + +Package: linux-headers-3.13.0-54 +Architecture: all +Multi-Arch: foreign +Section: devel +Priority: optional +Depends: ${misc:Depends}, coreutils | fileutils (>= 4.0) +Description: Header files related to Linux kernel version 3.13.0 + This package provides kernel header files for version 3.13.0, for sites + that want the latest kernel headers. 
Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details + +Package: linux-libc-dev +Architecture: i386 amd64 armhf arm64 x32 powerpc ppc64el +Depends: ${misc:Depends} +Conflicts: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), amd64-libs-dev (<= 1.1), linux-kernel-headers +Replaces: libc6-dev (<< 2.3.2.ds1-6), libc6.1-dev (<< 2.3.2.ds1-6), dvb-dev (<< 1.0.1-6), linux-kernel-headers, libdrm-dev +Provides: linux-kernel-headers +Multi-Arch: same +Description: Linux Kernel Headers for development + This package provides headers from the Linux kernel. These headers + are used by the installed headers for GNU glibc and other system + libraries. They are NOT meant to be used to build third-party modules for + your kernel. Use linux-headers-* packages for that. + +Package: linux-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Depends: ${misc:Depends} +Conflicts: linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Replaces: linux-tools (<= 2.6.32-16.25), linux-lts-utopic-tools-common, linux-lts-vivid-tools-common +Description: Linux kernel version specific tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version PGKVER. + +Package: linux-tools-3.13.0-54 +Architecture: i386 amd64 armhf arm64 powerpc ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-tools-common +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + You probabally want to install linux-tools-3.13.0-54-. + +Package: linux-cloud-tools-common +Architecture: all +Multi-Arch: foreign +Section: kernel +Priority: optional +Breaks: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13) +Conflicts: linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Replaces: linux-tools-common (<< 3.13.0-8.28), hv-kvp-daemon-init (<< 3.13), linux-lts-utopic-cloud-tools-common, linux-lts-vivid-cloud-tools-common +Depends: ${misc:Depends} +Description: Linux kernel version specific cloud tools for version 3.13.0 + This package provides the architecture independent parts for kernel + version locked tools for cloud tools for version PGKVER. + +Package: linux-cloud-tools-3.13.0-54 +Architecture: i386 amd64 armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-cloud-tools-common +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud tools for version 3.13.0-54 on + 64 bit x86. + You probabally want to install linux-cloud-tools-3.13.0-54-. 
+ + +Package: linux-image-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] | grub-ieee1275 [ppc64el] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-dbgsym +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic +Architecture: i386 amd64 armhf arm64 ppc64el +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 armhf arm64 ppc64el +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: flash-kernel [armhf] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-generic-lpae +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-generic-lpae +Architecture: armhf +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-generic-lpae, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Generic LPAE processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-generic-lpae meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-generic-lpae-dbgsym +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-generic-lpae +Architecture: armhf +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-generic-lpae +XC-Package-Type: udeb +Section: debian-installer +Architecture: armhf +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-pc [i386 amd64 x32] | grub-efi-amd64 [amd64 x32] | grub-efi-ia32 [i386 amd64 x32] | grub [i386 amd64 x32] | lilo (>= 19.1) [i386 amd64 x32] | flash-kernel [armhf arm64] +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-lowlatency +Description: Linux kernel image for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel image for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-lowlatency, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64 bit x86 SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64 bit x86 SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports Lowlatency processors. + . + Geared toward desktop and server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-lowlatency meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64 bit x86 SMP + This package provides kernel header files for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-lowlatency-dbgsym +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64 bit x86 SMP + This package provides a kernel debug image for version 3.13.0 on + 64 bit x86 SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-lowlatency +Architecture: i386 amd64 +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-lowlatency +XC-Package-Type: udeb +Section: debian-installer +Architecture: i386 amd64 +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500 +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500v1 and e500v2 processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500 meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500v1 and e500v2 + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500v1 and e500v2. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500 +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500 +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-e500mc +Description: Linux kernel image for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-e500mc, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit Freescale Power e500mc + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit Freescale Power e500mc processors. + . + Geared toward server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-e500mc meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides kernel header files for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-e500mc-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit Freescale Power e500mc + This package provides a kernel debug image for version 3.13.0 on + 32-bit Freescale Power e500mc. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-e500mc +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-e500mc +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc-smp +Description: Linux kernel image for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-image-extra-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 32-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 32-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 32-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 32-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 32-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 32-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. 
+ + +Package: linux-image-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: grub-ieee1275 +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-emb +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-emb, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP Book3E processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-emb meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-headers-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-emb-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP Book3E + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP Book3E. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. 
+ +Package: linux-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-emb +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-emb +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + + +Package: linux-image-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Pre-Depends: dpkg (>= 1.10.24) +Provides: linux-image, linux-image-3.0, fuse-module, redhat-cluster-modules, ivtv-modules +Depends: ${misc:Depends}, ${shlibs:Depends}, initramfs-tools (>= 0.36ubuntu6), module-init-tools (>= 3.3-pre11-4ubuntu3) +Conflicts: hotplug (<< 0.0.20040105-1) +Recommends: yaboot +Suggests: fdutils, linux-doc-3.13.0 | linux-source-3.13.0, linux-tools, linux-headers-3.13.0-54-powerpc64-smp +Description: Linux kernel image for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel image for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. + +Package: linux-image-extra-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: kernel +Priority: optional +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-3.13.0-54-powerpc64-smp, crda (>=1.1.1-1ubuntu2) | wireless-crda +Description: Linux kernel extra modules for version 3.13.0 on 64-bit PowerPC SMP + This package contains the Linux kernel extra modules for version 3.13.0 on + 64-bit PowerPC SMP. + . + Also includes the corresponding System.map file, the modules built by the + packager, and scripts that try to ensure that the system is not left in an + unbootable state after an update. + . + Supports 64-bit PowerPC SMP processors. + . + Geared toward desktop or server systems. + . + You likely do not want to install this package directly. Instead, install + the linux-powerpc64-smp meta-package, which will ensure that upgrades work + correctly, and that supporting packages are also installed. 
+ +Package: linux-headers-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-headers-3.13.0-54, ${shlibs:Depends} +Provides: linux-headers, linux-headers-3.0 +Description: Linux kernel headers for version 3.13.0 on 64-bit PowerPC SMP + This package provides kernel header files for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that want the latest kernel headers. Please read + /usr/share/doc/linux-headers-3.13.0-54/debian.README.gz for details. + +Package: linux-image-3.13.0-54-powerpc64-smp-dbgsym +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends} +Provides: linux-debug +Description: Linux kernel debug image for version 3.13.0 on 64-bit PowerPC SMP + This package provides a kernel debug image for version 3.13.0 on + 64-bit PowerPC SMP. + . + This is for sites that wish to debug the kernel. + . + The kernel image contained in this package is NOT meant to boot from. It + is uncompressed, and unstripped. This package also includes the + unstripped modules. + +Package: linux-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-tools-3.13.0-54 +Description: Linux kernel version specific tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools (such as perf and x86_energy_perf_policy) for + version 3.13.0-54 on + 64 bit x86. + +Package: linux-cloud-tools-3.13.0-54-powerpc64-smp +Architecture: powerpc +Section: devel +Priority: optional +Depends: ${misc:Depends}, linux-cloud-tools-3.13.0-54 +Description: Linux kernel version specific cloud tools for version 3.13.0-54 + This package provides the architecture dependant parts for kernel + version locked tools for cloud for version 3.13.0-54 on + 64 bit x86. + +Package: linux-udebs-powerpc64-smp +XC-Package-Type: udeb +Section: debian-installer +Architecture: powerpc +Depends: ${udeb:Depends} +Description: Metapackage depending on kernel udebs + This package depends on the all udebs that the kernel build generated, + for easier version and migration tracking. + --- linux-3.13.0.orig/debian/copyright +++ linux-3.13.0/debian/copyright @@ -0,0 +1,29 @@ +This is the Ubuntu prepackaged version of the Linux kernel. +Linux was written by Linus Torvalds +and others. + +This package was put together by the Ubuntu Kernel Team, from +sources retrieved from upstream linux git. +The sources may be found at most Linux ftp sites, including +ftp://ftp.kernel.org/pub/linux/kernel/ + +This package is currently maintained by the +Ubuntu Kernel Team + +Linux is copyrighted by Linus Torvalds and others. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 dated June, 1991. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +On Ubuntu Linux systems, the complete text of the GNU General +Public License v2 can be found in `/usr/share/common-licenses/GPL-2'. 
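As the image and header stanzas above repeatedly advise, the ABI-versioned packages are normally pulled in through a meta-package so that ABI bumps arrive as ordinary upgrades. A short sketch (the apt commands are generic; the meta-package name is the one given in the descriptions above):

  # Track the current generic kernel instead of pinning ABI 3.13.0-54 directly
  sudo apt-get install linux-generic
  # Show how the meta-package chains to the versioned image and headers packages
  apt-cache depends linux-generic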
--- linux-3.13.0.orig/debian/debian.env +++ linux-3.13.0/debian/debian.env @@ -0,0 +1 @@ +DEBIAN=debian.master --- linux-3.13.0.orig/debian/docs/README.inclusion-list +++ linux-3.13.0/debian/docs/README.inclusion-list @@ -0,0 +1,51 @@ +This README describes the reason for, and the use of, module +inclusion lists. + +The original Hardy release had the notion of sub-flavours, +e.g., a flavour that was constructed as a subset of an existing flavour. +For example, the virtual flavour was extracted from the server flavour using +a subset of the server flavour modules. However, there were some difficult +maintenance issues with regard to packaging, make rules, and scripts. This +re-implementation of the sub-flavours philosophy is hopefully simpler, +and retrofittable to all releases. + +A module inclusion list looks at the problem of constructing a package +from the perspective of what modules we _want_ in the package, as opposed +to what modules we _don't_ want. As the kernel matures, more and more devices are added, +which makes the problem of configuration maintenance a real pain in the ass. +If we took the approach of disabling all of the config options that we don't want, +then the differences between flavours would quickly become quite large, making +it difficult to quickly compare the individual flavour configs. Each time a +new config option is added we also have to decide whether to disable it in +order to keep the number of modules minimal. + +A module inclusion list is applied on a per-flavour basis. For example, +debian./control.d/${flavour}.inclusion-list. For instance, the +config for virtual is very close to server and generic, but the inclusion list +causes the virtual package to be constructed with _only_ the modules described +in the inclusion list. + +The inclusion list format is a simple bash regular expression list of files. For example, + +arch/*/{crypto,kernel,oprofile} +drivers/acpi/* +drivers/ata/ahci.ko + +These 3 regular expression forms are suitable for expansion by bash and as inputs to 'find'. +See debian/scripts/module-inclusion for details. + +There are 2 log files created as a side effect of the application of the module +inclusion list; $(flavour).inclusion-list.log and $(flavour).depmod.log. + +$(flavour).inclusion-list.log : This log is created while the inclusion list +modules are being copied. If any are missing, then those warnings go in this log. +While it's not considered a fatal error, you should endeavour to correct your inclusion +list such that there are no missing modules. + +$(flavour).depmod.log : This log is created as a result of running depmod on the +resulting set of modules. If there are missing symbols then you'll find that information +here. Again, you should modify your inclusion list such that there are no missing +symbols. + +Tim Gardner +June 2, 2010 --- linux-3.13.0.orig/debian/gbp.conf +++ linux-3.13.0/debian/gbp.conf @@ -0,0 +1,2 @@ +[buildpackage] +debian-tag = Ubuntu-%(version)s --- linux-3.13.0.orig/debian/linux-cloud-tools-common.hv-fcopy-daemon.upstart +++ linux-3.13.0/debian/linux-cloud-tools-common.hv-fcopy-daemon.upstart @@ -0,0 +1,22 @@ +# On Azure/Hyper-V systems start the hv_fcopy_daemon +# +description "Hyper-V File Copy Protocol Daemon" +author "Andy Whitcroft " + +start on runlevel [2345] +stop on runlevel [!2345] +console log + +pre-start script + if [ -e "/etc/default/hv-kvp-daemon-init" ]; then + . 
/etc/default/hv-kvp-daemon-init + fi + [ "$RUN_FCOPY_DAEMON" -eq 0 ] && { stop; exit 0; } + if [ -d /sys/class/dmi/id/. ]; then + read company " + +start on runlevel [2345] +stop on runlevel [!2345] +console log + +pre-start script + if [ -e "/etc/default/hv-kvp-daemon-init" ]; then + . /etc/default/hv-kvp-daemon-init + fi + [ "$RUN_KVP_DAEMON" = 0 ] && { stop; exit 0; } + if [ -d /sys/class/dmi/id/. ]; then + read company " + +start on runlevel [2345] +stop on runlevel [!2345] +console log + +pre-start script + if [ -e "/etc/default/hv-kvp-daemon-init" ]; then + . /etc/default/hv-kvp-daemon-init + fi + [ "$RUN_VSS_DAEMON" -eq 0 ] && { stop; exit 0; } + if [ -d /sys/class/dmi/id/. ]; then + read company +# + +DEBIAN=$(shell awk -F= '($$1 == "DEBIAN") { print $$2 }' $$new; \ + done + flavours="$(wildcard $(DEBIAN)/control.d/vars.* $(DEBIAN)/sub-flavours/*.vars)";\ + for i in $$flavours; do \ + $(SHELL) $(DROOT)/scripts/control-create $$i | \ + sed -e 's/PKGVER/$(release)/g' \ + -e 's/ABINUM/$(abinum)/g' \ + -e 's/SRCPKGNAME/$(src_pkg_name)/g' \ + -e 's/=HUMAN=/$(human_arch)/g' \ + >> $(DEBIAN)/control.stub; \ + done + cp $(DEBIAN)/control.stub $(DEBIAN)/control + +.PHONY: debian/control +debian/control: $(DEBIAN)/control.stub + rm -rf $(builddir)/modules $(builddir)/firmware \ + $(builddir)/kernel-versions $(builddir)/package-list \ + $(builddir)/$(DEBIAN) + mkdir -p $(builddir)/modules/$(arch)/ + cp $(DEBIAN)/d-i/modules/* $(builddir)/modules/$(arch)/ + mkdir -p $(builddir)/firmware/$(arch)/ + cp $(DEBIAN)/d-i/firmware/* $(builddir)/firmware/$(arch)/ + cp $(DEBIAN)/d-i/package-list $(DEBIAN)/d-i/kernel-versions $(builddir)/ + touch $(builddir)/modules/$(arch)/kernel-image + # kernel-wedge needs to poke around in $(DEBIAN)/ + ln -nsf $(CURDIR)/debian $(builddir)/debian + + # Some files may need to differ between architectures + if [ -d $(DEBIAN)/d-i/modules-$(arch) ]; then \ + cp $(DEBIAN)/d-i/modules-$(arch)/* \ + $(builddir)/modules/$(arch)/; \ + fi + if [ -d $(DEBIAN)/d-i/firmware-$(arch) ]; then \ + cp $(DEBIAN)/d-i/firmware-$(arch)/* \ + $(builddir)/firmware/$(arch)/; \ + fi + + # Remove unwanted stuff for this architecture + if [ -r "$(DEBIAN)/d-i/exclude-modules.$(arch)" ]; then \ + (cat $(DEBIAN)/d-i/exclude-modules.$(arch); \ + ls $(builddir)/modules/$(arch)/) | sort | uniq -d | \ + (cd $(builddir)/modules/$(arch)/; xargs rm -f); \ + fi + if [ -r "$(DEBIAN)/d-i/exclude-firmware.$(arch)" ]; then \ + (cat $(DEBIAN)/d-i/exclude-firmware.$(arch); \ + ls $(builddir)/firmware/$(arch)/) | sort | uniq -d | \ + (cd $(builddir)/firmware/$(arch)/; xargs rm -f); \ + fi + + # Per flavour module lists + flavour_modules=`ls $(DEBIAN)/d-i/modules.$(arch)-* 2>/dev/null` \ + || true; \ + if [ "$$flavour_modules" != "" ]; then \ + for flav in $$flavour_modules; do \ + name=`echo $$flav | sed 's/.*\/modules.$(arch)-//'`; \ + mkdir $(builddir)/modules/$(arch)-$$name; \ + (cd $(builddir)/modules/; tar cf - `cat ../$$flav`) | \ + (cd $(builddir)/modules/$(arch)-$$name/; tar xf -); \ + touch $(builddir)/modules/$(arch)-$$name/kernel-image; \ + done; \ + fi + flavour_firmware=`ls $(DEBIAN)/d-i/firmware.$(arch)-* 2>/dev/null` \ + || true; \ + if [ "$$flavour_firmware" != "" ]; then \ + for flav in $$flavour_firmware; do \ + name=`echo $$flav | sed 's/.*\/firmware.$(arch)-//'`; \ + mkdir $(builddir)/firmware/$(arch)-$$name; \ + (cd $(builddir)/firmware/; tar cf - `cat ../$$flav`) | \ + (cd $(builddir)/firmware/$(arch)-$$name/; tar xf -);\ + touch $(builddir)/firmware/$(arch)-$$name/kernel-image; \ + done; \ 
+ fi + + # Some files may need to differ between flavours + flavour_module_dirs=`ls -d $(DEBIAN)/d-i/modules-$(arch)-* 2>/dev/null`\ + || true; \ + if [ "$$flavour_module_dirs" ]; then \ + for flav in $$flavour_module_dirs; do \ + name=`echo $$flav | sed 's/.*\/modules-$(arch)-//'`; \ + [ -d $(builddir)/modules/$(arch)-$$name ] || \ + cp -a $(builddir)/modules/$(arch) \ + modules/$(arch)-$$name; \ + cp $$flav/* $(builddir)/modules/$(arch)-$$name/; \ + done; \ + fi + flavour_firmware_dirs=`ls -d $(DEBIAN)/d-i/firmware-$(arch)-* 2>/dev/null`\ + || true; \ + if [ "$$flavour_firmware_dirs" ]; then \ + for flav in $$flavour_firmware_dirs; do \ + name=`echo $$flav | sed 's/.*\/firmware-$(arch)-//'`; \ + [ -d $(builddir)/firmware/$(arch)-$$name ] || \ + cp -a $(builddir)/firmware/$(arch) \ + firmware/$(arch)-$$name; \ + cp $$flav/* $(builddir)/firmware/$(arch)-$$name/; \ + done; \ + fi + + # Remove unwanted stuff for each flavour + flavour_exclude=`ls $(DEBIAN)/d-i/exclude-modules.$(arch)-* 2>/dev/null`\ + || true; \ + if [ "$$flavour_exclude" ]; then \ + for flav in $$flavour_exclude; do \ + name=`echo $$flav | sed 's/.*\/exclude-modules.$(arch)-//'`;\ + [ -d $(builddir)/modules/$(arch)-$$name ] || \ + cp -a $(builddir)/modules/$(arch) \ + $(builddir)/modules/$(arch)-$$name; \ + (cat $$flav; \ + ls $(builddir)/modules/$(arch)-$$name) | \ + sort | uniq -d | \ + (cd $(builddir)/modules/$(arch)-$$name/; \ + xargs rm -f); \ + done; \ + fi + flavour_exclude=`ls $(DEBIAN)/d-i/exclude-firmware.$(arch)-* 2>/dev/null`\ + || true; \ + if [ "$$flavour_exclude" ]; then \ + for flav in $$flavour_exclude; do \ + name=`echo $$flav | sed 's/.*\/exclude-firmware.$(arch)-//'`;\ + [ -d $(builddir)/firmware/$(arch)-$$name ] || \ + cp -a $(builddir)/firmware/$(arch) \ + $(builddir)/firmware/$(arch)-$$name; \ + (cat $$flav; \ + ls $(builddir)/firmware/$(arch)-$$name) | \ + sort | uniq -d | \ + (cd $(builddir)/firmware/$(arch)-$$name/; \ + xargs rm -f); \ + done; \ + fi + + if [ ! -d $(builddir)/modules/$(build_arch) ]; then \ + mkdir -p $(builddir)/modules/$(build_arch); \ + cp $(builddir)/modules/$(arch)/* \ + $(builddir)/modules/$(build_arch); \ + fi + if [ ! -d $(builddir)/firmware/$(build_arch) ]; then \ + mkdir -p $(builddir)/firmware/$(build_arch); \ + cp $(builddir)/firmware/$(arch)/* \ + $(builddir)/firmware/$(build_arch); \ + fi + + cp $(DEBIAN)/control.stub debian/control.stub + cd $(builddir) && kernel-wedge gen-control > $(CURDIR)/debian/control --- linux-3.13.0.orig/debian/rules.d/0-common-vars.mk +++ linux-3.13.0/debian/rules.d/0-common-vars.mk @@ -0,0 +1,235 @@ +# +# The source package name will be the first token from $(DEBIAN)/changelog +# +src_pkg_name=$(shell sed -n '1s/^\(.*\) (.*).*$$/\1/p' $(DEBIAN)/changelog) + +# Get some version info +release := $(shell sed -n '1s/^$(src_pkg_name).*(\(.*\)-.*).*$$/\1/p' $(DEBIAN)/changelog) +revisions := $(shell sed -n 's/^$(src_pkg_name)\ .*($(release)-\(.*\)).*$$/\1/p' $(DEBIAN)/changelog | tac) +revision ?= $(word $(words $(revisions)),$(revisions)) +prev_revisions := $(filter-out $(revision),0.0 $(revisions)) +prev_revision := $(word $(words $(prev_revisions)),$(prev_revisions)) + +prev_fullver ?= $(shell dpkg-parsechangelog -l$(DEBIAN)/changelog -o1 -c1 | sed -ne 's/^Version: *//p') + +family=ubuntu + +# This is an internally used mechanism for the daily kernel builds. It +# creates packages whose ABI is suffixed with a minimal representation of +# the current git HEAD sha. 
If .git/HEAD is not present, then it uses the +# uuidgen program, +# +# AUTOBUILD can also be used by anyone wanting to build a custom kernel +# image, or rebuild the entire set of Ubuntu packages using custom patches +# or configs. +AUTOBUILD= + +ifneq ($(AUTOBUILD),) +skipabi = true +skipmodule = true +skipdbg = true +gitver=$(shell if test -f .git/HEAD; then cat .git/HEAD; else uuidgen; fi) +gitverpre=$(shell echo $(gitver) | cut -b -3) +gitverpost=$(shell echo $(gitver) | cut -b 38-40) +abi_suffix = -$(gitverpre)$(gitverpost) +endif + +ifneq ($(NOKERNLOG),) +ubuntu_log_opts += --no-kern-log +endif +ifneq ($(PRINTSHAS),) +ubuntu_log_opts += --print-shas +endif + +# Get the kernels own extra version to be added to the release signature. +raw_kernelversion=$(shell make kernelversion) + +# +# full_build -- are we doing a full buildd style build +# +ifeq ($(wildcard /CurrentlyBuilding),) +full_build?=false +else +full_build?=true +endif + +# +# The debug packages are ginormous, so you probably want to skip +# building them (as a developer). +# +ifeq ($(full_build),false) +skipdbg=true +endif + +abinum := $(shell echo $(revision) | sed -r -e 's/([^\+]*)\.[^\.]+(\+.*)?$$/\1/')$(abi_suffix) +prev_abinum := $(shell echo $(prev_revision) | sed -r -e 's/([^\+]*)\.[^\.]+(\+.*)?$$/\1/')$(abi_suffix) +abi_release := $(release)-$(abinum) + +uploadnum := $(shell echo $(revision) | sed -r -e 's/[^\+]*\.([^\.]+(\+.*)?$$)/\1/') +ifneq ($(full_build),false) + uploadnum := $(uploadnum)-Ubuntu +endif + +# XXX: linux-libc-dev got bumped to -803.N inadvertantly by a ti-omap4 upload +# shift our version higher for this package only. Ensure this only +# occurs for the v2.6.35 kernel so that we do not propogate this into +# any other series. +raw_uploadnum := $(shell echo $(revision) | sed -e 's/.*\.//') +libc_dev_version := +ifeq ($(DEBIAN),debian.master) +ifeq ($(release),2.6.35) +libc_dev_version := -v$(release)-$(shell expr "$(abinum)" + 1000).$(raw_uploadnum) +endif +endif + +DEB_HOST_MULTIARCH = $(shell dpkg-architecture -qDEB_HOST_MULTIARCH) +DEB_HOST_GNU_TYPE = $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE) +DEB_BUILD_GNU_TYPE = $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE) +DEB_HOST_ARCH = $(shell dpkg-architecture -qDEB_HOST_ARCH) +DEB_BUILD_ARCH = $(shell dpkg-architecture -qDEB_BUILD_ARCH) + +# +# Detect invocations of the form 'fakeroot debian/rules binary arch=armhf' +# within an x86'en schroot. This only gets you part of the way since the +# packaging phase fails, but you can at least compile the kernel quickly. +# +arch := $(DEB_HOST_ARCH) +ifneq ($(arch),$(DEB_HOST_ARCH)) + CROSS_COMPILE ?= $(shell dpkg-architecture -a$(arch) -qDEB_HOST_GNU_TYPE -f 2>/dev/null)- +endif + +# +# Detect invocations of the form 'dpkg-buildpackage -B -aarmhf' within +# an x86'en schroot. This is the only way to build all of the packages +# (except for tools). +# +ifneq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE)) + CROSS_COMPILE ?= $(DEB_HOST_GNU_TYPE)- +endif + +abidir := $(CURDIR)/$(DEBIAN)/abi/$(release)-$(revision)/$(arch) +prev_abidir := $(CURDIR)/$(DEBIAN)/abi/$(release)-$(prev_revision)/$(arch) +commonconfdir := $(CURDIR)/$(DEBIAN)/config +archconfdir := $(CURDIR)/$(DEBIAN)/config/$(arch) +sharedconfdir := $(CURDIR)/debian.master/config +builddir := $(CURDIR)/debian/build +stampdir := $(CURDIR)/debian/stamps + +# +# The binary package name always starts with linux-image-$KVER-$ABI.$UPLOAD_NUM. 
There +# are places that you'll find linux-image hard coded, but I guess thats OK since the +# assumption that the binary package always starts with linux-image will never change. +# +bin_pkg_name=linux-image-$(abi_release) +extra_pkg_name=linux-image-extra-$(abi_release) +hdrs_pkg_name=linux-headers-$(abi_release) +indep_hdrs_pkg_name=linux-headers-$(abi_release) + +# +# The generation of content in the doc package depends on both 'AUTOBUILD=' and +# 'do_doc_package_content=true'. There are usually build errors during the development +# cycle, so its OK to leave 'do_doc_package_content=false' until those build +# failures get sorted out. Finally, the doc package doesn't really need to be built +# for developer testing (its kind of slow), so only do it if on a buildd. +do_doc_package=true +do_doc_package_content=true +ifeq ($(full_build),false) +do_doc_package_content=false +endif +doc_pkg_name=$(src_pkg_name)-doc + +# +# Similarly with the linux-source package, you need not build it as a developer. Its +# somewhat I/O intensive and utterly useless. +# +do_source_package=true +do_source_package_content=true +ifeq ($(full_build),false) +do_source_package_content=false +endif + +# linux-libc-dev may not be needed, default to building it. +do_libc_dev_package=true + +# common headers normally is built as an indep package, but may be arch +do_common_headers_indep=true + +# add a 'full source' mode +do_full_source=false + +# build tools +ifneq ($(wildcard $(CURDIR)/tools),) + ifeq ($(do_tools),) + ifneq ($(DEB_BUILD_GNU_TYPE),$(DEB_HOST_GNU_TYPE)) + do_tools=false + endif + endif + do_tools?=true +else + do_tools?=false +endif +tools_pkg_name=$(src_pkg_name)-tools-$(abi_release) +tools_common_pkg_name=$(src_pkg_name)-tools-common +tools_flavour_pkg_name=linux-tools-$(abi_release) +cloud_pkg_name=$(src_pkg_name)-cloud-tools-$(abi_release) +cloud_common_pkg_name=$(src_pkg_name)-cloud-tools-common +cloud_flavour_pkg_name=linux-cloud-tools-$(abi_release) + +# The general flavour specific image package. +do_flavour_image_package=true + +# The general flavour specific header package. +do_flavour_header_package=true + +# Support parallel= in DEB_BUILD_OPTIONS (see #209008) +# +# These 2 environment variables set the -j value of the kernel build. For example, +# CONCURRENCY_LEVEL=16 fakeroot $(DEBIAN)/rules binary-debs +# or +# DEB_BUILD_OPTIONS=parallel=16 fakeroot $(DEBIAN)/rules binary-debs +# +# The default is to use the number of CPUs. +# +COMMA=, +DEB_BUILD_OPTIONS_PARA = $(subst parallel=,,$(filter parallel=%,$(subst $(COMMA), ,$(DEB_BUILD_OPTIONS)))) +ifneq (,$(DEB_BUILD_OPTIONS_PARA)) + CONCURRENCY_LEVEL := $(DEB_BUILD_OPTIONS_PARA) +endif + +ifeq ($(CONCURRENCY_LEVEL),) + # Check the environment + CONCURRENCY_LEVEL := $(shell echo $$CONCURRENCY_LEVEL) + # No? Then build with the number of CPUs on the host. 
+ ifeq ($(CONCURRENCY_LEVEL),) + CONCURRENCY_LEVEL := $(shell expr `getconf _NPROCESSORS_ONLN` \* 1) + endif + # Oh hell, give 'em one + ifeq ($(CONCURRENCY_LEVEL),) + CONCURRENCY_LEVEL := 1 + endif +endif + +conc_level = -j$(CONCURRENCY_LEVEL) + +# target_flavour is filled in for each step +kmake = make ARCH=$(build_arch) \ + CROSS_COMPILE=$(CROSS_COMPILE) \ + KERNELVERSION=$(abi_release)-$(target_flavour) \ + CONFIG_DEBUG_SECTION_MISMATCH=y \ + KBUILD_BUILD_VERSION="$(uploadnum)" \ + LOCALVERSION= localver-extra= \ + CFLAGS_MODULE="-DPKG_ABI=$(abinum)" +ifneq ($(LOCAL_ENV_CC),) +kmake += CC=$(LOCAL_ENV_CC) DISTCC_HOSTS=$(LOCAL_ENV_DISTCC_HOSTS) +endif + +# Locking is required in parallel builds to prevent loss of contents +# of the debian/files. +lockme_file = $(CURDIR)/debian/.LOCK +lockme_cmd = flock -w 60 +lockme = $(lockme_cmd) $(lockme_file) + +# Checks if a var is overriden by the custom rules. Called with var and +# flavour as arguments. +custom_override = \ + $(shell if [ -n "$($(1)_$(2))" ]; then echo "$($(1)_$(2))"; else echo "$($(1))"; fi) --- linux-3.13.0.orig/debian/rules.d/1-maintainer.mk +++ linux-3.13.0/debian/rules.d/1-maintainer.mk @@ -0,0 +1,127 @@ +# The following targets are for the maintainer only! do not run if you don't +# know what they do. + +.PHONY: printenv updateconfigs printchanges insertchanges startnewrelease diffupstream help updateportsconfigs editportsconfigs + +help: + @echo "These are the targets in addition to the normal $(DEBIAN) ones:" + @echo + @echo " printenv : Print some variables used in the build" + @echo + @echo " updateconfigs : Update core arch configs" + @echo + @echo " editconfigs : Update core arch configs interractively" + @echo " genconfigs : Generate core arch configs in CONFIGS/*" + @echo + @echo " updateportsconfigs : Update ports arch configs" + @echo + @echo " editportsconfigs : Update ports arch configs interactivly" + @echo " genportconfigs : Generate ports arch configs in CONFIGS/*" + @echo + @echo " printchanges : Print the current changelog entries (from git)" + @echo + @echo " insertchanges : Insert current changelog entries (from git)" + @echo + @echo " startnewrelease : Start a new changelog set" + @echo + @echo " diffupstream : Diff stock kernel code against upstream (git)" + @echo + @echo " help : If you are kernel hacking, you need the professional" + @echo " version of this" + @echo + @echo "Environment variables:" + @echo + @echo " NOKERNLOG : Do not add upstream kernel commits to changelog" + @echo " CONCURRENCY_LEVEL=X" + @echo " : Use -jX for kernel compile" + @echo " PRINTSHAS : Include SHAs for commits in changelog" + +printdebian: + @echo "$(DEBIAN)" + +updateconfigs defaultconfigs editconfigs genconfigs dumpconfigs: + dh_testdir; + $(SHELL) $(DROOT)/scripts/misc/kernelconfig $@ + rm -rf build + +updateportsconfigs defaultportsconfigs editportsconfigs genportsconfigs askconfigs: + dh_testdir; + $(SHELL) $(DROOT)/scripts/misc/kernelconfig $@ ports + rm -rf build + +printenv: + dh_testdir + @echo "src package name = $(src_pkg_name)" + @echo "release = $(release)" + @echo "revisions = $(revisions)" + @echo "revision = $(revision)" + @echo "uploadnum = $(uploadnum)" + @echo "prev_revisions = $(prev_revisions)" + @echo "prev_revision = $(prev_revision)" + @echo "abinum = $(abinum)" + @echo "gitver = $(gitver)" + @echo "flavours = $(flavours)" + @echo "skipabi = $(skipabi)" + @echo "skipmodule = $(skipmodule)" + @echo "skipdbg = $(skipdbg)" + @echo "ubuntu_log_opts = $(ubuntu_log_opts)" + @echo "CONCURRENCY_LEVEL = 
$(CONCURRENCY_LEVEL)" + @echo "bin package name = $(bin_pkg_name)" + @echo "hdr package name = $(hdrs_pkg_name)" + @echo "doc package name = $(doc_pkg_name)" + @echo "do_doc_package = $(do_doc_package)" + @echo "do_doc_package_content = $(do_doc_package_content)" + @echo "do_source_package = $(do_source_package)" + @echo "do_source_package_content = $(do_source_package_content)" + @echo "do_libc_dev_package = $(do_libc_dev_package)" + @echo "do_flavour_image_package = $(do_flavour_image_package)" + @echo "do_flavour_header_package = $(do_flavour_header_package)" + @echo "do_common_headers_indep = $(do_common_headers_indep)" + @echo "do_full_source = $(do_full_source)" + @echo "do_tools = $(do_tools)" + @echo "do_any_tools = $(do_any_tools)" + @echo "do_linux_tools = $(do_linux_tools)" + @echo " do_tools_cpupower = $(do_tools_cpupower)" + @echo " do_tools_perf = $(do_tools_perf)" + @echo " do_tools_x86 = $(do_tools_x86)" + @echo "do_cloud_tools = $(do_cloud_tools)" + @echo " do_tools_hyperv = $(do_tools_hyperv)" + @echo "full_build = $(full_build)" + @echo "libc_dev_version = $(libc_dev_version)" + @echo "DEB_HOST_GNU_TYPE = $(DEB_HOST_GNU_TYPE)" + @echo "DEB_BUILD_GNU_TYPE = $(DEB_BUILD_GNU_TYPE)" + @echo "DEB_HOST_ARCH = $(DEB_HOST_ARCH)" + @echo "DEB_BUILD_ARCH = $(DEB_BUILD_ARCH)" + @echo "arch = $(arch)" + @echo "kmake = $(kmake)" + +printchanges: + @baseCommit=$$(git log --pretty=format:'%H %s' | \ + gawk '/UBUNTU: '".*Ubuntu-`echo $(prev_fullver) | sed 's/+/\\\\+/'`"'$$/ { print $$1; exit }'); \ + git log "$$baseCommit"..HEAD | \ + perl -w -f $(DROOT)/scripts/misc/git-ubuntu-log $(ubuntu_log_opts) + +insertchanges: + @perl -w -f $(DROOT)/scripts/misc/insert-changes.pl $(DROOT) $(DEBIAN) + +diffupstream: + @git diff-tree -p refs/remotes/linux-2.6/master..HEAD $(shell ls | grep -vE '^(ubuntu|$(DEBIAN)|\.git.*)') + +startnewrelease: + dh_testdir + @nextminor=$(shell expr `echo $(revision) | gawk -F. '{print $$2}'` + 1); \ + nextmajor=$(shell expr `echo $(revision) | awk -F. '{print $$1}'` + 1); \ + now="$(shell date -R)"; \ + echo "Creating new changelog set for $(release)-$$nextmajor.$$nextminor..."; \ + echo -e "$(src_pkg_name) ($(release)-$$nextmajor.$$nextminor) UNRELEASED; urgency=low\n" > $(DEBIAN)/changelog.new; \ + echo " CHANGELOG: Do not edit directly. Autogenerated at release." >> \ + $(DEBIAN)/changelog.new; \ + echo " CHANGELOG: Use the printchanges target to see the curent changes." \ + >> $(DEBIAN)/changelog.new; \ + echo " CHANGELOG: Use the insertchanges target to create the final log." 
\ + >> $(DEBIAN)/changelog.new; \ + echo -e "\n -- $$DEBFULLNAME <$$DEBEMAIL> $$now\n" >> \ + $(DEBIAN)/changelog.new ; \ + cat $(DEBIAN)/changelog >> $(DEBIAN)/changelog.new; \ + mv $(DEBIAN)/changelog.new $(DEBIAN)/changelog + --- linux-3.13.0.orig/debian/rules.d/2-binary-arch.mk +++ linux-3.13.0/debian/rules.d/2-binary-arch.mk @@ -0,0 +1,650 @@ +# We don't want make removing intermediary stamps +.SECONDARY : + +# Prepare the out-of-tree build directory +ifeq ($(do_full_source),true) +build_cd = cd $(builddir)/build-$*; # +build_O = +else +build_cd = +build_O = O=$(builddir)/build-$* +endif + +# Typically supplied from the arch makefile, e.g., debian.master/control.d/armhf.mk +ifneq ($(gcc),) +kmake += CC=$(CROSS_COMPILE)$(gcc) +endif + +$(stampdir)/stamp-prepare-%: config-prepare-check-% + @echo Debug: $@ + @touch $@ +$(stampdir)/stamp-prepare-tree-%: target_flavour = $* +$(stampdir)/stamp-prepare-tree-%: $(commonconfdir)/config.common.$(family) $(archconfdir)/config.common.$(arch) $(archconfdir)/config.flavour.% + @echo Debug: $@ + install -d $(builddir)/build-$* + touch $(builddir)/build-$*/ubuntu-build + [ "$(do_full_source)" != 'true' ] && true || \ + rsync -a --exclude debian --exclude debian.master --exclude $(DEBIAN) * $(builddir)/build-$* + cat $^ | sed -e 's/.*CONFIG_VERSION_SIGNATURE.*/CONFIG_VERSION_SIGNATURE="Ubuntu $(release)-$(revision)-$* $(raw_kernelversion)"/' > $(builddir)/build-$*/.config + find $(builddir)/build-$* -name "*.ko" | xargs rm -f + $(build_cd) $(kmake) $(build_O) -j1 silentoldconfig prepare scripts + touch $@ + +# Used by developers as a shortcut to prepare a tree for compilation. +prepare-%: $(stampdir)/stamp-prepare-% + @echo Debug: $@ +# Used by developers to allow efficient pre-building without fakeroot. +build-%: $(stampdir)/stamp-build-% + @echo Debug: $@ + +# Do the actual build, including image and modules +$(stampdir)/stamp-build-%: target_flavour = $* +$(stampdir)/stamp-build-%: bldimg = $(call custom_override,build_image,$*) +$(stampdir)/stamp-build-%: dtb_target = $(dtb_files_$*) +$(stampdir)/stamp-build-%: $(stampdir)/stamp-prepare-% + @echo Debug: $@ build_image $(build_image) bldimg $(bldimg) + $(build_cd) $(kmake) $(build_O) $(conc_level) $(bldimg) modules $(dtb_target) + @touch $@ + +# Install the finished build +install-%: pkgdir = $(CURDIR)/debian/$(bin_pkg_name)-$* +install-%: pkgdir_ex = $(CURDIR)/debian/$(extra_pkg_name)-$* +install-%: bindoc = $(pkgdir)/usr/share/doc/$(bin_pkg_name)-$* +install-%: dbgpkgdir = $(CURDIR)/debian/$(bin_pkg_name)-$*-dbgsym +install-%: signed = $(CURDIR)/debian/$(bin_pkg_name)-signed +install-%: toolspkgdir = $(CURDIR)/debian/$(tools_flavour_pkg_name)-$* +install-%: cloudpkgdir = $(CURDIR)/debian/$(cloud_flavour_pkg_name)-$* +install-%: basepkg = $(hdrs_pkg_name) +install-%: indeppkg = $(indep_hdrs_pkg_name) +install-%: kernfile = $(call custom_override,kernel_file,$*) +install-%: instfile = $(call custom_override,install_file,$*) +install-%: hdrdir = $(CURDIR)/debian/$(basepkg)-$*/usr/src/$(basepkg)-$* +install-%: target_flavour = $* +install-%: dtb_files = $(dtb_files_$*) +install-%: CONFIG_MODULE_SIG_HASH=sha512 +install-%: MODSECKEY=$(builddir)/build-$*/signing_key.priv +install-%: MODPUBKEY=$(builddir)/build-$*/signing_key.x509 +install-%: checks-% + @echo Debug: $@ kernel_file $(kernel_file) kernfile $(kernfile) install_file $(install_file) instfile $(instfile) + dh_testdir + dh_testroot + dh_clean -k -p$(bin_pkg_name)-$* + dh_clean -k -p$(hdrs_pkg_name)-$* +ifneq ($(skipdbg),true) + dh_clean -k 
-p$(dbg_pkg_name)-$* +endif + + # The main image + # compress_file logic required because not all architectures + # generate a zImage automatically out of the box +ifeq ($(compress_file),) + install -m600 -D $(builddir)/build-$*/$(kernfile) \ + $(pkgdir)/boot/$(instfile)-$(abi_release)-$* +else + install -d $(pkgdir)/boot + gzip -c9v $(builddir)/build-$*/$(kernfile) > \ + $(pkgdir)/boot/$(instfile)-$(abi_release)-$* + chmod 600 $(pkgdir)/boot/$(instfile)-$(abi_release)-$* +endif + +ifeq ($(uefi_signed),true) + install -d $(signed)/$(release)-$(revision) + # Check to see if this supports handoff, if not do not sign it. + # Check the identification area magic and version >= 0x020b + handoff=`dd if="$(pkgdir)/boot/$(instfile)-$(abi_release)-$*" bs=1 skip=514 count=6 2>/dev/null | od -s | gawk '($$1 == 0 && $$2 == 25672 && $$3 == 21362 && $$4 >= 523) { print "GOOD" }'`; \ + if [ "$$handoff" = "GOOD" ]; then \ + cp -p $(pkgdir)/boot/$(instfile)-$(abi_release)-$* \ + $(signed)/$(release)-$(revision)/$(instfile)-$(abi_release)-$*.efi; \ + fi +endif + + install -m644 $(builddir)/build-$*/.config \ + $(pkgdir)/boot/config-$(abi_release)-$* + install -m644 $(abidir)/$* \ + $(pkgdir)/boot/abi-$(abi_release)-$* + install -m600 $(builddir)/build-$*/System.map \ + $(pkgdir)/boot/System.map-$(abi_release)-$* + if [ "$(dtb_files)" ]; then \ + install -d $(pkgdir)/lib/firmware/$(abi_release)-$*/device-tree; \ + for dtb_file in $(dtb_files); do \ + install -m644 $(builddir)/build-$*/arch/$(build_arch)/boot/dts/$$dtb_file \ + $(pkgdir)/lib/firmware/$(abi_release)-$*/device-tree/$$dtb_file; \ + done \ + fi +ifeq ($(no_dumpfile),) + makedumpfile -g $(pkgdir)/boot/vmcoreinfo-$(abi_release)-$* \ + -x $(builddir)/build-$*/vmlinux + chmod 0600 $(pkgdir)/boot/vmcoreinfo-$(abi_release)-$* +endif + + $(build_cd) $(kmake) $(build_O) $(conc_level) modules_install $(vdso) \ + INSTALL_MOD_STRIP=1 INSTALL_MOD_PATH=$(pkgdir)/ \ + INSTALL_FW_PATH=$(pkgdir)/lib/firmware/$(abi_release)-$* + + # + # Build module blacklists: + # - blacklist all watchdog drivers (LP:1432837) + # + install -d $(pkgdir)/lib/modprobe.d + echo "# Autogenerated blacklist for $(src_pkg_name) $(abi_release)-$* $(arch)" \ + >$(pkgdir)/lib/modprobe.d/blacklist_$(src_pkg_name)_$(abi_release)-$*.conf + ls -1 $(pkgdir)/lib/modules/$(abi_release)-$*/kernel/drivers/watchdog/ | \ + sed -e 's/^/blacklist /' -e 's/.ko$$//' | \ + sort -u \ + >>$(pkgdir)/lib/modprobe.d/blacklist_$(src_pkg_name)_$(abi_release)-$*.conf + +ifeq ($(do_extras_package),true) + # + # Remove all modules not in the inclusion list. 
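+	# (Sketch of the mechanics, not a spec: when an inclusion list exists for
+	# this flavour the whole module tree is first moved into the extras
+	# package and only the listed modules are brought back, so the base image
+	# package stays small while the extras package carries the rest; the
+	# depmod run that follows fails the build if the trimmed base set reports
+	# unknown symbols.)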
+ # + if [ -f $(DEBIAN)/control.d/$(target_flavour).inclusion-list ] ; then \ + mkdir -p $(pkgdir_ex)/lib/modules/$(abi_release)-$*; \ + mv $(pkgdir)/lib/modules/$(abi_release)-$*/kernel \ + $(pkgdir_ex)/lib/modules/$(abi_release)-$*/kernel; \ + $(SHELL) $(DROOT)/scripts/module-inclusion --master \ + $(pkgdir_ex)/lib/modules/$(abi_release)-$*/kernel \ + $(pkgdir)/lib/modules/$(abi_release)-$*/kernel \ + $(DEBIAN)/control.d/$(target_flavour).inclusion-list 2>&1 | \ + tee $(target_flavour).inclusion-list.log; \ + /sbin/depmod -b $(pkgdir) -ea -F $(pkgdir)/boot/System.map-$(abi_release)-$* \ + $(abi_release)-$* 2>&1 |tee $(target_flavour).depmod.log; \ + if [ `grep -c 'unknown symbol' $(target_flavour).depmod.log` -gt 0 ]; then \ + echo "EE: Unresolved module dependencies in base package!"; \ + exit 1; \ + fi \ + fi +endif + +ifeq ($(no_dumpfile),) + makedumpfile -g $(pkgdir)/boot/vmcoreinfo-$(abi_release)-$* \ + -x $(builddir)/build-$*/vmlinux + chmod 0600 $(pkgdir)/boot/vmcoreinfo-$(abi_release)-$* +endif + rm -f $(pkgdir)/lib/modules/$(abi_release)-$*/build + rm -f $(pkgdir)/lib/modules/$(abi_release)-$*/source + + # Some initramfs-tools specific modules + install -d $(pkgdir)/lib/modules/$(abi_release)-$*/initrd + if [ -f $(pkgdir)/lib/modules/$(abi_release)-$*/kernel/drivers/video/vesafb.ko ]; then\ + ln -f $(pkgdir)/lib/modules/$(abi_release)-$*/kernel/drivers/video/vesafb.ko \ + $(pkgdir)/lib/modules/$(abi_release)-$*/initrd/; \ + fi + + # Now the image scripts + install -d $(pkgdir)/DEBIAN + for script in postinst postrm preinst prerm; do \ + sed -e 's/=V/$(abi_release)-$*/g' -e 's/=K/$(instfile)/g' \ + -e 's/=L/$(loader)/g' -e 's@=B@$(build_arch)@g' \ + $(DROOT)/control-scripts/$$script > $(pkgdir)/DEBIAN/$$script; \ + chmod 755 $(pkgdir)/DEBIAN/$$script; \ + done +ifeq ($(do_extras_package),true) + # Install the postinit/postrm scripts in the extras package. + if [ -f $(DEBIAN)/control.d/$(target_flavour).inclusion-list ] ; then \ + install -d $(pkgdir_ex)/DEBIAN; \ + for script in postinst postrm ; do \ + sed -e 's/=V/$(abi_release)-$*/g' -e 's/=K/$(instfile)/g' \ + -e 's/=L/$(loader)/g' -e 's@=B@$(build_arch)@g' \ + debian/control-scripts/extra-post > $(pkgdir_ex)/DEBIAN/$$script; \ + chmod 755 $(pkgdir_ex)/DEBIAN/$$script; \ + done; \ + fi +endif + + # Install the full changelog. +ifeq ($(do_doc_package),true) + install -d $(bindoc) + cat $(DEBIAN)/changelog $(DEBIAN)/changelog.historical | \ + gzip -9 >$(bindoc)/changelog.Debian.old.gz + chmod 644 $(bindoc)/changelog.Debian.old.gz +endif + +ifneq ($(skipsub),true) + for sub in $($(*)_sub); do \ + if ! 
(TO=$$sub FROM=$* ABI_RELEASE=$(abi_release) $(SHELL) \ + $(DROOT)/scripts/sub-flavour); then exit 1; fi; \ + /sbin/depmod -b debian/$(bin_pkg_name)-$$sub \ + -ea -F debian/$(bin_pkg_name)-$$sub/boot/System.map-$(abi_release)-$* \ + $(abi_release)-$*; \ + install -d debian/$(bin_pkg_name)-$$sub/DEBIAN; \ + for script in postinst postrm preinst prerm; do \ + sed -e 's/=V/$(abi_release)-$*/g' \ + -e 's/=K/$(instfile)/g' \ + -e 's/=L/$(loader)/g' \ + -e 's@=B@$(build_arch)@g' \ + $(DROOT)/control-scripts/$$script > \ + debian/$(bin_pkg_name)-$$sub/DEBIAN/$$script;\ + chmod 755 debian/$(bin_pkg_name)-$$sub/DEBIAN/$$script;\ + done; \ + done +endif + +ifneq ($(skipdbg),true) + # Debug image is simple + install -m644 -D $(builddir)/build-$*/vmlinux \ + $(dbgpkgdir)/usr/lib/debug/boot/vmlinux-$(abi_release)-$* + $(build_cd) $(kmake) $(build_O) modules_install $(vdso) \ + INSTALL_MOD_PATH=$(dbgpkgdir)/usr/lib/debug + # Add .gnu_debuglink sections to each stripped .ko + # pointing to unstripped verson + find $(pkgdir) -name '*.ko' | sed 's|$(pkgdir)||'| while read module ; do \ + if [[ -f "$(dbgpkgdir)/usr/lib/debug/$$module" ]] ; then \ + $(CROSS_COMPILE)objcopy \ + --add-gnu-debuglink=$(dbgpkgdir)/usr/lib/debug/$$module \ + $(pkgdir)/$$module; \ + scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODSECKEY) $(MODPUBKEY) \ + $(pkgdir)/$$module; \ + fi; \ + done + rm -f $(dbgpkgdir)/usr/lib/debug/lib/modules/$(abi_release)-$*/build + rm -f $(dbgpkgdir)/usr/lib/debug/lib/modules/$(abi_release)-$*/source + rm -f $(dbgpkgdir)/usr/lib/debug/lib/modules/$(abi_release)-$*/modules.* + rm -fr $(dbgpkgdir)/usr/lib/debug/lib/firmware +endif + + # The flavour specific headers image + # TODO: Would be nice if we didn't have to dupe the original builddir + install -d -m755 $(hdrdir) + cat $(builddir)/build-$*/.config | \ + sed -e 's/.*CONFIG_DEBUG_INFO=.*/# CONFIG_DEBUG_INFO is not set/g' > \ + $(hdrdir)/.config + chmod 644 $(hdrdir)/.config + $(kmake) O=$(hdrdir) -j1 silentoldconfig prepare scripts + # We'll symlink this stuff + rm -f $(hdrdir)/Makefile + rm -rf $(hdrdir)/include2 $(hdrdir)/source + # Copy over the compilation version. + cp "$(builddir)/build-$*/include/generated/compile.h" \ + "$(hdrdir)/include/generated/compile.h" + # Add UTS_UBUNTU_RELEASE_ABI since UTS_RELEASE is difficult to parse. + echo "#define UTS_UBUNTU_RELEASE_ABI $(abinum)" >> $(hdrdir)/include/generated/utsrelease.h + # powerpc kernel arch seems to need some .o files for external module linking. Add them in. 
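+	# (Assumed background, kept here as a hint: the kbuild link step for
+	# external modules on powerpc pulls in helper objects such as
+	# arch/powerpc/lib/crtsavres.o, so an out-of-tree build along the lines of
+	#     make -C /lib/modules/KVER/build M=MODULE_DIR modules
+	# with KVER and MODULE_DIR as placeholders would fail against a headers
+	# tree that shipped nothing but headers.)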
+ifeq ($(build_arch),powerpc) + mkdir -p $(hdrdir)/arch/powerpc/lib + cp $(builddir)/build-$*/arch/powerpc/lib/*.o $(hdrdir)/arch/powerpc/lib +endif + # Script to symlink everything up + $(SHELL) $(DROOT)/scripts/link-headers "$(hdrdir)" "$(indeppkg)" "$*" + # The build symlink + install -d debian/$(basepkg)-$*/lib/modules/$(abi_release)-$* + ln -s /usr/src/$(basepkg)-$* \ + debian/$(basepkg)-$*/lib/modules/$(abi_release)-$*/build + # And finally the symvers + install -m644 $(builddir)/build-$*/Module.symvers \ + $(hdrdir)/Module.symvers + + # Now the header scripts + install -d $(CURDIR)/debian/$(basepkg)-$*/DEBIAN + for script in postinst; do \ + sed -e 's/=V/$(abi_release)-$*/g' -e 's/=K/$(instfile)/g' \ + $(DROOT)/control-scripts/headers-$$script > \ + $(CURDIR)/debian/$(basepkg)-$*/DEBIAN/$$script; \ + chmod 755 $(CURDIR)/debian/$(basepkg)-$*/DEBIAN/$$script; \ + done + + # At the end of the package prep, call the tests + DPKG_ARCH="$(arch)" KERN_ARCH="$(build_arch)" FLAVOUR="$*" \ + VERSION="$(abi_release)" REVISION="$(revision)" \ + PREV_REVISION="$(prev_revision)" ABI_NUM="$(abinum)" \ + PREV_ABI_NUM="$(prev_abinum)" BUILD_DIR="$(builddir)/build-$*" \ + INSTALL_DIR="$(pkgdir)" SOURCE_DIR="$(CURDIR)" \ + run-parts -v $(DROOT)/tests-build + + # + # Remove files which are generated at installation by postinst, + # except for modules.order and modules.builtin + # + # NOTE: need to keep this list in sync with postrm + # + mkdir $(pkgdir)/lib/modules/$(abi_release)-$*/_ + mv $(pkgdir)/lib/modules/$(abi_release)-$*/modules.order \ + $(pkgdir)/lib/modules/$(abi_release)-$*/_ + if [ -f $(pkgdir)/lib/modules/$(abi_release)-$*/modules.builtin ] ; then \ + mv $(pkgdir)/lib/modules/$(abi_release)-$*/modules.builtin \ + $(pkgdir)/lib/modules/$(abi_release)-$*/_; \ + fi + rm -f $(pkgdir)/lib/modules/$(abi_release)-$*/modules.* + mv $(pkgdir)/lib/modules/$(abi_release)-$*/_/* \ + $(pkgdir)/lib/modules/$(abi_release)-$* + rmdir $(pkgdir)/lib/modules/$(abi_release)-$*/_ + +ifeq ($(do_linux_tools),true) + # Create the linux-tools tool links + install -d $(toolspkgdir)/usr/lib/linux-tools/$(abi_release)-$* +ifeq ($(do_tools_cpupower),true) + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/cpupower $(toolspkgdir)/usr/lib/linux-tools/$(abi_release)-$* +endif +ifeq ($(do_tools_perf),true) + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/perf $(toolspkgdir)/usr/lib/linux-tools/$(abi_release)-$* +endif +ifeq ($(do_tools_x86),true) + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/x86_energy_perf_policy $(toolspkgdir)/usr/lib/linux-tools/$(abi_release)-$* + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/turbostat $(toolspkgdir)/usr/lib/linux-tools/$(abi_release)-$* +endif +endif +ifeq ($(do_cloud_tools),true) +ifeq ($(do_tools_hyperv),true) + # Create the linux-hyperv tool links + install -d $(cloudpkgdir)/usr/lib/linux-tools/$(abi_release)-$* + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/hv_kvp_daemon $(cloudpkgdir)/usr/lib/linux-tools/$(abi_release)-$* + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/hv_vss_daemon $(cloudpkgdir)/usr/lib/linux-tools/$(abi_release)-$* + ln -s ../../$(src_pkg_name)-tools-$(abi_release)/hv_fcopy_daemon $(cloudpkgdir)/usr/lib/linux-tools/$(abi_release)-$* +endif +endif + +headers_tmp := $(CURDIR)/debian/tmp-headers +headers_dir := $(CURDIR)/debian/linux-libc-dev + +hmake := $(MAKE) -C $(CURDIR) O=$(headers_tmp) \ + KERNELVERSION=$(abi_release) INSTALL_HDR_PATH=$(headers_tmp)/install \ + SHELL="$(SHELL)" ARCH=$(header_arch) + +install-arch-headers: + @echo Debug: 
$@ + dh_testdir + dh_testroot + dh_clean -k -plinux-libc-dev + + rm -rf $(headers_tmp) + install -d $(headers_tmp) $(headers_dir)/usr/include/ + + $(hmake) $(defconfig) + mv $(headers_tmp)/.config $(headers_tmp)/.config.old + sed -e 's/^# \(CONFIG_MODVERSIONS\) is not set$$/\1=y/' \ + -e 's/.*CONFIG_LOCALVERSION_AUTO.*/# CONFIG_LOCALVERSION_AUTO is not set/' \ + $(headers_tmp)/.config.old > $(headers_tmp)/.config + $(hmake) silentoldconfig + $(hmake) headers_install + + ( cd $(headers_tmp)/install/include/ && \ + find . -name '.' -o -name '.*' -prune -o -print | \ + cpio -pvd --preserve-modification-time \ + $(headers_dir)/usr/include/ ) + mkdir $(headers_dir)/usr/include/$(DEB_HOST_MULTIARCH) + mv $(headers_dir)/usr/include/asm $(headers_dir)/usr/include/$(DEB_HOST_MULTIARCH)/ + + rm -rf $(headers_tmp) + +binary-arch-headers: install-arch-headers + @echo Debug: $@ + dh_testdir + dh_testroot +ifeq ($(do_libc_dev_package),true) +ifneq ($(DEBIAN),debian.master) + echo "non-master branch building linux-libc-dev, aborting" + exit 1 +endif + dh_installchangelogs -plinux-libc-dev + dh_installdocs -plinux-libc-dev + dh_compress -plinux-libc-dev + dh_fixperms -plinux-libc-dev + dh_installdeb -plinux-libc-dev + $(lockme) dh_gencontrol -plinux-libc-dev -- $(libc_dev_version) + dh_md5sums -plinux-libc-dev + dh_builddeb -plinux-libc-dev +endif + +binary-%: pkgimg = $(bin_pkg_name)-$* +binary-%: pkgimg_ex = $(extra_pkg_name)-$* +binary-%: pkghdr = $(hdrs_pkg_name)-$* +binary-%: dbgpkg = $(bin_pkg_name)-$*-dbgsym +binary-%: dbgpkgdir = $(CURDIR)/debian/$(bin_pkg_name)-$*-dbgsym +binary-%: pkgtools = $(tools_flavour_pkg_name)-$* +binary-%: pkgcloud = $(cloud_flavour_pkg_name)-$* +binary-%: target_flavour = $* +binary-%: install-% + @echo Debug: $@ + dh_testdir + dh_testroot + + dh_installchangelogs -p$(pkgimg) + dh_installdocs -p$(pkgimg) + dh_compress -p$(pkgimg) + dh_fixperms -p$(pkgimg) -X/boot/ + dh_installdeb -p$(pkgimg) + dh_shlibdeps -p$(pkgimg) + $(lockme) dh_gencontrol -p$(pkgimg) + dh_md5sums -p$(pkgimg) + dh_builddeb -p$(pkgimg) -- -Zbzip2 -z9 + +ifeq ($(do_extras_package),true) + if [ -f $(DEBIAN)/control.d/$(target_flavour).inclusion-list ] ; then \ + dh_installchangelogs -p$(pkgimg_ex); \ + dh_installdocs -p$(pkgimg_ex); \ + dh_compress -p$(pkgimg_ex); \ + dh_fixperms -p$(pkgimg_ex) -X/boot/; \ + dh_installdeb -p$(pkgimg_ex); \ + dh_shlibdeps -p$(pkgimg_ex); \ + $(lockme) dh_gencontrol -p$(pkgimg_ex); \ + dh_md5sums -p$(pkgimg_ex); \ + dh_builddeb -p$(pkgimg_ex) -- -Zbzip2 -z9; \ + fi +endif + + dh_installchangelogs -p$(pkghdr) + dh_installdocs -p$(pkghdr) + dh_compress -p$(pkghdr) + dh_fixperms -p$(pkghdr) + dh_shlibdeps -p$(pkghdr) + dh_installdeb -p$(pkghdr) + $(lockme) dh_gencontrol -p$(pkghdr) + dh_md5sums -p$(pkghdr) + dh_builddeb -p$(pkghdr) + +ifneq ($(skipsub),true) + @set -e; for sub in $($(*)_sub); do \ + pkg=$(bin_pkg_name)-$$sub; \ + dh_installchangelogs -p$$pkg; \ + dh_installdocs -p$$pkg; \ + dh_compress -p$$pkg; \ + dh_fixperms -p$$pkg -X/boot/; \ + dh_shlibdeps -p$$pkg; \ + dh_installdeb -p$$pkg; \ + $(lockme) dh_gencontrol -p$$pkg; \ + dh_md5sums -p$$pkg; \ + dh_builddeb -p$$pkg; \ + done +endif + +ifneq ($(skipdbg),true) + dh_installchangelogs -p$(dbgpkg) + dh_installdocs -p$(dbgpkg) + dh_compress -p$(dbgpkg) + dh_fixperms -p$(dbgpkg) + dh_installdeb -p$(dbgpkg) + $(lockme) dh_gencontrol -p$(dbgpkg) + dh_md5sums -p$(dbgpkg) + dh_builddeb -p$(dbgpkg) + + # Hokay...here's where we do a little twiddling... 
+ # Renaming the debug package prevents it from getting into + # the primary archive, and therefore prevents this very large + # package from being mirrored. It is instead, through some + # archive admin hackery, copied to http://ddebs.ubuntu.com. + # + mv ../$(dbgpkg)_$(release)-$(revision)_$(arch).deb \ + ../$(dbgpkg)_$(release)-$(revision)_$(arch).ddeb + set -e; \ + ( \ + $(lockme_cmd) 9 || exit 1; \ + if grep -qs '^Build-Debug-Symbols: yes$$' /CurrentlyBuilding; then \ + sed -i '/^$(dbgpkg)_/s/\.deb /.ddeb /' debian/files; \ + else \ + grep -v '^$(dbgpkg)_.*$$' debian/files > debian/files.new; \ + mv debian/files.new debian/files; \ + fi; \ + ) 9>$(lockme_file) + # Now, the package wont get into the archive, but it will get put + # into the debug system. +endif + +ifeq ($(do_linux_tools),true) + dh_installchangelogs -p$(pkgtools) + dh_installdocs -p$(pkgtools) + dh_compress -p$(pkgtools) + dh_fixperms -p$(pkgtools) + dh_shlibdeps -p$(pkgtools) + dh_installdeb -p$(pkgtools) + $(lockme) dh_gencontrol -p$(pkgtools) + dh_md5sums -p$(pkgtools) + dh_builddeb -p$(pkgtools) +endif +ifeq ($(do_cloud_tools),true) + dh_installchangelogs -p$(pkgcloud) + dh_installdocs -p$(pkgcloud) + dh_compress -p$(pkgcloud) + dh_fixperms -p$(pkgcloud) + dh_shlibdeps -p$(pkgcloud) + dh_installdeb -p$(pkgcloud) + $(lockme) dh_gencontrol -p$(pkgcloud) + dh_md5sums -p$(pkgcloud) + dh_builddeb -p$(pkgcloud) +endif + +ifneq ($(full_build),false) + # Clean out this flavours build directory. + rm -rf $(builddir)/build-$* + # Clean out the debugging package source directory. + rm -rf $(dbgpkgdir) +endif + +# +# per-architecture packages +# +builddirpa = $(builddir)/tools-perarch + +$(stampdir)/stamp-prepare-perarch: + @echo Debug: $@ +ifeq ($(do_any_tools),true) + rm -rf $(builddirpa) + install -d $(builddirpa) + for i in *; do ln -s $(CURDIR)/$$i $(builddirpa); done + rm $(builddirpa)/tools + rsync -a tools/ $(builddirpa)/tools/ +endif + touch $@ + +$(stampdir)/stamp-build-perarch: $(stampdir)/stamp-prepare-perarch install-arch-headers + @echo Debug: $@ +ifeq ($(do_linux_tools),true) +ifeq ($(do_tools_cpupower),true) + # Allow for multiple installed versions of cpupower and libcpupower.so: + # Override LIB_MIN in order to to generate a versioned .so named + # libcpupower.so.$(abi_release) and link cpupower with that. + make -C $(builddirpa)/tools/power/cpupower \ + CROSS_COMPILE=$(CROSS_COMPILE) \ + LIB_MIN=$(abi_release) CPUFREQ_BENCH=false +endif +ifeq ($(do_tools_perf),true) + cd $(builddirpa)/tools/perf && \ + make prefix=/usr HAVE_CPLUS_DEMANGLE=1 CROSS_COMPILE=$(CROSS_COMPILE) NO_LIBPYTHON=1 NO_LIBPERL=1 PYTHON=python2.7 +endif +ifeq ($(do_tools_x86),true) + cd $(builddirpa)/tools/power/x86/x86_energy_perf_policy && make CROSS_COMPILE=$(CROSS_COMPILE) + cd $(builddirpa)/tools/power/x86/turbostat && make CROSS_COMPILE=$(CROSS_COMPILE) +endif +endif +ifeq ($(do_cloud_tools),true) +ifeq ($(do_tools_hyperv),true) + cd $(builddirpa)/tools/hv && make CFLAGS="-I$(headers_dir)/usr/include -I$(headers_dir)/usr/include/$(DEB_HOST_MULTIARCH)" CROSS_COMPILE=$(CROSS_COMPILE) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon +endif +endif + @touch $@ + +install-perarch: toolspkgdir = $(CURDIR)/debian/$(tools_pkg_name) +install-perarch: cloudpkgdir = $(CURDIR)/debian/$(cloud_pkg_name) +install-perarch: $(stampdir)/stamp-build-perarch + @echo Debug: $@ + # Add the tools. 
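+	# (Layout sketch: the binaries installed below land in
+	# /usr/lib/$(src_pkg_name)-tools-$(abi_release)/, the flavour packages
+	# built in install-% add symlinks under /usr/lib/linux-tools/ for each
+	# ABI-flavour, and the /usr/bin wrappers installed by install-tools in
+	# 3-binary-indep.mk, from debian/tools/generic, dispatch to the matching
+	# versioned directory at run time.)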
+ifeq ($(do_linux_tools),true) + install -d $(toolspkgdir)/usr/lib + install -d $(toolspkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) + +ifeq ($(do_tools_cpupower),true) + install -m755 $(builddirpa)/tools/power/cpupower/cpupower \ + $(toolspkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) + # Install only the full versioned libcpupower.so.$(abi_release), not + # the usual symlinks to it. + install -m644 $(builddirpa)/tools/power/cpupower/libcpupower.so.$(abi_release) \ + $(toolspkgdir)/usr/lib/ +endif +ifeq ($(do_tools_perf),true) + install -m755 $(builddirpa)/tools/perf/perf $(toolspkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) +endif +ifeq ($(do_tools_x86),true) + install -m755 $(builddirpa)/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy \ + $(toolspkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) + install -m755 $(builddirpa)/tools/power/x86/turbostat/turbostat \ + $(toolspkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) +endif +endif +ifeq ($(do_cloud_tools),true) +ifeq ($(do_tools_hyperv),true) + install -d $(cloudpkgdir)/usr/lib + install -d $(cloudpkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) + install -m755 $(builddirpa)/tools/hv/hv_kvp_daemon \ + $(cloudpkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) + install -m755 $(builddirpa)/tools/hv/hv_vss_daemon \ + $(cloudpkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) + install -m755 $(builddirpa)/tools/hv/hv_fcopy_daemon \ + $(cloudpkgdir)/usr/lib/$(src_pkg_name)-tools-$(abi_release) +endif +endif + +binary-perarch: toolspkg = $(tools_pkg_name) +binary-perarch: cloudpkg = $(cloud_pkg_name) +binary-perarch: install-perarch + @echo Debug: $@ +ifeq ($(do_linux_tools),true) + dh_strip -p$(toolspkg) + dh_installchangelogs -p$(toolspkg) + dh_installdocs -p$(toolspkg) + dh_compress -p$(toolspkg) + dh_fixperms -p$(toolspkg) + dh_shlibdeps -p$(toolspkg) + dh_installdeb -p$(toolspkg) + $(lockme) dh_gencontrol -p$(toolspkg) + dh_md5sums -p$(toolspkg) + dh_builddeb -p$(toolspkg) +endif +ifeq ($(do_cloud_tools),true) + dh_strip -p$(cloudpkg) + dh_installchangelogs -p$(cloudpkg) + dh_installdocs -p$(cloudpkg) + dh_compress -p$(cloudpkg) + dh_fixperms -p$(cloudpkg) + dh_shlibdeps -p$(cloudpkg) + dh_installdeb -p$(cloudpkg) + $(lockme) dh_gencontrol -p$(cloudpkg) + dh_md5sums -p$(cloudpkg) + dh_builddeb -p$(cloudpkg) +endif + +binary-debs: signed = $(CURDIR)/debian/$(bin_pkg_name)-signed +binary-debs: signedv = $(CURDIR)/debian/$(bin_pkg_name)-signed/$(release)-$(revision) +binary-debs: signed_tar = $(src_pkg_name)_$(release)-$(revision)_$(arch).tar.gz +binary-debs: binary-perarch $(addprefix binary-,$(flavours)) + @echo Debug: $@ +ifeq ($(uefi_signed),true) + echo $(release)-$(revision) > $(signedv)/version + cd $(signedv) && ls *.efi >flavours + cd $(signed) && tar czvf ../../../$(signed_tar) . 
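+	# (The tarball of unsigned EFI images is only registered with the upload,
+	# via debian/files, under the raw-uefi section so that signing can happen
+	# outside this build; nothing in this tree performs the actual signing.)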
+ dpkg-distaddfile $(signed_tar) raw-uefi - +endif + +build-arch-deps-$(do_flavour_image_package) += $(addprefix $(stampdir)/stamp-build-,$(flavours)) +build-arch: $(build-arch-deps-true) + @echo Debug: $@ + +ifeq ($(AUTOBUILD),) +binary-arch-deps-$(do_flavour_image_package) += binary-udebs +else +binary-arch-deps-$(do_flavour_image_package) = binary-debs +endif +binary-arch-deps-$(do_libc_dev_package) += binary-arch-headers +ifneq ($(do_common_headers_indep),true) +binary-arch-deps-$(do_flavour_header_package) += binary-headers +endif +binary-arch: $(binary-arch-deps-true) + @echo Debug: $@ + --- linux-3.13.0.orig/debian/rules.d/3-binary-indep.mk +++ linux-3.13.0/debian/rules.d/3-binary-indep.mk @@ -0,0 +1,163 @@ +build-indep: + @echo Debug: $@ + +# The binary-indep dependency chain is: +# +# install-headers <- install-doc <- install-source <- install-tools <- install-indep <- binary-indep +# install-headers <- binary-headers +# +indep_hdrpkg = $(indep_hdrs_pkg_name) +indep_hdrdir = $(CURDIR)/debian/$(indep_hdrpkg)/usr/src/$(indep_hdrpkg) +install-headers: + @echo Debug: $@ + dh_testdir + dh_testroot + dh_prep + +ifeq ($(do_flavour_header_package),true) + install -d $(indep_hdrdir) + find . -path './debian' -prune -o -path './$(DEBIAN)' -prune \ + -o -path './include/*' -prune \ + -o -path './scripts/*' -prune -o -type f \ + \( -name 'Makefile*' -o -name 'Kconfig*' -o -name 'Kbuild*' -o \ + -name '*.sh' -o -name '*.pl' -o -name '*.lds' \) \ + -print | cpio -pd --preserve-modification-time $(indep_hdrdir) + cp -a scripts include $(indep_hdrdir) + (find arch -name include -type d -print | \ + xargs -n1 -i: find : -type f) | \ + cpio -pd --preserve-modification-time $(indep_hdrdir) +endif + +docpkg = $(doc_pkg_name) +docdir = $(CURDIR)/debian/$(docpkg)/usr/share/doc/$(docpkg) +install-doc: install-headers + @echo Debug: $@ +ifeq ($(do_doc_package),true) + dh_testdir + dh_testroot + + install -d $(docdir) +ifeq ($(do_doc_package_content),true) + # First the html docs. We skip these for autobuilds + if [ -z "$(AUTOBUILD)" ]; then \ + install -d $(docdir)/$(doc_pkg_name)-tmp; \ + $(kmake) O=$(docdir)/$(doc_pkg_name)-tmp htmldocs; \ + mv $(docdir)/$(doc_pkg_name)-tmp/Documentation/DocBook \ + $(docdir)/html; \ + rm -rf $(docdir)/$(doc_pkg_name)-tmp; \ + fi +endif + # Copy the rest + cp -a Documentation/* $(docdir) + rm -rf $(docdir)/DocBook + find $(docdir) -name .gitignore | xargs rm -f +endif + +srcpkg = $(src_pkg_name)-source-$(release) +srcdir = $(CURDIR)/debian/$(srcpkg)/usr/src/$(srcpkg) +balldir = $(CURDIR)/debian/$(srcpkg)/usr/src/$(srcpkg)/$(srcpkg) +install-source: install-doc + @echo Debug: $@ +ifeq ($(do_source_package),true) + + install -d $(srcdir) +ifeq ($(do_source_package_content),true) + find . -path './debian' -prune -o -path './$(DEBIAN)' -prune -o \ + -path './.*' -prune -o -print | \ + cpio -pd --preserve-modification-time $(balldir) + (cd $(srcdir); tar cf - $(srcpkg)) | bzip2 -9c > \ + $(srcdir)/$(srcpkg).tar.bz2 + rm -rf $(balldir) + find './debian' './$(DEBIAN)' \ + -path './debian/linux-*' -prune -o \ + -path './debian/$(src_pkg_name)-*' -prune -o \ + -path './debian/build' -prune -o \ + -path './debian/files' -prune -o \ + -path './debian/stamps' -prune -o \ + -path './debian/tmp' -prune -o \ + -print | \ + cpio -pd --preserve-modification-time $(srcdir) + ln -s $(srcpkg)/$(srcpkg).tar.bz2 $(srcdir)/.. 
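+	# (Net effect, roughly: the source package ships a bzip2 tarball of the
+	# tree plus the packaging directories under /usr/src, so a rebuild can
+	# start from something like
+	#     tar xjf /usr/src/SRCPKG/SRCPKG.tar.bz2
+	# with SRCPKG standing in for the versioned source directory name.)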
+endif +endif + +install-tools: toolspkg = $(tools_common_pkg_name) +install-tools: toolsbin = $(CURDIR)/debian/$(toolspkg)/usr/bin +install-tools: toolssbin = $(CURDIR)/debian/$(toolspkg)/usr/sbin +install-tools: toolsman = $(CURDIR)/debian/$(toolspkg)/usr/share/man +install-tools: cloudpkg = $(cloud_common_pkg_name) +install-tools: cloudbin = $(CURDIR)/debian/$(cloudpkg)/usr/bin +install-tools: cloudsbin = $(CURDIR)/debian/$(cloudpkg)/usr/sbin +install-tools: cloudman = $(CURDIR)/debian/$(cloudpkg)/usr/share/man +install-tools: install-source $(stampdir)/stamp-build-perarch + @echo Debug: $@ + + rm -rf $(builddir)/tools + install -d $(builddir)/tools + for i in *; do ln -s $(CURDIR)/$$i $(builddir)/tools/; done + rm $(builddir)/tools/tools + rsync -a tools/ $(builddir)/tools/tools/ + + install -d $(toolsbin) + install -d $(toolsman)/man1 + + install -m755 debian/tools/generic $(toolsbin)/cpupower + install -m644 $(CURDIR)/tools/power/cpupower/man/*.1 $(toolsman)/man1/ + + install -m755 debian/tools/generic $(toolsbin)/perf + + install -m755 debian/tools/generic $(toolsbin)/x86_energy_perf_policy + install -m755 debian/tools/generic $(toolsbin)/turbostat + + cd $(builddir)/tools/tools/perf && make man + install -m644 $(builddir)/tools/tools/perf/Documentation/*.1 \ + $(toolsman)/man1 + + install -d $(toolsman)/man8 + install -m644 $(CURDIR)/tools/power/x86/x86_energy_perf_policy/*.8 $(toolsman)/man8 + install -m644 $(CURDIR)/tools/power/x86/turbostat/*.8 $(toolsman)/man8 + + install -d $(cloudsbin) + install -m755 debian/tools/generic $(cloudsbin)/hv_kvp_daemon + install -m755 debian/tools/generic $(cloudsbin)/hv_vss_daemon + install -m755 debian/tools/generic $(cloudsbin)/hv_fcopy_daemon + install -m755 debian/cloud-tools/hv_get_dhcp_info $(cloudsbin) + install -m755 debian/cloud-tools/hv_get_dns_info $(cloudsbin) + install -m755 debian/cloud-tools/hv_set_ifconfig $(cloudsbin) + + install -d $(cloudman)/man8 + install -m644 $(CURDIR)/tools/hv/*.8 $(cloudman)/man8 + + dh_installinit -p$(cloudpkg) --name hv-kvp-daemon + dh_installinit -p$(cloudpkg) --name hv-vss-daemon + dh_installinit -p$(cloudpkg) --name hv-fcopy-daemon + + + +install-indep: install-tools + @echo Debug: $@ + +# This is just to make it easy to call manually. Normally done in +# binary-indep target during builds. 
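+# (For example, running something like
+#     fakeroot debian/rules binary-headers
+# from the top of the source tree should be enough for a local build of
+# just this package.)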
+binary-headers: install-headers + @echo Debug: $@ + dh_installchangelogs -p$(indep_hdrpkg) + dh_installdocs -p$(indep_hdrpkg) + dh_compress -p$(indep_hdrpkg) + dh_fixperms -p$(indep_hdrpkg) + dh_installdeb -p$(indep_hdrpkg) + $(lockme) dh_gencontrol -p$(indep_hdrpkg) + dh_md5sums -p$(indep_hdrpkg) + dh_builddeb -p$(indep_hdrpkg) + +binary-indep: install-indep + @echo Debug: $@ + + dh_installchangelogs -i + dh_installdocs -i + dh_compress -i + dh_fixperms -i + dh_installdeb -i + $(lockme) dh_gencontrol -i + dh_md5sums -i + dh_builddeb -i --- linux-3.13.0.orig/debian/rules.d/4-checks.mk +++ linux-3.13.0/debian/rules.d/4-checks.mk @@ -0,0 +1,27 @@ +# Check ABI for package against last release (if not same abinum) +abi-check-%: $(stampdir)/stamp-build-% + @echo Debug: $@ + install -d $(abidir) + sed -e 's/^\(.\+\)[[:space:]]\+\(.\+\)[[:space:]]\(.\+\)$$/\3 \2 \1/' \ + $(builddir)/build-$*/Module.symvers | sort > $(abidir)/$* + @perl -f $(DROOT)/scripts/abi-check "$*" "$(prev_abinum)" "$(abinum)" \ + "$(prev_abidir)" "$(abidir)" "$(skipabi)" + +# Check the module list against the last release (always) +module-check-%: $(stampdir)/stamp-build-% + @echo Debug: $@ + install -d $(abidir) + find $(builddir)/build-$*/ -name \*.ko | \ + sed -e 's/.*\/\([^\/]*\)\.ko/\1/' | sort > $(abidir)/$*.modules + @perl -f $(DROOT)/scripts/module-check "$*" \ + "$(prev_abidir)" "$(abidir)" $(skipmodule) + +checks-%: module-check-% abi-check-% + @echo Debug: $@ + +# Check the config against the known options list. +config-prepare-check-%: $(stampdir)/stamp-prepare-tree-% + @echo Debug: $@ + @perl -f $(DROOT)/scripts/config-check \ + $(builddir)/build-$*/.config "$(arch)" "$*" "$(sharedconfdir)" "$(skipconfig)" + --- linux-3.13.0.orig/debian/rules.d/5-udebs.mk +++ linux-3.13.0/debian/rules.d/5-udebs.mk @@ -0,0 +1,74 @@ +# Do udebs if not disabled in the arch-specific makefile +binary-udebs: binary-debs + @echo Debug: $@ +ifeq ($(disable_d_i),) + @$(MAKE) --no-print-directory -f $(DROOT)/rules DEBIAN=$(DEBIAN) \ + do-binary-udebs +endif + +do-binary-udebs: debian/control + @echo Debug: $@ + dh_testdir + dh_testroot + + # unpack the kernels into a temporary directory + mkdir -p debian/d-i-${arch} + + imagelist=$$(cat $(builddir)/kernel-versions | grep ^${arch} | gawk '{print $$4}') && \ + for i in $$imagelist; do \ + dpkg -x $$(ls ../linux-image-$$i\_$(release)-$(revision)_${arch}.deb) \ + debian/d-i-${arch}; \ + if [ -f ../linux-image-extra-$$i\_$(release)-$(revision)_${arch}.deb ] ; then \ + dpkg -x ../linux-image-extra-$$i\_$(release)-$(revision)_${arch}.deb \ + debian/d-i-${arch}; \ + fi; \ + /sbin/depmod -b debian/d-i-${arch} $$i; \ + done + + # kernel-wedge will error if no modules unless this is touched + touch $(CURDIR)/debian/build/no-modules + + touch ignore-dups + export SOURCEDIR=$(CURDIR)/debian/d-i-${arch} && \ + cd $(builddir) && \ + kernel-wedge install-files && \ + kernel-wedge check + + # Build just the udebs + dilist=$$(dh_listpackages -s | grep "\-di$$") && \ + [ -z "$dilist" ] || \ + for i in $$dilist; do \ + dh_fixperms -p$$i; \ + $(lockme) dh_gencontrol -p$$i; \ + dh_builddeb -p$$i; \ + done + + # Generate the meta-udeb dependancy lists. 
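+	# (What the gawk program below does: it walks debian/control, picks out
+	# the per-flavour -di udeb packages that list this architecture, writes a
+	# substvars file per flavour carrying their names as a udeb:Depends list,
+	# and records the meta-udeb package names that the loop afterwards feeds
+	# to dh_gencontrol and dh_builddeb.)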
+ @gawk ' \ + /^Package:/ { \ + package=$$2; flavour=""; parch="" } \ + (/Package-Type: udeb/ && package !~ /^'$(src_pkg_name)'-udebs-/) { \ + match(package, "'$(release)'-'$(abinum)'-(.*)-di", bits); \ + flavour = bits[1]; \ + } \ + (/^Architecture:/ && $$0 " " ~ / '$(arch)'/) { \ + parch=$$0; \ + } \ + (flavour != "" && parch != "") { \ + udebs[flavour] = udebs[flavour] package ", "; \ + flavour=""; parch=""; \ + } \ + END { \ + for (flavour in udebs) { \ + package="'$(src_pkg_name)'-udebs-" flavour; \ + file="debian/" package ".substvars"; \ + print("udeb:Depends=" udebs[flavour]) > file; \ + metas="'$(builddir)'/udeb-meta-packages"; \ + print(package) >metas \ + } \ + } \ + ' <$(CURDIR)/debian/control + @while read i; do \ + $(lockme) dh_gencontrol -p$$i; \ + dh_builddeb -p$$i; \ + done <$(builddir)/udeb-meta-packages --- linux-3.13.0.orig/debian/scripts/abi-check +++ linux-3.13.0/debian/scripts/abi-check @@ -0,0 +1,210 @@ +#!/usr/bin/perl -w + +my $flavour = shift; +my $prev_abinum = shift; +my $abinum = shift; +my $prev_abidir = shift; +my $abidir = shift; +my $skipabi = shift; + +my $fail_exit = 1; +my $EE = "EE:"; +my $errors = 0; +my $abiskip = 0; + +my $count; + +print "II: Checking ABI for $flavour...\n"; + +if (-f "$prev_abidir/ignore" + or -f "$prev_abidir/$flavour.ignore" or "$skipabi" eq "true") { + print "WW: Explicitly asked to ignore ABI, running in no-fail mode\n"; + $fail_exit = 0; + $abiskip = 1; + $EE = "WW:"; +} + +if ($prev_abinum != $abinum) { + print "II: Different ABI's, running in no-fail mode\n"; + $fail_exit = 0; + $EE = "WW:"; +} + +if (not -f "$abidir/$flavour" or not -f "$prev_abidir/$flavour") { + print "EE: Previous or current ABI file missing!\n"; + print " $abidir/$flavour\n" if not -f "$abidir/$flavour"; + print " $prev_abidir/$flavour\n" if not -f "$prev_abidir/$flavour"; + + # Exit if the ABI files are missing, but return status based on whether + # skip ABI was indicated. 
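+	# (The files checked here are the flattened Module.symvers dumps written
+	# by the abi-check-% target in 4-checks.mk; each line is expected to
+	# read like "EXPORT_SYMBOL_GPL vmlinux 0x12345678 some_symbol", i.e.
+	# export type, location, CRC, symbol -- the CRC shown is only an
+	# illustration.)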
+ if ("$abiskip" eq "1") { + exit(0); + } else { + exit(1); + } +} + +my %symbols; +my %symbols_ignore; +my %modules_ignore; +my %module_syms; + +# See if we have any ignores +my $ignore = 0; +print " Reading symbols/modules to ignore..."; + +for $file ("$prev_abidir/../blacklist", "$prev_abidir/../../perm-blacklist") { + if (-f $file) { + open(IGNORE, "< $file") or + die "Could not open $file"; + while () { + chomp; + if ($_ =~ m/M: (.*)/) { + $modules_ignore{$1} = 1; + } else { + $symbols_ignore{$_} = 1; + } + $ignore++; + } + close(IGNORE); + } +} +print "read $ignore symbols/modules.\n"; + +sub is_ignored($$) { + my ($mod, $sym) = @_; + + die "Missing module name in is_ignored()" if not defined($mod); + die "Missing symbol name in is_ignored()" if not defined($sym); + + if (defined($symbols_ignore{$sym}) or defined($modules_ignore{$mod})) { + return 1; + } + return 0; +} + +# Read new syms first +print " Reading new symbols ($abinum)..."; +$count = 0; +open(NEW, "< $abidir/$flavour") or + die "Could not open $abidir/$flavour"; +while () { + chomp; + m/^(\S+)\s(.+)\s(0x[0-9a-f]+)\s(.+)$/; + $symbols{$4}{'type'} = $1; + $symbols{$4}{'loc'} = $2; + $symbols{$4}{'hash'} = $3; + $module_syms{$2} = 0; + $count++; +} +close(NEW); +print "read $count symbols.\n"; + +# Now the old symbols, checking for missing ones +print " Reading old symbols ($prev_abinum)..."; +$count = 0; +open(OLD, "< $prev_abidir/$flavour") or + die "Could not open $prev_abidir/$flavour"; +while () { + chomp; + m/^(\S+)\s(.+)\s(0x[0-9a-f]+)\s(.+)$/; + $symbols{$4}{'old_type'} = $1; + $symbols{$4}{'old_loc'} = $2; + $symbols{$4}{'old_hash'} = $3; + $count++; +} +close(OLD); + +print "read $count symbols.\n"; + +print "II: Checking for missing symbols in new ABI..."; +$count = 0; +foreach $sym (keys(%symbols)) { + if (!defined($symbols{$sym}{'type'})) { + print "\n" if not $count; + printf(" MISS : %s%s\n", $sym, + is_ignored($symbols{$sym}{'old_loc'}, $sym) ? " (ignored)" : ""); + $count++ if !is_ignored($symbols{$sym}{'old_loc'}, $sym); + } +} +print " " if $count; +print "found $count missing symbols\n"; +if ($count) { + print "$EE Symbols gone missing (what did you do!?!)\n"; + $errors++; +} + + +print "II: Checking for new symbols in new ABI..."; +$count = 0; +foreach $sym (keys(%symbols)) { + if (!defined($symbols{$sym}{'old_type'})) { + print "\n" if not $count; + print " NEW : $sym\n"; + $count++; + } +} +print " " if $count; +print "found $count new symbols\n"; +if ($count and $prev_abinum == $abinum) { + print "WW: Found new symbols within same ABI. Not recommended\n"; +} + +print "II: Checking for changes to ABI...\n"; +$count = 0; +my $moved = 0; +my $changed_type = 0; +my $changed_hash = 0; +foreach $sym (keys(%symbols)) { + if (!defined($symbols{$sym}{'old_type'}) or + !defined($symbols{$sym}{'type'})) { + next; + } + + # Changes in location don't hurt us, but log it anyway + if ($symbols{$sym}{'loc'} ne $symbols{$sym}{'old_loc'}) { + printf(" MOVE : %-40s : %s => %s\n", $sym, $symbols{$sym}{'old_loc'}, + $symbols{$sym}{'loc'}); + $moved++; + } + + # Changes to export type are only bad if new type isn't + # EXPORT_SYMBOL. Changing things to GPL are bad. + if ($symbols{$sym}{'type'} ne $symbols{$sym}{'old_type'}) { + printf(" TYPE : %-40s : %s => %s%s\n", $sym, $symbols{$sym}{'old_type'}. + $symbols{$sym}{'type'}, is_ignored($symbols{$sym}{'loc'}, $sym) + ? 
" (ignored)" : ""); + $changed_type++ if $symbols{$sym}{'type'} ne "EXPORT_SYMBOL" + and !is_ignored($symbols{$sym}{'loc'}, $sym); + } + + # Changes to the hash are always bad + if ($symbols{$sym}{'hash'} ne $symbols{$sym}{'old_hash'}) { + printf(" HASH : %-40s : %s => %s%s\n", $sym, $symbols{$sym}{'old_hash'}, + $symbols{$sym}{'hash'}, is_ignored($symbols{$sym}{'loc'}, $sym) + ? " (ignored)" : ""); + $changed_hash++ if !is_ignored($symbols{$sym}{'loc'}, $sym); + $module_syms{$symbols{$sym}{'loc'}}++; + } +} + +print "WW: $moved symbols changed location\n" if $moved; +print "$EE $changed_type symbols changed export type and weren't ignored\n" if $changed_type; +print "$EE $changed_hash symbols changed hash and weren't ignored\n" if $changed_hash; + +$errors++ if $changed_hash or $changed_type; +if ($changed_hash) { + print "II: Module hash change summary...\n"; + foreach $mod (sort { $module_syms{$b} <=> $module_syms{$a} } keys %module_syms) { + next if ! $module_syms{$mod}; + printf(" %-40s: %d\n", $mod, $module_syms{$mod}); + } +} + +print "II: Done\n"; + +if ($errors) { + exit($fail_exit); +} else { + exit(0); +} --- linux-3.13.0.orig/debian/scripts/config-check +++ linux-3.13.0/debian/scripts/config-check @@ -0,0 +1,413 @@ +#!/usr/bin/perl +# +# check-config -- check the current config for issues +# +use strict; + +my $P = 'check-config'; + +my $test = -1; +if ($ARGV[0] eq '--test') { + $test = $ARGV[1] + 0; +} elsif ($#ARGV != 4) { + die "Usage: $P \n"; +} + +my ($config, $arch, $flavour, $commonconfig, $warn_only) = @ARGV; + +my $checks = "$commonconfig/enforce"; +my %values = (); + +# If we are in overridden then still perform the checks and emit the messages +# but do not return failure. Those items marked FATAL will alway trigger +# failure. +my $fail_exit = 1; +$fail_exit = 0 if ($warn_only eq 'true' || $warn_only eq '1'); +my $exit_val = 0; + +# Predicate execution engine. +sub pred_first { + my ($rest) = @_; + my $depth = 0; + my $off; + my $char; + my $pred; + + for ($off = 0; $off <= length($rest); $off++) { + $char = substr($rest, $off, 1); + if ($char eq '(') { + $depth++; + } elsif ($char eq ')') { + $depth--; + } elsif ($depth == 0 && $char eq '&') { + last; + } elsif ($depth == 0 && $char eq '|') { + last; + } + } + if ($depth > 0) { + die "$P: $rest: missing close parenthesis ')'\n"; + } elsif ($depth < 0) { + die "$P: $rest: missing open parenthesis '('\n"; + } + + ($pred, $rest) = (substr($rest, 0, $off), substr($rest, $off + 1)); + + $pred =~ s/^\s*//; + $pred =~ s/\s*$//; + + #print "pred<$pred> rest<$rest> char<$char>\n"; + ($pred, $rest, $char); +} + +sub pred_do { + my ($pred) = @_; + my (@a) = split(' ', $pred); + my $possible; + + if ($a[0] eq 'arch') { + die "$P: $pred: malformed -- $pred \n" if ($#a < 1); + for $possible (@a[1..$#a]) { + #print " *** ARCH<$flavour ?? $possible>\n"; + return 1 if ($arch eq $possible); + } + return 0; + } elsif ($a[0] eq 'flavour') { + die "$P: $pred: malformed -- $pred \n" if ($#a < 1); + for $possible (@a[1..$#a]) { + #print " *** FLAVOUR<$flavour ?? $possible>\n"; + return 1 if ($flavour eq $possible); + } + return 0; + } elsif ($a[0] eq 'value') { + die "$P: $pred: malformed -- $pred \n" if ($#a != 2); + #print " *** CHECK<$a[1] $a[2] ?? " . $values{$a[1]} . 
">\n"; + return ($values{$a[1]} eq $a[2]); + } elsif ($a[0] eq 'exists') { + die "$P: $pred: malformed -- $pred \n" if ($#a != 1); + return (defined $values{$a[1]}); + } else { + die "$P: $pred: unknown predicate\n"; + } + return 1; +} +sub pred_exec { + my ($rest) = @_; + my $pred; + my $cut = 0; + my $res; + my $sep; + + #print "pred_exec<$rest>\n"; + + ($pred, $rest, $sep) = pred_first($rest); + + # Leading ! implies inversion. + if ($pred =~ /^\s*!\s*(.*)$/) { + #print " invert<$1>\n"; + ($cut, $res) = pred_exec($1); + $res = !$res; + + # Leading / implies a CUT operation. + } elsif ($pred =~ /^\s*\/\s*(.*)$/) { + #print " cut<$1>\n"; + ($cut, $res) = pred_exec($1); + $cut = 1; + + # Recurse left for complex expressions. + } elsif ($pred =~ /^\s*\((.*)\)\s*$/) { + #print " left<$1>\n"; + ($cut, $res) = pred_exec($1); + + # Check for common syntax issues. + } elsif ($pred eq '') { + if ($sep eq '&' || $sep eq '|') { + die "$P: $pred$rest: malformed binary operator\n"; + } else { + die "$P: $pred$rest: syntax error\n"; + } + + # A predicate, execute it. + } else { + #print " DO<$pred> sep<$sep>\n"; + $res = pred_do($pred); + } + + #print " pre-return res<$res> sep<$sep>\n"; + if ($sep eq '') { + # + + # Recurse right for binary operators -- note these are lazy. + } elsif ($sep eq '&' || $sep eq '|') { + #print " right<$rest> ? sep<$sep> res<$res>\n"; + if ($rest =~ /^\s*($|\||\&)/) { + die "$P: $pred$rest: malformed binary operator\n"; + } + if ($cut == 0 && (($res && $sep eq '&') || (!$res && $sep eq '|'))) { + #print " right<$rest>\n"; + ($cut, $res) = pred_exec($rest); + } + + } else { + die "$P: $pred$rest: malformed predicate\n"; + } + #warn " return cut<$cut> res<$res> sep<$sep>\n"; + return ($cut, $res); +} + +# +# PREDICATE TESTS +# +my $test_total = 1; +my $test_good = 0; +sub pred_test { + my ($pred, $eres, $eerr) = @_; + my ($cut, $res, $err, $fail); + + $test_total++; + if ($test != 0 && $test != $test_total - 1) { + return; + } + + eval { + ($cut, $res) = pred_exec($pred); + }; + $err = $@; + chomp($err); + + $res = !!$res; + $eres = !!$eres; + + $fail = ''; + if (defined $eres && $res != $eres) { + $fail = "result missmatch, expected $eres returned $res"; + } + if (defined $eerr && $err eq '') { + $fail = "error missmatch, expected '$eerr' returned success"; + } elsif (defined $eerr && $err !~ /$eerr/) { + $fail = "error missmatch, expected '$eerr' returned '$err'"; + } elsif (!defined $eerr && $err ne '') { + $fail = "error missmatch, expected success returned '$err'"; + } + + if ($fail eq '') { + $test_good++; + } else { + print "$pred: $test_total: FAIL: $fail\n"; + } + #print "TEST<$pred> eres<$eres> eerr<$eerr> res<$res> err<$err>\n"; +} +if ($test >= 0) { + $arch = 'MYARCH'; + $flavour = 'MYFLAVOUR'; + %values = ( 'ENABLED' => 'y', 'DISABLED' => 'n' ); + + # Errors. + my $eunkn = 'unknown predicate'; + my $epred = 'malformed'; + my $eclose = 'missing close parenthesis'; + my $eopen = 'missing open parenthesis'; + my $ebinary = 'malformed binary operator'; + + # Basic predicate tests. 
+ print "TEST: $test_total: basic predicate tests ...\n"; + + pred_test('nosuchcommand', undef, $eunkn); + pred_test('arch', undef, $epred); + pred_test('arch MYARCH', 1, undef); + pred_test('arch MYARCH NOTMYARCH', 1, undef); + pred_test('arch NOTMYARCH MYARCH', 1, undef); + pred_test('arch NOTMYARCH NOTMYARCH MYARCH', 1, undef); + pred_test('arch NOTMYARCH MYARCH NOTMYARCH', 1, undef); + pred_test('arch NOTMYARCH', 0, undef); + + pred_test('flavour', undef, $epred); + pred_test('flavour MYFLAVOUR', 1, undef); + pred_test('flavour NOTMYFLAVOUR MYFLAVOUR', 1, undef); + pred_test('flavour NOTMYFLAVOUR NOTMYFLAVOUR MYFLAVOUR', 1, undef); + pred_test('flavour NOTMYFLAVOUR MYFLAVOUR NOTMYFLAVOUR', 1, undef); + pred_test('flavour NOTMYFLAVOUR', 0, undef); + + pred_test('value', undef, $epred); + pred_test('value ENABLED', undef, $epred); + pred_test('value ENABLED ENABLED ENABLED', undef, $epred); + pred_test('value ENABLED y', 1, undef); + pred_test('value ENABLED n', 0, undef); + pred_test('value DISABLED n', 1, undef); + pred_test('value DISABLED y', 0, undef); + + pred_test('exists', undef, $epred); + pred_test('exists ENABLED ENABLED', undef, $epred); + pred_test('exists ENABLED', 1, undef); + pred_test('exists DISABLED', 1, undef); + pred_test('exists MISSING', 0, undef); + + print "TEST: $test_total: inversion tests ...\n"; + pred_test('!exists ENABLED', 0, undef); + pred_test('!exists MISSING', 1, undef); + pred_test('!!exists ENABLED', 1, undef); + pred_test('!!exists MISSING', 0, undef); + pred_test('!!!exists ENABLED', 0, undef); + pred_test('!!!exists MISSING', 1, undef); + + print "TEST: $test_total: parentheses tests ...\n"; + pred_test('(exists ENABLED)', 1, undef); + pred_test('((exists ENABLED))', 1, undef); + pred_test('(((exists ENABLED)))', 1, undef); + pred_test('(exists MISSING)', 0, undef); + pred_test('((exists MISSING))', 0, undef); + pred_test('(((exists MISSING)))', 0, undef); + + pred_test('(!exists ENABLED)', 0, undef); + pred_test('((!exists ENABLED))', 0, undef); + pred_test('(((!exists ENABLED)))', 0, undef); + pred_test('(!exists MISSING)', 1, undef); + pred_test('((!exists MISSING))', 1, undef); + pred_test('(((!exists MISSING)))', 1, undef); + + pred_test('((!(exists ENABLED)))', 0, undef); + pred_test('((!(exists MISSING)))', 1, undef); + pred_test('(!((exists ENABLED)))', 0, undef); + pred_test('(!((exists MISSING)))', 1, undef); + pred_test('!(((exists ENABLED)))', 0, undef); + pred_test('!(((exists MISSING)))', 1, undef); + pred_test('!((!(exists ENABLED)))', 1, undef); + pred_test('!((!(exists MISSING)))', 0, undef); + pred_test('!(!(!(exists ENABLED)))', 0, undef); + pred_test('!(!(!(exists MISSING)))', 1, undef); + + pred_test('(', undef, $eclose); + pred_test('()(', undef, $eclose); + pred_test('(())(', undef, $eclose); + pred_test('((()))(', undef, $eclose); + pred_test('(()', undef, $eclose); + pred_test('((())', undef, $eclose); + pred_test('(((()))', undef, $eclose); + pred_test('(()()', undef, $eclose); + pred_test('((())()', undef, $eclose); + + pred_test(')', undef, $eopen); + pred_test('())', undef, $eopen); + pred_test('(()))', undef, $eopen); + pred_test('((())))', undef, $eopen); + + print "TEST: $test_total: binary and tests ...\n"; + + pred_test('exists ENABLED &', undef, $ebinary); + pred_test('& exists ENABLED', undef, $ebinary); + pred_test('exists ENABLED & & exists ENABLED', undef, $ebinary); + + pred_test('exists MISSING & exists MISSING', 0, undef); + pred_test('exists MISSING & exists ENABLED', 0, undef); + pred_test('exists 
ENABLED & exists MISSING', 0, undef); + pred_test('exists ENABLED & exists ENABLED', 1, undef); + + pred_test('exists MISSING & exists MISSING & exists MISSING', 0, undef); + pred_test('exists MISSING & exists MISSING & exists ENABLED', 0, undef); + pred_test('exists MISSING & exists ENABLED & exists MISSING', 0, undef); + pred_test('exists MISSING & exists ENABLED & exists ENABLED', 0, undef); + pred_test('exists ENABLED & exists MISSING & exists MISSING', 0, undef); + pred_test('exists ENABLED & exists MISSING & exists ENABLED', 0, undef); + pred_test('exists ENABLED & exists ENABLED & exists MISSING', 0, undef); + pred_test('exists ENABLED & exists ENABLED & exists ENABLED', 1, undef); + + print "TEST: $test_total: binary or tests ...\n"; + + pred_test('exists ENABLED |', undef, $ebinary); + pred_test('| exists ENABLED', undef, $ebinary); + pred_test('exists ENABLED | | exists ENABLED', undef, $ebinary); + + pred_test('exists MISSING | exists MISSING', 0, undef); + pred_test('exists MISSING | exists ENABLED', 1, undef); + pred_test('exists ENABLED | exists MISSING', 1, undef); + pred_test('exists ENABLED | exists ENABLED', 1, undef); + + pred_test('exists MISSING | exists MISSING | exists MISSING', 0, undef); + pred_test('exists MISSING | exists MISSING | exists ENABLED', 1, undef); + pred_test('exists MISSING | exists ENABLED | exists MISSING', 1, undef); + pred_test('exists MISSING | exists ENABLED | exists ENABLED', 1, undef); + pred_test('exists ENABLED | exists MISSING | exists MISSING', 1, undef); + pred_test('exists ENABLED | exists MISSING | exists ENABLED', 1, undef); + pred_test('exists ENABLED | exists ENABLED | exists MISSING', 1, undef); + pred_test('exists ENABLED | exists ENABLED | exists ENABLED', 1, undef); + + print "TEST: $test_total: binary or/and combination tests ...\n"; + + pred_test('exists MISSING | exists MISSING & exists MISSING', 0, undef); + pred_test('exists MISSING | exists MISSING & exists ENABLED', 0, undef); + pred_test('exists MISSING | exists ENABLED & exists MISSING', 0, undef); + pred_test('exists MISSING | exists ENABLED & exists ENABLED', 1, undef); + pred_test('exists ENABLED | exists MISSING & exists MISSING', 1, undef); + pred_test('exists ENABLED | exists MISSING & exists ENABLED', 1, undef); + pred_test('exists ENABLED | exists ENABLED & exists MISSING', 1, undef); + pred_test('exists ENABLED | exists ENABLED & exists ENABLED', 1, undef); + + print "TEST: $test_total: binary and/or combination tests ...\n"; + + pred_test('exists MISSING & exists MISSING | exists MISSING', 0, undef); + pred_test('exists MISSING & exists MISSING | exists ENABLED', 0, undef); + pred_test('exists MISSING & exists ENABLED | exists MISSING', 0, undef); + pred_test('exists MISSING & exists ENABLED | exists ENABLED', 0, undef); + pred_test('exists ENABLED & exists MISSING | exists MISSING', 0, undef); + pred_test('exists ENABLED & exists MISSING | exists ENABLED', 1, undef); + pred_test('exists ENABLED & exists ENABLED | exists MISSING', 1, undef); + pred_test('exists ENABLED & exists ENABLED | exists ENABLED', 1, undef); + + print "TEST: $test_total: cut tests ...\n"; + pred_test('(arch MYARCH & exists MISSING) | exists ENABLED', 1, undef); + pred_test('(arch MYARCH &/ exists MISSING) | exists ENABLED', 0, undef); + + $test_total--; + print "TEST: $test_good/$test_total succeeded\n"; + + exit $exit_val; +} + +# Load up the current configuration values -- FATAL if this fails +print "$P: $config: loading config\n"; +open(CONFIG, "<$config") || die "$P: $config: open 
failed -- $! -- aborting\n"; +while () { + # Pull out values. + /^#*\s*(CONFIG_\w+)[\s=](.*)$/ or next; + if ($2 eq 'is not set') { + $values{$1} = 'n'; + } else { + $values{$1} = $2; + } +} +close(CONFIG); + +# FATAL: Check if we have an enforcement list. +my $pass = 0; +my $total = 0; +my $line = ''; +print "$P: $checks: loading checks\n"; +open(CHECKS, "<$checks") || die "$P: $checks: open failed -- $! -- aborting\n"; +while () { + /^#/ && next; + chomp; + + $line .= $_; + if ($line =~ /\\$/) { + chop($line); + $line .= " "; + next; + } + $line =~ /^\s*$/ && next; + + #print "CHECK: <$line>\n"; + $total++; + my (undef, $result) = pred_exec($line); + if (!$result) { + print "$P: FAIL: $line\n"; + $exit_val = $fail_exit; + } else { + $pass++; + } + + $line = ''; +} +close(CHECKS); + +print "$P: $pass/$total checks passed -- exit $exit_val\n"; +exit $exit_val; --- linux-3.13.0.orig/debian/scripts/control-create +++ linux-3.13.0/debian/scripts/control-create @@ -0,0 +1,25 @@ +#!/bin/bash + +. debian/debian.env + +vars=$1 + +. $vars + +if [ "$is_sub" = "" ]; then + flavour=$(basename $vars | sed 's/.*\.//') + stub=${DEBIAN}/control.d/flavour-control.stub +else + flavour=$(basename $vars .vars) + stub=${DEBIAN}/sub-flavours/control.stub +fi + +cat $stub | grep -v '^#' | sed \ + -e "s#FLAVOUR#$flavour#g" \ + -e "s#DESC#$desc#g" \ + -e "s#ARCH#$arch#g" \ + -e "s#SUPPORTED#$supported#g" \ + -e "s#TARGET#$target#g" \ + -e "s#BOOTLOADER#$bootloader#g" \ + -e "s#=PROVIDES=#$provides#g" \ + -e "s#=CONFLICTS=#$conflicts#g" --- linux-3.13.0.orig/debian/scripts/link-headers +++ linux-3.13.0/debian/scripts/link-headers @@ -0,0 +1,42 @@ +#!/bin/bash -e + +. debian/debian.env + +hdrdir="$1" +symdir="$2" +flavour="$3" + +echo "Symlinking and copying headers for $flavour..." + +excludes="( -path ./debian -prune -o -path ./${DEBIAN} -prune -o -path ./.git ) -prune -o" + +( +find . $excludes -type f \ + \( -name 'Makefile*' -o -name 'Kconfig*' -o -name 'Kbuild*' -o \ + -name '*.sh' -o -name '*.pl' -o -name '*.lds' \) -print +find ./include ./scripts -name .gitignore -prune -o -type f -print +find ./include -mindepth 1 -maxdepth 1 $excludes -type d -print +) | ( +while read file; do + dir=$file + lastdir=$file + + if [ -e "$hdrdir/$file" -o -L "$hdrdir/$file" ]; then + continue + fi + + while [ ! -e "$hdrdir/$dir" -a ! -L "$hdrdir/$dir" ]; do + lastdir=$dir + dir=`dirname $dir` + done + # If the last item to exist is a symlink we assume all is good + if [ ! -L "$hdrdir/$dir" ]; then + # Turns things like "./foo" into "../" + deref="`echo -n $lastdir | sed -e 's/^\.//' -e's,/[^/]*,../,g'`" + item="`echo -n $lastdir | sed -e 's/^\.\///'`" + ln -s $deref$symdir/$item $hdrdir/$item + fi +done +) + +exit --- linux-3.13.0.orig/debian/scripts/misc/find-obsolete-firmware +++ linux-3.13.0/debian/scripts/misc/find-obsolete-firmware @@ -0,0 +1,91 @@ +#!/bin/bash +# +# Find all duplicate or obsolete firmware that is being carried +# in the kernel firmware directory. Compare these files against +# the linux-firmware package for the approriate release. For example, +# assuming this is raring, then compare the kernel firmware files +# against the raring branch of linux-firmware. +# +# Example: $0 ~/ubuntu/linux-firmware-raring + +USEAGE="$0 LINUX-FIRMWARE" + +. debian/debian.env + +NFWINFO="`find $DEBIAN -name fwinfo|wc -l`" +if [ ! "$NFWINFO" = "1" ] +then + echo Your repo is hosed. There can only be one fwinfo file. 
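+	# (fwinfo is the list of firmware: lines that debian/scripts/misc/getabis
+	# collected via modinfo for this release; the comparison below only makes
+	# sense against exactly one such file, hence the hard stop here.)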
+ find $DEBIAN -name fwinfo + exit 1 +fi + +FWINFO="`pwd`/`find $DEBIAN -name fwinfo`" + +if [ "$1" = "" ] +then + echo $USEAGE + exit 1 +fi +FW="$1" + +if [ ! -f $FW/WHENCE ] +then + echo Bogus linux-firmware directory + exit 1 +fi +if ! egrep -q "^firmware:" $FWINFO +then + echo Bogus firmware info file + exit 1 +fi + +# +# Prepare the tree and make firmware. +# +TEE="tee -a" +LO=`pwd`/firmware.txt +LF=`pwd`/lib/firmware +rm -rf debian/build $LF $LO +fakeroot debian/rules clean prepare-generic +cp debian/build/build-generic/.config . +mkdir -p $LF +make firmware_install INSTALL_MOD_PATH=`pwd` + +(cd $LF +find . -type f | while read f +do + BN="`basename $f`" + + if ! grep -q $BN $FWINFO + then + echo "Unused firmware: $f" | $TEE $LO + else + if [ -f $FW/$f ] + then + if ! cmp $FW/$f $f + then + echo "$f differs" | $TEE $LO + else + echo "$f is duplicated" | $TEE $LO + fi + else + echo "$f does not exist in $FW" | $TEE $LO + fi + fi +done) + +# +# Check for firmware files referenced by the kernel +# that do not exist in either location. +# +cat $FWINFO | while read fwi f +do + if [ -s lib/firmware/$f ] || [ -s $FW/$f ] + then + continue + else + echo "Missing firmware $f" | $TEE $LO + fi +done + --- linux-3.13.0.orig/debian/scripts/misc/fw-to-ihex.sh +++ linux-3.13.0/debian/scripts/misc/fw-to-ihex.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +F=$1 +if [ "$F" = "" ] +then + echo You must supply a firmware file. + exit 1 +fi + +echo "unsigned char d[] = {" > $F.c +hexdump -v -e '"\t" 8/1 "0x%02x, " "\n"' $F >> $F.c +echo "};" >> $F.c +sed -i 's/0x .*$//' $F.c + +O="`dirname $F`/`basename $F`.o" +gcc -o $O -c $F.c +objcopy -Oihex $F.o $F.ihex + --- linux-3.13.0.orig/debian/scripts/misc/get-firmware +++ linux-3.13.0/debian/scripts/misc/get-firmware @@ -0,0 +1,62 @@ +#!/bin/bash +# +# Find all files in linux-firmware that are new or different since the previous release +# and copy them into the kernel firmware directory. You should only do this on the +# backport branch since it would be redundant on the released kernel. It assumed you've +# unpacked linux-firmware from each release into separate directories. +# +# Example: $0 ~/ubuntu/linux-firmware-precise ~/ubuntu/linux-firmware-quantal + +if [ "$1" = "" ] || [ "$2" = "" ] || [ ! -f $1/WHENCE ] || [ ! -f $2/WHENCE ] +then + echo You must supply 2 firmware directories. + exit 1 +fi + +if [ ! -f debian/debian.env ] +then + echo You must run this script from the root of the repo + exit 1 +fi +. debian/debian.env + +NFWINFO="`find $DEBIAN -name fwinfo|wc -l`" +if [ ! "$NFWINFO" = "1" ] +then + echo Your repo is hosed. There can only be one fwinfo file. + find $DEBIAN -name fwinfo + exit 1 +fi + +FWINFO="`pwd`/`find $DEBIAN -name fwinfo`" + +CDIR=`pwd` +OFW=$1 +NFW=$2 + +cd $NFW +# +# Find all files in $NFW that are new or different from $1 +# +(find . -type f | egrep -v "debian|git|LICEN|WHEN|READ|Make|configure" | sed 's;\./;;' | \ +while read f +do + if grep -q $f $FWINFO + then + if [ ! -f $OFW/$f ] + then + echo $f + elif ! cmp $f $OFW/$f > /dev/null + then + echo $f + fi + fi +done) |\ +while read f +do + mkdir -p $CDIR/firmware/`dirname $f` + if [ ! -f $CDIR/firmware/`dirname $f`/`basename $f`.ihex ] + then + cp -v $f $CDIR/firmware/`dirname $f` + fi +done --- linux-3.13.0.orig/debian/scripts/misc/getabis +++ linux-3.13.0/debian/scripts/misc/getabis @@ -0,0 +1,130 @@ +#!/bin/bash + +if [ "$#" != "2" ]; then + echo "Usage: $0 " 1>&2 + exit 1 +fi + +if [ "$DEBIAN" = "" ]; then + . 
debian/debian.env +fi + +ver=$1 +revision=$2 +abi=$(echo $revision | sed -r -e 's/([^\+]*)\.[^\.]+(\+.*)?$/\1/') + +verabi=$ver-$abi +verfull=$ver-$revision + +WGET="wget --tries=1 --timeout=10 --quiet -c" + +abidir="`pwd`/$DEBIAN/abi/$verfull" +tmpdir="`pwd`/abi-tmp-$verfull" +origdir="`pwd`" +fwinfo=$abidir/fwinfo + +test -d $tmpdir || mkdir $tmpdir + +package_prefixes() { + __package_prefixes="$@" +} + +getall() { + arch=$1 + shift + + mkdir -p $abidir/$arch + + for sub in $@; do + if [ -f $abidir/$arch/$sub ]; then + echo "Exists: $sub" + continue + fi + echo -n "Fetching $sub($arch)..." + prefixes="" + filenames="" + cd $tmpdir + for prefix in $__package_prefixes + do + filename=${prefix}-${verabi}-${sub}_${verfull}_${arch}.deb + for r in "${repo_list[@]}" + do + if ! [ -f $filename ]; then + $WGET $r/$filename + fi + if [ -f $filename ]; then + prefixes="$prefixes $prefix" + filenames="$filenames $filename" + break + fi + done + done + if [ "$filenames" != "" ]; then + echo -n "extracting$prefixes..." + for filename in $filenames + do + dpkg-deb --extract $filename tmp + done + find tmp -name "*.ko" | while read f; do + modinfo $f | grep ^firmware >> $fwinfo + done + if [ -f tmp/boot/abi-* ]; then + mv tmp/boot/abi-* $abidir/$arch/$sub + else + echo -n "NO ABI FILE..." + fi + (cd tmp; find lib/modules/$verabi-$sub/kernel -name '*.ko') | \ + sed -e 's/.*\/\([^\/]*\)\.ko/\1/' | sort > \ + $abidir/$arch/$sub.modules + ( + cd tmp; + # Prevent exposing some errors when called by python scripts. SIGPIPE seems to get + # exposed when using the `find ...` form of the command. + ko=$(find lib/modules/$verabi-$sub/kernel \ + -name '*.ko' | head -1) + readelf -p .comment "$ko" | gawk ' + ($1 == "[") { + printf("%s", $3); + for (n=4; n<=NF; n++) { + printf(" %s", $n); + } + print "" + }' | sort -u >$abidir/$arch/$sub.compiler + version=`cat $abidir/$arch/$sub.compiler` + echo -n "$version..." + ) + rm -rf tmp $filenames + echo "done." + else + echo "FAILED." + fi + cd $origdir + done +} + +# MAIN + +# Setup abi directory +mkdir -p $abidir +echo $abi > $abidir/abiname + +# NOTE: The flavours are hardcoded, because they may have changed from the +# current build. + +__package_prefixes="linux-image" + +. $DEBIAN/etc/getabis + +compilers=`cat $abidir/*/*.compiler | sort -u | wc -l` +if [ "$compilers" != 1 ]; then + echo "WARNING: inconsistant compiler versions detected" 1>&2 +fi + +sort < $fwinfo | uniq > fwinfo.tmp +mv fwinfo.tmp $fwinfo + +rmdir $tmpdir + +# Add the new ABI directory, remove the old +git add $abidir +find $DEBIAN/abi/* -maxdepth 0 -type d | grep -v $verfull | while read f; do git rm -r $f;done --- linux-3.13.0.orig/debian/scripts/misc/git-ubuntu-log +++ linux-3.13.0/debian/scripts/misc/git-ubuntu-log @@ -0,0 +1,232 @@ +#!/usr/bin/perl -w + +use strict; +use Text::Wrap; + +my $kernel_auth = "Upstream Kernel Changes"; + +my (%map, @reverts); +my $pstate = 1; +my $no_kern_log = 0; +my $print_shas = 0; +my $first_print = 1; + +while (@ARGV) { + my $opt = $ARGV[0]; + shift; + if ($opt eq "--no-kern-log") { + $no_kern_log = 1; + } elsif ($opt eq "--print-shas") { + $print_shas = 1; + } else { + print STDERR "Unknown options: $opt\n"; + exit(1); + } +} + +sub check_reverts($) { + my ($entry) = @_; + my ($check); + + foreach $check (reverse @reverts) { + my $desc = "Revert \"" . $entry->{'desc'} . 
"\""; + if ($check->{'desc'} eq $desc) { + @reverts = grep($_->{'desc'} ne $desc, @reverts); + return 1; + } + } + + return 0; +} + +sub add_entry($) { + my ($entry) = @_; + my $key = $entry->{'author'}; + + # store description in array, in email->{desc list} map + if (exists $map{$key}) { + # grab ref + my $obj = $map{$key}; + + # add desc to array + push(@$obj, $entry); + } else { + # create new array, containing 1 item + my @arr = ($entry); + + # store ref to array + $map{$key} = \@arr; + } +} + +sub shortlog_entry($$$$$) { + my ($name, $desc, $bug, $cve, $commit) = @_; + my $entry; + + $desc =~ s#/pub/scm/linux/kernel/git/#/.../#g; + $desc =~ s#\[PATCH\] ##g; + + $desc =~ s#^\s*##g; + $desc =~ s# *UBUNTU: ##g; + + $entry->{'desc'} = $desc; + if ($bug ne '') { + $entry->{'bugno'} = $bug; + } + $entry->{'cve'} = $cve; + $entry->{'commit'} = $commit; + $entry->{'author'} = $name; + + if ($desc =~ /^Revert "/) { + push(@reverts, $entry); + return; + } + + return if check_reverts($entry); + + add_entry($entry); +} + +# sort comparison function +sub by_name($$) { + my ($a, $b) = @_; + + uc($a) cmp uc($b); +} + +sub shortlog_output { + my ($obj, $key, $entry); + + foreach $key (sort by_name keys %map) { + next if $key eq $kernel_auth and $no_kern_log; + + print "\n" unless $first_print; + $first_print = 0; + + # output author + printf " [ %s ]\n\n", $key; + + # output author's 1-line summaries + $obj = $map{$key}; + foreach $entry (reverse @$obj) { + print wrap(" * ", " ", $entry->{'desc'}) . "\n"; + # For non upstream changes, add other info. + if ($key ne $kernel_auth) { + if ($print_shas) { + print " - GIT-SHA " . $entry->{'commit'} . + "\n"; + } + } + if (defined($entry->{'bugno'})) { + print " - LP: #" . $entry->{'bugno'} . "\n"; + } + if (defined($entry->{'cve'})) { + print " - " . $entry->{'cve'} . 
"\n"; + } + } + } +} + +sub changelog_input { + my ($author, $desc, $commit, $entry, $cve); + + while () { + # get commit + if ($pstate == 1) { + next unless /^commit (.*)/; + + $commit = $1; + + $pstate++; + } + + # get author and email + elsif ($pstate == 2) { + my ($email); + + next unless /^[Aa]uthor:?\s*(.*?)\s*<(.*)>/; + + $author = $1; + $email = $2; + $desc = undef; + $cve = undef; + + # cset author fixups + if (!$author) { + $author = $email; + } + $pstate++; + } + + # skip to blank line + elsif ($pstate == 3) { + next unless /^\s*$/; + $pstate++; + } + + # skip to non-blank line + elsif ($pstate == 4) { + next unless /^\s*?(.*)/; + my $ignore = 0; + my $do_ignore = 0; + my $bug = undef; + my %bugz = (); + my $k; + + # skip lines that are obviously not + # a 1-line cset description + next if /^\s*From: /; + + chomp; + $desc = $1; + + if ($desc =~ /^ *(Revert "|)UBUNTU:/) { + $do_ignore = 1; + } else { + $do_ignore = 0; + $author = $kernel_auth; + $ignore = 1 if $desc =~ /Merge /; + } + while () { + $ignore = 1 if ($do_ignore && /^ *Ignore: yes/i); + if (/^ *Bug: *(#|)([0-9#,\s]*)\s*$/i) { + foreach $k (split('(,|\s)\s*(#|)', $2)) { + $bugz{$k} = 1 if (($k ne '') and ($k =~ /[0-9]+/)); + } + } + elsif (/^ *BugLink: *http.*:\/\/.*\/([0-9]+)/i) { + $bugz{$1} = 1; + } + elsif (/^ *(CVE-.*)/) { + $cve = $1 + } + last if /^commit /; + } + + $bug = join(", #", sort keys(%bugz)); + if (!$ignore) { + &shortlog_entry($author, $desc, $bug, + $cve, $commit, 0); + } + + $pstate = 1; + if ($_ && /^commit (.*)/) { + $commit = $1; + $pstate++; + } + } + + else { + die "invalid parse state $pstate"; + } + } + + foreach $entry (@reverts) { + add_entry($entry); + } +} + +&changelog_input; +&shortlog_output; + +exit(0); --- linux-3.13.0.orig/debian/scripts/misc/insert-changes.pl +++ linux-3.13.0/debian/scripts/misc/insert-changes.pl @@ -0,0 +1,36 @@ +#!/usr/bin/perl -w + +my $debian; +$droot = $ARGV[0] if (defined $ARGV[0]); +$droot = 'debian' if (!defined $droot); +$debian = $ARGV[1] if (defined $ARGV[1]); +$debian = 'debian.master' if (!defined $debian); + +system("make -s -f $droot/rules printchanges > $debian/changes"); + +open(CHANGELOG, "< $debian/changelog") or die "Cannot open changelog"; +open(CHANGES, "< $debian/changes") or die "Cannot open new changes"; +open(NEW, "> $debian/changelog.new") or die "Cannot open new changelog"; + +$printed = 0; + +while () { + if (/^ CHANGELOG: /) { + next if $printed; + + while () { + print NEW; + } + + $printed = 1; + } else { + print NEW; + } +} + +close(NEW); +close(CHANGES); +close(CHANGELOG); + +rename("$debian/changelog.new", "$debian/changelog"); +unlink("$debian/changes"); --- linux-3.13.0.orig/debian/scripts/misc/insert-mainline-changes +++ linux-3.13.0/debian/scripts/misc/insert-mainline-changes @@ -0,0 +1,42 @@ +#!/usr/bin/perl + +if ($#ARGV != 2) { + warn "Usage: $0 \n"; + die " $0 debian.master/changelog v3.2.3 v3.2.2..v3.2.3\n"; +} +my ($changelog, $to, $range) = @ARGV; + +my @changes = (); + +push(@changes, "\n"); +push(@changes, " [ Upstream Kernel Changes ]\n\n"); +push(@changes, " * rebase to $to\n"); + +open(LOG, "git log '$range'|") || die "$0: git log failed: - $!\n"; +while () { + if (m@BugLink: .*launchpad.net/.*/([0-9]+)\s$@) { + push(@changes, " - LP: #$1\n"); + } +} +close(LOG); + +open(CHANGELOG, "< $changelog") or die "Cannot open changelog"; +open(NEW, "> $changelog.new") or die "Cannot open new changelog"; + +$printed = 3; +while () { + if (/^ CHANGELOG: /) { + $printed--; + print NEW; + if ($printed == 0) { + print NEW 
@changes; + } + next; + } + print NEW; +} + +close(NEW); +close(CHANGELOG); + +rename("$changelog.new", "$changelog"); --- linux-3.13.0.orig/debian/scripts/misc/insert-ubuntu-changes +++ linux-3.13.0/debian/scripts/misc/insert-ubuntu-changes @@ -0,0 +1,58 @@ +#!/usr/bin/perl + +if ($#ARGV != 2) { + die "Usage: $0 \n"; +} +my ($changelog, $end, $start) = @ARGV; + +$end =~ s/.*\.//; +$start =~ s/.*\.//; + +my @changes = (); +my $output = 0; +open(CHG, ") { + if (/^\S+\s+\((.*\.(\d+))\)/) { + if ($2 <= $end) { + last; + } + if ($2 == $start) { + $output = 1; + } + if ($output) { + push(@changes, "\n [ Ubuntu: $1 ]\n\n"); + next; + } + } + next if ($output == 0); + + next if (/^\s*$/); + next if (/^\s--/); + next if (/^\s\s[^\*\s]/); + + push(@changes, $_); +} +close(CHG); + +open(CHANGELOG, "< $changelog") or die "Cannot open changelog"; +open(NEW, "> $changelog.new") or die "Cannot open new changelog"; + +$printed = 3; +while () { + if (/^ CHANGELOG: /) { + $printed--; + print NEW; + if ($printed == 0) { + print NEW @changes; + } + next; + } + print NEW; +} + +close(NEW); +close(CHANGELOG); + +rename("$changelog.new", "$changelog"); --- linux-3.13.0.orig/debian/scripts/misc/kernelconfig +++ linux-3.13.0/debian/scripts/misc/kernelconfig @@ -0,0 +1,172 @@ +#!/bin/bash + +. debian/debian.env + +# Script to merge all configs and run 'make silentoldconfig' on it to wade out bad juju. +# Then split the configs into distro-commmon and flavour-specific parts + +# We have to be in the top level kernel source directory +if [ ! -f MAINTAINERS ] || [ ! -f Makefile ]; then + echo "This does not appear to be the kernel source directory." 1>&2 + exit 1 +fi + +mode=${1:?"Usage: $0 [oldconfig|editconfig]"} +yes=0 +case "$mode" in + update*configs) mode='silentoldconfig' ;; + default*configs) mode='oldconfig'; yes=1 ;; + edit*configs) ;; # All is good + gen*configs) mode='genconfigs' ;; # All is good + dump*configs) mode='config'; yes=1 ;; + *) echo "$0 called with invalid mode" 1>&2 + exit 1 ;; +esac +kerneldir="`pwd`" +confdir="$kerneldir/${DEBIAN}/config" +sharedconfdir="$kerneldir/debian.master/config" +variant="$2" + +. $DEBIAN/etc/kernelconfig + +bindir="`pwd`/${DROOT}/scripts/misc" +common_conf="$confdir/config.common.$family" +tmpdir=`mktemp -d` +mkdir "$tmpdir/CONFIGS" + +if [ "$mode" = "genconfigs" ]; then + keep=1 + mode="oldconfig" + test -d CONFIGS || mkdir CONFIGS +fi + +for arch in $archs; do + rm -rf build + mkdir build + + # Map debian archs to kernel archs + case "$arch" in + ppc64|ppc64el) kernarch="powerpc" ;; + amd64) kernarch="x86_64" ;; + lpia) kernarch="x86" ;; + sparc) kernarch="sparc64" ;; + armel|armhf) kernarch="arm" ;; + *) kernarch="$arch" ;; + esac + + archconfdir=$confdir/$arch + flavourconfigs=$(cd $archconfdir && ls config.flavour.*) + + # Merge configs + # We merge config.common.ubuntu + config.common. + + # config.flavour. 
+ + for config in $flavourconfigs; do + fullconf="$tmpdir/$arch-$config-full" + case $config in + *) + : >"$fullconf" + if [ -f $common_conf ]; then + cat $common_conf >> "$fullconf" + fi + if [ -f $archconfdir/config.common.$arch ]; then + cat $archconfdir/config.common.$arch >> "$fullconf" + fi + cat "$archconfdir/$config" >>"$fullconf" + if [ -f $confdir/OVERRIDES ]; then + cat $confdir/OVERRIDES >> "$fullconf" + fi + ;; + esac + done + + for config in $flavourconfigs; do + if [ -f $archconfdir/$config ]; then + fullconf="$tmpdir/$arch-$config-full" + cat "$fullconf" > build/.config + # Call oldconfig or menuconfig + case "$mode" in + editconfigs) + # Interactively edit config parameters + while : ; do + echo -n "Do you want to edit config: $arch/$config? [Y/n] " + read choice + + case "$choice" in + y* | Y* | "" ) + make O=`pwd`/build ARCH=$kernarch menuconfig + break ;; + n* | N* ) + break ;; + *) + echo "Entry not valid" + esac + done + ;; + *) + echo "* Run $mode (yes=$yes) on $arch/$config ..." + if [ "$yes" -eq 1 ]; then + yes "" | make O=`pwd`/build ARCH=$kernarch "$mode" + else + make O=`pwd`/build ARCH=$kernarch "$mode" + fi ;; + esac + cat build/.config > $archconfdir/$config + cat build/.config > "$tmpdir/CONFIGS/$arch-$config" + if [ "$keep" = "1" ]; then + cat build/.config > CONFIGS/$arch-$config + fi + else + echo "!! Config not found $archconfdir/$config..." + fi + done + + echo "Running splitconfig.pl for $arch" + echo + + # Can we make this more robust by avoiding $tmpdir completely? + # This approach was used for now because I didn't want to change + # splitconfig.pl + (cd $archconfdir; $bindir/splitconfig.pl config.flavour.*; mv config.common \ + config.common.$arch; cp config.common.$arch $tmpdir) +done + +rm -f $common_conf + +# Now run splitconfig.pl on all the config.common. copied to +# $tmpdir +(cd $tmpdir; $bindir/splitconfig.pl *) +( + cd $confdir; + rm -f *-full + grep -v 'is UNMERGABLE' <$tmpdir/config.common >$common_conf + for arch in $archs; do + grep -v 'is UNMERGABLE' <$tmpdir/config.common.$arch \ + >$arch/config.common.$arch + done +) + +echo "" +echo "Running config-check for all configurations ..." 
+echo "" +fail=0 +for arch in $archs; do + archconfdir=$confdir/$arch + flavourconfigs=$(cd $archconfdir && ls config.flavour.*) + for config in $flavourconfigs; do + flavour="${config##*.}" + if [ -f $archconfdir/$config ]; then + fullconf="$tmpdir/CONFIGS/$arch-$config" + "$bindir/../config-check" "$fullconf" "$arch" "$flavour" "$sharedconfdir" "0" || let "fail=$fail+1" + fi + done +done + +if [ "$fail" != 0 ]; then + echo "" + echo "*** ERROR: $fail config-check failures detected" + echo "" +fi + +rm -rf build + --- linux-3.13.0.orig/debian/scripts/misc/retag +++ linux-3.13.0/debian/scripts/misc/retag @@ -0,0 +1,34 @@ +#!/usr/bin/perl -w + +open(TAGS, "git tag -l |") or die "Could not get list of tags"; +@tags = ; +close(TAGS); + +open(LOGS, "git log --pretty=short |") or die "ERROR: Calling git log"; +my $commit = ""; + +while () { + my $origtag; + + if (m|^commit (.*)$|) { + $commit = $1; + next; + } + + m|\s*UBUNTU: (Ubuntu-2\.6\..*)| or next; + + $tag = $1; + + ($origtag) = grep(/^$tag.orig$/, @tags); + + if (!defined($origtag)) { + print "I: Adding original tag for $tag\n"; + system("git tag -m $tag $tag.orig $tag"); + } + + print "I: Tagging $tag => $commit\n"; + + system("git tag -f -m $tag $tag $commit"); +} + +close(LOGS); --- linux-3.13.0.orig/debian/scripts/misc/splitconfig.pl +++ linux-3.13.0/debian/scripts/misc/splitconfig.pl @@ -0,0 +1,107 @@ +#!/usr/bin/perl -w + +%allconfigs = (); +%common = (); + +print "Reading config's ...\n"; + +for $config (@ARGV) { + # Only config.* + next if $config !~ /^config\..*/; + # Nothing that is disabled, or remnant + next if $config =~ /.*\.(default|disabled|stub)$/; + + %{$allconfigs{$config}} = (); + + print " processing $config ... "; + + open(CONFIG, "< $config"); + + while () { + # Skip comments + /^#*\s*CONFIG_(\w+)[\s=](.*)$/ or next; + + ${$allconfigs{$config}}{$1} = $2; + + $common{$1} = $2; + } + + close(CONFIG); + + print "done.\n"; +} + +print "\n"; + +print "Merging lists ... \n"; + +# %options - pointer to flavour config inside the allconfigs array +for $config (keys(%allconfigs)) { + my %options = %{$allconfigs{$config}}; + + print " processing $config ... "; + + for $key (keys(%common)) { + next if not defined $common{$key}; + + # If we don't have the common option, then it isn't + # common. If we do have that option, it must have the same + # value. EXCEPT where this file does not have a value at all + # which may safely be merged with any other value; the value + # will be elided during recombination of the parts. + if (!defined($options{$key})) { + # Its ok really ... let it merge + } elsif (not defined($options{$key})) { + undef $common{$key}; + } elsif ($common{$key} ne $options{$key}) { + undef $common{$key}; + } + } + + print "done.\n"; +} + +print "\n"; + +print "Creating common config ... "; + +open(COMMON, "> config.common"); +print COMMON "#\n# Common config options automatically generated by splitconfig.pl\n#\n"; + +for $key (sort(keys(%common))) { + if (not defined $common{$key}) { + print COMMON "# CONFIG_$key is UNMERGABLE\n"; + } elsif ($common{$key} eq "is not set") { + print COMMON "# CONFIG_$key is not set\n"; + } else { + print COMMON "CONFIG_$key=$common{$key}\n"; + } +} +close(COMMON); + +print "done.\n\n"; + +print "Creating stub configs ...\n"; + +for $config (keys(%allconfigs)) { + my %options = %{$allconfigs{$config}}; + + print " processing $config ... 
"; + + open(STUB, "> $config"); + print STUB "#\n# Config options for $config automatically generated by splitconfig.pl\n#\n"; + + for $key (sort(keys(%options))) { + next if defined $common{$key}; + + if ($options{$key} =~ /^is /) { + print STUB "# CONFIG_$key $options{$key}\n"; + } else { + print STUB "CONFIG_$key=$options{$key}\n"; + } + } + + close(STUB); + + print "done.\n"; +} --- linux-3.13.0.orig/debian/scripts/module-check +++ linux-3.13.0/debian/scripts/module-check @@ -0,0 +1,120 @@ +#!/usr/bin/perl -w + +$flavour = shift; +$prev_abidir = shift; +$abidir = shift; +$skipmodule = shift; + +print "II: Checking modules for $flavour..."; + +if (-f "$prev_abidir/ignore.modules" + or -f "$prev_abidir/$flavour.ignore.modules") { + print "explicitly ignoring modules\n"; + exit(0); +} + +if (not -f "$abidir/$flavour.modules" or not -f + "$prev_abidir/$flavour.modules") { + print "previous or current modules file missing!\n"; + print " $abidir/$flavour.modules\n"; + print " $prev_abidir/$flavour.modules\n"; + if (defined($skipmodule)) { + exit(0); + } else { + exit(1); + } +} + +print "\n"; + +my %modules; +my %modules_ignore; +my $missing = 0; +my $new = 0; +my $errors = 0; + +# See if we have any ignores +if (-f "$prev_abidir/../modules.ignore") { + my $ignore = 0; + open(IGNORE, "< $prev_abidir/../modules.ignore") or + die "Could not open $prev_abidir/../modules.ignore"; + print " reading modules to ignore..."; + while () { + chomp; + next if /\s*#/; + $modules_ignore{$_} = 1; + $ignore++; + } + close(IGNORE); + print "read $ignore modules.\n"; +} + +# Read new modules first +print " reading new modules..."; +$new_count = 0; +open(NEW, "< $abidir/$flavour.modules") or + die "Could not open $abidir/$flavour.modules"; +while () { + chomp; + $modules{$_} = 1; + $new_count++; +} +close(NEW); +print "read $new_count modules.\n"; + +# Now the old modules, checking for missing ones +print " reading old modules..."; +$old_count = 0; +open(OLD, "< $prev_abidir/$flavour.modules") or + die "Could not open $prev_abidir/$flavour.modules"; +while () { + chomp; + if (not defined($modules{$_})) { + print "\n" if not $missing; + $missing++; + if (not defined($modules_ignore{$_})) { + print " MISS: $_\n"; + $errors++; + } else { + print " MISS: $_ (ignored)\n"; + } + } else { + $modules{$_}++; + } + $old_count++; +} +close(OLD); +# Check for new modules +foreach $mod (keys(%modules)) { + if ($modules{$mod} < 2) { + print "\n" if not $missing and not $new; + print " NEW : $mod\n"; + $new++; + } +} +if ($new or $missing) { + print " read $old_count modules : new($new) missing($missing)\n"; +} else { + print "read $old_count modules.\n"; +} + + +# Let's see where we stand... +if ($errors) { + if (defined($skipmodule)) { + print "WW: Explicitly asked to ignore failures (probably not good)\n"; + } else { + print "EE: Missing modules (start begging for mercy)\n"; + exit 1 + } +} + +if ($new) { + print "II: New modules (you've been busy, wipe the poop off your nose)\n"; +} else { + print "II: No new modules (hope you're happy, slacker)\n"; +} + +print "II: Done\n"; + +exit(0); --- linux-3.13.0.orig/debian/scripts/module-inclusion +++ linux-3.13.0/debian/scripts/module-inclusion @@ -0,0 +1,60 @@ +#!/bin/bash + +# +# Build a new directory of modules based on an inclusion list. +# The includsion list format must be a bash regular expression. 
+# +# usage: $0 ROOT INCLUSION_LIST +# example: $0 debian/build/build-virtual \ +# debian/build/build-virtual-ALL debian/build/build-virtual \ +# debian.master/control.d/virtual.inclusion-list +master=0 +if [ "$1" = "--master" ]; then + master=1 + shift +fi + +ROOT=$1 +NROOT=$2 +ILIST=$3 + +# +# Prep a destination directory. +# +mkdir -p ${NROOT} + +# Copy over the framework... +if [ "$master" -eq 1 ]; then + (cd ${ROOT}; find . ! -name "*.ko" -type f) | \ + while read f + do + mkdir -p ${NROOT}/`dirname $f` + mv ${ROOT}/$f ${NROOT}/$f + done +fi + +cat ${ILIST} |while read i +do + # + # 'find' blurts a warning if it cannot find any ko files. + # + if echo "$i" | grep '\*' > /dev/null + then + (cd ${ROOT}; eval find "${i}" -name "*.ko") |while read f + do + mkdir -p ${NROOT}/`dirname $f` + mv ${ROOT}/$f ${NROOT}/$f + done + else + if [ -f "${ROOT}/$i" ] + then + mkdir -p ${NROOT}/`dirname $i` + mv ${ROOT}/$i ${NROOT}/$i + else + echo Warning: Could not find ${ROOT}/$i + fi + fi + +done + +exit 0 --- linux-3.13.0.orig/debian/scripts/sub-flavour +++ linux-3.13.0/debian/scripts/sub-flavour @@ -0,0 +1,69 @@ +#!/bin/bash + +. debian/debian.env + +echo "SUB_PROCESS $FROM => $TO" + +export from_pkg="linux-image-$ABI_RELEASE-$FROM" +export to_pkg="linux-image-$ABI_RELEASE-$TO" + +from_moddir="debian/$from_pkg/lib/modules/$ABI_RELEASE-$FROM" +to_moddir="debian/$to_pkg/lib/modules/$ABI_RELEASE-$FROM" + +install -d "debian/$to_pkg/boot" +install -m644 debian/$from_pkg/boot/config-$ABI_RELEASE-$FROM \ + debian/$to_pkg/boot/ +install -m600 debian/$from_pkg/boot/{vmlinuz,System.map}-$ABI_RELEASE-$FROM \ + debian/$to_pkg/boot/ + +# +# Print some warnings if there are files in the sub-flavours list +# that do not actually exist. +# +cat ${DEBIAN}/sub-flavours/$TO.list | while read line +do +( + cd debian/$from_pkg/lib/modules/$ABI_RELEASE-$FROM/kernel; + # + # If its a wildcard, then check that there are files that match. + # + if echo "$line" | grep '\*' > /dev/null + then + if [ `eval find "$line" -name '*.ko' 2>/dev/null|wc -l` -lt 1 ] + then + echo SUB_INST Warning - No files in $line + fi + # + # Else it should be a single file reference. + # + elif [ ! -f "$line" ] + then + echo SUB_INST Warning - could not find "$line" + fi +) +done + +cat ${DEBIAN}/sub-flavours/$TO.list | while read line; do + ( + cd debian/$from_pkg/lib/modules/$ABI_RELEASE-$FROM/kernel; + if echo "$line" | grep '\*' > /dev/null + then + eval find "$line" -name '*.ko' 2>/dev/null || true + elif [ -f "$line" ] + then + echo "$line" + fi + ); +done | while read mod; do + echo "SUB_INST checking: $mod" + fromdir="/lib/modules/$ABI_RELEASE-$FROM/" + egrep "^($fromdir)?kernel/$mod:" \ + $from_moddir/modules.dep | sed -e "s|^$fromdir||" -e 's/://' -e 's/ /\n/g' | \ + while read m; do + m="${fromdir}$m" + test -f debian/$to_pkg/$m && continue + echo "SUB_INST installing: $m" + install -D -m644 debian/$from_pkg/$m \ + debian/$to_pkg/$m + done +done --- linux-3.13.0.orig/debian/source/format +++ linux-3.13.0/debian/source/format @@ -0,0 +1 @@ +1.0 --- linux-3.13.0.orig/debian/source/options +++ linux-3.13.0/debian/source/options @@ -0,0 +1,3 @@ +# force "dpkg-source -I -i" behavior +diff-ignore +tar-ignore --- linux-3.13.0.orig/debian/stamps/keep-dir +++ linux-3.13.0/debian/stamps/keep-dir @@ -0,0 +1 @@ +Place holder --- linux-3.13.0.orig/debian/tests-build/README +++ linux-3.13.0/debian/tests-build/README @@ -0,0 +1,21 @@ +Scripts placed in this directory get called one at a time by run-parts(8). 
+The scripts are expected to perform some sort of sanity checks on the +finished build. Scripts will be called once for each flavour. + +Some environment variables are exported to make life a little easier: + +DPKG_ARCH : The dpkg architecture (e.g. "amd64") +KERN_ARCH : The kernel architecture (e.g. "x86_64") +FLAVOUR : The specific flavour for this run (e.g. "generic") +VERSION : The full version of this build (e.g. 2.6.22-1) +REVISION : The exact revision of this build (e.g. 1.3) +PREV_REVISION : The revision prior to this one +ABI_NUM : The specific ABI number for this build (e.g. 2) +PREV_ABI_NUM : The previous ABI number. Can be the same as ABI_NUM. +BUILD_DIR : The directory where this build took place +INSTALL_DIR : The directory where the package is prepared +SOURCE_DIR : Where the main kernel source is + +Scripts are expected to have a zero exit status when no problems occur, +and non-zero when an error occurs that should stop the build. Scripts +should print whatever info they deem needed to deduce the problem. --- linux-3.13.0.orig/debian/tests-build/check-aliases +++ linux-3.13.0/debian/tests-build/check-aliases @@ -0,0 +1,24 @@ +#!/usr/bin/perl -w + +my %map; + +print "Checking for dupe aliases in $ENV{'FLAVOUR'}...\n"; + +$aliases = + "$ENV{'INSTALL_DIR'}/lib/modules/$ENV{'VERSION'}-$ENV{'FLAVOUR'}/modules.alias"; + +open(ALIASES, "< $aliases") or die "Could not open $aliases"; + +while () { + chomp; + my ($junk, $alias, $module) = split; + + if (defined($map{$alias})) { + printf("%s %20s / %-20s : %s \n", ("$map{$alias}" eq "$module") + ? "INT" : " ", $map{$alias}, $module, $alias); + } else { + $map{$alias} = $module; + } +} + +exit(0); --- linux-3.13.0.orig/debian/tests/control +++ linux-3.13.0/debian/tests/control @@ -0,0 +1,7 @@ +Tests: rebuild +Depends: @builddeps@, fakeroot +Restrictions: allow-stderr + +Tests: ubuntu-regression-suite +Depends: build-essential, gcc-multilib, gdb, git, bzr +Restrictions: allow-stderr, isolation-machine, breaks-testbed --- linux-3.13.0.orig/debian/tests/rebuild +++ linux-3.13.0/debian/tests/rebuild @@ -0,0 +1 @@ +#!/bin/true --- linux-3.13.0.orig/debian/tests/ubuntu-regression-suite +++ linux-3.13.0/debian/tests/ubuntu-regression-suite @@ -0,0 +1,19 @@ +#!/bin/sh +set -e + +sver=`dpkg-parsechangelog -SVersion` +read x rver x &2 + exit 1 +fi + +git clone git://kernel.ubuntu.com/ubuntu/kernel-testing +kernel-testing/run-dep8-tests --- linux-3.13.0.orig/debian/tools/generic +++ linux-3.13.0/debian/tools/generic @@ -0,0 +1,60 @@ +#!/bin/bash +full_version=`uname -r` + +# First check for a fully qualified version. +this="/usr/lib/linux-tools/$full_version/`basename $0`" +if [ -f "$this" ]; then + exec "$this" "$@" +fi + +# Removing flavour from version i.e. generic or server. +flavour_abi=${full_version#*-} +flavour=${flavour_abi#*-} +version=${full_version%-$flavour} +this="$0_$version" +if [ -f "$this" ]; then + exec "$this" "$@" +fi + +# Before saucy kernels we had no flavour linkage. +if dpkg --compare-versions "$version" lt "3.11.0"; then + flavour='' +else + flavour="-$flavour" +fi +# Hint at the cloud tools if they exist (trusty and later) +if dpkg --compare-versions "$version" ge "3.13.0"; then + cld="" +else + cld=":" +fi +# Work out if this is an LTS backport or not. 
+codename=`lsb_release -cs` +case "$codename" in +precise) base='3.2.0-9999' ;; +trusty) base='3.13.0-9999' ;; +*) base='' ;; +esac +std="" +lts=":" +if [ "$base" != "" ]; then + if dpkg --compare-versions "$version" gt "$base"; then + std=":" + lts="" + fi +fi + +# Give them a hint as to what to install. + echo "WARNING: `basename $0` not found for kernel $version" >&2 + echo "" >&2 + echo " You may need to install the following packages for this specific kernel:" >&2 + echo " linux-tools-$version$flavour" >&2 +$cld echo " linux-cloud-tools-$version$flavour" >&2 + echo "" >&2 + echo " You may also want to install one of the following packages to keep up to date:" >&2 +$std echo " linux-tools$flavour" >&2 +$std $cld echo " linux-cloud-tools$flavour" >&2 +$lts echo " linux-tools$flavour-lts-" >&2 +$lts $cld echo " linux-cloud-tools$flavour-lts-" >&2 + +exit 2 --- linux-3.13.0.orig/drivers/Kconfig +++ linux-3.13.0/drivers/Kconfig @@ -170,4 +170,6 @@ source "drivers/powercap/Kconfig" +source "drivers/net/dpa/NetCommSw/Kconfig" + endmenu --- linux-3.13.0.orig/drivers/acpi/Kconfig +++ linux-3.13.0/drivers/acpi/Kconfig @@ -43,18 +43,22 @@ depends on SUSPEND || HIBERNATION default y -config ACPI_PROCFS - bool "Deprecated /proc/acpi files" +config ACPI_PROCFS_POWER + bool "Deprecated power /proc/acpi directories" depends on PROC_FS help For backwards compatibility, this option allows - deprecated /proc/acpi/ files to exist, even when - they have been replaced by functions in /sys. + deprecated power /proc/acpi/ directories to exist, even when + they have been replaced by functions in /sys. + The deprecated directories (and their replacements) include: + /proc/acpi/battery/* (/sys/class/power_supply/*) + /proc/acpi/ac_adapter/* (sys/class/power_supply/*) + This option has no effect on /proc/acpi/ directories + and functions, which do not yet exist in /sys + This option, together with the proc directories, will be + deleted in 2.6.39. - This option has no effect on /proc/acpi/ files - and functions which do not yet exist in /sys. 
- - Say N to delete /proc/acpi/ files that have moved to /sys/ + Say N to delete power /proc/acpi/ directories that have moved to /sys/ config ACPI_EC_DEBUGFS tristate "EC read/write access through /sys/kernel/debug/ec" --- linux-3.13.0.orig/drivers/acpi/Makefile +++ linux-3.13.0/drivers/acpi/Makefile @@ -47,6 +47,7 @@ acpi-$(CONFIG_X86) += acpi_cmos_rtc.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o +acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o ifdef CONFIG_ACPI_VIDEO acpi-y += video_detect.o endif --- linux-3.13.0.orig/drivers/acpi/ac.c +++ linux-3.13.0/drivers/acpi/ac.c @@ -30,6 +30,10 @@ #include #include #include +#ifdef CONFIG_ACPI_PROCFS_POWER +#include +#include +#endif #include #include #include @@ -52,26 +56,74 @@ MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); + +static int acpi_ac_add(struct acpi_device *device); +static int acpi_ac_remove(struct acpi_device *device); +static void acpi_ac_notify(struct acpi_device *device, u32 event); + +static const struct acpi_device_id ac_device_ids[] = { + {"ACPI0003", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, ac_device_ids); + +#ifdef CONFIG_PM_SLEEP +static int acpi_ac_resume(struct device *dev); +#endif +static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); + +#ifdef CONFIG_ACPI_PROCFS_POWER +extern struct proc_dir_entry *acpi_lock_ac_dir(void); +extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); +static int acpi_ac_open_fs(struct inode *inode, struct file *file); +#endif + + static int ac_sleep_before_get_state_ms; +static struct acpi_driver acpi_ac_driver = { + .name = "ac", + .class = ACPI_AC_CLASS, + .ids = ac_device_ids, + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, + .ops = { + .add = acpi_ac_add, + .remove = acpi_ac_remove, + .notify = acpi_ac_notify, + }, + .drv.pm = &acpi_ac_pm, +}; + struct acpi_ac { struct power_supply charger; - struct platform_device *pdev; + struct acpi_device * device; unsigned long long state; }; #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) +#ifdef CONFIG_ACPI_PROCFS_POWER +static const struct file_operations acpi_ac_fops = { + .owner = THIS_MODULE, + .open = acpi_ac_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + /* -------------------------------------------------------------------------- AC Adapter Management -------------------------------------------------------------------------- */ static int acpi_ac_get_state(struct acpi_ac *ac) { - acpi_status status; - acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); + acpi_status status = AE_OK; - status = acpi_evaluate_integer(handle, "_PSR", NULL, + if (!ac) + return -EINVAL; + + status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, @@ -112,14 +164,90 @@ POWER_SUPPLY_PROP_ONLINE, }; +#ifdef CONFIG_ACPI_PROCFS_POWER +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_ac_dir; + +static int acpi_ac_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_ac *ac = seq->private; + + + if (!ac) + return 0; + + if (acpi_ac_get_state(ac)) { + seq_puts(seq, "ERROR: Unable to read AC Adapter state\n"); + return 0; + } + + seq_puts(seq, "state: "); + switch (ac->state) { + case ACPI_AC_STATUS_OFFLINE: + seq_puts(seq, "off-line\n"); + break; + case ACPI_AC_STATUS_ONLINE: + 
seq_puts(seq, "on-line\n"); + break; + default: + seq_puts(seq, "unknown\n"); + break; + } + + return 0; +} + +static int acpi_ac_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_ac_seq_show, PDE_DATA(inode)); +} + +static int acpi_ac_add_fs(struct acpi_ac *ac) +{ + struct proc_dir_entry *entry = NULL; + + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); + if (!acpi_device_dir(ac->device)) { + acpi_device_dir(ac->device) = + proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir); + if (!acpi_device_dir(ac->device)) + return -ENODEV; + } + + /* 'state' [R] */ + entry = proc_create_data(ACPI_AC_FILE_STATE, + S_IRUGO, acpi_device_dir(ac->device), + &acpi_ac_fops, ac); + if (!entry) + return -ENODEV; + return 0; +} + +static int acpi_ac_remove_fs(struct acpi_ac *ac) +{ + + if (acpi_device_dir(ac->device)) { + remove_proc_entry(ACPI_AC_FILE_STATE, + acpi_device_dir(ac->device)); + remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir); + acpi_device_dir(ac->device) = NULL; + } + + return 0; +} +#endif + /* -------------------------------------------------------------------------- Driver Model -------------------------------------------------------------------------- */ -static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) +static void acpi_ac_notify(struct acpi_device *device, u32 event) { - struct acpi_ac *ac = data; - struct acpi_device *adev; + struct acpi_ac *ac = acpi_driver_data(device); if (!ac) return; @@ -142,11 +270,10 @@ msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - adev = ACPI_COMPANION(&ac->pdev->dev); - acpi_bus_generate_netlink_event(adev->pnp.device_class, - dev_name(&ac->pdev->dev), - event, (u32) ac->state); - acpi_notifier_call_chain(adev, event, (u32) ac->state); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, + (u32) ac->state); + acpi_notifier_call_chain(device, event, (u32) ac->state); kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); } @@ -171,54 +298,53 @@ {}, }; -static int acpi_ac_probe(struct platform_device *pdev) +static int acpi_ac_add(struct acpi_device *device) { int result = 0; struct acpi_ac *ac = NULL; - struct acpi_device *adev; - if (!pdev) - return -EINVAL; - adev = ACPI_COMPANION(&pdev->dev); - if (!adev) - return -ENODEV; + if (!device) + return -EINVAL; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); if (!ac) return -ENOMEM; - strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); - strcpy(acpi_device_class(adev), ACPI_AC_CLASS); - ac->pdev = pdev; - platform_set_drvdata(pdev, ac); + ac->device = device; + strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_AC_CLASS); + device->driver_data = ac; result = acpi_ac_get_state(ac); if (result) goto end; - ac->charger.name = acpi_device_bid(adev); + ac->charger.name = acpi_device_bid(device); +#ifdef CONFIG_ACPI_PROCFS_POWER + result = acpi_ac_add_fs(ac); + if (result) + goto end; +#endif ac->charger.type = POWER_SUPPLY_TYPE_MAINS; ac->charger.properties = ac_props; ac->charger.num_properties = ARRAY_SIZE(ac_props); ac->charger.get_property = get_ac_property; - result = power_supply_register(&pdev->dev, &ac->charger); + result = power_supply_register(&ac->device->dev, &ac->charger); if (result) goto end; - result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); - if (result) { - 
power_supply_unregister(&ac->charger); - goto end; - } printk(KERN_INFO PREFIX "%s [%s] (%s)\n", - acpi_device_name(adev), acpi_device_bid(adev), + acpi_device_name(device), acpi_device_bid(device), ac->state ? "on-line" : "off-line"); end: - if (result) + if (result) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_remove_fs(ac); +#endif kfree(ac); + } dmi_check_system(ac_dmi_table); return result; @@ -233,7 +359,7 @@ if (!dev) return -EINVAL; - ac = platform_get_drvdata(to_platform_device(dev)); + ac = acpi_driver_data(to_acpi_device(dev)); if (!ac) return -EINVAL; @@ -245,44 +371,29 @@ return 0; } #endif -static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); -static int acpi_ac_remove(struct platform_device *pdev) +static int acpi_ac_remove(struct acpi_device *device) { - struct acpi_ac *ac; + struct acpi_ac *ac = NULL; + - if (!pdev) + if (!device || !acpi_driver_data(device)) return -EINVAL; - acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_ALL_NOTIFY, acpi_ac_notify_handler); + ac = acpi_driver_data(device); - ac = platform_get_drvdata(pdev); if (ac->charger.dev) power_supply_unregister(&ac->charger); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_remove_fs(ac); +#endif + kfree(ac); return 0; } -static const struct acpi_device_id acpi_ac_match[] = { - { "ACPI0003", 0 }, - { } -}; -MODULE_DEVICE_TABLE(acpi, acpi_ac_match); - -static struct platform_driver acpi_ac_driver = { - .probe = acpi_ac_probe, - .remove = acpi_ac_remove, - .driver = { - .name = "acpi-ac", - .owner = THIS_MODULE, - .pm = &acpi_ac_pm_ops, - .acpi_match_table = ACPI_PTR(acpi_ac_match), - }, -}; - static int __init acpi_ac_init(void) { int result; @@ -290,16 +401,30 @@ if (acpi_disabled) return -ENODEV; - result = platform_driver_register(&acpi_ac_driver); - if (result < 0) +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_ac_dir = acpi_lock_ac_dir(); + if (!acpi_ac_dir) return -ENODEV; +#endif + + + result = acpi_bus_register_driver(&acpi_ac_driver); + if (result < 0) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_ac_dir(acpi_ac_dir); +#endif + return -ENODEV; + } return 0; } static void __exit acpi_ac_exit(void) { - platform_driver_unregister(&acpi_ac_driver); + acpi_bus_unregister_driver(&acpi_ac_driver); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_ac_dir(acpi_ac_dir); +#endif } module_init(acpi_ac_init); module_exit(acpi_ac_exit); --- linux-3.13.0.orig/drivers/acpi/acpi_cmos_rtc.c +++ linux-3.13.0/drivers/acpi/acpi_cmos_rtc.c @@ -35,7 +35,7 @@ void *handler_context, void *region_context) { int i; - u8 *value = (u8 *)&value64; + u8 *value = (u8 *)value64; if (address > 0xff || !value64) return AE_BAD_PARAMETER; --- linux-3.13.0.orig/drivers/acpi/acpi_pad.c +++ linux-3.13.0/drivers/acpi/acpi_pad.c @@ -219,8 +219,15 @@ * borrow CPU time from this CPU and cause RT task use > 95% * CPU time. To make 'avoid starvation' work, takes a nap here. */ - if (do_sleep) + if (unlikely(do_sleep)) schedule_timeout_killable(HZ * idle_pct / 100); + + /* If an external event has set the need_resched flag, then + * we need to deal with it, or this loop will continue to + * spin without calling __mwait(). 
+ */ + if (unlikely(need_resched())) + schedule(); } exit_round_robin(tsk_index); --- linux-3.13.0.orig/drivers/acpi/acpi_platform.c +++ linux-3.13.0/drivers/acpi/acpi_platform.c @@ -29,7 +29,6 @@ static const struct acpi_device_id acpi_platform_device_ids[] = { { "PNP0D40" }, - { "ACPI0003" }, { "VPC2004" }, { "BCM4752" }, --- linux-3.13.0.orig/drivers/acpi/acpi_processor.c +++ linux-3.13.0/drivers/acpi/acpi_processor.c @@ -400,7 +400,6 @@ goto err; pr->dev = dev; - dev->offline = pr->flags.need_hotplug_init; /* Trigger the processor driver's .probe() if present. */ if (device_attach(dev) >= 0) --- linux-3.13.0.orig/drivers/acpi/acpica/nsrepair.c +++ linux-3.13.0/drivers/acpi/acpica/nsrepair.c @@ -207,13 +207,30 @@ * this predefined name. Either one return value is expected, or none, * for both methods and other objects. * - * Exit now if there is no return object. Warning if one was expected. + * Try to fix if there was no return object. Warning if failed to fix. */ if (!return_object) { if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) { - ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, - ACPI_WARN_ALWAYS, - "Missing expected return value")); + if (package_index != ACPI_NOT_PACKAGE_ELEMENT) { + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, + ACPI_WARN_ALWAYS, + "Found unexpected NULL package element")); + + status = + acpi_ns_repair_null_element(info, + expected_btypes, + package_index, + return_object_ptr); + if (ACPI_SUCCESS(status)) { + return (AE_OK); /* Repair was successful */ + } + } else { + ACPI_WARN_PREDEFINED((AE_INFO, + info->full_pathname, + ACPI_WARN_ALWAYS, + "Missing expected return value")); + } return (AE_AML_NO_RETURN_VALUE); } --- linux-3.13.0.orig/drivers/acpi/acpica/utcopy.c +++ linux-3.13.0/drivers/acpi/acpica/utcopy.c @@ -1001,5 +1001,11 @@ status = acpi_ut_copy_simple_object(source_desc, *dest_desc); } + /* Delete the allocated object if copy failed */ + + if (ACPI_FAILURE(status)) { + acpi_ut_remove_reference(*dest_desc); + } + return_ACPI_STATUS(status); } --- linux-3.13.0.orig/drivers/acpi/acpica/utstring.c +++ linux-3.13.0/drivers/acpi/acpica/utstring.c @@ -353,7 +353,7 @@ } acpi_os_printf("\""); - for (i = 0; string[i] && (i < max_length); i++) { + for (i = 0; (i < max_length) && string[i]; i++) { /* Escape sequences */ --- linux-3.13.0.orig/drivers/acpi/battery.c +++ linux-3.13.0/drivers/acpi/battery.c @@ -34,8 +34,15 @@ #include #include #include +#include #include +#ifdef CONFIG_ACPI_PROCFS_POWER +#include +#include +#include +#endif + #include #include #include @@ -67,6 +74,19 @@ module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); +#ifdef CONFIG_ACPI_PROCFS_POWER +extern struct proc_dir_entry *acpi_lock_battery_dir(void); +extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); + +enum acpi_battery_files { + info_tag = 0, + state_tag, + alarm_tag, + ACPI_BATTERY_NUMFILES, +}; + +#endif + static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, {"", 0}, @@ -302,6 +322,14 @@ POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +#ifdef CONFIG_ACPI_PROCFS_POWER +inline char *acpi_battery_units(struct acpi_battery *battery) +{ + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? 
+ "mA" : "mW"; +} +#endif + /* -------------------------------------------------------------------------- Battery Management -------------------------------------------------------------------------- */ @@ -672,7 +700,7 @@ } } -static int acpi_battery_update(struct acpi_battery *battery) +static int acpi_battery_update(struct acpi_battery *battery, bool resume) { int result, old_present = acpi_battery_present(battery); result = acpi_battery_get_status(battery); @@ -683,6 +711,10 @@ battery->update_time = 0; return 0; } + + if (resume) + return 0; + if (!battery->update_time || old_present != acpi_battery_present(battery)) { result = acpi_battery_get_info(battery); @@ -720,6 +752,279 @@ } /* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_PROCFS_POWER +static struct proc_dir_entry *acpi_battery_dir; + +static int acpi_battery_print_info(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + seq_printf(seq, "present: %s\n", + acpi_battery_present(battery) ? "yes" : "no"); + if (!acpi_battery_present(battery)) + goto end; + if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "design capacity: unknown\n"); + else + seq_printf(seq, "design capacity: %d %sh\n", + battery->design_capacity, + acpi_battery_units(battery)); + + if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "last full capacity: unknown\n"); + else + seq_printf(seq, "last full capacity: %d %sh\n", + battery->full_charge_capacity, + acpi_battery_units(battery)); + + seq_printf(seq, "battery technology: %srechargeable\n", + (!battery->technology)?"non-":""); + + if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "design voltage: unknown\n"); + else + seq_printf(seq, "design voltage: %d mV\n", + battery->design_voltage); + seq_printf(seq, "design capacity warning: %d %sh\n", + battery->design_capacity_warning, + acpi_battery_units(battery)); + seq_printf(seq, "design capacity low: %d %sh\n", + battery->design_capacity_low, + acpi_battery_units(battery)); + seq_printf(seq, "cycle count: %i\n", battery->cycle_count); + seq_printf(seq, "capacity granularity 1: %d %sh\n", + battery->capacity_granularity_1, + acpi_battery_units(battery)); + seq_printf(seq, "capacity granularity 2: %d %sh\n", + battery->capacity_granularity_2, + acpi_battery_units(battery)); + seq_printf(seq, "model number: %s\n", battery->model_number); + seq_printf(seq, "serial number: %s\n", battery->serial_number); + seq_printf(seq, "battery type: %s\n", battery->type); + seq_printf(seq, "OEM info: %s\n", battery->oem_info); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery info\n"); + return result; +} + +static int acpi_battery_print_state(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + seq_printf(seq, "present: %s\n", + acpi_battery_present(battery) ? "yes" : "no"); + if (!acpi_battery_present(battery)) + goto end; + + seq_printf(seq, "capacity state: %s\n", + (battery->state & 0x04) ? 
"critical" : "ok"); + if ((battery->state & 0x01) && (battery->state & 0x02)) + seq_printf(seq, + "charging state: charging/discharging\n"); + else if (battery->state & 0x01) + seq_printf(seq, "charging state: discharging\n"); + else if (battery->state & 0x02) + seq_printf(seq, "charging state: charging\n"); + else + seq_printf(seq, "charging state: charged\n"); + + if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "present rate: unknown\n"); + else + seq_printf(seq, "present rate: %d %s\n", + battery->rate_now, acpi_battery_units(battery)); + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "remaining capacity: unknown\n"); + else + seq_printf(seq, "remaining capacity: %d %sh\n", + battery->capacity_now, acpi_battery_units(battery)); + if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "present voltage: unknown\n"); + else + seq_printf(seq, "present voltage: %d mV\n", + battery->voltage_now); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery state\n"); + + return result; +} + +static int acpi_battery_print_alarm(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + if (!acpi_battery_present(battery)) { + seq_printf(seq, "present: no\n"); + goto end; + } + seq_printf(seq, "alarm: "); + if (!battery->alarm) + seq_printf(seq, "unsupported\n"); + else + seq_printf(seq, "%u %sh\n", battery->alarm, + acpi_battery_units(battery)); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery alarm\n"); + return result; +} + +static ssize_t acpi_battery_write_alarm(struct file *file, + const char __user * buffer, + size_t count, loff_t * ppos) +{ + int result = 0; + char alarm_string[12] = { '\0' }; + struct seq_file *m = file->private_data; + struct acpi_battery *battery = m->private; + + if (!battery || (count > sizeof(alarm_string) - 1)) + return -EINVAL; + if (!acpi_battery_present(battery)) { + result = -ENODEV; + goto end; + } + if (copy_from_user(alarm_string, buffer, count)) { + result = -EFAULT; + goto end; + } + alarm_string[count] = '\0'; + battery->alarm = simple_strtol(alarm_string, NULL, 0); + result = acpi_battery_set_alarm(battery); + end: + if (!result) + return count; + return result; +} + +typedef int(*print_func)(struct seq_file *seq, int result); + +static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = { + acpi_battery_print_info, + acpi_battery_print_state, + acpi_battery_print_alarm, +}; + +static int acpi_battery_read(int fid, struct seq_file *seq) +{ + struct acpi_battery *battery = seq->private; + int result = acpi_battery_update(battery, false); + return acpi_print_funcs[fid](seq, result); +} + +#define DECLARE_FILE_FUNCTIONS(_name) \ +static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \ +{ \ + return acpi_battery_read(_name##_tag, seq); \ +} \ +static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \ +} + +DECLARE_FILE_FUNCTIONS(info); +DECLARE_FILE_FUNCTIONS(state); +DECLARE_FILE_FUNCTIONS(alarm); + +#undef DECLARE_FILE_FUNCTIONS + +#define FILE_DESCRIPTION_RO(_name) \ + { \ + .name = __stringify(_name), \ + .mode = S_IRUGO, \ + .ops = { \ + .open = acpi_battery_##_name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define FILE_DESCRIPTION_RW(_name) \ + { \ + .name = __stringify(_name), \ + 
.mode = S_IFREG | S_IRUGO | S_IWUSR, \ + .ops = { \ + .open = acpi_battery_##_name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .write = acpi_battery_write_##_name, \ + .release = single_release, \ + .owner = THIS_MODULE, \ + }, \ + } + +static const struct battery_file { + struct file_operations ops; + umode_t mode; + const char *name; +} acpi_battery_file[] = { + FILE_DESCRIPTION_RO(info), + FILE_DESCRIPTION_RO(state), + FILE_DESCRIPTION_RW(alarm), +}; + +#undef FILE_DESCRIPTION_RO +#undef FILE_DESCRIPTION_RW + +static int acpi_battery_add_fs(struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + int i; + + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_battery_dir); + if (!acpi_device_dir(device)) + return -ENODEV; + } + + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { + entry = proc_create_data(acpi_battery_file[i].name, + acpi_battery_file[i].mode, + acpi_device_dir(device), + &acpi_battery_file[i].ops, + acpi_driver_data(device)); + if (!entry) + return -ENODEV; + } + return 0; +} + +static void acpi_battery_remove_fs(struct acpi_device *device) +{ + int i; + if (!acpi_device_dir(device)) + return; + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) + remove_proc_entry(acpi_battery_file[i].name, + acpi_device_dir(device)); + + remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); + acpi_device_dir(device) = NULL; +} + +#endif + +/* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -733,7 +1038,7 @@ old = battery->bat.dev; if (event == ACPI_BATTERY_NOTIFY_INFO) acpi_battery_refresh(battery); - acpi_battery_update(battery); + acpi_battery_update(battery, false); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, acpi_battery_present(battery)); @@ -747,13 +1052,27 @@ { struct acpi_battery *battery = container_of(nb, struct acpi_battery, pm_nb); + int result; + switch (mode) { case PM_POST_HIBERNATION: case PM_POST_SUSPEND: - if (battery->bat.dev) { - sysfs_remove_battery(battery); - sysfs_add_battery(battery); - } + if (!acpi_battery_present(battery)) + return 0; + + if (!battery->bat.dev) { + result = acpi_battery_get_info(battery); + if (result) + return result; + + result = sysfs_add_battery(battery); + if (result) + return result; + } else + acpi_battery_refresh(battery); + + acpi_battery_init_alarm(battery); + acpi_battery_get_state(battery); break; } @@ -771,6 +1090,28 @@ {}, }; +/* + * Some machines'(E,G Lenovo Z480) ECs are not stable + * during boot up and this causes battery driver fails to be + * probed due to failure of getting battery information + * from EC sometimes. After several retries, the operation + * may work. So add retry code here and 20ms sleep between + * every retries. 
+ */ +static int acpi_battery_update_retry(struct acpi_battery *battery) +{ + int retry, ret; + + for (retry = 5; retry; retry--) { + ret = acpi_battery_update(battery, false); + if (!ret) + break; + + msleep(20); + } + return ret; +} + static int acpi_battery_add(struct acpi_device *device) { int result = 0; @@ -789,10 +1130,21 @@ mutex_init(&battery->sysfs_lock); if (acpi_has_method(battery->device->handle, "_BIX")) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); - result = acpi_battery_update(battery); + + result = acpi_battery_update_retry(battery); if (result) goto fail; +#ifdef CONFIG_ACPI_PROCFS_POWER + result = acpi_battery_add_fs(device); +#endif + if (result) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_remove_fs(device); +#endif + goto fail; + } + printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), device->status.battery_present ? "present" : "absent"); @@ -818,6 +1170,9 @@ return -EINVAL; battery = acpi_driver_data(device); unregister_pm_notifier(&battery->pm_nb); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_remove_fs(device); +#endif sysfs_remove_battery(battery); mutex_destroy(&battery->lock); mutex_destroy(&battery->sysfs_lock); @@ -839,7 +1194,7 @@ return -EINVAL; battery->update_time = 0; - acpi_battery_update(battery); + acpi_battery_update(battery, true); return 0; } #endif @@ -866,7 +1221,19 @@ if (dmi_check_system(bat_dmi_table)) battery_bix_broken_package = 1; - acpi_bus_register_driver(&acpi_battery_driver); + +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_dir = acpi_lock_battery_dir(); + if (!acpi_battery_dir) + return; +#endif + if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_battery_dir(acpi_battery_dir); +#endif + return; + } + return; } static int __init acpi_battery_init(void) @@ -878,6 +1245,9 @@ static void __exit acpi_battery_exit(void) { acpi_bus_unregister_driver(&acpi_battery_driver); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_battery_dir(acpi_battery_dir); +#endif } module_init(acpi_battery_init); --- linux-3.13.0.orig/drivers/acpi/blacklist.c +++ linux-3.13.0/drivers/acpi/blacklist.c @@ -248,8 +248,8 @@ }, /* - * The following machines have broken backlight support when reporting - * the Windows 2012 OSI, so disable it until their support is fixed. 
+ * The wireless hotkey does not work on those machines when + * returning true for _OSI("Windows 2012") */ { .callback = dmi_disable_osi_win8, @@ -261,14 +261,6 @@ }, { .callback = dmi_disable_osi_win8, - .ident = "Dell Inspiron 15R SE", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), - }, - }, - { - .callback = dmi_disable_osi_win8, .ident = "ThinkPad Edge E530", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), @@ -323,6 +315,144 @@ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), }, }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 7737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 7537", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 5437", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 3437", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Vostro 3446", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Vostro 3546", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"), + }, + }, + /* + * The following Lenovo models have a broken workaround in the + * acpi_video backlight implementation to meet the Windows 8 + * requirement of 101 backlight levels. Reverting to pre-Win8 + * behavior fixes the problem. 
+ */ + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad L430", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad T430", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad T430s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad T530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad W530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad X1 Carbon", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad X230", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Lenovo ThinkPad Edge E330", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E330"), + }, + }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 5537", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5537"), + }, + }, + /* + * The brightness hotkeys do not work on those machines when + * returning true for _OSI("Windows 2012") + */ + { + .callback = dmi_disable_osi_win8, + .ident = "HP Pavilion dv6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. @@ -383,6 +513,19 @@ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), }, }, + /* + * Without this this EEEpc exports a non working WMI interface, with + * this it exports a working "good old" eeepc_laptop interface, fixing + * both brightness control, and rfkill not working. + */ + { + .callback = dmi_enable_osi_linux, + .ident = "Asus EEE PC 1015PX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"), + }, + }, {} }; --- linux-3.13.0.orig/drivers/acpi/bus.c +++ linux-3.13.0/drivers/acpi/bus.c @@ -33,6 +33,7 @@ #include #include #include +#include #ifdef CONFIG_X86 #include #endif @@ -56,6 +57,12 @@ #ifdef CONFIG_X86 +#ifdef CONFIG_ACPI_CUSTOM_DSDT +static inline int set_copy_dsdt(const struct dmi_system_id *id) +{ + return 0; +} +#else static int set_copy_dsdt(const struct dmi_system_id *id) { printk(KERN_NOTICE "%s detected - " @@ -63,6 +70,7 @@ acpi_gbl_copy_dsdt_locally = 1; return 0; } +#endif static struct dmi_system_id dsdt_dmi_table[] __initdata = { /* @@ -576,6 +584,14 @@ goto error0; } + /* + * If the system is using ACPI then we can be reasonably + * confident that any regulators are managed by the firmware + * so tell the regulator core it has everything it needs to + * know. 
+ */ + regulator_has_full_constraints(); + return; error0: --- linux-3.13.0.orig/drivers/acpi/button.c +++ linux-3.13.0/drivers/acpi/button.c @@ -302,6 +302,10 @@ input_sync(input); pm_wakeup_event(&device->dev, 0); + acpi_bus_generate_netlink_event( + device->pnp.device_class, + dev_name(&device->dev), + event, ++button->pushed); } break; default: --- linux-3.13.0.orig/drivers/acpi/cm_sbs.c +++ linux-3.13.0/drivers/acpi/cm_sbs.c @@ -0,0 +1,105 @@ +/* + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFIX "ACPI: " + +ACPI_MODULE_NAME("cm_sbs"); +#define ACPI_AC_CLASS "ac_adapter" +#define ACPI_BATTERY_CLASS "battery" +#define _COMPONENT ACPI_SBS_COMPONENT +static struct proc_dir_entry *acpi_ac_dir; +static struct proc_dir_entry *acpi_battery_dir; + +static DEFINE_MUTEX(cm_sbs_mutex); + +static int lock_ac_dir_cnt; +static int lock_battery_dir_cnt; + +struct proc_dir_entry *acpi_lock_ac_dir(void) +{ + mutex_lock(&cm_sbs_mutex); + if (!acpi_ac_dir) + acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); + if (acpi_ac_dir) { + lock_ac_dir_cnt++; + } else { + printk(KERN_ERR PREFIX + "Cannot create %s\n", ACPI_AC_CLASS); + } + mutex_unlock(&cm_sbs_mutex); + return acpi_ac_dir; +} +EXPORT_SYMBOL(acpi_lock_ac_dir); + +void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) +{ + mutex_lock(&cm_sbs_mutex); + if (acpi_ac_dir_param) + lock_ac_dir_cnt--; + if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); + acpi_ac_dir = NULL; + } + mutex_unlock(&cm_sbs_mutex); +} +EXPORT_SYMBOL(acpi_unlock_ac_dir); + +struct proc_dir_entry *acpi_lock_battery_dir(void) +{ + mutex_lock(&cm_sbs_mutex); + if (!acpi_battery_dir) { + acpi_battery_dir = + proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); + } + if (acpi_battery_dir) { + lock_battery_dir_cnt++; + } else { + printk(KERN_ERR PREFIX + "Cannot create %s\n", ACPI_BATTERY_CLASS); + } + mutex_unlock(&cm_sbs_mutex); + return acpi_battery_dir; +} +EXPORT_SYMBOL(acpi_lock_battery_dir); + +void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) +{ + mutex_lock(&cm_sbs_mutex); + if (acpi_battery_dir_param) + lock_battery_dir_cnt--; + if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param + && acpi_battery_dir) { + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); + acpi_battery_dir = NULL; + } + mutex_unlock(&cm_sbs_mutex); + return; +} +EXPORT_SYMBOL(acpi_unlock_battery_dir); --- linux-3.13.0.orig/drivers/acpi/device_pm.c +++ linux-3.13.0/drivers/acpi/device_pm.c @@ -857,7 +857,7 @@ return 0; target_state = acpi_target_system_state(); - 
wakeup = device_may_wakeup(dev); + wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev); error = __acpi_device_sleep_wake(adev, target_state, wakeup); if (wakeup && error) return error; --- linux-3.13.0.orig/drivers/acpi/ec.c +++ linux-3.13.0/drivers/acpi/ec.c @@ -68,6 +68,8 @@ #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ +#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query + * when trying to clear the EC */ enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ @@ -77,6 +79,9 @@ EC_FLAGS_BLOCKED, /* Transactions are blocked */ }; +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ + /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); @@ -112,7 +117,7 @@ u8 ri; u8 wlen; u8 rlen; - bool done; + u8 flags; }; struct acpi_ec *boot_ec, *first_ec; @@ -121,6 +126,8 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ +static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ /* -------------------------------------------------------------------------- Transaction Management @@ -152,69 +159,93 @@ outb(data, ec->data_addr); } -static int ec_transaction_done(struct acpi_ec *ec) +static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; int ret = 0; spin_lock_irqsave(&ec->lock, flags); - if (!ec->curr || ec->curr->done) + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; spin_unlock_irqrestore(&ec->lock, flags); return ret; } -static void start_transaction(struct acpi_ec *ec) -{ - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; - ec->curr->done = false; - acpi_ec_write_cmd(ec, ec->curr->command); -} - -static void advance_transaction(struct acpi_ec *ec, u8 status) +static bool advance_transaction(struct acpi_ec *ec) { - unsigned long flags; struct transaction *t; + u8 status; + bool wakeup = false; - spin_lock_irqsave(&ec->lock, flags); + pr_debug("===== %s =====\n", in_interrupt() ? 
"IRQ" : "TASK"); + status = acpi_ec_read_status(ec); t = ec->curr; if (!t) - goto unlock; - if (t->wlen > t->wi) { - if ((status & ACPI_EC_FLAG_IBF) == 0) - acpi_ec_write_data(ec, - t->wdata[t->wi++]); - else - goto err; - } else if (t->rlen > t->ri) { - if ((status & ACPI_EC_FLAG_OBF) == 1) { - t->rdata[t->ri++] = acpi_ec_read_data(ec); - if (t->rlen == t->ri) - t->done = true; + goto err; + if (t->flags & ACPI_EC_COMMAND_POLL) { + if (t->wlen > t->wi) { + if ((status & ACPI_EC_FLAG_IBF) == 0) + acpi_ec_write_data(ec, t->wdata[t->wi++]); + else + goto err; + } else if (t->rlen > t->ri) { + if ((status & ACPI_EC_FLAG_OBF) == 1) { + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == t->ri) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + if (t->command == ACPI_EC_COMMAND_QUERY) + pr_debug("hardware QR_EC completion\n"); + wakeup = true; + } + } else + goto err; + } else if (t->wlen == t->wi && + (status & ACPI_EC_FLAG_IBF) == 0) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + return wakeup; + } else { + if (EC_FLAGS_QUERY_HANDSHAKE && + !(status & ACPI_EC_FLAG_SCI) && + (t->command == ACPI_EC_COMMAND_QUERY)) { + t->flags |= ACPI_EC_COMMAND_POLL; + t->rdata[t->ri++] = 0x00; + t->flags |= ACPI_EC_COMMAND_COMPLETE; + pr_debug("software QR_EC completion\n"); + wakeup = true; + } else if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_cmd(ec, t->command); + t->flags |= ACPI_EC_COMMAND_POLL; } else goto err; - } else if (t->wlen == t->wi && - (status & ACPI_EC_FLAG_IBF) == 0) - t->done = true; - goto unlock; + return wakeup; + } err: /* * If SCI bit is set, then don't think it's a false IRQ * otherwise will take a not handled IRQ as a false one. */ - if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) - ++t->irq_count; + if (!(status & ACPI_EC_FLAG_SCI)) { + if (in_interrupt() && t) + ++t->irq_count; + } + return wakeup; +} -unlock: - spin_unlock_irqrestore(&ec->lock, flags); +static void start_transaction(struct acpi_ec *ec) +{ + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; + ec->curr->flags = 0; + (void)advance_transaction(ec); } -static int acpi_ec_sync_query(struct acpi_ec *ec); +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) { if (state & ACPI_EC_FLAG_SCI) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - return acpi_ec_sync_query(ec); + return acpi_ec_sync_query(ec, NULL); } return 0; } @@ -230,15 +261,17 @@ /* don't sleep with disabled interrupts */ if (EC_FLAGS_MSI || irqs_disabled()) { udelay(ACPI_EC_MSI_UDELAY); - if (ec_transaction_done(ec)) + if (ec_transaction_completed(ec)) return 0; } else { if (wait_event_timeout(ec->wait, - ec_transaction_done(ec), + ec_transaction_completed(ec), msecs_to_jiffies(1))) return 0; } - advance_transaction(ec, acpi_ec_read_status(ec)); + spin_lock_irqsave(&ec->lock, flags); + (void)advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); pr_debug("controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); @@ -270,23 +303,6 @@ return ret; } -static int ec_check_ibf0(struct acpi_ec *ec) -{ - u8 status = acpi_ec_read_status(ec); - return (status & ACPI_EC_FLAG_IBF) == 0; -} - -static int ec_wait_ibf0(struct acpi_ec *ec) -{ - unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); - /* interrupt wait manually if GPE mode is not active */ - while (time_before(jiffies, delay)) - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), - msecs_to_jiffies(1))) - return 
0; - return -ETIME; -} - static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) { int status; @@ -307,12 +323,6 @@ goto unlock; } } - if (ec_wait_ibf0(ec)) { - pr_err("input buffer is not empty, " - "aborting transaction\n"); - status = -ETIME; - goto end; - } pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", t->command, t->wdata ? t->wdata[0] : 0); /* disable GPE during transaction if storm is detected */ @@ -336,7 +346,6 @@ set_bit(EC_FLAGS_GPE_STORM, &ec->flags); } pr_debug("transaction end\n"); -end: if (ec->global_lock) acpi_release_global_lock(glk); unlock: @@ -466,6 +475,27 @@ EXPORT_SYMBOL(ec_get_handle); +/* + * Process _Q events that might have accumulated in the EC. + * Run with locked ec mutex. + */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_sync_query(ec, &value); + if (status || !value) + break; + } + + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + void acpi_ec_block_transactions(void) { struct acpi_ec *ec = first_ec; @@ -489,6 +519,10 @@ mutex_lock(&ec->mutex); /* Allow transactions to be carried out again */ clear_bit(EC_FLAGS_BLOCKED, &ec->flags); + + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); } @@ -578,13 +612,18 @@ kfree(handler); } -static int acpi_ec_sync_query(struct acpi_ec *ec) +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) { u8 value = 0; int status; struct acpi_ec_query_handler *handler, *copy; - if ((status = acpi_ec_query_unlocked(ec, &value))) + + status = acpi_ec_query_unlocked(ec, &value); + if (data) + *data = value; + if (status) return status; + list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { /* have custom handler for this bit */ @@ -608,7 +647,7 @@ if (!ec) return; mutex_lock(&ec->mutex); - acpi_ec_sync_query(ec); + acpi_ec_sync_query(ec, NULL); mutex_unlock(&ec->mutex); } @@ -627,17 +666,14 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, u32 gpe_number, void *data) { + unsigned long flags; struct acpi_ec *ec = data; - u8 status = acpi_ec_read_status(ec); - - pr_debug("~~~> interrupt, status:0x%02x\n", status); - advance_transaction(ec, status); - if (ec_transaction_done(ec) && - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { + spin_lock_irqsave(&ec->lock, flags); + if (advance_transaction(ec)) wake_up(&ec->wait); - ec_check_sci(ec, acpi_ec_read_status(ec)); - } + spin_unlock_irqrestore(&ec->lock, flags); + ec_check_sci(ec, acpi_ec_read_status(ec)); return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; } @@ -847,6 +883,13 @@ /* EC is fully operational, allow queries */ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); + + /* Clear stale _Q events if hardware might require that */ + if (EC_FLAGS_CLEAR_ON_RESUME) { + mutex_lock(&ec->mutex); + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); + } return ret; } @@ -948,6 +991,42 @@ return 0; } +/* + * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for + * which case, we complete the QR_EC without issuing it to the firmware. 
+ * https://bugzilla.kernel.org/show_bug.cgi?id=86211 + */ +static int ec_flag_query_handshake(const struct dmi_system_id *id) +{ + pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n"); + EC_FLAGS_QUERY_HANDSHAKE = 1; + return 0; +} + +/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. + */ +static int ec_clear_on_resume(const struct dmi_system_id *id) +{ + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + return 0; +} + static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_skip_dsdt_scan, "Compal JFL92", { @@ -991,6 +1070,12 @@ ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, + { + ec_flag_query_handshake, "Acer hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL}, {}, }; --- linux-3.13.0.orig/drivers/acpi/pci_irq.c +++ linux-3.13.0/drivers/acpi/pci_irq.c @@ -432,6 +432,7 @@ pin_name(pin)); } + kfree(entry); return 0; } --- linux-3.13.0.orig/drivers/acpi/pci_root.c +++ linux-3.13.0/drivers/acpi/pci_root.c @@ -418,8 +418,7 @@ } EXPORT_SYMBOL(acpi_pci_osc_control_set); -static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, - int *clear_aspm) +static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) { u32 support, control, requested; acpi_status status; @@ -477,10 +476,12 @@ decode_osc_control(root, "OS now controls", control); if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { /* - * We have ASPM control, but the FADT indicates - * that it's unsupported. Clear it. + * We have ASPM control, but the FADT indicates that + * it's unsupported. Leave existing configuration + * intact and prevent the OS from touching it. */ - *clear_aspm = 1; + dev_info(&device->dev, "FADT indicates ASPM is unsupported, using BIOS configuration\n"); + *no_aspm = 1; } } else { decode_osc_control(root, "OS requested", requested); @@ -507,7 +508,7 @@ int result; struct acpi_pci_root *root; acpi_handle handle = device->handle; - int no_aspm = 0, clear_aspm = 0; + int no_aspm = 0; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -560,7 +561,7 @@ root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle); - negotiate_os_control(root, &no_aspm, &clear_aspm); + negotiate_os_control(root, &no_aspm); /* * TBD: Need PCI interface for enumeration/configuration of roots. 
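
The EC, _OSI and backlight quirks added throughout this patch share one shape: a dmi_system_id table whose callback sets a module-level flag, evaluated once at init time via dmi_check_system(). A minimal sketch of that pattern follows; the names example_quirk, example_flag, example_dmi_table and the vendor string are illustrative only, not taken from the patch.

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/dmi.h>

    static int example_flag;	/* set when a quirky machine is detected */

    static int example_quirk(const struct dmi_system_id *id)
    {
    	pr_info("applying quirk for %s\n", id->ident);
    	example_flag = 1;
    	return 0;
    }

    static struct dmi_system_id example_dmi_table[] __initdata = {
    	{
    		.callback = example_quirk,
    		.ident = "Example vendor hardware",
    		.matches = {
    			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
    		},
    	},
    	{}	/* terminating entry */
    };

    static int __init example_detect(void)
    {
    	/* runs the callback of every matching table entry */
    	dmi_check_system(example_dmi_table);
    	return 0;
    }
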
@@ -583,10 +584,6 @@ goto end; } - if (clear_aspm) { - dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n"); - pcie_clear_aspm(root->bus); - } if (no_aspm) pcie_no_aspm(); --- linux-3.13.0.orig/drivers/acpi/processor_idle.c +++ linux-3.13.0/drivers/acpi/processor_idle.c @@ -973,7 +973,7 @@ return -EINVAL; drv->safe_state_index = -1; - for (i = 0; i < CPUIDLE_STATE_MAX; i++) { + for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) { drv->states[i].name[0] = '\0'; drv->states[i].desc[0] = '\0'; } @@ -1089,9 +1089,9 @@ if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { - cpuidle_pause_and_lock(); /* Protect against cpu-hotplug */ get_online_cpus(); + cpuidle_pause_and_lock(); /* Disable all cpuidle devices */ for_each_online_cpu(cpu) { @@ -1118,8 +1118,8 @@ cpuidle_enable_device(dev); } } - put_online_cpus(); cpuidle_resume_and_unlock(); + put_online_cpus(); } return 0; --- linux-3.13.0.orig/drivers/acpi/processor_throttling.c +++ linux-3.13.0/drivers/acpi/processor_throttling.c @@ -59,6 +59,12 @@ int target_state; /* target T-state */ }; +struct acpi_processor_throttling_arg { + struct acpi_processor *pr; + int target_state; + bool force; +}; + #define THROTTLING_PRECHANGE (1) #define THROTTLING_POSTCHANGE (2) @@ -1063,16 +1069,24 @@ return 0; } +static long acpi_processor_throttling_fn(void *data) +{ + struct acpi_processor_throttling_arg *arg = data; + struct acpi_processor *pr = arg->pr; + + return pr->throttling.acpi_processor_set_throttling(pr, + arg->target_state, arg->force); +} + int acpi_processor_set_throttling(struct acpi_processor *pr, int state, bool force) { - cpumask_var_t saved_mask; int ret = 0; unsigned int i; struct acpi_processor *match_pr; struct acpi_processor_throttling *p_throttling; + struct acpi_processor_throttling_arg arg; struct throttling_tstate t_state; - cpumask_var_t online_throttling_cpus; if (!pr) return -EINVAL; @@ -1083,14 +1097,6 @@ if ((state < 0) || (state > (pr->throttling.state_count - 1))) return -EINVAL; - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) - return -ENOMEM; - - if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) { - free_cpumask_var(saved_mask); - return -ENOMEM; - } - if (cpu_is_offline(pr->id)) { /* * the cpu pointed by pr->id is offline. Unnecessary to change @@ -1099,17 +1105,15 @@ return -ENODEV; } - cpumask_copy(saved_mask, ¤t->cpus_allowed); t_state.target_state = state; p_throttling = &(pr->throttling); - cpumask_and(online_throttling_cpus, cpu_online_mask, - p_throttling->shared_cpu_map); + /* * The throttling notifier will be called for every * affected cpu in order to get one proper T-state. * The notifier event is THROTTLING_PRECHANGE. */ - for_each_cpu(i, online_throttling_cpus) { + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_PRECHANGE, &t_state); @@ -1121,21 +1125,18 @@ * it can be called only for the cpu pointed by pr. */ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { - /* FIXME: use work_on_cpu() */ - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { - /* Can't migrate to the pr->id CPU. 
Exit */ - ret = -ENODEV; - goto exit; - } - ret = p_throttling->acpi_processor_set_throttling(pr, - t_state.target_state, force); + arg.pr = pr; + arg.target_state = state; + arg.force = force; + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); } else { /* * When the T-state coordination is SW_ALL or HW_ALL, * it is necessary to set T-state for every affected * cpus. */ - for_each_cpu(i, online_throttling_cpus) { + for_each_cpu_and(i, cpu_online_mask, + p_throttling->shared_cpu_map) { match_pr = per_cpu(processors, i); /* * If the pointer is invalid, we will report the @@ -1156,13 +1157,12 @@ "on CPU %d\n", i)); continue; } - t_state.cpu = i; - /* FIXME: use work_on_cpu() */ - if (set_cpus_allowed_ptr(current, cpumask_of(i))) - continue; - ret = match_pr->throttling. - acpi_processor_set_throttling( - match_pr, t_state.target_state, force); + + arg.pr = match_pr; + arg.target_state = state; + arg.force = force; + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, + &arg); } } /* @@ -1171,17 +1171,12 @@ * affected cpu to update the T-states. * The notifier event is THROTTLING_POSTCHANGE */ - for_each_cpu(i, online_throttling_cpus) { + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) { t_state.cpu = i; acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE, &t_state); } - /* restore the previous state */ - /* FIXME: use work_on_cpu() */ - set_cpus_allowed_ptr(current, saved_mask); -exit: - free_cpumask_var(online_throttling_cpus); - free_cpumask_var(saved_mask); + return ret; } --- linux-3.13.0.orig/drivers/acpi/resource.c +++ linux-3.13.0/drivers/acpi/resource.c @@ -77,18 +77,24 @@ switch (ares->type) { case ACPI_RESOURCE_TYPE_MEMORY24: memory24 = &ares->data.memory24; + if (!memory24->address_length) + return false; acpi_dev_get_memresource(res, memory24->minimum, memory24->address_length, memory24->write_protect); break; case ACPI_RESOURCE_TYPE_MEMORY32: memory32 = &ares->data.memory32; + if (!memory32->address_length) + return false; acpi_dev_get_memresource(res, memory32->minimum, memory32->address_length, memory32->write_protect); break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: fixed_memory32 = &ares->data.fixed_memory32; + if (!fixed_memory32->address_length) + return false; acpi_dev_get_memresource(res, fixed_memory32->address, fixed_memory32->address_length, fixed_memory32->write_protect); @@ -144,12 +150,16 @@ switch (ares->type) { case ACPI_RESOURCE_TYPE_IO: io = &ares->data.io; + if (!io->address_length) + return false; acpi_dev_get_ioresource(res, io->minimum, io->address_length, io->io_decode); break; case ACPI_RESOURCE_TYPE_FIXED_IO: fixed_io = &ares->data.fixed_io; + if (!fixed_io->address_length) + return false; acpi_dev_get_ioresource(res, fixed_io->address, fixed_io->address_length, ACPI_DECODE_10); --- linux-3.13.0.orig/drivers/acpi/sleep.c +++ linux-3.13.0/drivers/acpi/sleep.c @@ -75,6 +75,17 @@ return 0; } +static bool acpi_sleep_state_supported(u8 sleep_state) +{ + acpi_status status; + u8 type_a, type_b; + + status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b); + return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware + || (acpi_gbl_FADT.sleep_control.address + && acpi_gbl_FADT.sleep_status.address)); +} + #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; @@ -608,15 +619,9 @@ { int i; - for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { - acpi_status status; - u8 type_a, type_b; - - status = acpi_get_sleep_type_data(i, &type_a, &type_b); - if (ACPI_SUCCESS(status)) { + for (i = 
ACPI_STATE_S1; i < ACPI_STATE_S4; i++) + if (acpi_sleep_state_supported(i)) sleep_states[i] = 1; - } - } suspend_set_ops(old_suspend_ordering ? &acpi_suspend_ops_old : &acpi_suspend_ops); @@ -747,11 +752,7 @@ static void acpi_sleep_hibernate_setup(void) { - acpi_status status; - u8 type_a, type_b; - - status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); - if (ACPI_FAILURE(status)) + if (!acpi_sleep_state_supported(ACPI_STATE_S4)) return; hibernation_set_ops(old_suspend_ordering ? @@ -800,8 +801,6 @@ int __init acpi_sleep_init(void) { - acpi_status status; - u8 type_a, type_b; char supported[ACPI_S_STATE_COUNT * 3 + 1]; char *pos = supported; int i; @@ -816,8 +815,7 @@ acpi_sleep_suspend_setup(); acpi_sleep_hibernate_setup(); - status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); - if (ACPI_SUCCESS(status)) { + if (acpi_sleep_state_supported(ACPI_STATE_S5)) { sleep_states[ACPI_STATE_S5] = 1; pm_power_off_prepare = acpi_power_off_prepare; pm_power_off = acpi_power_off; --- linux-3.13.0.orig/drivers/acpi/video.c +++ linux-3.13.0/drivers/acpi/video.c @@ -82,11 +82,12 @@ module_param(allow_duplicates, bool, 0644); /* - * For Windows 8 systems: if set ture and the GPU driver has - * registered a backlight interface, skip registering ACPI video's. + * For Windows 8 systems: used to decide if video module + * should skip registering backlight interface of its own. */ -static bool use_native_backlight = false; -module_param(use_native_backlight, bool, 0644); +static int use_native_backlight_param = -1; +module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); +static bool use_native_backlight_dmi = false; static int register_count; static struct mutex video_list_lock; @@ -232,9 +233,17 @@ static int acpi_video_switch_brightness(struct acpi_video_device *device, int event); +static bool acpi_video_use_native_backlight(void) +{ + if (use_native_backlight_param != -1) + return use_native_backlight_param; + else + return use_native_backlight_dmi; +} + static bool acpi_video_verify_backlight_support(void) { - if (acpi_osi_is_win8() && use_native_backlight && + if (acpi_osi_is_win8() && acpi_video_use_native_backlight() && backlight_device_registered(BACKLIGHT_RAW)) return false; return acpi_video_backlight_support(); @@ -399,6 +408,12 @@ return 0; } +static int __init video_set_use_native_backlight(const struct dmi_system_id *d) +{ + use_native_backlight_dmi = true; + return 0; +} + static struct dmi_system_id video_dmi_table[] __initdata = { /* * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 @@ -443,6 +458,160 @@ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), }, }, + { + .callback = video_set_use_native_backlight, + .ident = "ThinkPad T430s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "ThinkPad X230", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "ThinkPad X1 Carbon", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Lenovo Yoga 13", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Dell Inspiron 7520", + 
.matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Dell Inspiron 5547", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5547"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Dell Inspiron 5447", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5447"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Dell Inspiron 5721", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5721"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Dell Inspiron 3521", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3521"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Acer Aspire 5733Z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "Acer Aspire V5-431", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP ProBook 4340s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP ProBook 2013 models", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "), + DMI_MATCH(DMI_PRODUCT_NAME, " G1"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP EliteBook 2013 models", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "), + DMI_MATCH(DMI_PRODUCT_NAME, " G1"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP ZBook 14", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP ZBook 15", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP ZBook 17", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP EliteBook 8780w", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), + }, + }, + { + .callback = video_set_use_native_backlight, + .ident = "HP ProBook 6570b", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook 6570b"), + }, + }, {} }; @@ -686,6 +855,7 @@ union acpi_object *o; struct acpi_video_device_brightness *br = NULL; int result = -EINVAL; + u32 value; if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available " @@ -716,7 +886,12 @@ printk(KERN_ERR PREFIX "Invalid data\n"); continue; } - br->levels[count] = (u32) o->integer.value; + value = (u32) o->integer.value; + /* Skip duplicate entries */ + if (count > 2 && br->levels[count - 1] == value) + continue; + + br->levels[count] = value; if 
(br->levels[count] > max_level) max_level = br->levels[count]; @@ -1922,6 +2097,17 @@ static int __init acpi_video_init(void) { + /* + * Let the module load even if ACPI is disabled (e.g. due to + * a broken BIOS) so that i915.ko can still be loaded on such + * old systems without an AcpiOpRegion. + * + * acpi_video_register() will report -ENODEV later as well due + * to acpi_disabled when i915.ko tries to register itself afterwards. + */ + if (acpi_disabled) + return 0; + dmi_check_system(video_dmi_table); if (intel_opregion_present()) --- linux-3.13.0.orig/drivers/acpi/video_detect.c +++ linux-3.13.0/drivers/acpi/video_detect.c @@ -176,6 +176,14 @@ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"), }, }, + { + .callback = video_detect_force_vendor, + .ident = "Dell Inspiron 5737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), + }, + }, { }, }; --- linux-3.13.0.orig/drivers/ata/Kconfig +++ linux-3.13.0/drivers/ata/Kconfig @@ -106,6 +106,13 @@ If unsure, say N. +config AHCI_XGENE + tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support" + depends on SATA_AHCI_PLATFORM && (ARM64 || COMPILE_TEST) + select PHY_XGENE + help + This option enables support for APM X-Gene SoC SATA host controller. + config SATA_FSL tristate "Freescale 3.0Gbps SATA support" depends on FSL_SOC --- linux-3.13.0.orig/drivers/ata/Makefile +++ linux-3.13.0/drivers/ata/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o obj-$(CONFIG_AHCI_IMX) += ahci_imx.o +obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o # SFF w/ custom DMA obj-$(CONFIG_PDC_ADMA) += pdc_adma.o --- linux-3.13.0.orig/drivers/ata/ahci.c +++ linux-3.13.0/drivers/ata/ahci.c @@ -61,6 +61,8 @@ /* board IDs by feature in alphabetical order */ board_ahci, board_ahci_ign_iferr, + board_ahci_nomsi, + board_ahci_noncq, board_ahci_nosntf, board_ahci_yes_fbs, @@ -119,6 +121,20 @@ .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_nomsi] = { + AHCI_HFLAGS (AHCI_HFLAG_NO_MSI), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, + [board_ahci_noncq] = { + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, [board_ahci_nosntf] = { AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), .flags = AHCI_FLAG_COMMON, @@ -296,6 +312,22 @@ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ + { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ + { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, 
/* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ + { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, @@ -433,16 +465,23 @@ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), + .driver_data = board_ahci_yes_fbs }, /* 88se9182 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0), + .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + .driver_data = board_ahci_yes_fbs }, /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ + { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */ /* Asmedia */ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ @@ -450,6 +489,13 @@ { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + /* + * Samsung SSDs found on some macbooks. NCQ times out if MSI is + * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731 + */ + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi }, + { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi }, + /* Enmotus */ { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, @@ -562,6 +608,7 @@ unsigned long deadline) { struct ata_port *ap = link->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; bool online; int rc; @@ -572,7 +619,7 @@ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); - ahci_start_engine(ap); + hpriv->start_engine(ap); DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); @@ -587,6 +634,7 @@ { struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; @@ -602,7 +650,7 @@ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); - ahci_start_engine(ap); + hpriv->start_engine(ap); /* The pseudo configuration device on SIMG4726 attached to * ASUS P5W-DH Deluxe doesn't send signature FIS after @@ -1155,18 +1203,18 @@ return rc; for (i = 0; i < host->n_ports; i++) { - const char* desc; struct ahci_port_priv *pp = host->ports[i]->private_data; - /* pp is NULL for dummy ports */ - if (pp) - desc = pp->irq_desc; - else - desc = dev_driver_string(host->dev); + /* Do not receive interrupts sent by dummy ports */ + if (!pp) { + disable_irq(irq + i); + continue; + } - rc = devm_request_threaded_irq(host->dev, - irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED, - desc, host->ports[i]); + rc = devm_request_threaded_irq(host->dev, irq + i, + ahci_hw_interrupt, + ahci_thread_fn, IRQF_SHARED, + pp->irq_desc, host->ports[i]); if (rc) goto out_free_irqs; } --- linux-3.13.0.orig/drivers/ata/ahci.h +++ linux-3.13.0/drivers/ata/ahci.h @@ -37,6 +37,8 @@ #include #include +#include +#include /* Enclosure Management 
Control */ #define EM_CTRL_MSG_TYPE 0x000f0000 @@ -51,6 +53,7 @@ enum { AHCI_MAX_PORTS = 32, + AHCI_MAX_CLKS = 3, AHCI_MAX_SG = 168, /* hardware max is 64K */ AHCI_DMA_BOUNDARY = 0xffffffff, AHCI_MAX_CMDS = 32, @@ -321,8 +324,17 @@ u32 em_loc; /* enclosure management location */ u32 em_buf_sz; /* EM buffer size in byte */ u32 em_msg_type; /* EM message type */ - struct clk *clk; /* Only for platforms supporting clk */ + bool got_runtime_pm; /* Did we do pm_runtime_get? */ + struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ + struct regulator *target_pwr; /* Optional */ + struct phy *phy; /* If platform uses phy */ void *plat_data; /* Other platform data */ + /* + * Optional ahci_start_engine override, if not set this gets set to the + * default ahci_start_engine during ahci_save_initial_config, this can + * be overridden anytime before the host is activated. + */ + void (*start_engine)(struct ata_port *ap); }; extern int ahci_ignore_sss; @@ -356,8 +368,11 @@ int pmp, unsigned long deadline, int (*check_ready)(struct ata_link *link)); +unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); int ahci_stop_engine(struct ata_port *ap); +void ahci_start_fis_rx(struct ata_port *ap); void ahci_start_engine(struct ata_port *ap); +int ahci_restart_engine(struct ata_port *ap); int ahci_check_ready(struct ata_link *link); int ahci_kick_engine(struct ata_port *ap); int ahci_port_resume(struct ata_port *ap); --- linux-3.13.0.orig/drivers/ata/ahci_platform.c +++ linux-3.13.0/drivers/ata/ahci_platform.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include "ahci.h" static void ahci_host_stop(struct ata_host *host); @@ -87,78 +89,284 @@ AHCI_SHT("ahci_platform"), }; -static int ahci_probe(struct platform_device *pdev) +/** + * ahci_platform_enable_clks - Enable platform clocks + * @hpriv: host private area to store config values + * + * This function enables all the clks found in hpriv->clks, starting at + * index 0. If any clk fails to enable it disables all the clks already + * enabled in reverse order, and then returns an error. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_enable_clks(struct ahci_host_priv *hpriv) +{ + int c, rc; + + for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) { + rc = clk_prepare_enable(hpriv->clks[c]); + if (rc) + goto disable_unprepare_clk; + } + return 0; + +disable_unprepare_clk: + while (--c >= 0) + clk_disable_unprepare(hpriv->clks[c]); + return rc; +} +EXPORT_SYMBOL_GPL(ahci_platform_enable_clks); + +/** + * ahci_platform_disable_clks - Disable platform clocks + * @hpriv: host private area to store config values + * + * This function disables all the clks found in hpriv->clks, in reverse + * order of ahci_platform_enable_clks (starting at the end of the array). + */ +void ahci_platform_disable_clks(struct ahci_host_priv *hpriv) +{ + int c; + + for (c = AHCI_MAX_CLKS - 1; c >= 0; c--) + if (hpriv->clks[c]) + clk_disable_unprepare(hpriv->clks[c]); +} +EXPORT_SYMBOL_GPL(ahci_platform_disable_clks); + +/** + * ahci_platform_enable_resources - Enable platform resources + * @hpriv: host private area to store config values + * + * This function enables all ahci_platform managed resources in the + * following order: + * 1) Regulator + * 2) Clocks (through ahci_platform_enable_clks) + * 3) Phy + * + * If resource enabling fails at any point the previous enabled resources + * are disabled in reverse order. 
+ * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_enable_resources(struct ahci_host_priv *hpriv) { - struct device *dev = &pdev->dev; - struct ahci_platform_data *pdata = dev_get_platdata(dev); - const struct platform_device_id *id = platform_get_device_id(pdev); - struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0]; - const struct ata_port_info *ppi[] = { &pi, NULL }; - struct ahci_host_priv *hpriv; - struct ata_host *host; - struct resource *mem; - int irq; - int n_ports; - int i; int rc; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(dev, "no mmio space\n"); - return -EINVAL; + if (hpriv->target_pwr) { + rc = regulator_enable(hpriv->target_pwr); + if (rc) + return rc; } - irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(dev, "no irq\n"); - return -EINVAL; + rc = ahci_platform_enable_clks(hpriv); + if (rc) + goto disable_regulator; + + if (hpriv->phy) { + rc = phy_init(hpriv->phy); + if (rc) + goto disable_clks; + + rc = phy_power_on(hpriv->phy); + if (rc) { + phy_exit(hpriv->phy); + goto disable_clks; + } } - if (pdata && pdata->ata_port_info) - pi = *pdata->ata_port_info; + return 0; - hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) { - dev_err(dev, "can't alloc ahci_host_priv\n"); - return -ENOMEM; +disable_clks: + ahci_platform_disable_clks(hpriv); + +disable_regulator: + if (hpriv->target_pwr) + regulator_disable(hpriv->target_pwr); + return rc; +} +EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); + +/** + * ahci_platform_disable_resources - Disable platform resources + * @hpriv: host private area to store config values + * + * This function disables all ahci_platform managed resources in the + * following order: + * 1) Phy + * 2) Clocks (through ahci_platform_disable_clks) + * 3) Regulator + */ +void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) +{ + if (hpriv->phy) { + phy_power_off(hpriv->phy); + phy_exit(hpriv->phy); } - hpriv->flags |= (unsigned long)pi.private_data; + ahci_platform_disable_clks(hpriv); + + if (hpriv->target_pwr) + regulator_disable(hpriv->target_pwr); +} +EXPORT_SYMBOL_GPL(ahci_platform_disable_resources); + +static void ahci_platform_put_resources(struct device *dev, void *res) +{ + struct ahci_host_priv *hpriv = res; + int c; + + if (hpriv->got_runtime_pm) { + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + } + + for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) + clk_put(hpriv->clks[c]); +} - hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem)); +/** + * ahci_platform_get_resources - Get platform resources + * @pdev: platform device to get resources for + * + * This function allocates an ahci_host_priv struct, and gets the following + * resources, storing a reference to them inside the returned struct: + * + * 1) mmio registers (IORESOURCE_MEM 0, mandatory) + * 2) regulator for controlling the targets power (optional) + * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, + * or for non devicetree enabled platforms a single clock + * 4) phy (optional) + * + * RETURNS: + * The allocated ahci_host_priv on success, otherwise an ERR_PTR value + */ +struct ahci_host_priv *ahci_platform_get_resources( + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + struct clk *clk; + int i, rc = -ENOMEM; + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return ERR_PTR(-ENOMEM); + + hpriv = devres_alloc(ahci_platform_put_resources, 
sizeof(*hpriv), + GFP_KERNEL); + if (!hpriv) + goto err_out; + + devres_add(dev, hpriv); + + hpriv->mmio = devm_ioremap_resource(dev, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); if (!hpriv->mmio) { - dev_err(dev, "can't map %pR\n", mem); - return -ENOMEM; + dev_err(dev, "no mmio space\n"); + goto err_out; } - hpriv->clk = clk_get(dev, NULL); - if (IS_ERR(hpriv->clk)) { - dev_err(dev, "can't get clock\n"); - } else { - rc = clk_prepare_enable(hpriv->clk); - if (rc) { - dev_err(dev, "clock prepare enable failed"); - goto free_clk; + hpriv->target_pwr = devm_regulator_get_optional(dev, "target"); + if (IS_ERR(hpriv->target_pwr)) { + rc = PTR_ERR(hpriv->target_pwr); + if (rc == -EPROBE_DEFER) + goto err_out; + hpriv->target_pwr = NULL; + } + + for (i = 0; i < AHCI_MAX_CLKS; i++) { + /* + * For now we must use clk_get(dev, NULL) for the first clock, + * because some platforms (da850, spear13xx) are not yet + * converted to use devicetree for clocks. For new platforms + * this is equivalent to of_clk_get(dev->of_node, 0). + */ + if (i == 0) + clk = clk_get(dev, NULL); + else + clk = of_clk_get(dev->of_node, i); + + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -EPROBE_DEFER) + goto err_out; + break; } + hpriv->clks[i] = clk; } - /* - * Some platforms might need to prepare for mmio region access, - * which could be done in the following init call. So, the mmio - * region shouldn't be accessed before init (if provided) has - * returned successfully. - */ - if (pdata && pdata->init) { - rc = pdata->init(dev, hpriv->mmio); - if (rc) - goto disable_unprepare_clk; + hpriv->phy = devm_phy_get(dev, "sata-phy"); + if (IS_ERR(hpriv->phy)) { + rc = PTR_ERR(hpriv->phy); + switch (rc) { + case -ENODEV: + case -ENOSYS: + /* continue normally */ + hpriv->phy = NULL; + break; + + case -EPROBE_DEFER: + goto err_out; + + default: + dev_err(dev, "couldn't get sata-phy\n"); + goto err_out; + } } - ahci_save_initial_config(dev, hpriv, - pdata ? pdata->force_port_map : 0, - pdata ? pdata->mask_port_map : 0); + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + hpriv->got_runtime_pm = true; + + devres_remove_group(dev, NULL); + return hpriv; + +err_out: + devres_release_group(dev, NULL); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(ahci_platform_get_resources); + +/** + * ahci_platform_init_host - Bring up an ahci-platform host + * @pdev: platform device pointer for the host + * @hpriv: ahci-host private data for the host + * @pi_template: template for the ata_port_info to use + * @force_port_map: param passed to ahci_save_initial_config + * @mask_port_map: param passed to ahci_save_initial_config + * + * This function does all the usual steps needed to bring up an + * ahci-platform host, note any necessary resources (ie clks, phy, etc.) + * must be initialized / enabled before calling this. 
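
Together with ahci_platform_get_resources() and ahci_platform_enable_resources() above, this reduces a platform AHCI driver's probe path to a few calls. A sketch of such a probe is shown below; the example_* names, the port_info values and the use of the exported ahci_ops are assumptions made for illustration, not code from this patch.

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include "ahci.h"

    static const struct ata_port_info example_ahci_port_info = {
    	.flags		= AHCI_FLAG_COMMON,
    	.pio_mask	= ATA_PIO4,
    	.udma_mask	= ATA_UDMA6,
    	.port_ops	= &ahci_ops,		/* exported by libahci */
    };

    static int example_ahci_probe(struct platform_device *pdev)
    {
    	struct ahci_host_priv *hpriv;
    	int rc;

    	hpriv = ahci_platform_get_resources(pdev);
    	if (IS_ERR(hpriv))
    		return PTR_ERR(hpriv);

    	rc = ahci_platform_enable_resources(hpriv);	/* regulator, clks, phy */
    	if (rc)
    		return rc;

    	rc = ahci_platform_init_host(pdev, hpriv, &example_ahci_port_info,
    				     0, 0);
    	if (rc)
    		ahci_platform_disable_resources(hpriv);

    	return rc;
    }
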
+ * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_init_host(struct platform_device *pdev, + struct ahci_host_priv *hpriv, + const struct ata_port_info *pi_template, + unsigned int force_port_map, + unsigned int mask_port_map) +{ + struct device *dev = &pdev->dev; + struct ata_port_info pi = *pi_template; + const struct ata_port_info *ppi[] = { &pi, NULL }; + struct ata_host *host; + int i, irq, n_ports, rc; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(dev, "no irq\n"); + return -EINVAL; + } /* prepare host */ + hpriv->flags |= (unsigned long)pi.private_data; + + ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map); + if (hpriv->cap & HOST_CAP_NCQ) pi.flags |= ATA_FLAG_NCQ; @@ -175,10 +383,8 @@ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); host = ata_host_alloc_pinfo(dev, ppi, n_ports); - if (!host) { - rc = -ENOMEM; - goto pdata_exit; - } + if (!host) + return -ENOMEM; host->private_data = hpriv; @@ -193,7 +399,8 @@ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - ata_port_desc(ap, "mmio %pR", mem); + ata_port_desc(ap, "mmio %pR", + platform_get_resource(pdev, IORESOURCE_MEM, 0)); ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); /* set enclosure management message type */ @@ -207,13 +414,53 @@ rc = ahci_reset_controller(host); if (rc) - goto pdata_exit; + return rc; ahci_init_controller(host); ahci_print_info(host, "platform"); - rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, - &ahci_platform_sht); + return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, + &ahci_platform_sht); +} +EXPORT_SYMBOL_GPL(ahci_platform_init_host); + +static int ahci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_platform_data *pdata = dev_get_platdata(dev); + const struct platform_device_id *id = platform_get_device_id(pdev); + const struct ata_port_info *pi_template; + struct ahci_host_priv *hpriv; + int rc; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; + + /* + * Some platforms might need to prepare for mmio region access, + * which could be done in the following init call. So, the mmio + * region shouldn't be accessed before init (if provided) has + * returned successfully. + */ + if (pdata && pdata->init) { + rc = pdata->init(dev, hpriv->mmio); + if (rc) + goto disable_resources; + } + + if (pdata && pdata->ata_port_info) + pi_template = pdata->ata_port_info; + else + pi_template = &ahci_port_info[id ? id->driver_data : 0]; + + rc = ahci_platform_init_host(pdev, hpriv, pi_template, + pdata ? pdata->force_port_map : 0, + pdata ? 
pdata->mask_port_map : 0); if (rc) goto pdata_exit; @@ -221,12 +468,8 @@ pdata_exit: if (pdata && pdata->exit) pdata->exit(dev); -disable_unprepare_clk: - if (!IS_ERR(hpriv->clk)) - clk_disable_unprepare(hpriv->clk); -free_clk: - if (!IS_ERR(hpriv->clk)) - clk_put(hpriv->clk); +disable_resources: + ahci_platform_disable_resources(hpriv); return rc; } @@ -239,21 +482,27 @@ if (pdata && pdata->exit) pdata->exit(dev); - if (!IS_ERR(hpriv->clk)) { - clk_disable_unprepare(hpriv->clk); - clk_put(hpriv->clk); - } + ahci_platform_disable_resources(hpriv); } #ifdef CONFIG_PM_SLEEP -static int ahci_suspend(struct device *dev) +/** + * ahci_platform_suspend_host - Suspend an ahci-platform host + * @dev: device pointer for the host + * + * This function does all the usual steps needed to suspend an + * ahci-platform host, note any necessary resources (ie clks, phy, etc.) + * must be disabled after calling this. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_suspend_host(struct device *dev) { - struct ahci_platform_data *pdata = dev_get_platdata(dev); struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->mmio; u32 ctl; - int rc; if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { dev_err(dev, "firmware update required for suspend/resume\n"); @@ -270,61 +519,118 @@ writel(ctl, mmio + HOST_CTL); readl(mmio + HOST_CTL); /* flush */ - rc = ata_host_suspend(host, PMSG_SUSPEND); + return ata_host_suspend(host, PMSG_SUSPEND); +} +EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); + +/** + * ahci_platform_resume_host - Resume an ahci-platform host + * @dev: device pointer for the host + * + * This function does all the usual steps needed to resume an ahci-platform + * host, note any necessary resources (ie clks, phy, etc.) must be + * initialized / enabled before calling this. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_resume_host(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + int rc; + + if (dev->power.power_state.event == PM_EVENT_SUSPEND) { + rc = ahci_reset_controller(host); + if (rc) + return rc; + + ahci_init_controller(host); + } + + ata_host_resume(host); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_platform_resume_host); + +/** + * ahci_platform_suspend - Suspend an ahci-platform device + * @dev: the platform device to suspend + * + * This function suspends the host associated with the device, followed by + * disabling all the resources of the device. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_suspend(struct device *dev) +{ + struct ahci_platform_data *pdata = dev_get_platdata(dev); + struct ata_host *host = dev_get_drvdata(dev); + struct ahci_host_priv *hpriv = host->private_data; + int rc; + + rc = ahci_platform_suspend_host(dev); if (rc) return rc; if (pdata && pdata->suspend) return pdata->suspend(dev); - if (!IS_ERR(hpriv->clk)) - clk_disable_unprepare(hpriv->clk); + ahci_platform_disable_resources(hpriv); return 0; } +EXPORT_SYMBOL_GPL(ahci_platform_suspend); -static int ahci_resume(struct device *dev) +/** + * ahci_platform_resume - Resume an ahci-platform device + * @dev: the platform device to resume + * + * This function enables all the resources of the device followed by + * resuming the host associated with the device. 
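
A driver that needs extra controller-specific work between the host and resource steps can also combine the *_host helpers by hand instead of using ahci_platform_suspend()/ahci_platform_resume() directly. A sketch under that assumption (the example_* names and the placeholder step are illustrative):

    #include <linux/pm.h>
    #include "ahci.h"

    #ifdef CONFIG_PM_SLEEP
    static int example_ahci_suspend(struct device *dev)
    {
    	struct ata_host *host = dev_get_drvdata(dev);
    	struct ahci_host_priv *hpriv = host->private_data;
    	int rc;

    	rc = ahci_platform_suspend_host(dev);	/* quiesce the AHCI host */
    	if (rc)
    		return rc;

    	/* hypothetical controller-specific save step would go here */

    	ahci_platform_disable_resources(hpriv);	/* phy, clks, regulator */
    	return 0;
    }

    static int example_ahci_resume(struct device *dev)
    {
    	struct ata_host *host = dev_get_drvdata(dev);
    	struct ahci_host_priv *hpriv = host->private_data;
    	int rc;

    	rc = ahci_platform_enable_resources(hpriv);
    	if (rc)
    		return rc;

    	rc = ahci_platform_resume_host(dev);	/* reset/reinit if suspended */
    	if (rc)
    		ahci_platform_disable_resources(hpriv);
    	return rc;
    }
    #endif

    static SIMPLE_DEV_PM_OPS(example_ahci_pm_ops, example_ahci_suspend,
    			 example_ahci_resume);
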
+ * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_resume(struct device *dev) { struct ahci_platform_data *pdata = dev_get_platdata(dev); struct ata_host *host = dev_get_drvdata(dev); struct ahci_host_priv *hpriv = host->private_data; int rc; - if (!IS_ERR(hpriv->clk)) { - rc = clk_prepare_enable(hpriv->clk); - if (rc) { - dev_err(dev, "clock prepare enable failed"); - return rc; - } - } + rc = ahci_platform_enable_resources(hpriv); + if (rc) + return rc; if (pdata && pdata->resume) { rc = pdata->resume(dev); if (rc) - goto disable_unprepare_clk; + goto disable_resources; } - if (dev->power.power_state.event == PM_EVENT_SUSPEND) { - rc = ahci_reset_controller(host); - if (rc) - goto disable_unprepare_clk; - - ahci_init_controller(host); - } + rc = ahci_platform_resume_host(dev); + if (rc) + goto disable_resources; - ata_host_resume(host); + /* We resumed so update PM runtime state */ + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); return 0; -disable_unprepare_clk: - if (!IS_ERR(hpriv->clk)) - clk_disable_unprepare(hpriv->clk); +disable_resources: + ahci_platform_disable_resources(hpriv); return rc; } +EXPORT_SYMBOL_GPL(ahci_platform_resume); #endif -static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume); +static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend, + ahci_platform_resume); static const struct of_device_id ahci_of_match[] = { { .compatible = "snps,spear-ahci", }, --- linux-3.13.0.orig/drivers/ata/ahci_xgene.c +++ linux-3.13.0/drivers/ata/ahci_xgene.c @@ -0,0 +1,555 @@ +/* + * AppliedMicro X-Gene SoC SATA Host Controller Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Loc Ho + * Tuan Phan + * Suman Tripathi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * NOTE: PM support is not currently available. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include "ahci.h" + +/* Max # of disk per a controller */ +#define MAX_AHCI_CHN_PERCTR 2 + +/* MUX CSR */ +#define SATA_ENET_CONFIG_REG 0x00000000 +#define CFG_SATA_ENET_SELECT_MASK 0x00000001 + +/* SATA core host controller CSR */ +#define SLVRDERRATTRIBUTES 0x00000000 +#define SLVWRERRATTRIBUTES 0x00000004 +#define MSTRDERRATTRIBUTES 0x00000008 +#define MSTWRERRATTRIBUTES 0x0000000c +#define BUSCTLREG 0x00000014 +#define IOFMSTRWAUX 0x00000018 +#define INTSTATUSMASK 0x0000002c +#define ERRINTSTATUS 0x00000030 +#define ERRINTSTATUSMASK 0x00000034 + +/* SATA host AHCI CSR */ +#define PORTCFG 0x000000a4 +#define PORTADDR_SET(dst, src) \ + (((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f)) +#define PORTPHY1CFG 0x000000a8 +#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \ + (((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000)) +#define PORTPHY2CFG 0x000000ac +#define PORTPHY3CFG 0x000000b0 +#define PORTPHY4CFG 0x000000b4 +#define PORTPHY5CFG 0x000000b8 +#define SCTL0 0x0000012C +#define PORTPHY5CFG_RTCHG_SET(dst, src) \ + (((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000)) +#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \ + (((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000)) +#define PORTAXICFG 0x000000bc +#define PORTAXICFG_OUTTRANS_SET(dst, src) \ + (((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000)) +#define PORTRANSCFG 0x000000c8 +#define PORTRANSCFG_RXWM_SET(dst, src) \ + (((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f)) + +/* SATA host controller AXI CSR */ +#define INT_SLV_TMOMASK 0x00000010 + +/* SATA diagnostic CSR */ +#define CFG_MEM_RAM_SHUTDOWN 0x00000070 +#define BLOCK_MEM_RDY 0x00000074 + +/* Max retry for link down */ +#define MAX_LINK_DOWN_RETRY 3 + +struct xgene_ahci_context { + struct ahci_host_priv *hpriv; + struct device *dev; + u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/ + void __iomem *csr_core; /* Core CSR address of IP */ + void __iomem *csr_diag; /* Diag CSR address of IP */ + void __iomem *csr_axi; /* AXI CSR address of IP */ + void __iomem *csr_mux; /* MUX CSR address of IP */ +}; + +static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx) +{ + dev_dbg(ctx->dev, "Release memory from shutdown\n"); + writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); + readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */ + msleep(1); /* reset may take up to 1ms */ + if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) { + dev_err(ctx->dev, "failed to release memory from shutdown\n"); + return -ENODEV; + } + return 0; +} + +/** + * xgene_ahci_restart_engine - Restart the dma engine. + * @ap : ATA port of interest + * + * Restarts the dma engine inside the controller. + */ +static int xgene_ahci_restart_engine(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + + ahci_stop_engine(ap); + ahci_start_fis_rx(ap); + hpriv->start_engine(ap); + + return 0; +} + +/** + * xgene_ahci_qc_issue - Issue commands to the device + * @qc: Command to issue + * + * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot + * clear the BSY bit after receiving the PIO setup FIS. This results in the dma + * state machine goes into the CMFatalErrorUpdate state and locks up. By + * restarting the dma engine, it removes the controller out of lock up state. 
+ */ +static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; + struct xgene_ahci_context *ctx = hpriv->plat_data; + int rc = 0; + + if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA)) + xgene_ahci_restart_engine(ap); + + rc = ahci_qc_issue(qc); + + /* Save the last command issued */ + ctx->last_cmd[ap->port_no] = qc->tf.command; + + return rc; +} + +static int xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx) +{ + void __iomem *diagcsr = ctx->csr_diag; + + if (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 && + readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF) + return 1; + return 0; +} + +/** + * xgene_ahci_read_id - Read ID data from the specified device + * @dev: device + * @tf: proposed taskfile + * @id: data buffer + * + * This custom read ID function is required due to the fact that the HW + * does not support DEVSLP. + */ +static unsigned int xgene_ahci_read_id(struct ata_device *dev, + struct ata_taskfile *tf, u16 *id) +{ + u32 err_mask; + + err_mask = ata_do_dev_read_id(dev, tf, id); + if (err_mask) + return err_mask; + + /* + * Mask reserved area. Word78 spec of Link Power Management + * bit15-8: reserved + * bit7: NCQ autosence + * bit6: Software settings preservation supported + * bit5: reserved + * bit4: In-order sata delivery supported + * bit3: DIPM requests supported + * bit2: DMA Setup FIS Auto-Activate optimization supported + * bit1: DMA Setup FIX non-Zero buffer offsets supported + * bit0: Reserved + * + * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP + */ + id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); + + return 0; +} + +static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel) +{ + void __iomem *mmio = ctx->hpriv->mmio; + u32 val; + + dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n", + mmio, channel); + val = readl(mmio + PORTCFG); + val = PORTADDR_SET(val, channel == 0 ? 2 : 3); + writel(val, mmio + PORTCFG); + readl(mmio + PORTCFG); /* Force a barrier */ + /* Disable fix rate */ + writel(0x0001fffe, mmio + PORTPHY1CFG); + readl(mmio + PORTPHY1CFG); /* Force a barrier */ + writel(0x28183219, mmio + PORTPHY2CFG); + readl(mmio + PORTPHY2CFG); /* Force a barrier */ + writel(0x13081008, mmio + PORTPHY3CFG); + readl(mmio + PORTPHY3CFG); /* Force a barrier */ + writel(0x00480815, mmio + PORTPHY4CFG); + readl(mmio + PORTPHY4CFG); /* Force a barrier */ + /* Set window negotiation */ + val = readl(mmio + PORTPHY5CFG); + val = PORTPHY5CFG_RTCHG_SET(val, 0x300); + writel(val, mmio + PORTPHY5CFG); + readl(mmio + PORTPHY5CFG); /* Force a barrier */ + val = readl(mmio + PORTAXICFG); + val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */ + val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */ + writel(val, mmio + PORTAXICFG); + readl(mmio + PORTAXICFG); /* Force a barrier */ + /* Set the watermark threshold of the receive FIFO */ + val = readl(mmio + PORTRANSCFG); + val = PORTRANSCFG_RXWM_SET(val, 0x30); + writel(val, mmio + PORTRANSCFG); +} + +/** + * xgene_ahci_do_hardreset - Issue the actual COMRESET + * @link: link to reset + * @deadline: deadline jiffies for the operation + * @online: Return value to indicate if device online + * + * Due to the limitation of the hardware PHY, a difference set of setting is + * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps), + * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will + * report disparity error and etc. 
In addition, during COMRESET, there can + * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and + * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following + * algorithm is followed to proper configure the hardware PHY during COMRESET: + * + * Alg Part 1: + * 1. Start the PHY at Gen3 speed (default setting) + * 2. Issue the COMRESET + * 3. If no link, go to Alg Part 3 + * 4. If link up, determine if the negotiated speed matches the PHY + * configured speed + * 5. If they matched, go to Alg Part 2 + * 6. If they do not matched and first time, configure the PHY for the linked + * up disk speed and repeat step 2 + * 7. Go to Alg Part 2 + * + * Alg Part 2: + * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error + * reported in the register PORT_SCR_ERR, then reset the PHY receiver line + * 2. Go to Alg Part 3 + * + * Alg Part 3: + * 1. Clear any pending from register PORT_SCR_ERR. + * + * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition + * and until the underlying PHY supports an method to reset the receiver + * line, on detection of SERR_DISPARITY or SERR_10B_8B_ERR errors, + * an warning message will be printed. + */ +static int xgene_ahci_do_hardreset(struct ata_link *link, + unsigned long deadline, bool *online) +{ + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); + struct ata_port *ap = link->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; + struct xgene_ahci_context *ctx = hpriv->plat_data; + struct ahci_port_priv *pp = ap->private_data; + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + void __iomem *port_mmio = ahci_port_base(ap); + struct ata_taskfile tf; + int link_down_retry = 0; + int rc; + u32 val, sstatus; + + do { + /* clear D2H reception area to properly wait for D2H FIS */ + ata_tf_init(link->device, &tf); + tf.command = ATA_BUSY; + ata_tf_to_fis(&tf, 0, 0, d2h_fis); + rc = sata_link_hardreset(link, timing, deadline, online, + ahci_check_ready); + if (*online) + break; + + sata_scr_read(link, SCR_STATUS, &sstatus); + } while (link_down_retry++ < MAX_LINK_DOWN_RETRY && + (sstatus & 0xff) == 0x1); + + val = readl(port_mmio + PORT_SCR_ERR); + if (val & (SERR_DISPARITY | SERR_10B_8B_ERR)) + dev_warn(ctx->dev, "link has error\n"); + + /* clear all errors if any pending */ + val = readl(port_mmio + PORT_SCR_ERR); + writel(val, port_mmio + PORT_SCR_ERR); + + return rc; +} + +static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + struct ata_port *ap = link->ap; + struct ahci_host_priv *hpriv = ap->host->private_data; + void __iomem *port_mmio = ahci_port_base(ap); + bool online; + int rc; + u32 portcmd_saved; + u32 portclb_saved; + u32 portclbhi_saved; + u32 portrxfis_saved; + u32 portrxfishi_saved; + + /* As hardreset resets these CSR, save it to restore later */ + portcmd_saved = readl(port_mmio + PORT_CMD); + portclb_saved = readl(port_mmio + PORT_LST_ADDR); + portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI); + portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); + portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); + + ahci_stop_engine(ap); + + rc = xgene_ahci_do_hardreset(link, deadline, &online); + + /* As controller hardreset clears them, restore them */ + writel(portcmd_saved, port_mmio + PORT_CMD); + writel(portclb_saved, port_mmio + PORT_LST_ADDR); + writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI); + writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR); + writel(portrxfishi_saved, port_mmio + 
PORT_FIS_ADDR_HI); + + hpriv->start_engine(ap); + + if (online) + *class = ahci_dev_classify(ap); + + return rc; +} + +static void xgene_ahci_host_stop(struct ata_host *host) +{ + struct ahci_host_priv *hpriv = host->private_data; + + ahci_platform_disable_resources(hpriv); +} + +static struct ata_port_operations xgene_ahci_ops = { + .inherits = &ahci_ops, + .host_stop = xgene_ahci_host_stop, + .hardreset = xgene_ahci_hardreset, + .read_id = xgene_ahci_read_id, + .qc_issue = xgene_ahci_qc_issue, +}; + +static const struct ata_port_info xgene_ahci_port_info = { + AHCI_HFLAGS(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &xgene_ahci_ops, +}; + +static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv) +{ + struct xgene_ahci_context *ctx = hpriv->plat_data; + int i; + int rc; + u32 val; + + /* Remove IP RAM out of shutdown */ + rc = xgene_ahci_init_memram(ctx); + if (rc) + return rc; + + for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++) + xgene_ahci_set_phy_cfg(ctx, i); + + /* AXI disable Mask */ + writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT); + readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */ + writel(0, ctx->csr_core + INTSTATUSMASK); + val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */ + dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n", + INTSTATUSMASK, val); + + writel(0x0, ctx->csr_core + ERRINTSTATUSMASK); + readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */ + writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK); + readl(ctx->csr_axi + INT_SLV_TMOMASK); + + /* Enable AXI Interrupt */ + writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES); + writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES); + writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES); + writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES); + + /* Enable coherency */ + val = readl(ctx->csr_core + BUSCTLREG); + val &= ~0x00000002; /* Enable write coherency */ + val &= ~0x00000001; /* Enable read coherency */ + writel(val, ctx->csr_core + BUSCTLREG); + + val = readl(ctx->csr_core + IOFMSTRWAUX); + val |= (1 << 3); /* Enable read coherency */ + val |= (1 << 9); /* Enable write coherency */ + writel(val, ctx->csr_core + IOFMSTRWAUX); + val = readl(ctx->csr_core + IOFMSTRWAUX); + dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n", + IOFMSTRWAUX, val); + + return rc; +} + +static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx) +{ + u32 val; + + /* Check for optional MUX resource */ + if (IS_ERR(ctx->csr_mux)) + return 0; + + val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG); + val &= ~CFG_SATA_ENET_SELECT_MASK; + writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG); + val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG); + return val & CFG_SATA_ENET_SELECT_MASK ? 
-1 : 0; +} + +static int xgene_ahci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ahci_host_priv *hpriv; + struct xgene_ahci_context *ctx; + struct resource *res; + int rc; + + hpriv = ahci_platform_get_resources(pdev); + if (IS_ERR(hpriv)) + return PTR_ERR(hpriv); + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + hpriv->plat_data = ctx; + ctx->hpriv = hpriv; + ctx->dev = dev; + + /* Retrieve the IP core resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ctx->csr_core = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->csr_core)) + return PTR_ERR(ctx->csr_core); + + /* Retrieve the IP diagnostic resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + ctx->csr_diag = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->csr_diag)) + return PTR_ERR(ctx->csr_diag); + + /* Retrieve the IP AXI resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + ctx->csr_axi = devm_ioremap_resource(dev, res); + if (IS_ERR(ctx->csr_axi)) + return PTR_ERR(ctx->csr_axi); + + /* Retrieve the optional IP mux resource */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 4); + ctx->csr_mux = devm_ioremap_resource(dev, res); + + dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core, + hpriv->mmio); + + /* Select ATA */ + if ((rc = xgene_ahci_mux_select(ctx))) { + dev_err(dev, "SATA mux selection failed error %d\n", rc); + return -ENODEV; + } + + if (xgene_ahci_is_memram_inited(ctx)) { + dev_info(dev, "skip clock and PHY initialization\n"); + goto skip_clk_phy; + } + + /* Due to errata, HW requires full toggle transition */ + rc = ahci_platform_enable_clks(hpriv); + if (rc) + goto disable_resources; + ahci_platform_disable_clks(hpriv); + + rc = ahci_platform_enable_resources(hpriv); + if (rc) + goto disable_resources; + + /* Configure the host controller */ + xgene_ahci_hw_init(hpriv); + +skip_clk_phy: + + /* + * Setup DMA mask. This is preliminary until the DMA range is sorted + * out. 
+ */ + rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (rc) { + dev_err(dev, "Unable to set dma mask\n"); + goto disable_resources; + } + + rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, 0, 0); + if (rc) + goto disable_resources; + + dev_dbg(dev, "X-Gene SATA host controller initialized\n"); + return 0; + +disable_resources: + ahci_platform_disable_resources(hpriv); + return rc; +} + +static const struct of_device_id xgene_ahci_of_match[] = { + {.compatible = "apm,xgene-ahci"}, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_ahci_of_match); + +static struct platform_driver xgene_ahci_driver = { + .probe = xgene_ahci_probe, + .remove = ata_platform_remove_one, + .driver = { + .name = "xgene-ahci", + .owner = THIS_MODULE, + .of_match_table = xgene_ahci_of_match, + }, +}; + +module_platform_driver(xgene_ahci_driver); + +MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver"); +MODULE_AUTHOR("Loc Ho "); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.4"); --- linux-3.13.0.orig/drivers/ata/ata_piix.c +++ linux-3.13.0/drivers/ata/ata_piix.c @@ -340,6 +340,14 @@ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, /* SATA Controller IDE (Coleto Creek) */ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (9 Series) */ + { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, { } /* terminate list */ }; --- linux-3.13.0.orig/drivers/ata/libahci.c +++ linux-3.13.0/drivers/ata/libahci.c @@ -69,7 +69,6 @@ static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int ahci_port_start(struct ata_port *ap); static void ahci_port_stop(struct ata_port *ap); @@ -394,6 +393,9 @@ * * If inconsistent, config values are fixed up by this function. * + * If it is not set already this function sets hpriv->start_engine to + * ahci_start_engine. + * * LOCKING: * None. 
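The new hpriv->start_engine hook, defaulted here to ahci_start_engine(), is what lets a platform driver interpose controller-specific behaviour at every point where libahci previously hard-coded the engine start. A hedged sketch of an override; foo_start_engine is hypothetical and simply mirrors what the default is understood to do once its quirk handling is done:

	static void foo_start_engine(struct ata_port *ap)
	{
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd;

		/* controller-specific quirk handling would go here */

		cmd = readl(port_mmio + PORT_CMD);
		writel(cmd | PORT_CMD_START, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD);	/* flush */
	}

A driver installs it by assigning hpriv->start_engine before host initialisation, so the default assignment added to ahci_save_initial_config() leaves the override in place.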
*/ @@ -500,6 +502,9 @@ hpriv->cap = cap; hpriv->cap2 = cap2; hpriv->port_map = port_map; + + if (!hpriv->start_engine) + hpriv->start_engine = ahci_start_engine; } EXPORT_SYMBOL_GPL(ahci_save_initial_config); @@ -603,7 +608,7 @@ } EXPORT_SYMBOL_GPL(ahci_stop_engine); -static void ahci_start_fis_rx(struct ata_port *ap) +void ahci_start_fis_rx(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); struct ahci_host_priv *hpriv = ap->host->private_data; @@ -629,6 +634,7 @@ /* flush */ readl(port_mmio + PORT_CMD); } +EXPORT_SYMBOL_GPL(ahci_start_fis_rx); static int ahci_stop_fis_rx(struct ata_port *ap) { @@ -730,6 +736,18 @@ return 0; } +int ahci_restart_engine(struct ata_port *ap) +{ + struct ahci_host_priv *hpriv = ap->host->private_data; + + ahci_stop_engine(ap); + ahci_start_fis_rx(ap); + hpriv->start_engine(ap); + + return 0; +} +EXPORT_SYMBOL_GPL(ahci_restart_engine); + #ifdef CONFIG_PM static void ahci_power_down(struct ata_port *ap) { @@ -766,7 +784,7 @@ /* enable DMA */ if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE)) - ahci_start_engine(ap); + hpriv->start_engine(ap); /* turn on LEDs */ if (ap->flags & ATA_FLAG_EM) { @@ -1234,7 +1252,7 @@ /* restart engine */ out_restart: - ahci_start_engine(ap); + hpriv->start_engine(ap); return rc; } EXPORT_SYMBOL_GPL(ahci_kick_engine); @@ -1426,6 +1444,7 @@ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; @@ -1443,7 +1462,7 @@ rc = sata_link_hardreset(link, timing, deadline, &online, ahci_check_ready); - ahci_start_engine(ap); + hpriv->start_engine(ap); if (online) *class = ahci_dev_classify(ap); @@ -1926,7 +1945,7 @@ } EXPORT_SYMBOL_GPL(ahci_interrupt); -static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) +unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *port_mmio = ahci_port_base(ap); @@ -1955,6 +1974,7 @@ return 0; } +EXPORT_SYMBOL_GPL(ahci_qc_issue); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) { @@ -2007,10 +2027,12 @@ void ahci_error_handler(struct ata_port *ap) { + struct ahci_host_priv *hpriv = ap->host->private_data; + if (!(ap->pflags & ATA_PFLAG_FROZEN)) { /* restart engine */ ahci_stop_engine(ap); - ahci_start_engine(ap); + hpriv->start_engine(ap); } sata_pmp_error_handler(ap); @@ -2031,6 +2053,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) { + struct ahci_host_priv *hpriv = ap->host->private_data; void __iomem *port_mmio = ahci_port_base(ap); struct ata_device *dev = ap->link.device; u32 devslp, dm, dito, mdat, deto; @@ -2094,7 +2117,7 @@ PORT_DEVSLP_ADSE); writel(devslp, port_mmio + PORT_DEVSLP); - ahci_start_engine(ap); + hpriv->start_engine(ap); /* enable device sleep feature for the drive */ err_mask = ata_dev_set_feature(dev, @@ -2106,6 +2129,7 @@ static void ahci_enable_fbs(struct ata_port *ap) { + struct ahci_host_priv *hpriv = ap->host->private_data; struct ahci_port_priv *pp = ap->private_data; void __iomem *port_mmio = ahci_port_base(ap); u32 fbs; @@ -2134,11 +2158,12 @@ } else dev_err(ap->host->dev, "Failed to enable FBS\n"); - ahci_start_engine(ap); + hpriv->start_engine(ap); } static void ahci_disable_fbs(struct ata_port *ap) { + struct ahci_host_priv *hpriv = ap->host->private_data; struct ahci_port_priv *pp = ap->private_data; void __iomem *port_mmio = ahci_port_base(ap); u32 
fbs; @@ -2166,7 +2191,7 @@ pp->fbs_enabled = false; } - ahci_start_engine(ap); + hpriv->start_engine(ap); } static void ahci_pmp_attach(struct ata_port *ap) --- linux-3.13.0.orig/drivers/ata/libata-core.c +++ linux-3.13.0/drivers/ata/libata-core.c @@ -2222,6 +2222,16 @@ if (rc) return rc; + /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ + if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && + (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) + dev->horkage |= ATA_HORKAGE_NOLPM; + + if (dev->horkage & ATA_HORKAGE_NOLPM) { + ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); + dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; + } + /* let ACPI work its magic */ rc = ata_acpi_on_devcfg(dev); if (rc) @@ -4165,6 +4175,7 @@ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ @@ -4214,7 +4225,26 @@ /* devices that don't properly handle queued TRIM commands */ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + + /* + * Some WD SATA-I drives spin up and down erratically when the link + * is put into the slumber mode. We don't have full list of the + * affected devices. Disable LPM if the device matches one of the + * known prefixes and is SATA-1. As a side effect LPM partial is + * lost too. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=57211 + */ + { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, + { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, /* End Marker */ { } @@ -4757,6 +4787,10 @@ * ata_qc_new - Request an available ATA command, for queueing * @ap: target port * + * Some ATA host controllers may implement a queue depth which is less + * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond + * the hardware limitation. + * * LOCKING: * None. */ @@ -4764,21 +4798,30 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) { struct ata_queued_cmd *qc = NULL; - unsigned int i; + unsigned int max_queue = ap->host->n_tags; + unsigned int i, tag; /* no command while frozen */ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) return NULL; - /* the last tag is reserved for internal command. */ - for (i = 0; i < ATA_MAX_QUEUE - 1; i++) - if (!test_and_set_bit(i, &ap->qc_allocated)) { - qc = __ata_qc_from_tag(ap, i); + for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { + if (ap->flags & ATA_FLAG_LOWTAG) + tag = i; + else + tag = tag < max_queue ? tag : 0; + + /* the last tag is reserved for internal command. 
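The reworked allocator walks the tag space round-robin starting just after ap->last_tag (or always from zero when the LLD sets ATA_FLAG_LOWTAG, as sata_sil24 does further down), and host->n_tags is clamped to the SCSI host's can_queue at registration, so a controller with a shallow hardware queue never sees an out-of-range tag. A short worked example, assuming n_tags == 31:

	/*
	 * last_tag == 29:  try 30, then wrap to 0, 1, 2, ...
	 * last_tag == 30:  wrap immediately and try 0, 1, 2, ...
	 * ATA_TAG_INTERNAL is skipped in every pass, so it stays reserved
	 * for internal commands.
	 */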
*/ + if (tag == ATA_TAG_INTERNAL) + continue; + + if (!test_and_set_bit(tag, &ap->qc_allocated)) { + qc = __ata_qc_from_tag(ap, tag); + qc->tag = tag; + ap->last_tag = tag; break; } - - if (qc) - qc->tag = i; + } return qc; } @@ -6068,6 +6111,7 @@ { spin_lock_init(&host->lock); mutex_init(&host->eh_mutex); + host->n_tags = ATA_MAX_QUEUE - 1; host->dev = dev; host->ops = ops; } @@ -6149,6 +6193,8 @@ { int i, rc; + host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); + /* host must have been started */ if (!(host->flags & ATA_HOST_STARTED)) { dev_err(host->dev, "BUG: trying to register unstarted host\n"); @@ -6294,6 +6340,8 @@ static void ata_port_detach(struct ata_port *ap) { unsigned long flags; + struct ata_link *link; + struct ata_device *dev; if (!ap->ops->error_handler) goto skip_eh; @@ -6313,6 +6361,13 @@ cancel_delayed_work_sync(&ap->hotplug_task); skip_eh: + /* clean up zpodd on port removal */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ALL) { + if (zpodd_dev_enabled(dev)) + zpodd_exit(dev); + } + } if (ap->pmp_link) { int i; for (i = 0; i < SATA_PMP_MAX_PORTS; i++) --- linux-3.13.0.orig/drivers/ata/libata-pmp.c +++ linux-3.13.0/drivers/ata/libata-pmp.c @@ -447,8 +447,11 @@ * otherwise. Don't try hard to recover it. */ ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY; - } else if (vendor == 0x197b && devid == 0x2352) { - /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */ + } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) { + /* + * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350? + * 0x0325: jmicron JMB394. + */ ata_for_each_link(link, ap, EDGE) { /* SRST breaks detection and disks get misclassified * LPM disabled to avoid potential problems --- linux-3.13.0.orig/drivers/ata/libata-scsi.c +++ linux-3.13.0/drivers/ata/libata-scsi.c @@ -111,12 +111,14 @@ [ATA_LPM_MIN_POWER] = "min_power", }; -static ssize_t ata_scsi_lpm_store(struct device *dev, +static ssize_t ata_scsi_lpm_store(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(dev); + struct Scsi_Host *shost = class_to_shost(device); struct ata_port *ap = ata_shost_to_port(shost); + struct ata_link *link; + struct ata_device *dev; enum ata_lpm_policy policy; unsigned long flags; @@ -132,10 +134,20 @@ return -EINVAL; spin_lock_irqsave(ap->lock, flags); + + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, &ap->link, ENABLED) { + if (dev->horkage & ATA_HORKAGE_NOLPM) { + count = -EOPNOTSUPP; + goto out_unlock; + } + } + } + ap->target_lpm_policy = policy; ata_port_schedule_eh(ap); +out_unlock: spin_unlock_irqrestore(ap->lock, flags); - return count; } --- linux-3.13.0.orig/drivers/ata/libata-sff.c +++ linux-3.13.0/drivers/ata/libata-sff.c @@ -1333,7 +1333,19 @@ DPRINTK("ENTER\n"); cancel_delayed_work_sync(&ap->sff_pio_task); + + /* + * We wanna reset the HSM state to IDLE. If we do so without + * grabbing the port lock, critical sections protected by it which + * expect the HSM state to stay stable may get surprised. For + * example, we may set IDLE in between the time + * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls + * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG(). + */ + spin_lock_irq(ap->lock); ap->hsm_task_state = HSM_ST_IDLE; + spin_unlock_irq(ap->lock); + ap->sff_pio_task_link = NULL; if (ata_msg_ctl(ap)) @@ -2008,13 +2020,15 @@ DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); - /* software reset. 
causes dev0 to be selected */ - iowrite8(ap->ctl, ioaddr->ctl_addr); - udelay(20); /* FIXME: flush */ - iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); - udelay(20); /* FIXME: flush */ - iowrite8(ap->ctl, ioaddr->ctl_addr); - ap->last_ctl = ap->ctl; + if (ap->ioaddr.ctl_addr) { + /* software reset. causes dev0 to be selected */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + iowrite8(ap->ctl, ioaddr->ctl_addr); + ap->last_ctl = ap->ctl; + } /* wait the port to become ready */ return ata_sff_wait_after_reset(&ap->link, devmask, deadline); @@ -2215,10 +2229,6 @@ spin_unlock_irqrestore(ap->lock, flags); - /* ignore ata_sff_softreset if ctl isn't accessible */ - if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) - softreset = NULL; - /* ignore built-in hardresets if SCR access is not available */ if ((hardreset == sata_std_hardreset || hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) --- linux-3.13.0.orig/drivers/ata/pata_at91.c +++ linux-3.13.0/drivers/ata/pata_at91.c @@ -408,12 +408,13 @@ host->private_data = info; - return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0, - gpio_is_valid(irq) ? ata_sff_interrupt : NULL, - irq_flags, &pata_at91_sht); + ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0, + gpio_is_valid(irq) ? ata_sff_interrupt : NULL, + irq_flags, &pata_at91_sht); + if (ret) + goto err_put; - if (!ret) - return 0; + return 0; err_put: clk_put(info->mck); --- linux-3.13.0.orig/drivers/ata/pata_scc.c +++ linux-3.13.0/drivers/ata/pata_scc.c @@ -586,7 +586,7 @@ * Note: Original code is ata_bus_softreset(). */ -static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, +static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; @@ -600,9 +600,7 @@ udelay(20); out_be32(ioaddr->ctl_addr, ap->ctl); - scc_wait_after_reset(&ap->link, devmask, deadline); - - return 0; + return scc_wait_after_reset(&ap->link, devmask, deadline); } /** @@ -619,7 +617,8 @@ { struct ata_port *ap = link->ap; unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - unsigned int devmask = 0, err_mask; + unsigned int devmask = 0; + int rc; u8 err; DPRINTK("ENTER\n"); @@ -635,9 +634,9 @@ /* issue bus reset */ DPRINTK("about to softreset, devmask=%x\n", devmask); - err_mask = scc_bus_softreset(ap, devmask, deadline); - if (err_mask) { - ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask); + rc = scc_bus_softreset(ap, devmask, deadline); + if (rc) { + ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc); return -EIO; } --- linux-3.13.0.orig/drivers/ata/pata_serverworks.c +++ linux-3.13.0/drivers/ata/pata_serverworks.c @@ -252,12 +252,18 @@ pci_write_config_byte(pdev, 0x54, ultra_cfg); } -static struct scsi_host_template serverworks_sht = { +static struct scsi_host_template serverworks_osb4_sht = { + ATA_BMDMA_SHT(DRV_NAME), + .sg_tablesize = LIBATA_DUMB_MAX_PRD, +}; + +static struct scsi_host_template serverworks_csb_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations serverworks_osb4_port_ops = { .inherits = &ata_bmdma_port_ops, + .qc_prep = ata_bmdma_dumb_qc_prep, .cable_detect = serverworks_cable_detect, .mode_filter = serverworks_osb4_filter, .set_piomode = serverworks_set_piomode, @@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_csb_port_ops = { .inherits = &serverworks_osb4_port_ops, + 
.qc_prep = ata_bmdma_qc_prep, .mode_filter = serverworks_csb_filter, }; @@ -405,6 +412,7 @@ } }; const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL }; + struct scsi_host_template *sht = &serverworks_csb_sht; int rc; rc = pcim_enable_device(pdev); @@ -418,6 +426,7 @@ /* Select non UDMA capable OSB4 if we can't do fixups */ if (rc < 0) ppi[0] = &info[1]; + sht = &serverworks_osb4_sht; } /* setup CSB5/CSB6 : South Bridge and IDE option RAID */ else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) || @@ -434,7 +443,7 @@ ppi[1] = &ata_dummy_port_info; } - return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0); } #ifdef CONFIG_PM --- linux-3.13.0.orig/drivers/ata/sata_fsl.c +++ linux-3.13.0/drivers/ata/sata_fsl.c @@ -1503,7 +1503,7 @@ host_priv->csr_base = csr_base; irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); - if (irq < 0) { + if (!irq) { dev_err(&ofdev->dev, "invalid irq from platform\n"); goto error_exit_with_cleanup; } --- linux-3.13.0.orig/drivers/ata/sata_highbank.c +++ linux-3.13.0/drivers/ata/sata_highbank.c @@ -404,6 +404,7 @@ static const unsigned long timing[] = { 5, 100, 500}; struct ata_port *ap = link->ap; struct ahci_port_priv *pp = ap->private_data; + struct ahci_host_priv *hpriv = ap->host->private_data; u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; struct ata_taskfile tf; bool online; @@ -432,7 +433,7 @@ break; } while (!online && retry--); - ahci_start_engine(ap); + hpriv->start_engine(ap); if (online) *class = ahci_dev_classify(ap); --- linux-3.13.0.orig/drivers/ata/sata_mv.c +++ linux-3.13.0/drivers/ata/sata_mv.c @@ -304,6 +304,7 @@ MV5_LTMODE = 0x30, MV5_PHY_CTL = 0x0C, SATA_IFCFG = 0x050, + LP_PHY_CTL = 0x058, MV_M2_PREAMP_MASK = 0x7e0, @@ -431,6 +432,7 @@ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ + MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ @@ -1358,6 +1360,7 @@ if (ofs != 0xffffffffU) { void __iomem *addr = mv_ap_base(link->ap) + ofs; + struct mv_host_priv *hpriv = link->ap->host->private_data; if (sc_reg_in == SCR_CONTROL) { /* * Workaround for 88SX60x1 FEr SATA#26: @@ -1374,6 +1377,18 @@ */ if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1) val |= 0xf000; + + if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) { + void __iomem *lp_phy_addr = + mv_ap_base(link->ap) + LP_PHY_CTL; + /* + * Set PHY speed according to SControl speed. + */ + if ((val & 0xf0) == 0x10) + writelfl(0x7, lp_phy_addr); + else + writelfl(0x227, lp_phy_addr); + } } writelfl(val, addr); return 0; @@ -4110,6 +4125,15 @@ if (rc) goto err; + /* + * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be + * updated in the LP_PHY_CTL register. 
+ */ + if (pdev->dev.of_node && + of_device_is_compatible(pdev->dev.of_node, + "marvell,armada-370-sata")) + hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL; + /* initialize adapter */ rc = mv_init_host(host); if (rc) @@ -4209,6 +4233,7 @@ #ifdef CONFIG_OF static struct of_device_id mv_sata_dt_ids[] = { + { .compatible = "marvell,armada-370-sata", }, { .compatible = "marvell,orion-sata", }, {}, }; --- linux-3.13.0.orig/drivers/ata/sata_sil.c +++ linux-3.13.0/drivers/ata/sata_sil.c @@ -157,6 +157,7 @@ { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, + { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE }, { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, { } }; --- linux-3.13.0.orig/drivers/ata/sata_sil24.c +++ linux-3.13.0/drivers/ata/sata_sil24.c @@ -246,7 +246,7 @@ /* host flags */ SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | - ATA_FLAG_AN | ATA_FLAG_PMP, + ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG, SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ IRQ_STAT_4PORTS = 0xf, --- linux-3.13.0.orig/drivers/base/bus.c +++ linux-3.13.0/drivers/base/bus.c @@ -243,13 +243,15 @@ const char *buf, size_t count) { struct device *dev; + int err = -EINVAL; dev = bus_find_device_by_name(bus, NULL, buf); if (!dev) return -ENODEV; - if (bus_rescan_devices_helper(dev, NULL) != 0) - return -EINVAL; - return count; + if (bus_rescan_devices_helper(dev, NULL) == 0) + err = count; + put_device(dev); + return err; } static struct device *next_device(struct klist_iter *i) --- linux-3.13.0.orig/drivers/base/core.c +++ linux-3.13.0/drivers/base/core.c @@ -739,12 +739,12 @@ return &dir->kobj; } +static DEFINE_MUTEX(gdp_mutex); static struct kobject *get_device_parent(struct device *dev, struct device *parent) { if (dev->class) { - static DEFINE_MUTEX(gdp_mutex); struct kobject *kobj = NULL; struct kobject *parent_kobj; struct kobject *k; @@ -808,7 +808,9 @@ glue_dir->kset != &dev->class->p->glue_dirs) return; + mutex_lock(&gdp_mutex); kobject_put(glue_dir); + mutex_unlock(&gdp_mutex); } static void cleanup_device_parent(struct device *dev) --- linux-3.13.0.orig/drivers/base/dd.c +++ linux-3.13.0/drivers/base/dd.c @@ -52,6 +52,7 @@ static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static struct workqueue_struct *deferred_wq; +static atomic_t deferred_trigger_count = ATOMIC_INIT(0); /** * deferred_probe_work_func() - Retry probing devices in the active list. @@ -135,6 +136,17 @@ * This functions moves all devices from the pending list to the active * list and schedules the deferred probe workqueue to process them. It * should be called anytime a driver is successfully bound to a device. + * + * Note, there is a race condition in multi-threaded probe. In the case where + * more than one device is probing at the same time, it is possible for one + * probe to complete successfully while another is about to defer. If the second + * depends on the first, then it will get put on the pending list after the + * trigger event has already occured and will be stuck there. + * + * The atomic 'deferred_trigger_count' is used to determine if a successful + * trigger has occurred in the midst of probing a driver. If the trigger count + * changes in the midst of a probe, then deferred processing should be triggered + * again. 
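Condensed into one place, the guard described above amounts to sampling the trigger count before the probe and re-triggering if it moved; a sketch of the logic rather than the verbatim driver-core code:

	static int really_probe_sketch(struct device *dev, struct device_driver *drv)
	{
		int local_trigger_count = atomic_read(&deferred_trigger_count);
		int ret;

		ret = drv->probe(dev);
		if (ret == -EPROBE_DEFER) {
			driver_deferred_probe_add(dev);
			/* another probe may have triggered while we decided to defer */
			if (local_trigger_count != atomic_read(&deferred_trigger_count))
				driver_deferred_probe_trigger();
		}
		return ret;
	}

The platform_get_irq()/platform_get_irq_byname() changes a little further down feed this same machinery: by trying of_irq_get() first they can now return -EPROBE_DEFER to drivers whose interrupt controller has not probed yet.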
*/ static void driver_deferred_probe_trigger(void) { @@ -147,6 +159,7 @@ * into the active list so they can be retried by the workqueue */ mutex_lock(&deferred_probe_mutex); + atomic_inc(&deferred_trigger_count); list_splice_tail_init(&deferred_probe_pending_list, &deferred_probe_active_list); mutex_unlock(&deferred_probe_mutex); @@ -265,6 +278,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) { int ret = 0; + int local_trigger_count = atomic_read(&deferred_trigger_count); atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", @@ -310,6 +324,9 @@ /* Driver requested deferred probing */ dev_info(dev, "Driver %s requests probe deferral\n", drv->name); driver_deferred_probe_add(dev); + /* Did a trigger occur while probing? Need to re-trigger if yes */ + if (local_trigger_count != atomic_read(&deferred_trigger_count)) + driver_deferred_probe_trigger(); } else if (ret != -ENODEV && ret != -ENXIO) { /* driver matched but the probe failed */ printk(KERN_WARNING --- linux-3.13.0.orig/drivers/base/dma-contiguous.c +++ linux-3.13.0/drivers/base/dma-contiguous.c @@ -155,13 +155,23 @@ base_pfn = pfn; for (j = pageblock_nr_pages; j; --j, pfn++) { WARN_ON_ONCE(!pfn_valid(pfn)); + /* + * alloc_contig_range requires the pfn range + * specified to be in the same zone. Make this + * simple by forcing the entire CMA resv range + * to be in the same zone. + */ if (page_zone(pfn_to_page(pfn)) != zone) - return -EINVAL; + goto err; } init_cma_reserved_pageblock(pfn_to_page(base_pfn)); } while (--i); return 0; + +err: + kfree(cma->bitmap); + return -EINVAL; } static struct cma cma_areas[MAX_CMA_AREAS]; --- linux-3.13.0.orig/drivers/base/firmware_class.c +++ linux-3.13.0/drivers/base/firmware_class.c @@ -1070,6 +1070,9 @@ if (!firmware_p) return -EINVAL; + if (!name || name[0] == '\0') + return -EINVAL; + ret = _request_firmware_prepare(&fw, name, device); if (ret <= 0) /* error or already assigned */ goto out; @@ -1541,6 +1544,7 @@ switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: kill_requests_without_uevent(); device_cache_fw_images(); break; --- linux-3.13.0.orig/drivers/base/platform.c +++ linux-3.13.0/drivers/base/platform.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -87,7 +88,16 @@ return -ENXIO; return dev->archdata.irqs[num]; #else - struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); + struct resource *r; + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get(dev->dev.of_node, num); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } + + r = platform_get_resource(dev, IORESOURCE_IRQ, num); return r ? r->start : -ENXIO; #endif @@ -126,9 +136,17 @@ */ int platform_get_irq_byname(struct platform_device *dev, const char *name) { - struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, - name); + struct resource *r; + + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + int ret; + + ret = of_irq_get_byname(dev->dev.of_node, name); + if (ret >= 0 || ret == -EPROBE_DEFER) + return ret; + } + r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); return r ? 
r->start : -ENXIO; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); --- linux-3.13.0.orig/drivers/base/power/opp.c +++ linux-3.13.0/drivers/base/power/opp.c @@ -735,11 +735,9 @@ unsigned long freq = be32_to_cpup(val++) * 1000; unsigned long volt = be32_to_cpup(val++); - if (dev_pm_opp_add(dev, freq, volt)) { + if (dev_pm_opp_add(dev, freq, volt)) dev_warn(dev, "%s: Failed to add OPP %ld\n", __func__, freq); - continue; - } nr -= 2; } --- linux-3.13.0.orig/drivers/base/regmap/internal.h +++ linux-3.13.0/drivers/base/regmap/internal.h @@ -144,6 +144,9 @@ enum regcache_type type; int (*init)(struct regmap *map); int (*exit)(struct regmap *map); +#ifdef CONFIG_DEBUG_FS + void (*debugfs_init)(struct regmap *map); +#endif int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); int (*write)(struct regmap *map, unsigned int reg, unsigned int value); int (*sync)(struct regmap *map, unsigned int min, unsigned int max); --- linux-3.13.0.orig/drivers/base/regmap/regcache-rbtree.c +++ linux-3.13.0/drivers/base/regmap/regcache-rbtree.c @@ -194,10 +194,6 @@ { debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); } -#else -static void rbtree_debugfs_init(struct regmap *map) -{ -} #endif static int regcache_rbtree_init(struct regmap *map) @@ -222,8 +218,6 @@ goto err; } - rbtree_debugfs_init(map); - return 0; err: @@ -313,7 +307,7 @@ if (pos == 0) { memmove(blk + offset * map->cache_word_size, blk, rbnode->blklen * map->cache_word_size); - bitmap_shift_right(present, present, offset, blklen); + bitmap_shift_left(present, present, offset, blklen); } /* update the rbnode block, its size and the base register */ @@ -532,6 +526,9 @@ .name = "rbtree", .init = regcache_rbtree_init, .exit = regcache_rbtree_exit, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = rbtree_debugfs_init, +#endif .read = regcache_rbtree_read, .write = regcache_rbtree_write, .sync = regcache_rbtree_sync, --- linux-3.13.0.orig/drivers/base/regmap/regcache.c +++ linux-3.13.0/drivers/base/regmap/regcache.c @@ -603,7 +603,8 @@ for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); - if (!regcache_reg_present(cache_present, i)) + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) continue; val = regcache_get_val(map, block, i); @@ -666,7 +667,8 @@ for (i = start; i < end; i++) { regtmp = block_base + (i * map->reg_stride); - if (!regcache_reg_present(cache_present, i)) { + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) { ret = regcache_sync_block_raw_flush(map, &data, base, regtmp); if (ret != 0) @@ -701,7 +703,7 @@ unsigned int block_base, unsigned int start, unsigned int end) { - if (regmap_can_raw_write(map)) + if (regmap_can_raw_write(map) && !map->use_single_rw) return regcache_sync_block_raw(map, block, cache_present, block_base, start, end); else --- linux-3.13.0.orig/drivers/base/regmap/regmap-debugfs.c +++ linux-3.13.0/drivers/base/regmap/regmap-debugfs.c @@ -473,6 +473,7 @@ { struct rb_node *next; struct regmap_range_node *range_node; + const char *devname = "dummy"; /* If we don't have the debugfs root yet, postpone init */ if (!regmap_debugfs_root) { @@ -491,12 +492,15 @@ INIT_LIST_HEAD(&map->debugfs_off_cache); mutex_init(&map->cache_lock); + if (map->dev) + devname = dev_name(map->dev); + if (name) { map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", - dev_name(map->dev), name); + devname, name); name = map->debugfs_name; } else { - name = dev_name(map->dev); + name = devname; } map->debugfs = debugfs_create_dir(name, 
regmap_debugfs_root); @@ -538,6 +542,9 @@ next = rb_next(&range_node->node); } + + if (map->cache_ops && map->cache_ops->debugfs_init) + map->cache_ops->debugfs_init(map); } void regmap_debugfs_exit(struct regmap *map) --- linux-3.13.0.orig/drivers/base/regmap/regmap.c +++ linux-3.13.0/drivers/base/regmap/regmap.c @@ -105,7 +105,7 @@ bool regmap_volatile(struct regmap *map, unsigned int reg) { - if (!regmap_readable(map, reg)) + if (!map->format.format_write && !regmap_readable(map, reg)) return false; if (map->volatile_reg) @@ -1308,7 +1308,7 @@ } #ifdef LOG_DEVICE - if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) + if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) dev_info(map->dev, "%x <= %x\n", reg, val); #endif @@ -1529,6 +1529,11 @@ if (val_bytes == 1) { wval = (void *)val; } else { + if (!val_count) { + ret = -EINVAL; + goto out; + } + wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); if (!wval) { ret = -ENOMEM; @@ -1727,7 +1732,7 @@ ret = map->reg_read(context, reg, val); if (ret == 0) { #ifdef LOG_DEVICE - if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) + if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0) dev_info(map->dev, "%x => %x\n", reg, *val); #endif --- linux-3.13.0.orig/drivers/block/aoe/aoecmd.c +++ linux-3.13.0/drivers/block/aoe/aoecmd.c @@ -905,7 +905,7 @@ /* Non-zero page count for non-head members of * compound pages is no longer allowed by the kernel. */ - page = compound_trans_head(bv->bv_page); + page = compound_head(bv->bv_page); atomic_inc(&page->_count); } } @@ -918,7 +918,7 @@ int i; bio_for_each_segment(bv, bio, i) { - page = compound_trans_head(bv->bv_page); + page = compound_head(bv->bv_page); atomic_dec(&page->_count); } } --- linux-3.13.0.orig/drivers/block/drbd/drbd_interval.c +++ linux-3.13.0/drivers/block/drbd/drbd_interval.c @@ -79,6 +79,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this) { struct rb_node **new = &root->rb_node, *parent = NULL; + sector_t this_end = this->sector + (this->size >> 9); BUG_ON(!IS_ALIGNED(this->size, 512)); @@ -87,6 +88,8 @@ rb_entry(*new, struct drbd_interval, rb); parent = *new; + if (here->end < this_end) + here->end = this_end; if (this->sector < here->sector) new = &(*new)->rb_left; else if (this->sector > here->sector) @@ -99,6 +102,7 @@ return false; } + this->end = this_end; rb_link_node(&this->rb, parent, new); rb_insert_augmented(&this->rb, root, &augment_callbacks); return true; --- linux-3.13.0.orig/drivers/block/floppy.c +++ linux-3.13.0/drivers/block/floppy.c @@ -3053,7 +3053,10 @@ int ret; while (ptr) { - ret = copy_to_user(param, ptr, sizeof(*ptr)); + struct floppy_raw_cmd cmd = *ptr; + cmd.next = NULL; + cmd.kernel_data = NULL; + ret = copy_to_user(param, &cmd, sizeof(cmd)); if (ret) return -EFAULT; param += sizeof(struct floppy_raw_cmd); @@ -3107,10 +3110,11 @@ return -ENOMEM; *rcmd = ptr; ret = copy_from_user(ptr, param, sizeof(*ptr)); - if (ret) - return -EFAULT; ptr->next = NULL; ptr->buffer_length = 0; + ptr->kernel_data = NULL; + if (ret) + return -EFAULT; param += sizeof(struct floppy_raw_cmd); if (ptr->cmd_count > 33) /* the command may now also take up the space @@ -3126,7 +3130,6 @@ for (i = 0; i < 16; i++) ptr->reply[i] = 0; ptr->resultcode = 0; - ptr->kernel_data = NULL; if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { if (ptr->length <= 0) --- linux-3.13.0.orig/drivers/block/loop.c +++ linux-3.13.0/drivers/block/loop.c @@ -691,6 +691,24 @@ return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; } +/* + * for AUFS + * no get/put for 
file. + */ +struct file *loop_backing_file(struct super_block *sb) +{ + struct file *ret; + struct loop_device *l; + + ret = NULL; + if (MAJOR(sb->s_dev) == LOOP_MAJOR) { + l = sb->s_bdev->bd_disk->private_data; + ret = l->lo_backing_file; + } + return ret; +} +EXPORT_SYMBOL(loop_backing_file); + /* loop sysfs attributes */ static ssize_t loop_attr_show(struct device *dev, char *page, --- linux-3.13.0.orig/drivers/block/mtip32xx/mtip32xx.c +++ linux-3.13.0/drivers/block/mtip32xx/mtip32xx.c @@ -231,38 +231,45 @@ void *data, int status) { - struct mtip_cmd *command; + struct mtip_cmd *cmd; struct driver_data *dd = data; - int cb_status = status ? -EIO : 0; + int unaligned, cb_status = status ? -EIO : 0; + void (*func)(void *, int); if (unlikely(!dd) || unlikely(!port)) return; - command = &port->commands[tag]; + cmd = &port->commands[tag]; if (unlikely(status == PORT_IRQ_TF_ERR)) { dev_warn(&port->dd->pdev->dev, "Command tag %d failed due to TFE\n", tag); } + /* Clear the active flag */ + atomic_set(&port->commands[tag].active, 0); + /* Upper layer callback */ - if (likely(command->async_callback)) - command->async_callback(command->async_data, cb_status); + func = cmd->async_callback; + if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) { - command->async_callback = NULL; - command->comp_func = NULL; + /* Unmap the DMA scatter list entries */ + dma_unmap_sg(&dd->pdev->dev, + cmd->sg, + cmd->scatter_ents, + cmd->direction); - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&dd->pdev->dev, - command->sg, - command->scatter_ents, - command->direction); + func(cmd->async_data, cb_status); + unaligned = cmd->unaligned; - /* Clear the allocated and active bits for the command */ - atomic_set(&port->commands[tag].active, 0); - release_slot(port, tag); + /* Clear the allocated bit for the command */ + release_slot(port, tag); - up(&port->cmd_slot); + if (unlikely(unaligned)) + up(&port->cmd_slot_unal); + else + up(&port->cmd_slot); + } } /* @@ -639,11 +646,12 @@ { struct mtip_port *port = (struct mtip_port *) data; struct host_to_dev_fis *fis; - struct mtip_cmd *command; - int tag, cmdto_cnt = 0; + struct mtip_cmd *cmd; + int unaligned, tag, cmdto_cnt = 0; unsigned int bit, group; unsigned int num_command_slots; unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; + void (*func)(void *, int); if (unlikely(!port)) return; @@ -673,8 +681,8 @@ group = tag >> 5; bit = tag & 0x1F; - command = &port->commands[tag]; - fis = (struct host_to_dev_fis *) command->command; + cmd = &port->commands[tag]; + fis = (struct host_to_dev_fis *) cmd->command; set_bit(tag, tagaccum); cmdto_cnt++; @@ -688,27 +696,30 @@ */ writel(1 << bit, port->completed[group]); - /* Call the async completion callback. */ - if (likely(command->async_callback)) - command->async_callback(command->async_data, - -EIO); - command->async_callback = NULL; - command->comp_func = NULL; - - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&port->dd->pdev->dev, - command->sg, - command->scatter_ents, - command->direction); - - /* - * Clear the allocated bit and active tag for the - * command. 
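Both the normal completion path above and the timed-out command handling that follows now claim the callback with cmpxchg(), so whichever path wins runs the callback exactly once and the loser backs off instead of double-completing. The idiom in isolation, with complete_once() being a hypothetical name:

	static void complete_once(struct mtip_cmd *cmd, int status)
	{
		void (*func)(void *, int) = cmd->async_callback;

		/* only the first of {completion interrupt, timeout} passes this test */
		if (func && cmpxchg(&cmd->async_callback, func, NULL) == func)
			func(cmd->async_data, status);
	}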
- */ + /* Clear the active flag for the command */ atomic_set(&port->commands[tag].active, 0); - release_slot(port, tag); - up(&port->cmd_slot); + func = cmd->async_callback; + if (func && + cmpxchg(&cmd->async_callback, func, 0) == func) { + + /* Unmap the DMA scatter list entries */ + dma_unmap_sg(&port->dd->pdev->dev, + cmd->sg, + cmd->scatter_ents, + cmd->direction); + + func(cmd->async_data, -EIO); + unaligned = cmd->unaligned; + + /* Clear the allocated bit for the command. */ + release_slot(port, tag); + + if (unaligned) + up(&port->cmd_slot_unal); + else + up(&port->cmd_slot); + } } } @@ -1496,6 +1507,37 @@ be16_to_cpus(&buf[i]); } +static void mtip_set_timeout(struct driver_data *dd, + struct host_to_dev_fis *fis, + unsigned int *timeout, u8 erasemode) +{ + switch (fis->command) { + case ATA_CMD_DOWNLOAD_MICRO: + *timeout = 120000; /* 2 minutes */ + break; + case ATA_CMD_SEC_ERASE_UNIT: + case 0xFC: + if (erasemode) + *timeout = ((*(dd->port->identify + 90) * 2) * 60000); + else + *timeout = ((*(dd->port->identify + 89) * 2) * 60000); + break; + case ATA_CMD_STANDBYNOW1: + *timeout = 120000; /* 2 minutes */ + break; + case 0xF7: + case 0xFA: + *timeout = 60000; /* 60 seconds */ + break; + case ATA_CMD_SMART: + *timeout = 15000; /* 15 seconds */ + break; + default: + *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; + break; + } +} + /* * Request the device identity information. * @@ -1605,6 +1647,7 @@ int rv; struct host_to_dev_fis fis; unsigned long start; + unsigned int timeout; /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); @@ -1612,6 +1655,8 @@ fis.opts = 1 << 7; fis.command = ATA_CMD_STANDBYNOW1; + mtip_set_timeout(port->dd, &fis, &timeout, 0); + start = jiffies; rv = mtip_exec_internal_command(port, &fis, @@ -1620,7 +1665,7 @@ 0, 0, GFP_ATOMIC, - 15000); + timeout); dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n", jiffies_to_msecs(jiffies - start)); if (rv) @@ -2159,36 +2204,6 @@ } return rv; } -static void mtip_set_timeout(struct driver_data *dd, - struct host_to_dev_fis *fis, - unsigned int *timeout, u8 erasemode) -{ - switch (fis->command) { - case ATA_CMD_DOWNLOAD_MICRO: - *timeout = 120000; /* 2 minutes */ - break; - case ATA_CMD_SEC_ERASE_UNIT: - case 0xFC: - if (erasemode) - *timeout = ((*(dd->port->identify + 90) * 2) * 60000); - else - *timeout = ((*(dd->port->identify + 89) * 2) * 60000); - break; - case ATA_CMD_STANDBYNOW1: - *timeout = 120000; /* 2 minutes */ - break; - case 0xF7: - case 0xFA: - *timeout = 60000; /* 60 seconds */ - break; - case ATA_CMD_SMART: - *timeout = 15000; /* 15 seconds */ - break; - default: - *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; - break; - } -} /* * Executes a taskfile @@ -4145,6 +4160,7 @@ blk_queue_max_hw_sectors(dd->queue, 0xffff); blk_queue_max_segment_size(dd->queue, 0x400000); blk_queue_io_min(dd->queue, 4096); + blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask); /* * write back cache is not supported in the device. 
FUA depends on @@ -4399,6 +4415,57 @@ static DEFINE_HANDLER(6); static DEFINE_HANDLER(7); +static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev) +{ + int pos; + unsigned short pcie_dev_ctrl; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(pdev, + pos + PCI_EXP_DEVCTL, + &pcie_dev_ctrl); + if (pcie_dev_ctrl & (1 << 11) || + pcie_dev_ctrl & (1 << 4)) { + dev_info(&dd->pdev->dev, + "Disabling ERO/No-Snoop on bridge device %04x:%04x\n", + pdev->vendor, pdev->device); + pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN | + PCI_EXP_DEVCTL_RELAX_EN); + pci_write_config_word(pdev, + pos + PCI_EXP_DEVCTL, + pcie_dev_ctrl); + } + } +} + +static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev) +{ + /* + * This workaround is specific to AMD/ATI chipset with a PCI upstream + * device with device id 0x5aXX + */ + if (pdev->bus && pdev->bus->self) { + if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI && + ((pdev->bus->self->device & 0xff00) == 0x5a00)) { + mtip_disable_link_opts(dd, pdev->bus->self); + } else { + /* Check further up the topology */ + struct pci_dev *parent_dev = pdev->bus->self; + if (parent_dev->bus && + parent_dev->bus->parent && + parent_dev->bus->parent->self && + parent_dev->bus->parent->self->vendor == + PCI_VENDOR_ID_ATI && + (parent_dev->bus->parent->self->device & + 0xff00) == 0x5a00) { + mtip_disable_link_opts(dd, + parent_dev->bus->parent->self); + } + } + } +} + /* * Called for each supported PCI device detected. * @@ -4550,6 +4617,8 @@ goto block_initialize_err; } + mtip_fix_ero_nosnoop(dd, pdev); + /* Initialize the block layer. */ rv = mtip_block_initialize(dd); if (rv < 0) { @@ -4853,13 +4922,13 @@ */ static void __exit mtip_exit(void) { - debugfs_remove_recursive(dfs_parent); - /* Release the allocated major block device number. */ unregister_blkdev(mtip_major, MTIP_DRV_NAME); /* Unregister the PCI driver. */ pci_unregister_driver(&mtip_pci_driver); + + debugfs_remove_recursive(dfs_parent); } MODULE_AUTHOR("Micron Technology, Inc"); --- linux-3.13.0.orig/drivers/block/nbd.c +++ linux-3.13.0/drivers/block/nbd.c @@ -57,7 +57,7 @@ static unsigned int nbds_max = 16; static struct nbd_device *nbd_dev; -static int max_part; +static int max_part = 15; /* * Use just one lock (or at most 1 per NIC). 
Two arguments for this: @@ -814,10 +814,6 @@ return -EINVAL; } - nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); - if (!nbd_dev) - return -ENOMEM; - part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); @@ -839,6 +835,10 @@ if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); + if (!nbd_dev) + return -ENOMEM; + for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) --- linux-3.13.0.orig/drivers/block/nvme-core.c +++ linux-3.13.0/drivers/block/nvme-core.c @@ -46,7 +46,6 @@ #define NVME_Q_DEPTH 1024 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) -#define NVME_MINORS 64 #define ADMIN_TIMEOUT (60 * HZ) static int nvme_major; @@ -58,6 +57,17 @@ static DEFINE_SPINLOCK(dev_list_lock); static LIST_HEAD(dev_list); static struct task_struct *nvme_thread; +static struct workqueue_struct *nvme_workq; + +static void nvme_reset_failed_dev(struct work_struct *ws); + +struct async_cmd_info { + struct kthread_work work; + struct kthread_worker *worker; + u32 result; + int status; + void *ctx; +}; /* * An NVM Express queue. Each device has at least two (one for admin @@ -66,6 +76,7 @@ struct nvme_queue { struct device *q_dmadev; struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ spinlock_t q_lock; struct nvme_command *sq_cmds; volatile struct nvme_completion *cqes; @@ -80,9 +91,11 @@ u16 sq_head; u16 sq_tail; u16 cq_head; + u16 qid; u8 cq_phase; u8 cqe_seen; u8 q_suspended; + struct async_cmd_info cmdinfo; unsigned long cmdid_data[]; }; @@ -97,6 +110,7 @@ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(sizeof(struct nvme_features) != 64); BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); + BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); @@ -111,6 +125,7 @@ nvme_completion_fn fn; void *ctx; unsigned long timeout; + int aborted; }; static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq) @@ -154,6 +169,7 @@ info[cmdid].fn = handler; info[cmdid].ctx = ctx; info[cmdid].timeout = jiffies + timeout; + info[cmdid].aborted = 0; return cmdid; } @@ -172,6 +188,7 @@ #define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE) #define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE) #define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE) +#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE) static void special_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) @@ -180,6 +197,10 @@ return; if (ctx == CMD_CTX_FLUSH) return; + if (ctx == CMD_CTX_ABORT) { + ++dev->abort_limit; + return; + } if (ctx == CMD_CTX_COMPLETED) { dev_warn(&dev->pci_dev->dev, "completed id %d twice on queue %d\n", @@ -196,6 +217,15 @@ dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx); } +static void async_completion(struct nvme_dev *dev, void *ctx, + struct nvme_completion *cqe) +{ + struct async_cmd_info *cmdinfo = ctx; + cmdinfo->result = le32_to_cpup(&cqe->result); + cmdinfo->status = le16_to_cpup(&cqe->status) >> 1; + queue_kthread_work(cmdinfo->worker, &cmdinfo->work); +} + /* * Called with local interrupts disabled and the q_lock held. May not sleep. 
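One detail worth spelling out before the queue-handling hunks that follow: the doorbell indexing switches from shifts to plain multiplication, treating db_stride as the number of 32-bit slots between consecutive doorbells (an assumption about how db_stride is initialised, since that hunk is not part of this excerpt). Laid out explicitly:

	/*
	 * SQ tail doorbell of queue qid:  dev->dbs[qid * 2 * db_stride]
	 * CQ head doorbell of queue qid:  dev->dbs[qid * 2 * db_stride + db_stride]
	 *
	 * e.g. db_stride == 1, qid == 1  ->  dbs[2] (SQ tail), dbs[3] (CQ head),
	 * which is why the completion handler rings q_db + db_stride.
	 */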
*/ @@ -775,7 +805,7 @@ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) return 0; - writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride)); + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); nvmeq->cq_head = head; nvmeq->cq_phase = phase; @@ -886,12 +916,34 @@ return cmdinfo.status; } +static int nvme_submit_async_cmd(struct nvme_queue *nvmeq, + struct nvme_command *cmd, + struct async_cmd_info *cmdinfo, unsigned timeout) +{ + int cmdid; + + cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout); + if (cmdid < 0) + return cmdid; + cmdinfo->status = -EINTR; + cmd->common.command_id = cmdid; + nvme_submit_cmd(nvmeq, cmd); + return 0; +} + int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, u32 *result) { return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); } +static int nvme_submit_admin_cmd_async(struct nvme_dev *dev, + struct nvme_command *cmd, struct async_cmd_info *cmdinfo) +{ + return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo, + ADMIN_TIMEOUT); +} + static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { int status; @@ -1002,6 +1054,56 @@ } /** + * nvme_abort_cmd - Attempt aborting a command + * @cmdid: Command id of a timed out IO + * @queue: The queue with timed out IO + * + * Schedule controller reset if the command was already aborted once before and + * still hasn't been returned to the driver, or if this is the admin queue. + */ +static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq) +{ + int a_cmdid; + struct nvme_command cmd; + struct nvme_dev *dev = nvmeq->dev; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + + if (!nvmeq->qid || info[cmdid].aborted) { + if (work_busy(&dev->reset_work)) + return; + list_del_init(&dev->node); + dev_warn(&dev->pci_dev->dev, + "I/O %d QID %d timeout, reset controller\n", cmdid, + nvmeq->qid); + PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev); + queue_work(nvme_workq, &dev->reset_work); + return; + } + + if (!dev->abort_limit) + return; + + a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion, + ADMIN_TIMEOUT); + if (a_cmdid < 0) + return; + + memset(&cmd, 0, sizeof(cmd)); + cmd.abort.opcode = nvme_admin_abort_cmd; + cmd.abort.cid = cmdid; + cmd.abort.sqid = cpu_to_le16(nvmeq->qid); + cmd.abort.command_id = a_cmdid; + + --dev->abort_limit; + info[cmdid].aborted = 1; + info[cmdid].timeout = jiffies + ADMIN_TIMEOUT; + + dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid, + nvmeq->qid); + nvme_submit_cmd(dev->queues[0], &cmd); +} + +/** * nvme_cancel_ios - Cancel outstanding I/Os * @queue: The queue to cancel I/Os on * @timeout: True to only cancel I/Os which have timed out @@ -1024,7 +1126,12 @@ continue; if (info[cmdid].ctx == CMD_CTX_CANCELLED) continue; - dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); + if (timeout && nvmeq->dev->initialized) { + nvme_abort_cmd(cmdid, nvmeq); + continue; + } + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid, + nvmeq->qid); ctx = cancel_cmdid(nvmeq, cmdid, &fn); fn(nvmeq->dev, ctx, &cqe); } @@ -1046,26 +1153,31 @@ kfree(nvmeq); } -static void nvme_free_queues(struct nvme_dev *dev) +static void nvme_free_queues(struct nvme_dev *dev, int lowest) { int i; - for (i = dev->queue_count - 1; i >= 0; i--) { + for (i = dev->queue_count - 1; i >= lowest; i--) { nvme_free_queue(dev->queues[i]); dev->queue_count--; dev->queues[i] = NULL; } } -static void nvme_disable_queue(struct nvme_dev *dev, int qid) +/** + * nvme_suspend_queue - put queue into suspended 
state + * @nvmeq - queue to suspend + * + * Returns 1 if already suspended, 0 otherwise. + */ +static int nvme_suspend_queue(struct nvme_queue *nvmeq) { - struct nvme_queue *nvmeq = dev->queues[qid]; - int vector = dev->entry[nvmeq->cq_vector].vector; + int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; spin_lock_irq(&nvmeq->q_lock); if (nvmeq->q_suspended) { spin_unlock_irq(&nvmeq->q_lock); - return; + return 1; } nvmeq->q_suspended = 1; spin_unlock_irq(&nvmeq->q_lock); @@ -1073,18 +1185,35 @@ irq_set_affinity_hint(vector, NULL); free_irq(vector, nvmeq); - /* Don't tell the adapter to delete the admin queue */ - if (qid) { - adapter_delete_sq(dev, qid); - adapter_delete_cq(dev, qid); - } + return 0; +} +static void nvme_clear_queue(struct nvme_queue *nvmeq) +{ spin_lock_irq(&nvmeq->q_lock); nvme_process_cq(nvmeq); nvme_cancel_ios(nvmeq, false); spin_unlock_irq(&nvmeq->q_lock); } +static void nvme_disable_queue(struct nvme_dev *dev, int qid) +{ + struct nvme_queue *nvmeq = dev->queues[qid]; + + if (!nvmeq) + return; + if (nvme_suspend_queue(nvmeq)) + return; + + /* Don't tell the adapter to delete the admin queue. + * Don't tell a removed adapter to delete IO queues. */ + if (qid && readl(&dev->bar->csts) != -1) { + adapter_delete_sq(dev, qid); + adapter_delete_cq(dev, qid); + } + nvme_clear_queue(nvmeq); +} + static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth, int vector) { @@ -1107,15 +1236,18 @@ nvmeq->q_dmadev = dmadev; nvmeq->dev = dev; + snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", + dev->instance, qid); spin_lock_init(&nvmeq->q_lock); nvmeq->cq_head = 0; nvmeq->cq_phase = 1; init_waitqueue_head(&nvmeq->sq_full); init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread); bio_list_init(&nvmeq->sq_cong); - nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_depth = depth; nvmeq->cq_vector = vector; + nvmeq->qid = qid; nvmeq->q_suspended = 1; dev->queue_count++; @@ -1134,11 +1266,10 @@ { if (use_threaded_interrupts) return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, - nvme_irq_check, nvme_irq, - IRQF_DISABLED | IRQF_SHARED, + nvme_irq_check, nvme_irq, IRQF_SHARED, name, nvmeq); return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, - IRQF_DISABLED | IRQF_SHARED, name, nvmeq); + IRQF_SHARED, name, nvmeq); } static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) @@ -1149,7 +1280,7 @@ nvmeq->sq_tail = 0; nvmeq->cq_head = 0; nvmeq->cq_phase = 1; - nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)]; + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; memset(nvmeq->cmdid_data, 0, extra); memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); nvme_cancel_ios(nvmeq, false); @@ -1169,13 +1300,13 @@ if (result < 0) goto release_cq; - result = queue_request_irq(dev, nvmeq, "nvme"); + result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result < 0) goto release_sq; - spin_lock(&nvmeq->q_lock); + spin_lock_irq(&nvmeq->q_lock); nvme_init_queue(nvmeq, qid); - spin_unlock(&nvmeq->q_lock); + spin_unlock_irq(&nvmeq->q_lock); return result; @@ -1287,13 +1418,13 @@ if (result) return result; - result = queue_request_irq(dev, nvmeq, "nvme admin"); + result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result) return result; - spin_lock(&nvmeq->q_lock); + spin_lock_irq(&nvmeq->q_lock); nvme_init_queue(nvmeq, 0); - spin_unlock(&nvmeq->q_lock); + spin_unlock_irq(&nvmeq->q_lock); return result; } @@ -1569,10 +1700,47 @@ } } +#ifdef 
CONFIG_COMPAT +static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + + switch (cmd) { + case SG_IO: + return nvme_sg_io32(ns, arg); + } + return nvme_ioctl(bdev, mode, cmd, arg); +} +#else +#define nvme_compat_ioctl NULL +#endif + +static int nvme_open(struct block_device *bdev, fmode_t mode) +{ + struct nvme_ns *ns = bdev->bd_disk->private_data; + struct nvme_dev *dev = ns->dev; + + kref_get(&dev->kref); + return 0; +} + +static void nvme_free_dev(struct kref *kref); + +static void nvme_release(struct gendisk *disk, fmode_t mode) +{ + struct nvme_ns *ns = disk->private_data; + struct nvme_dev *dev = ns->dev; + + kref_put(&dev->kref, nvme_free_dev); +} + static const struct block_device_operations nvme_fops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, - .compat_ioctl = nvme_ioctl, + .compat_ioctl = nvme_compat_ioctl, + .open = nvme_open, + .release = nvme_release, }; static void nvme_resubmit_bios(struct nvme_queue *nvmeq) @@ -1596,13 +1764,25 @@ static int nvme_kthread(void *data) { - struct nvme_dev *dev; + struct nvme_dev *dev, *next; while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); spin_lock(&dev_list_lock); - list_for_each_entry(dev, &dev_list, node) { + list_for_each_entry_safe(dev, next, &dev_list, node) { int i; + if (readl(&dev->bar->csts) & NVME_CSTS_CFS && + dev->initialized) { + if (work_busy(&dev->reset_work)) + continue; + list_del_init(&dev->node); + dev_warn(&dev->pci_dev->dev, + "Failed status, reset controller\n"); + PREPARE_WORK(&dev->reset_work, + nvme_reset_failed_dev); + queue_work(nvme_workq, &dev->reset_work); + continue; + } for (i = 0; i < dev->queue_count; i++) { struct nvme_queue *nvmeq = dev->queues[i]; if (!nvmeq) @@ -1623,33 +1803,6 @@ return 0; } -static DEFINE_IDA(nvme_index_ida); - -static int nvme_get_ns_idx(void) -{ - int index, error; - - do { - if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL)) - return -1; - - spin_lock(&dev_list_lock); - error = ida_get_new(&nvme_index_ida, &index); - spin_unlock(&dev_list_lock); - } while (error == -EAGAIN); - - if (error) - index = -1; - return index; -} - -static void nvme_put_ns_idx(int index) -{ - spin_lock(&dev_list_lock); - ida_remove(&nvme_index_ida, index); - spin_unlock(&dev_list_lock); -} - static void nvme_config_discard(struct nvme_ns *ns) { u32 logical_block_size = queue_logical_block_size(ns->queue); @@ -1683,7 +1836,7 @@ ns->dev = dev; ns->queue->queuedata = ns; - disk = alloc_disk(NVME_MINORS); + disk = alloc_disk(0); if (!disk) goto out_free_queue; ns->ns_id = nsid; @@ -1696,12 +1849,12 @@ blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); disk->major = nvme_major; - disk->minors = NVME_MINORS; - disk->first_minor = NVME_MINORS * nvme_get_ns_idx(); + disk->first_minor = 0; disk->fops = &nvme_fops; disk->private_data = ns; disk->queue = ns->queue; disk->driverfs_dev = &dev->pci_dev->dev; + disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); @@ -1717,15 +1870,6 @@ return NULL; } -static void nvme_ns_free(struct nvme_ns *ns) -{ - int index = ns->disk->first_minor / NVME_MINORS; - put_disk(ns->disk); - nvme_put_ns_idx(index); - blk_cleanup_queue(ns->queue); - kfree(ns); -} - static int set_queue_count(struct nvme_dev *dev, int count) { int status; @@ -1741,11 +1885,12 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) { - return 4096 + 
((nr_io_queues + 1) << (dev->db_stride + 3)); + return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride); } static int nvme_setup_io_queues(struct nvme_dev *dev) { + struct nvme_queue *adminq = dev->queues[0]; struct pci_dev *pdev = dev->pci_dev; int result, cpu, i, vecs, nr_io_queues, size, q_depth; @@ -1772,7 +1917,7 @@ } /* Deregister the admin queue's interrupt */ - free_irq(dev->entry[0].vector, dev->queues[0]); + free_irq(dev->entry[0].vector, adminq); vecs = nr_io_queues; for (i = 0; i < vecs; i++) @@ -1810,9 +1955,9 @@ */ nr_io_queues = vecs; - result = queue_request_irq(dev, dev->queues[0], "nvme admin"); + result = queue_request_irq(dev, adminq, adminq->irqname); if (result) { - dev->queues[0]->q_suspended = 1; + adminq->q_suspended = 1; goto free_queues; } @@ -1821,9 +1966,9 @@ for (i = dev->queue_count - 1; i > nr_io_queues; i--) { struct nvme_queue *nvmeq = dev->queues[i]; - spin_lock(&nvmeq->q_lock); + spin_lock_irq(&nvmeq->q_lock); nvme_cancel_ios(nvmeq, false); - spin_unlock(&nvmeq->q_lock); + spin_unlock_irq(&nvmeq->q_lock); nvme_free_queue(nvmeq); dev->queue_count--; @@ -1864,7 +2009,7 @@ return 0; free_queues: - nvme_free_queues(dev); + nvme_free_queues(dev, 1); return result; } @@ -1876,6 +2021,7 @@ */ static int nvme_dev_add(struct nvme_dev *dev) { + struct pci_dev *pdev = dev->pci_dev; int res; unsigned nn, i; struct nvme_ns *ns; @@ -1885,8 +2031,7 @@ dma_addr_t dma_addr; int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; - mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, - GFP_KERNEL); + mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL); if (!mem) return -ENOMEM; @@ -1899,13 +2044,14 @@ ctrl = mem; nn = le32_to_cpup(&ctrl->nn); dev->oncs = le16_to_cpup(&ctrl->oncs); + dev->abort_limit = ctrl->acl + 1; memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); if (ctrl->mdts) dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); - if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) && - (dev->pci_dev->device == 0x0953) && ctrl->vs[3]) + if ((pdev->vendor == PCI_VENDOR_ID_INTEL) && + (pdev->device == 0x0953) && ctrl->vs[3]) dev->stripe_size = 1 << (ctrl->vs[3] + shift); id_ns = mem; @@ -1953,16 +2099,21 @@ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) goto disable; - pci_set_drvdata(pdev, dev); dev->bar = ioremap(pci_resource_start(pdev, 0), 8192); if (!dev->bar) goto disable; - - dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap)); + if (readl(&dev->bar->csts) == -1) { + result = -ENODEV; + goto unmap; + } + dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap)); dev->dbs = ((void __iomem *)dev->bar) + 4096; return 0; + unmap: + iounmap(dev->bar); + dev->bar = NULL; disable: pci_release_regions(pdev); disable_pci: @@ -1980,37 +2131,183 @@ if (dev->bar) { iounmap(dev->bar); dev->bar = NULL; + pci_release_regions(dev->pci_dev); } - pci_release_regions(dev->pci_dev); if (pci_is_enabled(dev->pci_dev)) pci_disable_device(dev->pci_dev); } +struct nvme_delq_ctx { + struct task_struct *waiter; + struct kthread_worker *worker; + atomic_t refcount; +}; + +static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) +{ + dq->waiter = current; + mb(); + + for (;;) { + set_current_state(TASK_KILLABLE); + if (!atomic_read(&dq->refcount)) + break; + if (!schedule_timeout(ADMIN_TIMEOUT) || + fatal_signal_pending(current)) { + set_current_state(TASK_RUNNING); + + nvme_disable_ctrl(dev, readq(&dev->bar->cap)); + 
nvme_disable_queue(dev, 0); + + send_sig(SIGKILL, dq->worker->task, 1); + flush_kthread_worker(dq->worker); + return; + } + } + set_current_state(TASK_RUNNING); +} + +static void nvme_put_dq(struct nvme_delq_ctx *dq) +{ + atomic_dec(&dq->refcount); + if (dq->waiter) + wake_up_process(dq->waiter); +} + +static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq) +{ + atomic_inc(&dq->refcount); + return dq; +} + +static void nvme_del_queue_end(struct nvme_queue *nvmeq) +{ + struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; + + nvme_clear_queue(nvmeq); + nvme_put_dq(dq); +} + +static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, + kthread_work_func_t fn) +{ + struct nvme_command c; + + memset(&c, 0, sizeof(c)); + c.delete_queue.opcode = opcode; + c.delete_queue.qid = cpu_to_le16(nvmeq->qid); + + init_kthread_work(&nvmeq->cmdinfo.work, fn); + return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo); +} + +static void nvme_del_cq_work_handler(struct kthread_work *work) +{ + struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, + cmdinfo.work); + nvme_del_queue_end(nvmeq); +} + +static int nvme_delete_cq(struct nvme_queue *nvmeq) +{ + return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq, + nvme_del_cq_work_handler); +} + +static void nvme_del_sq_work_handler(struct kthread_work *work) +{ + struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, + cmdinfo.work); + int status = nvmeq->cmdinfo.status; + + if (!status) + status = nvme_delete_cq(nvmeq); + if (status) + nvme_del_queue_end(nvmeq); +} + +static int nvme_delete_sq(struct nvme_queue *nvmeq) +{ + return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq, + nvme_del_sq_work_handler); +} + +static void nvme_del_queue_start(struct kthread_work *work) +{ + struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, + cmdinfo.work); + allow_signal(SIGKILL); + if (nvme_delete_sq(nvmeq)) + nvme_del_queue_end(nvmeq); +} + +static void nvme_disable_io_queues(struct nvme_dev *dev) +{ + int i; + DEFINE_KTHREAD_WORKER_ONSTACK(worker); + struct nvme_delq_ctx dq; + struct task_struct *kworker_task = kthread_run(kthread_worker_fn, + &worker, "nvme%d", dev->instance); + + if (IS_ERR(kworker_task)) { + dev_err(&dev->pci_dev->dev, + "Failed to create queue del task\n"); + for (i = dev->queue_count - 1; i > 0; i--) + nvme_disable_queue(dev, i); + return; + } + + dq.waiter = NULL; + atomic_set(&dq.refcount, 0); + dq.worker = &worker; + for (i = dev->queue_count - 1; i > 0; i--) { + struct nvme_queue *nvmeq = dev->queues[i]; + + if (nvme_suspend_queue(nvmeq)) + continue; + nvmeq->cmdinfo.ctx = nvme_get_dq(&dq); + nvmeq->cmdinfo.worker = dq.worker; + init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start); + queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work); + } + nvme_wait_dq(&dq, dev); + kthread_stop(kworker_task); +} + static void nvme_dev_shutdown(struct nvme_dev *dev) { int i; - for (i = dev->queue_count - 1; i >= 0; i--) - nvme_disable_queue(dev, i); + dev->initialized = 0; spin_lock(&dev_list_lock); list_del_init(&dev->node); spin_unlock(&dev_list_lock); - if (dev->bar) + if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) { + for (i = dev->queue_count - 1; i >= 0; i--) { + struct nvme_queue *nvmeq = dev->queues[i]; + nvme_suspend_queue(nvmeq); + nvme_clear_queue(nvmeq); + } + } else { + nvme_disable_io_queues(dev); nvme_shutdown_ctrl(dev); + nvme_disable_queue(dev, 0); + } nvme_dev_unmap(dev); } static void nvme_dev_remove(struct nvme_dev *dev) { - struct nvme_ns *ns, 
*next; + struct nvme_ns *ns; - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { - list_del(&ns->list); - del_gendisk(ns->disk); - nvme_ns_free(ns); + list_for_each_entry(ns, &dev->namespaces, list) { + if (ns->disk->flags & GENHD_FL_UP) + del_gendisk(ns->disk); + if (!blk_queue_dying(ns->queue)) + blk_cleanup_queue(ns->queue); } } @@ -2067,14 +2364,22 @@ spin_unlock(&dev_list_lock); } +static void nvme_free_namespaces(struct nvme_dev *dev) +{ + struct nvme_ns *ns, *next; + + list_for_each_entry_safe(ns, next, &dev->namespaces, list) { + list_del(&ns->list); + put_disk(ns->disk); + kfree(ns); + } +} + static void nvme_free_dev(struct kref *kref) { struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); - nvme_dev_remove(dev); - nvme_dev_shutdown(dev); - nvme_free_queues(dev); - nvme_release_instance(dev); - nvme_release_prp_pools(dev); + + nvme_free_namespaces(dev); kfree(dev->queues); kfree(dev->entry); kfree(dev); @@ -2138,6 +2443,7 @@ return result; disable: + nvme_disable_queue(dev, 0); spin_lock(&dev_list_lock); list_del_init(&dev->node); spin_unlock(&dev_list_lock); @@ -2146,6 +2452,71 @@ return result; } +static int nvme_remove_dead_ctrl(void *arg) +{ + struct nvme_dev *dev = (struct nvme_dev *)arg; + struct pci_dev *pdev = dev->pci_dev; + + if (pci_get_drvdata(pdev)) + pci_stop_and_remove_bus_device(pdev); + kref_put(&dev->kref, nvme_free_dev); + return 0; +} + +static void nvme_remove_disks(struct work_struct *ws) +{ + int i; + struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); + + nvme_dev_remove(dev); + spin_lock(&dev_list_lock); + for (i = dev->queue_count - 1; i > 0; i--) { + BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended); + nvme_free_queue(dev->queues[i]); + dev->queue_count--; + dev->queues[i] = NULL; + } + spin_unlock(&dev_list_lock); +} + +static int nvme_dev_resume(struct nvme_dev *dev) +{ + int ret; + + ret = nvme_dev_start(dev); + if (ret && ret != -EBUSY) + return ret; + if (ret == -EBUSY) { + spin_lock(&dev_list_lock); + PREPARE_WORK(&dev->reset_work, nvme_remove_disks); + queue_work(nvme_workq, &dev->reset_work); + spin_unlock(&dev_list_lock); + } + dev->initialized = 1; + return 0; +} + +static void nvme_dev_reset(struct nvme_dev *dev) +{ + nvme_dev_shutdown(dev); + if (nvme_dev_resume(dev)) { + dev_err(&dev->pci_dev->dev, "Device failed to resume\n"); + kref_get(&dev->kref); + if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d", + dev->instance))) { + dev_err(&dev->pci_dev->dev, + "Failed to start controller remove task\n"); + kref_put(&dev->kref, nvme_free_dev); + } + } +} + +static void nvme_reset_failed_dev(struct work_struct *ws) +{ + struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work); + nvme_dev_reset(dev); +} + static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int result = -ENOMEM; @@ -2164,8 +2535,9 @@ goto free; INIT_LIST_HEAD(&dev->namespaces); + INIT_WORK(&dev->reset_work, nvme_reset_failed_dev); dev->pci_dev = pdev; - + pci_set_drvdata(pdev, dev); result = nvme_set_instance(dev); if (result) goto free; @@ -2174,6 +2546,7 @@ if (result) goto release; + kref_init(&dev->kref); result = nvme_dev_start(dev); if (result) { if (result == -EBUSY) @@ -2195,15 +2568,16 @@ if (result) goto remove; - kref_init(&dev->kref); + dev->initialized = 1; return 0; remove: nvme_dev_remove(dev); + nvme_free_namespaces(dev); shutdown: nvme_dev_shutdown(dev); release_pools: - nvme_free_queues(dev); + nvme_free_queues(dev, 0); nvme_release_prp_pools(dev); release: 
nvme_release_instance(dev); @@ -2214,10 +2588,28 @@ return result; } +static void nvme_shutdown(struct pci_dev *pdev) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + nvme_dev_shutdown(dev); +} + static void nvme_remove(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); + + spin_lock(&dev_list_lock); + list_del_init(&dev->node); + spin_unlock(&dev_list_lock); + + pci_set_drvdata(pdev, NULL); + flush_work(&dev->reset_work); misc_deregister(&dev->miscdev); + nvme_dev_remove(dev); + nvme_dev_shutdown(dev); + nvme_free_queues(dev, 0); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); } @@ -2241,13 +2633,12 @@ { struct pci_dev *pdev = to_pci_dev(dev); struct nvme_dev *ndev = pci_get_drvdata(pdev); - int ret; - ret = nvme_dev_start(ndev); - /* XXX: should remove gendisks if resume fails */ - if (ret) - nvme_free_queues(ndev); - return ret; + if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) { + PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev); + queue_work(nvme_workq, &ndev->reset_work); + } + return 0; } static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume); @@ -2274,6 +2665,7 @@ .id_table = nvme_id_table, .probe = nvme_probe, .remove = nvme_remove, + .shutdown = nvme_shutdown, .driver = { .pm = &nvme_dev_pm_ops, }, @@ -2288,9 +2680,14 @@ if (IS_ERR(nvme_thread)) return PTR_ERR(nvme_thread); + result = -ENOMEM; + nvme_workq = create_singlethread_workqueue("nvme"); + if (!nvme_workq) + goto kill_kthread; + result = register_blkdev(nvme_major, "nvme"); if (result < 0) - goto kill_kthread; + goto kill_workq; else if (result > 0) nvme_major = result; @@ -2301,6 +2698,8 @@ unregister_blkdev: unregister_blkdev(nvme_major, "nvme"); + kill_workq: + destroy_workqueue(nvme_workq); kill_kthread: kthread_stop(nvme_thread); return result; @@ -2310,6 +2709,7 @@ { pci_unregister_driver(&nvme_driver); unregister_blkdev(nvme_major, "nvme"); + destroy_workqueue(nvme_workq); kthread_stop(nvme_thread); } --- linux-3.13.0.orig/drivers/block/nvme-scsi.c +++ linux-3.13.0/drivers/block/nvme-scsi.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -3038,6 +3039,152 @@ return retcode; } +#ifdef CONFIG_COMPAT +typedef struct sg_io_hdr32 { + compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */ + compat_int_t dxfer_direction; /* [i] data transfer direction */ + unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */ + unsigned char mx_sb_len; /* [i] max length to write to sbp */ + unsigned short iovec_count; /* [i] 0 implies no scatter gather */ + compat_uint_t dxfer_len; /* [i] byte count of data transfer */ + compat_uint_t dxferp; /* [i], [*io] points to data transfer memory + or scatter gather list */ + compat_uptr_t cmdp; /* [i], [*i] points to command to perform */ + compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */ + compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */ + compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... 
*/ + compat_int_t pack_id; /* [i->o] unused internally (normally) */ + compat_uptr_t usr_ptr; /* [i->o] unused internally */ + unsigned char status; /* [o] scsi status */ + unsigned char masked_status; /* [o] shifted, masked scsi status */ + unsigned char msg_status; /* [o] messaging level data (optional) */ + unsigned char sb_len_wr; /* [o] byte count actually written to sbp */ + unsigned short host_status; /* [o] errors from host adapter */ + unsigned short driver_status; /* [o] errors from software driver */ + compat_int_t resid; /* [o] dxfer_len - actual_transferred */ + compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */ + compat_uint_t info; /* [o] auxiliary information */ +} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */ + +typedef struct sg_iovec32 { + compat_uint_t iov_base; + compat_uint_t iov_len; +} sg_iovec32_t; + +static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count) +{ + sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1); + sg_iovec32_t __user *iov32 = dxferp; + int i; + + for (i = 0; i < iovec_count; i++) { + u32 base, len; + + if (get_user(base, &iov32[i].iov_base) || + get_user(len, &iov32[i].iov_len) || + put_user(compat_ptr(base), &iov[i].iov_base) || + put_user(len, &iov[i].iov_len)) + return -EFAULT; + } + + if (put_user(iov, &sgio->dxferp)) + return -EFAULT; + return 0; +} + +int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg) +{ + sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg; + sg_io_hdr_t __user *sgio; + u16 iovec_count; + u32 data; + void __user *dxferp; + int err; + int interface_id; + + if (get_user(interface_id, &sgio32->interface_id)) + return -EFAULT; + if (interface_id != 'S') + return -EINVAL; + + if (get_user(iovec_count, &sgio32->iovec_count)) + return -EFAULT; + + { + void __user *top = compat_alloc_user_space(0); + void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) + + (iovec_count * sizeof(sg_iovec_t))); + if (new > top) + return -EINVAL; + + sgio = new; + } + + /* Ok, now construct. 
*/ + if (copy_in_user(&sgio->interface_id, &sgio32->interface_id, + (2 * sizeof(int)) + + (2 * sizeof(unsigned char)) + + (1 * sizeof(unsigned short)) + + (1 * sizeof(unsigned int)))) + return -EFAULT; + + if (get_user(data, &sgio32->dxferp)) + return -EFAULT; + dxferp = compat_ptr(data); + if (iovec_count) { + if (sg_build_iovec(sgio, dxferp, iovec_count)) + return -EFAULT; + } else { + if (put_user(dxferp, &sgio->dxferp)) + return -EFAULT; + } + + { + unsigned char __user *cmdp; + unsigned char __user *sbp; + + if (get_user(data, &sgio32->cmdp)) + return -EFAULT; + cmdp = compat_ptr(data); + + if (get_user(data, &sgio32->sbp)) + return -EFAULT; + sbp = compat_ptr(data); + + if (put_user(cmdp, &sgio->cmdp) || + put_user(sbp, &sgio->sbp)) + return -EFAULT; + } + + if (copy_in_user(&sgio->timeout, &sgio32->timeout, + 3 * sizeof(int))) + return -EFAULT; + + if (get_user(data, &sgio32->usr_ptr)) + return -EFAULT; + if (put_user(compat_ptr(data), &sgio->usr_ptr)) + return -EFAULT; + + err = nvme_sg_io(ns, sgio); + if (err >= 0) { + void __user *datap; + + if (copy_in_user(&sgio32->pack_id, &sgio->pack_id, + sizeof(int)) || + get_user(datap, &sgio->usr_ptr) || + put_user((u32)(unsigned long)datap, + &sgio32->usr_ptr) || + copy_in_user(&sgio32->status, &sgio->status, + (4 * sizeof(unsigned char)) + + (2 * sizeof(unsigned short)) + + (3 * sizeof(int)))) + err = -EFAULT; + } + + return err; +} +#endif + int nvme_sg_get_version_num(int __user *ip) { return put_user(sg_version_num, ip); --- linux-3.13.0.orig/drivers/block/rbd.c +++ linux-3.13.0/drivers/block/rbd.c @@ -1379,6 +1379,14 @@ return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; } +static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request) +{ + struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev; + + return obj_request->img_offset < + round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header)); +} + static void rbd_obj_request_get(struct rbd_obj_request *obj_request) { dout("%s: obj %p (was %d)\n", __func__, obj_request, @@ -1395,6 +1403,13 @@ kref_put(&obj_request->kref, rbd_obj_request_destroy); } +static void rbd_img_request_get(struct rbd_img_request *img_request) +{ + dout("%s: img %p (was %d)\n", __func__, img_request, + atomic_read(&img_request->kref.refcount)); + kref_get(&img_request->kref); +} + static bool img_request_child_test(struct rbd_img_request *img_request); static void rbd_parent_request_destroy(struct kref *kref); static void rbd_img_request_destroy(struct kref *kref); @@ -1930,32 +1945,26 @@ * If an image has a non-zero parent overlap, get a reference to its * parent. * - * We must get the reference before checking for the overlap to - * coordinate properly with zeroing the parent overlap in - * rbd_dev_v2_parent_info() when an image gets flattened. We - * drop it again if there is no overlap. - * * Returns true if the rbd device has a parent with a non-zero * overlap and a reference for it was successfully taken, or * false otherwise. 
*/ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) { - int counter; + int counter = 0; if (!rbd_dev->parent_spec) return false; - counter = atomic_inc_return_safe(&rbd_dev->parent_ref); - if (counter > 0 && rbd_dev->parent_overlap) - return true; - - /* Image was flattened, but parent is not yet torn down */ + down_read(&rbd_dev->header_rwsem); + if (rbd_dev->parent_overlap) + counter = atomic_inc_return_safe(&rbd_dev->parent_ref); + up_read(&rbd_dev->header_rwsem); if (counter < 0) rbd_warn(rbd_dev, "parent reference overflow\n"); - return false; + return counter > 0; } /* @@ -2128,7 +2137,6 @@ rbd_assert(img_request->obj_request_count > 0); rbd_assert(which != BAD_WHICH); rbd_assert(which < img_request->obj_request_count); - rbd_assert(which >= img_request->next_completion); spin_lock_irq(&img_request->completion_lock); if (which != img_request->next_completion) @@ -2148,6 +2156,7 @@ img_request->next_completion = which; out: spin_unlock_irq(&img_request->completion_lock); + rbd_img_request_put(img_request); if (!more) rbd_img_request_complete(img_request); @@ -2244,6 +2253,7 @@ goto out_partial; obj_request->osd_req = osd_req; obj_request->callback = rbd_img_obj_callback; + rbd_img_request_get(img_request); osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 0, 0); @@ -2666,7 +2676,7 @@ */ if (!img_request_write_test(img_request) || !img_request_layered_test(img_request) || - rbd_dev->parent_overlap <= obj_request->img_offset || + !obj_request_overlaps_parent(obj_request) || ((known = obj_request_known_test(obj_request)) && obj_request_exists_test(obj_request))) { @@ -3203,7 +3213,7 @@ page_count = (u32) calc_pages_for(offset, length); pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); if (IS_ERR(pages)) - ret = PTR_ERR(pages); + return PTR_ERR(pages); ret = -ENOMEM; obj_request = rbd_obj_request_create(object_name, offset, length, @@ -3877,7 +3887,6 @@ */ if (rbd_dev->parent_overlap) { rbd_dev->parent_overlap = 0; - smp_mb(); rbd_dev_parent_put(rbd_dev); pr_info("%s: clone image has been flattened\n", rbd_dev->disk->disk_name); @@ -3921,7 +3930,6 @@ * treat it specially. 
*/ rbd_dev->parent_overlap = overlap; - smp_mb(); if (!overlap) { /* A null parent_spec indicates it's the initial probe */ @@ -4765,10 +4773,7 @@ { struct rbd_image_header *header; - /* Drop parent reference unless it's already been done (or none) */ - - if (rbd_dev->parent_overlap) - rbd_dev_parent_put(rbd_dev); + rbd_dev_parent_put(rbd_dev); /* Free dynamic fields from the header, then zero it out */ --- linux-3.13.0.orig/drivers/block/sunvdc.c +++ linux-3.13.0/drivers/block/sunvdc.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -22,8 +23,8 @@ #define DRV_MODULE_NAME "sunvdc" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.0" -#define DRV_MODULE_RELDATE "June 25, 2007" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "February 13, 2013" static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; @@ -32,7 +33,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -#define VDC_TX_RING_SIZE 256 +#define VDC_TX_RING_SIZE 512 #define WAITING_FOR_LINK_UP 0x01 #define WAITING_FOR_TX_SPACE 0x02 @@ -65,11 +66,9 @@ u64 operations; u32 vdisk_size; u8 vdisk_type; + u8 vdisk_mtype; char disk_name[32]; - - struct vio_disk_geom geom; - struct vio_disk_vtoc label; }; static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) @@ -79,9 +78,16 @@ /* Ordered from largest major to lowest */ static struct vio_version vdc_versions[] = { + { .major = 1, .minor = 1 }, { .major = 1, .minor = 0 }, }; +static inline int vdc_version_supported(struct vdc_port *port, + u16 major, u16 minor) +{ + return port->vio.ver.major == major && port->vio.ver.minor >= minor; +} + #define VDCBLK_NAME "vdisk" static int vdc_major; #define PARTITION_SHIFT 3 @@ -94,18 +100,54 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct gendisk *disk = bdev->bd_disk; - struct vdc_port *port = disk->private_data; + sector_t nsect = get_capacity(disk); + sector_t cylinders = nsect; - geo->heads = (u8) port->geom.num_hd; - geo->sectors = (u8) port->geom.num_sec; - geo->cylinders = port->geom.num_cyl; + geo->heads = 0xff; + geo->sectors = 0x3f; + sector_div(cylinders, geo->heads * geo->sectors); + geo->cylinders = cylinders; + if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect) + geo->cylinders = 0xffff; return 0; } +/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev + * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD. + * Needed to be able to install inside an ldom from an iso image. 
+ */ +static int vdc_ioctl(struct block_device *bdev, fmode_t mode, + unsigned command, unsigned long argument) +{ + int i; + struct gendisk *disk; + + switch (command) { + case CDROMMULTISESSION: + pr_debug(PFX "Multisession CDs not supported\n"); + for (i = 0; i < sizeof(struct cdrom_multisession); i++) + if (put_user(0, (char __user *)(argument + i))) + return -EFAULT; + return 0; + + case CDROM_GET_CAPABILITY: + disk = bdev->bd_disk; + + if (bdev->bd_disk && (disk->flags & GENHD_FL_CD)) + return 0; + return -EINVAL; + + default: + pr_debug(PFX "ioctl %08x not supported\n", command); + return -EINVAL; + } +} + static const struct block_device_operations vdc_fops = { .owner = THIS_MODULE, .getgeo = vdc_getgeo, + .ioctl = vdc_ioctl, }; static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) @@ -165,9 +207,9 @@ struct vio_disk_attr_info *pkt = arg; viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] " - "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", + "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", pkt->tag.stype, pkt->operations, - pkt->vdisk_size, pkt->vdisk_type, + pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype, pkt->xfer_mode, pkt->vdisk_block_size, pkt->max_xfer_size); @@ -192,8 +234,11 @@ } port->operations = pkt->operations; - port->vdisk_size = pkt->vdisk_size; port->vdisk_type = pkt->vdisk_type; + if (vdc_version_supported(port, 1, 1)) { + port->vdisk_size = pkt->vdisk_size; + port->vdisk_mtype = pkt->vdisk_mtype; + } if (pkt->max_xfer_size < port->max_xfer_size) port->max_xfer_size = pkt->max_xfer_size; port->vdisk_block_size = pkt->vdisk_block_size; @@ -236,7 +281,9 @@ __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); - if (blk_queue_stopped(port->disk->queue)) + /* restart blk queue when ring is half emptied */ + if (blk_queue_stopped(port->disk->queue) && + vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) blk_start_queue(port->disk->queue); } @@ -388,12 +435,6 @@ for (i = 0; i < nsg; i++) len += sg[i].length; - if (unlikely(vdc_tx_dring_avail(dr) < 1)) { - blk_stop_queue(port->disk->queue); - err = -ENOMEM; - goto out; - } - desc = vio_dring_cur(dr); err = ldc_map_sg(port->vio.lp, sg, nsg, @@ -433,21 +474,32 @@ port->req_id++; dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); } -out: return err; } -static void do_vdc_request(struct request_queue *q) +static void do_vdc_request(struct request_queue *rq) { - while (1) { - struct request *req = blk_fetch_request(q); + struct request *req; - if (!req) + while ((req = blk_peek_request(rq)) != NULL) { + struct vdc_port *port; + struct vio_dring_state *dr; + + port = req->rq_disk->private_data; + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + if (unlikely(vdc_tx_dring_avail(dr) < 1)) + goto wait; + + blk_start_request(req); + + if (__send_request(req) < 0) { + blk_requeue_request(rq, req); +wait: + /* Avoid pointless unplugs. 
*/ + blk_stop_queue(rq); break; - - if (__send_request(req) < 0) - __blk_end_request_all(req, -EIO); + } } } @@ -656,25 +708,27 @@ if (comp.err) return comp.err; - err = generic_request(port, VD_OP_GET_VTOC, - &port->label, sizeof(port->label)); - if (err < 0) { - printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err); - return err; - } + if (vdc_version_supported(port, 1, 1)) { + /* vdisk_size should be set during the handshake, if it wasn't + * then the underlying disk is reserved by another system + */ + if (port->vdisk_size == -1) + return -ENODEV; + } else { + struct vio_disk_geom geom; - err = generic_request(port, VD_OP_GET_DISKGEOM, - &port->geom, sizeof(port->geom)); - if (err < 0) { - printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " - "error %d\n", err); - return err; + err = generic_request(port, VD_OP_GET_DISKGEOM, + &geom, sizeof(geom)); + if (err < 0) { + printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " + "error %d\n", err); + return err; + } + port->vdisk_size = ((u64)geom.num_cyl * + (u64)geom.num_hd * + (u64)geom.num_sec); } - port->vdisk_size = ((u64)port->geom.num_cyl * - (u64)port->geom.num_hd * - (u64)port->geom.num_sec); - q = blk_init_queue(do_vdc_request, &port->vio.lock); if (!q) { printk(KERN_ERR PFX "%s: Could not allocate queue.\n", @@ -691,6 +745,10 @@ port->disk = g; + /* Each segment in a request is up to an aligned page in size. */ + blk_queue_segment_boundary(q, PAGE_SIZE - 1); + blk_queue_max_segment_size(q, PAGE_SIZE); + blk_queue_max_segments(q, port->ring_cookies); blk_queue_max_hw_sectors(q, port->max_xfer_size); g->major = vdc_major; @@ -704,9 +762,32 @@ set_capacity(g, port->vdisk_size); - printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n", + if (vdc_version_supported(port, 1, 1)) { + switch (port->vdisk_mtype) { + case VD_MEDIA_TYPE_CD: + pr_info(PFX "Virtual CDROM %s\n", port->disk_name); + g->flags |= GENHD_FL_CD; + g->flags |= GENHD_FL_REMOVABLE; + set_disk_ro(g, 1); + break; + + case VD_MEDIA_TYPE_DVD: + pr_info(PFX "Virtual DVD %s\n", port->disk_name); + g->flags |= GENHD_FL_CD; + g->flags |= GENHD_FL_REMOVABLE; + set_disk_ro(g, 1); + break; + + case VD_MEDIA_TYPE_FIXED: + pr_info(PFX "Virtual Hard disk %s\n", port->disk_name); + break; + } + } + + pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n", g->disk_name, - port->vdisk_size, (port->vdisk_size >> (20 - 9))); + port->vdisk_size, (port->vdisk_size >> (20 - 9)), + port->vio.ver.major, port->vio.ver.minor); add_disk(g); @@ -765,6 +846,7 @@ else snprintf(port->disk_name, sizeof(port->disk_name), VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); + port->vdisk_size = -1; err = vio_driver_init(&port->vio, vdev, VDEV_DISK, vdc_versions, ARRAY_SIZE(vdc_versions), --- linux-3.13.0.orig/drivers/block/virtio_blk.c +++ linux-3.13.0/drivers/block/virtio_blk.c @@ -144,11 +144,11 @@ if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); - spin_unlock_irqrestore(&vblk->vq_lock, flags); /* In case queue is stopped waiting for more buffers. 
*/ if (req_done) blk_mq_start_stopped_hw_queues(vblk->disk->queue); + spin_unlock_irqrestore(&vblk->vq_lock, flags); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) @@ -158,6 +158,8 @@ unsigned long flags; unsigned int num; const bool last = (req->cmd_flags & REQ_END) != 0; + int err; + bool notify = false; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); @@ -198,17 +200,24 @@ } spin_lock_irqsave(&vblk->vq_lock, flags); - if (__virtblk_add_req(vblk->vq, vbr, vbr->sg, num) < 0) { + err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); + if (err) { virtqueue_kick(vblk->vq); - spin_unlock_irqrestore(&vblk->vq_lock, flags); blk_mq_stop_hw_queue(hctx); - return BLK_MQ_RQ_QUEUE_BUSY; + spin_unlock_irqrestore(&vblk->vq_lock, flags); + /* Out of mem doesn't actually happen, since we fall back + * to direct descriptors */ + if (err == -ENOMEM || err == -ENOSPC) + return BLK_MQ_RQ_QUEUE_BUSY; + return BLK_MQ_RQ_QUEUE_ERROR; } - if (last) - virtqueue_kick(vblk->vq); - + if (last && virtqueue_kick_prepare(vblk->vq)) + notify = true; spin_unlock_irqrestore(&vblk->vq_lock, flags); + + if (notify) + virtqueue_notify(vblk->vq); return BLK_MQ_RQ_QUEUE_OK; } --- linux-3.13.0.orig/drivers/block/xen-blkback/blkback.c +++ linux-3.13.0/drivers/block/xen-blkback/blkback.c @@ -755,6 +755,7 @@ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); + put_free_pages(blkif, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; ret |= 1; goto next; --- linux-3.13.0.orig/drivers/block/xen-blkfront.c +++ linux-3.13.0/drivers/block/xen-blkfront.c @@ -1356,7 +1356,7 @@ char *type; int len; /* no unplug has been done: do not hook devices != xen vbds */ - if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) { + if (xen_has_pv_and_legacy_disk_devices()) { int major; if (!VDEV_IS_EXTENDED(vdevice)) @@ -1904,13 +1904,16 @@ case XenbusStateReconfiguring: case XenbusStateReconfigured: case XenbusStateUnknown: - case XenbusStateClosed: break; case XenbusStateConnected: blkfront_connect(info); break; + case XenbusStateClosed: + if (dev->state == XenbusStateClosed) + break; + /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: blkfront_closing(info); break; @@ -2079,7 +2082,7 @@ if (!xen_domain()) return -ENODEV; - if (xen_hvm_domain() && !xen_platform_pci_unplug) + if (!xen_has_pv_disk_devices()) return -ENODEV; if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { --- linux-3.13.0.orig/drivers/bluetooth/ath3k.c +++ linux-3.13.0/drivers/bluetooth/ath3k.c @@ -62,48 +62,57 @@ { USB_DEVICE(0x0CF3, 0x3000) }, /* Atheros AR3011 with sflash firmware*/ + { USB_DEVICE(0x0489, 0xE027) }, + { USB_DEVICE(0x0489, 0xE03D) }, + { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x0CF3, 0xE019) }, { USB_DEVICE(0x13d3, 0x3304) }, - { USB_DEVICE(0x0930, 0x0215) }, - { USB_DEVICE(0x0489, 0xE03D) }, - { USB_DEVICE(0x0489, 0xE027) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0489, 0xe04d) }, + { USB_DEVICE(0x0489, 0xe04e) }, + { USB_DEVICE(0x0489, 0xe056) }, + { USB_DEVICE(0x0489, 0xe057) }, + { USB_DEVICE(0x0489, 0xe078) }, + { USB_DEVICE(0x04c5, 0x1330) }, + { USB_DEVICE(0x04CA, 0x3004) }, + { USB_DEVICE(0x04CA, 0x3005) }, + { USB_DEVICE(0x04CA, 0x3006) }, + { USB_DEVICE(0x04CA, 0x3008) }, + { USB_DEVICE(0x04CA, 0x3010) }, + { USB_DEVICE(0x0930, 0x0219) }, + { 
USB_DEVICE(0x0930, 0x0220) }, + { USB_DEVICE(0x0930, 0x0227) }, + { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, + { USB_DEVICE(0x0CF3, 0x311E) }, + { USB_DEVICE(0x0CF3, 0x311F) }, + { USB_DEVICE(0x0cf3, 0x3121) }, { USB_DEVICE(0x0CF3, 0x817a) }, - { USB_DEVICE(0x13d3, 0x3375) }, - { USB_DEVICE(0x04CA, 0x3004) }, - { USB_DEVICE(0x04CA, 0x3005) }, - { USB_DEVICE(0x04CA, 0x3006) }, - { USB_DEVICE(0x04CA, 0x3008) }, - { USB_DEVICE(0x13d3, 0x3362) }, + { USB_DEVICE(0x0cf3, 0xe003) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0CF3, 0xE005) }, - { USB_DEVICE(0x0930, 0x0219) }, - { USB_DEVICE(0x0930, 0x0220) }, - { USB_DEVICE(0x0489, 0xe057) }, + { USB_DEVICE(0x13d3, 0x3362) }, + { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3393) }, - { USB_DEVICE(0x0489, 0xe04e) }, - { USB_DEVICE(0x0489, 0xe056) }, - { USB_DEVICE(0x0489, 0xe04d) }, - { USB_DEVICE(0x04c5, 0x1330) }, { USB_DEVICE(0x13d3, 0x3402) }, - { USB_DEVICE(0x0cf3, 0x3121) }, - { USB_DEVICE(0x0cf3, 0xe003) }, + { USB_DEVICE(0x13d3, 0x3408) }, + { USB_DEVICE(0x13d3, 0x3423) }, + { USB_DEVICE(0x13d3, 0x3432) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, /* Atheros AR5BBU22 with sflash firmware */ - { USB_DEVICE(0x0489, 0xE03C) }, { USB_DEVICE(0x0489, 0xE036) }, + { USB_DEVICE(0x0489, 0xE03C) }, { } /* Terminating entry */ }; @@ -116,30 +125,39 @@ static const struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { 
USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, @@ -151,6 +169,8 @@ #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 #define FW_HDR_SIZE 20 +#define TIMEGAP_USEC_MIN 50 +#define TIMEGAP_USEC_MAX 100 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) @@ -181,6 +201,9 @@ count -= 20; while (count) { + /* workaround the compatibility issue with xHCI controller*/ + usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); + size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); memcpy(send_buf, firmware->data + sent, size); @@ -277,6 +300,9 @@ count -= size; while (count) { + /* workaround the compatibility issue with xHCI controller*/ + usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); + size = min_t(uint, count, BULK_SIZE); pipe = usb_sndbulkpipe(udev, 0x02); --- linux-3.13.0.orig/drivers/bluetooth/btmrvl_drv.h +++ linux-3.13.0/drivers/bluetooth/btmrvl_drv.h @@ -70,6 +70,7 @@ u8 hs_state; u8 wakeup_tries; wait_queue_head_t cmd_wait_q; + wait_queue_head_t event_hs_wait_q; u8 cmd_complete; bool is_suspended; }; --- linux-3.13.0.orig/drivers/bluetooth/btmrvl_main.c +++ linux-3.13.0/drivers/bluetooth/btmrvl_main.c @@ -115,6 +115,7 @@ adapter->hs_state = HS_ACTIVATED; if (adapter->psmode) adapter->ps_state = PS_SLEEP; + wake_up_interruptible(&adapter->event_hs_wait_q); BT_DBG("HS ACTIVATED!"); } else { BT_DBG("HS Enable failed"); @@ -254,11 +255,31 @@ int btmrvl_enable_hs(struct btmrvl_private *priv) { + struct btmrvl_adapter *adapter = priv->adapter; int ret; ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0); - if (ret) + if (ret) { BT_ERR("Host sleep enable command failed\n"); + return ret; + } + + ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q, + adapter->hs_state, + msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED)); + if (ret < 0) { + BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d", + ret, adapter->hs_state, adapter->ps_state, + adapter->wakeup_tries); + } else if (!ret) { + BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state, + adapter->ps_state, adapter->wakeup_tries); + ret = -ETIMEDOUT; + } else { + BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state, + adapter->ps_state, adapter->wakeup_tries); + ret = 0; + } return ret; } @@ -344,6 +365,7 @@ priv->adapter->ps_state = PS_AWAKE; init_waitqueue_head(&priv->adapter->cmd_wait_q); + 
init_waitqueue_head(&priv->adapter->event_hs_wait_q); } static void btmrvl_free_adapter(struct btmrvl_private *priv) @@ -716,6 +738,7 @@ hdev = priv->btmrvl_dev.hcidev; wake_up_interruptible(&priv->adapter->cmd_wait_q); + wake_up_interruptible(&priv->adapter->event_hs_wait_q); kthread_stop(priv->main_thread.task); --- linux-3.13.0.orig/drivers/bluetooth/btusb.c +++ linux-3.13.0/drivers/bluetooth/btusb.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -49,6 +50,8 @@ #define BTUSB_WRONG_SCO_MTU 0x40 #define BTUSB_ATH3012 0x80 #define BTUSB_INTEL 0x100 +#define BTUSB_INTEL_BOOT 0x200 +#define BTUSB_BCM_PATCHRAM 0x800 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -101,21 +104,38 @@ { USB_DEVICE(0x0c10, 0x0000) }, /* Broadcom BCM20702A0 */ + { USB_DEVICE(0x0489, 0xe042) }, + { USB_DEVICE(0x04ca, 0x2003) }, { USB_DEVICE(0x0b05, 0x17b5) }, { USB_DEVICE(0x0b05, 0x17cb) }, - { USB_DEVICE(0x04ca, 0x2003) }, - { USB_DEVICE(0x0489, 0xe042) }, - { USB_DEVICE(0x413c, 0x8197) }, + { USB_DEVICE(0x13d3, 0x3388), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_DEVICE(0x13d3, 0x3389), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_DEVICE(0x413c, 0x8197), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_DEVICE(0x413c, 0x8143), .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Broadcom BCM43142A0 */ + { USB_DEVICE(0x04ca, 0x2007), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM }, /* Foxconn - Hon Hai */ - { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, + + /* Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, - /*Broadcom devices with vendor specific id */ - { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + /* ASUSTek Computer - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) }, /* Belkin F8065bf - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, + /* IMC Networks - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) }, + + /* Intel Bluetooth USB Bootloader (RAM module) */ + { USB_DEVICE(0x8087, 0x0a5a), + .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, + { } /* Terminating entry */ }; @@ -129,41 +149,50 @@ { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, /* Atheros 3011 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, - { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, - { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, - { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe078), 
.driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, @@ -173,9 +202,9 @@ { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, /* Broadcom BCM2035 */ - { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, - { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, + { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Broadcom BCM2045 */ { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -224,6 +253,7 @@ /* Intel Bluetooth device */ { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, + { USB_DEVICE(0x8087, 0x0a2a), 
.driver_info = BTUSB_INTEL }, { } /* Terminating entry */ }; @@ -305,6 +335,9 @@ BT_ERR("%s corrupted event packet", hdev->name); hdev->stat.err_rx++; } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; } if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) @@ -393,6 +426,9 @@ BT_ERR("%s corrupted ACL packet", hdev->name); hdev->stat.err_rx++; } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; } if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) @@ -487,6 +523,9 @@ hdev->stat.err_rx++; } } + } else if (urb->status == -ENOENT) { + /* Avoid suspend failed when usb_kill_urb */ + return; } if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) @@ -1331,6 +1370,72 @@ return 0; } +static int btusb_setup_patchram_packet(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + return 0; +} + +#define PATCHRAM_NAME_LEN 20 + +static int btusb_setup_patchram(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct usb_device *udev = data->udev; + size_t pos = 0; + int err = 0; + char filename[PATCHRAM_NAME_LEN]; + const struct firmware *fw; + u8 val = 0x00; + + snprintf(filename, PATCHRAM_NAME_LEN, "fw-%04x_%04x.hcd", + le16_to_cpu(udev->descriptor.idVendor), + le16_to_cpu(udev->descriptor.idProduct)); + if (request_firmware(&fw, (const char *) filename, &udev->dev) < 0) { + BT_INFO("can't load firmware, may not work correctly"); + return 0; + } + + err = btusb_setup_patchram_packet(hdev, 0x0c03, 1, &val); + if (err) + goto out; + + msleep(300); + err = btusb_setup_patchram_packet(hdev, 0xfc2e, 1, &val); + if (err) + goto out; + + msleep(1000); + while (pos < fw->size) { + size_t len; + len = fw->data[pos + 2] + 3; + if (pos + len > fw->size) { + err = -EINVAL; + goto out; + } + err = btusb_setup_patchram_packet(hdev, le16_to_cpu(*(u16*)(fw->data + pos)), + fw->data[pos + 2] , &fw->data[pos + 3]); + if (err) + goto out; + pos += len; + } + + err = btusb_setup_patchram_packet(hdev, 0x0c03, 1, &val); +out: + release_firmware(fw); + if (err) { + BT_INFO("fail to load firmware"); + return err; + } + BT_INFO("firmware loaded"); + return 0; +} + static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -1439,6 +1544,12 @@ if (id->driver_info & BTUSB_INTEL) hdev->setup = btusb_setup_intel; + if (id->driver_info & BTUSB_BCM_PATCHRAM) + hdev->setup = btusb_setup_patchram; + + if (id->driver_info & BTUSB_INTEL_BOOT) + set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); + /* Interface numbers are hardcoded in the specification */ data->isoc = usb_ifnum_to_if(data->udev, 1); --- linux-3.13.0.orig/drivers/bluetooth/hci_h5.c +++ linux-3.13.0/drivers/bluetooth/hci_h5.c @@ -237,7 +237,7 @@ break; to_remove--; - seq = (seq - 1) % 8; + seq = (seq - 1) & 0x07; } if (seq != h5->rx_ack) @@ -406,6 +406,7 @@ H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { BT_ERR("Non-link packet received in non-active state"); h5_reset_rx(h5); + return 0; } h5->rx_func = h5_rx_payload; --- linux-3.13.0.orig/drivers/bluetooth/hci_ldisc.c +++ linux-3.13.0/drivers/bluetooth/hci_ldisc.c @@ -118,10 +118,6 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) { - struct tty_struct *tty = hu->tty; - struct hci_dev *hdev = hu->hdev; - struct sk_buff *skb; - if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) { set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); return 
0; @@ -129,6 +125,22 @@ BT_DBG(""); + schedule_work(&hu->write_work); + + return 0; +} + +static void hci_uart_write_work(struct work_struct *work) +{ + struct hci_uart *hu = container_of(work, struct hci_uart, write_work); + struct tty_struct *tty = hu->tty; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + + /* REVISIT: should we cope with bad skbs or ->write() returning + * and error value ? + */ + restart: clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); @@ -153,7 +165,6 @@ goto restart; clear_bit(HCI_UART_SENDING, &hu->tx_state); - return 0; } static void hci_uart_init_work(struct work_struct *work) @@ -281,6 +292,7 @@ tty->receive_room = 65536; INIT_WORK(&hu->init_ready, hci_uart_init_work); + INIT_WORK(&hu->write_work, hci_uart_write_work); spin_lock_init(&hu->rx_lock); @@ -318,6 +330,8 @@ if (hdev) hci_uart_close(hdev); + cancel_work_sync(&hu->write_work); + if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { if (hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) --- linux-3.13.0.orig/drivers/bluetooth/hci_uart.h +++ linux-3.13.0/drivers/bluetooth/hci_uart.h @@ -68,6 +68,7 @@ unsigned long hdev_flags; struct work_struct init_ready; + struct work_struct write_work; struct hci_uart_proto *proto; void *priv; --- linux-3.13.0.orig/drivers/bluetooth/hci_vhci.c +++ linux-3.13.0/drivers/bluetooth/hci_vhci.c @@ -352,7 +352,7 @@ static struct miscdevice vhci_miscdev= { .name = "vhci", .fops = &vhci_fops, - .minor = MISC_DYNAMIC_MINOR, + .minor = VHCI_MINOR, }; static int __init vhci_init(void) @@ -378,3 +378,4 @@ MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("devname:vhci"); +MODULE_ALIAS_MISCDEV(VHCI_MINOR); --- linux-3.13.0.orig/drivers/bus/mvebu-mbus.c +++ linux-3.13.0/drivers/bus/mvebu-mbus.c @@ -181,12 +181,25 @@ } /* Checks whether the given window number is available */ + +/* On Armada XP, 375 and 38x the MBus window 13 has the remap + * capability, like windows 0 to 7. However, the mvebu-mbus driver + * isn't currently taking into account this special case, which means + * that when window 13 is actually used, the remap registers are left + * to 0, making the device using this MBus window unavailable. The + * quick fix for stable is to not use window 13. A follow up patch + * will correctly handle this window. +*/ static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus, const int win) { void __iomem *addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win); u32 ctrl = readl(addr + WIN_CTRL_OFF); + + if (win == 13) + return false; + return !(ctrl & WIN_CTRL_ENABLE); } @@ -222,12 +235,6 @@ */ if ((u64)base < wend && end > wbase) return 0; - - /* - * Check if target/attribute conflicts - */ - if (target == wtarget && attr == wattr) - return 0; } return 1; --- linux-3.13.0.orig/drivers/cdrom/cdrom.c +++ linux-3.13.0/drivers/cdrom/cdrom.c @@ -289,7 +289,7 @@ /* default compatibility mode */ static bool autoclose=1; static bool autoeject; -static bool lockdoor = 1; +static bool lockdoor = 0; /* will we ever get to use this... sigh. */ static bool check_media_type; /* automatically restart mrw format */ --- linux-3.13.0.orig/drivers/char/hw_random/Kconfig +++ linux-3.13.0/drivers/char/hw_random/Kconfig @@ -352,3 +352,16 @@ module will be called msm-rng. If unsure, say Y. + +config HW_RANDOM_XGENE + tristate "APM X-Gene True Random Number Generator (TRNG) support" + depends on HW_RANDOM && ARCH_XGENE + default HW_RANDOM + ---help--- + This driver provides kernel-side support for the Random Number + Generator hardware found on APM X-Gene SoC. 
+ + To compile this driver as a module, choose M here: the + module will be called xgene_rng. + + If unsure, say Y. --- linux-3.13.0.orig/drivers/char/hw_random/Makefile +++ linux-3.13.0/drivers/char/hw_random/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o +obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o --- linux-3.13.0.orig/drivers/char/hw_random/pseries-rng.c +++ linux-3.13.0/drivers/char/hw_random/pseries-rng.c @@ -25,18 +25,21 @@ #include -static int pseries_rng_data_read(struct hwrng *rng, u32 *data) +static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { + u64 buffer[PLPAR_HCALL_BUFSIZE]; + size_t size = max < 8 ? max : 8; int rc; - rc = plpar_hcall(H_RANDOM, (unsigned long *)data); + rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); if (rc != H_SUCCESS) { pr_err_ratelimited("H_RANDOM call failed %d\n", rc); return -EIO; } + memcpy(data, buffer, size); /* The hypervisor interface returns 64 bits */ - return 8; + return size; } /** @@ -55,7 +58,7 @@ static struct hwrng pseries_rng = { .name = KBUILD_MODNAME, - .data_read = pseries_rng_data_read, + .read = pseries_rng_read, }; static int __init pseries_rng_probe(struct vio_dev *dev, --- linux-3.13.0.orig/drivers/char/hw_random/xgene-rng.c +++ linux-3.13.0/drivers/char/hw_random/xgene-rng.c @@ -0,0 +1,423 @@ +/* + * APM X-Gene SoC RNG Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Rameshwar Prasad Sahu + * Shamal Winchurkar + * Feng Kan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RNG_MAX_DATUM 4 +#define MAX_TRY 100 +#define XGENE_RNG_RETRY_COUNT 20 +#define XGENE_RNG_RETRY_INTERVAL 10 + +/* RNG Registers */ +#define RNG_INOUT_0 0x00 +#define RNG_INTR_STS_ACK 0x10 +#define RNG_CONTROL 0x14 +#define RNG_CONFIG 0x18 +#define RNG_ALARMCNT 0x1c +#define RNG_FROENABLE 0x20 +#define RNG_FRODETUNE 0x24 +#define RNG_ALARMMASK 0x28 +#define RNG_ALARMSTOP 0x2c +#define RNG_OPTIONS 0x78 +#define RNG_EIP_REV 0x7c + +#define MONOBIT_FAIL_MASK BIT(7) +#define POKER_FAIL_MASK BIT(6) +#define LONG_RUN_FAIL_MASK BIT(5) +#define RUN_FAIL_MASK BIT(4) +#define NOISE_FAIL_MASK BIT(3) +#define STUCK_OUT_MASK BIT(2) +#define SHUTDOWN_OFLO_MASK BIT(1) +#define READY_MASK BIT(0) + +#define MAJOR_HW_REV_RD(src) (((src) & 0x0f000000) >> 24) +#define MINOR_HW_REV_RD(src) (((src) & 0x00f00000) >> 20) +#define HW_PATCH_LEVEL_RD(src) (((src) & 0x000f0000) >> 16) +#define MAX_REFILL_CYCLES_SET(dst, src) \ + ((dst & ~0xffff0000) | (((u32)src << 16) & 0xffff0000)) +#define MIN_REFILL_CYCLES_SET(dst, src) \ + ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff)) +#define ALARM_THRESHOLD_SET(dst, src) \ + ((dst & ~0x000000ff) | (((u32)src) & 0x000000ff)) +#define ENABLE_RNG_SET(dst, src) \ + ((dst & ~BIT(10)) | (((u32)src << 10) & BIT(10))) +#define REGSPEC_TEST_MODE_SET(dst, src) \ + ((dst & ~BIT(8)) | (((u32)src << 8) & BIT(8))) +#define MONOBIT_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(7)) | (((u32)src << 7) & BIT(7))) +#define POKER_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(6)) | (((u32)src << 6) & BIT(6))) +#define LONG_RUN_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(5)) | (((u32)src << 5) & BIT(5))) +#define RUN_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(4)) | (((u32)src << 4) & BIT(4))) +#define NOISE_FAIL_MASK_SET(dst, src) \ + ((dst & ~BIT(3)) | (((u32)src << 3) & BIT(3))) +#define STUCK_OUT_MASK_SET(dst, src) \ + ((dst & ~BIT(2)) | (((u32)src << 2) & BIT(2))) +#define SHUTDOWN_OFLO_MASK_SET(dst, src) \ + ((dst & ~BIT(1)) | (((u32)src << 1) & BIT(1))) + +struct xgene_rng_dev { + u32 irq; + void __iomem *csr_base; + u32 revision; + u32 datum_size; + u32 failure_cnt; /* Failure count last minute */ + unsigned long failure_ts;/* First failure timestamp */ + struct timer_list failure_timer; + struct device *dev; + struct clk *clk; +}; + +static void xgene_rng_expired_timer(unsigned long arg) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg; + + /* Clear failure counter as timer expired */ + disable_irq(ctx->irq); + ctx->failure_cnt = 0; + del_timer(&ctx->failure_timer); + enable_irq(ctx->irq); +} + +static void xgene_rng_start_timer(struct xgene_rng_dev *ctx) +{ + ctx->failure_timer.data = (unsigned long) ctx; + ctx->failure_timer.function = xgene_rng_expired_timer; + ctx->failure_timer.expires = jiffies + 120 * HZ; + add_timer(&ctx->failure_timer); +} + +/* + * Initialize or reinit free running oscillators (FROs) + */ +static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val) +{ + writel(fro_val, ctx->csr_base + RNG_FRODETUNE); + writel(0x00000000, ctx->csr_base + RNG_ALARMMASK); + writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP); + writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE); +} + +static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx) +{ + u32 val; + + val = readl(ctx->csr_base + RNG_INTR_STS_ACK); + if (val & MONOBIT_FAIL_MASK) + /* + * LFSR detected an out-of-bounds number of 1s after + * checking 20,000 bits (test T1 as specified in the + * 
AIS-31 standard) + */ + dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val); + if (val & POKER_FAIL_MASK) + /* + * LFSR detected an out-of-bounds value in at least one + * of the 16 poker_count_X counters or an out of bounds sum + * of squares value after checking 20,000 bits (test T2 as + * specified in the AIS-31 standard) + */ + dev_err(ctx->dev, "test poker failure error 0x%08X\n", val); + if (val & LONG_RUN_FAIL_MASK) + /* + * LFSR detected a sequence of 34 identical bits + * (test T4 as specified in the AIS-31 standard) + */ + dev_err(ctx->dev, "test long run failure error 0x%08X\n", val); + if (val & RUN_FAIL_MASK) + /* + * LFSR detected an outof-bounds value for at least one + * of the running counters after checking 20,000 bits + * (test T3 as specified in the AIS-31 standard) + */ + dev_err(ctx->dev, "test run failure error 0x%08X\n", val); + if (val & NOISE_FAIL_MASK) + /* LFSR detected a sequence of 48 identical bits */ + dev_err(ctx->dev, "noise failure error 0x%08X\n", val); + if (val & STUCK_OUT_MASK) + /* + * Detected output data registers generated same value twice + * in a row + */ + dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val); + + if (val & SHUTDOWN_OFLO_MASK) { + u32 frostopped; + + /* FROs shut down after a second error event. Try recover. */ + if (++ctx->failure_cnt == 1) { + /* 1st time, just recover */ + ctx->failure_ts = jiffies; + frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); + xgene_rng_init_fro(ctx, frostopped); + + /* + * We must start a timer to clear out this error + * in case the system timer wrap around + */ + xgene_rng_start_timer(ctx); + } else { + /* 2nd time failure in lesser than 1 minute? */ + if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) { + dev_err(ctx->dev, + "FRO shutdown failure error 0x%08X\n", + val); + } else { + /* 2nd time failure after 1 minutes, recover */ + ctx->failure_ts = jiffies; + ctx->failure_cnt = 1; + /* + * We must start a timer to clear out this + * error in case the system timer wrap + * around + */ + xgene_rng_start_timer(ctx); + } + frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); + xgene_rng_init_fro(ctx, frostopped); + } + } + /* Clear them all */ + writel(val, ctx->csr_base + RNG_INTR_STS_ACK); +} + +static irqreturn_t xgene_rng_irq_handler(int irq, void *id) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id; + + /* RNG Alarm Counter overflow */ + xgene_rng_chk_overflow(ctx); + + return IRQ_HANDLED; +} + +static int xgene_rng_data_present(struct hwrng *rng, int wait) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; + u32 i, val = 0; + + for (i = 0; i < XGENE_RNG_RETRY_COUNT; i++) { + val = readl(ctx->csr_base + RNG_INTR_STS_ACK); + if ((val & READY_MASK) || !wait) + break; + udelay(XGENE_RNG_RETRY_INTERVAL); + } + + return (val & READY_MASK); +} + +static int xgene_rng_data_read(struct hwrng *rng, u32 *data) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; + int i; + + for (i = 0; i < ctx->datum_size; i++) + data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4); + + /* Clear ready bit to start next transaction */ + writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); + + return ctx->datum_size << 2; +} + +static void xgene_rng_init_internal(struct xgene_rng_dev *ctx) +{ + u32 val; + + writel(0x00000000, ctx->csr_base + RNG_CONTROL); + + val = MAX_REFILL_CYCLES_SET(0, 10); + val = MIN_REFILL_CYCLES_SET(val, 10); + writel(val, ctx->csr_base + RNG_CONFIG); + + val = ALARM_THRESHOLD_SET(0, 0xFF); + writel(val, ctx->csr_base + 
RNG_ALARMCNT); + + xgene_rng_init_fro(ctx, 0); + + writel(MONOBIT_FAIL_MASK | + POKER_FAIL_MASK | + LONG_RUN_FAIL_MASK | + RUN_FAIL_MASK | + NOISE_FAIL_MASK | + STUCK_OUT_MASK | + SHUTDOWN_OFLO_MASK | + READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); + + val = ENABLE_RNG_SET(0, 1); + val = MONOBIT_FAIL_MASK_SET(val, 1); + val = POKER_FAIL_MASK_SET(val, 1); + val = LONG_RUN_FAIL_MASK_SET(val, 1); + val = RUN_FAIL_MASK_SET(val, 1); + val = NOISE_FAIL_MASK_SET(val, 1); + val = STUCK_OUT_MASK_SET(val, 1); + val = SHUTDOWN_OFLO_MASK_SET(val, 1); + writel(val, ctx->csr_base + RNG_CONTROL); +} + +static int xgene_rng_init(struct hwrng *rng) +{ + struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; + + ctx->failure_cnt = 0; + init_timer(&ctx->failure_timer); + + ctx->revision = readl(ctx->csr_base + RNG_EIP_REV); + + dev_dbg(ctx->dev, "Rev %d.%d.%d\n", + MAJOR_HW_REV_RD(ctx->revision), + MINOR_HW_REV_RD(ctx->revision), + HW_PATCH_LEVEL_RD(ctx->revision)); + + dev_dbg(ctx->dev, "Options 0x%08X", + readl(ctx->csr_base + RNG_OPTIONS)); + + xgene_rng_init_internal(ctx); + + ctx->datum_size = RNG_MAX_DATUM; + + return 0; +} + +static struct hwrng xgene_rng_func = { + .name = "xgene-rng", + .init = xgene_rng_init, + .data_present = xgene_rng_data_present, + .data_read = xgene_rng_data_read, +}; + +static int xgene_rng_probe(struct platform_device *pdev) +{ + struct resource *res; + struct xgene_rng_dev *ctx; + int rc = 0; + + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = &pdev->dev; + platform_set_drvdata(pdev, ctx); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctx->csr_base)) + return PTR_ERR(ctx->csr_base); + + ctx->irq = platform_get_irq(pdev, 0); + if (ctx->irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return ctx->irq; + } + + dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", + ctx->csr_base, ctx->irq); + + rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0, + dev_name(&pdev->dev), ctx); + if (rc) { + dev_err(&pdev->dev, "Could not request RNG alarm IRQ\n"); + return rc; + } + + /* Enable IP clock */ + ctx->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(ctx->clk)) { + dev_warn(&pdev->dev, "Couldn't get the clock for RNG\n"); + } else { + rc = clk_prepare_enable(ctx->clk); + if (rc) { + dev_warn(&pdev->dev, + "clock prepare enable failed for RNG"); + return rc; + } + } + + xgene_rng_func.priv = (unsigned long) ctx; + + rc = hwrng_register(&xgene_rng_func); + if (rc) { + dev_err(&pdev->dev, "RNG registering failed error %d\n", rc); + if (!IS_ERR(ctx->clk)) + clk_disable_unprepare(ctx->clk); + return rc; + } + + rc = device_init_wakeup(&pdev->dev, 1); + if (rc) { + dev_err(&pdev->dev, "RNG device_init_wakeup failed error %d\n", + rc); + if (!IS_ERR(ctx->clk)) + clk_disable_unprepare(ctx->clk); + hwrng_unregister(&xgene_rng_func); + return rc; + } + + return 0; +} + +static int xgene_rng_remove(struct platform_device *pdev) +{ + struct xgene_rng_dev *ctx = platform_get_drvdata(pdev); + int rc; + + rc = device_init_wakeup(&pdev->dev, 0); + if (rc) + dev_err(&pdev->dev, "RNG init wakeup failed error %d\n", rc); + if (!IS_ERR(ctx->clk)) + clk_disable_unprepare(ctx->clk); + hwrng_unregister(&xgene_rng_func); + + return rc; +} + +static const struct of_device_id xgene_rng_of_match[] = { + { .compatible = "apm,xgene-rng" }, + { } +}; + +MODULE_DEVICE_TABLE(of, xgene_rng_of_match); + +static struct platform_driver 
xgene_rng_driver = { + .probe = xgene_rng_probe, + .remove = xgene_rng_remove, + .driver = { + .name = "xgene-rng", + .of_match_table = xgene_rng_of_match, + }, +}; + +module_platform_driver(xgene_rng_driver); +MODULE_DESCRIPTION("APM X-Gene RNG driver"); +MODULE_LICENSE("GPL"); --- linux-3.13.0.orig/drivers/char/ipmi/Kconfig +++ linux-3.13.0/drivers/char/ipmi/Kconfig @@ -50,6 +50,18 @@ Currently, only KCS and SMIC are supported. If you are using IPMI, you should probably say "y" here. +config IPMI_SI_PROBE_DEFAULTS + bool 'Probe for all possible IPMI system interfaces by default' + default n + depends on IPMI_SI + help + Modern systems will usually expose IPMI interfaces via a discoverable + firmware mechanism such as ACPI or DMI. Older systems do not, and so + the driver is forced to probe hardware manually. This may cause boot + delays. Say "n" here to disable this manual probing. IPMI will then + only be available on older systems if the "ipmi_si_intf.trydefaults=1" + boot argument is passed. + config IPMI_WATCHDOG tristate 'IPMI Watchdog Timer' help --- linux-3.13.0.orig/drivers/char/ipmi/ipmi_bt_sm.c +++ linux-3.13.0/drivers/char/ipmi/ipmi_bt_sm.c @@ -201,7 +201,7 @@ } bt->state = BT_STATE_IDLE; /* start here */ bt->complete = BT_STATE_IDLE; /* end here */ - bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000; + bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */ return 3; /* We claim 3 bytes of space; ought to check SPMI table */ @@ -352,7 +352,7 @@ static inline int read_all_bytes(struct si_sm_data *bt) { - unsigned char i; + unsigned int i; /* * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. @@ -613,7 +613,7 @@ HOST2BMC(42); /* Sequence number */ HOST2BMC(3); /* Cmd == Soft reset */ BT_CONTROL(BT_H2B_ATN); - bt->timeout = BT_RESET_DELAY * 1000000; + bt->timeout = BT_RESET_DELAY * USEC_PER_SEC; BT_STATE_CHANGE(BT_STATE_RESET3, SI_SM_CALL_WITH_DELAY); @@ -651,14 +651,14 @@ bt_init_data(bt, bt->io); if ((i == 8) && !BT_CAP[2]) { bt->BT_CAP_outreqs = BT_CAP[3]; - bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000; + bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC; bt->BT_CAP_retries = BT_CAP[7]; } else printk(KERN_WARNING "IPMI BT: using default values\n"); if (!bt->BT_CAP_outreqs) bt->BT_CAP_outreqs = 1; printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n", - bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries); + bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries); bt->timeout = bt->BT_CAP_req2rsp; return SI_SM_CALL_WITHOUT_DELAY; --- linux-3.13.0.orig/drivers/char/ipmi/ipmi_kcs_sm.c +++ linux-3.13.0/drivers/char/ipmi/ipmi_kcs_sm.c @@ -118,8 +118,8 @@ #define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH /* Timeouts in microseconds. */ -#define IBF_RETRY_TIMEOUT 5000000 -#define OBF_RETRY_TIMEOUT 5000000 +#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC) +#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC) #define MAX_ERROR_RETRIES 10 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) --- linux-3.13.0.orig/drivers/char/ipmi/ipmi_si_intf.c +++ linux-3.13.0/drivers/char/ipmi/ipmi_si_intf.c @@ -175,6 +175,7 @@ struct si_sm_handlers *handlers; enum si_type si_type; spinlock_t si_lock; + spinlock_t msg_lock; struct list_head xmit_msgs; struct list_head hp_xmit_msgs; struct ipmi_smi_msg *curr_msg; @@ -356,6 +357,13 @@ struct timeval t; #endif + /* + * No need to save flags, we aleady have interrupts off and we + * already hold the SMI lock. 
+ */ + if (!smi_info->run_to_completion) + spin_lock(&(smi_info->msg_lock)); + /* Pick the high priority queue first. */ if (!list_empty(&(smi_info->hp_xmit_msgs))) { entry = smi_info->hp_xmit_msgs.next; @@ -393,6 +401,9 @@ rv = SI_SM_CALL_WITHOUT_DELAY; } out: + if (!smi_info->run_to_completion) + spin_unlock(&(smi_info->msg_lock)); + return rv; } @@ -879,6 +890,19 @@ printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif + /* + * last_timeout_jiffies is updated here to avoid + * smi_timeout() handler passing very large time_diff + * value to smi_event_handler() that causes + * the send command to abort. + */ + smi_info->last_timeout_jiffies = jiffies; + + mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); + + if (smi_info->thread) + wake_up_process(smi_info->thread); + if (smi_info->run_to_completion) { /* * If we are running to completion, then throw it in @@ -901,26 +925,15 @@ return; } - spin_lock_irqsave(&smi_info->si_lock, flags); + spin_lock_irqsave(&smi_info->msg_lock, flags); if (priority > 0) list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); else list_add_tail(&msg->link, &smi_info->xmit_msgs); + spin_unlock_irqrestore(&smi_info->msg_lock, flags); + spin_lock_irqsave(&smi_info->si_lock, flags); if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { - /* - * last_timeout_jiffies is updated here to avoid - * smi_timeout() handler passing very large time_diff - * value to smi_event_handler() that causes - * the send command to abort. - */ - smi_info->last_timeout_jiffies = jiffies; - - mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); - - if (smi_info->thread) - wake_up_process(smi_info->thread); - start_next_msg(smi_info); smi_event_handler(smi_info, 0); } @@ -1024,19 +1037,16 @@ static void poll(void *send_info) { struct smi_info *smi_info = send_info; - unsigned long flags = 0; - int run_to_completion = smi_info->run_to_completion; + unsigned long flags; /* * Make sure there is some delay in the poll loop so we can * drive time forward and timeout things. 
*/ udelay(10); - if (!run_to_completion) - spin_lock_irqsave(&smi_info->si_lock, flags); + spin_lock_irqsave(&smi_info->si_lock, flags); smi_event_handler(smi_info, 10); - if (!run_to_completion) - spin_unlock_irqrestore(&smi_info->si_lock, flags); + spin_unlock_irqrestore(&smi_info->si_lock, flags); } static void request_events(void *send_info) @@ -1230,7 +1240,7 @@ #ifdef CONFIG_PCI static bool si_trypci = 1; #endif -static bool si_trydefaults = 1; +static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS); static char *si_type[SI_MAX_PARMS]; #define MAX_SI_TYPE_STR 30 static char si_type_str[MAX_SI_TYPE_STR]; @@ -1358,7 +1368,7 @@ if (info->si_type == SI_BT) { rv = request_irq(info->irq, si_bt_irq_handler, - IRQF_SHARED | IRQF_DISABLED, + IRQF_SHARED, DEVICE_NAME, info); if (!rv) @@ -1368,7 +1378,7 @@ } else rv = request_irq(info->irq, si_irq_handler, - IRQF_SHARED | IRQF_DISABLED, + IRQF_SHARED, DEVICE_NAME, info); if (rv) { @@ -1702,8 +1712,10 @@ { struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); - if (info) + if (info) { spin_lock_init(&info->si_lock); + spin_lock_init(&info->msg_lock); + } return info; } @@ -1849,11 +1861,15 @@ info->irq_setup = std_irq_setup; info->slave_addr = ipmb; - if (!add_smi(info)) { - if (try_smi_init(info)) - cleanup_one_si(info); - } else { + rv = add_smi(info); + if (rv) { kfree(info); + goto out; + } + rv = try_smi_init(info); + if (rv) { + cleanup_one_si(info); + goto out; } } else { /* remove */ @@ -2067,6 +2083,7 @@ static int try_init_spmi(struct SPMITable *spmi) { struct smi_info *info; + int rv; if (spmi->IPMIlegacy != 1) { printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); @@ -2141,10 +2158,11 @@ info->io.addr_data, info->io.regsize, info->io.regspacing, info->irq); - if (add_smi(info)) + rv = add_smi(info); + if (rv) kfree(info); - return 0; + return rv; } static void spmi_find_bmc(void) @@ -2178,6 +2196,7 @@ acpi_handle handle; acpi_status status; unsigned long long tmp; + int rv; acpi_dev = pnp_acpi_device(dev); if (!acpi_dev) @@ -2259,10 +2278,11 @@ res, info->io.regsize, info->io.regspacing, info->irq); - if (add_smi(info)) - goto err_free; + rv = add_smi(info); + if (rv) + kfree(info); - return 0; + return rv; err_free: kfree(info); @@ -2566,16 +2586,20 @@ &pdev->resource[0], info->io.regsize, info->io.regspacing, info->irq); - if (add_smi(info)) + rv = add_smi(info); + if (rv) { kfree(info); + pci_disable_device(pdev); + } - return 0; + return rv; } static void ipmi_pci_remove(struct pci_dev *pdev) { struct smi_info *info = pci_get_drvdata(pdev); cleanup_one_si(info); + pci_disable_device(pdev); } static struct pci_device_id ipmi_pci_devices[] = { @@ -2670,9 +2694,10 @@ dev_set_drvdata(&dev->dev, info); - if (add_smi(info)) { + ret = add_smi(info); + if (ret) { kfree(info); - return -EBUSY; + return ret; } #endif return 0; @@ -2711,6 +2736,7 @@ static int ipmi_parisc_probe(struct parisc_device *dev) { struct smi_info *info; + int rv; info = smi_info_alloc(); @@ -2736,9 +2762,10 @@ dev_set_drvdata(&dev->dev, info); - if (add_smi(info)) { + rv = add_smi(info); + if (rv) { kfree(info); - return -EBUSY; + return rv; } return 0; @@ -2773,7 +2800,7 @@ smi_result == SI_SM_CALL_WITH_TICK_DELAY) { schedule_timeout_uninterruptible(1); smi_result = smi_info->handlers->event( - smi_info->si_sm, 100); + smi_info->si_sm, jiffies_to_usecs(1)); } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { smi_result = smi_info->handlers->event( smi_info->si_sm, 0); --- linux-3.13.0.orig/drivers/char/ipmi/ipmi_smic_sm.c +++ 
linux-3.13.0/drivers/char/ipmi/ipmi_smic_sm.c @@ -80,7 +80,7 @@ #define SMIC_MAX_ERROR_RETRIES 3 /* Timeouts in microseconds. */ -#define SMIC_RETRY_TIMEOUT 2000000 +#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC) /* SMIC Flags Register Bits */ #define SMIC_RX_DATA_READY 0x80 --- linux-3.13.0.orig/drivers/char/random.c +++ linux-3.13.0/drivers/char/random.c @@ -1063,8 +1063,8 @@ * pool while mixing, and hash one final time. */ sha_transform(hash.w, extract, workspace); - memset(extract, 0, sizeof(extract)); - memset(workspace, 0, sizeof(workspace)); + memzero_explicit(extract, sizeof(extract)); + memzero_explicit(workspace, sizeof(workspace)); /* * In case the hash function has some recognizable output @@ -1076,7 +1076,7 @@ hash.w[2] ^= rol32(hash.w[2], 16); memcpy(out, &hash, EXTRACT_SIZE); - memset(&hash, 0, sizeof(hash)); + memzero_explicit(&hash, sizeof(hash)); } static ssize_t extract_entropy(struct entropy_store *r, void *buf, @@ -1124,7 +1124,7 @@ } /* Wipe data just returned from memory */ - memset(tmp, 0, sizeof(tmp)); + memzero_explicit(tmp, sizeof(tmp)); return ret; } @@ -1162,7 +1162,7 @@ } /* Wipe data just returned from memory */ - memset(tmp, 0, sizeof(tmp)); + memzero_explicit(tmp, sizeof(tmp)); return ret; } --- linux-3.13.0.orig/drivers/char/raw.c +++ linux-3.13.0/drivers/char/raw.c @@ -190,7 +190,7 @@ struct raw_device_data *rawdev; struct block_device *bdev; - if (number <= 0 || number >= MAX_RAW_MINORS) + if (number <= 0 || number >= max_raw_minors) return -EINVAL; rawdev = &raw_devices[number]; --- linux-3.13.0.orig/drivers/char/tpm/tpm-interface.c +++ linux-3.13.0/drivers/char/tpm/tpm-interface.c @@ -1400,13 +1400,13 @@ int err, total = 0, retries = 5; u8 *dest = out; + if (!out || !num_bytes || max > TPM_MAX_RNG_DATA) + return -EINVAL; + chip = tpm_chip_find_get(chip_num); if (chip == NULL) return -ENODEV; - if (!out || !num_bytes || max > TPM_MAX_RNG_DATA) - return -EINVAL; - do { tpm_cmd.header.in = tpm_getrandom_header; tpm_cmd.params.getrandom_in.num_bytes = cpu_to_be32(num_bytes); @@ -1425,6 +1425,7 @@ num_bytes -= recd; } while (retries-- && total < max); + tpm_chip_put(chip); return total ? 
total : -EIO; } EXPORT_SYMBOL_GPL(tpm_get_random); @@ -1535,7 +1536,7 @@ /* Make chip available */ spin_lock(&driver_lock); - list_add_rcu(&chip->list, &tpm_chip_list); + list_add_tail_rcu(&chip->list, &tpm_chip_list); spin_unlock(&driver_lock); return chip; --- linux-3.13.0.orig/drivers/char/tpm/tpm_i2c_atmel.c +++ linux-3.13.0/drivers/char/tpm/tpm_i2c_atmel.c @@ -208,6 +208,10 @@ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); + if (!chip->vendor.priv) { + rc = -ENOMEM; + goto out_err; + } /* Default timeouts */ chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT); --- linux-3.13.0.orig/drivers/char/tpm/tpm_i2c_nuvoton.c +++ linux-3.13.0/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -579,6 +579,11 @@ chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); + if (!chip->vendor.priv) { + rc = -ENOMEM; + goto out_err; + } + init_waitqueue_head(&chip->vendor.read_queue); init_waitqueue_head(&chip->vendor.int_queue); --- linux-3.13.0.orig/drivers/char/tpm/tpm_i2c_stm_st33.c +++ linux-3.13.0/drivers/char/tpm/tpm_i2c_stm_st33.c @@ -398,7 +398,7 @@ */ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) { - int size = 0, burstcnt, len; + int size = 0, burstcnt, len, ret; struct i2c_client *client; client = (struct i2c_client *)TPM_VPRIV(chip); @@ -410,8 +410,13 @@ &chip->vendor.read_queue) == 0) { burstcnt = get_burstcount(chip); + if (burstcnt < 0) + return burstcnt; len = min_t(int, burstcnt, count - size); - I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len); + ret = I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len); + if (ret < 0) + return ret; + size += len; } return size; @@ -451,7 +456,8 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf, size_t len) { - u32 status, burstcnt = 0, i, size; + u32 status, i, size; + int burstcnt = 0; int ret; u8 data; struct i2c_client *client; @@ -482,8 +488,10 @@ for (i = 0; i < len - 1;) { burstcnt = get_burstcount(chip); + if (burstcnt < 0) + return burstcnt; size = min_t(int, len - i - 1, burstcnt); - ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size); + ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size); if (ret < 0) goto out_err; @@ -745,6 +753,7 @@ } tpm_get_timeouts(chip); + tpm_do_selftest(chip); dev_info(chip->dev, "TPM I2C Initialized\n"); return 0; --- linux-3.13.0.orig/drivers/char/tpm/tpm_ibmvtpm.c +++ linux-3.13.0/drivers/char/tpm/tpm_ibmvtpm.c @@ -124,7 +124,7 @@ { struct ibmvtpm_dev *ibmvtpm; struct ibmvtpm_crq crq; - u64 *word = (u64 *) &crq; + __be64 *word = (__be64 *)&crq; int rc; ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); @@ -145,10 +145,11 @@ memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); crq.valid = (u8)IBMVTPM_VALID_CMD; crq.msg = (u8)VTPM_TPM_COMMAND; - crq.len = (u16)count; - crq.data = ibmvtpm->rtce_dma_handle; + crq.len = cpu_to_be16(count); + crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); - rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), + be64_to_cpu(word[1])); if (rc != H_SUCCESS) { dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); rc = 0; @@ -186,7 +187,8 @@ crq.valid = (u8)IBMVTPM_VALID_CMD; crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), + cpu_to_be64(buf[1])); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc); @@ -212,7 +214,8 @@ crq.valid = (u8)IBMVTPM_VALID_CMD; crq.msg = 
(u8)VTPM_GET_VERSION; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), + cpu_to_be64(buf[1])); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "ibmvtpm_crq_get_version failed rc=%d\n", rc); @@ -307,6 +310,14 @@ static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) { struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev); + + /* ibmvtpm initializes at probe time, so the data we are + * asking for may not be set yet. Estimate that 4K required + * for TCE-mapped buffer in addition to CRQ. + */ + if (!ibmvtpm) + return CRQ_RES_BUF_SIZE + PAGE_SIZE; + return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; } @@ -327,7 +338,8 @@ crq.valid = (u8)IBMVTPM_VALID_CMD; crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; - rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]), + cpu_to_be64(buf[1])); if (rc != H_SUCCESS) dev_err(ibmvtpm->dev, "tpm_ibmvtpm_suspend failed rc=%d\n", rc); @@ -511,11 +523,11 @@ case IBMVTPM_VALID_CMD: switch (crq->msg) { case VTPM_GET_RTCE_BUFFER_SIZE_RES: - if (crq->len <= 0) { + if (be16_to_cpu(crq->len) <= 0) { dev_err(ibmvtpm->dev, "Invalid rtce size\n"); return; } - ibmvtpm->rtce_size = crq->len; + ibmvtpm->rtce_size = be16_to_cpu(crq->len); ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size, GFP_KERNEL); if (!ibmvtpm->rtce_buf) { @@ -536,11 +548,11 @@ return; case VTPM_GET_VERSION_RES: - ibmvtpm->vtpm_version = crq->data; + ibmvtpm->vtpm_version = be32_to_cpu(crq->data); return; case VTPM_TPM_COMMAND_RES: /* len of the data in rtce buffer */ - ibmvtpm->res_len = crq->len; + ibmvtpm->res_len = be16_to_cpu(crq->len); wake_up_interruptible(&ibmvtpm->wq); return; default: --- linux-3.13.0.orig/drivers/char/tpm/tpm_ibmvtpm.h +++ linux-3.13.0/drivers/char/tpm/tpm_ibmvtpm.h @@ -22,9 +22,9 @@ struct ibmvtpm_crq { u8 valid; u8 msg; - u16 len; - u32 data; - u64 reserved; + __be16 len; + __be32 data; + __be64 reserved; } __attribute__((packed, aligned(8))); struct ibmvtpm_crq_queue { --- linux-3.13.0.orig/drivers/char/tpm/tpm_ppi.c +++ linux-3.13.0/drivers/char/tpm/tpm_ppi.c @@ -172,7 +172,7 @@ * is updated with function index from SUBREQ to SUBREQ2 since PPI * version 1.1 */ - if (strcmp(version, "1.1") == -1) + if (strcmp(version, "1.1") < 0) params[2].integer.value = TPM_PPI_FN_SUBREQ; else params[2].integer.value = TPM_PPI_FN_SUBREQ2; @@ -182,7 +182,7 @@ * string/package type. For PPI version 1.0 and 1.1, use buffer type * for compatibility, and use package type since 1.2 according to spec. */ - if (strcmp(version, "1.2") == -1) { + if (strcmp(version, "1.2") < 0) { params[3].type = ACPI_TYPE_BUFFER; params[3].buffer.length = sizeof(req); sscanf(buf, "%d", &req); @@ -248,7 +248,7 @@ * (e.g. 
Capella with PPI 1.0) need integer/string/buffer type, so for * compatibility, define params[3].type as buffer, if PPI version < 1.2 */ - if (strcmp(version, "1.2") == -1) { + if (strcmp(version, "1.2") < 0) { params[3].type = ACPI_TYPE_BUFFER; params[3].buffer.length = 0; params[3].buffer.pointer = NULL; @@ -390,7 +390,7 @@ kfree(output.pointer); output.length = ACPI_ALLOCATE_BUFFER; output.pointer = NULL; - if (strcmp(version, "1.2") == -1) + if (strcmp(version, "1.2") < 0) return -EPERM; params[2].integer.value = TPM_PPI_FN_GETOPR; --- linux-3.13.0.orig/drivers/char/tpm/tpm_tis.c +++ linux-3.13.0/drivers/char/tpm/tpm_tis.c @@ -75,6 +75,10 @@ #define TPM_DID_VID(l) (0x0F00 | ((l) << 12)) #define TPM_RID(l) (0x0F04 | ((l) << 12)) +struct priv_data { + bool irq_tested; +}; + static LIST_HEAD(tis_chips); static DEFINE_MUTEX(tis_lock); @@ -338,12 +342,27 @@ return rc; } +static void disable_interrupts(struct tpm_chip *chip) +{ + u32 intmask; + + intmask = + ioread32(chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + intmask &= ~TPM_GLOBAL_INT_ENABLE; + iowrite32(intmask, + chip->vendor.iobase + + TPM_INT_ENABLE(chip->vendor.locality)); + free_irq(chip->vendor.irq, chip); + chip->vendor.irq = 0; +} + /* * If interrupts are used (signaled by an irq set in the vendor structure) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) { int rc; u32 ordinal; @@ -373,6 +392,30 @@ return rc; } +static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) +{ + int rc, irq; + struct priv_data *priv = chip->vendor.priv; + + if (!chip->vendor.irq || priv->irq_tested) + return tpm_tis_send_main(chip, buf, len); + + /* Verify receipt of the expected IRQ */ + irq = chip->vendor.irq; + chip->vendor.irq = 0; + rc = tpm_tis_send_main(chip, buf, len); + chip->vendor.irq = irq; + if (!priv->irq_tested) + msleep(1); + if (!priv->irq_tested) { + disable_interrupts(chip); + dev_err(chip->dev, + FW_BUG "TPM interrupt not working, polling instead\n"); + } + priv->irq_tested = true; + return rc; +} + /* * Early probing for iTPM with STS_DATA_EXPECT flaw. 
* Try sending command without itpm flag set and if that @@ -515,6 +558,7 @@ if (interrupt == 0) return IRQ_NONE; + ((struct priv_data *)chip->vendor.priv)->irq_tested = true; if (interrupt & TPM_INTF_DATA_AVAIL_INT) wake_up_interruptible(&chip->vendor.read_queue); if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT) @@ -544,9 +588,14 @@ u32 vendor, intfcaps, intmask; int rc, i, irq_s, irq_e, probe; struct tpm_chip *chip; + struct priv_data *priv; + priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; if (!(chip = tpm_register_hardware(dev, &tpm_tis))) return -ENODEV; + chip->vendor.priv = priv; chip->vendor.iobase = ioremap(start, len); if (!chip->vendor.iobase) { @@ -615,19 +664,6 @@ if (intfcaps & TPM_INTF_DATA_AVAIL_INT) dev_dbg(dev, "\tData Avail Int Support\n"); - /* get the timeouts before testing for irqs */ - if (tpm_get_timeouts(chip)) { - dev_err(dev, "Could not get TPM timeouts and durations\n"); - rc = -ENODEV; - goto out_err; - } - - if (tpm_do_selftest(chip)) { - dev_err(dev, "TPM self test failed\n"); - rc = -ENODEV; - goto out_err; - } - /* INTERRUPT Setup */ init_waitqueue_head(&chip->vendor.read_queue); init_waitqueue_head(&chip->vendor.int_queue); @@ -729,6 +765,18 @@ } } + if (tpm_get_timeouts(chip)) { + dev_err(dev, "Could not get TPM timeouts and durations\n"); + rc = -ENODEV; + goto out_err; + } + + if (tpm_do_selftest(chip)) { + dev_err(dev, "TPM self test failed\n"); + rc = -ENODEV; + goto out_err; + } + INIT_LIST_HEAD(&chip->vendor.list); mutex_lock(&tis_lock); list_add(&chip->vendor.list, &tis_chips); --- linux-3.13.0.orig/drivers/char/tpm/xen-tpmfront.c +++ linux-3.13.0/drivers/char/tpm/xen-tpmfront.c @@ -17,6 +17,7 @@ #include #include #include "tpm.h" +#include struct tpm_private { struct tpm_chip *chip; @@ -421,6 +422,9 @@ if (!xen_domain()) return -ENODEV; + if (!xen_has_pv_devices()) + return -ENODEV; + return xenbus_register_frontend(&tpmfront_driver); } module_init(xen_tpmfront_init); --- linux-3.13.0.orig/drivers/char/virtio_console.c +++ linux-3.13.0/drivers/char/virtio_console.c @@ -142,6 +142,7 @@ * notification */ struct work_struct control_work; + struct work_struct config_work; struct list_head ports; @@ -1835,10 +1836,21 @@ portdev = vdev->priv; + if (!use_multiport(portdev)) + schedule_work(&portdev->config_work); +} + +static void config_work_handler(struct work_struct *work) +{ + struct ports_device *portdev; + + portdev = container_of(work, struct ports_device, control_work); if (!use_multiport(portdev)) { + struct virtio_device *vdev; struct port *port; u16 rows, cols; + vdev = portdev->vdev; virtio_cread(vdev, struct virtio_console_config, cols, &cols); virtio_cread(vdev, struct virtio_console_config, rows, &rows); @@ -2027,12 +2039,14 @@ spin_lock_init(&portdev->ports_lock); INIT_LIST_HEAD(&portdev->ports); + INIT_WORK(&portdev->config_work, &config_work_handler); + INIT_WORK(&portdev->control_work, &control_work_handler); + if (multiport) { unsigned int nr_added_bufs; spin_lock_init(&portdev->c_ivq_lock); spin_lock_init(&portdev->c_ovq_lock); - INIT_WORK(&portdev->control_work, &control_work_handler); nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); @@ -2100,6 +2114,8 @@ /* Finish up work that's lined up */ if (use_multiport(portdev)) cancel_work_sync(&portdev->control_work); + else + cancel_work_sync(&portdev->config_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) unplug_port(port); @@ -2151,6 +2167,7 @@ virtqueue_disable_cb(portdev->c_ivq); 
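Editor's note: the virtio_console hunks here move the non-multiport config handling out of the config-changed callback into a dedicated work item, which is then cancelled on remove and freeze (see the cancel_work_sync() calls around this note). A minimal, self-contained sketch of that defer-to-workqueue pattern follows; the demo_* structure and function names are hypothetical, not the driver's own.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_device {
	struct work_struct config_work;	/* deferred config processing */
};

/* Runs in process context, so it may sleep while re-reading config space. */
static void demo_config_work_handler(struct work_struct *work)
{
	struct demo_device *dev = container_of(work, struct demo_device,
					       config_work);
	/* ... re-read configuration and update state here ... */
	(void)dev;
}

/* Called from the (atomic) config-changed notification. */
static void demo_config_changed(struct demo_device *dev)
{
	schedule_work(&dev->config_work);
}

static void demo_setup(struct demo_device *dev)
{
	INIT_WORK(&dev->config_work, demo_config_work_handler);
}

static void demo_teardown(struct demo_device *dev)
{
	/* Make sure no handler is still running before freeing state. */
	cancel_work_sync(&dev->config_work);
}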
cancel_work_sync(&portdev->control_work); + cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the cb as the last step. --- linux-3.13.0.orig/drivers/clk/clk-gate.c +++ linux-3.13.0/drivers/clk/clk-gate.c @@ -128,7 +128,7 @@ struct clk_init_data init; if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { - if (bit_idx > 16) { + if (bit_idx > 15) { pr_err("gate bit exceeds LOWORD field\n"); return ERR_PTR(-EINVAL); } --- linux-3.13.0.orig/drivers/clk/clk-s2mps11.c +++ linux-3.13.0/drivers/clk/clk-s2mps11.c @@ -130,7 +130,7 @@ int i; if (!iodev->dev->of_node) - return NULL; + return ERR_PTR(-EINVAL); clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks"); if (!clk_np) { @@ -190,16 +190,13 @@ goto err_reg; } - s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, - sizeof(struct clk_lookup), GFP_KERNEL); + s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk, + s2mps11_name(s2mps11_clk), NULL); if (!s2mps11_clk->lookup) { ret = -ENOMEM; goto err_lup; } - s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk); - s2mps11_clk->lookup->clk = s2mps11_clk->clk; - clkdev_add(s2mps11_clk->lookup); } --- linux-3.13.0.orig/drivers/clk/clk.c +++ linux-3.13.0/drivers/clk/clk.c @@ -1819,9 +1819,28 @@ } EXPORT_SYMBOL_GPL(__clk_register); -static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) +/** + * clk_register - allocate a new clock, register it and return an opaque cookie + * @dev: device that is registering this clock + * @hw: link to hardware-specific clock data + * + * clk_register is the primary interface for populating the clock tree with new + * clock nodes. It returns a pointer to the newly allocated struct clk which + * cannot be dereferenced by driver code but may be used in conjuction with the + * rest of the clock API. In the event of an error clk_register will return an + * error code; drivers must test for an error code after calling clk_register. + */ +struct clk *clk_register(struct device *dev, struct clk_hw *hw) { int i, ret; + struct clk *clk; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) { + pr_err("%s: could not allocate clk\n", __func__); + ret = -ENOMEM; + goto fail_out; + } clk->name = kstrdup(hw->init->name, GFP_KERNEL); if (!clk->name) { @@ -1859,7 +1878,7 @@ ret = __clk_init(dev, clk); if (!ret) - return 0; + return clk; fail_parent_names_copy: while (--i >= 0) @@ -1868,36 +1887,6 @@ fail_parent_names: kfree(clk->name); fail_name: - return ret; -} - -/** - * clk_register - allocate a new clock, register it and return an opaque cookie - * @dev: device that is registering this clock - * @hw: link to hardware-specific clock data - * - * clk_register is the primary interface for populating the clock tree with new - * clock nodes. It returns a pointer to the newly allocated struct clk which - * cannot be dereferenced by driver code but may be used in conjuction with the - * rest of the clock API. In the event of an error clk_register will return an - * error code; drivers must test for an error code after calling clk_register. 
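Editor's note: the devm_clk_register() change just below makes the devres hold only a struct clk pointer, since clk_register() now allocates the struct clk itself; the release callback then unregisters through that pointer. A minimal sketch of wrapping a registration call in devres this way, with hypothetical demo_* names standing in for the clk API:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>

struct demo_obj;					/* opaque handle owned by demo_register() */
struct demo_obj *demo_register(struct device *dev);	/* assumed to exist */
void demo_unregister(struct demo_obj *obj);		/* assumed to exist */

static void devm_demo_release(struct device *dev, void *res)
{
	/* res is the slot allocated below, i.e. a pointer to the object */
	demo_unregister(*(struct demo_obj **)res);
}

struct demo_obj *devm_demo_register(struct device *dev)
{
	struct demo_obj **slot, *obj;

	/* The devres only stores a pointer; the object owns its own memory. */
	slot = devres_alloc(devm_demo_release, sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return ERR_PTR(-ENOMEM);

	obj = demo_register(dev);
	if (IS_ERR(obj)) {
		devres_free(slot);
		return obj;
	}

	*slot = obj;
	devres_add(dev, slot);
	return obj;
}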
- */ -struct clk *clk_register(struct device *dev, struct clk_hw *hw) -{ - int ret; - struct clk *clk; - - clk = kzalloc(sizeof(*clk), GFP_KERNEL); - if (!clk) { - pr_err("%s: could not allocate clk\n", __func__); - ret = -ENOMEM; - goto fail_out; - } - - ret = _clk_register(dev, hw, clk); - if (!ret) - return clk; - kfree(clk); fail_out: return ERR_PTR(ret); @@ -1915,7 +1904,7 @@ static void devm_clk_release(struct device *dev, void *res) { - clk_unregister(res); + clk_unregister(*(struct clk **)res); } /** @@ -1930,18 +1919,18 @@ struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) { struct clk *clk; - int ret; + struct clk **clkp; - clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); - if (!clk) + clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); + if (!clkp) return ERR_PTR(-ENOMEM); - ret = _clk_register(dev, hw, clk); - if (!ret) { - devres_add(dev, clk); + clk = clk_register(dev, hw); + if (!IS_ERR(clk)) { + *clkp = clk; + devres_add(dev, clkp); } else { - devres_free(clk); - clk = ERR_PTR(ret); + devres_free(clkp); } return clk; --- linux-3.13.0.orig/drivers/clk/spear/spear3xx_clock.c +++ linux-3.13.0/drivers/clk/spear/spear3xx_clock.c @@ -211,7 +211,7 @@ /* array of all spear 320 clock lookups */ #ifdef CONFIG_MACH_SPEAR320 -#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) +#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) #define SPEAR320_UARTX_PCLK_MASK 0x1 --- linux-3.13.0.orig/drivers/clk/tegra/clk-pll.c +++ linux-3.13.0/drivers/clk/tegra/clk-pll.c @@ -1587,12 +1587,14 @@ val_aux = pll_readl(pll_params->aux_reg, pll); if (val & PLL_BASE_ENABLE) { - if (!(val_aux & PLLE_AUX_PLLRE_SEL)) + if ((val_aux & PLLE_AUX_PLLRE_SEL) || + (val_aux & PLLE_AUX_PLLP_SEL)) WARN(1, "pll_e enabled with unsupported parent %s\n", - (val & PLLE_AUX_PLLP_SEL) ? "pllp_out0" : "pll_ref"); + (val_aux & PLLE_AUX_PLLP_SEL) ? 
"pllp_out0" : + "pll_re_vco"); } else { - val_aux |= PLLE_AUX_PLLRE_SEL; - pll_writel(val, pll_params->aux_reg, pll); + val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); + pll_writel(val_aux, pll_params->aux_reg, pll); } clk = _tegra_clk_register_pll(pll, name, parent_name, flags, --- linux-3.13.0.orig/drivers/clk/tegra/clk-tegra114.c +++ linux-3.13.0/drivers/clk/tegra/clk-tegra114.c @@ -673,6 +673,7 @@ /* PLLE special case: use cpcon field to store cml divider value */ {336000000, 100000000, 100, 21, 16, 11}, {312000000, 100000000, 200, 26, 24, 13}, + {12000000, 100000000, 200, 1, 24, 13}, {0, 0, 0, 0, 0, 0}, }; @@ -1501,7 +1502,7 @@ clks[pll_re_out] = clk; /* PLLE */ - clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_re_vco", + clk = tegra_clk_register_plle_tegra114("pll_e_out0", "pll_ref", clk_base, 0, 100000000, &pll_e_params, pll_e_freq_table, NULL); clk_register_clkdev(clk, "pll_e_out0", NULL); --- linux-3.13.0.orig/drivers/clk/zynq/clkc.c +++ linux-3.13.0/drivers/clk/zynq/clkc.c @@ -290,6 +290,7 @@ clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x], "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 26, 0, &armclk_lock); + clk_prepare_enable(clks[cpu_2x]); clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1, 4 + 2 * tmp); --- linux-3.13.0.orig/drivers/clocksource/arm_arch_timer.c +++ linux-3.13.0/drivers/clocksource/arm_arch_timer.c @@ -426,10 +426,14 @@ u64 start_count; /* Register the CP15 based counter if we have one */ - if (type & ARCH_CP15_TIMER) - arch_timer_read_counter = arch_counter_get_cntvct; - else + if (type & ARCH_CP15_TIMER) { + if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual) + arch_timer_read_counter = arch_counter_get_cntvct; + else + arch_timer_read_counter = arch_counter_get_cntpct; + } else { arch_timer_read_counter = arch_counter_get_cntvct_mem; + } start_count = arch_timer_read_counter(); clocksource_register_hz(&clocksource_counter, arch_timer_rate); --- linux-3.13.0.orig/drivers/clocksource/exynos_mct.c +++ linux-3.13.0/drivers/clocksource/exynos_mct.c @@ -94,8 +94,8 @@ __raw_writel(value, reg_base + offset); if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) { - stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; - switch (offset & EXYNOS4_MCT_L_MASK) { + stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET; + switch (offset & ~EXYNOS4_MCT_L_MASK) { case MCT_L_TCON_OFFSET: mask = 1 << 3; /* L_TCON write status */ break; @@ -414,8 +414,6 @@ evt->set_mode = exynos4_tick_set_mode; evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; evt->rating = 450; - clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), - 0xf, 0x7fffffff); exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); @@ -428,9 +426,12 @@ evt->irq); return -EIO; } + irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu)); } else { enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); } + clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1), + 0xf, 0x7fffffff); return 0; } @@ -448,7 +449,6 @@ unsigned long action, void *hcpu) { struct mct_clock_event_device *mevt; - unsigned int cpu; /* * Grab cpu pointer in each case to avoid spurious @@ -459,12 +459,6 @@ mevt = this_cpu_ptr(&percpu_mct_tick); exynos4_local_timer_setup(&mevt->evt); break; - case CPU_ONLINE: - cpu = (unsigned long)hcpu; - if (mct_int_type == MCT_INT_SPI) - irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu], - cpumask_of(cpu)); - break; case CPU_DYING: mevt = this_cpu_ptr(&percpu_mct_tick); 
exynos4_local_timer_stop(&mevt->evt); --- linux-3.13.0.orig/drivers/clocksource/sun4i_timer.c +++ linux-3.13.0/drivers/clocksource/sun4i_timer.c @@ -182,6 +182,11 @@ /* Make sure timer is stopped before playing with interrupts */ sun4i_clkevt_time_stop(0); + sun4i_clockevent.cpumask = cpumask_of(0); + + clockevents_config_and_register(&sun4i_clockevent, rate, + TIMER_SYNC_TICKS, 0xffffffff); + ret = setup_irq(irq, &sun4i_timer_irq); if (ret) pr_warn("failed to setup irq %d\n", irq); @@ -189,11 +194,6 @@ /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); - - sun4i_clockevent.cpumask = cpumask_of(0); - - clockevents_config_and_register(&sun4i_clockevent, rate, - TIMER_SYNC_TICKS, 0xffffffff); } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer", sun4i_timer_init); --- linux-3.13.0.orig/drivers/clocksource/time-efm32.c +++ linux-3.13.0/drivers/clocksource/time-efm32.c @@ -225,12 +225,12 @@ clock_event_ddata.base = base; clock_event_ddata.periodic_top = DIV_ROUND_CLOSEST(rate, 1024 * HZ); - setup_irq(irq, &efm32_clock_event_irq); - clockevents_config_and_register(&clock_event_ddata.evtdev, DIV_ROUND_CLOSEST(rate, 1024), 0xf, 0xffff); + setup_irq(irq, &efm32_clock_event_irq); + return 0; err_get_irq: --- linux-3.13.0.orig/drivers/clocksource/vf_pit_timer.c +++ linux-3.13.0/drivers/clocksource/vf_pit_timer.c @@ -54,7 +54,7 @@ static u64 pit_read_sched_clock(void) { - return __raw_readl(clksrc_base + PITCVAL); + return ~__raw_readl(clksrc_base + PITCVAL); } static int __init pit_clocksource_init(unsigned long rate) --- linux-3.13.0.orig/drivers/connector/cn_proc.c +++ linux-3.13.0/drivers/connector/cn_proc.c @@ -369,7 +369,7 @@ return; /* Can only change if privileged. */ - if (!capable(CAP_NET_ADMIN)) { + if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) { err = EPERM; goto out; } --- linux-3.13.0.orig/drivers/cpufreq/Kconfig.powerpc +++ linux-3.13.0/drivers/cpufreq/Kconfig.powerpc @@ -54,3 +54,11 @@ help This adds the support for frequency switching on PA Semi PWRficient processors. + +config POWERNV_CPUFREQ + tristate "CPU frequency scaling for IBM POWERNV platform" + depends on PPC_POWERNV + default y + help + This adds support for CPU frequency switching on IBM POWERNV + platform --- linux-3.13.0.orig/drivers/cpufreq/Makefile +++ linux-3.13.0/drivers/cpufreq/Makefile @@ -47,7 +47,7 @@ # LITTLE drivers, so that it is probed last. 
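Editor's note on the sun4i and efm32 clocksource hunks above: both now call clockevents_config_and_register() before wiring up the timer interrupt, so an IRQ that fires immediately cannot reach a clock-event device that is not yet registered. A hedged sketch of that ordering, using hypothetical demo_* names and a minimal periodic-only device:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

static void demo_clkevt_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	/* ... program or stop the hardware timer here ... */
}

static struct clock_event_device demo_clockevent = {
	.name		= "demo_timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC,
	.rating		= 300,
	.set_mode	= demo_clkevt_set_mode,
};

static irqreturn_t demo_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Safe to call directly: the device was registered before the IRQ. */
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction demo_timer_irq = {
	.name		= "demo_timer",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= demo_timer_interrupt,
	.dev_id		= &demo_clockevent,
};

static void __init demo_timer_init(unsigned long rate, int irq)
{
	demo_clockevent.cpumask = cpumask_of(0);

	/* Register and configure the clock-event device first ... */
	clockevents_config_and_register(&demo_clockevent, rate, 1, 0xffffffff);

	/* ... and only then let the timer interrupt through. */
	if (setup_irq(irq, &demo_timer_irq))
		pr_warn("demo_timer: failed to set up irq %d\n", irq);
}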
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o -obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o +obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o @@ -86,6 +86,7 @@ obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o +obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o ################################################################################## # Other platform drivers --- linux-3.13.0.orig/drivers/cpufreq/cpufreq-cpu0.c +++ linux-3.13.0/drivers/cpufreq/cpufreq-cpu0.c @@ -131,7 +131,7 @@ return -ENOENT; } - cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); + cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); if (IS_ERR(cpu_reg)) { /* * If cpu0 regulator supply node is present, but regulator is @@ -146,23 +146,23 @@ PTR_ERR(cpu_reg)); } - cpu_clk = devm_clk_get(cpu_dev, NULL); + cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); pr_err("failed to get cpu0 clock: %d\n", ret); - goto out_put_node; + goto out_put_reg; } ret = of_init_opp_table(cpu_dev); if (ret) { pr_err("failed to init OPP table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); @@ -206,6 +206,12 @@ out_free_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_clk: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); +out_put_reg: + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); out_put_node: of_node_put(np); return ret; --- linux-3.13.0.orig/drivers/cpufreq/cpufreq.c +++ linux-3.13.0/drivers/cpufreq/cpufreq.c @@ -405,7 +405,18 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); show_one(scaling_min_freq, min); show_one(scaling_max_freq, max); -show_one(scaling_cur_freq, cur); + +static ssize_t show_scaling_cur_freq( + struct cpufreq_policy *policy, char *buf) +{ + ssize_t ret; + + if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) + ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); + else + ret = sprintf(buf, "%u\n", policy->cur); + return ret; +} static int cpufreq_set_policy(struct cpufreq_policy *policy, struct cpufreq_policy *new_policy); @@ -799,11 +810,11 @@ if (ret) goto err_out_kobj_put; } - if (has_target()) { - ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); - if (ret) - goto err_out_kobj_put; - } + + ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); + if (ret) + goto err_out_kobj_put; + if (cpufreq_driver->bios_limit) { ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); if (ret) @@ -1031,10 +1042,12 @@ * the creation of a brand new one. So we need to perform this update * by invoking update_policy_cpu(). 
*/ - if (frozen && cpu != policy->cpu) + if (frozen && cpu != policy->cpu) { update_policy_cpu(policy, cpu); - else + WARN_ON(kobject_move(&policy->kobj, &dev->kobj)); + } else { policy->cpu = cpu; + } policy->governor = CPUFREQ_DEFAULT_GOVERNOR; cpumask_copy(policy->cpus, cpumask_of(cpu)); @@ -1051,7 +1064,7 @@ goto err_set_policy_cpu; } - if (cpufreq_driver->get) { + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { policy->cur = cpufreq_driver->get(policy->cpu); if (!policy->cur) { pr_err("%s: ->get() failed\n", __func__); @@ -1249,9 +1262,10 @@ unsigned long flags; struct cpufreq_policy *policy; - read_lock_irqsave(&cpufreq_driver_lock, flags); + write_lock_irqsave(&cpufreq_driver_lock, flags); policy = per_cpu(cpufreq_cpu_data, cpu); - read_unlock_irqrestore(&cpufreq_driver_lock, flags); + per_cpu(cpufreq_cpu_data, cpu) = NULL; + write_unlock_irqrestore(&cpufreq_driver_lock, flags); if (!policy) { pr_debug("%s: No cpu_data found\n", __func__); @@ -1306,7 +1320,6 @@ } } - per_cpu(cpufreq_cpu_data, cpu) = NULL; return 0; } @@ -1447,23 +1460,16 @@ */ unsigned int cpufreq_get(unsigned int cpu) { - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); unsigned int ret_freq = 0; - if (cpufreq_disabled() || !cpufreq_driver) - return -ENOENT; - - BUG_ON(!policy); - - if (!down_read_trylock(&cpufreq_rwsem)) - return 0; - - down_read(&policy->rwsem); - - ret_freq = __cpufreq_get(cpu); + if (policy) { + down_read(&policy->rwsem); + ret_freq = __cpufreq_get(cpu); + up_read(&policy->rwsem); - up_read(&policy->rwsem); - up_read(&cpufreq_rwsem); + cpufreq_cpu_put(policy); + } return ret_freq; } @@ -2058,7 +2064,7 @@ * BIOS might change freq behind our back * -> ask driver for current freq and notify governors about a change */ - if (cpufreq_driver->get) { + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { new_policy.cur = cpufreq_driver->get(cpu); if (!policy->cur) { pr_debug("Driver did not initialize current freq"); @@ -2123,6 +2129,20 @@ * REGISTER / UNREGISTER CPUFREQ DRIVER * *********************************************************************/ +static char cpufreq_driver_name[CPUFREQ_NAME_LEN]; + +static int __init cpufreq_driver_setup(char *str) +{ + strlcpy(cpufreq_driver_name, str, CPUFREQ_NAME_LEN); + return 1; +} + +/* + * Set this name to only allow one specific cpu freq driver, e.g., + * cpufreq_driver=powernow-k8 + */ +__setup("cpufreq_driver=", cpufreq_driver_setup); + /** * cpufreq_register_driver - register a CPU Frequency driver * @driver_data: A struct cpufreq_driver containing the values# @@ -2146,7 +2166,13 @@ driver_data->target)) return -EINVAL; - pr_debug("trying to register driver %s\n", driver_data->name); + pr_debug("trying to register driver %s, cpufreq_driver=%s\n", + driver_data->name, cpufreq_driver_name); + + if (cpufreq_driver_name[0]) + if (!driver_data->name || + strcmp(cpufreq_driver_name, driver_data->name)) + return -EINVAL; if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; --- linux-3.13.0.orig/drivers/cpufreq/cpufreq_governor.c +++ linux-3.13.0/drivers/cpufreq/cpufreq_governor.c @@ -362,6 +362,11 @@ break; case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_data->mutex); + if (!cpu_cdbs->cur_policy) { + mutex_unlock(&dbs_data->mutex); + break; + } mutex_lock(&cpu_cdbs->timer_mutex); if (policy->max < cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, @@ -371,6 +376,7 @@ policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); 
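Editor's note: the surrounding cpufreq_governor.c hunk wraps the GOV_LIMITS path in dbs_data->mutex and bails out early when cur_policy has already been cleared by a concurrent GOV_STOP. A minimal sketch of that check-under-lock pattern, with hypothetical demo_* names:

#include <linux/mutex.h>

struct demo_policy;			/* stands in for struct cpufreq_policy */

struct demo_gov {
	struct mutex lock;		/* serializes start/stop vs. limit updates */
	struct demo_policy *cur_policy;	/* set on GOV_START, cleared on GOV_STOP */
};

static void demo_gov_stop(struct demo_gov *gov)
{
	mutex_lock(&gov->lock);
	gov->cur_policy = NULL;		/* later limit updates must be ignored */
	mutex_unlock(&gov->lock);
}

static void demo_gov_limits(struct demo_gov *gov)
{
	mutex_lock(&gov->lock);
	if (!gov->cur_policy) {
		/* Raced with demo_gov_stop(): nothing to clamp any more. */
		mutex_unlock(&gov->lock);
		return;
	}
	/* ... re-evaluate min/max against gov->cur_policy here ... */
	mutex_unlock(&gov->lock);
}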
mutex_unlock(&cpu_cdbs->timer_mutex); + mutex_unlock(&dbs_data->mutex); break; } return 0; --- linux-3.13.0.orig/drivers/cpufreq/integrator-cpufreq.c +++ linux-3.13.0/drivers/cpufreq/integrator-cpufreq.c @@ -212,9 +212,9 @@ return cpufreq_register_driver(&integrator_driver); } -static void __exit integrator_cpufreq_remove(struct platform_device *pdev) +static int __exit integrator_cpufreq_remove(struct platform_device *pdev) { - cpufreq_unregister_driver(&integrator_driver); + return cpufreq_unregister_driver(&integrator_driver); } static const struct of_device_id integrator_cpufreq_match[] = { --- linux-3.13.0.orig/drivers/cpufreq/intel_pstate.c +++ linux-3.13.0/drivers/cpufreq/intel_pstate.c @@ -34,11 +34,16 @@ #define SAMPLE_COUNT 3 -#define BYT_RATIOS 0x66a +#define BYT_RATIOS 0x66a +#define BYT_VIDS 0x66b +#define BYT_TURBO_RATIOS 0x66c +#define BYT_TURBO_VIDS 0x66d -#define FRAC_BITS 8 + +#define FRAC_BITS 6 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) #define fp_toint(X) ((X) >> FRAC_BITS) +#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS) static inline int32_t mul_fp(int32_t x, int32_t y) { @@ -50,10 +55,24 @@ return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); } +static u64 energy_divisor; + +static inline int ceiling_fp(int32_t x) +{ + int mask, ret; + + ret = fp_toint(x); + mask = (1 << FRAC_BITS) - 1; + if (x & mask) + ret += 1; + return ret; +} + struct sample { int32_t core_pct_busy; u64 aperf; u64 mperf; + unsigned long long tsc; int freq; }; @@ -61,9 +80,17 @@ int current_pstate; int min_pstate; int max_pstate; + int scaling; int turbo_pstate; }; +struct vid_data { + int min; + int max; + int turbo; + int32_t ratio; +}; + struct _pid { int setpoint; int32_t integral; @@ -82,12 +109,12 @@ struct timer_list timer; struct pstate_data pstate; + struct vid_data vid; struct _pid pid; - int min_pstate_count; - u64 prev_aperf; u64 prev_mperf; + unsigned long long prev_tsc; int sample_ptr; struct sample samples[SAMPLE_COUNT]; }; @@ -106,7 +133,9 @@ int (*get_max)(void); int (*get_min)(void); int (*get_turbo)(void); - void (*set)(int pstate); + int (*get_scaling)(void); + void (*set)(struct cpudata*, int pstate); + void (*get_vid)(struct cpudata *); }; struct cpu_defaults { @@ -119,6 +148,7 @@ struct perf_limits { int no_turbo; + int turbo_disabled; int max_perf_pct; int min_perf_pct; int32_t max_perf; @@ -129,6 +159,7 @@ static struct perf_limits limits = { .no_turbo = 0, + .turbo_disabled = 0, .max_perf_pct = 100, .max_perf = int_tofp(1), .min_perf_pct = 0, @@ -213,6 +244,18 @@ } } +static inline void update_turbo_state(void) +{ + u64 misc_en; + struct cpudata *cpu; + + cpu = all_cpu_data[0]; + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); + limits.turbo_disabled = + (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); +} + /************************** debugfs begin ************************/ static int pid_param_set(void *data, u64 val) { @@ -269,6 +312,20 @@ return sprintf(buf, "%u\n", limits.object); \ } +static ssize_t show_no_turbo(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + ssize_t ret; + + update_turbo_state(); + if (limits.turbo_disabled) + ret = sprintf(buf, "%u\n", limits.turbo_disabled); + else + ret = sprintf(buf, "%u\n", limits.no_turbo); + + return ret; +} + static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, const char *buf, size_t count) { @@ -277,7 +334,13 @@ ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - limits.no_turbo = clamp_t(int, input, 0 , 1); + + 
update_turbo_state(); + if (limits.turbo_disabled) { + pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); + return -EPERM; + } + limits.no_turbo = clamp_t(int, input, 0, 1); return count; } @@ -311,7 +374,6 @@ return count; } -show_one(no_turbo, no_turbo); show_one(max_perf_pct, max_perf_pct); show_one(min_perf_pct, min_perf_pct); @@ -348,16 +410,82 @@ { u64 value; rdmsrl(BYT_RATIOS, value); - return value & 0xFF; + return (value >> 8) & 0x7F; } static int byt_get_max_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 16) & 0xFF; + return (value >> 16) & 0x7F; +} + +static int byt_get_turbo_pstate(void) +{ + u64 value; + rdmsrl(BYT_TURBO_RATIOS, value); + return value & 0x7F; +} + +static void byt_set_pstate(struct cpudata *cpudata, int pstate) +{ + u64 val; + int32_t vid_fp; + u32 vid; + + val = pstate << 8; + if (limits.no_turbo && !limits.turbo_disabled) + val |= (u64)1 << 32; + + vid_fp = cpudata->vid.min + mul_fp( + int_tofp(pstate - cpudata->pstate.min_pstate), + cpudata->vid.ratio); + + vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); + vid = ceiling_fp(vid_fp); + + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; + + val |= vid; + + wrmsrl(MSR_IA32_PERF_CTL, val); } +#define BYT_BCLK_FREQS 5 +static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; + +static int byt_get_scaling(void) +{ + u64 value; + int i; + + rdmsrl(MSR_FSB_FREQ, value); + i = value & 0x3; + + BUG_ON(i > BYT_BCLK_FREQS); + + return byt_freq_table[i] * 100; +} + +static void byt_get_vid(struct cpudata *cpudata) +{ + u64 value; + + + rdmsrl(BYT_VIDS, value); + cpudata->vid.min = int_tofp((value >> 8) & 0x7f); + cpudata->vid.max = int_tofp((value >> 16) & 0x7f); + cpudata->vid.ratio = div_fp( + cpudata->vid.max - cpudata->vid.min, + int_tofp(cpudata->pstate.max_pstate - + cpudata->pstate.min_pstate)); + + rdmsrl(BYT_TURBO_VIDS, value); + cpudata->vid.turbo = value & 0x7f; +} + + static int core_get_min_pstate(void) { u64 value; @@ -384,12 +512,17 @@ return ret; } -static void core_set_pstate(int pstate) +static inline int core_get_scaling(void) +{ + return 100000; +} + +static void core_set_pstate(struct cpudata *cpudata, int pstate) { u64 val; val = pstate << 8; - if (limits.no_turbo) + if (limits.no_turbo && !limits.turbo_disabled) val |= (u64)1 << 32; wrmsrl(MSR_IA32_PERF_CTL, val); @@ -408,6 +541,7 @@ .get_max = core_get_max_pstate, .get_min = core_get_min_pstate, .get_turbo = core_get_turbo_pstate, + .get_scaling = core_get_scaling, .set = core_set_pstate, }, }; @@ -424,8 +558,10 @@ .funcs = { .get_max = byt_get_max_pstate, .get_min = byt_get_min_pstate, - .get_turbo = byt_get_max_pstate, - .set = core_set_pstate, + .get_turbo = byt_get_turbo_pstate, + .set = byt_set_pstate, + .get_scaling = byt_get_scaling, + .get_vid = byt_get_vid, }, }; @@ -435,7 +571,8 @@ int max_perf = cpu->pstate.turbo_pstate; int max_perf_adj; int min_perf; - if (limits.no_turbo) + + if (limits.no_turbo || limits.turbo_disabled) max_perf = cpu->pstate.max_pstate; max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); @@ -451,6 +588,8 @@ { int max_perf, min_perf; + update_turbo_state(); + intel_pstate_get_min_max(cpu, &min_perf, &max_perf); pstate = clamp_t(int, pstate, min_perf, max_perf); @@ -458,11 +597,11 @@ if (pstate == cpu->pstate.current_pstate) return; - trace_cpu_frequency(pstate * 100000, cpu->cpu); + trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); cpu->pstate.current_pstate = pstate; - 
pstate_funcs.set(pstate); + pstate_funcs.set(cpu, pstate); } static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) @@ -487,6 +626,10 @@ cpu->pstate.min_pstate = pstate_funcs.get_min(); cpu->pstate.max_pstate = pstate_funcs.get_max(); cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); + cpu->pstate.scaling = pstate_funcs.get_scaling(); + + if (pstate_funcs.get_vid) + pstate_funcs.get_vid(cpu); /* * goto max pstate so we don't slow up boot if we are built-in if we are @@ -498,30 +641,50 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { - u64 core_pct; - core_pct = div64_u64(int_tofp(sample->aperf * 100), - sample->mperf); - sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000); + int32_t core_pct; + int32_t c0_pct; + + core_pct = div_fp(int_tofp((sample->aperf)), + int_tofp((sample->mperf))); + core_pct = mul_fp(core_pct, int_tofp(100)); + FP_ROUNDUP(core_pct); + + c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc)); + + sample->freq = fp_toint( + mul_fp(int_tofp( + cpu->pstate.max_pstate * cpu->pstate.scaling / 100), + core_pct)); - sample->core_pct_busy = core_pct; + sample->core_pct_busy = mul_fp(core_pct, c0_pct); } static inline void intel_pstate_sample(struct cpudata *cpu) { u64 aperf, mperf; + unsigned long long tsc; rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + tsc = native_read_tsc(); + + aperf = aperf >> FRAC_BITS; + mperf = mperf >> FRAC_BITS; + tsc = tsc >> FRAC_BITS; + cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; cpu->samples[cpu->sample_ptr].aperf = aperf; cpu->samples[cpu->sample_ptr].mperf = mperf; + cpu->samples[cpu->sample_ptr].tsc = tsc; cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; + cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc; intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; + cpu->prev_tsc = tsc; } static inline void intel_pstate_set_sample_time(struct cpudata *cpu) @@ -540,7 +703,8 @@ core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy; max_pstate = int_tofp(cpu->pstate.max_pstate); current_pstate = int_tofp(cpu->pstate.current_pstate); - return mul_fp(core_busy, div_fp(max_pstate, current_pstate)); + core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); + return FP_ROUNDUP(core_busy); } static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) @@ -556,6 +720,7 @@ ctl = pid_calc(pid, busy_scaled); steps = abs(ctl); + if (ctl < 0) intel_pstate_pstate_increase(cpu, steps); else @@ -565,17 +730,23 @@ static void intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; + struct sample *sample; + u64 energy; intel_pstate_sample(cpu); + + sample = &cpu->samples[cpu->sample_ptr]; + rdmsrl(MSR_PKG_ENERGY_STATUS, energy); + intel_pstate_adjust_busy_pstate(cpu); - if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { - cpu->min_pstate_count++; - if (!(cpu->min_pstate_count % 5)) { - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); - } - } else - cpu->min_pstate_count = 0; + trace_pstate_sample(fp_toint(sample->core_pct_busy), + fp_toint(intel_pstate_get_scaled_busy(cpu)), + cpu->pstate.current_pstate, + sample->mperf, + sample->aperf, + div64_u64(energy, energy_divisor), + sample->freq); intel_pstate_set_sample_time(cpu); } @@ -608,12 +779,15 @@ if (!id) return -ENODEV; - all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); + if 
(!all_cpu_data[cpunum]) + all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), + GFP_KERNEL); if (!all_cpu_data[cpunum]) return -ENOMEM; cpu = all_cpu_data[cpunum]; + cpu->cpu = cpunum; intel_pstate_get_cpu_pstates(cpu); if (!cpu->pstate.current_pstate) { all_cpu_data[cpunum] = NULL; @@ -621,8 +795,6 @@ return -ENODATA; } - cpu->cpu = cpunum; - init_timer_deferrable(&cpu->timer); cpu->timer.function = intel_pstate_timer_func; cpu->timer.data = @@ -663,6 +835,7 @@ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { limits.min_perf_pct = 100; limits.min_perf = int_tofp(1); + limits.max_policy_pct = 100; limits.max_perf_pct = 100; limits.max_perf = int_tofp(1); limits.no_turbo = 0; @@ -696,8 +869,6 @@ int cpu = policy->cpu; del_timer(&all_cpu_data[cpu]->timer); - kfree(all_cpu_data[cpu]); - all_cpu_data[cpu] = NULL; return 0; } @@ -712,18 +883,18 @@ cpu = all_cpu_data[policy->cpu]; - if (!limits.no_turbo && - limits.min_perf_pct == 100 && limits.max_perf_pct == 100) + if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) policy->policy = CPUFREQ_POLICY_PERFORMANCE; else policy->policy = CPUFREQ_POLICY_POWERSAVE; - policy->min = cpu->pstate.min_pstate * 100000; - policy->max = cpu->pstate.turbo_pstate * 100000; + policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; + policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; /* cpuinfo and default policy values */ - policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; - policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000; + policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; + policy->cpuinfo.max_freq = + cpu->pstate.turbo_pstate * cpu->pstate.scaling; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; cpumask_set_cpu(policy->cpu, policy->cpus); @@ -740,7 +911,7 @@ .name = "intel_pstate", }; -static int __initdata no_load; +static int __initdata no_load = 1; static int intel_pstate_msrs_not_valid(void) { @@ -781,7 +952,9 @@ pstate_funcs.get_max = funcs->get_max; pstate_funcs.get_min = funcs->get_min; pstate_funcs.get_turbo = funcs->get_turbo; + pstate_funcs.get_scaling = funcs->get_scaling; pstate_funcs.set = funcs->set; + pstate_funcs.get_vid = funcs->get_vid; } #if IS_ENABLED(CONFIG_ACPI) @@ -855,6 +1028,7 @@ int cpu, rc = 0; const struct x86_cpu_id *id; struct cpu_defaults *cpu_info; + u64 units; if (no_load) return -ENODEV; @@ -888,8 +1062,14 @@ if (rc) goto out; + rdmsrl(MSR_RAPL_POWER_UNIT, units); + energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */ + intel_pstate_debug_expose_params(); intel_pstate_sysfs_expose_params(); + + pr_info("Consider also installing thermald for improved thermal control.\n"); + return rc; out: get_online_cpus(); @@ -913,6 +1093,8 @@ if (!strcmp(str, "disable")) no_load = 1; + else if (!strcmp(str, "enable")) + no_load = 0; return 0; } early_param("intel_pstate", intel_pstate_setup); --- linux-3.13.0.orig/drivers/cpufreq/powernow-k6.c +++ linux-3.13.0/drivers/cpufreq/powernow-k6.c @@ -26,41 +26,108 @@ static unsigned int busfreq; /* FSB, in 10 kHz */ static unsigned int max_multiplier; +static unsigned int param_busfreq = 0; +static unsigned int param_max_multiplier = 0; + +module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO); +MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)"); + +module_param_named(bus_frequency, param_busfreq, uint, S_IRUGO); +MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz"); /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ static struct 
cpufreq_frequency_table clock_ratio[] = { - {45, /* 000 -> 4.5x */ 0}, + {60, /* 110 -> 6.0x */ 0}, + {55, /* 011 -> 5.5x */ 0}, {50, /* 001 -> 5.0x */ 0}, + {45, /* 000 -> 4.5x */ 0}, {40, /* 010 -> 4.0x */ 0}, - {55, /* 011 -> 5.5x */ 0}, - {20, /* 100 -> 2.0x */ 0}, - {30, /* 101 -> 3.0x */ 0}, - {60, /* 110 -> 6.0x */ 0}, {35, /* 111 -> 3.5x */ 0}, + {30, /* 101 -> 3.0x */ 0}, + {20, /* 100 -> 2.0x */ 0}, {0, CPUFREQ_TABLE_END} }; +static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 }; +static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 }; + +static const struct { + unsigned freq; + unsigned mult; +} usual_frequency_table[] = { + { 400000, 40 }, // 100 * 4 + { 450000, 45 }, // 100 * 4.5 + { 475000, 50 }, // 95 * 5 + { 500000, 50 }, // 100 * 5 + { 506250, 45 }, // 112.5 * 4.5 + { 533500, 55 }, // 97 * 5.5 + { 550000, 55 }, // 100 * 5.5 + { 562500, 50 }, // 112.5 * 5 + { 570000, 60 }, // 95 * 6 + { 600000, 60 }, // 100 * 6 + { 618750, 55 }, // 112.5 * 5.5 + { 660000, 55 }, // 120 * 5.5 + { 675000, 60 }, // 112.5 * 6 + { 720000, 60 }, // 120 * 6 +}; + +#define FREQ_RANGE 3000 /** * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier * - * Returns the current setting of the frequency multiplier. Core clock + * Returns the current setting of the frequency multiplier. Core clock * speed is frequency of the Front-Side Bus multiplied with this value. */ static int powernow_k6_get_cpu_multiplier(void) { - u64 invalue = 0; + unsigned long invalue = 0; u32 msrval; + local_irq_disable(); + msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ invalue = inl(POWERNOW_IOPORT + 0x8); msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - return clock_ratio[(invalue >> 5)&7].driver_data; + local_irq_enable(); + + return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data; } +static void powernow_k6_set_cpu_multiplier(unsigned int best_i) +{ + unsigned long outvalue, invalue; + unsigned long msrval; + unsigned long cr0; + + /* we now need to transform best_i to the BVC format, see AMD#23446 */ + + /* + * The processor doesn't respond to inquiry cycles while changing the + * frequency, so we must disable cache. + */ + local_irq_disable(); + cr0 = read_cr0(); + write_cr0(cr0 | X86_CR0_CD); + wbinvd(); + + outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5); + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue = inl(POWERNOW_IOPORT + 0x8); + invalue = invalue & 0x1f; + outvalue = outvalue | invalue; + outl(outvalue, (POWERNOW_IOPORT + 0x8)); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + write_cr0(cr0); + local_irq_enable(); +} /** * powernow_k6_target - set the PowerNow! 
multiplier @@ -71,8 +138,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy, unsigned int best_i) { - unsigned long outvalue = 0, invalue = 0; - unsigned long msrval; struct cpufreq_freqs freqs; if (clock_ratio[best_i].driver_data > max_multiplier) { @@ -85,35 +150,63 @@ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* we now need to transform best_i to the BVC format, see AMD#23446 */ - - outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); - - msrval = POWERNOW_IOPORT + 0x1; - wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ - invalue = inl(POWERNOW_IOPORT + 0x8); - invalue = invalue & 0xf; - outvalue = outvalue | invalue; - outl(outvalue , (POWERNOW_IOPORT + 0x8)); - msrval = POWERNOW_IOPORT + 0x0; - wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + powernow_k6_set_cpu_multiplier(best_i); cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); return 0; } - static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { unsigned int i, f; + unsigned khz; if (policy->cpu != 0) return -ENODEV; - /* get frequencies */ - max_multiplier = powernow_k6_get_cpu_multiplier(); - busfreq = cpu_khz / max_multiplier; + max_multiplier = 0; + khz = cpu_khz; + for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) { + if (khz >= usual_frequency_table[i].freq - FREQ_RANGE && + khz <= usual_frequency_table[i].freq + FREQ_RANGE) { + khz = usual_frequency_table[i].freq; + max_multiplier = usual_frequency_table[i].mult; + break; + } + } + if (param_max_multiplier) { + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].driver_data == param_max_multiplier) { + max_multiplier = param_max_multiplier; + goto have_max_multiplier; + } + } + printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); + return -EINVAL; + } + + if (!max_multiplier) { + printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz); + printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n"); + return -EOPNOTSUPP; + } + +have_max_multiplier: + param_max_multiplier = max_multiplier; + + if (param_busfreq) { + if (param_busfreq >= 50000 && param_busfreq <= 150000) { + busfreq = param_busfreq / 10; + goto have_busfreq; + } + printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n"); + return -EINVAL; + } + + busfreq = khz / max_multiplier; +have_busfreq: + param_busfreq = busfreq * 10; /* table init */ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { @@ -125,7 +218,7 @@ } /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 200000; + policy->cpuinfo.transition_latency = 500000; return cpufreq_table_validate_and_show(policy, clock_ratio); } --- linux-3.13.0.orig/drivers/cpufreq/powernow-k8.c +++ linux-3.13.0/drivers/cpufreq/powernow-k8.c @@ -1081,7 +1081,7 @@ { struct powernow_k8_data *data; struct init_on_cpu init_on_cpu; - int rc; + int rc, cpu; smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); if (rc) @@ -1145,7 +1145,9 @@ pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", data->currfid, data->currvid); - per_cpu(powernow_data, pol->cpu) = data; + /* Point all the CPUs in this policy to the same data */ + for_each_cpu(cpu, pol->cpus) + per_cpu(powernow_data, cpu) = data; return 0; @@ -1160,6 +1162,7 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol) { struct powernow_k8_data *data = 
per_cpu(powernow_data, pol->cpu); + int cpu; if (!data) return -EINVAL; @@ -1170,7 +1173,8 @@ kfree(data->powernow_table); kfree(data); - per_cpu(powernow_data, pol->cpu) = NULL; + for_each_cpu(cpu, pol->cpus) + per_cpu(powernow_data, cpu) = NULL; return 0; } --- linux-3.13.0.orig/drivers/cpufreq/powernv-cpufreq.c +++ linux-3.13.0/drivers/cpufreq/powernv-cpufreq.c @@ -0,0 +1,341 @@ +/* + * POWERNV cpufreq driver for the IBM POWER processors + * + * (C) Copyright IBM 2014 + * + * Author: Vaidyanathan Srinivasan + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "powernv-cpufreq: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define POWERNV_MAX_PSTATES 256 + +static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; + +/* + * Note: The set of pstates consists of contiguous integers, the + * smallest of which is indicated by powernv_pstate_info.min, the + * largest of which is indicated by powernv_pstate_info.max. + * + * The nominal pstate is the highest non-turbo pstate in this + * platform. This is indicated by powernv_pstate_info.nominal. + */ +static struct powernv_pstate_info { + int min; + int max; + int nominal; + int nr_pstates; +} powernv_pstate_info; + +/* + * Initialize the freq table based on data obtained + * from the firmware passed via device-tree + */ +static int init_powernv_pstates(void) +{ + struct device_node *power_mgt; + int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0; + const __be32 *pstate_ids, *pstate_freqs; + u32 len_ids, len_freqs; + + power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); + if (!power_mgt) { + pr_warn("power-mgt node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) { + pr_warn("ibm,pstate-min node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) { + pr_warn("ibm,pstate-max node not found\n"); + return -ENODEV; + } + + if (of_property_read_u32(power_mgt, "ibm,pstate-nominal", + &pstate_nominal)) { + pr_warn("ibm,pstate-nominal not found\n"); + return -ENODEV; + } + pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min, + pstate_nominal, pstate_max); + + pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids); + if (!pstate_ids) { + pr_warn("ibm,pstate-ids not found\n"); + return -ENODEV; + } + + pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz", + &len_freqs); + if (!pstate_freqs) { + pr_warn("ibm,pstate-frequencies-mhz not found\n"); + return -ENODEV; + } + + WARN_ON(len_ids != len_freqs); + nr_pstates = min(len_ids, len_freqs) / sizeof(u32); + if (!nr_pstates) { + pr_warn("No PStates found\n"); + return -ENODEV; + } + + pr_debug("NR PStates %d\n", nr_pstates); + for (i = 0; i < nr_pstates; i++) { + u32 id = be32_to_cpu(pstate_ids[i]); + u32 freq = be32_to_cpu(pstate_freqs[i]); + + pr_debug("PState id %d freq %d MHz\n", id, freq); + powernv_freqs[i].frequency = freq * 1000; /* kHz */ + powernv_freqs[i].driver_data = id; + } + /* 
End of list marker entry */ + powernv_freqs[i].frequency = CPUFREQ_TABLE_END; + + powernv_pstate_info.min = pstate_min; + powernv_pstate_info.max = pstate_max; + powernv_pstate_info.nominal = pstate_nominal; + powernv_pstate_info.nr_pstates = nr_pstates; + + return 0; +} + +/* Returns the CPU frequency corresponding to the pstate_id. */ +static unsigned int pstate_id_to_freq(int pstate_id) +{ + int i; + + i = powernv_pstate_info.max - pstate_id; + BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0); + + return powernv_freqs[i].frequency; +} + +/* + * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by + * the firmware + */ +static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, + char *buf) +{ + return sprintf(buf, "%u\n", + pstate_id_to_freq(powernv_pstate_info.nominal)); +} + +struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq = + __ATTR_RO(cpuinfo_nominal_freq); + +static struct freq_attr *powernv_cpu_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + &cpufreq_freq_attr_cpuinfo_nominal_freq, + NULL, +}; + +/* Helper routines */ + +/* Access helpers to power mgt SPR */ + +static inline unsigned long get_pmspr(unsigned long sprn) +{ + switch (sprn) { + case SPRN_PMCR: + return mfspr(SPRN_PMCR); + + case SPRN_PMICR: + return mfspr(SPRN_PMICR); + + case SPRN_PMSR: + return mfspr(SPRN_PMSR); + } + BUG(); +} + +static inline void set_pmspr(unsigned long sprn, unsigned long val) +{ + switch (sprn) { + case SPRN_PMCR: + mtspr(SPRN_PMCR, val); + return; + + case SPRN_PMICR: + mtspr(SPRN_PMICR, val); + return; + } + BUG(); +} + +/* + * Use objects of this type to query/update + * pstates on a remote CPU via smp_call_function. + */ +struct powernv_smp_call_data { + unsigned int freq; + int pstate_id; +}; + +/* + * powernv_read_cpu_freq: Reads the current frequency on this CPU. + * + * Called via smp_call_function. + * + * Note: The caller of the smp_call_function should pass an argument of + * the type 'struct powernv_smp_call_data *' along with this function. + * + * The current frequency on this CPU will be returned via + * ((struct powernv_smp_call_data *)arg)->freq; + */ +static void powernv_read_cpu_freq(void *arg) +{ + unsigned long pmspr_val; + s8 local_pstate_id; + struct powernv_smp_call_data *freq_data = arg; + + pmspr_val = get_pmspr(SPRN_PMSR); + + /* + * The local pstate id corresponds bits 48..55 in the PMSR. + * Note: Watch out for the sign! + */ + local_pstate_id = (pmspr_val >> 48) & 0xFF; + freq_data->pstate_id = local_pstate_id; + freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); + + pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n", + raw_smp_processor_id(), pmspr_val, freq_data->pstate_id, + freq_data->freq); +} + +/* + * powernv_cpufreq_get: Returns the CPU frequency as reported by the + * firmware for CPU 'cpu'. This value is reported through the sysfs + * file cpuinfo_cur_freq. + */ +unsigned int powernv_cpufreq_get(unsigned int cpu) +{ + struct powernv_smp_call_data freq_data; + + smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq, + &freq_data, 1); + + return freq_data.freq; +} + +/* + * set_pstate: Sets the pstate on this CPU. + * + * This is called via an smp_call_function. + * + * The caller must ensure that freq_data is of the type + * (struct powernv_smp_call_data *) and the pstate_id which needs to be set + * on this CPU should be present in freq_data->pstate_id. 
+ */ +static void set_pstate(void *freq_data) +{ + unsigned long val; + unsigned long pstate_ul = + ((struct powernv_smp_call_data *) freq_data)->pstate_id; + + val = get_pmspr(SPRN_PMCR); + val = val & 0x0000FFFFFFFFFFFFULL; + + pstate_ul = pstate_ul & 0xFF; + + /* Set both global(bits 56..63) and local(bits 48..55) PStates */ + val = val | (pstate_ul << 56) | (pstate_ul << 48); + + pr_debug("Setting cpu %d pmcr to %016lX\n", + raw_smp_processor_id(), val); + set_pmspr(SPRN_PMCR, val); +} + +/* + * powernv_cpufreq_target_index: Sets the frequency corresponding to + * the cpufreq table entry indexed by new_index on the cpus in the + * mask policy->cpus + */ +static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, + unsigned int new_index) +{ + struct powernv_smp_call_data freq_data; + + freq_data.pstate_id = powernv_freqs[new_index].driver_data; + + /* + * Use smp_call_function to send IPI and execute the + * mtspr on target CPU. We could do that without IPI + * if current CPU is within policy->cpus (core) + */ + smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); + + return 0; +} + +static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + int base, i; + + base = cpu_first_thread_sibling(policy->cpu); + + for (i = 0; i < threads_per_core; i++) + cpumask_set_cpu(base + i, policy->cpus); + + return cpufreq_table_validate_and_show(policy, powernv_freqs); +} + +static struct cpufreq_driver powernv_cpufreq_driver = { + .name = "powernv-cpufreq", + .flags = CPUFREQ_CONST_LOOPS, + .init = powernv_cpufreq_cpu_init, + .verify = cpufreq_generic_frequency_table_verify, + .target_index = powernv_cpufreq_target_index, + .get = powernv_cpufreq_get, + .attr = powernv_cpu_freq_attr, +}; + +static int __init powernv_cpufreq_init(void) +{ + int rc = 0; + + /* Discover pstates from device tree and init */ + rc = init_powernv_pstates(); + if (rc) { + pr_info("powernv-cpufreq disabled. 
System does not support PState control\n"); + return rc; + } + + return cpufreq_register_driver(&powernv_cpufreq_driver); +} +module_init(powernv_cpufreq_init); + +static void __exit powernv_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&powernv_cpufreq_driver); +} +module_exit(powernv_cpufreq_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Vaidyanathan Srinivasan "); --- linux-3.13.0.orig/drivers/cpufreq/s3c2416-cpufreq.c +++ linux-3.13.0/drivers/cpufreq/s3c2416-cpufreq.c @@ -263,7 +263,7 @@ } #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE -static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) +static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) { int count, v, i, found; struct cpufreq_frequency_table *freq; @@ -335,7 +335,7 @@ .notifier_call = s3c2416_cpufreq_reboot_notifier_evt, }; -static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) +static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; struct cpufreq_frequency_table *freq; --- linux-3.13.0.orig/drivers/cpufreq/s3c24xx-cpufreq.c +++ linux-3.13.0/drivers/cpufreq/s3c24xx-cpufreq.c @@ -458,7 +458,7 @@ }; -int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) +int s3c_cpufreq_register(struct s3c_cpufreq_info *info) { if (!info || !info->name) { printk(KERN_ERR "%s: failed to pass valid information\n", --- linux-3.13.0.orig/drivers/cpufreq/speedstep-lib.c +++ linux-3.13.0/drivers/cpufreq/speedstep-lib.c @@ -400,6 +400,7 @@ pr_debug("previous speed is %u\n", prev_speed); + preempt_disable(); local_irq_save(flags); /* switch to low state */ @@ -464,6 +465,8 @@ out: local_irq_restore(flags); + preempt_enable(); + return ret; } EXPORT_SYMBOL_GPL(speedstep_get_freqs); --- linux-3.13.0.orig/drivers/cpufreq/speedstep-smi.c +++ linux-3.13.0/drivers/cpufreq/speedstep-smi.c @@ -188,6 +188,7 @@ return; /* Disable IRQs */ + preempt_disable(); local_irq_save(flags); command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); @@ -198,9 +199,19 @@ do { if (retry) { + /* + * We need to enable interrupts, otherwise the blockage + * won't resolve. + * + * We disable preemption so that other processes don't + * run. If other processes were running, they could + * submit more DMA requests, making the blockage worse. + */ pr_debug("retry %u, previous result %u, waiting...\n", retry, result); + local_irq_enable(); mdelay(retry * 50); + local_irq_disable(); } retry++; __asm__ __volatile__( @@ -217,6 +228,7 @@ /* enable IRQs */ local_irq_restore(flags); + preempt_enable(); if (new_state == state) pr_debug("change to %u MHz succeeded after %u tries " --- linux-3.13.0.orig/drivers/crypto/Kconfig +++ linux-3.13.0/drivers/crypto/Kconfig @@ -323,7 +323,7 @@ config CRYPTO_DEV_NX bool "Support for IBM Power7+ in-Nest cryptographic acceleration" - depends on PPC64 && IBMVIO + depends on PPC64 && IBMVIO && !CPU_LITTLE_ENDIAN default n help Support for Power7+ in-Nest cryptographic acceleration. 
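The speedstep-lib and speedstep-smi hunks above share one pattern: the frequency probe/transition runs with preemption disabled so other tasks cannot queue more work, while the SMI retry loop briefly re-enables interrupts around its delay so outstanding I/O can complete and the blockage can resolve. A minimal, self-contained sketch of that pattern follows; do_transition() and MAX_RETRIES are hypothetical stand-ins for the SMI command and retry limit, not code taken from the patch.

#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/delay.h>

#define MAX_RETRIES	5		/* hypothetical retry limit */

static int do_transition(void)		/* hypothetical stand-in for the SMI call */
{
	return 0;
}

static int try_transition_with_retries(void)
{
	unsigned long flags;
	unsigned int retry = 0;
	int result;

	preempt_disable();		/* keep other tasks from queueing more work */
	local_irq_save(flags);

	do {
		if (retry) {
			/* let pending interrupts run, then try again */
			local_irq_enable();
			mdelay(retry * 50);
			local_irq_disable();
		}
		retry++;
		result = do_transition();
	} while (result != 0 && retry <= MAX_RETRIES);

	local_irq_restore(flags);
	preempt_enable();

	return result;
}

Keeping local_irq_save()/local_irq_restore() as the outermost IRQ pair restores the caller's interrupt state unchanged even when the transition succeeds on the first attempt.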
--- linux-3.13.0.orig/drivers/crypto/caam/caamhash.c +++ linux-3.13.0/drivers/crypto/caam/caamhash.c @@ -835,8 +835,9 @@ edesc->sec4_sg + sec4_sg_src_index, chained); if (*next_buflen) { - sg_copy_part(next_buf, req->src, to_hash - - *buflen, req->nbytes); + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); state->current_buf = !state->current_buf; } } else { @@ -869,7 +870,8 @@ kfree(edesc); } } else if (*next_buflen) { - sg_copy(buf + *buflen, req->src, req->nbytes); + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); *buflen = *next_buflen; *next_buflen = last_buflen; } @@ -1216,8 +1218,9 @@ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, chained); if (*next_buflen) { - sg_copy_part(next_buf, req->src, to_hash - *buflen, - req->nbytes); + scatterwalk_map_and_copy(next_buf, req->src, + to_hash - *buflen, + *next_buflen, 0); state->current_buf = !state->current_buf; } @@ -1248,7 +1251,8 @@ kfree(edesc); } } else if (*next_buflen) { - sg_copy(buf + *buflen, req->src, req->nbytes); + scatterwalk_map_and_copy(buf + *buflen, req->src, 0, + req->nbytes, 0); *buflen = *next_buflen; *next_buflen = 0; } @@ -1348,9 +1352,9 @@ struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; - u8 *next_buf = state->buf_0 + state->current_buf * - CAAM_MAX_HASH_BLOCK_SIZE; - int *next_buflen = &state->buflen_0 + state->current_buf; + u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0; + int *next_buflen = state->current_buf ? + &state->buflen_1 : &state->buflen_0; int to_hash; u32 *sh_desc = ctx->sh_desc_update_first, *desc; dma_addr_t ptr = ctx->sh_desc_update_first_dma; @@ -1405,7 +1409,8 @@ } if (*next_buflen) - sg_copy_part(next_buf, req->src, to_hash, req->nbytes); + scatterwalk_map_and_copy(next_buf, req->src, to_hash, + *next_buflen, 0); sh_len = desc_len(sh_desc); desc = edesc->hw_desc; @@ -1438,7 +1443,8 @@ state->update = ahash_update_no_ctx; state->finup = ahash_finup_no_ctx; state->final = ahash_final_no_ctx; - sg_copy(next_buf, req->src, req->nbytes); + scatterwalk_map_and_copy(next_buf, req->src, 0, + req->nbytes, 0); } #ifdef DEBUG print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", --- linux-3.13.0.orig/drivers/crypto/caam/error.c +++ linux-3.13.0/drivers/crypto/caam/error.c @@ -16,9 +16,13 @@ char *tmp; \ \ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ - sprintf(tmp, format, param); \ - strcat(str, tmp); \ - kfree(tmp); \ + if (likely(tmp)) { \ + sprintf(tmp, format, param); \ + strcat(str, tmp); \ + kfree(tmp); \ + } else { \ + strcat(str, "kmalloc failure in SPRINTFCAT"); \ + } \ } static void report_jump_idx(u32 status, char *outstr) --- linux-3.13.0.orig/drivers/crypto/caam/jr.c +++ linux-3.13.0/drivers/crypto/caam/jr.c @@ -453,8 +453,8 @@ int error; jrdev = &pdev->dev; - jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), - GFP_KERNEL); + jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr), + GFP_KERNEL); if (!jrpriv) return -ENOMEM; @@ -487,10 +487,8 @@ /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ - if (error) { - kfree(jrpriv); + if (error) return error; - } jrpriv->dev = jrdev; spin_lock(&driver_data.jr_alloc_lock); --- linux-3.13.0.orig/drivers/crypto/caam/key_gen.c +++ linux-3.13.0/drivers/crypto/caam/key_gen.c @@ -51,23 +51,29 @@ u32 *desc; struct split_key_result result; dma_addr_t dma_addr_in, dma_addr_out; - 
int ret = 0; + int ret = -ENOMEM; desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); if (!desc) { dev_err(jrdev, "unable to allocate key input memory\n"); - return -ENOMEM; + return ret; } - init_job_desc(desc, 0); - dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, dma_addr_in)) { dev_err(jrdev, "unable to map key input memory\n"); - kfree(desc); - return -ENOMEM; + goto out_free; } + + dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, dma_addr_out)) { + dev_err(jrdev, "unable to map key output memory\n"); + goto out_unmap_in; + } + + init_job_desc(desc, 0); append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); /* Sets MDHA up into an HMAC-INIT */ @@ -84,13 +90,6 @@ * FIFO_STORE with the explicit split-key content store * (0x26 output type) */ - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, dma_addr_out)) { - dev_err(jrdev, "unable to map key output memory\n"); - kfree(desc); - return -ENOMEM; - } append_fifo_store(desc, dma_addr_out, split_key_len, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); @@ -118,10 +117,10 @@ dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, DMA_FROM_DEVICE); +out_unmap_in: dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); - +out_free: kfree(desc); - return ret; } EXPORT_SYMBOL(gen_split_key); --- linux-3.13.0.orig/drivers/crypto/caam/sg_sw_sec4.h +++ linux-3.13.0/drivers/crypto/caam/sg_sw_sec4.h @@ -116,57 +116,3 @@ } return nents; } - -/* Map SG page in kernel virtual address space and copy */ -static inline void sg_map_copy(u8 *dest, struct scatterlist *sg, - int len, int offset) -{ - u8 *mapped_addr; - - /* - * Page here can be user-space pinned using get_user_pages - * Same must be kmapped before use and kunmapped subsequently - */ - mapped_addr = kmap_atomic(sg_page(sg)); - memcpy(dest, mapped_addr + offset, len); - kunmap_atomic(mapped_addr); -} - -/* Copy from len bytes of sg to dest, starting from beginning */ -static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) -{ - struct scatterlist *current_sg = sg; - int cpy_index = 0, next_cpy_index = current_sg->length; - - while (next_cpy_index < len) { - sg_map_copy(dest + cpy_index, current_sg, current_sg->length, - current_sg->offset); - current_sg = scatterwalk_sg_next(current_sg); - cpy_index = next_cpy_index; - next_cpy_index += current_sg->length; - } - if (cpy_index < len) - sg_map_copy(dest + cpy_index, current_sg, len-cpy_index, - current_sg->offset); -} - -/* Copy sg data, from to_skip to end, to dest */ -static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, - int to_skip, unsigned int end) -{ - struct scatterlist *current_sg = sg; - int sg_index, cpy_index, offset; - - sg_index = current_sg->length; - while (sg_index <= to_skip) { - current_sg = scatterwalk_sg_next(current_sg); - sg_index += current_sg->length; - } - cpy_index = sg_index - to_skip; - offset = current_sg->offset + current_sg->length - cpy_index; - sg_map_copy(dest, current_sg, cpy_index, offset); - if (end - sg_index) { - current_sg = scatterwalk_sg_next(current_sg); - sg_copy(dest + cpy_index, current_sg, end - sg_index); - } -} --- linux-3.13.0.orig/drivers/crypto/omap-aes.c +++ linux-3.13.0/drivers/crypto/omap-aes.c @@ -784,6 +784,7 @@ static int omap_aes_cra_init(struct crypto_tfm *tfm) { struct omap_aes_dev *dd = NULL; + int err; /* Find AES device, 
currently picks the first device */ spin_lock_bh(&list_lock); @@ -792,7 +793,13 @@ } spin_unlock_bh(&list_lock); - pm_runtime_get_sync(dd->dev); + err = pm_runtime_get_sync(dd->dev); + if (err < 0) { + dev_err(dd->dev, "%s: failed to get_sync(%d)\n", + __func__, err); + return err; + } + tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx); return 0; @@ -1182,7 +1189,12 @@ dd->phys_base = res.start; pm_runtime_enable(dev); - pm_runtime_get_sync(dev); + err = pm_runtime_get_sync(dev); + if (err < 0) { + dev_err(dev, "%s: failed to get_sync(%d)\n", + __func__, err); + goto err_res; + } omap_aes_dma_stop(dd); --- linux-3.13.0.orig/drivers/crypto/padlock-aes.c +++ linux-3.13.0/drivers/crypto/padlock-aes.c @@ -563,4 +563,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michal Ludvig"); -MODULE_ALIAS("aes"); +MODULE_ALIAS_CRYPTO("aes"); --- linux-3.13.0.orig/drivers/crypto/padlock-sha.c +++ linux-3.13.0/drivers/crypto/padlock-sha.c @@ -593,7 +593,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michal Ludvig"); -MODULE_ALIAS("sha1-all"); -MODULE_ALIAS("sha256-all"); -MODULE_ALIAS("sha1-padlock"); -MODULE_ALIAS("sha256-padlock"); +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); +MODULE_ALIAS_CRYPTO("sha256-padlock"); --- linux-3.13.0.orig/drivers/crypto/ux500/cryp/cryp_core.c +++ linux-3.13.0/drivers/crypto/ux500/cryp/cryp_core.c @@ -190,7 +190,7 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) { struct cryp_ctx *ctx; - int i; + int count; struct cryp_device_data *device_data; if (param == NULL) { @@ -215,12 +215,11 @@ if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_OUTPUT_FIFO)) { if (ctx->outlen / ctx->blocksize > 0) { - for (i = 0; i < ctx->blocksize / 4; i++) { - *(ctx->outdata) = readl_relaxed( - &device_data->base->dout); - ctx->outdata += 4; - ctx->outlen -= 4; - } + count = ctx->blocksize / 4; + + readsl(&device_data->base->dout, ctx->outdata, count); + ctx->outdata += count; + ctx->outlen -= count; if (ctx->outlen == 0) { cryp_disable_irq_src(device_data, @@ -230,12 +229,12 @@ } else if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO)) { if (ctx->datalen / ctx->blocksize > 0) { - for (i = 0 ; i < ctx->blocksize / 4; i++) { - writel_relaxed(ctx->indata, - &device_data->base->din); - ctx->indata += 4; - ctx->datalen -= 4; - } + count = ctx->blocksize / 4; + + writesl(&device_data->base->din, ctx->indata, count); + + ctx->indata += count; + ctx->datalen -= count; if (ctx->datalen == 0) cryp_disable_irq_src(device_data, @@ -1811,7 +1810,7 @@ module_param(cryp_mode, int, 0); MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine."); -MODULE_ALIAS("aes-all"); -MODULE_ALIAS("des-all"); +MODULE_ALIAS_CRYPTO("aes-all"); +MODULE_ALIAS_CRYPTO("des-all"); MODULE_LICENSE("GPL"); --- linux-3.13.0.orig/drivers/crypto/ux500/hash/hash_core.c +++ linux-3.13.0/drivers/crypto/ux500/hash/hash_core.c @@ -1995,7 +1995,7 @@ MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine."); MODULE_LICENSE("GPL"); -MODULE_ALIAS("sha1-all"); -MODULE_ALIAS("sha256-all"); -MODULE_ALIAS("hmac-sha1-all"); -MODULE_ALIAS("hmac-sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("hmac-sha1-all"); +MODULE_ALIAS_CRYPTO("hmac-sha256-all"); --- linux-3.13.0.orig/drivers/dma/Kconfig +++ linux-3.13.0/drivers/dma/Kconfig @@ -351,18 +351,6 @@ comment "DMA Clients" depends on DMA_ENGINE -config NET_DMA - bool "Network: TCP receive copy offload" - depends on DMA_ENGINE && NET - 
default (INTEL_IOATDMA || FSL_DMA) - depends on BROKEN - help - This enables the use of DMA engines in the network stack to - offload receive copy-to-user operations, freeing CPU cycles. - - Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise - say N. - config ASYNC_TX_DMA bool "Async_tx: Offload support for the async_tx api" depends on DMA_ENGINE --- linux-3.13.0.orig/drivers/dma/Makefile +++ linux-3.13.0/drivers/dma/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_ACPI) += acpi-dma.o obj-$(CONFIG_DMA_OF) += of-dma.o -obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ --- linux-3.13.0.orig/drivers/dma/dmaengine.c +++ linux-3.13.0/drivers/dma/dmaengine.c @@ -959,6 +959,7 @@ dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_BIDIRECTIONAL); } + cnt = unmap->map_cnt; mempool_free(unmap, __get_unmap_pool(cnt)->pool); } @@ -1024,115 +1025,12 @@ memset(unmap, 0, sizeof(*unmap)); kref_init(&unmap->kref); unmap->dev = dev; + unmap->map_cnt = nr; return unmap; } EXPORT_SYMBOL(dmaengine_get_unmap_data); -/** - * dma_async_memcpy_pg_to_pg - offloaded copy from page to page - * @chan: DMA channel to offload copy to - * @dest_pg: destination page - * @dest_off: offset in page to copy to - * @src_pg: source page - * @src_off: offset in page to copy from - * @len: length - * - * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus - * address according to the DMA mapping API rules for streaming mappings. - * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident - * (kernel memory or locked user space pages). - */ -dma_cookie_t -dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg, - unsigned int dest_off, struct page *src_pg, unsigned int src_off, - size_t len) -{ - struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx; - struct dmaengine_unmap_data *unmap; - dma_cookie_t cookie; - unsigned long flags; - - unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT); - if (!unmap) - return -ENOMEM; - - unmap->to_cnt = 1; - unmap->from_cnt = 1; - unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len, - DMA_TO_DEVICE); - unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len, - DMA_FROM_DEVICE); - unmap->len = len; - flags = DMA_CTRL_ACK; - tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], - len, flags); - - if (!tx) { - dmaengine_unmap_put(unmap); - return -ENOMEM; - } - - dma_set_unmap(tx, unmap); - cookie = tx->tx_submit(tx); - dmaengine_unmap_put(unmap); - - preempt_disable(); - __this_cpu_add(chan->local->bytes_transferred, len); - __this_cpu_inc(chan->local->memcpy_count); - preempt_enable(); - - return cookie; -} -EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg); - -/** - * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses - * @chan: DMA channel to offload copy to - * @dest: destination address (virtual) - * @src: source address (virtual) - * @len: length - * - * Both @dest and @src must be mappable to a bus address according to the - * DMA mapping API rules for streaming mappings. - * Both @dest and @src must stay memory resident (kernel memory or locked - * user space pages). 
- */ -dma_cookie_t -dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, - void *src, size_t len) -{ - return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest), - (unsigned long) dest & ~PAGE_MASK, - virt_to_page(src), - (unsigned long) src & ~PAGE_MASK, len); -} -EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf); - -/** - * dma_async_memcpy_buf_to_pg - offloaded copy from address to page - * @chan: DMA channel to offload copy to - * @page: destination page - * @offset: offset in page to copy to - * @kdata: source address (virtual) - * @len: length - * - * Both @page/@offset and @kdata must be mappable to a bus address according - * to the DMA mapping API rules for streaming mappings. - * Both @page/@offset and @kdata must stay memory resident (kernel memory or - * locked user space pages) - */ -dma_cookie_t -dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page, - unsigned int offset, void *kdata, size_t len) -{ - return dma_async_memcpy_pg_to_pg(chan, page, offset, - virt_to_page(kdata), - (unsigned long) kdata & ~PAGE_MASK, len); -} -EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg); - void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, struct dma_chan *chan) { --- linux-3.13.0.orig/drivers/dma/dw/core.c +++ linux-3.13.0/drivers/dma/dw/core.c @@ -1546,11 +1546,6 @@ /* Disable BLOCK interrupts as well */ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, - IRQF_SHARED, "dw_dmac", dw); - if (err) - return err; - /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, sizeof(struct dw_desc), 4, 0); @@ -1561,6 +1556,11 @@ tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, + "dw_dmac", dw); + if (err) + return err; + INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; @@ -1663,6 +1663,7 @@ dw_dma_off(dw); dma_async_device_unregister(&dw->dma); + free_irq(chip->irq, dw); tasklet_kill(&dw->tasklet); list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, --- linux-3.13.0.orig/drivers/dma/dw/platform.c +++ linux-3.13.0/drivers/dma/dw/platform.c @@ -25,6 +25,8 @@ #include "internal.h" +#define DRV_NAME "dw_dmac" + struct dw_dma_of_filter_args { struct dw_dma *dw; unsigned int req; @@ -293,7 +295,7 @@ .remove = dw_remove, .shutdown = dw_shutdown, .driver = { - .name = "dw_dmac", + .name = DRV_NAME, .pm = &dw_dev_pm_ops, .of_match_table = of_match_ptr(dw_dma_of_id_table), .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), @@ -314,3 +316,4 @@ MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver"); +MODULE_ALIAS("platform:" DRV_NAME); --- linux-3.13.0.orig/drivers/dma/edma.c +++ linux-3.13.0/drivers/dma/edma.c @@ -182,11 +182,13 @@ echan->ecc->dummy_slot); } - edma_resume(echan->ch_num); - if (edesc->processed <= MAX_NR_SG) { dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); edma_start(echan->ch_num); + } else { + dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", + echan->ch_num, edesc->processed); + edma_resume(echan->ch_num); } /* @@ -217,6 +219,12 @@ * echan->edesc is NULL and exit.) 
*/ if (echan->edesc) { + /* + * free the running request descriptor + * since it is not in any of the vdesc lists + */ + edma_desc_free(&echan->edesc->vdesc); + echan->edesc = NULL; edma_stop(echan->ch_num); } --- linux-3.13.0.orig/drivers/dma/ioat/dma.c +++ linux-3.13.0/drivers/dma/ioat/dma.c @@ -77,7 +77,8 @@ attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) { chan = ioat_chan_by_index(instance, bit); - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &chan->state)) + tasklet_schedule(&chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -93,7 +94,8 @@ { struct ioat_chan_common *chan = data; - tasklet_schedule(&chan->cleanup_task); + if (test_bit(IOAT_RUN, &chan->state)) + tasklet_schedule(&chan->cleanup_task); return IRQ_HANDLED; } @@ -116,7 +118,6 @@ chan->timer.function = device->timer_fn; chan->timer.data = data; tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); - tasklet_disable(&chan->cleanup_task); } /** @@ -354,13 +355,49 @@ writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - tasklet_enable(&chan->cleanup_task); + set_bit(IOAT_RUN, &chan->state); ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", __func__, ioat->desccount); return ioat->desccount; } +void ioat_stop(struct ioat_chan_common *chan) +{ + struct ioatdma_device *device = chan->device; + struct pci_dev *pdev = device->pdev; + int chan_id = chan_num(chan); + struct msix_entry *msix; + + /* 1/ stop irq from firing tasklets + * 2/ stop the tasklet from re-arming irqs + */ + clear_bit(IOAT_RUN, &chan->state); + + /* flush inflight interrupts */ + switch (device->irq_mode) { + case IOAT_MSIX: + msix = &device->msix_entries[chan_id]; + synchronize_irq(msix->vector); + break; + case IOAT_MSI: + case IOAT_INTX: + synchronize_irq(pdev->irq); + break; + default: + break; + } + + /* flush inflight timers */ + del_timer_sync(&chan->timer); + + /* flush inflight tasklet runs */ + tasklet_kill(&chan->cleanup_task); + + /* final cleanup now that everything is quiesced and can't re-arm */ + device->cleanup_fn((unsigned long) &chan->common); +} + /** * ioat1_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned @@ -379,9 +416,7 @@ if (ioat->desccount == 0) return; - tasklet_disable(&chan->cleanup_task); - del_timer_sync(&chan->timer); - ioat1_cleanup(ioat); + ioat_stop(chan); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. 
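The new ioat_stop() in the hunk above enforces a strict quiesce order before channel resources are released: clear the RUN bit, flush any interrupt handler already in flight, flush the deferred timer and tasklet, and only then run the final cleanup. A generic sketch of that ordering is shown below; the demo_chan structure and helper names are illustrative assumptions, not the driver's own types.

#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#define DEMO_RUN	0		/* bit index of the "running" flag */

struct demo_chan {
	unsigned long		state;
	unsigned int		irq;
	struct timer_list	timer;
	struct tasklet_struct	cleanup_task;
};

static void demo_chan_stop(struct demo_chan *chan)
{
	/* 1) stop the IRQ handler from scheduling the tasklet and the
	 *    tasklet from re-arming the channel */
	clear_bit(DEMO_RUN, &chan->state);

	/* 2) wait for a handler that may already be running */
	synchronize_irq(chan->irq);

	/* 3) flush work that was queued before the flag was cleared */
	del_timer_sync(&chan->timer);
	tasklet_kill(&chan->cleanup_task);

	/* 4) nothing can re-arm the channel now; final cleanup is safe */
}

The matching start path must set the RUN bit before the first descriptor is kicked off, which is why the dma_v2.c hunk further down moves set_bit(IOAT_RUN, &chan->state) ahead of ioat2_start_null_desc().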
@@ -526,8 +561,11 @@ static void ioat1_cleanup_event(unsigned long data) { struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat1_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -1184,7 +1222,6 @@ err = ioat_probe(device); if (err) return err; - ioat_set_tcp_copy_break(4096); err = ioat_register(device); if (err) return err; --- linux-3.13.0.orig/drivers/dma/ioat/dma.h +++ linux-3.13.0/drivers/dma/ioat/dma.h @@ -214,13 +214,6 @@ #define dump_desc_dbg(c, d) \ ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) -static inline void ioat_set_tcp_copy_break(unsigned long copybreak) -{ - #ifdef CONFIG_NET_DMA - sysctl_tcp_dma_copybreak = copybreak; - #endif -} - static inline struct ioat_chan_common * ioat_chan_by_index(struct ioatdma_device *device, int index) { @@ -356,6 +349,7 @@ void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *device); int ioat_dma_setup_interrupts(struct ioatdma_device *device); +void ioat_stop(struct ioat_chan_common *chan); extern const struct sysfs_ops ioat_sysfs_ops; extern struct ioat_sysfs_entry ioat_version_attr; extern struct ioat_sysfs_entry ioat_cap_attr; --- linux-3.13.0.orig/drivers/dma/ioat/dma_v2.c +++ linux-3.13.0/drivers/dma/ioat/dma_v2.c @@ -190,8 +190,11 @@ void ioat2_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat2_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -553,10 +556,10 @@ ioat->issued = 0; ioat->tail = 0; ioat->alloc_order = order; + set_bit(IOAT_RUN, &chan->state); spin_unlock_bh(&ioat->prep_lock); spin_unlock_bh(&chan->cleanup_lock); - tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); /* check that we got off the ground */ @@ -566,7 +569,6 @@ } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); if (is_ioat_active(status) || is_ioat_idle(status)) { - set_bit(IOAT_RUN, &chan->state); return 1 << ioat->alloc_order; } else { u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); @@ -809,11 +811,8 @@ if (!ioat->ring) return; - tasklet_disable(&chan->cleanup_task); - del_timer_sync(&chan->timer); - device->cleanup_fn((unsigned long) c); + ioat_stop(chan); device->reset_hw(chan); - clear_bit(IOAT_RUN, &chan->state); spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->prep_lock); @@ -900,7 +899,6 @@ err = ioat_probe(device); if (err) return err; - ioat_set_tcp_copy_break(2048); list_for_each_entry(c, &dma->channels, device_node) { chan = to_chan_common(c); --- linux-3.13.0.orig/drivers/dma/ioat/dma_v3.c +++ linux-3.13.0/drivers/dma/ioat/dma_v3.c @@ -464,8 +464,11 @@ static void ioat3_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat_chan_common *chan = &ioat->base; ioat3_cleanup(ioat); + if (!test_bit(IOAT_RUN, &chan->state)) + return; writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -1652,7 +1655,6 @@ err = ioat_probe(device); if (err) return err; - ioat_set_tcp_copy_break(262144); list_for_each_entry(c, &dma->channels, device_node) { chan = to_chan_common(c); --- linux-3.13.0.orig/drivers/dma/mv_xor.c +++ linux-3.13.0/drivers/dma/mv_xor.c @@ -191,12 +191,10 @@ static void mv_chan_activate(struct 
mv_xor_chan *chan) { - u32 activation; - dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); - activation = readl_relaxed(XOR_ACTIVATION(chan)); - activation |= 0x1; - writel_relaxed(activation, XOR_ACTIVATION(chan)); + + /* writel ensures all descriptors are flushed before activation */ + writel(BIT(0), XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) --- linux-3.13.0.orig/drivers/dma/omap-dma.c +++ linux-3.13.0/drivers/dma/omap-dma.c @@ -487,6 +487,7 @@ * c->desc is NULL and exit.) */ if (c->desc) { + omap_dma_desc_free(&c->desc->vd); c->desc = NULL; /* Avoid stopping the dma twice */ if (!c->paused) --- linux-3.13.0.orig/drivers/dma/ste_dma40.c +++ linux-3.13.0/drivers/dma/ste_dma40.c @@ -1641,6 +1641,7 @@ struct d40_chan *d40c = (struct d40_chan *) data; struct d40_desc *d40d; unsigned long flags; + bool callback_active; dma_async_tx_callback callback; void *callback_param; @@ -1668,6 +1669,7 @@ } /* Callback to client */ + callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); callback = d40d->txd.callback; callback_param = d40d->txd.callback_param; @@ -1690,7 +1692,7 @@ spin_unlock_irqrestore(&d40c->lock, flags); - if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) + if (callback_active && callback) callback(callback_param); return; --- linux-3.13.0.orig/drivers/edac/cpc925_edac.c +++ linux-3.13.0/drivers/edac/cpc925_edac.c @@ -562,7 +562,7 @@ if (apiexcp & UECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, pfn, offset, 0, csrow, -1, -1, mci->ctl_name, ""); --- linux-3.13.0.orig/drivers/edac/e752x_edac.c +++ linux-3.13.0/drivers/edac/e752x_edac.c @@ -1182,9 +1182,11 @@ pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, pvt->dev_info->err_dev, pvt->bridge_ck); - if (pvt->bridge_ck == NULL) + if (pvt->bridge_ck == NULL) { pvt->bridge_ck = pci_scan_single_device(pdev->bus, PCI_DEVFN(0, 1)); + pci_dev_get(pvt->bridge_ck); + } if (pvt->bridge_ck == NULL) { e752x_printk(KERN_ERR, "error reporting device not found:" --- linux-3.13.0.orig/drivers/edac/e7xxx_edac.c +++ linux-3.13.0/drivers/edac/e7xxx_edac.c @@ -226,7 +226,7 @@ static void process_ce_no_info(struct mem_ctl_info *mci) { edac_dbg(3, "\n"); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, "e7xxx CE log register overflow", ""); } --- linux-3.13.0.orig/drivers/edac/edac_mc.c +++ linux-3.13.0/drivers/edac/edac_mc.c @@ -559,7 +559,8 @@ * * called with the mem_ctls_mutex held */ -static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) +static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec, + bool init) { edac_dbg(0, "\n"); @@ -567,7 +568,9 @@ if (mci->op_state != OP_RUNNING_POLL) return; - INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); + if (init) + INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); + mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); } @@ -601,7 +604,7 @@ * user space has updated our poll period value, need to * reset our workq delays */ -void edac_mc_reset_delay_period(int value) +void edac_mc_reset_delay_period(unsigned long value) { struct mem_ctl_info *mci; struct list_head *item; @@ -611,7 +614,7 @@ list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); - edac_mc_workq_setup(mci, (unsigned long) value); + edac_mc_workq_setup(mci, value, 
false); } mutex_unlock(&mem_ctls_mutex); @@ -782,7 +785,7 @@ /* This instance is NOW RUNNING */ mci->op_state = OP_RUNNING_POLL; - edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); + edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true); } else { mci->op_state = OP_RUNNING_INTERRUPT; } --- linux-3.13.0.orig/drivers/edac/edac_mc_sysfs.c +++ linux-3.13.0/drivers/edac/edac_mc_sysfs.c @@ -52,18 +52,20 @@ static int edac_set_poll_msec(const char *val, struct kernel_param *kp) { - long l; + unsigned long l; int ret; if (!val) return -EINVAL; - ret = kstrtol(val, 0, &l); + ret = kstrtoul(val, 0, &l); if (ret) return ret; - if ((int)l != l) + + if (l < 1000) return -EINVAL; - *((int *)kp->arg) = l; + + *((unsigned long *)kp->arg) = l; /* notify edac_mc engine to reset the poll period */ edac_mc_reset_delay_period(l); --- linux-3.13.0.orig/drivers/edac/edac_module.h +++ linux-3.13.0/drivers/edac/edac_module.h @@ -52,7 +52,7 @@ extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); extern void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, unsigned long value); -extern void edac_mc_reset_delay_period(int value); +extern void edac_mc_reset_delay_period(unsigned long value); extern void *edac_align_ptr(void **p, unsigned size, int n_elems); --- linux-3.13.0.orig/drivers/edac/i3200_edac.c +++ linux-3.13.0/drivers/edac/i3200_edac.c @@ -242,11 +242,11 @@ -1, -1, "i3000 UE", ""); } else if (log & I3200_ECCERRLOG_CE) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), -1, -1, - "i3000 UE", ""); + "i3000 CE", ""); } } } --- linux-3.13.0.orig/drivers/edac/i7300_edac.c +++ linux-3.13.0/drivers/edac/i7300_edac.c @@ -943,33 +943,35 @@ /* Attempt to 'get' the MCH register we want */ pdev = NULL; - while (!pvt->pci_dev_16_1_fsb_addr_map || - !pvt->pci_dev_16_2_fsb_err_regs) { - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev); - if (!pdev) { - /* End of list, leave */ - i7300_printk(KERN_ERR, - "'system address,Process Bus' " - "device not found:" - "vendor 0x%x device 0x%x ERR funcs " - "(broken BIOS?)\n", - PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); - goto error; - } - + while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, + pdev))) { /* Store device 16 funcs 1 and 2 */ switch (PCI_FUNC(pdev->devfn)) { case 1: - pvt->pci_dev_16_1_fsb_addr_map = pdev; + if (!pvt->pci_dev_16_1_fsb_addr_map) + pvt->pci_dev_16_1_fsb_addr_map = + pci_dev_get(pdev); break; case 2: - pvt->pci_dev_16_2_fsb_err_regs = pdev; + if (!pvt->pci_dev_16_2_fsb_err_regs) + pvt->pci_dev_16_2_fsb_err_regs = + pci_dev_get(pdev); break; } } + if (!pvt->pci_dev_16_1_fsb_addr_map || + !pvt->pci_dev_16_2_fsb_err_regs) { + /* At least one device was not found */ + i7300_printk(KERN_ERR, + "'system address,Process Bus' device not found:" + "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); + goto error; + } + edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", pci_name(pvt->pci_dev_16_0_fsb_ctlr), pvt->pci_dev_16_0_fsb_ctlr->vendor, --- linux-3.13.0.orig/drivers/edac/i7core_edac.c +++ linux-3.13.0/drivers/edac/i7core_edac.c @@ -1334,14 +1334,19 @@ * is at addr 8086:2c40, instead of 8086:2c41. 
So, we need * to probe for the alternate address in case of failure */ - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) { + pci_dev_get(*prev); /* pci_get_device will put it */ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev); + } - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev) + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && + !pdev) { + pci_dev_get(*prev); /* pci_get_device will put it */ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT, *prev); + } if (!pdev) { if (*prev) { --- linux-3.13.0.orig/drivers/edac/i82860_edac.c +++ linux-3.13.0/drivers/edac/i82860_edac.c @@ -124,7 +124,7 @@ dimm->location[0], dimm->location[1], -1, "i82860 UE", ""); else - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, info->eap, 0, info->derrsyn, dimm->location[0], dimm->location[1], -1, "i82860 CE", ""); --- linux-3.13.0.orig/drivers/edac/mpc85xx_edac.c +++ linux-3.13.0/drivers/edac/mpc85xx_edac.c @@ -557,7 +557,7 @@ if (edac_op_state == EDAC_OPSTATE_INT) { pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); res = devm_request_irq(&op->dev, pdata->irq, - mpc85xx_l2_isr, IRQF_DISABLED, + mpc85xx_l2_isr, IRQF_SHARED, "[EDAC] L2 err", edac_dev); if (res < 0) { printk(KERN_ERR --- linux-3.13.0.orig/drivers/eisa/eisa-bus.c +++ linux-3.13.0/drivers/eisa/eisa-bus.c @@ -275,18 +275,19 @@ } if (slot) { + edev->res[i].name = NULL; edev->res[i].start = SLOT_ADDRESS(root, slot) + (i * 0x400); edev->res[i].end = edev->res[i].start + 0xff; edev->res[i].flags = IORESOURCE_IO; } else { + edev->res[i].name = NULL; edev->res[i].start = SLOT_ADDRESS(root, slot) + EISA_VENDOR_ID_OFFSET; edev->res[i].end = edev->res[i].start + 3; edev->res[i].flags = IORESOURCE_IO | IORESOURCE_BUSY; } - dev_printk(KERN_DEBUG, &edev->dev, "%pR\n", &edev->res[i]); if (request_resource(root->res, &edev->res[i])) goto failed; } @@ -326,19 +327,20 @@ return -ENOMEM; } - if (eisa_init_device(root, edev, 0)) { + if (eisa_request_resources(root, edev, 0)) { + dev_warn(root->dev, + "EISA: Cannot allocate resource for mainboard\n"); kfree(edev); if (!root->force_probe) - return -ENODEV; + return -EBUSY; goto force_probe; } - if (eisa_request_resources(root, edev, 0)) { - dev_warn(root->dev, - "EISA: Cannot allocate resource for mainboard\n"); + if (eisa_init_device(root, edev, 0)) { + eisa_release_resources(edev); kfree(edev); if (!root->force_probe) - return -EBUSY; + return -ENODEV; goto force_probe; } @@ -361,11 +363,6 @@ continue; } - if (eisa_init_device(root, edev, i)) { - kfree(edev); - continue; - } - if (eisa_request_resources(root, edev, i)) { dev_warn(root->dev, "Cannot allocate resource for EISA slot %d\n", @@ -373,6 +370,12 @@ kfree(edev); continue; } + + if (eisa_init_device(root, edev, i)) { + eisa_release_resources(edev); + kfree(edev); + continue; + } if (edev->state == (EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED)) enabled_str = " (forced enabled)"; --- linux-3.13.0.orig/drivers/extcon/extcon-gpio.c +++ linux-3.13.0/drivers/extcon/extcon-gpio.c @@ -105,6 +105,12 @@ extcon_data->state_off = pdata->state_off; if (pdata->state_on && pdata->state_off) extcon_data->edev.print_state = extcon_gpio_print_state; + + ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, + pdev->name); + if (ret < 0) + return ret; + if (pdata->debounce) { ret = 
gpio_set_debounce(extcon_data->gpio, pdata->debounce * 1000); @@ -117,11 +123,6 @@ if (ret < 0) return ret; - ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, - pdev->name); - if (ret < 0) - goto err; - INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); extcon_data->irq = gpio_to_irq(extcon_data->gpio); --- linux-3.13.0.orig/drivers/extcon/extcon-max77693.c +++ linux-3.13.0/drivers/extcon/extcon-max77693.c @@ -1193,7 +1193,7 @@ /* Initialize MUIC register by using platform data or default data */ - if (pdata->muic_data) { + if (pdata && pdata->muic_data) { init_data = pdata->muic_data->init_data; num_init_data = pdata->muic_data->num_init_data; } else { @@ -1226,7 +1226,7 @@ = init_data[i].data; } - if (pdata->muic_data) { + if (pdata && pdata->muic_data) { struct max77693_muic_platform_data *muic_pdata = pdata->muic_data; --- linux-3.13.0.orig/drivers/extcon/extcon-max8997.c +++ linux-3.13.0/drivers/extcon/extcon-max8997.c @@ -715,7 +715,7 @@ goto err_irq; } - if (pdata->muic_pdata) { + if (pdata && pdata->muic_pdata) { struct max8997_muic_platform_data *muic_pdata = pdata->muic_pdata; --- linux-3.13.0.orig/drivers/firewire/core-cdev.c +++ linux-3.13.0/drivers/firewire/core-cdev.c @@ -1637,8 +1637,7 @@ _IOC_SIZE(cmd) > sizeof(buffer)) return -ENOTTY; - if (_IOC_DIR(cmd) == _IOC_READ) - memset(&buffer, 0, _IOC_SIZE(cmd)); + memset(&buffer, 0, sizeof(buffer)); if (_IOC_DIR(cmd) & _IOC_WRITE) if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) --- linux-3.13.0.orig/drivers/firewire/core-device.c +++ linux-3.13.0/drivers/firewire/core-device.c @@ -916,7 +916,7 @@ old->config_rom_retries = 0; fw_notice(card, "rediscovered device %s\n", dev_name(dev)); - PREPARE_DELAYED_WORK(&old->work, fw_device_update); + old->workfn = fw_device_update; fw_schedule_device_work(old, 0); if (current_node == card->root_node) @@ -1075,7 +1075,7 @@ if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); } else { fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", @@ -1196,13 +1196,20 @@ dev_name(&device->device), fw_rcode_string(ret)); gone: atomic_set(&device->state, FW_DEVICE_GONE); - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); out: if (node_id == card->root_node->node_id) fw_schedule_bm_work(card, 0); } +static void fw_device_workfn(struct work_struct *work) +{ + struct fw_device *device = container_of(to_delayed_work(work), + struct fw_device, work); + device->workfn(work); +} + void fw_node_event(struct fw_card *card, struct fw_node *node, int event) { struct fw_device *device; @@ -1252,7 +1259,8 @@ * power-up after getting plugged in. We schedule the * first config rom scan half a second after bus reset. */ - INIT_DELAYED_WORK(&device->work, fw_device_init); + device->workfn = fw_device_init; + INIT_DELAYED_WORK(&device->work, fw_device_workfn); fw_schedule_device_work(device, INITIAL_DELAY); break; @@ -1268,7 +1276,7 @@ if (atomic_cmpxchg(&device->state, FW_DEVICE_RUNNING, FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); + device->workfn = fw_device_refresh; fw_schedule_device_work(device, device->is_local ? 
0 : INITIAL_DELAY); } @@ -1283,7 +1291,7 @@ smp_wmb(); /* update node_id before generation */ device->generation = card->generation; if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_update); + device->workfn = fw_device_update; fw_schedule_device_work(device, 0); } break; @@ -1308,7 +1316,7 @@ device = node->data; if (atomic_xchg(&device->state, FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); } --- linux-3.13.0.orig/drivers/firewire/net.c +++ linux-3.13.0/drivers/firewire/net.c @@ -929,8 +929,6 @@ if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); } else { - fwnet_transmit_packet_failed(ptask); - if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { dev_err(&ptask->dev->netdev->dev, "fwnet_write_complete failed: %x (skipped %d)\n", @@ -938,8 +936,10 @@ errors_skipped = 0; last_rcode = rcode; - } else + } else { errors_skipped++; + } + fwnet_transmit_packet_failed(ptask); } } --- linux-3.13.0.orig/drivers/firewire/ohci.c +++ linux-3.13.0/drivers/firewire/ohci.c @@ -290,7 +290,6 @@ #define QUIRK_NO_MSI 0x10 #define QUIRK_TI_SLLZ059 0x20 #define QUIRK_IR_WAKE 0x40 -#define QUIRK_PHY_LCTRL_TIMEOUT 0x80 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { @@ -303,10 +302,7 @@ QUIRK_BE_HEADERS}, {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, - QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, - - {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, - QUIRK_PHY_LCTRL_TIMEOUT}, + QUIRK_NO_MSI}, {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, QUIRK_RESET_PACKET}, @@ -353,7 +349,6 @@ ", disable MSI = " __stringify(QUIRK_NO_MSI) ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) - ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) ")"); #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -2295,9 +2290,6 @@ * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but * cannot actually use the phy at that time. These need tens of * millisecods pause between LPS write and first phy access too. - * - * But do not wait for 50msec on Agere/LSI cards. Their phy - * arbitration state machine may time out during such a long wait. */ reg_write(ohci, OHCI1394_HCControlSet, @@ -2305,11 +2297,8 @@ OHCI1394_HCControl_postedWriteEnable); flush_writes(ohci); - if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) + for (lps = 0, i = 0; !lps && i < 3; i++) { msleep(50); - - for (lps = 0, i = 0; !lps && i < 150; i++) { - msleep(1); lps = reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS; } --- linux-3.13.0.orig/drivers/firewire/sbp2.c +++ linux-3.13.0/drivers/firewire/sbp2.c @@ -146,6 +146,7 @@ */ int generation; int retries; + work_func_t workfn; struct delayed_work work; bool has_sdev; bool blocked; @@ -864,7 +865,7 @@ /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ sbp2_set_busy_timeout(lu); - PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); + lu->workfn = sbp2_reconnect; sbp2_agent_reset(lu); /* This was a re-login. */ @@ -918,7 +919,7 @@ * If a bus reset happened, sbp2_update will have requeued * lu->work already. Reset the work from reconnect to login. 
*/ - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; } static void sbp2_reconnect(struct work_struct *work) @@ -952,7 +953,7 @@ lu->retries++ >= 5) { dev_err(tgt_dev(tgt), "failed to reconnect\n"); lu->retries = 0; - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; } sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); @@ -972,6 +973,13 @@ sbp2_conditionally_unblock(lu); } +static void sbp2_lu_workfn(struct work_struct *work) +{ + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), + struct sbp2_logical_unit, work); + lu->workfn(work); +} + static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) { struct sbp2_logical_unit *lu; @@ -998,7 +1006,8 @@ lu->blocked = false; ++tgt->dont_block; INIT_LIST_HEAD(&lu->orb_list); - INIT_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); list_add_tail(&lu->link, &tgt->lu_list); return 0; --- linux-3.13.0.orig/drivers/firmware/efi/Kconfig +++ linux-3.13.0/drivers/firmware/efi/Kconfig @@ -36,6 +36,13 @@ backend for pstore by default. This setting can be overridden using the efivars module's pstore_disable parameter. +config EFI_PARAMS_FROM_FDT + bool + help + Select this config option from the architecture Kconfig if + the EFI runtime support gets system table address, memory + map address, and other parameters from the device tree. + endmenu config UEFI_CPER --- linux-3.13.0.orig/drivers/firmware/efi/arm-stub.c +++ linux-3.13.0/drivers/firmware/efi/arm-stub.c @@ -0,0 +1,291 @@ +/* + * EFI stub implementation that is shared by arm and arm64 architectures. + * This should be #included by the EFI stub implementation files. + * + * Copyright (C) 2013,2014 Linaro Limited + * Roy Franz + * + * This file is part of the Linux kernel, and is made available under the + * terms of the GNU General Public License version 2. 
+ * + */ + +static int __init efi_secureboot_enabled(efi_system_table_t *sys_table_arg) +{ + static efi_guid_t const var_guid __initconst = EFI_GLOBAL_VARIABLE_GUID; + static efi_char16_t const var_name[] __initconst = { + 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 }; + + efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable; + unsigned long size = sizeof(u8); + efi_status_t status; + u8 val; + + status = f_getvar((efi_char16_t *)var_name, (efi_guid_t *)&var_guid, + NULL, &size, &val); + + switch (status) { + case EFI_SUCCESS: + return val; + case EFI_NOT_FOUND: + return 0; + default: + return 1; + } +} + +static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, + void *__image, void **__fh) +{ + efi_file_io_interface_t *io; + efi_loaded_image_t *image = __image; + efi_file_handle_t *fh; + efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID; + efi_status_t status; + void *handle = (void *)(unsigned long)image->device_handle; + + status = sys_table_arg->boottime->handle_protocol(handle, + &fs_proto, (void **)&io); + if (status != EFI_SUCCESS) { + efi_printk(sys_table_arg, "Failed to handle fs_proto\n"); + return status; + } + + status = io->open_volume(io, &fh); + if (status != EFI_SUCCESS) + efi_printk(sys_table_arg, "Failed to open volume\n"); + + *__fh = fh; + return status; +} +static efi_status_t efi_file_close(void *handle) +{ + efi_file_handle_t *fh = handle; + + return fh->close(handle); +} + +static efi_status_t +efi_file_read(void *handle, unsigned long *size, void *addr) +{ + efi_file_handle_t *fh = handle; + + return fh->read(handle, size, addr); +} + + +static efi_status_t +efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, + efi_char16_t *filename_16, void **handle, u64 *file_sz) +{ + efi_file_handle_t *h, *fh = __fh; + efi_file_info_t *info; + efi_status_t status; + efi_guid_t info_guid = EFI_FILE_INFO_ID; + unsigned long info_sz; + + status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0); + if (status != EFI_SUCCESS) { + efi_printk(sys_table_arg, "Failed to open file: "); + efi_char16_printk(sys_table_arg, filename_16); + efi_printk(sys_table_arg, "\n"); + return status; + } + + *handle = h; + + info_sz = 0; + status = h->get_info(h, &info_guid, &info_sz, NULL); + if (status != EFI_BUFFER_TOO_SMALL) { + efi_printk(sys_table_arg, "Failed to get file info size\n"); + return status; + } + +grow: + status = sys_table_arg->boottime->allocate_pool(EFI_LOADER_DATA, + info_sz, (void **)&info); + if (status != EFI_SUCCESS) { + efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); + return status; + } + + status = h->get_info(h, &info_guid, &info_sz, + info); + if (status == EFI_BUFFER_TOO_SMALL) { + sys_table_arg->boottime->free_pool(info); + goto grow; + } + + *file_sz = info->file_size; + sys_table_arg->boottime->free_pool(info); + + if (status != EFI_SUCCESS) + efi_printk(sys_table_arg, "Failed to get initrd info\n"); + + return status; +} + +/* + * This function handles the architecture-specific differences between arm and + * arm64 regarding where the kernel image must be loaded and any memory that + * must be reserved. On failure it is required to free + * all allocations it has made. 
+ */ +static efi_status_t handle_kernel_image(efi_system_table_t *sys_table, + unsigned long *image_addr, + unsigned long *image_size, + unsigned long *reserve_addr, + unsigned long *reserve_size, + unsigned long dram_base, + efi_loaded_image_t *image); +static unsigned long __init get_dram_base(efi_system_table_t *sys_table_arg) +{ + efi_status_t status; + unsigned long map_size; + unsigned long membase = EFI_ERROR; + struct efi_memory_map map; + efi_memory_desc_t *md; + + status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map, + &map_size, &map.desc_size, NULL, NULL); + if (status != EFI_SUCCESS) + return membase; + + map.map_end = map.map + map_size; + + for_each_efi_memory_desc(&map, md) + if (md->attribute & EFI_MEMORY_WB) + if (membase > md->phys_addr) + membase = md->phys_addr; + + efi_call_early(free_pool, map.map); + + return membase; +} + +/* + * EFI entry point for the arm/arm64 EFI stubs. This is the entry point + * that is described in the PE/COFF header. Most of the code is the same + * for both architectures, with the arch-specific code provided in the + * handle_kernel_image() function. + */ +unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table, + unsigned long *image_addr) +{ + efi_loaded_image_t *image; + efi_status_t status; + unsigned long image_size = 0; + unsigned long dram_base; + /* addr/point and size pairs for memory management */ + unsigned long initrd_addr; + u64 initrd_size = 0; + unsigned long fdt_addr = 0; /* Original DTB */ + u64 fdt_size = 0; /* We don't get size from configuration table */ + char *cmdline_ptr = NULL; + int cmdline_size = 0; + unsigned long new_fdt_addr; + efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID; + unsigned long reserve_addr = 0; + unsigned long reserve_size = 0; + + /* Check if we were booted by the EFI firmware */ + if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) + goto fail; + + pr_efi(sys_table, "Booting Linux Kernel...\n"); + + /* + * Get a handle to the loaded image protocol. This is used to get + * information about the running image, such as size and the command + * line. + */ + status = sys_table->boottime->handle_protocol(handle, + &loaded_image_proto, (void *)&image); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Failed to get loaded image protocol\n"); + goto fail; + } + + dram_base = get_dram_base(sys_table); + if (dram_base == EFI_ERROR) { + pr_efi_err(sys_table, "Failed to find DRAM base\n"); + goto fail; + } + status = handle_kernel_image(sys_table, image_addr, &image_size, + &reserve_addr, + &reserve_size, + dram_base, image); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Failed to relocate kernel\n"); + goto fail; + } + + /* + * Get the command line from EFI, using the LOADED_IMAGE + * protocol. We are going to copy the command line into the + * device tree, so this can be allocated anywhere. + */ + cmdline_ptr = efi_convert_cmdline_to_ascii(sys_table, image, &cmdline_size); + if (!cmdline_ptr) { + pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n"); + goto fail_free_image; + } + + /* + * Unauthenticated device tree data is a security hazard, so + * ignore 'dtb=' unless UEFI Secure Boot is disabled. 
+ */ + if (efi_secureboot_enabled(sys_table)) { + pr_efi(sys_table, "UEFI Secure Boot is enabled.\n"); + } else { + status = handle_cmdline_files(sys_table, image, cmdline_ptr, + "dtb=", + ~0UL, (unsigned long *)&fdt_addr, + (unsigned long *)&fdt_size); + + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Failed to load device tree!\n"); + goto fail_free_cmdline; + } + } + if (!fdt_addr) + /* Look for a device tree configuration table entry. */ + fdt_addr = (uintptr_t)get_fdt(sys_table); + + status = handle_cmdline_files(sys_table, image, cmdline_ptr, + "initrd=", dram_base + SZ_512M, + (unsigned long *)&initrd_addr, + (unsigned long *)&initrd_size); + if (status != EFI_SUCCESS) + pr_efi_err(sys_table, "Failed initrd from command line!\n"); + + new_fdt_addr = fdt_addr; + status = allocate_new_fdt_and_exit_boot(sys_table, handle, + &new_fdt_addr, dram_base + MAX_FDT_OFFSET, + initrd_addr, initrd_size, cmdline_ptr, + fdt_addr, fdt_size); + + /* + * If all went well, we need to return the FDT address to the + * calling function so it can be passed to kernel as part of + * the kernel boot protocol. + */ + if (status == EFI_SUCCESS) + return new_fdt_addr; + + pr_efi_err(sys_table, "Failed to update FDT and exit boot services\n"); + + efi_free(sys_table, initrd_size, initrd_addr); + efi_free(sys_table, fdt_size, fdt_addr); + +fail_free_cmdline: + efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr); + +fail_free_image: + efi_free(sys_table, image_size, *image_addr); + efi_free(sys_table, reserve_size, reserve_addr); +fail: + return EFI_ERROR; +} --- linux-3.13.0.orig/drivers/firmware/efi/efi-pstore.c +++ linux-3.13.0/drivers/firmware/efi/efi-pstore.c @@ -40,7 +40,7 @@ static inline u64 generic_id(unsigned long timestamp, unsigned int part, int count) { - return (timestamp * 100 + part) * 1000 + count; + return ((u64) timestamp * 100 + part) * 1000 + count; } static int efi_pstore_read_func(struct efivar_entry *entry, void *data) --- linux-3.13.0.orig/drivers/firmware/efi/efi-stub-helper.c +++ linux-3.13.0/drivers/firmware/efi/efi-stub-helper.c @@ -11,6 +11,10 @@ */ #define EFI_READ_CHUNK_SIZE (1024 * 1024) +/* error code which can't be mistaken for valid address */ +#define EFI_ERROR (~0UL) + + struct file_info { efi_file_handle_t *handle; u64 size; @@ -45,6 +49,9 @@ } } +#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg) +#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) + static efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, efi_memory_desc_t **map, @@ -133,12 +140,12 @@ start = desc->phys_addr; end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); - if ((start + size) > end || (start + size) > max) - continue; - - if (end - size > max) + if (end > max) end = max; + if ((start + size) > end) + continue; + if (round_down(end - size, align) < start) continue; --- linux-3.13.0.orig/drivers/firmware/efi/efi.c +++ linux-3.13.0/drivers/firmware/efi/efi.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include struct efi __read_mostly efi = { @@ -272,3 +274,80 @@ early_iounmap(config_tables, efi.systab->nr_tables * sz); return 0; } + +#ifdef CONFIG_EFI_PARAMS_FROM_FDT + +#define UEFI_PARAM(name, prop, field) \ + { \ + { name }, \ + { prop }, \ + offsetof(struct efi_fdt_params, field), \ + FIELD_SIZEOF(struct efi_fdt_params, field) \ + } + +static __initdata struct { + const char name[32]; + const char propname[32]; + int offset; + int size; +} dt_params[] = { + UEFI_PARAM("System Table", 
"linux,uefi-system-table", system_table), + UEFI_PARAM("MemMap Address", "linux,uefi-mmap-start", mmap), + UEFI_PARAM("MemMap Size", "linux,uefi-mmap-size", mmap_size), + UEFI_PARAM("MemMap Desc. Size", "linux,uefi-mmap-desc-size", desc_size), + UEFI_PARAM("MemMap Desc. Version", "linux,uefi-mmap-desc-ver", desc_ver) +}; + +struct param_info { + int verbose; + void *params; +}; + +static int __init fdt_find_uefi_params(unsigned long node, const char *uname, + int depth, void *data) +{ + struct param_info *info = data; + void *prop, *dest; + unsigned long len; + u64 val; + int i; + + if (depth != 1 || + (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) + return 0; + + pr_info("Getting parameters from FDT:\n"); + + for (i = 0; i < ARRAY_SIZE(dt_params); i++) { + prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); + if (!prop) { + pr_err("Can't find %s in device tree!\n", + dt_params[i].name); + return 0; + } + dest = info->params + dt_params[i].offset; + + val = of_read_number(prop, len / sizeof(u32)); + + if (dt_params[i].size == sizeof(u32)) + *(u32 *)dest = val; + else + *(u64 *)dest = val; + + if (info->verbose) + pr_info(" %s: 0x%0*llx\n", dt_params[i].name, + dt_params[i].size * 2, val); + } + return 1; +} + +int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) +{ + struct param_info info; + + info.verbose = verbose; + info.params = params; + + return of_scan_flat_dt(fdt_find_uefi_params, &info); +} +#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ --- linux-3.13.0.orig/drivers/firmware/efi/fdt.c +++ linux-3.13.0/drivers/firmware/efi/fdt.c @@ -0,0 +1,285 @@ +/* + * FDT related Helper functions used by the EFI stub on multiple + * architectures. This should be #included by the EFI stub + * implementation files. + * + * Copyright 2013 Linaro Limited; author Roy Franz + * + * This file is part of the Linux kernel, and is made available + * under the terms of the GNU General Public License version 2. + * + */ + +static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, + unsigned long orig_fdt_size, + void *fdt, int new_fdt_size, char *cmdline_ptr, + u64 initrd_addr, u64 initrd_size, + efi_memory_desc_t *memory_map, + unsigned long map_size, unsigned long desc_size, + u32 desc_ver) +{ + int node, prev; + int status; + u32 fdt_val32; + u64 fdt_val64; + + /* + * Copy definition of linux_banner here. Since this code is + * built as part of the decompressor for ARM v7, pulling + * in version.c where linux_banner is defined for the + * kernel brings other kernel dependencies with it. + */ + const char linux_banner[] = + "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@" + LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n"; + + /* Do some checks on provided FDT, if it exists*/ + if (orig_fdt) { + if (fdt_check_header(orig_fdt)) { + pr_efi_err(sys_table, "Device Tree header not valid!\n"); + return EFI_LOAD_ERROR; + } + /* + * We don't get the size of the FDT if we get if from a + * configuration table. + */ + if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { + pr_efi_err(sys_table, "Truncated device tree! foo!\n"); + return EFI_LOAD_ERROR; + } + } + + if (orig_fdt) + status = fdt_open_into(orig_fdt, fdt, new_fdt_size); + else + status = fdt_create_empty_tree(fdt, new_fdt_size); + + if (status != 0) + goto fdt_set_fail; + + /* + * Delete any memory nodes present. We must delete nodes which + * early_init_dt_scan_memory may try to use. 
+ */ + prev = 0; + for (;;) { + const char *type, *name; + int len; + + node = fdt_next_node(fdt, prev, NULL); + if (node < 0) + break; + + type = fdt_getprop(fdt, node, "device_type", &len); + if (type && strncmp(type, "memory", len) == 0) { + fdt_del_node(fdt, node); + continue; + } + + prev = node; + } + + node = fdt_subnode_offset(fdt, 0, "chosen"); + if (node < 0) { + node = fdt_add_subnode(fdt, 0, "chosen"); + if (node < 0) { + status = node; /* node is error code when negative */ + goto fdt_set_fail; + } + } + + if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) { + status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr, + strlen(cmdline_ptr) + 1); + if (status) + goto fdt_set_fail; + } + + /* Set initrd address/end in device tree, if present */ + if (initrd_size != 0) { + u64 initrd_image_end; + u64 initrd_image_start = cpu_to_fdt64(initrd_addr); + + status = fdt_setprop(fdt, node, "linux,initrd-start", + &initrd_image_start, sizeof(u64)); + if (status) + goto fdt_set_fail; + initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size); + status = fdt_setprop(fdt, node, "linux,initrd-end", + &initrd_image_end, sizeof(u64)); + if (status) + goto fdt_set_fail; + } + + /* Add FDT entries for EFI runtime services in chosen node. */ + node = fdt_subnode_offset(fdt, 0, "chosen"); + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); + status = fdt_setprop(fdt, node, "linux,uefi-system-table", + &fdt_val64, sizeof(fdt_val64)); + if (status) + goto fdt_set_fail; + + fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", + &fdt_val64, sizeof(fdt_val64)); + if (status) + goto fdt_set_fail; + + fdt_val32 = cpu_to_fdt32(map_size); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", + &fdt_val32, sizeof(fdt_val32)); + if (status) + goto fdt_set_fail; + + fdt_val32 = cpu_to_fdt32(desc_size); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", + &fdt_val32, sizeof(fdt_val32)); + if (status) + goto fdt_set_fail; + + fdt_val32 = cpu_to_fdt32(desc_ver); + status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", + &fdt_val32, sizeof(fdt_val32)); + if (status) + goto fdt_set_fail; + + /* + * Add kernel version banner so stub/kernel match can be + * verified. + */ + status = fdt_setprop_string(fdt, node, "linux,uefi-stub-kern-ver", + linux_banner); + if (status) + goto fdt_set_fail; + + return EFI_SUCCESS; + +fdt_set_fail: + if (status == -FDT_ERR_NOSPACE) + return EFI_BUFFER_TOO_SMALL; + + return EFI_LOAD_ERROR; +} + +#ifndef EFI_FDT_ALIGN +#define EFI_FDT_ALIGN EFI_PAGE_SIZE +#endif + +/* + * Allocate memory for a new FDT, then add EFI, commandline, and + * initrd related fields to the FDT. This routine increases the + * FDT allocation size until the allocated memory is large + * enough. EFI allocations are in EFI_PAGE_SIZE granules, + * which are fixed at 4K bytes, so in most cases the first + * allocation should succeed. + * EFI boot services are exited at the end of this function. + * There must be no allocations between the get_memory_map() + * call and the exit_boot_services() call, so the exiting of + * boot services is very tightly tied to the creation of the FDT + * with the final memory map in it. 
+ */ + +efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, + void *handle, + unsigned long *new_fdt_addr, + unsigned long max_addr, + u64 initrd_addr, u64 initrd_size, + char *cmdline_ptr, + unsigned long fdt_addr, + unsigned long fdt_size) +{ + unsigned long map_size, desc_size; + u32 desc_ver; + unsigned long mmap_key; + efi_memory_desc_t *memory_map; + unsigned long new_fdt_size; + efi_status_t status; + + /* + * Estimate size of new FDT, and allocate memory for it. We + * will allocate a bigger buffer if this ends up being too + * small, so a rough guess is OK here. + */ + new_fdt_size = fdt_size + EFI_PAGE_SIZE; + while (1) { + status = efi_high_alloc(sys_table, new_fdt_size, EFI_FDT_ALIGN, + new_fdt_addr, max_addr); + if (status != EFI_SUCCESS) { + pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); + goto fail; + } + + /* + * Now that we have done our final memory allocation (and free) + * we can get the memory map key needed for + * exit_boot_services(). + */ + status = efi_get_memory_map(sys_table, &memory_map, &map_size, + &desc_size, &desc_ver, &mmap_key); + if (status != EFI_SUCCESS) + goto fail_free_new_fdt; + + status = update_fdt(sys_table, + (void *)fdt_addr, fdt_size, + (void *)*new_fdt_addr, new_fdt_size, + cmdline_ptr, initrd_addr, initrd_size, + memory_map, map_size, desc_size, desc_ver); + + /* Succeeding the first time is the expected case. */ + if (status == EFI_SUCCESS) + break; + + if (status == EFI_BUFFER_TOO_SMALL) { + /* + * We need to allocate more space for the new + * device tree, so free existing buffer that is + * too small. Also free memory map, as we will need + * to get new one that reflects the free/alloc we do + * on the device tree buffer. + */ + efi_free(sys_table, new_fdt_size, *new_fdt_addr); + sys_table->boottime->free_pool(memory_map); + new_fdt_size += EFI_PAGE_SIZE; + } else { + pr_efi_err(sys_table, "Unable to construct new device tree.\n"); + goto fail_free_mmap; + } + } + + /* Now we are ready to exit_boot_services. */ + status = sys_table->boottime->exit_boot_services(handle, mmap_key); + + + if (status == EFI_SUCCESS) + return status; + + pr_efi_err(sys_table, "Exit boot services failed.\n"); + +fail_free_mmap: + sys_table->boottime->free_pool(memory_map); + +fail_free_new_fdt: + efi_free(sys_table, new_fdt_size, *new_fdt_addr); + +fail: + return EFI_LOAD_ERROR; +} + +static void *get_fdt(efi_system_table_t *sys_table) +{ + efi_guid_t fdt_guid = DEVICE_TREE_GUID; + efi_config_table_t *tables; + void *fdt; + int i; + + tables = (efi_config_table_t *) sys_table->tables; + fdt = NULL; + + for (i = 0; i < sys_table->nr_tables; i++) + if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) { + fdt = (void *) tables[i].table; + break; + } + + return fdt; +} --- linux-3.13.0.orig/drivers/firmware/efi/vars.c +++ linux-3.13.0/drivers/firmware/efi/vars.c @@ -481,7 +481,7 @@ */ static void efivar_entry_list_del_unlock(struct efivar_entry *entry) { - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); list_del(&entry->list); spin_unlock_irq(&__efivars->lock); @@ -507,7 +507,7 @@ const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); status = ops->set_variable(entry->var.VariableName, &entry->var.VendorGuid, @@ -667,7 +667,7 @@ int strsize1, strsize2; bool found = false; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); 
list_for_each_entry_safe(entry, n, head, list) { strsize1 = ucs2_strsize(name, 1024); @@ -739,7 +739,7 @@ const struct efivar_operations *ops = __efivars->ops; efi_status_t status; - WARN_ON(!spin_is_locked(&__efivars->lock)); + lockdep_assert_held(&__efivars->lock); status = ops->get_variable(entry->var.VariableName, &entry->var.VendorGuid, --- linux-3.13.0.orig/drivers/gpio/Kconfig +++ linux-3.13.0/drivers/gpio/Kconfig @@ -121,6 +121,15 @@ help Say yes here to support basic platform_device memory-mapped GPIO controllers. +config GPIO_DWAPB + tristate "Synopsys DesignWare APB GPIO driver" + select GPIO_GENERIC + select GENERIC_IRQ_CHIP + depends on OF_GPIO + help + Say Y or M here to build support for the Synopsys DesignWare APB + GPIO block. + config GPIO_IT8761E tristate "IT8761E GPIO support" depends on X86 # unconditional access to IO space. --- linux-3.13.0.orig/drivers/gpio/Makefile +++ linux-3.13.0/drivers/gpio/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o +obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o obj-$(CONFIG_GPIO_EM) += gpio-em.o obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o obj-$(CONFIG_GPIO_F7188X) += gpio-f7188x.o --- linux-3.13.0.orig/drivers/gpio/gpio-dwapb.c +++ linux-3.13.0/drivers/gpio/gpio-dwapb.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2011 Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * All enquiries to support@picochip.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GPIO_SWPORTA_DR 0x00 +#define GPIO_SWPORTA_DDR 0x04 +#define GPIO_SWPORTB_DR 0x0c +#define GPIO_SWPORTB_DDR 0x10 +#define GPIO_SWPORTC_DR 0x18 +#define GPIO_SWPORTC_DDR 0x1c +#define GPIO_SWPORTD_DR 0x24 +#define GPIO_SWPORTD_DDR 0x28 +#define GPIO_INTEN 0x30 +#define GPIO_INTMASK 0x34 +#define GPIO_INTTYPE_LEVEL 0x38 +#define GPIO_INT_POLARITY 0x3c +#define GPIO_INTSTATUS 0x40 +#define GPIO_PORTA_EOI 0x4c +#define GPIO_EXT_PORTA 0x50 +#define GPIO_EXT_PORTB 0x54 +#define GPIO_EXT_PORTC 0x58 +#define GPIO_EXT_PORTD 0x5c + +#define DWAPB_MAX_PORTS 4 +#define GPIO_EXT_PORT_SIZE (GPIO_EXT_PORTB - GPIO_EXT_PORTA) +#define GPIO_SWPORT_DR_SIZE (GPIO_SWPORTB_DR - GPIO_SWPORTA_DR) +#define GPIO_SWPORT_DDR_SIZE (GPIO_SWPORTB_DDR - GPIO_SWPORTA_DDR) + +struct dwapb_gpio; + +struct dwapb_gpio_port { + struct bgpio_chip bgc; + bool is_registered; + struct dwapb_gpio *gpio; +}; + +struct dwapb_gpio { + struct device *dev; + void __iomem *regs; + struct dwapb_gpio_port *ports; + unsigned int nr_ports; + struct irq_domain *domain; +}; + +static int dwapb_gpio_to_irq(struct gpio_chip *gc, unsigned offset) +{ + struct bgpio_chip *bgc = to_bgpio_chip(gc); + struct dwapb_gpio_port *port = container_of(bgc, struct + dwapb_gpio_port, bgc); + struct dwapb_gpio *gpio = port->gpio; + + return irq_find_mapping(gpio->domain, offset); +} + +static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs) +{ + u32 v = readl(gpio->regs + GPIO_INT_POLARITY); + + if (gpio_get_value(gpio->ports[0].bgc.gc.base + offs)) + v &= ~BIT(offs); + else + v |= BIT(offs); + + writel(v, gpio->regs + GPIO_INT_POLARITY); +} + +static void dwapb_irq_handler(u32 irq, struct irq_desc *desc) +{ + struct dwapb_gpio *gpio = irq_get_handler_data(irq); + struct 
irq_chip *chip = irq_desc_get_chip(desc); + u32 irq_status = readl_relaxed(gpio->regs + GPIO_INTSTATUS); + + while (irq_status) { + int hwirq = fls(irq_status) - 1; + int gpio_irq = irq_find_mapping(gpio->domain, hwirq); + + generic_handle_irq(gpio_irq); + irq_status &= ~BIT(hwirq); + + if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK) + == IRQ_TYPE_EDGE_BOTH) + dwapb_toggle_trigger(gpio, hwirq); + } + + if (chip->irq_eoi) + chip->irq_eoi(irq_desc_get_irq_data(desc)); +} + +static void dwapb_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct dwapb_gpio *gpio = igc->private; + struct bgpio_chip *bgc = &gpio->ports[0].bgc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&bgc->lock, flags); + val = readl(gpio->regs + GPIO_INTEN); + val |= BIT(d->hwirq); + writel(val, gpio->regs + GPIO_INTEN); + spin_unlock_irqrestore(&bgc->lock, flags); +} + +static void dwapb_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct dwapb_gpio *gpio = igc->private; + struct bgpio_chip *bgc = &gpio->ports[0].bgc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&bgc->lock, flags); + val = readl(gpio->regs + GPIO_INTEN); + val &= ~BIT(d->hwirq); + writel(val, gpio->regs + GPIO_INTEN); + spin_unlock_irqrestore(&bgc->lock, flags); +} + +static unsigned int dwapb_irq_startup(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct dwapb_gpio *gpio = igc->private; + struct bgpio_chip *bgc = &gpio->ports[0].bgc; + + if (gpio_lock_as_irq(&bgc->gc, irqd_to_hwirq(d))) + dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n", + irqd_to_hwirq(d)); + dwapb_irq_enable(d); + return 0; +} + +static void dwapb_irq_shutdown(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct dwapb_gpio *gpio = igc->private; + struct bgpio_chip *bgc = &gpio->ports[0].bgc; + + dwapb_irq_disable(d); + gpio_unlock_as_irq(&bgc->gc, irqd_to_hwirq(d)); +} + +static int dwapb_irq_set_type(struct irq_data *d, u32 type) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct dwapb_gpio *gpio = igc->private; + struct bgpio_chip *bgc = &gpio->ports[0].bgc; + int bit = d->hwirq; + unsigned long level, polarity, flags; + + if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + return -EINVAL; + + spin_lock_irqsave(&bgc->lock, flags); + level = readl(gpio->regs + GPIO_INTTYPE_LEVEL); + polarity = readl(gpio->regs + GPIO_INT_POLARITY); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: + level |= BIT(bit); + dwapb_toggle_trigger(gpio, bit); + break; + case IRQ_TYPE_EDGE_RISING: + level |= BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_EDGE_FALLING: + level |= BIT(bit); + polarity &= ~BIT(bit); + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~BIT(bit); + polarity &= ~BIT(bit); + break; + } + + irq_setup_alt_chip(d, type); + + writel(level, gpio->regs + GPIO_INTTYPE_LEVEL); + writel(polarity, gpio->regs + GPIO_INT_POLARITY); + spin_unlock_irqrestore(&bgc->lock, flags); + + return 0; +} + +static void dwapb_configure_irqs(struct dwapb_gpio *gpio, + struct dwapb_gpio_port *port) +{ + struct gpio_chip *gc = &port->bgc.gc; + struct device_node *node = gc->of_node; + struct irq_chip_generic *irq_gc; + unsigned int hwirq, ngpio = gc->ngpio; + struct irq_chip_type *ct; + int err, irq, i; + + irq = 
irq_of_parse_and_map(node, 0); + if (!irq) { + dev_warn(gpio->dev, "no irq for bank %s\n", + port->bgc.gc.of_node->full_name); + return; + } + + gpio->domain = irq_domain_add_linear(node, ngpio, + &irq_generic_chip_ops, gpio); + if (!gpio->domain) + return; + + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, + "gpio-dwapb", handle_level_irq, + IRQ_NOREQUEST, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (err) { + dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); + if (!irq_gc) { + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc->reg_base = gpio->regs; + irq_gc->private = gpio; + + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = dwapb_irq_set_type; + ct->chip.irq_enable = dwapb_irq_enable; + ct->chip.irq_disable = dwapb_irq_disable; + ct->regs.ack = GPIO_PORTA_EOI; + ct->regs.mask = GPIO_INTMASK; + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; + + irq_set_chained_handler(irq, dwapb_irq_handler); + irq_set_handler_data(irq, gpio); + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_create_mapping(gpio->domain, hwirq); + + port->bgc.gc.to_irq = dwapb_gpio_to_irq; +} + +static void dwapb_irq_teardown(struct dwapb_gpio *gpio) +{ + struct dwapb_gpio_port *port = &gpio->ports[0]; + struct gpio_chip *gc = &port->bgc.gc; + unsigned int ngpio = gc->ngpio; + irq_hw_number_t hwirq; + + if (!gpio->domain) + return; + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq)); + + irq_domain_remove(gpio->domain); + gpio->domain = NULL; +} + +static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, + struct device_node *port_np, + unsigned int offs) +{ + struct dwapb_gpio_port *port; + u32 port_idx, ngpio; + void __iomem *dat, *set, *dirout; + int err; + + if (of_property_read_u32(port_np, "reg", &port_idx) || + port_idx >= DWAPB_MAX_PORTS) { + dev_err(gpio->dev, "missing/invalid port index for %s\n", + port_np->full_name); + return -EINVAL; + } + + port = &gpio->ports[offs]; + port->gpio = gpio; + + if (of_property_read_u32(port_np, "snps,nr-gpios", &ngpio)) { + dev_info(gpio->dev, "failed to get number of gpios for %s\n", + port_np->full_name); + ngpio = 32; + } + + dat = gpio->regs + GPIO_EXT_PORTA + (port_idx * GPIO_EXT_PORT_SIZE); + set = gpio->regs + GPIO_SWPORTA_DR + (port_idx * GPIO_SWPORT_DR_SIZE); + dirout = gpio->regs + GPIO_SWPORTA_DDR + + (port_idx * GPIO_SWPORT_DDR_SIZE); + + err = bgpio_init(&port->bgc, gpio->dev, 4, dat, set, NULL, dirout, + NULL, false); + if (err) { + dev_err(gpio->dev, "failed to init gpio chip for %s\n", + port_np->full_name); + return err; + } + + port->bgc.gc.ngpio = ngpio; + port->bgc.gc.of_node = port_np; + + /* + * Only port A can provide interrupts in all configurations of the IP. 
+ */ + if (port_idx == 0 && + of_property_read_bool(port_np, "interrupt-controller")) + dwapb_configure_irqs(gpio, port); + + err = gpiochip_add(&port->bgc.gc); + if (err) + dev_err(gpio->dev, "failed to register gpiochip for %s\n", + port_np->full_name); + else + port->is_registered = true; + + return err; +} + +static void dwapb_gpio_unregister(struct dwapb_gpio *gpio) +{ + unsigned int m; + + for (m = 0; m < gpio->nr_ports; ++m) + if (gpio->ports[m].is_registered) + WARN_ON(gpiochip_remove(&gpio->ports[m].bgc.gc)); +} + +static int dwapb_gpio_probe(struct platform_device *pdev) +{ + struct resource *res; + struct dwapb_gpio *gpio; + struct device_node *np; + int err; + unsigned int offs = 0; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + gpio->dev = &pdev->dev; + + gpio->nr_ports = of_get_child_count(pdev->dev.of_node); + if (!gpio->nr_ports) { + err = -EINVAL; + goto out_err; + } + gpio->ports = devm_kzalloc(&pdev->dev, gpio->nr_ports * + sizeof(*gpio->ports), GFP_KERNEL); + if (!gpio->ports) { + err = -ENOMEM; + goto out_err; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) { + err = PTR_ERR(gpio->regs); + goto out_err; + } + + for_each_child_of_node(pdev->dev.of_node, np) { + err = dwapb_gpio_add_port(gpio, np, offs++); + if (err) + goto out_unregister; + } + platform_set_drvdata(pdev, gpio); + + return 0; + +out_unregister: + dwapb_gpio_unregister(gpio); + dwapb_irq_teardown(gpio); + +out_err: + return err; +} + +static int dwapb_gpio_remove(struct platform_device *pdev) +{ + struct dwapb_gpio *gpio = platform_get_drvdata(pdev); + + dwapb_gpio_unregister(gpio); + dwapb_irq_teardown(gpio); + + return 0; +} + +static const struct of_device_id dwapb_of_match[] = { + { .compatible = "snps,dw-apb-gpio" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, dwapb_of_match); + +static struct platform_driver dwapb_gpio_driver = { + .driver = { + .name = "gpio-dwapb", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(dwapb_of_match), + }, + .probe = dwapb_gpio_probe, + .remove = dwapb_gpio_remove, +}; + +module_platform_driver(dwapb_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jamie Iles"); +MODULE_DESCRIPTION("Synopsys DesignWare APB GPIO driver"); --- linux-3.13.0.orig/drivers/gpio/gpio-mcp23s08.c +++ linux-3.13.0/drivers/gpio/gpio-mcp23s08.c @@ -656,9 +656,11 @@ dev_err(&spi->dev, "invalid spi-present-mask\n"); return -ENODEV; } - - for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) + for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { + if ((spi_present_mask & (1 << addr))) + chips++; pullups[addr] = 0; + } } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); @@ -681,12 +683,12 @@ pullups[addr] = pdata->chip[addr].pullups; } - if (!chips) - return -ENODEV; - base = pdata->base; } + if (!chips) + return -ENODEV; + data = kzalloc(sizeof *data + chips * sizeof(struct mcp23s08), GFP_KERNEL); if (!data) --- linux-3.13.0.orig/drivers/gpio/gpio-mxs.c +++ linux-3.13.0/drivers/gpio/gpio-mxs.c @@ -214,7 +214,8 @@ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR; ct->regs.mask = PINCTRL_IRQEN(port); - irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0); + irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK, + IRQ_NOREQUEST, 0); } static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset) --- linux-3.13.0.orig/drivers/gpio/gpio-tps65912.c +++ 
linux-3.13.0/drivers/gpio/gpio-tps65912.c @@ -26,9 +26,12 @@ struct gpio_chip gpio_chip; }; +#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip) + static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; int val; val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); @@ -42,7 +45,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, int value) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; if (value) tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, @@ -55,7 +59,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, int value) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; /* Set the initial value */ tps65912_gpio_set(gc, offset, value); @@ -66,7 +71,8 @@ static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, GPIO_CFG_MASK); --- linux-3.13.0.orig/drivers/gpio/gpiolib-acpi.c +++ linux-3.13.0/drivers/gpio/gpiolib-acpi.c @@ -94,7 +94,7 @@ * gpio pins have acpi event methods and assigns interrupt handlers that calls * the acpi event methods for those pins. */ -void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) +static void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_resource *res; @@ -192,7 +192,6 @@ irq); } } -EXPORT_SYMBOL(acpi_gpiochip_request_interrupts); /** * acpi_gpiochip_free_interrupts() - Free GPIO _EVT ACPI event interrupts. @@ -203,7 +202,7 @@ * The remaining ACPI event interrupts associated with the chip are freed * automatically. */ -void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) +static void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { acpi_handle handle; acpi_status status; @@ -230,7 +229,6 @@ acpi_detach_data(handle, acpi_gpio_evt_dh); kfree(evt_pins); } -EXPORT_SYMBOL(acpi_gpiochip_free_interrupts); struct acpi_gpio_lookup { struct acpi_gpio_info info; @@ -310,3 +308,13 @@ return lookup.desc ? lookup.desc : ERR_PTR(-ENODEV); } EXPORT_SYMBOL_GPL(acpi_get_gpiod_by_index); + +void acpi_gpiochip_add(struct gpio_chip *chip) +{ + acpi_gpiochip_request_interrupts(chip); +} + +void acpi_gpiochip_remove(struct gpio_chip *chip) +{ + acpi_gpiochip_free_interrupts(chip); +} --- linux-3.13.0.orig/drivers/gpio/gpiolib-of.c +++ linux-3.13.0/drivers/gpio/gpiolib-of.c @@ -44,8 +44,15 @@ return false; ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); - if (ret < 0) + if (ret < 0) { + /* We've found a gpio chip, but the translation failed. + * Store translation error in out_gpio. + * Return false to keep looking, as more than one gpio chip + * could be registered per of-node. 
+ */ + gg_data->out_gpio = ERR_PTR(ret); return false; + } gg_data->out_gpio = gpio_to_desc(ret + gc->base); return true; --- linux-3.13.0.orig/drivers/gpio/gpiolib.c +++ linux-3.13.0/drivers/gpio/gpiolib.c @@ -16,6 +16,8 @@ #include #include +#include "gpiolib.h" + #define CREATE_TRACE_POINTS #include @@ -388,7 +390,7 @@ return status; } -static const DEVICE_ATTR(value, 0644, +static DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) @@ -613,18 +615,15 @@ return status ? : size; } -static const DEVICE_ATTR(active_low, 0644, +static DEVICE_ATTR(active_low, 0644, gpio_active_low_show, gpio_active_low_store); -static const struct attribute *gpio_attrs[] = { +static struct attribute *gpio_attrs[] = { &dev_attr_value.attr, &dev_attr_active_low.attr, NULL, }; - -static const struct attribute_group gpio_attr_group = { - .attrs = (struct attribute **) gpio_attrs, -}; +ATTRIBUTE_GROUPS(gpio); /* * /sys/class/gpio/gpiochipN/ @@ -660,16 +659,13 @@ } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); -static const struct attribute *gpiochip_attrs[] = { +static struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; - -static const struct attribute_group gpiochip_attr_group = { - .attrs = (struct attribute **) gpiochip_attrs, -}; +ATTRIBUTE_GROUPS(gpiochip); /* * /sys/class/gpio/export ... write-only @@ -824,18 +820,15 @@ if (desc->chip->names && desc->chip->names[offset]) ioname = desc->chip->names[offset]; - dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), - desc, ioname ? ioname : "gpio%u", - desc_to_gpio(desc)); + dev = device_create_with_groups(&gpio_class, desc->chip->dev, + MKDEV(0, 0), desc, gpio_groups, + ioname ? 
ioname : "gpio%u", + desc_to_gpio(desc)); if (IS_ERR(dev)) { status = PTR_ERR(dev); goto fail_unlock; } - status = sysfs_create_group(&dev->kobj, &gpio_attr_group); - if (status) - goto fail_unregister_device; - if (direction_may_change) { status = device_create_file(dev, &dev_attr_direction); if (status) @@ -846,13 +839,15 @@ !test_bit(FLAG_IS_OUT, &desc->flags))) { status = device_create_file(dev, &dev_attr_edge); if (status) - goto fail_unregister_device; + goto fail_remove_attr_direction; } set_bit(FLAG_EXPORT, &desc->flags); mutex_unlock(&sysfs_lock); return 0; +fail_remove_attr_direction: + device_remove_file(dev, &dev_attr_direction); fail_unregister_device: device_unregister(dev); fail_unlock: @@ -898,6 +893,7 @@ if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); + put_device(tdev); } else { status = -ENODEV; } @@ -946,7 +942,7 @@ } status = sysfs_set_active_low(desc, dev, value); - + put_device(dev); unlock: mutex_unlock(&sysfs_lock); @@ -989,6 +985,8 @@ mutex_unlock(&sysfs_lock); if (dev) { + device_remove_file(dev, &dev_attr_edge); + device_remove_file(dev, &dev_attr_direction); device_unregister(dev); put_device(dev); } @@ -1014,13 +1012,13 @@ /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); - dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, - "gpiochip%d", chip->base); - if (!IS_ERR(dev)) { - status = sysfs_create_group(&dev->kobj, - &gpiochip_attr_group); - } else + dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), + chip, gpiochip_groups, + "gpiochip%d", chip->base); + if (IS_ERR(dev)) status = PTR_ERR(dev); + else + status = 0; chip->exported = (status == 0); mutex_unlock(&sysfs_lock); @@ -1208,18 +1206,22 @@ spin_unlock_irqrestore(&gpio_lock, flags); + if (status) + goto fail; + #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&chip->pin_ranges); #endif of_gpiochip_add(chip); - - if (status) - goto fail; + acpi_gpiochip_add(chip); status = gpiochip_export(chip); - if (status) + if (status) { + acpi_gpiochip_remove(chip); + of_gpiochip_remove(chip); goto fail; + } pr_debug("gpiochip_add: registered GPIOs %d to %d on device: %s\n", chip->base, chip->base + chip->ngpio - 1, @@ -1250,11 +1252,11 @@ int status = 0; unsigned id; - spin_lock_irqsave(&gpio_lock, flags); - gpiochip_remove_pin_ranges(chip); of_gpiochip_remove(chip); + acpi_gpiochip_remove(chip); + spin_lock_irqsave(&gpio_lock, flags); for (id = 0; id < chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) { status = -EBUSY; --- linux-3.13.0.orig/drivers/gpio/gpiolib.h +++ linux-3.13.0/drivers/gpio/gpiolib.h @@ -0,0 +1,23 @@ +/* + * Internal GPIO functions. + * + * Copyright (C) 2013, Intel Corporation + * Author: Mika Westerberg + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef GPIOLIB_H +#define GPIOLIB_H + +#ifdef CONFIG_ACPI +void acpi_gpiochip_add(struct gpio_chip *chip); +void acpi_gpiochip_remove(struct gpio_chip *chip); +#else +static inline void acpi_gpiochip_add(struct gpio_chip *chip) { } +static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { } +#endif + +#endif /* GPIOLIB_H */ --- linux-3.13.0.orig/drivers/gpu/drm/armada/armada_crtc.c +++ linux-3.13.0/drivers/gpu/drm/armada/armada_crtc.c @@ -678,6 +678,7 @@ base + LCD_SPU_SRAM_WRDAT); writel_relaxed(addr | SRAM_WRITE, base + LCD_SPU_SRAM_CTRL); + readl_relaxed(base + LCD_SPU_HWC_OVSA_HPXL_VLN); addr += 1; if ((addr & 0x00ff) == 0) addr += 0xf00; --- linux-3.13.0.orig/drivers/gpu/drm/armada/armada_drv.c +++ linux-3.13.0/drivers/gpu/drm/armada/armada_drv.c @@ -68,15 +68,7 @@ { struct armada_private *priv = dev->dev_private; - /* - * Yes, we really must jump through these hoops just to store a - * _pointer_ to something into the kfifo. This is utterly insane - * and idiotic, because it kfifo requires the _data_ pointed to by - * the pointer const, not the pointer itself. Not only that, but - * you have to pass a pointer _to_ the pointer you want stored. - */ - const struct drm_framebuffer *silly_api_alert = fb; - WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert)); + WARN_ON(!kfifo_put(&priv->fb_unref, fb)); schedule_work(&priv->fb_unref_work); } --- linux-3.13.0.orig/drivers/gpu/drm/ast/ast_fb.c +++ linux-3.13.0/drivers/gpu/drm/ast/ast_fb.c @@ -65,7 +65,7 @@ * then the BO is being moved and we should * store up the damage until later. */ - if (!in_interrupt()) + if (drm_can_sleep()) ret = ast_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) --- linux-3.13.0.orig/drivers/gpu/drm/ast/ast_main.c +++ linux-3.13.0/drivers/gpu/drm/ast/ast_main.c @@ -66,6 +66,7 @@ static int ast_detect_chip(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; + ast_open_key(ast); if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; @@ -100,7 +101,7 @@ } ast->vga2_clone = false; } else { - ast->chip = 2000; + ast->chip = AST2000; DRM_INFO("AST 2000 detected\n"); } } --- linux-3.13.0.orig/drivers/gpu/drm/ast/ast_mode.c +++ linux-3.13.0/drivers/gpu/drm/ast/ast_mode.c @@ -1012,8 +1012,8 @@ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); - data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4); - data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4); + data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4); + data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4); writel(data32.ul, dstxor); csum += data32.ul; --- linux-3.13.0.orig/drivers/gpu/drm/cirrus/cirrus_drv.c +++ linux-3.13.0/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -31,6 +31,8 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0, 0, 0 }, + { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN, + 0x0001, 0, 0, 0 }, {0,} }; --- linux-3.13.0.orig/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ linux-3.13.0/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -39,7 +39,7 @@ * then the BO is being moved and we should * store up the damage until later. 
*/ - if (!in_interrupt()) + if (drm_can_sleep()) ret = cirrus_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) --- linux-3.13.0.orig/drivers/gpu/drm/cirrus/cirrus_mode.c +++ linux-3.13.0/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -273,8 +273,8 @@ sr07 |= 0x11; break; case 16: - sr07 |= 0xc1; - hdr = 0xc0; + sr07 |= 0x17; + hdr = 0xc1; break; case 24: sr07 |= 0x15; --- linux-3.13.0.orig/drivers/gpu/drm/drm_crtc.c +++ linux-3.13.0/drivers/gpu/drm/drm_crtc.c @@ -215,6 +215,16 @@ { DRM_MODE_ENCODER_DSI, "DSI" }, }; +static const struct drm_prop_enum_list drm_subpixel_enum_list[] = +{ + { SubPixelUnknown, "Unknown" }, + { SubPixelHorizontalRGB, "Horizontal RGB" }, + { SubPixelHorizontalBGR, "Horizontal BGR" }, + { SubPixelVerticalRGB, "Vertical RGB" }, + { SubPixelVerticalBGR, "Vertical BGR" }, + { SubPixelNone, "None" }, +}; + void drm_connector_ida_init(void) { int i; @@ -264,6 +274,19 @@ } EXPORT_SYMBOL(drm_get_connector_status_name); +/** + * drm_get_subpixel_order_name - return a string for a given subpixel enum + * @order: enum of subpixel_order + * + * Note you could abuse this and return something out of bounds, but that + * would be a caller error. No unscrubbed user data should make it here. + */ +const char *drm_get_subpixel_order_name(enum subpixel_order order) +{ + return drm_subpixel_enum_list[order].name; +} +EXPORT_SYMBOL(drm_get_subpixel_order_name); + static char printable_char(int c) { return isascii(c) && isprint(c) ? c : '?'; @@ -675,6 +698,29 @@ EXPORT_SYMBOL(drm_crtc_cleanup); /** + * drm_crtc_index - find the index of a registered CRTC + * @crtc: CRTC to find index for + * + * Given a registered CRTC, return the index of that CRTC within a DRM + * device's list of CRTCs. + */ +unsigned int drm_crtc_index(struct drm_crtc *crtc) +{ + unsigned int index = 0; + struct drm_crtc *tmp; + + list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) { + if (tmp == crtc) + return index; + + index++; + } + + BUG(); +} +EXPORT_SYMBOL(drm_crtc_index); + +/** * drm_mode_probed_add - add a mode to a connector's probed mode list * @connector: connector the new mode * @mode: mode data --- linux-3.13.0.orig/drivers/gpu/drm/drm_crtc_helper.c +++ linux-3.13.0/drivers/gpu/drm/drm_crtc_helper.c @@ -324,35 +324,6 @@ } EXPORT_SYMBOL(drm_helper_disable_unused_functions); -/** - * drm_encoder_crtc_ok - can a given crtc drive a given encoder? - * @encoder: encoder to test - * @crtc: crtc to test - * - * Return false if @encoder can't be driven by @crtc, true otherwise. - */ -static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, - struct drm_crtc *crtc) -{ - struct drm_device *dev; - struct drm_crtc *tmp; - int crtc_mask = 1; - - WARN(!crtc, "checking null crtc?\n"); - - dev = crtc->dev; - - list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) { - if (tmp == crtc) - break; - crtc_mask <<= 1; - } - - if (encoder->possible_crtcs & crtc_mask) - return true; - return false; -} - /* * Check the CRTC we're going to map each output to vs. its current * CRTC. If they don't match, we have to disable the output and the CRTC @@ -536,7 +507,7 @@ * are later needed by vblank and swap-completion * timestamping. They are derived from true hwmode. 
*/ - drm_calc_timestamping_constants(crtc); + drm_calc_timestamping_constants(crtc, &crtc->hwmode); /* FIXME: add subpixel order */ done: --- linux-3.13.0.orig/drivers/gpu/drm/drm_dp_helper.c +++ linux-3.13.0/drivers/gpu/drm/drm_dp_helper.c @@ -346,3 +346,399 @@ } } EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); + +/** + * DOC: dp helpers + * + * The DisplayPort AUX channel is an abstraction to allow generic, driver- + * independent access to AUX functionality. Drivers can take advantage of + * this by filling in the fields of the drm_dp_aux structure. + * + * Transactions are described using a hardware-independent drm_dp_aux_msg + * structure, which is passed into a driver's .transfer() implementation. + * Both native and I2C-over-AUX transactions are supported. + */ + +static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, + unsigned int offset, void *buffer, size_t size) +{ + struct drm_dp_aux_msg msg; + unsigned int retry; + int err; + + memset(&msg, 0, sizeof(msg)); + msg.address = offset; + msg.request = request; + msg.buffer = buffer; + msg.size = size; + + /* + * The specification doesn't give any recommendation on how often to + * retry native transactions, so retry 7 times like for I2C-over-AUX + * transactions. + */ + for (retry = 0; retry < 7; retry++) { + err = aux->transfer(aux, &msg); + if (err < 0) { + if (err == -EBUSY) + continue; + + return err; + } + + + switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { + case DP_AUX_NATIVE_REPLY_ACK: + if (err < size) + return -EPROTO; + return err; + + case DP_AUX_NATIVE_REPLY_NACK: + return -EIO; + + case DP_AUX_NATIVE_REPLY_DEFER: + usleep_range(400, 500); + break; + } + } + + DRM_DEBUG_KMS("too many retries, giving up\n"); + return -EIO; +} + +/** + * drm_dp_dpcd_read() - read a series of bytes from the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the (first) register to read + * @buffer: buffer to store the register values + * @size: number of bytes in @buffer + * + * Returns the number of bytes transferred on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. + */ +ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size) +{ + return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer, + size); +} +EXPORT_SYMBOL(drm_dp_dpcd_read); + +/** + * drm_dp_dpcd_write() - write a series of bytes to the DPCD + * @aux: DisplayPort AUX channel + * @offset: address of the (first) register to write + * @buffer: buffer containing the values to write + * @size: number of bytes in @buffer + * + * Returns the number of bytes transferred on success, or a negative error + * code on failure. -EIO is returned if the request was NAKed by the sink or + * if the retry count was exceeded. If not all bytes were transferred, this + * function returns -EPROTO. Errors from the underlying AUX channel transfer + * function, with the exception of -EBUSY (which causes the transaction to + * be retried), are propagated to the caller. 
+ */ +ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, + void *buffer, size_t size) +{ + return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, + size); +} +EXPORT_SYMBOL(drm_dp_dpcd_write); + +/** + * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207) + * @aux: DisplayPort AUX channel + * @status: buffer to store the link status in (must be at least 6 bytes) + * + * Returns the number of bytes transferred on success or a negative error + * code on failure. + */ +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]) +{ + return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status, + DP_LINK_STATUS_SIZE); +} +EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); + +/** + * drm_dp_link_probe() - probe a DisplayPort link for capabilities + * @aux: DisplayPort AUX channel + * @link: pointer to structure in which to return link capabilities + * + * The structure filled in by this function can usually be passed directly + * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and + * configure the link based on the link's capabilities. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[3]; + int err; + + memset(link, 0, sizeof(*link)); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + if (err < 0) + return err; + + link->revision = values[0]; + link->rate = drm_dp_bw_code_to_link_rate(values[1]); + link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + + if (values[2] & DP_ENHANCED_FRAME_CAP) + link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_probe); + +/** + * drm_dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_up); + +/** + * drm_dp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
+ */ +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_configure); + +/* + * I2C-over-AUX implementation + */ + +static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +/* + * Transfer a single I2C-over-AUX message and handle various error conditions, + * retrying the transaction as appropriate. + */ +static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + unsigned int retry; + int err; + + /* + * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device + * is required to retry at least seven times upon receiving AUX_DEFER + * before giving up the AUX transaction. + */ + for (retry = 0; retry < 7; retry++) { + err = aux->transfer(aux, msg); + if (err < 0) { + if (err == -EBUSY) + continue; + + DRM_DEBUG_KMS("transaction failed: %d\n", err); + return err; + } + + + switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) { + case DP_AUX_NATIVE_REPLY_ACK: + /* + * For I2C-over-AUX transactions this isn't enough, we + * need to check for the I2C ACK reply. + */ + break; + + case DP_AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("native nack\n"); + return -EREMOTEIO; + + case DP_AUX_NATIVE_REPLY_DEFER: + DRM_DEBUG_KMS("native defer"); + /* + * We could check for I2C bit rate capabilities and if + * available adjust this interval. We could also be + * more careful with DP-to-legacy adapters where a + * long legacy cable may force very low I2C bit rates. + * + * For now just defer for long enough to hopefully be + * safe for all use-cases. + */ + usleep_range(500, 600); + continue; + + default: + DRM_ERROR("invalid native reply %#04x\n", msg->reply); + return -EREMOTEIO; + } + + switch (msg->reply & DP_AUX_I2C_REPLY_MASK) { + case DP_AUX_I2C_REPLY_ACK: + /* + * Both native ACK and I2C ACK replies received. We + * can assume the transfer was successful. + */ + if (err < msg->size) + return -EPROTO; + return 0; + + case DP_AUX_I2C_REPLY_NACK: + DRM_DEBUG_KMS("I2C nack\n"); + return -EREMOTEIO; + + case DP_AUX_I2C_REPLY_DEFER: + DRM_DEBUG_KMS("I2C defer\n"); + usleep_range(400, 500); + continue; + + default: + DRM_ERROR("invalid I2C reply %#04x\n", msg->reply); + return -EREMOTEIO; + } + } + + DRM_DEBUG_KMS("too many retries, giving up\n"); + return -EREMOTEIO; +} + +static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + unsigned int i, j; + + for (i = 0; i < num; i++) { + struct drm_dp_aux_msg msg; + int err; + + /* + * Many hardware implementations support FIFOs larger than a + * single byte, but it has been empirically determined that + * transferring data in larger chunks can actually lead to + * decreased performance. Therefore each message is simply + * transferred byte-by-byte. + */ + for (j = 0; j < msgs[i].len; j++) { + memset(&msg, 0, sizeof(msg)); + msg.address = msgs[i].addr; + + msg.request = (msgs[i].flags & I2C_M_RD) ? 
+ DP_AUX_I2C_READ : + DP_AUX_I2C_WRITE; + + /* + * All messages except the last one are middle-of- + * transfer messages. + */ + if ((i < num - 1) || (j < msgs[i].len - 1)) + msg.request |= DP_AUX_I2C_MOT; + + msg.buffer = msgs[i].buf + j; + msg.size = 1; + + err = drm_dp_i2c_do_msg(aux, &msg); + if (err < 0) + return err; + } + } + + return num; +} + +static const struct i2c_algorithm drm_dp_i2c_algo = { + .functionality = drm_dp_i2c_functionality, + .master_xfer = drm_dp_i2c_xfer, +}; + +/** + * drm_dp_aux_register_i2c_bus() - register an I2C adapter for I2C-over-AUX + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_aux_register_i2c_bus(struct drm_dp_aux *aux) +{ + aux->ddc.algo = &drm_dp_i2c_algo; + aux->ddc.algo_data = aux; + aux->ddc.retries = 3; + + aux->ddc.class = I2C_CLASS_DDC; + aux->ddc.owner = THIS_MODULE; + aux->ddc.dev.parent = aux->dev; + aux->ddc.dev.of_node = aux->dev->of_node; + + strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + sizeof(aux->ddc.name)); + + return i2c_add_adapter(&aux->ddc); +} +EXPORT_SYMBOL(drm_dp_aux_register_i2c_bus); + +/** + * drm_dp_aux_unregister_i2c_bus() - unregister an I2C-over-AUX adapter + * @aux: DisplayPort AUX channel + */ +void drm_dp_aux_unregister_i2c_bus(struct drm_dp_aux *aux) +{ + i2c_del_adapter(&aux->ddc); +} +EXPORT_SYMBOL(drm_dp_aux_unregister_i2c_bus); --- linux-3.13.0.orig/drivers/gpu/drm/drm_fb_helper.c +++ linux-3.13.0/drivers/gpu/drm/drm_fb_helper.c @@ -1130,7 +1130,7 @@ return count; } -static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) +struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height) { struct drm_display_mode *mode; @@ -1143,6 +1143,7 @@ } return NULL; } +EXPORT_SYMBOL(drm_has_preferred_mode); static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector) { @@ -1151,7 +1152,7 @@ return cmdline_mode->specified; } -static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, +struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, int width, int height) { struct drm_cmdline_mode *cmdline_mode; @@ -1191,6 +1192,7 @@ list_add(&mode->head, &fb_helper_conn->connector->modes); return mode; } +EXPORT_SYMBOL(drm_pick_cmdline_mode); static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { --- linux-3.13.0.orig/drivers/gpu/drm/drm_gem.c +++ linux-3.13.0/drivers/gpu/drm/drm_gem.c @@ -129,11 +129,12 @@ { struct file *filp; + drm_gem_private_object_init(dev, obj, size); + filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); if (IS_ERR(filp)) return PTR_ERR(filp); - drm_gem_private_object_init(dev, obj, size); obj->filp = filp; return 0; --- linux-3.13.0.orig/drivers/gpu/drm/drm_ioctl.c +++ linux-3.13.0/drivers/gpu/drm/drm_ioctl.c @@ -296,6 +296,18 @@ case DRM_CAP_ASYNC_PAGE_FLIP: req->value = dev->mode_config.async_page_flip; break; + case DRM_CAP_CURSOR_WIDTH: + if (dev->mode_config.cursor_width) + req->value = dev->mode_config.cursor_width; + else + req->value = 64; + break; + case DRM_CAP_CURSOR_HEIGHT: + if (dev->mode_config.cursor_height) + req->value = dev->mode_config.cursor_height; + else + req->value = 64; + break; default: return -EINVAL; } --- linux-3.13.0.orig/drivers/gpu/drm/drm_irq.c +++ linux-3.13.0/drivers/gpu/drm/drm_irq.c @@ -445,20 +445,22 @@ * adjustments into 
account. * * @crtc drm_crtc whose timestamp constants should be updated. + * @mode display mode containing the scanout timings * */ -void drm_calc_timestamping_constants(struct drm_crtc *crtc) +void drm_calc_timestamping_constants(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; u64 dotclock; /* Dot clock in Hz: */ - dotclock = (u64) crtc->hwmode.clock * 1000; + dotclock = (u64) mode->clock * 1000; /* Fields of interlaced scanout modes are only half a frame duration. * Double the dotclock to get half the frame-/line-/pixelduration. */ - if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) + if (mode->flags & DRM_MODE_FLAG_INTERLACE) dotclock *= 2; /* Valid dotclock? */ @@ -469,10 +471,9 @@ * nanoseconds: */ pixeldur_ns = (s64) div64_u64(1000000000, dotclock); - linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * + linedur_ns = (s64) div64_u64(((u64) mode->crtc_htotal * 1000000000), dotclock); - frame_size = crtc->hwmode.crtc_htotal * - crtc->hwmode.crtc_vtotal; + frame_size = mode->crtc_htotal * mode->crtc_vtotal; framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000, dotclock); } else @@ -484,8 +485,8 @@ crtc->framedur_ns = framedur_ns; DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", - crtc->base.id, crtc->hwmode.crtc_htotal, - crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay); + crtc->base.id, mode->crtc_htotal, + mode->crtc_vtotal, mode->crtc_vdisplay); DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", crtc->base.id, (int) dotclock/1000, (int) framedur_ns, (int) linedur_ns, (int) pixeldur_ns); @@ -521,6 +522,7 @@ * 0 = Default. * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. * @refcrtc: drm_crtc* of crtc which defines scanout timing. + * @mode: mode which defines the scanout timings * * Returns negative value on error, failure or if not supported in current * video mode: @@ -540,11 +542,11 @@ int *max_error, struct timeval *vblank_time, unsigned flags, - struct drm_crtc *refcrtc) + const struct drm_crtc *refcrtc, + const struct drm_display_mode *mode) { ktime_t stime, etime, mono_time_offset; struct timeval tv_etime; - struct drm_display_mode *mode; int vbl_status, vtotal, vdisplay; int vpos, hpos, i; s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; @@ -561,7 +563,6 @@ return -EIO; } - mode = &refcrtc->hwmode; vtotal = mode->crtc_vtotal; vdisplay = mode->crtc_vdisplay; @@ -590,7 +591,7 @@ * Get vertical and horizontal scanout position vpos, hpos, * and bounding timestamps stime, etime, pre/post query. 
*/ - vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, + vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos, &hpos, &stime, &etime); /* --- linux-3.13.0.orig/drivers/gpu/drm/gma500/gma_display.c +++ linux-3.13.0/drivers/gpu/drm/gma500/gma_display.c @@ -349,6 +349,7 @@ /* If we didn't get a handle then turn the cursor off */ if (!handle) { temp = CURSOR_MODE_DISABLE; + mutex_lock(&dev->struct_mutex); if (gma_power_begin(dev, false)) { REG_WRITE(control, temp); @@ -365,6 +366,7 @@ gma_crtc->cursor_obj = NULL; } + mutex_unlock(&dev->struct_mutex); return 0; } @@ -374,9 +376,12 @@ return -EINVAL; } + mutex_lock(&dev->struct_mutex); obj = drm_gem_object_lookup(dev, file_priv, handle); - if (!obj) - return -ENOENT; + if (!obj) { + ret = -ENOENT; + goto unlock; + } if (obj->size < width * height * 4) { dev_dbg(dev->dev, "Buffer is too small\n"); @@ -440,10 +445,13 @@ } gma_crtc->cursor_obj = obj; +unlock: + mutex_unlock(&dev->struct_mutex); return ret; unref_cursor: drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); return ret; } --- linux-3.13.0.orig/drivers/gpu/drm/i915/Kconfig +++ linux-3.13.0/drivers/gpu/drm/i915/Kconfig @@ -35,12 +35,11 @@ config DRM_I915_KMS bool "Enable modesetting on intel by default" depends on DRM_I915 + default y help - Choose this option if you want kernel modesetting enabled by default, - and you have a new enough userspace to support this. Running old - userspaces with this enabled will cause pain. Note that this causes - the driver to bind to PCI devices, which precludes loading things - like intelfb. + Choose this option if you want kernel modesetting enabled by default. + + If in doubt, say "Y". config DRM_I915_FBDEV bool "Enable legacy fbdev support for the modesettting intel driver" @@ -55,9 +54,12 @@ support. Note that this support also provide the linux console support on top of the intel modesetting driver. + If in doubt, say "Y". + config DRM_I915_PRELIMINARY_HW_SUPPORT bool "Enable preliminary support for prerelease Intel hardware by default" depends on DRM_I915 + default n help Choose this option if you have prerelease Intel hardware and want the i915 driver to support it by default. You can enable such support at @@ -65,3 +67,15 @@ option changes the default for that module option. If in doubt, say "N". + +config DRM_I915_UMS + bool "Enable userspace modesetting on Intel hardware (DEPRECATED)" + depends on DRM_I915 + default n + help + Choose this option if you still need userspace modesetting. + + Userspace modesetting is deprecated for quite some time now, so + enable this only if you have ancient versions of the DDX drivers. + + If in doubt, say "N". 
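The gpiolib hunks earlier in this patch replace open-coded sysfs_create_group() calls with ATTRIBUTE_GROUPS() plus device_create_with_groups(), so the attributes are registered atomically with the device and the fail_unregister_device unwind paths shrink. A minimal sketch of that pattern follows; the foo_* names are hypothetical and not part of this patch.

/* Sketch only: the attribute-groups pattern the gpiolib changes adopt. */
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/sysfs.h>

static ssize_t value_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}
static DEVICE_ATTR(value, 0444, value_show, NULL);

static struct attribute *foo_attrs[] = {
	&dev_attr_value.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);	/* generates the NULL-terminated foo_groups[] */

static struct device *foo_add_device(struct class *foo_class,
				     struct device *parent, void *drvdata,
				     int id)
{
	/*
	 * The groups are created before the device is announced to
	 * userspace, so it never sees the device without its attributes.
	 * This is why the separate sysfs_create_group() calls and their
	 * error paths can be dropped in gpiolib.c.
	 */
	return device_create_with_groups(foo_class, parent, MKDEV(0, 0),
					 drvdata, foo_groups, "foo%d", id);
}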
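The drm_dp_helper.c additions above deliberately leave the low-level AUX transaction to the driver: it fills in a struct drm_dp_aux with a .transfer() hook and then gets native DPCD access and I2C-over-AUX (for EDID) from the helper. The sketch below shows how a driver might wire this up; the foo_* names and the stubbed hardware access are hypothetical, and the .transfer() prototype is inferred from how drm_dp_dpcd_access() invokes it, so treat this as an illustration rather than a reference.

/* Hypothetical driver-side glue for the new DP AUX helpers (sketch only). */
#include <linux/device.h>
#include <linux/types.h>
#include <drm/drm_dp_helper.h>

struct foo_dp {
	struct drm_dp_aux aux;		/* generic AUX channel, embedded */
	void __iomem *regs;		/* imaginary AUX FIFO registers */
};

/*
 * Hardware transfer hook: push msg->buffer over the AUX channel according
 * to msg->request and msg->address, fill in msg->reply from the sink's
 * reply field and return the number of bytes transferred (or a negative
 * error code).  The register programming is hardware specific and is
 * stubbed out here.
 */
static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
				struct drm_dp_aux_msg *msg)
{
	/* ... program the AUX FIFO, wait for completion ... */
	msg->reply = DP_AUX_NATIVE_REPLY_ACK;
	return msg->size;
}

static int foo_dp_init(struct foo_dp *fdp, struct device *dev)
{
	u8 dpcd_rev;
	int err;

	fdp->aux.name = "foo DP AUX";
	fdp->aux.dev = dev;
	fdp->aux.transfer = foo_aux_transfer;

	/* Registers an i2c adapter backed by drm_dp_i2c_algo for EDID reads. */
	err = drm_dp_aux_register_i2c_bus(&fdp->aux);
	if (err < 0)
		return err;

	/* Native AUX read; retry and reply handling live in the helper. */
	err = drm_dp_dpcd_read(&fdp->aux, DP_DPCD_REV, &dpcd_rev,
			       sizeof(dpcd_rev));
	if (err < 0) {
		drm_dp_aux_unregister_i2c_bus(&fdp->aux);
		return err;
	}

	return 0;
}

Teardown is just drm_dp_aux_unregister_i2c_bus(), as in the error path above; the seven-retry AUX_DEFER handling and the byte-at-a-time I2C transfers all stay inside the helper code added by this patch.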
--- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_debugfs.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_debugfs.c @@ -408,7 +408,9 @@ struct file_stats stats; memset(&stats, 0, sizeof(stats)); + spin_lock(&file->table_lock); idr_for_each(&file->object_idr, per_file_stats, &stats); + spin_unlock(&file->table_lock); seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", get_pid_task(file->pid, PIDTYPE_PID)->comm, stats.count, --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_dma.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_dma.c @@ -1679,6 +1679,7 @@ intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); + pm_qos_remove_request(&dev_priv->pm_qos); destroy_workqueue(dev_priv->wq); out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1910,6 +1911,7 @@ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_drv.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_drv.c @@ -383,11 +383,7 @@ INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ INTEL_HSW_D_IDS(&intel_haswell_d_info), \ - INTEL_HSW_M_IDS(&intel_haswell_m_info), \ - INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ - INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ - INTEL_BDW_M_IDS(&intel_broadwell_m_info), \ - INTEL_BDW_D_IDS(&intel_broadwell_d_info) + INTEL_HSW_M_IDS(&intel_haswell_m_info) static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_PCI_IDS, @@ -401,7 +397,7 @@ void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pch; + struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting * (which really amounts to a PCH but no South Display). @@ -422,12 +418,9 @@ * all the ISA bridge devices and check for the first match, instead * of only checking the first one. 
*/ - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); - while (pch) { - struct pci_dev *curr = pch; + while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { - unsigned short id; - id = pch->device & INTEL_PCH_DEVICE_ID_MASK; + unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { @@ -446,31 +439,23 @@ } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - WARN_ON(IS_ULT(dev)); - } else if (IS_BROADWELL(dev)) { - dev_priv->pch_type = PCH_LPT; - dev_priv->pch_id = - INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; - DRM_DEBUG_KMS("This is Broadwell, assuming " - "LynxPoint LP PCH\n"); + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); + WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev)); - WARN_ON(!IS_ULT(dev)); - } else { - goto check_next; - } - pci_dev_put(pch); + WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); + WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); + } else + continue; + break; } -check_next: - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); - pci_dev_put(curr); } if (!pch) - DRM_DEBUG_KMS("No PCH found?\n"); + DRM_DEBUG_KMS("No PCH found.\n"); + + pci_dev_put(pch); } bool i915_semaphore_is_enabled(struct drm_device *dev) @@ -615,15 +600,20 @@ drm_helper_hpd_irq_event(dev); } +static int i915_drm_thaw_early(struct drm_device *dev) +{ + intel_uncore_early_sanitize(dev); + intel_uncore_sanitize(dev); + intel_power_domains_init_hw(dev); + + return 0; +} + static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; - intel_uncore_early_sanitize(dev); - - intel_uncore_sanitize(dev); - if (drm_core_check_feature(dev, DRIVER_MODESET) && restore_gtt_mappings) { mutex_lock(&dev->struct_mutex); @@ -631,8 +621,6 @@ mutex_unlock(&dev->struct_mutex); } - intel_power_domains_init_hw(dev); - i915_restore_state(dev); intel_opregion_setup(dev); @@ -699,19 +687,33 @@ return __i915_drm_thaw(dev, true); } -int i915_resume(struct drm_device *dev) +static int i915_resume_early(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; + /* + * We have a resume ordering issue with the snd-hda driver also + * requiring our device to be power up. Due to the lack of a + * parent/child relationship we currently solve this with an early + * resume hook. + * + * FIXME: This should be solved with a special hdmi sink device or + * similar so that power domains can be employed. 
+ */ if (pci_enable_device(dev->pdev)) return -EIO; pci_set_master(dev->pdev); + return i915_drm_thaw_early(dev); +} + +int i915_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + /* * Platforms with opregion should have sane BIOS, older ones (gen3 and * earlier) need to restore the GTT mappings since the BIOS might clear @@ -725,6 +727,14 @@ return 0; } +static int i915_resume_legacy(struct drm_device *dev) +{ + i915_resume_early(dev); + i915_resume(dev); + + return 0; +} + /** * i915_reset - reset chip after a hang * @dev: drm device to reset @@ -858,7 +868,6 @@ { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - int error; if (!drm_dev || !drm_dev->dev_private) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); @@ -868,9 +877,25 @@ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - error = i915_drm_freeze(drm_dev); - if (error) - return error; + return i915_drm_freeze(drm_dev); +} + +static int i915_pm_suspend_late(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + /* + * We have a suspedn ordering issue with the snd-hda driver also + * requiring our device to be power up. Due to the lack of a + * parent/child relationship we currently solve this with an late + * suspend hook. + * + * FIXME: This should be solved with a special hdmi sink device or + * similar so that power domains can be employed. + */ + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); @@ -878,6 +903,14 @@ return 0; } +static int i915_pm_resume_early(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_resume_early(drm_dev); +} + static int i915_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -899,6 +932,14 @@ return i915_drm_freeze(drm_dev); } +static int i915_pm_thaw_early(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + + return i915_drm_thaw_early(drm_dev); +} + static int i915_pm_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -917,10 +958,14 @@ static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, + .suspend_late = i915_pm_suspend_late, + .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, .freeze = i915_pm_freeze, + .thaw_early = i915_pm_thaw_early, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, + .restore_early = i915_pm_resume_early, .restore = i915_pm_resume, }; @@ -961,7 +1006,7 @@ /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ .suspend = i915_suspend, - .resume = i915_resume, + .resume = i915_resume_legacy, .device_is_agp = i915_driver_device_is_agp, .master_create = i915_master_create, @@ -1024,8 +1069,13 @@ driver.driver_features &= ~DRIVER_MODESET; #endif - if (!(driver.driver_features & DRIVER_MODESET)) + if (!(driver.driver_features & DRIVER_MODESET)) { driver.get_vblank_timestamp = NULL; +#ifndef CONFIG_DRM_I915_UMS + /* Silently fail loading to not upset userspace. 
*/ + return 0; +#endif + } return drm_pci_init(&driver, &i915_pci_driver); } --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_drv.h +++ linux-3.13.0/drivers/gpu/drm/i915/i915_drv.h @@ -317,6 +317,7 @@ u64 fence[I915_MAX_NUM_FENCES]; struct timeval time; struct drm_i915_error_ring { + bool valid; struct drm_i915_error_object { int page_count; u32 gtt_offset; @@ -875,6 +876,7 @@ u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; + u16 saveGCDGMBUS; }; struct intel_gen6_power_mgmt { @@ -1077,34 +1079,30 @@ unsigned long missed_irq_rings; /** - * State variable and reset counter controlling the reset flow + * State variable controlling the reset flow and count * - * Upper bits are for the reset counter. This counter is used by the - * wait_seqno code to race-free noticed that a reset event happened and - * that it needs to restart the entire ioctl (since most likely the - * seqno it waited for won't ever signal anytime soon). + * This is a counter which gets incremented when reset is triggered, + * and again when reset has been handled. So odd values (lowest bit set) + * means that reset is in progress and even values that + * (reset_counter >> 1):th reset was successfully completed. + * + * If reset is not completed succesfully, the I915_WEDGE bit is + * set meaning that hardware is terminally sour and there is no + * recovery. All waiters on the reset_queue will be woken when + * that happens. + * + * This counter is used by the wait_seqno code to notice that reset + * event happened and it needs to restart the entire ioctl (since most + * likely the seqno it waited for won't ever signal anytime soon). * * This is important for lock-free wait paths, where no contended lock * naturally enforces the correct ordering between the bail-out of the * waiter and the gpu reset work code. - * - * Lowest bit controls the reset state machine: Set means a reset is in - * progress. This state will (presuming we don't have any bugs) decay - * into either unset (successful reset) or the special WEDGED value (hw - * terminally sour). All waiters on the reset_queue will be woken when - * that happens. */ atomic_t reset_counter; - /** - * Special values/flags for reset_counter - * - * Note that the code relies on - * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG - * being true. - */ #define I915_RESET_IN_PROGRESS_FLAG 1 -#define I915_WEDGED 0xffffffff +#define I915_WEDGED (1 << 31) /** * Waitqueue to signal when the reset has completed. 
Used by clients @@ -1756,8 +1754,8 @@ #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ - (((dev)->pdev->device & 0xf) == 0x2 || \ - ((dev)->pdev->device & 0xf) == 0x6 || \ + (((dev)->pdev->device & 0xf) == 0x6 || \ + ((dev)->pdev->device & 0xf) == 0xb || \ ((dev)->pdev->device & 0xf) == 0xe)) #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0xFF00) == 0x0A00) @@ -2063,12 +2061,17 @@ static inline bool i915_reset_in_progress(struct i915_gpu_error *error) { return unlikely(atomic_read(&error->reset_counter) - & I915_RESET_IN_PROGRESS_FLAG); + & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); } static inline bool i915_terminally_wedged(struct i915_gpu_error *error) { - return atomic_read(&error->reset_counter) == I915_WEDGED; + return atomic_read(&error->reset_counter) & I915_WEDGED; +} + +static inline u32 i915_reset_count(struct i915_gpu_error *error) +{ + return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; } void i915_gem_reset(struct drm_device *dev); @@ -2398,6 +2401,8 @@ extern bool i915_semaphore_is_enabled(struct drm_device *dev); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* overlay */ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_gem.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_gem.c @@ -1424,10 +1424,13 @@ out: switch (ret) { case -EIO: - /* If this -EIO is due to a gpu hang, give the reset code a - * chance to clean up the mess. Otherwise return the proper - * SIGBUS. */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) + /* + * We eat errors when the gpu is terminally wedged to avoid + * userspace unduly crashing (gl has no provisions for mmaps to + * fail). But any other -EIO isn't ours (e.g. swap in failure) + * and so needs to be reported. + */ + if (!i915_terminally_wedged(&dev_priv->gpu_error)) return VM_FAULT_SIGBUS; case -EAGAIN: /* @@ -2852,6 +2855,13 @@ u32 size = i915_gem_obj_ggtt_size(obj); uint64_t val; + /* Adjust fence size to match tiled area */ + if (obj->tiling_mode != I915_TILING_NONE) { + uint32_t row_size = obj->stride * + (obj->tiling_mode == I915_TILING_Y ? 
32 : 8); + size = (size / row_size) * row_size; + } + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32; val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; @@ -3486,7 +3496,7 @@ { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct i915_vma *vma; + struct i915_vma *vma, *next; int ret; if (obj->cache_level == cache_level) @@ -3497,7 +3507,7 @@ return -EBUSY; } - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { ret = i915_vma_unbind(vma); if (ret) @@ -3923,6 +3933,9 @@ struct drm_i915_gem_object *obj; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -3976,6 +3989,9 @@ struct drm_i915_gem_object *obj; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return -ENODEV; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -4922,7 +4938,7 @@ if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -767,9 +767,9 @@ * relocations were valid. */ for (j = 0; j < exec[i].relocation_count; j++) { - if (copy_to_user(&user_relocs[j].presumed_offset, - &invalid_offset, - sizeof(invalid_offset))) { + if (__copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { ret = -EFAULT; mutex_lock(&dev->struct_mutex); goto err; @@ -1312,18 +1312,21 @@ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, &dev_priv->gtt.base); if (!ret) { + struct drm_i915_gem_exec_object __user *user_exec_list = + to_user_ptr(args->buffers_ptr); + /* Copy the new buffer offsets back to the user's exec list. */ - for (i = 0; i < args->buffer_count; i++) - exec_list[i].offset = exec2_list[i].offset; - /* ... and back out to userspace */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + break; + } } } @@ -1371,14 +1374,21 @@ &dev_priv->gtt.base); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
*/ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec2_list, - sizeof(*exec2_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + struct drm_i915_gem_exec_object2 *user_exec_list = + to_user_ptr(args->buffers_ptr); + int i; + + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user\n", + args->buffer_count); + break; + } } } --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_gem_gtt.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -813,6 +813,16 @@ POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); } +static void i915_ggtt_flush(struct drm_i915_private *dev_priv) +{ + if (INTEL_INFO(dev_priv->dev)->gen < 6) { + intel_gtt_chipset_flush(); + } else { + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + } +} + void i915_gem_suspend_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -828,7 +838,9 @@ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, dev_priv->gtt.base.start / PAGE_SIZE, dev_priv->gtt.base.total / PAGE_SIZE, - false); + true); + + i915_ggtt_flush(dev_priv); } void i915_gem_restore_gtt_mappings(struct drm_device *dev) @@ -849,7 +861,7 @@ i915_gem_gtt_bind_object(obj, obj->cache_level); } - i915_gem_chipset_flush(dev); + i915_ggtt_flush(dev_priv); } int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_gem_stolen.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -74,6 +74,50 @@ if (base == 0) return 0; + /* make sure we don't clobber the GTT if it's within stolen memory */ + if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) { + struct { + u32 start, end; + } stolen[2] = { + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + }; + u64 gtt_start, gtt_end; + + gtt_start = I915_READ(PGTBL_CTL); + if (IS_GEN4(dev)) + gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | + (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; + else + gtt_start &= PGTBL_ADDRESS_LO_MASK; + gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4; + + if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) + stolen[0].end = gtt_start; + if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) + stolen[1].start = gtt_end; + + /* pick the larger of the two chunks */ + if (stolen[0].end - stolen[0].start > + stolen[1].end - stolen[1].start) { + base = stolen[0].start; + dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start; + } else { + base = stolen[1].start; + dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start; + } + + if (stolen[0].start != stolen[1].start || + stolen[0].end != stolen[1].end) { + DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", + (unsigned long long) gtt_start, + (unsigned long long) gtt_end - 1); + DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", + base, base + (u32) dev_priv->gtt.stolen_size - 1); + } + } + + /* Verify that nothing else uses this physical address. Stolen * memory should be reserved by the BIOS and hidden from the * kernel. 
So if the region is already marked as busy, something @@ -81,7 +125,11 @@ */ r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, "Graphics Stolen Memory"); - if (r == NULL) { + /* + * GEN3 firmware likes to smash pci bridges into the stolen + * range. Apparently this works. + */ + if (r == NULL && !IS_GEN3(dev)) { DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", base, base + (uint32_t)dev_priv->gtt.stolen_size); base = 0; @@ -201,6 +249,13 @@ struct drm_i915_private *dev_priv = dev->dev_private; int bios_reserved = 0; +#ifdef CONFIG_INTEL_IOMMU + if (intel_iommu_gfx_mapped) { + DRM_INFO("DMAR active, disabling use of stolen memory\n"); + return 0; + } +#endif + if (dev_priv->gtt.stolen_size == 0) return 0; @@ -250,7 +305,7 @@ } sg = st->sgl; - sg->offset = offset; + sg->offset = 0; sg->length = size; sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_gpu_error.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_gpu_error.c @@ -146,7 +146,10 @@ va_list tmp; va_copy(tmp, args); - if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) + len = vsnprintf(NULL, 0, f, tmp); + va_end(tmp); + + if (!__i915_error_seek(e, len)) return; } @@ -239,6 +242,9 @@ unsigned ring) { BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ + if (!error->ring[ring].valid) + return; + err_printf(m, "%s command stream:\n", ring_str(ring)); err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); @@ -294,7 +300,6 @@ struct drm_device *dev = error_priv->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_state *error = error_priv->error; - struct intel_ring_buffer *ring; int i, j, page, offset, elt; if (!error) { @@ -329,7 +334,7 @@ if (INTEL_INFO(dev)->gen == 7) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - for_each_ring(ring, dev_priv, i) + for (i = 0; i < ARRAY_SIZE(error->ring); i++) i915_ring_error_state(m, dev, error, i); if (error->active_bo) @@ -386,8 +391,7 @@ } } - obj = error->ring[i].ctx; - if (obj) { + if ((obj = error->ring[i].ctx)) { err_printf(m, "%s --- HW Context = 0x%08x\n", dev_priv->ring[i].name, obj->gtt_offset); @@ -668,7 +672,8 @@ return NULL; obj = ring->scratch.obj; - if (acthd >= i915_gem_obj_ggtt_offset(obj) && + if (obj != NULL && + acthd >= i915_gem_obj_ggtt_offset(obj) && acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) return i915_error_object_create(dev_priv, obj); } @@ -775,11 +780,17 @@ struct drm_i915_error_state *error) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; struct drm_i915_gem_request *request; int i, count; - for_each_ring(ring, dev_priv, i) { + for (i = 0; i < I915_NUM_RINGS; i++) { + struct intel_ring_buffer *ring = &dev_priv->ring[i]; + + if (ring->dev == NULL) + continue; + + error->ring[i].valid = true; + i915_record_ring_state(dev, error, ring); error->ring[i].batchbuffer = --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_irq.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_irq.c @@ -567,8 +567,7 @@ vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; } else { - enum transcoder cpu_transcoder = - intel_pipe_to_cpu_transcoder(dev_priv, pipe); + enum transcoder cpu_transcoder = (enum transcoder) pipe; u32 htotal; htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; @@ -675,7 +674,8 @@ } static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, - int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) + unsigned int 
flags, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; @@ -809,7 +809,8 @@ /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, vblank_time, flags, - crtc); + crtc, + &to_intel_crtc(crtc)->config.adjusted_mode); } static bool intel_hpd_irq_event(struct drm_device *dev, @@ -1235,9 +1236,20 @@ spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { - WARN(((hpd[i] & hotplug_trigger) && - dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), - "Received HPD interrupt although disabled\n"); + if (hpd[i] & hotplug_trigger && + dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { + /* + * On GMCH platforms the interrupt mask bits only + * prevent irq generation, not the setting of the + * hotplug bits itself. So only WARN about unexpected + * interrupts on saner platforms. + */ + WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), + "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", + hotplug_trigger, i, hpd[i]); + + continue; + } if (!(hpd[i] & hotplug_trigger) || dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) @@ -1993,7 +2005,7 @@ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, reset_done_event); } else { - atomic_set(&error->reset_counter, I915_WEDGED); + atomic_set_mask(I915_WEDGED, &error->reset_counter); } /* @@ -2382,20 +2394,27 @@ { struct drm_i915_private *dev_priv = ring->dev->dev_private; struct intel_ring_buffer *signaller; - u32 seqno, ctl; + u32 seqno; - ring->hangcheck.deadlock = true; + ring->hangcheck.deadlock++; signaller = semaphore_waits_for(ring, &seqno); - if (signaller == NULL || signaller->hangcheck.deadlock) + if (signaller == NULL) return -1; + /* Prevent pathological recursion due to driver bugs */ + if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) + return -1; + + if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) + return 1; + /* cursory check for an unkickable deadlock */ - ctl = I915_READ_CTL(signaller); - if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) + if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && + semaphore_passed(signaller) < 0) return -1; - return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); + return 0; } static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) @@ -2404,7 +2423,7 @@ int i; for_each_ring(ring, dev_priv, i) - ring->hangcheck.deadlock = false; + ring->hangcheck.deadlock = 0; } static enum intel_ring_hangcheck_action @@ -2761,10 +2780,9 @@ return; if (HAS_PCH_IBX(dev)) { - mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | - SDE_TRANSA_FIFO_UNDER | SDE_POISON; + mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; } else { - mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; + mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; I915_WRITE(SERR_INT, I915_READ(SERR_INT)); } @@ -2824,20 +2842,19 @@ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | DE_PLANEB_FLIP_DONE_IVB | - DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | - DE_ERR_INT_IVB); + DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | - DE_PIPEA_VBLANK_IVB); + DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); } else { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 
DE_AUX_CHANNEL_A | - DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | DE_POISON); - extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; + extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | + DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; } dev_priv->irq_mask = ~display_mask; @@ -2953,9 +2970,9 @@ struct drm_device *dev = dev_priv->dev; uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | GEN8_PIPE_CDCLK_CRC_DONE | - GEN8_PIPE_FIFO_UNDERRUN | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; + uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | + GEN8_PIPE_FIFO_UNDERRUN; int pipe; dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; @@ -3140,10 +3157,10 @@ * Returns true when a page flip has completed. */ static bool i8xx_handle_vblank(struct drm_device *dev, - int pipe, u16 iir) + int plane, int pipe, u32 iir) { drm_i915_private_t *dev_priv = dev->dev_private; - u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); + u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); if (!drm_handle_vblank(dev, pipe)) return false; @@ -3151,8 +3168,6 @@ if ((iir & flip_pending) == 0) return false; - intel_prepare_page_flip(dev, pipe); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3162,6 +3177,7 @@ if (I915_READ16(ISR) & flip_pending) return false; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -3220,9 +3236,13 @@ notify_ring(dev, &dev_priv->ring[RCS]); for_each_pipe(pipe) { + int plane = pipe; + if (I915_HAS_FBC(dev)) + plane = !plane; + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, pipe, iir)) - flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); + i8xx_handle_vblank(dev, plane, pipe, iir)) + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev, pipe); @@ -3334,8 +3354,6 @@ if ((iir & flip_pending) == 0) return false; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. 
IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3345,6 +3363,7 @@ if (I915_READ(ISR) & flip_pending) return false; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -3418,7 +3437,7 @@ for_each_pipe(pipe) { int plane = pipe; - if (IS_MOBILE(dev)) + if (I915_HAS_FBC(dev)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_reg.h +++ linux-3.13.0/drivers/gpu/drm/i915/i915_reg.h @@ -74,6 +74,7 @@ #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) #define LBB 0xf4 +#define GCDGMBUS 0xcc /* Graphics reset regs */ #define I965_GDRST 0xc0 /* PCI config register */ @@ -300,16 +301,20 @@ #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) -#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) + +#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) +#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) -#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) -#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_A (2<<20) +#define BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) #define BLT_DEPTH_8 (0<<24) #define BLT_DEPTH_16_565 (1<<24) #define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_32 (3<<24) -#define BLT_ROP_GXCOPY (0xcc<<16) +#define BLT_ROP_SRC_COPY (0xcc<<16) +#define BLT_ROP_COLOR_COPY (0xf0<<16) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) @@ -320,6 +325,7 @@ #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ #define PIPE_CONTROL_CS_STALL (1<<20) #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) +#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) #define PIPE_CONTROL_WRITE_FLUSH (1<<12) @@ -628,6 +634,9 @@ /* * Instruction and interrupt control regs */ +#define PGTBL_CTL 0x02020 +#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ +#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ #define PGTBL_ER 0x02024 #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 @@ -2117,9 +2126,13 @@ * Please check the detailed lore in the commit message for for experimental * evidence. 
*/ -#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) -#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) -#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) +#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) +#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) +#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) +/* VLV DP/HDMI bits again match Bspec */ +#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27) +#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) +#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) #define PORTD_HOTPLUG_INT_STATUS (3 << 21) #define PORTC_HOTPLUG_INT_STATUS (3 << 19) #define PORTB_HOTPLUG_INT_STATUS (3 << 17) --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_suspend.c +++ linux-3.13.0/drivers/gpu/drm/i915/i915_suspend.c @@ -397,6 +397,10 @@ intel_disable_gt_powersave(dev); + if (IS_GEN4(dev)) + pci_read_config_word(dev->pdev, GCDGMBUS, + &dev_priv->regfile.saveGCDGMBUS); + /* Cache mode state */ if (INTEL_INFO(dev)->gen < 7) dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); @@ -429,6 +433,10 @@ mutex_lock(&dev->struct_mutex); i915_gem_restore_fences(dev); + + if (IS_GEN4(dev)) + pci_write_config_word(dev->pdev, GCDGMBUS, + dev_priv->regfile.saveGCDGMBUS); i915_restore_display(dev); if (!drm_core_check_feature(dev, DRIVER_MODESET)) { --- linux-3.13.0.orig/drivers/gpu/drm/i915/i915_trace.h +++ linux-3.13.0/drivers/gpu/drm/i915/i915_trace.h @@ -238,14 +238,16 @@ TP_ARGS(vm), TP_STRUCT__entry( + __field(u32, dev) __field(struct i915_address_space *, vm) ), TP_fast_assign( + __entry->dev = vm->dev->primary->index; __entry->vm = vm; ), - TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm) + TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm) ); TRACE_EVENT(i915_gem_ring_sync_to, --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_bios.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_bios.c @@ -811,7 +811,7 @@ } } -static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_crt.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_crt.c @@ -749,7 +749,7 @@ .destroy = intel_encoder_destroy, }; -static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) +static int intel_no_crt_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping CRT initialization for %s\n", id->ident); return 1; @@ -764,6 +764,14 @@ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), }, }, + { + .callback = intel_no_crt_dmi_callback, + .ident = "DELL XPS 8700", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"), + }, + }, { } }; --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_display.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_display.c @@ -3755,7 +3755,6 @@ ironlake_fdi_disable(crtc); ironlake_disable_pch_transcoder(dev_priv, pipe); - intel_set_pch_fifo_underrun_reporting(dev, pipe, true); if (HAS_PCH_CPT(dev)) { /* disable TRANS_DP_CTL */ @@ -3821,7 +3820,6 @@ if (intel_crtc->config.has_pch_encoder) { lpt_disable_pch_transcoder(dev_priv); - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); intel_ddi_fdi_disable(crtc); } @@ -8335,6 +8333,20 @@ if (ring->id == RCS) len += 6; + /* + * BSpec MI_DISPLAY_FLIP for IVB: + * "The full packet must be contained within the same cache line." 
+ * + * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same + * cacheline, if we ever start emitting more commands before + * the MI_DISPLAY_FLIP we may need to first emit everything else, + * then do the cacheline alignment, and finally emit the + * MI_DISPLAY_FLIP. + */ + ret = intel_ring_cacheline_align(ring); + if (ret) + goto err_unpin; + ret = intel_ring_begin(ring, len); if (ret) goto err_unpin; @@ -9111,11 +9123,22 @@ PIPE_CONF_CHECK_I(pipe_src_w); PIPE_CONF_CHECK_I(pipe_src_h); - PIPE_CONF_CHECK_I(gmch_pfit.control); - /* pfit ratios are autocomputed by the hw on gen4+ */ - if (INTEL_INFO(dev)->gen < 4) - PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); - PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); + /* + * FIXME: BIOS likes to set up a cloned config with lvds+external + * screen. Since we don't yet re-compute the pipe config when moving + * just the lvds port away to another pipe the sw tracking won't match. + * + * Proper atomic modesets with recomputed global state will fix this. + * Until then just don't check gmch state for inherited modes. + */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { + PIPE_CONF_CHECK_I(gmch_pfit.control); + /* pfit ratios are autocomputed by the hw on gen4+ */ + if (INTEL_INFO(dev)->gen < 4) + PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); + PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); + } + PIPE_CONF_CHECK_I(pch_pfit.enabled); if (current_config->pch_pfit.enabled) { PIPE_CONF_CHECK_I(pch_pfit.pos); @@ -9449,7 +9472,8 @@ * are later needed by vblank and swap-completion * timestamping. They are derived from true hwmode. */ - drm_calc_timestamping_constants(crtc); + drm_calc_timestamping_constants(crtc, + &pipe_config->adjusted_mode); } /* FIXME: add subpixel order */ @@ -9926,10 +9950,13 @@ intel_crtc->lut_b[i] = i; } - /* Swap pipes & planes for FBC on pre-965 */ + /* + * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port + * is hooked to plane B. Hence we want plane A feeding pipe B. 
+ */ intel_crtc->pipe = pipe; intel_crtc->plane = pipe; - if (IS_MOBILE(dev) && IS_GEN3(dev)) { + if (I915_HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); intel_crtc->plane = !pipe; } @@ -10556,6 +10583,9 @@ /* Acer Aspire 4736Z */ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, + /* Acer Aspire 5336 */ + { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, + /* Dell XPS13 HD Sandy Bridge */ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ @@ -10687,15 +10717,6 @@ intel_disable_fbc(dev); } -static void -intel_connector_break_all_links(struct intel_connector *connector) -{ - connector->base.dpms = DRM_MODE_DPMS_OFF; - connector->base.encoder = NULL; - connector->encoder->connectors_active = false; - connector->encoder->base.crtc = NULL; -} - static void intel_enable_pipe_a(struct drm_device *dev) { struct intel_connector *connector; @@ -10777,8 +10798,17 @@ if (connector->encoder->base.crtc != &crtc->base) continue; - intel_connector_break_all_links(connector); + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } + /* multiple connectors may have the same encoder: + * handle them and break crtc link separately */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) + if (connector->encoder->base.crtc == &crtc->base) { + connector->encoder->base.crtc = NULL; + connector->encoder->connectors_active = false; + } WARN_ON(crtc->active); crtc->base.enabled = false; @@ -10849,6 +10879,8 @@ drm_get_encoder_name(&encoder->base)); encoder->disable(encoder); } + encoder->base.crtc = NULL; + encoder->connectors_active = false; /* Inconsistent output/port/pipe state happens presumably due to * a bug in one of the get_hw_state functions. 
Or someplace else @@ -10859,8 +10891,8 @@ base.head) { if (connector->encoder != encoder) continue; - - intel_connector_break_all_links(connector); + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } } /* Enabled encoders without active connectors will be fixed in @@ -10902,6 +10934,8 @@ base.head) { memset(&crtc->config, 0, sizeof(crtc->config)); + crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; + crtc->active = dev_priv->display.get_pipe_config(crtc, &crtc->config); --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_dp.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_dp.c @@ -96,13 +96,18 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; + struct drm_device *dev = intel_dp->attached_connector->base.dev; switch (max_link_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: break; case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - max_link_bw = DP_LINK_BW_2_7; + if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && + intel_dp->dpcd[DP_DPCD_REV] >= 0x12) + max_link_bw = DP_LINK_BW_5_4; + else + max_link_bw = DP_LINK_BW_2_7; break; default: WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", @@ -537,6 +542,7 @@ uint8_t msg[20]; int msg_bytes; uint8_t ack; + int retry; if (WARN_ON(send_bytes > 16)) return -E2BIG; @@ -548,18 +554,20 @@ msg[3] = send_bytes - 1; memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; - for (;;) { + for (retry = 0; retry < 7; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) - break; + return send_bytes; else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) - udelay(100); + usleep_range(400, 500); else return -EIO; } - return send_bytes; + + DRM_ERROR("too many retries, giving up\n"); + return -EIO; } /* Write a single byte to the aux channel in native mode */ @@ -581,6 +589,7 @@ int reply_bytes; uint8_t ack; int ret; + int retry; if (WARN_ON(recv_bytes > 19)) return -E2BIG; @@ -594,7 +603,7 @@ msg_bytes = 4; reply_bytes = recv_bytes + 1; - for (;;) { + for (retry = 0; retry < 7; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret == 0) @@ -607,10 +616,13 @@ return ret - 1; } else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) - udelay(100); + usleep_range(400, 500); else return -EIO; } + + DRM_ERROR("too many retries, giving up\n"); + return -EIO; } static int @@ -804,9 +816,10 @@ struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); - int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; + /* Conveniently, the link BW constants become indices with a shift...*/ + int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; int bpp, mode_rate; - static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; int link_avail, link_clock; if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) @@ -1627,7 +1640,7 @@ val |= EDP_PSR_LINK_DISABLE; I915_WRITE(EDP_PSR_CTL(dev), val | - IS_BROADWELL(dev) ? 0 : link_entry_time | + (IS_BROADWELL(dev) ? 
0 : link_entry_time) | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | EDP_PSR_ENABLE); @@ -1865,10 +1878,12 @@ mutex_unlock(&dev_priv->dpio_lock); - /* init power sequencer on this pipe and port */ - intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, - &power_seq); + if (is_edp(intel_dp)) { + /* init power sequencer on this pipe and port */ + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, + &power_seq); + } intel_enable_dp(encoder); @@ -2629,10 +2644,15 @@ bool channel_eq = false; int tries, cr_tries; uint32_t DP = intel_dp->DP; + uint32_t training_pattern = DP_TRAINING_PATTERN_2; + + /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/ + if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3) + training_pattern = DP_TRAINING_PATTERN_3; /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_2 | + training_pattern | DP_LINK_SCRAMBLING_DISABLE)) { DRM_ERROR("failed to start channel equalization\n"); return; @@ -2660,7 +2680,7 @@ if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_start_link_train(intel_dp); intel_dp_set_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_2 | + training_pattern | DP_LINK_SCRAMBLING_DISABLE); cr_tries++; continue; @@ -2676,7 +2696,7 @@ intel_dp_link_down(intel_dp); intel_dp_start_link_train(intel_dp); intel_dp_set_link_train(intel_dp, &DP, - DP_TRAINING_PATTERN_2 | + training_pattern | DP_LINK_SCRAMBLING_DISABLE); tries = 0; cr_tries++; @@ -2818,6 +2838,14 @@ } } + /* Training Pattern 3 support */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && + intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { + intel_dp->use_tps3 = true; + DRM_DEBUG_KMS("Displayport TPS3 supported"); + } else + intel_dp->use_tps3 = false; + if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ @@ -3020,18 +3048,34 @@ return status; } - switch (intel_dig_port->port) { - case PORT_B: - bit = PORTB_HOTPLUG_LIVE_STATUS; - break; - case PORT_C: - bit = PORTC_HOTPLUG_LIVE_STATUS; - break; - case PORT_D: - bit = PORTD_HOTPLUG_LIVE_STATUS; - break; - default: - return connector_status_unknown; + if (IS_VALLEYVIEW(dev)) { + switch (intel_dig_port->port) { + case PORT_B: + bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; + break; + case PORT_C: + bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; + break; + case PORT_D: + bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; + break; + default: + return connector_status_unknown; + } + } else { + switch (intel_dig_port->port) { + case PORT_B: + bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; + break; + case PORT_C: + bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; + break; + case PORT_D: + bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; + break; + default: + return connector_status_unknown; + } } if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_drv.h +++ linux-3.13.0/drivers/gpu/drm/i915/intel_drv.h @@ -207,7 +207,8 @@ * tracked with quirk flags so that fastboot and state checker can act * accordingly. 
*/ -#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ unsigned long quirks; /* User requested mode, only valid as a starting point to @@ -474,6 +475,7 @@ struct delayed_work panel_vdd_work; bool want_panel_vdd; bool psr_setup_done; + bool use_tps3; struct intel_connector *attached_connector; }; --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_hdmi.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_hdmi.c @@ -841,11 +841,11 @@ } } -static int hdmi_portclock_limit(struct intel_hdmi *hdmi) +static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); - if (IS_G4X(dev)) + if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) return 165000; else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; @@ -856,7 +856,8 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) + if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), + true)) return MODE_CLOCK_HIGH; if (mode->clock < 20000) return MODE_CLOCK_LOW; @@ -874,7 +875,7 @@ struct drm_device *dev = encoder->base.dev; struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; - int portclock_limit = hdmi_portclock_limit(intel_hdmi); + int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); int desired_bpp; if (intel_hdmi->color_range_auto) { @@ -898,8 +899,8 @@ * outputs. We also need to check that the higher clock still fits * within limits. */ - if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit - && HAS_PCH_SPLIT(dev)) { + if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && + clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_lvds.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_lvds.c @@ -533,7 +533,7 @@ .destroy = intel_encoder_destroy, }; -static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) +static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); return 1; @@ -945,6 +945,17 @@ int pipe; u8 pin; + /* + * Unlock registers and just leave them unlocked. Do this before + * checking quirk lists to avoid bogus WARNINGs. + */ + if (HAS_PCH_SPLIT(dev)) { + I915_WRITE(PCH_PP_CONTROL, + I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); + } else { + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); + } if (!intel_lvds_supported(dev)) return; @@ -1124,17 +1135,6 @@ DRM_DEBUG_KMS("detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? 
"dual" : "single"); - /* - * Unlock registers and just - * leave them unlocked - */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_CONTROL, - I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); - } else { - I915_WRITE(PP_CONTROL, - I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); - } lvds_connector->lid_notifier.notifier_call = intel_lid_notify; if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { DRM_DEBUG_KMS("lid notifier registration failed\n"); --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_panel.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_panel.c @@ -544,6 +544,7 @@ enum pipe pipe = intel_get_pipe_from_connector(connector); u32 freq; unsigned long flags; + u64 n; if (pipe == INVALID_PIPE) return; @@ -557,10 +558,9 @@ } /* scale to hardware, but be careful to not overflow */ - if (freq < max) - level = level * freq / max; - else - level = freq / max * level; + n = (u64)level * freq; + do_div(n, max); + level = n; dev_priv->backlight.level = level; if (dev_priv->backlight.device) --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_pm.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_pm.c @@ -498,8 +498,7 @@ obj = intel_fb->obj; adjusted_mode = &intel_crtc->config.adjusted_mode; - if (i915_enable_fbc < 0 && - INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { + if (i915_enable_fbc < 0) { if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) DRM_DEBUG_KMS("disabled per chip default\n"); goto out_disable; @@ -2563,6 +2562,43 @@ } } +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, + uint16_t wm[5], uint16_t min) +{ + int level, max_level = ilk_wm_max_level(dev_priv->dev); + + if (wm[0] >= min) + return false; + + wm[0] = max(wm[0], min); + for (level = 1; level <= max_level; level++) + wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); + + return true; +} + +static void snb_wm_latency_quirk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool changed; + + /* + * The BIOS provided WM memory latency values are often + * inadequate for high resolution displays. Adjust them. 
+ */ + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + + if (!changed) + return; + + DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); + intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); +} + static void intel_setup_wm_latency(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2580,6 +2616,9 @@ intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); + + if (IS_GEN6(dev)) + snb_wm_latency_quirk(dev); } static void hsw_compute_wm_parameters(struct drm_crtc *crtc, @@ -5156,11 +5195,6 @@ I915_WRITE(_3D_CHICKEN, _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); - /* WaSetupGtModeTdRowDispatch:snb */ - if (IS_SNB_GT1(dev)) - I915_WRITE(GEN6_GT_MODE, - _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); - I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); I915_WRITE(WM1_LP_ILK, 0); @@ -5314,8 +5348,8 @@ /* WaPsrDPRSUnmaskVBlankInSRD */ for_each_pipe(i) { I915_WRITE(CHICKEN_PIPESL_1(i), - I915_READ(CHICKEN_PIPESL_1(i) | - DPRS_MASK_VBLANK_SRD)); + I915_READ(CHICKEN_PIPESL_1(i)) | + DPRS_MASK_VBLANK_SRD); } } @@ -5843,6 +5877,27 @@ } EXPORT_SYMBOL_GPL(i915_release_power_well); +/* + * Private interface for the audio driver to get CDCLK in kHz. + * + * Caller must request power well using i915_request_power_well() prior to + * making the call. + */ +int i915_get_cdclk_freq(void) +{ + struct drm_i915_private *dev_priv; + + if (!hsw_pwr) + return -ENODEV; + + dev_priv = container_of(hsw_pwr, struct drm_i915_private, + power_domains); + + return intel_ddi_get_cdclk_freq(dev_priv); +} +EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); + + int intel_power_domains_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_ringbuffer.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -332,12 +332,15 @@ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; /* * TLB invalidate requires a post-sync write. */ flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; + /* Workaround: we must issue a pipe_control with CS-stall bit * set before a pipe_control command that has the state cache * invalidate bit set. */ @@ -473,6 +476,9 @@ } } + /* Enforce ordering by reading HEAD register back */ + I915_READ_HEAD(ring); + /* Initialize the ring. 
This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring @@ -1177,54 +1183,66 @@ /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ #define I830_BATCH_LIMIT (256*1024) +#define I830_TLB_ENTRIES (2) +#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) static int i830_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 len, unsigned flags) { + u32 cs_offset = ring->scratch.gtt_offset; int ret; - if (flags & I915_DISPATCH_PINNED) { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } else { - u32 cs_offset = ring->scratch.gtt_offset; + /* Evict the invalid PTE TLBs */ + intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); + intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 0xdeadbeef); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + if ((flags & I915_DISPATCH_PINNED) == 0) { if (len > I830_BATCH_LIMIT) return -ENOSPC; - ret = intel_ring_begin(ring, 9+3); + ret = intel_ring_begin(ring, 6 + 2); if (ret) return ret; - /* Blit the batch (which has now all relocs applied) to the stable batch - * scratch bo area (so that the CS never stumbles over its tlb - * invalidation bug) ... */ - intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | - XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB); - intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); + + /* Blit the batch (which has now all relocs applied) to the + * stable batch scratch bo area (so that the CS never + * stumbles over its tlb invalidation bug) ... + */ + intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); + intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096); intel_ring_emit(ring, cs_offset); - intel_ring_emit(ring, 0); intel_ring_emit(ring, 4096); intel_ring_emit(ring, offset); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* ... and execute it. */ - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, cs_offset + len - 8); - intel_ring_advance(ring); + offset = cs_offset; } + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + return 0; } @@ -1611,8 +1629,8 @@ return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); } -static int __intel_ring_begin(struct intel_ring_buffer *ring, - int bytes) +static int __intel_ring_prepare(struct intel_ring_buffer *ring, + int bytes) { int ret; @@ -1628,7 +1646,6 @@ return ret; } - ring->space -= bytes; return 0; } @@ -1643,12 +1660,38 @@ if (ret) return ret; + ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); + if (ret) + return ret; + /* Preallocate the olr before touching the ring */ ret = intel_ring_alloc_seqno(ring); if (ret) return ret; - return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); + ring->space -= num_dwords * sizeof(uint32_t); + return 0; +} + +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct intel_ring_buffer *ring) +{ + int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); + int ret; + + if (num_dwords == 0) + return 0; + + ret = intel_ring_begin(ring, num_dwords); + if (ret) + return ret; + + while (num_dwords--) + intel_ring_emit(ring, MI_NOOP); + + intel_ring_advance(ring); + + return 0; } void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) @@ -1925,7 +1968,7 @@ struct drm_i915_gem_object *obj; int ret; - obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); + obj = i915_gem_alloc_object(dev, I830_WA_SIZE); if (obj == NULL) { DRM_ERROR("Failed to allocate batch bo\n"); return -ENOMEM; --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_ringbuffer.h +++ linux-3.13.0/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -42,11 +42,11 @@ }; struct intel_ring_hangcheck { - bool deadlock; u32 seqno; u32 acthd; int score; enum intel_ring_hangcheck_action action; + int deadlock; }; struct intel_ring_buffer { @@ -233,6 +233,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); +int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); static inline void intel_ring_emit(struct intel_ring_buffer *ring, u32 data) { --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_sdvo.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_sdvo.c @@ -1364,7 +1364,9 @@ >> SDVO_PORT_MULTIPLY_SHIFT) + 1; } - dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier; + dotclock = pipe_config->port_clock; + if (pipe_config->pixel_multiplier) + dotclock /= pipe_config->pixel_multiplier; if (HAS_PCH_SPLIT(dev)) ironlake_check_encoder_dotclock(pipe_config, dotclock); --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_sideband.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_sideband.c @@ -73,7 +73,7 @@ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, PUNIT_OPCODE_REG_READ, addr, &val); mutex_unlock(&dev_priv->dpio_lock); @@ -85,7 +85,7 @@ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, PUNIT_OPCODE_REG_WRITE, addr, &val); mutex_unlock(&dev_priv->dpio_lock); } @@ -97,7 +97,7 @@ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); mutex_lock(&dev_priv->dpio_lock); - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 
0), IOSF_PORT_NC, PUNIT_OPCODE_REG_READ, addr, &val); mutex_unlock(&dev_priv->dpio_lock); @@ -107,56 +107,56 @@ u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, PUNIT_OPCODE_REG_READ, reg, &val); return val; } void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, PUNIT_OPCODE_REG_WRITE, reg, &val); } u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK, PUNIT_OPCODE_REG_READ, reg, &val); return val; } void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK, PUNIT_OPCODE_REG_WRITE, reg, &val); } u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU, PUNIT_OPCODE_REG_READ, reg, &val); return val; } void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU, PUNIT_OPCODE_REG_WRITE, reg, &val); } u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, PUNIT_OPCODE_REG_READ, reg, &val); return val; } void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, PUNIT_OPCODE_REG_WRITE, reg, &val); } --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_tv.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_tv.c @@ -854,6 +854,10 @@ struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + /* Prevents vblank waits from timing out in intel_tv_detect_type() */ + intel_wait_for_vblank(encoder->base.dev, + to_intel_crtc(encoder->base.crtc)->pipe); + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); } @@ -1536,9 +1540,14 @@ /* * If the device type is not TV, continue. */ - if (p_child->old.device_type != DEVICE_TYPE_INT_TV && - p_child->old.device_type != DEVICE_TYPE_TV) + switch (p_child->old.device_type) { + case DEVICE_TYPE_INT_TV: + case DEVICE_TYPE_TV: + case DEVICE_TYPE_TV_SVIDEO_COMPOSITE: + break; + default: continue; + } /* Only when the addin_offset is non-zero, it is regarded * as present. 
*/ --- linux-3.13.0.orig/drivers/gpu/drm/i915/intel_uncore.c +++ linux-3.13.0/drivers/gpu/drm/i915/intel_uncore.c @@ -167,6 +167,8 @@ { __raw_i915_write32(dev_priv, FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_VLV */ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); } @@ -686,6 +688,40 @@ return 0; } + +int i915_get_reset_stats_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reset_stats *args = data; + struct i915_ctx_hang_stats *hs; + int ret; + + if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id); + if (IS_ERR(hs)) { + mutex_unlock(&dev->struct_mutex); + return PTR_ERR(hs); + } + + if (capable(CAP_SYS_ADMIN)) + args->reset_count = i915_reset_count(&dev_priv->gpu_error); + else + args->reset_count = 0; + + args->batch_active = hs->batch_active; + args->batch_pending = hs->batch_pending; + + mutex_unlock(&dev->struct_mutex); + + return 0; +} static int i965_reset_complete(struct drm_device *dev) { --- linux-3.13.0.orig/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ linux-3.13.0/drivers/gpu/drm/mgag200/mgag200_cursor.c @@ -22,8 +22,10 @@ { WREG8(MGA_CURPOSXL, 0); WREG8(MGA_CURPOSXH, 0); - mgag200_bo_unpin(mdev->cursor.pixels_1); - mgag200_bo_unpin(mdev->cursor.pixels_2); + if (mdev->cursor.pixels_1->pin_count) + mgag200_bo_unpin(mdev->cursor.pixels_1); + if (mdev->cursor.pixels_2->pin_count) + mgag200_bo_unpin(mdev->cursor.pixels_2); } int mga_crtc_cursor_set(struct drm_crtc *crtc, @@ -32,7 +34,7 @@ uint32_t width, uint32_t height) { - struct drm_device *dev = (struct drm_device *)file_priv->minor->dev; + struct drm_device *dev = crtc->dev; struct mga_device *mdev = (struct mga_device *)dev->dev_private; struct mgag200_bo *pixels_1 = mdev->cursor.pixels_1; struct mgag200_bo *pixels_2 = mdev->cursor.pixels_2; --- linux-3.13.0.orig/drivers/gpu/drm/mgag200/mgag200_fb.c +++ linux-3.13.0/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -41,7 +41,7 @@ * then the BO is being moved and we should * store up the damage until later. 
*/ - if (!in_interrupt()) + if (drm_can_sleep()) ret = mgag200_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) --- linux-3.13.0.orig/drivers/gpu/drm/mgag200/mgag200_mode.c +++ linux-3.13.0/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1519,11 +1519,11 @@ (mga_vga_calculate_mode_bandwidth(mode, bpp) > (32700 * 1024))) { return MODE_BANDWIDTH; - } else if (mode->type == G200_EH && + } else if (mdev->type == G200_EH && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (37500 * 1024))) { return MODE_BANDWIDTH; - } else if (mode->type == G200_ER && + } else if (mdev->type == G200_ER && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (55000 * 1024))) { return MODE_BANDWIDTH; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/Makefile +++ linux-3.13.0/drivers/gpu/drm/nouveau/Makefile @@ -71,7 +71,10 @@ nouveau-y += core/subdev/devinit/nv1a.o nouveau-y += core/subdev/devinit/nv20.o nouveau-y += core/subdev/devinit/nv50.o +nouveau-y += core/subdev/devinit/nv84.o +nouveau-y += core/subdev/devinit/nv98.o nouveau-y += core/subdev/devinit/nva3.o +nouveau-y += core/subdev/devinit/nvaf.o nouveau-y += core/subdev/devinit/nvc0.o nouveau-y += core/subdev/fb/base.o nouveau-y += core/subdev/fb/nv04.o --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/core/engine.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/core/engine.c @@ -42,11 +42,24 @@ if (ret) return ret; - if ( parent && - !nouveau_boolopt(nv_device(parent)->cfgopt, iname, enable)) { - if (!enable) - nv_warn(engine, "disabled, %s=1 to enable\n", iname); - return -ENODEV; + if (parent) { + struct nouveau_device *device = nv_device(parent); + int engidx = nv_engidx(nv_object(engine)); + + if (device->disable_mask & (1ULL << engidx)) { + if (!nouveau_boolopt(device->cfgopt, iname, false)) { + nv_debug(engine, "engine disabled by hw/fw\n"); + return -ENODEV; + } + + nv_warn(engine, "ignoring hw/fw engine disable\n"); + } + + if (!nouveau_boolopt(device->cfgopt, iname, enable)) { + if (!enable) + nv_warn(engine, "disabled, %s=1 to enable\n", iname); + return -ENODEV; + } } INIT_LIST_HEAD(&engine->contexts); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c @@ -105,9 +105,6 @@ struct nvc0_copy_priv *priv; int ret; - if (nv_rd32(parent, 0x022500) & 0x00000100) - return -ENODEV; - ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true, "PCE0", "copy0", &priv); *pobject = nv_object(priv); @@ -133,9 +130,6 @@ struct nvc0_copy_priv *priv; int ret; - if (nv_rd32(parent, 0x022500) & 0x00000200) - return -ENODEV; - ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true, "PCE1", "copy1", &priv); *pobject = nv_object(priv); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c @@ -88,9 +88,6 @@ struct nve0_copy_priv *priv; int ret; - if (nv_rd32(parent, 0x022500) & 0x00000100) - return -ENODEV; - ret = nouveau_engine_create(parent, engine, oclass, true, "PCE0", "copy0", &priv); *pobject = nv_object(priv); @@ -112,9 +109,6 @@ struct nve0_copy_priv *priv; int ret; - if (nv_rd32(parent, 0x022500) & 0x00000200) - return -ENODEV; - ret = nouveau_engine_create(parent, engine, oclass, true, "PCE1", "copy1", &priv); *pobject = nv_object(priv); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nv04.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nv04.c @@ -49,7 +49,7 @@ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = 
&nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv04_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -67,7 +67,7 @@ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv05_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nv10.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nv10.c @@ -51,7 +51,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -68,7 +68,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -87,7 +87,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -106,7 +106,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -125,7 +125,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -144,7 +144,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - 
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -163,7 +163,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -182,7 +182,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nv20.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nv20.c @@ -52,7 +52,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -71,7 +71,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -90,7 +90,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -109,7 +109,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nv30.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nv30.c @@ -52,7 +52,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; 
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -71,7 +71,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv04_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -90,7 +90,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -110,7 +110,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv20_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -130,7 +130,7 @@ device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass; device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv10_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv04_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nv40.c @@ -57,7 +57,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -80,7 +80,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -103,7 +103,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - 
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -126,7 +126,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -149,7 +149,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -172,7 +172,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -195,7 +195,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -218,7 +218,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv40_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -241,7 +241,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -264,7 +264,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = 
nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -287,7 +287,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -310,7 +310,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -333,7 +333,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv4e_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -356,7 +356,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -379,7 +379,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -402,7 +402,7 @@ device->oclass[NVDEV_SUBDEV_I2C ] = &nv04_i2c_oclass; device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nv50.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nv50.c @@ -65,7 +65,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv50_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv50_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ 
-90,7 +90,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -118,7 +118,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -146,7 +146,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv50_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv50_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -174,7 +174,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -202,7 +202,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv94_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -230,7 +230,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -258,7 +258,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv84_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -286,7 +286,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] 
= &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -314,7 +314,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nv98_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -342,7 +342,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -372,7 +372,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -401,7 +401,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nva3_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -430,7 +430,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nva3_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvaf_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nv98_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv94_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -65,7 +65,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -97,7 +97,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = 
nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -129,7 +129,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -160,7 +160,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -192,7 +192,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -212,7 +212,6 @@ device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; break; @@ -224,7 +223,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -255,7 +254,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -287,7 +286,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -318,7 +317,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; 
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/device/nve0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/device/nve0.c @@ -65,7 +65,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -98,7 +98,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -131,7 +131,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -164,7 +164,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; @@ -199,7 +199,7 @@ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; - device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -1112,7 +1112,7 @@ if (conf != ~0) { if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { u32 soff = (ffs(outp.or) - 1) * 0x08; - u32 ctrl = nv_rd32(priv, 0x610798 + soff); + u32 ctrl = nv_rd32(priv, 0x610794 + soff); u32 datarate; switch ((ctrl & 0x000f0000) >> 16) { --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -687,7 +687,7 @@ } if (outp == 8) - return false; + return conf; data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); 
if (data == 0x0000) @@ -967,9 +967,6 @@ int heads = nv_rd32(parent, 0x022448); int ret; - if (nv_rd32(parent, 0x022500) & 0x00000001) - return -ENODEV; - ret = nouveau_disp_create(parent, engine, oclass, heads, "PDISP", "display", &priv); *pobject = nv_object(priv); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c @@ -54,9 +54,6 @@ int heads = nv_rd32(parent, 0x022448); int ret; - if (nv_rd32(parent, 0x022500) & 0x00000001) - return -ENODEV; - ret = nouveau_disp_create(parent, engine, oclass, heads, "PDISP", "display", &priv); *pobject = nv_object(priv); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c @@ -54,9 +54,6 @@ int heads = nv_rd32(parent, 0x022448); int ret; - if (nv_rd32(parent, 0x022500) & 0x00000001) - return -ENODEV; - ret = nouveau_disp_create(parent, engine, oclass, heads, "PDISP", "display", &priv); *pobject = nv_object(priv); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/engine/falcon.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/engine/falcon.c @@ -56,6 +56,16 @@ nv_wr32(falcon, falcon->addr + addr, data); } +static void * +vmemdup(const void *src, size_t len) +{ + void *p = vmalloc(len); + + if (p) + memcpy(p, src, len); + return p; +} + int _nouveau_falcon_init(struct nouveau_object *object) { @@ -111,7 +121,7 @@ ret = request_firmware(&fw, name, &device->pdev->dev); if (ret == 0) { - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); + falcon->code.data = vmemdup(fw->data, fw->size); falcon->code.size = fw->size; falcon->data.data = NULL; falcon->data.size = 0; @@ -134,7 +144,7 @@ return ret; } - falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL); + falcon->data.data = vmemdup(fw->data, fw->size); falcon->data.size = fw->size; release_firmware(fw); if (!falcon->data.data) @@ -149,7 +159,7 @@ return ret; } - falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL); + falcon->code.data = vmemdup(fw->data, fw->size); falcon->code.size = fw->size; release_firmware(fw); if (!falcon->code.data) @@ -235,8 +245,8 @@ if (!suspend) { nouveau_gpuobj_ref(NULL, &falcon->core); if (falcon->external) { - kfree(falcon->data.data); - kfree(falcon->code.data); + vfree(falcon->data.data); + vfree(falcon->code.data); falcon->code.data = NULL; } } --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/include/core/device.h +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/include/core/device.h @@ -70,6 +70,7 @@ const char *dbgopt; const char *name; const char *cname; + u64 disable_mask; enum { NV_04 = 0x04, --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h @@ -9,7 +9,6 @@ bool post; void (*meminit)(struct nouveau_devinit *); int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); - }; static inline struct nouveau_devinit * @@ -18,32 +17,16 @@ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT]; } -#define nouveau_devinit_create(p,e,o,d) \ - nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d) -#define nouveau_devinit_destroy(p) \ - nouveau_subdev_destroy(&(p)->base) -#define nouveau_devinit_init(p) ({ \ - struct nouveau_devinit *d = (p); \ - _nouveau_devinit_init(nv_object(d)); \ -}) -#define nouveau_devinit_fini(p,s) ({ \ - struct nouveau_devinit *d = (p); \ - _nouveau_devinit_fini(nv_object(d), (s)); \ -}) - -int nouveau_devinit_create_(struct nouveau_object *, struct 
nouveau_object *, - struct nouveau_oclass *, int, void **); -#define _nouveau_devinit_dtor _nouveau_subdev_dtor -int _nouveau_devinit_init(struct nouveau_object *); -int _nouveau_devinit_fini(struct nouveau_object *, bool suspend); - -extern struct nouveau_oclass nv04_devinit_oclass; -extern struct nouveau_oclass nv05_devinit_oclass; -extern struct nouveau_oclass nv10_devinit_oclass; -extern struct nouveau_oclass nv1a_devinit_oclass; -extern struct nouveau_oclass nv20_devinit_oclass; -extern struct nouveau_oclass nv50_devinit_oclass; -extern struct nouveau_oclass nva3_devinit_oclass; -extern struct nouveau_oclass nvc0_devinit_oclass; +extern struct nouveau_oclass *nv04_devinit_oclass; +extern struct nouveau_oclass *nv05_devinit_oclass; +extern struct nouveau_oclass *nv10_devinit_oclass; +extern struct nouveau_oclass *nv1a_devinit_oclass; +extern struct nouveau_oclass *nv20_devinit_oclass; +extern struct nouveau_oclass *nv50_devinit_oclass; +extern struct nouveau_oclass *nv84_devinit_oclass; +extern struct nouveau_oclass *nv98_devinit_oclass; +extern struct nouveau_oclass *nva3_devinit_oclass; +extern struct nouveau_oclass *nvaf_devinit_oclass; +extern struct nouveau_oclass *nvc0_devinit_oclass; #endif --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c @@ -198,7 +198,6 @@ nv_mask(priv, 0x000200, 0x00000100, 0x00000000); nv_mask(priv, 0x000200, 0x00000100, 0x00000100); - nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12); nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include "pll.h" --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c @@ -24,10 +24,11 @@ #include -#include #include #include +#include "priv.h" + int _nouveau_devinit_fini(struct nouveau_object *object, bool suspend) { @@ -43,12 +44,21 @@ int _nouveau_devinit_init(struct nouveau_object *object) { + struct nouveau_devinit_impl *impl = (void *)object->oclass; struct nouveau_devinit *devinit = (void *)object; - int ret = nouveau_subdev_init(&devinit->base); + int ret; + + ret = nouveau_subdev_init(&devinit->base); if (ret) return ret; - return nvbios_init(&devinit->base, devinit->post); + ret = nvbios_init(&devinit->base, devinit->post); + if (ret) + return ret; + + if (impl->disable) + nv_device(devinit)->disable_mask |= impl->disable(devinit); + return 0; } int @@ -57,6 +67,7 @@ struct nouveau_oclass *oclass, int size, void **pobject) { + struct nouveau_devinit_impl *impl = (void *)oclass; struct nouveau_device *device = nv_device(parent); struct nouveau_devinit *devinit; int ret; @@ -68,5 +79,7 @@ return ret; devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false); + devinit->meminit = impl->meminit; + devinit->pll_set = impl->pll_set; return 0; } --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c @@ -27,12 +27,7 @@ #include #include "fbmem.h" -#include "priv.h" - -struct nv04_devinit_priv { - struct nouveau_devinit base; - int owner; -}; +#include "nv04.h" static void nv04_devinit_meminit(struct nouveau_devinit *devinit) @@ -438,7 +433,7 @@ 
nouveau_devinit_destroy(&priv->base); } -static int +int nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) @@ -451,19 +446,19 @@ if (ret) return ret; - priv->base.meminit = nv04_devinit_meminit; - priv->base.pll_set = nv04_devinit_pll_set; priv->owner = -1; return 0; } -struct nouveau_oclass -nv04_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0x04), - .ofuncs = &(struct nouveau_ofuncs) { +struct nouveau_oclass * +nv04_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x04), + .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = nv04_devinit_ctor, .dtor = nv04_devinit_dtor, .init = nv04_devinit_init, .fini = nv04_devinit_fini, }, -}; + .meminit = nv04_devinit_meminit, + .pll_set = nv04_devinit_pll_set, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.h @@ -0,0 +1,23 @@ +#ifndef __NVKM_DEVINIT_NV04_H__ +#define __NVKM_DEVINIT_NV04_H__ + +#include "priv.h" + +struct nv04_devinit_priv { + struct nouveau_devinit base; + u8 owner; +}; + +int nv04_devinit_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +void nv04_devinit_dtor(struct nouveau_object *); +int nv04_devinit_init(struct nouveau_object *); +int nv04_devinit_fini(struct nouveau_object *, bool); +int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32); + +void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); +void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); +void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); + +#endif --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c @@ -29,12 +29,7 @@ #include #include "fbmem.h" -#include "priv.h" - -struct nv05_devinit_priv { - struct nouveau_devinit base; - u8 owner; -}; +#include "nv04.h" static void nv05_devinit_meminit(struct nouveau_devinit *devinit) @@ -49,7 +44,7 @@ { 0x06, 0x00 }, { 0x00, 0x00 } }; - struct nv05_devinit_priv *priv = (void *)devinit; + struct nv04_devinit_priv *priv = (void *)devinit; struct nouveau_bios *bios = nouveau_bios(priv); struct io_mapping *fb; u32 patt = 0xdeadbeef; @@ -130,31 +125,15 @@ fbmem_fini(fb); } -static int -nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv05_devinit_priv *priv; - int ret; - - ret = nouveau_devinit_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.meminit = nv05_devinit_meminit; - priv->base.pll_set = nv04_devinit_pll_set; - return 0; -} - -struct nouveau_oclass -nv05_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0x05), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv05_devinit_ctor, +struct nouveau_oclass * +nv05_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x05), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_devinit_ctor, .dtor = nv04_devinit_dtor, .init = nv04_devinit_init, .fini = nv04_devinit_fini, }, -}; + .meminit = nv05_devinit_meminit, + .pll_set = nv04_devinit_pll_set, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c +++ 
linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c @@ -27,17 +27,12 @@ #include #include "fbmem.h" -#include "priv.h" - -struct nv10_devinit_priv { - struct nouveau_devinit base; - u8 owner; -}; +#include "nv04.h" static void nv10_devinit_meminit(struct nouveau_devinit *devinit) { - struct nv10_devinit_priv *priv = (void *)devinit; + struct nv04_devinit_priv *priv = (void *)devinit; static const int mem_width[] = { 0x10, 0x00, 0x20 }; int mem_width_count; uint32_t patt = 0xdeadbeef; @@ -101,31 +96,15 @@ fbmem_fini(fb); } -static int -nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv10_devinit_priv *priv; - int ret; - - ret = nouveau_devinit_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.meminit = nv10_devinit_meminit; - priv->base.pll_set = nv04_devinit_pll_set; - return 0; -} - -struct nouveau_oclass -nv10_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0x10), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv10_devinit_ctor, +struct nouveau_oclass * +nv10_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x10), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_devinit_ctor, .dtor = nv04_devinit_dtor, .init = nv04_devinit_init, .fini = nv04_devinit_fini, }, -}; + .meminit = nv10_devinit_meminit, + .pll_set = nv04_devinit_pll_set, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c @@ -22,37 +22,16 @@ * Authors: Ben Skeggs */ -#include "priv.h" +#include "nv04.h" -struct nv1a_devinit_priv { - struct nouveau_devinit base; - u8 owner; -}; - -static int -nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv1a_devinit_priv *priv; - int ret; - - ret = nouveau_devinit_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.pll_set = nv04_devinit_pll_set; - return 0; -} - -struct nouveau_oclass -nv1a_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0x1a), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv1a_devinit_ctor, +struct nouveau_oclass * +nv1a_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x1a), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_devinit_ctor, .dtor = nv04_devinit_dtor, .init = nv04_devinit_init, .fini = nv04_devinit_fini, }, -}; + .pll_set = nv04_devinit_pll_set, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c @@ -24,18 +24,13 @@ * */ -#include "priv.h" +#include "nv04.h" #include "fbmem.h" -struct nv20_devinit_priv { - struct nouveau_devinit base; - u8 owner; -}; - static void nv20_devinit_meminit(struct nouveau_devinit *devinit) { - struct nv20_devinit_priv *priv = (void *)devinit; + struct nv04_devinit_priv *priv = (void *)devinit; struct nouveau_device *device = nv_device(priv); uint32_t mask = (device->chipset >= 0x25 ? 
0x300 : 0x900); uint32_t amount, off; @@ -65,31 +60,15 @@ fbmem_fini(fb); } -static int -nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) -{ - struct nv20_devinit_priv *priv; - int ret; - - ret = nouveau_devinit_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; - - priv->base.meminit = nv20_devinit_meminit; - priv->base.pll_set = nv04_devinit_pll_set; - return 0; -} - -struct nouveau_oclass -nv20_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0x20), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nv20_devinit_ctor, +struct nouveau_oclass * +nv20_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x20), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_devinit_ctor, .dtor = nv04_devinit_dtor, .init = nv04_devinit_init, .fini = nv04_devinit_fini, }, -}; + .meminit = nv20_devinit_meminit, + .pll_set = nv04_devinit_pll_set, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c @@ -28,9 +28,9 @@ #include #include -#include "priv.h" +#include "nv50.h" -static int +int nv50_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) { struct nv50_devinit_priv *priv = (void *)devinit; @@ -74,6 +74,19 @@ return 0; } +static u64 +nv50_devinit_disable(struct nouveau_devinit *devinit) +{ + struct nv50_devinit_priv *priv = (void *)devinit; + u32 r001540 = nv_rd32(priv, 0x001540); + u64 disable = 0ULL; + + if (!(r001540 & 0x40000000)) + disable |= (1ULL << NVDEV_ENGINE_MPEG); + + return disable; +} + int nv50_devinit_init(struct nouveau_object *object) { @@ -120,7 +133,7 @@ return 0; } -static int +int nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, struct nouveau_object **pobject) @@ -133,17 +146,18 @@ if (ret) return ret; - priv->base.pll_set = nv50_devinit_pll_set; return 0; } -struct nouveau_oclass -nv50_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0x50), - .ofuncs = &(struct nouveau_ofuncs) { +struct nouveau_oclass * +nv50_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x50), + .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = nv50_devinit_ctor, .dtor = _nouveau_devinit_dtor, .init = nv50_devinit_init, .fini = _nouveau_devinit_fini, }, -}; + .pll_set = nv50_devinit_pll_set, + .disable = nv50_devinit_disable, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h @@ -0,0 +1,18 @@ +#ifndef __NVKM_DEVINIT_NV50_H__ +#define __NVKM_DEVINIT_NV50_H__ + +#include "priv.h" + +struct nv50_devinit_priv { + struct nouveau_devinit base; +}; + +int nv50_devinit_ctor(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, void *, u32, + struct nouveau_object **); +int nv50_devinit_init(struct nouveau_object *); +int nv50_devinit_pll_set(struct nouveau_devinit *, u32, u32); + +int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32); + +#endif --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c @@ -0,0 +1,63 @@ +/* + * Copyright 2013 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nv50.h" + +static u64 +nv84_devinit_disable(struct nouveau_devinit *devinit) +{ + struct nv50_devinit_priv *priv = (void *)devinit; + u32 r001540 = nv_rd32(priv, 0x001540); + u32 r00154c = nv_rd32(priv, 0x00154c); + u64 disable = 0ULL; + + if (!(r001540 & 0x40000000)) { + disable |= (1ULL << NVDEV_ENGINE_MPEG); + disable |= (1ULL << NVDEV_ENGINE_VP); + disable |= (1ULL << NVDEV_ENGINE_BSP); + disable |= (1ULL << NVDEV_ENGINE_CRYPT); + } + + if (!(r00154c & 0x00000004)) + disable |= (1ULL << NVDEV_ENGINE_DISP); + if (!(r00154c & 0x00000020)) + disable |= (1ULL << NVDEV_ENGINE_BSP); + if (!(r00154c & 0x00000040)) + disable |= (1ULL << NVDEV_ENGINE_CRYPT); + + return disable; +} + +struct nouveau_oclass * +nv84_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x84), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_devinit_ctor, + .dtor = _nouveau_devinit_dtor, + .init = nv50_devinit_init, + .fini = _nouveau_devinit_fini, + }, + .pll_set = nv50_devinit_pll_set, + .disable = nv84_devinit_disable, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c @@ -0,0 +1,62 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "nv50.h" + +static u64 +nv98_devinit_disable(struct nouveau_devinit *devinit) +{ + struct nv50_devinit_priv *priv = (void *)devinit; + u32 r001540 = nv_rd32(priv, 0x001540); + u32 r00154c = nv_rd32(priv, 0x00154c); + u64 disable = 0ULL; + + if (!(r001540 & 0x40000000)) { + disable |= (1ULL << NVDEV_ENGINE_VP); + disable |= (1ULL << NVDEV_ENGINE_BSP); + disable |= (1ULL << NVDEV_ENGINE_PPP); + } + + if (!(r00154c & 0x00000004)) + disable |= (1ULL << NVDEV_ENGINE_DISP); + if (!(r00154c & 0x00000020)) + disable |= (1ULL << NVDEV_ENGINE_BSP); + if (!(r00154c & 0x00000040)) + disable |= (1ULL << NVDEV_ENGINE_CRYPT); + + return disable; +} + +struct nouveau_oclass * +nv98_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0x98), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_devinit_ctor, + .dtor = _nouveau_devinit_dtor, + .init = nv50_devinit_init, + .fini = _nouveau_devinit_fini, + }, + .pll_set = nv50_devinit_pll_set, + .disable = nv98_devinit_disable, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c @@ -22,12 +22,12 @@ * Authors: Ben Skeggs */ -#include "priv.h" +#include "nv50.h" -static int +int nva3_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) { - struct nva3_devinit_priv *priv = (void *)devinit; + struct nv50_devinit_priv *priv = (void *)devinit; struct nouveau_bios *bios = nouveau_bios(priv); struct nvbios_pll info; int N, fN, M, P; @@ -58,30 +58,38 @@ return ret; } -static int -nva3_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, void *data, u32 size, - struct nouveau_object **pobject) +static u64 +nva3_devinit_disable(struct nouveau_devinit *devinit) { - struct nv50_devinit_priv *priv; - int ret; + struct nv50_devinit_priv *priv = (void *)devinit; + u32 r001540 = nv_rd32(priv, 0x001540); + u32 r00154c = nv_rd32(priv, 0x00154c); + u64 disable = 0ULL; + + if (!(r001540 & 0x40000000)) { + disable |= (1ULL << NVDEV_ENGINE_VP); + disable |= (1ULL << NVDEV_ENGINE_PPP); + } - ret = nouveau_devinit_create(parent, engine, oclass, &priv); - *pobject = nv_object(priv); - if (ret) - return ret; + if (!(r00154c & 0x00000004)) + disable |= (1ULL << NVDEV_ENGINE_DISP); + if (!(r00154c & 0x00000020)) + disable |= (1ULL << NVDEV_ENGINE_BSP); + if (!(r00154c & 0x00000200)) + disable |= (1ULL << NVDEV_ENGINE_COPY0); - priv->base.pll_set = nva3_devinit_pll_set; - return 0; + return disable; } -struct nouveau_oclass -nva3_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0xa3), - .ofuncs = &(struct nouveau_ofuncs) { - .ctor = nva3_devinit_ctor, +struct nouveau_oclass * +nva3_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0xa3), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_devinit_ctor, .dtor = _nouveau_devinit_dtor, .init = nv50_devinit_init, .fini = _nouveau_devinit_fini, }, -}; + .pll_set = nva3_devinit_pll_set, + .disable = nva3_devinit_disable, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c @@ -0,0 +1,63 @@ +/* + * Copyright 2013 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "nv50.h" + +static u64 +nvaf_devinit_disable(struct nouveau_devinit *devinit) +{ + struct nv50_devinit_priv *priv = (void *)devinit; + u32 r001540 = nv_rd32(priv, 0x001540); + u32 r00154c = nv_rd32(priv, 0x00154c); + u64 disable = 0; + + if (!(r001540 & 0x40000000)) { + disable |= (1ULL << NVDEV_ENGINE_VP); + disable |= (1ULL << NVDEV_ENGINE_PPP); + } + + if (!(r00154c & 0x00000004)) + disable |= (1ULL << NVDEV_ENGINE_DISP); + if (!(r00154c & 0x00000020)) + disable |= (1ULL << NVDEV_ENGINE_BSP); + if (!(r00154c & 0x00000040)) + disable |= (1ULL << NVDEV_ENGINE_VIC); + if (!(r00154c & 0x00000200)) + disable |= (1ULL << NVDEV_ENGINE_COPY0); + + return disable; +} + +struct nouveau_oclass * +nvaf_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0xaf), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv50_devinit_ctor, + .dtor = _nouveau_devinit_dtor, + .init = nv50_devinit_init, + .fini = _nouveau_devinit_fini, + }, + .pll_set = nva3_devinit_pll_set, + .disable = nvaf_devinit_disable, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c @@ -22,12 +22,12 @@ * Authors: Ben Skeggs */ -#include "priv.h" +#include "nv50.h" static int nvc0_devinit_pll_set(struct nouveau_devinit *devinit, u32 type, u32 freq) { - struct nvc0_devinit_priv *priv = (void *)devinit; + struct nv50_devinit_priv *priv = (void *)devinit; struct nouveau_bios *bios = nouveau_bios(priv); struct nvbios_pll info; int N, fN, M, P; @@ -59,6 +59,33 @@ return ret; } +static u64 +nvc0_devinit_disable(struct nouveau_devinit *devinit) +{ + struct nv50_devinit_priv *priv = (void *)devinit; + u32 r022500 = nv_rd32(priv, 0x022500); + u64 disable = 0ULL; + + if (r022500 & 0x00000001) + disable |= (1ULL << NVDEV_ENGINE_DISP); + + if (r022500 & 0x00000002) { + disable |= (1ULL << NVDEV_ENGINE_VP); + disable |= (1ULL << NVDEV_ENGINE_PPP); + } + + if (r022500 & 0x00000004) + disable |= (1ULL << NVDEV_ENGINE_BSP); + if (r022500 & 0x00000008) + disable |= (1ULL << NVDEV_ENGINE_VENC); + if (r022500 & 0x00000100) + disable |= (1ULL << NVDEV_ENGINE_COPY0); + if (r022500 & 0x00000200) + disable |= (1ULL << NVDEV_ENGINE_COPY1); + + return disable; +} + static int nvc0_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 size, 
@@ -72,19 +99,20 @@ if (ret) return ret; - priv->base.pll_set = nvc0_devinit_pll_set; if (nv_rd32(priv, 0x022500) & 0x00000001) priv->base.post = true; return 0; } -struct nouveau_oclass -nvc0_devinit_oclass = { - .handle = NV_SUBDEV(DEVINIT, 0xc0), - .ofuncs = &(struct nouveau_ofuncs) { +struct nouveau_oclass * +nvc0_devinit_oclass = &(struct nouveau_devinit_impl) { + .base.handle = NV_SUBDEV(DEVINIT, 0xc0), + .base.ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_devinit_ctor, .dtor = _nouveau_devinit_dtor, .init = nv50_devinit_init, .fini = _nouveau_devinit_fini, }, -}; + .pll_set = nvc0_devinit_pll_set, + .disable = nvc0_devinit_disable, +}.base; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h @@ -6,20 +6,30 @@ #include #include -void nv04_devinit_dtor(struct nouveau_object *); -int nv04_devinit_init(struct nouveau_object *); -int nv04_devinit_fini(struct nouveau_object *, bool); -int nv04_devinit_pll_set(struct nouveau_devinit *, u32, u32); - -void setPLL_single(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); -void setPLL_double_highregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); -void setPLL_double_lowregs(struct nouveau_devinit *, u32, struct nouveau_pll_vals *); - - -struct nv50_devinit_priv { - struct nouveau_devinit base; +struct nouveau_devinit_impl { + struct nouveau_oclass base; + void (*meminit)(struct nouveau_devinit *); + int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq); + u64 (*disable)(struct nouveau_devinit *); }; -int nv50_devinit_init(struct nouveau_object *); +#define nouveau_devinit_create(p,e,o,d) \ + nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d) +#define nouveau_devinit_destroy(p) \ + nouveau_subdev_destroy(&(p)->base) +#define nouveau_devinit_init(p) ({ \ + struct nouveau_devinit *d = (p); \ + _nouveau_devinit_init(nv_object(d)); \ +}) +#define nouveau_devinit_fini(p,s) ({ \ + struct nouveau_devinit *d = (p); \ + _nouveau_devinit_fini(nv_object(d), (s)); \ +}) + +int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *, + struct nouveau_oclass *, int, void **); +#define _nouveau_devinit_dtor _nouveau_subdev_dtor +int _nouveau_devinit_init(struct nouveau_object *); +int _nouveau_devinit_fini(struct nouveau_object *, bool suspend); #endif --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c @@ -36,7 +36,7 @@ .fini = _nouveau_fb_fini, }, .base.memtype = nv04_fb_memtype_valid, - .base.ram = &nv10_ram_oclass, + .base.ram = &nv1a_ram_oclass, .tile.regions = 8, .tile.init = nv10_fb_tile_init, .tile.fini = nv10_fb_tile_fini, --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c @@ -45,6 +45,7 @@ if (priv->r100c10_page) nv_wr32(priv, 0x100c10, priv->r100c10 >> 8); + nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ return 0; } --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c @@ -26,6 +26,7 @@ const struct nouveau_mc_intr nv50_mc_intr[] = { + { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! 
*/ { 0x00000001, NVDEV_ENGINE_MPEG }, { 0x00000100, NVDEV_ENGINE_FIFO }, { 0x00001000, NVDEV_ENGINE_GR }, @@ -34,7 +35,6 @@ { 0x00020000, NVDEV_ENGINE_VP }, /* NV84- */ { 0x00100000, NVDEV_SUBDEV_TIMER }, { 0x00200000, NVDEV_SUBDEV_GPIO }, - { 0x04000000, NVDEV_ENGINE_DISP }, { 0x10000000, NVDEV_SUBDEV_BUS }, { 0x80000000, NVDEV_ENGINE_SW }, { 0x0002d101, NVDEV_SUBDEV_FB }, --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -26,6 +26,7 @@ static const struct nouveau_mc_intr nv98_mc_intr[] = { + { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */ { 0x00000001, NVDEV_ENGINE_PPP }, { 0x00000100, NVDEV_ENGINE_FIFO }, { 0x00001000, NVDEV_ENGINE_GR }, @@ -37,7 +38,6 @@ { 0x00100000, NVDEV_SUBDEV_TIMER }, { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */ - { 0x04000000, NVDEV_ENGINE_DISP }, { 0x10000000, NVDEV_SUBDEV_BUS }, { 0x80000000, NVDEV_ENGINE_SW }, { 0x0042d101, NVDEV_SUBDEV_FB }, --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -26,6 +26,7 @@ const struct nouveau_mc_intr nvc0_mc_intr[] = { + { 0x04000000, NVDEV_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */ { 0x00000001, NVDEV_ENGINE_PPP }, { 0x00000020, NVDEV_ENGINE_COPY0 }, { 0x00000040, NVDEV_ENGINE_COPY1 }, @@ -39,7 +40,6 @@ { 0x00200000, NVDEV_SUBDEV_GPIO }, { 0x01000000, NVDEV_SUBDEV_PWR }, { 0x02000000, NVDEV_SUBDEV_LTCG }, - { 0x04000000, NVDEV_ENGINE_DISP }, { 0x10000000, NVDEV_SUBDEV_BUS }, { 0x40000000, NVDEV_SUBDEV_IBUS }, { 0x80000000, NVDEV_ENGINE_SW }, --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -423,9 +423,6 @@ acpi_status status; acpi_handle dhandle, rom_handle; - if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) - return false; - dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_bios.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -352,7 +352,7 @@ /* Apple cards don't have the fp table; the laptops use DDC */ /* The table is also missing on some x86 IGPs */ #ifndef __powerpc__ - NV_ERROR(drm, "Pointer to flat panel table invalid\n"); + NV_WARN(drm, "Pointer to flat panel table invalid\n"); #endif bios->digital_min_front_porch = 0x4b; return 0; @@ -936,7 +936,7 @@ tmdstableptr = ROM16(bios->data[bitentry->offset]); if (!tmdstableptr) { - NV_ERROR(drm, "Pointer to TMDS table invalid\n"); + NV_WARN(drm, "Pointer to TMDS table invalid\n"); return -EINVAL; } --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_bo.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -798,25 +798,25 @@ struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { struct nouveau_mem *node = old_mem->mm_node; - struct nouveau_bo *nvbo = nouveau_bo(bo); u64 length = (new_mem->num_pages << PAGE_SHIFT); u64 src_offset = node->vma[0].offset; u64 dst_offset = node->vma[1].offset; + int src_tiled = !!node->memtype; + int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype; int ret; while (length) { u32 amount, stride, height; + ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); + if (ret) + return ret; + amount = min(length, (u64)(4 * 1024 * 1024)); stride = 16 * 4; height = amount / stride; - if (old_mem->mem_type == TTM_PL_VRAM && - nouveau_bo_tile_layout(nvbo)) { - ret = RING_SPACE(chan, 8); - if (ret) - 
return ret; - + if (src_tiled) { BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); OUT_RING (chan, 0); OUT_RING (chan, 0); @@ -826,19 +826,10 @@ OUT_RING (chan, 0); OUT_RING (chan, 0); } else { - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); OUT_RING (chan, 1); } - if (new_mem->mem_type == TTM_PL_VRAM && - nouveau_bo_tile_layout(nvbo)) { - ret = RING_SPACE(chan, 8); - if (ret) - return ret; - + if (dst_tiled) { BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); OUT_RING (chan, 0); OUT_RING (chan, 0); @@ -848,18 +839,10 @@ OUT_RING (chan, 0); OUT_RING (chan, 0); } else { - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); OUT_RING (chan, 1); } - ret = RING_SPACE(chan, 14); - if (ret) - return ret; - BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); OUT_RING (chan, upper_32_bits(src_offset)); OUT_RING (chan, upper_32_bits(dst_offset)); --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_display.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_display.c @@ -603,6 +603,14 @@ if (!s) return -ENOMEM; + if (new_bo != old_bo) { + ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); + if (ret) + goto fail_free; + } + + mutex_lock(&chan->cli->mutex); + /* synchronise rendering channel with the kernel's channel */ spin_lock(&new_bo->bo.bdev->fence_lock); fence = nouveau_fence_ref(new_bo->bo.sync_obj); @@ -610,15 +618,8 @@ ret = nouveau_fence_sync(fence, chan); nouveau_fence_unref(&fence); if (ret) - goto fail_free; - - if (new_bo != old_bo) { - ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); - if (ret) - goto fail_free; - } + goto fail_unpin; - mutex_lock(&chan->cli->mutex); ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL); if (ret) goto fail_unpin; @@ -629,6 +630,9 @@ fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y, new_bo->bo.offset }; + /* Keep vblanks on during flip, for the target crtc of this flip */ + drm_vblank_get(dev, nouveau_crtc(crtc)->index); + /* Emit a page flip */ if (nv_device(drm->device)->card_type >= NV_50) { ret = nv50_display_flip_next(crtc, fb, chan, swap_interval); @@ -657,9 +661,9 @@ } ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); - mutex_unlock(&chan->cli->mutex); if (ret) goto fail_unreserve; + mutex_unlock(&chan->cli->mutex); /* Update the crtc struct and cleanup */ crtc->fb = fb; @@ -672,6 +676,7 @@ return 0; fail_unreserve: + drm_vblank_put(dev, nouveau_crtc(crtc)->index); ttm_bo_unreserve(&old_bo->bo); fail_unpin: mutex_unlock(&chan->cli->mutex); @@ -691,6 +696,7 @@ struct drm_device *dev = drm->dev; struct nouveau_page_flip_state *s; unsigned long flags; + int crtcid = -1; spin_lock_irqsave(&dev->event_lock, flags); @@ -701,8 +707,16 @@ } s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); - if (s->event) - drm_send_vblank_event(dev, s->crtc, s->event); + if (s->event) { + /* Vblank timestamps/counts are only correct on >= NV-50 */ + if (nv_device(drm->device)->card_type >= NV_50) + crtcid = s->crtc; + + drm_send_vblank_event(dev, crtcid, s->event); + } + + /* Give up ownership of vblank for page-flipped crtc */ + drm_vblank_put(dev, s->crtc); list_del(&s->head); if (ps) --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_drm.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -376,6 +376,8 @@ if (ret) goto fail_device; + dev->irq_enabled = true; + /* workaround an odd issue on nvc1 by disabling the device's * nosnoop capability. hopefully won't cause issues until a * better fix is found - assuming there is one... 
@@ -475,6 +477,7 @@ struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_object *device; + dev->irq_enabled = false; device = drm->client.base.device; drm_put_dev(dev); @@ -483,13 +486,13 @@ } static int -nouveau_do_suspend(struct drm_device *dev) +nouveau_do_suspend(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; int ret; - if (dev->mode_config.num_crtc) { + if (dev->mode_config.num_crtc && !runtime) { NV_INFO(drm, "suspending display...\n"); ret = nouveau_display_suspend(dev); if (ret) @@ -557,7 +560,7 @@ if (drm_dev->mode_config.num_crtc) nouveau_fbcon_set_suspend(drm_dev, 1); - ret = nouveau_do_suspend(drm_dev); + ret = nouveau_do_suspend(drm_dev, false); if (ret) return ret; @@ -637,7 +640,7 @@ if (drm_dev->mode_config.num_crtc) nouveau_fbcon_set_suspend(drm_dev, 1); - ret = nouveau_do_suspend(drm_dev); + ret = nouveau_do_suspend(drm_dev, false); return ret; } @@ -855,20 +858,23 @@ struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (nouveau_runtime_pm == 0) - return -EINVAL; + if (nouveau_runtime_pm == 0) { + pm_runtime_forbid(dev); + return -EBUSY; + } /* are we optimus enabled? */ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); - return -EINVAL; + pm_runtime_forbid(dev); + return -EBUSY; } nv_debug_level(SILENT); drm_kms_helper_poll_disable(drm_dev); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); nouveau_switcheroo_optimus_dsm(); - ret = nouveau_do_suspend(drm_dev); + ret = nouveau_do_suspend(drm_dev, true); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3cold); @@ -894,8 +900,6 @@ pci_set_master(pdev); ret = nouveau_do_resume(drm_dev); - if (drm_dev->mode_config.num_crtc) - nouveau_display_resume(drm_dev); drm_kms_helper_poll_enable(drm_dev); /* do magic */ nv_mask(device, 0x88488, (1 << 25), (1 << 25)); @@ -912,12 +916,15 @@ struct nouveau_drm *drm = nouveau_drm(drm_dev); struct drm_crtc *crtc; - if (nouveau_runtime_pm == 0) + if (nouveau_runtime_pm == 0) { + pm_runtime_forbid(dev); return -EBUSY; + } /* are we optimus enabled? */ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); + pm_runtime_forbid(dev); return -EBUSY; } --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_drm.h +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_drm.h @@ -10,7 +10,7 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_PATCHLEVEL 2 /* * 1.1.1: @@ -21,6 +21,8 @@ * to control registers on the MPs to enable performance counters, * and to control the warp error enable mask (OpenGL requires out of * bounds access to local memory to be silently ignored / return 0). 
+ * 1.1.2: + * - fixes multiple bugs in flip completion events and timestamping */ #include --- linux-3.13.0.orig/drivers/gpu/drm/nouveau/nouveau_vga.c +++ linux-3.13.0/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -98,7 +98,16 @@ nouveau_vga_fini(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; + bool runtime = false; + + if (nouveau_runtime_pm == 1) + runtime = true; + if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) + runtime = true; + vga_switcheroo_unregister_client(dev->pdev); + if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) + vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); vga_client_register(dev->pdev, NULL, NULL, NULL); } --- linux-3.13.0.orig/drivers/gpu/drm/qxl/qxl_display.c +++ linux-3.13.0/drivers/gpu/drm/qxl/qxl_display.c @@ -516,7 +516,6 @@ struct qxl_framebuffer *qfb; struct qxl_bo *bo, *old_bo = NULL; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); - uint32_t width, height, base_offset; bool recreate_primary = false; int ret; int surf_id; @@ -546,9 +545,10 @@ if (qcrtc->index == 0) recreate_primary = true; - width = mode->hdisplay; - height = mode->vdisplay; - base_offset = 0; + if (bo->surf.stride * bo->surf.height > qdev->vram_size) { + DRM_ERROR("Mode doesn't fit in vram size (vgamem)"); + return -EINVAL; + } ret = qxl_bo_reserve(bo, false); if (ret != 0) @@ -562,10 +562,10 @@ if (recreate_primary) { qxl_io_destroy_primary(qdev); qxl_io_log(qdev, - "recreate primary: %dx%d (was %dx%d,%d,%d)\n", - width, height, bo->surf.width, - bo->surf.height, bo->surf.stride, bo->surf.format); - qxl_io_create_primary(qdev, base_offset, bo); + "recreate primary: %dx%d,%d,%d\n", + bo->surf.width, bo->surf.height, + bo->surf.stride, bo->surf.format); + qxl_io_create_primary(qdev, 0, bo); bo->is_primary = true; surf_id = 0; } else { --- linux-3.13.0.orig/drivers/gpu/drm/qxl/qxl_irq.c +++ linux-3.13.0/drivers/gpu/drm/qxl/qxl_irq.c @@ -33,6 +33,9 @@ pending = xchg(&qdev->ram_header->int_pending, 0); + if (!pending) + return IRQ_NONE; + atomic_inc(&qdev->irq_received); if (pending & QXL_INTERRUPT_DISPLAY) { --- linux-3.13.0.orig/drivers/gpu/drm/qxl/qxl_ttm.c +++ linux-3.13.0/drivers/gpu/drm/qxl/qxl_ttm.c @@ -433,6 +433,7 @@ static void qxl_sync_obj_unref(void **sync_obj) { + *sync_obj = NULL; } static void *qxl_sync_obj_ref(void *sync_obj) --- linux-3.13.0.orig/drivers/gpu/drm/radeon/atombios_crtc.c +++ linux-3.13.0/drivers/gpu/drm/radeon/atombios_crtc.c @@ -209,6 +209,16 @@ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +static const u32 vga_control_regs[6] = +{ + AVIVO_D1VGA_CONTROL, + AVIVO_D2VGA_CONTROL, + EVERGREEN_D3VGA_CONTROL, + EVERGREEN_D4VGA_CONTROL, + EVERGREEN_D5VGA_CONTROL, + EVERGREEN_D6VGA_CONTROL, +}; + static void atombios_blank_crtc(struct drm_crtc *crtc, int state) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -216,13 +226,23 @@ struct radeon_device *rdev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC); BLANK_CRTC_PS_ALLOCATION args; + u32 vga_control = 0; memset(&args, 0, sizeof(args)); + if (ASIC_IS_DCE8(rdev)) { + vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]); + WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1); + } + args.ucCRTC = radeon_crtc->crtc_id; args.ucBlanking = state; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + if (ASIC_IS_DCE8(rdev)) { + WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control); + } } static void atombios_powergate_crtc(struct drm_crtc *crtc, int 
state) @@ -250,8 +270,6 @@ switch (mode) { case DRM_MODE_DPMS_ON: radeon_crtc->enabled = true; - /* adjust pm to dpms changes BEFORE enabling crtcs */ - radeon_pm_compute_clocks(rdev); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); @@ -269,10 +287,10 @@ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* adjust pm to dpms changes AFTER disabling crtcs */ - radeon_pm_compute_clocks(rdev); break; } + /* adjust pm to dpms */ + radeon_pm_compute_clocks(rdev); } static void @@ -839,14 +857,16 @@ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; - switch (bpc) { - case 8: - default: - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; - break; - case 10: - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; - break; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; + break; + case 10: + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; + break; + } } args.v5.ucTransmitterID = encoder_id; args.v5.ucEncoderMode = encoder_mode; @@ -861,20 +881,22 @@ args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; - switch (bpc) { - case 8: - default: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; - break; - case 10: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; - break; - case 12: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; - break; - case 16: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; - break; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; + break; + case 10: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; + break; + case 12: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; + break; + case 16: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; + break; + } } args.v6.ucTransmitterID = encoder_id; args.v6.ucEncoderMode = encoder_mode; @@ -938,11 +960,14 @@ radeon_atombios_get_ppll_ss_info(rdev, &radeon_crtc->ss, ATOM_DP_SS_ID1); - } else + } else { radeon_crtc->ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &radeon_crtc->ss, ATOM_DP_SS_ID1); + } + /* disable spread spectrum on DCE3 DP */ + radeon_crtc->ss_enabled = false; } break; case ATOM_ENCODER_MODE_LVDS: @@ -1146,7 +1171,7 @@ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); /* Set NUM_BANKS. */ - if (rdev->family >= CHIP_BONAIRE) { + if (rdev->family >= CHIP_TAHITI) { unsigned tileb, index, num_banks, tile_split_bytes; /* Calculate the macrotile mode index. */ @@ -1164,13 +1189,14 @@ return -EINVAL; } - num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + if (rdev->family >= CHIP_BONAIRE) + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + else + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { - /* SI and older. */ - if (rdev->family >= CHIP_TAHITI) - tmp = rdev->config.si.tile_config; - else if (rdev->family >= CHIP_CAYMAN) + /* NI and older. 
*/ + if (rdev->family >= CHIP_CAYMAN) tmp = rdev->config.cayman.tile_config; else tmp = rdev->config.evergreen.tile_config; @@ -1273,6 +1299,9 @@ (x << 16) | y); viewport_w = crtc->mode.hdisplay; viewport_h = (crtc->mode.vdisplay + 1) & ~1; + if ((rdev->family >= CHIP_BONAIRE) && + (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) + viewport_h *= 2; WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); @@ -1688,9 +1717,8 @@ return pll; } /* otherwise, pick one of the plls */ - if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI)) { - /* KB/KV has PPLL1 and PPLL2 */ + if ((rdev->family == CHIP_KABINI)) { + /* KB has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1699,7 +1727,7 @@ DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { - /* CI has PPLL0, PPLL1, and PPLL2 */ + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1743,6 +1771,20 @@ return ATOM_PPLL1; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; + } else if (ASIC_IS_DCE41(rdev)) { + /* Don't share PLLs on DCE4.1 chips */ + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { + if (rdev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + } + pll_in_use = radeon_get_pll_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; } else if (ASIC_IS_DCE4(rdev)) { /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, * depending on the asic: @@ -1770,7 +1812,7 @@ if (pll != ATOM_PPLL_INVALID) return pll; } - } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */ + } else { /* use the same PPLL for all monitors with the same clock */ pll = radeon_get_shared_nondp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) @@ -1973,6 +2015,7 @@ case ATOM_PPLL0: /* disable the ppll */ if ((rdev->family == CHIP_ARUBA) || + (rdev->family == CHIP_KAVERI) || (rdev->family == CHIP_BONAIRE) || (rdev->family == CHIP_HAWAII)) atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, --- linux-3.13.0.orig/drivers/gpu/drm/radeon/atombios_dp.c +++ linux-3.13.0/drivers/gpu/drm/radeon/atombios_dp.c @@ -123,7 +123,7 @@ /* flags not zero */ if (args.v1.ucReplyStatus == 2) { DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); - return -EBUSY; + return -EIO; } /* error */ @@ -384,6 +384,19 @@ /***** radeon specific DP functions *****/ +static int radeon_dp_get_max_link_rate(struct drm_connector *connector, + u8 dpcd[DP_DPCD_SIZE]) +{ + int max_link_rate; + + if (radeon_connector_is_dp12_capable(connector)) + max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000); + else + max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000); + + return max_link_rate; +} + /* First get the min lane# when low rate is used according to pixel clock * (prefer low rate), second check max lane# supported by DP panel, * if the max lane# < low rate lane# then use max lane# instead. 
@@ -393,7 +406,7 @@ int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int max_link_rate = drm_dp_max_link_rate(dpcd); + int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd); int max_lane_num = drm_dp_max_lane_count(dpcd); int lane_num; int max_dp_pix_clock; @@ -431,7 +444,7 @@ return 540000; } - return drm_dp_max_link_rate(dpcd); + return radeon_dp_get_max_link_rate(connector, dpcd); } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, @@ -561,6 +574,10 @@ struct radeon_connector_atom_dig *dig_connector; int dp_clock; + if ((mode->clock > 340000) && + (!radeon_connector_is_dp12_capable(connector))) + return MODE_CLOCK_HIGH; + if (!radeon_connector->con_priv) return MODE_CLOCK_HIGH; dig_connector = radeon_connector->con_priv; @@ -683,10 +700,8 @@ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_DOWNSPREAD_CTRL, 0); - if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && - (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { + if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE) radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1); - } /* set the lane count on the sink */ tmp = dp_info->dp_lane_count; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/atombios_encoders.c +++ linux-3.13.0/drivers/gpu/drm/radeon/atombios_encoders.c @@ -183,7 +183,6 @@ struct backlight_properties props; struct radeon_backlight_privdata *pdata; struct radeon_encoder_atom_dig *dig; - u8 backlight_level; char bl_name[16]; /* Mac laptops with multiple GPUs use the gmux driver for backlight @@ -222,12 +221,17 @@ pdata->encoder = radeon_encoder; - backlight_level = radeon_atom_get_backlight_level_from_reg(rdev); - dig = radeon_encoder->enc_priv; dig->bl_dev = bd; bd->props.brightness = radeon_atom_backlight_get_brightness(bd); + /* Set a reasonable default here if the level is 0 otherwise + * fbdev will attempt to turn the backlight on after console + * unblanking and it will try and restore 0 which turns the backlight + * off again. 
+ */ + if (bd->props.brightness == 0) + bd->props.brightness = RADEON_MAX_BL_LEVEL; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); @@ -1313,7 +1317,7 @@ } if (is_dp) args.v5.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v5.ucLaneNum = 8; else args.v5.ucLaneNum = 4; @@ -1897,8 +1901,11 @@ args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; else args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); - } else + } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; + } else { args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: --- linux-3.13.0.orig/drivers/gpu/drm/radeon/ci_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/ci_dpm.c @@ -21,8 +21,10 @@ * */ +#include #include "drmP.h" #include "radeon.h" +#include "radeon_ucode.h" #include "cikd.h" #include "r600_dpm.h" #include "ci_dpm.h" @@ -850,6 +852,9 @@ WREG32_SMC(CG_THERMAL_CTRL, tmp); #endif + rdev->pm.dpm.thermal.min_temp = low_temp; + rdev->pm.dpm.thermal.max_temp = high_temp; + return 0; } @@ -921,7 +926,18 @@ pi->vddc_leakage.count = 0; pi->vddci_leakage.count = 0; - if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { + if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0) + continue; + if (vddc != 0 && vddc != virtual_voltage_id) { + pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; + pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; + pi->vddc_leakage.count++; + } + } + } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) { for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci, @@ -1160,7 +1176,7 @@ tmp &= ~GLOBAL_PWRMGT_EN; WREG32_SMC(GENERAL_PWRMGT, tmp); - tmp = RREG32(SCLK_PWRMGT_CNTL); + tmp = RREG32_SMC(SCLK_PWRMGT_CNTL); tmp &= ~DYNAMIC_PM_EN; WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); @@ -4725,7 +4741,7 @@ ci_enable_spread_spectrum(rdev, false); ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); ci_stop_dpm(rdev); - ci_enable_ds_master_switch(rdev, true); + ci_enable_ds_master_switch(rdev, false); ci_enable_ulv(rdev, false); ci_clear_vc(rdev); ci_reset_to_default(rdev); @@ -5128,6 +5144,12 @@ pi->mclk_dpm_key_disabled = 0; pi->pcie_dpm_key_disabled = 0; + /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ + if ((rdev->pdev->device == 0x6658) && + (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) { + pi->mclk_dpm_key_disabled = 1; + } + pi->caps_sclk_ds = true; pi->mclk_strobe_mode_threshold = 40000; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/cik.c +++ linux-3.13.0/drivers/gpu/drm/radeon/cik.c @@ -38,6 +38,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_ce.bin"); MODULE_FIRMWARE("radeon/BONAIRE_mec.bin"); MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); +MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin"); MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); @@ -46,6 +47,7 @@ MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); 
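[Editor's note] The BONAIRE/HAWAII "_mc2.bin" firmware entries added here, together with the cik_init_microcode() change in the following hunks, use a try-new-then-fall-back request pattern. The sketch below is a minimal illustration of that pattern only, not driver code: it uses the real request_firmware() API, but the helper name, buffer size and device pointer are placeholders.

#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/device.h>

/* Hypothetical helper mirroring the mc2 -> mc fallback in the hunk below. */
static int example_request_mc_firmware(const struct firmware **fw,
                                       const char *chip_name,
                                       struct device *dev)
{
        char fw_name[30];
        int err;

        /* prefer the newer image if the firmware package ships it */
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
        err = request_firmware(fw, fw_name, dev);
        if (err) {
                /* fall back to the original MC ucode image */
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
                err = request_firmware(fw, fw_name, dev);
        }
        return err;
}

The caller then validates the blob against either expected length, as the cik_mc size check in the hunk below does.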
MODULE_FIRMWARE("radeon/HAWAII_mec.bin"); MODULE_FIRMWARE("radeon/HAWAII_mc.bin"); +MODULE_FIRMWARE("radeon/HAWAII_mc2.bin"); MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); @@ -1095,7 +1097,7 @@ 0x8a14, 0xf000003f, 0x00000007, 0x8b24, 0xffffffff, 0x00ffffff, 0x28350, 0x3f3f3fff, 0x00000082, - 0x28355, 0x0000003f, 0x00000000, + 0x28354, 0x0000003f, 0x00000000, 0x3e78, 0x00000001, 0x00000002, 0x913c, 0xffff03df, 0x00000004, 0xc768, 0x00000008, 0x00000008, @@ -1702,20 +1704,20 @@ const __be32 *fw_data; u32 running, blackout = 0; u32 *io_mc_regs; - int i, ucode_size, regs_size; + int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; + ucode_size = rdev->mc_fw->size / 4; + switch (rdev->family) { case CHIP_BONAIRE: io_mc_regs = (u32 *)&bonaire_io_mc_regs; - ucode_size = CIK_MC_UCODE_SIZE; regs_size = BONAIRE_IO_MC_REGS_SIZE; break; case CHIP_HAWAII: io_mc_regs = (u32 *)&hawaii_io_mc_regs; - ucode_size = HAWAII_MC_UCODE_SIZE; regs_size = HAWAII_IO_MC_REGS_SIZE; break; default: @@ -1782,7 +1784,7 @@ const char *chip_name; size_t pfp_req_size, me_req_size, ce_req_size, mec_req_size, rlc_req_size, mc_req_size = 0, - sdma_req_size, smc_req_size = 0; + sdma_req_size, smc_req_size = 0, mc2_req_size = 0; char fw_name[30]; int err; @@ -1796,7 +1798,8 @@ ce_req_size = CIK_CE_UCODE_SIZE * 4; mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; - mc_req_size = CIK_MC_UCODE_SIZE * 4; + mc_req_size = BONAIRE_MC_UCODE_SIZE * 4; + mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); break; @@ -1808,6 +1811,7 @@ mec_req_size = CIK_MEC_UCODE_SIZE * 4; rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; mc_req_size = HAWAII_MC_UCODE_SIZE * 4; + mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); break; @@ -1903,16 +1907,22 @@ /* No SMC, MC ucode on APUs */ if (!(rdev->flags & RADEON_IS_IGP)) { - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->mc_fw->size != mc_req_size) { + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)){ printk(KERN_ERR "cik_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); err = -EINVAL; } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); @@ -2209,6 +2219,7 @@ gb_tile_moden = 0; break; } + rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if (num_pipe_configs == 8) { @@ -3220,6 +3231,7 @@ (rdev->pdev->device == 0x130B) || (rdev->pdev->device == 0x130E) || (rdev->pdev->device == 0x1315) || + (rdev->pdev->device == 0x1318) || (rdev->pdev->device == 0x131B)) { rdev->config.cik.max_cu_per_sh = 4; rdev->config.cik.max_backends_per_se = 1; @@ -3501,7 +3513,21 @@ struct radeon_ring *ring = &rdev->ring[fence->ring]; u64 addr = rdev->fence_drv[fence->ring].gpu_addr; - /* EVENT_WRITE_EOP - flush caches, send int */ + /* 
Workaround for cache flush problems. First send a dummy EOP + * event down the pipe with seq one below. + */ + radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); + radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | + EVENT_INDEX(5))); + radeon_ring_write(ring, addr & 0xfffffffc); + radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | + DATA_SEL(1) | INT_SEL(0)); + radeon_ring_write(ring, fence->seq - 1); + radeon_ring_write(ring, 0); + + /* Then send the real EOP event down the pipe. */ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN | @@ -3566,8 +3592,6 @@ struct radeon_semaphore *semaphore, bool emit_wait) { -/* TODO: figure out why semaphore cause lockups */ -#if 0 uint64_t addr = semaphore->gpu_addr; unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; @@ -3576,9 +3600,6 @@ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); return true; -#else - return false; -#endif } /** @@ -3898,8 +3919,8 @@ /* init the CE partitions. CE only used for gfx on CIK */ radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); - radeon_ring_write(ring, 0xc000); - radeon_ring_write(ring, 0xc000); + radeon_ring_write(ring, 0x8000); + radeon_ring_write(ring, 0x8000); /* setup clear context state */ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); @@ -4074,8 +4095,11 @@ { if (enable) WREG32(CP_MEC_CNTL, 0); - else + else { WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); + rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; + } udelay(50); } @@ -4317,7 +4341,7 @@ */ static int cik_cp_compute_resume(struct radeon_device *rdev) { - int r, i, idx; + int r, i, j, idx; u32 tmp; bool use_doorbell = true; u64 hqd_gpu_addr; @@ -4436,7 +4460,7 @@ mqd->queue_state.cp_hqd_pq_wptr= 0; if (RREG32(CP_HQD_ACTIVE) & 1) { WREG32(CP_HQD_DEQUEUE_REQUEST, 1); - for (i = 0; i < rdev->usec_timeout; i++) { + for (j = 0; j < rdev->usec_timeout; j++) { if (!(RREG32(CP_HQD_ACTIVE) & 1)) break; udelay(1); @@ -5323,12 +5347,13 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) { struct radeon_ring *ring = &rdev->ring[ridx]; + int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX); if (vm == NULL) return; radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | WRITE_DATA_DST_SEL(0))); if (vm->id < 8) { radeon_ring_write(ring, @@ -5386,8 +5411,19 @@ radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm->id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* compute doesn't have PFP */ - if (ridx == RADEON_RING_TYPE_GFX_INDEX) { + if (usepfp) { /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); @@ -5665,6 +5701,7 @@ } orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); + data 
|= 0x00000001; data &= 0xfffffffd; if (orig != data) WREG32(RLC_CGTT_MGCG_OVERRIDE, data); @@ -5696,7 +5733,7 @@ } } else { orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); - data |= 0x00000002; + data |= 0x00000003; if (orig != data) WREG32(RLC_CGTT_MGCG_OVERRIDE, data); @@ -6320,8 +6357,8 @@ buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_HAWAII: - buffer[count++] = 0x3a00161a; - buffer[count++] = 0x0000002e; + buffer[count++] = cpu_to_le32(0x3a00161a); + buffer[count++] = cpu_to_le32(0x0000002e); break; default: buffer[count++] = cpu_to_le32(0x00000000); @@ -6461,6 +6498,19 @@ WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } + /* pflip */ + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } /* dac hotplug */ WREG32(DAC_AUTODETECT_INT_CONTROL, 0); @@ -6581,7 +6631,6 @@ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; u32 dma_cntl, dma_cntl1; - u32 thermal_int; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -6618,13 +6667,6 @@ cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; - if (rdev->flags & RADEON_IS_IGP) - thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & - ~(THERM_INTH_MASK | THERM_INTL_MASK); - else - thermal_int = RREG32_SMC(CG_THERMAL_INT) & - ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); - /* enable CP interrupts on all rings */ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { DRM_DEBUG("cik_irq_set: sw int gfx\n"); @@ -6782,14 +6824,6 @@ hpd6 |= DC_HPDx_INT_EN; } - if (rdev->irq.dpm_thermal) { - DRM_DEBUG("dpm thermal\n"); - if (rdev->flags & RADEON_IS_IGP) - thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; - else - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; - } - WREG32(CP_INT_CNTL_RING0, cp_int_cntl); WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); @@ -6817,6 +6851,25 @@ WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -6824,10 +6877,8 @@ WREG32(DC_HPD5_INT_CONTROL, hpd5); WREG32(DC_HPD6_INT_CONTROL, hpd6); - if (rdev->flags & RADEON_IS_IGP) - WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); - else - WREG32_SMC(CG_THERMAL_INT, thermal_int); + /* posting read */ + RREG32(SRBM_STATUS); return 0; } @@ -6853,6 +6904,29 @@ rdev->irq.stat_regs.cik.disp_int_cont5 
= RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); + rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC1_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC5_REGISTER_OFFSET); + } + + if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) @@ -6863,6 +6937,12 @@ WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); if (rdev->num_crtc >= 4) { + if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) @@ -6874,6 +6954,12 @@ } if (rdev->num_crtc >= 6) { + if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) @@ -6992,6 +7078,7 @@ tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -7225,6 +7312,15 @@ break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -7489,6 +7585,7 @@ static int cik_startup(struct radeon_device *rdev) { struct radeon_ring *ring; + u32 nop; int r; /* enable pcie gen2/3 link */ @@ -7503,26 +7600,7 @@ cik_mc_program(rdev); - if (rdev->flags & RADEON_IS_IGP) { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || - !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { - r = cik_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - } else { - if (!rdev->me_fw || !rdev->pfp_fw || 
!rdev->ce_fw || - !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw || - !rdev->mc_fw) { - r = cik_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - + if (!(rdev->flags & RADEON_IS_IGP)) { r = ci_mc_load_microcode(rdev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -7625,10 +7703,16 @@ } cik_irq_set(rdev); + if (rdev->family == CHIP_HAWAII) { + nop = RADEON_CP_PACKET2; + } else { + nop = PACKET3(PACKET3_NOP, 0x3FFF); + } + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, CP_RB0_RPTR, CP_RB0_WPTR, - PACKET3(PACKET3_NOP, 0x3FFF)); + nop); if (r) return r; @@ -7637,7 +7721,7 @@ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET, CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, - PACKET3(PACKET3_NOP, 0x3FFF)); + nop); if (r) return r; ring->me = 1; /* first MEC */ @@ -7649,7 +7733,7 @@ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]; r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET, CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR, - PACKET3(PACKET3_NOP, 0x3FFF)); + nop); if (r) return r; /* dGPU only have 1 MEC */ @@ -7835,6 +7919,27 @@ if (r) return r; + if (rdev->flags & RADEON_IS_IGP) { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || + !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { + r = cik_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } else { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || + !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw || + !rdev->mc_fw) { + r = cik_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -8593,6 +8698,9 @@ u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { --- linux-3.13.0.orig/drivers/gpu/drm/radeon/cik_sdma.c +++ linux-3.13.0/drivers/gpu/drm/radeon/cik_sdma.c @@ -88,6 +88,35 @@ } /** + * cik_sdma_hdp_flush_ring_emit - emit an hdp flush on the DMA ring + * + * @rdev: radeon_device pointer + * @ridx: radeon ring index + * + * Emit an hdp flush packet on the requested DMA ring. 
+ */ +static void cik_sdma_hdp_flush_ring_emit(struct radeon_device *rdev, + int ridx) +{ + struct radeon_ring *ring = &rdev->ring[ridx]; + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */ + u32 ref_and_mask; + + if (ridx == R600_RING_TYPE_DMA_INDEX) + ref_and_mask = SDMA0; + else + ref_and_mask = SDMA1; + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + radeon_ring_write(ring, GPU_HDP_FLUSH_DONE); + radeon_ring_write(ring, GPU_HDP_FLUSH_REQ); + radeon_ring_write(ring, ref_and_mask); /* reference */ + radeon_ring_write(ring, ref_and_mask); /* mask */ + radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ +} + +/** * cik_sdma_fence_ring_emit - emit a fence on the DMA ring * * @rdev: radeon_device pointer @@ -111,12 +140,7 @@ /* generate an interrupt */ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0)); /* flush HDP */ - /* We should be using the new POLL_REG_MEM special op packet here - * but it causes sDMA to hang sometimes - */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); - radeon_ring_write(ring, 0); + cik_sdma_hdp_flush_ring_emit(rdev, fence->ring); } /** @@ -169,6 +193,8 @@ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); } + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; } /** @@ -196,6 +222,11 @@ u32 me_cntl, reg_offset; int i; + if (enable == false) { + cik_sdma_gfx_stop(rdev); + cik_sdma_rlc_stop(rdev); + } + for (i = 0; i < 2; i++) { if (i == 0) reg_offset = SDMA0_REGISTER_OFFSET; @@ -323,10 +354,6 @@ if (!rdev->sdma_fw) return -EINVAL; - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); - /* halt the MEs */ cik_sdma_enable(rdev, false); @@ -361,13 +388,6 @@ { int r; - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - RREG32(SRBM_SOFT_RESET); - r = cik_sdma_load_microcode(rdev); if (r) return r; @@ -395,9 +415,6 @@ */ void cik_sdma_fini(struct radeon_device *rdev) { - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); /* halt the MEs */ cik_sdma_enable(rdev, false); radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); @@ -491,31 +508,34 @@ { unsigned i; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + unsigned index; u32 tmp; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); - r = radeon_ring_lock(rdev, ring, 4); + r = radeon_ring_lock(rdev, ring, 5); if (r) { DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); return r; } radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff); + radeon_ring_write(ring, lower_32_bits(gpu_addr)); + radeon_ring_write(ring, upper_32_bits(gpu_addr)); radeon_ring_write(ring, 1); /* number of DWs to follow */ 
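[Editor's note] The ring-test conversion in this hunk (continuing below) moves the test sentinel from an ioremapped VRAM scratch page to GPU write-back system memory, so the CPU seeds a slot, lets the engine overwrite it, and polls with explicit endian conversion instead of readl(). A self-contained sketch of that seed-and-poll pattern follows; the write-back array, slot index and timeout are placeholders, not real driver state.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/byteorder.h>

/* Hypothetical poll helper mirroring the write-back ring test in this hunk. */
static int example_poll_wb_slot(volatile __le32 *wb, unsigned int slot,
                                unsigned int usec_timeout)
{
        unsigned int i;

        wb[slot] = cpu_to_le32(0xCAFEDEAD);     /* seed with a known value */
        /* ...submit a DMA/CP WRITE packet that stores 0xDEADBEEF here... */
        for (i = 0; i < usec_timeout; i++) {
                if (le32_to_cpu(wb[slot]) == 0xDEADBEEF)
                        return 0;               /* the engine reached the slot */
                udelay(1);
        }
        return -ETIMEDOUT;                      /* ring never executed the write */
}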
radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring); for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -544,17 +564,20 @@ { struct radeon_ib ib; unsigned i; + unsigned index; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp = 0; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); if (r) { @@ -563,8 +586,8 @@ } ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff; + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; @@ -581,7 +604,7 @@ return r; } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -708,6 +731,8 @@ */ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) { + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ struct radeon_ring *ring = &rdev->ring[ridx]; if (vm == NULL) @@ -747,16 +772,18 @@ radeon_ring_write(ring, VMID(0)); /* flush HDP */ - /* We should be using the new POLL_REG_MEM special op packet here - * but it causes sDMA to hang sometimes - */ - radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); - radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2); - radeon_ring_write(ring, 0); + cik_sdma_hdp_flush_ring_emit(rdev, ridx); /* flush TLB */ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 1 << vm->id); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* reference */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/cikd.h +++ linux-3.13.0/drivers/gpu/drm/radeon/cikd.h @@ -871,6 +871,15 @@ # define DC_HPD6_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 +/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ +#define GRPH_INT_STATUS 0x6858 +# define GRPH_PFLIP_INT_OCCURRED (1 << 0) +# define GRPH_PFLIP_INT_CLEAR (1 << 8) +/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ +#define GRPH_INT_CONTROL 0x685c +# define GRPH_PFLIP_INT_MASK (1 << 0) +# define GRPH_PFLIP_INT_TYPE (1 << 8) + #define DAC_AUTODETECT_INT_CONTROL 0x67c8 #define DC_HPD1_INT_STATUS 0x601c @@ -1725,12 +1734,12 @@ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ #define EOP_TCL1_ACTION_EN (1 << 16) #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TCL2_VOLATILE (1 << 24) #define EOP_CACHE_POLICY(x) ((x) << 25) /* 0 - LRU * 1 - Stream * 2 - Bypass */ -#define EOP_TCL2_VOLATILE (1 << 27) #define DATA_SEL(x) ((x) << 29) /* 0 - discard * 1 - send low 32bit data --- 
linux-3.13.0.orig/drivers/gpu/drm/radeon/cypress_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/cypress_dpm.c @@ -1551,7 +1551,7 @@ table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } return 0; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/dce6_afmt.c +++ linux-3.13.0/drivers/gpu/drm/radeon/dce6_afmt.c @@ -155,7 +155,7 @@ struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; u32 offset, tmp; - u8 *sadb; + u8 *sadb = NULL; int sad_count; if (!dig || !dig->afmt || !dig->afmt->pin) @@ -174,9 +174,9 @@ } sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count <= 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - return; + if (sad_count < 0) { + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; } /* program the speaker allocation */ @@ -283,7 +283,7 @@ bool enable) { WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, - AUDIO_ENABLED); + enable ? AUDIO_ENABLED : 0); DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); } @@ -307,11 +307,17 @@ rdev->audio.enabled = true; - if (ASIC_IS_DCE8(rdev)) + if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */ + rdev->audio.num_pins = 7; + else if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */ + rdev->audio.num_pins = 3; + else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */ + rdev->audio.num_pins = 7; + else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */ rdev->audio.num_pins = 6; - else if (ASIC_IS_DCE61(rdev)) - rdev->audio.num_pins = 4; - else + else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */ + rdev->audio.num_pins = 2; + else /* SI: 6 streams, 6 endpoints */ rdev->audio.num_pins = 6; for (i = 0; i < rdev->audio.num_pins; i++) { --- linux-3.13.0.orig/drivers/gpu/drm/radeon/evergreen.c +++ linux-3.13.0/drivers/gpu/drm/radeon/evergreen.c @@ -188,7 +188,7 @@ 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002, 0x913c, 0x0000000f, 0x0000000a }; @@ -475,7 +475,7 @@ 0x8c1c, 0xffffffff, 0x00001010, 0x28350, 0xffffffff, 0x00000000, 0xa008, 0xffffffff, 0x00010000, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x9508, 0xffffffff, 0x00000002 }; @@ -634,7 +634,7 @@ static const u32 supersumo_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -718,7 +718,7 @@ static const u32 wrestler_golden_registers[] = { 0x5eb4, 0xffffffff, 0x00000002, - 0x5cc, 0xffffffff, 0x00000001, + 0x5c4, 0xffffffff, 0x00000001, 0x7030, 0xffffffff, 0x00000011, 0x7c30, 0xffffffff, 0x00000011, 0x6104, 0x01000300, 0x00000000, @@ -2361,6 +2361,9 @@ u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { @@ -2569,6 +2572,7 @@ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); } } else { tmp 
= RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); @@ -4298,8 +4302,8 @@ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - /* only one DAC on DCE6 */ - if (!ASIC_IS_DCE6(rdev)) + /* only one DAC on DCE5 */ + if (!ASIC_IS_DCE5(rdev)) WREG32(DACA_AUTODETECT_INT_CONTROL, 0); WREG32(DACB_AUTODETECT_INT_CONTROL, 0); @@ -4325,7 +4329,6 @@ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; u32 dma_cntl, dma_cntl1 = 0; u32 thermal_int = 0; @@ -4508,15 +4511,21 @@ WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -4537,6 +4546,9 @@ WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); + /* posting read */ + RREG32(SRBM_STATUS); + return 0; } @@ -4708,6 +4720,7 @@ tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -4905,6 +4918,15 @@ break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -5110,26 +5132,11 @@ evergreen_mc_program(rdev); if (ASIC_IS_DCE5(rdev)) { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { - r = ni_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } r = ni_mc_load_microcode(rdev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); return r; } - } else { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { - r = r600_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } } if (rdev->flags & RADEON_IS_AGP) { @@ -5357,6 +5364,24 @@ if (r) return r; + if (ASIC_IS_DCE5(rdev)) { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { + r = ni_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } else { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } 
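[Editor's note] The "/* posting read */" additions in the irq_set paths above rely on a standard MMIO idiom: reading any register on the device flushes earlier posted writes, so the interrupt masks are actually in effect before the function returns. A small sketch of the idiom with placeholder register offsets, using the generic writel()/readl() accessors rather than the driver's WREG32/RREG32 wrappers:

#include <linux/io.h>

/* Hypothetical example of flushing posted MMIO writes with a read-back. */
static void example_enable_device_irq(void __iomem *mmio)
{
        writel(0x1, mmio + 0x100);      /* placeholder: interrupt enable register */
        (void)readl(mmio + 0x0);        /* posting read: force the write to land */
}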
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -5418,9 +5443,9 @@ radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - evergreen_pcie_gart_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + evergreen_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/evergreen_cs.c +++ linux-3.13.0/drivers/gpu/drm/radeon/evergreen_cs.c @@ -967,7 +967,10 @@ if (track->cb_dirty) { tmp = track->cb_target_mask; for (i = 0; i < 8; i++) { - if ((tmp >> (i * 4)) & 0xF) { + u32 format = G_028C70_FORMAT(track->cb_color_info[i]); + + if (format != V_028C70_COLOR_INVALID && + (tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", --- linux-3.13.0.orig/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ linux-3.13.0/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -102,7 +102,7 @@ struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; u32 tmp; - u8 *sadb; + u8 *sadb = NULL; int sad_count; list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { @@ -118,9 +118,9 @@ } sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); - if (sad_count <= 0) { - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); - return; + if (sad_count < 0) { + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); + sad_count = 0; } /* program the speaker allocation */ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/evergreen_smc.h +++ linux-3.13.0/drivers/gpu/drm/radeon/evergreen_smc.h @@ -57,7 +57,7 @@ #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 -#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 +#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 --- linux-3.13.0.orig/drivers/gpu/drm/radeon/kv_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/kv_dpm.c @@ -1121,6 +1121,19 @@ } } +static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable) +{ + u32 thermal_int; + + thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL); + if (enable) + thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; + else + thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK); + WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); + +} + int kv_dpm_enable(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); @@ -1222,8 +1235,7 @@ DRM_ERROR("kv_set_thermal_temperature_range failed\n"); return ret; } - rdev->irq.dpm_thermal = true; - radeon_irq_set(rdev); + kv_enable_thermal_int(rdev, true); } ret = kv_smc_bapm_enable(rdev, false); @@ -1269,6 +1281,7 @@ kv_stop_dpm(rdev); kv_enable_ulv(rdev, false); kv_reset_am(rdev); + kv_enable_thermal_int(rdev, false); kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); } @@ -2620,7 +2633,11 @@ if (rdev->family == CHIP_KABINI) pi->high_voltage_t = 4001; - pi->enable_nb_dpm = true; + /* Enabling nb dpm on an asrock system prevents dpm from working */ + if (rdev->pdev->subsystem_vendor == 0x1849) + pi->enable_nb_dpm = false; + else + pi->enable_nb_dpm = true; pi->caps_power_containment = true; pi->caps_cac = true; @@ -2635,7 +2652,19 @@ pi->caps_sclk_ds = true; pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; - pi->bapm_enable = false; + if 
(radeon_bapm == -1) { + /* There are stability issues reported on with + * bapm enabled on an asrock system. + */ + if (rdev->pdev->subsystem_vendor == 0x1849) + pi->bapm_enable = false; + else + pi->bapm_enable = true; + } else if (radeon_bapm == 0) { + pi->bapm_enable = false; + } else { + pi->bapm_enable = true; + } pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? */ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/ni.c +++ linux-3.13.0/drivers/gpu/drm/radeon/ni.c @@ -1072,12 +1072,12 @@ if ((rdev->config.cayman.max_backends_per_se == 1) && (rdev->flags & RADEON_IS_IGP)) { - if ((disabled_rb_mask & 3) == 1) { - /* RB0 disabled, RB1 enabled */ - tmp = 0x11111111; - } else { + if ((disabled_rb_mask & 3) == 2) { /* RB1 disabled, RB0 enabled */ tmp = 0x00000000; + } else { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; } } else { tmp = gb_addr_config & NUM_PIPES_MASK; @@ -1330,13 +1330,12 @@ { struct radeon_ring *ring = &rdev->ring[fence->ring]; u64 addr = rdev->fence_drv[fence->ring].gpu_addr; + u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | + PACKET3_SH_ACTION_ENA; /* flush read cache over gart for this vmid */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); + radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); radeon_ring_write(ring, 0xFFFFFFFF); radeon_ring_write(ring, 0); radeon_ring_write(ring, 10); /* poll interval */ @@ -1352,6 +1351,8 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) { struct radeon_ring *ring = &rdev->ring[ib->ring]; + u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | + PACKET3_SH_ACTION_ENA; /* set to DX10/11 mode */ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0)); @@ -1376,14 +1377,11 @@ (ib->vm ? (ib->vm->id << 24) : 0)); /* flush read cache over gart for this vmid */ - radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); - radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA); + radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); radeon_ring_write(ring, 0xFFFFFFFF); radeon_ring_write(ring, 0); - radeon_ring_write(ring, 10); /* poll interval */ + radeon_ring_write(ring, ((ib->vm ? 
ib->vm->id : 0) << 24) | 10); /* poll interval */ } static void cayman_cp_enable(struct radeon_device *rdev, bool enable) @@ -1878,23 +1876,7 @@ evergreen_mc_program(rdev); - if (rdev->flags & RADEON_IS_IGP) { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { - r = ni_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - } else { - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { - r = ni_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - + if (!(rdev->flags & RADEON_IS_IGP)) { r = ni_mc_load_microcode(rdev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -2145,6 +2127,24 @@ if (r) return r; + if (rdev->flags & RADEON_IS_IGP) { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = ni_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } else { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { + r = ni_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + } + ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -2433,6 +2433,16 @@ radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); radeon_ring_write(ring, 1 << vm->id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/ni_dma.c +++ linux-3.13.0/drivers/gpu/drm/radeon/ni_dma.c @@ -120,12 +120,6 @@ u32 reg_offset, wb_offset; int i, r; - /* Reset dma */ - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - for (i = 0; i < 2; i++) { if (i == 0) { ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; @@ -335,5 +329,11 @@ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm->id); + + /* wait for invalidate to complete */ + radeon_ring_write(ring, DMA_SRBM_READ_PACKET); + radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2)); + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0); /* value */ } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/ni_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/ni_dpm.c @@ -1313,7 +1313,7 @@ table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); } } @@ -2586,7 +2586,7 @@ if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { --- linux-3.13.0.orig/drivers/gpu/drm/radeon/nid.h +++ linux-3.13.0/drivers/gpu/drm/radeon/nid.h @@ 
-1132,6 +1132,23 @@ #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + * 1 - pfp + */ #define PACKET3_MEM_WRITE 0x3D #define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 @@ -1154,6 +1171,7 @@ # define PACKET3_DB_ACTION_ENA (1 << 26) # define PACKET3_SH_ACTION_ENA (1 << 27) # define PACKET3_SX_ACTION_ENA (1 << 28) +# define PACKET3_ENGINE_ME (1 << 31) #define PACKET3_ME_INITIALIZE 0x44 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 @@ -1270,6 +1288,13 @@ (1 << 21) | \ (((n) & 0xFFFFF) << 0)) +#define DMA_SRBM_POLL_PACKET ((9 << 28) | \ + (1 << 27) | \ + (1 << 26)) + +#define DMA_SRBM_READ_PACKET ((9 << 28) | \ + (1 << 27)) + /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r100.c +++ linux-3.13.0/drivers/gpu/drm/radeon/r100.c @@ -742,6 +742,10 @@ tmp |= RADEON_FP2_DETECT_MASK; } WREG32(RADEON_GEN_INT_CNTL, tmp); + + /* read back to post the write */ + RREG32(RADEON_GEN_INT_CNTL); + return 0; } @@ -3190,6 +3194,9 @@ uint32_t pixel_bytes1 = 0; uint32_t pixel_bytes2 = 0; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) { --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r600.c +++ linux-3.13.0/drivers/gpu/drm/radeon/r600.c @@ -2607,14 +2607,17 @@ struct radeon_fence *fence) { struct radeon_ring *ring = &rdev->ring[fence->ring]; + u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | + PACKET3_SH_ACTION_ENA; + + if (rdev->family >= CHIP_RV770) + cp_coher_cntl |= PACKET3_FULL_CACHE_ENA; if (rdev->wb.use_event) { u64 addr = rdev->fence_drv[fence->ring].gpu_addr; /* flush read cache over gart */ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | - PACKET3_VC_ACTION_ENA | - PACKET3_SH_ACTION_ENA); + radeon_ring_write(ring, cp_coher_cntl); radeon_ring_write(ring, 0xFFFFFFFF); radeon_ring_write(ring, 0); radeon_ring_write(ring, 10); /* poll interval */ @@ -2628,9 +2631,7 @@ } else { /* flush read cache over gart */ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); - radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | - PACKET3_VC_ACTION_ENA | - PACKET3_SH_ACTION_ENA); + radeon_ring_write(ring, cp_coher_cntl); radeon_ring_write(ring, 0xFFFFFFFF); radeon_ring_write(ring, 0); radeon_ring_write(ring, 10); /* poll interval */ @@ -2775,14 +2776,6 @@ r600_mc_program(rdev); - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { - r = r600_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - if (rdev->flags & RADEON_IS_AGP) { r600_agp_enable(rdev); } else { @@ -2970,6 +2963,14 @@ if (r) return r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -3420,7 +3421,6 @@ u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 hdmi0, hdmi1; - u32 d1grph = 0, d2grph = 0; u32 dma_cntl; u32 thermal_int = 
0; @@ -3529,8 +3529,8 @@ WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DMA_CNTL, dma_cntl); WREG32(DxMODE_INT_MASK, mode_int); - WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); - WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); + WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); + WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); WREG32(GRBM_INT_CNTL, grbm_int_cntl); if (ASIC_IS_DCE3(rdev)) { WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -3559,6 +3559,9 @@ WREG32(RV770_CG_THERMAL_INT, thermal_int); } + /* posting read */ + RREG32(R_000E50_SRBM_STATUS); + return 0; } @@ -3707,6 +3710,7 @@ tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -3833,6 +3837,14 @@ break; } break; + case 9: /* D1 pflip */ + DRM_DEBUG("IH: D1 flip\n"); + radeon_crtc_handle_flip(rdev, 0); + break; + case 11: /* D2 pflip */ + DRM_DEBUG("IH: D2 flip\n"); + radeon_crtc_handle_flip(rdev, 1); + break; case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: @@ -3903,6 +3915,10 @@ break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ case 178: /* CP_INT in IB2 */ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r600_cs.c +++ linux-3.13.0/drivers/gpu/drm/radeon/r600_cs.c @@ -749,7 +749,10 @@ } for (i = 0; i < 8; i++) { - if ((tmp >> (i * 4)) & 0xF) { + u32 format = G_0280A0_FORMAT(track->cb_color_info[i]); + + if (format != V_0280A0_COLOR_INVALID && + (tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r600_dma.c +++ linux-3.13.0/drivers/gpu/drm/radeon/r600_dma.c @@ -116,15 +116,6 @@ u32 rb_bufsz; int r; - /* Reset dma */ - if (rdev->family >= CHIP_RV770) - WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); - else - WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); - RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); @@ -236,16 +227,19 @@ { unsigned i; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + unsigned index; u32 tmp; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return -EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; + + gpu_addr = rdev->wb.gpu_addr + index; tmp = 0xCAFEDEAD; - writel(tmp, ptr); + rdev->wb.wb[index/4] = cpu_to_le32(tmp); r = radeon_ring_lock(rdev, ring, 4); if (r) { @@ -253,13 +247,13 @@ return r; } radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); + radeon_ring_write(ring, lower_32_bits(gpu_addr)); + radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff); radeon_ring_write(ring, 0xDEADBEEF); radeon_ring_unlock_commit(rdev, ring); for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -339,17 +333,17 @@ { struct radeon_ib ib; unsigned i; + unsigned index; int r; - void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp = 0; + u64 gpu_addr; - if (!ptr) { - DRM_ERROR("invalid vram scratch pointer\n"); - return 
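[Editor's note] The "wptr &= ~RB_OVERFLOW;" fix in the r600 get_wptr path above (repeated in the evergreen and cik hunks earlier) strips the overflow flag after acknowledging it, so a wrapped IH ring does not yield a bogus masked offset. A compact sketch of that fix-up with placeholder bit and mask values:

#include <linux/types.h>

/* Hypothetical wptr fix-up mirroring the overflow handling above. */
static u32 example_ih_wptr(u32 wptr, u32 overflow_bit, u32 ptr_mask)
{
        if (wptr & overflow_bit) {
                /* ...acknowledge the overflow in the ring control register... */
                wptr &= ~overflow_bit;  /* strip the flag before masking */
        }
        return wptr & ptr_mask;
}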
-EINVAL; - } + if (ring->idx == R600_RING_TYPE_DMA_INDEX) + index = R600_WB_DMA_RING_TEST_OFFSET; + else + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; - tmp = 0xCAFEDEAD; - writel(tmp, ptr); + gpu_addr = rdev->wb.gpu_addr + index; r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); if (r) { @@ -358,8 +352,8 @@ } ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); - ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; - ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; ib.ptr[3] = 0xDEADBEEF; ib.length_dw = 4; @@ -375,7 +369,7 @@ return r; } for (i = 0; i < rdev->usec_timeout; i++) { - tmp = readl(ptr); + tmp = le32_to_cpu(rdev->wb.wb[index/4]); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r600_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/r600_dpm.c @@ -158,16 +158,18 @@ u32 line_time_us, vblank_lines; u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / - radeon_crtc->hw_mode.clock; - vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - - radeon_crtc->hw_mode.crtc_vdisplay + - (radeon_crtc->v_border * 2); - vblank_time_us = vblank_lines * line_time_us; - break; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { + line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) / + radeon_crtc->hw_mode.clock; + vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end - + radeon_crtc->hw_mode.crtc_vdisplay + + (radeon_crtc->v_border * 2); + vblank_time_us = vblank_lines * line_time_us; + break; + } } } @@ -181,14 +183,15 @@ struct radeon_crtc *radeon_crtc; u32 vrefresh = 0; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - vrefresh = radeon_crtc->hw_mode.vrefresh; - break; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { + vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); + break; + } } } - return vrefresh; } @@ -1190,7 +1193,7 @@ (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; + le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); pt = &ppt->power_tune_table; } else { ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r600_hdmi.c +++ linux-3.13.0/drivers/gpu/drm/radeon/r600_hdmi.c @@ -326,7 +326,7 @@ struct drm_connector *connector; struct radeon_connector *radeon_connector = NULL; u32 tmp; - u8 *sadb; + u8 *sadb = NULL; int sad_count; /* XXX: setting this register causes hangs on some asics */ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/r600d.h +++ linux-3.13.0/drivers/gpu/drm/radeon/r600d.h @@ -1575,6 +1575,7 @@ # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) #define 
PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) +# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */ # define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_VC_ACTION_ENA (1 << 24) # define PACKET3_CB_ACTION_ENA (1 << 25) --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon.h +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon.h @@ -99,6 +99,7 @@ extern int radeon_dpm; extern int radeon_aspm; extern int radeon_runtime_pm; +extern int radeon_bapm; /* * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -134,6 +135,9 @@ /* R600+ */ #define R600_RING_TYPE_UVD_INDEX 5 +/* number of hw syncs before falling back on blocking */ +#define RADEON_NUM_SYNCS 4 + /* hardcode those limit for now */ #define RADEON_VA_IB_OFFSET (1 << 20) #define RADEON_VA_RESERVED_SIZE (8 << 20) @@ -286,6 +290,9 @@ u16 *vddc, u16 *vddci, u16 virtual_voltage_id, u16 vbios_voltage_id); +int radeon_atom_get_voltage_evv(struct radeon_device *rdev, + u16 virtual_voltage_id, + u16 *voltage); int radeon_atom_round_to_true_voltage(struct radeon_device *rdev, u8 voltage_type, u16 nominal_voltage, @@ -544,7 +551,6 @@ /* * Semaphores. */ -/* everything here is constant */ struct radeon_semaphore { struct radeon_sa_bo *sa_bo; signed waiters; @@ -721,6 +727,12 @@ u32 disp_int_cont4; u32 disp_int_cont5; u32 disp_int_cont6; + u32 d1grph_int; + u32 d2grph_int; + u32 d3grph_int; + u32 d4grph_int; + u32 d5grph_int; + u32 d6grph_int; }; union radeon_irq_stat_regs { @@ -730,7 +742,7 @@ struct cik_irq_stat_regs cik; }; -#define RADEON_MAX_HPD_PINS 6 +#define RADEON_MAX_HPD_PINS 7 #define RADEON_MAX_CRTCS 6 #define RADEON_MAX_AFMT_BLOCKS 7 @@ -1069,6 +1081,8 @@ #define R600_WB_EVENT_OFFSET 3072 #define CIK_WB_CP1_WPTR_OFFSET 3328 #define CIK_WB_CP2_WPTR_OFFSET 3584 +#define R600_WB_DMA_RING_TEST_OFFSET 3588 +#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592 /** * struct radeon_pm - power management datas @@ -2229,6 +2243,7 @@ bool have_disp_power_ref; }; +bool radeon_is_px(struct drm_device *dev); int radeon_device_init(struct radeon_device *rdev, struct drm_device *ddev, struct pci_dev *pdev, @@ -2539,6 +2554,9 @@ #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) #define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) +#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) +#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ (rdev->ddev->pdev->device == 0x6850) || \ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_asic.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_asic.c @@ -2025,8 +2025,8 @@ .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &cik_copy_dma, .dma_ring_index = R600_RING_TYPE_DMA_INDEX, - .copy = &cik_copy_dma, - .copy_ring_index = R600_RING_TYPE_DMA_INDEX, + .copy = &cik_copy_cpdma, + .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, }, .surface = { .set_reg = r600_set_surface_reg, --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_atombios.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_atombios.c @@ -464,6 +464,13 @@ } } + /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ + if ((dev->pdev->device == 0x9805) && + (dev->pdev->subsystem_vendor == 0x1734) && + (dev->pdev->subsystem_device == 0x11bd)) { + if (*connector_type == DRM_MODE_CONNECTOR_VGA) + return false; + } return true; } @@ -1963,7 +1970,7 @@ "adm1032", "adm1030", "max6649", - "lm64", + 
"lm63", /* lm64 */ "f75375", "asc7xxx", }; @@ -1974,7 +1981,7 @@ "adm1032", "adm1030", "max6649", - "lm64", + "lm63", /* lm64 */ "f75375", "RV6xx", "RV770", @@ -2281,19 +2288,31 @@ (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_KV; - } else if ((controller->ucType == - ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || - (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || - (controller->ucType == - ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { - DRM_INFO("Special thermal controller config\n"); + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { + DRM_INFO("External GPIO thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { + DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { + DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { @@ -3236,6 +3255,42 @@ return 0; } +union get_voltage_info { + struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in; + struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out; +}; + +int radeon_atom_get_voltage_evv(struct radeon_device *rdev, + u16 virtual_voltage_id, + u16 *voltage) +{ + int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo); + u32 entry_id; + u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; + union get_voltage_info args; + + for (entry_id = 0; entry_id < count; entry_id++) { + if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v == + virtual_voltage_id) + break; + } + + if (entry_id >= count) + return -EINVAL; + + args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; + args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; + args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); + args.in.ulSCLKFreq = + cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + + *voltage = le16_to_cpu(args.evv_out.usVoltageLevel); + + return 0; +} + int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type, u32 *gpio_value, u32 *gpio_mask) @@ -3944,6 +3999,10 @@ /* tell the bios not to handle mode switching */ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; + /* clear the vbios dpms state */ + if (ASIC_IS_DCE4(rdev)) + bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE; + if (rdev->family >= CHIP_R600) { WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -59,7 +59,7 @@ u16 mux; } __packed; -bool radeon_is_px(void) { +bool radeon_has_atpx(void) { return radeon_atpx_priv.atpx_detected; } @@ -219,7 +219,8 @@ memcpy(&output, info->buffer.pointer, size); /* TODO: check version? 
*/ - printk("ATPX version %u\n", output.version); + printk("ATPX version %u, functions 0x%08x\n", + output.version, output.function_bits); radeon_atpx_parse_functions(&atpx->functions, output.function_bits); @@ -525,6 +526,13 @@ vga_count++; has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); + } + + /* some newer PX laptops mark the dGPU as a non-VGA display device */ + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + vga_count++; + + has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); } if (has_atpx && vga_count == 2) { --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_bios.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_bios.c @@ -76,7 +76,7 @@ static bool radeon_read_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios, val1, val2; size_t size; rdev->bios = NULL; @@ -86,15 +86,19 @@ return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + val1 = readb(&bios[0]); + val2 = readb(&bios[1]); + + if (size == 0 || val1 != 0x55 || val2 != 0xaa) { pci_unmap_rom(rdev->pdev, bios); return false; } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); + rdev->bios = kzalloc(size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); return false; } + memcpy_fromio(rdev->bios, bios, size); pci_unmap_rom(rdev->pdev, bios); return true; } @@ -196,6 +200,20 @@ } } + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + if (!found) return false; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_connectors.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1411,7 +1411,7 @@ struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE5(rdev) && - (rdev->clock.dp_extclk >= 53900) && + (rdev->clock.default_dispclk >= 53900) && radeon_connector_encoder_is_hbr2(connector)) { return true; } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_cs.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_cs.c @@ -97,6 +97,12 @@ uint32_t domain = r->write_domain ? 
r->write_domain : r->read_domains; + if (domain & RADEON_GEM_DOMAIN_CPU) { + DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " + "for command submission\n"); + return -EINVAL; + } + p->relocs[i].lobj.domain = domain; if (domain == RADEON_GEM_DOMAIN_VRAM) domain |= RADEON_GEM_DOMAIN_GTT; @@ -173,11 +179,13 @@ u32 ring = RADEON_CS_RING_GFX; s32 priority = 0; + INIT_LIST_HEAD(&p->validated); + if (!cs->num_chunks) { return 0; } + /* get chunks */ - INIT_LIST_HEAD(&p->validated); p->idx = 0; p->ib.sa_bo = NULL; p->ib.semaphore = NULL; @@ -276,10 +284,17 @@ return -EINVAL; /* we only support VM on some SI+ rings */ - if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { - DRM_ERROR("Ring %d requires VM!\n", p->ring); - return -EINVAL; + if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { + if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { + DRM_ERROR("Ring %d requires VM!\n", p->ring); + return -EINVAL; + } + } else { + if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { + DRM_ERROR("VM not supported on ring %d!\n", + p->ring); + return -EINVAL; + } } } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_device.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_device.c @@ -102,11 +102,14 @@ "LAST", }; -#if defined(CONFIG_VGA_SWITCHEROO) -bool radeon_is_px(void); -#else -static inline bool radeon_is_px(void) { return false; } -#endif +bool radeon_is_px(struct drm_device *dev) +{ + struct radeon_device *rdev = dev->dev_private; + + if (rdev->flags & RADEON_IS_PX) + return true; + return false; +} /** * radeon_program_register_sequence - program an array of registers. @@ -1077,7 +1080,7 @@ { struct drm_device *dev = pci_get_drvdata(pdev); - if (radeon_is_px() && state == VGA_SWITCHEROO_OFF) + if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { @@ -1298,9 +1301,7 @@ * ignore it */ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); - if (radeon_runtime_pm == 1) - runtime = true; - if ((radeon_runtime_pm == -1) && radeon_is_px()) + if (rdev->flags & RADEON_IS_PX) runtime = true; vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); if (runtime) @@ -1308,7 +1309,7 @@ r = radeon_init(rdev); if (r) - return r; + goto failed; r = radeon_ib_ring_tests(rdev); if (r) @@ -1328,7 +1329,7 @@ radeon_agp_disable(rdev); r = radeon_init(rdev); if (r) - return r; + goto failed; } if ((radeon_testing & 1)) { if (rdev->accel_working) @@ -1349,6 +1350,11 @@ DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); } return 0; + +failed: + if (runtime) + vga_switcheroo_fini_domain_pm_ops(rdev->dev); + return r; } static void radeon_debugfs_remove_files(struct radeon_device *rdev); @@ -1369,6 +1375,8 @@ radeon_bo_evict_vram(rdev); radeon_fini(rdev); vga_switcheroo_unregister_client(rdev->pdev); + if (rdev->flags & RADEON_IS_PX) + vga_switcheroo_fini_domain_pm_ops(rdev->dev); vga_client_register(rdev->pdev, NULL, NULL, NULL); if (rdev->rio_mem) pci_iounmap(rdev->pdev, rdev->rio_mem); @@ -1546,6 +1554,11 @@ } drm_kms_helper_poll_enable(dev); + + /* set the power state here in case we are a PX system or headless */ + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) + radeon_pm_compute_clocks(rdev); + return 0; } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_display.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_display.c @@ -282,6 +282,10 @@ u32 update_pending; int vpos, hpos; + /* can happen during initialization */ + if (radeon_crtc == NULL) + return; + 
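/*
 * A minimal user-space sketch of the guard added just above: the vblank /
 * page-flip interrupt can fire while the driver is still initialising,
 * before the per-CRTC state exists, so the handler returns early rather
 * than dereferencing a NULL pointer. The struct and function names below
 * are illustrative only, not the driver's real types.
 */
#include <stddef.h>
#include <stdio.h>

struct crtc_state {
	int pending_flip;
};

struct device_state {
	struct crtc_state *crtcs[6];	/* only populated late in device init */
};

static void handle_vblank(struct device_state *dev, int crtc_id)
{
	struct crtc_state *crtc = dev->crtcs[crtc_id];

	/* can happen during initialization: interrupt before setup finished */
	if (crtc == NULL)
		return;

	if (crtc->pending_flip)
		printf("completing flip on crtc %d\n", crtc_id);
}

int main(void)
{
	struct device_state dev = { { NULL } };

	handle_vblank(&dev, 0);	/* early interrupt is safely ignored */
	return 0;
}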
spin_lock_irqsave(&rdev->ddev->event_lock, flags); work = radeon_crtc->unpin_work; if (work == NULL || @@ -306,7 +310,7 @@ * to complete in this vblank? */ if (update_pending && - (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, + (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, &vpos, &hpos, NULL, NULL)) && ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { @@ -749,6 +753,10 @@ struct radeon_device *rdev = dev->dev_private; int ret = 0; + /* don't leak the edid if we already fetched it in detect() */ + if (radeon_connector->edid) + goto got_edid; + /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); @@ -788,8 +796,10 @@ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); } if (radeon_connector->edid) { +got_edid: drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); + drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); return ret; } drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); @@ -1601,6 +1611,7 @@ * * \param dev Device to query. * \param crtc Crtc to query. + * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). * \param *vpos Location where vertical scanout position should be stored. * \param *hpos Location where horizontal scanout position should go. * \param *stime Target location for timestamp taken immediately before @@ -1622,8 +1633,8 @@ * unknown small number of scanlines wrt. real scanout position. * */ -int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime) +int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, + int *vpos, int *hpos, ktime_t *stime, ktime_t *etime) { u32 stat_crtc = 0, vbl = 0, position = 0; int vbl_start, vbl_end, vtotal, ret = 0; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_drv.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_drv.c @@ -109,8 +109,10 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, + unsigned int flags, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime); +extern bool radeon_is_px(struct drm_device *dev); extern const struct drm_ioctl_desc radeon_ioctls_kms[]; extern int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); @@ -140,11 +142,9 @@ #if defined(CONFIG_VGA_SWITCHEROO) void radeon_register_atpx_handler(void); void radeon_unregister_atpx_handler(void); -bool radeon_is_px(void); #else static inline void radeon_register_atpx_handler(void) {} static inline void radeon_unregister_atpx_handler(void) {} -static inline bool radeon_is_px(void) { return false; } #endif int radeon_no_wb; @@ -168,6 +168,7 @@ int radeon_dpm = -1; int radeon_aspm = -1; int radeon_runtime_pm = -1; +int radeon_bapm = -1; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -232,6 +233,9 @@ MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)"); module_param_named(runpm, radeon_runtime_pm, int, 0444); +MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(bapm, radeon_bapm, int, 0444); + static struct 
pci_device_id pciidlist[] = { radeon_PCI_IDS }; @@ -400,6 +404,9 @@ if (radeon_runtime_pm == 0) return -EINVAL; + if (radeon_runtime_pm == -1 && !radeon_is_px(drm_dev)) + return -EINVAL; + drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(drm_dev); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF); @@ -419,7 +426,7 @@ struct drm_device *drm_dev = pci_get_drvdata(pdev); int ret; - if (radeon_runtime_pm == 0) + if (!radeon_is_px(drm_dev)) return -EINVAL; drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -448,7 +455,7 @@ return -EBUSY; /* are we PX enabled? */ - if (radeon_runtime_pm == -1 && !radeon_is_px()) { + if (radeon_runtime_pm == -1 && !radeon_is_px(drm_dev)) { DRM_DEBUG_DRIVER("failing to power off - not px\n"); return -EBUSY; } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_family.h +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_family.h @@ -115,6 +115,7 @@ RADEON_NEW_MEMMAP = 0x00400000UL, RADEON_IS_PCI = 0x00800000UL, RADEON_IS_IGPGART = 0x01000000UL, + RADEON_IS_PX = 0x02000000UL, }; #endif --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_i2c.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_i2c.c @@ -1020,6 +1020,9 @@ /* Add the default buses */ void radeon_i2c_init(struct radeon_device *rdev) { + if (radeon_hw_i2c) + DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n"); + if (rdev->is_atom_bios) radeon_atombios_i2c_init(rdev); else --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -202,6 +202,16 @@ if (rdev->flags & RADEON_IS_AGP) return false; + /* + * Older chips have a HW limitation, they can only generate 40 bits + * of address for "64-bit" MSIs which breaks on some platforms, notably + * IBM POWER servers, so we limit them + */ + if (rdev->family < CHIP_BONAIRE) { + dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n"); + rdev->pdev->no_64bit_msi = 1; + } + /* force MSI on */ if (radeon_msi == 1) return true; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_kms.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_kms.c @@ -33,6 +33,13 @@ #include #include #include + +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_has_atpx(void); +#else +static inline bool radeon_has_atpx(void) { return false; } +#endif + /** * radeon_driver_unload_kms - Main unload function for KMS. 
* @@ -100,6 +107,11 @@ flags |= RADEON_IS_PCI; } + if ((radeon_runtime_pm != 0) && + radeon_has_atpx() && + ((flags & RADEON_IS_IGP) == 0)) + flags |= RADEON_IS_PX; + /* radeon_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must @@ -130,7 +142,7 @@ "Error during ACPI methods call\n"); } - if (radeon_runtime_pm != 0) { + if (radeon_is_px(dev)) { pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); @@ -242,7 +254,14 @@ } break; case RADEON_INFO_ACCEL_WORKING2: - *value = rdev->accel_working; + if (rdev->family == CHIP_HAWAII) { + if (rdev->accel_working) + *value = 2; + else + *value = 0; + } else { + *value = rdev->accel_working; + } break; case RADEON_INFO_TILING_CONFIG: if (rdev->family >= CHIP_BONAIRE) @@ -530,19 +549,29 @@ radeon_vm_init(rdev, &fpriv->vm); - /* map the ib pool buffer read only into - * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; - } + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } + /* map the ib pool buffer read only into + * virtual address space */ + bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + RADEON_VM_PAGE_READABLE | + RADEON_VM_PAGE_SNOOPED); + + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } + } file_priv->driver_priv = fpriv; } @@ -570,13 +599,15 @@ struct radeon_bo_va *bo_va; int r; - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + bo_va = radeon_vm_bo_find(&fpriv->vm, + rdev->ring_tmp_bo.bo); + if (bo_va) + radeon_vm_bo_rmv(rdev, bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } } radeon_vm_fini(rdev, &fpriv->vm); @@ -708,11 +739,13 @@ /* Get associated drm_crtc: */ drmcrtc = &rdev->mode_info.crtcs[crtc]->base; + if (!drmcrtc) + return -EINVAL; /* Helper routine in DRM core does all the work: */ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, vblank_time, flags, - drmcrtc); + drmcrtc, &drmcrtc->hwmode); } #define KMS_INVALID_IOCTL(name) \ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_mode.h +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_mode.h @@ -766,6 +766,7 @@ int x, int y); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, + unsigned int flags, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_object.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_object.c @@ -586,22 +586,30 @@ rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; - if (bo->mem.mem_type == TTM_PL_VRAM) { - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) > rdev->mc.visible_vram_size) { - /* hurrah the memory is not visible ! 
*/ - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); - if (unlikely(r != 0)) - return r; - offset = bo->mem.start << PAGE_SHIFT; - /* this should not happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; - } + if (bo->mem.mem_type != TTM_PL_VRAM) + return 0; + + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; + if ((offset + size) <= rdev->mc.visible_vram_size) + return 0; + + /* hurrah the memory is not visible ! */ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, false); + if (unlikely(r == -ENOMEM)) { + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + return ttm_bo_validate(bo, &rbo->placement, false, false); + } else if (unlikely(r != 0)) { + return r; } + + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + return 0; } --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_pm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_pm.c @@ -361,6 +361,11 @@ struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set profile when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) @@ -409,6 +414,13 @@ struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set method when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + count = -EINVAL; + goto fail; + } + /* we don't support the legacy modes with dpm */ if (rdev->pm.pm_method == PM_METHOD_DPM) { count = -EINVAL; @@ -472,7 +484,12 @@ goto fail; } mutex_unlock(&rdev->pm.mutex); - radeon_pm_compute_clocks(rdev); + + /* Can't set dpm state when the card is off */ + if (!(rdev->flags & RADEON_IS_PX) || + (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) + radeon_pm_compute_clocks(rdev); + fail: return count; } @@ -485,6 +502,10 @@ struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == RADEON_DPM_FORCED_LEVEL_LOW) ? 
"low" : "high"); @@ -500,6 +521,11 @@ enum radeon_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = RADEON_DPM_FORCED_LEVEL_LOW; @@ -538,8 +564,14 @@ char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); + struct drm_device *ddev = rdev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (rdev->asic->pm.get_temperature) temp = radeon_get_temperature(rdev); else @@ -603,7 +635,6 @@ static int radeon_hwmon_init(struct radeon_device *rdev) { int err = 0; - struct device *hwmon_dev; switch (rdev->pm.int_thermal_type) { case THERMAL_TYPE_RV6XX: @@ -616,11 +647,11 @@ case THERMAL_TYPE_KV: if (rdev->asic->pm.get_temperature == NULL) return err; - hwmon_dev = hwmon_device_register_with_groups(rdev->dev, - "radeon", rdev, - hwmon_groups); - if (IS_ERR(hwmon_dev)) { - err = PTR_ERR(hwmon_dev); + rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev, + "radeon", rdev, + hwmon_groups); + if (IS_ERR(rdev->pm.int_hwmon_dev)) { + err = PTR_ERR(rdev->pm.int_hwmon_dev); dev_err(rdev->dev, "Unable to register hwmon device: %d\n", err); } @@ -632,6 +663,12 @@ return err; } +static void radeon_hwmon_fini(struct radeon_device *rdev) +{ + if (rdev->pm.int_hwmon_dev) + hwmon_device_unregister(rdev->pm.int_hwmon_dev); +} + static void radeon_dpm_thermal_work_handler(struct work_struct *work) { struct radeon_device *rdev = @@ -1010,8 +1047,10 @@ rdev->pm.current_clock_mode_index = 0; rdev->pm.current_sclk = rdev->pm.default_sclk; rdev->pm.current_mclk = rdev->pm.default_mclk; - rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; - rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; + if (rdev->pm.power_state) { + rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; + } if (rdev->pm.pm_method == PM_METHOD_DYNPM && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; @@ -1050,7 +1089,6 @@ } } else { rdev->pm.dpm_enabled = true; - radeon_pm_compute_clocks(rdev); } } @@ -1217,8 +1255,39 @@ return 0; } +struct radeon_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; +}; + +/* cards with dpm stability problems */ +static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { + /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ + { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, + /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, + { 0, 0, 0, 0 }, +}; + int radeon_pm_init(struct radeon_device *rdev) { + struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; + bool disable_dpm = false; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + disable_dpm = true; + break; + } + ++p; + } + /* 
enable dpm on rv6xx+ */ switch (rdev->family) { case CHIP_RV610: @@ -1228,7 +1297,7 @@ case CHIP_RV670: case CHIP_RS780: case CHIP_RS880: - case CHIP_CAYMAN: + case CHIP_RV770: case CHIP_BONAIRE: case CHIP_KABINI: case CHIP_KAVERI: @@ -1245,7 +1314,6 @@ else rdev->pm.pm_method = PM_METHOD_PROFILE; break; - case CHIP_RV770: case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: @@ -1260,6 +1328,7 @@ case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: + case CHIP_CAYMAN: case CHIP_ARUBA: case CHIP_TAHITI: case CHIP_PITCAIRN: @@ -1273,6 +1342,8 @@ (!(rdev->flags & RADEON_IS_IGP)) && (!rdev->smc_fw)) rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (disable_dpm && (radeon_dpm == -1)) + rdev->pm.pm_method = PM_METHOD_PROFILE; else if (radeon_dpm == 0) rdev->pm.pm_method = PM_METHOD_PROFILE; else @@ -1312,6 +1383,8 @@ device_remove_file(rdev->dev, &dev_attr_power_method); } + radeon_hwmon_fini(rdev); + if (rdev->pm.power_state) kfree(rdev->pm.power_state); } @@ -1331,6 +1404,8 @@ } radeon_dpm_fini(rdev); + radeon_hwmon_fini(rdev); + if (rdev->pm.power_state) kfree(rdev->pm.power_state); } @@ -1356,12 +1431,14 @@ rdev->pm.active_crtcs = 0; rdev->pm.active_crtc_count = 0; - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (radeon_crtc->enabled) { - rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); - rdev->pm.active_crtc_count++; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (radeon_crtc->enabled) { + rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); + rdev->pm.active_crtc_count++; + } } } @@ -1425,12 +1502,14 @@ /* update active crtc counts */ rdev->pm.dpm.new_active_crtcs = 0; rdev->pm.dpm.new_active_crtc_count = 0; - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - radeon_crtc = to_radeon_crtc(crtc); - if (crtc->enabled) { - rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); - rdev->pm.dpm.new_active_crtc_count++; + if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + radeon_crtc = to_radeon_crtc(crtc); + if (crtc->enabled) { + rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); + rdev->pm.dpm.new_active_crtc_count++; + } } } @@ -1464,7 +1543,7 @@ */ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { - vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos, NULL, NULL); + vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); if ((vbl_status & DRM_SCANOUTPOS_VALID) && !(vbl_status & DRM_SCANOUTPOS_INVBL)) in_vbl = false; @@ -1556,8 +1635,12 @@ struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_device *ddev = rdev->ddev; - if (rdev->pm.dpm_enabled) { + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (rdev->pm.dpm_enabled) { mutex_lock(&rdev->pm.mutex); if (rdev->asic->dpm.debugfs_print_current_performance_level) radeon_dpm_debugfs_print_current_performance_level(rdev, m); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_ring.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_ring.c @@ -139,7 +139,7 @@ } /* 64 dwords should be enough for fence too */ - r = 
radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); if (r) { dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; @@ -257,6 +257,7 @@ r = radeon_ib_test(rdev, i, ring); if (r) { ring->ready = false; + rdev->needs_reset = false; if (i == RADEON_RING_TYPE_GFX_INDEX) { /* oh, oh, that's really bad */ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_semaphore.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -34,14 +34,15 @@ int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { + uint64_t *cpu_addr; int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); if (*semaphore == NULL) { return -ENOMEM; } - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, - &(*semaphore)->sa_bo, 8, 8, true); + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, + 8 * RADEON_NUM_SYNCS, 8, true); if (r) { kfree(*semaphore); *semaphore = NULL; @@ -49,7 +50,10 @@ } (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); - *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; + + cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); + for (i = 0; i < RADEON_NUM_SYNCS; ++i) + cpu_addr[i] = 0; for (i = 0; i < RADEON_NUM_RINGS; ++i) (*semaphore)->sync_to[i] = NULL; @@ -125,6 +129,7 @@ struct radeon_semaphore *semaphore, int ring) { + unsigned count = 0; int i, r; for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -140,6 +145,12 @@ return -EINVAL; } + if (++count > RADEON_NUM_SYNCS) { + /* not enough room, wait manually */ + radeon_fence_wait_locked(fence); + continue; + } + /* allocate enough space for sync command */ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); if (r) { @@ -164,6 +175,8 @@ radeon_ring_commit(rdev, &rdev->ring[i]); radeon_fence_note_sync(fence, ring); + + semaphore->gpu_addr += 8; } return 0; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_ttm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_ttm.c @@ -189,7 +189,7 @@ rbo = container_of(bo, struct radeon_bo, tbo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: - if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) + if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); else radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); @@ -712,6 +712,9 @@ DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + /* Change the size here instead of the init above so only lpfn is affected */ + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->stollen_vga_memory); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_ucode.h +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_ucode.h @@ -57,9 +57,14 @@ #define BTC_MC_UCODE_SIZE 6024 #define CAYMAN_MC_UCODE_SIZE 6037 #define SI_MC_UCODE_SIZE 7769 +#define TAHITI_MC_UCODE_SIZE 7808 +#define PITCAIRN_MC_UCODE_SIZE 7775 +#define VERDE_MC_UCODE_SIZE 7875 #define OLAND_MC_UCODE_SIZE 7863 -#define CIK_MC_UCODE_SIZE 7866 +#define BONAIRE_MC_UCODE_SIZE 7866 +#define BONAIRE_MC2_UCODE_SIZE 7948 #define HAWAII_MC_UCODE_SIZE 7933 +#define HAWAII_MC2_UCODE_SIZE 8091 /* SDMA */ #define CIK_SDMA_UCODE_SIZE 1050 --- linux-3.13.0.orig/drivers/gpu/drm/radeon/radeon_uvd.c +++ linux-3.13.0/drivers/gpu/drm/radeon/radeon_uvd.c @@ -91,6 +91,7 @@ case CHIP_VERDE: case CHIP_PITCAIRN: case CHIP_ARUBA: + case CHIP_OLAND: fw_name 
= FIRMWARE_TAHITI; break; @@ -170,6 +171,8 @@ radeon_bo_unref(&rdev->uvd.vcpu_bo); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); + release_firmware(rdev->uvd_fw); } @@ -462,6 +465,10 @@ cmd = radeon_get_ib_value(p, p->idx) >> 1; if (cmd < 0x4) { + if (end <= start) { + DRM_ERROR("invalid reloc offset %X!\n", offset); + return -EINVAL; + } if ((end - start) < buf_sizes[cmd]) { DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/rs600.c +++ linux-3.13.0/drivers/gpu/drm/radeon/rs600.c @@ -698,6 +698,10 @@ WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); if (ASIC_IS_DCE2(rdev)) WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); + + /* posting read */ + RREG32(R_000040_GEN_INT_CNTL); + return 0; } @@ -888,6 +892,9 @@ u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; /* FIXME: implement full support */ + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) --- linux-3.13.0.orig/drivers/gpu/drm/radeon/rs690.c +++ linux-3.13.0/drivers/gpu/drm/radeon/rs690.c @@ -579,6 +579,9 @@ u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) --- linux-3.13.0.orig/drivers/gpu/drm/radeon/rv515.c +++ linux-3.13.0/drivers/gpu/drm/radeon/rv515.c @@ -1271,6 +1271,9 @@ struct drm_display_mode *mode0 = NULL; struct drm_display_mode *mode1 = NULL; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) --- linux-3.13.0.orig/drivers/gpu/drm/radeon/rv770.c +++ linux-3.13.0/drivers/gpu/drm/radeon/rv770.c @@ -1665,14 +1665,6 @@ rv770_mc_program(rdev); - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { - r = r600_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - if (rdev->flags & RADEON_IS_AGP) { rv770_agp_enable(rdev); } else { @@ -1876,6 +1868,14 @@ if (r) return r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); @@ -1921,9 +1921,9 @@ radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - rv770_pcie_gart_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + rv770_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); --- linux-3.13.0.orig/drivers/gpu/drm/radeon/rv770_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2328,12 +2328,6 @@ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, ASIC_INTERNAL_MEMORY_SS, 0); - /* disable ss, causes hangs on some cayman boards */ - if (rdev->family == CHIP_CAYMAN) { - pi->sclk_ss = false; - pi->mclk_ss = false; - } - if (pi->sclk_ss || pi->mclk_ss) pi->dynamic_ss = true; else @@ -2531,6 +2525,12 @@ (rdev->pdev->subsystem_device == 0x1c42)) switch_limit = 200; + /* RV770 */ + /* mclk switching doesn't seem to work reliably on desktop RV770s */ + if ((rdev->family == CHIP_RV770) && + !(rdev->flags & RADEON_IS_MOBILITY)) + switch_limit = 0xffffffff; /* disable mclk switching */ + if (vblank_time < switch_limit) return true; else 
--- linux-3.13.0.orig/drivers/gpu/drm/radeon/si.c +++ linux-3.13.0/drivers/gpu/drm/radeon/si.c @@ -39,30 +39,35 @@ MODULE_FIRMWARE("radeon/TAHITI_me.bin"); MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); +MODULE_FIRMWARE("radeon/TAHITI_mc2.bin"); MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); MODULE_FIRMWARE("radeon/TAHITI_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); +MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin"); MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); MODULE_FIRMWARE("radeon/VERDE_ce.bin"); MODULE_FIRMWARE("radeon/VERDE_mc.bin"); +MODULE_FIRMWARE("radeon/VERDE_mc2.bin"); MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); MODULE_FIRMWARE("radeon/VERDE_smc.bin"); MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); +MODULE_FIRMWARE("radeon/OLAND_mc2.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); MODULE_FIRMWARE("radeon/OLAND_smc.bin"); MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); MODULE_FIRMWARE("radeon/HAINAN_me.bin"); MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_mc2.bin"); MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); @@ -1465,36 +1470,33 @@ const __be32 *fw_data; u32 running, blackout = 0; u32 *io_mc_regs; - int i, ucode_size, regs_size; + int i, regs_size, ucode_size; if (!rdev->mc_fw) return -EINVAL; + ucode_size = rdev->mc_fw->size / 4; + switch (rdev->family) { case CHIP_TAHITI: io_mc_regs = (u32 *)&tahiti_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_PITCAIRN: io_mc_regs = (u32 *)&pitcairn_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_VERDE: default: io_mc_regs = (u32 *)&verde_io_mc_regs; - ucode_size = SI_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_OLAND: io_mc_regs = (u32 *)&oland_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; case CHIP_HAINAN: io_mc_regs = (u32 *)&hainan_io_mc_regs; - ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; } @@ -1550,7 +1552,7 @@ const char *chip_name; const char *rlc_chip_name; size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; - size_t smc_req_size; + size_t smc_req_size, mc2_req_size; char fw_name[30]; int err; @@ -1565,6 +1567,7 @@ ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = TAHITI_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4); break; case CHIP_PITCAIRN: @@ -1575,6 +1578,7 @@ ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4); break; case CHIP_VERDE: @@ -1585,6 +1589,7 @@ ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = SI_MC_UCODE_SIZE * 4; + mc2_req_size = VERDE_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4); break; case CHIP_OLAND: @@ -1594,7 +1599,7 @@ me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; 
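/*
 * A stand-alone sketch of the firmware selection pattern in the si.c hunk
 * below: request the newer "<chip>_mc2.bin" image first, fall back to the
 * legacy "<chip>_mc.bin", then accept either expected length. This models
 * the logic with plain stdio under the sizes defined in radeon_ucode.h;
 * the driver itself uses request_firmware() and struct radeon_device.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned char *load_blob(const char *name, size_t *len)
{
	FILE *f = fopen(name, "rb");
	unsigned char *buf = NULL;
	long sz;

	if (!f)
		return NULL;
	fseek(f, 0, SEEK_END);
	sz = ftell(f);
	rewind(f);
	if (sz >= 0)
		buf = malloc(sz);
	if (buf && fread(buf, 1, (size_t)sz, f) != (size_t)sz) {
		free(buf);
		buf = NULL;
	}
	fclose(f);
	if (buf)
		*len = (size_t)sz;
	return buf;
}

int main(void)
{
	const size_t mc_req_size = 7769 * 4;	/* SI_MC_UCODE_SIZE * 4 */
	const size_t mc2_req_size = 7808 * 4;	/* TAHITI_MC_UCODE_SIZE * 4 */
	size_t len = 0;
	unsigned char *fw;

	fw = load_blob("TAHITI_mc2.bin", &len);		/* preferred image */
	if (!fw)
		fw = load_blob("TAHITI_mc.bin", &len);	/* legacy fallback */
	if (!fw)
		return 1;

	if (len != mc_req_size && len != mc2_req_size) {
		fprintf(stderr, "si_mc: bogus length %zu in firmware\n", len);
		free(fw);
		return 1;
	}
	free(fw);
	return 0;
}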
rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4); break; case CHIP_HAINAN: @@ -1604,7 +1609,7 @@ me_req_size = SI_PM4_UCODE_SIZE * 4; ce_req_size = SI_CE_UCODE_SIZE * 4; rlc_req_size = SI_RLC_UCODE_SIZE * 4; - mc_req_size = OLAND_MC_UCODE_SIZE * 4; + mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4; smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4); break; default: BUG(); @@ -1657,16 +1662,22 @@ err = -EINVAL; } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->mc_fw->size != mc_req_size) { + if (err) { + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); + if (err) + goto out; + } + if ((rdev->mc_fw->size != mc_req_size) && + (rdev->mc_fw->size != mc2_req_size)) { printk(KERN_ERR "si_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); err = -EINVAL; } + DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size); snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); @@ -2214,6 +2225,9 @@ u32 num_heads = 0, lb_size; int i; + if (!rdev->mode_info.mode_config_initialized) + return; + radeon_update_display_priority(rdev); for (i = 0; i < rdev->num_crtc; i++) { @@ -4686,7 +4700,7 @@ /* write new base address */ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | WRITE_DATA_DST_SEL(0))); if (vm->id < 8) { @@ -4715,6 +4729,16 @@ radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm->id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); @@ -5566,7 +5590,7 @@ } if (!ASIC_IS_NODCE(rdev)) { - WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + WREG32(DAC_AUTODETECT_INT_CONTROL, 0); tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; WREG32(DC_HPD1_INT_CONTROL, tmp); @@ -5661,7 +5685,6 @@ u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; u32 thermal_int = 0; @@ -5800,16 +5823,22 @@ } if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + 
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (!ASIC_IS_NODCE(rdev)) { @@ -5823,6 +5852,9 @@ WREG32(CG_THERMAL_INT, thermal_int); + /* posting read */ + RREG32(SRBM_STATUS); + return 0; } @@ -5966,6 +5998,7 @@ tmp = RREG32(IH_RB_CNTL); tmp |= IH_WPTR_OVERFLOW_CLEAR; WREG32(IH_RB_CNTL, tmp); + wptr &= ~RB_OVERFLOW; } return (wptr & rdev->ih.ptr_mask); } @@ -6173,6 +6206,15 @@ break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: @@ -6222,6 +6264,10 @@ break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 146: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); @@ -6324,15 +6370,6 @@ si_mc_program(rdev); - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || - !rdev->rlc_fw || !rdev->mc_fw) { - r = si_init_microcode(rdev); - if (r) { - DRM_ERROR("Failed to load firmware!\n"); - return r; - } - } - r = si_mc_load_microcode(rdev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); @@ -6600,6 +6637,15 @@ if (r) return r; + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || + !rdev->rlc_fw || !rdev->mc_fw) { + r = si_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; ring->ring_obj = NULL; r600_ring_init(rdev, ring, 1024 * 1024); @@ -6724,8 +6770,7 @@ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); if (!vclk || !dclk) { - /* keep the Bypass mode, put PLL to sleep */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* keep the Bypass mode */ return 0; } @@ -6741,8 +6786,7 @@ /* set VCO_MODE to 1 */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); - /* toggle UPLL_SLEEP to 1 then back to 0 */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* disable sleep mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); /* deassert UPLL_RESET */ --- linux-3.13.0.orig/drivers/gpu/drm/radeon/si_dma.c +++ linux-3.13.0/drivers/gpu/drm/radeon/si_dma.c @@ -153,6 +153,14 @@ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm->id); + + /* wait for invalidate to complete */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST); + radeon_ring_write(ring, 0xff << 16); /* retry */ + radeon_ring_write(ring, 1 << vm->id); /* mask */ + radeon_ring_write(ring, 0); /* value */ + radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ } /** --- linux-3.13.0.orig/drivers/gpu/drm/radeon/si_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/si_dpm.c @@ -2396,7 +2396,7 @@ 
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -2901,6 +2901,22 @@ return ret; } +struct si_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 max_sclk; + u32 max_mclk; +}; + +/* cards with dpm stability problems */ +static struct si_dpm_quirk si_dpm_quirk_list[] = { + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { 0, 0, 0, 0 }, +}; + static void si_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *rps) { @@ -2911,7 +2927,22 @@ u32 mclk, sclk; u16 vddc, vddci; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; + u32 max_sclk = 0, max_mclk = 0; int i; + struct si_dpm_quirk *p = si_dpm_quirk_list; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + max_sclk = p->max_sclk; + max_mclk = p->max_mclk; + break; + } + ++p; + } if ((rdev->pm.dpm.new_active_crtc_count > 1) || ni_dpm_vblank_too_short(rdev)) @@ -2965,6 +2996,14 @@ if (ps->performance_levels[i].mclk > max_mclk_vddc) ps->performance_levels[i].mclk = max_mclk_vddc; } + if (max_mclk) { + if (ps->performance_levels[i].mclk > max_mclk) + ps->performance_levels[i].mclk = max_mclk; + } + if (max_sclk) { + if (ps->performance_levels[i].sclk > max_sclk) + ps->performance_levels[i].sclk = max_sclk; + } } /* XXX validate the min clocks required for display */ @@ -3591,10 +3630,9 @@ /* Setting this to false forces the performance state to low if the crtcs are disabled. * This can be a problem on PowerXpress systems or if you want to use the card - * for offscreen rendering or compute if there are no crtcs enabled. Set it to - * true for now so that performance scales even if the displays are off. + * for offscreen rendering or compute if there are no crtcs enabled. 
*/ - si_notify_smc_display_change(rdev, true /*rdev->pm.dpm.new_active_crtc_count > 0*/); + si_notify_smc_display_change(rdev, rdev->pm.dpm.new_active_crtc_count > 0); } static void si_enable_spread_spectrum(struct radeon_device *rdev, bool enable) @@ -5414,7 +5452,7 @@ for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { if (si_pi->mc_reg_table.valid_flag & (1 << j)) { - if (i >= SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE) + if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) break; mc_reg_table->address[i].s0 = cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); @@ -6225,7 +6263,7 @@ if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && index == 0) { /* XXX disable for A0 tahiti */ - si_pi->ulv.supported = true; + si_pi->ulv.supported = false; si_pi->ulv.pl = *pl; si_pi->ulv.one_pcie_lane_in_ulv = false; si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/sid.h +++ linux-3.13.0/drivers/gpu/drm/radeon/sid.h @@ -102,8 +102,8 @@ #define CG_SPLL_FUNC_CNTL_4 0x60c #define SPLL_CNTL_MODE 0x618 -# define SPLL_REFCLK_SEL(x) ((x) << 8) -# define SPLL_REFCLK_SEL_MASK 0xFF00 +# define SPLL_REFCLK_SEL(x) ((x) << 26) +# define SPLL_REFCLK_SEL_MASK (3 << 26) #define CG_SPLL_SPREAD_SPECTRUM 0x620 #define SSEN (1 << 0) @@ -815,7 +815,7 @@ # define GRPH_PFLIP_INT_MASK (1 << 0) # define GRPH_PFLIP_INT_TYPE (1 << 8) -#define DACA_AUTODETECT_INT_CONTROL 0x66c8 +#define DAC_AUTODETECT_INT_CONTROL 0x67c8 #define DC_HPD1_INT_STATUS 0x601c #define DC_HPD2_INT_STATUS 0x6028 @@ -1586,6 +1586,23 @@ #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_COPY_DW 0x3B #define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + * 1 - pfp + */ #define PACKET3_MEM_WRITE 0x3D #define PACKET3_COPY_DATA 0x40 #define PACKET3_CP_DMA 0x41 @@ -1789,6 +1806,7 @@ #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_SRBM_WRITE 0x9 #define DMA_PACKET_CONSTANT_FILL 0xd +#define DMA_PACKET_POLL_REG_MEM 0xe #define DMA_PACKET_NOP 0xf #endif --- linux-3.13.0.orig/drivers/gpu/drm/radeon/trinity_dpm.c +++ linux-3.13.0/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1868,7 +1868,22 @@ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) pi->at[i] = TRINITY_AT_DFLT; - pi->enable_bapm = false; + if (radeon_bapm == -1) { + /* There are stability issues reported on with + * bapm enabled when switching between AC and battery + * power. At the same time, some MSI boards hang + * if it's not enabled and dpm is enabled. Just enable + * it for MSI boards right now. 
+ */ + if (rdev->pdev->subsystem_vendor == 0x1462) + pi->enable_bapm = true; + else + pi->enable_bapm = false; + } else if (radeon_bapm == 0) { + pi->enable_bapm = false; + } else { + pi->enable_bapm = true; + } pi->enable_nbps_policy = true; pi->enable_sclk_ds = true; pi->enable_gfx_power_gating = true; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/uvd_v1_0.c +++ linux-3.13.0/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -83,7 +83,10 @@ int r; /* raise clocks while booting up the VCPU */ - radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + radeon_set_uvd_clocks(rdev, 10000, 10000); + else + radeon_set_uvd_clocks(rdev, 53300, 40000); r = uvd_v1_0_start(rdev); if (r) @@ -407,7 +410,10 @@ struct radeon_fence *fence = NULL; int r; - r = radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + r = radeon_set_uvd_clocks(rdev, 10000, 10000); + else + r = radeon_set_uvd_clocks(rdev, 53300, 40000); if (r) { DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); return r; --- linux-3.13.0.orig/drivers/gpu/drm/radeon/uvd_v2_2.c +++ linux-3.13.0/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -153,6 +153,7 @@ chip_id = 0x01000015; break; case CHIP_PITCAIRN: + case CHIP_OLAND: chip_id = 0x01000016; break; case CHIP_ARUBA: --- linux-3.13.0.orig/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ linux-3.13.0/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -371,7 +371,6 @@ goto error; rcrtc->plane->format = format; - rcrtc->plane->pitch = crtc->fb->pitches[0]; rcrtc->plane->src_x = x; rcrtc->plane->src_y = y; --- linux-3.13.0.orig/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ linux-3.13.0/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -104,6 +104,15 @@ { struct rcar_du_group *rgrp = plane->group; unsigned int index = plane->hwindex; + u32 mwr; + + /* Memory pitch (expressed in pixels) */ + if (plane->format->planes == 2) + mwr = plane->pitch; + else + mwr = plane->pitch * 8 / plane->format->bpp; + + rcar_du_plane_write(rgrp, index, PnMWR, mwr); /* The Y position is expressed in raster line units and must be doubled * for 32bpp formats, according to the R8A7790 datasheet. 
No mention of @@ -133,6 +142,8 @@ { struct drm_gem_cma_object *gem; + plane->pitch = fb->pitches[0]; + gem = drm_fb_cma_get_gem_obj(fb, 0); plane->dma[0] = gem->paddr + fb->offsets[0]; @@ -209,7 +220,6 @@ struct rcar_du_group *rgrp = plane->group; u32 ddcr2 = PnDDCR2_CODE; u32 ddcr4; - u32 mwr; /* Data format * @@ -240,14 +250,6 @@ rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2); rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4); - /* Memory pitch (expressed in pixels) */ - if (plane->format->planes == 2) - mwr = plane->pitch; - else - mwr = plane->pitch * 8 / plane->format->bpp; - - rcar_du_plane_write(rgrp, index, PnMWR, mwr); - /* Destination position and size */ rcar_du_plane_write(rgrp, index, PnDSXR, plane->width); rcar_du_plane_write(rgrp, index, PnDSYR, plane->height); @@ -309,7 +311,6 @@ rplane->crtc = crtc; rplane->format = format; - rplane->pitch = fb->pitches[0]; rplane->src_x = src_x >> 16; rplane->src_y = src_y >> 16; --- linux-3.13.0.orig/drivers/gpu/drm/tegra/dc.c +++ linux-3.13.0/drivers/gpu/drm/tegra/dc.c @@ -1252,6 +1252,7 @@ { .compatible = "nvidia,tegra20-dc", }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_dc_of_match); struct platform_driver tegra_dc_driver = { .driver = { --- linux-3.13.0.orig/drivers/gpu/drm/tegra/gr2d.c +++ linux-3.13.0/drivers/gpu/drm/tegra/gr2d.c @@ -129,6 +129,7 @@ { .compatible = "nvidia,tegra20-gr2d" }, { }, }; +MODULE_DEVICE_TABLE(of, gr2d_match); static const u32 gr2d_addr_regs[] = { GR2D_UA_BASE_ADDR, --- linux-3.13.0.orig/drivers/gpu/drm/tegra/gr3d.c +++ linux-3.13.0/drivers/gpu/drm/tegra/gr3d.c @@ -127,6 +127,7 @@ { .compatible = "nvidia,tegra20-gr3d" }, { } }; +MODULE_DEVICE_TABLE(of, tegra_gr3d_match); static const u32 gr3d_addr_regs[] = { GR3D_IDX_ATTRIBUTE( 0), --- linux-3.13.0.orig/drivers/gpu/drm/tegra/hdmi.c +++ linux-3.13.0/drivers/gpu/drm/tegra/hdmi.c @@ -1309,6 +1309,7 @@ { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config }, { }, }; +MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match); static int tegra_hdmi_probe(struct platform_device *pdev) { --- linux-3.13.0.orig/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ linux-3.13.0/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -122,6 +122,7 @@ struct tilcdc_drm_private *priv = dev->dev_private; struct tilcdc_module *mod, *cur; + drm_fbdev_cma_fini(priv->fbdev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); @@ -628,10 +629,10 @@ static void __exit tilcdc_drm_fini(void) { DBG("fini"); - tilcdc_tfp410_fini(); - tilcdc_slave_fini(); - tilcdc_panel_fini(); platform_driver_unregister(&tilcdc_platform_driver); + tilcdc_panel_fini(); + tilcdc_slave_fini(); + tilcdc_tfp410_fini(); } late_initcall(tilcdc_drm_init); --- linux-3.13.0.orig/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ linux-3.13.0/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -151,6 +151,7 @@ static void panel_connector_destroy(struct drm_connector *connector) { struct panel_connector *panel_connector = to_panel_connector(connector); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(panel_connector); } @@ -285,10 +286,8 @@ { struct panel_module *panel_mod = to_panel_module(mod); - if (panel_mod->timings) { + if (panel_mod->timings) display_timings_release(panel_mod->timings); - kfree(panel_mod->timings); - } tilcdc_module_cleanup(mod); kfree(panel_mod->info); --- linux-3.13.0.orig/drivers/gpu/drm/tilcdc/tilcdc_slave.c +++ linux-3.13.0/drivers/gpu/drm/tilcdc/tilcdc_slave.c @@ -166,6 +166,7 @@ static void slave_connector_destroy(struct drm_connector *connector) { struct 
slave_connector *slave_connector = to_slave_connector(connector); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(slave_connector); } --- linux-3.13.0.orig/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ linux-3.13.0/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -167,6 +167,7 @@ static void tfp410_connector_destroy(struct drm_connector *connector) { struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(tfp410_connector); } --- linux-3.13.0.orig/drivers/gpu/drm/ttm/ttm_bo.c +++ linux-3.13.0/drivers/gpu/drm/ttm/ttm_bo.c @@ -351,9 +351,11 @@ moved: if (bo->evicted) { - ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); - if (ret) - pr_err("Can not flush read caches\n"); + if (bdev->driver->invalidate_caches) { + ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); + if (ret) + pr_err("Can not flush read caches\n"); + } bo->evicted = false; } --- linux-3.13.0.orig/drivers/gpu/drm/ttm/ttm_object.c +++ linux-3.13.0/drivers/gpu/drm/ttm/ttm_object.c @@ -68,7 +68,7 @@ struct ttm_object_file { struct ttm_object_device *tdev; - rwlock_t lock; + spinlock_t lock; struct list_head ref_list; struct drm_open_hash ref_hash[TTM_REF_NUM]; struct kref refcount; @@ -118,6 +118,7 @@ */ struct ttm_ref_object { + struct rcu_head rcu_head; struct drm_hash_item hash; struct list_head head; struct kref kref; @@ -210,10 +211,9 @@ * call_rcu() or ttm_base_object_kfree(). */ - if (base->refcount_release) { - ttm_object_file_unref(&base->tfile); + ttm_object_file_unref(&base->tfile); + if (base->refcount_release) base->refcount_release(&base); - } } void ttm_base_object_unref(struct ttm_base_object **p_base) @@ -229,32 +229,46 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, uint32_t key) { - struct ttm_object_device *tdev = tfile->tdev; - struct ttm_base_object *uninitialized_var(base); + struct ttm_base_object *base = NULL; struct drm_hash_item *hash; + struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; int ret; rcu_read_lock(); - ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash); + ret = drm_ht_find_item_rcu(ht, key, &hash); if (likely(ret == 0)) { - base = drm_hash_entry(hash, struct ttm_base_object, hash); - ret = kref_get_unless_zero(&base->refcount) ? 
0 : -EINVAL; + base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; + if (!kref_get_unless_zero(&base->refcount)) + base = NULL; } rcu_read_unlock(); - if (unlikely(ret != 0)) - return NULL; + return base; +} +EXPORT_SYMBOL(ttm_base_object_lookup); - if (tfile != base->tfile && !base->shareable) { - pr_err("Attempted access of non-shareable object\n"); - ttm_base_object_unref(&base); - return NULL; +struct ttm_base_object * +ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) +{ + struct ttm_base_object *base = NULL; + struct drm_hash_item *hash; + struct drm_open_hash *ht = &tdev->object_hash; + int ret; + + rcu_read_lock(); + ret = drm_ht_find_item_rcu(ht, key, &hash); + + if (likely(ret == 0)) { + base = drm_hash_entry(hash, struct ttm_base_object, hash); + if (!kref_get_unless_zero(&base->refcount)) + base = NULL; } + rcu_read_unlock(); return base; } -EXPORT_SYMBOL(ttm_base_object_lookup); +EXPORT_SYMBOL(ttm_base_object_lookup_for_ref); int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, @@ -266,21 +280,25 @@ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; int ret = -EINVAL; + if (base->tfile != tfile && !base->shareable) + return -EPERM; + if (existed != NULL) *existed = true; while (ret == -EINVAL) { - read_lock(&tfile->lock); - ret = drm_ht_find_item(ht, base->hash.key, &hash); + rcu_read_lock(); + ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); if (ret == 0) { ref = drm_hash_entry(hash, struct ttm_ref_object, hash); - kref_get(&ref->kref); - read_unlock(&tfile->lock); - break; + if (kref_get_unless_zero(&ref->kref)) { + rcu_read_unlock(); + break; + } } - read_unlock(&tfile->lock); + rcu_read_unlock(); ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false); if (unlikely(ret != 0)) @@ -297,19 +315,19 @@ ref->ref_type = ref_type; kref_init(&ref->kref); - write_lock(&tfile->lock); - ret = drm_ht_insert_item(ht, &ref->hash); + spin_lock(&tfile->lock); + ret = drm_ht_insert_item_rcu(ht, &ref->hash); if (likely(ret == 0)) { list_add_tail(&ref->head, &tfile->ref_list); kref_get(&base->refcount); - write_unlock(&tfile->lock); + spin_unlock(&tfile->lock); if (existed != NULL) *existed = false; break; } - write_unlock(&tfile->lock); + spin_unlock(&tfile->lock); BUG_ON(ret != -EINVAL); ttm_mem_global_free(mem_glob, sizeof(*ref)); @@ -330,17 +348,17 @@ struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; ht = &tfile->ref_hash[ref->ref_type]; - (void)drm_ht_remove_item(ht, &ref->hash); + (void)drm_ht_remove_item_rcu(ht, &ref->hash); list_del(&ref->head); - write_unlock(&tfile->lock); + spin_unlock(&tfile->lock); if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) base->ref_obj_release(base, ref->ref_type); ttm_base_object_unref(&ref->obj); ttm_mem_global_free(mem_glob, sizeof(*ref)); - kfree(ref); - write_lock(&tfile->lock); + kfree_rcu(ref, rcu_head); + spin_lock(&tfile->lock); } int ttm_ref_object_base_unref(struct ttm_object_file *tfile, @@ -351,15 +369,15 @@ struct drm_hash_item *hash; int ret; - write_lock(&tfile->lock); + spin_lock(&tfile->lock); ret = drm_ht_find_item(ht, key, &hash); if (unlikely(ret != 0)) { - write_unlock(&tfile->lock); + spin_unlock(&tfile->lock); return -EINVAL; } ref = drm_hash_entry(hash, struct ttm_ref_object, hash); kref_put(&ref->kref, ttm_ref_object_release); - write_unlock(&tfile->lock); + spin_unlock(&tfile->lock); return 0; } EXPORT_SYMBOL(ttm_ref_object_base_unref); @@ -372,7 +390,7 @@ struct ttm_object_file *tfile = *p_tfile; *p_tfile = NULL; - 
write_lock(&tfile->lock); + spin_lock(&tfile->lock); /* * Since we release the lock within the loop, we have to @@ -388,7 +406,7 @@ for (i = 0; i < TTM_REF_NUM; ++i) drm_ht_remove(&tfile->ref_hash[i]); - write_unlock(&tfile->lock); + spin_unlock(&tfile->lock); ttm_object_file_unref(&tfile); } EXPORT_SYMBOL(ttm_object_file_release); @@ -404,7 +422,7 @@ if (unlikely(tfile == NULL)) return NULL; - rwlock_init(&tfile->lock); + spin_lock_init(&tfile->lock); tfile->tdev = tdev; kref_init(&tfile->refcount); INIT_LIST_HEAD(&tfile->ref_list); --- linux-3.13.0.orig/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ linux-3.13.0/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -297,9 +297,12 @@ * * @pool: to free the pages from * @free_all: If set to true will free all pages in pool + * @use_static: Safe to use static buffer **/ -static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free) +static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, + bool use_static) { + static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct page *p; struct page **pages_to_free; @@ -309,8 +312,11 @@ if (NUM_PAGES_TO_ALLOC < nr_free) npages_to_free = NUM_PAGES_TO_ALLOC; - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), - GFP_KERNEL); + if (use_static) + pages_to_free = static_buf; + else + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), + GFP_KERNEL); if (!pages_to_free) { pr_err("Failed to allocate memory for pool free operation\n"); return 0; @@ -373,7 +379,8 @@ if (freed_pages) ttm_pages_put(pages_to_free, freed_pages); out: - kfree(pages_to_free); + if (pages_to_free != static_buf) + kfree(pages_to_free); return nr_free; } @@ -382,32 +389,33 @@ * * XXX: (dchinner) Deadlock warning! * - * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means - * this can deadlock when called a sc->gfp_mask that is not equal to - * GFP_KERNEL. - * * This code is crying out for a shrinker per pool.... */ static unsigned long ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - static atomic_t start_pool = ATOMIC_INIT(0); + static DEFINE_MUTEX(lock); + static unsigned start_pool; unsigned i; - unsigned pool_offset = atomic_add_return(1, &start_pool); + unsigned pool_offset; struct ttm_page_pool *pool; int shrink_pages = sc->nr_to_scan; unsigned long freed = 0; - pool_offset = pool_offset % NUM_POOLS; + if (!mutex_trylock(&lock)) + return SHRINK_STOP; + pool_offset = ++start_pool % NUM_POOLS; /* select start pool in round robin fashion */ for (i = 0; i < NUM_POOLS; ++i) { unsigned nr_free = shrink_pages; if (shrink_pages == 0) break; pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; - shrink_pages = ttm_page_pool_free(pool, nr_free); + /* OK to use static buffer since global mutex is held. */ + shrink_pages = ttm_page_pool_free(pool, nr_free, true); freed += nr_free - shrink_pages; } + mutex_unlock(&lock); return freed; } @@ -706,7 +714,7 @@ } spin_unlock_irqrestore(&pool->lock, irq_flags); if (npages) - ttm_page_pool_free(pool, npages); + ttm_page_pool_free(pool, npages, false); } /* @@ -845,8 +853,9 @@ pr_info("Finalizing pool allocator\n"); ttm_pool_mm_shrink_fini(_manager); + /* OK to use static buffer since global mutex is no longer used. 
*/ for (i = 0; i < NUM_POOLS; ++i) - ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES); + ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true); kobject_put(&_manager->kobj); _manager = NULL; --- linux-3.13.0.orig/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ linux-3.13.0/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -411,9 +411,12 @@ * * @pool: to free the pages from * @nr_free: If set to true will free all pages in pool + * @use_static: Safe to use static buffer **/ -static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) +static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, + bool use_static) { + static struct page *static_buf[NUM_PAGES_TO_ALLOC]; unsigned long irq_flags; struct dma_page *dma_p, *tmp; struct page **pages_to_free; @@ -430,8 +433,11 @@ npages_to_free, nr_free); } #endif - pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), - GFP_KERNEL); + if (use_static) + pages_to_free = static_buf; + else + pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), + GFP_KERNEL); if (!pages_to_free) { pr_err("%s: Failed to allocate memory for pool free operation\n", @@ -501,7 +507,8 @@ if (freed_pages) ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); out: - kfree(pages_to_free); + if (pages_to_free != static_buf) + kfree(pages_to_free); return nr_free; } @@ -530,7 +537,8 @@ if (pool->type != type) continue; /* Takes a spinlock.. */ - ttm_dma_page_pool_free(pool, FREE_ALL_PAGES); + /* OK to use static buffer since global mutex is held. */ + ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); /* This code path is called after _all_ references to the * struct device has been dropped - so nobody should be @@ -983,7 +991,7 @@ /* shrink pool if necessary (only on !is_cached pools)*/ if (npages) - ttm_dma_page_pool_free(pool, npages); + ttm_dma_page_pool_free(pool, npages, false); ttm->state = tt_unpopulated; } EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); @@ -993,20 +1001,15 @@ * * XXX: (dchinner) Deadlock warning! * - * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention - * needs to be paid to sc->gfp_mask to determine if this can be done or not. - * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really - * bad. - * * I'm getting sadder as I hear more pathetical whimpers about needing per-pool * shrinkers */ static unsigned long ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - static atomic_t start_pool = ATOMIC_INIT(0); + static unsigned start_pool; unsigned idx = 0; - unsigned pool_offset = atomic_add_return(1, &start_pool); + unsigned pool_offset; unsigned shrink_pages = sc->nr_to_scan; struct device_pools *p; unsigned long freed = 0; @@ -1014,8 +1017,11 @@ if (list_empty(&_manager->pools)) return SHRINK_STOP; - mutex_lock(&_manager->lock); - pool_offset = pool_offset % _manager->npools; + if (!mutex_trylock(&_manager->lock)) + return SHRINK_STOP; + if (!_manager->npools) + goto out; + pool_offset = ++start_pool % _manager->npools; list_for_each_entry(p, &_manager->pools, pools) { unsigned nr_free; @@ -1027,13 +1033,15 @@ if (++idx < pool_offset) continue; nr_free = shrink_pages; - shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); + /* OK to use static buffer since global mutex is held. 
*/ + shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); freed += nr_free - shrink_pages; pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", p->pool->dev_name, p->pool->name, current->pid, nr_free, shrink_pages); } +out: mutex_unlock(&_manager->lock); return freed; } @@ -1044,7 +1052,8 @@ struct device_pools *p; unsigned long count = 0; - mutex_lock(&_manager->lock); + if (!mutex_trylock(&_manager->lock)) + return 0; list_for_each_entry(p, &_manager->pools, pools) count += p->pool->npages_free; mutex_unlock(&_manager->lock); --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/Makefile +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/Makefile @@ -6,6 +6,6 @@ vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ - vmwgfx_surface.o vmwgfx_prime.o + vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/svga3d_reg.h @@ -34,6 +34,8 @@ #include "svga_reg.h" +typedef uint32 PPN; +typedef __le64 PPN64; /* * 3D Hardware Version @@ -71,6 +73,9 @@ #define SVGA3D_MAX_CONTEXT_IDS 256 #define SVGA3D_MAX_SURFACE_IDS (32 * 1024) +#define SVGA3D_NUM_TEXTURE_UNITS 32 +#define SVGA3D_NUM_LIGHTS 8 + /* * Surface formats. * @@ -81,6 +86,7 @@ */ typedef enum SVGA3dSurfaceFormat { + SVGA3D_FORMAT_MIN = 0, SVGA3D_FORMAT_INVALID = 0, SVGA3D_X8R8G8B8 = 1, @@ -134,12 +140,6 @@ SVGA3D_RG_S10E5 = 35, SVGA3D_RG_S23E8 = 36, - /* - * Any surface can be used as a buffer object, but SVGA3D_BUFFER is - * the most efficient format to use when creating new surfaces - * expressly for index or vertex data. - */ - SVGA3D_BUFFER = 37, SVGA3D_Z_D24X8 = 38, @@ -159,15 +159,109 @@ /* Video format with alpha */ SVGA3D_AYUV = 45, + SVGA3D_R32G32B32A32_TYPELESS = 46, + SVGA3D_R32G32B32A32_FLOAT = 25, + SVGA3D_R32G32B32A32_UINT = 47, + SVGA3D_R32G32B32A32_SINT = 48, + SVGA3D_R32G32B32_TYPELESS = 49, + SVGA3D_R32G32B32_FLOAT = 50, + SVGA3D_R32G32B32_UINT = 51, + SVGA3D_R32G32B32_SINT = 52, + SVGA3D_R16G16B16A16_TYPELESS = 53, + SVGA3D_R16G16B16A16_FLOAT = 24, + SVGA3D_R16G16B16A16_UNORM = 41, + SVGA3D_R16G16B16A16_UINT = 54, + SVGA3D_R16G16B16A16_SNORM = 55, + SVGA3D_R16G16B16A16_SINT = 56, + SVGA3D_R32G32_TYPELESS = 57, + SVGA3D_R32G32_FLOAT = 36, + SVGA3D_R32G32_UINT = 58, + SVGA3D_R32G32_SINT = 59, + SVGA3D_R32G8X24_TYPELESS = 60, + SVGA3D_D32_FLOAT_S8X24_UINT = 61, + SVGA3D_R32_FLOAT_X8X24_TYPELESS = 62, + SVGA3D_X32_TYPELESS_G8X24_UINT = 63, + SVGA3D_R10G10B10A2_TYPELESS = 64, + SVGA3D_R10G10B10A2_UNORM = 26, + SVGA3D_R10G10B10A2_UINT = 65, + SVGA3D_R11G11B10_FLOAT = 66, + SVGA3D_R8G8B8A8_TYPELESS = 67, + SVGA3D_R8G8B8A8_UNORM = 68, + SVGA3D_R8G8B8A8_UNORM_SRGB = 69, + SVGA3D_R8G8B8A8_UINT = 70, + SVGA3D_R8G8B8A8_SNORM = 28, + SVGA3D_R8G8B8A8_SINT = 71, + SVGA3D_R16G16_TYPELESS = 72, + SVGA3D_R16G16_FLOAT = 35, + SVGA3D_R16G16_UNORM = 40, + SVGA3D_R16G16_UINT = 73, + SVGA3D_R16G16_SNORM = 39, + SVGA3D_R16G16_SINT = 74, + SVGA3D_R32_TYPELESS = 75, + SVGA3D_D32_FLOAT = 76, + SVGA3D_R32_FLOAT = 34, + SVGA3D_R32_UINT = 77, + SVGA3D_R32_SINT = 78, + SVGA3D_R24G8_TYPELESS = 79, + SVGA3D_D24_UNORM_S8_UINT = 80, + SVGA3D_R24_UNORM_X8_TYPELESS = 81, + SVGA3D_X24_TYPELESS_G8_UINT = 82, + SVGA3D_R8G8_TYPELESS = 83, + SVGA3D_R8G8_UNORM = 84, + SVGA3D_R8G8_UINT = 85, + SVGA3D_R8G8_SNORM = 27, + SVGA3D_R8G8_SINT = 86, + SVGA3D_R16_TYPELESS = 87, + SVGA3D_R16_FLOAT = 33, + 
SVGA3D_D16_UNORM = 8, + SVGA3D_R16_UNORM = 88, + SVGA3D_R16_UINT = 89, + SVGA3D_R16_SNORM = 90, + SVGA3D_R16_SINT = 91, + SVGA3D_R8_TYPELESS = 92, + SVGA3D_R8_UNORM = 93, + SVGA3D_R8_UINT = 94, + SVGA3D_R8_SNORM = 95, + SVGA3D_R8_SINT = 96, + SVGA3D_A8_UNORM = 32, + SVGA3D_R1_UNORM = 97, + SVGA3D_R9G9B9E5_SHAREDEXP = 98, + SVGA3D_R8G8_B8G8_UNORM = 99, + SVGA3D_G8R8_G8B8_UNORM = 100, + SVGA3D_BC1_TYPELESS = 101, + SVGA3D_BC1_UNORM = 15, + SVGA3D_BC1_UNORM_SRGB = 102, + SVGA3D_BC2_TYPELESS = 103, + SVGA3D_BC2_UNORM = 17, + SVGA3D_BC2_UNORM_SRGB = 104, + SVGA3D_BC3_TYPELESS = 105, + SVGA3D_BC3_UNORM = 19, + SVGA3D_BC3_UNORM_SRGB = 106, + SVGA3D_BC4_TYPELESS = 107, SVGA3D_BC4_UNORM = 108, + SVGA3D_BC4_SNORM = 109, + SVGA3D_BC5_TYPELESS = 110, SVGA3D_BC5_UNORM = 111, + SVGA3D_BC5_SNORM = 112, + SVGA3D_B5G6R5_UNORM = 3, + SVGA3D_B5G5R5A1_UNORM = 5, + SVGA3D_B8G8R8A8_UNORM = 2, + SVGA3D_B8G8R8X8_UNORM = 1, + SVGA3D_R10G10B10_XR_BIAS_A2_UNORM = 113, + SVGA3D_B8G8R8A8_TYPELESS = 114, + SVGA3D_B8G8R8A8_UNORM_SRGB = 115, + SVGA3D_B8G8R8X8_TYPELESS = 116, + SVGA3D_B8G8R8X8_UNORM_SRGB = 117, /* Advanced D3D9 depth formats. */ SVGA3D_Z_DF16 = 118, SVGA3D_Z_DF24 = 119, SVGA3D_Z_D24S8_INT = 120, - SVGA3D_FORMAT_MAX + /* Planar video formats. */ + SVGA3D_YV12 = 121, + + SVGA3D_FORMAT_MAX = 122, } SVGA3dSurfaceFormat; typedef uint32 SVGA3dColor; /* a, r, g, b */ @@ -957,15 +1051,21 @@ } SVGA3dCubeFace; typedef enum { + SVGA3D_SHADERTYPE_INVALID = 0, + SVGA3D_SHADERTYPE_MIN = 1, SVGA3D_SHADERTYPE_VS = 1, SVGA3D_SHADERTYPE_PS = 2, - SVGA3D_SHADERTYPE_MAX + SVGA3D_SHADERTYPE_MAX = 3, + SVGA3D_SHADERTYPE_GS = 3, } SVGA3dShaderType; +#define SVGA3D_NUM_SHADERTYPE (SVGA3D_SHADERTYPE_MAX - SVGA3D_SHADERTYPE_MIN) + typedef enum { SVGA3D_CONST_TYPE_FLOAT = 0, SVGA3D_CONST_TYPE_INT = 1, SVGA3D_CONST_TYPE_BOOL = 2, + SVGA3D_CONST_TYPE_MAX } SVGA3dShaderConstType; #define SVGA3D_MAX_SURFACE_FACES 6 @@ -1056,9 +1156,84 @@ #define SVGA_3D_CMD_GENERATE_MIPMAPS SVGA_3D_CMD_BASE + 31 #define SVGA_3D_CMD_ACTIVATE_SURFACE SVGA_3D_CMD_BASE + 40 #define SVGA_3D_CMD_DEACTIVATE_SURFACE SVGA_3D_CMD_BASE + 41 -#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 42 +#define SVGA_3D_CMD_SCREEN_DMA 1082 +#define SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE 1083 +#define SVGA_3D_CMD_OPEN_CONTEXT_SURFACE 1084 + +#define SVGA_3D_CMD_LOGICOPS_BITBLT 1085 +#define SVGA_3D_CMD_LOGICOPS_TRANSBLT 1086 +#define SVGA_3D_CMD_LOGICOPS_STRETCHBLT 1087 +#define SVGA_3D_CMD_LOGICOPS_COLORFILL 1088 +#define SVGA_3D_CMD_LOGICOPS_ALPHABLEND 1089 +#define SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND 1090 + +#define SVGA_3D_CMD_SET_OTABLE_BASE 1091 +#define SVGA_3D_CMD_READBACK_OTABLE 1092 + +#define SVGA_3D_CMD_DEFINE_GB_MOB 1093 +#define SVGA_3D_CMD_DESTROY_GB_MOB 1094 +#define SVGA_3D_CMD_REDEFINE_GB_MOB 1095 +#define SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING 1096 + +#define SVGA_3D_CMD_DEFINE_GB_SURFACE 1097 +#define SVGA_3D_CMD_DESTROY_GB_SURFACE 1098 +#define SVGA_3D_CMD_BIND_GB_SURFACE 1099 +#define SVGA_3D_CMD_COND_BIND_GB_SURFACE 1100 +#define SVGA_3D_CMD_UPDATE_GB_IMAGE 1101 +#define SVGA_3D_CMD_UPDATE_GB_SURFACE 1102 +#define SVGA_3D_CMD_READBACK_GB_IMAGE 1103 +#define SVGA_3D_CMD_READBACK_GB_SURFACE 1104 +#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE 1105 +#define SVGA_3D_CMD_INVALIDATE_GB_SURFACE 1106 + +#define SVGA_3D_CMD_DEFINE_GB_CONTEXT 1107 +#define SVGA_3D_CMD_DESTROY_GB_CONTEXT 1108 +#define SVGA_3D_CMD_BIND_GB_CONTEXT 1109 +#define SVGA_3D_CMD_READBACK_GB_CONTEXT 1110 +#define SVGA_3D_CMD_INVALIDATE_GB_CONTEXT 1111 + +#define SVGA_3D_CMD_DEFINE_GB_SHADER 1112 +#define 
SVGA_3D_CMD_DESTROY_GB_SHADER 1113 +#define SVGA_3D_CMD_BIND_GB_SHADER 1114 + +#define SVGA_3D_CMD_SET_OTABLE_BASE64 1115 + +#define SVGA_3D_CMD_BEGIN_GB_QUERY 1116 +#define SVGA_3D_CMD_END_GB_QUERY 1117 +#define SVGA_3D_CMD_WAIT_FOR_GB_QUERY 1118 + +#define SVGA_3D_CMD_NOP 1119 + +#define SVGA_3D_CMD_ENABLE_GART 1120 +#define SVGA_3D_CMD_DISABLE_GART 1121 +#define SVGA_3D_CMD_MAP_MOB_INTO_GART 1122 +#define SVGA_3D_CMD_UNMAP_GART_RANGE 1123 + +#define SVGA_3D_CMD_DEFINE_GB_SCREENTARGET 1124 +#define SVGA_3D_CMD_DESTROY_GB_SCREENTARGET 1125 +#define SVGA_3D_CMD_BIND_GB_SCREENTARGET 1126 +#define SVGA_3D_CMD_UPDATE_GB_SCREENTARGET 1127 + +#define SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL 1128 +#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 + +#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 +#define SVGA_3D_CMD_GB_SCREEN_DMA 1131 +#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 +#define SVGA_3D_CMD_GB_MOB_FENCE 1133 +#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 +#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 +#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 +#define SVGA_3D_CMD_NOP_ERROR 1137 + +#define SVGA_3D_CMD_RESERVED1 1138 +#define SVGA_3D_CMD_RESERVED2 1139 +#define SVGA_3D_CMD_RESERVED3 1140 +#define SVGA_3D_CMD_RESERVED4 1141 +#define SVGA_3D_CMD_RESERVED5 1142 -#define SVGA_3D_CMD_FUTURE_MAX 2000 +#define SVGA_3D_CMD_MAX 1142 +#define SVGA_3D_CMD_FUTURE_MAX 3000 /* * Common substructures used in multiple FIFO commands: @@ -1750,6 +1925,507 @@ /* + * Guest-backed surface definitions. + */ + +typedef uint32 SVGAMobId; + +typedef enum SVGAMobFormat { + SVGA3D_MOBFMT_INVALID = SVGA3D_INVALID_ID, + SVGA3D_MOBFMT_PTDEPTH_0 = 0, + SVGA3D_MOBFMT_PTDEPTH_1 = 1, + SVGA3D_MOBFMT_PTDEPTH_2 = 2, + SVGA3D_MOBFMT_RANGE = 3, + SVGA3D_MOBFMT_PTDEPTH64_0 = 4, + SVGA3D_MOBFMT_PTDEPTH64_1 = 5, + SVGA3D_MOBFMT_PTDEPTH64_2 = 6, + SVGA3D_MOBFMT_MAX, +} SVGAMobFormat; + +/* + * Sizes of opaque types. + */ + +#define SVGA3D_OTABLE_MOB_ENTRY_SIZE 16 +#define SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE 8 +#define SVGA3D_OTABLE_SURFACE_ENTRY_SIZE 64 +#define SVGA3D_OTABLE_SHADER_ENTRY_SIZE 16 +#define SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE 64 +#define SVGA3D_CONTEXT_DATA_SIZE 16384 + +/* + * SVGA3dCmdSetOTableBase -- + * + * This command allows the guest to specify the base PPN of the + * specified object table. + */ + +typedef enum { + SVGA_OTABLE_MOB = 0, + SVGA_OTABLE_MIN = 0, + SVGA_OTABLE_SURFACE = 1, + SVGA_OTABLE_CONTEXT = 2, + SVGA_OTABLE_SHADER = 3, + SVGA_OTABLE_SCREEN_TARGET = 4, + SVGA_OTABLE_DX9_MAX = 5, + SVGA_OTABLE_MAX = 8 +} SVGAOTableType; + +typedef +struct { + SVGAOTableType type; + PPN baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} __packed +SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ + +typedef +struct { + SVGAOTableType type; + PPN64 baseAddress; + uint32 sizeInBytes; + uint32 validSizeInBytes; + SVGAMobFormat ptDepth; +} __packed +SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ + +typedef +struct { + SVGAOTableType type; +} __packed +SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ + +/* + * Define a memory object (Mob) in the OTable. + */ + +typedef +struct SVGA3dCmdDefineGBMob { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN base; + uint32 sizeInBytes; +} __packed +SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ + + +/* + * Destroys an object in the OTable. 
+ */ + +typedef +struct SVGA3dCmdDestroyGBMob { + SVGAMobId mobid; +} __packed +SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ + +/* + * Redefine an object in the OTable. + */ + +typedef +struct SVGA3dCmdRedefineGBMob { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN base; + uint32 sizeInBytes; +} __packed +SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ + +/* + * Define a memory object (Mob) in the OTable with a PPN64 base. + */ + +typedef +struct SVGA3dCmdDefineGBMob64 { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN64 base; + uint32 sizeInBytes; +} __packed +SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ + +/* + * Redefine an object in the OTable with PPN64 base. + */ + +typedef +struct SVGA3dCmdRedefineGBMob64 { + SVGAMobId mobid; + SVGAMobFormat ptDepth; + PPN64 base; + uint32 sizeInBytes; +} __packed +SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ + +/* + * Notification that the page tables have been modified. + */ + +typedef +struct SVGA3dCmdUpdateGBMobMapping { + SVGAMobId mobid; +} __packed +SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ + +/* + * Define a guest-backed surface. + */ + +typedef +struct SVGA3dCmdDefineGBSurface { + uint32 sid; + SVGA3dSurfaceFlags surfaceFlags; + SVGA3dSurfaceFormat format; + uint32 numMipLevels; + uint32 multisampleCount; + SVGA3dTextureFilter autogenFilter; + SVGA3dSize size; +} __packed +SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ + +/* + * Destroy a guest-backed surface. + */ + +typedef +struct SVGA3dCmdDestroyGBSurface { + uint32 sid; +} __packed +SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ + +/* + * Bind a guest-backed surface to an object. + */ + +typedef +struct SVGA3dCmdBindGBSurface { + uint32 sid; + SVGAMobId mobid; +} __packed +SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ + +/* + * Conditionally bind a mob to a guest backed surface if testMobid + * matches the currently bound mob. Optionally issue a readback on + * the surface while it is still bound to the old mobid if the mobid + * is changed by this command. + */ + +#define SVGA3D_COND_BIND_GB_SURFACE_FLAG_READBACK (1 << 0) + +typedef +struct{ + uint32 sid; + SVGAMobId testMobid; + SVGAMobId mobid; + uint32 flags; +} __packed +SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ + +/* + * Update an image in a guest-backed surface. + * (Inform the device that the guest-contents have been updated.) + */ + +typedef +struct SVGA3dCmdUpdateGBImage { + SVGA3dSurfaceImageId image; + SVGA3dBox box; +} __packed +SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ + +/* + * Update an entire guest-backed surface. + * (Inform the device that the guest-contents have been updated.) + */ + +typedef +struct SVGA3dCmdUpdateGBSurface { + uint32 sid; +} __packed +SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ + +/* + * Readback an image in a guest-backed surface. + * (Request the device to flush the dirty contents into the guest.) + */ + +typedef +struct SVGA3dCmdReadbackGBImage { + SVGA3dSurfaceImageId image; +} __packed +SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ + +/* + * Readback an entire guest-backed surface. + * (Request the device to flush the dirty contents into the guest.) + */ + +typedef +struct SVGA3dCmdReadbackGBSurface { + uint32 sid; +} __packed +SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ + +/* + * Readback a sub rect of an image in a guest-backed surface. 
After + * issuing this command the driver is required to issue an update call + * of the same region before issuing any other commands that reference + * this surface or rendering is not guaranteed. + */ + +typedef +struct SVGA3dCmdReadbackGBImagePartial { + SVGA3dSurfaceImageId image; + SVGA3dBox box; + uint32 invertBox; +} __packed +SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ + +/* + * Invalidate an image in a guest-backed surface. + * (Notify the device that the contents can be lost.) + */ + +typedef +struct SVGA3dCmdInvalidateGBImage { + SVGA3dSurfaceImageId image; +} __packed +SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ + +/* + * Invalidate an entire guest-backed surface. + * (Notify the device that the contents of all images can be lost.) + */ + +typedef +struct SVGA3dCmdInvalidateGBSurface { + uint32 sid; +} __packed +SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ + +/* + * Invalidate a sub rect of an image in a guest-backed surface. After + * issuing this command the driver is required to issue an update call + * of the same region before issuing any other commands that reference + * this surface or rendering is not guaranteed. + */ + +typedef +struct SVGA3dCmdInvalidateGBImagePartial { + SVGA3dSurfaceImageId image; + SVGA3dBox box; + uint32 invertBox; +} __packed +SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ + +/* + * Define a guest-backed context. + */ + +typedef +struct SVGA3dCmdDefineGBContext { + uint32 cid; +} __packed +SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ + +/* + * Destroy a guest-backed context. + */ + +typedef +struct SVGA3dCmdDestroyGBContext { + uint32 cid; +} __packed +SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ + +/* + * Bind a guest-backed context. + * + * validContents should be set to 0 for new contexts, + * and 1 if this is an old context which is getting paged + * back on to the device. + * + * For new contexts, it is recommended that the driver + * issue commands to initialize all interesting state + * prior to rendering. + */ + +typedef +struct SVGA3dCmdBindGBContext { + uint32 cid; + SVGAMobId mobid; + uint32 validContents; +} __packed +SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ + +/* + * Readback a guest-backed context. + * (Request that the device flush the contents back into guest memory.) + */ + +typedef +struct SVGA3dCmdReadbackGBContext { + uint32 cid; +} __packed +SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ + +/* + * Invalidate a guest-backed context. + */ +typedef +struct SVGA3dCmdInvalidateGBContext { + uint32 cid; +} __packed +SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ + +/* + * Define a guest-backed shader. + */ + +typedef +struct SVGA3dCmdDefineGBShader { + uint32 shid; + SVGA3dShaderType type; + uint32 sizeInBytes; +} __packed +SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ + +/* + * Bind a guest-backed shader. + */ + +typedef struct SVGA3dCmdBindGBShader { + uint32 shid; + SVGAMobId mobid; + uint32 offsetInBytes; +} __packed +SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ + +/* + * Destroy a guest-backed shader. 
+ */ + +typedef struct SVGA3dCmdDestroyGBShader { + uint32 shid; +} __packed +SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ + +typedef +struct { + uint32 cid; + uint32 regStart; + SVGA3dShaderType shaderType; + SVGA3dShaderConstType constType; + + /* + * Followed by a variable number of shader constants. + * + * Note that FLOAT and INT constants are 4-dwords in length, while + * BOOL constants are 1-dword in length. + */ +} __packed +SVGA3dCmdSetGBShaderConstInline; +/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ + +typedef +struct { + uint32 cid; + SVGA3dQueryType type; +} __packed +SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ + +typedef +struct { + uint32 cid; + SVGA3dQueryType type; + SVGAMobId mobid; + uint32 offset; +} __packed +SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ + + +/* + * SVGA_3D_CMD_WAIT_FOR_GB_QUERY -- + * + * The semantics of this command are identical to the + * SVGA_3D_CMD_WAIT_FOR_QUERY except that the results are written + * to a Mob instead of a GMR. + */ + +typedef +struct { + uint32 cid; + SVGA3dQueryType type; + SVGAMobId mobid; + uint32 offset; +} __packed +SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ + +typedef +struct { + SVGAMobId mobid; + uint32 fbOffset; + uint32 initalized; +} __packed +SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ + +typedef +struct { + SVGAMobId mobid; + uint32 gartOffset; +} __packed +SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ + + +typedef +struct { + uint32 gartOffset; + uint32 numPages; +} __packed +SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ + + +/* + * Screen Targets + */ +#define SVGA_STFLAG_PRIMARY (1 << 0) + +typedef +struct { + uint32 stid; + uint32 width; + uint32 height; + int32 xRoot; + int32 yRoot; + uint32 flags; +} __packed +SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ + +typedef +struct { + uint32 stid; +} __packed +SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ + +typedef +struct { + uint32 stid; + SVGA3dSurfaceImageId image; +} __packed +SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ + +typedef +struct { + uint32 stid; + SVGA3dBox box; +} __packed +SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ + +/* * Capability query index. * * Notes: @@ -1879,10 +2555,41 @@ SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM = 83, /* - * Don't add new caps into the previous section; the values in this - * enumeration must not change. You can put new values right before - * SVGA3D_DEVCAP_MAX. + * Deprecated. + */ + SVGA3D_DEVCAP_VGPU10 = 84, + + /* + * This contains several SVGA_3D_CAPS_VIDEO_DECODE elements + * ored together, one for every type of video decoding supported. */ + SVGA3D_DEVCAP_VIDEO_DECODE = 85, + + /* + * This contains several SVGA_3D_CAPS_VIDEO_PROCESS elements + * ored together, one for every type of video processing supported. + */ + SVGA3D_DEVCAP_VIDEO_PROCESS = 86, + + SVGA3D_DEVCAP_LINE_AA = 87, /* boolean */ + SVGA3D_DEVCAP_LINE_STIPPLE = 88, /* boolean */ + SVGA3D_DEVCAP_MAX_LINE_WIDTH = 89, /* float */ + SVGA3D_DEVCAP_MAX_AA_LINE_WIDTH = 90, /* float */ + + SVGA3D_DEVCAP_SURFACEFMT_YV12 = 91, + + /* + * Does the host support the SVGA logic ops commands? + */ + SVGA3D_DEVCAP_LOGICOPS = 92, + + /* + * What support does the host have for screen targets? + * + * See the SVGA3D_SCREENTARGET_CAP bits below. + */ + SVGA3D_DEVCAP_SCREENTARGETS = 93, + SVGA3D_DEVCAP_MAX /* This must be the last index. 
*/ } SVGA3dDevCapIndex; @@ -1893,4 +2600,28 @@ float f; } SVGA3dDevCapResult; +typedef enum { + SVGA3DCAPS_RECORD_UNKNOWN = 0, + SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, +} SVGA3dCapsRecordType; + +typedef +struct SVGA3dCapsRecordHeader { + uint32 length; + SVGA3dCapsRecordType type; +} +SVGA3dCapsRecordHeader; + +typedef +struct SVGA3dCapsRecord { + SVGA3dCapsRecordHeader header; + uint32 data[1]; +} +SVGA3dCapsRecord; + + +typedef uint32 SVGA3dCapPair[2]; + #endif /* _SVGA3D_REG_H_ */ --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h @@ -38,11 +38,15 @@ #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) +#define min_t(type, x, y) ((x) < (y) ? (x) : (y)) #define surf_size_struct SVGA3dSize #define u32 uint32 +#define u64 uint64_t #endif /* __KERNEL__ */ +#define U32_MAX ((u32)~0U) + #include "svga3d_reg.h" /* @@ -704,8 +708,8 @@ static inline u32 clamped_umul32(u32 a, u32 b) { - uint64_t tmp = (uint64_t) a*b; - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; + u64 tmp = (u64) a*b; + return (tmp > (u64) U32_MAX) ? U32_MAX : tmp; } static inline const struct svga3d_surface_desc * @@ -834,7 +838,7 @@ bool cubemap) { const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u32 total_size = 0; + u64 total_size = 0; u32 mip; for (mip = 0; mip < num_mip_levels; mip++) { @@ -847,7 +851,7 @@ if (cubemap) total_size *= SVGA3D_MAX_SURFACE_FACES; - return total_size; + return (u32) min_t(u64, total_size, (u64) U32_MAX); } --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/svga_reg.h +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/svga_reg.h @@ -169,7 +169,17 @@ SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ - SVGA_REG_TOP = 48, /* Must be 1 more than the last register */ + SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ + SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ + SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ + SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ + SVGA_REG_CMD_PREPEND_LOW = 53, + SVGA_REG_CMD_PREPEND_HIGH = 54, + SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, + SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, + SVGA_REG_MOB_MAX_SIZE = 57, + SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ /* Next 768 (== 256*3) registers exist for colormap */ @@ -431,7 +441,10 @@ #define SVGA_CAP_TRACES 0x00200000 #define SVGA_CAP_GMR2 0x00400000 #define SVGA_CAP_SCREEN_OBJECT_2 0x00800000 - +#define SVGA_CAP_COMMAND_BUFFERS 0x01000000 +#define SVGA_CAP_DEAD1 0x02000000 +#define SVGA_CAP_CMD_BUFFERS_2 0x04000000 +#define SVGA_CAP_GBOBJECTS 0x08000000 /* * FIFO register indices. 
--- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -40,6 +40,10 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; +static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | + TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_NO_EVICT; + static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; @@ -47,6 +51,9 @@ TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT; +static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | + TTM_PL_FLAG_CACHED; + struct ttm_placement vmw_vram_placement = { .fpfn = 0, .lpfn = 0, @@ -116,16 +123,26 @@ .busy_placement = &sys_placement_flags }; +struct ttm_placement vmw_sys_ne_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 1, + .placement = &sys_ne_placement_flags, + .num_busy_placement = 1, + .busy_placement = &sys_ne_placement_flags +}; + static uint32_t evictable_placement_flags[] = { TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, + VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED }; struct ttm_placement vmw_evictable_placement = { .fpfn = 0, .lpfn = 0, - .num_placement = 3, + .num_placement = 4, .placement = evictable_placement_flags, .num_busy_placement = 1, .busy_placement = &sys_placement_flags @@ -140,10 +157,21 @@ .busy_placement = gmr_vram_placement_flags }; +struct ttm_placement vmw_mob_placement = { + .fpfn = 0, + .lpfn = 0, + .num_placement = 1, + .num_busy_placement = 1, + .placement = &mob_placement_flags, + .busy_placement = &mob_placement_flags +}; + struct vmw_ttm_tt { struct ttm_dma_tt dma_ttm; struct vmw_private *dev_priv; int gmr_id; + struct vmw_mob *mob; + int mem_type; struct sg_table sgt; struct vmw_sg_table vsgt; uint64_t sg_alloc_size; @@ -244,6 +272,7 @@ viter->dma_address = &__vmw_piter_dma_addr; viter->page = &__vmw_piter_non_sg_page; viter->addrs = vsgt->addrs; + viter->pages = vsgt->pages; break; case vmw_dma_map_populate: case vmw_dma_map_bind: @@ -424,6 +453,63 @@ vmw_tt->mapped = false; } + +/** + * vmw_bo_map_dma - Make sure buffer object pages are visible to the device + * + * @bo: Pointer to a struct ttm_buffer_object + * + * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer + * instead of a pointer to a struct vmw_ttm_backend as argument. + * Note that the buffer object must be either pinned or reserved before + * calling this function. + */ +int vmw_bo_map_dma(struct ttm_buffer_object *bo) +{ + struct vmw_ttm_tt *vmw_tt = + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + + return vmw_ttm_map_dma(vmw_tt); +} + + +/** + * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device + * + * @bo: Pointer to a struct ttm_buffer_object + * + * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer + * instead of a pointer to a struct vmw_ttm_backend as argument. + */ +void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) +{ + struct vmw_ttm_tt *vmw_tt = + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + + vmw_ttm_unmap_dma(vmw_tt); +} + + +/** + * vmw_bo_sg_table - Return a struct vmw_sg_table object for a + * TTM buffer object + * + * @bo: Pointer to a struct ttm_buffer_object + * + * Returns a pointer to a struct vmw_sg_table object. The object should + * not be freed after use. + * Note that for the device addresses to be valid, the buffer object must + * either be reserved or pinned. 
+ */ +const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) +{ + struct vmw_ttm_tt *vmw_tt = + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + + return &vmw_tt->vsgt; +} + + static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct vmw_ttm_tt *vmw_be = @@ -435,9 +521,27 @@ return ret; vmw_be->gmr_id = bo_mem->start; + vmw_be->mem_type = bo_mem->mem_type; + + switch (bo_mem->mem_type) { + case VMW_PL_GMR: + return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, + ttm->num_pages, vmw_be->gmr_id); + case VMW_PL_MOB: + if (unlikely(vmw_be->mob == NULL)) { + vmw_be->mob = + vmw_mob_create(ttm->num_pages); + if (unlikely(vmw_be->mob == NULL)) + return -ENOMEM; + } - return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, - ttm->num_pages, vmw_be->gmr_id); + return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, + &vmw_be->vsgt, ttm->num_pages, + vmw_be->gmr_id); + default: + BUG(); + } + return 0; } static int vmw_ttm_unbind(struct ttm_tt *ttm) @@ -445,7 +549,16 @@ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); - vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); + switch (vmw_be->mem_type) { + case VMW_PL_GMR: + vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); + break; + case VMW_PL_MOB: + vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); + break; + default: + BUG(); + } if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) vmw_ttm_unmap_dma(vmw_be); @@ -453,6 +566,7 @@ return 0; } + static void vmw_ttm_destroy(struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = @@ -463,9 +577,14 @@ ttm_dma_tt_fini(&vmw_be->dma_ttm); else ttm_tt_fini(ttm); + + if (vmw_be->mob) + vmw_mob_destroy(vmw_be->mob); + kfree(vmw_be); } + static int vmw_ttm_populate(struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_tt = @@ -500,6 +619,12 @@ struct vmw_private *dev_priv = vmw_tt->dev_priv; struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); + + if (vmw_tt->mob) { + vmw_mob_destroy(vmw_tt->mob); + vmw_tt->mob = NULL; + } + vmw_ttm_unmap_dma(vmw_tt); if (dev_priv->map_mode == vmw_dma_alloc_coherent) { size_t size = @@ -517,7 +642,7 @@ .destroy = vmw_ttm_destroy, }; -struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, +static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, uint32_t page_flags, struct page *dummy_read_page) { @@ -530,6 +655,7 @@ vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); + vmw_be->mob = NULL; if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, @@ -546,12 +672,12 @@ return NULL; } -int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) +static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; } -int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, +static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { switch (type) { @@ -571,6 +697,7 @@ man->default_caching = TTM_PL_FLAG_CACHED; break; case VMW_PL_GMR: + case VMW_PL_MOB: /* * "Guest Memory Regions" is an aperture like feature with * one slot per bo. 
There is an upper limit of the number of @@ -589,7 +716,7 @@ return 0; } -void vmw_evict_flags(struct ttm_buffer_object *bo, +static void vmw_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { *placement = vmw_sys_placement; @@ -618,6 +745,7 @@ switch (mem->mem_type) { case TTM_PL_SYSTEM: case VMW_PL_GMR: + case VMW_PL_MOB: return 0; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; @@ -677,6 +805,38 @@ VMW_FENCE_WAIT_TIMEOUT); } +/** + * vmw_move_notify - TTM move_notify_callback + * + * @bo: The TTM buffer object about to move. + * @mem: The struct ttm_mem_reg indicating to what memory + * region the move is taking place. + * + * Calls move_notify for all subsystems needing it. + * (currently only resources). + */ +static void vmw_move_notify(struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) +{ + vmw_resource_move_notify(bo, mem); +} + + +/** + * vmw_swap_notify - TTM move_notify_callback + * + * @bo: The TTM buffer object about to be swapped out. + */ +static void vmw_swap_notify(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + + spin_lock(&bdev->fence_lock); + ttm_bo_wait(bo, false, false, false); + spin_unlock(&bdev->fence_lock); +} + + struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, @@ -691,8 +851,8 @@ .sync_obj_flush = vmw_sync_obj_flush, .sync_obj_unref = vmw_sync_obj_unref, .sync_obj_ref = vmw_sync_obj_ref, - .move_notify = NULL, - .swap_notify = NULL, + .move_notify = vmw_move_notify, + .swap_notify = vmw_swap_notify, .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, .io_mem_free = &vmw_ttm_io_mem_free, --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -32,12 +32,30 @@ struct vmw_user_context { struct ttm_base_object base; struct vmw_resource res; + struct vmw_ctx_binding_state cbs; }; + + +typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); + static void vmw_user_context_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_context_base_to_res(struct ttm_base_object *base); +static int vmw_gb_context_create(struct vmw_resource *res); +static int vmw_gb_context_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf); +static int vmw_gb_context_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf); +static int vmw_gb_context_destroy(struct vmw_resource *res); +static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind); +static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); +static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); +static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); static uint64_t vmw_user_context_size; static const struct vmw_user_resource_conv user_context_conv = { @@ -62,6 +80,23 @@ .unbind = NULL }; +static const struct vmw_res_func vmw_gb_context_func = { + .res_type = vmw_res_context, + .needs_backup = true, + .may_evict = true, + .type_name = "guest backed contexts", + .backup_placement = &vmw_mob_placement, + .create = vmw_gb_context_create, + .destroy = vmw_gb_context_destroy, + .bind = vmw_gb_context_bind, + .unbind = vmw_gb_context_unbind +}; + +static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { + [vmw_ctx_binding_shader] = 
vmw_context_scrub_shader, + [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, + [vmw_ctx_binding_tex] = vmw_context_scrub_texture }; + /** * Context management: */ @@ -76,6 +111,20 @@ } *cmd; + if (res->func->destroy == vmw_gb_context_destroy) { + mutex_lock(&dev_priv->cmdbuf_mutex); + mutex_lock(&dev_priv->binding_mutex); + (void) vmw_context_binding_state_kill + (&container_of(res, struct vmw_user_context, res)->cbs); + (void) vmw_gb_context_destroy(res); + if (dev_priv->pinned_bo != NULL && + !dev_priv->query_cid_valid) + __vmw_execbuf_release_pinned_bo(dev_priv, NULL); + mutex_unlock(&dev_priv->binding_mutex); + mutex_unlock(&dev_priv->cmdbuf_mutex); + return; + } + vmw_execbuf_release_pinned_bo(dev_priv); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { @@ -92,6 +141,33 @@ vmw_3d_resource_dec(dev_priv, false); } +static int vmw_gb_context_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + void (*res_free) (struct vmw_resource *res)) +{ + int ret; + struct vmw_user_context *uctx = + container_of(res, struct vmw_user_context, res); + + ret = vmw_resource_init(dev_priv, res, true, + res_free, &vmw_gb_context_func); + res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; + + if (unlikely(ret != 0)) { + if (res_free) + res_free(res); + else + kfree(res); + return ret; + } + + memset(&uctx->cbs, 0, sizeof(uctx->cbs)); + INIT_LIST_HEAD(&uctx->cbs.list); + + vmw_resource_activate(res, vmw_hw_context_destroy); + return 0; +} + static int vmw_context_init(struct vmw_private *dev_priv, struct vmw_resource *res, void (*res_free) (struct vmw_resource *res)) @@ -103,6 +179,9 @@ SVGA3dCmdDefineContext body; } *cmd; + if (dev_priv->has_mob) + return vmw_gb_context_init(dev_priv, res, res_free); + ret = vmw_resource_init(dev_priv, res, false, res_free, &vmw_legacy_context_func); @@ -154,6 +233,180 @@ return (ret == 0) ? 
res : NULL; } + +static int vmw_gb_context_create(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + int ret; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineGBContext body; + } *cmd; + + if (likely(res->id != -1)) + return 0; + + ret = vmw_resource_alloc_id(res); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a context id.\n"); + goto out_no_id; + } + + if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) { + ret = -EBUSY; + goto out_no_fifo; + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for context " + "creation.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = res->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + (void) vmw_3d_resource_inc(dev_priv, false); + + return 0; + +out_no_fifo: + vmw_resource_release_id(res); +out_no_id: + return ret; +} + +static int vmw_gb_context_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBContext body; + } *cmd; + struct ttm_buffer_object *bo = val_buf->bo; + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for context " + "binding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = res->id; + cmd->body.mobid = bo->mem.start; + cmd->body.validContents = res->backup_dirty; + res->backup_dirty = false; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +static int vmw_gb_context_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct ttm_buffer_object *bo = val_buf->bo; + struct vmw_fence_obj *fence; + struct vmw_user_context *uctx = + container_of(res, struct vmw_user_context, res); + + struct { + SVGA3dCmdHeader header; + SVGA3dCmdReadbackGBContext body; + } *cmd1; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBContext body; + } *cmd2; + uint32_t submit_size; + uint8_t *cmd; + + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + + mutex_lock(&dev_priv->binding_mutex); + vmw_context_binding_state_scrub(&uctx->cbs); + + submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); + + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for context " + "unbinding.\n"); + mutex_unlock(&dev_priv->binding_mutex); + return -ENOMEM; + } + + cmd2 = (void *) cmd; + if (readback) { + cmd1 = (void *) cmd; + cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT; + cmd1->header.size = sizeof(cmd1->body); + cmd1->body.cid = res->id; + cmd2 = (void *) (&cmd1[1]); + } + cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; + cmd2->header.size = sizeof(cmd2->body); + cmd2->body.cid = res->id; + cmd2->body.mobid = SVGA3D_INVALID_ID; + + vmw_fifo_commit(dev_priv, submit_size); + mutex_unlock(&dev_priv->binding_mutex); + + /* + * Create a fence object and fence the backup buffer. 
+ */ + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + + vmw_fence_single_bo(bo, fence); + + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + + return 0; +} + +static int vmw_gb_context_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyGBContext body; + } *cmd; + + if (likely(res->id == -1)) + return 0; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for context " + "destruction.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = res->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + if (dev_priv->query_cid == res->id) + dev_priv->query_cid_valid = false; + vmw_resource_release_id(res); + vmw_3d_resource_dec(dev_priv, false); + + return 0; +} + /** * User-space context management: */ @@ -272,3 +525,380 @@ return ret; } + +/** + * vmw_context_scrub_shader - scrub a shader binding from a context. + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) +{ + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdSetShader body; + } *cmd; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "unbinding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_SET_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = bi->ctx->id; + cmd->body.type = bi->i1.shader_type; + cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_context_scrub_render_target - scrub a render target binding + * from a context. + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + */ +static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind) +{ + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdSetRenderTarget body; + } *cmd; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for render target " + "unbinding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = bi->ctx->id; + cmd->body.type = bi->i1.rt_type; + cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + cmd->body.target.face = 0; + cmd->body.target.mipmap = 0; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_context_scrub_texture - scrub a texture binding from a context. + * + * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. + * + * TODO: Possibly complement this function with a function that takes + * a list of texture bindings and combines them to a single command. 
+ */ +static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, + bool rebind) +{ + struct vmw_private *dev_priv = bi->ctx->dev_priv; + struct { + SVGA3dCmdHeader header; + struct { + SVGA3dCmdSetTextureState c; + SVGA3dTextureState s1; + } body; + } *cmd; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for texture " + "unbinding.\n"); + return -ENOMEM; + } + + + cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; + cmd->header.size = sizeof(cmd->body); + cmd->body.c.cid = bi->ctx->id; + cmd->body.s1.stage = bi->i1.texture_stage; + cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; + cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +/** + * vmw_context_binding_drop: Stop tracking a context binding + * + * @cb: Pointer to binding tracker storage. + * + * Stops tracking a context binding, and re-initializes its storage. + * Typically used when the context binding is replaced with a binding to + * another (or the same, for that matter) resource. + */ +static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) +{ + list_del(&cb->ctx_list); + if (!list_empty(&cb->res_list)) + list_del(&cb->res_list); + cb->bi.ctx = NULL; +} + +/** + * vmw_context_binding_add: Start tracking a context binding + * + * @cbs: Pointer to the context binding state tracker. + * @bi: Information about the binding to track. + * + * Performs basic checks on the binding to make sure arguments are within + * bounds and then starts tracking the binding in the context binding + * state structure @cbs. + */ +int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, + const struct vmw_ctx_bindinfo *bi) +{ + struct vmw_ctx_binding *loc; + + switch (bi->bt) { + case vmw_ctx_binding_rt: + if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { + DRM_ERROR("Illegal render target type %u.\n", + (unsigned) bi->i1.rt_type); + return -EINVAL; + } + loc = &cbs->render_targets[bi->i1.rt_type]; + break; + case vmw_ctx_binding_tex: + if (unlikely((unsigned)bi->i1.texture_stage >= + SVGA3D_NUM_TEXTURE_UNITS)) { + DRM_ERROR("Illegal texture/sampler unit %u.\n", + (unsigned) bi->i1.texture_stage); + return -EINVAL; + } + loc = &cbs->texture_units[bi->i1.texture_stage]; + break; + case vmw_ctx_binding_shader: + if (unlikely((unsigned)bi->i1.shader_type >= + SVGA3D_SHADERTYPE_MAX)) { + DRM_ERROR("Illegal shader type %u.\n", + (unsigned) bi->i1.shader_type); + return -EINVAL; + } + loc = &cbs->shaders[bi->i1.shader_type]; + break; + default: + BUG(); + } + + if (loc->bi.ctx != NULL) + vmw_context_binding_drop(loc); + + loc->bi = *bi; + loc->bi.scrubbed = false; + list_add_tail(&loc->ctx_list, &cbs->list); + INIT_LIST_HEAD(&loc->res_list); + + return 0; +} + +/** + * vmw_context_binding_transfer: Transfer a context binding tracking entry. + * + * @cbs: Pointer to the persistent context binding state tracker. + * @bi: Information about the binding to track. 
+ * + */ +static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, + const struct vmw_ctx_bindinfo *bi) +{ + struct vmw_ctx_binding *loc; + + switch (bi->bt) { + case vmw_ctx_binding_rt: + loc = &cbs->render_targets[bi->i1.rt_type]; + break; + case vmw_ctx_binding_tex: + loc = &cbs->texture_units[bi->i1.texture_stage]; + break; + case vmw_ctx_binding_shader: + loc = &cbs->shaders[bi->i1.shader_type]; + break; + default: + BUG(); + } + + if (loc->bi.ctx != NULL) + vmw_context_binding_drop(loc); + + if (bi->res != NULL) { + loc->bi = *bi; + list_add_tail(&loc->ctx_list, &cbs->list); + list_add_tail(&loc->res_list, &bi->res->binding_head); + } +} + +/** + * vmw_context_binding_kill - Kill a binding on the device + * and stop tracking it. + * + * @cb: Pointer to binding tracker storage. + * + * Emits FIFO commands to scrub a binding represented by @cb. + * Then stops tracking the binding and re-initializes its storage. + */ +static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) +{ + if (!cb->bi.scrubbed) { + (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); + cb->bi.scrubbed = true; + } + vmw_context_binding_drop(cb); +} + +/** + * vmw_context_binding_state_kill - Kill all bindings associated with a + * struct vmw_ctx_binding state structure, and re-initialize the structure. + * + * @cbs: Pointer to the context binding state tracker. + * + * Emits commands to scrub all bindings associated with the + * context binding state tracker. Then re-initializes the whole structure. + */ +static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) +{ + struct vmw_ctx_binding *entry, *next; + + list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) + vmw_context_binding_kill(entry); +} + +/** + * vmw_context_binding_state_scrub - Scrub all bindings associated with a + * struct vmw_ctx_binding state structure. + * + * @cbs: Pointer to the context binding state tracker. + * + * Emits commands to scrub all bindings associated with the + * context binding state tracker. + */ +static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) +{ + struct vmw_ctx_binding *entry; + + list_for_each_entry(entry, &cbs->list, ctx_list) { + if (!entry->bi.scrubbed) { + (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); + entry->bi.scrubbed = true; + } + } +} + +/** + * vmw_context_binding_res_list_kill - Kill all bindings on a + * resource binding list + * + * @head: list head of resource binding list + * + * Kills all bindings associated with a specific resource. Typically + * called before the resource is destroyed. + */ +void vmw_context_binding_res_list_kill(struct list_head *head) +{ + struct vmw_ctx_binding *entry, *next; + + list_for_each_entry_safe(entry, next, head, res_list) + vmw_context_binding_kill(entry); +} + +/** + * vmw_context_binding_res_list_scrub - Scrub all bindings on a + * resource binding list + * + * @head: list head of resource binding list + * + * Scrub all bindings associated with a specific resource. Typically + * called before the resource is evicted. + */ +void vmw_context_binding_res_list_scrub(struct list_head *head) +{ + struct vmw_ctx_binding *entry; + + list_for_each_entry(entry, head, res_list) { + if (!entry->bi.scrubbed) { + (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); + entry->bi.scrubbed = true; + } + } +} + +/** + * vmw_context_binding_state_transfer - Commit staged binding info + * + * @ctx: Pointer to context to commit the staged binding info to. 
+ * @from: Staged binding info built during execbuf.
+ *
+ * Transfers binding info from a temporary structure to the persistent
+ * structure in the context. This can be done once commands have been submitted.
+ */
+void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
+                                        struct vmw_ctx_binding_state *from)
+{
+        struct vmw_user_context *uctx =
+                container_of(ctx, struct vmw_user_context, res);
+        struct vmw_ctx_binding *entry, *next;
+
+        list_for_each_entry_safe(entry, next, &from->list, ctx_list)
+                vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
+}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+        struct vmw_ctx_binding *entry;
+        struct vmw_user_context *uctx =
+                container_of(ctx, struct vmw_user_context, res);
+        struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+        int ret;
+
+        list_for_each_entry(entry, &cbs->list, ctx_list) {
+                if (likely(!entry->bi.scrubbed))
+                        continue;
+
+                if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+                            SVGA3D_INVALID_ID))
+                        continue;
+
+                ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+                if (unlikely(ret != 0))
+                        return ret;
+
+                entry->bi.scrubbed = false;
+        }
+
+        return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+        return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
--- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -290,8 +290,7 @@
 /**
  * vmw_bo_pin - Pin or unpin a buffer object without moving it.
  *
- * @bo: The buffer object. Must be reserved, and present either in VRAM
- * or GMR memory.
+ * @bo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
* */ @@ -303,10 +302,9 @@ int ret; lockdep_assert_held(&bo->resv->lock.base); - BUG_ON(old_mem_type != TTM_PL_VRAM && - old_mem_type != VMW_PL_GMR); - pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; + pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB + | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; if (pin) pl_flags |= TTM_PL_FLAG_NO_EVICT; --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -112,6 +112,21 @@ #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ struct drm_vmw_update_layout_arg) +#define DRM_IOCTL_VMW_CREATE_SHADER \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ + struct drm_vmw_shader_create_arg) +#define DRM_IOCTL_VMW_UNREF_SHADER \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ + struct drm_vmw_shader_arg) +#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ + union drm_vmw_gb_surface_create_arg) +#define DRM_IOCTL_VMW_GB_SURFACE_REF \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ + union drm_vmw_gb_surface_reference_arg) +#define DRM_IOCTL_VMW_SYNCCPU \ + DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ + struct drm_vmw_synccpu_arg) /** * The core DRM version of this macro doesn't account for @@ -177,6 +192,21 @@ VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, DRM_MASTER | DRM_UNLOCKED), + VMW_IOCTL_DEF(VMW_CREATE_SHADER, + vmw_shader_define_ioctl, + DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(VMW_UNREF_SHADER, + vmw_shader_destroy_ioctl, + DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, + vmw_gb_surface_define_ioctl, + DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, + vmw_gb_surface_reference_ioctl, + DRM_AUTH | DRM_UNLOCKED), + VMW_IOCTL_DEF(VMW_SYNCCPU, + vmw_user_dmabuf_synccpu_ioctl, + DRM_AUTH | DRM_UNLOCKED), }; static struct pci_device_id vmw_pci_id_list[] = { @@ -189,6 +219,7 @@ static int vmw_force_iommu; static int vmw_restrict_iommu; static int vmw_force_coherent; +static int vmw_restrict_dma_mask; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); @@ -203,6 +234,8 @@ module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); module_param_named(force_coherent, vmw_force_coherent, int, 0600); +MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); +module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); static void vmw_print_capabilities(uint32_t capabilities) @@ -240,38 +273,52 @@ DRM_INFO(" GMR2.\n"); if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) DRM_INFO(" Screen Object 2.\n"); + if (capabilities & SVGA_CAP_COMMAND_BUFFERS) + DRM_INFO(" Command Buffers.\n"); + if (capabilities & SVGA_CAP_CMD_BUFFERS_2) + DRM_INFO(" Command Buffers 2.\n"); + if (capabilities & SVGA_CAP_GBOBJECTS) + DRM_INFO(" Guest Backed Resources.\n"); } - /** - * vmw_execbuf_prepare_dummy_query - Initialize a query result structure at - * the start of a buffer object. + * vmw_dummy_query_bo_create - create a bo to hold a dummy query result * - * @dev_priv: The device private structure. + * @dev_priv: A device private structure. * - * This function will idle the buffer using an uninterruptible wait, then - * map the first page and initialize a pending occlusion query result structure, - * Finally it will unmap the buffer. 
+ * This function creates a small buffer object that holds the query + * result for dummy queries emitted as query barriers. + * The function will then map the first page and initialize a pending + * occlusion query result structure, Finally it will unmap the buffer. + * No interruptible waits are done within this function. * - * TODO: Since we're only mapping a single page, we should optimize the map - * to use kmap_atomic / iomap_atomic. + * Returns an error if bo creation or initialization fails. */ -static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv) +static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { + int ret; + struct ttm_buffer_object *bo; struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; - int ret; - struct ttm_bo_device *bdev = &dev_priv->bdev; - struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; - ttm_bo_reserve(bo, false, false, false, 0); - spin_lock(&bdev->fence_lock); - ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bdev->fence_lock); + /* + * Create the bo as pinned, so that a tryreserve will + * immediately succeed. This is because we're the only + * user of the bo currently. + */ + ret = ttm_bo_create(&dev_priv->bdev, + PAGE_SIZE, + ttm_bo_type_device, + &vmw_sys_ne_placement, + 0, false, NULL, + &bo); + if (unlikely(ret != 0)) - (void) vmw_fallback_wait(dev_priv, false, true, 0, false, - 10*HZ); + return ret; + + ret = ttm_bo_reserve(bo, false, true, false, 0); + BUG_ON(ret != 0); ret = ttm_bo_kmap(bo, 0, 1, &map); if (likely(ret == 0)) { @@ -280,34 +327,19 @@ result->state = SVGA3D_QUERYSTATE_PENDING; result->result32 = 0xff; ttm_bo_kunmap(&map); - } else - DRM_ERROR("Dummy query buffer map failed.\n"); + } + vmw_bo_pin(bo, false); ttm_bo_unreserve(bo); -} + if (unlikely(ret != 0)) { + DRM_ERROR("Dummy query buffer map failed.\n"); + ttm_bo_unref(&bo); + } else + dev_priv->dummy_query_bo = bo; -/** - * vmw_dummy_query_bo_create - create a bo to hold a dummy query result - * - * @dev_priv: A device private structure. - * - * This function creates a small buffer object that holds the query - * result for dummy queries emitted as query barriers. - * No interruptible waits are done within this function. - * - * Returns an error if bo creation fails. - */ -static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) -{ - return ttm_bo_create(&dev_priv->bdev, - PAGE_SIZE, - ttm_bo_type_device, - &vmw_vram_sys_placement, - 0, false, NULL, - &dev_priv->dummy_query_bo); + return ret; } - static int vmw_request_device(struct vmw_private *dev_priv) { int ret; @@ -318,14 +350,24 @@ return ret; } vmw_fence_fifo_up(dev_priv->fman); + if (dev_priv->has_mob) { + ret = vmw_otables_setup(dev_priv); + if (unlikely(ret != 0)) { + DRM_ERROR("Unable to initialize " + "guest Memory OBjects.\n"); + goto out_no_mob; + } + } ret = vmw_dummy_query_bo_create(dev_priv); if (unlikely(ret != 0)) goto out_no_query_bo; - vmw_dummy_query_bo_prepare(dev_priv); return 0; out_no_query_bo: + if (dev_priv->has_mob) + vmw_otables_takedown(dev_priv); +out_no_mob: vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); return ret; @@ -341,10 +383,13 @@ BUG_ON(dev_priv->pinned_bo != NULL); ttm_bo_unref(&dev_priv->dummy_query_bo); + if (dev_priv->has_mob) + vmw_otables_takedown(dev_priv); vmw_fence_fifo_down(dev_priv->fman); vmw_fifo_release(dev_priv, &dev_priv->fifo); } + /** * Increase the 3d resource refcount. 
* If the count was prevously zero, initialize the fifo, switching to svga @@ -510,6 +555,33 @@ return 0; } +/** + * vmw_dma_masks - set required page- and dma masks + * + * @dev: Pointer to struct drm-device + * + * With 32-bit we can only handle 32 bit PFNs. Optionally set that + * restriction also for 64-bit systems. + */ +#ifdef CONFIG_INTEL_IOMMU +static int vmw_dma_masks(struct vmw_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + + if (intel_iommu_enabled && + (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { + DRM_INFO("Restricting DMA addresses to 44 bits.\n"); + return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); + } + return 0; +} +#else +static int vmw_dma_masks(struct vmw_private *dev_priv) +{ + return 0; +} +#endif + static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { struct vmw_private *dev_priv; @@ -532,6 +604,7 @@ mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); + mutex_init(&dev_priv->binding_mutex); rwlock_init(&dev_priv->resource_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { @@ -578,14 +651,9 @@ vmw_get_initial_size(dev_priv); - if (dev_priv->capabilities & SVGA_CAP_GMR) { - dev_priv->max_gmr_descriptors = - vmw_read(dev_priv, - SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH); + if (dev_priv->capabilities & SVGA_CAP_GMR2) { dev_priv->max_gmr_ids = vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); - } - if (dev_priv->capabilities & SVGA_CAP_GMR2) { dev_priv->max_gmr_pages = vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); dev_priv->memory_size = @@ -598,23 +666,45 @@ */ dev_priv->memory_size = 512*1024*1024; } + dev_priv->max_mob_pages = 0; + dev_priv->max_mob_size = 0; + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + uint64_t mem_size = + vmw_read(dev_priv, + SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); + + dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; + dev_priv->prim_bb_mem = + vmw_read(dev_priv, + SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); + dev_priv->max_mob_size = + vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); + } else + dev_priv->prim_bb_mem = dev_priv->vram_size; + + ret = vmw_dma_masks(dev_priv); + if (unlikely(ret != 0)) { + mutex_unlock(&dev_priv->hw_mutex); + goto out_err0; + } + + if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) + dev_priv->prim_bb_mem = dev_priv->vram_size; mutex_unlock(&dev_priv->hw_mutex); vmw_print_capabilities(dev_priv->capabilities); - if (dev_priv->capabilities & SVGA_CAP_GMR) { + if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", (unsigned)dev_priv->max_gmr_ids); - DRM_INFO("Max GMR descriptors is %u\n", - (unsigned)dev_priv->max_gmr_descriptors); - } - if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max number of GMR pages is %u\n", (unsigned)dev_priv->max_gmr_pages); DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", (unsigned)dev_priv->memory_size / 1024); } + DRM_INFO("Maximum display memory size is %u kiB\n", + dev_priv->prim_bb_mem / 1024); DRM_INFO("VRAM at 0x%08x size is %u kiB\n", dev_priv->vram_start, dev_priv->vram_size / 1024); DRM_INFO("MMIO at 0x%08x size is %u kiB\n", @@ -649,12 +739,22 @@ dev_priv->has_gmr = true; if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, - dev_priv->max_gmr_ids) != 0) { + VMW_PL_GMR) != 0) { DRM_INFO("No GMR memory available. 
" "Graphics memory resources are very limited.\n"); dev_priv->has_gmr = false; } + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + dev_priv->has_mob = true; + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, + VMW_PL_MOB) != 0) { + DRM_INFO("No MOB memory available. " + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } + } + dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, dev_priv->mmio_size); @@ -757,6 +857,8 @@ iounmap(dev_priv->mmio_virt); out_err3: arch_phys_wc_del(dev_priv->mmio_mtrr); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); if (dev_priv->has_gmr) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); @@ -801,6 +903,8 @@ ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); arch_phys_wc_del(dev_priv->mmio_mtrr); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); if (dev_priv->has_gmr) (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); @@ -840,6 +944,7 @@ drm_master_put(&vmw_fp->locked_master); } + vmw_compat_shader_man_destroy(vmw_fp->shman); ttm_object_file_release(&vmw_fp->tfile); kfree(vmw_fp); } @@ -859,11 +964,17 @@ if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; + vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); + if (IS_ERR(vmw_fp->shman)) + goto out_no_shman; + file_priv->driver_priv = vmw_fp; dev_priv->bdev.dev_mapping = dev->dev_mapping; return 0; +out_no_shman: + ttm_object_file_release(&vmw_fp->tfile); out_no_tfile: kfree(vmw_fp); return ret; --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -40,9 +40,9 @@ #include #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20120209" +#define VMWGFX_DRIVER_DATE "20140228" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 4 +#define VMWGFX_DRIVER_MINOR 5 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) @@ -50,19 +50,39 @@ #define VMWGFX_MAX_VALIDATIONS 2048 #define VMWGFX_MAX_DISPLAYS 16 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 +#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0 + +/* + * Perhaps we should have sysfs entries for these. 
+ */ +#define VMWGFX_NUM_GB_CONTEXT 256 +#define VMWGFX_NUM_GB_SHADER 20000 +#define VMWGFX_NUM_GB_SURFACE 32768 +#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS +#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ + VMWGFX_NUM_GB_SHADER +\ + VMWGFX_NUM_GB_SURFACE +\ + VMWGFX_NUM_GB_SCREEN_TARGET) #define VMW_PL_GMR TTM_PL_PRIV0 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0 +#define VMW_PL_MOB TTM_PL_PRIV1 +#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1 #define VMW_RES_CONTEXT ttm_driver_type0 #define VMW_RES_SURFACE ttm_driver_type1 #define VMW_RES_STREAM ttm_driver_type2 #define VMW_RES_FENCE ttm_driver_type3 +#define VMW_RES_SHADER ttm_driver_type4 + +struct vmw_compat_shader_manager; struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; struct list_head fence_events; + bool gb_aware; + struct vmw_compat_shader_manager *shman; }; struct vmw_dma_buffer { @@ -82,6 +102,7 @@ struct vmw_validate_buffer { struct ttm_validate_buffer base; struct drm_hash_item hash; + bool validate_as_mob; }; struct vmw_res_func; @@ -98,6 +119,7 @@ const struct vmw_res_func *func; struct list_head lru_head; /* Protected by the resource lock */ struct list_head mob_head; /* Protected by @backup reserved */ + struct list_head binding_head; /* Protected by binding_mutex */ void (*res_free) (struct vmw_resource *res); void (*hw_destroy) (struct vmw_resource *res); }; @@ -106,6 +128,7 @@ vmw_res_context, vmw_res_surface, vmw_res_stream, + vmw_res_shader, vmw_res_max }; @@ -154,6 +177,7 @@ }; struct vmw_relocation { + SVGAMobId *mob_loc; SVGAGuestPtr *location; uint32_t index; }; @@ -229,11 +253,77 @@ struct page *(*page)(struct vmw_piter *); }; +/* + * enum vmw_ctx_binding_type - abstract resource to context binding types + */ +enum vmw_ctx_binding_type { + vmw_ctx_binding_shader, + vmw_ctx_binding_rt, + vmw_ctx_binding_tex, + vmw_ctx_binding_max +}; + +/** + * struct vmw_ctx_bindinfo - structure representing a single context binding + * + * @ctx: Pointer to the context structure. NULL means the binding is not + * active. + * @res: Non ref-counted pointer to the bound resource. + * @bt: The binding type. + * @i1: Union of information needed to unbind. + */ +struct vmw_ctx_bindinfo { + struct vmw_resource *ctx; + struct vmw_resource *res; + enum vmw_ctx_binding_type bt; + bool scrubbed; + union { + SVGA3dShaderType shader_type; + SVGA3dRenderTargetType rt_type; + uint32 texture_stage; + } i1; +}; + +/** + * struct vmw_ctx_binding - structure representing a single context binding + * - suitable for tracking in a context + * + * @ctx_list: List head for context. + * @res_list: List head for bound resource. + * @bi: Binding info + */ +struct vmw_ctx_binding { + struct list_head ctx_list; + struct list_head res_list; + struct vmw_ctx_bindinfo bi; +}; + + +/** + * struct vmw_ctx_binding_state - context binding state + * + * @list: linked list of individual bindings. + * @render_targets: Render target bindings. + * @texture_units: Texture units/samplers bindings. + * @shaders: Shader bindings. + * + * Note that this structure also provides storage space for the individual + * struct vmw_ctx_binding objects, so that no dynamic allocation is needed + * for individual bindings. 
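+ * Lookup is therefore a simple array index: render target bindings are
+ * keyed by SVGA3dRenderTargetType, texture bindings by texture stage,
+ * and shader bindings by SVGA3dShaderType (see vmw_context_binding_add()).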
+ * + */ +struct vmw_ctx_binding_state { + struct list_head list; + struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; + struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; + struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX]; +}; + struct vmw_sw_context{ struct drm_open_hash res_ht; bool res_ht_initialized; bool kernel; /**< is the called made from the kernel */ - struct ttm_object_file *tfile; + struct vmw_fpriv *fp; struct list_head validate_nodes; struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; uint32_t cur_reloc; @@ -250,6 +340,8 @@ struct vmw_resource *last_query_ctx; bool needs_post_query_barrier; struct vmw_resource *error_resource; + struct vmw_ctx_binding_state staged_bindings; + struct list_head staged_shaders; }; struct vmw_legacy_display; @@ -281,6 +373,7 @@ unsigned int io_start; uint32_t vram_start; uint32_t vram_size; + uint32_t prim_bb_mem; uint32_t mmio_start; uint32_t mmio_size; uint32_t fb_max_width; @@ -290,11 +383,13 @@ __le32 __iomem *mmio_virt; int mmio_mtrr; uint32_t capabilities; - uint32_t max_gmr_descriptors; uint32_t max_gmr_ids; uint32_t max_gmr_pages; + uint32_t max_mob_pages; + uint32_t max_mob_size; uint32_t memory_size; bool has_gmr; + bool has_mob; struct mutex hw_mutex; /* @@ -370,6 +465,7 @@ struct vmw_sw_context ctx; struct mutex cmdbuf_mutex; + struct mutex binding_mutex; /** * Operating mode. @@ -415,6 +511,12 @@ * DMA mapping stuff. */ enum vmw_dma_map_mode map_mode; + + /* + * Guest Backed stuff + */ + struct ttm_buffer_object *otable_bo; + struct vmw_otable *otables; }; static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) @@ -471,23 +573,14 @@ * Resource utilities - vmwgfx_resource.c */ struct vmw_user_resource_conv; -extern const struct vmw_user_resource_conv *user_surface_converter; -extern const struct vmw_user_resource_conv *user_context_converter; -extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); extern void vmw_resource_unreference(struct vmw_resource **p_res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); +extern struct vmw_resource * +vmw_resource_reference_unless_doomed(struct vmw_resource *res); extern int vmw_resource_validate(struct vmw_resource *res); extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); extern bool vmw_resource_needs_backup(const struct vmw_resource *res); -extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_context_check(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - int id, - struct vmw_resource **p_res); extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, struct ttm_object_file *tfile, uint32_t handle, @@ -499,18 +592,6 @@ uint32_t handle, const struct vmw_user_resource_conv *converter, struct vmw_resource **p_res); -extern void vmw_surface_res_free(struct vmw_resource *res); -extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -extern int vmw_surface_check(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t handle, int *id); -extern int vmw_surface_validate(struct vmw_private *dev_priv, - struct vmw_surface 
*srf); extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); extern int vmw_dmabuf_init(struct vmw_private *dev_priv, struct vmw_dma_buffer *vmw_bo, @@ -519,10 +600,21 @@ void (*bo_free) (struct ttm_buffer_object *bo)); extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, struct ttm_object_file *tfile); +extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t size, + bool shareable, + uint32_t *handle, + struct vmw_dma_buffer **p_dma_buf); +extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, + struct vmw_dma_buffer *dma_buf, + uint32_t *handle); extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, uint32_t cur_validate_node); extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); @@ -622,10 +714,16 @@ extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_vram_gmr_ne_placement; extern struct ttm_placement vmw_sys_placement; +extern struct ttm_placement vmw_sys_ne_placement; extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; +extern struct ttm_placement vmw_mob_placement; extern struct ttm_bo_driver vmw_bo_driver; extern int vmw_dma_quiescent(struct drm_device *dev); +extern int vmw_bo_map_dma(struct ttm_buffer_object *bo); +extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo); +extern const struct vmw_sg_table * +vmw_bo_sg_table(struct ttm_buffer_object *bo); extern void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, unsigned long p_offs); @@ -832,6 +930,101 @@ uint32_t handle, uint32_t flags, int *prime_fd); +/* + * MemoryOBject management - vmwgfx_mob.c + */ +struct vmw_mob; +extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, + const struct vmw_sg_table *vsgt, + unsigned long num_data_pages, int32_t mob_id); +extern void vmw_mob_unbind(struct vmw_private *dev_priv, + struct vmw_mob *mob); +extern void vmw_mob_destroy(struct vmw_mob *mob); +extern struct vmw_mob *vmw_mob_create(unsigned long data_pages); +extern int vmw_otables_setup(struct vmw_private *dev_priv); +extern void vmw_otables_takedown(struct vmw_private *dev_priv); + +/* + * Context management - vmwgfx_context.c + */ + +extern const struct vmw_user_resource_conv *user_context_converter; + +extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); + +extern int vmw_context_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + int id, + struct vmw_resource **p_res); +extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, + const struct vmw_ctx_bindinfo *ci); +extern void +vmw_context_binding_state_transfer(struct vmw_resource *res, + struct vmw_ctx_binding_state *cbs); +extern void vmw_context_binding_res_list_kill(struct list_head *head); +extern void vmw_context_binding_res_list_scrub(struct list_head *head); +extern int vmw_context_rebind_all(struct vmw_resource *ctx); +extern struct list_head 
*vmw_context_binding_list(struct vmw_resource *ctx); + +/* + * Surface management - vmwgfx_surface.c + */ + +extern const struct vmw_user_resource_conv *user_surface_converter; + +extern void vmw_surface_res_free(struct vmw_resource *res); +extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_surface_check(struct vmw_private *dev_priv, + struct ttm_object_file *tfile, + uint32_t handle, int *id); +extern int vmw_surface_validate(struct vmw_private *dev_priv, + struct vmw_surface *srf); + +/* + * Shader management - vmwgfx_shader.c + */ + +extern const struct vmw_user_resource_conv *user_shader_converter; + +extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, + SVGA3dShaderType shader_type, + u32 *user_key); +extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, + struct list_head *list); +extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, + struct list_head *list); +extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, + u32 user_key, + SVGA3dShaderType shader_type, + struct list_head *list); +extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, + u32 user_key, const void *bytecode, + SVGA3dShaderType shader_type, + size_t size, + struct ttm_object_file *tfile, + struct list_head *list); +extern struct vmw_compat_shader_manager * +vmw_compat_shader_man_create(struct vmw_private *dev_priv); +extern void +vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); + /** * Inline helper functions --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -54,6 +54,8 @@ * @res: Ref-counted pointer to the resource. * @switch_backup: Boolean whether to switch backup buffer on unreserve. * @new_backup: Refcounted pointer to the new backup buffer. + * @staged_bindings: If @res is a context, tracks bindings set up during + * the command batch. Otherwise NULL. * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. * @first_usage: Set to true the first time the resource is referenced in * the command stream. @@ -65,12 +67,32 @@ struct drm_hash_item hash; struct vmw_resource *res; struct vmw_dma_buffer *new_backup; + struct vmw_ctx_binding_state *staged_bindings; unsigned long new_backup_offset; bool first_usage; bool no_buffer_needed; }; /** + * struct vmw_cmd_entry - Describe a command for the verifier + * + * @user_allow: Whether allowed from the execbuf ioctl. + * @gb_disable: Whether disabled if guest-backed objects are available. + * @gb_enable: Whether enabled iff guest-backed objects are available. 
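+ *
+ * Entries are built with the VMW_CMD_DEF() macro defined below; an
+ * illustrative entry (the real command table is defined later in this
+ * file and is not shown here) might read:
+ *
+ *	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
+ *		    true, false, true)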
+ */ +struct vmw_cmd_entry { + int (*func) (struct vmw_private *, struct vmw_sw_context *, + SVGA3dCmdHeader *); + bool user_allow; + bool gb_disable; + bool gb_enable; +}; + +#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \ + [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ + (_gb_disable), (_gb_enable)} + +/** * vmw_resource_unreserve - unreserve resources previously reserved for * command submission. * @@ -87,6 +109,18 @@ struct vmw_dma_buffer *new_backup = backoff ? NULL : val->new_backup; + /* + * Transfer staged context bindings to the + * persistent context binding tracker. + */ + if (unlikely(val->staged_bindings)) { + if (!backoff) { + vmw_context_binding_state_transfer + (val->res, val->staged_bindings); + } + kfree(val->staged_bindings); + val->staged_bindings = NULL; + } vmw_resource_unreserve(res, new_backup, val->new_backup_offset); vmw_dmabuf_unreference(&val->new_backup); @@ -146,6 +180,44 @@ } /** + * vmw_resource_context_res_add - Put resources previously bound to a context on + * the validation list + * + * @dev_priv: Pointer to a device private structure + * @sw_context: Pointer to a software context used for this command submission + * @ctx: Pointer to the context resource + * + * This function puts all resources that were previously bound to @ctx on + * the resource validation list. This is part of the context state reemission + */ +static int vmw_resource_context_res_add(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + struct vmw_resource *ctx) +{ + struct list_head *binding_list; + struct vmw_ctx_binding *entry; + int ret = 0; + struct vmw_resource *res; + + mutex_lock(&dev_priv->binding_mutex); + binding_list = vmw_context_binding_list(ctx); + + list_for_each_entry(entry, binding_list, ctx_list) { + res = vmw_resource_reference_unless_doomed(entry->bi.res); + if (unlikely(res == NULL)) + continue; + + ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + break; + } + + mutex_unlock(&dev_priv->binding_mutex); + return ret; +} + +/** * vmw_resource_relocation_add - Add a relocation to the relocation list * * @list: Pointer to head of relocation list. @@ -201,8 +273,12 @@ { struct vmw_resource_relocation *rel; - list_for_each_entry(rel, list, head) - cb[rel->offset] = rel->res->id; + list_for_each_entry(rel, list, head) { + if (likely(rel->res != NULL)) + cb[rel->offset] = rel->res->id; + else + cb[rel->offset] = SVGA_3D_CMD_NOP; + } } static int vmw_cmd_invalid(struct vmw_private *dev_priv, @@ -224,6 +300,7 @@ * * @sw_context: The software context used for this command submission batch. * @bo: The buffer object to add. + * @validate_as_mob: Validate this buffer as a MOB. * @p_val_node: If non-NULL Will be updated with the validate node number * on return. 
* @@ -232,6 +309,7 @@ */ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct ttm_buffer_object *bo, + bool validate_as_mob, uint32_t *p_val_node) { uint32_t val_node; @@ -244,6 +322,10 @@ &hash) == 0)) { vval_buf = container_of(hash, struct vmw_validate_buffer, hash); + if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) { + DRM_ERROR("Inconsistent buffer usage.\n"); + return -EINVAL; + } val_buf = &vval_buf->base; val_node = vval_buf - sw_context->val_bufs; } else { @@ -266,6 +348,7 @@ val_buf->bo = ttm_bo_reference(bo); val_buf->reserved = false; list_add_tail(&val_buf->head, &sw_context->validate_nodes); + vval_buf->validate_as_mob = validate_as_mob; } sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC; @@ -302,7 +385,8 @@ struct ttm_buffer_object *bo = &res->backup->base; ret = vmw_bo_to_validate_list - (sw_context, bo, NULL); + (sw_context, bo, + vmw_resource_needs_backup(res), NULL); if (unlikely(ret != 0)) return ret; @@ -339,22 +423,27 @@ } /** - * vmw_cmd_res_check - Check that a resource is present and if so, put it + * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it * on the resource validate list unless it's already there. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @res_type: Resource type. * @converter: User-space visisble type specific information. - * @id: Pointer to the location in the command buffer currently being + * @id: user-space resource id handle. + * @id_loc: Pointer to the location in the command buffer currently being * parsed from where the user-space resource id handle is located. + * @p_val: Pointer to pointer to resource validalidation node. Populated + * on exit. */ -static int vmw_cmd_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id, - struct vmw_resource_val_node **p_val) +static int +vmw_cmd_compat_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t id, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; @@ -362,15 +451,22 @@ struct vmw_resource_val_node *node; int ret; - if (*id == SVGA3D_INVALID_ID) + if (id == SVGA3D_INVALID_ID) { + if (p_val) + *p_val = NULL; + if (res_type == vmw_res_context) { + DRM_ERROR("Illegal context invalid id.\n"); + return -EINVAL; + } return 0; + } /* * Fastpath in case of repeated commands referencing the same * resource */ - if (likely(rcache->valid && *id == rcache->handle)) { + if (likely(rcache->valid && id == rcache->handle)) { const struct vmw_resource *res = rcache->res; rcache->node->first_usage = false; @@ -379,28 +475,28 @@ return vmw_resource_relocation_add (&sw_context->res_relocations, res, - id - sw_context->buf_start); + id_loc - sw_context->buf_start); } ret = vmw_user_resource_lookup_handle(dev_priv, - sw_context->tfile, - *id, + sw_context->fp->tfile, + id, converter, &res); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) *id); + (unsigned) id); dump_stack(); return ret; } rcache->valid = true; rcache->res = res; - rcache->handle = *id; + rcache->handle = id; ret = vmw_resource_relocation_add(&sw_context->res_relocations, res, - id - sw_context->buf_start); + id_loc - sw_context->buf_start); if (unlikely(ret != 0)) goto 
out_no_reloc; @@ -411,6 +507,22 @@ rcache->node = node; if (p_val) *p_val = node; + + if (dev_priv->has_mob && node->first_usage && + res_type == vmw_res_context) { + ret = vmw_resource_context_res_add(dev_priv, sw_context, res); + if (unlikely(ret != 0)) + goto out_no_reloc; + node->staged_bindings = + kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); + if (node->staged_bindings == NULL) { + DRM_ERROR("Failed to allocate context binding " + "information.\n"); + goto out_no_reloc; + } + INIT_LIST_HEAD(&node->staged_bindings->list); + } + vmw_resource_unreference(&res); return 0; @@ -422,6 +534,59 @@ } /** + * vmw_cmd_res_check - Check that a resource is present and if so, put it + * on the resource validate list unless it's already there. + * + * @dev_priv: Pointer to a device private structure. + * @sw_context: Pointer to the software context. + * @res_type: Resource type. + * @converter: User-space visisble type specific information. + * @id_loc: Pointer to the location in the command buffer currently being + * parsed from where the user-space resource id handle is located. + * @p_val: Pointer to pointer to resource validalidation node. Populated + * on exit. + */ +static int +vmw_cmd_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) +{ + return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, + converter, *id_loc, id_loc, p_val); +} + +/** + * vmw_rebind_contexts - Rebind all resources previously bound to + * referenced contexts. + * + * @sw_context: Pointer to the software context. + * + * Rebind context binding points that have been scrubbed because of eviction. + */ +static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) +{ + struct vmw_resource_val_node *val; + int ret; + + list_for_each_entry(val, &sw_context->resource_list, head) { + if (likely(!val->staged_bindings)) + continue; + + ret = vmw_context_rebind_all(val->res); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Failed to rebind context.\n"); + return ret; + } + } + + return 0; +} + +/** * vmw_cmd_cid_check - Check a command header for valid context information. * * @dev_priv: Pointer to a device private structure. @@ -437,7 +602,7 @@ { struct vmw_cid_cmd { SVGA3dCmdHeader header; - __le32 cid; + uint32_t cid; } *cmd; cmd = container_of(header, struct vmw_cid_cmd, header); @@ -453,17 +618,35 @@ SVGA3dCmdHeader header; SVGA3dCmdSetRenderTarget body; } *cmd; + struct vmw_resource_val_node *ctx_node; + struct vmw_resource_val_node *res_node; int ret; - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); + cmd = container_of(header, struct vmw_sid_cmd, header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + &ctx_node); if (unlikely(ret != 0)) return ret; - cmd = container_of(header, struct vmw_sid_cmd, header); ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, - &cmd->body.target.sid, NULL); - return ret; + &cmd->body.target.sid, &res_node); + if (unlikely(ret != 0)) + return ret; + + if (dev_priv->has_mob) { + struct vmw_ctx_bindinfo bi; + + bi.ctx = ctx_node->res; + bi.res = res_node ? 
res_node->res : NULL; + bi.bt = vmw_ctx_binding_rt; + bi.i1.rt_type = cmd->body.type; + return vmw_context_binding_add(ctx_node->staged_bindings, &bi); + } + + return 0; } static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, @@ -519,11 +702,6 @@ cmd = container_of(header, struct vmw_sid_cmd, header); - if (unlikely(!sw_context->kernel)) { - DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); - return -EPERM; - } - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->body.srcImage.sid, NULL); @@ -541,11 +719,6 @@ cmd = container_of(header, struct vmw_sid_cmd, header); - if (unlikely(!sw_context->kernel)) { - DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id); - return -EPERM; - } - return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->body.sid, NULL); @@ -586,7 +759,7 @@ sw_context->needs_post_query_barrier = true; ret = vmw_bo_to_validate_list(sw_context, sw_context->cur_query_bo, - NULL); + dev_priv->has_mob, NULL); if (unlikely(ret != 0)) return ret; } @@ -594,7 +767,7 @@ ret = vmw_bo_to_validate_list(sw_context, dev_priv->dummy_query_bo, - NULL); + dev_priv->has_mob, NULL); if (unlikely(ret != 0)) return ret; @@ -672,6 +845,66 @@ } /** + * vmw_translate_mob_pointer - Prepare to translate a user-space buffer + * handle to a MOB id. + * + * @dev_priv: Pointer to a device private structure. + * @sw_context: The software context used for this command batch validation. + * @id: Pointer to the user-space handle to be translated. + * @vmw_bo_p: Points to a location that, on successful return will carry + * a reference-counted pointer to the DMA buffer identified by the + * user-space handle in @id. + * + * This function saves information needed to translate a user-space buffer + * handle to a MOB id. The translation does not take place immediately, but + * during a call to vmw_apply_relocations(). This function builds a relocation + * list and a list of buffers to validate. The former needs to be freed using + * either vmw_apply_relocations() or vmw_free_relocations(). The latter + * needs to be freed using vmw_clear_validations. 
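+ *
+ * A typical caller, such as vmw_cmd_end_gb_query() below, follows the
+ * pattern:
+ *
+ *	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
+ *				    &cmd->q.mobid, &vmw_bo);
+ *	if (unlikely(ret != 0))
+ *		return ret;
+ *	... use vmw_bo ...
+ *	vmw_dmabuf_unreference(&vmw_bo);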
+ */ +static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGAMobId *id, + struct vmw_dma_buffer **vmw_bo_p) +{ + struct vmw_dma_buffer *vmw_bo = NULL; + struct ttm_buffer_object *bo; + uint32_t handle = *id; + struct vmw_relocation *reloc; + int ret; + + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not find or use MOB buffer.\n"); + return -EINVAL; + } + bo = &vmw_bo->base; + + if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { + DRM_ERROR("Max number relocations per submission" + " exceeded\n"); + ret = -EINVAL; + goto out_no_reloc; + } + + reloc = &sw_context->relocs[sw_context->cur_reloc++]; + reloc->mob_loc = id; + reloc->location = NULL; + + ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); + if (unlikely(ret != 0)) + goto out_no_reloc; + + *vmw_bo_p = vmw_bo; + return 0; + +out_no_reloc: + vmw_dmabuf_unreference(&vmw_bo); + vmw_bo_p = NULL; + return ret; +} + +/** * vmw_translate_guest_pointer - Prepare to translate a user-space buffer * handle to a valid SVGAGuestPtr * @@ -701,7 +934,7 @@ struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); return -EINVAL; @@ -718,7 +951,7 @@ reloc = &sw_context->relocs[sw_context->cur_reloc++]; reloc->location = ptr; - ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index); + ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); if (unlikely(ret != 0)) goto out_no_reloc; @@ -732,6 +965,30 @@ } /** + * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context used for this command submission. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_begin_gb_query_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdBeginGBQuery q; + } *cmd; + + cmd = container_of(header, struct vmw_begin_gb_query_cmd, + header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->q.cid, + NULL); +} + +/** * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command. * * @dev_priv: Pointer to a device private struct. @@ -750,12 +1007,64 @@ cmd = container_of(header, struct vmw_begin_query_cmd, header); + if (unlikely(dev_priv->has_mob)) { + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBeginGBQuery q; + } gb_cmd; + + BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); + + gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY; + gb_cmd.header.size = cmd->header.size; + gb_cmd.q.cid = cmd->q.cid; + gb_cmd.q.type = cmd->q.type; + + memcpy(cmd, &gb_cmd, sizeof(*cmd)); + return vmw_cmd_begin_gb_query(dev_priv, sw_context, header); + } + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, user_context_converter, &cmd->q.cid, NULL); } /** + * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context used for this command submission. + * @header: Pointer to the command header in the command stream. 
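+ *
+ * Note that when guest-backed objects are available, the legacy
+ * vmw_cmd_end_query() handler below rewrites SVGA_3D_CMD_END_QUERY into
+ * this guest-backed form in place and then reuses this function.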
+ */ +static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_dma_buffer *vmw_bo; + struct vmw_query_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdEndGBQuery q; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_query_cmd, header); + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_translate_mob_ptr(dev_priv, sw_context, + &cmd->q.mobid, + &vmw_bo); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context); + + vmw_dmabuf_unreference(&vmw_bo); + return ret; +} + +/** * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command. * * @dev_priv: Pointer to a device private struct. @@ -774,6 +1083,25 @@ int ret; cmd = container_of(header, struct vmw_query_cmd, header); + if (dev_priv->has_mob) { + struct { + SVGA3dCmdHeader header; + SVGA3dCmdEndGBQuery q; + } gb_cmd; + + BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); + + gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY; + gb_cmd.header.size = cmd->header.size; + gb_cmd.q.cid = cmd->q.cid; + gb_cmd.q.type = cmd->q.type; + gb_cmd.q.mobid = cmd->q.guestResult.gmrId; + gb_cmd.q.offset = cmd->q.guestResult.offset; + + memcpy(cmd, &gb_cmd, sizeof(*cmd)); + return vmw_cmd_end_gb_query(dev_priv, sw_context, header); + } + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; @@ -790,7 +1118,40 @@ return ret; } -/* +/** + * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context used for this command submission. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_dma_buffer *vmw_bo; + struct vmw_query_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdWaitForGBQuery q; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_query_cmd, header); + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_translate_mob_ptr(dev_priv, sw_context, + &cmd->q.mobid, + &vmw_bo); + if (unlikely(ret != 0)) + return ret; + + vmw_dmabuf_unreference(&vmw_bo); + return 0; +} + +/** * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command. * * @dev_priv: Pointer to a device private struct. @@ -809,6 +1170,25 @@ int ret; cmd = container_of(header, struct vmw_query_cmd, header); + if (dev_priv->has_mob) { + struct { + SVGA3dCmdHeader header; + SVGA3dCmdWaitForGBQuery q; + } gb_cmd; + + BUG_ON(sizeof(gb_cmd) != sizeof(*cmd)); + + gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; + gb_cmd.header.size = cmd->header.size; + gb_cmd.q.cid = cmd->q.cid; + gb_cmd.q.type = cmd->q.type; + gb_cmd.q.mobid = cmd->q.guestResult.gmrId; + gb_cmd.q.offset = cmd->q.guestResult.offset; + + memcpy(cmd, &gb_cmd, sizeof(*cmd)); + return vmw_cmd_wait_gb_query(dev_priv, sw_context, header); + } + ret = vmw_cmd_cid_check(dev_priv, sw_context, header); if (unlikely(ret != 0)) return ret; @@ -834,14 +1214,36 @@ SVGA3dCmdSurfaceDMA dma; } *cmd; int ret; + SVGA3dCmdSurfaceDMASuffix *suffix; + uint32_t bo_size; cmd = container_of(header, struct vmw_dma_cmd, header); + suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + + header->size - sizeof(*suffix)); + + /* Make sure device and verifier stays in sync. 
*/ + if (unlikely(suffix->suffixSize != sizeof(*suffix))) { + DRM_ERROR("Invalid DMA suffix size.\n"); + return -EINVAL; + } + ret = vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->dma.guest.ptr, &vmw_bo); if (unlikely(ret != 0)) return ret; + /* Make sure DMA doesn't cross BO boundaries. */ + bo_size = vmw_bo->base.num_pages * PAGE_SIZE; + if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { + DRM_ERROR("Invalid DMA offset.\n"); + return -EINVAL; + } + + bo_size -= cmd->dma.guest.ptr.offset; + if (unlikely(suffix->maximumOffset > bo_size)) + suffix->maximumOffset = bo_size; + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->dma.host.sid, NULL); @@ -853,7 +1255,8 @@ srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); - vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, + header); out_no_surface: vmw_dmabuf_unreference(&vmw_bo); @@ -921,15 +1324,22 @@ struct vmw_tex_state_cmd { SVGA3dCmdHeader header; SVGA3dCmdSetTextureState state; - }; + } *cmd; SVGA3dTextureState *last_state = (SVGA3dTextureState *) ((unsigned long) header + header->size + sizeof(header)); SVGA3dTextureState *cur_state = (SVGA3dTextureState *) ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); + struct vmw_resource_val_node *ctx_node; + struct vmw_resource_val_node *res_node; int ret; - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); + cmd = container_of(header, struct vmw_tex_state_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->state.cid, + &ctx_node); if (unlikely(ret != 0)) return ret; @@ -939,9 +1349,20 @@ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, - &cur_state->value, NULL); + &cur_state->value, &res_node); if (unlikely(ret != 0)) return ret; + + if (dev_priv->has_mob) { + struct vmw_ctx_bindinfo bi; + + bi.ctx = ctx_node->res; + bi.res = res_node ? res_node->res : NULL; + bi.bt = vmw_ctx_binding_tex; + bi.i1.texture_stage = cur_state->stage; + vmw_context_binding_add(ctx_node->staged_bindings, + &bi); + } } return 0; @@ -971,6 +1392,314 @@ } /** + * vmw_cmd_switch_backup - Utility function to handle backup buffer switching + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @res_type: The resource type. + * @converter: Information about user-space binding for this resource type. + * @res_id: Pointer to the user-space resource handle in the command stream. + * @buf_id: Pointer to the user-space backup buffer handle in the command + * stream. + * @backup_offset: Offset of backup into MOB. + * + * This function prepares for registering a switch of backup buffers + * in the resource metadata just prior to unreserving. 
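+ *
+ * Only the new backup buffer and offset are recorded in the validation
+ * node here; the actual switch is performed later by
+ * vmw_resource_unreserve() when the resource is unreserved.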
+ */ +static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv + *converter, + uint32_t *res_id, + uint32_t *buf_id, + unsigned long backup_offset) +{ + int ret; + struct vmw_dma_buffer *dma_buf; + struct vmw_resource_val_node *val_node; + + ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, + converter, res_id, &val_node); + if (unlikely(ret != 0)) + return ret; + + ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); + if (unlikely(ret != 0)) + return ret; + + if (val_node->first_usage) + val_node->no_buffer_needed = true; + + vmw_dmabuf_unreference(&val_node->new_backup); + val_node->new_backup = dma_buf; + val_node->new_backup_offset = backup_offset; + + return 0; +} + +/** + * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_bind_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBSurface body; + } *cmd; + + cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header); + + return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.sid, &cmd->body.mobid, + 0); +} + +/** + * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdUpdateGBImage body; + } *cmd; + + cmd = container_of(header, struct vmw_gb_surface_cmd, header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.image.sid, NULL); +} + +/** + * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdUpdateGBSurface body; + } *cmd; + + cmd = container_of(header, struct vmw_gb_surface_cmd, header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.sid, NULL); +} + +/** + * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
+ */ +static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdReadbackGBImage body; + } *cmd; + + cmd = container_of(header, struct vmw_gb_surface_cmd, header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.image.sid, NULL); +} + +/** + * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdReadbackGBSurface body; + } *cmd; + + cmd = container_of(header, struct vmw_gb_surface_cmd, header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.sid, NULL); +} + +/** + * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdInvalidateGBImage body; + } *cmd; + + cmd = container_of(header, struct vmw_gb_surface_cmd, header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.image.sid, NULL); +} + +/** + * vmw_cmd_invalidate_gb_surface - Validate an + * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_gb_surface_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdInvalidateGBSurface body; + } *cmd; + + cmd = container_of(header, struct vmw_gb_surface_cmd, header); + + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, + user_surface_converter, + &cmd->body.sid, NULL); +} + + +/** + * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
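+ *
+ * The context id is always validated. On non guest-backed hardware
+ * nothing more is done. With MOBs available, the shader bytecode that
+ * follows the fixed command body is instead handed to the per-file
+ * compat shader manager, roughly:
+ *
+ *	size = cmd->header.size - sizeof(cmd->body);
+ *	vmw_compat_shader_add(sw_context->fp->shman, cmd->body.shid,
+ *			      cmd + 1, cmd->body.type, size, ...);
+ *
+ * and a relocation with a NULL resource is queued for the command id,
+ * presumably so the define command itself can be patched out of the
+ * stream before it reaches the device.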
+ */ +static int vmw_cmd_shader_define(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_shader_define_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDefineShader body; + } *cmd; + int ret; + size_t size; + + cmd = container_of(header, struct vmw_shader_define_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(!dev_priv->has_mob)) + return 0; + + size = cmd->header.size - sizeof(cmd->body); + ret = vmw_compat_shader_add(sw_context->fp->shman, + cmd->body.shid, cmd + 1, + cmd->body.type, size, + sw_context->fp->tfile, + &sw_context->staged_shaders); + if (unlikely(ret != 0)) + return ret; + + return vmw_resource_relocation_add(&sw_context->res_relocations, + NULL, &cmd->header.id - + sw_context->buf_start); + + return 0; +} + +/** + * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_shader_destroy_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyShader body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_shader_destroy_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(!dev_priv->has_mob)) + return 0; + + ret = vmw_compat_shader_remove(sw_context->fp->shman, + cmd->body.shid, + cmd->body.type, + &sw_context->staged_shaders); + if (unlikely(ret != 0)) + return ret; + + return vmw_resource_relocation_add(&sw_context->res_relocations, + NULL, &cmd->header.id - + sw_context->buf_start); + + return 0; +} + +/** * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER * command * @@ -986,18 +1715,105 @@ SVGA3dCmdHeader header; SVGA3dCmdSetShader body; } *cmd; + struct vmw_resource_val_node *ctx_node; int ret; cmd = container_of(header, struct vmw_set_shader_cmd, header); - ret = vmw_cmd_cid_check(dev_priv, sw_context, header); + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + &ctx_node); + if (unlikely(ret != 0)) + return ret; + + if (dev_priv->has_mob) { + struct vmw_ctx_bindinfo bi; + struct vmw_resource_val_node *res_node; + u32 shid = cmd->body.shid; + + if (shid != SVGA3D_INVALID_ID) + (void) vmw_compat_shader_lookup(sw_context->fp->shman, + cmd->body.type, + &shid); + + ret = vmw_cmd_compat_res_check(dev_priv, sw_context, + vmw_res_shader, + user_shader_converter, + shid, + &cmd->body.shid, &res_node); + if (unlikely(ret != 0)) + return ret; + + bi.ctx = ctx_node->res; + bi.res = res_node ? res_node->res : NULL; + bi.bt = vmw_ctx_binding_shader; + bi.i1.shader_type = cmd->body.type; + return vmw_context_binding_add(ctx_node->staged_bindings, &bi); + } + + return 0; +} + +/** + * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
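+ *
+ * The context id is validated as usual. On guest-backed hardware the
+ * command header is additionally rewritten in place so that the device
+ * sees the guest-backed variant of the command:
+ *
+ *	if (dev_priv->has_mob)
+ *		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;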
+ */ +static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_set_shader_const_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSetShaderConst body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_set_shader_const_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); if (unlikely(ret != 0)) return ret; + if (dev_priv->has_mob) + header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; + return 0; } +/** + * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_bind_gb_shader_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBShader body; + } *cmd; + + cmd = container_of(header, struct vmw_bind_gb_shader_cmd, + header); + + return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader, + user_shader_converter, + &cmd->body.shid, &cmd->body.mobid, + cmd->body.offsetInBytes); +} + static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) @@ -1041,50 +1857,173 @@ return 0; } -typedef int (*vmw_cmd_func) (struct vmw_private *, - struct vmw_sw_context *, - SVGA3dCmdHeader *); - -#define VMW_CMD_DEF(cmd, func) \ - [cmd - SVGA_3D_CMD_BASE] = func - -static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), - VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), +static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check, + true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, - &vmw_cmd_set_render_target_check), - VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), - VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), - 
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader), - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), - VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), - VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query), - VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), - VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), - VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), + &vmw_cmd_set_render_target_check, true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok, + true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, - &vmw_cmd_blt_surf_screen_check), - VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid), - VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid), + &vmw_cmd_blt_surf_screen_check, false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid, + false, 
false, false), + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE, + &vmw_cmd_update_gb_surface, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE, + &vmw_cmd_readback_gb_image, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE, + &vmw_cmd_readback_gb_surface, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE, + &vmw_cmd_invalidate_gb_image, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE, + &vmw_cmd_invalidate_gb_surface, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid, + false, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid, + false, false, true), + 
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, + false, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, + true, false, true) }; static int vmw_cmd_check(struct vmw_private *dev_priv, @@ -1095,6 +2034,8 @@ uint32_t size_remaining = *size; SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; int ret; + const struct vmw_cmd_entry *entry; + bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS; cmd_id = le32_to_cpu(((uint32_t *)buf)[0]); /* Handle any none 3D commands */ @@ -1107,18 +2048,43 @@ cmd_id -= SVGA_3D_CMD_BASE; if (unlikely(*size > size_remaining)) - goto out_err; + goto out_invalid; if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) - goto out_err; + goto out_invalid; + + entry = &vmw_cmd_entries[cmd_id]; + if (unlikely(!entry->func)) + goto out_invalid; + + if (unlikely(!entry->user_allow && !sw_context->kernel)) + goto out_privileged; - ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); + if (unlikely(entry->gb_disable && gb)) + goto out_old; + + if (unlikely(entry->gb_enable && !gb)) + goto out_new; + + ret = entry->func(dev_priv, sw_context, header); if (unlikely(ret != 0)) - goto out_err; + goto out_invalid; return 0; -out_err: - DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", +out_invalid: + DRM_ERROR("Invalid SVGA3D command: %d\n", + cmd_id + SVGA_3D_CMD_BASE); + return -EINVAL; +out_privileged: + DRM_ERROR("Privileged SVGA3D command: %d\n", + cmd_id + SVGA_3D_CMD_BASE); + return -EPERM; +out_old: + DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n", + cmd_id + SVGA_3D_CMD_BASE); + return -EINVAL; +out_new: + DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n", cmd_id + SVGA_3D_CMD_BASE); return -EINVAL; } @@ -1174,6 +2140,9 @@ case VMW_PL_GMR: reloc->location->gmrId = bo->mem.start; break; + case VMW_PL_MOB: + *reloc->mob_loc = bo->mem.start; + break; default: BUG(); } @@ -1198,6 +2167,8 @@ list_for_each_entry_safe(val, val_next, list, head) { list_del_init(&val->head); vmw_resource_unreference(&val->res); + if (unlikely(val->staged_bindings)) + kfree(val->staged_bindings); kfree(val); } } @@ -1224,7 +2195,8 @@ } static int vmw_validate_single_buffer(struct vmw_private *dev_priv, - struct ttm_buffer_object *bo) + struct ttm_buffer_object *bo, + bool validate_as_mob) { int ret; @@ -1238,6 +2210,9 @@ dev_priv->dummy_query_bo_pinned)) return 0; + if (validate_as_mob) + return ttm_bo_validate(bo, &vmw_mob_placement, true, false); + /** * Put BO in VRAM if there is space, otherwise as a GMR. 
* If there is no space in VRAM and GMR ids are all used up, @@ -1259,7 +2234,6 @@ return ret; } - static int vmw_validate_buffers(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { @@ -1267,7 +2241,8 @@ int ret; list_for_each_entry(entry, &sw_context->validate_nodes, base.head) { - ret = vmw_validate_single_buffer(dev_priv, entry->base.bo); + ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, + entry->validate_as_mob); if (unlikely(ret != 0)) return ret; } @@ -1461,7 +2436,7 @@ } else sw_context->kernel = true; - sw_context->tfile = vmw_fpriv(file_priv)->tfile; + sw_context->fp = vmw_fpriv(file_priv); sw_context->cur_reloc = 0; sw_context->cur_val_buf = 0; sw_context->fence_flags = 0; @@ -1478,16 +2453,17 @@ goto out_unlock; sw_context->res_ht_initialized = true; } + INIT_LIST_HEAD(&sw_context->staged_shaders); INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, command_size); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = vmw_resources_reserve(sw_context); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); if (unlikely(ret != 0)) @@ -1509,11 +2485,23 @@ goto out_err; } + ret = mutex_lock_interruptible(&dev_priv->binding_mutex); + if (unlikely(ret != 0)) { + ret = -ERESTARTSYS; + goto out_err; + } + + if (dev_priv->has_mob) { + ret = vmw_rebind_contexts(sw_context); + if (unlikely(ret != 0)) + goto out_unlock_binding; + } + cmd = vmw_fifo_reserve(dev_priv, command_size); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving fifo space for commands.\n"); ret = -ENOMEM; - goto out_err; + goto out_unlock_binding; } vmw_apply_relocations(sw_context); @@ -1538,6 +2526,8 @@ DRM_ERROR("Fence submission error. 
Syncing.\n"); vmw_resource_list_unreserve(&sw_context->resource_list, false); + mutex_unlock(&dev_priv->binding_mutex); + ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, (void *) fence); @@ -1558,6 +2548,8 @@ } list_splice_init(&sw_context->resource_list, &resource_list); + vmw_compat_shaders_commit(sw_context->fp->shman, + &sw_context->staged_shaders); mutex_unlock(&dev_priv->cmdbuf_mutex); /* @@ -1568,11 +2560,14 @@ return 0; +out_unlock_binding: + mutex_unlock(&dev_priv->binding_mutex); out_err: - vmw_resource_relocations_free(&sw_context->res_relocations); - vmw_free_relocations(sw_context); ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); +out_err_nores: vmw_resource_list_unreserve(&sw_context->resource_list, true); + vmw_resource_relocations_free(&sw_context->res_relocations); + vmw_free_relocations(sw_context); vmw_clear_validations(sw_context); if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) @@ -1581,6 +2576,8 @@ list_splice_init(&sw_context->resource_list, &resource_list); error_resource = sw_context->error_resource; sw_context->error_resource = NULL; + vmw_compat_shaders_revert(sw_context->fp->shman, + &sw_context->staged_shaders); mutex_unlock(&dev_priv->cmdbuf_mutex); /* --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -147,7 +147,7 @@ } if (!vmw_kms_validate_mode_vram(vmw_priv, - info->fix.line_length, + var->xres * var->bits_per_pixel/8, var->yoffset + var->yres)) { DRM_ERROR("Requested geom can not fit in framebuffer\n"); return -EINVAL; @@ -162,6 +162,8 @@ struct vmw_private *vmw_priv = par->vmw_priv; int ret; + info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8; + ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, info->fix.line_length, par->bpp, par->depth); --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -271,7 +271,7 @@ spin_unlock_irq(&fman->lock); } -void vmw_fences_perform_actions(struct vmw_fence_manager *fman, +static void vmw_fences_perform_actions(struct vmw_fence_manager *fman, struct list_head *list) { struct vmw_fence_action *action, *next_action; @@ -485,14 +485,7 @@ static void vmw_fence_destroy(struct vmw_fence_obj *fence) { - struct vmw_fence_manager *fman = fence->fman; - kfree(fence); - /* - * Free kernel space accounting. - */ - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), - fman->fence_size); } int vmw_fence_create(struct vmw_fence_manager *fman, @@ -500,20 +493,12 @@ uint32_t mask, struct vmw_fence_obj **p_fence) { - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); struct vmw_fence_obj *fence; int ret; - ret = ttm_mem_global_alloc(mem_glob, fman->fence_size, - false, false); - if (unlikely(ret != 0)) - return ret; - fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (unlikely(fence == NULL)) { - ret = -ENOMEM; - goto out_no_object; - } + if (unlikely(fence == NULL)) + return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, mask, vmw_fence_destroy); @@ -525,8 +510,6 @@ out_err_init: kfree(fence); -out_no_object: - ttm_mem_global_free(mem_glob, fman->fence_size); return ret; } @@ -897,7 +880,7 @@ * Note that the action callbacks may be executed before this function * returns. 
*/ -void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, +static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, struct vmw_fence_action *action) { struct vmw_fence_manager *fman = fence->fman; @@ -993,7 +976,7 @@ struct drm_vmw_event_fence event; }; -int vmw_event_fence_action_create(struct drm_file *file_priv, +static int vmw_event_fence_action_create(struct drm_file *file_priv, struct vmw_fence_obj *fence, uint32_t flags, uint64_t user_data, @@ -1049,6 +1032,8 @@ if (ret != 0) goto out_no_queue; + return 0; + out_no_queue: event->base.destroy(&event->base); out_no_event: @@ -1080,7 +1065,8 @@ */ if (arg->handle) { struct ttm_base_object *base = - ttm_base_object_lookup(vmw_fp->tfile, arg->handle); + ttm_base_object_lookup_for_ref(dev_priv->tdev, + arg->handle); if (unlikely(base == NULL)) { DRM_ERROR("Fence event invalid fence object handle " @@ -1123,17 +1109,10 @@ BUG_ON(fence == NULL); - if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME) - ret = vmw_event_fence_action_create(file_priv, fence, - arg->flags, - arg->user_data, - true); - else - ret = vmw_event_fence_action_create(file_priv, fence, - arg->flags, - arg->user_data, - true); - + ret = vmw_event_fence_action_create(file_priv, fence, + arg->flags, + arg->user_data, + true); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) DRM_ERROR("Failed to attach event to fence.\n"); --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -35,6 +35,23 @@ uint32_t fifo_min, hwversion; const struct vmw_fifo_state *fifo = &dev_priv->fifo; + if (!(dev_priv->capabilities & SVGA_CAP_3D)) + return false; + + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + uint32_t result; + + if (!dev_priv->has_mob) + return false; + + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D); + result = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + mutex_unlock(&dev_priv->hw_mutex); + + return (result != 0); + } + if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) return false; @@ -163,8 +180,9 @@ mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); + ; dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); @@ -511,24 +529,16 @@ } /** - * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo. + * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using + * legacy query commands. * * @dev_priv: The device private structure. * @cid: The hardware context id used for the query. * - * This function is used to emit a dummy occlusion query with - * no primitives rendered between query begin and query end. - * It's used to provide a query barrier, in order to know that when - * this query is finished, all preceding queries are also finished. - * - * A Query results structure should have been initialized at the start - * of the dev_priv->dummy_query_bo buffer object. And that buffer object - * must also be either reserved or pinned when this function is called. - * - * Returns -ENOMEM on failure to reserve fifo space. + * See the vmw_fifo_emit_dummy_query documentation. 
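+ *
+ * This is the variant used on non guest-backed hardware; the public
+ * entry point simply picks the right implementation:
+ *
+ *	int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+ *				      uint32_t cid)
+ *	{
+ *		if (dev_priv->has_mob)
+ *			return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+ *
+ *		return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+ *	}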
*/ -int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, - uint32_t cid) +static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, + uint32_t cid) { /* * A query wait without a preceding query end will @@ -566,3 +576,75 @@ return 0; } + +/** + * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using + * guest-backed resource query commands. + * + * @dev_priv: The device private structure. + * @cid: The hardware context id used for the query. + * + * See the vmw_fifo_emit_dummy_query documentation. + */ +static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, + uint32_t cid) +{ + /* + * A query wait without a preceding query end will + * actually finish all queries for this cid + * without writing to the query result structure. + */ + + struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdWaitForGBQuery body; + } *cmd; + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + + if (unlikely(cmd == NULL)) { + DRM_ERROR("Out of fifo space for dummy query.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY; + cmd->header.size = sizeof(cmd->body); + cmd->body.cid = cid; + cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + cmd->body.mobid = bo->mem.start; + cmd->body.offset = 0; + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + + +/** + * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using + * appropriate resource query commands. + * + * @dev_priv: The device private structure. + * @cid: The hardware context id used for the query. + * + * This function is used to emit a dummy occlusion query with + * no primitives rendered between query begin and query end. + * It's used to provide a query barrier, in order to know that when + * this query is finished, all preceding queries are also finished. + * + * A Query results structure should have been initialized at the start + * of the dev_priv->dummy_query_bo buffer object. And that buffer object + * must also be either reserved or pinned when this function is called. + * + * Returns -ENOMEM on failure to reserve fifo space. + */ +int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, + uint32_t cid) +{ + if (dev_priv->has_mob) + return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); + + return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); +} --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -125,181 +125,27 @@ } -static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma, - struct list_head *desc_pages) -{ - struct page *page, *next; - struct svga_guest_mem_descriptor *page_virtual; - unsigned int desc_per_page = PAGE_SIZE / - sizeof(struct svga_guest_mem_descriptor) - 1; - - if (list_empty(desc_pages)) - return; - - list_for_each_entry_safe(page, next, desc_pages, lru) { - list_del_init(&page->lru); - - if (likely(desc_dma != DMA_ADDR_INVALID)) { - dma_unmap_page(dev, desc_dma, PAGE_SIZE, - DMA_TO_DEVICE); - } - - page_virtual = kmap_atomic(page); - desc_dma = (dma_addr_t) - le32_to_cpu(page_virtual[desc_per_page].ppn) << - PAGE_SHIFT; - kunmap_atomic(page_virtual); - - __free_page(page); - } -} - -/** - * FIXME: Adjust to the ttm lowmem / highmem storage to minimize - * the number of used descriptors. 
- * - */ - -static int vmw_gmr_build_descriptors(struct device *dev, - struct list_head *desc_pages, - struct vmw_piter *iter, - unsigned long num_pages, - dma_addr_t *first_dma) -{ - struct page *page; - struct svga_guest_mem_descriptor *page_virtual = NULL; - struct svga_guest_mem_descriptor *desc_virtual = NULL; - unsigned int desc_per_page; - unsigned long prev_pfn; - unsigned long pfn; - int ret; - dma_addr_t desc_dma; - - desc_per_page = PAGE_SIZE / - sizeof(struct svga_guest_mem_descriptor) - 1; - - while (likely(num_pages != 0)) { - page = alloc_page(__GFP_HIGHMEM); - if (unlikely(page == NULL)) { - ret = -ENOMEM; - goto out_err; - } - - list_add_tail(&page->lru, desc_pages); - page_virtual = kmap_atomic(page); - desc_virtual = page_virtual - 1; - prev_pfn = ~(0UL); - - while (likely(num_pages != 0)) { - pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; - - if (pfn != prev_pfn + 1) { - - if (desc_virtual - page_virtual == - desc_per_page - 1) - break; - - (++desc_virtual)->ppn = cpu_to_le32(pfn); - desc_virtual->num_pages = cpu_to_le32(1); - } else { - uint32_t tmp = - le32_to_cpu(desc_virtual->num_pages); - desc_virtual->num_pages = cpu_to_le32(tmp + 1); - } - prev_pfn = pfn; - --num_pages; - vmw_piter_next(iter); - } - - (++desc_virtual)->ppn = DMA_PAGE_INVALID; - desc_virtual->num_pages = cpu_to_le32(0); - kunmap_atomic(page_virtual); - } - - desc_dma = 0; - list_for_each_entry_reverse(page, desc_pages, lru) { - page_virtual = kmap_atomic(page); - page_virtual[desc_per_page].ppn = cpu_to_le32 - (desc_dma >> PAGE_SHIFT); - kunmap_atomic(page_virtual); - desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE, - DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(dev, desc_dma))) - goto out_err; - } - *first_dma = desc_dma; - - return 0; -out_err: - vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages); - return ret; -} - -static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, - int gmr_id, dma_addr_t desc_dma) -{ - mutex_lock(&dev_priv->hw_mutex); - - vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); - wmb(); - vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT); - mb(); - - mutex_unlock(&dev_priv->hw_mutex); - -} - int vmw_gmr_bind(struct vmw_private *dev_priv, const struct vmw_sg_table *vsgt, unsigned long num_pages, int gmr_id) { - struct list_head desc_pages; - dma_addr_t desc_dma = 0; - struct device *dev = dev_priv->dev->dev; struct vmw_piter data_iter; - int ret; vmw_piter_start(&data_iter, vsgt, 0); if (unlikely(!vmw_piter_next(&data_iter))) return 0; - if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) - return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); - - if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR))) - return -EINVAL; - - if (vsgt->num_regions > dev_priv->max_gmr_descriptors) + if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2))) return -EINVAL; - INIT_LIST_HEAD(&desc_pages); - - ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter, - num_pages, &desc_dma); - if (unlikely(ret != 0)) - return ret; - - vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma); - vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages); - - return 0; + return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id); } void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) { - if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) { + if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) vmw_gmr2_unbind(dev_priv, gmr_id); - return; - } - - mutex_lock(&dev_priv->hw_mutex); - vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); - wmb(); - 
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); - mb(); - mutex_unlock(&dev_priv->hw_mutex); } --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -125,10 +125,21 @@ return -ENOMEM; spin_lock_init(&gman->lock); - gman->max_gmr_pages = dev_priv->max_gmr_pages; gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); - gman->max_gmr_ids = p_size; + + switch (p_size) { + case VMW_PL_GMR: + gman->max_gmr_ids = dev_priv->max_gmr_ids; + gman->max_gmr_pages = dev_priv->max_gmr_pages; + break; + case VMW_PL_MOB: + gman->max_gmr_ids = VMWGFX_NUM_MOB; + gman->max_gmr_pages = dev_priv->max_mob_pages; + break; + default: + BUG(); + } man->priv = (void *) gman; return 0; } --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -29,12 +29,18 @@ #include #include "vmwgfx_kms.h" +struct svga_3d_compat_cap { + SVGA3dCapsRecordHeader header; + SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; +}; + int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_getparam_arg *param = (struct drm_vmw_getparam_arg *)data; + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); switch (param->param) { case DRM_VMW_PARAM_NUM_STREAMS: @@ -53,13 +59,18 @@ param->value = dev_priv->fifo.capabilities; break; case DRM_VMW_PARAM_MAX_FB_SIZE: - param->value = dev_priv->vram_size; + param->value = dev_priv->prim_bb_mem; break; case DRM_VMW_PARAM_FIFO_HW_VERSION: { __le32 __iomem *fifo_mem = dev_priv->mmio_virt; const struct vmw_fifo_state *fifo = &dev_priv->fifo; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { + param->value = SVGA3D_HWVERSION_WS8_B1; + break; + } + param->value = ioread32(fifo_mem + ((fifo->capabilities & @@ -69,7 +80,30 @@ break; } case DRM_VMW_PARAM_MAX_SURF_MEMORY: - param->value = dev_priv->memory_size; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && + !vmw_fp->gb_aware) + param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; + else + param->value = dev_priv->memory_size; + break; + case DRM_VMW_PARAM_3D_CAPS_SIZE: + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && + vmw_fp->gb_aware) + param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) + param->value = sizeof(struct svga_3d_compat_cap) + + sizeof(uint32_t); + else + param->value = (SVGA_FIFO_3D_CAPS_LAST - + SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); + break; + case DRM_VMW_PARAM_MAX_MOB_MEMORY: + vmw_fp->gb_aware = true; + param->value = dev_priv->max_mob_pages * PAGE_SIZE; + break; + case DRM_VMW_PARAM_MAX_MOB_SIZE: + param->value = dev_priv->max_mob_size; break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", @@ -80,6 +114,38 @@ return 0; } +static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, + size_t size) +{ + struct svga_3d_compat_cap *compat_cap = + (struct svga_3d_compat_cap *) bounce; + unsigned int i; + size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); + unsigned int max_size; + + if (size < pair_offset) + return -EINVAL; + + max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); + + if (max_size > SVGA3D_DEVCAP_MAX) + max_size = SVGA3D_DEVCAP_MAX; + + compat_cap->header.length = + (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); + compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; + + mutex_lock(&dev_priv->hw_mutex); + for (i = 0; i < max_size; ++i) { + vmw_write(dev_priv, 
SVGA_REG_DEV_CAP, i); + compat_cap->pairs[i][0] = i; + compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + } + mutex_unlock(&dev_priv->hw_mutex); + + return 0; +} + int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -92,29 +158,58 @@ void __user *buffer = (void __user *)((unsigned long)(arg->buffer)); void *bounce; int ret; + bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); if (unlikely(arg->pad64 != 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } - size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2; + if (gb_objects && vmw_fp->gb_aware) + size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (gb_objects) + size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); + else + size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); if (arg->max_size < size) size = arg->max_size; - bounce = vmalloc(size); + bounce = vzalloc(size); if (unlikely(bounce == NULL)) { DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); return -ENOMEM; } - fifo_mem = dev_priv->mmio_virt; - memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); + if (gb_objects && vmw_fp->gb_aware) { + int i, num; + uint32_t *bounce32 = (uint32_t *) bounce; + + num = size / sizeof(uint32_t); + if (num > SVGA3D_DEVCAP_MAX) + num = SVGA3D_DEVCAP_MAX; + + mutex_lock(&dev_priv->hw_mutex); + for (i = 0; i < num; ++i) { + vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); + *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + } + mutex_unlock(&dev_priv->hw_mutex); + } else if (gb_objects) { + ret = vmw_fill_compat_cap(dev_priv, bounce, size); + if (unlikely(ret != 0)) + goto out_err; + } else { + fifo_mem = dev_priv->mmio_virt; + memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); + } ret = copy_to_user(buffer, bounce, size); if (ret) ret = -EFAULT; +out_err: vfree(bounce); if (unlikely(ret != 0)) --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -40,7 +40,7 @@ * Clip @num_rects number of @rects against @clip storing the * results in @out_rects and the number of passed rects in @out_num. 
*/ -void vmw_clip_cliprects(struct drm_clip_rect *rects, +static void vmw_clip_cliprects(struct drm_clip_rect *rects, int num_rects, struct vmw_clip_rect clip, SVGASignedRect *out_rects, @@ -423,7 +423,7 @@ struct drm_master *master; }; -void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) +static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); @@ -589,7 +589,7 @@ return ret; } -int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, +static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, @@ -665,9 +665,9 @@ if (unlikely(surface->mip_levels[0] != 1 || surface->num_sizes != 1 || - surface->sizes[0].width < mode_cmd->width || - surface->sizes[0].height < mode_cmd->height || - surface->sizes[0].depth != 1)) { + surface->base_size.width < mode_cmd->width || + surface->base_size.height < mode_cmd->height || + surface->base_size.depth != 1)) { DRM_ERROR("Incompatible surface dimensions " "for requested mode.\n"); return -EINVAL; @@ -754,7 +754,7 @@ struct vmw_dma_buffer *buffer; }; -void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) +static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_dmabuf *vfbd = vmw_framebuffer_to_vfbd(framebuffer); @@ -940,7 +940,7 @@ return ret; } -int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, +static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, @@ -1631,7 +1631,7 @@ uint32_t pitch, uint32_t height) { - return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; + return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem; } @@ -1663,7 +1663,7 @@ * Small shared kms functions. */ -int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, +static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, struct drm_vmw_rect *rects) { struct drm_device *dev = dev_priv->dev; --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -0,0 +1,656 @@ +/************************************************************************** + * + * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_drv.h" + +/* + * If we set up the screen target otable, screen objects stop working. + */ + +#define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE) ? 0 : 1) + +#ifdef CONFIG_64BIT +#define VMW_PPN_SIZE 8 +#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 +#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 +#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 +#else +#define VMW_PPN_SIZE 4 +#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 +#define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 +#define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 +#endif + +/* + * struct vmw_mob - Structure containing page table and metadata for a + * Guest Memory OBject. + * + * @num_pages Number of pages that make up the page table. + * @pt_level The indirection level of the page table. 0-2. + * @pt_root_page DMA address of the level 0 page of the page table. + */ +struct vmw_mob { + struct ttm_buffer_object *pt_bo; + unsigned long num_pages; + unsigned pt_level; + dma_addr_t pt_root_page; + uint32_t id; +}; + +/* + * struct vmw_otable - Guest Memory OBject table metadata + * + * @size: Size of the table (page-aligned). + * @page_table: Pointer to a struct vmw_mob holding the page table. + */ +struct vmw_otable { + unsigned long size; + struct vmw_mob *page_table; +}; + +static int vmw_mob_pt_populate(struct vmw_private *dev_priv, + struct vmw_mob *mob); +static void vmw_mob_pt_setup(struct vmw_mob *mob, + struct vmw_piter data_iter, + unsigned long num_data_pages); + +/* + * vmw_setup_otable_base - Issue an object table base setup command to + * the device + * + * @dev_priv: Pointer to a device private structure + * @type: Type of object table base + * @offset Start of table offset into dev_priv::otable_bo + * @otable Pointer to otable metadata; + * + * This function returns -ENOMEM if it fails to reserve fifo space, + * and may block waiting for fifo space. 
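+ *
+ * Condensed, and with error handling omitted, the page-table depth is
+ * chosen from the table size and the layout of its backing pages:
+ *
+ *	if (otable->size <= PAGE_SIZE)		/* fits in one page */
+ *		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
+ *	else if (vsgt->num_regions == 1)	/* single contiguous region */
+ *		mob->pt_level = SVGA3D_MOBFMT_RANGE;
+ *	else {					/* build a real page table */
+ *		vmw_mob_pt_populate(dev_priv, mob);
+ *		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
+ *	}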
+ */ +static int vmw_setup_otable_base(struct vmw_private *dev_priv, + SVGAOTableType type, + unsigned long offset, + struct vmw_otable *otable) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdSetOTableBase64 body; + } *cmd; + struct vmw_mob *mob; + const struct vmw_sg_table *vsgt; + struct vmw_piter iter; + int ret; + + BUG_ON(otable->page_table != NULL); + + vsgt = vmw_bo_sg_table(dev_priv->otable_bo); + vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); + WARN_ON(!vmw_piter_next(&iter)); + + mob = vmw_mob_create(otable->size >> PAGE_SHIFT); + if (unlikely(mob == NULL)) { + DRM_ERROR("Failed creating OTable page table.\n"); + return -ENOMEM; + } + + if (otable->size <= PAGE_SIZE) { + mob->pt_level = VMW_MOBFMT_PTDEPTH_0; + mob->pt_root_page = vmw_piter_dma_addr(&iter); + } else if (vsgt->num_regions == 1) { + mob->pt_level = SVGA3D_MOBFMT_RANGE; + mob->pt_root_page = vmw_piter_dma_addr(&iter); + } else { + ret = vmw_mob_pt_populate(dev_priv, mob); + if (unlikely(ret != 0)) + goto out_no_populate; + + vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); + mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + memset(cmd, 0, sizeof(*cmd)); + cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64; + cmd->header.size = sizeof(cmd->body); + cmd->body.type = type; + cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); + cmd->body.sizeInBytes = otable->size; + cmd->body.validSizeInBytes = 0; + cmd->body.ptDepth = mob->pt_level; + + /* + * The device doesn't support this, But the otable size is + * determined at compile-time, so this BUG shouldn't trigger + * randomly. + */ + BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + otable->page_table = mob; + + return 0; + +out_no_fifo: +out_no_populate: + vmw_mob_destroy(mob); + return ret; +} + +/* + * vmw_takedown_otable_base - Issue an object table base takedown command + * to the device + * + * @dev_priv: Pointer to a device private structure + * @type: Type of object table base + * + */ +static void vmw_takedown_otable_base(struct vmw_private *dev_priv, + SVGAOTableType type, + struct vmw_otable *otable) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdSetOTableBase body; + } *cmd; + struct ttm_buffer_object *bo; + + if (otable->page_table == NULL) + return; + + bo = otable->page_table->pt_bo; + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for OTable " + "takedown.\n"); + } else { + memset(cmd, 0, sizeof(*cmd)); + cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; + cmd->header.size = sizeof(cmd->body); + cmd->body.type = type; + cmd->body.baseAddress = 0; + cmd->body.sizeInBytes = 0; + cmd->body.validSizeInBytes = 0; + cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + } + + if (bo) { + int ret; + + ret = ttm_bo_reserve(bo, false, true, false, NULL); + BUG_ON(ret != 0); + + vmw_fence_single_bo(bo, NULL); + ttm_bo_unreserve(bo); + } + + vmw_mob_destroy(otable->page_table); + otable->page_table = NULL; +} + +/* + * vmw_otables_setup - Set up guest backed memory object tables + * + * @dev_priv: Pointer to a device private structure + * + * Takes care of the device guest backed surface + * initialization, by setting up the guest backed memory object tables. 
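+ * All object tables share a single system buffer object,
+ * dev_priv->otable_bo: each table size is rounded up to a page multiple,
+ * the tables are laid out back to back in that buffer, and each one is
+ * then registered with the device through vmw_setup_otable_base().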
+ * Returns 0 on success and various error codes on failure. A succesful return + * means the object tables can be taken down using the vmw_otables_takedown + * function. + */ +int vmw_otables_setup(struct vmw_private *dev_priv) +{ + unsigned long offset; + unsigned long bo_size; + struct vmw_otable *otables; + SVGAOTableType i; + int ret; + + otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), + GFP_KERNEL); + if (unlikely(otables == NULL)) { + DRM_ERROR("Failed to allocate space for otable " + "metadata.\n"); + return -ENOMEM; + } + + otables[SVGA_OTABLE_MOB].size = + VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; + otables[SVGA_OTABLE_SURFACE].size = + VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; + otables[SVGA_OTABLE_CONTEXT].size = + VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; + otables[SVGA_OTABLE_SHADER].size = + VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; + otables[SVGA_OTABLE_SCREEN_TARGET].size = + VMWGFX_NUM_GB_SCREEN_TARGET * + SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; + + bo_size = 0; + for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { + otables[i].size = + (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; + bo_size += otables[i].size; + } + + ret = ttm_bo_create(&dev_priv->bdev, bo_size, + ttm_bo_type_device, + &vmw_sys_ne_placement, + 0, false, NULL, + &dev_priv->otable_bo); + + if (unlikely(ret != 0)) + goto out_no_bo; + + ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); + BUG_ON(ret != 0); + ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); + if (unlikely(ret != 0)) + goto out_unreserve; + ret = vmw_bo_map_dma(dev_priv->otable_bo); + if (unlikely(ret != 0)) + goto out_unreserve; + + ttm_bo_unreserve(dev_priv->otable_bo); + + offset = 0; + for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { + ret = vmw_setup_otable_base(dev_priv, i, offset, + &otables[i]); + if (unlikely(ret != 0)) + goto out_no_setup; + offset += otables[i].size; + } + + dev_priv->otables = otables; + return 0; + +out_unreserve: + ttm_bo_unreserve(dev_priv->otable_bo); +out_no_setup: + for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) + vmw_takedown_otable_base(dev_priv, i, &otables[i]); + + ttm_bo_unref(&dev_priv->otable_bo); +out_no_bo: + kfree(otables); + return ret; +} + + +/* + * vmw_otables_takedown - Take down guest backed memory object tables + * + * @dev_priv: Pointer to a device private structure + * + * Take down the Guest Memory Object tables. + */ +void vmw_otables_takedown(struct vmw_private *dev_priv) +{ + SVGAOTableType i; + struct ttm_buffer_object *bo = dev_priv->otable_bo; + int ret; + + for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) + vmw_takedown_otable_base(dev_priv, i, + &dev_priv->otables[i]); + + ret = ttm_bo_reserve(bo, false, true, false, NULL); + BUG_ON(ret != 0); + + vmw_fence_single_bo(bo, NULL); + ttm_bo_unreserve(bo); + + ttm_bo_unref(&dev_priv->otable_bo); + kfree(dev_priv->otables); + dev_priv->otables = NULL; +} + + +/* + * vmw_mob_calculate_pt_pages - Calculate the number of page table pages + * needed for a guest backed memory object. + * + * @data_pages: Number of data pages in the memory object buffer. 
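+ *
+ * Each page-table page holds PAGE_SIZE / VMW_PPN_SIZE entries, and the
+ * loop below adds one level of page-table pages at a time until a single
+ * page suffices. As a worked example, assuming 4 KiB pages and 64-bit
+ * PPNs (VMW_PPN_SIZE == 8): a 4 MiB object has 1024 data pages, needing
+ * two level-1 page-table pages (512 entries each), which in turn need
+ * one level-2 page, for a total of three page-table pages.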
+ */ +static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) +{ + unsigned long data_size = data_pages * PAGE_SIZE; + unsigned long tot_size = 0; + + while (likely(data_size > PAGE_SIZE)) { + data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); + data_size *= VMW_PPN_SIZE; + tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; + } + + return tot_size >> PAGE_SHIFT; +} + +/* + * vmw_mob_create - Create a mob, but don't populate it. + * + * @data_pages: Number of data pages of the underlying buffer object. + */ +struct vmw_mob *vmw_mob_create(unsigned long data_pages) +{ + struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); + + if (unlikely(mob == NULL)) + return NULL; + + mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); + + return mob; +} + +/* + * vmw_mob_pt_populate - Populate the mob pagetable + * + * @mob: Pointer to the mob the pagetable of which we want to + * populate. + * + * This function allocates memory to be used for the pagetable, and + * adjusts TTM memory accounting accordingly. Returns ENOMEM if + * memory resources aren't sufficient and may cause TTM buffer objects + * to be swapped out by using the TTM memory accounting function. + */ +static int vmw_mob_pt_populate(struct vmw_private *dev_priv, + struct vmw_mob *mob) +{ + int ret; + BUG_ON(mob->pt_bo != NULL); + + ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE, + ttm_bo_type_device, + &vmw_sys_ne_placement, + 0, false, NULL, &mob->pt_bo); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL); + + BUG_ON(ret != 0); + ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); + if (unlikely(ret != 0)) + goto out_unreserve; + ret = vmw_bo_map_dma(mob->pt_bo); + if (unlikely(ret != 0)) + goto out_unreserve; + + ttm_bo_unreserve(mob->pt_bo); + + return 0; + +out_unreserve: + ttm_bo_unreserve(mob->pt_bo); + ttm_bo_unref(&mob->pt_bo); + + return ret; +} + +/** + * vmw_mob_assign_ppn - Assign a value to a page table entry + * + * @addr: Pointer to pointer to page table entry. + * @val: The page table entry + * + * Assigns a value to a page table entry pointed to by *@addr and increments + * *@addr according to the page table entry size. + */ +#if (VMW_PPN_SIZE == 8) +static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) +{ + *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT); + *addr += 2; +} +#else +static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val) +{ + *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT); +} +#endif + +/* + * vmw_mob_build_pt - Build a pagetable + * + * @data_addr: Array of DMA addresses to the underlying buffer + * object's data pages. + * @num_data_pages: Number of buffer object data pages. + * @pt_pages: Array of page pointers to the page table pages. + * + * Returns the number of page table pages actually used. + * Uses atomic kmaps of highmem pages to avoid TLB thrashing. 
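+ *
+ * Each entry is the page frame number (DMA address >> PAGE_SHIFT) of one
+ * data page, written little-endian in VMW_PPN_SIZE bytes through
+ * vmw_mob_assign_ppn(); on 64-bit builds that is
+ *
+ *	*((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+ *	*addr += 2;
+ *
+ * The returned page count lets vmw_mob_pt_setup() feed one level's
+ * page-table pages back in as the next level's data pages.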
+ */ +static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter, + unsigned long num_data_pages, + struct vmw_piter *pt_iter) +{ + unsigned long pt_size = num_data_pages * VMW_PPN_SIZE; + unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); + unsigned long pt_page; + __le32 *addr, *save_addr; + unsigned long i; + struct page *page; + + for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) { + page = vmw_piter_page(pt_iter); + + save_addr = addr = kmap_atomic(page); + + for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) { + vmw_mob_assign_ppn(&addr, + vmw_piter_dma_addr(data_iter)); + if (unlikely(--num_data_pages == 0)) + break; + WARN_ON(!vmw_piter_next(data_iter)); + } + kunmap_atomic(save_addr); + vmw_piter_next(pt_iter); + } + + return num_pt_pages; +} + +/* + * vmw_mob_build_pt - Set up a multilevel mob pagetable + * + * @mob: Pointer to a mob whose page table needs setting up. + * @data_addr Array of DMA addresses to the buffer object's data + * pages. + * @num_data_pages: Number of buffer object data pages. + * + * Uses tail recursion to set up a multilevel mob page table. + */ +static void vmw_mob_pt_setup(struct vmw_mob *mob, + struct vmw_piter data_iter, + unsigned long num_data_pages) +{ + unsigned long num_pt_pages = 0; + struct ttm_buffer_object *bo = mob->pt_bo; + struct vmw_piter save_pt_iter; + struct vmw_piter pt_iter; + const struct vmw_sg_table *vsgt; + int ret; + + ret = ttm_bo_reserve(bo, false, true, false, NULL); + BUG_ON(ret != 0); + + vsgt = vmw_bo_sg_table(bo); + vmw_piter_start(&pt_iter, vsgt, 0); + BUG_ON(!vmw_piter_next(&pt_iter)); + mob->pt_level = 0; + while (likely(num_data_pages > 1)) { + ++mob->pt_level; + BUG_ON(mob->pt_level > 2); + save_pt_iter = pt_iter; + num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages, + &pt_iter); + data_iter = save_pt_iter; + num_data_pages = num_pt_pages; + } + + mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter); + ttm_bo_unreserve(bo); +} + +/* + * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. + * + * @mob: Pointer to a mob to destroy. + */ +void vmw_mob_destroy(struct vmw_mob *mob) +{ + if (mob->pt_bo) + ttm_bo_unref(&mob->pt_bo); + kfree(mob); +} + +/* + * vmw_mob_unbind - Hide a mob from the device. + * + * @dev_priv: Pointer to a device private. + * @mob_id: Device id of the mob to unbind. + */ +void vmw_mob_unbind(struct vmw_private *dev_priv, + struct vmw_mob *mob) +{ + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyGBMob body; + } *cmd; + int ret; + struct ttm_buffer_object *bo = mob->pt_bo; + + if (bo) { + ret = ttm_bo_reserve(bo, false, true, false, NULL); + /* + * Noone else should be using this buffer. + */ + BUG_ON(ret != 0); + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for Memory " + "Object unbinding.\n"); + } else { + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; + cmd->header.size = sizeof(cmd->body); + cmd->body.mobid = mob->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + } + if (bo) { + vmw_fence_single_bo(bo, NULL); + ttm_bo_unreserve(bo); + } + vmw_3d_resource_dec(dev_priv, false); +} + +/* + * vmw_mob_bind - Make a mob visible to the device after first + * populating it if necessary. + * + * @dev_priv: Pointer to a device private. + * @mob: Pointer to the mob we're making visible. + * @data_addr: Array of DMA addresses to the data pages of the underlying + * buffer object. + * @num_data_pages: Number of data pages of the underlying buffer + * object. 
+ * @mob_id: Device id of the mob to bind + * + * This function is intended to be interfaced with the ttm_tt backend + * code. + */ +int vmw_mob_bind(struct vmw_private *dev_priv, + struct vmw_mob *mob, + const struct vmw_sg_table *vsgt, + unsigned long num_data_pages, + int32_t mob_id) +{ + int ret; + bool pt_set_up = false; + struct vmw_piter data_iter; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineGBMob64 body; + } *cmd; + + mob->id = mob_id; + vmw_piter_start(&data_iter, vsgt, 0); + if (unlikely(!vmw_piter_next(&data_iter))) + return 0; + + if (likely(num_data_pages == 1)) { + mob->pt_level = VMW_MOBFMT_PTDEPTH_0; + mob->pt_root_page = vmw_piter_dma_addr(&data_iter); + } else if (vsgt->num_regions == 1) { + mob->pt_level = SVGA3D_MOBFMT_RANGE; + mob->pt_root_page = vmw_piter_dma_addr(&data_iter); + } else if (unlikely(mob->pt_bo == NULL)) { + ret = vmw_mob_pt_populate(dev_priv, mob); + if (unlikely(ret != 0)) + return ret; + + vmw_mob_pt_setup(mob, data_iter, num_data_pages); + pt_set_up = true; + mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; + } + + (void) vmw_3d_resource_inc(dev_priv, false); + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for Memory " + "Object binding.\n"); + goto out_no_cmd_space; + } + + cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64; + cmd->header.size = sizeof(cmd->body); + cmd->body.mobid = mob_id; + cmd->body.ptDepth = mob->pt_level; + cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT); + cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; + + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; + +out_no_cmd_space: + vmw_3d_resource_dec(dev_priv, false); + if (pt_set_up) + ttm_bo_unref(&mob->pt_bo); + + return -ENOMEM; +} --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -88,6 +88,11 @@ return res; } +struct vmw_resource * +vmw_resource_reference_unless_doomed(struct vmw_resource *res) +{ + return kref_get_unless_zero(&res->kref) ? res : NULL; +} /** * vmw_resource_release_id - release a resource id to the id manager. @@ -136,8 +141,12 @@ vmw_dmabuf_unreference(&res->backup); } - if (likely(res->hw_destroy != NULL)) + if (likely(res->hw_destroy != NULL)) { res->hw_destroy(res); + mutex_lock(&dev_priv->binding_mutex); + vmw_context_binding_res_list_kill(&res->binding_head); + mutex_unlock(&dev_priv->binding_mutex); + } id = res->id; if (res->res_free != NULL) @@ -215,6 +224,7 @@ res->func = func; INIT_LIST_HEAD(&res->lru_head); INIT_LIST_HEAD(&res->mob_head); + INIT_LIST_HEAD(&res->binding_head); res->id = -1; res->backup = NULL; res->backup_offset = 0; @@ -417,8 +427,7 @@ INIT_LIST_HEAD(&vmw_bo->res_list); ret = ttm_bo_init(bdev, &vmw_bo->base, size, - (user) ? ttm_bo_type_device : - ttm_bo_type_kernel, placement, + ttm_bo_type_device, placement, 0, interruptible, NULL, acc_size, NULL, bo_free); return ret; @@ -441,6 +450,21 @@ ttm_bo_unref(&bo); } +static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base, + enum ttm_ref_type ref_type) +{ + struct vmw_user_dma_buffer *user_bo; + user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base); + + switch (ref_type) { + case TTM_REF_SYNCCPU_WRITE: + ttm_bo_synccpu_write_release(&user_bo->dma.base); + break; + default: + BUG(); + } +} + /** * vmw_user_dmabuf_alloc - Allocate a user dma buffer * @@ -471,6 +495,8 @@ } ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size, + (dev_priv->has_mob) ? 
+ &vmw_sys_placement : &vmw_vram_sys_placement, true, &vmw_user_dmabuf_destroy); if (unlikely(ret != 0)) @@ -482,7 +508,8 @@ &user_bo->prime, shareable, ttm_buffer_type, - &vmw_user_dmabuf_release, NULL); + &vmw_user_dmabuf_release, + &vmw_user_dmabuf_ref_obj_release); if (unlikely(ret != 0)) { ttm_bo_unref(&tmp); goto out_no_base_object; @@ -515,6 +542,130 @@ vmw_user_bo->prime.base.shareable) ? 0 : -EPERM; } +/** + * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu + * access, idling previous GPU operations on the buffer and optionally + * blocking it for further command submissions. + * + * @user_bo: Pointer to the buffer object being grabbed for CPU access + * @tfile: Identifying the caller. + * @flags: Flags indicating how the grab should be performed. + * + * A blocking grab will be automatically released when @tfile is closed. + */ +static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, + struct ttm_object_file *tfile, + uint32_t flags) +{ + struct ttm_buffer_object *bo = &user_bo->dma.base; + bool existed; + int ret; + + if (flags & drm_vmw_synccpu_allow_cs) { + struct ttm_bo_device *bdev = bo->bdev; + + spin_lock(&bdev->fence_lock); + ret = ttm_bo_wait(bo, false, true, + !!(flags & drm_vmw_synccpu_dontblock)); + spin_unlock(&bdev->fence_lock); + return ret; + } + + ret = ttm_bo_synccpu_write_grab + (bo, !!(flags & drm_vmw_synccpu_dontblock)); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_ref_object_add(tfile, &user_bo->prime.base, + TTM_REF_SYNCCPU_WRITE, &existed); + if (ret != 0 || existed) + ttm_bo_synccpu_write_release(&user_bo->dma.base); + + return ret; +} + +/** + * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access, + * and unblock command submission on the buffer if blocked. + * + * @handle: Handle identifying the buffer object. + * @tfile: Identifying the caller. + * @flags: Flags indicating the type of release. + */ +static int vmw_user_dmabuf_synccpu_release(uint32_t handle, + struct ttm_object_file *tfile, + uint32_t flags) +{ + if (!(flags & drm_vmw_synccpu_allow_cs)) + return ttm_ref_object_base_unref(tfile, handle, + TTM_REF_SYNCCPU_WRITE); + + return 0; +} + +/** + * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu + * functionality. + * + * @dev: Identifies the drm device. + * @data: Pointer to the ioctl argument. + * @file_priv: Identifies the caller. + * + * This function checks the ioctl arguments for validity and calls the + * relevant synccpu functions. 
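As a rough picture of how user space drives the synccpu interface: the op and flag names below are taken from the surrounding code, while the layout of struct drm_vmw_synccpu_arg, the DRM_VMW_SYNCCPU command index and the header name are assumptions about the vmwgfx UAPI, so treat this only as a sketch:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"     /* assumed UAPI header */

    /* Idle GPU access to a buffer, touch it from the CPU, then allow
     * further command submission again. */
    static int cpu_touch(int fd, uint32_t bo_handle)
    {
        struct drm_vmw_synccpu_arg arg;
        int ret;

        memset(&arg, 0, sizeof(arg));
        arg.handle = bo_handle;
        arg.op = drm_vmw_synccpu_grab;
        arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
        ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
        if (ret)
            return ret;

        /* ... read or write the buffer through an existing mmap() ... */

        arg.op = drm_vmw_synccpu_release;
        return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
    }

A blocking grab that is never released explicitly is cleaned up when the file is closed, since it is registered as a TTM_REF_SYNCCPU_WRITE reference on the caller's ttm_object_file.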
+ */ +int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_synccpu_arg *arg = + (struct drm_vmw_synccpu_arg *) data; + struct vmw_dma_buffer *dma_buf; + struct vmw_user_dma_buffer *user_bo; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret; + + if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 + || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | + drm_vmw_synccpu_dontblock | + drm_vmw_synccpu_allow_cs)) != 0) { + DRM_ERROR("Illegal synccpu flags.\n"); + return -EINVAL; + } + + switch (arg->op) { + case drm_vmw_synccpu_grab: + ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); + if (unlikely(ret != 0)) + return ret; + + user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, + dma); + ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); + vmw_dmabuf_unreference(&dma_buf); + if (unlikely(ret != 0 && ret != -ERESTARTSYS && + ret != -EBUSY)) { + DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", + (unsigned int) arg->handle); + return ret; + } + break; + case drm_vmw_synccpu_release: + ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile, + arg->flags); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", + (unsigned int) arg->handle); + return ret; + } + break; + default: + DRM_ERROR("Invalid synccpu operation.\n"); + return -EINVAL; + } + + return 0; +} + int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -591,7 +742,8 @@ } int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, - struct vmw_dma_buffer *dma_buf) + struct vmw_dma_buffer *dma_buf, + uint32_t *handle) { struct vmw_user_dma_buffer *user_bo; @@ -599,6 +751,8 @@ return -EINVAL; user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma); + + *handle = user_bo->prime.base.hash.key; return ttm_ref_object_add(tfile, &user_bo->prime.base, TTM_REF_USAGE, NULL); } @@ -1291,11 +1445,54 @@ * @mem: The truct ttm_mem_reg indicating to what memory * region the move is taking place. * - * For now does nothing. + * Evicts the Guest Backed hardware resource if the backup + * buffer is being moved out of MOB memory. + * Note that this function should not race with the resource + * validation code as long as it accesses only members of struct + * resource that remain static while bo::res is !NULL and + * while we have @bo reserved. struct resource::backup is *not* a + * static member. The resource validation code will take care + * to set @bo::res to NULL, while having @bo reserved when the + * buffer is no longer bound to the resource, so @bo:res can be + * used to determine whether there is a need to unbind and whether + * it is safe to unbind. 
*/ void vmw_resource_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) { + struct vmw_dma_buffer *dma_buf; + + if (mem == NULL) + return; + + if (bo->destroy != vmw_dmabuf_bo_free && + bo->destroy != vmw_user_dmabuf_destroy) + return; + + dma_buf = container_of(bo, struct vmw_dma_buffer, base); + + if (mem->mem_type != VMW_PL_MOB) { + struct vmw_resource *res, *n; + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_validate_buffer val_buf; + + val_buf.bo = bo; + + list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) { + + if (unlikely(res->func->unbind == NULL)) + continue; + + (void) res->func->unbind(res, true, &val_buf); + res->backup_dirty = true; + res->res_dirty = false; + list_del_init(&res->mob_head); + } + + spin_lock(&bdev->fence_lock); + (void) ttm_bo_wait(bo, false, false, false); + spin_unlock(&bdev->fence_lock); + } } /** --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -0,0 +1,812 @@ +/************************************************************************** + * + * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#include "vmwgfx_drv.h" +#include "vmwgfx_resource_priv.h" +#include "ttm/ttm_placement.h" + +#define VMW_COMPAT_SHADER_HT_ORDER 12 + +struct vmw_shader { + struct vmw_resource res; + SVGA3dShaderType type; + uint32_t size; +}; + +struct vmw_user_shader { + struct ttm_base_object base; + struct vmw_shader shader; +}; + +/** + * enum vmw_compat_shader_state - Staging state for compat shaders + */ +enum vmw_compat_shader_state { + VMW_COMPAT_COMMITED, + VMW_COMPAT_ADD, + VMW_COMPAT_DEL +}; + +/** + * struct vmw_compat_shader - Metadata for compat shaders. + * + * @handle: The TTM handle of the guest backed shader. + * @tfile: The struct ttm_object_file the guest backed shader is registered + * with. + * @hash: Hash item for lookup. + * @head: List head for staging lists or the compat shader manager list. + * @state: Staging state. + * + * The structure is protected by the cmdbuf lock. 
+ */ +struct vmw_compat_shader { + u32 handle; + struct ttm_object_file *tfile; + struct drm_hash_item hash; + struct list_head head; + enum vmw_compat_shader_state state; +}; + +/** + * struct vmw_compat_shader_manager - Compat shader manager. + * + * @shaders: Hash table containing staged and commited compat shaders + * @list: List of commited shaders. + * @dev_priv: Pointer to a device private structure. + * + * @shaders and @list are protected by the cmdbuf mutex for now. + */ +struct vmw_compat_shader_manager { + struct drm_open_hash shaders; + struct list_head list; + struct vmw_private *dev_priv; +}; + +static void vmw_user_shader_free(struct vmw_resource *res); +static struct vmw_resource * +vmw_user_shader_base_to_res(struct ttm_base_object *base); + +static int vmw_gb_shader_create(struct vmw_resource *res); +static int vmw_gb_shader_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf); +static int vmw_gb_shader_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf); +static int vmw_gb_shader_destroy(struct vmw_resource *res); + +static uint64_t vmw_user_shader_size; + +static const struct vmw_user_resource_conv user_shader_conv = { + .object_type = VMW_RES_SHADER, + .base_obj_to_res = vmw_user_shader_base_to_res, + .res_free = vmw_user_shader_free +}; + +const struct vmw_user_resource_conv *user_shader_converter = + &user_shader_conv; + + +static const struct vmw_res_func vmw_gb_shader_func = { + .res_type = vmw_res_shader, + .needs_backup = true, + .may_evict = true, + .type_name = "guest backed shaders", + .backup_placement = &vmw_mob_placement, + .create = vmw_gb_shader_create, + .destroy = vmw_gb_shader_destroy, + .bind = vmw_gb_shader_bind, + .unbind = vmw_gb_shader_unbind +}; + +/** + * Shader management: + */ + +static inline struct vmw_shader * +vmw_res_to_shader(struct vmw_resource *res) +{ + return container_of(res, struct vmw_shader, res); +} + +static void vmw_hw_shader_destroy(struct vmw_resource *res) +{ + (void) vmw_gb_shader_destroy(res); +} + +static int vmw_gb_shader_init(struct vmw_private *dev_priv, + struct vmw_resource *res, + uint32_t size, + uint64_t offset, + SVGA3dShaderType type, + struct vmw_dma_buffer *byte_code, + void (*res_free) (struct vmw_resource *res)) +{ + struct vmw_shader *shader = vmw_res_to_shader(res); + int ret; + + ret = vmw_resource_init(dev_priv, res, true, + res_free, &vmw_gb_shader_func); + + + if (unlikely(ret != 0)) { + if (res_free) + res_free(res); + else + kfree(res); + return ret; + } + + res->backup_size = size; + if (byte_code) { + res->backup = vmw_dmabuf_reference(byte_code); + res->backup_offset = offset; + } + shader->size = size; + shader->type = type; + + vmw_resource_activate(res, vmw_hw_shader_destroy); + return 0; +} + +static int vmw_gb_shader_create(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_shader *shader = vmw_res_to_shader(res); + int ret; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineGBShader body; + } *cmd; + + if (likely(res->id != -1)) + return 0; + + ret = vmw_resource_alloc_id(res); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a shader id.\n"); + goto out_no_id; + } + + if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) { + ret = -EBUSY; + goto out_no_fifo; + } + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "creation.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + cmd->header.id = 
SVGA_3D_CMD_DEFINE_GB_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.shid = res->id; + cmd->body.type = shader->type; + cmd->body.sizeInBytes = shader->size; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + (void) vmw_3d_resource_inc(dev_priv, false); + + return 0; + +out_no_fifo: + vmw_resource_release_id(res); +out_no_id: + return ret; +} + +static int vmw_gb_shader_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBShader body; + } *cmd; + struct ttm_buffer_object *bo = val_buf->bo; + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "binding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.shid = res->id; + cmd->body.mobid = bo->mem.start; + cmd->body.offsetInBytes = 0; + res->backup_dirty = false; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + return 0; +} + +static int vmw_gb_shader_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBShader body; + } *cmd; + struct vmw_fence_obj *fence; + + BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "unbinding.\n"); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.shid = res->id; + cmd->body.mobid = SVGA3D_INVALID_ID; + cmd->body.offsetInBytes = 0; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + + /* + * Create a fence object and fence the backup buffer. 
+ */ + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + + vmw_fence_single_bo(val_buf->bo, fence); + + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + + return 0; +} + +static int vmw_gb_shader_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyGBShader body; + } *cmd; + + if (likely(res->id == -1)) + return 0; + + mutex_lock(&dev_priv->binding_mutex); + vmw_context_binding_res_list_scrub(&res->binding_head); + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for shader " + "destruction.\n"); + mutex_unlock(&dev_priv->binding_mutex); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; + cmd->header.size = sizeof(cmd->body); + cmd->body.shid = res->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + mutex_unlock(&dev_priv->binding_mutex); + vmw_resource_release_id(res); + vmw_3d_resource_dec(dev_priv, false); + + return 0; +} + +/** + * User-space shader management: + */ + +static struct vmw_resource * +vmw_user_shader_base_to_res(struct ttm_base_object *base) +{ + return &(container_of(base, struct vmw_user_shader, base)-> + shader.res); +} + +static void vmw_user_shader_free(struct vmw_resource *res) +{ + struct vmw_user_shader *ushader = + container_of(res, struct vmw_user_shader, shader.res); + struct vmw_private *dev_priv = res->dev_priv; + + ttm_base_object_kfree(ushader, base); + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_shader_size); +} + +/** + * This function is called when user space has no more references on the + * base object. It releases the base-object's reference on the resource object. + */ + +static void vmw_user_shader_base_release(struct ttm_base_object **p_base) +{ + struct ttm_base_object *base = *p_base; + struct vmw_resource *res = vmw_user_shader_base_to_res(base); + + *p_base = NULL; + vmw_resource_unreference(&res); +} + +int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + + return ttm_ref_object_base_unref(tfile, arg->handle, + TTM_REF_USAGE); +} + +static int vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type, + struct ttm_object_file *tfile, + u32 *handle) +{ + struct vmw_user_shader *ushader; + struct vmw_resource *res, *tmp; + int ret; + + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of shaders anyway. + */ + if (unlikely(vmw_user_shader_size == 0)) + vmw_user_shader_size = + ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_user_shader_size, + false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for shader " + "creation.\n"); + goto out; + } + + ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); + if (unlikely(ushader == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_shader_size); + ret = -ENOMEM; + goto out; + } + + res = &ushader->shader.res; + ushader->base.shareable = false; + ushader->base.tfile = NULL; + + /* + * From here on, the destructor takes over resource freeing. 
+ */ + + ret = vmw_gb_shader_init(dev_priv, res, shader_size, + offset, shader_type, buffer, + vmw_user_shader_free); + if (unlikely(ret != 0)) + goto out; + + tmp = vmw_resource_reference(res); + ret = ttm_base_object_init(tfile, &ushader->base, false, + VMW_RES_SHADER, + &vmw_user_shader_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + if (handle) + *handle = ushader->base.hash.key; +out_err: + vmw_resource_unreference(&res); +out: + return ret; +} + + +int vmw_shader_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct drm_vmw_shader_create_arg *arg = + (struct drm_vmw_shader_create_arg *)data; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_master *vmaster = vmw_master(file_priv->master); + struct vmw_dma_buffer *buffer = NULL; + SVGA3dShaderType shader_type; + int ret; + + if (arg->buffer_handle != SVGA3D_INVALID_ID) { + ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, + &buffer); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not find buffer for shader " + "creation.\n"); + return ret; + } + + if ((u64)buffer->base.num_pages * PAGE_SIZE < + (u64)arg->size + (u64)arg->offset) { + DRM_ERROR("Illegal buffer- or shader size.\n"); + ret = -EINVAL; + goto out_bad_arg; + } + } + + switch (arg->shader_type) { + case drm_vmw_shader_type_vs: + shader_type = SVGA3D_SHADERTYPE_VS; + break; + case drm_vmw_shader_type_ps: + shader_type = SVGA3D_SHADERTYPE_PS; + break; + case drm_vmw_shader_type_gs: + shader_type = SVGA3D_SHADERTYPE_GS; + break; + default: + DRM_ERROR("Illegal shader type.\n"); + ret = -EINVAL; + goto out_bad_arg; + } + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + goto out_bad_arg; + + ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, + shader_type, tfile, &arg->shader_handle); + + ttm_read_unlock(&vmaster->lock); +out_bad_arg: + vmw_dmabuf_unreference(&buffer); + return ret; +} + +/** + * vmw_compat_shader_lookup - Look up a compat shader + * + * @man: Pointer to the compat shader manager. + * @shader_type: The shader type, that combined with the user_key identifies + * the shader. + * @user_key: On entry, this should be a pointer to the user_key. + * On successful exit, it will contain the guest-backed shader's TTM handle. + * + * Returns 0 on success. Non-zero on failure, in which case the value pointed + * to by @user_key is unmodified. + */ +int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, + SVGA3dShaderType shader_type, + u32 *user_key) +{ + struct drm_hash_item *hash; + int ret; + unsigned long key = *user_key | (shader_type << 24); + + ret = drm_ht_find_item(&man->shaders, key, &hash); + if (unlikely(ret != 0)) + return ret; + + *user_key = drm_hash_entry(hash, struct vmw_compat_shader, + hash)->handle; + + return 0; +} + +/** + * vmw_compat_shader_free - Free a compat shader. + * + * @man: Pointer to the compat shader manager. + * @entry: Pointer to a struct vmw_compat_shader. + * + * Frees a struct vmw_compat_shder entry and drops its reference to the + * guest backed shader. 
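The compat shader bookkeeping keys its hash table on a combination of the user-supplied key and the shader type. A minimal sketch of that packing, with plain C types standing in for the kernel's u32 and SVGA3dShaderType:

    #include <stdint.h>

    /*
     * The low 24 bits carry the user key and the shader type sits above
     * them, which is why vmw_compat_shader_add() rejects user_key values
     * larger than (1 << 24) - 1.
     */
    static inline unsigned long compat_shader_key(uint32_t user_key,
                                                  unsigned int shader_type)
    {
        return user_key | ((unsigned long)shader_type << 24);
    }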
+ */ +static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, + struct vmw_compat_shader *entry) +{ + list_del(&entry->head); + WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); + WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, + TTM_REF_USAGE)); + kfree(entry); +} + +/** + * vmw_compat_shaders_commit - Commit a list of compat shader actions. + * + * @man: Pointer to the compat shader manager. + * @list: Caller's list of compat shader actions. + * + * This function commits a list of compat shader additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions has commited the fifo contents to the device. + */ +void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, + struct list_head *list) +{ + struct vmw_compat_shader *entry, *next; + + list_for_each_entry_safe(entry, next, list, head) { + list_del(&entry->head); + switch (entry->state) { + case VMW_COMPAT_ADD: + entry->state = VMW_COMPAT_COMMITED; + list_add_tail(&entry->head, &man->list); + break; + case VMW_COMPAT_DEL: + ttm_ref_object_base_unref(entry->tfile, entry->handle, + TTM_REF_USAGE); + kfree(entry); + break; + default: + BUG(); + break; + } + } +} + +/** + * vmw_compat_shaders_revert - Revert a list of compat shader actions + * + * @man: Pointer to the compat shader manager. + * @list: Caller's list of compat shader actions. + * + * This function reverts a list of compat shader additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions failed for some reason, and the command stream was never + * submitted. + */ +void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, + struct list_head *list) +{ + struct vmw_compat_shader *entry, *next; + int ret; + + list_for_each_entry_safe(entry, next, list, head) { + switch (entry->state) { + case VMW_COMPAT_ADD: + vmw_compat_shader_free(man, entry); + break; + case VMW_COMPAT_DEL: + ret = drm_ht_insert_item(&man->shaders, &entry->hash); + list_del(&entry->head); + list_add_tail(&entry->head, &man->list); + entry->state = VMW_COMPAT_COMMITED; + break; + default: + BUG(); + break; + } + } +} + +/** + * vmw_compat_shader_remove - Stage a compat shader for removal. + * + * @man: Pointer to the compat shader manager + * @user_key: The key that is used to identify the shader. The key is + * unique to the shader type. + * @shader_type: Shader type. + * @list: Caller's list of staged shader actions. + * + * This function stages a compat shader for removal and removes the key from + * the shader manager's hash table. If the shader was previously only staged + * for addition it is completely removed (But the execbuf code may keep a + * reference if it was bound to a context between addition and removal). If + * it was previously commited to the manager, it is staged for removal. 
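Taken together, vmw_compat_shaders_commit() and vmw_compat_shaders_revert() above implement a small two-way state machine over the staged entries; in summary, using the state names from the enum earlier in this file:

    VMW_COMPAT_ADD:  commit turns the entry into VMW_COMPAT_COMMITED and moves
                     it onto the manager's list; revert frees it again and
                     drops its hash key and TTM reference.
    VMW_COMPAT_DEL:  commit drops the TTM reference and frees the entry;
                     revert re-inserts the hash key and restores the entry to
                     the manager's list as VMW_COMPAT_COMMITED.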
+ */ +int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list) +{ + struct vmw_compat_shader *entry; + struct drm_hash_item *hash; + int ret; + + ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), + &hash); + if (likely(ret != 0)) + return -EINVAL; + + entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); + + switch (entry->state) { + case VMW_COMPAT_ADD: + vmw_compat_shader_free(man, entry); + break; + case VMW_COMPAT_COMMITED: + (void) drm_ht_remove_item(&man->shaders, &entry->hash); + list_del(&entry->head); + entry->state = VMW_COMPAT_DEL; + list_add_tail(&entry->head, list); + break; + default: + BUG(); + break; + } + + return 0; +} + +/** + * vmw_compat_shader_add - Create a compat shader and add the + * key to the manager + * + * @man: Pointer to the compat shader manager + * @user_key: The key that is used to identify the shader. The key is + * unique to the shader type. + * @bytecode: Pointer to the bytecode of the shader. + * @shader_type: Shader type. + * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is + * to be created with. + * @list: Caller's list of staged shader actions. + * + * Note that only the key is added to the shader manager's hash table. + * The shader is not yet added to the shader manager's list of shaders. + */ +int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, + u32 user_key, const void *bytecode, + SVGA3dShaderType shader_type, + size_t size, + struct ttm_object_file *tfile, + struct list_head *list) +{ + struct vmw_dma_buffer *buf; + struct ttm_bo_kmap_obj map; + bool is_iomem; + struct vmw_compat_shader *compat; + u32 handle; + int ret; + + if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16) + return -EINVAL; + + /* Allocate and pin a DMA buffer */ + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (unlikely(buf == NULL)) + return -ENOMEM; + + ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement, + true, vmw_dmabuf_bo_free); + if (unlikely(ret != 0)) + goto out; + + ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); + if (unlikely(ret != 0)) + goto no_reserve; + + /* Map and copy shader bytecode. 
*/ + ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT, + &map); + if (unlikely(ret != 0)) { + ttm_bo_unreserve(&buf->base); + goto no_reserve; + } + + memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size); + WARN_ON(is_iomem); + + ttm_bo_kunmap(&map); + ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true); + WARN_ON(ret != 0); + ttm_bo_unreserve(&buf->base); + + /* Create a guest-backed shader container backed by the dma buffer */ + ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type, + tfile, &handle); + vmw_dmabuf_unreference(&buf); + if (unlikely(ret != 0)) + goto no_reserve; + /* + * Create a compat shader structure and stage it for insertion + * in the manager + */ + compat = kzalloc(sizeof(*compat), GFP_KERNEL); + if (compat == NULL) + goto no_compat; + + compat->hash.key = user_key | (shader_type << 24); + ret = drm_ht_insert_item(&man->shaders, &compat->hash); + if (unlikely(ret != 0)) + goto out_invalid_key; + + compat->state = VMW_COMPAT_ADD; + compat->handle = handle; + compat->tfile = tfile; + list_add_tail(&compat->head, list); + + return 0; + +out_invalid_key: + kfree(compat); +no_compat: + ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); +no_reserve: +out: + return ret; +} + +/** + * vmw_compat_shader_man_create - Create a compat shader manager + * + * @dev_priv: Pointer to a device private structure. + * + * Typically done at file open time. If successful returns a pointer to a + * compat shader manager. Otherwise returns an error pointer. + */ +struct vmw_compat_shader_manager * +vmw_compat_shader_man_create(struct vmw_private *dev_priv) +{ + struct vmw_compat_shader_manager *man; + int ret; + + man = kzalloc(sizeof(*man), GFP_KERNEL); + if (man == NULL) + return ERR_PTR(-ENOMEM); + + man->dev_priv = dev_priv; + INIT_LIST_HEAD(&man->list); + ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER); + if (ret == 0) + return man; + + kfree(man); + return ERR_PTR(ret); +} + +/** + * vmw_compat_shader_man_destroy - Destroy a compat shader manager + * + * @man: Pointer to the shader manager to destroy. + * + * Typically done at file close time. 
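Putting the two manager entry points together, the expected per-file lifecycle looks roughly like the fragment below; the per-open-file placement follows the "file open/close time" remarks and error handling is trimmed, so this is a sketch rather than driver code:

    /* At file open time: */
    struct vmw_compat_shader_manager *man;

    man = vmw_compat_shader_man_create(dev_priv);
    if (IS_ERR(man))
        return PTR_ERR(man);

    /*
     * Execbuf paths then stage additions and removals on a local list and
     * finish with either vmw_compat_shaders_commit() or, on failure,
     * vmw_compat_shaders_revert().
     */

    /* At file close time: */
    vmw_compat_shader_man_destroy(man);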
+ */ +void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man) +{ + struct vmw_compat_shader *entry, *next; + + mutex_lock(&man->dev_priv->cmdbuf_mutex); + list_for_each_entry_safe(entry, next, &man->list, head) + vmw_compat_shader_free(man, entry); + + mutex_unlock(&man->dev_priv->cmdbuf_mutex); + kfree(man); +} --- linux-3.13.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ linux-3.13.0/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -41,7 +41,6 @@ struct ttm_prime_object prime; struct vmw_surface srf; uint32_t size; - uint32_t backup_handle; }; /** @@ -68,6 +67,14 @@ struct ttm_validate_buffer *val_buf); static int vmw_legacy_srf_create(struct vmw_resource *res); static int vmw_legacy_srf_destroy(struct vmw_resource *res); +static int vmw_gb_surface_create(struct vmw_resource *res); +static int vmw_gb_surface_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf); +static int vmw_gb_surface_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf); +static int vmw_gb_surface_destroy(struct vmw_resource *res); + static const struct vmw_user_resource_conv user_surface_conv = { .object_type = VMW_RES_SURFACE, @@ -93,6 +100,18 @@ .unbind = &vmw_legacy_srf_unbind }; +static const struct vmw_res_func vmw_gb_surface_func = { + .res_type = vmw_res_surface, + .needs_backup = true, + .may_evict = true, + .type_name = "guest backed surfaces", + .backup_placement = &vmw_mob_placement, + .create = vmw_gb_surface_create, + .destroy = vmw_gb_surface_destroy, + .bind = vmw_gb_surface_bind, + .unbind = vmw_gb_surface_unbind +}; + /** * struct vmw_surface_dma - SVGA3D DMA command */ @@ -291,6 +310,11 @@ struct vmw_surface *srf; void *cmd; + if (res->func->destroy == vmw_gb_surface_destroy) { + (void) vmw_gb_surface_destroy(res); + return; + } + if (res->id != -1) { cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); @@ -549,12 +573,15 @@ struct vmw_resource *res = &srf->res; BUG_ON(res_free == NULL); - (void) vmw_3d_resource_inc(dev_priv, false); + if (!dev_priv->has_mob) + (void) vmw_3d_resource_inc(dev_priv, false); ret = vmw_resource_init(dev_priv, res, true, res_free, + (dev_priv->has_mob) ? &vmw_gb_surface_func : &vmw_legacy_surface_func); if (unlikely(ret != 0)) { - vmw_3d_resource_dec(dev_priv, false); + if (!dev_priv->has_mob) + vmw_3d_resource_dec(dev_priv, false); res_free(res); return ret; } @@ -750,7 +777,7 @@ srf->base_size = *srf->sizes; srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; - srf->multisample_count = 1; + srf->multisample_count = 0; cur_bo_offset = 0; cur_offset = srf->offsets; @@ -803,6 +830,24 @@ if (unlikely(ret != 0)) goto out_unlock; + /* + * A gb-aware client referencing a shared surface will + * expect a backup buffer to be present. 
+ */ + if (dev_priv->has_mob && req->shareable) { + uint32_t backup_handle; + + ret = vmw_user_dmabuf_alloc(dev_priv, tfile, + res->backup_size, + true, + &backup_handle, + &res->backup); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + } + tmp = vmw_resource_reference(&srf->res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->shareable, VMW_RES_SURFACE, @@ -843,6 +888,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vmw_private *dev_priv = vmw_priv(dev); union drm_vmw_surface_reference_arg *arg = (union drm_vmw_surface_reference_arg *)data; struct drm_vmw_surface_arg *req = &arg->req; @@ -854,7 +900,7 @@ struct ttm_base_object *base; int ret = -EINVAL; - base = ttm_base_object_lookup(tfile, req->sid); + base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); if (unlikely(base == NULL)) { DRM_ERROR("Could not find surface to reference.\n"); return -EINVAL; @@ -880,8 +926,8 @@ rep->size_addr; if (user_sizes) - ret = copy_to_user(user_sizes, srf->sizes, - srf->num_sizes * sizeof(*srf->sizes)); + ret = copy_to_user(user_sizes, &srf->base_size, + sizeof(srf->base_size)); if (unlikely(ret != 0)) { DRM_ERROR("copy_to_user failed %p %u\n", user_sizes, srf->num_sizes); @@ -892,4 +938,437 @@ ttm_base_object_unref(&base); return ret; +} + +/** + * vmw_surface_define_encode - Encode a surface_define command. + * + * @srf: Pointer to a struct vmw_surface object. + * @cmd_space: Pointer to memory area in which the commands should be encoded. + */ +static int vmw_gb_surface_create(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct vmw_surface *srf = vmw_res_to_srf(res); + uint32_t cmd_len, submit_len; + int ret; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDefineGBSurface body; + } *cmd; + + if (likely(res->id != -1)) + return 0; + + (void) vmw_3d_resource_inc(dev_priv, false); + ret = vmw_resource_alloc_id(res); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed to allocate a surface id.\n"); + goto out_no_id; + } + + if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { + ret = -EBUSY; + goto out_no_fifo; + } + + cmd_len = sizeof(cmd->body); + submit_len = sizeof(*cmd); + cmd = vmw_fifo_reserve(dev_priv, submit_len); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "creation.\n"); + ret = -ENOMEM; + goto out_no_fifo; + } + + cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; + cmd->header.size = cmd_len; + cmd->body.sid = srf->res.id; + cmd->body.surfaceFlags = srf->flags; + cmd->body.format = cpu_to_le32(srf->format); + cmd->body.numMipLevels = srf->mip_levels[0]; + cmd->body.multisampleCount = srf->multisample_count; + cmd->body.autogenFilter = srf->autogen_filter; + cmd->body.size.width = srf->base_size.width; + cmd->body.size.height = srf->base_size.height; + cmd->body.size.depth = srf->base_size.depth; + vmw_fifo_commit(dev_priv, submit_len); + + return 0; + +out_no_fifo: + vmw_resource_release_id(res); +out_no_id: + vmw_3d_resource_dec(dev_priv, false); + return ret; +} + + +static int vmw_gb_surface_bind(struct vmw_resource *res, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBSurface body; + } *cmd1; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdUpdateGBSurface body; + } *cmd2; + uint32_t submit_size; + struct ttm_buffer_object *bo = val_buf->bo; + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + + 
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); + + cmd1 = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd1 == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "binding.\n"); + return -ENOMEM; + } + + cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; + cmd1->header.size = sizeof(cmd1->body); + cmd1->body.sid = res->id; + cmd1->body.mobid = bo->mem.start; + if (res->backup_dirty) { + cmd2 = (void *) &cmd1[1]; + cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; + cmd2->header.size = sizeof(cmd2->body); + cmd2->body.sid = res->id; + res->backup_dirty = false; + } + vmw_fifo_commit(dev_priv, submit_size); + + return 0; +} + +static int vmw_gb_surface_unbind(struct vmw_resource *res, + bool readback, + struct ttm_validate_buffer *val_buf) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct ttm_buffer_object *bo = val_buf->bo; + struct vmw_fence_obj *fence; + + struct { + SVGA3dCmdHeader header; + SVGA3dCmdReadbackGBSurface body; + } *cmd1; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdInvalidateGBSurface body; + } *cmd2; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdBindGBSurface body; + } *cmd3; + uint32_t submit_size; + uint8_t *cmd; + + + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + + submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); + cmd = vmw_fifo_reserve(dev_priv, submit_size); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "unbinding.\n"); + return -ENOMEM; + } + + if (readback) { + cmd1 = (void *) cmd; + cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; + cmd1->header.size = sizeof(cmd1->body); + cmd1->body.sid = res->id; + cmd3 = (void *) &cmd1[1]; + } else { + cmd2 = (void *) cmd; + cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; + cmd2->header.size = sizeof(cmd2->body); + cmd2->body.sid = res->id; + cmd3 = (void *) &cmd2[1]; + } + + cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; + cmd3->header.size = sizeof(cmd3->body); + cmd3->body.sid = res->id; + cmd3->body.mobid = SVGA3D_INVALID_ID; + + vmw_fifo_commit(dev_priv, submit_size); + + /* + * Create a fence object and fence the backup buffer. + */ + + (void) vmw_execbuf_fence_commands(NULL, dev_priv, + &fence, NULL); + + vmw_fence_single_bo(val_buf->bo, fence); + + if (likely(fence != NULL)) + vmw_fence_obj_unreference(&fence); + + return 0; +} + +static int vmw_gb_surface_destroy(struct vmw_resource *res) +{ + struct vmw_private *dev_priv = res->dev_priv; + struct { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyGBSurface body; + } *cmd; + + if (likely(res->id == -1)) + return 0; + + mutex_lock(&dev_priv->binding_mutex); + vmw_context_binding_res_list_scrub(&res->binding_head); + + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for surface " + "destruction.\n"); + mutex_unlock(&dev_priv->binding_mutex); + return -ENOMEM; + } + + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; + cmd->header.size = sizeof(cmd->body); + cmd->body.sid = res->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + mutex_unlock(&dev_priv->binding_mutex); + vmw_resource_release_id(res); + vmw_3d_resource_dec(dev_priv, false); + + return 0; +} + +/** + * vmw_gb_surface_define_ioctl - Ioctl function implementing + * the user surface define functionality. + * + * @dev: Pointer to a struct drm_device. + * @data: Pointer to data copied from / to user-space. + * @file_priv: Pointer to a drm file private structure. 
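For orientation, a user-space caller of the define ioctl might look roughly as follows. The request and reply field names appear in the code below; the command index macro, the SVGA3D format constant, the header name and the exact UAPI layout are assumptions, so this is only a sketch:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"     /* assumed UAPI header */

    /* Create a 64x64 guest-backed surface and let the kernel allocate the
     * backing buffer for it. */
    static int create_gb_surface(int fd, uint32_t *handle, uint64_t *map_offset)
    {
        union drm_vmw_gb_surface_create_arg arg;
        int ret;

        memset(&arg, 0, sizeof(arg));
        arg.req.format = SVGA3D_X8R8G8B8;          /* assumed format id */
        arg.req.mip_levels = 1;
        arg.req.base_size.width = 64;
        arg.req.base_size.height = 64;
        arg.req.base_size.depth = 1;
        arg.req.buffer_handle = SVGA3D_INVALID_ID; /* no existing buffer */
        arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;

        ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
                                  &arg, sizeof(arg));
        if (ret)
            return ret;

        *handle = arg.rep.handle;
        *map_offset = arg.rep.buffer_map_handle;
        return 0;
    }

The returned buffer_map_handle is the fake offset produced by drm_vma_node_offset_addr() in the code below; passing it as the offset to mmap() on the DRM file descriptor, with rep.buffer_size as the length, maps the backing buffer.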
+ */ +int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_user_surface *user_srf; + struct vmw_surface *srf; + struct vmw_resource *res; + struct vmw_resource *tmp; + union drm_vmw_gb_surface_create_arg *arg = + (union drm_vmw_gb_surface_create_arg *)data; + struct drm_vmw_gb_surface_create_req *req = &arg->req; + struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + int ret; + uint32_t size; + struct vmw_master *vmaster = vmw_master(file_priv->master); + const struct svga3d_surface_desc *desc; + uint32_t backup_handle; + + if (unlikely(vmw_user_surface_size == 0)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + + 128; + + size = vmw_user_surface_size + 128; + + desc = svga3dsurface_get_desc(req->format); + if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { + DRM_ERROR("Invalid surface format for surface creation.\n"); + return -EINVAL; + } + + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + size, false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for surface" + " creation.\n"); + goto out_unlock; + } + + user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); + if (unlikely(user_srf == NULL)) { + ret = -ENOMEM; + goto out_no_user_srf; + } + + srf = &user_srf->srf; + res = &srf->res; + + srf->flags = req->svga3d_flags; + srf->format = req->format; + srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; + srf->mip_levels[0] = req->mip_levels; + srf->num_sizes = 1; + srf->sizes = NULL; + srf->offsets = NULL; + user_srf->size = size; + srf->base_size = req->base_size; + srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; + srf->multisample_count = req->multisample_count; + res->backup_size = svga3dsurface_get_serialized_size + (srf->format, srf->base_size, srf->mip_levels[0], + srf->flags & SVGA3D_SURFACE_CUBEMAP); + + user_srf->prime.base.shareable = false; + user_srf->prime.base.tfile = NULL; + + /** + * From this point, the generic resource management functions + * destroy the object on failure. 
+ */ + + ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); + if (unlikely(ret != 0)) + goto out_unlock; + + if (req->buffer_handle != SVGA3D_INVALID_ID) { + ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, + &res->backup); + } else if (req->drm_surface_flags & + drm_vmw_surface_flag_create_buffer) + ret = vmw_user_dmabuf_alloc(dev_priv, tfile, + res->backup_size, + req->drm_surface_flags & + drm_vmw_surface_flag_shareable, + &backup_handle, + &res->backup); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + + tmp = vmw_resource_reference(&srf->res); + ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, + req->drm_surface_flags & + drm_vmw_surface_flag_shareable, + VMW_RES_SURFACE, + &vmw_user_surface_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + vmw_resource_unreference(&res); + goto out_unlock; + } + + rep->handle = user_srf->prime.base.hash.key; + rep->backup_size = res->backup_size; + if (res->backup) { + rep->buffer_map_handle = + drm_vma_node_offset_addr(&res->backup->base.vma_node); + rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; + rep->buffer_handle = backup_handle; + } else { + rep->buffer_map_handle = 0; + rep->buffer_size = 0; + rep->buffer_handle = SVGA3D_INVALID_ID; + } + + vmw_resource_unreference(&res); + + ttm_read_unlock(&vmaster->lock); + return 0; +out_no_user_srf: + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +out_unlock: + ttm_read_unlock(&vmaster->lock); + return ret; +} + +/** + * vmw_gb_surface_reference_ioctl - Ioctl function implementing + * the user surface reference functionality. + * + * @dev: Pointer to a struct drm_device. + * @data: Pointer to data copied from / to user-space. + * @file_priv: Pointer to a drm file private structure. 
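A second, GB-aware client that has been given the surface id can pick up both the surface and its backing buffer in one call. A rough counterpart to the create sketch above, under the same UAPI assumptions:

    static int open_shared_gb_surface(int fd, uint32_t sid)
    {
        union drm_vmw_gb_surface_reference_arg arg;
        int ret;

        memset(&arg, 0, sizeof(arg));
        arg.req.sid = sid;
        ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_REF,
                                  &arg, sizeof(arg));
        if (ret)
            return ret;

        /* arg.rep.creq echoes the creation parameters; arg.rep.crep carries
         * the backup buffer handle, its size and the mmap() offset, just
         * like the create reply. */
        return 0;
    }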
+ */ +int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + union drm_vmw_gb_surface_reference_arg *arg = + (union drm_vmw_gb_surface_reference_arg *)data; + struct drm_vmw_surface_arg *req = &arg->req; + struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_surface *srf; + struct vmw_user_surface *user_srf; + struct ttm_base_object *base; + uint32_t backup_handle; + int ret = -EINVAL; + + base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid); + if (unlikely(base == NULL)) { + DRM_ERROR("Could not find surface to reference.\n"); + return -EINVAL; + } + + if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) + goto out_bad_resource; + + user_srf = container_of(base, struct vmw_user_surface, prime.base); + srf = &user_srf->srf; + if (srf->res.backup == NULL) { + DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); + goto out_bad_resource; + } + + ret = ttm_ref_object_add(tfile, &user_srf->prime.base, + TTM_REF_USAGE, NULL); + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a GB surface.\n"); + goto out_bad_resource; + } + + mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ + ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, + &backup_handle); + mutex_unlock(&dev_priv->cmdbuf_mutex); + + if (unlikely(ret != 0)) { + DRM_ERROR("Could not add a reference to a GB surface " + "backup buffer.\n"); + (void) ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, + req->sid, + TTM_REF_USAGE); + goto out_bad_resource; + } + + rep->creq.svga3d_flags = srf->flags; + rep->creq.format = srf->format; + rep->creq.mip_levels = srf->mip_levels[0]; + rep->creq.drm_surface_flags = 0; + rep->creq.multisample_count = srf->multisample_count; + rep->creq.autogen_filter = srf->autogen_filter; + rep->creq.buffer_handle = backup_handle; + rep->creq.base_size = srf->base_size; + rep->crep.handle = user_srf->prime.base.hash.key; + rep->crep.backup_size = srf->res.backup_size; + rep->crep.buffer_handle = backup_handle; + rep->crep.buffer_map_handle = + drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); + rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; + +out_bad_resource: + ttm_base_object_unref(&base); + + return ret; } --- linux-3.13.0.orig/drivers/gpu/vga/vga_switcheroo.c +++ linux-3.13.0/drivers/gpu/vga/vga_switcheroo.c @@ -623,7 +623,8 @@ ret = dev->bus->pm->runtime_suspend(dev); if (ret) return ret; - + if (vgasr_priv.handler->switchto) + vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD); vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF); return 0; } @@ -659,6 +660,12 @@ } EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops); +void vga_switcheroo_fini_domain_pm_ops(struct device *dev) +{ + dev->pm_domain = NULL; +} +EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops); + static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); --- linux-3.13.0.orig/drivers/gpu/vga/vgaarb.c +++ linux-3.13.0/drivers/gpu/vga/vgaarb.c @@ -41,6 +41,7 @@ #include #include #include +#include #include @@ -580,8 +581,11 @@ */ #ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE if (vga_default == NULL && - ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) + ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) { + pr_info("vgaarb: setting as boot device: PCI:%s\n", + pci_name(pdev)); vga_set_default_device(pdev); + } 
#endif vga_arbiter_check_bridge_sharing(vgadev); @@ -1316,6 +1320,38 @@ pr_info("vgaarb: loaded\n"); list_for_each_entry(vgadev, &vga_list, list) { +#if defined(CONFIG_X86) || defined(CONFIG_IA64) + /* Override I/O based detection done by vga_arbiter_add_pci_device() + * as it may take the wrong device (e.g. on Apple system under EFI). + * + * Select the device owning the boot framebuffer if there is one. + */ + resource_size_t start, end; + int i; + + /* Does firmware framebuffer belong to us? */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (!(pci_resource_flags(vgadev->pdev, i) & IORESOURCE_MEM)) + continue; + + start = pci_resource_start(vgadev->pdev, i); + end = pci_resource_end(vgadev->pdev, i); + + if (!start || !end) + continue; + + if (screen_info.lfb_base < start || + (screen_info.lfb_base + screen_info.lfb_size) >= end) + continue; + if (!vga_default_device()) + pr_info("vgaarb: setting as boot device: PCI:%s\n", + pci_name(vgadev->pdev)); + else if (vgadev->pdev != vga_default_device()) + pr_info("vgaarb: overriding boot device: PCI:%s\n", + pci_name(vgadev->pdev)); + vga_set_default_device(vgadev->pdev); + } +#endif if (vgadev->bridge_has_one_vga) pr_info("vgaarb: bridge control possible %s\n", pci_name(vgadev->pdev)); else --- linux-3.13.0.orig/drivers/hid/Kconfig +++ linux-3.13.0/drivers/hid/Kconfig @@ -27,7 +27,7 @@ config HID_BATTERY_STRENGTH bool "Battery level reporting for HID devices" - depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY + depends on HID && POWER_SUPPLY default n ---help--- This option adds support of reporting battery strength (for HID devices @@ -645,6 +645,14 @@ ---help--- Support for Sunplus wireless desktop. +config HID_RMI + tristate "Synaptics RMI4 device support" + depends on HID + ---help--- + Support for Synaptics RMI4 touchpads. + Say Y here if you have a Synaptics RMI4 touchpads over i2c-hid or usbhid + and want support for its special functionalities. 
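For reference, enabling the new driver follows the usual Kconfig pattern: it only depends on HID, so CONFIG_HID plus CONFIG_HID_RMI=m (or =y) is sufficient to build it, although in practice a transport driver such as i2c-hid or usbhid, as mentioned in the help text, is also needed to reach the touchpad.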
+ config HID_GREENASIA tristate "GreenAsia (Product ID 0x12) game controller support" depends on HID --- linux-3.13.0.orig/drivers/hid/Makefile +++ linux-3.13.0/drivers/hid/Makefile @@ -96,6 +96,7 @@ hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \ hid-roccat-koneplus.o hid-roccat-konepure.o hid-roccat-kovaplus.o \ hid-roccat-lua.o hid-roccat-pyra.o hid-roccat-ryos.o hid-roccat-savu.o +obj-$(CONFIG_HID_RMI) += hid-rmi.o obj-$(CONFIG_HID_SAITEK) += hid-saitek.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o --- linux-3.13.0.orig/drivers/hid/hid-cherry.c +++ linux-3.13.0/drivers/hid/hid-cherry.c @@ -28,7 +28,7 @@ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { + if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; --- linux-3.13.0.orig/drivers/hid/hid-core.c +++ linux-3.13.0/drivers/hid/hid-core.c @@ -718,6 +718,9 @@ case HID_MAIN_ITEM_TAG_END_COLLECTION: break; case HID_MAIN_ITEM_TAG_INPUT: + /* ignore constant inputs, they will be ignored by hid-input */ + if (data & HID_MAIN_ITEM_CONSTANT) + break; for (i = 0; i < parser->local.usage_index; i++) hid_scan_input_usage(parser, parser->local.usage[i]); break; @@ -776,6 +779,14 @@ (hid->group == HID_GROUP_MULTITOUCH)) hid->group = HID_GROUP_MULTITOUCH_WIN_8; + /* + * Vendor specific handlings + */ + if ((hid->vendor == USB_VENDOR_ID_SYNAPTICS) && + (hid->group == HID_GROUP_GENERIC)) + /* hid-rmi should take care of them, not hid-generic */ + hid->group = HID_GROUP_RMI; + vfree(parser); return 0; } @@ -839,7 +850,17 @@ * ->numbered being checked, which may not always be the case when * drivers go to access report values. */ - report = hid->report_enum[type].report_id_hash[id]; + if (id == 0) { + /* + * Validating on id 0 means we should examine the first + * report in the list. + */ + report = list_entry( + hid->report_enum[type].report_list.next, + struct hid_report, list); + } else { + report = hid->report_enum[type].report_id_hash[id]; + } if (!report) { hid_err(hid, "missing %s %u\n", hid_report_names[type], id); return NULL; --- linux-3.13.0.orig/drivers/hid/hid-ids.h +++ linux-3.13.0/drivers/hid/hid-ids.h @@ -312,6 +312,7 @@ #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 +#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968 #define USB_VENDOR_ID_EZKEY 0x0518 #define USB_DEVICE_ID_BTC_8193 0x0002 @@ -809,6 +810,7 @@ #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 #define USB_DEVICE_ID_SYNAPTICS_LTS1 0x0af8 #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 +#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 #define USB_VENDOR_ID_THINGM 0x27b8 #define USB_DEVICE_ID_BLINK1 0x01ed --- linux-3.13.0.orig/drivers/hid/hid-input.c +++ linux-3.13.0/drivers/hid/hid-input.c @@ -350,9 +350,9 @@ ret = -ENOMEM; break; } - ret = dev->hid_get_raw_report(dev, dev->battery_report_id, - buf, 2, - dev->battery_report_type); + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + dev->battery_report_type, + HID_REQ_GET_REPORT); if (ret != 2) { ret = -ENODATA; @@ -1063,6 +1063,23 @@ return; } + /* + * Ignore reports for absolute data if the data didn't change. This is + * not only an optimization but also fixes 'dead' key reports. 
Some + * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID + * 0x31 and 0x32) report multiple keys, even though a localized keyboard + * can only have one of them physically available. The 'dead' keys + * report constant 0. As all map to the same keycode, they'd confuse + * the input layer. If we filter the 'dead' keys on the HID level, we + * skip the keycode translation and only forward real events. + */ + if (!(field->flags & (HID_MAIN_ITEM_RELATIVE | + HID_MAIN_ITEM_BUFFERED_BYTE)) && + (field->flags & HID_MAIN_ITEM_VARIABLE) && + usage->usage_index < field->maxusage && + value == field->value[usage->usage_index]) + return; + /* report the usage code as scancode if the key status has changed */ if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value) input_event(input, EV_MSC, MSC_SCAN, usage->hid); --- linux-3.13.0.orig/drivers/hid/hid-kye.c +++ linux-3.13.0/drivers/hid/hid-kye.c @@ -300,7 +300,7 @@ * - change the button usage range to 4-7 for the extra * buttons */ - if (*rsize >= 74 && + if (*rsize >= 75 && rdesc[61] == 0x05 && rdesc[62] == 0x08 && rdesc[63] == 0x19 && rdesc[64] == 0x08 && rdesc[65] == 0x29 && rdesc[66] == 0x0f && --- linux-3.13.0.orig/drivers/hid/hid-lg.c +++ linux-3.13.0/drivers/hid/hid-lg.c @@ -345,14 +345,14 @@ struct usb_device_descriptor *udesc; __u16 bcdDevice, rev_maj, rev_min; - if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 && + if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); rdesc[84] = rdesc[89] = 0x4d; rdesc[85] = rdesc[90] = 0x10; } - if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 && + if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && rdesc[49] == 0x81 && rdesc[50] == 0x06) { hid_info(hdev, --- linux-3.13.0.orig/drivers/hid/hid-logitech-dj.c +++ linux-3.13.0/drivers/hid/hid-logitech-dj.c @@ -237,13 +237,6 @@ return; } - if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || - (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { - dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n", - __func__, dj_report->device_index); - return; - } - if (djrcv_dev->paired_dj_devices[dj_report->device_index]) { /* The device is already known. No need to reallocate it. */ dbg_hid("%s: device is already known\n", __func__); @@ -686,7 +679,6 @@ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_report *dj_report = (struct dj_report *) data; unsigned long flags; - bool report_processed = false; dbg_hid("%s, size:%d\n", __func__, size); @@ -714,27 +706,41 @@ * anything else with it. */ + /* case 1) */ + if (data[0] != REPORT_ID_DJ_SHORT) + return false; + + if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || + (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { + /* + * Device index is wrong, bail out. + * This driver can ignore safely the receiver notifications, + * so ignore those reports too. 
+ */ + if (dj_report->device_index != DJ_RECEIVER_INDEX) + dev_err(&hdev->dev, "%s: invalid device index:%d\n", + __func__, dj_report->device_index); + return false; + } + spin_lock_irqsave(&djrcv_dev->lock, flags); - if (dj_report->report_id == REPORT_ID_DJ_SHORT) { - switch (dj_report->report_type) { - case REPORT_TYPE_NOTIF_DEVICE_PAIRED: - case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: - logi_dj_recv_queue_notification(djrcv_dev, dj_report); - break; - case REPORT_TYPE_NOTIF_CONNECTION_STATUS: - if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == - STATUS_LINKLOSS) { - logi_dj_recv_forward_null_report(djrcv_dev, dj_report); - } - break; - default: - logi_dj_recv_forward_report(djrcv_dev, dj_report); + switch (dj_report->report_type) { + case REPORT_TYPE_NOTIF_DEVICE_PAIRED: + case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: + logi_dj_recv_queue_notification(djrcv_dev, dj_report); + break; + case REPORT_TYPE_NOTIF_CONNECTION_STATUS: + if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == + STATUS_LINKLOSS) { + logi_dj_recv_forward_null_report(djrcv_dev, dj_report); } - report_processed = true; + break; + default: + logi_dj_recv_forward_report(djrcv_dev, dj_report); } spin_unlock_irqrestore(&djrcv_dev->lock, flags); - return report_processed; + return true; } static int logi_dj_probe(struct hid_device *hdev, --- linux-3.13.0.orig/drivers/hid/hid-logitech-dj.h +++ linux-3.13.0/drivers/hid/hid-logitech-dj.h @@ -27,6 +27,7 @@ #define DJ_MAX_PAIRED_DEVICES 6 #define DJ_MAX_NUMBER_NOTIFICATIONS 8 +#define DJ_RECEIVER_INDEX 0 #define DJ_DEVICE_INDEX_MIN 1 #define DJ_DEVICE_INDEX_MAX 6 --- linux-3.13.0.orig/drivers/hid/hid-magicmouse.c +++ linux-3.13.0/drivers/hid/hid-magicmouse.c @@ -290,6 +290,11 @@ if (size < 4 || ((size - 4) % 9) != 0) return 0; npoints = (size - 4) / 9; + if (npoints > 15) { + hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", + size); + return 0; + } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); @@ -307,6 +312,11 @@ if (size < 6 || ((size - 6) % 8) != 0) return 0; npoints = (size - 6) / 8; + if (npoints > 15) { + hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", + size); + return 0; + } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); --- linux-3.13.0.orig/drivers/hid/hid-monterey.c +++ linux-3.13.0/drivers/hid/hid-monterey.c @@ -24,7 +24,7 @@ static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { + if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 0x0c; } --- linux-3.13.0.orig/drivers/hid/hid-multitouch.c +++ linux-3.13.0/drivers/hid/hid-multitouch.c @@ -111,6 +111,7 @@ __u8 touches_by_report; /* how many touches are present in one report: * 1 means we should use a serial protocol * > 1 means hybrid (multitouch) protocol */ + __u8 buttons_count; /* number of physical buttons per touchpad */ bool serial_maybe; /* need to check for serial protocol */ bool curvalid; /* is the current contact valid? 
*/ unsigned mt_flags; /* flags to pass to input-mt */ @@ -418,6 +419,10 @@ (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) td->mt_flags |= INPUT_MT_POINTER; + /* count the buttons on touchpads */ + if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) + td->buttons_count++; + if (usage->usage_index) prev_usage = &field->usage[usage->usage_index - 1]; @@ -767,6 +772,10 @@ if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) td->mt_flags |= INPUT_MT_DROP_UNUSED; + /* check for clickpads */ + if ((td->mt_flags & INPUT_MT_POINTER) && (td->buttons_count == 1)) + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + input_mt_init_slots(input, td->maxcontacts, td->mt_flags); td->mt_flags = 0; --- linux-3.13.0.orig/drivers/hid/hid-petalynx.c +++ linux-3.13.0/drivers/hid/hid-petalynx.c @@ -25,7 +25,7 @@ static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && + if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && rdesc[41] == 0x00 && rdesc[59] == 0x26 && rdesc[60] == 0xf9 && rdesc[61] == 0x00) { hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n"); --- linux-3.13.0.orig/drivers/hid/hid-picolcd_core.c +++ linux-3.13.0/drivers/hid/hid-picolcd_core.c @@ -350,6 +350,12 @@ if (!data) return 1; + if (size > 64) { + hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n", + size); + return 0; + } + if (report->id == REPORT_KEY_STATE) { if (data->input_keys) ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); --- linux-3.13.0.orig/drivers/hid/hid-rmi.c +++ linux-3.13.0/drivers/hid/hid-rmi.c @@ -0,0 +1,920 @@ +/* + * Copyright (c) 2013 Andrew Duggan + * Copyright (c) 2013 Synaptics Incorporated + * Copyright (c) 2014 Benjamin Tissoires + * Copyright (c) 2014 Red Hat, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hid-ids.h" + +#define RMI_MOUSE_REPORT_ID 0x01 /* Mouse emulation Report */ +#define RMI_WRITE_REPORT_ID 0x09 /* Output Report */ +#define RMI_READ_ADDR_REPORT_ID 0x0a /* Output Report */ +#define RMI_READ_DATA_REPORT_ID 0x0b /* Input Report */ +#define RMI_ATTN_REPORT_ID 0x0c /* Input Report */ +#define RMI_SET_RMI_MODE_REPORT_ID 0x0f /* Feature Report */ + +/* flags */ +#define RMI_READ_REQUEST_PENDING BIT(0) +#define RMI_READ_DATA_PENDING BIT(1) +#define RMI_STARTED BIT(2) + +enum rmi_mode_type { + RMI_MODE_OFF = 0, + RMI_MODE_ATTN_REPORTS = 1, + RMI_MODE_NO_PACKED_ATTN_REPORTS = 2, +}; + +struct rmi_function { + unsigned page; /* page of the function */ + u16 query_base_addr; /* base address for queries */ + u16 command_base_addr; /* base address for commands */ + u16 control_base_addr; /* base address for controls */ + u16 data_base_addr; /* base address for datas */ + unsigned int interrupt_base; /* cross-function interrupt number + * (uniq in the device)*/ + unsigned int interrupt_count; /* number of interrupts */ + unsigned int report_size; /* size of a report */ + unsigned long irq_mask; /* mask of the interrupts + * (to be applied against ATTN IRQ) */ +}; + +/** + * struct rmi_data - stores information for hid communication + * + * @page_mutex: Locks current page to avoid changing pages in unexpected ways. 
+ * @page: Keeps track of the current virtual page + * + * @wait: Used for waiting for read data + * + * @writeReport: output buffer when writing RMI registers + * @readReport: input buffer when reading RMI registers + * + * @input_report_size: size of an input report (advertised by HID) + * @output_report_size: size of an output report (advertised by HID) + * + * @flags: flags for the current device (started, reading, etc...) + * + * @f11: placeholder of internal RMI function F11 description + * @f30: placeholder of internal RMI function F30 description + * + * @max_fingers: maximum finger count reported by the device + * @max_x: maximum x value reported by the device + * @max_y: maximum y value reported by the device + * + * @gpio_led_count: count of GPIOs + LEDs reported by F30 + * @button_count: actual physical buttons count + * @button_mask: button mask used to decode GPIO ATTN reports + * @button_state_mask: pull state of the buttons + * + * @input: pointer to the kernel input device + * + * @reset_work: worker which will be called in case of a mouse report + * @hdev: pointer to the struct hid_device + */ +struct rmi_data { + struct mutex page_mutex; + int page; + + wait_queue_head_t wait; + + u8 *writeReport; + u8 *readReport; + + int input_report_size; + int output_report_size; + + unsigned long flags; + + struct rmi_function f11; + struct rmi_function f30; + + unsigned int max_fingers; + unsigned int max_x; + unsigned int max_y; + unsigned int x_size_mm; + unsigned int y_size_mm; + + unsigned int gpio_led_count; + unsigned int button_count; + unsigned long button_mask; + unsigned long button_state_mask; + + struct input_dev *input; + + struct work_struct reset_work; + struct hid_device *hdev; +}; + +#define RMI_PAGE(addr) (((addr) >> 8) & 0xff) + +static int rmi_write_report(struct hid_device *hdev, u8 *report, int len); + +/** + * rmi_set_page - Set RMI page + * @hdev: The pointer to the hid_device struct + * @page: The new page address. + * + * RMI devices have 16-bit addressing, but some of the physical + * implementations (like SMBus) only have 8-bit addressing. So RMI implements + * a page address at 0xff of every page so we can reliable page addresses + * every 256 registers. + * + * The page_mutex lock must be held when this function is entered. + * + * Returns zero on success, non-zero on failure. 
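(Editorial aside on the paging scheme described above; not part of the patch, and the address value is made up.) A 16-bit RMI register address splits into the page that rmi_set_page() writes to the page-select register at 0xff and the 8-bit offset within that page, exactly as the RMI_PAGE() macro computes:

        u16 addr = 0x0145;              /* hypothetical RMI register address */
        u8  page = (addr >> 8) & 0xff;  /* 0x01 - selected via rmi_set_page() when it changes */
        u8  off  = addr & 0xff;         /* 0x45 - register offset within that page */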
+ */ +static int rmi_set_page(struct hid_device *hdev, u8 page) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + int retval; + + data->writeReport[0] = RMI_WRITE_REPORT_ID; + data->writeReport[1] = 1; + data->writeReport[2] = 0xFF; + data->writeReport[4] = page; + + retval = rmi_write_report(hdev, data->writeReport, + data->output_report_size); + if (retval != data->output_report_size) { + dev_err(&hdev->dev, + "%s: set page failed: %d.", __func__, retval); + return retval; + } + + data->page = page; + return 0; +} + +static int rmi_set_mode(struct hid_device *hdev, u8 mode) +{ + int ret; + u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; + + ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, + sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + if (ret < 0) { + dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, + ret); + return ret; + } + + return 0; +} + +static int rmi_write_report(struct hid_device *hdev, u8 *report, int len) +{ + int ret; + + ret = hid_hw_output_report(hdev, (void *)report, len); + if (ret < 0) { + dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret); + return ret; + } + + return ret; +} + +static int rmi_read_block(struct hid_device *hdev, u16 addr, void *buf, + const int len) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + int ret; + int bytes_read; + int bytes_needed; + int retries; + int read_input_count; + + mutex_lock(&data->page_mutex); + + if (RMI_PAGE(addr) != data->page) { + ret = rmi_set_page(hdev, RMI_PAGE(addr)); + if (ret < 0) + goto exit; + } + + for (retries = 5; retries > 0; retries--) { + data->writeReport[0] = RMI_READ_ADDR_REPORT_ID; + data->writeReport[1] = 0; /* old 1 byte read count */ + data->writeReport[2] = addr & 0xFF; + data->writeReport[3] = (addr >> 8) & 0xFF; + data->writeReport[4] = len & 0xFF; + data->writeReport[5] = (len >> 8) & 0xFF; + + set_bit(RMI_READ_REQUEST_PENDING, &data->flags); + + ret = rmi_write_report(hdev, data->writeReport, + data->output_report_size); + if (ret != data->output_report_size) { + clear_bit(RMI_READ_REQUEST_PENDING, &data->flags); + dev_err(&hdev->dev, + "failed to write request output report (%d)\n", + ret); + goto exit; + } + + bytes_read = 0; + bytes_needed = len; + while (bytes_read < len) { + if (!wait_event_timeout(data->wait, + test_bit(RMI_READ_DATA_PENDING, &data->flags), + msecs_to_jiffies(1000))) { + hid_warn(hdev, "%s: timeout elapsed\n", + __func__); + ret = -EAGAIN; + break; + } + + read_input_count = data->readReport[1]; + memcpy(buf + bytes_read, &data->readReport[2], + read_input_count < bytes_needed ? 
+ read_input_count : bytes_needed); + + bytes_read += read_input_count; + bytes_needed -= read_input_count; + clear_bit(RMI_READ_DATA_PENDING, &data->flags); + } + + if (ret >= 0) { + ret = 0; + break; + } + } + +exit: + clear_bit(RMI_READ_REQUEST_PENDING, &data->flags); + mutex_unlock(&data->page_mutex); + return ret; +} + +static inline int rmi_read(struct hid_device *hdev, u16 addr, void *buf) +{ + return rmi_read_block(hdev, addr, buf, 1); +} + +static void rmi_f11_process_touch(struct rmi_data *hdata, int slot, + u8 finger_state, u8 *touch_data) +{ + int x, y, wx, wy; + int wide, major, minor; + int z; + + input_mt_slot(hdata->input, slot); + input_mt_report_slot_state(hdata->input, MT_TOOL_FINGER, + finger_state == 0x01); + if (finger_state == 0x01) { + x = (touch_data[0] << 4) | (touch_data[2] & 0x0F); + y = (touch_data[1] << 4) | (touch_data[2] >> 4); + wx = touch_data[3] & 0x0F; + wy = touch_data[3] >> 4; + wide = (wx > wy); + major = max(wx, wy); + minor = min(wx, wy); + z = touch_data[4]; + + /* y is inverted */ + y = hdata->max_y - y; + + input_event(hdata->input, EV_ABS, ABS_MT_POSITION_X, x); + input_event(hdata->input, EV_ABS, ABS_MT_POSITION_Y, y); + input_event(hdata->input, EV_ABS, ABS_MT_ORIENTATION, wide); + input_event(hdata->input, EV_ABS, ABS_MT_PRESSURE, z); + input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); + input_event(hdata->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); + } +} + +static void rmi_reset_work(struct work_struct *work) +{ + struct rmi_data *hdata = container_of(work, struct rmi_data, + reset_work); + + /* switch the device to RMI if we receive a generic mouse report */ + rmi_set_mode(hdata->hdev, RMI_MODE_ATTN_REPORTS); +} + +static inline int rmi_schedule_reset(struct hid_device *hdev) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + return schedule_work(&hdata->reset_work); +} + +static int rmi_f11_input_event(struct hid_device *hdev, u8 irq, u8 *data, + int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + int offset; + int i; + + if (size < hdata->f11.report_size) + return 0; + + if (!(irq & hdata->f11.irq_mask)) + return 0; + + offset = (hdata->max_fingers >> 2) + 1; + for (i = 0; i < hdata->max_fingers; i++) { + int fs_byte_position = i >> 2; + int fs_bit_position = (i & 0x3) << 1; + int finger_state = (data[fs_byte_position] >> fs_bit_position) & + 0x03; + + rmi_f11_process_touch(hdata, i, finger_state, + &data[offset + 5 * i]); + } + input_mt_sync_frame(hdata->input); + input_sync(hdata->input); + return hdata->f11.report_size; +} + +static int rmi_f30_input_event(struct hid_device *hdev, u8 irq, u8 *data, + int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + int i; + int button = 0; + bool value; + + if (!(irq & hdata->f30.irq_mask)) + return 0; + + for (i = 0; i < hdata->gpio_led_count; i++) { + if (test_bit(i, &hdata->button_mask)) { + value = (data[i / 8] >> (i & 0x07)) & BIT(0); + if (test_bit(i, &hdata->button_state_mask)) + value = !value; + input_event(hdata->input, EV_KEY, BTN_LEFT + button++, + value); + } + } + return hdata->f30.report_size; +} + +static int rmi_input_event(struct hid_device *hdev, u8 *data, int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + unsigned long irq_mask = 0; + unsigned index = 2; + + if (!(test_bit(RMI_STARTED, &hdata->flags))) + return 0; + + irq_mask |= hdata->f11.irq_mask; + irq_mask |= hdata->f30.irq_mask; + + if (data[1] & ~irq_mask) + hid_warn(hdev, "unknown intr source:%02lx %s:%d\n", + data[1] & ~irq_mask, __FILE__, __LINE__); + + if 
(hdata->f11.interrupt_base < hdata->f30.interrupt_base) { + index += rmi_f11_input_event(hdev, data[1], &data[index], + size - index); + index += rmi_f30_input_event(hdev, data[1], &data[index], + size - index); + } else { + index += rmi_f30_input_event(hdev, data[1], &data[index], + size - index); + index += rmi_f11_input_event(hdev, data[1], &data[index], + size - index); + } + + return 1; +} + +static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + + if (!test_bit(RMI_READ_REQUEST_PENDING, &hdata->flags)) { + hid_err(hdev, "no read request pending\n"); + return 0; + } + + memcpy(hdata->readReport, data, size < hdata->input_report_size ? + size : hdata->input_report_size); + set_bit(RMI_READ_DATA_PENDING, &hdata->flags); + wake_up(&hdata->wait); + + return 1; +} + +static int rmi_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + switch (data[0]) { + case RMI_READ_DATA_REPORT_ID: + return rmi_read_data_event(hdev, data, size); + case RMI_ATTN_REPORT_ID: + return rmi_input_event(hdev, data, size); + case RMI_MOUSE_REPORT_ID: + rmi_schedule_reset(hdev); + break; + } + + return 0; +} + +static int rmi_post_reset(struct hid_device *hdev) +{ + return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); +} + +static int rmi_post_resume(struct hid_device *hdev) +{ + return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); +} + +#define RMI4_MAX_PAGE 0xff +#define RMI4_PAGE_SIZE 0x0100 + +#define PDT_START_SCAN_LOCATION 0x00e9 +#define PDT_END_SCAN_LOCATION 0x0005 +#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff) + +struct pdt_entry { + u8 query_base_addr:8; + u8 command_base_addr:8; + u8 control_base_addr:8; + u8 data_base_addr:8; + u8 interrupt_source_count:3; + u8 bits3and4:2; + u8 function_version:2; + u8 bit7:1; + u8 function_number:8; +} __attribute__((__packed__)); + +static inline unsigned long rmi_gen_mask(unsigned irq_base, unsigned irq_count) +{ + return GENMASK(irq_count + irq_base - 1, irq_base); +} + +static void rmi_register_function(struct rmi_data *data, + struct pdt_entry *pdt_entry, int page, unsigned interrupt_count) +{ + struct rmi_function *f = NULL; + u16 page_base = page << 8; + + switch (pdt_entry->function_number) { + case 0x11: + f = &data->f11; + break; + case 0x30: + f = &data->f30; + break; + } + + if (f) { + f->page = page; + f->query_base_addr = page_base | pdt_entry->query_base_addr; + f->command_base_addr = page_base | pdt_entry->command_base_addr; + f->control_base_addr = page_base | pdt_entry->control_base_addr; + f->data_base_addr = page_base | pdt_entry->data_base_addr; + f->interrupt_base = interrupt_count; + f->interrupt_count = pdt_entry->interrupt_source_count; + f->irq_mask = rmi_gen_mask(f->interrupt_base, + f->interrupt_count); + } +} + +static int rmi_scan_pdt(struct hid_device *hdev) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + struct pdt_entry entry; + int page; + bool page_has_function; + int i; + int retval; + int interrupt = 0; + u16 page_start, pdt_start , pdt_end; + + hid_info(hdev, "Scanning PDT...\n"); + + for (page = 0; (page <= RMI4_MAX_PAGE); page++) { + page_start = RMI4_PAGE_SIZE * page; + pdt_start = page_start + PDT_START_SCAN_LOCATION; + pdt_end = page_start + PDT_END_SCAN_LOCATION; + + page_has_function = false; + for (i = pdt_start; i >= pdt_end; i -= sizeof(entry)) { + retval = rmi_read_block(hdev, i, &entry, sizeof(entry)); + if (retval) { + hid_err(hdev, + "Read of PDT entry at %#06x failed.\n", + i); + goto 
error_exit; + } + + if (RMI4_END_OF_PDT(entry.function_number)) + break; + + page_has_function = true; + + hid_info(hdev, "Found F%02X on page %#04x\n", + entry.function_number, page); + + rmi_register_function(data, &entry, page, interrupt); + interrupt += entry.interrupt_source_count; + } + + if (!page_has_function) + break; + } + + hid_info(hdev, "%s: Done with PDT scan.\n", __func__); + retval = 0; + +error_exit: + return retval; +} + +static int rmi_populate_f11(struct hid_device *hdev) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + u8 buf[20]; + int ret; + bool has_query9; + bool has_query10; + bool has_query11; + bool has_query12; + bool has_physical_props; + unsigned x_size, y_size; + u16 query12_offset; + + if (!data->f11.query_base_addr) { + hid_err(hdev, "No 2D sensor found, giving up.\n"); + return -ENODEV; + } + + /* query 0 contains some useful information */ + ret = rmi_read(hdev, data->f11.query_base_addr, buf); + if (ret) { + hid_err(hdev, "can not get query 0: %d.\n", ret); + return ret; + } + has_query9 = !!(buf[0] & BIT(3)); + has_query11 = !!(buf[0] & BIT(4)); + has_query12 = !!(buf[0] & BIT(5)); + + /* query 1 to get the max number of fingers */ + ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf); + if (ret) { + hid_err(hdev, "can not get NumberOfFingers: %d.\n", ret); + return ret; + } + data->max_fingers = (buf[0] & 0x07) + 1; + if (data->max_fingers > 5) + data->max_fingers = 10; + + data->f11.report_size = data->max_fingers * 5 + + DIV_ROUND_UP(data->max_fingers, 4); + + if (!(buf[0] & BIT(4))) { + hid_err(hdev, "No absolute events, giving up.\n"); + return -ENODEV; + } + + /* query 8 to find out if query 10 exists */ + ret = rmi_read(hdev, data->f11.query_base_addr + 8, buf); + if (ret) { + hid_err(hdev, "can not read gesture information: %d.\n", ret); + return ret; + } + has_query10 = !!(buf[0] & BIT(2)); + + /* + * At least 8 queries are guaranteed to be present in F11 + * +1 for query12. + */ + query12_offset = 9; + + if (has_query9) + ++query12_offset; + + if (has_query10) + ++query12_offset; + + if (has_query11) + ++query12_offset; + + /* query 12 to know if the physical properties are reported */ + if (has_query12) { + ret = rmi_read(hdev, data->f11.query_base_addr + + query12_offset, buf); + if (ret) { + hid_err(hdev, "can not get query 12: %d.\n", ret); + return ret; + } + has_physical_props = !!(buf[0] & BIT(5)); + + if (has_physical_props) { + ret = rmi_read_block(hdev, + data->f11.query_base_addr + + query12_offset + 1, buf, 4); + if (ret) { + hid_err(hdev, "can not read query 15-18: %d.\n", + ret); + return ret; + } + + x_size = buf[0] | (buf[1] << 8); + y_size = buf[2] | (buf[3] << 8); + + data->x_size_mm = DIV_ROUND_CLOSEST(x_size, 10); + data->y_size_mm = DIV_ROUND_CLOSEST(y_size, 10); + + hid_info(hdev, "%s: size in mm: %d x %d\n", + __func__, data->x_size_mm, data->y_size_mm); + } + } + + /* + * retrieve the ctrl registers + * the ctrl register has a size of 20 but a fw bug split it into 16 + 4, + * and there is no way to know if the first 20 bytes are here or not. + * We use only the first 10 bytes, so get only them. 
+ */ + ret = rmi_read_block(hdev, data->f11.control_base_addr, buf, 10); + if (ret) { + hid_err(hdev, "can not read ctrl block of size 10: %d.\n", ret); + return ret; + } + + data->max_x = buf[6] | (buf[7] << 8); + data->max_y = buf[8] | (buf[9] << 8); + + return 0; +} + +static int rmi_populate_f30(struct hid_device *hdev) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + u8 buf[20]; + int ret; + bool has_gpio, has_led; + unsigned bytes_per_ctrl; + u8 ctrl2_addr; + int ctrl2_3_length; + int i; + + /* function F30 is for physical buttons */ + if (!data->f30.query_base_addr) { + hid_err(hdev, "No GPIO/LEDs found, giving up.\n"); + return -ENODEV; + } + + ret = rmi_read_block(hdev, data->f30.query_base_addr, buf, 2); + if (ret) { + hid_err(hdev, "can not get F30 query registers: %d.\n", ret); + return ret; + } + + has_gpio = !!(buf[0] & BIT(3)); + has_led = !!(buf[0] & BIT(2)); + data->gpio_led_count = buf[1] & 0x1f; + + /* retrieve ctrl 2 & 3 registers */ + bytes_per_ctrl = (data->gpio_led_count + 7) / 8; + /* Ctrl0 is present only if both has_gpio and has_led are set*/ + ctrl2_addr = (has_gpio && has_led) ? bytes_per_ctrl : 0; + /* Ctrl1 is always be present */ + ctrl2_addr += bytes_per_ctrl; + ctrl2_3_length = 2 * bytes_per_ctrl; + + data->f30.report_size = bytes_per_ctrl; + + ret = rmi_read_block(hdev, data->f30.control_base_addr + ctrl2_addr, + buf, ctrl2_3_length); + if (ret) { + hid_err(hdev, "can not read ctrl 2&3 block of size %d: %d.\n", + ctrl2_3_length, ret); + return ret; + } + + for (i = 0; i < data->gpio_led_count; i++) { + int byte_position = i >> 3; + int bit_position = i & 0x07; + u8 dir_byte = buf[byte_position]; + u8 data_byte = buf[byte_position + bytes_per_ctrl]; + bool dir = (dir_byte >> bit_position) & BIT(0); + bool dat = (data_byte >> bit_position) & BIT(0); + + if (dir == 0) { + /* input mode */ + if (dat) { + /* actual buttons have pull up resistor */ + data->button_count++; + set_bit(i, &data->button_mask); + set_bit(i, &data->button_state_mask); + } + } + + } + + return 0; +} + +static int rmi_populate(struct hid_device *hdev) +{ + int ret; + + ret = rmi_scan_pdt(hdev); + if (ret) { + hid_err(hdev, "PDT scan failed with code %d.\n", ret); + return ret; + } + + ret = rmi_populate_f11(hdev); + if (ret) { + hid_err(hdev, "Error while initializing F11 (%d).\n", ret); + return ret; + } + + ret = rmi_populate_f30(hdev); + if (ret) + hid_warn(hdev, "Error while initializing F30 (%d).\n", ret); + + return 0; +} + +static void rmi_input_configured(struct hid_device *hdev, struct hid_input *hi) +{ + struct rmi_data *data = hid_get_drvdata(hdev); + struct input_dev *input = hi->input; + int ret; + int res_x, res_y, i; + + data->input = input; + + hid_dbg(hdev, "Opening low level driver\n"); + ret = hid_hw_open(hdev); + if (ret) + return; + + /* Allow incoming hid reports */ + hid_device_io_start(hdev); + + ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); + if (ret < 0) { + dev_err(&hdev->dev, "failed to set rmi mode\n"); + goto exit; + } + + ret = rmi_set_page(hdev, 0); + if (ret < 0) { + dev_err(&hdev->dev, "failed to set page select to 0.\n"); + goto exit; + } + + ret = rmi_populate(hdev); + if (ret) + goto exit; + + __set_bit(EV_ABS, input->evbit); + input_set_abs_params(input, ABS_MT_POSITION_X, 1, data->max_x, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 1, data->max_y, 0, 0); + + if (data->x_size_mm && data->y_size_mm) { + res_x = (data->max_x - 1) / data->x_size_mm; + res_y = (data->max_y - 1) / data->y_size_mm; + + input_abs_set_res(input, 
ABS_MT_POSITION_X, res_x); + input_abs_set_res(input, ABS_MT_POSITION_Y, res_y); + } + + input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0); + input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0); + + input_mt_init_slots(input, data->max_fingers, INPUT_MT_POINTER); + + if (data->button_count) { + __set_bit(EV_KEY, input->evbit); + for (i = 0; i < data->button_count; i++) + __set_bit(BTN_LEFT + i, input->keybit); + + if (data->button_count == 1) + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); + } + + set_bit(RMI_STARTED, &data->flags); + +exit: + hid_device_io_stop(hdev); + hid_hw_close(hdev); +} + +static int rmi_input_mapping(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + /* we want to make HID ignore the advertised HID collection */ + return -1; +} + +static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id) +{ + struct rmi_data *data = NULL; + int ret; + size_t alloc_size; + + data = devm_kzalloc(&hdev->dev, sizeof(struct rmi_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + INIT_WORK(&data->reset_work, rmi_reset_work); + data->hdev = hdev; + + hid_set_drvdata(hdev, data); + + hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS; + + ret = hid_parse(hdev); + if (ret) { + hid_err(hdev, "parse failed\n"); + return ret; + } + + data->input_report_size = (hdev->report_enum[HID_INPUT_REPORT] + .report_id_hash[RMI_ATTN_REPORT_ID]->size >> 3) + + 1 /* report id */; + data->output_report_size = (hdev->report_enum[HID_OUTPUT_REPORT] + .report_id_hash[RMI_WRITE_REPORT_ID]->size >> 3) + + 1 /* report id */; + + alloc_size = data->output_report_size + data->input_report_size; + + data->writeReport = devm_kzalloc(&hdev->dev, alloc_size, GFP_KERNEL); + if (!data->writeReport) { + ret = -ENOMEM; + return ret; + } + + data->readReport = data->writeReport + data->output_report_size; + + init_waitqueue_head(&data->wait); + + mutex_init(&data->page_mutex); + + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (ret) { + hid_err(hdev, "hw start failed\n"); + return ret; + } + + if (!test_bit(RMI_STARTED, &data->flags)) { + hid_hw_stop(hdev); + return -EIO; + } + + return 0; +} + +static void rmi_remove(struct hid_device *hdev) +{ + struct rmi_data *hdata = hid_get_drvdata(hdev); + + clear_bit(RMI_STARTED, &hdata->flags); + + hid_hw_stop(hdev); +} + +static const struct hid_device_id rmi_id[] = { + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) }, + { } +}; +MODULE_DEVICE_TABLE(hid, rmi_id); + +static struct hid_driver rmi_driver = { + .name = "hid-rmi", + .id_table = rmi_id, + .probe = rmi_probe, + .remove = rmi_remove, + .raw_event = rmi_raw_event, + .input_mapping = rmi_input_mapping, + .input_configured = rmi_input_configured, +#ifdef CONFIG_PM + .resume = rmi_post_resume, + .reset_resume = rmi_post_reset, +#endif +}; + +module_hid_driver(rmi_driver); + +MODULE_AUTHOR("Andrew Duggan "); +MODULE_DESCRIPTION("RMI HID driver"); +MODULE_LICENSE("GPL"); --- linux-3.13.0.orig/drivers/hid/hid-roccat-pyra.c +++ linux-3.13.0/drivers/hid/hid-roccat-pyra.c @@ -35,6 +35,8 @@ static void profile_activated(struct pyra_device *pyra, unsigned int new_profile) { + if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) + return; pyra->actual_profile = new_profile; pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; } @@ -257,9 
+259,11 @@ if (off != 0 || count != PYRA_SIZE_SETTINGS) return -EINVAL; - mutex_lock(&pyra->pyra_lock); - settings = (struct pyra_settings const *)buf; + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) + return -EINVAL; + + mutex_lock(&pyra->pyra_lock); retval = pyra_set_settings(usb_dev, settings); if (retval) { --- linux-3.13.0.orig/drivers/hid/hid-sony.c +++ linux-3.13.0/drivers/hid/hid-sony.c @@ -432,7 +432,8 @@ if (!buf) return -ENOMEM; - ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT); + ret = hid_hw_raw_request(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); if (ret < 0) hid_err(hdev, "can't set operational mode\n"); --- linux-3.13.0.orig/drivers/hid/hid-sunplus.c +++ linux-3.13.0/drivers/hid/hid-sunplus.c @@ -24,7 +24,7 @@ static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && + if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && rdesc[106] == 0x03) { hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n"); rdesc[105] = rdesc[110] = 0x03; --- linux-3.13.0.orig/drivers/hid/hidraw.c +++ linux-3.13.0/drivers/hid/hidraw.c @@ -6,7 +6,7 @@ * to work on raw hid events as they want to, and avoids a need to * use a transport-specific userspace libhid/libusb libraries. * - * Copyright (c) 2007 Jiri Kosina + * Copyright (c) 2007-2014 Jiri Kosina */ /* @@ -104,8 +104,11 @@ return ret; } -/* The first byte is expected to be a report number. - * This function is to be called with the minors_lock mutex held */ +/* + * The first byte of the report buffer is expected to be a report number. + * + * This function is to be called with the minors_lock mutex held. + */ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); @@ -157,7 +160,6 @@ return ret; } -/* the first byte is expected to be a report number */ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { ssize_t ret; @@ -168,12 +170,15 @@ } -/* This function performs a Get_Report transfer over the control endpoint +/* + * This function performs a Get_Report transfer over the control endpoint * per section 7.2.1 of the HID specification, version 1.1. The first byte * of buffer is the report number to request, or 0x0 if the defice does not * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT - * or HID_INPUT_REPORT. This function is to be called with the minors_lock - * mutex held. */ + * or HID_INPUT_REPORT. + * + * This function is to be called with the minors_lock mutex held. + */ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file_inode(file)); @@ -184,7 +189,7 @@ dev = hidraw_table[minor]->hid; - if (!dev->hid_get_raw_report) { + if (!dev->ll_driver->raw_request) { ret = -ENODEV; goto out; } @@ -209,14 +214,17 @@ goto out; } - /* Read the first byte from the user. This is the report number, - * which is passed to dev->hid_get_raw_report(). */ + /* + * Read the first byte from the user. This is the report number, + * which is passed to hid_hw_raw_request(). 
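(Editorial sketch, not part of the patch.) From userspace, the Get_Report transfer described above is reached through hidraw's HIDIOCGFEATURE ioctl, using the same convention that the first byte of the buffer carries the report number. The device node and report number below are illustrative only:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/hidraw.h>

        int main(void)
        {
                unsigned char buf[64];
                int fd = open("/dev/hidraw0", O_RDWR);  /* illustrative node */

                if (fd < 0)
                        return 1;
                buf[0] = 0x01;                          /* feature report number to request */
                if (ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf) < 0)
                        perror("HIDIOCGFEATURE");
                close(fd);
                return 0;
        }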
+ */ if (copy_from_user(&report_number, buffer, 1)) { ret = -EFAULT; goto out_free; } - ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type); + ret = hid_hw_raw_request(dev, report_number, buf, count, report_type, + HID_REQ_GET_REPORT); if (ret < 0) goto out_free; @@ -313,13 +321,13 @@ hid_hw_close(hidraw->hid); wake_up_interruptible(&hidraw->wait); } + device_destroy(hidraw_class, + MKDEV(hidraw_major, hidraw->minor)); } else { --hidraw->open; } if (!hidraw->open) { if (!hidraw->exist) { - device_destroy(hidraw_class, - MKDEV(hidraw_major, hidraw->minor)); hidraw_table[hidraw->minor] = NULL; kfree(hidraw); } else { @@ -498,7 +506,7 @@ int minor, result; struct hidraw *dev; - /* we accept any HID device, no matter the applications */ + /* we accept any HID device, all applications */ dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); if (!dev) --- linux-3.13.0.orig/drivers/hid/i2c-hid/i2c-hid.c +++ linux-3.13.0/drivers/hid/i2c-hid/i2c-hid.c @@ -136,6 +136,7 @@ * descriptor. */ unsigned int bufsize; /* i2c buffer size */ char *inbuf; /* Input buffer */ + char *rawbuf; /* Raw Input buffer */ char *cmdbuf; /* Command buffer */ char *argsbuf; /* Command arguments buffer */ @@ -256,12 +257,21 @@ return 0; } -static int i2c_hid_set_report(struct i2c_client *client, u8 reportType, - u8 reportID, unsigned char *buf, size_t data_len) +/** + * i2c_hid_set_or_send_report: forward an incoming report to the device + * @client: the i2c_client of the device + * @reportType: 0x03 for HID_FEATURE_REPORT ; 0x02 for HID_OUTPUT_REPORT + * @reportID: the report ID + * @buf: the actual data to transfer, without the report ID + * @len: size of buf + * @use_data: true: use SET_REPORT HID command, false: send plain OUTPUT report + */ +static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType, + u8 reportID, unsigned char *buf, size_t data_len, bool use_data) { struct i2c_hid *ihid = i2c_get_clientdata(client); u8 *args = ihid->argsbuf; - const struct i2c_hid_cmd * hidcmd = &hid_set_report_cmd; + const struct i2c_hid_cmd *hidcmd; int ret; u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister); u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister); @@ -278,6 +288,9 @@ i2c_hid_dbg(ihid, "%s\n", __func__); + if (!use_data && maxOutputLength == 0) + return -ENOSYS; + if (reportID >= 0x0F) { args[index++] = reportID; reportID = 0x0F; @@ -287,9 +300,10 @@ * use the data register for feature reports or if the device does not * support the output register */ - if (reportType == 0x03 || maxOutputLength == 0) { + if (use_data) { args[index++] = dataRegister & 0xFF; args[index++] = dataRegister >> 8; + hidcmd = &hid_set_report_cmd; } else { args[index++] = outputRegister & 0xFF; args[index++] = outputRegister >> 8; @@ -357,6 +371,9 @@ int ret, ret_size; int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); + if (size > ihid->bufsize) + size = ihid->bufsize; + ret = i2c_master_recv(ihid->client, ihid->inbuf, size); if (ret != size) { if (ret < 0) @@ -482,9 +499,11 @@ static void i2c_hid_free_buffers(struct i2c_hid *ihid) { kfree(ihid->inbuf); + kfree(ihid->rawbuf); kfree(ihid->argsbuf); kfree(ihid->cmdbuf); ihid->inbuf = NULL; + ihid->rawbuf = NULL; ihid->cmdbuf = NULL; ihid->argsbuf = NULL; ihid->bufsize = 0; @@ -500,10 +519,11 @@ report_size; /* report */ ihid->inbuf = kzalloc(report_size, GFP_KERNEL); + ihid->rawbuf = kzalloc(report_size, GFP_KERNEL); ihid->argsbuf = kzalloc(args_len, GFP_KERNEL); ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL); - if 
(!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) { + if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) { i2c_hid_free_buffers(ihid); return -ENOMEM; } @@ -530,12 +550,12 @@ ret = i2c_hid_get_report(client, report_type == HID_FEATURE_REPORT ? 0x03 : 0x01, - report_number, ihid->inbuf, ask_count); + report_number, ihid->rawbuf, ask_count); if (ret < 0) return ret; - ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8); + ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8); if (ret_count <= 2) return 0; @@ -544,13 +564,13 @@ /* The query buffer contains the size, dropping it in the reply */ count = min(count, ret_count - 2); - memcpy(buf, ihid->inbuf + 2, count); + memcpy(buf, ihid->rawbuf + 2, count); return count; } static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf, - size_t count, unsigned char report_type) + size_t count, unsigned char report_type, bool use_data) { struct i2c_client *client = hid->driver_data; int report_id = buf[0]; @@ -564,9 +584,9 @@ count--; } - ret = i2c_hid_set_report(client, + ret = i2c_hid_set_or_send_report(client, report_type == HID_FEATURE_REPORT ? 0x03 : 0x02, - report_id, buf, count); + report_id, buf, count, use_data); if (report_id && ret >= 0) ret++; /* add report_id to the number of transfered bytes */ @@ -574,6 +594,42 @@ return ret; } +static int __i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf, + size_t count, unsigned char report_type) +{ + struct i2c_client *client = hid->driver_data; + struct i2c_hid *ihid = i2c_get_clientdata(client); + bool data = true; /* SET_REPORT */ + + if (report_type == HID_OUTPUT_REPORT) + data = le16_to_cpu(ihid->hdesc.wMaxOutputLength) == 0; + + return i2c_hid_output_raw_report(hid, buf, count, report_type, data); +} + +static int i2c_hid_output_report(struct hid_device *hid, __u8 *buf, + size_t count) +{ + return i2c_hid_output_raw_report(hid, buf, count, HID_OUTPUT_REPORT, + false); +} + +static int i2c_hid_raw_request(struct hid_device *hid, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, + int reqtype) +{ + switch (reqtype) { + case HID_REQ_GET_REPORT: + return i2c_hid_get_raw_report(hid, reportnum, buf, len, rtype); + case HID_REQ_SET_REPORT: + if (buf[0] != reportnum) + return -EINVAL; + return i2c_hid_output_raw_report(hid, buf, len, rtype, true); + default: + return -EIO; + } +} + static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype) { @@ -597,7 +653,7 @@ break; case HID_REQ_SET_REPORT: hid_output_report(rep, buf); - i2c_hid_output_raw_report(hid, buf, len, rep->type); + i2c_hid_output_raw_report(hid, buf, len, rep->type, true); break; } @@ -761,6 +817,8 @@ .close = i2c_hid_close, .power = i2c_hid_power, .request = i2c_hid_request, + .output_report = i2c_hid_output_report, + .raw_request = i2c_hid_raw_request, }; static int i2c_hid_init_irq(struct i2c_client *client) @@ -1005,8 +1063,7 @@ hid->driver_data = client; hid->ll_driver = &i2c_hid_ll_driver; - hid->hid_get_raw_report = i2c_hid_get_raw_report; - hid->hid_output_raw_report = i2c_hid_output_raw_report; + hid->hid_output_raw_report = __i2c_hid_output_raw_report; hid->dev.parent = &client->dev; ACPI_COMPANION_SET(&hid->dev, ACPI_COMPANION(&client->dev)); hid->bus = BUS_I2C; --- linux-3.13.0.orig/drivers/hid/uhid.c +++ linux-3.13.0/drivers/hid/uhid.c @@ -377,7 +377,6 @@ hid->uniq[63] = 0; hid->ll_driver = &uhid_hid_driver; - hid->hid_get_raw_report = uhid_hid_get_raw; hid->hid_output_raw_report = uhid_hid_output_raw; hid->bus = ev->u.create.bus; 
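(Editorial sketch, not part of the patch.) The surrounding hunks drop the per-device hid_get_raw_report() pointer in favour of transport-level ->raw_request() and ->output_report() callbacks in struct hid_ll_driver; HID drivers then issue requests through hid_hw_raw_request(), as hid-rmi and hid-sony do earlier in this patch. The buffer contents below are illustrative, and hdev stands for the driver's struct hid_device pointer:

        u8 buf[2] = { 0x0f, 0x01 };     /* report ID followed by one data byte */
        int ret;

        ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
                                 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
        if (ret < 0)
                hid_err(hdev, "SET_REPORT failed: %d\n", ret);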
hid->vendor = ev->u.create.vendor; --- linux-3.13.0.orig/drivers/hid/usbhid/hid-core.c +++ linux-3.13.0/drivers/hid/usbhid/hid-core.c @@ -884,6 +884,38 @@ return ret; } +static int usbhid_set_raw_report(struct hid_device *hid, unsigned int reportnum, + __u8 *buf, size_t count, unsigned char rtype) +{ + struct usbhid_device *usbhid = hid->driver_data; + struct usb_device *dev = hid_to_usb_dev(hid); + struct usb_interface *intf = usbhid->intf; + struct usb_host_interface *interface = intf->cur_altsetting; + int ret, skipped_report_id = 0; + + /* Byte 0 is the report number. Report data starts at byte 1.*/ + buf[0] = reportnum; + if (buf[0] == 0x0) { + /* Don't send the Report ID */ + buf++; + count--; + skipped_report_id = 1; + } + + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + HID_REQ_SET_REPORT, + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + ((rtype + 1) << 8) | reportnum, + interface->desc.bInterfaceNumber, buf, count, + USB_CTRL_SET_TIMEOUT); + /* count also the report id, if this was a numbered report. */ + if (ret > 0 && skipped_report_id) + ret++; + + return ret; +} + + static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { @@ -936,6 +968,36 @@ return ret; } +static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count) +{ + struct usbhid_device *usbhid = hid->driver_data; + struct usb_device *dev = hid_to_usb_dev(hid); + int actual_length, skipped_report_id = 0, ret; + + if (!usbhid->urbout) + return -EIO; + + if (buf[0] == 0x0) { + /* Don't send the Report ID */ + buf++; + count--; + skipped_report_id = 1; + } + + ret = usb_interrupt_msg(dev, usbhid->urbout->pipe, + buf, count, &actual_length, + USB_CTRL_SET_TIMEOUT); + /* return the number of bytes transferred */ + if (ret == 0) { + ret = actual_length; + /* count also the report id */ + if (skipped_report_id) + ret++; + } + + return ret; +} + static void usbhid_restart_queues(struct usbhid_device *usbhid) { if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl)) @@ -1200,6 +1262,20 @@ } } +static int usbhid_raw_request(struct hid_device *hid, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, + int reqtype) +{ + switch (reqtype) { + case HID_REQ_GET_REPORT: + return usbhid_get_raw_report(hid, reportnum, buf, len, rtype); + case HID_REQ_SET_REPORT: + return usbhid_set_raw_report(hid, reportnum, buf, len, rtype); + default: + return -EIO; + } +} + static int usbhid_idle(struct hid_device *hid, int report, int idle, int reqtype) { @@ -1223,6 +1299,8 @@ .power = usbhid_power, .request = usbhid_request, .wait = usbhid_wait_io, + .raw_request = usbhid_raw_request, + .output_report = usbhid_output_report, .idle = usbhid_idle, }; @@ -1253,7 +1331,6 @@ usb_set_intfdata(intf, hid); hid->ll_driver = &usb_hid_driver; - hid->hid_get_raw_report = usbhid_get_raw_report; hid->hid_output_raw_report = usbhid_output_raw_report; hid->ff_init = hid_pidff_init; #ifdef CONFIG_USB_HIDDEV --- linux-3.13.0.orig/drivers/hid/usbhid/hid-quirks.c +++ linux-3.13.0/drivers/hid/usbhid/hid-quirks.c @@ -49,6 +49,7 @@ { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, 
HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, @@ -115,6 +116,7 @@ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS }, { 0, 0 } }; --- linux-3.13.0.orig/drivers/hv/Makefile +++ linux-3.13.0/drivers/hv/Makefile @@ -5,4 +5,4 @@ hv_vmbus-y := vmbus_drv.o \ hv.o connection.o channel.o \ channel_mgmt.o ring_buffer.o -hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o +hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o --- linux-3.13.0.orig/drivers/hv/channel.c +++ linux-3.13.0/drivers/hv/channel.c @@ -164,8 +164,10 @@ ret = vmbus_post_msg(open_msg, sizeof(struct vmbus_channel_open_channel)); - if (ret != 0) + if (ret != 0) { + err = ret; goto error1; + } t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ); if (t == 0) { @@ -209,7 +211,6 @@ { int i; int pagecount; - unsigned long long pfn; struct vmbus_channel_gpadl_header *gpadl_header; struct vmbus_channel_gpadl_body *gpadl_body; struct vmbus_channel_msginfo *msgheader; @@ -219,7 +220,6 @@ int pfnsum, pfncount, pfnleft, pfncurr, pfnsize; pagecount = size >> PAGE_SHIFT; - pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT; /* do we need a gpadl body msg */ pfnsize = MAX_SIZE_CHANNEL_MESSAGE - @@ -248,7 +248,8 @@ gpadl_header->range[0].byte_offset = 0; gpadl_header->range[0].byte_count = size; for (i = 0; i < pfncount; i++) - gpadl_header->range[0].pfn_array[i] = pfn+i; + gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys( + kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT; *msginfo = msgheader; *messagecount = 1; @@ -301,7 +302,9 @@ * so the hypervisor gurantees that this is ok. 
*/ for (i = 0; i < pfncurr; i++) - gpadl_body->pfn[i] = pfn + pfnsum + i; + gpadl_body->pfn[i] = slow_virt_to_phys( + kbuffer + PAGE_SIZE * (pfnsum + i)) >> + PAGE_SHIFT; /* add to msg header */ list_add_tail(&msgbody->msglistentry, @@ -327,7 +330,8 @@ gpadl_header->range[0].byte_offset = 0; gpadl_header->range[0].byte_count = size; for (i = 0; i < pagecount; i++) - gpadl_header->range[0].pfn_array[i] = pfn+i; + gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys( + kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT; *msginfo = msgheader; *messagecount = 1; @@ -344,7 +348,7 @@ * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer * * @channel: a channel - * @kbuffer: from kmalloc + * @kbuffer: from kmalloc or vmalloc * @size: page-size multiple * @gpadl_handle: some funky thing */ @@ -360,7 +364,6 @@ u32 next_gpadl_handle; unsigned long flags; int ret = 0; - int t; next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle); atomic_inc(&vmbus_connection.next_gpadl_handle); @@ -407,9 +410,7 @@ } } - t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); - BUG_ON(t == 0); - + wait_for_completion(&msginfo->waitevent); /* At this point, we received the gpadl created msg */ *gpadl_handle = gpadlmsg->gpadl; @@ -432,7 +433,7 @@ struct vmbus_channel_gpadl_teardown *msg; struct vmbus_channel_msginfo *info; unsigned long flags; - int ret, t; + int ret; info = kmalloc(sizeof(*info) + sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL); @@ -454,11 +455,12 @@ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown)); - BUG_ON(ret != 0); - t = wait_for_completion_timeout(&info->waitevent, 5*HZ); - BUG_ON(t == 0); + if (ret) + goto post_msg_err; + + wait_for_completion(&info->waitevent); - /* Received a torndown response */ +post_msg_err: spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&info->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); @@ -468,7 +470,7 @@ } EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl); -static void vmbus_close_internal(struct vmbus_channel *channel) +static int vmbus_close_internal(struct vmbus_channel *channel) { struct vmbus_channel_close_channel *msg; int ret; @@ -490,11 +492,28 @@ ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel)); - BUG_ON(ret != 0); + if (ret) { + pr_err("Close failed: close post msg return is %d\n", ret); + /* + * If we failed to post the close msg, + * it is perhaps better to leak memory. + */ + return ret; + } + /* Tear down the gpadl for the channel's ring buffer */ - if (channel->ringbuffer_gpadlhandle) - vmbus_teardown_gpadl(channel, - channel->ringbuffer_gpadlhandle); + if (channel->ringbuffer_gpadlhandle) { + ret = vmbus_teardown_gpadl(channel, + channel->ringbuffer_gpadlhandle); + if (ret) { + pr_err("Close failed: teardown gpadl return %d\n", ret); + /* + * If we failed to teardown gpadl, + * it is perhaps better to leak memory. 
+ */ + return ret; + } + } /* Cleanup the ring buffers for this channel */ hv_ringbuffer_cleanup(&channel->outbound); @@ -503,7 +522,7 @@ free_pages((unsigned long)channel->ringbuffer_pages, get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); - + return ret; } /* --- linux-3.13.0.orig/drivers/hv/channel_mgmt.c +++ linux-3.13.0/drivers/hv/channel_mgmt.c @@ -202,9 +202,16 @@ unsigned long flags; struct vmbus_channel *primary_channel; struct vmbus_channel_relid_released msg; + struct device *dev; + + if (channel->device_obj) { + dev = get_device(&channel->device_obj->device); + if (dev) { + vmbus_device_unregister(channel->device_obj); + put_device(dev); + } + } - if (channel->device_obj) - vmbus_device_unregister(channel->device_obj); memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); msg.child_relid = channel->offermsg.child_relid; msg.header.msgtype = CHANNELMSG_RELID_RELEASED; --- linux-3.13.0.orig/drivers/hv/connection.c +++ linux-3.13.0/drivers/hv/connection.c @@ -55,6 +55,9 @@ case (VERSION_WIN8): return VERSION_WIN7; + case (VERSION_WIN8_1): + return VERSION_WIN8; + case (VERSION_WS2008): default: return VERSION_INVAL; @@ -67,7 +70,6 @@ int ret = 0; struct vmbus_channel_initiate_contact *msg; unsigned long flags; - int t; init_completion(&msginfo->waitevent); @@ -78,6 +80,8 @@ msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); + if (version == VERSION_WIN8_1) + msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; /* * Add to list before we send the request since we may @@ -100,15 +104,7 @@ } /* Wait for the connection response */ - t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ); - if (t == 0) { - spin_lock_irqsave(&vmbus_connection.channelmsg_lock, - flags); - list_del(&msginfo->msglistentry); - spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, - flags); - return -ETIMEDOUT; - } + wait_for_completion(&msginfo->waitevent); spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); @@ -228,8 +224,8 @@ vmbus_connection.int_page = NULL; } - free_pages((unsigned long)vmbus_connection.monitor_pages[0], 1); - free_pages((unsigned long)vmbus_connection.monitor_pages[1], 1); + free_pages((unsigned long)vmbus_connection.monitor_pages[0], 0); + free_pages((unsigned long)vmbus_connection.monitor_pages[1], 0); vmbus_connection.monitor_pages[0] = NULL; vmbus_connection.monitor_pages[1] = NULL; @@ -323,9 +319,13 @@ */ do { - hv_begin_read(&channel->inbound); + if (read_state) + hv_begin_read(&channel->inbound); channel->onchannel_callback(arg); - bytes_to_read = hv_end_read(&channel->inbound); + if (read_state) + bytes_to_read = hv_end_read(&channel->inbound); + else + bytes_to_read = 0; } while (read_state && (bytes_to_read != 0)); } else { pr_err("no channel callback for relid - %u\n", relid); @@ -408,10 +408,21 @@ * insufficient resources. Retry the operation a couple of * times before giving up. 
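(Editorial note on the retry hunk just below; this sketch only restates its control flow and is not additional patch text.) HV_STATUS_INSUFFICIENT_BUFFERS is mapped to -ENOMEM and then deliberately falls through to the -ENOMEM case, so both take the retry path with a 100 ms sleep; success returns immediately and any other status aborts with -EINVAL:

        switch (ret) {
        case HV_STATUS_INSUFFICIENT_BUFFERS:
                ret = -ENOMEM;
                /* deliberate fall through */
        case -ENOMEM:
                break;                  /* retry after msleep(100) */
        case HV_STATUS_SUCCESS:
                return ret;
        default:
                pr_err("hv_post_msg() failed; error code:%d\n", ret);
                return -EINVAL;
        }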
*/ - while (retries < 3) { - ret = hv_post_message(conn_id, 1, buffer, buflen); - if (ret != HV_STATUS_INSUFFICIENT_BUFFERS) + while (retries < 10) { + ret = hv_post_message(conn_id, 1, buffer, buflen); + + switch (ret) { + case HV_STATUS_INSUFFICIENT_BUFFERS: + ret = -ENOMEM; + case -ENOMEM: + break; + case HV_STATUS_SUCCESS: return ret; + default: + pr_err("hv_post_msg() failed; error code:%d\n", ret); + return -EINVAL; + } + retries++; msleep(100); } --- linux-3.13.0.orig/drivers/hv/hv.c +++ linux-3.13.0/drivers/hv/hv.c @@ -31,6 +31,14 @@ #include #include "hyperv_vmbus.h" +#ifndef PKG_ABI +/* + * Preserve the ability to 'make deb-pkg' since PKG_ABI is provided + * by the Ubuntu build rules. + */ +#define PKG_ABI 0 +#endif + /* The one and only */ struct hv_context hv_context = { .synic_initialized = false, @@ -138,6 +146,8 @@ memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS); memset(hv_context.synic_message_page, 0, sizeof(void *) * NR_CPUS); + memset(hv_context.post_msg_page, 0, + sizeof(void *) * NR_CPUS); memset(hv_context.vp_index, 0, sizeof(int) * NR_CPUS); memset(hv_context.event_dpc, 0, @@ -148,7 +158,7 @@ /* * Write our OS ID. */ - hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0); + hv_context.guestid = generate_guest_id(0x80 /*Canonical*/, LINUX_VERSION_CODE, PKG_ABI); wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid); /* See if the hypercall page is already set */ @@ -217,26 +227,18 @@ enum hv_message_type message_type, void *payload, size_t payload_size) { - struct aligned_input { - u64 alignment8; - struct hv_input_post_message msg; - }; struct hv_input_post_message *aligned_msg; u16 status; - unsigned long addr; if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) return -EMSGSIZE; - addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC); - if (!addr) - return -ENOMEM; - aligned_msg = (struct hv_input_post_message *) - (ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN)); + hv_context.post_msg_page[get_cpu()]; aligned_msg->connectionid = connection_id; + aligned_msg->reserved = 0; aligned_msg->message_type = message_type; aligned_msg->payload_size = payload_size; memcpy((void *)aligned_msg->payload, payload, payload_size); @@ -244,8 +246,7 @@ status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL) & 0xFFFF; - kfree((void *)addr); - + put_cpu(); return status; } @@ -294,6 +295,14 @@ pr_err("Unable to allocate SYNIC event page\n"); goto err; } + + hv_context.post_msg_page[cpu] = + (void *)get_zeroed_page(GFP_ATOMIC); + + if (hv_context.post_msg_page[cpu] == NULL) { + pr_err("Unable to allocate post msg page\n"); + goto err; + } } return 0; @@ -308,6 +317,8 @@ free_page((unsigned long)hv_context.synic_event_page[cpu]); if (hv_context.synic_message_page[cpu]) free_page((unsigned long)hv_context.synic_message_page[cpu]); + if (hv_context.post_msg_page[cpu]) + free_page((unsigned long)hv_context.post_msg_page[cpu]); } void hv_synic_free(void) --- linux-3.13.0.orig/drivers/hv/hv_balloon.c +++ linux-3.13.0/drivers/hv/hv_balloon.c @@ -19,6 +19,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include @@ -459,6 +460,11 @@ */ static uint pressure_report_delay = 45; +/* + * The last time we posted a pressure report to host. 
+ */ +static unsigned long last_post_time; + module_param(hot_add, bool, (S_IRUGO | S_IWUSR)); MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add"); @@ -542,6 +548,7 @@ static struct hv_dynmem_device dm_device; +static void post_status(struct hv_dynmem_device *dm); #ifdef CONFIG_MEMORY_HOTPLUG static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size) @@ -612,7 +619,7 @@ * have not been "onlined" within the allowed time. */ wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ); - + post_status(&dm_device); } return; @@ -951,11 +958,17 @@ { struct dm_status status; struct sysinfo val; + unsigned long now = jiffies; + unsigned long last_post = last_post_time; if (pressure_report_delay > 0) { --pressure_report_delay; return; } + + if (!time_after(now, (last_post_time + HZ))) + return; + si_meminfo(&val); memset(&status, 0, sizeof(struct dm_status)); status.hdr.type = DM_STATUS_REPORT; @@ -983,6 +996,14 @@ if (status.hdr.trans_id != atomic_read(&trans_id)) return; + /* + * If the last post time that we sampled has changed, + * we have raced, don't post the status. + */ + if (last_post != last_post_time) + return; + + last_post_time = jiffies; vmbus_sendpacket(dm->dev->channel, &status, sizeof(struct dm_status), (unsigned long)NULL, @@ -1117,7 +1138,7 @@ if (ret == -EAGAIN) msleep(20); - + post_status(&dm_device); } while (ret == -EAGAIN); if (ret) { @@ -1144,8 +1165,10 @@ struct dm_unballoon_response resp; int i; - for (i = 0; i < range_count; i++) + for (i = 0; i < range_count; i++) { free_balloon_pages(dm, &range_array[i]); + post_status(&dm_device); + } if (req->more_pages == 1) return; @@ -1171,7 +1194,8 @@ int t; while (!kthread_should_stop()) { - t = wait_for_completion_timeout(&dm_device.config_event, 1*HZ); + t = wait_for_completion_interruptible_timeout( + &dm_device.config_event, 1*HZ); /* * The host expects us to post information on the memory * pressure every second. --- linux-3.13.0.orig/drivers/hv/hv_fcopy.c +++ linux-3.13.0/drivers/hv/hv_fcopy.c @@ -0,0 +1,414 @@ +/* + * An implementation of file copy service. + * + * Copyright (C) 2014, Microsoft, Inc. + * + * Author : K. Y. Srinivasan + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hyperv_vmbus.h" + +#define WIN8_SRV_MAJOR 1 +#define WIN8_SRV_MINOR 1 +#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR) + +/* + * Global state maintained for transaction that is being processed. + * For a class of integration services, including the "file copy service", + * the specified protocol is a "request/response" protocol which means that + * there can only be single outstanding transaction from the host at any + * given point in time. We use this to simplify memory management in this + * driver - we cache and process only one message at a time. 
+ * + * While the request/response protocol is guaranteed by the host, we further + * ensure this by serializing packet processing in this driver - we do not + * read additional packets from the VMBUs until the current packet is fully + * handled. + * + * The transaction "active" state is set when we receive a request from the + * host and we cleanup this state when the transaction is completed - when we + * respond to the host with our response. When the transaction active state is + * set, we defer handling incoming packets. + */ + +static struct { + bool active; /* transaction status - active or not */ + int recv_len; /* number of bytes received. */ + struct hv_fcopy_hdr *fcopy_msg; /* current message */ + struct hv_start_fcopy message; /* sent to daemon */ + struct vmbus_channel *recv_channel; /* chn we got the request */ + u64 recv_req_id; /* request ID. */ + void *fcopy_context; /* for the channel callback */ + struct semaphore read_sema; +} fcopy_transaction; + +static bool opened; /* currently device opened */ + +/* + * Before we can accept copy messages from the host, we need + * to handshake with the user level daemon. This state tracks + * if we are in the handshake phase. + */ +static bool in_hand_shake = true; +static void fcopy_send_data(void); +static void fcopy_respond_to_host(int error); +static void fcopy_work_func(struct work_struct *dummy); +static DECLARE_DELAYED_WORK(fcopy_work, fcopy_work_func); +static u8 *recv_buffer; + +static void fcopy_work_func(struct work_struct *dummy) +{ + /* + * If the timer fires, the user-mode component has not responded; + * process the pending transaction. + */ + fcopy_respond_to_host(HV_E_FAIL); +} + +static int fcopy_handle_handshake(u32 version) +{ + switch (version) { + case FCOPY_CURRENT_VERSION: + break; + default: + /* + * For now we will fail the registration. + * If and when we have multiple versions to + * deal with, we will be backward compatible. + * We will add this code when needed. + */ + return -EINVAL; + } + pr_info("FCP: user-mode registering done. Daemon version: %d\n", + version); + fcopy_transaction.active = false; + if (fcopy_transaction.fcopy_context) + hv_fcopy_onchannelcallback(fcopy_transaction.fcopy_context); + in_hand_shake = false; + return 0; +} + +static void fcopy_send_data(void) +{ + struct hv_start_fcopy *smsg_out = &fcopy_transaction.message; + int operation = fcopy_transaction.fcopy_msg->operation; + struct hv_start_fcopy *smsg_in; + + /* + * The strings sent from the host are encoded in + * in utf16; convert it to utf8 strings. + * The host assures us that the utf16 strings will not exceed + * the max lengths specified. We will however, reserve room + * for the string terminating character - in the utf16s_utf8s() + * function we limit the size of the buffer where the converted + * string is placed to W_MAX_PATH -1 to guarantee + * that the strings can be properly terminated! 
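The conversion described above leans on utf16s_to_utf8s() honouring its maxlen argument: by zeroing the destination first and passing one byte less than the buffer size, the converted name stays NUL terminated even when the source fills the whole W_MAX_PATH-sized field. A hedged sketch of that idiom; copy_name and NAME_BYTES are illustrative, not taken from the driver:

    #include <linux/nls.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define NAME_BYTES 256              /* illustrative buffer size */

    /* Convert a UTF-16LE string into dst, guaranteeing NUL termination. */
    static void copy_name(const __u16 *src, int src_chars, char *dst)
    {
            memset(dst, 0, NAME_BYTES);
            utf16s_to_utf8s((const wchar_t *)src, src_chars,
                            UTF16_LITTLE_ENDIAN,
                            (__u8 *)dst, NAME_BYTES - 1);
            /* At most NAME_BYTES - 1 bytes were written, so the last
             * byte left by memset() still terminates the string. */
    }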
+ */ + + switch (operation) { + case START_FILE_COPY: + memset(smsg_out, 0, sizeof(struct hv_start_fcopy)); + smsg_out->hdr.operation = operation; + smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg; + + utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH, + UTF16_LITTLE_ENDIAN, + (__u8 *)smsg_out->file_name, W_MAX_PATH - 1); + + utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH, + UTF16_LITTLE_ENDIAN, + (__u8 *)smsg_out->path_name, W_MAX_PATH - 1); + + smsg_out->copy_flags = smsg_in->copy_flags; + smsg_out->file_size = smsg_in->file_size; + break; + + default: + break; + } + up(&fcopy_transaction.read_sema); + return; +} + +/* + * Send a response back to the host. + */ + +static void +fcopy_respond_to_host(int error) +{ + struct icmsg_hdr *icmsghdr; + u32 buf_len; + struct vmbus_channel *channel; + u64 req_id; + + /* + * Copy the global state for completing the transaction. Note that + * only one transaction can be active at a time. This is guaranteed + * by the file copy protocol implemented by the host. Furthermore, + * the "transaction active" state we maintain ensures that there can + * only be one active transaction at a time. + */ + + buf_len = fcopy_transaction.recv_len; + channel = fcopy_transaction.recv_channel; + req_id = fcopy_transaction.recv_req_id; + + fcopy_transaction.active = false; + + icmsghdr = (struct icmsg_hdr *) + &recv_buffer[sizeof(struct vmbuspipe_hdr)]; + + if (channel->onchannel_callback == NULL) + /* + * We have raced with util driver being unloaded; + * silently return. + */ + return; + + icmsghdr->status = error; + icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; + vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, + VM_PKT_DATA_INBAND, 0); +} + +void hv_fcopy_onchannelcallback(void *context) +{ + struct vmbus_channel *channel = context; + u32 recvlen; + u64 requestid; + struct hv_fcopy_hdr *fcopy_msg; + struct icmsg_hdr *icmsghdr; + struct icmsg_negotiate *negop = NULL; + int util_fw_version; + int fcopy_srv_version; + + if (fcopy_transaction.active) { + /* + * We will defer processing this callback once + * the current transaction is complete. + */ + fcopy_transaction.fcopy_context = context; + return; + } + + vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, + &requestid); + if (recvlen <= 0) + return; + + icmsghdr = (struct icmsg_hdr *)&recv_buffer[ + sizeof(struct vmbuspipe_hdr)]; + if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) { + util_fw_version = UTIL_FW_VERSION; + fcopy_srv_version = WIN8_SRV_VERSION; + vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer, + util_fw_version, fcopy_srv_version); + } else { + fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[ + sizeof(struct vmbuspipe_hdr) + + sizeof(struct icmsg_hdr)]; + + /* + * Stash away this global state for completing the + * transaction; note transactions are serialized. + */ + + fcopy_transaction.active = true; + fcopy_transaction.recv_len = recvlen; + fcopy_transaction.recv_channel = channel; + fcopy_transaction.recv_req_id = requestid; + fcopy_transaction.fcopy_msg = fcopy_msg; + + /* + * Send the information to the user-level daemon. + */ + fcopy_send_data(); + schedule_delayed_work(&fcopy_work, 5*HZ); + return; + } + icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; + vmbus_sendpacket(channel, recv_buffer, recvlen, requestid, + VM_PKT_DATA_INBAND, 0); +} + +/* + * Create a char device that can support read/write for passing + * the payload. 
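Scheduling fcopy_work for 5*HZ when a request is handed to the daemon, and completing the transaction only when cancel_delayed_work_sync() reports that the timer was still pending, gives the driver a single winner between "daemon answered" and "daemon timed out". A generic sketch of that deadline pattern under assumed names (reply_timeout, arm_deadline, complete_if_in_time):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    static void deadline_expired(struct work_struct *work)
    {
            /* Nobody answered in time: fail the pending request here. */
    }
    static DECLARE_DELAYED_WORK(reply_timeout, deadline_expired);

    /* Arm a five second deadline when a request goes out to user space. */
    static void arm_deadline(void)
    {
            schedule_delayed_work(&reply_timeout, 5 * HZ);
    }

    /*
     * Returns true when the reply beat the deadline; the caller then
     * completes the request, otherwise deadline_expired() already has.
     */
    static bool complete_if_in_time(void)
    {
            return cancel_delayed_work_sync(&reply_timeout);
    }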
+ */ + +static ssize_t fcopy_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + void *src; + size_t copy_size; + int operation; + + /* + * Wait until there is something to be read. + */ + if (down_interruptible(&fcopy_transaction.read_sema)) + return -EINTR; + + /* + * The channel may be rescinded and in this case, we will wakeup the + * the thread blocked on the semaphore and we will use the opened + * state to correctly handle this case. + */ + if (!opened) + return -ENODEV; + + operation = fcopy_transaction.fcopy_msg->operation; + + if (operation == START_FILE_COPY) { + src = &fcopy_transaction.message; + copy_size = sizeof(struct hv_start_fcopy); + if (count < copy_size) + return 0; + } else { + src = fcopy_transaction.fcopy_msg; + copy_size = sizeof(struct hv_do_fcopy); + if (count < copy_size) + return 0; + } + if (copy_to_user(buf, src, copy_size)) + return -EFAULT; + + return copy_size; +} + +static ssize_t fcopy_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int response = 0; + + if (count != sizeof(int)) + return -EINVAL; + + if (copy_from_user(&response, buf, sizeof(int))) + return -EFAULT; + + if (in_hand_shake) { + if (fcopy_handle_handshake(response)) + return -EINVAL; + return sizeof(int); + } + + /* + * Complete the transaction by forwarding the result + * to the host. But first, cancel the timeout. + */ + if (cancel_delayed_work_sync(&fcopy_work)) + fcopy_respond_to_host(response); + + return sizeof(int); +} + +static int fcopy_open(struct inode *inode, struct file *f) +{ + /* + * The user level daemon that will open this device is + * really an extension of this driver. We can have only + * active open at a time. + */ + if (opened) + return -EBUSY; + + /* + * The daemon is alive; setup the state. + */ + opened = true; + return 0; +} + +static int fcopy_release(struct inode *inode, struct file *f) +{ + /* + * The daemon has exited; reset the state. + */ + in_hand_shake = true; + opened = false; + return 0; +} + + +static const struct file_operations fcopy_fops = { + .read = fcopy_read, + .write = fcopy_write, + .release = fcopy_release, + .open = fcopy_open, +}; + +static struct miscdevice fcopy_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vmbus/hv_fcopy", + .fops = &fcopy_fops, +}; + +static int fcopy_dev_init(void) +{ + return misc_register(&fcopy_misc); +} + +static void fcopy_dev_deinit(void) +{ + + /* + * The device is going away - perhaps because the + * host has rescinded the channel. Setup state so that + * user level daemon can gracefully exit if it is blocked + * on the read semaphore. + */ + opened = false; + /* + * Signal the semaphore as the device is + * going away. + */ + up(&fcopy_transaction.read_sema); + misc_deregister(&fcopy_misc); +} + +int hv_fcopy_init(struct hv_util_service *srv) +{ + recv_buffer = srv->recv_buffer; + + /* + * When this driver loads, the user level daemon that + * processes the host requests may not yet be running. + * Defer processing channel callbacks until the daemon + * has registered. 
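From user space the device behaves as a simple request/response pipe: the daemon registers by writing its protocol version, then loops reading one request at a time (the read blocks on the driver's semaphore) and writing back a single int status. A hedged daemon-side sketch; it assumes FCOPY_CURRENT_VERSION and the message layouts come from the hyperv UAPI header of the same series, which is not shown in this hunk:

    /* Userspace sketch; build against kernel headers that provide
     * FCOPY_CURRENT_VERSION (an assumption, see the note above). */
    #include <fcntl.h>
    #include <unistd.h>
    #include <linux/hyperv.h>

    int main(void)
    {
            char buf[4096];
            int version = FCOPY_CURRENT_VERSION;
            int status = 0;                 /* reported back to the host */
            int fd = open("/dev/vmbus/hv_fcopy", O_RDWR);

            if (fd < 0)
                    return 1;
            /* The first write is the handshake that registers the daemon. */
            if (write(fd, &version, sizeof(version)) != sizeof(version))
                    return 1;

            for (;;) {
                    /* Blocks until the host sends a request or the
                     * device is being torn down. */
                    ssize_t n = read(fd, buf, sizeof(buf));

                    if (n <= 0)
                            break;
                    /* ... perform the copy step described in buf ... */
                    if (write(fd, &status, sizeof(status)) != sizeof(status))
                            break;
            }
            close(fd);
            return 0;
    }

Since fcopy_open() refuses a second opener with -EBUSY, only one copy of such a daemon can be attached at a time.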
+ */ + fcopy_transaction.active = true; + sema_init(&fcopy_transaction.read_sema, 0); + + return fcopy_dev_init(); +} + +void hv_fcopy_deinit(void) +{ + cancel_delayed_work_sync(&fcopy_work); + fcopy_dev_deinit(); +} --- linux-3.13.0.orig/drivers/hv/hv_util.c +++ linux-3.13.0/drivers/hv/hv_util.c @@ -28,6 +28,7 @@ #include #include +#include "hyperv_vmbus.h" #define SD_MAJOR 3 #define SD_MINOR 0 @@ -82,6 +83,12 @@ .util_deinit = hv_vss_deinit, }; +static struct hv_util_service util_fcopy = { + .util_cb = hv_fcopy_onchannelcallback, + .util_init = hv_fcopy_init, + .util_deinit = hv_fcopy_deinit, +}; + static void perform_shutdown(struct work_struct *dummy) { orderly_poweroff(true); @@ -401,6 +408,10 @@ { HV_VSS_GUID, .driver_data = (unsigned long)&util_vss }, + /* File copy GUID */ + { HV_FCOPY_GUID, + .driver_data = (unsigned long)&util_fcopy + }, { }, }; --- linux-3.13.0.orig/drivers/hv/hyperv_vmbus.h +++ linux-3.13.0/drivers/hv/hyperv_vmbus.h @@ -510,6 +510,10 @@ * basis. */ struct tasklet_struct *event_dpc[NR_CPUS]; + /* + * buffer to post messages to the host. + */ + void *post_msg_page[NR_CPUS]; }; extern struct hv_context hv_context; @@ -669,5 +673,9 @@ void vmbus_on_event(unsigned long data); +int hv_fcopy_init(struct hv_util_service *); +void hv_fcopy_deinit(void); +void hv_fcopy_onchannelcallback(void *); + #endif /* _HYPERV_VMBUS_H */ --- linux-3.13.0.orig/drivers/hv/vmbus_drv.c +++ linux-3.13.0/drivers/hv/vmbus_drv.c @@ -46,6 +46,12 @@ static struct completion probe_event; static int irq; +struct resource hyperv_mmio = { + .name = "hyperv mmio", + .flags = IORESOURCE_MEM, +}; +EXPORT_SYMBOL_GPL(hyperv_mmio); + static int vmbus_exists(void) { if (hv_acpi_dev == NULL) @@ -888,18 +894,21 @@ /* - * VMBUS is an acpi enumerated device. Get the the IRQ information - * from DSDT. + * VMBUS is an acpi enumerated device. Get the the information we + * need from DSDT. */ -static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq) +static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx) { + switch (res->type) { + case ACPI_RESOURCE_TYPE_IRQ: + irq = res->data.irq.interrupts[0]; + break; - if (res->type == ACPI_RESOURCE_TYPE_IRQ) { - struct acpi_resource_irq *irqp; - irqp = &res->data.irq; - - *((unsigned int *)irq) = irqp->interrupts[0]; + case ACPI_RESOURCE_TYPE_ADDRESS64: + hyperv_mmio.start = res->data.address64.minimum; + hyperv_mmio.end = res->data.address64.maximum; + break; } return AE_OK; @@ -908,18 +917,34 @@ static int vmbus_acpi_add(struct acpi_device *device) { acpi_status result; + int ret_val = -ENODEV; hv_acpi_dev = device; result = acpi_walk_resources(device->handle, METHOD_NAME__CRS, - vmbus_walk_resources, &irq); + vmbus_walk_resources, NULL); - if (ACPI_FAILURE(result)) { - complete(&probe_event); - return -ENODEV; + if (ACPI_FAILURE(result)) + goto acpi_walk_err; + /* + * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that + * has the mmio ranges. Get that. 
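The hunk that follows walks the parent device's _CRS as well and, once a start and end have been found, inserts the new hyperv_mmio resource under iomem_resource so the claimed window shows up in /proc/iomem and can later be parcelled out to child devices. A minimal sketch of that request_resource() claim, with illustrative names (example_mmio, claim_mmio):

    #include <linux/ioport.h>

    static struct resource example_mmio = {
            .name  = "example mmio",
            .flags = IORESOURCE_MEM,
    };

    /* Claim [start, end] under the root MMIO resource tree. */
    static int claim_mmio(resource_size_t start, resource_size_t end)
    {
            example_mmio.start = start;
            example_mmio.end   = end;
            return request_resource(&iomem_resource, &example_mmio);
    }

request_resource() returns 0 on success and an error when the range conflicts with an already claimed region, so a failed claim is visible to the caller.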
+ */ + if (device->parent) { + result = acpi_walk_resources(device->parent->handle, + METHOD_NAME__CRS, + vmbus_walk_resources, NULL); + + if (ACPI_FAILURE(result)) + goto acpi_walk_err; + if (hyperv_mmio.start && hyperv_mmio.end) + request_resource(&iomem_resource, &hyperv_mmio); } + ret_val = 0; + +acpi_walk_err: complete(&probe_event); - return 0; + return ret_val; } static const struct acpi_device_id vmbus_acpi_device_ids[] = { --- linux-3.13.0.orig/drivers/hwmon/adm1021.c +++ linux-3.13.0/drivers/hwmon/adm1021.c @@ -185,7 +185,7 @@ struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -193,10 +193,11 @@ temp /= 1000; mutex_lock(&data->update_lock); - data->temp_max[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_max[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), - data->temp_max[index]); + reg_val); mutex_unlock(&data->update_lock); return count; @@ -210,7 +211,7 @@ struct i2c_client *client = to_i2c_client(dev); struct adm1021_data *data = i2c_get_clientdata(client); long temp; - int err; + int reg_val, err; err = kstrtol(buf, 10, &temp); if (err) @@ -218,10 +219,11 @@ temp /= 1000; mutex_lock(&data->update_lock); - data->temp_min[index] = clamp_val(temp, -128, 127); + reg_val = clamp_val(temp, -128, 127); + data->temp_min[index] = reg_val * 1000; if (!read_only) i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), - data->temp_min[index]); + reg_val); mutex_unlock(&data->update_lock); return count; --- linux-3.13.0.orig/drivers/hwmon/adm1029.c +++ linux-3.13.0/drivers/hwmon/adm1029.c @@ -232,6 +232,9 @@ /* Update the value */ reg = (reg & 0x3F) | (val << 6); + /* Update the cache */ + data->fan_div[attr->index] = reg; + /* Write value */ i2c_smbus_write_byte_data(client, ADM1029_REG_FAN_DIV[attr->index], reg); --- linux-3.13.0.orig/drivers/hwmon/adm1031.c +++ linux-3.13.0/drivers/hwmon/adm1031.c @@ -365,6 +365,7 @@ if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), @@ -394,6 +395,7 @@ if (ret) return ret; + val = clamp_val(val, 0, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); @@ -696,7 +698,7 @@ if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), @@ -717,7 +719,7 @@ if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), @@ -738,7 +740,7 @@ if (ret) return ret; - val = clamp_val(val, -55000, nr == 0 ? 
127750 : 127875); + val = clamp_val(val, -55000, 127000); mutex_lock(&data->update_lock); data->temp_crit[nr] = TEMP_TO_REG(val); adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), --- linux-3.13.0.orig/drivers/hwmon/ads1015.c +++ linux-3.13.0/drivers/hwmon/ads1015.c @@ -198,7 +198,7 @@ } channel = be32_to_cpup(property); - if (channel > ADS1015_CHANNELS) { + if (channel >= ADS1015_CHANNELS) { dev_err(&client->dev, "invalid channel index %d on %s\n", channel, node->full_name); @@ -212,6 +212,7 @@ dev_err(&client->dev, "invalid gain on %s\n", node->full_name); + return -EINVAL; } } @@ -222,6 +223,7 @@ dev_err(&client->dev, "invalid data_rate on %s\n", node->full_name); + return -EINVAL; } } --- linux-3.13.0.orig/drivers/hwmon/adt7470.c +++ linux-3.13.0/drivers/hwmon/adt7470.c @@ -515,7 +515,7 @@ return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->temp_min[attr->index] = temp; @@ -549,7 +549,7 @@ return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->temp_max[attr->index] = temp; @@ -826,7 +826,7 @@ return -EINVAL; temp = DIV_ROUND_CLOSEST(temp, 1000); - temp = clamp_val(temp, 0, 255); + temp = clamp_val(temp, -128, 127); mutex_lock(&data->lock); data->pwm_tmin[attr->index] = temp; --- linux-3.13.0.orig/drivers/hwmon/amc6821.c +++ linux-3.13.0/drivers/hwmon/amc6821.c @@ -360,11 +360,13 @@ if (config) return config; + mutex_lock(&data->update_lock); config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); - return config; + count = config; + goto unlock; } switch (val) { @@ -381,14 +383,15 @@ config |= AMC6821_CONF1_FDRC1; break; default: - return -EINVAL; + count = -EINVAL; + goto unlock; } - mutex_lock(&data->update_lock); if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) { dev_err(&client->dev, "Configuration register write error, aborting.\n"); count = -EIO; } +unlock: mutex_unlock(&data->update_lock); return count; } @@ -493,8 +496,9 @@ return -EINVAL; } - data->valid = 0; mutex_lock(&data->update_lock); + data->valid = 0; + switch (ix) { case 0: ptemp[0] = clamp_val(val / 1000, 0, @@ -658,13 +662,14 @@ if (config) return config; + mutex_lock(&data->update_lock); config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); if (config < 0) { dev_err(&client->dev, "Error reading configuration register, aborting.\n"); - return config; + count = config; + goto EXIT; } - mutex_lock(&data->update_lock); switch (val) { case 2: config &= ~AMC6821_CONF4_PSPR; @@ -704,7 +709,7 @@ get_temp_alarm, NULL, IDX_TEMP1_MAX); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, get_temp_alarm, NULL, IDX_TEMP1_CRIT); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_temp, NULL, IDX_TEMP2_INPUT); static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, set_temp, IDX_TEMP2_MIN); --- linux-3.13.0.orig/drivers/hwmon/da9052-hwmon.c +++ linux-3.13.0/drivers/hwmon/da9052-hwmon.c @@ -194,7 +194,7 @@ struct device_attribute *devattr, char *buf) { - return sprintf(buf, "da9052-hwmon\n"); + return sprintf(buf, "da9052\n"); } static ssize_t show_label(struct device *dev, --- linux-3.13.0.orig/drivers/hwmon/da9055-hwmon.c +++ linux-3.13.0/drivers/hwmon/da9055-hwmon.c @@ -204,7 +204,7 @@ struct device_attribute *devattr, char *buf) 
{ - return sprintf(buf, "da9055-hwmon\n"); + return sprintf(buf, "da9055\n"); } static ssize_t show_label(struct device *dev, --- linux-3.13.0.orig/drivers/hwmon/dme1737.c +++ linux-3.13.0/drivers/hwmon/dme1737.c @@ -247,8 +247,8 @@ u8 pwm_acz[3]; u8 pwm_freq[6]; u8 pwm_rr[2]; - u8 zone_low[3]; - u8 zone_abs[3]; + s8 zone_low[3]; + s8 zone_abs[3]; u8 zone_hyst[2]; u32 alarms; }; @@ -277,7 +277,7 @@ return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2)); } -static inline int IN_TO_REG(int val, int nominal) +static inline int IN_TO_REG(long val, int nominal) { return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255); } @@ -293,7 +293,7 @@ return (reg * 1000) >> (res - 8); } -static inline int TEMP_TO_REG(int val) +static inline int TEMP_TO_REG(long val) { return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127); } @@ -308,7 +308,7 @@ return TEMP_RANGE[(reg >> 4) & 0x0f]; } -static int TEMP_RANGE_TO_REG(int val, int reg) +static int TEMP_RANGE_TO_REG(long val, int reg) { int i; @@ -331,7 +331,7 @@ return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000; } -static inline int TEMP_HYST_TO_REG(int val, int ix, int reg) +static inline int TEMP_HYST_TO_REG(long val, int ix, int reg) { int hyst = clamp_val((val + 500) / 1000, 0, 15); @@ -347,7 +347,7 @@ return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg; } -static inline int FAN_TO_REG(int val, int tpc) +static inline int FAN_TO_REG(long val, int tpc) { if (tpc) { return clamp_val(val / tpc, 0, 0xffff); @@ -379,7 +379,7 @@ return (edge > 0) ? 1 << (edge - 1) : 0; } -static inline int FAN_TYPE_TO_REG(int val, int reg) +static inline int FAN_TYPE_TO_REG(long val, int reg) { int edge = (val == 4) ? 3 : val; @@ -402,7 +402,7 @@ return 1000 + i * 500; } -static int FAN_MAX_TO_REG(int val) +static int FAN_MAX_TO_REG(long val) { int i; @@ -460,7 +460,7 @@ return acz[(reg >> 5) & 0x07]; } -static inline int PWM_ACZ_TO_REG(int val, int reg) +static inline int PWM_ACZ_TO_REG(long val, int reg) { int acz = (val == 4) ? 2 : val - 1; @@ -476,7 +476,7 @@ return PWM_FREQ[reg & 0x0f]; } -static int PWM_FREQ_TO_REG(int val, int reg) +static int PWM_FREQ_TO_REG(long val, int reg) { int i; @@ -510,7 +510,7 @@ return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0; } -static int PWM_RR_TO_REG(int val, int ix, int reg) +static int PWM_RR_TO_REG(long val, int ix, int reg) { int i; @@ -528,7 +528,7 @@ return PWM_RR_FROM_REG(reg, ix) ? 1 : 0; } -static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg) +static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg) { int en = (ix == 1) ? 
0x80 : 0x08; @@ -1481,13 +1481,16 @@ const char *buf, size_t count) { struct dme1737_data *data = dev_get_drvdata(dev); - long val; + unsigned long val; int err; - err = kstrtol(buf, 10, &val); + err = kstrtoul(buf, 10, &val); if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } --- linux-3.13.0.orig/drivers/hwmon/ds1621.c +++ linux-3.13.0/drivers/hwmon/ds1621.c @@ -309,6 +309,7 @@ data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT); i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf); data->update_interval = ds1721_convrates[resol]; + data->zbits = 7 - resol; mutex_unlock(&data->update_lock); return count; --- linux-3.13.0.orig/drivers/hwmon/emc1403.c +++ linux-3.13.0/drivers/hwmon/emc1403.c @@ -163,7 +163,7 @@ if (retval < 0) goto fail; - hyst = val - retval * 1000; + hyst = retval * 1000 - val; hyst = DIV_ROUND_CLOSEST(hyst, 1000); if (hyst < 0 || hyst > 255) { retval = -ERANGE; @@ -330,7 +330,7 @@ } id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); - if (id != 0x01) + if (id < 0x01 || id > 0x04) return -ENODEV; return 0; @@ -355,9 +355,9 @@ if (id->driver_data) data->groups[1] = &emc1404_group; - hwmon_dev = hwmon_device_register_with_groups(&client->dev, - client->name, data, - data->groups); + hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, + client->name, data, + data->groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); --- linux-3.13.0.orig/drivers/hwmon/emc2103.c +++ linux-3.13.0/drivers/hwmon/emc2103.c @@ -250,9 +250,7 @@ if (result < 0) return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_min[nr] = val; @@ -274,9 +272,7 @@ if (result < 0) return result; - val = DIV_ROUND_CLOSEST(val, 1000); - if ((val < -63) || (val > 127)) - return -EINVAL; + val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127); mutex_lock(&data->update_lock); data->temp_max[nr] = val; @@ -390,15 +386,14 @@ { struct emc2103_data *data = emc2103_update_device(dev); struct i2c_client *client = to_i2c_client(dev); - long rpm_target; + unsigned long rpm_target; - int result = kstrtol(buf, 10, &rpm_target); + int result = kstrtoul(buf, 10, &rpm_target); if (result < 0) return result; /* Datasheet states 16384 as maximum RPM target (table 3.2) */ - if ((rpm_target < 0) || (rpm_target > 16384)) - return -EINVAL; + rpm_target = clamp_val(rpm_target, 0, 16384); mutex_lock(&data->update_lock); --- linux-3.13.0.orig/drivers/hwmon/gpio-fan.c +++ linux-3.13.0/drivers/hwmon/gpio-fan.c @@ -173,7 +173,7 @@ return -ENODEV; } -static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm) +static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm) { struct gpio_fan_speed *speed = fan_data->speed; int i; --- linux-3.13.0.orig/drivers/hwmon/ina2xx.c +++ linux-3.13.0/drivers/hwmon/ina2xx.c @@ -148,7 +148,8 @@ switch (reg) { case INA2XX_SHUNT_VOLTAGE: - val = DIV_ROUND_CLOSEST(data->regs[reg], + /* signed register */ + val = DIV_ROUND_CLOSEST((s16)data->regs[reg], data->config->shunt_div); break; case INA2XX_BUS_VOLTAGE: @@ -160,8 +161,8 @@ val = data->regs[reg] * data->config->power_lsb; break; case INA2XX_CURRENT: - /* LSB=1mA (selected). 
Is in mA */ - val = data->regs[reg]; + /* signed register, LSB=1mA (selected), in mA */ + val = (s16)data->regs[reg]; break; default: /* programmer goofed */ --- linux-3.13.0.orig/drivers/hwmon/k10temp.c +++ linux-3.13.0/drivers/hwmon/k10temp.c @@ -210,6 +210,7 @@ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, {} }; --- linux-3.13.0.orig/drivers/hwmon/lm78.c +++ linux-3.13.0/drivers/hwmon/lm78.c @@ -108,7 +108,7 @@ * TEMP: mC (-128C to +127C) * REG: 1C/bit, two's complement */ -static inline s8 TEMP_TO_REG(int val) +static inline s8 TEMP_TO_REG(long val) { int nval = clamp_val(val, -128000, 127000) ; return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000; --- linux-3.13.0.orig/drivers/hwmon/lm85.c +++ linux-3.13.0/drivers/hwmon/lm85.c @@ -158,7 +158,7 @@ /* Temperature is reported in .001 degC increments */ #define TEMP_TO_REG(val) \ - clamp_val(SCALE(val, 1000, 1), -127, 127) + DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000) #define TEMPEXT_FROM_REG(val, ext) \ SCALE(((val) << 4) + (ext), 16, 1000) #define TEMP_FROM_REG(val) ((val) * 1000) @@ -192,7 +192,7 @@ 13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000 }; -static int RANGE_TO_REG(int range) +static int RANGE_TO_REG(long range) { int i; @@ -214,7 +214,7 @@ 11, 15, 22, 29, 35, 44, 59, 88 }; -static int FREQ_TO_REG(const int *map, int freq) +static int FREQ_TO_REG(const int *map, unsigned long freq) { int i; @@ -463,6 +463,9 @@ if (err) return err; + if (val > 255) + return -EINVAL; + data->vrm = val; return count; } --- linux-3.13.0.orig/drivers/hwmon/max1668.c +++ linux-3.13.0/drivers/hwmon/max1668.c @@ -243,7 +243,7 @@ data->temp_min[index] = clamp_val(temp/1000, -128, 127); if (i2c_smbus_write_byte_data(client, MAX1668_REG_LIML_WR(index), - data->temp_max[index])) + data->temp_min[index])) count = -EIO; mutex_unlock(&data->update_lock); --- linux-3.13.0.orig/drivers/hwmon/ntc_thermistor.c +++ linux-3.13.0/drivers/hwmon/ntc_thermistor.c @@ -145,7 +145,7 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) { struct iio_channel *channel = pdata->chan; - unsigned int result; + s64 result; int val, ret; ret = iio_read_channel_raw(channel, &val); @@ -155,10 +155,10 @@ } /* unit: mV */ - result = pdata->pullup_uv * val; + result = pdata->pullup_uv * (s64) val; result >>= 12; - return result; + return (int)result; } static const struct of_device_id ntc_match[] = { --- linux-3.13.0.orig/drivers/hwmon/sis5595.c +++ linux-3.13.0/drivers/hwmon/sis5595.c @@ -159,7 +159,7 @@ { return val * 830 + 52120; } -static inline s8 TEMP_TO_REG(int val) +static inline s8 TEMP_TO_REG(long val) { int nval = clamp_val(val, -54120, 157530) ; return nval < 0 ? 
(nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830; --- linux-3.13.0.orig/drivers/hwmon/smsc47m192.c +++ linux-3.13.0/drivers/hwmon/smsc47m192.c @@ -86,7 +86,7 @@ */ static inline s8 TEMP_TO_REG(int val) { - return clamp_val(SCALE(val, 1, 1000), -128000, 127000); + return SCALE(clamp_val(val, -128000, 127000), 1, 1000); } static inline int TEMP_FROM_REG(s8 val) @@ -384,6 +384,8 @@ err = kstrtoul(buf, 10, &val); if (err) return err; + if (val > 255) + return -EINVAL; data->vrm = val; return count; --- linux-3.13.0.orig/drivers/i2c/busses/Kconfig +++ linux-3.13.0/drivers/i2c/busses/Kconfig @@ -152,6 +152,7 @@ ATI SB700/SP5100 ATI SB800 AMD Hudson-2 + AMD ML AMD CZ Serverworks OSB4 Serverworks CSB5 @@ -386,7 +387,7 @@ config I2C_CPM tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" - depends on (CPM1 || CPM2) && OF_I2C + depends on CPM1 || CPM2 help This supports the use of the I2C interface on Freescale processors with CPM1 or CPM2. --- linux-3.13.0.orig/drivers/i2c/busses/i2c-at91.c +++ linux-3.13.0/drivers/i2c/busses/i2c-at91.c @@ -101,6 +101,7 @@ unsigned twi_cwgr_reg; struct at91_twi_pdata *pdata; bool use_dma; + bool recv_len_abort; struct at91_twi_dma dma; }; @@ -210,7 +211,7 @@ struct at91_twi_dev *dev = (struct at91_twi_dev *)data; dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), - dev->buf_len, DMA_MEM_TO_DEV); + dev->buf_len, DMA_TO_DEVICE); at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); } @@ -267,12 +268,24 @@ *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; --dev->buf_len; + /* return if aborting, we only needed to read RHR to clear RXRDY*/ + if (dev->recv_len_abort) + return; + /* handle I2C_SMBUS_BLOCK_DATA */ if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) { - dev->msg->flags &= ~I2C_M_RECV_LEN; - dev->buf_len += *dev->buf; - dev->msg->len = dev->buf_len + 1; - dev_dbg(dev->dev, "received block length %d\n", dev->buf_len); + /* ensure length byte is a valid value */ + if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) { + dev->msg->flags &= ~I2C_M_RECV_LEN; + dev->buf_len += *dev->buf; + dev->msg->len = dev->buf_len + 1; + dev_dbg(dev->dev, "received block length %d\n", + dev->buf_len); + } else { + /* abort and send the stop by reading one more byte */ + dev->recv_len_abort = true; + dev->buf_len = 1; + } } /* send stop if second but last byte has been read */ @@ -289,7 +302,7 @@ struct at91_twi_dev *dev = (struct at91_twi_dev *)data; dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), - dev->buf_len, DMA_DEV_TO_MEM); + dev->buf_len, DMA_FROM_DEVICE); /* The last two bytes have to be read without using dma */ dev->buf += dev->buf_len - 2; @@ -421,8 +434,8 @@ } } - ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, - dev->adapter.timeout); + ret = wait_for_completion_timeout(&dev->cmd_complete, + dev->adapter.timeout); if (ret == 0) { dev_err(dev->dev, "controller timed out\n"); at91_init_twi_bus(dev); @@ -444,6 +457,12 @@ ret = -EIO; goto error; } + if (dev->recv_len_abort) { + dev_err(dev->dev, "invalid smbus block length recvd\n"); + ret = -EPROTO; + goto error; + } + dev_dbg(dev->dev, "transfer complete\n"); return 0; @@ -500,6 +519,7 @@ dev->buf_len = m_start->len; dev->buf = m_start->buf; dev->msg = m_start; + dev->recv_len_abort = false; ret = at91_do_twi_transfer(dev); --- linux-3.13.0.orig/drivers/i2c/busses/i2c-cpm.c +++ linux-3.13.0/drivers/i2c/busses/i2c-cpm.c @@ -40,8 +40,12 @@ #include #include #include +#include #include +#include #include +#include +#include #include #include --- 
linux-3.13.0.orig/drivers/i2c/busses/i2c-davinci.c +++ linux-3.13.0/drivers/i2c/busses/i2c-davinci.c @@ -411,11 +411,9 @@ if (dev->cmd_err & DAVINCI_I2C_STR_NACK) { if (msg->flags & I2C_M_IGNORE_NAK) return msg->len; - if (stop) { - w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); - w |= DAVINCI_I2C_MDR_STP; - davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); - } + w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); + w |= DAVINCI_I2C_MDR_STP; + davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w); return -EREMOTEIO; } return -EIO; --- linux-3.13.0.orig/drivers/i2c/busses/i2c-designware-core.c +++ linux-3.13.0/drivers/i2c/busses/i2c-designware-core.c @@ -418,6 +418,9 @@ */ dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); + /* enforce disabled interrupts (due to HW issues) */ + i2c_dw_disable_int(dev); + /* Enable the adapter */ __i2c_dw_enable(dev, true); --- linux-3.13.0.orig/drivers/i2c/busses/i2c-mv64xxx.c +++ linux-3.13.0/drivers/i2c/busses/i2c-mv64xxx.c @@ -97,7 +97,6 @@ enum { MV64XXX_I2C_ACTION_INVALID, MV64XXX_I2C_ACTION_CONTINUE, - MV64XXX_I2C_ACTION_OFFLOAD_SEND_START, MV64XXX_I2C_ACTION_SEND_START, MV64XXX_I2C_ACTION_SEND_RESTART, MV64XXX_I2C_ACTION_OFFLOAD_RESTART, @@ -204,6 +203,9 @@ unsigned long ctrl_reg; struct i2c_msg *msg = drv_data->msgs; + if (!drv_data->offload_enabled) + return -EOPNOTSUPP; + drv_data->msg = msg; drv_data->byte_posn = 0; drv_data->bytes_left = msg->len; @@ -433,8 +435,7 @@ drv_data->msgs++; drv_data->num_msgs--; - if (!(drv_data->offload_enabled && - mv64xxx_i2c_offload_msg(drv_data))) { + if (mv64xxx_i2c_offload_msg(drv_data) < 0) { drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START; writel(drv_data->cntl_bits, drv_data->reg_base + drv_data->reg_offsets.control); @@ -458,15 +459,14 @@ drv_data->reg_base + drv_data->reg_offsets.control); break; - case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START: - if (!mv64xxx_i2c_offload_msg(drv_data)) - break; - else - drv_data->action = MV64XXX_I2C_ACTION_SEND_START; - /* FALLTHRU */ case MV64XXX_I2C_ACTION_SEND_START: - writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, - drv_data->reg_base + drv_data->reg_offsets.control); + /* Can we offload this msg ? 
*/ + if (mv64xxx_i2c_offload_msg(drv_data) < 0) { + /* No, switch to standard path */ + mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs); + writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START, + drv_data->reg_base + drv_data->reg_offsets.control); + } break; case MV64XXX_I2C_ACTION_SEND_ADDR_1: @@ -625,15 +625,10 @@ unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->offload_enabled) { - drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START; - drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; - } else { - mv64xxx_i2c_prepare_for_io(drv_data, msg); - drv_data->action = MV64XXX_I2C_ACTION_SEND_START; - drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; - } + drv_data->action = MV64XXX_I2C_ACTION_SEND_START; + drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; + drv_data->send_stop = is_last; drv_data->block = 1; mv64xxx_i2c_do_action(drv_data); @@ -692,6 +687,7 @@ { .compatible = "allwinner,sun4i-i2c", .data = &mv64xxx_i2c_regs_sun4i}, { .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx}, { .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx}, + { .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx}, {} }; MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); @@ -752,8 +748,7 @@ } tclk = clk_get_rate(drv_data->clk); - rc = of_property_read_u32(np, "clock-frequency", &bus_freq); - if (rc) + if (of_property_read_u32(np, "clock-frequency", &bus_freq)) bus_freq = 100000; /* 100kHz by default */ if (!mv64xxx_find_baud_factors(bus_freq, tclk, @@ -783,6 +778,10 @@ drv_data->errata_delay = true; } + if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) { + drv_data->offload_enabled = false; + drv_data->errata_delay = true; + } out: return rc; #endif --- linux-3.13.0.orig/drivers/i2c/busses/i2c-omap.c +++ linux-3.13.0/drivers/i2c/busses/i2c-omap.c @@ -926,14 +926,12 @@ if (stat & OMAP_I2C_STAT_NACK) { err |= OMAP_I2C_STAT_NACK; omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); - break; } if (stat & OMAP_I2C_STAT_AL) { dev_err(dev->dev, "Arbitration lost\n"); err |= OMAP_I2C_STAT_AL; omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); - break; } /* --- linux-3.13.0.orig/drivers/i2c/busses/i2c-piix4.c +++ linux-3.13.0/drivers/i2c/busses/i2c-piix4.c @@ -22,7 +22,7 @@ Intel PIIX4, 440MX Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100 ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800 - AMD Hudson-2, CZ + AMD Hudson-2, ML, CZ SMSC Victory66 Note: we assume there can only be one device, with one or more @@ -235,7 +235,8 @@ { unsigned short piix4_smba; unsigned short smba_idx = 0xcd6; - u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en; + u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status; + u8 i2ccfg, i2ccfg_offset = 0x10; /* SB800 and later SMBus does not support forcing address */ if (force || force_addr) { @@ -245,7 +246,15 @@ } /* Determine the address of the SMBus areas */ - smb_en = (aux) ? 0x28 : 0x2c; + if ((PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && + PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && + PIIX4_dev->revision >= 0x41) || + (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD && + PIIX4_dev->device == 0x790b && + PIIX4_dev->revision >= 0x49)) + smb_en = 0x00; + else + smb_en = (aux) ? 
0x28 : 0x2c; if (!request_region(smba_idx, 2, "smba_idx")) { dev_err(&PIIX4_dev->dev, "SMBus base address index region " @@ -258,13 +267,22 @@ smba_en_hi = inb_p(smba_idx + 1); release_region(smba_idx, 2); - if ((smba_en_lo & 1) == 0) { + if (!smb_en) { + smb_en_status = smba_en_lo & 0x10; + piix4_smba = smba_en_hi << 8; + if (aux) + piix4_smba |= 0x20; + } else { + smb_en_status = smba_en_lo & 0x01; + piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; + } + + if (!smb_en_status) { dev_err(&PIIX4_dev->dev, "Host SMBus controller not enabled!\n"); return -ENODEV; } - piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) return -ENODEV; @@ -277,7 +295,8 @@ /* Aux SMBus does not support IRQ information */ if (aux) { dev_info(&PIIX4_dev->dev, - "SMBus Host Controller at 0x%x\n", piix4_smba); + "Auxiliary SMBus Host Controller at 0x%x\n", + piix4_smba); return piix4_smba; } --- linux-3.13.0.orig/drivers/i2c/busses/i2c-rcar.c +++ linux-3.13.0/drivers/i2c/busses/i2c-rcar.c @@ -488,6 +488,9 @@ msr = rcar_i2c_status_get(priv); + /* Only handle interrupts that are currently enabled */ + msr &= rcar_i2c_read(priv, ICMIER); + /* * Arbitration lost */ @@ -503,15 +506,6 @@ } /* - * Stop - */ - if (msr & MST) { - dev_dbg(dev, "Stop\n"); - rcar_i2c_flags_set(priv, ID_DONE); - goto out; - } - - /* * Nack */ if (msr & MNR) { @@ -524,6 +518,12 @@ goto out; } + /* Stop */ + if (msr & MST) { + rcar_i2c_flags_set(priv, ID_DONE); + goto out; + } + /* * recv/send */ @@ -567,6 +567,12 @@ ret = -EINVAL; for (i = 0; i < num; i++) { + /* This HW can't send STOP after address phase */ + if (msgs[i].len == 0) { + ret = -EOPNOTSUPP; + break; + } + /*-------------- spin lock -----------------*/ spin_lock_irqsave(&priv->lock, flags); @@ -631,7 +637,8 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + /* This HW can't do SMBUS_QUICK and NOSTART */ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm rcar_i2c_algo = { --- linux-3.13.0.orig/drivers/i2c/busses/i2c-s3c2410.c +++ linux-3.13.0/drivers/i2c/busses/i2c-s3c2410.c @@ -712,14 +712,16 @@ int ret; pm_runtime_get_sync(&adap->dev); - clk_prepare_enable(i2c->clk); + ret = clk_enable(i2c->clk); + if (ret) + return ret; for (retry = 0; retry < adap->retries; retry++) { ret = s3c24xx_i2c_doxfer(i2c, msgs, num); if (ret != -EAGAIN) { - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); pm_runtime_put(&adap->dev); return ret; } @@ -729,7 +731,7 @@ udelay(100); } - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); pm_runtime_put(&adap->dev); return -EREMOTEIO; } @@ -1109,7 +1111,7 @@ clk_prepare_enable(i2c->clk); ret = s3c24xx_i2c_init(i2c); - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); if (ret != 0) { dev_err(&pdev->dev, "I2C controller init failed\n"); return ret; @@ -1121,6 +1123,7 @@ i2c->irq = ret = platform_get_irq(pdev, 0); if (ret <= 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); + clk_unprepare(i2c->clk); return ret; } @@ -1129,12 +1132,14 @@ if (ret != 0) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq); + clk_unprepare(i2c->clk); return ret; } ret = s3c24xx_i2c_register_cpufreq(i2c); if (ret < 0) { dev_err(&pdev->dev, "failed to register cpufreq notifier\n"); + clk_unprepare(i2c->clk); return ret; } @@ -1151,6 +1156,7 @@ if (ret < 0) { dev_err(&pdev->dev, "failed to add bus to i2c core\n"); s3c24xx_i2c_deregister_cpufreq(i2c); + clk_unprepare(i2c->clk); 
return ret; } @@ -1172,6 +1178,8 @@ { struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + clk_unprepare(i2c->clk); + pm_runtime_disable(&i2c->adap.dev); pm_runtime_disable(&pdev->dev); @@ -1200,11 +1208,14 @@ { struct platform_device *pdev = to_platform_device(dev); struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); + int ret; - i2c->suspended = 0; - clk_prepare_enable(i2c->clk); + ret = clk_enable(i2c->clk); + if (ret) + return ret; s3c24xx_i2c_init(i2c); - clk_disable_unprepare(i2c->clk); + clk_disable(i2c->clk); + i2c->suspended = 0; return 0; } --- linux-3.13.0.orig/drivers/iio/accel/bma180.c +++ linux-3.13.0/drivers/iio/accel/bma180.c @@ -68,13 +68,13 @@ /* Defaults values */ #define BMA180_DEF_PMODE 0 #define BMA180_DEF_BW 20 -#define BMA180_DEF_SCALE 250 +#define BMA180_DEF_SCALE 2452 /* Available values for sysfs */ #define BMA180_FLP_FREQ_AVAILABLE \ "10 20 40 75 150 300" #define BMA180_SCALE_AVAILABLE \ - "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980" + "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417" struct bma180_data { struct i2c_client *client; @@ -94,7 +94,7 @@ }; static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */ -static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 }; +static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 }; static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis) { @@ -376,6 +376,8 @@ mutex_unlock(&data->mutex); return ret; case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: + if (val2) + return -EINVAL; mutex_lock(&data->mutex); ret = bma180_set_bw(data, val); mutex_unlock(&data->mutex); @@ -476,7 +478,7 @@ mutex_lock(&data->mutex); - for_each_set_bit(bit, indio_dev->buffer->scan_mask, + for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = bma180_get_acc_reg(data, bit); if (ret < 0) { @@ -564,7 +566,7 @@ trig->ops = &bma180_trigger_ops; iio_trigger_set_drvdata(trig, indio_dev); data->trig = trig; - indio_dev->trig = trig; + indio_dev->trig = iio_trigger_get(trig); ret = iio_trigger_register(trig); if (ret) --- linux-3.13.0.orig/drivers/iio/adc/ad_sigma_delta.c +++ linux-3.13.0/drivers/iio/adc/ad_sigma_delta.c @@ -472,7 +472,7 @@ goto error_free_irq; /* select default trigger */ - indio_dev->trig = sigma_delta->trig; + indio_dev->trig = iio_trigger_get(sigma_delta->trig); return 0; --- linux-3.13.0.orig/drivers/iio/adc/at91_adc.c +++ linux-3.13.0/drivers/iio/adc/at91_adc.c @@ -78,6 +78,7 @@ bool done; int irq; u16 last_value; + int chnb; struct mutex lock; u8 num_channels; void __iomem *reg_base; @@ -151,7 +152,7 @@ disable_irq_nosync(irq); iio_trigger_poll(idev->trig, iio_get_time_ns()); } else { - st->last_value = at91_adc_readl(st, AT91_ADC_LCDR); + st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb)); st->done = true; wake_up_interruptible(&st->wq_data_avail); } @@ -230,7 +231,7 @@ AT91_ADC_IER_YRDY | AT91_ADC_IER_PRDY; - if (status & st->registers->drdy_mask) + if (status & GENMASK(st->num_channels - 1, 0)) handle_adc_eoc_trigger(irq, idev); if (status & AT91_ADC_IER_PEN) { @@ -322,12 +323,11 @@ return idev->num_channels; } -static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev, +static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev, struct at91_adc_trigger *triggers, const char *trigger_name) { struct at91_adc_state *st = iio_priv(idev); - u8 value = 0; int i; for (i = 0; i < st->trigger_number; i++) { @@ -340,32 +340,32 @@ return -ENOMEM; if (strcmp(trigger_name, name) == 0) { - value 
= triggers[i].value; kfree(name); - break; + if (triggers[i].value == 0) + return -EINVAL; + return triggers[i].value; } kfree(name); } - return value; + return -EINVAL; } static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) { struct iio_dev *idev = iio_trigger_get_drvdata(trig); struct at91_adc_state *st = iio_priv(idev); - struct iio_buffer *buffer = idev->buffer; struct at91_adc_reg_desc *reg = st->registers; u32 status = at91_adc_readl(st, reg->trigger_register); - u8 value; + int value; u8 bit; value = at91_adc_get_trigger_value_by_name(idev, st->trigger_list, idev->trig->name); - if (value == 0) - return -EINVAL; + if (value < 0) + return value; if (state) { st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL); @@ -375,7 +375,7 @@ at91_adc_writel(st, reg->trigger_register, status | value); - for_each_set_bit(bit, buffer->scan_mask, + for_each_set_bit(bit, idev->active_scan_mask, st->num_channels) { struct iio_chan_spec const *chan = idev->channels + bit; at91_adc_writel(st, AT91_ADC_CHER, @@ -390,7 +390,7 @@ at91_adc_writel(st, reg->trigger_register, status & ~value); - for_each_set_bit(bit, buffer->scan_mask, + for_each_set_bit(bit, idev->active_scan_mask, st->num_channels) { struct iio_chan_spec const *chan = idev->channels + bit; at91_adc_writel(st, AT91_ADC_CHDR, @@ -501,9 +501,10 @@ case IIO_CHAN_INFO_RAW: mutex_lock(&st->lock); + st->chnb = chan->channel; at91_adc_writel(st, AT91_ADC_CHER, AT91_ADC_CH(chan->channel)); - at91_adc_writel(st, AT91_ADC_IER, st->registers->drdy_mask); + at91_adc_writel(st, AT91_ADC_IER, BIT(chan->channel)); at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_START); ret = wait_event_interruptible_timeout(st->wq_data_avail, @@ -520,7 +521,7 @@ at91_adc_writel(st, AT91_ADC_CHDR, AT91_ADC_CH(chan->channel)); - at91_adc_writel(st, AT91_ADC_IDR, st->registers->drdy_mask); + at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel)); st->last_value = 0; st->done = false; @@ -765,14 +766,17 @@ if (!pdata) return -EINVAL; + st->caps = (struct at91_adc_caps *) + platform_get_device_id(pdev)->driver_data; + st->use_external = pdata->use_external_triggers; st->vref_mv = pdata->vref; st->channels_mask = pdata->channels_used; - st->num_channels = pdata->num_channels; + st->num_channels = st->caps->num_channels; st->startup_time = pdata->startup_time; st->trigger_number = pdata->trigger_number; st->trigger_list = pdata->trigger_list; - st->registers = pdata->registers; + st->registers = &st->caps->registers; return 0; } @@ -1101,7 +1105,6 @@ return 0; } -#ifdef CONFIG_OF static struct at91_adc_caps at91sam9260_caps = { .calc_startup_ticks = calc_startup_ticks_9260, .num_channels = 4, @@ -1154,11 +1157,27 @@ {}, }; MODULE_DEVICE_TABLE(of, at91_adc_dt_ids); -#endif + +static const struct platform_device_id at91_adc_ids[] = { + { + .name = "at91sam9260-adc", + .driver_data = (unsigned long)&at91sam9260_caps, + }, { + .name = "at91sam9g45-adc", + .driver_data = (unsigned long)&at91sam9g45_caps, + }, { + .name = "at91sam9x5-adc", + .driver_data = (unsigned long)&at91sam9x5_caps, + }, { + /* terminator */ + } +}; +MODULE_DEVICE_TABLE(platform, at91_adc_ids); static struct platform_driver at91_adc_driver = { .probe = at91_adc_probe, .remove = at91_adc_remove, + .id_table = at91_adc_ids, .driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(at91_adc_dt_ids), --- linux-3.13.0.orig/drivers/iio/adc/max1363.c +++ linux-3.13.0/drivers/iio/adc/max1363.c @@ -1258,8 +1258,8 @@ .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = 
&max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1038_channels, + .num_channels = ARRAY_SIZE(max1038_channels), }, [max11605] = { .bits = 8, @@ -1268,8 +1268,8 @@ .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1038_channels, + .num_channels = ARRAY_SIZE(max1038_channels), }, [max11606] = { .bits = 10, @@ -1318,8 +1318,8 @@ .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1138_channels, + .num_channels = ARRAY_SIZE(max1138_channels), }, [max11611] = { .bits = 10, @@ -1328,8 +1328,8 @@ .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1138_channels, + .num_channels = ARRAY_SIZE(max1138_channels), }, [max11612] = { .bits = 12, @@ -1560,7 +1560,7 @@ st->client = client; st->vref_uv = st->chip_info->int_vref_mv * 1000; - vref = devm_regulator_get(&client->dev, "vref"); + vref = devm_regulator_get_optional(&client->dev, "vref"); if (!IS_ERR(vref)) { int vref_uv; --- linux-3.13.0.orig/drivers/iio/adc/mcp3422.c +++ linux-3.13.0/drivers/iio/adc/mcp3422.c @@ -57,20 +57,11 @@ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ } -/* LSB is in nV to eliminate floating point */ -static const u32 rates_to_lsb[] = {1000000, 250000, 62500, 15625}; - -/* - * scales calculated as: - * rates_to_lsb[sample_rate] / (1 << pga); - * pga is 1 for 0, 2 - */ - static const int mcp3422_scales[4][4] = { - { 1000000, 250000, 62500, 15625 }, - { 500000 , 125000, 31250, 7812 }, - { 250000 , 62500 , 15625, 3906 }, - { 125000 , 31250 , 7812 , 1953 } }; + { 1000000, 500000, 250000, 125000 }, + { 250000 , 125000, 62500 , 31250 }, + { 62500 , 31250 , 15625 , 7812 }, + { 15625 , 7812 , 3906 , 1953 } }; /* Constant msleep times for data acquisitions */ static const int mcp3422_read_times[4] = { --- linux-3.13.0.orig/drivers/iio/adc/ti_am335x_adc.c +++ linux-3.13.0/drivers/iio/adc/ti_am335x_adc.c @@ -172,12 +172,11 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev) { struct tiadc_device *adc_dev = iio_priv(indio_dev); - struct iio_buffer *buffer = indio_dev->buffer; unsigned int enb = 0; u8 bit; tiadc_step_config(indio_dev); - for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels) + for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) enb |= (get_adc_step_bit(adc_dev, bit) << 1); adc_dev->buffer_en_ch_steps = enb; @@ -342,7 +341,7 @@ if (time_after(jiffies, timeout)) return -EAGAIN; } - map_val = chan->channel + TOTAL_CHANNELS; + map_val = adc_dev->channel_step[chan->scan_index]; /* * When the sub-system is first enabled, --- linux-3.13.0.orig/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ linux-3.13.0/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -99,7 +99,8 @@ dev_err(&indio_dev->dev, "Trigger Register Failed\n"); goto error_free_trig; } - indio_dev->trig = attrb->trigger = trig; + attrb->trigger = trig; + indio_dev->trig = iio_trigger_get(trig); return ret; --- linux-3.13.0.orig/drivers/iio/common/st_sensors/st_sensors_buffer.c +++ linux-3.13.0/drivers/iio/common/st_sensors/st_sensors_buffer.c @@ -71,7 +71,7 @@ goto st_sensors_free_memory; } - for (i = 0; i < n * num_data_channels; i++) { + 
for (i = 0; i < n * byte_for_channel; i++) { if (i < n) buf[i] = rx_array[i]; else --- linux-3.13.0.orig/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ linux-3.13.0/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -49,7 +49,7 @@ dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); goto iio_trigger_register_error; } - indio_dev->trig = sdata->trig; + indio_dev->trig = iio_trigger_get(sdata->trig); return 0; --- linux-3.13.0.orig/drivers/iio/dac/ad5686.c +++ linux-3.13.0/drivers/iio/dac/ad5686.c @@ -317,7 +317,7 @@ st = iio_priv(indio_dev); spi_set_drvdata(spi, indio_dev); - st->reg = devm_regulator_get(&spi->dev, "vcc"); + st->reg = devm_regulator_get_optional(&spi->dev, "vcc"); if (!IS_ERR(st->reg)) { ret = regulator_enable(st->reg); if (ret) --- linux-3.13.0.orig/drivers/iio/gyro/Kconfig +++ linux-3.13.0/drivers/iio/gyro/Kconfig @@ -70,7 +70,7 @@ select IIO_TRIGGERED_BUFFER if (IIO_BUFFER) help Say yes here to build support for STMicroelectronics gyroscopes: - L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330. + L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330. This driver can also be built as a module. If so, these modules will be created: --- linux-3.13.0.orig/drivers/iio/gyro/itg3200_buffer.c +++ linux-3.13.0/drivers/iio/gyro/itg3200_buffer.c @@ -132,7 +132,7 @@ goto error_free_irq; /* select default trigger */ - indio_dev->trig = st->trig; + indio_dev->trig = iio_trigger_get(st->trig); return 0; --- linux-3.13.0.orig/drivers/iio/gyro/st_gyro.h +++ linux-3.13.0/drivers/iio/gyro/st_gyro.h @@ -19,7 +19,6 @@ #define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro" #define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro" #define L3GD20_GYRO_DEV_NAME "l3gd20" -#define L3GD20H_GYRO_DEV_NAME "l3gd20h" #define L3G4IS_GYRO_DEV_NAME "l3g4is_ui" #define LSM330_GYRO_DEV_NAME "lsm330_gyro" --- linux-3.13.0.orig/drivers/iio/gyro/st_gyro_core.c +++ linux-3.13.0/drivers/iio/gyro/st_gyro_core.c @@ -167,11 +167,10 @@ .wai = ST_GYRO_2_WAI_EXP, .sensors_supported = { [0] = L3GD20_GYRO_DEV_NAME, - [1] = L3GD20H_GYRO_DEV_NAME, - [2] = LSM330D_GYRO_DEV_NAME, - [3] = LSM330DLC_GYRO_DEV_NAME, - [4] = L3G4IS_GYRO_DEV_NAME, - [5] = LSM330_GYRO_DEV_NAME, + [1] = LSM330D_GYRO_DEV_NAME, + [2] = LSM330DLC_GYRO_DEV_NAME, + [3] = L3G4IS_GYRO_DEV_NAME, + [4] = LSM330_GYRO_DEV_NAME, }, .ch = (struct iio_chan_spec *)st_gyro_16bit_channels, .odr = { --- linux-3.13.0.orig/drivers/iio/gyro/st_gyro_i2c.c +++ linux-3.13.0/drivers/iio/gyro/st_gyro_i2c.c @@ -55,7 +55,6 @@ { LSM330DL_GYRO_DEV_NAME }, { LSM330DLC_GYRO_DEV_NAME }, { L3GD20_GYRO_DEV_NAME }, - { L3GD20H_GYRO_DEV_NAME }, { L3G4IS_GYRO_DEV_NAME }, { LSM330_GYRO_DEV_NAME }, {}, --- linux-3.13.0.orig/drivers/iio/gyro/st_gyro_spi.c +++ linux-3.13.0/drivers/iio/gyro/st_gyro_spi.c @@ -54,7 +54,6 @@ { LSM330DL_GYRO_DEV_NAME }, { LSM330DLC_GYRO_DEV_NAME }, { L3GD20_GYRO_DEV_NAME }, - { L3GD20H_GYRO_DEV_NAME }, { L3G4IS_GYRO_DEV_NAME }, { LSM330_GYRO_DEV_NAME }, {}, --- linux-3.13.0.orig/drivers/iio/imu/adis16400.h +++ linux-3.13.0/drivers/iio/imu/adis16400.h @@ -189,6 +189,7 @@ ADIS16300_SCAN_INCLI_X, ADIS16300_SCAN_INCLI_Y, ADIS16400_SCAN_ADC, + ADIS16400_SCAN_TIMESTAMP, }; #ifdef CONFIG_IIO_BUFFER --- linux-3.13.0.orig/drivers/iio/imu/adis16400_core.c +++ linux-3.13.0/drivers/iio/imu/adis16400_core.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -447,7 +448,7 @@ mutex_unlock(&indio_dev->mlock); if (ret) return ret; - val16 = ((val16 & 0xFFF) << 4) >> 4; + val16 = sign_extend32(val16, 11); *val = val16; return 
IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: @@ -632,7 +633,7 @@ ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14), ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12), ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12), - IIO_CHAN_SOFT_TIMESTAMP(12) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16448_channels[] = { @@ -659,7 +660,7 @@ }, }, ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12), - IIO_CHAN_SOFT_TIMESTAMP(11) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16350_channels[] = { @@ -677,7 +678,7 @@ ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12), ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12), ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12), - IIO_CHAN_SOFT_TIMESTAMP(11) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16300_channels[] = { @@ -690,7 +691,7 @@ ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12), ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13), ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13), - IIO_CHAN_SOFT_TIMESTAMP(14) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static const struct iio_chan_spec adis16334_channels[] = { @@ -701,7 +702,7 @@ ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14), ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14), ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12), - IIO_CHAN_SOFT_TIMESTAMP(8) + IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP), }; static struct attribute *adis16400_attributes[] = { --- linux-3.13.0.orig/drivers/iio/imu/adis_trigger.c +++ linux-3.13.0/drivers/iio/imu/adis_trigger.c @@ -60,7 +60,7 @@ iio_trigger_set_drvdata(adis->trig, adis); ret = iio_trigger_register(adis->trig); - indio_dev->trig = adis->trig; + indio_dev->trig = iio_trigger_get(adis->trig); if (ret) goto error_free_irq; --- linux-3.13.0.orig/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ linux-3.13.0/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -661,6 +661,7 @@ { struct inv_mpu6050_state *st; struct iio_dev *indio_dev; + struct inv_mpu6050_platform_data *pdata; int result; if (!i2c_check_functionality(client->adapter, @@ -673,8 +674,10 @@ st = iio_priv(indio_dev); st->client = client; - st->plat_data = *(struct inv_mpu6050_platform_data - *)dev_get_platdata(&client->dev); + pdata = (struct inv_mpu6050_platform_data + *)dev_get_platdata(&client->dev); + if (pdata) + st->plat_data = *pdata; /* power is turned on inside check chip type*/ result = inv_check_and_setup_chip(st, id); if (result) --- linux-3.13.0.orig/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ linux-3.13.0/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -25,6 +25,16 @@ #include #include "inv_mpu_iio.h" +static void inv_clear_kfifo(struct inv_mpu6050_state *st) +{ + unsigned long flags; + + /* take the spin lock sem to avoid interrupt kick in */ + spin_lock_irqsave(&st->time_stamp_lock, flags); + kfifo_reset(&st->timestamps); + spin_unlock_irqrestore(&st->time_stamp_lock, flags); +} + int inv_reset_fifo(struct iio_dev *indio_dev) { int result; @@ -51,6 +61,10 @@ INV_MPU6050_BIT_FIFO_RST); if (result) goto reset_fifo_fail; + + /* clear timestamps fifo */ + inv_clear_kfifo(st); + /* enable interrupt */ if (st->chip_config.accl_fifo_enable || st->chip_config.gyro_fifo_enable) { @@ -84,16 +98,6 @@ return result; } -static void inv_clear_kfifo(struct inv_mpu6050_state *st) -{ - unsigned long flags; - - /* take the spin lock sem to avoid interrupt kick in */ - spin_lock_irqsave(&st->time_stamp_lock, flags); - kfifo_reset(&st->timestamps); - 
spin_unlock_irqrestore(&st->time_stamp_lock, flags); -} - /** * inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt. */ @@ -185,7 +189,6 @@ flush_fifo: /* Flush HW and SW FIFOs. */ inv_reset_fifo(indio_dev); - inv_clear_kfifo(st); mutex_unlock(&indio_dev->mlock); iio_trigger_notify_done(indio_dev->trig); --- linux-3.13.0.orig/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ linux-3.13.0/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -135,7 +135,7 @@ ret = iio_trigger_register(st->trig); if (ret) goto error_free_irq; - indio_dev->trig = st->trig; + indio_dev->trig = iio_trigger_get(st->trig); return 0; --- linux-3.13.0.orig/drivers/iio/industrialio-buffer.c +++ linux-3.13.0/drivers/iio/industrialio-buffer.c @@ -136,7 +136,8 @@ int ret; struct iio_dev *indio_dev = dev_to_iio_dev(dev); - ret = test_bit(to_iio_dev_attr(attr)->address, + /* Ensure ret is 0 or 1. */ + ret = !!test_bit(to_iio_dev_attr(attr)->address, indio_dev->buffer->scan_mask); return sprintf(buf, "%d\n", ret); @@ -837,7 +838,8 @@ if (!buffer->scan_mask) return 0; - return test_bit(bit, buffer->scan_mask); + /* Ensure return value is 0 or 1. */ + return !!test_bit(bit, buffer->scan_mask); }; EXPORT_SYMBOL_GPL(iio_scan_mask_query); @@ -922,7 +924,7 @@ /* Now we have the two masks, work from least sig and build up sizes */ for_each_set_bit(out_ind, - indio_dev->active_scan_mask, + buffer->scan_mask, indio_dev->masklength) { in_ind = find_next_bit(indio_dev->active_scan_mask, indio_dev->masklength, --- linux-3.13.0.orig/drivers/iio/industrialio-core.c +++ linux-3.13.0/drivers/iio/industrialio-core.c @@ -812,8 +812,7 @@ * @attr_list: List of IIO device attributes * * This function frees the memory allocated for each of the IIO device - * attributes in the list. Note: if you want to reuse the list after calling - * this function you have to reinitialize it using INIT_LIST_HEAD(). + * attributes in the list. */ void iio_free_chan_devattr_list(struct list_head *attr_list) { @@ -821,6 +820,7 @@ list_for_each_entry_safe(p, n, attr_list, l) { kfree(p->dev_attr.attr.name); + list_del(&p->l); kfree(p); } } @@ -901,6 +901,7 @@ iio_free_chan_devattr_list(&indio_dev->channel_attr_list); kfree(indio_dev->chan_attr_group.attrs); + indio_dev->chan_attr_group.attrs = NULL; } static void iio_dev_release(struct device *device) --- linux-3.13.0.orig/drivers/iio/industrialio-event.c +++ linux-3.13.0/drivers/iio/industrialio-event.c @@ -362,6 +362,9 @@ &indio_dev->event_interface->dev_attr_list); kfree(postfix); + if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) + continue; + if (ret) return ret; @@ -593,6 +596,7 @@ error_free_setup_event_lines: iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list); kfree(indio_dev->event_interface); + indio_dev->event_interface = NULL; error_ret: return ret; --- linux-3.13.0.orig/drivers/iio/inkern.c +++ linux-3.13.0/drivers/iio/inkern.c @@ -178,12 +178,12 @@ index = of_property_match_string(np, "io-channel-names", name); chan = of_iio_channel_get(np, index); - if (!IS_ERR(chan)) + if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) break; else if (name && index >= 0) { pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", np->full_name, name ? 
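The industrialio-buffer change above normalises the result of test_bit() with '!!' before formatting it. A standalone illustration (plain userspace C, hypothetical macro names) of why the normalisation matters when the underlying test can return any nonzero value:

#include <stdio.h>

/* A raw mask test evaluates to the masked bit value (any nonzero number),
 * so printing it without the "!!" would leak the bit position instead of
 * the expected 0/1. */
#define BIT(n)			(1UL << (n))
#define test_bit_raw(n, w)	((w) & BIT(n))

int main(void)
{
	unsigned long scan_mask = BIT(3);

	printf("raw:        %lu\n", test_bit_raw(3, scan_mask));	/* 8 */
	printf("normalized: %d\n", !!test_bit_raw(3, scan_mask));	/* 1 */
	return 0;
}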
name : "", index); - return chan; + return NULL; } /* @@ -193,8 +193,9 @@ */ np = np->parent; if (np && !of_get_property(np, "io-channel-ranges", NULL)) - break; + return NULL; } + return chan; } @@ -317,6 +318,7 @@ if (channel != NULL) return channel; } + return iio_channel_get_sys(name, channel_name); } EXPORT_SYMBOL_GPL(iio_channel_get); --- linux-3.13.0.orig/drivers/iio/light/cm36651.c +++ linux-3.13.0/drivers/iio/light/cm36651.c @@ -628,7 +628,19 @@ cm36651->client = client; cm36651->ps_client = i2c_new_dummy(client->adapter, CM36651_I2C_ADDR_PS); + if (!cm36651->ps_client) { + dev_err(&client->dev, "%s: new i2c device failed\n", __func__); + ret = -ENODEV; + goto error_disable_reg; + } + cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA); + if (!cm36651->ara_client) { + dev_err(&client->dev, "%s: new i2c device failed\n", __func__); + ret = -ENODEV; + goto error_i2c_unregister_ps; + } + mutex_init(&cm36651->lock); indio_dev->dev.parent = &client->dev; indio_dev->channels = cm36651_channels; @@ -640,7 +652,7 @@ ret = cm36651_setup_reg(cm36651); if (ret) { dev_err(&client->dev, "%s: register setup failed\n", __func__); - goto error_disable_reg; + goto error_i2c_unregister_ara; } ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler, @@ -648,7 +660,7 @@ "cm36651", indio_dev); if (ret) { dev_err(&client->dev, "%s: request irq failed\n", __func__); - goto error_disable_reg; + goto error_i2c_unregister_ara; } ret = iio_device_register(indio_dev); @@ -661,6 +673,10 @@ error_free_irq: free_irq(client->irq, indio_dev); +error_i2c_unregister_ara: + i2c_unregister_device(cm36651->ara_client); +error_i2c_unregister_ps: + i2c_unregister_device(cm36651->ps_client); error_disable_reg: regulator_disable(cm36651->vled_reg); return ret; @@ -674,6 +690,8 @@ iio_device_unregister(indio_dev); regulator_disable(cm36651->vled_reg); free_irq(client->irq, indio_dev); + i2c_unregister_device(cm36651->ps_client); + i2c_unregister_device(cm36651->ara_client); return 0; } --- linux-3.13.0.orig/drivers/iio/magnetometer/ak8975.c +++ linux-3.13.0/drivers/iio/magnetometer/ak8975.c @@ -85,6 +85,7 @@ #define AK8975_MAX_CONVERSION_TIMEOUT 500 #define AK8975_CONVERSION_DONE_POLL_TIME 10 #define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000) +#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256) /* * Per-instance context data for the device. @@ -265,15 +266,15 @@ * * Since 1uT = 0.01 gauss, our final scale factor becomes: * - * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100 - * Hadj = H * ((ASA + 128) * 30 / 256 + * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100 + * Hadj = H * ((ASA + 128) * 0.003) / 256 * * Since ASA doesn't change, we cache the resultant scale factor into the * device context in ak8975_setup(). */ - data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8; - data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8; - data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8; + data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]); + data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]); + data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]); return 0; } @@ -351,8 +352,6 @@ { struct ak8975_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - u16 meas_reg; - s16 raw; int ret; mutex_lock(&data->lock); @@ -400,16 +399,11 @@ dev_err(&client->dev, "Read axis data fails\n"); goto exit; } - meas_reg = ret; mutex_unlock(&data->lock); - /* Endian conversion of the measured values. */ - raw = (s16) (le16_to_cpu(meas_reg)); - /* Clamp to valid range. 
*/ - raw = clamp_t(s16, raw, -4096, 4095); - *val = raw; + *val = clamp_t(s16, ret, -4096, 4095); return IIO_VAL_INT; exit: @@ -428,8 +422,9 @@ case IIO_CHAN_INFO_RAW: return ak8975_read_axis(indio_dev, chan->address, val); case IIO_CHAN_INFO_SCALE: - *val = data->raw_to_gauss[chan->address]; - return IIO_VAL_INT; + *val = 0; + *val2 = data->raw_to_gauss[chan->address]; + return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; } --- linux-3.13.0.orig/drivers/iio/magnetometer/st_magn_core.c +++ linux-3.13.0/drivers/iio/magnetometer/st_magn_core.c @@ -42,7 +42,8 @@ #define ST_MAGN_FS_AVL_5600MG 5600 #define ST_MAGN_FS_AVL_8000MG 8000 #define ST_MAGN_FS_AVL_8100MG 8100 -#define ST_MAGN_FS_AVL_10000MG 10000 +#define ST_MAGN_FS_AVL_12000MG 12000 +#define ST_MAGN_FS_AVL_16000MG 16000 /* CUSTOM VALUES FOR SENSOR 1 */ #define ST_MAGN_1_WAI_EXP 0x3c @@ -69,20 +70,20 @@ #define ST_MAGN_1_FS_AVL_4700_VAL 0x05 #define ST_MAGN_1_FS_AVL_5600_VAL 0x06 #define ST_MAGN_1_FS_AVL_8100_VAL 0x07 -#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 1100 -#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 855 -#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 670 -#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 450 -#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 400 -#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 330 -#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 230 -#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 980 -#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 760 -#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 600 -#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 400 -#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 355 -#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 295 -#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 205 +#define ST_MAGN_1_FS_AVL_1300_GAIN_XY 909 +#define ST_MAGN_1_FS_AVL_1900_GAIN_XY 1169 +#define ST_MAGN_1_FS_AVL_2500_GAIN_XY 1492 +#define ST_MAGN_1_FS_AVL_4000_GAIN_XY 2222 +#define ST_MAGN_1_FS_AVL_4700_GAIN_XY 2500 +#define ST_MAGN_1_FS_AVL_5600_GAIN_XY 3030 +#define ST_MAGN_1_FS_AVL_8100_GAIN_XY 4347 +#define ST_MAGN_1_FS_AVL_1300_GAIN_Z 1020 +#define ST_MAGN_1_FS_AVL_1900_GAIN_Z 1315 +#define ST_MAGN_1_FS_AVL_2500_GAIN_Z 1666 +#define ST_MAGN_1_FS_AVL_4000_GAIN_Z 2500 +#define ST_MAGN_1_FS_AVL_4700_GAIN_Z 2816 +#define ST_MAGN_1_FS_AVL_5600_GAIN_Z 3389 +#define ST_MAGN_1_FS_AVL_8100_GAIN_Z 4878 #define ST_MAGN_1_MULTIREAD_BIT false /* CUSTOM VALUES FOR SENSOR 2 */ @@ -105,10 +106,12 @@ #define ST_MAGN_2_FS_MASK 0x60 #define ST_MAGN_2_FS_AVL_4000_VAL 0x00 #define ST_MAGN_2_FS_AVL_8000_VAL 0x01 -#define ST_MAGN_2_FS_AVL_10000_VAL 0x02 -#define ST_MAGN_2_FS_AVL_4000_GAIN 430 -#define ST_MAGN_2_FS_AVL_8000_GAIN 230 -#define ST_MAGN_2_FS_AVL_10000_GAIN 230 +#define ST_MAGN_2_FS_AVL_12000_VAL 0x02 +#define ST_MAGN_2_FS_AVL_16000_VAL 0x03 +#define ST_MAGN_2_FS_AVL_4000_GAIN 146 +#define ST_MAGN_2_FS_AVL_8000_GAIN 292 +#define ST_MAGN_2_FS_AVL_12000_GAIN 438 +#define ST_MAGN_2_FS_AVL_16000_GAIN 584 #define ST_MAGN_2_MULTIREAD_BIT false #define ST_MAGN_2_OUT_X_L_ADDR 0x28 #define ST_MAGN_2_OUT_Y_L_ADDR 0x2a @@ -266,9 +269,14 @@ .gain = ST_MAGN_2_FS_AVL_8000_GAIN, }, [2] = { - .num = ST_MAGN_FS_AVL_10000MG, - .value = ST_MAGN_2_FS_AVL_10000_VAL, - .gain = ST_MAGN_2_FS_AVL_10000_GAIN, + .num = ST_MAGN_FS_AVL_12000MG, + .value = ST_MAGN_2_FS_AVL_12000_VAL, + .gain = ST_MAGN_2_FS_AVL_12000_GAIN, + }, + [3] = { + .num = ST_MAGN_FS_AVL_16000MG, + .value = ST_MAGN_2_FS_AVL_16000_VAL, + .gain = ST_MAGN_2_FS_AVL_16000_GAIN, }, }, }, --- linux-3.13.0.orig/drivers/infiniband/Kconfig +++ linux-3.13.0/drivers/infiniband/Kconfig @@ -3,6 +3,8 @@ depends on PCI || BROKEN depends on HAS_IOMEM depends on NET + depends on INET + depends on m || IPV6 != m 
---help--- Core support for InfiniBand (IB). Make sure to also select any protocols you wish to use as well as drivers for your @@ -38,8 +40,7 @@ config INFINIBAND_ADDR_TRANS bool - depends on INET - depends on !(INFINIBAND = y && IPV6 = m) + depends on INFINIBAND default y source "drivers/infiniband/hw/mthca/Kconfig" @@ -53,6 +54,7 @@ source "drivers/infiniband/hw/mlx5/Kconfig" source "drivers/infiniband/hw/nes/Kconfig" source "drivers/infiniband/hw/ocrdma/Kconfig" +source "drivers/infiniband/hw/usnic/Kconfig" source "drivers/infiniband/ulp/ipoib/Kconfig" --- linux-3.13.0.orig/drivers/infiniband/Makefile +++ linux-3.13.0/drivers/infiniband/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_MLX5_INFINIBAND) += hw/mlx5/ obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ obj-$(CONFIG_INFINIBAND_OCRDMA) += hw/ocrdma/ +obj-$(CONFIG_INFINIBAND_USNIC) += hw/usnic/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/ --- linux-3.13.0.orig/drivers/infiniband/core/Makefile +++ linux-3.13.0/drivers/infiniband/core/Makefile @@ -1,8 +1,9 @@ -infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o +infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ - ib_cm.o iw_cm.o $(infiniband-y) + ib_cm.o iw_cm.o ib_addr.o \ + $(infiniband-y) obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ $(user_access-y) --- linux-3.13.0.orig/drivers/infiniband/core/addr.c +++ linux-3.13.0/drivers/infiniband/core/addr.c @@ -86,6 +86,8 @@ } EXPORT_SYMBOL(rdma_addr_size); +static struct rdma_addr_client self; + void rdma_addr_register_client(struct rdma_addr_client *client) { atomic_set(&client->refcount, 1); @@ -119,7 +121,8 @@ } EXPORT_SYMBOL(rdma_copy_addr); -int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) +int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr, + u16 *vlan_id) { struct net_device *dev; int ret = -EADDRNOTAVAIL; @@ -142,6 +145,8 @@ return ret; ret = rdma_copy_addr(dev_addr, dev, NULL); + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); break; @@ -153,6 +158,8 @@ &((struct sockaddr_in6 *) addr)->sin6_addr, dev, 1)) { ret = rdma_copy_addr(dev_addr, dev, NULL); + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(dev); break; } } @@ -238,7 +245,7 @@ src_in->sin_addr.s_addr = fl4.saddr; if (rt->dst.dev->flags & IFF_LOOPBACK) { - ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); + ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL); if (!ret) memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); goto put; @@ -286,7 +293,7 @@ } if (dst->dev->flags & IFF_LOOPBACK) { - ret = rdma_translate_ip((struct sockaddr *) dst_in, addr); + ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL); if (!ret) memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); goto put; @@ -437,6 +444,88 @@ } EXPORT_SYMBOL(rdma_addr_cancel); +struct resolve_cb_context { + struct rdma_dev_addr *addr; + struct completion comp; +}; + +static void resolve_cb(int status, struct sockaddr *src_addr, + struct rdma_dev_addr *addr, void *context) +{ + memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct + rdma_dev_addr)); + complete(&((struct resolve_cb_context *)context)->comp); +} + +int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac, + u16 *vlan_id) +{ + int 
ret = 0; + struct rdma_dev_addr dev_addr; + struct resolve_cb_context ctx; + struct net_device *dev; + + union { + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; + } sgid_addr, dgid_addr; + + + ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid); + if (ret) + return ret; + + ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid); + if (ret) + return ret; + + memset(&dev_addr, 0, sizeof(dev_addr)); + + ctx.addr = &dev_addr; + init_completion(&ctx.comp); + ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr, + &dev_addr, 1000, resolve_cb, &ctx); + if (ret) + return ret; + + wait_for_completion(&ctx.comp); + + memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); + dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); + if (!dev) + return -ENODEV; + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(dev); + dev_put(dev); + return ret; +} +EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh); + +int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id) +{ + int ret = 0; + struct rdma_dev_addr dev_addr; + union { + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; + } gid_addr; + + ret = rdma_gid2ip(&gid_addr._sockaddr, sgid); + + if (ret) + return ret; + memset(&dev_addr, 0, sizeof(dev_addr)); + ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); + if (ret) + return ret; + + memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN); + return ret; +} +EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid); + static int netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { @@ -461,11 +550,13 @@ return -ENOMEM; register_netevent_notifier(&nb); + rdma_addr_register_client(&self); return 0; } static void __exit addr_cleanup(void) { + rdma_addr_unregister_client(&self); unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); } --- linux-3.13.0.orig/drivers/infiniband/core/cm.c +++ linux-3.13.0/drivers/infiniband/core/cm.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -177,6 +178,8 @@ struct ib_ah_attr ah_attr; u16 pkey_index; u8 timeout; + u8 valid; + u8 smac[ETH_ALEN]; }; struct cm_work { @@ -376,6 +379,9 @@ ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path, &av->ah_attr); av->timeout = path->packet_life_time + 1; + memcpy(av->smac, path->smac, sizeof(av->smac)); + + av->valid = 1; return 0; } @@ -1554,6 +1560,9 @@ cm_process_routed_req(req_msg, work->mad_recv_wc->wc); cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); + + memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN); + work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id; ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); if (ret) { ib_get_cached_gid(work->port->cm_dev->ib_device, @@ -3500,6 +3509,32 @@ *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN; qp_attr->ah_attr = cm_id_priv->av.ah_attr; + if (!cm_id_priv->av.valid) { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return -EINVAL; + } + if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) { + qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id; + *qp_attr_mask |= IB_QP_VID; + } + if (!is_zero_ether_addr(cm_id_priv->av.smac)) { + memcpy(qp_attr->smac, cm_id_priv->av.smac, + sizeof(qp_attr->smac)); + *qp_attr_mask |= IB_QP_SMAC; + } + if (cm_id_priv->alt_av.valid) { + if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) { + qp_attr->alt_vlan_id = + cm_id_priv->alt_av.ah_attr.vlan_id; + *qp_attr_mask |= IB_QP_ALT_VID; + } + if 
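rdma_addr_find_dmac_by_grh() above turns the asynchronous rdma_resolve_ip() callback into a synchronous lookup by parking the caller on a completion. A generic sketch of that callback-to-synchronous pattern (the callback signature here is simplified and illustrative):

#include <linux/completion.h>
#include <linux/errno.h>

/* The caller supplies a context holding a completion, the asynchronous
 * callback stores its result and completes it, and the caller sleeps
 * until then. */
struct sync_ctx {
	int result;
	struct completion comp;
};

static void lookup_done(int status, void *context)
{
	struct sync_ctx *ctx = context;

	ctx->result = status;
	complete(&ctx->comp);
}

static int lookup_sync(int (*start_async)(void (*cb)(int, void *), void *arg))
{
	struct sync_ctx ctx;
	int ret;

	init_completion(&ctx.comp);
	ret = start_async(lookup_done, &ctx);
	if (ret)
		return ret;
	wait_for_completion(&ctx.comp);
	return ctx.result;
}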
(!is_zero_ether_addr(cm_id_priv->alt_av.smac)) { + memcpy(qp_attr->alt_smac, + cm_id_priv->alt_av.smac, + sizeof(qp_attr->alt_smac)); + *qp_attr_mask |= IB_QP_ALT_SMAC; + } + } qp_attr->path_mtu = cm_id_priv->path_mtu; qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); --- linux-3.13.0.orig/drivers/infiniband/core/cma.c +++ linux-3.13.0/drivers/infiniband/core/cma.c @@ -340,7 +340,7 @@ int ret; if (addr->sa_family != AF_IB) { - ret = rdma_translate_ip(addr, dev_addr); + ret = rdma_translate_ip(addr, dev_addr, NULL); } else { cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); ret = 0; @@ -365,7 +365,9 @@ return -EINVAL; mutex_lock(&lock); - iboe_addr_get_sgid(dev_addr, &iboe_gid); + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, + &iboe_gid); + memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof gid); if (listen_id_priv && @@ -603,6 +605,7 @@ { struct ib_qp_attr qp_attr; int qp_attr_mask, ret; + union ib_gid sgid; mutex_lock(&id_priv->qp_mutex); if (!id_priv->id.qp) { @@ -625,6 +628,20 @@ if (ret) goto out; + ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, + qp_attr.ah_attr.grh.sgid_index, &sgid); + if (ret) + goto out; + + if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) + == RDMA_TRANSPORT_IB && + rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) + == IB_LINK_LAYER_ETHERNET) { + ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL); + + if (ret) + goto out; + } if (conn_param) qp_attr.max_dest_rd_atomic = conn_param->responder_resources; ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); @@ -725,6 +742,7 @@ else ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, qp_attr_mask); + if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; break; @@ -1309,13 +1327,13 @@ ret = conn_id->id.event_handler(&conn_id->id, &event); if (ret) goto err3; - /* * Acquire mutex to prevent user executing rdma_destroy_id() * while we're accessing the cm_id. 
*/ mutex_lock(&lock); - if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD)) + if (cma_comp(conn_id, RDMA_CM_CONNECT) && + (conn_id->id.qp_type != IB_QPT_UD)) ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); mutex_unlock(&lock); mutex_unlock(&conn_id->handler_mutex); @@ -1474,7 +1492,7 @@ mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); conn_id->state = RDMA_CM_CONNECT; - ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); + ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL); if (ret) { mutex_unlock(&conn_id->handler_mutex); rdma_destroy_id(new_cm_id); @@ -1873,7 +1891,7 @@ struct cma_work *work; int ret; struct net_device *ndev = NULL; - u16 vid; + work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) @@ -1897,10 +1915,14 @@ goto err2; } - vid = rdma_vlan_dev_vlan_id(ndev); - - iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid); - iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid); + route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev); + memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN); + memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len); + + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, + &route->path_rec->sgid); + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, + &route->path_rec->dgid); route->path_rec->hop_limit = 1; route->path_rec->reversible = 1; @@ -2063,6 +2085,7 @@ RDMA_CM_ADDR_RESOLVED)) goto out; + memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); if (!status && !id_priv->cma_dev) status = cma_acquire_dev(id_priv, NULL); @@ -2072,10 +2095,8 @@ goto out; event.event = RDMA_CM_EVENT_ADDR_ERROR; event.status = status; - } else { - memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); + } else event.event = RDMA_CM_EVENT_ADDR_RESOLVED; - } if (id_priv->id.event_handler(&id_priv->id, &event)) { cma_exch(id_priv, RDMA_CM_DESTROYING); @@ -2480,8 +2501,11 @@ return 0; sin6 = (struct sockaddr_in6 *) addr; - if ((ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) && - !sin6->sin6_scope_id) + + if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) + return 0; + + if (!sin6->sin6_scope_id) return -EINVAL; dev_addr->bound_dev_if = sin6->sin6_scope_id; @@ -2556,6 +2580,7 @@ if (ret) goto err1; + memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); if (!cma_any_addr(addr)) { ret = cma_translate_addr(addr, &id->route.addr.dev_addr); if (ret) @@ -2566,7 +2591,6 @@ goto err1; } - memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { if (addr->sa_family == AF_INET) id_priv->afonly = 1; @@ -3295,7 +3319,8 @@ err = -EINVAL; goto out2; } - iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid); + rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, + &mc->multicast.ib->rec.port_gid); work->id = id_priv; work->mc = mc; INIT_WORK(&work->work, iboe_mcast_work_handler); --- linux-3.13.0.orig/drivers/infiniband/core/core_priv.h +++ linux-3.13.0/drivers/infiniband/core/core_priv.h @@ -49,4 +49,6 @@ int ib_cache_setup(void); void ib_cache_cleanup(void); +int ib_resolve_eth_l2_attrs(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, int *qp_attr_mask); #endif /* _CORE_PRIV_H */ --- linux-3.13.0.orig/drivers/infiniband/core/iwcm.c +++ linux-3.13.0/drivers/infiniband/core/iwcm.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -65,6 +66,20 @@ struct list_head 
free_list; }; +static unsigned int default_backlog = 256; + +static struct ctl_table_header *iwcm_ctl_table_hdr; +static struct ctl_table iwcm_ctl_table[] = { + { + .procname = "default_backlog", + .data = &default_backlog, + .maxlen = sizeof(default_backlog), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { } +}; + /* * The following services provide a mechanism for pre-allocating iwcm_work * elements. The design pre-allocates them based on the cm_id type: @@ -334,7 +349,6 @@ { struct iwcm_id_private *cm_id_priv; unsigned long flags; - int ret; cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); /* @@ -350,7 +364,7 @@ cm_id_priv->state = IW_CM_STATE_DESTROYING; spin_unlock_irqrestore(&cm_id_priv->lock, flags); /* destroy the listening endpoint */ - ret = cm_id->device->iwcm->destroy_listen(cm_id); + cm_id->device->iwcm->destroy_listen(cm_id); spin_lock_irqsave(&cm_id_priv->lock, flags); break; case IW_CM_STATE_ESTABLISHED: @@ -426,6 +440,9 @@ cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + if (!backlog) + backlog = default_backlog; + ret = alloc_work_entries(cm_id_priv, backlog); if (ret) return ret; @@ -1031,11 +1048,20 @@ if (!iwcm_wq) return -ENOMEM; + iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm", + iwcm_ctl_table); + if (!iwcm_ctl_table_hdr) { + pr_err("iw_cm: couldn't register sysctl paths\n"); + destroy_workqueue(iwcm_wq); + return -ENOMEM; + } + return 0; } static void __exit iw_cm_cleanup(void) { + unregister_net_sysctl_table(iwcm_ctl_table_hdr); destroy_workqueue(iwcm_wq); } --- linux-3.13.0.orig/drivers/infiniband/core/sa_query.c +++ linux-3.13.0/drivers/infiniband/core/sa_query.c @@ -42,7 +42,7 @@ #include #include #include - +#include #include #include #include "sa.h" @@ -556,6 +556,13 @@ ah_attr->grh.hop_limit = rec->hop_limit; ah_attr->grh.traffic_class = rec->traffic_class; } + if (force_grh) { + memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN); + ah_attr->vlan_id = rec->vlan_id; + } else { + ah_attr->vlan_id = 0xffff; + } + return 0; } EXPORT_SYMBOL(ib_init_ah_from_path); @@ -670,6 +677,9 @@ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), mad->data, &rec); + rec.vlan_id = 0xffff; + memset(rec.dmac, 0, ETH_ALEN); + memset(rec.smac, 0, ETH_ALEN); query->callback(status, &rec, query->context); } else query->callback(status, NULL, query->context); --- linux-3.13.0.orig/drivers/infiniband/core/sysfs.c +++ linux-3.13.0/drivers/infiniband/core/sysfs.c @@ -613,6 +613,7 @@ case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type); case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type); case RDMA_NODE_USNIC: return sprintf(buf, "%d: usNIC\n", dev->node_type); + case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type); case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); default: return sprintf(buf, "%d: \n", dev->node_type); --- linux-3.13.0.orig/drivers/infiniband/core/ucma.c +++ linux-3.13.0/drivers/infiniband/core/ucma.c @@ -655,24 +655,14 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { - struct rdma_dev_addr *dev_addr; - struct net_device *dev; - u16 vid = 0; resp->num_paths = route->num_paths; switch (route->num_paths) { case 0: - dev_addr = &route->addr.dev_addr; - dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); - if (dev) { - vid = rdma_vlan_dev_vlan_id(dev); - dev_put(dev); - } - - 
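The iw_cm hunk exposes the new default_backlog tunable through a per-net sysctl table. A minimal sketch of that registration pattern, with a hypothetical path and variable name:

#include <linux/errno.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static unsigned int example_backlog = 256;
static struct ctl_table_header *example_hdr;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_backlog",
		.data		= &example_backlog,
		.maxlen		= sizeof(example_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int example_sysctl_init(void)
{
	/* Appears under /proc/sys/net/example/example_backlog */
	example_hdr = register_net_sysctl(&init_net, "net/example",
					  example_table);
	return example_hdr ? 0 : -ENOMEM;
}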
iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid, - dev_addr->dst_dev_addr, vid); - iboe_addr_get_sgid(dev_addr, - (union ib_gid *) &resp->ib_route[0].sgid); + rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, + (union ib_gid *)&resp->ib_route[0].dgid); + rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, + (union ib_gid *)&resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(0xffff); break; case 2: --- linux-3.13.0.orig/drivers/infiniband/core/umem.c +++ linux-3.13.0/drivers/infiniband/core/umem.c @@ -94,6 +94,14 @@ if (dmasync) dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error. + */ + if ((PAGE_ALIGN(addr + size) <= size) || + (PAGE_ALIGN(addr + size) <= addr)) + return ERR_PTR(-EINVAL); + if (!can_do_mlock()) return ERR_PTR(-EPERM); --- linux-3.13.0.orig/drivers/infiniband/core/user_mad.c +++ linux-3.13.0/drivers/infiniband/core/user_mad.c @@ -98,7 +98,7 @@ struct ib_umad_device { int start_port, end_port; - struct kref ref; + struct kobject kobj; struct ib_umad_port port[0]; }; @@ -134,14 +134,18 @@ static void ib_umad_add_one(struct ib_device *device); static void ib_umad_remove_one(struct ib_device *device); -static void ib_umad_release_dev(struct kref *ref) +static void ib_umad_release_dev(struct kobject *kobj) { struct ib_umad_device *dev = - container_of(ref, struct ib_umad_device, ref); + container_of(kobj, struct ib_umad_device, kobj); kfree(dev); } +static struct kobj_type ib_umad_dev_ktype = { + .release = ib_umad_release_dev, +}; + static int hdr_size(struct ib_umad_file *file) { return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) : @@ -780,27 +784,19 @@ { struct ib_umad_port *port; struct ib_umad_file *file; - int ret; + int ret = -ENXIO; port = container_of(inode->i_cdev, struct ib_umad_port, cdev); - if (port) - kref_get(&port->umad_dev->ref); - else - return -ENXIO; mutex_lock(&port->file_mutex); - if (!port->ib_dev) { - ret = -ENXIO; + if (!port->ib_dev) goto out; - } + ret = -ENOMEM; file = kzalloc(sizeof *file, GFP_KERNEL); - if (!file) { - kref_put(&port->umad_dev->ref, ib_umad_release_dev); - ret = -ENOMEM; + if (!file) goto out; - } mutex_init(&file->mutex); spin_lock_init(&file->send_lock); @@ -814,6 +810,13 @@ list_add_tail(&file->port_list, &port->file_list); ret = nonseekable_open(inode, filp); + if (ret) { + list_del(&file->port_list); + kfree(file); + goto out; + } + + kobject_get(&port->umad_dev->kobj); out: mutex_unlock(&port->file_mutex); @@ -852,7 +855,7 @@ mutex_unlock(&file->port->file_mutex); kfree(file); - kref_put(&dev->ref, ib_umad_release_dev); + kobject_put(&dev->kobj); return 0; } @@ -880,10 +883,6 @@ int ret; port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); - if (port) - kref_get(&port->umad_dev->ref); - else - return -ENXIO; if (filp->f_flags & O_NONBLOCK) { if (down_trylock(&port->sm_sem)) { @@ -898,17 +897,27 @@ } ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); - if (ret) { - up(&port->sm_sem); - goto fail; - } + if (ret) + goto err_up_sem; filp->private_data = port; - return nonseekable_open(inode, filp); + ret = nonseekable_open(inode, filp); + if (ret) + goto err_clr_sm_cap; + + kobject_get(&port->umad_dev->kobj); + + return 0; + +err_clr_sm_cap: + swap(props.set_port_cap_mask, props.clr_port_cap_mask); + ib_modify_port(port->ib_dev, port->port_num, 0, &props); + +err_up_sem: + up(&port->sm_sem); fail: - kref_put(&port->umad_dev->ref, 
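The ib_umem_get() change above rejects address/length combinations whose page-aligned end wraps around the address space. A standalone illustration of the check (unsigned 64-bit arithmetic assumed, constants made up):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* If addr + size overflows, the aligned end is <= either operand. */
static int region_ok(unsigned long addr, unsigned long size)
{
	if ((PAGE_ALIGN(addr + size) <= size) ||
	    (PAGE_ALIGN(addr + size) <= addr))
		return 0;	/* overflow: reject with -EINVAL */
	return 1;
}

int main(void)
{
	printf("%d\n", region_ok(0x1000, 0x2000));		/* 1: fine  */
	printf("%d\n", region_ok(~0UL - 0x100, 0x2000));	/* 0: wraps */
	return 0;
}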
ib_umad_release_dev); return ret; } @@ -927,7 +936,7 @@ up(&port->sm_sem); - kref_put(&port->umad_dev->ref, ib_umad_release_dev); + kobject_put(&port->umad_dev->kobj); return ret; } @@ -995,6 +1004,7 @@ } static int ib_umad_init_port(struct ib_device *device, int port_num, + struct ib_umad_device *umad_dev, struct ib_umad_port *port) { int devnum; @@ -1027,6 +1037,7 @@ cdev_init(&port->cdev, &umad_fops); port->cdev.owner = THIS_MODULE; + port->cdev.kobj.parent = &umad_dev->kobj; kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); if (cdev_add(&port->cdev, base, 1)) goto err_cdev; @@ -1045,6 +1056,7 @@ base += IB_UMAD_MAX_PORTS; cdev_init(&port->sm_cdev, &umad_sm_fops); port->sm_cdev.owner = THIS_MODULE; + port->sm_cdev.kobj.parent = &umad_dev->kobj; kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); if (cdev_add(&port->sm_cdev, base, 1)) goto err_sm_cdev; @@ -1138,7 +1150,7 @@ if (!umad_dev) return; - kref_init(&umad_dev->ref); + kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); umad_dev->start_port = s; umad_dev->end_port = e; @@ -1146,7 +1158,8 @@ for (i = s; i <= e; ++i) { umad_dev->port[i - s].umad_dev = umad_dev; - if (ib_umad_init_port(device, i, &umad_dev->port[i - s])) + if (ib_umad_init_port(device, i, umad_dev, + &umad_dev->port[i - s])) goto err; } @@ -1158,7 +1171,7 @@ while (--i >= s) ib_umad_kill_port(&umad_dev->port[i - s]); - kref_put(&umad_dev->ref, ib_umad_release_dev); + kobject_put(&umad_dev->kobj); } static void ib_umad_remove_one(struct ib_device *device) @@ -1172,7 +1185,7 @@ for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) ib_umad_kill_port(&umad_dev->port[i]); - kref_put(&umad_dev->ref, ib_umad_release_dev); + kobject_put(&umad_dev->kobj); } static char *umad_devnode(struct device *dev, umode_t *mode) --- linux-3.13.0.orig/drivers/infiniband/core/uverbs_cmd.c +++ linux-3.13.0/drivers/infiniband/core/uverbs_cmd.c @@ -40,6 +40,7 @@ #include #include "uverbs.h" +#include "core_priv.h" struct uverbs_lock_class { struct lock_class_key key; @@ -1961,6 +1962,9 @@ attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; if (qp->real_qp == qp) { + ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); + if (ret) + goto out; ret = qp->device->modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); } else { --- linux-3.13.0.orig/drivers/infiniband/core/verbs.c +++ linux-3.13.0/drivers/infiniband/core/verbs.c @@ -44,6 +44,9 @@ #include #include +#include + +#include "core_priv.h" int ib_rate_to_mult(enum ib_rate rate) { @@ -116,6 +119,8 @@ return RDMA_TRANSPORT_IWARP; case RDMA_NODE_USNIC: return RDMA_TRANSPORT_USNIC; + case RDMA_NODE_USNIC_UDP: + return RDMA_TRANSPORT_USNIC_UDP; default: BUG(); return 0; @@ -133,6 +138,7 @@ return IB_LINK_LAYER_INFINIBAND; case RDMA_TRANSPORT_IWARP: case RDMA_TRANSPORT_USNIC: + case RDMA_TRANSPORT_USNIC_UDP: return IB_LINK_LAYER_ETHERNET; default: return IB_LINK_LAYER_UNSPECIFIED; @@ -192,8 +198,28 @@ u32 flow_class; u16 gid_index; int ret; + int is_eth = (rdma_port_get_link_layer(device, port_num) == + IB_LINK_LAYER_ETHERNET); memset(ah_attr, 0, sizeof *ah_attr); + if (is_eth) { + if (!(wc->wc_flags & IB_WC_GRH)) + return -EPROTOTYPE; + + if (wc->wc_flags & IB_WC_WITH_SMAC && + wc->wc_flags & IB_WC_WITH_VLAN) { + memcpy(ah_attr->dmac, wc->smac, ETH_ALEN); + ah_attr->vlan_id = wc->vlan_id; + } else { + ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid, + ah_attr->dmac, &ah_attr->vlan_id); + if (ret) + return ret; + } + } else { + ah_attr->vlan_id = 0xffff; + } + ah_attr->dlid = 
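The user_mad conversion above replaces the bare kref with an embedded kobject so the cdevs can name the device as their kobj parent and every open file can pin it with kobject_get()/kobject_put(). A minimal sketch of that lifetime pattern, with illustrative names:

#include <linux/kobject.h>
#include <linux/slab.h>

/* The ktype release frees the containing structure once the last
 * reference is dropped. */
struct ex_umad_dev {
	struct kobject kobj;
	/* ... ports, etc. ... */
};

static void ex_umad_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct ex_umad_dev, kobj));
}

static struct kobj_type ex_umad_ktype = {
	.release = ex_umad_release,
};

static struct ex_umad_dev *ex_umad_alloc(void)
{
	struct ex_umad_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (dev)
		kobject_init(&dev->kobj, &ex_umad_ktype);
	return dev;
}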
wc->slid; ah_attr->sl = wc->sl; ah_attr->src_path_bits = wc->dlid_path_bits; @@ -476,7 +502,9 @@ static const struct { int valid; enum ib_qp_attr_mask req_param[IB_QPT_MAX]; + enum ib_qp_attr_mask req_param_add_eth[IB_QPT_MAX]; enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; + enum ib_qp_attr_mask opt_param_add_eth[IB_QPT_MAX]; } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, @@ -557,6 +585,12 @@ IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER), }, + .req_param_add_eth = { + [IB_QPT_RC] = (IB_QP_SMAC), + [IB_QPT_UC] = (IB_QP_SMAC), + [IB_QPT_XRC_INI] = (IB_QP_SMAC), + [IB_QPT_XRC_TGT] = (IB_QP_SMAC) + }, .opt_param = { [IB_QPT_UD] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), @@ -576,7 +610,21 @@ IB_QP_QKEY), [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY), - } + }, + .opt_param_add_eth = { + [IB_QPT_RC] = (IB_QP_ALT_SMAC | + IB_QP_VID | + IB_QP_ALT_VID), + [IB_QPT_UC] = (IB_QP_ALT_SMAC | + IB_QP_VID | + IB_QP_ALT_VID), + [IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC | + IB_QP_VID | + IB_QP_ALT_VID), + [IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC | + IB_QP_VID | + IB_QP_ALT_VID) + } } }, [IB_QPS_RTR] = { @@ -779,7 +827,8 @@ }; int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, - enum ib_qp_type type, enum ib_qp_attr_mask mask) + enum ib_qp_type type, enum ib_qp_attr_mask mask, + enum rdma_link_layer ll) { enum ib_qp_attr_mask req_param, opt_param; @@ -798,6 +847,13 @@ req_param = qp_state_table[cur_state][next_state].req_param[type]; opt_param = qp_state_table[cur_state][next_state].opt_param[type]; + if (ll == IB_LINK_LAYER_ETHERNET) { + req_param |= qp_state_table[cur_state][next_state]. + req_param_add_eth[type]; + opt_param |= qp_state_table[cur_state][next_state]. + opt_param_add_eth[type]; + } + if ((mask & req_param) != req_param) return 0; @@ -808,10 +864,51 @@ } EXPORT_SYMBOL(ib_modify_qp_is_ok); +int ib_resolve_eth_l2_attrs(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, int *qp_attr_mask) +{ + int ret = 0; + union ib_gid sgid; + + if ((*qp_attr_mask & IB_QP_AV) && + (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) { + ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num, + qp_attr->ah_attr.grh.sgid_index, &sgid); + if (ret) + goto out; + if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) { + rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac); + rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac); + qp_attr->vlan_id = rdma_get_vlan_id(&sgid); + } else { + ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid, + qp_attr->ah_attr.dmac, &qp_attr->vlan_id); + if (ret) + goto out; + ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL); + if (ret) + goto out; + } + *qp_attr_mask |= IB_QP_SMAC; + if (qp_attr->vlan_id < 0xFFFF) + *qp_attr_mask |= IB_QP_VID; + } +out: + return ret; +} +EXPORT_SYMBOL(ib_resolve_eth_l2_attrs); + + int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { + int ret; + + ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask); + if (ret) + return ret; + return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); --- linux-3.13.0.orig/drivers/infiniband/hw/amso1100/c2.c +++ linux-3.13.0/drivers/infiniband/hw/amso1100/c2.c @@ -1082,6 +1082,7 @@ /* Initialize network device */ if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { + ret = -ENOMEM; iounmap(mmio_regs); goto bail4; } @@ -1151,7 +1152,8 @@ goto 
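The verbs.c table above adds per-transition required/optional attribute masks for Ethernet (RoCE) link layers, and ib_modify_qp_is_ok() folds them in before validating the caller's mask. A standalone illustration of the required/optional mask check itself (attribute names are made up):

#include <stdio.h>

enum {
	ATTR_STATE	= 1 << 0,
	ATTR_AV		= 1 << 1,
	ATTR_SMAC	= 1 << 2,
	ATTR_VID	= 1 << 3,
};

static int mask_is_ok(unsigned int req, unsigned int opt, unsigned int mask)
{
	if ((mask & req) != req)
		return 0;		/* a required attribute is missing */
	if (mask & ~(req | opt))
		return 0;		/* an attribute not allowed here   */
	return 1;
}

int main(void)
{
	/* On an Ethernet (RoCE) port the SMAC becomes required as well. */
	unsigned int req = ATTR_STATE | ATTR_SMAC;
	unsigned int opt = ATTR_VID;

	printf("%d\n", mask_is_ok(req, opt, ATTR_STATE | ATTR_SMAC));		/* 1 */
	printf("%d\n", mask_is_ok(req, opt, ATTR_STATE));			/* 0 */
	printf("%d\n", mask_is_ok(req, opt, ATTR_STATE | ATTR_SMAC | ATTR_AV));	/* 0 */
	return 0;
}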
bail10; } - if (c2_register_device(c2dev)) + ret = c2_register_device(c2dev); + if (ret) goto bail10; return 0; --- linux-3.13.0.orig/drivers/infiniband/hw/amso1100/c2_intr.c +++ linux-3.13.0/drivers/infiniband/hw/amso1100/c2_intr.c @@ -169,7 +169,8 @@ * We should never get here, as the adapter should * never send us a reply that we're not expecting. */ - vq_repbuf_free(c2dev, host_msg); + if (reply_msg != NULL) + vq_repbuf_free(c2dev, host_msg); pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n"); return; } --- linux-3.13.0.orig/drivers/infiniband/hw/amso1100/c2_rnic.c +++ linux-3.13.0/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -576,7 +576,8 @@ goto bail4; /* Initialize cached the adapter limits */ - if (c2_rnic_query(c2dev, &c2dev->props)) + err = c2_rnic_query(c2dev, &c2dev->props); + if (err) goto bail5; /* Initialize the PD pool */ --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/cm.c +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/cm.c @@ -1647,6 +1647,15 @@ status != CPL_ERR_ARP_MISS; } +/* Returns whether a CPL status conveys negative advice. + */ +static int is_neg_adv(unsigned int status) +{ + return status == CPL_ERR_RTX_NEG_ADVICE || + status == CPL_ERR_PERSIST_NEG_ADVICE || + status == CPL_ERR_KEEPALV_NEG_ADVICE; +} + #define ACT_OPEN_RETRY_COUNT 2 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, @@ -1835,7 +1844,7 @@ PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, status, status2errno(status)); - if (status == CPL_ERR_RTX_NEG_ADVICE) { + if (is_neg_adv(status)) { printk(KERN_WARNING MOD "Connection problems for atid %u\n", atid); return 0; @@ -2265,15 +2274,6 @@ return 0; } -/* - * Returns whether an ABORT_REQ_RSS message is a negative advice. - */ -static int is_neg_adv_abort(unsigned int status) -{ - return status == CPL_ERR_RTX_NEG_ADVICE || - status == CPL_ERR_PERSIST_NEG_ADVICE; -} - static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss *req = cplhdr(skb); @@ -2287,7 +2287,7 @@ unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); - if (is_neg_adv_abort(req->status)) { + if (is_neg_adv(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); return 0; @@ -3352,6 +3352,7 @@ goto free_dst; } + neigh_release(neigh); step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; window = (__force u16) htons((__force u16)tcph->window); @@ -3569,7 +3570,7 @@ kfree_skb(skb); return 0; } - if (is_neg_adv_abort(req->status)) { + if (is_neg_adv(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); kfree_skb(skb); --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/cq.c +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/cq.c @@ -940,7 +940,8 @@ uresp.gts_key = ucontext->key; ucontext->key += PAGE_SIZE; spin_unlock(&ucontext->mmap_lock); - ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); + ret = ib_copy_to_udata(udata, &uresp, + sizeof(uresp) - sizeof(uresp.reserved)); if (ret) goto err5; --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/device.c +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/device.c @@ -64,6 +64,10 @@ static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); +#define DB_FC_RESUME_SIZE 64 +#define DB_FC_RESUME_DELAY 1 +#define DB_FC_DRAIN_THRESH 0 + static struct dentry *c4iw_debugfs_root; struct c4iw_debugfs_data { @@ -282,7 +286,7 @@ .llseek = default_llseek, }; -static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"}; +static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", 
"RECOVERY", "STOPPED"}; static int stats_show(struct seq_file *seq, void *v) { @@ -311,9 +315,10 @@ seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full); seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty); seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop); - seq_printf(seq, " DB State: %s Transitions %llu\n", + seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n", db_state_str[dev->db_state], - dev->rdev.stats.db_state_transitions); + dev->rdev.stats.db_state_transitions, + dev->rdev.stats.db_fc_interruptions); seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full); seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n", dev->rdev.stats.act_ofld_conn_fails); @@ -643,6 +648,12 @@ printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err); goto err4; } + rdev->status_page = (struct t4_dev_status_page *) + __get_free_page(GFP_KERNEL); + if (!rdev->status_page) { + pr_err(MOD "error allocating status page\n"); + goto err4; + } return 0; err4: c4iw_rqtpool_destroy(rdev); @@ -656,6 +667,7 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) { + free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); @@ -703,18 +715,6 @@ pr_info("%s: On-Chip Queues not supported on this device.\n", pci_name(infop->pdev)); - if (!is_t4(infop->adapter_type)) { - if (!allow_db_fc_on_t5) { - db_fc_threshold = 100000; - pr_info("DB Flow Control Disabled.\n"); - } - - if (!allow_db_coalescing_on_t5) { - db_coalescing_threshold = -1; - pr_info("DB Coalescing Disabled.\n"); - } - } - devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); if (!devp) { printk(KERN_ERR MOD "Cannot allocate ib device\n"); @@ -749,6 +749,7 @@ spin_lock_init(&devp->lock); mutex_init(&devp->rdev.stats.lock); mutex_init(&devp->db_mutex); + INIT_LIST_HEAD(&devp->db_fc_list); if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( @@ -977,13 +978,16 @@ static void stop_queues(struct uld_ctx *ctx) { - spin_lock_irq(&ctx->dev->lock); - if (ctx->dev->db_state == NORMAL) { - ctx->dev->rdev.stats.db_state_transitions++; - ctx->dev->db_state = FLOW_CONTROL; + unsigned long flags; + + spin_lock_irqsave(&ctx->dev->lock, flags); + ctx->dev->rdev.stats.db_state_transitions++; + ctx->dev->db_state = STOPPED; + if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL); - } - spin_unlock_irq(&ctx->dev->lock); + else + ctx->dev->rdev.status_page->db_off = 1; + spin_unlock_irqrestore(&ctx->dev->lock, flags); } static int enable_qp_db(int id, void *p, void *data) @@ -994,15 +998,70 @@ return 0; } +static void resume_rc_qp(struct c4iw_qp *qp) +{ + spin_lock(&qp->lock); + t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc); + qp->wq.sq.wq_pidx_inc = 0; + t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc); + qp->wq.rq.wq_pidx_inc = 0; + spin_unlock(&qp->lock); +} + +static void resume_a_chunk(struct uld_ctx *ctx) +{ + int i; + struct c4iw_qp *qp; + + for (i = 0; i < DB_FC_RESUME_SIZE; i++) { + qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp, + db_fc_entry); + list_del_init(&qp->db_fc_entry); + resume_rc_qp(qp); + if (list_empty(&ctx->dev->db_fc_list)) + break; + } +} + static void resume_queues(struct uld_ctx *ctx) { spin_lock_irq(&ctx->dev->lock); - if (ctx->dev->qpcnt <= db_fc_threshold && - ctx->dev->db_state == FLOW_CONTROL) { - ctx->dev->db_state = NORMAL; - ctx->dev->rdev.stats.db_state_transitions++; - idr_for_each(&ctx->dev->qpidr, enable_qp_db, 
NULL); + if (ctx->dev->db_state != STOPPED) + goto out; + ctx->dev->db_state = FLOW_CONTROL; + while (1) { + if (list_empty(&ctx->dev->db_fc_list)) { + WARN_ON(ctx->dev->db_state != FLOW_CONTROL); + ctx->dev->db_state = NORMAL; + ctx->dev->rdev.stats.db_state_transitions++; + if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) { + idr_for_each(&ctx->dev->qpidr, enable_qp_db, + NULL); + } else { + ctx->dev->rdev.status_page->db_off = 0; + } + break; + } else { + if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) + < (ctx->dev->rdev.lldi.dbfifo_int_thresh << + DB_FC_DRAIN_THRESH)) { + resume_a_chunk(ctx); + } + if (!list_empty(&ctx->dev->db_fc_list)) { + spin_unlock_irq(&ctx->dev->lock); + if (DB_FC_RESUME_DELAY) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(DB_FC_RESUME_DELAY); + } + spin_lock_irq(&ctx->dev->lock); + if (ctx->dev->db_state != FLOW_CONTROL) + break; + } + } } +out: + if (ctx->dev->db_state != NORMAL) + ctx->dev->rdev.stats.db_fc_interruptions++; spin_unlock_irq(&ctx->dev->lock); } @@ -1028,12 +1087,12 @@ return 0; } -static void deref_qps(struct qp_list qp_list) +static void deref_qps(struct qp_list *qp_list) { int idx; - for (idx = 0; idx < qp_list.idx; idx++) - c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp); + for (idx = 0; idx < qp_list->idx; idx++) + c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp); } static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) @@ -1044,17 +1103,22 @@ for (idx = 0; idx < qp_list->idx; idx++) { struct c4iw_qp *qp = qp_list->qps[idx]; + spin_lock_irq(&qp->rhp->lock); + spin_lock(&qp->lock); ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], qp->wq.sq.qid, t4_sq_host_wq_pidx(&qp->wq), t4_sq_wq_size(&qp->wq)); if (ret) { - printk(KERN_ERR MOD "%s: Fatal error - " + pr_err(KERN_ERR MOD "%s: Fatal error - " "DB overflow recovery failed - " "error syncing SQ qid %u\n", pci_name(ctx->lldi.pdev), qp->wq.sq.qid); + spin_unlock(&qp->lock); + spin_unlock_irq(&qp->rhp->lock); return; } + qp->wq.sq.wq_pidx_inc = 0; ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], qp->wq.rq.qid, @@ -1062,12 +1126,17 @@ t4_rq_wq_size(&qp->wq)); if (ret) { - printk(KERN_ERR MOD "%s: Fatal error - " + pr_err(KERN_ERR MOD "%s: Fatal error - " "DB overflow recovery failed - " "error syncing RQ qid %u\n", pci_name(ctx->lldi.pdev), qp->wq.rq.qid); + spin_unlock(&qp->lock); + spin_unlock_irq(&qp->rhp->lock); return; } + qp->wq.rq.wq_pidx_inc = 0; + spin_unlock(&qp->lock); + spin_unlock_irq(&qp->rhp->lock); /* Wait for the dbfifo to drain */ while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { @@ -1083,36 +1152,22 @@ struct qp_list qp_list; int ret; - /* lock out kernel db ringers */ - mutex_lock(&ctx->dev->db_mutex); - - /* put all queues in to recovery mode */ - spin_lock_irq(&ctx->dev->lock); - ctx->dev->db_state = RECOVERY; - ctx->dev->rdev.stats.db_state_transitions++; - idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL); - spin_unlock_irq(&ctx->dev->lock); - /* slow everybody down */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(usecs_to_jiffies(1000)); - /* Wait for the dbfifo to completely drain. 
*/ - while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(usecs_to_jiffies(10)); - } - /* flush the SGE contexts */ ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]); if (ret) { printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", pci_name(ctx->lldi.pdev)); - goto out; + return; } /* Count active queues so we can build a list of queues to recover */ spin_lock_irq(&ctx->dev->lock); + WARN_ON(ctx->dev->db_state != STOPPED); + ctx->dev->db_state = RECOVERY; idr_for_each(&ctx->dev->qpidr, count_qps, &count); qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC); @@ -1120,7 +1175,7 @@ printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", pci_name(ctx->lldi.pdev)); spin_unlock_irq(&ctx->dev->lock); - goto out; + return; } qp_list.idx = 0; @@ -1133,29 +1188,13 @@ recover_lost_dbs(ctx, &qp_list); /* we're almost done! deref the qps and clean up */ - deref_qps(qp_list); + deref_qps(&qp_list); kfree(qp_list.qps); - /* Wait for the dbfifo to completely drain again */ - while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(usecs_to_jiffies(10)); - } - - /* resume the queues */ spin_lock_irq(&ctx->dev->lock); - if (ctx->dev->qpcnt > db_fc_threshold) - ctx->dev->db_state = FLOW_CONTROL; - else { - ctx->dev->db_state = NORMAL; - idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL); - } - ctx->dev->rdev.stats.db_state_transitions++; + WARN_ON(ctx->dev->db_state != RECOVERY); + ctx->dev->db_state = STOPPED; spin_unlock_irq(&ctx->dev->lock); - -out: - /* start up kernel db ringers again */ - mutex_unlock(&ctx->dev->db_mutex); } static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...) @@ -1165,9 +1204,7 @@ switch (control) { case CXGB4_CONTROL_DB_FULL: stop_queues(ctx); - mutex_lock(&ctx->dev->rdev.stats.lock); ctx->dev->rdev.stats.db_full++; - mutex_unlock(&ctx->dev->rdev.stats.lock); break; case CXGB4_CONTROL_DB_EMPTY: resume_queues(ctx); --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -109,6 +109,7 @@ enum c4iw_rdev_flags { T4_FATAL_ERROR = (1<<0), + T4_STATUS_PAGE_DISABLED = (1<<1), }; struct c4iw_stat { @@ -130,6 +131,7 @@ u64 db_empty; u64 db_drop; u64 db_state_transitions; + u64 db_fc_interruptions; u64 tcam_full; u64 act_ofld_conn_fails; u64 pas_ofld_conn_fails; @@ -150,6 +152,7 @@ unsigned long oc_mw_pa; void __iomem *oc_mw_kva; struct c4iw_stats stats; + struct t4_dev_status_page *status_page; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -211,7 +214,8 @@ enum db_state { NORMAL = 0, FLOW_CONTROL = 1, - RECOVERY = 2 + RECOVERY = 2, + STOPPED = 3 }; struct c4iw_dev { @@ -225,10 +229,10 @@ struct mutex db_mutex; struct dentry *debugfs_root; enum db_state db_state; - int qpcnt; struct idr hwtid_idr; struct idr atid_idr; struct idr stid_idr; + struct list_head db_fc_list; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) @@ -432,6 +436,7 @@ struct c4iw_qp { struct ib_qp ibqp; + struct list_head db_fc_entry; struct c4iw_dev *rhp; struct c4iw_ep *ep; struct c4iw_qp_attributes attr; --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/mem.c +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/mem.c @@ -76,7 +76,7 @@ INIT_ULPTX_WR(req, wr_len, 0, 0); req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | (wait ? FW_WR_COMPL(1) : 0)); - req->wr.wr_lo = wait ? 
(__force __be64)&wr_wait : 0; + req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L; req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1)); --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/provider.c +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/provider.c @@ -106,15 +106,56 @@ { struct c4iw_ucontext *context; struct c4iw_dev *rhp = to_c4iw_dev(ibdev); + static int warned; + struct c4iw_alloc_ucontext_resp uresp; + int ret = 0; + struct c4iw_mm_entry *mm = NULL; PDBG("%s ibdev %p\n", __func__, ibdev); context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return ERR_PTR(-ENOMEM); + if (!context) { + ret = -ENOMEM; + goto err; + } + c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); INIT_LIST_HEAD(&context->mmaps); spin_lock_init(&context->mmap_lock); + + if (udata->outlen < sizeof(uresp)) { + if (!warned++) + pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled."); + rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; + } else { + mm = kmalloc(sizeof(*mm), GFP_KERNEL); + if (!mm) { + ret = -ENOMEM; + goto err_free; + } + + uresp.status_page_size = PAGE_SIZE; + + spin_lock(&context->mmap_lock); + uresp.status_page_key = context->key; + context->key += PAGE_SIZE; + spin_unlock(&context->mmap_lock); + + ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (ret) + goto err_mm; + + mm->key = uresp.status_page_key; + mm->addr = virt_to_phys(rhp->rdev.status_page); + mm->len = PAGE_SIZE; + insert_mmap(context, mm); + } return &context->ibucontext; +err_mm: + kfree(mm); +err_free: + kfree(context); +err: + return ERR_PTR(ret); } static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/qp.c +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/qp.c @@ -638,6 +638,46 @@ wake_up(&(to_c4iw_qp(qp)->wait)); } +static void add_to_fc_list(struct list_head *head, struct list_head *entry) +{ + if (list_empty(entry)) + list_add_tail(entry, head); +} + +static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc) +{ + unsigned long flags; + + spin_lock_irqsave(&qhp->rhp->lock, flags); + spin_lock(&qhp->lock); + if (qhp->rhp->db_state == NORMAL) { + t4_ring_sq_db(&qhp->wq, inc); + } else { + add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); + qhp->wq.sq.wq_pidx_inc += inc; + } + spin_unlock(&qhp->lock); + spin_unlock_irqrestore(&qhp->rhp->lock, flags); + return 0; +} + +static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) +{ + unsigned long flags; + + spin_lock_irqsave(&qhp->rhp->lock, flags); + spin_lock(&qhp->lock); + if (qhp->rhp->db_state == NORMAL) { + t4_ring_rq_db(&qhp->wq, inc); + } else { + add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); + qhp->wq.rq.wq_pidx_inc += inc; + } + spin_unlock(&qhp->lock); + spin_unlock_irqrestore(&qhp->rhp->lock, flags); + return 0; +} + int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { @@ -750,9 +790,13 @@ t4_sq_produce(&qhp->wq, len16); idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); } - if (t4_wq_db_enabled(&qhp->wq)) + if (!qhp->rhp->rdev.status_page->db_off) { t4_ring_sq_db(&qhp->wq, idx); - spin_unlock_irqrestore(&qhp->lock, flag); + spin_unlock_irqrestore(&qhp->lock, flag); + } else { + spin_unlock_irqrestore(&qhp->lock, flag); + ring_kernel_sq_db(qhp, idx); + } return err; } @@ -812,9 +856,13 @@ wr = wr->next; num_wrs--; } - if (t4_wq_db_enabled(&qhp->wq)) + if 
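ring_kernel_sq_db()/ring_kernel_rq_db() above defer doorbell rings while the device is under flow control: the pending producer-index increment is accumulated and the QP is queued on the device's db_fc_list for resume_queues() to drain later. A condensed sketch of that pattern, with illustrative types:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct ex_dev {
	spinlock_t lock;
	int db_state;			/* 0 == NORMAL */
	struct list_head db_fc_list;
};

struct ex_qp {
	spinlock_t lock;
	struct list_head db_fc_entry;
	u16 pidx_inc;
};

static void ex_ring_db(struct ex_dev *dev, struct ex_qp *qp, u16 inc,
		       void (*ring_hw)(struct ex_qp *qp, u16 inc))
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	spin_lock(&qp->lock);
	if (dev->db_state == 0) {
		ring_hw(qp, inc);
	} else {
		/* queue once, accumulate the increment for later resume */
		if (list_empty(&qp->db_fc_entry))
			list_add_tail(&qp->db_fc_entry, &dev->db_fc_list);
		qp->pidx_inc += inc;
	}
	spin_unlock(&qp->lock);
	spin_unlock_irqrestore(&dev->lock, flags);
}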
(!qhp->rhp->rdev.status_page->db_off) { t4_ring_rq_db(&qhp->wq, idx); - spin_unlock_irqrestore(&qhp->lock, flag); + spin_unlock_irqrestore(&qhp->lock, flag); + } else { + spin_unlock_irqrestore(&qhp->lock, flag); + ring_kernel_rq_db(qhp, idx); + } return err; } @@ -1200,35 +1248,6 @@ return ret; } -/* - * Called by the library when the qp has user dbs disabled due to - * a DB_FULL condition. This function will single-thread all user - * DB rings to avoid overflowing the hw db-fifo. - */ -static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc) -{ - int delay = db_delay_usecs; - - mutex_lock(&qhp->rhp->db_mutex); - do { - - /* - * The interrupt threshold is dbfifo_int_thresh << 6. So - * make sure we don't cross that and generate an interrupt. - */ - if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) < - (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) { - writel(QID(qid) | PIDX(inc), qhp->wq.db); - break; - } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(usecs_to_jiffies(delay)); - delay = min(delay << 1, 2000); - } while (1); - mutex_unlock(&qhp->rhp->db_mutex); - return 0; -} - int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, @@ -1278,11 +1297,11 @@ } if (mask & C4IW_QP_ATTR_SQ_DB) { - ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc); + ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); goto out; } if (mask & C4IW_QP_ATTR_RQ_DB) { - ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc); + ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); goto out; } @@ -1465,14 +1484,6 @@ return ret; } -static int enable_qp_db(int id, void *p, void *data) -{ - struct c4iw_qp *qp = p; - - t4_enable_wq_db(&qp->wq); - return 0; -} - int c4iw_destroy_qp(struct ib_qp *ib_qp) { struct c4iw_dev *rhp; @@ -1490,22 +1501,15 @@ c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); wait_event(qhp->wait, !qhp->ep); - spin_lock_irq(&rhp->lock); - remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid); - rhp->qpcnt--; - BUG_ON(rhp->qpcnt < 0); - if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) { - rhp->rdev.stats.db_state_transitions++; - rhp->db_state = NORMAL; - idr_for_each(&rhp->qpidr, enable_qp_db, NULL); - } - if (db_coalescing_threshold >= 0) - if (rhp->qpcnt <= db_coalescing_threshold) - cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]); - spin_unlock_irq(&rhp->lock); + remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); atomic_dec(&qhp->refcnt); wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); + spin_lock_irq(&rhp->lock); + if (!list_empty(&qhp->db_fc_entry)) + list_del_init(&qhp->db_fc_entry); + spin_unlock_irq(&rhp->lock); + ucontext = ib_qp->uobject ? 
to_c4iw_ucontext(ib_qp->uobject->context) : NULL; destroy_qp(&rhp->rdev, &qhp->wq, @@ -1516,14 +1520,6 @@ return 0; } -static int disable_qp_db(int id, void *p, void *data) -{ - struct c4iw_qp *qp = p; - - t4_disable_wq_db(&qp->wq); - return 0; -} - struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, struct ib_udata *udata) { @@ -1610,20 +1606,7 @@ init_waitqueue_head(&qhp->wait); atomic_set(&qhp->refcnt, 1); - spin_lock_irq(&rhp->lock); - if (rhp->db_state != NORMAL) - t4_disable_wq_db(&qhp->wq); - rhp->qpcnt++; - if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) { - rhp->rdev.stats.db_state_transitions++; - rhp->db_state = FLOW_CONTROL; - idr_for_each(&rhp->qpidr, disable_qp_db, NULL); - } - if (db_coalescing_threshold >= 0) - if (rhp->qpcnt > db_coalescing_threshold) - cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]); - ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); - spin_unlock_irq(&rhp->lock); + ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); if (ret) goto err2; @@ -1709,6 +1692,7 @@ } qhp->ibqp.qp_num = qhp->wq.sq.qid; init_timer(&(qhp->timer)); + INIT_LIST_HEAD(&qhp->db_fc_entry); PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n", __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, qhp->wq.sq.qid); --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/t4.h +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/t4.h @@ -300,6 +300,7 @@ u16 cidx; u16 pidx; u16 wq_pidx; + u16 wq_pidx_inc; u16 flags; short flush_cidx; }; @@ -324,6 +325,7 @@ u16 cidx; u16 pidx; u16 wq_pidx; + u16 wq_pidx_inc; }; struct t4_wq { @@ -609,3 +611,7 @@ ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1; } #endif + +struct t4_dev_status_page { + u8 db_off; +}; --- linux-3.13.0.orig/drivers/infiniband/hw/cxgb4/user.h +++ linux-3.13.0/drivers/infiniband/hw/cxgb4/user.h @@ -48,6 +48,7 @@ __u32 cqid; __u32 size; __u32 qid_mask; + __u32 reserved; /* explicit padding (optional for i386) */ }; @@ -70,4 +71,9 @@ __u32 qid_mask; __u32 flags; }; + +struct c4iw_alloc_ucontext_resp { + __u64 status_page_key; + __u32 status_page_size; +}; #endif --- linux-3.13.0.orig/drivers/infiniband/hw/ehca/ehca_cq.c +++ linux-3.13.0/drivers/infiniband/hw/ehca/ehca_cq.c @@ -283,6 +283,7 @@ (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1)); if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { ehca_err(device, "Copy to udata failed."); + cq = ERR_PTR(-EFAULT); goto create_cq_exit4; } } --- linux-3.13.0.orig/drivers/infiniband/hw/ehca/ehca_qp.c +++ linux-3.13.0/drivers/infiniband/hw/ehca/ehca_qp.c @@ -1329,7 +1329,7 @@ qp_new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : qp_cur_state; if (!smi_reset2init && !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type, - attr_mask)) { + attr_mask, IB_LINK_LAYER_UNSPECIFIED)) { ret = -EINVAL; ehca_err(ibqp->device, "Invalid qp transition new_state=%x cur_state=%x " --- linux-3.13.0.orig/drivers/infiniband/hw/ipath/ipath_diag.c +++ linux-3.13.0/drivers/infiniband/hw/ipath/ipath_diag.c @@ -326,7 +326,7 @@ size_t count, loff_t *off) { u32 __iomem *piobuf; - u32 plen, clen, pbufn; + u32 plen, pbufn, maxlen_reserve; struct ipath_diag_pkt odp; struct ipath_diag_xpkt dp; u32 *tmpbuf = NULL; @@ -335,42 +335,24 @@ u64 val; u32 l_state, lt_state; /* LinkState, LinkTrainingState */ - if (count < sizeof(odp)) { - ret = -EINVAL; - goto bail; - } if (count == sizeof(dp)) { if (copy_from_user(&dp, data, sizeof(dp))) { ret = -EFAULT; goto bail; } - } else if (copy_from_user(&odp, data, sizeof(odp))) { - ret = -EFAULT; - goto bail; - } - - /* - * Due to padding/alignment issues (lessened with new struct) - * the old and new structs are the same length. We need to - * disambiguate them, which we can do because odp.len has never - * been less than the total of LRH+BTH+DETH so far, while - * dp.unit (same offset) unit is unlikely to get that high. - * Similarly, dp.data, the pointer to user at the same offset - * as odp.unit, is almost certainly at least one (512byte)page - * "above" NULL. The if-block below can be omitted if compatibility - * between a new driver and older diagnostic code is unimportant. - * compatibility the other direction (new diags, old driver) is - * handled in the diagnostic code, with a warning. - */ - if (dp.unit >= 20 && dp.data < 512) { - /* very probable version mismatch. Fix it up */ - memcpy(&odp, &dp, sizeof(odp)); - /* We got a legacy dp, copy elements to dp */ + } else if (count == sizeof(odp)) { + if (copy_from_user(&odp, data, sizeof(odp))) { + ret = -EFAULT; + goto bail; + } + dp.len = odp.len; dp.unit = odp.unit; dp.data = odp.data; - dp.len = odp.len; - dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */ + dp.pbc_wd = 0; + } else { + ret = -EINVAL; + goto bail; } /* send count must be an exact number of dwords */ @@ -379,7 +361,7 @@ goto bail; } - clen = dp.len >> 2; + plen = dp.len >> 2; dd = ipath_lookup(dp.unit); if (!dd || !(dd->ipath_flags & IPATH_PRESENT) || @@ -422,16 +404,22 @@ goto bail; } - /* need total length before first word written */ - /* +1 word is for the qword padding */ - plen = sizeof(u32) + dp.len; - - if ((plen + 4) > dd->ipath_ibmaxlen) { + /* + * need total length before first word written, plus 2 Dwords. One Dword + * is for padding so we get the full user data when not aligned on + * a word boundary. The other Dword is to make sure we have room for the + * ICRC which gets tacked on later. 
+ */ + maxlen_reserve = 2 * sizeof(u32); + if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) { ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n", - plen - 4, dd->ipath_ibmaxlen); + dp.len, dd->ipath_ibmaxlen); ret = -EINVAL; - goto bail; /* before writing pbc */ + goto bail; } + + plen = sizeof(u32) + dp.len; + tmpbuf = vmalloc(plen); if (!tmpbuf) { dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, " @@ -473,11 +461,11 @@ */ if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) { ipath_flush_wc(); - __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1); + __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1); ipath_flush_wc(); - __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); + __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1); } else - __iowrite32_copy(piobuf + 2, tmpbuf, clen); + __iowrite32_copy(piobuf + 2, tmpbuf, plen); ipath_flush_wc(); --- linux-3.13.0.orig/drivers/infiniband/hw/ipath/ipath_qp.c +++ linux-3.13.0/drivers/infiniband/hw/ipath/ipath_qp.c @@ -463,7 +463,7 @@ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, - attr_mask)) + attr_mask, IB_LINK_LAYER_UNSPECIFIED)) goto inval; if (attr_mask & IB_QP_AV) { --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/Kconfig +++ linux-3.13.0/drivers/infiniband/hw/mlx4/Kconfig @@ -1,6 +1,6 @@ config MLX4_INFINIBAND tristate "Mellanox ConnectX HCA support" - depends on NETDEVICES && ETHERNET && PCI + depends on NETDEVICES && ETHERNET && PCI && INET select NET_VENDOR_MELLANOX select MLX4_CORE ---help--- --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/ah.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/ah.c @@ -39,25 +39,6 @@ #include "mlx4_ib.h" -int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr, - u8 *mac, int *is_mcast, u8 port) -{ - struct in6_addr in6; - - *is_mcast = 0; - - memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6); - if (rdma_link_local_addr(&in6)) - rdma_get_ll_mac(&in6, mac); - else if (rdma_is_multicast_addr(&in6)) { - rdma_get_mcast_mac(&in6, mac); - *is_mcast = 1; - } else - return -EINVAL; - - return 0; -} - static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, struct mlx4_ib_ah *ah) { @@ -92,21 +73,18 @@ { struct mlx4_ib_dev *ibdev = to_mdev(pd->device); struct mlx4_dev *dev = ibdev->dev; - union ib_gid sgid; - u8 mac[6]; - int err; int is_mcast; + struct in6_addr in6; u16 vlan_tag; - err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num); - if (err) - return ERR_PTR(err); - - memcpy(ah->av.eth.mac, mac, 6); - err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid); - if (err) - return ERR_PTR(err); - vlan_tag = rdma_get_vlan_id(&sgid); + memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); + if (rdma_is_multicast_addr(&in6)) { + is_mcast = 1; + rdma_get_mcast_mac(&in6, ah->av.eth.mac); + } else { + memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN); + } + vlan_tag = ah_attr->vlan_id; if (vlan_tag < 0x1000) vlan_tag |= (ah_attr->sl & 7) << 13; ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/alias_GUID.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -154,7 +154,7 @@ continue; slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ; - if (slave_id >= dev->dev->num_slaves) + if (slave_id >= dev->dev->num_vfs + 1) return; tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE]; form_cache_ag = get_cached_alias_guid(dev, port_num, --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/cm.c +++ 
linux-3.13.0/drivers/infiniband/hw/mlx4/cm.c @@ -61,6 +61,11 @@ __be32 remote_comm_id; }; +struct cm_sidr_generic_msg { + struct ib_mad_hdr hdr; + __be32 request_id; +}; + struct cm_req_msg { unsigned char unused[0x60]; union ib_gid primary_path_sgid; @@ -69,28 +74,62 @@ static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) { - struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; - msg->local_comm_id = cpu_to_be32(cm_id); + if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { + struct cm_sidr_generic_msg *msg = + (struct cm_sidr_generic_msg *)mad; + msg->request_id = cpu_to_be32(cm_id); + } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { + pr_err("trying to set local_comm_id in SIDR_REP\n"); + return; + } else { + struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; + msg->local_comm_id = cpu_to_be32(cm_id); + } } static u32 get_local_comm_id(struct ib_mad *mad) { - struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; - - return be32_to_cpu(msg->local_comm_id); + if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { + struct cm_sidr_generic_msg *msg = + (struct cm_sidr_generic_msg *)mad; + return be32_to_cpu(msg->request_id); + } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { + pr_err("trying to set local_comm_id in SIDR_REP\n"); + return -1; + } else { + struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; + return be32_to_cpu(msg->local_comm_id); + } } static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id) { - struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; - msg->remote_comm_id = cpu_to_be32(cm_id); + if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { + struct cm_sidr_generic_msg *msg = + (struct cm_sidr_generic_msg *)mad; + msg->request_id = cpu_to_be32(cm_id); + } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { + pr_err("trying to set remote_comm_id in SIDR_REQ\n"); + return; + } else { + struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; + msg->remote_comm_id = cpu_to_be32(cm_id); + } } static u32 get_remote_comm_id(struct ib_mad *mad) { - struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; - - return be32_to_cpu(msg->remote_comm_id); + if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { + struct cm_sidr_generic_msg *msg = + (struct cm_sidr_generic_msg *)mad; + return be32_to_cpu(msg->request_id); + } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { + pr_err("trying to set remote_comm_id in SIDR_REQ\n"); + return -1; + } else { + struct cm_generic_msg *msg = (struct cm_generic_msg *)mad; + return be32_to_cpu(msg->remote_comm_id); + } } static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad) @@ -282,19 +321,21 @@ u32 sl_cm_id; int pv_cm_id = -1; - sl_cm_id = get_local_comm_id(mad); - if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || - mad->mad_hdr.attr_id == CM_REP_ATTR_ID) { + mad->mad_hdr.attr_id == CM_REP_ATTR_ID || + mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { + sl_cm_id = get_local_comm_id(mad); id = id_map_alloc(ibdev, slave_id, sl_cm_id); if (IS_ERR(id)) { mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n", __func__, slave_id, sl_cm_id); return PTR_ERR(id); } - } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) { + } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID || + mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) { return 0; } else { + sl_cm_id = get_local_comm_id(mad); id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); } @@ -315,14 +356,18 @@ } int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, - struct 
ib_mad *mad) + struct ib_mad *mad) { u32 pv_cm_id; struct id_map_entry *id; - if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) { + if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID || + mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { union ib_gid gid; + if (!slave) + return 0; + gid = gid_from_req_msg(ibdev, mad); *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); if (*slave < 0) { @@ -341,7 +386,8 @@ return -ENOENT; } - *slave = id->slave_id; + if (slave) + *slave = id->slave_id; set_remote_comm_id(mad, id->sl_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/cq.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/cq.c @@ -564,7 +564,7 @@ } static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, - unsigned tail, struct mlx4_cqe *cqe) + unsigned tail, struct mlx4_cqe *cqe, int is_eth) { struct mlx4_ib_proxy_sqp_hdr *hdr; @@ -574,12 +574,20 @@ DMA_FROM_DEVICE); hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr); wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index); - wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); - wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF; wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0; wc->dlid_path_bits = 0; + if (is_eth) { + wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); + memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); + memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); + wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); + } else { + wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32); + wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12); + } + return 0; } @@ -594,6 +602,7 @@ struct mlx4_srq *msrq = NULL; int is_send; int is_error; + int is_eth; u32 g_mlpath_rqpn; u16 wqe_ctr; unsigned tail = 0; @@ -778,11 +787,15 @@ break; } + is_eth = (rdma_port_get_link_layer(wc->qp->device, + (*cur_qp)->port) == + IB_LINK_LAYER_ETHERNET); if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { if ((*cur_qp)->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) - return use_tunnel_data(*cur_qp, cq, wc, tail, cqe); + return use_tunnel_data(*cur_qp, cq, wc, tail, + cqe, is_eth); } wc->slid = be16_to_cpu(cqe->rlid); @@ -793,11 +806,21 @@ wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum) ? 
IB_WC_IP_CSUM_OK : 0; - if (rdma_port_get_link_layer(wc->qp->device, - (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET) + if (is_eth) { wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; - else + if (be32_to_cpu(cqe->vlan_my_qpn) & + MLX4_CQE_VLAN_PRESENT_MASK) { + wc->vlan_id = be16_to_cpu(cqe->sl_vid) & + MLX4_CQE_VID_MASK; + } else { + wc->vlan_id = 0xffff; + } + memcpy(wc->smac, cqe->smac, ETH_ALEN); + wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); + } else { wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; + wc->vlan_id = 0xffff; + } } return 0; --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/mad.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/mad.c @@ -467,6 +467,7 @@ int ret = 0; u16 tun_pkey_ix; u16 cached_pkey; + u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; if (dest_qpt > IB_QPT_GSI) return -EINVAL; @@ -509,6 +510,10 @@ * The driver will set the force loopback bit in post_send */ memset(&attr, 0, sizeof attr); attr.port_num = port; + if (is_eth) { + memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16); + attr.ah_flags = IB_AH_GRH; + } ah = ib_create_ah(tun_ctx->pd, &attr); if (IS_ERR(ah)) return -ENOMEM; @@ -540,11 +545,36 @@ /* adjust tunnel data */ tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix); - tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12); - tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid); tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF); tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0; + if (is_eth) { + u16 vlan = 0; + if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan, + NULL)) { + /* VST mode */ + if (vlan != wc->vlan_id) + /* Packet vlan is not the VST-assigned vlan. + * Drop the packet. + */ + goto out; + else + /* Remove the vlan tag before forwarding + * the packet to the VF. 
+ */ + vlan = 0xffff; + } else { + vlan = wc->vlan_id; + } + + tun_mad->hdr.sl_vid = cpu_to_be16(vlan); + memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4); + memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2); + } else { + tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12); + tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid); + } + ib_dma_sync_single_for_device(&dev->ib_dev, tun_qp->tx_ring[tun_tx_ix].buf.map, sizeof (struct mlx4_rcv_tunnel_mad), @@ -580,6 +610,41 @@ int err; int slave; u8 *slave_id; + int is_eth = 0; + + if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND) + is_eth = 0; + else + is_eth = 1; + + if (is_eth) { + if (!(wc->wc_flags & IB_WC_GRH)) { + mlx4_ib_warn(ibdev, "RoCE grh not present.\n"); + return -EINVAL; + } + if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) { + mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n"); + return -EINVAL; + } + if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) { + mlx4_ib_warn(ibdev, "failed matching grh\n"); + return -ENOENT; + } + if (slave >= dev->dev->caps.sqp_demux) { + mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n", + slave, dev->dev->caps.sqp_demux); + return -ENOENT; + } + + if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad)) + return 0; + + err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); + if (err) + pr_debug("failed sending to slave %d via tunnel qp (%d)\n", + slave, err); + return 0; + } /* Initially assume that this mad is for us */ slave = mlx4_master_func_num(dev->dev); @@ -1076,8 +1141,9 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, - enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, - u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad) + enum ib_qp_type dest_qpt, u16 pkey_index, + u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr, + u8 *s_mac, struct ib_mad *mad) { struct ib_sge list; struct ib_send_wr wr, *bad_wr; @@ -1166,6 +1232,9 @@ wr.num_sge = 1; wr.opcode = IB_WR_SEND; wr.send_flags = IB_SEND_SIGNALED; + if (s_mac) + memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6); + ret = ib_post_send(send_qp, &wr, &bad_wr); out: @@ -1174,6 +1243,22 @@ return ret; } +static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port) +{ + if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) + return slave; + return mlx4_get_base_gid_ix(dev->dev, slave, port); +} + +static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port, + struct ib_ah_attr *ah_attr) +{ + if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND) + ah_attr->grh.sgid_index = slave; + else + ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port); +} + static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc) { struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); @@ -1184,6 +1269,7 @@ struct ib_ah_attr ah_attr; u8 *slave_id; int slave; + int port; /* Get slave that sent this packet */ if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn || @@ -1260,12 +1346,18 @@ memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av)); ah.ibah.device = ctx->ib_dev; mlx4_ib_query_ah(&ah.ibah, &ah_attr); - if ((ah_attr.ah_flags & IB_AH_GRH) && - (ah_attr.grh.sgid_index != slave)) { - mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n", - slave, ah_attr.grh.sgid_index); + if (ah_attr.ah_flags & IB_AH_GRH) + fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); + + port = mlx4_slave_convert_port(dev->dev, 
slave, ah_attr.port_num); + if (port < 0) return; - } + ah_attr.port_num = port; + memcpy(ah_attr.dmac, tunnel->hdr.mac, 6); + ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan); + /* if slave have default vlan use it */ + mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, + &ah_attr.vlan_id, &ah_attr.sl); mlx4_ib_send_to_wire(dev, slave, ctx->port, is_proxy_qp0(dev, wc->src_qp, slave) ? @@ -1273,7 +1365,7 @@ be16_to_cpu(tunnel->hdr.pkey_index), be32_to_cpu(tunnel->hdr.remote_qpn), be32_to_cpu(tunnel->hdr.qkey), - &ah_attr, &tunnel->mad); + &ah_attr, wc->smac, &tunnel->mad); } static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, @@ -1850,7 +1942,15 @@ ctx->port = port; ctx->ib_dev = &dev->ib_dev; - for (i = 0; i < dev->dev->caps.sqp_demux; i++) { + for (i = 0; + i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1)); + i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev->dev, i); + + if (!test_bit(port - 1, actv_ports.ports)) + continue; + ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); if (ret) { ret = -ENOMEM; --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/main.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/main.c @@ -39,6 +39,8 @@ #include #include #include +#include +#include #include #include @@ -51,10 +53,11 @@ #include "user.h" #define DRV_NAME MLX4_IB_DRV_NAME -#define DRV_VERSION "1.0" -#define DRV_RELDATE "April 4, 2008" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb 2014" #define MLX4_IB_FLOW_MAX_PRIO 0xFFF +#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); @@ -92,21 +95,27 @@ static int check_flow_steering_support(struct mlx4_dev *dev) { + int eth_num_ports = 0; int ib_num_ports = 0; - int i; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) - ib_num_ports++; + int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED; - if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { - if (ib_num_ports || mlx4_is_mfunc(dev)) { - pr_warn("Device managed flow steering is unavailable " - "for IB ports or in multifunction env.\n"); - return 0; + if (dmfs) { + int i; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) + eth_num_ports++; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_num_ports++; + dmfs &= (!ib_num_ports || + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) && + (!eth_num_ports || + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)); + if (ib_num_ports && mlx4_is_mfunc(dev)) { + pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n"); + dmfs = 0; } - return 1; } - return 0; + return dmfs; } static int mlx4_ib_query_device(struct ib_device *ibdev, @@ -165,7 +174,7 @@ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; else props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; - if (check_flow_steering_support(dev->dev)) + if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; } @@ -338,7 +347,7 @@ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? 
IB_WIDTH_4X : IB_WIDTH_1X; props->active_speed = IB_SPEED_QDR; - props->port_cap_flags = IB_PORT_CM_SUP; + props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS; props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; props->max_msg_sz = mdev->dev->caps.max_msg_sz; props->pkey_tbl_len = 1; @@ -787,7 +796,6 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, union ib_gid *gid) { - u8 mac[6]; struct net_device *ndev; int ret = 0; @@ -801,11 +809,7 @@ spin_unlock(&mdev->iboe.lock); if (ndev) { - rdma_get_mcast_mac((struct in6_addr *)gid, mac); - rtnl_lock(); - dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac); ret = 1; - rtnl_unlock(); dev_put(ndev); } @@ -819,6 +823,7 @@ }; static int parse_flow_attr(struct mlx4_dev *dev, + u32 qp_num, union ib_flow_spec *ib_spec, struct _rule_hw *mlx4_spec) { @@ -834,6 +839,14 @@ mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag; mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag; break; + case IB_FLOW_SPEC_IB: + type = MLX4_NET_TRANS_RULE_ID_IB; + mlx4_spec->ib.l3_qpn = + cpu_to_be32(qp_num); + mlx4_spec->ib.qpn_mask = + cpu_to_be32(MLX4_IB_FLOW_QPN_MASK); + break; + case IB_FLOW_SPEC_IPV4: type = MLX4_NET_TRANS_RULE_ID_IPV4; @@ -865,6 +878,115 @@ return mlx4_hw_rule_sz(dev, type); } +struct default_rules { + __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS]; + __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS]; + __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS]; + __u8 link_layer; +}; +static const struct default_rules default_table[] = { + { + .mandatory_fields = {IB_FLOW_SPEC_IPV4}, + .mandatory_not_fields = {IB_FLOW_SPEC_ETH}, + .rules_create_list = {IB_FLOW_SPEC_IB}, + .link_layer = IB_LINK_LAYER_INFINIBAND + } +}; + +static int __mlx4_ib_default_rules_match(struct ib_qp *qp, + struct ib_flow_attr *flow_attr) +{ + int i, j, k; + void *ib_flow; + const struct default_rules *pdefault_rules = default_table; + u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); + + for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++, + pdefault_rules++) { + __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; + memset(&field_types, 0, sizeof(field_types)); + + if (link_layer != pdefault_rules->link_layer) + continue; + + ib_flow = flow_attr + 1; + /* we assume the specs are sorted */ + for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS && + j < flow_attr->num_of_specs; k++) { + union ib_flow_spec *current_flow = + (union ib_flow_spec *)ib_flow; + + /* same layer but different type */ + if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) == + (pdefault_rules->mandatory_fields[k] & + IB_FLOW_SPEC_LAYER_MASK)) && + (current_flow->type != + pdefault_rules->mandatory_fields[k])) + goto out; + + /* same layer, try match next one */ + if (current_flow->type == + pdefault_rules->mandatory_fields[k]) { + j++; + ib_flow += + ((union ib_flow_spec *)ib_flow)->size; + } + } + + ib_flow = flow_attr + 1; + for (j = 0; j < flow_attr->num_of_specs; + j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size) + for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++) + /* same layer and same type */ + if (((union ib_flow_spec *)ib_flow)->type == + pdefault_rules->mandatory_not_fields[k]) + goto out; + + return i; + } +out: + return -1; +} + +static int __mlx4_ib_create_default_rules( + struct mlx4_ib_dev *mdev, + struct ib_qp *qp, + const struct default_rules *pdefault_rules, + struct _rule_hw *mlx4_spec) { + int size = 0; + int i; + + for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/ + 
sizeof(pdefault_rules->rules_create_list[0]); i++) { + int ret; + union ib_flow_spec ib_spec; + switch (pdefault_rules->rules_create_list[i]) { + case 0: + /* no rule */ + continue; + case IB_FLOW_SPEC_IB: + ib_spec.type = IB_FLOW_SPEC_IB; + ib_spec.size = sizeof(struct ib_flow_spec_ib); + + break; + default: + /* invalid rule */ + return -EINVAL; + } + /* We must put empty rule, qpn is being ignored */ + ret = parse_flow_attr(mdev->dev, 0, &ib_spec, + mlx4_spec); + if (ret < 0) { + pr_info("invalid parsing\n"); + return -EINVAL; + } + + mlx4_spec = (void *)mlx4_spec + ret; + size += ret; + } + return size; +} + static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain, enum mlx4_net_trans_promisc_mode flow_type, @@ -876,6 +998,7 @@ struct mlx4_ib_dev *mdev = to_mdev(qp->device); struct mlx4_cmd_mailbox *mailbox; struct mlx4_net_trans_rule_hw_ctrl *ctrl; + int default_flow; static const u16 __mlx4_domain[] = { [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS, @@ -910,8 +1033,21 @@ ib_flow = flow_attr + 1; size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); + /* Add default flows */ + default_flow = __mlx4_ib_default_rules_match(qp, flow_attr); + if (default_flow >= 0) { + ret = __mlx4_ib_create_default_rules( + mdev, qp, default_table + default_flow, + mailbox->buf + size); + if (ret < 0) { + mlx4_free_cmd_mailbox(mdev->dev, mailbox); + return -EINVAL; + } + size += ret; + } for (i = 0; i < flow_attr->num_of_specs; i++) { - ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size); + ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow, + mailbox->buf + size); if (ret < 0) { mlx4_free_cmd_mailbox(mdev->dev, mailbox); return -EINVAL; @@ -1025,6 +1161,8 @@ struct mlx4_ib_qp *mqp = to_mqp(ibqp); u64 reg_id; struct mlx4_ib_steering *ib_steering = NULL; + enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? + MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { @@ -1036,7 +1174,7 @@ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), - MLX4_PROT_IB_IPV6, ®_id); + prot, ®_id); if (err) goto err_malloc; @@ -1055,7 +1193,7 @@ err_add: mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, - MLX4_PROT_IB_IPV6, reg_id); + prot, reg_id); err_malloc: kfree(ib_steering); @@ -1083,10 +1221,11 @@ int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); struct mlx4_ib_qp *mqp = to_mqp(ibqp); - u8 mac[6]; struct net_device *ndev; struct mlx4_ib_gid_entry *ge; u64 reg_id = 0; + enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 
+ MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { @@ -1109,7 +1248,7 @@ } err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, - MLX4_PROT_IB_IPV6, reg_id); + prot, reg_id); if (err) return err; @@ -1121,13 +1260,8 @@ if (ndev) dev_hold(ndev); spin_unlock(&mdev->iboe.lock); - rdma_get_mcast_mac((struct in6_addr *)gid, mac); - if (ndev) { - rtnl_lock(); - dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac); - rtnl_unlock(); + if (ndev) dev_put(ndev); - } list_del(&ge->list); kfree(ge); } else @@ -1223,7 +1357,8 @@ &dev_attr_board_id }; -static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev) +static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, + struct net_device *dev) { memcpy(eui, dev->dev_addr, 3); memcpy(eui + 5, dev->dev_addr + 3, 3); @@ -1259,161 +1394,377 @@ MLX4_CMD_WRAPPED); if (err) pr_warn("set port command failed\n"); - else { - memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); + else mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); + + mlx4_free_cmd_mailbox(dev, mailbox); + kfree(gw); +} + +static void reset_gids_task(struct work_struct *work) +{ + struct update_gid_work *gw = + container_of(work, struct update_gid_work, work); + struct mlx4_cmd_mailbox *mailbox; + union ib_gid *gids; + int err; + struct mlx4_dev *dev = gw->dev->dev; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) { + pr_warn("reset gid table failed\n"); + goto free; + } + + gids = mailbox->buf; + memcpy(gids, gw->gids, sizeof(gw->gids)); + + if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) == + IB_LINK_LAYER_ETHERNET) { + err = mlx4_cmd(dev, mailbox->dma, + MLX4_SET_PORT_GID_TABLE << 8 | gw->port, + 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_WRAPPED); + if (err) + pr_warn(KERN_WARNING + "set port %d command failed\n", gw->port); } mlx4_free_cmd_mailbox(dev, mailbox); +free: kfree(gw); } -static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear) +static int update_gid_table(struct mlx4_ib_dev *dev, int port, + union ib_gid *gid, int clear, + int default_gid) { - struct net_device *ndev = dev->iboe.netdevs[port - 1]; struct update_gid_work *work; - struct net_device *tmp; int i; - u8 *hits; - int ret; - union ib_gid gid; - int free; - int found; int need_update = 0; - u16 vid; + int free = -1; + int found = -1; + int max_gids; - work = kzalloc(sizeof *work, GFP_ATOMIC); - if (!work) - return -ENOMEM; - - hits = kzalloc(128, GFP_ATOMIC); - if (!hits) { - ret = -ENOMEM; - goto out; - } - - rcu_read_lock(); - for_each_netdev_rcu(&init_net, tmp) { - if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { - gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - vid = rdma_vlan_dev_vlan_id(tmp); - mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev); - found = 0; - free = -1; - for (i = 0; i < 128; ++i) { - if (free < 0 && - !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) - free = i; - if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) { - hits[i] = 1; - found = 1; + if (default_gid) { + free = 0; + } else { + max_gids = dev->dev->caps.gid_table_len[port]; + for (i = 1; i < max_gids; ++i) { + if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid, + sizeof(*gid))) + found = i; + + if (clear) { + if (found >= 0) { + need_update = 1; + dev->iboe.gid_table[port - 1][found] = + zgid; break; } - } + } else { + if (found >= 0) + break; - if (!found) { - if (tmp == ndev && - 
(memcmp(&dev->iboe.gid_table[port - 1][0], - &gid, sizeof gid) || - !memcmp(&dev->iboe.gid_table[port - 1][0], - &zgid, sizeof gid))) { - dev->iboe.gid_table[port - 1][0] = gid; - ++need_update; - hits[0] = 1; - } else if (free >= 0) { - dev->iboe.gid_table[port - 1][free] = gid; - hits[free] = 1; - ++need_update; - } + if (free < 0 && + !memcmp(&dev->iboe.gid_table[port - 1][i], + &zgid, sizeof(*gid))) + free = i; } } } - rcu_read_unlock(); - for (i = 0; i < 128; ++i) - if (!hits[i]) { - if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) - ++need_update; - dev->iboe.gid_table[port - 1][i] = zgid; - } + if (found == -1 && !clear && free >= 0) { + dev->iboe.gid_table[port - 1][free] = *gid; + need_update = 1; + } - if (need_update) { - memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids); - INIT_WORK(&work->work, update_gids_task); - work->port = port; - work->dev = dev; - queue_work(wq, &work->work); - } else - kfree(work); + if (!need_update) + return 0; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; + + memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids)); + INIT_WORK(&work->work, update_gids_task); + work->port = port; + work->dev = dev; + queue_work(wq, &work->work); - kfree(hits); return 0; +} -out: - kfree(work); - return ret; +static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid) +{ + gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); + mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev); } -static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event) + +static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port) { - switch (event) { - case NETDEV_UP: - case NETDEV_CHANGEADDR: - update_ipv6_gids(dev, port, 0); - break; + struct update_gid_work *work; - case NETDEV_DOWN: - update_ipv6_gids(dev, port, 1); - dev->iboe.netdevs[port - 1] = NULL; - } + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; + + memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids)); + memset(work->gids, 0, sizeof(work->gids)); + INIT_WORK(&work->work, reset_gids_task); + work->dev = dev; + work->port = port; + queue_work(wq, &work->work); + return 0; } -static void netdev_added(struct mlx4_ib_dev *dev, int port) +static int mlx4_ib_addr_event(int event, struct net_device *event_netdev, + struct mlx4_ib_dev *ibdev, union ib_gid *gid) { - update_ipv6_gids(dev, port, 0); + struct mlx4_ib_iboe *iboe; + int port = 0; + struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ? 
+ rdma_vlan_dev_real_dev(event_netdev) : + event_netdev; + union ib_gid default_gid; + + mlx4_make_default_gid(real_dev, &default_gid); + + if (!memcmp(gid, &default_gid, sizeof(*gid))) + return 0; + + if (event != NETDEV_DOWN && event != NETDEV_UP) + return 0; + + if ((real_dev != event_netdev) && + (event == NETDEV_DOWN) && + rdma_link_local_addr((struct in6_addr *)gid)) + return 0; + + iboe = &ibdev->iboe; + spin_lock(&iboe->lock); + + for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) + if ((netif_is_bond_master(real_dev) && + (real_dev == iboe->masters[port - 1])) || + (!netif_is_bond_master(real_dev) && + (real_dev == iboe->netdevs[port - 1]))) + update_gid_table(ibdev, port, gid, + event == NETDEV_DOWN, 0); + + spin_unlock(&iboe->lock); + return 0; + } -static void netdev_removed(struct mlx4_ib_dev *dev, int port) +static u8 mlx4_ib_get_dev_port(struct net_device *dev, + struct mlx4_ib_dev *ibdev) { - update_ipv6_gids(dev, port, 1); + u8 port = 0; + struct mlx4_ib_iboe *iboe; + struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ? + rdma_vlan_dev_real_dev(dev) : dev; + + iboe = &ibdev->iboe; + + for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) + if ((netif_is_bond_master(real_dev) && + (real_dev == iboe->masters[port - 1])) || + (!netif_is_bond_master(real_dev) && + (real_dev == iboe->netdevs[port - 1]))) + break; + + if ((port == 0) || (port > ibdev->dev->caps.num_ports)) + return 0; + else + return port; } -static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, +static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct mlx4_ib_dev *ibdev; + struct in_ifaddr *ifa = ptr; + union ib_gid gid; + struct net_device *event_netdev = ifa->ifa_dev->dev; + + ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid); + + ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet); + + mlx4_ib_addr_event(event, event_netdev, ibdev, &gid); + return NOTIFY_DONE; +} + +#if IS_ENABLED(CONFIG_IPV6) +static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct mlx4_ib_dev *ibdev; - struct net_device *oldnd; + struct inet6_ifaddr *ifa = ptr; + union ib_gid *gid = (union ib_gid *)&ifa->addr; + struct net_device *event_netdev = ifa->idev->dev; + + ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6); + + mlx4_ib_addr_event(event, event_netdev, ibdev, gid); + return NOTIFY_DONE; +} +#endif + +static void mlx4_ib_get_dev_addr(struct net_device *dev, + struct mlx4_ib_dev *ibdev, u8 port) +{ + struct in_device *in_dev; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_dev *in6_dev; + union ib_gid *pgid; + struct inet6_ifaddr *ifp; +#endif + union ib_gid gid; + + + if ((port == 0) || (port > ibdev->dev->caps.num_ports)) + return; + + /* IPv4 gids */ + in_dev = in_dev_get(dev); + if (in_dev) { + for_ifa(in_dev) { + /*ifa->ifa_address;*/ + ipv6_addr_set_v4mapped(ifa->ifa_address, + (struct in6_addr *)&gid); + update_gid_table(ibdev, port, &gid, 0, 0); + } + endfor_ifa(in_dev); + in_dev_put(in_dev); + } +#if IS_ENABLED(CONFIG_IPV6) + /* IPv6 gids */ + in6_dev = in6_dev_get(dev); + if (in6_dev) { + read_lock_bh(&in6_dev->lock); + list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { + pgid = (union ib_gid *)&ifp->addr; + update_gid_table(ibdev, port, pgid, 0, 0); + } + read_unlock_bh(&in6_dev->lock); + in6_dev_put(in6_dev); + } +#endif +} + +static void mlx4_ib_set_default_gid(struct mlx4_ib_dev 
*ibdev, + struct net_device *dev, u8 port) +{ + union ib_gid gid; + mlx4_make_default_gid(dev, &gid); + update_gid_table(ibdev, port, &gid, 0, 1); +} + +static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) +{ + struct net_device *dev; + struct mlx4_ib_iboe *iboe = &ibdev->iboe; + int i; + + for (i = 1; i <= ibdev->num_ports; ++i) + if (reset_gid_table(ibdev, i)) + return -1; + + read_lock(&dev_base_lock); + spin_lock(&iboe->lock); + + for_each_netdev(&init_net, dev) { + u8 port = mlx4_ib_get_dev_port(dev, ibdev); + if (port) + mlx4_ib_get_dev_addr(dev, ibdev, port); + } + + spin_unlock(&iboe->lock); + read_unlock(&dev_base_lock); + + return 0; +} + +static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) +{ struct mlx4_ib_iboe *iboe; int port; - if (!net_eq(dev_net(dev), &init_net)) - return NOTIFY_DONE; - - ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); iboe = &ibdev->iboe; spin_lock(&iboe->lock); mlx4_foreach_ib_transport_port(port, ibdev->dev) { - oldnd = iboe->netdevs[port - 1]; + enum ib_port_state port_state = IB_PORT_NOP; + struct net_device *old_master = iboe->masters[port - 1]; + struct net_device *curr_netdev; + struct net_device *curr_master; + iboe->netdevs[port - 1] = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); - if (oldnd != iboe->netdevs[port - 1]) { - if (iboe->netdevs[port - 1]) - netdev_added(ibdev, port); - else - netdev_removed(ibdev, port); + if (iboe->netdevs[port - 1]) + mlx4_ib_set_default_gid(ibdev, + iboe->netdevs[port - 1], port); + curr_netdev = iboe->netdevs[port - 1]; + + if (iboe->netdevs[port - 1] && + netif_is_bond_slave(iboe->netdevs[port - 1])) { + iboe->masters[port - 1] = netdev_master_upper_dev_get( + iboe->netdevs[port - 1]); + } else { + iboe->masters[port - 1] = NULL; } - } + curr_master = iboe->masters[port - 1]; - if (dev == iboe->netdevs[0] || - (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0])) - handle_en_event(ibdev, 1, event); - else if (dev == iboe->netdevs[1] - || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1])) - handle_en_event(ibdev, 2, event); + if (curr_netdev) { + port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? + IB_PORT_ACTIVE : IB_PORT_DOWN; + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + } else { + reset_gid_table(ibdev, port); + } + /* if using bonding/team and a slave port is down, we don't the bond IP + * based gids in the table since flows that select port by gid may get + * the down port. + */ + if (curr_master && (port_state == IB_PORT_DOWN)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + } + /* if bonding is used it is possible that we add it to masters + * only after IP address is assigned to the net bonding + * interface. 
+ */ + if (curr_master && (old_master != curr_master)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + mlx4_ib_get_dev_addr(curr_master, ibdev, port); + } + + if (!curr_master && (old_master != curr_master)) { + reset_gid_table(ibdev, port); + mlx4_ib_set_default_gid(ibdev, curr_netdev, port); + mlx4_ib_get_dev_addr(curr_netdev, ibdev, port); + } + } spin_unlock(&iboe->lock); +} + +static int mlx4_ib_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct mlx4_ib_dev *ibdev; + + if (!net_eq(dev_net(dev), &init_net)) + return NOTIFY_DONE; + + ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); + mlx4_ib_scan_netdevs(ibdev); return NOTIFY_DONE; } @@ -1533,17 +1884,10 @@ int i, j; int err; struct mlx4_ib_iboe *iboe; + int ib_num_ports = 0; pr_info_once("%s", mlx4_ib_version); - mlx4_foreach_non_ib_transport_port(i, dev) - num_ports++; - - if (mlx4_is_mfunc(dev) && num_ports) { - dev_err(&dev->pdev->dev, "RoCE is not supported over SRIOV as yet\n"); - return NULL; - } - num_ports = 0; mlx4_foreach_ib_transport_port(i, dev) num_ports++; @@ -1682,6 +2026,7 @@ } if (check_flow_steering_support(dev)) { + ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; ibdev->ib_dev.create_flow = mlx4_ib_create_flow; ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; @@ -1707,11 +2052,42 @@ ibdev->counters[i] = -1; } + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_num_ports++; + spin_lock_init(&ibdev->sm_lock); mutex_init(&ibdev->cap_mask_mutex); + if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && + ib_num_ports) { + ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; + err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, + MLX4_IB_UC_STEER_QPN_ALIGN, + &ibdev->steer_qpn_base); + if (err) + goto err_counter; + + ibdev->ib_uc_qpns_bitmap = + kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) * + sizeof(long), + GFP_KERNEL); + if (!ibdev->ib_uc_qpns_bitmap) { + dev_err(&dev->pdev->dev, "bit map alloc failed\n"); + goto err_steer_qp_release; + } + + bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count); + + err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE( + dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_base + + ibdev->steer_qpn_count - 1); + if (err) + goto err_steer_free_bitmap; + } + if (ib_register_device(&ibdev->ib_dev, NULL)) - goto err_counter; + goto err_steer_free_bitmap; if (mlx4_ib_mad_init(ibdev)) goto err_reg; @@ -1719,11 +2095,39 @@ if (mlx4_ib_init_sriov(ibdev)) goto err_mad; - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) { - iboe->nb.notifier_call = mlx4_ib_netdev_event; - err = register_netdevice_notifier(&iboe->nb); - if (err) - goto err_sriov; + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) { + if (!iboe->nb.notifier_call) { + iboe->nb.notifier_call = mlx4_ib_netdev_event; + err = register_netdevice_notifier(&iboe->nb); + if (err) { + iboe->nb.notifier_call = NULL; + goto err_notif; + } + } + if (!iboe->nb_inet.notifier_call) { + iboe->nb_inet.notifier_call = mlx4_ib_inet_event; + err = register_inetaddr_notifier(&iboe->nb_inet); + if (err) { + iboe->nb_inet.notifier_call = NULL; + goto err_notif; + } + } +#if IS_ENABLED(CONFIG_IPV6) + if (!iboe->nb_inet6.notifier_call) { + iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event; + err = register_inet6addr_notifier(&iboe->nb_inet6); + if (err) { + iboe->nb_inet6.notifier_call = NULL; + goto err_notif; + } + } +#endif + for (i = 1 ; i <= ibdev->num_ports ; ++i) 
+ reset_gid_table(ibdev, i); + rtnl_lock(); + mlx4_ib_scan_netdevs(ibdev); + rtnl_unlock(); + mlx4_ib_init_gid_table(ibdev); } for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) { @@ -1749,11 +2153,25 @@ return ibdev; err_notif: - if (unregister_netdevice_notifier(&ibdev->iboe.nb)) - pr_warn("failure unregistering notifier\n"); + if (ibdev->iboe.nb.notifier_call) { + if (unregister_netdevice_notifier(&ibdev->iboe.nb)) + pr_warn("failure unregistering notifier\n"); + ibdev->iboe.nb.notifier_call = NULL; + } + if (ibdev->iboe.nb_inet.notifier_call) { + if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet)) + pr_warn("failure unregistering notifier\n"); + ibdev->iboe.nb_inet.notifier_call = NULL; + } +#if IS_ENABLED(CONFIG_IPV6) + if (ibdev->iboe.nb_inet6.notifier_call) { + if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6)) + pr_warn("failure unregistering notifier\n"); + ibdev->iboe.nb_inet6.notifier_call = NULL; + } +#endif flush_workqueue(wq); -err_sriov: mlx4_ib_close_sriov(ibdev); err_mad: @@ -1762,6 +2180,13 @@ err_reg: ib_unregister_device(&ibdev->ib_dev); +err_steer_free_bitmap: + kfree(ibdev->ib_uc_qpns_bitmap); + +err_steer_qp_release: + if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); err_counter: for (; i; --i) if (ibdev->counters[i - 1] != -1) @@ -1782,6 +2207,69 @@ return NULL; } +int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) +{ + int offset; + + WARN_ON(!dev->ib_uc_qpns_bitmap); + + offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap, + dev->steer_qpn_count, + get_count_order(count)); + if (offset < 0) + return offset; + + *qpn = dev->steer_qpn_base + offset; + return 0; +} + +void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) +{ + if (!qpn || + dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED) + return; + + BUG_ON(qpn < dev->steer_qpn_base); + + bitmap_release_region(dev->ib_uc_qpns_bitmap, + qpn - dev->steer_qpn_base, + get_count_order(count)); +} + +int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, + int is_attach) +{ + int err; + size_t flow_size; + struct ib_flow_attr *flow = NULL; + struct ib_flow_spec_ib *ib_spec; + + if (is_attach) { + flow_size = sizeof(struct ib_flow_attr) + + sizeof(struct ib_flow_spec_ib); + flow = kzalloc(flow_size, GFP_KERNEL); + if (!flow) + return -ENOMEM; + flow->port = mqp->port; + flow->num_of_specs = 1; + flow->size = flow_size; + ib_spec = (struct ib_flow_spec_ib *)(flow + 1); + ib_spec->type = IB_FLOW_SPEC_IB; + ib_spec->size = sizeof(struct ib_flow_spec_ib); + /* Add an empty rule for IB L2 */ + memset(&ib_spec->mask, 0, sizeof(ib_spec->mask)); + + err = __mlx4_ib_create_flow(&mqp->ibqp, flow, + IB_FLOW_DOMAIN_NIC, + MLX4_FS_REGULAR, + &mqp->reg_id); + } else { + err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); + } + kfree(flow); + return err; +} + static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) { struct mlx4_ib_dev *ibdev = ibdev_ptr; @@ -1795,6 +2283,26 @@ pr_warn("failure unregistering notifier\n"); ibdev->iboe.nb.notifier_call = NULL; } + + if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_qp_release_range(dev, ibdev->steer_qpn_base, + ibdev->steer_qpn_count); + kfree(ibdev->ib_uc_qpns_bitmap); + } + + if (ibdev->iboe.nb_inet.notifier_call) { + if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet)) + pr_warn("failure unregistering notifier\n"); + ibdev->iboe.nb_inet.notifier_call = NULL; + } 
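/*
 * Aside on the inetaddr/inet6addr notifiers being torn down here: with
 * IP-based GIDs, each RoCE GID entry is derived from an IP address
 * configured on the port's net_device.  IPv6 addresses are copied in
 * as-is, while IPv4 addresses are stored as IPv4-mapped IPv6 addresses
 * (::ffff:a.b.c.d), which is what the ipv6_addr_set_v4mapped() calls in
 * mlx4_ib_inet_event() and mlx4_ib_get_dev_addr() produce.  The sketch
 * below only illustrates that byte layout; it is not part of the patch,
 * and ipv4_to_roce_gid() is a made-up name.
 */
#include <stdint.h>
#include <string.h>

static void ipv4_to_roce_gid(uint32_t ipv4_be, uint8_t gid[16])
{
	memset(gid, 0, 10);		/* gid[0..9]   = 0x00                  */
	gid[10] = 0xff;			/* gid[10..11] = 0xff 0xff             */
	gid[11] = 0xff;
	memcpy(&gid[12], &ipv4_be, 4);	/* gid[12..15] = a.b.c.d, wire order   */
}
/*
 * For context, update_gid_table() above keeps the default GID (fe80::/64
 * prefix plus an interface ID derived from the MAC by
 * mlx4_addrconf_ifid_eui48()) at table index 0 and fills IP-derived GIDs
 * from index 1 onwards.
 */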
+#if IS_ENABLED(CONFIG_IPV6) + if (ibdev->iboe.nb_inet6.notifier_call) { + if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6)) + pr_warn("failure unregistering notifier\n"); + ibdev->iboe.nb_inet6.notifier_call = NULL; + } +#endif + iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) if (ibdev->counters[p] != -1) @@ -1815,17 +2323,24 @@ struct mlx4_dev *dev = ibdev->dev; int i; unsigned long flags; + struct mlx4_active_ports actv_ports; + unsigned int ports; + unsigned int first_port; if (!mlx4_is_master(dev)) return; - dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC); + actv_ports = mlx4_get_active_ports(dev, slave); + ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); + + dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); if (!dm) { pr_err("failed to allocate memory for tunneling qp update\n"); goto out; } - for (i = 0; i < dev->caps.num_ports; i++) { + for (i = 0; i < ports; i++) { dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); if (!dm[i]) { pr_err("failed to allocate memory for tunneling qp update work struct\n"); @@ -1837,9 +2352,9 @@ } } /* initialize or tear down tunnel QPs for the slave */ - for (i = 0; i < dev->caps.num_ports; i++) { + for (i = 0; i < ports; i++) { INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); - dm[i]->port = i + 1; + dm[i]->port = first_port + i + 1; dm[i]->slave = slave; dm[i]->do_init = do_init; dm[i]->dev = ibdev; --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/mcg.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/mcg.c @@ -215,8 +215,9 @@ } mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); spin_unlock(&dev->sm_lock); - return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port, - IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad); + return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), + ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, + &ah_attr, NULL, mad); } static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx, --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ linux-3.13.0/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -68,6 +68,8 @@ /*module param to indicate if SM assigns the alias_GUID*/ extern int mlx4_ib_sm_guid_assign; +#define MLX4_IB_UC_STEER_QPN_ALIGN 1 +#define MLX4_IB_UC_MAX_NUM_QPS 256 struct mlx4_ib_ucontext { struct ib_ucontext ibucontext; struct mlx4_uar uar; @@ -153,6 +155,7 @@ enum mlx4_ib_qp_flags { MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, + MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP, MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30, MLX4_IB_SRIOV_SQP = 1 << 31, }; @@ -238,6 +241,22 @@ struct mlx4_rcv_tunnel_hdr tun; } __packed; +struct mlx4_roce_smac_vlan_info { + u64 smac; + int smac_index; + int smac_port; + u64 candidate_smac; + int candidate_smac_index; + int candidate_smac_port; + u16 vid; + int vlan_index; + int vlan_port; + u16 candidate_vid; + int candidate_vlan_index; + int candidate_vlan_port; + int update_vid; +}; + struct mlx4_ib_qp { struct ib_qp ibqp; struct mlx4_qp mqp; @@ -270,7 +289,9 @@ struct list_head gid_list; struct list_head steering_rules; struct mlx4_ib_buf *sqp_proxy_rcv; - + struct mlx4_roce_smac_vlan_info pri; + struct mlx4_roce_smac_vlan_info alt; + u64 reg_id; }; struct mlx4_ib_srq { @@ -428,7 +449,10 @@ struct mlx4_ib_iboe { spinlock_t lock; struct net_device *netdevs[MLX4_MAX_PORTS]; + struct net_device *masters[MLX4_MAX_PORTS]; struct notifier_block nb; + struct notifier_block 
nb_inet; + struct notifier_block nb_inet6; union ib_gid gid_table[MLX4_MAX_PORTS][128]; }; @@ -494,6 +518,10 @@ struct kobject *dev_ports_parent[MLX4_MFUNC_MAX]; struct mlx4_ib_iov_port iov_ports[MLX4_MAX_PORTS]; struct pkey_mgt pkeys; + unsigned long *ib_uc_qpns_bitmap; + int steer_qpn_count; + int steer_qpn_base; + int steering_support; }; struct ib_event_work { @@ -675,9 +703,6 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid, int netw_view); -int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr, - u8 *mac, int *is_mcast, u8 port); - static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) { u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3; @@ -712,9 +737,12 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, enum ib_qp_type qpt, struct ib_wc *wc, struct ib_grh *grh, struct ib_mad *mad); + int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn, - u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad); + u32 qkey, struct ib_ah_attr *attr, u8 *s_mac, + struct ib_mad *mad); + __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx); int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, @@ -752,5 +780,9 @@ __be64 mlx4_ib_gen_node_guid(void); +int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn); +void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count); +int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, + int is_attach); #endif /* MLX4_IB_H */ --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/qp.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/qp.c @@ -90,6 +90,21 @@ MLX4_RAW_QP_MSGMAX = 31, }; +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif +static inline u64 mlx4_mac_to_u64(u8 *addr) +{ + u64 mac = 0; + int i; + + for (i = 0; i < ETH_ALEN; i++) { + mac <<= 8; + mac |= addr[i]; + } + return mac; +} + static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), @@ -647,10 +662,14 @@ if (!sqp) return -ENOMEM; qp = &sqp->qp; + qp->pri.vid = 0xFFFF; + qp->alt.vid = 0xFFFF; } else { qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL); if (!qp) return -ENOMEM; + qp->pri.vid = 0xFFFF; + qp->alt.vid = 0xFFFF; } } else qp = *caller_qp; @@ -716,6 +735,14 @@ if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) qp->flags |= MLX4_IB_QP_LSO; + if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { + if (dev->steering_support == + MLX4_STEERING_MODE_DEVICE_MANAGED) + qp->flags |= MLX4_IB_QP_NETIF; + else + goto err; + } + err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); if (err) goto err; @@ -765,7 +792,11 @@ if (init_attr->qp_type == IB_QPT_RAW_PACKET) err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn); else - err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn); + if (qp->flags & MLX4_IB_QP_NETIF) + err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn); + else + err = mlx4_qp_reserve_range(dev->dev, 1, 1, + &qpn); if (err) goto err_proxy; } @@ -790,8 +821,12 @@ return 0; err_qpn: - if (!sqpn) - mlx4_qp_release_range(dev->dev, qpn, 1); + if (!sqpn) { + if (qp->flags & MLX4_IB_QP_NETIF) + mlx4_ib_steer_qp_free(dev, qpn, 1); + else + mlx4_qp_release_range(dev->dev, qpn, 1); + } err_proxy: if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) free_proxy_bufs(pd->device, qp); @@ -909,11 +944,32 @@ { struct mlx4_ib_cq *send_cq, *recv_cq; - if (qp->state != IB_QPS_RESET) + if 
(qp->state != IB_QPS_RESET) { if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) pr_warn("modify QP %06x to RESET failed.\n", qp->mqp.qpn); + if (qp->pri.smac) { + mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); + qp->pri.smac = 0; + } + if (qp->alt.smac) { + mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); + qp->alt.smac = 0; + } + if (qp->pri.vid < 0x1000) { + mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); + qp->pri.vid = 0xFFFF; + qp->pri.candidate_vid = 0xFFFF; + qp->pri.update_vid = 0; + } + if (qp->alt.vid < 0x1000) { + mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); + qp->alt.vid = 0xFFFF; + qp->alt.candidate_vid = 0xFFFF; + qp->alt.update_vid = 0; + } + } get_cqs(qp, &send_cq, &recv_cq); @@ -932,8 +988,12 @@ mlx4_qp_free(dev->dev, &qp->mqp); - if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) - mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); + if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { + if (qp->flags & MLX4_IB_QP_NETIF) + mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); + else + mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); + } mlx4_mtt_cleanup(dev->dev, &qp->mtt); @@ -987,9 +1047,16 @@ */ if (init_attr->create_flags & ~(MLX4_IB_QP_LSO | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | - MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP)) + MLX4_IB_SRIOV_TUNNEL_QP | + MLX4_IB_SRIOV_SQP | + MLX4_IB_QP_NETIF)) return ERR_PTR(-EINVAL); + if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { + if (init_attr->qp_type != IB_QPT_UD) + return ERR_PTR(-EINVAL); + } + if (init_attr->create_flags && (udata || ((init_attr->create_flags & ~MLX4_IB_SRIOV_SQP) && @@ -1015,6 +1082,8 @@ qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); + qp->pri.vid = 0xFFFF; + qp->alt.vid = 0xFFFF; /* fall through */ case IB_QPT_UD: { @@ -1144,16 +1213,16 @@ path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); } -static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, - struct mlx4_qp_path *path, u8 port) +static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, + u64 smac, u16 vlan_tag, struct mlx4_qp_path *path, + struct mlx4_roce_smac_vlan_info *smac_info, u8 port) { - int err; int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_ETHERNET; - u8 mac[6]; - int is_mcast; - u16 vlan_tag; int vidx; + int smac_index; + int err; + path->grh_mylmc = ah->src_path_bits & 0x7f; path->rlid = cpu_to_be16(ah->dlid); @@ -1182,36 +1251,105 @@ } if (is_eth) { - path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | - ((port - 1) << 6) | ((ah->sl & 7) << 3); - if (!(ah->ah_flags & IB_AH_GRH)) return -1; - err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port); - if (err) - return err; - - memcpy(path->dmac, mac, 6); - path->ackto = MLX4_IB_LINK_TYPE_ETH; - /* use index 0 into MAC table for IBoE */ - path->grh_mylmc &= 0x80; + path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | + ((port - 1) << 6) | ((ah->sl & 7) << 3); - vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]); + path->feup |= MLX4_FEUP_FORCE_ETH_UP; if (vlan_tag < 0x1000) { - if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx)) - return -ENOENT; - - path->vlan_index = vidx; + if (smac_info->vid < 0x1000) { + /* both valid vlan ids */ + if (smac_info->vid != vlan_tag) { + /* different VIDs. 
unreg old and reg new */ + err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); + if (err) + return err; + smac_info->candidate_vid = vlan_tag; + smac_info->candidate_vlan_index = vidx; + smac_info->candidate_vlan_port = port; + smac_info->update_vid = 1; + path->vlan_index = vidx; + } else { + path->vlan_index = smac_info->vlan_index; + } + } else { + /* no current vlan tag in qp */ + err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); + if (err) + return err; + smac_info->candidate_vid = vlan_tag; + smac_info->candidate_vlan_index = vidx; + smac_info->candidate_vlan_port = port; + smac_info->update_vid = 1; + path->vlan_index = vidx; + } + path->feup |= MLX4_FVL_FORCE_ETH_VLAN; path->fl = 1 << 6; + } else { + /* have current vlan tag. unregister it at modify-qp success */ + if (smac_info->vid < 0x1000) { + smac_info->candidate_vid = 0xFFFF; + smac_info->update_vid = 1; + } } - } else + + /* get smac_index for RoCE use. + * If no smac was yet assigned, register one. + * If one was already assigned, but the new mac differs, + * unregister the old one and register the new one. + */ + if (!smac_info->smac || smac_info->smac != smac) { + /* register candidate now, unreg if needed, after success */ + smac_index = mlx4_register_mac(dev->dev, port, smac); + if (smac_index >= 0) { + smac_info->candidate_smac_index = smac_index; + smac_info->candidate_smac = smac; + smac_info->candidate_smac_port = port; + } else { + return -EINVAL; + } + } else { + smac_index = smac_info->smac_index; + } + + memcpy(path->dmac, ah->dmac, 6); + path->ackto = MLX4_IB_LINK_TYPE_ETH; + /* put MAC table smac index for IBoE */ + path->grh_mylmc = (u8) (smac_index) | 0x80; + } else { path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((port - 1) << 6) | ((ah->sl & 0xf) << 2); + } return 0; } +static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, + enum ib_qp_attr_mask qp_attr_mask, + struct mlx4_ib_qp *mqp, + struct mlx4_qp_path *path, u8 port) +{ + return _mlx4_set_path(dev, &qp->ah_attr, + mlx4_mac_to_u64((u8 *)qp->smac), + (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff, + path, &mqp->pri, port); +} + +static int mlx4_set_alt_path(struct mlx4_ib_dev *dev, + const struct ib_qp_attr *qp, + enum ib_qp_attr_mask qp_attr_mask, + struct mlx4_ib_qp *mqp, + struct mlx4_qp_path *path, u8 port) +{ + return _mlx4_set_path(dev, &qp->alt_ah_attr, + mlx4_mac_to_u64((u8 *)qp->alt_smac), + (qp_attr_mask & IB_QP_ALT_VID) ? 
+ qp->alt_vlan_id : 0xffff, + path, &mqp->alt, port); +} + static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) { struct mlx4_ib_gid_entry *ge, *tmp; @@ -1224,6 +1362,37 @@ } } +static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, + struct mlx4_qp_context *context) +{ + struct net_device *ndev; + u64 u64_mac; + int smac_index; + + + ndev = dev->iboe.netdevs[qp->port - 1]; + if (ndev) { + smac = ndev->dev_addr; + u64_mac = mlx4_mac_to_u64(smac); + } else { + u64_mac = dev->dev->caps.def_mac[qp->port]; + } + + context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); + if (!qp->pri.smac) { + smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); + if (smac_index >= 0) { + qp->pri.candidate_smac_index = smac_index; + qp->pri.candidate_smac = u64_mac; + qp->pri.candidate_smac_port = qp->port; + context->pri_path.grh_mylmc = 0x80 | (u8) smac_index; + } else { + return -ENOENT; + } + } + return 0; +} + static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) @@ -1235,6 +1404,7 @@ struct mlx4_qp_context *context; enum mlx4_qp_optpar optpar = 0; int sqd_event; + int steer_qp = 0; int err = -EINVAL; context = kzalloc(sizeof *context, GFP_KERNEL); @@ -1319,6 +1489,11 @@ optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; } else context->pri_path.counter_index = 0xff; + + if (qp->flags & MLX4_IB_QP_NETIF) { + mlx4_ib_steer_qp_reg(dev, qp, 1); + steer_qp = 1; + } } if (attr_mask & IB_QP_PKEY_INDEX) { @@ -1329,7 +1504,7 @@ } if (attr_mask & IB_QP_AV) { - if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path, + if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) goto out; @@ -1352,8 +1527,9 @@ dev->dev->caps.pkey_table_len[attr->alt_port_num]) goto out; - if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path, - attr->alt_port_num)) + if (mlx4_set_alt_path(dev, attr, attr_mask, qp, + &context->alt_path, + attr->alt_port_num)) goto out; context->alt_path.pkey_index = attr->alt_pkey_index; @@ -1458,12 +1634,37 @@ context->pri_path.fl = 0x80; context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; } + if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == + IB_LINK_LAYER_ETHERNET) { + if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || + qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) + context->pri_path.feup = 1 << 7; /* don't fsm */ + /* handle smac_index */ + if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || + qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || + qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { + err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); + if (err) + return -EINVAL; + } + } } if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | MLX4_IB_LINK_TYPE_ETH; + if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) { + int is_eth = rdma_port_get_link_layer( + &dev->ib_dev, qp->port) == + IB_LINK_LAYER_ETHERNET; + if (is_eth) { + context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH; + optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH; + } + } + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; @@ -1534,23 +1735,113 @@ * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. 
*/ - if (new_state == IB_QPS_RESET && !ibqp->uobject) { - mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, - ibqp->srq ? to_msrq(ibqp->srq): NULL); - if (send_cq != recv_cq) - mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); - - qp->rq.head = 0; - qp->rq.tail = 0; - qp->sq.head = 0; - qp->sq.tail = 0; - qp->sq_next_wqe = 0; - if (qp->rq.wqe_cnt) - *qp->db.db = 0; + if (new_state == IB_QPS_RESET) { + if (!ibqp->uobject) { + mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, + ibqp->srq ? to_msrq(ibqp->srq) : NULL); + if (send_cq != recv_cq) + mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + qp->sq_next_wqe = 0; + if (qp->rq.wqe_cnt) + *qp->db.db = 0; + + if (qp->flags & MLX4_IB_QP_NETIF) + mlx4_ib_steer_qp_reg(dev, qp, 0); + } + if (qp->pri.smac) { + mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); + qp->pri.smac = 0; + } + if (qp->alt.smac) { + mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); + qp->alt.smac = 0; + } + if (qp->pri.vid < 0x1000) { + mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); + qp->pri.vid = 0xFFFF; + qp->pri.candidate_vid = 0xFFFF; + qp->pri.update_vid = 0; + } + + if (qp->alt.vid < 0x1000) { + mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); + qp->alt.vid = 0xFFFF; + qp->alt.candidate_vid = 0xFFFF; + qp->alt.update_vid = 0; + } } - out: + if (err && steer_qp) + mlx4_ib_steer_qp_reg(dev, qp, 0); kfree(context); + if (qp->pri.candidate_smac) { + if (err) { + mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); + } else { + if (qp->pri.smac) + mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); + qp->pri.smac = qp->pri.candidate_smac; + qp->pri.smac_index = qp->pri.candidate_smac_index; + qp->pri.smac_port = qp->pri.candidate_smac_port; + } + qp->pri.candidate_smac = 0; + qp->pri.candidate_smac_index = 0; + qp->pri.candidate_smac_port = 0; + } + if (qp->alt.candidate_smac) { + if (err) { + mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); + } else { + if (qp->alt.smac) + mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); + qp->alt.smac = qp->alt.candidate_smac; + qp->alt.smac_index = qp->alt.candidate_smac_index; + qp->alt.smac_port = qp->alt.candidate_smac_port; + } + qp->alt.candidate_smac = 0; + qp->alt.candidate_smac_index = 0; + qp->alt.candidate_smac_port = 0; + } + + if (qp->pri.update_vid) { + if (err) { + if (qp->pri.candidate_vid < 0x1000) + mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, + qp->pri.candidate_vid); + } else { + if (qp->pri.vid < 0x1000) + mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, + qp->pri.vid); + qp->pri.vid = qp->pri.candidate_vid; + qp->pri.vlan_port = qp->pri.candidate_vlan_port; + qp->pri.vlan_index = qp->pri.candidate_vlan_index; + } + qp->pri.candidate_vid = 0xFFFF; + qp->pri.update_vid = 0; + } + + if (qp->alt.update_vid) { + if (err) { + if (qp->alt.candidate_vid < 0x1000) + mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, + qp->alt.candidate_vid); + } else { + if (qp->alt.vid < 0x1000) + mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, + qp->alt.vid); + qp->alt.vid = qp->alt.candidate_vid; + qp->alt.vlan_port = qp->alt.candidate_vlan_port; + qp->alt.vlan_index = qp->alt.candidate_vlan_index; + } + qp->alt.candidate_vid = 0xFFFF; + qp->alt.update_vid = 0; + } + return err; } @@ -1561,13 +1852,21 @@ struct mlx4_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; - + int ll; 
mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { + if (cur_state == new_state && cur_state == IB_QPS_RESET) { + ll = IB_LINK_LAYER_UNSPECIFIED; + } else { + int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; + ll = rdma_port_get_link_layer(&dev->ib_dev, port); + } + + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, + attr_mask, ll)) { pr_debug("qpn 0x%x: invalid attribute mask specified " "for transition %d to %d. qp_type %d," " attr_mask 0x%x\n", @@ -1744,9 +2043,9 @@ { struct ib_device *ib_dev = sqp->qp.ibqp.device; struct mlx4_wqe_mlx_seg *mlx = wqe; + struct mlx4_wqe_ctrl_seg *ctrl = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); - struct net_device *ndev; union ib_gid sgid; u16 pkey; int send_size; @@ -1770,12 +2069,11 @@ /* When multi-function is enabled, the ib_core gid * indexes don't necessarily match the hw ones, so * we must use our own cache */ - sgid.global.subnet_prefix = - to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. - subnet_prefix; - sgid.global.interface_id = - to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. - guid_cache[ah->av.ib.gid_index]; + err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev, + be32_to_cpu(ah->av.ib.port_pd) >> 24, + ah->av.ib.gid_index, &sgid.raw[0]); + if (err) + return err; } else { err = ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, @@ -1784,8 +2082,10 @@ return err; } - vlan = rdma_get_vlan_id(&sgid); - is_vlan = vlan < 0x1000; + if (ah->av.eth.vlan != 0xffff) { + vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff; + is_vlan = 1; + } } ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); @@ -1802,6 +2102,9 @@ sqp->ud_header.grh.flow_label = ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; + if (is_eth) + memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); + else { if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { /* When multi-function is enabled, the ib_core gid * indexes don't necessarily match the hw ones, so @@ -1817,6 +2120,7 @@ be32_to_cpu(ah->av.ib.port_pd) >> 24, ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid); + } memcpy(sqp->ud_header.grh.destination_gid.raw, ah->av.ib.dgid, 16); } @@ -1849,16 +2153,23 @@ if (is_eth) { u8 *smac; + struct in6_addr in6; + u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; mlx->sched_prio = cpu_to_be16(pcp); memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); /* FIXME: cache smac value? 
*/ - ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]; - if (!ndev) - return -ENODEV; - smac = ndev->dev_addr; + memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2); + memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); + memcpy(&in6, sgid.raw, sizeof(in6)); + + if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) + smac = to_mdev(sqp->qp.ibqp.device)-> + iboe.netdevs[sqp->qp.port - 1]->dev_addr; + else /* use the src mac of the tunnel */ + smac = ah->av.eth.s_mac; memcpy(sqp->ud_header.eth.smac_h, smac, 6); if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); @@ -2090,6 +2401,8 @@ hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index); hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); + memcpy(hdr.mac, ah->av.eth.mac, 6); + hdr.vlan = ah->av.eth.vlan; spc = MLX4_INLINE_ALIGN - ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); @@ -2762,6 +3075,9 @@ if (qp->flags & MLX4_IB_QP_LSO) qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; + if (qp->flags & MLX4_IB_QP_NETIF) + qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP; + qp_init_attr->sq_sig_type = qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; --- linux-3.13.0.orig/drivers/infiniband/hw/mlx4/sysfs.c +++ linux-3.13.0/drivers/infiniband/hw/mlx4/sysfs.c @@ -582,8 +582,10 @@ p->pkey_group.attrs = alloc_group_attrs(show_port_pkey, store_port_pkey, dev->dev->caps.pkey_table_len[port_num]); - if (!p->pkey_group.attrs) + if (!p->pkey_group.attrs) { + ret = -ENOMEM; goto err_alloc; + } ret = sysfs_create_group(&p->kobj, &p->pkey_group); if (ret) @@ -591,8 +593,10 @@ p->gid_group.name = "gid_idx"; p->gid_group.attrs = alloc_group_attrs(show_port_gid_idx, NULL, 1); - if (!p->gid_group.attrs) + if (!p->gid_group.attrs) { + ret = -ENOMEM; goto err_free_pkey; + } ret = sysfs_create_group(&p->kobj, &p->gid_group); if (ret) @@ -623,6 +627,7 @@ int port; struct kobject *p, *t; struct mlx4_port *mport; + struct mlx4_active_ports actv_ports; get_name(dev, name, slave, sizeof name); @@ -645,7 +650,11 @@ goto err_ports; } + actv_ports = mlx4_get_active_ports(dev->dev, slave); + for (port = 1; port <= dev->dev->caps.num_ports; ++port) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; err = add_port(dev, port, slave); if (err) goto err_add; --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/Kconfig +++ linux-3.13.0/drivers/infiniband/hw/mlx5/Kconfig @@ -1,6 +1,6 @@ config MLX5_INFINIBAND tristate "Mellanox Connect-IB HCA support" - depends on NETDEVICES && ETHERNET && PCI && X86 + depends on NETDEVICES && ETHERNET && PCI select NET_VENDOR_MELLANOX select MLX5_CORE ---help--- --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/cq.c +++ linux-3.13.0/drivers/infiniband/hw/mlx5/cq.c @@ -32,6 +32,7 @@ #include #include +#include #include "mlx5_ib.h" #include "user.h" @@ -73,14 +74,24 @@ return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); } +static u8 sw_ownership_bit(int n, int nent) +{ + return (n & nent) ? 1 : 0; +} + static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) { void *cqe = get_cqe(cq, n & cq->ibcq.cqe); struct mlx5_cqe64 *cqe64; cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; - return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ - !!(n & (cq->ibcq.cqe + 1))) ? 
NULL : cqe; + + if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) && + !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { + return cqe; + } else { + return NULL; + } } static void *next_cqe_sw(struct mlx5_ib_cq *cq) @@ -351,6 +362,11 @@ qp->sq.last_poll = tail; } +static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) +{ + mlx5_buf_free(&dev->mdev, &buf->buf); +} + static int mlx5_poll_one(struct mlx5_ib_cq *cq, struct mlx5_ib_qp **cur_qp, struct ib_wc *wc) @@ -366,6 +382,7 @@ void *cqe; int idx; +repoll: cqe = next_cqe_sw(cq); if (!cqe) return -EAGAIN; @@ -379,7 +396,18 @@ */ rmb(); - /* TBD: resize CQ */ + opcode = cqe64->op_own >> 4; + if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) { + if (likely(cq->resize_buf)) { + free_cq_buf(dev, &cq->buf); + cq->buf = *cq->resize_buf; + kfree(cq->resize_buf); + cq->resize_buf = NULL; + goto repoll; + } else { + mlx5_ib_warn(dev, "unexpected resize cqe\n"); + } + } qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { @@ -398,7 +426,6 @@ } wc->qp = &(*cur_qp)->ibqp; - opcode = cqe64->op_own >> 4; switch (opcode) { case MLX5_CQE_REQ: wq = &(*cur_qp)->sq; @@ -503,29 +530,35 @@ return err; buf->cqe_size = cqe_size; + buf->nent = nent; return 0; } -static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) -{ - mlx5_buf_free(&dev->mdev, &buf->buf); -} - static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct ib_ucontext *context, struct mlx5_ib_cq *cq, int entries, struct mlx5_create_cq_mbox_in **cqb, int *cqe_size, int *index, int *inlen) { struct mlx5_ib_create_cq ucmd; + size_t ucmdlen; int page_shift; int npages; int ncont; int err; - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + ucmdlen = + (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < + sizeof(ucmd)) ? (sizeof(ucmd) - + sizeof(ucmd.reserved)) : sizeof(ucmd); + + if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) return -EFAULT; + if (ucmdlen == sizeof(ucmd) && + ucmd.reserved != 0) + return -EINVAL; + if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) return -EINVAL; @@ -576,16 +609,16 @@ ib_umem_release(cq->buf.umem); } -static void init_cq_buf(struct mlx5_ib_cq *cq, int nent) +static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf) { int i; void *cqe; struct mlx5_cqe64 *cqe64; - for (i = 0; i < nent; i++) { - cqe = get_cqe(cq, i); - cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64; - cqe64->op_own = 0xf1; + for (i = 0; i < buf->nent; i++) { + cqe = get_cqe_from_buf(buf, i, buf->cqe_size); + cqe64 = buf->cqe_size == 64 ? 
cqe : cqe + 64; + cqe64->op_own = MLX5_CQE_INVALID << 4; } } @@ -610,7 +643,7 @@ if (err) goto err_db; - init_cq_buf(cq, entries); + init_cq_buf(cq, &cq->buf); *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; *cqb = mlx5_vzalloc(*inlen); @@ -818,12 +851,266 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) { - return -ENOSYS; + struct mlx5_modify_cq_mbox_in *in; + struct mlx5_ib_dev *dev = to_mdev(cq->device); + struct mlx5_ib_cq *mcq = to_mcq(cq); + int err; + u32 fsel; + + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) + return -ENOSYS; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->cqn = cpu_to_be32(mcq->mcq.cqn); + fsel = (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT); + in->ctx.cq_period = cpu_to_be16(cq_period); + in->ctx.cq_max_count = cpu_to_be16(cq_count); + in->field_select = cpu_to_be32(fsel); + err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in)); + kfree(in); + + if (err) + mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn); + + return err; +} + +static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + int entries, struct ib_udata *udata, int *npas, + int *page_shift, int *cqe_size) +{ + struct mlx5_ib_resize_cq ucmd; + struct ib_umem *umem; + int err; + int npages; + struct ib_ucontext *context = cq->buf.umem->context; + + err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + if (err) + return err; + + if (ucmd.reserved0 || ucmd.reserved1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE, 1); + if (IS_ERR(umem)) { + err = PTR_ERR(umem); + return err; + } + + mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, + npas, NULL); + + cq->resize_umem = umem; + *cqe_size = ucmd.cqe_size; + + return 0; +} + +static void un_resize_user(struct mlx5_ib_cq *cq) +{ + ib_umem_release(cq->resize_umem); +} + +static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, + int entries, int cqe_size) +{ + int err; + + cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); + if (!cq->resize_buf) + return -ENOMEM; + + err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); + if (err) + goto ex; + + init_cq_buf(cq, cq->resize_buf); + + return 0; + +ex: + kfree(cq->resize_buf); + return err; +} + +static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) +{ + free_cq_buf(dev, cq->resize_buf); + cq->resize_buf = NULL; +} + +static int copy_resize_cqes(struct mlx5_ib_cq *cq) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct mlx5_cqe64 *scqe64; + struct mlx5_cqe64 *dcqe64; + void *start_cqe; + void *scqe; + void *dcqe; + int ssize; + int dsize; + int i; + u8 sw_own; + + ssize = cq->buf.cqe_size; + dsize = cq->resize_buf->cqe_size; + if (ssize != dsize) { + mlx5_ib_warn(dev, "resize from different cqe size is not supported\n"); + return -EINVAL; + } + + i = cq->mcq.cons_index; + scqe = get_sw_cqe(cq, i); + scqe64 = ssize == 64 ? scqe : scqe + 64; + start_cqe = scqe; + if (!scqe) { + mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); + return -EINVAL; + } + + while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) { + dcqe = get_cqe_from_buf(cq->resize_buf, + (i + 1) & (cq->resize_buf->nent), + dsize); + dcqe64 = dsize == 64 ? 
dcqe : dcqe + 64; + sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); + memcpy(dcqe, scqe, dsize); + dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own; + + ++i; + scqe = get_sw_cqe(cq, i); + scqe64 = ssize == 64 ? scqe : scqe + 64; + if (!scqe) { + mlx5_ib_warn(dev, "expected cqe in sw ownership\n"); + return -EINVAL; + } + + if (scqe == start_cqe) { + pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", + cq->mcq.cqn); + return -ENOMEM; + } + } + ++cq->mcq.cons_index; + return 0; } int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { - return -ENOSYS; + struct mlx5_ib_dev *dev = to_mdev(ibcq->device); + struct mlx5_ib_cq *cq = to_mcq(ibcq); + struct mlx5_modify_cq_mbox_in *in; + int err; + int npas; + int page_shift; + int inlen; + int uninitialized_var(cqe_size); + unsigned long flags; + + if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { + pr_info("Firmware does not support resize CQ\n"); + return -ENOSYS; + } + + if (entries < 1) + return -EINVAL; + + entries = roundup_pow_of_two(entries + 1); + if (entries > dev->mdev.caps.max_cqes + 1) + return -EINVAL; + + if (entries == ibcq->cqe + 1) + return 0; + + mutex_lock(&cq->resize_mutex); + if (udata) { + err = resize_user(dev, cq, entries, udata, &npas, &page_shift, + &cqe_size); + } else { + cqe_size = 64; + err = resize_kernel(dev, cq, entries, cqe_size); + if (!err) { + npas = cq->resize_buf->buf.npages; + page_shift = cq->resize_buf->buf.page_shift; + } + } + + if (err) + goto ex; + + inlen = sizeof(*in) + npas * sizeof(in->pas[0]); + in = mlx5_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto ex_resize; + } + + if (udata) + mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, + in->pas, 0); + else + mlx5_fill_page_array(&cq->resize_buf->buf, in->pas); + + in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE | + MLX5_MODIFY_CQ_MASK_PG_OFFSET | + MLX5_MODIFY_CQ_MASK_PG_SIZE); + in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; + in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5; + in->ctx.page_offset = 0; + in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24); + in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE); + in->cqn = cpu_to_be32(cq->mcq.cqn); + + err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen); + if (err) + goto ex_alloc; + + if (udata) { + cq->ibcq.cqe = entries - 1; + ib_umem_release(cq->buf.umem); + cq->buf.umem = cq->resize_umem; + cq->resize_umem = NULL; + } else { + struct mlx5_ib_cq_buf tbuf; + int resized = 0; + + spin_lock_irqsave(&cq->lock, flags); + if (cq->resize_buf) { + err = copy_resize_cqes(cq); + if (!err) { + tbuf = cq->buf; + cq->buf = *cq->resize_buf; + kfree(cq->resize_buf); + cq->resize_buf = NULL; + resized = 1; + } + } + cq->ibcq.cqe = entries - 1; + spin_unlock_irqrestore(&cq->lock, flags); + if (resized) + free_cq_buf(dev, &tbuf); + } + mutex_unlock(&cq->resize_mutex); + + mlx5_vfree(in); + return 0; + +ex_alloc: + mlx5_vfree(in); + +ex_resize: + if (udata) + un_resize_user(cq); + else + un_resize_kernel(dev, cq); +ex: + mutex_unlock(&cq->resize_mutex); + return err; } int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/main.c +++ linux-3.13.0/drivers/infiniband/hw/mlx5/main.c @@ -46,8 +46,8 @@ #include "mlx5_ib.h" #define DRIVER_NAME "mlx5_ib" -#define DRIVER_VERSION "1.0" -#define DRIVER_RELDATE "June 2013" +#define DRIVER_VERSION "2.2-1" +#define DRIVER_RELDATE "Feb 2014" MODULE_AUTHOR("Eli Cohen "); 
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); @@ -261,8 +261,7 @@ props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | - IB_DEVICE_RC_RNR_NAK_GEN | - IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; + IB_DEVICE_RC_RNR_NAK_GEN; flags = dev->mdev.caps.flags; if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; @@ -536,34 +535,51 @@ struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_alloc_ucontext_req req; + struct mlx5_ib_alloc_ucontext_req_v2 req; struct mlx5_ib_alloc_ucontext_resp resp; struct mlx5_ib_ucontext *context; struct mlx5_uuar_info *uuari; struct mlx5_uar *uars; + int gross_uuars; int num_uars; + int ver; int uuarn; int err; int i; + int reqlen; if (!dev->ib_active) return ERR_PTR(-EAGAIN); - err = ib_copy_from_udata(&req, udata, sizeof(req)); + memset(&req, 0, sizeof(req)); + reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); + if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) + ver = 0; + else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2)) + ver = 2; + else + return ERR_PTR(-EINVAL); + + err = ib_copy_from_udata(&req, udata, reqlen); if (err) return ERR_PTR(err); + if (req.flags || req.reserved) + return ERR_PTR(-EINVAL); + if (req.total_num_uuars > MLX5_MAX_UUARS) return ERR_PTR(-ENOMEM); if (req.total_num_uuars == 0) return ERR_PTR(-EINVAL); - req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE); + req.total_num_uuars = ALIGN(req.total_num_uuars, + MLX5_NON_FP_BF_REGS_PER_PAGE); if (req.num_low_latency_uuars > req.total_num_uuars - 1) return ERR_PTR(-EINVAL); - num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE; + num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; + gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp; resp.bf_reg_size = dev->mdev.caps.bf_reg_size; resp.cache_line_size = L1_CACHE_BYTES; @@ -585,7 +601,7 @@ goto out_ctx; } - uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars), + uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars), sizeof(*uuari->bitmap), GFP_KERNEL); if (!uuari->bitmap) { @@ -595,13 +611,13 @@ /* * clear all fast path uuars */ - for (i = 0; i < req.total_num_uuars; i++) { + for (i = 0; i < gross_uuars; i++) { uuarn = i & 3; if (uuarn == 2 || uuarn == 3) set_bit(i, uuari->bitmap); } - uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL); + uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL); if (!uuari->count) { err = -ENOMEM; goto out_bitmap; @@ -623,6 +639,7 @@ if (err) goto out_uars; + uuari->ver = ver; uuari->num_low_latency_uuars = req.num_low_latency_uuars; uuari->uars = uars; uuari->num_uars = num_uars; --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ linux-3.13.0/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -195,6 +195,7 @@ struct mlx5_buf buf; struct ib_umem *umem; int cqe_size; + int nent; }; enum mlx5_ib_qp_flags { @@ -220,7 +221,7 @@ /* protect resize cq */ struct mutex resize_mutex; - struct mlx5_ib_cq_resize *resize_buf; + struct mlx5_ib_cq_buf *resize_buf; struct ib_umem *resize_umem; int cqe_size; }; @@ -264,7 +265,6 @@ enum ib_wc_status status; struct mlx5_ib_dev *dev; struct mlx5_create_mkey_mbox_out out; - unsigned long start; }; struct mlx5_ib_fast_reg_page_list { --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/mr.c +++ linux-3.13.0/drivers/infiniband/hw/mlx5/mr.c @@ -146,7 +146,6 @@ spin_lock_irq(&ent->lock); ent->pending++; 
spin_unlock_irq(&ent->lock); - mr->start = jiffies; err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), reg_mr_callback, mr, &mr->out); --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/qp.c +++ linux-3.13.0/drivers/infiniband/hw/mlx5/qp.c @@ -216,7 +216,9 @@ case IB_QPT_UC: size += sizeof(struct mlx5_wqe_ctrl_seg) + - sizeof(struct mlx5_wqe_raddr_seg); + sizeof(struct mlx5_wqe_raddr_seg) + + sizeof(struct mlx5_wqe_umr_ctrl_seg) + + sizeof(struct mlx5_mkey_seg); break; case IB_QPT_UD: @@ -340,14 +342,57 @@ return 1; } +static int first_med_uuar(void) +{ + return 1; +} + +static int next_uuar(int n) +{ + n++; + + while (((n % 4) & 2)) + n++; + + return n; +} + +static int num_med_uuar(struct mlx5_uuar_info *uuari) +{ + int n; + + n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE - + uuari->num_low_latency_uuars - 1; + + return n >= 0 ? n : 0; +} + +static int max_uuari(struct mlx5_uuar_info *uuari) +{ + return uuari->num_uars * 4; +} + +static int first_hi_uuar(struct mlx5_uuar_info *uuari) +{ + int med; + int i; + int t; + + med = num_med_uuar(uuari); + for (t = 0, i = first_med_uuar();; i = next_uuar(i)) { + t++; + if (t == med) + return next_uuar(i); + } + + return 0; +} + static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) { - int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; - int start_uuar; int i; - start_uuar = nuuars - uuari->num_low_latency_uuars; - for (i = start_uuar; i < nuuars; i++) { + for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) { if (!test_bit(i, uuari->bitmap)) { set_bit(i, uuari->bitmap); uuari->count[i]++; @@ -360,19 +405,10 @@ static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) { - int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; - int minidx = 1; - int uuarn; - int end; + int minidx = first_med_uuar(); int i; - end = nuuars - uuari->num_low_latency_uuars; - - for (i = 1; i < end; i++) { - uuarn = i & 3; - if (uuarn == 2 || uuarn == 3) - continue; - + for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) { if (uuari->count[i] < uuari->count[minidx]) minidx = i; } @@ -394,11 +430,17 @@ break; case MLX5_IB_LATENCY_CLASS_MEDIUM: - uuarn = alloc_med_class_uuar(uuari); + if (uuari->ver < 2) + uuarn = -ENOMEM; + else + uuarn = alloc_med_class_uuar(uuari); break; case MLX5_IB_LATENCY_CLASS_HIGH: - uuarn = alloc_high_class_uuar(uuari); + if (uuari->ver < 2) + uuarn = -ENOMEM; + else + uuarn = alloc_high_class_uuar(uuari); break; case MLX5_IB_LATENCY_CLASS_FAST_PATH: @@ -489,12 +531,12 @@ { struct mlx5_ib_ucontext *context; struct mlx5_ib_create_qp ucmd; - int page_shift; + int page_shift = 0; int uar_index; int npages; - u32 offset; + u32 offset = 0; int uuarn; - int ncont; + int ncont = 0; int err; err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); @@ -510,11 +552,16 @@ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); if (uuarn < 0) { mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); - mlx5_ib_dbg(dev, "reverting to high latency\n"); - uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); + mlx5_ib_dbg(dev, "reverting to medium latency\n"); + uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); if (uuarn < 0) { - mlx5_ib_dbg(dev, "uuar allocation failed\n"); - return uuarn; + mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n"); + mlx5_ib_dbg(dev, "reverting to high latency\n"); + uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); + if (uuarn < 0) { + mlx5_ib_warn(dev, "uuar allocation failed\n"); + return uuarn; + } } 
} @@ -525,23 +572,29 @@ if (err) goto err_uuar; - qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, - qp->buf_size, 0, 0); - if (IS_ERR(qp->umem)) { - mlx5_ib_dbg(dev, "umem_get failed\n"); - err = PTR_ERR(qp->umem); - goto err_uuar; + if (ucmd.buf_addr && qp->buf_size) { + qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, + qp->buf_size, 0, 0); + if (IS_ERR(qp->umem)) { + mlx5_ib_dbg(dev, "umem_get failed\n"); + err = PTR_ERR(qp->umem); + goto err_uuar; + } + } else { + qp->umem = NULL; } - mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, - &ncont, NULL); - err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); - if (err) { - mlx5_ib_warn(dev, "bad offset\n"); - goto err_umem; + if (qp->umem) { + mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); + if (err) { + mlx5_ib_warn(dev, "bad offset\n"); + goto err_umem; + } + mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n", + ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); } - mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n", - ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; *in = mlx5_vzalloc(*inlen); @@ -549,7 +602,8 @@ err = -ENOMEM; goto err_umem; } - mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); + if (qp->umem) + mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); (*in)->ctx.params2 = cpu_to_be32(offset << 6); @@ -580,7 +634,8 @@ mlx5_vfree(*in); err_umem: - ib_umem_release(qp->umem); + if (qp->umem) + ib_umem_release(qp->umem); err_uuar: free_uuar(&context->uuari, uuarn); @@ -593,7 +648,8 @@ context = to_mucontext(pd->uobject->context); mlx5_ib_db_unmap_user(context, &qp->db); - ib_umem_release(qp->umem); + if (qp->umem) + ib_umem_release(qp->umem); free_uuar(&context->uuari, qp->uuarn); } @@ -609,8 +665,8 @@ int err; uuari = &dev->mdev.priv.uuari; - if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) - qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; + if (init_attr->create_flags) + return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; @@ -1616,7 +1672,8 @@ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR && - !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) + !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_UNSPECIFIED)) goto out; if ((attr_mask & IB_QP_PORT) && @@ -2212,6 +2269,10 @@ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); + /* Make sure doorbell record is visible to the HCA before + * we hit doorbell */ + wmb(); + if (bf->need_lock) spin_lock(&bf->lock); --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/srq.c +++ linux-3.13.0/drivers/infiniband/hw/mlx5/srq.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mlx5_ib.h" #include "user.h" @@ -78,16 +79,27 @@ { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_create_srq ucmd; + size_t ucmdlen; int err; int npages; int page_shift; int ncont; u32 offset; - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + ucmdlen = + (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < + sizeof(ucmd)) ? 
(sizeof(ucmd) - + sizeof(ucmd.reserved)) : sizeof(ucmd); + + if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { mlx5_ib_dbg(dev, "failed copy udata\n"); return -EFAULT; } + + if (ucmdlen == sizeof(ucmd) && + ucmd.reserved != 0) + return -EINVAL; + srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, --- linux-3.13.0.orig/drivers/infiniband/hw/mlx5/user.h +++ linux-3.13.0/drivers/infiniband/hw/mlx5/user.h @@ -62,6 +62,13 @@ __u32 num_low_latency_uuars; }; +struct mlx5_ib_alloc_ucontext_req_v2 { + __u32 total_num_uuars; + __u32 num_low_latency_uuars; + __u32 flags; + __u32 reserved; +}; + struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; @@ -84,6 +91,7 @@ __u64 buf_addr; __u64 db_addr; __u32 cqe_size; + __u32 reserved; /* explicit padding (optional on i386) */ }; struct mlx5_ib_create_cq_resp { @@ -93,12 +101,16 @@ struct mlx5_ib_resize_cq { __u64 buf_addr; + __u16 cqe_size; + __u16 reserved0; + __u32 reserved1; }; struct mlx5_ib_create_srq { __u64 buf_addr; __u64 db_addr; __u32 flags; + __u32 reserved; /* explicit padding (optional on i386) */ }; struct mlx5_ib_create_srq_resp { --- linux-3.13.0.orig/drivers/infiniband/hw/mthca/mthca_provider.c +++ linux-3.13.0/drivers/infiniband/hw/mthca/mthca_provider.c @@ -695,6 +695,7 @@ if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { mthca_free_cq(to_mdev(ibdev), cq); + err = -EFAULT; goto err_free; } --- linux-3.13.0.orig/drivers/infiniband/hw/mthca/mthca_qp.c +++ linux-3.13.0/drivers/infiniband/hw/mthca/mthca_qp.c @@ -860,7 +860,8 @@ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) { + if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_UNSPECIFIED)) { mthca_dbg(dev, "Bad QP transition (transport %d) " "%d->%d with attr 0x%08x\n", qp->transport, cur_state, new_state, --- linux-3.13.0.orig/drivers/infiniband/hw/nes/nes.c +++ linux-3.13.0/drivers/infiniband/hw/nes/nes.c @@ -675,8 +675,11 @@ INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); /* Initialize network devices */ - if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) + netdev = nes_netdev_init(nesdev, mmio_regs); + if (netdev == NULL) { + ret = -ENOMEM; goto bail7; + } /* Register network device */ ret = register_netdev(netdev); --- linux-3.13.0.orig/drivers/infiniband/hw/nes/nes_cm.c +++ linux-3.13.0/drivers/infiniband/hw/nes/nes_cm.c @@ -1354,8 +1354,7 @@ neigh->ha, ntohl(rt->rt_gateway)); if (arpindex >= 0) { - if (!memcmp(nesadapter->arp_table[arpindex].mac_addr, - neigh->ha, ETH_ALEN)) { + if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) { /* Mac address same as in nes_arp_table */ goto out; } --- linux-3.13.0.orig/drivers/infiniband/hw/nes/nes_verbs.c +++ linux-3.13.0/drivers/infiniband/hw/nes/nes_verbs.c @@ -1186,7 +1186,7 @@ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); kfree(nesqp->allocated_buffer); nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n"); - return NULL; + return ERR_PTR(-EFAULT); } if (req.user_wqe_buffers) { virt_wqs = 1; --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/Kconfig +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_OCRDMA tristate "Emulex One Connect HCA support" - depends on ETHERNET && NETDEVICES && PCI && (IPV6 || IPV6=n) + depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n) select 
NET_VENDOR_EMULEX select BE2NET ---help--- --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma.h +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -423,5 +423,17 @@ OCRDMA_CQE_WRITE_IMM) ? 1 : 0; } +static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev, + struct ib_ah_attr *ah_attr, u8 *mac_addr) +{ + struct in6_addr in6; + + memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); + if (rdma_is_multicast_addr(&in6)) + rdma_get_mcast_mac(&in6, mac_addr); + else + memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); + return 0; +} #endif --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -49,7 +49,7 @@ ah->sgid_index = attr->grh.sgid_index; - vlan_tag = rdma_get_vlan_id(&attr->grh.dgid); + vlan_tag = attr->vlan_id; if (!vlan_tag || (vlan_tag > 0xFFF)) vlan_tag = dev->pvid; if (vlan_tag && (vlan_tag < 0x1000)) { @@ -64,7 +64,8 @@ eth_sz = sizeof(struct ocrdma_eth_basic); } memcpy(ð.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN); - status = ocrdma_resolve_dgid(dev, &attr->grh.dgid, ð.dmac[0]); + memcpy(ð.dmac[0], attr->dmac, ETH_ALEN); + status = ocrdma_resolve_dmac(dev, attr, ð.dmac[0]); if (status) return status; status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, @@ -84,6 +85,7 @@ memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); if (vlan_enabled) ah->av->valid |= OCRDMA_AV_VLAN_VALID; + ah->av->valid = cpu_to_le32(ah->av->valid); return status; } --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -2076,23 +2076,6 @@ return status; } -int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid, - u8 *mac_addr) -{ - struct in6_addr in6; - - memcpy(&in6, dgid, sizeof in6); - if (rdma_is_multicast_addr(&in6)) { - rdma_get_mcast_mac(&in6, mac_addr); - } else if (rdma_link_local_addr(&in6)) { - rdma_get_ll_mac(&in6, mac_addr); - } else { - pr_err("%s() fail to resolve mac_addr.\n", __func__); - return -EINVAL; - } - return 0; -} - static int ocrdma_set_av_params(struct ocrdma_qp *qp, struct ocrdma_modify_qp *cmd, struct ib_qp_attr *attrs) @@ -2126,14 +2109,14 @@ qp->sgid_idx = ah_attr->grh.sgid_index; memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); - ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]); + ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | (mac_addr[2] << 16) | (mac_addr[3] << 24); /* convert them to LE format. 
*/ ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid)); ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); - vlan_id = rdma_get_vlan_id(&sgid); + vlan_id = ah_attr->vlan_id; if (vlan_id && (vlan_id < 0x1000)) { cmd->params.vlan_dmac_b4_to_b5 |= vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -94,7 +94,6 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed); int ocrdma_query_config(struct ocrdma_dev *, struct ocrdma_mbx_query_config *config); -int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr); int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *); --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -67,46 +67,24 @@ guid[7] = mac_addr[5]; } -static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr, - bool is_vlan, u16 vlan_id) -{ - sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - sgid->raw[8] = mac_addr[0] ^ 2; - sgid->raw[9] = mac_addr[1]; - sgid->raw[10] = mac_addr[2]; - if (is_vlan) { - sgid->raw[11] = vlan_id >> 8; - sgid->raw[12] = vlan_id & 0xff; - } else { - sgid->raw[11] = 0xff; - sgid->raw[12] = 0xfe; - } - sgid->raw[13] = mac_addr[3]; - sgid->raw[14] = mac_addr[4]; - sgid->raw[15] = mac_addr[5]; -} - -static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, - bool is_vlan, u16 vlan_id) +static bool ocrdma_add_sgid(struct ocrdma_dev *dev, union ib_gid *new_sgid) { int i; - union ib_gid new_sgid; unsigned long flags; memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); - ocrdma_build_sgid_mac(&new_sgid, mac_addr, is_vlan, vlan_id); spin_lock_irqsave(&dev->sgid_lock, flags); for (i = 0; i < OCRDMA_MAX_SGID; i++) { if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, sizeof(union ib_gid))) { /* found free entry */ - memcpy(&dev->sgid_tbl[i], &new_sgid, + memcpy(&dev->sgid_tbl[i], new_sgid, sizeof(union ib_gid)); spin_unlock_irqrestore(&dev->sgid_lock, flags); return true; - } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, + } else if (!memcmp(&dev->sgid_tbl[i], new_sgid, sizeof(union ib_gid))) { /* entry already present, no addition is required. */ spin_unlock_irqrestore(&dev->sgid_lock, flags); @@ -117,20 +95,17 @@ return false; } -static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, - bool is_vlan, u16 vlan_id) +static bool ocrdma_del_sgid(struct ocrdma_dev *dev, union ib_gid *sgid) { int found = false; int i; - union ib_gid sgid; unsigned long flags; - ocrdma_build_sgid_mac(&sgid, mac_addr, is_vlan, vlan_id); spin_lock_irqsave(&dev->sgid_lock, flags); /* first is default sgid, which cannot be deleted. 
*/ for (i = 1; i < OCRDMA_MAX_SGID; i++) { - if (!memcmp(&dev->sgid_tbl[i], &sgid, sizeof(union ib_gid))) { + if (!memcmp(&dev->sgid_tbl[i], sgid, sizeof(union ib_gid))) { /* found matching entry */ memset(&dev->sgid_tbl[i], 0, sizeof(union ib_gid)); found = true; @@ -141,75 +116,18 @@ return found; } -static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) -{ - /* GID Index 0 - Invariant manufacturer-assigned EUI-64 */ - union ib_gid *sgid = &dev->sgid_tbl[0]; - - sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - ocrdma_get_guid(dev, &sgid->raw[8]); -} - -#if IS_ENABLED(CONFIG_VLAN_8021Q) -static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) -{ - struct net_device *netdev, *tmp; - u16 vlan_id; - bool is_vlan; - - netdev = dev->nic_info.netdev; - - rcu_read_lock(); - for_each_netdev_rcu(&init_net, tmp) { - if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { - if (!netif_running(tmp) || !netif_oper_up(tmp)) - continue; - if (netdev != tmp) { - vlan_id = vlan_dev_vlan_id(tmp); - is_vlan = true; - } else { - is_vlan = false; - vlan_id = 0; - tmp = netdev; - } - ocrdma_add_sgid(dev, tmp->dev_addr, is_vlan, vlan_id); - } - } - rcu_read_unlock(); -} -#else -static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) -{ - -} -#endif /* VLAN */ - -static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) -{ - ocrdma_add_default_sgid(dev); - ocrdma_add_vlan_sgids(dev); - return 0; -} - -#if IS_ENABLED(CONFIG_IPV6) - -static int ocrdma_inet6addr_event(struct notifier_block *notifier, - unsigned long event, void *ptr) +static int ocrdma_addr_event(unsigned long event, struct net_device *netdev, + union ib_gid *gid) { - struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; - struct net_device *netdev = ifa->idev->dev; struct ib_event gid_event; struct ocrdma_dev *dev; bool found = false; bool updated = false; bool is_vlan = false; - u16 vid = 0; is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; - if (is_vlan) { - vid = vlan_dev_vlan_id(netdev); - netdev = vlan_dev_real_dev(netdev); - } + if (is_vlan) + netdev = rdma_vlan_dev_real_dev(netdev); rcu_read_lock(); list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { @@ -222,16 +140,14 @@ if (!found) return NOTIFY_DONE; - if (!rdma_link_local_addr((struct in6_addr *)&ifa->addr)) - return NOTIFY_DONE; mutex_lock(&dev->dev_lock); switch (event) { case NETDEV_UP: - updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); + updated = ocrdma_add_sgid(dev, gid); break; case NETDEV_DOWN: - updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); + updated = ocrdma_del_sgid(dev, gid); break; default: break; @@ -247,6 +163,32 @@ return NOTIFY_OK; } +static int ocrdma_inetaddr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + union ib_gid gid; + struct net_device *netdev = ifa->ifa_dev->dev; + + ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid); + return ocrdma_addr_event(event, netdev, &gid); +} + +static struct notifier_block ocrdma_inetaddr_notifier = { + .notifier_call = ocrdma_inetaddr_event +}; + +#if IS_ENABLED(CONFIG_IPV6) + +static int ocrdma_inet6addr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + union ib_gid *gid = (union ib_gid *)&ifa->addr; + struct net_device *netdev = ifa->idev->dev; + return ocrdma_addr_event(event, netdev, gid); +} + static struct notifier_block ocrdma_inet6addr_notifier = { .notifier_call = ocrdma_inet6addr_event }; @@ -423,10 
+365,6 @@ if (status) goto alloc_err; - status = ocrdma_build_sgid_tbl(dev); - if (status) - goto alloc_err; - status = ocrdma_register_device(dev); if (status) goto alloc_err; @@ -553,6 +491,10 @@ { int status; + status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier); + if (status) + return status; + #if IS_ENABLED(CONFIG_IPV6) status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); if (status) --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -31,7 +31,7 @@ #define Bit(_b) (1 << (_b)) #define OCRDMA_GEN1_FAMILY 0xB -#define OCRDMA_GEN2_FAMILY 0x2 +#define OCRDMA_GEN2_FAMILY 0x0F #define OCRDMA_SUBSYS_ROCE 10 enum { @@ -1694,7 +1694,7 @@ u16 rsvd; } __packed; -#define OCRDMA_AV_VALID Bit(0) +#define OCRDMA_AV_VALID Bit(7) #define OCRDMA_AV_VLAN_VALID Bit(1) struct ocrdma_av { --- linux-3.13.0.orig/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ linux-3.13.0/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -176,7 +176,7 @@ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | - IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP; + IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS; props->gid_tbl_len = OCRDMA_MAX_SGID; props->pkey_tbl_len = 1; props->bad_pkey_cntr = 0; @@ -1326,7 +1326,8 @@ new_qps = old_qps; spin_unlock_irqrestore(&qp->q_lock, flags); - if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) { + if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask, + IB_LINK_LAYER_ETHERNET)) { pr_err("%s(%d) invalid attribute mask=0x%x specified for\n" "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n", __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, @@ -1415,7 +1416,7 @@ OCRDMA_QP_PARAMS_HOP_LMT_MASK) >> OCRDMA_QP_PARAMS_HOP_LMT_SHIFT; qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn & - OCRDMA_QP_PARAMS_SQ_PSN_MASK) >> + OCRDMA_QP_PARAMS_TCLASS_MASK) >> OCRDMA_QP_PARAMS_TCLASS_SHIFT; qp_attr->ah_attr.ah_flags = IB_AH_GRH; --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib.h +++ linux-3.13.0/drivers/infiniband/hw/qib/qib.h @@ -1080,12 +1080,6 @@ /* control high-level access to EEPROM */ struct mutex eep_lock; uint64_t traffic_wds; - /* active time is kept in seconds, but logged in hours */ - atomic_t active_time; - /* Below are nominal shadow of EEPROM, new since last EEPROM update */ - uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; - uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; - uint16_t eep_hrs; /* * masks for which bits of errs, hwerrs that cause * each of the counters to increment. 
@@ -1307,8 +1301,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, const void *buffer, int len); void qib_get_eeprom_info(struct qib_devdata *); -int qib_update_eeprom_log(struct qib_devdata *dd); -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); +#define qib_inc_eeprom_err(dd, eidx, incr) void qib_dump_lookup_output_queue(struct qib_devdata *); void qib_force_pio_avail_update(struct qib_devdata *); void qib_clear_symerror_on_linkup(unsigned long opaque); --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_debugfs.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_debugfs.c @@ -193,6 +193,7 @@ struct qib_qp_iter *iter; loff_t n = *pos; + rcu_read_lock(); iter = qib_qp_iter_init(s->private); if (!iter) return NULL; @@ -224,7 +225,7 @@ static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr) { - /* nothing for now */ + rcu_read_unlock(); } static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr) --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_eeprom.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_eeprom.c @@ -267,190 +267,9 @@ "Board SN %s did not pass functional test: %s\n", dd->serial, ifp->if_comment); - memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); - /* - * Power-on (actually "active") hours are kept as little-endian value - * in EEPROM, but as seconds in a (possibly as small as 24-bit) - * atomic_t while running. - */ - atomic_set(&dd->active_time, 0); - dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); - done: vfree(buf); bail:; } -/** - * qib_update_eeprom_log - copy active-time and error counters to eeprom - * @dd: the qlogic_ib device - * - * Although the time is kept as seconds in the qib_devdata struct, it is - * rounded to hours for re-write, as we have only 16 bits in EEPROM. - * First-cut code reads whole (expected) struct qib_flash, modifies, - * re-writes. Future direction: read/write only what we need, assuming - * that the EEPROM had to have been "good enough" for driver init, and - * if not, we aren't making it worse. - * - */ -int qib_update_eeprom_log(struct qib_devdata *dd) -{ - void *buf; - struct qib_flash *ifp; - int len, hi_water; - uint32_t new_time, new_hrs; - u8 csum; - int ret, idx; - unsigned long flags; - - /* first, check if we actually need to do anything. */ - ret = 0; - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - if (dd->eep_st_new_errs[idx]) { - ret = 1; - break; - } - } - new_time = atomic_read(&dd->active_time); - - if (ret == 0 && new_time < 3600) - goto bail; - - /* - * The quick-check above determined that there is something worthy - * of logging, so get current contents and do a more detailed idea. - * read full flash, not just currently used part, since it may have - * been written with a newer definition - */ - len = sizeof(struct qib_flash); - buf = vmalloc(len); - ret = 1; - if (!buf) { - qib_dev_err(dd, - "Couldn't allocate memory to read %u bytes from eeprom for logging\n", - len); - goto bail; - } - - /* Grab semaphore and read current EEPROM. If we get an - * error, let go, but if not, keep it until we finish write. 
- */ - ret = mutex_lock_interruptible(&dd->eep_lock); - if (ret) { - qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); - goto free_bail; - } - ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); - if (ret) { - mutex_unlock(&dd->eep_lock); - qib_dev_err(dd, "Unable read EEPROM for logging\n"); - goto free_bail; - } - ifp = (struct qib_flash *)buf; - - csum = flash_csum(ifp, 0); - if (csum != ifp->if_csum) { - mutex_unlock(&dd->eep_lock); - qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", - csum, ifp->if_csum); - ret = 1; - goto free_bail; - } - hi_water = 0; - spin_lock_irqsave(&dd->eep_st_lock, flags); - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - int new_val = dd->eep_st_new_errs[idx]; - if (new_val) { - /* - * If we have seen any errors, add to EEPROM values - * We need to saturate at 0xFF (255) and we also - * would need to adjust the checksum if we were - * trying to minimize EEPROM traffic - * Note that we add to actual current count in EEPROM, - * in case it was altered while we were running. - */ - new_val += ifp->if_errcntp[idx]; - if (new_val > 0xFF) - new_val = 0xFF; - if (ifp->if_errcntp[idx] != new_val) { - ifp->if_errcntp[idx] = new_val; - hi_water = offsetof(struct qib_flash, - if_errcntp) + idx; - } - /* - * update our shadow (used to minimize EEPROM - * traffic), to match what we are about to write. - */ - dd->eep_st_errs[idx] = new_val; - dd->eep_st_new_errs[idx] = 0; - } - } - /* - * Now update active-time. We would like to round to the nearest hour - * but unless atomic_t are sure to be proper signed ints we cannot, - * because we need to account for what we "transfer" to EEPROM and - * if we log an hour at 31 minutes, then we would need to set - * active_time to -29 to accurately count the _next_ hour. - */ - if (new_time >= 3600) { - new_hrs = new_time / 3600; - atomic_sub((new_hrs * 3600), &dd->active_time); - new_hrs += dd->eep_hrs; - if (new_hrs > 0xFFFF) - new_hrs = 0xFFFF; - dd->eep_hrs = new_hrs; - if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { - ifp->if_powerhour[0] = new_hrs & 0xFF; - hi_water = offsetof(struct qib_flash, if_powerhour); - } - if ((new_hrs >> 8) != ifp->if_powerhour[1]) { - ifp->if_powerhour[1] = new_hrs >> 8; - hi_water = offsetof(struct qib_flash, if_powerhour) + 1; - } - } - /* - * There is a tiny possibility that we could somehow fail to write - * the EEPROM after updating our shadows, but problems from holding - * the spinlock too long are a much bigger issue. - */ - spin_unlock_irqrestore(&dd->eep_st_lock, flags); - if (hi_water) { - /* we made some change to the data, uopdate cksum and write */ - csum = flash_csum(ifp, 1); - ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); - } - mutex_unlock(&dd->eep_lock); - if (ret) - qib_dev_err(dd, "Failed updating EEPROM\n"); - -free_bail: - vfree(buf); -bail: - return ret; -} - -/** - * qib_inc_eeprom_err - increment one of the four error counters - * that are logged to EEPROM. - * @dd: the qlogic_ib device - * @eidx: 0..3, the counter to increment - * @incr: how much to add - * - * Each counter is 8-bits, and saturates at 255 (0xFF). They - * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() - * is called, but it can only be called in a context that allows sleep. - * This function can be called even at interrupt level. 
- */ -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) -{ - uint new_val; - unsigned long flags; - - spin_lock_irqsave(&dd->eep_st_lock, flags); - new_val = dd->eep_st_new_errs[eidx] + incr; - if (new_val > 255) - new_val = 255; - dd->eep_st_new_errs[eidx] = new_val; - spin_unlock_irqrestore(&dd->eep_st_lock, flags); -} --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_file_ops.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1578,7 +1578,7 @@ struct qib_ctxtdata *rcd = fd->rcd; struct qib_devdata *dd = rcd->dd; - if (dd->flags & QIB_HAS_SEND_DMA) + if (dd->flags & QIB_HAS_SEND_DMA) { fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, dd->unit, @@ -1586,6 +1586,7 @@ fd->subctxt); if (!fd->pq) return -ENOMEM; + } return 0; } --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_iba6120.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2682,8 +2682,6 @@ spin_lock_irqsave(&dd->eep_st_lock, flags); traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(5, &dd->active_time); /* S/B #define */ spin_unlock_irqrestore(&dd->eep_st_lock, flags); qib_chk_6120_errormask(dd); --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_iba7220.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3299,8 +3299,6 @@ spin_lock_irqsave(&dd->eep_st_lock, flags); traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(5, &dd->active_time); /* S/B #define */ spin_unlock_irqrestore(&dd->eep_st_lock, flags); done: mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_iba7322.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_iba7322.c @@ -2395,6 +2395,11 @@ qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a); qib_write_kreg(dd, kr_scratch, 0ULL); + /* ensure previous Tx parameters are not still forced */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); + if (qib_compat_ddr_negotiate) { ppd->cpspec->ibdeltainprog = 1; ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd, @@ -5186,8 +5191,6 @@ spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); traffic_wds -= ppd->dd->traffic_wds; ppd->dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & QIB_IB_QDR) && --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_init.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_init.c @@ -922,7 +922,6 @@ } } - qib_update_eeprom_log(dd); } /** @@ -1097,14 +1096,10 @@ int ret; dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); - if (!dd) { - dd = ERR_PTR(-ENOMEM); - goto bail; - } + if (!dd) + return ERR_PTR(-ENOMEM); -#ifdef CONFIG_DEBUG_FS - qib_dbg_ibdev_init(&dd->verbs_dev); -#endif + INIT_LIST_HEAD(&dd->list); idr_preload(GFP_KERNEL); spin_lock_irqsave(&qib_devs_lock, flags); @@ -1121,11 +1116,6 @@ if (ret < 0) { qib_early_err(&pdev->dev, "Could not allocate unit ID: error %d\n", -ret); -#ifdef CONFIG_DEBUG_FS - qib_dbg_ibdev_exit(&dd->verbs_dev); -#endif - ib_dealloc_device(&dd->verbs_dev.ibdev); - dd = ERR_PTR(ret); goto bail; } @@ -1139,9 +1129,15 @@ qib_early_err(&pdev->dev, "Could not alloc cpulist info, cpu affinity might be wrong\n"); } - -bail: +#ifdef CONFIG_DEBUG_FS + 
qib_dbg_ibdev_init(&dd->verbs_dev); +#endif return dd; +bail: + if (!list_empty(&dd->list)) + list_del_init(&dd->list); + ib_dealloc_device(&dd->verbs_dev.ibdev); + return ERR_PTR(ret);; } /* --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_mad.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_mad.c @@ -1028,7 +1028,7 @@ event.event = IB_EVENT_PKEY_CHANGE; event.device = &dd->verbs_dev.ibdev; - event.element.port_num = 1; + event.element.port_num = port; ib_dispatch_event(&event); } return 0; --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_qp.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_qp.c @@ -585,7 +585,7 @@ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, - attr_mask)) + attr_mask, IB_LINK_LAYER_UNSPECIFIED)) goto inval; if (attr_mask & IB_QP_AV) { @@ -1324,7 +1324,6 @@ struct qib_qp *pqp = iter->qp; struct qib_qp *qp; - rcu_read_lock(); for (; n < dev->qp_table_size; n++) { if (pqp) qp = rcu_dereference(pqp->next); @@ -1332,18 +1331,11 @@ qp = rcu_dereference(dev->qp_table[n]); pqp = qp; if (qp) { - if (iter->qp) - atomic_dec(&iter->qp->refcount); - atomic_inc(&qp->refcount); - rcu_read_unlock(); iter->qp = qp; iter->n = n; return 0; } } - rcu_read_unlock(); - if (iter->qp) - atomic_dec(&iter->qp->refcount); return ret; } --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_sysfs.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_sysfs.c @@ -611,28 +611,6 @@ return ret < 0 ? ret : count; } -static ssize_t show_logged_errs(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); - struct qib_devdata *dd = dd_from_dev(dev); - int idx, count; - - /* force consistency with actual EEPROM */ - if (qib_update_eeprom_log(dd) != 0) - return -ENXIO; - - count = 0; - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", - dd->eep_st_errs[idx], - idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); - } - - return count; -} - /* * Dump tempsense regs. in decimal, to ease shell-scripts. */ @@ -679,7 +657,6 @@ static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); -static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); @@ -693,7 +670,6 @@ &dev_attr_nfreectxts, &dev_attr_serial, &dev_attr_boardversion, - &dev_attr_logged_errors, &dev_attr_tempsense, &dev_attr_localbus_info, &dev_attr_chip_reset, --- linux-3.13.0.orig/drivers/infiniband/hw/qib/qib_ud.c +++ linux-3.13.0/drivers/infiniband/hw/qib/qib_ud.c @@ -57,13 +57,20 @@ struct qib_sge *sge; struct ib_wc wc; u32 length; + enum ib_qp_type sqptype, dqptype; qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); if (!qp) { ibp->n_pkt_drops++; return; } - if (qp->ibqp.qp_type != sqp->ibqp.qp_type || + + sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ? + IB_QPT_UD : sqp->ibqp.qp_type; + dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? 
+ IB_QPT_UD : qp->ibqp.qp_type; + + if (dqptype != sqptype || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { ibp->n_pkt_drops++; goto drop; --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/Kconfig +++ linux-3.13.0/drivers/infiniband/hw/usnic/Kconfig @@ -0,0 +1,10 @@ +config INFINIBAND_USNIC + tristate "Verbs support for Cisco VIC" + depends on NETDEVICES && ETHERNET && INET && PCI && INTEL_IOMMU + select ENIC + select NET_VENDOR_CISCO + select PCI_IOV + select INFINIBAND_USER_ACCESS + ---help--- + This is a low-level driver for Cisco's Virtual Interface + Cards (VICs), including the VIC 1240 and 1280 cards. --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/Makefile +++ linux-3.13.0/drivers/infiniband/hw/usnic/Makefile @@ -0,0 +1,15 @@ +ccflags-y := -Idrivers/net/ethernet/cisco/enic + +obj-$(CONFIG_INFINIBAND_USNIC)+= usnic_verbs.o + +usnic_verbs-y=\ +usnic_fwd.o \ +usnic_transport.o \ +usnic_uiom.o \ +usnic_uiom_interval_tree.o \ +usnic_vnic.o \ +usnic_ib_main.o \ +usnic_ib_qp_grp.o \ +usnic_ib_sysfs.o \ +usnic_ib_verbs.o \ +usnic_debugfs.o \ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef USNIC_H_ +#define USNIC_H_ + +#define DRV_NAME "usnic_verbs" + +#define PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC 0x00cf /* User space NIC */ + +#define DRV_VERSION "1.0.3" +#define DRV_RELDATE "December 19, 2013" + +#endif /* USNIC_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_abi.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_abi.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + + +#ifndef USNIC_ABI_H +#define USNIC_ABI_H + +/* ABI between userspace and kernel */ +#define USNIC_UVERBS_ABI_VERSION 4 + +#define USNIC_QP_GRP_MAX_WQS 8 +#define USNIC_QP_GRP_MAX_RQS 8 +#define USNIC_QP_GRP_MAX_CQS 16 + +enum usnic_transport_type { + USNIC_TRANSPORT_UNKNOWN = 0, + USNIC_TRANSPORT_ROCE_CUSTOM = 1, + USNIC_TRANSPORT_IPV4_UDP = 2, + USNIC_TRANSPORT_MAX = 3, +}; + +struct usnic_transport_spec { + enum usnic_transport_type trans_type; + union { + struct { + uint16_t port_num; + } usnic_roce; + struct { + uint32_t sock_fd; + } udp; + }; +}; + +struct usnic_ib_create_qp_cmd { + struct usnic_transport_spec spec; +}; + +/*TODO: Future - usnic_modify_qp needs to pass in generic filters */ +struct usnic_ib_create_qp_resp { + u32 vfid; + u32 qp_grp_id; + u64 bar_bus_addr; + u32 bar_len; +/* + * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface + * expands the scope of ABI to many files. + */ + u32 wq_cnt; + u32 rq_cnt; + u32 cq_cnt; + u32 wq_idx[USNIC_QP_GRP_MAX_WQS]; + u32 rq_idx[USNIC_QP_GRP_MAX_RQS]; + u32 cq_idx[USNIC_QP_GRP_MAX_CQS]; + u32 transport; + u32 reserved[9]; +}; + +#endif /* USNIC_ABI_H */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef USNIC_CMN_PKT_HDR_H +#define USNIC_CMN_PKT_HDR_H + +#define USNIC_ROCE_ETHERTYPE (0x8915) +#define USNIC_ROCE_GRH_VER (8) +#define USNIC_PROTO_VER (1) +#define USNIC_ROCE_GRH_VER_SHIFT (4) + +#endif /* USNIC_COMMON_PKT_HDR_H */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_common_util.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_common_util.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef USNIC_CMN_UTIL_H +#define USNIC_CMN_UTIL_H + +static inline void +usnic_mac_to_gid(const char *const mac, char *raw_gid) +{ + raw_gid[0] = 0xfe; + raw_gid[1] = 0x80; + memset(&raw_gid[2], 0, 6); + raw_gid[8] = mac[0]^2; + raw_gid[9] = mac[1]; + raw_gid[10] = mac[2]; + raw_gid[11] = 0xff; + raw_gid[12] = 0xfe; + raw_gid[13] = mac[3]; + raw_gid[14] = mac[4]; + raw_gid[15] = mac[5]; +} + +static inline void +usnic_mac_ip_to_gid(const char *const mac, const __be32 inaddr, char *raw_gid) +{ + raw_gid[0] = 0xfe; + raw_gid[1] = 0x80; + memset(&raw_gid[2], 0, 2); + memcpy(&raw_gid[4], &inaddr, 4); + raw_gid[8] = mac[0]^2; + raw_gid[9] = mac[1]; + raw_gid[10] = mac[2]; + raw_gid[11] = 0xff; + raw_gid[12] = 0xfe; + raw_gid[13] = mac[3]; + raw_gid[14] = mac[4]; + raw_gid[15] = mac[5]; +} + +static inline void +usnic_write_gid_if_id_from_mac(char *mac, char *raw_gid) +{ + raw_gid[8] = mac[0]^2; + raw_gid[9] = mac[1]; + raw_gid[10] = mac[2]; + raw_gid[11] = 0xff; + raw_gid[12] = 0xfe; + raw_gid[13] = mac[3]; + raw_gid[14] = mac[4]; + raw_gid[15] = mac[5]; +} + +#endif /* USNIC_COMMON_UTIL_H */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_debugfs.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_debugfs.c @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
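
For reference, the usnic_common_util.h helpers above derive the 16-byte GID that usNIC advertises directly from the netdev MAC address and, in usnic_mac_ip_to_gid(), the primary IPv4 address. The self-contained user-space sketch below only restates that byte layout to make the resulting GID format concrete; the example_* name and the sample MAC/IP values are illustrative.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Same layout as usnic_mac_ip_to_gid(): fe80 prefix in bytes 0-1, the
     * IPv4 address (network order) in bytes 4-7, and an EUI-64-style
     * interface id in bytes 8-15 built from the MAC with the
     * locally-administered bit flipped (mac[0] ^ 2) and ff:fe inserted.
     */
    static void example_mac_ip_to_gid(const uint8_t mac[6], uint32_t inaddr_be,
                                      uint8_t raw_gid[16])
    {
            memset(raw_gid, 0, 16);
            raw_gid[0] = 0xfe;
            raw_gid[1] = 0x80;
            memcpy(&raw_gid[4], &inaddr_be, 4);
            raw_gid[8]  = mac[0] ^ 2;
            raw_gid[9]  = mac[1];
            raw_gid[10] = mac[2];
            raw_gid[11] = 0xff;
            raw_gid[12] = 0xfe;
            raw_gid[13] = mac[3];
            raw_gid[14] = mac[4];
            raw_gid[15] = mac[5];
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x25, 0xb5, 0xaa, 0xbb, 0xcc };
            uint8_t gid[16];
            int i;

            example_mac_ip_to_gid(mac, htonl(0xc0a80a01) /* 192.168.10.1 */, gid);
            for (i = 0; i < 16; i++)
                    printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
            return 0;
    }

With these sample values the program prints fe:80:00:00:c0:a8:0a:01:02:25:b5:ff:fe:aa:bb:cc, i.e. a link-local style GID whose low eight bytes encode the MAC.
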
+ * + */ + +#include +#include + +#include "usnic.h" +#include "usnic_log.h" +#include "usnic_debugfs.h" +#include "usnic_ib_qp_grp.h" +#include "usnic_transport.h" + +static struct dentry *debugfs_root; +static struct dentry *flows_dentry; + +static ssize_t usnic_debugfs_buildinfo_read(struct file *f, char __user *data, + size_t count, loff_t *ppos) +{ + char buf[500]; + int res; + + if (*ppos > 0) + return 0; + + res = scnprintf(buf, sizeof(buf), + "version: %s\n" + "build date: %s\n", + DRV_VERSION, DRV_RELDATE); + + return simple_read_from_buffer(data, count, ppos, buf, res); +} + +static const struct file_operations usnic_debugfs_buildinfo_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = usnic_debugfs_buildinfo_read +}; + +static ssize_t flowinfo_read(struct file *f, char __user *data, + size_t count, loff_t *ppos) +{ + struct usnic_ib_qp_grp_flow *qp_flow; + int n; + int left; + char *ptr; + char buf[512]; + + qp_flow = f->private_data; + ptr = buf; + left = count; + + if (*ppos > 0) + return 0; + + spin_lock(&qp_flow->qp_grp->lock); + n = scnprintf(ptr, left, + "QP Grp ID: %d Transport: %s ", + qp_flow->qp_grp->grp_id, + usnic_transport_to_str(qp_flow->trans_type)); + UPDATE_PTR_LEFT(n, ptr, left); + if (qp_flow->trans_type == USNIC_TRANSPORT_ROCE_CUSTOM) { + n = scnprintf(ptr, left, "Port_Num:%hu\n", + qp_flow->usnic_roce.port_num); + UPDATE_PTR_LEFT(n, ptr, left); + } else if (qp_flow->trans_type == USNIC_TRANSPORT_IPV4_UDP) { + n = usnic_transport_sock_to_str(ptr, left, + qp_flow->udp.sock); + UPDATE_PTR_LEFT(n, ptr, left); + n = scnprintf(ptr, left, "\n"); + UPDATE_PTR_LEFT(n, ptr, left); + } + spin_unlock(&qp_flow->qp_grp->lock); + + return simple_read_from_buffer(data, count, ppos, buf, ptr - buf); +} + +static const struct file_operations flowinfo_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = flowinfo_read, +}; + +void usnic_debugfs_init(void) +{ + debugfs_root = debugfs_create_dir(DRV_NAME, NULL); + if (IS_ERR(debugfs_root)) { + usnic_err("Failed to create debugfs root dir, check if debugfs is enabled in kernel configuration\n"); + goto out_clear_root; + } + + flows_dentry = debugfs_create_dir("flows", debugfs_root); + if (IS_ERR_OR_NULL(flows_dentry)) { + usnic_err("Failed to create debugfs flow dir with err %ld\n", + PTR_ERR(flows_dentry)); + goto out_free_root; + } + + debugfs_create_file("build-info", S_IRUGO, debugfs_root, + NULL, &usnic_debugfs_buildinfo_ops); + return; + +out_free_root: + debugfs_remove_recursive(debugfs_root); +out_clear_root: + debugfs_root = NULL; +} + +void usnic_debugfs_exit(void) +{ + if (!debugfs_root) + return; + + debugfs_remove_recursive(debugfs_root); + debugfs_root = NULL; +} + +void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow) +{ + if (IS_ERR_OR_NULL(flows_dentry)) + return; + + scnprintf(qp_flow->dentry_name, sizeof(qp_flow->dentry_name), + "%u", qp_flow->flow->flow_id); + qp_flow->dbgfs_dentry = debugfs_create_file(qp_flow->dentry_name, + S_IRUGO, + flows_dentry, + qp_flow, + &flowinfo_ops); + if (IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) { + usnic_err("Failed to create dbg fs entry for flow %u\n", + qp_flow->flow->flow_id); + } +} + +void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow) +{ + if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry)) + debugfs_remove(qp_flow->dbgfs_dentry); +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_debugfs.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_debugfs.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. 
All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ +#ifndef USNIC_DEBUGFS_H_ +#define USNIC_DEBUGFS_H_ + +#include "usnic_ib_qp_grp.h" + +void usnic_debugfs_init(void); + +void usnic_debugfs_exit(void); +void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow); +void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow); + +#endif /*!USNIC_DEBUGFS_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_fwd.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_fwd.c @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
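
For reference, usnic_debugfs.c above publishes a read-only "build-info" file and one file per flow under "flows/", all rooted in a debugfs directory named after DRV_NAME ("usnic_verbs"). A hedged user-space sketch of reading the build information follows; the /sys/kernel/debug path is only the conventional debugfs mount point, assumed here rather than established by the patch.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumes debugfs is mounted at /sys/kernel/debug. */
            const char *path = "/sys/kernel/debug/usnic_verbs/build-info";
            char buf[512];
            ssize_t n;
            int fd;

            fd = open(path, O_RDONLY);
            if (fd < 0) {
                    perror("open build-info");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);     /* "version: ...\nbuild date: ..." */
            }
            close(fd);
            return 0;
    }

The per-flow files under flows/ are read the same way and report the QP group id, the transport type and, for UDP flows, the bound socket address.
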
+ * + */ +#include +#include + +#include "enic_api.h" +#include "usnic_common_pkt_hdr.h" +#include "usnic_fwd.h" +#include "usnic_log.h" + +static int usnic_fwd_devcmd_locked(struct usnic_fwd_dev *ufdev, int vnic_idx, + enum vnic_devcmd_cmd cmd, u64 *a0, + u64 *a1) +{ + int status; + struct net_device *netdev = ufdev->netdev; + + lockdep_assert_held(&ufdev->lock); + + status = enic_api_devcmd_proxy_by_index(netdev, + vnic_idx, + cmd, + a0, a1, + 1000); + if (status) { + if (status == ERR_EINVAL && cmd == CMD_DEL_FILTER) { + usnic_dbg("Dev %s vnic idx %u cmd %u already deleted", + ufdev->name, vnic_idx, cmd); + } else { + usnic_err("Dev %s vnic idx %u cmd %u failed with status %d\n", + ufdev->name, vnic_idx, cmd, + status); + } + } else { + usnic_dbg("Dev %s vnic idx %u cmd %u success", + ufdev->name, vnic_idx, cmd); + } + + return status; +} + +static int usnic_fwd_devcmd(struct usnic_fwd_dev *ufdev, int vnic_idx, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1) +{ + int status; + + spin_lock(&ufdev->lock); + status = usnic_fwd_devcmd_locked(ufdev, vnic_idx, cmd, a0, a1); + spin_unlock(&ufdev->lock); + + return status; +} + +struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev) +{ + struct usnic_fwd_dev *ufdev; + + ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL); + if (!ufdev) + return NULL; + + ufdev->pdev = pdev; + ufdev->netdev = pci_get_drvdata(pdev); + spin_lock_init(&ufdev->lock); + strncpy(ufdev->name, netdev_name(ufdev->netdev), + sizeof(ufdev->name) - 1); + + return ufdev; +} + +void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev) +{ + kfree(ufdev); +} + +void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]) +{ + spin_lock(&ufdev->lock); + memcpy(&ufdev->mac, mac, sizeof(ufdev->mac)); + spin_unlock(&ufdev->lock); +} + +int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr) +{ + int status; + + spin_lock(&ufdev->lock); + if (ufdev->inaddr == 0) { + ufdev->inaddr = inaddr; + status = 0; + } else { + status = -EFAULT; + } + spin_unlock(&ufdev->lock); + + return status; +} + +void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev) +{ + spin_lock(&ufdev->lock); + ufdev->inaddr = 0; + spin_unlock(&ufdev->lock); +} + +void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev) +{ + spin_lock(&ufdev->lock); + ufdev->link_up = 1; + spin_unlock(&ufdev->lock); +} + +void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev) +{ + spin_lock(&ufdev->lock); + ufdev->link_up = 0; + spin_unlock(&ufdev->lock); +} + +void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu) +{ + spin_lock(&ufdev->lock); + ufdev->mtu = mtu; + spin_unlock(&ufdev->lock); +} + +static int usnic_fwd_dev_ready_locked(struct usnic_fwd_dev *ufdev) +{ + lockdep_assert_held(&ufdev->lock); + + if (!ufdev->link_up) + return -EPERM; + + return 0; +} + +static int validate_filter_locked(struct usnic_fwd_dev *ufdev, + struct filter *filter) +{ + + lockdep_assert_held(&ufdev->lock); + + if (filter->type == FILTER_IPV4_5TUPLE) { + if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_AD)) + return -EACCES; + if (!(filter->u.ipv4.flags & FILTER_FIELD_5TUP_DST_PT)) + return -EBUSY; + else if (ufdev->inaddr == 0) + return -EINVAL; + else if (filter->u.ipv4.dst_port == 0) + return -ERANGE; + else if (ntohl(ufdev->inaddr) != filter->u.ipv4.dst_addr) + return -EFAULT; + else + return 0; + } + + return 0; +} + +static void fill_tlv(struct filter_tlv *tlv, struct filter *filter, + struct filter_action *action) +{ + tlv->type = CLSF_TLV_FILTER; + tlv->length = sizeof(struct filter); + *((struct filter 
*)&tlv->val) = *filter; + + tlv = (struct filter_tlv *)((char *)tlv + sizeof(struct filter_tlv) + + sizeof(struct filter)); + tlv->type = CLSF_TLV_ACTION; + tlv->length = sizeof(struct filter_action); + *((struct filter_action *)&tlv->val) = *action; +} + +struct usnic_fwd_flow* +usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter, + struct usnic_filter_action *uaction) +{ + struct filter_tlv *tlv; + struct pci_dev *pdev; + struct usnic_fwd_flow *flow; + uint64_t a0, a1; + uint64_t tlv_size; + dma_addr_t tlv_pa; + int status; + + pdev = ufdev->pdev; + tlv_size = (2*sizeof(struct filter_tlv) + sizeof(struct filter) + + sizeof(struct filter_action)); + + flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + if (!flow) + return ERR_PTR(-ENOMEM); + + tlv = pci_alloc_consistent(pdev, tlv_size, &tlv_pa); + if (!tlv) { + usnic_err("Failed to allocate memory\n"); + status = -ENOMEM; + goto out_free_flow; + } + + fill_tlv(tlv, filter, &uaction->action); + + spin_lock(&ufdev->lock); + status = usnic_fwd_dev_ready_locked(ufdev); + if (status) { + usnic_err("Forwarding dev %s not ready with status %d\n", + ufdev->name, status); + goto out_free_tlv; + } + + status = validate_filter_locked(ufdev, filter); + if (status) { + usnic_err("Failed to validate filter with status %d\n", + status); + goto out_free_tlv; + } + + /* Issue Devcmd */ + a0 = tlv_pa; + a1 = tlv_size; + status = usnic_fwd_devcmd_locked(ufdev, uaction->vnic_idx, + CMD_ADD_FILTER, &a0, &a1); + if (status) { + usnic_err("VF %s Filter add failed with status:%d", + ufdev->name, status); + status = -EFAULT; + goto out_free_tlv; + } else { + usnic_dbg("VF %s FILTER ID:%llu", ufdev->name, a0); + } + + flow->flow_id = (uint32_t) a0; + flow->vnic_idx = uaction->vnic_idx; + flow->ufdev = ufdev; + +out_free_tlv: + spin_unlock(&ufdev->lock); + pci_free_consistent(pdev, tlv_size, tlv, tlv_pa); + if (!status) + return flow; +out_free_flow: + kfree(flow); + return ERR_PTR(status); +} + +int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow) +{ + int status; + u64 a0, a1; + + a0 = flow->flow_id; + + status = usnic_fwd_devcmd(flow->ufdev, flow->vnic_idx, + CMD_DEL_FILTER, &a0, &a1); + if (status) { + if (status == ERR_EINVAL) { + usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d", + flow->flow_id, flow->vnic_idx, + flow->ufdev->name, status); + } else { + usnic_err("PF %s VF Idx %u Filter: %u FILTER DELETE failed with status %d", + flow->ufdev->name, flow->vnic_idx, + flow->flow_id, status); + } + status = 0; + /* + * Log the error and fake success to the caller because if + * a flow fails to be deleted in the firmware, it is an + * unrecoverable error. 
+ */ + } else { + usnic_dbg("PF %s VF Idx %u Filter: %u FILTER DELETED", + flow->ufdev->name, flow->vnic_idx, + flow->flow_id); + } + + kfree(flow); + return status; +} + +int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx) +{ + int status; + struct net_device *pf_netdev; + u64 a0, a1; + + pf_netdev = ufdev->netdev; + a0 = qp_idx; + a1 = CMD_QP_RQWQ; + + status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE, + &a0, &a1); + if (status) { + usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d", + netdev_name(pf_netdev), + vnic_idx, + qp_idx, + status); + } else { + usnic_dbg("PF %s VNIC Index %u RQ Index: %u ENABLED", + netdev_name(pf_netdev), + vnic_idx, qp_idx); + } + + return status; +} + +int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx) +{ + int status; + u64 a0, a1; + struct net_device *pf_netdev; + + pf_netdev = ufdev->netdev; + a0 = qp_idx; + a1 = CMD_QP_RQWQ; + + status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE, + &a0, &a1); + if (status) { + usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d", + netdev_name(pf_netdev), + vnic_idx, + qp_idx, + status); + } else { + usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED", + netdev_name(pf_netdev), + vnic_idx, + qp_idx); + } + + return status; +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_fwd.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_fwd.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef USNIC_FWD_H_ +#define USNIC_FWD_H_ + +#include +#include +#include +#include + +#include "usnic_abi.h" +#include "usnic_common_pkt_hdr.h" +#include "vnic_devcmd.h" + +struct usnic_fwd_dev { + struct pci_dev *pdev; + struct net_device *netdev; + spinlock_t lock; + /* + * The following fields can be read directly off the device. + * However, they should be set by a accessor function, except name, + * which cannot be changed. 
+ */ + bool link_up; + char mac[ETH_ALEN]; + unsigned int mtu; + __be32 inaddr; + char name[IFNAMSIZ+1]; +}; + +struct usnic_fwd_flow { + uint32_t flow_id; + struct usnic_fwd_dev *ufdev; + unsigned int vnic_idx; +}; + +struct usnic_filter_action { + int vnic_idx; + struct filter_action action; +}; + +struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev); +void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev); + +void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]); +int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr); +void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev); +void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev); +void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev); +void usnic_fwd_set_mtu(struct usnic_fwd_dev *ufdev, unsigned int mtu); + +/* + * Allocate a flow on this forwarding device. Whoever calls this function, + * must monitor netdev events on ufdev's netdevice. If NETDEV_REBOOT or + * NETDEV_DOWN is seen, flow will no longer function and must be + * immediately freed by calling usnic_dealloc_flow. + */ +struct usnic_fwd_flow* +usnic_fwd_alloc_flow(struct usnic_fwd_dev *ufdev, struct filter *filter, + struct usnic_filter_action *action); +int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow); +int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx); +int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx); + +static inline void usnic_fwd_init_usnic_filter(struct filter *filter, + uint32_t usnic_id) +{ + filter->type = FILTER_USNIC_ID; + filter->u.usnic.ethtype = USNIC_ROCE_ETHERTYPE; + filter->u.usnic.flags = FILTER_FIELD_USNIC_ETHTYPE | + FILTER_FIELD_USNIC_ID | + FILTER_FIELD_USNIC_PROTO; + filter->u.usnic.proto_version = (USNIC_ROCE_GRH_VER << + USNIC_ROCE_GRH_VER_SHIFT) | + USNIC_PROTO_VER; + filter->u.usnic.usnic_id = usnic_id; +} + +static inline void usnic_fwd_init_udp_filter(struct filter *filter, + uint32_t daddr, uint16_t dport) +{ + filter->type = FILTER_IPV4_5TUPLE; + filter->u.ipv4.flags = FILTER_FIELD_5TUP_PROTO; + filter->u.ipv4.protocol = PROTO_UDP; + + if (daddr) { + filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_AD; + filter->u.ipv4.dst_addr = daddr; + } + + if (dport) { + filter->u.ipv4.flags |= FILTER_FIELD_5TUP_DST_PT; + filter->u.ipv4.dst_port = dport; + } +} + +#endif /* !USNIC_FWD_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
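
For reference, the usnic_fwd.h declarations and inline filter initialisers above are what the QP-group code later in this patch chains together to steer a UDP flow onto a receive queue. The kernel-side sketch below condenses that sequence, assuming this driver's headers are available; example_steer_udp_to_rq() and its vnic_idx/rq_idx/daddr/dport arguments are placeholders, and the real users are create_udp_flow() and init_filter_action() in usnic_ib_qp_grp.c.

    #include <linux/string.h>

    #include "usnic_fwd.h"

    /*
     * Sketch only: mirrors create_udp_flow()/init_filter_action().  The
     * caller checks the result with IS_ERR_OR_NULL() and must release it
     * with usnic_fwd_dealloc_flow() when the QP group is torn down.
     */
    static struct usnic_fwd_flow *
    example_steer_udp_to_rq(struct usnic_fwd_dev *ufdev, int vnic_idx,
                            int rq_idx, uint32_t daddr, uint16_t dport)
    {
            struct filter filter;
            struct usnic_filter_action uaction;

            /* Match UDP packets destined to daddr:dport ... */
            usnic_fwd_init_udp_filter(&filter, daddr, dport);

            /* ... and steer them to one RQ of this VF's vnic. */
            memset(&uaction, 0, sizeof(uaction));
            uaction.vnic_idx = vnic_idx;
            uaction.action.type = FILTER_ACTION_RQ_STEERING;
            uaction.action.u.rq_idx = rq_idx;

            return usnic_fwd_alloc_flow(ufdev, &filter, &uaction);
    }
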
+ * + */ + +#ifndef USNIC_IB_H_ +#define USNIC_IB_H_ + +#include +#include + +#include + + +#include "usnic.h" +#include "usnic_abi.h" +#include "usnic_vnic.h" + +#define USNIC_IB_PORT_CNT 1 +#define USNIC_IB_NUM_COMP_VECTORS 1 + +extern unsigned int usnic_ib_share_vf; + +struct usnic_ib_ucontext { + struct ib_ucontext ibucontext; + /* Protected by usnic_ib_dev->usdev_lock */ + struct list_head qp_grp_list; + struct list_head link; +}; + +struct usnic_ib_pd { + struct ib_pd ibpd; + struct usnic_uiom_pd *umem_pd; +}; + +struct usnic_ib_mr { + struct ib_mr ibmr; + struct usnic_uiom_reg *umem; +}; + +struct usnic_ib_dev { + struct ib_device ib_dev; + struct pci_dev *pdev; + struct net_device *netdev; + struct usnic_fwd_dev *ufdev; + struct list_head ib_dev_link; + struct list_head vf_dev_list; + struct list_head ctx_list; + struct mutex usdev_lock; + + /* provisioning information */ + struct kref vf_cnt; + unsigned int vf_res_cnt[USNIC_VNIC_RES_TYPE_MAX]; + + /* sysfs vars for QPN reporting */ + struct kobject *qpn_kobj; +}; + +struct usnic_ib_vf { + struct usnic_ib_dev *pf; + spinlock_t lock; + struct usnic_vnic *vnic; + unsigned int qp_grp_ref_cnt; + struct usnic_ib_pd *pd; + struct list_head link; +}; + +static inline +struct usnic_ib_dev *to_usdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct usnic_ib_dev, ib_dev); +} + +static inline +struct usnic_ib_ucontext *to_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext); +} + +static inline +struct usnic_ib_pd *to_upd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct usnic_ib_pd, ibpd); +} + +static inline +struct usnic_ib_ucontext *to_uucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct usnic_ib_ucontext, ibucontext); +} + +static inline +struct usnic_ib_mr *to_umr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct usnic_ib_mr, ibmr); +} +void usnic_ib_log_vf(struct usnic_ib_vf *vf); + +#define UPDATE_PTR_LEFT(N, P, L) \ +do { \ + L -= (N); \ + P += (N); \ +} while (0) + +#endif /* USNIC_IB_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -0,0 +1,682 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Author: Upinder Malhi + * Author: Anant Deepak + * Author: Cesare Cantu' + * Author: Jeff Squyres + * Author: Kiran Thirumalai + * Author: Xuyang Wang + * Author: Reese Faucette + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "usnic_abi.h" +#include "usnic_common_util.h" +#include "usnic_ib.h" +#include "usnic_ib_qp_grp.h" +#include "usnic_log.h" +#include "usnic_fwd.h" +#include "usnic_debugfs.h" +#include "usnic_ib_verbs.h" +#include "usnic_transport.h" +#include "usnic_uiom.h" +#include "usnic_ib_sysfs.h" + +unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR; +unsigned int usnic_ib_share_vf = 1; + +static const char usnic_version[] = + DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v" + DRV_VERSION " (" DRV_RELDATE ")\n"; + +static DEFINE_MUTEX(usnic_ib_ibdev_list_lock); +static LIST_HEAD(usnic_ib_ibdev_list); + +/* Callback dump funcs */ +static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz) +{ + struct usnic_ib_vf *vf = obj; + return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name); +} +/* End callback dump funcs */ + +static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz) +{ + usnic_vnic_dump(vf->vnic, buf, buf_sz, vf, + usnic_ib_dump_vf_hdr, + usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows); +} + +void usnic_ib_log_vf(struct usnic_ib_vf *vf) +{ + char buf[1000]; + usnic_ib_dump_vf(vf, buf, sizeof(buf)); + usnic_dbg("%s\n", buf); +} + +/* Start of netdev section */ +static inline const char *usnic_ib_netdev_event_to_string(unsigned long event) +{ + const char *event2str[] = {"NETDEV_NONE", "NETDEV_UP", "NETDEV_DOWN", + "NETDEV_REBOOT", "NETDEV_CHANGE", + "NETDEV_REGISTER", "NETDEV_UNREGISTER", "NETDEV_CHANGEMTU", + "NETDEV_CHANGEADDR", "NETDEV_GOING_DOWN", "NETDEV_FEAT_CHANGE", + "NETDEV_BONDING_FAILOVER", "NETDEV_PRE_UP", + "NETDEV_PRE_TYPE_CHANGE", "NETDEV_POST_TYPE_CHANGE", + "NETDEV_POST_INT", "NETDEV_UNREGISTER_FINAL", "NETDEV_RELEASE", + "NETDEV_NOTIFY_PEERS", "NETDEV_JOIN" + }; + + if (event >= ARRAY_SIZE(event2str)) + return "UNKNOWN_NETDEV_EVENT"; + else + return event2str[event]; +} + +static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev) +{ + struct usnic_ib_ucontext *ctx; + struct usnic_ib_qp_grp *qp_grp; + enum ib_qp_state cur_state; + int status; + + BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); + + list_for_each_entry(ctx, &us_ibdev->ctx_list, link) { + list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) { + cur_state = qp_grp->state; + if (cur_state == IB_QPS_INIT || + cur_state == IB_QPS_RTR || + cur_state == IB_QPS_RTS) { + status = usnic_ib_qp_grp_modify(qp_grp, + IB_QPS_ERR, + NULL); + if (status) { + usnic_err("Failed to transistion qp grp %u from %s to %s\n", + qp_grp->grp_id, + usnic_ib_qp_grp_state_to_string + (cur_state), + usnic_ib_qp_grp_state_to_string + (IB_QPS_ERR)); + } + } + } + } +} + +static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev, + unsigned long event) +{ + struct net_device *netdev; + struct ib_event ib_event; + + memset(&ib_event, 0, sizeof(ib_event)); + + mutex_lock(&us_ibdev->usdev_lock); + netdev = us_ibdev->netdev; + switch (event) { + case NETDEV_REBOOT: + usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name); + usnic_ib_qp_grp_modify_active_to_err(us_ibdev); + ib_event.event = IB_EVENT_PORT_ERR; + ib_event.device = &us_ibdev->ib_dev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); + break; + case NETDEV_UP: + case NETDEV_DOWN: + case NETDEV_CHANGE: + if 
(!us_ibdev->ufdev->link_up && + netif_carrier_ok(netdev)) { + usnic_fwd_carrier_up(us_ibdev->ufdev); + usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name); + ib_event.event = IB_EVENT_PORT_ACTIVE; + ib_event.device = &us_ibdev->ib_dev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); + } else if (us_ibdev->ufdev->link_up && + !netif_carrier_ok(netdev)) { + usnic_fwd_carrier_down(us_ibdev->ufdev); + usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name); + usnic_ib_qp_grp_modify_active_to_err(us_ibdev); + ib_event.event = IB_EVENT_PORT_ERR; + ib_event.device = &us_ibdev->ib_dev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); + } else { + usnic_dbg("Ignoring %s on %s\n", + usnic_ib_netdev_event_to_string(event), + us_ibdev->ib_dev.name); + } + break; + case NETDEV_CHANGEADDR: + if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr, + sizeof(us_ibdev->ufdev->mac))) { + usnic_dbg("Ignoring addr change on %s\n", + us_ibdev->ib_dev.name); + } else { + usnic_info(" %s old mac: %pM new mac: %pM\n", + us_ibdev->ib_dev.name, + us_ibdev->ufdev->mac, + netdev->dev_addr); + usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr); + usnic_ib_qp_grp_modify_active_to_err(us_ibdev); + ib_event.event = IB_EVENT_GID_CHANGE; + ib_event.device = &us_ibdev->ib_dev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); + } + + break; + case NETDEV_CHANGEMTU: + if (us_ibdev->ufdev->mtu != netdev->mtu) { + usnic_info("MTU Change on %s old: %u new: %u\n", + us_ibdev->ib_dev.name, + us_ibdev->ufdev->mtu, netdev->mtu); + usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu); + usnic_ib_qp_grp_modify_active_to_err(us_ibdev); + } else { + usnic_dbg("Ignoring MTU change on %s\n", + us_ibdev->ib_dev.name); + } + break; + default: + usnic_dbg("Ignoring event %s on %s", + usnic_ib_netdev_event_to_string(event), + us_ibdev->ib_dev.name); + } + mutex_unlock(&us_ibdev->usdev_lock); +} + +static int usnic_ib_netdevice_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct usnic_ib_dev *us_ibdev; + + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + + mutex_lock(&usnic_ib_ibdev_list_lock); + list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { + if (us_ibdev->netdev == netdev) { + usnic_ib_handle_usdev_event(us_ibdev, event); + break; + } + } + mutex_unlock(&usnic_ib_ibdev_list_lock); + + return NOTIFY_DONE; +} + +static struct notifier_block usnic_ib_netdevice_notifier = { + .notifier_call = usnic_ib_netdevice_event +}; +/* End of netdev section */ + +/* Start of inet section */ +static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct ib_event ib_event; + + mutex_lock(&us_ibdev->usdev_lock); + + switch (event) { + case NETDEV_DOWN: + usnic_info("%s via ip notifiers", + usnic_ib_netdev_event_to_string(event)); + usnic_fwd_del_ipaddr(us_ibdev->ufdev); + usnic_ib_qp_grp_modify_active_to_err(us_ibdev); + ib_event.event = IB_EVENT_GID_CHANGE; + ib_event.device = &us_ibdev->ib_dev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); + break; + case NETDEV_UP: + usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address); + usnic_info("%s via ip notifiers: ip %pI4", + usnic_ib_netdev_event_to_string(event), + &us_ibdev->ufdev->inaddr); + ib_event.event = IB_EVENT_GID_CHANGE; + ib_event.device = &us_ibdev->ib_dev; + ib_event.element.port_num = 1; + ib_dispatch_event(&ib_event); + break; + default: + usnic_info("Ignoring event %s on %s", + 
usnic_ib_netdev_event_to_string(event), + us_ibdev->ib_dev.name); + } + mutex_unlock(&us_ibdev->usdev_lock); + + return NOTIFY_DONE; +} + +static int usnic_ib_inetaddr_event(struct notifier_block *notifier, + unsigned long event, void *ptr) +{ + struct usnic_ib_dev *us_ibdev; + struct in_ifaddr *ifa = ptr; + struct net_device *netdev = ifa->ifa_dev->dev; + + mutex_lock(&usnic_ib_ibdev_list_lock); + list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { + if (us_ibdev->netdev == netdev) { + usnic_ib_handle_inet_event(us_ibdev, event, ptr); + break; + } + } + mutex_unlock(&usnic_ib_ibdev_list_lock); + + return NOTIFY_DONE; +} +static struct notifier_block usnic_ib_inetaddr_notifier = { + .notifier_call = usnic_ib_inetaddr_event +}; +/* End of inet section*/ + +/* Start of PF discovery section */ +static void *usnic_ib_device_add(struct pci_dev *dev) +{ + struct usnic_ib_dev *us_ibdev; + union ib_gid gid; + struct in_ifaddr *in; + struct net_device *netdev; + + usnic_dbg("\n"); + netdev = pci_get_drvdata(dev); + + us_ibdev = (struct usnic_ib_dev *)ib_alloc_device(sizeof(*us_ibdev)); + if (IS_ERR_OR_NULL(us_ibdev)) { + usnic_err("Device %s context alloc failed\n", + netdev_name(pci_get_drvdata(dev))); + return ERR_PTR(us_ibdev ? PTR_ERR(us_ibdev) : -EFAULT); + } + + us_ibdev->ufdev = usnic_fwd_dev_alloc(dev); + if (IS_ERR_OR_NULL(us_ibdev->ufdev)) { + usnic_err("Failed to alloc ufdev for %s with err %ld\n", + pci_name(dev), PTR_ERR(us_ibdev->ufdev)); + goto err_dealloc; + } + + mutex_init(&us_ibdev->usdev_lock); + INIT_LIST_HEAD(&us_ibdev->vf_dev_list); + INIT_LIST_HEAD(&us_ibdev->ctx_list); + + us_ibdev->pdev = dev; + us_ibdev->netdev = pci_get_drvdata(dev); + us_ibdev->ib_dev.owner = THIS_MODULE; + us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; + us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; + us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; + us_ibdev->ib_dev.dma_device = &dev->dev; + us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION; + strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX); + + us_ibdev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_OPEN_QP); + + us_ibdev->ib_dev.query_device = usnic_ib_query_device; + us_ibdev->ib_dev.query_port = usnic_ib_query_port; + us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey; + us_ibdev->ib_dev.query_gid = usnic_ib_query_gid; + us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer; + us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd; + us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd; + us_ibdev->ib_dev.create_qp = usnic_ib_create_qp; + us_ibdev->ib_dev.modify_qp = usnic_ib_modify_qp; + us_ibdev->ib_dev.query_qp = usnic_ib_query_qp; + us_ibdev->ib_dev.destroy_qp = usnic_ib_destroy_qp; + us_ibdev->ib_dev.create_cq = usnic_ib_create_cq; + us_ibdev->ib_dev.destroy_cq = usnic_ib_destroy_cq; + 
us_ibdev->ib_dev.reg_user_mr = usnic_ib_reg_mr; + us_ibdev->ib_dev.dereg_mr = usnic_ib_dereg_mr; + us_ibdev->ib_dev.alloc_ucontext = usnic_ib_alloc_ucontext; + us_ibdev->ib_dev.dealloc_ucontext = usnic_ib_dealloc_ucontext; + us_ibdev->ib_dev.mmap = usnic_ib_mmap; + us_ibdev->ib_dev.create_ah = usnic_ib_create_ah; + us_ibdev->ib_dev.destroy_ah = usnic_ib_destroy_ah; + us_ibdev->ib_dev.post_send = usnic_ib_post_send; + us_ibdev->ib_dev.post_recv = usnic_ib_post_recv; + us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq; + us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq; + us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr; + + + if (ib_register_device(&us_ibdev->ib_dev, NULL)) + goto err_fwd_dealloc; + + usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu); + usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr); + if (netif_carrier_ok(us_ibdev->netdev)) + usnic_fwd_carrier_up(us_ibdev->ufdev); + + in = ((struct in_device *)(netdev->ip_ptr))->ifa_list; + if (in != NULL) + usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address); + + usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr, + us_ibdev->ufdev->inaddr, &gid.raw[0]); + memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id, + sizeof(gid.global.interface_id)); + kref_init(&us_ibdev->vf_cnt); + + usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n", + us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev), + us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up, + us_ibdev->ufdev->mtu); + return us_ibdev; + +err_fwd_dealloc: + usnic_fwd_dev_free(us_ibdev->ufdev); +err_dealloc: + usnic_err("failed -- deallocing device\n"); + ib_dealloc_device(&us_ibdev->ib_dev); + return NULL; +} + +static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev) +{ + usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name); + usnic_ib_sysfs_unregister_usdev(us_ibdev); + usnic_fwd_dev_free(us_ibdev->ufdev); + ib_unregister_device(&us_ibdev->ib_dev); + ib_dealloc_device(&us_ibdev->ib_dev); +} + +static void usnic_ib_undiscover_pf(struct kref *kref) +{ + struct usnic_ib_dev *us_ibdev, *tmp; + struct pci_dev *dev; + bool found = false; + + dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev; + mutex_lock(&usnic_ib_ibdev_list_lock); + list_for_each_entry_safe(us_ibdev, tmp, + &usnic_ib_ibdev_list, ib_dev_link) { + if (us_ibdev->pdev == dev) { + list_del(&us_ibdev->ib_dev_link); + usnic_ib_device_remove(us_ibdev); + found = true; + break; + } + } + + WARN(!found, "Failed to remove PF %s\n", pci_name(dev)); + + mutex_unlock(&usnic_ib_ibdev_list_lock); +} + +static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic) +{ + struct usnic_ib_dev *us_ibdev; + struct pci_dev *parent_pci, *vf_pci; + int err; + + vf_pci = usnic_vnic_get_pdev(vnic); + parent_pci = pci_physfn(vf_pci); + + BUG_ON(!parent_pci); + + mutex_lock(&usnic_ib_ibdev_list_lock); + list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) { + if (us_ibdev->pdev == parent_pci) { + kref_get(&us_ibdev->vf_cnt); + goto out; + } + } + + us_ibdev = usnic_ib_device_add(parent_pci); + if (IS_ERR_OR_NULL(us_ibdev)) { + us_ibdev = us_ibdev ? 
us_ibdev : ERR_PTR(-EFAULT); + goto out; + } + + err = usnic_ib_sysfs_register_usdev(us_ibdev); + if (err) { + usnic_ib_device_remove(us_ibdev); + us_ibdev = ERR_PTR(err); + goto out; + } + + list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list); +out: + mutex_unlock(&usnic_ib_ibdev_list_lock); + return us_ibdev; +} +/* End of PF discovery section */ + +/* Start of PCI section */ + +static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = { + {PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)}, + {0,} +}; + +static int usnic_ib_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err; + struct usnic_ib_dev *pf; + struct usnic_ib_vf *vf; + enum usnic_vnic_res_type res_type; + + vf = kzalloc(sizeof(*vf), GFP_KERNEL); + if (!vf) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + usnic_err("Failed to enable %s with err %d\n", + pci_name(pdev), err); + goto out_clean_vf; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + usnic_err("Failed to request region for %s with err %d\n", + pci_name(pdev), err); + goto out_disable_device; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, vf); + + vf->vnic = usnic_vnic_alloc(pdev); + if (IS_ERR_OR_NULL(vf->vnic)) { + err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM; + usnic_err("Failed to alloc vnic for %s with err %d\n", + pci_name(pdev), err); + goto out_release_regions; + } + + pf = usnic_ib_discover_pf(vf->vnic); + if (IS_ERR_OR_NULL(pf)) { + usnic_err("Failed to discover pf of vnic %s with err%ld\n", + pci_name(pdev), PTR_ERR(pf)); + err = pf ? PTR_ERR(pf) : -EFAULT; + goto out_clean_vnic; + } + + vf->pf = pf; + spin_lock_init(&vf->lock); + mutex_lock(&pf->usdev_lock); + list_add_tail(&vf->link, &pf->vf_dev_list); + /* + * Save max settings (will be same for each VF, easier to re-write than + * to say "if (!set) { set_values(); set=1; } + */ + for (res_type = USNIC_VNIC_RES_TYPE_EOL+1; + res_type < USNIC_VNIC_RES_TYPE_MAX; + res_type++) { + pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic, + res_type); + } + + mutex_unlock(&pf->usdev_lock); + + usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev), + pf->ib_dev.name); + usnic_ib_log_vf(vf); + return 0; + +out_clean_vnic: + usnic_vnic_free(vf->vnic); +out_release_regions: + pci_set_drvdata(pdev, NULL); + pci_clear_master(pdev); + pci_release_regions(pdev); +out_disable_device: + pci_disable_device(pdev); +out_clean_vf: + kfree(vf); + return err; +} + +static void usnic_ib_pci_remove(struct pci_dev *pdev) +{ + struct usnic_ib_vf *vf = pci_get_drvdata(pdev); + struct usnic_ib_dev *pf = vf->pf; + + mutex_lock(&pf->usdev_lock); + list_del(&vf->link); + mutex_unlock(&pf->usdev_lock); + + kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf); + usnic_vnic_free(vf->vnic); + pci_set_drvdata(pdev, NULL); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + kfree(vf); + + usnic_info("Removed VF %s\n", pci_name(pdev)); +} + +/* PCI driver entry points */ +static struct pci_driver usnic_ib_pci_driver = { + .name = DRV_NAME, + .id_table = usnic_ib_pci_ids, + .probe = usnic_ib_pci_probe, + .remove = usnic_ib_pci_remove, +}; +/* End of PCI section */ + +/* Start of module section */ +static int __init usnic_ib_init(void) +{ + int err; + + printk_once(KERN_INFO "%s", usnic_version); + + err = usnic_uiom_init(DRV_NAME); + if (err) { + usnic_err("Unable to initalize umem with err %d\n", err); + return err; + } + + if (pci_register_driver(&usnic_ib_pci_driver)) { + usnic_err("Unable to register with PCI\n"); + 
goto out_umem_fini; + } + + err = register_netdevice_notifier(&usnic_ib_netdevice_notifier); + if (err) { + usnic_err("Failed to register netdev notifier\n"); + goto out_pci_unreg; + } + + err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier); + if (err) { + usnic_err("Failed to register inet addr notifier\n"); + goto out_unreg_netdev_notifier; + } + + err = usnic_transport_init(); + if (err) { + usnic_err("Failed to initialize transport\n"); + goto out_unreg_inetaddr_notifier; + } + + usnic_debugfs_init(); + + return 0; + +out_unreg_inetaddr_notifier: + unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); +out_unreg_netdev_notifier: + unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); +out_pci_unreg: + pci_unregister_driver(&usnic_ib_pci_driver); +out_umem_fini: + usnic_uiom_fini(); + + return err; +} + +static void __exit usnic_ib_destroy(void) +{ + usnic_dbg("\n"); + usnic_debugfs_exit(); + usnic_transport_fini(); + unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); + unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); + pci_unregister_driver(&usnic_ib_pci_driver); + usnic_uiom_fini(); +} + +MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver"); +MODULE_AUTHOR("Upinder Malhi "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR); +module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3"); +MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs"); +MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids); + +module_init(usnic_ib_init); +module_exit(usnic_ib_destroy); +/* End of module section */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c @@ -0,0 +1,761 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ +#include +#include +#include +#include + +#include "usnic_log.h" +#include "usnic_vnic.h" +#include "usnic_fwd.h" +#include "usnic_uiom.h" +#include "usnic_debugfs.h" +#include "usnic_ib_qp_grp.h" +#include "usnic_ib_sysfs.h" +#include "usnic_transport.h" + +#define DFLT_RQ_IDX 0 + +const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state) +{ + switch (state) { + case IB_QPS_RESET: + return "Rst"; + case IB_QPS_INIT: + return "Init"; + case IB_QPS_RTR: + return "RTR"; + case IB_QPS_RTS: + return "RTS"; + case IB_QPS_SQD: + return "SQD"; + case IB_QPS_SQE: + return "SQE"; + case IB_QPS_ERR: + return "ERR"; + default: + return "UNKOWN STATE"; + + } +} + +int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz) +{ + return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID"); +} + +int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz) +{ + struct usnic_ib_qp_grp *qp_grp = obj; + struct usnic_ib_qp_grp_flow *default_flow; + if (obj) { + default_flow = list_first_entry(&qp_grp->flows_lst, + struct usnic_ib_qp_grp_flow, link); + return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d", + qp_grp->ibqp.qp_num, + usnic_ib_qp_grp_state_to_string( + qp_grp->state), + qp_grp->owner_pid, + usnic_vnic_get_index(qp_grp->vf->vnic), + default_flow->flow->flow_id); + } else { + return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A"); + } +} + +static struct usnic_vnic_res_chunk * +get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp) +{ + lockdep_assert_held(&qp_grp->lock); + /* + * The QP res chunk, used to derive qp indices, + * are just indices of the RQs + */ + return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); +} + +static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp) +{ + + int status; + int i, vnic_idx; + struct usnic_vnic_res_chunk *res_chunk; + struct usnic_vnic_res *res; + + lockdep_assert_held(&qp_grp->lock); + + vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); + + res_chunk = get_qp_res_chunk(qp_grp); + if (IS_ERR_OR_NULL(res_chunk)) { + usnic_err("Unable to get qp res with err %ld\n", + PTR_ERR(res_chunk)); + return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM; + } + + for (i = 0; i < res_chunk->cnt; i++) { + res = res_chunk->res[i]; + status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx, + res->vnic_idx); + if (status) { + usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n", + res->vnic_idx, qp_grp->ufdev->name, + vnic_idx, status); + goto out_err; + } + } + + return 0; + +out_err: + for (i--; i >= 0; i--) { + res = res_chunk->res[i]; + usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx, + res->vnic_idx); + } + + return status; +} + +static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp) +{ + int i, vnic_idx; + struct usnic_vnic_res_chunk *res_chunk; + struct usnic_vnic_res *res; + int status = 0; + + lockdep_assert_held(&qp_grp->lock); + vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); + + res_chunk = get_qp_res_chunk(qp_grp); + if (IS_ERR_OR_NULL(res_chunk)) { + usnic_err("Unable to get qp res with err %ld\n", + PTR_ERR(res_chunk)); + return res_chunk ? 
PTR_ERR(res_chunk) : -ENOMEM; + } + + for (i = 0; i < res_chunk->cnt; i++) { + res = res_chunk->res[i]; + status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx, + res->vnic_idx); + if (status) { + usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n", + res->vnic_idx, + qp_grp->ufdev->name, + vnic_idx, status); + } + } + + return status; + +} + +static int init_filter_action(struct usnic_ib_qp_grp *qp_grp, + struct usnic_filter_action *uaction) +{ + struct usnic_vnic_res_chunk *res_chunk; + + res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); + if (IS_ERR_OR_NULL(res_chunk)) { + usnic_err("Unable to get %s with err %ld\n", + usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), + PTR_ERR(res_chunk)); + return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM; + } + + uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); + uaction->action.type = FILTER_ACTION_RQ_STEERING; + uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx; + + return 0; +} + +static struct usnic_ib_qp_grp_flow* +create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp, + struct usnic_transport_spec *trans_spec) +{ + uint16_t port_num; + int err; + struct filter filter; + struct usnic_filter_action uaction; + struct usnic_ib_qp_grp_flow *qp_flow; + struct usnic_fwd_flow *flow; + enum usnic_transport_type trans_type; + + trans_type = trans_spec->trans_type; + port_num = trans_spec->usnic_roce.port_num; + + /* Reserve Port */ + port_num = usnic_transport_rsrv_port(trans_type, port_num); + if (port_num == 0) + return ERR_PTR(-EINVAL); + + /* Create Flow */ + usnic_fwd_init_usnic_filter(&filter, port_num); + err = init_filter_action(qp_grp, &uaction); + if (err) + goto out_unreserve_port; + + flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction); + if (IS_ERR_OR_NULL(flow)) { + usnic_err("Unable to alloc flow failed with err %ld\n", + PTR_ERR(flow)); + err = flow ? PTR_ERR(flow) : -EFAULT; + goto out_unreserve_port; + } + + /* Create Flow Handle */ + qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC); + if (IS_ERR_OR_NULL(qp_flow)) { + err = qp_flow ? 
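The helpers in this file return either a valid pointer or an error code encoded into the pointer with ERR_PTR(); callers test with IS_ERR_OR_NULL() and recover the code with PTR_ERR(), falling back to -ENOMEM or -EFAULT when the pointer is plain NULL. The sketch below re-implements simplified user-space versions of those macros purely for illustration; the real definitions live in the kernel's err.h, and demo_alloc is a hypothetical caller.

	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>
	#include <stdint.h>

	/* Simplified, user-space re-implementations for illustration only. */
	#define MAX_ERRNO 4095
	static void *ERR_PTR(long err)       { return (void *)err; }
	static long  PTR_ERR(const void *p)  { return (long)p; }
	static int   IS_ERR_OR_NULL(const void *p)
	{
		return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
	}

	/* Hypothetical allocator: returns a buffer, or an error encoded
	 * in the pointer when the size is unacceptable. */
	static void *demo_alloc(size_t size)
	{
		if (size == 0)
			return ERR_PTR(-EINVAL);
		return malloc(size);
	}

	int main(void)
	{
		void *buf = demo_alloc(0);

		if (IS_ERR_OR_NULL(buf)) {
			/* Distinguish an encoded error from plain NULL, as the
			 * driver does with "ptr ? PTR_ERR(ptr) : -ENOMEM". */
			long err = buf ? PTR_ERR(buf) : -ENOMEM;
			printf("alloc failed: %ld\n", err);
			return 1;
		}
		free(buf);
		return 0;
	}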
PTR_ERR(qp_flow) : -ENOMEM; + goto out_dealloc_flow; + } + qp_flow->flow = flow; + qp_flow->trans_type = trans_type; + qp_flow->usnic_roce.port_num = port_num; + qp_flow->qp_grp = qp_grp; + return qp_flow; + +out_dealloc_flow: + usnic_fwd_dealloc_flow(flow); +out_unreserve_port: + usnic_transport_unrsrv_port(trans_type, port_num); + return ERR_PTR(err); +} + +static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow) +{ + usnic_fwd_dealloc_flow(qp_flow->flow); + usnic_transport_unrsrv_port(qp_flow->trans_type, + qp_flow->usnic_roce.port_num); + kfree(qp_flow); +} + +static struct usnic_ib_qp_grp_flow* +create_udp_flow(struct usnic_ib_qp_grp *qp_grp, + struct usnic_transport_spec *trans_spec) +{ + struct socket *sock; + int sock_fd; + int err; + struct filter filter; + struct usnic_filter_action uaction; + struct usnic_ib_qp_grp_flow *qp_flow; + struct usnic_fwd_flow *flow; + enum usnic_transport_type trans_type; + uint32_t addr; + uint16_t port_num; + int proto; + + trans_type = trans_spec->trans_type; + sock_fd = trans_spec->udp.sock_fd; + + /* Get and check socket */ + sock = usnic_transport_get_socket(sock_fd); + if (IS_ERR_OR_NULL(sock)) + return ERR_CAST(sock); + + err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num); + if (err) + goto out_put_sock; + + if (proto != IPPROTO_UDP) { + usnic_err("Protocol for fd %d is not UDP", sock_fd); + err = -EPERM; + goto out_put_sock; + } + + /* Create flow */ + usnic_fwd_init_udp_filter(&filter, addr, port_num); + err = init_filter_action(qp_grp, &uaction); + if (err) + goto out_put_sock; + + flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction); + if (IS_ERR_OR_NULL(flow)) { + usnic_err("Unable to alloc flow failed with err %ld\n", + PTR_ERR(flow)); + err = flow ? PTR_ERR(flow) : -EFAULT; + goto out_put_sock; + } + + /* Create qp_flow */ + qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC); + if (IS_ERR_OR_NULL(qp_flow)) { + err = qp_flow ? 
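For the UDP transport, create_udp_flow above expects user space to pass in the file descriptor of an already-bound IPv4 UDP socket; the driver looks the socket up, insists on IPPROTO_UDP and reads the bound address and port back out of it. The program below sketches only the user-space side of that contract and assumes the fd is later handed to the driver through the usnic ABI (the ABI structures themselves are not shown here).

	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in addr;
		socklen_t len = sizeof(addr);
		int fd;

		/* Create and bind an IPv4 UDP socket; port 0 lets the kernel pick. */
		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&addr, 0, sizeof(addr));
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = htonl(INADDR_ANY);
		addr.sin_port = 0;
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			perror("bind");
			close(fd);
			return 1;
		}

		/* The driver recovers this same address and port via
		 * usnic_transport_sock_get_addr() once it receives the fd. */
		if (getsockname(fd, (struct sockaddr *)&addr, &len) == 0)
			printf("bound to port %u; pass fd %d through the ABI\n",
			       ntohs(addr.sin_port), fd);

		close(fd);
		return 0;
	}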
PTR_ERR(qp_flow) : -ENOMEM; + goto out_dealloc_flow; + } + qp_flow->flow = flow; + qp_flow->trans_type = trans_type; + qp_flow->udp.sock = sock; + qp_flow->qp_grp = qp_grp; + return qp_flow; + +out_dealloc_flow: + usnic_fwd_dealloc_flow(flow); +out_put_sock: + usnic_transport_put_socket(sock); + return ERR_PTR(err); +} + +static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow) +{ + usnic_fwd_dealloc_flow(qp_flow->flow); + usnic_transport_put_socket(qp_flow->udp.sock); + kfree(qp_flow); +} + +static struct usnic_ib_qp_grp_flow* +create_and_add_flow(struct usnic_ib_qp_grp *qp_grp, + struct usnic_transport_spec *trans_spec) +{ + struct usnic_ib_qp_grp_flow *qp_flow; + enum usnic_transport_type trans_type; + + trans_type = trans_spec->trans_type; + switch (trans_type) { + case USNIC_TRANSPORT_ROCE_CUSTOM: + qp_flow = create_roce_custom_flow(qp_grp, trans_spec); + break; + case USNIC_TRANSPORT_IPV4_UDP: + qp_flow = create_udp_flow(qp_grp, trans_spec); + break; + default: + usnic_err("Unsupported transport %u\n", + trans_spec->trans_type); + return ERR_PTR(-EINVAL); + } + + if (!IS_ERR_OR_NULL(qp_flow)) { + list_add_tail(&qp_flow->link, &qp_grp->flows_lst); + usnic_debugfs_flow_add(qp_flow); + } + + + return qp_flow; +} + +static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow) +{ + usnic_debugfs_flow_remove(qp_flow); + list_del(&qp_flow->link); + + switch (qp_flow->trans_type) { + case USNIC_TRANSPORT_ROCE_CUSTOM: + release_roce_custom_flow(qp_flow); + break; + case USNIC_TRANSPORT_IPV4_UDP: + release_udp_flow(qp_flow); + break; + default: + WARN(1, "Unsupported transport %u\n", + qp_flow->trans_type); + break; + } +} + +static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp) +{ + struct usnic_ib_qp_grp_flow *qp_flow, *tmp; + list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link) + release_and_remove_flow(qp_flow); +} + +int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp, + enum ib_qp_state new_state, + void *data) +{ + int status = 0; + int vnic_idx; + struct ib_event ib_event; + enum ib_qp_state old_state; + struct usnic_transport_spec *trans_spec; + struct usnic_ib_qp_grp_flow *qp_flow; + + old_state = qp_grp->state; + vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); + trans_spec = (struct usnic_transport_spec *) data; + + spin_lock(&qp_grp->lock); + switch (new_state) { + case IB_QPS_RESET: + switch (old_state) { + case IB_QPS_RESET: + /* NO-OP */ + break; + case IB_QPS_INIT: + release_and_remove_all_flows(qp_grp); + status = 0; + break; + case IB_QPS_RTR: + case IB_QPS_RTS: + case IB_QPS_ERR: + status = disable_qp_grp(qp_grp); + release_and_remove_all_flows(qp_grp); + break; + default: + status = -EINVAL; + } + break; + case IB_QPS_INIT: + switch (old_state) { + case IB_QPS_RESET: + if (trans_spec) { + qp_flow = create_and_add_flow(qp_grp, + trans_spec); + if (IS_ERR_OR_NULL(qp_flow)) { + status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT; + break; + } + } else { + /* + * Optional to specify filters. + */ + status = 0; + } + break; + case IB_QPS_INIT: + if (trans_spec) { + qp_flow = create_and_add_flow(qp_grp, + trans_spec); + if (IS_ERR_OR_NULL(qp_flow)) { + status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT; + break; + } + } else { + /* + * Doesn't make sense to go into INIT state + * from INIT state w/o adding filters. 
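usnic_ib_qp_grp_modify, which continues just below, is one large nested switch over the requested and current QP states. The same acceptance rules can be read as a small matrix; the sketch below encodes them for the states the driver handles and is meant purely as a reading aid, not as the driver's implementation (SQD and SQE, which the driver rejects, are omitted).

	#include <stdio.h>
	#include <stdbool.h>

	enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR, QPS_MAX };

	/* allowed[new][old]: the transitions the nested switch accepts. */
	static const bool allowed[QPS_MAX][QPS_MAX] = {
		[QPS_RESET] = { [QPS_RESET] = true, [QPS_INIT] = true,
				[QPS_RTR] = true,  [QPS_RTS] = true,
				[QPS_ERR] = true },
		[QPS_INIT]  = { [QPS_RESET] = true, [QPS_INIT] = true,
				[QPS_RTR] = true,  [QPS_RTS] = true },
		[QPS_RTR]   = { [QPS_INIT] = true },
		[QPS_RTS]   = { [QPS_RTR] = true },
		[QPS_ERR]   = { [QPS_RESET] = true, [QPS_INIT] = true,
				[QPS_RTR] = true,  [QPS_RTS] = true },
	};

	int main(void)
	{
		printf("INIT  -> RTR allowed: %d\n", allowed[QPS_RTR][QPS_INIT]);
		printf("RESET -> RTS allowed: %d\n", allowed[QPS_RTS][QPS_RESET]);
		return 0;
	}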
+ */ + status = -EINVAL; + } + break; + case IB_QPS_RTR: + status = disable_qp_grp(qp_grp); + break; + case IB_QPS_RTS: + status = disable_qp_grp(qp_grp); + break; + default: + status = -EINVAL; + } + break; + case IB_QPS_RTR: + switch (old_state) { + case IB_QPS_INIT: + status = enable_qp_grp(qp_grp); + break; + default: + status = -EINVAL; + } + break; + case IB_QPS_RTS: + switch (old_state) { + case IB_QPS_RTR: + /* NO-OP FOR NOW */ + break; + default: + status = -EINVAL; + } + break; + case IB_QPS_ERR: + ib_event.device = &qp_grp->vf->pf->ib_dev; + ib_event.element.qp = &qp_grp->ibqp; + ib_event.event = IB_EVENT_QP_FATAL; + + switch (old_state) { + case IB_QPS_RESET: + qp_grp->ibqp.event_handler(&ib_event, + qp_grp->ibqp.qp_context); + break; + case IB_QPS_INIT: + release_and_remove_all_flows(qp_grp); + qp_grp->ibqp.event_handler(&ib_event, + qp_grp->ibqp.qp_context); + break; + case IB_QPS_RTR: + case IB_QPS_RTS: + status = disable_qp_grp(qp_grp); + release_and_remove_all_flows(qp_grp); + qp_grp->ibqp.event_handler(&ib_event, + qp_grp->ibqp.qp_context); + break; + default: + status = -EINVAL; + } + break; + default: + status = -EINVAL; + } + spin_unlock(&qp_grp->lock); + + if (!status) { + qp_grp->state = new_state; + usnic_info("Transistioned %u from %s to %s", + qp_grp->grp_id, + usnic_ib_qp_grp_state_to_string(old_state), + usnic_ib_qp_grp_state_to_string(new_state)); + } else { + usnic_err("Failed to transistion %u from %s to %s", + qp_grp->grp_id, + usnic_ib_qp_grp_state_to_string(old_state), + usnic_ib_qp_grp_state_to_string(new_state)); + } + + return status; +} + +static struct usnic_vnic_res_chunk** +alloc_res_chunk_list(struct usnic_vnic *vnic, + struct usnic_vnic_res_spec *res_spec, void *owner_obj) +{ + enum usnic_vnic_res_type res_type; + struct usnic_vnic_res_chunk **res_chunk_list; + int err, i, res_cnt, res_lst_sz; + + for (res_lst_sz = 0; + res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL; + res_lst_sz++) { + /* Do Nothing */ + } + + res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1), + GFP_ATOMIC); + if (!res_chunk_list) + return ERR_PTR(-ENOMEM); + + for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL; + i++) { + res_type = res_spec->resources[i].type; + res_cnt = res_spec->resources[i].cnt; + + res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type, + res_cnt, owner_obj); + if (IS_ERR_OR_NULL(res_chunk_list[i])) { + err = res_chunk_list[i] ? 
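alloc_res_chunk_list, which begins above and finishes just below, sizes its array by scanning the spec until the USNIC_VNIC_RES_TYPE_EOL sentinel and allocates one extra slot so the returned list is itself NULL terminated; free_qp_grp_res and usnic_ib_qp_grp_get_chunk both rely on that terminator. (Note that the error path just below unwinds with "for (i--; i > 0; i--)", which never puts chunk 0 back; whether that is intentional is not clear from the patch alone.) The following is a user-space illustration of the sentinel-terminated pattern only.

	#include <stdio.h>
	#include <stdlib.h>

	enum res_type { RES_WQ, RES_RQ, RES_CQ, RES_EOL };

	struct res_entry { enum res_type type; int cnt; };

	/* A spec terminated by an EOL sentinel, as in min_transport_spec[]. */
	static const struct res_entry spec[] = {
		{ RES_WQ, 1 }, { RES_RQ, 1 }, { RES_CQ, 1 }, { RES_EOL, 0 },
	};

	int main(void)
	{
		int n, i;
		int **chunks;

		/* Count entries up to (not including) the sentinel. */
		for (n = 0; spec[n].type != RES_EOL; n++)
			;

		/* Allocate n + 1 slots so the result is NULL-terminated too. */
		chunks = calloc(n + 1, sizeof(*chunks));
		if (!chunks)
			return 1;

		for (i = 0; spec[i].type != RES_EOL; i++)
			chunks[i] = malloc(sizeof(int) * spec[i].cnt);

		/* Consumers can walk chunks[] without knowing n. */
		for (i = 0; chunks[i]; i++)
			free(chunks[i]);
		free(chunks);
		printf("%d resource chunks handled\n", n);
		return 0;
	}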
+ PTR_ERR(res_chunk_list[i]) : -ENOMEM; + usnic_err("Failed to get %s from %s with err %d\n", + usnic_vnic_res_type_to_str(res_type), + usnic_vnic_pci_name(vnic), + err); + goto out_free_res; + } + } + + return res_chunk_list; + +out_free_res: + for (i--; i > 0; i--) + usnic_vnic_put_resources(res_chunk_list[i]); + kfree(res_chunk_list); + return ERR_PTR(err); +} + +static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list) +{ + int i; + for (i = 0; res_chunk_list[i]; i++) + usnic_vnic_put_resources(res_chunk_list[i]); + kfree(res_chunk_list); +} + +static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf, + struct usnic_ib_pd *pd, + struct usnic_ib_qp_grp *qp_grp) +{ + int err; + struct pci_dev *pdev; + + lockdep_assert_held(&vf->lock); + + pdev = usnic_vnic_get_pdev(vf->vnic); + if (vf->qp_grp_ref_cnt == 0) { + err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev); + if (err) { + usnic_err("Failed to attach %s to domain\n", + pci_name(pdev)); + return err; + } + vf->pd = pd; + } + vf->qp_grp_ref_cnt++; + + WARN_ON(vf->pd != pd); + qp_grp->vf = vf; + + return 0; +} + +static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp) +{ + struct pci_dev *pdev; + struct usnic_ib_pd *pd; + + lockdep_assert_held(&qp_grp->vf->lock); + + pd = qp_grp->vf->pd; + pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic); + if (--qp_grp->vf->qp_grp_ref_cnt == 0) { + qp_grp->vf->pd = NULL; + usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev); + } + qp_grp->vf = NULL; +} + +static void log_spec(struct usnic_vnic_res_spec *res_spec) +{ + char buf[512]; + usnic_vnic_spec_dump(buf, sizeof(buf), res_spec); + usnic_dbg("%s\n", buf); +} + +static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow, + uint32_t *id) +{ + enum usnic_transport_type trans_type = qp_flow->trans_type; + int err; + uint16_t port_num = 0; + + switch (trans_type) { + case USNIC_TRANSPORT_ROCE_CUSTOM: + *id = qp_flow->usnic_roce.port_num; + break; + case USNIC_TRANSPORT_IPV4_UDP: + err = usnic_transport_sock_get_addr(qp_flow->udp.sock, + NULL, NULL, + &port_num); + if (err) + return err; + /* + * Copy port_num to stack first and then to *id, + * so that the short to int cast works for little + * and big endian systems. + */ + *id = port_num; + break; + default: + usnic_err("Unsupported transport %u\n", trans_type); + return -EINVAL; + } + + return 0; +} + +struct usnic_ib_qp_grp * +usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, + struct usnic_ib_pd *pd, + struct usnic_vnic_res_spec *res_spec, + struct usnic_transport_spec *transport_spec) +{ + struct usnic_ib_qp_grp *qp_grp; + int err; + enum usnic_transport_type transport = transport_spec->trans_type; + struct usnic_ib_qp_grp_flow *qp_flow; + + lockdep_assert_held(&vf->lock); + + err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport], + res_spec); + if (err) { + usnic_err("Spec does not meet miniumum req for transport %d\n", + transport); + log_spec(res_spec); + return ERR_PTR(err); + } + + qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC); + if (!qp_grp) { + usnic_err("Unable to alloc qp_grp - Out of memory\n"); + return NULL; + } + + qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec, + qp_grp); + if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) { + err = qp_grp->res_chunk_list ? 
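qp_grp_and_vf_bind/unbind above use a plain counter under the VF lock so that the IOMMU attach of the VF's PCI device to the protection domain happens only for the first QP group on that VF, and the detach only when the last one goes away. A minimal sketch of that first-attach/last-detach pattern; attach_domain/detach_domain are hypothetical stand-ins.

	#include <stdio.h>
	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int ref_cnt;

	/* Hypothetical expensive setup/teardown done once per VF. */
	static void attach_domain(void) { puts("attach"); }
	static void detach_domain(void) { puts("detach"); }

	static void bind_one(void)
	{
		pthread_mutex_lock(&lock);
		if (ref_cnt == 0)
			attach_domain();	/* only the first user pays */
		ref_cnt++;
		pthread_mutex_unlock(&lock);
	}

	static void unbind_one(void)
	{
		pthread_mutex_lock(&lock);
		if (--ref_cnt == 0)
			detach_domain();	/* only the last user tears down */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		bind_one();
		bind_one();
		unbind_one();
		unbind_one();	/* attach/detach each printed exactly once */
		return 0;
	}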
+ PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM; + usnic_err("Unable to alloc res for %d with err %d\n", + qp_grp->grp_id, err); + goto out_free_qp_grp; + } + + err = qp_grp_and_vf_bind(vf, pd, qp_grp); + if (err) + goto out_free_res; + + INIT_LIST_HEAD(&qp_grp->flows_lst); + spin_lock_init(&qp_grp->lock); + qp_grp->ufdev = ufdev; + qp_grp->state = IB_QPS_RESET; + qp_grp->owner_pid = current->pid; + + qp_flow = create_and_add_flow(qp_grp, transport_spec); + if (IS_ERR_OR_NULL(qp_flow)) { + usnic_err("Unable to create and add flow with err %ld\n", + PTR_ERR(qp_flow)); + err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT; + goto out_qp_grp_vf_unbind; + } + + err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id); + if (err) + goto out_release_flow; + qp_grp->ibqp.qp_num = qp_grp->grp_id; + + usnic_ib_sysfs_qpn_add(qp_grp); + + return qp_grp; + +out_release_flow: + release_and_remove_flow(qp_flow); +out_qp_grp_vf_unbind: + qp_grp_and_vf_unbind(qp_grp); +out_free_res: + free_qp_grp_res(qp_grp->res_chunk_list); +out_free_qp_grp: + kfree(qp_grp); + + return ERR_PTR(err); +} + +void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) +{ + + WARN_ON(qp_grp->state != IB_QPS_RESET); + lockdep_assert_held(&qp_grp->vf->lock); + + release_and_remove_all_flows(qp_grp); + usnic_ib_sysfs_qpn_remove(qp_grp); + qp_grp_and_vf_unbind(qp_grp); + free_qp_grp_res(qp_grp->res_chunk_list); + kfree(qp_grp); +} + +struct usnic_vnic_res_chunk* +usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp, + enum usnic_vnic_res_type res_type) +{ + int i; + + for (i = 0; qp_grp->res_chunk_list[i]; i++) { + if (qp_grp->res_chunk_list[i]->type == res_type) + return qp_grp->res_chunk_list[i]; + } + + return ERR_PTR(-EINVAL); +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef USNIC_IB_QP_GRP_H_ +#define USNIC_IB_QP_GRP_H_ + +#include +#include + +#include "usnic_ib.h" +#include "usnic_abi.h" +#include "usnic_fwd.h" +#include "usnic_vnic.h" + +/* + * The qp group struct represents all the hw resources needed to present a ib_qp + */ +struct usnic_ib_qp_grp { + struct ib_qp ibqp; + enum ib_qp_state state; + int grp_id; + + struct usnic_fwd_dev *ufdev; + struct usnic_ib_ucontext *ctx; + struct list_head flows_lst; + + struct usnic_vnic_res_chunk **res_chunk_list; + + pid_t owner_pid; + struct usnic_ib_vf *vf; + struct list_head link; + + spinlock_t lock; + + struct kobject kobj; +}; + +struct usnic_ib_qp_grp_flow { + struct usnic_fwd_flow *flow; + enum usnic_transport_type trans_type; + union { + struct { + uint16_t port_num; + } usnic_roce; + struct { + struct socket *sock; + } udp; + }; + struct usnic_ib_qp_grp *qp_grp; + struct list_head link; + + /* Debug FS */ + struct dentry *dbgfs_dentry; + char dentry_name[32]; +}; + +static const struct +usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = { + { /*USNIC_TRANSPORT_UNKNOWN*/ + .resources = { + {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,}, + }, + }, + { /*USNIC_TRANSPORT_ROCE_CUSTOM*/ + .resources = { + {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,}, + {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,}, + {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,}, + {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,}, + }, + }, + { /*USNIC_TRANSPORT_IPV4_UDP*/ + .resources = { + {.type = USNIC_VNIC_RES_TYPE_WQ, .cnt = 1,}, + {.type = USNIC_VNIC_RES_TYPE_RQ, .cnt = 1,}, + {.type = USNIC_VNIC_RES_TYPE_CQ, .cnt = 1,}, + {.type = USNIC_VNIC_RES_TYPE_EOL, .cnt = 0,}, + }, + }, +}; + +const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state); +int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz); +int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz); +struct usnic_ib_qp_grp * +usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf, + struct usnic_ib_pd *pd, + struct usnic_vnic_res_spec *res_spec, + struct usnic_transport_spec *trans_spec); +void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp); +int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp, + enum ib_qp_state new_state, + void *data); +struct usnic_vnic_res_chunk +*usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp, + enum usnic_vnic_res_type type); +static inline +struct usnic_ib_qp_grp *to_uqp_grp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct usnic_ib_qp_grp, ibqp); +} +#endif /* USNIC_IB_QP_GRP_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
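The verbs core hands this driver a struct ib_qp pointer, and to_uqp_grp() above recovers the enclosing usnic_ib_qp_grp with container_of(), which works for any embedded member, not only the first one. A self-contained demonstration with a simplified version of the macro (the real one is in the kernel headers); the struct names here are illustrative.

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified version of the kernel macro, for illustration. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inner { int id; };

	struct outer {
		int state;
		struct inner core;	/* embedded, not a pointer */
	};

	static struct outer *to_outer(struct inner *core)
	{
		return container_of(core, struct outer, core);
	}

	int main(void)
	{
		struct outer o = { .state = 7, .core = { .id = 42 } };
		struct inner *core = &o.core;	/* what a callback would receive */

		printf("state=%d id=%d\n", to_outer(core)->state, core->id);
		return 0;
	}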
+ * + */ + +#include +#include +#include + +#include +#include + +#include "usnic_common_util.h" +#include "usnic_ib.h" +#include "usnic_ib_qp_grp.h" +#include "usnic_vnic.h" +#include "usnic_ib_verbs.h" +#include "usnic_log.h" + +static ssize_t usnic_ib_show_fw_ver(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev = + container_of(device, struct usnic_ib_dev, ib_dev.dev); + struct ethtool_drvinfo info; + + mutex_lock(&us_ibdev->usdev_lock); + us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); + mutex_unlock(&us_ibdev->usdev_lock); + + return scnprintf(buf, PAGE_SIZE, "%s\n", info.fw_version); +} + +static ssize_t usnic_ib_show_board(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev = + container_of(device, struct usnic_ib_dev, ib_dev.dev); + unsigned short subsystem_device_id; + + mutex_lock(&us_ibdev->usdev_lock); + subsystem_device_id = us_ibdev->pdev->subsystem_device; + mutex_unlock(&us_ibdev->usdev_lock); + + return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id); +} + +/* + * Report the configuration for this PF + */ +static ssize_t +usnic_ib_show_config(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev; + char *ptr; + unsigned left; + unsigned n; + enum usnic_vnic_res_type res_type; + + us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); + + /* Buffer space limit is 1 page */ + ptr = buf; + left = PAGE_SIZE; + + mutex_lock(&us_ibdev->usdev_lock); + if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) { + char *busname; + + /* + * bus name seems to come with annoying prefix. + * Remove it if it is predictable + */ + busname = us_ibdev->pdev->bus->name; + if (strncmp(busname, "PCI Bus ", 8) == 0) + busname += 8; + + n = scnprintf(ptr, left, + "%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:", + us_ibdev->ib_dev.name, + busname, + PCI_SLOT(us_ibdev->pdev->devfn), + PCI_FUNC(us_ibdev->pdev->devfn), + netdev_name(us_ibdev->netdev), + us_ibdev->ufdev->mac, + atomic_read(&us_ibdev->vf_cnt.refcount)); + UPDATE_PTR_LEFT(n, ptr, left); + + for (res_type = USNIC_VNIC_RES_TYPE_EOL; + res_type < USNIC_VNIC_RES_TYPE_MAX; + res_type++) { + if (us_ibdev->vf_res_cnt[res_type] == 0) + continue; + n = scnprintf(ptr, left, " %d %s%s", + us_ibdev->vf_res_cnt[res_type], + usnic_vnic_res_type_to_str(res_type), + (res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ? 
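usnic_ib_show_config above assembles its sysfs output with repeated scnprintf() calls into a single PAGE_SIZE buffer, advancing the write pointer and shrinking the remaining space after each call; that bookkeeping is what the UPDATE_PTR_LEFT macro (defined elsewhere in the driver) hides. Below is a user-space sketch of the same accumulate-into-a-bounded-buffer pattern; buf_printf is a stand-in for scnprintf and is not part of the driver.

	#include <stdio.h>
	#include <string.h>

	/* User-space stand-in for scnprintf(): returns bytes actually
	 * written, never more than the space that was available. */
	static int buf_printf(char *buf, size_t left, const char *fmt, int val)
	{
		int n = snprintf(buf, left, fmt, val);

		if (n < 0)
			return 0;
		return (size_t)n >= left ? (int)(left ? left - 1 : 0) : n;
	}

	int main(void)
	{
		char page[64];
		char *ptr = page;
		size_t left = sizeof(page);
		int i, n;

		for (i = 0; i < 4; i++) {
			n = buf_printf(ptr, left, " res%d", i);
			/* Advance and shrink, as UPDATE_PTR_LEFT() does. */
			ptr += n;
			left -= n;
		}
		printf("built:\"%s\" (%zu bytes)\n", page, strlen(page));
		return 0;
	}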
+ "," : ""); + UPDATE_PTR_LEFT(n, ptr, left); + } + n = scnprintf(ptr, left, "\n"); + UPDATE_PTR_LEFT(n, ptr, left); + } else { + n = scnprintf(ptr, left, "%s: no VFs\n", + us_ibdev->ib_dev.name); + UPDATE_PTR_LEFT(n, ptr, left); + } + mutex_unlock(&us_ibdev->usdev_lock); + + return ptr - buf; +} + +static ssize_t +usnic_ib_show_iface(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev; + + us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); + + return scnprintf(buf, PAGE_SIZE, "%s\n", + netdev_name(us_ibdev->netdev)); +} + +static ssize_t +usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev; + + us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); + + return scnprintf(buf, PAGE_SIZE, "%u\n", + atomic_read(&us_ibdev->vf_cnt.refcount)); +} + +static ssize_t +usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev; + int qp_per_vf; + + us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); + qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ], + us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]); + + return scnprintf(buf, PAGE_SIZE, + "%d\n", qp_per_vf); +} + +static ssize_t +usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct usnic_ib_dev *us_ibdev; + + us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]); +} + +static DEVICE_ATTR(fw_ver, S_IRUGO, usnic_ib_show_fw_ver, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL); +static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL); +static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL); +static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL); +static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL); +static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL); + +static struct device_attribute *usnic_class_attributes[] = { + &dev_attr_fw_ver, + &dev_attr_board_id, + &dev_attr_config, + &dev_attr_iface, + &dev_attr_max_vf, + &dev_attr_qp_per_vf, + &dev_attr_cq_per_vf, +}; + +struct qpn_attribute { + struct attribute attr; + ssize_t (*show)(struct usnic_ib_qp_grp *, char *buf); +}; + +/* + * Definitions for supporting QPN entries in sysfs + */ +static ssize_t +usnic_ib_qpn_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct usnic_ib_qp_grp *qp_grp; + struct qpn_attribute *qpn_attr; + + qp_grp = container_of(kobj, struct usnic_ib_qp_grp, kobj); + qpn_attr = container_of(attr, struct qpn_attribute, attr); + + return qpn_attr->show(qp_grp, buf); +} + +static const struct sysfs_ops usnic_ib_qpn_sysfs_ops = { + .show = usnic_ib_qpn_attr_show +}; + +#define QPN_ATTR_RO(NAME) \ +struct qpn_attribute qpn_attr_##NAME = __ATTR_RO(NAME) + +static ssize_t context_show(struct usnic_ib_qp_grp *qp_grp, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "0x%p\n", qp_grp->ctx); +} + +static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf) +{ + int i, j, n; + int left; + char *ptr; + struct usnic_vnic_res_chunk *res_chunk; + struct usnic_vnic_res *vnic_res; + + left = PAGE_SIZE; + ptr = buf; + + n = scnprintf(ptr, left, + "QPN: %d State: (%s) PID: %u VF Idx: %hu ", + qp_grp->ibqp.qp_num, + usnic_ib_qp_grp_state_to_string(qp_grp->state), + qp_grp->owner_pid, + 
usnic_vnic_get_index(qp_grp->vf->vnic)); + UPDATE_PTR_LEFT(n, ptr, left); + + for (i = 0; qp_grp->res_chunk_list[i]; i++) { + res_chunk = qp_grp->res_chunk_list[i]; + for (j = 0; j < res_chunk->cnt; j++) { + vnic_res = res_chunk->res[j]; + n = scnprintf(ptr, left, "%s[%d] ", + usnic_vnic_res_type_to_str(vnic_res->type), + vnic_res->vnic_idx); + UPDATE_PTR_LEFT(n, ptr, left); + } + } + + n = scnprintf(ptr, left, "\n"); + UPDATE_PTR_LEFT(n, ptr, left); + + return ptr - buf; +} + +static QPN_ATTR_RO(context); +static QPN_ATTR_RO(summary); + +static struct attribute *usnic_ib_qpn_default_attrs[] = { + &qpn_attr_context.attr, + &qpn_attr_summary.attr, + NULL +}; + +static struct kobj_type usnic_ib_qpn_type = { + .sysfs_ops = &usnic_ib_qpn_sysfs_ops, + .default_attrs = usnic_ib_qpn_default_attrs +}; + +int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev) +{ + int i; + int err; + for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) { + err = device_create_file(&us_ibdev->ib_dev.dev, + usnic_class_attributes[i]); + if (err) { + usnic_err("Failed to create device file %d for %s eith err %d", + i, us_ibdev->ib_dev.name, err); + return -EINVAL; + } + } + + /* create kernel object for looking at individual QPs */ + kobject_get(&us_ibdev->ib_dev.dev.kobj); + us_ibdev->qpn_kobj = kobject_create_and_add("qpn", + &us_ibdev->ib_dev.dev.kobj); + if (us_ibdev->qpn_kobj == NULL) { + kobject_put(&us_ibdev->ib_dev.dev.kobj); + return -ENOMEM; + } + + return 0; +} + +void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev) +{ + int i; + for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) { + device_remove_file(&us_ibdev->ib_dev.dev, + usnic_class_attributes[i]); + } + + kobject_put(us_ibdev->qpn_kobj); +} + +void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp) +{ + struct usnic_ib_dev *us_ibdev; + int err; + + us_ibdev = qp_grp->vf->pf; + + err = kobject_init_and_add(&qp_grp->kobj, &usnic_ib_qpn_type, + kobject_get(us_ibdev->qpn_kobj), + "%d", qp_grp->grp_id); + if (err) { + kobject_put(us_ibdev->qpn_kobj); + return; + } +} + +void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp) +{ + struct usnic_ib_dev *us_ibdev; + + us_ibdev = qp_grp->vf->pf; + + kobject_put(&qp_grp->kobj); + kobject_put(us_ibdev->qpn_kobj); +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef USNIC_IB_SYSFS_H_ +#define USNIC_IB_SYSFS_H_ + +#include "usnic_ib.h" + +int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev); +void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev); +void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp); +void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp); + +#endif /* !USNIC_IB_SYSFS_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -0,0 +1,765 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ +#include +#include +#include +#include + +#include +#include + +#include "usnic_abi.h" +#include "usnic_ib.h" +#include "usnic_common_util.h" +#include "usnic_ib_qp_grp.h" +#include "usnic_fwd.h" +#include "usnic_log.h" +#include "usnic_uiom.h" +#include "usnic_transport.h" + +#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM + +static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver) +{ + *fw_ver = (u64) *fw_ver_str; +} + +static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp, + struct ib_udata *udata) +{ + struct usnic_ib_dev *us_ibdev; + struct usnic_ib_create_qp_resp resp; + struct pci_dev *pdev; + struct vnic_dev_bar *bar; + struct usnic_vnic_res_chunk *chunk; + struct usnic_ib_qp_grp_flow *default_flow; + int i, err; + + memset(&resp, 0, sizeof(resp)); + + us_ibdev = qp_grp->vf->pf; + pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic); + if (!pdev) { + usnic_err("Failed to get pdev of qp_grp %d\n", + qp_grp->grp_id); + return -EFAULT; + } + + bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0); + if (!bar) { + usnic_err("Failed to get bar0 of qp_grp %d vf %s", + qp_grp->grp_id, pci_name(pdev)); + return -EFAULT; + } + + resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic); + resp.bar_bus_addr = bar->bus_addr; + resp.bar_len = bar->len; + + chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); + if (IS_ERR_OR_NULL(chunk)) { + usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", + usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), + qp_grp->grp_id, + PTR_ERR(chunk)); + return chunk ? PTR_ERR(chunk) : -ENOMEM; + } + + WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ); + resp.rq_cnt = chunk->cnt; + for (i = 0; i < chunk->cnt; i++) + resp.rq_idx[i] = chunk->res[i]->vnic_idx; + + chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ); + if (IS_ERR_OR_NULL(chunk)) { + usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", + usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ), + qp_grp->grp_id, + PTR_ERR(chunk)); + return chunk ? 
PTR_ERR(chunk) : -ENOMEM; + } + + WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ); + resp.wq_cnt = chunk->cnt; + for (i = 0; i < chunk->cnt; i++) + resp.wq_idx[i] = chunk->res[i]->vnic_idx; + + chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ); + if (IS_ERR_OR_NULL(chunk)) { + usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", + usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ), + qp_grp->grp_id, + PTR_ERR(chunk)); + return chunk ? PTR_ERR(chunk) : -ENOMEM; + } + + WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ); + resp.cq_cnt = chunk->cnt; + for (i = 0; i < chunk->cnt; i++) + resp.cq_idx[i] = chunk->res[i]->vnic_idx; + + default_flow = list_first_entry(&qp_grp->flows_lst, + struct usnic_ib_qp_grp_flow, link); + resp.transport = default_flow->trans_type; + + err = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (err) { + usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name); + return err; + } + + return 0; +} + +static struct usnic_ib_qp_grp* +find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, + struct usnic_ib_pd *pd, + struct usnic_transport_spec *trans_spec, + struct usnic_vnic_res_spec *res_spec) +{ + struct usnic_ib_vf *vf; + struct usnic_vnic *vnic; + struct usnic_ib_qp_grp *qp_grp; + struct device *dev, **dev_list; + int i, found = 0; + + BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); + + if (list_empty(&us_ibdev->vf_dev_list)) { + usnic_info("No vfs to allocate\n"); + return NULL; + } + + if (usnic_ib_share_vf) { + /* Try to find resouces on a used vf which is in pd */ + dev_list = usnic_uiom_get_dev_list(pd->umem_pd); + for (i = 0; dev_list[i]; i++) { + dev = dev_list[i]; + vf = pci_get_drvdata(to_pci_dev(dev)); + spin_lock(&vf->lock); + vnic = vf->vnic; + if (!usnic_vnic_check_room(vnic, res_spec)) { + usnic_dbg("Found used vnic %s from %s\n", + us_ibdev->ib_dev.name, + pci_name(usnic_vnic_get_pdev( + vnic))); + found = 1; + break; + } + spin_unlock(&vf->lock); + + } + usnic_uiom_free_dev_list(dev_list); + } + + if (!found) { + /* Try to find resources on an unused vf */ + list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) { + spin_lock(&vf->lock); + vnic = vf->vnic; + if (vf->qp_grp_ref_cnt == 0 && + usnic_vnic_check_room(vnic, res_spec) == 0) { + found = 1; + break; + } + spin_unlock(&vf->lock); + } + } + + if (!found) { + usnic_info("No free qp grp found on %s\n", + us_ibdev->ib_dev.name); + return ERR_PTR(-ENOMEM); + } + + qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec, + trans_spec); + spin_unlock(&vf->lock); + if (IS_ERR_OR_NULL(qp_grp)) { + usnic_err("Failed to allocate qp_grp\n"); + return ERR_PTR(qp_grp ? 
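find_free_vf_and_create_qp_grp above takes each candidate VF's spin lock only long enough to test whether it has room, but deliberately keeps the winning VF's lock held until after the QP group has been created (which is also why the create path allocates with GFP_ATOMIC). A user-space sketch of that scan-and-keep-locked pattern, using a pthread mutex in place of the spin lock; the structure and names are illustrative.

	#include <stdio.h>
	#include <pthread.h>

	struct vf {
		pthread_mutex_t lock;
		int free_slots;
	};

	static struct vf vfs[3] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 2 },
		{ PTHREAD_MUTEX_INITIALIZER, 1 },
	};

	/* Find a vf with room; on success the chosen vf's lock is still
	 * held, so the caller can consume a slot without racing others. */
	static struct vf *find_vf_locked(void)
	{
		int i;

		for (i = 0; i < 3; i++) {
			pthread_mutex_lock(&vfs[i].lock);
			if (vfs[i].free_slots > 0)
				return &vfs[i];	/* still locked on purpose */
			pthread_mutex_unlock(&vfs[i].lock);
		}
		return NULL;
	}

	int main(void)
	{
		struct vf *vf = find_vf_locked();

		if (!vf) {
			puts("no vf with room");
			return 1;
		}
		vf->free_slots--;		/* "create the qp group" */
		pthread_mutex_unlock(&vf->lock);
		printf("claimed a slot, %d left\n", vf->free_slots);
		return 0;
	}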
PTR_ERR(qp_grp) : -ENOMEM); + } + + return qp_grp; +} + +static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) +{ + struct usnic_ib_vf *vf = qp_grp->vf; + + WARN_ON(qp_grp->state != IB_QPS_RESET); + + spin_lock(&vf->lock); + usnic_ib_qp_grp_destroy(qp_grp); + spin_unlock(&vf->lock); +} + +static void eth_speed_to_ib_speed(int speed, u8 *active_speed, + u8 *active_width) +{ + if (speed <= 10000) { + *active_width = IB_WIDTH_1X; + *active_speed = IB_SPEED_FDR10; + } else if (speed <= 20000) { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_DDR; + } else if (speed <= 30000) { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_QDR; + } else if (speed <= 40000) { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_FDR10; + } else { + *active_width = IB_WIDTH_4X; + *active_speed = IB_SPEED_EDR; + } +} + +static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd) +{ + if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN || + cmd.spec.trans_type >= USNIC_TRANSPORT_MAX) + return -EINVAL; + + return 0; +} + +/* Start of ib callback functions */ + +enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, + u8 port_num) +{ + return IB_LINK_LAYER_ETHERNET; +} + +int usnic_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); + union ib_gid gid; + struct ethtool_drvinfo info; + struct ethtool_cmd cmd; + int qp_per_vf; + + usnic_dbg("\n"); + mutex_lock(&us_ibdev->usdev_lock); + us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); + us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); + memset(props, 0, sizeof(*props)); + usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, + &gid.raw[0]); + memcpy(&props->sys_image_guid, &gid.global.interface_id, + sizeof(gid.global.interface_id)); + usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver); + props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE; + props->page_size_cap = USNIC_UIOM_PAGE_SIZE; + props->vendor_id = PCI_VENDOR_ID_CISCO; + props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC; + props->hw_ver = us_ibdev->pdev->subsystem_device; + qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ], + us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]); + props->max_qp = qp_per_vf * + atomic_read(&us_ibdev->vf_cnt.refcount); + props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; + props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] * + atomic_read(&us_ibdev->vf_cnt.refcount); + props->max_pd = USNIC_UIOM_MAX_PD_CNT; + props->max_mr = USNIC_UIOM_MAX_MR_CNT; + props->local_ca_ack_delay = 0; + props->max_pkeys = 0; + props->atomic_cap = IB_ATOMIC_NONE; + props->masked_atomic_cap = props->atomic_cap; + props->max_qp_rd_atom = 0; + props->max_qp_init_rd_atom = 0; + props->max_res_rd_atom = 0; + props->max_srq = 0; + props->max_srq_wr = 0; + props->max_srq_sge = 0; + props->max_fast_reg_page_list_len = 0; + props->max_mcast_grp = 0; + props->max_mcast_qp_attach = 0; + props->max_total_mcast_qp_attach = 0; + props->max_map_per_fmr = 0; + /* Owned by Userspace + * max_qp_wr, max_sge, max_sge_rd, max_cqe */ + mutex_unlock(&us_ibdev->usdev_lock); + + return 0; +} + +int usnic_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); + struct ethtool_cmd cmd; + + usnic_dbg("\n"); + + mutex_lock(&us_ibdev->usdev_lock); + 
us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); + memset(props, 0, sizeof(*props)); + + props->lid = 0; + props->lmc = 1; + props->sm_lid = 0; + props->sm_sl = 0; + + if (!us_ibdev->ufdev->link_up) { + props->state = IB_PORT_DOWN; + props->phys_state = 3; + } else if (!us_ibdev->ufdev->inaddr) { + props->state = IB_PORT_INIT; + props->phys_state = 4; + } else { + props->state = IB_PORT_ACTIVE; + props->phys_state = 5; + } + + props->port_cap_flags = 0; + props->gid_tbl_len = 1; + props->pkey_tbl_len = 1; + props->bad_pkey_cntr = 0; + props->qkey_viol_cntr = 0; + eth_speed_to_ib_speed(cmd.speed, &props->active_speed, + &props->active_width); + props->max_mtu = IB_MTU_4096; + props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu); + /* Userspace will adjust for hdrs */ + props->max_msg_sz = us_ibdev->ufdev->mtu; + props->max_vl_num = 1; + mutex_unlock(&us_ibdev->usdev_lock); + + return 0; +} + +int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct usnic_ib_qp_grp *qp_grp; + struct usnic_ib_vf *vf; + int err; + + usnic_dbg("\n"); + + memset(qp_attr, 0, sizeof(*qp_attr)); + memset(qp_init_attr, 0, sizeof(*qp_init_attr)); + + qp_grp = to_uqp_grp(qp); + vf = qp_grp->vf; + mutex_lock(&vf->pf->usdev_lock); + usnic_dbg("\n"); + qp_attr->qp_state = qp_grp->state; + qp_attr->cur_qp_state = qp_grp->state; + + switch (qp_grp->ibqp.qp_type) { + case IB_QPT_UD: + qp_attr->qkey = 0; + break; + default: + usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type); + err = -EINVAL; + goto err_out; + } + + mutex_unlock(&vf->pf->usdev_lock); + return 0; + +err_out: + mutex_unlock(&vf->pf->usdev_lock); + return err; +} + +int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + + struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); + usnic_dbg("\n"); + + if (index > 1) + return -EINVAL; + + mutex_lock(&us_ibdev->usdev_lock); + memset(&(gid->raw[0]), 0, sizeof(gid->raw)); + usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, + &gid->raw[0]); + mutex_unlock(&us_ibdev->usdev_lock); + + return 0; +} + +int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + if (index > 1) + return -EINVAL; + + *pkey = 0xffff; + return 0; +} + +struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct usnic_ib_pd *pd; + void *umem_pd; + + usnic_dbg("\n"); + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); + if (IS_ERR_OR_NULL(umem_pd)) { + kfree(pd); + return ERR_PTR(umem_pd ? 
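usnic_ib_query_port above derives the IB port state purely from the underlying Ethernet state: no link reports IB_PORT_DOWN, link without an IPv4 address reports IB_PORT_INIT, and link plus address reports IB_PORT_ACTIVE (phys_state 3, 4 and 5 respectively). A compact restatement of that decision as a reading aid:

	#include <stdio.h>
	#include <stdbool.h>

	enum port_state { PORT_DOWN, PORT_INIT, PORT_ACTIVE };

	/* Mirrors the link_up/inaddr checks in usnic_ib_query_port(). */
	static enum port_state port_state(bool link_up, bool has_ipv4_addr)
	{
		if (!link_up)
			return PORT_DOWN;
		if (!has_ipv4_addr)
			return PORT_INIT;
		return PORT_ACTIVE;
	}

	int main(void)
	{
		printf("link only : %d\n", port_state(true, false));  /* INIT   */
		printf("link + ip : %d\n", port_state(true, true));   /* ACTIVE */
		printf("no link   : %d\n", port_state(false, true));  /* DOWN   */
		return 0;
	}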
PTR_ERR(umem_pd) : -ENOMEM); + } + + usnic_info("domain 0x%p allocated for context 0x%p and device %s\n", + pd, context, ibdev->name); + return &pd->ibpd; +} + +int usnic_ib_dealloc_pd(struct ib_pd *pd) +{ + usnic_info("freeing domain 0x%p\n", pd); + + usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); + kfree(pd); + return 0; +} + +struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + int err; + struct usnic_ib_dev *us_ibdev; + struct usnic_ib_qp_grp *qp_grp; + struct usnic_ib_ucontext *ucontext; + int cq_cnt; + struct usnic_vnic_res_spec res_spec; + struct usnic_ib_create_qp_cmd cmd; + struct usnic_transport_spec trans_spec; + + usnic_dbg("\n"); + + ucontext = to_uucontext(pd->uobject->context); + us_ibdev = to_usdev(pd->device); + + err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); + if (err) { + usnic_err("%s: cannot copy udata for create_qp\n", + us_ibdev->ib_dev.name); + return ERR_PTR(-EINVAL); + } + + err = create_qp_validate_user_data(cmd); + if (err) { + usnic_err("%s: Failed to validate user data\n", + us_ibdev->ib_dev.name); + return ERR_PTR(-EINVAL); + } + + if (init_attr->qp_type != IB_QPT_UD) { + usnic_err("%s asked to make a non-UD QP: %d\n", + us_ibdev->ib_dev.name, init_attr->qp_type); + return ERR_PTR(-EINVAL); + } + + trans_spec = cmd.spec; + mutex_lock(&us_ibdev->usdev_lock); + cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2; + res_spec = min_transport_spec[trans_spec.trans_type]; + usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt); + qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd), + &trans_spec, + &res_spec); + if (IS_ERR_OR_NULL(qp_grp)) { + err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM; + goto out_release_mutex; + } + + err = usnic_ib_fill_create_qp_resp(qp_grp, udata); + if (err) { + err = -EBUSY; + goto out_release_qp_grp; + } + + qp_grp->ctx = ucontext; + list_add_tail(&qp_grp->link, &ucontext->qp_grp_list); + usnic_ib_log_vf(qp_grp->vf); + mutex_unlock(&us_ibdev->usdev_lock); + return &qp_grp->ibqp; + +out_release_qp_grp: + qp_grp_destroy(qp_grp); +out_release_mutex: + mutex_unlock(&us_ibdev->usdev_lock); + return ERR_PTR(err); +} + +int usnic_ib_destroy_qp(struct ib_qp *qp) +{ + struct usnic_ib_qp_grp *qp_grp; + struct usnic_ib_vf *vf; + + usnic_dbg("\n"); + + qp_grp = to_uqp_grp(qp); + vf = qp_grp->vf; + mutex_lock(&vf->pf->usdev_lock); + if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) { + usnic_err("Failed to move qp grp %u to reset\n", + qp_grp->grp_id); + } + + list_del(&qp_grp->link); + qp_grp_destroy(qp_grp); + mutex_unlock(&vf->pf->usdev_lock); + + return 0; +} + +int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct usnic_ib_qp_grp *qp_grp; + int status; + usnic_dbg("\n"); + + qp_grp = to_uqp_grp(ibqp); + + /* TODO: Future Support All States */ + mutex_lock(&qp_grp->vf->pf->usdev_lock); + if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) { + status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL); + } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) { + status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL); + } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) { + status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL); + } else { + usnic_err("Unexpected combination mask: %u state: %u\n", + attr_mask & IB_QP_STATE, attr->qp_state); + status = -EINVAL; + } + + mutex_unlock(&qp_grp->vf->pf->usdev_lock); + return status; 
+} + +struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct ib_cq *cq; + + usnic_dbg("\n"); + cq = kzalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) + return ERR_PTR(-EBUSY); + + return cq; +} + +int usnic_ib_destroy_cq(struct ib_cq *cq) +{ + usnic_dbg("\n"); + kfree(cq); + return 0; +} + +struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct usnic_ib_mr *mr; + int err; + + usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start, + virt_addr, length); + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (IS_ERR_OR_NULL(mr)) + return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM); + + mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, + access_flags, 0); + if (IS_ERR_OR_NULL(mr->umem)) { + err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; + goto err_free; + } + + mr->ibmr.lkey = mr->ibmr.rkey = 0; + return &mr->ibmr; + +err_free: + kfree(mr); + return ERR_PTR(err); +} + +int usnic_ib_dereg_mr(struct ib_mr *ibmr) +{ + struct usnic_ib_mr *mr = to_umr(ibmr); + + usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); + + usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); + kfree(mr); + return 0; +} + +struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct usnic_ib_ucontext *context; + struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); + usnic_dbg("\n"); + + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&context->qp_grp_list); + mutex_lock(&us_ibdev->usdev_lock); + list_add_tail(&context->link, &us_ibdev->ctx_list); + mutex_unlock(&us_ibdev->usdev_lock); + + return &context->ibucontext; +} + +int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +{ + struct usnic_ib_ucontext *context = to_uucontext(ibcontext); + struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device); + usnic_dbg("\n"); + + mutex_lock(&us_ibdev->usdev_lock); + BUG_ON(!list_empty(&context->qp_grp_list)); + list_del(&context->link); + mutex_unlock(&us_ibdev->usdev_lock); + kfree(context); + return 0; +} + +int usnic_ib_mmap(struct ib_ucontext *context, + struct vm_area_struct *vma) +{ + struct usnic_ib_ucontext *uctx = to_ucontext(context); + struct usnic_ib_dev *us_ibdev; + struct usnic_ib_qp_grp *qp_grp; + struct usnic_ib_vf *vf; + struct vnic_dev_bar *bar; + dma_addr_t bus_addr; + unsigned int len; + unsigned int vfid; + + usnic_dbg("\n"); + + us_ibdev = to_usdev(context->device); + vma->vm_flags |= VM_IO; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vfid = vma->vm_pgoff; + usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n", + vma->vm_pgoff, PAGE_SHIFT, vfid); + + mutex_lock(&us_ibdev->usdev_lock); + list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) { + vf = qp_grp->vf; + if (usnic_vnic_get_index(vf->vnic) == vfid) { + bar = usnic_vnic_get_bar(vf->vnic, 0); + if ((vma->vm_end - vma->vm_start) != bar->len) { + usnic_err("Bar0 Len %lu - Request map %lu\n", + bar->len, + vma->vm_end - vma->vm_start); + mutex_unlock(&us_ibdev->usdev_lock); + return -EINVAL; + } + bus_addr = bar->bus_addr; + len = bar->len; + usnic_dbg("bus: %pa vaddr: %p size: %ld\n", + &bus_addr, bar->vaddr, bar->len); + mutex_unlock(&us_ibdev->usdev_lock); + + return remap_pfn_range(vma, + vma->vm_start, + bus_addr >> PAGE_SHIFT, + len, vma->vm_page_prot); + } + } + + mutex_unlock(&us_ibdev->usdev_lock); + 
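The mmap path above interprets the page offset supplied by user space as a VF index, then refuses the request unless the mapped length equals the BAR length exactly before handing the range to remap_pfn_range(). That validation step is restated below as a self-contained sketch; the sizes and names are hypothetical.

	#include <stdio.h>
	#include <stdint.h>

	struct bar { uint64_t bus_addr; size_t len; };

	/* Validate an mmap request against a device BAR the way the driver
	 * does: the offset selects the resource, the length must match. */
	static int check_map(unsigned long pgoff, size_t req_len,
			     unsigned int vfid, const struct bar *bar)
	{
		if (pgoff != vfid)
			return -1;	/* wrong resource selected */
		if (req_len != bar->len)
			return -1;	/* partial BAR maps are refused */
		return 0;
	}

	int main(void)
	{
		struct bar bar0 = { 0xfe000000, 64 * 1024 };	/* hypothetical */

		printf("exact map : %d\n", check_map(0, 64 * 1024, 0, &bar0));
		printf("short map : %d\n", check_map(0, 4096, 0, &bar0));
		return 0;
	}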
usnic_err("No VF %u found\n", vfid); + return -EINVAL; +} + +/* In ib callbacks section - Start of stub funcs */ +struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, + struct ib_ah_attr *ah_attr) +{ + usnic_dbg("\n"); + return ERR_PTR(-EPERM); +} + +int usnic_ib_destroy_ah(struct ib_ah *ah) +{ + usnic_dbg("\n"); + return -EINVAL; +} + +int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + usnic_dbg("\n"); + return -EINVAL; +} + +int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + usnic_dbg("\n"); + return -EINVAL; +} + +int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *wc) +{ + usnic_dbg("\n"); + return -EINVAL; +} + +int usnic_ib_req_notify_cq(struct ib_cq *cq, + enum ib_cq_notify_flags flags) +{ + usnic_dbg("\n"); + return -EINVAL; +} + +struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + usnic_dbg("\n"); + return ERR_PTR(-ENOMEM); +} + + +/* In ib callbacks section - End of stub funcs */ +/* End of ib callbacks section */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef USNIC_IB_VERBS_H_ +#define USNIC_IB_VERBS_H_ + +#include "usnic_ib.h" + +enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, + u8 port_num); +int usnic_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props); +int usnic_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); +int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); +int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); +int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey); +struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata); +int usnic_ib_dealloc_pd(struct ib_pd *pd); +struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); +int usnic_ib_destroy_qp(struct ib_qp *qp); +int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *context, + struct ib_udata *udata); +int usnic_ib_destroy_cq(struct ib_cq *cq); +struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int usnic_ib_dereg_mr(struct ib_mr *ibmr); +struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata); +int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); +int usnic_ib_mmap(struct ib_ucontext *context, + struct vm_area_struct *vma); +struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, + struct ib_ah_attr *ah_attr); +int usnic_ib_destroy_ah(struct ib_ah *ah); +int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries, + struct ib_wc *wc); +int usnic_ib_req_notify_cq(struct ib_cq *cq, + enum ib_cq_notify_flags flags); +struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc); +#endif /* !USNIC_IB_VERBS_H */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_log.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_log.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef USNIC_LOG_H_ +#define USNIC_LOG_H_ + +#include "usnic.h" + +extern unsigned int usnic_log_lvl; + +#define USNIC_LOG_LVL_NONE (0) +#define USNIC_LOG_LVL_ERR (1) +#define USNIC_LOG_LVL_INFO (2) +#define USNIC_LOG_LVL_DBG (3) + +#define usnic_printk(lvl, args...) 
\ + do { \ + printk(lvl "%s:%s:%d: ", DRV_NAME, __func__, \ + __LINE__); \ + printk(args); \ + } while (0) + +#define usnic_dbg(args...) \ + do { \ + if (unlikely(usnic_log_lvl >= USNIC_LOG_LVL_DBG)) { \ + usnic_printk(KERN_INFO, args); \ + } \ +} while (0) + +#define usnic_info(args...) \ +do { \ + if (usnic_log_lvl >= USNIC_LOG_LVL_INFO) { \ + usnic_printk(KERN_INFO, args); \ + } \ +} while (0) + +#define usnic_err(args...) \ + do { \ + if (usnic_log_lvl >= USNIC_LOG_LVL_ERR) { \ + usnic_printk(KERN_ERR, args); \ + } \ + } while (0) +#endif /* !USNIC_LOG_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_transport.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_transport.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ +#include +#include +#include +#include +#include + +#include "usnic_transport.h" +#include "usnic_log.h" + +/* ROCE */ +static unsigned long *roce_bitmap; +static u16 roce_next_port = 1; +#define ROCE_BITMAP_SZ ((1 << (8 /*CHAR_BIT*/ * sizeof(u16)))/8 /*CHAR BIT*/) +static DEFINE_SPINLOCK(roce_bitmap_lock); + +const char *usnic_transport_to_str(enum usnic_transport_type type) +{ + switch (type) { + case USNIC_TRANSPORT_UNKNOWN: + return "Unknown"; + case USNIC_TRANSPORT_ROCE_CUSTOM: + return "roce custom"; + case USNIC_TRANSPORT_IPV4_UDP: + return "IPv4 UDP"; + case USNIC_TRANSPORT_MAX: + return "Max?"; + default: + return "Not known"; + } +} + +int usnic_transport_sock_to_str(char *buf, int buf_sz, + struct socket *sock) +{ + int err; + uint32_t addr; + uint16_t port; + int proto; + + memset(buf, 0, buf_sz); + err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port); + if (err) + return 0; + + return scnprintf(buf, buf_sz, "Proto:%u Addr:%pI4h Port:%hu", + proto, &addr, port); +} + +/* + * reserve a port number. if "0" specified, we will try to pick one + * starting at roce_next_port. 
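The usnic_log.h macros above gate every message on the runtime usnic_log_lvl value (exposed as a writable module parameter earlier in this patch) and prefix it with the driver name, function and line number. A user-space approximation is sketched below; it collapses the two printk() calls into a single fprintf(), and the names are illustrative only.

	#include <stdio.h>

	enum { LOG_NONE, LOG_ERR, LOG_INFO, LOG_DBG };

	/* Stand-in for the usnic_log_lvl module parameter. */
	static unsigned int log_lvl = LOG_INFO;

	#define log_at(lvl, fmt, ...)						\
		do {								\
			if (log_lvl >= (lvl))					\
				fprintf(stderr, "%s:%s:%d: " fmt "\n",		\
					"demo", __func__, __LINE__,		\
					##__VA_ARGS__);				\
		} while (0)

	#define log_err(fmt, ...)  log_at(LOG_ERR, fmt, ##__VA_ARGS__)
	#define log_info(fmt, ...) log_at(LOG_INFO, fmt, ##__VA_ARGS__)
	#define log_dbg(fmt, ...)  log_at(LOG_DBG, fmt, ##__VA_ARGS__)

	int main(void)
	{
		log_err("shown at any level >= %d", LOG_ERR);
		log_info("shown because the level is %u", log_lvl);
		log_dbg("suppressed until the level is raised");
		return 0;
	}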
roce_next_port will take on the values + * 1..4096 + */ +u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num) +{ + if (type == USNIC_TRANSPORT_ROCE_CUSTOM) { + spin_lock(&roce_bitmap_lock); + if (!port_num) { + port_num = bitmap_find_next_zero_area(roce_bitmap, + ROCE_BITMAP_SZ, + roce_next_port /* start */, + 1 /* nr */, + 0 /* align */); + roce_next_port = (port_num & 4095) + 1; + } else if (test_bit(port_num, roce_bitmap)) { + usnic_err("Failed to allocate port for %s\n", + usnic_transport_to_str(type)); + spin_unlock(&roce_bitmap_lock); + goto out_fail; + } + bitmap_set(roce_bitmap, port_num, 1); + spin_unlock(&roce_bitmap_lock); + } else { + usnic_err("Failed to allocate port - transport %s unsupported\n", + usnic_transport_to_str(type)); + goto out_fail; + } + + usnic_dbg("Allocating port %hu for %s\n", port_num, + usnic_transport_to_str(type)); + return port_num; + +out_fail: + return 0; +} + +void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num) +{ + if (type == USNIC_TRANSPORT_ROCE_CUSTOM) { + spin_lock(&roce_bitmap_lock); + if (!port_num) { + usnic_err("Unreserved unvalid port num 0 for %s\n", + usnic_transport_to_str(type)); + goto out_roce_custom; + } + + if (!test_bit(port_num, roce_bitmap)) { + usnic_err("Unreserving invalid %hu for %s\n", + port_num, + usnic_transport_to_str(type)); + goto out_roce_custom; + } + bitmap_clear(roce_bitmap, port_num, 1); + usnic_dbg("Freeing port %hu for %s\n", port_num, + usnic_transport_to_str(type)); +out_roce_custom: + spin_unlock(&roce_bitmap_lock); + } else { + usnic_err("Freeing invalid port %hu for %d\n", port_num, type); + } +} + +struct socket *usnic_transport_get_socket(int sock_fd) +{ + struct socket *sock; + int err; + char buf[25]; + + /* sockfd_lookup will internally do a fget */ + sock = sockfd_lookup(sock_fd, &err); + if (!sock) { + usnic_err("Unable to lookup socket for fd %d with err %d\n", + sock_fd, err); + return ERR_PTR(-ENOENT); + } + + usnic_transport_sock_to_str(buf, sizeof(buf), sock); + usnic_dbg("Get sock %s\n", buf); + + return sock; +} + +void usnic_transport_put_socket(struct socket *sock) +{ + char buf[100]; + + usnic_transport_sock_to_str(buf, sizeof(buf), sock); + usnic_dbg("Put sock %s\n", buf); + sockfd_put(sock); +} + +int usnic_transport_sock_get_addr(struct socket *sock, int *proto, + uint32_t *addr, uint16_t *port) +{ + int len; + int err; + struct sockaddr_in sock_addr; + + err = sock->ops->getname(sock, + (struct sockaddr *)&sock_addr, + &len, 0); + if (err) + return err; + + if (sock_addr.sin_family != AF_INET) + return -EINVAL; + + if (proto) + *proto = sock->sk->sk_protocol; + if (port) + *port = ntohs(((struct sockaddr_in *)&sock_addr)->sin_port); + if (addr) + *addr = ntohl(((struct sockaddr_in *) + &sock_addr)->sin_addr.s_addr); + + return 0; +} + +int usnic_transport_init(void) +{ + roce_bitmap = kzalloc(ROCE_BITMAP_SZ, GFP_KERNEL); + if (!roce_bitmap) { + usnic_err("Failed to allocate bit map"); + return -ENOMEM; + } + + /* Do not ever allocate bit 0, hence set it here */ + bitmap_set(roce_bitmap, 0, 1); + return 0; +} + +void usnic_transport_fini(void) +{ + kfree(roce_bitmap); +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_transport.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_transport.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
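Port reservation for the custom RoCE transport above is a spinlock-protected bitmap: a set bit means the port is taken, bit 0 is set once at init so port 0 is never handed out, and roce_next_port rotates where the search for a free port starts. The sketch below shows the same reserve/release logic in user space with a byte-per-port array and a mutex instead of the kernel bitmap helpers; it is an illustration, not the driver's code.

	#include <stdio.h>
	#include <pthread.h>

	#define NPORTS 4096

	static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned char port_used[NPORTS];
	static unsigned int next_port = 1;

	/* Reserve a specific port, or any free one when port == 0.
	 * Returns the reserved port, or 0 on failure (0 is never valid). */
	static unsigned int reserve_port(unsigned int port)
	{
		unsigned int i;

		pthread_mutex_lock(&port_lock);
		if (port == 0) {
			for (i = 0; i < NPORTS; i++) {
				unsigned int cand = (next_port + i) % NPORTS;

				if (cand && !port_used[cand]) {
					port = cand;
					next_port = (cand % (NPORTS - 1)) + 1;
					break;
				}
			}
		} else if (port >= NPORTS || port_used[port]) {
			port = 0;	/* requested port unavailable */
		}
		if (port)
			port_used[port] = 1;
		pthread_mutex_unlock(&port_lock);
		return port;
	}

	static void release_port(unsigned int port)
	{
		pthread_mutex_lock(&port_lock);
		if (port && port < NPORTS)
			port_used[port] = 0;
		pthread_mutex_unlock(&port_lock);
	}

	int main(void)
	{
		unsigned int p1 = reserve_port(0);
		unsigned int p2 = reserve_port(p1);	/* already taken -> 0 */

		printf("got %u, duplicate request -> %u\n", p1, p2);
		release_port(p1);
		return 0;
	}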
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef USNIC_TRANSPORT_H_ +#define USNIC_TRANSPORT_H_ + +#include "usnic_abi.h" + +const char *usnic_transport_to_str(enum usnic_transport_type trans_type); +/* + * Returns number of bytes written, excluding null terminator. If + * nothing was written, the function returns 0. + */ +int usnic_transport_sock_to_str(char *buf, int buf_sz, + struct socket *sock); +/* + * Reserve a port. If "port_num" is set, then the function will try + * to reserve that particular port. + */ +u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num); +void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num); +/* + * Do a fget on the socket refered to by sock_fd and returns the socket. + * Socket will not be destroyed before usnic_transport_put_socket has + * been called. + */ +struct socket *usnic_transport_get_socket(int sock_fd); +void usnic_transport_put_socket(struct socket *sock); +/* + * Call usnic_transport_get_socket before calling *_sock_get_addr + */ +int usnic_transport_sock_get_addr(struct socket *sock, int *proto, + uint32_t *addr, uint16_t *port); +int usnic_transport_init(void); +void usnic_transport_fini(void); +#endif /* !USNIC_TRANSPORT_H */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_uiom.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -0,0 +1,604 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2013 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
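
usnic_transport_sock_get_addr() pulls the protocol, bound IPv4 address and port out of a socket through its ->getname() op. A rough user-space analogue using plain getsockname() on a UDP socket bound to loopback (illustrative only, with minimal error handling):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in sa = {
            .sin_family = AF_INET,
            .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
            .sin_port = 0,                  /* let the kernel pick a port */
        };
        struct sockaddr_in bound;
        socklen_t len = sizeof(bound);
        char addr[INET_ADDRSTRLEN];

        if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
            return 1;
        if (getsockname(fd, (struct sockaddr *)&bound, &len) < 0)
            return 1;

        /* The same three pieces of information the kernel helper reports. */
        inet_ntop(AF_INET, &bound.sin_addr, addr, sizeof(addr));
        printf("Proto:UDP Addr:%s Port:%hu\n", addr, ntohs(bound.sin_port));

        close(fd);
        return 0;
    }
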
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "usnic_log.h" +#include "usnic_uiom.h" +#include "usnic_uiom_interval_tree.h" + +static struct workqueue_struct *usnic_uiom_wq; + +#define USNIC_UIOM_PAGE_CHUNK \ + ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\ + ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \ + (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0])) + +static void usnic_uiom_reg_account(struct work_struct *work) +{ + struct usnic_uiom_reg *umem = container_of(work, + struct usnic_uiom_reg, work); + + down_write(&umem->mm->mmap_sem); + umem->mm->locked_vm -= umem->diff; + up_write(&umem->mm->mmap_sem); + mmput(umem->mm); + kfree(umem); +} + +static int usnic_uiom_dma_fault(struct iommu_domain *domain, + struct device *dev, + unsigned long iova, int flags, + void *token) +{ + usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n", + dev_name(dev), + domain, iova, flags); + return -ENOSYS; +} + +static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty) +{ + struct usnic_uiom_chunk *chunk, *tmp; + struct page *page; + struct scatterlist *sg; + int i; + dma_addr_t pa; + + list_for_each_entry_safe(chunk, tmp, chunk_list, list) { + for_each_sg(chunk->page_list, sg, chunk->nents, i) { + page = sg_page(sg); + pa = sg_phys(sg); + if (dirty) + set_page_dirty_lock(page); + put_page(page); + usnic_dbg("pa: %pa\n", &pa); + } + kfree(chunk); + } +} + +static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, + int dmasync, struct list_head *chunk_list) +{ + struct page **page_list; + struct scatterlist *sg; + struct usnic_uiom_chunk *chunk; + unsigned long locked; + unsigned long lock_limit; + unsigned long cur_base; + unsigned long npages; + int ret; + int off; + int i; + int flags; + dma_addr_t pa; + DEFINE_DMA_ATTRS(attrs); + + if (dmasync) + dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + + if (!can_do_mlock()) + return -EPERM; + + INIT_LIST_HEAD(chunk_list); + + page_list = (struct page **) __get_free_page(GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; + + down_write(¤t->mm->mmap_sem); + + locked = npages + current->mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { + ret = -ENOMEM; + goto out; + } + + flags = IOMMU_READ | IOMMU_CACHE; + flags |= (writable) ? 
IOMMU_WRITE : 0; + cur_base = addr & PAGE_MASK; + ret = 0; + + while (npages) { + ret = get_user_pages(current, current->mm, cur_base, + min_t(unsigned long, npages, + PAGE_SIZE / sizeof(struct page *)), + 1, !writable, page_list, NULL); + + if (ret < 0) + goto out; + + npages -= ret; + off = 0; + + while (ret) { + chunk = kmalloc(sizeof(*chunk) + + sizeof(struct scatterlist) * + min_t(int, ret, USNIC_UIOM_PAGE_CHUNK), + GFP_KERNEL); + if (!chunk) { + ret = -ENOMEM; + goto out; + } + + chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK); + sg_init_table(chunk->page_list, chunk->nents); + for_each_sg(chunk->page_list, sg, chunk->nents, i) { + sg_set_page(sg, page_list[i + off], + PAGE_SIZE, 0); + pa = sg_phys(sg); + usnic_dbg("va: 0x%lx pa: %pa\n", + cur_base + i*PAGE_SIZE, &pa); + } + cur_base += chunk->nents * PAGE_SIZE; + ret -= chunk->nents; + off += chunk->nents; + list_add_tail(&chunk->list, chunk_list); + } + + ret = 0; + } + +out: + if (ret < 0) + usnic_uiom_put_pages(chunk_list, 0); + else + current->mm->locked_vm = locked; + + up_write(¤t->mm->mmap_sem); + free_page((unsigned long) page_list); + return ret; +} + +static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals, + struct usnic_uiom_pd *pd) +{ + struct usnic_uiom_interval_node *interval, *tmp; + long unsigned va, size; + + list_for_each_entry_safe(interval, tmp, intervals, link) { + va = interval->start << PAGE_SHIFT; + size = ((interval->last - interval->start) + 1) << PAGE_SHIFT; + while (size > 0) { + /* Workaround for RH 970401 */ + usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE); + iommu_unmap(pd->domain, va, PAGE_SIZE); + va += PAGE_SIZE; + size -= PAGE_SIZE; + } + } +} + +static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, + struct usnic_uiom_reg *uiomr, + int dirty) +{ + int npages; + unsigned long vpn_start, vpn_last; + struct usnic_uiom_interval_node *interval, *tmp; + int writable = 0; + LIST_HEAD(rm_intervals); + + npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; + vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT; + vpn_last = vpn_start + npages - 1; + + spin_lock(&pd->lock); + usnic_uiom_remove_interval(&pd->rb_root, vpn_start, + vpn_last, &rm_intervals); + usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); + + list_for_each_entry_safe(interval, tmp, &rm_intervals, link) { + if (interval->flags & IOMMU_WRITE) + writable = 1; + list_del(&interval->link); + kfree(interval); + } + + usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable); + spin_unlock(&pd->lock); +} + +static int usnic_uiom_map_sorted_intervals(struct list_head *intervals, + struct usnic_uiom_reg *uiomr) +{ + int i, err; + size_t size; + struct usnic_uiom_chunk *chunk; + struct usnic_uiom_interval_node *interval_node; + dma_addr_t pa; + dma_addr_t pa_start = 0; + dma_addr_t pa_end = 0; + long int va_start = -EINVAL; + struct usnic_uiom_pd *pd = uiomr->pd; + long int va = uiomr->va & PAGE_MASK; + int flags = IOMMU_READ | IOMMU_CACHE; + + flags |= (uiomr->writable) ? 
IOMMU_WRITE : 0; + chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk, + list); + list_for_each_entry(interval_node, intervals, link) { +iter_chunk: + for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) { + pa = sg_phys(&chunk->page_list[i]); + if ((va >> PAGE_SHIFT) < interval_node->start) + continue; + + if ((va >> PAGE_SHIFT) == interval_node->start) { + /* First page of the interval */ + va_start = va; + pa_start = pa; + pa_end = pa; + } + + WARN_ON(va_start == -EINVAL); + + if ((pa_end + PAGE_SIZE != pa) && + (pa != pa_start)) { + /* PAs are not contiguous */ + size = pa_end - pa_start + PAGE_SIZE; + usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x", + va_start, &pa_start, size, flags); + err = iommu_map(pd->domain, va_start, pa_start, + size, flags); + if (err) { + usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n", + va_start, &pa_start, size, err); + goto err_out; + } + va_start = va; + pa_start = pa; + pa_end = pa; + } + + if ((va >> PAGE_SHIFT) == interval_node->last) { + /* Last page of the interval */ + size = pa - pa_start + PAGE_SIZE; + usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n", + va_start, &pa_start, size, flags); + err = iommu_map(pd->domain, va_start, pa_start, + size, flags); + if (err) { + usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", + va_start, &pa_start, size, err); + goto err_out; + } + break; + } + + if (pa != pa_start) + pa_end += PAGE_SIZE; + } + + if (i == chunk->nents) { + /* + * Hit last entry of the chunk, + * hence advance to next chunk + */ + chunk = list_first_entry(&chunk->list, + struct usnic_uiom_chunk, + list); + goto iter_chunk; + } + } + + return 0; + +err_out: + usnic_uiom_unmap_sorted_intervals(intervals, pd); + return err; +} + +struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, + unsigned long addr, size_t size, + int writable, int dmasync) +{ + struct usnic_uiom_reg *uiomr; + unsigned long va_base, vpn_start, vpn_last; + unsigned long npages; + int offset, err; + LIST_HEAD(sorted_diff_intervals); + + /* + * Intel IOMMU map throws an error if a translation entry is + * changed from read to write. This module may not unmap + * and then remap the entry after fixing the permission + * b/c this open up a small windows where hw DMA may page fault + * Hence, make all entries to be writable. + */ + writable = 1; + + va_base = addr & PAGE_MASK; + offset = addr & ~PAGE_MASK; + npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; + vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT; + vpn_last = vpn_start + npages - 1; + + uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL); + if (!uiomr) + return ERR_PTR(-ENOMEM); + + uiomr->va = va_base; + uiomr->offset = offset; + uiomr->length = size; + uiomr->writable = writable; + uiomr->pd = pd; + + err = usnic_uiom_get_pages(addr, size, writable, dmasync, + &uiomr->chunk_list); + if (err) { + usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n", + vpn_start, vpn_last, err); + goto out_free_uiomr; + } + + spin_lock(&pd->lock); + err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last, + (writable) ? 
IOMMU_WRITE : 0, + IOMMU_WRITE, + &pd->rb_root, + &sorted_diff_intervals); + if (err) { + usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n", + vpn_start, vpn_last, err); + goto out_put_pages; + } + + err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr); + if (err) { + usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n", + vpn_start, vpn_last, err); + goto out_put_intervals; + + } + + err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last, + (writable) ? IOMMU_WRITE : 0); + if (err) { + usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n", + vpn_start, vpn_last, err); + goto out_unmap_intervals; + } + + usnic_uiom_put_interval_set(&sorted_diff_intervals); + spin_unlock(&pd->lock); + + return uiomr; + +out_unmap_intervals: + usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd); +out_put_intervals: + usnic_uiom_put_interval_set(&sorted_diff_intervals); +out_put_pages: + usnic_uiom_put_pages(&uiomr->chunk_list, 0); + spin_unlock(&pd->lock); +out_free_uiomr: + kfree(uiomr); + return ERR_PTR(err); +} + +void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing) +{ + struct mm_struct *mm; + unsigned long diff; + + __usnic_uiom_reg_release(uiomr->pd, uiomr, 1); + + mm = get_task_mm(current); + if (!mm) { + kfree(uiomr); + return; + } + + diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; + + /* + * We may be called with the mm's mmap_sem already held. This + * can happen when a userspace munmap() is the call that drops + * the last reference to our file and calls our release + * method. If there are memory regions to destroy, we'll end + * up here and not be able to take the mmap_sem. In that case + * we defer the vm_locked accounting to the system workqueue. + */ + if (closing) { + if (!down_write_trylock(&mm->mmap_sem)) { + INIT_WORK(&uiomr->work, usnic_uiom_reg_account); + uiomr->mm = mm; + uiomr->diff = diff; + + queue_work(usnic_uiom_wq, &uiomr->work); + return; + } + } else + down_write(&mm->mmap_sem); + + current->mm->locked_vm -= diff; + up_write(&mm->mmap_sem); + mmput(mm); + kfree(uiomr); +} + +struct usnic_uiom_pd *usnic_uiom_alloc_pd(void) +{ + struct usnic_uiom_pd *pd; + void *domain; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + pd->domain = domain = iommu_domain_alloc(&pci_bus_type); + if (IS_ERR_OR_NULL(domain)) { + usnic_err("Failed to allocate IOMMU domain with err %ld\n", + PTR_ERR(pd->domain)); + kfree(pd); + return ERR_PTR(domain ? 
PTR_ERR(domain) : -ENOMEM); + } + + iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); + + spin_lock_init(&pd->lock); + INIT_LIST_HEAD(&pd->devs); + + return pd; +} + +void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd) +{ + iommu_domain_free(pd->domain); + kfree(pd); +} + +int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) +{ + struct usnic_uiom_dev *uiom_dev; + int err; + + uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC); + if (!uiom_dev) + return -ENOMEM; + uiom_dev->dev = dev; + + err = iommu_attach_device(pd->domain, dev); + if (err) + goto out_free_dev; + + if (!iommu_domain_has_cap(pd->domain, IOMMU_CAP_CACHE_COHERENCY)) { + usnic_err("IOMMU of %s does not support cache coherency\n", + dev_name(dev)); + err = -EINVAL; + goto out_detach_device; + } + + spin_lock(&pd->lock); + list_add_tail(&uiom_dev->link, &pd->devs); + pd->dev_cnt++; + spin_unlock(&pd->lock); + + return 0; + +out_detach_device: + iommu_detach_device(pd->domain, dev); +out_free_dev: + kfree(uiom_dev); + return err; +} + +void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev) +{ + struct usnic_uiom_dev *uiom_dev; + int found = 0; + + spin_lock(&pd->lock); + list_for_each_entry(uiom_dev, &pd->devs, link) { + if (uiom_dev->dev == dev) { + found = 1; + break; + } + } + + if (!found) { + usnic_err("Unable to free dev %s - not found\n", + dev_name(dev)); + spin_unlock(&pd->lock); + return; + } + + list_del(&uiom_dev->link); + pd->dev_cnt--; + spin_unlock(&pd->lock); + + return iommu_detach_device(pd->domain, dev); +} + +struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd) +{ + struct usnic_uiom_dev *uiom_dev; + struct device **devs; + int i = 0; + + spin_lock(&pd->lock); + devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC); + if (!devs) { + devs = ERR_PTR(-ENOMEM); + goto out; + } + + list_for_each_entry(uiom_dev, &pd->devs, link) { + devs[i++] = uiom_dev->dev; + } +out: + spin_unlock(&pd->lock); + return devs; +} + +void usnic_uiom_free_dev_list(struct device **devs) +{ + kfree(devs); +} + +int usnic_uiom_init(char *drv_name) +{ + if (!iommu_present(&pci_bus_type)) { + usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n"); + return -EPERM; + } + + usnic_uiom_wq = create_workqueue(drv_name); + if (!usnic_uiom_wq) { + usnic_err("Unable to alloc wq for drv %s\n", drv_name); + return -ENOMEM; + } + + return 0; +} + +void usnic_uiom_fini(void) +{ + flush_workqueue(usnic_uiom_wq); + destroy_workqueue(usnic_uiom_wq); +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_uiom.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
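
Before pinning anything, usnic_uiom_get_pages() rounds the requested byte range out to whole pages and checks the result against the caller's RLIMIT_MEMLOCK. The same arithmetic can be sketched in user space as follows (a sketch only; the kernel additionally accounts the pages in mm->locked_vm and lets CAP_IPC_LOCK bypass the limit):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    /* Pages covered by [addr, addr + size), including partial first/last pages. */
    static unsigned long npages_for(uintptr_t addr, size_t size, long page_size)
    {
        uintptr_t offset = addr & (page_size - 1);

        return (size + offset + page_size - 1) / page_size;
    }

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        uintptr_t addr = 0x1003;            /* example: unaligned buffer start */
        size_t size = 3 * page_size;        /* three pages of payload          */
        struct rlimit rl;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) < 0)
            return 1;

        unsigned long want  = npages_for(addr, size, page_size);    /* 4 pages */
        unsigned long limit = (rl.rlim_cur == RLIM_INFINITY) ?
                                  ~0UL : (unsigned long)(rl.rlim_cur / page_size);

        printf("want %lu pages, limit %lu pages -> %s\n", want, limit,
               want <= limit ? "ok" : "would fail with -ENOMEM");
        return 0;
    }

usnic_uiom_reg_release() undoes the same accounting later; when it runs from a path that may already hold mmap_sem it defers the subtraction to a workqueue instead of blocking.
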
+ * + */ + +#ifndef USNIC_UIOM_H_ +#define USNIC_UIOM_H_ + +#include +#include + +#include "usnic_uiom_interval_tree.h" + +#define USNIC_UIOM_READ (1) +#define USNIC_UIOM_WRITE (2) + +#define USNIC_UIOM_MAX_PD_CNT (1000) +#define USNIC_UIOM_MAX_MR_CNT (1000000) +#define USNIC_UIOM_MAX_MR_SIZE (~0UL) +#define USNIC_UIOM_PAGE_SIZE (PAGE_SIZE) + +struct usnic_uiom_dev { + struct device *dev; + struct list_head link; +}; + +struct usnic_uiom_pd { + struct iommu_domain *domain; + spinlock_t lock; + struct rb_root rb_root; + struct list_head devs; + int dev_cnt; +}; + +struct usnic_uiom_reg { + struct usnic_uiom_pd *pd; + unsigned long va; + size_t length; + int offset; + int page_size; + int writable; + struct list_head chunk_list; + struct work_struct work; + struct mm_struct *mm; + unsigned long diff; +}; + +struct usnic_uiom_chunk { + struct list_head list; + int nents; + struct scatterlist page_list[0]; +}; + +struct usnic_uiom_pd *usnic_uiom_alloc_pd(void); +void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd); +int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev); +void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, + struct device *dev); +struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd); +void usnic_uiom_free_dev_list(struct device **devs); +struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, + unsigned long addr, size_t size, + int access, int dmasync); +void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing); +int usnic_uiom_init(char *drv_name); +void usnic_uiom_fini(void); +#endif /* USNIC_UIOM_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c @@ -0,0 +1,236 @@ +#include +#include +#include +#include + +#include +#include "usnic_uiom_interval_tree.h" + +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) + +#define MAKE_NODE(node, start, end, ref_cnt, flags, err, err_out) \ + do { \ + node = usnic_uiom_interval_node_alloc(start, \ + end, ref_cnt, flags); \ + if (!node) { \ + err = -ENOMEM; \ + goto err_out; \ + } \ + } while (0) + +#define MARK_FOR_ADD(node, list) (list_add_tail(&node->link, list)) + +#define MAKE_NODE_AND_APPEND(node, start, end, ref_cnt, flags, err, \ + err_out, list) \ + do { \ + MAKE_NODE(node, start, end, \ + ref_cnt, flags, err, \ + err_out); \ + MARK_FOR_ADD(node, list); \ + } while (0) + +#define FLAGS_EQUAL(flags1, flags2, mask) \ + (((flags1) & (mask)) == ((flags2) & (mask))) + +static struct usnic_uiom_interval_node* +usnic_uiom_interval_node_alloc(long int start, long int last, int ref_cnt, + int flags) +{ + struct usnic_uiom_interval_node *interval = kzalloc(sizeof(*interval), + GFP_ATOMIC); + if (!interval) + return NULL; + + interval->start = start; + interval->last = last; + interval->flags = flags; + interval->ref_cnt = ref_cnt; + + return interval; +} + +static int interval_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct usnic_uiom_interval_node *node_a, *node_b; + + node_a = list_entry(a, struct usnic_uiom_interval_node, link); + node_b = list_entry(b, struct usnic_uiom_interval_node, link); + + /* long to int */ + if (node_a->start < node_b->start) + return -1; + else if (node_a->start > node_b->start) + return 1; + + return 0; +} + +static void +find_intervals_intersection_sorted(struct rb_root *root, unsigned long start, + unsigned long last, + struct list_head *list) +{ + struct usnic_uiom_interval_node *node; + + 
INIT_LIST_HEAD(list); + + for (node = usnic_uiom_interval_tree_iter_first(root, start, last); + node; + node = usnic_uiom_interval_tree_iter_next(node, start, last)) + list_add_tail(&node->link, list); + + list_sort(NULL, list, interval_cmp); +} + +int usnic_uiom_get_intervals_diff(unsigned long start, unsigned long last, + int flags, int flag_mask, + struct rb_root *root, + struct list_head *diff_set) +{ + struct usnic_uiom_interval_node *interval, *tmp; + int err = 0; + long int pivot = start; + LIST_HEAD(intersection_set); + + INIT_LIST_HEAD(diff_set); + + find_intervals_intersection_sorted(root, start, last, + &intersection_set); + + list_for_each_entry(interval, &intersection_set, link) { + if (pivot < interval->start) { + MAKE_NODE_AND_APPEND(tmp, pivot, interval->start - 1, + 1, flags, err, err_out, + diff_set); + pivot = interval->start; + } + + /* + * Invariant: Set [start, pivot] is either in diff_set or root, + * but not in both. + */ + + if (pivot > interval->last) { + continue; + } else if (pivot <= interval->last && + FLAGS_EQUAL(interval->flags, flags, + flag_mask)) { + pivot = interval->last + 1; + } + } + + if (pivot <= last) + MAKE_NODE_AND_APPEND(tmp, pivot, last, 1, flags, err, err_out, + diff_set); + + return 0; + +err_out: + list_for_each_entry_safe(interval, tmp, diff_set, link) { + list_del(&interval->link); + kfree(interval); + } + + return err; +} + +void usnic_uiom_put_interval_set(struct list_head *intervals) +{ + struct usnic_uiom_interval_node *interval, *tmp; + list_for_each_entry_safe(interval, tmp, intervals, link) + kfree(interval); +} + +int usnic_uiom_insert_interval(struct rb_root *root, unsigned long start, + unsigned long last, int flags) +{ + struct usnic_uiom_interval_node *interval, *tmp; + unsigned long istart, ilast; + int iref_cnt, iflags; + unsigned long lpivot = start; + int err = 0; + LIST_HEAD(to_add); + LIST_HEAD(intersection_set); + + find_intervals_intersection_sorted(root, start, last, + &intersection_set); + + list_for_each_entry(interval, &intersection_set, link) { + /* + * Invariant - lpivot is the left edge of next interval to be + * inserted + */ + istart = interval->start; + ilast = interval->last; + iref_cnt = interval->ref_cnt; + iflags = interval->flags; + + if (istart < lpivot) { + MAKE_NODE_AND_APPEND(tmp, istart, lpivot - 1, iref_cnt, + iflags, err, err_out, &to_add); + } else if (istart > lpivot) { + MAKE_NODE_AND_APPEND(tmp, lpivot, istart - 1, 1, flags, + err, err_out, &to_add); + lpivot = istart; + } else { + lpivot = istart; + } + + if (ilast > last) { + MAKE_NODE_AND_APPEND(tmp, lpivot, last, iref_cnt + 1, + iflags | flags, err, err_out, + &to_add); + MAKE_NODE_AND_APPEND(tmp, last + 1, ilast, iref_cnt, + iflags, err, err_out, &to_add); + } else { + MAKE_NODE_AND_APPEND(tmp, lpivot, ilast, iref_cnt + 1, + iflags | flags, err, err_out, + &to_add); + } + + lpivot = ilast + 1; + } + + if (lpivot <= last) + MAKE_NODE_AND_APPEND(tmp, lpivot, last, 1, flags, err, err_out, + &to_add); + + list_for_each_entry_safe(interval, tmp, &intersection_set, link) { + usnic_uiom_interval_tree_remove(interval, root); + kfree(interval); + } + + list_for_each_entry(interval, &to_add, link) + usnic_uiom_interval_tree_insert(interval, root); + + return 0; + +err_out: + list_for_each_entry_safe(interval, tmp, &to_add, link) + kfree(interval); + + return err; +} + +void usnic_uiom_remove_interval(struct rb_root *root, unsigned long start, + unsigned long last, struct list_head *removed) +{ + struct usnic_uiom_interval_node *interval; + + for 
(interval = usnic_uiom_interval_tree_iter_first(root, start, last); + interval; + interval = usnic_uiom_interval_tree_iter_next(interval, + start, + last)) { + if (--interval->ref_cnt == 0) + list_add_tail(&interval->link, removed); + } + + list_for_each_entry(interval, removed, link) + usnic_uiom_interval_tree_remove(interval, root); +} + +INTERVAL_TREE_DEFINE(struct usnic_uiom_interval_node, rb, + unsigned long, __subtree_last, + START, LAST, , usnic_uiom_interval_tree) --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef USNIC_UIOM_INTERVAL_TREE_H_ +#define USNIC_UIOM_INTERVAL_TREE_H_ + +#include + +struct usnic_uiom_interval_node { + struct rb_node rb; + struct list_head link; + unsigned long start; + unsigned long last; + unsigned long __subtree_last; + unsigned int ref_cnt; + int flags; +}; + +extern void +usnic_uiom_interval_tree_insert(struct usnic_uiom_interval_node *node, + struct rb_root *root); +extern void +usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node, + struct rb_root *root); +extern struct usnic_uiom_interval_node * +usnic_uiom_interval_tree_iter_first(struct rb_root *root, + unsigned long start, + unsigned long last); +extern struct usnic_uiom_interval_node * +usnic_uiom_interval_tree_iter_next(struct usnic_uiom_interval_node *node, + unsigned long start, unsigned long last); +/* + * Inserts {start...last} into {root}. If there are overlaps, + * nodes will be broken up and merged + */ +int usnic_uiom_insert_interval(struct rb_root *root, + unsigned long start, unsigned long last, + int flags); +/* + * Removed {start...last} from {root}. The nodes removed are returned in + * 'removed.' The caller is responsibile for freeing memory of nodes in + * 'removed.' + */ +void usnic_uiom_remove_interval(struct rb_root *root, + unsigned long start, unsigned long last, + struct list_head *removed); +/* + * Returns {start...last} - {root} (relative complement of {start...last} in + * {root}) in diff_set sorted ascendingly + */ +int usnic_uiom_get_intervals_diff(unsigned long start, + unsigned long last, int flags, + int flag_mask, + struct rb_root *root, + struct list_head *diff_set); +/* Call this to free diff_set returned by usnic_uiom_get_intervals_diff */ +void usnic_uiom_put_interval_set(struct list_head *intervals); +#endif /* USNIC_UIOM_INTERVAL_TREE_H_ */ --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_vnic.c +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_vnic.c @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
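
usnic_uiom_get_intervals_diff() works out which parts of a requested page range still need to be mapped: it walks the already-registered intervals in sorted order and emits the gaps between them, tracking a moving pivot. A simplified user-space sketch of that walk, assuming the existing intervals are sorted and non-overlapping (the kernel version also compares IOMMU access flags and collects freshly allocated list nodes instead of printing):

    #include <stdio.h>

    struct interval {
        unsigned long start;
        unsigned long last;                 /* inclusive */
    };

    /* Print the pieces of [start, last] not covered by @existing. */
    static void print_diff(unsigned long start, unsigned long last,
                           const struct interval *existing, int n)
    {
        unsigned long pivot = start;
        int i;

        for (i = 0; i < n && pivot <= last; i++) {
            if (existing[i].last < pivot)
                continue;                   /* entirely before the pivot */
            if (existing[i].start > last)
                break;                      /* entirely after the range  */
            if (pivot < existing[i].start)
                printf("  [%lu, %lu]\n", pivot, existing[i].start - 1);
            pivot = existing[i].last + 1;
        }
        if (pivot <= last)
            printf("  [%lu, %lu]\n", pivot, last);
    }

    int main(void)
    {
        /* Page ranges 10-14 and 30-39 are already registered. */
        const struct interval existing[] = {
            { 10, 14 },
            { 30, 39 },
        };

        printf("new pieces of [0, 50]:\n");
        print_diff(0, 50, existing, 2);     /* -> [0,9], [15,29], [40,50] */
        return 0;
    }
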
+ * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ +#include +#include +#include + +#include "usnic_ib.h" +#include "vnic_resource.h" +#include "usnic_log.h" +#include "usnic_vnic.h" + +struct usnic_vnic { + struct vnic_dev *vdev; + struct vnic_dev_bar bar[PCI_NUM_RESOURCES]; + struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX]; + spinlock_t res_lock; +}; + +static enum vnic_res_type _to_vnic_res_type(enum usnic_vnic_res_type res_type) +{ +#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \ + vnic_res_type, +#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \ + vnic_res_type, + static enum vnic_res_type usnic_vnic_type_2_vnic_type[] = { + USNIC_VNIC_RES_TYPES}; +#undef DEFINE_USNIC_VNIC_RES +#undef DEFINE_USNIC_VNIC_RES_AT + + if (res_type >= USNIC_VNIC_RES_TYPE_MAX) + return RES_TYPE_MAX; + + return usnic_vnic_type_2_vnic_type[res_type]; +} + +const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type) +{ +#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \ + desc, +#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \ + desc, + static const char * const usnic_vnic_res_type_desc[] = { + USNIC_VNIC_RES_TYPES}; +#undef DEFINE_USNIC_VNIC_RES +#undef DEFINE_USNIC_VNIC_RES_AT + + if (res_type >= USNIC_VNIC_RES_TYPE_MAX) + return "unknown"; + + return usnic_vnic_res_type_desc[res_type]; + +} + +const char *usnic_vnic_pci_name(struct usnic_vnic *vnic) +{ + return pci_name(usnic_vnic_get_pdev(vnic)); +} + +int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf, + int buf_sz, + void *hdr_obj, + int (*printtitle)(void *, char*, int), + int (*printcols)(char *, int), + int (*printrow)(void *, char *, int)) +{ + struct usnic_vnic_res_chunk *chunk; + struct usnic_vnic_res *res; + struct vnic_dev_bar *bar0; + int i, j, offset; + + offset = 0; + bar0 = usnic_vnic_get_bar(vnic, 0); + offset += scnprintf(buf + offset, buf_sz - offset, + "VF:%hu BAR0 bus_addr=%pa vaddr=0x%p size=%ld ", + usnic_vnic_get_index(vnic), + &bar0->bus_addr, + bar0->vaddr, bar0->len); + if (printtitle) + offset += printtitle(hdr_obj, buf + offset, buf_sz - offset); + offset += scnprintf(buf + offset, buf_sz - offset, "\n"); + offset += scnprintf(buf + offset, buf_sz - offset, + "|RES\t|CTRL_PIN\t\t|IN_USE\t"); + if (printcols) + offset += printcols(buf + offset, buf_sz - offset); + offset += scnprintf(buf + offset, buf_sz - offset, "\n"); + + spin_lock(&vnic->res_lock); + for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) { + chunk = &vnic->chunks[i]; + for (j = 0; j < chunk->cnt; j++) { + res = chunk->res[j]; + offset += scnprintf(buf + offset, buf_sz - offset, + "|%s[%u]\t|0x%p\t|%u\t", + usnic_vnic_res_type_to_str(res->type), + res->vnic_idx, res->ctrl, !!res->owner); + if (printrow) { + offset += printrow(res->owner, buf + offset, + buf_sz - offset); + } + offset += 
scnprintf(buf + offset, buf_sz - offset, + "\n"); + } + } + spin_unlock(&vnic->res_lock); + return offset; +} + +void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec, + enum usnic_vnic_res_type trgt_type, + u16 cnt) +{ + int i; + + for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { + if (spec->resources[i].type == trgt_type) { + spec->resources[i].cnt = cnt; + return; + } + } + + WARN_ON(1); +} + +int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec, + struct usnic_vnic_res_spec *res_spec) +{ + int found, i, j; + + for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { + found = 0; + + for (j = 0; j < USNIC_VNIC_RES_TYPE_MAX; j++) { + if (res_spec->resources[i].type != + min_spec->resources[i].type) + continue; + found = 1; + if (min_spec->resources[i].cnt > + res_spec->resources[i].cnt) + return -EINVAL; + break; + } + + if (!found) + return -EINVAL; + } + return 0; +} + +int usnic_vnic_spec_dump(char *buf, int buf_sz, + struct usnic_vnic_res_spec *res_spec) +{ + enum usnic_vnic_res_type res_type; + int res_cnt; + int i; + int offset = 0; + + for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { + res_type = res_spec->resources[i].type; + res_cnt = res_spec->resources[i].cnt; + offset += scnprintf(buf + offset, buf_sz - offset, + "Res: %s Cnt: %d ", + usnic_vnic_res_type_to_str(res_type), + res_cnt); + } + + return offset; +} + +int usnic_vnic_check_room(struct usnic_vnic *vnic, + struct usnic_vnic_res_spec *res_spec) +{ + int i; + enum usnic_vnic_res_type res_type; + int res_cnt; + + for (i = 0; i < USNIC_VNIC_RES_TYPE_MAX; i++) { + res_type = res_spec->resources[i].type; + res_cnt = res_spec->resources[i].cnt; + + if (res_type == USNIC_VNIC_RES_TYPE_EOL) + break; + + if (res_cnt > usnic_vnic_res_free_cnt(vnic, res_type)) + return -EBUSY; + } + + return 0; +} + +int usnic_vnic_res_cnt(struct usnic_vnic *vnic, + enum usnic_vnic_res_type type) +{ + return vnic->chunks[type].cnt; +} + +int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic, + enum usnic_vnic_res_type type) +{ + return vnic->chunks[type].free_cnt; +} + +struct usnic_vnic_res_chunk * +usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type, + int cnt, void *owner) +{ + struct usnic_vnic_res_chunk *src, *ret; + struct usnic_vnic_res *res; + int i; + + if (usnic_vnic_res_free_cnt(vnic, type) < cnt || cnt < 1 || !owner) + return ERR_PTR(-EINVAL); + + ret = kzalloc(sizeof(*ret), GFP_ATOMIC); + if (!ret) { + usnic_err("Failed to allocate chunk for %s - Out of memory\n", + usnic_vnic_pci_name(vnic)); + return ERR_PTR(-ENOMEM); + } + + ret->res = kzalloc(sizeof(*(ret->res))*cnt, GFP_ATOMIC); + if (!ret->res) { + usnic_err("Failed to allocate resources for %s. 
Out of memory\n", + usnic_vnic_pci_name(vnic)); + kfree(ret); + return ERR_PTR(-ENOMEM); + } + + spin_lock(&vnic->res_lock); + src = &vnic->chunks[type]; + for (i = 0; i < src->cnt && ret->cnt < cnt; i++) { + res = src->res[i]; + if (!res->owner) { + src->free_cnt--; + res->owner = owner; + ret->res[ret->cnt++] = res; + } + } + + spin_unlock(&vnic->res_lock); + ret->type = type; + ret->vnic = vnic; + WARN_ON(ret->cnt != cnt); + + return ret; +} + +void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk) +{ + + struct usnic_vnic_res *res; + int i; + struct usnic_vnic *vnic = chunk->vnic; + + spin_lock(&vnic->res_lock); + while ((i = --chunk->cnt) >= 0) { + res = chunk->res[i]; + chunk->res[i] = NULL; + res->owner = NULL; + vnic->chunks[res->type].free_cnt++; + } + spin_unlock(&vnic->res_lock); + + kfree(chunk->res); + kfree(chunk); +} + +u16 usnic_vnic_get_index(struct usnic_vnic *vnic) +{ + return usnic_vnic_get_pdev(vnic)->devfn - 1; +} + +static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic, + enum usnic_vnic_res_type type, + struct usnic_vnic_res_chunk *chunk) +{ + int cnt, err, i; + struct usnic_vnic_res *res; + + cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type)); + if (cnt < 1) + return -EINVAL; + + chunk->cnt = chunk->free_cnt = cnt; + chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL); + if (!chunk->res) + return -ENOMEM; + + for (i = 0; i < cnt; i++) { + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) { + err = -ENOMEM; + goto fail; + } + res->type = type; + res->vnic_idx = i; + res->vnic = vnic; + res->ctrl = vnic_dev_get_res(vnic->vdev, + _to_vnic_res_type(type), i); + chunk->res[i] = res; + } + + chunk->vnic = vnic; + return 0; +fail: + for (i--; i >= 0; i--) + kfree(chunk->res[i]); + kfree(chunk->res); + return err; +} + +static void usnic_vnic_free_res_chunk(struct usnic_vnic_res_chunk *chunk) +{ + int i; + for (i = 0; i < chunk->cnt; i++) + kfree(chunk->res[i]); + kfree(chunk->res); +} + +static int usnic_vnic_discover_resources(struct pci_dev *pdev, + struct usnic_vnic *vnic) +{ + enum usnic_vnic_res_type res_type; + int i; + int err = 0; + + for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + vnic->bar[i].len = pci_resource_len(pdev, i); + vnic->bar[i].vaddr = pci_iomap(pdev, i, vnic->bar[i].len); + if (!vnic->bar[i].vaddr) { + usnic_err("Cannot memory-map BAR %d, aborting\n", + i); + err = -ENODEV; + goto out_clean_bar; + } + vnic->bar[i].bus_addr = pci_resource_start(pdev, i); + } + + vnic->vdev = vnic_dev_register(NULL, pdev, pdev, vnic->bar, + ARRAY_SIZE(vnic->bar)); + if (!vnic->vdev) { + usnic_err("Failed to register device %s\n", + pci_name(pdev)); + err = -EINVAL; + goto out_clean_bar; + } + + for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1; + res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) { + err = usnic_vnic_alloc_res_chunk(vnic, res_type, + &vnic->chunks[res_type]); + if (err) { + usnic_err("Failed to alloc res %s with err %d\n", + usnic_vnic_res_type_to_str(res_type), + err); + goto out_clean_chunks; + } + } + + return 0; + +out_clean_chunks: + for (res_type--; res_type > USNIC_VNIC_RES_TYPE_EOL; res_type--) + usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); + vnic_dev_unregister(vnic->vdev); +out_clean_bar: + for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + if (!vnic->bar[i].vaddr) + break; + + iounmap(vnic->bar[i].vaddr); + } + + return err; +} + +struct pci_dev 
*usnic_vnic_get_pdev(struct usnic_vnic *vnic) +{ + return vnic_dev_get_pdev(vnic->vdev); +} + +struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic, + int bar_num) +{ + return (bar_num < ARRAY_SIZE(vnic->bar)) ? &vnic->bar[bar_num] : NULL; +} + +static void usnic_vnic_release_resources(struct usnic_vnic *vnic) +{ + int i; + struct pci_dev *pdev; + enum usnic_vnic_res_type res_type; + + pdev = usnic_vnic_get_pdev(vnic); + + for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1; + res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) + usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); + + vnic_dev_unregister(vnic->vdev); + + for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { + if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) + continue; + iounmap(vnic->bar[i].vaddr); + } +} + +struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev) +{ + struct usnic_vnic *vnic; + int err = 0; + + if (!pci_is_enabled(pdev)) { + usnic_err("PCI dev %s is disabled\n", pci_name(pdev)); + return ERR_PTR(-EINVAL); + } + + vnic = kzalloc(sizeof(*vnic), GFP_KERNEL); + if (!vnic) { + usnic_err("Failed to alloc vnic for %s - out of memory\n", + pci_name(pdev)); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&vnic->res_lock); + + err = usnic_vnic_discover_resources(pdev, vnic); + if (err) { + usnic_err("Failed to discover %s resources with err %d\n", + pci_name(pdev), err); + goto out_free_vnic; + } + + usnic_dbg("Allocated vnic for %s\n", usnic_vnic_pci_name(vnic)); + + return vnic; + +out_free_vnic: + kfree(vnic); + + return ERR_PTR(err); +} + +void usnic_vnic_free(struct usnic_vnic *vnic) +{ + usnic_vnic_release_resources(vnic); + kfree(vnic); +} --- linux-3.13.0.orig/drivers/infiniband/hw/usnic/usnic_vnic.h +++ linux-3.13.0/drivers/infiniband/hw/usnic/usnic_vnic.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
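
usnic_vnic.c and usnic_vnic.h keep the resource-type enum, the usnic-to-vnic type table and the description strings in sync from a single X-macro list, USNIC_VNIC_RES_TYPES, expanded under different definitions of DEFINE_USNIC_VNIC_RES(). A stripped-down, self-contained illustration of the same pattern (the resource names and strings here are made up):

    #include <stdio.h>

    /* One list of resources; each expansion below redefines DEFINE_RES(). */
    #define RES_TYPES \
        DEFINE_RES(WQ,   "work queue")       \
        DEFINE_RES(RQ,   "receive queue")    \
        DEFINE_RES(CQ,   "completion queue") \
        DEFINE_RES(INTR, "interrupt")

    /* Expansion 1: the enum. */
    #define DEFINE_RES(name, desc) RES_TYPE_##name,
    enum res_type {
        RES_TYPES
        RES_TYPE_MAX
    };
    #undef DEFINE_RES

    /* Expansion 2: a description table that cannot drift out of sync. */
    #define DEFINE_RES(name, desc) [RES_TYPE_##name] = desc,
    static const char *const res_desc[] = {
        RES_TYPES
    };
    #undef DEFINE_RES

    int main(void)
    {
        int i;

        for (i = 0; i < RES_TYPE_MAX; i++)
            printf("%d: %s\n", i, res_desc[i]);
        return 0;
    }

Because every table is generated from the same list, adding a resource type is a one-line change and the generated tables always agree.
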
+ * + */ + +#ifndef USNIC_VNIC_H_ +#define USNIC_VNIC_H_ + +#include + +#include "vnic_dev.h" + +/* =USNIC_VNIC_RES_TYPE= =VNIC_RES= =DESC= */ +#define USNIC_VNIC_RES_TYPES \ + DEFINE_USNIC_VNIC_RES_AT(EOL, RES_TYPE_EOL, "EOL", 0) \ + DEFINE_USNIC_VNIC_RES(WQ, RES_TYPE_WQ, "WQ") \ + DEFINE_USNIC_VNIC_RES(RQ, RES_TYPE_RQ, "RQ") \ + DEFINE_USNIC_VNIC_RES(CQ, RES_TYPE_CQ, "CQ") \ + DEFINE_USNIC_VNIC_RES(INTR, RES_TYPE_INTR_CTRL, "INT") \ + DEFINE_USNIC_VNIC_RES(MAX, RES_TYPE_MAX, "MAX")\ + +#define DEFINE_USNIC_VNIC_RES_AT(usnic_vnic_res_t, vnic_res_type, desc, val) \ + USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t = val, +#define DEFINE_USNIC_VNIC_RES(usnic_vnic_res_t, vnic_res_type, desc) \ + USNIC_VNIC_RES_TYPE_##usnic_vnic_res_t, +enum usnic_vnic_res_type { + USNIC_VNIC_RES_TYPES +}; +#undef DEFINE_USNIC_VNIC_RES +#undef DEFINE_USNIC_VNIC_RES_AT + +struct usnic_vnic_res { + enum usnic_vnic_res_type type; + unsigned int vnic_idx; + struct usnic_vnic *vnic; + void __iomem *ctrl; + void *owner; +}; + +struct usnic_vnic_res_chunk { + enum usnic_vnic_res_type type; + int cnt; + int free_cnt; + struct usnic_vnic_res **res; + struct usnic_vnic *vnic; +}; + +struct usnic_vnic_res_desc { + enum usnic_vnic_res_type type; + uint16_t cnt; +}; + +struct usnic_vnic_res_spec { + struct usnic_vnic_res_desc resources[USNIC_VNIC_RES_TYPE_MAX]; +}; + +const char *usnic_vnic_res_type_to_str(enum usnic_vnic_res_type res_type); +const char *usnic_vnic_pci_name(struct usnic_vnic *vnic); +int usnic_vnic_dump(struct usnic_vnic *vnic, char *buf, int buf_sz, + void *hdr_obj, + int (*printtitle)(void *, char*, int), + int (*printcols)(char *, int), + int (*printrow)(void *, char *, int)); +void usnic_vnic_res_spec_update(struct usnic_vnic_res_spec *spec, + enum usnic_vnic_res_type trgt_type, + u16 cnt); +int usnic_vnic_res_spec_satisfied(const struct usnic_vnic_res_spec *min_spec, + struct usnic_vnic_res_spec *res_spec); +int usnic_vnic_spec_dump(char *buf, int buf_sz, + struct usnic_vnic_res_spec *res_spec); +int usnic_vnic_check_room(struct usnic_vnic *vnic, + struct usnic_vnic_res_spec *res_spec); +int usnic_vnic_res_cnt(struct usnic_vnic *vnic, + enum usnic_vnic_res_type type); +int usnic_vnic_res_free_cnt(struct usnic_vnic *vnic, + enum usnic_vnic_res_type type); +struct usnic_vnic_res_chunk * +usnic_vnic_get_resources(struct usnic_vnic *vnic, + enum usnic_vnic_res_type type, + int cnt, + void *owner); +void usnic_vnic_put_resources(struct usnic_vnic_res_chunk *chunk); +struct pci_dev *usnic_vnic_get_pdev(struct usnic_vnic *vnic); +struct vnic_dev_bar *usnic_vnic_get_bar(struct usnic_vnic *vnic, + int bar_num); +struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev); +void usnic_vnic_free(struct usnic_vnic *vnic); +u16 usnic_vnic_get_index(struct usnic_vnic *vnic); + +#endif /*!USNIC_VNIC_H_*/ --- linux-3.13.0.orig/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ linux-3.13.0/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -104,6 +104,8 @@ ipoib_dbg(priv, "bringing up interface\n"); + netif_carrier_off(dev); + set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); if (ipoib_pkey_dev_delay_open(dev)) @@ -1366,8 +1368,6 @@ memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); - netif_carrier_off(dev); - priv->dev = dev; spin_lock_init(&priv->lock); --- linux-3.13.0.orig/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ linux-3.13.0/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -192,6 +192,9 @@ if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK) init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; + if 
(priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING) + init_attr.create_flags |= IB_QP_CREATE_NETIF_QP; + if (dev->features & NETIF_F_SG) init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; --- linux-3.13.0.orig/drivers/infiniband/ulp/iser/iser_initiator.c +++ linux-3.13.0/drivers/infiniband/ulp/iser/iser_initiator.c @@ -610,11 +610,12 @@ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); kmem_cache_free(ig.desc_cache, tx_desc); + tx_desc = NULL; } atomic_dec(&ib_conn->post_send_buf_count); - if (tx_desc->type == ISCSI_TX_CONTROL) { + if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ task = (void *) ((long)(void *)tx_desc - sizeof(struct iscsi_task)); --- linux-3.13.0.orig/drivers/infiniband/ulp/iser/iser_verbs.c +++ linux-3.13.0/drivers/infiniband/ulp/iser/iser_verbs.c @@ -652,9 +652,13 @@ /* getting here when the state is UP means that the conn is being * * terminated asynchronously from the iSCSI layer's perspective. */ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, - ISER_CONN_TERMINATING)) - iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, - ISCSI_ERR_CONN_FAILED); + ISER_CONN_TERMINATING)){ + if (ib_conn->iser_conn) + iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, + ISCSI_ERR_CONN_FAILED); + else + iser_err("iscsi_iser connection isn't bound\n"); + } /* Complete the termination process if no posts are pending */ if (ib_conn->post_recv_buf_count == 0 && --- linux-3.13.0.orig/drivers/infiniband/ulp/isert/ib_isert.c +++ linux-3.13.0/drivers/infiniband/ulp/isert/ib_isert.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "isert_proto.h" #include "ib_isert.h" @@ -40,6 +41,7 @@ static LIST_HEAD(device_list); static struct workqueue_struct *isert_rx_wq; static struct workqueue_struct *isert_comp_wq; +static struct workqueue_struct *isert_release_wq; static void isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); @@ -47,10 +49,15 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, struct isert_rdma_wr *wr); static void -isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); +isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); static int -isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, - struct isert_rdma_wr *wr); +isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct isert_rdma_wr *wr); +static int +isert_rdma_post_recvl(struct isert_conn *isert_conn); +static int +isert_rdma_accept(struct isert_conn *isert_conn); +struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); static void isert_qp_event_callback(struct ib_event *e, void *context) @@ -111,9 +118,12 @@ attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; /* * FIXME: Use devattr.max_sge - 2 for max_send_sge as - * work-around for RDMA_READ.. + * work-around for RDMA_READs with ConnectX-2. + * + * Also, still make sure to have at least two SGEs for + * outgoing control PDU responses. 
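
The changed max_send_sge line just below subtracts two SGEs as the ConnectX-2 RDMA_READ workaround, while max(2, ...) still guarantees the two SGEs needed for outgoing control PDU responses even on a device reporting a very small max_sge. A trivial stand-alone illustration of the clamp, with made-up device capabilities:

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        int dev_max_sge[] = { 32, 4, 3, 2 };    /* hypothetical device caps */
        int i;

        for (i = 0; i < 4; i++)
            printf("max_sge=%2d -> max_send_sge=%2d\n",
                   dev_max_sge[i], max(2, dev_max_sge[i] - 2));
        return 0;
    }
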
*/ - attr.cap.max_send_sge = device->dev_attr.max_sge - 2; + attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); isert_conn->max_sge = attr.cap.max_send_sge; attr.cap.max_recv_sge = 1; @@ -128,12 +138,18 @@ ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); if (ret) { pr_err("rdma_create_qp failed for cma_id %d\n", ret); - return ret; + goto err; } isert_conn->conn_qp = cma_id->qp; pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n"); return 0; +err: + mutex_lock(&device_list_mutex); + device->cq_active_qps[min_index]--; + mutex_unlock(&device_list_mutex); + + return ret; } static void @@ -219,19 +235,23 @@ struct isert_cq_desc *cq_desc; struct ib_device_attr *dev_attr; int ret = 0, i, j; + int max_rx_cqe, max_tx_cqe; dev_attr = &device->dev_attr; ret = isert_query_device(ib_dev, dev_attr); if (ret) return ret; + max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); + max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); + /* asign function handlers */ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { - device->use_frwr = 1; - device->reg_rdma_mem = isert_reg_rdma_frwr; - device->unreg_rdma_mem = isert_unreg_rdma_frwr; + device->use_fastreg = 1; + device->reg_rdma_mem = isert_reg_rdma; + device->unreg_rdma_mem = isert_unreg_rdma; } else { - device->use_frwr = 0; + device->use_fastreg = 0; device->reg_rdma_mem = isert_map_rdma; device->unreg_rdma_mem = isert_unmap_cmd; } @@ -239,9 +259,10 @@ device->cqs_used = min_t(int, num_online_cpus(), device->ib_device->num_comp_vectors); device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); - pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n", + pr_debug("Using %d CQs, device %s supports %d vectors support " + "Fast registration %d\n", device->cqs_used, device->ib_device->name, - device->ib_device->num_comp_vectors, device->use_frwr); + device->ib_device->num_comp_vectors, device->use_fastreg); device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * device->cqs_used, GFP_KERNEL); if (!device->cq_desc) { @@ -250,13 +271,6 @@ } cq_desc = device->cq_desc; - device->dev_pd = ib_alloc_pd(ib_dev); - if (IS_ERR(device->dev_pd)) { - ret = PTR_ERR(device->dev_pd); - pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret); - goto out_cq_desc; - } - for (i = 0; i < device->cqs_used; i++) { cq_desc[i].device = device; cq_desc[i].cq_index = i; @@ -266,7 +280,7 @@ isert_cq_rx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_RX_CQ_LEN, i); + max_rx_cqe, i); if (IS_ERR(device->dev_rx_cq[i])) { ret = PTR_ERR(device->dev_rx_cq[i]); device->dev_rx_cq[i] = NULL; @@ -278,7 +292,7 @@ isert_cq_tx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_TX_CQ_LEN, i); + max_tx_cqe, i); if (IS_ERR(device->dev_tx_cq[i])) { ret = PTR_ERR(device->dev_tx_cq[i]); device->dev_tx_cq[i] = NULL; @@ -294,13 +308,6 @@ goto out_cq; } - device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(device->dev_mr)) { - ret = PTR_ERR(device->dev_mr); - pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret); - goto out_cq; - } - return 0; out_cq: @@ -316,9 +323,6 @@ ib_destroy_cq(device->dev_tx_cq[j]); } } - ib_dealloc_pd(device->dev_pd); - -out_cq_desc: kfree(device->cq_desc); return ret; @@ -341,8 +345,6 @@ device->dev_tx_cq[i] = NULL; } - ib_dereg_mr(device->dev_mr); - ib_dealloc_pd(device->dev_pd); kfree(device->cq_desc); } @@ -398,18 +400,18 @@ } static void -isert_conn_free_frwr_pool(struct isert_conn *isert_conn) +isert_conn_free_fastreg_pool(struct 
isert_conn *isert_conn) { struct fast_reg_descriptor *fr_desc, *tmp; int i = 0; - if (list_empty(&isert_conn->conn_frwr_pool)) + if (list_empty(&isert_conn->conn_fr_pool)) return; - pr_debug("Freeing conn %p frwr pool", isert_conn); + pr_debug("Freeing conn %p fastreg pool", isert_conn); list_for_each_entry_safe(fr_desc, tmp, - &isert_conn->conn_frwr_pool, list) { + &isert_conn->conn_fr_pool, list) { list_del(&fr_desc->list); ib_free_fast_reg_page_list(fr_desc->data_frpl); ib_dereg_mr(fr_desc->data_mr); @@ -417,20 +419,47 @@ ++i; } - if (i < isert_conn->conn_frwr_pool_size) + if (i < isert_conn->conn_fr_pool_size) pr_warn("Pool still has %d regions registered\n", - isert_conn->conn_frwr_pool_size - i); + isert_conn->conn_fr_pool_size - i); +} + +static int +isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, + struct fast_reg_descriptor *fr_desc) +{ + fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, + ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(fr_desc->data_frpl)) { + pr_err("Failed to allocate data frpl err=%ld\n", + PTR_ERR(fr_desc->data_frpl)); + return PTR_ERR(fr_desc->data_frpl); + } + + fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); + if (IS_ERR(fr_desc->data_mr)) { + pr_err("Failed to allocate data frmr err=%ld\n", + PTR_ERR(fr_desc->data_mr)); + ib_free_fast_reg_page_list(fr_desc->data_frpl); + return PTR_ERR(fr_desc->data_mr); + } + pr_debug("Create fr_desc %p page_list %p\n", + fr_desc, fr_desc->data_frpl->page_list); + + fr_desc->valid = true; + + return 0; } static int -isert_conn_create_frwr_pool(struct isert_conn *isert_conn) +isert_conn_create_fastreg_pool(struct isert_conn *isert_conn) { struct fast_reg_descriptor *fr_desc; struct isert_device *device = isert_conn->conn_device; int i, ret; - INIT_LIST_HEAD(&isert_conn->conn_frwr_pool); - isert_conn->conn_frwr_pool_size = 0; + INIT_LIST_HEAD(&isert_conn->conn_fr_pool); + isert_conn->conn_fr_pool_size = 0; for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) { fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); if (!fr_desc) { @@ -439,48 +468,34 @@ goto err; } - fr_desc->data_frpl = - ib_alloc_fast_reg_page_list(device->ib_device, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(fr_desc->data_frpl)) { - pr_err("Failed to allocate fr_pg_list err=%ld\n", - PTR_ERR(fr_desc->data_frpl)); - ret = PTR_ERR(fr_desc->data_frpl); - goto err; - } - - fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd, - ISCSI_ISER_SG_TABLESIZE); - if (IS_ERR(fr_desc->data_mr)) { - pr_err("Failed to allocate frmr err=%ld\n", - PTR_ERR(fr_desc->data_mr)); - ret = PTR_ERR(fr_desc->data_mr); - ib_free_fast_reg_page_list(fr_desc->data_frpl); + ret = isert_create_fr_desc(device->ib_device, + isert_conn->conn_pd, fr_desc); + if (ret) { + pr_err("Failed to create fastreg descriptor err=%d\n", + ret); + kfree(fr_desc); goto err; } - pr_debug("Create fr_desc %p page_list %p\n", - fr_desc, fr_desc->data_frpl->page_list); - fr_desc->valid = true; - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); - isert_conn->conn_frwr_pool_size++; + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); + isert_conn->conn_fr_pool_size++; } - pr_debug("Creating conn %p frwr pool size=%d", - isert_conn, isert_conn->conn_frwr_pool_size); + pr_debug("Creating conn %p fastreg pool size=%d", + isert_conn, isert_conn->conn_fr_pool_size); return 0; err: - isert_conn_free_frwr_pool(isert_conn); + isert_conn_free_fastreg_pool(isert_conn); return ret; } static int isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { - 
struct iscsi_np *np = cma_id->context; - struct isert_np *isert_np = np->np_context; + struct isert_np *isert_np = cma_id->context; + struct iscsi_np *np = isert_np->np; struct isert_conn *isert_conn; struct isert_device *device; struct ib_device *ib_dev = cma_id->device; @@ -497,15 +512,13 @@ isert_conn->state = ISER_CONN_INIT; INIT_LIST_HEAD(&isert_conn->conn_accept_node); init_completion(&isert_conn->conn_login_comp); - init_waitqueue_head(&isert_conn->conn_wait); - init_waitqueue_head(&isert_conn->conn_wait_comp_err); + init_completion(&isert_conn->login_req_comp); + init_completion(&isert_conn->conn_wait); + init_completion(&isert_conn->conn_wait_comp_err); kref_init(&isert_conn->conn_kref); - kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); - mutex_init(&isert_conn->conn_comp_mutex); spin_lock_init(&isert_conn->conn_lock); - cma_id->context = isert_conn; isert_conn->conn_cm_id = cma_id; isert_conn->responder_resources = event->param.conn.responder_resources; isert_conn->initiator_depth = event->param.conn.initiator_depth; @@ -558,14 +571,29 @@ } isert_conn->conn_device = device; - isert_conn->conn_pd = device->dev_pd; - isert_conn->conn_mr = device->dev_mr; + isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); + if (IS_ERR(isert_conn->conn_pd)) { + ret = PTR_ERR(isert_conn->conn_pd); + pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", + isert_conn, ret); + goto out_pd; + } + + isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(isert_conn->conn_mr)) { + ret = PTR_ERR(isert_conn->conn_mr); + pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n", + isert_conn, ret); + goto out_mr; + } - if (device->use_frwr) { - ret = isert_conn_create_frwr_pool(isert_conn); + if (device->use_fastreg) { + ret = isert_conn_create_fastreg_pool(isert_conn); if (ret) { - pr_err("Conn: %p failed to create frwr_pool\n", isert_conn); - goto out_frwr; + pr_err("Conn: %p failed to create fastreg pool\n", + isert_conn); + goto out_fastreg; } } @@ -573,18 +601,30 @@ if (ret) goto out_conn_dev; + ret = isert_rdma_post_recvl(isert_conn); + if (ret) + goto out_conn_dev; + + ret = isert_rdma_accept(isert_conn); + if (ret) + goto out_conn_dev; + mutex_lock(&isert_np->np_accept_mutex); - list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); mutex_unlock(&isert_np->np_accept_mutex); - pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); - wake_up(&isert_np->np_accept_wq); + pr_debug("isert_connect_request() up np_sem np: %p\n", np); + up(&isert_np->np_sem); return 0; out_conn_dev: - if (device->use_frwr) - isert_conn_free_frwr_pool(isert_conn); -out_frwr: + if (device->use_fastreg) + isert_conn_free_fastreg_pool(isert_conn); +out_fastreg: + ib_dereg_mr(isert_conn->conn_mr); +out_mr: + ib_dealloc_pd(isert_conn->conn_pd); +out_pd: isert_device_try_release(device); out_rsp_dma_map: ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, @@ -596,6 +636,7 @@ kfree(isert_conn->login_buf); out: kfree(isert_conn); + rdma_reject(cma_id, NULL, 0); return ret; } @@ -608,20 +649,25 @@ pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - if (device && device->use_frwr) - isert_conn_free_frwr_pool(isert_conn); + if (device && device->use_fastreg) + isert_conn_free_fastreg_pool(isert_conn); + + isert_free_rx_descriptors(isert_conn); + rdma_destroy_id(isert_conn->conn_cm_id); if (isert_conn->conn_qp) { cq_index = 
((struct isert_cq_desc *) isert_conn->conn_qp->recv_cq->cq_context)->cq_index; pr_debug("isert_connect_release: cq_index: %d\n", cq_index); + mutex_lock(&device_list_mutex); isert_conn->conn_device->cq_active_qps[cq_index]--; + mutex_unlock(&device_list_mutex); - rdma_destroy_qp(isert_conn->conn_cm_id); + ib_destroy_qp(isert_conn->conn_qp); } - isert_free_rx_descriptors(isert_conn); - rdma_destroy_id(isert_conn->conn_cm_id); + ib_dereg_mr(isert_conn->conn_mr); + ib_dealloc_pd(isert_conn->conn_pd); if (isert_conn->login_buf) { ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma, @@ -642,7 +688,19 @@ static void isert_connected_handler(struct rdma_cm_id *cma_id) { - return; + struct isert_conn *isert_conn = cma_id->qp->qp_context; + + pr_info("conn %p\n", isert_conn); + + if (!kref_get_unless_zero(&isert_conn->conn_kref)) { + pr_warn("conn %p connect_release is running\n", isert_conn); + return; + } + + mutex_lock(&isert_conn->conn_mutex); + if (isert_conn->state != ISER_CONN_FULL_FEATURE) + isert_conn->state = ISER_CONN_UP; + mutex_unlock(&isert_conn->conn_mutex); } static void @@ -663,49 +721,102 @@ kref_put(&isert_conn->conn_kref, isert_release_conn_kref); } +/** + * isert_conn_terminate() - Initiate connection termination + * @isert_conn: isert connection struct + * + * Notes: + * In case the connection state is FULL_FEATURE, move state + * to TEMINATING and start teardown sequence (rdma_disconnect). + * In case the connection state is UP, complete flush as well. + * + * This routine must be called with conn_mutex held. Thus it is + * safe to call multiple times. + */ static void -isert_disconnect_work(struct work_struct *work) +isert_conn_terminate(struct isert_conn *isert_conn) { - struct isert_conn *isert_conn = container_of(work, - struct isert_conn, conn_logout_work); - - pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - mutex_lock(&isert_conn->conn_mutex); - isert_conn->state = ISER_CONN_DOWN; + int err; - if (isert_conn->post_recv_buf_count == 0 && - atomic_read(&isert_conn->post_send_buf_count) == 0) { - pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); - mutex_unlock(&isert_conn->conn_mutex); - goto wake_up; - } - if (!isert_conn->conn_cm_id) { - mutex_unlock(&isert_conn->conn_mutex); - isert_put_conn(isert_conn); - return; + switch (isert_conn->state) { + case ISER_CONN_TERMINATING: + break; + case ISER_CONN_UP: + /* + * No flush completions will occur as we didn't + * get to ISER_CONN_FULL_FEATURE yet, complete + * to allow teardown progress. 
+ */ + complete(&isert_conn->conn_wait_comp_err); + case ISER_CONN_FULL_FEATURE: /* FALLTHRU */ + pr_info("Terminating conn %p state %d\n", + isert_conn, isert_conn->state); + isert_conn->state = ISER_CONN_TERMINATING; + err = rdma_disconnect(isert_conn->conn_cm_id); + if (err) + pr_warn("Failed rdma_disconnect isert_conn %p\n", + isert_conn); + break; + default: + pr_warn("conn %p teminating in state %d\n", + isert_conn, isert_conn->state); } - if (!isert_conn->logout_posted) { - pr_debug("Calling rdma_disconnect for !logout_posted from" - " isert_disconnect_work\n"); - rdma_disconnect(isert_conn->conn_cm_id); - mutex_unlock(&isert_conn->conn_mutex); - iscsit_cause_connection_reinstatement(isert_conn->conn, 0); - goto wake_up; +} + +static int +isert_np_cma_handler(struct isert_np *isert_np, + enum rdma_cm_event_type event) +{ + pr_debug("isert np %p, handling event %d\n", isert_np, event); + + switch (event) { + case RDMA_CM_EVENT_DEVICE_REMOVAL: + isert_np->np_cm_id = NULL; + break; + case RDMA_CM_EVENT_ADDR_CHANGE: + isert_np->np_cm_id = isert_setup_id(isert_np); + if (IS_ERR(isert_np->np_cm_id)) { + pr_err("isert np %p setup id failed: %ld\n", + isert_np, PTR_ERR(isert_np->np_cm_id)); + isert_np->np_cm_id = NULL; + } + break; + default: + pr_err("isert np %p Unexpected event %d\n", + isert_np, event); } + + return -1; +} + +static int +isert_disconnected_handler(struct rdma_cm_id *cma_id, + enum rdma_cm_event_type event) +{ + struct isert_np *isert_np = cma_id->context; + struct isert_conn *isert_conn; + + if (isert_np->np_cm_id == cma_id) + return isert_np_cma_handler(cma_id->context, event); + + isert_conn = cma_id->qp->qp_context; + + mutex_lock(&isert_conn->conn_mutex); + isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); -wake_up: - wake_up(&isert_conn->conn_wait); - isert_put_conn(isert_conn); + pr_info("conn %p completing conn_wait\n", isert_conn); + complete(&isert_conn->conn_wait); + + return 0; } static void -isert_disconnected_handler(struct rdma_cm_id *cma_id) +isert_connect_error(struct rdma_cm_id *cma_id) { - struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + struct isert_conn *isert_conn = cma_id->qp->qp_context; - INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); - schedule_work(&isert_conn->conn_logout_work); + isert_put_conn(isert_conn); } static int @@ -718,32 +829,30 @@ switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: - pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n"); ret = isert_connect_request(cma_id, event); + if (ret) + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", + event->event, ret); break; case RDMA_CM_EVENT_ESTABLISHED: - pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n"); isert_connected_handler(cma_id); break; - case RDMA_CM_EVENT_DISCONNECTED: - pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n"); - isert_disconnected_handler(cma_id); - break; - case RDMA_CM_EVENT_DEVICE_REMOVAL: - case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ + ret = isert_disconnected_handler(cma_id, event->event); break; + case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ + case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ case RDMA_CM_EVENT_CONNECT_ERROR: + isert_connect_error(cma_id); + break; default: - pr_err("Unknown RDMA CMA event: %d\n", event->event); + pr_err("Unhandled RDMA CMA event: %d\n", 
event->event); break; } - if (ret != 0) { - pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", - event->event, ret); - dump_stack(); - } - return ret; } @@ -871,16 +980,17 @@ * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. */ - mutex_lock(&isert_conn->conn_comp_mutex); - if (coalesce && + mutex_lock(&isert_conn->conn_mutex); + if (coalesce && isert_conn->state == ISER_CONN_FULL_FEATURE && ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { + tx_desc->llnode_active = true; llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_comp_mutex); + mutex_unlock(&isert_conn->conn_mutex); return; } isert_conn->conn_comp_batch = 0; tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_comp_mutex); + mutex_unlock(&isert_conn->conn_mutex); send_wr->send_flags = IB_SEND_SIGNALED; } @@ -958,7 +1068,10 @@ if (ret) return ret; - isert_conn->state = ISER_CONN_UP; + /* Now we are in FULL_FEATURE phase */ + mutex_lock(&isert_conn->conn_mutex); + isert_conn->state = ISER_CONN_FULL_FEATURE; + mutex_unlock(&isert_conn->conn_mutex); goto post_send; } @@ -975,18 +1088,17 @@ } static void -isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, - struct isert_conn *isert_conn) +isert_rx_login_req(struct isert_conn *isert_conn) { + struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf; + int rx_buflen = isert_conn->login_req_len; struct iscsi_conn *conn = isert_conn->conn; struct iscsi_login *login = conn->conn_login; int size; - if (!login) { - pr_err("conn->conn_login is NULL\n"); - dump_stack(); - return; - } + pr_info("conn %p\n", isert_conn); + + WARN_ON_ONCE(!login); if (login->first_request) { struct iscsi_login_req *login_req = @@ -1024,13 +1136,13 @@ } static struct iscsi_cmd -*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp) +*isert_allocate_cmd(struct iscsi_conn *conn) { struct isert_conn *isert_conn = (struct isert_conn *)conn->context; struct isert_cmd *isert_cmd; struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, gfp); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) { pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); return NULL; @@ -1094,6 +1206,8 @@ if (!rc && dump_payload == false && unsol_data) iscsit_set_unsoliticed_dataout(cmd); + else if (dump_payload && imm_data) + target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd); return 0; } @@ -1219,7 +1333,7 @@ switch (opcode) { case ISCSI_OP_SCSI_CMD: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1233,7 +1347,7 @@ rx_desc, (unsigned char *)hdr); break; case ISCSI_OP_NOOP_OUT: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1246,7 +1360,7 @@ (unsigned char *)hdr); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1254,7 +1368,7 @@ (unsigned char *)hdr); break; case ISCSI_OP_LOGOUT: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1265,7 +1379,7 @@ HZ); break; case ISCSI_OP_TEXT: - cmd = isert_allocate_cmd(conn, GFP_KERNEL); + cmd = isert_allocate_cmd(conn); if (!cmd) break; @@ -1347,11 +1461,20 @@ hdr->opcode, hdr->itt, hdr->flags, (int)(xfer_len - ISER_HEADERS_LEN)); - if ((char *)desc == isert_conn->login_req_buf) - isert_rx_login_req(desc, xfer_len - 
ISER_HEADERS_LEN, - isert_conn); - else + if ((char *)desc == isert_conn->login_req_buf) { + isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN; + if (isert_conn->conn) { + struct iscsi_login *login = isert_conn->conn->conn_login; + + if (login && !login->first_request) + isert_rx_login_req(isert_conn); + } + mutex_lock(&isert_conn->conn_mutex); + complete(&isert_conn->login_req_comp); + mutex_unlock(&isert_conn->conn_mutex); + } else { isert_rx_do_work(desc, isert_conn); + } ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); @@ -1404,25 +1527,25 @@ } static void -isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) +isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn) { struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; struct ib_device *ib_dev = isert_conn->conn_cm_id->device; LIST_HEAD(unmap_list); - pr_debug("unreg_frwr_cmd: %p\n", isert_cmd); + pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); if (wr->fr_desc) { - pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n", + pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", isert_cmd, wr->fr_desc); spin_lock_bh(&isert_conn->conn_lock); - list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool); + list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool); spin_unlock_bh(&isert_conn->conn_lock); wr->fr_desc = NULL; } if (wr->sge) { - pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd); + pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -1434,7 +1557,7 @@ } static void -isert_put_cmd(struct isert_cmd *isert_cmd) +isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; struct isert_conn *isert_conn = isert_cmd->conn; @@ -1447,11 +1570,24 @@ case ISCSI_OP_SCSI_CMD: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); - if (cmd->data_direction == DMA_TO_DEVICE) + if (cmd->data_direction == DMA_TO_DEVICE) { iscsit_stop_dataout_timer(cmd); + /* + * Check for special case during comp_err where + * WRITE_PENDING has been handed off from core, + * but requires an extra target_put_sess_cmd() + * before transport_generic_free_cmd() below. 
+ */ + if (comp_err && + cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) { + struct se_cmd *se_cmd = &cmd->se_cmd; + + target_put_sess_cmd(se_cmd->se_sess, se_cmd); + } + } device->unreg_rdma_mem(isert_cmd, isert_conn); transport_generic_free_cmd(&cmd->se_cmd, 0); @@ -1459,7 +1595,7 @@ case ISCSI_OP_SCSI_TMFUNC: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); transport_generic_free_cmd(&cmd->se_cmd, 0); @@ -1469,7 +1605,7 @@ case ISCSI_OP_TEXT: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); /* @@ -1506,7 +1642,7 @@ static void isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd, - struct ib_device *ib_dev) + struct ib_device *ib_dev, bool comp_err) { if (isert_cmd->pdu_buf_dma != 0) { pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); @@ -1516,7 +1652,7 @@ } isert_unmap_tx_desc(tx_desc, ib_dev); - isert_put_cmd(isert_cmd); + isert_put_cmd(isert_cmd, comp_err); } static void @@ -1532,6 +1668,7 @@ iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); cmd->write_data_done = wr->cur_rdma_length; + wr->send_wr_num = 0; pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); spin_lock_bh(&cmd->istate_lock); @@ -1559,28 +1696,25 @@ iscsit_tmr_post_handler(cmd, cmd->conn); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; case ISTATE_SEND_REJECT: pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n"); atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; case ISTATE_SEND_LOGOUTRSP: pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); - /* - * Call atomic_dec(&isert_conn->post_send_buf_count) - * from isert_free_conn() - */ - isert_conn->logout_posted = true; + + atomic_dec(&isert_conn->post_send_buf_count); iscsit_logout_post_handler(cmd, cmd->conn); break; case ISTATE_SEND_TEXTRSP: atomic_dec(&isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev); + isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); break; default: pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); @@ -1596,6 +1730,7 @@ struct ib_device *ib_dev) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || cmd->i_state == ISTATE_SEND_LOGOUTRSP || @@ -1607,10 +1742,10 @@ queue_work(isert_comp_wq, &isert_cmd->comp_work); return; } - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; - isert_completion_put(tx_desc, isert_cmd, ib_dev); + isert_completion_put(tx_desc, isert_cmd, ib_dev, false); } static void @@ -1645,7 +1780,7 @@ case ISER_IB_RDMA_READ: pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); isert_completion_rdma_read(tx_desc, isert_cmd); break; default: @@ -1674,31 +1809,80 @@ } static void 
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) +isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) +{ + struct llist_node *llnode; + struct isert_rdma_wr *wr; + struct iser_tx_desc *t; + + mutex_lock(&isert_conn->conn_mutex); + llnode = llist_del_all(&isert_conn->conn_comp_llist); + isert_conn->conn_comp_batch = 0; + mutex_unlock(&isert_conn->conn_mutex); + + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + wr = &t->isert_cmd->rdma_wr; + + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + + isert_completion_put(t, t->isert_cmd, ib_dev, true); + } +} + +static void +isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + struct llist_node *llnode = tx_desc->comp_llnode_batch; + struct isert_rdma_wr *wr; + struct iser_tx_desc *t; - if (tx_desc) { - struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + wr = &t->isert_cmd->rdma_wr; - if (!isert_cmd) - isert_unmap_tx_desc(tx_desc, ib_dev); - else - isert_completion_put(tx_desc, isert_cmd, ib_dev); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + + isert_completion_put(t, t->isert_cmd, ib_dev, true); } + tx_desc->comp_llnode_batch = NULL; - if (isert_conn->post_recv_buf_count == 0 && - atomic_read(&isert_conn->post_send_buf_count) == 0) { - pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - pr_debug("Calling wake_up from isert_cq_comp_err\n"); + if (!isert_cmd) + isert_unmap_tx_desc(tx_desc, ib_dev); + else + isert_completion_put(tx_desc, isert_cmd, ib_dev, true); +} - mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->state != ISER_CONN_DOWN) - isert_conn->state = ISER_CONN_TERMINATING; - mutex_unlock(&isert_conn->conn_mutex); +static void +isert_cq_rx_comp_err(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct iscsi_conn *conn = isert_conn->conn; + + if (isert_conn->post_recv_buf_count) + return; - wake_up(&isert_conn->conn_wait_comp_err); + isert_cq_drain_comp_llist(isert_conn, ib_dev); + + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); } + + while (atomic_read(&isert_conn->post_send_buf_count)) + msleep(3000); + + mutex_lock(&isert_conn->conn_mutex); + isert_conn_terminate(isert_conn); + mutex_unlock(&isert_conn->conn_mutex); + + iscsit_cause_connection_reinstatement(isert_conn->conn, 0); + + complete(&isert_conn->conn_wait_comp_err); } static void @@ -1723,8 +1907,14 @@ pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); pr_debug("TX wc.status: 0x%08x\n", wc.status); pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); - atomic_dec(&isert_conn->post_send_buf_count); - isert_cq_comp_err(tx_desc, isert_conn); + + if (wc.wr_id != ISER_FASTREG_LI_WRID) { + if (tx_desc->llnode_active) + continue; + + atomic_dec(&isert_conn->post_send_buf_count); + isert_cq_tx_comp_err(tx_desc, isert_conn); + } } } @@ -1767,7 +1957,7 @@ wc.vendor_err); } isert_conn->post_recv_buf_count--; - isert_cq_comp_err(NULL, isert_conn); + isert_cq_rx_comp_err(isert_conn); } } @@ -1841,7 +2031,7 @@ isert_cmd->tx_desc.num_sge = 2; } - isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); + 
isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); @@ -1948,7 +2138,7 @@ int rc; isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc); - rc = iscsit_build_text_rsp(cmd, conn, hdr); + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND); if (rc < 0) return rc; @@ -2163,32 +2353,29 @@ static int isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, - struct isert_cmd *isert_cmd, struct isert_conn *isert_conn, - struct ib_sge *ib_sge, u32 offset, unsigned int data_len) + struct isert_conn *isert_conn, struct scatterlist *sg_start, + struct ib_sge *ib_sge, u32 sg_nents, u32 offset, + unsigned int data_len) { - struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; struct ib_device *ib_dev = isert_conn->conn_cm_id->device; - struct scatterlist *sg_start; - u32 sg_off, page_off; struct ib_send_wr fr_wr, inv_wr; struct ib_send_wr *bad_wr, *wr = NULL; + int ret, pagelist_len; + u32 page_off; u8 key; - int ret, sg_nents, pagelist_len; - sg_off = offset / PAGE_SIZE; - sg_start = &cmd->se_cmd.t_data_sg[sg_off]; - sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off, - ISCSI_ISER_SG_TABLESIZE); + sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE); page_off = offset % PAGE_SIZE; - pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n", - isert_cmd, fr_desc, sg_nents, sg_off, offset); + pr_debug("Use fr_desc %p sg_nents %d offset %u\n", + fr_desc, sg_nents, offset); pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents, &fr_desc->data_frpl->page_list[0]); if (!fr_desc->valid) { memset(&inv_wr, 0, sizeof(inv_wr)); + inv_wr.wr_id = ISER_FASTREG_LI_WRID; inv_wr.opcode = IB_WR_LOCAL_INV; inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; wr = &inv_wr; @@ -2199,6 +2386,7 @@ /* Prepare FASTREG WR */ memset(&fr_wr, 0, sizeof(fr_wr)); + fr_wr.wr_id = ISER_FASTREG_LI_WRID; fr_wr.opcode = IB_WR_FAST_REG_MR; fr_wr.wr.fast_reg.iova_start = fr_desc->data_frpl->page_list[0] + page_off; @@ -2232,8 +2420,8 @@ } static int -isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd, - struct isert_rdma_wr *wr) +isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, + struct isert_rdma_wr *wr) { struct se_cmd *se_cmd = &cmd->se_cmd; struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); @@ -2251,9 +2439,9 @@ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { data_left = se_cmd->data_length; } else { - sg_off = cmd->write_data_done / PAGE_SIZE; - data_left = se_cmd->data_length - cmd->write_data_done; offset = cmd->write_data_done; + sg_off = offset / PAGE_SIZE; + data_left = se_cmd->data_length - cmd->write_data_done; isert_cmd->tx_desc.isert_cmd = isert_cmd; } @@ -2311,16 +2499,16 @@ wr->fr_desc = NULL; } else { spin_lock_irqsave(&isert_conn->conn_lock, flags); - fr_desc = list_first_entry(&isert_conn->conn_frwr_pool, + fr_desc = list_first_entry(&isert_conn->conn_fr_pool, struct fast_reg_descriptor, list); list_del(&fr_desc->list); spin_unlock_irqrestore(&isert_conn->conn_lock, flags); wr->fr_desc = fr_desc; - ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn, - ib_sge, offset, data_len); + ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start, + ib_sge, sg_nents, offset, data_len); if (ret) { - list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool); + list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool); goto unmap_sg; } } @@ -2362,14 +2550,14 @@ &isert_cmd->tx_desc.iscsi_header); isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 
isert_init_send_wr(isert_conn, isert_cmd, - &isert_cmd->tx_desc.send_wr, true); + &isert_cmd->tx_desc.send_wr, false); - atomic_inc(&isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); } pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", isert_cmd); @@ -2397,12 +2585,12 @@ return rc; } - atomic_inc(&isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); } pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", isert_cmd); @@ -2469,13 +2657,51 @@ return ret; } +struct rdma_cm_id * +isert_setup_id(struct isert_np *isert_np) +{ + struct iscsi_np *np = isert_np->np; + struct rdma_cm_id *id; + struct sockaddr *sa; + int ret; + + sa = (struct sockaddr *)&np->np_sockaddr; + pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa); + + id = rdma_create_id(isert_cma_handler, isert_np, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(id)) { + pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id)); + ret = PTR_ERR(id); + goto out; + } + pr_debug("id %p context %p\n", id, id->context); + + ret = rdma_bind_addr(id, sa); + if (ret) { + pr_err("rdma_bind_addr() failed: %d\n", ret); + goto out_id; + } + + ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG); + if (ret) { + pr_err("rdma_listen() failed: %d\n", ret); + goto out_id; + } + + return id; +out_id: + rdma_destroy_id(id); +out: + return ERR_PTR(ret); +} + static int isert_setup_np(struct iscsi_np *np, struct __kernel_sockaddr_storage *ksockaddr) { struct isert_np *isert_np; struct rdma_cm_id *isert_lid; - struct sockaddr *sa; int ret; isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); @@ -2483,13 +2709,12 @@ pr_err("Unable to allocate struct isert_np\n"); return -ENOMEM; } - init_waitqueue_head(&isert_np->np_accept_wq); + sema_init(&isert_np->np_sem, 0); mutex_init(&isert_np->np_accept_mutex); INIT_LIST_HEAD(&isert_np->np_accept_list); init_completion(&isert_np->np_login_comp); + isert_np->np = np; - sa = (struct sockaddr *)ksockaddr; - pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa); /* * Setup the np->np_sockaddr from the passed sockaddr setup * in iscsi_target_configfs.c code.. 
@@ -2497,50 +2722,21 @@ memcpy(&np->np_sockaddr, ksockaddr, sizeof(struct __kernel_sockaddr_storage)); - isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, - IB_QPT_RC); + isert_lid = isert_setup_id(isert_np); if (IS_ERR(isert_lid)) { - pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n", - PTR_ERR(isert_lid)); ret = PTR_ERR(isert_lid); goto out; } - ret = rdma_bind_addr(isert_lid, sa); - if (ret) { - pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret); - goto out_lid; - } - - ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG); - if (ret) { - pr_err("rdma_listen() for isert_lid failed: %d\n", ret); - goto out_lid; - } - isert_np->np_cm_id = isert_lid; np->np_context = isert_np; - pr_debug("Setup isert_lid->context: %p\n", isert_lid->context); return 0; -out_lid: - rdma_destroy_id(isert_lid); out: kfree(isert_np); - return ret; -} -static int -isert_check_accept_queue(struct isert_np *isert_np) -{ - int empty; - - mutex_lock(&isert_np->np_accept_mutex); - empty = list_empty(&isert_np->np_accept_list); - mutex_unlock(&isert_np->np_accept_mutex); - - return empty; + return ret; } static int @@ -2575,7 +2771,15 @@ struct isert_conn *isert_conn = (struct isert_conn *)conn->context; int ret; - pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); + pr_info("before login_req comp conn: %p\n", isert_conn); + ret = wait_for_completion_interruptible(&isert_conn->login_req_comp); + if (ret) { + pr_err("isert_conn %p interrupted before got login req\n", + isert_conn); + return ret; + } + reinit_completion(&isert_conn->login_req_comp); + /* * For login requests after the first PDU, isert_rx_login_req() will * kick schedule_delayed_work(&conn->login_work) as the packet is @@ -2585,11 +2789,15 @@ if (!login->first_request) return 0; + isert_rx_login_req(isert_conn); + + pr_info("before conn_login_comp conn: %p\n", conn); ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); if (ret) return ret; - pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); + pr_info("processing login->req: %p\n", login->req); + return 0; } @@ -2635,16 +2843,19 @@ int max_accept = 0, ret; accept_wait: - ret = wait_event_interruptible(isert_np->np_accept_wq, - !isert_check_accept_queue(isert_np) || - np->np_thread_state == ISCSI_NP_THREAD_RESET); + ret = down_interruptible(&isert_np->np_sem); if (max_accept > 5) return -ENODEV; spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { spin_unlock_bh(&np->np_thread_lock); - pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); + pr_debug("np_thread_state %d for isert_accept_np\n", + np->np_thread_state); + /** + * No point in stalling here when np_thread + * is in state RESET/SHUTDOWN/EXIT - bail + **/ return -ENODEV; } spin_unlock_bh(&np->np_thread_lock); @@ -2664,17 +2875,10 @@ isert_conn->conn = conn; max_accept = 0; - ret = isert_rdma_post_recvl(isert_conn); - if (ret) - return ret; - - ret = isert_rdma_accept(isert_conn); - if (ret) - return ret; - isert_set_conn_info(np, conn, isert_conn); - pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); + pr_debug("Processing isert_conn: %p\n", isert_conn); + return 0; } @@ -2683,69 +2887,58 @@ { struct isert_np *isert_np = (struct isert_np *)np->np_context; - rdma_destroy_id(isert_np->np_cm_id); + if (isert_np->np_cm_id) + rdma_destroy_id(isert_np->np_cm_id); np->np_context = NULL; kfree(isert_np); } -static int isert_check_state(struct 
isert_conn *isert_conn, int state) +static void isert_release_work(struct work_struct *work) { - int ret; + struct isert_conn *isert_conn = container_of(work, + struct isert_conn, + release_work); + + pr_info("Starting release conn %p\n", isert_conn); + + wait_for_completion(&isert_conn->conn_wait); mutex_lock(&isert_conn->conn_mutex); - ret = (isert_conn->state == state); + isert_conn->state = ISER_CONN_DOWN; mutex_unlock(&isert_conn->conn_mutex); - return ret; + pr_info("Destroying conn %p\n", isert_conn); + isert_put_conn(isert_conn); } -static void isert_free_conn(struct iscsi_conn *conn) +static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; - pr_debug("isert_free_conn: Starting \n"); - /* - * Decrement post_send_buf_count for special case when called - * from isert_do_control_comp() -> iscsit_logout_post_handler() - */ - mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->logout_posted) - atomic_dec(&isert_conn->post_send_buf_count); + pr_debug("isert_wait_conn: Starting \n"); - if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { - pr_debug("Calling rdma_disconnect from isert_free_conn\n"); - rdma_disconnect(isert_conn->conn_cm_id); - } + mutex_lock(&isert_conn->conn_mutex); /* * Only wait for conn_wait_comp_err if the isert_conn made it * into full feature phase.. */ - if (isert_conn->state == ISER_CONN_UP) { - pr_debug("isert_free_conn: Before wait_event comp_err %d\n", - isert_conn->state); - mutex_unlock(&isert_conn->conn_mutex); - - wait_event(isert_conn->conn_wait_comp_err, - (isert_check_state(isert_conn, ISER_CONN_TERMINATING))); - - wait_event(isert_conn->conn_wait, - (isert_check_state(isert_conn, ISER_CONN_DOWN))); - - isert_put_conn(isert_conn); - return; - } if (isert_conn->state == ISER_CONN_INIT) { mutex_unlock(&isert_conn->conn_mutex); - isert_put_conn(isert_conn); return; } - pr_debug("isert_free_conn: wait_event conn_wait %d\n", - isert_conn->state); + isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->conn_mutex); - wait_event(isert_conn->conn_wait, - (isert_check_state(isert_conn, ISER_CONN_DOWN))); + wait_for_completion(&isert_conn->conn_wait_comp_err); + + INIT_WORK(&isert_conn->release_work, isert_release_work); + queue_work(isert_release_wq, &isert_conn->release_work); +} + +static void isert_free_conn(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = conn->context; isert_put_conn(isert_conn); } @@ -2758,6 +2951,7 @@ .iscsit_setup_np = isert_setup_np, .iscsit_accept_np = isert_accept_np, .iscsit_free_np = isert_free_np, + .iscsit_wait_conn = isert_wait_conn, .iscsit_free_conn = isert_free_conn, .iscsit_get_login_rx = isert_get_login_rx, .iscsit_put_login_tx = isert_put_login_tx, @@ -2785,10 +2979,21 @@ goto destroy_rx_wq; } + isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE); + if (!isert_release_wq) { + pr_err("Unable to allocate isert_release_wq\n"); + ret = -ENOMEM; + goto destroy_comp_wq; + } + iscsit_register_transport(&iser_target_transport); - pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); + pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n"); + return 0; +destroy_comp_wq: + destroy_workqueue(isert_comp_wq); destroy_rx_wq: destroy_workqueue(isert_rx_wq); return ret; @@ -2796,6 +3001,8 @@ static void __exit isert_exit(void) { + flush_scheduled_work(); + destroy_workqueue(isert_release_wq); destroy_workqueue(isert_comp_wq); destroy_workqueue(isert_rx_wq); 
iscsit_unregister_transport(&iser_target_transport); --- linux-3.13.0.orig/drivers/infiniband/ulp/isert/ib_isert.h +++ linux-3.13.0/drivers/infiniband/ulp/isert/ib_isert.h @@ -6,6 +6,7 @@ #define ISERT_RDMA_LISTEN_BACKLOG 10 #define ISCSI_ISER_SG_TABLESIZE 256 +#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL enum isert_desc_type { ISCSI_TX_CONTROL, @@ -22,6 +23,7 @@ enum iser_conn_state { ISER_CONN_INIT, ISER_CONN_UP, + ISER_CONN_FULL_FEATURE, ISER_CONN_TERMINATING, ISER_CONN_DOWN, }; @@ -45,6 +47,7 @@ struct isert_cmd *isert_cmd; struct llist_node *comp_llnode_batch; struct llist_node comp_llnode; + bool llnode_active; struct ib_send_wr send_wr; } __packed; @@ -91,7 +94,6 @@ struct isert_conn { enum iser_conn_state state; - bool logout_posted; int post_recv_buf_count; atomic_t post_send_buf_count; u32 responder_resources; @@ -101,6 +103,7 @@ char *login_req_buf; char *login_rsp_buf; u64 login_req_dma; + int login_req_len; u64 login_rsp_dma; unsigned int conn_rx_desc_head; struct iser_rx_desc *conn_rx_descs; @@ -108,25 +111,25 @@ struct iscsi_conn *conn; struct list_head conn_accept_node; struct completion conn_login_comp; + struct completion login_req_comp; struct iser_tx_desc conn_login_tx_desc; struct rdma_cm_id *conn_cm_id; struct ib_pd *conn_pd; struct ib_mr *conn_mr; struct ib_qp *conn_qp; struct isert_device *conn_device; - struct work_struct conn_logout_work; struct mutex conn_mutex; - wait_queue_head_t conn_wait; - wait_queue_head_t conn_wait_comp_err; + struct completion conn_wait; + struct completion conn_wait_comp_err; struct kref conn_kref; - struct list_head conn_frwr_pool; - int conn_frwr_pool_size; - /* lock to protect frwr_pool */ + struct list_head conn_fr_pool; + int conn_fr_pool_size; + /* lock to protect fastreg pool */ spinlock_t conn_lock; + struct work_struct release_work; #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; struct llist_head conn_comp_llist; - struct mutex conn_comp_mutex; }; #define ISERT_MAX_CQ 64 @@ -139,13 +142,11 @@ }; struct isert_device { - int use_frwr; + int use_fastreg; int cqs_used; int refcount; int cq_active_qps[ISERT_MAX_CQ]; struct ib_device *ib_device; - struct ib_pd *dev_pd; - struct ib_mr *dev_mr; struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; struct isert_cq_desc *cq_desc; @@ -159,7 +160,8 @@ }; struct isert_np { - wait_queue_head_t np_accept_wq; + struct iscsi_np *np; + struct semaphore np_sem; struct rdma_cm_id *np_cm_id; struct mutex np_accept_mutex; struct list_head np_accept_list; --- linux-3.13.0.orig/drivers/infiniband/ulp/srp/ib_srp.c +++ linux-3.13.0/drivers/infiniband/ulp/srp/ib_srp.c @@ -120,6 +120,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); static struct scsi_transport_template *ib_srp_transport_template; +static struct workqueue_struct *srp_remove_wq; static struct ib_client srp_client = { .name = "srp", @@ -539,7 +540,7 @@ spin_unlock_irq(&target->lock); if (changed) - queue_work(system_long_wq, &target->remove_work); + queue_work(srp_remove_wq, &target->remove_work); return changed; } @@ -1575,6 +1576,12 @@ err_iu: srp_put_tx_iu(target, iu, SRP_IU_CMD); + /* + * Avoid that the loops that iterate over the request ring can + * encounter a dangling SCSI command pointer. + */ + req->scmnd = NULL; + spin_lock_irqsave(&target->lock, flags); list_add(&req->list, &target->free_reqs); @@ -2879,9 +2886,10 @@ spin_unlock(&host->target_lock); /* - * Wait for target port removal tasks. + * Wait for tl_err and target port removal tasks. 
*/ flush_workqueue(system_long_wq); + flush_workqueue(srp_remove_wq); kfree(host); } @@ -2933,16 +2941,22 @@ indirect_sg_entries = cmd_sg_entries; } + srp_remove_wq = create_workqueue("srp_remove"); + if (IS_ERR(srp_remove_wq)) { + ret = PTR_ERR(srp_remove_wq); + goto out; + } + + ret = -ENOMEM; ib_srp_transport_template = srp_attach_transport(&ib_srp_transport_functions); if (!ib_srp_transport_template) - return -ENOMEM; + goto destroy_wq; ret = class_register(&srp_class); if (ret) { pr_err("couldn't register class infiniband_srp\n"); - srp_release_transport(ib_srp_transport_template); - return ret; + goto release_tr; } ib_sa_register_client(&srp_sa_client); @@ -2950,13 +2964,22 @@ ret = ib_register_client(&srp_client); if (ret) { pr_err("couldn't register IB client\n"); - srp_release_transport(ib_srp_transport_template); - ib_sa_unregister_client(&srp_sa_client); - class_unregister(&srp_class); - return ret; + goto unreg_sa; } - return 0; +out: + return ret; + +unreg_sa: + ib_sa_unregister_client(&srp_sa_client); + class_unregister(&srp_class); + +release_tr: + srp_release_transport(ib_srp_transport_template); + +destroy_wq: + destroy_workqueue(srp_remove_wq); + goto out; } static void __exit srp_cleanup_module(void) @@ -2965,6 +2988,7 @@ ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); srp_release_transport(ib_srp_transport_template); + destroy_workqueue(srp_remove_wq); } module_init(srp_init_module); --- linux-3.13.0.orig/drivers/infiniband/ulp/srpt/ib_srpt.c +++ linux-3.13.0/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1078,6 +1078,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch, struct srpt_send_ioctx *ioctx) { + struct ib_device *dev = ch->sport->sdev->device; struct se_cmd *cmd; struct scatterlist *sg, *sg_orig; int sg_cnt; @@ -1124,7 +1125,7 @@ db = ioctx->rbufs; tsize = cmd->data_length; - dma_len = sg_dma_len(&sg[0]); + dma_len = ib_sg_dma_len(dev, &sg[0]); riu = ioctx->rdma_ius; /* @@ -1155,7 +1156,8 @@ ++j; if (j < count) { sg = sg_next(sg); - dma_len = sg_dma_len(sg); + dma_len = ib_sg_dma_len( + dev, sg); } } } else { @@ -1192,8 +1194,8 @@ tsize = cmd->data_length; riu = ioctx->rdma_ius; sg = sg_orig; - dma_len = sg_dma_len(&sg[0]); - dma_addr = sg_dma_address(&sg[0]); + dma_len = ib_sg_dma_len(dev, &sg[0]); + dma_addr = ib_sg_dma_address(dev, &sg[0]); /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */ for (i = 0, j = 0; @@ -1216,8 +1218,10 @@ ++j; if (j < count) { sg = sg_next(sg); - dma_len = sg_dma_len(sg); - dma_addr = sg_dma_address(sg); + dma_len = ib_sg_dma_len( + dev, sg); + dma_addr = ib_sg_dma_address( + dev, sg); } } } else { @@ -2087,6 +2091,7 @@ if (!qp_init) goto out; +retry: ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, ch->rq_size + srp_sq_size, 0); if (IS_ERR(ch->cq)) { @@ -2110,6 +2115,13 @@ ch->qp = ib_create_qp(sdev->pd, qp_init); if (IS_ERR(ch->qp)) { ret = PTR_ERR(ch->qp); + if (ret == -ENOMEM) { + srp_sq_size /= 2; + if (srp_sq_size >= MIN_SRPT_SQ_SIZE) { + ib_destroy_cq(ch->cq); + goto retry; + } + } printk(KERN_ERR "failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } @@ -3666,9 +3678,9 @@ unsigned long val; int ret; - ret = strict_strtoul(page, 0, &val); + ret = kstrtoul(page, 0, &val); if (ret < 0) { - pr_err("strict_strtoul() failed with ret: %d\n", ret); + pr_err("kstrtoul() failed with ret: %d\n", ret); return -EINVAL; } if (val > MAX_SRPT_RDMA_SIZE) { @@ -3706,9 +3718,9 @@ unsigned long val; int ret; - ret = strict_strtoul(page, 0, &val); + ret = kstrtoul(page, 0, 
&val); if (ret < 0) { - pr_err("strict_strtoul() failed with ret: %d\n", ret); + pr_err("kstrtoul() failed with ret: %d\n", ret); return -EINVAL; } if (val > MAX_SRPT_RSP_SIZE) { @@ -3746,9 +3758,9 @@ unsigned long val; int ret; - ret = strict_strtoul(page, 0, &val); + ret = kstrtoul(page, 0, &val); if (ret < 0) { - pr_err("strict_strtoul() failed with ret: %d\n", ret); + pr_err("kstrtoul() failed with ret: %d\n", ret); return -EINVAL; } if (val > MAX_SRPT_SRQ_SIZE) { @@ -3793,7 +3805,7 @@ unsigned long tmp; int ret; - ret = strict_strtoul(page, 0, &tmp); + ret = kstrtoul(page, 0, &tmp); if (ret < 0) { printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n"); return -EINVAL; } --- linux-3.13.0.orig/drivers/input/evdev.c +++ linux-3.13.0/drivers/input/evdev.c @@ -422,7 +422,7 @@ err_free_client: evdev_detach_client(evdev, client); - kfree(client); + kvfree(client); return error; } --- linux-3.13.0.orig/drivers/input/input.c +++ linux-3.13.0/drivers/input/input.c @@ -257,9 +257,10 @@ } static int input_get_disposition(struct input_dev *dev, - unsigned int type, unsigned int code, int value) + unsigned int type, unsigned int code, int *pval) { int disposition = INPUT_IGNORE_EVENT; + int value = *pval; switch (type) { @@ -357,6 +358,7 @@ break; } + *pval = value; return disposition; } @@ -365,7 +367,7 @@ { int disposition; - disposition = input_get_disposition(dev, type, code, value); + disposition = input_get_disposition(dev, type, code, &value); if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); --- linux-3.13.0.orig/drivers/input/joystick/xpad.c +++ linux-3.13.0/drivers/input/joystick/xpad.c @@ -1002,9 +1002,19 @@ } ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; - usb_fill_bulk_urb(xpad->bulk_out, udev, - usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), - xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); + if (usb_endpoint_is_bulk_out(ep_irq_in)) { + usb_fill_bulk_urb(xpad->bulk_out, udev, + usb_sndbulkpipe(udev, + ep_irq_in->bEndpointAddress), + xpad->bdata, XPAD_PKT_LEN, + xpad_bulk_out, xpad); + } else { + usb_fill_int_urb(xpad->bulk_out, udev, + usb_sndintpipe(udev, + ep_irq_in->bEndpointAddress), + xpad->bdata, XPAD_PKT_LEN, + xpad_bulk_out, xpad, 0); + } /* * Submit the int URB immediately rather than waiting for open --- linux-3.13.0.orig/drivers/input/keyboard/atkbd.c +++ linux-3.13.0/drivers/input/keyboard/atkbd.c @@ -243,6 +243,12 @@ static void *atkbd_platform_fixup_data; static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); +/* + * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding + * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed. + */ +static bool atkbd_skip_deactivate; + static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, ssize_t (*handler)(struct atkbd *, char *)); static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, @@ -768,7 +774,8 @@ * Make sure nothing is coming from the keyboard and disturbs our * internal state.
*/ - atkbd_deactivate(atkbd); + if (!atkbd_skip_deactivate) + atkbd_deactivate(atkbd); return 0; } @@ -1638,6 +1645,12 @@ return 1; } +static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id) +{ + atkbd_skip_deactivate = true; + return 1; +} + static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { { .matches = { @@ -1775,6 +1788,12 @@ .callback = atkbd_setup_scancode_fixup, .driver_data = atkbd_oqo_01plus_scancode_fixup, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + }, + .callback = atkbd_deactivate_fixup, + }, { } }; --- linux-3.13.0.orig/drivers/input/misc/arizona-haptics.c +++ linux-3.13.0/drivers/input/misc/arizona-haptics.c @@ -77,16 +77,14 @@ return; } + mutex_unlock(dapm_mutex); + ret = snd_soc_dapm_sync(arizona->dapm); if (ret != 0) { dev_err(arizona->dev, "Failed to sync DAPM: %d\n", ret); - mutex_unlock(dapm_mutex); return; } - - mutex_unlock(dapm_mutex); - } else { /* This disable sequence will be a noop if already enabled */ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); @@ -99,16 +97,15 @@ return; } + mutex_unlock(dapm_mutex); + ret = snd_soc_dapm_sync(arizona->dapm); if (ret != 0) { dev_err(arizona->dev, "Failed to sync DAPM: %d\n", ret); - mutex_unlock(dapm_mutex); return; } - mutex_unlock(dapm_mutex); - ret = regmap_update_bits(arizona->regmap, ARIZONA_HAPTICS_CONTROL_1, ARIZONA_HAP_CTRL_MASK, --- linux-3.13.0.orig/drivers/input/misc/xen-kbdfront.c +++ linux-3.13.0/drivers/input/misc/xen-kbdfront.c @@ -29,6 +29,7 @@ #include #include #include +#include struct xenkbd_info { struct input_dev *kbd; @@ -380,6 +381,9 @@ if (xen_initial_domain()) return -ENODEV; + if (!xen_has_pv_devices()) + return -ENODEV; + return xenbus_register_frontend(&xenkbd_driver); } --- linux-3.13.0.orig/drivers/input/mouse/alps.c +++ linux-3.13.0/drivers/input/mouse/alps.c @@ -277,6 +277,57 @@ } /* + * Process bitmap data for V5 protocols. Return value is null. + * + * The bitmaps don't have enough data to track fingers, so this function + * only generates points representing a bounding box of at most two contacts. + * These two points are returned in x1, y1, x2, and y2. + */ +static void alps_process_bitmap_dolphin(struct alps_data *priv, + struct alps_fields *fields, + int *x1, int *y1, int *x2, int *y2) +{ + int box_middle_x, box_middle_y; + unsigned int x_map, y_map; + unsigned char start_bit, end_bit; + unsigned char x_msb, x_lsb, y_msb, y_lsb; + + x_map = fields->x_map; + y_map = fields->y_map; + + if (!x_map || !y_map) + return; + + /* Get Most-significant and Least-significant bit */ + x_msb = fls(x_map); + x_lsb = ffs(x_map); + y_msb = fls(y_map); + y_lsb = ffs(y_map); + + /* Most-significant bit should never exceed max sensor line number */ + if (x_msb > priv->x_bits || y_msb > priv->y_bits) + return; + + *x1 = *y1 = *x2 = *y2 = 0; + + if (fields->fingers > 1) { + start_bit = priv->x_bits - x_msb; + end_bit = priv->x_bits - x_lsb; + box_middle_x = (priv->x_max * (start_bit + end_bit)) / + (2 * (priv->x_bits - 1)); + + start_bit = y_lsb - 1; + end_bit = y_msb - 1; + box_middle_y = (priv->y_max * (start_bit + end_bit)) / + (2 * (priv->y_bits - 1)); + *x1 = fields->x; + *y1 = fields->y; + *x2 = 2 * box_middle_x - *x1; + *y2 = 2 * box_middle_y - *y1; + } +} + +/* * Process bitmap data from v3 and v4 protocols. Returns the number of * fingers detected. A return value of 0 means at least one of the * bitmaps was empty. 
@@ -481,7 +532,8 @@ f->ts_middle = !!(p[3] & 0x40); } -static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p) +static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p, + struct psmouse *psmouse) { f->first_mp = !!(p[4] & 0x40); f->is_mp = !!(p[0] & 0x40); @@ -502,48 +554,61 @@ alps_decode_buttons_v3(f, p); } -static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p) +static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p, + struct psmouse *psmouse) { - alps_decode_pinnacle(f, p); + alps_decode_pinnacle(f, p, psmouse); f->x_map |= (p[5] & 0x10) << 11; f->y_map |= (p[5] & 0x20) << 6; } -static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p) +static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p, + struct psmouse *psmouse) { + u64 palm_data = 0; + struct alps_data *priv = psmouse->private; + f->first_mp = !!(p[0] & 0x02); f->is_mp = !!(p[0] & 0x20); - f->fingers = ((p[0] & 0x6) >> 1 | + if (!f->is_mp) { + f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7)); + f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3)); + f->z = (p[0] & 4) ? 0 : p[5] & 0x7f; + alps_decode_buttons_v3(f, p); + } else { + f->fingers = ((p[0] & 0x6) >> 1 | (p[0] & 0x10) >> 2); - f->x_map = ((p[2] & 0x60) >> 5) | - ((p[4] & 0x7f) << 2) | - ((p[5] & 0x7f) << 9) | - ((p[3] & 0x07) << 16) | - ((p[3] & 0x70) << 15) | - ((p[0] & 0x01) << 22); - f->y_map = (p[1] & 0x7f) | - ((p[2] & 0x1f) << 7); - - f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7)); - f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3)); - f->z = (p[0] & 4) ? 0 : p[5] & 0x7f; - alps_decode_buttons_v3(f, p); + palm_data = (p[1] & 0x7f) | + ((p[2] & 0x7f) << 7) | + ((p[4] & 0x7f) << 14) | + ((p[5] & 0x7f) << 21) | + ((p[3] & 0x07) << 28) | + (((u64)p[3] & 0x70) << 27) | + (((u64)p[0] & 0x01) << 34); + + /* Y-profile is stored in P(0) to p(n-1), n = y_bits; */ + f->y_map = palm_data & (BIT(priv->y_bits) - 1); + + /* X-profile is stored in p(n) to p(n+m-1), m = x_bits; */ + f->x_map = (palm_data >> priv->y_bits) & + (BIT(priv->x_bits) - 1); + } } -static void alps_process_touchpad_packet_v3(struct psmouse *psmouse) +static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; struct input_dev *dev2 = priv->dev2; int x1 = 0, y1 = 0, x2 = 0, y2 = 0; - int fingers = 0, bmap_fingers; - struct alps_fields f; + int fingers = 0, bmap_fn; + struct alps_fields f = {0}; - priv->decode_fields(&f, packet); + priv->decode_fields(&f, packet, psmouse); /* * There's no single feature of touchpad position and bitmap packets @@ -560,19 +625,38 @@ */ if (f.is_mp) { fingers = f.fingers; - bmap_fingers = alps_process_bitmap(priv, - f.x_map, f.y_map, - &x1, &y1, &x2, &y2); - - /* - * We shouldn't report more than one finger if - * we don't have two coordinates. - */ - if (fingers > 1 && bmap_fingers < 2) - fingers = bmap_fingers; - - /* Now process position packet */ - priv->decode_fields(&f, priv->multi_data); + if (priv->proto_version == ALPS_PROTO_V3) { + bmap_fn = alps_process_bitmap(priv, f.x_map, + f.y_map, &x1, &y1, + &x2, &y2); + + /* + * We shouldn't report more than one finger if + * we don't have two coordinates. 
+ */ + if (fingers > 1 && bmap_fn < 2) + fingers = bmap_fn; + + /* Now process position packet */ + priv->decode_fields(&f, priv->multi_data, + psmouse); + } else { + /* + * Because Dolphin uses position packet's + * coordinate data as Pt1 and uses it to + * calculate Pt2, we need to do position + * packet decode first. + */ + priv->decode_fields(&f, priv->multi_data, + psmouse); + + /* + * Since Dolphin's finger number is reliable, + * there is no need to compare with bmap_fn. + */ + alps_process_bitmap_dolphin(priv, &f, &x1, &y1, + &x2, &y2); + } } else { priv->multi_packet = 0; } @@ -662,7 +746,7 @@ return; } - alps_process_touchpad_packet_v3(psmouse); + alps_process_touchpad_packet_v3_v5(psmouse); } static void alps_process_packet_v6(struct psmouse *psmouse) @@ -963,7 +1047,13 @@ { struct alps_data *priv = psmouse->private; - if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ + /* + * Check if we are dealing with a bare PS/2 packet, presumably from + * a device connected to the external PS/2 port. Because bare PS/2 + * protocol does not have enough constant bits to self-synchronize + * properly we only do this if the device is fully synchronized. + */ + if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { if (psmouse->pktcnt == 3) { alps_report_bare_ps2_packet(psmouse, psmouse->packet, true); @@ -987,12 +1077,27 @@ } /* Bytes 2 - pktsize should have 0 in the highest bit */ - if ((priv->proto_version < ALPS_PROTO_V5) && + if (priv->proto_version < ALPS_PROTO_V5 && psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]); + + if (priv->proto_version == ALPS_PROTO_V3 && + psmouse->pktcnt == psmouse->pktsize) { + /* + * Some Dell boxes, such as Latitude E6440 or E7440 + * with closed lid, quite often smash last byte of + * otherwise valid packet with 0xff. Given that the + * next packet is very likely to be valid let's + * report PSMOUSE_FULL_PACKET but not process data, + * rather than reporting PSMOUSE_BAD_DATA and + * filling the logs. + */ + return PSMOUSE_FULL_PACKET; + } + return PSMOUSE_BAD_DATA; } @@ -1709,6 +1814,52 @@ return -1; } +static int alps_dolphin_get_device_area(struct psmouse *psmouse, + struct alps_data *priv) +{ + struct ps2dev *ps2dev = &psmouse->ps2dev; + unsigned char param[4] = {0}; + int num_x_electrode, num_y_electrode; + + if (alps_enter_command_mode(psmouse)) + return -1; + + param[0] = 0x0a; + if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) || + ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) || + ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || + ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE)) + return -1; + + if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) + return -1; + + /* + * Dolphin's sensor line number is not fixed. It can be calculated + * by adding the device's register value with DOLPHIN_PROFILE_X/YOFFSET. + * Furthermore, we can get device's x_max and y_max by multiplying + * sensor line number with DOLPHIN_COUNT_PER_ELECTRODE. + * + * e.g. When we get register's sensor_x = 11 & sensor_y = 8, + * real sensor line number X = 11 + 8 = 19, and + * real sensor line number Y = 8 + 1 = 9. + * So, x_max = (19 - 1) * 64 = 1152, and + * y_max = (9 - 1) * 64 = 512.
+ */ + num_x_electrode = DOLPHIN_PROFILE_XOFFSET + (param[2] & 0x0F); + num_y_electrode = DOLPHIN_PROFILE_YOFFSET + ((param[2] >> 4) & 0x0F); + priv->x_bits = num_x_electrode; + priv->y_bits = num_y_electrode; + priv->x_max = (num_x_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE; + priv->y_max = (num_y_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE; + + if (alps_exit_command_mode(psmouse)) + return -1; + + return 0; +} + static int alps_hw_init_dolphin_v1(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; @@ -1763,13 +1914,13 @@ break; case ALPS_PROTO_V5: priv->hw_init = alps_hw_init_dolphin_v1; - priv->process_packet = alps_process_packet_v3; + priv->process_packet = alps_process_touchpad_packet_v3_v5; priv->decode_fields = alps_decode_dolphin; priv->set_abs_params = alps_set_abs_params_mt; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; priv->byte0 = 0xc8; - priv->mask0 = 0xc8; + priv->mask0 = 0xd8; priv->flags = 0; priv->x_max = 1360; priv->y_max = 660; @@ -1845,11 +1996,13 @@ if (alps_match_table(psmouse, priv, e7, ec) == 0) { return 0; } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && - ec[0] == 0x73 && ec[1] == 0x01) { + ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) { priv->proto_version = ALPS_PROTO_V5; alps_set_defaults(priv); - - return 0; + if (alps_dolphin_get_device_area(psmouse, priv)) + return -EIO; + else + return 0; } else if (ec[0] == 0x88 && ec[1] == 0x08) { priv->proto_version = ALPS_PROTO_V3; alps_set_defaults(priv); @@ -2016,6 +2169,9 @@ /* We are having trouble resyncing ALPS touchpads so disable it for now */ psmouse->resync_time = 0; + /* Allow 2 invalid packets without resetting device */ + psmouse->resetafter = psmouse->pktsize * 2; + return 0; init_fail: --- linux-3.13.0.orig/drivers/input/mouse/alps.h +++ linux-3.13.0/drivers/input/mouse/alps.h @@ -19,6 +19,10 @@ #define ALPS_PROTO_V5 5 #define ALPS_PROTO_V6 6 +#define DOLPHIN_COUNT_PER_ELECTRODE 64 +#define DOLPHIN_PROFILE_XOFFSET 8 /* x-electrode offset */ +#define DOLPHIN_PROFILE_YOFFSET 1 /* y-electrode offset */ + /** * struct alps_model_info - touchpad ID table * @signature: E7 response string to match. 
@@ -146,7 +150,8 @@ int (*hw_init)(struct psmouse *psmouse); void (*process_packet)(struct psmouse *psmouse); - void (*decode_fields)(struct alps_fields *f, unsigned char *p); + void (*decode_fields)(struct alps_fields *f, unsigned char *p, + struct psmouse *psmouse); void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1); int prev_fin; --- linux-3.13.0.orig/drivers/input/mouse/cypress_ps2.c +++ linux-3.13.0/drivers/input/mouse/cypress_ps2.c @@ -391,7 +391,9 @@ if (ret < 0) return ret; +#if ( CYPRESS_SIMULATED_MT != 1 ) __set_bit(INPUT_PROP_SEMI_MT, input->propbit); +#endif input_abs_set_res(input, ABS_X, cytp->tp_res_x); input_abs_set_res(input, ABS_Y, cytp->tp_res_y); @@ -410,7 +412,6 @@ __clear_bit(REL_X, input->relbit); __clear_bit(REL_Y, input->relbit); - __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); __set_bit(EV_KEY, input->evbit); __set_bit(BTN_LEFT, input->keybit); __set_bit(BTN_RIGHT, input->keybit); @@ -480,6 +481,22 @@ ((packet[5] & 0x0f) << 8) | packet[7]; if (cytp->mode & CYTP_BIT_ABS_PRESSURE) report_data->contacts[1].z = report_data->contacts[0].z; +#if ( CYPRESS_SIMULATED_MT == 1 ) + /* simulate contact positions for >2 fingers */ + if ( report_data->contact_cnt >= 3 ) { + int i; + for ( i=1; icontact_cnt; i++ ) { + report_data->contacts[i].x = + report_data->contacts[0].x + + 100*(i)*((i%2)?-1:1); + report_data->contacts[i].y = + report_data->contacts[0].y; + if (cytp->mode & CYTP_BIT_ABS_PRESSURE) + report_data->contacts[i].z = + report_data->contacts[0].z; + } + } +#endif } report_data->left = (header_byte & BTN_LEFT_BIT) ? 1 : 0; --- linux-3.13.0.orig/drivers/input/mouse/cypress_ps2.h +++ linux-3.13.0/drivers/input/mouse/cypress_ps2.h @@ -130,7 +130,18 @@ #define RESP_REMOTE_BIT 0x40 #define RESP_SMBUS_BIT 0x80 -#define CYTP_MAX_MT_SLOTS 2 +/* + * CYPRESS_SIMULATED_MT + * set to 1 for simulated multitouch (up to 5 contact points) + * set to 0 for SEMI_MT (only 2 corner points, and count of fingers) + */ +#define CYPRESS_SIMULATED_MT 1 + +#if ( CYPRESS_SIMULATED_MT == 1 ) +# define CYTP_MAX_MT_SLOTS 5 +#else +# define CYTP_MAX_MT_SLOTS 2 +#endif struct cytp_contact { int x; --- linux-3.13.0.orig/drivers/input/mouse/elantech.c +++ linux-3.13.0/drivers/input/mouse/elantech.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -472,8 +473,15 @@ input_report_key(dev, BTN_TOOL_FINGER, fingers == 1); input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2); input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3); - input_report_key(dev, BTN_LEFT, packet[0] & 0x01); - input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + + /* For clickpads map both buttons to BTN_LEFT */ + if (etd->fw_version & 0x001000) { + input_report_key(dev, BTN_LEFT, packet[0] & 0x03); + } else { + input_report_key(dev, BTN_LEFT, packet[0] & 0x01); + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + } + input_report_abs(dev, ABS_PRESSURE, pres); input_report_abs(dev, ABS_TOOL_WIDTH, width); @@ -483,9 +491,17 @@ static void elantech_input_sync_v4(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; + struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; - input_report_key(dev, BTN_LEFT, packet[0] & 0x01); + /* For clickpads map both buttons to BTN_LEFT */ + if (etd->fw_version & 0x001000) { + input_report_key(dev, BTN_LEFT, packet[0] & 0x03); + } else { + input_report_key(dev, BTN_LEFT, packet[0] & 0x01); + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + } + input_mt_report_pointer_emulation(dev, true); 
input_sync(dev); } @@ -830,7 +846,11 @@ break; case 3: - etd->reg_10 = 0x0b; + if (etd->set_hw_resolution) + etd->reg_10 = 0x0b; + else + etd->reg_10 = 0x01; + if (elantech_write_reg(psmouse, 0x10, etd->reg_10)) rc = -1; @@ -984,6 +1004,44 @@ } /* + * Advertise INPUT_PROP_BUTTONPAD for clickpads. The testing of bit 12 in + * fw_version for this is based on the following fw_version & caps table: + * + * Laptop-model: fw_version: caps: buttons: + * Acer S3 0x461f00 10, 13, 0e clickpad + * Acer S7-392 0x581f01 50, 17, 0d clickpad + * Acer V5-131 0x461f02 01, 16, 0c clickpad + * Acer V5-551 0x461f00 ? clickpad + * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons + * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons + * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons + * Asus UX31 0x361f00 20, 15, 0e clickpad + * Asus UX32VD 0x361f02 00, 15, 0e clickpad + * Avatar AVIU-145A2 0x361f00 ? clickpad + * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons + * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) + * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons + * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad + * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad + * Samsung NP900X3E-A02 0x575f03 ? clickpad + * Samsung NP-QX410 0x851b00 19, 14, 0c clickpad + * Samsung RC512 0x450f00 08, 15, 0c 2 hw buttons + * Samsung RF710 0x450f00 ? 2 hw buttons + * System76 Pangolin 0x250f01 ? 2 hw buttons + * (*) + 3 trackpoint buttons + */ +static void elantech_set_buttonpad_prop(struct psmouse *psmouse) +{ + struct input_dev *dev = psmouse->dev; + struct elantech_data *etd = psmouse->private; + + if (etd->fw_version & 0x001000) { + __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); + __clear_bit(BTN_RIGHT, dev->keybit); + } +} + +/* * Set the appropriate event bits for the input subsystem */ static int elantech_set_input_params(struct psmouse *psmouse) @@ -1026,6 +1084,8 @@ __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); /* fall through */ case 3: + if (etd->hw_version == 3) + elantech_set_buttonpad_prop(psmouse); input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); if (etd->reports_pressure) { @@ -1047,9 +1107,7 @@ */ psmouse_warn(psmouse, "couldn't query resolution data.\n"); } - /* v4 is clickpad, with only one button. */ - __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); - __clear_bit(BTN_RIGHT, dev->keybit); + elantech_set_buttonpad_prop(psmouse); __set_bit(BTN_TOOL_QUADTAP, dev->keybit); /* For X to recognize me as touchpad. */ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); @@ -1195,6 +1253,13 @@ if (param[1] == 0) return true; + /* + * Some models have a revision higher then 20. Meaning param[2] may + * be 10 or 20, skip the rates check for these. + */ + if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) + return true; + for (i = 0; i < ARRAY_SIZE(rates); i++) if (param[2] == rates[i]) return false; @@ -1292,6 +1357,23 @@ } /* + * Some hw_version 3 models go into error state when we try to set + * bit 3 and/or bit 1 of r10. + */ +static const struct dmi_system_id no_hw_res_dmi_table[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + /* Gigabyte U2442 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"), + }, + }, +#endif + { } +}; + +/* * determine hardware version and set some properties according to it. 
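 * (Editorial aside, not part of the patch: the clickpad handling added above keys off bit 12 of fw_version, i.e. (fw_version & 0x001000). Using values from the table quoted in elantech.c, 0x461f00 (Acer S3) has the bit set and is treated as a clickpad with both button bits mapped to BTN_LEFT, while 0x450f01 (Asus K53SV) does not and keeps separate BTN_LEFT/BTN_RIGHT reporting.)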
*/ static int elantech_set_properties(struct elantech_data *etd) @@ -1314,6 +1396,9 @@ case 6: case 7: case 8: + case 9: + case 10: + case 13: etd->hw_version = 4; break; default: @@ -1350,6 +1435,9 @@ */ etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); + /* Enable real hardware resolution on hw_version 3 ? */ + etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table); + return 0; } --- linux-3.13.0.orig/drivers/input/mouse/elantech.h +++ linux-3.13.0/drivers/input/mouse/elantech.h @@ -130,6 +130,7 @@ bool jumpy_cursor; bool reports_pressure; bool crc_enabled; + bool set_hw_resolution; unsigned char hw_version; unsigned int fw_version; unsigned int single_finger_reports; --- linux-3.13.0.orig/drivers/input/mouse/psmouse-base.c +++ linux-3.13.0/drivers/input/mouse/psmouse-base.c @@ -462,6 +462,20 @@ PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)); } +/* + * psmouse_matches_pnp_id - check if psmouse matches one of the passed in ids. + */ +bool psmouse_matches_pnp_id(struct psmouse *psmouse, const char * const ids[]) +{ + int i; + + if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) + for (i = 0; ids[i]; i++) + if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i])) + return true; + + return false; +} /* * Genius NetMouse magic init. --- linux-3.13.0.orig/drivers/input/mouse/psmouse.h +++ linux-3.13.0/drivers/input/mouse/psmouse.h @@ -108,6 +108,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse); int psmouse_activate(struct psmouse *psmouse); int psmouse_deactivate(struct psmouse *psmouse); +bool psmouse_matches_pnp_id(struct psmouse *psmouse, const char * const ids[]); struct psmouse_attribute { struct device_attribute dattr; --- linux-3.13.0.orig/drivers/input/mouse/synaptics.c +++ linux-3.13.0/drivers/input/mouse/synaptics.c @@ -117,6 +117,83 @@ } #ifdef CONFIG_MOUSE_PS2_SYNAPTICS +#define ANY_BOARD_ID 0 +struct min_max_quirk { + const char * const *pnp_ids; + struct { + unsigned long int min, max; + } board_id; + int x_min, x_max, y_min, y_max; +}; + +static const struct min_max_quirk min_max_pnpid_table[] = { + { + (const char * const []){"LEN0033", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1024, 5052, 2258, 4832 + }, + { + (const char * const []){"LEN0042", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1232, 5710, 1156, 4696 + }, + { + (const char * const []){"LEN0034", "LEN0036", "LEN0037", + "LEN0039", "LEN2002", "LEN2004", + NULL}, + {ANY_BOARD_ID, 2961}, + 1024, 5112, 2024, 4832 + }, + { + (const char * const []){"LEN2001", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1024, 5022, 2508, 4832 + }, + { + (const char * const []){"LEN2006", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1264, 5675, 1171, 4688 + }, + { } +}; + +/* This list has been kindly provided by Synaptics. 
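 * (Editorial aside, not part of the patch: these IDs are consumed by psmouse_matches_pnp_id() above, which requires serio->firmware_id to begin with "PNP:" and then does a substring match for each entry. That string is built by i8042_pnp_id_to_string() elsewhere in this patch as the "PNP:" prefix plus one space-separated id per PNP node, e.g. a hypothetical "PNP: LEN0036 PNP0f13" for a T440-style AUX port.)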
*/ +static const char * const topbuttonpad_pnp_ids[] = { + "LEN0017", + "LEN0018", + "LEN0019", + "LEN0023", + "LEN002A", + "LEN002B", + "LEN002C", + "LEN002D", + "LEN002E", + "LEN0033", /* Helix */ + "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ + "LEN0035", /* X240 */ + "LEN0036", /* T440 */ + "LEN0037", /* X1 Carbon 2nd */ + "LEN0038", + "LEN0039", /* T440s */ + "LEN0041", + "LEN0042", /* Yoga */ + "LEN0045", + "LEN0047", + "LEN0049", + "LEN2000", + "LEN2001", /* Edge E431 */ + "LEN2002", /* Edge E531 */ + "LEN2003", + "LEN2004", /* L440 */ + "LEN2005", + "LEN2006", + "LEN2007", + "LEN2008", + "LEN2009", + "LEN200A", + "LEN200B", + NULL +}; /***************************************************************************** * Synaptics communications functions @@ -159,18 +236,39 @@ return 0; } +static int synaptics_more_extended_queries(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + unsigned char buf[3]; + + if (synaptics_send_cmd(psmouse, SYN_QUE_MEXT_CAPAB_10, buf)) + return -1; + + priv->ext_cap_10 = (buf[0]<<16) | (buf[1]<<8) | buf[2]; + + return 0; +} + /* - * Read the board id from the touchpad + * Read the board id and the "More Extended Queries" from the touchpad * The board id is encoded in the "QUERY MODES" response */ -static int synaptics_board_id(struct psmouse *psmouse) +static int synaptics_query_modes(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char bid[3]; + /* firmwares prior 7.5 have no board_id encoded */ + if (SYN_ID_FULL(priv->identity) < 0x705) + return 0; + if (synaptics_send_cmd(psmouse, SYN_QUE_MODES, bid)) return -1; priv->board_id = ((bid[0] & 0xfc) << 6) | bid[1]; + + if (SYN_MEXT_CAP_BIT(bid[0])) + return synaptics_more_extended_queries(psmouse); + return 0; } @@ -265,6 +363,7 @@ * Read touchpad resolution and maximum reported coordinates * Resolution is left zero if touchpad does not support the query */ + static int synaptics_resolution(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; @@ -288,23 +387,69 @@ } else { priv->x_max = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); priv->y_max = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); + psmouse_info(psmouse, + "queried max coordinates: x [..%d], y [..%d]\n", + priv->x_max, priv->y_max); } } - if (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 && - SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c)) { + if (SYN_CAP_MIN_DIMENSIONS(priv->ext_cap_0c) && + (SYN_EXT_CAP_REQUESTS(priv->capabilities) >= 7 || + /* + * Firmware v8.1 does not report proper number of extended + * capabilities, but has been proven to report correct min + * coordinates. 
+ */ + SYN_ID_FULL(priv->identity) == 0x801)) { if (synaptics_send_cmd(psmouse, SYN_QUE_EXT_MIN_COORDS, resp)) { psmouse_warn(psmouse, "device claims to have min coordinates query, but I'm not able to read it.\n"); } else { priv->x_min = (resp[0] << 5) | ((resp[1] & 0x0f) << 1); priv->y_min = (resp[2] << 5) | ((resp[1] & 0xf0) >> 3); + psmouse_info(psmouse, + "queried min coordinates: x [%d..], y [%d..]\n", + priv->x_min, priv->y_min); } } return 0; } +/* + * Apply quirk(s) if the hardware matches + */ + +static void synaptics_apply_quirks(struct psmouse *psmouse) +{ + struct synaptics_data *priv = psmouse->private; + int i; + + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) { + if (!psmouse_matches_pnp_id(psmouse, + min_max_pnpid_table[i].pnp_ids)) + continue; + + if (min_max_pnpid_table[i].board_id.min != ANY_BOARD_ID && + priv->board_id < min_max_pnpid_table[i].board_id.min) + continue; + + if (min_max_pnpid_table[i].board_id.max != ANY_BOARD_ID && + priv->board_id > min_max_pnpid_table[i].board_id.max) + continue; + + priv->x_min = min_max_pnpid_table[i].x_min; + priv->x_max = min_max_pnpid_table[i].x_max; + priv->y_min = min_max_pnpid_table[i].y_min; + priv->y_max = min_max_pnpid_table[i].y_max; + psmouse_info(psmouse, + "quirked min/max coordinates: x [%d..%d], y [%d..%d]\n", + priv->x_min, priv->x_max, + priv->y_min, priv->y_max); + break; + } +} + static int synaptics_query_hardware(struct psmouse *psmouse) { if (synaptics_identify(psmouse)) @@ -313,13 +458,15 @@ return -1; if (synaptics_firmware_id(psmouse)) return -1; - if (synaptics_board_id(psmouse)) + if (synaptics_query_modes(psmouse)) return -1; if (synaptics_capability(psmouse)) return -1; if (synaptics_resolution(psmouse)) return -1; + synaptics_apply_quirks(psmouse); + return 0; } @@ -427,18 +574,22 @@ return (buf[0] & 0xFC) == 0x84 && (buf[3] & 0xCC) == 0xC4; } -static void synaptics_pass_pt_packet(struct serio *ptport, unsigned char *packet) +static void synaptics_pass_pt_packet(struct psmouse *psmouse, + struct serio *ptport, + unsigned char *packet) { + struct synaptics_data *priv = psmouse->private; struct psmouse *child = serio_get_drvdata(ptport); if (child && child->state == PSMOUSE_ACTIVATED) { - serio_interrupt(ptport, packet[1], 0); + serio_interrupt(ptport, packet[1] | priv->pt_buttons, 0); serio_interrupt(ptport, packet[4], 0); serio_interrupt(ptport, packet[5], 0); if (child->pktsize == 4) serio_interrupt(ptport, packet[2], 0); - } else + } else { serio_interrupt(ptport, packet[1], 0); + } } static void synaptics_pt_activate(struct psmouse *psmouse) @@ -527,6 +678,20 @@ priv->agm_pending = true; } +static void synaptics_parse_ext_buttons(const unsigned char buf[], + struct synaptics_data *priv, + struct synaptics_hw_state *hw) +{ + unsigned int ext_bits = + (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; + unsigned int ext_mask = GENMASK(ext_bits - 1, 0); + + hw->ext_buttons = buf[4] & ext_mask; + hw->ext_buttons |= (buf[5] & ext_mask) << ext_bits; +} + +static bool is_forcepad; + static int synaptics_parse_hw_state(const unsigned char buf[], struct synaptics_data *priv, struct synaptics_hw_state *hw) @@ -538,10 +703,61 @@ ((buf[0] & 0x04) >> 1) | ((buf[3] & 0x04) >> 2)); + if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || + SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && + hw->w == 2) { + synaptics_parse_agm(buf, priv, hw); + return 1; + } + + hw->x = (((buf[3] & 0x10) << 8) | + ((buf[1] & 0x0f) << 8) | + buf[4]); + hw->y = (((buf[3] & 0x20) << 7) | + ((buf[1] & 0xf0) << 4) | + buf[5]); + hw->z = buf[2]; + 
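+ /* Editorial note, not part of the patch: synaptics_parse_ext_buttons() above splits the extended buttons evenly across buf[4] and buf[5]. For a pad reporting 8 extended buttons: ext_bits = (8 + 1) >> 1 = 4 and ext_mask = GENMASK(3, 0) = 0x0f, so hw->ext_buttons = (buf[4] & 0x0f) | ((buf[5] & 0x0f) << 4) - the same packing that the unrolled switch statement removed further down produced by hand. */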
hw->left = (buf[0] & 0x01) ? 1 : 0; hw->right = (buf[0] & 0x02) ? 1 : 0; - if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { + if (is_forcepad) { + /* + * ForcePads, like Clickpads, use middle button + * bits to report primary button clicks. + * Unfortunately they report primary button not + * only when user presses on the pad above a certain + * threshold, but also when there are more than one + * finger on the touchpad, which interferes with + * our multi-finger gestures. + */ + if (hw->z == 0) { + /* No contacts */ + priv->press = priv->report_press = false; + } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) { + /* + * Single-finger touch with pressure above + * the threshold. If pressure stays long + * enough, we'll start reporting primary + * button. We rely on the device continuing + * sending data even if finger does not + * move. + */ + if (!priv->press) { + priv->press_start = jiffies; + priv->press = true; + } else if (time_after(jiffies, + priv->press_start + + msecs_to_jiffies(50))) { + priv->report_press = true; + } + } else { + priv->press = false; + } + + hw->left = priv->report_press; + + } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { /* * Clickpad's button is transmitted as middle button, * however, since it is primary button, we will report @@ -560,43 +776,9 @@ hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; } - if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || - SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && - hw->w == 2) { - synaptics_parse_agm(buf, priv, hw); - return 1; - } - - hw->x = (((buf[3] & 0x10) << 8) | - ((buf[1] & 0x0f) << 8) | - buf[4]); - hw->y = (((buf[3] & 0x20) << 7) | - ((buf[1] & 0xf0) << 4) | - buf[5]); - hw->z = buf[2]; - - if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && + if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) > 0 && ((buf[0] ^ buf[3]) & 0x02)) { - switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { - default: - /* - * if nExtBtn is greater than 8 it should be - * considered invalid and treated as 0 - */ - break; - case 8: - hw->ext_buttons |= ((buf[5] & 0x08)) ? 0x80 : 0; - hw->ext_buttons |= ((buf[4] & 0x08)) ? 0x40 : 0; - case 6: - hw->ext_buttons |= ((buf[5] & 0x04)) ? 0x20 : 0; - hw->ext_buttons |= ((buf[4] & 0x04)) ? 0x10 : 0; - case 4: - hw->ext_buttons |= ((buf[5] & 0x02)) ? 0x08 : 0; - hw->ext_buttons |= ((buf[4] & 0x02)) ? 0x04 : 0; - case 2: - hw->ext_buttons |= ((buf[5] & 0x01)) ? 0x02 : 0; - hw->ext_buttons |= ((buf[4] & 0x01)) ? 0x01 : 0; - } + synaptics_parse_ext_buttons(buf, priv, hw); } } else { hw->x = (((buf[1] & 0x1f) << 8) | buf[2]); @@ -658,12 +840,54 @@ } } +static void synaptics_report_ext_buttons(struct psmouse *psmouse, + const struct synaptics_hw_state *hw) +{ + struct input_dev *dev = psmouse->dev; + struct synaptics_data *priv = psmouse->private; + int ext_bits = (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) + 1) >> 1; + char buf[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + int i; + + if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap)) + return; + + /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */ + if (SYN_ID_FULL(priv->identity) == 0x801 && + !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02)) + return; + + if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) { + for (i = 0; i < ext_bits; i++) { + input_report_key(dev, BTN_0 + 2 * i, + hw->ext_buttons & (1 << i)); + input_report_key(dev, BTN_1 + 2 * i, + hw->ext_buttons & (1 << (i + ext_bits))); + } + return; + } + + /* + * This generation of touchpads has the trackstick buttons + * physically wired to the touchpad. Re-route them through + * the pass-through interface. 
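 * (Editorial aside, not part of the patch: the re-route below packs the stick buttons into priv->pt_buttons as left=bit 0, right=bit 1, middle=bit 2 - the standard PS/2 button bit layout - so, per the SYN_CAP_EXT_BUTTON_STICK_* macros in synaptics.h, hw->ext_buttons = 0x05 (left + right) yields pt_buttons = 0x03, which synaptics_pass_pt_packet() then ORs into packet[1] of the forwarded pass-through packet.)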
+ */ + if (!priv->pt_port) + return; + + /* The trackstick expects at most 3 buttons */ + priv->pt_buttons = SYN_CAP_EXT_BUTTON_STICK_L(hw->ext_buttons) | + SYN_CAP_EXT_BUTTON_STICK_R(hw->ext_buttons) << 1 | + SYN_CAP_EXT_BUTTON_STICK_M(hw->ext_buttons) << 2; + + synaptics_pass_pt_packet(psmouse, priv->pt_port, buf); +} + static void synaptics_report_buttons(struct psmouse *psmouse, const struct synaptics_hw_state *hw) { struct input_dev *dev = psmouse->dev; struct synaptics_data *priv = psmouse->private; - int i; input_report_key(dev, BTN_LEFT, hw->left); input_report_key(dev, BTN_RIGHT, hw->right); @@ -676,8 +900,7 @@ input_report_key(dev, BTN_BACK, hw->down); } - for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) - input_report_key(dev, BTN_0 + i, hw->ext_buttons & (1 << i)); + synaptics_report_ext_buttons(psmouse, hw); } static void synaptics_report_slot(struct input_dev *dev, int slot, @@ -1213,7 +1436,8 @@ if (SYN_CAP_PASS_THROUGH(priv->capabilities) && synaptics_is_pt_packet(psmouse->packet)) { if (priv->pt_port) - synaptics_pass_pt_packet(priv->pt_port, psmouse->packet); + synaptics_pass_pt_packet(psmouse, priv->pt_port, + psmouse->packet); } else synaptics_process_packet(psmouse); @@ -1244,8 +1468,10 @@ input_abs_set_res(dev, y_code, priv->y_res); } -static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) +static void set_input_params(struct psmouse *psmouse, + struct synaptics_data *priv) { + struct input_dev *dev = psmouse->dev; int i; /* Things that apply to both modes */ @@ -1305,8 +1531,9 @@ __set_bit(BTN_BACK, dev->keybit); } - for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) - __set_bit(BTN_0 + i, dev->keybit); + if (!SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) + for (i = 0; i < SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap); i++) + __set_bit(BTN_0 + i, dev->keybit); __clear_bit(EV_REL, dev->evbit); __clear_bit(REL_X, dev->relbit); @@ -1314,10 +1541,15 @@ if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); + if (psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && + !SYN_CAP_EXT_BUTTONS_STICK(priv->ext_cap_10)) + __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); /* Clickpads report only left button */ __clear_bit(BTN_RIGHT, dev->keybit); __clear_bit(BTN_MIDDLE, dev->keybit); - } + } else if (SYN_CAP_CLICKPAD2BTN(priv->ext_cap_0c) || + SYN_CAP_CLICKPAD2BTN2(priv->ext_cap_0c)) + __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); } static ssize_t synaptics_show_disable_gesture(struct psmouse *psmouse, @@ -1485,10 +1717,28 @@ { } }; +static const struct dmi_system_id forcepad_dmi_table[] __initconst = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Folio 1040 G1"), + }, + }, +#endif + { } +}; + void __init synaptics_module_init(void) { impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); broken_olpc_ec = dmi_check_system(olpc_dmi_table); + + /* + * Unfortunately ForcePad capability is not exported over PS/2, + * so we have to resort to checking DMI. 
+ */ + is_forcepad = dmi_check_system(forcepad_dmi_table); } static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) @@ -1538,7 +1788,7 @@ priv->capabilities, priv->ext_cap, priv->ext_cap_0c, priv->board_id, priv->firmware_id); - set_input_params(psmouse->dev, priv); + set_input_params(psmouse, priv); /* * Encode touchpad model so that it can be used to set --- linux-3.13.0.orig/drivers/input/mouse/synaptics.h +++ linux-3.13.0/drivers/input/mouse/synaptics.h @@ -22,6 +22,7 @@ #define SYN_QUE_EXT_CAPAB_0C 0x0c #define SYN_QUE_EXT_MAX_COORDS 0x0d #define SYN_QUE_EXT_MIN_COORDS 0x0f +#define SYN_QUE_MEXT_CAPAB_10 0x10 /* synatics modes */ #define SYN_BIT_ABSOLUTE_MODE (1 << 7) @@ -53,6 +54,7 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) +#define SYN_MEXT_CAP_BIT(m) ((m) & (1 << 1)) /* * The following describes response for the 0x0c query. @@ -77,16 +79,43 @@ * for noise. * 2 0x08 image sensor image sensor tracks 5 fingers, but only * reports 2. + * 2 0x01 uniform clickpad whole clickpad moves instead of being + * hinged at the top. * 2 0x20 report min query 0x0f gives min coord reported */ #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ +#define SYN_CAP_CLICKPAD2BTN2(ex0c) ((ex0c) & 0x200000) /* 2-button ClickPad */ #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) #define SYN_CAP_MIN_DIMENSIONS(ex0c) ((ex0c) & 0x002000) #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) +/* + * The following describes response for the 0x10 query. + * + * byte mask name meaning + * ---- ---- ------- ------------ + * 1 0x01 ext buttons are stick buttons exported in the extended + * capability are actually meant to be used + * by the trackstick (pass-through). + * 1 0x02 SecurePad the touchpad is a SecurePad, so it + * contains a built-in fingerprint reader. + * 1 0xe0 more ext count how many more extended queries are + * available after this one. + * 2 0xff SecurePad width the width of the SecurePad fingerprint + * reader. + * 3 0xff SecurePad height the height of the SecurePad fingerprint + * reader. 
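 * (Editorial aside, not part of the patch: synaptics_more_extended_queries() packs the three response bytes as ext_cap_10 = (buf[0] << 16) | (buf[1] << 8) | buf[2], so byte 1 above lands in bits 16-23. A hypothetical response of 03 20 10 gives ext_cap_10 = 0x032010, i.e. SYN_CAP_EXT_BUTTONS_STICK and SYN_CAP_SECUREPAD both set, with a 0x20 by 0x10 fingerprint-reader area.)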
+ */ +#define SYN_CAP_EXT_BUTTONS_STICK(ex10) ((ex10) & 0x010000) +#define SYN_CAP_SECUREPAD(ex10) ((ex10) & 0x020000) + +#define SYN_CAP_EXT_BUTTON_STICK_L(eb) (!!((eb) & 0x01)) +#define SYN_CAP_EXT_BUTTON_STICK_M(eb) (!!((eb) & 0x02)) +#define SYN_CAP_EXT_BUTTON_STICK_R(eb) (!!((eb) & 0x04)) + /* synaptics modes query bits */ #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) #define SYN_MODE_RATE(m) ((m) & (1 << 6)) @@ -154,6 +183,7 @@ unsigned long int capabilities; /* Capabilities */ unsigned long int ext_cap; /* Extended Capabilities */ unsigned long int ext_cap_0c; /* Ext Caps from 0x0c query */ + unsigned long int ext_cap_10; /* Ext Caps from 0x10 query */ unsigned long int identity; /* Identification */ unsigned int x_res, y_res; /* X/Y resolution in units/mm */ unsigned int x_max, y_max; /* Max coordinates (from FW) */ @@ -167,6 +197,7 @@ bool disable_gesture; /* disable gestures */ struct serio *pt_port; /* Pass-through serio port */ + unsigned char pt_buttons; /* Pass-through buttons */ struct synaptics_mt_state mt_state; /* Current mt finger state */ bool mt_state_lost; /* mt_state may be incorrect */ @@ -177,6 +208,11 @@ */ struct synaptics_hw_state agm; bool agm_pending; /* new AGM packet received */ + + /* ForcePad handling */ + unsigned long press_start; + bool press; + bool report_press; }; void synaptics_module_init(void); --- linux-3.13.0.orig/drivers/input/mousedev.c +++ linux-3.13.0/drivers/input/mousedev.c @@ -67,7 +67,6 @@ struct device dev; struct cdev cdev; bool exist; - bool is_mixdev; struct list_head mixdev_node; bool opened_by_mixdev; @@ -77,6 +76,9 @@ int old_x[4], old_y[4]; int frac_dx, frac_dy; unsigned long touch; + + int (*open_device)(struct mousedev *mousedev); + void (*close_device)(struct mousedev *mousedev); }; enum mousedev_emul { @@ -116,9 +118,6 @@ static struct mousedev *mousedev_mix; static LIST_HEAD(mousedev_mix_list); -static void mixdev_open_devices(void); -static void mixdev_close_devices(void); - #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) @@ -428,9 +427,7 @@ if (retval) return retval; - if (mousedev->is_mixdev) - mixdev_open_devices(); - else if (!mousedev->exist) + if (!mousedev->exist) retval = -ENODEV; else if (!mousedev->open++) { retval = input_open_device(&mousedev->handle); @@ -446,9 +443,7 @@ { mutex_lock(&mousedev->mutex); - if (mousedev->is_mixdev) - mixdev_close_devices(); - else if (mousedev->exist && !--mousedev->open) + if (mousedev->exist && !--mousedev->open) input_close_device(&mousedev->handle); mutex_unlock(&mousedev->mutex); @@ -459,21 +454,29 @@ * stream. Note that this function is called with mousedev_mix->mutex * held. */ -static void mixdev_open_devices(void) +static int mixdev_open_devices(struct mousedev *mixdev) { - struct mousedev *mousedev; + int error; + + error = mutex_lock_interruptible(&mixdev->mutex); + if (error) + return error; - if (mousedev_mix->open++) - return; + if (!mixdev->open++) { + struct mousedev *mousedev; - list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { - if (!mousedev->opened_by_mixdev) { - if (mousedev_open_device(mousedev)) - continue; + list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { + if (!mousedev->opened_by_mixdev) { + if (mousedev_open_device(mousedev)) + continue; - mousedev->opened_by_mixdev = true; + mousedev->opened_by_mixdev = true; + } } } + + mutex_unlock(&mixdev->mutex); + return 0; } /* @@ -481,19 +484,22 @@ * device. 
Note that this function is called with mousedev_mix->mutex * held. */ -static void mixdev_close_devices(void) +static void mixdev_close_devices(struct mousedev *mixdev) { - struct mousedev *mousedev; + mutex_lock(&mixdev->mutex); - if (--mousedev_mix->open) - return; + if (!--mixdev->open) { + struct mousedev *mousedev; - list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { - if (mousedev->opened_by_mixdev) { - mousedev->opened_by_mixdev = false; - mousedev_close_device(mousedev); + list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { + if (mousedev->opened_by_mixdev) { + mousedev->opened_by_mixdev = false; + mousedev_close_device(mousedev); + } } } + + mutex_unlock(&mixdev->mutex); } @@ -522,7 +528,7 @@ mousedev_detach_client(mousedev, client); kfree(client); - mousedev_close_device(mousedev); + mousedev->close_device(mousedev); return 0; } @@ -550,7 +556,7 @@ client->mousedev = mousedev; mousedev_attach_client(mousedev, client); - error = mousedev_open_device(mousedev); + error = mousedev->open_device(mousedev); if (error) goto err_free_client; @@ -861,16 +867,21 @@ if (mixdev) { dev_set_name(&mousedev->dev, "mice"); + + mousedev->open_device = mixdev_open_devices; + mousedev->close_device = mixdev_close_devices; } else { int dev_no = minor; /* Normalize device number if it falls into legacy range */ if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) dev_no -= MOUSEDEV_MINOR_BASE; dev_set_name(&mousedev->dev, "mouse%d", dev_no); + + mousedev->open_device = mousedev_open_device; + mousedev->close_device = mousedev_close_device; } mousedev->exist = true; - mousedev->is_mixdev = mixdev; mousedev->handle.dev = input_get_device(dev); mousedev->handle.name = dev_name(&mousedev->dev); mousedev->handle.handler = handler; @@ -919,7 +930,7 @@ device_del(&mousedev->dev); mousedev_cleanup(mousedev); input_free_minor(MINOR(mousedev->dev.devt)); - if (!mousedev->is_mixdev) + if (mousedev != mousedev_mix) input_unregister_handle(&mousedev->handle); put_device(&mousedev->dev); } --- linux-3.13.0.orig/drivers/input/serio/hyperv-keyboard.c +++ linux-3.13.0/drivers/input/serio/hyperv-keyboard.c @@ -160,7 +160,9 @@ if (info & IS_E0) serio_interrupt(kbd_dev->hv_serio, XTKBD_EMUL0, 0); - + if (info & IS_E1) + serio_interrupt(kbd_dev->hv_serio, + XTKBD_EMUL1, 0); scan_code = __le16_to_cpu(ks_msg->make_code); if (info & IS_BREAK) scan_code |= XTKBD_RELEASE; --- linux-3.13.0.orig/drivers/input/serio/i8042-x86ia64io.h +++ linux-3.13.0/drivers/input/serio/i8042-x86ia64io.h @@ -101,6 +101,12 @@ }, { .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"), + }, + }, + { + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), @@ -146,6 +152,14 @@ }, }, { + /* Medion Akoya E7225 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Medion"), + DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), + }, + }, + { /* Blue FB5601 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "blue"), @@ -402,6 +416,13 @@ }, }, { + /* Acer Aspire 7738 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), + }, + }, + { /* Gericom Bellagio */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), @@ -458,6 +479,13 @@ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, }, + { + /* Avatar AVIU-145A6 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), + DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), + }, + }, { } }; @@ -601,6 
+629,30 @@ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, }, + { + /* Fujitsu A544 laptop */ + /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), + }, + }, + { + /* Fujitsu AH544 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), + }, + }, + { + /* Fujitsu U574 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), + }, + }, { } }; @@ -684,6 +736,35 @@ { } }; +/* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. + */ +static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { + { + /* Gigabyte P35 v2 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), + }, + }, + { + /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "X3"), + }, + }, + { + /* Gigabyte P34 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P34"), + }, + }, + { } +}; + #endif /* CONFIG_X86 */ #ifdef CONFIG_PNP @@ -702,6 +783,17 @@ static char i8042_pnp_kbd_name[32]; static char i8042_pnp_aux_name[32]; +static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size) +{ + strlcpy(dst, "PNP:", dst_size); + + while (id) { + strlcat(dst, " ", dst_size); + strlcat(dst, id->id, dst_size); + id = id->next; + } +} + static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did) { if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1) @@ -718,6 +810,8 @@ strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name)); strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name)); } + i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id, + sizeof(i8042_kbd_firmware_id)); /* Keyboard ports are always supposed to be wakeup-enabled */ device_set_wakeup_enable(&dev->dev, true); @@ -742,6 +836,8 @@ strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name)); strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name)); } + i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id, + sizeof(i8042_aux_firmware_id)); i8042_pnp_aux_devices++; return 0; @@ -964,6 +1060,9 @@ if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; + if (dmi_check_system(i8042_dmi_kbdreset_table)) + i8042_kbdreset = true; + /* * A20 was already enabled during early kernel init. 
But some buggy * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to --- linux-3.13.0.orig/drivers/input/serio/i8042.c +++ linux-3.13.0/drivers/input/serio/i8042.c @@ -67,6 +67,10 @@ module_param_named(notimeout, i8042_notimeout, bool, 0); MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); +static bool i8042_kbdreset; +module_param_named(kbdreset, i8042_kbdreset, bool, 0); +MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port"); + #ifdef CONFIG_X86 static bool i8042_dritek; module_param_named(dritek, i8042_dritek, bool, 0); @@ -87,6 +91,8 @@ #endif static bool i8042_bypass_aux_irq_test; +static char i8042_kbd_firmware_id[128]; +static char i8042_aux_firmware_id[128]; #include "i8042.h" @@ -788,6 +794,16 @@ return -1; /* + * Reset keyboard (needed on some laptops to successfully detect + * touchpad, e.g., some Gigabyte laptop models with Elantech + * touchpads). + */ + if (i8042_kbdreset) { + pr_warn("Attempting to reset device connected to KBD port\n"); + i8042_kbd_write(NULL, (unsigned char) 0xff); + } + +/* * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and * used it for a PCI card or somethig else. */ @@ -1218,6 +1234,8 @@ serio->dev.parent = &i8042_platform_device->dev; strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name)); strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys)); + strlcpy(serio->firmware_id, i8042_kbd_firmware_id, + sizeof(serio->firmware_id)); port->serio = serio; port->irq = I8042_KBD_IRQ; @@ -1244,6 +1262,8 @@ if (idx < 0) { strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name)); strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys)); + strlcpy(serio->firmware_id, i8042_aux_firmware_id, + sizeof(serio->firmware_id)); serio->close = i8042_port_close; } else { snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx); --- linux-3.13.0.orig/drivers/input/serio/serio.c +++ linux-3.13.0/drivers/input/serio/serio.c @@ -451,6 +451,13 @@ return retval; } +static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct serio *serio = to_serio_port(dev); + + return sprintf(buf, "%s\n", serio->firmware_id); +} + static DEVICE_ATTR_RO(type); static DEVICE_ATTR_RO(proto); static DEVICE_ATTR_RO(id); @@ -473,12 +480,14 @@ static DEVICE_ATTR_WO(drvctl); static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); +static DEVICE_ATTR_RO(firmware_id); static struct attribute *serio_device_attrs[] = { &dev_attr_modalias.attr, &dev_attr_description.attr, &dev_attr_drvctl.attr, &dev_attr_bind_mode.attr, + &dev_attr_firmware_id.attr, NULL }; @@ -921,9 +930,14 @@ SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto); SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id); SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra); + SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X", serio->id.type, serio->id.proto, serio->id.id, serio->id.extra); + if (serio->firmware_id[0]) + SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s", + serio->firmware_id); + return 0; } #undef SERIO_ADD_UEVENT_VAR --- linux-3.13.0.orig/drivers/input/serio/serport.c +++ linux-3.13.0/drivers/input/serio/serport.c @@ -21,6 +21,7 @@ #include #include #include +#include MODULE_AUTHOR("Vojtech Pavlik "); MODULE_DESCRIPTION("Input device TTY line discipline"); @@ -196,28 +197,55 @@ return 0; } +static void serport_set_type(struct tty_struct *tty, unsigned long type) +{ + struct 
serport *serport = tty->disc_data; + + serport->id.proto = type & 0x000000ff; + serport->id.id = (type & 0x0000ff00) >> 8; + serport->id.extra = (type & 0x00ff0000) >> 16; +} + /* * serport_ldisc_ioctl() allows to set the port protocol, and device ID */ -static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg) +static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file, + unsigned int cmd, unsigned long arg) { - struct serport *serport = (struct serport*) tty->disc_data; - unsigned long type; - if (cmd == SPIOCSTYPE) { + unsigned long type; + if (get_user(type, (unsigned long __user *) arg)) return -EFAULT; - serport->id.proto = type & 0x000000ff; - serport->id.id = (type & 0x0000ff00) >> 8; - serport->id.extra = (type & 0x00ff0000) >> 16; + serport_set_type(tty, type); + return 0; + } + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t) +static long serport_ldisc_compat_ioctl(struct tty_struct *tty, + struct file *file, + unsigned int cmd, unsigned long arg) +{ + if (cmd == COMPAT_SPIOCSTYPE) { + void __user *uarg = compat_ptr(arg); + compat_ulong_t compat_type; + + if (get_user(compat_type, (compat_ulong_t __user *)uarg)) + return -EFAULT; + serport_set_type(tty, compat_type); return 0; } return -EINVAL; } +#endif static void serport_ldisc_write_wakeup(struct tty_struct * tty) { @@ -241,6 +269,9 @@ .close = serport_ldisc_close, .read = serport_ldisc_read, .ioctl = serport_ldisc_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = serport_ldisc_compat_ioctl, +#endif .receive_buf = serport_ldisc_receive, .write_wakeup = serport_ldisc_write_wakeup }; --- linux-3.13.0.orig/drivers/input/tablet/wacom_sys.c +++ linux-3.13.0/drivers/input/tablet/wacom_sys.c @@ -304,7 +304,7 @@ struct usb_device *dev = interface_to_usbdev(intf); char limit = 0; /* result has to be defined as int for some devices */ - int result = 0; + int result = 0, touch_max = 0; int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0; unsigned char *report; @@ -351,7 +351,8 @@ if (usage == WCM_DESKTOP) { if (finger) { features->device_type = BTN_TOOL_FINGER; - + /* touch device at least supports one touch point */ + touch_max = 1; switch (features->type) { case TABLETPC2FG: features->pktlen = WACOM_PKGLEN_TPC2FG; @@ -504,6 +505,8 @@ } out: + if (!features->touch_max && touch_max) + features->touch_max = touch_max; result = 0; kfree(report); return result; @@ -1194,12 +1197,15 @@ wacom_wac1->features.device_type = BTN_TOOL_PEN; snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); + wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max; + wacom_wac1->shared->type = wacom_wac1->features.type; error = wacom_register_input(wacom1); if (error) goto fail; /* Touch interface */ - if (wacom_wac1->features.touch_max) { + if (wacom_wac1->features.touch_max || + wacom_wac1->features.type == INTUOSHT) { wacom_wac2->features = *((struct wacom_features *)id->driver_info); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; @@ -1214,6 +1220,10 @@ error = wacom_register_input(wacom2); if (error) goto fail; + + if (wacom_wac1->features.type == INTUOSHT && + wacom_wac1->features.touch_max) + wacom_wac->shared->touch_input = wacom_wac2->input; } error = wacom_initialize_battery(wacom); @@ -1322,7 +1332,7 @@ * HID descriptor. If this is the touch interface (wMaxPacketSize * of WACOM_PKGLEN_BBTOUCH3), override the table values. 
*/ - if (features->type >= INTUOS5S && features->type <= INTUOSPL) { + if (features->type >= INTUOS5S && features->type <= INTUOSHT) { if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) { features->device_type = BTN_TOOL_FINGER; features->pktlen = WACOM_PKGLEN_BBTOUCH3; @@ -1393,6 +1403,11 @@ } } + if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) { + if (wacom_wac->features.device_type == BTN_TOOL_FINGER) + wacom_wac->shared->touch_input = wacom_wac->input; + } + return 0; fail5: wacom_destroy_leds(wacom); --- linux-3.13.0.orig/drivers/input/tablet/wacom_wac.c +++ linux-3.13.0/drivers/input/tablet/wacom_wac.c @@ -210,6 +210,62 @@ return 1; } +static int wacom_dtus_irq(struct wacom_wac *wacom) +{ + char *data = wacom->data; + struct input_dev *input = wacom->input; + unsigned short prox, pressure = 0; + + if (data[0] != WACOM_REPORT_DTUS && data[0] != WACOM_REPORT_DTUSPAD) { + dev_dbg(input->dev.parent, + "%s: received unknown report #%d", __func__, data[0]); + return 0; + } else if (data[0] == WACOM_REPORT_DTUSPAD) { + input_report_key(input, BTN_0, (data[1] & 0x01)); + input_report_key(input, BTN_1, (data[1] & 0x02)); + input_report_key(input, BTN_2, (data[1] & 0x04)); + input_report_key(input, BTN_3, (data[1] & 0x08)); + input_report_abs(input, ABS_MISC, + data[1] & 0x0f ? PAD_DEVICE_ID : 0); + /* + * Serial number is required when expresskeys are + * reported through pen interface. + */ + input_event(input, EV_MSC, MSC_SERIAL, 0xf0); + return 1; + } else { + prox = data[1] & 0x80; + if (prox) { + switch ((data[1] >> 3) & 3) { + case 1: /* Rubber */ + wacom->tool[0] = BTN_TOOL_RUBBER; + wacom->id[0] = ERASER_DEVICE_ID; + break; + + case 2: /* Pen */ + wacom->tool[0] = BTN_TOOL_PEN; + wacom->id[0] = STYLUS_DEVICE_ID; + break; + } + } + + input_report_key(input, BTN_STYLUS, data[1] & 0x20); + input_report_key(input, BTN_STYLUS2, data[1] & 0x40); + input_report_abs(input, ABS_X, get_unaligned_be16(&data[3])); + input_report_abs(input, ABS_Y, get_unaligned_be16(&data[5])); + pressure = ((data[1] & 0x03) << 8) | (data[2] & 0xff); + input_report_abs(input, ABS_PRESSURE, pressure); + input_report_key(input, BTN_TOUCH, pressure > 10); + + if (!prox) /* out-prox */ + wacom->id[0] = 0; + input_report_key(input, wacom->tool[0], prox); + input_report_abs(input, ABS_MISC, wacom->id[0]); + input_event(input, EV_MSC, MSC_SERIAL, 1); + return 1; + } +} + static int wacom_graphire_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; @@ -331,7 +387,7 @@ /* Enter report */ if ((data[1] & 0xfc) == 0xc0) { - if (features->quirks == WACOM_QUIRK_MULTI_INPUT) + if (features->quirks & WACOM_QUIRK_MULTI_INPUT) wacom->shared->stylus_in_proximity = true; /* serial number of the tool */ @@ -436,7 +492,7 @@ /* Exit report */ if ((data[1] & 0xfe) == 0x80) { - if (features->quirks == WACOM_QUIRK_MULTI_INPUT) + if (features->quirks & WACOM_QUIRK_MULTI_INPUT) wacom->shared->stylus_in_proximity = false; /* @@ -1151,8 +1207,8 @@ int width, height; if (features->type >= INTUOSPS && features->type <= INTUOSPL) { - width = data[5]; - height = data[6]; + width = data[5] * 100; + height = data[6] * 100; } else { /* * "a" is a scaled-down area which we assume is @@ -1176,10 +1232,16 @@ static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data) { struct input_dev *input = wacom->input; + struct wacom_features *features = &wacom->features; - input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0); + if (features->type == INTUOSHT) { + 
input_report_key(input, BTN_LEFT, (data[1] & 0x02) != 0); + input_report_key(input, BTN_BACK, (data[1] & 0x08) != 0); + } else { + input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0); + input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0); + } input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0); - input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0); input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0); } @@ -1213,13 +1275,23 @@ static int wacom_bpt_pen(struct wacom_wac *wacom) { + struct wacom_features *features = &wacom->features; struct input_dev *input = wacom->input; unsigned char *data = wacom->data; int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0; - if (data[0] != 0x02) + if (data[0] != WACOM_REPORT_PENABLED && data[0] != WACOM_REPORT_USB) return 0; + if (data[0] == WACOM_REPORT_USB) { + if (features->type == INTUOSHT && features->touch_max) { + input_report_switch(wacom->shared->touch_input, + SW_MUTE_DEVICE, data[8] & 0x40); + input_sync(wacom->shared->touch_input); + } + return 0; + } + prox = (data[1] & 0x20) == 0x20; /* @@ -1252,8 +1324,8 @@ * touching and applying pressure; do not report negative * distance. */ - if (data[8] <= wacom->features.distance_max) - d = wacom->features.distance_max - data[8]; + if (data[8] <= features->distance_max) + d = features->distance_max - data[8]; pen = data[1] & 0x01; btn1 = data[1] & 0x02; @@ -1297,13 +1369,20 @@ unsigned char *data = wacom->data; int connected; - if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80) + if (len != WACOM_PKGLEN_WIRELESS || data[0] != WACOM_REPORT_WL) return 0; connected = data[1] & 0x01; if (connected) { int pid, battery; + if ((wacom->shared->type == INTUOSHT) && + wacom->shared->touch_max) { + input_report_switch(wacom->shared->touch_input, + SW_MUTE_DEVICE, data[5] & 0x40); + input_sync(wacom->shared->touch_input); + } + pid = get_unaligned_be16(&data[6]); battery = data[5] & 0x3f; if (wacom->pid != pid) { @@ -1348,6 +1427,10 @@ sync = wacom_dtu_irq(wacom_wac); break; + case DTUS: + sync = wacom_dtus_irq(wacom_wac); + break; + case INTUOS: case INTUOS3S: case INTUOS3: @@ -1391,6 +1474,7 @@ break; case BAMBOO_PT: + case INTUOSHT: sync = wacom_bpt_irq(wacom_wac, len); break; @@ -1459,7 +1543,7 @@ /* these device have multiple inputs */ if (features->type >= WIRELESS || - (features->type >= INTUOS5S && features->type <= INTUOSPL) || + (features->type >= INTUOS5S && features->type <= INTUOSHT) || (features->oVid && features->oPid)) features->quirks |= WACOM_QUIRK_MULTI_INPUT; @@ -1538,7 +1622,7 @@ wacom_abs_set_axis(input_dev, wacom_wac); - switch (wacom_wac->features.type) { + switch (features->type) { case WACOM_MO: input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); /* fall through */ @@ -1749,8 +1833,14 @@ /* fall through */ + case DTUS: case PL: case DTU: + if (features->type == DTUS) { + input_set_capability(input_dev, EV_MSC, MSC_SERIAL); + for (i = 0; i < 3; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + } __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); @@ -1771,33 +1861,50 @@ __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; + case INTUOSHT: + if (features->touch_max && + features->device_type == BTN_TOOL_FINGER) { + input_dev->evbit[0] |= BIT_MASK(EV_SW); + __set_bit(SW_MUTE_DEVICE, input_dev->swbit); + } + /* fall through */ + case BAMBOO_PT: __clear_bit(ABS_MISC, input_dev->absbit); - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); - if 
(features->device_type == BTN_TOOL_FINGER) { - unsigned int flags = INPUT_MT_POINTER; __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); - if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { - input_set_abs_params(input_dev, + if (features->touch_max) { + /* touch interface */ + unsigned int flags = INPUT_MT_POINTER; + + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); + if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { + input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, features->x_max, 0, 0); - input_set_abs_params(input_dev, + input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, features->y_max, 0, 0); + } else { + __set_bit(BTN_TOOL_FINGER, input_dev->keybit); + __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); + flags = 0; + } + input_mt_init_slots(input_dev, features->touch_max, flags); } else { - __set_bit(BTN_TOOL_FINGER, input_dev->keybit); - __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); - flags = 0; + /* buttons/keys only interface */ + __clear_bit(ABS_X, input_dev->absbit); + __clear_bit(ABS_Y, input_dev->absbit); + __clear_bit(BTN_TOUCH, input_dev->keybit); } - input_mt_init_slots(input_dev, features->touch_max, flags); } else if (features->device_type == BTN_TOOL_PEN) { + __set_bit(INPUT_PROP_POINTER, input_dev->propbit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); @@ -2055,6 +2162,9 @@ static const struct wacom_features wacom_features_0xF0 = { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0xFB = + { "Wacom DTU1031", WACOM_PKGLEN_DTUS, 22096, 13960, 511, + 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x57 = { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES}; @@ -2200,6 +2310,17 @@ static const struct wacom_features wacom_features_0x301 = { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x302 = + { "Wacom Intuos PT S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023, + 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + .touch_max = 16 }; +static const struct wacom_features wacom_features_0x303 = + { "Wacom Intuos PT M", WACOM_PKGLEN_BBPEN, 21600, 13500, 1023, + 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, + .touch_max = 16 }; +static const struct wacom_features wacom_features_0x30E = + { "Wacom Intuos S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023, + 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x6004 = { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2337,6 +2458,9 @@ { USB_DEVICE_WACOM(0x10F) }, { USB_DEVICE_WACOM(0x300) }, { USB_DEVICE_WACOM(0x301) }, + { USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_DETAILED(0x303, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_DETAILED(0x30E, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0x304) }, { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) }, @@ -2347,6 +2471,7 @@ { USB_DEVICE_WACOM(0xF8) }, { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xFA) }, + { USB_DEVICE_WACOM(0xFB) }, { USB_DEVICE_WACOM(0x0307) }, 
{ USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_LENOVO(0x6004) }, --- linux-3.13.0.orig/drivers/input/tablet/wacom_wac.h +++ linux-3.13.0/drivers/input/tablet/wacom_wac.h @@ -12,7 +12,7 @@ #include /* maximum packet length for USB devices */ -#define WACOM_PKGLEN_MAX 64 +#define WACOM_PKGLEN_MAX 68 #define WACOM_NAME_MAX 64 @@ -29,6 +29,7 @@ #define WACOM_PKGLEN_WIRELESS 32 #define WACOM_PKGLEN_MTOUCH 62 #define WACOM_PKGLEN_MTTPC 40 +#define WACOM_PKGLEN_DTUS 68 /* wacom data size per MT contact */ #define WACOM_BYTES_PER_MT_PACKET 11 @@ -47,13 +48,17 @@ #define WACOM_REPORT_INTUOSWRITE 6 #define WACOM_REPORT_INTUOSPAD 12 #define WACOM_REPORT_INTUOS5PAD 3 +#define WACOM_REPORT_DTUSPAD 21 #define WACOM_REPORT_TPC1FG 6 #define WACOM_REPORT_TPC2FG 13 #define WACOM_REPORT_TPCMT 13 #define WACOM_REPORT_TPCHID 15 #define WACOM_REPORT_TPCST 16 +#define WACOM_REPORT_DTUS 17 #define WACOM_REPORT_TPC1FGE 18 #define WACOM_REPORT_24HDT 1 +#define WACOM_REPORT_WL 128 +#define WACOM_REPORT_USB 192 /* device quirks */ #define WACOM_QUIRK_MULTI_INPUT 0x0001 @@ -68,6 +73,7 @@ PTU, PL, DTU, + DTUS, INTUOS, INTUOS3S, INTUOS3, @@ -81,6 +87,7 @@ INTUOSPS, INTUOSPM, INTUOSPL, + INTUOSHT, WACOM_21UX2, WACOM_22HD, DTK, @@ -129,6 +136,10 @@ struct wacom_shared { bool stylus_in_proximity; bool touch_down; + /* for wireless device to access USB interfaces */ + unsigned touch_max; + int type; + struct input_dev *touch_input; }; struct wacom_wac { --- linux-3.13.0.orig/drivers/iommu/amd_iommu.c +++ linux-3.13.0/drivers/iommu/amd_iommu.c @@ -3227,14 +3227,16 @@ static void cleanup_domain(struct protection_domain *domain) { - struct iommu_dev_data *dev_data, *next; + struct iommu_dev_data *entry; unsigned long flags; write_lock_irqsave(&amd_iommu_devtable_lock, flags); - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { - __detach_device(dev_data); - atomic_set(&dev_data->bind, 0); + while (!list_empty(&domain->dev_list)) { + entry = list_first_entry(&domain->dev_list, + struct iommu_dev_data, list); + __detach_device(entry); + atomic_set(&entry->bind, 0); } write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); @@ -3999,7 +4001,7 @@ iommu_flush_dte(iommu, devid); if (devid != alias) { irq_lookup_table[alias] = table; - set_dte_irq_entry(devid, table); + set_dte_irq_entry(alias, table); iommu_flush_dte(iommu, alias); } --- linux-3.13.0.orig/drivers/iommu/arm-smmu.c +++ linux-3.13.0/drivers/iommu/arm-smmu.c @@ -78,7 +78,6 @@ #define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) #define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) -#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t)) /* Stage-1 PTE */ #define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) @@ -190,6 +189,9 @@ #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2)) #define CBAR_VMID_SHIFT 0 #define CBAR_VMID_MASK 0xff +#define CBAR_S1_BPSHCFG_SHIFT 8 +#define CBAR_S1_BPSHCFG_MASK 3 +#define CBAR_S1_BPSHCFG_NSH 3 #define CBAR_S1_MEMATTR_SHIFT 12 #define CBAR_S1_MEMATTR_MASK 0xf #define CBAR_S1_MEMATTR_WB 0xf @@ -392,7 +394,7 @@ struct arm_smmu_cfg root_cfg; phys_addr_t output_mask; - struct mutex lock; + spinlock_t lock; }; static DEFINE_SPINLOCK(arm_smmu_devices_lock); @@ -631,6 +633,28 @@ return IRQ_HANDLED; } +static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, + size_t size) +{ + unsigned long offset = (unsigned long)addr & ~PAGE_MASK; + + + /* Ensure new page tables are visible to the hardware walker */ + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { + 
dsb(); + } else { + /* + * If the SMMU can't walk tables in the CPU caches, treat them + * like non-coherent DMA since we need to flush the new entries + * all the way out to memory. There's no possibility of + * recursion here as the SMMU table walker will not be wired + * through another SMMU. + */ + dma_map_page(smmu->dev, virt_to_page(addr), offset, size, + DMA_TO_DEVICE); + } +} + static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) { u32 reg; @@ -649,11 +673,16 @@ if (smmu->version == 1) reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT; - /* Use the weakest memory type, so it is overridden by the pte */ - if (stage1) - reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); - else + /* + * Use the weakest shareability/memory types, so they are + * overridden by the ttbcr/pte. + */ + if (stage1) { + reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) | + (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT); + } else { reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT; + } writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx)); if (smmu->version > 1) { @@ -714,6 +743,8 @@ } /* TTBR0 */ + arm_smmu_flush_pgtable(smmu, root_cfg->pgd, + PTRS_PER_PGD * sizeof(pgd_t)); reg = __pa(root_cfg->pgd); writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32; @@ -762,8 +793,11 @@ reg |= TTBCR_EAE | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | - (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) | - (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); + (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); + + if (!stage1) + reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); + writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); /* MAIR0 (stage-1 only) */ @@ -900,7 +934,7 @@ goto out_free_domain; smmu_domain->root_cfg.pgd = pgd; - mutex_init(&smmu_domain->lock); + spin_lock_init(&smmu_domain->lock); domain->priv = smmu_domain; return 0; @@ -912,7 +946,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd) { pgtable_t table = pmd_pgtable(*pmd); - pgtable_page_dtor(table); __free_page(table); } @@ -1137,7 +1170,7 @@ * Sanity check the domain. We don't currently support domains * that cross between different SMMU chains. */ - mutex_lock(&smmu_domain->lock); + spin_lock(&smmu_domain->lock); if (!smmu_domain->leaf_smmu) { /* Now that we have a master, we can finalise the domain */ ret = arm_smmu_init_domain_context(domain, dev); @@ -1152,7 +1185,7 @@ dev_name(device_smmu->dev)); goto err_unlock; } - mutex_unlock(&smmu_domain->lock); + spin_unlock(&smmu_domain->lock); /* Looks ok, so add the device to the domain */ master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node); @@ -1162,7 +1195,7 @@ return arm_smmu_domain_add_master(smmu_domain, master); err_unlock: - mutex_unlock(&smmu_domain->lock); + spin_unlock(&smmu_domain->lock); return ret; } @@ -1176,23 +1209,6 @@ arm_smmu_domain_remove_master(smmu_domain, master); } -static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, - size_t size) -{ - unsigned long offset = (unsigned long)addr & ~PAGE_MASK; - - /* - * If the SMMU can't walk tables in the CPU caches, treat them - * like non-coherent DMA since we need to flush the new entries - * all the way out to memory. There's no possibility of recursion - * here as the SMMU table walker will not be wired through another - * SMMU. 
- */ - if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) - dma_map_page(smmu->dev, virt_to_page(addr), offset, size, - DMA_TO_DEVICE); -} - static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, unsigned long end) { @@ -1209,16 +1225,11 @@ if (pmd_none(*pmd)) { /* Allocate a new set of tables */ - pgtable_t table = alloc_page(PGALLOC_GFP); + pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); if (!table) return -ENOMEM; - arm_smmu_flush_pgtable(smmu, page_address(table), - ARM_SMMU_PTE_HWTABLE_SIZE); - if (!pgtable_page_ctor(table)) { - __free_page(table); - return -ENOMEM; - } + arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); pmd_populate(NULL, pmd, table); arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); } @@ -1314,9 +1325,15 @@ #ifndef __PAGETABLE_PMD_FOLDED if (pud_none(*pud)) { - pmd = pmd_alloc_one(NULL, addr); + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); if (!pmd) return -ENOMEM; + + arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); + pud_populate(NULL, pud, pmd); + arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); + + pmd += pmd_index(addr); } else #endif pmd = pmd_offset(pud, addr); @@ -1325,8 +1342,6 @@ next = pmd_addr_end(addr, end); ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, flags, stage); - pud_populate(NULL, pud, pmd); - arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); phys += next - addr; } while (pmd++, addr = next, addr < end); @@ -1343,9 +1358,15 @@ #ifndef __PAGETABLE_PUD_FOLDED if (pgd_none(*pgd)) { - pud = pud_alloc_one(NULL, addr); + pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); if (!pud) return -ENOMEM; + + arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); + pgd_populate(NULL, pgd, pud); + arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); + + pud += pud_index(addr); } else #endif pud = pud_offset(pgd, addr); @@ -1354,8 +1375,6 @@ next = pud_addr_end(addr, end); ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, flags, stage); - pgd_populate(NULL, pud, pgd); - arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); phys += next - addr; } while (pud++, addr = next, addr < end); @@ -1394,7 +1413,7 @@ if (paddr & ~output_mask) return -ERANGE; - mutex_lock(&smmu_domain->lock); + spin_lock(&smmu_domain->lock); pgd += pgd_index(iova); end = iova + size; do { @@ -1410,11 +1429,7 @@ } while (pgd++, iova != end); out_unlock: - mutex_unlock(&smmu_domain->lock); - - /* Ensure new page tables are visible to the hardware walker */ - if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) - dsb(); + spin_unlock(&smmu_domain->lock); return ret; } --- linux-3.13.0.orig/drivers/iommu/fsl_pamu_domain.c +++ linux-3.13.0/drivers/iommu/fsl_pamu_domain.c @@ -1050,7 +1050,7 @@ struct iommu_group *group = NULL; struct pci_dev *pdev; const u32 *prop; - int ret, len; + int ret = 0, len; /* * For platform devices we allocate a separate group for @@ -1073,7 +1073,13 @@ if (!group || IS_ERR(group)) return PTR_ERR(group); - ret = iommu_group_add_device(group, dev); + /* + * Check if device has already been added to an iommu group. + * Group could have already been created for a PCI device in + * the iommu_group_get_for_dev path. 
+ */ + if (!dev->iommu_group) + ret = iommu_group_add_device(group, dev); iommu_group_put(group); return ret; --- linux-3.13.0.orig/drivers/iommu/intel-iommu.c +++ linux-3.13.0/drivers/iommu/intel-iommu.c @@ -917,7 +917,7 @@ /* If range covers entire pagetable, free it */ if (!(start_pfn > level_pfn || - last_pfn < level_pfn + level_size(level))) { + last_pfn < level_pfn + level_size(level) - 1)) { dma_clear_pte(pte); domain_flush_cache(domain, pte, sizeof(*pte)); free_pgtable_page(level_pte); @@ -1796,7 +1796,7 @@ struct dma_pte *first_pte = NULL, *pte = NULL; phys_addr_t uninitialized_var(pteval); int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - unsigned long sg_res; + unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; @@ -1807,10 +1807,8 @@ prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; - if (sg) - sg_res = 0; - else { - sg_res = nr_pages + 1; + if (!sg) { + sg_res = nr_pages; pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; } --- linux-3.13.0.orig/drivers/irqchip/irq-armada-370-xp.c +++ linux-3.13.0/drivers/irqchip/irq-armada-370-xp.c @@ -132,8 +132,7 @@ struct msi_desc *desc) { struct msi_msg msg; - irq_hw_number_t hwirq; - int virq; + int virq, hwirq; hwirq = armada_370_xp_alloc_msi(); if (hwirq < 0) @@ -159,8 +158,19 @@ unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); + unsigned long hwirq = d->hwirq; + irq_dispose_mapping(irq); - armada_370_xp_free_msi(d->hwirq); + armada_370_xp_free_msi(hwirq); +} + +static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, + int nvec, int type) +{ + /* We support MSI, but not MSI-X */ + if (type == PCI_CAP_ID_MSI) + return 0; + return -EINVAL; } static struct irq_chip armada_370_xp_msi_irq_chip = { @@ -201,6 +211,7 @@ msi_chip->setup_irq = armada_370_xp_setup_msi_irq; msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; + msi_chip->check_device = armada_370_xp_check_msi_device; msi_chip->of_node = node; armada_370_xp_msi_domain = @@ -381,7 +392,7 @@ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) & PCI_MSI_DOORBELL_MASK; - writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base + + writel(~msimask, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); for (msinr = PCI_MSI_DOORBELL_START; @@ -407,7 +418,7 @@ ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) & IPI_DOORBELL_MASK; - writel(~IPI_DOORBELL_MASK, per_cpu_int_base + + writel(~ipimask, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); /* Handle all pending doorbells */ --- linux-3.13.0.orig/drivers/irqchip/irq-gic.c +++ linux-3.13.0/drivers/irqchip/irq-gic.c @@ -42,6 +42,7 @@ #include #include +#include #include #include #include @@ -246,10 +247,14 @@ bool force) { void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); - unsigned int shift = (gic_irq(d) % 4) * 8; - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu, shift = (gic_irq(d) % 4) * 8; u32 val, mask, bit; + if (!force) + cpu = cpumask_any_and(mask_val, cpu_online_mask); + else + cpu = cpumask_first(mask_val); + if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; @@ -418,6 +423,7 @@ void __iomem *dist_base = gic_data_dist_base(gic); void __iomem *base = gic_data_cpu_base(gic); unsigned int cpu_mask, cpu = smp_processor_id(); + unsigned int ctrl_mask; int i; /* @@ -449,13 +455,29 @@ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4); writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); - writel_relaxed(1, base + GIC_CPU_CTRL); + + ctrl_mask = readl(base + GIC_CPU_CTRL); + + /* 
Mask out the gic v2 bypass bits */ + ctrl_mask &= 0x1e0; + + /* Enable group 0 */ + ctrl_mask |= 0x1; + writel_relaxed(ctrl_mask, base + GIC_CPU_CTRL); } void gic_cpu_if_down(void) { + unsigned int ctrl_mask; void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); - writel_relaxed(0, cpu_base + GIC_CPU_CTRL); + + ctrl_mask = readl(cpu_base + GIC_CPU_CTRL); + /* + * Disable grp enable bit, leave the bypass bits alone as changing + * them could leave the system unstable + */ + ctrl_mask &= 0x1e0; + writel_relaxed(ctrl_mask, cpu_base + GIC_CPU_CTRL); } #ifdef CONFIG_CPU_PM @@ -566,6 +588,7 @@ { int i; u32 *ptr; + unsigned int ctrl_mask; void __iomem *dist_base; void __iomem *cpu_base; @@ -590,7 +613,15 @@ writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4); writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); - writel_relaxed(1, cpu_base + GIC_CPU_CTRL); + + ctrl_mask = readl(cpu_base + GIC_CPU_CTRL); + + /* Mask out the gic v2 bypass bits */ + ctrl_mask &= 0x1e0; + + /* Enable group 0 */ + ctrl_mask |= 0x1; + writel_relaxed(ctrl_mask, cpu_base + GIC_CPU_CTRL); } static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) @@ -899,7 +930,9 @@ } for_each_possible_cpu(cpu) { - unsigned long offset = percpu_offset * cpu_logical_map(cpu); + u32 mpidr = cpu_logical_map(cpu); + u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + unsigned long offset = percpu_offset * core_id; *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; } @@ -1006,6 +1039,7 @@ } IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); +IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); --- linux-3.13.0.orig/drivers/irqchip/irq-metag-ext.c +++ linux-3.13.0/drivers/irqchip/irq-metag-ext.c @@ -515,7 +515,7 @@ * one cpu (the interrupt code doesn't support it), so we just * pick the first cpu we find in 'cpumask'. */ - cpu = cpumask_any(cpumask); + cpu = cpumask_any_and(cpumask, cpu_online_mask); thread = cpu_2_hwthread_id[cpu]; metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); --- linux-3.13.0.orig/drivers/irqchip/irq-metag.c +++ linux-3.13.0/drivers/irqchip/irq-metag.c @@ -201,7 +201,7 @@ * one cpu (the interrupt code doesn't support it), so we just * pick the first cpu we find in 'cpumask'. */ - cpu = cpumask_any(cpumask); + cpu = cpumask_any_and(cpumask, cpu_online_mask); thread = cpu_2_hwthread_id[cpu]; metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), --- linux-3.13.0.orig/drivers/irqchip/irq-orion.c +++ linux-3.13.0/drivers/irqchip/irq-orion.c @@ -111,7 +111,8 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) { struct irq_domain *d = irq_get_handler_data(irq); - struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq); + + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0); u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & gc->mask_cache; @@ -123,6 +124,19 @@ } } +/* + * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register. + * To avoid interrupt events on stale irqs, we clear them before unmask. 
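Because the cause register latches events whether or not they are masked, unmasking first would deliver any stale event immediately; the startup hook below therefore acks before it unmasks. A small userspace model of that ordering (the register layout is hypothetical):

#include <stdint.h>
#include <stdio.h>

struct bridge_regs {
        uint32_t cause;   /* latched events, asserted regardless of mask */
        uint32_t mask;    /* 1 = event is delivered as an interrupt */
};

/* Ack (clear the latched cause bit) before unmasking, so an event latched
 * while the line was unused does not fire the moment it is enabled. */
static void bridge_irq_startup(struct bridge_regs *r, unsigned int bit)
{
        r->cause &= ~(1u << bit);
        r->mask  |=  (1u << bit);
}

int main(void)
{
        struct bridge_regs r = { .cause = 1u << 3, .mask = 0 };

        bridge_irq_startup(&r, 3);
        printf("cause=0x%x mask=0x%x\n", (unsigned)r.cause, (unsigned)r.mask);
        return 0;
}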
+ */ +static unsigned int orion_bridge_irq_startup(struct irq_data *d) +{ + struct irq_chip_type *ct = irq_data_get_chip_type(d); + + ct->chip.irq_ack(d); + ct->chip.irq_unmask(d); + return 0; +} + static int __init orion_bridge_irq_init(struct device_node *np, struct device_node *parent) { @@ -143,7 +157,7 @@ } ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, - handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); + handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE); if (ret) { pr_err("%s: unable to alloc irq domain gc\n", np->name); return ret; @@ -176,12 +190,14 @@ gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE; gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK; + gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup; gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit; gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; - /* mask all interrupts */ + /* mask and clear all interrupts */ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE); irq_set_handler_data(irq, domain); irq_set_chained_handler(irq, orion_bridge_irq_handler); --- linux-3.13.0.orig/drivers/irqchip/spear-shirq.c +++ linux-3.13.0/drivers/irqchip/spear-shirq.c @@ -125,7 +125,7 @@ }; static struct spear_shirq spear320_shirq_ras3 = { - .irq_nr = 3, + .irq_nr = 7, .irq_bit_off = 0, .invalid_irq = 1, .regs = { --- linux-3.13.0.orig/drivers/isdn/isdnloop/isdnloop.c +++ linux-3.13.0/drivers/isdn/isdnloop/isdnloop.c @@ -518,9 +518,9 @@ static void isdnloop_fake_err(isdnloop_card *card) { - char buf[60]; + char buf[64]; - sprintf(buf, "E%s", card->omsg); + snprintf(buf, sizeof(buf), "E%s", card->omsg); isdnloop_fake(card, buf, -1); isdnloop_fake(card, "NAK", -1); } @@ -903,6 +903,8 @@ case 7: /* 0x;EAZ */ p += 3; + if (strlen(p) >= sizeof(card->eazlist[0])) + break; strcpy(card->eazlist[ch - 1], p); break; case 8: @@ -1070,6 +1072,12 @@ return -EBUSY; if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef))) return -EFAULT; + + for (i = 0; i < 3; i++) { + if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i]))) + return -EINVAL; + } + spin_lock_irqsave(&card->isdnloop_lock, flags); switch (sdef.ptype) { case ISDN_PTYPE_EURO: @@ -1127,7 +1135,7 @@ { ulong a; int i; - char cbuf[60]; + char cbuf[80]; isdn_ctrl cmd; isdnloop_cdef cdef; @@ -1192,7 +1200,6 @@ break; if ((c->arg & 255) < ISDNLOOP_BCH) { char *p; - char dial[50]; char dcode[4]; a = c->arg; @@ -1204,10 +1211,10 @@ } else /* Normal Dial */ strcpy(dcode, "CAL"); - strcpy(dial, p); - sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), - dcode, dial, c->parm.setup.si1, - c->parm.setup.si2, c->parm.setup.eazmsn); + snprintf(cbuf, sizeof(cbuf), + "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), + dcode, p, c->parm.setup.si1, + c->parm.setup.si2, c->parm.setup.eazmsn); i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card); } break; --- linux-3.13.0.orig/drivers/leds/dell-led.c +++ linux-3.13.0/drivers/leds/dell-led.c @@ -15,12 +15,15 @@ #include #include #include +#include +#include MODULE_AUTHOR("Louis Davis/Jim Dailey"); MODULE_DESCRIPTION("Dell LED Control Driver"); MODULE_LICENSE("GPL"); #define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396" +#define DELL_APP_GUID "A80593CE-A997-11DA-B012-B622A1EF5492" MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID); /* Error Result Codes: */ @@ -39,6 +42,149 @@ #define CMD_LED_OFF 17 #define CMD_LED_BLINK 18 +struct app_wmi_args { + u16 class; + u16 selector; + u32 arg1; + u32 arg2; + u32 arg3; + u32 
arg4; + u32 res1; + u32 res2; + u32 res3; + u32 res4; + char dummy[92]; +}; + +#define GLOBAL_MIC_MUTE_ENABLE 0x364 +#define GLOBAL_MIC_MUTE_DISABLE 0x365 + +struct dell_bios_data_token { + u16 tokenid; + u16 location; + u16 value; +}; + +struct __attribute__ ((__packed__)) dell_bios_calling_interface { + struct dmi_header header; + u16 cmd_io_addr; + u8 cmd_io_code; + u32 supported_cmds; + struct dell_bios_data_token damap[]; +}; + +static struct dell_bios_data_token dell_mic_tokens[2]; + +static int dell_wmi_perform_query(struct app_wmi_args *args) +{ + struct app_wmi_args *bios_return; + union acpi_object *obj; + struct acpi_buffer input; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + acpi_status status; + u32 rc = -EINVAL; + + input.length = 128; + input.pointer = args; + + status = wmi_evaluate_method(DELL_APP_GUID, 0, 1, &input, &output); + if (!ACPI_SUCCESS(status)) + goto err_out0; + + obj = output.pointer; + if (!obj) + goto err_out0; + + if (obj->type != ACPI_TYPE_BUFFER) + goto err_out1; + + bios_return = (struct app_wmi_args *)obj->buffer.pointer; + rc = bios_return->res1; + if (rc) + goto err_out1; + + memcpy(args, bios_return, sizeof(struct app_wmi_args)); + rc = 0; + + err_out1: + kfree(obj); + err_out0: + return rc; +} + +static void __init find_micmute_tokens(const struct dmi_header *dm, void *dummy) +{ + struct dell_bios_calling_interface *calling_interface; + struct dell_bios_data_token *token; + int token_size = sizeof(struct dell_bios_data_token); + int i = 0; + + if (dm->type == 0xda && dm->length > 17) { + calling_interface = container_of(dm, + struct dell_bios_calling_interface, header); + + token = &calling_interface->damap[i]; + while (token->tokenid != 0xffff) { + if (token->tokenid == GLOBAL_MIC_MUTE_DISABLE) + memcpy(&dell_mic_tokens[0], token, token_size); + else if (token->tokenid == GLOBAL_MIC_MUTE_ENABLE) + memcpy(&dell_mic_tokens[1], token, token_size); + + i++; + token = &calling_interface->damap[i]; + } + } +} + +static int dell_micmute_led_set(int state) +{ + struct app_wmi_args args; + struct dell_bios_data_token *token; + + if (!wmi_has_guid(DELL_APP_GUID)) + return -ENODEV; + + if (state == 0 || state == 1) + token = &dell_mic_tokens[state]; + else + return -EINVAL; + + memset(&args, 0, sizeof(struct app_wmi_args)); + + args.class = 1; + args.arg1 = token->location; + args.arg2 = token->value; + + dell_wmi_perform_query(&args); + + return state; +} + +int dell_app_wmi_led_set(int whichled, int on) +{ + int state = 0; + + switch (whichled) { + case DELL_LED_MICMUTE: + state = dell_micmute_led_set(on); + break; + default: + pr_warn("led type %x is not supported\n", whichled); + break; + } + + return state; +} +EXPORT_SYMBOL_GPL(dell_app_wmi_led_set); + +static int __init dell_micmute_led_init(void) +{ + memset(dell_mic_tokens, 0, sizeof(struct dell_bios_data_token) * 2); + dmi_walk(find_micmute_tokens, NULL); + + return 0; +} + struct bios_args { unsigned char length; unsigned char result_code; @@ -181,21 +327,32 @@ { int error = 0; - if (!wmi_has_guid(DELL_LED_BIOS_GUID)) + if (!wmi_has_guid(DELL_LED_BIOS_GUID) && !wmi_has_guid(DELL_APP_GUID)) return -ENODEV; - error = led_off(); - if (error != 0) - return -ENODEV; + if (wmi_has_guid(DELL_APP_GUID)) + error = dell_micmute_led_init(); - return led_classdev_register(NULL, &dell_led); + if (wmi_has_guid(DELL_LED_BIOS_GUID)) { + error = led_off(); + if (error != 0) + return -ENODEV; + + error = led_classdev_register(NULL, &dell_led); + } + + return error; } static void __exit 
dell_led_exit(void) { - led_classdev_unregister(&dell_led); + int error = 0; - led_off(); + if (wmi_has_guid(DELL_LED_BIOS_GUID)) { + error = led_off(); + if (error == 0) + led_classdev_unregister(&dell_led); + } } module_init(dell_led_init); --- linux-3.13.0.orig/drivers/leds/leds-gpio.c +++ linux-3.13.0/drivers/leds/leds-gpio.c @@ -224,6 +224,8 @@ { .compatible = "gpio-leds", }, {}, }; + +MODULE_DEVICE_TABLE(of, of_gpio_leds_match); #else /* CONFIG_OF_GPIO */ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev) { --- linux-3.13.0.orig/drivers/md/Kconfig +++ linux-3.13.0/drivers/md/Kconfig @@ -176,8 +176,12 @@ source "drivers/md/bcache/Kconfig" +config BLK_DEV_DM_BUILTIN + boolean + config BLK_DEV_DM tristate "Device mapper support" + select BLK_DEV_DM_BUILTIN ---help--- Device-mapper is a low level volume manager. It works by allowing people to specify mappings for ranges of logical sectors. Various --- linux-3.13.0.orig/drivers/md/Makefile +++ linux-3.13.0/drivers/md/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_BCACHE) += bcache/ obj-$(CONFIG_BLK_DEV_MD) += md-mod.o obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o +obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o obj-$(CONFIG_DM_BUFIO) += dm-bufio.o obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o obj-$(CONFIG_DM_CRYPT) += dm-crypt.o --- linux-3.13.0.orig/drivers/md/bcache/bcache.h +++ linux-3.13.0/drivers/md/bcache/bcache.h @@ -209,7 +209,9 @@ #define GC_MARK_RECLAIMABLE 0 #define GC_MARK_DIRTY 1 #define GC_MARK_METADATA 2 -BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13); +#define GC_SECTORS_USED_SIZE 13 +#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE)) +BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1); #include "journal.h" --- linux-3.13.0.orig/drivers/md/bcache/bset.c +++ linux-3.13.0/drivers/md/bcache/bset.c @@ -955,7 +955,7 @@ *i = iter->data[--iter->used]; } -static void btree_sort_fixup(struct btree_iter *iter) +static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp) { while (iter->used > 1) { struct btree_iter_set *top = iter->data, *i = top + 1; @@ -983,9 +983,22 @@ } else { /* can't happen because of comparison func */ BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k))); - bch_cut_back(&START_KEY(i->k), top->k); + + if (bkey_cmp(i->k, top->k) < 0) { + bkey_copy(tmp, top->k); + + bch_cut_back(&START_KEY(i->k), tmp); + bch_cut_front(i->k, top->k); + heap_sift(iter, 0, btree_iter_cmp); + + return tmp; + } else { + bch_cut_back(&START_KEY(i->k), top->k); + } } } + + return NULL; } static void btree_mergesort(struct btree *b, struct bset *out, @@ -993,15 +1006,20 @@ bool fixup, bool remove_stale) { struct bkey *k, *last = NULL; + BKEY_PADDED(k) tmp; bool (*bad)(struct btree *, const struct bkey *) = remove_stale ? 
bch_ptr_bad : bch_ptr_invalid; while (!btree_iter_end(iter)) { if (fixup && !b->level) - btree_sort_fixup(iter); + k = btree_sort_fixup(iter, &tmp.k); + else + k = NULL; + + if (!k) + k = bch_btree_iter_next(iter); - k = bch_btree_iter_next(iter); if (bad(b, k)) continue; --- linux-3.13.0.orig/drivers/md/bcache/btree.c +++ linux-3.13.0/drivers/md/bcache/btree.c @@ -1163,7 +1163,7 @@ /* guard against overflow */ SET_GC_SECTORS_USED(g, min_t(unsigned, GC_SECTORS_USED(g) + KEY_SIZE(k), - (1 << 14) - 1)); + MAX_GC_SECTORS_USED)); BUG_ON(!GC_SECTORS_USED(g)); } --- linux-3.13.0.orig/drivers/md/bcache/super.c +++ linux-3.13.0/drivers/md/bcache/super.c @@ -1023,6 +1023,9 @@ */ atomic_set(&dc->count, 1); + if (bch_cached_dev_writeback_start(dc)) + return -ENOMEM; + if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { bch_sectors_dirty_init(dc); atomic_set(&dc->has_dirty, 1); @@ -1052,7 +1055,8 @@ struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); cancel_delayed_work_sync(&dc->writeback_rate_update); - kthread_stop(dc->writeback_thread); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) + kthread_stop(dc->writeback_thread); mutex_lock(&bch_register_lock); --- linux-3.13.0.orig/drivers/md/bcache/writeback.c +++ linux-3.13.0/drivers/md/bcache/writeback.c @@ -239,7 +239,7 @@ if (KEY_START(&w->key) != dc->last_read || jiffies_to_msecs(delay) > 50) while (!kthread_should_stop() && delay) - delay = schedule_timeout_uninterruptible(delay); + delay = schedule_timeout_interruptible(delay); dc->last_read = KEY_OFFSET(&w->key); @@ -436,7 +436,7 @@ while (delay && !kthread_should_stop() && !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) - delay = schedule_timeout_uninterruptible(delay); + delay = schedule_timeout_interruptible(delay); } } @@ -478,7 +478,7 @@ dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk); } -int bch_cached_dev_writeback_init(struct cached_dev *dc) +void bch_cached_dev_writeback_init(struct cached_dev *dc) { sema_init(&dc->in_flight, 64); init_rwsem(&dc->writeback_lock); @@ -494,14 +494,20 @@ dc->writeback_rate_d_term = 30; dc->writeback_rate_p_term_inverse = 6000; + INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); +} + +int bch_cached_dev_writeback_start(struct cached_dev *dc) +{ dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); if (IS_ERR(dc->writeback_thread)) return PTR_ERR(dc->writeback_thread); - INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); + bch_writeback_queue(dc); + return 0; } --- linux-3.13.0.orig/drivers/md/bcache/writeback.h +++ linux-3.13.0/drivers/md/bcache/writeback.h @@ -63,7 +63,8 @@ static inline void bch_writeback_queue(struct cached_dev *dc) { - wake_up_process(dc->writeback_thread); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) + wake_up_process(dc->writeback_thread); } static inline void bch_writeback_add(struct cached_dev *dc) @@ -85,6 +86,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); void bch_sectors_dirty_init(struct cached_dev *dc); -int bch_cached_dev_writeback_init(struct cached_dev *); +void bch_cached_dev_writeback_init(struct cached_dev *); +int bch_cached_dev_writeback_start(struct cached_dev *); #endif --- linux-3.13.0.orig/drivers/md/dm-bufio.c +++ linux-3.13.0/drivers/md/dm-bufio.c @@ -463,6 +463,7 @@ c->n_buffers[dirty]++; b->list_mode = dirty; list_move(&b->lru_list, &c->lru[dirty]); + b->last_accessed = jiffies; } 
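The last_accessed timestamp set just above is what the shrinker's age check further down in this file compares against; a minimal model of that check, assuming jiffies-style unsigned tick counters:

#include <stdbool.h>
#include <stdio.h>

/* A buffer is a cleanup candidate only once it has gone untouched for at
 * least max_age ticks; unsigned subtraction keeps the test correct across
 * counter wraparound. */
static bool buffer_is_old(unsigned long now, unsigned long last_accessed,
                          unsigned long max_age)
{
        return now - last_accessed >= max_age;
}

int main(void)
{
        printf("%d\n", buffer_is_old(1000, 990, 50));  /* 0: touched recently */
        printf("%d\n", buffer_is_old(1000, 900, 50));  /* 1: eligible for cleanup */
        return 0;
}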
/*---------------------------------------------------------------- @@ -529,6 +530,19 @@ end_io(&b->bio, r); } +static void inline_endio(struct bio *bio, int error) +{ + bio_end_io_t *end_fn = bio->bi_private; + + /* + * Reset the bio to free any attached resources + * (e.g. bio integrity profiles). + */ + bio_reset(bio); + + end_fn(bio, error); +} + static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, bio_end_io_t *end_io) { @@ -540,7 +554,12 @@ b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; b->bio.bi_sector = block << b->c->sectors_per_block_bits; b->bio.bi_bdev = b->c->bdev; - b->bio.bi_end_io = end_io; + b->bio.bi_end_io = inline_endio; + /* + * Use of .bi_private isn't a problem here because + * the dm_buffer's inline bio is local to bufio. + */ + b->bio.bi_private = end_io; /* * We assume that if len >= PAGE_SIZE ptr is page-aligned. @@ -1417,9 +1436,9 @@ /* * Test if the buffer is unused and too old, and commit it. - * At if noio is set, we must not do any I/O because we hold - * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to - * different bufio client. + * And if GFP_NOFS is used, we must not do any I/O because we hold + * dm_bufio_clients_lock and we would risk deadlock if the I/O gets + * rerouted to different bufio client. */ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, unsigned long max_jiffies) @@ -1427,7 +1446,7 @@ if (jiffies - b->last_accessed < max_jiffies) return 0; - if (!(gfp & __GFP_IO)) { + if (!(gfp & __GFP_FS)) { if (test_bit(B_READING, &b->state) || test_bit(B_WRITING, &b->state) || test_bit(B_DIRTY, &b->state)) @@ -1455,9 +1474,9 @@ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { freed += __cleanup_old_buffer(b, gfp_mask, 0); if (!--nr_to_scan) - break; + return freed; + dm_bufio_cond_resched(); } - dm_bufio_cond_resched(); } return freed; } @@ -1469,7 +1488,7 @@ unsigned long freed; c = container_of(shrink, struct dm_bufio_client, shrinker); - if (sc->gfp_mask & __GFP_IO) + if (sc->gfp_mask & __GFP_FS) dm_bufio_lock(c); else if (!dm_bufio_trylock(c)) return SHRINK_STOP; @@ -1486,7 +1505,7 @@ unsigned long count; c = container_of(shrink, struct dm_bufio_client, shrinker); - if (sc->gfp_mask & __GFP_IO) + if (sc->gfp_mask & __GFP_FS) dm_bufio_lock(c); else if (!dm_bufio_trylock(c)) return 0; @@ -1511,7 +1530,7 @@ BUG_ON(block_size < 1 << SECTOR_SHIFT || (block_size & (block_size - 1))); - c = kmalloc(sizeof(*c), GFP_KERNEL); + c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) { r = -ENOMEM; goto bad_client; --- linux-3.13.0.orig/drivers/md/dm-builtin.c +++ linux-3.13.0/drivers/md/dm-builtin.c @@ -0,0 +1,48 @@ +#include "dm.h" + +/* + * The kobject release method must not be placed in the module itself, + * otherwise we are subject to module unload races. + * + * The release method is called when the last reference to the kobject is + * dropped. It may be called by any other kernel code that drops the last + * reference. + * + * The release method suffers from module unload race. We may prevent the + * module from being unloaded at the start of the release method (using + * increased module reference count or synchronizing against the release + * method), however there is no way to prevent the module from being + * unloaded at the end of the release method. + * + * If this code were placed in the dm module, the following race may + * happen: + * 1. Some other process takes a reference to dm kobject + * 2. The user issues ioctl function to unload the dm device + * 3. 
dm_sysfs_exit calls kobject_put, however the object is not released + * because of the other reference taken at step 1 + * 4. dm_sysfs_exit waits on the completion + * 5. The other process that took the reference in step 1 drops it, + * dm_kobject_release is called from this process + * 6. dm_kobject_release calls complete() + * 7. a reschedule happens before dm_kobject_release returns + * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference + * count is decremented + * 9. The user unloads the dm module + * 10. The other process that was rescheduled in step 7 continues to run, + * it is now executing code in unloaded module, so it crashes + * + * Note that if the process that takes the foreign reference to dm kobject + * has a low priority and the system is sufficiently loaded with + * higher-priority processes that prevent the low-priority process from + * being scheduled long enough, this bug may really happen. + * + * In order to fix this module unload race, we place the release method + * into a helper code that is compiled directly into the kernel. + */ + +void dm_kobject_release(struct kobject *kobj) +{ + complete(dm_get_completion_from_kobject(kobj)); +} + +EXPORT_SYMBOL(dm_kobject_release); --- linux-3.13.0.orig/drivers/md/dm-cache-metadata.c +++ linux-3.13.0/drivers/md/dm-cache-metadata.c @@ -94,6 +94,9 @@ } __packed; struct dm_cache_metadata { + atomic_t ref_count; + struct list_head list; + struct block_device *bdev; struct dm_block_manager *bm; struct dm_space_map *metadata_sm; @@ -120,6 +123,12 @@ unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; size_t policy_hint_size; struct dm_cache_statistics stats; + + /* + * Reading the space map root can fail, so we read it into this + * buffer before the superblock is locked and updated. + */ + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; /*------------------------------------------------------------------- @@ -260,11 +269,31 @@ } } +static int __save_sm_root(struct dm_cache_metadata *cmd) +{ + int r; + size_t metadata_len; + + r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); + if (r < 0) + return r; + + return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root, + metadata_len); +} + +static void __copy_sm_root(struct dm_cache_metadata *cmd, + struct cache_disk_superblock *disk_super) +{ + memcpy(&disk_super->metadata_space_map_root, + &cmd->metadata_space_map_root, + sizeof(cmd->metadata_space_map_root)); +} + static int __write_initial_superblock(struct dm_cache_metadata *cmd) { int r; struct dm_block *sblock; - size_t metadata_len; struct cache_disk_superblock *disk_super; sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; @@ -272,12 +301,16 @@ if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS) bdev_size = DM_CACHE_METADATA_MAX_SECTORS; - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); + r = dm_tm_pre_commit(cmd->tm); if (r < 0) return r; - r = dm_tm_pre_commit(cmd->tm); - if (r < 0) + /* + * dm_sm_copy_root() can fail. So we need to do it before we start + * updating the superblock. 
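The same staging idea recurs in the dm-thin metadata changes later in this section: read the space-map root, which can fail, into a buffer before the superblock is write-locked, so that only an infallible memcpy happens under the lock and the commit path keeps no unlock-and-unwind error exit. A minimal model (the root size and the read callback are illustrative):

#include <stddef.h>
#include <string.h>

#define ROOT_SIZE 128   /* arbitrary size for the sketch */

struct metadata {
        unsigned char staged_root[ROOT_SIZE];
};

/* Step 1, superblock not yet locked: the read may fail, so do it now. */
static int save_sm_root(struct metadata *md, int (*read_root)(void *, size_t))
{
        return read_root(md->staged_root, sizeof(md->staged_root));
}

/* Step 2, superblock locked: nothing left here that can fail. */
static void copy_sm_root(const struct metadata *md, unsigned char *disk_super_root)
{
        memcpy(disk_super_root, md->staged_root, ROOT_SIZE);
}

static int fake_read_root(void *buf, size_t len)
{
        memset(buf, 0xab, len);
        return 0;
}

int main(void)
{
        struct metadata md;
        unsigned char disk_root[ROOT_SIZE];

        if (save_sm_root(&md, fake_read_root))
                return 1;                  /* bail out before taking the lock */
        copy_sm_root(&md, disk_root);      /* "locked" section, cannot fail */
        return disk_root[0] != 0xab;
}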
+ */ + r = __save_sm_root(cmd); + if (r) return r; r = superblock_lock_zero(cmd, &sblock); @@ -293,10 +326,7 @@ memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); disk_super->policy_hint_size = 0; - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto bad_locked; + __copy_sm_root(cmd, disk_super); disk_super->mapping_root = cpu_to_le64(cmd->root); disk_super->hint_root = cpu_to_le64(cmd->hint_root); @@ -313,10 +343,6 @@ disk_super->write_misses = cpu_to_le32(0); return dm_tm_commit(cmd->tm, sblock); - -bad_locked: - dm_bm_unlock(sblock); - return r; } static int __format_metadata(struct dm_cache_metadata *cmd) @@ -402,6 +428,15 @@ disk_super = dm_block_data(sblock); + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk_super->data_block_size), + (unsigned long long)cmd->data_block_size); + r = -EINVAL; + goto bad; + } + r = __check_incompat_features(disk_super, cmd); if (r < 0) goto bad; @@ -530,8 +565,9 @@ disk_super = dm_block_data(sblock); update_flags(disk_super, mutator); read_superblock_fields(cmd, disk_super); + dm_bm_unlock(sblock); - return dm_bm_flush_and_unlock(cmd->bm, sblock); + return dm_bm_flush(cmd->bm); } static int __begin_transaction(struct dm_cache_metadata *cmd) @@ -559,7 +595,6 @@ flags_mutator mutator) { int r; - size_t metadata_len; struct cache_disk_superblock *disk_super; struct dm_block *sblock; @@ -577,8 +612,8 @@ if (r < 0) return r; - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); - if (r < 0) + r = __save_sm_root(cmd); + if (r) return r; r = superblock_lock(cmd, &sblock); @@ -605,13 +640,7 @@ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits); disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses); - - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) { - dm_bm_unlock(sblock); - return r; - } + __copy_sm_root(cmd, disk_super); return dm_tm_commit(cmd->tm, sblock); } @@ -643,10 +672,10 @@ /*----------------------------------------------------------------*/ -struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, - sector_t data_block_size, - bool may_format_device, - size_t policy_hint_size) +static struct dm_cache_metadata *metadata_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) { int r; struct dm_cache_metadata *cmd; @@ -654,9 +683,10 @@ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { DMERR("could not allocate metadata struct"); - return NULL; + return ERR_PTR(-ENOMEM); } + atomic_set(&cmd->ref_count, 1); init_rwsem(&cmd->root_lock); cmd->bdev = bdev; cmd->data_block_size = data_block_size; @@ -679,10 +709,96 @@ return cmd; } +/* + * We keep a little list of ref counted metadata objects to prevent two + * different target instances creating separate bufio instances. This is + * an issue if a table is reloaded before the suspend. 
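A simplified userspace sketch of the ref-counted per-bdev table described in the comment above and implemented just below (a pthread mutex and a plain int refcount stand in for the kernel primitives, and the expensive open is reduced to calloc): open without the lock held, then re-check under the lock and discard the duplicate if another opener won the race.

#include <pthread.h>
#include <stdlib.h>

struct cmd {                    /* stand-in for the metadata object */
        const void *bdev;       /* lookup key */
        int refs;
        struct cmd *next;
};

static struct cmd *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds table_lock; bumps the refcount on a hit. */
static struct cmd *lookup(const void *bdev)
{
        for (struct cmd *c = table; c; c = c->next)
                if (c->bdev == bdev) {
                        c->refs++;
                        return c;
                }
        return NULL;
}

static struct cmd *lookup_or_open(const void *bdev)
{
        pthread_mutex_lock(&table_lock);
        struct cmd *c = lookup(bdev);
        pthread_mutex_unlock(&table_lock);
        if (c)
                return c;

        struct cmd *fresh = calloc(1, sizeof(*fresh));  /* the costly "open" */
        if (!fresh)
                return NULL;
        fresh->bdev = bdev;
        fresh->refs = 1;

        pthread_mutex_lock(&table_lock);
        c = lookup(bdev);                /* someone may have raced us here */
        if (c) {
                pthread_mutex_unlock(&table_lock);
                free(fresh);             /* drop our duplicate */
                return c;
        }
        fresh->next = table;
        table = fresh;
        pthread_mutex_unlock(&table_lock);
        return fresh;
}

int main(void)
{
        int key;
        struct cmd *a = lookup_or_open(&key);
        struct cmd *b = lookup_or_open(&key);   /* same object, refs == 2 */

        return !(a && a == b && a->refs == 2);
}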
+ */ +static DEFINE_MUTEX(table_lock); +static LIST_HEAD(table); + +static struct dm_cache_metadata *lookup(struct block_device *bdev) +{ + struct dm_cache_metadata *cmd; + + list_for_each_entry(cmd, &table, list) + if (cmd->bdev == bdev) { + atomic_inc(&cmd->ref_count); + return cmd; + } + + return NULL; +} + +static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) +{ + struct dm_cache_metadata *cmd, *cmd2; + + mutex_lock(&table_lock); + cmd = lookup(bdev); + mutex_unlock(&table_lock); + + if (cmd) + return cmd; + + cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size); + if (!IS_ERR(cmd)) { + mutex_lock(&table_lock); + cmd2 = lookup(bdev); + if (cmd2) { + mutex_unlock(&table_lock); + __destroy_persistent_data_objects(cmd); + kfree(cmd); + return cmd2; + } + list_add(&cmd->list, &table); + mutex_unlock(&table_lock); + } + + return cmd; +} + +static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) +{ + if (cmd->data_block_size != data_block_size) { + DMERR("data_block_size (%llu) different from that in metadata (%llu)\n", + (unsigned long long) data_block_size, + (unsigned long long) cmd->data_block_size); + return false; + } + + return true; +} + +struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, + sector_t data_block_size, + bool may_format_device, + size_t policy_hint_size) +{ + struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, + may_format_device, policy_hint_size); + + if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) { + dm_cache_metadata_close(cmd); + return ERR_PTR(-EINVAL); + } + + return cmd; +} + void dm_cache_metadata_close(struct dm_cache_metadata *cmd) { - __destroy_persistent_data_objects(cmd); - kfree(cmd); + if (atomic_dec_and_test(&cmd->ref_count)) { + mutex_lock(&table_lock); + list_del(&cmd->list); + mutex_unlock(&table_lock); + + __destroy_persistent_data_objects(cmd); + kfree(cmd); + } } /* @@ -1228,22 +1344,12 @@ return 0; } -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint) { + struct dm_cache_metadata *cmd = context; + __le32 value = cpu_to_le32(hint); int r; - down_write(&cmd->root_lock); - r = begin_hints(cmd, policy); - up_write(&cmd->root_lock); - - return r; -} - -static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, - uint32_t hint) -{ - int r; - __le32 value = cpu_to_le32(hint); __dm_bless_for_disk(&value); r = dm_array_set_value(&cmd->hint_info, cmd->hint_root, @@ -1253,16 +1359,25 @@ return r; } -int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, - uint32_t hint) +static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) { int r; - if (!hints_array_initialized(cmd)) - return 0; + r = begin_hints(cmd, policy); + if (r) { + DMERR("begin_hints failed"); + return r; + } + + return policy_walk_mappings(policy, save_hint, cmd); +} + +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +{ + int r; down_write(&cmd->root_lock); - r = save_hint(cmd, cblock, hint); + r = write_hints(cmd, policy); up_write(&cmd->root_lock); return r; --- linux-3.13.0.orig/drivers/md/dm-cache-metadata.h +++ linux-3.13.0/drivers/md/dm-cache-metadata.h @@ -128,14 +128,7 @@ * rather than querying the policy for each cblock, we let it walk its data * 
structures and fill in the hints in whatever order it wishes. */ - -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); - -/* - * requests hints for every cblock and stores in the metadata device. - */ -int dm_cache_save_hint(struct dm_cache_metadata *cmd, - dm_cblock_t cblock, uint32_t hint); +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); /* * Query method. Are all the blocks in the cache clean? --- linux-3.13.0.orig/drivers/md/dm-cache-policy-mq.c +++ linux-3.13.0/drivers/md/dm-cache-policy-mq.c @@ -869,7 +869,7 @@ { struct mq_policy *mq = to_mq_policy(p); - kfree(mq->table); + vfree(mq->table); epool_exit(&mq->cache_pool); epool_exit(&mq->pre_cache_pool); kfree(mq); @@ -1224,7 +1224,7 @@ mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); mq->hash_bits = ffs(mq->nr_buckets) - 1; - mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); + mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); if (!mq->table) goto bad_alloc_table; --- linux-3.13.0.orig/drivers/md/dm-cache-target.c +++ linux-3.13.0/drivers/md/dm-cache-target.c @@ -216,7 +216,13 @@ struct list_head need_commit_migrations; sector_t migration_threshold; wait_queue_head_t migration_wait; - atomic_t nr_migrations; + atomic_t nr_allocated_migrations; + + /* + * The number of in flight migrations that are performing + * background io. eg, promotion, writeback. + */ + atomic_t nr_io_migrations; wait_queue_head_t quiescing_wait; atomic_t quiescing; @@ -225,7 +231,7 @@ /* * cache_size entries, dirty if set */ - dm_cblock_t nr_dirty; + atomic_t nr_dirty; unsigned long *dirty_bitset; /* @@ -233,7 +239,7 @@ */ dm_dblock_t discard_nr_blocks; unsigned long *discard_bitset; - uint32_t discard_block_size; /* a power of 2 times sectors per block */ + uint32_t discard_block_size; /* * Rather than reconstructing the table line for the status we just @@ -253,7 +259,6 @@ struct dm_deferred_set *all_io_ds; mempool_t *migration_pool; - struct dm_cache_migration *next_migration; struct dm_cache_policy *policy; unsigned policy_nr_args; @@ -283,6 +288,7 @@ bool tick:1; unsigned req_nr:2; struct dm_deferred_entry *all_io_entry; + struct dm_hook_info hook_info; /* * writethrough fields. 
These MUST remain at the end of this @@ -291,7 +297,6 @@ */ struct cache *cache; dm_cblock_t cblock; - struct dm_hook_info hook_info; struct dm_bio_details bio_details; }; @@ -344,10 +349,31 @@ dm_bio_prison_free_cell(cache->prison, cell); } +static struct dm_cache_migration *alloc_migration(struct cache *cache) +{ + struct dm_cache_migration *mg; + + mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); + if (mg) { + mg->cache = cache; + atomic_inc(&mg->cache->nr_allocated_migrations); + } + + return mg; +} + +static void free_migration(struct dm_cache_migration *mg) +{ + if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations)) + wake_up(&mg->cache->migration_wait); + + mempool_free(mg, mg->cache->migration_pool); +} + static int prealloc_data_structs(struct cache *cache, struct prealloc *p) { if (!p->mg) { - p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT); + p->mg = alloc_migration(cache); if (!p->mg) return -ENOMEM; } @@ -376,7 +402,7 @@ free_prison_cell(cache, p->cell1); if (p->mg) - mempool_free(p->mg, cache->migration_pool); + free_migration(p->mg); } static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p) @@ -487,7 +513,7 @@ static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock) { if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { - cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1); + atomic_inc(&cache->nr_dirty); policy_set_dirty(cache->policy, oblock); } } @@ -496,8 +522,7 @@ { if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { policy_clear_dirty(cache->policy, oblock); - cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); - if (!from_cblock(cache->nr_dirty)) + if (atomic_dec_return(&cache->nr_dirty) == 0) dm_table_event(cache->ti->table); } } @@ -804,24 +829,14 @@ * Migration covers moving data from the origin device to the cache, or * vice versa. *--------------------------------------------------------------*/ -static void free_migration(struct dm_cache_migration *mg) +static void inc_io_migrations(struct cache *cache) { - mempool_free(mg, mg->cache->migration_pool); + atomic_inc(&cache->nr_io_migrations); } -static void inc_nr_migrations(struct cache *cache) +static void dec_io_migrations(struct cache *cache) { - atomic_inc(&cache->nr_migrations); -} - -static void dec_nr_migrations(struct cache *cache) -{ - atomic_dec(&cache->nr_migrations); - - /* - * Wake the worker in case we're suspending the target. 
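The dirty-block accounting converted just above becomes a bare atomic counter whose only interesting transition is the last dirty block going clean, which raises an event. A C11-atomics model of that transition (the event is reduced to a flag):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long nr_dirty;
static bool event_fired;

static void set_dirty(void)
{
        atomic_fetch_add(&nr_dirty, 1);
}

static void clear_dirty(void)
{
        /* fetch_sub returns the old value, so old == 1 means we just hit 0 */
        if (atomic_fetch_sub(&nr_dirty, 1) == 1)
                event_fired = true;     /* stands in for the table event */
}

int main(void)
{
        set_dirty();
        set_dirty();
        clear_dirty();
        clear_dirty();
        printf("dirty=%ld event=%d\n", atomic_load(&nr_dirty), event_fired);
        return 0;
}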
- */ - wake_up(&cache->migration_wait); + atomic_dec(&cache->nr_io_migrations); } static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell, @@ -844,11 +859,10 @@ wake_worker(cache); } -static void cleanup_migration(struct dm_cache_migration *mg) +static void free_io_migration(struct dm_cache_migration *mg) { - struct cache *cache = mg->cache; + dec_io_migrations(mg->cache); free_migration(mg); - dec_nr_migrations(cache); } static void migration_failure(struct dm_cache_migration *mg) @@ -873,7 +887,7 @@ cell_defer(cache, mg->new_ocell, true); } - cleanup_migration(mg); + free_io_migration(mg); } static void migration_success_pre_commit(struct dm_cache_migration *mg) @@ -882,9 +896,9 @@ struct cache *cache = mg->cache; if (mg->writeback) { - cell_defer(cache, mg->old_ocell, false); clear_dirty(cache, mg->old_oblock, mg->cblock); - cleanup_migration(mg); + cell_defer(cache, mg->old_ocell, false); + free_io_migration(mg); return; } else if (mg->demote) { @@ -894,14 +908,14 @@ mg->old_oblock); if (mg->promote) cell_defer(cache, mg->new_ocell, true); - cleanup_migration(mg); + free_io_migration(mg); return; } } else { if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) { DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); policy_remove_mapping(cache->policy, mg->new_oblock); - cleanup_migration(mg); + free_io_migration(mg); return; } } @@ -934,18 +948,22 @@ } else { if (mg->invalidate) policy_remove_mapping(cache->policy, mg->old_oblock); - cleanup_migration(mg); + free_io_migration(mg); } } else { - if (mg->requeue_holder) + if (mg->requeue_holder) { + clear_dirty(cache, mg->new_oblock, mg->cblock); cell_defer(cache, mg->new_ocell, true); - else { + } else { + /* + * The block was promoted via an overwrite, so it's dirty. 
+ */ + set_dirty(cache, mg->new_oblock, mg->cblock); bio_endio(mg->new_ocell->holder, 0); cell_defer(cache, mg->new_ocell, false); } - clear_dirty(cache, mg->new_oblock, mg->cblock); - cleanup_migration(mg); + free_io_migration(mg); } } @@ -970,12 +988,13 @@ int r; struct dm_io_region o_region, c_region; struct cache *cache = mg->cache; + sector_t cblock = from_cblock(mg->cblock); o_region.bdev = cache->origin_dev->bdev; o_region.count = cache->sectors_per_block; c_region.bdev = cache->cache_dev->bdev; - c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; + c_region.sector = cblock * cache->sectors_per_block; c_region.count = cache->sectors_per_block; if (mg->writeback || mg->demote) { @@ -1049,7 +1068,8 @@ avoid = is_discarded_oblock(cache, mg->new_oblock); - if (!avoid && bio_writes_complete_block(cache, bio)) { + if (writeback_mode(&cache->features) && + !avoid && bio_writes_complete_block(cache, bio)) { issue_overwrite(mg, bio); return; } @@ -1153,7 +1173,7 @@ mg->new_ocell = cell; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1176,7 +1196,7 @@ mg->new_ocell = NULL; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1202,7 +1222,7 @@ mg->new_ocell = new_ocell; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1229,7 +1249,7 @@ mg->new_ocell = NULL; mg->start_jiffies = jiffies; - inc_nr_migrations(cache); + inc_io_migrations(cache); quiesce_migration(mg); } @@ -1290,7 +1310,7 @@ static bool spare_migration_bandwidth(struct cache *cache) { - sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) * + sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * cache->sectors_per_block; return current_volume < cache->migration_threshold; } @@ -1645,7 +1665,7 @@ static void wait_for_migrations(struct cache *cache) { - wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations)); + wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations)); } static void stop_worker(struct cache *cache) @@ -1756,9 +1776,6 @@ { unsigned i; - if (cache->next_migration) - mempool_free(cache->next_migration, cache->migration_pool); - if (cache->migration_pool) mempool_destroy(cache->migration_pool); @@ -2159,35 +2176,6 @@ return 0; } -/* - * We want the discard block size to be a power of two, at least the size - * of the cache block size, and have no more than 2^14 discard blocks - * across the origin. 
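The two counters used in this hunk separate concerns: the bandwidth throttle looks only at migrations doing background I/O, while suspend must wait until every allocated migration structure, including preallocated ones that never issued I/O, has been returned. A compact C11 model of the two conditions (threshold units are arbitrary here):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_allocated_migrations;
static atomic_int nr_io_migrations;

static void alloc_migration(void)   { atomic_fetch_add(&nr_allocated_migrations, 1); }
static void free_migration(void)    { atomic_fetch_sub(&nr_allocated_migrations, 1); /* wake suspend waiter */ }
static void inc_io_migrations(void) { atomic_fetch_add(&nr_io_migrations, 1); }
static void dec_io_migrations(void) { atomic_fetch_sub(&nr_io_migrations, 1); }

/* Throttle: only background I/O counts against the migration threshold. */
static bool spare_migration_bandwidth(long sectors_per_block, long threshold)
{
        long current_volume = (atomic_load(&nr_io_migrations) + 1) * sectors_per_block;
        return current_volume < threshold;
}

/* Suspend: wait until every allocated migration has been freed. */
static bool migrations_quiesced(void)
{
        return atomic_load(&nr_allocated_migrations) == 0;
}

int main(void)
{
        alloc_migration();
        inc_io_migrations();
        bool ok = spare_migration_bandwidth(128, 2048);
        dec_io_migrations();
        free_migration();
        return !(ok && migrations_quiesced());
}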
- */ -#define MAX_DISCARD_BLOCKS (1 << 14) - -static bool too_many_discard_blocks(sector_t discard_block_size, - sector_t origin_size) -{ - (void) sector_div(origin_size, discard_block_size); - - return origin_size > MAX_DISCARD_BLOCKS; -} - -static sector_t calculate_discard_block_size(sector_t cache_block_size, - sector_t origin_size) -{ - sector_t discard_block_size; - - discard_block_size = roundup_pow_of_two(cache_block_size); - - if (origin_size) - while (too_many_discard_blocks(discard_block_size, origin_size)) - discard_block_size *= 2; - - return discard_block_size; -} - #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -2212,6 +2200,8 @@ ti->num_discard_bios = 1; ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; + /* Discard bios must be split on a block boundary */ + ti->split_discard_bios = true; cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); @@ -2293,7 +2283,8 @@ INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - atomic_set(&cache->nr_migrations, 0); + atomic_set(&cache->nr_allocated_migrations, 0); + atomic_set(&cache->nr_io_migrations, 0); init_waitqueue_head(&cache->migration_wait); init_waitqueue_head(&cache->quiescing_wait); @@ -2301,7 +2292,7 @@ atomic_set(&cache->quiescing_ack, 0); r = -ENOMEM; - cache->nr_dirty = 0; + atomic_set(&cache->nr_dirty, 0); cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); if (!cache->dirty_bitset) { *error = "could not allocate dirty bitset"; @@ -2309,9 +2300,7 @@ } clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); - cache->discard_block_size = - calculate_discard_block_size(cache->sectors_per_block, - cache->origin_sectors); + cache->discard_block_size = cache->sectors_per_block; cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); if (!cache->discard_bitset) { @@ -2355,8 +2344,6 @@ goto bad; } - cache->next_migration = NULL; - cache->need_tick_bio = true; cache->sized = false; cache->invalidate = false; @@ -2453,20 +2440,18 @@ bool discarded_block; struct dm_bio_prison_cell *cell; struct policy_result lookup_result; - struct per_bio_data *pb; + struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); - if (from_oblock(block) > from_oblock(cache->origin_blocks)) { + if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { /* * This can only occur if the io goes to a partial block at * the end of the origin device. We don't cache these. * Just remap to the origin and carry on. 
*/ - remap_to_origin_clear_discard(cache, bio, block); + remap_to_origin(cache, bio); return DM_MAPIO_REMAPPED; } - pb = init_per_bio_data(bio, pb_data_size); - if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { defer_bio(cache, bio); return DM_MAPIO_SUBMITTED; @@ -2527,6 +2512,7 @@ } else { inc_hit_counter(cache, bio); + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && !is_dirty(cache, lookup_result.cblock)) @@ -2621,30 +2607,6 @@ return 0; } -static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, - uint32_t hint) -{ - struct cache *cache = context; - return dm_cache_save_hint(cache->cmd, cblock, hint); -} - -static int write_hints(struct cache *cache) -{ - int r; - - r = dm_cache_begin_hints(cache->cmd, cache->policy); - if (r) { - DMERR("dm_cache_begin_hints failed"); - return r; - } - - r = policy_walk_mappings(cache->policy, save_hint, cache); - if (r) - DMERR("policy_walk_mappings failed"); - - return r; -} - /* * returns true on success */ @@ -2662,7 +2624,7 @@ save_stats(cache); - r3 = write_hints(cache); + r3 = dm_cache_write_hints(cache->cmd, cache->policy); if (r3) DMERR("could not write hints"); @@ -2869,7 +2831,7 @@ residency = policy_residency(cache->policy); - DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ", + DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %lu ", (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), (unsigned long long)nr_blocks_metadata, (unsigned) atomic_read(&cache->stats.read_hit), @@ -2879,7 +2841,7 @@ (unsigned) atomic_read(&cache->stats.demotion), (unsigned) atomic_read(&cache->stats.promotion), (unsigned long long) from_cblock(residency), - cache->nr_dirty); + (unsigned long) atomic_read(&cache->nr_dirty)); if (writethrough_mode(&cache->features)) DMEMIT("1 writethrough "); @@ -3104,7 +3066,7 @@ /* * FIXME: these limits may be incompatible with the cache device */ - limits->max_discard_sectors = cache->discard_block_size * 1024; + limits->max_discard_sectors = cache->discard_block_size; limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; } --- linux-3.13.0.orig/drivers/md/dm-crypt.c +++ linux-3.13.0/drivers/md/dm-crypt.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -45,6 +44,7 @@ unsigned int idx_out; sector_t cc_sector; atomic_t cc_pending; + struct ablkcipher_request *req; }; /* @@ -113,15 +113,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; /* - * Duplicated per-CPU state for cipher. - */ -struct crypt_cpu { - struct ablkcipher_request *req; -}; - -/* - * The fields in here must be read only after initialization, - * changing state should be in crypt_cpu. + * The fields in here must be read only after initialization. */ struct crypt_config { struct dm_dev *dev; @@ -152,12 +144,6 @@ sector_t iv_offset; unsigned int iv_size; - /* - * Duplicated per cpu state. Access through - * per_cpu_ptr() only. - */ - struct crypt_cpu __percpu *cpu; - /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; struct crypto_ablkcipher **tfms; @@ -194,11 +180,6 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io); static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) -{ - return this_cpu_ptr(cc->cpu); -} - /* * Use this to access cipher attributes that are the same for each CPU. 
*/ @@ -730,7 +711,7 @@ for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++) crypto_xor(data + i * 8, buf, 8); out: - memset(buf, 0, sizeof(buf)); + memzero_explicit(buf, sizeof(buf)); return r; } @@ -914,16 +895,15 @@ static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); - if (!this_cc->req) - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + if (!ctx->req) + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); - ablkcipher_request_set_callback(this_cc->req, + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + ablkcipher_request_set_callback(ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); + kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } /* @@ -932,7 +912,6 @@ static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); int r; atomic_set(&ctx->cc_pending, 1); @@ -944,7 +923,7 @@ atomic_inc(&ctx->cc_pending); - r = crypt_convert_block(cc, ctx, this_cc->req); + r = crypt_convert_block(cc, ctx, ctx->req); switch (r) { /* async */ @@ -953,7 +932,7 @@ reinit_completion(&ctx->restart); /* fall through*/ case -EINPROGRESS: - this_cc->req = NULL; + ctx->req = NULL; ctx->cc_sector++; continue; @@ -1052,6 +1031,7 @@ io->sector = sector; io->error = 0; io->base_io = NULL; + io->ctx.req = NULL; atomic_set(&io->io_pending, 0); return io; @@ -1077,6 +1057,8 @@ if (!atomic_dec_and_test(&io->io_pending)) return; + if (io->ctx.req) + mempool_free(io->ctx.req, cc->req_pool); mempool_free(io, cc->io_pool); if (likely(!base_io)) @@ -1505,8 +1487,6 @@ static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; - struct crypt_cpu *cpu_cc; - int cpu; ti->private = NULL; @@ -1518,13 +1498,6 @@ if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); - if (cc->cpu) - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - if (cpu_cc->req) - mempool_free(cpu_cc->req, cc->req_pool); - } - crypt_free_tfms(cc); if (cc->bs) @@ -1543,9 +1516,6 @@ if (cc->dev) dm_put_device(ti, cc->dev); - if (cc->cpu) - free_percpu(cc->cpu); - kzfree(cc->cipher); kzfree(cc->cipher_string); @@ -1601,13 +1571,6 @@ if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), - __alignof__(struct crypt_cpu)); - if (!cc->cpu) { - ti->error = "Cannot allocate per cpu state"; - goto bad_mem; - } - /* * For compatibility with the original dm-crypt mapping format, if * only the cipher name is supplied, use cbc-plain. 
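A stripped-down model of the lifetime change carried through the dm-crypt hunks above: the cipher request now belongs to the per-I/O context instead of per-CPU data, is allocated on first use, and is released only when the I/O's pending count reaches zero (malloc and free stand in for the mempool, and the async crypto machinery is omitted).

#include <stdlib.h>

struct convert_context {
        void *req;              /* lazily allocated cipher request */
};

struct crypt_io {
        struct convert_context ctx;
        int pending;
};

static void crypt_alloc_req(struct convert_context *ctx)
{
        if (!ctx->req)
                ctx->req = malloc(64);  /* stand-in for the request mempool */
}

static void crypt_dec_pending(struct crypt_io *io)
{
        if (--io->pending)
                return;
        free(io->ctx.req);      /* the request lives exactly as long as the I/O;
                                 * free(NULL) mirrors the "if set" check above */
        free(io);
}

int main(void)
{
        struct crypt_io *io = calloc(1, sizeof(*io));

        if (!io)
                return 1;
        io->pending = 1;
        crypt_alloc_req(&io->ctx);
        crypt_dec_pending(io);  /* frees both the request and the I/O */
        return 0;
}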
@@ -2003,6 +1966,12 @@ return fn(ti, cc->dev, cc->start, ti->len, data); } +static void crypt_io_hints(struct dm_target *ti, + struct queue_limits *limits) +{ + limits->max_write_same_sectors = 0; +} + static struct target_type crypt_target = { .name = "crypt", .version = {1, 13, 0}, @@ -2017,6 +1986,7 @@ .message = crypt_message, .merge = crypt_merge, .iterate_devices = crypt_iterate_devices, + .io_hints = crypt_io_hints, }; static int __init dm_crypt_init(void) --- linux-3.13.0.orig/drivers/md/dm-io.c +++ linux-3.13.0/drivers/md/dm-io.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -32,7 +33,7 @@ struct io { unsigned long error_bits; atomic_t count; - struct task_struct *sleeper; + struct completion *wait; struct dm_io_client *client; io_notify_fn callback; void *context; @@ -121,8 +122,8 @@ invalidate_kernel_vmap_range(io->vma_invalidate_address, io->vma_invalidate_size); - if (io->sleeper) - wake_up_process(io->sleeper); + if (io->wait) + complete(io->wait); else { unsigned long r = io->error_bits; @@ -288,6 +289,19 @@ struct request_queue *q = bdev_get_queue(where->bdev); unsigned short logical_block_size = queue_logical_block_size(q); sector_t num_sectors; + unsigned int uninitialized_var(special_cmd_max_sectors); + + /* + * Reject unsupported discard and write same requests. + */ + if (rw & REQ_DISCARD) + special_cmd_max_sectors = q->limits.max_discard_sectors; + else if (rw & REQ_WRITE_SAME) + special_cmd_max_sectors = q->limits.max_write_same_sectors; + if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { + dec_count(io, region, -EOPNOTSUPP); + return; + } /* * where->count may be zero if rw holds a flush and we need to @@ -310,7 +324,7 @@ store_io_and_region_in_bio(bio, io, region); if (rw & REQ_DISCARD) { - num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); + num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_size = num_sectors << SECTOR_SHIFT; remaining -= num_sectors; } else if (rw & REQ_WRITE_SAME) { @@ -319,7 +333,7 @@ */ dp->get_page(dp, &page, &len, &offset); bio_add_page(bio, page, logical_block_size, offset); - num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); + num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining); bio->bi_size = num_sectors << SECTOR_SHIFT; offset = 0; @@ -385,6 +399,7 @@ */ volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); + DECLARE_COMPLETION_ONSTACK(wait); if (num_regions > 1 && (rw & RW_MASK) != WRITE) { WARN_ON(1); @@ -393,7 +408,7 @@ io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = current; + io->wait = &wait; io->client = client; io->vma_invalidate_address = dp->vma_invalidate_address; @@ -401,15 +416,7 @@ dispatch_io(rw, num_regions, where, dp, io, 1); - while (1) { - set_current_state(TASK_UNINTERRUPTIBLE); - - if (!atomic_read(&io->count)) - break; - - io_schedule(); - } - set_current_state(TASK_RUNNING); + wait_for_completion_io(&wait); if (error_bits) *error_bits = io->error_bits; @@ -432,7 +439,7 @@ io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ - io->sleeper = NULL; + io->wait = NULL; io->client = client; io->callback = fn; io->context = context; --- linux-3.13.0.orig/drivers/md/dm-log-userspace-transfer.c +++ linux-3.13.0/drivers/md/dm-log-userspace-transfer.c @@ -272,7 +272,7 @@ r = cn_add_callback(&ulog_cn_id, 
"dmlogusr", cn_ulog_callback); if (r) { - cn_del_callback(&ulog_cn_id); + kfree(prealloced_cn_msg); return r; } --- linux-3.13.0.orig/drivers/md/dm-mpath.c +++ linux-3.13.0/drivers/md/dm-mpath.c @@ -1626,8 +1626,11 @@ /* * Only pass ioctls through if the device sizes match exactly. */ - if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) - r = scsi_verify_blk_ioctl(NULL, cmd); + if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) { + int err = scsi_verify_blk_ioctl(NULL, cmd); + if (err) + r = err; + } if (r == -ENOTCONN && !fatal_signal_pending(current)) queue_work(kmultipathd, &m->process_queued_ios); --- linux-3.13.0.orig/drivers/md/dm-raid.c +++ linux-3.13.0/drivers/md/dm-raid.c @@ -785,8 +785,7 @@ __le32 layout; __le32 stripe_sectors; - __u8 pad[452]; /* Round struct to 512 bytes. */ - /* Always set to 0 when writing. */ + /* Remainder of a logical block is zero-filled when writing (see super_sync()). */ } __packed; static int read_disk_sb(struct md_rdev *rdev, int size) @@ -823,7 +822,7 @@ test_bit(Faulty, &(rs->dev[i].rdev.flags))) failed_devices |= (1ULL << i); - memset(sb, 0, sizeof(*sb)); + memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); sb->magic = cpu_to_le32(DM_RAID_MAGIC); sb->features = cpu_to_le32(0); /* No features yet */ @@ -858,7 +857,11 @@ uint64_t events_sb, events_refsb; rdev->sb_start = 0; - rdev->sb_size = sizeof(*sb); + rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); + if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { + DMERR("superblock size of a logical block is no longer valid"); + return -EINVAL; + } ret = read_disk_sb(rdev, rdev->sb_size); if (ret) --- linux-3.13.0.orig/drivers/md/dm-raid1.c +++ linux-3.13.0/drivers/md/dm-raid1.c @@ -604,6 +604,15 @@ return; } + /* + * If the bio is discard, return an error, but do not + * degrade the array. + */ + if (bio->bi_rw & REQ_DISCARD) { + bio_endio(bio, -EOPNOTSUPP); + return; + } + for (i = 0; i < ms->nr_mirrors; i++) if (test_bit(i, &error)) fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); --- linux-3.13.0.orig/drivers/md/dm-snap.c +++ linux-3.13.0/drivers/md/dm-snap.c @@ -1439,8 +1439,6 @@ full_bio->bi_end_io = pe->full_bio_end_io; full_bio->bi_private = pe->full_bio_private; } - free_pending_exception(pe); - increment_pending_exceptions_done_count(); up_write(&s->lock); @@ -1457,6 +1455,8 @@ } retry_origin_bios(s, origin_bios); + + free_pending_exception(pe); } static void commit_callback(void *context, int success) --- linux-3.13.0.orig/drivers/md/dm-sysfs.c +++ linux-3.13.0/drivers/md/dm-sysfs.c @@ -86,6 +86,7 @@ static struct kobj_type dm_ktype = { .sysfs_ops = &dm_sysfs_ops, .default_attrs = dm_attrs, + .release = dm_kobject_release, }; /* @@ -104,5 +105,7 @@ */ void dm_sysfs_exit(struct mapped_device *md) { - kobject_put(dm_kobject(md)); + struct kobject *kobj = dm_kobject(md); + kobject_put(kobj); + wait_for_completion(dm_get_completion_from_kobject(kobj)); } --- linux-3.13.0.orig/drivers/md/dm-thin-metadata.c +++ linux-3.13.0/drivers/md/dm-thin-metadata.c @@ -192,6 +192,13 @@ * operation possible in this state is the closing of the device. */ bool fail_io:1; + + /* + * Reading the space map roots can fail, so we read it into these + * buffers before the superblock is locked and updated. 
+ */ + __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE]; + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; struct dm_thin_device { @@ -431,26 +438,53 @@ pmd->details_info.value_type.equal = NULL; } +static int save_sm_roots(struct dm_pool_metadata *pmd) +{ + int r; + size_t len; + + r = dm_sm_root_size(pmd->metadata_sm, &len); + if (r < 0) + return r; + + r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); + if (r < 0) + return r; + + r = dm_sm_root_size(pmd->data_sm, &len); + if (r < 0) + return r; + + return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); +} + +static void copy_sm_roots(struct dm_pool_metadata *pmd, + struct thin_disk_superblock *disk) +{ + memcpy(&disk->metadata_space_map_root, + &pmd->metadata_space_map_root, + sizeof(pmd->metadata_space_map_root)); + + memcpy(&disk->data_space_map_root, + &pmd->data_space_map_root, + sizeof(pmd->data_space_map_root)); +} + static int __write_initial_superblock(struct dm_pool_metadata *pmd) { int r; struct dm_block *sblock; - size_t metadata_len, data_len; struct thin_disk_superblock *disk_super; sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; if (bdev_size > THIN_METADATA_MAX_SECTORS) bdev_size = THIN_METADATA_MAX_SECTORS; - r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); - if (r < 0) - return r; - - r = dm_sm_root_size(pmd->data_sm, &data_len); + r = dm_sm_commit(pmd->data_sm); if (r < 0) return r; - r = dm_sm_commit(pmd->data_sm); + r = save_sm_roots(pmd); if (r < 0) return r; @@ -471,15 +505,7 @@ disk_super->trans_id = 0; disk_super->held_root = 0; - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto bad_locked; - - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, - data_len); - if (r < 0) - goto bad_locked; + copy_sm_roots(pmd, disk_super); disk_super->data_mapping_root = cpu_to_le64(pmd->root); disk_super->device_details_root = cpu_to_le64(pmd->details_root); @@ -488,10 +514,6 @@ disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); return dm_tm_commit(pmd->tm, sblock); - -bad_locked: - dm_bm_unlock(sblock); - return r; } static int __format_metadata(struct dm_pool_metadata *pmd) @@ -591,6 +613,15 @@ disk_super = dm_block_data(sblock); + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk_super->data_block_size), + (unsigned long long)pmd->data_block_size); + r = -EINVAL; + goto bad_unlock_sblock; + } + r = __check_incompat_features(disk_super, pmd); if (r < 0) goto bad_unlock_sblock; @@ -769,6 +800,10 @@ if (r < 0) return r; + r = save_sm_roots(pmd); + if (r < 0) + return r; + r = superblock_lock(pmd, &sblock); if (r) return r; @@ -780,21 +815,9 @@ disk_super->trans_id = cpu_to_le64(pmd->trans_id); disk_super->flags = cpu_to_le32(pmd->flags); - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto out_locked; - - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, - data_len); - if (r < 0) - goto out_locked; + copy_sm_roots(pmd, disk_super); return dm_tm_commit(pmd->tm, sblock); - -out_locked: - dm_bm_unlock(sblock); - return r; } struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, @@ -1349,6 +1372,12 @@ return td->id; } +/* + * Check whether @time (of block creation) is older than @td's last snapshot. 
+ * If so then the associated block is shared with the last snapshot device. + * Any block on a device created *after* the device last got snapshotted is + * necessarily not shared. + */ static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time) { return td->snapshotted_time > time; @@ -1458,6 +1487,20 @@ return r; } +int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) +{ + int r; + uint32_t ref_count; + + down_read(&pmd->root_lock); + r = dm_sm_get_count(pmd->data_sm, b, &ref_count); + if (!r) + *result = (ref_count != 0); + up_read(&pmd->root_lock); + + return r; +} + bool dm_thin_changed_this_transaction(struct dm_thin_device *td) { int r; @@ -1468,6 +1511,23 @@ return r; } + +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd) +{ + bool r = false; + struct dm_thin_device *td, *tmp; + + down_read(&pmd->root_lock); + list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) { + if (td->changed) { + r = td->changed; + break; + } + } + up_read(&pmd->root_lock); + + return r; +} bool dm_thin_aborted_changes(struct dm_thin_device *td) { --- linux-3.13.0.orig/drivers/md/dm-thin-metadata.h +++ linux-3.13.0/drivers/md/dm-thin-metadata.h @@ -161,6 +161,8 @@ */ bool dm_thin_changed_this_transaction(struct dm_thin_device *td); +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd); + bool dm_thin_aborted_changes(struct dm_thin_device *td); int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, @@ -181,6 +183,8 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); +int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); + /* * Returns -ENOSPC if the new size is too small and already allocated * blocks would be lost. 
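The thin-pool metadata changes above rearrange the commit path so that everything that can fail (reading the space map roots into side buffers) happens before the superblock is locked, leaving the locked phase as a pair of memcpy() calls that cannot fail; the old bad_locked/out_locked error paths that had to unlock a half-written superblock therefore disappear. Below is a minimal userspace sketch of that two-phase pattern, with invented names, sizes and callback signature (it is not the kernel code itself):

	#include <stddef.h>
	#include <string.h>

	#define ROOT_SIZE 128			/* placeholder size, not the real SPACE_MAP_ROOT_SIZE */

	struct sm_roots {			/* side buffers filled before locking */
		unsigned char metadata_root[ROOT_SIZE];
		unsigned char data_root[ROOT_SIZE];
	};

	struct fake_superblock {		/* stand-in for the on-disk superblock */
		unsigned char metadata_space_map_root[ROOT_SIZE];
		unsigned char data_space_map_root[ROOT_SIZE];
	};

	/* Phase 1: may fail; runs with no locks held. */
	static int save_roots(struct sm_roots *cache,
			      int (*read_root)(void *dst, size_t len, int which))
	{
		int r = read_root(cache->metadata_root, ROOT_SIZE, 0);
		if (r)
			return r;
		return read_root(cache->data_root, ROOT_SIZE, 1);
	}

	/* Phase 2: cannot fail; runs only while the superblock is locked. */
	static void copy_roots(struct fake_superblock *sb, const struct sm_roots *cache)
	{
		memcpy(sb->metadata_space_map_root, cache->metadata_root, ROOT_SIZE);
		memcpy(sb->data_space_map_root, cache->data_root, ROOT_SIZE);
	}

The rewritten comment in dm-transaction-manager.h later in this patch states the same rule: perform any operation that could fail before taking the superblock lock, and keep the locked window copy-only.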
--- linux-3.13.0.orig/drivers/md/dm-thin.c +++ linux-3.13.0/drivers/md/dm-thin.c @@ -512,6 +512,7 @@ unsigned quiesced:1; unsigned prepared:1; unsigned pass_discard:1; + unsigned definitely_not_shared:1; struct thin_c *tc; dm_block_t virt_block; @@ -683,7 +684,15 @@ cell_defer_no_holder(tc, m->cell2); if (m->pass_discard) - remap_and_issue(tc, m->bio, m->data_block); + if (m->definitely_not_shared) + remap_and_issue(tc, m->bio, m->data_block); + else { + bool used = false; + if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used) + bio_endio(m->bio, 0); + else + remap_and_issue(tc, m->bio, m->data_block); + } else bio_endio(m->bio, 0); @@ -751,13 +760,17 @@ static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) { - struct dm_thin_new_mapping *r = pool->next_mapping; + struct dm_thin_new_mapping *m = pool->next_mapping; BUG_ON(!pool->next_mapping); + memset(m, 0, sizeof(struct dm_thin_new_mapping)); + INIT_LIST_HEAD(&m->list); + m->bio = NULL; + pool->next_mapping = NULL; - return r; + return m; } static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, @@ -769,15 +782,10 @@ struct pool *pool = tc->pool; struct dm_thin_new_mapping *m = get_next_mapping(pool); - INIT_LIST_HEAD(&m->list); - m->quiesced = 0; - m->prepared = 0; m->tc = tc; m->virt_block = virt_block; m->data_block = data_dest; m->cell = cell; - m->err = 0; - m->bio = NULL; if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) m->quiesced = 1; @@ -840,15 +848,12 @@ struct pool *pool = tc->pool; struct dm_thin_new_mapping *m = get_next_mapping(pool); - INIT_LIST_HEAD(&m->list); m->quiesced = 1; m->prepared = 0; m->tc = tc; m->virt_block = virt_block; m->data_block = data_block; m->cell = cell; - m->err = 0; - m->bio = NULL; /* * If the whole block of data is being overwritten or we are not @@ -1040,12 +1045,12 @@ */ m = get_next_mapping(pool); m->tc = tc; - m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown; + m->pass_discard = pool->pf.discard_passdown; + m->definitely_not_shared = !lookup_result.shared; m->virt_block = block; m->data_block = lookup_result.block; m->cell = cell; m->cell2 = cell2; - m->err = 0; m->bio = bio; if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) { @@ -1327,9 +1332,9 @@ */ if (ensure_next_mapping(pool)) { spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_bios, bio); bio_list_merge(&pool->deferred_bios, &bios); spin_unlock_irqrestore(&pool->lock, flags); - break; } @@ -1349,7 +1354,8 @@ bio_list_init(&pool->deferred_flush_bios); spin_unlock_irqrestore(&pool->lock, flags); - if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) + if (bio_list_empty(&bios) && + !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) return; if (commit(pool)) { @@ -1390,16 +1396,16 @@ return pool->pf.mode; } -static void set_pool_mode(struct pool *pool, enum pool_mode mode) +static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) { int r; + enum pool_mode old_mode = pool->pf.mode; - pool->pf.mode = mode; - - switch (mode) { + switch (new_mode) { case PM_FAIL: - DMERR("%s: switching pool to failure mode", - dm_device_name(pool->pool_md)); + if (old_mode != new_mode) + DMERR("%s: switching pool to failure mode", + dm_device_name(pool->pool_md)); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; @@ -1408,13 +1414,15 @@ break; case PM_READ_ONLY: - DMERR("%s: switching pool to read-only mode", - 
dm_device_name(pool->pool_md)); + if (old_mode != new_mode) + DMERR("%s: switching pool to read-only mode", + dm_device_name(pool->pool_md)); r = dm_pool_abort_metadata(pool->pmd); if (r) { DMERR("%s: aborting transaction failed", dm_device_name(pool->pool_md)); - set_pool_mode(pool, PM_FAIL); + new_mode = PM_FAIL; + set_pool_mode(pool, new_mode); } else { dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_read_only; @@ -1425,6 +1433,9 @@ break; case PM_WRITE: + if (old_mode != new_mode) + DMINFO("%s: switching pool to write mode", + dm_device_name(pool->pool_md)); dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard; @@ -1432,6 +1443,8 @@ pool->process_prepared_discard = process_prepared_discard; break; } + + pool->pf.mode = new_mode; } /*----------------------------------------------------------------*/ @@ -1491,6 +1504,14 @@ return DM_MAPIO_SUBMITTED; } + /* + * We must hold the virtual cell before doing the lookup, otherwise + * there's a race with discard. + */ + build_virtual_key(tc->td, block, &key); + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) + return DM_MAPIO_SUBMITTED; + r = dm_thin_find_block(td, block, 0, &result); /* @@ -1514,13 +1535,10 @@ * shared flag will be set in their case. */ thin_defer_bio(tc, bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; } - build_virtual_key(tc->td, block, &key); - if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) - return DM_MAPIO_SUBMITTED; - build_data_key(tc->td, result.block, &key); if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { cell_defer_no_holder_no_free(tc, &cell1); @@ -1535,22 +1553,13 @@ return DM_MAPIO_REMAPPED; case -ENODATA: - if (get_pool_mode(tc->pool) == PM_READ_ONLY) { - /* - * This block isn't provisioned, and we have no way - * of doing so. Just error it. - */ - bio_io_error(bio); - return DM_MAPIO_SUBMITTED; - } - /* fall through */ - case -EWOULDBLOCK: /* * In future, the failed dm_thin_find_block above could * provide the hint to load the metadata into cache. */ thin_defer_bio(tc, bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; default: @@ -1560,6 +1569,7 @@ * pool is switched to fail-io mode. */ bio_io_error(bio); + cell_defer_no_holder_no_free(tc, &cell1); return DM_MAPIO_SUBMITTED; } } @@ -1648,6 +1658,17 @@ enum pool_mode new_mode = pt->adjusted_pf.mode; /* + * Don't change the pool's mode until set_pool_mode() below. + * Otherwise the pool's process_* function pointers may + * not match the desired pool mode. + */ + pt->adjusted_pf.mode = old_mode; + + pool->ti = ti; + pool->pf = pt->adjusted_pf; + pool->low_water_blocks = pt->low_water_blocks; + + /* * If we were in PM_FAIL mode, rollback of metadata failed. We're * not going to recover without a thin_repair. So we never let the * pool move out of the old mode. 
On the other hand a PM_READ_ONLY @@ -1657,10 +1678,6 @@ if (old_mode == PM_FAIL) new_mode = old_mode; - pool->ti = ti; - pool->low_water_blocks = pt->low_water_blocks; - pool->pf = pt->adjusted_pf; - set_pool_mode(pool, new_mode); return 0; @@ -2479,6 +2496,12 @@ struct pool_c *pt = ti->private; struct pool *pool = pt->pool; + if (get_pool_mode(pool) >= PM_READ_ONLY) { + DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", + dm_device_name(pool->pool_md)); + return -EINVAL; + } + if (!strcasecmp(argv[0], "create_thin")) r = process_create_thin_mesg(argc, argv, pool); @@ -2675,7 +2698,8 @@ */ if (pt->adjusted_pf.discard_passdown) { data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; - limits->discard_granularity = data_limits->discard_granularity; + limits->discard_granularity = max(data_limits->discard_granularity, + pool->sectors_per_block << SECTOR_SHIFT); } else limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; } @@ -2828,6 +2852,7 @@ if (get_pool_mode(tc->pool) == PM_FAIL) { ti->error = "Couldn't open thin device, Pool is in fail mode"; + r = -EINVAL; goto bad_thin_open; } @@ -2839,7 +2864,7 @@ r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); if (r) - goto bad_thin_open; + goto bad_target_max_io_len; ti->num_flush_bios = 1; ti->flush_supported = true; @@ -2860,6 +2885,8 @@ return 0; +bad_target_max_io_len: + dm_pool_close_thin_device(tc->td); bad_thin_open: __pool_dec(tc->pool); bad_pool_lookup: --- linux-3.13.0.orig/drivers/md/dm.c +++ linux-3.13.0/drivers/md/dm.c @@ -54,6 +54,8 @@ static DECLARE_WORK(deferred_remove_work, do_deferred_remove); +static struct workqueue_struct *deferred_remove_workqueue; + /* * For bio-based dm. * One of these is allocated per bio. @@ -200,8 +202,8 @@ /* forced geometry settings */ struct hd_geometry geometry; - /* sysfs handle */ - struct kobject kobj; + /* kobject and completion */ + struct dm_kobject_holder kobj_holder; /* zero-length flush that will be cloned and submitted to targets */ struct bio flush_bio; @@ -283,16 +285,24 @@ if (r) goto out_free_rq_tio_cache; + deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); + if (!deferred_remove_workqueue) { + r = -ENOMEM; + goto out_uevent_exit; + } + _major = major; r = register_blkdev(_major, _name); if (r < 0) - goto out_uevent_exit; + goto out_free_workqueue; if (!_major) _major = r; return 0; +out_free_workqueue: + destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); out_free_rq_tio_cache: @@ -306,6 +316,7 @@ static void local_exit(void) { flush_scheduled_work(); + destroy_workqueue(deferred_remove_workqueue); kmem_cache_destroy(_rq_tio_cache); kmem_cache_destroy(_io_cache); @@ -414,7 +425,7 @@ if (atomic_dec_and_test(&md->open_count) && (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) - schedule_work(&deferred_remove_work); + queue_work(deferred_remove_workqueue, &deferred_remove_work); dm_put(md); @@ -2041,6 +2052,7 @@ init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); + init_completion(&md->kobj_holder.completion); md->disk->major = _major; md->disk->first_minor = minor; @@ -2409,7 +2421,7 @@ return 0; } -static struct mapped_device *dm_find_md(dev_t dev) +struct mapped_device *dm_get_md(dev_t dev) { struct mapped_device *md; unsigned minor = MINOR(dev); @@ -2420,12 +2432,15 @@ spin_lock(&_minor_lock); md = idr_find(&_minor_idr, minor); - if (md && (md == MINOR_ALLOCED || - (MINOR(disk_devt(dm_disk(md))) != minor) || - dm_deleting_md(md) 
|| - test_bit(DMF_FREEING, &md->flags))) { - md = NULL; - goto out; + if (md) { + if ((md == MINOR_ALLOCED || + (MINOR(disk_devt(dm_disk(md))) != minor) || + dm_deleting_md(md) || + test_bit(DMF_FREEING, &md->flags))) { + md = NULL; + goto out; + } + dm_get(md); } out: @@ -2433,16 +2448,6 @@ return md; } - -struct mapped_device *dm_get_md(dev_t dev) -{ - struct mapped_device *md = dm_find_md(dev); - - if (md) - dm_get(md); - - return md; -} EXPORT_SYMBOL_GPL(dm_get_md); void *dm_get_mdptr(struct mapped_device *md) @@ -2480,10 +2485,16 @@ set_bit(DMF_FREEING, &md->flags); spin_unlock(&_minor_lock); + /* + * Take suspend_lock so that presuspend and postsuspend methods + * do not race with internal suspend. + */ + mutex_lock(&md->suspend_lock); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } + mutex_unlock(&md->suspend_lock); /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); @@ -2899,23 +2910,18 @@ { return md->disk; } +EXPORT_SYMBOL_GPL(dm_disk); struct kobject *dm_kobject(struct mapped_device *md) { - return &md->kobj; + return &md->kobj_holder.kobj; } -/* - * struct mapped_device should not be exported outside of dm.c - * so use this check to verify that kobj is part of md structure - */ struct mapped_device *dm_get_from_kobject(struct kobject *kobj) { struct mapped_device *md; - md = container_of(kobj, struct mapped_device, kobj); - if (&md->kobj != kobj) - return NULL; + md = container_of(kobj, struct mapped_device, kobj_holder.kobj); if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) --- linux-3.13.0.orig/drivers/md/dm.h +++ linux-3.13.0/drivers/md/dm.h @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include "dm-stats.h" @@ -148,12 +150,27 @@ /* * sysfs interface */ +struct dm_kobject_holder { + struct kobject kobj; + struct completion completion; +}; + +static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) +{ + return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; +} + int dm_sysfs_init(struct mapped_device *md); void dm_sysfs_exit(struct mapped_device *md); struct kobject *dm_kobject(struct mapped_device *md); struct mapped_device *dm_get_from_kobject(struct kobject *kobj); /* + * The kobject helper + */ +void dm_kobject_release(struct kobject *kobj); + +/* * Targets for linear and striped mappings */ int dm_linear_init(void); --- linux-3.13.0.orig/drivers/md/md.c +++ linux-3.13.0/drivers/md/md.c @@ -5321,6 +5321,7 @@ printk("md: %s still in use.\n",mdname(mddev)); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } err = -EBUSY; @@ -5335,6 +5336,8 @@ mddev->ro = 1; set_disk_ro(mddev->gendisk, 1); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); sysfs_notify_dirent_safe(mddev->sysfs_state); err = 0; } @@ -5378,6 +5381,7 @@ mutex_unlock(&mddev->open_mutex); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } return -EBUSY; @@ -7358,8 +7362,10 @@ /* just incase thread restarts... 
*/ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) return; - if (mddev->ro) /* never try to sync a read-only array */ + if (mddev->ro) {/* never try to sync a read-only array */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); return; + } if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { @@ -7466,6 +7472,19 @@ rdev->recovery_offset < j) j = rdev->recovery_offset; rcu_read_unlock(); + + /* If there is a bitmap, we need to make sure all + * writes that started before we added a spare + * complete before we start doing a recovery. + * Otherwise the write might complete and (via + * bitmap_endwrite) set a bit in the bitmap after the + * recovery has checked that bit and skipped that + * region. + */ + if (mddev->bitmap) { + mddev->pers->quiesce(mddev, 1); + mddev->pers->quiesce(mddev, 0); + } } printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); @@ -7800,6 +7819,7 @@ /* There is no thread, but we need to call * ->spare_active and clear saved_raid_disk */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); goto unlock; @@ -8496,7 +8516,8 @@ if (mddev_trylock(mddev)) { if (mddev->pers) __md_stop_writes(mddev); - mddev->safemode = 2; + if (mddev->persistent) + mddev->safemode = 2; mddev_unlock(mddev); } need_delay = 1; --- linux-3.13.0.orig/drivers/md/persistent-data/dm-block-manager.c +++ linux-3.13.0/drivers/md/persistent-data/dm-block-manager.c @@ -595,25 +595,14 @@ } EXPORT_SYMBOL_GPL(dm_bm_unlock); -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, - struct dm_block *superblock) +int dm_bm_flush(struct dm_block_manager *bm) { - int r; - if (bm->read_only) return -EPERM; - r = dm_bufio_write_dirty_buffers(bm->bufio); - if (unlikely(r)) { - dm_bm_unlock(superblock); - return r; - } - - dm_bm_unlock(superblock); - return dm_bufio_write_dirty_buffers(bm->bufio); } -EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock); +EXPORT_SYMBOL_GPL(dm_bm_flush); void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) { --- linux-3.13.0.orig/drivers/md/persistent-data/dm-block-manager.h +++ linux-3.13.0/drivers/md/persistent-data/dm-block-manager.h @@ -105,8 +105,7 @@ * * This method always blocks. */ -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, - struct dm_block *superblock); +int dm_bm_flush(struct dm_block_manager *bm); /* * Request data is prefetched into the cache. --- linux-3.13.0.orig/drivers/md/persistent-data/dm-btree-internal.h +++ linux-3.13.0/drivers/md/persistent-data/dm-btree-internal.h @@ -42,6 +42,12 @@ } __packed; +/* + * Locks a block using the btree node validator. + */ +int bn_read_lock(struct dm_btree_info *info, dm_block_t b, + struct dm_block **result); + void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, struct dm_btree_value_type *vt); --- linux-3.13.0.orig/drivers/md/persistent-data/dm-btree-spine.c +++ linux-3.13.0/drivers/md/persistent-data/dm-btree-spine.c @@ -92,7 +92,7 @@ /*----------------------------------------------------------------*/ -static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, +int bn_read_lock(struct dm_btree_info *info, dm_block_t b, struct dm_block **result) { return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); --- linux-3.13.0.orig/drivers/md/persistent-data/dm-btree.c +++ linux-3.13.0/drivers/md/persistent-data/dm-btree.c @@ -828,22 +828,26 @@ * FIXME: We shouldn't use a recursive algorithm when we have limited stack * space. 
Also this only works for single level trees. */ -static int walk_node(struct ro_spine *s, dm_block_t block, +static int walk_node(struct dm_btree_info *info, dm_block_t block, int (*fn)(void *context, uint64_t *keys, void *leaf), void *context) { int r; unsigned i, nr; + struct dm_block *node; struct btree_node *n; uint64_t keys; - r = ro_step(s, block); - n = ro_node(s); + r = bn_read_lock(info, block, &node); + if (r) + return r; + + n = dm_block_data(node); nr = le32_to_cpu(n->header.nr_entries); for (i = 0; i < nr; i++) { if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { - r = walk_node(s, value64(n, i), fn, context); + r = walk_node(info, value64(n, i), fn, context); if (r) goto out; } else { @@ -855,7 +859,7 @@ } out: - ro_pop(s); + dm_tm_unlock(info->tm, node); return r; } @@ -863,15 +867,7 @@ int (*fn)(void *context, uint64_t *keys, void *leaf), void *context) { - int r; - struct ro_spine spine; - BUG_ON(info->levels > 1); - - init_ro_spine(&spine, info); - r = walk_node(&spine, root, fn, context); - exit_ro_spine(&spine); - - return r; + return walk_node(info, root, fn, context); } EXPORT_SYMBOL_GPL(dm_btree_walk); --- linux-3.13.0.orig/drivers/md/persistent-data/dm-space-map-common.c +++ linux-3.13.0/drivers/md/persistent-data/dm-space-map-common.c @@ -245,6 +245,10 @@ return -EINVAL; } + /* + * We need to set this before the dm_tm_new_block() call below. + */ + ll->nr_blocks = nr_blocks; for (i = old_blocks; i < blocks; i++) { struct dm_block *b; struct disk_index_entry idx; @@ -252,6 +256,7 @@ r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b); if (r < 0) return r; + idx.blocknr = cpu_to_le64(dm_block_location(b)); r = dm_tm_unlock(ll->tm, b); @@ -266,7 +271,6 @@ return r; } - ll->nr_blocks = nr_blocks; return 0; } --- linux-3.13.0.orig/drivers/md/persistent-data/dm-space-map-metadata.c +++ linux-3.13.0/drivers/md/persistent-data/dm-space-map-metadata.c @@ -91,6 +91,69 @@ dm_block_t block; }; +struct bop_ring_buffer { + unsigned begin; + unsigned end; + struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1]; +}; + +static void brb_init(struct bop_ring_buffer *brb) +{ + brb->begin = 0; + brb->end = 0; +} + +static bool brb_empty(struct bop_ring_buffer *brb) +{ + return brb->begin == brb->end; +} + +static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old) +{ + unsigned r = old + 1; + return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r; +} + +static int brb_push(struct bop_ring_buffer *brb, + enum block_op_type type, dm_block_t b) +{ + struct block_op *bop; + unsigned next = brb_next(brb, brb->end); + + /* + * We don't allow the last bop to be filled, this way we can + * differentiate between full and empty. 
+ */ + if (next == brb->begin) + return -ENOMEM; + + bop = brb->bops + brb->end; + bop->type = type; + bop->block = b; + + brb->end = next; + + return 0; +} + +static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) +{ + struct block_op *bop; + + if (brb_empty(brb)) + return -ENODATA; + + bop = brb->bops + brb->begin; + result->type = bop->type; + result->block = bop->block; + + brb->begin = brb_next(brb, brb->begin); + + return 0; +} + +/*----------------------------------------------------------------*/ + struct sm_metadata { struct dm_space_map sm; @@ -101,25 +164,20 @@ unsigned recursion_count; unsigned allocated_this_transaction; - unsigned nr_uncommitted; - struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + struct bop_ring_buffer uncommitted; struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) { - struct block_op *op; + int r = brb_push(&smm->uncommitted, type, b); - if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { + if (r) { DMERR("too many recursive allocations"); return -ENOMEM; } - op = smm->uncommitted + smm->nr_uncommitted++; - op->type = type; - op->block = b; - return 0; } @@ -158,11 +216,17 @@ return -ENOMEM; } - if (smm->recursion_count == 1 && smm->nr_uncommitted) { - while (smm->nr_uncommitted && !r) { - smm->nr_uncommitted--; - r = commit_bop(smm, smm->uncommitted + - smm->nr_uncommitted); + if (smm->recursion_count == 1) { + while (!brb_empty(&smm->uncommitted)) { + struct block_op bop; + + r = brb_pop(&smm->uncommitted, &bop); + if (r) { + DMERR("bug in bop ring buffer"); + break; + } + + r = commit_bop(smm, &bop); if (r) break; } @@ -217,7 +281,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result) { - int r, i; + int r; + unsigned i; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); unsigned adjustment = 0; @@ -225,8 +290,10 @@ * We may have some uncommitted adjustments to add. This list * should always be really short. */ - for (i = 0; i < smm->nr_uncommitted; i++) { - struct block_op *op = smm->uncommitted + i; + for (i = smm->uncommitted.begin; + i != smm->uncommitted.end; + i = brb_next(&smm->uncommitted, i)) { + struct block_op *op = smm->uncommitted.bops + i; if (op->block != b) continue; @@ -254,7 +321,8 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result) { - int r, i, adjustment = 0; + int r, adjustment = 0; + unsigned i; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); uint32_t rc; @@ -262,8 +330,11 @@ * We may have some uncommitted adjustments to add. This list * should always be really short. */ - for (i = 0; i < smm->nr_uncommitted; i++) { - struct block_op *op = smm->uncommitted + i; + for (i = smm->uncommitted.begin; + i != smm->uncommitted.end; + i = brb_next(&smm->uncommitted, i)) { + + struct block_op *op = smm->uncommitted.bops + i; if (op->block != b) continue; @@ -608,20 +679,38 @@ * Flick into a mode where all blocks get allocated in the new area. */ smm->begin = old_len; - memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + memcpy(sm, &bootstrap_ops, sizeof(*sm)); /* * Extend. */ r = sm_ll_extend(&smm->ll, extra_blocks); + if (r) + goto out; /* - * Switch back to normal behaviour. + * We repeatedly increment then commit until the commit doesn't + * allocate any new blocks. 
*/ - memcpy(&smm->sm, &ops, sizeof(smm->sm)); - for (i = old_len; !r && i < smm->begin; i++) - r = sm_ll_inc(&smm->ll, i, &ev); + do { + for (i = old_len; !r && i < smm->begin; i++) { + r = sm_ll_inc(&smm->ll, i, &ev); + if (r) + goto out; + } + old_len = smm->begin; + + r = sm_ll_commit(&smm->ll); + if (r) + goto out; + + } while (old_len != smm->begin); +out: + /* + * Switch back to normal behaviour. + */ + memcpy(sm, &ops, sizeof(*sm)); return r; } @@ -653,7 +742,7 @@ smm->begin = superblock + 1; smm->recursion_count = 0; smm->allocated_this_transaction = 0; - smm->nr_uncommitted = 0; + brb_init(&smm->uncommitted); threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -695,7 +784,7 @@ smm->begin = 0; smm->recursion_count = 0; smm->allocated_this_transaction = 0; - smm->nr_uncommitted = 0; + brb_init(&smm->uncommitted); threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); --- linux-3.13.0.orig/drivers/md/persistent-data/dm-transaction-manager.c +++ linux-3.13.0/drivers/md/persistent-data/dm-transaction-manager.c @@ -154,7 +154,7 @@ if (r < 0) return r; - return 0; + return dm_bm_flush(tm->bm); } EXPORT_SYMBOL_GPL(dm_tm_pre_commit); @@ -164,8 +164,9 @@ return -EWOULDBLOCK; wipe_shadow_table(tm); + dm_bm_unlock(root); - return dm_bm_flush_and_unlock(tm->bm, root); + return dm_bm_flush(tm->bm); } EXPORT_SYMBOL_GPL(dm_tm_commit); --- linux-3.13.0.orig/drivers/md/persistent-data/dm-transaction-manager.h +++ linux-3.13.0/drivers/md/persistent-data/dm-transaction-manager.h @@ -38,18 +38,17 @@ /* * We use a 2-phase commit here. * - * i) In the first phase the block manager is told to start flushing, and - * the changes to the space map are written to disk. You should interrogate - * your particular space map to get detail of its root node etc. to be - * included in your superblock. + * i) Make all changes for the transaction *except* for the superblock. + * Then call dm_tm_pre_commit() to flush them to disk. * - * ii) @root will be committed last. You shouldn't use more than the - * first 512 bytes of @root if you wish the transaction to survive a power - * failure. You *must* have a write lock held on @root for both stage (i) - * and (ii). The commit will drop the write lock. + * ii) Lock your superblock. Update. Then call dm_tm_commit() which will + * unlock the superblock and flush it. No other blocks should be updated + * during this period. Care should be taken to never unlock a partially + * updated superblock; perform any operations that could fail *before* you + * take the superblock lock. */ int dm_tm_pre_commit(struct dm_transaction_manager *tm); -int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root); +int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock); /* * These methods are the only way to get hold of a writeable block. --- linux-3.13.0.orig/drivers/md/raid1.c +++ linux-3.13.0/drivers/md/raid1.c @@ -97,6 +97,7 @@ struct pool_info *pi = data; struct r1bio *r1_bio; struct bio *bio; + int need_pages; int i, j; r1_bio = r1bio_pool_alloc(gfp_flags, pi); @@ -119,15 +120,15 @@ * RESYNC_PAGES for each bio. 
*/ if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) - j = pi->raid_disks; + need_pages = pi->raid_disks; else - j = 1; - while(j--) { + need_pages = 1; + for (j = 0; j < need_pages; j++) { bio = r1_bio->bios[j]; bio->bi_vcnt = RESYNC_PAGES; if (bio_alloc_pages(bio, gfp_flags)) - goto out_free_bio; + goto out_free_pages; } /* If not user-requests, copy the page pointers to all bios */ if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { @@ -141,6 +142,14 @@ return r1_bio; +out_free_pages: + while (--j >= 0) { + struct bio_vec *bv; + + bio_for_each_segment_all(bv, r1_bio->bios[j], i) + __free_page(bv->bv_page); + } + out_free_bio: while (++j < pi->raid_disks) bio_put(r1_bio->bios[j]); @@ -533,11 +542,7 @@ has_nonrot_disk = 0; choose_next_idle = 0; - if (conf->mddev->recovery_cp < MaxSector && - (this_sector + sectors >= conf->next_resync)) - choose_first = 1; - else - choose_first = 0; + choose_first = (conf->mddev->recovery_cp < this_sector + sectors); for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { sector_t dist; @@ -558,7 +563,7 @@ if (test_bit(WriteMostly, &rdev->flags)) { /* Don't balance among write-mostly, just * use the first as a last resort */ - if (best_disk < 0) { + if (best_dist_disk < 0) { if (is_badblock(rdev, this_sector, sectors, &first_bad, &bad_sectors)) { if (first_bad < this_sector) @@ -567,7 +572,8 @@ best_good_sectors = first_bad - this_sector; } else best_good_sectors = sectors; - best_disk = disk; + best_dist_disk = disk; + best_pending_disk = disk; } continue; } @@ -824,7 +830,7 @@ * there is no normal IO happeing. It must arrange to call * lower_barrier when the particular background IO completes. */ -static void raise_barrier(struct r1conf *conf) +static void raise_barrier(struct r1conf *conf, sector_t sector_nr) { spin_lock_irq(&conf->resync_lock); @@ -834,6 +840,7 @@ /* block any new IO from starting */ conf->barrier++; + conf->next_resync = sector_nr; /* For these conditions we must wait: * A: while the array is in frozen state @@ -842,14 +849,17 @@ * C: next_resync + RESYNC_SECTORS > start_next_window, meaning * next resync will reach to the window which normal bios are * handling. + * D: while there are any active requests in the current window. 
*/ wait_event_lock_irq(conf->wait_barrier, !conf->array_frozen && conf->barrier < RESYNC_DEPTH && + conf->current_window_requests == 0 && (conf->start_next_window >= conf->next_resync + RESYNC_SECTORS), conf->resync_lock); + conf->nr_pending++; spin_unlock_irq(&conf->resync_lock); } @@ -859,6 +869,7 @@ BUG_ON(conf->barrier <= 0); spin_lock_irqsave(&conf->resync_lock, flags); conf->barrier--; + conf->nr_pending--; spin_unlock_irqrestore(&conf->resync_lock, flags); wake_up(&conf->wait_barrier); } @@ -870,12 +881,10 @@ if (conf->array_frozen || !bio) wait = true; else if (conf->barrier && bio_data_dir(bio) == WRITE) { - if (conf->next_resync < RESYNC_WINDOW_SECTORS) - wait = true; - else if ((conf->next_resync - RESYNC_WINDOW_SECTORS - >= bio_end_sector(bio)) || - (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector)) + if ((conf->mddev->curr_resync_completed + >= bio_end_sector(bio)) || + (conf->next_resync + NEXT_NORMALIO_DISTANCE + <= bio->bi_sector)) wait = false; else wait = true; @@ -912,8 +921,8 @@ } if (bio && bio_data_dir(bio) == WRITE) { - if (conf->next_resync + NEXT_NORMALIO_DISTANCE - <= bio->bi_sector) { + if (bio->bi_sector >= + conf->mddev->curr_resync_completed) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1178,6 +1187,7 @@ atomic_read(&bitmap->behind_writes) == 0); } r1_bio->read_disk = rdisk; + r1_bio->start_next_window = 0; read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); bio_trim(read_bio, r1_bio->sector - bio->bi_sector, @@ -1491,12 +1501,12 @@ mddev->degraded++; set_bit(Faulty, &rdev->flags); spin_unlock_irqrestore(&conf->device_lock, flags); - /* - * if recovery is running, make sure it aborts. - */ - set_bit(MD_RECOVERY_INTR, &mddev->recovery); } else set_bit(Faulty, &rdev->flags); + /* + * if recovery is running, make sure it aborts. 
+ */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(MD_CHANGE_DEVS, &mddev->flags); printk(KERN_ALERT "md/raid1:%s: Disk failure on %s, disabling device.\n" @@ -1538,8 +1548,13 @@ mempool_destroy(conf->r1buf_pool); conf->r1buf_pool = NULL; + spin_lock_irq(&conf->resync_lock); conf->next_resync = 0; conf->start_next_window = MaxSector; + conf->current_window_requests += + conf->next_window_requests; + conf->next_window_requests = 0; + spin_unlock_irq(&conf->resync_lock); } static int raid1_spare_active(struct mddev *mddev) @@ -1952,11 +1967,15 @@ for (i = 0; i < conf->raid_disks * 2; i++) { int j; int size; + int uptodate; struct bio *b = r1_bio->bios[i]; if (b->bi_end_io != end_sync_read) continue; - /* fixup the bio for reuse */ + /* fixup the bio for reuse, but preserve BIO_UPTODATE */ + uptodate = test_bit(BIO_UPTODATE, &b->bi_flags); bio_reset(b); + if (!uptodate) + clear_bit(BIO_UPTODATE, &b->bi_flags); b->bi_vcnt = vcnt; b->bi_size = r1_bio->sectors << 9; b->bi_sector = r1_bio->sector + @@ -1989,11 +2008,14 @@ int j; struct bio *pbio = r1_bio->bios[primary]; struct bio *sbio = r1_bio->bios[i]; + int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags); if (sbio->bi_end_io != end_sync_read) continue; + /* Now we can 'fixup' the BIO_UPTODATE flag */ + set_bit(BIO_UPTODATE, &sbio->bi_flags); - if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { + if (uptodate) { for (j = vcnt; j-- ; ) { struct page *p, *s; p = pbio->bi_io_vec[j].bv_page; @@ -2008,7 +2030,7 @@ if (j >= 0) atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) - && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { + && uptodate)) { /* No need to write to this device. */ sbio->bi_end_io = NULL; rdev_dec_pending(conf->mirrors[i].rdev, mddev); @@ -2133,7 +2155,7 @@ d--; rdev = conf->mirrors[d].rdev; if (rdev && - test_bit(In_sync, &rdev->flags)) + !test_bit(Faulty, &rdev->flags)) r1_sync_page_io(rdev, sect, s, conf->tmppage, WRITE); } @@ -2145,7 +2167,7 @@ d--; rdev = conf->mirrors[d].rdev; if (rdev && - test_bit(In_sync, &rdev->flags)) { + !test_bit(Faulty, &rdev->flags)) { if (r1_sync_page_io(rdev, sect, s, conf->tmppage, READ)) { atomic_add(s, &rdev->corrected_errors); @@ -2522,9 +2544,8 @@ bitmap_cond_end_sync(mddev->bitmap, sector_nr); r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO); - raise_barrier(conf); - conf->next_resync = sector_nr; + raise_barrier(conf, sector_nr); rcu_read_lock(); /* --- linux-3.13.0.orig/drivers/md/raid10.c +++ linux-3.13.0/drivers/md/raid10.c @@ -1698,13 +1698,12 @@ spin_unlock_irqrestore(&conf->device_lock, flags); return; } - if (test_and_clear_bit(In_sync, &rdev->flags)) { + if (test_and_clear_bit(In_sync, &rdev->flags)) mddev->degraded++; - /* - * if recovery is running, make sure it aborts. - */ - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - } + /* + * If recovery is running, make sure it aborts. 
+ */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags); @@ -2968,6 +2967,7 @@ */ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { end_reshape(conf); + close_sync(conf); return 0; } @@ -4422,7 +4422,7 @@ read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_sync_read; read_bio->bi_rw = READ; - read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); + read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); read_bio->bi_flags |= 1 << BIO_UPTODATE; read_bio->bi_vcnt = 0; read_bio->bi_size = 0; --- linux-3.13.0.orig/drivers/md/raid5.c +++ linux-3.13.0/drivers/md/raid5.c @@ -64,6 +64,10 @@ #define cpu_to_group(cpu) cpu_to_node(cpu) #define ANY_GROUP NUMA_NO_NODE +static bool devices_handle_discard_safely = false; +module_param(devices_handle_discard_safely, bool, 0644); +MODULE_PARM_DESC(devices_handle_discard_safely, + "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); static struct workqueue_struct *raid5_wq; /* * Stripe cache @@ -292,9 +296,12 @@ BUG_ON(atomic_read(&conf->active_stripes)==0); if (test_bit(STRIPE_HANDLE, &sh->state)) { if (test_bit(STRIPE_DELAYED, &sh->state) && - !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { list_add_tail(&sh->lru, &conf->delayed_list); - else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + if (atomic_read(&conf->preread_active_stripes) + < IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && sh->bm_seq - conf->seq_write > 0) list_add_tail(&sh->lru, &conf->bitmap_list); else { @@ -675,8 +682,10 @@ || !conf->inactive_blocked), *(conf->hash_locks + hash)); conf->inactive_blocked = 0; - } else + } else { init_stripe(sh, sector, previous); + atomic_inc(&sh->count); + } } else { spin_lock(&conf->device_lock); if (atomic_read(&sh->count)) { @@ -695,13 +704,11 @@ sh->group = NULL; } } + atomic_inc(&sh->count); spin_unlock(&conf->device_lock); } } while (sh == NULL); - if (sh) - atomic_inc(&sh->count); - spin_unlock_irq(conf->hash_locks + hash); return sh; } @@ -2111,6 +2118,7 @@ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); } else { if (!uptodate) { + set_bit(STRIPE_DEGRADED, &sh->state); set_bit(WriteErrorSeen, &rdev->flags); set_bit(R5_WriteError, &sh->dev[i].flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) @@ -2890,8 +2898,14 @@ (s->failed >= 1 && fdev[0]->toread) || (s->failed >= 2 && fdev[1]->toread) || (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && + (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) && !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || - (sh->raid_conf->level == 6 && s->failed && s->to_write))) { + ((sh->raid_conf->level == 6 || + sh->sector >= sh->raid_conf->mddev->recovery_cp) + && s->failed && s->to_write && + (s->to_write - s->non_overwrite < + sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) && + (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) { /* we would like to get this block, possibly by computing it, * otherwise read it if the backing disk is insync */ @@ -3065,7 +3079,8 @@ * generate correct data from the parity. 
*/ if (conf->max_degraded == 2 || - (recovery_cp < MaxSector && sh->sector >= recovery_cp)) { + (recovery_cp < MaxSector && sh->sector >= recovery_cp && + s->failed == 0)) { /* Calculate the real rcw later - for now make it * look like rcw is cheaper */ @@ -3090,7 +3105,8 @@ !test_bit(R5_LOCKED, &dev->flags) && !(test_bit(R5_UPTODATE, &dev->flags) || test_bit(R5_Wantcompute, &dev->flags))) { - if (test_bit(R5_Insync, &dev->flags)) rcw++; + if (test_bit(R5_Insync, &dev->flags)) + rcw++; else rcw += 2*disks; } @@ -3111,10 +3127,10 @@ !(test_bit(R5_UPTODATE, &dev->flags) || test_bit(R5_Wantcompute, &dev->flags)) && test_bit(R5_Insync, &dev->flags)) { - if ( - test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - pr_debug("Read_old block " - "%d for r-m-w\n", i); + if (test_bit(STRIPE_PREREAD_ACTIVE, + &sh->state)) { + pr_debug("Read_old block %d for r-m-w\n", + i); set_bit(R5_LOCKED, &dev->flags); set_bit(R5_Wantread, &dev->flags); s->locked++; @@ -3137,10 +3153,9 @@ !(test_bit(R5_UPTODATE, &dev->flags) || test_bit(R5_Wantcompute, &dev->flags))) { rcw++; - if (!test_bit(R5_Insync, &dev->flags)) - continue; /* it's a failed drive */ - if ( - test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + if (test_bit(R5_Insync, &dev->flags) && + test_bit(STRIPE_PREREAD_ACTIVE, + &sh->state)) { pr_debug("Read_old block " "%d for Reconstruct\n", i); set_bit(R5_LOCKED, &dev->flags); @@ -3778,6 +3793,8 @@ set_bit(R5_Wantwrite, &dev->flags); if (prexor) continue; + if (s.failed > 1) + continue; if (!test_bit(R5_Insync, &dev->flags) || ((i == sh->pd_idx || i == sh->qd_idx) && s.failed == 0)) @@ -5511,23 +5528,43 @@ return sectors * (raid_disks - conf->max_degraded); } +static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) +{ + safe_put_page(percpu->spare_page); + kfree(percpu->scribble); + percpu->spare_page = NULL; + percpu->scribble = NULL; +} + +static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) +{ + if (conf->level == 6 && !percpu->spare_page) + percpu->spare_page = alloc_page(GFP_KERNEL); + if (!percpu->scribble) + percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); + + if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { + free_scratch_buffer(conf, percpu); + return -ENOMEM; + } + + return 0; +} + static void raid5_free_percpu(struct r5conf *conf) { - struct raid5_percpu *percpu; unsigned long cpu; if (!conf->percpu) return; - get_online_cpus(); - for_each_possible_cpu(cpu) { - percpu = per_cpu_ptr(conf->percpu, cpu); - safe_put_page(percpu->spare_page); - kfree(percpu->scribble); - } #ifdef CONFIG_HOTPLUG_CPU unregister_cpu_notifier(&conf->cpu_notify); #endif + + get_online_cpus(); + for_each_possible_cpu(cpu) + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); put_online_cpus(); free_percpu(conf->percpu); @@ -5554,15 +5591,7 @@ switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (conf->level == 6 && !percpu->spare_page) - percpu->spare_page = alloc_page(GFP_KERNEL); - if (!percpu->scribble) - percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); - - if (!percpu->scribble || - (conf->level == 6 && !percpu->spare_page)) { - safe_put_page(percpu->spare_page); - kfree(percpu->scribble); + if (alloc_scratch_buffer(conf, percpu)) { pr_err("%s: failed memory allocation for cpu%ld\n", __func__, cpu); return notifier_from_errno(-ENOMEM); @@ -5570,10 +5599,7 @@ break; case CPU_DEAD: case CPU_DEAD_FROZEN: - safe_put_page(percpu->spare_page); - kfree(percpu->scribble); - percpu->spare_page = NULL; - 
percpu->scribble = NULL; + free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); break; default: break; @@ -5585,40 +5611,29 @@ static int raid5_alloc_percpu(struct r5conf *conf) { unsigned long cpu; - struct page *spare_page; - struct raid5_percpu __percpu *allcpus; - void *scribble; - int err; + int err = 0; - allcpus = alloc_percpu(struct raid5_percpu); - if (!allcpus) + conf->percpu = alloc_percpu(struct raid5_percpu); + if (!conf->percpu) return -ENOMEM; - conf->percpu = allcpus; + +#ifdef CONFIG_HOTPLUG_CPU + conf->cpu_notify.notifier_call = raid456_cpu_notify; + conf->cpu_notify.priority = 0; + err = register_cpu_notifier(&conf->cpu_notify); + if (err) + return err; +#endif get_online_cpus(); - err = 0; for_each_present_cpu(cpu) { - if (conf->level == 6) { - spare_page = alloc_page(GFP_KERNEL); - if (!spare_page) { - err = -ENOMEM; - break; - } - per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; - } - scribble = kmalloc(conf->scribble_len, GFP_KERNEL); - if (!scribble) { - err = -ENOMEM; + err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); + if (err) { + pr_err("%s: failed memory allocation for cpu%ld\n", + __func__, cpu); break; } - per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; } -#ifdef CONFIG_HOTPLUG_CPU - conf->cpu_notify.notifier_call = raid456_cpu_notify; - conf->cpu_notify.priority = 0; - if (err == 0) - err = register_cpu_notifier(&conf->cpu_notify); -#endif put_online_cpus(); return err; @@ -6113,7 +6128,7 @@ mddev->queue->limits.discard_granularity = stripe; /* * unaligned part of discard request will be ignored, so can't - * guarantee discard_zerors_data + * guarantee discard_zeroes_data */ mddev->queue->limits.discard_zeroes_data = 0; @@ -6138,6 +6153,18 @@ !bdev_get_queue(rdev->bdev)-> limits.discard_zeroes_data) discard_supported = false; + /* Unfortunately, discard_zeroes_data is not currently + * a guarantee - just a hint. So we only allow DISCARD + * if the sysadmin has confirmed that only safe devices + * are in use by setting a module parameter. 
+ */ + if (!devices_handle_discard_safely) { + if (discard_supported) { + pr_info("md/raid456: discard support disabled due to uncertainty.\n"); + pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); + } + discard_supported = false; + } } if (discard_supported && --- linux-3.13.0.orig/drivers/media/common/siano/sms-cards.c +++ linux-3.13.0/drivers/media/common/siano/sms-cards.c @@ -157,6 +157,12 @@ .type = SMS_DENVER_2160, .default_mode = DEVICE_MODE_DAB_TDMB, }, + [SMS1XXX_BOARD_PCTV_77E] = { + .name = "Hauppauge microStick 77e", + .type = SMS_NOVA_B0, + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0, + .default_mode = DEVICE_MODE_DVBT_BDA, + }, }; struct sms_board *sms_get_board(unsigned id) --- linux-3.13.0.orig/drivers/media/common/siano/sms-cards.h +++ linux-3.13.0/drivers/media/common/siano/sms-cards.h @@ -45,6 +45,7 @@ #define SMS1XXX_BOARD_SIANO_RIO 18 #define SMS1XXX_BOARD_SIANO_DENVER_1530 19 #define SMS1XXX_BOARD_SIANO_DENVER_2160 20 +#define SMS1XXX_BOARD_PCTV_77E 21 struct sms_board_gpio_cfg { int lna_vhf_exist; --- linux-3.13.0.orig/drivers/media/dvb-core/dvb-usb-ids.h +++ linux-3.13.0/drivers/media/dvb-core/dvb-usb-ids.h @@ -239,6 +239,7 @@ #define USB_PID_AVERMEDIA_A835B_4835 0x4835 #define USB_PID_AVERMEDIA_1867 0x1867 #define USB_PID_AVERMEDIA_A867 0xa867 +#define USB_PID_AVERMEDIA_H335 0x0335 #define USB_PID_AVERMEDIA_TWINSTAR 0x0825 #define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006 #define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009 @@ -358,6 +359,7 @@ #define USB_PID_FRIIO_WHITE 0x0001 #define USB_PID_TVWAY_PLUS 0x0002 #define USB_PID_SVEON_STV20 0xe39d +#define USB_PID_SVEON_STV20_RTL2832U 0xd39d #define USB_PID_SVEON_STV22 0xe401 #define USB_PID_SVEON_STV22_IT9137 0xe411 #define USB_PID_AZUREWAVE_AZ6027 0x3275 @@ -371,4 +373,5 @@ #define USB_PID_CTVDIGDUAL_V2 0xe410 #define USB_PID_PCTV_2002E 0x025c #define USB_PID_PCTV_2002E_SE 0x025d +#define USB_PID_SVEON_STV27 0xd3af #endif --- linux-3.13.0.orig/drivers/media/dvb-frontends/cx24117.c +++ linux-3.13.0/drivers/media/dvb-frontends/cx24117.c @@ -1166,7 +1166,7 @@ switch (demod) { case 0: - dev_err(&state->priv->i2c->dev, + dev_err(&i2c->dev, "%s: Error attaching frontend %d\n", KBUILD_MODNAME, demod); goto error1; --- linux-3.13.0.orig/drivers/media/dvb-frontends/dib8000.c +++ linux-3.13.0/drivers/media/dvb-frontends/dib8000.c @@ -157,15 +157,10 @@ return ret; } -static u16 dib8000_read_word(struct dib8000_state *state, u16 reg) +static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg) { u16 ret; - if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { - dprintk("could not acquire lock"); - return 0; - } - state->i2c_write_buffer[0] = reg >> 8; state->i2c_write_buffer[1] = reg & 0xff; @@ -183,6 +178,21 @@ dprintk("i2c read error on %d", reg); ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; + + return ret; +} + +static u16 dib8000_read_word(struct dib8000_state *state, u16 reg) +{ + u16 ret; + + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { + dprintk("could not acquire lock"); + return 0; + } + + ret = __dib8000_read_word(state, reg); + mutex_unlock(&state->i2c_buffer_lock); return ret; @@ -192,8 +202,15 @@ { u16 rw[2]; - rw[0] = dib8000_read_word(state, reg + 0); - rw[1] = dib8000_read_word(state, reg + 1); + if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { + dprintk("could not acquire lock"); + return 0; + } + + rw[0] = __dib8000_read_word(state, reg + 0); + rw[1] = __dib8000_read_word(state, reg + 1); + + 
mutex_unlock(&state->i2c_buffer_lock); return ((rw[0] << 16) | (rw[1])); } @@ -2445,7 +2462,8 @@ if (state->revision == 0x8090) internal = dib8000_read32(state, 23) / 1000; - if (state->autosearch_state == AS_SEARCHING_FFT) { + if ((state->revision >= 0x8002) && + (state->autosearch_state == AS_SEARCHING_FFT)) { dib8000_write_word(state, 37, 0x0065); /* P_ctrl_pha_off_max default values */ dib8000_write_word(state, 116, 0x0000); /* P_ana_gain to 0 */ @@ -2481,7 +2499,8 @@ dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (1 << 13)); /* P_restart_ccg = 1 */ dib8000_write_word(state, 770, (dib8000_read_word(state, 770) & 0xdfff) | (0 << 13)); /* P_restart_ccg = 0 */ dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x7ff) | (0 << 15) | (1 << 13)); /* P_restart_search = 0; */ - } else if (state->autosearch_state == AS_SEARCHING_GUARD) { + } else if ((state->revision >= 0x8002) && + (state->autosearch_state == AS_SEARCHING_GUARD)) { c->transmission_mode = TRANSMISSION_MODE_8K; c->guard_interval = GUARD_INTERVAL_1_8; c->inversion = 0; @@ -2583,7 +2602,8 @@ struct dib8000_state *state = fe->demodulator_priv; u16 irq_pending = dib8000_read_word(state, 1284); - if (state->autosearch_state == AS_SEARCHING_FFT) { + if ((state->revision >= 0x8002) && + (state->autosearch_state == AS_SEARCHING_FFT)) { if (irq_pending & 0x1) { dprintk("dib8000_autosearch_irq: max correlation result available"); return 3; --- linux-3.13.0.orig/drivers/media/dvb-frontends/ds3000.c +++ linux-3.13.0/drivers/media/dvb-frontends/ds3000.c @@ -864,6 +864,13 @@ memcpy(&state->frontend.ops, &ds3000_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; + + /* + * Some devices like T480 starts with voltage on. Be sure + * to turn voltage off during init, as this can otherwise + * interfere with Unicable SCR systems. 
+ */ + ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF); return &state->frontend; error3: --- linux-3.13.0.orig/drivers/media/dvb-frontends/m88rs2000.c +++ linux-3.13.0/drivers/media/dvb-frontends/m88rs2000.c @@ -110,28 +110,94 @@ return b1[0]; } +static u32 m88rs2000_get_mclk(struct dvb_frontend *fe) +{ + struct m88rs2000_state *state = fe->demodulator_priv; + u32 mclk; + u8 reg; + /* Must not be 0x00 or 0xff */ + reg = m88rs2000_readreg(state, 0x86); + if (!reg || reg == 0xff) + return 0; + + reg /= 2; + reg += 1; + + mclk = (u32)(reg * RS2000_FE_CRYSTAL_KHZ + 28 / 2) / 28; + + return mclk; +} + +static int m88rs2000_set_carrieroffset(struct dvb_frontend *fe, s16 offset) +{ + struct m88rs2000_state *state = fe->demodulator_priv; + u32 mclk; + s32 tmp; + u8 reg; + int ret; + + mclk = m88rs2000_get_mclk(fe); + if (!mclk) + return -EINVAL; + + tmp = (offset * 4096 + (s32)mclk / 2) / (s32)mclk; + if (tmp < 0) + tmp += 4096; + + /* Carrier Offset */ + ret = m88rs2000_writereg(state, 0x9c, (u8)(tmp >> 4)); + + reg = m88rs2000_readreg(state, 0x9d); + reg &= 0xf; + reg |= (u8)(tmp & 0xf) << 4; + + ret |= m88rs2000_writereg(state, 0x9d, reg); + + return ret; +} + static int m88rs2000_set_symbolrate(struct dvb_frontend *fe, u32 srate) { struct m88rs2000_state *state = fe->demodulator_priv; int ret; - u32 temp; + u64 temp; + u32 mclk; u8 b[3]; if ((srate < 1000000) || (srate > 45000000)) return -EINVAL; + mclk = m88rs2000_get_mclk(fe); + if (!mclk) + return -EINVAL; + temp = srate / 1000; - temp *= 11831; - temp /= 68; - temp -= 3; + temp *= 1 << 24; + + do_div(temp, mclk); b[0] = (u8) (temp >> 16) & 0xff; b[1] = (u8) (temp >> 8) & 0xff; b[2] = (u8) temp & 0xff; + ret = m88rs2000_writereg(state, 0x93, b[2]); ret |= m88rs2000_writereg(state, 0x94, b[1]); ret |= m88rs2000_writereg(state, 0x95, b[0]); + if (srate > 10000000) + ret |= m88rs2000_writereg(state, 0xa0, 0x20); + else + ret |= m88rs2000_writereg(state, 0xa0, 0x60); + + ret |= m88rs2000_writereg(state, 0xa1, 0xe0); + + if (srate > 12000000) + ret |= m88rs2000_writereg(state, 0xa3, 0x20); + else if (srate > 2800000) + ret |= m88rs2000_writereg(state, 0xa3, 0x98); + else + ret |= m88rs2000_writereg(state, 0xa3, 0x90); + deb_info("m88rs2000: m88rs2000_set_symbolrate\n"); return ret; } @@ -261,8 +327,6 @@ struct inittab fe_reset[] = { {DEMOD_WRITE, 0x00, 0x01}, - {DEMOD_WRITE, 0xf1, 0xbf}, - {DEMOD_WRITE, 0x00, 0x01}, {DEMOD_WRITE, 0x20, 0x81}, {DEMOD_WRITE, 0x21, 0x80}, {DEMOD_WRITE, 0x10, 0x33}, @@ -305,9 +369,6 @@ {DEMOD_WRITE, 0x9b, 0x64}, {DEMOD_WRITE, 0x9e, 0x00}, {DEMOD_WRITE, 0x9f, 0xf8}, - {DEMOD_WRITE, 0xa0, 0x20}, - {DEMOD_WRITE, 0xa1, 0xe0}, - {DEMOD_WRITE, 0xa3, 0x38}, {DEMOD_WRITE, 0x98, 0xff}, {DEMOD_WRITE, 0xc0, 0x0f}, {DEMOD_WRITE, 0x89, 0x01}, @@ -540,9 +601,8 @@ struct dtv_frontend_properties *c = &fe->dtv_property_cache; fe_status_t status; int i, ret = 0; - s32 tmp; u32 tuner_freq; - u16 offset = 0; + s16 offset = 0; u8 reg; state->no_lock_count = 0; @@ -567,29 +627,26 @@ if (ret < 0) return -ENODEV; - offset = tuner_freq - c->frequency; - - /* calculate offset assuming 96000kHz*/ - tmp = offset; - tmp *= 65536; + offset = (s16)((s32)tuner_freq - c->frequency); - tmp = (2 * tmp + 96000) / (2 * 96000); - if (tmp < 0) - tmp += 65536; - - offset = tmp & 0xffff; + /* default mclk value 96.4285 * 2 * 1000 = 192857 */ + if (((c->frequency % 192857) >= (192857 - 3000)) || + (c->frequency % 192857) <= 3000) + ret = m88rs2000_writereg(state, 0x86, 0xc2); + else + ret = m88rs2000_writereg(state, 0x86, 0xc6); - ret = 
m88rs2000_writereg(state, 0x9a, 0x30); - /* Unknown usually 0xc6 sometimes 0xc1 */ - reg = m88rs2000_readreg(state, 0x86); - ret |= m88rs2000_writereg(state, 0x86, reg); - /* Offset lower nibble always 0 */ - ret |= m88rs2000_writereg(state, 0x9c, (offset >> 8)); - ret |= m88rs2000_writereg(state, 0x9d, offset & 0xf0); + ret |= m88rs2000_set_carrieroffset(fe, offset); + if (ret < 0) + return -ENODEV; + /* Reset demod by symbol rate */ + if (c->symbol_rate > 27500000) + ret = m88rs2000_writereg(state, 0xf1, 0xa4); + else + ret = m88rs2000_writereg(state, 0xf1, 0xbf); - /* Reset Demod */ - ret = m88rs2000_tab_set(state, fe_reset); + ret |= m88rs2000_tab_set(state, fe_reset); if (ret < 0) return -ENODEV; @@ -655,6 +712,22 @@ return 0; } +static int m88rs2000_get_tune_settings(struct dvb_frontend *fe, + struct dvb_frontend_tune_settings *tune) +{ + struct dtv_frontend_properties *c = &fe->dtv_property_cache; + + if (c->symbol_rate > 3000000) + tune->min_delay_ms = 2000; + else + tune->min_delay_ms = 3000; + + tune->step_size = c->symbol_rate / 16000; + tune->max_drift = c->symbol_rate / 2000; + + return 0; +} + static int m88rs2000_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct m88rs2000_state *state = fe->demodulator_priv; @@ -686,7 +759,7 @@ .symbol_rate_tolerance = 500, /* ppm */ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | - FE_CAN_QPSK | + FE_CAN_QPSK | FE_CAN_INVERSION_AUTO | FE_CAN_FEC_AUTO }, @@ -706,6 +779,7 @@ .set_frontend = m88rs2000_set_frontend, .get_frontend = m88rs2000_get_frontend, + .get_tune_settings = m88rs2000_get_tune_settings, }; struct dvb_frontend *m88rs2000_attach(const struct m88rs2000_config *config, --- linux-3.13.0.orig/drivers/media/dvb-frontends/m88rs2000.h +++ linux-3.13.0/drivers/media/dvb-frontends/m88rs2000.h @@ -53,6 +53,8 @@ } #endif /* CONFIG_DVB_M88RS2000 */ +#define RS2000_FE_CRYSTAL_KHZ 27000 + enum { DEMOD_WRITE = 0x1, WRITE_DELAY = 0x10, --- linux-3.13.0.orig/drivers/media/dvb-frontends/nxt200x.c +++ linux-3.13.0/drivers/media/dvb-frontends/nxt200x.c @@ -40,7 +40,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Max transfer size done by I2C transfer functions */ -#define MAX_XFER_SIZE 64 +#define MAX_XFER_SIZE 256 #define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw" #define NXT2004_DEFAULT_FIRMWARE "dvb-fe-nxt2004.fw" --- linux-3.13.0.orig/drivers/media/dvb-frontends/tda10071.c +++ linux-3.13.0/drivers/media/dvb-frontends/tda10071.c @@ -667,6 +667,7 @@ struct dtv_frontend_properties *c = &fe->dtv_property_cache; int ret, i; u8 mode, rolloff, pilot, inversion, div; + fe_modulation_t modulation; dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \ "frequency=%d symbol_rate=%d inversion=%d pilot=%d " \ @@ -701,10 +702,13 @@ switch (c->delivery_system) { case SYS_DVBS: + modulation = QPSK; rolloff = 0; pilot = 2; break; case SYS_DVBS2: + modulation = c->modulation; + switch (c->rolloff) { case ROLLOFF_20: rolloff = 2; @@ -749,7 +753,7 @@ for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && - c->modulation == TDA10071_MODCOD[i].modulation && + modulation == TDA10071_MODCOD[i].modulation && c->fec_inner == TDA10071_MODCOD[i].fec) { mode = TDA10071_MODCOD[i].val; dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", --- linux-3.13.0.orig/drivers/media/i2c/adv7604.c +++ linux-3.13.0/drivers/media/i2c/adv7604.c @@ -1752,7 +1752,7 @@ v4l2_info(sd, "HDCP keys read: %s%s\n", (hdmi_read(sd, 0x04) & 0x20) ? 
"yes" : "no", (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : ""); - if (!is_hdmi(sd)) { + if (is_hdmi(sd)) { bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01; bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01; bool audio_mute = io_read(sd, 0x65) & 0x40; --- linux-3.13.0.orig/drivers/media/i2c/ov7670.c +++ linux-3.13.0/drivers/media/i2c/ov7670.c @@ -1109,7 +1109,7 @@ * windows that fall outside that. */ for (i = 0; i < n_win_sizes; i++) { - struct ov7670_win_size *win = &info->devtype->win_sizes[index]; + struct ov7670_win_size *win = &info->devtype->win_sizes[i]; if (info->min_width && win->width < info->min_width) continue; if (info->min_height && win->height < info->min_height) --- linux-3.13.0.orig/drivers/media/i2c/smiapp-pll.c +++ linux-3.13.0/drivers/media/i2c/smiapp-pll.c @@ -67,7 +67,7 @@ { dev_dbg(dev, "pre_pll_clk_div\t%d\n", pll->pre_pll_clk_div); dev_dbg(dev, "pll_multiplier \t%d\n", pll->pll_multiplier); - if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) { + if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) { dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div); dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div); } @@ -77,7 +77,7 @@ dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz); dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz); dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz); - if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) { + if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) { dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n", pll->op_sys_clk_freq_hz); dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n", --- linux-3.13.0.orig/drivers/media/i2c/smiapp/smiapp-core.c +++ linux-3.13.0/drivers/media/i2c/smiapp/smiapp-core.c @@ -2139,7 +2139,7 @@ ret = smiapp_set_compose(subdev, fh, sel); break; default: - BUG(); + ret = -EINVAL; } mutex_unlock(&sensor->mutex); @@ -2625,7 +2625,9 @@ pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE; pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]; + mutex_lock(&sensor->mutex); rval = smiapp_update_mode(sensor); + mutex_unlock(&sensor->mutex); if (rval) { dev_err(&client->dev, "update mode failed\n"); goto out_nvm_release; --- linux-3.13.0.orig/drivers/media/media-device.c +++ linux-3.13.0/drivers/media/media-device.c @@ -93,6 +93,7 @@ struct media_entity *ent; struct media_entity_desc u_ent; + memset(&u_ent, 0, sizeof(u_ent)); if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) return -EFAULT; @@ -105,8 +106,6 @@ if (ent->name) { strncpy(u_ent.name, ent->name, sizeof(u_ent.name)); u_ent.name[sizeof(u_ent.name) - 1] = '\0'; - } else { - memset(u_ent.name, 0, sizeof(u_ent.name)); } u_ent.type = ent->type; u_ent.revision = ent->revision; --- linux-3.13.0.orig/drivers/media/pci/cx18/cx18-driver.c +++ linux-3.13.0/drivers/media/pci/cx18/cx18-driver.c @@ -327,13 +327,16 @@ struct i2c_client *c; u8 eedata[256]; + memset(tv, 0, sizeof(*tv)); + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return; strlcpy(c->name, "cx18 tveeprom tmp", sizeof(c->name)); c->adapter = &cx->i2c_adap[0]; c->addr = 0xa0 >> 1; - memset(tv, 0, sizeof(*tv)); if (tveeprom_read(c, eedata, sizeof(eedata))) goto ret; @@ -1088,6 +1091,7 @@ setup.addr = ADDR_UNSET; setup.type = cx->options.tuner; setup.mode_mask = T_ANALOG_TV; /* matches TV tuners */ + setup.config = NULL; if (cx->options.radio > 0) setup.mode_mask |= T_RADIO; setup.tuner_callback = (setup.type == TUNER_XC2028) ? 
--- linux-3.13.0.orig/drivers/media/pci/ivtv/ivtv-alsa-pcm.c +++ linux-3.13.0/drivers/media/pci/ivtv/ivtv-alsa-pcm.c @@ -159,6 +159,12 @@ /* Instruct the CX2341[56] to start sending packets */ snd_ivtv_lock(itvsc); + + if (ivtv_init_on_first_open(itv)) { + snd_ivtv_unlock(itvsc); + return -ENXIO; + } + s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM]; v4l2_fh_init(&item.fh, s->vdev); --- linux-3.13.0.orig/drivers/media/platform/omap3isp/isppreview.c +++ linux-3.13.0/drivers/media/platform/omap3isp/isppreview.c @@ -1079,6 +1079,7 @@ */ static void preview_config_input_size(struct isp_prev_device *prev, u32 active) { + const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK]; struct isp_device *isp = to_isp_device(prev); unsigned int sph = prev->crop.left; unsigned int eph = prev->crop.left + prev->crop.width - 1; @@ -1086,6 +1087,14 @@ unsigned int elv = prev->crop.top + prev->crop.height - 1; u32 features; + if (format->code != V4L2_MBUS_FMT_Y8_1X8 && + format->code != V4L2_MBUS_FMT_Y10_1X10) { + sph -= 2; + eph += 2; + slv -= 2; + elv += 2; + } + features = (prev->params.params[0].features & active) | (prev->params.params[1].features & ~active); --- linux-3.13.0.orig/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ linux-3.13.0/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -177,21 +177,6 @@ mutex_unlock(&dev->mfc_mutex); } -static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file) -{ - struct video_device *vdev = video_devdata(file); - - if (!vdev) { - mfc_err("failed to get video_device"); - return MFCNODE_INVALID; - } - if (vdev->index == 0) - return MFCNODE_DECODER; - else if (vdev->index == 1) - return MFCNODE_ENCODER; - return MFCNODE_INVALID; -} - static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev) { mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT); @@ -705,6 +690,7 @@ /* Open an MFC node */ static int s5p_mfc_open(struct file *file) { + struct video_device *vdev = video_devdata(file); struct s5p_mfc_dev *dev = video_drvdata(file); struct s5p_mfc_ctx *ctx = NULL; struct vb2_queue *q; @@ -742,7 +728,7 @@ /* Mark context as idle */ clear_work_bit_irqsave(ctx); dev->ctx[ctx->num] = ctx; - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) { + if (vdev == dev->vfd_dec) { ctx->type = MFCINST_DECODER; ctx->c_ops = get_dec_codec_ops(); s5p_mfc_dec_init(ctx); @@ -752,7 +738,7 @@ mfc_err("Failed to setup mfc controls\n"); goto err_ctrls_setup; } - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) { + } else if (vdev == dev->vfd_enc) { ctx->type = MFCINST_ENCODER; ctx->c_ops = get_enc_codec_ops(); /* only for encoder */ @@ -797,10 +783,10 @@ q = &ctx->vq_dst; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q->drv_priv = &ctx->fh; - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) { + if (vdev == dev->vfd_dec) { q->io_modes = VB2_MMAP; q->ops = get_dec_queue_ops(); - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) { + } else if (vdev == dev->vfd_enc) { q->io_modes = VB2_MMAP | VB2_USERPTR; q->ops = get_enc_queue_ops(); } else { @@ -819,10 +805,10 @@ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; q->io_modes = VB2_MMAP; q->drv_priv = &ctx->fh; - if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) { + if (vdev == dev->vfd_dec) { q->io_modes = VB2_MMAP; q->ops = get_dec_queue_ops(); - } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) { + } else if (vdev == dev->vfd_enc) { q->io_modes = VB2_MMAP | VB2_USERPTR; q->ops = get_enc_queue_ops(); } else { --- linux-3.13.0.orig/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ 
linux-3.13.0/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -30,7 +30,7 @@ /* Offset base used to differentiate between CAPTURE and OUTPUT * while mmaping */ -#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2) +#define DST_QUEUE_OFF_BASE (1 << 30) #define MFC_BANK1_ALLOC_CTX 0 #define MFC_BANK2_ALLOC_CTX 1 @@ -115,15 +115,6 @@ }; /** - * enum s5p_mfc_node_type - The type of an MFC device node. - */ -enum s5p_mfc_node_type { - MFCNODE_INVALID = -1, - MFCNODE_DECODER = 0, - MFCNODE_ENCODER = 1, -}; - -/** * enum s5p_mfc_inst_type - The type of an MFC instance. */ enum s5p_mfc_inst_type { --- linux-3.13.0.orig/drivers/media/platform/sh_veu.c +++ linux-3.13.0/drivers/media/platform/sh_veu.c @@ -1183,6 +1183,7 @@ } *vdev = sh_veu_videodev; + vdev->v4l2_dev = &veu->v4l2_dev; spin_lock_init(&veu->lock); mutex_init(&veu->fop_lock); vdev->lock = &veu->fop_lock; --- linux-3.13.0.orig/drivers/media/platform/vsp1/vsp1_video.c +++ linux-3.13.0/drivers/media/platform/vsp1/vsp1_video.c @@ -622,8 +622,6 @@ if (vb->num_planes < format->num_planes) return -EINVAL; - buf->video = video; - for (i = 0; i < vb->num_planes; ++i) { buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i); buf->length[i] = vb2_plane_size(vb, i); --- linux-3.13.0.orig/drivers/media/platform/vsp1/vsp1_video.h +++ linux-3.13.0/drivers/media/platform/vsp1/vsp1_video.h @@ -89,7 +89,6 @@ } struct vsp1_video_buffer { - struct vsp1_video *video; struct vb2_buffer buf; struct list_head queue; --- linux-3.13.0.orig/drivers/media/tuners/fc2580.c +++ linux-3.13.0/drivers/media/tuners/fc2580.c @@ -195,7 +195,7 @@ f_ref = 2UL * priv->cfg->clock / r_val; n_val = div_u64_rem(f_vco, f_ref, &k_val); - k_val_reg = 1UL * k_val * (1 << 20) / f_ref; + k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref); ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); if (ret < 0) @@ -348,8 +348,8 @@ if (ret < 0) goto err; - ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ - fc2580_if_filter_lut[i].mul / 1000000000); + ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock * + fc2580_if_filter_lut[i].mul, 1000000000)); if (ret < 0) goto err; --- linux-3.13.0.orig/drivers/media/tuners/fc2580_priv.h +++ linux-3.13.0/drivers/media/tuners/fc2580_priv.h @@ -22,6 +22,7 @@ #define FC2580_PRIV_H #include "fc2580.h" +#include struct fc2580_reg_val { u8 reg; --- linux-3.13.0.orig/drivers/media/tuners/xc4000.c +++ linux-3.13.0/drivers/media/tuners/xc4000.c @@ -93,7 +93,7 @@ struct firmware_description *firm; int firm_size; u32 if_khz; - u32 freq_hz; + u32 freq_hz, freq_offset; u32 bandwidth; u8 video_standard; u8 rf_mode; @@ -1157,14 +1157,14 @@ case SYS_ATSC: dprintk(1, "%s() VSB modulation\n", __func__); priv->rf_mode = XC_RF_MODE_AIR; - priv->freq_hz = c->frequency - 1750000; + priv->freq_offset = 1750000; priv->video_standard = XC4000_DTV6; type = DTV6; break; case SYS_DVBC_ANNEX_B: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = XC_RF_MODE_CABLE; - priv->freq_hz = c->frequency - 1750000; + priv->freq_offset = 1750000; priv->video_standard = XC4000_DTV6; type = DTV6; break; @@ -1173,23 +1173,23 @@ dprintk(1, "%s() OFDM\n", __func__); if (bw == 0) { if (c->frequency < 400000000) { - priv->freq_hz = c->frequency - 2250000; + priv->freq_offset = 2250000; } else { - priv->freq_hz = c->frequency - 2750000; + priv->freq_offset = 2750000; } priv->video_standard = XC4000_DTV7_8; type = DTV78; } else if (bw <= 6000000) { priv->video_standard = XC4000_DTV6; - priv->freq_hz = c->frequency - 1750000; + priv->freq_offset = 1750000; type = 
DTV6; } else if (bw <= 7000000) { priv->video_standard = XC4000_DTV7; - priv->freq_hz = c->frequency - 2250000; + priv->freq_offset = 2250000; type = DTV7; } else { priv->video_standard = XC4000_DTV8; - priv->freq_hz = c->frequency - 2750000; + priv->freq_offset = 2750000; type = DTV8; } priv->rf_mode = XC_RF_MODE_AIR; @@ -1200,6 +1200,8 @@ goto fail; } + priv->freq_hz = c->frequency - priv->freq_offset; + dprintk(1, "%s() frequency=%d (compensated)\n", __func__, priv->freq_hz); @@ -1520,7 +1522,7 @@ { struct xc4000_priv *priv = fe->tuner_priv; - *freq = priv->freq_hz; + *freq = priv->freq_hz + priv->freq_offset; if (debug) { mutex_lock(&priv->lock); --- linux-3.13.0.orig/drivers/media/tuners/xc5000.c +++ linux-3.13.0/drivers/media/tuners/xc5000.c @@ -55,7 +55,7 @@ u32 if_khz; u16 xtal_khz; - u32 freq_hz; + u32 freq_hz, freq_offset; u32 bandwidth; u8 video_standard; u8 rf_mode; @@ -755,13 +755,13 @@ case SYS_ATSC: dprintk(1, "%s() VSB modulation\n", __func__); priv->rf_mode = XC_RF_MODE_AIR; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; priv->video_standard = DTV6; break; case SYS_DVBC_ANNEX_B: dprintk(1, "%s() QAM modulation\n", __func__); priv->rf_mode = XC_RF_MODE_CABLE; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; priv->video_standard = DTV6; break; case SYS_ISDBT: @@ -776,15 +776,15 @@ switch (bw) { case 6000000: priv->video_standard = DTV6; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; break; case 7000000: priv->video_standard = DTV7; - priv->freq_hz = freq - 2250000; + priv->freq_offset = 2250000; break; case 8000000: priv->video_standard = DTV8; - priv->freq_hz = freq - 2750000; + priv->freq_offset = 2750000; break; default: printk(KERN_ERR "xc5000 bandwidth not set!\n"); @@ -798,15 +798,15 @@ priv->rf_mode = XC_RF_MODE_CABLE; if (bw <= 6000000) { priv->video_standard = DTV6; - priv->freq_hz = freq - 1750000; + priv->freq_offset = 1750000; b = 6; } else if (bw <= 7000000) { priv->video_standard = DTV7; - priv->freq_hz = freq - 2250000; + priv->freq_offset = 2250000; b = 7; } else { priv->video_standard = DTV7_8; - priv->freq_hz = freq - 2750000; + priv->freq_offset = 2750000; b = 8; } dprintk(1, "%s() Bandwidth %dMHz (%d)\n", __func__, @@ -817,6 +817,8 @@ return -EINVAL; } + priv->freq_hz = freq - priv->freq_offset; + dprintk(1, "%s() frequency=%d (compensated to %d)\n", __func__, freq, priv->freq_hz); @@ -1067,7 +1069,7 @@ { struct xc5000_priv *priv = fe->tuner_priv; dprintk(1, "%s()\n", __func__); - *freq = priv->freq_hz; + *freq = priv->freq_hz + priv->freq_offset; return 0; } --- linux-3.13.0.orig/drivers/media/usb/au0828/au0828-cards.c +++ linux-3.13.0/drivers/media/usb/au0828/au0828-cards.c @@ -36,6 +36,11 @@ au0828_clear(dev, REG_000, 0x10); } +/* + * WARNING: There's a quirks table at sound/usb/quirks-table.h + * that should also be updated every time a new device with V4L2 support + * is added here. 
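+ * Both tables are keyed by USB vendor/product ID, so a new entry below
+ * usually needs a matching quirk entry as well, or the device's USB audio
+ * side may not be handled correctly.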
+ */ struct au0828_board au0828_boards[] = { [AU0828_BOARD_UNKNOWN] = { .name = "Unknown board", --- linux-3.13.0.orig/drivers/media/usb/au0828/au0828-video.c +++ linux-3.13.0/drivers/media/usb/au0828/au0828-video.c @@ -787,11 +787,27 @@ /* * Auvitek au0828 analog stream enable - * Please set interface0 to AS5 before enable the stream */ static int au0828_analog_stream_enable(struct au0828_dev *d) { + struct usb_interface *iface; + int ret; + dprintk(1, "au0828_analog_stream_enable called\n"); + + iface = usb_ifnum_to_if(d->usbdev, 0); + if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) { + dprintk(1, "Changing intf#0 to alt 5\n"); + /* set au0828 interface0 to AS5 here again */ + ret = usb_set_interface(d->usbdev, 0, 5); + if (ret < 0) { + printk(KERN_INFO "Au0828 can't set alt setting to 5!\n"); + return -EBUSY; + } + } + + /* FIXME: size should be calculated using d->width, d->height */ + au0828_writereg(d, AU0828_SENSORCTRL_VBI_103, 0x00); au0828_writereg(d, 0x106, 0x00); /* set x position */ @@ -1002,15 +1018,6 @@ return -ERESTARTSYS; } if (dev->users == 0) { - /* set au0828 interface0 to AS5 here again */ - ret = usb_set_interface(dev->usbdev, 0, 5); - if (ret < 0) { - mutex_unlock(&dev->lock); - printk(KERN_INFO "Au0828 can't set alternate to 5!\n"); - kfree(fh); - return -EBUSY; - } - au0828_analog_stream_enable(dev); au0828_analog_stream_reset(dev); @@ -1252,13 +1259,6 @@ } } - /* set au0828 interface0 to AS5 here again */ - ret = usb_set_interface(dev->usbdev, 0, 5); - if (ret < 0) { - printk(KERN_INFO "Au0828 can't set alt setting to 5!\n"); - return -EBUSY; - } - au0828_analog_stream_enable(dev); return 0; --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/af9035.c +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/af9035.c @@ -1539,6 +1539,8 @@ &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 
2)", NULL) }, { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, + { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900, + &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, af9035_id_table); --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/anysee.c +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/anysee.c @@ -442,6 +442,7 @@ * IOD[0] ZL10353 1=enabled * IOE[0] tuner 0=enabled * tuner is behind ZL10353 I2C-gate + * tuner is behind TDA10023 I2C-gate * * E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)" * PCB: 508TC (rev0.6) @@ -956,7 +957,7 @@ if (fe && adap->fe[1]) { /* attach tuner for 2nd FE */ - fe = dvb_attach(dvb_pll_attach, adap->fe[0], + fe = dvb_attach(dvb_pll_attach, adap->fe[1], (0xc0 >> 1), &d->i2c_adap, DVB_PLL_SAMSUNG_DTOS403IH102A); } --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/it913x.c +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/it913x.c @@ -799,6 +799,9 @@ { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2, &it913x_properties, "Digital Dual TV Receiver CTVDIGDUAL_V2", RC_MAP_IT913X_V1) }, + { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335, + &it913x_properties, "Avermedia H335", + RC_MAP_IT913X_V2) }, {} /* Terminating entry */ }; --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -350,6 +350,7 @@ { struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *lme_int = adap_to_priv(adap); + struct usb_host_endpoint *ep; lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC); @@ -371,6 +372,12 @@ adap, 8); + /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */ + ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); + + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) + lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa), + lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC); --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h @@ -68,7 +68,7 @@ #else static inline struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe, - struct mxl111sf_state *mxl_state + struct mxl111sf_state *mxl_state, struct mxl111sf_tuner_config *cfg) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -105,7 +105,7 @@ ret = -EINVAL; } - pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data); + pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]); fail: return ret; } --- linux-3.13.0.orig/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ linux-3.13.0/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -1427,6 +1427,14 @@ &rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) }, { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A, &rtl2832u_props, "Crypto ReDi PC 50 A", NULL) }, + { DVB_USB_DEVICE(USB_VID_KYE, 0x707f, + &rtl2832u_props, "Genius TVGo DVB-T03", NULL) }, + { DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd395, + &rtl2832u_props, "Peak DVB-T USB", NULL) }, + { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV20_RTL2832U, + &rtl2832u_props, "Sveon STV20", NULL) }, + { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV27, + &rtl2832u_props, "Sveon STV27", NULL) }, { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131, &rtl2832u_props, "Astrometa DVB-T2", NULL) }, --- linux-3.13.0.orig/drivers/media/usb/dvb-usb/af9005.c +++ 
linux-3.13.0/drivers/media/usb/dvb-usb/af9005.c @@ -1081,9 +1081,12 @@ err("usb_register failed. (%d)", result); return result; } +#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE) + /* FIXME: convert to todays kernel IR infrastructure */ rc_decode = symbol_request(af9005_rc_decode); rc_keys = symbol_request(rc_map_af9005_table); rc_keys_size = symbol_request(rc_map_af9005_table_size); +#endif if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) { err("af9005_rc_decode function not found, disabling remote"); af9005_properties.rc.legacy.rc_query = NULL; --- linux-3.13.0.orig/drivers/media/usb/dvb-usb/cxusb.c +++ linux-3.13.0/drivers/media/usb/dvb-usb/cxusb.c @@ -149,6 +149,7 @@ int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); + int ret; int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) @@ -173,7 +174,8 @@ if (1 + msg[i].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[i].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = 0; obuf[1] = msg[i].len; @@ -193,12 +195,14 @@ if (3 + msg[i].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[i].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } if (1 + msg[i + 1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[i + 1].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[i].len; obuf[1] = msg[i+1].len; @@ -223,7 +227,8 @@ if (2 + msg[i].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[i].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[i].addr; obuf[1] = msg[i].len; @@ -237,8 +242,14 @@ } } + if (i == num) + ret = num; + else + ret = -EREMOTEIO; + +unlock: mutex_unlock(&d->i2c_mutex); - return i == num ? 
num : -EREMOTEIO; + return ret; } static u32 cxusb_i2c_func(struct i2c_adapter *adapter) --- linux-3.13.0.orig/drivers/media/usb/dvb-usb/dw2102.c +++ linux-3.13.0/drivers/media/usb/dvb-usb/dw2102.c @@ -301,6 +301,7 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); + int ret; if (!d) return -ENODEV; @@ -316,7 +317,8 @@ if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[0].addr << 1; @@ -340,7 +342,8 @@ if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[1].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[0].addr << 1; @@ -357,7 +360,8 @@ if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[1].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[0].addr << 1; @@ -386,15 +390,17 @@ break; } + ret = num; +unlock: mutex_unlock(&d->i2c_mutex); - return num; + return ret; } static int dw2104_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); - int len, i, j; + int len, i, j, ret; if (!d) return -ENODEV; @@ -430,7 +436,8 @@ if (2 + msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } dw210x_op_rw(d->udev, 0xc3, @@ -466,7 +473,8 @@ if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[j].addr << 1; @@ -481,15 +489,18 @@ } } + ret = num; +unlock: mutex_unlock(&d->i2c_mutex); - return num; + return ret; } static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); + int ret; int i; if (!d) @@ -506,7 +517,8 @@ if (2 + msg[1].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[1].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; @@ -530,7 +542,8 @@ if (2 + msg[0].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[0].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[0].addr << 1; obuf[1] = msg[0].len; @@ -556,9 +569,11 @@ msg[i].flags == 0 ? 
">>>" : "<<<"); debug_dump(msg[i].buf, msg[i].len, deb_xfer); } + ret = num; +unlock: mutex_unlock(&d->i2c_mutex); - return num; + return ret; } static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], @@ -566,7 +581,7 @@ { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_device *udev; - int len, i, j; + int len, i, j, ret; if (!d) return -ENODEV; @@ -618,7 +633,8 @@ if (msg[j].len > sizeof(ibuf)) { warn("i2c rd: len=%d is too big!\n", msg[j].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } dw210x_op_rw(d->udev, 0x91, 0, 0, @@ -652,7 +668,8 @@ if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[j + 1].len; @@ -671,7 +688,8 @@ if (2 + msg[j].len > sizeof(obuf)) { warn("i2c wr: len=%d is too big!\n", msg[j].len); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto unlock; } obuf[0] = msg[j].len + 1; obuf[1] = (msg[j].addr << 1); @@ -685,9 +703,11 @@ } } } + ret = num; +unlock: mutex_unlock(&d->i2c_mutex); - return num; + return ret; } static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], --- linux-3.13.0.orig/drivers/media/usb/em28xx/em28xx-dvb.c +++ linux-3.13.0/drivers/media/usb/em28xx/em28xx-dvb.c @@ -693,7 +693,8 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; - struct em28xx *dev = fe->dvb->priv; + struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv; + struct em28xx *dev = i2c_bus->dev; #ifdef CONFIG_GPIOLIB struct em28xx_dvb *dvb = dev->dvb; int ret; --- linux-3.13.0.orig/drivers/media/usb/em28xx/em28xx-video.c +++ linux-3.13.0/drivers/media/usb/em28xx/em28xx-video.c @@ -148,7 +148,10 @@ em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field); buf->vb.v4l2_buf.sequence = dev->field_count++; - buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; + if (dev->progressive) + buf->vb.v4l2_buf.field = V4L2_FIELD_NONE; + else + buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); @@ -696,13 +699,16 @@ } spin_lock_irqsave(&dev->slock, flags); + if (dev->usb_ctl.vid_buf != NULL) { + vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR); + dev->usb_ctl.vid_buf = NULL; + } while (!list_empty(&vidq->active)) { struct em28xx_buffer *buf; buf = list_entry(vidq->active.next, struct em28xx_buffer, list); list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } - dev->usb_ctl.vid_buf = NULL; spin_unlock_irqrestore(&dev->slock, flags); return 0; @@ -724,13 +730,16 @@ } spin_lock_irqsave(&dev->slock, flags); + if (dev->usb_ctl.vbi_buf != NULL) { + vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR); + dev->usb_ctl.vbi_buf = NULL; + } while (!list_empty(&vbiq->active)) { struct em28xx_buffer *buf; buf = list_entry(vbiq->active.next, struct em28xx_buffer, list); list_del(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } - dev->usb_ctl.vbi_buf = NULL; spin_unlock_irqrestore(&dev->slock, flags); return 0; --- linux-3.13.0.orig/drivers/media/usb/gspca/pac7302.c +++ linux-3.13.0/drivers/media/usb/gspca/pac7302.c @@ -928,6 +928,7 @@ {USB_DEVICE(0x093a, 0x2620)}, {USB_DEVICE(0x093a, 0x2621)}, {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, + {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, {USB_DEVICE(0x093a, 0x2625)}, {USB_DEVICE(0x093a, 0x2626)}, --- 
linux-3.13.0.orig/drivers/media/usb/gspca/sn9c20x.c +++ linux-3.13.0/drivers/media/usb/gspca/sn9c20x.c @@ -2359,6 +2359,7 @@ {USB_DEVICE(0x045e, 0x00f4), SN9C20X(OV9650, 0x30, 0)}, {USB_DEVICE(0x145f, 0x013d), SN9C20X(OV7660, 0x21, 0)}, {USB_DEVICE(0x0458, 0x7029), SN9C20X(HV7131R, 0x11, 0)}, + {USB_DEVICE(0x0458, 0x7045), SN9C20X(MT9M112, 0x5d, LED_REVERSE)}, {USB_DEVICE(0x0458, 0x704a), SN9C20X(MT9M112, 0x5d, 0)}, {USB_DEVICE(0x0458, 0x704c), SN9C20X(MT9M112, 0x5d, 0)}, {USB_DEVICE(0xa168, 0x0610), SN9C20X(HV7131R, 0x11, 0)}, --- linux-3.13.0.orig/drivers/media/usb/hdpvr/hdpvr-video.c +++ linux-3.13.0/drivers/media/usb/hdpvr/hdpvr-video.c @@ -82,7 +82,7 @@ } /*=========================================================================*/ -/* bufffer bits */ +/* buffer bits */ /* function expects dev->io_mutex to be hold by caller */ int hdpvr_cancel_queue(struct hdpvr_device *dev) @@ -926,7 +926,7 @@ case V4L2_CID_MPEG_AUDIO_ENCODING: if (dev->flags & HDPVR_FLAG_AC3_CAP) { opt->audio_codec = ctrl->val; - return hdpvr_set_audio(dev, opt->audio_input, + return hdpvr_set_audio(dev, opt->audio_input + 1, opt->audio_codec); } return 0; @@ -1198,7 +1198,7 @@ v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_AUDIO_ENCODING, ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, - 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); + 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC); v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, --- linux-3.13.0.orig/drivers/media/usb/siano/smsusb.c +++ linux-3.13.0/drivers/media/usb/siano/smsusb.c @@ -653,6 +653,8 @@ .driver_info = SMS1XXX_BOARD_ZTE_DVB_DATA_CARD }, { USB_DEVICE(0x19D2, 0x0078), .driver_info = SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD }, + { USB_DEVICE(0x2013, 0x0257), + .driver_info = SMS1XXX_BOARD_PCTV_77E }, { } /* Terminating entry */ }; --- linux-3.13.0.orig/drivers/media/usb/ttusb-dec/ttusbdecfe.c +++ linux-3.13.0/drivers/media/usb/ttusb-dec/ttusbdecfe.c @@ -156,6 +156,9 @@ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + if (cmd->msg_len > sizeof(b) - 4) + return -EINVAL; + memcpy(&b[4], cmd->msg, cmd->msg_len); state->config->send_command(fe, 0x72, --- linux-3.13.0.orig/drivers/media/usb/uvc/uvc_driver.c +++ linux-3.13.0/drivers/media/usb/uvc/uvc_driver.c @@ -1603,12 +1603,12 @@ { struct list_head *p, *n; - usb_put_intf(dev->intf); - usb_put_dev(dev->udev); - uvc_status_cleanup(dev); uvc_ctrl_cleanup_device(dev); + usb_put_intf(dev->intf); + usb_put_dev(dev->udev); + if (dev->vdev.dev) v4l2_device_unregister(&dev->vdev); #ifdef CONFIG_MEDIA_CONTROLLER @@ -2209,6 +2209,24 @@ .bInterfaceClass = USB_CLASS_VIDEO, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_PROBE_DEF }, + /* Alienware X51*/ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x05a9, + .idProduct = 0x2643, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_PROBE_DEF }, + /* Dell XPS M1330 (OmniVision OV7670 webcam) */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x05a9, + .idProduct = 0x7670, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, .driver_info = UVC_QUIRK_PROBE_DEF }, /* Apple Built-In iSight */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE --- linux-3.13.0.orig/drivers/media/usb/uvc/uvc_video.c +++ linux-3.13.0/drivers/media/usb/uvc/uvc_video.c @@ -361,6 +361,14 
@@ * Clocks and timestamps */ +static inline void uvc_video_get_ts(struct timespec *ts) +{ + if (uvc_clock_param == CLOCK_MONOTONIC) + ktime_get_ts(ts); + else + ktime_get_real_ts(ts); +} + static void uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, const __u8 *data, int len) @@ -420,7 +428,7 @@ stream->clock.last_sof = dev_sof; host_sof = usb_get_current_frame_number(stream->dev->udev); - ktime_get_ts(&ts); + uvc_video_get_ts(&ts); /* The UVC specification allows device implementations that can't obtain * the USB frame number to keep their own frame counters as long as they @@ -1011,10 +1019,7 @@ return -ENODATA; } - if (uvc_clock_param == CLOCK_MONOTONIC) - ktime_get_ts(&ts); - else - ktime_get_real_ts(&ts); + uvc_video_get_ts(&ts); buf->buf.v4l2_buf.sequence = stream->sequence; buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec; @@ -1847,7 +1852,25 @@ if (!enable) { uvc_uninit_video(stream, 1); - usb_set_interface(stream->dev->udev, stream->intfnum, 0); + if (stream->intf->num_altsetting > 1) { + usb_set_interface(stream->dev->udev, + stream->intfnum, 0); + } else { + /* UVC doesn't specify how to inform a bulk-based device + * when the video stream is stopped. Windows sends a + * CLEAR_FEATURE(HALT) request to the video streaming + * bulk endpoint, mimic the same behaviour. + */ + unsigned int epnum = stream->header.bEndpointAddress + & USB_ENDPOINT_NUMBER_MASK; + unsigned int dir = stream->header.bEndpointAddress + & USB_ENDPOINT_DIR_MASK; + unsigned int pipe; + + pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir; + usb_clear_halt(stream->dev->udev, pipe); + } + uvc_queue_enable(&stream->queue, 0); uvc_video_clock_cleanup(stream); return 0; --- linux-3.13.0.orig/drivers/media/v4l2-core/v4l2-common.c +++ linux-3.13.0/drivers/media/v4l2-core/v4l2-common.c @@ -431,16 +431,13 @@ /* Bits that must be zero to be aligned */ unsigned int mask = ~((1 << align) - 1); + /* Clamp to aligned min and max */ + x = clamp(x, (min + ~mask) & mask, max & mask); + /* Round to nearest aligned value */ if (align) x = (x + (1 << (align - 1))) & mask; - /* Clamp to aligned value of min and max */ - if (x < min) - x = (min + ~mask) & mask; - else if (x > max) - x = max & mask; - return x; } --- linux-3.13.0.orig/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ linux-3.13.0/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -178,6 +178,9 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { + if (get_user(kp->type, &up->type)) + return -EFAULT; + switch (kp->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -204,17 +207,16 @@ static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || - get_user(kp->type, &up->type)) - return -EFAULT; + if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) + return -EFAULT; return __get_v4l2_format32(kp, up); } static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) { if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) - return -EFAULT; + copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; return __get_v4l2_format32(&kp->format, &up->format); } --- linux-3.13.0.orig/drivers/media/v4l2-core/v4l2-dev.c +++ linux-3.13.0/drivers/media/v4l2-core/v4l2-dev.c @@ -872,8 +872,8 @@ /* Should not happen since we 
thought this minor was free */ WARN_ON(video_device[vdev->minor] != NULL); - video_device[vdev->minor] = vdev; vdev->index = get_index(vdev); + video_device[vdev->minor] = vdev; mutex_unlock(&videodev_lock); if (vdev->ioctl_ops) --- linux-3.13.0.orig/drivers/media/v4l2-core/v4l2-dv-timings.c +++ linux-3.13.0/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -590,10 +590,10 @@ aspect.denominator = 9; } else if (ratio == 34) { aspect.numerator = 4; - aspect.numerator = 3; + aspect.denominator = 3; } else if (ratio == 68) { aspect.numerator = 15; - aspect.numerator = 9; + aspect.denominator = 9; } else { aspect.numerator = hor_landscape + 99; aspect.denominator = 100; --- linux-3.13.0.orig/drivers/media/v4l2-core/videobuf-dma-contig.c +++ linux-3.13.0/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -66,14 +66,11 @@ static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; - struct videobuf_queue *q = map->q; - dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", + dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); map->count++; - videobuf_queue_unlock(q); } static void videobuf_vm_close(struct vm_area_struct *vma) @@ -85,11 +82,12 @@ dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); - if (!--map->count) { + map->count--; + if (0 == map->count) { struct videobuf_dma_contig_memory *mem; dev_dbg(q->dev, "munmap %p q=%p\n", map, q); + videobuf_queue_lock(q); /* We need first to cancel streams, before unmapping */ if (q->streaming) @@ -128,8 +126,8 @@ kfree(map); + videobuf_queue_unlock(q); } - videobuf_queue_unlock(q); } static const struct vm_operations_struct videobuf_vm_ops = { --- linux-3.13.0.orig/drivers/media/v4l2-core/videobuf-dma-sg.c +++ linux-3.13.0/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -338,14 +338,11 @@ static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; - struct videobuf_queue *q = map->q; dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); map->count++; - videobuf_queue_unlock(q); } static void videobuf_vm_close(struct vm_area_struct *vma) @@ -358,9 +355,10 @@ dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); - if (!--map->count) { + map->count--; + if (0 == map->count) { dprintk(1, "munmap %p q=%p\n", map, q); + videobuf_queue_lock(q); for (i = 0; i < VIDEO_MAX_FRAME; i++) { if (NULL == q->bufs[i]) continue; @@ -376,9 +374,9 @@ q->bufs[i]->baddr = 0; q->ops->buf_release(q, q->bufs[i]); } + videobuf_queue_unlock(q); kfree(map); } - videobuf_queue_unlock(q); return; } --- linux-3.13.0.orig/drivers/media/v4l2-core/videobuf-vmalloc.c +++ linux-3.13.0/drivers/media/v4l2-core/videobuf-vmalloc.c @@ -54,14 +54,11 @@ static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; - struct videobuf_queue *q = map->q; dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); map->count++; - videobuf_queue_unlock(q); } static void videobuf_vm_close(struct vm_area_struct *vma) @@ -73,11 +70,12 @@ dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); - videobuf_queue_lock(q); - if (!--map->count) { + 
map->count--; + if (0 == map->count) { struct videobuf_vmalloc_memory *mem; dprintk(1, "munmap %p q=%p\n", map, q); + videobuf_queue_lock(q); /* We need first to cancel streams, before unmapping */ if (q->streaming) @@ -116,8 +114,8 @@ kfree(map); + videobuf_queue_unlock(q); } - videobuf_queue_unlock(q); return; } --- linux-3.13.0.orig/drivers/media/v4l2-core/videobuf2-core.c +++ linux-3.13.0/drivers/media/v4l2-core/videobuf2-core.c @@ -885,7 +885,7 @@ { struct vb2_queue *q = vb->vb2_queue; - if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_memop(q, cookie, vb->planes[plane_no].mem_priv); --- linux-3.13.0.orig/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ linux-3.13.0/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -112,7 +112,7 @@ goto fail_pages_alloc; ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages, - buf->num_pages, 0, size, gfp_flags); + buf->num_pages, 0, size, GFP_KERNEL); if (ret) goto fail_table_alloc; --- linux-3.13.0.orig/drivers/memory/mvebu-devbus.c +++ linux-3.13.0/drivers/memory/mvebu-devbus.c @@ -108,8 +108,19 @@ node->full_name); return err; } - /* Convert bit width to byte width */ - r.bus_width /= 8; + + /* + * The bus width is encoded into the register as 0 for 8 bits, + * and 1 for 16 bits, so we do the necessary conversion here. + */ + if (r.bus_width == 8) + r.bus_width = 0; + else if (r.bus_width == 16) + r.bus_width = 1; + else { + dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width); + return -EINVAL; + } err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", &r.badr_skew); --- linux-3.13.0.orig/drivers/message/fusion/mptspi.c +++ linux-3.13.0/drivers/message/fusion/mptspi.c @@ -1422,6 +1422,11 @@ goto out_mptspi_probe; } + /* VMWare emulation doesn't properly implement WRITE_SAME + */ + if (pdev->subsystem_vendor == 0x15AD) + sh->no_write_same = 1; + spin_lock_irqsave(&ioc->FreeQlock, flags); /* Attach the SCSI Host to the IOC structure --- linux-3.13.0.orig/drivers/mfd/88pm800.c +++ linux-3.13.0/drivers/mfd/88pm800.c @@ -571,7 +571,7 @@ ret = pm800_pages_init(chip); if (ret) { dev_err(&client->dev, "pm800_pages_init failed!\n"); - goto err_page_init; + goto err_device_init; } ret = device_800_init(chip, pdata); @@ -587,7 +587,6 @@ err_device_init: pm800_pages_exit(chip); -err_page_init: err_subchip_alloc: pm80x_deinit(); out_init: --- linux-3.13.0.orig/drivers/mfd/88pm860x-core.c +++ linux-3.13.0/drivers/mfd/88pm860x-core.c @@ -1179,12 +1179,18 @@ chip->companion_addr = pdata->companion_addr; chip->companion = i2c_new_dummy(chip->client->adapter, chip->companion_addr); + if (!chip->companion) { + dev_err(&client->dev, + "Failed to allocate I2C companion device\n"); + return -ENODEV; + } chip->regmap_companion = regmap_init_i2c(chip->companion, &pm860x_regmap_config); if (IS_ERR(chip->regmap_companion)) { ret = PTR_ERR(chip->regmap_companion); dev_err(&chip->companion->dev, "Failed to allocate register map: %d\n", ret); + i2c_unregister_device(chip->companion); return ret; } i2c_set_clientdata(chip->companion, chip); --- linux-3.13.0.orig/drivers/mfd/Kconfig +++ linux-3.13.0/drivers/mfd/Kconfig @@ -1173,9 +1173,6 @@ in various ST Microelectronics and ST-Ericsson embedded Nomadik series. -endmenu -endif - menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 @@ -1206,3 +1203,6 @@ help Platform configuration infrastructure for the ARM Ltd. Versatile Express. 
+ +endmenu +endif --- linux-3.13.0.orig/drivers/mfd/Makefile +++ linux-3.13.0/drivers/mfd/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o -rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o +rtsx_pci-objs := rtsx_pcr.o rtsx_gops.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o --- linux-3.13.0.orig/drivers/mfd/da9055-i2c.c +++ linux-3.13.0/drivers/mfd/da9055-i2c.c @@ -53,17 +53,25 @@ return 0; } +/* + * DO NOT change the device Ids. The naming is intentionally specific as both + * the PMIC and CODEC parts of this chip are instantiated separately as I2C + * devices (both have configurable I2C addresses, and are to all intents and + * purposes separate). As a result there are specific DA9055 ids for PMIC + * and CODEC, which must be different to operate together. + */ static struct i2c_device_id da9055_i2c_id[] = { - {"da9055", 0}, + {"da9055-pmic", 0}, { } }; +MODULE_DEVICE_TABLE(i2c, da9055_i2c_id); static struct i2c_driver da9055_i2c_driver = { .probe = da9055_i2c_probe, .remove = da9055_i2c_remove, .id_table = da9055_i2c_id, .driver = { - .name = "da9055", + .name = "da9055-pmic", .owner = THIS_MODULE, }, }; --- linux-3.13.0.orig/drivers/mfd/kempld-core.c +++ linux-3.13.0/drivers/mfd/kempld-core.c @@ -322,9 +322,12 @@ return -ENODEV; } - /* Release hardware mutex if aquired */ - if (!(index_reg & KEMPLD_MUTEX_KEY)) + /* Release hardware mutex if acquired */ + if (!(index_reg & KEMPLD_MUTEX_KEY)) { iowrite8(KEMPLD_MUTEX_KEY, pld->io_index); + /* PXT and COMe-cPC2 boards may require a second release */ + iowrite8(KEMPLD_MUTEX_KEY, pld->io_index); + } mutex_unlock(&pld->lock); @@ -626,7 +629,7 @@ if (force_device_id[0]) { for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++) if (strstr(id->ident, force_device_id)) - if (id->callback && id->callback(id)) + if (id->callback && !id->callback(id)) break; if (id->matches[0].slot == DMI_NONE) return -ENODEV; --- linux-3.13.0.orig/drivers/mfd/max77686.c +++ linux-3.13.0/drivers/mfd/max77686.c @@ -104,7 +104,7 @@ max77686->irq_gpio = pdata->irq_gpio; max77686->irq = i2c->irq; - max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config); + max77686->regmap = devm_regmap_init_i2c(i2c, &max77686_regmap_config); if (IS_ERR(max77686->regmap)) { ret = PTR_ERR(max77686->regmap); dev_err(max77686->dev, "Failed to allocate register map: %d\n", @@ -121,6 +121,10 @@ dev_info(max77686->dev, "device found\n"); max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC); + if (!max77686->rtc) { + dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n"); + return -ENODEV; + } i2c_set_clientdata(max77686->rtc, max77686); max77686_irq_init(max77686); --- linux-3.13.0.orig/drivers/mfd/max77693.c +++ linux-3.13.0/drivers/mfd/max77693.c @@ -142,9 +142,18 @@ dev_info(max77693->dev, "device ID: 0x%x\n", reg_data); max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC); + if (!max77693->muic) { + dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n"); + return -ENODEV; + } i2c_set_clientdata(max77693->muic, max77693); max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC); + if (!max77693->haptic) { + dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n"); + ret = -ENODEV; + goto err_i2c_haptic; + } i2c_set_clientdata(max77693->haptic, max77693); /* @@ -178,8 +187,9 @@ max77693_irq_exit(max77693); err_irq: err_regmap_muic: - 
i2c_unregister_device(max77693->muic); i2c_unregister_device(max77693->haptic); +err_i2c_haptic: + i2c_unregister_device(max77693->muic); return ret; } --- linux-3.13.0.orig/drivers/mfd/max8925-i2c.c +++ linux-3.13.0/drivers/mfd/max8925-i2c.c @@ -181,9 +181,18 @@ mutex_init(&chip->io_lock); chip->rtc = i2c_new_dummy(chip->i2c->adapter, RTC_I2C_ADDR); + if (!chip->rtc) { + dev_err(chip->dev, "Failed to allocate I2C device for RTC\n"); + return -ENODEV; + } i2c_set_clientdata(chip->rtc, chip); chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR); + if (!chip->adc) { + dev_err(chip->dev, "Failed to allocate I2C device for ADC\n"); + i2c_unregister_device(chip->rtc); + return -ENODEV; + } i2c_set_clientdata(chip->adc, chip); device_init_wakeup(&client->dev, 1); --- linux-3.13.0.orig/drivers/mfd/max8997.c +++ linux-3.13.0/drivers/mfd/max8997.c @@ -218,10 +218,26 @@ mutex_init(&max8997->iolock); max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC); + if (!max8997->rtc) { + dev_err(max8997->dev, "Failed to allocate I2C device for RTC\n"); + return -ENODEV; + } i2c_set_clientdata(max8997->rtc, max8997); + max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC); + if (!max8997->haptic) { + dev_err(max8997->dev, "Failed to allocate I2C device for Haptic\n"); + ret = -ENODEV; + goto err_i2c_haptic; + } i2c_set_clientdata(max8997->haptic, max8997); + max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC); + if (!max8997->muic) { + dev_err(max8997->dev, "Failed to allocate I2C device for MUIC\n"); + ret = -ENODEV; + goto err_i2c_muic; + } i2c_set_clientdata(max8997->muic, max8997); pm_runtime_set_active(max8997->dev); @@ -248,7 +264,9 @@ err_mfd: mfd_remove_devices(max8997->dev); i2c_unregister_device(max8997->muic); +err_i2c_muic: i2c_unregister_device(max8997->haptic); +err_i2c_haptic: i2c_unregister_device(max8997->rtc); return ret; } --- linux-3.13.0.orig/drivers/mfd/max8998.c +++ linux-3.13.0/drivers/mfd/max8998.c @@ -215,6 +215,10 @@ mutex_init(&max8998->iolock); max8998->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); + if (!max8998->rtc) { + dev_err(&i2c->dev, "Failed to allocate I2C device for RTC\n"); + return -ENODEV; + } i2c_set_clientdata(max8998->rtc, max8998); max8998_irq_init(max8998); --- linux-3.13.0.orig/drivers/mfd/omap-usb-host.c +++ linux-3.13.0/drivers/mfd/omap-usb-host.c @@ -445,7 +445,7 @@ for (i = 0; i < omap->nports; i++) { if (is_ehci_phy_mode(pdata->port_mode[i])) { - reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; + reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; break; } } --- linux-3.13.0.orig/drivers/mfd/rtl8411.c +++ linux-3.13.0/drivers/mfd/rtl8411.c @@ -191,24 +191,25 @@ BPP_LDO_POWB, BPP_LDO_SUSPEND); } -static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +static int rtl8411_do_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage, + int bpp_tuned18_shift, int bpp_asic_1v8) { u8 mask, val; int err; - mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK; + mask = (BPP_REG_TUNED18 << bpp_tuned18_shift) | BPP_PAD_MASK; if (voltage == OUTPUT_3V3) { err = rtsx_pci_write_register(pcr, SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3); if (err < 0) return err; - val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3; + val = (BPP_ASIC_3V3 << bpp_tuned18_shift) | BPP_PAD_3V3; } else if (voltage == OUTPUT_1V8) { err = rtsx_pci_write_register(pcr, SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8); if (err < 0) return err; - val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8; + val = (bpp_asic_1v8 << bpp_tuned18_shift) | 
BPP_PAD_1V8; } else { return -EINVAL; } @@ -216,6 +217,18 @@ return rtsx_pci_write_register(pcr, LDO_CTL, mask, val); } +static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + return rtl8411_do_switch_output_voltage(pcr, voltage, + BPP_TUNED18_SHIFT_8411, BPP_ASIC_1V8); +} + +static int rtl8402_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + return rtl8411_do_switch_output_voltage(pcr, voltage, + BPP_TUNED18_SHIFT_8402, BPP_ASIC_2V0); +} + static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr) { unsigned int card_exist; @@ -295,6 +308,22 @@ .force_power_down = rtl8411_force_power_down, }; +static const struct pcr_ops rtl8402_pcr_ops = { + .fetch_vendor_settings = rtl8411_fetch_vendor_settings, + .extra_init_hw = rtl8411_extra_init_hw, + .optimize_phy = NULL, + .turn_on_led = rtl8411_turn_on_led, + .turn_off_led = rtl8411_turn_off_led, + .enable_auto_blink = rtl8411_enable_auto_blink, + .disable_auto_blink = rtl8411_disable_auto_blink, + .card_power_on = rtl8411_card_power_on, + .card_power_off = rtl8411_card_power_off, + .switch_output_voltage = rtl8402_switch_output_voltage, + .cd_deglitch = rtl8411_cd_deglitch, + .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n, + .force_power_down = rtl8411_force_power_down, +}; + static const struct pcr_ops rtl8411b_pcr_ops = { .fetch_vendor_settings = rtl8411b_fetch_vendor_settings, .extra_init_hw = rtl8411b_extra_init_hw, @@ -441,12 +470,10 @@ 0, }; -void rtl8411_init_params(struct rtsx_pcr *pcr) +void rtl8411_init_common_params(struct rtsx_pcr *pcr) { pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; pcr->num_slots = 2; - pcr->ops = &rtl8411_pcr_ops; - pcr->flags = 0; pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT; pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B; @@ -454,47 +481,29 @@ pcr->aspm_en = ASPM_L1_EN; pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14); pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10); - pcr->ic_version = rtl8411_get_ic_version(pcr); - pcr->sd_pull_ctl_enable_tbl = rtl8411_sd_pull_ctl_enable_tbl; - pcr->sd_pull_ctl_disable_tbl = rtl8411_sd_pull_ctl_disable_tbl; - pcr->ms_pull_ctl_enable_tbl = rtl8411_ms_pull_ctl_enable_tbl; - pcr->ms_pull_ctl_disable_tbl = rtl8411_ms_pull_ctl_disable_tbl; +} + +void rtl8411_init_params(struct rtsx_pcr *pcr) +{ + rtl8411_init_common_params(pcr); + pcr->ops = &rtl8411_pcr_ops; + set_pull_ctrl_tables(pcr, rtl8411); } void rtl8411b_init_params(struct rtsx_pcr *pcr) { - pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; - pcr->num_slots = 2; + rtl8411_init_common_params(pcr); pcr->ops = &rtl8411b_pcr_ops; + if (rtl8411b_is_qfn48(pcr)) + set_pull_ctrl_tables(pcr, rtl8411b_qfn48); + else + set_pull_ctrl_tables(pcr, rtl8411b_qfn64); +} - pcr->flags = 0; - pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT; - pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B; - pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D; - pcr->aspm_en = ASPM_L1_EN; - pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14); - pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10); - - pcr->ic_version = rtl8411_get_ic_version(pcr); - - if (rtl8411b_is_qfn48(pcr)) { - pcr->sd_pull_ctl_enable_tbl = - rtl8411b_qfn48_sd_pull_ctl_enable_tbl; - pcr->sd_pull_ctl_disable_tbl = - rtl8411b_qfn48_sd_pull_ctl_disable_tbl; - pcr->ms_pull_ctl_enable_tbl = - rtl8411b_qfn48_ms_pull_ctl_enable_tbl; - pcr->ms_pull_ctl_disable_tbl = - rtl8411b_qfn48_ms_pull_ctl_disable_tbl; - } else { - pcr->sd_pull_ctl_enable_tbl = - rtl8411b_qfn64_sd_pull_ctl_enable_tbl; - pcr->sd_pull_ctl_disable_tbl = - 
rtl8411b_qfn64_sd_pull_ctl_disable_tbl; - pcr->ms_pull_ctl_enable_tbl = - rtl8411b_qfn64_ms_pull_ctl_enable_tbl; - pcr->ms_pull_ctl_disable_tbl = - rtl8411b_qfn64_ms_pull_ctl_disable_tbl; - } +void rtl8402_init_params(struct rtsx_pcr *pcr) +{ + rtl8411_init_common_params(pcr); + pcr->ops = &rtl8402_pcr_ops; + set_pull_ctrl_tables(pcr, rtl8411); } --- linux-3.13.0.orig/drivers/mfd/rts5227.c +++ linux-3.13.0/drivers/mfd/rts5227.c @@ -130,6 +130,12 @@ static int rts5227_optimize_phy(struct rtsx_pcr *pcr) { + int err; + + err = rtsx_gops_pm_reset(pcr); + if (err < 0) + return err; + /* Optimize RX sensitivity */ return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42); } --- linux-3.13.0.orig/drivers/mfd/rts5249.c +++ linux-3.13.0/drivers/mfd/rts5249.c @@ -130,6 +130,10 @@ { int err; + err = rtsx_gops_pm_reset(pcr); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, PHY_REG_REV, PHY_REG_REV_RESV | PHY_REG_REV_RXIDLE_LATCHED | PHY_REG_REV_P1_EN | PHY_REG_REV_RXIDLE_EN | --- linux-3.13.0.orig/drivers/mfd/rtsx_gops.c +++ linux-3.13.0/drivers/mfd/rtsx_gops.c @@ -0,0 +1,37 @@ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . 
+ * + * Author: + * Micky Ching + */ + +#include +#include "rtsx_pcr.h" + +int rtsx_gops_pm_reset(struct rtsx_pcr *pcr) +{ + int err; + + /* init aspm */ + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0x00); + err = rtsx_pci_update_cfg_byte(pcr, LCTLR, ~LCTLR_ASPM_CTL_MASK, 0x00); + if (err < 0) + return err; + + /* reset PM_CTRL3 before send buffer cmd */ + return rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00); +} --- linux-3.13.0.orig/drivers/mfd/rtsx_pcr.c +++ linux-3.13.0/drivers/mfd/rtsx_pcr.c @@ -57,6 +57,7 @@ { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { 0, } }; @@ -1061,6 +1062,10 @@ case 0x5287: rtl8411b_init_params(pcr); break; + + case 0x5286: + rtl8402_init_params(pcr); + break; } dev_dbg(&(pcr->pci->dev), "PID: 0x%04x, IC version: 0x%02x\n", @@ -1172,7 +1177,7 @@ pcr->msi_en = msi_en; if (pcr->msi_en) { ret = pci_enable_msi(pcidev); - if (ret < 0) + if (ret) pcr->msi_en = false; } --- linux-3.13.0.orig/drivers/mfd/rtsx_pcr.h +++ linux-3.13.0/drivers/mfd/rtsx_pcr.h @@ -30,6 +30,7 @@ void rts5209_init_params(struct rtsx_pcr *pcr); void rts5229_init_params(struct rtsx_pcr *pcr); void rtl8411_init_params(struct rtsx_pcr *pcr); +void rtl8402_init_params(struct rtsx_pcr *pcr); void rts5227_init_params(struct rtsx_pcr *pcr); void rts5249_init_params(struct rtsx_pcr *pcr); void rtl8411b_init_params(struct rtsx_pcr *pcr); @@ -63,4 +64,15 @@ #define rtl8411_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x07) #define rtl8411b_reg_to_sd30_drive_sel_3v3(reg) ((reg) & 0x03) +#define set_pull_ctrl_tables(pcr, __device) \ +do { \ + pcr->sd_pull_ctl_enable_tbl = __device##_sd_pull_ctl_enable_tbl; \ + pcr->sd_pull_ctl_disable_tbl = __device##_sd_pull_ctl_disable_tbl; \ + pcr->ms_pull_ctl_enable_tbl = __device##_ms_pull_ctl_enable_tbl; \ + pcr->ms_pull_ctl_disable_tbl = __device##_ms_pull_ctl_disable_tbl; \ +} while (0) + +/* generic operations */ +int rtsx_gops_pm_reset(struct rtsx_pcr *pcr); + #endif --- linux-3.13.0.orig/drivers/mfd/sec-core.c +++ linux-3.13.0/drivers/mfd/sec-core.c @@ -280,6 +280,10 @@ } sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); + if (!sec_pmic->rtc) { + dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n"); + return -ENODEV; + } i2c_set_clientdata(sec_pmic->rtc, sec_pmic); sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, --- linux-3.13.0.orig/drivers/mfd/tc6393xb.c +++ linux-3.13.0/drivers/mfd/tc6393xb.c @@ -263,6 +263,17 @@ return 0; } +static int tc6393xb_ohci_suspend(struct platform_device *dev) +{ + struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent); + + /* We can't properly store/restore OHCI state, so fail here */ + if (tcpd->resume_restore) + return -EBUSY; + + return tc6393xb_ohci_disable(dev); +} + static int tc6393xb_fb_enable(struct platform_device *dev) { struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent); @@ -403,7 +414,7 @@ .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources), .resources = tc6393xb_ohci_resources, .enable = tc6393xb_ohci_enable, - .suspend = tc6393xb_ohci_disable, + .suspend = tc6393xb_ohci_suspend, .resume = tc6393xb_ohci_enable, .disable = tc6393xb_ohci_disable, }, --- linux-3.13.0.orig/drivers/mfd/tps65910.c +++ linux-3.13.0/drivers/mfd/tps65910.c @@ -255,8 +255,10 @@ ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq, 
IRQF_ONESHOT, pdata->irq_base, tps6591x_irqs_chip, &tps65910->irq_data); - if (ret < 0) + if (ret < 0) { dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret); + tps65910->chip_irq = 0; + } return ret; } --- linux-3.13.0.orig/drivers/misc/mei/amthif.c +++ linux-3.13.0/drivers/misc/mei/amthif.c @@ -177,7 +177,7 @@ unsigned long timeout; int i; - /* Only Posible if we are in timeout */ + /* Only possible if we are in timeout */ if (!cl || cl != &dev->iamthif_cl) { dev_dbg(&dev->pdev->dev, "bad file ext.\n"); return -ETIMEDOUT; @@ -249,7 +249,7 @@ cb->response_buffer.size); dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx); - /* length is being turncated to PAGE_SIZE, however, + /* length is being truncated to PAGE_SIZE, however, * the buf_idx may point beyond */ length = min_t(size_t, length, (cb->buf_idx - *offset)); @@ -316,6 +316,7 @@ mei_hdr.host_addr = dev->iamthif_cl.host_client_id; mei_hdr.me_addr = dev->iamthif_cl.me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = 0; dev->iamthif_msg_buf_index += mei_hdr.length; ret = mei_write_message(dev, &mei_hdr, dev->iamthif_msg_buf); if (ret) @@ -477,6 +478,7 @@ mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = 0; if (*slots >= msg_slots) { mei_hdr.length = len; --- linux-3.13.0.orig/drivers/misc/mei/bus.c +++ linux-3.13.0/drivers/misc/mei/bus.c @@ -71,7 +71,7 @@ dev_dbg(dev, "Device probe\n"); - strncpy(id.name, dev_name(dev), sizeof(id.name)); + strlcpy(id.name, dev_name(dev), sizeof(id.name)); return driver->probe(device, &id); } --- linux-3.13.0.orig/drivers/misc/mei/client.c +++ linux-3.13.0/drivers/misc/mei/client.c @@ -74,23 +74,69 @@ /** - * mei_io_list_flush - removes list entry belonging to cl. + * mei_cl_cmp_id - tells if the clients are the same * - * @list: An instance of our list structure - * @cl: host client + * @cl1: host client 1 + * @cl2: host client 2 + * + * returns true - if the clients has same host and me ids + * false - otherwise */ -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, + const struct mei_cl *cl2) +{ + return cl1 && cl2 && + (cl1->host_client_id == cl2->host_client_id) && + (cl1->me_client_id == cl2->me_client_id); +} + +/** + * mei_io_list_flush - removes cbs belonging to cl. + * + * @list: an instance of our list structure + * @cl: host client, can be NULL for flushing the whole list + * @free: whether to free the cbs + */ +static void __mei_io_list_flush(struct mei_cl_cb *list, + struct mei_cl *cl, bool free) { struct mei_cl_cb *cb; struct mei_cl_cb *next; + /* enable removing everything if no cl is specified */ list_for_each_entry_safe(cb, next, &list->list, list) { - if (cb->cl && mei_cl_cmp_id(cl, cb->cl)) + if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) { list_del(&cb->list); + if (free) + mei_io_cb_free(cb); + } } } /** + * mei_io_list_flush - removes list entry belonging to cl. 
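+ * Thin wrapper around __mei_io_list_flush() with free == false: matching
+ * callbacks are only unlinked from @list, not freed; mei_io_list_free()
+ * below is the variant that also releases them.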
+ * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, false); +} + + +/** + * mei_io_list_free - removes cb belonging to cl and free them + * + * @list: An instance of our list structure + * @cl: host client + */ +static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl) +{ + __mei_io_list_flush(list, cl, true); +} + +/** * mei_io_cb_free - free mei_cb_private related memory * * @cb: mei callback struct @@ -154,7 +200,7 @@ return 0; } /** - * mei_io_cb_alloc_resp_buf - allocate respose buffer + * mei_io_cb_alloc_resp_buf - allocate response buffer * * @cb: io callback structure * @length: size of the buffer @@ -196,8 +242,8 @@ cl_dbg(dev, cl, "remove list entry belonging to cl\n"); mei_io_list_flush(&cl->dev->read_list, cl); - mei_io_list_flush(&cl->dev->write_list, cl); - mei_io_list_flush(&cl->dev->write_waiting_list, cl); + mei_io_list_free(&cl->dev->write_list, cl); + mei_io_list_free(&cl->dev->write_waiting_list, cl); mei_io_list_flush(&cl->dev->ctrl_wr_list, cl); mei_io_list_flush(&cl->dev->ctrl_rd_list, cl); mei_io_list_flush(&cl->dev->amthif_cmd_list, cl); @@ -207,7 +253,7 @@ /** - * mei_cl_init - initializes intialize cl. + * mei_cl_init - initializes cl. * * @cl: host client to be initialized * @dev: mei device @@ -263,10 +309,10 @@ return NULL; } -/** mei_cl_link: allocte host id in the host map +/** mei_cl_link: allocate host id in the host map * * @cl - host client - * @id - fixed host id or -1 for genereting one + * @id - fixed host id or -1 for generic one * * returns 0 on success * -EINVAL on incorrect values @@ -282,19 +328,19 @@ dev = cl->dev; - /* If Id is not asigned get one*/ + /* If Id is not assigned get one*/ if (id == MEI_HOST_CLIENT_ID_ANY) id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); if (id >= MEI_CLIENTS_MAX) { - dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ; + dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX); return -EMFILE; } open_handle_count = dev->open_handle_count + dev->iamthif_open_count; if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) { - dev_err(&dev->pdev->dev, "open_handle_count exceded %d", + dev_err(&dev->pdev->dev, "open_handle_count exceeded %d", MEI_MAX_OPEN_HANDLE_COUNT); return -EMFILE; } @@ -344,8 +390,6 @@ cl->state = MEI_FILE_INITIALIZING; - list_del_init(&cl->link); - return 0; } @@ -372,13 +416,14 @@ } dev->dev_state = MEI_DEV_ENABLED; + dev->reset_count = 0; mutex_unlock(&dev->device_lock); } /** - * mei_cl_disconnect - disconnect host clinet form the me one + * mei_cl_disconnect - disconnect host client from the me one * * @cl: host client * @@ -390,7 +435,7 @@ { struct mei_device *dev; struct mei_cl_cb *cb; - int rets, err; + int rets; if (WARN_ON(!cl || !cl->dev)) return -ENODEV; @@ -414,6 +459,7 @@ cl_err(dev, cl, "failed to disconnect.\n"); goto free; } + cl->timer_count = MEI_CONNECT_TIMEOUT; mdelay(10); /* Wait for hardware disconnection ready */ list_add_tail(&cb->list, &dev->ctrl_rd_list.list); } else { @@ -423,24 +469,18 @@ } mutex_unlock(&dev->device_lock); - err = wait_event_timeout(dev->wait_recvd_msg, + wait_event_timeout(dev->wait_recvd_msg, MEI_FILE_DISCONNECTED == cl->state, mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); mutex_lock(&dev->device_lock); + if (MEI_FILE_DISCONNECTED == cl->state) { rets = 0; cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); } else { - rets = -ENODEV; - if 
(MEI_FILE_DISCONNECTED != cl->state) - cl_err(dev, cl, "wrong status client disconnect.\n"); - - if (err) - cl_dbg(dev, cl, "wait failed disconnect err=%08x\n", - err); - - cl_err(dev, cl, "failed to disconnect from FW client.\n"); + cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); + rets = -ETIME; } mei_io_list_flush(&dev->ctrl_rd_list, cl); @@ -457,7 +497,7 @@ * * @cl: private data of the file object * - * returns ture if other client is connected, 0 - otherwise. + * returns true if other client is connected, false - otherwise. */ bool mei_cl_is_other_connecting(struct mei_cl *cl) { @@ -481,7 +521,7 @@ } /** - * mei_cl_connect - connect host clinet to the me one + * mei_cl_connect - connect host client to the me one * * @cl: host client * @@ -667,7 +707,6 @@ goto err; cb->fop_type = MEI_FOP_READ; - cl->read_cb = cb; if (dev->hbuf_is_ready) { dev->hbuf_is_ready = false; if (mei_hbm_cl_flow_control_req(dev, cl)) { @@ -679,6 +718,9 @@ } else { list_add_tail(&cb->list, &dev->ctrl_wr_list.list); } + + cl->read_cb = cb; + return rets; err: mei_io_cb_free(cb); @@ -729,6 +771,7 @@ mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = cb->internal; if (*slots >= msg_slots) { mei_hdr.length = len; @@ -775,7 +818,7 @@ * @cl: host client * @cl: write callback with filled data * - * returns numbe of bytes sent on success, <0 on failure. + * returns number of bytes sent on success, <0 on failure. */ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking) { @@ -828,6 +871,7 @@ mei_hdr.host_addr = cl->host_client_id; mei_hdr.me_addr = cl->me_client_id; mei_hdr.reserved = 0; + mei_hdr.internal = cb->internal; rets = mei_write_message(dev, &mei_hdr, buf->data); @@ -907,7 +951,6 @@ list_for_each_entry_safe(cl, next, &dev->file_list, link) { cl->state = MEI_FILE_DISCONNECTED; cl->mei_flow_ctrl_creds = 0; - cl->read_cb = NULL; cl->timer_count = 0; } } @@ -940,12 +983,8 @@ */ void mei_cl_all_write_clear(struct mei_device *dev) { - struct mei_cl_cb *cb, *next; - - list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { - list_del(&cb->list); - mei_io_cb_free(cb); - } + mei_io_list_free(&dev->write_list, NULL); + mei_io_list_free(&dev->write_waiting_list, NULL); } --- linux-3.13.0.orig/drivers/misc/mei/client.h +++ linux-3.13.0/drivers/misc/mei/client.h @@ -45,8 +45,6 @@ { INIT_LIST_HEAD(&list->list); } -void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl); - /* * MEI Host Client Functions */ @@ -61,22 +59,6 @@ int mei_cl_flush_queues(struct mei_cl *cl); struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl); -/** - * mei_cl_cmp_id - tells if file private data have same id - * - * @fe1: private data of 1. file object - * @fe2: private data of 2. 
file object - * - * returns true - if ids are the same and not NULL - */ -static inline bool mei_cl_cmp_id(const struct mei_cl *cl1, - const struct mei_cl *cl2) -{ - return cl1 && cl2 && - (cl1->host_client_id == cl2->host_client_id) && - (cl1->me_client_id == cl2->me_client_id); -} - int mei_cl_flow_ctrl_creds(struct mei_cl *cl); --- linux-3.13.0.orig/drivers/misc/mei/debugfs.c +++ linux-3.13.0/drivers/misc/mei/debugfs.c @@ -43,7 +43,7 @@ mutex_lock(&dev->device_lock); - /* if the driver is not enabled the list won't b consitent */ + /* if the driver is not enabled the list won't be consistent */ if (dev->dev_state != MEI_DEV_ENABLED) goto out; @@ -101,7 +101,7 @@ /** * mei_dbgfs_deregister - Remove the debugfs files and directories - * @mei - pointer to mei device private dat + * @mei - pointer to mei device private data */ void mei_dbgfs_deregister(struct mei_device *dev) { --- linux-3.13.0.orig/drivers/misc/mei/hbm.c +++ linux-3.13.0/drivers/misc/mei/hbm.c @@ -28,9 +28,9 @@ * * @dev: the device structure * - * returns none. + * returns 0 on success -ENOMEM on allocation failure */ -static void mei_hbm_me_cl_allocate(struct mei_device *dev) +static int mei_hbm_me_cl_allocate(struct mei_device *dev) { struct mei_me_client *clients; int b; @@ -44,7 +44,7 @@ dev->me_clients_num++; if (dev->me_clients_num == 0) - return; + return 0; kfree(dev->me_clients); dev->me_clients = NULL; @@ -56,12 +56,10 @@ sizeof(struct mei_me_client), GFP_KERNEL); if (!clients) { dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n"); - dev->dev_state = MEI_DEV_RESETTING; - mei_reset(dev, 1); - return; + return -ENOMEM; } dev->me_clients = clients; - return; + return 0; } /** @@ -85,12 +83,12 @@ } /** - * same_disconn_addr - tells if they have the same address + * mei_hbm_cl_addr_equal - tells if they have the same address * - * @file: private data of the file object. - * @disconn: disconnection request. + * @cl: - client + * @buf: buffer with cl header * - * returns true if addres are same + * returns true if addresses are the same */ static inline bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf) @@ -128,6 +126,17 @@ return false; } +/** + * mei_hbm_idle - set hbm to idle state + * + * @dev: the device structure + */ +void mei_hbm_idle(struct mei_device *dev) +{ + dev->init_clients_timer = 0; + dev->hbm_state = MEI_HBM_IDLE; +} + int mei_hbm_start_wait(struct mei_device *dev) { int ret; @@ -137,7 +146,7 @@ mutex_unlock(&dev->device_lock); ret = wait_event_interruptible_timeout(dev->wait_recvd_msg, dev->hbm_state == MEI_HBM_IDLE || - dev->hbm_state > MEI_HBM_START, + dev->hbm_state >= MEI_HBM_STARTED, mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT)); mutex_lock(&dev->device_lock); @@ -153,12 +162,15 @@ * mei_hbm_start_req - sends start request message. 
* * @dev: the device structure + * + * returns 0 on success and < 0 on failure */ int mei_hbm_start_req(struct mei_device *dev) { struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; struct hbm_host_version_request *start_req; const size_t len = sizeof(struct hbm_host_version_request); + int ret; mei_hbm_hdr(mei_hdr, len); @@ -170,12 +182,13 @@ start_req->host_version.minor_version = HBM_MINOR_VERSION; dev->hbm_state = MEI_HBM_IDLE; - if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { - dev_err(&dev->pdev->dev, "version message write failed\n"); - dev->dev_state = MEI_DEV_RESETTING; - mei_reset(dev, 1); - return -EIO; + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "version message write failed: ret = %d\n", + ret); + return ret; } + dev->hbm_state = MEI_HBM_START; dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; return 0; @@ -186,13 +199,15 @@ * * @dev: the device structure * - * returns none. + * returns 0 on success and < 0 on failure */ -static void mei_hbm_enum_clients_req(struct mei_device *dev) +static int mei_hbm_enum_clients_req(struct mei_device *dev) { struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr; struct hbm_host_enum_request *enum_req; const size_t len = sizeof(struct hbm_host_enum_request); + int ret; + /* enumerate clients */ mei_hbm_hdr(mei_hdr, len); @@ -200,14 +215,15 @@ memset(enum_req, 0, len); enum_req->hbm_cmd = HOST_ENUM_REQ_CMD; - if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { - dev->dev_state = MEI_DEV_RESETTING; - dev_err(&dev->pdev->dev, "enumeration request write failed.\n"); - mei_reset(dev, 1); + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "enumeration request write failed: ret = %d.\n", + ret); + return ret; } dev->hbm_state = MEI_HBM_ENUM_CLIENTS; dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; - return; + return 0; } /** @@ -215,7 +231,7 @@ * * @dev: the device structure * - * returns none. + * returns 0 on success and < 0 on failure */ static int mei_hbm_prop_req(struct mei_device *dev) @@ -226,7 +242,7 @@ const size_t len = sizeof(struct hbm_props_request); unsigned long next_client_index; unsigned long client_num; - + int ret; client_num = dev->me_client_presentation_num; @@ -253,12 +269,11 @@ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD; prop_req->address = next_client_index; - if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) { - dev->dev_state = MEI_DEV_RESETTING; - dev_err(&dev->pdev->dev, "properties request write failed\n"); - mei_reset(dev, 1); - - return -EIO; + ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data); + if (ret) { + dev_err(&dev->pdev->dev, "properties request write failed: ret = %d\n", + ret); + return ret; } dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; @@ -268,7 +283,7 @@ } /** - * mei_hbm_stop_req_prepare - perpare stop request message + * mei_hbm_stop_req_prepare - prepare stop request message * * @dev - mei device * @mei_hdr - mei message header @@ -289,7 +304,7 @@ } /** - * mei_hbm_cl_flow_control_req - sends flow control requst. + * mei_hbm_cl_flow_control_req - sends flow control request. 
* * @dev: the device structure * @cl: client info @@ -451,7 +466,7 @@ } /** - * mei_hbm_cl_connect_res - connect resposne from the ME + * mei_hbm_cl_connect_res - connect response from the ME * * @dev: the device structure * @rs: connect response bus message @@ -505,8 +520,8 @@ /** - * mei_hbm_fw_disconnect_req - disconnect request initiated by me - * host sends disoconnect response + * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware + * host sends disconnect response * * @dev: the device structure. * @disconnect_req: disconnect request bus message from the me @@ -559,8 +574,10 @@ * * @dev: the device structure * @mei_hdr: header of bus message + * + * returns 0 on success and < 0 on failure */ -void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) { struct mei_bus_message *mei_msg; struct mei_me_client *me_client; @@ -577,8 +594,20 @@ mei_read_slots(dev, dev->rd_msg_buf, hdr->length); mei_msg = (struct mei_bus_message *)dev->rd_msg_buf; + /* ignore spurious message and prevent reset nesting + * hbm is put to idle during system reset + */ + if (dev->hbm_state == MEI_HBM_IDLE) { + dev_dbg(&dev->pdev->dev, "hbm: state is idle ignore spurious messages\n"); + return 0; + } + switch (mei_msg->hbm_cmd) { case HOST_START_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: start: response message received.\n"); + + dev->init_clients_timer = 0; + version_res = (struct hbm_host_version_response *)mei_msg; dev_dbg(&dev->pdev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n", @@ -597,73 +626,89 @@ } if (!mei_hbm_version_is_supported(dev)) { - dev_warn(&dev->pdev->dev, "hbm version mismatch: stopping the driver.\n"); + dev_warn(&dev->pdev->dev, "hbm: start: version mismatch - stopping the driver.\n"); - dev->hbm_state = MEI_HBM_STOP; + dev->hbm_state = MEI_HBM_STOPPED; mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr, dev->wr_msg.data); - mei_write_message(dev, &dev->wr_msg.hdr, - dev->wr_msg.data); + if (mei_write_message(dev, &dev->wr_msg.hdr, + dev->wr_msg.data)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send stop request\n"); + return -EIO; + } + break; + } - return; + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_START) { + dev_err(&dev->pdev->dev, "hbm: start: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; } - if (dev->dev_state == MEI_DEV_INIT_CLIENTS && - dev->hbm_state == MEI_HBM_START) { - dev->init_clients_timer = 0; - mei_hbm_enum_clients_req(dev); - } else { - dev_err(&dev->pdev->dev, "reset: wrong host start response\n"); - mei_reset(dev, 1); - return; + dev->hbm_state = MEI_HBM_STARTED; + + if (mei_hbm_enum_clients_req(dev)) { + dev_err(&dev->pdev->dev, "hbm: start: failed to send enumeration request\n"); + return -EIO; } wake_up_interruptible(&dev->wait_recvd_msg); - dev_dbg(&dev->pdev->dev, "host start response message received.\n"); break; case CLIENT_CONNECT_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client connect response: message received.\n"); + connect_res = (struct hbm_client_connect_response *) mei_msg; mei_hbm_cl_connect_res(dev, connect_res); - dev_dbg(&dev->pdev->dev, "client connect response message received.\n"); wake_up(&dev->wait_recvd_msg); break; case CLIENT_DISCONNECT_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client disconnect response: message received.\n"); + disconnect_res = (struct hbm_client_connect_response *) mei_msg; mei_hbm_cl_disconnect_res(dev, disconnect_res); - 
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n"); wake_up(&dev->wait_recvd_msg); break; case MEI_FLOW_CONTROL_CMD: + dev_dbg(&dev->pdev->dev, "hbm: client flow control response: message received.\n"); + flow_control = (struct hbm_flow_control *) mei_msg; mei_hbm_cl_flow_control_res(dev, flow_control); - dev_dbg(&dev->pdev->dev, "client flow control response message received.\n"); break; case HOST_CLIENT_PROPERTIES_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: properties response: message received.\n"); + + dev->init_clients_timer = 0; + + if (dev->me_clients == NULL) { + dev_err(&dev->pdev->dev, "hbm: properties response: mei_clients not allocated\n"); + return -EPROTO; + } + props_res = (struct hbm_props_response *)mei_msg; me_client = &dev->me_clients[dev->me_client_presentation_num]; - if (props_res->status || !dev->me_clients) { - dev_err(&dev->pdev->dev, "reset: properties response hbm wrong status.\n"); - mei_reset(dev, 1); - return; + if (props_res->status) { + dev_err(&dev->pdev->dev, "hbm: properties response: wrong status = %d\n", + props_res->status); + return -EPROTO; } if (me_client->client_id != props_res->address) { - dev_err(&dev->pdev->dev, "reset: host properties response address mismatch\n"); - mei_reset(dev, 1); - return; + dev_err(&dev->pdev->dev, "hbm: properties response: address mismatch %d ?= %d\n", + me_client->client_id, props_res->address); + return -EPROTO; } if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) { - dev_err(&dev->pdev->dev, "reset: unexpected properties response\n"); - mei_reset(dev, 1); - - return; + dev_err(&dev->pdev->dev, "hbm: properties response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; } me_client->props = props_res->client_properties; @@ -671,49 +716,70 @@ dev->me_client_presentation_num++; /* request property for the next client */ - mei_hbm_prop_req(dev); + if (mei_hbm_prop_req(dev)) + return -EIO; break; case HOST_ENUM_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: enumeration response: message received\n"); + + dev->init_clients_timer = 0; + enum_res = (struct hbm_host_enum_response *) mei_msg; BUILD_BUG_ON(sizeof(dev->me_clients_map) < sizeof(enum_res->valid_addresses)); memcpy(dev->me_clients_map, enum_res->valid_addresses, sizeof(enum_res->valid_addresses)); - if (dev->dev_state == MEI_DEV_INIT_CLIENTS && - dev->hbm_state == MEI_HBM_ENUM_CLIENTS) { - dev->init_clients_timer = 0; - mei_hbm_me_cl_allocate(dev); - dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; - /* first property reqeust */ - mei_hbm_prop_req(dev); - } else { - dev_err(&dev->pdev->dev, "reset: unexpected enumeration response hbm.\n"); - mei_reset(dev, 1); - return; + if (dev->dev_state != MEI_DEV_INIT_CLIENTS || + dev->hbm_state != MEI_HBM_ENUM_CLIENTS) { + dev_err(&dev->pdev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + if (mei_hbm_me_cl_allocate(dev)) { + dev_err(&dev->pdev->dev, "hbm: enumeration response: cannot allocate clients array\n"); + return -ENOMEM; } + + dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES; + + /* first property request */ + if (mei_hbm_prop_req(dev)) + return -EIO; + break; case HOST_STOP_RES_CMD: + dev_dbg(&dev->pdev->dev, "hbm: stop response: message received\n"); - if (dev->hbm_state != MEI_HBM_STOP) - dev_err(&dev->pdev->dev, "unexpected stop response hbm.\n"); - dev->dev_state = MEI_DEV_DISABLED; - dev_info(&dev->pdev->dev, "reset: FW stop response.\n"); - mei_reset(dev, 
1); + dev->init_clients_timer = 0; + + if (dev->hbm_state != MEI_HBM_STOPPED) { + dev_err(&dev->pdev->dev, "hbm: stop response: state mismatch, [%d, %d]\n", + dev->dev_state, dev->hbm_state); + return -EPROTO; + } + + dev->dev_state = MEI_DEV_POWER_DOWN; + dev_info(&dev->pdev->dev, "hbm: stop response: resetting.\n"); + /* force the reset */ + return -EPROTO; break; case CLIENT_DISCONNECT_REQ_CMD: - /* search for client */ + dev_dbg(&dev->pdev->dev, "hbm: disconnect request: message received\n"); + disconnect_req = (struct hbm_client_connect_request *)mei_msg; mei_hbm_fw_disconnect_req(dev, disconnect_req); break; case ME_STOP_REQ_CMD: + dev_dbg(&dev->pdev->dev, "hbm: stop request: message received\n"); - dev->hbm_state = MEI_HBM_STOP; + dev->hbm_state = MEI_HBM_STOPPED; mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr, dev->wr_ext_msg.data); break; @@ -722,5 +788,6 @@ break; } + return 0; } --- linux-3.13.0.orig/drivers/misc/mei/hbm.h +++ linux-3.13.0/drivers/misc/mei/hbm.h @@ -32,13 +32,13 @@ enum mei_hbm_state { MEI_HBM_IDLE = 0, MEI_HBM_START, + MEI_HBM_STARTED, MEI_HBM_ENUM_CLIENTS, MEI_HBM_CLIENT_PROPERTIES, - MEI_HBM_STARTED, - MEI_HBM_STOP, + MEI_HBM_STOPPED, }; -void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); +int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr); static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length) { @@ -49,6 +49,7 @@ hdr->reserved = 0; } +void mei_hbm_idle(struct mei_device *dev); int mei_hbm_start_req(struct mei_device *dev); int mei_hbm_start_wait(struct mei_device *dev); int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl); --- linux-3.13.0.orig/drivers/misc/mei/hw-me-regs.h +++ linux-3.13.0/drivers/misc/mei/hw-me-regs.h @@ -115,6 +115,11 @@ #define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ #define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ + +/* Host Firmware Status Registers in PCI Config Space */ +#define PCI_CFG_HFS_1 0x40 +#define PCI_CFG_HFS_2 0x48 + /* * MEI HW Section */ --- linux-3.13.0.orig/drivers/misc/mei/hw-me.c +++ linux-3.13.0/drivers/misc/mei/hw-me.c @@ -164,6 +164,9 @@ hcsr |= H_IG; hcsr &= ~H_RST; mei_hcsr_set(hw, hcsr); + + /* complete this write before we set host ready on another CPU */ + mmiowb(); } /** * mei_me_hw_reset - resets fw via mei csr register. @@ -176,6 +179,18 @@ struct mei_me_hw *hw = to_me_hw(dev); u32 hcsr = mei_hcsr_read(hw); + /* H_RST may be found lit before reset is started, + * for example if preceding reset flow hasn't completed. + * In that case asserting H_RST will be ignored, therefore + * we need to clean H_RST bit to start a successful reset sequence. + */ + if ((hcsr & H_RST) == H_RST) { + dev_warn(&dev->pdev->dev, "H_RST is set = 0x%08X", hcsr); + hcsr &= ~H_RST; + mei_hcsr_set(hw, hcsr); + hcsr = mei_hcsr_read(hw); + } + hcsr |= H_RST | H_IG | H_IS; if (intr_enable) @@ -183,9 +198,22 @@ else hcsr &= ~H_IE; + dev->recvd_hw_ready = false; mei_me_reg_write(hw, H_CSR, hcsr); - if (dev->dev_state == MEI_DEV_POWER_DOWN) + /* + * Host reads the H_CSR once to ensure that the + * posted write to H_CSR completes. 
+ */ + hcsr = mei_hcsr_read(hw); + + if ((hcsr & H_RST) == 0) + dev_warn(&dev->pdev->dev, "H_RST is not set = 0x%08X", hcsr); + + if ((hcsr & H_RDY) == H_RDY) + dev_warn(&dev->pdev->dev, "H_RDY is not cleared 0x%08X", hcsr); + + if (intr_enable == false) mei_me_hw_reset_release(dev); return 0; @@ -201,6 +229,7 @@ static void mei_me_host_set_ready(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); + hw->host_hw_state = mei_hcsr_read(hw); hw->host_hw_state |= H_IE | H_IG | H_RDY; mei_hcsr_set(hw, hw->host_hw_state); } @@ -233,10 +262,6 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) { int err; - if (mei_me_hw_is_ready(dev)) - return 0; - - dev->recvd_hw_ready = false; mutex_unlock(&dev->device_lock); err = wait_event_interruptible_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, @@ -250,6 +275,7 @@ return err; } + mei_me_hw_reset_release(dev); dev->recvd_hw_ready = false; return 0; } @@ -469,7 +495,7 @@ struct mei_device *dev = (struct mei_device *) dev_id; struct mei_cl_cb complete_list; s32 slots; - int rets; + int rets = 0; dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n"); /* initialize our complete list */ @@ -482,33 +508,22 @@ mei_clear_interrupts(dev); /* check if ME wants a reset */ - if (!mei_hw_is_ready(dev) && - dev->dev_state != MEI_DEV_RESETTING && - dev->dev_state != MEI_DEV_INITIALIZING && - dev->dev_state != MEI_DEV_POWER_DOWN && - dev->dev_state != MEI_DEV_POWER_UP) { - dev_dbg(&dev->pdev->dev, "FW not ready.\n"); - mei_reset(dev, 1); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; + if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { + dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n"); + schedule_work(&dev->reset_work); + goto end; } /* check if we need to start the dev */ if (!mei_host_is_ready(dev)) { if (mei_hw_is_ready(dev)) { dev_dbg(&dev->pdev->dev, "we need to start the dev.\n"); - dev->recvd_hw_ready = true; wake_up_interruptible(&dev->wait_hw_ready); - - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; } else { - dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); - mei_me_hw_reset_release(dev); - mutex_unlock(&dev->device_lock); - return IRQ_HANDLED; + dev_dbg(&dev->pdev->dev, "Spurious Interrupt\n"); } + goto end; } /* check slots available for reading */ slots = mei_count_full_read_slots(dev); @@ -516,21 +531,23 @@ /* we have urgent data to send so break the read */ if (dev->wr_ext_msg.hdr.length) break; - dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots); - dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n"); + dev_dbg(&dev->pdev->dev, "slots to read = %08x\n", slots); rets = mei_irq_read_handler(dev, &complete_list, &slots); - if (rets) + if (rets && dev->dev_state != MEI_DEV_RESETTING) { + schedule_work(&dev->reset_work); goto end; + } } + rets = mei_irq_write_handler(dev, &complete_list); -end: - dev_dbg(&dev->pdev->dev, "end of bottom half function.\n"); - dev->hbuf_is_ready = mei_hbuf_is_ready(dev); - mutex_unlock(&dev->device_lock); + dev->hbuf_is_ready = mei_hbuf_is_ready(dev); mei_irq_compl_handler(dev, &complete_list); +end: + dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets); + mutex_unlock(&dev->device_lock); return IRQ_HANDLED; } static const struct mei_hw_ops mei_me_hw_ops = { --- linux-3.13.0.orig/drivers/misc/mei/hw.h +++ linux-3.13.0/drivers/misc/mei/hw.h @@ -111,7 +111,8 @@ u32 me_addr:8; u32 host_addr:8; u32 length:9; - u32 reserved:6; + u32 reserved:5; + u32 internal:1; u32 msg_complete:1; } __packed; --- 
linux-3.13.0.orig/drivers/misc/mei/init.c +++ linux-3.13.0/drivers/misc/mei/init.c @@ -43,41 +43,119 @@ #undef MEI_DEV_STATE } -void mei_device_init(struct mei_device *dev) -{ - /* setup our list array */ - INIT_LIST_HEAD(&dev->file_list); - INIT_LIST_HEAD(&dev->device_list); - mutex_init(&dev->device_lock); - init_waitqueue_head(&dev->wait_hw_ready); - init_waitqueue_head(&dev->wait_recvd_msg); - init_waitqueue_head(&dev->wait_stop_wd); - dev->dev_state = MEI_DEV_INITIALIZING; - mei_io_list_init(&dev->read_list); - mei_io_list_init(&dev->write_list); - mei_io_list_init(&dev->write_waiting_list); - mei_io_list_init(&dev->ctrl_wr_list); - mei_io_list_init(&dev->ctrl_rd_list); +/** + * mei_cancel_work. Cancel mei background jobs + * + * @dev: the device structure + * + * returns 0 on success or < 0 if the reset hasn't succeeded + */ +void mei_cancel_work(struct mei_device *dev) +{ + cancel_work_sync(&dev->init_work); + cancel_work_sync(&dev->reset_work); - INIT_DELAYED_WORK(&dev->timer_work, mei_timer); - INIT_WORK(&dev->init_work, mei_host_client_init); + cancel_delayed_work(&dev->timer_work); +} +EXPORT_SYMBOL_GPL(mei_cancel_work); - INIT_LIST_HEAD(&dev->wd_cl.link); - INIT_LIST_HEAD(&dev->iamthif_cl.link); - mei_io_list_init(&dev->amthif_cmd_list); - mei_io_list_init(&dev->amthif_rd_complete_list); +/** + * mei_reset - resets host and fw. + * + * @dev: the device structure + */ +int mei_reset(struct mei_device *dev) +{ + enum mei_dev_state state = dev->dev_state; + bool interrupts_enabled; + int ret; - bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); - dev->open_handle_count = 0; + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_DISABLED && + state != MEI_DEV_POWER_DOWN && + state != MEI_DEV_POWER_UP) + dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", + mei_dev_state_str(state)); - /* - * Reserving the first client ID - * 0: Reserved for MEI Bus Message communications + /* we're already in reset, cancel the init timer + * if the reset was called due the hbm protocol error + * we need to call it before hw start + * so the hbm watchdog won't kick in */ - bitmap_set(dev->host_clients_map, 0, 1); + mei_hbm_idle(dev); + + /* enter reset flow */ + interrupts_enabled = state != MEI_DEV_POWER_DOWN; + dev->dev_state = MEI_DEV_RESETTING; + + dev->reset_count++; + if (dev->reset_count > MEI_MAX_CONSEC_RESET) { + dev_err(&dev->pdev->dev, "reset: reached maximal consecutive resets: disabling the device\n"); + dev->dev_state = MEI_DEV_DISABLED; + return -ENODEV; + } + + ret = mei_hw_reset(dev, interrupts_enabled); + /* fall through and remove the sw state even if hw reset has failed */ + + /* no need to clean up software state in case of power up */ + if (state != MEI_DEV_INITIALIZING && + state != MEI_DEV_POWER_UP) { + + /* remove all waiting requests */ + mei_cl_all_write_clear(dev); + + mei_cl_all_disconnect(dev); + + /* wake up all readers and writers so they can be interrupted */ + mei_cl_all_wakeup(dev); + + /* remove entry if already in list */ + dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); + mei_cl_unlink(&dev->wd_cl); + mei_cl_unlink(&dev->iamthif_cl); + mei_amthif_reset_params(dev); + memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); + } + + + dev->me_clients_num = 0; + dev->rd_msg_hdr = 0; + dev->wd_pending = false; + + if (ret) { + dev_err(&dev->pdev->dev, "hw_reset failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_DISABLED; + return ret; + } + + if (state == MEI_DEV_POWER_DOWN) { + dev_dbg(&dev->pdev->dev, "powering down: end of 
reset\n"); + dev->dev_state = MEI_DEV_DISABLED; + return 0; + } + + ret = mei_hw_start(dev); + if (ret) { + dev_err(&dev->pdev->dev, "hw_start failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_DISABLED; + return ret; + } + + dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); + + dev->dev_state = MEI_DEV_INIT_CLIENTS; + ret = mei_hbm_start_req(dev); + if (ret) { + dev_err(&dev->pdev->dev, "hbm_start failed ret = %d\n", ret); + dev->dev_state = MEI_DEV_DISABLED; + return ret; + } + + return 0; } -EXPORT_SYMBOL_GPL(mei_device_init); +EXPORT_SYMBOL_GPL(mei_reset); /** * mei_start - initializes host and fw to start work. @@ -90,14 +168,21 @@ { mutex_lock(&dev->device_lock); - /* acknowledge interrupt and stop interupts */ + /* acknowledge interrupt and stop interrupts */ mei_clear_interrupts(dev); mei_hw_config(dev); dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n"); - mei_reset(dev, 1); + dev->dev_state = MEI_DEV_INITIALIZING; + dev->reset_count = 0; + mei_reset(dev); + + if (dev->dev_state == MEI_DEV_DISABLED) { + dev_err(&dev->pdev->dev, "reset failed"); + goto err; + } if (mei_hbm_start_wait(dev)) { dev_err(&dev->pdev->dev, "HBM haven't started"); @@ -132,101 +217,66 @@ EXPORT_SYMBOL_GPL(mei_start); /** - * mei_reset - resets host and fw. + * mei_restart - restart device after suspend * * @dev: the device structure - * @interrupts_enabled: if interrupt should be enabled after reset. + * + * returns 0 on success or -ENODEV if the restart hasn't succeeded */ -void mei_reset(struct mei_device *dev, int interrupts_enabled) +int mei_restart(struct mei_device *dev) { - bool unexpected; - int ret; - - unexpected = (dev->dev_state != MEI_DEV_INITIALIZING && - dev->dev_state != MEI_DEV_DISABLED && - dev->dev_state != MEI_DEV_POWER_DOWN && - dev->dev_state != MEI_DEV_POWER_UP); - - if (unexpected) - dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n", - mei_dev_state_str(dev->dev_state)); + int err; - ret = mei_hw_reset(dev, interrupts_enabled); - if (ret) { - dev_err(&dev->pdev->dev, "hw reset failed disabling the device\n"); - interrupts_enabled = false; - dev->dev_state = MEI_DEV_DISABLED; - } - - dev->hbm_state = MEI_HBM_IDLE; + mutex_lock(&dev->device_lock); - if (dev->dev_state != MEI_DEV_INITIALIZING && - dev->dev_state != MEI_DEV_POWER_UP) { - if (dev->dev_state != MEI_DEV_DISABLED && - dev->dev_state != MEI_DEV_POWER_DOWN) - dev->dev_state = MEI_DEV_RESETTING; + mei_clear_interrupts(dev); - /* remove all waiting requests */ - mei_cl_all_write_clear(dev); + dev->dev_state = MEI_DEV_POWER_UP; + dev->reset_count = 0; - mei_cl_all_disconnect(dev); + err = mei_reset(dev); - /* wake up all readings so they can be interrupted */ - mei_cl_all_wakeup(dev); + mutex_unlock(&dev->device_lock); - /* remove entry if already in list */ - dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n"); - mei_cl_unlink(&dev->wd_cl); - mei_cl_unlink(&dev->iamthif_cl); - mei_amthif_reset_params(dev); - memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg)); - } + if (err || dev->dev_state == MEI_DEV_DISABLED) + return -ENODEV; - /* we're already in reset, cancel the init timer */ - dev->init_clients_timer = 0; + return 0; +} +EXPORT_SYMBOL_GPL(mei_restart); - dev->me_clients_num = 0; - dev->rd_msg_hdr = 0; - dev->wd_pending = false; - if (!interrupts_enabled) { - dev_dbg(&dev->pdev->dev, "intr not enabled end of reset\n"); - return; - } +static void mei_reset_work(struct work_struct *work) +{ + struct mei_device *dev = + container_of(work, struct 
mei_device, reset_work); - ret = mei_hw_start(dev); - if (ret) { - dev_err(&dev->pdev->dev, "hw_start failed disabling the device\n"); - dev->dev_state = MEI_DEV_DISABLED; - return; - } + mutex_lock(&dev->device_lock); - dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n"); - /* link is established * start sending messages. */ + mei_reset(dev); - dev->dev_state = MEI_DEV_INIT_CLIENTS; - - mei_hbm_start_req(dev); + mutex_unlock(&dev->device_lock); + if (dev->dev_state == MEI_DEV_DISABLED) + dev_err(&dev->pdev->dev, "reset failed"); } -EXPORT_SYMBOL_GPL(mei_reset); void mei_stop(struct mei_device *dev) { dev_dbg(&dev->pdev->dev, "stopping the device.\n"); - flush_scheduled_work(); + mei_cancel_work(dev); - mutex_lock(&dev->device_lock); + mei_nfc_host_exit(dev); - cancel_delayed_work(&dev->timer_work); + mutex_lock(&dev->device_lock); mei_wd_stop(dev); - mei_nfc_host_exit(); - dev->dev_state = MEI_DEV_POWER_DOWN; - mei_reset(dev, 0); + mei_reset(dev); + /* move device to disabled state unconditionally */ + dev->dev_state = MEI_DEV_DISABLED; mutex_unlock(&dev->device_lock); @@ -236,3 +286,41 @@ +void mei_device_init(struct mei_device *dev) +{ + /* setup our list array */ + INIT_LIST_HEAD(&dev->file_list); + INIT_LIST_HEAD(&dev->device_list); + mutex_init(&dev->device_lock); + init_waitqueue_head(&dev->wait_hw_ready); + init_waitqueue_head(&dev->wait_recvd_msg); + init_waitqueue_head(&dev->wait_stop_wd); + dev->dev_state = MEI_DEV_INITIALIZING; + dev->reset_count = 0; + + mei_io_list_init(&dev->read_list); + mei_io_list_init(&dev->write_list); + mei_io_list_init(&dev->write_waiting_list); + mei_io_list_init(&dev->ctrl_wr_list); + mei_io_list_init(&dev->ctrl_rd_list); + + INIT_DELAYED_WORK(&dev->timer_work, mei_timer); + INIT_WORK(&dev->init_work, mei_host_client_init); + INIT_WORK(&dev->reset_work, mei_reset_work); + + INIT_LIST_HEAD(&dev->wd_cl.link); + INIT_LIST_HEAD(&dev->iamthif_cl.link); + mei_io_list_init(&dev->amthif_cmd_list); + mei_io_list_init(&dev->amthif_rd_complete_list); + + bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); + dev->open_handle_count = 0; + + /* + * Reserving the first client ID + * 0: Reserved for MEI Bus Message communications + */ + bitmap_set(dev->host_clients_map, 0, 1); +} +EXPORT_SYMBOL_GPL(mei_device_init); + --- linux-3.13.0.orig/drivers/misc/mei/interrupt.c +++ linux-3.13.0/drivers/misc/mei/interrupt.c @@ -31,7 +31,7 @@ /** - * mei_irq_compl_handler - dispatch complete handelers + * mei_irq_compl_handler - dispatch complete handlers * for the completed callbacks * * @dev - mei device @@ -301,13 +301,11 @@ struct mei_cl_cb *cmpl_list, s32 *slots) { struct mei_msg_hdr *mei_hdr; - struct mei_cl *cl_pos = NULL; - struct mei_cl *cl_next = NULL; - int ret = 0; + struct mei_cl *cl; + int ret; if (!dev->rd_msg_hdr) { dev->rd_msg_hdr = mei_read_hdr(dev); - dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); (*slots)--; dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots); } @@ -315,61 +313,67 @@ dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); if (mei_hdr->reserved || !dev->rd_msg_hdr) { - dev_dbg(&dev->pdev->dev, "corrupted message header.\n"); + dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n", + dev->rd_msg_hdr); ret = -EBADMSG; goto end; } - if (mei_hdr->host_addr || mei_hdr->me_addr) { - list_for_each_entry_safe(cl_pos, cl_next, - &dev->file_list, link) { - dev_dbg(&dev->pdev->dev, - "list_for_each_entry_safe read host" - " client = %d, ME client = %d\n", - cl_pos->host_client_id, - cl_pos->me_client_id); - if 
(mei_cl_hbm_equal(cl_pos, mei_hdr)) - break; - } - - if (&cl_pos->link == &dev->file_list) { - dev_dbg(&dev->pdev->dev, "corrupted message header\n"); - ret = -EBADMSG; - goto end; - } - } - if (((*slots) * sizeof(u32)) < mei_hdr->length) { - dev_err(&dev->pdev->dev, - "we can't read the message slots =%08x.\n", + if (mei_slots2data(*slots) < mei_hdr->length) { + dev_err(&dev->pdev->dev, "less data available than length=%08x.\n", *slots); /* we can't read the message */ ret = -ERANGE; goto end; } - /* decide where to read the message too */ - if (!mei_hdr->host_addr) { - dev_dbg(&dev->pdev->dev, "call mei_hbm_dispatch.\n"); - mei_hbm_dispatch(dev, mei_hdr); - dev_dbg(&dev->pdev->dev, "end mei_hbm_dispatch.\n"); - } else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && - (MEI_FILE_CONNECTED == dev->iamthif_cl.state) && - (dev->iamthif_state == MEI_IAMTHIF_READING)) { + /* HBM message */ + if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) { + ret = mei_hbm_dispatch(dev, mei_hdr); + if (ret) { + dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n", + ret); + goto end; + } + goto reset_slots; + } - dev_dbg(&dev->pdev->dev, "call mei_amthif_irq_read_msg.\n"); - dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); + /* find recipient cl */ + list_for_each_entry(cl, &dev->file_list, link) { + if (mei_cl_hbm_equal(cl, mei_hdr)) { + cl_dbg(dev, cl, "got a message\n"); + break; + } + } + + /* if no recipient cl was found we assume corrupted header */ + if (&cl->link == &dev->file_list) { + dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n", + dev->rd_msg_hdr); + ret = -EBADMSG; + goto end; + } + + if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id && + MEI_FILE_CONNECTED == dev->iamthif_cl.state && + dev->iamthif_state == MEI_IAMTHIF_READING) { ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list); - if (ret) + if (ret) { + dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n", + ret); goto end; + } } else { - dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n"); - dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr)); ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list); - if (ret) + if (ret) { + dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n", + ret); goto end; + } } +reset_slots: /* reset the number of slots and header */ *slots = mei_count_full_read_slots(dev); dev->rd_msg_hdr = 0; @@ -424,8 +428,7 @@ cl->status = 0; list_del(&cb->list); - if (MEI_WRITING == cl->writing_state && - cb->fop_type == MEI_FOP_WRITE && + if (cb->fop_type == MEI_FOP_WRITE && cl != &dev->iamthif_cl) { cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); cl->writing_state = MEI_WRITE_COMPLETE; @@ -533,7 +536,6 @@ * * @work: pointer to the work_struct structure * - * NOTE: This function is called by timer interrupt work */ void mei_timer(struct work_struct *work) { @@ -548,24 +550,30 @@ mutex_lock(&dev->device_lock); - if (dev->dev_state != MEI_DEV_ENABLED) { - if (dev->dev_state == MEI_DEV_INIT_CLIENTS) { - if (dev->init_clients_timer) { - if (--dev->init_clients_timer == 0) { - dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n", - dev->hbm_state); - mei_reset(dev, 1); - } + + /* Catch interrupt stalls during HBM init handshake */ + if (dev->dev_state == MEI_DEV_INIT_CLIENTS && + dev->hbm_state != MEI_HBM_IDLE) { + + if (dev->init_clients_timer) { + if (--dev->init_clients_timer == 0) { + dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n", + dev->hbm_state); + mei_reset(dev); + goto out; } } - goto out; } + 
+ if (dev->dev_state != MEI_DEV_ENABLED) + goto out; + /*** connect/disconnect timeouts ***/ list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) { if (cl_pos->timer_count) { if (--cl_pos->timer_count == 0) { - dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n"); - mei_reset(dev, 1); + dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n"); + mei_reset(dev); goto out; } } @@ -573,8 +581,8 @@ if (dev->iamthif_stall_timer) { if (--dev->iamthif_stall_timer == 0) { - dev_err(&dev->pdev->dev, "reset: amthif hanged.\n"); - mei_reset(dev, 1); + dev_err(&dev->pdev->dev, "timer: amthif hanged.\n"); + mei_reset(dev); dev->iamthif_msg_buf_size = 0; dev->iamthif_msg_buf_index = 0; dev->iamthif_canceled = false; @@ -627,7 +635,8 @@ } } out: - schedule_delayed_work(&dev->timer_work, 2 * HZ); + if (dev->dev_state != MEI_DEV_DISABLED) + schedule_delayed_work(&dev->timer_work, 2 * HZ); mutex_unlock(&dev->device_lock); } --- linux-3.13.0.orig/drivers/misc/mei/main.c +++ linux-3.13.0/drivers/misc/mei/main.c @@ -48,7 +48,7 @@ * * @inode: pointer to inode structure * @file: pointer to file structure - e + * * returns 0 on success, <0 on error */ static int mei_open(struct inode *inode, struct file *file) @@ -653,8 +653,7 @@ goto out; } - if (MEI_WRITE_COMPLETE == cl->writing_state) - mask |= (POLLIN | POLLRDNORM); + mask |= (POLLIN | POLLRDNORM); out: mutex_unlock(&dev->device_lock); --- linux-3.13.0.orig/drivers/misc/mei/mei_dev.h +++ linux-3.13.0/drivers/misc/mei/mei_dev.h @@ -61,11 +61,16 @@ #define MEI_CLIENTS_MAX 256 /* + * maximum number of consecutive resets + */ +#define MEI_MAX_CONSEC_RESET 3 + +/* * Number of File descriptors/handles * that can be opened to the driver. * * Limit to 255: 256 Total Clients - * minus internal client for MEI Bus Messags + * minus internal client for MEI Bus Messages */ #define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1) @@ -178,9 +183,10 @@ unsigned long buf_idx; unsigned long read_time; struct file *file_object; + u32 internal:1; }; -/* MEI client instance carried as file->pirvate_data*/ +/* MEI client instance carried as file->private_data*/ struct mei_cl { struct list_head link; struct mei_device *dev; @@ -326,6 +332,7 @@ /** * struct mei_device - MEI private device struct + * @reset_count - limits the number of consecutive resets * @hbm_state - state of host bus message protocol * @mem_addr - mem mapped base register address @@ -369,6 +376,7 @@ /* * mei device states */ + unsigned long reset_count; enum mei_dev_state dev_state; enum mei_hbm_state hbm_state; u16 init_clients_timer; @@ -427,6 +435,7 @@ bool iamthif_canceled; struct work_struct init_work; + struct work_struct reset_work; /* List of bus devices */ struct list_head device_list; @@ -456,13 +465,25 @@ return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4); } +/** + * mei_slots2data- get data in slots - bytes from slots + * @slots - number of available slots + * returns - number of bytes in slots + */ +static inline u32 mei_slots2data(int slots) +{ + return slots * 4; +} + /* * mei init function prototypes */ void mei_device_init(struct mei_device *dev); -void mei_reset(struct mei_device *dev, int interrupts); +int mei_reset(struct mei_device *dev); int mei_start(struct mei_device *dev); +int mei_restart(struct mei_device *dev); void mei_stop(struct mei_device *dev); +void mei_cancel_work(struct mei_device *dev); /* * MEI interrupt functions prototype @@ -510,7 +531,7 @@ * NFC functions */ int mei_nfc_host_init(struct mei_device *dev); -void 
mei_nfc_host_exit(void); +void mei_nfc_host_exit(struct mei_device *dev); /* * NFC Client UUID @@ -626,9 +647,9 @@ int mei_register(struct mei_device *dev); void mei_deregister(struct mei_device *dev); -#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d" +#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d internal=%1d comp=%1d" #define MEI_HDR_PRM(hdr) \ (hdr)->host_addr, (hdr)->me_addr, \ - (hdr)->length, (hdr)->msg_complete + (hdr)->length, (hdr)->internal, (hdr)->msg_complete #endif --- linux-3.13.0.orig/drivers/misc/mei/nfc.c +++ linux-3.13.0/drivers/misc/mei/nfc.c @@ -92,7 +92,7 @@ * @cl: NFC host client * @cl_info: NFC info host client * @init_work: perform connection to the info client - * @fw_ivn: NFC Intervace Version Number + * @fw_ivn: NFC Interface Version Number * @vendor_id: NFC manufacturer ID * @radio_type: NFC radio type */ @@ -163,7 +163,7 @@ return 0; default: - dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", + dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", ndev->radio_type); return -EINVAL; @@ -175,14 +175,14 @@ ndev->bus_name = "pn544"; return 0; default: - dev_err(&dev->pdev->dev, "Unknow radio type 0x%x\n", + dev_err(&dev->pdev->dev, "Unknown radio type 0x%x\n", ndev->radio_type); return -EINVAL; } default: - dev_err(&dev->pdev->dev, "Unknow vendor ID 0x%x\n", + dev_err(&dev->pdev->dev, "Unknown vendor ID 0x%x\n", ndev->vendor_id); return -EINVAL; @@ -428,7 +428,7 @@ mutex_unlock(&dev->device_lock); if (mei_nfc_if_version(ndev) < 0) { - dev_err(&dev->pdev->dev, "Could not get the NFC interfave version"); + dev_err(&dev->pdev->dev, "Could not get the NFC interface version"); goto err; } @@ -469,7 +469,9 @@ return; err: + mutex_lock(&dev->device_lock); mei_nfc_free(ndev); + mutex_unlock(&dev->device_lock); return; } @@ -481,7 +483,7 @@ struct mei_cl *cl_info, *cl = NULL; int i, ret; - /* already initialzed */ + /* already initialized */ if (ndev->cl_info) return 0; @@ -547,12 +549,16 @@ return ret; } -void mei_nfc_host_exit(void) +void mei_nfc_host_exit(struct mei_device *dev) { struct mei_nfc_dev *ndev = &nfc_dev; + cancel_work_sync(&ndev->init_work); + + mutex_lock(&dev->device_lock); if (ndev->cl && ndev->cl->device) mei_cl_remove_device(ndev->cl->device); mei_nfc_free(ndev); + mutex_unlock(&dev->device_lock); } --- linux-3.13.0.orig/drivers/misc/mei/pci-me.c +++ linux-3.13.0/drivers/misc/mei/pci-me.c @@ -43,6 +43,9 @@ #include "hw-me.h" #include "client.h" +static bool disable_msi; +module_param(disable_msi, bool, 0); + /* mei_pci_tbl - PCI Device ID Table */ static DEFINE_PCI_DEVICE_TABLE(mei_me_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)}, @@ -100,15 +103,31 @@ const struct pci_device_id *ent) { u32 reg; - if (ent->device == MEI_DEV_ID_PBG_1) { - pci_read_config_dword(pdev, 0x48, ®); - /* make sure that bit 9 is up and bit 10 is down */ - if ((reg & 0x600) == 0x200) { - dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); - return false; - } + /* Cougar Point || Patsburg */ + if (ent->device == MEI_DEV_ID_CPT_1 || + ent->device == MEI_DEV_ID_PBG_1) { + pci_read_config_dword(pdev, PCI_CFG_HFS_2, ®); + /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */ + if ((reg & 0x600) == 0x200) + goto no_mei; + } + + /* Lynx Point */ + if (ent->device == MEI_DEV_ID_LPT_H || + ent->device == MEI_DEV_ID_LPT_W || + ent->device == MEI_DEV_ID_LPT_HR) { + /* Read ME FW Status check for SPS Firmware */ + pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); + /* if bits [19:16] = 15, running SPS Firmware */ + if ((reg & 
0xf0000) == 0xf0000) + goto no_mei; } + return true; + +no_mei: + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; } /** * mei_probe - Device Initialization Routine @@ -144,6 +163,21 @@ dev_err(&pdev->dev, "failed to get pci regions.\n"); goto disable_device; } + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + } + if (err) { + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); + goto release_regions; + } + + /* allocates and initializes the mei dev structure */ dev = mei_me_dev_init(pdev); if (!dev) { @@ -158,7 +192,8 @@ err = -ENOMEM; goto free_device; } - pci_enable_msi(pdev); + if (!disable_msi) + pci_enable_msi(pdev); /* request and enable interrupt */ if (pci_dev_msi_enabled(pdev)) @@ -197,8 +232,8 @@ return 0; release_irq: + mei_cancel_work(dev); mei_disable_interrupts(dev); - flush_scheduled_work(); free_irq(pdev->irq, dev); disable_msi: pci_disable_msi(pdev); @@ -306,16 +341,14 @@ return err; } - mutex_lock(&dev->device_lock); - dev->dev_state = MEI_DEV_POWER_UP; - mei_clear_interrupts(dev); - mei_reset(dev, 1); - mutex_unlock(&dev->device_lock); + err = mei_restart(dev); + if (err) + return err; /* Start timer if stopped in suspend */ schedule_delayed_work(&dev->timer_work, HZ); - return err; + return 0; } static SIMPLE_DEV_PM_OPS(mei_me_pm_ops, mei_me_pci_suspend, mei_me_pci_resume); #define MEI_ME_PM_OPS (&mei_me_pm_ops) --- linux-3.13.0.orig/drivers/misc/mei/wd.c +++ linux-3.13.0/drivers/misc/mei/wd.c @@ -115,6 +115,7 @@ hdr.me_addr = dev->wd_cl.me_client_id; hdr.msg_complete = 1; hdr.reserved = 0; + hdr.internal = 0; if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE)) hdr.length = MEI_WD_START_MSG_SIZE; --- linux-3.13.0.orig/drivers/misc/mic/host/mic_device.h +++ linux-3.13.0/drivers/misc/mic/host/mic_device.h @@ -134,6 +134,8 @@ * @send_intr: Send an interrupt for a particular doorbell on the card. * @ack_interrupt: Hardware specific operations to ack the h/w on * receipt of an interrupt. + * @intr_workarounds: Hardware specific workarounds needed after + * handling an interrupt. * @reset: Reset the remote processor. * @reset_fw_ready: Reset firmware ready field. * @is_fw_ready: Check if firmware is ready for OS download. 
@@ -149,6 +151,7 @@ void (*write_spad)(struct mic_device *mdev, unsigned int idx, u32 val); void (*send_intr)(struct mic_device *mdev, int doorbell); u32 (*ack_interrupt)(struct mic_device *mdev); + void (*intr_workarounds)(struct mic_device *mdev); void (*reset)(struct mic_device *mdev); void (*reset_fw_ready)(struct mic_device *mdev); bool (*is_fw_ready)(struct mic_device *mdev); --- linux-3.13.0.orig/drivers/misc/mic/host/mic_main.c +++ linux-3.13.0/drivers/misc/mic/host/mic_main.c @@ -115,7 +115,7 @@ struct mic_device *mdev = data; struct mic_bootparam *bootparam = mdev->dp; - mdev->ops->ack_interrupt(mdev); + mdev->ops->intr_workarounds(mdev); switch (bootparam->shutdown_status) { case MIC_HALTED: --- linux-3.13.0.orig/drivers/misc/mic/host/mic_virtio.c +++ linux-3.13.0/drivers/misc/mic/host/mic_virtio.c @@ -156,7 +156,8 @@ static int _mic_virtio_copy(struct mic_vdev *mvdev, struct mic_copy_desc *copy) { - int ret = 0, iovcnt = copy->iovcnt; + int ret = 0; + u32 iovcnt = copy->iovcnt; struct iovec iov; struct iovec __user *u_iov = copy->iov; void __user *ubuf = NULL; @@ -369,7 +370,7 @@ struct mic_vdev *mvdev = data; struct mic_device *mdev = mvdev->mdev; - mdev->ops->ack_interrupt(mdev); + mdev->ops->intr_workarounds(mdev); schedule_work(&mvdev->virtio_bh_work); return IRQ_HANDLED; } --- linux-3.13.0.orig/drivers/misc/mic/host/mic_x100.c +++ linux-3.13.0/drivers/misc/mic/host/mic_x100.c @@ -174,35 +174,38 @@ } /** - * mic_ack_interrupt - Device specific interrupt handling. - * @mdev: pointer to mic_device instance + * mic_x100_ack_interrupt - Read the interrupt sources register and + * clear it. This function will be called in the MSI/INTx case. + * @mdev: Pointer to mic_device instance. * - * Returns: bitmask of doorbell events triggered. + * Returns: bitmask of interrupt sources triggered. */ static u32 mic_x100_ack_interrupt(struct mic_device *mdev) { - u32 reg = 0; - struct mic_mw *mw = &mdev->mmio; u32 sicr0 = MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_SICR0; + u32 reg = mic_mmio_read(&mdev->mmio, sicr0); + mic_mmio_write(&mdev->mmio, reg, sicr0); + return reg; +} + +/** + * mic_x100_intr_workarounds - These hardware specific workarounds are + * to be invoked everytime an interrupt is handled. + * @mdev: Pointer to mic_device instance. + * + * Returns: none + */ +static void mic_x100_intr_workarounds(struct mic_device *mdev) +{ + struct mic_mw *mw = &mdev->mmio; /* Clear pending bit array. 
*/ if (MIC_A0_STEP == mdev->stepping) mic_mmio_write(mw, 1, MIC_X100_SBOX_BASE_ADDRESS + MIC_X100_SBOX_MSIXPBACR); - if (mdev->irq_info.num_vectors <= 1) { - reg = mic_mmio_read(mw, sicr0); - - if (unlikely(!reg)) - goto done; - - mic_mmio_write(mw, reg, sicr0); - } - if (mdev->stepping >= MIC_B0_STEP) mdev->intr_ops->enable_interrupts(mdev); -done: - return reg; } /** @@ -553,6 +556,7 @@ .write_spad = mic_x100_write_spad, .send_intr = mic_x100_send_intr, .ack_interrupt = mic_x100_ack_interrupt, + .intr_workarounds = mic_x100_intr_workarounds, .reset = mic_x100_hw_reset, .reset_fw_ready = mic_x100_reset_fw_ready, .is_fw_ready = mic_x100_is_fw_ready, --- linux-3.13.0.orig/drivers/mmc/card/block.c +++ linux-3.13.0/drivers/mmc/card/block.c @@ -951,6 +951,18 @@ md->reset_done &= ~type; } +int mmc_access_rpmb(struct mmc_queue *mq) +{ + struct mmc_blk_data *md = mq->data; + /* + * If this is an RPMB partition access, return true + */ + if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) + return true; + + return false; +} + static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; @@ -1959,6 +1971,7 @@ struct mmc_card *card = md->queue.card; struct mmc_host *host = card->host; unsigned long flags; + unsigned int cmd_flags = req ? req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ @@ -1974,7 +1987,7 @@ } mq->flags &= ~MMC_QUEUE_NEW_REQUEST; - if (req && req->cmd_flags & REQ_DISCARD) { + if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); @@ -1983,7 +1996,7 @@ ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); - } else if (req && req->cmd_flags & REQ_FLUSH) { + } else if (cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); @@ -1999,7 +2012,7 @@ out: if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || - (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK))) + (cmd_flags & MMC_REQ_SPECIAL_MASK)) /* * Release host when there are no more requests * and after special request(discard, flush) is done.
--- linux-3.13.0.orig/drivers/mmc/card/queue.c +++ linux-3.13.0/drivers/mmc/card/queue.c @@ -38,7 +38,7 @@ return BLKPREP_KILL; } - if (mq && mmc_card_removed(mq->card)) + if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) return BLKPREP_KILL; req->cmd_flags |= REQ_DONTPREP; @@ -197,7 +197,7 @@ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) - limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; mq->card = card; mq->queue = blk_init_queue(mmc_request_fn, lock); --- linux-3.13.0.orig/drivers/mmc/card/queue.h +++ linux-3.13.0/drivers/mmc/card/queue.h @@ -73,4 +73,6 @@ extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); extern void mmc_packed_clean(struct mmc_queue *); +extern int mmc_access_rpmb(struct mmc_queue *); + #endif --- linux-3.13.0.orig/drivers/mmc/core/sd.c +++ linux-3.13.0/drivers/mmc/core/sd.c @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -45,6 +46,13 @@ 35, 40, 45, 50, 55, 60, 70, 80, }; +static const unsigned int sd_au_size[] = { + 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512, + SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512, + SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, + SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, +}; + #define UNSTUFF_BITS(resp,start,size) \ ({ \ const int __size = size; \ @@ -216,7 +224,7 @@ static int mmc_read_ssr(struct mmc_card *card) { unsigned int au, es, et, eo; - int err, i, max_au; + int err, i; u32 *ssr; if (!(card->csd.cmdclass & CCC_APP_SPEC)) { @@ -240,26 +248,25 @@ for (i = 0; i < 16; i++) ssr[i] = be32_to_cpu(ssr[i]); - /* SD3.0 increases max AU size to 64MB (0xF) from 4MB (0x9) */ - max_au = card->scr.sda_spec3 ? 0xF : 0x9; - /* * UNSTUFF_BITS only works with four u32s so we have to offset the * bitfield positions accordingly. 
*/ au = UNSTUFF_BITS(ssr, 428 - 384, 4); - if (au > 0 && au <= max_au) { - card->ssr.au = 1 << (au + 4); - es = UNSTUFF_BITS(ssr, 408 - 384, 16); - et = UNSTUFF_BITS(ssr, 402 - 384, 6); - eo = UNSTUFF_BITS(ssr, 400 - 384, 2); - if (es && et) { - card->ssr.erase_timeout = (et * 1000) / es; - card->ssr.erase_offset = eo * 1000; + if (au) { + if (au <= 9 || card->scr.sda_spec3) { + card->ssr.au = sd_au_size[au]; + es = UNSTUFF_BITS(ssr, 408 - 384, 16); + et = UNSTUFF_BITS(ssr, 402 - 384, 6); + if (es && et) { + eo = UNSTUFF_BITS(ssr, 400 - 384, 2); + card->ssr.erase_timeout = (et * 1000) / es; + card->ssr.erase_offset = eo * 1000; + } + } else { + pr_warning("%s: SD Status: Invalid Allocation Unit size.\n", + mmc_hostname(card->host)); } - } else { - pr_warning("%s: SD Status: Invalid Allocation Unit " - "size.\n", mmc_hostname(card->host)); } out: kfree(ssr); --- linux-3.13.0.orig/drivers/mmc/host/Makefile +++ linux-3.13.0/drivers/mmc/host/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o +obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o --- linux-3.13.0.orig/drivers/mmc/host/atmel-mci.c +++ linux-3.13.0/drivers/mmc/host/atmel-mci.c @@ -1192,11 +1192,22 @@ iflags |= ATMCI_CMDRDY; cmd = mrq->cmd; cmdflags = atmci_prepare_command(slot->mmc, cmd); - atmci_send_command(host, cmd, cmdflags); + + /* + * DMA transfer should be started before sending the command to avoid + * unexpected errors especially for read operations in SDIO mode. + * Unfortunately, in PDC mode, command has to be sent before starting + * the transfer. + */ + if (host->submit_data != &atmci_submit_data_dma) + atmci_send_command(host, cmd, cmdflags); if (data) host->submit_data(host, data); + if (host->submit_data == &atmci_submit_data_dma) + atmci_send_command(host, cmd, cmdflags); + if (mrq->stop) { host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); host->stop_cmdr |= ATMCI_CMDR_STOP_XFER; --- linux-3.13.0.orig/drivers/mmc/host/dw_mmc.c +++ linux-3.13.0/drivers/mmc/host/dw_mmc.c @@ -631,6 +631,13 @@ WARN_ON(!(data->flags & MMC_DATA_READ)); + /* + * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is + * in the FIFO region, so we really shouldn't access it). + */ + if (host->verid < DW_MMC_240A) + return; + if (host->timing != MMC_TIMING_MMC_HS200 && host->timing != MMC_TIMING_UHS_SDR104) goto disable; --- linux-3.13.0.orig/drivers/mmc/host/omap_hsmmc.c +++ linux-3.13.0/drivers/mmc/host/omap_hsmmc.c @@ -1812,7 +1812,7 @@ host->slot_id = 0; host->mapbase = res->start + pdata->reg_offset; host->base = ioremap(host->mapbase, SZ_4K); - host->power_mode = MMC_POWER_OFF; + host->power_mode = -1; host->next_data.cookie = 1; platform_set_drvdata(pdev, host); --- linux-3.13.0.orig/drivers/mmc/host/rtsx_pci_sdmmc.c +++ linux-3.13.0/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -246,6 +246,9 @@ case MMC_RSP_R1: rsp_type = SD_RSP_TYPE_R1; break; + case MMC_RSP_R1 & ~MMC_RSP_CRC: + rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; + break; case MMC_RSP_R1B: rsp_type = SD_RSP_TYPE_R1b; break; @@ -339,6 +342,13 @@ } if (rsp_type == SD_RSP_TYPE_R2) { + /* + * The controller offloads the last byte {CRC-7, end bit 1'b1} + * of response type R2. Assign dummy CRC, 0, and end bit to the + * byte(ptr[16], goes into the LSB of resp[3] later). 
+ */ + ptr[16] = 1; + for (i = 0; i < 4; i++) { cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", --- linux-3.13.0.orig/drivers/mmc/host/sdhci-bcm-kona.c +++ linux-3.13.0/drivers/mmc/host/sdhci-bcm-kona.c @@ -314,7 +314,7 @@ return ret; } -static int __exit sdhci_bcm_kona_remove(struct platform_device *pdev) +static int sdhci_bcm_kona_remove(struct platform_device *pdev) { return sdhci_pltfm_unregister(pdev); } --- linux-3.13.0.orig/drivers/mmc/host/sdhci-esdhc-imx.c +++ linux-3.13.0/drivers/mmc/host/sdhci-esdhc-imx.c @@ -558,19 +558,17 @@ struct pltfm_imx_data *imx_data = pltfm_host->priv; struct esdhc_platform_data *boarddata = &imx_data->boarddata; - u32 f_host = clk_get_rate(pltfm_host->clk); - - if (boarddata->f_max && (boarddata->f_max < f_host)) + if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) return boarddata->f_max; else - return f_host; + return pltfm_host->clock; } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - return clk_get_rate(pltfm_host->clk) / 256 / 16; + return pltfm_host->clock / 256 / 16; } static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, @@ -578,7 +576,7 @@ { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; - unsigned int host_clock = clk_get_rate(pltfm_host->clk); + unsigned int host_clock = pltfm_host->clock; int pre_div = 2; int div = 1; u32 temp, val; @@ -976,7 +974,7 @@ } pltfm_host->clk = imx_data->clk_per; - + pltfm_host->clock = clk_get_rate(pltfm_host->clk); clk_prepare_enable(imx_data->clk_per); clk_prepare_enable(imx_data->clk_ipg); clk_prepare_enable(imx_data->clk_ahb); --- linux-3.13.0.orig/drivers/mmc/host/sdhci-pci-o2micro.c +++ linux-3.13.0/drivers/mmc/host/sdhci-pci-o2micro.c @@ -0,0 +1,319 @@ +/* + * Copyright (C) 2013 BayHub Technology Ltd. + * + * Authors: Peter Guo + * Adam Lee + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include + +#include "sdhci.h" +#include "sdhci-pci.h" +#include "sdhci-pci-o2micro.h" + +void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) +{ + u32 scratch_32; + int ret; + /* Improve write performance for SD3.0 */ + ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14)); + pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32); + + /* Enable Link abnormal reset generating Reset */ + ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 19) | (1 << 11)); + scratch_32 |= (1 << 10); + pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32); + + /* set card power over current protection */ + ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32); + if (ret) + return; + scratch_32 |= (1 << 4); + pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32); + + /* adjust the output delay for SD mode */ + pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492); + + /* Set the output voltage setting of Aux 1.2v LDO */ + ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(3 << 12); + pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32); + + /* Set Max power supply capability of SD host */ + ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x01FE); + scratch_32 |= 0x00CC; + pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32); + /* Set DLL Tuning Window */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_TUNING_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000000FF); + scratch_32 |= 0x00000066; + pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32); + + /* Set UHS2 T_EIDLE */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_UHS2_L1_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000000FC); + scratch_32 |= 0x00000084; + pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32); + + /* Set UHS2 Termination */ + ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 21) | (1 << 30)); + + pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32); + + /* Set L1 Entrance Timer */ + ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0xf0000000); + scratch_32 |= 0x30000000; + pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_MISC_CTRL4, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000f0000); + scratch_32 |= 0x00080000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32); +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init); + +int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) +{ + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + u32 reg; + + chip = slot->chip; + host = slot->host; + switch (chip->pdev->device) { + case PCI_DEVICE_ID_O2_SDS0: + case PCI_DEVICE_ID_O2_SEABIRD0: + case PCI_DEVICE_ID_O2_SEABIRD1: + case PCI_DEVICE_ID_O2_SDS1: + case PCI_DEVICE_ID_O2_FUJIN2: + reg = sdhci_readl(host, O2_SD_VENDOR_SETTING); + if (reg & 0x1) + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + + if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2) + break; + /* set dll watch dog timer */ + reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2); + reg |= (1 << 12); + sdhci_writel(host, reg, 
O2_SD_VENDOR_SETTING2);
+
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
+
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
+{
+	int ret;
+	u8 scratch;
+	u32 scratch_32;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_O2_8220:
+	case PCI_DEVICE_ID_O2_8221:
+	case PCI_DEVICE_ID_O2_8320:
+	case PCI_DEVICE_ID_O2_8321:
+		/* This extra setup is required due to broken ADMA. */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set Multi 3 to VCC3V# */
+		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+		/* Disable CLK_REQ# support after media DET */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_CLKREQ, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x20;
+		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+		/* Choose capabilities, enable SDMA. We have to write 0x01
+		 * to the capabilities register first to unlock it.
+		 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+		break;
+	case PCI_DEVICE_ID_O2_SDS0:
+	case PCI_DEVICE_ID_O2_SDS1:
+	case PCI_DEVICE_ID_O2_FUJIN2:
+		/* UnLock WP */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set timeout CLK */
+		ret = pci_read_config_dword(chip->pdev,
+				O2_SD_CLK_SETTING, &scratch_32);
+		if (ret)
+			return ret;
+
+		scratch_32 &= ~(0xFF00);
+		scratch_32 |= 0x07E0C800;
+		pci_write_config_dword(chip->pdev,
+				O2_SD_CLK_SETTING, scratch_32);
+
+		ret = pci_read_config_dword(chip->pdev,
+				O2_SD_CLKREQ, &scratch_32);
+		if (ret)
+			return ret;
+		scratch_32 |= 0x3;
+		pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32);
+
+		ret = pci_read_config_dword(chip->pdev,
+				O2_SD_PLL_SETTING, &scratch_32);
+		if (ret)
+			return ret;
+
+		scratch_32 &= ~(0x1F3F070E);
+		scratch_32 |= 0x18270106;
+		pci_write_config_dword(chip->pdev,
+				O2_SD_PLL_SETTING, scratch_32);
+
+		/* Disable UHS1 function */
+		ret = pci_read_config_dword(chip->pdev,
+				O2_SD_CAP_REG2, &scratch_32);
+		if (ret)
+			return ret;
+		scratch_32 &= ~(0xE0);
+		pci_write_config_dword(chip->pdev,
+				O2_SD_CAP_REG2, scratch_32);
+
+		if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2)
+			sdhci_pci_o2_fujin2_pci_init(chip);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+		break;
+	case PCI_DEVICE_ID_O2_SEABIRD0:
+	case PCI_DEVICE_ID_O2_SEABIRD1:
+		/* UnLock WP */
+		ret = pci_read_config_byte(chip->pdev,
+				O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev,
O2_SD_LOCK_WP, scratch); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG0, &scratch_32); + + if ((scratch_32 & 0xff000000) == 0x01000000) { + scratch_32 &= 0x0000FFFF; + scratch_32 |= 0x1F340000; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + } else { + scratch_32 &= 0x0000FFFF; + scratch_32 |= 0x2c280000; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG4, + &scratch_32); + scratch_32 |= (1 << 22); + pci_write_config_dword(chip->pdev, + O2_SD_FUNC_REG4, scratch_32); + } + + /* Lock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe); + +int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) +{ + sdhci_pci_o2_probe(chip); + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume); --- linux-3.13.0.orig/drivers/mmc/host/sdhci-pci-o2micro.h +++ linux-3.13.0/drivers/mmc/host/sdhci-pci-o2micro.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2013 BayHub Technology Ltd. + * + * Authors: Peter Guo + * Adam Lee + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __SDHCI_PCI_O2MICRO_H +#define __SDHCI_PCI_O2MICRO_H + +#include "sdhci-pci.h" + +/* + * O2Micro device IDs + */ + +#define PCI_DEVICE_ID_O2_SDS0 0x8420 +#define PCI_DEVICE_ID_O2_SDS1 0x8421 +#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 +#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 +#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 + +/* + * O2Micro device registers + */ + +#define O2_SD_MISC_REG5 0x64 +#define O2_SD_LD0_CTRL 0x68 +#define O2_SD_DEV_CTRL 0x88 +#define O2_SD_LOCK_WP 0xD3 +#define O2_SD_TEST_REG 0xD4 +#define O2_SD_FUNC_REG0 0xDC +#define O2_SD_MULTI_VCC3V 0xEE +#define O2_SD_CLKREQ 0xEC +#define O2_SD_CAPS 0xE0 +#define O2_SD_ADMA1 0xE2 +#define O2_SD_ADMA2 0xE7 +#define O2_SD_INF_MOD 0xF1 +#define O2_SD_MISC_CTRL4 0xFC +#define O2_SD_TUNING_CTRL 0x300 +#define O2_SD_PLL_SETTING 0x304 +#define O2_SD_CLK_SETTING 0x328 +#define O2_SD_CAP_REG2 0x330 +#define O2_SD_CAP_REG0 0x334 +#define O2_SD_UHS1_CAP_SETTING 0x33C +#define O2_SD_DELAY_CTRL 0x350 +#define O2_SD_UHS2_L1_CTRL 0x35C +#define O2_SD_FUNC_REG3 0x3E0 +#define O2_SD_FUNC_REG4 0x3E4 + +#define O2_SD_VENDOR_SETTING 0x110 +#define O2_SD_VENDOR_SETTING2 0x1C8 + +extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip); + +extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); + +extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); + +extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); + +#endif /* __SDHCI_PCI_O2MICRO_H */ --- linux-3.13.0.orig/drivers/mmc/host/sdhci-pci.c +++ linux-3.13.0/drivers/mmc/host/sdhci-pci.c @@ -27,79 +27,8 @@ #include #include "sdhci.h" - -/* - * PCI device IDs - */ -#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 -#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a -#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 -#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 -#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 -#define 
PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 -#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 -#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 -#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa -#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb -#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 -#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 - -/* - * PCI registers - */ - -#define PCI_SDHCI_IFPIO 0x00 -#define PCI_SDHCI_IFDMA 0x01 -#define PCI_SDHCI_IFVENDOR 0x02 - -#define PCI_SLOT_INFO 0x40 /* 8 bits */ -#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) -#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 - -#define MAX_SLOTS 8 - -struct sdhci_pci_chip; -struct sdhci_pci_slot; - -struct sdhci_pci_fixes { - unsigned int quirks; - unsigned int quirks2; - bool allow_runtime_pm; - - int (*probe) (struct sdhci_pci_chip *); - - int (*probe_slot) (struct sdhci_pci_slot *); - void (*remove_slot) (struct sdhci_pci_slot *, int); - - int (*suspend) (struct sdhci_pci_chip *); - int (*resume) (struct sdhci_pci_chip *); -}; - -struct sdhci_pci_slot { - struct sdhci_pci_chip *chip; - struct sdhci_host *host; - struct sdhci_pci_data *data; - - int pci_bar; - int rst_n_gpio; - int cd_gpio; - int cd_irq; - - void (*hw_reset)(struct sdhci_host *host); -}; - -struct sdhci_pci_chip { - struct pci_dev *pdev; - - unsigned int quirks; - unsigned int quirks2; - bool allow_runtime_pm; - const struct sdhci_pci_fixes *fixes; - - int num_slots; /* Slots on controller */ - struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ -}; - +#include "sdhci-pci.h" +#include "sdhci-pci-o2micro.h" /*****************************************************************************\ * * @@ -296,6 +225,7 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .allow_runtime_pm = true, + .own_cd_for_runtime_pm = true, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { @@ -360,6 +290,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON, .allow_runtime_pm = true, + .own_cd_for_runtime_pm = true, }; /* Define Host controllers for Intel Merrifield platform */ @@ -381,6 +312,7 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_BROKEN_HS200, .probe_slot = intel_mrfl_mmc_probe_slot, }; @@ -393,65 +325,6 @@ #define O2_SD_ADMA2 0xE7 #define O2_SD_INF_MOD 0xF1 -static int o2_probe(struct sdhci_pci_chip *chip) -{ - int ret; - u8 scratch; - - switch (chip->pdev->device) { - case PCI_DEVICE_ID_O2_8220: - case PCI_DEVICE_ID_O2_8221: - case PCI_DEVICE_ID_O2_8320: - case PCI_DEVICE_ID_O2_8321: - /* This extra setup is required due to broken ADMA. */ - ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); - if (ret) - return ret; - scratch &= 0x7f; - pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); - - /* Set Multi 3 to VCC3V# */ - pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); - - /* Disable CLK_REQ# support after media DET */ - ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch); - if (ret) - return ret; - scratch |= 0x20; - pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); - - /* Choose capabilities, enable SDMA. We have to write 0x01 - * to the capabilities register first to unlock it. 
- */ - ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); - if (ret) - return ret; - scratch |= 0x01; - pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); - pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); - - /* Disable ADMA1/2 */ - pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39); - pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08); - - /* Disable the infinite transfer mode */ - ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch); - if (ret) - return ret; - scratch |= 0x08; - pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); - - /* Lock WP */ - ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); - if (ret) - return ret; - scratch |= 0x80; - pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); - } - - return 0; -} - static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; @@ -642,7 +515,10 @@ } static const struct sdhci_pci_fixes sdhci_o2 = { - .probe = o2_probe, + .probe = sdhci_pci_o2_probe, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .probe_slot = sdhci_pci_o2_probe_slot, + .resume = sdhci_pci_o2_resume, }; static const struct sdhci_pci_fixes sdhci_jmicron = { @@ -1055,6 +931,46 @@ .driver_data = (kernel_ulong_t)&sdhci_o2, }, + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_FUJIN2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SDS0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SDS1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SEABIRD0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SEABIRD1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + { /* Generic SD host controller */ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) }, @@ -1457,6 +1373,15 @@ sdhci_pci_add_own_cd(slot); + /* + * Check if the chip needs a separate GPIO for card detect to wake up + * from runtime suspend. If it is not there, don't allow runtime PM. + * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure. 
+ */ + if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && + !gpio_is_valid(slot->cd_gpio)) + chip->allow_runtime_pm = false; + return slot; remove: --- linux-3.13.0.orig/drivers/mmc/host/sdhci-pci.h +++ linux-3.13.0/drivers/mmc/host/sdhci-pci.h @@ -0,0 +1,78 @@ +#ifndef __SDHCI_PCI_H +#define __SDHCI_PCI_H + +/* + * PCI device IDs + */ + +#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 +#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a +#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 +#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 +#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 +#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 +#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa +#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb +#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 +#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 + +/* + * PCI registers + */ + +#define PCI_SDHCI_IFPIO 0x00 +#define PCI_SDHCI_IFDMA 0x01 +#define PCI_SDHCI_IFVENDOR 0x02 + +#define PCI_SLOT_INFO 0x40 /* 8 bits */ +#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) +#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 + +#define MAX_SLOTS 8 + +struct sdhci_pci_chip; +struct sdhci_pci_slot; + +struct sdhci_pci_fixes { + unsigned int quirks; + unsigned int quirks2; + bool allow_runtime_pm; + bool own_cd_for_runtime_pm; + + int (*probe) (struct sdhci_pci_chip *); + + int (*probe_slot) (struct sdhci_pci_slot *); + void (*remove_slot) (struct sdhci_pci_slot *, int); + + int (*suspend) (struct sdhci_pci_chip *); + int (*resume) (struct sdhci_pci_chip *); +}; + +struct sdhci_pci_slot { + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + struct sdhci_pci_data *data; + + int pci_bar; + int rst_n_gpio; + int cd_gpio; + int cd_irq; + + void (*hw_reset)(struct sdhci_host *host); +}; + +struct sdhci_pci_chip { + struct pci_dev *pdev; + + unsigned int quirks; + unsigned int quirks2; + bool allow_runtime_pm; + const struct sdhci_pci_fixes *fixes; + + int num_slots; /* Slots on controller */ + struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ +}; + +#endif /* __SDHCI_PCI_H */ --- linux-3.13.0.orig/drivers/mmc/host/sdhci-pxav3.c +++ linux-3.13.0/drivers/mmc/host/sdhci-pxav3.c @@ -201,8 +201,8 @@ if (!pdata) return NULL; - of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); - if (clk_delay_cycles > 0) + if (!of_property_read_u32(np, "mrvl,clk-delay-cycles", + &clk_delay_cycles)) pdata->clk_delay_cycles = clk_delay_cycles; return pdata; @@ -288,10 +288,11 @@ } } - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); pm_suspend_ignore_children(&pdev->dev, 1); ret = sdhci_add_host(host); @@ -316,8 +317,8 @@ err_of_parse: err_cd_req: err_add_host: - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); clk_disable_unprepare(clk); clk_put(clk); err_clk_get: @@ -378,15 +379,13 @@ { struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - unsigned long flags; + int ret; - if (pltfm_host->clk) { - spin_lock_irqsave(&host->lock, flags); - host->runtime_suspended = true; - spin_unlock_irqrestore(&host->lock, flags); + ret = sdhci_runtime_suspend_host(host); + if (ret) + return ret; - clk_disable_unprepare(pltfm_host->clk); - } + 
clk_disable_unprepare(pltfm_host->clk); return 0; } @@ -395,17 +394,10 @@ { struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - unsigned long flags; - - if (pltfm_host->clk) { - clk_prepare_enable(pltfm_host->clk); - spin_lock_irqsave(&host->lock, flags); - host->runtime_suspended = false; - spin_unlock_irqrestore(&host->lock, flags); - } + clk_prepare_enable(pltfm_host->clk); - return 0; + return sdhci_runtime_resume_host(host); } #endif --- linux-3.13.0.orig/drivers/mmc/host/sdhci.c +++ linux-3.13.0/drivers/mmc/host/sdhci.c @@ -1333,6 +1333,8 @@ sdhci_runtime_pm_get(host); + present = mmc_gpio_get_cd(host->mmc); + spin_lock_irqsave(&host->lock, flags); WARN_ON(host->mrq != NULL); @@ -1361,7 +1363,6 @@ * zero: cd-gpio is used, and card is removed * one: cd-gpio is used, and card is present */ - present = mmc_gpio_get_cd(host->mmc); if (present < 0) { /* If polling, assume that the card is always present. */ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) @@ -1845,12 +1846,12 @@ unsigned long timeout; int err = 0; bool requires_tuning_nonuhs = false; + unsigned long flags; host = mmc_priv(mmc); sdhci_runtime_pm_get(host); - disable_irq(host->irq); - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); @@ -1870,15 +1871,13 @@ requires_tuning_nonuhs) ctrl |= SDHCI_CTRL_EXEC_TUNING; else { - spin_unlock(&host->lock); - enable_irq(host->irq); + spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); return 0; } if (host->ops->platform_execute_tuning) { - spin_unlock(&host->lock); - enable_irq(host->irq); + spin_unlock_irqrestore(&host->lock, flags); err = host->ops->platform_execute_tuning(host, opcode); sdhci_runtime_pm_put(host); return err; @@ -1951,15 +1950,12 @@ host->cmd = NULL; host->mrq = NULL; - spin_unlock(&host->lock); - enable_irq(host->irq); - + spin_unlock_irqrestore(&host->lock, flags); /* Wait for Buffer Read Ready interrupt */ wait_event_interruptible_timeout(host->buf_ready_int, (host->tuning_done == 1), msecs_to_jiffies(50)); - disable_irq(host->irq); - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); if (!host->tuning_done) { pr_info(DRIVER_NAME ": Timeout waiting for " @@ -2034,8 +2030,7 @@ err = 0; sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); - spin_unlock(&host->lock); - enable_irq(host->irq); + spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); return err; @@ -2071,15 +2066,18 @@ { struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; + int present; /* First check if client has provided their own card event */ if (host->ops->card_event) host->ops->card_event(host); + present = sdhci_do_get_cd(host); + spin_lock_irqsave(&host->lock, flags); /* Check host->mrq first in case we are runtime suspended */ - if (host->mrq && !sdhci_do_get_cd(host)) { + if (host->mrq && !present) { pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); pr_err("%s: Resetting controller.\n", @@ -3004,7 +3002,8 @@ /* SD3.0: SDR104 is supported so (for eMMC) the caps2 * field can be promoted to support HS200. 
*/ - mmc->caps2 |= MMC_CAP2_HS200; + if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) + mmc->caps2 |= MMC_CAP2_HS200; } else if (caps[1] & SDHCI_SUPPORT_SDR50) mmc->caps |= MMC_CAP_UHS_SDR50; --- linux-3.13.0.orig/drivers/mmc/host/tmio_mmc_pio.c +++ linux-3.13.0/drivers/mmc/host/tmio_mmc_pio.c @@ -1042,6 +1042,15 @@ } /* + * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from + * looping forever... + */ + if (mmc->f_min == 0) { + ret = -EINVAL; + goto host_free; + } + + /* * There are 4 different scenarios for the card detection: * 1) an external gpio irq handles the cd (best for power savings) * 2) internal sdhi irq handles the cd --- linux-3.13.0.orig/drivers/mtd/devices/elm.c +++ linux-3.13.0/drivers/mtd/devices/elm.c @@ -428,6 +428,7 @@ ELM_SYNDROME_FRAGMENT_1 + offset); regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset); + break; default: return -EINVAL; } @@ -466,6 +467,7 @@ regs->elm_syndrome_fragment_1[i]); elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, regs->elm_syndrome_fragment_0[i]); + break; default: return -EINVAL; } --- linux-3.13.0.orig/drivers/mtd/ftl.c +++ linux-3.13.0/drivers/mtd/ftl.c @@ -1075,7 +1075,6 @@ return; } - ftl_freepart(partition); kfree(partition); } --- linux-3.13.0.orig/drivers/mtd/nand/atmel_nand.c +++ linux-3.13.0/drivers/mtd/nand/atmel_nand.c @@ -1220,6 +1220,7 @@ goto err; } + nand_chip->options |= NAND_NO_SUBPAGE_WRITE; nand_chip->ecc.read_page = atmel_nand_pmecc_read_page; nand_chip->ecc.write_page = atmel_nand_pmecc_write_page; --- linux-3.13.0.orig/drivers/mtd/nand/fsl_elbc_nand.c +++ linux-3.13.0/drivers/mtd/nand/fsl_elbc_nand.c @@ -724,6 +724,19 @@ return 0; } +/* ECC will be calculated automatically, and errors will be detected in + * waitfunc. 
+ */ +static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, uint32_t data_len, + const uint8_t *buf, int oob_required) +{ + fsl_elbc_write_buf(mtd, buf, mtd->writesize); + fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) { struct fsl_lbc_ctrl *ctrl = priv->ctrl; @@ -762,6 +775,7 @@ chip->ecc.read_page = fsl_elbc_read_page; chip->ecc.write_page = fsl_elbc_write_page; + chip->ecc.write_subpage = fsl_elbc_write_subpage; /* If CS Base Register selects full hardware ECC then use it */ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == --- linux-3.13.0.orig/drivers/mtd/nand/mxc_nand.c +++ linux-3.13.0/drivers/mtd/nand/mxc_nand.c @@ -677,7 +677,6 @@ ecc_stat >>= 4; } while (--no_subpages); - mtd->ecc_stats.corrected += ret; pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); return ret; --- linux-3.13.0.orig/drivers/mtd/nand/omap2.c +++ linux-3.13.0/drivers/mtd/nand/omap2.c @@ -933,7 +933,7 @@ u32 val; val = readl(info->reg.gpmc_ecc_config); - if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs) + if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs) return -EINVAL; /* read ecc result */ @@ -1451,7 +1451,7 @@ /* Check if any error reported */ if (!is_error_reported) - return 0; + return stat; /* Decode BCH error using ELM module */ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec); @@ -1633,6 +1633,7 @@ int i; dma_cap_mask_t mask; unsigned sig; + unsigned oob_index; struct resource *res; struct mtd_part_parser_data ppdata = {}; @@ -1832,11 +1833,14 @@ (mtd->writesize / nand_chip->ecc.size); if (nand_chip->options & NAND_BUSWIDTH_16) - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; + oob_index = BADBLOCK_MARKER_LENGTH; else - ecclayout->eccpos[0] = 1; - ecclayout->oobfree->offset = ecclayout->eccpos[0] + - ecclayout->eccbytes; + oob_index = 1; + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) + ecclayout->eccpos[i] = oob_index; + /* no reserved-marker in ecclayout for this ecc-scheme */ + ecclayout->oobfree->offset = + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; break; case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: @@ -1853,9 +1857,15 @@ ecclayout->eccbytes = nand_chip->ecc.bytes * (mtd->writesize / nand_chip->ecc.size); - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; - ecclayout->oobfree->offset = ecclayout->eccpos[0] + - ecclayout->eccbytes; + oob_index = BADBLOCK_MARKER_LENGTH; + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) { + ecclayout->eccpos[i] = oob_index; + if (((i + 1) % nand_chip->ecc.bytes) == 0) + oob_index++; + } + /* include reserved-marker in ecclayout->oobfree calculation */ + ecclayout->oobfree->offset = 1 + + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; /* software bch library is used for locating errors */ nand_chip->ecc.priv = nand_bch_init(mtd, nand_chip->ecc.size, @@ -1889,9 +1899,12 @@ ecclayout->eccbytes = nand_chip->ecc.bytes * (mtd->writesize / nand_chip->ecc.size); - ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH; - ecclayout->oobfree->offset = ecclayout->eccpos[0] + - ecclayout->eccbytes; + oob_index = BADBLOCK_MARKER_LENGTH; + for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) + ecclayout->eccpos[i] = oob_index; + /* reserved marker already included in ecclayout->eccbytes */ + ecclayout->oobfree->offset = + ecclayout->eccpos[ecclayout->eccbytes - 1] + 1; /* This ECC scheme requires ELM H/W block */ if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) { pr_err("nand: error: could 
not initialize ELM\n");
@@ -1919,9 +1932,15 @@
 		ecclayout->eccbytes = nand_chip->ecc.bytes * (mtd->writesize / nand_chip->ecc.size);
-		ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-			ecclayout->eccbytes;
+		oob_index = BADBLOCK_MARKER_LENGTH;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+			ecclayout->eccpos[i] = oob_index;
+			if (((i + 1) % nand_chip->ecc.bytes) == 0)
+				oob_index++;
+		}
+		/* include reserved-marker in ecclayout->oobfree calculation */
+		ecclayout->oobfree->offset = 1 +
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		/* software bch library is used for locating errors */
 		nand_chip->ecc.priv = nand_bch_init(mtd, nand_chip->ecc.size,
@@ -1962,9 +1981,12 @@
 		ecclayout->eccbytes = nand_chip->ecc.bytes * (mtd->writesize / nand_chip->ecc.size);
-		ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
-		ecclayout->oobfree->offset = ecclayout->eccpos[0] +
-			ecclayout->eccbytes;
+		oob_index = BADBLOCK_MARKER_LENGTH;
+		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+			ecclayout->eccpos[i] = oob_index;
+		/* reserved marker already included in ecclayout->eccbytes */
+		ecclayout->oobfree->offset =
+			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
 		break;
 #else
 		pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1978,11 +2000,8 @@
 		goto return_error;
 	}
-	/* populate remaining ECC layout data */
-	ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
-			ecclayout->eccbytes);
-	for (i = 1; i < ecclayout->eccbytes; i++)
-		ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+	/* all OOB bytes from oobfree->offset till end of OOB are free */
+	ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
 	/* check if NAND device's OOB is enough to store ECC signatures */
 	if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
 		pr_err("not enough OOB bytes required = %d, available=%d\n",
--- linux-3.13.0.orig/drivers/mtd/nand/pxa3xx_nand.c
+++ linux-3.13.0/drivers/mtd/nand/pxa3xx_nand.c
@@ -110,10 +110,10 @@
 /* macros for registers read/write */
 #define nand_writel(info, off, val)	\
-	__raw_writel((val), (info)->mmio_base + (off))
+	writel_relaxed((val), (info)->mmio_base + (off))
 #define nand_readl(info, off)	\
-	__raw_readl((info)->mmio_base + (off))
+	readl_relaxed((info)->mmio_base + (off))
 /* error code and state */
 enum {
--- linux-3.13.0.orig/drivers/mtd/tests/torturetest.c
+++ linux-3.13.0/drivers/mtd/tests/torturetest.c
@@ -264,7 +264,9 @@
 	int i;
 	void *patt;
-	mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+	err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+	if (err)
+		goto out;
 	/* Check if the eraseblocks contain only 0xFF bytes */
 	if (check) {
--- linux-3.13.0.orig/drivers/mtd/ubi/fastmap.c
+++ linux-3.13.0/drivers/mtd/ubi/fastmap.c
@@ -330,6 +330,7 @@
 			av = tmp_av;
 		else {
 			ubi_err("orphaned volume in fastmap pool!");
+			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 			return UBI_BAD_FASTMAP;
 		}
--- linux-3.13.0.orig/drivers/mtd/ubi/upd.c
+++ linux-3.13.0/drivers/mtd/ubi/upd.c
@@ -133,6 +133,10 @@
 	ubi_assert(!vol->updating && !vol->changing_leb);
 	vol->updating = 1;
+	vol->upd_buf = vmalloc(ubi->leb_size);
+	if (!vol->upd_buf)
+		return -ENOMEM;
+
 	err = set_update_marker(ubi, vol);
 	if (err)
 		return err;
@@ -152,14 +156,12 @@
 		err = clear_update_marker(ubi, vol, 0);
 		if (err)
 			return err;
+
+		vfree(vol->upd_buf);
 		vol->updating = 0;
 		return 0;
 	}
-	vol->upd_buf = vmalloc(ubi->leb_size);
-	if (!vol->upd_buf)
-		return -ENOMEM;
-
 	vol->upd_ebs = div_u64(bytes +
vol->usable_leb_size - 1,
 			       vol->usable_leb_size);
 	vol->upd_bytes = bytes;
--- linux-3.13.0.orig/drivers/mtd/ubi/wl.c
+++ linux-3.13.0/drivers/mtd/ubi/wl.c
@@ -1205,7 +1205,6 @@
 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
 	if (err) {
-		kmem_cache_free(ubi_wl_entry_slab, e1);
 		if (e2)
 			kmem_cache_free(ubi_wl_entry_slab, e2);
 		goto out_ro;
@@ -1219,10 +1218,8 @@
 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
 		       e2->pnum, vol_id, lnum);
 		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
-		if (err) {
-			kmem_cache_free(ubi_wl_entry_slab, e2);
+		if (err)
 			goto out_ro;
-		}
 	}
 	dbg_wl("done");
@@ -1258,10 +1255,9 @@
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
-	if (err) {
-		kmem_cache_free(ubi_wl_entry_slab, e2);
+	if (err)
 		goto out_ro;
-	}
+
 	mutex_unlock(&ubi->move_mutex);
 	return 0;
--- linux-3.13.0.orig/drivers/net/Kconfig
+++ linux-3.13.0/drivers/net/Kconfig
@@ -135,6 +135,7 @@
 config MACVTAP
 	tristate "MAC-VLAN based tap driver"
 	depends on MACVLAN
+	depends on INET
 	help
 	  This adds a specialized tap character device driver that is based
 	  on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -205,6 +206,7 @@
 config TUN
 	tristate "Universal TUN/TAP device driver support"
+	depends on INET
 	select CRC32
 	---help---
 	  TUN/TAP provides packet reception and transmission for user space
@@ -341,7 +343,6 @@
 	  The corresponding Linux frontend driver is enabled by the
 	  CONFIG_XEN_NETDEV_FRONTEND configuration option.
-
 	  The backend driver presents a standard network device
 	  endpoint for each paravirtual network device to the
 	  driver domain network stack. These can then be bridged
 	  or routed
@@ -353,12 +354,63 @@
 	  will be called xen-netback.
 config VMXNET3
-	tristate "VMware VMXNET3 ethernet driver"
-	depends on PCI && INET
+	tristate "VMware VMXNET3 ethernet driver"
+	depends on PCI && INET
+	help
+	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmxnet3.
+
+config DPA
+	bool "Freescale Data Path Frame Manager Ethernet"
+	depends on FSL_SOC && FSL_BMAN_PORTAL && FSL_QMAN_PORTAL && FSL_FMAN
+	select PHYLIB
+
+config DPA_OFFLINE_PORTS
+	bool "Offline Ports support"
+	depends on DPA
+	default y
+	help
+	  The Offline Parsing / Host Command ports (short: OH ports, or Offline Ports) provide
+	  most of the functionality of the regular, online ports, except they receive their
+	  frames from a core or an accelerator on the SoC, via QMan frame queues,
+	  rather than directly from the network.
+	  Offline ports are configured via PCD (Parse-Classify-Distribute) schemes, just like
+	  any online FMan port. They deliver the processed frames to frame queues, according
+	  to the applied PCD configurations.
+
+	  Choosing this feature will not impact the functionality and/or performance of the system,
+	  so it is safe to have it.
+
+config DPA_MAX_FRM_SIZE
+	int "Maximum L2 frame size"
+	depends on DPA
+	range 64 9600
+	default "1522"
 	help
-	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
-	  To compile this driver as a module, choose M here: the
-	  module will be called vmxnet3.
+	  Configure this in relation to the maximum possible MTU of your network configuration. In particular,
+	  one would need to increase this value in order to use jumbo frames. DPA_MAX_FRM_SIZE must accommodate
+	  the Ethernet FCS (4 bytes) and one ETH+VLAN header (18 bytes), to a total of 22 bytes in excess of
+	  the desired L3 MTU.
+ + Note that having too large a DPA_MAX_FRM_SIZE (much larger than the actual MTU) may lead to buffer + exhaustion, especially in the case of badly fragmented datagrams on the Rx path. Conversely, + having a DPA_MAX_FRM_SIZE smaller than the actual MTU will lead to frames being dropped. + + This can be overridden by specifying "fsl_fman_phy_max_frm" in the kernel bootargs: + * in Hypervisor-based scenarios, by adding a "chosen" node with the "bootargs" property specifying + "fsl_fman_phy_max_frm="; + * in non-Hypervisor-based scenarios, via u-boot's env, by modifying the "bootargs" env variable. + +config FSL_DPA_1588 + tristate "IEEE 1588-compliant timestamping" + depends on DPA + default n + +config DPAA_ETH_UNIT_TESTS + bool "Run Unit Tests for DPAA Ethernet" + depends on DPA + default y source "drivers/net/hyperv/Kconfig" --- linux-3.13.0.orig/drivers/net/Makefile +++ linux-3.13.0/drivers/net/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_WIMAX) += wimax/ obj-$(CONFIG_IEEE802154) += ieee802154/ +obj-$(if $(CONFIG_DPA),y) += dpa/ obj-$(CONFIG_VMXNET3) += vmxnet3/ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o --- linux-3.13.0.orig/drivers/net/bonding/bond_3ad.c +++ linux-3.13.0/drivers/net/bonding/bond_3ad.c @@ -1806,8 +1806,6 @@ BOND_AD_INFO(bond).agg_select_timer = timeout; } -static u16 aggregator_identifier; - /** * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures * @bond: bonding struct to work on @@ -1821,7 +1819,7 @@ if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr), bond->dev->dev_addr)) { - aggregator_identifier = 0; + BOND_AD_INFO(bond).aggregator_identifier = 0; BOND_AD_INFO(bond).system.sys_priority = 0xFFFF; BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr); @@ -1892,7 +1890,7 @@ ad_initialize_agg(aggregator); aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr); - aggregator->aggregator_identifier = (++aggregator_identifier); + aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier; aggregator->slave = slave; aggregator->is_active = 0; aggregator->num_of_ports = 0; @@ -2458,7 +2456,7 @@ out: if (res) { /* no suitable interface, frame not sent */ - kfree_skb(skb); + dev_kfree_skb_any(skb); } return NETDEV_TX_OK; --- linux-3.13.0.orig/drivers/net/bonding/bond_3ad.h +++ linux-3.13.0/drivers/net/bonding/bond_3ad.h @@ -253,6 +253,7 @@ struct ad_bond_info { struct ad_system system; /* 802.3ad system structure */ u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes + u16 aggregator_identifier; }; struct ad_slave_info { --- linux-3.13.0.orig/drivers/net/bonding/bond_alb.c +++ linux-3.13.0/drivers/net/bonding/bond_alb.c @@ -731,7 +731,7 @@ client_info->ntt = 0; } - if (!vlan_get_tag(skb, &client_info->vlan_id)) + if (vlan_get_tag(skb, &client_info->vlan_id)) client_info->vlan_id = 0; if (!client_info->assigned) { @@ -1481,7 +1481,7 @@ if (res) { /* no suitable interface, frame not sent */ - kfree_skb(skb); + dev_kfree_skb_any(skb); } return NETDEV_TX_OK; --- linux-3.13.0.orig/drivers/net/bonding/bond_main.c +++ linux-3.13.0/drivers/net/bonding/bond_main.c @@ -1090,10 +1090,14 @@ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) +#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\ + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL) + static void bond_compute_features(struct bonding *bond) { unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 
netdev_features_t vlan_features = BOND_VLAN_FEATURES; + netdev_features_t enc_features = BOND_ENC_FEATURES; struct net_device *bond_dev = bond->dev; struct list_head *iter; struct slave *slave; @@ -1108,6 +1112,9 @@ vlan_features = netdev_increment_features(vlan_features, slave->dev->vlan_features, BOND_VLAN_FEATURES); + enc_features = netdev_increment_features(enc_features, + slave->dev->hw_enc_features, + BOND_ENC_FEATURES); dst_release_flag &= slave->dev->priv_flags; if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; @@ -1118,6 +1125,7 @@ done: bond_dev->vlan_features = vlan_features; + bond_dev->hw_enc_features = enc_features; bond_dev->hard_header_len = max_hard_header_len; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -3577,7 +3585,7 @@ } } /* no slave that can tx has been found */ - kfree_skb(skb); + dev_kfree_skb_any(skb); } /** @@ -3630,8 +3638,14 @@ else bond_xmit_slave_id(bond, skb, 0); } else { - slave_id = bond_rr_gen_slave_id(bond); - bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); + + if (likely(slave_cnt)) { + slave_id = bond_rr_gen_slave_id(bond); + bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); + } else { + dev_kfree_skb_any(skb); + } } return NETDEV_TX_OK; @@ -3650,7 +3664,7 @@ if (slave) bond_dev_queue_xmit(bond, skb, slave->dev); else - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3662,8 +3676,13 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); - bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt)); + if (likely(slave_cnt)) + bond_xmit_slave_id(bond, skb, + bond_xmit_hash(bond, skb, bond->slave_cnt)); + else + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3693,7 +3712,7 @@ if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP) bond_dev_queue_xmit(bond, skb, slave->dev); else - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3733,7 +3752,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct @@ -3784,7 +3803,7 @@ pr_err("%s: Error: Unknown bonding mode %d\n", dev->name, bond->params.mode); WARN_ON_ONCE(1); - kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } } @@ -3805,7 +3824,7 @@ if (bond_has_slaves(bond)) ret = __bond_start_xmit(skb, dev); else - kfree_skb(skb); + dev_kfree_skb_any(skb); rcu_read_unlock(); return ret; @@ -3947,6 +3966,7 @@ NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); + bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; bond_dev->features |= bond_dev->hw_features; } @@ -4512,6 +4532,7 @@ out: return res; err: + bond_destroy_debugfs(); bond_netlink_fini(); err_link: unregister_pernet_subsys(&bond_net_ops); --- linux-3.13.0.orig/drivers/net/caif/caif_hsi.c +++ linux-3.13.0/drivers/net/caif/caif_hsi.c @@ -1415,7 +1415,6 @@ cfhsi = netdev_priv(dev); cfhsi_netlink_parms(data, cfhsi); - dev_net_set(cfhsi->ndev, src_net); get_ops = symbol_get(cfhsi_get_ops); if (!get_ops) { --- linux-3.13.0.orig/drivers/net/can/at91_can.c +++ linux-3.13.0/drivers/net/can/at91_can.c @@ -1120,7 +1120,9 @@ struct at91_priv *priv = netdev_priv(dev); int err; - clk_enable(priv->clk); + err = 
clk_prepare_enable(priv->clk); + if (err) + return err; /* check or determine and set bittime */ err = open_candev(dev); @@ -1146,7 +1148,7 @@ out_close: close_candev(dev); out: - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return err; } @@ -1163,7 +1165,7 @@ at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); close_candev(dev); --- linux-3.13.0.orig/drivers/net/can/c_can/c_can.c +++ linux-3.13.0/drivers/net/can/c_can/c_can.c @@ -694,6 +694,9 @@ /* disable all interrupts */ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); + /* put ctrl to init on stop to end ongoing transmission */ + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); + /* set the state as STOPPED */ priv->can.state = CAN_STATE_STOPPED; } --- linux-3.13.0.orig/drivers/net/can/c_can/c_can_platform.c +++ linux-3.13.0/drivers/net/can/c_can/c_can_platform.c @@ -194,8 +194,9 @@ priv->instance = pdev->id; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) + priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!priv->raminit_ctrlreg || priv->instance < 0) dev_info(&pdev->dev, "control memory is not used for raminit\n"); else priv->raminit = c_can_hw_raminit; --- linux-3.13.0.orig/drivers/net/can/dev.c +++ linux-3.13.0/drivers/net/can/dev.c @@ -324,19 +324,10 @@ } if (!priv->echo_skb[idx]) { - struct sock *srcsk = skb->sk; - if (atomic_read(&skb->users) != 1) { - struct sk_buff *old_skb = skb; - - skb = skb_clone(old_skb, GFP_ATOMIC); - kfree_skb(old_skb); - if (!skb) - return; - } else - skb_orphan(skb); - - skb->sk = srcsk; + skb = can_create_echo_skb(skb); + if (!skb) + return; /* make settings for echo to reduce code in irq context */ skb->protocol = htons(ETH_P_CAN); @@ -394,7 +385,7 @@ BUG_ON(idx >= priv->echo_skb_max); if (priv->echo_skb[idx]) { - kfree_skb(priv->echo_skb[idx]); + dev_kfree_skb_any(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; } } @@ -512,6 +503,10 @@ skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; @@ -674,10 +669,14 @@ if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); - if (cm->flags & ~priv->ctrlmode_supported) + + /* check whether changed bits are allowed to be modified */ + if (cm->mask & ~priv->ctrlmode_supported) return -EOPNOTSUPP; + + /* clear bits to be modified and copy the flag values */ priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= cm->flags; + priv->ctrlmode |= (cm->flags & cm->mask); } if (data[IFLA_CAN_RESTART_MS]) { --- linux-3.13.0.orig/drivers/net/can/flexcan.c +++ linux-3.13.0/drivers/net/can/flexcan.c @@ -125,7 +125,9 @@ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) /* FLEXCAN interrupt flag register (IFLAG) bits */ -#define FLEXCAN_TX_BUF_ID 8 +/* Errata ERR005829 step7: Reserve first valid MB */ +#define FLEXCAN_TX_BUF_RESERVED 8 +#define FLEXCAN_TX_BUF_ID 9 #define FLEXCAN_IFLAG_BUF(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) @@ -136,6 +138,17 @@ /* FLEXCAN message buffers */ #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) +#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) +#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) +#define 
FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) +#define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24) +#define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) + +#define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) +#define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) +#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) +#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) + #define FLEXCAN_MB_CNT_SRR BIT(22) #define FLEXCAN_MB_CNT_IDE BIT(21) #define FLEXCAN_MB_CNT_RTR BIT(20) @@ -144,6 +157,8 @@ #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) +#define FLEXCAN_TIMEOUT_US (50) + /* * FLEXCAN hardware feature flags * @@ -259,6 +274,22 @@ } #endif +static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_enable(priv->reg_xceiver); +} + +static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_disable(priv->reg_xceiver); +} + static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, u32 reg_esr) { @@ -266,26 +297,42 @@ (reg_esr & FLEXCAN_ESR_ERR_BUS); } -static inline void flexcan_chip_enable(struct flexcan_priv *priv) +static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = flexcan_read(®s->mcr); reg &= ~FLEXCAN_MCR_MDIS; flexcan_write(reg, ®s->mcr); - udelay(10); + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + usleep_range(10, 20); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; } -static inline void flexcan_chip_disable(struct flexcan_priv *priv) +static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = flexcan_read(®s->mcr); reg |= FLEXCAN_MCR_MDIS; flexcan_write(reg, ®s->mcr); + + while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + usleep_range(10, 20); + + if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; } static int flexcan_get_berr_counter(const struct net_device *dev, @@ -338,6 +385,14 @@ flexcan_write(can_id, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_id); flexcan_write(ctrl, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); + /* Errata ERR005829 step8: + * Write twice INACTIVE(0x8) code to first MB. 
+ */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + return NETDEV_TX_OK; } @@ -647,6 +702,9 @@ stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); + /* after sending a RTR frame mailbox is in RX mode */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); flexcan_write((1 << FLEXCAN_TX_BUF_ID), ®s->iflag1); netif_wake_queue(dev); } @@ -704,9 +762,12 @@ struct flexcan_regs __iomem *regs = priv->base; int err; u32 reg_mcr, reg_ctrl; + int i; /* enable module */ - flexcan_chip_enable(priv); + err = flexcan_chip_enable(priv); + if (err) + return err; /* soft reset */ flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); @@ -773,8 +834,18 @@ netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); flexcan_write(reg_ctrl, ®s->ctrl); - /* Abort any pending TX, mark Mailbox as INACTIVE */ - flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), + /* clear and invalidate all mailboxes first */ + for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) { + flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, + ®s->cantxfg[i].can_ctrl); + } + + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + ®s->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + + /* mark TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, ®s->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); /* acceptance mask/acceptance code (accept everything) */ @@ -785,11 +856,9 @@ if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) flexcan_write(0x0, ®s->rxfgmask); - if (priv->reg_xceiver) { - err = regulator_enable(priv->reg_xceiver); - if (err) - goto out; - } + err = flexcan_transceiver_enable(priv); + if (err) + goto out; /* synchronize with the can bus */ reg_mcr = flexcan_read(®s->mcr); @@ -824,16 +893,17 @@ struct flexcan_regs __iomem *regs = priv->base; u32 reg; - /* Disable all interrupts */ - flexcan_write(0, ®s->imask1); - /* Disable + halt module */ reg = flexcan_read(®s->mcr); reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; flexcan_write(reg, ®s->mcr); - if (priv->reg_xceiver) - regulator_disable(priv->reg_xceiver); + /* Disable all interrupts */ + flexcan_write(0, ®s->imask1); + flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + ®s->ctrl); + + flexcan_transceiver_disable(priv); priv->can.state = CAN_STATE_STOPPED; return; @@ -863,7 +933,7 @@ /* start chip and queuing */ err = flexcan_chip_start(dev); if (err) - goto out_close; + goto out_free_irq; can_led_event(dev, CAN_LED_EVENT_OPEN); @@ -872,6 +942,8 @@ return 0; + out_free_irq: + free_irq(dev->irq, dev); out_close: close_candev(dev); out_disable_per: @@ -942,12 +1014,16 @@ goto out_disable_ipg; /* select "bus clock", chip must be disabled */ - flexcan_chip_disable(priv); + err = flexcan_chip_disable(priv); + if (err) + goto out_disable_per; reg = flexcan_read(®s->ctrl); reg |= FLEXCAN_CTRL_CLK_SRC; flexcan_write(reg, ®s->ctrl); - flexcan_chip_enable(priv); + err = flexcan_chip_enable(priv); + if (err) + goto out_chip_disable; /* set freeze, halt and activate FIFO, restrict register access */ reg = flexcan_read(®s->mcr); @@ -964,14 +1040,15 @@ if (!(reg & FLEXCAN_MCR_FEN)) { netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); err = -ENODEV; - goto out_disable_per; + goto out_chip_disable; } err = register_candev(dev); - out_disable_per: /* disable core and turn off clocks */ + 
out_chip_disable: flexcan_chip_disable(priv); + out_disable_per: clk_disable_unprepare(priv->clk_per); out_disable_ipg: clk_disable_unprepare(priv->clk_ipg); @@ -1004,12 +1081,19 @@ const struct flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; + struct regulator *reg_xceiver; struct resource *mem; struct clk *clk_ipg = NULL, *clk_per = NULL; void __iomem *base; int err, irq; u32 clock_freq = 0; + reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); + if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(reg_xceiver)) + reg_xceiver = NULL; + if (pdev->dev.of_node) of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); @@ -1071,9 +1155,7 @@ priv->pdata = dev_get_platdata(&pdev->dev); priv->devtype_data = devtype_data; - priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver"); - if (IS_ERR(priv->reg_xceiver)) - priv->reg_xceiver = NULL; + priv->reg_xceiver = reg_xceiver; netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT); @@ -1101,9 +1183,10 @@ static int flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct flexcan_priv *priv = netdev_priv(dev); unregister_flexcandev(dev); - + netif_napi_del(&priv->napi); free_candev(dev); return 0; @@ -1114,8 +1197,11 @@ { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); + int err; - flexcan_chip_disable(priv); + err = flexcan_chip_disable(priv); + if (err) + return err; if (netif_running(dev)) { netif_stop_queue(dev); @@ -1136,9 +1222,7 @@ netif_device_attach(dev); netif_start_queue(dev); } - flexcan_chip_enable(priv); - - return 0; + return flexcan_chip_enable(priv); } #endif /* CONFIG_PM_SLEEP */ --- linux-3.13.0.orig/drivers/net/can/janz-ican3.c +++ linux-3.13.0/drivers/net/can/janz-ican3.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -1134,20 +1135,9 @@ */ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb) { - struct sock *srcsk = skb->sk; - - if (atomic_read(&skb->users) != 1) { - struct sk_buff *old_skb = skb; - - skb = skb_clone(old_skb, GFP_ATOMIC); - kfree_skb(old_skb); - if (!skb) - return; - } else { - skb_orphan(skb); - } - - skb->sk = srcsk; + skb = can_create_echo_skb(skb); + if (!skb) + return; /* save this skb for tx interrupt echo handling */ skb_queue_tail(&mod->echoq, skb); --- linux-3.13.0.orig/drivers/net/can/led.c +++ linux-3.13.0/drivers/net/can/led.c @@ -97,6 +97,9 @@ if (!priv) return NOTIFY_DONE; + if (!priv->tx_led_trig || !priv->rx_led_trig) + return NOTIFY_DONE; + if (msg == NETDEV_CHANGENAME) { snprintf(name, sizeof(name), "%s-tx", netdev->name); led_trigger_rename_static(name, priv->tx_led_trig); --- linux-3.13.0.orig/drivers/net/can/sja1000/peak_pci.c +++ linux-3.13.0/drivers/net/can/sja1000/peak_pci.c @@ -551,7 +551,7 @@ { struct sja1000_priv *priv; struct peak_pci_chan *chan; - struct net_device *dev; + struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; @@ -687,11 +687,13 @@ writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; - for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { - unregister_sja1000dev(dev); - free_sja1000dev(dev); + for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; + prev_dev = chan->prev_dev; + + unregister_sja1000dev(dev); + free_sja1000dev(dev); } /* free any PCIeC resources too */ @@ -725,10 +727,12 @@ /* Loop over 
all registered devices */ while (1) { + struct net_device *prev_dev = chan->prev_dev; + dev_info(&pdev->dev, "removing device %s\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); - dev = chan->prev_dev; + dev = prev_dev; if (!dev) { /* do that only for first channel */ --- linux-3.13.0.orig/drivers/net/can/slcan.c +++ linux-3.13.0/drivers/net/can/slcan.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include @@ -87,6 +88,7 @@ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ spinlock_t lock; + struct work_struct tx_work; /* Flushes transmit buffer */ /* These are pointers to the malloc()ed frame buffers. */ unsigned char rbuff[SLC_MTU]; /* receiver buffer */ @@ -311,34 +313,44 @@ sl->dev->stats.tx_bytes += cf->can_dlc; } -/* - * Called by the driver when there's room for more data. If we have - * more packets to send, we send them here. - */ -static void slcan_write_wakeup(struct tty_struct *tty) +/* Write out any remaining transmit buffer. Scheduled when tty is writable */ +static void slcan_transmit(struct work_struct *work) { + struct slcan *sl = container_of(work, struct slcan, tx_work); int actual; - struct slcan *sl = (struct slcan *) tty->disc_data; + spin_lock_bh(&sl->lock); /* First make sure we're connected. */ - if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) + if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) { + spin_unlock_bh(&sl->lock); return; + } - spin_lock(&sl->lock); if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - spin_unlock(&sl->lock); + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + spin_unlock_bh(&sl->lock); netif_wake_queue(sl->dev); return; } - actual = tty->ops->write(tty, sl->xhead, sl->xleft); + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; - spin_unlock(&sl->lock); + spin_unlock_bh(&sl->lock); +} + +/* + * Called by the driver when there's room for more data. + * Schedule the transmit. + */ +static void slcan_write_wakeup(struct tty_struct *tty) +{ + struct slcan *sl = tty->disc_data; + + schedule_work(&sl->tx_work); } /* Send a can_frame to a TTY queue. 
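The slcan rework above is the standard way to get real work out of an atomic tty callback: write_wakeup now only schedules a work item, and the work function does the locked write in process context, while the close path (shown just below) clears the back-pointer under the lock and flushes the work before teardown. A minimal sketch of that shape, using hypothetical names (my_dev, my_transmit) rather than the driver's own:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_dev {
    	spinlock_t lock;
    	struct work_struct tx_work;	/* deferred transmit */
    	void *chan;			/* lower layer; NULL once detached */
    	int xleft;			/* bytes still queued for transmit */
    };

    /* Atomic callback: do nothing heavy here, just kick the work item. */
    static void my_write_wakeup(struct my_dev *p)
    {
    	schedule_work(&p->tx_work);
    }

    /* Process context: safe to take the lock and push the remaining bytes. */
    static void my_transmit(struct work_struct *work)
    {
    	struct my_dev *p = container_of(work, struct my_dev, tx_work);

    	spin_lock_bh(&p->lock);
    	if (!p->chan) {			/* detached while the work was queued */
    		spin_unlock_bh(&p->lock);
    		return;
    	}
    	if (p->xleft > 0) {
    		/* ... hand p->xleft bytes to the lower layer ... */
    		p->xleft = 0;
    	}
    	spin_unlock_bh(&p->lock);
    }

    /* Setup: INIT_WORK(&p->tx_work, my_transmit);
     * Teardown: clear p->chan under the lock, then flush_work(&p->tx_work). */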
*/ @@ -524,6 +536,7 @@ sl->magic = SLCAN_MAGIC; sl->dev = dev; spin_lock_init(&sl->lock); + INIT_WORK(&sl->tx_work, slcan_transmit); slcan_devs[i] = dev; return sl; @@ -622,8 +635,12 @@ if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty) return; + spin_lock_bh(&sl->lock); tty->disc_data = NULL; sl->tty = NULL; + spin_unlock_bh(&sl->lock); + + flush_work(&sl->tx_work); /* Flush network side */ unregister_netdev(sl->dev); --- linux-3.13.0.orig/drivers/net/can/usb/esd_usb2.c +++ linux-3.13.0/drivers/net/can/usb/esd_usb2.c @@ -1142,6 +1142,7 @@ } } unlink_all_urbs(dev); + kfree(dev); } } --- linux-3.13.0.orig/drivers/net/can/usb/kvaser_usb.c +++ linux-3.13.0/drivers/net/can/usb/kvaser_usb.c @@ -12,6 +12,8 @@ * Copyright (C) 2012 Olivier Sobrie */ +#include +#include #include #include #include @@ -296,10 +298,11 @@ struct kvaser_usb_net_priv { struct can_priv can; - atomic_t active_tx_urbs; - struct usb_anchor tx_submitted; + spinlock_t tx_contexts_lock; + int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; + struct usb_anchor tx_submitted; struct completion start_comp, stop_comp; struct kvaser_usb *dev; @@ -397,8 +400,15 @@ while (pos <= actual_len - MSG_HEADER_LEN) { tmp = buf + pos; - if (!tmp->len) - break; + /* Handle messages crossing the USB endpoint max packet + * size boundary. Check kvaser_usb_read_bulk_callback() + * for further details. + */ + if (tmp->len == 0) { + pos = round_up(pos, + dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + tmp->len > actual_len) { dev_err(dev->udev->dev.parent, "Format error\n"); @@ -474,6 +484,8 @@ return err; dev->nchannels = msg.u.cardinfo.nchannels; + if (dev->nchannels > MAX_NET_DEVICES) + return -EINVAL; return 0; } @@ -488,6 +500,7 @@ struct can_frame *cf; u8 channel = msg->u.tx_acknowledge.channel; u8 tid = msg->u.tx_acknowledge.tid; + unsigned long flags; if (channel >= dev->nchannels) { dev_err(dev->udev->dev.parent, @@ -527,12 +540,15 @@ stats->tx_packets++; stats->tx_bytes += context->dlc; - can_get_echo_skb(priv->netdev, context->echo_index); - context->echo_index = MAX_TX_URBS; - atomic_dec(&priv->active_tx_urbs); + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } static void kvaser_usb_simple_msg_callback(struct urb *urb) @@ -577,7 +593,7 @@ usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), buf, msg->len, - kvaser_usb_simple_msg_callback, priv); + kvaser_usb_simple_msg_callback, netdev); usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); @@ -585,7 +601,6 @@ netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); usb_free_urb(urb); - kfree(buf); return err; } @@ -594,17 +609,6 @@ return 0; } -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) -{ - int i; - - usb_kill_anchored_urbs(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < MAX_TX_URBS; i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; -} - static void kvaser_usb_rx_error(const struct kvaser_usb *dev, const struct kvaser_msg *msg) { @@ -652,11 +656,6 @@ priv = dev->nets[channel]; stats = &priv->netdev->stats; - if (status & M16C_STATE_BUS_RESET) { - kvaser_usb_unlink_tx_urbs(priv); - return; - } - skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; @@ -667,7 +666,7 @@ netdev_dbg(priv->netdev, 
"Error status: 0x%02x\n", status); - if (status & M16C_STATE_BUS_OFF) { + if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; @@ -693,9 +692,7 @@ } new_state = CAN_STATE_ERROR_PASSIVE; - } - - if (status == M16C_STATE_BUS_ERROR) { + } else if (status & M16C_STATE_BUS_ERROR) { if ((priv->can.state < CAN_STATE_ERROR_WARNING) && ((txerr >= 96) || (rxerr >= 96))) { cf->can_id |= CAN_ERR_CRTL; @@ -705,7 +702,8 @@ priv->can.can_stats.error_warning++; new_state = CAN_STATE_ERROR_WARNING; - } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) { + } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) && + ((txerr < 96) && (rxerr < 96))) { cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; @@ -979,8 +977,19 @@ while (pos <= urb->actual_length - MSG_HEADER_LEN) { msg = urb->transfer_buffer + pos; - if (!msg->len) - break; + /* The Kvaser firmware can only read and write messages that + * does not cross the USB's endpoint wMaxPacketSize boundary. + * If a follow-up command crosses such boundary, firmware puts + * a placeholder zero-length command in its place then aligns + * the real command to the next max packet size. + * + * Handle such cases or we're going to miss a significant + * number of events in case of a heavy rx load on the bus. + */ + if (msg->len == 0) { + pos = round_up(pos, dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + msg->len > urb->actual_length) { dev_err(dev->udev->dev.parent, "Format error\n"); @@ -988,7 +997,6 @@ } kvaser_usb_handle_message(dev, msg); - pos += msg->len; } @@ -1160,6 +1168,24 @@ return err; } +static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) +{ + int i; + + priv->active_tx_contexts = 0; + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; +} + +/* This method might sleep. Do not call it in the atomic context + * of URB completions. 
+ */ +static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +{ + usb_kill_anchored_urbs(&priv->tx_submitted); + kvaser_usb_reset_tx_urb_contexts(priv); +} + static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) { int i; @@ -1236,6 +1262,9 @@ if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); + /* reset tx contexts */ + kvaser_usb_unlink_tx_urbs(priv); + priv->can.state = CAN_STATE_STOPPED; close_candev(priv->netdev); @@ -1274,8 +1303,8 @@ struct urb *urb; void *buf; struct kvaser_msg *msg; - int i, err; - int ret = NETDEV_TX_OK; + int i, err, ret = NETDEV_TX_OK; + unsigned long flags; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; @@ -1284,13 +1313,15 @@ if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; - goto nourbmem; + dev_kfree_skb(skb); + return NETDEV_TX_OK; } buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { stats->tx_dropped++; - goto nobufmem; + dev_kfree_skb(skb); + goto freeurb; } msg = buf; @@ -1317,21 +1348,32 @@ if (cf->can_id & CAN_RTR_FLAG) msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME; + spin_lock_irqsave(&priv->tx_contexts_lock, flags); for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &priv->tx_contexts[i]; + + context->echo_index = i; + can_put_echo_skb(skb, netdev, context->echo_index); + ++priv->active_tx_contexts; + if (priv->active_tx_contexts >= MAX_TX_URBS) + netif_stop_queue(netdev); + break; } } + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); + /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); + + kfree(buf); ret = NETDEV_TX_BUSY; - goto releasebuf; + goto freeurb; } context->priv = priv; - context->echo_index = i; context->dlc = cf->can_dlc; msg->u.tx_can.tid = context->echo_index; @@ -1343,21 +1385,17 @@ kvaser_usb_write_bulk_callback, context); usb_anchor_urb(urb, &priv->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); - - atomic_inc(&priv->active_tx_urbs); - - if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) - netif_stop_queue(netdev); - err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; + netif_wake_queue(netdev); - skb = NULL; /* set to NULL to avoid double free in - * dev_kfree_skb(skb) */ + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - atomic_dec(&priv->active_tx_urbs); usb_unanchor_urb(urb); stats->tx_dropped++; @@ -1367,19 +1405,13 @@ else netdev_warn(netdev, "Failed tx_urb %d\n", err); - goto releasebuf; + goto freeurb; } - usb_free_urb(urb); - - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; -releasebuf: - kfree(buf); -nobufmem: +freeurb: usb_free_urb(urb); -nourbmem: - dev_kfree_skb(skb); return ret; } @@ -1489,7 +1521,11 @@ struct kvaser_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; struct kvaser_usb_net_priv *priv; - int i, err; + int err; + + err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); + if (err) + return err; netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); if (!netdev) { @@ -1499,19 +1535,17 @@ priv = netdev_priv(netdev); + init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); - init_usb_anchor(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < 
ARRAY_SIZE(priv->tx_contexts); i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->dev = dev; priv->netdev = netdev; priv->channel = channel; + spin_lock_init(&priv->tx_contexts_lock); + kvaser_usb_reset_tx_urb_contexts(priv); + priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = CAN_USB_CLOCK; priv->can.bittiming_const = &kvaser_usb_bittiming_const; @@ -1576,7 +1610,7 @@ { struct kvaser_usb *dev; int err = -ENOMEM; - int i; + int i, retry = 3; dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); if (!dev) @@ -1594,10 +1628,15 @@ usb_set_intfdata(intf, dev); - for (i = 0; i < MAX_NET_DEVICES; i++) - kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i); + /* On some x86 laptops, plugging a Kvaser device again after + * an unplug makes the firmware always ignore the very first + * command. For such a case, provide some room for retries + * instead of completely exiting the driver. + */ + do { + err = kvaser_usb_get_software_info(dev); + } while (--retry && err == -ETIMEDOUT); - err = kvaser_usb_get_software_info(dev); if (err) { dev_err(&intf->dev, "Cannot get software infos, error %d\n", err); --- linux-3.13.0.orig/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ linux-3.13.0/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -734,7 +734,7 @@ dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; - goto lbl_set_intf_data; + goto lbl_free_candev; } dev->udev = usb_dev; @@ -773,7 +773,7 @@ err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); - goto lbl_free_cmd_buf; + goto lbl_restore_intf_data; } if (dev->prev_siblings) @@ -786,14 +786,14 @@ if (dev->adapter->dev_init) { err = dev->adapter->dev_init(dev); if (err) - goto lbl_free_cmd_buf; + goto lbl_unregister_candev; } /* set bus off */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) - goto lbl_free_cmd_buf; + goto lbl_unregister_candev; } /* get device number early */ @@ -805,11 +805,14 @@ return 0; -lbl_free_cmd_buf: - kfree(dev->cmd_buf); +lbl_unregister_candev: + unregister_candev(netdev); -lbl_set_intf_data: +lbl_restore_intf_data: usb_set_intfdata(intf, dev->prev_siblings); + kfree(dev->cmd_buf); + +lbl_free_candev: free_candev(netdev); return err; --- linux-3.13.0.orig/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ linux-3.13.0/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -333,8 +333,6 @@ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; - memset(req_addr, '\0', req_size); - req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; switch (req_id) { @@ -345,6 +343,7 @@ default: p = usb_rcvctrlpipe(dev->udev, 0); req_type |= USB_DIR_IN; + memset(req_addr, '\0', req_size); break; } --- linux-3.13.0.orig/drivers/net/can/vcan.c +++ linux-3.13.0/drivers/net/can/vcan.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -109,25 +110,23 @@ stats->rx_packets++; stats->rx_bytes += cfd->len; } - kfree_skb(skb); + consume_skb(skb); return NETDEV_TX_OK; } /* perform standard echo handling for CAN network interfaces */ if (loop) { - struct sock *srcsk = skb->sk; - skb = skb_share_check(skb, GFP_ATOMIC); + skb = can_create_echo_skb(skb); if (!skb) return NETDEV_TX_OK; /* receive with packet counting */ - skb->sk = srcsk; vcan_rx(skb, dev); } else { /* no looped packets => no counting */ - kfree_skb(skb); + consume_skb(skb); } return NETDEV_TX_OK; } --- linux-3.13.0.orig/drivers/net/dpa/Makefile +++ linux-3.13.0/drivers/net/dpa/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Freescale 
Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +EXTRA_CFLAGS += -I$(NET_DPA) + +#Netcomm SW tree +obj-$(CONFIG_FSL_FMAN) += NetCommSw/ +obj-$(CONFIG_FSL_DPA_1588) += dpaa_1588.o +obj-$(CONFIG_DPA) += fsl-mac.o fsl-dpa.o +obj-$(CONFIG_DPA_OFFLINE_PORTS) += fsl-oh.o + +fsl-dpa-objs := dpa-ethtool.o dpaa_eth.o +fsl-mac-objs := mac.o mac-api.o +fsl-oh-objs := offline_port.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Kconfig +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Kconfig @@ -0,0 +1,57 @@ +menu "Frame Manager support" + +menuconfig FSL_FMAN + bool "Freescale Frame Manager (datapath) support" + # depends on PPC_E500MC + default y + ---help--- + If unsure, say Y. + +if FSL_FMAN + +config FSL_FMAN_TEST + bool "FMan test module" + default n + ---help--- + This option compiles test code for FMan. + +menu "FMAN Processor support" +choice + depends on FSL_FMAN + prompt "Processor Type" + +config FMAN_P3040_P4080_P5020 + bool "P3040 P4080 5020" + +config FMAN_P1023 + bool "P1023" + +endchoice +endmenu + +config FMAN_RESOURCE_ALLOCATION_ALGORITHM + bool "Enable FMan dynamic resource allocation algorithm" + default n + ---help--- + Enables algorithm for dynamic resource allocation + +config FMAN_DISABLE_OH_TO_REUSE_RESOURCES + depends on FMAN_RESOURCE_ALLOCATION_ALGORITHM + bool "Disable offline parsing ports to reuse resources" + default n + ---help--- + Redistributes FMan OH's resources to all other ports, + thus enabling other configurations. + +config FMAN_MIB_CNT_OVF_IRQ_EN + bool "Enable the dTSEC MIB counters overflow interrupt" + default n + ---help--- + Enable the dTSEC MIB counters overflow interrupt to get + accurate MIB counters values. Enabled it compensates + for the counters overflow but reduces performance and + triggers error messages in HV setups. + +endif # FSL_FMAN + +endmenu --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk +# +obj-y += etc/ +obj-${CONFIG_FMAN_P3040_P4080_P5020} += integrations/P3040_P4080_P5020/ +obj-${CONFIG_FMAN_P1023} += integrations/P1023/ +obj-y += Peripherals/FM/ +obj-y += src/ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/HC/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/HC/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +NCSW_FM_INC = $(srctree)/drivers/net/dpa/NetCommSw/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + +obj-y += fsl-ncsw-Hc.o + +fsl-ncsw-Hc-objs := hc.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/HC/hc.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/HC/hc.c @@ -0,0 +1,1584 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "std_ext.h" +#include "error_ext.h" +#include "sprint_ext.h" +#include "string_ext.h" + +#include "fm_common.h" +#include "fm_hc.h" + + +#define HC_HCOR_OPCODE_PLCR_PRFL 0x0 +#define HC_HCOR_OPCODE_KG_SCM 0x1 +#define HC_HCOR_OPCODE_SYNC 0x2 +#define HC_HCOR_OPCODE_CC 0x3 +#define HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT 0x5 + +#define HC_HCOR_GBL 0x20000000 + +#define SIZE_OF_HC_FRAME_PORT_REGS (sizeof(t_HcFrame)-sizeof(t_FmPcdKgInterModuleSchemeRegs)+sizeof(t_FmPcdKgPortRegs)) +#define SIZE_OF_HC_FRAME_SCHEME_REGS sizeof(t_HcFrame) +#define SIZE_OF_HC_FRAME_PROFILES_REGS (sizeof(t_HcFrame)-sizeof(t_FmPcdKgInterModuleSchemeRegs)+sizeof(t_FmPcdPlcrInterModuleProfileRegs)) +#define SIZE_OF_HC_FRAME_PROFILE_CNT (sizeof(t_HcFrame)-sizeof(t_FmPcdPlcrInterModuleProfileRegs)+sizeof(uint32_t)) +#define SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC 16 + +#define BUILD_FD(len) \ +do { \ + memset(&fmFd, 0, sizeof(t_DpaaFD)); \ + DPAA_FD_SET_ADDR(&fmFd, p_HcFrame); \ + DPAA_FD_SET_OFFSET(&fmFd, 0); \ + DPAA_FD_SET_LENGTH(&fmFd, len); \ +} while (0) + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +/**************************************************************************//** + @Description PCD KG scheme registers +*//***************************************************************************/ +typedef _Packed struct t_FmPcdKgSchemeRegsWithoutCounter { + volatile uint32_t kgse_mode; /**< MODE */ + volatile uint32_t kgse_ekfc; /**< Extract Known Fields Command */ + volatile uint32_t kgse_ekdv; /**< Extract Known Default Value */ + volatile uint32_t kgse_bmch; /**< Bit Mask Command High */ + volatile uint32_t kgse_bmcl; /**< Bit Mask Command Low */ + volatile uint32_t kgse_fqb; /**< Frame Queue Base */ + volatile uint32_t kgse_hc; /**< Hash Command */ + volatile uint32_t kgse_ppc; /**< Policer Profile Command */ + volatile uint32_t kgse_gec[FM_PCD_KG_NUM_OF_GENERIC_REGS]; + /**< Generic Extract Command */ + volatile uint32_t kgse_dv0; /**< KeyGen Scheme Entry Default Value 0 */ + volatile uint32_t kgse_dv1; /**< KeyGen Scheme Entry Default Value 1 */ + volatile uint32_t kgse_ccbs; /**< KeyGen Scheme Entry Coarse Classification Bit*/ + volatile uint32_t kgse_mv; /**< KeyGen Scheme Entry Match vector */ +} _PackedType t_FmPcdKgSchemeRegsWithoutCounter; + +typedef _Packed struct t_FmPcdKgPortRegs { + volatile uint32_t spReg; + volatile uint32_t cppReg; +} _PackedType t_FmPcdKgPortRegs; + +typedef _Packed struct t_HcFrame { + volatile uint32_t opcode; + volatile uint32_t actionReg; + volatile uint32_t extraReg; + volatile uint32_t commandSequence; + union { + t_FmPcdKgInterModuleSchemeRegs schemeRegs; + t_FmPcdKgInterModuleSchemeRegs schemeRegsWithoutCounter; + t_FmPcdPlcrInterModuleProfileRegs profileRegs; + volatile uint32_t singleRegForWrite; /* for writing SP, CPP, profile counter */ + t_FmPcdKgPortRegs portRegsForRead; + volatile uint32_t clsPlanEntries[CLS_PLAN_NUM_PER_GRP]; + t_FmPcdCcCapwapReassmTimeoutParams ccCapwapReassmTimeout; + } hcSpecificData; +} _PackedType t_HcFrame; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... 
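As laid out above, a host-command frame is a fixed header of four 32-bit words (opcode, actionReg, extraReg, commandSequence) followed by a union of command-specific payloads; the SIZE_OF_HC_FRAME_* macros defined earlier carve out "header plus the one payload this command needs", and the 16-byte SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC matches the bare header. A small compile-time sketch of that layout rule (illustrative types and sizes, not the driver's own):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct cmd_frame {
    	uint32_t opcode;
    	uint32_t action;
    	uint32_t extra;
    	uint32_t sequence;
    	union {				/* command-specific payloads */
    		uint32_t single_reg;
    		uint8_t  scheme_regs[64];	/* illustrative sizes */
    		uint8_t  profile_regs[48];
    	} u;
    };

    /* Read-style commands only need the header to be sent. */
    static_assert(offsetof(struct cmd_frame, u) == 4 * sizeof(uint32_t),
    	      "header is four 32-bit words");

    /* "Header + one payload" mirrors the SIZE_OF_HC_FRAME_* arithmetic:
     * sizeof(frame) - sizeof(largest payload) + sizeof(chosen payload). */
    static_assert(sizeof(struct cmd_frame)
    	      - sizeof(((struct cmd_frame *)0)->u.scheme_regs)
    	      + sizeof(uint32_t) == 4 * sizeof(uint32_t) + sizeof(uint32_t),
    	      "single-register command is header plus one word");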
*/ + + +typedef struct t_FmHc { + t_Handle h_FmPcd; + t_Handle h_HcPortDev; + t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< A callback for enqueuing frames to the QM */ + t_Handle h_QmArg; /**< A handle to the QM module */ + uint8_t padTill16; + + uint32_t seqNum; + volatile bool wait[32]; +} t_FmHc; + + +static __inline__ t_Error EnQFrm(t_FmHc *p_FmHc, t_DpaaFD *p_FmFd, volatile uint32_t *p_SeqNum) +{ + t_Error err = E_OK; + uint32_t savedSeqNum; + uint32_t intFlags; + uint32_t timeout=100; + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + *p_SeqNum = p_FmHc->seqNum; + savedSeqNum = p_FmHc->seqNum; + p_FmHc->seqNum = (uint32_t)((p_FmHc->seqNum+1)%32); + ASSERT_COND(!p_FmHc->wait[savedSeqNum]); + p_FmHc->wait[savedSeqNum] = TRUE; + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + DBG(TRACE, ("Send Hc, SeqNum %d, FD@0x%x, fd offset 0x%x", + savedSeqNum,DPAA_FD_GET_ADDR(p_FmFd),DPAA_FD_GET_OFFSET(p_FmFd))); + err = p_FmHc->f_QmEnqueue(p_FmHc->h_QmArg, (void *)p_FmFd); + if(err) + RETURN_ERROR(MINOR, err, ("HC enqueue failed")); + + while (p_FmHc->wait[savedSeqNum] && --timeout) + XX_UDelay(100); + + if (!timeout) + RETURN_ERROR(MINOR, E_TIMEOUT, ("HC Callback, timeout exceeded")); + + return err; +} + +static t_Error CcHcDoDynamicChange(t_FmHc *p_FmHc, t_Handle p_OldPointer, t_Handle p_NewPointer) +{ + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + t_Error err = E_OK; + + ASSERT_COND(p_FmHc); + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC); + p_HcFrame->actionReg = FmPcdCcGetNodeAddrOffsetFromNodeInfo(p_FmHc->h_FmPcd, p_NewPointer); + if(p_HcFrame->actionReg == (uint32_t)ILLEGAL_BASE) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something wrong with base address")); + } + + p_HcFrame->actionReg |= 0xc0000000; + p_HcFrame->extraReg = FmPcdCcGetNodeAddrOffsetFromNodeInfo(p_FmHc->h_FmPcd, p_OldPointer); + if(p_HcFrame->extraReg == (uint32_t)ILLEGAL_BASE) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something wrong with base address")); + } + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + XX_FreeSmart(p_HcFrame); + + return E_OK; +} + +static t_Error HcDynamicChange(t_FmHc *p_FmHc,t_List *h_OldPointersLst, t_List *h_NewPointersLst, t_Handle *h_Params) +{ + + t_List *p_PosOld, *p_PosNew; + uint16_t i = 0; + t_Error err = E_OK; + uint8_t numOfModifiedPtr; + + SANITY_CHECK_RETURN_ERROR((LIST_NumOfObjs(h_NewPointersLst) == LIST_NumOfObjs(h_OldPointersLst)),E_INVALID_STATE); + + numOfModifiedPtr = (uint8_t)LIST_NumOfObjs(h_NewPointersLst); + p_PosNew = LIST_FIRST(h_NewPointersLst); + p_PosOld = LIST_FIRST(h_OldPointersLst); + for(i = 0; i < numOfModifiedPtr; i++) + { + err = CcHcDoDynamicChange(p_FmHc, p_PosOld, p_PosNew); + if(err) + { + FmPcdCcReleaseModifiedDataStructure(p_FmHc->h_FmPcd, h_OldPointersLst, h_NewPointersLst, i, h_Params); + RETURN_ERROR(MAJOR, err, ("For part of nodes changes are done - situation is danger")); + } + p_PosNew = LIST_NEXT(p_PosNew); + p_PosOld = LIST_NEXT(p_PosOld); + } + + err = FmPcdCcReleaseModifiedDataStructure(p_FmHc->h_FmPcd, h_OldPointersLst, h_NewPointersLst, i, h_Params); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + return E_OK; +} + + +t_Handle 
FmHcConfigAndInit(t_FmHcParams *p_FmHcParams) +{ + t_FmHc *p_FmHc; + t_FmPortParams fmPortParam; + t_Error err = E_OK; + + p_FmHc = (t_FmHc *)XX_Malloc(sizeof(t_FmHc)); + if (!p_FmHc) + { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC obj")); + return NULL; + } + memset(p_FmHc,0,sizeof(t_FmHc)); + + p_FmHc->h_FmPcd = p_FmHcParams->h_FmPcd; + p_FmHc->f_QmEnqueue = p_FmHcParams->params.f_QmEnqueue; + p_FmHc->h_QmArg = p_FmHcParams->params.h_QmArg; + + if (!FmIsMaster(p_FmHcParams->h_Fm)) + return (t_Handle)p_FmHc; + +/* +TKT056919 - axi12axi0 can hang if read request follows the single byte write on the very next cycle +TKT038900 - FM dma lockup occur due to AXI slave protocol violation +*/ +#ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 + p_FmHc->padTill16 = 16 - (sizeof(t_FmHc) % 16); +#endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */ + memset(&fmPortParam, 0, sizeof(fmPortParam)); + fmPortParam.baseAddr = p_FmHcParams->params.portBaseAddr; + fmPortParam.portType = e_FM_PORT_TYPE_OH_HOST_COMMAND; + fmPortParam.portId = p_FmHcParams->params.portId; + fmPortParam.liodnBase = p_FmHcParams->params.liodnBase; + fmPortParam.h_Fm = p_FmHcParams->h_Fm; + + fmPortParam.specificParams.nonRxParams.errFqid = p_FmHcParams->params.errFqid; + fmPortParam.specificParams.nonRxParams.dfltFqid = p_FmHcParams->params.confFqid; + fmPortParam.specificParams.nonRxParams.qmChannel = p_FmHcParams->params.qmChannel; + + p_FmHc->h_HcPortDev = FM_PORT_Config(&fmPortParam); + if(!p_FmHc->h_HcPortDev) + { + REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM HC port!")); + XX_Free(p_FmHc); + return NULL; + } + + /* final init */ + if ((err = FM_PORT_Init(p_FmHc->h_HcPortDev)) != E_OK) + { + REPORT_ERROR(MAJOR, err, ("FM HC port!")); + FmHcFree(p_FmHc); + return NULL; + } + + if ((err = FM_PORT_Enable(p_FmHc->h_HcPortDev)) != E_OK) + { + REPORT_ERROR(MAJOR, err, ("FM HC port!")); + FmHcFree(p_FmHc); + return NULL; + } + + return (t_Handle)p_FmHc; +} + +void FmHcFree(t_Handle h_FmHc) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + + if (!p_FmHc) + return; + + if (p_FmHc->h_HcPortDev) + FM_PORT_Free(p_FmHc->h_HcPortDev); + + XX_Free(p_FmHc); +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FmHcDumpRegs(t_Handle h_FmHc) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + + SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmHc->h_HcPortDev, E_INVALID_HANDLE); + + return FM_PORT_DumpRegs(p_FmHc->h_HcPortDev); + +} +#endif /* (defined(DEBUG_ERRORS) && ... 
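The erratum workaround above computes padTill16 as 16 - (sizeof(t_FmHc) % 16) and adds that many spare bytes to every host-command frame allocation. That expression pads a size up to the next multiple of 16, except that it yields 16 rather than 0 when the size is already aligned, which only over-allocates slightly. For reference, the conventional round-up arithmetic looks like this (generic sketch, not driver code):

    #include <stddef.h>

    /* Bytes needed to pad size up to the next multiple of align
     * (align must be a power of two); returns 0 when already aligned,
     * unlike the "align - (size % align)" form, which returns align. */
    static size_t pad_to(size_t size, size_t align)
    {
    	return (align - (size & (align - 1))) & (align - 1);
    }

    /* The rounded-up size itself, i.e. the kernel's ALIGN(size, align). */
    static size_t round_up_size(size_t size, size_t align)
    {
    	return (size + align - 1) & ~(align - 1);
    }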
*/ + +void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_HcFrame *p_HcFrame; + uint32_t intFlags; + + ASSERT_COND(p_FmHc); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + p_HcFrame = (t_HcFrame *)PTR_MOVE(DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd)); + + DBG(TRACE, ("Hc Conf, SeqNum %d, FD@0x%x, fd offset 0x%x", + p_HcFrame->commandSequence, DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd))); + + if (!(p_FmHc->wait[p_HcFrame->commandSequence])) + REPORT_ERROR(MINOR, E_INVALID_FRAME, ("Not an Host-Command frame received!")); + else + p_FmHc->wait[p_HcFrame->commandSequence] = FALSE; + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); +} + +t_Handle FmHcPcdKgSetScheme(t_Handle h_FmHc, t_FmPcdKgSchemeParams *p_Scheme) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + t_FmPcdKgInterModuleSchemeRegs schemeRegs; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + uint32_t intFlags; + uint8_t physicalSchemeId, relativeSchemeId; + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + return NULL; + } + + if(!p_Scheme->modify) + { + /* check that schemeId is in range */ + if(p_Scheme->id.relativeSchemeId >= FmPcdKgGetNumOfPartitionSchemes(p_FmHc->h_FmPcd)) + { + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("Scheme is out of range")); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + relativeSchemeId = p_Scheme->id.relativeSchemeId; + + if (FmPcdKgSchemeTryLock(p_FmHc->h_FmPcd, relativeSchemeId, FALSE)) + { + XX_FreeSmart(p_HcFrame); + return NULL; + } + + physicalSchemeId = FmPcdKgGetPhysicalSchemeId(p_FmHc->h_FmPcd, relativeSchemeId); + + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + p_HcFrame->extraReg = 0xFFFFF800; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + REPORT_ERROR(MINOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + /* check if this scheme is already used */ + if (FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode)) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is already used")); + XX_FreeSmart(p_HcFrame); + return NULL; + } + } + else + { + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + physicalSchemeId = (uint8_t)(PTR_TO_UINT(p_Scheme->id.h_Scheme)-1); + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId); + if( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + err = FmPcdKgSchemeTryLock(p_FmHc->h_FmPcd, relativeSchemeId, TRUE); + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + if (err) + { + XX_FreeSmart(p_HcFrame); + return NULL; + } + } + + err = FmPcdKgBuildScheme(p_FmHc->h_FmPcd, p_Scheme, &schemeRegs); + if(err) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + REPORT_ERROR(MAJOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, p_Scheme->schemeCounter.update); + 
p_HcFrame->extraReg = 0xFFFFF800; + memcpy(&p_HcFrame->hcSpecificData.schemeRegs, &schemeRegs, sizeof(t_FmPcdKgInterModuleSchemeRegs)); + if(!p_Scheme->schemeCounter.update) + { + p_HcFrame->hcSpecificData.schemeRegs.kgse_dv0 = schemeRegs.kgse_dv0; + p_HcFrame->hcSpecificData.schemeRegs.kgse_dv1 = schemeRegs.kgse_dv1; + p_HcFrame->hcSpecificData.schemeRegs.kgse_ccbs = schemeRegs.kgse_ccbs; + p_HcFrame->hcSpecificData.schemeRegs.kgse_mv = schemeRegs.kgse_mv; + } + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + REPORT_ERROR(MINOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + FmPcdKgValidateSchemeSw(p_FmHc->h_FmPcd, relativeSchemeId); + + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + + XX_FreeSmart(p_HcFrame); + + return (t_Handle)(UINT_TO_PTR(physicalSchemeId + 1)); +} + +t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + uint8_t relativeSchemeId; + uint8_t physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId); + + if ((err = FmPcdKgSchemeTryLock(p_FmHc->h_FmPcd, relativeSchemeId, FALSE)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if(relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + } + + err = FmPcdKgCheckInvalidateSchemeSw(p_FmHc->h_FmPcd, relativeSchemeId); + if (err) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + } + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE); + p_HcFrame->extraReg = 0xFFFFF800; + memset(&p_HcFrame->hcSpecificData.schemeRegs, 0, sizeof(t_FmPcdKgInterModuleSchemeRegs)); + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + FmPcdKgInvalidateSchemeSw(p_FmHc->h_FmPcd, relativeSchemeId); + + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + + XX_FreeSmart(p_HcFrame); + + return E_OK; +} + +t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t requiredAction) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + uint8_t relativeSchemeId; + uint8_t physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + uint32_t tmpReg32 = 0; + + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId); + if( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if (FmPcdKgSchemeTryLock(p_FmHc->h_FmPcd, relativeSchemeId, FALSE)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Lock of the scheme FAILED")); + + if(!FmPcdKgGetPointedOwners(p_FmHc->h_FmPcd, relativeSchemeId) || + !(FmPcdKgGetRequiredAction(p_FmHc->h_FmPcd, relativeSchemeId) & 
requiredAction)) + { + + if(requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) + { + if((FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_DONE) && (FmPcdKgGetDoneAction(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_ENQ_FRAME)) + + { + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + } + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + p_HcFrame->extraReg = 0xFFFFF800; + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + /* check if this scheme is already used */ + if (!FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode)) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is already used")); + } + tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode; + + ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); + + p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE); + p_HcFrame->extraReg = 0x80000000; + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + XX_FreeSmart(p_HcFrame); + } + else if (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_PLCR) + { + + if((FmPcdKgIsDirectPlcr(p_FmHc->h_FmPcd, relativeSchemeId) == FALSE) || + (FmPcdKgIsDistrOnPlcrProfile(p_FmHc->h_FmPcd, relativeSchemeId) == TRUE)) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this situation PP can not be with distribution and has to be shared")); + } + err = FmPcdPlcrCcGetSetParams(p_FmHc->h_FmPcd, FmPcdKgGetRelativeProfileId(p_FmHc->h_FmPcd, relativeSchemeId), requiredAction); + if(err) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + } + } + + FmPcdKgUpatePointedOwner(p_FmHc->h_FmPcd, relativeSchemeId,TRUE); + FmPcdKgUpdateRequiredAction(p_FmHc->h_FmPcd, relativeSchemeId,requiredAction); + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + + return E_OK; +} + +uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + uint32_t retVal; + uint8_t relativeSchemeId; + uint8_t physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId); + if( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + { + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + return 0; + } + + if ((err = FmPcdKgSchemeTryLock(p_FmHc->h_FmPcd, relativeSchemeId, FALSE)) != E_OK) + { + REPORT_ERROR(MAJOR, err, ("Scheme lock")); + return 0; + } + + /* first read scheme and check 
that it is valid */ + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + return 0; + } + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + p_HcFrame->extraReg = 0xFFFFF800; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + REPORT_ERROR(MINOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return 0; + } + + if (!FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode)) + { + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is invalid")); + XX_FreeSmart(p_HcFrame); + return 0; + } + + retVal = p_HcFrame->hcSpecificData.schemeRegs.kgse_spc; + + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + + XX_FreeSmart(p_HcFrame); + + return retVal; +} + +t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + uint8_t relativeSchemeId, physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId); + if( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if ((err = FmPcdKgSchemeTryLock(p_FmHc->h_FmPcd, relativeSchemeId, FALSE)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /* first read scheme and check that it is valid */ + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + p_HcFrame->extraReg = 0xFFFFF800; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + /* check that scheme is valid */ + if (!FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode)) + { + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is invalid")); + } + + /* Write scheme back, with modified counter */ + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE); + p_HcFrame->extraReg = 0xFFFFF800; + /* write counter */ + p_HcFrame->hcSpecificData.schemeRegs.kgse_spc = value; + + BUILD_FD(sizeof(t_HcFrame)); + + err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence); + + FmPcdKgReleaseSchemeLock(p_FmHc->h_FmPcd, relativeSchemeId); + XX_FreeSmart(p_HcFrame); + + return err; +} + +t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + uint32_t i; + t_Error err = E_OK; + + ASSERT_COND(p_FmHc); + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); 
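FmHcPcdKgSetSchemeCounter above is a read-modify-write carried over the host-command channel while the per-scheme lock is held: one HC frame reads the scheme registers back, the scheme is checked for validity, only the kgse_spc counter is patched, and a second HC frame writes the block back with the counter-update flag set. In outline (hypothetical mbox_read_regs/mbox_write_regs helpers and SCHEME_VALID bit stand in for the two EnQFrm round trips):

    #include <errno.h>
    #include <stdint.h>

    #define SCHEME_VALID 0x80000000u	/* illustrative "in use" bit */

    struct scheme_regs {
    	uint32_t mode;
    	uint32_t spc;			/* the counter being updated */
    	/* ... remaining registers ... */
    };

    /* Hypothetical mailbox helpers: each one is a request/response exchange. */
    int mbox_read_regs(int scheme_id, struct scheme_regs *regs);
    int mbox_write_regs(int scheme_id, const struct scheme_regs *regs);

    static int set_scheme_counter(int scheme_id, uint32_t value)
    {
    	struct scheme_regs regs;
    	int err;

    	err = mbox_read_regs(scheme_id, &regs);	/* 1st frame: read back */
    	if (err)
    		return err;
    	if (!(regs.mode & SCHEME_VALID))	/* refuse an unused scheme */
    		return -EINVAL;

    	regs.spc = value;			/* patch only the counter */
    	return mbox_write_regs(scheme_id, &regs); /* 2nd frame: write back */
    }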
+ + for(i=p_Set->baseEntry;i < p_Set->baseEntry+p_Set->numOfClsPlanEntries;i+=8) + { + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP)); + p_HcFrame->extraReg = 0xFFFFF800; + memcpy((void*)&p_HcFrame->hcSpecificData.clsPlanEntries, (void *)&p_Set->vectors[i-p_Set->baseEntry], CLS_PLAN_NUM_PER_GRP*sizeof(uint32_t)); + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + XX_FreeSmart(p_HcFrame); + + return err; +} + +t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t grpId) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet; + + /* clear clsPlan entries in memory */ + p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet)); + if (!p_ClsPlanSet) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("memory allocation failed for p_ClsPlanSetd")); + memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet)); + + p_ClsPlanSet->baseEntry = FmPcdKgGetClsPlanGrpBase(p_FmHc->h_FmPcd, grpId); + p_ClsPlanSet->numOfClsPlanEntries = FmPcdKgGetClsPlanGrpSize(p_FmHc->h_FmPcd, grpId); + ASSERT_COND(p_ClsPlanSet->numOfClsPlanEntries <= FM_PCD_MAX_NUM_OF_CLS_PLANS); + + if (FmHcPcdKgSetClsPlan(p_FmHc, p_ClsPlanSet) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + XX_Free(p_ClsPlanSet); + + FmPcdKgDestroyClsPlanGrp(p_FmHc->h_FmPcd, grpId); + + return E_OK; +} + +t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams ) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_HcFrame *p_HcFrame; + uint32_t intFlags; + t_DpaaFD fmFd; + t_Error err; + + SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT); + memcpy(&p_HcFrame->hcSpecificData.ccCapwapReassmTimeout, p_CcCapwapReassmTimeoutParams, sizeof(t_FmPcdCcCapwapReassmTimeoutParams)); + BUILD_FD(sizeof(t_HcFrame)); + + err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence); + + XX_FreeSmart(p_HcFrame); + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; +} + + +t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + t_Error err; + uint32_t tmpReg32 = 0; + uint32_t requiredActionTmp, pointedOwnersTmp; + + SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0); + + if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile out of range")); + + if (FmPcdPlcrProfileTryLock(p_FmHc->h_FmPcd, absoluteProfileId, FALSE)) + return ERROR_CODE(E_BUSY); + + + requiredActionTmp = FmPcdPlcrGetRequiredAction(p_FmHc->h_FmPcd, absoluteProfileId); + pointedOwnersTmp = FmPcdPlcrGetPointedOwners(p_FmHc->h_FmPcd, absoluteProfileId); + + if(!pointedOwnersTmp || !(requiredActionTmp & requiredAction)) + { + + if(requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) + { + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if
(!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + /* first read scheme and check that it is valid */ + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId); + p_HcFrame->extraReg = 0x00008000; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + /* check that profile is valid */ + if (!FmPcdPlcrHwProfileIsValid(p_HcFrame->hcSpecificData.profileRegs.fmpl_pemode)) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer is already used")); + } + + tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegnia; + if(!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId); + p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(TRUE, FALSE, FALSE); + p_HcFrame->extraReg = 0x00008000; + p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32; + + BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_peynia; + if(!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId); + p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, TRUE, FALSE); + p_HcFrame->extraReg = 0x00008000; + p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32; + + BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pernia; + if(!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId); + p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, FALSE, TRUE); + p_HcFrame->extraReg = 0x00008000; + p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32; + + BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + 
XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + XX_FreeSmart(p_HcFrame); + } + } + + FmPcdPlcrUpatePointedOwner(p_FmHc->h_FmPcd, absoluteProfileId, TRUE); + FmPcdPlcrUpdateRequiredAction(p_FmHc->h_FmPcd, absoluteProfileId, requiredAction); + + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + + return E_OK; +} + +t_Handle FmHcPcdPlcrSetProfile(t_Handle h_FmHc,t_FmPcdPlcrProfileParams *p_Profile) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_FmPcdPlcrInterModuleProfileRegs profileRegs; + t_Error err = E_OK; + uint32_t intFlags; + uint16_t profileIndx; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + + if (p_Profile->modify) + { + profileIndx = (uint16_t)(PTR_TO_UINT(p_Profile->id.h_Profile)-1); + if (FmPcdPlcrProfileTryLock(p_FmHc->h_FmPcd, profileIndx, FALSE)) + return NULL; + } + else + { + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + err = FmPcdPlcrGetAbsoluteProfileId(p_FmHc->h_FmPcd, + p_Profile->id.newParams.profileType, + p_Profile->id.newParams.h_FmPort, + p_Profile->id.newParams.relativeProfileId, + &profileIndx); + if (err) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + err = FmPcdPlcrProfileTryLock(p_FmHc->h_FmPcd, profileIndx, TRUE); + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + if (err) + return NULL; + } + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + return NULL; + } + + if(!p_Profile->modify) + { + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(profileIndx); + p_HcFrame->extraReg = 0x00008000; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, profileIndx); + REPORT_ERROR(MINOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + /* check if this scheme is already used */ + if (FmPcdPlcrHwProfileIsValid(p_HcFrame->hcSpecificData.profileRegs.fmpl_pemode)) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, profileIndx); + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer is already used")); + XX_FreeSmart(p_HcFrame); + return NULL; + } + } + + memset(&profileRegs, 0, sizeof(t_FmPcdPlcrInterModuleProfileRegs)); + err = FmPcdPlcrBuildProfile(p_FmHc->h_FmPcd, p_Profile, &profileRegs); + if(err) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, profileIndx); + REPORT_ERROR(MAJOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx); + p_HcFrame->extraReg = 0x00008000; + memcpy(&p_HcFrame->hcSpecificData.profileRegs, &profileRegs, sizeof(t_FmPcdPlcrInterModuleProfileRegs)); + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, profileIndx); + REPORT_ERROR(MINOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return NULL; + } + + FmPcdPlcrValidateProfileSw(p_FmHc->h_FmPcd, profileIndx); + + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, profileIndx); + + XX_FreeSmart(p_HcFrame); + + return UINT_TO_PTR((uint64_t)profileIndx+1); +} + +t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + uint16_t absoluteProfileId = 
(uint16_t)(PTR_TO_UINT(h_Profile)-1); + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + + if (FmPcdPlcrProfileTryLock(p_FmHc->h_FmPcd, absoluteProfileId, FALSE)) + return ERROR_CODE(E_BUSY); + + FmPcdPlcrInvalidateProfileSw(p_FmHc->h_FmPcd, absoluteProfileId); + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId); + p_HcFrame->actionReg |= 0x00008000; + p_HcFrame->extraReg = 0x00008000; + memset(&p_HcFrame->hcSpecificData.profileRegs, 0, sizeof(t_FmPcdPlcrInterModuleProfileRegs)); + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + + XX_FreeSmart(p_HcFrame); + + return E_OK; +} + +t_Error FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value) +{ + + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + uint16_t absoluteProfileId = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + + if (FmPcdPlcrProfileTryLock(p_FmHc->h_FmPcd, absoluteProfileId, FALSE)) + return ERROR_CODE(E_BUSY); + + /* first read scheme and check that it is valid */ + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId); + p_HcFrame->extraReg = 0x00008000; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + /* check that profile is valid */ + if (!FmPcdPlcrHwProfileIsValid(p_HcFrame->hcSpecificData.profileRegs.fmpl_pemode)) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer is already used")); + } + + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId); + p_HcFrame->actionReg |= FmPcdPlcrBuildCounterProfileReg(counter); + p_HcFrame->extraReg = 0x00008000; + p_HcFrame->hcSpecificData.singleRegForWrite = value; + + BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + + XX_FreeSmart(p_HcFrame); + + return E_OK; +} + +uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + uint16_t absoluteProfileId = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + t_Error err = E_OK; + t_HcFrame *p_HcFrame; + t_DpaaFD 
fmFd; + uint32_t retVal = 0; + + SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0); + + if (FmPcdPlcrProfileTryLock(p_FmHc->h_FmPcd, absoluteProfileId, FALSE)) + return 0; + + /* first read scheme and check that it is valid */ + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + return 0; + } + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL); + p_HcFrame->actionReg = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId); + p_HcFrame->extraReg = 0x00008000; + + BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + REPORT_ERROR(MINOR, err, NO_MSG); + XX_FreeSmart(p_HcFrame); + return 0; + } + + /* check that profile is valid */ + if (!FmPcdPlcrHwProfileIsValid(p_HcFrame->hcSpecificData.profileRegs.fmpl_pemode)) + { + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + XX_FreeSmart(p_HcFrame); + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("invalid Policer profile")); + return 0; + } + + switch (counter) + { + case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER: + retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegpc; + break; + case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER: + retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_peypc; + break; + case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER: + retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perpc; + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER: + retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perypc; + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER: + retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perrpc; + break; + default: + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + FmPcdPlcrReleaseProfileLock(p_FmHc->h_FmPcd, absoluteProfileId); + + XX_FreeSmart(p_HcFrame); + + return retVal; +} + +t_Error FmHcPcdCcModifyTreeNextEngine(t_Handle h_FmHc, t_Handle h_CcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + uint32_t intFlags; + t_List h_OldPointersLst, h_NewPointersLst; + t_Handle h_Params; + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + err = FmPcdCcTreeTryLock(h_CcTree); + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + if (err) + return err; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + err = FmPcdCcModifyNextEngineParamTree(p_FmHc->h_FmPcd, h_CcTree, grpId, index, p_FmPcdCcNextEngineParams, + &h_OldPointersLst, &h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcTreeReleaseLock(h_CcTree); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + + FmPcdCcTreeReleaseLock(h_CcTree); + + return err; +} + + +t_Error FmHcPcdCcModifyNodeMissNextEngine(t_Handle h_FmHc, t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Handle h_Params; + t_List h_OldPointersLst, h_NewPointersLst; + t_Error err = E_OK; + t_List h_List; + uint32_t intFlags; + + INIT_LIST(&h_List); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + + if ((err = FmPcdCcNodeTreeTryLock(p_FmHc->h_FmPcd, h_CcNode, &h_List)) != E_OK) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; + } + + 
FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + err = FmPcdCcModifyMissNextEngineParamNode(p_FmHc->h_FmPcd, h_CcNode, p_FmPcdCcNextEngineParams, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcNodeTreeReleaseLock(&h_List); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + + FmPcdCcNodeTreeReleaseLock(&h_List); + + + return E_OK; +} + +t_Error FmHcPcdCcRemoveKey(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Handle h_Params; + t_List h_OldPointersLst, h_NewPointersLst; + t_Error err = E_OK; + t_List h_List; + uint32_t intFlags; + + INIT_LIST(&h_List); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + + if ((err = FmPcdCcNodeTreeTryLock(p_FmHc->h_FmPcd, h_CcNode, &h_List)) != E_OK) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; + } + + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + + err = FmPcdCcRemoveKey(p_FmHc->h_FmPcd,h_CcNode,keyIndex, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcNodeTreeReleaseLock(&h_List); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + + FmPcdCcNodeTreeReleaseLock(&h_List); + + return err; + +} + +t_Error FmHcPcdCcAddKey(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Handle h_Params; + t_List h_OldPointersLst, h_NewPointersLst; + t_Error err = E_OK; + t_List h_List; + uint32_t intFlags; + + INIT_LIST(&h_List); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + + if ((err = FmPcdCcNodeTreeTryLock(p_FmHc->h_FmPcd, h_CcNode, &h_List)) != E_OK) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; + } + + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + + err = FmPcdCcAddKey(p_FmHc->h_FmPcd,h_CcNode,keyIndex,keySize, p_KeyParams, &h_OldPointersLst,&h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcNodeTreeReleaseLock(&h_List); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + + FmPcdCcNodeTreeReleaseLock(&h_List); + + return err; +} + + +t_Error FmHcPcdCcModifyKey(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_List h_OldPointersLst, h_NewPointersLst; + t_Error err = E_OK; + t_List h_List; + uint32_t intFlags; + t_Handle h_Params; + + UNUSED(keySize); + + INIT_LIST(&h_List); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + + if ((err = FmPcdCcNodeTreeTryLock(p_FmHc->h_FmPcd, h_CcNode, &h_List)) != E_OK) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; + } + + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + err = FmPcdCcModifyKey(p_FmHc->h_FmPcd, h_CcNode, keyIndex, keySize, p_Key, p_Mask, &h_OldPointersLst,&h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcNodeTreeReleaseLock(&h_List); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + + FmPcdCcNodeTreeReleaseLock(&h_List); + + return err; +} + +t_Error FmHcPcdCcModifyNodeNextEngine(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, t_FmPcdCcNextEngineParams 
*p_FmPcdCcNextEngineParams) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_Error err = E_OK; + t_List h_OldPointersLst, h_NewPointersLst; + t_List h_List; + uint32_t intFlags; + t_Handle h_Params; + + INIT_LIST(&h_List); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + + if ((err = FmPcdCcNodeTreeTryLock(p_FmHc->h_FmPcd, h_CcNode, &h_List)) != E_OK) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; + } + + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + + err = FmPcdCcModiyNextEngineParamNode(p_FmHc->h_FmPcd, h_CcNode, keyIndex, p_FmPcdCcNextEngineParams, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcNodeTreeReleaseLock(&h_List); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + FmPcdCcNodeTreeReleaseLock(&h_List); + return err; +} + + +t_Error FmHcPcdCcModifyKeyAndNextEngine(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_List h_OldPointersLst, h_NewPointersLst; + t_Error err = E_OK; + t_List h_List; + uint32_t intFlags; + t_Handle h_Params; + + INIT_LIST(&h_OldPointersLst); + INIT_LIST(&h_NewPointersLst); + INIT_LIST(&h_List); + + intFlags = FmPcdLock(p_FmHc->h_FmPcd); + + if ((err = FmPcdCcNodeTreeTryLock(p_FmHc->h_FmPcd, h_CcNode, &h_List)) != E_OK) + { + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + return err; + } + + FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); + + + err = FmPcdCcModifyKeyAndNextEngine(p_FmHc->h_FmPcd,h_CcNode,keyIndex,keySize, p_KeyParams, &h_OldPointersLst,&h_NewPointersLst, &h_Params); + if(err) + { + FmPcdCcNodeTreeReleaseLock(&h_List); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = HcDynamicChange(p_FmHc, &h_OldPointersLst, &h_NewPointersLst, &h_Params); + + FmPcdCcNodeTreeReleaseLock(&h_List); + + + return err; +} + + +t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + t_Error err = E_OK; + + ASSERT_COND(p_FmHc); + + p_HcFrame = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + /* first read SP register */ + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId); + p_HcFrame->extraReg = 0xFFFFF800; + + BUILD_FD(SIZE_OF_HC_FRAME_PORT_REGS); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + /* spReg is the first reg, so we can use it both for read and for write */ + if(add) + p_HcFrame->hcSpecificData.portRegsForRead.spReg |= spReg; + else + p_HcFrame->hcSpecificData.portRegsForRead.spReg &= ~spReg; + + p_HcFrame->actionReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId); + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + XX_FreeSmart(p_HcFrame); + + return E_OK; +} + +t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t cppReg) +{ + t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; + t_HcFrame *p_HcFrame; + t_DpaaFD fmFd; + t_Error err = E_OK; + + ASSERT_COND(p_FmHc); + + p_HcFrame = (t_HcFrame 
*)XX_MallocSmart((sizeof(t_HcFrame) + p_FmHc->padTill16), 0, 16); + if (!p_HcFrame) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame obj")); + memset(p_HcFrame, 0, sizeof(t_HcFrame)); + /* first read SP register */ + p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); + p_HcFrame->actionReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId); + p_HcFrame->extraReg = 0xFFFFF800; + p_HcFrame->hcSpecificData.singleRegForWrite = cppReg; + + BUILD_FD(sizeof(t_HcFrame)); + + if ((err = EnQFrm(p_FmHc, &fmFd, &p_HcFrame->commandSequence)) != E_OK) + { + XX_FreeSmart(p_HcFrame); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + XX_FreeSmart(p_HcFrame); + + return E_OK; +} + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk +NCSW_FM_INC = $(srctree)/drivers/net/dpa/NetCommSw/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + +obj-y += fsl-ncsw-MAC.o + +fsl-ncsw-MAC-objs := dtsec.o dtsec_mii_acc.o fm_mac.o tgec.o tgec_mii_acc.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec.c @@ -0,0 +1,1943 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File dtsec.c + + @Description FM dTSEC ... 
+*//***************************************************************************/ + +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "xx_ext.h" +#include "endian_ext.h" +#include "crc_mac_addr_ext.h" +#include "debug_ext.h" + +#include "fm_common.h" +#include "dtsec.h" + + +/*****************************************************************************/ +/* Internal routines */ +/*****************************************************************************/ + +static t_Error CheckInitParameters(t_Dtsec *p_Dtsec) +{ + if(ENET_SPEED_FROM_MODE(p_Dtsec->enetMode) >= e_ENET_SPEED_10000) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 1G MAC driver only supports 1G or lower speeds")); + if(p_Dtsec->macId >= FM_MAX_NUM_OF_1G_MACS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("macId can not be greater than the number of 1G MACs")); + if(p_Dtsec->addr == 0) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC Must have a valid MAC Address")); + if(((p_Dtsec->enetMode == e_ENET_MODE_SGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_QSGMII_1000)) && + p_Dtsec->p_DtsecDriverParam->halfDuplex) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet MAC 1G can't work in half duplex")); + if(p_Dtsec->p_DtsecDriverParam->halfDuplex && (p_Dtsec->p_DtsecDriverParam)->loopback) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("LoopBack is not supported in halfDuplex mode")); +#ifdef FM_NO_RX_PREAM_ERRATA_DTSECx1 + if(p_Dtsec->p_DtsecDriverParam->preambleRxEn) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("preambleRxEn")); +#endif /* FM_NO_RX_PREAM_ERRATA_DTSECx1 */ + if(((p_Dtsec->p_DtsecDriverParam)->preambleTxEn || (p_Dtsec->p_DtsecDriverParam)->preambleRxEn) &&( (p_Dtsec->p_DtsecDriverParam)->preambleLength != 0x7)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Preamble length should be 0x7 bytes")); + if((p_Dtsec->p_DtsecDriverParam)->fifoTxWatermarkH<((p_Dtsec->p_DtsecDriverParam)->fifoTxThr+8)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fifoTxWatermarkH has to be at least 8 larger than fifoTxThr")); + if((p_Dtsec->p_DtsecDriverParam)->halfDuplex && + (p_Dtsec->p_DtsecDriverParam->txTimeStampEn || p_Dtsec->p_DtsecDriverParam->rxTimeStampEn)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dTSEC in half duplex mode has to be with 1588 timeStamping diable")); + if((p_Dtsec->p_DtsecDriverParam)->actOnRxPauseFrame && (p_Dtsec->p_DtsecDriverParam)->controlFrameAccept ) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Receive control frame are not passed to the system memory so it can not be accept ")); + if((p_Dtsec->p_DtsecDriverParam)->packetAlignmentPadding > MAX_PACKET_ALIGNMENT) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("packetAlignmentPadding can't be greater than %d ",MAX_PACKET_ALIGNMENT )); + if(((p_Dtsec->p_DtsecDriverParam)->nonBackToBackIpg1 > MAX_INTER_PACKET_GAP) || + ((p_Dtsec->p_DtsecDriverParam)->nonBackToBackIpg2 > MAX_INTER_PACKET_GAP) || + ((p_Dtsec->p_DtsecDriverParam)->backToBackIpg > MAX_INTER_PACKET_GAP)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Inter packet gap can't be greater than %d ",MAX_INTER_PACKET_GAP )); + if((p_Dtsec->p_DtsecDriverParam)->alternateBackoffVal > MAX_INTER_PALTERNATE_BEB) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("alternateBackoffVal can't be greater than %d ",MAX_INTER_PALTERNATE_BEB )); + if((p_Dtsec->p_DtsecDriverParam)->maxRetransmission > MAX_RETRANSMISSION) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("maxRetransmission can't be greater than %d ",MAX_RETRANSMISSION )); + if((p_Dtsec->p_DtsecDriverParam)->collisionWindow > 
MAX_COLLISION_WINDOW) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("collisionWindow can't be greater than %d ",MAX_COLLISION_WINDOW )); + + /* If Auto negotiation process is disabled, need to */ + /* Set up the PHY using the MII Management Interface */ + if (p_Dtsec->p_DtsecDriverParam->tbiPhyAddr > MAX_PHYS) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("PHY address (should be 0-%d)", MAX_PHYS)); + if(!p_Dtsec->f_Exception) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("uninitialized f_Exception")); + if(!p_Dtsec->f_Event) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("uninitialized f_Event")); + return E_OK; +} + +static uint8_t GetMiiDiv(int32_t refClk) +{ + uint32_t div,tmpClk; + int minRange; + + div = 1; + minRange = (int)(refClk/40 - 1); + + tmpClk = (uint32_t)ABS(refClk/60 - 1); + if (tmpClk < minRange) + { + div = 2; + minRange = (int)tmpClk; + } + tmpClk = (uint32_t)ABS(refClk/60 - 1); + if (tmpClk < minRange) + { + div = 3; + minRange = (int)tmpClk; + } + tmpClk = (uint32_t)ABS(refClk/80 - 1); + if (tmpClk < minRange) + { + div = 4; + minRange = (int)tmpClk; + } + tmpClk = (uint32_t)ABS(refClk/100 - 1); + if (tmpClk < minRange) + { + div = 5; + minRange = (int)tmpClk; + } + tmpClk = (uint32_t)ABS(refClk/140 - 1); + if (tmpClk < minRange) + { + div = 6; + minRange = (int)tmpClk; + } + tmpClk = (uint32_t)ABS(refClk/280 - 1); + if (tmpClk < minRange) + { + div = 7; + minRange = (int)tmpClk; + } + + return (uint8_t)div; +} + +/* ........................................................................... */ + +static void SetDefaultParam(t_DtsecDriverParam *p_DtsecDriverParam) +{ + p_DtsecDriverParam->errorDisabled = DEFAULT_errorDisabled; + + p_DtsecDriverParam->promiscuousEnable = DEFAULT_promiscuousEnable; + + p_DtsecDriverParam->pauseExtended = DEFAULT_pauseExtended; + p_DtsecDriverParam->pauseTime = DEFAULT_pauseTime; + + p_DtsecDriverParam->halfDuplex = DEFAULT_halfDuplex; + p_DtsecDriverParam->halfDulexFlowControlEn = DEFAULT_halfDulexFlowControlEn; + p_DtsecDriverParam->txTimeStampEn = DEFAULT_txTimeStampEn; + p_DtsecDriverParam->rxTimeStampEn = DEFAULT_rxTimeStampEn; + + p_DtsecDriverParam->packetAlignmentPadding = DEFAULT_packetAlignment; + p_DtsecDriverParam->controlFrameAccept = DEFAULT_controlFrameAccept; + p_DtsecDriverParam->groupHashExtend = DEFAULT_groupHashExtend; + p_DtsecDriverParam->broadcReject = DEFAULT_broadcReject; + p_DtsecDriverParam->rxShortFrame = DEFAULT_rxShortFrame; + p_DtsecDriverParam->exactMatch = DEFAULT_exactMatch; + p_DtsecDriverParam->debugMode = DEFAULT_debugMode; + + p_DtsecDriverParam->loopback = DEFAULT_loopback; + p_DtsecDriverParam->tbiPhyAddr = DEFAULT_tbiPhyAddr; + p_DtsecDriverParam->actOnRxPauseFrame = DEFAULT_actOnRxPauseFrame; + p_DtsecDriverParam->actOnTxPauseFrame = DEFAULT_actOnTxPauseFrame; + + p_DtsecDriverParam->preambleLength = DEFAULT_PreAmLength; + p_DtsecDriverParam->preambleRxEn = DEFAULT_PreAmRxEn; + p_DtsecDriverParam->preambleTxEn = DEFAULT_PreAmTxEn; + p_DtsecDriverParam->lengthCheckEnable = DEFAULT_lengthCheckEnable; + p_DtsecDriverParam->padAndCrcEnable = DEFAULT_padAndCrcEnable; + p_DtsecDriverParam->crcEnable = DEFAULT_crcEnable; + + p_DtsecDriverParam->nonBackToBackIpg1 = DEFAULT_nonBackToBackIpg1; + p_DtsecDriverParam->nonBackToBackIpg2 = DEFAULT_nonBackToBackIpg2; + p_DtsecDriverParam->minIfgEnforcement = DEFAULT_minIfgEnforcement; + p_DtsecDriverParam->backToBackIpg = DEFAULT_backToBackIpg; + + p_DtsecDriverParam->alternateBackoffVal = DEFAULT_altBackoffVal; + p_DtsecDriverParam->alternateBackoffEnable = DEFAULT_altBackoffEnable; + 
p_DtsecDriverParam->backPressureNoBackoff = DEFAULT_backPressureNoBackoff; + p_DtsecDriverParam->noBackoff = DEFAULT_noBackoff; + p_DtsecDriverParam->excessDefer = DEFAULT_excessDefer; + p_DtsecDriverParam->maxRetransmission = DEFAULT_maxRetransmission; + p_DtsecDriverParam->collisionWindow = DEFAULT_collisionWindow; + + p_DtsecDriverParam->maxFrameLength = DEFAULT_maxFrameLength; + + p_DtsecDriverParam->fifoTxThr = DEFAULT_fifoTxThr; + p_DtsecDriverParam->fifoTxWatermarkH = DEFAULT_fifoTxWatermarkH; + + p_DtsecDriverParam->fifoRxWatermarkL = DEFAULT_fifoRxWatermarkL; +} + +static void DtsecException(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + uint32_t event; + t_DtsecMemMap *p_DtsecMemMap; + + ASSERT_COND(p_Dtsec); + p_DtsecMemMap = p_Dtsec->p_MemMap; + ASSERT_COND(p_DtsecMemMap); + + event = GET_UINT32(p_DtsecMemMap->ievent); + /* handle only MDIO events */ + event &= (IMASK_MMRDEN | IMASK_MMWREN); + if(event) + { + event &= GET_UINT32(p_DtsecMemMap->imask); + + WRITE_UINT32(p_DtsecMemMap->ievent, event); + + if(event & IMASK_MMRDEN) + p_Dtsec->f_Event(p_Dtsec->h_App, e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET); + if(event & IMASK_MMWREN) + p_Dtsec->f_Event(p_Dtsec->h_App, e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET); + } +} + +static void UpdateStatistics(t_Dtsec *p_Dtsec) +{ + t_DtsecMemMap *p_DtsecMemMap = p_Dtsec->p_MemMap; + uint32_t car1 = GET_UINT32(p_DtsecMemMap->car1); + uint32_t car2 = GET_UINT32(p_DtsecMemMap->car2); + + if(car1) + { + WRITE_UINT32(p_DtsecMemMap->car1, car1); + if(car1 & CAR1_TR64) + p_Dtsec->internalStatistics.tr64 += VAL22BIT; + if(car1 & CAR1_TR127) + p_Dtsec->internalStatistics.tr127 += VAL22BIT; + if(car1 & CAR1_TR255) + p_Dtsec->internalStatistics.tr255 += VAL22BIT; + if(car1 & CAR1_TR511) + p_Dtsec->internalStatistics.tr511 += VAL22BIT; + if(car1 & CAR1_TRK1) + p_Dtsec->internalStatistics.tr1k += VAL22BIT; + if(car1 & CAR1_TRMAX) + p_Dtsec->internalStatistics.trmax += VAL22BIT; + if(car1 & CAR1_TRMGV) + p_Dtsec->internalStatistics.trmgv += VAL22BIT; + if(car1 & CAR1_RBYT) + p_Dtsec->internalStatistics.rbyt += (uint64_t)VAL32BIT; + if(car1 & CAR1_RPKT) + p_Dtsec->internalStatistics.rpkt += VAL22BIT; + if(car1 & CAR1_RMCA) + p_Dtsec->internalStatistics.rmca += VAL22BIT; + if(car1 & CAR1_RBCA) + p_Dtsec->internalStatistics.rbca += VAL22BIT; + if(car1 & CAR1_RXPF) + p_Dtsec->internalStatistics.rxpf += VAL16BIT; + if(car1 & CAR1_RALN) + p_Dtsec->internalStatistics.raln += VAL16BIT; + if(car1 & CAR1_RFLR) + p_Dtsec->internalStatistics.rflr += VAL16BIT; + if(car1 & CAR1_RCDE) + p_Dtsec->internalStatistics.rcde += VAL16BIT; + if(car1 & CAR1_RCSE) + p_Dtsec->internalStatistics.rcse += VAL16BIT; + if(car1 & CAR1_RUND) + p_Dtsec->internalStatistics.rund += VAL16BIT; + if(car1 & CAR1_ROVR) + p_Dtsec->internalStatistics.rovr += VAL16BIT; + if(car1 & CAR1_RFRG) + p_Dtsec->internalStatistics.rfrg += VAL16BIT; + if(car1 & CAR1_RJBR) + p_Dtsec->internalStatistics.rjbr += VAL16BIT; + if(car1 & CAR1_RDRP) + p_Dtsec->internalStatistics.rdrp += VAL16BIT; + } + if(car2) + { + WRITE_UINT32(p_DtsecMemMap->car2, car2); + if(car2 & CAR2_TFCS) + p_Dtsec->internalStatistics.tfcs += VAL12BIT; + if(car2 & CAR2_TBYT) + p_Dtsec->internalStatistics.tbyt += (uint64_t)VAL32BIT; + if(car2 & CAR2_TPKT) + p_Dtsec->internalStatistics.tpkt += VAL22BIT; + if(car2 & CAR2_TMCA) + p_Dtsec->internalStatistics.tmca += VAL22BIT; + if(car2 & CAR2_TBCA) + p_Dtsec->internalStatistics.tbca += VAL22BIT; + if(car2 & CAR2_TXPF) + p_Dtsec->internalStatistics.txpf += VAL16BIT; + if(car2 & CAR2_TDRP) + 
p_Dtsec->internalStatistics.tdrp += VAL16BIT; + } +} + +/* .............................................................................. */ + +static uint16_t DtsecGetMaxFrameLength(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_VALUE(p_Dtsec, E_INVALID_HANDLE, 0); + + return (uint16_t)GET_UINT32(p_Dtsec->p_MemMap->maxfrm); +} + +static void DtsecErrException(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + uint32_t event; + t_DtsecMemMap *p_DtsecMemMap = p_Dtsec->p_MemMap; + + event = GET_UINT32(p_DtsecMemMap->ievent); + /* do not handle MDIO events */ + event &= ~(IMASK_MMRDEN | IMASK_MMWREN); + + event &= GET_UINT32(p_DtsecMemMap->imask); + + WRITE_UINT32(p_DtsecMemMap->ievent, event); + + if(event & IMASK_BREN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_BAB_RX); + if(event & IMASK_RXCEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_RX_CTL); + if(event & IMASK_MSROEN) + UpdateStatistics(p_Dtsec); + if(event & IMASK_GTSCEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET); + if(event & IMASK_BTEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_BAB_TX); + if(event & IMASK_TXCEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_CTL); + if(event & IMASK_TXEEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_ERR); + if(event & IMASK_LCEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_LATE_COL); + if(event & IMASK_CRLEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_COL_RET_LMT); + if(event & IMASK_XFUNEN) + { +#ifdef FM_TX_LOCKUP_ERRATA_DTSEC6 + uint32_t tpkt1, tmpReg1, tpkt2, tmpReg2, i; + /* a. Write 0x00E0_0C00 to DTSEC_ID */ + /* This is a read only regidter */ + + /* b. Read and save the value of TPKT */ + tpkt1 = GET_UINT32(p_DtsecMemMap->tpkt); + + /* c. Read the register at dTSEC address offset 0x32C */ + tmpReg1 = GET_UINT32(*(uint32_t*)((uint8_t*)p_DtsecMemMap + 0x32c)); + + /* d. Compare bits [9:15] to bits [25:31] of the register at address offset 0x32C. */ + if((tmpReg1 & 0x007F0000) != (tmpReg1 & 0x0000007F)) + { + /* If they are not equal, save the value of this register and wait for at least + * MAXFRM*16 ns */ + XX_UDelay((uint32_t)(MIN(DtsecGetMaxFrameLength(p_Dtsec)*16/1000, 1))); + } + + /* e. Read and save TPKT again and read the register at dTSEC address offset + 0x32C again*/ + tpkt2 = GET_UINT32(p_DtsecMemMap->tpkt); + tmpReg2 = GET_UINT32(*(uint32_t*)((uint8_t*)p_DtsecMemMap + 0x32c)); + + /* f. Compare the value of TPKT saved in step b to value read in step e. Also + compare bits [9:15] of the register at offset 0x32C saved in step d to the value + of bits [9:15] saved in step e. If the two registers values are unchanged, then + the transmit portion of the dTSEC controller is locked up and the user should + proceed to the recover sequence. */ + if((tpkt1 == tpkt2) && ((tmpReg1 & 0x007F0000) == (tmpReg2 & 0x007F0000))) + { + /* recover sequence */ + + /* a.Write a 1 to RCTRL[GRS]*/ + + WRITE_UINT32(p_DtsecMemMap->rctrl, GET_UINT32(p_DtsecMemMap->rctrl) | RCTRL_GRS); + + /* b.Wait until IEVENT[GRSC]=1, or at least 100 us has elapsed. 
*/ + for(i = 0 ; i < 100 ; i++ ) + { + if(GET_UINT32(p_DtsecMemMap->ievent) & IMASK_GRSCEN) + break; + XX_UDelay(1); + } + if(GET_UINT32(p_DtsecMemMap->ievent) & IMASK_GRSCEN) + WRITE_UINT32(p_DtsecMemMap->ievent, IMASK_GRSCEN); + else + DBG(INFO,("Rx lockup due to dTSEC Tx lockup")); + + + /* c.Write a 1 to bit n of FM_RSTC (offset 0x0CC of FPM)*/ + FmResetMac(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MAC_1G, p_Dtsec->fmMacControllerDriver.macId); + + /* d.Wait 4 Tx clocks (32 ns) */ + XX_UDelay(1); + + /* e.Write a 0 to bit n of FM_RSTC. */ + /* cleared by FMAN */ + } + else + { + /* If either value has changed, the dTSEC controller is not locked up and the + controller should be allowed to proceed normally by writing the reset value + of 0x0824_0101 to DTSEC_ID. */ + /* Register is read only */ + } +#endif /* FM_TX_LOCKUP_ERRATA_DTSEC6 */ + + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_FIFO_UNDRN); + } + if(event & IMASK_MAGEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_MAG_PCKT); + if(event & IMASK_GRSCEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET); + if(event & IMASK_TDPEEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_TX_DATA_ERR); + if(event & IMASK_RDPEEN) + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_RX_DATA_ERR); + + /* - masked interrupts */ + ASSERT_COND(!(event & IMASK_ABRTEN)); + ASSERT_COND(!(event & IMASK_IFERREN)); +} + +static void Dtsec1588Exception(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + uint32_t event; + t_DtsecMemMap *p_DtsecMemMap = p_Dtsec->p_MemMap; + + if (p_Dtsec->ptpTsuEnabled) + { + event = GET_UINT32(p_DtsecMemMap->tmr_pevent); + event &= GET_UINT32(p_DtsecMemMap->tmr_pemask); + if(event) + { + WRITE_UINT32(p_DtsecMemMap->tmr_pevent, event); + ASSERT_COND(event & PEMASK_TSRE); + p_Dtsec->f_Exception(p_Dtsec->h_App, e_FM_MAC_EX_1G_1588_TS_RX_ERR); + } + } +} + +/* ........................................................................... */ + +static void FreeInitResources(t_Dtsec *p_Dtsec) +{ + /*TODO - need to ask why with mdioIrq != 0*/ + if ((p_Dtsec->mdioIrq != 0) && (p_Dtsec->mdioIrq != NO_IRQ)) + { + XX_DisableIntr(p_Dtsec->mdioIrq); + XX_FreeIntr(p_Dtsec->mdioIrq); + } + else if (p_Dtsec->mdioIrq == 0) + FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_NORMAL); + FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_ERR); + FmUnregisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC_TMR, p_Dtsec->macId, e_FM_INTR_TYPE_NORMAL); + + /* release the driver's group hash table */ + FreeHashTable(p_Dtsec->p_MulticastAddrHash); + p_Dtsec->p_MulticastAddrHash = NULL; + + /* release the driver's individual hash table */ + FreeHashTable(p_Dtsec->p_UnicastAddrHash); + p_Dtsec->p_UnicastAddrHash = NULL; +} + +/* ........................................................................... */ + +static void HardwareClearAddrInPaddr(t_Dtsec *p_Dtsec, uint8_t paddrNum) +{ + WRITE_UINT32(((t_DtsecMemMap*)p_Dtsec->p_MemMap)->macaddr[paddrNum].exact_match1, 0x0); + WRITE_UINT32(((t_DtsecMemMap*)p_Dtsec->p_MemMap)->macaddr[paddrNum].exact_match2, 0x0); +} + +/* ........................................................................... 
*/ + +static void HardwareAddAddrInPaddr(t_Dtsec *p_Dtsec, uint64_t *p_Addr, uint8_t paddrNum) +{ + uint32_t tmpReg32 = 0; + uint64_t addr = *p_Addr; + t_DtsecMemMap *p_DtsecMemMap = (t_DtsecMemMap*)p_Dtsec->p_MemMap; + + tmpReg32 = (uint32_t)(addr); + SwapUint32P(&tmpReg32); + WRITE_UINT32(p_DtsecMemMap->macaddr[paddrNum].exact_match1, tmpReg32); + + tmpReg32 = (uint32_t)(addr>>32); + SwapUint32P(&tmpReg32); + WRITE_UINT32(p_DtsecMemMap->macaddr[paddrNum].exact_match2, tmpReg32); +} + +/* ........................................................................... */ + +static t_Error GracefulStop(t_Dtsec *p_Dtsec, e_CommMode mode) +{ + t_DtsecMemMap *p_MemMap; + + ASSERT_COND(p_Dtsec); + + p_MemMap= (t_DtsecMemMap*)(p_Dtsec->p_MemMap); + ASSERT_COND(p_MemMap); + + /* Assert the graceful transmit stop bit */ + if (mode & e_COMM_MODE_RX) + WRITE_UINT32(p_MemMap->rctrl, + GET_UINT32(p_MemMap->rctrl) | RCTRL_GRS); + +#ifdef FM_GRS_ERRATA_DTSEC_A002 + XX_UDelay(100); +#endif /* FM_GRS_ERRATA_DTSEC_A002 */ + +#ifdef FM_GTS_ERRATA_DTSEC_A004 + DBG(INFO, ("GTS not supported due to DTSEC_A004 errata.")); +#else /* not FM_GTS_ERRATA_DTSEC_A004 */ + if (mode & e_COMM_MODE_TX) + WRITE_UINT32(p_MemMap->tctrl, + GET_UINT32(p_MemMap->tctrl) | TCTRL_GTS); +#endif /* not FM_GTS_ERRATA_DTSEC_A004 */ + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error GracefulRestart(t_Dtsec *p_Dtsec, e_CommMode mode) +{ + t_DtsecMemMap *p_MemMap; + + ASSERT_COND(p_Dtsec); + + p_MemMap= (t_DtsecMemMap*)(p_Dtsec->p_MemMap); + ASSERT_COND(p_MemMap); + + /* clear the graceful receive stop bit */ + if(mode & e_COMM_MODE_TX) + WRITE_UINT32(p_MemMap->tctrl, + GET_UINT32(p_MemMap->tctrl) & ~TCTRL_GTS); + + if(mode & e_COMM_MODE_RX) + WRITE_UINT32(p_MemMap->rctrl, + GET_UINT32(p_MemMap->rctrl) & ~RCTRL_GRS); + + return E_OK; +} + + +/*****************************************************************************/ +/* dTSEC Configs modification functions */ +/*****************************************************************************/ + + +/* .............................................................................. */ + +static t_Error DtsecConfigLoopback(t_Handle h_Dtsec, bool newVal) +{ + + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + p_Dtsec->p_DtsecDriverParam->loopback = newVal; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecConfigMaxFrameLength(t_Handle h_Dtsec, uint16_t newVal) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + p_Dtsec->p_DtsecDriverParam->maxFrameLength = newVal; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecConfigPadAndCrc(t_Handle h_Dtsec, bool newVal) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + p_Dtsec->p_DtsecDriverParam->padAndCrcEnable = newVal; + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error DtsecConfigHalfDuplex(t_Handle h_Dtsec, bool newVal) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + p_Dtsec->p_DtsecDriverParam->halfDuplex = newVal; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecConfigLengthCheck(t_Handle h_Dtsec, bool newVal) +{ +#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002 +UNUSED(h_Dtsec); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!")); + +#else + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + p_Dtsec->p_DtsecDriverParam->lengthCheckEnable = newVal; + + return E_OK; +#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */ +} + +static t_Error DtsecConfigException(t_Handle h_Dtsec, e_FmMacExceptions exception, bool enable) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + uint32_t bitMask = 0; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + if(exception != e_FM_MAC_EX_1G_1588_TS_RX_ERR) + { + GET_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_Dtsec->exceptions |= bitMask; + else + p_Dtsec->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + } + else + { + if(!p_Dtsec->ptpTsuEnabled) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exception valid for 1588 only")); + switch(exception){ + case(e_FM_MAC_EX_1G_1588_TS_RX_ERR): + if(enable) + p_Dtsec->enTsuErrExeption = TRUE; + else + p_Dtsec->enTsuErrExeption = FALSE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + } + } + return E_OK; +} +/*****************************************************************************/ +/* dTSEC Run Time API functions */ +/*****************************************************************************/ + +/* .............................................................................. */ + +static t_Error DtsecEnable(t_Handle h_Dtsec, e_CommMode mode) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_MemMap ; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_INVALID_HANDLE); + + p_MemMap= (t_DtsecMemMap*)(p_Dtsec->p_MemMap); + + tmpReg32 = GET_UINT32(p_MemMap->maccfg1); + if (mode & e_COMM_MODE_RX) + tmpReg32 |= MACCFG1_RX_EN; + if (mode & e_COMM_MODE_TX) + tmpReg32 |= MACCFG1_TX_EN; + WRITE_UINT32(p_MemMap->maccfg1, tmpReg32); + + GracefulRestart(p_Dtsec, mode); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecDisable (t_Handle h_Dtsec, e_CommMode mode) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_MemMap ; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_INVALID_HANDLE); + + p_MemMap = (t_DtsecMemMap*)(p_Dtsec->p_MemMap); + + GracefulStop(p_Dtsec, mode); + + tmpReg32 = GET_UINT32(p_MemMap->maccfg1); + if (mode & e_COMM_MODE_RX) + tmpReg32 &= ~MACCFG1_RX_EN; + if (mode & e_COMM_MODE_TX) + tmpReg32 &= ~MACCFG1_TX_EN; + WRITE_UINT32(p_MemMap->maccfg1, tmpReg32); + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error DtsecTxMacPause(t_Handle h_Dtsec, uint16_t pauseTime) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + uint32_t ptv = 0; + t_DtsecMemMap *p_MemMap; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_INVALID_STATE); + + p_MemMap = (t_DtsecMemMap*)(p_Dtsec->p_MemMap); + + if (pauseTime) + { +#ifdef FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 + { + if (pauseTime <= 320) + RETURN_ERROR(MINOR, E_INVALID_VALUE, + ("This pause-time value of %d is illegal due to errata dTSEC-A003!" + " value should be greater than 320.")); + } +#endif /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 */ + +#ifdef FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Dtsec->fmMacControllerDriver.h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + pauseTime += 2; + } +#endif /* FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 */ + + ptv = GET_UINT32(p_MemMap->ptv); + ptv |= pauseTime; + WRITE_UINT32(p_MemMap->ptv, ptv); + + /* trigger the transmission of a flow-control pause frame */ + WRITE_UINT32(p_MemMap->maccfg1, + GET_UINT32(p_MemMap->maccfg1) | MACCFG1_TX_FLOW); + } + else + { + WRITE_UINT32(p_MemMap->maccfg1, + GET_UINT32(p_MemMap->maccfg1) & ~MACCFG1_TX_FLOW); + } + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecRxIgnoreMacPause(t_Handle h_Dtsec, bool en) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_MemMap; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_INVALID_STATE); + + p_MemMap = (t_DtsecMemMap*)(p_Dtsec->p_MemMap); + + tmpReg32 = GET_UINT32(p_MemMap->maccfg1); + if (en) + tmpReg32 &= ~MACCFG1_RX_FLOW; + else + tmpReg32 |= MACCFG1_RX_FLOW; + WRITE_UINT32(p_MemMap->maccfg1, tmpReg32); + + return E_OK; +} + + +/* .............................................................................. */ + +static t_Error DtsecEnable1588TimeStamp(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); +#ifdef FM_10_100_SGMII_NO_TS_ERRATA_DTSEC3 + if((p_Dtsec->enetMode == e_ENET_MODE_SGMII_10) || (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("1588TimeStamp in 10/100 SGMII")); +#endif /* FM_10_100_SGMII_NO_TS_ERRATA_DTSEC3 */ + p_Dtsec->ptpTsuEnabled = TRUE; + WRITE_UINT32(p_Dtsec->p_MemMap->rctrl, GET_UINT32(p_Dtsec->p_MemMap->rctrl) | RCTRL_RTSE); + WRITE_UINT32(p_Dtsec->p_MemMap->tctrl, GET_UINT32(p_Dtsec->p_MemMap->tctrl) | TCTRL_TTSE); + + return E_OK; +} + +static t_Error DtsecDisable1588TimeStamp(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + + p_Dtsec->ptpTsuEnabled = FALSE; + WRITE_UINT32(p_Dtsec->p_MemMap->rctrl, GET_UINT32(p_Dtsec->p_MemMap->rctrl) & ~RCTRL_RTSE); + WRITE_UINT32(p_Dtsec->p_MemMap->tctrl, GET_UINT32(p_Dtsec->p_MemMap->tctrl) & ~TCTRL_TTSE); + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error DtsecGetStatistics(t_Handle h_Dtsec, t_FmMacStatistics *p_Statistics) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER); + + if (p_Dtsec->statisticsLevel == e_FM_MAC_NONE_STATISTICS) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Statistics disabled")); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + memset(p_Statistics, 0xff, sizeof(t_FmMacStatistics)); + + if (p_Dtsec->statisticsLevel == e_FM_MAC_FULL_STATISTICS) + { + p_Statistics->eStatPkts64 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tr64)) + + p_Dtsec->internalStatistics.tr64; /**< r-10G tr-DT 64 byte frame counter */ + p_Statistics->eStatPkts65to127 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tr127)) + + p_Dtsec->internalStatistics.tr127; /**< r-10G 65 to 127 byte frame counter */ + p_Statistics->eStatPkts128to255 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tr255)) + + p_Dtsec->internalStatistics.tr255; /**< r-10G 128 to 255 byte frame counter */ + p_Statistics->eStatPkts256to511 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tr511)) + + p_Dtsec->internalStatistics.tr511; /**< r-10G 256 to 511 byte frame counter */ + p_Statistics->eStatPkts512to1023 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tr1k)) + + p_Dtsec->internalStatistics.tr1k; /**< r-10G 512 to 1023 byte frame counter */ + p_Statistics->eStatPkts1024to1518 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->trmax)) + + p_Dtsec->internalStatistics.trmax; /**< r-10G 1024 to 1518 byte frame counter */ + p_Statistics->eStatPkts1519to1522 = (MASK22BIT & GET_UINT32(p_DtsecMemMap->trmgv)) + + p_Dtsec->internalStatistics.trmgv; /**< r-10G 1519 to 1522 byte good frame count */ + /* MIB II */ + p_Statistics->ifInOctets = GET_UINT32(p_DtsecMemMap->rbyt) + + p_Dtsec->internalStatistics.rbyt; /**< Total number of byte received. */ + p_Statistics->ifInPkts = (MASK22BIT & GET_UINT32(p_DtsecMemMap->rpkt)) + + p_Dtsec->internalStatistics.rpkt; /**< Total number of packets received.*/ + p_Statistics->ifInMcastPkts = (MASK22BIT & GET_UINT32(p_DtsecMemMap->rmca)) + + p_Dtsec->internalStatistics.rmca; /**< Total number of multicast frame received*/ + p_Statistics->ifInBcastPkts = (MASK22BIT & GET_UINT32(p_DtsecMemMap->rbca)) + + p_Dtsec->internalStatistics.rbca; /**< Total number of broadcast frame received */ + p_Statistics->ifOutOctets = GET_UINT32(p_DtsecMemMap->tbyt) + + p_Dtsec->internalStatistics.tbyt; /**< Total number of byte sent. 
*/ + p_Statistics->ifOutPkts = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tpkt)) + + p_Dtsec->internalStatistics.tpkt; /**< Total number of packets sent .*/ + p_Statistics->ifOutMcastPkts = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tmca)) + + p_Dtsec->internalStatistics.tmca; /**< Total number of multicast frame sent */ + p_Statistics->ifOutBcastPkts = (MASK22BIT & GET_UINT32(p_DtsecMemMap->tbca)) + + p_Dtsec->internalStatistics.tbca; /**< Total number of multicast frame sent */ + } +/* */ + p_Statistics->eStatFragments = (MASK16BIT & GET_UINT32(p_DtsecMemMap->rfrg)) + + p_Dtsec->internalStatistics.rfrg; /**< Total number of packets that were less than 64 octets long with a wrong CRC.*/ + p_Statistics->eStatJabbers = (MASK16BIT & GET_UINT32(p_DtsecMemMap->rjbr)) + + p_Dtsec->internalStatistics.rjbr; /**< Total number of packets longer than valid maximum length octets */ + + p_Statistics->eStatsDropEvents = (MASK16BIT & GET_UINT32(p_DtsecMemMap->rdrp)) + + p_Dtsec->internalStatistics.rdrp; /**< number of dropped packets due to internal errors of the MAC Client. */ + p_Statistics->eStatCRCAlignErrors = (MASK16BIT & GET_UINT32(p_DtsecMemMap->raln)) + + p_Dtsec->internalStatistics.raln; /**< Incremented when frames of correct length but with CRC error are received.*/ + + p_Statistics->eStatUndersizePkts = (MASK16BIT & GET_UINT32(p_DtsecMemMap->rund)) + + p_Dtsec->internalStatistics.rund; /**< Total number of packets that were less than 64 octets long with a good CRC.*/ + p_Statistics->eStatOversizePkts = (MASK16BIT & GET_UINT32(p_DtsecMemMap->rovr)) + + p_Dtsec->internalStatistics.rovr; /**< T,B.D*/ +/* Pause */ + p_Statistics->reStatPause = (MASK16BIT & GET_UINT32(p_DtsecMemMap->rxpf)) + + p_Dtsec->internalStatistics.rxpf; /**< Pause MAC Control received */ + p_Statistics->teStatPause = (MASK16BIT & GET_UINT32(p_DtsecMemMap->txpf)) + + p_Dtsec->internalStatistics.txpf; /**< Pause MAC Control sent */ + + p_Statistics->ifInDiscards = p_Statistics->eStatsDropEvents; /**< Frames received, but discarded due to problems within the MAC RX. */ + + p_Statistics->ifInErrors = p_Statistics->eStatsDropEvents + + p_Statistics->eStatCRCAlignErrors + + (MASK16BIT & GET_UINT32(p_DtsecMemMap->rflr)) + + p_Dtsec->internalStatistics.rflr + + (MASK16BIT & GET_UINT32(p_DtsecMemMap->rcde)) + + p_Dtsec->internalStatistics.rcde + + (MASK16BIT & GET_UINT32(p_DtsecMemMap->rcse)) + + p_Dtsec->internalStatistics.rcse; + + p_Statistics->ifOutDiscards = (MASK16BIT & GET_UINT32(p_DtsecMemMap->tdrp)) + + p_Dtsec->internalStatistics.tdrp; /**< Frames received, but discarded due to problems within the MAC TX N/A!.*/ + p_Statistics->ifOutErrors = p_Statistics->ifOutDiscards /**< Number of frames transmitted with error: */ + + (MASK12BIT & GET_UINT32(p_DtsecMemMap->tfcs)) + + p_Dtsec->internalStatistics.tfcs; + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error DtsecModifyMacAddress (t_Handle h_Dtsec, t_EnetAddr *p_EnetAddr) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + uint32_t tmpReg32 = 0; + uint64_t addr; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + /* Initialize MAC Station Address registers (1 & 2) */ + /* Station address has to be swapped (big endian to little endian) */ + addr = ((*(uint64_t *)p_EnetAddr) >> 16); + p_Dtsec->addr = addr; + + tmpReg32 = (uint32_t)(addr); + SwapUint32P(&tmpReg32); + WRITE_UINT32(p_DtsecMemMap->macstnaddr1, tmpReg32); + + tmpReg32 = (uint32_t)(addr>>32); + SwapUint32P(&tmpReg32); + WRITE_UINT32(p_DtsecMemMap->macstnaddr2, tmpReg32); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecResetCounters (t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + + /* clear HW counters */ + WRITE_UINT32(p_Dtsec->p_MemMap->ecntrl, GET_UINT32(p_Dtsec->p_MemMap->ecntrl) | ECNTRL_CLRCNT); + + /* clear SW counters holding carries */ + memset((char *)&p_Dtsec->internalStatistics, (char)0x0, sizeof(t_InternalStatistics)); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecAddExactMatchMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *) h_Dtsec; + uint64_t ethAddr; + uint8_t paddrNum; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + if (ethAddr & GROUP_ADDRESS) + /* Multicast address has no effect in PADDR */ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address")); + + /* Make sure no PADDR contains this address */ + for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++) + if (p_Dtsec->indAddrRegUsed[paddrNum]) + if (p_Dtsec->paddr[paddrNum] == ethAddr) + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG); + + /* Find first unused PADDR */ + for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++) + if (!(p_Dtsec->indAddrRegUsed[paddrNum])) + { + /* mark this PADDR as used */ + p_Dtsec->indAddrRegUsed[paddrNum] = TRUE; + /* store address */ + p_Dtsec->paddr[paddrNum] = ethAddr; + + /* put in hardware */ + HardwareAddAddrInPaddr(p_Dtsec, &ethAddr, paddrNum); + p_Dtsec->numOfIndAddrInRegs++; + + return E_OK; + } + + /* No free PADDR */ + RETURN_ERROR(MAJOR, E_FULL, NO_MSG); +} + +/* .............................................................................. */ + +static t_Error DtsecDelExactMatchMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *) h_Dtsec; + uint64_t ethAddr; + uint8_t paddrNum; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + /* Find used PADDR containing this address */ + for (paddrNum = 0; paddrNum < DTSEC_NUM_OF_PADDRS; paddrNum++) + { + if ((p_Dtsec->indAddrRegUsed[paddrNum]) && + (p_Dtsec->paddr[paddrNum] == ethAddr)) + { + /* mark this PADDR as not used */ + p_Dtsec->indAddrRegUsed[paddrNum] = FALSE; + /* clear in hardware */ + HardwareClearAddrInPaddr(p_Dtsec, paddrNum); + p_Dtsec->numOfIndAddrInRegs--; + + return E_OK; + } + } + + RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG); +} + +/* ..............................................................................
*/ + +static t_Error DtsecAddHashMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + uint32_t crc; + uint8_t crcMirror, reg; + uint32_t bitMask; + t_EthHashEntry *p_HashEntry; + uint64_t ethAddr; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + /* CRC calculation */ + GET_MAC_ADDR_CRC(ethAddr, crc); + + /* calculate the "crc mirror" */ + crcMirror = MIRROR((uint8_t)crc); + + /* 3 MSB bits define the register */ + reg = (uint8_t)(crcMirror >> 5); + /* 5 LSB bits define the bit within the register */ + bitMask = 0x80000000 >> (crcMirror & 0x1f); + + /* Create element to be added to the driver hash table */ + p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry)); + p_HashEntry->addr = ethAddr; + INIT_LIST(&p_HashEntry->node); + + if (ethAddr & GROUP_ADDRESS) + { + /* Group Address */ + LIST_AddToTail(&(p_HashEntry->node), &(p_Dtsec->p_MulticastAddrHash->p_Lsts[crcMirror])); + /* Set the appropriate bit in GADDR0-7 */ + WRITE_UINT32(p_DtsecMemMap->gaddr[reg], + GET_UINT32(p_DtsecMemMap->gaddr[reg]) | bitMask); + } + else + { + LIST_AddToTail(&(p_HashEntry->node), &(p_Dtsec->p_UnicastAddrHash->p_Lsts[crcMirror])); + /* Set the appropriate bit in IADDR0-7 */ + WRITE_UINT32(p_DtsecMemMap->igaddr[reg], + GET_UINT32(p_DtsecMemMap->igaddr[reg]) | bitMask); + } + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecDelHashMacAddress(t_Handle h_Dtsec, t_EnetAddr *p_EthAddr) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + t_List *p_Pos; + uint32_t crc; + uint8_t crcMirror, reg; + uint32_t bitMask; + t_EthHashEntry *p_HashEntry = NULL; + uint64_t ethAddr; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + /* CRC calculation */ + GET_MAC_ADDR_CRC(ethAddr, crc); + + /* calculate the "crc mirror" */ + crcMirror = MIRROR((uint8_t)crc); + + /* 3 MSB bits define the register */ + reg =(uint8_t)( crcMirror >> 5); + /* 5 LSB bits define the bit within the register */ + bitMask = 0x80000000 >> (crcMirror & 0x1f); + + if (ethAddr & GROUP_ADDRESS) + { + /* Group Address */ + LIST_FOR_EACH(p_Pos, &(p_Dtsec->p_MulticastAddrHash->p_Lsts[crcMirror])) + { + p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos); + if(p_HashEntry->addr == ethAddr) + { + LIST_DelAndInit(&p_HashEntry->node); + XX_Free(p_HashEntry); + break; + } + } + if(LIST_IsEmpty(&p_Dtsec->p_MulticastAddrHash->p_Lsts[crcMirror])) + WRITE_UINT32(p_DtsecMemMap->gaddr[reg], + GET_UINT32(p_DtsecMemMap->gaddr[reg]) & ~bitMask); + } + else + { + /* Individual Address */ + LIST_FOR_EACH(p_Pos, &(p_Dtsec->p_UnicastAddrHash->p_Lsts[crcMirror])) + { + p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos); + if(p_HashEntry->addr == ethAddr) + { + LIST_DelAndInit(&p_HashEntry->node); + XX_Free(p_HashEntry); + break; + } + } + if(LIST_IsEmpty(&p_Dtsec->p_UnicastAddrHash->p_Lsts[crcMirror])) + WRITE_UINT32(p_DtsecMemMap->igaddr[reg], + GET_UINT32(p_DtsecMemMap->igaddr[reg]) & ~bitMask); + } + + /* address does not exist */ + ASSERT_COND(p_HashEntry != NULL); + + return E_OK; +} + + +/* .............................................................................. 
*/ + +static t_Error DtsecSetPromiscuous(t_Handle h_Dtsec, bool newVal) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + + tmpReg32 = GET_UINT32(p_DtsecMemMap->rctrl); + + if (newVal) + tmpReg32 |= RCTRL_PROM; + else + tmpReg32 &= ~RCTRL_PROM; + + WRITE_UINT32(p_DtsecMemMap->rctrl, tmpReg32); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecSetStatistics(t_Handle h_Dtsec, e_FmMacStatisticsLevel statisticsLevel) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + + p_Dtsec->statisticsLevel = statisticsLevel; + + switch (p_Dtsec->statisticsLevel) + { + case(e_FM_MAC_NONE_STATISTICS): + WRITE_UINT32(p_DtsecMemMap->cam1,0xffffffff); + WRITE_UINT32(p_DtsecMemMap->cam2,0xffffffff); + WRITE_UINT32(p_DtsecMemMap->ecntrl, GET_UINT32(p_DtsecMemMap->ecntrl) & ~ECNTRL_STEN); + WRITE_UINT32(p_DtsecMemMap->imask, GET_UINT32(p_DtsecMemMap->imask) & ~IMASK_MSROEN); + p_Dtsec->exceptions &= ~IMASK_MSROEN; + break; + case(e_FM_MAC_PARTIAL_STATISTICS): + WRITE_UINT32(p_DtsecMemMap->cam1, CAM1_ERRORS_ONLY); + WRITE_UINT32(p_DtsecMemMap->cam2, CAM2_ERRORS_ONLY); + WRITE_UINT32(p_DtsecMemMap->ecntrl, GET_UINT32(p_DtsecMemMap->ecntrl) | ECNTRL_STEN); + WRITE_UINT32(p_DtsecMemMap->imask, GET_UINT32(p_DtsecMemMap->imask) | IMASK_MSROEN); + p_Dtsec->exceptions |= IMASK_MSROEN; + break; + case(e_FM_MAC_FULL_STATISTICS): + WRITE_UINT32(p_DtsecMemMap->cam1,0); + WRITE_UINT32(p_DtsecMemMap->cam2,0); + WRITE_UINT32(p_DtsecMemMap->ecntrl, GET_UINT32(p_DtsecMemMap->ecntrl) | ECNTRL_STEN); + WRITE_UINT32(p_DtsecMemMap->imask, GET_UINT32(p_DtsecMemMap->imask) | IMASK_MSROEN); + p_Dtsec->exceptions |= IMASK_MSROEN; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG); + } + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error DtsecAdjustLink(t_Handle h_Dtsec, e_EnetSpeed speed, bool fullDuplex) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_HANDLE); + p_DtsecMemMap = p_Dtsec->p_MemMap; + SANITY_CHECK_RETURN_ERROR(p_DtsecMemMap, E_INVALID_HANDLE); + + if (!fullDuplex && + ((speed >= e_ENET_SPEED_1000) || + (ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode) == e_ENET_IF_SGMII))) + RETURN_ERROR(MAJOR, E_CONFLICT, ("Ethernet interface does not support Half Duplex mode")); + + p_Dtsec->enetMode = MAKE_ENET_MODE(ENET_INTERFACE_FROM_MODE(p_Dtsec->enetMode), speed); + p_Dtsec->halfDuplex = !fullDuplex; + + tmpReg32 = GET_UINT32(p_DtsecMemMap->maccfg2); + if(p_Dtsec->halfDuplex) + tmpReg32 &= ~MACCFG2_FULL_DUPLEX; + else + tmpReg32 |= MACCFG2_FULL_DUPLEX; + + tmpReg32 &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE); + if((p_Dtsec->enetMode == e_ENET_MODE_RGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_100)|| + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100)) + tmpReg32 |= MACCFG2_NIBBLE_MODE; + else if((p_Dtsec->enetMode == e_ENET_MODE_RGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_1000)|| + (p_Dtsec->enetMode == e_ENET_MODE_GMII_1000)) + tmpReg32 |= MACCFG2_BYTE_MODE; + WRITE_UINT32(p_DtsecMemMap->maccfg2, tmpReg32); + + tmpReg32 = GET_UINT32(p_DtsecMemMap->ecntrl); + if (!(tmpReg32 & ECNTRL_CFG_RO)) + { + if ((p_Dtsec->enetMode == e_ENET_MODE_RGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100)) + tmpReg32 |= ECNTRL_R100M; + else + tmpReg32 &= ~ECNTRL_R100M; + WRITE_UINT32(p_DtsecMemMap->ecntrl, tmpReg32); + } + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecGetId(t_Handle h_Dtsec, uint32_t *macId) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_HANDLE); + + *macId = p_Dtsec->macId; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error DtsecGetVersion(t_Handle h_Dtsec, uint32_t *macVersion) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecMemMap *p_DtsecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + *macVersion = GET_UINT32(p_DtsecMemMap->tsec_id1); + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error DtsecSetException(t_Handle h_Dtsec, e_FmMacExceptions exception, bool enable) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + uint32_t tmpReg, bitMask = 0; + t_DtsecMemMap *p_DtsecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Dtsec->p_DtsecDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_NULL_POINTER); + + p_DtsecMemMap = p_Dtsec->p_MemMap; + + if(exception != e_FM_MAC_EX_1G_1588_TS_RX_ERR) + { + GET_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_Dtsec->exceptions |= bitMask; + else + p_Dtsec->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + tmpReg = GET_UINT32(p_DtsecMemMap->imask); + if(enable) + tmpReg |= bitMask; + else + tmpReg &= ~bitMask; + WRITE_UINT32(p_DtsecMemMap->imask, tmpReg); + + /* warn if MIB OVFL is disabled and statistic gathering is enabled */ + if((exception == e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL) && + !enable && + (p_Dtsec->statisticsLevel != e_FM_MAC_NONE_STATISTICS)) + DBG(WARNING, ("Disabled MIB counters overflow exceptions. Counters value may be inaccurate due to unregistered overflow")); + + } + else + { + if(!p_Dtsec->ptpTsuEnabled) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exception valid for 1588 only")); + tmpReg = GET_UINT32(p_DtsecMemMap->tmr_pemask); + switch(exception){ + case(e_FM_MAC_EX_1G_1588_TS_RX_ERR): + if(enable) + { + p_Dtsec->enTsuErrExeption = TRUE; + WRITE_UINT32(p_DtsecMemMap->tmr_pemask, tmpReg | PEMASK_TSRE); + } + else + { + p_Dtsec->enTsuErrExeption = FALSE; + WRITE_UINT32(p_DtsecMemMap->tmr_pemask, tmpReg & ~PEMASK_TSRE); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + } + } + + return E_OK; +} + +/* ........................................................................... */ + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +static t_Error DtsecDumpRegs(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + int i = 0; + + DECLARE_DUMP; + + if (p_Dtsec->p_MemMap) + { + + DUMP_TITLE(p_Dtsec->p_MemMap, ("MAC %d: ", p_Dtsec->macId)); + DUMP_VAR(p_Dtsec->p_MemMap, tsec_id1); + DUMP_VAR(p_Dtsec->p_MemMap, tsec_id2); + DUMP_VAR(p_Dtsec->p_MemMap, ievent); + DUMP_VAR(p_Dtsec->p_MemMap, imask); + DUMP_VAR(p_Dtsec->p_MemMap, edis); + DUMP_VAR(p_Dtsec->p_MemMap, ecntrl); + DUMP_VAR(p_Dtsec->p_MemMap, ptv); + DUMP_VAR(p_Dtsec->p_MemMap, tmr_ctrl); + DUMP_VAR(p_Dtsec->p_MemMap, tmr_pevent); + DUMP_VAR(p_Dtsec->p_MemMap, tmr_pemask); + DUMP_VAR(p_Dtsec->p_MemMap, tctrl); + DUMP_VAR(p_Dtsec->p_MemMap, rctrl); + DUMP_VAR(p_Dtsec->p_MemMap, maccfg1); + DUMP_VAR(p_Dtsec->p_MemMap, maccfg2); + DUMP_VAR(p_Dtsec->p_MemMap, ipgifg); + DUMP_VAR(p_Dtsec->p_MemMap, hafdup); + DUMP_VAR(p_Dtsec->p_MemMap, maxfrm); + + DUMP_VAR(p_Dtsec->p_MemMap, macstnaddr1); + DUMP_VAR(p_Dtsec->p_MemMap, macstnaddr2); + + DUMP_SUBSTRUCT_ARRAY(i, 8) + { + DUMP_VAR(p_Dtsec->p_MemMap, macaddr[i].exact_match1); + DUMP_VAR(p_Dtsec->p_MemMap, macaddr[i].exact_match2); + } + } + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ + + +/*****************************************************************************/ +/* FM Init & Free API */ +/*****************************************************************************/ + +/* .............................................................................. 
*/ + +static t_Error DtsecInit(t_Handle h_Dtsec) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_DtsecDriverParam *p_DtsecDriverParam; + t_DtsecMemMap *p_DtsecMemMap; + int i; + uint32_t tmpReg32; + uint64_t addr; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_DtsecDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MemMap, E_INVALID_STATE); + + CHECK_INIT_PARAMETERS(p_Dtsec, CheckInitParameters); + + p_DtsecDriverParam = p_Dtsec->p_DtsecDriverParam; + p_Dtsec->halfDuplex = p_DtsecDriverParam->halfDuplex; + p_Dtsec->debugMode = p_DtsecDriverParam->debugMode; + p_DtsecMemMap = p_Dtsec->p_MemMap; + + /*************dtsec_id2******************/ + tmpReg32 = GET_UINT32(p_DtsecMemMap->tsec_id2); + + if ((p_Dtsec->enetMode == e_ENET_MODE_RGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_RMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_RMII_100)) + if(tmpReg32 & ID2_INT_REDUCED_OFF) + { + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("no support for reduced interface in current DTSEC version")); + } + + if ((p_Dtsec->enetMode == e_ENET_MODE_SGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_1000)|| + (p_Dtsec->enetMode == e_ENET_MODE_MII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_MII_100)) + if(tmpReg32 & ID2_INT_NORMAL_OFF) + { + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("no support for normal interface in current DTSEC version")); + } + /*************dtsec_id2******************/ + + /***************EDIS************************/ + WRITE_UINT32(p_DtsecMemMap->edis, p_DtsecDriverParam->errorDisabled); + /***************EDIS************************/ + + /***************ECNTRL************************/ + tmpReg32 = 0; + if ((p_Dtsec->enetMode == e_ENET_MODE_RGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_GMII_1000)) + tmpReg32 |= ECNTRL_GMIIM; + if ((p_Dtsec->enetMode == e_ENET_MODE_SGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_1000)) + tmpReg32 |= (ECNTRL_SGMIIM | ECNTRL_TBIM); + if (p_Dtsec->enetMode == e_ENET_MODE_QSGMII_1000) + tmpReg32 |= (ECNTRL_SGMIIM | ECNTRL_TBIM | ECNTRL_QSGMIIM); + if ((p_Dtsec->enetMode == e_ENET_MODE_RGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_10)|| + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_100)) + tmpReg32 |= ECNTRL_RPM; + if ((p_Dtsec->enetMode == e_ENET_MODE_RGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_RMII_100)) + tmpReg32 |= ECNTRL_R100M; + if ((p_Dtsec->enetMode == e_ENET_MODE_RMII_10) || (p_Dtsec->enetMode == e_ENET_MODE_RMII_100)) + tmpReg32 |= ECNTRL_RMM; + WRITE_UINT32(p_DtsecMemMap->ecntrl, tmpReg32); + /***************ECNTRL************************/ + + /***************PTV************************/ + tmpReg32 = 0; +#ifdef FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Dtsec->fmMacControllerDriver.h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + p_DtsecDriverParam->pauseTime += 2; + } +#endif /* FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 */ + if (p_DtsecDriverParam->pauseTime) + tmpReg32 |= (uint32_t)p_DtsecDriverParam->pauseTime; + + if (p_DtsecDriverParam->pauseExtended) + tmpReg32 |= ((uint32_t)p_DtsecDriverParam->pauseExtended) << PTV_PTE_OFST; + 
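+    /* Note: PTV holds the pause-time value (and the extended pause-time field,
+     * shifted by PTV_PTE_OFST) that the MAC uses when it sends PAUSE frames.
+     * The errata block above pads the configured time on early silicon
+     * revisions where the programmed value would otherwise be too short. */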
WRITE_UINT32(p_DtsecMemMap->ptv, tmpReg32); + /***************PTV************************/ + + /***************TCTRL************************/ + tmpReg32 = 0; + if(p_DtsecDriverParam->halfDuplex) + { + if(p_DtsecDriverParam->halfDulexFlowControlEn) + tmpReg32 |= TCTRL_THDF; + } + else + { + if(p_DtsecDriverParam->txTimeStampEn) + tmpReg32 |= TCTRL_TTSE; + } + WRITE_UINT32(p_DtsecMemMap->tctrl, tmpReg32); + /***************TCTRL************************/ + + /***************RCTRL************************/ + tmpReg32 = 0; + if (p_DtsecDriverParam->packetAlignmentPadding) + tmpReg32 |= ((uint32_t)(0x0000001f & p_DtsecDriverParam->packetAlignmentPadding)) << 16; + if (p_DtsecDriverParam->controlFrameAccept) + tmpReg32 |= RCTRL_CFA; + if (p_DtsecDriverParam->groupHashExtend) + tmpReg32 |= RCTRL_GHTX; + if(p_DtsecDriverParam->rxTimeStampEn) + tmpReg32 |= RCTRL_RTSE; + if (p_DtsecDriverParam->broadcReject) + tmpReg32 |= RCTRL_BC_REJ; + if (p_DtsecDriverParam->rxShortFrame) + tmpReg32 |= RCTRL_RSF; + if (p_DtsecDriverParam->promiscuousEnable) + tmpReg32 |= RCTRL_PROM; + if (p_DtsecDriverParam->exactMatch) + tmpReg32 |= RCTRL_EMEN; + + WRITE_UINT32(p_DtsecMemMap->rctrl, tmpReg32); + /***************RCTRL************************/ + + /* Assign a Phy Address to the TBI (TBIPA). */ + /* Done also in case that TBI is not selected to avoid */ + /* conflict with the external PHYs Physical address */ + WRITE_UINT32(p_DtsecMemMap->tbipa, p_DtsecDriverParam->tbiPhyAddr); + + /* Reset the management interface */ + WRITE_UINT32(p_Dtsec->p_MiiMemMap->miimcfg, MIIMCFG_RESET_MGMT); + WRITE_UINT32(p_Dtsec->p_MiiMemMap->miimcfg, ~MIIMCFG_RESET_MGMT); + /* Setup the MII Mgmt clock speed */ + WRITE_UINT32(p_Dtsec->p_MiiMemMap->miimcfg, + (uint32_t)GetMiiDiv((int32_t)(((p_Dtsec->fmMacControllerDriver.clkFreq*10)/2)/8))); + + if(p_Dtsec->enetMode == e_ENET_MODE_SGMII_1000) + { + uint16_t tmpReg16; + + /* Configure the TBI PHY Control Register */ + tmpReg16 = PHY_TBICON_SPEED2 | PHY_TBICON_SRESET; + + DTSEC_MII_WritePhyReg(p_Dtsec, p_DtsecDriverParam->tbiPhyAddr, 17, tmpReg16); + + tmpReg16 = PHY_TBICON_SPEED2; + + DTSEC_MII_WritePhyReg(p_Dtsec, p_DtsecDriverParam->tbiPhyAddr, 17, tmpReg16); + + if(!p_DtsecDriverParam->halfDuplex) + tmpReg16 |= PHY_CR_FULLDUPLEX | 0x8000 | PHY_CR_ANE; + + DTSEC_MII_WritePhyReg(p_Dtsec, p_DtsecDriverParam->tbiPhyAddr, 0, tmpReg16); + + tmpReg16 = 0x01a0; + DTSEC_MII_WritePhyReg(p_Dtsec, p_DtsecDriverParam->tbiPhyAddr, 4, tmpReg16); + + tmpReg16 = 0x1340; + DTSEC_MII_WritePhyReg(p_Dtsec, p_DtsecDriverParam->tbiPhyAddr, 0, tmpReg16); + } + + /***************TMR_CTL************************/ + WRITE_UINT32(p_DtsecMemMap->tmr_ctrl, 0); + + if(p_Dtsec->ptpTsuEnabled) + { + tmpReg32 = 0; + if (p_Dtsec->enTsuErrExeption) + tmpReg32 |= PEMASK_TSRE; + WRITE_UINT32(p_DtsecMemMap->tmr_pemask, tmpReg32); + WRITE_UINT32(p_DtsecMemMap->tmr_pevent, tmpReg32); + } + + /***************DEBUG************************/ + tmpReg32 = 0; + if(p_DtsecDriverParam->debugMode) + WRITE_UINT32(p_DtsecMemMap->tsec_id1, TSEC_ID1_DEBUG); + /***************DEBUG************************/ + + /***************MACCFG1***********************/ + WRITE_UINT32(p_DtsecMemMap->maccfg1, MACCFG1_SOFT_RESET); + WRITE_UINT32(p_DtsecMemMap->maccfg1, 0); + tmpReg32 = 0; + if(p_DtsecDriverParam->loopback) + tmpReg32 |= MACCFG1_LOOPBACK; + if(p_DtsecDriverParam->actOnRxPauseFrame) + tmpReg32 |= MACCFG1_RX_FLOW; + if(p_DtsecDriverParam->actOnTxPauseFrame) + tmpReg32 |= MACCFG1_TX_FLOW; + WRITE_UINT32(p_DtsecMemMap->maccfg1, tmpReg32); + 
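+    /* Only loopback and the Rx/Tx flow-control handling are programmed here,
+     * after the soft-reset pulse above; MACCFG1_RX_EN and MACCFG1_TX_EN are
+     * left cleared and are expected to be set later by the MAC enable path
+     * (DtsecEnable) once initialization is complete. */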
/***************MACCFG1***********************/ + + /***************MACCFG2***********************/ + tmpReg32 = 0; + if( (p_Dtsec->enetMode == e_ENET_MODE_RMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_RMII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_MII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_MII_100) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_RGMII_100)|| + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_10) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_100)) + tmpReg32 |= MACCFG2_NIBBLE_MODE; + else if((p_Dtsec->enetMode == e_ENET_MODE_RGMII_1000) || + (p_Dtsec->enetMode == e_ENET_MODE_SGMII_1000)|| + (p_Dtsec->enetMode == e_ENET_MODE_GMII_1000)|| + (p_Dtsec->enetMode == e_ENET_MODE_QSGMII_1000)) + tmpReg32 |= MACCFG2_BYTE_MODE; + + tmpReg32 |= (((uint32_t)p_DtsecDriverParam->preambleLength) & 0x0000000f)<< PREAMBLE_LENGTH_SHIFT; + + if(p_DtsecDriverParam->preambleRxEn) + tmpReg32 |= MACCFG2_PRE_AM_Rx_EN; + if(p_DtsecDriverParam->preambleTxEn) + tmpReg32 |= MACCFG2_PRE_AM_Tx_EN; + if(p_DtsecDriverParam->lengthCheckEnable) + tmpReg32 |= MACCFG2_LENGTH_CHECK; + if(p_DtsecDriverParam->padAndCrcEnable) + tmpReg32 |= MACCFG2_PAD_CRC_EN; + if(p_DtsecDriverParam->crcEnable) + tmpReg32 |= MACCFG2_CRC_EN; + if(!p_DtsecDriverParam->halfDuplex) + tmpReg32 |= MACCFG2_FULL_DUPLEX; + WRITE_UINT32(p_DtsecMemMap->maccfg2, tmpReg32); + /***************MACCFG2***********************/ + + /***************IPGIFG************************/ + tmpReg32 = 0; + ASSERT_COND(p_DtsecDriverParam->nonBackToBackIpg1 <= p_DtsecDriverParam->nonBackToBackIpg2); + tmpReg32 = (uint32_t)((((uint32_t)p_DtsecDriverParam->nonBackToBackIpg1 << + IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT) & IPGIFG_NON_BACK_TO_BACK_IPG_1) | + (((uint32_t)p_DtsecDriverParam->nonBackToBackIpg2 << + IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT) & IPGIFG_NON_BACK_TO_BACK_IPG_2) | + (((uint32_t)p_DtsecDriverParam->minIfgEnforcement << + IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT) & IPGIFG_MIN_IFG_ENFORCEMENT) | + ((uint32_t)p_DtsecDriverParam->backToBackIpg & IPGIFG_BACK_TO_BACK_IPG)); + WRITE_UINT32(p_DtsecMemMap->ipgifg, tmpReg32); + /***************IPGIFG************************/ + + /***************HAFDUP************************/ + tmpReg32 = 0; + if(p_DtsecDriverParam->alternateBackoffEnable) + { + tmpReg32 = (uint32_t) (HAFDUP_ALT_BEB | (((uint32_t)p_DtsecDriverParam->alternateBackoffVal & 0x0000000f) << + HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT)); + } + + if(p_DtsecDriverParam->backPressureNoBackoff) + tmpReg32 |= HAFDUP_BP_NO_BACKOFF; + if(p_DtsecDriverParam->noBackoff) + tmpReg32 |= HAFDUP_NO_BACKOFF; + if(p_DtsecDriverParam->excessDefer) + tmpReg32 |= HAFDUP_EXCESS_DEFER; + tmpReg32 |= (((uint32_t)p_DtsecDriverParam->maxRetransmission << + HAFDUP_RETRANSMISSION_MAX_SHIFT )& HAFDUP_RETRANSMISSION_MAX); + tmpReg32|= ((uint32_t)p_DtsecDriverParam->collisionWindow & HAFDUP_COLLISION_WINDOW); + + WRITE_UINT32(p_DtsecMemMap->hafdup, tmpReg32); + /***************HAFDUP************************/ + + /***************MAXFRM************************/ + /* Initialize MAXFRM */ + WRITE_UINT32(p_DtsecMemMap->maxfrm, + p_DtsecDriverParam->maxFrameLength); + err = FmSetMacMaxFrame(p_Dtsec->fmMacControllerDriver.h_Fm, + e_FM_MAC_1G, + p_Dtsec->fmMacControllerDriver.macId, + p_DtsecDriverParam->maxFrameLength); + if (err) + RETURN_ERROR(MAJOR, err, NO_MSG); + /***************MAXFRM************************/ + + /***************CAM1************************/ + WRITE_UINT32(p_DtsecMemMap->cam1,0xffffffff); + 
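+    /* cam1/cam2 are written with all ones to mask every MIB carry interrupt
+     * during init; DtsecSetStatistics(), called at the end of this function,
+     * reprograms both mask registers for the requested statistics level. */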
WRITE_UINT32(p_DtsecMemMap->cam2,0xffffffff);
+
+    /***************IMASK************************/
+    WRITE_UINT32(p_DtsecMemMap->imask, p_Dtsec->exceptions);
+    /***************IMASK************************/
+
+    /***************IEVENT************************/
+    WRITE_UINT32(p_DtsecMemMap->ievent, EVENTS_MASK);
+
+    /***************MACSTNADDR1/2*****************/
+    /* Initialize MAC Station Address registers (1 & 2) */
+    /* Station address has to be swapped (big endian to little endian) */
+    addr = p_Dtsec->addr;
+
+    tmpReg32 = (uint32_t)(addr);
+    SwapUint32P(&tmpReg32);
+    WRITE_UINT32(p_DtsecMemMap->macstnaddr1, tmpReg32);
+
+    tmpReg32 = (uint32_t)(addr>>32);
+    SwapUint32P(&tmpReg32);
+    WRITE_UINT32(p_DtsecMemMap->macstnaddr2, tmpReg32);
+    /***************MACSTNADDR1/2*****************/
+
+    /***************DEBUG*****************/
+    WRITE_UINT32(p_DtsecMemMap->tx_threshold, (uint32_t)(p_DtsecDriverParam->fifoTxThr & 0x7f));
+    WRITE_UINT32(p_DtsecMemMap->tx_watermark_high, (uint32_t)(p_DtsecDriverParam->fifoTxWatermarkH & 0x7f));
+    WRITE_UINT32(p_DtsecMemMap->rx_watermark_low, (uint32_t)(p_DtsecDriverParam->fifoRxWatermarkL & 0x7f));
+    /***************DEBUG*****************/
+
+    /*****************HASH************************/
+    for(i=0 ; i<NUM_OF_HASH_REGS ; i++)
+    {
+        /* Initialize IADDRx */
+        WRITE_UINT32(p_DtsecMemMap->igaddr[i], 0);
+        /* Initialize GADDRx */
+        WRITE_UINT32(p_DtsecMemMap->gaddr[i], 0);
+    }
+
+    p_Dtsec->p_MulticastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
+    if(!p_Dtsec->p_MulticastAddrHash)
+    {
+        FreeInitResources(p_Dtsec);
+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MC hash table is FAILED"));
+    }
+
+    p_Dtsec->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE);
+    if(!p_Dtsec->p_UnicastAddrHash)
+    {
+        FreeInitResources(p_Dtsec);
+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("UC hash table is FAILED"));
+    }
+
+    /* register err intr handler for dtsec to FPM (err)*/
+    FmRegisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC, p_Dtsec->macId, e_FM_INTR_TYPE_ERR, DtsecErrException , p_Dtsec);
+    /* register 1588 intr handler for TMR to FPM (normal)*/
+    FmRegisterIntr(p_Dtsec->fmMacControllerDriver.h_Fm, e_FM_MOD_1G_MAC_TMR, p_Dtsec->macId, e_FM_INTR_TYPE_NORMAL, Dtsec1588Exception , p_Dtsec);
+    /* register normal intr handler for dtsec to main interrupt controller. */
+    if (p_Dtsec->mdioIrq != NO_IRQ)
+    {
+        XX_SetIntr(p_Dtsec->mdioIrq, DtsecException, p_Dtsec);
+        XX_EnableIntr(p_Dtsec->mdioIrq);
+    }
+
+    XX_Free(p_DtsecDriverParam);
+    p_Dtsec->p_DtsecDriverParam = NULL;
+
+    err = DtsecSetStatistics(p_Dtsec, e_FM_MAC_FULL_STATISTICS);
+    if(err)
+    {
+        FreeInitResources(p_Dtsec);
+        RETURN_ERROR(MAJOR, err, NO_MSG);
+    }
+
+    return E_OK;
+}
+
+/* ........................................................................... */
+
+static t_Error DtsecFree(t_Handle h_Dtsec)
+{
+    t_Dtsec     *p_Dtsec = (t_Dtsec *)h_Dtsec;
+
+    SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE);
+
+    FreeInitResources(p_Dtsec);
+
+    if (p_Dtsec->p_DtsecDriverParam)
+    {
+        XX_Free(p_Dtsec->p_DtsecDriverParam);
+        p_Dtsec->p_DtsecDriverParam = NULL;
+    }
+    XX_Free (h_Dtsec);
+
+    return E_OK;
+}
+
+/* ..............................................................................
*/ + +static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver) +{ + p_FmMacControllerDriver->f_FM_MAC_Init = DtsecInit; + p_FmMacControllerDriver->f_FM_MAC_Free = DtsecFree; + + p_FmMacControllerDriver->f_FM_MAC_SetStatistics = DtsecSetStatistics; + p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = DtsecConfigLoopback; + p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = DtsecConfigMaxFrameLength; + + p_FmMacControllerDriver->f_FM_MAC_ConfigWan = NULL; /* Not supported on dTSEC */ + + p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = DtsecConfigPadAndCrc; + p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = DtsecConfigHalfDuplex; + p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = DtsecConfigLengthCheck; + p_FmMacControllerDriver->f_FM_MAC_ConfigException = DtsecConfigException; + + p_FmMacControllerDriver->f_FM_MAC_Enable = DtsecEnable; + p_FmMacControllerDriver->f_FM_MAC_Disable = DtsecDisable; + + p_FmMacControllerDriver->f_FM_MAC_SetException = DtsecSetException; + + p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = DtsecSetPromiscuous; + p_FmMacControllerDriver->f_FM_MAC_AdjustLink = DtsecAdjustLink; + + p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = DtsecEnable1588TimeStamp; + p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = DtsecDisable1588TimeStamp; + + p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = DtsecTxMacPause; + p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = DtsecRxIgnoreMacPause; + + p_FmMacControllerDriver->f_FM_MAC_ResetCounters = DtsecResetCounters; + p_FmMacControllerDriver->f_FM_MAC_GetStatistics = DtsecGetStatistics; + + p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = DtsecModifyMacAddress; + p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = DtsecAddHashMacAddress; + p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = DtsecDelHashMacAddress; + p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = DtsecAddExactMatchMacAddress; + p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = DtsecDelExactMatchMacAddress; + p_FmMacControllerDriver->f_FM_MAC_GetId = DtsecGetId; + p_FmMacControllerDriver->f_FM_MAC_GetVersion = DtsecGetVersion; + p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = DtsecGetMaxFrameLength; + + p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = DTSEC_MII_WritePhyReg; + p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = DTSEC_MII_ReadPhyReg; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + p_FmMacControllerDriver->f_FM_MAC_DumpRegs = DtsecDumpRegs; +#endif /* (defined(DEBUG_ERRORS) && ... */ +} + + +/*****************************************************************************/ +/* dTSEC Config Main Entry */ +/*****************************************************************************/ + +/* .............................................................................. */ + +t_Handle DTSEC_Config(t_FmMacParams *p_FmMacParam) +{ + t_Dtsec *p_Dtsec; + t_DtsecDriverParam *p_DtsecDriverParam; + uintptr_t baseAddr; + uint8_t i; + + SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL); + + baseAddr = p_FmMacParam->baseAddr; + /* allocate memory for the UCC GETH data structure. */ + p_Dtsec = (t_Dtsec *) XX_Malloc(sizeof(t_Dtsec)); + if (!p_Dtsec) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("dTSEC driver structure")); + return NULL; + } + /* Zero out * p_Dtsec */ + memset(p_Dtsec, 0, sizeof(t_Dtsec)); + InitFmMacControllerDriver(&p_Dtsec->fmMacControllerDriver); + + /* allocate memory for the dTSEC driver parameters data structure. 
*/ + p_DtsecDriverParam = (t_DtsecDriverParam *) XX_Malloc(sizeof(t_DtsecDriverParam)); + if (!p_DtsecDriverParam) + { + XX_Free(p_Dtsec); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("dTSEC driver parameters")); + return NULL; + } + /* Zero out */ + memset(p_DtsecDriverParam, 0, sizeof(t_DtsecDriverParam)); + + /* Plant parameter structure pointer */ + p_Dtsec->p_DtsecDriverParam = p_DtsecDriverParam; + + SetDefaultParam(p_DtsecDriverParam); + + for (i=0; i < sizeof(p_FmMacParam->addr); i++) + p_Dtsec->addr |= ((uint64_t)p_FmMacParam->addr[i] << ((5-i) * 8)); + + p_Dtsec->p_MemMap = (t_DtsecMemMap *)UINT_TO_PTR(baseAddr); + p_Dtsec->p_MiiMemMap = (t_MiiAccessMemMap *)UINT_TO_PTR(baseAddr + DTSEC_TO_MII_OFFSET); + p_Dtsec->enetMode = p_FmMacParam->enetMode; + p_Dtsec->macId = p_FmMacParam->macId; + p_Dtsec->exceptions = DEFAULT_exceptions; + p_Dtsec->mdioIrq = p_FmMacParam->mdioIrq; + p_Dtsec->f_Exception = p_FmMacParam->f_Exception; + p_Dtsec->f_Event = p_FmMacParam->f_Event; + p_Dtsec->h_App = p_FmMacParam->h_App; + + return p_Dtsec; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec.h @@ -0,0 +1,634 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File dtsec.h + + @Description FM dTSEC ... 
+*//***************************************************************************/ +#ifndef __DTSEC_H +#define __DTSEC_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" +#include "dtsec_mii_acc.h" +#include "fm_mac.h" + + +#define PEMASK_TSRE 0x00010000 + +#define IMASK_BREN 0x80000000 +#define IMASK_RXCEN 0x40000000 +#define IMASK_MSROEN 0x04000000 +#define IMASK_GTSCEN 0x02000000 +#define IMASK_BTEN 0x01000000 +#define IMASK_TXCEN 0x00800000 +#define IMASK_TXEEN 0x00400000 +#define IMASK_LCEN 0x00040000 +#define IMASK_CRLEN 0x00020000 +#define IMASK_XFUNEN 0x00010000 +#define IMASK_ABRTEN 0x00008000 +#define IMASK_IFERREN 0x00004000 +#define IMASK_MAGEN 0x00000800 +#define IMASK_MMRDEN 0x00000400 +#define IMASK_MMWREN 0x00000200 +#define IMASK_GRSCEN 0x00000100 +#define IMASK_TDPEEN 0x00000002 +#define IMASK_RDPEEN 0x00000001 + +#define EVENTS_MASK ((uint32_t)(IMASK_BREN | \ + IMASK_RXCEN | \ + IMASK_MSROEN | \ + IMASK_GTSCEN | \ + IMASK_BTEN | \ + IMASK_TXCEN | \ + IMASK_TXEEN | \ + IMASK_ABRTEN | \ + IMASK_LCEN | \ + IMASK_CRLEN | \ + IMASK_XFUNEN | \ + IMASK_IFERREN | \ + IMASK_MAGEN | \ + IMASK_MMRDEN | \ + IMASK_MMWREN | \ + IMASK_GRSCEN | \ + IMASK_TDPEEN | \ + IMASK_RDPEEN)) + +#define GET_EXCEPTION_FLAG(bitMask, exception) switch(exception){ \ + case e_FM_MAC_EX_1G_BAB_RX: \ + bitMask = IMASK_BREN; break; \ + case e_FM_MAC_EX_1G_RX_CTL: \ + bitMask = IMASK_RXCEN; break; \ + case e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET: \ + bitMask = IMASK_GTSCEN ; break; \ + case e_FM_MAC_EX_1G_BAB_TX: \ + bitMask = IMASK_BTEN ; break; \ + case e_FM_MAC_EX_1G_TX_CTL: \ + bitMask = IMASK_TXCEN ; break; \ + case e_FM_MAC_EX_1G_TX_ERR: \ + bitMask = IMASK_TXEEN ; break; \ + case e_FM_MAC_EX_1G_LATE_COL: \ + bitMask = IMASK_LCEN ; break; \ + case e_FM_MAC_EX_1G_COL_RET_LMT: \ + bitMask = IMASK_CRLEN ; break; \ + case e_FM_MAC_EX_1G_TX_FIFO_UNDRN: \ + bitMask = IMASK_XFUNEN ; break; \ + case e_FM_MAC_EX_1G_MAG_PCKT: \ + bitMask = IMASK_MAGEN ; break; \ + case e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET: \ + bitMask = IMASK_MMRDEN; break; \ + case e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET: \ + bitMask = IMASK_MMWREN ; break; \ + case e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET: \ + bitMask = IMASK_GRSCEN; break; \ + case e_FM_MAC_EX_1G_TX_DATA_ERR: \ + bitMask = IMASK_TDPEEN; break; \ + case e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL: \ + bitMask = IMASK_MSROEN ; break; \ + default: bitMask = 0;break;} + + +#define MAX_PACKET_ALIGNMENT 31 +#define MAX_INTER_PACKET_GAP 0x7f +#define MAX_INTER_PALTERNATE_BEB 0x0f +#define MAX_RETRANSMISSION 0x0f +#define MAX_COLLISION_WINDOW 0x03ff + + +/********************* From mac ext ******************************************/ +typedef uint32_t t_ErrorDisable; + +#define ERROR_DISABLE_TRANSMIT 0x00400000 +#define ERROR_DISABLE_LATE_COLLISION 0x00040000 +#define ERROR_DISABLE_COLLISION_RETRY_LIMIT 0x00020000 +#define ERROR_DISABLE_TxFIFO_UNDERRUN 0x00010000 +#define ERROR_DISABLE_TxABORT 0x00008000 +#define ERROR_DISABLE_INTERFACE 0x00004000 +#define ERROR_DISABLE_TxDATA_PARITY 0x00000002 +#define ERROR_DISABLE_RxDATA_PARITY 0x00000001 + +/*****************************************************************************/ +#define DTSEC_NUM_OF_PADDRS 15 /* number of pattern match registers (entries) */ + +#define GROUP_ADDRESS 0x0000010000000000LL /* Group address bit indication */ + +#define HASH_TABLE_SIZE 256 /* Hash table size (= 32 bits * 8 regs) */ + +#define DTSEC_TO_MII_OFFSET 0x1120 /* number of pattern match registers (entries) */ + +#define DEFAULT_errorDisabled 0 +#define 
DEFAULT_promiscuousEnable FALSE +#define DEFAULT_pauseExtended 0x0 +#define DEFAULT_pauseTime 0xf000 +#define DEFAULT_halfDuplex FALSE +#define DEFAULT_halfDulexFlowControlEn FALSE +#define DEFAULT_txTimeStampEn FALSE +#define DEFAULT_rxTimeStampEn FALSE +#define DEFAULT_packetAlignment 0 +#define DEFAULT_controlFrameAccept FALSE +#define DEFAULT_groupHashExtend FALSE +#define DEFAULT_broadcReject FALSE +#define DEFAULT_rxShortFrame TRUE +#define DEFAULT_exactMatch FALSE +#define DEFAULT_debugMode FALSE +#define DEFAULT_loopback FALSE +#define DEFAULT_actOnRxPauseFrame TRUE +#define DEFAULT_actOnTxPauseFrame TRUE + +#define DEFAULT_PreAmLength 0x7 +#define DEFAULT_PreAmRxEn FALSE +#define DEFAULT_PreAmTxEn FALSE +#define DEFAULT_lengthCheckEnable FALSE +#define DEFAULT_padAndCrcEnable TRUE +#define DEFAULT_crcEnable FALSE + +#define DEFAULT_nonBackToBackIpg1 0x40 +#define DEFAULT_nonBackToBackIpg2 0x60 +#define DEFAULT_minIfgEnforcement 0x50 +#define DEFAULT_backToBackIpg 0x60 + +#define DEFAULT_altBackoffVal 0x0A +#define DEFAULT_altBackoffEnable FALSE +#define DEFAULT_backPressureNoBackoff FALSE +#define DEFAULT_noBackoff FALSE +#define DEFAULT_excessDefer TRUE +#define DEFAULT_maxRetransmission 0x0F +#define DEFAULT_collisionWindow 0x37 + +#define DEFAULT_maxFrameLength 0x600 + +#define DEFAULT_collisionWindow 0x37 + +#define DEFAULT_fifoTxThr 0x10 +#define DEFAULT_fifoTxWatermarkH 0x7e +#define DEFAULT_fifoRxWatermarkL 0x08 +#define DEFAULT_tbiPhyAddr 5 + +#define DEFAULT_exceptions ((uint32_t)(IMASK_BREN | \ + IMASK_RXCEN | \ + IMASK_BTEN | \ + IMASK_TXCEN | \ + IMASK_TXEEN | \ + IMASK_ABRTEN | \ + IMASK_LCEN | \ + IMASK_CRLEN | \ + IMASK_XFUNEN | \ + IMASK_IFERREN | \ + IMASK_MAGEN | \ + IMASK_TDPEEN | \ + IMASK_RDPEEN)) + + +#define MAX_PHYS 32 /* maximum number of phys */ + +#define DTSEC_ID1_ID 0xffff0000 +#define DTSEC_ID1_REV_MJ 0x0000FF00 +#define DTSEC_ID1_REV_MN 0x000000ff + +#define ID2_INT_REDUCED_OFF 0x00010000 +#define ID2_INT_NORMAL_OFF 0x00020000 + +#define ECNTRL_CLRCNT 0x00004000 +#define ECNTRL_AUTOZ 0x00002000 +#define ECNTRL_STEN 0x00001000 +#define ECNTRL_CFG_RO 0x80000000 +#define ECNTRL_GMIIM 0x00000040 +#define ECNTRL_TBIM 0x00000020 +#define ECNTRL_SGMIIM 0x00000002 +#define ECNTRL_RPM 0x00000010 +#define ECNTRL_R100M 0x00000008 +#define ECNTRL_RMM 0x00000004 +#define ECNTRL_QSGMIIM 0x00000001 + +#define TCTRL_THDF 0x00000800 +#define TCTRL_TTSE 0x00000040 +#define TCTRL_GTS 0x00000020 +#define TCTRL_TFC_PAUSE 0x00000010 + +/* PTV offsets */ +#define PTV_PTE_OFST 16 + +#define RCTRL_CFA 0x00008000 +#define RCTRL_GHTX 0x00000400 +#define RCTRL_RTSE 0x00000040 +#define RCTRL_GRS 0x00000020 +#define RCTRL_BC_REJ 0x00000010 +#define RCTRL_MPROM 0x00000008 +#define RCTRL_RSF 0x00000004 +#define RCTRL_EMEN 0x00000002 +#define RCTRL_UPROM 0x00000001 +#define RCTRL_PROM (RCTRL_UPROM | RCTRL_MPROM) + +#define TMR_CTL_ESFDP 0x00000800 +#define TMR_CTL_ESFDE 0x00000400 + +#define TSEC_ID1_DEBUG 0x00e00c00 +#define DEBUG_ENABLE 0x80000000 +#define DPERROR_Tx_ERROR_ON_SEC 0x00400000 +#define DPERROR_Tx_ERROR_ON_WRITE 0x10000000 +#define DPERROR_Rx_ERROR_ON_SEC 0x00000040 +#define DPERROR_Rx_ERROR_ON_WRITE 0x00001000 +#define DPERROR_STT 0x80000000 +#define DPERROR_STR 0x00008000 + +#define MACCFG1_SOFT_RESET 0x80000000 +#define MACCFG1_LOOPBACK 0x00000100 +#define MACCFG1_RX_FLOW 0x00000020 +#define MACCFG1_TX_FLOW 0x00000010 +#define MACCFG1_TX_EN 0x00000001 +#define MACCFG1_RX_EN 0x00000004 +#define MACCFG1_RESET_RxMC 0x00080000 +#define MACCFG1_RESET_TxMC 0x00040000 
+#define MACCFG1_RESET_RxFUN 0x00020000 +#define MACCFG1_RESET_TxFUN 0x00010000 + +#define MACCFG2_NIBBLE_MODE 0x00000100 +#define MACCFG2_BYTE_MODE 0x00000200 +#define MACCFG2_PRE_AM_Rx_EN 0x00000080 +#define MACCFG2_PRE_AM_Tx_EN 0x00000040 +#define MACCFG2_LENGTH_CHECK 0x00000010 +#define MACCFG2_MAGIC_PACKET_EN 0x00000008 +#define MACCFG2_PAD_CRC_EN 0x00000004 +#define MACCFG2_CRC_EN 0x00000002 +#define MACCFG2_FULL_DUPLEX 0x00000001 + +#define PREAMBLE_LENGTH_SHIFT 12 + +#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT 24 +#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT 16 +#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT 8 + +#define IPGIFG_NON_BACK_TO_BACK_IPG_1 0x7F000000 +#define IPGIFG_NON_BACK_TO_BACK_IPG_2 0x007F0000 +#define IPGIFG_MIN_IFG_ENFORCEMENT 0x0000FF00 +#define IPGIFG_BACK_TO_BACK_IPG 0x0000007F + +#define HAFDUP_ALT_BEB 0x00080000 +#define HAFDUP_BP_NO_BACKOFF 0x00040000 +#define HAFDUP_NO_BACKOFF 0x00020000 +#define HAFDUP_EXCESS_DEFER 0x00010000 +#define HAFDUP_COLLISION_WINDOW 0x000003ff + +#define HAFDUP_ALTERNATE_BEB_TRUNCATION_SHIFT 20 +#define HAFDUP_RETRANSMISSION_MAX_SHIFT 12 +#define HAFDUP_RETRANSMISSION_MAX 0x0000f000 + +#define NUM_OF_HASH_REGS 8 /* Number of hash table registers */ + +#define DEBUG_GET_FIFO_READ_INDEX 0x007f0000 +#define DEBUG_GET_FIFO_WRITE_INDEX 0x0000007f +/* Pause Time Value Register */ +#define PTV_PTE_SHIFT 16 + +#define MASK22BIT 0x003FFFFF +#define MASK16BIT 0x0000FFFF +#define MASK12BIT 0x00000FFF +#define MASK8BIT 0x000000FF + +#define VAL32BIT 0x100000000LL +#define VAL22BIT 0x00400000 +#define VAL16BIT 0x00010000 +#define VAL12BIT 0x00001000 + +/* PHY Control Register */ +#define PHY_CR_LOOPBACK 0x4000 +#define PHY_CR_SPEED0 0x2000 +#define PHY_CR_ANE 0x1000 +#define PHY_CR_FULLDUPLEX 0x0100 +#define PHY_CR_SPEED1 0x0040 + +#define PHY_TBICON_SRESET 0x8000 +#define PHY_TBICON_SPEED2 0x0020 + +/* CAR1/2 bits */ +#define CAR1_TR64 0x80000000 +#define CAR1_TR127 0x40000000 +#define CAR1_TR255 0x20000000 +#define CAR1_TR511 0x10000000 +#define CAR1_TRK1 0x08000000 +#define CAR1_TRMAX 0x04000000 +#define CAR1_TRMGV 0x02000000 + +#define CAR1_RBYT 0x00010000 +#define CAR1_RPKT 0x00008000 +#define CAR1_RMCA 0x00002000 +#define CAR1_RBCA 0x00001000 +#define CAR1_RXPF 0x00000400 +#define CAR1_RALN 0x00000100 +#define CAR1_RFLR 0x00000080 +#define CAR1_RCDE 0x00000040 +#define CAR1_RCSE 0x00000020 +#define CAR1_RUND 0x00000010 +#define CAR1_ROVR 0x00000008 +#define CAR1_RFRG 0x00000004 +#define CAR1_RJBR 0x00000002 +#define CAR1_RDRP 0x00000001 + +#define CAR2_TFCS 0x00040000 +#define CAR2_TBYT 0x00002000 +#define CAR2_TPKT 0x00001000 +#define CAR2_TMCA 0x00000800 +#define CAR2_TBCA 0x00000400 +#define CAR2_TXPF 0x00000200 +#define CAR2_TDRP 0x00000001 + +#define CAM1_ERRORS_ONLY (CAR1_RXPF | \ + CAR1_RALN | \ + CAR1_RFLR | \ + CAR1_RCDE | \ + CAR1_RCSE | \ + CAR1_RUND | \ + CAR1_ROVR | \ + CAR1_RFRG | \ + CAR1_RJBR | \ + CAR1_RDRP) + +#define CAM2_ERRORS_ONLY (CAR2_TFCS | CAR2_TXPF | CAR2_TDRP) + +typedef struct t_InternalStatistics +{ + uint64_t tr64; + uint64_t tr127; + uint64_t tr255; + uint64_t tr511; + uint64_t tr1k; + uint64_t trmax; + uint64_t trmgv; + uint64_t rfrg; + uint64_t rjbr; + uint64_t rdrp; + uint64_t raln; + uint64_t rund; + uint64_t rovr; + uint64_t rxpf; + uint64_t txpf; + uint64_t rbyt; + uint64_t rpkt; + uint64_t rmca; + uint64_t rbca; + uint64_t rflr; + uint64_t rcde; + uint64_t rcse; + uint64_t tbyt; + uint64_t tpkt; + uint64_t tmca; + uint64_t tbca; + uint64_t tdrp; + uint64_t tfcs; +} t_InternalStatistics; + +#if 
defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +typedef _Packed struct +{ + uint32_t exact_match1; /* octets 1-4 */ + uint32_t exact_match2; /* octets 5-6 */ +} _PackedType macRegs; + +typedef _Packed struct +{ + volatile uint32_t tsec_id1; /* 0x000 ETSEC_ID register */ + volatile uint32_t tsec_id2; /* 0x004 ETSEC_ID2 register */ + volatile uint32_t ievent; /* 0x008 Interrupt event register */ + volatile uint32_t imask; /* 0x00C Interrupt mask register */ + volatile uint32_t edis; /* 0x010 Error disabled register */ + volatile uint32_t ecntrl; /* 0x014 E control register */ + volatile uint32_t ptv; /* 0x018 Pause time value register */ + volatile uint32_t tbipa; /* 0x01C TBI PHY address register */ + volatile uint32_t tmr_ctrl; /* 0x020 Time-stamp Control register */ + volatile uint32_t tmr_pevent; /* 0x024 Time-stamp event register */ + volatile uint32_t tmr_pemask; /* 0x028 Timer event mask register */ + volatile uint32_t DTSEC_RESERVED2; /* 0x02C */ + volatile uint32_t iobistctl; /* 0x030 IO BIST Control register */ + volatile uint32_t DTSEC_RESERVED3[3]; /* 0x034 */ + + volatile uint32_t tctrl; /* 0x040 Transmit control register */ + volatile uint32_t DTSEC_RESERVED4[3]; /* 0x044-0x04C */ + volatile uint32_t rctrl; /* 0x050 Receive control register */ + volatile uint32_t DTSEC_RESERVED5[11]; /* 0x054- 0x07C */ + + volatile uint32_t igaddr[8]; /* 0x080-0x09C Individual/group address registers 0-7 */ + volatile uint32_t gaddr[8]; /* 0x0A0-0x0BC Group address registers 0-7 */ + volatile uint32_t ETSEC_RESERVED6[16]; /* 0x0C0-0x0FC */ + + volatile uint32_t maccfg1; /* 0x100 MAC configuration #1 */ + volatile uint32_t maccfg2; /* 0x104 MAC configuration #2 */ + volatile uint32_t ipgifg; /* 0x108 IPG/IFG */ + volatile uint32_t hafdup; /* 0x10C Half-duplex */ + volatile uint32_t maxfrm; /* 0x110 Maximum frame */ + volatile uint32_t DTSEC_RESERVED7[3]; /* 0x114-0x11C register */ + t_MiiAccessMemMap miiMemMap; + volatile uint32_t ifctrl; /* 0x138 MII Mgmt:interface control */ + volatile uint32_t ifstat; /* 0x13C Interface status */ + volatile uint32_t macstnaddr1; /* 0x140 Station Address,part 1 */ + volatile uint32_t macstnaddr2; /* 0x144 Station Address,part 2 */ + volatile macRegs macaddr[DTSEC_NUM_OF_PADDRS]; /* 0x148-0x1BC mac exact match addresses 1-15, parts 1-2 */ + volatile uint32_t DTSEC_RESERVED8[16]; /* 0x1C0-0x1FC register */ + + /* RMON MIB REGISTERS */ + /* TRANSMIT and RECEIVE COUNTERS */ + + volatile uint32_t tr64; /* 0x200 transmit and receive 64 byte frame counter */ + volatile uint32_t tr127; /* 0x204 transmit and receive 65 to 127 byte frame counter */ + volatile uint32_t tr255; /* 0x208 transmit and receive 128 to 255 byte frame counter */ + volatile uint32_t tr511; /* 0x20C transmit and receive 256 to 511 byte frame counter */ + volatile uint32_t tr1k; /* 0x210 transmit and receive 512 to 1023 byte frame counter */ + volatile uint32_t trmax; /* 0x214 transmit and receive 1024 to 1518 byte frame counter */ + volatile uint32_t trmgv; /* 0x218 transmit and receive 1519 to 1522 byte good VLAN frame count */ + + /* RECEIVE COUNTERS */ + volatile uint32_t rbyt; /* 0x21C receive byte counter */ + volatile uint32_t rpkt; /* 0x220 receive packet counter */ + volatile uint32_t rfcs; /* 0x224 receive FCS error counter */ + volatile uint32_t rmca; /* 0x228 RMCA receive multicast packet counter */ + volatile uint32_t rbca; /* 0x22C receive broadcast packet counter */ + volatile uint32_t rxcf; /* 0x230 
receive control frame packet counter */ + volatile uint32_t rxpf; /* 0x234 receive PAUSE frame packet counter */ + volatile uint32_t rxuo; /* 0x238 receive unknown OP code counter */ + volatile uint32_t raln; /* 0x23C receive alignment error counter */ + volatile uint32_t rflr; /* 0x240 receive frame length error counter */ + volatile uint32_t rcde; /* 0x244 receive code error counter */ + volatile uint32_t rcse; /* 0x248 receive carrier sense error counter */ + volatile uint32_t rund; /* 0x24C receive undersize packet counter */ + volatile uint32_t rovr; /* 0x250 receive oversize packet counter */ + volatile uint32_t rfrg; /* 0x254 receive fragments counter */ + volatile uint32_t rjbr; /* 0x258 receive jabber counter */ + volatile uint32_t rdrp; /* 0x25C receive drop */ + + /* TRANSMIT COUNTERS */ + volatile uint32_t tbyt; /* 0x260 transmit byte counter */ + volatile uint32_t tpkt; /* 0x264 transmit packet counter */ + volatile uint32_t tmca; /* 0x268 transmit multicast packet counter */ + volatile uint32_t tbca; /* 0x26C transmit broadcast packet counter */ + volatile uint32_t txpf; /* 0x270 transmit PAUSE control frame counter */ + volatile uint32_t tdfr; /* 0x274 transmit deferral packet counter */ + volatile uint32_t tedf; /* 0x278 transmit excessive deferral packet counter */ + volatile uint32_t tscl; /* 0x27C transmit single collision packet counter */ + volatile uint32_t tmcl; /* 0x280 transmit multiple collision packet counter */ + volatile uint32_t tlcl; /* 0x284 transmit late collision packet counter */ + volatile uint32_t txcl; /* 0x288 transmit excessive collision packet counter */ + volatile uint32_t tncl; /* 0x28C transmit total collision counter */ + volatile uint32_t DTSEC_RESERVED9; /* 0x290 */ + volatile uint32_t tdrp; /* 0x294 transmit drop frame counter */ + volatile uint32_t tjbr; /* 0x298 transmit jabber frame counter */ + volatile uint32_t tfcs; /* 0x29C transmit FCS error counter */ + volatile uint32_t txcf; /* 0x2A0 transmit control frame counter */ + volatile uint32_t tovr; /* 0x2A4 transmit oversize frame counter */ + volatile uint32_t tund; /* 0x2A8 transmit undersize frame counter */ + volatile uint32_t tfrg; /* 0x2AC transmit fragments frame counter */ + + /* GENERAL REGISTERS */ + volatile uint32_t car1; /* 0x2B0 carry register one register* */ + volatile uint32_t car2; /* 0x2B4 carry register two register* */ + volatile uint32_t cam1; /* 0x2B8 carry register one mask register */ + volatile uint32_t cam2; /* 0x2BC carry register two mask register */ + volatile uint32_t DTSEC_RESERVED10[16]; /* 0x2C0-0x2FC */ + + /* Debug and Factory Test Registers */ + volatile uint32_t debug; /* 0x300 DEBUG - Debug Register */ + volatile uint32_t dperror; /* 0x304 DPERROR - Parity Error Register */ + volatile uint32_t hwassert; /* 0x308 HWASSERT */ + volatile uint32_t RESERVED11; /* 0x30C Reserved */ + volatile uint32_t rx_fifo_ptr; /* 0x310 RXFIFOPTR - Rx FIFO R/W Pointer Register */ + volatile uint32_t rx_fifo_dath; /* 0x314 RXFIFODATH - Rx FIFO Data Register */ + volatile uint32_t rx_fifo_datl; /* 0x318 RXFIFODATL - Rx FIFO Data Register */ + volatile uint32_t rx_fifo_stat; /* 0x31C RXFIFOSTAT - Rx FIFO Status Register */ + volatile uint32_t tx_fifo_ptr; /* 0x320 TXFIFOPTR - Tx FIFO R/W Pointer Register */ + volatile uint32_t tx_fifo_dath; /* 0x324 TXFIFODATH - Rx FIFO Data Register */ + volatile uint32_t tx_fifo_datl; /* 0x328 TXFIFODATL - Rx FIFO Data Register */ + volatile uint32_t tx_fifo_stat; /* 0x32C TXFIFOSTAT - Tx FIFO Status Register */ + volatile uint32_t 
pkt_rcv_cnt; /* 0x330 PKTRCVCNT - Number of packets accepted and written to Rx FIFO */ + volatile uint32_t RESERVED12[3]; /* 0x334-0x33C Reserved */ + volatile uint32_t tx_threshold; /* 0x340 Transmit threshold; Number of entries (4 bytes units) before starting to transmit to the MAC */ + volatile uint32_t tx_watermark_high;/* 0x344 Transmit watermark high; Number of entries (4 byte units) before de-asserting Ready to packet Interface */ + volatile uint32_t rx_watermark_low; /* 0x348 Receive watermark low; Number of entries (4 byte units) before unloading to packet Interface */ +} _PackedType t_DtsecMemMap; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +typedef struct { + uint32_t errorDisabled; + bool halfDuplex; + uint16_t pauseTime; + uint16_t pauseExtended; + uint8_t tbiPhyAddr; /**< TBI Physical address (1-31) [DEFAULT_tbiPhyAddr] */ + + bool autoZeroCounters; + bool promiscuousEnable; + + bool halfDulexFlowControlEn; + bool txTimeStampEn; + bool rxTimeStampEn; + + uint8_t packetAlignmentPadding; + bool controlFrameAccept; + bool groupHashExtend; + bool broadcReject; + bool rxShortFrame; + bool exactMatch; + + bool debugMode; + + bool loopback; + bool actOnRxPauseFrame; + bool actOnTxPauseFrame; + + uint8_t nonBackToBackIpg1; + uint8_t nonBackToBackIpg2; + uint8_t minIfgEnforcement; + uint8_t backToBackIpg; + + uint8_t preambleLength; + bool preambleRxEn; + bool preambleTxEn; + bool lengthCheckEnable; + bool magicPacketEnable; + bool padAndCrcEnable; + bool crcEnable; + + bool alternateBackoffEnable; + uint8_t alternateBackoffVal; + bool backPressureNoBackoff; + bool noBackoff; + bool excessDefer; + uint8_t maxRetransmission; + uint16_t collisionWindow; + + uint16_t maxFrameLength; + + uint8_t fifoTxThr; + uint8_t fifoTxWatermarkH; + uint8_t fifoRxWatermarkL; +} t_DtsecDriverParam; + +typedef struct { + t_FmMacControllerDriver fmMacControllerDriver; + t_Handle h_App; /**< Handle to the upper layer application */ + t_DtsecMemMap *p_MemMap; /**< pointer to dTSEC memory mapped registers. */ + t_MiiAccessMemMap *p_MiiMemMap; /**< pointer to dTSEC MII memory mapped registers. */ + uint64_t addr; /**< MAC address of device; */ + e_EnetMode enetMode; /**< Ethernet physical interface */ + t_FmMacExceptionCallback *f_Exception; + int mdioIrq; + t_FmMacExceptionCallback *f_Event; + bool indAddrRegUsed[DTSEC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */ + uint64_t paddr[DTSEC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */ + uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. 
*/ + bool debugMode; + bool halfDuplex; + t_InternalStatistics internalStatistics; + t_EthHash *p_MulticastAddrHash; /* pointer to driver's global address hash table */ + t_EthHash *p_UnicastAddrHash; /* pointer to driver's individual address hash table */ + uint8_t macId; + uint32_t exceptions; + bool ptpTsuEnabled; + bool enTsuErrExeption; + e_FmMacStatisticsLevel statisticsLevel; + + t_DtsecDriverParam *p_DtsecDriverParam; +} t_Dtsec; + + +t_Error DTSEC_MII_WritePhyReg(t_Handle h_Dtsec, uint8_t phyAddr, uint8_t reg, uint16_t data); +t_Error DTSEC_MII_ReadPhyReg(t_Handle h_Dtsec, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data); + + +#endif /* __DTSEC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec_mii_acc.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec_mii_acc.c @@ -0,0 +1,120 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File dtsec_mii_acc.c + + @Description FM dtsec MII register access MAC ... 
+*//***************************************************************************/ + +#include "error_ext.h" +#include "std_ext.h" +#include "fm_mac.h" +#include "dtsec.h" + + +/*****************************************************************************/ +t_Error DTSEC_MII_WritePhyReg(t_Handle h_Dtsec, + uint8_t phyAddr, + uint8_t reg, + uint16_t data) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_MiiAccessMemMap *p_MiiAccess; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MiiMemMap, E_INVALID_HANDLE); + + p_MiiAccess = p_Dtsec->p_MiiMemMap; + + /* Stop the MII management read cycle */ + WRITE_UINT32(p_MiiAccess->miimcom, 0); + /* Dummy read to make sure MIIMCOM is written */ + tmpReg = GET_UINT32(p_MiiAccess->miimcom); + + /* Setting up MII Management Address Register */ + tmpReg = (uint32_t)((phyAddr << MIIMADD_PHY_ADDR_SHIFT) | reg); + WRITE_UINT32(p_MiiAccess->miimadd, tmpReg); + + /* Setting up MII Management Control Register with data */ + WRITE_UINT32(p_MiiAccess->miimcon, (uint32_t)data); + /* Dummy read to make sure MIIMCON is written */ + tmpReg = GET_UINT32(p_MiiAccess->miimcon); + + /* Wait till MII management write is complete */ + while ((GET_UINT32(p_MiiAccess->miimind)) & MIIMIND_BUSY) ; + + return E_OK; +} + +/*****************************************************************************/ +t_Error DTSEC_MII_ReadPhyReg(t_Handle h_Dtsec, + uint8_t phyAddr, + uint8_t reg, + uint16_t *p_Data) +{ + t_Dtsec *p_Dtsec = (t_Dtsec *)h_Dtsec; + t_MiiAccessMemMap *p_MiiAccess; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Dtsec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Dtsec->p_MiiMemMap, E_INVALID_HANDLE); + + p_MiiAccess = p_Dtsec->p_MiiMemMap; + + /* Setting up the MII Management Address Register */ + tmpReg = (uint32_t)((phyAddr << MIIMADD_PHY_ADDR_SHIFT) | reg); + WRITE_UINT32(p_MiiAccess->miimadd, tmpReg); + + /* Perform an MII management read cycle */ + WRITE_UINT32(p_MiiAccess->miimcom, MIIMCOM_READ_CYCLE); + /* Dummy read to make sure MIIMCOM is written */ + tmpReg = GET_UINT32(p_MiiAccess->miimcom); + + /* Wait till MII management read is complete */ + while ((GET_UINT32(p_MiiAccess->miimind)) & MIIMIND_BUSY) ; + + /* Read MII management status */ + *p_Data = (uint16_t)GET_UINT32(p_MiiAccess->miimstat); + + WRITE_UINT32(p_MiiAccess->miimcom, 0); + /* Dummy read to make sure MIIMCOM is written */ + tmpReg = GET_UINT32(p_MiiAccess->miimcom); + + if (*p_Data == 0xffff) + RETURN_ERROR(MINOR, E_NO_DEVICE, + ("Read wrong data (0xffff): phyAddr 0x%x, reg 0x%x", + phyAddr, reg)); + + return E_OK; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec_mii_acc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/dtsec_mii_acc.h @@ -0,0 +1,78 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DTSEC_MII_ACC_H +#define __DTSEC_MII_ACC_H + +#include "std_ext.h" + + +/* MII Management Configuration Register */ +#define MIIMCFG_RESET_MGMT 0x80000000 +#define MIIMCFG_MGMT_CLOCK_SELECT 0x00000007 + +/* MII Management Command Register */ +#define MIIMCOM_READ_CYCLE 0x00000001 +#define MIIMCOM_SCAN_CYCLE 0x00000002 + +/* MII Management Address Register */ +#define MIIMADD_PHY_ADDR_SHIFT 8 + +/* MII Management Indicator Register */ +#define MIIMIND_BUSY 0x00000001 + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +/*----------------------------------------------------*/ +/* MII Configuration Control Memory Map Registers */ +/*----------------------------------------------------*/ +typedef _Packed struct t_MiiAccessMemMap +{ + volatile uint32_t miimcfg; /* MII Mgmt:configuration */ + volatile uint32_t miimcom; /* MII Mgmt:command */ + volatile uint32_t miimadd; /* MII Mgmt:address */ + volatile uint32_t miimcon; /* MII Mgmt:control 3 */ + volatile uint32_t miimstat; /* MII Mgmt:status */ + volatile uint32_t miimind; /* MII Mgmt:indicators */ +} _PackedType t_MiiAccessMemMap ; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +#endif /* __DTSEC_MII_ACC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/fm_mac.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/fm_mac.c @@ -0,0 +1,560 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_mac.c + + @Description FM MAC ... +*//***************************************************************************/ +#include "std_ext.h" +#include "string_ext.h" +#include "sprint_ext.h" +#include "error_ext.h" +#include "fm_ext.h" + +#include "fm_common.h" +#include "fm_mac.h" + + +/* ........................................................................... */ + +t_Handle FM_MAC_Config (t_FmMacParams *p_FmMacParam) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver; + + SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_INVALID_HANDLE, NULL); + + if(ENET_SPEED_FROM_MODE(p_FmMacParam->enetMode) < e_ENET_SPEED_10000) + p_FmMacControllerDriver = (t_FmMacControllerDriver *)DTSEC_Config(p_FmMacParam); + else + p_FmMacControllerDriver = (t_FmMacControllerDriver *)TGEC_Config(p_FmMacParam); + + if (!p_FmMacControllerDriver) + return NULL; + + p_FmMacControllerDriver->h_Fm = p_FmMacParam->h_Fm; + p_FmMacControllerDriver->enetMode = p_FmMacParam->enetMode; + p_FmMacControllerDriver->macId = p_FmMacParam->macId; + p_FmMacControllerDriver->resetOnInit = DEFAULT_resetOnInit; + + return (t_Handle)p_FmMacControllerDriver; +} + +/* ........................................................................... */ + +t_Error FM_MAC_Init (t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->resetOnInit && + (FmResetMac(p_FmMacControllerDriver->h_Fm, + ((ENET_INTERFACE_FROM_MODE(p_FmMacControllerDriver->enetMode) == e_ENET_IF_XGMII) ? e_FM_MAC_10G : e_FM_MAC_1G), + p_FmMacControllerDriver->macId) != E_OK)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't reset MAC!")); + + if ((p_FmMacControllerDriver->clkFreq = FmGetClockFreq(p_FmMacControllerDriver->h_Fm)) == 0) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't get clock for MAC!")); + + if (p_FmMacControllerDriver->f_FM_MAC_Init) + return p_FmMacControllerDriver->f_FM_MAC_Init(h_FmMac); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... 
*/ + +t_Error FM_MAC_Free (t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_Free) + return p_FmMacControllerDriver->f_FM_MAC_Free(h_FmMac); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigResetOnInit (t_Handle h_FmMac, bool enable) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + p_FmMacControllerDriver->resetOnInit = enable; + + return E_OK; +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigLoopback (t_Handle h_FmMac, bool newVal) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback) + return p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback(h_FmMac, newVal); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigMaxFrameLength (t_Handle h_FmMac, uint16_t newVal) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength) + return p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength(h_FmMac, newVal); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigWan (t_Handle h_FmMac, bool flag) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigWan) + return p_FmMacControllerDriver->f_FM_MAC_ConfigWan(h_FmMac, flag); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigPadAndCrc (t_Handle h_FmMac, bool newVal) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc) + return p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc(h_FmMac, newVal); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigHalfDuplex (t_Handle h_FmMac, bool newVal) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex) + return p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex(h_FmMac,newVal); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... 
*/ + +t_Error FM_MAC_ConfigLengthCheck (t_Handle h_FmMac, bool newVal) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck) + return p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck(h_FmMac,newVal); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ConfigException (t_Handle h_FmMac, e_FmMacExceptions ex, bool enable) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigException) + return p_FmMacControllerDriver->f_FM_MAC_ConfigException(h_FmMac, ex, enable); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +/* ........................................................................... */ + +t_Error FM_MAC_ConfigSkipFman11Workaround (t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround) + return p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround(h_FmMac); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + + +/*****************************************************************************/ +/* Run Time Control */ +/*****************************************************************************/ + +/* ........................................................................... */ + +t_Error FM_MAC_Enable (t_Handle h_FmMac, e_CommMode mode) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_Enable) + return p_FmMacControllerDriver->f_FM_MAC_Enable(h_FmMac, mode); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_Disable (t_Handle h_FmMac, e_CommMode mode) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_Disable) + return p_FmMacControllerDriver->f_FM_MAC_Disable(h_FmMac, mode); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_Enable1588TimeStamp (t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp) + return p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp(h_FmMac); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... 
*/ + +t_Error FM_MAC_Disable1588TimeStamp (t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp) + return p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp(h_FmMac); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_SetTxAutoPauseFrames (t_Handle h_FmMac, uint16_t pauseTime) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames) + return p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames(h_FmMac, pauseTime); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_SetRxIgnorePauseFrames (t_Handle h_FmMac, bool en) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames) + return p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames(h_FmMac, en); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ResetCounters (t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ResetCounters) + return p_FmMacControllerDriver->f_FM_MAC_ResetCounters(h_FmMac); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_SetException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_SetException) + return p_FmMacControllerDriver->f_FM_MAC_SetException(h_FmMac, ex, enable); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_SetStatistics (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_SetStatistics) + return p_FmMacControllerDriver->f_FM_MAC_SetStatistics(h_FmMac, statisticsLevel); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... 
*/ + +t_Error FM_MAC_GetStatistics (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_GetStatistics) + return p_FmMacControllerDriver->f_FM_MAC_GetStatistics(h_FmMac, p_Statistics); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_ModifyMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr) + return p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr(h_FmMac, p_EnetAddr); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_AddHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr) + return p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr(h_FmMac, p_EnetAddr); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_RemoveHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr) + return p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr(h_FmMac, p_EnetAddr); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_AddExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr) + return p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr(h_FmMac, p_EnetAddr); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_RemovelExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr) + return p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr(h_FmMac, p_EnetAddr); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... 
*/ + +t_Error FM_MAC_GetVesrion (t_Handle h_FmMac, uint32_t *macVresion) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_GetVersion) + return p_FmMacControllerDriver->f_FM_MAC_GetVersion(h_FmMac, macVresion); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + +} + +/* ........................................................................... */ + +t_Error FM_MAC_GetId (t_Handle h_FmMac, uint32_t *macId) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_GetId) + return p_FmMacControllerDriver->f_FM_MAC_GetId(h_FmMac, macId); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_SetPromiscuous (t_Handle h_FmMac, bool newVal) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous) + return p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous(h_FmMac, newVal); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_AdjustLink(t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_AdjustLink) + return p_FmMacControllerDriver->f_FM_MAC_AdjustLink(h_FmMac, speed, fullDuplex); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_MII_WritePhyReg (t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg) + return p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg(h_FmMac, phyAddr, reg, data); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... */ + +t_Error FM_MAC_MII_ReadPhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg) + return p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg(h_FmMac, phyAddr, reg, p_Data); + + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} + +/* ........................................................................... 
*/ + +uint16_t FM_MAC_GetMaxFrameLength(t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_VALUE(p_FmMacControllerDriver, E_INVALID_HANDLE, 0); + + if (p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength) + return p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength(h_FmMac); + + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); + return 0; +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/*****************************************************************************/ +t_Error FM_MAC_DumpRegs(t_Handle h_FmMac) +{ + t_FmMacControllerDriver *p_FmMacControllerDriver = (t_FmMacControllerDriver *)h_FmMac; + + SANITY_CHECK_RETURN_ERROR(p_FmMacControllerDriver, E_INVALID_HANDLE); + + if (p_FmMacControllerDriver->f_FM_MAC_DumpRegs) + return p_FmMacControllerDriver->f_FM_MAC_DumpRegs(h_FmMac); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, NO_MSG); +} +#endif /* (defined(DEBUG_ERRORS) && ... */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/fm_mac.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/fm_mac.h @@ -0,0 +1,197 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_mac.h + + @Description FM MAC ... 
+*//***************************************************************************/ +#ifndef __FM_MAC_H +#define __FM_MAC_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" +#include "fm_mac_ext.h" + + +#define __ERR_MODULE__ MODULE_FM_MAC + + +#define DEFAULT_resetOnInit FALSE + + +typedef struct { + uint64_t addr; /* Ethernet Address */ + t_List node; +} t_EthHashEntry; +#define ETH_HASH_ENTRY_OBJ(ptr) LIST_OBJECT(ptr, t_EthHashEntry, node) + +typedef struct { + uint16_t size; + t_List *p_Lsts; +} t_EthHash; + +typedef struct { + t_Error (*f_FM_MAC_Init) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_Free) (t_Handle h_FmMac); + + t_Error (*f_FM_MAC_SetStatistics) (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel); + t_Error (*f_FM_MAC_ConfigLoopback) (t_Handle h_FmMac, bool newVal); + t_Error (*f_FM_MAC_ConfigMaxFrameLength) (t_Handle h_FmMac, uint16_t newVal); + t_Error (*f_FM_MAC_ConfigWan) (t_Handle h_FmMac, bool flag); + t_Error (*f_FM_MAC_ConfigPadAndCrc) (t_Handle h_FmMac, bool newVal); + t_Error (*f_FM_MAC_ConfigHalfDuplex) (t_Handle h_FmMac, bool newVal); + t_Error (*f_FM_MAC_ConfigLengthCheck) (t_Handle h_FmMac, bool newVal); + t_Error (*f_FM_MAC_ConfigException) (t_Handle h_FmMac, e_FmMacExceptions, bool enable); +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 + t_Error (*f_FM_MAC_ConfigSkipFman11Workaround) (t_Handle h_FmMac); +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + + t_Error (*f_FM_MAC_SetException) (t_Handle h_FmMac, e_FmMacExceptions ex, bool enable); + + t_Error (*f_FM_MAC_Enable) (t_Handle h_FmMac, e_CommMode mode); + t_Error (*f_FM_MAC_Disable) (t_Handle h_FmMac, e_CommMode mode); + t_Error (*f_FM_MAC_Enable1588TimeStamp) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_Disable1588TimeStamp) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_Reset) (t_Handle h_FmMac, bool wait); + + t_Error (*f_FM_MAC_SetTxAutoPauseFrames) (t_Handle h_FmMac, uint16_t pauseTime); + t_Error (*f_FM_MAC_SetRxIgnorePauseFrames) (t_Handle h_FmMac, bool en); + + t_Error (*f_FM_MAC_ResetCounters) (t_Handle h_FmMac); + t_Error (*f_FM_MAC_GetStatistics) (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics); + + t_Error (*f_FM_MAC_ModifyMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + t_Error (*f_FM_MAC_AddHashMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + t_Error (*f_FM_MAC_RemoveHashMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + t_Error (*f_FM_MAC_AddExactMatchMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + t_Error (*f_FM_MAC_RemovelExactMatchMacAddr) (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + + t_Error (*f_FM_MAC_SetPromiscuous) (t_Handle h_FmMac, bool newVal); + t_Error (*f_FM_MAC_AdjustLink) (t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex); + + t_Error (*f_FM_MAC_GetId) (t_Handle h_FmMac, uint32_t *macId); + + t_Error (*f_FM_MAC_GetVersion) (t_Handle h_FmMac, uint32_t *macVersion); + + uint16_t (*f_FM_MAC_GetMaxFrameLength) (t_Handle h_FmMac); + + t_Error (*f_FM_MAC_MII_WritePhyReg)(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data); + t_Error (*f_FM_MAC_MII_ReadPhyReg)(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data); + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + t_Error (*f_FM_MAC_DumpRegs) (t_Handle h_FmMac); +#endif /* (defined(DEBUG_ERRORS) && ... 
*/
+
+    t_Handle        h_Fm;
+    e_EnetMode      enetMode;
+    uint8_t         macId;
+    bool            resetOnInit;
+    uint16_t        clkFreq;
+} t_FmMacControllerDriver;
+
+
+t_Handle DTSEC_Config(t_FmMacParams *p_FmMacParam);
+t_Handle TGEC_Config(t_FmMacParams *p_FmMacParams);
+uint16_t FM_MAC_GetMaxFrameLength(t_Handle FmMac);
+
+
+/* ........................................................................... */
+
+static __inline__ t_EthHashEntry *DequeueAddrFromHashEntry(t_List *p_AddrLst)
+{
+    t_EthHashEntry *p_HashEntry = NULL;
+    if (!LIST_IsEmpty(p_AddrLst))
+    {
+        p_HashEntry = ETH_HASH_ENTRY_OBJ(p_AddrLst->p_Next);
+        LIST_DelAndInit(&p_HashEntry->node);
+    }
+    return p_HashEntry;
+}
+
+/* ........................................................................... */
+
+static __inline__ void FreeHashTable(t_EthHash *p_Hash)
+{
+    t_EthHashEntry  *p_HashEntry;
+    int             i = 0;
+
+    if (!p_Hash || !p_Hash->p_Lsts)
+        return;
+
+    for (i = 0; i < p_Hash->size; i++)
+    {
+        p_HashEntry = DequeueAddrFromHashEntry(&p_Hash->p_Lsts[i]);
+        while (p_HashEntry)
+        {
+            XX_Free(p_HashEntry);
+            p_HashEntry = DequeueAddrFromHashEntry(&p_Hash->p_Lsts[i]);
+        }
+    }
+
+    XX_Free(p_Hash->p_Lsts);
+    XX_Free(p_Hash);
+}
+
+/* ........................................................................... */
+
+static __inline__ t_EthHash * AllocHashTable(uint16_t size)
+{
+    uint32_t    i;
+    t_EthHash   *p_Hash;
+
+    /* Allocate address hash table */
+    p_Hash = (t_EthHash *)XX_Malloc(size*sizeof(t_EthHash *));
+    if (!p_Hash)
+    {
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Address hash table"));
+        return NULL;
+    }
+    p_Hash->size = size;
+
+    p_Hash->p_Lsts = (t_List *)XX_Malloc(p_Hash->size*sizeof(t_List));
+    if (!p_Hash->p_Lsts)
+    {
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Address hash table"));
+        XX_Free(p_Hash);
+        return NULL;
+    }
+
+    for (i = 0; i < p_Hash->size; i++)
+        INIT_LIST(&p_Hash->p_Lsts[i]);
+
+    return p_Hash;
+}
+
+
+#endif /* __FM_MAC_H */
--- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec.c
+++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec.c
@@ -0,0 +1,1268 @@
+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File tgec.c + + @Description FM 10G MAC ... +*//***************************************************************************/ + +#include "std_ext.h" +#include "string_ext.h" +#include "error_ext.h" +#include "xx_ext.h" +#include "endian_ext.h" +#include "crc_mac_addr_ext.h" +#include "debug_ext.h" + +#include "fm_common.h" +#include "tgec.h" + + +/*****************************************************************************/ +/* Internal routines */ +/*****************************************************************************/ + +static t_Error CheckInitParameters(t_Tgec *p_Tgec) +{ + if(ENET_SPEED_FROM_MODE(p_Tgec->enetMode) < e_ENET_SPEED_10000) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 10G MAC driver only support 10G speed")); +#if (FM_MAX_NUM_OF_10G_MACS > 0) + if(p_Tgec->macId >= FM_MAX_NUM_OF_10G_MACS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("macId of 10G can not be greater than 0")); +#endif + if(p_Tgec->addr == 0) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Ethernet 10G MAC Must have a valid MAC Address")); + if(!p_Tgec->f_Exception) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("uninitialized f_Exception")); + if(!p_Tgec->f_Event) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("uninitialized f_Event")); + return E_OK; +} + +/* .............................................................................. */ + +static void SetDefaultParam(t_TgecDriverParam *p_TgecDriverParam) +{ + p_TgecDriverParam->wanModeEnable = DEFAULT_wanModeEnable; + p_TgecDriverParam->promiscuousModeEnable = DEFAULT_promiscuousModeEnable; + p_TgecDriverParam->pauseForwardEnable = DEFAULT_pauseForwardEnable; + p_TgecDriverParam->pauseIgnore = DEFAULT_pauseIgnore; + p_TgecDriverParam->txAddrInsEnable = DEFAULT_txAddrInsEnable; + + p_TgecDriverParam->loopbackEnable = DEFAULT_loopbackEnable; + p_TgecDriverParam->cmdFrameEnable = DEFAULT_cmdFrameEnable; + p_TgecDriverParam->rxErrorDiscard = DEFAULT_rxErrorDiscard; + p_TgecDriverParam->phyTxenaOn = DEFAULT_phyTxenaOn; + p_TgecDriverParam->sendIdleEnable = DEFAULT_sendIdleEnable; + p_TgecDriverParam->noLengthCheckEnable = DEFAULT_noLengthCheckEnable; + p_TgecDriverParam->lgthCheckNostdr = DEFAULT_lgthCheckNostdr; + p_TgecDriverParam->timeStampEnable = DEFAULT_timeStampEnable; + p_TgecDriverParam->rxSfdAny = DEFAULT_rxSfdAny; + p_TgecDriverParam->rxPblFwd = DEFAULT_rxPblFwd; + p_TgecDriverParam->txPblFwd = DEFAULT_txPblFwd; + + p_TgecDriverParam->txIpgLength = DEFAULT_txIpgLength; + p_TgecDriverParam->maxFrameLength = DEFAULT_maxFrameLength; + + p_TgecDriverParam->debugMode = DEFAULT_debugMode; + + p_TgecDriverParam->pauseTime = DEFAULT_pauseTime; + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 + p_TgecDriverParam->skipFman11Workaround = DEFAULT_skipFman11Workaround; +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ +} + +/* ........................................................................... 
*/ + +static void TgecErrException(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + uint32_t event; + t_TgecMemMap *p_TgecMemMap = p_Tgec->p_MemMap; + + event = GET_UINT32(p_TgecMemMap->ievent); + /* do not handle MDIO events */ + event &= ~(IMASK_MDIO_SCAN_EVENTMDIO | IMASK_MDIO_CMD_CMPL); + + event &= GET_UINT32(p_TgecMemMap->imask); + + WRITE_UINT32(p_TgecMemMap->ievent, event); + + if (event & IMASK_REM_FAULT) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_REM_FAULT); + if (event & IMASK_LOC_FAULT) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_LOC_FAULT); + if (event & IMASK_1TX_ECC_ER) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_1TX_ECC_ER); + if (event & IMASK_TX_FIFO_UNFL) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_FIFO_UNFL); + if (event & IMASK_TX_FIFO_OVFL) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_FIFO_OVFL); + if (event & IMASK_TX_ER) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_TX_ER); + if (event & IMASK_RX_FIFO_OVFL) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_FIFO_OVFL); + if (event & IMASK_RX_ECC_ER) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_ECC_ER); + if (event & IMASK_RX_JAB_FRM) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_JAB_FRM); + if (event & IMASK_RX_OVRSZ_FRM) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_OVRSZ_FRM); + if (event & IMASK_RX_RUNT_FRM) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_RUNT_FRM); + if (event & IMASK_RX_FRAG_FRM) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_FRAG_FRM); + if (event & IMASK_RX_LEN_ER) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_LEN_ER); + if (event & IMASK_RX_CRC_ER) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_CRC_ER); + if (event & IMASK_RX_ALIGN_ER) + p_Tgec->f_Exception(p_Tgec->h_App, e_FM_MAC_EX_10G_RX_ALIGN_ER); +} + +static void TgecException(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + uint32_t event; + t_TgecMemMap *p_TgecMemMap = p_Tgec->p_MemMap; + + event = GET_UINT32(p_TgecMemMap->ievent); + /* handle only MDIO events */ + event &= (IMASK_MDIO_SCAN_EVENTMDIO | IMASK_MDIO_CMD_CMPL); + event &= GET_UINT32(p_TgecMemMap->imask); + + WRITE_UINT32(p_TgecMemMap->ievent, event); + + if(event & IMASK_MDIO_SCAN_EVENTMDIO) + p_Tgec->f_Event(p_Tgec->h_App, e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO); + if(event & IMASK_MDIO_CMD_CMPL) + p_Tgec->f_Event(p_Tgec->h_App, e_FM_MAC_EX_10G_MDIO_CMD_CMPL); +} + +static void FreeInitResources(t_Tgec *p_Tgec) +{ + if ((p_Tgec->mdioIrq != 0) && (p_Tgec->mdioIrq != NO_IRQ)) + { + XX_DisableIntr(p_Tgec->mdioIrq); + XX_FreeIntr(p_Tgec->mdioIrq); + } + else if (p_Tgec->mdioIrq == 0) + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, (NO_MSG)); + FmUnregisterIntr(p_Tgec->fmMacControllerDriver.h_Fm, e_FM_MOD_10G_MAC, p_Tgec->macId, e_FM_INTR_TYPE_ERR); + + /* release the driver's group hash table */ + FreeHashTable(p_Tgec->p_MulticastAddrHash); + p_Tgec->p_MulticastAddrHash = NULL; + + /* release the driver's individual hash table */ + FreeHashTable(p_Tgec->p_UnicastAddrHash); + p_Tgec->p_UnicastAddrHash = NULL; +} + +/* .............................................................................. */ + +static void HardwareClearAddrInPaddr(t_Tgec *p_Tgec, uint8_t paddrNum) +{ + if (paddrNum != 0) + return; /* At this time MAC has only one address */ + + WRITE_UINT32(p_Tgec->p_MemMap->mac_addr_2, 0x0); + WRITE_UINT32(p_Tgec->p_MemMap->mac_addr_3, 0x0); +} + +/* ........................................................................... 
*/ + +static void HardwareAddAddrInPaddr(t_Tgec *p_Tgec, uint64_t *p_Addr, uint8_t paddrNum) +{ + uint32_t tmpReg32 = 0; + uint64_t addr = *p_Addr; + t_TgecMemMap *p_TgecMemMap = p_Tgec->p_MemMap; + + if (paddrNum != 0) + return; /* At this time MAC has only one address */ + + tmpReg32 = (uint32_t)(addr>>16); + SwapUint32P(&tmpReg32); + WRITE_UINT32(p_TgecMemMap->mac_addr_2, tmpReg32); + + tmpReg32 = (uint32_t)(addr); + SwapUint32P(&tmpReg32); + tmpReg32 >>= 16; + WRITE_UINT32(p_TgecMemMap->mac_addr_3, tmpReg32); +} + +/*****************************************************************************/ +/* 10G MAC API routines */ +/*****************************************************************************/ + +/* .............................................................................. */ + +static t_Error TgecEnable(t_Handle h_Tgec, e_CommMode mode) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_MemMap ; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_HANDLE); + + p_MemMap= (t_TgecMemMap*)(p_Tgec->p_MemMap); + + tmpReg32 = GET_UINT32(p_MemMap->cmd_conf_ctrl); + + switch (mode) + { + case e_COMM_MODE_NONE: + tmpReg32 &= ~(CMD_CFG_TX_EN | CMD_CFG_RX_EN); + break; + case e_COMM_MODE_RX : + tmpReg32 |= CMD_CFG_RX_EN ; + break; + case e_COMM_MODE_TX : + tmpReg32 |= CMD_CFG_TX_EN ; + break; + case e_COMM_MODE_RX_AND_TX: + tmpReg32 |= (CMD_CFG_TX_EN | CMD_CFG_RX_EN); + break; + } + + WRITE_UINT32(p_MemMap->cmd_conf_ctrl, tmpReg32); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecDisable (t_Handle h_Tgec, e_CommMode mode) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_MemMap ; + uint32_t tmpReg32 = 0; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_HANDLE); + + p_MemMap= (t_TgecMemMap*)(p_Tgec->p_MemMap); + + tmpReg32 = GET_UINT32(p_MemMap->cmd_conf_ctrl); + switch (mode) + { + case e_COMM_MODE_RX: + tmpReg32 &= ~CMD_CFG_RX_EN; + break; + case e_COMM_MODE_TX: + tmpReg32 &= ~CMD_CFG_TX_EN; + break; + case e_COMM_MODE_RX_AND_TX: + tmpReg32 &= ~(CMD_CFG_TX_EN | CMD_CFG_RX_EN); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG); + } + WRITE_UINT32(p_MemMap->cmd_conf_ctrl, tmpReg32); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecSetPromiscuous(t_Handle h_Tgec, bool newVal) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER); + + p_TgecMemMap = p_Tgec->p_MemMap; + + tmpReg32 = GET_UINT32(p_TgecMemMap->cmd_conf_ctrl); + + if (newVal) + tmpReg32 |= CMD_CFG_PROMIS_EN; + else + tmpReg32 &= ~CMD_CFG_PROMIS_EN; + + WRITE_UINT32(p_TgecMemMap->cmd_conf_ctrl, tmpReg32); + + return E_OK; +} + + +/*****************************************************************************/ +/* Tgec Configs modification functions */ +/*****************************************************************************/ + +/* .............................................................................. 
*/ + +static t_Error TgecConfigLoopback(t_Handle h_Tgec, bool newVal) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + +#ifdef FM_NO_TGEC_LOOPBACK + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("no loopback in this chip rev!")); + } +#endif /* FM_NO_TGEC_LOOPBACK */ + + p_Tgec->p_TgecDriverParam->loopbackEnable = newVal; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecConfigWan(t_Handle h_Tgec, bool newVal) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + + p_Tgec->p_TgecDriverParam->wanModeEnable = newVal; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecConfigMaxFrameLength(t_Handle h_Tgec, uint16_t newVal) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + + p_Tgec->p_TgecDriverParam->maxFrameLength = newVal; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecConfigLengthCheck(t_Handle h_Tgec, bool newVal) +{ +#ifdef FM_LEN_CHECK_ERRATA_FMAN_SW002 +UNUSED(h_Tgec); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("LengthCheck!")); + +#else + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + UNUSED(newVal); + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + + p_Tgec->p_TgecDriverParam->noLengthCheckEnable = !newVal; + + return E_OK; +#endif /* FM_LEN_CHECK_ERRATA_FMAN_SW002 */ +} + +/* .............................................................................. */ + +static t_Error TgecConfigException(t_Handle h_Tgec, e_FmMacExceptions exception, bool enable) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + uint32_t bitMask = 0; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); +#ifdef FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &revInfo); + if((revInfo.majorRev <=2) && + enable && + ((exception == e_FM_MAC_EX_10G_LOC_FAULT) || (exception == e_FM_MAC_EX_10G_REM_FAULT))) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_MAC_EX_10G_LOC_FAULT and e_FM_MAC_EX_10G_REM_FAULT !")); + } +#endif /* FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 */ + + GET_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_Tgec->exceptions |= bitMask; + else + p_Tgec->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +/* .............................................................................. 
*/ + +static t_Error TgecConfigSkipFman11Workaround(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + + p_Tgec->p_TgecDriverParam->skipFman11Workaround = TRUE; + + return E_OK; +} +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + + +/*****************************************************************************/ +/* Tgec Run Time API functions */ +/*****************************************************************************/ + +/* .............................................................................. */ + +static t_Error TgecTxMacPause(t_Handle h_Tgec, uint16_t pauseTime) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + uint32_t ptv = 0; + t_TgecMemMap *p_MemMap; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_STATE); + + p_MemMap = (t_TgecMemMap*)(p_Tgec->p_MemMap); + + ptv = (uint32_t)pauseTime; + + WRITE_UINT32(p_MemMap->pause_quant, ptv); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecRxIgnoreMacPause(t_Handle h_Tgec, bool en) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_MemMap; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_STATE); + + p_MemMap = (t_TgecMemMap*)(p_Tgec->p_MemMap); + tmpReg32 = GET_UINT32(p_MemMap->cmd_conf_ctrl); + if (en) + tmpReg32 |= CMD_CFG_PAUSE_IGNORE; + else + tmpReg32 &= ~CMD_CFG_PAUSE_IGNORE; + WRITE_UINT32(p_MemMap->cmd_conf_ctrl, tmpReg32); + + return E_OK; +} + +/* Counters handling */ +/* .............................................................................. 
*/ + +static t_Error TgecGetStatistics(t_Handle h_Tgec, t_FmMacStatistics *p_Statistics) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Statistics, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER); + + p_TgecMemMap = p_Tgec->p_MemMap; + + p_Statistics->eStatPkts64 = GET_UINT64(p_TgecMemMap->R64); + p_Statistics->eStatPkts65to127 = GET_UINT64(p_TgecMemMap->R127); + p_Statistics->eStatPkts128to255 = GET_UINT64(p_TgecMemMap->R255); + p_Statistics->eStatPkts256to511 = GET_UINT64(p_TgecMemMap->R511); + p_Statistics->eStatPkts512to1023 = GET_UINT64(p_TgecMemMap->R1023); + p_Statistics->eStatPkts1024to1518 = GET_UINT64(p_TgecMemMap->R1518); + p_Statistics->eStatPkts1519to1522 = GET_UINT64(p_TgecMemMap->R1519X); +/* */ + p_Statistics->eStatFragments = GET_UINT64(p_TgecMemMap->TRFRG); + p_Statistics->eStatJabbers = GET_UINT64(p_TgecMemMap->TRJBR); + + p_Statistics->eStatsDropEvents = GET_UINT64(p_TgecMemMap->RDRP); + p_Statistics->eStatCRCAlignErrors = GET_UINT64(p_TgecMemMap->RALN); + + p_Statistics->eStatUndersizePkts = GET_UINT64(p_TgecMemMap->TRUND); + p_Statistics->eStatOversizePkts = GET_UINT64(p_TgecMemMap->TROVR); +/* Pause */ + p_Statistics->reStatPause = GET_UINT64(p_TgecMemMap->RXPF); + p_Statistics->teStatPause = GET_UINT64(p_TgecMemMap->TXPF); + + +/* MIB II */ + p_Statistics->ifInOctets = GET_UINT64(p_TgecMemMap->ROCT); + p_Statistics->ifInMcastPkts = GET_UINT64(p_TgecMemMap->RMCA); + p_Statistics->ifInBcastPkts = GET_UINT64(p_TgecMemMap->RBCA); + p_Statistics->ifInPkts = GET_UINT64(p_TgecMemMap->RUCA) + + p_Statistics->ifInMcastPkts + + p_Statistics->ifInBcastPkts; + p_Statistics->ifInDiscards = 0; + p_Statistics->ifInErrors = GET_UINT64(p_TgecMemMap->RERR); + + p_Statistics->ifOutOctets = GET_UINT64(p_TgecMemMap->TOCT); + p_Statistics->ifOutMcastPkts = GET_UINT64(p_TgecMemMap->TMCA); + p_Statistics->ifOutBcastPkts = GET_UINT64(p_TgecMemMap->TBCA); + p_Statistics->ifOutPkts = GET_UINT64(p_TgecMemMap->TUCA); + p_Statistics->ifOutDiscards = 0; + p_Statistics->ifOutErrors = GET_UINT64(p_TgecMemMap->TERR); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecEnable1588TimeStamp(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + + p_TgecMemMap = p_Tgec->p_MemMap; + SANITY_CHECK_RETURN_ERROR(p_TgecMemMap, E_INVALID_HANDLE); + + WRITE_UINT32(p_TgecMemMap->cmd_conf_ctrl, GET_UINT32(p_TgecMemMap->cmd_conf_ctrl) | CMD_CFG_EN_TIMESTAMP); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecDisable1588TimeStamp(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + + p_TgecMemMap = p_Tgec->p_MemMap; + SANITY_CHECK_RETURN_ERROR(p_TgecMemMap, E_INVALID_HANDLE); + + WRITE_UINT32(p_TgecMemMap->cmd_conf_ctrl, GET_UINT32(p_TgecMemMap->cmd_conf_ctrl) & ~CMD_CFG_EN_TIMESTAMP); + + return E_OK; +} + +/* .............................................................................. 
*/
+
+static t_Error TgecModifyMacAddress (t_Handle h_Tgec, t_EnetAddr *p_EnetAddr)
+{
+    t_Tgec          *p_Tgec = (t_Tgec *)h_Tgec;
+    t_TgecMemMap    *p_TgecMemMap;
+    uint32_t        tmpReg32 = 0;
+    uint64_t        addr;
+
+    SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER);
+    SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER);
+
+    p_TgecMemMap = p_Tgec->p_MemMap;
+
+    /* Initialize MAC Station Address registers (1 & 2) */
+    /* Station address have to be swapped (big endian to little endian */
+
+    addr = ((*(uint64_t *)p_EnetAddr) >> 16);
+    p_Tgec->addr = addr;
+
+    tmpReg32 = (uint32_t)(addr>>16);
+    SwapUint32P(&tmpReg32);
+    WRITE_UINT32(p_TgecMemMap->mac_addr_0, tmpReg32);
+
+    tmpReg32 = (uint32_t)(addr);
+    SwapUint32P(&tmpReg32);
+    tmpReg32 >>= 16;
+    WRITE_UINT32(p_TgecMemMap->mac_addr_1, tmpReg32);
+
+    return E_OK;
+}
+
+/* .............................................................................. */
+
+static t_Error TgecResetCounters (t_Handle h_Tgec)
+{
+    t_Tgec          *p_Tgec = (t_Tgec *)h_Tgec;
+    t_TgecMemMap    *p_MemMap ;
+    uint32_t        tmpReg32, cmdConfCtrl;
+    int             i;
+
+    SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
+    SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_HANDLE);
+
+    p_MemMap= (t_TgecMemMap*)(p_Tgec->p_MemMap);
+
+    cmdConfCtrl = GET_UINT32(p_MemMap->cmd_conf_ctrl);
+
+    cmdConfCtrl |= CMD_CFG_STAT_CLR;
+
+    WRITE_UINT32(p_MemMap->cmd_conf_ctrl, cmdConfCtrl);
+
+    for (i=0; i<1000; i++)
+    {
+        tmpReg32 = GET_UINT32(p_MemMap->cmd_conf_ctrl);
+        if (!(tmpReg32 & CMD_CFG_STAT_CLR))
+            break;
+    }
+
+    cmdConfCtrl &= ~CMD_CFG_STAT_CLR;
+    WRITE_UINT32(p_MemMap->cmd_conf_ctrl, cmdConfCtrl);
+
+    return E_OK;
+}
+
+/* .............................................................................. */
+
+static t_Error TgecAddExactMatchMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr)
+{
+    t_Tgec      *p_Tgec = (t_Tgec *) h_Tgec;
+    uint64_t    ethAddr;
+    uint8_t     paddrNum;
+
+    SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE);
+
+    ethAddr = ((*(uint64_t *)p_EthAddr) >> 16);
+
+    if (ethAddr & GROUP_ADDRESS)
+        /* Multicast address has no effect in PADDR */
+        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Multicast address"));
+
+    /* Make sure no PADDR contains this address */
+    for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
+    {
+        if (p_Tgec->indAddrRegUsed[paddrNum])
+        {
+            if (p_Tgec->paddr[paddrNum] == ethAddr)
+            {
+                RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG);
+            }
+        }
+    }
+
+    /* Find first unused PADDR */
+    for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++)
+    {
+        if (!(p_Tgec->indAddrRegUsed[paddrNum]))
+        {
+            /* mark this PADDR as used */
+            p_Tgec->indAddrRegUsed[paddrNum] = TRUE;
+            /* store address */
+            p_Tgec->paddr[paddrNum] = ethAddr;
+
+            /* put in hardware */
+            HardwareAddAddrInPaddr(p_Tgec, &ethAddr, paddrNum);
+            p_Tgec->numOfIndAddrInRegs++;
+
+            return E_OK;
+        }
+    }
+
+    /* No free PADDR */
+    RETURN_ERROR(MAJOR, E_FULL, NO_MSG);
+}
+
+/* ..............................................................................
*/ + +static t_Error TgecDelExactMatchMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr) +{ + t_Tgec *p_Tgec = (t_Tgec *) h_Tgec; + uint64_t ethAddr; + uint8_t paddrNum; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_HANDLE); + + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + /* Find used PADDR containing this address */ + for (paddrNum = 0; paddrNum < TGEC_NUM_OF_PADDRS; paddrNum++) + { + if ((p_Tgec->indAddrRegUsed[paddrNum]) && + (p_Tgec->paddr[paddrNum] == ethAddr)) + { + /* mark this PADDR as not used */ + p_Tgec->indAddrRegUsed[paddrNum] = FALSE; + /* clear in hardware */ + HardwareClearAddrInPaddr(p_Tgec, paddrNum); + p_Tgec->numOfIndAddrInRegs--; + + return E_OK; + } + } + + RETURN_ERROR(MAJOR, E_NOT_FOUND, NO_MSG); +} + +/* .............................................................................. */ + +static t_Error TgecAddHashMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + t_EthHashEntry *p_HashEntry; + uint32_t crc; + uint32_t hash; + uint64_t ethAddr; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER); + + p_TgecMemMap = p_Tgec->p_MemMap; + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + if (!(ethAddr & GROUP_ADDRESS)) + /* Unicast addresses not supported in hash */ + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unicast Address")); + + /* CRC calculation */ + GET_MAC_ADDR_CRC(ethAddr, crc); + crc = MIRROR_32(crc); + + hash = (crc >> HASH_CTRL_MCAST_SHIFT) & HASH_ADDR_MASK; /* Take 9 MSB bits */ + + /* Create element to be added to the driver hash table */ + p_HashEntry = (t_EthHashEntry *)XX_Malloc(sizeof(t_EthHashEntry)); + p_HashEntry->addr = ethAddr; + INIT_LIST(&p_HashEntry->node); + + LIST_AddToTail(&(p_HashEntry->node), &(p_Tgec->p_MulticastAddrHash->p_Lsts[hash])); + WRITE_UINT32(p_TgecMemMap->hashtable_ctrl, (hash | HASH_CTRL_MCAST_EN)); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecDelHashMacAddress(t_Handle h_Tgec, t_EnetAddr *p_EthAddr) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + t_EthHashEntry *p_HashEntry = NULL; + t_List *p_Pos; + uint32_t crc; + uint32_t hash; + uint64_t ethAddr; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER); + + p_TgecMemMap = p_Tgec->p_MemMap; + ethAddr = ((*(uint64_t *)p_EthAddr) >> 16); + + /* CRC calculation */ + GET_MAC_ADDR_CRC(ethAddr, crc); + crc = MIRROR_32(crc); + + hash = (crc >> HASH_CTRL_MCAST_SHIFT) & HASH_ADDR_MASK; /* Take 9 MSB bits */ + + LIST_FOR_EACH(p_Pos, &(p_Tgec->p_MulticastAddrHash->p_Lsts[hash])) + { + + p_HashEntry = ETH_HASH_ENTRY_OBJ(p_Pos); + if(p_HashEntry->addr == ethAddr) + { + LIST_DelAndInit(&p_HashEntry->node); + XX_Free(p_HashEntry); + break; + } + } + if(LIST_IsEmpty(&p_Tgec->p_MulticastAddrHash->p_Lsts[hash])) + WRITE_UINT32(p_TgecMemMap->hashtable_ctrl, (hash & ~HASH_CTRL_MCAST_EN)); + + return E_OK; +} + +/* .............................................................................. 
*/ + +static t_Error TgecGetId(t_Handle h_Tgec, uint32_t *macId) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_NULL_POINTER); + + UNUSED(p_Tgec); + UNUSED(macId); + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("TgecGetId Not Supported")); +} + +/* .............................................................................. */ + +static t_Error TgecGetVersion(t_Handle h_Tgec, uint32_t *macVersion) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMemMap *p_TgecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER); + + p_TgecMemMap = p_Tgec->p_MemMap; + *macVersion = GET_UINT32(p_TgecMemMap->tgec_id); + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecSetExcpetion(t_Handle h_Tgec, e_FmMacExceptions exception, bool enable) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + uint32_t bitMask = 0, tmpReg; + t_TgecMemMap *p_TgecMemMap; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Tgec->p_TgecDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_NULL_POINTER); + + p_TgecMemMap = p_Tgec->p_MemMap; +#ifdef FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &revInfo); + if((revInfo.majorRev <=2) && + enable && + ((exception == e_FM_MAC_EX_10G_LOC_FAULT) || (exception == e_FM_MAC_EX_10G_REM_FAULT))) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_MAC_EX_10G_LOC_FAULT and e_FM_MAC_EX_10G_REM_FAULT !")); + } +#endif /* FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 */ + + GET_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_Tgec->exceptions |= bitMask; + else + p_Tgec->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + tmpReg = GET_UINT32(p_TgecMemMap->imask); + if(enable) + tmpReg |= bitMask; + else + tmpReg &= ~bitMask; + WRITE_UINT32(p_TgecMemMap->imask, tmpReg); + return E_OK; +} + +/* .............................................................................. */ + +static uint16_t TgecGetMaxFrameLength(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_VALUE(p_Tgec, E_INVALID_HANDLE, 0); + + return (uint16_t)GET_UINT32(p_Tgec->p_MemMap->maxfrm); +} + +/* .............................................................................. */ + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +static t_Error TgecTxEccWorkaround(t_Tgec *p_Tgec) +{ + t_Error err; + + XX_Print("Applying 10G tx-ecc error workaround (10GMAC-A004) ..."); + /* enable and set promiscuous */ + WRITE_UINT32(p_Tgec->p_MemMap->cmd_conf_ctrl, CMD_CFG_PROMIS_EN | CMD_CFG_TX_EN | CMD_CFG_RX_EN); + err = Fm10GTxEccWorkaround(p_Tgec->fmMacControllerDriver.h_Fm, p_Tgec->macId); + /* disable */ + WRITE_UINT32(p_Tgec->p_MemMap->cmd_conf_ctrl, 0); + if (err) + XX_Print("FAILED!\n"); + else + XX_Print("done.\n"); + TgecResetCounters (p_Tgec); + + return err; +} +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + +/* .............................................................................. 
*/ + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +static t_Error TgecDumpRegs(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + DECLARE_DUMP; + + if (p_Tgec->p_MemMap) + { + DUMP_TITLE(p_Tgec->p_MemMap, ("10G MAC %d: ", p_Tgec->macId)); + DUMP_VAR(p_Tgec->p_MemMap, tgec_id); + DUMP_VAR(p_Tgec->p_MemMap, scratch); + DUMP_VAR(p_Tgec->p_MemMap, cmd_conf_ctrl); + DUMP_VAR(p_Tgec->p_MemMap, mac_addr_0); + DUMP_VAR(p_Tgec->p_MemMap, mac_addr_1); + DUMP_VAR(p_Tgec->p_MemMap, maxfrm); + DUMP_VAR(p_Tgec->p_MemMap, pause_quant); + DUMP_VAR(p_Tgec->p_MemMap, rx_fifo_sections); + DUMP_VAR(p_Tgec->p_MemMap, tx_fifo_sections); + DUMP_VAR(p_Tgec->p_MemMap, rx_fifo_almost_f_e); + DUMP_VAR(p_Tgec->p_MemMap, tx_fifo_almost_f_e); + DUMP_VAR(p_Tgec->p_MemMap, hashtable_ctrl); + DUMP_VAR(p_Tgec->p_MemMap, mdio_cfg_status); + DUMP_VAR(p_Tgec->p_MemMap, mdio_command); + DUMP_VAR(p_Tgec->p_MemMap, mdio_data); + DUMP_VAR(p_Tgec->p_MemMap, mdio_regaddr); + DUMP_VAR(p_Tgec->p_MemMap, status); + DUMP_VAR(p_Tgec->p_MemMap, tx_ipg_len); + DUMP_VAR(p_Tgec->p_MemMap, mac_addr_2); + DUMP_VAR(p_Tgec->p_MemMap, mac_addr_3); + DUMP_VAR(p_Tgec->p_MemMap, rx_fifo_ptr_rd); + DUMP_VAR(p_Tgec->p_MemMap, rx_fifo_ptr_wr); + DUMP_VAR(p_Tgec->p_MemMap, tx_fifo_ptr_rd); + DUMP_VAR(p_Tgec->p_MemMap, tx_fifo_ptr_wr); + DUMP_VAR(p_Tgec->p_MemMap, imask); + DUMP_VAR(p_Tgec->p_MemMap, ievent); + DUMP_VAR(p_Tgec->p_MemMap, udp_port); + DUMP_VAR(p_Tgec->p_MemMap, type_1588v2); + } + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ + + +/*****************************************************************************/ +/* FM Init & Free API */ +/*****************************************************************************/ + +/* .............................................................................. 
*/ + +static t_Error TgecInit(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecDriverParam *p_TgecDriverParam; + t_TgecMemMap *p_MemMap; + uint64_t addr; + uint32_t tmpReg32; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_TgecDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MemMap, E_INVALID_HANDLE); + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 + if (!p_Tgec->p_TgecDriverParam->skipFman11Workaround && + ((err = TgecTxEccWorkaround(p_Tgec)) != E_OK)) +#ifdef NCSW_LINUX + { + /* the workaround fails in simics, just report and continue initialization */ + REPORT_ERROR(MAJOR, err, ("TgecTxEccWorkaround FAILED, skipping workaround")); + } +#else + { + FreeInitResources(p_Tgec); + RETURN_ERROR(MAJOR, err, ("TgecTxEccWorkaround FAILED")); + } +#endif +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + + CHECK_INIT_PARAMETERS(p_Tgec, CheckInitParameters); + + p_TgecDriverParam = p_Tgec->p_TgecDriverParam; + p_MemMap = p_Tgec->p_MemMap; + + /* MAC Address */ + addr = p_Tgec->addr; + tmpReg32 = (uint32_t)(addr>>16); + SwapUint32P(&tmpReg32); + WRITE_UINT32(p_MemMap->mac_addr_0, tmpReg32); + + tmpReg32 = (uint32_t)(addr); + SwapUint32P(&tmpReg32); + tmpReg32 >>= 16; + WRITE_UINT32(p_MemMap->mac_addr_1, tmpReg32); + + /* Config */ + tmpReg32 = 0; + if (p_TgecDriverParam->wanModeEnable) + tmpReg32 |= CMD_CFG_WAN_MODE; + if (p_TgecDriverParam->promiscuousModeEnable) + tmpReg32 |= CMD_CFG_PROMIS_EN; + if (p_TgecDriverParam->pauseForwardEnable) + tmpReg32 |= CMD_CFG_PAUSE_FWD; + if (p_TgecDriverParam->pauseIgnore) + tmpReg32 |= CMD_CFG_PAUSE_IGNORE; + if (p_TgecDriverParam->txAddrInsEnable) + tmpReg32 |= CMD_CFG_TX_ADDR_INS; + if (p_TgecDriverParam->loopbackEnable) + tmpReg32 |= CMD_CFG_LOOPBACK_EN; + if (p_TgecDriverParam->cmdFrameEnable) + tmpReg32 |= CMD_CFG_CMD_FRM_EN; + if (p_TgecDriverParam->rxErrorDiscard) + tmpReg32 |= CMD_CFG_RX_ER_DISC; + if (p_TgecDriverParam->phyTxenaOn) + tmpReg32 |= CMD_CFG_PHY_TX_EN; + if (p_TgecDriverParam->sendIdleEnable) + tmpReg32 |= CMD_CFG_SEND_IDLE; + if (p_TgecDriverParam->noLengthCheckEnable) + tmpReg32 |= CMD_CFG_NO_LEN_CHK; + if (p_TgecDriverParam->lgthCheckNostdr) + tmpReg32 |= CMD_CFG_LEN_CHK_NOSTDR; + if (p_TgecDriverParam->timeStampEnable) + tmpReg32 |= CMD_CFG_EN_TIMESTAMP; + if (p_TgecDriverParam->rxSfdAny) + tmpReg32 |= RX_SFD_ANY; + if (p_TgecDriverParam->rxPblFwd) + tmpReg32 |= CMD_CFG_RX_PBL_FWD; + if (p_TgecDriverParam->txPblFwd) + tmpReg32 |= CMD_CFG_TX_PBL_FWD; + tmpReg32 |= 0x40; + WRITE_UINT32(p_MemMap->cmd_conf_ctrl, tmpReg32); + + /* Max Frame Length */ + WRITE_UINT32(p_MemMap->maxfrm, (uint32_t)p_TgecDriverParam->maxFrameLength); + err = FmSetMacMaxFrame(p_Tgec->fmMacControllerDriver.h_Fm, e_FM_MAC_10G, p_Tgec->fmMacControllerDriver.macId, p_TgecDriverParam->maxFrameLength); + if(err) + { + FreeInitResources(p_Tgec); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + /* Pause Time */ + WRITE_UINT32(p_MemMap->pause_quant, p_TgecDriverParam->pauseTime); + +#ifdef FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 + WRITE_UINT32(p_Tgec->p_MemMap->tx_ipg_len, + (GET_UINT32(p_Tgec->p_MemMap->tx_ipg_len) & ~TX_IPG_LENGTH_MASK) | DEFAULT_txIpgLength); +#endif /* FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 */ + + /* Configure MII */ + tmpReg32 = GET_UINT32(p_Tgec->p_MiiMemMap->mdio_cfg_status); +#ifdef FM_10G_MDIO_HOLD_ERRATA_XAUI3 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 
0)) + tmpReg32 |= (MIIMCOM_MDIO_HOLD_4_REG_CLK << 2); + } +#endif /* FM_10G_MDIO_HOLD_ERRATA_XAUI3 */ + tmpReg32 &= ~MIIMCOM_DIV_MASK; + /* (one half of fm clock => 2.5Mhz) */ + tmpReg32 |=((((p_Tgec->fmMacControllerDriver.clkFreq*10)/2)/25) << MIIMCOM_DIV_SHIFT); + WRITE_UINT32(p_Tgec->p_MiiMemMap->mdio_cfg_status, tmpReg32); + + p_Tgec->p_MulticastAddrHash = AllocHashTable(HASH_TABLE_SIZE); + if(!p_Tgec->p_MulticastAddrHash) + { + FreeInitResources(p_Tgec); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED")); + } + + p_Tgec->p_UnicastAddrHash = AllocHashTable(HASH_TABLE_SIZE); + if(!p_Tgec->p_UnicastAddrHash) + { + FreeInitResources(p_Tgec); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("allocation hash table is FAILED")); + } + + /* interrupts */ +#ifdef FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Tgec->fmMacControllerDriver.h_Fm, &revInfo); + if (revInfo.majorRev <=2) + p_Tgec->exceptions &= ~(IMASK_REM_FAULT | IMASK_LOC_FAULT); + } +#endif /* FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 */ + WRITE_UINT32(p_MemMap->ievent, EVENTS_MASK); + WRITE_UINT32(p_MemMap->imask, p_Tgec->exceptions); + + FmRegisterIntr(p_Tgec->fmMacControllerDriver.h_Fm, e_FM_MOD_10G_MAC, p_Tgec->macId, e_FM_INTR_TYPE_ERR, TgecErrException , p_Tgec); + if ((p_Tgec->mdioIrq != 0) && (p_Tgec->mdioIrq != NO_IRQ)) + { + XX_SetIntr(p_Tgec->mdioIrq, TgecException, p_Tgec); + XX_EnableIntr(p_Tgec->mdioIrq); + } + else if (p_Tgec->mdioIrq == 0) + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, (NO_MSG)); + + XX_Free(p_TgecDriverParam); + p_Tgec->p_TgecDriverParam = NULL; + + return E_OK; +} + +/* .............................................................................. */ + +static t_Error TgecFree(t_Handle h_Tgec) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + + FreeInitResources(p_Tgec); + + if (p_Tgec->p_TgecDriverParam) + { + XX_Free(p_Tgec->p_TgecDriverParam); + p_Tgec->p_TgecDriverParam = NULL; + } + XX_Free (p_Tgec); + + return E_OK; +} + +/* .............................................................................. 
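+   Note: InitFmMacControllerDriver() below fills the generic t_FmMacControllerDriver
+   dispatch table with the TGEC-specific callbacks. Entries left NULL (ConfigPadAndCrc,
+   ConfigHalfDuplex, AdjustLink) mark operations that do not apply to this 10G MAC:
+   pad+CRC is always on and half-duplex is not supported.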
*/ + +static void InitFmMacControllerDriver(t_FmMacControllerDriver *p_FmMacControllerDriver) +{ + p_FmMacControllerDriver->f_FM_MAC_Init = TgecInit; + p_FmMacControllerDriver->f_FM_MAC_Free = TgecFree; + + p_FmMacControllerDriver->f_FM_MAC_ConfigLoopback = TgecConfigLoopback; + p_FmMacControllerDriver->f_FM_MAC_ConfigMaxFrameLength = TgecConfigMaxFrameLength; + + p_FmMacControllerDriver->f_FM_MAC_ConfigWan = TgecConfigWan; + + p_FmMacControllerDriver->f_FM_MAC_ConfigPadAndCrc = NULL; /* TGEC always works with pad+crc */ + p_FmMacControllerDriver->f_FM_MAC_ConfigHalfDuplex = NULL; /* half-duplex is not supported in xgec */ + p_FmMacControllerDriver->f_FM_MAC_ConfigLengthCheck = TgecConfigLengthCheck; + p_FmMacControllerDriver->f_FM_MAC_ConfigException = TgecConfigException; + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 + p_FmMacControllerDriver->f_FM_MAC_ConfigSkipFman11Workaround= TgecConfigSkipFman11Workaround; +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + + p_FmMacControllerDriver->f_FM_MAC_SetException = TgecSetExcpetion; + + p_FmMacControllerDriver->f_FM_MAC_Enable1588TimeStamp = TgecEnable1588TimeStamp; + p_FmMacControllerDriver->f_FM_MAC_Disable1588TimeStamp = TgecDisable1588TimeStamp; + + p_FmMacControllerDriver->f_FM_MAC_SetPromiscuous = TgecSetPromiscuous; + p_FmMacControllerDriver->f_FM_MAC_AdjustLink = NULL; + + p_FmMacControllerDriver->f_FM_MAC_Enable = TgecEnable; + p_FmMacControllerDriver->f_FM_MAC_Disable = TgecDisable; + + p_FmMacControllerDriver->f_FM_MAC_SetTxAutoPauseFrames = TgecTxMacPause; + p_FmMacControllerDriver->f_FM_MAC_SetRxIgnorePauseFrames = TgecRxIgnoreMacPause; + + p_FmMacControllerDriver->f_FM_MAC_ResetCounters = TgecResetCounters; + p_FmMacControllerDriver->f_FM_MAC_GetStatistics = TgecGetStatistics; + + p_FmMacControllerDriver->f_FM_MAC_ModifyMacAddr = TgecModifyMacAddress; + p_FmMacControllerDriver->f_FM_MAC_AddHashMacAddr = TgecAddHashMacAddress; + p_FmMacControllerDriver->f_FM_MAC_RemoveHashMacAddr = TgecDelHashMacAddress; + p_FmMacControllerDriver->f_FM_MAC_AddExactMatchMacAddr = TgecAddExactMatchMacAddress; + p_FmMacControllerDriver->f_FM_MAC_RemovelExactMatchMacAddr = TgecDelExactMatchMacAddress; + p_FmMacControllerDriver->f_FM_MAC_GetId = TgecGetId; + p_FmMacControllerDriver->f_FM_MAC_GetVersion = TgecGetVersion; + p_FmMacControllerDriver->f_FM_MAC_GetMaxFrameLength = TgecGetMaxFrameLength; + + p_FmMacControllerDriver->f_FM_MAC_MII_WritePhyReg = TGEC_MII_WritePhyReg; + p_FmMacControllerDriver->f_FM_MAC_MII_ReadPhyReg = TGEC_MII_ReadPhyReg; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + p_FmMacControllerDriver->f_FM_MAC_DumpRegs = TgecDumpRegs; +#endif /* (defined(DEBUG_ERRORS) && ... */ +} + + +/*****************************************************************************/ +/* Tgec Config Main Entry */ +/*****************************************************************************/ + +/* .............................................................................. */ + +t_Handle TGEC_Config(t_FmMacParams *p_FmMacParam) +{ + t_Tgec *p_Tgec; + t_TgecDriverParam *p_TgecDriverParam; + uintptr_t baseAddr; + uint8_t i; + + SANITY_CHECK_RETURN_VALUE(p_FmMacParam, E_NULL_POINTER, NULL); + + baseAddr = p_FmMacParam->baseAddr; + /* allocate memory for the UCC GETH data structure. 
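+       (The structure allocated here is the TGEC driver context, t_Tgec; the
+       "UCC GETH" wording appears to be left over from an older driver. TGEC_Config()
+       only allocates and default-initialises the context and its t_TgecDriverParam
+       block; the values are applied to the hardware later, in TgecInit(), which then
+       frees the parameter block.)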
*/ + p_Tgec = (t_Tgec *) XX_Malloc(sizeof(t_Tgec)); + if (!p_Tgec) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("10G MAC driver structure")); + return NULL; + } + /* Zero out * p_Tgec */ + memset(p_Tgec, 0, sizeof(t_Tgec)); + InitFmMacControllerDriver(&p_Tgec->fmMacControllerDriver); + + /* allocate memory for the 10G MAC driver parameters data structure. */ + p_TgecDriverParam = (t_TgecDriverParam *) XX_Malloc(sizeof(t_TgecDriverParam)); + if (!p_TgecDriverParam) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("10G MAC driver parameters")); + TgecFree(p_Tgec); + return NULL; + } + /* Zero out */ + memset(p_TgecDriverParam, 0, sizeof(t_TgecDriverParam)); + + /* Plant parameter structure pointer */ + p_Tgec->p_TgecDriverParam = p_TgecDriverParam; + + SetDefaultParam(p_TgecDriverParam); + + for (i=0; i < sizeof(p_FmMacParam->addr); i++) + p_Tgec->addr |= ((uint64_t)p_FmMacParam->addr[i] << ((5-i) * 8)); + + p_Tgec->p_MemMap = (t_TgecMemMap *)UINT_TO_PTR(baseAddr); + p_Tgec->p_MiiMemMap = (t_TgecMiiAccessMemMap *)UINT_TO_PTR(baseAddr + TGEC_TO_MII_OFFSET); + p_Tgec->enetMode = p_FmMacParam->enetMode; + p_Tgec->macId = p_FmMacParam->macId; + p_Tgec->exceptions = DEFAULT_exceptions; + p_Tgec->mdioIrq = p_FmMacParam->mdioIrq; + p_Tgec->f_Exception = p_FmMacParam->f_Exception; + p_Tgec->f_Event = p_FmMacParam->f_Event; + p_Tgec->h_App = p_FmMacParam->h_App; + + return p_Tgec; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec.h @@ -0,0 +1,482 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File tgec.h + + @Description FM 10G MAC ... 
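+               (This header collects the register-level definitions for the 10G MAC:
+               the IMASK/IEVENT interrupt bits, the COMMAND_CONFIG control bits, the
+               memory-mapped register layout with its statistics counters, and the
+               t_TgecDriverParam / t_Tgec driver structures.)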
+*//***************************************************************************/ +#ifndef __TGEC_H +#define __TGEC_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" +#include "tgec_mii_acc.h" +#include "fm_mac.h" + + +/* Interrupt Mask Register (IMASK) */ +#define IMASK_MDIO_SCAN_EVENTMDIO 0x00010000 /* MDIO_SCAN_EVENTMDIO scan event interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_MDIO_CMD_CMPL 0x00008000 /* 16 MDIO_CMD_CMPL MDIO command completion interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_REM_FAULT 0x00004000 /* 17 REM_FAULT Remote fault interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_LOC_FAULT 0x00002000 /* 18 LOC_FAULT Local fault interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_1TX_ECC_ER 0x00001000 /* 19 TX_ECC_ER Transmit frame ECC error interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_TX_FIFO_UNFL 0x00000800 /* 20 TX_FIFO_UNFL Transmit FIFO underflow interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_TX_FIFO_OVFL 0x00000400 /* 21 TX_FIFO_OVFL Transmit FIFO overflow interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_TX_ER 0x00000200 /* 22 TX_ER Transmit frame error interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_FIFO_OVFL 0x00000100 /* 23 RX_FIFO_OVFL Receive FIFO overflow interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_ECC_ER 0x00000080 /* 24 RX_ECC_ER Receive frame ECC error interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_JAB_FRM 0x00000040 /* 25 RX_JAB_FRM Receive jabber frame interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_OVRSZ_FRM 0x00000020 /* 26 RX_OVRSZ_FRM Receive oversized frame interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_RUNT_FRM 0x00000010 /* 27 RX_RUNT_FRM Receive runt frame interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_FRAG_FRM 0x00000008 /* 28 RX_FRAG_FRM Receive fragment frame interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_LEN_ER 0x00000004 /* 29 RX_LEN_ER Receive payload length error interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_CRC_ER 0x00000002 /* 30 RX_CRC_ER Receive CRC error interrupt mask. + * 0 masked + * 1 enabled + */ +#define IMASK_RX_ALIGN_ER 0x00000001 /* 31 RX_ALIGN_ER Receive alignment error interrupt mask. 
+ * 0 masked + * 1 enabled + */ + +#define EVENTS_MASK ((uint32_t)(IMASK_MDIO_SCAN_EVENTMDIO | \ + IMASK_MDIO_CMD_CMPL | \ + IMASK_REM_FAULT | \ + IMASK_LOC_FAULT | \ + IMASK_1TX_ECC_ER | \ + IMASK_TX_FIFO_UNFL | \ + IMASK_TX_FIFO_OVFL | \ + IMASK_TX_ER | \ + IMASK_RX_FIFO_OVFL | \ + IMASK_RX_ECC_ER | \ + IMASK_RX_JAB_FRM | \ + IMASK_RX_OVRSZ_FRM | \ + IMASK_RX_RUNT_FRM | \ + IMASK_RX_FRAG_FRM | \ + IMASK_RX_LEN_ER | \ + IMASK_RX_CRC_ER | \ + IMASK_RX_ALIGN_ER)) + +#define GET_EXCEPTION_FLAG(bitMask, exception) switch(exception){ \ + case e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO: \ + bitMask = IMASK_MDIO_SCAN_EVENTMDIO; break; \ + case e_FM_MAC_EX_10G_MDIO_CMD_CMPL: \ + bitMask = IMASK_MDIO_CMD_CMPL ; break; \ + case e_FM_MAC_EX_10G_REM_FAULT: \ + bitMask = IMASK_REM_FAULT ; break; \ + case e_FM_MAC_EX_10G_LOC_FAULT: \ + bitMask = IMASK_LOC_FAULT ; break; \ + case e_FM_MAC_EX_10G_1TX_ECC_ER: \ + bitMask = IMASK_1TX_ECC_ER ; break; \ + case e_FM_MAC_EX_10G_TX_FIFO_UNFL: \ + bitMask = IMASK_TX_FIFO_UNFL ; break; \ + case e_FM_MAC_EX_10G_TX_FIFO_OVFL: \ + bitMask = IMASK_TX_FIFO_OVFL ; break; \ + case e_FM_MAC_EX_10G_TX_ER: \ + bitMask = IMASK_TX_ER ; break; \ + case e_FM_MAC_EX_10G_RX_FIFO_OVFL: \ + bitMask = IMASK_RX_FIFO_OVFL ; break; \ + case e_FM_MAC_EX_10G_RX_ECC_ER: \ + bitMask = IMASK_RX_ECC_ER ; break; \ + case e_FM_MAC_EX_10G_RX_JAB_FRM: \ + bitMask = IMASK_RX_JAB_FRM ; break; \ + case e_FM_MAC_EX_10G_RX_OVRSZ_FRM: \ + bitMask = IMASK_RX_OVRSZ_FRM ; break; \ + case e_FM_MAC_EX_10G_RX_RUNT_FRM: \ + bitMask = IMASK_RX_RUNT_FRM ; break; \ + case e_FM_MAC_EX_10G_RX_FRAG_FRM: \ + bitMask = IMASK_RX_FRAG_FRM ; break; \ + case e_FM_MAC_EX_10G_RX_LEN_ER: \ + bitMask = IMASK_RX_LEN_ER ; break; \ + case e_FM_MAC_EX_10G_RX_CRC_ER: \ + bitMask = IMASK_RX_CRC_ER ; break; \ + case e_FM_MAC_EX_10G_RX_ALIGN_ER: \ + bitMask = IMASK_RX_ALIGN_ER ; break; \ + default: bitMask = 0;break;} + + +/* Default Config Params */ +#define DEFAULT_wanModeEnable FALSE +#define DEFAULT_promiscuousModeEnable FALSE + + +#define DEFAULT_pauseForwardEnable FALSE +#define DEFAULT_pauseIgnore FALSE +#define DEFAULT_txAddrInsEnable FALSE + +#define DEFAULT_loopbackEnable FALSE +#define DEFAULT_cmdFrameEnable FALSE +#define DEFAULT_rxErrorDiscard FALSE +#define DEFAULT_phyTxenaOn FALSE +#define DEFAULT_sendIdleEnable FALSE +#define DEFAULT_noLengthCheckEnable TRUE +#define DEFAULT_lgthCheckNostdr FALSE +#define DEFAULT_timeStampEnable FALSE +#define DEFAULT_rxSfdAny FALSE +#define DEFAULT_rxPblFwd FALSE +#define DEFAULT_txPblFwd FALSE +#define DEFAULT_txIpgLength 12 + +#define DEFAULT_maxFrameLength 0x600 + +#define DEFAULT_debugMode FALSE +#define DEFAULT_pauseTime 0xf000 +#define DEFAULT_imask 0xf000 +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +#define DEFAULT_skipFman11Workaround FALSE +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + +#define DEFAULT_exceptions ((uint32_t)(IMASK_MDIO_SCAN_EVENTMDIO | \ + IMASK_REM_FAULT | \ + IMASK_LOC_FAULT | \ + IMASK_1TX_ECC_ER | \ + IMASK_TX_FIFO_UNFL | \ + IMASK_TX_FIFO_OVFL | \ + IMASK_TX_ER | \ + IMASK_RX_FIFO_OVFL | \ + IMASK_RX_ECC_ER | \ + IMASK_RX_JAB_FRM | \ + IMASK_RX_OVRSZ_FRM | \ + IMASK_RX_RUNT_FRM | \ + IMASK_RX_FRAG_FRM | \ + IMASK_RX_CRC_ER | \ + IMASK_RX_ALIGN_ER)) + +#define MAX_PACKET_ALIGNMENT 31 +#define MAX_INTER_PACKET_GAP 0x7f +#define MAX_INTER_PALTERNATE_BEB 0x0f +#define MAX_RETRANSMISSION 0x0f +#define MAX_COLLISION_WINDOW 0x03ff + + +#define TGEC_NUM_OF_PADDRS 1 /* number of pattern match registers (entries) */ + +#define GROUP_ADDRESS 0x0000010000000000LL /* 
Group address bit indication */ + +#define HASH_TABLE_SIZE 512 /* Hash table size (= 32 bits * 8 regs) */ + +#define TGEC_TO_MII_OFFSET 0x1030 /* Offset from the MEM map to the MDIO mem map */ + +/* 10-gigabit Ethernet MAC Controller ID (10GEC_ID) */ +#define TGEC_ID_ID 0xffff0000 +#define TGEC_ID_MAC_VERSION 0x0000FF00 +#define TGEC_ID_MAC_REV 0x000000ff + +/* Command and Configuration Register (COMMAND_CONFIG) */ +#define CMD_CFG_TX_PBL_FWD 0x00800000 /* 08 Transmit Preamble Forwarding (custom preamble). + */ +#define CMD_CFG_RX_PBL_FWD 0x00400000 /* 09 Receive Preamble Forwarding (custom preamble). + */ +#define RX_SFD_ANY 0x00200000 /* 10 Enables, when set, that any character is allowed at the SFD position of the preamble and the frame will be accepted. + */ +#define CMD_CFG_EN_TIMESTAMP 0x00100000 /* 11 EN_TIMESTAMP IEEE 1588 timeStamp functionality control. + * 0 disabled + * 1 enabled + */ +#define CMD_CFG_TX_ADDR_INS_SEL 0x00080000 /* 12 TX_ADDR_INS_SEL Transmit MAC address select + * 0 insert using first MAC address + * 1 insert using second MAC address + */ +#define CMD_CFG_LEN_CHK_NOSTDR 0x00040000 /* 13 LEN_CHK_NOSTDR + */ +#define CMD_CFG_NO_LEN_CHK 0x00020000 /* 14 NO_LEN_CHK Payload length check disable + * 0 MAC compares the frame payload length with the frame length/type field. + * 1 Payload length check is disabled. + */ +#define CMD_CFG_SEND_IDLE 0x00010000 /* 15 SEND_IDLE Force idle generation + * 0 Normal operation. + * 1 MAC permanently sends XGMII idle sequences even when faults are received. + */ +#define CMD_CFG_PHY_TX_EN 0x00008000 /* 16 PHY_TX_EN PHY transmit enable + * 0 PHY transmit is disabled. + * 1 PHY transmit is enabled. + */ +#define CMD_CFG_RX_ER_DISC 0x00004000 /* 17 RX_ER_DISC Receive error frame discard enable + * 0 Received error frames are processed. + * 1 Any frame received with an error is discarded. + */ +#define CMD_CFG_CMD_FRM_EN 0x00002000 /* 18 CMD_FRM_EN Command frame reception enable + * 0 Only Pause frames are accepted (all other command frames are rejected). + * 1 All command frames are accepted. + */ +#define CMD_CFG_STAT_CLR 0x00001000 /* 19 STAT_CLR Clear statistics + * 0 Normal operations. + * 1 All statistics counters are cleared. + */ +#define CMD_CFG_LOOPBACK_EN 0x00000400 /* 21 LOOPBAC_EN PHY interface loopback enable + * 0 Configure PHY for normal operation. + * 1 Configure PHY for loopback mode. + */ +#define CMD_CFG_TX_ADDR_INS 0x00000200 /* 22 TX_ADDR_INS Transmit source MAC address insertion + * 0 MAC transmits the source MAC address unmodified. + * 1 MAC overwrites the source MAC address with address specified by COMMAND_CONFIG[TX_ADDR_INS_SEL]. + */ +#define CMD_CFG_PAUSE_IGNORE 0x00000100 /* 23 PAUSE_IGNORE Ignore Pause frame quanta + * 0 MAC stops transmit process for the duration specified in the Pause frame quanta of a received Pause frame. + * 1 MAC ignores received Pause frames. + */ +#define CMD_CFG_PAUSE_FWD 0x00000080 /* 24 PAUSE_FWD Terminate/forward received Pause frames + * 0 MAC terminates and discards received Pause frames. + * 1 MAC forwards Pause frames to the user application. + */ +#define CMD_CFG_PROMIS_EN 0x00000010 /* 27 PROMIS_EN Promiscuous operation enable + * 0 Unicast frames with a destination address not matching the core MAC address (defined by registers, MAC_ADDR_0 and MAC_ADDR_1) are rejected. + * 1 All frames are received without any MAC address filtering. + */ +#define CMD_CFG_WAN_MODE 0x00000008 /* 28 WAN_MODE WAN mode enable + * 0 Configure MAC for LAN mode. 
+ * 1 Configure MAC for WAN mode. + */ +#define CMD_CFG_RX_EN 0x00000002 /* 30 RX_EN MAC receive path enable + * 0 MAC receive path is disabled + * 1 MAC receive path is enabled. + */ +#define CMD_CFG_TX_EN 0x00000001 /* 31 TX_EN MAC transmit path enable + * 0 MAC transmit path is disabled + * 1 MAC transmit path is enabled. + */ + +/* Hashtable Control Register (HASHTABLE_CTRL) */ +#define HASH_CTRL_MCAST_SHIFT 23 + +#define HASH_CTRL_MCAST_RD 0x00000400 /* 22 MCAST_READ Entry Multicast frame reception for the hash entry. + * 0 disabled + * 1 enabled + */ +#define HASH_CTRL_MCAST_EN 0x00000200 /* 22 MCAST_EN Multicast frame reception for the hash entry. + * 0 disabled + * 1 enabled + */ +#define HASH_ADDR_MASK 0x000001ff /* 23-31 HASH_ADDR Hash table address code. + */ + +/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */ +#define TX_IPG_LENGTH_MASK 0x000003ff + + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +/* + * 10G memory map + */ +typedef _Packed struct { +/* 10Ge General Control and Status */ + volatile uint32_t tgec_id; /* 0x000 10GEC_ID - Controller ID register */ + volatile uint32_t scratch; /* 0x004 */ + volatile uint32_t cmd_conf_ctrl; /* 0x008 COMMAND_CONFIG - Control and configuration register */ + volatile uint32_t mac_addr_0; /* 0x00C MAC_ADDR_0 - Lower 32 bits of the first 48-bit MAC address */ + volatile uint32_t mac_addr_1; /* 0x010 MAC_ADDR_1 - Upper 16 bits of the first 48-bit MAC address */ + volatile uint32_t maxfrm; /* 0x014 MAXFRM - Maximum frame length register */ + volatile uint32_t pause_quant; /* 0x018 PAUSE_QUANT - Pause quanta register */ + volatile uint32_t rx_fifo_sections; /* 0x01c */ + volatile uint32_t tx_fifo_sections; /* 0x020 */ + volatile uint32_t rx_fifo_almost_f_e; /* 0x024 */ + volatile uint32_t tx_fifo_almost_f_e; /* 0x028 */ + volatile uint32_t hashtable_ctrl; /* 0x02C HASHTABLE_CTRL - Hash table control register */ + volatile uint32_t mdio_cfg_status; /* 0x030 */ + volatile uint32_t mdio_command; /* 0x034 */ + volatile uint32_t mdio_data; /* 0x038 */ + volatile uint32_t mdio_regaddr; /* 0x03c */ + volatile uint32_t status; /* 0x040 */ + volatile uint32_t tx_ipg_len; /* 0x044 TX_IPG_LENGTH - Transmitter inter-packet-gap register */ + volatile uint32_t mac_addr_2; /* 0x048 MAC_ADDR_2 - Lower 32 bits of the second 48-bit MAC address */ + volatile uint32_t mac_addr_3; /* 0x04C MAC_ADDR_3 - Upper 16 bits of the second 48-bit MAC address */ + volatile uint32_t rx_fifo_ptr_rd; /* 0x050 */ + volatile uint32_t rx_fifo_ptr_wr; /* 0x054 */ + volatile uint32_t tx_fifo_ptr_rd; /* 0x058 */ + volatile uint32_t tx_fifo_ptr_wr; /* 0x05c */ + volatile uint32_t imask; /* 0x060 IMASK - Interrupt mask register */ + volatile uint32_t ievent; /* 0x064 IEVENT - Interrupt event register */ + volatile uint32_t udp_port; /* 0x068 Defines a UDP Port number. When an UDP/IP frame is received with a matching UDP destination port, the receive status indication pin ff_rx_ts_frm will be asserted.*/ + volatile uint32_t type_1588v2; /* 0x06c Type field for 1588v2 layer 2 frames. IEEE1588 defines the type 0x88f7 for 1588 frames. 
*/ + volatile uint32_t TENGEC_RESERVED4[4]; +/*10Ge Statistics Counter */ + volatile uint64_t TFRM; /* 80 aFramesTransmittedOK */ + volatile uint64_t RFRM; /* 88 aFramesReceivedOK */ + volatile uint64_t RFCS; /* 90 aFrameCheckSequenceErrors */ + volatile uint64_t RALN; /* 98 aAlignmentErrors */ + volatile uint64_t TXPF; /* A0 aPAUSEMACCtrlFramesTransmitted */ + volatile uint64_t RXPF; /* A8 aPAUSEMACCtrlFramesReceived */ + volatile uint64_t RLONG; /* B0 aFrameTooLongErrors */ + volatile uint64_t RFLR; /* B8 aInRangeLengthErrors */ + volatile uint64_t TVLAN; /* C0 VLANTransmittedOK */ + volatile uint64_t RVLAN; /* C8 VLANReceivedOK */ + volatile uint64_t TOCT; /* D0 ifOutOctets */ + volatile uint64_t ROCT; /* D8 ifInOctets */ + volatile uint64_t RUCA; /* E0 ifInUcastPkts */ + volatile uint64_t RMCA; /* E8 ifInMulticastPkts */ + volatile uint64_t RBCA; /* F0 ifInBroadcastPkts */ + volatile uint64_t TERR; /* F8 ifOutErrors */ + volatile uint32_t TENGEC_RESERVED6[2]; + volatile uint64_t TUCA; /* 108 ifOutUcastPkts */ + volatile uint64_t TMCA; /* 110 ifOutMulticastPkts */ + volatile uint64_t TBCA; /* 118 ifOutBroadcastPkts */ + volatile uint64_t RDRP; /* 120 etherStatsDropEvents */ + volatile uint64_t REOCT; /* 128 etherStatsOctets */ + volatile uint64_t RPKT; /* 130 etherStatsPkts */ + volatile uint64_t TRUND; /* 138 etherStatsUndersizePkts */ + volatile uint64_t R64; /* 140 etherStatsPkts64Octets */ + volatile uint64_t R127; /* 148 etherStatsPkts65to127Octets */ + volatile uint64_t R255; /* 150 etherStatsPkts128to255Octets */ + volatile uint64_t R511; /* 158 etherStatsPkts256to511Octets */ + volatile uint64_t R1023; /* 160 etherStatsPkts512to1023Octets */ + volatile uint64_t R1518; /* 168 etherStatsPkts1024to1518Octets */ + volatile uint64_t R1519X; /* 170 etherStatsPkts1519toX */ + volatile uint64_t TROVR; /* 178 etherStatsOversizePkts */ + volatile uint64_t TRJBR; /* 180 etherStatsJabbers */ + volatile uint64_t TRFRG; /* 188 etherStatsFragments */ + volatile uint64_t RERR; /* 190 ifInErrors */ +} _PackedType t_TgecMemMap; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +typedef struct { + bool wanModeEnable; /* WAN Mode Enable. Sets WAN mode (1) or LAN mode (0, default) of operation. */ + bool promiscuousModeEnable; /* Enables MAC promiscuous operation. When set to '1', all frames are received without any MAC address filtering, when set to '0' (Reset value) Unicast Frames with a destination address not matching the Core MAC Address (MAC Address programmed in Registers MAC_ADDR_0 and MAC_ADDR_1 or the MAC address programmed in Registers MAC_ADDR_2 and MAC_ADDR_3 ) are rejected. */ + bool pauseForwardEnable; /* Terminate / Forward Pause Frames. If set to '1' pause frames are forwarded to the user application. When set to '0' (Reset value) pause frames are terminated and discarded within the MAC. */ + bool pauseIgnore; /* Ignore Pause Frame Quanta. If set to '1' received pause frames are ignored by the MAC. When set to '0' (Reset value) the transmit process is stopped for the amount of time specified in the pause quanta received within a pause frame. */ + bool txAddrInsEnable; /* Set Source MAC Address on Transmit. + If set to '1' the MAC overwrites the source MAC address received from the Client Interface with one of the MAC addresses (Refer to section 10.4) + If set to '0' (Reset value), the source MAC address from the Client Interface is transmitted unmodified to the line. 
*/ + bool loopbackEnable; /* PHY Interface Loopback. When set to '1', the signal loop_ena is set to '1', when set to '0' (Reset value) the signal loop_ena is set to '0'. */ + bool cmdFrameEnable; /* Enables reception of all command frames. When set to '1' all Command Frames are accepted, when set to '0' (Reset Value) only Pause Frames are accepted and all other Command Frames are rejected. */ + bool rxErrorDiscard; /* Receive Errored Frame Discard Enable. When set to 1, any frame received with an error is discarded in the Core and not forwarded to the Client interface. When set to 0 (Reset value), errored Frames are forwarded to the Client interface with ff_rx_err asserted. */ + bool phyTxenaOn; /* PHY Transmit Enable. When set to '1', the signal phy_txena is set to '1', when set to '0' (Reset value) the signal phy_txena is set to '0' */ + bool sendIdleEnable; /* Force Idle Generation. When set to '1', the MAC permanently sends XGMII Idle sequences even when faults are received. */ + bool noLengthCheckEnable; /* Payload Length Check Disable. When set to 0 (Reset value), the Core checks the frame's payload length with the Frame Length/Type field, when set to 1, the payload length check is disabled. */ + bool lgthCheckNostdr; /* The Core interprets the Length/Type field differently depending on the value of this Bit */ + bool timeStampEnable; /* This bit selects between enabling and disabling the IEEE 1588 functionality. + 1: IEEE 1588 is enabled. + 0: IEEE 1588 is disabled. */ + bool rxSfdAny; /* Enables, when set, that any character is allowed at the SFD position of the preamble and the frame will be accepted. + If cleared (default) the frame is accepted only if the 8th byte of the preamble contains the SFD value 0xd5. If another value is received, the frame is discarded and the alignment error counter increments. */ + bool rxPblFwd; /* Receive Preamble Forwarding (custom preamble). + If set, the first word (ff_rx_sop) of every received frame contains the preamble of the frame. The frame data starts with the 2nd word from the FIFO. + If the bit is cleared (default) the preamble is removed from the frame before it is written into the receive FIFO. */ + bool txPblFwd; /* Transmit Preamble Forwarding (custom preamble). + If set, the first word written into the TX FIFO is considered as frame preamble. The MAC will not add a preamble in front of the frame. Note that bits 7:0 of the preamble word will still be overwritten with the XGMII start character upon transmission. + If cleared (default) the MAC */ + uint32_t txIpgLength; /*Transmit Inter-Packet-Gap (IPG) value. + A 6-bit value: Depending on LAN or WAN mode of operation (see COMMAND_CONFIG, 19.2.1 page 91) the value has the following meaning: + - LAN Mode: Number of octets in steps of 4. Valid values are 8, 12, 16, ... 100. DIC is fully supported (see 10.6.1 page 49) for any setting. A default of 12 (reset value) must be set to conform to IEEE802.3ae. Warning: When set to 8, PCS layers may not be able to perform clock rate compensation. + - WAN Mode: Stretch factor. Valid values are 4..15. The stretch factor is calculated as (value+1)*8. A default of 12 (reset value) must be set to conform to IEEE 802.3ae (i.e. 13*8=104). A larger value shrinks the IPG (increasing bandwidth). */ +/*.. 
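+                                    (Note: under FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007,
+                                    TgecInit() resets the TX_IPG_LENGTH register field to
+                                    DEFAULT_txIpgLength, i.e. 12.)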
*/ + uint16_t maxFrameLength; + bool debugMode; + uint16_t pauseTime; +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 + bool skipFman11Workaround; +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ +} t_TgecDriverParam; + +typedef struct { + t_FmMacControllerDriver fmMacControllerDriver; /**< Upper Mac control block */ + t_Handle h_App; /**< Handle to the upper layer application */ + t_TgecMemMap *p_MemMap; /**< pointer to 10G memory mapped registers. */ + t_TgecMiiAccessMemMap *p_MiiMemMap; /**< pointer to MII memory mapped registers. */ + uint64_t addr; /**< MAC address of device; */ + e_EnetMode enetMode; /**< Ethernet physical interface */ + t_FmMacExceptionCallback *f_Exception; + int mdioIrq; + t_FmMacExceptionCallback *f_Event; + bool indAddrRegUsed[TGEC_NUM_OF_PADDRS]; /**< Whether a particular individual address recognition register is being used */ + uint64_t paddr[TGEC_NUM_OF_PADDRS]; /**< MAC address for particular individual address recognition register */ + uint8_t numOfIndAddrInRegs; /**< Number of individual addresses in registers for this station. */ + t_EthHash *p_MulticastAddrHash; /**< pointer to driver's global address hash table */ + t_EthHash *p_UnicastAddrHash; /**< pointer to driver's individual address hash table */ + bool debugMode; + uint8_t macId; + uint32_t exceptions; + t_TgecDriverParam *p_TgecDriverParam; +} t_Tgec; + + +t_Error TGEC_MII_WritePhyReg(t_Handle h_Tgec, uint8_t phyAddr, uint8_t reg, uint16_t data); +t_Error TGEC_MII_ReadPhyReg(t_Handle h_Tgec, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data); + + +#endif /* __TGEC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec_mii_acc.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec_mii_acc.c @@ -0,0 +1,121 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#include "error_ext.h" +#include "std_ext.h" +#include "fm_mac.h" +#include "tgec.h" +#include "xx_ext.h" + + +/*****************************************************************************/ +t_Error TGEC_MII_WritePhyReg(t_Handle h_Tgec, + uint8_t phyAddr, + uint8_t reg, + uint16_t data) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMiiAccessMemMap *p_MiiAccess; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MiiMemMap, E_INVALID_HANDLE); + + p_MiiAccess = p_Tgec->p_MiiMemMap; + + while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY) + XX_UDelay (1); + + WRITE_UINT32(p_MiiAccess->mdio_command, phyAddr); + + WRITE_UINT32(p_MiiAccess->mdio_regaddr, reg); + + CORE_MemoryBarrier(); + + while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY) + XX_UDelay (1); + + WRITE_UINT32(p_MiiAccess->mdio_data, data); + + CORE_MemoryBarrier(); + + while ((GET_UINT32(p_MiiAccess->mdio_data)) & MIIDATA_BUSY) + XX_UDelay (1); + + return E_OK; +} + +/*****************************************************************************/ +t_Error TGEC_MII_ReadPhyReg(t_Handle h_Tgec, + uint8_t phyAddr, + uint8_t reg, + uint16_t *p_Data) +{ + t_Tgec *p_Tgec = (t_Tgec *)h_Tgec; + t_TgecMiiAccessMemMap *p_MiiAccess; + uint32_t cfg_status; + + SANITY_CHECK_RETURN_ERROR(p_Tgec, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Tgec->p_MiiMemMap, E_INVALID_HANDLE); + + p_MiiAccess = p_Tgec->p_MiiMemMap; + + while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY) + XX_UDelay (1); + + WRITE_UINT32(p_MiiAccess->mdio_command, phyAddr); + + WRITE_UINT32(p_MiiAccess->mdio_regaddr, reg); + + CORE_MemoryBarrier(); + + while ((GET_UINT32(p_MiiAccess->mdio_cfg_status)) & MIIMIND_BUSY) + XX_UDelay (1); + + WRITE_UINT32(p_MiiAccess->mdio_command, (uint32_t)(phyAddr | MIIMCOM_READ_CYCLE)); + + CORE_MemoryBarrier(); + + while ((GET_UINT32(p_MiiAccess->mdio_data)) & MIIDATA_BUSY) + XX_UDelay (1); + + *p_Data = (uint16_t)GET_UINT32(p_MiiAccess->mdio_data); + + cfg_status = GET_UINT32(p_MiiAccess->mdio_cfg_status); + + if (cfg_status & MIIMIND_READ_ERROR) + RETURN_ERROR(MINOR, E_INVALID_VALUE, + ("Read Error: phyAddr 0x%x, dev 0x%x, reg 0x%x, cfg_status 0x%x", + ((phyAddr & 0xe0)>>5), (phyAddr & 0x1f), reg, cfg_status)); + + return E_OK; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec_mii_acc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/MAC/tgec_mii_acc.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __TGEC_MII_ACC_H +#define __TGEC_MII_ACC_H + +#include "std_ext.h" + + +/* MII Management Command Register */ +#define MIIMCOM_READ_POST_INCREMENT 0x00004000 +#define MIIMCOM_READ_CYCLE 0x00008000 +#define MIIMCOM_SCAN_CYCLE 0x00000800 +#define MIIMCOM_PREAMBLE_DISABLE 0x00000400 + +#define MIIMCOM_MDIO_HOLD_1_REG_CLK 0 +#define MIIMCOM_MDIO_HOLD_2_REG_CLK 1 +#define MIIMCOM_MDIO_HOLD_3_REG_CLK 2 +#define MIIMCOM_MDIO_HOLD_4_REG_CLK 3 + +#define MIIMCOM_DIV_MASK 0x0000ff00 +#define MIIMCOM_DIV_SHIFT 8 + +/* MII Management Indicator Register */ +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_READ_ERROR 0x00000002 + +#define MIIDATA_BUSY 0x80000000 + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +/*----------------------------------------------------*/ +/* MII Configuration Control Memory Map Registers */ +/*----------------------------------------------------*/ +typedef _Packed struct t_TgecMiiAccessMemMap +{ + volatile uint32_t mdio_cfg_status; /* 0x030 */ + volatile uint32_t mdio_command; /* 0x034 */ + volatile uint32_t mdio_data; /* 0x038 */ + volatile uint32_t mdio_regaddr; /* 0x03c */ +} _PackedType t_TgecMiiAccessMemMap ; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... 
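+                    (t_TgecMiiAccessMemMap groups the four MDIO management registers;
+                    tgec.c maps it at baseAddr + TGEC_TO_MII_OFFSET, and the helpers in
+                    tgec_mii_acc.c poll MIIMIND_BUSY / MIIDATA_BUSY around each access.)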
*/ + + +#endif /* __TGEC_MII_ACC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Makefile @@ -0,0 +1,21 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk +NCSW_FM_INC = $(srctree)/drivers/net/dpa/NetCommSw/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + + +obj-y += fsl-ncsw-PFM1.o + +fsl-ncsw-PFM1-objs := fm.o fm_muram.o + +obj-y += MAC/ +obj-y += Pcd/ +obj-y += Port/ +obj-y += HC/ +obj-y += Rtc/ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +NCSW_FM_INC = $(srctree)/drivers/net/dpa/NetCommSw/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + +obj-y += fsl-ncsw-Pcd.o + +fsl-ncsw-Pcd-objs := fm_cc.o fm_kg.o fm_pcd.o fm_plcr.o fm_prs.o + + + + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_cc.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_cc.c @@ -0,0 +1,3467 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_cc.c + + @Description FM CC ... 
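+               (coarse classification: construction and bookkeeping of CC nodes and
+               trees, validation of next-engine parameters, and translation of key
+               extraction definitions into parse codes and key sizes)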
+*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "fm_pcd_ext.h" +#include "fm_muram_ext.h" + +#include "fm_common.h" +#include "fm_hc.h" +#include "fm_cc.h" + + +#if defined(FM_CAPWAP_SUPPORT) +#define FM_PCD_CC_MANIP +#endif /* defined(FM_CAPWAP_SUPPORT) || ... */ + + +t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree, uint8_t manipIndx) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree; + + ASSERT_COND(p_FmPcdCcTree); + + return p_FmPcdCcTree->fmPcdCcSavedManipParams[manipIndx]; +} + +void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree, t_Handle h_SavedManipParams, uint8_t manipIndx) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree; + + ASSERT_COND(p_FmPcdCcTree); + + p_FmPcdCcTree->fmPcdCcSavedManipParams[manipIndx] = h_SavedManipParams; +} + +uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_FmPcdCcNode); + return p_FmPcdCcNode->parseCode; +} + +uint8_t FmPcdCcGetOffset(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_FmPcdCcNode); + return p_FmPcdCcNode->offset; +} + +uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_FmPcdCcNode); + return p_FmPcdCcNode->numOfKeys; +} +static void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo) +{ + t_CcNodeInformation *p_CcInformation; + uint32_t intFlags; + + p_CcInformation = (t_CcNodeInformation *)XX_Malloc(sizeof(t_CcNodeInformation)); + if (p_CcInformation) + { + memset(p_CcInformation, 0, sizeof(t_CcNodeInformation)); + memcpy(p_CcInformation, p_CcInfo, sizeof(t_CcNodeInformation)); + INIT_LIST(&p_CcInformation->node); + + intFlags = XX_DisableAllIntr(); + LIST_AddToTail(&p_CcInformation->node, p_List); + XX_RestoreAllIntr(intFlags); + } + else + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Node Information")); +} + + +static t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info) +{ + t_CcNodeInformation *p_CcInformation; + t_List *p_Pos; + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List); p_Pos = LIST_NEXT(p_Pos)) + { + p_CcInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcInformation->h_CcNode); + if(p_CcInformation->h_CcNode == h_Info) + { + XX_RestoreAllIntr(intFlags); + return p_CcInformation; + } + } + XX_RestoreAllIntr(intFlags); + return NULL; +} + +static void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info) +{ + t_CcNodeInformation *p_CcInformation = NULL; + uint32_t intFlags; + t_List *p_Pos; + + intFlags = XX_DisableAllIntr(); + if (LIST_IsEmpty(p_List)) + { + XX_RestoreAllIntr(intFlags); + return; + } + + for (p_Pos = LIST_FIRST(p_List); p_Pos != (p_List); p_Pos = LIST_NEXT(p_Pos)) + { + p_CcInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcInformation->h_CcNode); + if (p_CcInformation->h_CcNode == h_Info) + break; + } + if (p_CcInformation) + LIST_DelAndInit(&p_CcInformation->node); + XX_RestoreAllIntr(intFlags); +} + +static t_Error FmPcdCcSetRequiredAction(t_Handle h_FmPcd, uint32_t requiredAction, t_FmPcdCcNextEngineAndRequiredActionParams *p_CcNextEngineParamsTmp, + t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree) +{ + + t_AdOfTypeResult *p_AdTmp = (t_AdOfTypeResult *)h_AdTmp; + uint32_t tmpReg32; + t_Error err; + 
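+    /*
+     * Walks 'numOfEntries' next-engine descriptors (their action descriptors sit
+     * FM_PCD_CC_AD_ENTRY_SIZE bytes apart) and applies 'requiredAction' to each one:
+     * linking or unlinking a child CC node to/from the tree, validating that a KeyGen
+     * next-engine uses a valid always-direct scheme, resolving a shared policer
+     * profile, or patching the enqueue NIA for the without-DMA mode. 'shadowAction'
+     * records what has already been applied, so repeated or recursive calls skip
+     * entries that are already up to date.
+     */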
t_FmPcdCcNode *p_FmPcdCcNode; + int i = 0; + uint16_t tmp = 0; + uint16_t profileId; + uint8_t relativeSchemeId, physicalSchemeId; + t_CcNodeInformation ccNodeInfo; + + for(i = 0; i < numOfEntries; i++) + { + if(i == 0) + h_AdTmp = PTR_MOVE(h_AdTmp, i*FM_PCD_CC_AD_ENTRY_SIZE); + else + h_AdTmp = PTR_MOVE(h_AdTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + if(p_CcNextEngineParamsTmp[i].shadowAction & requiredAction) + continue; + switch(p_CcNextEngineParamsTmp[i].nextEngineParams.nextEngine) + { + case(e_FM_PCD_CC): + if(requiredAction) + { + p_FmPcdCcNode = p_CcNextEngineParamsTmp[i].nextEngineParams.params.ccParams.h_CcNode; + ASSERT_COND(p_FmPcdCcNode); + if(p_FmPcdCcNode->shadowAction == requiredAction) + break; + if((requiredAction & UPDATE_CC_WITH_TREE) && !(p_FmPcdCcNode->shadowAction & UPDATE_CC_WITH_TREE)) + { + + ASSERT_COND(LIST_NumOfObjs(&p_FmPcdCcNode->ccTreesLst) == 0); + if(p_FmPcdCcNode->shadowAction & UPDATE_CC_WITH_DELETE_TREE) + p_FmPcdCcNode->shadowAction &= ~UPDATE_CC_WITH_DELETE_TREE; + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = h_Tree; + EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNode->ccTreesLst, &ccNodeInfo); + p_CcNextEngineParamsTmp[i].shadowAction |= UPDATE_CC_WITH_TREE; + } + if((requiredAction & UPDATE_CC_WITH_DELETE_TREE) && !(p_FmPcdCcNode->shadowAction & UPDATE_CC_WITH_DELETE_TREE)) + { + ASSERT_COND(LIST_NumOfObjs(&p_FmPcdCcNode->ccTreesLst) == 1); + if(p_FmPcdCcNode->shadowAction & UPDATE_CC_WITH_TREE) + p_FmPcdCcNode->shadowAction &= ~UPDATE_CC_WITH_TREE; + DequeueNodeInfoFromRelevantLst(&p_FmPcdCcNode->ccTreesLst, h_Tree); + p_CcNextEngineParamsTmp[i].shadowAction |= UPDATE_CC_WITH_DELETE_TREE; + } + if(p_FmPcdCcNode->nextEngineAndRequiredAction[p_FmPcdCcNode->numOfKeys].nextEngineParams.nextEngine != e_FM_PCD_INVALID) + tmp = (uint8_t)(p_FmPcdCcNode->numOfKeys + 1); + else + tmp = p_FmPcdCcNode->numOfKeys; + err = FmPcdCcSetRequiredAction(h_FmPcd, requiredAction, p_FmPcdCcNode->nextEngineAndRequiredAction, p_FmPcdCcNode->h_AdTable, tmp, h_Tree); + if(err != E_OK) + return err; + p_FmPcdCcNode->shadowAction |= requiredAction; + } + break; + + case(e_FM_PCD_KG): + if((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) + { + physicalSchemeId = (uint8_t)(PTR_TO_UINT(p_CcNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme)-1); + relativeSchemeId = FmPcdKgGetRelativeSchemeId(h_FmPcd, physicalSchemeId); + if(relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + if (!FmPcdKgIsSchemeValidSw(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme.")); + if(!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("For this action scheme has to be direct.")); + err = FmPcdKgCcGetSetParams(h_FmPcd, p_CcNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme, requiredAction); + if(err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + p_CcNextEngineParamsTmp[i].shadowAction |= requiredAction; + } + break; + + case(e_FM_PCD_PLCR): + if((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) + { + if(!p_CcNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.overrideParams) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this intialization only overrideFqid can be intiizliaes")); + if(!p_CcNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.sharedProfile) + 
RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this intialization only overrideFqid can be intiizliaes")); + err = FmPcdPlcrGetAbsoluteProfileId(h_FmPcd, e_FM_PCD_PLCR_SHARED, NULL, p_CcNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.newRelativeProfileId, &profileId); + if(err!= E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + err = FmPcdPlcrCcGetSetParams(h_FmPcd, profileId, requiredAction); + if(err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + p_CcNextEngineParamsTmp[i].shadowAction |= requiredAction; + } + break; + + case(e_FM_PCD_DONE): + if((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) + { + tmpReg32 = GET_UINT32(p_AdTmp->nia); + if((tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)) != (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine was previosely assigned not as PCD_DONE")); + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_AdTmp->nia, tmpReg32); + p_CcNextEngineParamsTmp[i].shadowAction |= requiredAction; + } + break; + + default: + break; + } + } + + return E_OK; +} + +static t_Error CcUpdateParam(t_Handle h_FmPcd, + t_Handle h_FmPort, + t_FmPcdCcNextEngineAndRequiredActionParams *p_CcNextEngineParams, + uint16_t numOfEntries, + t_Handle h_Ad, + bool validate, + uint16_t level, + t_Handle h_FmTree, + bool modify) +{ + t_CcNodeInformation *p_CcNodeInfo; + t_FmPcdCcNode *p_FmPcdCcNode; + t_Error err; + uint16_t tmp = 0; + int i = 0; + + level++; + + if(numOfEntries) + { + for(i = 0; i < numOfEntries; i++) + { + if(i == 0) + h_Ad = PTR_MOVE(h_Ad, i*FM_PCD_CC_AD_ENTRY_SIZE); + else + h_Ad = PTR_MOVE(h_Ad, FM_PCD_CC_AD_ENTRY_SIZE); + + if(p_CcNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + p_FmPcdCcNode = p_CcNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; + ASSERT_COND(p_FmPcdCcNode); + p_CcNodeInfo = FindNodeInfoInReleventLst(&p_FmPcdCcNode->ccTreesLst,h_FmTree); + ASSERT_COND(p_CcNodeInfo); + p_CcNodeInfo->index = level; +#ifdef FM_PCD_CC_MANIP + if(p_CcNextEngineParams[i].nextEngineParams.h_Manip) + { + err = FmPcdManipUpdate(h_FmPcd, h_FmPort, p_CcNextEngineParams[i].nextEngineParams.h_Manip, h_Ad, validate, p_CcNodeInfo->index, h_FmTree, modify); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } +#endif /* FM_PCD_CC_MANIP */ + + if(p_FmPcdCcNode->nextEngineAndRequiredAction[p_FmPcdCcNode->numOfKeys].nextEngineParams.nextEngine != e_FM_PCD_INVALID) + tmp = (uint8_t)(p_FmPcdCcNode->numOfKeys + 1); + else + tmp = p_FmPcdCcNode->numOfKeys; + + err = CcUpdateParam(h_FmPcd, h_FmPort, p_FmPcdCcNode->nextEngineAndRequiredAction, tmp, p_FmPcdCcNode->h_AdTable, validate,level, h_FmTree, modify); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } +#ifdef FM_PCD_CC_MANIP + else + { + if(p_CcNextEngineParams[i].nextEngineParams.h_Manip) + { + err = FmPcdManipUpdate(h_FmPcd, h_FmPort, p_CcNextEngineParams[i].nextEngineParams.h_Manip, h_Ad, validate, level,h_FmTree, modify); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } +#endif /* FM_PCD_CC_MANIP */ + } + } + + return E_OK; +} +static bool IsNodeInModifiedState(t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_CcNode); + + return p_CcNode->modifiedState; +} + +static void UpdateNodeWithModifiedState(t_Handle h_CcNode, bool modifiedState) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_CcNode; + + ASSERT_COND(p_FmPcdCcNode); + + p_FmPcdCcNode->modifiedState = modifiedState; +} + +static ccPrivateInfo_t 
IcDefineCode(t_FmPcdCcNodeParams *p_CcNodeParam) +{ + switch (p_CcNodeParam->extractCcParams.extractNonHdr.action) + { + case(e_FM_PCD_ACTION_EXACT_MATCH): + switch(p_CcNodeParam->extractCcParams.extractNonHdr.src) + { + case(e_FM_PCD_EXTRACT_FROM_KEY): + return CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH; + case(e_FM_PCD_EXTRACT_FROM_HASH): + return CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH; + default: + return CC_PRIVATE_INFO_NONE; + } + case(e_FM_PCD_ACTION_INDEXED_LOOKUP): + switch(p_CcNodeParam->extractCcParams.extractNonHdr.src) + { + case(e_FM_PCD_EXTRACT_FROM_HASH): + return CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP; + case(e_FM_PCD_EXTRACT_FROM_FLOW_ID): + return CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP; + default: + return CC_PRIVATE_INFO_NONE; + } + default: + break; + } + return CC_PRIVATE_INFO_NONE; +} + +static t_CcNodeInformation * DequeueAdditionalInfoFromRelevantLst(t_List *p_List) +{ + t_CcNodeInformation *p_CcNodeInfo = NULL; + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + if (!LIST_IsEmpty(p_List)) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_List->p_Next); + LIST_DelAndInit(&p_CcNodeInfo->node); + } + XX_RestoreAllIntr(intFlags); + return p_CcNodeInfo; +} + +static void ReleaseLst(t_List *p_List) +{ + t_CcNodeInformation *p_CcNodeInfo = NULL; + + if(!LIST_IsEmpty(p_List)) + { + p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List); + while (p_CcNodeInfo) + { + XX_Free(p_CcNodeInfo); + p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List); + } + } + LIST_DelAndInit(p_List); +} + +void FmPcdCcTreeReleaseLock(t_Handle h_FmPcdCcTree) +{ + RELEASE_LOCK(((t_FmPcdCcTree *)h_FmPcdCcTree)->lock); +} + +void FmPcdCcNodeTreeReleaseLock(t_List *p_List) +{ + t_List *p_Pos; + t_CcNodeInformation *p_CcNodeInfo; + t_Handle h_FmPcdCcTree; + + LIST_FOR_EACH(p_Pos, p_List) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos); + h_FmPcdCcTree = p_CcNodeInfo->h_CcNode; + FmPcdCcTreeReleaseLock(h_FmPcdCcTree); + } + ReleaseLst(p_List); +} + +static void DeleteNode(t_FmPcdCcNode *p_FmPcdCcNode) +{ + if(p_FmPcdCcNode) + { + if(p_FmPcdCcNode->p_GlblMask) + { + XX_Free(p_FmPcdCcNode->p_GlblMask); + p_FmPcdCcNode->p_GlblMask = NULL; + } + if(p_FmPcdCcNode->h_KeysMatchTable) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), p_FmPcdCcNode->h_KeysMatchTable); + p_FmPcdCcNode->h_KeysMatchTable = NULL; + } + if(p_FmPcdCcNode->h_AdTable) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), p_FmPcdCcNode->h_AdTable); + p_FmPcdCcNode->h_AdTable = NULL; + } + + ReleaseLst(&p_FmPcdCcNode->ccPrevNodesLst); + ReleaseLst(&p_FmPcdCcNode->ccTreeIdLst); + ReleaseLst(&p_FmPcdCcNode->ccTreesLst); + + XX_Free(p_FmPcdCcNode); + } +} + +static void DeleteTree(t_FmPcdCcTree *p_FmPcdTree, t_FmPcd *p_FmPcd) +{ + if(p_FmPcdTree) + { + if(p_FmPcdTree->ccTreeBaseAddr) + { + FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd), UINT_TO_PTR(p_FmPcdTree->ccTreeBaseAddr)); + p_FmPcdTree->ccTreeBaseAddr = 0; + } + + ReleaseLst(&p_FmPcdTree->fmPortsLst); + + XX_Free(p_FmPcdTree); + } +} + +static void UpdateNodeOwner(t_FmPcdCcNode *p_FmPcdCcNode, bool add) +{ + ASSERT_COND(p_FmPcdCcNode); + + if(add) + p_FmPcdCcNode->owners++; + else + { + ASSERT_COND(p_FmPcdCcNode->owners); + p_FmPcdCcNode->owners--; + } +} + +static void GetCcExtractKeySize(uint8_t parseCodeRealSize, uint8_t *parseCodeCcSize) +{ + if((parseCodeRealSize > 0) && (parseCodeRealSize < 2)) + *parseCodeCcSize = 1; + else if(parseCodeRealSize == 2) + *parseCodeCcSize = 2; + else if((parseCodeRealSize > 2) && (parseCodeRealSize <= 4)) + 
*parseCodeCcSize = 4; + else if((parseCodeRealSize > 4) && (parseCodeRealSize <= 8)) + *parseCodeCcSize = 8; + else if((parseCodeRealSize > 8) && (parseCodeRealSize <= 16)) + *parseCodeCcSize = 16; + else if((parseCodeRealSize > 16) && (parseCodeRealSize <= 24)) + *parseCodeCcSize = 24; + else if((parseCodeRealSize > 24) && (parseCodeRealSize <= 32)) + *parseCodeCcSize = 32; + else if((parseCodeRealSize > 32) && (parseCodeRealSize <= 40)) + *parseCodeCcSize = 40; + else if((parseCodeRealSize > 40) && (parseCodeRealSize <= 48)) + *parseCodeCcSize = 48; + else if((parseCodeRealSize > 48) && (parseCodeRealSize <= 56)) + *parseCodeCcSize = 56; + else + *parseCodeCcSize = 0; +} + +static void GetSizeHeaderField(e_NetHeaderType hdr,t_FmPcdFields field,uint8_t *parseCodeRealSize) +{ + switch(hdr) + { + case (HEADER_TYPE_ETH): + switch(field.eth) + { + case(NET_HEADER_FIELD_ETH_DA): + *parseCodeRealSize = 6; + break; + case(NET_HEADER_FIELD_ETH_SA): + *parseCodeRealSize = 6; + break; + case(NET_HEADER_FIELD_ETH_TYPE): + *parseCodeRealSize = 2; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case(HEADER_TYPE_PPPoE): + switch(field.pppoe) + { + case(NET_HEADER_FIELD_PPPoE_PID): + *parseCodeRealSize = 2; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_VLAN): + switch(field.vlan) + { + case(NET_HEADER_FIELD_VLAN_TCI): + *parseCodeRealSize = 2; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported2")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_MPLS): + switch(field.mpls) + { + case(NET_HEADER_FIELD_MPLS_LABEL_STACK): + *parseCodeRealSize = 4; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported3")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_IPv4): + switch(field.ipv4) + { + case(NET_HEADER_FIELD_IPv4_DST_IP): + case(NET_HEADER_FIELD_IPv4_SRC_IP): + *parseCodeRealSize = 4; + break; + case(NET_HEADER_FIELD_IPv4_TOS): + case(NET_HEADER_FIELD_IPv4_PROTO): + *parseCodeRealSize = 1; + break; + case(NET_HEADER_FIELD_IPv4_DST_IP | NET_HEADER_FIELD_IPv4_SRC_IP): + *parseCodeRealSize = 8; + break; + case(NET_HEADER_FIELD_IPv4_TTL): + *parseCodeRealSize = 1; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported4")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_IPv6): + switch(field.ipv6) + { + case(NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): + *parseCodeRealSize = 4; + break; + case(NET_HEADER_FIELD_IPv6_NEXT_HDR): + case(NET_HEADER_FIELD_IPv6_HOP_LIMIT): + *parseCodeRealSize = 1; + break; + case(NET_HEADER_FIELD_IPv6_DST_IP): + case(NET_HEADER_FIELD_IPv6_SRC_IP): + *parseCodeRealSize = 16; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_GRE): + switch(field.gre) + { + case(NET_HEADER_FIELD_GRE_TYPE): + *parseCodeRealSize = 2; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported6")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_MINENCAP): + switch(field.minencap) + { + case(NET_HEADER_FIELD_MINENCAP_TYPE): + *parseCodeRealSize = 1; + break; + 
case(NET_HEADER_FIELD_MINENCAP_DST_IP): + case(NET_HEADER_FIELD_MINENCAP_SRC_IP): + *parseCodeRealSize = 4; + break; + case(NET_HEADER_FIELD_MINENCAP_SRC_IP | NET_HEADER_FIELD_MINENCAP_DST_IP): + *parseCodeRealSize = 8; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported7")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_TCP): + switch(field.tcp) + { + case(NET_HEADER_FIELD_TCP_PORT_SRC): + case(NET_HEADER_FIELD_TCP_PORT_DST): + *parseCodeRealSize = 2; + break; + case(NET_HEADER_FIELD_TCP_PORT_SRC | NET_HEADER_FIELD_TCP_PORT_DST): + *parseCodeRealSize = 4; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported8")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + case (HEADER_TYPE_UDP): + switch(field.udp) + { + case(NET_HEADER_FIELD_UDP_PORT_SRC): + case(NET_HEADER_FIELD_UDP_PORT_DST): + *parseCodeRealSize = 2; + break; + case(NET_HEADER_FIELD_UDP_PORT_SRC | NET_HEADER_FIELD_UDP_PORT_DST): + *parseCodeRealSize = 4; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported9")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported10")); + *parseCodeRealSize = CC_SIZE_ILLEGAL; + break; + } +} + +static t_Error ValidateNextEngineParams(t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + uint16_t absoluteProfileId; + t_Error err = E_OK; + uint8_t relativeSchemeId; + + switch(p_FmPcdCcNextEngineParams->nextEngine) + { + case(e_FM_PCD_INVALID): + err = E_NOT_SUPPORTED; + break; + case(e_FM_PCD_DONE): + if(p_FmPcdCcNextEngineParams->params.enqueueParams.action == e_FM_PCD_ENQ_FRAME) + { + if(p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid && + !p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("not defined fqid for control flow for BMI next engine ")); + if(p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid & ~0x00FFFFFF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidForCtrlFlow must be between 1 and 2^24-1")); + } + break; + case(e_FM_PCD_KG): + relativeSchemeId = FmPcdKgGetRelativeSchemeId(h_FmPcd, (uint8_t)(PTR_TO_UINT(p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme)-1)); + if(relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if(!FmPcdKgIsSchemeValidSw(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("not valid schemeIndex in KG next engine param")); + if(!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("CC Node may point only to a scheme that is always direct.")); + break; + case(e_FM_PCD_PLCR): + if(p_FmPcdCcNextEngineParams->params.plcrParams.overrideParams) + { + /* if private policer profile, it may be uninitialized yet, therefor no checks are done at this stage */ + if(p_FmPcdCcNextEngineParams->params.plcrParams.sharedProfile) + { + err = FmPcdPlcrGetAbsoluteProfileId(h_FmPcd,e_FM_PCD_PLCR_SHARED,NULL,p_FmPcdCcNextEngineParams->params.plcrParams.newRelativeProfileId, &absoluteProfileId); + if(err) + RETURN_ERROR(MAJOR, err, ("Shared profile offset is out of range")); + if(!FmPcdPlcrIsProfileValid(h_FmPcd, absoluteProfileId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile")); + } + else + { + } + /* TODO - add check according to the revision of the chip. 
+ if(!p_FmPcdCcNextEngineParams->params.plcrParams.newFqid || + (p_FmPcdCcNextEngineParams->params.plcrParams.newFqid & ~0x00FFFFFF)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("newFqid must be between 1 and 2^24-1")); + */ + } + break; + case(e_FM_PCD_CC): + if(!p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode) + RETURN_ERROR(MAJOR, E_NULL_POINTER, ("handler to next Node is NULL")); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine is not correct")); + } + return err; +} + +static uint8_t GetGenParseCode(e_FmPcdExtractFrom src, uint32_t offset, bool glblMask, uint8_t *parseArrayOffset, bool fromIc, ccPrivateInfo_t icCode) +{ + if(!fromIc) + { + switch(src) + { + case(e_FM_PCD_EXTRACT_FROM_FRAME_START): + if(glblMask) + return CC_PC_GENERIC_WITH_MASK ; + else + return CC_PC_GENERIC_WITHOUT_MASK; + case(e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE): + *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET; + if(offset) + return CC_PR_OFFSET; + else + return CC_PR_WITHOUT_OFFSET; + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src")); + return CC_PC_ILLEGAL; + } + } + else + { + switch (icCode) + { + case(CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH): + *parseArrayOffset = 0x50; + return CC_PC_GENERIC_IC_GMASK; + case(CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH): + *parseArrayOffset = 0x48; + return CC_PC_GENERIC_IC_GMASK; + case(CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP): + *parseArrayOffset = 0x48; + return CC_PC_GENERIC_IC_HASH_INDEXED; + case(CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP): + *parseArrayOffset = 0x16; + return CC_PC_GENERIC_IC_HASH_INDEXED; + default: + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src")); + break; + } + } + return CC_PC_ILLEGAL; +} + +static uint8_t GetFullFieldParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex index, t_FmPcdFields field) +{ + + switch(hdr) + { + case(HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + return CC_PC_ILLEGAL; + + case(HEADER_TYPE_ETH): + switch(field.eth) + { + case(NET_HEADER_FIELD_ETH_DA): + return CC_PC_FF_MACDST; + case(NET_HEADER_FIELD_ETH_SA): + return CC_PC_FF_MACSRC; + case(NET_HEADER_FIELD_ETH_TYPE): + return CC_PC_FF_ETYPE; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_VLAN): + switch(field.vlan) + { + case(NET_HEADER_FIELD_VLAN_TCI): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_TCI1; + if(index == e_FM_PCD_HDR_INDEX_LAST) + return CC_PC_FF_TCI2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_MPLS): + switch(field.mpls) + { + case(NET_HEADER_FIELD_MPLS_LABEL_STACK): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_MPLS1; + if(index == e_FM_PCD_HDR_INDEX_LAST) + return CC_PC_FF_MPLS_LAST; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index")); + return CC_PC_ILLEGAL; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_IPv4): + switch(field.ipv4) + { + case(NET_HEADER_FIELD_IPv4_DST_IP): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4DST1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4DST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + 
case(NET_HEADER_FIELD_IPv4_TOS): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4IPTOS_TC1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4IPTOS_TC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv4_PROTO): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4PTYPE1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4PTYPE2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv4_SRC_IP): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4SRC1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4SRC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv4_SRC_IP | NET_HEADER_FIELD_IPv4_DST_IP): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV4SRC1_IPV4DST1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV4SRC2_IPV4DST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv4_TTL): + return CC_PC_FF_IPV4TTL; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_IPv6): + switch(field.ipv6) + { + case(NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv6_NEXT_HDR): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV6PTYPE1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV6PTYPE2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv6_DST_IP): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV6DST1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV6DST2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv6_SRC_IP): + if((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) + return CC_PC_FF_IPV6SRC1; + if(index == e_FM_PCD_HDR_INDEX_2) + return CC_PC_FF_IPV6SRC2; + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); + return CC_PC_ILLEGAL; + case(NET_HEADER_FIELD_IPv6_HOP_LIMIT): + return CC_PC_FF_IPV6HOP_LIMIT; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_GRE): + switch(field.gre) + { + case(NET_HEADER_FIELD_GRE_TYPE): + return CC_PC_FF_GREPTYPE; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + case(HEADER_TYPE_MINENCAP): + switch(field.minencap) + { + case(NET_HEADER_FIELD_MINENCAP_TYPE): + return CC_PC_FF_MINENCAP_PTYPE; + case(NET_HEADER_FIELD_MINENCAP_DST_IP): + return CC_PC_FF_MINENCAP_IPDST; + case(NET_HEADER_FIELD_MINENCAP_SRC_IP): + return CC_PC_FF_MINENCAP_IPSRC; + case(NET_HEADER_FIELD_MINENCAP_SRC_IP | NET_HEADER_FIELD_MINENCAP_DST_IP): + return CC_PC_FF_MINENCAP_IPSRC_IPDST; + default: + 
REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_TCP): + switch(field.tcp) + { + case(NET_HEADER_FIELD_TCP_PORT_SRC): + return CC_PC_FF_L4PSRC; + case(NET_HEADER_FIELD_TCP_PORT_DST): + return CC_PC_FF_L4PDST; + case(NET_HEADER_FIELD_TCP_PORT_DST | NET_HEADER_FIELD_TCP_PORT_SRC): + return CC_PC_FF_L4PSRC_L4PDST; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_PPPoE): + switch(field.pppoe) + { + case(NET_HEADER_FIELD_PPPoE_PID): + return CC_PC_FF_PPPPID; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + case(HEADER_TYPE_UDP): + switch(field.udp) + { + case(NET_HEADER_FIELD_UDP_PORT_SRC): + return CC_PC_FF_L4PSRC; + case(NET_HEADER_FIELD_UDP_PORT_DST): + return CC_PC_FF_L4PDST; + case(NET_HEADER_FIELD_UDP_PORT_DST | NET_HEADER_FIELD_UDP_PORT_SRC): + return CC_PC_FF_L4PSRC_L4PDST; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } +} + +static uint8_t GetPrParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex, uint32_t offset, bool glblMask, uint8_t *parseArrayOffset) +{ + bool offsetRelevant = FALSE; + + if(offset) + offsetRelevant = TRUE; + + switch(hdr){ + case(HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + return CC_PC_ILLEGAL; + case(HEADER_TYPE_ETH): + *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET; + break; + case(HEADER_TYPE_USER_DEFINED_SHIM1): + if(offset || glblMask) + *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET; + else + return CC_PC_PR_SHIM1; + break; + case(HEADER_TYPE_USER_DEFINED_SHIM2): + if(offset || glblMask) + *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET; + else + return CC_PC_PR_SHIM2; + break; + case(HEADER_TYPE_LLC_SNAP): + *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET; + break; + case(HEADER_TYPE_PPPoE): + *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET; + break; + case(HEADER_TYPE_MPLS): + if((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET; + else if(hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET; + else + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index")); + return CC_PC_ILLEGAL; + } + break; + case(HEADER_TYPE_IPv4): + case(HEADER_TYPE_IPv6): + if((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + *parseArrayOffset = CC_PC_PR_IP1_OFFSET; + else if(hdrIndex == e_FM_PCD_HDR_INDEX_2) + *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET; + else + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index")); + return CC_PC_ILLEGAL; + + } + break; + case(HEADER_TYPE_MINENCAP): + *parseArrayOffset = CC_PC_PR_MINENC_OFFSET; + break; + case(HEADER_TYPE_GRE): + *parseArrayOffset = CC_PC_PR_GRE_OFFSET; + break; + case(HEADER_TYPE_TCP): + case(HEADER_TYPE_UDP): + case(HEADER_TYPE_IPSEC_AH): + case(HEADER_TYPE_IPSEC_ESP): + case(HEADER_TYPE_DCCP): + case(HEADER_TYPE_SCTP): + *parseArrayOffset = CC_PC_PR_L4_OFFSET; + break; + + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header for this type of operation")); + return CC_PC_ILLEGAL; + } + + if(offsetRelevant) + return CC_PR_OFFSET; + else + return CC_PR_WITHOUT_OFFSET; +} + +static uint8_t GetFieldParseCode(e_NetHeaderType hdr, t_FmPcdFields 
field, uint32_t offset, uint8_t *parseArrayOffset, e_FmPcdHdrIndex hdrIndex) +{ + bool offsetRelevant = FALSE; + + if(offset) + offsetRelevant = TRUE; + + switch(hdr) + { + case(HEADER_TYPE_NONE): + ASSERT_COND(FALSE); + case(HEADER_TYPE_ETH): + switch(field.eth) + { + case(NET_HEADER_FIELD_ETH_TYPE): + *parseArrayOffset = CC_PC_PR_ETYPE_LAST_OFFSET; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + break; + case(HEADER_TYPE_VLAN): + switch(field.vlan) + { + case(NET_HEADER_FIELD_VLAN_TCI): + if((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) + *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET; + else if(hdrIndex == e_FM_PCD_HDR_INDEX_LAST) + *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); + return CC_PC_ILLEGAL; + } + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header ")); + return CC_PC_ILLEGAL; + } + if(offsetRelevant) + return CC_PR_OFFSET; + else + return CC_PR_WITHOUT_OFFSET; +} + +static void FillAdOfTypeResult(t_Handle p_Ad, t_FmPcd *p_FmPcd, t_FmPcdCcNextEngineParams *p_CcNextEngineParams) +{ + t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult*)p_Ad; + uint32_t tmp = 0, tmpNia = 0; + uint16_t profileId; + t_Handle p_AdNewPtr = NULL; + + p_AdNewPtr = p_AdResult; + +#ifdef FM_PCD_CC_MANIP + if (p_CcNextEngineParams->h_Manip) + FmPcdManipUpdateAdResultForCc(p_CcNextEngineParams->h_Manip, p_Ad, &p_AdNewPtr); +#endif /* FM_PCD_CC_MANIP */ + + if(p_AdNewPtr) + { + switch(p_CcNextEngineParams->nextEngine) + { + case(e_FM_PCD_DONE): + if(p_CcNextEngineParams->params.enqueueParams.action == e_FM_PCD_ENQ_FRAME) + { + if(p_CcNextEngineParams->params.enqueueParams.overrideFqid) + { + tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE; + tmp |= p_CcNextEngineParams->params.enqueueParams.newFqid; + } + else + { + tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE; + tmp |= FM_PCD_AD_RESULT_PLCR_DIS; + } + } + if(p_CcNextEngineParams->params.enqueueParams.action == e_FM_PCD_DROP_FRAME) + tmpNia |= (NIA_ENG_BMI |NIA_BMI_AC_DISCARD); + else + tmpNia |= (NIA_ENG_BMI |NIA_BMI_AC_ENQ_FRAME); + if(p_CcNextEngineParams->params.enqueueParams.statisticsEn) + tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE | FM_PCD_AD_RESULT_STATISTICS_EN; + break; + case(e_FM_PCD_KG): + if(p_CcNextEngineParams->params.kgParams.overrideFqid) + { + tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE; + tmp |= p_CcNextEngineParams->params.kgParams.newFqid; + } + else + { + tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE; + tmp |= FM_PCD_AD_RESULT_PLCR_DIS; + } + tmpNia = NIA_KG_DIRECT; + tmpNia |= NIA_ENG_KG; + tmpNia |= (uint8_t)(PTR_TO_UINT(p_CcNextEngineParams->params.kgParams.h_DirectScheme)-1); + if(p_CcNextEngineParams->params.kgParams.statisticsEn) + tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE | FM_PCD_AD_RESULT_STATISTICS_EN; + break; + case(e_FM_PCD_PLCR): + tmp = 0; + if(p_CcNextEngineParams->params.plcrParams.overrideParams) + { + tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE; + + /* if private policer profile, it may be uninitialized yet, therefor no checks are done at this stage */ + if(p_CcNextEngineParams->params.plcrParams.sharedProfile) + { + tmpNia |= NIA_PLCR_ABSOLUTE; + FmPcdPlcrGetAbsoluteProfileId((t_Handle)p_FmPcd,e_FM_PCD_PLCR_SHARED,NULL,p_CcNextEngineParams->params.plcrParams.newRelativeProfileId, &profileId); + } + else + profileId = p_CcNextEngineParams->params.plcrParams.newRelativeProfileId; + + tmp |= p_CcNextEngineParams->params.plcrParams.newFqid; + 
WRITE_UINT32(p_AdResult->plcrProfile,(uint32_t)((uint32_t)profileId << FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT)); + } + else + tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE; + tmpNia |= NIA_ENG_PLCR | p_CcNextEngineParams->params.plcrParams.newRelativeProfileId; + if(p_CcNextEngineParams->params.plcrParams.statisticsEn) + tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE | FM_PCD_AD_RESULT_STATISTICS_EN; + break; + default: + return; + } + WRITE_UINT32(p_AdResult->fqid, tmp); + +#ifdef FM_PCD_CC_MANIP + if(p_CcNextEngineParams->h_Manip) + { + tmp = GET_UINT32(p_AdResult->plcrProfile); + tmp |= (uint32_t)(XX_VirtToPhys(p_AdNewPtr) - (p_FmPcd->physicalMuramBase)) >> 4; + WRITE_UINT32(p_AdResult->plcrProfile, tmp); + + tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE; + tmpNia |= FM_PCD_AD_RESULT_NADEN; + } +#endif /* FM_PCD_CC_MANIP */ + + WRITE_UINT32(p_AdResult->nia, tmpNia); + } +} + +static void FillAdOfTypeContLookup(t_Handle p_Ad, t_Handle h_FmPcd, t_Handle p_FmPcdCcNode, t_Handle h_Manip) +{ + t_FmPcdCcNode *p_Node = (t_FmPcdCcNode *)p_FmPcdCcNode; + t_AdOfTypeContLookup *p_AdContLookup = (t_AdOfTypeContLookup *)p_Ad; + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpReg32; + t_Handle p_AdNewPtr = NULL; + + p_AdNewPtr = p_AdContLookup; + +#ifdef FM_PCD_CC_MANIP + if (h_Manip) + FmPcdManipUpdateAdContLookupForCc(h_Manip, p_Ad, &p_AdNewPtr, (uint32_t)((XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase))); +#else + UNUSED(h_Manip); +#endif /* FM_PCD_CC_MANIP */ + + if(p_AdNewPtr) + { + tmpReg32 = 0; + tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; + tmpReg32 |= p_Node->sizeOfExtraction ? ((p_Node->sizeOfExtraction - 1) << 24) : 0; + tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase); + WRITE_UINT32(p_AdContLookup->ccAdBase, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= p_Node->numOfKeys << 24; + tmpReg32 |= (p_Node->lclMask ? FM_PCD_AD_CONT_LOOKUP_LCL_MASK : 0); + tmpReg32 |= p_Node->h_KeysMatchTable ? 
+ (uint32_t)(XX_VirtToPhys(p_Node->h_KeysMatchTable) - p_FmPcd->physicalMuramBase) : 0; + WRITE_UINT32(p_AdContLookup->matchTblPtr, tmpReg32); + + tmpReg32 = 0; + tmpReg32 |= p_Node->prsArrayOffset << 24; + tmpReg32 |= p_Node->offset << 16; + tmpReg32 |= p_Node->parseCode; + WRITE_UINT32(p_AdContLookup->pcAndOffsets, tmpReg32); + + Mem2IOCpy32((void*)&p_AdContLookup->gmask, p_Node->p_GlblMask, CC_GLBL_MASK_SIZE); + } +} + +static void NextStepAd(t_Handle p_Ad, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, t_FmPcd *p_FmPcd) +{ + switch(p_FmPcdCcNextEngineParams->nextEngine) + { + case(e_FM_PCD_KG): + case(e_FM_PCD_PLCR): + case(e_FM_PCD_DONE): + FillAdOfTypeResult(p_Ad, p_FmPcd, p_FmPcdCcNextEngineParams); + break; + case(e_FM_PCD_CC): + FillAdOfTypeContLookup(p_Ad, + p_FmPcd, + p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode, +#ifdef FM_PCD_CC_MANIP + p_FmPcdCcNextEngineParams->h_Manip +#else + NULL +#endif /* FM_PCD_CC_MANIP */ + ); + UpdateNodeOwner (p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode, + TRUE); + break; + default: + return; + } +} + + +static void ReleaseNewNodeCommonPart(t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo) +{ + if(p_AdditionalInfo->p_AdTableNew) + FM_MURAM_FreeMem(FmPcdGetMuramHandle(((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd), p_AdditionalInfo->p_AdTableNew); + if(p_AdditionalInfo->p_KeysMatchTableNew) + FM_MURAM_FreeMem(FmPcdGetMuramHandle(((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd), p_AdditionalInfo->p_KeysMatchTableNew); +} + +static t_Error UpdateGblMask(t_FmPcdCcNode *p_FmPcdCcNode, uint8_t keySize, uint8_t *p_Mask) +{ + if (p_Mask && + !p_FmPcdCcNode->glblMaskUpdated && + (keySize <= 4) && + !p_FmPcdCcNode->lclMask ) + { + memcpy(p_FmPcdCcNode->p_GlblMask, p_Mask, (sizeof(uint8_t))*keySize); + p_FmPcdCcNode->glblMaskUpdated = TRUE; + p_FmPcdCcNode->glblMaskSize = 4; + } + else if (p_Mask && + (keySize <= 4) && + !p_FmPcdCcNode->lclMask) + { + if (memcmp(p_FmPcdCcNode->p_GlblMask, p_Mask, keySize) != 0) + { + p_FmPcdCcNode->lclMask = TRUE; + p_FmPcdCcNode->glblMaskSize = 0; + } + } + else if (!p_Mask && (p_FmPcdCcNode->glblMaskUpdated) && (keySize <= 4)) + { + uint32_t tmpMask = 0xffffffff; + if (memcmp(p_FmPcdCcNode->p_GlblMask, &tmpMask, 4) != 0) + { + p_FmPcdCcNode->lclMask = TRUE; + p_FmPcdCcNode->glblMaskSize = 0; + } + } + else if (p_Mask) + { + p_FmPcdCcNode->lclMask = TRUE; + p_FmPcdCcNode->glblMaskSize = 0; + } + + return E_OK; +} + +static t_Error BuildNewNodeCommonPart(t_FmPcdCcNode *p_FmPcdCcNode, + int *size, + t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo) +{ + + p_AdditionalInfo->p_AdTableNew = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), + (uint32_t)( (p_AdditionalInfo->numOfKeys+1) * FM_PCD_CC_AD_ENTRY_SIZE), + FM_PCD_CC_AD_TABLE_ALIGN); + if(!p_AdditionalInfo->p_AdTableNew) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("No memory in MURAM for AD table ")); + + IOMemSet32((uint8_t*)p_AdditionalInfo->p_AdTableNew, 0, (uint32_t)((p_AdditionalInfo->numOfKeys+1) * FM_PCD_CC_AD_ENTRY_SIZE)); + + if(p_FmPcdCcNode->lclMask) + *size = 2 * p_FmPcdCcNode->ccKeySizeAccExtraction; + else + *size = p_FmPcdCcNode->ccKeySizeAccExtraction; + + p_AdditionalInfo->p_KeysMatchTableNew = + (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), + (uint32_t)(*size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1)), + FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN); + if(!p_AdditionalInfo->p_KeysMatchTableNew) + { + 
FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), p_AdditionalInfo->p_AdTableNew); + p_AdditionalInfo->p_AdTableNew = NULL; + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("No memory in MURAM for KEY MATCH table")); + } + IOMemSet32((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0, *size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1)); + + p_AdditionalInfo->p_AdTableOld = p_FmPcdCcNode->h_AdTable; + p_AdditionalInfo->p_KeysMatchTableOld = p_FmPcdCcNode->h_KeysMatchTable; + + return E_OK; +} + +static t_Error BuildNewNodeAddOrMdfyKeyAndNextEngine(t_Handle h_FmPcd ,t_FmPcdCcNode *p_FmPcdCcNode, uint8_t keyIndex, t_FmPcdCcKeyParams *p_KeyParams,t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo, bool add) +{ + t_Error err = E_OK; + t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp; + t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp; + int size; + int i = 0, j = 0; + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t requiredAction = 0; + bool prvLclMask; + t_CcNodeInformation *p_CcNodeInformation; + t_List *p_Pos; + + /*check that new NIA is legal*/ + err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + prvLclMask = p_FmPcdCcNode->lclMask; + + /*check that new key is not require update of localMask*/ + err = UpdateGblMask(p_FmPcdCcNode, + p_FmPcdCcNode->ccKeySizeAccExtraction, + p_KeyParams->p_Mask); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /*update internal data structure for next engine per index (index - key)*/ + memcpy(&p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].nextEngineParams,&p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); + + /*update numOfKeys*/ + if(add) + p_AdditionalInfo->numOfKeys = (uint8_t)(p_FmPcdCcNode->numOfKeys + 1); + else + p_AdditionalInfo->numOfKeys = (uint8_t)p_FmPcdCcNode->numOfKeys; + /*function which build in the memory new KeyTbl, AdTbl*/ + err = BuildNewNodeCommonPart(p_FmPcdCcNode, &size, p_AdditionalInfo); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + +#ifdef FM_PCD_CC_MANIP + /*check that manip is legal and what requiredAction is necessary for this manip*/ + if(p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(&p_KeyParams->ccNextEngineParams,&requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + } +#endif /* FM_PCD_CC_MANIP */ + + p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction = requiredAction; + + p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction |= UPDATE_CC_WITH_TREE; + + + /*update new Ad and new Key Table according to new requirement*/ + i = 0; + for(j = 0; j < p_AdditionalInfo->numOfKeys; j++) + { + p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE); + if(j == keyIndex) + { + NextStepAd(p_AdTableNewTmp,&p_KeyParams->ccNextEngineParams, p_FmPcd); + p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size * sizeof(uint8_t)); + Mem2IOCpy32((void*)p_KeysMatchTableNewTmp, p_KeyParams->p_Key, p_FmPcdCcNode->userSizeOfExtraction); + if(p_FmPcdCcNode->lclMask) + { + if(p_KeyParams->p_Mask) + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), p_KeyParams->p_Mask, p_FmPcdCcNode->userSizeOfExtraction); + else if (p_FmPcdCcNode->ccKeySizeAccExtraction > 4) + IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), 0xff, p_FmPcdCcNode->userSizeOfExtraction); + else + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, 
p_FmPcdCcNode->ccKeySizeAccExtraction),p_FmPcdCcNode->p_GlblMask, p_FmPcdCcNode->userSizeOfExtraction); + } + if(!add) + i++; + } + else + { + p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE); + IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE); + p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size * sizeof(uint8_t)); + p_KeysMatchTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, i*size * sizeof(uint8_t)); + + if(p_FmPcdCcNode->lclMask) + { + if(prvLclMask) + IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), + PTR_MOVE(p_KeysMatchTableOldTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), + p_FmPcdCcNode->ccKeySizeAccExtraction); + else + { + p_KeysMatchTableOldTmp = PTR_MOVE(p_FmPcdCcNode->h_KeysMatchTable, i*p_FmPcdCcNode->ccKeySizeAccExtraction*sizeof(uint8_t)); + + if (p_FmPcdCcNode->ccKeySizeAccExtraction > 4) + IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), 0xff, p_FmPcdCcNode->userSizeOfExtraction); + else + IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), p_FmPcdCcNode->p_GlblMask, p_FmPcdCcNode->userSizeOfExtraction); + } + } + IO2IOCpy32(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp, p_FmPcdCcNode->ccKeySizeAccExtraction); + i++; + } + } + + p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE); + p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE); + IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + + if(!LIST_IsEmpty(&p_FmPcdCcNode->ccTreesLst)) + { + LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode->ccTreesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInformation->h_CcNode); + /*update the manipulation which has to be updated from parameters of the port*/ + /*it's has to be updated with restrictions defined in the function*/ + err = FmPcdCcSetRequiredAction(p_FmPcdCcNode->h_FmPcd, + p_FmPcdCcNode->shadowAction | p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction, + &p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex], + PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE), + 1, + p_CcNodeInformation->h_CcNode); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + err = CcUpdateParam(p_FmPcdCcNode->h_FmPcd, + NULL, + &p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex], + 1, + PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE), + TRUE, + p_CcNodeInformation->index, + p_CcNodeInformation->h_CcNode, + TRUE); + if (err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + } + + if(p_FmPcdCcNode->lclMask) + memset(p_FmPcdCcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + + + if(p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForAdd = p_KeyParams->ccNextEngineParams.params.ccParams.h_CcNode; + + if(!add) + { + if(p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForRmv = p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.ccParams.h_CcNode; +#ifdef FM_PCD_CC_MANIP + if(p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip) + p_AdditionalInfo->h_ManipForRmv = p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip; +#endif /* FM_PCD_CC_MANIP */ + } + + return E_OK; +} + +static t_Error BuildNewNodeRemoveKey(t_FmPcdCcNode 
*p_FmPcdCcNode, uint8_t keyIndex, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo) +{ + int i = 0, j = 0; + t_Handle p_AdTableNewTmp,p_KeysMatchTableNewTmp; + t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp; + int size; + t_Error err = E_OK; + + /*save new numOfKeys*/ + p_AdditionalInfo->numOfKeys = (uint16_t)(p_FmPcdCcNode->numOfKeys - 1); + + /*function which allocates in the memory new KeyTbl, AdTbl*/ + err = BuildNewNodeCommonPart(p_FmPcdCcNode, &size, p_AdditionalInfo); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /*update new Ad and new Key Table according to new requirement*/ + for(i = 0, j = 0; j < p_FmPcdCcNode->numOfKeys; i++, j++) + { + if(j == keyIndex) + { + p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j*FM_PCD_CC_AD_ENTRY_SIZE); + j++; + } + if(j == p_FmPcdCcNode->numOfKeys) + break; + p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i*FM_PCD_CC_AD_ENTRY_SIZE); + p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j*FM_PCD_CC_AD_ENTRY_SIZE); + IO2IOCpy32(p_AdTableNewTmp,p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE); + p_KeysMatchTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld, j*size * sizeof(uint8_t)); + p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, i*size * sizeof(uint8_t)); + IO2IOCpy32(p_KeysMatchTableNewTmp,p_KeysMatchTableOldTmp, size * sizeof(uint8_t)); + } + + p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, i*FM_PCD_CC_AD_ENTRY_SIZE); + p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, j*FM_PCD_CC_AD_ENTRY_SIZE); + IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + if(p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForRmv = p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.ccParams.h_CcNode; +#ifdef FM_PCD_CC_MANIP + if(p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip) + p_AdditionalInfo->h_ManipForRmv = p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip; +#endif /* FM_PCD_CC_MANIP */ + + return E_OK; +} + +static t_Error BuildNewNodeModifyKey(t_FmPcdCcNode *p_FmPcdCcNode, uint8_t keyIndex, uint8_t *p_Key, uint8_t *p_Mask,t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo) +{ + t_Error err = E_OK; + t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp; + t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp; + int size; + int i = 0, j = 0; + bool prvLclMask; + + p_AdditionalInfo->numOfKeys = p_FmPcdCcNode->numOfKeys; + + prvLclMask = p_FmPcdCcNode->lclMask; + + /*check that new key is not require update of localMask*/ + err = UpdateGblMask(p_FmPcdCcNode, + p_FmPcdCcNode->sizeOfExtraction, + p_Mask); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /*function which build in the memory new KeyTbl, AdTbl*/ + err = BuildNewNodeCommonPart(p_FmPcdCcNode, &size, p_AdditionalInfo); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /*fill the New AdTable and New KeyTable*/ + for(j = 0, i = 0; j < p_AdditionalInfo->numOfKeys; j++, i++) + { + p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE); + p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE); + IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE); + if(j == keyIndex) + { + p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size * sizeof(uint8_t)); + Mem2IOCpy32(p_KeysMatchTableNewTmp, p_Key, p_FmPcdCcNode->userSizeOfExtraction); + 
if(p_FmPcdCcNode->lclMask) + { + if(p_Mask) + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), p_Mask, p_FmPcdCcNode->userSizeOfExtraction); + else if (p_FmPcdCcNode->ccKeySizeAccExtraction > 4) + IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), 0xff, p_FmPcdCcNode->userSizeOfExtraction); + else + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction),p_FmPcdCcNode->p_GlblMask, p_FmPcdCcNode->userSizeOfExtraction); + } + } + else + { + p_KeysMatchTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew, j*size * sizeof(uint8_t)); + p_KeysMatchTableOldTmp = PTR_MOVE(p_FmPcdCcNode->h_KeysMatchTable, i*size * sizeof(uint8_t)); + if (p_FmPcdCcNode->lclMask) + { + if(prvLclMask) + IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), + PTR_MOVE(p_KeysMatchTableOldTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), + p_FmPcdCcNode->userSizeOfExtraction); + else + { + p_KeysMatchTableOldTmp = PTR_MOVE(p_FmPcdCcNode->h_KeysMatchTable, i*p_FmPcdCcNode->ccKeySizeAccExtraction * sizeof(uint8_t)); + + if (p_FmPcdCcNode->ccKeySizeAccExtraction > 4) + IOMemSet32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), 0xff, p_FmPcdCcNode->userSizeOfExtraction); + else + IO2IOCpy32(PTR_MOVE(p_KeysMatchTableNewTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), p_FmPcdCcNode->p_GlblMask, p_FmPcdCcNode->userSizeOfExtraction); + } + } + IO2IOCpy32((void*)p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp, p_FmPcdCcNode->ccKeySizeAccExtraction); + } + } + + p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew, j*FM_PCD_CC_AD_ENTRY_SIZE); + p_AdTableOldTmp = PTR_MOVE(p_FmPcdCcNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE); + IO2IOCpy32(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + return E_OK; +} + +static t_Error BuildNewNodeModifyNextEngine(t_Handle h_FmPcd ,t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,t_FmPcdCcNextEngineParams *p_CcNextEngineParams, t_List *h_OldLst, t_List *h_NewLst,t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo) +{ + + t_Error err = E_OK; + uint32_t requiredAction = 0; + t_List *p_Pos; + t_CcNodeInformation *p_CcNodeInformation, ccNodeInfo; + t_Handle p_Ad; + t_FmPcdCcNode *p_FmPcdCcNode1 = NULL; + t_FmPcdCcTree *p_FmPcdCcTree = NULL; + + ASSERT_COND(p_CcNextEngineParams); + /*check that new NIA is legal*/ + err = ValidateNextEngineParams(h_FmPcd, p_CcNextEngineParams); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /*update internal data structure for next engine per index (index - key)*/ + memcpy(&p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].nextEngineParams,p_CcNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); + +#ifdef FM_PCD_CC_MANIP + /*check that manip is legal and what requiredAction is necessary for this manip*/ + if(p_CcNextEngineParams->h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(p_CcNextEngineParams,&requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + } +#endif /* FM_PCD_CC_MANIP */ + + if(!p_AdditionalInfo->tree) + { + p_FmPcdCcNode1 = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree; + p_Ad = p_FmPcdCcNode1->h_AdTable; + if(p_FmPcdCcNode1->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForRmv = p_FmPcdCcNode1->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.ccParams.h_CcNode; +#ifdef FM_PCD_CC_MANIP + if(p_FmPcdCcNode1->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip) + 
p_AdditionalInfo->h_ManipForRmv = p_FmPcdCcNode1->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip; +#endif /* FM_PCD_CC_MANIP */ + } + else + { + p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree; + p_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); + if(p_FmPcdCcTree->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForRmv = p_FmPcdCcTree->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.ccParams.h_CcNode; +#ifdef FM_PCD_CC_MANIP + if(p_FmPcdCcTree->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip) + p_AdditionalInfo->h_ManipForRmv = p_FmPcdCcTree->nextEngineAndRequiredAction[keyIndex].nextEngineParams.h_Manip; +#endif /* FM_PCD_CC_MANIP */ + } + ASSERT_COND(p_Ad); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = PTR_MOVE(p_Ad, keyIndex * FM_PCD_CC_AD_ENTRY_SIZE); + EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo); + + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + p_Ad = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd), + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + + if(!p_Ad) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Memory allocation in MURAM FAILED")); + + IOMemSet32((uint8_t *)p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + if(p_CcNextEngineParams) + NextStepAd(p_Ad,p_CcNextEngineParams, h_FmPcd); + ccNodeInfo.h_CcNode = p_Ad; + EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo); + + p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction = requiredAction; + + p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction |= UPDATE_CC_WITH_TREE; + + if(!p_AdditionalInfo->tree) + { + ASSERT_COND(p_FmPcdCcNode1); + if(!LIST_IsEmpty(&p_FmPcdCcNode1->ccTreesLst)) + { + LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode1->ccTreesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInformation->h_CcNode); + /*update the manipulation which has to be updated from parameters of the port*/ + /*it's has to be updated with restrictions defined in the function*/ + err = FmPcdCcSetRequiredAction(p_FmPcdCcNode1->h_FmPcd, p_FmPcdCcNode1->shadowAction | p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction, &p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex], + p_Ad, 1, p_CcNodeInformation->h_CcNode); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + err = CcUpdateParam(p_FmPcdCcNode1->h_FmPcd, NULL, &p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex],1, p_Ad, TRUE, p_CcNodeInformation->index, p_CcNodeInformation->h_CcNode, TRUE); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + } + } + else + { + ASSERT_COND(p_FmPcdCcTree); + err = FmPcdCcSetRequiredAction(h_FmPcd, p_FmPcdCcTree->requiredAction | p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex].requiredAction, &p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex], + p_Ad, 1, (t_Handle)p_FmPcdCcTree); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + err = CcUpdateParam(h_FmPcd, NULL, &p_AdditionalInfo->nextEngineAndRequiredAction[keyIndex],1, p_Ad, TRUE, 0, (t_Handle)p_FmPcdCcTree, TRUE); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + + if(p_CcNextEngineParams->nextEngine == e_FM_PCD_CC) + p_AdditionalInfo->h_NodeForAdd = p_CcNextEngineParams->params.ccParams.h_CcNode; + return E_OK; +} + +static t_Handle BuildNewAd(t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams, + t_FmPcdCcNode *p_FmPcdCcNode, + t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + + t_Handle p_Ad; + t_FmPcdCcNode 
*p_FmPcdCcNodeTmp; + + p_Ad = (t_Handle)FM_MURAM_AllocMem(((t_FmPcd *)(p_FmPcdCcNode->h_FmPcd))->h_FmMuram, + FM_PCD_CC_AD_ENTRY_SIZE, + FM_PCD_CC_AD_TABLE_ALIGN); + if(!p_Ad) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM for AD")); + return NULL; + } + IOMemSet32(p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); + + p_FmPcdCcNodeTmp = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); + if(!p_FmPcdCcNodeTmp) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcNodeTmp")); + return NULL; + } + memset(p_FmPcdCcNodeTmp, 0, sizeof(t_FmPcdCcNode)); + + p_FmPcdCcNodeTmp->numOfKeys = p_FmPcdModifyCcKeyAdditionalParams->numOfKeys; + p_FmPcdCcNodeTmp->h_KeysMatchTable = p_FmPcdModifyCcKeyAdditionalParams->p_KeysMatchTableNew; + p_FmPcdCcNodeTmp->h_AdTable = p_FmPcdModifyCcKeyAdditionalParams->p_AdTableNew; + + p_FmPcdCcNodeTmp->lclMask = p_FmPcdCcNode->lclMask; + p_FmPcdCcNodeTmp->parseCode = p_FmPcdCcNode->parseCode; + p_FmPcdCcNodeTmp->offset = p_FmPcdCcNode->offset; + p_FmPcdCcNodeTmp->prsArrayOffset = p_FmPcdCcNode->prsArrayOffset; + p_FmPcdCcNodeTmp->ctrlFlow = p_FmPcdCcNode->ctrlFlow; + p_FmPcdCcNodeTmp->ccKeySizeAccExtraction = p_FmPcdCcNode->ccKeySizeAccExtraction; + p_FmPcdCcNodeTmp->sizeOfExtraction = p_FmPcdCcNode->sizeOfExtraction; + p_FmPcdCcNodeTmp->glblMaskSize = p_FmPcdCcNode->glblMaskSize; + p_FmPcdCcNodeTmp->p_GlblMask = p_FmPcdCcNode->p_GlblMask; + + if (p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC) + FillAdOfTypeContLookup(p_Ad, + p_FmPcdCcNode->h_FmPcd, + p_FmPcdCcNodeTmp, +#ifdef FM_PCD_CC_MANIP + p_FmPcdCcNextEngineParams->h_Manip +#else + NULL +#endif /* FM_PCD_CC_MANIP */ + ); + + XX_Free(p_FmPcdCcNodeTmp); + + return p_Ad; +} + +static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(t_FmPcdCcNode *p_CrntMdfNode ,t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams, t_List *h_OldLst, t_List *h_NewLst) +{ + t_CcNodeInformation *p_CcNodeInformation; + t_FmPcdCcNode *p_NodePtrOnCurrentMdfNode = NULL; + t_List *p_Pos; + int i = 0; + t_Handle p_AdTablePtOnCrntCurrentMdfNode, p_AdTableNewModified; + t_CcNodeInformation ccNodeInfo; + + LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccPrevNodesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + p_NodePtrOnCurrentMdfNode = (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode; + ASSERT_COND(p_NodePtrOnCurrentMdfNode); + /*search in the prev node which exact index points on this current modified node for getting AD */ + for(i = 0; i < p_NodePtrOnCurrentMdfNode->numOfKeys + 1; i++) + { + if(p_NodePtrOnCurrentMdfNode->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + if(p_NodePtrOnCurrentMdfNode->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode == (t_Handle)p_CrntMdfNode) + { + p_AdTablePtOnCrntCurrentMdfNode = PTR_MOVE(p_NodePtrOnCurrentMdfNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode; + EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo); + + p_AdTableNewModified = BuildNewAd(p_FmPcdModifyCcKeyAdditionalParams, p_CrntMdfNode, &p_NodePtrOnCurrentMdfNode->nextEngineAndRequiredAction[i].nextEngineParams); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTableNewModified; + EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo); + } + } + } + ASSERT_COND(i != p_NodePtrOnCurrentMdfNode->numOfKeys); + } +} + +static void UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(t_FmPcdCcNode *p_CrntMdfNode ,t_FmPcdModifyCcKeyAdditionalParams 
*p_FmPcdModifyCcKeyAdditionalParams, t_List *h_OldLst, t_List *h_NewLst) +{ + t_CcNodeInformation *p_CcNodeInformation; + t_FmPcdCcTree *p_TreePtrOnCurrentMdfNode = NULL; + t_List *p_Pos; + int i = 0; + t_Handle p_AdTableTmp, p_AdTableTmp1; + t_CcNodeInformation ccNodeInfo; + + LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccTreeIdLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + p_TreePtrOnCurrentMdfNode = (t_FmPcdCcTree *)p_CcNodeInformation->h_CcNode; + + ASSERT_COND(p_TreePtrOnCurrentMdfNode); + /*search in the trees which exact index points on this current modified node for getting AD + */ + for(i = 0; i < p_TreePtrOnCurrentMdfNode->numOfEntries; i++) + { + if(p_TreePtrOnCurrentMdfNode->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + if(p_TreePtrOnCurrentMdfNode->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode == (t_Handle)p_CrntMdfNode) + { + p_AdTableTmp = UINT_TO_PTR(p_TreePtrOnCurrentMdfNode->ccTreeBaseAddr + i*FM_PCD_CC_AD_ENTRY_SIZE); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTableTmp; + EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo); + + p_AdTableTmp1 = BuildNewAd(p_FmPcdModifyCcKeyAdditionalParams, p_CrntMdfNode, &p_TreePtrOnCurrentMdfNode->nextEngineAndRequiredAction[i].nextEngineParams); + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = p_AdTableTmp1; + EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo); + } + } + } + ASSERT_COND(i == p_TreePtrOnCurrentMdfNode->numOfEntries); + } +} + +static t_Error ModifyKeyCommonPart1(t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex, t_Handle *h_Params, e_ModifyState modifyState, bool check, bool tree) +{ + t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams; + int i = 0, j = 0; + bool wasUpdate = FALSE; + t_FmPcdCcNode *p_FmPcdCcNode = NULL; + t_FmPcdCcTree *p_FmPcdCcTree; + uint16_t numOfKeys; + t_FmPcdCcNextEngineAndRequiredActionParams *p_nextEngineAndRequiredAction = NULL; + + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcNodeOrTree,E_INVALID_HANDLE); + + p_nextEngineAndRequiredAction = XX_Malloc(FM_PCD_MAX_NUM_OF_KEYS * sizeof(*p_nextEngineAndRequiredAction)); + if(!p_nextEngineAndRequiredAction) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("failed to allocate memory for p_nextEngineAndRequiredAction")); + + memset(p_nextEngineAndRequiredAction, 0, FM_PCD_MAX_NUM_OF_KEYS * sizeof(*p_nextEngineAndRequiredAction)); + + if(!tree) + { + p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree; + numOfKeys = p_FmPcdCcNode->numOfKeys; + + /*node has to be pointed by another node or tree*/ + if (!LIST_NumOfObjs(&p_FmPcdCcNode->ccPrevNodesLst) && + !LIST_NumOfObjs(&p_FmPcdCcNode->ccTreeIdLst)) + { + XX_Free(p_nextEngineAndRequiredAction); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("node has to be pointed by node or tree")); + } + + if(!LIST_NumOfObjs(&p_FmPcdCcNode->ccTreesLst) || + (LIST_NumOfObjs(&p_FmPcdCcNode->ccTreesLst) != 1)) + { + XX_Free(p_nextEngineAndRequiredAction); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("node has to be belonging to some tree and only to one tree")); + } + + memcpy(p_nextEngineAndRequiredAction, + p_FmPcdCcNode->nextEngineAndRequiredAction, + FM_PCD_MAX_NUM_OF_KEYS * sizeof(t_FmPcdCcNextEngineAndRequiredActionParams)); + + if(check) + { + if((p_FmPcdCcNode->parseCode == CC_PC_FF_IPV4TTL) || + (p_FmPcdCcNode->parseCode == CC_PC_FF_IPV6HOP_LIMIT) || + (p_FmPcdCcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)) + { + XX_Free(p_nextEngineAndRequiredAction); + RETURN_ERROR(MAJOR, 
E_INVALID_VALUE, ("nodeId of CC_PC_FF_IPV4TTL or CC_PC_FF_IPV6HOP_LIMIT can not be used for addKey, removeKey, modifyKey")); + } + } + } + else + { + p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree; + numOfKeys = p_FmPcdCcTree->numOfEntries; + memcpy(p_nextEngineAndRequiredAction, + p_FmPcdCcTree->nextEngineAndRequiredAction, + FM_PCD_MAX_NUM_OF_KEYS * sizeof(t_FmPcdCcNextEngineAndRequiredActionParams)); + } + + p_FmPcdModifyCcKeyAdditionalParams = + (t_FmPcdModifyCcKeyAdditionalParams *)XX_Malloc(sizeof(t_FmPcdModifyCcKeyAdditionalParams)); + if(!p_FmPcdModifyCcKeyAdditionalParams) + { + XX_Free(p_nextEngineAndRequiredAction); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of internal data structure FAILED")); + } + memset(p_FmPcdModifyCcKeyAdditionalParams, 0, sizeof(t_FmPcdModifyCcKeyAdditionalParams)); + + p_FmPcdModifyCcKeyAdditionalParams->h_CurrentNode = h_FmPcdCcNodeOrTree; + p_FmPcdModifyCcKeyAdditionalParams->keyIndex = keyIndex; + + while(i < numOfKeys) + { + if((j == keyIndex) && !wasUpdate) + { + if(modifyState == e_MODIFY_STATE_ADD) + j++; + else if(modifyState == e_MODIFY_STATE_REMOVE) + i++; + wasUpdate = TRUE; + } + else + { + memcpy(&p_FmPcdModifyCcKeyAdditionalParams->nextEngineAndRequiredAction[j], &p_nextEngineAndRequiredAction[i], sizeof(t_FmPcdCcNextEngineAndRequiredActionParams)); + i++; + j++; + } + } + + if (keyIndex == numOfKeys) + { + if (modifyState == e_MODIFY_STATE_ADD) + j++; + else if(modifyState == e_MODIFY_STATE_REMOVE) + i++; + } + + memcpy(&p_FmPcdModifyCcKeyAdditionalParams->nextEngineAndRequiredAction[j], &p_nextEngineAndRequiredAction[numOfKeys], sizeof(t_FmPcdCcNextEngineAndRequiredActionParams)); + + XX_Free(p_nextEngineAndRequiredAction); + *h_Params = p_FmPcdModifyCcKeyAdditionalParams; + + return E_OK; +} + +static t_Error UpdatePtrWhichPointOnCrntMdfNode(t_FmPcdCcNode *p_FmPcdCcNode, t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams ,t_List *h_OldLst, t_List *h_NewLst) +{ + if(!LIST_IsEmpty(&p_FmPcdCcNode->ccPrevNodesLst)) + UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(p_FmPcdCcNode, p_FmPcdModifyCcKeyAdditionalParams, h_OldLst, h_NewLst); + + if(!LIST_IsEmpty(&p_FmPcdCcNode->ccTreeIdLst)) + UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(p_FmPcdCcNode, p_FmPcdModifyCcKeyAdditionalParams, h_OldLst, h_NewLst); + + return E_OK; +} + +static void FmPcdCcUpdateTreeOwner(t_FmPcdCcTree *p_FmPcdCcTree, bool add) +{ + ASSERT_COND(p_FmPcdCcTree); + + if(add) + p_FmPcdCcTree->owners++; + else + { + ASSERT_COND(p_FmPcdCcTree->owners); + p_FmPcdCcTree->owners--; + } +} + +#ifdef FM_PCD_CC_MANIP +static t_Error CheckAndSetManipParamsWithCcNodeParams(t_FmPcdCcNode *p_FmPcdCcNode) +{ + t_Error err = E_OK; + int i = 0; + + for(i = 0; i < p_FmPcdCcNode->numOfKeys; i++) + { + if(p_FmPcdCcNode->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsWithCcNodeParams(p_FmPcdCcNode->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip, (t_Handle)p_FmPcdCcNode); + if(err) + return err; + } + } + + return err; +} +#endif /* FM_PCD_CC_MANIP */ + +static t_Error CcUpdateParams(t_Handle h_FmPcd, + t_Handle h_FmPort, + t_Handle h_FmTree, + bool validate) +{ + t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *) h_FmTree; + + return CcUpdateParam(h_FmPcd, + h_FmPort, + p_CcTree->nextEngineAndRequiredAction, + p_CcTree->numOfEntries, + UINT_TO_PTR(p_CcTree->ccTreeBaseAddr), + validate, + 0, + h_FmTree, + FALSE); +} + +static t_Error CheckParams(t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam, + t_FmPcdCcNode 
*p_FmPcdCcNode, + bool *isKeyTblAlloc) +{ + int tmp = 0; + t_FmPcdCcKeyParams *p_KeyParams; + t_Error err; + uint32_t requiredAction = 0; + + err = ValidateNextEngineParams(h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss); + if(err) + RETURN_ERROR(MAJOR, err, ("For this node MissNextEngineParams are not valid")); + +#ifdef FM_PCD_CC_MANIP + if(p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(&p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, &requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } +#endif /* FM_PCD_CC_MANIP */ + + memcpy(&p_FmPcdCcNode->nextEngineAndRequiredAction[p_FmPcdCcNode->numOfKeys].nextEngineParams,&p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, sizeof(t_FmPcdCcNextEngineParams)); + p_FmPcdCcNode->nextEngineAndRequiredAction[p_FmPcdCcNode->numOfKeys].requiredAction = requiredAction; + + for(tmp = 0 ; tmp < p_FmPcdCcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + + if(!p_KeyParams->p_Key) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_Key is not initialized")); + + + err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + + err = UpdateGblMask(p_FmPcdCcNode, + p_CcNodeParam->keysParams.keySize, + p_KeyParams->p_Mask); + +#ifdef FM_PCD_CC_MANIP + if(p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(&p_KeyParams->ccNextEngineParams, &requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } +#endif /* FM_PCD_CC_MANIP */ + + memcpy(&p_FmPcdCcNode->nextEngineAndRequiredAction[tmp],&p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); + p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction = requiredAction; + } + + *isKeyTblAlloc = TRUE; + return E_OK; +} + +static t_Error Ipv4TtlOrIpv6HopLimiCheckParams( t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam, t_FmPcdCcNode *p_FmPcdCcNode, + bool *isKeyTblAlloc) +{ + int tmp = 0; + t_FmPcdCcKeyParams *p_KeyParams; + t_Error err; + uint8_t key = 0x01; + uint32_t requiredAction = 0; + + if(p_FmPcdCcNode->numOfKeys != 1 ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("for IPV4TTL and IPV6_HOP_LIMIT has to be only 1 key - TTL = 1, otherwise it's Miss")); + + err = ValidateNextEngineParams(h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss); + if(err) + RETURN_ERROR(MAJOR, err, ("For this node MissNextEngineParams are not valid")); + +#ifdef FM_PCD_CC_MANIP + if(p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(&p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, &requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } +#endif /* FM_PCD_CC_MANIP */ + + memcpy(&p_FmPcdCcNode->nextEngineAndRequiredAction[p_FmPcdCcNode->numOfKeys].nextEngineParams, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, sizeof(t_FmPcdCcNextEngineParams)); + p_FmPcdCcNode->nextEngineAndRequiredAction[p_FmPcdCcNode->numOfKeys].requiredAction = requiredAction; + + for(tmp = 0 ; tmp < p_FmPcdCcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + if(p_KeyParams->p_Mask) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("If node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Mask can not be initialized")); + if(memcmp(p_KeyParams->p_Key, &key, 1) != 0) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("If node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Key has to be 1")); + err = 
ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + +#ifdef FM_PCD_CC_MANIP + if(p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(&p_KeyParams->ccNextEngineParams, &requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } +#endif /* FM_PCD_CC_MANIP */ + + memcpy(&p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].nextEngineParams, &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); + p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction = requiredAction; + } + + *isKeyTblAlloc = FALSE; + return E_OK; +} + +static t_Error IcHashIndexedCheckParams(t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam, + t_FmPcdCcNode *p_FmPcdCcNode, + /*uint16_t *ccInfo,*/ + /*t_List *ccNextDifferentNodesLst,*/ + bool *isKeyTblAlloc) +{ + int tmp = 0, countOnes = 0; + t_FmPcdCcKeyParams *p_KeyParams; + t_Error err; + uint16_t glblMask = p_CcNodeParam->extractCcParams.extractNonHdr.icIndxMask; + uint16_t countMask = (uint16_t)(glblMask >> 4); +#ifdef FM_PCD_CC_MANIP + uint32_t requiredAction; +#endif /* FM_PCD_CC_MANIP */ + + if (glblMask & 0x000f) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("icIndxMask has to be with last nibble 0")); + + while (countMask) + { + countOnes++; + countMask=(uint16_t)(countMask>>1); + } + + if (!POWER_OF_2(p_FmPcdCcNode->numOfKeys)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For Node of the type INDEXED numOfKeys has to be powerOfTwo")); + if (p_FmPcdCcNode->numOfKeys != ((uint32_t)1<<countOnes)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For Node of the type IC_HASH_INDEXED numOfKeys has to be powerOfTwo")); + + err = ValidateNextEngineParams(h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss); + if(GET_ERROR_TYPE(err)!= E_NOT_SUPPORTED) + RETURN_ERROR(MAJOR, err, ("MissNextEngineParams for the node of the type IC_INDEX_HASH has to be UnInitialized")); + + for(tmp = 0 ; tmp < p_FmPcdCcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + if(p_KeyParams->p_Mask || p_KeyParams->p_Key) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For Node of the type IC_HASH_INDEXED p_Key or p_Mask has to be NULL")); + + if((glblMask & (tmp * 16)) == (tmp * 16)) + { + err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams); + if(err) + RETURN_ERROR(MAJOR, err, ("This index has to be initialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask ")); + +#ifdef FM_PCD_CC_MANIP + if(p_KeyParams->ccNextEngineParams.h_Manip) + { + err = FmPcdManipCheckParamsForCcNextEgine(&p_KeyParams->ccNextEngineParams, &requiredAction); + if(err) + RETURN_ERROR(MAJOR, err, (NO_MSG)); + } + p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction = requiredAction; +#endif /* FM_PCD_CC_MANIP */ + + memcpy(&p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].nextEngineParams,&p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); + } + else + { + err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams); + if(GET_ERROR_TYPE(err)!= E_NOT_SUPPORTED) + RETURN_ERROR(MAJOR, err, ("This index has to be UnInitialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask")); + } + } + *isKeyTblAlloc = FALSE; + memcpy(PTR_MOVE(p_FmPcdCcNode->p_GlblMask, 2), &glblMask, 2); + + return E_OK; +} + +t_Error FmPcdCcModifyNextEngineParamTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + t_Error err = E_OK; + uint16_t keyIndex; + 
t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + + SANITY_CHECK_RETURN_ERROR((grpId <= 7),E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree,E_INVALID_VALUE); + + if(grpId >= p_FmPcdCcTree->numOfGrps) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("grpId you asked > numOfGroup of relevant tree")); + + if(index >= p_FmPcdCcTree->fmPcdGroupParam[grpId].numOfEntriesInGroup) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("index > numOfEntriesInGroup")); + + keyIndex = (uint16_t)(p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry + index); + + err = ModifyKeyCommonPart1(h_FmPcdCcTree, keyIndex, h_AdditionalParams, e_MODIFY_STATE_CHANGE, TRUE, TRUE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + p_ModifyKeyParams->tree = TRUE; + + err = BuildNewNodeModifyNextEngine (h_FmPcd, h_FmPcdCcTree, keyIndex,p_FmPcdCcNextEngineParams, h_OldLst, h_NewLst, p_ModifyKeyParams); + if(err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + return E_OK; + +} + +t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint8_t keyIndex, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams) +{ + + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *) h_FmPcdCcNode; + t_Error err = E_OK; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + + if(keyIndex >= p_FmPcdCcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("impossible to remove key when numOfKeys <= keyIndex")); + + if(!p_FmPcdCcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("keyIndex you asked > numOfKeys of relevant node that was initialized")); + + if(p_FmPcdCcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is diferent from one which was assigned to the node in the Init time")); + + err = ModifyKeyCommonPart1(p_FmPcdCcNode, keyIndex, h_AdditionalParams, e_MODIFY_STATE_REMOVE, TRUE, FALSE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + err = BuildNewNodeRemoveKey (p_FmPcdCcNode, keyIndex, p_ModifyKeyParams); + if(err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_FmPcdCcNode, p_ModifyKeyParams, h_OldLst, h_NewLst); + if(err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; + +} + +t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint8_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, t_List *h_OldLst, t_List *h_NewLst,t_Handle *h_AdditionalParams) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_Error err = E_OK; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + + if(keyIndex >= p_FmPcdCcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previousely cleared last index + 1")); + + if((p_FmPcdCcNode->numOfKeys + 1) > FM_PCD_MAX_NUM_OF_CC_NODES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfKeys with new key can not be larger than 255")); + + if(keySize != p_FmPcdCcNode->userSizeOfExtraction) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("size for ModifyKey has to be the same as defined in SetNode")); + + if(p_FmPcdCcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is diferent from one which was assigned to the node in the Init time")); + + err = ModifyKeyCommonPart1(p_FmPcdCcNode, keyIndex, 
h_AdditionalParams, e_MODIFY_STATE_CHANGE, TRUE, FALSE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + + err = BuildNewNodeModifyKey (p_FmPcdCcNode, keyIndex, p_Key, p_Mask, p_ModifyKeyParams); + if(err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_FmPcdCcNode, p_ModifyKeyParams, h_OldLst, h_NewLst); + if(err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + return E_OK; +} + + +t_Error FmPcdCcModiyNextEngineParamNode(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, uint8_t keyIndex,t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,t_List *h_OldPointer, t_List *h_NewPointer,t_Handle *h_AdditionalParams) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_Error err = E_OK; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNode,E_INVALID_HANDLE); + + if(keyIndex >= p_FmPcdCcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previousely cleared last index + 1")); + + if((p_FmPcdCcNode->numOfKeys + 1) > FM_PCD_MAX_NUM_OF_CC_NODES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfKeys with new key can not be larger than 255")); + + err = ModifyKeyCommonPart1(p_FmPcdCcNode, keyIndex, h_AdditionalParams, e_MODIFY_STATE_CHANGE, FALSE, FALSE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + + err = BuildNewNodeModifyNextEngine (h_FmPcd, p_FmPcdCcNode, keyIndex,p_FmPcdCcNextEngineParams, h_OldPointer, h_NewPointer, p_ModifyKeyParams); + if(err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + return E_OK; +} + +t_Error FmPcdCcModifyMissNextEngineParamNode(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,t_List *h_OldPointer, t_List *h_NewPointer,t_Handle *h_AdditionalParams) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_Error err = E_OK; + uint16_t keyIndex; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNode,E_INVALID_VALUE); + + keyIndex = p_FmPcdCcNode->numOfKeys; + + err = ModifyKeyCommonPart1(p_FmPcdCcNode, keyIndex, h_AdditionalParams, e_MODIFY_STATE_CHANGE, TRUE, FALSE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + + err = BuildNewNodeModifyNextEngine (h_FmPcd, p_FmPcdCcNode, keyIndex,p_FmPcdCcNextEngineParams, h_OldPointer, h_NewPointer, p_ModifyKeyParams); + if(err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + +t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPcdCcKeyParams, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_Error err = E_OK; + + if(keyIndex > p_FmPcdCcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previousely cleared last index + 1")); + + if((p_FmPcdCcNode->numOfKeys + 1) > FM_PCD_MAX_NUM_OF_CC_NODES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfKeys with new key can not be larger 
than 255")); + + if(keySize != p_FmPcdCcNode->userSizeOfExtraction) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be defined as it was defined in initialization step.")); + + if(p_FmPcdCcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is diferent from one which was assigned to the node in the Init time")); + + err = ModifyKeyCommonPart1(p_FmPcdCcNode, keyIndex, h_AdditionalParams, e_MODIFY_STATE_ADD, TRUE, FALSE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + err = BuildNewNodeAddOrMdfyKeyAndNextEngine (h_FmPcd, p_FmPcdCcNode, keyIndex, p_FmPcdCcKeyParams, p_ModifyKeyParams, TRUE); + if(err) + { + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_FmPcdCcNode, p_ModifyKeyParams, h_OldLst, h_NewLst); + if(err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + +t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPcdCcKeyParams, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; + t_Error err = E_OK; + + if(keyIndex > p_FmPcdCcNode->numOfKeys) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previousely cleared last index + 1")); + + if((p_FmPcdCcNode->numOfKeys + 1) > FM_PCD_MAX_NUM_OF_CC_NODES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfKeys with new key can not be larger than 255")); + + if(keySize != p_FmPcdCcNode->userSizeOfExtraction) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be defined as it was defined in initialization step")); + + if(p_FmPcdCcNode->h_FmPcd != h_FmPcd) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("handler to FmPcd is diferent from one which was assigned to the node in the Init time")); + + err = ModifyKeyCommonPart1(p_FmPcdCcNode, keyIndex, h_AdditionalParams, e_MODIFY_STATE_CHANGE, TRUE, FALSE); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + p_ModifyKeyParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_AdditionalParams; + + err = BuildNewNodeAddOrMdfyKeyAndNextEngine (h_FmPcd, p_FmPcdCcNode, keyIndex, p_FmPcdCcKeyParams, p_ModifyKeyParams, FALSE); + if(err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = UpdatePtrWhichPointOnCrntMdfNode(p_FmPcdCcNode, p_ModifyKeyParams, h_OldLst, h_NewLst); + if(err) + { + ReleaseNewNodeCommonPart(p_ModifyKeyParams); + XX_Free(p_ModifyKeyParams); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + return E_OK; +} + +t_Error FmPcdCcReleaseModifiedDataStructure(t_Handle h_FmPcd, t_List *h_FmPcdOldPointersLst, t_List *h_FmPcdNewPointersLst, uint16_t numOfGoodChanges, t_Handle *h_Params) +{ + t_FmPcdModifyCcKeyAdditionalParams *p_CcNewModifyAdditionalParams = (t_FmPcdModifyCcKeyAdditionalParams *)*h_Params; + t_List *p_Pos; + t_Error err = E_OK; + t_CcNodeInformation ccNodeInfo, *p_CcNodeInformation; + t_Handle h_Muram; + t_FmPcdCcNode *p_FmPcdCcNextNode; + t_List *p_UpdateLst; + + UNUSED(numOfGoodChanges); + + SANITY_CHECK_RETURN_ERROR(h_FmPcd,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_CcNewModifyAdditionalParams->h_CurrentNode,E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(h_FmPcdOldPointersLst,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(h_FmPcdNewPointersLst,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((numOfGoodChanges == LIST_NumOfObjs(h_FmPcdOldPointersLst)),E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR((numOfGoodChanges == LIST_NumOfObjs(h_FmPcdNewPointersLst)),E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR((LIST_NumOfObjs(h_FmPcdOldPointersLst) == LIST_NumOfObjs(h_FmPcdNewPointersLst)),E_INVALID_STATE); + + /*we don't update subtree of the new node with new tree because it was done in the previose stage*/ + if(p_CcNewModifyAdditionalParams->h_NodeForAdd) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_CcNewModifyAdditionalParams->h_NodeForAdd; + if(!p_CcNewModifyAdditionalParams->tree) + p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst; + else + p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst; + p_CcNodeInformation = FindNodeInfoInReleventLst(p_UpdateLst, p_CcNewModifyAdditionalParams->h_CurrentNode); + if(p_CcNodeInformation) + p_CcNodeInformation->index++; + else + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_CcNewModifyAdditionalParams->h_CurrentNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(p_UpdateLst, &ccNodeInfo); + } + } + + if(p_CcNewModifyAdditionalParams->h_NodeForRmv) + { + + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_CcNewModifyAdditionalParams->h_NodeForRmv; + if(!p_CcNewModifyAdditionalParams->tree) + { + p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst; + LIST_FOR_EACH(p_Pos, &p_FmPcdCcNextNode->ccTreesLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInformation->h_CcNode); + err = FmPcdCcSetRequiredAction(h_FmPcd, + UPDATE_CC_WITH_DELETE_TREE, + &((t_FmPcdCcNode *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->nextEngineAndRequiredAction[p_CcNewModifyAdditionalParams->keyIndex], + PTR_MOVE(((t_FmPcdCcNode *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->h_AdTable, p_CcNewModifyAdditionalParams->keyIndex*FM_PCD_CC_AD_ENTRY_SIZE), + 1, + p_CcNodeInformation->h_CcNode); + } + } + else + { + p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst; + err = FmPcdCcSetRequiredAction(h_FmPcd, + UPDATE_CC_WITH_DELETE_TREE, + &((t_FmPcdCcTree *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->nextEngineAndRequiredAction[p_CcNewModifyAdditionalParams->keyIndex], + UINT_TO_PTR(((t_FmPcdCcTree *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->ccTreeBaseAddr + p_CcNewModifyAdditionalParams->keyIndex*FM_PCD_CC_AD_ENTRY_SIZE), + 1, + p_CcNewModifyAdditionalParams->h_CurrentNode); + } + if(err) + return err; + + /*we remove from the subtree of the removed node tree because it wasn't done in the previose stage*/ + /*update ccPrevNodesLst or ccTreeIdLst of the removed node*/ + /*update of the nodeOwner*/ + p_CcNodeInformation = FindNodeInfoInReleventLst(p_UpdateLst, p_CcNewModifyAdditionalParams->h_CurrentNode); + ASSERT_COND(p_CcNodeInformation); + ASSERT_COND(p_CcNodeInformation->index); + p_CcNodeInformation->index--; + if(p_CcNodeInformation->index == 0) + DequeueNodeInfoFromRelevantLst(p_UpdateLst,p_CcNewModifyAdditionalParams->h_CurrentNode); + ASSERT_COND(LIST_NumOfObjs(&p_FmPcdCcNextNode->ccTreesLst) == 1); + UpdateNodeOwner(p_FmPcdCcNextNode, FALSE); + } + +#ifdef FM_PCD_CC_MANIP + if(p_CcNewModifyAdditionalParams->h_ManipForRmv) + FmPcdManipUpdateOwner(p_CcNewModifyAdditionalParams->h_ManipForRmv, FALSE); +#endif /* FM_PCD_CC_MANIP */ + + h_Muram = FmPcdGetMuramHandle(h_FmPcd); + ASSERT_COND(h_Muram); + + /*we release new AD which was 
allocated and updated for copy from to actual AD*/ + LIST_FOR_EACH(p_Pos, h_FmPcdNewPointersLst) + { + p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInformation->h_CcNode); + FM_MURAM_FreeMem(h_Muram, p_CcNodeInformation->h_CcNode); + + } + + /*free Old data structure if it has to be freed - new data structure was allocated*/ + if(p_CcNewModifyAdditionalParams->p_AdTableOld) + FM_MURAM_FreeMem(h_Muram,p_CcNewModifyAdditionalParams->p_AdTableOld); + if(p_CcNewModifyAdditionalParams->p_KeysMatchTableOld) + FM_MURAM_FreeMem(h_Muram,p_CcNewModifyAdditionalParams->p_KeysMatchTableOld); + + /*update current modified node with changed fields if it's required*/ + if(!p_CcNewModifyAdditionalParams->tree) + { + if(p_CcNewModifyAdditionalParams->p_AdTableNew) + ((t_FmPcdCcNode *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->h_AdTable = p_CcNewModifyAdditionalParams->p_AdTableNew; + if(p_CcNewModifyAdditionalParams->numOfKeys) + ((t_FmPcdCcNode *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->numOfKeys = p_CcNewModifyAdditionalParams->numOfKeys; + if(p_CcNewModifyAdditionalParams->p_KeysMatchTableNew) + ((t_FmPcdCcNode *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->h_KeysMatchTable = p_CcNewModifyAdditionalParams->p_KeysMatchTableNew; + memcpy(((t_FmPcdCcNode *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->nextEngineAndRequiredAction, &p_CcNewModifyAdditionalParams->nextEngineAndRequiredAction, sizeof(t_FmPcdCcNextEngineAndRequiredActionParams) * (FM_PCD_MAX_NUM_OF_KEYS)); + } + else + memcpy(&((t_FmPcdCcTree *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->nextEngineAndRequiredAction, &p_CcNewModifyAdditionalParams->nextEngineAndRequiredAction, sizeof(t_FmPcdCcNextEngineAndRequiredActionParams) * (((t_FmPcdCcTree *)(p_CcNewModifyAdditionalParams->h_CurrentNode))->numOfEntries)); + + ReleaseLst(h_FmPcdOldPointersLst); + ReleaseLst(h_FmPcdNewPointersLst); + XX_Free(p_CcNewModifyAdditionalParams); + + return E_OK; +} + +uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd, t_Handle h_Pointer) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_CcNodeInformation *p_CcNodeInfo; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, (uint32_t)ILLEGAL_BASE); + + p_CcNodeInfo = CC_NODE_F_OBJECT(h_Pointer); + return (uint32_t)(XX_VirtToPhys(p_CcNodeInfo->h_CcNode) - p_FmPcd->physicalMuramBase); +} + +t_Error FmPcdCcGetGrpParams(t_Handle h_FmPcdCcTree, uint8_t grpId, uint32_t *p_GrpBits, uint8_t *p_GrpBase) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *) h_FmPcdCcTree; + + SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE); + + if(grpId >= p_FmPcdCcTree->numOfGrps) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("grpId you asked > numOfGroup of relevant tree")); + *p_GrpBits = p_FmPcdCcTree->fmPcdGroupParam[grpId].totalBitsMask; + *p_GrpBase = p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry; + return E_OK; +} + +t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint32_t *p_Offset, t_Handle h_FmPort) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd,E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcTree,E_INVALID_STATE); + + FmPcdCcUpdateTreeOwner(p_FmPcdCcTree, TRUE); + + *p_Offset = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr)) - + p_FmPcd->physicalMuramBase); + + err = CcUpdateParams(h_FmPcd, h_FmPort, h_FmPcdCcTree, TRUE); + + return err; +} + +t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, 
t_Handle h_FmPcdCcTree) +{ + t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; + + UNUSED(h_FmPcd); + + SANITY_CHECK_RETURN_ERROR(p_FmPcdCcTree,E_INVALID_HANDLE); + + FmPcdCcUpdateTreeOwner(p_FmPcdCcTree, FALSE); + + return E_OK; +} + +t_Error FmPcdCcTreeTryLock(t_Handle h_FmPcdCcTree) +{ + if (TRY_LOCK(NULL, &((t_FmPcdCcTree *)h_FmPcdCcTree)->lock)) + return E_OK; + return ERROR_CODE(E_BUSY); +} + +t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_List *p_List) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; + t_List *p_Pos; + t_CcNodeInformation *p_CcNodeInfo, nodeInfo; + t_Error err = E_OK; + + UNUSED(h_FmPcd); + + if(LIST_IsEmpty(&p_FmPcdCcNode->ccTreesLst)) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("asked for more nodes in CC than MAX")) ; + LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode->ccTreesLst) + { + p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos); + ASSERT_COND(p_CcNodeInfo->h_CcNode); + err = FmPcdCcTreeTryLock(p_CcNodeInfo->h_CcNode); + if(err == E_OK) + { + memset(&nodeInfo, 0, sizeof(t_CcNodeInformation)); + nodeInfo.h_CcNode = p_CcNodeInfo->h_CcNode; + EnqueueNodeInfoToRelevantLst(p_List, &nodeInfo); + } + else + FmPcdCcNodeTreeReleaseLock(p_List); + } + + return err; +} + +t_Handle FM_PCD_CcBuildTree(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_Error err = E_OK; + int i = 0, j = 0, k = 0; + t_FmPcdCcTree *p_FmPcdCcTree; + uint8_t numOfEntries; + t_Handle p_CcTreeTmp; + t_FmPcdCcGrpParams *p_FmPcdCcGroupParams; + t_FmPcdCcNextEngineAndRequiredActionParams params[16]; + t_NetEnvParams netEnvParams; + uint8_t lastOne = 0; + uint32_t requiredAction = 0; + t_FmPcdCcNode *p_FmPcdCcNextNode; + t_CcNodeInformation ccNodeInfo, *p_CcInformation; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam,E_INVALID_HANDLE, NULL); + + if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); + return NULL; + } + + p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree)); + if(!p_FmPcdCcTree) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure")); + return NULL; + } + memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree)) ; + memset(params, 0, 16 * sizeof(t_FmPcdCcNextEngineParams)); + + INIT_LIST(&p_FmPcdCcTree->fmPortsLst); + + numOfEntries = 0; + p_FmPcdCcTree->netEnvId = (uint8_t)(PTR_TO_UINT(p_PcdGroupsParam->h_NetEnv)-1); + for(i = 0; i < p_PcdGroupsParam->numOfGrps; i++) + { + p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i]; + + if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS)); + return NULL; + } + + p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries; + p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup =(uint8_t)( 0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits); + numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; + if(numOfEntries > 16) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than 16")); + return NULL; + } + if(lastOne) + { + if(p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne) + { + DeleteTree(p_FmPcdCcTree,p_FmPcd); + REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must 
be set in descending order"));
+                return NULL;
+            }
+        }
+
+        lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup;
+
+        netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId;
+        netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits;
+        memcpy(netEnvParams.unitIds, &p_FmPcdCcGroupParams->unitIds, (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits);
+        err = PcdGetUnitsVector(p_FmPcd, &netEnvParams);
+        if(err)
+        {
+            DeleteTree(p_FmPcdCcTree,p_FmPcd);
+            REPORT_ERROR(MAJOR, err, NO_MSG);
+            return NULL;
+        }
+
+        p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector;
+        for(j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++)
+        {
+            err = ValidateNextEngineParams(h_FmPcd,&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j]);
+            if(err)
+            {
+                DeleteTree(p_FmPcdCcTree,p_FmPcd);
+                REPORT_ERROR(MAJOR, err, (NO_MSG));
+                return NULL;
+            }
+
+#ifdef FM_PCD_CC_MANIP
+            if(p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip)
+            {
+                err = FmPcdManipCheckParamsForCcNextEgine(&p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction);
+                if(err)
+                {
+                    DeleteTree(p_FmPcdCcTree,p_FmPcd);
+                    REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+                    return NULL;
+                }
+            }
+#endif /* FM_PCD_CC_MANIP */
+
+            memcpy(&params[k].nextEngineParams, &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], sizeof(t_FmPcdCcNextEngineParams));
+            requiredAction |= UPDATE_CC_WITH_TREE;
+            params[k].requiredAction = requiredAction;
+            k++;
+        }
+    }
+
+    p_FmPcdCcTree->numOfEntries = (uint8_t)k;
+    p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps;
+    p_FmPcdCcTree->ccTreeBaseAddr =
+        PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd),
+                                      (uint32_t)( k * FM_PCD_CC_AD_ENTRY_SIZE),
+                                      FM_PCD_CC_AD_TABLE_ALIGN));
+
+    if(!p_FmPcdCcTree->ccTreeBaseAddr)
+    {
+        DeleteTree(p_FmPcdCcTree,p_FmPcd);
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
+        return NULL;
+    }
+    IOMemSet32(UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(k * FM_PCD_CC_AD_ENTRY_SIZE));
+
+    p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
+
+    j = 0;
+    for(i = 0; i < numOfEntries; i++)
+    {
+        NextStepAd(p_CcTreeTmp,&params[i].nextEngineParams,p_FmPcd);
+        p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
+        memcpy(&p_FmPcdCcTree->nextEngineAndRequiredAction[i], &params[i], sizeof(t_FmPcdCcNextEngineAndRequiredActionParams));
+        if(p_FmPcdCcTree->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine== e_FM_PCD_CC)
+        {
+            p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode;
+            if(!IsNodeInModifiedState((t_Handle)p_FmPcdCcNextNode))
+            {
+                memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
+                ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree;
+                ccNodeInfo.index = 1;
+                EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst, &ccNodeInfo);
+                UpdateNodeWithModifiedState((t_Handle)p_FmPcdCcNextNode, TRUE);
+            }
+            else
+            {
+                p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccTreeIdLst,(t_Handle)p_FmPcdCcTree);
+                ASSERT_COND(p_CcInformation);
+                p_CcInformation->index++;
+            }
+        }
+    }
+
+    FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId);
+    p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);
+
+    for(i = 0; i < p_FmPcdCcTree->numOfEntries ; i++)
+    {
+        if(p_FmPcdCcTree->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine == e_FM_PCD_CC)
+        {
+            p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode;
+
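+            /* the "modified" flag served only as a "visited" marker in the loop above; clear it in this second pass */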
+            if(IsNodeInModifiedState((t_Handle)p_FmPcdCcNextNode))
+                UpdateNodeWithModifiedState((t_Handle)p_FmPcdCcNextNode, FALSE);
+        }
+    }
+
+    for(i = 0; i < numOfEntries; i++)
+    {
+        if(p_FmPcdCcTree->nextEngineAndRequiredAction[i].requiredAction)
+        {
+            err = FmPcdCcSetRequiredAction(h_FmPcd, p_FmPcdCcTree->nextEngineAndRequiredAction[i].requiredAction, &p_FmPcdCcTree->nextEngineAndRequiredAction[i], p_CcTreeTmp,1, p_FmPcdCcTree);
+            if(err)
+            {
+                DeleteTree(p_FmPcdCcTree,p_FmPcd);
+                REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
+                return NULL;
+            }
+            p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE);
+        }
+    }
+
+    return p_FmPcdCcTree;
+}
+
+t_Error FM_PCD_CcDeleteTree(t_Handle h_FmPcd, t_Handle h_CcTree)
+{
+    t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
+    t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree;
+    int i= 0;
+
+    SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
+    SANITY_CHECK_RETURN_ERROR(p_CcTree,E_INVALID_STATE);
+
+    FmPcdDecNetEnvOwners(h_FmPcd, p_CcTree->netEnvId);
+
+    if(p_CcTree->owners)
+        RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree"));
+
+    for(i = 0; i < p_CcTree->numOfEntries; i++)
+    {
+        if(p_CcTree->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine == e_FM_PCD_CC)
+            UpdateNodeOwner(p_CcTree->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode, FALSE);
+    }
+
+#ifdef FM_PCD_CC_MANIP
+    for(i = 0; i < p_CcTree->numOfEntries; i++)
+    {
+        if(p_CcTree->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip)
+            FmPcdManipUpdateOwner(p_CcTree->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip, FALSE);
+    }
+#endif /* FM_PCD_CC_MANIP */
+
+    DeleteTree(p_CcTree, p_FmPcd);
+    return E_OK;
+}
+
+t_Handle FM_PCD_CcSetNode(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam)
+{
+    t_FmPcd *p_FmPcd = (t_FmPcd *) h_FmPcd;
+    t_FmPcdCcNode *p_FmPcdCcNode, *p_FmPcdCcNextNode;
+    t_Error err = E_OK;
+    int tmp, size;
+    bool glblMask = FALSE;
+    t_FmPcdCcKeyParams *p_KeyParams;
+    t_Handle p_KeysMatchTblTmp;
+    t_Handle p_AdTableTmp;
+    bool fullField = FALSE;
+    ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE;
+    bool isKeyTblAlloc, fromIc = FALSE;
+    t_CcNodeInformation ccNodeInfo, *p_CcInformation;
+
+    SANITY_CHECK_RETURN_VALUE(h_FmPcd,E_INVALID_HANDLE,NULL);
+
+    /*
+    if (!p_CcNodeParam->keysParams.keySize ||
+        !p_CcNodeParam->keysParams.numOfKeys)
+    {
+        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("At least one key of keySize > 0 must be defined."));
+        return NULL;
+    }
+    */
+    p_FmPcdCcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode));
+    if(!p_FmPcdCcNode)
+    {
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory"));
+        return NULL;
+    }
+    memset(p_FmPcdCcNode, 0, sizeof(t_FmPcdCcNode));
+
+    p_FmPcdCcNode->p_GlblMask = (t_Handle)XX_Malloc(CC_GLBL_MASK_SIZE * sizeof(uint8_t));
+    memset(p_FmPcdCcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t));
+
+    p_FmPcdCcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys;
+
+    p_FmPcdCcNode->h_FmPcd = h_FmPcd;
+
+    INIT_LIST(&p_FmPcdCcNode->ccPrevNodesLst);
+    INIT_LIST(&p_FmPcdCcNode->ccTreeIdLst);
+    INIT_LIST(&p_FmPcdCcNode->ccTreesLst);
+
+    if((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR) &&
+       ((p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv4) ||
+        (p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv6)) &&
+       (p_CcNodeParam->extractCcParams.extractByHdr.type == e_FM_PCD_EXTRACT_FULL_FIELD) &&
+       ((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6 == 
NET_HEADER_FIELD_IPv6_HOP_LIMIT) || + (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4 == NET_HEADER_FIELD_IPv4_TTL))) + { + err = Ipv4TtlOrIpv6HopLimiCheckParams(h_FmPcd, p_CcNodeParam, p_FmPcdCcNode, &isKeyTblAlloc); + glblMask = FALSE; + + } + else if((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR) && + ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_KEY) || + (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_HASH) || + (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID))) + { + if((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID) && + (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0)) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0")); + return NULL; + } + + icCode = IcDefineCode(p_CcNodeParam); + fromIc = TRUE; + if(icCode == CC_PRIVATE_INFO_NONE) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("user asked extraction from IC and field in internal context or action wasn't initialized in the right way")); + return NULL; + } + + if((icCode == CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP) || (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP)) + { + err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_FmPcdCcNode, &isKeyTblAlloc); + + glblMask = TRUE; + } + else + { + err = CheckParams(h_FmPcd, p_CcNodeParam,p_FmPcdCcNode, &isKeyTblAlloc); + if(p_FmPcdCcNode->glblMaskSize) + glblMask = TRUE; + } + } + else + { + err = CheckParams(h_FmPcd, p_CcNodeParam,p_FmPcdCcNode, &isKeyTblAlloc); + if(p_FmPcdCcNode->glblMaskSize) + glblMask = TRUE; + } + + if(err) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + + switch(p_CcNodeParam->extractCcParams.type) + { + case(e_FM_PCD_EXTRACT_BY_HDR): + switch(p_CcNodeParam->extractCcParams.extractByHdr.type) + { + case(e_FM_PCD_EXTRACT_FULL_FIELD): + p_FmPcdCcNode->parseCode = GetFullFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField); + GetSizeHeaderField(p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField, &p_FmPcdCcNode->sizeOfExtraction); + fullField = TRUE; + if((p_FmPcdCcNode->parseCode != CC_PC_FF_TCI1) && (p_FmPcdCcNode->parseCode != CC_PC_FF_TCI2) && + (p_FmPcdCcNode->parseCode != CC_PC_FF_MPLS1) && (p_FmPcdCcNode->parseCode != CC_PC_FF_MPLS1) && + (p_FmPcdCcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1) && (p_FmPcdCcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2) && + (p_FmPcdCcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1) && (p_FmPcdCcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2) && + glblMask) + { + glblMask = FALSE; + p_FmPcdCcNode->glblMaskSize = 4; + p_FmPcdCcNode->lclMask = TRUE; + } + break; + case(e_FM_PCD_EXTRACT_FROM_HDR): + p_FmPcdCcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size; + p_FmPcdCcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; + p_FmPcdCcNode->parseCode = GetPrParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, + p_FmPcdCcNode->offset,glblMask, &p_FmPcdCcNode->prsArrayOffset); + break; + case(e_FM_PCD_EXTRACT_FROM_FIELD): + p_FmPcdCcNode->offset = 
p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; + p_FmPcdCcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size; + p_FmPcdCcNode->parseCode = GetFieldParseCode(p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field, + p_FmPcdCcNode->offset,&p_FmPcdCcNode->prsArrayOffset, + p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex); + break; + default: + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + return NULL; + } + break; + case(e_FM_PCD_EXTRACT_NON_HDR): + /* get the field code for the generic extract */ + p_FmPcdCcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractNonHdr.size; + p_FmPcdCcNode->offset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; + p_FmPcdCcNode->parseCode = GetGenParseCode(p_CcNodeParam->extractCcParams.extractNonHdr.src, p_FmPcdCcNode->offset, glblMask, &p_FmPcdCcNode->prsArrayOffset, fromIc,icCode); + + if(p_FmPcdCcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) + { + if((p_FmPcdCcNode->offset + p_FmPcdCcNode->sizeOfExtraction) > 64) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_SELECTION,("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)")); + return NULL; + } + } + if((p_FmPcdCcNode->parseCode == CC_PC_GENERIC_IC_GMASK) || (p_FmPcdCcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)) + { + p_FmPcdCcNode->offset += p_FmPcdCcNode->prsArrayOffset; + p_FmPcdCcNode->prsArrayOffset = 0; + } + break; + + default: + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + return NULL; + } + + if(p_FmPcdCcNode->parseCode == CC_PC_ILLEGAL) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("illeagl extraction type")); + return NULL; + } + + if((p_FmPcdCcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY) || !p_FmPcdCcNode->sizeOfExtraction) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0")); + return NULL; + } + + if(p_CcNodeParam->keysParams.keySize != p_FmPcdCcNode->sizeOfExtraction) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); + return NULL; + } + + + p_FmPcdCcNode->userSizeOfExtraction = p_FmPcdCcNode->sizeOfExtraction; + + if(!glblMask) + memset(p_FmPcdCcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); + +#ifdef FM_PCD_CC_MANIP + err = CheckAndSetManipParamsWithCcNodeParams(p_FmPcdCcNode); + if(err != E_OK) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); + return NULL; + } +#endif /* FM_PCD_CC_MANIP */ + + GetCcExtractKeySize(p_FmPcdCcNode->sizeOfExtraction, &p_FmPcdCcNode->ccKeySizeAccExtraction); + + if(p_FmPcdCcNode->lclMask) + size = 2 * p_FmPcdCcNode->ccKeySizeAccExtraction; + else + size = p_FmPcdCcNode->ccKeySizeAccExtraction; + + if(isKeyTblAlloc) + { + p_FmPcdCcNode->h_KeysMatchTable =(t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), + (uint32_t)(size * sizeof(uint8_t) * (p_FmPcdCcNode->numOfKeys + 1)), + FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN); + if(!p_FmPcdCcNode->h_KeysMatchTable) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory in MURAM for KEY MATCH table")); + return NULL; + } + IOMemSet32((uint8_t *)p_FmPcdCcNode->h_KeysMatchTable, 0, size 
* sizeof(uint8_t) * (p_FmPcdCcNode->numOfKeys + 1)); + } + + p_FmPcdCcNode->h_AdTable = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcdCcNode->h_FmPcd), + (uint32_t)( (p_FmPcdCcNode->numOfKeys+1) * FM_PCD_CC_AD_ENTRY_SIZE), + FM_PCD_CC_AD_TABLE_ALIGN); + if(!p_FmPcdCcNode->h_AdTable) + { + DeleteNode(p_FmPcdCcNode); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory in MURAM for AD table ")); + return NULL; + } + IOMemSet32((uint8_t *)p_FmPcdCcNode->h_AdTable, 0, (uint32_t)((p_FmPcdCcNode->numOfKeys+1) * FM_PCD_CC_AD_ENTRY_SIZE)); + + p_KeysMatchTblTmp = p_FmPcdCcNode->h_KeysMatchTable; + p_AdTableTmp = p_FmPcdCcNode->h_AdTable; + for(tmp = 0 ; tmp < p_FmPcdCcNode->numOfKeys; tmp++) + { + p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; + + if(p_KeysMatchTblTmp) + { + Mem2IOCpy32((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key, p_FmPcdCcNode->sizeOfExtraction); + + if(p_FmPcdCcNode->lclMask && p_KeyParams->p_Mask) + Mem2IOCpy32(PTR_MOVE(p_KeysMatchTblTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), p_KeyParams->p_Mask, p_FmPcdCcNode->sizeOfExtraction); + else if(p_FmPcdCcNode->lclMask) + IOMemSet32(PTR_MOVE(p_KeysMatchTblTmp, p_FmPcdCcNode->ccKeySizeAccExtraction), 0xff, p_FmPcdCcNode->sizeOfExtraction); + p_KeysMatchTblTmp = PTR_MOVE(p_KeysMatchTblTmp, size * sizeof(uint8_t)); + } + NextStepAd(p_AdTableTmp,&p_KeyParams->ccNextEngineParams, p_FmPcd); + + p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); + + } + NextStepAd(p_AdTableTmp,&p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, p_FmPcd); + + if(fullField == TRUE) + p_FmPcdCcNode->sizeOfExtraction = 0; + + + for(tmp = 0; tmp < p_FmPcdCcNode->numOfKeys + 1; tmp++) + { + if(p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].nextEngineParams.params.ccParams.h_CcNode; + + if(!IsNodeInModifiedState((t_Handle)p_FmPcdCcNextNode)) + { + memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); + ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcNode; + ccNodeInfo.index = 1; + EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst, &ccNodeInfo); + UpdateNodeWithModifiedState((t_Handle)p_FmPcdCcNextNode, TRUE); + } + else + { + p_CcInformation = FindNodeInfoInReleventLst(&p_FmPcdCcNextNode->ccPrevNodesLst,(t_Handle)p_FmPcdCcNode); + ASSERT_COND(p_CcInformation); + p_CcInformation->index++; + } + } + + } + + for(tmp = 0; tmp < p_FmPcdCcNode->numOfKeys + 1; tmp++) + { + if(p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].nextEngineParams.params.ccParams.h_CcNode; + + if(IsNodeInModifiedState((t_Handle)p_FmPcdCcNextNode)) + UpdateNodeWithModifiedState((t_Handle)p_FmPcdCcNextNode, FALSE); + } + } + + p_AdTableTmp = p_FmPcdCcNode->h_AdTable; + for(tmp = 0; tmp < p_FmPcdCcNode->numOfKeys; tmp++) + { + if(p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction) + { + + err = FmPcdCcSetRequiredAction(h_FmPcd, p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction, &p_FmPcdCcNode->nextEngineAndRequiredAction[tmp], p_AdTableTmp,1, NULL); + if(err) + { + FM_PCD_CcDeleteNode(h_FmPcd, (t_Handle)p_FmPcdCcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); + } + } + if(p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction) + { + err = FmPcdCcSetRequiredAction(h_FmPcd, 
p_FmPcdCcNode->nextEngineAndRequiredAction[tmp].requiredAction, &p_FmPcdCcNode->nextEngineAndRequiredAction[tmp], p_AdTableTmp,1, NULL); + if(err) + { + FM_PCD_CcDeleteNode(h_FmPcd, (t_Handle)p_FmPcdCcNode); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + + } + + + return p_FmPcdCcNode; +} + +t_Error FM_PCD_CcDeleteNode(t_Handle h_FmPcd, t_Handle h_CcNode) +{ + t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; + int i = 0; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + + UNUSED(h_FmPcd); + if(!p_CcNode) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the node with this ID is not initialized")); + + if(p_CcNode->owners) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("the node with this ID can not be removed because this node is occupied, first - unbind this node")); + + for(i = 0; i < p_CcNode->numOfKeys; i++) + { + if(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + UpdateNodeOwner(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + + } + if(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.nextEngine == e_FM_PCD_CC) + UpdateNodeOwner(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); + +#ifdef FM_PCD_CC_MANIP + for(i = 0; i < p_CcNode->numOfKeys; i++) + { + if(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip) + FmPcdManipUpdateOwner(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip, FALSE); + } + if(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip) + FmPcdManipUpdateOwner(p_CcNode->nextEngineAndRequiredAction[i].nextEngineParams.h_Manip, FALSE); +#endif /* FM_PCD_CC_MANIP */ + + DeleteNode(p_CcNode); + + return E_OK; +} + +t_Error FM_PCD_CcNodeAddKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcPcdCcAddKey(p_FmPcd->h_Hc, h_CcNode, keyIndex, keySize, p_KeyParams); +} + +t_Error FM_PCD_CcNodeRemoveKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcPcdCcRemoveKey(p_FmPcd->h_Hc, h_CcNode, keyIndex); +} + +t_Error FM_PCD_CcNodeModifyKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcPcdCcModifyKey(p_FmPcd->h_Hc, h_CcNode, keyIndex, keySize, p_Key, p_Mask); +} + +t_Error FM_PCD_CcNodeModifyNextEngine(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcPcdCcModifyNodeNextEngine(p_FmPcd->h_Hc, h_CcNode, keyIndex, p_FmPcdCcNextEngineParams); +} + +t_Error FM_PCD_CcNodeModifyMissNextEngine(t_Handle h_FmPcd, t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, 
E_INVALID_HANDLE); + + return FmHcPcdCcModifyNodeMissNextEngine(p_FmPcd->h_Hc, h_CcNode, p_FmPcdCcNextEngineParams); +} + +t_Error FM_PCD_CcTreeModifyNextEngine(t_Handle h_FmPcd, t_Handle h_CcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcPcdCcModifyTreeNextEngine(p_FmPcd->h_Hc, h_CcTree, grpId, index, p_FmPcdCcNextEngineParams); +} + +t_Error FM_PCD_CcNodeModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); + + return FmHcPcdCcModifyKeyAndNextEngine(p_FmPcd->h_Hc, h_CcNode, keyIndex, keySize, p_KeyParams); +} + +uint32_t FM_PCD_CcNodeGetKeyCounter(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex) +{ + t_FmPcdCcNode *p_FmPcdCcNode = (t_FmPcdCcNode *)h_CcNode; + t_AdOfTypeResult *p_AdResult = NULL; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_VALUE(h_CcNode, E_INVALID_HANDLE, 0); +#ifdef DISABLE_SANITY_CHECKS +UNUSED(h_FmPcd); +#endif /* DISABLE_SANITY_CHECKS */ + + if (keyIndex >= p_FmPcdCcNode->numOfKeys) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, + ("keyIndex > numOfKeys defined for this node")); + return 0; + } + + p_AdResult = PTR_MOVE(p_FmPcdCcNode->h_AdTable, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE); + ASSERT_COND(p_AdResult); + + if (p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_CC) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, + ("statistics updated only for entries where next engine not CC")); + return 0; + } + + if(((p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_DONE) && + !p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.enqueueParams.statisticsEn) || + ((p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_KG) && + !p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.kgParams.statisticsEn) || + ((p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.nextEngine == e_FM_PCD_PLCR) && + !p_FmPcdCcNode->nextEngineAndRequiredAction[keyIndex].nextEngineParams.params.plcrParams.statisticsEn)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, + ("statistics wasn't enable")); + return 0; + } + + return GET_UINT32(p_AdResult->res); +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_cc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_cc.h @@ -0,0 +1,312 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_cc.h + + @Description FM PCD CC ... +*//***************************************************************************/ +#ifndef __FM_CC_H +#define __FM_CC_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" + +#include "fm_pcd.h" + + +/***********************************************************************/ +/* Coarse classification defines */ +/***********************************************************************/ + +#define CC_PC_FF_MACDST 0x00 +#define CC_PC_FF_MACSRC 0x01 +#define CC_PC_FF_ETYPE 0x02 + +#define CC_PC_FF_TCI1 0x03 +#define CC_PC_FF_TCI2 0x04 + +#define CC_PC_FF_MPLS1 0x06 +#define CC_PC_FF_MPLS_LAST 0x07 + +#define CC_PC_FF_IPV4DST1 0x08 +#define CC_PC_FF_IPV4DST2 0x16 +#define CC_PC_FF_IPV4IPTOS_TC1 0x09 +#define CC_PC_FF_IPV4IPTOS_TC2 0x17 +#define CC_PC_FF_IPV4PTYPE1 0x0A +#define CC_PC_FF_IPV4PTYPE2 0x18 +#define CC_PC_FF_IPV4SRC1 0x0b +#define CC_PC_FF_IPV4SRC2 0x19 +#define CC_PC_FF_IPV4SRC1_IPV4DST1 0x0c +#define CC_PC_FF_IPV4SRC2_IPV4DST2 0x1a +#define CC_PC_FF_IPV4TTL 0x29 + + +#define CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1 0x0d /*TODO - CLASS - what is it? 
TOS*/ +#define CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2 0x1b +#define CC_PC_FF_IPV6PTYPE1 0x0e +#define CC_PC_FF_IPV6PTYPE2 0x1c +#define CC_PC_FF_IPV6DST1 0x0f +#define CC_PC_FF_IPV6DST2 0x1d +#define CC_PC_FF_IPV6SRC1 0x10 +#define CC_PC_FF_IPV6SRC2 0x1e +#define CC_PC_FF_IPV6HOP_LIMIT 0x2a +#define CC_PC_FF_GREPTYPE 0x11 + +#define CC_PC_FF_MINENCAP_PTYPE 0x12 +#define CC_PC_FF_MINENCAP_IPDST 0x13 +#define CC_PC_FF_MINENCAP_IPSRC 0x14 +#define CC_PC_FF_MINENCAP_IPSRC_IPDST 0x15 + +#define CC_PC_FF_L4PSRC 0x1f +#define CC_PC_FF_L4PDST 0x20 +#define CC_PC_FF_L4PSRC_L4PDST 0x21 + +#define CC_PC_FF_PPPPID 0x05 + +#define CC_PC_PR_SHIM1 0x22 +#define CC_PC_PR_SHIM2 0x23 + +#define CC_PC_GENERIC_WITHOUT_MASK 0x27 +#define CC_PC_GENERIC_WITH_MASK 0x28 +#define CC_PC_GENERIC_IC_GMASK 0x2B +#define CC_PC_GENERIC_IC_HASH_INDEXED 0x2C + +#define CC_PR_OFFSET 0x25 +#define CC_PR_WITHOUT_OFFSET 0x26 + +#define CC_PC_PR_ETH_OFFSET 19 +#define CC_PC_PR_USER_DEFINED_SHIM1_OFFSET 16 +#define CC_PC_PR_USER_DEFINED_SHIM2_OFFSET 17 +#define CC_PC_PR_USER_LLC_SNAP_OFFSET 20 +#define CC_PC_PR_VLAN1_OFFSET 21 +#define CC_PC_PR_VLAN2_OFFSET 22 +#define CC_PC_PR_PPPOE_OFFSET 24 +#define CC_PC_PR_MPLS1_OFFSET 25 +#define CC_PC_PR_MPLS_LAST_OFFSET 26 +#define CC_PC_PR_IP1_OFFSET 27 +#define CC_PC_PR_IP_LAST_OFFSET 28 +#define CC_PC_PR_MINENC_OFFSET 28 +#define CC_PC_PR_L4_OFFSET 30 +#define CC_PC_PR_GRE_OFFSET 29 +#define CC_PC_PR_ETYPE_LAST_OFFSET 23 +#define CC_PC_PR_NEXT_HEADER_OFFSET 31 + +#define CC_PC_ILLEGAL 0xff +#define CC_SIZE_ILLEGAL 0 + +#define FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN 16 +#define FM_PCD_CC_AD_TABLE_ALIGN 256 +#define FM_PCD_CC_AD_ENTRY_SIZE 16 +#define FM_PCD_CC_NUM_OF_KEYS 255 + +#define FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE 0x00000000 +#define FM_PCD_AD_RESULT_DATA_FLOW_TYPE 0x80000000 +#define FM_PCD_AD_RESULT_PLCR_DIS 0x20000000 +#define FM_PCD_AD_RESULT_EXTENDED_MODE 0x80000000 +#define FM_PCD_AD_RESULT_NADEN 0x20000000 +#define FM_PCD_AD_RESULT_STATISTICS_EN 0x40000000 + + +#define FM_PCD_AD_CONT_LOOKUP_TYPE 0x40000000 +#define FM_PCD_AD_CONT_LOOKUP_LCL_MASK 0x00800000 + +#define FM_PCD_AD_TYPE_MASK 0xc0000000 +#define FM_PCD_AD_OPCODE_MASK 0x0000000f + +#define FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT 16 + +#define GLBL_MASK_FOR_HASH_INDEXED 0xfff00000 +#define CC_GLBL_MASK_SIZE 4 + +typedef uint32_t ccPrivateInfo_t; /**< private info of CC: */ + +#define CC_PRIVATE_INFO_NONE 0 +#define CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP 0x80000000 +#define CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH 0x40000000 +#define CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH 0x20000000 +#define CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP 0x10000000 + +/***********************************************************************/ +/* Memory map */ +/***********************************************************************/ +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +typedef _Packed struct { + volatile uint32_t fqid; + volatile uint32_t plcrProfile; + volatile uint32_t nia; + volatile uint32_t res; +} _PackedType t_AdOfTypeResult; + +typedef _Packed struct { + volatile uint32_t ccAdBase; + volatile uint32_t matchTblPtr; + volatile uint32_t pcAndOffsets; + volatile uint32_t gmask; +} _PackedType t_AdOfTypeContLookup; + +typedef _Packed union { + volatile t_AdOfTypeResult adResult; + volatile t_AdOfTypeContLookup adContLookup; +} _PackedType t_Ad; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/***********************************************************************/ +/* Driver's internal structures */ +/***********************************************************************/ + +typedef enum e_ModifyState { + e_MODIFY_STATE_ADD = 0, + e_MODIFY_STATE_REMOVE, + e_MODIFY_STATE_CHANGE +} e_ModifyState; + +typedef struct { + t_FmPcdCcNextEngineParams nextEngineParams; + uint32_t requiredAction; + uint32_t shadowAction; +} t_FmPcdCcNextEngineAndRequiredActionParams; + +typedef struct { + t_Handle p_Ad; + e_FmPcdEngine fmPcdEngine; + bool adAllocated; + bool isTree; + + uint32_t myInfo; + t_List *h_CcNextNodesLst; + t_Handle h_AdditionalInfo; + t_Handle h_Node; +} t_FmPcdModifyCcAdditionalParams; + +typedef struct { + t_Handle p_AdTableNew; + t_Handle p_KeysMatchTableNew; + t_Handle p_AdTableOld; + t_Handle p_KeysMatchTableOld; + uint16_t numOfKeys; + t_Handle h_CurrentNode; + uint16_t keyIndex; + t_Handle h_NodeForAdd; + t_Handle h_NodeForRmv; + t_Handle h_ManipForRmv; + bool tree; + + t_FmPcdCcNextEngineAndRequiredActionParams nextEngineAndRequiredAction[256]; +} t_FmPcdModifyCcKeyAdditionalParams; + +typedef struct { + t_Handle h_Manip; + t_Handle h_CcNode; +} t_CcNextEngineInfo; + +typedef struct { + uint16_t numOfKeys; + bool glblMaskUpdated; + t_Handle p_GlblMask; + bool lclMask; + uint8_t parseCode; + uint8_t offset; + uint8_t prsArrayOffset; + bool ctrlFlow; + uint8_t owners; + + uint8_t ccKeySizeAccExtraction; + uint8_t sizeOfExtraction; + uint8_t glblMaskSize; + + t_Handle h_KeysMatchTable; + t_Handle h_AdTable; + + t_List ccPrevNodesLst; + + t_List ccTreeIdLst; + t_List ccTreesLst; + + t_Handle h_FmPcd; + uint32_t shadowAction; + bool modifiedState; + uint8_t userSizeOfExtraction; + + t_FmPcdCcNextEngineAndRequiredActionParams nextEngineAndRequiredAction[256]; +} t_FmPcdCcNode; + +typedef struct { + t_FmPcdCcNode *p_FmPcdCcNode; + bool occupied; + uint8_t owners; + volatile bool lock; +} t_FmPcdCcNodeArray; + +typedef struct { + uint8_t numOfEntriesInGroup; + uint32_t totalBitsMask; + uint8_t baseGroupEntry; +} t_FmPcdCcGroupParam; + +typedef struct { + uint8_t netEnvId; + uintptr_t ccTreeBaseAddr; + uint8_t numOfGrps; + t_FmPcdCcGroupParam fmPcdGroupParam[FM_PCD_MAX_NUM_OF_CC_GROUPS]; + t_List fmPortsLst; + volatile bool lock; + uint8_t numOfEntries; + uint8_t owners; + t_Handle *fmPcdCcSavedManipParams[256]; + bool modifiedState; + uint32_t requiredAction; + t_FmPcdCcNextEngineAndRequiredActionParams nextEngineAndRequiredAction[FM_PCD_MAX_NUM_OF_KEYS]; +} t_FmPcdCcTree; + +typedef struct { + t_FmPcdCcTree *p_FmPcdCcTree; + bool occupied; + uint8_t owners; + volatile bool lock; +} t_FmPcdCcTreeArray; + + +bool FmPcdManipIsManipNode(t_Handle h_Ad); + + +#endif /* __FM_CC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_kg.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_kg.c @@ -0,0 +1,3189 @@ 
+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_kg.c + + @Description FM PCD ... 
+*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "net_ext.h" +#include "fm_port_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_hc.h" + +#include "fm_pcd_ipc.h" + + +static t_Error WriteKgarWait(t_FmPcd *p_FmPcd, uint32_t kgar) +{ + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgar, kgar); + /* Wait for GO to be idle and read error */ + while ((kgar = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgar)) & FM_PCD_KG_KGAR_GO) ; + if (kgar & FM_PCD_KG_KGAR_ERR) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Keygen scheme access violation")); + return E_OK; +} + +static e_FmPcdKgExtractDfltSelect GetGenericSwDefault(t_FmPcdKgExtractDflt swDefaults[], uint8_t numOfSwDefaults, uint8_t code) +{ + int i; + + switch(code) + { + case( KG_SCH_GEN_PARSE_RESULT_N_FQID): + case( KG_SCH_GEN_DEFAULT): + case( KG_SCH_GEN_NEXTHDR): + for(i=0 ; i mask 0x1-0x7F */ + if(bitOffset<8) + { + mask = 0; + for(i = 0 ; i < bitOffset ; i++, walking1Mask <<= 1) + mask |= walking1Mask; + } + else + { + mask = 0xFF; + numOfOnesToClear = 0; + if(fqid && bitOffset>24) + /* bitOffset 25-31 --> mask 0xFE-0x80 */ + numOfOnesToClear = (uint8_t)(bitOffset-24); + else + /* bitOffset 9-15 --> mask 0xFE-0x80 */ + if(!fqid && bitOffset>8) + numOfOnesToClear = (uint8_t)(bitOffset-8); + for(i = 0 ; i < numOfOnesToClear ; i++, walking1Mask <<= 1) + mask &= ~walking1Mask; + /* bitOffset 8-24 for FQID, 8 for PP --> no mask (0xFF)*/ + } + return mask; +} + + +t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdKgClsPlanGrp *p_ClsPlanGrp; + t_FmPcdIpcKgClsPlanParams kgAlloc; + t_Error err = E_OK; + uint32_t oredVectors = 0; + uint32_t intFlags; + int i, j; + + if (p_Grp->numOfOptions >= FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Too many classification plan basic options selected.")); + + intFlags = FmPcdLock(p_FmPcd); + + /* find a new clsPlan group */ + for(i = 0;ip_FmPcdKg->clsPlanGrps[i].used) + break; + if(i== FM_MAX_NUM_OF_PORTS) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_FULL,("No classification plan groups available.")); + } + p_FmPcd->p_FmPcdKg->clsPlanGrps[i].used = TRUE; + p_Grp->clsPlanGrpId = (uint8_t)i; + + if(p_Grp->numOfOptions == 0) + p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = (uint8_t)i; + + if (!TRY_LOCK(NULL, &p_FmPcd->p_FmPcdKg->clsPlanGrps[p_Grp->clsPlanGrpId].lock)) + { + FmPcdUnlock(p_FmPcd, intFlags); + return ERROR_CODE(E_BUSY); + } + FmPcdUnlock(p_FmPcd, intFlags); + + p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[i]; + p_ClsPlanGrp->netEnvId = p_Grp->netEnvId; + p_ClsPlanGrp->owners = 0; + FmPcdSetClsPlanGrpId(p_FmPcd, p_Grp->netEnvId, p_Grp->clsPlanGrpId); + FmPcdIncNetEnvOwners(p_FmPcd, p_Grp->netEnvId); + + p_ClsPlanGrp->sizeOfGrp = (uint16_t)(1<numOfOptions); + /* a minimal group of 8 is required */ + if(p_ClsPlanGrp->sizeOfGrp < CLS_PLAN_NUM_PER_GRP) + p_ClsPlanGrp->sizeOfGrp = CLS_PLAN_NUM_PER_GRP; + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + err = KgAllocClsPlanEntries(h_FmPcd, p_ClsPlanGrp->sizeOfGrp, p_FmPcd->guestId, &p_ClsPlanGrp->baseEntry); + + if(err) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->clsPlanGrps[p_Grp->clsPlanGrpId].lock); + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + } + + } + else + { + t_FmPcdIpcMsg msg; + uint32_t 
replyLength; + t_FmPcdIpcReply reply; + + /* in GUEST_PARTITION, we use the IPC, to also set a private driver group if required */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + memset(&kgAlloc, 0, sizeof(kgAlloc)); + kgAlloc.guestId = p_FmPcd->guestId; + kgAlloc.numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp; + msg.msgId = FM_PCD_ALLOC_KG_CLSPLAN; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry)); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->clsPlanGrps[p_Grp->clsPlanGrpId].lock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if (replyLength != (sizeof(uint32_t) + sizeof(p_ClsPlanGrp->baseEntry))) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->clsPlanGrps[p_Grp->clsPlanGrpId].lock); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + } + if ((t_Error)reply.error != E_OK) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->clsPlanGrps[p_Grp->clsPlanGrpId].lock); + RETURN_ERROR(MINOR, (t_Error)reply.error, NO_MSG); + } + + p_ClsPlanGrp->baseEntry = *(uint8_t*)(reply.replyBody); + } + + /* build classification plan entries parameters */ + p_ClsPlanSet->baseEntry = p_ClsPlanGrp->baseEntry; + p_ClsPlanSet->numOfClsPlanEntries = p_ClsPlanGrp->sizeOfGrp; + + oredVectors = 0; + for(i = 0; inumOfOptions; i++) + { + oredVectors |= p_Grp->optVectors[i]; + /* save an array of used options - the indexes represent the power of 2 index */ + p_ClsPlanGrp->optArray[i] = p_Grp->options[i]; + } + /* set the classification plan relevant entries so that all bits + * relevant to the list of options is cleared + */ + for(j = 0; jsizeOfGrp; j++) + p_ClsPlanSet->vectors[j] = ~oredVectors; + + for(i = 0; inumOfOptions; i++) + { + /* option i got the place 2^i in the clsPlan array. all entries that + * have bit i set, should have the vector bit cleared. So each option + * has one location that it is exclusive (1,2,4,8...) and represent the + * presence of that option only, and other locations that represent a + * combination of options. + * e.g: + * If ethernet-BC is option 1 it gets entry 2 in the table. Entry 2 + * now represents a frame with ethernet-BC header - so the bit + * representing ethernet-BC should be set and all other option bits + * should be cleared. + * Entries 2,3,6,7,10... also have ethernet-BC and therefore have bit + * vector[1] set, but they also have other bits set: + * 3=1+2, options 0 and 1 + * 6=2+4, options 1 and 2 + * 7=1+2+4, options 0,1,and 2 + * 10=2+8, options 1 and 3 + * etc. + * */ + + /* now for each option (i), we set their bits in all entries (j) + * that contain bit 2^i. 
+ */ + for(j = 0; jsizeOfGrp; j++) + { + if(j & (1<vectors[j] |= p_Grp->optVectors[i]; + } + } + + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->clsPlanGrps[p_Grp->clsPlanGrpId].lock); + + return E_OK; +} + +void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdIpcKgClsPlanParams kgAlloc; + t_Error err; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + /* check that no port is bound to this clsPlan */ + if(p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].owners) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a clsPlan grp that has ports bound to")); + return; + } + + FmPcdDecNetEnvOwners(p_FmPcd, p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].netEnvId); + + /* free blocks */ + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + KgFreeClsPlanEntries(h_FmPcd, + p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp, + p_FmPcd->guestId, + p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry); + } + else /* in GUEST_PARTITION, we use the IPC, to also set a private driver group if required */ + { + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + kgAlloc.guestId = p_FmPcd->guestId; + kgAlloc.numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].sizeOfGrp; + kgAlloc.clsPlanBase = p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId].baseEntry; + msg.msgId = FM_PCD_FREE_KG_CLSPLAN; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + { + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + if (replyLength != sizeof(uint32_t)) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return; + } + if((t_Error)reply.error != E_OK) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Free KG clsPlan failed")); + return; + } + } + + if(grpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId) + p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN; + /* clear clsPlan driver structure */ + memset(&p_FmPcd->p_FmPcdKg->clsPlanGrps[grpId], 0, sizeof(t_FmPcdKgClsPlanGrp)); +} + +t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort, uint32_t *p_SpReg, bool add) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t j, schemesPerPortVector = 0; + t_FmPcdKgScheme *p_Scheme; + uint8_t i, relativeSchemeId; + uint32_t tmp, walking1Mask; + uint8_t swPortIndex = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + /* for each scheme */ + for(i = 0; inumOfSchemes; i++) + { + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, p_BindPort->schemesIds[i]); + if(relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if(add) + { + if (!FmPcdKgIsSchemeValidSw(h_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Requested scheme is invalid.")); + + p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId]; + /* check netEnvId of the port against the scheme netEnvId */ + if((p_Scheme->netEnvId != p_BindPort->netEnvId) && (p_Scheme->netEnvId != ILLEGAL_NETENV)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port may not be bound to requested scheme - differ in netEnvId")); + + /* if next engine is private port policer profile, we need to check that it is valid */ + 
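+            /* The scheme's relative profile window (relativeProfileId ..
+             * relativeProfileId + numOfProfiles - 1) must fall inside the range
+             * allocated to this port, and each profile in that window must
+             * already have been initialized. */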
HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, p_BindPort->hardwarePortId); + if(p_Scheme->nextRelativePlcrProfile) + { + for(j = 0;jnumOfProfiles;j++) + { + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort); + if(p_Scheme->relativeProfileId+j >= p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Relative profile not in range")); + if(!FmPcdPlcrIsProfileValid(p_FmPcd, (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase + p_Scheme->relativeProfileId + j))) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Relative profile not valid.")); + } + } + if(!p_BindPort->useClsPlan) + { + /* if this port does not use clsPlan, it may not be bound to schemes with units that contain + cls plan options. Schemes that are used only directly, should not be checked. + it also may not be bound to schemes that go to CC with units that are options - so we OR + the match vector and the grpBits (= ccUnits) */ + if ((p_Scheme->matchVector != SCHEME_ALWAYS_DIRECT) || p_Scheme->ccUnits) + { + walking1Mask = 0x80000000; + tmp = (p_Scheme->matchVector == SCHEME_ALWAYS_DIRECT)? 0:p_Scheme->matchVector; + tmp |= p_Scheme->ccUnits; + while (tmp) + { + if(tmp & walking1Mask) + { + tmp &= ~walking1Mask; + if(!PcdNetEnvIsUnitWithoutOpts(p_FmPcd, p_Scheme->netEnvId, walking1Mask)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port (without clsPlan) may not be bound to requested scheme - uses clsPlan options")); + } + walking1Mask >>= 1; + } + } + } + } + /* build vector */ + schemesPerPortVector |= 1 << (31 - p_BindPort->schemesIds[i]); + } + + *p_SpReg = schemesPerPortVector; + + return E_OK; +} + +void FmPcdKgIncSchemeOwners(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i; + t_FmPcdKgScheme *p_Scheme; + + /* for each scheme - update owners counters */ + for(i = 0; inumOfSchemes; i++) + { + p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[p_BindPort->schemesIds[i]]; + + /* increment owners number */ + p_Scheme->owners++; + } +} + +void FmPcdKgDecSchemeOwners(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i; + t_FmPcdKgScheme *p_Scheme; + + /* for each scheme - update owners counters */ + for(i = 0; inumOfSchemes; i++) + { + p_Scheme = &p_FmPcd->p_FmPcdKg->schemes[p_BindPort->schemesIds[i]]; + + /* increment owners number */ + ASSERT_COND(p_Scheme->owners); + p_Scheme->owners--; + } +} + +static t_Error KgWriteSp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint32_t spReg, bool add) +{ + t_FmPcdKgPortConfigRegs *p_FmPcdKgPortRegs; + uint32_t tmpKgarReg = 0, tmpKgpeSp, intFlags; + t_Error err = E_OK; + + if (p_FmPcd->h_Hc) + return FmHcKgWriteSp(p_FmPcd->h_Hc, hardwarePortId, spReg, add); + + p_FmPcdKgPortRegs = &p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.portRegs; + + tmpKgarReg = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId); + intFlags = FmPcdLock(p_FmPcd); + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + if(err) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + tmpKgpeSp = GET_UINT32(p_FmPcdKgPortRegs->kgoe_sp); + + if(add) + tmpKgpeSp |= spReg; + else /* clear */ + tmpKgpeSp &= ~spReg; + + WRITE_UINT32(p_FmPcdKgPortRegs->kgoe_sp, tmpKgpeSp); + + tmpKgarReg = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId); + + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + FmPcdUnlock(p_FmPcd, intFlags); + return err; +} + +static t_Error KgWriteCpp(t_FmPcd *p_FmPcd, uint8_t 
hardwarePortId, uint32_t cppReg) +{ + t_FmPcdKgPortConfigRegs *p_FmPcdKgPortRegs; + uint32_t tmpKgarReg, intFlags; + t_Error err; + + if (p_FmPcd->h_Hc) + return FmHcKgWriteCpp(p_FmPcd->h_Hc, hardwarePortId, cppReg); + + p_FmPcdKgPortRegs = &p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.portRegs; + intFlags = FmPcdLock(p_FmPcd); + WRITE_UINT32(p_FmPcdKgPortRegs->kgoe_cpp, cppReg); + + tmpKgarReg = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId); + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + FmPcdUnlock(p_FmPcd, intFlags); + + return err; +} + +static void FmPcdKgUnbindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId) +{ + KgWriteCpp(p_FmPcd, hardwarePortId, 0); +} + +static t_Error KgBindPortToClsPlanGrp(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId) +{ + uint32_t tmpKgpeCpp = 0; + + tmpKgpeCpp = FmPcdKgBuildCppReg(p_FmPcd, clsPlanGrpId); + return KgWriteCpp(p_FmPcd, hardwarePortId, tmpKgpeCpp); +} + +t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t spReg; + t_Error err = E_OK; + + err = FmPcdKgBuildBindPortToSchemes(h_FmPcd, p_SchemeBind, &spReg, TRUE); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, TRUE); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + FmPcdKgIncSchemeOwners(h_FmPcd, p_SchemeBind); + + return E_OK; +} + +t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t spReg; + t_Error err = E_OK; + + err = FmPcdKgBuildBindPortToSchemes(h_FmPcd, p_SchemeBind, &spReg, FALSE); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + err = KgWriteSp(p_FmPcd, p_SchemeBind->hardwarePortId, spReg, FALSE); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + FmPcdKgDecSchemeOwners(h_FmPcd, p_SchemeBind); + + return E_OK; +} + +bool FmPcdKgIsSchemeValidSw(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].valid; +} + +bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + if(p_FmPcd->p_FmPcdKg->schemes[schemeId].matchVector == SCHEME_ALWAYS_DIRECT) + return TRUE; + else + return FALSE; +} + +t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags; + uint8_t i,j; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + + intFlags = FmPcdLock(p_FmPcd); + for(j=0,i=0;ip_FmPcdKg->schemesMng[i].allocated) + { + p_FmPcd->p_FmPcdKg->schemesMng[i].allocated = TRUE; + p_FmPcd->p_FmPcdKg->schemesMng[i].ownerId = guestId; + p_SchemesIds[j] = i; + j++; + } + } + + if (j != numOfSchemes) + { + /* roll back */ + for(j--; j; j--) + { + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].allocated = FALSE; + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[j]].ownerId = 0; + p_SchemesIds[j] = 0; + } + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("No schemes found")); + } + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; +} + +t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags; + uint8_t i; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); 
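+    /* KeyGen must already be configured before schemes can be returned
+     * to the schemes management array below. */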
+ SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + + intFlags = FmPcdLock(p_FmPcd); + + for(i=0;ip_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme was not previously allocated")); + } + if(p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId != guestId) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Scheme is not owned by caller. ")); + } + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].allocated = FALSE; + p_FmPcd->p_FmPcdKg->schemesMng[p_SchemesIds[i]].ownerId = 0; + } + + FmPcdUnlock(p_FmPcd, intFlags); + return E_OK; +} + +t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags; + uint8_t numOfBlocks, blocksFound=0, first=0; + uint8_t i, j; + + intFlags = FmPcdLock(p_FmPcd); + + if(!numOfClsPlanEntries) + { + FmPcdUnlock(p_FmPcd, intFlags); + return E_OK; + } + + if ((numOfClsPlanEntries % CLS_PLAN_NUM_PER_GRP) || (!POWER_OF_2(numOfClsPlanEntries))) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfClsPlanEntries must be a power of 2 and divisible by 8")); + } + + numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP); + + /* try to find consequent blocks */ + first = 0; + for(i=0;ip_FmPcdKg->clsPlanBlocksMng[i].allocated) + { + blocksFound++; + i++; + if(blocksFound == numOfBlocks) + break; + } + else + { + blocksFound = 0; + /* advance i to the next aligned address */ + first = i = (uint8_t)(first + numOfBlocks); + } + } + + if(blocksFound == numOfBlocks) + { + *p_First = (uint8_t)(first*CLS_PLAN_NUM_PER_GRP); + for(j = first; jp_FmPcdKg->clsPlanBlocksMng[j].allocated = TRUE; + p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[j].ownerId = guestId; + } + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; + } + else + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MINOR, E_FULL, ("No recources for clsPlan")); + } +} + +void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags; + uint8_t numOfBlocks; + uint8_t i, baseBlock; + + UNUSED( guestId); + + intFlags = FmPcdLock(p_FmPcd); + + numOfBlocks = (uint8_t)(numOfClsPlanEntries/CLS_PLAN_NUM_PER_GRP); + ASSERT_COND(!(base%CLS_PLAN_NUM_PER_GRP)); + + baseBlock = (uint8_t)(base/CLS_PLAN_NUM_PER_GRP); + for(i=baseBlock;ip_FmPcdKg->clsPlanBlocksMng[i].allocated); + ASSERT_COND(guestId == p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId); + p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].allocated = FALSE; + p_FmPcd->p_FmPcdKg->clsPlanBlocksMng[i].ownerId = 0; + } + FmPcdUnlock(p_FmPcd, intFlags); +} + +void KgEnable(t_FmPcd *p_FmPcd) +{ + t_FmPcdKgRegs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + WRITE_UINT32(p_Regs->kggcr,GET_UINT32(p_Regs->kggcr) | FM_PCD_KG_KGGCR_EN); +} + +void KgDisable(t_FmPcd *p_FmPcd) +{ + t_FmPcdKgRegs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + WRITE_UINT32(p_Regs->kggcr,GET_UINT32(p_Regs->kggcr) & ~FM_PCD_KG_KGGCR_EN); +} + +void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdKgClsPlanRegs *p_FmPcdKgPortRegs; + uint32_t tmpKgarReg=0, intFlags; + uint16_t i, j; + + SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(p_FmPcd->p_FmPcdKg, 
E_INVALID_HANDLE); + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + p_FmPcdKgPortRegs = &p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.clsPlanRegs; + + intFlags = FmPcdLock(p_FmPcd); + for(i=p_Set->baseEntry;ibaseEntry+p_Set->numOfClsPlanEntries;i+=8) + { + tmpKgarReg = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP)); + + for (j = i; j < i+8; j++) + { + ASSERT_COND(IN_RANGE(0, (j - p_Set->baseEntry), FM_PCD_MAX_NUM_OF_CLS_PLANS-1)); + WRITE_UINT32(p_FmPcdKgPortRegs->kgcpe[j % CLS_PLAN_NUM_PER_GRP],p_Set->vectors[j - p_Set->baseEntry]); + } + + if(WriteKgarWait(p_FmPcd, tmpKgarReg) != E_OK) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("WriteKgarWait FAILED")); + return; + } + } + FmPcdUnlock(p_FmPcd, intFlags); +} + +static void PcdKgErrorException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, force, schemeIndexes = 0,index = 0, mask = 0; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + event = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeer); + mask = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeeer); + + schemeIndexes = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgseer); + schemeIndexes &= GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgseeer); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgfeer); + if(force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgfeer, force & ~event); + + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeer, event); + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgseer, schemeIndexes); + + if(event & FM_PCD_KG_DOUBLE_ECC) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC); + if(event & FM_PCD_KG_KEYSIZE_OVERFLOW) + { + if(schemeIndexes) + { + while(schemeIndexes) + { + if(schemeIndexes & 0x1) + p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, (uint16_t)(31 - index)); + schemeIndexes >>= 1; + index+=1; + } + } + else /* this should happen only when interrupt is forced. 
*/ + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW); + } +} + +static t_Error KgInitGuest(t_FmPcd *p_FmPcd) +{ + t_Error err = E_OK; + t_FmPcdIpcKgSchemesParams kgAlloc; + uint32_t replyLength; + t_FmPcdIpcReply reply; + t_FmPcdIpcMsg msg; + + ASSERT_COND(p_FmPcd->guestId != NCSW_MASTER_ID); + + /* in GUEST_PARTITION, we use the IPC */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + memset(&kgAlloc, 0, sizeof(t_FmPcdIpcKgSchemesParams)); + kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes; + kgAlloc.guestId = p_FmPcd->guestId; + msg.msgId = FM_PCD_ALLOC_KG_SCHEMES; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if(replyLength != (sizeof(uint32_t) + p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + memcpy(p_FmPcd->p_FmPcdKg->schemesIds, (uint8_t*)(reply.replyBody),p_FmPcd->p_FmPcdKg->numOfSchemes*sizeof(uint8_t)); + + return (t_Error)reply.error; +} + +static t_Error KgInitMaster(t_FmPcd *p_FmPcd) +{ + t_Error err = E_OK; + t_FmPcdKgRegs *p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + int i; + uint8_t hardwarePortId = 0; + uint32_t tmpReg; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + + /**********************KGEER******************/ + WRITE_UINT32(p_Regs->kgeer, (FM_PCD_KG_DOUBLE_ECC | FM_PCD_KG_KEYSIZE_OVERFLOW)); + /**********************KGEER******************/ + + /**********************KGEEER******************/ + tmpReg = 0; + if(p_FmPcd->exceptions & FM_PCD_EX_KG_DOUBLE_ECC) + { + FmEnableRamsEcc(p_FmPcd->h_Fm); + tmpReg |= FM_PCD_KG_DOUBLE_ECC; + } + if(p_FmPcd->exceptions & FM_PCD_EX_KG_KEYSIZE_OVERFLOW) + tmpReg |= FM_PCD_KG_KEYSIZE_OVERFLOW; + WRITE_UINT32(p_Regs->kgeeer,tmpReg); + /**********************KGEEER******************/ + + /**********************KGFDOR******************/ + WRITE_UINT32(p_Regs->kgfdor,0); + /**********************KGFDOR******************/ + + /**********************KGGDV0R******************/ + WRITE_UINT32(p_Regs->kggdv0r,0); + /**********************KGGDV0R******************/ + + /**********************KGGDV1R******************/ + WRITE_UINT32(p_Regs->kggdv1r,0); + /**********************KGGDV1R******************/ + + /**********************KGGCR******************/ + WRITE_UINT32(p_Regs->kggcr, NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME); + /**********************KGGCR******************/ + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_KG, 0, e_FM_INTR_TYPE_ERR, PcdKgErrorException, p_FmPcd); + + /* clear binding between ports to schemes so that all ports are not bound to any schemes */ + for (i=0;ikgseer, 0xFFFFFFFF); + WRITE_UINT32(p_Regs->kgseeer, 0xFFFFFFFF); + + if(p_FmPcd->p_FmPcdKg->numOfSchemes) + { + err = FmPcdKgAllocSchemes(p_FmPcd, + p_FmPcd->p_FmPcdKg->numOfSchemes, + p_FmPcd->guestId, + p_FmPcd->p_FmPcdKg->schemesIds); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + } + + return E_OK; +} + + +/****************************************/ +/* API routines */ +/****************************************/ +t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdKgRegs 
*p_Regs; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER); + + p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + if(!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetAdditionalDataAfterParsing - guest mode!")); + +/* not needed + if(payloadOffset > 256) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("data exatraction offset from parseing end can not be more than 256")); +*/ + + WRITE_UINT32(p_Regs->kgfdor,payloadOffset); + + return E_OK; +} + +t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdKgRegs *p_Regs; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(((valueId == 0) || (valueId == 1)), E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_NULL_POINTER); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, E_NULL_POINTER); + + p_Regs = p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs; + + if(!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_KgSetDfltValue - guest mode!")); + + if(valueId == 0) + WRITE_UINT32(p_Regs->kggdv0r,value); + else + WRITE_UINT32(p_Regs->kggdv1r,value); + return E_OK; +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_KgDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i = 0, j = 0; + uint8_t hardwarePortId = 0; + uint32_t tmpKgarReg, intFlags; + t_Error err = E_OK; + t_FmPcdIpcMsg msg; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_KG_DUMP_REGS; + return XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL); + } + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs, ("FmPcdKgRegs Regs")); + + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kggcr); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgeer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgeeer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgseer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgseeer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kggsr); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgtpc); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgserc); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgfdor); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kggdv0r); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kggdv1r); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgfer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgfeer); + DUMP_VAR(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs,kgar); + + DUMP_SUBTITLE(("\n")); + intFlags = FmPcdLock(p_FmPcd); + for(j = 0;jp_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs, ("FmPcdKgIndirectAccessSchemeRegs Scheme %d Regs", j)); + + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_mode); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_ekfc); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_ekdv); + 
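+        /* The mask-select (kgse_bmch/kgse_bmcl), FQID base (kgse_fqb), hash
+         * configuration (kgse_hc) and policer profile command (kgse_ppc)
+         * registers of the same scheme are dumped next. */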
DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_bmch); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_bmcl); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_fqb); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_hc); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_ppc); + + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_gec, ("kgse_gec")); + DUMP_SUBSTRUCT_ARRAY(i, FM_PCD_KG_NUM_OF_GENERIC_REGS) + { + DUMP_MEMORY(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_gec[i], sizeof(uint32_t)); + } + + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_spc); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_dv0); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_dv1); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_ccbs); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs,kgse_mv); + } + DUMP_SUBTITLE(("\n")); + + for (i=0;ip_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.portRegs, ("FmPcdKgIndirectAccessPortRegs PCD Port %d regs", hardwarePortId)); + + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.portRegs, kgoe_sp); + DUMP_VAR(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.portRegs, kgoe_cpp); + } + + DUMP_SUBTITLE(("\n")); + for(j=0;jp_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.clsPlanRegs, ("FmPcdKgIndirectAccessClsPlanRegs Regs group %d", j)); + DUMP_TITLE(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.clsPlanRegs.kgcpe, ("kgcpe")); + + tmpKgarReg = FmPcdKgBuildReadClsPlanBlockActionReg((uint8_t)j); + err = WriteKgarWait(p_FmPcd, tmpKgarReg); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + DUMP_SUBSTRUCT_ARRAY(i, 8) + DUMP_MEMORY(&p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.clsPlanRegs.kgcpe[i], sizeof(uint32_t)); + } + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... 
*/ + +t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcdKg *p_FmPcdKg; + + UNUSED(p_FmPcd); + + if (p_FmPcdParams->numOfSchemes > FM_PCD_KG_NUM_OF_SCHEMES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("numOfSchemes should not exceed %d", FM_PCD_KG_NUM_OF_SCHEMES)); + return NULL; + } + + p_FmPcdKg = (t_FmPcdKg *)XX_Malloc(sizeof(t_FmPcdKg)); + if (!p_FmPcdKg) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Keygen allocation FAILED")); + return NULL; + } + memset(p_FmPcdKg, 0, sizeof(t_FmPcdKg)); + + if(FmIsMaster(p_FmPcd->h_Fm)) + { + p_FmPcdKg->p_FmPcdKgRegs = (t_FmPcdKgRegs *)UINT_TO_PTR(FmGetPcdKgBaseAddr(p_FmPcdParams->h_Fm)); + p_FmPcd->exceptions |= DEFAULT_fmPcdKgErrorExceptions; + } + + p_FmPcdKg->numOfSchemes = p_FmPcdParams->numOfSchemes; + if((p_FmPcd->guestId == NCSW_MASTER_ID) && !p_FmPcdKg->numOfSchemes) + { + p_FmPcdKg->numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES; + DBG(WARNING, ("numOfSchemes was defined 0 by user, re-defined by driver to FM_PCD_KG_NUM_OF_SCHEMES")); + } + + p_FmPcdKg->emptyClsPlanGrpId = ILLEGAL_CLS_PLAN; + + return p_FmPcdKg; +} + +t_Error KgInit(t_FmPcd *p_FmPcd) +{ + if (p_FmPcd->guestId == NCSW_MASTER_ID) + return KgInitMaster(p_FmPcd); + else + return KgInitGuest(p_FmPcd); +} + +t_Error KgFree(t_FmPcd *p_FmPcd) +{ + t_FmPcdIpcKgSchemesParams kgAlloc; + t_Error err = E_OK; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_KG, 0, e_FM_INTR_TYPE_ERR); + + if(p_FmPcd->guestId == NCSW_MASTER_ID) + return FmPcdKgFreeSchemes(p_FmPcd, + p_FmPcd->p_FmPcdKg->numOfSchemes, + p_FmPcd->guestId, + p_FmPcd->p_FmPcdKg->schemesIds); + + /* guest */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + kgAlloc.numOfSchemes = p_FmPcd->p_FmPcdKg->numOfSchemes; + kgAlloc.guestId = p_FmPcd->guestId; + ASSERT_COND(kgAlloc.numOfSchemes < FM_PCD_KG_NUM_OF_SCHEMES); + memcpy(kgAlloc.schemesIds, p_FmPcd->p_FmPcdKg->schemesIds , (sizeof(uint8_t))*kgAlloc.numOfSchemes); + msg.msgId = FM_PCD_FREE_KG_SCHEMES; + memcpy(msg.msgBody, &kgAlloc, sizeof(kgAlloc)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(kgAlloc), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + return (t_Error)reply.error; +} + +t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool *p_IsEmptyClsPlanGrp) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdKgInterModuleClsPlanGrpParams grpParams, *p_GrpParams; + t_FmPcdKgClsPlanGrp *p_ClsPlanGrp; + t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet; + t_Error err; + + memset(&grpParams, 0, sizeof(grpParams)); + grpParams.clsPlanGrpId = ILLEGAL_CLS_PLAN; + p_GrpParams = &grpParams; + + p_GrpParams->netEnvId = netEnvId; + err = PcdGetClsPlanGrpParams(h_FmPcd, p_GrpParams); + if(err) + RETURN_ERROR(MINOR,err,NO_MSG); + if(p_GrpParams->grpExists) + *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId; + else + { + p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet)); + if (!p_ClsPlanSet) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("memory allocation failed for p_ClsPlanSet")); + memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet)); + err = FmPcdKgBuildClsPlanGrp(h_FmPcd, p_GrpParams, 
p_ClsPlanSet); + if (err) + { + XX_Free(p_ClsPlanSet); + RETURN_ERROR(MINOR,err,NO_MSG); + } + *p_ClsPlanGrpId = p_GrpParams->clsPlanGrpId; + + if (p_FmPcd->h_Hc) + { + /* write clsPlan entries to memory */ + err = FmHcPcdKgSetClsPlan(p_FmPcd->h_Hc, p_ClsPlanSet); + if (err) + { + XX_Free(p_ClsPlanSet); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + else + /* write clsPlan entries to memory */ + KgSetClsPlan(p_FmPcd, p_ClsPlanSet); + + XX_Free(p_ClsPlanSet); + } + + /* mark if this is an empty classification group */ + if(*p_ClsPlanGrpId == p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId) + *p_IsEmptyClsPlanGrp = TRUE; + else + *p_IsEmptyClsPlanGrp = FALSE; + + p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId]; + + /* increment owners number */ + p_ClsPlanGrp->owners++; + + /* copy options array for port */ + memcpy(p_OptArray, &p_FmPcd->p_FmPcdKg->clsPlanGrps[*p_ClsPlanGrpId].optArray, FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)*sizeof(protocolOpt_t)); + + /* bind port to the new or existing group */ + err = KgBindPortToClsPlanGrp(p_FmPcd, hardwarePortId, p_GrpParams->clsPlanGrpId); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + + return E_OK; +} + +t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdKgClsPlanGrp *p_ClsPlanGrp = &p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId]; + t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet; + + FmPcdKgUnbindPortToClsPlanGrp(p_FmPcd, hardwarePortId); + + /* decrement owners number */ + ASSERT_COND(p_ClsPlanGrp->owners); + p_ClsPlanGrp->owners--; + + if(!p_ClsPlanGrp->owners) + { + if (p_FmPcd->h_Hc) + return FmHcPcdKgDeleteClsPlan(p_FmPcd->h_Hc, clsPlanGrpId); + else + { + /* clear clsPlan entries in memory */ + p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet)); + if (!p_ClsPlanSet) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("memory allocation failed for p_ClsPlanSet")); + memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet)); + p_ClsPlanSet->baseEntry = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry; + p_ClsPlanSet->numOfClsPlanEntries = p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp; + KgSetClsPlan(p_FmPcd, p_ClsPlanSet); + XX_Free(p_ClsPlanSet); + FmPcdKgDestroyClsPlanGrp(h_FmPcd, clsPlanGrpId); + } + } + return E_OK; +} + +t_Error FmPcdKgBuildScheme(t_Handle h_FmPcd, t_FmPcdKgSchemeParams *p_Scheme, t_FmPcdKgInterModuleSchemeRegs *p_SchemeRegs) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t grpBits = 0; + uint8_t grpBase; + bool direct=TRUE, absolute=FALSE; + uint16_t profileId=0, numOfProfiles=0, relativeProfileId; + t_Error err = E_OK; + int i = 0; + t_NetEnvParams netEnvParams; + uint32_t tmpReg, fqbTmp = 0, ppcTmp = 0, selectTmp, maskTmp, knownTmp, genTmp; + t_FmPcdKgKeyExtractAndHashParams *p_KeyAndHash = NULL; + uint8_t j, curr, idx; + uint8_t id, shift=0, code=0, offset=0, size=0; + t_FmPcdExtractEntry *p_Extract = NULL; + t_FmPcdKgExtractedOrParams *p_ExtractOr; + bool generic = FALSE; + t_KnownFieldsMasks bitMask; + e_FmPcdKgExtractDfltSelect swDefault = (e_FmPcdKgExtractDfltSelect)0; + t_FmPcdKgSchemesExtracts *p_LocalExtractsArray; + uint8_t numOfSwDefaults = 0; + t_FmPcdKgExtractDflt swDefaults[NUM_OF_SW_DEFAULTS]; + uint8_t currGenId = 0, relativeSchemeId; + + if(!p_Scheme->modify) + relativeSchemeId = p_Scheme->id.relativeSchemeId; + else + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, 
(uint8_t)(PTR_TO_UINT(p_Scheme->id.h_Scheme)-1)); + + memset(&p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId], 0, sizeof(t_FmPcdKgScheme)); + memset(swDefaults, 0, NUM_OF_SW_DEFAULTS*sizeof(t_FmPcdKgExtractDflt)); + memset(p_SchemeRegs, 0, sizeof(t_FmPcdKgInterModuleSchemeRegs)); + + if (p_Scheme->netEnvParams.numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("numOfDistinctionUnits should not exceed %d", FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS)); + + /* by netEnv parameters, get match vector */ + if(!p_Scheme->alwaysDirect) + { + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId = + (uint8_t)(PTR_TO_UINT(p_Scheme->netEnvParams.h_NetEnv)-1); + netEnvParams.netEnvId = p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId; + netEnvParams.numOfDistinctionUnits = p_Scheme->netEnvParams.numOfDistinctionUnits; + memcpy(netEnvParams.unitIds, p_Scheme->netEnvParams.unitIds, (sizeof(uint8_t))*p_Scheme->netEnvParams.numOfDistinctionUnits); + err = PcdGetUnitsVector(p_FmPcd, &netEnvParams); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].matchVector = netEnvParams.vector; + } + else + { + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].matchVector = SCHEME_ALWAYS_DIRECT; + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId = ILLEGAL_NETENV; + } + + if(p_Scheme->nextEngine == e_FM_PCD_INVALID) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next Engine of the scheme is not Valid")); + + if(p_Scheme->bypassFqidGeneration) + { +#ifdef FM_KG_NO_BYPASS_FQID_GEN + { + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPcd->h_Fm, &revInfo); + if (revInfo.majorRev != 4) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassFqidGeneration.")); + } +#endif /* FM_KG_NO_BYPASS_FQID_GEN */ + if(p_Scheme->baseFqid) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid set for a scheme that does not generate an FQID")); + } + else + if(!p_Scheme->baseFqid) + DBG(WARNING, ("baseFqid is 0.")); + + if(p_Scheme->nextEngine == e_FM_PCD_PLCR) + { + direct = p_Scheme->kgNextEngineParams.plcrProfile.direct; + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].directPlcr = direct; + absolute = (bool)(p_Scheme->kgNextEngineParams.plcrProfile.sharedProfile ? 
TRUE : FALSE); + if(!direct && absolute) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Indirect policing is not available when profile is shared.")); + + if(direct) + { + profileId = p_Scheme->kgNextEngineParams.plcrProfile.profileSelect.directRelativeProfileId; + numOfProfiles = 1; + } + else + { + profileId = p_Scheme->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase; + shift = p_Scheme->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift; + numOfProfiles = p_Scheme->kgNextEngineParams.plcrProfile.profileSelect.indirectProfile.numOfProfiles; + } + } + + if(p_Scheme->nextEngine == e_FM_PCD_CC) + { +#ifdef FM_KG_NO_BYPASS_PLCR_PROFILE_GEN + if((p_Scheme->kgNextEngineParams.cc.plcrNext) && (p_Scheme->kgNextEngineParams.cc.bypassPlcrProfileGeneration)) + { + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPcd->h_Fm, &revInfo); + if (revInfo.majorRev != 4) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("bypassPlcrProfileGeneration.")); + } +#endif /* FM_KG_NO_BYPASS_PLCR_PROFILE_GEN */ + + err = FmPcdCcGetGrpParams(p_Scheme->kgNextEngineParams.cc.h_CcTree, + p_Scheme->kgNextEngineParams.cc.grpId, + &grpBits, + &grpBase); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].ccUnits = grpBits; + + if((p_Scheme->kgNextEngineParams.cc.plcrNext) && + (!p_Scheme->kgNextEngineParams.cc.bypassPlcrProfileGeneration)) + { + if(p_Scheme->kgNextEngineParams.cc.plcrProfile.sharedProfile) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Shared profile may not be used after Coarse classification.")); + absolute = FALSE; + direct = p_Scheme->kgNextEngineParams.cc.plcrProfile.direct; + if(direct) + { + profileId = p_Scheme->kgNextEngineParams.cc.plcrProfile.profileSelect.directRelativeProfileId; + numOfProfiles = 1; + } + else + { + profileId = p_Scheme->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetRelativeProfileIdBase; + shift = p_Scheme->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.fqidOffsetShift; + numOfProfiles = p_Scheme->kgNextEngineParams.cc.plcrProfile.profileSelect.indirectProfile.numOfProfiles; + } + } + } + + /* if policer is used directly after KG, or after CC */ + if((p_Scheme->nextEngine == e_FM_PCD_PLCR) || + ((p_Scheme->nextEngine == e_FM_PCD_CC) && + (p_Scheme->kgNextEngineParams.cc.plcrNext) && + (!p_Scheme->kgNextEngineParams.cc.bypassPlcrProfileGeneration))) + { + /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */ + if(absolute) + { + /* for absolute direct policy only, */ + relativeProfileId = profileId; + err = FmPcdPlcrGetAbsoluteProfileId(h_FmPcd,e_FM_PCD_PLCR_SHARED,NULL, relativeProfileId, &profileId); + if(err) + RETURN_ERROR(MAJOR, err, ("Shared profile not valid offset")); + if(!FmPcdPlcrIsProfileValid(p_FmPcd, profileId)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Shared profile not valid.")); + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].relativeProfileId = profileId; + } + else + { + /* save relative profile id's for later check */ + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextRelativePlcrProfile = TRUE; + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].relativeProfileId = profileId; + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].numOfProfiles = numOfProfiles; + } + } + else + { + /* if policer is NOT going to be used after KG at all than if bypassFqidGeneration + is set, we do not need numOfUsedExtractedOrs and hashDistributionNumOfFqids */ + if(p_Scheme->bypassFqidGeneration && 
p_Scheme->numOfUsedExtractedOrs) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("numOfUsedExtractedOrs is set in a scheme that does not generate FQID or policer profile ID")); + if(p_Scheme->bypassFqidGeneration && + p_Scheme->useHash && + p_Scheme->keyExtractAndHashParams.hashDistributionNumOfFqids) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("hashDistributionNumOfFqids is set in a scheme that does not generate FQID or policer profile ID")); + } + + /* configure all 21 scheme registers */ + tmpReg = KG_SCH_MODE_EN; + switch(p_Scheme->nextEngine) + { + case(e_FM_PCD_PLCR): + /* add to mode register - NIA */ + tmpReg |= KG_SCH_MODE_NIA_PLCR; + tmpReg |= NIA_ENG_PLCR; + tmpReg |= (uint32_t)(p_Scheme->kgNextEngineParams.plcrProfile.sharedProfile ? NIA_PLCR_ABSOLUTE:0); + /* initialize policer profile command - */ + /* configure kgse_ppc */ + if(direct) + /* use profileId as base, other fields are 0 */ + p_SchemeRegs->kgse_ppc = (uint32_t)profileId; + else + { + if(shift > MAX_PP_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT)); + + if(!numOfProfiles || !POWER_OF_2(numOfProfiles)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2")); + + ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH; + ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW; + ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT); + ppcTmp |= (uint32_t)profileId; + + p_SchemeRegs->kgse_ppc = ppcTmp; + } + break; + case(e_FM_PCD_CC): + /* mode reg - define NIA */ + tmpReg |= (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC); + + p_SchemeRegs->kgse_ccbs = grpBits; + tmpReg |= (uint32_t)(grpBase << KG_SCH_MODE_CCOBASE_SHIFT); + + if(p_Scheme->kgNextEngineParams.cc.plcrNext) + { + if(!p_Scheme->kgNextEngineParams.cc.bypassPlcrProfileGeneration) + { + /* find out if absolute or relative */ + if(absolute) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("It is illegal to request a shared profile in a scheme that is in a KG->CC->PLCR flow")); + if(direct) + { + /* mask = 0, base = directProfileId */ + p_SchemeRegs->kgse_ppc = (uint32_t)profileId; + } + else + { + if(shift > MAX_PP_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fqidOffsetShift may not be larger than %d", MAX_PP_SHIFT)); + if(!numOfProfiles || !POWER_OF_2(numOfProfiles)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfProfiles must not be 0 and must be a power of 2")); + + ppcTmp = ((uint32_t)shift << KG_SCH_PP_SHIFT_HIGH_SHIFT) & KG_SCH_PP_SHIFT_HIGH; + ppcTmp |= ((uint32_t)shift << KG_SCH_PP_SHIFT_LOW_SHIFT) & KG_SCH_PP_SHIFT_LOW; + ppcTmp |= ((uint32_t)(numOfProfiles-1) << KG_SCH_PP_MASK_SHIFT); + ppcTmp |= (uint32_t)profileId; + + p_SchemeRegs->kgse_ppc = ppcTmp; + } + } + else + ppcTmp = KG_SCH_PP_NO_GEN; + } + break; + case(e_FM_PCD_DONE): + if(p_Scheme->kgNextEngineParams.doneAction == e_FM_PCD_DROP_FRAME) + tmpReg |= (NIA_ENG_BMI | NIA_BMI_AC_DISCARD); + else + tmpReg |= (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME); + break; + default: + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Next engine not supported")); + } + p_SchemeRegs->kgse_mode = tmpReg; + + p_SchemeRegs->kgse_mv = p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].matchVector; + + if(p_Scheme->useHash) + { + p_KeyAndHash = &p_Scheme->keyExtractAndHashParams; + + if (p_KeyAndHash->numOfUsedExtracts >= FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfUsedExtracts out of range")); + + /* configure kgse_dv0 */ + p_SchemeRegs->kgse_dv0 = 
p_KeyAndHash->privateDflt0; + + /* configure kgse_dv1 */ + p_SchemeRegs->kgse_dv1 = p_KeyAndHash->privateDflt1; + + if(!p_Scheme->bypassFqidGeneration) + { + if(!p_KeyAndHash->hashDistributionNumOfFqids || !POWER_OF_2(p_KeyAndHash->hashDistributionNumOfFqids)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionNumOfFqids must not be 0 and must be a power of 2")); + if((p_KeyAndHash->hashDistributionNumOfFqids-1) & p_Scheme->baseFqid) + DBG(WARNING, ("baseFqid unaligned. Distribution may result in less than hashDistributionNumOfFqids queues.")); + } + + /* configure kgse_ekdv */ + tmpReg = 0; + for( i=0 ;inumOfUsedDflts ; i++) + { + switch(p_KeyAndHash->dflts[i].type) + { + case(e_FM_PCD_KG_MAC_ADDR): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MAC_ADDR_SHIFT); + break; + case(e_FM_PCD_KG_TCI): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCI_SHIFT); + break; + case(e_FM_PCD_KG_ENET_TYPE): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_ENET_TYPE_SHIFT); + break; + case(e_FM_PCD_KG_PPP_SESSION_ID): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_SESSION_ID_SHIFT); + break; + case(e_FM_PCD_KG_PPP_PROTOCOL_ID): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT); + break; + case(e_FM_PCD_KG_MPLS_LABEL): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_MPLS_LABEL_SHIFT); + break; + case(e_FM_PCD_KG_IP_ADDR): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_ADDR_SHIFT); + break; + case(e_FM_PCD_KG_PROTOCOL_TYPE): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_PROTOCOL_TYPE_SHIFT); + break; + case(e_FM_PCD_KG_IP_TOS_TC): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IP_TOS_TC_SHIFT); + break; + case(e_FM_PCD_KG_IPV6_FLOW_LABEL): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT); + break; + case(e_FM_PCD_KG_IPSEC_SPI): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_IPSEC_SPI_SHIFT); + break; + case(e_FM_PCD_KG_L4_PORT): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_L4_PORT_SHIFT); + break; + case(e_FM_PCD_KG_TCP_FLAG): + tmpReg |= (p_KeyAndHash->dflts[i].dfltSelect << KG_SCH_DEF_TCP_FLAG_SHIFT); + break; + case(e_FM_PCD_KG_GENERIC_FROM_DATA): + swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA; + swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect; + numOfSwDefaults ++; + break; + case(e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V): + swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V; + swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect; + numOfSwDefaults ++; + break; + case(e_FM_PCD_KG_GENERIC_NOT_FROM_DATA): + swDefaults[numOfSwDefaults].type = e_FM_PCD_KG_GENERIC_NOT_FROM_DATA; + swDefaults[numOfSwDefaults].dfltSelect = p_KeyAndHash->dflts[i].dfltSelect; + numOfSwDefaults ++; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + } + p_SchemeRegs->kgse_ekdv = tmpReg; + + p_LocalExtractsArray = (t_FmPcdKgSchemesExtracts *)XX_Malloc(sizeof(t_FmPcdKgSchemesExtracts)); + if(!p_LocalExtractsArray) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); + + /* configure kgse_ekfc and kgse_gec */ + knownTmp = 0; + for( i=0 ;inumOfUsedExtracts ; i++) + { + p_Extract = &p_KeyAndHash->extractArray[i]; + switch(p_Extract->type) + { + case(e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO): + knownTmp |= KG_SCH_KN_PORT_ID; + /* save in driver structure */ + p_LocalExtractsArray->extractsArray[i].id = 
GetKnownFieldId(KG_SCH_KN_PORT_ID); + p_LocalExtractsArray->extractsArray[i].known = TRUE; + break; + case(e_FM_PCD_EXTRACT_BY_HDR): + switch(p_Extract->extractByHdr.hdr) + { + case(HEADER_TYPE_UDP_ENCAP_ESP): + switch(p_Extract->extractByHdr.type) + { + case(e_FM_PCD_EXTRACT_FROM_HDR): + /* case where extraction from ESP only */ + if (p_Extract->extractByHdr.extractByHdrType.fromHdr.offset >= UDP_HEADER_SIZE) + { + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromHdr.offset -= UDP_HEADER_SIZE; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + } + else + { + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + p_Extract->extractByHdr.ignoreProtocolValidation = FALSE; + } + break; + case(e_FM_PCD_EXTRACT_FROM_FIELD): + switch(p_Extract->extractByHdr.extractByHdrType.fromField.field.udpEncapEsp) + { + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC): + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST): + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN): + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM): + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + break; + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromField.size = p_Extract->extractByHdr.extractByHdrType.fromField.size; + /*p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SPI_OFFSET;*/ + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromField.size = p_Extract->extractByHdr.extractByHdrType.fromField.size; + p_Extract->extractByHdr.extractByHdrType.fromField.offset += ESP_SEQ_NUM_OFFSET; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + } + break; + case(e_FM_PCD_EXTRACT_FULL_FIELD): + switch(p_Extract->extractByHdr.extractByHdrType.fullField.udpEncapEsp) + { + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC): + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST): + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN): + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM): + p_Extract->extractByHdr.hdr = HEADER_TYPE_UDP; + break; + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SPI_SIZE; + p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SPI_OFFSET; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + case(NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM): + p_Extract->extractByHdr.type = e_FM_PCD_EXTRACT_FROM_HDR; + p_Extract->extractByHdr.hdr = FmPcdGetAliasHdr(p_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].netEnvId, HEADER_TYPE_UDP_ENCAP_ESP); + p_Extract->extractByHdr.extractByHdrType.fromHdr.size = ESP_SEQ_NUM_SIZE; + p_Extract->extractByHdr.extractByHdrType.fromHdr.offset = ESP_SEQ_NUM_OFFSET; + p_Extract->extractByHdr.ignoreProtocolValidation = TRUE; + break; + } 
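+                            /* Full-field SPI/sequence-number selections are converted
+                             * above to offset-based extractions from the UDP_ENCAP_ESP
+                             * alias header, so protocol validation is skipped for them;
+                             * port/length/checksum fields are taken from plain UDP. */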
+ break; + } + break; + default: + break; + } + switch(p_Extract->extractByHdr.type) + { + case(e_FM_PCD_EXTRACT_FROM_HDR): + generic = TRUE; + /* get the header code for the generic extract */ + code = GetGenHdrCode(p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex, p_Extract->extractByHdr.ignoreProtocolValidation); + /* set generic register fields */ + offset = p_Extract->extractByHdr.extractByHdrType.fromHdr.offset; + size = p_Extract->extractByHdr.extractByHdrType.fromHdr.size; + break; + case(e_FM_PCD_EXTRACT_FROM_FIELD): + generic = TRUE; + /* get the field code for the generic extract */ + code = GetGenFieldCode(p_Extract->extractByHdr.hdr, + p_Extract->extractByHdr.extractByHdrType.fromField.field, p_Extract->extractByHdr.ignoreProtocolValidation,p_Extract->extractByHdr.hdrIndex); + offset = p_Extract->extractByHdr.extractByHdrType.fromField.offset; + size = p_Extract->extractByHdr.extractByHdrType.fromField.size; + break; + case(e_FM_PCD_EXTRACT_FULL_FIELD): + if(!p_Extract->extractByHdr.ignoreProtocolValidation) + { + /* if we have a known field for it - use it, otherwise use generic */ + bitMask = GetKnownProtMask(p_Extract->extractByHdr.hdr, p_Extract->extractByHdr.hdrIndex, + p_Extract->extractByHdr.extractByHdrType.fullField); + if(bitMask) + { + knownTmp |= bitMask; + /* save in driver structure */ + p_LocalExtractsArray->extractsArray[i].id = GetKnownFieldId(bitMask); + p_LocalExtractsArray->extractsArray[i].known = TRUE; + } + else + generic = TRUE; + + } + else + generic = TRUE; + if(generic) + { + /* tmp - till we cover more headers under generic */ + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Full header selection not supported")); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + case(e_FM_PCD_EXTRACT_NON_HDR): + /* use generic */ + generic = TRUE; + offset = 0; + /* get the field code for the generic extract */ + code = GetGenCode(p_Extract->extractNonHdr.src, &offset); + offset += p_Extract->extractNonHdr.offset; + size = p_Extract->extractNonHdr.size; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + if(generic) + { + /* set generic register fields */ + if(currGenId >= FM_PCD_KG_NUM_OF_GENERIC_REGS) + RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used")); + if(!code) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG); + + genTmp = KG_SCH_GEN_VALID; + genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT); + genTmp |= offset; + if((size > MAX_KG_SCH_SIZE) || (size < 1)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal extraction (size out of range)")); + genTmp |= (uint32_t)((size - 1) << KG_SCH_GEN_SIZE_SHIFT); + swDefault = GetGenericSwDefault(swDefaults, numOfSwDefaults, code); + if(swDefault == e_FM_PCD_KG_DFLT_ILLEGAL) + DBG(WARNING, ("No sw default configured")); + + genTmp |= swDefault << KG_SCH_GEN_DEF_SHIFT; + genTmp |= KG_SCH_GEN_MASK; + p_SchemeRegs->kgse_gec[currGenId] = genTmp; + /* save in driver structure */ + p_LocalExtractsArray->extractsArray[i].id = currGenId++; + p_LocalExtractsArray->extractsArray[i].known = FALSE; + generic = FALSE; + } + } + p_SchemeRegs->kgse_ekfc = knownTmp; + + selectTmp = 0; + maskTmp = 0xFFFFFFFF; + /* configure kgse_bmch, kgse_bmcl and kgse_fqb */ + + if(p_KeyAndHash->numOfUsedMasks >= FM_PCD_KG_NUM_OF_EXTRACT_MASKS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Only %d masks supported", FM_PCD_KG_NUM_OF_EXTRACT_MASKS)); + for( i=0 ;inumOfUsedMasks ; i++) + { + /* Get the relative id of the extract (for known 0-0x1f, for generic 0-7) */ + id = 
p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].id; + /* Get the shift of the select field (depending on i) */ + GET_MASK_SEL_SHIFT(shift,i); + if (p_LocalExtractsArray->extractsArray[p_KeyAndHash->masks[i].extractArrayIndex].known) + selectTmp |= id << shift; + else + selectTmp |= (id + MASK_FOR_GENERIC_BASE_ID) << shift; + + /* Get the shift of the offset field (depending on i) - may + be in kgse_bmch or in kgse_fqb (depending on i) */ + GET_MASK_OFFSET_SHIFT(shift,i); + if (i<=1) + selectTmp |= p_KeyAndHash->masks[i].offset << shift; + else + fqbTmp |= p_KeyAndHash->masks[i].offset << shift; + + /* Get the shift of the mask field (depending on i) */ + GET_MASK_SHIFT(shift,i); + /* pass all bits */ + maskTmp |= KG_SCH_BITMASK_MASK << shift; + /* clear bits that need masking */ + maskTmp &= ~(0xFF << shift) ; + /* set mask bits */ + maskTmp |= (p_KeyAndHash->masks[i].mask << shift) ; + } + p_SchemeRegs->kgse_bmch = selectTmp; + p_SchemeRegs->kgse_bmcl = maskTmp; + /* kgse_fqb will be written t the end of the routine */ + + /* configure kgse_hc */ + if(p_KeyAndHash->hashShift > MAX_HASH_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashShift must not be larger than %d", MAX_HASH_SHIFT)); + if(p_KeyAndHash->hashDistributionFqidsShift > MAX_DIST_FQID_SHIFT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("hashDistributionFqidsShift must not be larger than %d", MAX_DIST_FQID_SHIFT)); + + tmpReg = 0; + + tmpReg |= ((p_KeyAndHash->hashDistributionNumOfFqids - 1) << p_KeyAndHash->hashDistributionFqidsShift); + tmpReg |= p_KeyAndHash->hashShift << KG_SCH_HASH_CONFIG_SHIFT_SHIFT; + + if(p_KeyAndHash->symmetricHash) + { + if((!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_MACDST)) || + (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC1) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST1)) || + (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPSRC2) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_IPDST2)) || + (!!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PSRC) != !!(p_SchemeRegs->kgse_ekfc & KG_SCH_KN_L4PDST))) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("symmetricHash set but src/dest extractions missing")); + tmpReg |= KG_SCH_HASH_CONFIG_SYM; + } + p_SchemeRegs->kgse_hc = tmpReg; + + /* build the return array describing the order of the extractions */ + + /* the last currGenId places of the array + are for generic extracts that are always last. + We now sort for the calculation of the order of the known + extractions we sort the known extracts between orderedArray[0] and + orderedArray[p_KeyAndHash->numOfUsedExtracts - currGenId - 1]. + for the calculation of the order of the generic extractions we use: + num_of_generic - currGenId + num_of_known - p_KeyAndHash->numOfUsedExtracts - currGenId + first_generic_index = num_of_known */ + curr = 0; + for (i=0;inumOfUsedExtracts ; i++) + { + if(p_LocalExtractsArray->extractsArray[i].known) + { + ASSERT_COND(curr<(p_KeyAndHash->numOfUsedExtracts - currGenId)); + j = curr; + /* id is the extract id (port id = 0, mac src = 1 etc.). 
the value in the array is the original + index in the user's extractions array */ + /* we compare the id of the current extract with the id of the extract in the orderedArray[j-1] + location */ + while((j > 0) && (p_LocalExtractsArray->extractsArray[i].id < + p_LocalExtractsArray->extractsArray[p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].orderedArray[j-1]].id)) + { + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].orderedArray[j] = + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].orderedArray[j-1]; + j--; + } + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].orderedArray[j] = (uint8_t)i; + curr++; + } + else + { + /* index is first_generic_index + generic index (id) */ + idx = (uint8_t)(p_KeyAndHash->numOfUsedExtracts - currGenId + p_LocalExtractsArray->extractsArray[i].id); + ASSERT_COND(idx < FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY); + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].orderedArray[idx]= (uint8_t)i; + } + } + XX_Free(p_LocalExtractsArray); + p_LocalExtractsArray = NULL; + + } + else + { + /* clear all unused registers: */ + p_SchemeRegs->kgse_ekfc = 0; + p_SchemeRegs->kgse_ekdv = 0; + p_SchemeRegs->kgse_bmch = 0; + p_SchemeRegs->kgse_bmcl = 0; + p_SchemeRegs->kgse_hc = 0; + p_SchemeRegs->kgse_dv0 = 0; + p_SchemeRegs->kgse_dv1 = 0; + } + + if(p_Scheme->bypassFqidGeneration) + p_SchemeRegs->kgse_hc |= KG_SCH_HASH_CONFIG_NO_FQID; + + /* configure kgse_spc */ + if( p_Scheme->schemeCounter.update) + p_SchemeRegs->kgse_spc = p_Scheme->schemeCounter.value; + + + /* check that are enough generic registers */ + if(p_Scheme->numOfUsedExtractedOrs + currGenId > FM_PCD_KG_NUM_OF_GENERIC_REGS) + RETURN_ERROR(MAJOR, E_FULL, ("Generic registers are fully used")); + + /* extracted OR mask on Qid */ + for( i=0 ;inumOfUsedExtractedOrs ; i++) + { + + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].extractedOrs = TRUE; + /* configure kgse_gec[i] */ + p_ExtractOr = &p_Scheme->extractedOrs[i]; + switch(p_ExtractOr->type) + { + case(e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO): + code = KG_SCH_GEN_PARSE_RESULT_N_FQID; + offset = 0; + break; + case(e_FM_PCD_EXTRACT_BY_HDR): + /* get the header code for the generic extract */ + code = GetGenHdrCode(p_ExtractOr->extractByHdr.hdr, p_ExtractOr->extractByHdr.hdrIndex, p_ExtractOr->extractByHdr.ignoreProtocolValidation); + /* set generic register fields */ + offset = p_ExtractOr->extractionOffset; + break; + case(e_FM_PCD_EXTRACT_NON_HDR): + /* get the field code for the generic extract */ + offset = 0; + code = GetGenCode(p_ExtractOr->src, &offset); + offset += p_ExtractOr->extractionOffset; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + /* set generic register fields */ + if(!code) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG); + genTmp = KG_SCH_GEN_EXTRACT_TYPE | KG_SCH_GEN_VALID; + genTmp |= (uint32_t)(code << KG_SCH_GEN_HT_SHIFT); + genTmp |= offset; + if(!!p_ExtractOr->bitOffsetInFqid == !!p_ExtractOr->bitOffsetInPlcrProfile) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" extracted byte must effect either FQID or Policer profile")); + + /************************************************************************************ + bitOffsetInFqid and bitOffsetInPolicerProfile are translated to rotate parameter + in the following way: + + Driver API and implementation: + ============================== + FQID: extracted OR byte may be shifted right 1-31 bits to effect parts of the FQID. + if shifted less than 8 bits, or more than 24 bits a mask is set on the bits that + are not overlapping FQID. 
+ ------------------------ + | FQID (24) | + ------------------------ + -------- + | | extracted OR byte + -------- + + Policer Profile: extracted OR byte may be shifted right 1-15 bits to effect parts of the + PP id. Unless shifted exactly 8 bits to overlap the PP id, a mask is set on the bits that + are not overlapping PP id. + + -------- + | PP (8) | + -------- + -------- + | | extracted OR byte + -------- + + HW implementation + ================= + FQID and PP construct a 32 bit word in the way describe below. Extracted byte is located + as the highest byte of that word and may be rotated to effect any part os the FQID or + the PP. + ------------------------ -------- + | FQID (24) || PP (8) | + ------------------------ -------- + -------- + | | extracted OR byte + -------- + + ************************************************************************************/ + + if(p_ExtractOr->bitOffsetInFqid) + { + if(p_ExtractOr->bitOffsetInFqid > MAX_KG_SCH_FQID_BIT_OFFSET ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInFqid out of range)")); + if(p_ExtractOr->bitOffsetInFqid<8) + genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid+24) << KG_SCH_GEN_SIZE_SHIFT); + else + genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInFqid-8) << KG_SCH_GEN_SIZE_SHIFT); + p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInFqid, TRUE); + } + else /* effect policer profile */ + { + if(p_ExtractOr->bitOffsetInPlcrProfile > MAX_KG_SCH_PP_BIT_OFFSET ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal extraction (bitOffsetInPlcrProfile out of range)")); + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].bitOffsetInPlcrProfile = p_ExtractOr->bitOffsetInPlcrProfile; + genTmp |= (uint32_t)((p_ExtractOr->bitOffsetInPlcrProfile+16) << KG_SCH_GEN_SIZE_SHIFT); + p_ExtractOr->mask &= GetExtractedOrMask(p_ExtractOr->bitOffsetInPlcrProfile, FALSE); + } + + genTmp |= (uint32_t)(p_ExtractOr->extractionOffset << KG_SCH_GEN_DEF_SHIFT); + /* clear bits that need masking */ + genTmp &= ~KG_SCH_GEN_MASK ; + /* set mask bits */ + genTmp |= (uint32_t)(p_ExtractOr->mask << KG_SCH_GEN_MASK_SHIFT); + p_SchemeRegs->kgse_gec[currGenId++] = genTmp; + + } + /* clear all unused GEC registers */ + for( i=currGenId ;ikgse_gec[i] = 0; + + /* add base Qid for this scheme */ + /* add configuration for kgse_fqb */ + if(p_Scheme->baseFqid & ~0x00FFFFFF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("baseFqid must be between 1 and 2^24-1")); + + fqbTmp |= p_Scheme->baseFqid; + p_SchemeRegs->kgse_fqb = fqbTmp; + + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine = p_Scheme->nextEngine; + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].doneAction = p_Scheme->kgNextEngineParams.doneAction; + return E_OK; +} + +void FmPcdKgValidateSchemeSw(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(!p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + if(p_FmPcd->p_FmPcdKg->schemes[schemeId].netEnvId != ILLEGAL_NETENV) + FmPcdIncNetEnvOwners(p_FmPcd, p_FmPcd->p_FmPcdKg->schemes[schemeId].netEnvId); + p_FmPcd->p_FmPcdKg->schemes[schemeId].valid = TRUE; +} + +void FmPcdKgInvalidateSchemeSw(t_Handle h_FmPcd, uint8_t schemeId) +{ + + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + if(p_FmPcd->p_FmPcdKg->schemes[schemeId].netEnvId != ILLEGAL_NETENV) + FmPcdDecNetEnvOwners(h_FmPcd, p_FmPcd->p_FmPcdKg->schemes[schemeId].netEnvId); + p_FmPcd->p_FmPcdKg->schemes[schemeId].valid = FALSE; +} + +uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + 
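+    /*
+     * Illustration of the extracted-OR rotate arithmetic used above: the
+     * FQID (24 bits) and the policer profile (8 bits) form one 32-bit word
+     * and the extracted byte occupies its top 8 bits, so the value written
+     * into the GEN "size" field is effectively a rotate amount.  Minimal
+     * standalone sketch; the helper name is illustrative only.
+     */
+#if 0   /* illustrative sketch - not compiled */
+    static uint8_t ExtractedOrRotate(uint8_t bitOffset, bool onFqid)
+    {
+        if (onFqid)
+            /* offsets below 8 wrap around the 32-bit word: +24 == -8 mod 32 */
+            return (uint8_t)((bitOffset < 8) ? (bitOffset + 24) : (bitOffset - 8));
+        /* for the policer profile the fixed bias is 16, as in the code above */
+        return (uint8_t)(bitOffset + 16);
+    }
+#endif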
ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredAction; +} + +uint32_t FmPcdKgGetPointedOwners(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].pointedOwners; +} + +bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].directPlcr; +} + + +uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].relativeProfileId; +} + + +bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + if((p_FmPcd->p_FmPcdKg->schemes[schemeId].extractedOrs && + p_FmPcd->p_FmPcdKg->schemes[schemeId].bitOffsetInPlcrProfile) || + p_FmPcd->p_FmPcdKg->schemes[schemeId].nextRelativePlcrProfile) + return TRUE; + else + return FALSE; + +} +void FmPcdKgUpatePointedOwner(t_Handle h_FmPcd, uint8_t schemeId, bool add) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + if(add) + p_FmPcd->p_FmPcdKg->schemes[schemeId].pointedOwners++; + else + p_FmPcd->p_FmPcdKg->schemes[schemeId].pointedOwners--; +} + +e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].nextEngine; +} + +e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + return p_FmPcd->p_FmPcdKg->schemes[schemeId].doneAction; +} + +void FmPcdKgUpdateRequiredAction(t_Handle h_FmPcd, uint8_t schemeId, uint32_t requiredAction) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdKg->schemes[schemeId].valid); + + p_FmPcd->p_FmPcdKg->schemes[schemeId].requiredAction = requiredAction; +} + +t_Error FmPcdKgCheckInvalidateSchemeSw(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + if(schemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + /* check that no port is bound to this scheme */ + if(p_FmPcd->p_FmPcdKg->schemes[schemeId].owners) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a scheme that has ports bound to")); + if(!p_FmPcd->p_FmPcdKg->schemes[schemeId].valid) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete an invalid scheme")); + return E_OK; +} + +uint32_t FmPcdKgBuildCppReg(t_Handle h_FmPcd, uint8_t clsPlanGrpId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpKgpeCpp; + + tmpKgpeCpp = (uint32_t)(p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].baseEntry / 8); + tmpKgpeCpp |= (uint32_t)(((p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrpId].sizeOfGrp / 8) - 1) << FM_PCD_KG_PE_CPP_MASK_SHIFT); + return tmpKgpeCpp; +} + +bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg) +{ + + if(schemeModeReg & KG_SCH_MODE_EN) + return TRUE; + else + return FALSE; +} + +uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter) +{ + return 
(uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT)| + FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_WRITE | + FM_PCD_KG_KGAR_SEL_SCHEME_ENTRY | + DUMMY_PORT_ID | + (updateCounter ? FM_PCD_KG_KGAR_SCHEME_WSEL_UPDATE_CNT:0)); + +} + +uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId) +{ + return (uint32_t)(((uint32_t)schemeId << FM_PCD_KG_KGAR_NUM_SHIFT)| + FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_READ | + FM_PCD_KG_KGAR_SEL_SCHEME_ENTRY | + DUMMY_PORT_ID | + FM_PCD_KG_KGAR_SCHEME_WSEL_UPDATE_CNT); + +} + + +uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId) +{ + return (uint32_t)(FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_WRITE | + FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY | + DUMMY_PORT_ID | + ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) | + FM_PCD_KG_KGAR_WSEL_MASK); + + + /* if we ever want to write 1 by 1, use: + sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));*/ +} + +uint32_t FmPcdKgBuildReadClsPlanBlockActionReg(uint8_t grpId) +{ + return (uint32_t)(FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_READ | + FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY | + DUMMY_PORT_ID | + ((uint32_t)grpId << FM_PCD_KG_KGAR_NUM_SHIFT) | + FM_PCD_KG_KGAR_WSEL_MASK); + + + /* if we ever want to write 1 by 1, use: + sel = (uint8_t)(0x01 << (7- (entryId % CLS_PLAN_NUM_PER_GRP)));*/ +} + +uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId) +{ + + return (uint32_t)(FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_WRITE | + FM_PCD_KG_KGAR_SEL_PORT_ENTRY | + hardwarePortId | + FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP); +} + +uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId) +{ + + return (uint32_t)(FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_READ | + FM_PCD_KG_KGAR_SEL_PORT_ENTRY | + hardwarePortId | + FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP); +} +uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId) +{ + + return (uint32_t)(FM_PCD_KG_KGAR_GO | + FM_PCD_KG_KGAR_WRITE | + FM_PCD_KG_KGAR_SEL_PORT_ENTRY | + hardwarePortId | + FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP); +} + +uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].baseEntry; +} + +uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + return p_FmPcd->p_FmPcdKg->clsPlanGrps[clsPlanGrp].sizeOfGrp; +} + +uint8_t FmPcdKgGetSchemeSwId(t_Handle h_FmPcd, uint8_t schemeHwId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint8_t i; + + for(i=0;ip_FmPcdKg->numOfSchemes;i++) + if(p_FmPcd->p_FmPcdKg->schemesIds[i] == schemeHwId) + return i; + ASSERT_COND(i!=p_FmPcd->p_FmPcdKg->numOfSchemes); + return FM_PCD_KG_NUM_OF_SCHEMES; +} + +uint8_t FmPcdKgGetNumOfPartitionSchemes(t_Handle h_FmPcd) +{ + return ((t_FmPcd*)h_FmPcd)->p_FmPcdKg->numOfSchemes; +} + +uint8_t FmPcdKgGetPhysicalSchemeId(t_Handle h_FmPcd, uint8_t relativeSchemeId) +{ + return ((t_FmPcd*)h_FmPcd)->p_FmPcdKg->schemesIds[relativeSchemeId]; +} + +uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint8_t i; + + for(i = 0;ip_FmPcdKg->numOfSchemes;i++) + if(p_FmPcd->p_FmPcdKg->schemesIds[i] == schemeId) + return i; + + if(i == p_FmPcd->p_FmPcdKg->numOfSchemes) + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("Scheme is out of partition range")); + + return FM_PCD_KG_NUM_OF_SCHEMES; +} + +t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t requiredAction) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint8_t relativeSchemeId, 
physicalSchemeId; + uint32_t tmpKgarReg, tmpReg32 = 0, intFlags; + t_Error err; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0); + + if (p_FmPcd->h_Hc) + return FmHcPcdKgCcGetSetParams(p_FmPcd->h_Hc, h_Scheme, requiredAction); + + physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId); + if(relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if (FmPcdKgSchemeTryLock(p_FmPcd, relativeSchemeId, FALSE)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Lock of the scheme FAILED")); + + if(!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].pointedOwners || + !(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredAction & requiredAction)) + { + if(requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) + { + switch(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextEngine) + { + case(e_FM_PCD_DONE): + if(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].doneAction == e_FM_PCD_ENQ_FRAME) + { + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + intFlags = FmPcdLock(p_FmPcd); + WriteKgarWait(p_FmPcd, tmpKgarReg); + if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode) & KG_SCH_MODE_EN)) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid")); + } + tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode); + ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode, tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA); + /* call indirect command for scheme write */ + tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE); + WriteKgarWait(p_FmPcd, tmpKgarReg); + FmPcdUnlock(p_FmPcd, intFlags); + } + break; + case(e_FM_PCD_PLCR): + if(!p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].directPlcr || + (p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].extractedOrs && + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].bitOffsetInPlcrProfile) || + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].nextRelativePlcrProfile) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this situation PP can not be with distribution and has to be shared")); + } + err = FmPcdPlcrCcGetSetParams(h_FmPcd, p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].relativeProfileId, requiredAction); + if(err) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("in this situation the next engine after scheme can be or PLCR or ENQ_FRAME")); + } + } + } + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].pointedOwners += 1; + p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].requiredAction |= requiredAction; + + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + return E_OK; +} + +t_Error FmPcdKgSchemeTryLock(t_Handle h_FmPcd, uint8_t schemeId, bool intr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + bool ans; + + if (intr) + ans = TRY_LOCK(NULL, &p_FmPcd->p_FmPcdKg->schemes[schemeId].lock); + else + ans = TRY_LOCK(p_FmPcd->h_Spinlock, &p_FmPcd->p_FmPcdKg->schemes[schemeId].lock); + if (ans) + return E_OK; + return ERROR_CODE(E_BUSY); +} + +void 
FmPcdKgReleaseSchemeLock(t_Handle h_FmPcd, uint8_t schemeId) +{ + RELEASE_LOCK(((t_FmPcd*)h_FmPcd)->p_FmPcdKg->schemes[schemeId].lock); +} + +t_Handle FM_PCD_KgSetScheme(t_Handle h_FmPcd, t_FmPcdKgSchemeParams *p_Scheme) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpReg; + t_FmPcdKgInterModuleSchemeRegs schemeRegs; + t_FmPcdKgInterModuleSchemeRegs *p_MemRegs; + uint8_t i; + t_Error err = E_OK; + uint32_t tmpKgarReg; + uint32_t intFlags; + uint8_t physicalSchemeId, relativeSchemeId; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL); + + if (p_FmPcd->h_Hc) + return FmHcPcdKgSetScheme(p_FmPcd->h_Hc, p_Scheme); + + /* if not called for modification, check first that this scheme is unused */ + if(!p_Scheme->modify) + { + /* check that schemeId is in range */ + if(p_Scheme->id.relativeSchemeId >= p_FmPcd->p_FmPcdKg->numOfSchemes) + { + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, ("Scheme is out of range")); + return NULL; + } + relativeSchemeId = p_Scheme->id.relativeSchemeId; + + if (FmPcdKgSchemeTryLock(p_FmPcd, relativeSchemeId, FALSE)) + return NULL; + + physicalSchemeId = p_FmPcd->p_FmPcdKg->schemesIds[relativeSchemeId]; + + /* read specified scheme into scheme registers */ + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + intFlags = FmPcdLock(p_FmPcd); + WriteKgarWait(p_FmPcd, tmpKgarReg); + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode); + FmPcdUnlock(p_FmPcd, intFlags); + + if (tmpReg & KG_SCH_MODE_EN) + { + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, + ("Scheme %d(phys %d) is already used", relativeSchemeId, physicalSchemeId)); + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + return NULL; + } + } + else + { + SANITY_CHECK_RETURN_VALUE(p_Scheme->id.h_Scheme, E_INVALID_HANDLE, NULL); + + intFlags = FmPcdLock(p_FmPcd); + physicalSchemeId = (uint8_t)(PTR_TO_UINT(p_Scheme->id.h_Scheme)-1); + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId); + + /* check that schemeId is in range */ + if(relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + { + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + FmPcdUnlock(p_FmPcd, intFlags); + return NULL; + } + + err = FmPcdKgSchemeTryLock(p_FmPcd, relativeSchemeId, TRUE); + FmPcdUnlock(p_FmPcd, intFlags); + if (err) + return NULL; + } + + err = FmPcdKgBuildScheme(h_FmPcd, p_Scheme, &schemeRegs); + if(err) + { + REPORT_ERROR(MAJOR, err, NO_MSG); + FmPcdKgInvalidateSchemeSw(h_FmPcd, relativeSchemeId); + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + return NULL; + } + + /* configure all 21 scheme registers */ + p_MemRegs = &p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs; + intFlags = FmPcdLock(p_FmPcd); + WRITE_UINT32(p_MemRegs->kgse_ppc, schemeRegs.kgse_ppc); + WRITE_UINT32(p_MemRegs->kgse_ccbs, schemeRegs.kgse_ccbs); + WRITE_UINT32(p_MemRegs->kgse_mode, schemeRegs.kgse_mode); + WRITE_UINT32(p_MemRegs->kgse_mv, schemeRegs.kgse_mv); + WRITE_UINT32(p_MemRegs->kgse_dv0, schemeRegs.kgse_dv0); + WRITE_UINT32(p_MemRegs->kgse_dv1, schemeRegs.kgse_dv1); + WRITE_UINT32(p_MemRegs->kgse_ekdv, schemeRegs.kgse_ekdv); + WRITE_UINT32(p_MemRegs->kgse_ekfc, schemeRegs.kgse_ekfc); + WRITE_UINT32(p_MemRegs->kgse_bmch, schemeRegs.kgse_bmch); + WRITE_UINT32(p_MemRegs->kgse_bmcl, schemeRegs.kgse_bmcl); + WRITE_UINT32(p_MemRegs->kgse_hc, schemeRegs.kgse_hc); + 
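+    /*
+     * Scheme handles in this API are simply the physical scheme id biased
+     * by one, so that a valid handle is never NULL (see the
+     * PTR_TO_UINT(h_Scheme)-1 decode above and the UINT_TO_PTR(...+1)
+     * returned further down).  Condensed sketch of that convention:
+     */
+#if 0   /* illustrative sketch - not compiled */
+    h_Scheme         = UINT_TO_PTR((uint64_t)physicalSchemeId + 1); /* encode */
+    physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme) - 1);        /* decode */
+#endif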
WRITE_UINT32(p_MemRegs->kgse_spc, schemeRegs.kgse_spc); + WRITE_UINT32(p_MemRegs->kgse_fqb, schemeRegs.kgse_fqb); + for(i=0 ; ikgse_gec[i], schemeRegs.kgse_gec[i]); + + /* call indirect command for scheme write */ + tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, p_Scheme->schemeCounter.update); + + WriteKgarWait(p_FmPcd, tmpKgarReg); + FmPcdUnlock(p_FmPcd, intFlags); + + FmPcdKgValidateSchemeSw(h_FmPcd, relativeSchemeId); + + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + + return UINT_TO_PTR((uint64_t)physicalSchemeId+1); +} + +t_Error FM_PCD_KgDeleteScheme(t_Handle h_FmPcd, t_Handle h_Scheme) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint8_t physicalSchemeId; + uint32_t tmpKgarReg, intFlags; + t_Error err = E_OK; + uint8_t relativeSchemeId; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if (p_FmPcd->h_Hc) + return FmHcPcdKgDeleteScheme(p_FmPcd->h_Hc, h_Scheme); + + physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId); + + if(relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + if ((err = FmPcdKgSchemeTryLock(p_FmPcd, relativeSchemeId, FALSE)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + + /* check that no port is bound to this scheme */ + err = FmPcdKgCheckInvalidateSchemeSw(h_FmPcd, relativeSchemeId); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + + intFlags = FmPcdLock(p_FmPcd); + /* clear mode register, including enable bit */ + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode, 0); + + /* call indirect command for scheme write */ + tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE); + + WriteKgarWait(p_FmPcd, tmpKgarReg); + FmPcdUnlock(p_FmPcd, intFlags); + + FmPcdKgInvalidateSchemeSw(h_FmPcd, relativeSchemeId); + + RELEASE_LOCK(p_FmPcd->p_FmPcdKg->schemes[relativeSchemeId].lock); + + return E_OK; +} + +uint32_t FM_PCD_KgGetSchemeCounter(t_Handle h_FmPcd, t_Handle h_Scheme) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpKgarReg, spc, intFlags; + uint8_t physicalSchemeId; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0); + + if (p_FmPcd->h_Hc) + return FmHcPcdKgGetSchemeCounter(p_FmPcd->h_Hc, h_Scheme); + + physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + + if(FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES) + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + intFlags = FmPcdLock(p_FmPcd); + WriteKgarWait(p_FmPcd, tmpKgarReg); + if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode) & KG_SCH_MODE_EN)) + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid")); + spc = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_spc); + FmPcdUnlock(p_FmPcd, intFlags); + + return spc; +} + +t_Error FM_PCD_KgSetSchemeCounter(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpKgarReg, intFlags; + uint8_t physicalSchemeId; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0); + 
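+    /*
+     * All scheme accesses in this file follow the same indirect-access
+     * pattern: build a KG action register value, trigger it with
+     * WriteKgarWait() under the PCD lock, then read or write the shadow
+     * schemeRegs block.  Condensed sketch of the read-modify-write case,
+     * using only helpers defined in this file:
+     */
+#if 0   /* illustrative sketch - not compiled */
+    tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
+    intFlags = FmPcdLock(p_FmPcd);
+    WriteKgarWait(p_FmPcd, tmpKgarReg);                     /* latch scheme  */
+    /* ... GET_UINT32()/WRITE_UINT32() on indirectAccessRegs.schemeRegs ... */
+    tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
+    WriteKgarWait(p_FmPcd, tmpKgarReg);                     /* commit scheme */
+    FmPcdUnlock(p_FmPcd, intFlags);
+#endif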
SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdKg, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0); + + if (p_FmPcd->h_Hc) + return FmHcPcdKgSetSchemeCounter(p_FmPcd->h_Hc, h_Scheme, value); + + physicalSchemeId = (uint8_t)(PTR_TO_UINT(h_Scheme)-1); + /* check that schemeId is in range */ + if(FmPcdKgGetRelativeSchemeId(p_FmPcd, physicalSchemeId) == FM_PCD_KG_NUM_OF_SCHEMES) + REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + + /* read specified scheme into scheme registers */ + tmpKgarReg = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId); + intFlags = FmPcdLock(p_FmPcd); + WriteKgarWait(p_FmPcd, tmpKgarReg); + if (!(GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_mode) & KG_SCH_MODE_EN)) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is Invalid")); + } + + /* change counter value */ + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->indirectAccessRegs.schemeRegs.kgse_spc, value); + + /* call indirect command for scheme write */ + tmpKgarReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE); + + WriteKgarWait(p_FmPcd, tmpKgarReg); + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; +} + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_pcd.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_pcd.c @@ -0,0 +1,1693 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_pcd.c + + @Description FM PCD ... 
+*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "xx_ext.h" +#include "sprint_ext.h" +#include "debug_ext.h" +#include "net_ext.h" +#include "fm_ext.h" +#include "fm_pcd_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_pcd_ipc.h" +#include "fm_hc.h" + + +static t_Error CheckFmPcdParameters(t_FmPcd *p_FmPcd) +{ + if(!p_FmPcd->h_Fm) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("h_Fm has to be initialized")); + + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + if(p_FmPcd->p_FmPcdKg && !p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something WRONG")); + + if(p_FmPcd->p_FmPcdPlcr && !p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Something WRONG")); + + if(!p_FmPcd->f_Exception) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdExceptions has to be initialized")); + + if((!p_FmPcd->f_FmPcdIndexedException) && (p_FmPcd->p_FmPcdPlcr || p_FmPcd->p_FmPcdKg)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_FmPcdIndexedException has to be initialized")); + + if(p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit > PRS_MAX_CYCLE_LIMIT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("prsMaxParseCycleLimit has to be less than 8191")); + } + + return E_OK; +} + +static volatile bool blockingFlag = FALSE; +static void FmPcdIpcMsgCompletionCB(t_Handle h_FmPcd, + uint8_t *p_Msg, + uint8_t *p_Reply, + uint32_t replyLength, + t_Error status) +{ + UNUSED(h_FmPcd);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status); + blockingFlag = FALSE; +} + +static t_Error FmPcdHandleIpcMsgCB(t_Handle h_FmPcd, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + t_FmPcdIpcMsg *p_IpcMsg = (t_FmPcdIpcMsg*)p_Msg; + t_FmPcdIpcReply *p_IpcReply = (t_FmPcdIpcReply*)p_Reply; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE); + +#ifdef DISABLE_SANITY_CHECKS + UNUSED(msgLength); +#endif /* DISABLE_SANITY_CHECKS */ + + ASSERT_COND(p_Msg); + + memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_PCD_MAX_REPLY_SIZE)); + *p_ReplyLength = 0; + + switch(p_IpcMsg->msgId) + { + case (FM_PCD_MASTER_IS_ALIVE): + *(uint8_t*)(p_IpcReply->replyBody) = 1; + p_IpcReply->error = E_OK; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + case (FM_PCD_MASTER_IS_ENABLED): + /* count partitions registrations */ + if(p_FmPcd->enabled) + p_FmPcd->numOfEnabledGuestPartitionsPcds++; + *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)p_FmPcd->enabled; + p_IpcReply->error = E_OK; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + case (FM_PCD_GUEST_DISABLE): + if(p_FmPcd->numOfEnabledGuestPartitionsPcds) + { + p_FmPcd->numOfEnabledGuestPartitionsPcds--; + p_IpcReply->error = E_OK; + } + else + { + REPORT_ERROR(MINOR, E_INVALID_STATE,("Trying to disable an unregistered partition")); + p_IpcReply->error = E_INVALID_STATE; + } + *p_ReplyLength = sizeof(uint32_t); + break; + case(FM_PCD_GET_COUNTER): + { + e_FmPcdCounters inCounter; + uint32_t outCounter; + + memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t)); + outCounter = FM_PCD_GetCounter(h_FmPcd, inCounter); + memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t)); + p_IpcReply->error = E_OK; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_KG_SCHEMES): + 
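+        /*
+         * Reply framing used by every case in this handler: the first
+         * 32 bits of the reply carry the error code and *p_ReplyLength is
+         * always sizeof(uint32_t) plus the payload size.  Sketch only
+         * ("payload" is a placeholder variable):
+         */
+#if 0   /* illustrative sketch - not compiled */
+        p_IpcReply->error = E_OK;
+        memcpy(p_IpcReply->replyBody, (uint8_t*)&payload, sizeof(payload));
+        *p_ReplyLength = sizeof(uint32_t) + sizeof(payload);
+#endif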
{ + t_FmPcdIpcKgSchemesParams ipcSchemesParams; + + memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams)); + err = FmPcdKgAllocSchemes(h_FmPcd, + ipcSchemesParams.numOfSchemes, + ipcSchemesParams.guestId, + p_IpcReply->replyBody); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t) + ipcSchemesParams.numOfSchemes*sizeof(uint8_t); + break; + } + case (FM_PCD_FREE_KG_SCHEMES): + { + t_FmPcdIpcKgSchemesParams ipcSchemesParams; + + memcpy((uint8_t*)&ipcSchemesParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgSchemesParams)); + err = FmPcdKgFreeSchemes(h_FmPcd, + ipcSchemesParams.numOfSchemes, + ipcSchemesParams.guestId, + ipcSchemesParams.schemesIds); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_KG_CLSPLAN): + { + t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams; + + memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams)); + err = KgAllocClsPlanEntries(h_FmPcd, + ipcKgClsPlanParams.numOfClsPlanEntries, + ipcKgClsPlanParams.guestId, + p_IpcReply->replyBody); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + } + case (FM_PCD_FREE_KG_CLSPLAN): + { + t_FmPcdIpcKgClsPlanParams ipcKgClsPlanParams; + + memcpy((uint8_t*)&ipcKgClsPlanParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcKgClsPlanParams)); + KgFreeClsPlanEntries(h_FmPcd, + ipcKgClsPlanParams.numOfClsPlanEntries, + ipcKgClsPlanParams.guestId, + ipcKgClsPlanParams.clsPlanBase); + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_PROFILES): + { + t_FmPcdIpcPlcrAllocParams ipcPlcrAllocParams; + uint16_t profilesBase; + + memcpy((uint8_t*)&ipcPlcrAllocParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcPlcrAllocParams)); + err = PlcrAllocProfiles(h_FmPcd, + ipcPlcrAllocParams.hardwarePortId, + ipcPlcrAllocParams.num, + &profilesBase); + memcpy(p_IpcReply->replyBody, (uint8_t*)&profilesBase, sizeof(uint16_t)); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint16_t); + break; + } + case (FM_PCD_FREE_PROFILES): + { + t_FmPcdIpcPlcrAllocParams ipcPlcrAllocParams; + + memcpy((uint8_t*)&ipcPlcrAllocParams, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcPlcrAllocParams)); + err = PlcrFreeProfiles(h_FmPcd, + ipcPlcrAllocParams.hardwarePortId, + ipcPlcrAllocParams.num, + ipcPlcrAllocParams.plcrProfilesBase); + p_IpcReply->error = err; + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_PCD_ALLOC_SHARED_PROFILES): + { + uint16_t numOfProfiles; + uint16_t profilesIds[FM_PCD_PLCR_NUM_ENTRIES]; + uint32_t profilesMask[FM_PCD_PLCR_NUM_ENTRIES/32]; + int i; + + memset(profilesMask, 0, FM_PCD_PLCR_NUM_ENTRIES/32 * sizeof(uint32_t)); + memcpy((uint8_t*)&numOfProfiles, p_IpcMsg->msgBody, sizeof(uint16_t)); + err = PlcrAllocSharedProfiles(h_FmPcd, + numOfProfiles, + profilesIds); + p_IpcReply->error = err; + + /* translate the allocated profile id's to a 32bit * 8regs mask */ + for(i = 0;i> (profilesIds[i] % 32)); + + memcpy(p_IpcReply->replyBody, (uint8_t*)&profilesMask, sizeof(profilesMask)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(profilesMask); /* num-of-shared-profiles */ + break; + } + case (FM_PCD_FREE_SHARED_PROFILES): + { + t_FmPcdIpcSharedPlcrAllocParams ipcSharedPlcrAllocParams; + uint16_t profilesIds[FM_PCD_PLCR_NUM_ENTRIES]; + int i,j, index = 0; + uint32_t walking1Mask = 0x80000000; + + memset(profilesIds, 0, FM_PCD_PLCR_NUM_ENTRIES*sizeof(uint16_t)); + memcpy((uint8_t*)&ipcSharedPlcrAllocParams, p_IpcMsg->msgBody, 
sizeof(t_FmPcdIpcSharedPlcrAllocParams)); + for(i = 0; i>= 1; + } + walking1Mask = 0x80000000; + } + } + + PlcrFreeSharedProfiles(h_FmPcd, + ipcSharedPlcrAllocParams.num, + profilesIds); + break; + } + case(FM_PCD_GET_SW_PRS_OFFSET): + { + t_FmPcdIpcSwPrsLable ipcSwPrsLable; + uint32_t swPrsOffset; + + memcpy((uint8_t*)&ipcSwPrsLable, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcSwPrsLable)); + swPrsOffset = + FmPcdGetSwPrsOffset(h_FmPcd, + (e_NetHeaderType)ipcSwPrsLable.enumHdr, + ipcSwPrsLable.indexPerHdr); + memcpy(p_IpcReply->replyBody, (uint8_t*)&swPrsOffset, sizeof(uint32_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case(FM_PCD_PRS_INC_PORT_STATS): + { + t_FmPcdIpcPrsIncludePort ipcPrsIncludePort; + + memcpy((uint8_t*)&ipcPrsIncludePort, p_IpcMsg->msgBody, sizeof(t_FmPcdIpcPrsIncludePort)); + PrsIncludePortInStatistics(h_FmPcd, + ipcPrsIncludePort.hardwarePortId, + ipcPrsIncludePort.include); + break; + } +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + case(FM_PCD_DUMP_REGS): + if((err = FM_PCD_DumpRegs(h_FmPcd)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + case(FM_PCD_KG_DUMP_REGS): + if((err = FM_PCD_KgDumpRegs(h_FmPcd)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + case(FM_PCD_PLCR_DUMP_REGS): + if((err = FM_PCD_PlcrDumpRegs(h_FmPcd)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + case(FM_PCD_PLCR_PROFILE_DUMP_REGS): + { + t_Handle h_Profile; + memcpy((uint8_t*)&h_Profile, p_IpcMsg->msgBody, sizeof(t_Handle)); + if((err = FM_PCD_PlcrProfileDumpRegs(h_FmPcd, h_Profile)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + + } + case(FM_PCD_PRS_DUMP_REGS): + if((err = FM_PCD_PrsDumpRegs(h_FmPcd)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; +#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */ + default: + *p_ReplyLength = 0; + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!")); + } + return E_OK; +} + +void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId) +{ + p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = clsPlanGrpId; +} + +t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams) +{ + uint8_t netEnvId = p_GrpParams->netEnvId; + int i, k, j; + + if(p_FmPcd->netEnvs[netEnvId].clsPlanGrpId != ILLEGAL_CLS_PLAN) + { + p_GrpParams->grpExists = TRUE; + p_GrpParams->clsPlanGrpId = p_FmPcd->netEnvs[netEnvId].clsPlanGrpId; + return E_OK; + } + + for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++) + { + for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++) + { + /* if an option exists, add it to the opts list */ + if(p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt) + { + /* check if this option already exists, add if it doesn't */ + for(j = 0;jnumOfOptions;j++) + { + if(p_GrpParams->options[j] == p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt) + break; + } + p_GrpParams->optVectors[j] |= p_FmPcd->netEnvs[netEnvId].unitsVectors[i]; + if(j == p_GrpParams->numOfOptions) + { + p_GrpParams->options[p_GrpParams->numOfOptions] = p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt; + p_GrpParams->numOfOptions++; + } + } + } + } + + if(p_GrpParams->numOfOptions == 0) + { + if(p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId != ILLEGAL_CLS_PLAN) + { + p_GrpParams->grpExists = TRUE; + p_GrpParams->clsPlanGrpId = p_FmPcd->p_FmPcdKg->emptyClsPlanGrpId; + } + } + + return E_OK; + +} + +t_Error 
PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector) +{ + uint8_t j,k; + + *p_Vector = 0; + + for (j=0; ((j < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[0].hdr != HEADER_TYPE_NONE)); j++) + { + for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].hdr != HEADER_TYPE_NONE)); k++) + { + if (p_FmPcd->netEnvs[netEnvId].units[j].hdrs[k].opt == opt) + *p_Vector |= p_FmPcd->netEnvs[netEnvId].unitsVectors[j]; + } + } + + if (!*p_Vector) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested option was not defined for this Network Environment Characteristics module")); + else + return E_OK; +} + +t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params) +{ + int i; + + p_Params->vector = 0; + for(i=0; inumOfDistinctionUnits ;i++) + { + if(p_FmPcd->netEnvs[p_Params->netEnvId].units[p_Params->unitIds[i]].hdrs[0].hdr == HEADER_TYPE_NONE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Requested unit was not defined for this Network Environment Characteristics module")); + ASSERT_COND(p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]]); + p_Params->vector |= p_FmPcd->netEnvs[p_Params->netEnvId].unitsVectors[p_Params->unitIds[i]]; + } + + return E_OK; +} + +bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector) +{ + int i=0, k; + /* check whether a given unit may be used by non-clsPlan users. */ + /* first, recognize the unit by its vector */ + while (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE) + { + if (p_FmPcd->netEnvs[netEnvId].unitsVectors[i] == unitVector) + { + for (k=0; + ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); + k++) + /* check that no option exists */ + if((protocolOpt_t)p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].opt) + return FALSE; + break; + } + i++; + } + /* assert that a unit was found to mach the vector */ + ASSERT_COND(p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); + + return TRUE; +} +bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i, k; + + ASSERT_COND(p_FmPcd); + + for (i=0; ((i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE)); i++) + { + for (k=0; ((k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE)); k++) + if (p_FmPcd->netEnvs[netEnvId].units[i].hdrs[k].hdr == hdr) + return TRUE; + } + for (i=0; ((i < FM_PCD_MAX_NUM_OF_PRIVATE_HDRS) && + (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE)); i++) + { + if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) + return TRUE; + } + + return FALSE; +} + +e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr) +{ + int i; + + ASSERT_COND(p_FmPcd); + + for (i=0; (i < FM_PCD_MAX_NUM_OF_PRIVATE_HDRS) + && (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr != HEADER_TYPE_NONE); i++) + { + if (p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].hdr == hdr) + return p_FmPcd->netEnvs[netEnvId].aliasHdrs[i].aliasHdr; + } + + return HEADER_TYPE_NONE; +} + +void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint16_t swPortIndex = 0; + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + 
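+    /*
+     * Distinction units are tracked as one-hot 32-bit vectors (assigned as
+     * 0x80000000 >> bitId when the netEnv is built), so a set of units is
+     * simply the OR of their vectors - which is what PcdGetUnitsVector()
+     * and PcdGetVectorForOpt() above compute.  Standalone sketch
+     * ("numOfUnits"/"unitIds" are placeholder names):
+     */
+#if 0   /* illustrative sketch - not compiled */
+    uint32_t vector = 0;
+    for (i = 0; i < numOfUnits; i++)
+        vector |= p_FmPcd->netEnvs[netEnvId].unitsVectors[unitIds[i]];
+#endif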
p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].h_FmPort = h_FmPort; +} + +uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + return p_FmPcd->netEnvs[netEnvId].lcvs[hdrNum]; +} + +uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + return p_FmPcd->netEnvs[netEnvId].macsecVector; +} + +void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId) +{ + ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners++; +} + +void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId) +{ + ASSERT_COND(((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners); + ((t_FmPcd*)h_FmPcd)->netEnvs[netEnvId].owners--; +} + +uint32_t FmPcdLock(t_Handle h_FmPcd) +{ + return XX_LockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock); +} + +void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags) +{ + XX_UnlockIntrSpinlock(((t_FmPcd*)h_FmPcd)->h_Spinlock, intFlags); +} + +t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd) +{ + ASSERT_COND(h_FmPcd); + SANITY_CHECK_RETURN_VALUE(((t_FmPcd*)h_FmPcd)->h_Hc, E_INVALID_HANDLE, NULL); + return ((t_FmPcd*)h_FmPcd)->h_Hc; +} + +/**********************************************************************************************************/ +/* API */ +/**********************************************************************************************************/ + +t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcd *p_FmPcd = NULL; + t_FmPhysAddr physicalMuramBase; + uint8_t i; + + SANITY_CHECK_RETURN_VALUE(p_FmPcdParams, E_INVALID_HANDLE,NULL); + + p_FmPcd = (t_FmPcd *) XX_Malloc(sizeof(t_FmPcd)); + if (!p_FmPcd) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd")); + return NULL; + } + memset(p_FmPcd, 0, sizeof(t_FmPcd)); + + p_FmPcd->p_FmPcdDriverParam = (t_FmPcdDriverParam *) XX_Malloc(sizeof(t_FmPcdDriverParam)); + if (!p_FmPcd->p_FmPcdDriverParam) + { + XX_Free(p_FmPcd); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd Driver Param")); + return NULL; + } + memset(p_FmPcd->p_FmPcdDriverParam, 0, sizeof(t_FmPcdDriverParam)); + + p_FmPcd->h_Fm = p_FmPcdParams->h_Fm; + p_FmPcd->guestId = FmGetGuestId(p_FmPcd->h_Fm); + p_FmPcd->h_FmMuram = FmGetMuramHandle(p_FmPcd->h_Fm); + FmGetPhysicalMuramBase(p_FmPcdParams->h_Fm, &physicalMuramBase); + p_FmPcd->physicalMuramBase = (uint64_t)((uint64_t)(&physicalMuramBase)->low | ((uint64_t)(&physicalMuramBase)->high << 32)); + + for(i = 0; inetEnvs[i].clsPlanGrpId = ILLEGAL_CLS_PLAN; + + if (p_FmPcdParams->useHostCommand) + { + t_FmHcParams hcParams; + + memset(&hcParams, 0, sizeof(hcParams)); + hcParams.h_Fm = p_FmPcd->h_Fm; + hcParams.h_FmPcd = (t_Handle)p_FmPcd; + memcpy((uint8_t*)&hcParams.params, (uint8_t*)&p_FmPcdParams->hc, sizeof(t_FmPcdHcParams)); + p_FmPcd->h_Hc = FmHcConfigAndInit(&hcParams); + if (!p_FmPcd->h_Hc) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd HC")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + else if(p_FmPcd->guestId != NCSW_MASTER_ID) + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("No Host Command defined for a guest partition.")); + + if(p_FmPcdParams->kgSupport) + { + p_FmPcd->p_FmPcdKg = (t_FmPcdKg *)KgConfig(p_FmPcd, p_FmPcdParams); + if(!p_FmPcd->p_FmPcdKg) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd Keygen")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + + if(p_FmPcdParams->plcrSupport) + { + p_FmPcd->p_FmPcdPlcr = (t_FmPcdPlcr *)PlcrConfig(p_FmPcd, p_FmPcdParams); + if(!p_FmPcd->p_FmPcdPlcr) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd Policer")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + + 
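+    /*
+     * The MURAM physical base is delivered as two 32-bit halves and is
+     * recombined above into a single 64-bit address; equivalently
+     * ("muramBase64" is a placeholder name):
+     */
+#if 0   /* illustrative sketch - not compiled */
+    muramBase64 = (uint64_t)physicalMuramBase.low |
+                  ((uint64_t)physicalMuramBase.high << 32);
+#endif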
if(p_FmPcdParams->prsSupport) + { + p_FmPcd->p_FmPcdPrs = (t_FmPcdPrs *)PrsConfig(p_FmPcd, p_FmPcdParams); + if(!p_FmPcd->p_FmPcdPrs) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd Parser")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + } + + p_FmPcd->h_Spinlock = XX_InitSpinlock(); + if (!p_FmPcd->h_Spinlock) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Pcd spinlock")); + FM_PCD_Free(p_FmPcd); + return NULL; + } + + p_FmPcd->numOfEnabledGuestPartitionsPcds = 0; + + p_FmPcd->f_Exception = p_FmPcdParams->f_Exception; + p_FmPcd->f_FmPcdIndexedException = p_FmPcdParams->f_ExceptionId; + p_FmPcd->h_App = p_FmPcdParams->h_App; + + return p_FmPcd; +} + +t_Error FM_PCD_Init(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + t_FmPcdIpcMsg msg; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + uint8_t isMasterAlive = 0; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + memset(p_FmPcd->fmPcdIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if(Sprint (p_FmPcd->fmPcdIpcHandlerModuleName, "FM_PCD_%d_%d", FmGetId(p_FmPcd->h_Fm), NCSW_MASTER_ID) != 10) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if(Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm), p_FmPcd->guestId) != (p_FmPcd->guestId<10 ? 10:11)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + + p_FmPcd->h_IpcSession = XX_IpcInitSession(p_FmPcd->fmPcdIpcHandlerModuleName, p_FmPcd->fmPcdModuleName); + if (p_FmPcd->h_IpcSession == NULL) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM PCD Guest %d IPC session", p_FmPcd->guestId)); + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_PCD_MASTER_IS_ALIVE; + msg.msgBody[0] = p_FmPcd->guestId; + blockingFlag = TRUE; + + do + { + replyLength = sizeof(uint32_t) + sizeof(isMasterAlive); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(p_FmPcd->guestId), + (uint8_t*)&reply, + &replyLength, + FmPcdIpcMsgCompletionCB, + h_FmPcd)) != E_OK) + REPORT_ERROR(MAJOR, err, NO_MSG); + while(blockingFlag) ; + if(replyLength != (sizeof(uint32_t) + sizeof(isMasterAlive))) + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + isMasterAlive = *(uint8_t*)(reply.replyBody); + } while (!isMasterAlive); + } + + CHECK_INIT_PARAMETERS(p_FmPcd, CheckFmPcdParameters); + + if(p_FmPcd->p_FmPcdKg) + { + err = KgInit(p_FmPcd); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if(p_FmPcd->p_FmPcdPlcr) + { + err = PlcrInit(p_FmPcd); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if(p_FmPcd->p_FmPcdPrs) + { + err = PrsInit(p_FmPcd); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + /* register to inter-core messaging mechanism */ + memset(p_FmPcd->fmPcdModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if(Sprint (p_FmPcd->fmPcdModuleName, "FM_PCD_%d_%d",FmGetId(p_FmPcd->h_Fm),NCSW_MASTER_ID) != 10) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + err = XX_IpcRegisterMsgHandler(p_FmPcd->fmPcdModuleName, FmPcdHandleIpcMsgCB, p_FmPcd, FM_PCD_MAX_REPLY_SIZE); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + XX_Free(p_FmPcd->p_FmPcdDriverParam); + p_FmPcd->p_FmPcdDriverParam = NULL; + + FmRegisterPcd(p_FmPcd->h_Fm, p_FmPcd); + + return E_OK; +} + +t_Error 
FM_PCD_Free(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd =(t_FmPcd *)h_FmPcd; + t_Error err = E_OK; + + if(p_FmPcd->enabled) + FM_PCD_Disable(p_FmPcd); + + if (p_FmPcd->h_Spinlock) + XX_FreeSpinlock(p_FmPcd->h_Spinlock); + + if(p_FmPcd->p_FmPcdDriverParam) + { + XX_Free(p_FmPcd->p_FmPcdDriverParam); + p_FmPcd->p_FmPcdDriverParam = NULL; + } + if(p_FmPcd->p_FmPcdKg) + { + if((err = KgFree(p_FmPcd)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + XX_Free(p_FmPcd->p_FmPcdKg); + p_FmPcd->p_FmPcdKg = NULL; + } + + if(p_FmPcd->p_FmPcdPlcr) + { + if((err = PlcrFree(p_FmPcd)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + XX_Free(p_FmPcd->p_FmPcdPlcr); + p_FmPcd->p_FmPcdPlcr = NULL; + } + + if(p_FmPcd->p_FmPcdPrs) + { + if(p_FmPcd->guestId == NCSW_MASTER_ID) + PrsFree(p_FmPcd); + XX_Free(p_FmPcd->p_FmPcdPrs); + p_FmPcd->p_FmPcdPrs = NULL; + } + + if (p_FmPcd->h_Hc) + { + FmHcFree(p_FmPcd->h_Hc); + p_FmPcd->h_Hc = NULL; + } + + XX_IpcUnregisterMsgHandler(p_FmPcd->fmPcdModuleName); + + FmUnregisterPcd(p_FmPcd->h_Fm); + + XX_Free(p_FmPcd); + return E_OK; +} + +t_Error FM_PCD_Enable(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + if(p_FmPcd->p_FmPcdKg) + KgEnable(p_FmPcd); + + if(p_FmPcd->p_FmPcdPlcr) + PlcrEnable(p_FmPcd); + + if(p_FmPcd->p_FmPcdPrs) + PrsEnable(p_FmPcd); + + p_FmPcd->enabled = TRUE; + } + else + { + uint8_t enabled; + t_FmPcdIpcMsg msg; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_PCD_MASTER_IS_ENABLED; + replyLength = sizeof(uint32_t) + sizeof(enabled); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t) + sizeof(enabled)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + p_FmPcd->enabled = (bool)!!(*(uint8_t*)(reply.replyBody)); + if (!p_FmPcd->enabled) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-PCD master should be enabled first!")); + } + + return E_OK; +} + +t_Error FM_PCD_Disable(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + t_FmPcdIpcMsg msg; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + if(p_FmPcd->numOfEnabledGuestPartitionsPcds != 0) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Trying to disable a master partition PCD while guest partitions are still enabled.")); + + if(p_FmPcd->p_FmPcdKg) + KgDisable(p_FmPcd); + + if(p_FmPcd->p_FmPcdPlcr) + PlcrDisable(p_FmPcd); + + if(p_FmPcd->p_FmPcdPrs) + PrsDisable(p_FmPcd); + + p_FmPcd->enabled = FALSE; + + return E_OK; + } + + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_GUEST_DISABLE; + memset(&reply, 0, sizeof(reply)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + return (t_Error)(reply.error); +} + +t_Handle FM_PCD_SetNetEnvCharacteristics(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t intFlags, specialUnits = 0; 
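+    /*
+     * Master/guest bookkeeping used by FM_PCD_Enable()/FM_PCD_Disable()
+     * above: each guest that finds the master enabled bumps
+     * numOfEnabledGuestPartitionsPcds (see FM_PCD_MASTER_IS_ENABLED in the
+     * IPC handler), and the master refuses to disable while that count is
+     * non-zero.  Reduced sketch of the rule:
+     */
+#if 0   /* illustrative sketch - not compiled */
+    if (p_FmPcd->numOfEnabledGuestPartitionsPcds != 0)
+        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("guest partitions still enabled"));
+#endif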
+ uint8_t bitId = 0; + uint8_t i, j, k; + uint8_t netEnvCurrId; + uint8_t ipsecAhUnit = 0,ipsecEspUnit = 0; + bool ipsecAhExists = FALSE, ipsecEspExists = FALSE, shim1Selected = FALSE; + uint8_t hdrNum; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_STATE, NULL); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL); + + intFlags = FmPcdLock(p_FmPcd); + + /* find a new netEnv */ + for(i = 0;inetEnvs[i].used) + break; + + if(i== FM_MAX_NUM_OF_PORTS) + { + REPORT_ERROR(MAJOR, E_FULL,("No more than %d netEnv's allowed.", FM_MAX_NUM_OF_PORTS)); + FmPcdUnlock(p_FmPcd, intFlags); + return NULL; + } + + p_FmPcd->netEnvs[i].used = TRUE; + + if (!TRY_LOCK(NULL, &p_FmPcd->netEnvs[i].lock)) + { + FmPcdUnlock(p_FmPcd, intFlags); + return NULL; + } + FmPcdUnlock(p_FmPcd, intFlags); + + netEnvCurrId = (uint8_t)i; + + /* clear from previous use */ + memset(&p_FmPcd->netEnvs[netEnvCurrId].units, 0, FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS * sizeof(t_FmPcdIntDistinctionUnit)); + memset(&p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs, 0, FM_PCD_MAX_NUM_OF_PRIVATE_HDRS * sizeof(t_FmPcdNetEnvAliases)); + memcpy(&p_FmPcd->netEnvs[netEnvCurrId].units, p_NetEnvParams->units, p_NetEnvParams->numOfDistinctionUnits*sizeof(t_FmPcdIntDistinctionUnit)); + p_FmPcd->netEnvs[netEnvCurrId].clsPlanGrpId = ILLEGAL_CLS_PLAN; + + /* check that header with opt is not interchanged with the same header */ + for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + for (k=0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + /* if an option exists, check that other headers are not the same header + without option */ + if(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt) + { + for (j=0; (j < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr != HEADER_TYPE_NONE); j++) + if((p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].hdr == p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr) && + !p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[j].opt) + { + REPORT_ERROR(MINOR, E_FULL, ("Illegal unit - header with opt may not be interchangeable with the same header without opt")); + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + return NULL; + } + } + } + } + + /* IPSEC_AH and IPSEC_SPI can't be 2 units, */ + /* check that header with opt is not interchanged with the same header */ + for(i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + for(k=0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + /* Some headers pairs may not be defined on different units as the parser + doesn't distinguish */ + if(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_AH) + { + if (ipsecEspExists && (ipsecEspUnit != i)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate units")); + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + return NULL; + } + else + { + ipsecAhUnit = i; + ipsecAhExists = TRUE; + } + } + if(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_IPSEC_ESP) + { + if (ipsecAhExists && (ipsecAhUnit != i)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("HEADER_TYPE_IPSEC_AH and HEADER_TYPE_IPSEC_ESP may not be defined in separate 
units")); + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + return NULL; + } + else + { + ipsecEspUnit = i; + ipsecEspExists = TRUE; + } + } + if(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr == HEADER_TYPE_UDP_ENCAP_ESP) + { + /* TODO - general coding. choose the free shim header */ + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits].hdr = HEADER_TYPE_UDP_ENCAP_ESP; + p_FmPcd->netEnvs[netEnvCurrId].aliasHdrs[specialUnits++].aliasHdr = HEADER_TYPE_USER_DEFINED_SHIM1; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr = HEADER_TYPE_USER_DEFINED_SHIM1; + p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].opt = 0; + } + } + } + + /* if private header (shim), check that no other headers specified */ + for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + if(IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + if(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[1].hdr != HEADER_TYPE_NONE) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header may not be interchanged with other headers")); + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + return NULL; + } + } + + for(i=0; inumOfDistinctionUnits;i++) + { + if (IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + switch(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr) + { + case(HEADER_TYPE_USER_DEFINED_SHIM1): + if (shim1Selected) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("SHIM header cannot be selected with UDP_IPSEC_ESP")); + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + return NULL; + } + shim1Selected = TRUE; + p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000001; + break; + case(HEADER_TYPE_USER_DEFINED_SHIM2): + p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = 0x00000002; + break; + default: + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Requested SHIM not supported")); + } + else + { + p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i] = (uint32_t)(0x80000000 >> bitId++); + + if(IS_SPECIAL_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + p_FmPcd->netEnvs[netEnvCurrId].macsecVector = p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i]; + } + } + + /* define a set of hardware parser LCV's according to the defined netenv */ + + /* set an array of LCV's for each header in the netEnv */ + for (i=0; (i < FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr != HEADER_TYPE_NONE); i++) + { + /* private headers have no LCV in the hard parser */ + if (!IS_PRIVATE_HEADER(p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[0].hdr)) + { + for (k=0; (k < FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) + && (p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr != HEADER_TYPE_NONE); k++) + { + GET_PRS_HDR_NUM(hdrNum, p_FmPcd->netEnvs[netEnvCurrId].units[i].hdrs[k].hdr); + if ((hdrNum == ILLEGAL_HDR_NUM) || (hdrNum == NO_HDR_NUM)) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, NO_MSG); + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + return NULL; + } + p_FmPcd->netEnvs[netEnvCurrId].lcvs[hdrNum] |= p_FmPcd->netEnvs[netEnvCurrId].unitsVectors[i]; + } + } + } + + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvCurrId].lock); + + return UINT_TO_PTR((uint64_t)netEnvCurrId+1); +} + +t_Error FM_PCD_DeleteNetEnvCharacteristics(t_Handle h_FmPcd, t_Handle h_NetEnv) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint8_t netEnvId = (uint8_t)(PTR_TO_UINT(h_NetEnv)-1); + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, 
E_INVALID_STATE); + + if (!TRY_LOCK(p_FmPcd->h_Spinlock, &p_FmPcd->netEnvs[netEnvId].lock)) + return ERROR_CODE(E_BUSY); + /* check that no port is bound to this netEnv */ + if(p_FmPcd->netEnvs[netEnvId].owners) + { + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvId].lock); + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Trying to delete a netEnv that has ports/schemes/trees/clsPlanGrps bound to")); + } + p_FmPcd->netEnvs[netEnvId].used= FALSE; + p_FmPcd->netEnvs[netEnvId].clsPlanGrpId = ILLEGAL_CLS_PLAN; + + memset(p_FmPcd->netEnvs[netEnvId].units, 0, sizeof(t_FmPcdIntDistinctionUnit)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + memset(p_FmPcd->netEnvs[netEnvId].unitsVectors, 0, sizeof(uint32_t)*FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + memset(p_FmPcd->netEnvs[netEnvId].lcvs, 0, sizeof(uint32_t)*FM_PCD_PRS_NUM_OF_HDRS); + + RELEASE_LOCK(p_FmPcd->netEnvs[netEnvId].lock); + + return E_OK; +} + +void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN(h_FmPcd, E_INVALID_STATE); + + FmHcTxConf(p_FmPcd->h_Hc, p_Fd); +} + +uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t replyLength, outCounter = 0; + t_FmPcdIpcMsg msg; + t_Error err; + t_FmPcdIpcReply reply; + + SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_PCD_GET_COUNTER; + memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t)); + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(uint32_t), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t) + sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t)); + return outCounter; + } + + switch(counter) + { + case(e_FM_PCD_KG_COUNTERS_TOTAL): + if(!p_FmPcd->p_FmPcdKg) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this counters")); + return 0; + } + break; + case(e_FM_PCD_PLCR_COUNTERS_YELLOW): + case(e_FM_PCD_PLCR_COUNTERS_RED): + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + case(e_FM_PCD_PLCR_COUNTERS_TOTAL): + case(e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + if(!p_FmPcd->p_FmPcdPlcr) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this counters")); + return 0; + } + /* check that counters are enabled */ + if(!(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + return 0; + } + break; + case(e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + 
case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + if(!p_FmPcd->p_FmPcdPrs) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this counters")); + return 0; + } + break; + default: + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + return 0; + } + switch(counter) + { + case(e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pds); + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l2rrs); + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l3rrs); + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l4rrs); + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->srrs); + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l2rres); + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l3rres); + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l4rres); + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->srres); + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->spcs); + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->spscs); + case(e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->hxscs); + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mrcs); + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mrscs); + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mwcs); + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mwscs); + case(e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + return GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fcscs); + case(e_FM_PCD_KG_COUNTERS_TOTAL): + return GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgtpc); + + /*Policer statictics*/ + case(e_FM_PCD_PLCR_COUNTERS_YELLOW): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt); + case(e_FM_PCD_PLCR_COUNTERS_RED): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt); + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt); + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt); + case(e_FM_PCD_PLCR_COUNTERS_TOTAL): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt); + case(e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + return GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt); + + default: + 
REPORT_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + return 0; + } +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_DumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdIpcMsg msg; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_DUMP_REGS; + return XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL); + } + if (p_FmPcd->p_FmPcdKg) + return FM_PCD_KgDumpRegs(h_FmPcd); + if (p_FmPcd->p_FmPcdPlcr) + return FM_PCD_PlcrDumpRegs(h_FmPcd); + if (p_FmPcd->p_FmPcdPrs) + return FM_PCD_PrsDumpRegs(h_FmPcd); + return E_OK; +} + +t_Error FM_PCD_HcDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_STATE); + + return FmHcDumpRegs(p_FmPcd->h_Hc); +} + +#endif /* (defined(DEBUG_ERRORS) && ... */ + +t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t bitMask = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigException - guest mode!")); + + GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_FmPcd->exceptions |= bitMask; + else + p_FmPcd->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t bitMask = 0, tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetException - guest mode!")); + + GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception); + + if(bitMask) + { + if (enable) + p_FmPcd->exceptions |= bitMask; + else + p_FmPcd->exceptions &= ~bitMask; + + switch(exception) + { + case(e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC): + case(e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW): + if(!p_FmPcd->p_FmPcdKg) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working")); + break; + case(e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC): + case(e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR): + case(e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE): + case(e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE): + if(!p_FmPcd->p_FmPcdPlcr) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working")); + break; + case(e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC): + case(e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC): + if(!p_FmPcd->p_FmPcdPrs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - parser is not working")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported exception")); + + } + + switch(exception) + { + case(e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeeer); + if(enable) + tmpReg |= FM_PCD_KG_DOUBLE_ECC; + else + tmpReg &= ~FM_PCD_KG_DOUBLE_ECC; + 
WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeeer, tmpReg); + break; + case(e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeeer); + if(enable) + tmpReg |= FM_PCD_KG_KEYSIZE_OVERFLOW; + else + tmpReg &= ~FM_PCD_KG_KEYSIZE_OVERFLOW; + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgeeer, tmpReg); + break; + case(e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perer); + if(enable) + tmpReg |= FM_PCD_PRS_DOUBLE_ECC; + else + tmpReg &= ~FM_PCD_PRS_DOUBLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perer, tmpReg); + break; + case(e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pever); + if(enable) + tmpReg |= FM_PCD_PRS_SINGLE_ECC; + else + tmpReg &= ~FM_PCD_PRS_SINGLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pever, tmpReg); + break; + case(e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier); + if(enable) + tmpReg |= FM_PCD_PLCR_DOUBLE_ECC; + else + tmpReg &= ~FM_PCD_PLCR_DOUBLE_ECC; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg); + break; + case(e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier); + if(enable) + tmpReg |= FM_PCD_PLCR_INIT_ENTRY_ERROR; + else + tmpReg &= ~FM_PCD_PLCR_INIT_ENTRY_ERROR; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier, tmpReg); + break; + case(e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier); + if(enable) + tmpReg |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE; + else + tmpReg &= ~FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg); + break; + case(e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE): + tmpReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier); + if(enable) + tmpReg |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE; + else + tmpReg &= ~FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE; + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier, tmpReg); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported exception")); + } + /* for ECC exceptions driver automatically enables ECC mechanism, if disabled. 
+ Driver may disable them automatically, depending on driver's status */ + if(enable && ( (exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC))) + FmEnableRamsEcc(p_FmPcd->h_Fm); + if(!enable && ( (exception == e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC) | + (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC))) + FmDisableRamsEcc(p_FmPcd->h_Fm); + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ForceIntr - guest mode!")); + + switch(exception) + { + case(e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC): + case(e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW): + if(!p_FmPcd->p_FmPcdKg) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - keygen is not working")); + break; + case(e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC): + case(e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR): + case(e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE): + case(e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE): + if(!p_FmPcd->p_FmPcdPlcr) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt - policer is not working")); + break; + case(e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC): + case(e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC): + if(!p_FmPcd->p_FmPcdPrs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this interrupt -parsrer is not working")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Invalid interrupt requested")); + + } + switch(exception) + { + case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perfr, FM_PCD_PRS_DOUBLE_ECC); + break; + case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevfr, FM_PCD_PRS_SINGLE_ECC); + break; + case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_KG_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgfeer, FM_PCD_KG_DOUBLE_ECC); + break; + case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW: + if (!(p_FmPcd->exceptions & FM_PCD_EX_KG_KEYSIZE_OVERFLOW)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgfeer, FM_PCD_KG_KEYSIZE_OVERFLOW); + break; + case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_DOUBLE_ECC); + break; + case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + 
WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, FM_PCD_PLCR_INIT_ENTRY_ERROR); + break; + case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE); + break; + case e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE: + if (!(p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE); + break; + default: + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception may not be forced")); + } + + return E_OK; +} + + +t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ModifyCounter - guest mode!")); + + switch(counter) + { + case(e_FM_PCD_KG_COUNTERS_TOTAL): + if(!p_FmPcd->p_FmPcdKg) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this counters - keygen is not working")); + break; + case(e_FM_PCD_PLCR_COUNTERS_YELLOW): + case(e_FM_PCD_PLCR_COUNTERS_RED): + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + case(e_FM_PCD_PLCR_COUNTERS_TOTAL): + case(e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + if(!p_FmPcd->p_FmPcdPlcr) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Can't ask for this counters - Policer is not working")); + if(!(GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr) & FM_PCD_PLCR_GCR_STEN)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + case(e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + case(e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + if(!p_FmPcd->p_FmPcdPrs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported type of counter")); + } + switch(counter) + { + case(e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pds, value); + break; + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l2rrs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED): + 
WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l3rrs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l4rrs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->srrs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l2rres, value); + break; + case(e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l3rres, value); + break; + case(e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->l4rres, value); + break; + case(e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->srres, value); + break; + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->spcs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->spscs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->hxscs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mrcs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mrscs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mwcs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->mwscs, value); + break; + case(e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES): + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->fcscs, value); + break; + case(e_FM_PCD_KG_COUNTERS_TOTAL): + WRITE_UINT32(p_FmPcd->p_FmPcdKg->p_FmPcdKgRegs->kgtpc,value); + break; + + /*Policer counters*/ + case(e_FM_PCD_PLCR_COUNTERS_YELLOW): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ypcnt, value); + break; + case(e_FM_PCD_PLCR_COUNTERS_RED): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rpcnt, value); + break; + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rrpcnt, value); + break; + case(e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_rypcnt, value); + break; + case(e_FM_PCD_PLCR_COUNTERS_TOTAL): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_tpcnt, value); + break; + case(e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH): + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_flmcnt, value); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Unsupported type of counter")); + } + +return E_OK; +} + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_pcd.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_pcd.h @@ -0,0 +1,715 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_pcd.h + + @Description FM PCD ... +*//***************************************************************************/ +#ifndef __FM_PCD_H +#define __FM_PCD_H + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" +#include "fm_pcd_ext.h" + + +#define __ERR_MODULE__ MODULE_FM_PCD + + +/**************************************************************************//** + @Group FM_PCD_Runtime_grp FM PCD Runtime Unit + @{ +*//***************************************************************************/ + +/****************************/ +/* Network defines */ +/****************************/ +#define UDP_HEADER_SIZE 8 + +#define ESP_SPI_OFFSET 0 +#define ESP_SPI_SIZE 4 +#define ESP_SEQ_NUM_OFFSET ESP_SPI_SIZE +#define ESP_SEQ_NUM_SIZE 4 + +/****************************/ +/* General defines */ +/****************************/ +#define ILLEGAL_CLS_PLAN 0xff +#define ILLEGAL_NETENV 0xff +/****************************/ +/* Error defines */ +/****************************/ +#define FM_PCD_EX_KG_DOUBLE_ECC 0x80000000 +#define FM_PCD_EX_KG_KEYSIZE_OVERFLOW 0x40000000 + +#define FM_PCD_EX_PLCR_DOUBLE_ECC 0x20000000 +#define FM_PCD_EX_PLCR_INIT_ENTRY_ERROR 0x10000000 +#define FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE 0x08000000 +#define FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE 0x04000000 + +#define FM_PCD_EX_PRS_DOUBLE_ECC 0x02000000 +#define FM_PCD_EX_PRS_SINGLE_ECC 0x01000000 + +#define GET_FM_PCD_EXCEPTION_FLAG(bitMask, exception) \ +switch(exception){ \ + case e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC: \ + bitMask = FM_PCD_EX_KG_DOUBLE_ECC; break; \ + case e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC: \ + bitMask = FM_PCD_EX_PLCR_DOUBLE_ECC; break; \ + case e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW: \ + bitMask = FM_PCD_EX_KG_KEYSIZE_OVERFLOW; break; \ + case e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR: \ + bitMask = FM_PCD_EX_PLCR_INIT_ENTRY_ERROR; break; \ + case e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE: \ + bitMask = FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE; break; \ + case 
e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE: \ + bitMask = FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE; break; \ + case e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC: \ + bitMask = FM_PCD_EX_PRS_DOUBLE_ECC; break; \ + case e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC: \ + bitMask = FM_PCD_EX_PRS_SINGLE_ECC; break; \ + default: bitMask = 0;break;} + +/***********************************************************************/ +/* SW parser L4 shells patch */ +/***********************************************************************/ +#ifdef FM_PRS_L4_SHELL_ERRATA_FMANb +#define SW_PRS_L4_PATCH \ +{ 0x31,0x92,0x02,0x1f,0x00,0x32,0x00,0x78, \ + 0x00,0x34,0x32,0xf0,0x00,0x50,0x00,0x0c, \ + 0x28,0x5e,0x83,0x8e,0x29,0x32,0xaf,0x8e, \ + 0x31,0xb2,0x9f,0xff,0x00,0x06,0xaf,0xbf, \ + 0x00,0x06,0x29,0x36,0x00,0x01,0x1b,0xff, \ + 0x32,0xf0,0x00,0x50,0x00,0x08,0x28,0x5e, \ + 0x08,0x99,0x00,0x00,0x9f,0x8e,0x31,0xb2, \ + 0x9f,0xff,0x00,0x06,0x29,0x36,0x00,0x01, \ + 0x1b,0xff,0x32,0xf0,0x00,0x50,0x00,0x04, \ + 0x28,0x5e,0x8f,0x9e,0x29,0x32,0x31,0xb2, \ + 0x8f,0xbf,0x00,0x06,0x29,0x36,0x00,0x01, \ + 0x1b,0xff,0x32,0xf0,0x00,0x50,0x00,0x04, \ + 0x28,0x5e,0x8f,0x9e,0x29,0x32,0x31,0xb2, \ + 0x8f,0xbf,0x00,0x06,0x29,0x36,0x00,0x01, \ + 0x1b,0xff,0x00,0x00,0x00,0x00,0x00,0x00}; + +#define SW_PRS_L4_PATCH_SIZE 120 +#endif /* FM_PRS_L4_SHELL_ERRATA_FMANb */ + +/****************************/ +/* Parser defines */ +/****************************/ +/* masks */ +#define PRS_ERR_CAP 0x80000000 +#define PRS_ERR_TYPE_DOUBLE 0x40000000 +#define PRS_ERR_SINGLE_ECC_CNT_MASK 0x00FF0000 +#define PRS_ERR_ADDR_MASK 0x000001FF +#define FM_PCD_PRS_RPIMAC_EN 0x00000001 +#define FM_PCD_PRS_SINGLE_ECC 0x00004000 +#define FM_PCD_PRS_PORT_IDLE_STS 0xffff0000 +#define FM_PCD_PRS_DOUBLE_ECC 0x00004000 +#define FM_PCD_PRS_PPSC_ALL_PORTS 0xffff0000 + +/* others */ +#define PRS_MAX_CYCLE_LIMIT 8191 +#define PRS_SW_DATA 0x00000800 +#define PRS_REGS_OFFSET 0x00000840 + +#define GET_FM_PCD_PRS_PORT_ID(prsPortId,hardwarePortId) \ + prsPortId = (uint8_t)(hardwarePortId & 0x0f) + +#define GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId) \ + bitMask = 0x80000000>>prsPortId + +/***********************************************************************/ +/* Keygen defines */ +/***********************************************************************/ +/* Masks */ +#define FM_PCD_KG_KGGCR_EN 0x80000000 +#define KG_SCH_GEN_VALID 0x80000000 +#define KG_SCH_GEN_EXTRACT_TYPE 0x00008000 +#define KG_ERR_CAP 0x80000000 +#define KG_ERR_TYPE_DOUBLE 0x40000000 +#define KG_ERR_ADDR_MASK 0x00000FFF +#define FM_PCD_KG_DOUBLE_ECC 0x80000000 +#define FM_PCD_KG_KEYSIZE_OVERFLOW 0x40000000 +#define KG_SCH_MODE_EN 0x80000000 + +/* shifts */ +#define FM_PCD_KG_PE_CPP_MASK_SHIFT 16 +#define FM_PCD_KG_KGAR_WSEL_SHIFT 8 + +/* others */ +#define KG_DOUBLE_MEANING_REGS_OFFSET 0x100 +#define NO_VALIDATION 0x70 +#define KG_ACTION_REG_TO 1024 +#define KG_MAX_PROFILE 255 +#define SCHEME_ALWAYS_DIRECT 0xFFFFFFFF + +typedef struct { + bool known; + uint8_t id; +} t_FmPcdKgSchemesExtractsEntry; + +typedef struct { + t_FmPcdKgSchemesExtractsEntry extractsArray[FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY]; +} t_FmPcdKgSchemesExtracts; + +/***********************************************************************/ +/* Policer defines */ +/***********************************************************************/ + +/* masks */ +#define FM_PCD_PLCR_PEMODE_PI 0x80000000 +#define FM_PCD_PLCR_PEMODE_CBLND 0x40000000 +#define FM_PCD_PLCR_PEMODE_ALG_MASK 0x30000000 +#define FM_PCD_PLCR_PEMODE_ALG_RFC2698 0x10000000 +#define 
FM_PCD_PLCR_PEMODE_ALG_RFC4115 0x20000000 +#define FM_PCD_PLCR_PEMODE_DEFC_MASK 0x0C000000 +#define FM_PCD_PLCR_PEMODE_DEFC_Y 0x04000000 +#define FM_PCD_PLCR_PEMODE_DEFC_R 0x08000000 +#define FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE 0x0C000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_MASK 0x03000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_Y 0x01000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_R 0x02000000 +#define FM_PCD_PLCR_PEMODE_OVCLR_G_NC 0x03000000 +#define FM_PCD_PLCR_PEMODE_PKT 0x00800000 +#define FM_PCD_PLCR_PEMODE_FPP_MASK 0x001F0000 +#define FM_PCD_PLCR_PEMODE_FPP_SHIFT 16 +#define FM_PCD_PLCR_PEMODE_FLS_MASK 0x0000F000 +#define FM_PCD_PLCR_PEMODE_FLS_L2 0x00003000 +#define FM_PCD_PLCR_PEMODE_FLS_L3 0x0000B000 +#define FM_PCD_PLCR_PEMODE_FLS_L4 0x0000E000 +#define FM_PCD_PLCR_PEMODE_FLS_FULL 0x0000F000 +#define FM_PCD_PLCR_PEMODE_RBFLS 0x00000800 +#define FM_PCD_PLCR_PEMODE_TRA 0x00000004 +#define FM_PCD_PLCR_PEMODE_TRB 0x00000002 +#define FM_PCD_PLCR_PEMODE_TRC 0x00000001 +#define FM_PCD_PLCR_DOUBLE_ECC 0x80000000 +#define FM_PCD_PLCR_INIT_ENTRY_ERROR 0x40000000 +#define FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE 0x80000000 +#define FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE 0x40000000 + +#define FM_PCD_PLCR_NIA_VALID 0x80000000 + +#define FM_PCD_PLCR_GCR_EN 0x80000000 +#define FM_PCD_PLCR_GCR_STEN 0x40000000 +#define FM_PCD_PLCR_GCR_DAR 0x20000000 +#define FM_PCD_PLCR_GCR_DEFNIA 0x00FFFFFF +#define FM_PCD_PLCR_NIA_ABS 0x00000100 + +#define FM_PCD_PLCR_GSR_BSY 0x80000000 +#define FM_PCD_PLCR_GSR_DQS 0x60000000 +#define FM_PCD_PLCR_GSR_RPB 0x20000000 +#define FM_PCD_PLCR_GSR_FQS 0x0C000000 +#define FM_PCD_PLCR_GSR_LPALG 0x0000C000 +#define FM_PCD_PLCR_GSR_LPCA 0x00003000 +#define FM_PCD_PLCR_GSR_LPNUM 0x000000FF + +#define FM_PCD_PLCR_EVR_PSIC 0x80000000 +#define FM_PCD_PLCR_EVR_AAC 0x40000000 + +#define FM_PCD_PLCR_PAR_PSI 0x20000000 +#define FM_PCD_PLCR_PAR_PNUM 0x00FF0000 +/* PWSEL Selctive select options */ +#define FM_PCD_PLCR_PAR_PWSEL_PEMODE 0x00008000 /* 0 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEGNIA 0x00004000 /* 1 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEYNIA 0x00002000 /* 2 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERNIA 0x00001000 /* 3 */ +#define FM_PCD_PLCR_PAR_PWSEL_PECIR 0x00000800 /* 4 */ +#define FM_PCD_PLCR_PAR_PWSEL_PECBS 0x00000400 /* 5 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEPIR_EIR 0x00000200 /* 6 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEPBS_EBS 0x00000100 /* 7 */ +#define FM_PCD_PLCR_PAR_PWSEL_PELTS 0x00000080 /* 8 */ +#define FM_PCD_PLCR_PAR_PWSEL_PECTS 0x00000040 /* 9 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEPTS_ETS 0x00000020 /* 10 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEGPC 0x00000010 /* 11 */ +#define FM_PCD_PLCR_PAR_PWSEL_PEYPC 0x00000008 /* 12 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERPC 0x00000004 /* 13 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERYPC 0x00000002 /* 14 */ +#define FM_PCD_PLCR_PAR_PWSEL_PERRPC 0x00000001 /* 15 */ + +#define FM_PCD_PLCR_PAR_PMR_BRN_1TO1 0x0000 /* - Full bit replacement. {PBNUM[0:N-1] + 1-> 2^N specific locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_2TO2 0x1 /* - {PBNUM[0:N-2],PNUM[N-1]}. + 2-> 2^(N-1) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_4TO4 0x2 /* - {PBNUM[0:N-3],PNUM[N-2:N-1]}. + 4-> 2^(N-2) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_8TO8 0x3 /* - {PBNUM[0:N-4],PNUM[N-3:N-1]}. + 8->2^(N-3) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_16TO16 0x4 /* - {PBNUM[0:N-5],PNUM[N-4:N-1]}. + 16-> 2^(N-4) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_32TO32 0x5 /* {PBNUM[0:N-6],PNUM[N-5:N-1]}. + 32-> 2^(N-5) base locations. 
*/ +#define FM_PCD_PLCR_PAR_PMR_BRN_64TO64 0x6 /* {PBNUM[0:N-7],PNUM[N-6:N-1]}. + 64-> 2^(N-6) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_128TO128 0x7 /* {PBNUM[0:N-8],PNUM[N-7:N-1]}. + 128-> 2^(N-7) base locations. */ +#define FM_PCD_PLCR_PAR_PMR_BRN_256TO256 0x8 /* - No bit replacement for N=8. {PNUM[N-8:N-1]}. + When N=8 this option maps all 256 profiles by the DISPATCH bus into one group. */ + +#define FM_PCD_PLCR_PMR_V 0x80000000 +#define PLCR_ERR_ECC_CAP 0x80000000 +#define PLCR_ERR_ECC_TYPE_DOUBLE 0x40000000 +#define PLCR_ERR_ECC_PNUM_MASK 0x00000FF0 +#define PLCR_ERR_ECC_OFFSET_MASK 0x0000000F + +#define PLCR_ERR_UNINIT_CAP 0x80000000 +#define PLCR_ERR_UNINIT_NUM_MASK 0x000000FF +#define PLCR_ERR_UNINIT_PID_MASK 0x003f0000 +#define PLCR_ERR_UNINIT_ABSOLUTE_MASK 0x00008000 + +/* shifts */ +#define PLCR_ERR_ECC_PNUM_SHIFT 4 +#define PLCR_ERR_UNINIT_PID_SHIFT 16 + +#define FM_PCD_PLCR_PMR_BRN_SHIFT 16 + +/* others */ +#define WAIT_FOR_PLCR_EVR_AAC \ +{\ + uint32_t count = 0; \ + uint32_t tmpReg32; \ + while (count < FM_PCD_PLCR_POLL) \ + { \ + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->fmpl_evr);\ + if (!( tmpReg32 & FM_PCD_PLCR_EVR_AAC)) break;\ + count++;\ + }\ +} + +#define WAIT_FOR_PLCR_PAR_GO \ +{\ + uint32_t count = 0; \ + uint32_t tmpReg32; \ + while (count < FM_PCD_PLCR_POLL) \ + { \ + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->fmpl_par);\ + if (!( tmpReg32 & FM_PCD_PLCR_PAR_GO)) break;\ + count++; \ + }\ +} + +#define PLCR_PORT_WINDOW_SIZE(hardwarePortId) + +/****************************/ +/* Defaults */ +/****************************/ +#define DEFAULT_plcrAutoRefresh FALSE +#define DEFAULT_prsMaxParseCycleLimit 0 +#define DEFAULT_fmPcdKgErrorExceptions (FM_PCD_EX_KG_DOUBLE_ECC | FM_PCD_EX_KG_KEYSIZE_OVERFLOW) +#define DEFAULT_fmPcdPlcrErrorExceptions (FM_PCD_EX_PLCR_DOUBLE_ECC | FM_PCD_EX_PLCR_INIT_ENTRY_ERROR) +#define DEFAULT_fmPcdPlcrExceptions 0 +#define DEFAULT_fmPcdPrsErrorExceptions (FM_PCD_EX_PRS_DOUBLE_ECC) + +#define DEFAULT_fmPcdPrsExceptions FM_PCD_EX_PRS_SINGLE_ECC +#define DEFAULT_numOfUsedProfilesPerWindow 16 +#define DEFAULT_numOfSharedPlcrProfiles 4 + +/***********************************************************************/ +/* Memory map */ +/***********************************************************************/ +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +typedef _Packed struct { + volatile uint32_t kgoe_sp; + volatile uint32_t kgoe_cpp; +} _PackedType t_FmPcdKgPortConfigRegs; + +typedef _Packed struct { + volatile uint32_t kgcpe[8]; +} _PackedType t_FmPcdKgClsPlanRegs; + +typedef _Packed union { + t_FmPcdKgInterModuleSchemeRegs schemeRegs; + t_FmPcdKgPortConfigRegs portRegs; + t_FmPcdKgClsPlanRegs clsPlanRegs; +} _PackedType u_FmPcdKgIndirectAccessRegs; + +typedef _Packed struct { + volatile uint32_t kggcr; + volatile uint32_t res0; + volatile uint32_t res1; + volatile uint32_t kgeer; + volatile uint32_t kgeeer; + volatile uint32_t res2; + volatile uint32_t res3; + volatile uint32_t kgseer; + volatile uint32_t kgseeer; + volatile uint32_t kggsr; + volatile uint32_t kgtpc; + volatile uint32_t kgserc; + volatile uint32_t res4[4]; + volatile uint32_t kgfdor; + volatile uint32_t kggdv0r; + volatile uint32_t kggdv1r; + volatile uint32_t res5[5]; + volatile uint32_t kgfer; + volatile uint32_t kgfeer; + volatile uint32_t res6[38]; + u_FmPcdKgIndirectAccessRegs indirectAccessRegs; + volatile uint32_t res[42]; /*(0xfc-sizeof(u_FmPcdKgIndirectAccessRegs))/4 */ + volatile uint32_t kgar; +} _PackedType t_FmPcdKgRegs; + +typedef _Packed struct { +/* General Configuration and Status Registers */ + volatile uint32_t fmpl_gcr; /* 0x000 FMPL_GCR - FM Policer General Configuration */ + volatile uint32_t fmpl_gsr; /* 0x004 FMPL_GSR - FM Policer Global Status Register */ + volatile uint32_t fmpl_evr; /* 0x008 FMPL_EVR - FM Policer Event Register */ + volatile uint32_t fmpl_ier; /* 0x00C FMPL_IER - FM Policer Interrupt Enable Register */ + volatile uint32_t fmpl_ifr; /* 0x010 FMPL_IFR - FM Policer Interrupt Force Register */ + volatile uint32_t fmpl_eevr; /* 0x014 FMPL_EEVR - FM Policer Error Event Register */ + volatile uint32_t fmpl_eier; /* 0x018 FMPL_EIER - FM Policer Error Interrupt Enable Register */ + volatile uint32_t fmpl_eifr; /* 0x01C FMPL_EIFR - FM Policer Error Interrupt Force Register */ +/* Global Statistic Counters */ + volatile uint32_t fmpl_rpcnt; /* 0x020 FMPL_RPC - FM Policer RED Packets Counter */ + volatile uint32_t fmpl_ypcnt; /* 0x024 FMPL_YPC - FM Policer YELLOW Packets Counter */ + volatile uint32_t fmpl_rrpcnt; /* 0x028 FMPL_RRPC - FM Policer Recolored RED Packet Counter */ + volatile uint32_t fmpl_rypcnt; /* 0x02C FMPL_RYPC - FM Policer Recolored YELLOW Packet Counter */ + volatile uint32_t fmpl_tpcnt; /* 0x030 FMPL_TPC - FM Policer Total Packet Counter */ + volatile uint32_t fmpl_flmcnt; /* 0x034 FMPL_FLMC - FM Policer Frame Length Mismatch Counter */ + volatile uint32_t fmpl_res0[21]; /* 0x038 - 0x08B Reserved */ +/* Profile RAM Access Registers */ + volatile uint32_t fmpl_par; /* 0x08C FMPL_PAR - FM Policer Profile Action Register*/ + t_FmPcdPlcrInterModuleProfileRegs profileRegs; +/* Error Capture Registers */ + volatile uint32_t fmpl_serc; /* 0x100 FMPL_SERC - FM Policer Soft Error Capture */ + volatile uint32_t fmpl_upcr; /* 0x104 FMPL_UPCR - FM Policer Uninitialized Profile Capture Register */ + volatile uint32_t fmpl_res2; /* 0x108 Reserved */ +/* Debug Registers */ + volatile uint32_t fmpl_res3[61]; /* 0x10C-0x200 Reserved Debug*/ +/* Profile Selection Mapping Registers Per Port-ID (n=1-11, 16) */ + volatile uint32_t fmpl_dpmr; /* 0x200 FMPL_DPMR - FM Policer Default Mapping Register */ + volatile uint32_t fmpl_pmr[63]; /*+default 0x204-0x2FF FMPL_PMR1 - FMPL_PMR63, - FM Policer Profile Mapping Registers. 
+ (for port-ID 1-11, only for supported Port-ID registers) */ +} _PackedType t_FmPcdPlcrRegs; + +typedef _Packed struct { + volatile uint32_t rpclim; + volatile uint32_t rpimac; + volatile uint32_t pmeec; + volatile uint32_t res1[5]; + volatile uint32_t pevr; + volatile uint32_t pever; + volatile uint32_t pevfr; + volatile uint32_t perr; + volatile uint32_t perer; + volatile uint32_t perfr; + volatile uint32_t res2[0xA]; + volatile uint32_t ppsc; + volatile uint32_t res3; + volatile uint32_t pds; + volatile uint32_t l2rrs; + volatile uint32_t l3rrs; + volatile uint32_t l4rrs; + volatile uint32_t srrs; + volatile uint32_t l2rres; + volatile uint32_t l3rres; + volatile uint32_t l4rres; + volatile uint32_t srres; + volatile uint32_t spcs; + volatile uint32_t spscs; + volatile uint32_t hxscs; + volatile uint32_t mrcs; + volatile uint32_t mwcs; + volatile uint32_t mrscs; + volatile uint32_t mwscs; + volatile uint32_t fcscs; +} _PackedType t_FmPcdPrsRegs; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/***********************************************************************/ +/* Driver's internal structures */ +/***********************************************************************/ + +typedef struct { + t_Handle h_Manip; + bool keepRes; + e_FmPcdEngine nextEngine; + uint8_t parseCode; +} t_FmPcdInfoForManip; + +/**************************************************************************//** + @Description A structure of parameters to communicate + between the port and PCD regarding the KG scheme. +*//***************************************************************************/ +typedef struct { + uint8_t netEnvId; /* in */ + uint8_t numOfDistinctionUnits; /* in */ + uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* in */ + uint32_t vector; /* out */ +} t_NetEnvParams; + +typedef struct { + volatile bool lock; + bool used; + uint8_t owners; + uint8_t netEnvId; + uint8_t guestId; + uint8_t baseEntry; + uint16_t sizeOfGrp; + protocolOpt_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)]; +} t_FmPcdKgClsPlanGrp; + +typedef struct { + volatile bool lock; + bool valid; + uint8_t netEnvId; + uint8_t owners; + uint32_t matchVector; + uint32_t ccUnits; + bool nextRelativePlcrProfile; + uint16_t relativeProfileId; + uint16_t numOfProfiles; + t_FmPcdKgKeyOrder orderedArray; + e_FmPcdEngine nextEngine; + e_FmPcdDoneAction doneAction; + uint8_t pointedOwners; + uint32_t requiredAction; + bool extractedOrs; + uint8_t bitOffsetInPlcrProfile; + bool directPlcr; +} t_FmPcdKgScheme; + +typedef struct { + bool allocated; + uint8_t ownerId; /* guestId for KG in multi-partition only, + portId for PLCR in any environment */ +} t_FmPcdAllocMng; + +typedef struct { + t_FmPcdKgRegs *p_FmPcdKgRegs; + uint32_t schemeExceptionsBitMask; + uint8_t numOfSchemes; + uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES]; + t_FmPcdKgScheme schemes[FM_PCD_KG_NUM_OF_SCHEMES]; + t_FmPcdKgClsPlanGrp clsPlanGrps[FM_MAX_NUM_OF_PORTS]; + uint8_t emptyClsPlanGrpId; + t_FmPcdAllocMng schemesMng[FM_PCD_KG_NUM_OF_SCHEMES]; /* only for MASTER ! 
*/ + t_FmPcdAllocMng clsPlanBlocksMng[FM_PCD_MAX_NUM_OF_CLS_PLANS/CLS_PLAN_NUM_PER_GRP]; +} t_FmPcdKg; + +typedef struct { + uint16_t profilesBase; + uint16_t numOfProfiles; + t_Handle h_FmPort; +} t_FmPcdPlcrMapParam; + +typedef struct { + bool valid; + volatile bool lock; + t_FmPcdAllocMng profilesMng; + uint8_t pointedOwners; + uint32_t requiredAction; + e_FmPcdEngine nextEngineOnGreen; /**< Green next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Green next engine params */ + + e_FmPcdEngine nextEngineOnYellow; /**< Yellow next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Yellow next engine params */ + + e_FmPcdEngine nextEngineOnRed; /**< Red next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Red next engine params */ +} t_FmPcdPlcrProfile; + +typedef struct { + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + t_FmPcdPlcrProfile profiles[FM_PCD_PLCR_NUM_ENTRIES]; + uint16_t numOfSharedProfiles; + uint16_t sharedProfilesIds[FM_PCD_PLCR_NUM_ENTRIES]; + t_FmPcdPlcrMapParam portsMapping[FM_MAX_NUM_OF_PORTS]; +} t_FmPcdPlcr; + +typedef struct { + uint32_t *p_SwPrsCode; + uint32_t *p_CurrSwPrs; + uint8_t currLabel; + t_FmPcdPrsRegs *p_FmPcdPrsRegs; + t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS]; + uint32_t fmPcdPrsPortIdStatistics; +} t_FmPcdPrs; + +typedef struct { + struct { + e_NetHeaderType hdr; + protocolOpt_t opt; /* only one option !! */ + } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS]; +} t_FmPcdIntDistinctionUnit; + +typedef struct { + e_NetHeaderType hdr; + e_NetHeaderType aliasHdr; +} t_FmPcdNetEnvAliases; + +typedef struct { + volatile bool lock; + bool used; + uint8_t owners; + uint8_t clsPlanGrpId; + t_FmPcdIntDistinctionUnit units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; + uint32_t unitsVectors[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; + uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS]; + uint32_t macsecVector; + t_FmPcdNetEnvAliases aliasHdrs[FM_PCD_MAX_NUM_OF_PRIVATE_HDRS]; +} t_FmPcdNetEnv; + +typedef struct { + bool plcrAutoRefresh; + + uint16_t prsMaxParseCycleLimit; +} t_FmPcdDriverParam; + +typedef struct { + t_Handle h_Fm; + t_Handle h_FmMuram; + uint64_t physicalMuramBase; + volatile bool lock; + t_Handle h_Spinlock; + t_Handle h_IpcSession; /* relevant for guest only */ + bool enabled; + uint8_t guestId; /**< Guest Partition Id */ + uint8_t numOfEnabledGuestPartitionsPcds; + char fmPcdModuleName[MODULE_NAME_SIZE]; + char fmPcdIpcHandlerModuleName[MODULE_NAME_SIZE]; /* relevant for guest only - this is the master's name */ + t_FmPcdNetEnv netEnvs[FM_MAX_NUM_OF_PORTS]; + t_FmPcdKg *p_FmPcdKg; + t_FmPcdPlcr *p_FmPcdPlcr; + t_FmPcdPrs *p_FmPcdPrs; + + t_Handle h_Hc; + + uint32_t exceptions; + t_FmPcdExceptionCallback *f_Exception; + t_FmPcdIdExceptionCallback *f_FmPcdIndexedException; + t_Handle h_App; + + t_FmPcdDriverParam *p_FmPcdDriverParam; +} t_FmPcd; + + +/***********************************************************************/ +/* PCD internal routines */ +/***********************************************************************/ + +/**************************************************************************//** + + @Group FM_PCD_InterModule_grp FM PCD Inter-Module Unit + + @Description FM PCD Inter Module functions - + These are not User API routines but routines that may be called + from other modules. This will be the case in a single core environment, + where instead of useing the XX messeging mechanism, the routines may be + called from other modules. 
In a multicore environment, the other modules may + be run by other cores and therefor these routines may not be called directly. + + @{ +*//***************************************************************************/ + +t_Error PcdGetVectorForOpt(t_FmPcd *p_FmPcd, uint8_t netEnvId, protocolOpt_t opt, uint32_t *p_Vector); +t_Error PcdGetUnitsVector(t_FmPcd *p_FmPcd, t_NetEnvParams *p_Params); +bool PcdNetEnvIsUnitWithoutOpts(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint32_t unitVector); +t_Error PcdGetClsPlanGrpParams(t_FmPcd *p_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_GrpParams); +void FmPcdSetClsPlanGrpId(t_FmPcd *p_FmPcd, uint8_t netEnvId, uint8_t clsPlanGrpId); +e_NetHeaderType FmPcdGetAliasHdr(t_FmPcd *p_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr); +uint8_t FmPcdNetEnvGetUnitIdForSingleHdr(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr); + +t_Handle KgConfig( t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams); +t_Error KgInit(t_FmPcd *p_FmPcd); +t_Error KgFree(t_FmPcd *p_FmPcd); +void KgSetClsPlan(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanSet *p_Set); +bool KgIsSchemeAlwaysDirect(t_Handle h_FmPcd, uint8_t schemeId); +void KgEnable(t_FmPcd *p_FmPcd); +void KgDisable(t_FmPcd *p_FmPcd); +t_Error KgAllocClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t *p_First); +void KgFreeClsPlanEntries(t_Handle h_FmPcd, uint16_t numOfClsPlanEntries, uint8_t guestId, uint8_t base); + +/* only for MULTI partittion */ +t_Error FmPcdKgAllocSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds); +t_Error FmPcdKgFreeSchemes(t_Handle h_FmPcd, uint8_t numOfSchemes, uint8_t guestId, uint8_t *p_SchemesIds); +/* only for SINGLE partittion */ +t_Error KgBindPortToSchemes(t_Handle h_FmPcd , uint8_t hardwarePortId, uint32_t spReg); + +t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams); +t_Error PlcrInit(t_FmPcd *p_FmPcd); +t_Error PlcrFree(t_FmPcd *p_FmPcd); +void PlcrEnable(t_FmPcd *p_FmPcd); +void PlcrDisable(t_FmPcd *p_FmPcd); +t_Error PlcrFreeProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint16_t num, uint16_t base); +t_Error PlcrAllocProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles, uint16_t *p_Base); +t_Error PlcrAllocSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds); +void PlcrFreeSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds); + +t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams); +t_Error PrsInit(t_FmPcd *p_FmPcd); +void PrsEnable(t_FmPcd *p_FmPcd); +void PrsDisable(t_FmPcd *p_FmPcd); +void PrsFree(t_FmPcd *p_FmPcd ); +t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include); + +t_Handle FmPcdCcConfig(t_Handle h_FmPcd, t_FmPcdParams *p_FmPcdParams); +t_Error FmPcdCcGetGrpParams(t_Handle treeId, uint8_t grpId, uint32_t *p_GrpBits, uint8_t *p_GrpBase); +uint8_t FmPcdCcGetOffset(t_Handle h_CcNode); +uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode); +uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode); + +void FmPcdManipUpdateOwner(t_Handle h_Manip, bool add); +t_Error FmPcdManipCheckParamsForCcNextEgine(t_FmPcdCcNextEngineParams *p_InfoForManip, uint32_t *requiredAction); +void FmPcdManipUpdateAdResultForCc(t_Handle h_Manip, t_Handle p_Ad, t_Handle *p_AdNew); +void FmPcdManipUpdateAdContLookupForCc(t_Handle h_Manip, t_Handle p_Ad, t_Handle *p_AdNew, uint32_t adTableOffset); +uint32_t FmPcdManipCheckNia(t_Handle h_FmPcd, t_Handle h_Ad); +void FmPcdManipUpdateOwner(t_Handle 
h_Manip, bool add); +t_Error FmPcdManipCheckParamsWithCcNodeParams(t_Handle h_Manip, t_Handle h_FmPcdCcNode); + +static __inline__ t_Handle FmPcdGetMuramHandle(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + ASSERT_COND(p_FmPcd); + return p_FmPcd->h_FmMuram; +} + +static __inline__ uint64_t FmPcdGetMuramPhysBase(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + ASSERT_COND(p_FmPcd); + return p_FmPcd->physicalMuramBase; +} + + +#endif /* __FM_PCD_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_pcd_ipc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_pcd_ipc.h @@ -0,0 +1,326 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_pcd_ipc.h + + @Description FM PCD Inter-Partition prototypes, structures and definitions. +*//***************************************************************************/ +#ifndef __FM_PCD_IPC_H +#define __FM_PCD_IPC_H + +#include "std_ext.h" + + +/**************************************************************************//** + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +/**************************************************************************//** + @Description Structure for getting a sw parser address according to a label + Fields commented 'IN' are passed by the port module to be used + by the FM module. + Fields commented 'OUT' will be filled by FM before returning to port. 
+*//***************************************************************************/ +typedef _Packed struct t_FmPcdIpcSwPrsLable +{ + uint32_t enumHdr; /**< IN. The existance of this header will envoke + the sw parser code. */ + uint8_t indexPerHdr; /**< IN. Normally 0, if more than one sw parser + attachments for the same header, use this + + index to distinguish between them. */ +} _PackedType t_FmPcdIpcSwPrsLable; + +/**************************************************************************//** + @Description Structure for port-PCD communication. + Fields commented 'IN' are passed by the port module to be used + by the FM module. + Fields commented 'OUT' will be filled by FM before returning to port. + Some fields are optional (depending on configuration) and + will be analized by the port and FM modules accordingly. +*//***************************************************************************/ +typedef struct t_FmPcdIpcKgSchemesParams +{ + uint8_t guestId; /**< IN */ + uint8_t numOfSchemes; /**< IN */ + uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES]; /**< OUT */ +} _PackedType t_FmPcdIpcKgSchemesParams; + +typedef struct t_FmPcdIpcKgClsPlanParams +{ + uint8_t guestId; /**< IN */ + uint16_t numOfClsPlanEntries; /**< IN */ + uint8_t clsPlanBase; /**< IN in alloc only */ +} _PackedType t_FmPcdIpcKgClsPlanParams; + +typedef _Packed struct t_FmPcdIpcPlcrAllocParams +{ + uint16_t num; + uint8_t hardwarePortId; + uint16_t plcrProfilesBase; +} _PackedType t_FmPcdIpcPlcrAllocParams; + +typedef _Packed struct t_FmPcdIpcSharedPlcrAllocParams +{ + uint16_t num; /**< IN */ + //uint16_t profilesIds[FM_PCD_PLCR_NUM_ENTRIES]; /**< OUT */ + uint32_t sharedProfilesMask[8]; +} _PackedType t_FmPcdIpcSharedPlcrAllocParams; + +typedef _Packed struct t_FmPcdIpcPrsIncludePort +{ + uint8_t hardwarePortId; /* IN */ + bool include; /* IN */ +} _PackedType t_FmPcdIpcPrsIncludePort; + + +#define FM_PCD_MAX_REPLY_SIZE 16 +#define FM_PCD_MAX_MSG_SIZE 36 +#define FM_PCD_MAX_REPLY_BODY_SIZE 36 + +typedef _Packed struct +{ + uint32_t msgId; + uint8_t msgBody[FM_PCD_MAX_MSG_SIZE]; +} _PackedType t_FmPcdIpcMsg; + +typedef _Packed struct t_FmPcdIpcReply +{ + uint32_t error; + uint8_t replyBody[FM_PCD_MAX_REPLY_BODY_SIZE]; +} _PackedType t_FmPcdIpcReply; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... 
*/ + + + +/**************************************************************************//** + @Function FM_PCD_ALLOC_KG_SCHEMES + + @Description Used by FM PCD front-end in order to allocate KG resources + + @Param[in/out] t_FmPcdIpcKgAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_KG_SCHEMES 3 + +/**************************************************************************//** + @Function FM_PCD_FREE_KG_SCHEMES + + @Description Used by FM PCD front-end in order to Free KG resources + + @Param[in/out] t_FmPcdIpcKgSchemesParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_KG_SCHEMES 4 + +/**************************************************************************//** + @Function FM_PCD_ALLOC_PROFILES + + @Description Used by FM PCD front-end in order to allocate Policer profiles + + @Param[in/out] t_FmPcdIpcKgSchemesParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_PROFILES 5 + +/**************************************************************************//** + @Function FM_PCD_FREE_PROFILES + + @Description Used by FM PCD front-end in order to Free Policer profiles + + @Param[in/out] t_FmPcdIpcPlcrAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_PROFILES 6 + +/**************************************************************************//** + @Function FM_PCD_GET_PHYS_MURAM_BASE + + @Description Used by FM PCD front-end in order to get MURAM base address + + @Param[in/out] t_FmPcdIcPhysAddr Pointer +*//***************************************************************************/ +#define FM_PCD_GET_PHYS_MURAM_BASE 7 + +/**************************************************************************//** + @Function FM_PCD_GET_SW_PRS_OFFSET + + @Description Used by FM front-end to get the SW parser offset of the start of + code relevant to a given label. + + @Param[in/out] t_FmPcdIpcSwPrsLable Pointer +*//***************************************************************************/ +#define FM_PCD_GET_SW_PRS_OFFSET 8 + +/**************************************************************************//** + @Function FM_PCD_ALLOC_SHARED_PROFILES + + @Description Used by FM PCD front-end in order to allocate shared profiles + + @Param[in/out] t_FmPcdIpcSharedPlcrAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_SHARED_PROFILES 9 + +/**************************************************************************//** + @Function FM_PCD_FREE_SHARED_PROFILES + + @Description Used by FM PCD front-end in order to free shared profiles + + @Param[in/out] t_FmPcdIpcSharedPlcrAllocParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_SHARED_PROFILES 10 + +/**************************************************************************//** + @Function FM_PCD_MASTER_IS_ENABLED + + @Description Used by FM front-end in order to verify + PCD enablement. 
+ + @Param[in] bool Pointer +*//***************************************************************************/ +#define FM_PCD_MASTER_IS_ENABLED 15 + +/**************************************************************************//** + @Function FM_PCD_GUEST_DISABLE + + @Description Used by FM front-end to inform back-end when + front-end PCD is disabled + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_GUEST_DISABLE 16 + +/**************************************************************************//** + @Function FM_PCD_DUMP_REGS + + @Description Used by FM front-end to dump all PCD registers + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_DUMP_REGS 17 + +/**************************************************************************//** + @Function FM_PCD_KG_DUMP_REGS + + @Description Used by FM front-end to dump KG registers + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_KG_DUMP_REGS 18 + +/**************************************************************************//** + @Function FM_PCD_PLCR_DUMP_REGS + + @Description Used by FM front-end to dump PLCR registers + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_PLCR_DUMP_REGS 19 + +/**************************************************************************//** + @Function FM_PCD_PLCR_PROFILE_DUMP_REGS + + @Description Used by FM front-end to dump PLCR specified profile registers + + @Param[in] t_Handle Pointer +*//***************************************************************************/ +#define FM_PCD_PLCR_PROFILE_DUMP_REGS 20 + +/**************************************************************************//** + @Function FM_PCD_PRS_DUMP_REGS + + @Description Used by FM front-end to dump PRS registers + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_PRS_DUMP_REGS 21 + +/**************************************************************************//** + @Function FM_PCD_FREE_KG_CLSPLAN + + @Description Used by FM PCD front-end in order to Free KG classification plan entries + + @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer +*//***************************************************************************/ +#define FM_PCD_FREE_KG_CLSPLAN 22 + +/**************************************************************************//** + @Function FM_PCD_ALLOC_KG_CLSPLAN + + @Description Used by FM PCD front-end in order to allocate KG classification plan entries + + @Param[in/out] t_FmPcdIpcKgClsPlanParams Pointer +*//***************************************************************************/ +#define FM_PCD_ALLOC_KG_CLSPLAN 23 + +/**************************************************************************//** + @Function FM_PCD_MASTER_IS_ALIVE + + @Description Used by FM front-end to check that back-end exists + + @Param[in] None +*//***************************************************************************/ +#define FM_PCD_MASTER_IS_ALIVE 24 + +/**************************************************************************//** + @Function FM_PCD_GET_COUNTER + + @Description Used by FM front-end to read PCD counters + + @Param[in/out] t_FmPcdIpcGetCounter Pointer +*//***************************************************************************/ +#define FM_PCD_GET_COUNTER 25 + 
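+/**************************************************************************//**
+ @Description   Usage sketch for the message IDs above (illustrative only; it
+                mirrors the callers in fm_plcr.c, and the 'h_IpcSession' handle,
+                the 'getCounter' variable and the assumed 32-bit counter reply
+                are examples, not definitions from this header):
+
+                    t_FmPcdIpcMsg   msg;
+                    t_FmPcdIpcReply reply;
+                    uint32_t        replyLength = sizeof(uint32_t) + sizeof(uint32_t);
+                                    /* error word + counter value (assumed) */
+
+                    memset(&msg, 0, sizeof(msg));
+                    memset(&reply, 0, sizeof(reply));
+                    msg.msgId = FM_PCD_GET_COUNTER;
+                    memcpy(msg.msgBody, &getCounter, sizeof(getCounter));
+                    err = XX_IpcSendMessage(h_IpcSession,
+                                            (uint8_t*)&msg,
+                                            sizeof(msg.msgId) + sizeof(getCounter),
+                                            (uint8_t*)&reply,
+                                            &replyLength,
+                                            NULL,
+                                            NULL);
+
+                where 'getCounter' is a t_FmPcdIpcGetCounter describing the
+                requested counter.
+*//***************************************************************************/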
+/**************************************************************************//** + @Function FM_PCD_PRS_INC_PORT_STATS + + @Description Used by FM front-end to set/clear statistics for port + + @Param[in/out] t_FmPcdIpcPrsIncludePort Pointer +*//***************************************************************************/ +#define FM_PCD_PRS_INC_PORT_STATS 26 +/** @} */ /* end of FM_PCD_IPC_grp group */ +/** @} */ /* end of FM_grp group */ + + +#endif /* __FM_PCD_IPC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_plcr.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_plcr.c @@ -0,0 +1,1702 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_plcr.c + + @Description FM PCD POLICER... 
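+                 (allocation of private and shared policer profiles, profile
+                 configuration, rate/burst register calculation, counters and
+                 register dumps)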
+*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "net_ext.h" +#include "fm_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_hc.h" +#include "fm_pcd_ipc.h" + + +static bool FmPcdPlcrIsProfileShared(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint16_t i; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, FALSE); + + for(i=0;ip_FmPcdPlcr->numOfSharedProfiles;i++) + if(p_FmPcd->p_FmPcdPlcr->sharedProfilesIds[i] == absoluteProfileId) + return TRUE; + return FALSE; +} + +static t_Error SetProfileNia(t_FmPcd *p_FmPcd, e_FmPcdEngine nextEngine, u_FmPcdPlcrNextEngineParams *p_NextEngineParams, uint32_t *nextAction) +{ + uint32_t nia; + uint16_t absoluteProfileId = (uint16_t)(PTR_TO_UINT(p_NextEngineParams->h_Profile)-1); + uint8_t relativeSchemeId, physicatSchemeId; + + nia = FM_PCD_PLCR_NIA_VALID; + + switch (nextEngine) + { + case e_FM_PCD_DONE : + switch (p_NextEngineParams->action) + { + case e_FM_PCD_DROP_FRAME : + nia |= (NIA_ENG_BMI | NIA_BMI_AC_DISCARD); + break; + case e_FM_PCD_ENQ_FRAME: + nia |= (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + case e_FM_PCD_KG: + physicatSchemeId = (uint8_t)(PTR_TO_UINT(p_NextEngineParams->h_DirectScheme)-1); + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPcd, physicatSchemeId); + if(relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); + if (!FmPcdKgIsSchemeValidSw(p_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme.")); + if(!KgIsSchemeAlwaysDirect(p_FmPcd, relativeSchemeId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Policer Profile may point only to a scheme that is always direct.")); + nia |= NIA_ENG_KG | NIA_KG_DIRECT | physicatSchemeId; + break; + case e_FM_PCD_PLCR: + if(!FmPcdPlcrIsProfileShared(p_FmPcd, absoluteProfileId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next profile must be a shared profile")); + if(!FmPcdPlcrIsProfileValid(p_FmPcd, absoluteProfileId)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile ")); + nia |= NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + *nextAction = nia; + + return E_OK; +} + +static uint32_t FPP_Function(uint32_t fpp) +{ + if(fpp > 15) + return 15 - (0x1f - fpp); + else + return 16 + fpp; +} + +static void GetInfoRateReg(e_FmPcdPlcrRateMode rateMode, + uint32_t rate, + uint64_t tsuInTenthNano, + uint32_t fppShift, + uint64_t *p_Integer, + uint64_t *p_Fraction) +{ + uint64_t tmp, div; + + if(rateMode == e_FM_PCD_PLCR_BYTE_MODE) + { + /* now we calculate the initial integer for the bigger rate */ + /* from Kbps to Bytes/TSU */ + tmp = (uint64_t)rate; + tmp *= 1000; /* kb --> b */ + tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */ + + div = 1000000000; /* nano */ + div *= 10; /* 10 nano */ + div *= 8; /* bit to byte */ + } + else + { + /* now we calculate the initial integer for the bigger rate */ + /* from Kbps to Bytes/TSU */ + tmp = (uint64_t)rate; + tmp *= tsuInTenthNano; /* bps --> bpTsu(in 10nano) */ + + div = 1000000000; /* nano */ + div *= 10; /* 10 nano */ + } + *p_Integer = (tmp<h_Fm); /* TimeStamp per nano seconds units */ + /* we want the tsu to count 10 nano for better precision normally tsu is 3.9 nano, now we will get 39 */ + 
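+    /* Illustrative arithmetic (example values, not taken from the code below):
+     * with a 3.9ns timestamp unit, a 10Gb/s committed rate corresponds to
+     * 1.25e9 * 3.9e-9 ~= 4.875 bytes per timestamp unit.  The rate registers
+     * (fmpl_pecir / fmpl_pepepir_eir) hold this as 16.16 fixed point (integer
+     * in the upper 16 bits, fraction in the lower 16), and fppShift records
+     * how far the value can be shifted left so that unused integer bits carry
+     * extra fraction precision; FPP_Function() encodes that shift for
+     * PEMODE[FPP]. */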
tsuInTenthNanos = (uint32_t)(1000*10/(1<comittedInfoRate > p_NonPassthroughAlgParam->peakOrAccessiveInfoRate) + GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->comittedInfoRate, tsuInTenthNanos, 0, &integer, &fraction); + else + GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrAccessiveInfoRate, tsuInTenthNanos, 0, &integer, &fraction); + + + /* we shift integer, as in cir/pir it is represented by the MSB 16 bits, and + * the LSB bits are for the fraction */ + temp = (uint32_t)((integer<<16) & 0x00000000FFFFFFFF); + /* temp is effected by the rate. For low rates it may be as low as 0, and then we'll + * take max fpp=31. + * For high rates it will never exceed the 32 bit reg (after the 16 shift), as it is + * limited by the 10G physical port. + */ + if(temp != 0) + { + /* count zeroes left of the higher used bit (in order to shift the value such that + * unused bits may be used for fraction). + */ + while ((temp & 0x80000000) == 0) + { + temp = temp << 1; + fppShift++; + } + if(fppShift > 15) + { + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, ("timeStampPeriod to Information rate ratio is too small")); + return; + } + } + else + { + temp = (uint32_t)fraction; /* fraction will alyas be smaller than 2^16 */ + if(!temp) + /* integer and fraction are 0, we set fpp to its max val */ + fppShift = 31; + else + { + /* integer was 0 but fraction is not. fpp is 16 for the integer, + * + all left zeroes of the fraction. */ + fppShift=16; + /* count zeroes left of the higher used bit (in order to shift the value such that + * unused bits may be used for fraction). + */ + while ((temp & 0x8000) == 0) + { + temp = temp << 1; + fppShift++; + } + } + } + + /* + * This means that the FM TS register will now be used so that 'count' bits are for + * fraction and the rest for integer */ + /* now we re-calculate cir */ + GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->comittedInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction); + *cir = (uint32_t)(integer << 16 | (fraction & 0xFFFF)); + GetInfoRateReg(p_NonPassthroughAlgParam->rateMode, p_NonPassthroughAlgParam->peakOrAccessiveInfoRate, tsuInTenthNanos, fppShift, &integer, &fraction); + *pir_eir = (uint32_t)(integer << 16 | (fraction & 0xFFFF)); + + *cbs = p_NonPassthroughAlgParam->comittedBurstSize; + *pbs_ebs = p_NonPassthroughAlgParam->peakOrAccessiveBurstSize; + + /* get fpp as it should be written to reg.*/ + *fpp = FPP_Function(fppShift); + +} + +static void WritePar(t_FmPcd *p_FmPcd, uint32_t par) +{ + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + WRITE_UINT32(p_FmPcdPlcrRegs->fmpl_par, par); + + while(GET_UINT32(p_FmPcdPlcrRegs->fmpl_par) & FM_PCD_PLCR_PAR_GO) ; + +} + +/*********************************************/ +/*............Policer Exception..............*/ +/*********************************************/ +static void PcdPlcrException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, mask, force; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr); + mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ier); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr); + if(force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_ifr, force & ~event); + + + 
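+    /* write the handled events back to FMPL_EVR to acknowledge them before
+     * invoking the application callbacks below */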
WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_evr, event); + + if(event & FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE); + if(event & FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE); + +} + +/* ..... */ + +static void PcdPlcrErrorException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, force, captureReg, mask; + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + event = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr); + mask = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eier); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr); + if(force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eifr, force & ~event); + + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_eevr, event); + + if(event & FM_PCD_PLCR_DOUBLE_ECC) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC); + if(event & FM_PCD_PLCR_INIT_ENTRY_ERROR) + { + captureReg = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr); + /*ASSERT_COND(captureReg & PLCR_ERR_UNINIT_CAP); + p_UnInitCapt->profileNum = (uint8_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK); + p_UnInitCapt->portId = (uint8_t)((captureReg & PLCR_ERR_UNINIT_PID_MASK) >>PLCR_ERR_UNINIT_PID_SHIFT) ; + p_UnInitCapt->absolute = (bool)(captureReg & PLCR_ERR_UNINIT_ABSOLUTE_MASK);*/ + p_FmPcd->f_FmPcdIndexedException(p_FmPcd->h_App,e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR,(uint16_t)(captureReg & PLCR_ERR_UNINIT_NUM_MASK)); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_upcr, PLCR_ERR_UNINIT_CAP); + } +} + +void FmPcdPlcrUpatePointedOwner(t_Handle h_FmPcd, uint16_t absoluteProfileId, bool add) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + if(add) + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].pointedOwners++; + else + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].pointedOwners--; +} + +uint32_t FmPcdPlcrGetPointedOwners(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].pointedOwners; +} +uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + return p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction; +} + +t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdIpcPlcrAllocParams ipcPlcrParams; + t_Error err = E_OK; + uint16_t base; + uint16_t swPortIndex = 0; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + if(!numOfProfiles) + return E_OK; + + memset(&ipcPlcrParams, 0, sizeof(ipcPlcrParams)); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + /* Alloc resources using IPC messaging */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + ipcPlcrParams.num = numOfProfiles; + ipcPlcrParams.hardwarePortId = hardwarePortId; + msg.msgId = FM_PCD_ALLOC_PROFILES; + memcpy(msg.msgBody, &ipcPlcrParams, sizeof(ipcPlcrParams)); + replyLength = 
sizeof(uint32_t) + sizeof(uint16_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(ipcPlcrParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err,NO_MSG); + if (replyLength != sizeof(uint32_t) + sizeof(uint16_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + if((t_Error)reply.error != E_OK) + RETURN_ERROR(MAJOR, (t_Error)reply.error, ("PLCR profiles allocation failed")); + + memcpy((uint8_t*)&base, reply.replyBody, sizeof(uint16_t)); + } + else /* master */ + { + err = PlcrAllocProfiles(p_FmPcd, hardwarePortId, numOfProfiles, &base); + if(err) + RETURN_ERROR(MAJOR, err,NO_MSG); + } + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = numOfProfiles; + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = base; + + return E_OK; +} + +t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdIpcPlcrAllocParams ipcPlcrParams; + t_Error err = E_OK; + uint16_t swPortIndex = 0; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + /* Alloc resources using IPC messaging */ + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + ipcPlcrParams.num = p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles; + ipcPlcrParams.hardwarePortId = hardwarePortId; + ipcPlcrParams.plcrProfilesBase = p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase; + msg.msgId = FM_PCD_FREE_PROFILES; + memcpy(msg.msgBody, &ipcPlcrParams, sizeof(ipcPlcrParams)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(ipcPlcrParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err,NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + if ((t_Error)reply.error != E_OK) + RETURN_ERROR(MINOR, (t_Error)reply.error, ("PLCR Free Profiles failed")); + } + else /* master */ + { + err = PlcrFreeProfiles(p_FmPcd, hardwarePortId, p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles, p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase); + if(err) + RETURN_ERROR(MAJOR, err,NO_MSG); + } + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles = 0; + p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase = 0; + + return E_OK; +} + +bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + + return p_FmPcdPlcr->profiles[absoluteProfileId].valid; +} + +t_Error PlcrAllocProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles, uint16_t *p_Base) +{ + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + uint32_t profilesFound, log2Num, tmpReg32; + uint32_t intFlags; + uint16_t first, i; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + if(!numOfProfiles) + return E_OK; + + ASSERT_COND(hardwarePortId); + + if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big.")); + + if (!POWER_OF_2(numOfProfiles)) + 
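+        /* the profiles window is encoded in FMPL_PMR as a base index plus
+         * log2(numOfProfiles), so only power-of-2 window sizes can be expressed */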
RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numProfiles must be a power of 2.")); + + intFlags = FmPcdLock(p_FmPcd); + + if(GET_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1]) & FM_PCD_PLCR_PMR_V) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("The requesting port has already an allocated profiles window.")); + } + + first = 0; + profilesFound = 0; + for(i=0;ip_FmPcdPlcr->profiles[i].profilesMng.allocated) + { + profilesFound++; + i++; + if(profilesFound == numOfProfiles) + break; + } + else + { + profilesFound = 0; + /* advance i to the next aligned address */ + first = i = (uint8_t)(first + numOfProfiles); + } + } + if(profilesFound == numOfProfiles) + { + for(i = first; ip_FmPcdPlcr->profiles[i].profilesMng.allocated = TRUE; + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = hardwarePortId; + } + } + else + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MINOR, E_FULL, ("No profiles.")); + } + + /**********************FMPL_PMRx******************/ + LOG2((uint64_t)numOfProfiles, log2Num); + tmpReg32 = first; + tmpReg32 |= log2Num << 16; + tmpReg32 |= FM_PCD_PLCR_PMR_V; + WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], tmpReg32); + + *p_Base = first; + + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; +} + +t_Error PlcrAllocSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds) +{ + uint32_t profilesFound; + uint16_t i, k=0; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + if(!numOfProfiles) + return E_OK; + + if (numOfProfiles>FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("numProfiles is too big.")); + + profilesFound = 0; + for(i=0;ip_FmPcdPlcr->profiles[i].profilesMng.allocated) + { + profilesFound++; + profilesIds[k] = i; + k++; + if(profilesFound == numOfProfiles) + break; + } + } + if(profilesFound != numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_STATE,NO_MSG); + for(i = 0;ip_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = TRUE; + p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.ownerId = 0; + } + + return E_OK; +} + +t_Error PlcrFreeProfiles(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles, uint16_t base) +{ + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + uint16_t i; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + WRITE_UINT32(p_Regs->fmpl_pmr[hardwarePortId-1], 0); + + for(i = base; ip_FmPcdPlcr->profiles[i].profilesMng.ownerId == hardwarePortId); + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated); + + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.allocated = FALSE; + p_FmPcd->p_FmPcdPlcr->profiles[i].profilesMng.ownerId = 0; + } + + return E_OK; +} + +void PlcrFreeSharedProfiles(t_FmPcd *p_FmPcd, uint16_t numOfProfiles, uint16_t *profilesIds) +{ + uint16_t i; + + SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE); + + ASSERT_COND(FmIsMaster(p_FmPcd->h_Fm)); + for(i=0;ip_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated); + p_FmPcd->p_FmPcdPlcr->profiles[profilesIds[i]].profilesMng.allocated = FALSE; + } +} + +void PlcrEnable(t_FmPcd *p_FmPcd) +{ + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + + WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) | FM_PCD_PLCR_GCR_EN); +} + +void PlcrDisable(t_FmPcd *p_FmPcd) +{ + t_FmPcdPlcrRegs *p_Regs = 
p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + + WRITE_UINT32(p_Regs->fmpl_gcr, GET_UINT32(p_Regs->fmpl_gcr) & ~FM_PCD_PLCR_GCR_EN); +} + +t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + if(!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPlcrStatistics - guest mode!")); + + tmpReg32 = GET_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr); + if(enable) + tmpReg32 |= FM_PCD_PLCR_GCR_STEN; + else + tmpReg32 &= ~FM_PCD_PLCR_GCR_STEN; + + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_gcr, tmpReg32); + return E_OK; +} + +t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + if(!FmIsMaster(p_FmPcd->h_Fm)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPlcrAutoRefreshMode - guest mode!")); + + p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = enable; + + return E_OK; +} + + +t_Error FmPcdPlcrBuildProfile(t_Handle h_FmPcd, t_FmPcdPlcrProfileParams *p_Profile, t_FmPcdPlcrInterModuleProfileRegs *p_PlcrRegs) +{ + + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_Error err = E_OK; + uint32_t pemode, gnia, ynia, rnia; + +/* Set G, Y, R Nia */ + err = SetProfileNia(p_FmPcd, p_Profile->nextEngineOnGreen, &(p_Profile->paramsOnGreen), &gnia); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + err = SetProfileNia(p_FmPcd, p_Profile->nextEngineOnYellow, &(p_Profile->paramsOnYellow), &ynia); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + err = SetProfileNia(p_FmPcd, p_Profile->nextEngineOnRed, &(p_Profile->paramsOnRed), &rnia); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + +/* Mode fmpl_pemode */ + pemode = FM_PCD_PLCR_PEMODE_PI; + + switch (p_Profile->algSelection) + { + case e_FM_PCD_PLCR_PASS_THROUGH: + p_PlcrRegs->fmpl_pecir = 0; + p_PlcrRegs->fmpl_pecbs = 0; + p_PlcrRegs->fmpl_pepepir_eir = 0; + p_PlcrRegs->fmpl_pepbs_ebs = 0; + p_PlcrRegs->fmpl_pelts = 0; + p_PlcrRegs->fmpl_pects = 0; + p_PlcrRegs->fmpl_pepts_ets = 0; + pemode &= ~FM_PCD_PLCR_PEMODE_ALG_MASK; + switch (p_Profile->colorMode) + { + case e_FM_PCD_PLCR_COLOR_BLIND: + pemode |= FM_PCD_PLCR_PEMODE_CBLND; + switch (p_Profile->color.dfltColor) + { + case e_FM_PCD_PLCR_GREEN: + pemode &= ~FM_PCD_PLCR_PEMODE_DEFC_MASK; + break; + case e_FM_PCD_PLCR_YELLOW: + pemode |= FM_PCD_PLCR_PEMODE_DEFC_Y; + break; + case e_FM_PCD_PLCR_RED: + pemode |= FM_PCD_PLCR_PEMODE_DEFC_R; + break; + case e_FM_PCD_PLCR_OVERRIDE: + pemode |= FM_PCD_PLCR_PEMODE_DEFC_OVERRIDE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + break; + case e_FM_PCD_PLCR_COLOR_AWARE: + pemode &= ~FM_PCD_PLCR_PEMODE_CBLND; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + + case e_FM_PCD_PLCR_RFC_2698: + /* Select algorithm MODE[ALG] = "01" */ + pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC2698; + if (p_Profile->nonPassthroughAlgParams.comittedInfoRate > p_Profile->nonPassthroughAlgParams.peakOrAccessiveInfoRate) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("in RFC2698 Peak rate must be equal or larger than comittedInfoRate.")); + goto cont_rfc; + 
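+        /* RFC2698 jumps (via cont_rfc) into the configuration below, which is
+         * shared with RFC4115: color mode, rate mode and the CIR/PIR/CBS/PBS
+         * register calculation */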
case e_FM_PCD_PLCR_RFC_4115: + /* Select algorithm MODE[ALG] = "10" */ + pemode |= FM_PCD_PLCR_PEMODE_ALG_RFC4115; +cont_rfc: + /* Select Color-Blind / Color-Aware operation (MODE[CBLND]) */ + switch (p_Profile->colorMode) + { + case e_FM_PCD_PLCR_COLOR_BLIND: + pemode |= FM_PCD_PLCR_PEMODE_CBLND; + break; + case e_FM_PCD_PLCR_COLOR_AWARE: + pemode &= ~FM_PCD_PLCR_PEMODE_CBLND; + /*In color aware more select override color interpretation (MODE[OVCLR]) */ + switch (p_Profile->color.override) + { + case e_FM_PCD_PLCR_GREEN: + pemode &= ~FM_PCD_PLCR_PEMODE_OVCLR_MASK; + break; + case e_FM_PCD_PLCR_YELLOW: + pemode |= FM_PCD_PLCR_PEMODE_OVCLR_Y; + break; + case e_FM_PCD_PLCR_RED: + pemode |= FM_PCD_PLCR_PEMODE_OVCLR_R; + break; + case e_FM_PCD_PLCR_OVERRIDE: + pemode |= FM_PCD_PLCR_PEMODE_OVCLR_G_NC; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + /* Select Measurement Unit Mode to BYTE or PACKET (MODE[PKT]) */ + switch (p_Profile->nonPassthroughAlgParams.rateMode) + { + case e_FM_PCD_PLCR_BYTE_MODE : + pemode &= ~FM_PCD_PLCR_PEMODE_PKT; + switch (p_Profile->nonPassthroughAlgParams.byteModeParams.frameLengthSelection) + { + case e_FM_PCD_PLCR_L2_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_L2; + break; + case e_FM_PCD_PLCR_L3_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_L3; + break; + case e_FM_PCD_PLCR_L4_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_L4; + break; + case e_FM_PCD_PLCR_FULL_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_FLS_FULL; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + switch (p_Profile->nonPassthroughAlgParams.byteModeParams.rollBackFrameSelection) + { + case e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN: + pemode &= ~FM_PCD_PLCR_PEMODE_RBFLS; + break; + case e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN: + pemode |= FM_PCD_PLCR_PEMODE_RBFLS; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + break; + case e_FM_PCD_PLCR_PACKET_MODE : + pemode |= FM_PCD_PLCR_PEMODE_PKT; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + /* Select timeStamp floating point position (MODE[FPP]) to fit the actual traffic rates. For PACKET + mode with low traffic rates move the fixed point to the left to increase fraction accuracy. For BYTE + mode with high traffic rates move the fixed point to the right to increase integer accuracy. */ + + /* Configure Traffic Parameters*/ + { + uint32_t cir=0, cbs=0, pir_eir=0, pbs_ebs=0, fpp=0; + + calcRates(h_FmPcd, &p_Profile->nonPassthroughAlgParams, &cir, &cbs, &pir_eir, &pbs_ebs, &fpp); + + /* Set Committed Information Rate (CIR) */ + p_PlcrRegs->fmpl_pecir = cir; + /* Set Committed Burst Size (CBS). */ + p_PlcrRegs->fmpl_pecbs = cbs; + /* Set Peak Information Rate (PIR_EIR used as PIR) */ + p_PlcrRegs->fmpl_pepepir_eir = pir_eir; + /* Set Peak Burst Size (PBS_EBS used as PBS) */ + p_PlcrRegs->fmpl_pepbs_ebs = pbs_ebs; + + /* Initialize the Metering Buckets to be full (write them with 0xFFFFFFFF. 
*/ + /* Peak Rate Token Bucket Size (PTS_ETS used as PTS) */ + p_PlcrRegs->fmpl_pepts_ets = 0xFFFFFFFF; + /* Committed Rate Token Bucket Size (CTS) */ + p_PlcrRegs->fmpl_pects = 0xFFFFFFFF; + + /* Set the FPP based on calculation */ + pemode |= (fpp << FM_PCD_PLCR_PEMODE_FPP_SHIFT); + } + break; /* FM_PCD_PLCR_PEMODE_ALG_RFC2698 , FM_PCD_PLCR_PEMODE_ALG_RFC4115 */ + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + p_PlcrRegs->fmpl_pemode = pemode; + + p_PlcrRegs->fmpl_pegnia = gnia; + p_PlcrRegs->fmpl_peynia = ynia; + p_PlcrRegs->fmpl_pernia = rnia; + + /* Zero Counters */ + p_PlcrRegs->fmpl_pegpc = 0; + p_PlcrRegs->fmpl_peypc = 0; + p_PlcrRegs->fmpl_perpc = 0; + p_PlcrRegs->fmpl_perypc = 0; + p_PlcrRegs->fmpl_perrpc = 0; + + return E_OK; +} + +void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(!p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = TRUE; +} + +void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid = FALSE; +} + +t_Handle PlcrConfig(t_FmPcd *p_FmPcd, t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcdPlcr *p_FmPcdPlcr; + /*uint8_t i=0;*/ + + UNUSED(p_FmPcd); + UNUSED(p_FmPcdParams); + + p_FmPcdPlcr = (t_FmPcdPlcr *) XX_Malloc(sizeof(t_FmPcdPlcr)); + if (!p_FmPcdPlcr) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Policer structure allocation FAILED")); + return NULL; + } + memset(p_FmPcdPlcr, 0, sizeof(t_FmPcdPlcr)); + if(p_FmPcd->guestId == NCSW_MASTER_ID) + { + p_FmPcdPlcr->p_FmPcdPlcrRegs = (t_FmPcdPlcrRegs *)UINT_TO_PTR(FmGetPcdPlcrBaseAddr(p_FmPcdParams->h_Fm)); + p_FmPcd->p_FmPcdDriverParam->plcrAutoRefresh = DEFAULT_plcrAutoRefresh; + p_FmPcd->exceptions |= (DEFAULT_fmPcdPlcrExceptions | DEFAULT_fmPcdPlcrErrorExceptions); + } + + p_FmPcdPlcr->numOfSharedProfiles = DEFAULT_numOfSharedPlcrProfiles; + + return p_FmPcdPlcr; +} + +t_Error PlcrInit(t_FmPcd *p_FmPcd) +{ + t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + uint32_t tmpReg32 = 0; + t_Error err = E_OK; + t_FmPcdPlcrRegs *p_Regs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + t_FmPcdIpcMsg msg; + uint32_t replyLength; + t_FmPcdIpcReply reply; + + if ((p_FmPcd->guestId != NCSW_MASTER_ID) && + (p_FmPcdPlcr->numOfSharedProfiles)) + { + int i, j, index = 0; + uint32_t walking1Mask = 0x80000000; + uint32_t sharedProfilesMask[FM_PCD_PLCR_NUM_ENTRIES/32]; + + memset(sharedProfilesMask, 0, FM_PCD_PLCR_NUM_ENTRIES/32 * sizeof(uint32_t)); + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_ALLOC_SHARED_PROFILES; + memcpy(msg.msgBody, (uint8_t *)&p_FmPcdPlcr->numOfSharedProfiles, sizeof(uint16_t)); + replyLength = sizeof(uint32_t) + sizeof(sharedProfilesMask); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId)+ sizeof(p_FmPcdPlcr->numOfSharedProfiles), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err,NO_MSG); + if (replyLength != (sizeof(uint32_t) + sizeof(sharedProfilesMask))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + memcpy(sharedProfilesMask, reply.replyBody, sizeof(sharedProfilesMask)); + /* translate 8 regs of 32 bits masks into an array of up to 256 indexes. 
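+         * each set bit j of sharedProfilesMask[i] grants this partition absolute
+         * profile (i*32 + j); the loop below walks a single bit from the MSB down
+         * and records the granted ids in sharedProfilesIds[]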
*/ + for(i = 0; ip_FmPcdPlcr->sharedProfilesIds[index++] = (uint16_t)(i*32+j); + walking1Mask >>= 1; + } + walking1Mask = 0x80000000; + } + } + return (t_Error)reply.error; + } + + if(p_FmPcdPlcr->numOfSharedProfiles) + { + err = PlcrAllocSharedProfiles(p_FmPcd, p_FmPcdPlcr->numOfSharedProfiles, p_FmPcd->p_FmPcdPlcr->sharedProfilesIds); + if(err) + RETURN_ERROR(MAJOR, err,NO_MSG); + } + + /**********************FMPL_GCR******************/ + tmpReg32 = 0; + tmpReg32 |= FM_PCD_PLCR_GCR_STEN; + if(p_Param->plcrAutoRefresh) + tmpReg32 |= FM_PCD_PLCR_GCR_DAR; + tmpReg32 |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME; + + WRITE_UINT32(p_Regs->fmpl_gcr, tmpReg32); + /**********************FMPL_GCR******************/ + + /**********************FMPL_EEVR******************/ + WRITE_UINT32(p_Regs->fmpl_eevr, (FM_PCD_PLCR_DOUBLE_ECC | FM_PCD_PLCR_INIT_ENTRY_ERROR)); + /**********************FMPL_EEVR******************/ + /**********************FMPL_EIER******************/ + tmpReg32 = 0; + if(p_FmPcd->exceptions & FM_PCD_EX_PLCR_DOUBLE_ECC) + { + FmEnableRamsEcc(p_FmPcd->h_Fm); + tmpReg32 |= FM_PCD_PLCR_DOUBLE_ECC; + } + if(p_FmPcd->exceptions & FM_PCD_EX_PLCR_INIT_ENTRY_ERROR) + tmpReg32 |= FM_PCD_PLCR_INIT_ENTRY_ERROR; + WRITE_UINT32(p_Regs->fmpl_eier, tmpReg32); + /**********************FMPL_EIER******************/ + + /**********************FMPL_EVR******************/ + WRITE_UINT32(p_Regs->fmpl_evr, (FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE | FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE)); + /**********************FMPL_EVR******************/ + /**********************FMPL_IER******************/ + tmpReg32 = 0; + if(p_FmPcd->exceptions & FM_PCD_EX_PLCR_PRAM_SELF_INIT_COMPLETE) + tmpReg32 |= FM_PCD_PLCR_PRAM_SELF_INIT_COMPLETE; + if(p_FmPcd->exceptions & FM_PCD_EX_PLCR_ATOMIC_ACTION_COMPLETE ) + tmpReg32 |= FM_PCD_PLCR_ATOMIC_ACTION_COMPLETE; + WRITE_UINT32(p_Regs->fmpl_ier, tmpReg32); + /**********************FMPL_IER******************/ + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_ERR, PcdPlcrErrorException, p_FmPcd); + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_NORMAL, PcdPlcrException, p_FmPcd); + + /* driver initializes one DFLT profile at the last entry*/ + /**********************FMPL_DPMR******************/ + tmpReg32 = 0; + WRITE_UINT32(p_Regs->fmpl_dpmr, tmpReg32); + p_FmPcd->p_FmPcdPlcr->profiles[0].profilesMng.allocated = TRUE; + + return E_OK; +} + +t_Error PlcrFree(t_FmPcd *p_FmPcd) +{ + t_Error err; + t_FmPcdIpcSharedPlcrAllocParams ipcSharedPlcrParams; + t_FmPcdIpcMsg msg; + + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_ERR); + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PLCR, 0, e_FM_INTR_TYPE_NORMAL); + + if(p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles) + { + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + int i; + memset(ipcSharedPlcrParams.sharedProfilesMask, 0, sizeof(ipcSharedPlcrParams.sharedProfilesMask)); + /* Free resources using IPC messaging */ + ipcSharedPlcrParams.num = p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles; + + /* translate the allocated profile id's to a 32bit * 8regs mask */ + for(i = 0;ip_FmPcdPlcr->numOfSharedProfiles;i++) + ipcSharedPlcrParams.sharedProfilesMask[p_FmPcd->p_FmPcdPlcr->sharedProfilesIds[i]/32] |= (0x80000000 >> (p_FmPcd->p_FmPcdPlcr->sharedProfilesIds[i] % 32)); + + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_FREE_SHARED_PROFILES; + memcpy(msg.msgBody, &ipcSharedPlcrParams, sizeof(ipcSharedPlcrParams)); + if ((err = 
XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(ipcSharedPlcrParams), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err,NO_MSG); + } + /* else + PlcrFreeSharedProfiles(p_FmPcd, p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles, p_FmPcd->p_FmPcdPlcr->sharedProfilesIds);*/ + } + return E_OK; +} + +t_Error FmPcdPlcrGetAbsoluteProfileId(t_Handle h_FmPcd, + e_FmPcdProfileTypeSelection profileType, + t_Handle h_FmPort, + uint16_t relativeProfile, + uint16_t *p_AbsoluteId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + uint8_t i; + + switch (profileType) + { + case e_FM_PCD_PLCR_PORT_PRIVATE: + /* get port PCD id from port handle */ + for(i=0;ip_FmPcdPlcr->portsMapping[i].h_FmPort == h_FmPort) + break; + if (i == FM_MAX_NUM_OF_PORTS) + RETURN_ERROR(MAJOR, E_INVALID_STATE , ("Invalid port handle.")); + + if(!p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Port has no allocated profiles")); + if(relativeProfile >= p_FmPcd->p_FmPcdPlcr->portsMapping[i].numOfProfiles) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range")); + *p_AbsoluteId = (uint16_t)(p_FmPcd->p_FmPcdPlcr->portsMapping[i].profilesBase + relativeProfile); + break; + case e_FM_PCD_PLCR_SHARED: + if(relativeProfile >= p_FmPcdPlcr->numOfSharedProfiles) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION , ("Profile id is out of range")); + *p_AbsoluteId = (uint16_t)(p_FmPcdPlcr->sharedProfilesIds[relativeProfile]); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Invalid policer profile type")); + } + return E_OK; +} + +uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint16_t swPortIndex = 0; + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].profilesBase; +} + +uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint16_t swPortIndex = 0; + + HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId); + + return p_FmPcd->p_FmPcdPlcr->portsMapping[swPortIndex].numOfProfiles; + +} +uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId) +{ + return (uint32_t)(FM_PCD_PLCR_PAR_GO | + ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT)); +} + +uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId) +{ + return (uint32_t)(FM_PCD_PLCR_PAR_GO | + ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) | + FM_PCD_PLCR_PAR_PWSEL_MASK); +} + +bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg) +{ + + if(profileModeReg & FM_PCD_PLCR_PEMODE_PI) + return TRUE; + else + return FALSE; +} + +uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId) +{ + return (uint32_t)(FM_PCD_PLCR_PAR_GO | + FM_PCD_PLCR_PAR_R | + ((uint32_t)absoluteProfileId << FM_PCD_PLCR_PAR_PNUM_SHIFT) | + FM_PCD_PLCR_PAR_PWSEL_MASK); +} + +uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter) +{ + switch(counter) + { + case(e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER): + return FM_PCD_PLCR_PAR_PWSEL_PEGPC; + case(e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER): + return FM_PCD_PLCR_PAR_PWSEL_PEYPC; + case(e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER) : + return FM_PCD_PLCR_PAR_PWSEL_PERPC; + case(e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER) : + return FM_PCD_PLCR_PAR_PWSEL_PERYPC; + 
case(e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER) : + return FM_PCD_PLCR_PAR_PWSEL_PERRPC; + default: + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + return 0; + } +} + +uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red) +{ + + uint32_t tmpReg32 = 0; + + if(green) + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA; + if(yellow) + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA; + if(red) + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA; + + return tmpReg32; +} + +void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId, uint32_t requiredAction) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].valid); + + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].requiredAction = requiredAction; +} + +t_Error FmPcdPlcrProfileTryLock(t_Handle h_FmPcd, uint16_t profileId, bool intr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + bool ans; + if (intr) + ans = TRY_LOCK(NULL, &p_FmPcd->p_FmPcdPlcr->profiles[profileId].lock); + else + ans = TRY_LOCK(p_FmPcd->h_Spinlock, &p_FmPcd->p_FmPcdPlcr->profiles[profileId].lock); + if (ans) + return E_OK; + return ERROR_CODE(E_BUSY); +} + +void FmPcdPlcrReleaseProfileLock(t_Handle h_FmPcd, uint16_t profileId) +{ + RELEASE_LOCK(((t_FmPcd*)h_FmPcd)->p_FmPcdPlcr->profiles[profileId].lock); +} + +/**************************************************/ +/*............Policer API.........................*/ +/**************************************************/ + +t_Handle FM_PCD_PlcrSetProfile(t_Handle h_FmPcd, + t_FmPcdPlcrProfileParams *p_Profile) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + t_FmPcdPlcrInterModuleProfileRegs plcrProfileReg; + uint32_t intFlags; + uint16_t absoluteProfileId; + t_Error err = E_OK; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, NULL); + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, NULL); + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrSetProfile(p_FmPcd->h_Hc, p_Profile); + + p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, NULL); + + if (p_Profile->modify) + { + absoluteProfileId = (uint16_t)(PTR_TO_UINT(p_Profile->id.h_Profile)-1); + if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big ")); + return NULL; + } + if (FmPcdPlcrProfileTryLock(p_FmPcd, absoluteProfileId, FALSE)) + return NULL; + } + else + { + intFlags = FmPcdLock(p_FmPcd); + err = FmPcdPlcrGetAbsoluteProfileId(h_FmPcd, + p_Profile->id.newParams.profileType, + p_Profile->id.newParams.h_FmPort, + p_Profile->id.newParams.relativeProfileId, + &absoluteProfileId); + if (absoluteProfileId >= FM_PCD_PLCR_NUM_ENTRIES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big ")); + return NULL; + } + if(err) + { + FmPcdUnlock(p_FmPcd, intFlags); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + err = FmPcdPlcrProfileTryLock(p_FmPcd, absoluteProfileId, TRUE); + FmPcdUnlock(p_FmPcd, intFlags); + if (err) + return NULL; + } + + /* if no override, check first that this profile is unused */ + if(!p_Profile->modify) + { + /* read specified profile into profile registers */ + tmpReg32 = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId); + intFlags = FmPcdLock(p_FmPcd); + WritePar(p_FmPcd, tmpReg32); + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode); + FmPcdUnlock(p_FmPcd, 
intFlags); + if (tmpReg32 & FM_PCD_PLCR_PEMODE_PI) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].lock); + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Policer Profile is already used")); + return NULL; + } + } + + memset(&plcrProfileReg, 0, sizeof(t_FmPcdPlcrInterModuleProfileRegs)); + + err = FmPcdPlcrBuildProfile(h_FmPcd, p_Profile, &plcrProfileReg); + if(err) + { + RELEASE_LOCK(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].lock); + REPORT_ERROR(MAJOR, err, NO_MSG); + return NULL; + } + + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].nextEngineOnGreen = p_Profile->nextEngineOnGreen; + memcpy(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].paramsOnGreen, &(p_Profile->paramsOnGreen), sizeof(u_FmPcdPlcrNextEngineParams)); + + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].nextEngineOnYellow = p_Profile->nextEngineOnYellow; + memcpy(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].paramsOnYellow, &(p_Profile->paramsOnYellow), sizeof(u_FmPcdPlcrNextEngineParams)); + + p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].nextEngineOnRed = p_Profile->nextEngineOnRed; + memcpy(&p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].paramsOnRed, &(p_Profile->paramsOnRed), sizeof(u_FmPcdPlcrNextEngineParams)); + + intFlags = FmPcdLock(p_FmPcd); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode , plcrProfileReg.fmpl_pemode); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia , plcrProfileReg.fmpl_pegnia); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia , plcrProfileReg.fmpl_peynia); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia , plcrProfileReg.fmpl_pernia); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecir , plcrProfileReg.fmpl_pecir); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pecbs , plcrProfileReg.fmpl_pecbs); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepepir_eir,plcrProfileReg.fmpl_pepepir_eir); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepbs_ebs,plcrProfileReg.fmpl_pepbs_ebs); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pelts , plcrProfileReg.fmpl_pelts); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pects , plcrProfileReg.fmpl_pects); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pepts_ets,plcrProfileReg.fmpl_pepts_ets); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc , plcrProfileReg.fmpl_pegpc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc , plcrProfileReg.fmpl_peypc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc , plcrProfileReg.fmpl_perpc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc , plcrProfileReg.fmpl_perypc); + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc , plcrProfileReg.fmpl_perrpc); + + tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(absoluteProfileId); + WritePar(p_FmPcd, tmpReg32); + + FmPcdUnlock(p_FmPcd, intFlags); + + if (!p_Profile->modify) + FmPcdPlcrValidateProfileSw(p_FmPcd,absoluteProfileId); + + RELEASE_LOCK(p_FmPcd->p_FmPcdPlcr->profiles[absoluteProfileId].lock); + + return UINT_TO_PTR((uint64_t)absoluteProfileId+1); +} + +t_Error FM_PCD_PlcrDeleteProfile(t_Handle h_FmPcd, t_Handle h_Profile) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint16_t profileIndx = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + uint32_t tmpReg32, intFlags; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((profileIndx < FM_PCD_PLCR_NUM_ENTRIES), E_INVALID_SELECTION); + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrDeleteProfile(p_FmPcd->h_Hc, h_Profile); + + 
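+    /* direct register access path: mark the profile invalid in the SW shadow,
+     * then write PEMODE with the PI bit cleared and commit it to the hardware
+     * profile through FMPL_PAR */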
FmPcdPlcrInvalidateProfileSw(p_FmPcd,profileIndx); + + intFlags = FmPcdLock(p_FmPcd); + WRITE_UINT32(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs.fmpl_pemode, ~FM_PCD_PLCR_PEMODE_PI); + + tmpReg32 = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx); + WritePar(p_FmPcd, tmpReg32); + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; +} + +/* ......... */ +/***************************************************/ +/*............Policer Profile Counter..............*/ +/***************************************************/ +uint32_t FM_PCD_PlcrGetProfileCounter(t_Handle h_FmPcd, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint16_t profileIndx = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + uint32_t intFlags, counterVal = 0; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE, 0); + SANITY_CHECK_RETURN_VALUE(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE, 0); + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrGetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter); + + p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + SANITY_CHECK_RETURN_VALUE(p_FmPcdPlcrRegs, E_INVALID_HANDLE, 0); + + if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("profileId too Big ")); + return 0; + } + intFlags = FmPcdLock(p_FmPcd); + WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx)); + + if(!FmPcdPlcrHwProfileIsValid(GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode))) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Uninitialized profile")); + FmPcdUnlock(p_FmPcd, intFlags); + return 0; + } + + switch (counter) + { + case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER: + counterVal = (GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc)); + break; + case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc); + break; + case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER: + counterVal = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc); + break; + default: + REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + break; + } + FmPcdUnlock(p_FmPcd, intFlags); + + return counterVal; +} + + +t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx ,uint32_t requiredAction) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdPlcr *p_FmPcdPlcr = p_FmPcd->p_FmPcdPlcr; + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs = p_FmPcdPlcr->p_FmPcdPlcrRegs; + uint32_t tmpReg32, intFlags; + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrCcGetSetParams(p_FmPcd->h_Hc, profileIndx, requiredAction); + + if (profileIndx >= FM_PCD_PLCR_NUM_ENTRIES) + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile out of range")); + + if (FmPcdPlcrProfileTryLock(p_FmPcd, profileIndx, FALSE)) + RETURN_ERROR(MAJOR, E_INVALID_STATE,("Lock on PP FAILED")); + + intFlags = FmPcdLock(p_FmPcd); + WritePar(p_FmPcd, FmPcdPlcrBuildReadPlcrActionReg(profileIndx)); + + if(!FmPcdPlcrHwProfileIsValid(GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pemode))) + { + FmPcdUnlock(p_FmPcd, intFlags); + RELEASE_LOCK(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].lock); + RETURN_ERROR(MAJOR, E_INVALID_VALUE,("Policer profile is not 
valid")); + } + + ASSERT_COND(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].valid); + + if(!p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].pointedOwners || + !(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredAction & requiredAction)) + { + if(requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) + { + if((p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnGreen!= e_FM_PCD_DONE) || + (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnYellow!= e_FM_PCD_DONE) || + (p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].nextEngineOnRed!= e_FM_PCD_DONE)) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR (MAJOR, E_OK, ("In this case the next engine can be e_FM_PCD_DONE")); + } + + if(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnGreen.action == e_FM_PCD_ENQ_FRAME) + { + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia); + if(!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegnia, tmpReg32); + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEGNIA; + WritePar(p_FmPcd, tmpReg32); + } + + if(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnYellow.action == e_FM_PCD_ENQ_FRAME) + { + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia); + if(!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peynia, tmpReg32); + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PEYNIA; + WritePar(p_FmPcd, tmpReg32); + } + + if(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].paramsOnRed.action == e_FM_PCD_ENQ_FRAME) + { + tmpReg32 = GET_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia); + if(!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))) + { + FmPcdUnlock(p_FmPcd, intFlags); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine of this policer profile has to be assigned to FM_PCD_DONE")); + } + tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pernia, tmpReg32); + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FM_PCD_PLCR_PAR_PWSEL_PERNIA; + WritePar(p_FmPcd, tmpReg32); + } + } + } + FmPcdUnlock(p_FmPcd, intFlags); + + p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].pointedOwners += 1; + p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].requiredAction |= requiredAction; + + RELEASE_LOCK(p_FmPcd->p_FmPcdPlcr->profiles[profileIndx].lock); + + return E_OK; +} + +t_Error FM_PCD_PlcrSetProfileCounter(t_Handle h_FmPcd, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint16_t profileIndx = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + t_FmPcdPlcrRegs *p_FmPcdPlcrRegs; + uint32_t tmpReg32, intFlags; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + if (p_FmPcd->h_Hc) + return FmHcPcdPlcrSetProfileCounter(p_FmPcd->h_Hc, h_Profile, counter, value); + + p_FmPcdPlcrRegs = p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs; + 
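+    /* direct register access path: the counter value is written to the profile
+     * shadow register selected below and then committed via FMPL_PAR */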
SANITY_CHECK_RETURN_ERROR(p_FmPcdPlcrRegs, E_INVALID_HANDLE); + + intFlags = FmPcdLock(p_FmPcd); + switch (counter) + { + case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_pegpc, value); + break; + case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_peypc, value); + break; + case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perpc, value); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perypc ,value); + break; + case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER: + WRITE_UINT32(p_FmPcdPlcrRegs->profileRegs.fmpl_perrpc ,value); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); + } + + /* Activate the atomic write action by writing FMPL_PAR with: GO=1, RW=1, PSI=0, PNUM = + * Profile Number, PWSEL=0xFFFF (select all words). + */ + tmpReg32 = FmPcdPlcrBuildWritePlcrActionReg(profileIndx); + tmpReg32 |= FmPcdPlcrBuildCounterProfileReg(counter); + WritePar(p_FmPcd, tmpReg32); + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; +} + +t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + + p_FmPcd->p_FmPcdPlcr->numOfSharedProfiles = numOfSharedPlcrProfiles; + + return E_OK; +} + + +/* ... */ + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_PlcrDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i = 0; + t_FmPcdIpcMsg msg; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_PLCR_DUMP_REGS; + return XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL); + } + else + { + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, ("FmPcdPlcrRegs Regs")); + + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_gcr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_gsr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_evr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_ier); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_ifr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_eevr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_eier); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_eifr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_rpcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_ypcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_rrpcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_rypcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_tpcnt); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_flmcnt); + + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_serc); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_upcr); + DUMP_VAR(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs,fmpl_dpmr); + + + DUMP_TITLE(&p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr, ("fmpl_pmr")); + 
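+        /* dump the per-port profile mapping registers (fmpl_pmr, one entry per
+         * hardware port id) */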
DUMP_SUBSTRUCT_ARRAY(i, 63) + { + DUMP_MEMORY(&p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->fmpl_pmr[i], sizeof(uint32_t)); + } + + return E_OK; + } +} + +t_Error FM_PCD_PlcrProfileDumpRegs(t_Handle h_FmPcd, t_Handle h_Profile) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdPlcrInterModuleProfileRegs *p_ProfilesRegs; + uint32_t tmpReg, intFlags; + uint16_t profileIndx = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + t_FmPcdIpcMsg msg; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPlcr, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_PLCR_PROFILE_DUMP_REGS; + memcpy(msg.msgBody, (uint8_t *)&h_Profile, sizeof(uint32_t)); + return XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(uint32_t), + NULL, + NULL, + NULL, + NULL); + } + else + { + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs, ("FmPcdPlcrRegs Profile Regs")); + + p_ProfilesRegs = &p_FmPcd->p_FmPcdPlcr->p_FmPcdPlcrRegs->profileRegs; + + tmpReg = FmPcdPlcrBuildReadPlcrActionReg((uint16_t)profileIndx); + intFlags = FmPcdLock(p_FmPcd); + WritePar(p_FmPcd, tmpReg); + + DUMP_TITLE(p_ProfilesRegs, ("Profile %d regs", profileIndx)); + + DUMP_VAR(p_ProfilesRegs, fmpl_pemode); + DUMP_VAR(p_ProfilesRegs, fmpl_pegnia); + DUMP_VAR(p_ProfilesRegs, fmpl_peynia); + DUMP_VAR(p_ProfilesRegs, fmpl_pernia); + DUMP_VAR(p_ProfilesRegs, fmpl_pecir); + DUMP_VAR(p_ProfilesRegs, fmpl_pecbs); + DUMP_VAR(p_ProfilesRegs, fmpl_pepepir_eir); + DUMP_VAR(p_ProfilesRegs, fmpl_pepbs_ebs); + DUMP_VAR(p_ProfilesRegs, fmpl_pelts); + DUMP_VAR(p_ProfilesRegs, fmpl_pects); + DUMP_VAR(p_ProfilesRegs, fmpl_pepts_ets); + DUMP_VAR(p_ProfilesRegs, fmpl_pegpc); + DUMP_VAR(p_ProfilesRegs, fmpl_peypc); + DUMP_VAR(p_ProfilesRegs, fmpl_perpc); + DUMP_VAR(p_ProfilesRegs, fmpl_perypc); + DUMP_VAR(p_ProfilesRegs, fmpl_perrpc); + FmPcdUnlock(p_FmPcd, intFlags); + + return E_OK; + } +} +#endif /* (defined(DEBUG_ERRORS) && ... */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_prs.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Pcd/fm_prs.c @@ -0,0 +1,517 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_pcd.c + + @Description FM PCD ... +*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "debug_ext.h" +#include "net_ext.h" + +#include "fm_common.h" +#include "fm_pcd.h" +#include "fm_pcd_ipc.h" + + +t_Handle PrsConfig(t_FmPcd *p_FmPcd,t_FmPcdParams *p_FmPcdParams) +{ + t_FmPcdPrs *p_FmPcdPrs; + uintptr_t baseAddr; + + UNUSED(p_FmPcd); + UNUSED(p_FmPcdParams); + + p_FmPcdPrs = (t_FmPcdPrs *) XX_Malloc(sizeof(t_FmPcdPrs)); + if (!p_FmPcdPrs) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Parser structure allocation FAILED")); + return NULL; + } + memset(p_FmPcdPrs, 0, sizeof(t_FmPcdPrs)); + + if (p_FmPcd->guestId == NCSW_MASTER_ID) + { + baseAddr = FmGetPcdPrsBaseAddr(p_FmPcdParams->h_Fm); + p_FmPcdPrs->p_SwPrsCode = (uint32_t *)UINT_TO_PTR(baseAddr); + p_FmPcdPrs->p_FmPcdPrsRegs = (t_FmPcdPrsRegs *)UINT_TO_PTR(baseAddr + PRS_REGS_OFFSET); + } + + p_FmPcdPrs->fmPcdPrsPortIdStatistics = 0; + p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = DEFAULT_prsMaxParseCycleLimit; + p_FmPcd->exceptions |= (DEFAULT_fmPcdPrsErrorExceptions | DEFAULT_fmPcdPrsExceptions); + + return p_FmPcdPrs; +} + +static void PcdPrsErrorException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, mask, force; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + event = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perr); + mask = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perer); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perfr); + if(force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perfr, force & ~event); + + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->perr, event); + + DBG(TRACE, ("parser error - 0x%08x\n",event)); + + if(event & FM_PCD_PRS_DOUBLE_ECC) + p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC); +} + +static void PcdPrsException(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + uint32_t event, force; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + event = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevr); + event &= GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pever); + + ASSERT_COND(event & FM_PCD_PRS_SINGLE_ECC); + + DBG(TRACE, ("parser event - 0x%08x\n",event)); + + /* clear the forced events */ + force = GET_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevfr); + if(force & event) + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevfr, force & ~event); + + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->pevr, event); + + 
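+    /*
+     * Illustrative sketch (not part of the original file): the call below
+     * dispatches to the exception callback registered by the application.
+     * Assuming the usual (h_App, exception) callback shape used by this API,
+     * a minimal handler could look like:
+     *
+     *     static void AppPcdException(t_Handle h_App, e_FmPcdExceptions exception)
+     *     {
+     *         if (exception == e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC)
+     *             XX_Print("FM-PCD parser: single-bit ECC event\n");
+     *     }
+     */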
p_FmPcd->f_Exception(p_FmPcd->h_App,e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC); +} + +static uint32_t GetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + int i; + t_FmPcdPrsLabelParams *p_Label; + + SANITY_CHECK_RETURN_VALUE(p_FmPcd, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE, 0); + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + ASSERT_COND(p_FmPcd->p_FmPcdPrs->currLabel < FM_PCD_PRS_NUM_OF_LABELS); + + for (i=0; i < p_FmPcd->p_FmPcdPrs->currLabel; i++) + { + p_Label = &p_FmPcd->p_FmPcdPrs->labelsTable[i]; + + if ((hdr == p_Label->hdr) && (indexPerHdr == p_Label->indexPerHdr)) + return p_Label->instructionOffset; + } + + REPORT_ERROR(MAJOR, E_NOT_FOUND, ("Sw Parser attachment Not found")); + return (uint32_t)ILLEGAL_BASE; +} + +t_Error PrsInit(t_FmPcd *p_FmPcd) +{ + t_FmPcdDriverParam *p_Param = p_FmPcd->p_FmPcdDriverParam; + t_FmPcdPrsRegs *p_Regs = p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + uint32_t tmpReg; + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + return E_OK; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + +#ifdef FM_PRS_MEM_ERRATA_FMAN_SW003 + { + uint32_t i; + uint32_t regsToGlobalOffset = 0x840; + uint32_t firstPortToGlobalOffset = 0x45800; + uint64_t globalAddr = PTR_TO_UINT(p_Regs) - regsToGlobalOffset; + uint32_t firstPortAddr = (uint32_t)(globalAddr - (uint64_t)firstPortToGlobalOffset); + uint32_t portSize = 0x1000; + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPcd->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + /* clear all parser memory */ + IOMemSet32(UINT_TO_PTR(globalAddr), 0x00000000, 0x800); + for(i = 0;i<16;i++) + IOMemSet32(UINT_TO_PTR(firstPortAddr+i*portSize), (uint8_t)0x00000000, (uint32_t)0x80); + } + } +#endif /* FM_PRS_MEM_ERRATA_FMAN_SW003 */ + + /**********************RPCLIM******************/ + WRITE_UINT32(p_Regs->rpclim, (uint32_t)p_Param->prsMaxParseCycleLimit); + /**********************FMPL_RPCLIM******************/ + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR, PcdPrsErrorException, p_FmPcd); + + /* register even if no interrupts enabled, to allow future enablement */ + FmRegisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL, PcdPrsException, p_FmPcd); + + /**********************PEVR******************/ + WRITE_UINT32(p_Regs->pevr, (FM_PCD_PRS_SINGLE_ECC | FM_PCD_PRS_PORT_IDLE_STS) ); + /**********************PEVR******************/ + + /**********************PEVER******************/ + if(p_FmPcd->exceptions & FM_PCD_EX_PRS_SINGLE_ECC) + { + FmEnableRamsEcc(p_FmPcd->h_Fm); + WRITE_UINT32(p_Regs->pever, FM_PCD_PRS_SINGLE_ECC); + } + else + WRITE_UINT32(p_Regs->pever, 0); + /**********************PEVER******************/ + + /**********************PERR******************/ + WRITE_UINT32(p_Regs->perr, FM_PCD_PRS_DOUBLE_ECC); + + /**********************PERR******************/ + + /**********************PERER******************/ + tmpReg = 0; + if(p_FmPcd->exceptions & FM_PCD_EX_PRS_DOUBLE_ECC) + { + FmEnableRamsEcc(p_FmPcd->h_Fm); + tmpReg |= FM_PCD_PRS_DOUBLE_ECC; + } + WRITE_UINT32(p_Regs->perer, tmpReg); + /**********************PERER******************/ + + /**********************PPCS******************/ + WRITE_UINT32(p_Regs->ppsc, p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics); + /**********************PPCS******************/ + +#ifdef FM_PRS_L4_SHELL_ERRATA_FMANb + { + uint32_t i, j; + 
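+        /*
+         * Illustrative sketch (comment only, not original code): on the
+         * affected silicon revision the block below loads the SW_PRS_L4_PATCH
+         * byte array into the parser code area at FM_PCD_PRS_SW_OFFSET,
+         * packing four bytes into each big-endian 32-bit word the same way
+         * FM_PCD_PrsLoadSw() packs user code later in this file:
+         *
+         *     uint32_t word = 0;
+         *     for (j = 0; j < 4; j++)                    // MSB first
+         *         word = (word << 8) | patch[i*4 + j];   // 'patch' is a stand-in name
+         *     WRITE_UINT32(*(p_CodeBase + i), word);     // 'p_CodeBase' is a stand-in name
+         */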
t_FmRevisionInfo revInfo; + uint8_t swPrsL4Patch[] = SW_PRS_L4_PATCH; + + FM_GetRevision(p_FmPcd->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + /* load sw parser L4 patch */ + for(i=0;ip_FmPcdPrs->p_SwPrsCode+ FM_PCD_PRS_SW_OFFSET/4 + i), tmpReg); + } + p_FmPcd->p_FmPcdPrs->p_CurrSwPrs = FM_PCD_PRS_SW_OFFSET/4 + p_FmPcd->p_FmPcdPrs->p_SwPrsCode+sizeof(swPrsL4Patch)/4; + } + } +#endif /* FM_PRS_L4_SHELL_ERRATA_FMANb */ + + return E_OK; +} + +void PrsFree(t_FmPcd *p_FmPcd ) +{ + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_ERR); + /* register even if no interrupts enabled, to allow future enablement */ + FmUnregisterIntr(p_FmPcd->h_Fm, e_FM_MOD_PRS, 0, e_FM_INTR_TYPE_NORMAL); +} + +void PrsEnable(t_FmPcd *p_FmPcd ) +{ + t_FmPcdPrsRegs *p_Regs = p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + WRITE_UINT32(p_Regs->rpimac, GET_UINT32(p_Regs->rpimac) | FM_PCD_PRS_RPIMAC_EN); +} + +void PrsDisable(t_FmPcd *p_FmPcd ) +{ + t_FmPcdPrsRegs *p_Regs = p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs; + + ASSERT_COND(p_FmPcd->guestId == NCSW_MASTER_ID); + WRITE_UINT32(p_Regs->rpimac, GET_UINT32(p_Regs->rpimac) & ~FM_PCD_PRS_RPIMAC_EN); +} + +t_Error PrsIncludePortInStatistics(t_FmPcd *p_FmPcd, uint8_t hardwarePortId, bool include) +{ + uint32_t bitMask = 0; + uint8_t prsPortId; + + SANITY_CHECK_RETURN_ERROR((hardwarePortId >=1 && hardwarePortId <= 16), E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + + GET_FM_PCD_PRS_PORT_ID(prsPortId, hardwarePortId); + GET_FM_PCD_INDEX_FLAG(bitMask, prsPortId); + + if(include) + p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics |= bitMask; + else + p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics &= ~bitMask; + + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->ppsc, p_FmPcd->p_FmPcdPrs->fmPcdPrsPortIdStatistics); + + return E_OK; +} + +t_Error FmPcdPrsIncludePortInStatistics(t_Handle h_FmPcd, uint8_t hardwarePortId, bool include) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_FmPcdIpcPrsIncludePort prsIncludePortParams; + t_FmPcdIpcMsg msg; + t_Error err; + + SANITY_CHECK_RETURN_ERROR((hardwarePortId >=1 && hardwarePortId <= 16), E_INVALID_VALUE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + prsIncludePortParams.hardwarePortId = hardwarePortId; + prsIncludePortParams.include = include; + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_PRS_INC_PORT_STATS; + memcpy(msg.msgBody, &prsIncludePortParams, sizeof(prsIncludePortParams)); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(prsIncludePortParams), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + return E_OK; + } + return PrsIncludePortInStatistics(p_FmPcd, hardwarePortId, include); +} + +uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr) +{ + t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; + t_Error err = E_OK; + t_FmPcdIpcSwPrsLable labelParams; + t_FmPcdIpcMsg msg; + uint32_t prsOffset = 0; + t_FmPcdIpcReply reply; + uint32_t replyLength; + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&reply, 0, sizeof(reply)); + memset(&msg, 0, sizeof(msg)); + labelParams.enumHdr = (uint32_t)hdr; + labelParams.indexPerHdr = indexPerHdr; + msg.msgId = 
FM_PCD_GET_SW_PRS_OFFSET; + memcpy(msg.msgBody, &labelParams, sizeof(labelParams)); + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(labelParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if(replyLength != sizeof(uint32_t) + sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + memcpy((uint8_t*)&prsOffset, reply.replyBody, sizeof(uint32_t)); + return prsOffset; + } + + return GetSwPrsOffset(h_FmPcd, hdr, indexPerHdr); +} + +void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + SANITY_CHECK_RETURN(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_SetPrsStatistics - guest mode!")); + return; + } + if(enable) + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->ppsc, FM_PCD_PRS_PPSC_ALL_PORTS); + else + WRITE_UINT32(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs->ppsc, 0); + +} + +t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + uint32_t *p_LoadTarget, tmpReg; + int i, j; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_SwPrs, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->enabled, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_PrsLoadSw - guest mode!")); + + if(!p_SwPrs->override) + { + if(p_FmPcd->p_FmPcdPrs->p_CurrSwPrs > p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SW parser base must be larger than current loaded code")); + } + if(p_SwPrs->size > FM_PCD_SW_PRS_SIZE - FM_PCD_PRS_SW_TAIL_SIZE - p_SwPrs->base*2) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_SwPrs->size may not be larger than MAX_SW_PRS_CODE_SIZE")); + if(p_SwPrs->size % 4) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_SwPrs->size must be divisible by 4")); + + /* save sw parser labels */ + if(p_SwPrs->override) + p_FmPcd->p_FmPcdPrs->currLabel = 0; + if(p_FmPcd->p_FmPcdPrs->currLabel+ p_SwPrs->numOfLabels > FM_PCD_PRS_NUM_OF_LABELS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceeded number of labels allowed ")); + memcpy(&p_FmPcd->p_FmPcdPrs->labelsTable[p_FmPcd->p_FmPcdPrs->currLabel], p_SwPrs->labelsTable, p_SwPrs->numOfLabels*sizeof(t_FmPcdPrsLabelParams)); + p_FmPcd->p_FmPcdPrs->currLabel += p_SwPrs->numOfLabels; + /* load sw parser code */ + p_LoadTarget = p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4; + for(i=0;isize/4;i++) + { + tmpReg = 0; + for(j =0;j<4;j++) + { + tmpReg <<= 8; + tmpReg |= *(p_SwPrs->p_Code+i*4+j); + } + WRITE_UINT32(*(p_LoadTarget + i), tmpReg); + } + p_FmPcd->p_FmPcdPrs->p_CurrSwPrs = p_FmPcd->p_FmPcdPrs->p_SwPrsCode + p_SwPrs->base*2/4 + p_SwPrs->size/4; + + /* copy data parameters */ + for(i=0;ip_FmPcdPrs->p_SwPrsCode+PRS_SW_DATA/4+i), p_SwPrs->swPrsDataParams[i]); + + + /* Clear last 4 bytes */ + WRITE_UINT32(*(p_FmPcd->p_FmPcdPrs->p_SwPrsCode+(PRS_SW_DATA-FM_PCD_PRS_SW_TAIL_SIZE)/4), 0); + + return E_OK; +} + +t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd,uint16_t value) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + + 
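+    /*
+     * Usage sketch (illustrative; FM_PCD_Config()/FM_PCD_Init() are assumed
+     * from the public FMD API and are not defined in this file): "Config"
+     * routines such as this one only record the value in p_FmPcdDriverParam;
+     * PrsInit() later writes it to the parser RPCLIM register. A typical call
+     * order would be:
+     *
+     *     h_FmPcd = FM_PCD_Config(&pcdParams);            // pcdParams is a stand-in
+     *     FM_PCD_ConfigPrsMaxCycleLimit(h_FmPcd, 1024);   // 1024 is an example value
+     *     FM_PCD_Init(h_FmPcd);                           // commits driver params to HW
+     */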
SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdDriverParam, E_INVALID_HANDLE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("FM_PCD_ConfigPrsMaxCycleLimit - guest mode!")); + + p_FmPcd->p_FmPcdDriverParam->prsMaxParseCycleLimit = value; + + return E_OK; +} + + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PCD_PrsDumpRegs(t_Handle h_FmPcd) +{ + t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; + t_FmPcdIpcMsg msg; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPcd->p_FmPcdPrs, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPcd->p_FmPcdDriverParam, E_INVALID_STATE); + + if(p_FmPcd->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_PCD_PRS_DUMP_REGS; + return XX_IpcSendMessage(p_FmPcd->h_IpcSession, + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL); + } + DUMP_SUBTITLE(("\n")); + DUMP_TITLE(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs, ("FmPcdPrsRegs Regs")); + + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,rpclim); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,rpimac); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,pmeec); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,pevr); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,pever); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,pevfr); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,perr); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,perer); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,perfr); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,ppsc); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,pds); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,l2rrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,l3rrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,l4rrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,srrs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,l2rres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,l3rres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,l4rres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,srres); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,spcs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,spscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,hxscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,mrcs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,mwcs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,mrscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,mwscs); + DUMP_VAR(p_FmPcd->p_FmPcdPrs->p_FmPcdPrsRegs,fcscs); + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +NCSW_FM_INC = $(srctree)/drivers/net/dpa/NetCommSw/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + +obj-y += fsl-ncsw-Pcd.o + +fsl-ncsw-Pcd-objs := fm_port.o fm_port_im.o + + + + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/fm_port.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/fm_port.c @@ -0,0 +1,5060 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_port.c + + @Description FM driver routines implementation. +*//***************************************************************************/ +#include "error_ext.h" +#include "std_ext.h" +#include "string_ext.h" +#include "sprint_ext.h" +#include "debug_ext.h" +#include "fm_pcd_ext.h" + +#include "fm_port.h" + + +/****************************************/ +/* static functions */ +/****************************************/ + +static t_Error CheckInitParameters(t_FmPort *p_FmPort) +{ + t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam; + t_Error ans = E_OK; + uint32_t unusedMask; + uint8_t i; + uint8_t j; + bool found; + + if (p_FmPort->imEn) + { + if ((ans = FmPortImCheckInitParameters(p_FmPort)) != E_OK) + return ERROR_CODE(ans); + } + else + { + /****************************************/ + /* Rx only */ + /****************************************/ + if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + /* external buffer pools */ + if(!p_Params->extBufPools.numOfPoolsUsed) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.numOfPoolsUsed=0. 
At least one buffer pool must be defined"));
+
+            if(p_Params->extBufPools.numOfPoolsUsed > FM_PORT_MAX_NUM_OF_EXT_POOLS)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("numOfPoolsUsed can't be larger than %d", FM_PORT_MAX_NUM_OF_EXT_POOLS));
+
+            for(i=0;i<p_Params->extBufPools.numOfPoolsUsed;i++)
+            {
+                if(p_Params->extBufPools.extBufPool[i].id >= BM_MAX_NUM_OF_POOLS)
+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.extBufPool[%d].id can't be larger than %d", i, BM_MAX_NUM_OF_POOLS));
+                if(!p_Params->extBufPools.extBufPool[i].size)
+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("extBufPools.extBufPool[%d].size is 0", i));
+            }
+
+            /* backup BM pools indication is valid only for some chip derivatives
+               (limited by the config routine) */
+            if(p_Params->p_BackupBmPools)
+            {
+                if(p_Params->p_BackupBmPools->numOfBackupPools >= p_Params->extBufPools.numOfPoolsUsed)
+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_BackupBmPools must be smaller than extBufPools.numOfPoolsUsed"));
+                found = FALSE;
+                for(i = 0;i<p_Params->p_BackupBmPools->numOfBackupPools;i++)
+                    for(j=0;j<p_Params->extBufPools.numOfPoolsUsed;j++)
+                        if(p_Params->p_BackupBmPools->poolIds[i] == p_Params->extBufPools.extBufPool[j].id)
+                            found = TRUE;
+                if (!found)
+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("All p_BackupBmPools.poolIds must be included in extBufPools.extBufPool[n].id"));
+            }
+
+            /* up to extBufPools.numOfPoolsUsed pools may be defined */
+            if(p_Params->bufPoolDepletion.numberOfPoolsModeEnable)
+            {
+                if((p_Params->bufPoolDepletion.numOfPools > p_Params->extBufPools.numOfPoolsUsed))
+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools can't be larger than %d and can't be larger than numOfPoolsUsed", FM_PORT_MAX_NUM_OF_EXT_POOLS));
+
+                if(!p_Params->bufPoolDepletion.numOfPools)
+                    RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPoolsToConsider can not be 0 when numberOfPoolsModeEnable=TRUE"));
+            }
+            /* Check that part of IC that needs copying is small enough to enter start margin */
+            if(p_Params->intContext.size + p_Params->intContext.extBufOffset > p_Params->bufMargins.startMargins)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size is larger than start margins"));
+
+            if(p_Params->liodnOffset & ~FM_LIODN_OFFSET_MASK)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1));
+#ifdef FM_PARTITION_ARRAY
+            {
+                t_FmRevisionInfo revInfo;
+                FM_GetRevision(p_FmPort->h_Fm, &revInfo);
+                if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
+                {
+                    if(p_Params->liodnOffset >= MAX_LIODN_OFFSET)
+                    {
+                        p_Params->liodnOffset = (uint16_t)(p_Params->liodnOffset & (MAX_LIODN_OFFSET-1));
+                        DBG(WARNING, ("liodnOffset number is out of rev1 range - MSB bits cleared."));
+                    }
+                }
+            }
+#endif /* FM_PARTITION_ARRAY */
+        }
+
+        /****************************************/
+        /* Non Rx ports */
+        /****************************************/
+        else
+        {
+            if(p_Params->deqSubPortal >= MAX_QMI_DEQ_SUBPORTAL)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" deqSubPortal has to be in the range of 0 - %d", MAX_QMI_DEQ_SUBPORTAL));
+
+            /* to protect HW internal-context from overwrite */
+            if((p_Params->intContext.size) && (p_Params->intContext.intContextOffset < MIN_TX_INT_OFFSET))
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("non-Rx intContext.intContextOffset can't be smaller than %d", MIN_TX_INT_OFFSET));
+        }
+
+        /****************************************/
+        /* Rx Or Offline Parsing */
+        /****************************************/
+        if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType ==
e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + { + + if(!p_Params->dfltFqid) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dfltFqid must be between 1 and 2^24-1")); +#if defined(FM_CAPWAP_SUPPORT) && defined(FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004) + if(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace % 16) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufferPrefixContent.manipExtraSpace has to be devidable by 16")); +#endif /* defined(FM_CAPWAP_SUPPORT) && ... */ + } + + /****************************************/ + /* All ports */ + /****************************************/ + /* common BMI registers values */ + /* Check that Queue Id is not larger than 2^24, and is not 0 */ + if((p_Params->errFqid & ~0x00FFFFFF) || !p_Params->errFqid) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("errFqid must be between 1 and 2^24-1")); + if(p_Params->dfltFqid & ~0x00FFFFFF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dfltFqid must be between 1 and 2^24-1")); + } + + /****************************************/ + /* Rx only */ + /****************************************/ + if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + /* Check that divisible by 256 and not larger than 256 */ + if(p_Params->rxFifoPriElevationLevel % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("rxFifoPriElevationLevel has to be divisible by %d", BMI_FIFO_UNITS)); + if(!p_Params->rxFifoPriElevationLevel || (p_Params->rxFifoPriElevationLevel > BMI_MAX_FIFO_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("rxFifoPriElevationLevel has to be in the range of 256 - %d", BMI_MAX_FIFO_SIZE)); + if(p_Params->rxFifoThreshold % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("rxFifoThreshold has to be divisible by %d", BMI_FIFO_UNITS)); + if(!p_Params->rxFifoThreshold ||(p_Params->rxFifoThreshold > BMI_MAX_FIFO_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("rxFifoThreshold has to be in the range of 256 - %d", BMI_MAX_FIFO_SIZE)); + + /* Check that not larger than 16 */ + if(p_Params->cutBytesFromEnd > FRAME_END_DATA_SIZE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE)); + + /* Check the margin definition */ + if(p_Params->bufMargins.startMargins > MAX_EXT_BUFFER_OFFSET) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.startMargins can't be larger than %d", MAX_EXT_BUFFER_OFFSET)); + if(p_Params->bufMargins.endMargins > MAX_EXT_BUFFER_OFFSET) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.endMargins can't be larger than %d", MAX_EXT_BUFFER_OFFSET)); + + /* extra FIFO size (allowed only to Rx ports) */ + if(p_FmPort->fifoBufs.extra % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fifoBufs.extra has to be divisible by %d", BMI_FIFO_UNITS)); + + if(p_Params->bufPoolDepletion.numberOfPoolsModeEnable && + !p_Params->bufPoolDepletion.numOfPools) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPoolsToConsider can not be 0 when numberOfPoolsModeEnable=TRUE")); +#ifdef FM_CSI_CFED_LIMIT + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + + if (revInfo.majorRev == 4) + { + /* Check that not larger than 16 */ + if(p_Params->cutBytesFromEnd + p_Params->cheksumLastBytesIgnore > FRAME_END_DATA_SIZE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("cheksumLastBytesIgnore + cutBytesFromEnd can't be larger than %d", FRAME_END_DATA_SIZE)); + } + } +#endif /* FM_CSI_CFED_LIMIT */ + + } + + /****************************************/ + /* Non Rx ports */ + /****************************************/ + else + 
/* extra FIFO size (allowed only to Rx ports) */ + if(p_FmPort->fifoBufs.extra) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" No fifoBufs.extra for non Rx ports")); + + /****************************************/ + /* Rx & Tx */ + /****************************************/ + if((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || + (p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + /* Check that not larger than 16 */ + if(p_Params->cheksumLastBytesIgnore > FRAME_END_DATA_SIZE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("cheksumLastBytesIgnore can't be larger than %d", FRAME_END_DATA_SIZE)); + } + + /****************************************/ + /* Tx only */ + /****************************************/ + if((p_FmPort->portType == e_FM_PORT_TYPE_TX) || (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)) + { + /* Check that divisible by 256 and not larger than 256 */ + if(p_Params->txFifoMinFillLevel % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("txFifoMinFillLevel has to be divisible by %d", BMI_FIFO_UNITS)); + if(p_Params->txFifoMinFillLevel > (BMI_MAX_FIFO_SIZE - 256)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("txFifoMinFillLevel has to be in the range of 0 - %d", BMI_MAX_FIFO_SIZE)); + if(p_Params->txFifoLowComfLevel % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("txFifoLowComfLevel has to be divisible by %d", BMI_FIFO_UNITS)); + if(!p_Params->txFifoLowComfLevel || (p_Params->txFifoLowComfLevel > BMI_MAX_FIFO_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("txFifoLowComfLevel has to be in the range of 256 - %d", BMI_MAX_FIFO_SIZE)); + + /* Check that not larger than 8 */ + if((!p_FmPort->txFifoDeqPipelineDepth) ||( p_FmPort->txFifoDeqPipelineDepth > MAX_FIFO_PIPELINE_DEPTH)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("txFifoDeqPipelineDepth can't be larger than %d", MAX_FIFO_PIPELINE_DEPTH)); + if(p_FmPort->portType == e_FM_PORT_TYPE_TX) + if(p_FmPort->txFifoDeqPipelineDepth > 2) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("txFifoDeqPipelineDepth for !G can't be larger than 2")); + } + else + /****************************************/ + /* Non Tx Ports */ + /****************************************/ + { + /* If discard override was selected , no frames may be discarded. 
*/ + if(p_Params->frmDiscardOverride && p_Params->errorsToDiscard) + RETURN_ERROR(MAJOR, E_CONFLICT, ("errorsToDiscard is not empty, but frmDiscardOverride selected (all discarded frames to be enqueued to error queue).")); + } + + /****************************************/ + /* Rx and Offline parsing */ + /****************************************/ + if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) + || (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + { + if(p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + unusedMask = BMI_STATUS_OP_MASK_UNUSED; + else + unusedMask = BMI_STATUS_RX_MASK_UNUSED; + + /* Check that no common bits with BMI_STATUS_MASK_UNUSED */ + if(p_Params->errorsToDiscard & unusedMask) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("errorsToDiscard contains undefined bits")); + } + + /****************************************/ + /* All ports */ + /****************************************/ + + /* Check that divisible by 16 and not larger than 240 */ + if(p_Params->intContext.intContextOffset >MAX_INT_OFFSET) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.intContextOffset can't be larger than %d", MAX_INT_OFFSET)); + if(p_Params->intContext.intContextOffset % OFFSET_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.intContextOffset has to be divisible by %d", OFFSET_UNITS)); + + /* check that ic size+ic internal offset, does not exceed ic block size */ + if(p_Params->intContext.size + p_Params->intContext.intContextOffset > MAX_IC_SIZE) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size + intContext.intContextOffset has to be smaller than %d", MAX_IC_SIZE)); + /* Check that divisible by 16 and not larger than 256 */ + if(p_Params->intContext.size % OFFSET_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.size has to be divisible by %d", OFFSET_UNITS)); + + /* Check that divisible by 16 and not larger than 4K */ + if(p_Params->intContext.extBufOffset > MAX_EXT_OFFSET) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.extBufOffset can't be larger than %d", MAX_EXT_OFFSET)); + if(p_Params->intContext.extBufOffset % OFFSET_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("intContext.extBufOffset has to be divisible by %d", OFFSET_UNITS)); + + /* common BMI registers values */ + if((!p_FmPort->tasks.num) || (p_FmPort->tasks.num > MAX_NUM_OF_TASKS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("tasks.num can't be larger than %d", MAX_NUM_OF_TASKS)); + if(p_FmPort->tasks.extra > MAX_NUM_OF_EXTRA_TASKS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("tasks.extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS)); + if((!p_FmPort->openDmas.num) || (p_FmPort->openDmas.num > MAX_NUM_OF_DMAS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("openDmas.num can't be larger than %d", MAX_NUM_OF_DMAS)); + if(p_FmPort->openDmas.extra > MAX_NUM_OF_EXTRA_DMAS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("openDmas.extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS)); + if(!p_FmPort->fifoBufs.num || (p_FmPort->fifoBufs.num > BMI_MAX_FIFO_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fifoBufs.num has to be in the range of 256 - %d", BMI_MAX_FIFO_SIZE)); + if(p_FmPort->fifoBufs.num % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fifoBufs.num has to be divisible by %d", BMI_FIFO_UNITS)); + + return E_OK; +} + +static void FmPortDriverParamFree(t_FmPort *p_FmPort) +{ + if(p_FmPort->p_FmPortDriverParam) + { + XX_Free(p_FmPort->p_FmPortDriverParam); + p_FmPort->p_FmPortDriverParam = NULL; + } +} + +static t_Error 
SetExtBufferPools(t_FmPort *p_FmPort) +{ + t_FmPortExtPools *p_ExtBufPools = &p_FmPort->p_FmPortDriverParam->extBufPools; + t_FmPortBufPoolDepletion *p_BufPoolDepletion = &p_FmPort->p_FmPortDriverParam->bufPoolDepletion; + volatile uint32_t *p_ExtBufRegs; + volatile uint32_t *p_BufPoolDepletionReg; + bool rxPort; + bool found; + uint8_t orderedArray[FM_PORT_MAX_NUM_OF_EXT_POOLS]; + uint16_t sizesArray[BM_MAX_NUM_OF_POOLS]; + uint8_t count = 0; + uint8_t numOfPools; + uint16_t bufSize = 0, largestBufSize = 0; + int i=0, j=0, k=0; + uint32_t tmpReg, vector, minFifoSizeRequired=0; + + memset(&orderedArray, 0, sizeof(uint8_t) * FM_PORT_MAX_NUM_OF_EXT_POOLS); + memset(&sizesArray, 0, sizeof(uint16_t) * BM_MAX_NUM_OF_POOLS); + memcpy(&p_FmPort->extBufPools, p_ExtBufPools, sizeof(t_FmPortExtPools)); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_ExtBufRegs = p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi; + p_BufPoolDepletionReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_mpd; + rxPort = TRUE; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_ExtBufRegs = p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_oebmpi; + p_BufPoolDepletionReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ompd; + rxPort = FALSE; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for port type")); + } + + /* First we copy the external buffers pools information to an ordered local array */ + for(i=0;inumOfPoolsUsed;i++) + { + /* get pool size */ + bufSize = p_ExtBufPools->extBufPool[i].size; + + /* keep sizes in an array according to poolId for direct access */ + sizesArray[p_ExtBufPools->extBufPool[i].id] = bufSize; + + /* save poolId in an ordered array according to size */ + for (j=0;j<=i;j++) + { + /* this is the next free place in the array */ + if (j==i) + orderedArray[i] = p_ExtBufPools->extBufPool[i].id; + else + { + /* find the right place for this poolId */ + if(bufSize < sizesArray[orderedArray[j]]) + { + /* move the poolIds one place ahead to make room for this poolId */ + for(k=i;k>j;k--) + orderedArray[k] = orderedArray[k-1]; + + /* now k==j, this is the place for the new size */ + orderedArray[k] = p_ExtBufPools->extBufPool[i].id; + break; + } + } + } + } + + /* build the register value */ + + for(i=0;inumOfPoolsUsed;i++) + { + tmpReg = BMI_EXT_BUF_POOL_VALID | BMI_EXT_BUF_POOL_EN_COUNTER; + tmpReg |= ((uint32_t)orderedArray[i] << BMI_EXT_BUF_POOL_ID_SHIFT); + tmpReg |= sizesArray[orderedArray[i]]; + /* functionality available only for some deriviatives (limited by config) */ + if(p_FmPort->p_FmPortDriverParam->p_BackupBmPools) + for(j=0;jp_FmPortDriverParam->p_BackupBmPools->numOfBackupPools;j++) + if(orderedArray[i] == p_FmPort->p_FmPortDriverParam->p_BackupBmPools->poolIds[j]) + { + tmpReg |= BMI_EXT_BUF_POOL_BACKUP; + break; + } + WRITE_UINT32(*(p_ExtBufRegs+i), tmpReg); + } + + if(p_FmPort->p_FmPortDriverParam->p_BackupBmPools) + XX_Free(p_FmPort->p_FmPortDriverParam->p_BackupBmPools); + + numOfPools = (uint8_t)(rxPort ? 
FM_PORT_MAX_NUM_OF_EXT_POOLS:FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS); + + /* clear unused pools */ + for(i=p_ExtBufPools->numOfPoolsUsed;irxPoolsParams.largestBufSize = largestBufSize = sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed-1]]; + if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { +#ifdef FM_FIFO_ALLOCATION_OLD_ALG + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + + if(revInfo.majorRev != 4) + { + minFifoSizeRequired = (uint32_t)(((largestBufSize % BMI_FIFO_UNITS) ? + ((largestBufSize/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS) : + largestBufSize) + + (7*BMI_FIFO_UNITS)); + } + else +#endif /* FM_FIFO_ALLOCATION_OLD_ALG */ + { + p_FmPort->rxPoolsParams.numOfPools = p_ExtBufPools->numOfPoolsUsed; + if(p_ExtBufPools->numOfPoolsUsed == 1) + minFifoSizeRequired = 8*BMI_FIFO_UNITS; + else + { + uint16_t secondLargestBufSize = sizesArray[orderedArray[p_ExtBufPools->numOfPoolsUsed-2]]; + p_FmPort->rxPoolsParams.secondLargestBufSize = secondLargestBufSize; + minFifoSizeRequired = (uint32_t)(((secondLargestBufSize % BMI_FIFO_UNITS) ? + ((secondLargestBufSize/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS) : + secondLargestBufSize) + + (7*BMI_FIFO_UNITS)); + } + } + if(p_FmPort->fifoBufs.num < minFifoSizeRequired) + { + p_FmPort->fifoBufs.num = minFifoSizeRequired; + DBG(INFO, ("FIFO size for Rx port enlarged to %d",minFifoSizeRequired)); + } + } + + /* check if pool size is not too big */ + /* This is a definition problem in which if the fifo for the RX port + is lower than the largest pool size the hardware will allocate scatter gather + buffers even though the frame size can fit in a single buffer. */ + if (largestBufSize > p_FmPort->fifoBufs.num) + DBG(WARNING, ("Frame larger than port Fifo size (%u) will be split to more than a single buffer (S/G) even if shorter than largest buffer size (%u)", + p_FmPort->fifoBufs.num, largestBufSize)); + + /* pool depletion */ + tmpReg = 0; + if(p_BufPoolDepletion->numberOfPoolsModeEnable) + { + /* calculate vector for number of pools depletion */ + found = FALSE; + vector = 0; + count = 0; + for(i=0;ipoolsToConsider[i]) + { + for(j=0;jnumOfPoolsUsed;j++) + { + if (i == orderedArray[j]) + { + vector |= 0x80000000 >> j; + found = TRUE; + count++; + break; + } + } + if (!found) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Pools selected for depletion are not used.")); + else + found = FALSE; + } + } + if (count < p_BufPoolDepletion->numOfPools) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufPoolDepletion.numOfPools is larger than the number of pools defined.")); + + /* configure num of pools and vector for number of pools mode */ + tmpReg |= (((uint32_t)p_BufPoolDepletion->numOfPools - 1) << BMI_POOL_DEP_NUM_OF_POOLS_SHIFT); + tmpReg |= vector; + } + + if(p_BufPoolDepletion->singlePoolModeEnable) + { + /* calculate vector for number of pools depletion */ + found = FALSE; + vector = 0; + count = 0; + for(i=0;ipoolsToConsiderForSingleMode[i]) + { + for(j=0;jnumOfPoolsUsed;j++) + { + if (i == orderedArray[j]) + { + vector |= 0x00000080 >> j; + found = TRUE; + count++; + break; + } + } + if (!found) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Pools selected for depletion are not used.")); + else + found = FALSE; + } + } + if (!count) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("No pools defined for single buffer mode pool depletion.")); + + /* configure num of pools and vector for number of pools mode */ + tmpReg |= vector; + } + + WRITE_UINT32(*p_BufPoolDepletionReg, tmpReg); + + return E_OK; +} + +static 
t_Error ClearPerfCnts(t_FmPort *p_FmPort) +{ + FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL, 0); + FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_QUEUE_UTIL, 0); + FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL, 0); + FM_PORT_ModifyCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL, 0); + return E_OK; +} + +static t_Error BmiRxPortInit(t_FmPort *p_FmPort) +{ + t_FmPortRxBmiRegs *p_Regs = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs; + uint32_t tmpReg; + t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam; + uint32_t errorsToEnq = 0; + t_FmPortPerformanceCnt performanceContersParams; + t_Error err; + + /* check that port is not busy */ + if (GET_UINT32(p_Regs->fmbm_rcfg) & BMI_PORT_CFG_EN) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("Port(%d,%d) is already enabled",p_FmPort->portType, p_FmPort->portId)); + + /* Set Config register */ + tmpReg = 0; + if (p_FmPort->imEn) + tmpReg |= BMI_PORT_CFG_IM; + /* No discard - all error frames go to error queue */ + else if (p_Params->frmDiscardOverride) + tmpReg |= BMI_PORT_CFG_FDOVR; + + WRITE_UINT32(p_Regs->fmbm_rcfg, tmpReg); + + /* Configure dma attributes */ + tmpReg = 0; + tmpReg |= (uint32_t)p_Params->dmaSwapData << BMI_DMA_ATTR_SWP_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaIntContextCacheAttr << BMI_DMA_ATTR_IC_CACHE_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaHeaderCacheAttr << BMI_DMA_ATTR_HDR_CACHE_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaScatterGatherCacheAttr << BMI_DMA_ATTR_SG_CACHE_SHIFT; + if(p_Params->dmaWriteOptimize) + tmpReg |= BMI_DMA_ATTR_WRITE_OPTIMIZE; + + WRITE_UINT32(p_Regs->fmbm_rda, tmpReg); + + /* Configure Rx Fifo params */ + tmpReg = 0; + tmpReg |= ((p_Params->rxFifoPriElevationLevel/BMI_FIFO_UNITS - 1) << BMI_RX_FIFO_PRI_ELEVATION_SHIFT); + tmpReg |= ((p_Params->rxFifoThreshold/BMI_FIFO_UNITS - 1) << BMI_RX_FIFO_THRESHOLD_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_rfp, tmpReg); + + { +#ifdef FM_NO_THRESHOLD_REG + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if (revInfo.majorRev > 1) +#endif /* FM_NO_THRESHOLD_REG */ + /* always allow access to the extra resources */ + WRITE_UINT32(p_Regs->fmbm_reth, BMI_RX_FIFO_THRESHOLD_BC); + } + + /* frame end parameters */ + tmpReg = 0; + tmpReg |= ((uint32_t)p_Params->cheksumLastBytesIgnore << BMI_RX_FRAME_END_CS_IGNORE_SHIFT); + tmpReg |= ((uint32_t)p_Params->cutBytesFromEnd<< BMI_RX_FRAME_END_CUT_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_rfed, tmpReg); + + /* IC parameters */ + tmpReg = 0; + tmpReg |= (((uint32_t)p_Params->intContext.extBufOffset/OFFSET_UNITS) << BMI_IC_TO_EXT_SHIFT); + tmpReg |= (((uint32_t)p_Params->intContext.intContextOffset/OFFSET_UNITS) << BMI_IC_FROM_INT_SHIFT); + tmpReg |= (((uint32_t)p_Params->intContext.size/OFFSET_UNITS) << BMI_IC_SIZE_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_ricp, tmpReg); + + if (!p_FmPort->imEn) + { + /* check if the largest external buffer pool is large enough */ + if(p_Params->bufMargins.startMargins + MIN_EXT_BUF_SIZE + p_Params->bufMargins.endMargins > p_FmPort->rxPoolsParams.largestBufSize) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("bufMargins.startMargins (%d) + minimum buf size (64) + bufMargins.endMargins (%d) is larger than maximum external buffer size (%d)", + p_Params->bufMargins.startMargins, p_Params->bufMargins.endMargins, p_FmPort->rxPoolsParams.largestBufSize)); + + /* buffer margins */ + tmpReg = 0; + tmpReg |= (((uint32_t)p_Params->bufMargins.startMargins) << BMI_EXT_BUF_MARG_START_SHIFT); + tmpReg |= (((uint32_t)p_Params->bufMargins.endMargins) << 
BMI_EXT_BUF_MARG_END_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_rebm, tmpReg); + } + + + if(p_FmPort->internalBufferOffset) + { + tmpReg = (uint32_t)((p_FmPort->internalBufferOffset % OFFSET_UNITS) ? + (p_FmPort->internalBufferOffset/OFFSET_UNITS + 1): + (p_FmPort->internalBufferOffset/OFFSET_UNITS)); + p_FmPort->internalBufferOffset = (uint8_t)(tmpReg * OFFSET_UNITS); + WRITE_UINT32(p_Regs->fmbm_rim, tmpReg << BMI_IM_FOF_SHIFT); + } + + /* NIA */ + if (p_FmPort->imEn) + WRITE_UINT32(p_Regs->fmbm_rfne, NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_RX); + else + { + tmpReg = 0; + if (p_Params->forwardReuseIntContext) + tmpReg |= BMI_PORT_RFNE_FRWD_RPD; + /* L3/L4 checksum verify is enabled by default. */ + /*tmpReg |= BMI_PORT_RFNE_FRWD_DCL4C;*/ + WRITE_UINT32(p_Regs->fmbm_rfne, tmpReg | NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME); + } + WRITE_UINT32(p_Regs->fmbm_rfene, NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR); + + /* command attribute */ + tmpReg = BMI_CMD_RX_MR_DEF; + if (!p_FmPort->imEn) + { + tmpReg |= BMI_CMD_ATTR_ORDER; + if(p_Params->syncReq) + tmpReg |= BMI_CMD_ATTR_SYNC; + tmpReg |= ((uint32_t)p_Params->color << BMI_CMD_ATTR_COLOR_SHIFT); + } + + WRITE_UINT32(p_Regs->fmbm_rfca, tmpReg); + + /* default queues */ + if (!p_FmPort->imEn) + { + WRITE_UINT32(p_Regs->fmbm_rfqid, p_Params->dfltFqid); + WRITE_UINT32(p_Regs->fmbm_refqid, p_Params->errFqid); + } + + /* set counters */ + WRITE_UINT32(p_Regs->fmbm_rstc, BMI_COUNTERS_EN); + + performanceContersParams.taskCompVal = (uint8_t)p_FmPort->tasks.num; + performanceContersParams.queueCompVal = 1; + performanceContersParams.dmaCompVal =(uint8_t) p_FmPort->openDmas.num; + performanceContersParams.fifoCompVal = p_FmPort->fifoBufs.num; + if((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &performanceContersParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + WRITE_UINT32(p_Regs->fmbm_rpc, BMI_COUNTERS_EN); + + /* error/status mask - check that if discard OV is set, no + discard is required for specific errors.*/ + WRITE_UINT32(p_Regs->fmbm_rfsdm, p_Params->errorsToDiscard); + + errorsToEnq = (RX_ERRS_TO_ENQ & ~p_Params->errorsToDiscard); + WRITE_UINT32(p_Regs->fmbm_rfsem, errorsToEnq); + +#ifdef FM_BMI_TO_RISC_ENQ_ERRATA_FMANc + if((GET_UINT32(p_Regs->fmbm_rfene) && NIA_ENG_MASK)== NIA_ENG_FM_CTL) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("NIA not supported at this stage")); +#endif /* FM_BMI_TO_RISC_ENQ_ERRATA_FMANc */ + + return E_OK; +} + +static t_Error BmiTxPortInit(t_FmPort *p_FmPort) +{ + t_FmPortTxBmiRegs *p_Regs = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs; + uint32_t tmpReg; + t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam; + /*uint32_t rateCountUnit;*/ + t_FmPortPerformanceCnt performanceContersParams; + + /* check that port is not busy */ + if (GET_UINT32(p_Regs->fmbm_tcfg) & BMI_PORT_CFG_EN) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port is already enabled")); + + tmpReg = 0; + if (p_FmPort->imEn) + tmpReg |= BMI_PORT_CFG_IM; + + WRITE_UINT32(p_Regs->fmbm_tcfg, tmpReg); + + /* Configure dma attributes */ + tmpReg = 0; + tmpReg |= (uint32_t)p_Params->dmaSwapData << BMI_DMA_ATTR_SWP_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaIntContextCacheAttr << BMI_DMA_ATTR_IC_CACHE_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaHeaderCacheAttr << BMI_DMA_ATTR_HDR_CACHE_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaScatterGatherCacheAttr << BMI_DMA_ATTR_SG_CACHE_SHIFT; + + WRITE_UINT32(p_Regs->fmbm_tda, tmpReg); + + /* Configure Tx Fifo params */ + tmpReg = 0; + tmpReg |= ((p_Params->txFifoMinFillLevel/BMI_FIFO_UNITS) << BMI_TX_FIFO_MIN_FILL_SHIFT); + 
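+    /*
+     * Packing note (illustrative, not original): fmbm_tfp combines three
+     * fields, all expressed in BMI_FIFO_UNITS granularity - the Tx FIFO
+     * minimum fill level just OR'ed in above, the dequeue pipeline depth
+     * (stored minus one) and the low-comfort level. Assuming BMI_FIFO_UNITS
+     * is 256, a 512 byte minimum fill level contributes:
+     *
+     *     (512 / 256) << BMI_TX_FIFO_MIN_FILL_SHIFT   // i.e. 2 << shift
+     */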
tmpReg |= (((uint32_t)p_FmPort->txFifoDeqPipelineDepth - 1) << BMI_TX_FIFO_PIPELINE_DEPTH_SHIFT); + tmpReg |= ((p_Params->txFifoLowComfLevel/BMI_FIFO_UNITS - 1) << BMI_TX_LOW_COMF_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_tfp, tmpReg); + + /* frame end parameters */ + tmpReg = 0; + tmpReg |= ((uint32_t)p_Params->cheksumLastBytesIgnore << BMI_TX_FRAME_END_CS_IGNORE_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_tfed, tmpReg); + + if (!p_FmPort->imEn) + { + /* IC parameters */ + tmpReg = 0; + tmpReg |= (((uint32_t)p_Params->intContext.extBufOffset/OFFSET_UNITS) << BMI_IC_TO_EXT_SHIFT); + tmpReg |= (((uint32_t)p_Params->intContext.intContextOffset/OFFSET_UNITS) << BMI_IC_FROM_INT_SHIFT); + tmpReg |= (((uint32_t)p_Params->intContext.size/OFFSET_UNITS) << BMI_IC_SIZE_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_ticp, tmpReg); + } + + /* NIA */ + if (p_FmPort->imEn) + { + WRITE_UINT32(p_Regs->fmbm_tfne, NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX); + WRITE_UINT32(p_Regs->fmbm_tfene, NIA_ENG_FM_CTL | NIA_FM_CTL_AC_IND_MODE_TX); + } + else + { + WRITE_UINT32(p_Regs->fmbm_tfne, NIA_ENG_QMI_DEQ); + WRITE_UINT32(p_Regs->fmbm_tfene, NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR); + /* The line bellow is a trick so the FM will not release the buffer + to BM nor will try to enq the frame to QM */ + if(!p_Params->dfltFqid && p_Params->dontReleaseBuf) + { + /* override fmbm_tcfqid 0 with a false non-0 value. This will force FM to + * act acording to tfene. Otherwise, if fmbm_tcfqid is 0 the FM will release + * buffers to BM regardless of fmbm_tfene + */ + WRITE_UINT32(p_Regs->fmbm_tcfqid, 0xFFFFFF); + WRITE_UINT32(p_Regs->fmbm_tfene, NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE); + } + } + + /* command attribute */ + tmpReg = BMI_CMD_TX_MR_DEF; + if (p_FmPort->imEn) + tmpReg |= BMI_CMD_MR_DEAS; + else + { + tmpReg |= BMI_CMD_ATTR_ORDER; + /* if we set syncReq, we may get stuck when HC command is running */ + /*if(p_Params->syncReq) + tmpReg |= BMI_CMD_ATTR_SYNC;*/ + tmpReg |= ((uint32_t)p_Params->color << BMI_CMD_ATTR_COLOR_SHIFT); + } + + WRITE_UINT32(p_Regs->fmbm_tfca, tmpReg); + + /* default queues */ + if (!p_FmPort->imEn) + { + if(p_Params->dfltFqid || !p_Params->dontReleaseBuf) + WRITE_UINT32(p_Regs->fmbm_tcfqid, p_Params->dfltFqid); + WRITE_UINT32(p_Regs->fmbm_tfeqid, p_Params->errFqid); + } + + /* statistics & performance counters */ + WRITE_UINT32(p_Regs->fmbm_tstc, BMI_COUNTERS_EN); + + performanceContersParams.taskCompVal = (uint8_t)p_FmPort->tasks.num; + performanceContersParams.queueCompVal = 1; + performanceContersParams.dmaCompVal = (uint8_t)p_FmPort->openDmas.num; + performanceContersParams.fifoCompVal = p_FmPort->fifoBufs.num; + FM_PORT_SetPerformanceCountersParams(p_FmPort, &performanceContersParams); + + WRITE_UINT32(p_Regs->fmbm_tpc, BMI_COUNTERS_EN); + + return E_OK; +} + +static t_Error BmiOhPortInit(t_FmPort *p_FmPort) +{ + t_FmPortOhBmiRegs *p_Regs = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs; + uint32_t tmpReg, errorsToEnq = 0; + t_FmPortDriverParam *p_Params = p_FmPort->p_FmPortDriverParam; + t_FmPortPerformanceCnt performanceContersParams; + t_Error err; + + /* check that port is not busy */ + if (GET_UINT32(p_Regs->fmbm_ocfg) & BMI_PORT_CFG_EN) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port is already enabled")); + + /* Configure dma attributes */ + tmpReg = 0; + tmpReg |= (uint32_t)p_Params->dmaSwapData << BMI_DMA_ATTR_SWP_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaIntContextCacheAttr << BMI_DMA_ATTR_IC_CACHE_SHIFT; + tmpReg |= (uint32_t)p_Params->dmaHeaderCacheAttr << BMI_DMA_ATTR_HDR_CACHE_SHIFT; + tmpReg |= 
(uint32_t)p_Params->dmaScatterGatherCacheAttr << BMI_DMA_ATTR_SG_CACHE_SHIFT; + if(p_Params->dmaWriteOptimize) + tmpReg |= BMI_DMA_ATTR_WRITE_OPTIMIZE; + + WRITE_UINT32(p_Regs->fmbm_oda, tmpReg); + + /* IC parameters */ + tmpReg = 0; + tmpReg |= (((uint32_t)p_Params->intContext.extBufOffset/OFFSET_UNITS) << BMI_IC_TO_EXT_SHIFT); + tmpReg |= (((uint32_t)p_Params->intContext.intContextOffset/OFFSET_UNITS) << BMI_IC_FROM_INT_SHIFT); + tmpReg |= (((uint32_t)p_Params->intContext.size/OFFSET_UNITS) << BMI_IC_SIZE_SHIFT); + + WRITE_UINT32(p_Regs->fmbm_oicp, tmpReg); + + /* NIA */ + WRITE_UINT32(p_Regs->fmbm_ofdne, NIA_ENG_QMI_DEQ); + + if (p_FmPort->portType==e_FM_PORT_TYPE_OH_HOST_COMMAND) + WRITE_UINT32(p_Regs->fmbm_ofene, NIA_ENG_QMI_ENQ); + else + WRITE_UINT32(p_Regs->fmbm_ofene, NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR); + + /* command attribute */ + if (p_FmPort->portType==e_FM_PORT_TYPE_OH_HOST_COMMAND) + tmpReg = BMI_CMD_MR_DEAS | BMI_CMD_MR_MA; + else + tmpReg = BMI_CMD_ATTR_ORDER | BMI_CMD_MR_DEAS | BMI_CMD_MR_MA; + + if(p_Params->syncReq) + tmpReg |= BMI_CMD_ATTR_SYNC; + tmpReg |= ((uint32_t)p_Params->color << BMI_CMD_ATTR_COLOR_SHIFT); + WRITE_UINT32(p_Regs->fmbm_ofca, tmpReg); + + /* No discard - all error frames go to error queue */ + if (p_Params->frmDiscardOverride) + tmpReg = BMI_PORT_CFG_FDOVR; + else + tmpReg = 0; + WRITE_UINT32(p_Regs->fmbm_ocfg, tmpReg); + + if(p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + { + WRITE_UINT32(p_Regs->fmbm_ofsdm, p_Params->errorsToDiscard); + + errorsToEnq = (OP_ERRS_TO_ENQ & ~p_Params->errorsToDiscard); + WRITE_UINT32(p_Regs->fmbm_ofsem, errorsToEnq); + + /* NIA */ + WRITE_UINT32(p_Regs->fmbm_ofne, NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME); + { +#ifdef FM_NO_OP_OBSERVED_POOLS + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 4) && (p_Params->enBufPoolDepletion)) +#endif /* FM_NO_OP_OBSERVED_POOLS */ + { + /* define external buffer pools */ + err = SetExtBufferPools(p_FmPort); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + } + else + /* NIA */ + WRITE_UINT32(p_Regs->fmbm_ofne, NIA_ENG_FM_CTL | NIA_FM_CTL_AC_HC); + + /* default queues */ + WRITE_UINT32(p_Regs->fmbm_ofqid, p_Params->dfltFqid); + WRITE_UINT32(p_Regs->fmbm_oefqid, p_Params->errFqid); + + if(p_FmPort->internalBufferOffset) + { + tmpReg = (uint32_t)((p_FmPort->internalBufferOffset % OFFSET_UNITS) ? 
+ (p_FmPort->internalBufferOffset/OFFSET_UNITS + 1): + (p_FmPort->internalBufferOffset/OFFSET_UNITS)); + p_FmPort->internalBufferOffset = (uint8_t)(tmpReg * OFFSET_UNITS); + WRITE_UINT32(p_Regs->fmbm_oim, tmpReg << BMI_IM_FOF_SHIFT); + } + /* statistics & performance counters */ + WRITE_UINT32(p_Regs->fmbm_ostc, BMI_COUNTERS_EN); + + performanceContersParams.taskCompVal = (uint8_t)p_FmPort->tasks.num; + performanceContersParams.queueCompVal = 0; + performanceContersParams.dmaCompVal = (uint8_t)p_FmPort->openDmas.num; + performanceContersParams.fifoCompVal = p_FmPort->fifoBufs.num; + FM_PORT_SetPerformanceCountersParams(p_FmPort, &performanceContersParams); + + WRITE_UINT32(p_Regs->fmbm_opc, BMI_COUNTERS_EN); + + return E_OK; +} + +static t_Error QmiInit(t_FmPort *p_FmPort) +{ + t_FmPortDriverParam *p_Params = NULL; + uint32_t tmpReg; + + p_Params = p_FmPort->p_FmPortDriverParam; + + /* check that port is not busy */ + if(((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX)) && + (GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc) & QMI_PORT_CFG_EN)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port is already enabled")); + + /* enable & clear counters */ + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc, QMI_PORT_CFG_EN_COUNTERS); + + /* The following is done for non-Rx ports only */ + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + { + if((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || + (p_FmPort->portType == e_FM_PORT_TYPE_TX)) + { + /* define dequeue NIA */ + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn, NIA_ENG_BMI | NIA_BMI_AC_TX); + /* define enqueue NIA */ + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE); + } + else /* for HC & OP */ + { + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn, NIA_ENG_BMI | NIA_BMI_AC_FETCH); + /* define enqueue NIA */ + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, NIA_ENG_BMI | NIA_BMI_AC_RELEASE); + } + + /* configure dequeue */ + tmpReg = 0; + if(p_Params->deqHighPriority) + tmpReg |= QMI_DEQ_CFG_PRI; + + switch(p_Params->deqType) + { + case(e_FM_PORT_DEQ_TYPE1): + tmpReg |= QMI_DEQ_CFG_TYPE1; + break; + case(e_FM_PORT_DEQ_TYPE2): + tmpReg |= QMI_DEQ_CFG_TYPE2; + break; + case(e_FM_PORT_DEQ_TYPE3): + tmpReg |= QMI_DEQ_CFG_TYPE3; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid dequeue type")); + } + +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + switch(p_Params->deqPrefetchOption) + { + case(e_FM_PORT_DEQ_NO_PREFETCH): + /* Do nothing - QMI_DEQ_CFG_PREFETCH_WAITING_TNUM | QMI_DEQ_CFG_PREFETCH_1_FRAME = 0 */ + break; + case(e_FM_PORT_DEQ_PARTIAL_PREFETCH): + tmpReg |= QMI_DEQ_CFG_PREFETCH_WAITING_TNUM | QMI_DEQ_CFG_PREFETCH_3_FRAMES; + break; + case(e_FM_PORT_DEQ_FULL_PREFETCH): + tmpReg |= QMI_DEQ_CFG_PREFETCH_NO_TNUM | QMI_DEQ_CFG_PREFETCH_3_FRAMES; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid dequeue prefetch option")); + } +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + + tmpReg |= p_Params->deqByteCnt; + tmpReg |= (uint32_t)p_Params->deqSubPortal << QMI_DEQ_CFG_SUBPORTAL_SHIFT; + + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndc, tmpReg); + } + else /* rx port */ + /* define enqueue NIA */ + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, NIA_ENG_BMI | NIA_BMI_AC_RELEASE); + + return E_OK; +} + +static t_Error BmiRxPortCheckAndGetCounterPtr(t_FmPort *p_FmPort, e_FmPortCounters counter, volatile uint32_t **p_Ptr) +{ + t_FmPortRxBmiRegs 
*p_BmiRegs = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs; + + /* check that counters are enabled */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_CYCLE): + case(e_FM_PORT_COUNTERS_TASK_UTIL): + case(e_FM_PORT_COUNTERS_QUEUE_UTIL): + case(e_FM_PORT_COUNTERS_DMA_UTIL): + case(e_FM_PORT_COUNTERS_FIFO_UTIL): + case(e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION): + /* performance counters - may be read when disabled */ + break; + case(e_FM_PORT_COUNTERS_FRAME): + case(e_FM_PORT_COUNTERS_DISCARD_FRAME): + case(e_FM_PORT_COUNTERS_RX_BAD_FRAME): + case(e_FM_PORT_COUNTERS_RX_LARGE_FRAME): + case(e_FM_PORT_COUNTERS_RX_FILTER_FRAME): + case(e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): + case(e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD): + case(e_FM_PORT_COUNTERS_DEALLOC_BUF): + if(!(GET_UINT32(p_BmiRegs->fmbm_rstc) & BMI_COUNTERS_EN)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Rx ports")); + } + + /* Set counter */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_CYCLE): + *p_Ptr = &p_BmiRegs->fmbm_rccn; + break; + case(e_FM_PORT_COUNTERS_TASK_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_rtuc; + break; + case(e_FM_PORT_COUNTERS_QUEUE_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_rrquc; + break; + case(e_FM_PORT_COUNTERS_DMA_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_rduc; + break; + case(e_FM_PORT_COUNTERS_FIFO_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_rfuc; + break; + case(e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION): + *p_Ptr = &p_BmiRegs->fmbm_rpac; + break; + case(e_FM_PORT_COUNTERS_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_rfrc; + break; + case(e_FM_PORT_COUNTERS_DISCARD_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_rfcd; + break; + case(e_FM_PORT_COUNTERS_RX_BAD_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_rfbc; + break; + case(e_FM_PORT_COUNTERS_RX_LARGE_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_rlfc; + break; + case(e_FM_PORT_COUNTERS_RX_FILTER_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_rffc; + break; + case(e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): +#ifdef FM_PORT_COUNTERS_ERRATA_FMANg + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Requested counter is not available in rev1")); + } +#endif /* FM_PORT_COUNTERS_ERRATA_FMANg */ + *p_Ptr = &p_BmiRegs->fmbm_rfldec; + break; + case(e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD): + *p_Ptr = &p_BmiRegs->fmbm_rodc; + break; + case(e_FM_PORT_COUNTERS_DEALLOC_BUF): + *p_Ptr = &p_BmiRegs->fmbm_rbdc; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Rx ports")); + } + + return E_OK; +} + +static t_Error BmiTxPortCheckAndGetCounterPtr(t_FmPort *p_FmPort, e_FmPortCounters counter, volatile uint32_t **p_Ptr) +{ + t_FmPortTxBmiRegs *p_BmiRegs = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs; + + /* check that counters are enabled */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_CYCLE): + case(e_FM_PORT_COUNTERS_TASK_UTIL): + case(e_FM_PORT_COUNTERS_QUEUE_UTIL): + case(e_FM_PORT_COUNTERS_DMA_UTIL): + case(e_FM_PORT_COUNTERS_FIFO_UTIL): + /* performance counters - may be read when disabled */ + break; + case(e_FM_PORT_COUNTERS_FRAME): + case(e_FM_PORT_COUNTERS_DISCARD_FRAME): + case(e_FM_PORT_COUNTERS_LENGTH_ERR): + case(e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): + case(e_FM_PORT_COUNTERS_DEALLOC_BUF): + if(!(GET_UINT32(p_BmiRegs->fmbm_tstc) & BMI_COUNTERS_EN)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + 
default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Tx ports")); + } + + /* Set counter */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_CYCLE): + *p_Ptr = &p_BmiRegs->fmbm_tccn; + break; + case(e_FM_PORT_COUNTERS_TASK_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_ttuc; + break; + case(e_FM_PORT_COUNTERS_QUEUE_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_ttcquc; + break; + case(e_FM_PORT_COUNTERS_DMA_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_tduc; + break; + case(e_FM_PORT_COUNTERS_FIFO_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_tfuc; + break; + case(e_FM_PORT_COUNTERS_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_tfrc; + break; + case(e_FM_PORT_COUNTERS_DISCARD_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_tfdc; + break; + case(e_FM_PORT_COUNTERS_LENGTH_ERR): + *p_Ptr = &p_BmiRegs->fmbm_tfledc; + break; + case(e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): + *p_Ptr = &p_BmiRegs->fmbm_tfufdc; + break; + case(e_FM_PORT_COUNTERS_DEALLOC_BUF): + *p_Ptr = &p_BmiRegs->fmbm_tbdc; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Tx ports")); + } + + return E_OK; +} + +static t_Error BmiOhPortCheckAndGetCounterPtr(t_FmPort *p_FmPort, e_FmPortCounters counter, volatile uint32_t **p_Ptr) +{ + t_FmPortOhBmiRegs *p_BmiRegs = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs; + + /* check that counters are enabled */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_CYCLE): + case(e_FM_PORT_COUNTERS_TASK_UTIL): + case(e_FM_PORT_COUNTERS_DMA_UTIL): + case(e_FM_PORT_COUNTERS_FIFO_UTIL): + /* performance counters - may be read when disabled */ + break; + case(e_FM_PORT_COUNTERS_FRAME): + case(e_FM_PORT_COUNTERS_DISCARD_FRAME): + case(e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): + case(e_FM_PORT_COUNTERS_WRED_DISCARD): + case(e_FM_PORT_COUNTERS_LENGTH_ERR): + case(e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): + case(e_FM_PORT_COUNTERS_DEALLOC_BUF): + if(!(GET_UINT32(p_BmiRegs->fmbm_ostc) & BMI_COUNTERS_EN)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + case(e_FM_PORT_COUNTERS_RX_FILTER_FRAME): /* only valid for offline parsing */ + if(p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Host Command ports")); + if(!(GET_UINT32(p_BmiRegs->fmbm_ostc) & BMI_COUNTERS_EN)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for O/H ports")); + } + + /* Set counter */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_CYCLE): + *p_Ptr = &p_BmiRegs->fmbm_occn; + break; + case(e_FM_PORT_COUNTERS_TASK_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_otuc; + break; + case(e_FM_PORT_COUNTERS_DMA_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_oduc; + break; + case(e_FM_PORT_COUNTERS_FIFO_UTIL): + *p_Ptr = &p_BmiRegs->fmbm_ofuc; + break; + case(e_FM_PORT_COUNTERS_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_ofrc; + break; + case(e_FM_PORT_COUNTERS_DISCARD_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_ofdc; + break; + case(e_FM_PORT_COUNTERS_RX_FILTER_FRAME): + *p_Ptr = &p_BmiRegs->fmbm_offc; + break; + case(e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR): +#ifdef FM_PORT_COUNTERS_ERRATA_FMANg + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("Requested counter is not available in rev1")); + } +#endif /* FM_PORT_COUNTERS_ERRATA_FMANg */ + *p_Ptr = &p_BmiRegs->fmbm_ofldec; + break; + 
case(e_FM_PORT_COUNTERS_WRED_DISCARD): + *p_Ptr = &p_BmiRegs->fmbm_ofwdc; + break; + case(e_FM_PORT_COUNTERS_LENGTH_ERR): + *p_Ptr = &p_BmiRegs->fmbm_ofledc; + break; + case(e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT): + *p_Ptr = &p_BmiRegs->fmbm_ofufdc; + break; + case(e_FM_PORT_COUNTERS_DEALLOC_BUF): + *p_Ptr = &p_BmiRegs->fmbm_obdc; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for O/H ports")); + } + + return E_OK; +} + +static t_Error AdditionalPrsParams(t_FmPort *p_FmPort, t_FmPcdPrsAdditionalHdrParams *p_HdrParams, uint32_t *p_SoftSeqAttachReg) +{ + uint8_t hdrNum, Ipv4HdrNum; + u_FmPcdHdrPrsOpts *p_prsOpts; + uint32_t tmpReg = 0, tmpPrsOffset; + + if(IS_PRIVATE_HEADER(p_HdrParams->hdr) || IS_SPECIAL_HEADER(p_HdrParams->hdr)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("No additional parameters for private or special headers.")); + + if(p_HdrParams->errDisable) + tmpReg |= PRS_HDR_ERROR_DIS; + + /* Set parser options */ + if(p_HdrParams->usePrsOpts) + { + p_prsOpts = &p_HdrParams->prsOpts; + switch(p_HdrParams->hdr) + { + case(HEADER_TYPE_MPLS): + if(p_prsOpts->mplsPrsOptions.labelInterpretationEnable) + tmpReg |= PRS_HDR_MPLS_LBL_INTER_EN; + GET_PRS_HDR_NUM(hdrNum, p_prsOpts->mplsPrsOptions.nextParse); + if(hdrNum == ILLEGAL_HDR_NUM) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); + GET_PRS_HDR_NUM(Ipv4HdrNum, HEADER_TYPE_IPv4); + if(hdrNum < Ipv4HdrNum) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("Header must be equal or higher than IPv4")); + tmpReg |= ((uint32_t)hdrNum * PRS_HDR_ENTRY_SIZE) << PRS_HDR_MPLS_NEXT_HDR_SHIFT; + break; + case(HEADER_TYPE_PPPoE): + if(p_prsOpts->pppoePrsOptions.enableMTUCheck) + { +#ifdef FM_PPPOE_NO_MTU_CHECK + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Invalid parser option")); + else +#endif /* FM_PPPOE_NO_MTU_CHECK */ + tmpReg |= PRS_HDR_PPPOE_MTU_CHECK_EN; + } + break; + case(HEADER_TYPE_IPv6): + if(p_prsOpts->ipv6PrsOptions.routingHdrDisable) + tmpReg |= PRS_HDR_IPV6_ROUTE_HDR_DIS; + break; + case(HEADER_TYPE_TCP): + if(p_prsOpts->tcpPrsOptions.padIgnoreChecksum) + tmpReg |= PRS_HDR_TCP_PAD_REMOVAL; + break; + case(HEADER_TYPE_UDP): + if(p_prsOpts->udpPrsOptions.padIgnoreChecksum) + tmpReg |= PRS_HDR_TCP_PAD_REMOVAL; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid header")); + } + } + + /* set software parsing (address is devided in 2 since parser uses 2 byte access. 
*/ + if(p_HdrParams->swPrsEnable) + { + tmpPrsOffset = FmPcdGetSwPrsOffset(p_FmPort->h_FmPcd, p_HdrParams->hdr, p_HdrParams->indexPerHdr); + if(tmpPrsOffset == ILLEGAL_BASE) + RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG); + tmpReg |= (PRS_HDR_SW_PRS_EN | tmpPrsOffset); + } + *p_SoftSeqAttachReg = tmpReg; + + return E_OK; +} + +static uint32_t GetPortSchemeBindParams(t_Handle h_FmPort, t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + uint32_t walking1Mask = 0x80000000, tmp; + uint8_t idx = 0; + + p_SchemeBind->netEnvId = p_FmPort->netEnvId; + p_SchemeBind->hardwarePortId = p_FmPort->hardwarePortId; + p_SchemeBind->useClsPlan = p_FmPort->useClsPlan; + p_SchemeBind->numOfSchemes = 0; + tmp = p_FmPort->schemesPerPortVector; + if(tmp) + { + while (tmp) + { + if(tmp & walking1Mask) + { + p_SchemeBind->schemesIds[p_SchemeBind->numOfSchemes] = FmPcdKgGetSchemeSwId(p_FmPort->h_FmPcd, idx); + p_SchemeBind->numOfSchemes++; + tmp &= ~walking1Mask; + } + walking1Mask >>= 1; + idx++; + } + } + + return tmp; +} + +static t_Error BuildBufferStructure(t_FmPort *p_FmPort) +{ + uint32_t tmp; + + ASSERT_COND(p_FmPort); + + /* Align start of internal context data to 16 byte */ + p_FmPort->p_FmPortDriverParam->intContext.extBufOffset = + (uint16_t)((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize & (OFFSET_UNITS-1)) ? + ((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize + OFFSET_UNITS) & ~(uint16_t)(OFFSET_UNITS-1)) : + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize); + + /* Translate margin and intContext params to FM parameters */ +#ifdef FM_INCORRECT_CS_ERRATA_FMAN18 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + uint8_t mod = p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign % 256; + if(mod) + { + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign += (256-mod); + DBG(WARNING,("dataAlign modified to next 256 to conform with FMAN18 errata\n")); + } + } + } +#endif /* FM_INCORRECT_CS_ERRATA_FMAN18 */ + + /* Initialize with illegal value. Later we'll set legal values. */ + p_FmPort->bufferOffsets.prsResultOffset = (uint32_t)ILLEGAL_BASE; + p_FmPort->bufferOffsets.timeStampOffset = (uint32_t)ILLEGAL_BASE; + p_FmPort->bufferOffsets.hashResultOffset= (uint32_t)ILLEGAL_BASE; + p_FmPort->bufferOffsets.pcdInfoOffset = (uint32_t)ILLEGAL_BASE; +#ifdef DEBUG + p_FmPort->bufferOffsets.debugOffset = (uint32_t)ILLEGAL_BASE; +#endif /* DEBUG */ + + /* Internally the driver supports 4 options + 1. prsResult/timestamp/hashResult selection (in fact 8 options, but for simplicity we'll + relate to it as 1). + 2. All IC context (from AD) except debug. + 3. Debug information only. + 4. All IC context (from AD) including debug. + Note, that if user asks for prsResult/timestamp/hashResult and Debug, we give them (4) */ + + /* This 'if' covers options 2 & 4. We copy from beginning of context with or without debug. 
*/ + /* If passAllOtherPCDInfo explicitly requested, or passDebugInfo+prs/ts --> we also take passAllOtherPCDInfo */ + if ((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passAllOtherPCDInfo) +#ifdef DEBUG + || (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passDebugInfo && + (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult || + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp || + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passHashResult)) +#endif /* DEBUG */ + ) + { +#ifdef DEBUG + if(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passDebugInfo) + { + p_FmPort->p_FmPortDriverParam->intContext.size = 240; + p_FmPort->bufferOffsets.debugOffset = + (uint32_t)(p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + 128); + } + else +#endif /* DEBUG */ + p_FmPort->p_FmPortDriverParam->intContext.size = 128; /* must be aligned to 16 */ + /* Start copying data after 16 bytes (FD) from the beginning of the internal context */ + p_FmPort->p_FmPortDriverParam->intContext.intContextOffset = 16; + + if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passAllOtherPCDInfo) + p_FmPort->bufferOffsets.pcdInfoOffset = p_FmPort->p_FmPortDriverParam->intContext.extBufOffset; + if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult) + p_FmPort->bufferOffsets.prsResultOffset = + (uint32_t)(p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + 16); + if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp) + p_FmPort->bufferOffsets.timeStampOffset = + (uint32_t)(p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + 48); + if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passHashResult) + p_FmPort->bufferOffsets.hashResultOffset = + (uint32_t)(p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + 56); + } + else + { +#ifdef DEBUG + if (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passDebugInfo) + { + /* This case covers option 3 */ + p_FmPort->p_FmPortDriverParam->intContext.size = 112; + p_FmPort->p_FmPortDriverParam->intContext.intContextOffset = 144; + p_FmPort->bufferOffsets.debugOffset = p_FmPort->p_FmPortDriverParam->intContext.extBufOffset; + } + else +#endif /* DEBUG */ + { + /* This case covers the options under 1 */ + /* Copy size must be in 16-byte granularity. */ + p_FmPort->p_FmPortDriverParam->intContext.size = + (uint16_t)((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult ? 32 : 0) + + ((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp || + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passHashResult) ? 16 : 0)); + + /* Align start of internal context data to 16 byte */ + p_FmPort->p_FmPortDriverParam->intContext.intContextOffset = + (uint8_t)(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult ? 32 : + ((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp || + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passHashResult) ? 64 : 0)); + + if(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult) + p_FmPort->bufferOffsets.prsResultOffset = p_FmPort->p_FmPortDriverParam->intContext.extBufOffset; + if(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp) + p_FmPort->bufferOffsets.timeStampOffset = p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult ? 
+ (p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + sizeof(t_FmPrsResult)) : + p_FmPort->p_FmPortDriverParam->intContext.extBufOffset; + if(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passHashResult) + /* If PR is not requested, whether TS is requested or not, IC will be copied from TS */ + p_FmPort->bufferOffsets.hashResultOffset = p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult ? + (p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + sizeof(t_FmPrsResult) + 8) : + p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + 8; + } + } + + p_FmPort->p_FmPortDriverParam->bufMargins.startMargins = + (uint16_t)(p_FmPort->p_FmPortDriverParam->intContext.extBufOffset + + p_FmPort->p_FmPortDriverParam->intContext.size); +#ifdef FM_CAPWAP_SUPPORT + /* save extra space for manip in both external and internal buffers */ + if(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace) + { + if((p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace + FRAG_EXTRA_SPACE) >= 256) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace + 32 can not be equal or larger to 256")); + p_FmPort->bufferOffsets.manipOffset = p_FmPort->p_FmPortDriverParam->bufMargins.startMargins; + p_FmPort->p_FmPortDriverParam->bufMargins.startMargins += (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace + FRAG_EXTRA_SPACE); + p_FmPort->p_FmPortDriverParam->internalBufferOffset = + (uint8_t)(p_FmPort->p_FmPortDriverParam->bufferPrefixContent.manipExtraSpace + FRAG_EXTRA_SPACE); + } +#endif /* FM_CAPWAP_SUPPORT */ + + /* align data start */ + tmp = (uint32_t)(p_FmPort->p_FmPortDriverParam->bufMargins.startMargins % + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign); + if (tmp) + p_FmPort->p_FmPortDriverParam->bufMargins.startMargins += (p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign-tmp); + p_FmPort->bufferOffsets.dataOffset = p_FmPort->p_FmPortDriverParam->bufMargins.startMargins; + p_FmPort->internalBufferOffset = p_FmPort->p_FmPortDriverParam->internalBufferOffset; + + return E_OK; +} + +static t_Error SetPcd(t_Handle h_FmPort, t_FmPortPcdParams *p_PcdParams) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err = E_OK; + uint32_t tmpReg; + volatile uint32_t *p_BmiNia=NULL; + volatile uint32_t *p_BmiPrsNia=NULL; + volatile uint32_t *p_BmiPrsStartOffset=NULL; + volatile uint32_t *p_BmiInitPrsResult=NULL; + volatile uint32_t *p_BmiCcBase=NULL; + uint8_t hdrNum, L3HdrNum, greHdrNum; + int i; + bool isEmptyClsPlanGrp; + uint32_t tmpHxs[FM_PCD_PRS_NUM_OF_HDRS]; + uint16_t absoluteProfileId; + uint8_t physicalSchemeId; + uint32_t ccTreePhysOffset; + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independant mode ports only")); + + if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX) && + (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + + p_FmPort->netEnvId = (uint8_t)(PTR_TO_UINT(p_PcdParams->h_NetEnv)-1); + + p_FmPort->pcdEngines = 0; + + /* initialize p_FmPort->pcdEngines field in port's structure */ + switch(p_PcdParams->pcdSupport) + { + case(e_FM_PORT_PCD_SUPPORT_NONE): + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("No PCD configuration required if 
e_FM_PORT_PCD_SUPPORT_NONE selected")); + case(e_FM_PORT_PCD_SUPPORT_PRS_ONLY): + p_FmPort->pcdEngines |= FM_PCD_PRS; + break; + case(e_FM_PORT_PCD_SUPPORT_PLCR_ONLY): + if (CHECK_FM_CTL_AC_POST_FETCH_PCD(p_FmPort->savedBmiNia)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("parser support is required")); + p_FmPort->pcdEngines |= FM_PCD_PLCR; + break; + case(e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR): + p_FmPort->pcdEngines |= FM_PCD_PRS; + p_FmPort->pcdEngines |= FM_PCD_PLCR; + break; + case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG): + p_FmPort->pcdEngines |= FM_PCD_PRS; + p_FmPort->pcdEngines |= FM_PCD_KG; + break; + case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC): + p_FmPort->pcdEngines |= FM_PCD_PRS; + p_FmPort->pcdEngines |= FM_PCD_CC; + p_FmPort->pcdEngines |= FM_PCD_KG; + break; + case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR): + p_FmPort->pcdEngines |= FM_PCD_PRS; + p_FmPort->pcdEngines |= FM_PCD_KG; + p_FmPort->pcdEngines |= FM_PCD_CC; + p_FmPort->pcdEngines |= FM_PCD_PLCR; + break; + case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR): + p_FmPort->pcdEngines |= FM_PCD_PRS; + p_FmPort->pcdEngines |= FM_PCD_KG; + p_FmPort->pcdEngines |= FM_PCD_PLCR; + break; + +#ifdef FM_CAPWAP_SUPPORT + case(e_FM_PORT_PCD_SUPPORT_CC_ONLY): + if (CHECK_FM_CTL_AC_POST_FETCH_PCD(p_FmPort->savedBmiNia)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("parser support is required")); + p_FmPort->pcdEngines |= FM_PCD_CC; + break; + case(e_FM_PORT_PCD_SUPPORT_CC_AND_KG): + if (CHECK_FM_CTL_AC_POST_FETCH_PCD(p_FmPort->savedBmiNia)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("parser support is required")); + p_FmPort->pcdEngines |= FM_PCD_CC; + p_FmPort->pcdEngines |= FM_PCD_KG; + break; + case(e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR): + if (CHECK_FM_CTL_AC_POST_FETCH_PCD(p_FmPort->savedBmiNia)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("parser support is required")); + p_FmPort->pcdEngines |= FM_PCD_CC; + p_FmPort->pcdEngines |= FM_PCD_KG; + p_FmPort->pcdEngines |= FM_PCD_PLCR; + break; +#endif /* FM_CAPWAP_SUPPORT */ + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("invalid pcdSupport")); + } + + if((p_FmPort->pcdEngines & FM_PCD_PRS) && (p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams > FM_PCD_PRS_NUM_OF_HDRS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Port parser numOfHdrsWithAdditionalParams may not exceed %d", FM_PCD_PRS_NUM_OF_HDRS)); + + /* check that parameters exist for each and only each defined engine */ + if((!!(p_FmPort->pcdEngines & FM_PCD_PRS) != !!p_PcdParams->p_PrsParams) || + (!!(p_FmPort->pcdEngines & FM_PCD_KG) != !!p_PcdParams->p_KgParams) || + (!!(p_FmPort->pcdEngines & FM_PCD_CC) != !!p_PcdParams->p_CcParams)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("PCD initialization structure is not consistant with pcdSupport")); + + /* get PCD registers pointers */ + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne; + p_BmiPrsNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne; + p_BmiPrsStartOffset = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpso; + p_BmiInitPrsResult = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rprai[0]; + p_BmiCcBase = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rccb; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne; + p_BmiPrsNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofpne; + p_BmiPrsStartOffset = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opso; + p_BmiInitPrsResult = 
&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_oprai[0];
+            p_BmiCcBase = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_occb;
+            break;
+        default:
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
+    }
+
+    if(p_FmPort->pcdEngines & FM_PCD_KG)
+    {
+        if(p_PcdParams->p_KgParams->numOfSchemes == 0)
+            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("For ports using Keygen, at least one scheme must be bound."));
+        /* for each scheme */
+        for(i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++)
+        {
+            physicalSchemeId = (uint8_t)(PTR_TO_UINT(p_PcdParams->p_KgParams->h_Schemes[i])-1);
+            /* build vector */
+            p_FmPort->schemesPerPortVector |= 1 << (31 - (uint32_t)physicalSchemeId);
+        }
+
+        err = FmPcdKgSetOrBindToClsPlanGrp(p_FmPort->h_FmPcd,
+                                           p_FmPort->hardwarePortId,
+                                           p_FmPort->netEnvId,
+                                           p_FmPort->optArray,
+                                           &p_FmPort->clsPlanGrpId,
+                                           &isEmptyClsPlanGrp);
+        if(err)
+            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FmPcdKgSetOrBindToClsPlanGrp failed."));
+
+        p_FmPort->useClsPlan = !isEmptyClsPlanGrp;
+    }
+
+    /* set PCD port parameter */
+    if(p_FmPort->pcdEngines & FM_PCD_CC)
+    {
+        err = FmPcdCcBindTree(p_FmPort->h_FmPcd, p_PcdParams->p_CcParams->h_CcTree, &ccTreePhysOffset, h_FmPort);
+        if(err)
+            RETURN_ERROR(MINOR, err, NO_MSG);
+
+        WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset);
+        p_FmPort->ccTreeId = p_PcdParams->p_CcParams->h_CcTree;
+    }
+
+    /***************************/
+    /* configure NIA after BMI */
+    /***************************/
+    if (!CHECK_FM_CTL_AC_POST_FETCH_PCD(p_FmPort->savedBmiNia))
+        /* rfne may contain FDCS bits, so first we read them. */
+        p_FmPort->savedBmiNia = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK;
+
+    /* If the policer is used directly after BMI or PRS */
+    if((p_FmPort->pcdEngines & FM_PCD_PLCR) &&
+       ((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PLCR_ONLY) ||
+        (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR)))
+    {
+        absoluteProfileId = (uint16_t)(PTR_TO_UINT(p_PcdParams->p_PlcrParams->h_Profile)-1);
+
+        if(!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId))
+            RETURN_ERROR(MINOR, E_INVALID_STATE, ("Private port profile not valid."));
+
+        tmpReg = (uint32_t)(absoluteProfileId | NIA_PLCR_ABSOLUTE);
+
+        if(p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */
+        {
+            /* update BMI HPNIA */
+            WRITE_UINT32(*p_BmiPrsNia, (uint32_t)(NIA_ENG_PLCR | tmpReg));
+        }
+        else /* e_FM_PCD_SUPPORT_PLCR_ONLY */
+            /* update BMI NIA */
+            p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PLCR);
+    }
+
+#ifdef FM_CAPWAP_SUPPORT
+    /* if CC is used directly after BMI */
+    if((p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_ONLY) ||
+       (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG) ||
+       (p_PcdParams->pcdSupport == e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR))
+    {
+        if (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)
+            RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("e_FM_PORT_PCD_SUPPORT_CC_xx available for offline parsing ports only"));
+        p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC);
+        /* check that prs start offset == RIM[FOF] */
+    }
+#endif /* FM_CAPWAP_SUPPORT */
+
+    if (p_FmPort->pcdEngines & FM_PCD_PRS)
+    {
+        ASSERT_COND(p_PcdParams->p_PrsParams);
+        /* if PRS is used it is always first */
+        GET_PRS_HDR_NUM(hdrNum, p_PcdParams->p_PrsParams->firstPrsHdr);
+        if (hdrNum == ILLEGAL_HDR_NUM)
+            RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unsupported header."));
+        if (!CHECK_FM_CTL_AC_POST_FETCH_PCD(p_FmPort->savedBmiNia))
+            p_FmPort->savedBmiNia |= (uint32_t)(NIA_ENG_PRS | (uint32_t)(hdrNum));
+        /* set after parser NIA */
+        tmpReg = 0;
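+        /*
+         * The switch below programs the "next invoked action" (NIA) taken once
+         * the parser completes: KeyGen (optionally with the coarse-classification
+         * enable bit and/or a direct scheme), or plain BMI enqueue when only
+         * parsing was requested.  For example, for
+         * e_FM_PORT_PCD_SUPPORT_PRS_AND_KG with a direct scheme the register
+         * ends up holding NIA_ENG_KG | NIA_KG_DIRECT | physicalSchemeId
+         * (illustrative composition of the values used below).
+         */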
+        switch(p_PcdParams->pcdSupport)
+        {
+            case(e_FM_PORT_PCD_SUPPORT_PRS_ONLY):
+                WRITE_UINT32(*p_BmiPrsNia, NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME);
+                break;
+            case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC):
+            case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR):
+                tmpReg = NIA_KG_CC_EN;
+            case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG):
+            case(e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR):
+                if(p_PcdParams->p_KgParams->directScheme)
+                {
+                    physicalSchemeId = (uint8_t)(PTR_TO_UINT(p_PcdParams->p_KgParams->h_DirectScheme)-1);
+                    /* check that this scheme was bound to this port */
+                    for(i = 0; i < p_PcdParams->p_KgParams->numOfSchemes; i++)
+                        if(p_PcdParams->p_KgParams->h_DirectScheme == p_PcdParams->p_KgParams->h_Schemes[i])
+                            break;
+                    if(i == p_PcdParams->p_KgParams->numOfSchemes)
+                        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Direct scheme is not one of the port selected schemes."));
+                    tmpReg |= (uint32_t)(NIA_KG_DIRECT | physicalSchemeId);
+                }
+                WRITE_UINT32(*p_BmiPrsNia, NIA_ENG_KG | tmpReg);
+                break;
+            case(e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR):
+                break;
+            default:
+                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid PCD support"));
+        }
+
+        /* set start parsing offset */
+        /* WRITE_UINT32(*p_BmiPrsStartOffset, p_PcdParams->p_PrsParams->parsingOffset); */
+
+        /************************************/
+        /* Parser port parameters           */
+        /************************************/
+        /* stop before configuring */
+        WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP);
+        /* wait for parser to be in idle state */
+        while(GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE) ;
+
+        /* set soft seq attachment register */
+        memset(tmpHxs, 0, FM_PCD_PRS_NUM_OF_HDRS*sizeof(uint32_t));
+
+        /* set protocol options */
+        for(i=0;p_FmPort->optArray[i];i++)
+            switch(p_FmPort->optArray[i])
+            {
+                case(ETH_BROADCAST):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_ETH)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_ETH_BC_SHIFT;
+                    break;
+                case(ETH_MULTICAST):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_ETH)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_ETH_MC_SHIFT;
+                    break;
+                case(VLAN_STACKED):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_VLAN)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_VLAN_STACKED_SHIFT;
+                    break;
+                case(MPLS_STACKED):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_MPLS)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_MPLS_STACKED_SHIFT;
+                    break;
+                case(IPV4_BROADCAST_1):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv4)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV4_1_BC_SHIFT;
+                    break;
+                case(IPV4_MULTICAST_1):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv4)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV4_1_MC_SHIFT;
+                    break;
+                case(IPV4_UNICAST_2):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv4)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV4_2_UC_SHIFT;
+                    break;
+                case(IPV4_MULTICAST_BROADCAST_2):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv4)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV4_2_MC_BC_SHIFT;
+                    break;
+                case(IPV6_MULTICAST_1):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv6)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV6_1_MC_SHIFT;
+                    break;
+                case(IPV6_UNICAST_2):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv6)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV6_2_UC_SHIFT;
+                    break;
+                case(IPV6_MULTICAST_2):
+                    GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPv6)
+                    tmpHxs[hdrNum] |= (i+1) << PRS_HDR_IPV6_2_MC_SHIFT;
+                    break;
+            }
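+
+        /*
+         * When the network environment includes UDP-encapsulated ESP
+         * (HEADER_TYPE_UDP_ENCAP_ESP), the driver appends one more
+         * "additional parameters" entry that enables software parsing on the
+         * UDP header, so the soft-parser sequence is attached at that point.
+         */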
+        if (FmPcdNetEnvIsHdrExist(p_FmPort->h_FmPcd, p_FmPort->netEnvId, HEADER_TYPE_UDP_ENCAP_ESP))
+        {
+            p_PcdParams->p_PrsParams->additionalParams
+                [p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].hdr = HEADER_TYPE_UDP;
+            p_PcdParams->p_PrsParams->additionalParams
+                [p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams].swPrsEnable = TRUE;
+            p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams++;
+        }
+
+        /* set MPLS default next header - HW reset workaround */
+        GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_MPLS)
+        tmpHxs[hdrNum] |= PRS_HDR_MPLS_LBL_INTER_EN;
+        GET_PRS_HDR_NUM(L3HdrNum, HEADER_TYPE_USER_DEFINED_L3);
+        tmpHxs[hdrNum] |= (uint32_t)L3HdrNum << PRS_HDR_MPLS_NEXT_HDR_SHIFT;
+
+        /* for GRE, disable errors */
+        GET_PRS_HDR_NUM(greHdrNum, HEADER_TYPE_GRE);
+        tmpHxs[greHdrNum] |= PRS_HDR_ERROR_DIS;
+
+        /* config additional params for specific headers */
+        for(i = 0; i < p_PcdParams->p_PrsParams->numOfHdrsWithAdditionalParams; i++)
+        {
+            GET_PRS_HDR_NUM(hdrNum, p_PcdParams->p_PrsParams->additionalParams[i].hdr);
+            if(hdrNum == ILLEGAL_HDR_NUM)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
+            if(hdrNum == NO_HDR_NUM)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Private headers may not use additional parameters"));
+
+            err = AdditionalPrsParams(p_FmPort, &p_PcdParams->p_PrsParams->additionalParams[i], &tmpReg);
+            if(err)
+                RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);
+
+            tmpHxs[hdrNum] |= tmpReg;
+        }
+#ifdef FM_PRS_L4_SHELL_ERRATA_FMANb
+        {
+            t_FmRevisionInfo revInfo;
+            FM_GetRevision(p_FmPort->h_Fm, &revInfo);
+            if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
+            {
+                /* link to sw parser code for L4 shells - only if no other code is applied. */
+                GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_SCTP)
+                if(!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
+                    tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | SCTP_SW_PATCH_START);
+                GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_DCCP)
+                if(!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
+                    tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | DCCP_SW_PATCH_START);
+                GET_PRS_HDR_NUM(hdrNum, HEADER_TYPE_IPSEC_AH)
+                if(!(tmpHxs[hdrNum] & PRS_HDR_SW_PRS_EN))
+                    tmpHxs[hdrNum] |= (PRS_HDR_SW_PRS_EN | IPSEC_SW_PATCH_START);
+            }
+        }
+#endif /* FM_PRS_L4_SHELL_ERRATA_FMANb */
+
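+        /*
+         * Program the per-header parse-state registers: for each of the
+         * FM_PCD_PRS_NUM_OF_HDRS hardware parse states, write the LCV
+         * (lineup confirmation vector reported by the PCD for this port's
+         * network environment) and the soft-sequence-attach word accumulated
+         * in tmpHxs above (error handling, protocol options and software
+         * parser hooks).
+         */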
+        for(i = 0; i < FM_PCD_PRS_NUM_OF_HDRS; i++)
+        {
+            WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[i].lcv, FmPcdGetLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId, (uint8_t)i));
+            /* set HXS register according to default+Additional params+protocol options */
+            WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->hdrs[i].softSeqAttach, tmpHxs[i]);
+        }
+
+        /* set tpid. */
+        tmpReg = PRS_TPID_DFLT;
+        if(p_PcdParams->p_PrsParams->setVlanTpid1)
+        {
+            tmpReg &= PRS_TPID2_MASK;
+            tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid1 << PRS_PCTPID_SHIFT;
+        }
+        if(p_PcdParams->p_PrsParams->setVlanTpid2)
+        {
+            tmpReg &= PRS_TPID1_MASK;
+            tmpReg |= (uint32_t)p_PcdParams->p_PrsParams->vlanTpid2;
+        }
+        WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pctpid, tmpReg);
+
+        /* enable parser */
+        WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, 0);
+
+        if(p_PcdParams->p_PrsParams->prsResultPrivateInfo)
+            p_FmPort->privateInfo = p_PcdParams->p_PrsParams->prsResultPrivateInfo;
+
+    } /* end parser */
+    else
+        p_FmPort->privateInfo = 0;
+
+    WRITE_UINT32(*p_BmiPrsStartOffset, GET_UINT32(*p_BmiPrsStartOffset) + p_FmPort->internalBufferOffset);
+
+    /* set initial parser result - used for all engines */
+    for (i = 0; i < FM_PORT_PRS_RESULT_NUM_OF_WORDS; i++)
+    {
+        if (i == 0)
+            WRITE_UINT32(*(p_BmiInitPrsResult+i), (uint32_t)(((uint32_t)p_FmPort->privateInfo << BMI_PR_PORTID_SHIFT) | BMI_PRS_RESULT_HIGH));
+        else
+            if (i < FM_PORT_PRS_RESULT_NUM_OF_WORDS/2)
+                WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_HIGH);
+            else
+                WRITE_UINT32(*(p_BmiInitPrsResult+i), BMI_PRS_RESULT_LOW);
+    }
+
+    return E_OK;
+}
+
+static t_Error DeletePcd(t_Handle h_FmPort)
+{
+    t_FmPort            *p_FmPort = (t_FmPort*)h_FmPort;
+    t_Error             err = E_OK;
+    volatile uint32_t   *p_BmiNia = NULL;
+
+    SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE);
+    SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
+
+    if (p_FmPort->imEn)
+        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only"));
+
+    if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) &&
+        (p_FmPort->portType != e_FM_PORT_TYPE_RX) &&
+        (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING))
+        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only"));
+
+    if(!p_FmPort->pcdEngines)
+        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("called for non PCD port"));
+
+    /* get PCD registers pointers */
+    switch(p_FmPort->portType)
+    {
+        case(e_FM_PORT_TYPE_RX_10G):
+        case(e_FM_PORT_TYPE_RX):
+            p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne;
+            break;
+        case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
+            p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne;
+            break;
+        default:
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
+    }
+
+    if((GET_UINT32(*p_BmiNia) & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)) != (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME))
+        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("port has to be detached previously"));
+
+    /* "cut" PCD out of the port's flow - go to BMI */
+    /* WRITE_UINT32(*p_BmiNia, (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); */
+
+    if(p_FmPort->pcdEngines | FM_PCD_PRS)
+    {
+        /* stop parser */
+        WRITE_UINT32(p_FmPort->p_FmPortPrsRegs->pcac, PRS_CAC_STOP);
+        /* wait for parser to be in idle state */
+        while(GET_UINT32(p_FmPort->p_FmPortPrsRegs->pcac) & PRS_CAC_ACTIVE) ;
+    }
+
+    if(p_FmPort->pcdEngines & FM_PCD_KG)
+    {
+        err = FmPcdKgDeleteOrUnbindPortToClsPlanGrp(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, p_FmPort->clsPlanGrpId);
+        if(err)
+            RETURN_ERROR(MINOR, err, NO_MSG);
+        p_FmPort->useClsPlan = FALSE;
+    }
+
+    if(p_FmPort->pcdEngines & FM_PCD_CC)
+    {
+        /* unbind - we need to get the treeId too */
+        err = FmPcdCcUnbindTree(p_FmPort->h_FmPcd, p_FmPort->ccTreeId);
+        if(err)
+            RETURN_ERROR(MINOR, err, NO_MSG);
+    }
+
+    p_FmPort->pcdEngines = 0;
+
+    return E_OK;
+}
+
+
+/********************************************/
+/*           Inter-module API               */
+/********************************************/
+void FmPortSetMacsecLcv(t_Handle
h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiCfgReg = NULL; + uint32_t macsecEn = BMI_PORT_CFG_EN_MACSEC; + uint32_t lcv, walking1Mask = 0x80000000; + uint8_t cnt = 0; + + SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + { + REPORT_ERROR(MAJOR, E_INVALID_OPERATION, ("The routine is relevant for Rx ports only")); + return; + } + + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rcfg; + /* get LCV for MACSEC */ + if ((p_FmPort->h_FmPcd) && ((lcv = FmPcdGetMacsecLcv(p_FmPort->h_FmPcd, p_FmPort->netEnvId))!= 0)) + { + while(!(lcv & walking1Mask)) + { + cnt++; + walking1Mask >>= 1; + } + + macsecEn |= (uint32_t)cnt << BMI_PORT_CFG_MS_SEL_SHIFT; + } + + WRITE_UINT32(*p_BmiCfgReg, GET_UINT32(*p_BmiCfgReg) | macsecEn); +} + +void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiCfgReg = NULL; + uint32_t tmpReg; + + SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) + { + REPORT_ERROR(MAJOR, E_INVALID_OPERATION, ("The routine is relevant for Tx ports only")); + return; + } + + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tfca; + tmpReg = GET_UINT32(*p_BmiCfgReg) & ~BMI_CMD_ATTR_MACCMD_MASK; + tmpReg |= BMI_CMD_ATTR_MACCMD_SECURED; + tmpReg |= (((uint32_t)dfltSci << BMI_CMD_ATTR_MACCMD_SC_SHIFT) & BMI_CMD_ATTR_MACCMD_SC_MASK); + + WRITE_UINT32(*p_BmiCfgReg, tmpReg); +} + +uint8_t FmPortGetNetEnvId(t_Handle h_FmPort) +{ + return ((t_FmPort*)h_FmPort)->netEnvId; +} + +uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort) +{ + return ((t_FmPort*)h_FmPort)->hardwarePortId; +} + +uint32_t FmPortGetPcdEngines(t_Handle h_FmPort) +{ + return ((t_FmPort*)h_FmPort)->pcdEngines; +} + +t_Error FmPortAttachPCD(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiNia=NULL; + +/*TODO - to take care about the chnges that were made in the port because of the previously assigned tree. +pndn, pnen ... 
maybe were changed because of the Tree requirement*/ + + /* get PCD registers pointers */ + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + } + + if(p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY) + if(FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 1)!= E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + /* check that current NIA is BMI to BMI */ + if((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK) != (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("may be called only for ports in BMI-to-BMI state.")); + + WRITE_UINT32(*p_BmiNia, p_FmPort->savedBmiNia); + + if(p_FmPort->requiredAction & UPDATE_NIA_PNEN) + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, p_FmPort->savedQmiPnen); + + if(p_FmPort->requiredAction & UPDATE_NIA_PNDN) + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn, p_FmPort->savedNonRxQmiRegsPndn); + + + return E_OK; +} + +t_Error FmPortGetSetCcParams(t_Handle h_FmPort, t_FmPortGetSetCcParams *p_CcParams) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + int tmpInt; + volatile uint32_t *p_BmiPrsStartOffset = NULL; + + /* this function called from Cc for pass and receive parameters port params between CC and PORT*/ + + if((p_CcParams->getCcParams.type & OFFSET_OF_PR) && (p_FmPort->bufferOffsets.prsResultOffset != ILLEGAL_BASE)) + { + p_CcParams->getCcParams.prOffset = (uint8_t)p_FmPort->bufferOffsets.prsResultOffset; + p_CcParams->getCcParams.type &= ~OFFSET_OF_PR; + } + if(p_CcParams->getCcParams.type & HW_PORT_ID) + { + p_CcParams->getCcParams.hardwarePortId = (uint8_t)p_FmPort->hardwarePortId; + p_CcParams->getCcParams.type &= ~HW_PORT_ID; + } + if((p_CcParams->getCcParams.type & OFFSET_OF_DATA) && (p_FmPort->bufferOffsets.dataOffset != ILLEGAL_BASE)) + { + p_CcParams->getCcParams.dataOffset = (uint16_t)p_FmPort->bufferOffsets.dataOffset; + p_CcParams->getCcParams.type &= ~OFFSET_OF_DATA; + } + if(p_CcParams->getCcParams.type & NUM_OF_TASKS) + { + p_CcParams->getCcParams.numOfTasks = p_FmPort->numOfTasks; + p_CcParams->getCcParams.type &= ~NUM_OF_TASKS; + } + if(p_CcParams->getCcParams.type & BUFFER_POOL_ID_FOR_MANIP) + { + if(p_CcParams->getCcParams.poolIndex < p_FmPort->extBufPools.numOfPoolsUsed) + { + p_CcParams->getCcParams.poolIdForManip = p_FmPort->extBufPools.extBufPool[p_CcParams->getCcParams.poolIndex].id; + p_CcParams->getCcParams.type &= ~BUFFER_POOL_ID_FOR_MANIP; + } + } + + if((p_CcParams->setCcParams.type & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY) && !(p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY)) + { + p_FmPort->requiredAction |= UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY; + } + + if((p_CcParams->setCcParams.type & UPDATE_NIA_PNEN) && !(p_FmPort->requiredAction & UPDATE_NIA_PNEN)) + { + p_FmPort->savedQmiPnen = p_CcParams->setCcParams.nia; + p_FmPort->requiredAction |= UPDATE_NIA_PNEN; + } + else if (p_CcParams->setCcParams.type & UPDATE_NIA_PNEN) + { + if(p_FmPort->savedQmiPnen != p_CcParams->setCcParams.nia) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("pnen was defined previously different")); + } + + if((p_CcParams->setCcParams.type & UPDATE_NIA_PNDN) && !(p_FmPort->requiredAction & UPDATE_NIA_PNDN)) + { + 
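+        /*
+         * Remember the dequeue NIA (PNDN) requested by the CC code; it is the
+         * value that FmPortAttachPCD() above writes into fmqm_pndn when the
+         * PCD is (re)attached to the port.
+         */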
p_FmPort->savedNonRxQmiRegsPndn = p_CcParams->setCcParams.nia; + p_FmPort->requiredAction |= UPDATE_NIA_PNDN; + } + else if(p_CcParams->setCcParams.type & UPDATE_NIA_PNDN) + { + if(p_FmPort->savedNonRxQmiRegsPndn != p_CcParams->setCcParams.nia) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("pndn was defined previously different")); + } + + + if((p_CcParams->setCcParams.type & UPDATE_PSO) && !(p_FmPort->requiredAction & UPDATE_PSO)) + { + /* get PCD registers pointers */ + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiPrsStartOffset = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpso; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiPrsStartOffset = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opso; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + /* set start parsing offset */ + tmpInt = (int)GET_UINT32(*p_BmiPrsStartOffset)+ p_CcParams->setCcParams.psoSize; + if(tmpInt>0) + WRITE_UINT32(*p_BmiPrsStartOffset, (uint32_t)tmpInt); + + p_FmPort->requiredAction |= UPDATE_PSO; + p_FmPort->savedPrsStartOffset = p_CcParams->setCcParams.psoSize; + + } + else if (p_CcParams->setCcParams.type & UPDATE_PSO) + { + if(p_FmPort->savedPrsStartOffset != p_CcParams->setCcParams.psoSize) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("parser start offset was defoned previousley different")); + } + return E_OK; +} +/********************************** End of inter-module routines ********************************/ + +/****************************************/ +/* API Init unit functions */ +/****************************************/ +t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams) +{ + t_FmPort *p_FmPort; + uintptr_t baseAddr = p_FmPortParams->baseAddr; + + /* Allocate FM structure */ + p_FmPort = (t_FmPort *) XX_Malloc(sizeof(t_FmPort)); + if (!p_FmPort) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver structure")); + return NULL; + } + memset(p_FmPort, 0, sizeof(t_FmPort)); + + /* Allocate the FM driver's parameters structure */ + p_FmPort->p_FmPortDriverParam = (t_FmPortDriverParam *)XX_Malloc(sizeof(t_FmPortDriverParam)); + if (!p_FmPort->p_FmPortDriverParam) + { + XX_Free(p_FmPort); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Port driver parameters")); + return NULL; + } + memset(p_FmPort->p_FmPortDriverParam, 0, sizeof(t_FmPortDriverParam)); + + /* Initialize FM port parameters which will be kept by the driver */ + p_FmPort->portType = p_FmPortParams->portType; + p_FmPort->portId = p_FmPortParams->portId; + p_FmPort->pcdEngines = FM_PCD_NONE; + p_FmPort->f_Exception = p_FmPortParams->f_Exception; + p_FmPort->h_App = p_FmPortParams->h_App; + p_FmPort->h_Fm = p_FmPortParams->h_Fm; + + /* calculate global portId number */ + SW_PORT_ID_TO_HW_PORT_ID(p_FmPort->hardwarePortId, p_FmPort->portType, p_FmPortParams->portId); + + /* Initialize FM port parameters for initialization phase only */ + p_FmPort->p_FmPortDriverParam->baseAddr = baseAddr; + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.privDataSize = DEFAULT_PORT_bufferPrefixContent_privDataSize; + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passPrsResult= DEFAULT_PORT_bufferPrefixContent_passPrsResult; + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passTimeStamp= DEFAULT_PORT_bufferPrefixContent_passTimeStamp; + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passAllOtherPCDInfo + = DEFAULT_PORT_bufferPrefixContent_passTimeStamp; +#ifdef DEBUG + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.passDebugInfo= 
DEFAULT_PORT_bufferPrefixContent_debugInfo; +#endif /* DEBUG */ + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign = DEFAULT_PORT_bufferPrefixContent_dataAlign; + p_FmPort->p_FmPortDriverParam->dmaSwapData = DEFAULT_PORT_dmaSwapData; + p_FmPort->p_FmPortDriverParam->dmaIntContextCacheAttr = DEFAULT_PORT_dmaIntContextCacheAttr; + p_FmPort->p_FmPortDriverParam->dmaHeaderCacheAttr = DEFAULT_PORT_dmaHeaderCacheAttr; + p_FmPort->p_FmPortDriverParam->dmaScatterGatherCacheAttr = DEFAULT_PORT_dmaScatterGatherCacheAttr; + p_FmPort->p_FmPortDriverParam->dmaWriteOptimize = DEFAULT_PORT_dmaWriteOptimize; + p_FmPort->p_FmPortDriverParam->liodnBase = p_FmPortParams->liodnBase; + + /* resource distribution. */ + p_FmPort->fifoBufs.num = DEFAULT_PORT_sizeOfFifo(p_FmPort->portType); + p_FmPort->fifoBufs.extra = DEFAULT_PORT_extraSizeOfFifo(p_FmPort->portType); + p_FmPort->openDmas.num = DEFAULT_PORT_numOfOpenDmas(p_FmPort->portType); + p_FmPort->openDmas.extra = DEFAULT_PORT_extraNumOfOpenDmas(p_FmPort->portType); + p_FmPort->tasks.num = DEFAULT_PORT_numOfTasks(p_FmPort->portType); + p_FmPort->tasks.extra = DEFAULT_PORT_extraNumOfTasks(p_FmPort->portType); + p_FmPort->numOfTasks = (uint8_t)p_FmPort->tasks.num; +#ifdef FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + p_FmPort->fifoBufs.extra = 0; + p_FmPort->openDmas.extra = 0; + p_FmPort->tasks.extra = 0; + } + } +#endif /* FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 */ + + p_FmPort->p_FmPortDriverParam->color = DEFAULT_PORT_color; +#ifdef FM_OP_PORT_QMAN_REJECT_ERRATA_FMAN21 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0) && + (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + p_FmPort->p_FmPortDriverParam->color = e_FM_PORT_COLOR_OVERRIDE; + } +#endif /* FM_OP_PORT_QMAN_REJECT_ERRATA_FMAN21 */ + + if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) + p_FmPort->p_FmPortDriverParam->syncReq = DEFAULT_PORT_syncReqForHc; + else + p_FmPort->p_FmPortDriverParam->syncReq = DEFAULT_PORT_syncReq; + +#ifdef FM_PORT_SYNC_ERRATA_FMAN6 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0) && + (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + p_FmPort->p_FmPortDriverParam->syncReq = FALSE; + } +#endif /* FM_PORT_SYNC_ERRATA_FMAN6 */ + + /* Port type specific initialization: */ + if ((p_FmPort->portType != e_FM_PORT_TYPE_TX) && + (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)) + p_FmPort->p_FmPortDriverParam->frmDiscardOverride = DEFAULT_PORT_frmDiscardOverride; + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX): + case(e_FM_PORT_TYPE_RX_10G): + /* Initialize FM port parameters for initialization phase only */ + p_FmPort->p_FmPortDriverParam->cutBytesFromEnd = DEFAULT_PORT_cutBytesFromEnd; + p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = FALSE; + p_FmPort->p_FmPortDriverParam->frmDiscardOverride = DEFAULT_PORT_frmDiscardOverride; + p_FmPort->p_FmPortDriverParam->rxFifoPriElevationLevel = DEFAULT_PORT_rxFifoPriElevationLevel; + p_FmPort->p_FmPortDriverParam->rxFifoThreshold = DEFAULT_PORT_rxFifoThreshold; + p_FmPort->p_FmPortDriverParam->bufMargins.endMargins = DEFAULT_PORT_BufMargins_endMargins; + p_FmPort->p_FmPortDriverParam->errorsToDiscard = DEFAULT_PORT_errorsToDiscard; + p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore 
= DEFAULT_PORT_cheksumLastBytesIgnore; + p_FmPort->p_FmPortDriverParam->forwardReuseIntContext = DEFAULT_PORT_forwardIntContextReuse; + break; + + case(e_FM_PORT_TYPE_TX): + p_FmPort->txFifoDeqPipelineDepth = DEFAULT_PORT_txFifoDeqPipelineDepth_1G; + p_FmPort->p_FmPortDriverParam->dontReleaseBuf = FALSE; + case(e_FM_PORT_TYPE_TX_10G): + if(p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) + p_FmPort->txFifoDeqPipelineDepth = DEFAULT_PORT_txFifoDeqPipelineDepth_10G; + p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore = DEFAULT_PORT_cheksumLastBytesIgnore; + p_FmPort->p_FmPortDriverParam->txFifoMinFillLevel = DEFAULT_PORT_txFifoMinFillLevel; + p_FmPort->p_FmPortDriverParam->txFifoLowComfLevel = DEFAULT_PORT_txFifoLowComfLevel; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + p_FmPort->p_FmPortDriverParam->deqHighPriority = DEFAULT_PORT_deqHighPriority; + p_FmPort->p_FmPortDriverParam->deqType = DEFAULT_PORT_deqType; +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + p_FmPort->p_FmPortDriverParam->deqPrefetchOption = DEFAULT_PORT_deqPrefetchOption; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + p_FmPort->p_FmPortDriverParam->deqByteCnt = DEFAULT_PORT_deqByteCnt; + + if (p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + p_FmPort->p_FmPortDriverParam->errorsToDiscard = DEFAULT_PORT_errorsToDiscard; + break; + + default: + XX_Free(p_FmPort->p_FmPortDriverParam); + XX_Free(p_FmPort); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + return NULL; + } +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) + p_FmPort->p_FmPortDriverParam->deqPrefetchOption = DEFAULT_PORT_deqPrefetchOption_HC; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + + if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || + (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + p_FmPort->txFifoDeqPipelineDepth = OH_PIPELINE_DEPTH; + + p_FmPort->imEn = p_FmPortParams->independentModeEnable; + + if (p_FmPort->imEn) + { + if ((p_FmPort->portType == e_FM_PORT_TYPE_TX) || + (p_FmPort->portType == e_FM_PORT_TYPE_TX_10G)) + p_FmPort->txFifoDeqPipelineDepth = DEFAULT_PORT_txFifoDeqPipelineDepth_IM; + FmPortConfigIM(p_FmPort, p_FmPortParams); + } + else + { + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX): + case(e_FM_PORT_TYPE_RX_10G): + /* Initialize FM port parameters for initialization phase only */ + memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, + &p_FmPortParams->specificParams.rxParams.extBufPools, + sizeof(t_FmPortExtPools)); + p_FmPort->p_FmPortDriverParam->errFqid = p_FmPortParams->specificParams.rxParams.errFqid; + p_FmPort->p_FmPortDriverParam->dfltFqid = p_FmPortParams->specificParams.rxParams.dfltFqid; + p_FmPort->p_FmPortDriverParam->liodnOffset = p_FmPortParams->specificParams.rxParams.liodnOffset; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): +#ifdef FM_OP_PARTITION_ERRATA_FMANx8 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + p_FmPort->p_FmPortDriverParam->liodnOffset = p_FmPortParams->specificParams.nonRxParams.opLiodnOffset; + } +#endif /* FM_OP_PARTITION_ERRATA_FMANx8 */ + case(e_FM_PORT_TYPE_TX): + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + p_FmPort->p_FmPortDriverParam->errFqid = p_FmPortParams->specificParams.nonRxParams.errFqid; + p_FmPort->p_FmPortDriverParam->deqSubPortal = + (uint8_t)(p_FmPortParams->specificParams.nonRxParams.qmChannel & QMI_DEQ_CFG_SUBPORTAL_MASK); + 
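+            /*
+             * Only the low bits of the QMan channel identify the dequeue
+             * sub-portal; QmiInit() later shifts this value into the
+             * sub-portal field of fmqm_pndc.
+             */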
p_FmPort->p_FmPortDriverParam->dfltFqid = p_FmPortParams->specificParams.nonRxParams.dfltFqid; + break; + default: + XX_Free(p_FmPort->p_FmPortDriverParam); + XX_Free(p_FmPort); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + return NULL; + } + } + + memset(p_FmPort->name, 0, (sizeof(char)) * MODULE_NAME_SIZE); + if(Sprint (p_FmPort->name, "FM-%d-port-%s-%d", + FmGetId(p_FmPort->h_Fm), + ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING || + (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) ? + "OH" : (p_FmPort->portType == e_FM_PORT_TYPE_RX ? + "1g-RX" : (p_FmPort->portType == e_FM_PORT_TYPE_TX ? + "1g-TX" : (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G ? + "10g-RX" : "10g-TX")))), + p_FmPort->portId) == 0) + { + XX_Free(p_FmPort->p_FmPortDriverParam); + XX_Free(p_FmPort); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + return NULL; + } + + p_FmPort->h_Spinlock = XX_InitSpinlock(); + if (!p_FmPort->h_Spinlock) + { + XX_Free(p_FmPort->p_FmPortDriverParam); + XX_Free(p_FmPort); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + return NULL; + } + + return p_FmPort; +} + +/**************************************************************************//** + @Function FM_PORT_Init + + @Description Initializes the FM module + + @Param[in] h_FmPort - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PORT_Init(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmPortDriverParam *p_Params; + t_Error err = E_OK; + t_FmInterModulePortInitParams fmParams; + uint32_t minFifoSizeRequired = 0; + + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if ((err = BuildBufferStructure(p_FmPort)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + + CHECK_INIT_PARAMETERS(p_FmPort, CheckInitParameters); + + p_Params = p_FmPort->p_FmPortDriverParam; + + /* set memory map pointers */ + p_FmPort->p_FmPortQmiRegs = (t_FmPortQmiRegs *)UINT_TO_PTR(p_Params->baseAddr + QMI_PORT_REGS_OFFSET); + p_FmPort->p_FmPortBmiRegs = (u_FmPortBmiRegs *)UINT_TO_PTR(p_Params->baseAddr + BMI_PORT_REGS_OFFSET); + p_FmPort->p_FmPortPrsRegs = (t_FmPortPrsRegs *)UINT_TO_PTR(p_Params->baseAddr + PRS_PORT_REGS_OFFSET); + + /* For O/H ports, check fifo size and update if necessary */ + if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + { + minFifoSizeRequired = (uint32_t)((p_FmPort->txFifoDeqPipelineDepth+4)*BMI_FIFO_UNITS); + if (p_FmPort->fifoBufs.num < minFifoSizeRequired) + { + p_FmPort->fifoBufs.num = minFifoSizeRequired; + DBG(WARNING, ("FIFO size enlarged to %d due to txFifoDeqPipelineDepth size", minFifoSizeRequired)); + } + } + + /* For Rx Ports, call the external Buffer routine which also checks fifo + size and updates it if necessary */ + if(((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + && !p_FmPort->imEn) + { + /* define external buffer pools and pool depletion*/ + err = SetExtBufferPools(p_FmPort); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + /************************************************************/ + /* Call FM module routine for communicating parameters */ + /************************************************************/ + memset(&fmParams, 0, sizeof(fmParams)); + fmParams.hardwarePortId = p_FmPort->hardwarePortId; + fmParams.portType 
= (e_FmPortType)p_FmPort->portType; + fmParams.numOfTasks = (uint8_t)p_FmPort->tasks.num; + fmParams.numOfExtraTasks = (uint8_t)p_FmPort->tasks.extra; + fmParams.numOfOpenDmas = (uint8_t)p_FmPort->openDmas.num; + fmParams.numOfExtraOpenDmas = (uint8_t)p_FmPort->openDmas.extra; + fmParams.sizeOfFifo = p_FmPort->fifoBufs.num; + fmParams.extraSizeOfFifo = p_FmPort->fifoBufs.extra; + fmParams.independentMode = p_FmPort->imEn; + fmParams.liodnOffset = p_Params->liodnOffset; + fmParams.liodnBase = p_Params->liodnBase; + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + fmParams.deqPipelineDepth = p_FmPort->txFifoDeqPipelineDepth; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + } + + err = FmGetSetPortParams(p_FmPort->h_Fm, &fmParams); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + p_FmPort->tasks.num = fmParams.numOfTasks; + p_FmPort->tasks.extra = fmParams.numOfExtraTasks; + p_FmPort->openDmas.num = fmParams.numOfOpenDmas; + p_FmPort->openDmas.extra = fmParams.numOfExtraOpenDmas; + p_FmPort->fifoBufs.num = fmParams.sizeOfFifo; + p_FmPort->fifoBufs.extra = fmParams.extraSizeOfFifo; + + /* get params for use in init */ + p_Params->fmMuramPhysBaseAddr = + (uint64_t)((uint64_t)(fmParams.fmMuramPhysBaseAddr.low) | + ((uint64_t)(fmParams.fmMuramPhysBaseAddr.high) << 32)); + + /**********************/ + /* Init BMI Registers */ + /**********************/ + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + err = BmiRxPortInit(p_FmPort); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + err = BmiTxPortInit(p_FmPort); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + err = BmiOhPortInit(p_FmPort); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + } + + /**********************/ + /* Init QMI Registers */ + /**********************/ + if (!p_FmPort->imEn && ((err = QmiInit(p_FmPort)) != E_OK)) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (p_FmPort->imEn && ((err = FmPortImInit(p_FmPort)) != E_OK)) + RETURN_ERROR(MAJOR, err, NO_MSG); + + FmPortDriverParamFree(p_FmPort); + + return E_OK; +} + +/**************************************************************************//** + @Function FM_PORT_Free + + @Description Frees all resources that were assigned to FM module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_FmPort - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PORT_Free(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmInterModulePortFreeParams fmParams; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + + if(p_FmPort->pcdEngines) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Trying to free a port with PCD. 
FM_PORT_DeletePCD must be called first.")); + + if (p_FmPort->enabled) + { + if (FM_PORT_Disable(p_FmPort) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM_PORT_Disable FAILED")); + } + + if (p_FmPort->h_Spinlock) + XX_FreeSpinlock(p_FmPort->h_Spinlock); + + FmPortDriverParamFree(p_FmPort); + + if (p_FmPort->imEn) + FmPortImFree(p_FmPort); + + fmParams.hardwarePortId = p_FmPort->hardwarePortId; + fmParams.portType = (e_FmPortType)p_FmPort->portType; +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + fmParams.deqPipelineDepth = p_FmPort->txFifoDeqPipelineDepth; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + + FmFreePortParams(p_FmPort->h_Fm, &fmParams); + + XX_Free(p_FmPort); + + return E_OK; +} + + +/*************************************************/ +/* API Advanced Init unit functions */ +/*************************************************/ + +t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("not available for Rx ports")); + + p_FmPort->p_FmPortDriverParam->deqHighPriority = highPri; + + return E_OK; +} + +t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); + + p_FmPort->p_FmPortDriverParam->deqType = deqType; + + return E_OK; +} + +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT +t_Error FM_PORT_ConfigDeqPrefetchOption(t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); + p_FmPort->p_FmPortDriverParam->deqPrefetchOption = deqPrefetchOption; + return E_OK; +} +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + +t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort, t_FmPortBackupBmPools *p_BackupBmPools) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; +#ifdef FM_NO_BACKUP_POOLS + t_FmRevisionInfo revInfo; +#endif /* FM_NO_BACKUP_POOLS */ + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + +#ifdef FM_NO_BACKUP_POOLS + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if (revInfo.majorRev != 4) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("FM_PORT_ConfigBackupPools")); +#endif /* FM_NO_BACKUP_POOLS */ + + p_FmPort->p_FmPortDriverParam->p_BackupBmPools = (t_FmPortBackupBmPools *)XX_Malloc(sizeof(t_FmPortBackupBmPools)); + if(!p_FmPort->p_FmPortDriverParam->p_BackupBmPools) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("p_BackupBmPools allocation failed")); + memcpy(p_FmPort->p_FmPortDriverParam->p_BackupBmPools, p_BackupBmPools, 
sizeof(t_FmPortBackupBmPools)); + + return E_OK; +} + +t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Rx ports")); + + p_FmPort->p_FmPortDriverParam->deqByteCnt = deqByteCnt; + + return E_OK; +} + +t_Error FM_PORT_ConfigBufferPrefixContent(t_Handle h_FmPort, t_FmPortBufferPrefixContent *p_FmPortBufferPrefixContent) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + memcpy(&p_FmPort->p_FmPortDriverParam->bufferPrefixContent, p_FmPortBufferPrefixContent, sizeof(t_FmPortBufferPrefixContent)); + /* if dataAlign was not initialized by user, we return to driver's deafult */ + if (!p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign) + p_FmPort->p_FmPortDriverParam->bufferPrefixContent.dataAlign = DEFAULT_PORT_bufferPrefixContent_dataAlign; + + return E_OK; +} + +t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort, uint8_t cheksumLastBytesIgnore) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx & Tx ports only")); + + p_FmPort->p_FmPortDriverParam->cheksumLastBytesIgnore = cheksumLastBytesIgnore; + + return E_OK; +} + +t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort, uint8_t cutBytesFromEnd) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + + p_FmPort->p_FmPortDriverParam->cutBytesFromEnd = cutBytesFromEnd; + + return E_OK; +} + +t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort, t_FmPortBufPoolDepletion *p_BufPoolDepletion) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + + p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE; + memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion, p_BufPoolDepletion, sizeof(t_FmPortBufPoolDepletion)); + + return E_OK; +} + +t_Error FM_PORT_ConfigObservedPoolDepletion(t_Handle h_FmPort, t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if(p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for OP ports only")); + + 
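+ /* Observed-pool depletion: the depletion thresholds and the observed pool set are recorded in the port driver parameters below; as with the other FM_PORT_Config routines, they are applied to hardware only when FM_PORT_Init() is called. */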
p_FmPort->p_FmPortDriverParam->enBufPoolDepletion = TRUE; + memcpy(&p_FmPort->p_FmPortDriverParam->bufPoolDepletion, &p_FmPortObservedBufPoolDepletion->poolDepletionParams, sizeof(t_FmPortBufPoolDepletion)); + memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, &p_FmPortObservedBufPoolDepletion->poolsParams, sizeof(t_FmPortExtPools)); + + return E_OK; +} + +t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmPortExtPools *p_FmPortExtPools) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if(p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for OP ports only")); + + memcpy(&p_FmPort->p_FmPortDriverParam->extBufPools, p_FmPortExtPools, sizeof(t_FmPortExtPools)); + + return E_OK; +} + +t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + + p_FmPort->p_FmPortDriverParam->rxFifoThreshold = fifoThreshold; + + return E_OK; +} + +t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort, uint32_t priElevationLevel) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + + p_FmPort->p_FmPortDriverParam->rxFifoPriElevationLevel = priElevationLevel; + + return E_OK; +} + +t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort, uint32_t minFillLevel) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx ports only")); + + p_FmPort->p_FmPortDriverParam->txFifoMinFillLevel = minFillLevel; + + return E_OK; +} + +t_Error FM_PORT_ConfigTxFifoDeqPipelineDepth(t_Handle h_FmPort, uint8_t deqPipelineDepth) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if ((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_TX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx ports only")); + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for IM ports!")); + + p_FmPort->txFifoDeqPipelineDepth = deqPipelineDepth; + + return E_OK; +} + +t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort, uint32_t fifoLowComfLevel) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, 
("available for Tx ports only")); + + p_FmPort->p_FmPortDriverParam->txFifoLowComfLevel = fifoLowComfLevel; + + return E_OK; +} + +t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_TX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx ports only")); + + p_FmPort->p_FmPortDriverParam->dontReleaseBuf = TRUE; + + return E_OK; +} + +t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); +#ifdef FM_OP_PORT_QMAN_REJECT_ERRATA_FMAN21 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("FM_PORT_ConfigDfltColor!")); + } +#endif /* FM_OP_PORT_QMAN_REJECT_ERRATA_FMAN21 */ + p_FmPort->p_FmPortDriverParam->color = color; + + return E_OK; +} + +t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); +#ifdef FM_PORT_SYNC_ERRATA_FMAN6 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("port-sync!")); + } +#endif /* FM_PORT_SYNC_ERRATA_FMAN6 */ + + p_FmPort->p_FmPortDriverParam->syncReq = syncReq; + + return E_OK; +} + + +t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) && (p_FmPort->portType == e_FM_PORT_TYPE_TX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("not available for Tx ports")); + + p_FmPort->p_FmPortDriverParam->frmDiscardOverride = override; + + return E_OK; +} + +t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort, fmPortFrameErrSelect_t errs) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX) && + (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + + p_FmPort->p_FmPortDriverParam->errorsToDiscard = errs; + + return E_OK; +} + +t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmPortDmaSwap swapData) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->p_FmPortDriverParam->dmaSwapData = swapData; + + return E_OK; +} + +t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort, e_FmPortDmaCache intContextCacheAttr) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->p_FmPortDriverParam->dmaIntContextCacheAttr = intContextCacheAttr; + + return E_OK; +} + +t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort, e_FmPortDmaCache headerCacheAttr) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->p_FmPortDriverParam->dmaHeaderCacheAttr = headerCacheAttr; + + return E_OK; +} + +t_Error FM_PORT_ConfigDmaScatterGatherAttr(t_Handle h_FmPort, e_FmPortDmaCache scatterGatherCacheAttr) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->p_FmPortDriverParam->dmaScatterGatherCacheAttr = scatterGatherCacheAttr; + + return E_OK; +} + +t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if((p_FmPort->portType == e_FM_PORT_TYPE_TX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_TX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Not available for Tx ports")); + + p_FmPort->p_FmPortDriverParam->dmaWriteOptimize = optimize; + + return E_OK; +} + +t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort, bool forwardReuse) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + + p_FmPort->p_FmPortDriverParam->forwardReuseIntContext = forwardReuse; + + return E_OK; +} + + +/****************************************************/ +/* PCD Advaced config API */ +/****************************************************/ + +/****************************************************/ +/* API Run-time Control unit functions */ +/****************************************************/ + +t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfOpenDmas) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + +#ifdef FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0) && + (p_NumOfOpenDmas->extra)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("excessive resources")); + } +#endif /* FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 */ + + if((!p_NumOfOpenDmas->num) || (p_NumOfOpenDmas->num > MAX_NUM_OF_DMAS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("openDmas-num can't be larger than %d", MAX_NUM_OF_DMAS)); + if(p_NumOfOpenDmas->extra > MAX_NUM_OF_EXTRA_DMAS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("openDmas-extra can't be larger than %d", MAX_NUM_OF_EXTRA_DMAS)); + err = FmSetNumOfOpenDmas(p_FmPort->h_Fm, p_FmPort->hardwarePortId, (uint8_t)p_NumOfOpenDmas->num, (uint8_t)p_NumOfOpenDmas->extra, FALSE); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + + memcpy(&p_FmPort->openDmas, p_NumOfOpenDmas, sizeof(t_FmPortRsrc)); + + return E_OK; +} + +t_Error 
FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("not available for host command port where number is always 1")); + +#ifdef FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0) && + (p_NumOfTasks->extra)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("excessive resources")); + } +#endif /* FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 */ + + if((!p_NumOfTasks->num) || (p_NumOfTasks->num > MAX_NUM_OF_TASKS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("NumOfTasks-num can't be larger than %d", MAX_NUM_OF_TASKS)); + if(p_NumOfTasks->extra > MAX_NUM_OF_EXTRA_TASKS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("NumOfTasks-extra can't be larger than %d", MAX_NUM_OF_EXTRA_TASKS)); + + err = FmSetNumOfTasks(p_FmPort->h_Fm, p_FmPort->hardwarePortId, (uint8_t)p_NumOfTasks->num, (uint8_t)p_NumOfTasks->extra, FALSE); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + + /* update driver's struct */ + memcpy(&p_FmPort->tasks, p_NumOfTasks, sizeof(t_FmPortRsrc)); + return E_OK; +} + +t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err; + t_FmInterModulePortRxPoolsParams rxPoolsParams; + uint32_t minFifoSizeRequired; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + +#ifdef FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0) && + (p_SizeOfFifo->extra)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("excessive resources")); + } +#endif /* FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 */ + if(!p_SizeOfFifo->num || (p_SizeOfFifo->num > BMI_MAX_FIFO_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SizeOfFifo-num has to be in the range of 256 - %d", BMI_MAX_FIFO_SIZE)); + if(p_SizeOfFifo->num % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SizeOfFifo-num has to be divisible by %d", BMI_FIFO_UNITS)); + if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + /* extra FIFO size (allowed only to Rx ports) */ + if(p_SizeOfFifo->extra % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("SizeOfFifo-extra has to be divisible by %d", BMI_FIFO_UNITS)); + } + else + if(p_SizeOfFifo->extra) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, (" No SizeOfFifo-extra for non Rx ports")); + + /* For O/H ports, check fifo size and update if necessary */ + if((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + { + minFifoSizeRequired = (uint32_t)((p_FmPort->txFifoDeqPipelineDepth+4)*BMI_FIFO_UNITS); + if (p_FmPort->fifoBufs.num < minFifoSizeRequired) + { + p_FmPort->fifoBufs.num = minFifoSizeRequired; + DBG(INFO, ("FIFO size enlarged to %d", minFifoSizeRequired)); + } + } + memcpy(&rxPoolsParams, &p_FmPort->rxPoolsParams, sizeof(rxPoolsParams)); + err = FmSetSizeOfFifo(p_FmPort->h_Fm, + p_FmPort->hardwarePortId, + p_FmPort->portType, + p_FmPort->imEn, + &p_SizeOfFifo->num, + p_SizeOfFifo->extra, + 
p_FmPort->txFifoDeqPipelineDepth, + &rxPoolsParams, + FALSE); + if(err) + RETURN_ERROR(MINOR, err, NO_MSG); + + /* update driver's structure AFTER the FM routine, as it may change by the FM. */ + memcpy(&p_FmPort->fifoBufs, p_SizeOfFifo, sizeof(t_FmPortRsrc)); + + return E_OK; +} + +uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0); + + return p_FmPort->bufferOffsets.dataOffset; +} + +uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0); + + if(p_FmPort->bufferOffsets.pcdInfoOffset == ILLEGAL_BASE) + return NULL; + + return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.pcdInfoOffset); +} + +#ifdef DEBUG +uint8_t * FM_PORT_GetBufferDebugInfo(t_Handle h_FmPort, char *p_Data) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0); + + if(p_FmPort->bufferOffsets.debugOffset == ILLEGAL_BASE) + return NULL; + + return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.debugOffset); +} +#endif /* DEBUG */ + +t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, NULL); + + if(p_FmPort->bufferOffsets.prsResultOffset == ILLEGAL_BASE) + return NULL; + + return (t_FmPrsResult *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.prsResultOffset); +} + +uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, NULL); + + if(p_FmPort->bufferOffsets.timeStampOffset == ILLEGAL_BASE) + return NULL; + + return (uint64_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.timeStampOffset); +} + +uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, 0); + + if(p_FmPort->bufferOffsets.hashResultOffset == ILLEGAL_BASE) + return NULL; + + return (uint8_t *)PTR_MOVE(p_Data, p_FmPort->bufferOffsets.hashResultOffset); +} + +t_Error FM_PORT_Disable(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiCfgReg = NULL; + volatile uint32_t *p_BmiStatusReg = NULL; + bool rxPort = FALSE; + int tries; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rcfg; + p_BmiStatusReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rst; + rxPort = TRUE; + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg; + p_BmiStatusReg = 
&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tst; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ocfg; + p_BmiStatusReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ost; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + /* check if port is already disabled */ + if(!(GET_UINT32(*p_BmiCfgReg) & BMI_PORT_CFG_EN)) + { + if (!rxPort && !p_FmPort->imEn) + { + if(!(GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc)& QMI_PORT_CFG_EN)) + /* port is disabled */ + return E_OK; + else + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Inconsistency: Port's QMI is enabled but BMI disabled")); + } + /* port is disabled */ + return E_OK; + } + + /* Disable QMI */ + if (!rxPort && !p_FmPort->imEn) + { + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc, + GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc) & ~QMI_PORT_CFG_EN); + /* wait for QMI to finish Handling dequeue tnums */ + tries=1000; + while ((GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pns) & QMI_PORT_STATUS_DEQ_FD_BSY) && + --tries) + XX_UDelay(1); + if (!tries) + RETURN_ERROR(MINOR, E_BUSY, ("%s: can't disable!", p_FmPort->name)); + } + + /* Disable BMI */ + WRITE_UINT32(*p_BmiCfgReg, GET_UINT32(*p_BmiCfgReg) & ~BMI_PORT_CFG_EN); + + if (p_FmPort->imEn) + FmPortImDisable(p_FmPort); + + tries=5000; + while ((GET_UINT32(*p_BmiStatusReg) & BMI_PORT_STATUS_BSY) && + --tries) + XX_UDelay(1); + + if (!tries) + RETURN_ERROR(MINOR, E_BUSY, ("%s: can't disable!", p_FmPort->name)); + + p_FmPort->enabled = 0; + + return E_OK; +} + +t_Error FM_PORT_Enable(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiCfgReg = NULL; + bool rxPort = FALSE; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rcfg; + rxPort = TRUE; + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfg; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + p_BmiCfgReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ocfg; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + /* check if port is already enabled */ + if(GET_UINT32(*p_BmiCfgReg) & BMI_PORT_CFG_EN) + { + if (!rxPort && !p_FmPort->imEn) + { + if(GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc)& QMI_PORT_CFG_EN) + /* port is enabled */ + return E_OK; + else + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Inconsistency: Port's BMI is enabled but QMI disabled")); + } + /* port is enabled */ + return E_OK; + } + + if (p_FmPort->imEn) + FmPortImEnable(p_FmPort); + + /* Enable QMI */ + if (!rxPort && !p_FmPort->imEn) + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc, + GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc) | QMI_PORT_CFG_EN); + + /* Enable BMI */ + WRITE_UINT32(*p_BmiCfgReg, GET_UINT32(*p_BmiCfgReg) | BMI_PORT_CFG_EN); + + p_FmPort->enabled = 1; + + return E_OK; +} + +t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + uint32_t tmpRateLimit, tmpRateLimitScale; + volatile uint32_t *p_RateLimitReg, *p_RateLimitScaleReg; + uint8_t factor, countUnitBit; + uint16_t baseGran; + + 
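+ /* The requested rate is programmed as (rateLimit * factor / baseGran) - 1, where baseGran is 16000 for Tx ports and 10000 for offline parsing ports; the loop below raises countUnitBit (the timestamp count unit) and doubles 'factor' until the requested rate fits that granularity. */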
SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || + (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx and Offline parsing ports only")); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_RateLimitReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_trlmt; + p_RateLimitScaleReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_trlmts; + baseGran = 16000; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_RateLimitReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_orlmt; + p_RateLimitScaleReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_orlmts; + baseGran = 10000; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + countUnitBit = (uint8_t)FmGetTimeStampScale(p_FmPort->h_Fm); /* TimeStamp per nano seconds units */ + /* normally, we use 1 usec as the reference count */ + factor = 1; + /* if ratelimit is too small for a 1usec factor, multiply the factor */ + while (p_RateLimit->rateLimit < baseGran/factor) + { + if (countUnitBit==31) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too small")); + + countUnitBit++; + factor <<= 1; + } + /* if ratelimit is too large for a 1usec factor, it is also larger than max rate*/ + if (p_RateLimit->rateLimit > ((uint32_t)baseGran * (1<<10) * (uint32_t)factor)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Rate limit is too large")); + + tmpRateLimit = (uint32_t)(p_RateLimit->rateLimit*factor/baseGran - 1); + + if(!p_RateLimit->maxBurstSize || (p_RateLimit->maxBurstSize > MAX_BURST_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxBurstSize must be between 1K and %dk", MAX_BURST_SIZE)); + + tmpRateLimitScale = ((31 - (uint32_t)countUnitBit) << BMI_COUNT_RATE_UNIT_SHIFT) | BMI_RATE_LIMIT_EN; + + if(p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + tmpRateLimit |= (uint32_t)(p_RateLimit->maxBurstSize - 1) << BMI_MAX_BURST_SHIFT; + else + { +#ifndef FM_NO_ADVANCED_RATE_LIMITER + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if (revInfo.majorRev == 4) + { + switch(p_RateLimit->rateLimitDivider) + { + case(e_FM_PORT_DUAL_RATE_LIMITER_NONE): + break; + case(e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_2): + tmpRateLimitScale |= BMI_RATE_LIMIT_SCALE_BY_2; + break; + case(e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_4): + tmpRateLimitScale |= BMI_RATE_LIMIT_SCALE_BY_4; + break; + case(e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8): + tmpRateLimitScale |= BMI_RATE_LIMIT_SCALE_BY_8; + break; + default: + break; + } + tmpRateLimit |= BMI_RATE_LIMIT_BURST_SIZE_GRAN; + } + else +#endif /* ! 
FM_NO_ADVANCED_RATE_LIMITER */ + { + if(p_RateLimit->rateLimitDivider != e_FM_PORT_DUAL_RATE_LIMITER_NONE) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("FM_PORT_ConfigDualRateLimitScaleDown")); + + if(p_RateLimit->maxBurstSize % 1000) + { + p_RateLimit->maxBurstSize = (uint16_t)((p_RateLimit->maxBurstSize/1000)+1); + DBG(WARNING, ("rateLimit.maxBurstSize rounded up to %d", (p_RateLimit->maxBurstSize/1000+1)*1000)); + } + else + p_RateLimit->maxBurstSize = (uint16_t)(p_RateLimit->maxBurstSize/1000); + } + tmpRateLimit |= (uint32_t)(p_RateLimit->maxBurstSize - 1) << BMI_MAX_BURST_SHIFT; + + } + WRITE_UINT32(*p_RateLimitScaleReg, tmpRateLimitScale); + WRITE_UINT32(*p_RateLimitReg, tmpRateLimit); + + return E_OK; +} + +t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_RateLimitReg, *p_RateLimitScaleReg; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if((p_FmPort->portType == e_FM_PORT_TYPE_RX_10G) || (p_FmPort->portType == e_FM_PORT_TYPE_RX) || + (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Tx and Offline parsing ports only")); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_RateLimitReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_trlmt; + p_RateLimitScaleReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_trlmts; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_RateLimitReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_orlmt; + p_RateLimitScaleReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_orlmts; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + WRITE_UINT32(*p_RateLimitScaleReg, 0); + WRITE_UINT32(*p_RateLimitReg, 0); + + return E_OK; +} + + +t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + tmpReg = GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc); + if(enable) + tmpReg |= QMI_PORT_CFG_EN_COUNTERS ; + else + tmpReg &= ~QMI_PORT_CFG_EN_COUNTERS; + + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc, tmpReg); + + return E_OK; +} + +t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiPcReg = NULL; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiPcReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpc; + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_BmiPcReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpc; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + p_BmiPcReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opc; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + if(enable) + WRITE_UINT32(*p_BmiPcReg, BMI_COUNTERS_EN); + else + WRITE_UINT32(*p_BmiPcReg, 0); + + return E_OK; +} + +t_Error FM_PORT_SetPerformanceCountersParams(t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + 
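+ /* The performance "comparator" values supplied by the caller are validated below against the resources currently allocated to the port (tasks, open DMAs, FIFO size and, for Rx/Tx ports, queue occupancy) and then packed into the BMI performance-counters parameters register. */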
uint32_t tmpReg; + volatile uint32_t *p_BmiPcpReg = NULL; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiPcpReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpcp; + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_BmiPcpReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tpcp; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + p_BmiPcpReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opcp; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + /* check parameters */ + if (!p_FmPortPerformanceCnt->taskCompVal || + (p_FmPortPerformanceCnt->taskCompVal > p_FmPort->tasks.num)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("performanceCnt.taskCompVal has to be in the range of 1 - %d (current value)!", + p_FmPort->tasks.num)); + if (!p_FmPortPerformanceCnt->dmaCompVal || + (p_FmPortPerformanceCnt->dmaCompVal > p_FmPort->openDmas.num)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("performanceCnt.dmaCompVal has to be in the range of 1 - %d (current value)!", + p_FmPort->openDmas.num)); + if (!p_FmPortPerformanceCnt->fifoCompVal || + (p_FmPortPerformanceCnt->fifoCompVal > p_FmPort->fifoBufs.num)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("performanceCnt.fifoCompVal has to be in the range of 256 - %d (current value)!", + p_FmPort->fifoBufs.num)); + if (p_FmPortPerformanceCnt->fifoCompVal % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("performanceCnt.fifoCompVal has to be divisible by %d", + BMI_FIFO_UNITS)); + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + if (!p_FmPortPerformanceCnt->queueCompVal || + (p_FmPortPerformanceCnt->queueCompVal > MAX_PERFORMANCE_RX_QUEUE_COMP)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("performanceCnt.queueCompVal for Rx has to be in the range of 1 - %d", + MAX_PERFORMANCE_RX_QUEUE_COMP)); + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + if (!p_FmPortPerformanceCnt->queueCompVal || + (p_FmPortPerformanceCnt->queueCompVal > MAX_PERFORMANCE_TX_QUEUE_COMP)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("performanceCnt.queueCompVal for Tx has to be in the range of 1 - %d", + MAX_PERFORMANCE_TX_QUEUE_COMP)); + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + if (p_FmPortPerformanceCnt->queueCompVal) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("performanceCnt.queueCompVal is not relevant for H/O ports.")); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + tmpReg = 0; + tmpReg |= ((uint32_t)(p_FmPortPerformanceCnt->queueCompVal - 1) << BMI_PERFORMANCE_PORT_COMP_SHIFT); + tmpReg |= ((uint32_t)(p_FmPortPerformanceCnt->dmaCompVal- 1) << BMI_PERFORMANCE_DMA_COMP_SHIFT); + tmpReg |= ((uint32_t)(p_FmPortPerformanceCnt->fifoCompVal/BMI_FIFO_UNITS - 1) << BMI_PERFORMANCE_FIFO_COMP_SHIFT); + if ((p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING) && (p_FmPort->portType != e_FM_PORT_TYPE_OH_HOST_COMMAND)) + tmpReg |= ((uint32_t)(p_FmPortPerformanceCnt->taskCompVal - 1) << BMI_PERFORMANCE_TASK_COMP_SHIFT); + + WRITE_UINT32(*p_BmiPcpReg, tmpReg); + + return E_OK; +} + +t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmPortPerformanceCnt currParams, savedParams; + t_Error err; + bool underTest, failed = FALSE; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, 
E_INVALID_HANDLE); + + XX_Print("Analyzing Performance parameters for port (type %d, id%d)\n", + p_FmPort->portType, p_FmPort->portId); + + currParams.taskCompVal = (uint8_t)p_FmPort->tasks.num; + if ((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || + (p_FmPort->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND)) + currParams.queueCompVal = 0; + else + currParams.queueCompVal = 1; + currParams.dmaCompVal =(uint8_t) p_FmPort->openDmas.num; + currParams.fifoCompVal = p_FmPort->fifoBufs.num; + + FM_PORT_SetPerformanceCounters(p_FmPort, FALSE); + ClearPerfCnts(p_FmPort); + if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + FM_PORT_SetPerformanceCounters(p_FmPort, TRUE); + XX_UDelay(1000000); + FM_PORT_SetPerformanceCounters(p_FmPort, FALSE); + if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL)) + { + XX_Print ("Max num of defined port tasks (%d) utilized - Please enlarge\n",p_FmPort->tasks.num); + failed = TRUE; + } + if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL)) + { + XX_Print ("Max num of defined port openDmas (%d) utilized - Please enlarge\n",p_FmPort->openDmas.num); + failed = TRUE; + } + if (FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL)) + { + XX_Print ("Max size of defined port fifo (%d) utilized - Please enlarge\n",p_FmPort->fifoBufs.num*BMI_FIFO_UNITS); + failed = TRUE; + } + if (failed) + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + + memset(&savedParams, 0, sizeof(savedParams)); + while (TRUE) + { + underTest = FALSE; + if ((currParams.taskCompVal != 1) && !savedParams.taskCompVal) + { + currParams.taskCompVal--; + underTest = TRUE; + } + if ((currParams.dmaCompVal != 1) && !savedParams.dmaCompVal) + { + currParams.dmaCompVal--; + underTest = TRUE; + } + if ((currParams.fifoCompVal != BMI_FIFO_UNITS) && !savedParams.fifoCompVal) + { + currParams.fifoCompVal -= BMI_FIFO_UNITS; + underTest = TRUE; + } + if (!underTest) + break; + + ClearPerfCnts(p_FmPort); + if ((err = FM_PORT_SetPerformanceCountersParams(p_FmPort, &currParams)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + FM_PORT_SetPerformanceCounters(p_FmPort, TRUE); + XX_UDelay(1000000); + FM_PORT_SetPerformanceCounters(p_FmPort, FALSE); + + if (!savedParams.taskCompVal && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL)) + savedParams.taskCompVal = (uint8_t)(currParams.taskCompVal+2); + if (!savedParams.dmaCompVal && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_DMA_UTIL)) + savedParams.dmaCompVal = (uint8_t)(currParams.dmaCompVal+2); + if (!savedParams.fifoCompVal && FM_PORT_GetCounter(p_FmPort, e_FM_PORT_COUNTERS_FIFO_UTIL)) + savedParams.fifoCompVal = currParams.fifoCompVal+2; + } + + XX_Print("best vals: tasks %d, dmas %d, fifos %d\n", + savedParams.taskCompVal, savedParams.dmaCompVal, savedParams.fifoCompVal); + return E_OK; +} + +t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + uint32_t tmpReg; + volatile uint32_t *p_BmiStcReg = NULL; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiStcReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rstc; + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + p_BmiStcReg = &p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tstc; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + 
case(e_FM_PORT_TYPE_OH_HOST_COMMAND):
+ p_BmiStcReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ostc;
+ break;
+ default:
+ RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type"));
+ }
+
+ tmpReg = GET_UINT32(*p_BmiStcReg);
+
+ if(enable)
+ tmpReg |= BMI_COUNTERS_EN;
+ else
+ tmpReg &= ~BMI_COUNTERS_EN;
+
+ WRITE_UINT32(*p_BmiStcReg, tmpReg);
+
+ return E_OK;
+}
+
+t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+ volatile uint32_t *p_ErrQReg, *p_ErrDiscard;
+
+ switch(p_FmPort->portType)
+ {
+ case(e_FM_PORT_TYPE_RX_10G):
+ case(e_FM_PORT_TYPE_RX):
+ p_ErrQReg = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsem;
+ p_ErrDiscard = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfsdm;
+ break;
+ case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING):
+ p_ErrQReg = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsem;
+ p_ErrDiscard = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofsdm;
+ break;
+ default:
+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only"));
+ }
+
+ if (GET_UINT32(*p_ErrDiscard) & errs)
+ RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Selected errors include errors that were already configured to cause frame discard."));
+
+ WRITE_UINT32(*p_ErrQReg, errs);
+
+ return E_OK;
+}
+
+t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, bool enable)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+ uint32_t tmpReg;
+ int i;
+
+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(poolId < BM_MAX_NUM_OF_POOLS, E_INVALID_VALUE);
+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
+
+ if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX))
+ RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only"));
+
+ for(i=0 ; i< FM_PORT_MAX_NUM_OF_EXT_POOLS ; i++)
+ {
+ tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi[i]);
+ if ((uint8_t)((tmpReg & BMI_EXT_BUF_POOL_ID_MASK) >> BMI_EXT_BUF_POOL_ID_SHIFT) == poolId)
+ {
+ if(enable)
+ tmpReg |= BMI_EXT_BUF_POOL_EN_COUNTER;
+ else
+ tmpReg &= ~BMI_EXT_BUF_POOL_EN_COUNTER;
+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi[i], tmpReg);
+ break;
+ }
+ }
+ if (i == FM_PORT_MAX_NUM_OF_EXT_POOLS)
+ RETURN_ERROR(MINOR, E_INVALID_VALUE,("poolId %d is not included in this port's pools", poolId));
+
+ return E_OK;
+}
+
+uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters counter)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+ bool bmiCounter = FALSE;
+ volatile uint32_t *p_Reg;
+
+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
+
+ switch(counter)
+ {
+ case(e_FM_PORT_COUNTERS_DEQ_TOTAL):
+ case(e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
+ case(e_FM_PORT_COUNTERS_DEQ_CONFIRM ):
+ /* check that counter is available for the port type */
+ if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G))
+ {
+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Rx ports"));
+ return 0;
+ }
+ bmiCounter = FALSE;
+ case(e_FM_PORT_COUNTERS_ENQ_TOTAL):
+ bmiCounter = FALSE;
+ break;
+ default: /* BMI counters (or error - will be checked in BMI routine )*/
+ bmiCounter = TRUE;
+ break;
+ }
+
+ if(bmiCounter)
+ {
+ switch(p_FmPort->portType)
+ {
+ case(e_FM_PORT_TYPE_RX_10G):
+ case(e_FM_PORT_TYPE_RX):
+ if(BmiRxPortCheckAndGetCounterPtr(p_FmPort, counter, &p_Reg))
+ {
+ REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG);
+ return 0;
+ }
+ break;
+
case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + if(BmiTxPortCheckAndGetCounterPtr(p_FmPort, counter, &p_Reg)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return 0; + } + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + if(BmiOhPortCheckAndGetCounterPtr(p_FmPort, counter, &p_Reg)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return 0; + } + break; + default: + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Unsupported port type")); + return 0; + } + return GET_UINT32(*p_Reg); + } + else /* QMI counter */ + { + + /* check that counters are enabled */ + if(!(GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc) & QMI_PORT_CFG_EN_COUNTERS)) + { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + return 0; + } + + /* Set counter */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_ENQ_TOTAL): + return GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnetfc); + case(e_FM_PORT_COUNTERS_DEQ_TOTAL): + return GET_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndtfc); + case(e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT): + return GET_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndfdc); + case(e_FM_PORT_COUNTERS_DEQ_CONFIRM): + return GET_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndcc); + default: + REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available")); + return 0; + } + } + + return 0; +} + +t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters counter, uint32_t value) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + bool bmiCounter = FALSE; + volatile uint32_t *p_Reg; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + switch(counter) + { + case(e_FM_PORT_COUNTERS_DEQ_TOTAL): + case(e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT): + case(e_FM_PORT_COUNTERS_DEQ_CONFIRM ): + /* check that counter is available for the port type */ + if((p_FmPort->portType == e_FM_PORT_TYPE_RX) || (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for Rx ports")); + case(e_FM_PORT_COUNTERS_ENQ_TOTAL): + bmiCounter = FALSE; + break; + default: /* BMI counters (or error - will be checked in BMI routine )*/ + bmiCounter = TRUE; + break; + } + + if(bmiCounter) + { + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + if(BmiRxPortCheckAndGetCounterPtr(p_FmPort, counter, &p_Reg)) + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + break; + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + if(BmiTxPortCheckAndGetCounterPtr(p_FmPort, counter, &p_Reg)) + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + if(BmiOhPortCheckAndGetCounterPtr(p_FmPort, counter, &p_Reg)) + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Unsupported port type")); + } + WRITE_UINT32(*p_Reg, value); + } + else /* QMI counter */ + { + + /* check that counters are enabled */ + if(!(GET_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnc) & QMI_PORT_CFG_EN_COUNTERS)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + + /* Set counter */ + switch(counter) + { + case(e_FM_PORT_COUNTERS_ENQ_TOTAL): + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnetfc, value); + break; + case(e_FM_PORT_COUNTERS_DEQ_TOTAL): + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndtfc, 
value);
+ break;
+ case(e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT):
+ WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndfdc, value);
+ break;
+ case(e_FM_PORT_COUNTERS_DEQ_CONFIRM):
+ WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndcc, value);
+ break;
+ default:
+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available"));
+ }
+ }
+
+ return E_OK;
+}
+
+uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+ uint32_t extPoolReg;
+ uint8_t tmpPool;
+ uint8_t i;
+
+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, 0);
+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
+
+ if((p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G))
+ {
+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for non-Rx ports"));
+ return 0;
+ }
+
+ for(i=0;i<FM_PORT_MAX_NUM_OF_EXT_POOLS;i++)
+ {
+ extPoolReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi[i]);
+ if (extPoolReg & BMI_EXT_BUF_POOL_VALID)
+ {
+ tmpPool = (uint8_t)((extPoolReg & BMI_EXT_BUF_POOL_ID_MASK) >> BMI_EXT_BUF_POOL_ID_SHIFT);
+ if(tmpPool == poolId)
+ {
+ if(extPoolReg & BMI_EXT_BUF_POOL_EN_COUNTER)
+ return GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_acnt[i]);
+ else
+ {
+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not enabled"));
+ return 0;
+ }
+ }
+ }
+ }
+ REPORT_ERROR(MINOR, E_INVALID_STATE, ("Pool %d is not used", poolId));
+ return 0;
+}
+
+t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, uint32_t value)
+{
+ t_FmPort *p_FmPort = (t_FmPort *)h_FmPort;
+ uint32_t extPoolReg;
+ uint8_t tmpPool;
+ uint8_t i;
+
+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
+
+ if((p_FmPort->portType != e_FM_PORT_TYPE_RX) && (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G))
+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not available for non-Rx ports"));
+
+
+ for(i=0;i<FM_PORT_MAX_NUM_OF_EXT_POOLS;i++)
+ {
+ extPoolReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi[i]);
+ if (extPoolReg & BMI_EXT_BUF_POOL_VALID)
+ {
+ tmpPool = (uint8_t)((extPoolReg & BMI_EXT_BUF_POOL_ID_MASK) >> BMI_EXT_BUF_POOL_ID_SHIFT);
+ if(tmpPool == poolId)
+ {
+ if(extPoolReg & BMI_EXT_BUF_POOL_EN_COUNTER)
+ {
+ WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_acnt[i], value);
+ return E_OK;
+ }
+ else
+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter is not enabled"));
+ }
+ }
+ }
+ RETURN_ERROR(MINOR, E_INVALID_STATE, ("Pool %d is not used", poolId));
+}
+
+bool FM_PORT_IsStalled(t_Handle h_FmPort)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+ t_Error err;
+ bool isStalled;
+
+ SANITY_CHECK_RETURN_VALUE(p_FmPort, E_INVALID_HANDLE, FALSE);
+ SANITY_CHECK_RETURN_VALUE(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE, FALSE);
+
+ err = FmIsPortStalled(p_FmPort->h_Fm, p_FmPort->hardwarePortId, &isStalled);
+ if(err != E_OK)
+ {
+ REPORT_ERROR(MINOR, err, NO_MSG);
+ return TRUE;
+ }
+ return isStalled;
+}
+
+t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+
+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
+ SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE);
+
+ return FmResumeStalledPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId);
+}
+
+t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum)
+{
+ t_FmPort *p_FmPort = (t_FmPort*)h_FmPort;
+ uint32_t tmpReg;
+
+ SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE);
+
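+ /* Rx L4 checksum verification is toggled through the BMI_PORT_RFNE_FRWD_DCL4C bit in fmbm_rfne: the bit is cleared when l4Checksum is TRUE and set when it is FALSE. */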
SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if ((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx ports only")); + + tmpReg = GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne); + if (l4Checksum) + tmpReg &= ~BMI_PORT_RFNE_FRWD_DCL4C; + else + tmpReg |= BMI_PORT_RFNE_FRWD_DCL4C; + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne, tmpReg); + + return E_OK; +} + + +/* API Run-time PCD Control unit functions */ + +t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err = E_OK; + + p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm); + ASSERT_COND(p_FmPort->h_FmPcd); + + if(numOfProfiles) + { + err = FmPcdPlcrAllocProfiles(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, numOfProfiles); + if(err) + RETURN_ERROR(MAJOR, err,NO_MSG); + } + FmPcdPortRegister(p_FmPort->h_FmPcd, h_FmPort, p_FmPort->hardwarePortId); + + return E_OK; +} + +t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err = E_OK; + + err = FmPcdPlcrFreeProfiles(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId); + if(err) + RETURN_ERROR(MAJOR, err,NO_MSG); + return E_OK; +} + +t_Error FM_PORT_PcdKgModifyInitialScheme (t_Handle h_FmPort, t_FmPcdKgSchemeSelect *p_FmPcdKgScheme) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiHpnia = NULL; + uint32_t tmpReg; + uint8_t relativeSchemeId; + uint8_t physicalSchemeId; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG , E_INVALID_STATE); + + tmpReg = (uint32_t)((p_FmPort->pcdEngines & FM_PCD_CC)? 
NIA_KG_CC_EN:0); + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiHpnia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiHpnia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofpne; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + } + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + /* if we want to change to direct scheme, we need to check that this scheme is valid */ + if(p_FmPcdKgScheme->direct) + { + physicalSchemeId = (uint8_t)(PTR_TO_UINT(p_FmPcdKgScheme->h_DirectScheme)-1); + /* check that this scheme is bound to this port */ + if(!(p_FmPort->schemesPerPortVector & (uint32_t)(1 << (31 - (uint32_t)physicalSchemeId)))) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("called with a scheme that is not bound to this port")); + } + + relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmPort->h_FmPcd, physicalSchemeId); + if(relativeSchemeId >= FM_PCD_KG_NUM_OF_SCHEMES) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, ("called with invalid Scheme ")); + } + + if(!FmPcdKgIsSchemeValidSw(p_FmPort->h_FmPcd, relativeSchemeId)) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("called with uninitialized Scheme ")); + } + + WRITE_UINT32(*p_BmiHpnia, NIA_ENG_KG | tmpReg | NIA_KG_DIRECT | (uint32_t)physicalSchemeId); + } + else /* change to indirect scheme */ + WRITE_UINT32(*p_BmiHpnia, NIA_ENG_KG | tmpReg); + RELEASE_LOCK(p_FmPort->lock); + + return E_OK; +} + +t_Error FM_PORT_PcdPlcrModifyInitialProfile (t_Handle h_FmPort, t_Handle h_Profile) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiNia; + volatile uint32_t *p_BmiHpnia; + uint32_t tmpReg; + uint16_t absoluteProfileId = (uint16_t)(PTR_TO_UINT(h_Profile)-1); + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_PLCR , E_INVALID_STATE); + + /* check relevancy of this routine - only when policer is used + directly after BMI or Parser */ + if((p_FmPort->pcdEngines & FM_PCD_KG) || (p_FmPort->pcdEngines & FM_PCD_CC)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("relevant only when PCD support mode is e_FM_PCD_SUPPORT_PLCR_ONLY or e_FM_PCD_SUPPORT_PRS_AND_PLCR")); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne; + p_BmiHpnia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfpne; + tmpReg = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne; + p_BmiHpnia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofpne; + tmpReg = 0; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + } + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + if(!FmPcdPlcrIsProfileValid(p_FmPort->h_FmPcd, absoluteProfileId)) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Invalid profile")); + } + + tmpReg = (uint32_t)(NIA_ENG_PLCR | NIA_PLCR_ABSOLUTE | absoluteProfileId); + + if(p_FmPort->pcdEngines & FM_PCD_PRS) /* e_FM_PCD_SUPPORT_PRS_AND_PLCR */ + { + /* update BMI HPNIA 
*/ + WRITE_UINT32(*p_BmiHpnia, tmpReg); + } + else /* e_FM_PCD_SUPPORT_PLCR_ONLY */ + { + /* rfne may contain FDCS bits, so first we read them. */ + tmpReg |= (GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK); + /* update BMI NIA */ + WRITE_UINT32(*p_BmiNia, tmpReg); + } + RELEASE_LOCK(p_FmPort->lock); + + return E_OK; +} + + +t_Error FM_PORT_PcdCcModifyTree (t_Handle h_FmPort, t_Handle h_CcTree) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err = E_OK; + volatile uint32_t *p_BmiCcBase=NULL; + volatile uint32_t *p_BmiNia=NULL; + uint32_t ccTreePhysOffset; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_VALUE); + + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independant mode ports only")); + + /* get PCD registers pointers */ + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + } + + /* check that current NIA is BMI to BMI */ + if((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK) != (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("may be called only for ports in BMI-to-BMI state.")); + +/*TODO - to take care of changes due to previous tree. Maybe in the previous tree where chnged pndn, pnen ... + it has to be returned to the default state - initially*/ + + p_FmPort->requiredAction = 0; + + if(p_FmPort->pcdEngines & FM_PCD_CC) + { + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiCcBase = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rccb; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiCcBase = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_occb; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid port type")); + } + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + err = FmPcdCcBindTree(p_FmPort->h_FmPcd, h_CcTree, &ccTreePhysOffset, h_FmPort); + if(err) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MINOR, err, NO_MSG); + } + WRITE_UINT32(*p_BmiCcBase, ccTreePhysOffset); + + p_FmPort->ccTreeId = h_CcTree; + RELEASE_LOCK(p_FmPort->lock); + } + else + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Coarse CLassification not defined for this port.")); + + return E_OK; +} + +t_Error FM_PORT_AttachPCD(t_Handle h_FmPort) +{ + + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independant mode ports only")); + + /* TODO - may add here checks for: + SP (or in sw: schemes) + CPP (or in sw clsPlan) + Parser enabled and configured(?) + Tree(?) + Profile - only if direct. 
+ Scheme - only if direct + */ + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + err = FmPortAttachPCD(h_FmPort); + RELEASE_LOCK(p_FmPort->lock); + + return err; +} + +t_Error FM_PORT_DetachPCD(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiNia=NULL; + + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independant mode ports only")); + + /* get PCD registers pointers */ + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + } + + WRITE_UINT32(*p_BmiNia, (p_FmPort->savedBmiNia & BMI_RFNE_FDCS_MASK) | (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)); + +/*TODO - not atomic - it seems that port has to be disabled*/ + if(p_FmPort->requiredAction & UPDATE_NIA_PNEN) + { + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE); + break; + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + case(e_FM_PORT_TYPE_RX): + case(e_FM_PORT_TYPE_RX_10G): + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->fmqm_pnen, NIA_ENG_BMI | NIA_BMI_AC_RELEASE); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Can not reach this stage")); + } + } + + if(p_FmPort->requiredAction & UPDATE_NIA_PNDN) + { + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_TX_10G): + case(e_FM_PORT_TYPE_TX): + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn, NIA_ENG_BMI | NIA_BMI_AC_TX); + break; + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + WRITE_UINT32(p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs.fmqm_pndn, NIA_ENG_BMI | NIA_BMI_AC_FETCH); + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Can not reach this stage")); + } + } + + + if(p_FmPort->requiredAction & UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY) + if(FmSetNumOfRiscsPerPort(p_FmPort->h_Fm, p_FmPort->hardwarePortId, 2)!= E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + return E_OK; +} + +t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_PcdParams) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmPcdKgInterModuleBindPortToSchemes schemeBind; + t_Error err = E_OK; + uint8_t i; + + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independent mode ports only")); + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + p_FmPort->h_FmPcd = FmGetPcdHandle(p_FmPort->h_Fm); + ASSERT_COND(p_FmPort->h_FmPcd); + + err = SetPcd( h_FmPort, p_PcdParams); + if(err) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + if(p_FmPort->pcdEngines & FM_PCD_KG) + { + schemeBind.netEnvId = p_FmPort->netEnvId; + schemeBind.hardwarePortId = p_FmPort->hardwarePortId; + schemeBind.numOfSchemes = p_PcdParams->p_KgParams->numOfSchemes; + schemeBind.useClsPlan = p_FmPort->useClsPlan; 
+ for(i = 0;i<schemeBind.numOfSchemes;i++) + schemeBind.schemesIds[i] = (uint8_t)(PTR_TO_UINT(p_PcdParams->p_KgParams->h_Schemes[i])-1); + + err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); + if(err) + { + DeletePcd(p_FmPort); + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + + if ((p_FmPort->pcdEngines & FM_PCD_PRS) && (p_PcdParams->p_PrsParams->includeInPrsStatistics)) + FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, TRUE); + + FmPcdIncNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId); + + err = FmPortAttachPCD(h_FmPort); + RELEASE_LOCK(p_FmPort->lock); + + return err; +} + +t_Error FM_PORT_DeletePCD(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmPcdKgInterModuleBindPortToSchemes schemeBind; + t_Error err = E_OK; + + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + + if (p_FmPort->imEn) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for non-independant mode ports only")); + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + + err = FM_PORT_DetachPCD(h_FmPort); + if(err) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + FmPcdDecNetEnvOwners(p_FmPort->h_FmPcd, p_FmPort->netEnvId); + + /* we do it anyway, instead of checking if included */ + if (FmIsMaster(p_FmPort->h_Fm) && + (p_FmPort->pcdEngines & FM_PCD_PRS)) + FmPcdPrsIncludePortInStatistics(p_FmPort->h_FmPcd, p_FmPort->hardwarePortId, FALSE); + + if(p_FmPort->pcdEngines & FM_PCD_KG) + { + /* unbind all schemes */ + p_FmPort->schemesPerPortVector = GetPortSchemeBindParams(p_FmPort, &schemeBind); + + err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); + if(err) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + } + + err = DeletePcd(h_FmPort); + RELEASE_LOCK(p_FmPort->lock); + + return err; +} + +t_Error FM_PORT_PcdKgBindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmPcdKgInterModuleBindPortToSchemes schemeBind; + t_Error err = E_OK; + uint32_t tmpScmVec=0; + int i; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG , E_INVALID_STATE); + + schemeBind.netEnvId = p_FmPort->netEnvId; + schemeBind.hardwarePortId = p_FmPort->hardwarePortId; + schemeBind.numOfSchemes = p_PortScheme->numOfSchemes; + schemeBind.useClsPlan = p_FmPort->useClsPlan; + for (i=0; i<schemeBind.numOfSchemes; i++) + { + schemeBind.schemesIds[i] = (uint8_t)(PTR_TO_UINT(p_PortScheme->h_Schemes[i])-1); + /* build vector */ + tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]); + } + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + err = FmPcdKgBindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); + if (err == E_OK) + p_FmPort->schemesPerPortVector |= tmpScmVec; + RELEASE_LOCK(p_FmPort->lock); + + return err; +} + +t_Error FM_PORT_PcdKgUnbindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_FmPcdKgInterModuleBindPortToSchemes schemeBind; + t_Error err = E_OK; + uint32_t tmpScmVec=0; + int i; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_KG , E_INVALID_STATE); + + schemeBind.netEnvId = p_FmPort->netEnvId; + schemeBind.hardwarePortId = p_FmPort->hardwarePortId; + schemeBind.numOfSchemes =
p_PortScheme->numOfSchemes; + for (i=0; i<schemeBind.numOfSchemes; i++) + { + schemeBind.schemesIds[i] = (uint8_t)(PTR_TO_UINT(p_PortScheme->h_Schemes[i])-1); + /* build vector */ + tmpScmVec |= 1 << (31 - (uint32_t)schemeBind.schemesIds[i]); + } + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + err = FmPcdKgUnbindPortToSchemes(p_FmPort->h_FmPcd, &schemeBind); + if (err == E_OK) + p_FmPort->schemesPerPortVector &= ~tmpScmVec; + RELEASE_LOCK(p_FmPort->lock); + + return err; +} + +t_Error FM_PORT_PcdPrsModifyStartOffset (t_Handle h_FmPort, t_FmPcdPrsStart *p_FmPcdPrsStart) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + volatile uint32_t *p_BmiPrsStartOffset = NULL; + volatile uint32_t *p_BmiNia = NULL; + uint32_t tmpReg; + uint8_t hdrNum; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->pcdEngines & FM_PCD_PRS , E_INVALID_STATE); + + switch(p_FmPort->portType) + { + case(e_FM_PORT_TYPE_RX_10G): + case(e_FM_PORT_TYPE_RX): + p_BmiPrsStartOffset = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rpso; + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfne; + tmpReg = GET_UINT32(*p_BmiNia) & BMI_RFNE_FDCS_MASK; + break; + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + p_BmiPrsStartOffset = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_opso; + p_BmiNia = &p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ofne; + tmpReg = 0; + break; + default: + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("available for Rx and offline parsing ports only")); + } + + /* check that current NIA is BMI to BMI */ + if((GET_UINT32(*p_BmiNia) & ~BMI_RFNE_FDCS_MASK) != (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("may be called only for ports in BMI-to-BMI state.")); + + if (!TRY_LOCK(p_FmPort->h_Spinlock, &p_FmPort->lock)) + return ERROR_CODE(E_BUSY); + /* set the first header */ + GET_PRS_HDR_NUM(hdrNum, p_FmPcdPrsStart->firstPrsHdr); + if ((hdrNum == ILLEGAL_HDR_NUM) || (hdrNum == NO_HDR_NUM)) + { + RELEASE_LOCK(p_FmPort->lock); + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unsupported header.")); + } + WRITE_UINT32(*p_BmiNia, (uint32_t)(NIA_ENG_PRS | (uint32_t)hdrNum | tmpReg)); + + /* set start parsing offset */ + WRITE_UINT32(*p_BmiPrsStartOffset, (uint32_t)(p_FmPcdPrsStart->parsingOffset + p_FmPort->internalBufferOffset)); + RELEASE_LOCK(p_FmPort->lock); + + return E_OK; +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_PORT_DumpRegs(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err = E_OK; + char arr[30]; + uint8_t flag; + int i=0; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(h_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortQmiRegs, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortBmiRegs, E_INVALID_HANDLE); + + switch (p_FmPort->portType) + { + case (e_FM_PORT_TYPE_OH_OFFLINE_PARSING): + strcpy(arr, "PORT_TYPE_OFFLINE_PARSING"); + flag = 0; + break; + case (e_FM_PORT_TYPE_OH_HOST_COMMAND): + strcpy(arr, "PORT_TYPE_HOST_COMMAND"); + flag = 0; + break; + case (e_FM_PORT_TYPE_RX): + strcpy(arr, "PORT_TYPE_RX"); + flag = 1; + break; + case (e_FM_PORT_TYPE_RX_10G): + strcpy(arr, "PORT_TYPE_RX_10G"); + flag = 1; + break; + case (e_FM_PORT_TYPE_TX): + strcpy(arr, "PORT_TYPE_TX"); + flag = 2; + break; + case (e_FM_PORT_TYPE_TX_10G): + strcpy(arr, "PORT_TYPE_TX_10G"); + flag = 2; + break; + default: + return ERROR_CODE(E_INVALID_VALUE); + } + +
DUMP_TITLE(UINT_TO_PTR(p_FmPort->hardwarePortId), ("PortId for %s %d", arr, p_FmPort->portId )); + DUMP_TITLE(p_FmPort->p_FmPortBmiRegs, ("Bmi Port Regs")); + + err = FmDumpPortRegs(p_FmPort->h_Fm, p_FmPort->hardwarePortId); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + switch(flag) + { + case(0): + + DUMP_SUBTITLE(("\n")); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ocfg); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ost); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_oda); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofdne); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofne); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofca); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofpne); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_opso); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_opp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_occb); + + DUMP_TITLE(&(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_oprai), ("fmbm_oprai")); + DUMP_SUBSTRUCT_ARRAY(i, FM_PORT_PRS_RESULT_NUM_OF_WORDS) + { + DUMP_MEMORY(&(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_oprai[i]), sizeof(uint32_t)); + } + DUMP_SUBTITLE(("\n")); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofqid ); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_oefqid); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofsdm ); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofsem ); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofene ); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_orlmts); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_orlmt); + + { +#ifndef FM_NO_OP_OBSERVED_POOLS + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if (revInfo.majorRev == 4) +#endif /* !FM_NO_OP_OBSERVED_POOLS */ + { + DUMP_TITLE(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_oebmpi, ("fmbm_oebmpi")); + + DUMP_SUBSTRUCT_ARRAY(i, FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS) + { + DUMP_MEMORY(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_oebmpi[i], sizeof(uint32_t)); + } + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ocgm); + } + } + + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ostc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofrc ); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofdc ); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofledc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofufdc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_offc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofwdc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofldec); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_opc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_opcp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_occn); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_otuc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_oduc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs,fmbm_ofuc); + break; + case(1): + DUMP_SUBTITLE(("\n")); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rcfg); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rst); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rda); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_reth); + 
DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfed); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_ricp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rebm); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfne); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfca); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfpne); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rpso); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rpp); + + DUMP_TITLE(&(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rprai), ("fmbm_rprai")); + DUMP_SUBSTRUCT_ARRAY(i, FM_PORT_PRS_RESULT_NUM_OF_WORDS) + { + DUMP_MEMORY(&(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rprai[i]), sizeof(uint32_t)); + } + DUMP_SUBTITLE(("\n")); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfqid); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_refqid); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfsdm); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfsem); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfene); + DUMP_TITLE(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi, ("fmbm_ebmpi")); + DUMP_SUBSTRUCT_ARRAY(i, FM_PORT_MAX_NUM_OF_EXT_POOLS) + { + DUMP_MEMORY(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_ebmpi[i], sizeof(uint32_t)); + } + DUMP_TITLE(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_acnt, ("fmbm_acnt")); + DUMP_SUBSTRUCT_ARRAY(i, FM_PORT_MAX_NUM_OF_EXT_POOLS) + { + DUMP_MEMORY(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_acnt[i], sizeof(uint32_t)); + } + DUMP_TITLE(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_cgm, ("fmbm_cgm")); + DUMP_SUBSTRUCT_ARRAY(i, FM_PORT_NUM_OF_CONGESTION_GRPS/32) + { + DUMP_MEMORY(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_cgm[i], sizeof(uint32_t)); + } + DUMP_SUBTITLE(("\n")); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_mpd); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rstc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfrc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfbc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rlfc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rffc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfcd); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfldec); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rodc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rpc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rpcp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rccn); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rtuc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rrquc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rduc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rfuc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rpac); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs,fmbm_rdbg); + break; + case(2): + + DUMP_SUBTITLE(("\n")); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tcfg); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tst); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tda); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfed); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_ticp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfne); + 
DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfca); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tcfqid); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfeqid); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfene); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_trlmts); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_trlmt); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tstc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfrc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfdc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfledc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfufdc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tpc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tpcp); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tccn); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_ttuc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_ttcquc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tduc); + DUMP_VAR(&p_FmPort->p_FmPortBmiRegs->txPortBmiRegs,fmbm_tfuc); + break; + + default: + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid flag")); + } + + DUMP_TITLE(p_FmPort->p_FmPortQmiRegs, ("Qmi Port Regs")); + + DUMP_VAR(p_FmPort->p_FmPortQmiRegs,fmqm_pnc); + DUMP_VAR(p_FmPort->p_FmPortQmiRegs,fmqm_pns); + DUMP_VAR(p_FmPort->p_FmPortQmiRegs,fmqm_pnts); + DUMP_VAR(p_FmPort->p_FmPortQmiRegs,fmqm_pnen); + DUMP_VAR(p_FmPort->p_FmPortQmiRegs,fmqm_pnetfc); + + if(flag !=1) + { + DUMP_VAR(&p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs,fmqm_pndn); + DUMP_VAR(&p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs,fmqm_pndc); + DUMP_VAR(&p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs,fmqm_pndtfc); + DUMP_VAR(&p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs,fmqm_pndfdc); + DUMP_VAR(&p_FmPort->p_FmPortQmiRegs->nonRxQmiRegs,fmqm_pndcc); + } + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ + +t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + bool tmpArray[FM_PORT_NUM_OF_CONGESTION_GRPS], opPort; + int i; + uint8_t mod; + uint32_t tmpReg = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + + { +#ifdef FM_NO_OP_OBSERVED_CGS + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if (revInfo.majorRev != 4) + { + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only")); + } + else +#endif /* FM_NO_OP_OBSERVED_CGS */ + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX) && + (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx & OP ports only")); + } + + opPort = (bool)((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) ? TRUE:FALSE); + + /* to minimize memory access (groups may belong to the same regsiter, and may + be out of order), we first collect all information into a 256 booleans array, + representing each possible group. 
*/ + + memset(&tmpArray, 0, FM_PORT_NUM_OF_CONGESTION_GRPS*sizeof(bool)); + for(i=0;i<p_CongestionGrps->numOfCongestionGrpsToConsider;i++) + tmpArray[p_CongestionGrps->congestionGrpsToConsider[i]] = TRUE; + + for(i=0;i<FM_PORT_NUM_OF_CONGESTION_GRPS;i++) + { + mod = (uint8_t)(i%32); + /* each 32 congestion groups are represented by one register; read its current value on the first group of each 32 */ + if (mod == 0) + tmpReg = opPort ? GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ocgm): + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_cgm[7-i/32]); + + /* set in the register, the bit representing the relevant congestion group. */ + if(tmpArray[i]) + tmpReg |= (0x00000001 << (uint32_t)mod); + + if (mod == 31) /* last in a 32 bunch of congestion groups - write the corresponding register */ + { + if(opPort) + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ocgm, tmpReg); + else + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_cgm[7-i/32], tmpReg); + } + } + + return E_OK; +} + +t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + bool tmpArray[FM_PORT_NUM_OF_CONGESTION_GRPS], opPort; + int i; + uint8_t mod; + uint32_t tmpReg = 0; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + + { +#ifdef FM_NO_OP_OBSERVED_CGS + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if (revInfo.majorRev != 4) + { + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx ports only")); + } + else +#endif /* FM_NO_OP_OBSERVED_CGS */ + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX) && + (p_FmPort->portType != e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Available for Rx & OP ports only")); + } + + opPort = (bool)((p_FmPort->portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) ? TRUE:FALSE); + + /* to minimize memory access (groups may belong to the same regsiter, and may + be out of order), we first collect all information into a 256 booleans array, + representing each possible group. */ + memset(&tmpArray, 0, FM_PORT_NUM_OF_CONGESTION_GRPS*sizeof(bool)); + for(i=0;i<p_CongestionGrps->numOfCongestionGrpsToConsider;i++) + tmpArray[p_CongestionGrps->congestionGrpsToConsider[i]] = TRUE; + + for(i=0;i<FM_PORT_NUM_OF_CONGESTION_GRPS;i++) + { + mod = (uint8_t)(i%32); + /* each 32 congestion groups are represented by one register; read its current value on the first group of each 32 */ + if (mod == 0) + tmpReg = opPort ? GET_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ocgm): + GET_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_cgm[7-i/32]); + + /* set in the register, the bit representing the relevant congestion group. */ + if(tmpArray[i]) + tmpReg &= ~(0x00000001 << (uint32_t)mod); + + if (mod == 31) /* last in a 32 bunch of congestion groups - write the corresponding register */ + { + if(opPort) + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->ohPortBmiRegs.fmbm_ocgm, tmpReg); + else + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_cgm[7-i/32], tmpReg); + } + } + + return E_OK; +} + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/fm_port.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/fm_port.h @@ -0,0 +1,894 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_port.h + + @Description FM Port internal structures and definitions. +*//***************************************************************************/ +#ifndef __FM_PORT_H +#define __FM_PORT_H + +#include "error_ext.h" +#include "std_ext.h" +#include "fm_port_ext.h" + +#include "fm_common.h" + + +#define __ERR_MODULE__ MODULE_FM_PORT + + +#define MIN_EXT_BUF_SIZE 64 +#define DATA_ALIGNMENT 64 +#define MAX_LIODN_OFFSET 64 + +/**************************************************************************//** + @Description Memory Map defines +*//***************************************************************************/ +#define BMI_PORT_REGS_OFFSET 0 +#define QMI_PORT_REGS_OFFSET 0x400 +#define PRS_PORT_REGS_OFFSET 0x800 + +/**************************************************************************//** + @Description defaults +*//***************************************************************************/ +#define DEFAULT_PORT_deqHighPriority TRUE +#define DEFAULT_PORT_deqType e_FM_PORT_DEQ_TYPE1 +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT +#define DEFAULT_PORT_deqPrefetchOption e_FM_PORT_DEQ_FULL_PREFETCH +#define DEFAULT_PORT_deqPrefetchOption_HC e_FM_PORT_DEQ_NO_PREFETCH +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ +#define DEFAULT_PORT_deqByteCnt 2000 +#define DEFAULT_PORT_bufferPrefixContent_privDataSize 0 +#define DEFAULT_PORT_bufferPrefixContent_passPrsResult FALSE +#define DEFAULT_PORT_bufferPrefixContent_passTimeStamp FALSE +#define DEFAULT_PORT_bufferPrefixContent_allOtherPCDInfo FALSE +#ifdef DEBUG +#define DEFAULT_PORT_bufferPrefixContent_debugInfo FALSE +#endif /* DEBUG */ +#define DEFAULT_PORT_bufferPrefixContent_dataAlign DATA_ALIGNMENT +#define DEFAULT_PORT_cheksumLastBytesIgnore 0 +#define DEFAULT_PORT_cutBytesFromEnd 4 +#define DEFAULT_PORT_txFifoMinFillLevel 0 +#define DEFAULT_PORT_txFifoDeqPipelineDepth_IM 2 +#define DEFAULT_PORT_txFifoDeqPipelineDepth_1G 2 +#define DEFAULT_PORT_txFifoDeqPipelineDepth_10G 8 +#define DEFAULT_PORT_txFifoLowComfLevel (5*KILOBYTE) +#define DEFAULT_PORT_rxFifoPriElevationLevel BMI_MAX_FIFO_SIZE +#define DEFAULT_PORT_rxFifoThreshold (BMI_MAX_FIFO_SIZE*3/4) +#define DEFAULT_PORT_frmDiscardOverride FALSE +#define DEFAULT_PORT_dmaSwapData e_FM_PORT_DMA_NO_SWP 
+#define DEFAULT_PORT_dmaIntContextCacheAttr e_FM_PORT_DMA_NO_STASH +#define DEFAULT_PORT_dmaHeaderCacheAttr e_FM_PORT_DMA_NO_STASH +#define DEFAULT_PORT_dmaScatterGatherCacheAttr e_FM_PORT_DMA_NO_STASH +#define DEFAULT_PORT_dmaWriteOptimize TRUE +#define DEFAULT_PORT_forwardIntContextReuse FALSE +#define DEFAULT_PORT_BufMargins_startMargins 32 +#define DEFAULT_PORT_BufMargins_endMargins 0 +#define DEFAULT_PORT_syncReq TRUE +#define DEFAULT_PORT_syncReqForHc FALSE +#define DEFAULT_PORT_color e_FM_PORT_COLOR_GREEN +#define DEFAULT_PORT_errorsToDiscard FM_PORT_FRM_ERR_CLS_DISCARD +#define DEFAULT_dualRateLimitScaleDown e_FM_PORT_DUAL_RATE_LIMITER_NONE +#define DEFAULT_rateLimitBurstSizeHighGranularity FALSE +#define DEFAULT_exception IM_EV_BSY + +/* Host command port MUST NOT be changed to more than 1 !!! */ +#define DEFAULT_PORT_numOfTasks(type) \ + (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \ + ((type) == e_FM_PORT_TYPE_TX_10G)) ? 16 : \ + ((((type) == e_FM_PORT_TYPE_RX) || \ + ((type) == e_FM_PORT_TYPE_TX) || \ + ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) ? 3 : 1)) + +#define DEFAULT_PORT_extraNumOfTasks(type) \ + (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \ + ((type) == e_FM_PORT_TYPE_TX_10G)) ? 8 : \ + ((((type) == e_FM_PORT_TYPE_RX) || \ + ((type) == e_FM_PORT_TYPE_TX) || \ + ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) ? 2 : 0)) + +#define DEFAULT_PORT_numOfOpenDmas(type) \ + (uint32_t)(((type) == e_FM_PORT_TYPE_TX_10G) ? 8 : \ + (((type) == e_FM_PORT_TYPE_RX_10G) ? 4 : 1)) + +#define DEFAULT_PORT_extraNumOfOpenDmas(type) \ + (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \ + ((type) == e_FM_PORT_TYPE_TX_10G)) ? 8 : \ + ((((type) == e_FM_PORT_TYPE_RX) || \ + ((type) == e_FM_PORT_TYPE_TX) || \ + ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) ? 1 : 0)) + +#if defined(CONFIG_FMAN_RESOURCE_ALLOCATION_ALGORITHM) +/* Let LLD to set minimum fifosize, otherwise fifosize settings will not work */ +#define DEFAULT_PORT_sizeOfFifo(type) \ + (uint32_t)(KILOBYTE) +#else +#define DEFAULT_PORT_sizeOfFifo(type) \ + (uint32_t)((((type) == e_FM_PORT_TYPE_RX_10G) || \ + ((type) == e_FM_PORT_TYPE_TX_10G)) ? (16*KILOBYTE) : \ + ((((type) == e_FM_PORT_TYPE_RX) || \ + ((type) == e_FM_PORT_TYPE_TX) || \ + ((type) == e_FM_PORT_TYPE_OH_OFFLINE_PARSING)) ? (4*KILOBYTE) : (1536))) +#endif + +#define DEFAULT_PORT_extraSizeOfFifo(type) \ + (uint32_t)(((type) == e_FM_PORT_TYPE_RX_10G) ? (8*KILOBYTE) : \ + (((type) == e_FM_PORT_TYPE_RX) ? (4*KILOBYTE) : (0))) + +#define DEFAULT_PORT_txBdRingLength 16 +#define DEFAULT_PORT_rxBdRingLength 128 +#define DEFAULT_PORT_ImfwExtStructsMemId 0 +#define DEFAULT_PORT_ImfwExtStructsMemAttr MEMORY_ATTR_CACHEABLE + +#define OH_PIPELINE_DEPTH 2 + +/**************************************************************************//** + @Description Memory Mapped Registers +*//***************************************************************************/ + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +#define FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS 8 +#define FM_PORT_NUM_OF_CONGESTION_GRPS_ALL_INTEGRATIONS 256 + +typedef _Packed struct +{ + volatile uint32_t fmbm_rcfg; /**< Rx Configuration */ + volatile uint32_t fmbm_rst; /**< Rx Status */ + volatile uint32_t fmbm_rda; /**< Rx DMA attributes*/ + volatile uint32_t fmbm_rfp; /**< Rx FIFO Parameters*/ + volatile uint32_t fmbm_rfed; /**< Rx Frame End Data*/ + volatile uint32_t fmbm_ricp; /**< Rx Internal Context Parameters*/ + volatile uint32_t fmbm_rim; /**< Rx Internal Buffer Margins*/ + volatile uint32_t fmbm_rebm; /**< Rx External Buffer Margins*/ + volatile uint32_t fmbm_rfne; /**< Rx Frame Next Engine*/ + volatile uint32_t fmbm_rfca; /**< Rx Frame Command Attributes.*/ + volatile uint32_t fmbm_rfpne; /**< Rx Frame Parser Next Engine*/ + volatile uint32_t fmbm_rpso; /**< Rx Parse Start Offset*/ + volatile uint32_t fmbm_rpp; /**< Rx Policer Profile */ + volatile uint32_t fmbm_rccb; /**< Rx Coarse Classification Base */ + volatile uint32_t fmbm_reth; /**< Rx Excessive Threshold */ + volatile uint32_t reserved1[1]; /**< (0x03C 0x03F) */ + volatile uint32_t fmbm_rprai[FM_PORT_PRS_RESULT_NUM_OF_WORDS]; + /**< Rx Parse Results Array Initialization*/ + volatile uint32_t fmbm_rfqid; /**< Rx Frame Queue ID*/ + volatile uint32_t fmbm_refqid; /**< Rx Error Frame Queue ID*/ + volatile uint32_t fmbm_rfsdm; /**< Rx Frame Status Discard Mask*/ + volatile uint32_t fmbm_rfsem; /**< Rx Frame Status Error Mask*/ + volatile uint32_t fmbm_rfene; /**< Rx Frame Enqueue Next Engine */ + volatile uint32_t reserved2[0x23];/**< (0x074 0x0FF) */ + volatile uint32_t fmbm_ebmpi[FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS]; + /**< Buffer Manager pool Information-*/ + volatile uint32_t fmbm_acnt[FM_PORT_MAX_NUM_OF_EXT_POOLS_ALL_INTEGRATIONS]; + /**< Allocate Counter-*/ + volatile uint32_t reserved3[8]; + /**< 0x130/0x140 - 0x15F reserved -*/ + volatile uint32_t fmbm_cgm[FM_PORT_NUM_OF_CONGESTION_GRPS_ALL_INTEGRATIONS/32]; + /**< Congestion Group Map*/ + volatile uint32_t fmbm_mpd; /**< BM Pool Depletion */ + volatile uint32_t reserved4[0x1F];/**< (0x184 0x1FF) */ + volatile uint32_t fmbm_rstc; /**< Rx Statistics Counters*/ + volatile uint32_t fmbm_rfrc; /**< Rx Frame Counter*/ + volatile uint32_t fmbm_rfbc; /**< Rx Bad Frames Counter*/ + volatile uint32_t fmbm_rlfc; /**< Rx Large Frames Counter*/ + volatile uint32_t fmbm_rffc; /**< Rx Filter Frames Counter*/ + volatile uint32_t fmbm_rfcd; /**< Rx Frame Discard Counter*/ + volatile uint32_t fmbm_rfldec; /**< Rx Frames List DMA Error Counter*/ + volatile uint32_t fmbm_rodc; /**< Rx Out of Buffers Discard Counter-*/ + volatile uint32_t fmbm_rbdc; /**< Rx Buffers Deallocate Counter-*/ + volatile uint32_t reserved5[0x17];/**< (0x224 0x27F) */ + volatile uint32_t fmbm_rpc; /**< Rx Performance Counters*/ + volatile uint32_t fmbm_rpcp; /**< Rx Performance Count Parameters*/ + volatile uint32_t fmbm_rccn; /**< Rx Cycle Counter*/ + volatile uint32_t fmbm_rtuc; /**< Rx Tasks Utilization Counter*/ + volatile uint32_t fmbm_rrquc; /**< Rx Receive Queue Utilization Counter*/ + volatile uint32_t fmbm_rduc; /**< Rx DMA Utilization Counter*/ + volatile uint32_t fmbm_rfuc; /**< Rx FIFO Utilization Counter*/ + volatile uint32_t fmbm_rpac; /**< Rx Pause Activation Counter*/ + volatile uint32_t reserved6[0x18];/**< (0x2A0 0x2FF) */ + volatile uint32_t fmbm_rdbg; /**< Rx Debug-*/ +} _PackedType t_FmPortRxBmiRegs; + +typedef _Packed struct +{ + volatile uint32_t fmbm_tcfg; /**< Tx Configuration */ + 
volatile uint32_t fmbm_tst; /**< Tx Status */ + volatile uint32_t fmbm_tda; /**< Tx DMA attributes */ + volatile uint32_t fmbm_tfp; /**< Tx FIFO Parameters */ + volatile uint32_t fmbm_tfed; /**< Tx Frame End Data */ + volatile uint32_t fmbm_ticp; /**< Tx Internal Context Parameters */ + volatile uint32_t fmbm_tfne; /**< Tx Frame Next Engine. */ + volatile uint32_t fmbm_tfca; /**< Tx Frame Command attribute. */ + volatile uint32_t fmbm_tcfqid; /**< Tx Confirmation Frame Queue ID. */ + volatile uint32_t fmbm_tfeqid; /**< Tx Frame Error Queue ID */ + volatile uint32_t fmbm_tfene; /**< Tx Frame Enqueue Next Engine */ + volatile uint32_t fmbm_trlmts; /**< Tx Rate Limiter Scale */ + volatile uint32_t fmbm_trlmt; /**< Tx Rate Limiter */ + volatile uint32_t reserved0[0x73];/**< (0x038-0x200) */ + volatile uint32_t fmbm_tstc; /**< Tx Statistics Counters */ + volatile uint32_t fmbm_tfrc; /**< Tx Frame Counter */ + volatile uint32_t fmbm_tfdc; /**< Tx Frames Discard Counter */ + volatile uint32_t fmbm_tfledc; /**< Tx Frame Length error discard counter */ + volatile uint32_t fmbm_tfufdc; /**< Tx Frame unsupported format discard Counter */ + volatile uint32_t fmbm_tbdc; /**< Tx Buffers Deallocate Counter */ + volatile uint32_t reserved1[0x1A];/**< (0x218-0x280) */ + volatile uint32_t fmbm_tpc; /**< Tx Performance Counters*/ + volatile uint32_t fmbm_tpcp; /**< Tx Performance Count Parameters*/ + volatile uint32_t fmbm_tccn; /**< Tx Cycle Counter*/ + volatile uint32_t fmbm_ttuc; /**< Tx Tasks Utilization Counter*/ + volatile uint32_t fmbm_ttcquc; /**< Tx Transmit Confirm Queue Utilization Counter*/ + volatile uint32_t fmbm_tduc; /**< Tx DMA Utilization Counter*/ + volatile uint32_t fmbm_tfuc; /**< Tx FIFO Utilization Counter*/ +} _PackedType t_FmPortTxBmiRegs; + +typedef _Packed struct +{ + volatile uint32_t fmbm_ocfg; /**< O/H Configuration */ + volatile uint32_t fmbm_ost; /**< O/H Status */ + volatile uint32_t fmbm_oda; /**< O/H DMA attributes */ + volatile uint32_t fmbm_oicp; /**< O/H Internal Context Parameters */ + volatile uint32_t fmbm_ofdne; /**< O/H Frame Dequeue Next Engine */ + volatile uint32_t fmbm_ofne; /**< O/H Frame Next Engine */ + volatile uint32_t fmbm_ofca; /**< O/H Frame Command Attributes. 
*/ + volatile uint32_t fmbm_ofpne; /**< O/H Frame Parser Next Engine */ + volatile uint32_t fmbm_opso; /**< O/H Parse Start Offset */ + volatile uint32_t fmbm_opp; /**< O/H Policer Profile */ + volatile uint32_t fmbm_occb; /**< O/H Coarse Classification base */ + volatile uint32_t fmbm_oim; /**< O/H Internal margins*/ + volatile uint32_t reserved0[4]; /**< (0x030 - 0x03F) */ + volatile uint32_t fmbm_oprai[FM_PORT_PRS_RESULT_NUM_OF_WORDS]; + /**< O/H Parse Results Array Initialization */ + volatile uint32_t fmbm_ofqid; /**< O/H Frame Queue ID */ + volatile uint32_t fmbm_oefqid; /**< O/H Error Frame Queue ID */ + volatile uint32_t fmbm_ofsdm; /**< O/H Frame Status Discard Mask */ + volatile uint32_t fmbm_ofsem; /**< O/H Frame Status Error Mask */ + volatile uint32_t fmbm_ofene; /**< O/H Frame Enqueue Next Engine */ + volatile uint32_t fmbm_orlmts; /**< O/H Rate Limiter Scale */ + volatile uint32_t fmbm_orlmt; /**< O/H Rate Limiter */ + volatile uint32_t reserved0a[0x21]; + /**< 0x07C - 0x0FF Reserved */ + union + { + volatile uint32_t fmbm_oebmpi[FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS]; + /**< Buffer Manager Observed Pool Information */ + volatile uint32_t reserved0b[0x18]; + }; /**< 0x100 - 0x15F Reserved */ + volatile uint32_t fmbm_ocgm; /**< Observed Congestion Group Map */ + volatile uint32_t reserved0c[0x7];/**< 0x164 - 0x17F Reserved */ + volatile uint32_t fmbm_ompd; /**< Observed BMan Pool Depletion */ + volatile uint32_t reserved0d[0x1F]; + /**< 0x184 - 0x1FF Reserved */ + volatile uint32_t fmbm_ostc; /**< O/H Statistics Counters */ + volatile uint32_t fmbm_ofrc; /**< O/H Frame Counter */ + volatile uint32_t fmbm_ofdc; /**< O/H Frames Discard Counter */ + volatile uint32_t fmbm_ofledc; /**< O/H Frames Length Error Discard Counter */ + volatile uint32_t fmbm_ofufdc; /**< O/H Frames Unsupported Format Discard Counter */ + volatile uint32_t fmbm_offc; /**< O/H Filter Frames Counter */ + volatile uint32_t fmbm_ofwdc; /**< - Rx Frames WRED Discard Counter */ + volatile uint32_t fmbm_ofldec; /**< O/H Frames List DMA Error Counter */ + volatile uint32_t fmbm_obdc; /**< O/H Buffers Deallocate Counter */ + volatile uint32_t reserved2[0x17];/**< (0x218 - 0x27F) */ + volatile uint32_t fmbm_opc; /**< O/H Performance Counters */ + volatile uint32_t fmbm_opcp; /**< O/H Performance Count Parameters */ + volatile uint32_t fmbm_occn; /**< O/H Cycle Counter */ + volatile uint32_t fmbm_otuc; /**< O/H Tasks Utilization Counter */ + volatile uint32_t fmbm_oduc; /**< O/H DMA Utilization Counter */ + volatile uint32_t fmbm_ofuc; /**< O/H FIFO Utilization Counter */ +} _PackedType t_FmPortOhBmiRegs; + +typedef _Packed union +{ + t_FmPortRxBmiRegs rxPortBmiRegs; + t_FmPortTxBmiRegs txPortBmiRegs; + t_FmPortOhBmiRegs ohPortBmiRegs; +} _PackedType u_FmPortBmiRegs; + +typedef _Packed struct +{ + volatile uint32_t reserved1[2]; /**< 0xn024 - 0x02B */ + volatile uint32_t fmqm_pndn; /**< PortID n Dequeue NIA Register */ + volatile uint32_t fmqm_pndc; /**< PortID n Dequeue Config Register */ + volatile uint32_t fmqm_pndtfc; /**< PortID n Dequeue Total Frame Counter */ + volatile uint32_t fmqm_pndfdc; /**< PortID n Dequeue FQID from Default Counter */ + volatile uint32_t fmqm_pndcc; /**< PortID n Dequeue Confirm Counter */ +} _PackedType t_FmPortNonRxQmiRegs; + +typedef _Packed struct +{ + volatile uint32_t fmqm_pnc; /**< PortID n Configuration Register */ + volatile uint32_t fmqm_pns; /**< PortID n Status Register */ + volatile uint32_t fmqm_pnts; /**< PortID n Task Status Register */ + volatile uint32_t 
reserved0[4]; /**< 0xn00C - 0xn01B */ + volatile uint32_t fmqm_pnen; /**< PortID n Enqueue NIA Register */ + volatile uint32_t fmqm_pnetfc; /**< PortID n Enqueue Total Frame Counter */ + t_FmPortNonRxQmiRegs nonRxQmiRegs; /**< Registers for Tx Hc & Op ports */ +} _PackedType t_FmPortQmiRegs; + +typedef _Packed struct +{ + _Packed struct + { + volatile uint32_t softSeqAttach; /**< Soft Sequence Attachment */ + volatile uint32_t lcv; /**< Line-up Enable Confirmation Mask */ + } _PackedType hdrs[FM_PCD_PRS_NUM_OF_HDRS]; + volatile uint8_t reserved0[0x378]; + volatile uint32_t pcac; /**< Parse Internal Memory Configuration Access Control Register */ + volatile uint32_t pctpid; /**< Parse Internal Memory Configured TPID Register */ +} _PackedType t_FmPortPrsRegs; + +/**************************************************************************//* + @Description Basic buffer descriptor (BD) structure +*//***************************************************************************/ +typedef _Packed struct +{ + volatile uint16_t status; + volatile uint16_t length; + volatile uint8_t reserved0[0x6]; + volatile uint8_t reserved1[0x1]; + volatile t_FmPhysAddr buff; +} _PackedType t_FmImBd; + +typedef _Packed struct +{ + volatile uint16_t gen; /**< tbd */ + volatile uint8_t reserved0[0x1]; + volatile t_FmPhysAddr bdRingBase; /**< tbd */ + volatile uint16_t bdRingSize; /**< tbd */ + volatile uint16_t offsetIn; /**< tbd */ + volatile uint16_t offsetOut; /**< tbd */ + volatile uint8_t reserved1[0x12]; /**< 0x0e - 0x1f */ +} _PackedType t_FmPortImQd; + +typedef _Packed struct +{ + volatile uint32_t mode; /**< Mode register */ + volatile uint32_t rxQdPtr; /**< tbd */ + volatile uint32_t txQdPtr; /**< tbd */ + volatile uint16_t mrblr; /**< tbd */ + volatile uint16_t rxQdBsyCnt; /**< tbd */ + volatile uint8_t reserved0[0x10]; /**< 0x10 - 0x1f */ + t_FmPortImQd rxQd; + t_FmPortImQd txQd; + volatile uint8_t reserved1[0xa0]; /**< 0x60 - 0xff */ +} _PackedType t_FmPortImPram; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... 
*/ + + +/**************************************************************************//** + @Description Registers bit fields +*//***************************************************************************/ + +/**************************************************************************//** + @Description BMI defines +*//***************************************************************************/ +#define BMI_PORT_CFG_EN 0x80000000 +#define BMI_PORT_CFG_EN_MACSEC 0x00800000 +#define BMI_PORT_CFG_FDOVR 0x02000000 +#define BMI_PORT_CFG_IM 0x01000000 +#define BMI_PORT_STATUS_BSY 0x80000000 +#define BMI_COUNTERS_EN 0x80000000 +#define BMI_DMA_ATTR_WRITE_OPTIMIZE 0x00100000 +#define BMI_PORT_RFNE_FRWD_DCL4C 0x10000000 +#define BMI_PORT_RFNE_FRWD_RPD 0x40000000 +#define BMI_RFNE_FDCS_MASK 0xFF000000 + +#define BMI_CMD_MR_LEAC 0x00200000 +#define BMI_CMD_MR_SLEAC 0x00100000 +#define BMI_CMD_MR_MA 0x00080000 +#define BMI_CMD_MR_DEAS 0x00040000 +#define BMI_CMD_TX_MR_DEF (0) +#define BMI_CMD_RX_MR_DEF (BMI_CMD_MR_LEAC | \ + BMI_CMD_MR_SLEAC | \ + BMI_CMD_MR_MA | \ + BMI_CMD_MR_DEAS) +#define BMI_CMD_ATTR_ORDER 0x80000000 +#define BMI_CMD_ATTR_SYNC 0x02000000 +#define BMI_CMD_ATTR_MACCMD_MASK 0x0000ff00 +#define BMI_CMD_ATTR_MACCMD_OVERRIDE 0x00008000 +#define BMI_CMD_ATTR_MACCMD_SECURED 0x00001000 +#define BMI_CMD_ATTR_MACCMD_SC_MASK 0x00000f00 + +#define BMI_EXT_BUF_POOL_VALID 0x80000000 +#define BMI_EXT_BUF_POOL_EN_COUNTER 0x40000000 +#define BMI_EXT_BUF_POOL_BACKUP 0x20000000 +#define BMI_EXT_BUF_POOL_ID_MASK 0x003F0000 +#define BMI_STATUS_RX_MASK_UNUSED (uint32_t)(~(FM_PORT_FRM_ERR_DMA | \ + FM_PORT_FRM_ERR_PHYSICAL | \ + FM_PORT_FRM_ERR_SIZE | \ + FM_PORT_FRM_ERR_CLS_DISCARD | \ + FM_PORT_FRM_ERR_EXTRACTION | \ + FM_PORT_FRM_ERR_NO_SCHEME | \ + FM_PORT_FRM_ERR_COLOR_RED | \ + FM_PORT_FRM_ERR_COLOR_YELLOW | \ + FM_PORT_FRM_ERR_ILL_PLCR | \ + FM_PORT_FRM_ERR_PLCR_FRAME_LEN | \ + FM_PORT_FRM_ERR_PRS_TIMEOUT | \ + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \ + FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \ + FM_PORT_FRM_ERR_PRS_HDR_ERR | \ + FM_PORT_FRM_ERR_PROCESS_TIMEOUT | \ + FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW)) + +#define BMI_STATUS_OP_MASK_UNUSED (uint32_t)(BMI_STATUS_RX_MASK_UNUSED & \ + ~(FM_PORT_FRM_ERR_LENGTH | \ + FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT)) + +#define BMI_RATE_LIMIT_EN 0x80000000 +#define BMI_RATE_LIMIT_BURST_SIZE_GRAN 0x80000000 +#define BMI_RATE_LIMIT_SCALE_BY_2 0x00000001 +#define BMI_RATE_LIMIT_SCALE_BY_4 0x00000002 +#define BMI_RATE_LIMIT_SCALE_BY_8 0x00000003 + +#define BMI_RX_FIFO_THRESHOLD_BC 0x80000000 + +#define BMI_PRS_RESULT_HIGH 0x00000000 +#define BMI_PRS_RESULT_LOW 0xFFFFFFFF + +#define RX_ERRS_TO_ENQ (FM_PORT_FRM_ERR_DMA | \ + FM_PORT_FRM_ERR_PHYSICAL | \ + FM_PORT_FRM_ERR_SIZE | \ + FM_PORT_FRM_ERR_EXTRACTION | \ + FM_PORT_FRM_ERR_NO_SCHEME | \ + FM_PORT_FRM_ERR_ILL_PLCR | \ + FM_PORT_FRM_ERR_PLCR_FRAME_LEN | \ + FM_PORT_FRM_ERR_PRS_TIMEOUT | \ + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | \ + FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED | \ + FM_PORT_FRM_ERR_PRS_HDR_ERR | \ + FM_PORT_FRM_ERR_PROCESS_TIMEOUT | \ + FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW) + +#ifdef FM_CAPWAP_SUPPORT +#define OP_ERRS_TO_ENQ (RX_ERRS_TO_ENQ | \ + FM_PORT_FRM_ERR_LENGTH | \ + FM_PORT_FRM_ERR_NON_FM | \ + FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT) + +#else +#define OP_ERRS_TO_ENQ (RX_ERRS_TO_ENQ | \ + FM_PORT_FRM_ERR_LENGTH | \ + FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT) +#endif /* FM_CAPWAP_SUPPORT */ + +/* shifts */ +#define BMI_PORT_CFG_MS_SEL_SHIFT 16 +#define BMI_DMA_ATTR_SWP_SHIFT 30 +#define BMI_DMA_ATTR_IC_CACHE_SHIFT 28 +#define 
BMI_DMA_ATTR_HDR_CACHE_SHIFT 26 +#define BMI_DMA_ATTR_SG_CACHE_SHIFT 24 + +#define BMI_IM_FOF_SHIFT 28 +#define BMI_PR_PORTID_SHIFT 24 + +#define BMI_RX_FIFO_PRI_ELEVATION_SHIFT 16 +#define BMI_RX_FIFO_THRESHOLD_SHIFT 0 + +#define BMI_RX_FRAME_END_CS_IGNORE_SHIFT 24 +#define BMI_RX_FRAME_END_CUT_SHIFT 16 + +#define BMI_IC_TO_EXT_SHIFT 16 +#define BMI_IC_FROM_INT_SHIFT 8 +#define BMI_IC_SIZE_SHIFT 0 + +#define BMI_INT_BUF_MARG_SHIFT 28 + +#define BMI_EXT_BUF_MARG_START_SHIFT 16 +#define BMI_EXT_BUF_MARG_END_SHIFT 0 + +#define BMI_CMD_ATTR_COLOR_SHIFT 26 +#define BMI_CMD_ATTR_COM_MODE_SHIFT 16 +#define BMI_CMD_ATTR_MACCMD_SHIFT 8 +#define BMI_CMD_ATTR_MACCMD_OVERRIDE_SHIFT 15 +#define BMI_CMD_ATTR_MACCMD_SECURED_SHIFT 12 +#define BMI_CMD_ATTR_MACCMD_SC_SHIFT 8 + +#define BMI_POOL_DEP_NUM_OF_POOLS_SHIFT 16 +#define BMI_POOL_DEP_NUM_OF_POOLS_VECTOR_SHIFT 24 + +#define BMI_EXT_BUF_POOL_ID_SHIFT 16 + +#define BMI_TX_FIFO_MIN_FILL_SHIFT 16 +#define BMI_TX_FIFO_PIPELINE_DEPTH_SHIFT 12 +#define BMI_TX_LOW_COMF_SHIFT 0 + +#define BMI_TX_FRAME_END_CS_IGNORE_SHIFT 24 + +#define BMI_PERFORMANCE_TASK_COMP_SHIFT 24 +#define BMI_PERFORMANCE_PORT_COMP_SHIFT 16 +#define BMI_PERFORMANCE_DMA_COMP_SHIFT 12 +#define BMI_PERFORMANCE_FIFO_COMP_SHIFT 0 + +#define BMI_MAX_BURST_SHIFT 16 +#define BMI_COUNT_RATE_UNIT_SHIFT 16 + +/* sizes */ +#define FRAME_END_DATA_SIZE 16 +#define OFFSET_UNITS 16 +#define FRAME_OFFSET_UNITS 16 +#define MAX_EXT_OFFSET 496 +#define MAX_EXT_BUFFER_OFFSET 511 +#define MAX_INT_OFFSET 240 +#define MIN_TX_INT_OFFSET 16 +#define MAX_IC_SIZE 256 +#define MAX_FRAME_OFFSET 64 +#define MAX_FIFO_PIPELINE_DEPTH 8 +#define MAX_PERFORMANCE_TASK_COMP 64 +#define MAX_PERFORMANCE_TX_QUEUE_COMP 8 +#define MAX_PERFORMANCE_RX_QUEUE_COMP 64 +#define MAX_PERFORMANCE_DMA_COMP 16 +#define MAX_NUM_OF_TASKS 64 +#define MAX_NUM_OF_EXTRA_TASKS 8 +#define MAX_NUM_OF_DMAS 16 +#define MAX_NUM_OF_EXTRA_DMAS 8 +#define MAX_BURST_SIZE 1024 +#define FRAG_EXTRA_SPACE 32 + +/**************************************************************************//** + @Description QMI defines +*//***************************************************************************/ +/* masks */ +#define QMI_PORT_CFG_EN 0x80000000 +#define QMI_PORT_CFG_EN_COUNTERS 0x10000000 +#define QMI_PORT_STATUS_DEQ_TNUM_BSY 0x80000000 +#define QMI_PORT_STATUS_DEQ_FD_BSY 0x20000000 + +#define QMI_DEQ_CFG_PREFETCH_NO_TNUM 0x02000000 +#define QMI_DEQ_CFG_PREFETCH_WAITING_TNUM 0 +#define QMI_DEQ_CFG_PREFETCH_1_FRAME 0 +#define QMI_DEQ_CFG_PREFETCH_3_FRAMES 0x01000000 + +#define QMI_DEQ_CFG_PRI 0x80000000 +#define QMI_DEQ_CFG_TYPE1 0x10000000 +#define QMI_DEQ_CFG_TYPE2 0x20000000 +#define QMI_DEQ_CFG_TYPE3 0x30000000 + +#define QMI_DEQ_CFG_SUBPORTAL_MASK 0x1f +#define QMI_DEQ_CFG_SUBPORTAL_SHIFT 20 + +/**************************************************************************//** + @Description PARSER defines +*//***************************************************************************/ +/* masks */ +#define PRS_HDR_ERROR_DIS 0x00000800 +#define PRS_HDR_SW_PRS_EN 0x00000400 +#define PRS_CP_OFFSET_MASK 0x0000000F +#define PRS_TPID1_MASK 0xFFFF0000 +#define PRS_TPID2_MASK 0x0000FFFF +#define PRS_TPID_DFLT 0x91009100 + +#define PRS_HDR_MPLS_LBL_INTER_EN 0x00200000 +#define PRS_HDR_IPV6_ROUTE_HDR_DIS 0x00008000 +#define PRS_HDR_PPPOE_MTU_CHECK_EN 0x80000000 +#define PRS_HDR_UDP_PAD_REMOVAL 0x80000000 +#define PRS_HDR_TCP_PAD_REMOVAL 0x80000000 +#define PRS_CAC_STOP 0x00000001 +#define PRS_CAC_ACTIVE 0x00000100 + +/* shifts */ +#define PRS_PCTPID_SHIFT 16 +#define 
PRS_HDR_MPLS_NEXT_HDR_SHIFT 22 +#define PRS_HDR_ETH_BC_SHIFT 28 +#define PRS_HDR_ETH_MC_SHIFT 24 +#define PRS_HDR_VLAN_STACKED_SHIFT 16 +#define PRS_HDR_MPLS_STACKED_SHIFT 16 +#define PRS_HDR_IPV4_1_BC_SHIFT 28 +#define PRS_HDR_IPV4_1_MC_SHIFT 24 +#define PRS_HDR_IPV4_2_UC_SHIFT 20 +#define PRS_HDR_IPV4_2_MC_BC_SHIFT 16 +#define PRS_HDR_IPV6_1_MC_SHIFT 24 +#define PRS_HDR_IPV6_2_UC_SHIFT 20 +#define PRS_HDR_IPV6_2_MC_SHIFT 16 + +#define PRS_HDR_ETH_BC_MASK 0x0fffffff +#define PRS_HDR_ETH_MC_MASK 0xf0ffffff +#define PRS_HDR_VLAN_STACKED_MASK 0xfff0ffff +#define PRS_HDR_MPLS_STACKED_MASK 0xfff0ffff +#define PRS_HDR_IPV4_1_BC_MASK 0x0fffffff +#define PRS_HDR_IPV4_1_MC_MASK 0xf0ffffff +#define PRS_HDR_IPV4_2_UC_MASK 0xff0fffff +#define PRS_HDR_IPV4_2_MC_BC_MASK 0xfff0ffff +#define PRS_HDR_IPV6_1_MC_MASK 0xf0ffffff +#define PRS_HDR_IPV6_2_UC_MASK 0xff0fffff +#define PRS_HDR_IPV6_2_MC_MASK 0xfff0ffff + +/* others */ +#define PRS_HDR_ENTRY_SIZE 8 +#define DEFAULT_CLS_PLAN_VECTOR 0xFFFFFFFF + +#define IPSEC_SW_PATCH_START 0x20 +#define SCTP_SW_PATCH_START 0x4D +#define DCCP_SW_PATCH_START 0x41 + +#define IP_FRAG_SW_PATCH_IPv4 0x300 +#define IP_FRAG_SW_PATCH_IPv6_0 0x320 +#define IP_FRAG_SW_PATCH_IPv6_1 0x372 + +/**************************************************************************//** + @Description IM defines +*//***************************************************************************/ +#define BD_R_E 0x80000000 +#define BD_L 0x08000000 + +#define BD_RX_CRE 0x00080000 +#define BD_RX_FTL 0x00040000 +#define BD_RX_FTS 0x00020000 +#define BD_RX_OV 0x00010000 + +#define BD_RX_ERRORS (BD_RX_CRE | BD_RX_FTL | BD_RX_FTS | BD_RX_OV) +#define BD_ERROR_PASS_FRAME BD_RX_ERRORS + +#define FM_IM_SIZEOF_BD sizeof(t_FmImBd) + +#define BD_STATUS_MASK 0xffff0000 +#define BD_LENGTH_MASK 0x0000ffff + +#define BD_STATUS_AND_LENGTH_SET(bd, val) WRITE_UINT32(*(volatile uint32_t*)(bd), (val)) + +#define BD_STATUS_AND_LENGTH(bd) GET_UINT32(*(volatile uint32_t*)(bd)) + +#define BD_GET(id) &p_FmPort->im.p_BdRing[id] + +#define IM_ILEGAL_BD_ID 0xffff + +/* others */ +#define IM_PRAM_ALIGN 0x100 + +/* masks */ +#define IM_MODE_GBL 0x20000000 +#define IM_MODE_BO_MASK 0x18000000 +#define IM_MODE_BO_SHIFT 3 +#define IM_MODE_GRC_STP 0x00800000 + +#define IM_MODE_SET_BO(val) (uint32_t)((val << (31-IM_MODE_BO_SHIFT)) & IM_MODE_BO_MASK) + +#define IM_RXQD_BSYINTM 0x0008 +#define IM_RXQD_RXFINTM 0x0010 +#define IM_RXQD_FPMEVT_SEL_MASK 0x0003 + +#define IM_EV_BSY 0x40000000 +#define IM_EV_RX 0x80000000 + +typedef struct { + t_Handle h_FmMuram; + t_FmPortImPram *p_FmPortImPram; + uint8_t fwExtStructsMemId; + uint32_t fwExtStructsMemAttr; + uint16_t bdRingSize; + t_FmImBd *p_BdRing; + t_Handle *p_BdShadow; + uint16_t currBdId; + uint16_t firstBdOfFrameId; + + /* Rx port parameters */ + uint8_t dataMemId; /**< Memory partition ID for data buffers */ + uint32_t dataMemAttributes; /**< Memory attributes for data buffers */ + t_BufferPoolInfo rxPool; + uint16_t mrblr; + uint16_t rxFrameAccumLength; + t_FmPortImRxStoreCallback *f_RxStore; + + /* Tx port parameters */ + uint32_t txFirstBdStatus; + t_FmPortImTxConfCallback *f_TxConf; +} t_FmMacIm; + +/**************************************************************************//** + @Description structure for defining internal context copying +*//***************************************************************************/ +typedef struct +{ + uint16_t extBufOffset; /**< Offset in External buffer to which internal + context is copied to (Rx) or taken from (Tx, Op). 
*/ + uint8_t intContextOffset; /**< Offset within internal context to copy from + (Rx) or to copy to (Tx, Op). */ + uint16_t size; /**< Internal offset size to be copied */ +} t_FmPortIntContextDataCopy; + +/**************************************************************************//** + @Description struct for defining external buffer margins +*//***************************************************************************/ +typedef struct { + uint16_t startMargins; /**< Number of bytes to be left at the beginning + of the external buffer (must be divisible by 16) */ + uint16_t endMargins; /**< number of bytes to be left at the end + of the external buffer(must be divisible by 16) */ +} t_FmPortBufMargins; + +typedef struct { + uint32_t dataOffset; + uint32_t prsResultOffset; + uint32_t timeStampOffset; + uint32_t hashResultOffset; + uint32_t pcdInfoOffset; + uint32_t manipOffset; +#ifdef DEBUG + uint32_t debugOffset; +#endif /* DEBUG */ +} t_FmPortBufferOffsets; + +typedef struct { + uint32_t dfltFqid; + uint32_t confFqid; + uint32_t errFqid; + uintptr_t baseAddr; + uint8_t deqSubPortal; + bool deqHighPriority; + e_FmPortDeqType deqType; +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + e_FmPortDeqPrefetchOption deqPrefetchOption; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + uint16_t deqByteCnt; + uint8_t cheksumLastBytesIgnore; + uint8_t cutBytesFromEnd; + t_FmPortBufPoolDepletion bufPoolDepletion; + uint8_t pipelineDepth; + uint16_t fifoLowComfLevel; + bool frmDiscardOverride; + bool enRateLimit; + t_FmPortRateLimit rateLimit; + e_FmPortDualRateLimiterScaleDown rateLimitDivider; + bool enBufPoolDepletion; + uint16_t liodnOffset; + uint16_t liodnBase; + t_FmPortExtPools extBufPools; + e_FmPortDmaSwap dmaSwapData; + e_FmPortDmaCache dmaIntContextCacheAttr; + e_FmPortDmaCache dmaHeaderCacheAttr; + e_FmPortDmaCache dmaScatterGatherCacheAttr; + bool dmaReadOptimize; + bool dmaWriteOptimize; + uint32_t txFifoMinFillLevel; + uint32_t txFifoLowComfLevel; + uint32_t rxFifoPriElevationLevel; + uint32_t rxFifoThreshold; + t_FmPortBufMargins bufMargins; + t_FmPortIntContextDataCopy intContext; + bool syncReq; + e_FmPortColor color; + fmPortFrameErrSelect_t errorsToDiscard; + fmPortFrameErrSelect_t errorsToEnq; + uint64_t fmMuramPhysBaseAddr; + bool forwardReuseIntContext; + t_FmPortBufferPrefixContent bufferPrefixContent; + uint8_t internalBufferOffset; + t_FmPortBackupBmPools *p_BackupBmPools; + bool dontReleaseBuf; +} t_FmPortDriverParam; + +typedef struct { + t_Handle h_Fm; + t_Handle h_FmPcd; + uint8_t portId; + e_FmPortType portType; + int enabled; + char name[MODULE_NAME_SIZE]; + uint8_t hardwarePortId; + uint16_t fmClkFreq; + t_FmPortQmiRegs *p_FmPortQmiRegs; + u_FmPortBmiRegs *p_FmPortBmiRegs; + t_FmPortPrsRegs *p_FmPortPrsRegs; + fmPcdEngines_t pcdEngines; + uint32_t savedBmiNia; + uint8_t netEnvId; + uint32_t optArray[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)]; + uint32_t lcvs[FM_PCD_PRS_NUM_OF_HDRS]; + uint8_t privateInfo; + uint32_t schemesPerPortVector; + bool useClsPlan; + uint8_t clsPlanGrpId; + t_Handle ccTreeId; + t_Handle completeArg; + void (*f_Complete)(t_Handle arg); + t_FmPortBufferOffsets bufferOffsets; + /* Independent-Mode parameters support */ + bool imEn; + t_FmMacIm im; + uint8_t txFifoDeqPipelineDepth; + volatile bool lock; + t_Handle h_Spinlock; + t_FmPortExceptionCallback *f_Exception; + t_Handle h_App; + uint8_t internalBufferOffset; + uint8_t fmanCtrlEventId; + uint32_t exceptions; + bool polling; + uint8_t numOfTasks; + t_FmPortExtPools extBufPools; + uint32_t 
requiredAction; + uint32_t savedQmiPnen; + uint32_t savedNonRxQmiRegsPndn; + int savedPrsStartOffset; + t_FmPortRsrc openDmas; + t_FmPortRsrc tasks; + t_FmPortRsrc fifoBufs; + t_FmInterModulePortRxPoolsParams rxPoolsParams; + t_FmPortDriverParam *p_FmPortDriverParam; +} t_FmPort; + +#define CHECK_FM_CTL_AC_POST_FETCH_PCD(savedBmiNia) \ + ((((savedBmiNia) & NIA_ENG_MASK) == NIA_ENG_FM_CTL) && \ + ((((savedBmiNia) & NIA_FM_CTL_AC_MASK) == NIA_FM_CTL_AC_POST_FETCH_PCD) || \ + (((savedBmiNia) & NIA_FM_CTL_AC_MASK) == NIA_FM_CTL_AC_POST_FETCH_PCD_UDP_LEN))) + +void FmPortConfigIM (t_FmPort *p_FmPort, t_FmPortParams *p_FmPortParams); +t_Error FmPortImCheckInitParameters(t_FmPort *p_FmPort); + +t_Error FmPortImInit(t_FmPort *p_FmPort); +void FmPortImFree(t_FmPort *p_FmPort); + +t_Error FmPortImEnable (t_FmPort *p_FmPort); +t_Error FmPortImDisable (t_FmPort *p_FmPort); +t_Error FmPortImRx (t_FmPort *p_FmPort); + +void FmPortSetMacsecLcv(t_Handle h_FmPort); +void FmPortSetMacsecCmd(t_Handle h_FmPort, uint8_t dfltSci); + + +static __inline__ uint8_t * BdBufferGet (t_PhysToVirt *f_PhysToVirt, t_FmImBd *p_Bd) +{ + uint64_t physAddr = (uint64_t)((uint64_t)GET_UINT8(p_Bd->buff.high) << 32); + physAddr |= GET_UINT32(p_Bd->buff.low); + + return (uint8_t *)f_PhysToVirt((physAddress_t)(physAddr)); +} + +static __inline__ void SET_ADDR(volatile t_FmPhysAddr *fmPhysAddr, uint64_t value) +{ + WRITE_UINT8(fmPhysAddr->high,(uint8_t)((value & 0x000000ff00000000LL) >> 32)); + WRITE_UINT32(fmPhysAddr->low,(uint32_t)value); +} + +static __inline__ void BdBufferSet(t_VirtToPhys *f_VirtToPhys, t_FmImBd *p_Bd, uint8_t *p_Buffer) +{ + uint64_t physAddr = (uint64_t)(f_VirtToPhys(p_Buffer)); + SET_ADDR(&p_Bd->buff, physAddr); +} + +static __inline__ uint16_t GetNextBdId(t_FmPort *p_FmPort, uint16_t id) +{ + if (id < p_FmPort->im.bdRingSize-1) + return (uint16_t)(id+1); + else + return 0; +} + + +#endif /* __FM_PORT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/fm_port_im.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Port/fm_port_im.c @@ -0,0 +1,789 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_port_im.c + + @Description FM Port Independent-Mode ... +*//***************************************************************************/ +#include "std_ext.h" +#include "string_ext.h" +#include "error_ext.h" +#include "fm_muram_ext.h" + +#include "fm_port.h" + + +#define TX_CONF_STATUS_UNSENT 0x1 + +#ifdef CORE_8BIT_ACCESS_ERRATA +#undef WRITE_UINT16 +#undef GET_UINT16 + +#define WRITE_UINT16(addr, val) \ + do{ \ + if((int)&(addr) % 4) \ + WRITE_UINT32(*(uint32_t*)(uint32_t)((uint32_t)&addr & ~0x3L), \ + ((GET_UINT32(*(uint32_t*)(uint32_t)((uint32_t)&addr & ~0x3L)) & 0xffff0000) | (uint32_t)val)); \ + else \ + WRITE_UINT32(*(uint32_t*)&addr, \ + ((GET_UINT32(*(uint32_t*)&addr) & 0x0000ffff) | (uint32_t)val<<16)); \ + }while(0); + +#define GET_UINT16(addr) (((uint32_t)&addr%4) ? \ + ((uint16_t)GET_UINT32(*(uint32_t*)(uint32_t)((uint32_t)&addr & ~0x3L))): \ + ((uint16_t)(GET_UINT32(*(uint32_t*)(uint32_t)&addr) >> 16))) +#endif /* CORE_8BIT_ACCESS_ERRATA */ + + +typedef enum e_TxConfType +{ + e_TX_CONF_TYPE_CHECK = 0 /**< check if all the buffers were touched by the muxator, no confirmation callback */ + ,e_TX_CONF_TYPE_CALLBACK = 1 /**< confirm to user all the available sent buffers */ + ,e_TX_CONF_TYPE_FLUSH = 3 /**< confirm all buffers plus the unsent one with an appropriate status */ +} e_TxConfType; + + +static void ImException(t_Handle h_FmPort, uint32_t event) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + ASSERT_COND(((event & IM_EV_RX) && FmIsMaster(p_FmPort->h_Fm)) || + !FmIsMaster(p_FmPort->h_Fm)); + + if (event & IM_EV_RX) + FmPortImRx(p_FmPort); + if ((event & IM_EV_BSY) && p_FmPort->f_Exception) + p_FmPort->f_Exception(p_FmPort->h_App, e_FM_PORT_EXCEPTION_IM_BUSY); +} + + +static t_Error TxConf(t_FmPort *p_FmPort, e_TxConfType confType) +{ + t_Error retVal = E_BUSY; + uint32_t bdStatus; + uint16_t savedStartBdId, confBdId; + + ASSERT_COND(p_FmPort); + + /* + if (confType==e_TX_CONF_TYPE_CHECK) + return (WfqEntryIsQueueEmpty(p_FmPort->im.h_WfqEntry) ? E_OK : E_BUSY); + */ + + confBdId = savedStartBdId = p_FmPort->im.currBdId; + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(confBdId)); + + /* If R bit is set, we don't enter, or we break. + we run till we get to R, or complete the loop */ + while ((!(bdStatus & BD_R_E) || (confType == e_TX_CONF_TYPE_FLUSH)) && (retVal != E_OK)) + { + if (confType & e_TX_CONF_TYPE_CALLBACK) /* if it is confirmation with user callbacks */ + BD_STATUS_AND_LENGTH_SET(BD_GET(confBdId), 0); + + /* case 1: R bit is 0 and Length is set -> confirm! 
*/ + if ((confType & e_TX_CONF_TYPE_CALLBACK) && (bdStatus & BD_LENGTH_MASK)) + { + if (p_FmPort->im.f_TxConf) + { + if ((confType == e_TX_CONF_TYPE_FLUSH) && (bdStatus & BD_R_E)) + p_FmPort->im.f_TxConf(p_FmPort->h_App, + BdBufferGet(XX_PhysToVirt, BD_GET(confBdId)), + TX_CONF_STATUS_UNSENT, + p_FmPort->im.p_BdShadow[confBdId]); + else + p_FmPort->im.f_TxConf(p_FmPort->h_App, + BdBufferGet(XX_PhysToVirt, BD_GET(confBdId)), + 0, + p_FmPort->im.p_BdShadow[confBdId]); + } + } + /* case 2: R bit is 0 and Length is 0 -> not used yet, nop! */ + + confBdId = GetNextBdId(p_FmPort, confBdId); + if (confBdId == savedStartBdId) + retVal = E_OK; + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(confBdId)); + } + + return retVal; +} + +t_Error FmPortImEnable(t_FmPort *p_FmPort) +{ + uint32_t tmpReg = GET_UINT32(p_FmPort->im.p_FmPortImPram->mode); + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, (uint32_t)(tmpReg & ~IM_MODE_GRC_STP)); + return E_OK; +} + +t_Error FmPortImDisable(t_FmPort *p_FmPort) +{ + uint32_t tmpReg = GET_UINT32(p_FmPort->im.p_FmPortImPram->mode); + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, (uint32_t)(tmpReg | IM_MODE_GRC_STP)); + return E_OK; +} + +t_Error FmPortImRx(t_FmPort *p_FmPort) +{ + t_Handle h_CurrUserPriv, h_NewUserPriv; + uint32_t bdStatus; + volatile uint8_t buffPos; + uint16_t length; + uint16_t errors/*, reportErrors*/; + uint8_t *p_CurData, *p_Data; + uint32_t flags; + + ASSERT_COND(p_FmPort); + + flags = XX_LockIntrSpinlock(p_FmPort->h_Spinlock); + if (p_FmPort->lock) + { + XX_UnlockIntrSpinlock(p_FmPort->h_Spinlock, flags); + return E_OK; + } + p_FmPort->lock = TRUE; + XX_UnlockIntrSpinlock(p_FmPort->h_Spinlock, flags); + + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId)); + + while (!(bdStatus & BD_R_E)) /* while there is data in the Rx BD */ + { + if ((p_Data = p_FmPort->im.rxPool.f_GetBuf(p_FmPort->im.rxPool.h_BufferPool, &h_NewUserPriv)) == NULL) + { + p_FmPort->lock = FALSE; + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Data buffer")); + } + + if (p_FmPort->im.firstBdOfFrameId == IM_ILEGAL_BD_ID) + p_FmPort->im.firstBdOfFrameId = p_FmPort->im.currBdId; + + errors = 0; + p_CurData = BdBufferGet(p_FmPort->im.rxPool.f_PhysToVirt, BD_GET(p_FmPort->im.currBdId)); + h_CurrUserPriv = p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]; + length = (uint16_t)((bdStatus & BD_L) ? + ((bdStatus & BD_LENGTH_MASK) - p_FmPort->im.rxFrameAccumLength): + (bdStatus & BD_LENGTH_MASK)); + p_FmPort->im.rxFrameAccumLength += length; + + /* determine whether buffer is first, last, first and last (single */ + /* buffer frame) or middle (not first and not last) */ + buffPos = (uint8_t)((p_FmPort->im.currBdId == p_FmPort->im.firstBdOfFrameId) ? + ((bdStatus & BD_L) ? SINGLE_BUF : FIRST_BUF) : + ((bdStatus & BD_L) ? 
LAST_BUF : MIDDLE_BUF)); + + if (bdStatus & BD_L) + { + p_FmPort->im.rxFrameAccumLength = 0; + p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID; + } + + BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, BD_GET(p_FmPort->im.currBdId), p_Data); + + BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), BD_R_E); + + errors = (uint16_t)((bdStatus & BD_RX_ERRORS) >> 16); + p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId] = h_NewUserPriv; + + p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId); + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.offsetOut, (uint16_t)(p_FmPort->im.currBdId<<4)); + /* Pass the buffer if one of the conditions is true: + - There are no errors + - This is a part of a larger frame ( the application has already received some buffers ) + - There is an error, but it was defined to be passed anyway. */ + if ((buffPos != SINGLE_BUF) || !errors || (errors & (uint16_t)(BD_ERROR_PASS_FRAME>>16))) + { + if (p_FmPort->im.f_RxStore(p_FmPort->h_App, + p_CurData, + length, + errors, + buffPos, + h_CurrUserPriv) == e_RX_STORE_RESPONSE_PAUSE) + break; + } + else if (p_FmPort->im.rxPool.f_PutBuf(p_FmPort->im.rxPool.h_BufferPool, + p_CurData, + h_CurrUserPriv)) + { + p_FmPort->lock = FALSE; + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Failed freeing data buffer")); + } + + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId)); + } + p_FmPort->lock = FALSE; + return E_OK; +} + +void FmPortConfigIM (t_FmPort *p_FmPort, t_FmPortParams *p_FmPortParams) +{ + ASSERT_COND(p_FmPort); + + SANITY_CHECK_RETURN(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->im.h_FmMuram = p_FmPortParams->specificParams.imRxTxParams.h_FmMuram; + p_FmPort->p_FmPortDriverParam->liodnOffset = p_FmPortParams->specificParams.imRxTxParams.liodnOffset; + p_FmPort->im.dataMemId = p_FmPortParams->specificParams.imRxTxParams.dataMemId; + p_FmPort->im.dataMemAttributes = p_FmPortParams->specificParams.imRxTxParams.dataMemAttributes; + + p_FmPort->im.fwExtStructsMemId = DEFAULT_PORT_ImfwExtStructsMemId; + p_FmPort->im.fwExtStructsMemAttr = DEFAULT_PORT_ImfwExtStructsMemAttr; + + if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || + (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + p_FmPort->im.rxPool.h_BufferPool = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.h_BufferPool; + p_FmPort->im.rxPool.f_GetBuf = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_GetBuf; + p_FmPort->im.rxPool.f_PutBuf = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_PutBuf; + p_FmPort->im.rxPool.bufferSize = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.bufferSize; + p_FmPort->im.rxPool.f_PhysToVirt = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_PhysToVirt; + if (!p_FmPort->im.rxPool.f_PhysToVirt) + p_FmPort->im.rxPool.f_PhysToVirt = XX_PhysToVirt; + p_FmPort->im.rxPool.f_VirtToPhys = p_FmPortParams->specificParams.imRxTxParams.rxPoolParams.f_VirtToPhys; + if (!p_FmPort->im.rxPool.f_VirtToPhys) + p_FmPort->im.rxPool.f_VirtToPhys = XX_VirtToPhys; + p_FmPort->im.f_RxStore = p_FmPortParams->specificParams.imRxTxParams.f_RxStore; + + p_FmPort->im.mrblr = 0x8000; + while (p_FmPort->im.mrblr) + { + if (p_FmPort->im.rxPool.bufferSize & p_FmPort->im.mrblr) + break; + p_FmPort->im.mrblr >>= 1; + } + if (p_FmPort->im.mrblr != p_FmPort->im.rxPool.bufferSize) + DBG(WARNING, ("Max-Rx-Buffer-Length set to %d", p_FmPort->im.mrblr)); + p_FmPort->im.bdRingSize = DEFAULT_PORT_rxBdRingLength; + p_FmPort->exceptions = DEFAULT_exception; + if (FmIsMaster(p_FmPort->h_Fm)) + 
p_FmPort->polling = FALSE; + else + p_FmPort->polling = TRUE; + p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ; + } + else + { + p_FmPort->im.f_TxConf = p_FmPortParams->specificParams.imRxTxParams.f_TxConf; + + p_FmPort->im.bdRingSize = DEFAULT_PORT_txBdRingLength; + } +} + +t_Error FmPortImCheckInitParameters(t_FmPort *p_FmPort) +{ + if ((p_FmPort->portType != e_FM_PORT_TYPE_RX) && + (p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && + (p_FmPort->portType != e_FM_PORT_TYPE_TX) && + (p_FmPort->portType != e_FM_PORT_TYPE_TX_10G)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG); + + if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || + (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + if (!POWER_OF_2(p_FmPort->im.mrblr)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("max Rx buffer length must be power of 2!!!")); + if (p_FmPort->im.mrblr < 256) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("max Rx buffer length must at least 256!!!")); + if(p_FmPort->p_FmPortDriverParam->liodnOffset & ~FM_LIODN_OFFSET_MASK) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodnOffset is larger than %d", FM_LIODN_OFFSET_MASK+1)); +#ifdef FM_PARTITION_ARRAY + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_FmPort->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + if(p_FmPort->p_FmPortDriverParam->liodnOffset >= MAX_LIODN_OFFSET) + { + p_FmPort->p_FmPortDriverParam->liodnOffset = + (uint16_t)(p_FmPort->p_FmPortDriverParam->liodnOffset & (MAX_LIODN_OFFSET-1)); + DBG(WARNING, ("liodnOffset number is out of rev1 range - MSB bits cleard.")); + } + } + } +#endif /* FM_PARTITION_ARRAY */ +/* TODO - add checks */ + } + else + { +/* TODO - add checks */ + } + + return E_OK; +} + +t_Error FmPortImInit(t_FmPort *p_FmPort) +{ + t_FmImBd *p_Bd=NULL; + t_Handle h_BufContext; + uint64_t tmpPhysBase; + uint16_t log2Num; + uint8_t *p_Data/*, *p_Tmp*/; + int i; + t_Error err; + uint16_t tmpReg16; + uint32_t tmpReg32; + + ASSERT_COND(p_FmPort); + + p_FmPort->im.p_FmPortImPram = + (t_FmPortImPram *)FM_MURAM_AllocMem(p_FmPort->im.h_FmMuram, sizeof(t_FmPortImPram), IM_PRAM_ALIGN); + if (!p_FmPort->im.p_FmPortImPram) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Parameter-RAM!!!")); + WRITE_BLOCK(p_FmPort->im.p_FmPortImPram, 0, sizeof(t_FmPortImPram)); + + if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || + (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + p_FmPort->im.p_BdRing = (t_FmImBd *)XX_MallocSmart((uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize), p_FmPort->im.fwExtStructsMemId, 4); + if (!p_FmPort->im.p_BdRing) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD ring!!!")); + IOMemSet32(p_FmPort->im.p_BdRing, 0, (uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize)); + + p_FmPort->im.p_BdShadow = (t_Handle *)XX_Malloc((uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize)); + if (!p_FmPort->im.p_BdShadow) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD shadow!!!")); + memset(p_FmPort->im.p_BdShadow, 0, (uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize)); + + /* Initialize the Rx-BD ring */ + for (i=0; iim.bdRingSize; i++) + { + p_Bd = BD_GET(i); + BD_STATUS_AND_LENGTH_SET (p_Bd, BD_R_E); + + if ((p_Data = p_FmPort->im.rxPool.f_GetBuf(p_FmPort->im.rxPool.h_BufferPool, &h_BufContext)) == NULL) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Data buffer")); + BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, p_Bd, p_Data); + p_FmPort->im.p_BdShadow[i] = h_BufContext; + } + + if ((p_FmPort->im.dataMemAttributes & MEMORY_ATTR_CACHEABLE) || + (p_FmPort->im.fwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE)) 
+ WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_GBL | IM_MODE_SET_BO(2)); + else + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_SET_BO(2)); + + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->rxQdPtr, + (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) - + p_FmPort->p_FmPortDriverParam->fmMuramPhysBaseAddr + 0x20)); + + LOG2((uint64_t)p_FmPort->im.mrblr, log2Num); + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->mrblr, log2Num); + + /* Initialize Rx QD */ + tmpPhysBase = (uint64_t)(XX_VirtToPhys(p_FmPort->im.p_BdRing)); + SET_ADDR(&p_FmPort->im.p_FmPortImPram->rxQd.bdRingBase, tmpPhysBase); + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.bdRingSize, (uint16_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize)); + + /* Update the IM PRAM address in the BMI */ + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->rxPortBmiRegs.fmbm_rfqid, + (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) - + p_FmPort->p_FmPortDriverParam->fmMuramPhysBaseAddr)); + if (!p_FmPort->polling || p_FmPort->exceptions) + { + /* Allocate, configure and register interrupts */ + err = FmAllocFmanCtrlEventReg(p_FmPort->h_Fm, &p_FmPort->fmanCtrlEventId); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + ASSERT_COND(!(p_FmPort->fmanCtrlEventId & ~IM_RXQD_FPMEVT_SEL_MASK)); + tmpReg16 = (uint16_t)(p_FmPort->fmanCtrlEventId & IM_RXQD_FPMEVT_SEL_MASK); + tmpReg32 = 0; + + if(p_FmPort->exceptions & IM_EV_BSY) + { + tmpReg16 |= IM_RXQD_BSYINTM; + tmpReg32 |= IM_EV_BSY; + } + if(!p_FmPort->polling) + { + tmpReg16 |= IM_RXQD_RXFINTM; + tmpReg32 |= IM_EV_RX; + } + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16); + + FmRegisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, ImException , (t_Handle)p_FmPort); + + FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32); + } + else + p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ; + } + else + { + p_FmPort->im.p_BdRing = (t_FmImBd *)XX_MallocSmart((uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize), p_FmPort->im.fwExtStructsMemId, 4); + if (!p_FmPort->im.p_BdRing) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Tx BD ring!!!")); + IOMemSet32(p_FmPort->im.p_BdRing, 0, (uint32_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize)); + + p_FmPort->im.p_BdShadow = (t_Handle *)XX_Malloc((uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize)); + if (!p_FmPort->im.p_BdShadow) + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Independent-Mode Rx BD shadow!!!")); + memset(p_FmPort->im.p_BdShadow, 0, (uint32_t)(sizeof(t_Handle)*p_FmPort->im.bdRingSize)); + p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID; + + if ((p_FmPort->im.dataMemAttributes & MEMORY_ATTR_CACHEABLE) || + (p_FmPort->im.fwExtStructsMemAttr & MEMORY_ATTR_CACHEABLE)) + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_GBL | IM_MODE_SET_BO(2)); + else + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->mode, IM_MODE_SET_BO(2)); + + WRITE_UINT32(p_FmPort->im.p_FmPortImPram->txQdPtr, + (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) - + p_FmPort->p_FmPortDriverParam->fmMuramPhysBaseAddr + 0x40)); + + /* Initialize Tx QD */ + tmpPhysBase = (uint64_t)(XX_VirtToPhys(p_FmPort->im.p_BdRing)); + SET_ADDR(&p_FmPort->im.p_FmPortImPram->txQd.bdRingBase, tmpPhysBase); + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->txQd.bdRingSize, (uint16_t)(sizeof(t_FmImBd)*p_FmPort->im.bdRingSize)); + + /* Update the IM PRAM address in the BMI */ + WRITE_UINT32(p_FmPort->p_FmPortBmiRegs->txPortBmiRegs.fmbm_tcfqid, + (uint32_t)((uint64_t)(XX_VirtToPhys(p_FmPort->im.p_FmPortImPram)) - + 
p_FmPort->p_FmPortDriverParam->fmMuramPhysBaseAddr)); + } + + + return E_OK; +} + +void FmPortImFree(t_FmPort *p_FmPort) +{ + uint32_t bdStatus; + uint8_t *p_CurData; + + ASSERT_COND(p_FmPort); + ASSERT_COND(p_FmPort->im.p_FmPortImPram); + + if ((p_FmPort->portType == e_FM_PORT_TYPE_RX) || + (p_FmPort->portType == e_FM_PORT_TYPE_RX_10G)) + { + if (!p_FmPort->polling || p_FmPort->exceptions) + { + /* Deallocate and unregister interrupts */ + FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, 0); + + FmFreeFmanCtrlEventReg(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId); + + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, 0); + + FmUnregisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId); + } + /* Try first clean what has received */ + FmPortImRx(p_FmPort); + + /* Now, get rid of the the empty buffer! */ + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId)); + + while (bdStatus & BD_R_E) /* while there is data in the Rx BD */ + { + p_CurData = BdBufferGet(p_FmPort->im.rxPool.f_PhysToVirt, BD_GET(p_FmPort->im.currBdId)); + + BdBufferSet(p_FmPort->im.rxPool.f_VirtToPhys, BD_GET(p_FmPort->im.currBdId), NULL); + BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), 0); + + p_FmPort->im.rxPool.f_PutBuf(p_FmPort->im.rxPool.h_BufferPool, + p_CurData, + p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]); + + p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId); + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId)); + } + } + else + TxConf(p_FmPort, e_TX_CONF_TYPE_FLUSH); + + FM_MURAM_FreeMem(p_FmPort->im.h_FmMuram, p_FmPort->im.p_FmPortImPram); + + if (p_FmPort->im.p_BdShadow) + XX_Free(p_FmPort->im.p_BdShadow); + + if (p_FmPort->im.p_BdRing) + XX_FreeSmart(p_FmPort->im.p_BdRing); +} + + +t_Error FM_PORT_ConfigIMMaxRxBufLength(t_Handle h_FmPort, uint16_t newVal) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->im.mrblr = newVal; + + return E_OK; +} + +t_Error FM_PORT_ConfigIMRxBdRingLength(t_Handle h_FmPort, uint16_t newVal) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->im.bdRingSize = newVal; + + return E_OK; +} + +t_Error FM_PORT_ConfigIMTxBdRingLength(t_Handle h_FmPort, uint16_t newVal) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->im.bdRingSize = newVal; + + return E_OK; +} + +t_Error FM_PORT_ConfigIMFmanCtrlExternalStructsMemory(t_Handle h_FmPort, + uint8_t memId, + uint32_t memAttributes) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + p_FmPort->im.fwExtStructsMemId = memId; + p_FmPort->im.fwExtStructsMemAttr = memAttributes; + + return E_OK; +} + +t_Error FM_PORT_ConfigIMPolling(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if((p_FmPort->portType != e_FM_PORT_TYPE_RX_10G) && (p_FmPort->portType != e_FM_PORT_TYPE_RX)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Available for Rx ports only")); + + if (!FmIsMaster(p_FmPort->h_Fm)) + RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("Available on master-partition only;" + "in guest-partitions, IM is always in polling!")); + + p_FmPort->polling = TRUE; + + return E_OK; +} + +t_Error FM_PORT_SetIMExceptions(t_Handle h_FmPort, e_FmPortExceptions exception, bool enable) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + t_Error err; + uint16_t tmpReg16; + uint32_t tmpReg32; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + if(exception == e_FM_PORT_EXCEPTION_IM_BUSY) + { + if(enable) + { + p_FmPort->exceptions |= IM_EV_BSY; + if(p_FmPort->fmanCtrlEventId == (uint8_t)NO_IRQ) + { + /* Allocate, configure and register interrupts */ + err = FmAllocFmanCtrlEventReg(p_FmPort->h_Fm, &p_FmPort->fmanCtrlEventId); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + ASSERT_COND(!(p_FmPort->fmanCtrlEventId & ~IM_RXQD_FPMEVT_SEL_MASK)); + + FmRegisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, ImException, (t_Handle)p_FmPort); + tmpReg16 = (uint16_t)((p_FmPort->fmanCtrlEventId & IM_RXQD_FPMEVT_SEL_MASK) | IM_RXQD_BSYINTM); + tmpReg32 = IM_EV_BSY; + } + else + { + tmpReg16 = (uint16_t)(GET_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen) | IM_RXQD_BSYINTM); + tmpReg32 = FmGetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId) | IM_EV_BSY; + } + + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16); + FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32); + } + else + { + p_FmPort->exceptions &= ~IM_EV_BSY; + if (!p_FmPort->exceptions && p_FmPort->polling) + { + FmFreeFmanCtrlEventReg(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId); + FmUnregisterFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId); + FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, 0); + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, 0); + p_FmPort->fmanCtrlEventId = (uint8_t)NO_IRQ; + } + else + { + tmpReg16 = (uint16_t)(GET_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen) & ~IM_RXQD_BSYINTM); + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->rxQd.gen, tmpReg16); + tmpReg32 = FmGetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId) & ~IM_EV_BSY; + FmSetFmanCtrlIntr(p_FmPort->h_Fm, p_FmPort->fmanCtrlEventId, tmpReg32); + } + } + } + else + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("Invalid exception.")); + + return E_OK; +} + +t_Error FM_PORT_ImTx( t_Handle h_FmPort, + uint8_t *p_Data, + uint16_t length, + bool lastBuffer, + t_Handle h_BufContext) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + uint16_t nextBdId; + uint32_t bdStatus, nextBdStatus; + bool firstBuffer; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + bdStatus = BD_STATUS_AND_LENGTH(BD_GET(p_FmPort->im.currBdId)); + nextBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId); + nextBdStatus = BD_STATUS_AND_LENGTH(BD_GET(nextBdId)); + + if (!(bdStatus & BD_R_E) && !(nextBdStatus & BD_R_E)) + { + /* Confirm the current BD - BD is available */ + if ((bdStatus & BD_LENGTH_MASK) 
&& (p_FmPort->im.f_TxConf)) + p_FmPort->im.f_TxConf (p_FmPort->h_App, + BdBufferGet(XX_PhysToVirt, BD_GET(p_FmPort->im.currBdId)), + 0, + p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId]); + + bdStatus = length; + + /* if this is the first BD of a frame */ + if (p_FmPort->im.firstBdOfFrameId == IM_ILEGAL_BD_ID) + { + firstBuffer = TRUE; + p_FmPort->im.txFirstBdStatus = (bdStatus | BD_R_E); + + if (!lastBuffer) + p_FmPort->im.firstBdOfFrameId = p_FmPort->im.currBdId; + } + else + firstBuffer = FALSE; + + BdBufferSet(XX_VirtToPhys, BD_GET(p_FmPort->im.currBdId), p_Data); + p_FmPort->im.p_BdShadow[p_FmPort->im.currBdId] = h_BufContext; + + /* deal with last */ + if (lastBuffer) + { + /* if single buffer frame */ + if (firstBuffer) + BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.currBdId), p_FmPort->im.txFirstBdStatus | BD_L); + else + { + /* Set the last BD of the frame */ + BD_STATUS_AND_LENGTH_SET (BD_GET(p_FmPort->im.currBdId), (bdStatus | BD_R_E | BD_L)); + /* Set the first BD of the frame */ + BD_STATUS_AND_LENGTH_SET(BD_GET(p_FmPort->im.firstBdOfFrameId), p_FmPort->im.txFirstBdStatus); + p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID; + } + WRITE_UINT16(p_FmPort->im.p_FmPortImPram->txQd.offsetIn, (uint16_t)(GetNextBdId(p_FmPort, p_FmPort->im.currBdId)<<4)); + } + else if (!firstBuffer) /* mid frame buffer */ + BD_STATUS_AND_LENGTH_SET (BD_GET(p_FmPort->im.currBdId), bdStatus | BD_R_E); + + p_FmPort->im.currBdId = GetNextBdId(p_FmPort, p_FmPort->im.currBdId); + } + else + { + /* Discard current frame. Return error. */ + if (p_FmPort->im.firstBdOfFrameId != IM_ILEGAL_BD_ID) + { + /* Error: No free BD */ + /* Response: Discard current frame. Return error. */ + uint16_t cleanBdId = p_FmPort->im.firstBdOfFrameId; + + ASSERT_COND(p_FmPort->im.firstBdOfFrameId != p_FmPort->im.currBdId); + + /* Since firstInFrame is not NULL, one buffer at least has already been + inserted into the BD ring. Using do-while covers the situation of a + frame spanned throughout the whole Tx BD ring (p_CleanBd is incremented + prior to testing whether or not it's equal to TxBd). 
*/ + do + { + BD_STATUS_AND_LENGTH_SET(BD_GET(cleanBdId), 0); + /* Advance BD pointer */ + cleanBdId = GetNextBdId(p_FmPort, cleanBdId); + } while (cleanBdId != p_FmPort->im.currBdId); + + p_FmPort->im.currBdId = cleanBdId; + p_FmPort->im.firstBdOfFrameId = IM_ILEGAL_BD_ID; + } + + return ERROR_CODE(E_FULL); + } + + return E_OK; +} + +void FM_PORT_ImTxConf(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + TxConf(p_FmPort, e_TX_CONF_TYPE_CALLBACK); +} + +t_Error FM_PORT_ImRx(t_Handle h_FmPort) +{ + t_FmPort *p_FmPort = (t_FmPort*)h_FmPort; + + SANITY_CHECK_RETURN_ERROR(p_FmPort, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmPort->imEn, E_INVALID_STATE); + SANITY_CHECK_RETURN_ERROR(!p_FmPort->p_FmPortDriverParam, E_INVALID_HANDLE); + + return FmPortImRx(p_FmPort); +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Rtc/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Rtc/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +NCSW_FM_INC = $(srctree)/drivers/net/dpa/NetCommSw/Peripherals/FM/inc + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) + +obj-y += fsl-ncsw-RTC.o + +fsl-ncsw-RTC-objs := fm_rtc.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Rtc/fm_rtc.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Rtc/fm_rtc.c @@ -0,0 +1,891 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/****************************************************************************** + @File fm_rtc.c + + @Description FM RTC driver implementation. + + @Cautions None +*//***************************************************************************/ + +#include "error_ext.h" +#include "debug_ext.h" +#include "string_ext.h" +#include "part_ext.h" +#include "xx_ext.h" +#include "ncsw_ext.h" + +#include "fm_rtc.h" +#include "fm_common.h" + + +/*****************************************************************************/ +static void SetDefaultParam(t_FmRtc *p_Rtc) +{ + t_FmRtcDriverParam *p_RtcDriverParam = p_Rtc->p_RtcDriverParam; + int i; + + p_Rtc->outputClockDivisor = DEFAULT_outputClockDivisor; + p_Rtc->p_RtcDriverParam->bypass = DEFAULT_bypass; + p_RtcDriverParam->srcClk = DEFAULT_srcClock; + p_RtcDriverParam->invertInputClkPhase = DEFAULT_invertInputClkPhase; + p_RtcDriverParam->invertOutputClkPhase = DEFAULT_invertOutputClkPhase; + p_RtcDriverParam->pulseRealign = DEFAULT_pulseRealign; + for (i=0; i < FM_RTC_NUM_OF_ALARMS; i++) + { + p_RtcDriverParam->alarmPolarity[i] = DEFAULT_alarmPolarity; + } + for (i=0; i < FM_RTC_NUM_OF_EXT_TRIGGERS; i++) + { + p_RtcDriverParam->triggerPolarity[i] = DEFAULT_triggerPolarity; + } + p_Rtc->clockPeriodNanoSec = DEFAULT_clockPeriod; /* 1 usec */ +} + +/*****************************************************************************/ +static t_Error CheckInitParameters(t_FmRtc *p_Rtc) +{ + t_FmRtcDriverParam *p_RtcDriverParam = p_Rtc->p_RtcDriverParam; + int i; + + if ((p_RtcDriverParam->srcClk != e_FM_RTC_SOURCE_CLOCK_EXTERNAL) && + (p_RtcDriverParam->srcClk != e_FM_RTC_SOURCE_CLOCK_SYSTEM) && + (p_RtcDriverParam->srcClk != e_FM_RTC_SOURCE_CLOCK_OSCILATOR)) + RETURN_ERROR(MAJOR, E_INVALID_CLOCK, ("Source clock undefined")); + + if (p_Rtc->outputClockDivisor == 0) + { + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("Divisor for output clock (should be positive)")); + } + + for (i=0; i < FM_RTC_NUM_OF_ALARMS; i++) + { + if ((p_RtcDriverParam->alarmPolarity[i] != e_FM_RTC_ALARM_POLARITY_ACTIVE_LOW) && + (p_RtcDriverParam->alarmPolarity[i] != e_FM_RTC_ALARM_POLARITY_ACTIVE_HIGH)) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm %d signal polarity", i)); + } + } + for (i=0; i < FM_RTC_NUM_OF_EXT_TRIGGERS; i++) + { + if ((p_RtcDriverParam->triggerPolarity[i] != e_FM_RTC_TRIGGER_ON_FALLING_EDGE) && + (p_RtcDriverParam->triggerPolarity[i] != e_FM_RTC_TRIGGER_ON_RISING_EDGE)) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Trigger %d signal polarity", i)); + } + } + +#ifdef FM_1588_SRC_CLK_ERRATA_FMAN1 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Rtc->h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)&& + ((p_RtcDriverParam->srcClk==e_FM_RTC_SOURCE_CLOCK_SYSTEM) && p_RtcDriverParam->invertInputClkPhase)) + RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("Can not use invertInputClkPhase when source clock is e_FM_RTC_SOURCE_CLOCK_SYSTEM")); + } +#endif /* FM_1588_SRC_CLK_ERRATA_FMAN1 */ + + return E_OK; +} + +/*****************************************************************************/ +static void RtcExceptions(t_Handle h_FmRtc) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + t_FmRtcMemMap *p_MemMap; + register uint32_t events; + + ASSERT_COND(p_Rtc); + p_MemMap = p_Rtc->p_MemMap; + + /* Get valid events */ + events = GET_UINT32(p_MemMap->tmr_tevent); + events &= GET_UINT32(p_MemMap->tmr_temask); + + /* Clear event bits */ + WRITE_UINT32(p_MemMap->tmr_tevent, events); + + if (events & TMR_TEVENT_ALM1) + { + 
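+ /* One-shot alarm: when clearOnExpiration is set, the code below clears the TMR_ALARM1 register and masks further ALM1 events before invoking the user callback. */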
if(p_Rtc->alarmParams[0].clearOnExpiration) + { + WRITE_UINT32(p_MemMap->tmr_alarm[0].tmr_alarm_l, 0); + WRITE_UINT32(p_MemMap->tmr_temask, GET_UINT32(p_MemMap->tmr_temask) & ~TMR_TEVENT_ALM1); + } + ASSERT_COND(p_Rtc->alarmParams[0].f_AlarmCallback); + p_Rtc->alarmParams[0].f_AlarmCallback(p_Rtc->h_App, 0); + } + if (events & TMR_TEVENT_ALM2) + { + if(p_Rtc->alarmParams[1].clearOnExpiration) + { + WRITE_UINT32(p_MemMap->tmr_alarm[1].tmr_alarm_l, 0); + WRITE_UINT32(p_MemMap->tmr_temask, GET_UINT32(p_MemMap->tmr_temask) & ~TMR_TEVENT_ALM2); + } + ASSERT_COND(p_Rtc->alarmParams[1].f_AlarmCallback); + p_Rtc->alarmParams[1].f_AlarmCallback(p_Rtc->h_App, 1); + } + if (events & TMR_TEVENT_PP1) + { + ASSERT_COND(p_Rtc->periodicPulseParams[0].f_PeriodicPulseCallback); + p_Rtc->periodicPulseParams[0].f_PeriodicPulseCallback(p_Rtc->h_App, 0); + } + if (events & TMR_TEVENT_PP2) + { + ASSERT_COND(p_Rtc->periodicPulseParams[1].f_PeriodicPulseCallback); + p_Rtc->periodicPulseParams[1].f_PeriodicPulseCallback(p_Rtc->h_App, 1); + } + if (events & TMR_TEVENT_ETS1) + { + ASSERT_COND(p_Rtc->externalTriggerParams[0].f_ExternalTriggerCallback); + p_Rtc->externalTriggerParams[0].f_ExternalTriggerCallback(p_Rtc->h_App, 0); + } + if (events & TMR_TEVENT_ETS2) + { + ASSERT_COND(p_Rtc->externalTriggerParams[1].f_ExternalTriggerCallback); + p_Rtc->externalTriggerParams[1].f_ExternalTriggerCallback(p_Rtc->h_App, 1); + } +} + + +/*****************************************************************************/ +t_Handle FM_RTC_Config(t_FmRtcParams *p_FmRtcParam) +{ + t_FmRtc *p_Rtc; + + SANITY_CHECK_RETURN_VALUE(p_FmRtcParam, E_NULL_POINTER, NULL); + + /* Allocate memory for the FM RTC driver parameters */ + p_Rtc = (t_FmRtc *)XX_Malloc(sizeof(t_FmRtc)); + if (!p_Rtc) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM RTC driver structure")); + return NULL; + } + + memset(p_Rtc, 0, sizeof(t_FmRtc)); + + /* Allocate memory for the FM RTC driver parameters */ + p_Rtc->p_RtcDriverParam = (t_FmRtcDriverParam *)XX_Malloc(sizeof(t_FmRtcDriverParam)); + if (!p_Rtc->p_RtcDriverParam) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM RTC driver parameters")); + XX_Free(p_Rtc); + return NULL; + } + + memset(p_Rtc->p_RtcDriverParam, 0, sizeof(t_FmRtcDriverParam)); + + /* Store RTC configuration parameters */ + p_Rtc->h_Fm = p_FmRtcParam->h_Fm; + + /* Set default RTC configuration parameters */ + SetDefaultParam(p_Rtc); + + /* Store RTC parameters in the RTC control structure */ + p_Rtc->p_MemMap = (t_FmRtcMemMap *)UINT_TO_PTR(p_FmRtcParam->baseAddress); + p_Rtc->h_App = p_FmRtcParam->h_App; + + return p_Rtc; +} + +/*****************************************************************************/ +t_Error FM_RTC_Init(t_Handle h_FmRtc) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + t_FmRtcDriverParam *p_RtcDriverParam; + t_FmRtcMemMap *p_MemMap; + uint32_t freqCompensation; + uint32_t tmrCtrl; + int i; + uint64_t tmpDouble; + + p_RtcDriverParam = p_Rtc->p_RtcDriverParam; + p_MemMap = p_Rtc->p_MemMap; + + if(CheckInitParameters(p_Rtc)!=E_OK) + RETURN_ERROR(MAJOR, E_CONFLICT, + ("Init Parameters are not Valid")); + + /* TODO A check must be added here, that no timestamping MAC's + * are working in this stage. 
*/ + WRITE_UINT32(p_MemMap->tmr_ctrl, TMR_CTRL_TMSR); + XX_UDelay(10); + WRITE_UINT32(p_MemMap->tmr_ctrl, 0); + + /* Set the source clock */ + switch (p_RtcDriverParam->srcClk) + { + case e_FM_RTC_SOURCE_CLOCK_SYSTEM: + tmrCtrl = TMR_CTRL_CKSEL_MAC_CLK; + break; + case e_FM_RTC_SOURCE_CLOCK_OSCILATOR: + tmrCtrl = TMR_CTRL_CKSEL_OSC_CLK; + break; + default: + /* Use a clock from the external TMR reference clock. */ + tmrCtrl = TMR_CTRL_CKSEL_EXT_CLK; + break; + } + + /* Whatever period the user picked, the timestamp advances by 1 each time + * that period elapses. */ + tmrCtrl |= ((1 << TMR_CTRL_TCLK_PERIOD_SHIFT) & TMR_CTRL_TCLK_PERIOD_MASK); + + if (p_RtcDriverParam->invertInputClkPhase) + tmrCtrl |= TMR_CTRL_CIPH; + if (p_RtcDriverParam->invertOutputClkPhase) + tmrCtrl |= TMR_CTRL_COPH; + + for (i=0; i < FM_RTC_NUM_OF_ALARMS; i++) + { + if (p_RtcDriverParam->alarmPolarity[i] == e_FM_RTC_ALARM_POLARITY_ACTIVE_LOW) + tmrCtrl |= (TMR_CTRL_ALMP1 >> i); + } + + for (i=0; i < FM_RTC_NUM_OF_EXT_TRIGGERS; i++) + if (p_RtcDriverParam->triggerPolarity[i] == e_FM_RTC_TRIGGER_ON_FALLING_EDGE) + tmrCtrl |= (TMR_CTRL_ETEP1 << i); + + if (!p_RtcDriverParam->timerSlaveMode && p_Rtc->p_RtcDriverParam->bypass) + tmrCtrl |= TMR_CTRL_BYP; + + WRITE_UINT32(p_MemMap->tmr_ctrl, tmrCtrl); + + for (i=0; i < FM_RTC_NUM_OF_ALARMS; i++) + { + /* Clear TMR_ALARM registers */ + WRITE_UINT32(p_MemMap->tmr_alarm[i].tmr_alarm_l, 0xFFFFFFFF); + WRITE_UINT32(p_MemMap->tmr_alarm[i].tmr_alarm_h, 0xFFFFFFFF); + } + + /* Clear TMR_TEVENT */ + WRITE_UINT32(p_MemMap->tmr_tevent, TMR_TEVENT_ALL); + + /* Initialize TMR_TEMASK */ + WRITE_UINT32(p_MemMap->tmr_temask, 0); + + + /* Find the source clock frequency in MHz */ + if (p_Rtc->p_RtcDriverParam->srcClk != e_FM_RTC_SOURCE_CLOCK_SYSTEM) + p_Rtc->srcClkFreqMhz = p_Rtc->p_RtcDriverParam->extSrcClkFreq; + else + p_Rtc->srcClkFreqMhz = (uint32_t)(FmGetClockFreq(p_Rtc->h_Fm)/2); + + /* If the timer is in master mode, initialize TMR_CTRL. */ + /* We want the counter (TMR_CNT) to count in nanoseconds */ + if (!p_RtcDriverParam->timerSlaveMode && p_Rtc->p_RtcDriverParam->bypass) + { + p_Rtc->clockPeriodNanoSec = (1000 / p_Rtc->srcClkFreqMhz); + } + else + { + /* Initialize TMR_ADD with the initial frequency compensation value: + freqCompensation = (2^32 / frequency ratio) */ + /* frequency ratio = source clock / rtc clock = + * (p_Rtc->srcClkFreqMhz * 1000000) / (1000000000 / p_Rtc->clockPeriodNanoSec) */ + freqCompensation = (uint32_t)DIV_CEIL(ACCUMULATOR_OVERFLOW * 1000, + p_Rtc->clockPeriodNanoSec * p_Rtc->srcClkFreqMhz); + WRITE_UINT32(p_MemMap->tmr_add, freqCompensation); + } + /* Check the legality of the relation between source and destination clocks */ + /* (the ratio should be larger than 1.0001) */ + tmpDouble = 10000 * (uint64_t)p_Rtc->clockPeriodNanoSec * (uint64_t)p_Rtc->srcClkFreqMhz; + if((tmpDouble) <= 10001) + RETURN_ERROR(MAJOR, E_CONFLICT, + ("Invalid relation between source and destination clocks.
Should be larger than 1.0001")); + + + for (i=0; i < 2; i++) + /* Clear TMR_FIPER registers */ + WRITE_UINT32(p_MemMap->tmr_fiper[i], 0xFFFFFFFF); + + /* Initialize TMR_PRSC */ + WRITE_UINT32(p_MemMap->tmr_prsc, p_Rtc->outputClockDivisor); + + /* Clear TMR_OFF */ + WRITE_UINT32(p_MemMap->tmr_off_l, 0); + WRITE_UINT32(p_MemMap->tmr_off_h, 0); + + /* Register the FM RTC interrupt */ + FmRegisterIntr(p_Rtc->h_Fm, e_FM_MOD_TMR, 0, e_FM_INTR_TYPE_NORMAL, RtcExceptions , p_Rtc); + + /* Free parameters structures */ + XX_Free(p_Rtc->p_RtcDriverParam); + p_Rtc->p_RtcDriverParam = NULL; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_Free(t_Handle h_FmRtc) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + + if (p_Rtc->p_RtcDriverParam) + { + XX_Free(p_Rtc->p_RtcDriverParam); + } + else + { + FM_RTC_Disable(h_FmRtc); + } + + /* Unregister FM RTC interrupt */ + FmUnregisterIntr(p_Rtc->h_Fm, e_FM_MOD_TMR, 0, e_FM_INTR_TYPE_NORMAL); + XX_Free(p_Rtc); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigSourceClock(t_Handle h_FmRtc, + e_FmSrcClk srcClk, + uint32_t freqInMhz) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->p_RtcDriverParam->srcClk = srcClk; + if(srcClk != e_FM_RTC_SOURCE_CLOCK_SYSTEM) + p_Rtc->p_RtcDriverParam->extSrcClkFreq = freqInMhz; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigPeriod(t_Handle h_FmRtc, uint32_t period) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->clockPeriodNanoSec = period; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigFrequencyBypass(t_Handle h_FmRtc, bool enabled) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->p_RtcDriverParam->bypass = enabled; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigInvertedInputClockPhase(t_Handle h_FmRtc, bool inverted) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->p_RtcDriverParam->invertInputClkPhase = inverted; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigInvertedOutputClockPhase(t_Handle h_FmRtc, bool inverted) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->p_RtcDriverParam->invertOutputClkPhase = inverted; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigOutputClockDivisor(t_Handle h_FmRtc, uint16_t divisor) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->outputClockDivisor 
= divisor; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigPulseRealignment(t_Handle h_FmRtc, bool enable) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_Rtc->p_RtcDriverParam->pulseRealign = enable; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigAlarmPolarity(t_Handle h_FmRtc, + uint8_t alarmId, + e_FmRtcAlarmPolarity alarmPolarity) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + if (alarmId >= FM_RTC_NUM_OF_ALARMS) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm ID")); + } + + p_Rtc->p_RtcDriverParam->alarmPolarity[alarmId] = alarmPolarity; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ConfigExternalTriggerPolarity(t_Handle h_FmRtc, + uint8_t triggerId, + e_FmRtcTriggerPolarity triggerPolarity) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + if (triggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External trigger ID")); + } + + p_Rtc->p_RtcDriverParam->triggerPolarity[triggerId] = triggerPolarity; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_Enable(t_Handle h_FmRtc, bool resetClock) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint32_t tmrCtrl; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + tmrCtrl = GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl); + + /* TODO A check must be added here, that no timestamping MAC's + * are working in this stage. */ + if (resetClock) + { + WRITE_UINT32(p_Rtc->p_MemMap->tmr_ctrl, (tmrCtrl | TMR_CTRL_TMSR)); + + XX_UDelay(10); + /* Clear TMR_OFF */ + WRITE_UINT32(p_Rtc->p_MemMap->tmr_off_l, 0); + WRITE_UINT32(p_Rtc->p_MemMap->tmr_off_h, 0); + } + + WRITE_UINT32(p_Rtc->p_MemMap->tmr_ctrl, (tmrCtrl | TMR_CTRL_TE)); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_Disable(t_Handle h_FmRtc) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint32_t tmrCtrl; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + /* TODO A check must be added here, that no timestamping MAC's + * are working in this stage. 
*/ + tmrCtrl = GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl); + WRITE_UINT32(p_Rtc->p_MemMap->tmr_ctrl, (tmrCtrl & ~(TMR_CTRL_TE))); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_SetClockOffset(t_Handle h_FmRtc, int64_t offset) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + /* TMR_OFF_L must be written first */ + WRITE_UINT32(p_Rtc->p_MemMap->tmr_off_l, (uint32_t)offset); + WRITE_UINT32(p_Rtc->p_MemMap->tmr_off_h, (uint32_t)(offset >> 32)); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc, t_FmRtcAlarmParams *p_FmRtcAlarmParams) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + t_FmRtcMemMap *p_MemMap; + uint32_t tmpReg; + uint64_t tmpAlarm; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_MemMap = p_Rtc->p_MemMap; + + if (p_FmRtcAlarmParams->alarmId >= FM_RTC_NUM_OF_ALARMS) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm ID")); + } + + if(p_FmRtcAlarmParams->alarmTime < p_Rtc->clockPeriodNanoSec) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm time must be equal or larger than RTC period - %d nanoseconds", p_Rtc->clockPeriodNanoSec)); + if(p_FmRtcAlarmParams->alarmTime % (uint64_t)p_Rtc->clockPeriodNanoSec) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Alarm time must be a multiple of RTC period - %d nanoseconds", p_Rtc->clockPeriodNanoSec)); + tmpAlarm = p_FmRtcAlarmParams->alarmTime/(uint64_t)p_Rtc->clockPeriodNanoSec; + + /* TMR_ALARM_L must be written first */ + WRITE_UINT32(p_MemMap->tmr_alarm[p_FmRtcAlarmParams->alarmId].tmr_alarm_l, (uint32_t)tmpAlarm); + WRITE_UINT32(p_MemMap->tmr_alarm[p_FmRtcAlarmParams->alarmId].tmr_alarm_h, + (uint32_t)(tmpAlarm >> 32)); + + if (p_FmRtcAlarmParams->f_AlarmCallback) + { + p_Rtc->alarmParams[p_FmRtcAlarmParams->alarmId].f_AlarmCallback = p_FmRtcAlarmParams->f_AlarmCallback; + p_Rtc->alarmParams[p_FmRtcAlarmParams->alarmId].clearOnExpiration = p_FmRtcAlarmParams->clearOnExpiration; + + if(p_FmRtcAlarmParams->alarmId == 0) + tmpReg = TMR_TEVENT_ALM1; + else + tmpReg = TMR_TEVENT_ALM2; + WRITE_UINT32(p_MemMap->tmr_temask, GET_UINT32(p_MemMap->tmr_temask) | tmpReg); + } + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_SetPeriodicPulse(t_Handle h_FmRtc, t_FmRtcPeriodicPulseParams *p_FmRtcPeriodicPulseParams) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + t_FmRtcMemMap *p_MemMap; + uint32_t tmpReg; + uint64_t tmpFiper; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + p_MemMap = p_Rtc->p_MemMap; + + if (p_FmRtcPeriodicPulseParams->periodicPulseId >= FM_RTC_NUM_OF_PERIODIC_PULSES) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse ID")); + } + if(GET_UINT32(p_MemMap->tmr_ctrl) & TMR_CTRL_TE) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Can't set Periodic pulse when RTC is enabled.")); + if(p_FmRtcPeriodicPulseParams->periodicPulsePeriod < p_Rtc->clockPeriodNanoSec) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse must be equal or larger than RTC period - %d nanoseconds", p_Rtc->clockPeriodNanoSec)); + if(p_FmRtcPeriodicPulseParams->periodicPulsePeriod % (uint64_t)p_Rtc->clockPeriodNanoSec) + RETURN_ERROR(MAJOR, 
E_INVALID_SELECTION, ("Periodic pulse must be a multiple of RTC period - %d nanoseconds", p_Rtc->clockPeriodNanoSec)); + tmpFiper = p_FmRtcPeriodicPulseParams->periodicPulsePeriod/(uint64_t)p_Rtc->clockPeriodNanoSec; + if(tmpFiper & 0xffffffff00000000LL) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse/RTC Period must be smaller than 4294967296", p_Rtc->clockPeriodNanoSec)); + + WRITE_UINT32(p_MemMap->tmr_fiper[p_FmRtcPeriodicPulseParams->periodicPulseId], (uint32_t)tmpFiper); + + if (p_FmRtcPeriodicPulseParams->f_PeriodicPulseCallback) + { + p_Rtc->periodicPulseParams[p_FmRtcPeriodicPulseParams->periodicPulseId].f_PeriodicPulseCallback = + p_FmRtcPeriodicPulseParams->f_PeriodicPulseCallback; + + if(p_FmRtcPeriodicPulseParams->periodicPulseId == 0) + tmpReg = TMR_TEVENT_PP1; + else + tmpReg = TMR_TEVENT_PP2; + WRITE_UINT32(p_MemMap->tmr_temask, GET_UINT32(p_MemMap->tmr_temask) | tmpReg); + } + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ClearPeriodicPulse(t_Handle h_FmRtc, uint8_t periodicPulseId) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + if (periodicPulseId >= FM_RTC_NUM_OF_PERIODIC_PULSES) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("Periodic pulse ID")); + } + + p_Rtc->periodicPulseParams[periodicPulseId].f_PeriodicPulseCallback = NULL; + + if(periodicPulseId == 0) + tmpReg = TMR_TEVENT_PP1; + else + tmpReg = TMR_TEVENT_PP2; + WRITE_UINT32(p_Rtc->p_MemMap->tmr_temask, GET_UINT32(p_Rtc->p_MemMap->tmr_temask) & ~tmpReg); + + if (GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl) & TMR_CTRL_FS) + WRITE_UINT32(p_Rtc->p_MemMap->tmr_ctrl, GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl) & ~TMR_CTRL_FS); + + WRITE_UINT32(p_Rtc->p_MemMap->tmr_fiper[periodicPulseId], 0xFFFFFFFF); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_SetExternalTrigger(t_Handle h_FmRtc, t_FmRtcExternalTriggerParams *p_FmRtcExternalTriggerParams) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + if (p_FmRtcExternalTriggerParams->externalTriggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External Trigger ID")); + } + + if (p_FmRtcExternalTriggerParams->f_ExternalTriggerCallback) + { + p_Rtc->externalTriggerParams[p_FmRtcExternalTriggerParams->externalTriggerId].f_ExternalTriggerCallback = p_FmRtcExternalTriggerParams->f_ExternalTriggerCallback; + if(p_FmRtcExternalTriggerParams->externalTriggerId == 0) + tmpReg = TMR_TEVENT_ETS1; + else + tmpReg = TMR_TEVENT_ETS2; + WRITE_UINT32(p_Rtc->p_MemMap->tmr_temask, GET_UINT32(p_Rtc->p_MemMap->tmr_temask) | tmpReg); + } + + if(p_FmRtcExternalTriggerParams->usePulseAsInput) + { + if(p_FmRtcExternalTriggerParams->externalTriggerId == 0) + tmpReg = TMR_CTRL_PP1L; + else + tmpReg = TMR_CTRL_PP2L; + WRITE_UINT32(p_Rtc->p_MemMap->tmr_ctrl, GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl) | tmpReg); + } + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_ClearExternalTrigger(t_Handle h_FmRtc, uint8_t externalTriggerId) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + if (externalTriggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS) + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External Trigger ID")); + + p_Rtc->externalTriggerParams[externalTriggerId].f_ExternalTriggerCallback = NULL; + + if(externalTriggerId == 0) + tmpReg = TMR_TEVENT_ETS1; + else + tmpReg = TMR_TEVENT_ETS2; + WRITE_UINT32(p_Rtc->p_MemMap->tmr_temask, GET_UINT32(p_Rtc->p_MemMap->tmr_temask) & ~tmpReg); + + if(externalTriggerId == 0) + tmpReg = TMR_CTRL_PP1L; + else + tmpReg = TMR_CTRL_PP2L; + + if (GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl) & tmpReg) + WRITE_UINT32(p_Rtc->p_MemMap->tmr_ctrl, GET_UINT32(p_Rtc->p_MemMap->tmr_ctrl) & ~tmpReg); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_GetExternalTriggerTimeStamp(t_Handle h_FmRtc, + uint8_t triggerId, + uint64_t *p_TimeStamp) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint64_t timeStamp; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + if (triggerId >= FM_RTC_NUM_OF_EXT_TRIGGERS) + { + RETURN_ERROR(MAJOR, E_INVALID_SELECTION, ("External trigger ID")); + } + + timeStamp = (uint64_t)GET_UINT32(p_Rtc->p_MemMap->tmr_etts[triggerId].tmr_etts_l); + timeStamp |= ((uint64_t)GET_UINT32(p_Rtc->p_MemMap->tmr_etts[triggerId].tmr_etts_h) << 32); + + timeStamp = timeStamp*p_Rtc->clockPeriodNanoSec; + *p_TimeStamp = timeStamp; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_GetCurrentTime(t_Handle h_FmRtc, uint64_t *p_Ts) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + uint64_t time; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + /* TMR_CNT_L must be read first to get an accurate value */ + time = (uint64_t)GET_UINT32(p_Rtc->p_MemMap->tmr_cnt_l); + time |= ((uint64_t)GET_UINT32(p_Rtc->p_MemMap->tmr_cnt_h) << 32); + + time = time*p_Rtc->clockPeriodNanoSec; + + *p_Ts = time; + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_SetCurrentTime(t_Handle h_FmRtc, uint64_t ts) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + ts = ts/p_Rtc->clockPeriodNanoSec; + /* TMR_CNT_L must be written first to get an accurate value */ + WRITE_UINT32(p_Rtc->p_MemMap->tmr_cnt_l, (uint32_t)ts); + WRITE_UINT32(p_Rtc->p_MemMap->tmr_cnt_h, (uint32_t)(ts >> 32)); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_GetFreqCompensation(t_Handle h_FmRtc, uint32_t *p_Compensation) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + *p_Compensation = (uint32_t) + DIV_CEIL(ACCUMULATOR_OVERFLOW * 1000, + p_Rtc->clockPeriodNanoSec * p_Rtc->srcClkFreqMhz); + + return E_OK; +} + +/*****************************************************************************/ +t_Error FM_RTC_SetFreqCompensation(t_Handle h_FmRtc, uint32_t freqCompensation) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + + SANITY_CHECK_RETURN_ERROR(p_Rtc, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Rtc->p_RtcDriverParam, E_INVALID_STATE); + + /* set the new freqCompensation */ + 
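+ /* Worked example with hypothetical numbers (not taken from the original source): a 600 MHz source clock with a 10 ns RTC period gives a frequency ratio of 6, so the value computed as in FM_RTC_GetFreqCompensation() above would be DIV_CEIL(2^32 * 1000, 10 * 600) = 0x2AAAAAAB, i.e. roughly 2^32 / 6. */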
WRITE_UINT32(p_Rtc->p_MemMap->tmr_add, freqCompensation); + + return E_OK; +} + +/*****************************************************************************/ +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_RTC_DumpRegs(t_Handle h_FmRtc) +{ + t_FmRtc *p_Rtc = (t_FmRtc *)h_FmRtc; + t_FmRtcMemMap *p_MemMap = p_Rtc->p_MemMap; + int i = 0; + + DECLARE_DUMP; + + if (p_MemMap) + { + + DUMP_TITLE(p_MemMap, ("RTC:")); + DUMP_VAR(p_MemMap, tmr_id); + DUMP_VAR(p_MemMap, tmr_id2); + DUMP_VAR(p_MemMap, tmr_ctrl); + DUMP_VAR(p_MemMap, tmr_tevent); + DUMP_VAR(p_MemMap, tmr_temask); + DUMP_VAR(p_MemMap, tmr_cnt_h); + DUMP_VAR(p_MemMap, tmr_cnt_l); + DUMP_VAR(p_MemMap, tmr_ctrl); + DUMP_VAR(p_MemMap, tmr_add); + DUMP_VAR(p_MemMap, tmr_acc); + DUMP_VAR(p_MemMap, tmr_prsc); + DUMP_VAR(p_MemMap, tmr_off_h); + DUMP_VAR(p_MemMap, tmr_off_l); + + DUMP_SUBSTRUCT_ARRAY(i, 2) + { + DUMP_VAR(p_MemMap, tmr_alarm[i].tmr_alarm_h); + DUMP_VAR(p_MemMap, tmr_alarm[i].tmr_alarm_l); + } + DUMP_SUBSTRUCT_ARRAY(i, 2) + { + DUMP_VAR(p_MemMap, tmr_fiper[i]); + DUMP_VAR(p_MemMap, tmr_fiper[i]); + } + DUMP_SUBSTRUCT_ARRAY(i, 2) + { + DUMP_VAR(p_MemMap, tmr_etts[i].tmr_etts_l); + DUMP_VAR(p_MemMap, tmr_etts[i].tmr_etts_l); + } + } + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/Rtc/fm_rtc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/Rtc/fm_rtc.h @@ -0,0 +1,217 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_rtc.h + + @Description Memory map and internal definitions for FM RTC IEEE1588 Timer driver. 
+ + @Cautions None +*//***************************************************************************/ + +#ifndef __FM_RTC_H__ +#define __FM_RTC_H__ + +#include "std_ext.h" +#include "fm_rtc_ext.h" + + +#define __ERR_MODULE__ MODULE_FM_RTC + +/* General definitions */ + +#define NANOSEC_PER_ONE_HZ_TICK 1000000000 +#define MIN_RTC_CLK_FREQ_HZ 1000 +#define MHz 1000000 + +#define ACCUMULATOR_OVERFLOW ((uint64_t)(1LL << 32)) + +/* RTC default values */ +#define DEFAULT_srcClock e_FM_RTC_SOURCE_CLOCK_SYSTEM +#define DEFAULT_bypass FALSE +#define DEFAULT_invertInputClkPhase FALSE +#define DEFAULT_invertOutputClkPhase FALSE +#define DEFAULT_outputClockDivisor 0x00000002 +#define DEFAULT_alarmPolarity e_FM_RTC_ALARM_POLARITY_ACTIVE_HIGH +#define DEFAULT_triggerPolarity e_FM_RTC_TRIGGER_ON_FALLING_EDGE +#define DEFAULT_pulseRealign FALSE +#define DEFAULT_clockPeriod 1000 + +/* FM RTC Registers definitions */ +#define TMR_CTRL_ALMP1 0x80000000 +#define TMR_CTRL_ALMP2 0x40000000 +#define TMR_CTRL_FS 0x10000000 +#define TMR_CTRL_PP1L 0x08000000 +#define TMR_CTRL_PP2L 0x04000000 +#define TMR_CTRL_TCLK_PERIOD_MASK 0x03FF0000 +#define TMR_CTRL_FRD 0x00004000 +#define TMR_CTRL_SLV 0x00002000 +#define TMR_CTRL_ETEP1 0x00000100 +#define TMR_CTRL_COPH 0x00000080 +#define TMR_CTRL_CIPH 0x00000040 +#define TMR_CTRL_TMSR 0x00000020 +#define TMR_CTRL_DBG 0x00000010 +#define TMR_CTRL_BYP 0x00000008 +#define TMR_CTRL_TE 0x00000004 +#define TMR_CTRL_CKSEL_OSC_CLK 0x00000003 +#define TMR_CTRL_CKSEL_MAC_CLK 0x00000001 +#define TMR_CTRL_CKSEL_EXT_CLK 0x00000000 +#define TMR_CTRL_TCLK_PERIOD_SHIFT 16 + +#define TMR_TEVENT_ETS2 0x02000000 +#define TMR_TEVENT_ETS1 0x01000000 +#define TMR_TEVENT_ALM2 0x00020000 +#define TMR_TEVENT_ALM1 0x00010000 +#define TMR_TEVENT_PP1 0x00000080 +#define TMR_TEVENT_PP2 0x00000040 +#define TMR_TEVENT_PP3 0x00000020 +#define TMR_TEVENT_ALL (TMR_TEVENT_ETS2 | TMR_TEVENT_ETS1 | \ + TMR_TEVENT_ALM2 | TMR_TEVENT_ALM1 | \ + TMR_TEVENT_PP1 | TMR_TEVENT_PP2 | TMR_TEVENT_PP3) + +#define TMR_PRSC_OCK_MASK 0x0000FFFF + + +/**************************************************************************//** + @Description Memory Mapped Registers +*//***************************************************************************/ + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +/**************************************************************************//** + @Description FM RTC timer alarm +*//***************************************************************************/ +typedef _Packed struct t_TmrAlaram +{ + volatile uint32_t tmr_alarm_h; /**< */ + volatile uint32_t tmr_alarm_l; /**< */ +} _PackedType t_TmrAlaram; + +/**************************************************************************//** + @Description FM RTC timer Ex trigger +*//***************************************************************************/ +typedef _Packed struct t_TmrExtTrigger +{ + volatile uint32_t tmr_etts_h; /**< */ + volatile uint32_t tmr_etts_l; /**< */ +} _PackedType t_TmrExtTrigger; + +typedef _Packed struct +{ + volatile uint32_t tmr_id; /* Module ID and version register */ + volatile uint32_t tmr_id2; /* Module ID and configuration register */ + volatile uint32_t PTP_RESERVED1[30]; + volatile uint32_t tmr_ctrl; /* timer control register */ + volatile uint32_t tmr_tevent; /* timer event register */ + volatile uint32_t tmr_temask; /* timer event mask register */ + volatile uint32_t PTP_RESERVED2[3]; + volatile uint32_t tmr_cnt_h; /* timer counter high register */ + volatile uint32_t tmr_cnt_l; /* timer counter low register */ + volatile uint32_t tmr_add; /* timer drift compensation addend register */ + volatile uint32_t tmr_acc; /* timer accumulator register */ + volatile uint32_t tmr_prsc; /* timer prescale */ + volatile uint32_t PTP_RESERVED3; + volatile uint32_t tmr_off_h; /* timer offset high */ + volatile uint32_t tmr_off_l; /* timer offset low */ + volatile t_TmrAlaram tmr_alarm[FM_RTC_NUM_OF_ALARMS]; /* timer alarm */ + volatile uint32_t PTP_RESERVED4[2]; + volatile uint32_t tmr_fiper[FM_RTC_NUM_OF_PERIODIC_PULSES]; /* timer fixed period interval */ + volatile uint32_t PTP_RESERVED5[2]; + volatile t_TmrExtTrigger tmr_etts[FM_RTC_NUM_OF_EXT_TRIGGERS]; /*time stamp general purpose external */ + volatile uint32_t PTP_RESERVED6[3]; +} _PackedType t_FmRtcMemMap; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/**************************************************************************//** + @Description RTC FM driver parameters structure. +*//***************************************************************************/ +typedef struct t_FmRtcDriverParam +{ + t_Handle h_Fm; /**< */ + e_FmSrcClk srcClk; /**< */ + uint32_t extSrcClkFreq; /**< */ + uint32_t rtcFreqHz; /**< */ + bool timerSlaveMode; /*Slave/Master Mode*/ + bool invertInputClkPhase; + bool invertOutputClkPhase; + uint32_t eventsMask; + bool bypass; /**< Indicates if frequency compensation is bypassed */ + bool pulseRealign; + e_FmRtcAlarmPolarity alarmPolarity[FM_RTC_NUM_OF_ALARMS]; + e_FmRtcTriggerPolarity triggerPolarity[FM_RTC_NUM_OF_EXT_TRIGGERS]; +} t_FmRtcDriverParam; + +typedef struct t_FmRtcAlarm +{ + t_FmRtcExceptionsCallback *f_AlarmCallback; + bool clearOnExpiration; +} t_FmRtcAlarm; + +typedef struct t_FmRtcPeriodicPulse +{ + t_FmRtcExceptionsCallback *f_PeriodicPulseCallback; +} t_FmRtcPeriodicPulse; + +typedef struct t_FmRtcExternalTrigger +{ + t_FmRtcExceptionsCallback *f_ExternalTriggerCallback; +} t_FmRtcExternalTrigger; + + +/**************************************************************************//** + @Description RTC FM driver control structure. 
+*//***************************************************************************/ +typedef struct t_FmRtc +{ + t_Part *p_Part; /**< Pointer to the integration device */ + t_Handle h_Fm; + t_Handle h_App; /**< Application handle */ + t_FmRtcMemMap *p_MemMap; /**< Pointer to RTC memory map */ + uint32_t clockPeriodNanoSec; /**< RTC clock period in nano-seconds (for FS mode) */ + uint32_t srcClkFreqMhz; + uint16_t outputClockDivisor; /**< Output clock divisor (for FS mode) */ + t_FmRtcAlarm alarmParams[FM_RTC_NUM_OF_ALARMS]; + t_FmRtcPeriodicPulse periodicPulseParams[FM_RTC_NUM_OF_PERIODIC_PULSES]; + t_FmRtcExternalTrigger externalTriggerParams[FM_RTC_NUM_OF_EXT_TRIGGERS]; + t_FmRtcDriverParam *p_RtcDriverParam; /**< RTC Driver parameters (for Init phase) */ +} t_FmRtc; + + +#endif /* __FM_RTC_H__ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/fm.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/fm.c @@ -0,0 +1,4605 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm.c + + @Description FM driver routines implementation. 
+*//***************************************************************************/ +#include "std_ext.h" +#include "error_ext.h" +#include "xx_ext.h" +#include "string_ext.h" +#include "sprint_ext.h" +#include "debug_ext.h" +#include "fm_muram_ext.h" + +#include "fm_common.h" +#include "fm_ipc.h" +#include "fm.h" + + +/****************************************/ +/* static functions */ +/****************************************/ + +static volatile bool blockingFlag = FALSE; +static void IpcMsgCompletionCB(t_Handle h_Fm, + uint8_t *p_Msg, + uint8_t *p_Reply, + uint32_t replyLength, + t_Error status) +{ + UNUSED(h_Fm);UNUSED(p_Msg);UNUSED(p_Reply);UNUSED(replyLength);UNUSED(status); + blockingFlag = FALSE; +} + +static bool IsFmanCtrlCodeLoaded(t_Fm *p_Fm) +{ + t_FMIramRegs *p_Iram; + + ASSERT_COND(p_Fm); + p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM); + + return (bool)!!(GET_UINT32(p_Iram->iready) & IRAM_READY); +} + +static t_Error CheckFmParameters(t_Fm *p_Fm) +{ + if (IsFmanCtrlCodeLoaded(p_Fm) && !p_Fm->p_FmDriverParam->resetOnInit) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old FMan CTRL code is loaded; FM must be reset!")); + if(!p_Fm->p_FmDriverParam->dmaAxiDbgNumOfBeats || (p_Fm->p_FmDriverParam->dmaAxiDbgNumOfBeats > DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("axiDbgNumOfBeats has to be in the range 1 - %d", DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS)); + if(p_Fm->p_FmDriverParam->dmaCamNumOfEntries % DMA_CAM_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaCamNumOfEntries has to be divisble by %d", DMA_CAM_UNITS)); + if(!p_Fm->p_FmDriverParam->dmaCamNumOfEntries || (p_Fm->p_FmDriverParam->dmaCamNumOfEntries > DMA_MODE_MAX_CAM_NUM_OF_ENTRIES)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaCamNumOfEntries has to be in the range 1 - %d", DMA_MODE_MAX_CAM_NUM_OF_ENTRIES)); + if(p_Fm->p_FmDriverParam->dmaCommQThresholds.assertEmergency > DMA_THRESH_MAX_COMMQ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaCommQThresholds.assertEmergency can not be larger than %d", DMA_THRESH_MAX_COMMQ)); + if(p_Fm->p_FmDriverParam->dmaCommQThresholds.clearEmergency > DMA_THRESH_MAX_COMMQ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaCommQThresholds.clearEmergency can not be larger than %d", DMA_THRESH_MAX_COMMQ)); + if(p_Fm->p_FmDriverParam->dmaCommQThresholds.clearEmergency >= p_Fm->p_FmDriverParam->dmaCommQThresholds.assertEmergency) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaCommQThresholds.clearEmergency must be smaller than dmaCommQThresholds.assertEmergency")); + if(p_Fm->p_FmDriverParam->dmaReadBufThresholds.assertEmergency > DMA_THRESH_MAX_BUF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaReadBufThresholds.assertEmergency can not be larger than %d", DMA_THRESH_MAX_BUF)); + if(p_Fm->p_FmDriverParam->dmaReadBufThresholds.clearEmergency > DMA_THRESH_MAX_BUF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaReadBufThresholds.clearEmergency can not be larger than %d", DMA_THRESH_MAX_BUF)); + if(p_Fm->p_FmDriverParam->dmaReadBufThresholds.clearEmergency >= p_Fm->p_FmDriverParam->dmaReadBufThresholds.assertEmergency) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaReadBufThresholds.clearEmergency must be smaller than dmaReadBufThresholds.assertEmergency")); + if(p_Fm->p_FmDriverParam->dmaWriteBufThresholds.assertEmergency > DMA_THRESH_MAX_BUF) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaWriteBufThresholds.assertEmergency can not be larger than %d", DMA_THRESH_MAX_BUF)); + if(p_Fm->p_FmDriverParam->dmaWriteBufThresholds.clearEmergency > DMA_THRESH_MAX_BUF) + 
RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaWriteBufThresholds.clearEmergency can not be larger than %d", DMA_THRESH_MAX_BUF)); + if(p_Fm->p_FmDriverParam->dmaWriteBufThresholds.clearEmergency >= p_Fm->p_FmDriverParam->dmaWriteBufThresholds.assertEmergency) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("dmaWriteBufThresholds.clearEmergency must be smaller than dmaWriteBufThresholds.assertEmergency")); + + if(!p_Fm->p_FmStateStruct->fmClkFreq) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fmClkFreq must be set.")); + if (USEC_TO_CLK(p_Fm->p_FmDriverParam->dmaWatchdog, p_Fm->p_FmStateStruct->fmClkFreq) > DMA_MAX_WATCHDOG) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, + ("dmaWatchdog depends on FM clock. dmaWatchdog(in microseconds) * clk (in Mhz), may not exceed 0x08x", DMA_MAX_WATCHDOG)); + +#ifdef FM_PARTITION_ARRAY + { + t_FmRevisionInfo revInfo; + uint8_t i; + + FM_GetRevision(p_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + for (i=0; ip_FmDriverParam->liodnBasePerPort[i] & ~FM_LIODN_BASE_MASK) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("liodn number is out of range")); + } +#endif /* FM_PARTITION_ARRAY */ + + if(p_Fm->p_FmStateStruct->totalFifoSize % BMI_FIFO_UNITS) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalFifoSize number has to be divisible by %d", BMI_FIFO_UNITS)); + if(!p_Fm->p_FmStateStruct->totalFifoSize || (p_Fm->p_FmStateStruct->totalFifoSize > BMI_MAX_FIFO_SIZE)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalFifoSize number has to be in the range 256 - %d", BMI_MAX_FIFO_SIZE)); + if(!p_Fm->p_FmStateStruct->totalNumOfTasks || (p_Fm->p_FmStateStruct->totalNumOfTasks > BMI_MAX_NUM_OF_TASKS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("totalNumOfTasks number has to be in the range 1 - %d", BMI_MAX_NUM_OF_TASKS)); + if(!p_Fm->p_FmStateStruct->maxNumOfOpenDmas || (p_Fm->p_FmStateStruct->maxNumOfOpenDmas > BMI_MAX_NUM_OF_DMAS)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("maxNumOfOpenDmas number has to be in the range 1 - %d", BMI_MAX_NUM_OF_DMAS)); + + if(p_Fm->p_FmDriverParam->thresholds.dispLimit > FPM_MAX_DISP_LIMIT) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("thresholds.dispLimit can't be greater than %d", FPM_MAX_DISP_LIMIT)); + + if(!p_Fm->f_Exception) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided")); + if(!p_Fm->f_BusError) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Exceptions callback not provided")); + + return E_OK; +} + +static void SendIpcIsr(t_Fm *p_Fm, uint32_t macEvent, uint32_t pendingReg) +{ + t_Error err; + t_FmIpcIsr fmIpcIsr; + t_FmIpcMsg msg; + + ASSERT_COND(p_Fm->guestId == NCSW_MASTER_ID); + ASSERT_COND(p_Fm->h_IpcSessions[p_Fm->intrMng[macEvent].guestId]); + if (p_Fm->intrMng[macEvent].guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_GUEST_ISR; + fmIpcIsr.pendingReg = pendingReg; + fmIpcIsr.boolErr = FALSE; + memcpy(msg.msgBody, &fmIpcIsr, sizeof(fmIpcIsr)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[p_Fm->intrMng[macEvent].guestId], + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(fmIpcIsr), + NULL, + NULL, + NULL, + NULL)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + else + p_Fm->intrMng[macEvent].f_Isr(p_Fm->intrMng[macEvent].h_SrcHandle); +} + +static void BmiErrEvent(t_Fm *p_Fm) +{ + uint32_t event, mask, force; + + event = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ievr); + mask = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier); + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ifr); + if(force & event) + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ifr, 
force & ~event); + + + /* clear the acknowledged events */ + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ievr, event); + + if(event & BMI_ERR_INTR_EN_PIPELINE_ECC) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_PIPELINE_ECC); + if(event & BMI_ERR_INTR_EN_LIST_RAM_ECC) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_LIST_RAM_ECC); + if(event & BMI_ERR_INTR_EN_STATISTICS_RAM_ECC) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_STATISTICS_RAM_ECC); + if(event & BMI_ERR_INTR_EN_DISPATCH_RAM_ECC) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_BMI_DISPATCH_RAM_ECC); +} + +static void QmiErrEvent(t_Fm *p_Fm) +{ + uint32_t event, mask, force; + + event = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_eie); + mask = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_eien); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_eif); + if(force & event) + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eif, force & ~event); + + /* clear the acknowledged events */ + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eie, event); + + if(event & QMI_ERR_INTR_EN_DOUBLE_ECC) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_DOUBLE_ECC); + if(event & QMI_ERR_INTR_EN_DEQ_FROM_DEF) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID); +} + +static void DmaErrEvent(t_Fm *p_Fm) +{ + uint64_t addr=0; + uint32_t status, mask, tmpReg=0; + uint8_t tnum; + uint8_t hardwarePortId; + uint8_t relativePortId; + uint16_t liodn; + + status = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmsr); + mask = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmmr); + + /* get bus error regs befor clearing BER */ + if ((status & DMA_STATUS_BUS_ERR) && (mask & DMA_MODE_BER)) + { + addr = (uint64_t)GET_UINT32(p_Fm->p_FmDmaRegs->fmdmtal); + addr |= ((uint64_t)(GET_UINT32(p_Fm->p_FmDmaRegs->fmdmtah)) << 32); + + /* get information about the owner of that bus error */ + tmpReg = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmtcid); + } + + /* clear set events */ + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmsr, status); + + if ((status & DMA_STATUS_BUS_ERR) && (mask & DMA_MODE_BER)) + { + hardwarePortId = (uint8_t)(((tmpReg & DMA_TRANSFER_PORTID_MASK) >> DMA_TRANSFER_PORTID_SHIFT)); + HW_PORT_ID_TO_SW_PORT_ID(relativePortId, hardwarePortId); + tnum = (uint8_t)((tmpReg & DMA_TRANSFER_TNUM_MASK) >> DMA_TRANSFER_TNUM_SHIFT); + liodn = (uint16_t)(tmpReg & DMA_TRANSFER_LIODN_MASK); + ASSERT_COND(p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] != e_FM_PORT_TYPE_DUMMY); + p_Fm->f_BusError(p_Fm->h_App, p_Fm->p_FmStateStruct->portsTypes[hardwarePortId], relativePortId, addr, tnum, liodn); + } + if(mask & DMA_MODE_ECC) + { + if (status & DMA_STATUS_READ_ECC) + p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_READ_ECC); + if (status & DMA_STATUS_SYSTEM_WRITE_ECC) + p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_SYSTEM_WRITE_ECC); + if (status & DMA_STATUS_FM_WRITE_ECC) + p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_DMA_FM_WRITE_ECC); + } +} + +static void FpmErrEvent(t_Fm *p_Fm) +{ + uint32_t event; + + event = GET_UINT32(p_Fm->p_FmFpmRegs->fpmem); + + /* clear the all occurred events */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmem, event); + + if((event & FPM_EV_MASK_DOUBLE_ECC) && (event & FPM_EV_MASK_DOUBLE_ECC_EN)) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_DOUBLE_ECC); + if((event & FPM_EV_MASK_STALL) && (event & FPM_EV_MASK_STALL_EN)) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_STALL_ON_TASKS); + if((event & FPM_EV_MASK_SINGLE_ECC) && (event & FPM_EV_MASK_SINGLE_ECC_EN)) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_FPM_SINGLE_ECC); +} + +static void MuramErrIntr(t_Fm *p_Fm) +{ + uint32_t event, mask; + + event = 
GET_UINT32(p_Fm->p_FmFpmRegs->fmrcr); + mask = GET_UINT32(p_Fm->p_FmFpmRegs->fmrie); + + /* clear MURAM event bit */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, event & ~FPM_RAM_CTL_IRAM_ECC); + + ASSERT_COND(event & FPM_RAM_CTL_MURAM_ECC); + ASSERT_COND(event & FPM_RAM_CTL_RAMS_ECC_EN); + + if ((mask & FPM_MURAM_ECC_ERR_EX_EN)) + p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_MURAM_ECC); +} + +static void IramErrIntr(t_Fm *p_Fm) +{ + uint32_t event, mask; + + event = GET_UINT32(p_Fm->p_FmFpmRegs->fmrcr) ; + mask = GET_UINT32(p_Fm->p_FmFpmRegs->fmrie); + /* clear the acknowledged events (do not clear IRAM event) */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, event & ~FPM_RAM_CTL_MURAM_ECC); + + ASSERT_COND(event & FPM_RAM_CTL_IRAM_ECC); + ASSERT_COND(event & FPM_RAM_CTL_IRAM_ECC_EN); + + if ((mask & FPM_IRAM_ECC_ERR_EX_EN)) + p_Fm->f_Exception(p_Fm->h_App, e_FM_EX_IRAM_ECC); +} + +static void QmiEvent(t_Fm *p_Fm) +{ + uint32_t event, mask, force; + + event = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_ie); + mask = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_ien); + + event &= mask; + + /* clear the forced events */ + force = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_if); + if(force & event) + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_if, force & ~event); + + /* clear the acknowledged events */ + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_ie, event); + + if(event & QMI_INTR_EN_SINGLE_ECC) + p_Fm->f_Exception(p_Fm->h_App,e_FM_EX_QMI_SINGLE_ECC); +} + +static void UnimplementedIsr(t_Handle h_Arg) +{ + UNUSED(h_Arg); + + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented Isr!")); +} + +static void UnimplementedFmanCtrlIsr(t_Handle h_Arg, uint32_t event) +{ + UNUSED(h_Arg); UNUSED(event); + + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unimplemented FmCtl Isr!")); +} + +static void FmEnableTimeStamp(t_Fm *p_Fm) +{ + uint32_t tmpReg; + uint64_t fraction; + uint32_t integer; + uint8_t count1MicroBit = 8; + uint32_t tsFrequency = (uint32_t)(1 << count1MicroBit); /* in MHz */ + + integer = tsFrequency/p_Fm->p_FmStateStruct->fmClkFreq; + /* we multiply by 2^16 to keep the fraction of the division */ + /* we do not divide back, since we write this value as fraction - see spec */ + fraction = ((tsFrequency << 16) - (integer << 16)*p_Fm->p_FmStateStruct->fmClkFreq)/p_Fm->p_FmStateStruct->fmClkFreq; + /* we check remainder of the division in order to round up if not integer */ + if(((tsFrequency << 16) - (integer << 16)*p_Fm->p_FmStateStruct->fmClkFreq) % p_Fm->p_FmStateStruct->fmClkFreq) + fraction++; + + tmpReg = (integer << FPM_TS_INT_SHIFT) | (uint16_t)fraction; + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmtsc2, tmpReg); + + /* enable timestamp with original clock */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmtsc1, FPM_TS_CTL_EN); + + p_Fm->p_FmStateStruct->count1MicroBit = count1MicroBit; + p_Fm->p_FmStateStruct->enabledTimeStamp = TRUE; +} + +static void FreeInitResources(t_Fm *p_Fm) +{ + if (p_Fm->camBaseAddr) + FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->camBaseAddr)); + if (p_Fm->fifoBaseAddr) + FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->fifoBaseAddr)); + if (p_Fm->resAddr) + FM_MURAM_FreeMem(p_Fm->h_FmMuram, UINT_TO_PTR(p_Fm->resAddr)); +} + +static t_Error ClearIRam(t_Fm *p_Fm) +{ + t_FMIramRegs *p_Iram; + int i; + + ASSERT_COND(p_Fm); + p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM); + + /* Enable the auto-increment */ + WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE); + while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ; + + for (i=0; i < (FM_IRAM_SIZE/4); i++) + WRITE_UINT32(p_Iram->idata, 0xffffffff); + + WRITE_UINT32(p_Iram->iadd, FM_IRAM_SIZE - 4); + CORE_MemoryBarrier(); + while 
(GET_UINT32(p_Iram->idata) != 0xffffffff) ; + + return E_OK; +} + +static t_Error LoadFmanCtrlCode(t_Fm *p_Fm) +{ + t_FMIramRegs *p_Iram; + int i; + uint32_t tmp; + uint8_t compTo16; + + ASSERT_COND(p_Fm); + p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM); + + /* Enable the auto-increment */ + WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE); + while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ; + + for (i=0; i < (p_Fm->p_FmDriverParam->firmware.size / 4); i++) + WRITE_UINT32(p_Iram->idata, p_Fm->p_FmDriverParam->firmware.p_Code[i]); + + compTo16 = (uint8_t)(p_Fm->p_FmDriverParam->firmware.size % 16); + if(compTo16) + for (i=0; i < ((16-compTo16) / 4); i++) + WRITE_UINT32(p_Iram->idata, 0xffffffff); + + WRITE_UINT32(p_Iram->iadd,p_Fm->p_FmDriverParam->firmware.size-4); + while(GET_UINT32(p_Iram->iadd) != (p_Fm->p_FmDriverParam->firmware.size-4)) ; + + /* verify that writing has completed */ + while (GET_UINT32(p_Iram->idata) != p_Fm->p_FmDriverParam->firmware.p_Code[(p_Fm->p_FmDriverParam->firmware.size / 4)-1]) ; + + if (p_Fm->p_FmDriverParam->fwVerify) + { + WRITE_UINT32(p_Iram->iadd, IRAM_IADD_AIE); + while (GET_UINT32(p_Iram->iadd) != IRAM_IADD_AIE) ; + for (i=0; i < (p_Fm->p_FmDriverParam->firmware.size / 4); i++) + if ((tmp=GET_UINT32(p_Iram->idata)) != p_Fm->p_FmDriverParam->firmware.p_Code[i]) + RETURN_ERROR(MAJOR, E_WRITE_FAILED, + ("UCode write error : write 0x%x, read 0x%x", + p_Fm->p_FmDriverParam->firmware.p_Code[i],tmp)); + WRITE_UINT32(p_Iram->iadd, 0x0); + } + + /* Enable patch from IRAM */ + WRITE_UINT32(p_Iram->iready, IRAM_READY); + XX_UDelay(1000); + + DBG(INFO, ("FMan-Controller code (ver %d.%d) loaded to IRAM.", + ((uint8_t *)p_Fm->p_FmDriverParam->firmware.p_Code)[5], + ((uint8_t *)p_Fm->p_FmDriverParam->firmware.p_Code)[7])); + + return E_OK; +} + +static void GuestErrorIsr(t_Fm *p_Fm, uint32_t pending) +{ +#define FM_G_CALL_1G_MAC_ERR_ISR(_id) \ +do { \ + p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].h_SrcHandle);\ +} while (0) +#define FM_G_CALL_10G_MAC_ERR_ISR(_id) \ +do { \ + p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_10G_MAC0+_id)].h_SrcHandle);\ +} while (0) + + /* error interrupts */ + if (pending & ERR_INTR_EN_1G_MAC0) + FM_G_CALL_1G_MAC_ERR_ISR(0); + if (pending & ERR_INTR_EN_1G_MAC1) + FM_G_CALL_1G_MAC_ERR_ISR(1); + if (pending & ERR_INTR_EN_1G_MAC2) + FM_G_CALL_1G_MAC_ERR_ISR(2); + if (pending & ERR_INTR_EN_1G_MAC3) + FM_G_CALL_1G_MAC_ERR_ISR(3); + if (pending & ERR_INTR_EN_1G_MAC4) + FM_G_CALL_1G_MAC_ERR_ISR(4); + if (pending & ERR_INTR_EN_10G_MAC0) + FM_G_CALL_10G_MAC_ERR_ISR(0); +} + +static void GuestEventIsr(t_Fm *p_Fm, uint32_t pending) +{ +#define FM_G_CALL_1G_MAC_TMR_ISR(_id) \ +do { \ + p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0_TMR+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0_TMR+_id)].h_SrcHandle);\ +} while (0) + + if (pending & INTR_EN_1G_MAC0_TMR) + FM_G_CALL_1G_MAC_TMR_ISR(0); + if (pending & INTR_EN_1G_MAC1_TMR) + FM_G_CALL_1G_MAC_TMR_ISR(1); + if (pending & INTR_EN_1G_MAC2_TMR) + FM_G_CALL_1G_MAC_TMR_ISR(2); + if (pending & INTR_EN_1G_MAC3_TMR) + FM_G_CALL_1G_MAC_TMR_ISR(3); + if (pending & INTR_EN_1G_MAC4_TMR) + FM_G_CALL_1G_MAC_TMR_ISR(4); + if(pending & INTR_EN_TMR) + p_Fm->intrMng[e_FM_EV_TMR].f_Isr(p_Fm->intrMng[e_FM_EV_TMR].h_SrcHandle); +} + + +/****************************************/ +/* Inter-Module functions */ 
+/****************************************/ +static t_Error FmGuestHandleIpcMsgCB(t_Handle h_Fm, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_FmIpcMsg *p_IpcMsg = (t_FmIpcMsg*)p_Msg; + + UNUSED(p_Reply); + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((msgLength > sizeof(uint32_t)), E_INVALID_VALUE); + +#ifdef DISABLE_SANITY_CHECKS + UNUSED(msgLength); +#endif /* DISABLE_SANITY_CHECKS */ + + ASSERT_COND(p_Msg); + + *p_ReplyLength = 0; + + switch(p_IpcMsg->msgId) + { + case (FM_GUEST_ISR): + { + t_FmIpcIsr ipcIsr; + + memcpy((uint8_t*)&ipcIsr, p_IpcMsg->msgBody, sizeof(t_FmIpcIsr)); + if(ipcIsr.boolErr) + GuestErrorIsr(p_Fm, ipcIsr.pendingReg); + else + GuestEventIsr(p_Fm, ipcIsr.pendingReg); + break; + } + default: + *p_ReplyLength = 0; + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!")); + } + return E_OK; +} + +static t_Error FmHandleIpcMsgCB(t_Handle h_Fm, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_FmIpcMsg *p_IpcMsg = (t_FmIpcMsg*)p_Msg; + t_FmIpcReply *p_IpcReply = (t_FmIpcReply*)p_Reply; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR((msgLength >= sizeof(uint32_t)), E_INVALID_VALUE); + +#ifdef DISABLE_SANITY_CHECKS + UNUSED(msgLength); +#endif /* DISABLE_SANITY_CHECKS */ + + ASSERT_COND(p_IpcMsg); + + memset(p_IpcReply, 0, (sizeof(uint8_t) * FM_IPC_MAX_REPLY_SIZE)); + *p_ReplyLength = 0; + + switch(p_IpcMsg->msgId) + { + case (FM_GET_SET_PORT_PARAMS): + { + t_FmIpcPortInInitParams ipcInitParams; + t_FmInterModulePortInitParams initParams; + t_FmIpcPhysAddr ipcPhysAddr; + + memcpy((uint8_t*)&ipcInitParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortInInitParams)); + initParams.hardwarePortId = ipcInitParams.hardwarePortId; + initParams.portType = (e_FmPortType)ipcInitParams.enumPortType; + initParams.independentMode = (bool)(ipcInitParams.boolIndependentMode); + initParams.liodnOffset = ipcInitParams.liodnOffset; + initParams.numOfTasks = ipcInitParams.numOfTasks; + initParams.numOfExtraTasks = ipcInitParams.numOfExtraTasks; + initParams.numOfOpenDmas = ipcInitParams.numOfOpenDmas; + initParams.numOfExtraOpenDmas = ipcInitParams.numOfExtraOpenDmas; + initParams.sizeOfFifo = ipcInitParams.sizeOfFifo; + initParams.extraSizeOfFifo = ipcInitParams.extraSizeOfFifo; + initParams.deqPipelineDepth = ipcInitParams.deqPipelineDepth; + initParams.liodnBase = ipcInitParams.liodnBase; + + p_IpcReply->error = (uint32_t)FmGetSetPortParams(h_Fm, &initParams); + ipcPhysAddr.high = initParams.fmMuramPhysBaseAddr.high; + ipcPhysAddr.low = initParams.fmMuramPhysBaseAddr.low; + memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcPhysAddr, sizeof(t_FmIpcPhysAddr)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcPhysAddr); + break; + } + case (FM_SET_SIZE_OF_FIFO): + { + t_FmIpcPortFifoParams ipcPortFifoParams; + t_FmInterModulePortRxPoolsParams rxPoolsParams; + + memcpy((uint8_t*)&ipcPortFifoParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortFifoParams)); + rxPoolsParams.numOfPools = ipcPortFifoParams.numOfPools; + rxPoolsParams.secondLargestBufSize = ipcPortFifoParams.secondLargestBufSize; + rxPoolsParams.largestBufSize = ipcPortFifoParams.largestBufSize; + + p_IpcReply->error = (uint32_t)FmSetSizeOfFifo(h_Fm, ipcPortFifoParams.rsrcParams.hardwarePortId, + (e_FmPortType)ipcPortFifoParams.enumPortType, + (bool)ipcPortFifoParams.boolIndependentMode, + 
&ipcPortFifoParams.rsrcParams.val, + ipcPortFifoParams.rsrcParams.extra, + ipcPortFifoParams.deqPipelineDepth, + &rxPoolsParams, + (bool)ipcPortFifoParams.boolInitialConfig); + memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcPortFifoParams.rsrcParams.val, sizeof(uint32_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_SET_NUM_OF_TASKS): + { + t_FmIpcPortRsrcParams ipcPortRsrcParams; + + memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams)); + p_IpcReply->error = (uint32_t)FmSetNumOfTasks(h_Fm, ipcPortRsrcParams.hardwarePortId, + (uint8_t)ipcPortRsrcParams.val, + (uint8_t)ipcPortRsrcParams.extra, + (bool)ipcPortRsrcParams.boolInitialConfig); + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_SET_NUM_OF_OPEN_DMAS): + { + t_FmIpcPortRsrcParams ipcPortRsrcParams; + + memcpy((uint8_t*)&ipcPortRsrcParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortRsrcParams)); + p_IpcReply->error = (uint32_t)FmSetNumOfOpenDmas(h_Fm, ipcPortRsrcParams.hardwarePortId, + (uint8_t)ipcPortRsrcParams.val, + (uint8_t)ipcPortRsrcParams.extra, + (bool)ipcPortRsrcParams.boolInitialConfig); + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_RESUME_STALLED_PORT): + *p_ReplyLength = sizeof(uint32_t); + p_IpcReply->error = (uint32_t)FmResumeStalledPort(h_Fm, p_IpcMsg->msgBody[0]); + break; + case (FM_MASTER_IS_ALIVE): + { + uint8_t guestId = p_IpcMsg->msgBody[0]; + /* build the FM master partition IPC address */ + memset(p_Fm->fmIpcHandlerModuleName[guestId], 0, (sizeof(char)) * MODULE_NAME_SIZE); + if(Sprint (p_Fm->fmIpcHandlerModuleName[guestId], "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, guestId) != (guestId<10 ? 6:7)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + p_Fm->h_IpcSessions[guestId] = XX_IpcInitSession(p_Fm->fmIpcHandlerModuleName[guestId], p_Fm->fmModuleName); + if (p_Fm->h_IpcSessions[guestId] == NULL) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("FM Master IPC session for guest %d", guestId)); + *(uint8_t*)(p_IpcReply->replyBody) = 1; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + } + case (FM_IS_PORT_STALLED): + { + bool tmp; + + p_IpcReply->error = (uint32_t)FmIsPortStalled(h_Fm, p_IpcMsg->msgBody[0], &tmp); + *(uint8_t*)(p_IpcReply->replyBody) = (uint8_t)tmp; + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + } + case (FM_RESET_MAC): + { + t_FmIpcMacParams ipcMacParams; + + memcpy((uint8_t*)&ipcMacParams, p_IpcMsg->msgBody, sizeof(t_FmIpcMacParams)); + p_IpcReply->error = (uint32_t)FmResetMac(p_Fm, + (e_FmMacType)(ipcMacParams.enumType), + ipcMacParams.id); + *p_ReplyLength = sizeof(uint32_t); + break; + } + case (FM_SET_MAC_MAX_FRAME): + { + t_Error err; + t_FmIpcMacMaxFrameParams ipcMacMaxFrameParams; + + memcpy((uint8_t*)&ipcMacMaxFrameParams, p_IpcMsg->msgBody, sizeof(t_FmIpcMacMaxFrameParams)); + if ((err = FmSetMacMaxFrame(p_Fm, + (e_FmMacType)(ipcMacMaxFrameParams.macParams.enumType), + ipcMacMaxFrameParams.macParams.id, + ipcMacMaxFrameParams.maxFrameLength)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + } + case (FM_GET_CLK_FREQ): + memcpy(p_IpcReply->replyBody, (uint8_t*)&p_Fm->p_FmStateStruct->fmClkFreq, sizeof(uint16_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint16_t); + break; + case (FM_FREE_PORT): + { + t_FmInterModulePortFreeParams portParams; + t_FmIpcPortFreeParams ipcPortParams; + + memcpy((uint8_t*)&ipcPortParams, p_IpcMsg->msgBody, sizeof(t_FmIpcPortFreeParams)); + portParams.hardwarePortId = ipcPortParams.hardwarePortId; + portParams.portType = 
(e_FmPortType)(ipcPortParams.enumPortType); +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + portParams.deqPipelineDepth = ipcPortParams.deqPipelineDepth; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + FmFreePortParams(h_Fm, &portParams); + break; + } + case (FM_REGISTER_INTR): + { + t_FmIpcRegisterIntr ipcRegIntr; + + memcpy((uint8_t*)&ipcRegIntr, p_IpcMsg->msgBody, sizeof(ipcRegIntr)); + p_Fm->intrMng[ipcRegIntr.event].guestId = ipcRegIntr.guestId; + break; + } +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + case (FM_DUMP_REGS): + { + t_Error err; + if ((err = FM_DumpRegs(h_Fm)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + } + case (FM_DUMP_PORT_REGS): + { + t_Error err; + + if ((err = FmDumpPortRegs(h_Fm, p_IpcMsg->msgBody[0])) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + } +#endif /* (defined(DEBUG_ERRORS) && ... */ + case (FM_GET_REV): + { + t_FmRevisionInfo revInfo; + t_FmIpcRevisionInfo ipcRevInfo; + + p_IpcReply->error = (uint32_t)FM_GetRevision(h_Fm, &revInfo); + ipcRevInfo.majorRev = revInfo.majorRev; + ipcRevInfo.minorRev = revInfo.minorRev; + memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcRevInfo, sizeof(t_FmIpcRevisionInfo)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcRevisionInfo); + break; + } + case (FM_DMA_STAT): + { + t_FmDmaStatus dmaStatus; + t_FmIpcDmaStatus ipcDmaStatus; + + FM_GetDmaStatus(h_Fm, &dmaStatus); + ipcDmaStatus.boolCmqNotEmpty = (uint8_t)dmaStatus.cmqNotEmpty; + ipcDmaStatus.boolBusError = (uint8_t)dmaStatus.busError; + ipcDmaStatus.boolReadBufEccError = (uint8_t)dmaStatus.readBufEccError; + ipcDmaStatus.boolWriteBufEccSysError = (uint8_t)dmaStatus.writeBufEccSysError; + ipcDmaStatus.boolWriteBufEccFmError = (uint8_t)dmaStatus.writeBufEccFmError; + memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcDmaStatus, sizeof(t_FmIpcDmaStatus)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus); + break; + } + case (FM_ALLOC_FMAN_CTRL_EVENT_REG): + p_IpcReply->error = (uint32_t)FmAllocFmanCtrlEventReg(h_Fm, (uint8_t*)p_IpcReply->replyBody); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint8_t); + break; + case (FM_FREE_FMAN_CTRL_EVENT_REG): + FmFreeFmanCtrlEventReg(h_Fm, p_IpcMsg->msgBody[0]); + break; + case (FM_GET_TIMESTAMP_SCALE): + { + uint32_t timeStamp = FmGetTimeStampScale(h_Fm); + + memcpy(p_IpcReply->replyBody, (uint8_t*)&timeStamp, sizeof(uint32_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_GET_COUNTER): + { + e_FmCounters inCounter; + uint32_t outCounter; + + memcpy((uint8_t*)&inCounter, p_IpcMsg->msgBody, sizeof(uint32_t)); + outCounter = FM_GetCounter(h_Fm, inCounter); + memcpy(p_IpcReply->replyBody, (uint8_t*)&outCounter, sizeof(uint32_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_SET_FMAN_CTRL_EVENTS_ENABLE): + { + t_FmIpcFmanEvents ipcFmanEvents; + + memcpy((uint8_t*)&ipcFmanEvents, p_IpcMsg->msgBody, sizeof(t_FmIpcFmanEvents)); + FmSetFmanCtrlIntr(h_Fm, + ipcFmanEvents.eventRegId, + ipcFmanEvents.enableEvents); + break; + } + case (FM_GET_FMAN_CTRL_EVENTS_ENABLE): + { + uint32_t tmp = FmGetFmanCtrlIntr(h_Fm, p_IpcMsg->msgBody[0]); + + memcpy(p_IpcReply->replyBody, (uint8_t*)&tmp, sizeof(uint32_t)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(uint32_t); + break; + } + case (FM_GET_PHYS_MURAM_BASE): + { + t_FmPhysAddr physAddr; + t_FmIpcPhysAddr ipcPhysAddr; + + FmGetPhysicalMuramBase(h_Fm, &physAddr); + ipcPhysAddr.high = physAddr.high; + ipcPhysAddr.low = physAddr.low; + memcpy(p_IpcReply->replyBody, (uint8_t*)&ipcPhysAddr, 
sizeof(t_FmIpcPhysAddr)); + *p_ReplyLength = sizeof(uint32_t) + sizeof(t_FmIpcPhysAddr); + break; + } + case (FM_ENABLE_RAM_ECC): + { + t_Error err; + + if (((err = FM_EnableRamsEcc(h_Fm)) != E_OK) || + ((err = FM_SetException(h_Fm, e_FM_EX_IRAM_ECC, TRUE)) != E_OK) || + ((err = FM_SetException(h_Fm, e_FM_EX_MURAM_ECC, TRUE)) != E_OK)) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + } + case (FM_DISABLE_RAM_ECC): + { + t_Error err; + + if (((err = FM_SetException(h_Fm, e_FM_EX_IRAM_ECC, FALSE)) != E_OK) || + ((err = FM_SetException(h_Fm, e_FM_EX_MURAM_ECC, FALSE)) != E_OK) || + ((err = FM_DisableRamsEcc(h_Fm)) != E_OK)) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + } + case (FM_SET_NUM_OF_FMAN_CTRL): + { + t_Error err; + t_FmIpcPortNumOfFmanCtrls ipcPortNumOfFmanCtrls; + + memcpy((uint8_t*)&ipcPortNumOfFmanCtrls, p_IpcMsg->msgBody, sizeof(t_FmIpcPortNumOfFmanCtrls)); + if ((err = FmSetNumOfRiscsPerPort(h_Fm, + ipcPortNumOfFmanCtrls.hardwarePortId, + ipcPortNumOfFmanCtrls.numOfFmanCtrls)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + break; + } +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 + case (FM_10G_TX_ECC_WA): + p_IpcReply->error = (uint32_t)Fm10GTxEccWorkaround(h_Fm, p_IpcMsg->msgBody[0]); + *p_ReplyLength = sizeof(uint32_t); + break; +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + default: + *p_ReplyLength = 0; + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("command not found!!!")); + } + return E_OK; +} + +static void ErrorIsrCB(t_Handle h_Fm) +{ +#define FM_M_CALL_1G_MAC_ERR_ISR(_id) \ + { \ + if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].guestId) \ + SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id), pending); \ + else \ + p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_ERR_1G_MAC0+_id)].h_SrcHandle);\ + } + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t pending; + + SANITY_CHECK_RETURN(h_Fm, E_INVALID_HANDLE); + + /* error interrupts */ + pending = GET_UINT32(p_Fm->p_FmFpmRegs->fmepi); + if (!pending) + return; + + if(pending & ERR_INTR_EN_BMI) + BmiErrEvent(p_Fm); + if(pending & ERR_INTR_EN_QMI) + QmiErrEvent(p_Fm); + if(pending & ERR_INTR_EN_FPM) + FpmErrEvent(p_Fm); + if(pending & ERR_INTR_EN_DMA) + DmaErrEvent(p_Fm); + if(pending & ERR_INTR_EN_IRAM) + IramErrIntr(p_Fm); + if(pending & ERR_INTR_EN_MURAM) + MuramErrIntr(p_Fm); + if(pending & ERR_INTR_EN_PRS) + p_Fm->intrMng[e_FM_EV_ERR_PRS].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_PRS].h_SrcHandle); + if(pending & ERR_INTR_EN_PLCR) + p_Fm->intrMng[e_FM_EV_ERR_PLCR].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_PLCR].h_SrcHandle); + if(pending & ERR_INTR_EN_KG) + p_Fm->intrMng[e_FM_EV_ERR_KG].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_KG].h_SrcHandle); + + /* MAC events may belong to different partitions */ + if(pending & ERR_INTR_EN_1G_MAC0) + FM_M_CALL_1G_MAC_ERR_ISR(0); + if(pending & ERR_INTR_EN_1G_MAC1) + FM_M_CALL_1G_MAC_ERR_ISR(1); + if(pending & ERR_INTR_EN_1G_MAC2) + FM_M_CALL_1G_MAC_ERR_ISR(2); + if(pending & ERR_INTR_EN_1G_MAC3) + FM_M_CALL_1G_MAC_ERR_ISR(3); + if(pending & ERR_INTR_EN_1G_MAC4) + FM_M_CALL_1G_MAC_ERR_ISR(4); + if(pending & ERR_INTR_EN_10G_MAC0) + { + if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_ERR_10G_MAC0].guestId) + SendIpcIsr(p_Fm, e_FM_EV_ERR_10G_MAC0, pending); + else + p_Fm->intrMng[e_FM_EV_ERR_10G_MAC0].f_Isr(p_Fm->intrMng[e_FM_EV_ERR_10G_MAC0].h_SrcHandle); + } +} + + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +t_Error Fm10GTxEccWorkaround(t_Handle h_Fm, uint8_t macId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + int timeout = 1000; + 
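/* Flow, as can be seen below: a guest partition forwards the request to the master over IPC (FM_10G_TX_ECC_WA); the master only accepts it for MAC 0, before the 10G Rx/Tx ports are initialized, and applies the workaround by writing 0x40000000 to fpmextc and polling (bounded by 'timeout') until that bit clears. */ +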
t_Error err = E_OK; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + uint8_t rxHardwarePortId, txHardwarePortId; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_10G_TX_ECC_WA; + msg.msgBody[0] = macId; + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(macId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return (t_Error)(reply.error); + } + + SANITY_CHECK_RETURN_ERROR((macId == 0), E_NOT_SUPPORTED); + SANITY_CHECK_RETURN_ERROR(IsFmanCtrlCodeLoaded(p_Fm), E_INVALID_STATE); + + SW_PORT_ID_TO_HW_PORT_ID(rxHardwarePortId, e_FM_PORT_TYPE_RX_10G, macId); + SW_PORT_ID_TO_HW_PORT_ID(txHardwarePortId, e_FM_PORT_TYPE_TX_10G, macId); + if ((p_Fm->p_FmStateStruct->portsTypes[rxHardwarePortId] != e_FM_PORT_TYPE_DUMMY) || + (p_Fm->p_FmStateStruct->portsTypes[txHardwarePortId] != e_FM_PORT_TYPE_DUMMY)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("MAC should be initialized prior to rx and tx ports!")); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmextc, 0x40000000); + CORE_MemoryBarrier(); + while ((GET_UINT32(p_Fm->p_FmFpmRegs->fpmextc) & 0x40000000) && + --timeout) ; + if (!timeout) + return ERROR_CODE(E_TIMEOUT); + return E_OK; +} +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + +uintptr_t FmGetPcdPrsBaseAddr(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0); + + if(p_Fm->guestId != NCSW_MASTER_ID) + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Guset")); + + return (p_Fm->baseAddr + FM_MM_PRS); +} + +uintptr_t FmGetPcdKgBaseAddr(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0); + + if(p_Fm->guestId != NCSW_MASTER_ID) + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Guset")); + + return (p_Fm->baseAddr + FM_MM_KG); +} + +uintptr_t FmGetPcdPlcrBaseAddr(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0); + + if(p_Fm->guestId != NCSW_MASTER_ID) + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Guset")); + + return (p_Fm->baseAddr + FM_MM_PLCR); +} + +t_Handle FmGetMuramHandle(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, NULL); + + return (p_Fm->h_FmMuram); +} + +void FmGetPhysicalMuramBase(t_Handle h_Fm, t_FmPhysAddr *p_FmPhysAddr) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + t_FmIpcPhysAddr ipcPhysAddr; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_GET_PHYS_MURAM_BASE; + replyLength = sizeof(uint32_t) + sizeof(t_FmPhysAddr); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + { + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + if (replyLength != (sizeof(uint32_t) + sizeof(t_FmPhysAddr))) + { + REPORT_ERROR(MINOR, E_INVALID_VALUE,("IPC reply length mismatch")); + return; + } + memcpy((uint8_t*)&ipcPhysAddr, reply.replyBody, sizeof(t_FmIpcPhysAddr)); + p_FmPhysAddr->high = ipcPhysAddr.high; + p_FmPhysAddr->low = ipcPhysAddr.low; + return ; + } + + /* General FM driver initialization */ + p_FmPhysAddr->low = 
(uint32_t)p_Fm->fmMuramPhysBaseAddr; + p_FmPhysAddr->high = (uint8_t)((p_Fm->fmMuramPhysBaseAddr & 0x000000ff00000000LL) >> 32); +} + +t_Error FmAllocFmanCtrlEventReg(t_Handle h_Fm, uint8_t *p_EventId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint8_t i; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_ALLOC_FMAN_CTRL_EVENT_REG; + replyLength = sizeof(uint32_t) + sizeof(uint8_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + *p_EventId = *(uint8_t*)(reply.replyBody); + + return (t_Error)(reply.error); + } + + for(i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++) + if (!p_Fm->usedEventRegs[i]) + { + p_Fm->usedEventRegs[i] = TRUE; + *p_EventId = i; + break; + } + + if (i==FM_NUM_OF_FMAN_CTRL_EVENT_REGS) + RETURN_ERROR(MAJOR, E_BUSY, ("No resource - Fman controller event register.")); + + return E_OK; +} + +void FmFreeFmanCtrlEventReg(t_Handle h_Fm, uint8_t eventId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_Error err; + t_FmIpcMsg msg; + + if(((t_Fm *)h_Fm)->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_FREE_FMAN_CTRL_EVENT_REG; + msg.msgBody[0] = eventId; + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(eventId), + NULL, + NULL, + NULL, + NULL)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + + ((t_Fm*)h_Fm)->usedEventRegs[eventId] = FALSE; +} + +void FmRegisterIntr(t_Handle h_Fm, + e_FmEventModules module, + uint8_t modId, + e_FmIntrType intrType, + void (*f_Isr) (t_Handle h_Arg), + t_Handle h_Arg) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint8_t event= 0; + t_FmIpcRegisterIntr fmIpcRegisterIntr; + t_Error err; + t_FmIpcMsg msg; + + ASSERT_COND(h_Fm); + + GET_FM_MODULE_EVENT(module, modId,intrType, event); + + /* register in local FM structure */ + ASSERT_COND(event != e_FM_EV_DUMMY_LAST); + p_Fm->intrMng[event].f_Isr = f_Isr; + p_Fm->intrMng[event].h_SrcHandle = h_Arg; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + if(p_Fm->h_IpcSessions[0]) + { + /* register in Master FM structure */ + fmIpcRegisterIntr.event = event; + fmIpcRegisterIntr.guestId = p_Fm->guestId; + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_REGISTER_INTR; + memcpy(msg.msgBody, &fmIpcRegisterIntr, sizeof(fmIpcRegisterIntr)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(fmIpcRegisterIntr), + NULL, + NULL, + NULL, + NULL)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + } + else + DBG(WARNING,("'Register interrupt' - unavailable - No IPC")); + } + +} + +void FmUnregisterIntr(t_Handle h_Fm, + e_FmEventModules module, + uint8_t modId, + e_FmIntrType intrType) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint8_t event= 0; + + ASSERT_COND(h_Fm); + + GET_FM_MODULE_EVENT(module, modId,intrType, event); + + ASSERT_COND(event != e_FM_EV_DUMMY_LAST); + p_Fm->intrMng[event].f_Isr = UnimplementedIsr; + p_Fm->intrMng[event].h_SrcHandle = NULL; +} + +void FmSetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, uint32_t enableEvents) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_FmIpcFmanEvents fmanCtrl; + t_Error err; + t_FmIpcMsg msg; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + fmanCtrl.eventRegId = eventRegId; + fmanCtrl.enableEvents = enableEvents; + 
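/* As in the other accessors in this file, a guest partition does not write the FPM event-enable register directly: the request is packed into a t_FmIpcMsg and sent over IPC session 0 to the master partition, which performs the actual WRITE_UINT32 in the non-guest path below. */ +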
memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_SET_FMAN_CTRL_EVENTS_ENABLE; + memcpy(msg.msgBody, &fmanCtrl, sizeof(fmanCtrl)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(fmanCtrl), + NULL, + NULL, + NULL, + NULL)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + + ASSERT_COND(eventRegId < FM_NUM_OF_FMAN_CTRL_EVENT_REGS); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[eventRegId], enableEvents); +} + +uint32_t FmGetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength, ctrlIntr; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_GET_FMAN_CTRL_EVENTS_ENABLE; + msg.msgBody[0] = eventRegId; + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(eventRegId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + { + REPORT_ERROR(MINOR, err, NO_MSG); + return 0; + } + if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t))) + { + REPORT_ERROR(MINOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return 0; + } + memcpy((uint8_t*)&ctrlIntr, reply.replyBody, sizeof(uint32_t)); + return ctrlIntr; + } + + return GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[eventRegId]); +} + +void FmRegisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Arg, uint32_t event), t_Handle h_Arg) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + ASSERT_COND(eventRegId < FM_NUM_OF_FMAN_CTRL_EVENT_REGS); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + ASSERT_COND(0); + /* TODO */ + } + + p_Fm->fmanCtrlIntr[eventRegId].f_Isr = f_Isr; + p_Fm->fmanCtrlIntr[eventRegId].h_SrcHandle = h_Arg; +} + +void FmUnregisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + ASSERT_COND(eventRegId < FM_NUM_OF_FMAN_CTRL_EVENT_REGS); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + ASSERT_COND(0); + /* TODO */ + } + + p_Fm->fmanCtrlIntr[eventRegId].f_Isr = UnimplementedFmanCtrlIsr; + p_Fm->fmanCtrlIntr[eventRegId].h_SrcHandle = NULL; +} + +void FmRegisterPcd(t_Handle h_Fm, t_Handle h_FmPcd) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + if(p_Fm->h_Pcd) + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("PCD already set")); + + p_Fm->h_Pcd = h_FmPcd; + +} + +void FmUnregisterPcd(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + if(!p_Fm->h_Pcd) + REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("No PCD")); + + p_Fm->h_Pcd = NULL; + +} + +t_Handle FmGetPcdHandle(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + return p_Fm->h_Pcd; +} + +uint8_t FmGetId(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0xff); + + return p_Fm->p_FmStateStruct->fmId; +} + +t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm, uint8_t hardwarePortId, uint8_t numOfFmanCtrls) +{ + + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg = 0; + t_Error err; + t_FmIpcPortNumOfFmanCtrls params; + t_FmIpcMsg msg; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(((numOfFmanCtrls > 0) && (numOfFmanCtrls < 3)) , E_INVALID_HANDLE); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + params.hardwarePortId = hardwarePortId; + params.numOfFmanCtrls = numOfFmanCtrls; + msg.msgId = FM_SET_NUM_OF_FMAN_CTRL; + memcpy(msg.msgBody, &params, sizeof(params)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(params), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); 
+ + return E_OK; + } + + XX_LockSpinlock(p_Fm->h_Spinlock); + + tmpReg = (uint32_t)(hardwarePortId << FPM_PORT_FM_CTL_PORTID_SHIFT); + + /*TODO - maybe to put CTL# according to another criteria*/ + + if(numOfFmanCtrls == 2) + tmpReg = FPM_PORT_FM_CTL2 | FPM_PORT_FM_CTL1; + + /* order restoration */ + if(hardwarePortId%2) + tmpReg |= (FPM_PORT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PORT_FM_CTL1; + else + tmpReg |= (FPM_PORT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) | FPM_PORT_FM_CTL2; + + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmpr, tmpReg); + XX_UnlockSpinlock(p_Fm->h_Spinlock); + + return E_OK; +} + +t_Error FmGetSetPortParams(t_Handle h_Fm,t_FmInterModulePortInitParams *p_PortParams) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + uint8_t hardwarePortId = p_PortParams->hardwarePortId; + t_FmIpcPortInInitParams portInParams; + t_FmIpcPhysAddr ipcPhysAddr; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + portInParams.hardwarePortId = p_PortParams->hardwarePortId; + portInParams.enumPortType = (uint32_t)p_PortParams->portType; + portInParams.boolIndependentMode = (uint8_t)p_PortParams->independentMode; + portInParams.liodnOffset = p_PortParams->liodnOffset; + portInParams.numOfTasks = p_PortParams->numOfTasks; + portInParams.numOfExtraTasks = p_PortParams->numOfExtraTasks; + portInParams.numOfOpenDmas = p_PortParams->numOfOpenDmas; + portInParams.numOfExtraOpenDmas = p_PortParams->numOfExtraOpenDmas; + portInParams.sizeOfFifo = p_PortParams->sizeOfFifo; + portInParams.extraSizeOfFifo = p_PortParams->extraSizeOfFifo; + portInParams.deqPipelineDepth = p_PortParams->deqPipelineDepth; + portInParams.liodnBase = p_PortParams->liodnBase; + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_GET_SET_PORT_PARAMS; + memcpy(msg.msgBody, &portInParams, sizeof(portInParams)); + replyLength = (sizeof(uint32_t) + sizeof(p_PortParams->fmMuramPhysBaseAddr)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(portInParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != (sizeof(uint32_t) + sizeof(p_PortParams->fmMuramPhysBaseAddr))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + memcpy((uint8_t*)&ipcPhysAddr, reply.replyBody, sizeof(t_FmIpcPhysAddr)); + p_PortParams->fmMuramPhysBaseAddr.high = ipcPhysAddr.high; + p_PortParams->fmMuramPhysBaseAddr.low = ipcPhysAddr.low; + + return (t_Error)(reply.error); + } + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + XX_LockSpinlock(p_Fm->h_Spinlock); + + if(p_PortParams->independentMode) + { + /* set port parameters */ + p_Fm->independentMode = p_PortParams->independentMode; + /* disable dispatch limit */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmflc, 0); + } + + if(p_PortParams->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) + { + if(p_Fm->hcPortInitialized) + { + XX_UnlockSpinlock(p_Fm->h_Spinlock); + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Only one host command port is allowed.")); + } + else + p_Fm->hcPortInitialized = TRUE; + } + p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] = p_PortParams->portType; + + err = FmSetNumOfTasks(p_Fm, p_PortParams->hardwarePortId, p_PortParams->numOfTasks, p_PortParams->numOfExtraTasks, TRUE); + if(err) + { + XX_UnlockSpinlock(p_Fm->h_Spinlock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + if((p_PortParams->portType != e_FM_PORT_TYPE_RX) && 
(p_PortParams->portType != e_FM_PORT_TYPE_RX_10G)) + /* for transmit & O/H ports */ + { + uint8_t enqTh; + uint8_t deqTh; + bool update = FALSE; + + /* update qmi ENQ/DEQ threshold */ + p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums += p_PortParams->deqPipelineDepth; + tmpReg = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc); + enqTh = (uint8_t)(tmpReg>>8); + /* if enqTh is too big, we reduce it to the max value that is still OK */ + if(enqTh >= (QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums)) + { + enqTh = (uint8_t)(QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums - 1); + tmpReg &= ~QMI_CFG_ENQ_MASK; + tmpReg |= ((uint32_t)enqTh << 8); + update = TRUE; + } + + deqTh = (uint8_t)tmpReg; + /* if deqTh is too small, we enlarge it to the min value that is still OK. + deqTh may not be larger than 63 (QMI_MAX_NUM_OF_TNUMS-1). */ + if((deqTh <= p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums) && (deqTh < QMI_MAX_NUM_OF_TNUMS-1)) + { + deqTh = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums + 1); + tmpReg &= ~QMI_CFG_DEQ_MASK; + tmpReg |= (uint32_t)deqTh; + update = TRUE; + } + if(update) + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc, tmpReg); + } +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + +#ifdef FM_LOW_END_RESTRICTION + if((hardwarePortId==0x1) || (hardwarePortId==0x29)) + { + if(p_Fm->p_FmStateStruct->lowEndRestriction) + { + XX_UnlockSpinlock(p_Fm->h_Spinlock); + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("OP #0 cannot work with Tx Port #1.")); + } + else + p_Fm->p_FmStateStruct->lowEndRestriction = TRUE; + } +#endif /* FM_LOW_END_RESTRICTION */ + + err = FmSetSizeOfFifo(p_Fm, + p_PortParams->hardwarePortId, + p_PortParams->portType, + p_PortParams->independentMode, + &p_PortParams->sizeOfFifo, + p_PortParams->extraSizeOfFifo, + p_PortParams->deqPipelineDepth, + NULL, + TRUE); + if(err) + { + XX_UnlockSpinlock(p_Fm->h_Spinlock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + err = FmSetNumOfOpenDmas(p_Fm, p_PortParams->hardwarePortId, p_PortParams->numOfOpenDmas, p_PortParams->numOfExtraOpenDmas, TRUE); + if(err) + { + XX_UnlockSpinlock(p_Fm->h_Spinlock); + RETURN_ERROR(MAJOR, err, NO_MSG); + } + + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ppid[hardwarePortId-1], (uint32_t)p_PortParams->liodnOffset); + + tmpReg = (uint32_t)(hardwarePortId << FPM_PORT_FM_CTL_PORTID_SHIFT); + if(p_PortParams->independentMode) + { + if((p_PortParams->portType==e_FM_PORT_TYPE_RX) || (p_PortParams->portType==e_FM_PORT_TYPE_RX_10G)) + tmpReg |= (FPM_PORT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) |FPM_PORT_FM_CTL1; + else + tmpReg |= (FPM_PORT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT) |FPM_PORT_FM_CTL2; + } + else + { + tmpReg |= (FPM_PORT_FM_CTL2|FPM_PORT_FM_CTL1); + + /* order restoration */ + if(hardwarePortId%2) + tmpReg |= (FPM_PORT_FM_CTL1 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT); + else + tmpReg |= (FPM_PORT_FM_CTL2 << FPM_PRC_ORA_FM_CTL_SEL_SHIFT); + } + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmpr, tmpReg); + + { +#ifdef FM_PARTITION_ARRAY + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_Fm, &revInfo); + if (revInfo.majorRev >= 2) +#endif /* FM_PARTITION_ARRAY */ + { + /* set LIODN base for this port */ + tmpReg = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId/2]); + if(hardwarePortId%2) + { + tmpReg &= ~FM_LIODN_BASE_MASK; + tmpReg |= (uint32_t)p_PortParams->liodnBase; + } + else + { + tmpReg &= ~(FM_LIODN_BASE_MASK<< DMA_LIODN_SHIFT); + tmpReg |= (uint32_t)p_PortParams->liodnBase << DMA_LIODN_SHIFT; + } + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmplr[hardwarePortId/2], tmpReg); + } + } + + 
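+    /*
+     * The fmdmplr registers each hold the LIODN base for a pair of ports:
+     * hardware port N and port N+1 share fmdmplr[N/2], with the even port's
+     * base in the upper half (shifted by DMA_LIODN_SHIFT) and the odd port's
+     * base in the lower half, as programmed just above. As an illustration
+     * only, hardware ports 4 and 5 both map to fmdmplr[2].
+     */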
FmGetPhysicalMuramBase(p_Fm, &p_PortParams->fmMuramPhysBaseAddr); + XX_UnlockSpinlock(p_Fm->h_Spinlock); + + return E_OK; +} + +void FmFreePortParams(t_Handle h_Fm,t_FmInterModulePortFreeParams *p_PortParams) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + uint8_t hardwarePortId = p_PortParams->hardwarePortId; + uint8_t numOfTasks; + t_Error err; + t_FmIpcPortFreeParams portParams; + t_FmIpcMsg msg; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + portParams.hardwarePortId = p_PortParams->hardwarePortId; + portParams.enumPortType = (uint32_t)p_PortParams->portType; +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + portParams.deqPipelineDepth = p_PortParams->deqPipelineDepth; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_FREE_PORT; + memcpy(msg.msgBody, &portParams, sizeof(portParams)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(portParams), + NULL, + NULL, + NULL, + NULL)) != E_OK) + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + XX_LockSpinlock(p_Fm->h_Spinlock); + + + if(p_PortParams->portType == e_FM_PORT_TYPE_OH_HOST_COMMAND) + { + ASSERT_COND(p_Fm->hcPortInitialized); + p_Fm->hcPortInitialized = FALSE; + } + + p_Fm->p_FmStateStruct->portsTypes[hardwarePortId] = e_FM_PORT_TYPE_DUMMY; + + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1]); + /* free numOfTasks */ + numOfTasks = (uint8_t)(((tmpReg & BMI_NUM_OF_TASKS_MASK) >> BMI_NUM_OF_TASKS_SHIFT) + 1); + ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfTasks >= numOfTasks); + p_Fm->p_FmStateStruct->accumulatedNumOfTasks -= numOfTasks; + + /* free numOfOpenDmas */ + ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas >= ((tmpReg & BMI_NUM_OF_DMAS_MASK) >> BMI_NUM_OF_DMAS_SHIFT) + 1); + p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas -= (((tmpReg & BMI_NUM_OF_DMAS_MASK) >> BMI_NUM_OF_DMAS_SHIFT) + 1); + + /* update total num of DMA's with committed number of open DMAS, and max uncommitted pool. */ + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK; + tmpReg |= (uint32_t)(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize - 1) << BMI_CFG2_DMAS_SHIFT; + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg2, tmpReg); + + /* free sizeOfFifo */ + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1]); + ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedFifoSize >= + (((tmpReg & BMI_FIFO_SIZE_MASK) + 1) * BMI_FIFO_UNITS)); + p_Fm->p_FmStateStruct->accumulatedFifoSize -= + (((tmpReg & BMI_FIFO_SIZE_MASK) + 1) * BMI_FIFO_UNITS); + + /* clear registers */ + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], 0); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], 0); + /* WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ppid[hardwarePortId-1], 0); */ + +#ifdef FM_PORT_DISABLED_ERRATA_FMANx9 + /* this errata means that when a port is taken down, other port may not use its + * resources for a while as it may still be using it (in case of reject). 
+ */ + { + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + XX_UDelay(100000); + } +#endif /* FM_PORT_DISABLED_ERRATA_FMANx9 */ + +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + if((p_PortParams->portType != e_FM_PORT_TYPE_RX) && (p_PortParams->portType != e_FM_PORT_TYPE_RX_10G)) + /* for transmit & O/H ports */ + { + uint8_t enqTh; + uint8_t deqTh; + + tmpReg = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc); + /* update qmi ENQ/DEQ threshold */ + p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums -= p_PortParams->deqPipelineDepth; + + /* p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums is now smaller, + so we can enlarge enqTh */ + enqTh = (uint8_t)(QMI_MAX_NUM_OF_TNUMS - p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums - 1); + tmpReg &= ~QMI_CFG_ENQ_MASK; + tmpReg |= ((uint32_t)enqTh << QMI_CFG_ENQ_SHIFT); + + /* p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums is now smaller, + so we can reduce deqTh */ + deqTh = (uint8_t)(p_Fm->p_FmStateStruct->accumulatedNumOfDeqTnums + 1); + tmpReg &= ~QMI_CFG_DEQ_MASK; + tmpReg |= (uint32_t)deqTh; + + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc, tmpReg); + } +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + +#ifdef FM_LOW_END_RESTRICTION + if((hardwarePortId==0x1) || (hardwarePortId==0x29)) + p_Fm->p_FmStateStruct->lowEndRestriction = FALSE; +#endif /* FM_LOW_END_RESTRICTION */ + XX_UnlockSpinlock(p_Fm->h_Spinlock); +} + +t_Error FmIsPortStalled(t_Handle h_Fm, uint8_t hardwarePortId, bool *p_IsStalled) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_IS_PORT_STALLED; + msg.msgBody[0] = hardwarePortId; + replyLength = sizeof(uint32_t) + sizeof(uint8_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(hardwarePortId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != (sizeof(uint32_t) + sizeof(uint8_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + *p_IsStalled = (bool)!!(*(uint8_t*)(reply.replyBody)); + + return (t_Error)(reply.error); + } + + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fmfp_ps[hardwarePortId]); + *p_IsStalled = (bool)!!(tmpReg & FPM_PS_STALLED); + + return E_OK; +} + +t_Error FmResumeStalledPort(t_Handle h_Fm, uint8_t hardwarePortId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + t_Error err; + bool isStalled; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_RESUME_STALLED_PORT; + msg.msgBody[0] = hardwarePortId; + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(hardwarePortId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return (t_Error)(reply.error); + } + + /* Get port status */ + err = FmIsPortStalled(h_Fm, hardwarePortId, &isStalled); + if(err) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Can't get port status")); + if (!isStalled) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Port is not stalled")); + + tmpReg = (uint32_t)((hardwarePortId << 
FPM_PORT_FM_CTL_PORTID_SHIFT) | FPM_PRC_REALSE_STALLED); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmpr, tmpReg); + + return E_OK; +} + +t_Error FmResetMac(t_Handle h_Fm, e_FmMacType type, uint8_t macId) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t bitMask, timeout = 1000; + t_FmIpcMacParams macParams; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + if(p_Fm->h_IpcSessions[0]) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + macParams.id = macId; + macParams.enumType = (uint32_t)type; + msg.msgId = FM_RESET_MAC; + memcpy(msg.msgBody, &macParams, sizeof(macParams)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(macParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return (t_Error)(reply.error); + } + else + if(!p_Fm->p_FmFpmRegs) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("No IPC and no registers address")); + } + + /* Get the relevant bit mask */ + if (type == e_FM_MAC_10G) + { + switch(macId) + { + case(0): + bitMask = FPM_RSTC_10G0_RESET; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Illegal MAC Id")); + } + } + else + { + switch(macId) + { + case(0): + bitMask = FPM_RSTC_1G0_RESET; + break; + case(1): + bitMask = FPM_RSTC_1G1_RESET; + break; + case(2): + bitMask = FPM_RSTC_1G2_RESET; + break; + case(3): + bitMask = FPM_RSTC_1G3_RESET; + break; + case(4): + bitMask = FPM_RSTC_1G4_RESET; + break; + default: + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Illegal MAC Id")); + } + } + + /* reset */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrstc, bitMask); + while ((GET_UINT32(p_Fm->p_FmFpmRegs->fmrstc) & bitMask) && + --timeout) ; + if (!timeout) + return ERROR_CODE(E_TIMEOUT); + return E_OK; +} + +t_Error FmSetMacMaxFrame(t_Handle h_Fm, e_FmMacType type, uint8_t macId, uint16_t mtu) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_FmIpcMacMaxFrameParams macMaxFrameLengthParams; + t_Error err; + t_FmIpcMsg msg; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + macMaxFrameLengthParams.macParams.id = macId; + macMaxFrameLengthParams.macParams.enumType = (uint32_t)type; + macMaxFrameLengthParams.maxFrameLength = (uint16_t)mtu; + msg.msgId = FM_SET_MAC_MAX_FRAME; + memcpy(msg.msgBody, &macMaxFrameLengthParams, sizeof(macMaxFrameLengthParams)); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId)+sizeof(macMaxFrameLengthParams), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + return E_OK; + } + +#if (defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)) + if (type == e_FM_MAC_10G) + p_Fm->p_FmStateStruct->macMaxFrameLengths10G[macId] = mtu; + else +#else + UNUSED(type); +#endif /* (defined(FM_MAX_NUM_OF_10G_MACS) && ... */ + p_Fm->p_FmStateStruct->macMaxFrameLengths1G[macId] = mtu; + + return E_OK; +} + +uint16_t FmGetClockFreq(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + /* for MC environment: this depends on the + * fact that fmClkFreq was properly initialized at "init". 
*/ + return p_Fm->p_FmStateStruct->fmClkFreq; +} + +uint32_t FmGetTimeStampScale(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength, timeStamp; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_GET_TIMESTAMP_SCALE; + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if(replyLength != (sizeof(uint32_t) + sizeof(uint32_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + + memcpy((uint8_t*)&timeStamp, reply.replyBody, sizeof(uint32_t)); + return timeStamp; + } + + if(!p_Fm->p_FmStateStruct->enabledTimeStamp) + FmEnableTimeStamp(p_Fm); + + return p_Fm->p_FmStateStruct->count1MicroBit; +} + +bool FmRamsEccIsExternalCtl(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fmrcr); + if(tmpReg & FPM_RAM_CTL_RAMS_ECC_EN_SRC_SEL) + return TRUE; + else + return FALSE; +} + +t_Error FmEnableRamsEcc(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + p_Fm->p_FmStateStruct->ramsEccOwners++; + p_Fm->p_FmStateStruct->internalCall = TRUE; + + return FM_EnableRamsEcc(p_Fm); +} + +t_Error FmDisableRamsEcc(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + ASSERT_COND(p_Fm->p_FmStateStruct->ramsEccOwners); + p_Fm->p_FmStateStruct->ramsEccOwners--; + + if(p_Fm->p_FmStateStruct->ramsEccOwners==0) + { + p_Fm->p_FmStateStruct->internalCall = TRUE; + return FM_DisableRamsEcc(p_Fm); + } + return E_OK; +} + +uint8_t FmGetGuestId(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + return p_Fm->guestId; +} + +bool FmIsMaster(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + return (p_Fm->guestId == NCSW_MASTER_ID); +} + +t_Error FmSetSizeOfFifo(t_Handle h_Fm, + uint8_t hardwarePortId, + e_FmPortType portType, + bool independentMode, + uint32_t *p_SizeOfFifo, + uint32_t extraSizeOfFifo, + uint8_t deqPipelineDepth, + t_FmInterModulePortRxPoolsParams *p_RxPoolsParams, + bool initialConfig) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint8_t relativePortId; + uint16_t macMaxFrameLength = 0, oldVal; + uint32_t minFifoSizeRequired = 0, sizeOfFifo, tmpReg = 0; + t_FmIpcPortFifoParams fifoParams; + t_Error err; + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + ASSERT_COND(initialConfig || p_RxPoolsParams); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + ASSERT_COND(p_RxPoolsParams); + + memset(&fifoParams, 0, sizeof(fifoParams)); + fifoParams.rsrcParams.hardwarePortId = hardwarePortId; + fifoParams.rsrcParams.val = *p_SizeOfFifo; + fifoParams.rsrcParams.extra = extraSizeOfFifo; + fifoParams.enumPortType = (uint32_t)portType; + fifoParams.boolIndependentMode = (uint8_t)independentMode; + fifoParams.deqPipelineDepth = deqPipelineDepth; + fifoParams.numOfPools = p_RxPoolsParams->numOfPools; + fifoParams.secondLargestBufSize = p_RxPoolsParams->secondLargestBufSize; + fifoParams.largestBufSize = p_RxPoolsParams->largestBufSize; + fifoParams.boolInitialConfig = (uint8_t)initialConfig; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_SET_SIZE_OF_FIFO; + memcpy(msg.msgBody, &fifoParams, sizeof(fifoParams)); + 
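+        /*
+         * The master-side computation further below rounds the requested FIFO
+         * size up to whole BMI_FIFO_UNITS and adds a few units of headroom
+         * (3 for independent-mode Tx, 4 for independent-mode Rx, and
+         * deqPipelineDepth+3 for non-independent Tx). Purely as an
+         * illustration, if BMI_FIFO_UNITS were 256 and macMaxFrameLength were
+         * 1518, the frame size would round up to 1536, so an independent-mode
+         * Tx port would need at least 1536 + 3*256 = 2304 bytes of FIFO.
+         */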
replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(fifoParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != (sizeof(uint32_t) + sizeof(uint32_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + memcpy((uint8_t*)p_SizeOfFifo, reply.replyBody, sizeof(uint32_t)); + + return (t_Error)(reply.error); + } + sizeOfFifo = *p_SizeOfFifo; + /* if neseccary (cases where frame length is relevant), update sizeOfFifo field. */ + if((portType == e_FM_PORT_TYPE_TX) || ((portType == e_FM_PORT_TYPE_RX) && independentMode)) + { + HW_PORT_ID_TO_SW_PORT_ID(relativePortId, hardwarePortId); + ASSERT_COND(relativePortId < FM_MAX_NUM_OF_1G_MACS); + macMaxFrameLength = p_Fm->p_FmStateStruct->macMaxFrameLengths1G[relativePortId]; + } + +#if (defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)) + if((portType == e_FM_PORT_TYPE_TX_10G) || ((portType == e_FM_PORT_TYPE_RX_10G) && independentMode)) + { + HW_PORT_ID_TO_SW_PORT_ID(relativePortId, hardwarePortId); + ASSERT_COND(relativePortId < FM_MAX_NUM_OF_10G_MACS); + macMaxFrameLength = p_Fm->p_FmStateStruct->macMaxFrameLengths10G[relativePortId]; + } +#endif /* (defined(FM_MAX_NUM_OF_10G_MACS) && ... */ + + /*************************/ + /* TX PORTS */ + /*************************/ + if((portType == e_FM_PORT_TYPE_TX) || (portType == e_FM_PORT_TYPE_TX_10G)) + { + if(independentMode) + minFifoSizeRequired = (uint32_t)((macMaxFrameLength % BMI_FIFO_UNITS ? + (macMaxFrameLength/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS : + macMaxFrameLength) + + (3*BMI_FIFO_UNITS)); + else + minFifoSizeRequired = (uint32_t)((macMaxFrameLength % BMI_FIFO_UNITS ? + (macMaxFrameLength/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS : + macMaxFrameLength) + + (deqPipelineDepth+3)*BMI_FIFO_UNITS); + } + /*************************/ + /* RX IM PORTS */ + /*************************/ + else if(((portType == e_FM_PORT_TYPE_RX) || (portType == e_FM_PORT_TYPE_RX_10G)) && independentMode) + minFifoSizeRequired = (uint32_t)(((macMaxFrameLength % BMI_FIFO_UNITS) ? + ((macMaxFrameLength/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS) : + macMaxFrameLength) + + (4*BMI_FIFO_UNITS)); + + /* for Rx (non-Im) ports or OP, buffer pools are relevant for fifo size. + If this routine is called as part of the "GetSet" routine, initialConfig is TRUE + and these checks where done in the port routine. + If it is called by an explicit user request ("SetSizeOfFifo"), than these parameters + should be checked/updated */ + if(!initialConfig && + ((portType == e_FM_PORT_TYPE_OH_OFFLINE_PARSING) || + (((portType == e_FM_PORT_TYPE_RX) || (portType == e_FM_PORT_TYPE_RX_10G)) && !independentMode))) + { + if((portType == e_FM_PORT_TYPE_RX) || (portType == e_FM_PORT_TYPE_RX_10G)) + { + /*************************/ + /* RX non-IM PORTS */ + /*************************/ +#ifdef FM_FIFO_ALLOCATION_OLD_ALG + t_FmRevisionInfo revInfo; + + FM_GetRevision(p_Fm, &revInfo); + if(revInfo.majorRev != 4) + minFifoSizeRequired = (uint32_t)(((p_RxPoolsParams->largestBufSize % BMI_FIFO_UNITS) ? + ((p_RxPoolsParams->largestBufSize/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS) : + p_RxPoolsParams->largestBufSize) + + (7*BMI_FIFO_UNITS)); + else +#endif /* FM_FIFO_ALLOCATION_OLD_ALG */ + { + if(p_RxPoolsParams->numOfPools == 1) + minFifoSizeRequired = 8*BMI_FIFO_UNITS; + else + { + minFifoSizeRequired = (uint32_t)(((p_RxPoolsParams->secondLargestBufSize % BMI_FIFO_UNITS) ? 
+ ((p_RxPoolsParams->secondLargestBufSize/BMI_FIFO_UNITS + 1) * BMI_FIFO_UNITS) : + p_RxPoolsParams->secondLargestBufSize) + + (7*BMI_FIFO_UNITS)); + if((sizeOfFifo < minFifoSizeRequired)) + { + DBG(WARNING, ("User set FIFO size for Rx port is not optimized. (not modified by driver)")); + minFifoSizeRequired = 8*BMI_FIFO_UNITS; + } + } + } + } + else + { + /*************************/ + /* OP PORTS */ + /*************************/ + /* check if pool size is not too big */ + if(p_RxPoolsParams->largestBufSize > sizeOfFifo ) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Largest pool size is bigger than ports committed fifo size")); + } + } + + + if (minFifoSizeRequired && (sizeOfFifo < minFifoSizeRequired)) + { + sizeOfFifo = minFifoSizeRequired; + DBG(WARNING, ("FIFO size enlarged to %d for port %#x", minFifoSizeRequired, hardwarePortId)); + } + + if(initialConfig) + oldVal = 0; + else + { + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1]); + /* read into oldVal the current extra fifo size */ + oldVal = (uint16_t)((((tmpReg & BMI_EXTRA_FIFO_SIZE_MASK) + 1)*BMI_FIFO_UNITS) >> BMI_EXTRA_FIFO_SIZE_SHIFT); + } + + if(extraSizeOfFifo > oldVal) + p_Fm->p_FmStateStruct->extraFifoPoolSize = MAX(p_Fm->p_FmStateStruct->extraFifoPoolSize, extraSizeOfFifo); + + if(!initialConfig) + /* read into oldVal the current num of tasks */ + oldVal = (uint16_t)(((tmpReg & BMI_FIFO_SIZE_MASK) + 1)*BMI_FIFO_UNITS); + + /* check that there are enough uncommitted fifo size */ + if((p_Fm->p_FmStateStruct->accumulatedFifoSize - oldVal + sizeOfFifo) > + (p_Fm->p_FmStateStruct->totalFifoSize - p_Fm->p_FmStateStruct->extraFifoPoolSize)) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, ("Requested fifo size and extra size exceed total FIFO size.")); + else + { + /* update acummulated */ + ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedFifoSize >= oldVal); + p_Fm->p_FmStateStruct->accumulatedFifoSize -= oldVal; + p_Fm->p_FmStateStruct->accumulatedFifoSize += sizeOfFifo; + /* calculate reg */ + tmpReg = (uint32_t)((sizeOfFifo/BMI_FIFO_UNITS - 1) | + ((extraSizeOfFifo/BMI_FIFO_UNITS) << BMI_EXTRA_FIFO_SIZE_SHIFT)); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], tmpReg); + } + *p_SizeOfFifo = sizeOfFifo; + + return E_OK; +} + +t_Error FmSetNumOfTasks(t_Handle h_Fm, + uint8_t hardwarePortId, + uint8_t numOfTasks, + uint8_t numOfExtraTasks, + bool initialConfig) +{ + t_Fm *p_Fm = (t_Fm *)h_Fm; + uint8_t oldVal; + uint32_t tmpReg = 0; + t_FmIpcPortRsrcParams rsrcParams; + t_Error err; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + rsrcParams.hardwarePortId = hardwarePortId; + rsrcParams.val = numOfTasks; + rsrcParams.extra = numOfExtraTasks; + rsrcParams.boolInitialConfig = (uint8_t)initialConfig; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_SET_NUM_OF_TASKS; + memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(rsrcParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return (t_Error)(reply.error); + } + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + + if(initialConfig) + oldVal = 0; + else + { + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1]); + /* read into oldVal the current extra tasks 
*/ + oldVal = (uint8_t)((tmpReg & BMI_NUM_OF_EXTRA_TASKS_MASK) >> BMI_EXTRA_NUM_OF_TASKS_SHIFT); + } + + if(numOfExtraTasks > oldVal) + p_Fm->p_FmStateStruct->extraTasksPoolSize = (uint8_t)MAX(p_Fm->p_FmStateStruct->extraTasksPoolSize, numOfExtraTasks); + + if(!initialConfig) + /* read into oldVal the current num of tasks */ + oldVal = (uint8_t)(((tmpReg & BMI_NUM_OF_TASKS_MASK) >> BMI_NUM_OF_TASKS_SHIFT) + 1); + + /* check that there are enough uncommitted tasks */ + if((p_Fm->p_FmStateStruct->accumulatedNumOfTasks - oldVal + numOfTasks) > + (p_Fm->p_FmStateStruct->totalNumOfTasks - p_Fm->p_FmStateStruct->extraTasksPoolSize)) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, + ("Requested numOfTasks and extra tasks pool for fm%d exceed total numOfTasks.", + p_Fm->p_FmStateStruct->fmId)); + else + { + ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfTasks >= oldVal); + /* update acummulated */ + p_Fm->p_FmStateStruct->accumulatedNumOfTasks -= oldVal; + p_Fm->p_FmStateStruct->accumulatedNumOfTasks += numOfTasks; + /* calculate reg */ + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1]) & ~(BMI_NUM_OF_TASKS_MASK | BMI_NUM_OF_EXTRA_TASKS_MASK); + tmpReg |= (uint32_t)(((numOfTasks-1) << BMI_NUM_OF_TASKS_SHIFT) | + (numOfExtraTasks << BMI_EXTRA_NUM_OF_TASKS_SHIFT)); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1],tmpReg); + } + + return E_OK; +} + +t_Error FmSetNumOfOpenDmas(t_Handle h_Fm, + uint8_t hardwarePortId, + uint8_t numOfOpenDmas, + uint8_t numOfExtraOpenDmas, + bool initialConfig) + +{ + t_Fm *p_Fm = (t_Fm *)h_Fm; + uint8_t oldVal; + uint32_t tmpReg = 0; + t_FmIpcPortRsrcParams rsrcParams; + t_Error err; + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + rsrcParams.hardwarePortId = hardwarePortId; + rsrcParams.val = numOfOpenDmas; + rsrcParams.extra = numOfExtraOpenDmas; + rsrcParams.boolInitialConfig = (uint8_t)initialConfig; + + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_SET_NUM_OF_OPEN_DMAS; + memcpy(msg.msgBody, &rsrcParams, sizeof(rsrcParams)); + replyLength = sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) + sizeof(rsrcParams), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != sizeof(uint32_t)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return (t_Error)(reply.error); + } + + ASSERT_COND(IN_RANGE(1, hardwarePortId, 63)); + + if(initialConfig) + oldVal = 0; + else + { + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1]); + /* read into oldVal the current extra tasks */ + oldVal = (uint8_t)((tmpReg & BMI_NUM_OF_EXTRA_DMAS_MASK) >> BMI_EXTRA_NUM_OF_DMAS_SHIFT); + } + + if(numOfExtraOpenDmas > oldVal) + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize = (uint8_t)MAX(p_Fm->p_FmStateStruct->extraOpenDmasPoolSize, numOfExtraOpenDmas); + + if(!initialConfig) + /* read into oldVal the current num of tasks */ + oldVal = (uint8_t)(((tmpReg & BMI_NUM_OF_DMAS_MASK) >> BMI_NUM_OF_DMAS_SHIFT) + 1); + + /* check that there are enough uncommitted open DMA's */ + ASSERT_COND(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas >= oldVal); + if((p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas - oldVal + numOfOpenDmas) > + p_Fm->p_FmStateStruct->maxNumOfOpenDmas) + RETURN_ERROR(MAJOR, E_NOT_AVAILABLE, + ("Requested numOfOpenDmas for fm%d exceeds total numOfOpenDmas.", + p_Fm->p_FmStateStruct->fmId)); + else + { + /* 
update accumulated */
+        p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas -= oldVal;
+        p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas += numOfOpenDmas;
+        /* calculate reg */
+        tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1]) & ~(BMI_NUM_OF_DMAS_MASK | BMI_NUM_OF_EXTRA_DMAS_MASK);
+        tmpReg |= (uint32_t)(((numOfOpenDmas-1) << BMI_NUM_OF_DMAS_SHIFT) |
+                             (numOfExtraOpenDmas << BMI_EXTRA_NUM_OF_DMAS_SHIFT));
+        WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], tmpReg);
+
+        /* update total num of DMA's with committed number of open DMAS, and max uncommitted pool. */
+        tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg2) & ~BMI_CFG2_DMAS_MASK;
+        tmpReg |= (uint32_t)(p_Fm->p_FmStateStruct->accumulatedNumOfOpenDmas + p_Fm->p_FmStateStruct->extraOpenDmasPoolSize - 1) << BMI_CFG2_DMAS_SHIFT;
+        WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg2, tmpReg);
+    }
+
+    return E_OK;
+}
+
+#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0))
+t_Error FmDumpPortRegs (t_Handle h_Fm,uint8_t hardwarePortId)
+{
+    t_Fm            *p_Fm = (t_Fm *)h_Fm;
+    t_FmIpcMsg      msg;
+    t_Error         err;
+
+    DECLARE_DUMP;
+
+    if(p_Fm->guestId != NCSW_MASTER_ID)
+    {
+        memset(&msg, 0, sizeof(msg));
+        msg.msgId = FM_DUMP_PORT_REGS;
+        msg.msgBody[0] = hardwarePortId;
+        if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
+                                     (uint8_t*)&msg,
+                                     sizeof(msg.msgId)+sizeof(hardwarePortId),
+                                     NULL,
+                                     NULL,
+                                     NULL,
+                                     NULL)) != E_OK)
+            RETURN_ERROR(MINOR, err, NO_MSG);
+        return E_OK;
+    }
+
+    SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE);
+
+    DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], ("fmbm_pp for port %u", (hardwarePortId)));
+    DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_pp[hardwarePortId-1], sizeof(uint32_t));
+
+    DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], ("fmbm_pfs for port %u", (hardwarePortId)));
+    DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_pfs[hardwarePortId-1], sizeof(uint32_t));
+
+    DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_ppid[hardwarePortId-1], ("bm_ppid for port %u", (hardwarePortId)));
+    DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_ppid[hardwarePortId-1], sizeof(uint32_t));
+
+    return E_OK;
+}
+#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */
+
+
+/*****************************************************************************/
+/*                       API Init unit functions                             */
+/*****************************************************************************/
+t_Handle FM_Config(t_FmParams *p_FmParam)
+{
+    t_Fm        *p_Fm;
+    uint8_t     i;
+    uintptr_t   baseAddr;
+
+    SANITY_CHECK_RETURN_VALUE(p_FmParam, E_NULL_POINTER, NULL);
+    SANITY_CHECK_RETURN_VALUE(((p_FmParam->firmware.p_Code && p_FmParam->firmware.size) ||
+                               (!p_FmParam->firmware.p_Code && !p_FmParam->firmware.size)),
+                              E_INVALID_VALUE, NULL);
+
+    baseAddr = p_FmParam->baseAddr;
+
+    /* Allocate FM structure */
+    p_Fm = (t_Fm *) XX_Malloc(sizeof(t_Fm));
+    if (!p_Fm)
+    {
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM driver structure"));
+        return NULL;
+    }
+    memset(p_Fm, 0, sizeof(t_Fm));
+
+    p_Fm->p_FmStateStruct = (t_FmStateStruct *) XX_Malloc(sizeof(t_FmStateStruct));
+    if (!p_Fm->p_FmStateStruct)
+    {
+        XX_Free(p_Fm);
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM Status structure"));
+        return NULL;
+    }
+    memset(p_Fm->p_FmStateStruct, 0, sizeof(t_FmStateStruct));
+
+    /* Initialize FM parameters which will be kept by the driver */
+    p_Fm->p_FmStateStruct->fmId = p_FmParam->fmId;
+    p_Fm->guestId = p_FmParam->guestId;
+
+    for(i=0; i<FM_MAX_NUM_OF_HW_PORT_IDS; i++)
+        p_Fm->p_FmStateStruct->portsTypes[i] = e_FM_PORT_TYPE_DUMMY;
+
+    /* Allocate the FM driver's parameters structure */
+    p_Fm->p_FmDriverParam = (t_FmDriverParam *)XX_Malloc(sizeof(t_FmDriverParam));
+    if (!p_Fm->p_FmDriverParam)
+    {
+        XX_Free(p_Fm->p_FmStateStruct);
+        XX_Free(p_Fm);
+        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM driver parameters"));
+        return NULL;
+    }
+    memset(p_Fm->p_FmDriverParam, 0, sizeof(t_FmDriverParam));
+
+    /* Initialize FM parameters which will be kept by the driver */
+    p_Fm->p_FmStateStruct->fmId = p_FmParam->fmId;
+    p_Fm->h_FmMuram = p_FmParam->h_FmMuram;
+    p_Fm->h_App = p_FmParam->h_App;
+    p_Fm->p_FmStateStruct->fmClkFreq = p_FmParam->fmClkFreq;
+    p_Fm->f_Exception = p_FmParam->f_Exception;
+    p_Fm->f_BusError = p_FmParam->f_BusError;
+    p_Fm->p_FmFpmRegs = (t_FmFpmRegs *)UINT_TO_PTR(baseAddr + FM_MM_FPM);
+    p_Fm->p_FmBmiRegs = (t_FmBmiRegs *)UINT_TO_PTR(baseAddr + FM_MM_BMI);
+    p_Fm->p_FmQmiRegs = (t_FmQmiRegs *)UINT_TO_PTR(baseAddr + FM_MM_QMI);
+    p_Fm->p_FmDmaRegs = (t_FmDmaRegs *)UINT_TO_PTR(baseAddr + FM_MM_DMA);
+    p_Fm->baseAddr = baseAddr;
+    p_Fm->p_FmStateStruct->irq = p_FmParam->irq;
+    p_Fm->p_FmStateStruct->errIrq = p_FmParam->errIrq;
+    p_Fm->hcPortInitialized = FALSE;
+    p_Fm->independentMode = FALSE;
+    p_Fm->p_FmStateStruct->ramsEccEnable = FALSE;
+    p_Fm->p_FmStateStruct->totalNumOfTasks = DEFAULT_totalNumOfTasks;
+    p_Fm->p_FmStateStruct->totalFifoSize = DEFAULT_totalFifoSize;
+    p_Fm->p_FmStateStruct->maxNumOfOpenDmas = DEFAULT_maxNumOfOpenDmas;
+    p_Fm->p_FmStateStruct->extraFifoPoolSize = FM_MAX_NUM_OF_RX_PORTS*BMI_FIFO_UNITS;
+    p_Fm->p_FmStateStruct->exceptions = DEFAULT_exceptions;
+    for(i = 0; i<FM_MAX_NUM_OF_1G_MACS; i++)
+        p_Fm->p_FmStateStruct->macMaxFrameLengths1G[i] = DEFAULT_mtu;
+#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)
+    for(i = 0; i<FM_MAX_NUM_OF_10G_MACS; i++)
+        p_Fm->p_FmStateStruct->macMaxFrameLengths10G[i] = DEFAULT_mtu;
+#endif /*defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS)*/
+
+    p_Fm->h_Spinlock = XX_InitSpinlock();
+    if (!p_Fm->h_Spinlock)
+    {
+        XX_Free(p_Fm->p_FmDriverParam);
+        XX_Free(p_Fm->p_FmStateStruct);
+        XX_Free(p_Fm);
+        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("can't allocate spinlock!"));
+        return NULL;
+    }
+
+#ifdef FM_PARTITION_ARRAY
+    /* Initialize FM driver parameters (for initialization phase only) */
+    memcpy(p_Fm->p_FmDriverParam->liodnBasePerPort, p_FmParam->liodnBasePerPort, FM_SIZE_OF_LIODN_TABLE*sizeof(uint16_t));
+#endif /* FM_PARTITION_ARRAY */
+
+    /*p_Fm->p_FmDriverParam->numOfPartitions = p_FmParam->numOfPartitions; */
+    p_Fm->p_FmDriverParam->enCounters = FALSE;
+
+    p_Fm->p_FmDriverParam->resetOnInit = DEFAULT_resetOnInit;
+
+    p_Fm->p_FmDriverParam->thresholds.dispLimit = DEFAULT_dispLimit;
+    p_Fm->p_FmDriverParam->thresholds.prsDispTh = DEFAULT_prsDispTh;
+    p_Fm->p_FmDriverParam->thresholds.plcrDispTh = DEFAULT_plcrDispTh;
+    p_Fm->p_FmDriverParam->thresholds.kgDispTh = DEFAULT_kgDispTh;
+    p_Fm->p_FmDriverParam->thresholds.bmiDispTh = DEFAULT_bmiDispTh;
+    p_Fm->p_FmDriverParam->thresholds.qmiEnqDispTh = DEFAULT_qmiEnqDispTh;
+    p_Fm->p_FmDriverParam->thresholds.qmiDeqDispTh = DEFAULT_qmiDeqDispTh;
+    p_Fm->p_FmDriverParam->thresholds.fmCtl1DispTh = DEFAULT_fmCtl1DispTh;
+    p_Fm->p_FmDriverParam->thresholds.fmCtl2DispTh = DEFAULT_fmCtl2DispTh;
+
+    p_Fm->p_FmDriverParam->dmaStopOnBusError = DEFAULT_dmaStopOnBusError;
+
+    p_Fm->p_FmDriverParam->dmaCacheOverride = DEFAULT_cacheOverride;
+    p_Fm->p_FmDriverParam->dmaAidMode = DEFAULT_aidMode;
+    p_Fm->p_FmDriverParam->dmaAidOverride = DEFAULT_aidOverride;
+    p_Fm->p_FmDriverParam->dmaAxiDbgNumOfBeats = DEFAULT_axiDbgNumOfBeats;
+    p_Fm->p_FmDriverParam->dmaCamNumOfEntries = DEFAULT_dmaCamNumOfEntries;
+    p_Fm->p_FmDriverParam->dmaWatchdog = DEFAULT_dmaWatchdog;
+
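+    /*
+     * FM_Config() only allocates the driver object and seeds p_FmDriverParam
+     * with the DEFAULT_* values set in this block; nothing is written to the
+     * hardware yet. A typical caller (sketch only, error handling and actual
+     * values omitted) overrides whatever it needs and then commits the
+     * configuration with FM_Init():
+     *
+     *     h_Fm = FM_Config(&fmParams);
+     *     FM_ConfigTotalFifoSize(h_Fm, 128 * 1024);
+     *     FM_ConfigResetOnInit(h_Fm, TRUE);
+     *     FM_Init(h_Fm);
+     */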
+ p_Fm->p_FmDriverParam->dmaCommQThresholds.clearEmergency = DEFAULT_dmaCommQLow; + p_Fm->p_FmDriverParam->dmaCommQThresholds.assertEmergency = DEFAULT_dmaCommQHigh; + p_Fm->p_FmDriverParam->dmaReadBufThresholds.clearEmergency = DEFAULT_dmaReadIntBufLow; + p_Fm->p_FmDriverParam->dmaReadBufThresholds.assertEmergency = DEFAULT_dmaReadIntBufHigh; + p_Fm->p_FmDriverParam->dmaWriteBufThresholds.clearEmergency = DEFAULT_dmaWriteIntBufLow; + p_Fm->p_FmDriverParam->dmaWriteBufThresholds.assertEmergency = DEFAULT_dmaWriteIntBufHigh; + p_Fm->p_FmDriverParam->dmaSosEmergency = DEFAULT_dmaSosEmergency; + + p_Fm->p_FmDriverParam->dmaDbgCntMode = DEFAULT_dmaDbgCntMode; + + p_Fm->p_FmDriverParam->dmaEnEmergency = FALSE; + p_Fm->p_FmDriverParam->dmaEnEmergencySmoother = FALSE; + p_Fm->p_FmDriverParam->catastrophicErr = DEFAULT_catastrophicErr; + p_Fm->p_FmDriverParam->dmaErr = DEFAULT_dmaErr; + p_Fm->p_FmDriverParam->haltOnExternalActivation = DEFAULT_haltOnExternalActivation; + p_Fm->p_FmDriverParam->haltOnUnrecoverableEccError = DEFAULT_haltOnUnrecoverableEccError; + p_Fm->p_FmDriverParam->enIramTestMode = FALSE; + p_Fm->p_FmDriverParam->enMuramTestMode = FALSE; + p_Fm->p_FmDriverParam->externalEccRamsEnable = DEFAULT_externalEccRamsEnable; + + p_Fm->p_FmDriverParam->fwVerify = DEFAULT_VerifyUcode; + p_Fm->p_FmDriverParam->firmware.size = p_FmParam->firmware.size; + if (p_Fm->p_FmDriverParam->firmware.size) + { + p_Fm->p_FmDriverParam->firmware.p_Code = (uint32_t *)XX_Malloc(p_Fm->p_FmDriverParam->firmware.size); + if (!p_Fm->p_FmDriverParam->firmware.p_Code) + { + XX_FreeSpinlock(p_Fm->h_Spinlock); + XX_Free(p_Fm->p_FmStateStruct); + XX_Free(p_Fm->p_FmDriverParam); + XX_Free(p_Fm); + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM firmware code")); + return NULL; + } + memcpy(p_Fm->p_FmDriverParam->firmware.p_Code, p_FmParam->firmware.p_Code, p_Fm->p_FmDriverParam->firmware.size); + } + + return p_Fm; +} + +/**************************************************************************//** + @Function FM_Init + + @Description Initializes the FM module + + @Param[in] h_Fm - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_Init(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_FmDriverParam *p_FmDriverParam = NULL; + t_Error err = E_OK; + uint32_t tmpReg, cfgReg = 0; + int i; + uint16_t periodInFmClocks; + uint8_t remainder; + t_FmRevisionInfo revInfo; + + SANITY_CHECK_RETURN_ERROR(h_Fm, E_INVALID_HANDLE); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + uint8_t isMasterAlive; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + + /* build the FM guest partition IPC address */ + if(Sprint (p_Fm->fmModuleName, "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, p_Fm->guestId) != (p_Fm->guestId<10 ? 
6:7))
+        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
+
+    /* build the FM master partition IPC address */
+    memset(p_Fm->fmIpcHandlerModuleName, 0, (sizeof(char)) * MODULE_NAME_SIZE);
+    if(Sprint (p_Fm->fmIpcHandlerModuleName[0], "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, NCSW_MASTER_ID) != 6)
+        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed"));
+
+    for(i=0;i<e_FM_EV_DUMMY_LAST;i++)
+        p_Fm->intrMng[i].f_Isr = UnimplementedIsr;
+
+    p_Fm->h_IpcSessions[0] = XX_IpcInitSession(p_Fm->fmIpcHandlerModuleName[0], p_Fm->fmModuleName);
+    if (p_Fm->h_IpcSessions[0])
+    {
+        err = XX_IpcRegisterMsgHandler(p_Fm->fmModuleName, FmGuestHandleIpcMsgCB, p_Fm, FM_IPC_MAX_REPLY_SIZE);
+        if(err)
+            RETURN_ERROR(MAJOR, err, NO_MSG);
+
+        memset(&msg, 0, sizeof(msg));
+        memset(&reply, 0, sizeof(reply));
+        msg.msgId = FM_MASTER_IS_ALIVE;
+        msg.msgBody[0] = p_Fm->guestId;
+        replyLength = sizeof(uint32_t) + sizeof(uint8_t);
+        do
+        {
+            blockingFlag = TRUE;
+            if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
+                                         (uint8_t*)&msg,
+                                         sizeof(msg.msgId)+sizeof(p_Fm->guestId),
+                                         (uint8_t*)&reply,
+                                         &replyLength,
+                                         IpcMsgCompletionCB,
+                                         h_Fm)) != E_OK)
+                REPORT_ERROR(MINOR, err, NO_MSG);
+            while(blockingFlag) ;
+            if(replyLength != (sizeof(uint32_t) + sizeof(uint8_t)))
+                REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
+            isMasterAlive = *(uint8_t*)(reply.replyBody);
+        } while (!isMasterAlive);
+
+        memset(&msg, 0, sizeof(msg));
+        memset(&reply, 0, sizeof(reply));
+        msg.msgId = FM_GET_CLK_FREQ;
+        replyLength = sizeof(uint32_t) + sizeof(p_Fm->p_FmStateStruct->fmClkFreq);
+        if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0],
+                                     (uint8_t*)&msg,
+                                     sizeof(msg.msgId),
+                                     (uint8_t*)&reply,
+                                     &replyLength,
+                                     NULL,
+                                     NULL)) != E_OK)
+            RETURN_ERROR(MAJOR, err, NO_MSG);
+        if(replyLength != (sizeof(uint32_t) + sizeof(p_Fm->p_FmStateStruct->fmClkFreq)))
+            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch"));
+        memcpy((uint8_t*)&p_Fm->p_FmStateStruct->fmClkFreq, reply.replyBody, sizeof(uint16_t));
+    }
+    else
+    {
+        DBG(WARNING, ("FM Guest mode - without IPC"));
+        if(!p_Fm->p_FmStateStruct->fmClkFreq)
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("No fmClkFreq configured for guest without IPC"));
+        if(!p_Fm->baseAddr)
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("No baseAddr configured for guest without IPC"));
+    }
+
+    XX_Free(p_Fm->p_FmDriverParam);
+    p_Fm->p_FmDriverParam = NULL;
+
+    if ((p_Fm->guestId == NCSW_MASTER_ID) ||
+        (p_Fm->h_IpcSessions[0]))
+    {
+        FM_DisableRamsEcc(p_Fm);
+        FmMuramClear(p_Fm->h_FmMuram);
+        FM_EnableRamsEcc(p_Fm);
+    }
+
+    return E_OK;
+    }
+
+    SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE);
+
+    FM_GetRevision(p_Fm, &revInfo);
+
+#ifdef FM_NO_DISPATCH_RAM_ECC
+    if (revInfo.majorRev != 4)
+        p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_BMI_DISPATCH_RAM_ECC;
+#endif /* FM_NO_DISPATCH_RAM_ECC */
+
+#ifdef FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8
+    if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
+        p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_BMI_LIST_RAM_ECC;
+#endif /* FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8 */
+
+#ifdef FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9
+    if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
+        p_Fm->p_FmStateStruct->exceptions &= ~FM_EX_BMI_PIPELINE_ECC;
+#endif /* FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9 */
+
+#ifdef FM_QMI_NO_ECC_EXCEPTIONS
+    if (revInfo.majorRev == 4)
+        p_Fm->p_FmStateStruct->exceptions &= ~(FM_EX_QMI_SINGLE_ECC | FM_EX_QMI_DOUBLE_ECC);
+#endif /* FM_QMI_NO_ECC_EXCEPTIONS */
+
+    CHECK_INIT_PARAMETERS(p_Fm, CheckFmParameters);
+
+    p_FmDriverParam = p_Fm->p_FmDriverParam;
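+    /*
+     * Master-partition initialization proceeds, in outline: clear MURAM,
+     * optionally reset the FMan and load the FMan-controller firmware into
+     * IRAM, program the DMA, FPM, BMI and QMI register blocks from the
+     * configuration gathered above, hook the event and error interrupts,
+     * register the IPC message handler for guest partitions, and finally
+     * enable the BMI and QMI blocks.
+     */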
+
+    FmMuramClear(p_Fm->h_FmMuram);
+
+#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
+    if (p_FmDriverParam->resetOnInit)
+    {
+        t_FMIramRegs    *p_Iram = (t_FMIramRegs *)UINT_TO_PTR(p_Fm->baseAddr + FM_MM_IMEM);
+        uint32_t        debug_reg;
+
+        /* write to IRAM first location the debug instruction */
+        WRITE_UINT32(p_Iram->iadd, 0);
+        while (GET_UINT32(p_Iram->iadd) != 0) ;
+        WRITE_UINT32(p_Iram->idata, FM_UCODE_DEBUG_INSTRUCTION);
+
+        WRITE_UINT32(p_Iram->iadd, 0);
+        while (GET_UINT32(p_Iram->iadd) != 0) ;
+        while (GET_UINT32(p_Iram->idata) != FM_UCODE_DEBUG_INSTRUCTION) ;
+
+        /* Enable patch from IRAM */
+        WRITE_UINT32(p_Iram->iready, IRAM_READY);
+        XX_UDelay(100);
+
+        /* reset FMAN */
+        WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrstc, FPM_RSTC_FM_RESET);
+        XX_UDelay(100);
+
+        /* verify breakpoint debug status register */
+        debug_reg = GET_UINT32(*(uint32_t *)UINT_TO_PTR(p_Fm->baseAddr + FM_DEBUG_STATUS_REGISTER_OFFSET));
+#ifndef NCSW_LINUX
+        if(!debug_reg)
+            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Invalid debug status register value = 0"));
+#else
+        if(!debug_reg)
+            DBG(INFO,("Invalid debug status register value = 0"));
+#endif
+        /*************************************/
+        /* Load FMan-Controller code to Iram */
+        /*************************************/
+        if (ClearIRam(p_Fm) != E_OK)
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+        if (p_Fm->p_FmDriverParam->firmware.p_Code &&
+            (LoadFmanCtrlCode(p_Fm) != E_OK))
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+        XX_UDelay(100);
+
+        /* reset FMAN again to start the microcode */
+        WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrstc, FPM_RSTC_FM_RESET);
+        XX_UDelay(1000);
+    }
+    else
+    {
+#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
+        if(p_FmDriverParam->resetOnInit)
+        {
+            WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrstc, FPM_RSTC_FM_RESET);
+            XX_UDelay(100);
+        }
+
+        /*************************************/
+        /* Load FMan-Controller code to Iram */
+        /*************************************/
+        if (ClearIRam(p_Fm) != E_OK)
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+        if (p_Fm->p_FmDriverParam->firmware.p_Code &&
+            (LoadFmanCtrlCode(p_Fm) != E_OK))
+            RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
+#ifdef FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173
+    }
+#endif /* FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */
+
+#ifdef FM_CAPWAP_SUPPORT
+    /* save first 256 byte in MURAM */
+    p_Fm->resAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram, 256, 0));
+    if (!p_Fm->resAddr)
+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for reserved Area failed"));
+
+    WRITE_BLOCK(UINT_TO_PTR(p_Fm->resAddr), 0, 256);
+#endif /* FM_CAPWAP_SUPPORT */
+
+    /* General FM driver initialization */
+    p_Fm->fmMuramPhysBaseAddr = (uint64_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->baseAddr + FM_MM_MURAM)));
+    for(i=0;i<e_FM_EV_DUMMY_LAST;i++)
+        p_Fm->intrMng[i].f_Isr = UnimplementedIsr;
+    for(i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++)
+        p_Fm->fmanCtrlIntr[i].f_Isr = UnimplementedFmanCtrlIsr;
+
+    /**********************/
+    /* Init DMA Registers */
+    /**********************/
+    /* clear status reg events */
+    tmpReg = (DMA_STATUS_BUS_ERR | DMA_STATUS_READ_ECC | DMA_STATUS_SYSTEM_WRITE_ECC | DMA_STATUS_FM_WRITE_ECC);
+    /*tmpReg |= (DMA_STATUS_SYSTEM_DPEXT_ECC | DMA_STATUS_FM_DPEXT_ECC | DMA_STATUS_SYSTEM_DPDAT_ECC | DMA_STATUS_FM_DPDAT_ECC | DMA_STATUS_FM_SPDAT_ECC);*/
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmsr, GET_UINT32(p_Fm->p_FmDmaRegs->fmdmsr) | tmpReg);
+
+    /* configure mode register */
+    tmpReg = 0;
+    tmpReg |= p_FmDriverParam->dmaCacheOverride << DMA_MODE_CACHE_OR_SHIFT;
+    if(p_FmDriverParam->dmaAidOverride)
+        tmpReg |= DMA_MODE_AID_OR;
+    if (p_Fm->p_FmStateStruct->exceptions & FM_EX_DMA_BUS_ERROR)
+        tmpReg |= DMA_MODE_BER;
+    if ((p_Fm->p_FmStateStruct->exceptions & FM_EX_DMA_SYSTEM_WRITE_ECC) | (p_Fm->p_FmStateStruct->exceptions & FM_EX_DMA_READ_ECC) | (p_Fm->p_FmStateStruct->exceptions & FM_EX_DMA_FM_WRITE_ECC))
+        tmpReg |= DMA_MODE_ECC;
+    if(p_FmDriverParam->dmaStopOnBusError)
+        tmpReg |= DMA_MODE_SBER;
+    tmpReg |= (uint32_t)(p_FmDriverParam->dmaAxiDbgNumOfBeats - 1) << DMA_MODE_AXI_DBG_SHIFT;
+    if (p_FmDriverParam->dmaEnEmergency)
+    {
+        tmpReg |= p_FmDriverParam->dmaEmergency.emergencyBusSelect;
+        tmpReg |= p_FmDriverParam->dmaEmergency.emergencyLevel << DMA_MODE_EMERGENCY_LEVEL_SHIFT;
+        if(p_FmDriverParam->dmaEnEmergencySmoother)
+            WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmemsr, p_FmDriverParam->dmaEmergencySwitchCounter);
+    }
+    tmpReg |= ((p_FmDriverParam->dmaCamNumOfEntries/DMA_CAM_UNITS) - 1) << DMA_MODE_CEN_SHIFT;
+
+    tmpReg |= DMA_MODE_SECURE_PROT;
+    tmpReg |= p_FmDriverParam->dmaDbgCntMode << DMA_MODE_DBG_SHIFT;
+    tmpReg |= p_FmDriverParam->dmaAidMode << DMA_MODE_AID_MODE_SHIFT;
+
+#ifdef FM_PEDANTIC_DMA
+    tmpReg |= DMA_MODE_EMERGENCY_READ;
+#endif /* FM_PEDANTIC_DMA */
+
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmmr, tmpReg);
+
+    /* configure thresholds register */
+    tmpReg = ((uint32_t)p_FmDriverParam->dmaCommQThresholds.assertEmergency << DMA_THRESH_COMMQ_SHIFT) |
+             ((uint32_t)p_FmDriverParam->dmaReadBufThresholds.assertEmergency << DMA_THRESH_READ_INT_BUF_SHIFT) |
+             ((uint32_t)p_FmDriverParam->dmaWriteBufThresholds.assertEmergency);
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmtr, tmpReg);
+
+    /* configure hysteresis register */
+    tmpReg = ((uint32_t)p_FmDriverParam->dmaCommQThresholds.clearEmergency << DMA_THRESH_COMMQ_SHIFT) |
+             ((uint32_t)p_FmDriverParam->dmaReadBufThresholds.clearEmergency << DMA_THRESH_READ_INT_BUF_SHIFT) |
+             ((uint32_t)p_FmDriverParam->dmaWriteBufThresholds.clearEmergency);
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmhy, tmpReg);
+
+    /* configure emergency threshold */
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmsetr, p_FmDriverParam->dmaSosEmergency);
+
+    /* configure Watchdog */
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmwcr, USEC_TO_CLK(p_FmDriverParam->dmaWatchdog, p_Fm->p_FmStateStruct->fmClkFreq));
+
+    /* Allocate MURAM for CAM */
+    p_Fm->camBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
+                                                      (uint32_t)(p_FmDriverParam->dmaCamNumOfEntries*DMA_CAM_SIZEOF_ENTRY),
+                                                      DMA_CAM_ALIGN));
+    if (!p_Fm->camBaseAddr)
+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for DMA CAM failed"));
+
+    WRITE_BLOCK(UINT_TO_PTR(p_Fm->camBaseAddr), 0, (uint32_t)(p_FmDriverParam->dmaCamNumOfEntries*DMA_CAM_SIZEOF_ENTRY));
+
+    /* VirtToPhys */
+    WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmebcr,
+                 (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->camBaseAddr)) - p_Fm->fmMuramPhysBaseAddr));
+
+#ifdef FM_PARTITION_ARRAY
+    {
+        t_FmRevisionInfo revInfo;
+        FM_GetRevision(p_Fm, &revInfo);
+        if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0))
+            /* liodn-partitions */
+            for (i=0 ; i<FM_SIZE_OF_LIODN_TABLE ; i+=2)
+            {
+                tmpReg = (((uint32_t)p_FmDriverParam->liodnBasePerPort[i] << DMA_LIODN_SHIFT) |
+                           (uint32_t)p_FmDriverParam->liodnBasePerPort[i+1]);
+                WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmplr[i/2], tmpReg);
+            }
+    }
+#endif /* FM_PARTITION_ARRAY */
+
+    /**********************/
+    /* Init FPM Registers */
+    /**********************/
+    tmpReg = (uint32_t)(p_FmDriverParam->thresholds.dispLimit << FPM_DISP_LIMIT_SHIFT);
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmflc, tmpReg);
+
+    tmpReg = (((uint32_t)p_FmDriverParam->thresholds.prsDispTh << FPM_THR1_PRS_SHIFT) |
+              ((uint32_t)p_FmDriverParam->thresholds.kgDispTh << FPM_THR1_KG_SHIFT) |
+              ((uint32_t)p_FmDriverParam->thresholds.plcrDispTh << FPM_THR1_PLCR_SHIFT) |
+              ((uint32_t)p_FmDriverParam->thresholds.bmiDispTh << FPM_THR1_BMI_SHIFT));
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmdis1, tmpReg);
+
+    tmpReg = (((uint32_t)p_FmDriverParam->thresholds.qmiEnqDispTh << FPM_THR2_QMI_ENQ_SHIFT) |
+              ((uint32_t)p_FmDriverParam->thresholds.qmiDeqDispTh << FPM_THR2_QMI_DEQ_SHIFT) |
+              ((uint32_t)p_FmDriverParam->thresholds.fmCtl1DispTh << FPM_THR2_FM_CTL1_SHIFT) |
+              ((uint32_t)p_FmDriverParam->thresholds.fmCtl2DispTh << FPM_THR2_FM_CTL2_SHIFT));
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmdis2, tmpReg);
+
+    /* define exceptions and error behavior */
+    tmpReg = 0;
+    /* Clear events */
+    tmpReg |= (FPM_EV_MASK_STALL | FPM_EV_MASK_DOUBLE_ECC | FPM_EV_MASK_SINGLE_ECC);
+    /* enable interrupts */
+    if(p_Fm->p_FmStateStruct->exceptions & FM_EX_FPM_STALL_ON_TASKS)
+        tmpReg |= FPM_EV_MASK_STALL_EN;
+    if(p_Fm->p_FmStateStruct->exceptions & FM_EX_FPM_SINGLE_ECC)
+        tmpReg |= FPM_EV_MASK_SINGLE_ECC_EN;
+    if(p_Fm->p_FmStateStruct->exceptions & FM_EX_FPM_DOUBLE_ECC)
+        tmpReg |= FPM_EV_MASK_DOUBLE_ECC_EN;
+    tmpReg |= (p_Fm->p_FmDriverParam->catastrophicErr << FPM_EV_MASK_CAT_ERR_SHIFT);
+    tmpReg |= (p_Fm->p_FmDriverParam->dmaErr << FPM_EV_MASK_DMA_ERR_SHIFT);
+    if(!p_Fm->p_FmDriverParam->haltOnExternalActivation)
+        tmpReg |= FPM_EV_MASK_EXTERNAL_HALT;
+    if(!p_Fm->p_FmDriverParam->haltOnUnrecoverableEccError)
+        tmpReg |= FPM_EV_MASK_ECC_ERR_HALT;
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmem, tmpReg);
+
+    /* clear all fmCtls event registers */
+    for(i=0;i<FM_NUM_OF_FMAN_CTRL_EVENT_REGS;i++)
+        WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmcev[i], 0xFFFFFFFF);
+
+    /* RAM ECC - enable and clear events */
+    /* first we need to clear all parser memory, as it is uninitialized and
+       may cause ECC errors */
+    tmpReg = 0;
+    /* event bits */
+    tmpReg = (FPM_RAM_CTL_MURAM_ECC | FPM_RAM_CTL_IRAM_ECC);
+    /* Rams enable is not affected by the RCR bit, but by a COP configuration */
+    if(p_Fm->p_FmDriverParam->externalEccRamsEnable)
+        tmpReg |= FPM_RAM_CTL_RAMS_ECC_EN_SRC_SEL;
+
+    /* enable test mode */
+    if(p_FmDriverParam->enMuramTestMode)
+        tmpReg |= FPM_RAM_CTL_MURAM_TEST_ECC;
+    if(p_FmDriverParam->enIramTestMode)
+        tmpReg |= FPM_RAM_CTL_IRAM_TEST_ECC;
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, tmpReg);
+
+    tmpReg = 0;
+    if(p_Fm->p_FmStateStruct->exceptions & FM_EX_IRAM_ECC)
+    {
+        tmpReg |= FPM_IRAM_ECC_ERR_EX_EN;
+        FmEnableRamsEcc(p_Fm);
+    }
+    if(p_Fm->p_FmStateStruct->exceptions & FM_EX_NURAM_ECC)
+    {
+        tmpReg |= FPM_MURAM_ECC_ERR_EX_EN;
+        FmEnableRamsEcc(p_Fm);
+    }
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrie, tmpReg);
+
+    /**********************/
+    /* Init BMI Registers */
+    /**********************/
+
+    /* define common resources */
+    /* allocate MURAM for FIFO according to total size */
+    p_Fm->fifoBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(p_Fm->h_FmMuram,
+                                                       p_Fm->p_FmStateStruct->totalFifoSize,
+                                                       BMI_FIFO_ALIGN));
+    if (!p_Fm->fifoBaseAddr)
+    {
+        FreeInitResources(p_Fm);
+        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM alloc for FIFO failed"));
+    }
+
+    tmpReg = (uint32_t)(XX_VirtToPhys(UINT_TO_PTR(p_Fm->fifoBaseAddr)) - p_Fm->fmMuramPhysBaseAddr);
+    tmpReg = tmpReg / BMI_FIFO_ALIGN;
+
+    tmpReg |= ((p_Fm->p_FmStateStruct->totalFifoSize/BMI_FIFO_UNITS - 1) << BMI_CFG1_FIFO_SIZE_SHIFT);
+    WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg1, tmpReg);
+
+    tmpReg = ((uint32_t)(p_Fm->p_FmStateStruct->totalNumOfTasks - 1) << BMI_CFG2_TASKS_SHIFT);
+    /* num of DMA's will be dynamically updated when each port is set */
+    WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg2, tmpReg);
+
+    /* define unmaskable exceptions, enable and clear events */
+    tmpReg = 0;
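+    /*
+     * As with the FPM and QMI blocks, the first write below acknowledges any
+     * stale ECC events in fmbm_ievr; tmpReg then accumulates only the
+     * exceptions the user actually enabled and is written to fmbm_ier as the
+     * error-interrupt enable mask.
+     */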
WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ievr, (BMI_ERR_INTR_EN_LIST_RAM_ECC | + BMI_ERR_INTR_EN_PIPELINE_ECC | + BMI_ERR_INTR_EN_STATISTICS_RAM_ECC | + BMI_ERR_INTR_EN_DISPATCH_RAM_ECC)); + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_LIST_RAM_ECC) + tmpReg |= BMI_ERR_INTR_EN_LIST_RAM_ECC; + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_PIPELINE_ECC) + tmpReg |= BMI_ERR_INTR_EN_PIPELINE_ECC; + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_STATISTICS_RAM_ECC) + tmpReg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_DISPATCH_RAM_ECC) + tmpReg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier, tmpReg); + + /**********************/ + /* Init QMI Registers */ + /**********************/ + /* Clear error interrupt events */ + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eie, (QMI_ERR_INTR_EN_DOUBLE_ECC | QMI_ERR_INTR_EN_DEQ_FROM_DEF)); + tmpReg = 0; + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID) + tmpReg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DOUBLE_ECC) + tmpReg |= QMI_ERR_INTR_EN_DOUBLE_ECC; + /* enable events */ + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eien, tmpReg); + + if(p_Fm->p_FmDriverParam->tnumAgingPeriod) + { + /* tnumAgingPeriod is in units of microseconds, p_FmClockFreq is in Mhz */ + periodInFmClocks = (uint16_t)(p_Fm->p_FmDriverParam->tnumAgingPeriod*p_Fm->p_FmStateStruct->fmClkFreq); + /* periodInFmClocks must be a 64 multiply */ + remainder = (uint8_t)(periodInFmClocks % 64); + if (remainder > 64) + tmpReg = (uint32_t)((periodInFmClocks/64) + 1); + else + { + tmpReg = (uint32_t)(periodInFmClocks/64); + if(!tmpReg) + tmpReg = 1; + } + tmpReg <<= QMI_TAPC_TAP; + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_tapc, tmpReg); + + } + tmpReg = 0; + /* Clear interrupt events */ + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_ie, QMI_INTR_EN_SINGLE_ECC); + if(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_SINGLE_ECC) + tmpReg |= QMI_INTR_EN_SINGLE_ECC; + /* enable events */ + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_ien, tmpReg); + + /* clear & enable global counters - calculate reg and save for later, + because it's the same reg for QMI enable */ + if(p_Fm->p_FmDriverParam->enCounters) + cfgReg = QMI_CFG_EN_COUNTERS; +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + cfgReg |= (uint32_t)(((QMI_DEF_TNUMS_THRESH) << 8) | (uint32_t)QMI_DEF_TNUMS_THRESH); +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + + if (p_Fm->p_FmStateStruct->irq != NO_IRQ) + { + XX_SetIntr(p_Fm->p_FmStateStruct->irq, FM_EventIsr, p_Fm); + XX_EnableIntr(p_Fm->p_FmStateStruct->irq); + } + + if (p_Fm->p_FmStateStruct->errIrq != NO_IRQ) + { + XX_SetIntr(p_Fm->p_FmStateStruct->errIrq, ErrorIsrCB, p_Fm); + XX_EnableIntr(p_Fm->p_FmStateStruct->errIrq); + } + + /* build the FM master partition IPC address */ + if (Sprint (p_Fm->fmModuleName, "FM_%d_%d",p_Fm->p_FmStateStruct->fmId, NCSW_MASTER_ID) != 6) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Sprint failed")); + + err = XX_IpcRegisterMsgHandler(p_Fm->fmModuleName, FmHandleIpcMsgCB, p_Fm, FM_IPC_MAX_REPLY_SIZE); + if(err) + RETURN_ERROR(MAJOR, err, NO_MSG); + + /**********************/ + /* Enable all modules */ + /**********************/ + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_init, BMI_INIT_START); + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc, cfgReg | QMI_CFG_ENQ_EN | QMI_CFG_DEQ_EN); + + if (p_Fm->p_FmDriverParam->firmware.p_Code) + { + XX_Free(p_Fm->p_FmDriverParam->firmware.p_Code); + p_Fm->p_FmDriverParam->firmware.p_Code = NULL; + } + + XX_Free(p_Fm->p_FmDriverParam); + 
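+    /*
+     * Once the hardware has been programmed, the config-time parameter block
+     * is released; from this point on the FM_ConfigXxx() routines further
+     * below fail their p_FmDriverParam sanity check, so all configuration
+     * calls must be made between FM_Config() and FM_Init().
+     */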
p_Fm->p_FmDriverParam = NULL; + + return E_OK; +} + +/**************************************************************************//** + @Function FM_Free + + @Description Frees all resources that were assigned to FM module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_Fm - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_Free(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + if (p_Fm->guestId != NCSW_MASTER_ID) + { + XX_IpcUnregisterMsgHandler(p_Fm->fmModuleName); + + if(!p_Fm->recoveryMode) + XX_Free(p_Fm->p_FmStateStruct); + + XX_Free(p_Fm); + + return E_OK; + } + + /* disable BMI and QMI */ + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_init, 0); + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc, 0); + + /* release BMI resources */ + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg2, 0); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_cfg1, 0); + + /* disable ECC */ + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, 0); + + if ((p_Fm->guestId == NCSW_MASTER_ID) && (p_Fm->fmModuleName[0] != 0)) + XX_IpcUnregisterMsgHandler(p_Fm->fmModuleName); + + if (p_Fm->p_FmStateStruct) + { + if (p_Fm->p_FmStateStruct->irq != NO_IRQ) + { + XX_DisableIntr(p_Fm->p_FmStateStruct->irq); + XX_FreeIntr(p_Fm->p_FmStateStruct->irq); + } + if (p_Fm->p_FmStateStruct->errIrq != NO_IRQ) + { + XX_DisableIntr(p_Fm->p_FmStateStruct->errIrq); + XX_FreeIntr(p_Fm->p_FmStateStruct->errIrq); + } + } + + if (p_Fm->h_Spinlock) + XX_FreeSpinlock(p_Fm->h_Spinlock); + + if (p_Fm->p_FmDriverParam) + { + if (p_Fm->p_FmDriverParam->firmware.p_Code) + XX_Free(p_Fm->p_FmDriverParam->firmware.p_Code); + XX_Free(p_Fm->p_FmDriverParam); + p_Fm->p_FmDriverParam = NULL; + } + + FreeInitResources(p_Fm); + + if (!p_Fm->recoveryMode && p_Fm->p_FmStateStruct) + XX_Free(p_Fm->p_FmStateStruct); + + XX_Free(p_Fm); + + return E_OK; +} + +/*************************************************/ +/* API Advanced Init unit functions */ +/*************************************************/ + +t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable) +{ + + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->resetOnInit = enable; + + return E_OK; +} + + +t_Error FM_ConfigTotalNumOfTasks(t_Handle h_Fm, uint8_t totalNumOfTasks) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmStateStruct->totalNumOfTasks = totalNumOfTasks; + + return E_OK; +} + +t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmStateStruct->totalFifoSize = totalFifoSize; + + return E_OK; +} + +t_Error FM_ConfigMaxNumOfOpenDmas(t_Handle h_Fm, uint8_t maxNumOfOpenDmas) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmStateStruct->maxNumOfOpenDmas = maxNumOfOpenDmas; + + return E_OK; +} + +t_Error FM_ConfigThresholds(t_Handle h_Fm, t_FmThresholds *p_FmThresholds) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + 
SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + memcpy(&p_Fm->p_FmDriverParam->thresholds, p_FmThresholds, sizeof(t_FmThresholds)); + + return E_OK; +} + +t_Error FM_ConfigDmaCacheOverride(t_Handle h_Fm, e_FmDmaCacheOverride cacheOverride) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaCacheOverride = cacheOverride; + + return E_OK; +} + +t_Error FM_ConfigDmaAidOverride(t_Handle h_Fm, bool aidOverride) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaAidOverride = aidOverride; + + return E_OK; +} + +t_Error FM_ConfigDmaAidMode(t_Handle h_Fm, e_FmDmaAidMode aidMode) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaAidMode = aidMode; + + return E_OK; +} + +t_Error FM_ConfigDmaAxiDbgNumOfBeats(t_Handle h_Fm, uint8_t axiDbgNumOfBeats) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaAxiDbgNumOfBeats = axiDbgNumOfBeats; + + return E_OK; +} + +t_Error FM_ConfigDmaCamNumOfEntries(t_Handle h_Fm, uint8_t numOfEntries) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaCamNumOfEntries = numOfEntries; + + return E_OK; +} + +t_Error FM_ConfigDmaWatchdog(t_Handle h_Fm, uint32_t watchdogValue) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + +#ifdef FM_NO_WATCHDOG + { + t_FmRevisionInfo revInfo; + FM_GetRevision(h_Fm, &revInfo); + if (revInfo.majorRev != 4) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("watchdog!")); + } +#endif /* FM_NO_WATCHDOG */ + + p_Fm->p_FmDriverParam->dmaWatchdog = watchdogValue; + + return E_OK; +} + +t_Error FM_ConfigDmaWriteBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds) + +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + memcpy(&p_Fm->p_FmDriverParam->dmaWriteBufThresholds, p_FmDmaThresholds, sizeof(t_FmDmaThresholds)); + + return E_OK; +} + +t_Error FM_ConfigDmaCommQThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + memcpy(&p_Fm->p_FmDriverParam->dmaCommQThresholds, p_FmDmaThresholds, sizeof(t_FmDmaThresholds)); + + return E_OK; +} + +t_Error FM_ConfigDmaReadBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + memcpy(&p_Fm->p_FmDriverParam->dmaReadBufThresholds, p_FmDmaThresholds, sizeof(t_FmDmaThresholds)); + + return E_OK; +} + +t_Error FM_ConfigDmaEmergency(t_Handle h_Fm, t_FmDmaEmergency *p_Emergency) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + 
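+
+/* Several of the configuration calls above (FM_ConfigDmaWatchdog in
+ * particular) gate a feature on the silicon revision before accepting it.  A
+ * condensed sketch of that pattern, using only FM_GetRevision() from this
+ * file; 'requiredMajorRev' is an illustrative parameter:
+ */
+static t_Error FmCheckRevisionSupports(t_Handle h_Fm, uint8_t requiredMajorRev)
+{
+    t_FmRevisionInfo revInfo;
+
+    FM_GetRevision(h_Fm, &revInfo);    /* reads fm_ip_rev_1, or asks the master partition over IPC */
+    if (revInfo.majorRev != requiredMajorRev)
+        RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("feature not available on this FM revision"));
+    return E_OK;
+}
+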
SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaEnEmergency = TRUE; + memcpy(&p_Fm->p_FmDriverParam->dmaEmergency, p_Emergency, sizeof(t_FmDmaEmergency)); + + return E_OK; +} + +t_Error FM_ConfigDmaEmergencySmoother(t_Handle h_Fm, uint32_t emergencyCnt) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + if(!p_Fm->p_FmDriverParam->dmaEnEmergency) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FM_ConfigEnDmaEmergencySmoother may be called only after FM_ConfigEnDmaEmergency")); + + p_Fm->p_FmDriverParam->dmaEnEmergencySmoother = TRUE; + p_Fm->p_FmDriverParam->dmaEmergencySwitchCounter = emergencyCnt; + + return E_OK; +} + +t_Error FM_ConfigDmaDbgCounter(t_Handle h_Fm, e_FmDmaDbgCntMode fmDmaDbgCntMode) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaDbgCntMode = fmDmaDbgCntMode; + + return E_OK; +} + +t_Error FM_ConfigDmaStopOnBusErr(t_Handle h_Fm, bool stop) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaStopOnBusError = stop; + + return E_OK; +} + +t_Error FM_ConfigDmaSosEmergencyThreshold(t_Handle h_Fm, uint32_t dmaSosEmergency) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaSosEmergency = dmaSosEmergency; + + return E_OK; +} + +t_Error FM_ConfigEnableCounters(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->enCounters = TRUE; + + return E_OK; +} + +t_Error FM_ConfigDmaErr(t_Handle h_Fm, e_FmDmaErr dmaErr) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->dmaErr = dmaErr; + + return E_OK; +} + +t_Error FM_ConfigCatastrophicErr(t_Handle h_Fm, e_FmCatastrophicErr catastrophicErr) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->catastrophicErr = catastrophicErr; + + return E_OK; +} + +t_Error FM_ConfigEnableMuramTestMode(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->enMuramTestMode = TRUE; + + return E_OK; +} + +t_Error FM_ConfigEnableIramTestMode(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->enIramTestMode = TRUE; + + return E_OK; +} + +t_Error FM_ConfigHaltOnExternalActivation(t_Handle h_Fm, bool enable) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + +#ifdef FM_HALT_SIG_ERRATA_GEN12 + { + t_FmRevisionInfo revInfo; + FM_GetRevision(h_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + 
RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("HaltOnExternalActivation!")); + } +#endif /* FM_HALT_SIG_ERRATA_GEN12 */ + + p_Fm->p_FmDriverParam->haltOnExternalActivation = enable; + + return E_OK; +} + +t_Error FM_ConfigHaltOnUnrecoverableEccError(t_Handle h_Fm, bool enable) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + +#ifdef FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008 + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("HaltOnEccError!")); +#endif /* FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008 */ + + p_Fm->p_FmDriverParam->haltOnUnrecoverableEccError = enable; + + return E_OK; +} + +t_Error FM_ConfigException(t_Handle h_Fm, e_FmExceptions exception, bool enable) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t bitMask = 0; + t_FmRevisionInfo revInfo; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + FM_GetRevision(p_Fm, &revInfo); +#ifdef FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9 + if((exception == e_FM_EX_BMI_PIPELINE_ECC) && (enable)) + { + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_BMI_PIPELINE_ECC!")); + return E_OK; + } + } +#endif /* FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9 */ +#ifdef FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8 + if((exception == e_FM_EX_BMI_LIST_RAM_ECC) && (enable)) + { + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_BMI_LIST_RAM_ECC!")); + return E_OK; + } + } +#endif /* FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8 */ +#ifdef FM_QMI_NO_ECC_EXCEPTIONS + if(((exception == e_FM_EX_QMI_SINGLE_ECC) || (exception == e_FM_EX_QMI_DOUBLE_ECC)) && + enable) + { + if (revInfo.majorRev == 4) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("QMI ECC exception!")); + return E_OK; + } + } +#endif /* FM_QMI_NO_ECC_EXCEPTIONS */ +#ifdef FM_NO_DISPATCH_RAM_ECC + if((exception == e_FM_EX_BMI_DISPATCH_RAM_ECC) && (enable)) + { + if (revInfo.majorRev != 4) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_BMI_DISPATCH_RAM_ECC!")); + return E_OK; + } + } +#endif /* FM_NO_DISPATCH_RAM_ECC */ + + GET_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_Fm->p_FmStateStruct->exceptions |= bitMask; + else + p_Fm->p_FmStateStruct->exceptions &= ~bitMask; + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +t_Error FM_ConfigExternalEccRamsEnable(t_Handle h_Fm, bool enable) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + p_Fm->p_FmDriverParam->externalEccRamsEnable = enable; + + return E_OK; +} + +t_Error FM_ConfigTnumAgingPeriod(t_Handle h_Fm, uint16_t tnumAgingPeriod) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; +#ifdef FM_NO_TNUM_AGING + t_FmRevisionInfo revInfo; +#endif /* FM_NO_TNUM_AGING */ + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + +#ifdef FM_NO_TNUM_AGING + FM_GetRevision(h_Fm, &revInfo); + if (revInfo.majorRev != 4) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("FM_ConfigTnumAgingPeriod!")); +#endif /* FM_NO_TNUM_AGING */ + + p_Fm->p_FmDriverParam->tnumAgingPeriod = tnumAgingPeriod; + + return E_OK; + +} + +/****************************************************/ +/* API Run-time Control uint functions */ +/****************************************************/ +t_Handle FM_GetPcdHandle(t_Handle h_Fm) +{ + SANITY_CHECK_RETURN_VALUE(h_Fm, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(!((t_Fm*)h_Fm)->p_FmDriverParam, E_INVALID_STATE, NULL); + + return ((t_Fm*)h_Fm)->h_Pcd; +} + +void 
FM_EventIsr(t_Handle h_Fm) +{ +#define FM_M_CALL_1G_MAC_TMR_ISR(_id) \ + { \ + if (p_Fm->guestId != p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0_TMR+_id)].guestId) \ + SendIpcIsr(p_Fm, (e_FmInterModuleEvent)(e_FM_EV_1G_MAC0_TMR+_id), pending); \ + else \ + p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0_TMR+_id)].f_Isr(p_Fm->intrMng[(e_FmInterModuleEvent)(e_FM_EV_1G_MAC0_TMR+_id)].h_SrcHandle);\ + } + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t pending, event; + + SANITY_CHECK_RETURN(h_Fm, E_INVALID_HANDLE); + + /* normal interrupts */ + pending = GET_UINT32(p_Fm->p_FmFpmRegs->fmnpi); + ASSERT_COND(pending); + if (pending & INTR_EN_BMI) + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("BMI Event - undefined!")); + if (pending & INTR_EN_QMI) + QmiEvent(p_Fm); + if (pending & INTR_EN_PRS) + p_Fm->intrMng[e_FM_EV_PRS].f_Isr(p_Fm->intrMng[e_FM_EV_PRS].h_SrcHandle); + if (pending & INTR_EN_PLCR) + p_Fm->intrMng[e_FM_EV_PLCR].f_Isr(p_Fm->intrMng[e_FM_EV_PLCR].h_SrcHandle); + if (pending & INTR_EN_KG) + p_Fm->intrMng[e_FM_EV_KG].f_Isr(p_Fm->intrMng[e_FM_EV_KG].h_SrcHandle); + if (pending & INTR_EN_TMR) + p_Fm->intrMng[e_FM_EV_TMR].f_Isr(p_Fm->intrMng[e_FM_EV_TMR].h_SrcHandle); + + /* MAC events may belong to different partitions */ + if (pending & INTR_EN_1G_MAC0_TMR) + FM_M_CALL_1G_MAC_TMR_ISR(0); + if (pending & INTR_EN_1G_MAC1_TMR) + FM_M_CALL_1G_MAC_TMR_ISR(1); + if (pending & INTR_EN_1G_MAC2_TMR) + FM_M_CALL_1G_MAC_TMR_ISR(2); + if (pending & INTR_EN_1G_MAC3_TMR) + FM_M_CALL_1G_MAC_TMR_ISR(3); + if (pending & INTR_EN_1G_MAC4_TMR) + FM_M_CALL_1G_MAC_TMR_ISR(4); + + /* IM port events may belong to different partitions */ + if (pending & INTR_EN_REV0) + { + event = GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcev[0]) & GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[0]); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmcev[0], event); + if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_0].guestId) + /*TODO IPC ISR For Fman Ctrl */ + ASSERT_COND(0); + /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_0, pending); */ + else + p_Fm->fmanCtrlIntr[0].f_Isr(p_Fm->fmanCtrlIntr[0].h_SrcHandle, event); + + } + if (pending & INTR_EN_REV1) + { + event = GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcev[1]) & GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[1]); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmcev[1], event); + if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_1].guestId) + /*TODO IPC ISR For Fman Ctrl */ + ASSERT_COND(0); + /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_1, pending); */ + else + p_Fm->fmanCtrlIntr[1].f_Isr(p_Fm->fmanCtrlIntr[1].h_SrcHandle, event); + + } + if (pending & INTR_EN_REV2) + { + event = GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcev[2]) & GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[2]); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmcev[2], event); + if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_2].guestId) + /*TODO IPC ISR For Fman Ctrl */ + ASSERT_COND(0); + /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_2, pending); */ + else + p_Fm->fmanCtrlIntr[2].f_Isr(p_Fm->fmanCtrlIntr[2].h_SrcHandle, event); + } + if (pending & INTR_EN_REV3) + { + event = GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcev[3]) & GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[3]); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmcev[3], event); + if (p_Fm->guestId != p_Fm->intrMng[e_FM_EV_FMAN_CTRL_3].guestId) + /*TODO IPC ISR For Fman Ctrl */ + ASSERT_COND(0); + /* SendIpcIsr(p_Fm, e_FM_EV_FMAN_CTRL_2, pendin3); */ + else + p_Fm->fmanCtrlIntr[3].f_Isr(p_Fm->fmanCtrlIntr[3].h_SrcHandle, event); + } +} + +t_Error FM_ErrorIsr(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(h_Fm, E_INVALID_HANDLE); + 
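+
+/* The FMan-controller branches of FM_EventIsr() above all follow the same
+ * read-mask-acknowledge-dispatch sequence; below is a sketch of that sequence
+ * factored into a helper (partition/guest routing, still marked TODO above,
+ * is left out):
+ */
+static void FmDispatchFmanCtrlEvent(t_Fm *p_Fm, int idx)
+{
+    uint32_t event;
+
+    /* only bits that are both pending and enabled are taken */
+    event = GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcev[idx]) &
+            GET_UINT32(p_Fm->p_FmFpmRegs->fmfpfcee[idx]);
+    /* acknowledge first so a new event arriving now is not lost */
+    WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmcev[idx], event);
+    if (p_Fm->fmanCtrlIntr[idx].f_Isr)
+        p_Fm->fmanCtrlIntr[idx].f_Isr(p_Fm->fmanCtrlIntr[idx].h_SrcHandle, event);
+}
+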
+ /* error interrupts */ + if (GET_UINT32(p_Fm->p_FmFpmRegs->fmepi) == 0) + return ERROR_CODE(E_EMPTY); + + ErrorIsrCB(p_Fm); + return E_OK; +} + +t_Error FM_SetPortsBandwidth(t_Handle h_Fm, t_FmPortsBandwidthParams *p_PortsBandwidth) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + int i; + uint8_t sum; + uint8_t hardwarePortId; + uint32_t tmpRegs[8] = {0,0,0,0,0,0,0,0}; + uint8_t relativePortId, shift, weight, maxPercent = 0; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + /* check that all ports add up to 100% */ + sum = 0; + for (i=0;inumOfPorts;i++) + sum +=p_PortsBandwidth->portsBandwidths[i].bandwidth; + if (sum != 100) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Sum of ports bandwidth differ from 100%")); + + /* find highest precent */ + for (i=0;inumOfPorts;i++) + { + if (p_PortsBandwidth->portsBandwidths[i].bandwidth > maxPercent) + maxPercent = p_PortsBandwidth->portsBandwidths[i].bandwidth; + } + + /* calculate weight for each port */ + for (i=0;inumOfPorts;i++) + { + weight = (uint8_t)((p_PortsBandwidth->portsBandwidths[i].bandwidth * PORT_MAX_WEIGHT )/maxPercent); + /* we want even division between 1-to-PORT_MAX_WEIGHT. so if exect division + is not reached, we round up so that: + 0 until maxPercent/PORT_MAX_WEIGHT get "1" + maxPercent/PORT_MAX_WEIGHT+1 until (maxPercent/PORT_MAX_WEIGHT)*2 get "2" + ... + maxPercent - maxPercent/PORT_MAX_WEIGHT until maxPercent get "PORT_MAX_WEIGHT: */ + if ((uint8_t)((p_PortsBandwidth->portsBandwidths[i].bandwidth * PORT_MAX_WEIGHT ) % maxPercent)) + weight++; + + /* find the location of this port within the register */ + SW_PORT_ID_TO_HW_PORT_ID(hardwarePortId, + p_PortsBandwidth->portsBandwidths[i].type, + p_PortsBandwidth->portsBandwidths[i].relativePortId); + relativePortId = (uint8_t)(hardwarePortId % 8); + shift = (uint8_t)(32-4*(relativePortId+1)); + + + if(weight > 1) + /* Add this port to tmpReg */ + /* (each 8 ports result in one register)*/ + tmpRegs[hardwarePortId/8] |= ((weight-1) << shift); + } + + for(i=0;i<8;i++) + if(tmpRegs[i]) + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_arb[i], tmpRegs[i]); + + return E_OK; +} + +t_Error FM_EnableRamsEcc(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + t_FmIpcMsg msg; + t_Error err; + + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_ENABLE_RAM_ECC; + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + return E_OK; + } + + if(!p_Fm->p_FmStateStruct->internalCall) + p_Fm->p_FmStateStruct->explicitEnable = TRUE; + p_Fm->p_FmStateStruct->internalCall = FALSE; + + if(p_Fm->p_FmStateStruct->ramsEccEnable) + return E_OK; + else + { + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fmrcr); + if(tmpReg & FPM_RAM_CTL_RAMS_ECC_EN_SRC_SEL) + { + DBG(WARNING, ("Rams ECC is configured to be controlled through JTAG")); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, tmpReg | FPM_RAM_CTL_IRAM_ECC_EN); + } + else + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, tmpReg | (FPM_RAM_CTL_RAMS_ECC_EN | FPM_RAM_CTL_IRAM_ECC_EN)); + p_Fm->p_FmStateStruct->ramsEccEnable = TRUE; + } + + return E_OK; +} + +t_Error FM_DisableRamsEcc(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + bool explicitDisable = FALSE; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_HANDLE); + 
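+
+/* FM_SetPortsBandwidth() above checks that the per-port percentages sum to
+ * 100 and then maps each percentage onto an arbitration weight relative to
+ * the busiest port, rounding up so every non-zero share gets at least
+ * weight 1.  The loop bounds in this hunk appear truncated; the sketch below
+ * assumes they iterate i < p_PortsBandwidth->numOfPorts, as the loop bodies
+ * imply.  The maximum weight is passed in instead of hard-coding
+ * PORT_MAX_WEIGHT so the helper stays self-contained.
+ */
+static uint8_t FmBandwidthToWeight(uint8_t bandwidthPercent, uint8_t maxPercent, uint8_t maxWeight)
+{
+    uint8_t weight = (uint8_t)((bandwidthPercent * maxWeight) / maxPercent);
+
+    /* round up: shares up to maxPercent/maxWeight get 1, the next band gets 2,
+       ..., and exactly maxPercent gets maxWeight */
+    if ((bandwidthPercent * maxWeight) % maxPercent)
+        weight++;
+    return weight;
+}
+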
+ if(p_Fm->guestId != NCSW_MASTER_ID) + { + t_Error err; + t_FmIpcMsg msg; + + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_DISABLE_RAM_ECC; + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + return E_OK; + } + + if(!p_Fm->p_FmStateStruct->internalCall) + explicitDisable = TRUE; + p_Fm->p_FmStateStruct->internalCall = FALSE; + + /* if rams are already disabled, or if rams were explicitly enabled and are + currently called indirectly (not explicitly), ignore this call. */ + if(!p_Fm->p_FmStateStruct->ramsEccEnable || (p_Fm->p_FmStateStruct->explicitEnable && !explicitDisable)) + return E_OK; + else + { + if(p_Fm->p_FmStateStruct->explicitEnable) + /* This is the case were both explicit are TRUE. + Turn off this flag for cases were following ramsEnable + routines are called */ + p_Fm->p_FmStateStruct->explicitEnable = FALSE; + + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fmrcr); + if(tmpReg & FPM_RAM_CTL_RAMS_ECC_EN_SRC_SEL) + { + DBG(WARNING, ("Rams ECC is configured to be controlled through JTAG")); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, tmpReg & ~FPM_RAM_CTL_IRAM_ECC_EN); + } + else + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrcr, tmpReg & ~(FPM_RAM_CTL_RAMS_ECC_EN | FPM_RAM_CTL_IRAM_ECC_EN)); + p_Fm->p_FmStateStruct->ramsEccEnable = FALSE; + } + + return E_OK; +} + +t_Error FM_SetException(t_Handle h_Fm, e_FmExceptions exception, bool enable) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t bitMask = 0; + uint32_t tmpReg; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + GET_EXCEPTION_FLAG(bitMask, exception); + if(bitMask) + { + if (enable) + p_Fm->p_FmStateStruct->exceptions |= bitMask; + else + p_Fm->p_FmStateStruct->exceptions &= ~bitMask; + + switch(exception) + { + case(e_FM_EX_DMA_BUS_ERROR): + tmpReg = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmmr); + if(enable) + tmpReg |= DMA_MODE_BER; + else + tmpReg &= ~DMA_MODE_BER; + /* disable bus error */ + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmmr, tmpReg); + break; + case(e_FM_EX_DMA_READ_ECC): + case(e_FM_EX_DMA_SYSTEM_WRITE_ECC): + case(e_FM_EX_DMA_FM_WRITE_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmmr); + if(enable) + tmpReg |= DMA_MODE_ECC; + else + tmpReg &= ~DMA_MODE_ECC; + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmmr, tmpReg); + break; + case(e_FM_EX_FPM_STALL_ON_TASKS): + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fpmem); + if(enable) + tmpReg |= FPM_EV_MASK_STALL_EN; + else + tmpReg &= ~FPM_EV_MASK_STALL_EN; + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmem, tmpReg); + break; + case(e_FM_EX_FPM_SINGLE_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fpmem); + if(enable) + tmpReg |= FPM_EV_MASK_SINGLE_ECC_EN; + else + tmpReg &= ~FPM_EV_MASK_SINGLE_ECC_EN; + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmem, tmpReg); + break; + case( e_FM_EX_FPM_DOUBLE_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fpmem); + if(enable) + tmpReg |= FPM_EV_MASK_DOUBLE_ECC_EN; + else + tmpReg &= ~FPM_EV_MASK_DOUBLE_ECC_EN; + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmem, tmpReg); + break; + case( e_FM_EX_QMI_SINGLE_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_ien); + if(enable) + { +#ifdef FM_QMI_NO_ECC_EXCEPTIONS + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Fm, &revInfo); + if (revInfo.majorRev == 4) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_QMI_SINGLE_ECC")); + return E_OK; + } +#endif /* FM_QMI_NO_ECC_EXCEPTIONS */ + tmpReg |= QMI_INTR_EN_SINGLE_ECC; + } + else + tmpReg &= 
~QMI_INTR_EN_SINGLE_ECC; + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_ien, tmpReg); + break; + case(e_FM_EX_QMI_DOUBLE_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_eien); + if(enable) + { +#ifdef FM_QMI_NO_ECC_EXCEPTIONS + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Fm, &revInfo); + if (revInfo.majorRev == 4) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_QMI_DOUBLE_ECC")); + return E_OK; + } +#endif /* FM_QMI_NO_ECC_EXCEPTIONS */ + tmpReg |= QMI_ERR_INTR_EN_DOUBLE_ECC; + } + else + tmpReg &= ~QMI_ERR_INTR_EN_DOUBLE_ECC; + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eien, tmpReg); + break; + case(e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID): + tmpReg = GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_eien); + if(enable) + tmpReg |= QMI_ERR_INTR_EN_DEQ_FROM_DEF; + else + tmpReg &= ~QMI_ERR_INTR_EN_DEQ_FROM_DEF; + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eien, tmpReg); + break; + case(e_FM_EX_BMI_LIST_RAM_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier); + if(enable) + { +#ifdef FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8 + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_BMI_LIST_RAM_ECC")); + return E_OK; + } +#endif /* FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8 */ + tmpReg |= BMI_ERR_INTR_EN_LIST_RAM_ECC; + } + else + tmpReg &= ~BMI_ERR_INTR_EN_LIST_RAM_ECC; + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier, tmpReg); + break; + case(e_FM_EX_BMI_PIPELINE_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier); + if(enable) + { +#ifdef FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9 + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Fm, &revInfo); + if ((revInfo.majorRev == 1) && (revInfo.minorRev == 0)) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_BMI_PIPELINE_ECCBMI_LIST_RAM_ECC")); + return E_OK; + } +#endif /* FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9 */ + tmpReg |= BMI_ERR_INTR_EN_PIPELINE_ECC; + } + else + tmpReg &= ~BMI_ERR_INTR_EN_PIPELINE_ECC; + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier, tmpReg); + break; + case(e_FM_EX_BMI_STATISTICS_RAM_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier); + if(enable) + tmpReg |= BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + else + tmpReg &= ~BMI_ERR_INTR_EN_STATISTICS_RAM_ECC; + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier, tmpReg); + break; + case(e_FM_EX_BMI_DISPATCH_RAM_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier); + if(enable) + { +#ifdef FM_NO_DISPATCH_RAM_ECC + t_FmRevisionInfo revInfo; + FM_GetRevision(p_Fm, &revInfo); + if (revInfo.majorRev != 4) + { + REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("e_FM_EX_BMI_DISPATCH_RAM_ECC")); + return E_OK; + } +#endif /* FM_NO_DISPATCH_RAM_ECC */ + tmpReg |= BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + } + else + tmpReg &= ~BMI_ERR_INTR_EN_DISPATCH_RAM_ECC; + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ier, tmpReg); + break; + case(e_FM_EX_IRAM_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fmrie); + if(enable) + { + /* enable ECC if not enabled */ + FmEnableRamsEcc(p_Fm); + /* enable ECC interrupts */ + tmpReg |= FPM_IRAM_ECC_ERR_EX_EN; + } + else + { + /* ECC mechanism may be disabled, depending on driver status */ + FmDisableRamsEcc(p_Fm); + tmpReg &= ~FPM_IRAM_ECC_ERR_EX_EN; + } + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrie, tmpReg); + break; + + case(e_FM_EX_MURAM_ECC): + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fmrie); + if(enable) + { + /* enable ECC if not enabled */ + FmEnableRamsEcc(p_Fm); + /* enable ECC interrupts */ + tmpReg |= FPM_MURAM_ECC_ERR_EX_EN; + } + else + { + /* ECC mechanism may be disabled, depending on driver status */ + 
FmDisableRamsEcc(p_Fm); + tmpReg &= ~FPM_MURAM_ECC_ERR_EX_EN; + } + + WRITE_UINT32(p_Fm->p_FmFpmRegs->fmrie, tmpReg); + break; + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG); + } + } + else + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Undefined exception")); + + return E_OK; +} + +t_Error FM_GetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + t_FmIpcRevisionInfo ipcRevInfo; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + + if (p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_GET_REV; + replyLength = sizeof(uint32_t) + sizeof(t_FmRevisionInfo); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + if (replyLength != (sizeof(uint32_t) + sizeof(t_FmRevisionInfo))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + memcpy((uint8_t*)&ipcRevInfo, reply.replyBody, sizeof(t_FmRevisionInfo)); + p_FmRevisionInfo->majorRev = ipcRevInfo.majorRev; + p_FmRevisionInfo->minorRev = ipcRevInfo.minorRev; + return (t_Error)(reply.error); + } + + /* read revision register 1 */ + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fm_ip_rev_1); + p_FmRevisionInfo->majorRev = (uint8_t)((tmpReg & FPM_REV1_MAJOR_MASK) >> FPM_REV1_MAJOR_SHIFT); + p_FmRevisionInfo->minorRev = (uint8_t)((tmpReg & FPM_REV1_MINOR_MASK) >> FPM_REV1_MINOR_SHIFT); + + return E_OK; +} + +uint32_t FM_GetCounter(t_Handle h_Fm, e_FmCounters counter) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + t_Error err; + uint32_t counterValue; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength, outCounter; + + SANITY_CHECK_RETURN_VALUE(p_Fm, E_INVALID_HANDLE, 0); + SANITY_CHECK_RETURN_VALUE(!p_Fm->p_FmDriverParam, E_INVALID_STATE, 0); + + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_GET_COUNTER; + memcpy(msg.msgBody, (uint8_t *)&counter, sizeof(uint32_t)); + replyLength = sizeof(uint32_t) + sizeof(uint32_t); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId) +sizeof(counterValue), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + if(replyLength != (sizeof(uint32_t) + sizeof(uint32_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + memcpy((uint8_t*)&outCounter, reply.replyBody, sizeof(uint32_t)); + + return outCounter; + } + + switch(counter) + { + case(e_FM_COUNTERS_ENQ_TOTAL_FRAME): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_etfc); + case(e_FM_COUNTERS_DEQ_TOTAL_FRAME): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dtfc); + case(e_FM_COUNTERS_DEQ_0): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc0); + case(e_FM_COUNTERS_DEQ_1): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc1); + case(e_FM_COUNTERS_DEQ_2): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc2); + case(e_FM_COUNTERS_DEQ_3): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc3); + case(e_FM_COUNTERS_DEQ_FROM_DEFAULT): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dfdc); + case(e_FM_COUNTERS_DEQ_FROM_CONTEXT): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dfcc); + case(e_FM_COUNTERS_DEQ_FROM_FD): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dffc); + case(e_FM_COUNTERS_DEQ_CONFIRM): + return GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_dcc); + 
case(e_FM_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT): + return GET_UINT32(p_Fm->p_FmDmaRegs->fmdmsefrc); + case(e_FM_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT): + return GET_UINT32(p_Fm->p_FmDmaRegs->fmdmsqfrc); + case(e_FM_COUNTERS_SEMAPHOR_SYNC_REJECT): + return GET_UINT32(p_Fm->p_FmDmaRegs->fmdmssrc); + default: + break; + } + /* should never get here */ + ASSERT_COND(FALSE); + + return 0; +} + +t_Error FM_ModifyCounter(t_Handle h_Fm, e_FmCounters counter, uint32_t val) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + /* When applicable (when there is an 'enable counters' bit, + check that counters are enabled */ + switch(counter) + { + case(e_FM_COUNTERS_ENQ_TOTAL_FRAME): + case(e_FM_COUNTERS_DEQ_TOTAL_FRAME): + case(e_FM_COUNTERS_DEQ_0): + case(e_FM_COUNTERS_DEQ_1): + case(e_FM_COUNTERS_DEQ_2): + case(e_FM_COUNTERS_DEQ_3): + case(e_FM_COUNTERS_DEQ_FROM_DEFAULT): + case(e_FM_COUNTERS_DEQ_FROM_CONTEXT): + case(e_FM_COUNTERS_DEQ_FROM_FD): + case(e_FM_COUNTERS_DEQ_CONFIRM): + if(!(GET_UINT32(p_Fm->p_FmQmiRegs->fmqm_gc) & QMI_CFG_EN_COUNTERS)) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Requested counter was not enabled")); + break; + default: + break; + } + + /* Set counter */ + switch(counter) + { + case(e_FM_COUNTERS_ENQ_TOTAL_FRAME): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_etfc, val); + break; + case(e_FM_COUNTERS_DEQ_TOTAL_FRAME): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dtfc, val); + break; + case(e_FM_COUNTERS_DEQ_0): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc0, val); + break; + case(e_FM_COUNTERS_DEQ_1): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc1, val); + break; + case(e_FM_COUNTERS_DEQ_2): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc2, val); + break; + case(e_FM_COUNTERS_DEQ_3): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dc3, val); + break; + case(e_FM_COUNTERS_DEQ_FROM_DEFAULT): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dfdc, val); + break; + case(e_FM_COUNTERS_DEQ_FROM_CONTEXT): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dfcc, val); + break; + case(e_FM_COUNTERS_DEQ_FROM_FD): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dffc, val); + break; + case(e_FM_COUNTERS_DEQ_CONFIRM): + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_dcc, val); + break; + case(e_FM_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT): + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmsefrc, val); + break; + case(e_FM_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT): + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmsqfrc, val); + break; + case(e_FM_COUNTERS_SEMAPHOR_SYNC_REJECT): + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmssrc, val); + break; + default: + break; + } + + return E_OK; +} + +void FM_SetDmaEmergency(t_Handle h_Fm, e_FmDmaMuramPort muramPort, bool enable) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t bitMask; + + SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + bitMask = (uint32_t)((muramPort==e_FM_DMA_MURAM_PORT_WRITE) ? 
DMA_MODE_EMERGENCY_WRITE : DMA_MODE_EMERGENCY_READ); + + if(enable) + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmmr, GET_UINT32(p_Fm->p_FmDmaRegs->fmdmmr) | bitMask); + else /* disable */ + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmmr, GET_UINT32(p_Fm->p_FmDmaRegs->fmdmmr) & ~bitMask); + + return; +} + +void FM_SetDmaExtBusPri(t_Handle h_Fm, e_FmDmaExtBusPri pri) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + WRITE_UINT32(p_Fm->p_FmDmaRegs->fmdmmr, GET_UINT32(p_Fm->p_FmDmaRegs->fmdmmr) | ((uint32_t)pri << DMA_MODE_BUS_PRI_SHIFT) ); + + return; +} + +void FM_GetDmaStatus(t_Handle h_Fm, t_FmDmaStatus *p_FmDmaStatus) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + t_Error err; + t_FmIpcMsg msg; + t_FmIpcReply reply; + uint32_t replyLength; + t_FmIpcDmaStatus ipcDmaStatus; + + SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + memset(&reply, 0, sizeof(reply)); + msg.msgId = FM_DMA_STAT; + replyLength = sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus); + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + (uint8_t*)&reply, + &replyLength, + NULL, + NULL)) != E_OK) + { + REPORT_ERROR(MINOR, err, NO_MSG); + return; + } + if (replyLength != (sizeof(uint32_t) + sizeof(t_FmIpcDmaStatus))) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("IPC reply length mismatch")); + return; + } + memcpy((uint8_t*)&ipcDmaStatus, reply.replyBody, sizeof(t_FmIpcDmaStatus)); + + p_FmDmaStatus->cmqNotEmpty = (bool)ipcDmaStatus.boolCmqNotEmpty; /**< Command queue is not empty */ + p_FmDmaStatus->busError = (bool)ipcDmaStatus.boolBusError; /**< Bus error occurred */ + p_FmDmaStatus->readBufEccError = (bool)ipcDmaStatus.boolReadBufEccError; /**< Double ECC error on buffer Read */ + p_FmDmaStatus->writeBufEccSysError =(bool)ipcDmaStatus.boolWriteBufEccSysError; /**< Double ECC error on buffer write from system side */ + p_FmDmaStatus->writeBufEccFmError = (bool)ipcDmaStatus.boolWriteBufEccFmError; /**< Double ECC error on buffer write from FM side */ + return; + } + + tmpReg = GET_UINT32(p_Fm->p_FmDmaRegs->fmdmsr); + + p_FmDmaStatus->cmqNotEmpty = (bool)(tmpReg & DMA_STATUS_CMD_QUEUE_NOT_EMPTY); + p_FmDmaStatus->busError = (bool)(tmpReg & DMA_STATUS_BUS_ERR); + p_FmDmaStatus->readBufEccError = (bool)(tmpReg & DMA_STATUS_READ_ECC); + p_FmDmaStatus->writeBufEccSysError = (bool)(tmpReg & DMA_STATUS_SYSTEM_WRITE_ECC); + p_FmDmaStatus->writeBufEccFmError = (bool)(tmpReg & DMA_STATUS_FM_WRITE_ECC); + return; +} + +t_Error FM_ForceIntr (t_Handle h_Fm, e_FmExceptions exception) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + switch(exception) + { + case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: + if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eif, QMI_ERR_INTR_EN_DEQ_FROM_DEF); + break; + case e_FM_EX_QMI_SINGLE_ECC: + if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_QMI_SINGLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_if, QMI_INTR_EN_SINGLE_ECC); + break; + case e_FM_EX_QMI_DOUBLE_ECC: + if (!(p_Fm->p_FmStateStruct->exceptions & 
FM_EX_QMI_DOUBLE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmQmiRegs->fmqm_eif, QMI_ERR_INTR_EN_DOUBLE_ECC); + break; + case e_FM_EX_BMI_LIST_RAM_ECC: + if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_LIST_RAM_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ifr, BMI_ERR_INTR_EN_LIST_RAM_ECC); + break; + case e_FM_EX_BMI_PIPELINE_ECC: + if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_PIPELINE_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ifr, BMI_ERR_INTR_EN_PIPELINE_ECC); + break; + case e_FM_EX_BMI_STATISTICS_RAM_ECC: + if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_STATISTICS_RAM_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ifr, BMI_ERR_INTR_EN_STATISTICS_RAM_ECC); + break; + case e_FM_EX_BMI_DISPATCH_RAM_ECC: + if (!(p_Fm->p_FmStateStruct->exceptions & FM_EX_BMI_DISPATCH_RAM_ECC)) + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception is masked")); + WRITE_UINT32(p_Fm->p_FmBmiRegs->fmbm_ifr, BMI_ERR_INTR_EN_DISPATCH_RAM_ECC); + break; + default: + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("The selected exception may not be forced")); + } + + return E_OK; +} + +void FM_Resume(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm*)h_Fm; + uint32_t tmpReg; + + SANITY_CHECK_RETURN(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + if (p_Fm->guestId == NCSW_MASTER_ID) + { + tmpReg = GET_UINT32(p_Fm->p_FmFpmRegs->fpmem); + /* clear tmpReg event bits in order not to clear standing events */ + tmpReg &= ~(FPM_EV_MASK_DOUBLE_ECC | FPM_EV_MASK_STALL | FPM_EV_MASK_SINGLE_ECC); + WRITE_UINT32(p_Fm->p_FmFpmRegs->fpmem, tmpReg | FPM_EV_MASK_RELEASE_FM); + } + else + ASSERT_COND(0); /* TODO */ +} + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +t_Error FM_DumpRegs(t_Handle h_Fm) +{ + t_Fm *p_Fm = (t_Fm *)h_Fm; + uint8_t i = 0; + t_Error err; + t_FmIpcMsg msg; + + DECLARE_DUMP; + + SANITY_CHECK_RETURN_ERROR(p_Fm, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(!p_Fm->p_FmDriverParam, E_INVALID_STATE); + + + if(p_Fm->guestId != NCSW_MASTER_ID) + { + memset(&msg, 0, sizeof(msg)); + msg.msgId = FM_DUMP_REGS; + if ((err = XX_IpcSendMessage(p_Fm->h_IpcSessions[0], + (uint8_t*)&msg, + sizeof(msg.msgId), + NULL, + NULL, + NULL, + NULL)) != E_OK) + RETURN_ERROR(MINOR, err, NO_MSG); + return E_OK; + } + + + DUMP_SUBTITLE(("\n")); + + DUMP_TITLE(p_Fm->p_FmFpmRegs, ("FmFpmRegs Regs")); + + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmtnc); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmpr); + DUMP_VAR(p_Fm->p_FmFpmRegs,brkc); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmflc); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmdis1); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmdis2); + DUMP_VAR(p_Fm->p_FmFpmRegs,fmepi); + DUMP_VAR(p_Fm->p_FmFpmRegs,fmrie); + + DUMP_TITLE(&p_Fm->p_FmFpmRegs->fmfpfcev, ("fmfpfcev")); + DUMP_SUBSTRUCT_ARRAY(i, 4) + { + DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fmfpfcev[i], sizeof(uint32_t)); + } + + DUMP_TITLE(&p_Fm->p_FmFpmRegs->fmfpfcee, ("fmfpfcee")); + DUMP_SUBSTRUCT_ARRAY(i, 4) + { + DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fmfpfcee[i], sizeof(uint32_t)); + } + + DUMP_SUBTITLE(("\n")); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmtsc1); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmtsc2); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmtsp); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmtsf); + DUMP_VAR(p_Fm->p_FmFpmRegs,fmrcr); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmextc); + 
DUMP_VAR(p_Fm->p_FmFpmRegs,fpmext1); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmext2); + + DUMP_TITLE(&p_Fm->p_FmFpmRegs->fpmdrd, ("fpmdrd")); + DUMP_SUBSTRUCT_ARRAY(i, 16) + { + DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fpmdrd[i], sizeof(uint32_t)); + } + + DUMP_SUBTITLE(("\n")); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmdra); + DUMP_VAR(p_Fm->p_FmFpmRegs,fm_ip_rev_1); + DUMP_VAR(p_Fm->p_FmFpmRegs,fm_ip_rev_2); + DUMP_VAR(p_Fm->p_FmFpmRegs,fmrstc); + DUMP_VAR(p_Fm->p_FmFpmRegs,fmcld); + DUMP_VAR(p_Fm->p_FmFpmRegs,fmnpi); + DUMP_VAR(p_Fm->p_FmFpmRegs,fpmem); + + DUMP_TITLE(&p_Fm->p_FmFpmRegs->fpmcev, ("fpmcev")); + DUMP_SUBSTRUCT_ARRAY(i, 4) + { + DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fpmcev[i], sizeof(uint32_t)); + } + + DUMP_TITLE(&p_Fm->p_FmFpmRegs->fmfp_ps, ("fmfp_ps")); + DUMP_SUBSTRUCT_ARRAY(i, 64) + { + DUMP_MEMORY(&p_Fm->p_FmFpmRegs->fmfp_ps[i], sizeof(uint32_t)); + } + + + DUMP_TITLE(p_Fm->p_FmDmaRegs, ("p_FmDmaRegs Regs")); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmsr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmmr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmtr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmhy); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmsetr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmtah); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmtal); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmtcid); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmra); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmrd); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmwcr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmebcr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmccqdr); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmccqvr1); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmccqvr2); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmcqvr3); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmcqvr4); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmcqvr5); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmsefrc); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmsqfrc); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmssrc); + DUMP_VAR(p_Fm->p_FmDmaRegs,fmdmdcr); + + DUMP_TITLE(&p_Fm->p_FmDmaRegs->fmdmplr, ("fmdmplr")); + + DUMP_SUBSTRUCT_ARRAY(i, FM_SIZE_OF_LIODN_TABLE/2) + { + DUMP_MEMORY(&p_Fm->p_FmDmaRegs->fmdmplr[i], sizeof(uint32_t)); + } + + DUMP_TITLE(p_Fm->p_FmBmiRegs, ("p_FmBmiRegs COMMON Regs")); + DUMP_VAR(p_Fm->p_FmBmiRegs,fmbm_init); + DUMP_VAR(p_Fm->p_FmBmiRegs,fmbm_cfg1); + DUMP_VAR(p_Fm->p_FmBmiRegs,fmbm_cfg2); + DUMP_VAR(p_Fm->p_FmBmiRegs,fmbm_ievr); + DUMP_VAR(p_Fm->p_FmBmiRegs,fmbm_ier); + + DUMP_TITLE(&p_Fm->p_FmBmiRegs->fmbm_arb, ("fmbm_arb")); + DUMP_SUBSTRUCT_ARRAY(i, 8) + { + DUMP_MEMORY(&p_Fm->p_FmBmiRegs->fmbm_arb[i], sizeof(uint32_t)); + } + + + DUMP_TITLE(p_Fm->p_FmQmiRegs, ("p_FmQmiRegs COMMON Regs")); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_gc); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_eie); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_eien); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_eif); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_ie); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_ien); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_if); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_gs); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_ts); + DUMP_VAR(p_Fm->p_FmQmiRegs,fmqm_etfc); + + return E_OK; +} +#endif /* (defined(DEBUG_ERRORS) && ... */ + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/fm.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/fm.h @@ -0,0 +1,699 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm.h + + @Description FM internal structures and definitions. +*//***************************************************************************/ +#ifndef __FM_H +#define __FM_H + +#include "error_ext.h" +#include "std_ext.h" +#include "fm_ext.h" +#include "fm_ipc.h" + + +#define __ERR_MODULE__ MODULE_FM + +#define FM_MAX_NUM_OF_HW_PORT_IDS 64 +#define FM_MAX_NUM_OF_GUESTS 100 + +/**************************************************************************//** + @Description Exceptions +*//***************************************************************************/ +#define FM_EX_DMA_BUS_ERROR 0x80000000 /**< DMA bus error. 
*/ +#define FM_EX_DMA_READ_ECC 0x40000000 +#define FM_EX_DMA_SYSTEM_WRITE_ECC 0x20000000 +#define FM_EX_DMA_FM_WRITE_ECC 0x10000000 +#define FM_EX_FPM_STALL_ON_TASKS 0x08000000 /**< Stall of tasks on FPM */ +#define FM_EX_FPM_SINGLE_ECC 0x04000000 /**< Single ECC on FPM */ +#define FM_EX_FPM_DOUBLE_ECC 0x02000000 +#define FM_EX_QMI_SINGLE_ECC 0x01000000 /**< Single ECC on FPM */ +#define FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID 0x00800000 /**< Dequeu from default queue id */ +#define FM_EX_QMI_DOUBLE_ECC 0x00400000 +#define FM_EX_BMI_LIST_RAM_ECC 0x00200000 +#define FM_EX_BMI_PIPELINE_ECC 0x00100000 +#define FM_EX_BMI_STATISTICS_RAM_ECC 0x00080000 +#define FM_EX_IRAM_ECC 0x00040000 +#define FM_EX_NURAM_ECC 0x00020000 +#define FM_EX_BMI_DISPATCH_RAM_ECC 0x00010000 + +#define GET_EXCEPTION_FLAG(bitMask, exception) switch(exception){ \ + case e_FM_EX_DMA_BUS_ERROR: \ + bitMask = FM_EX_DMA_BUS_ERROR; break; \ + case e_FM_EX_DMA_READ_ECC: \ + bitMask = FM_EX_DMA_READ_ECC; break; \ + case e_FM_EX_DMA_SYSTEM_WRITE_ECC: \ + bitMask = FM_EX_DMA_SYSTEM_WRITE_ECC; break; \ + case e_FM_EX_DMA_FM_WRITE_ECC: \ + bitMask = FM_EX_DMA_FM_WRITE_ECC; break; \ + case e_FM_EX_FPM_STALL_ON_TASKS: \ + bitMask = FM_EX_FPM_STALL_ON_TASKS; break; \ + case e_FM_EX_FPM_SINGLE_ECC: \ + bitMask = FM_EX_FPM_SINGLE_ECC; break; \ + case e_FM_EX_FPM_DOUBLE_ECC: \ + bitMask = FM_EX_FPM_DOUBLE_ECC; break; \ + case e_FM_EX_QMI_SINGLE_ECC: \ + bitMask = FM_EX_QMI_SINGLE_ECC; break; \ + case e_FM_EX_QMI_DOUBLE_ECC: \ + bitMask = FM_EX_QMI_DOUBLE_ECC; break; \ + case e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID: \ + bitMask = FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID; break; \ + case e_FM_EX_BMI_LIST_RAM_ECC: \ + bitMask = FM_EX_BMI_LIST_RAM_ECC; break; \ + case e_FM_EX_BMI_PIPELINE_ECC: \ + bitMask = FM_EX_BMI_PIPELINE_ECC; break; \ + case e_FM_EX_BMI_STATISTICS_RAM_ECC: \ + bitMask = FM_EX_BMI_STATISTICS_RAM_ECC; break; \ + case e_FM_EX_BMI_DISPATCH_RAM_ECC: \ + bitMask = FM_EX_BMI_DISPATCH_RAM_ECC; break; \ + case e_FM_EX_IRAM_ECC: \ + bitMask = FM_EX_IRAM_ECC; break; \ + case e_FM_EX_MURAM_ECC: \ + bitMask = FM_EX_NURAM_ECC; break; \ + default: bitMask = 0;break;} + +/**************************************************************************//** + @Description defaults +*//***************************************************************************/ +#define DEFAULT_exceptions (FM_EX_DMA_BUS_ERROR |\ + FM_EX_DMA_READ_ECC |\ + FM_EX_DMA_SYSTEM_WRITE_ECC |\ + FM_EX_DMA_FM_WRITE_ECC |\ + FM_EX_FPM_STALL_ON_TASKS |\ + FM_EX_FPM_SINGLE_ECC |\ + FM_EX_FPM_DOUBLE_ECC |\ + FM_EX_QMI_SINGLE_ECC |\ + FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID|\ + FM_EX_QMI_DOUBLE_ECC |\ + FM_EX_BMI_LIST_RAM_ECC |\ + FM_EX_BMI_PIPELINE_ECC |\ + FM_EX_BMI_STATISTICS_RAM_ECC |\ + FM_EX_BMI_DISPATCH_RAM_ECC |\ + FM_EX_IRAM_ECC |\ + FM_EX_NURAM_ECC ) +#define DEFAULT_totalNumOfTasks (BMI_MAX_NUM_OF_TASKS*3/4) +#define DEFAULT_totalFifoSize (BMI_MAX_FIFO_SIZE*3/4) +#define DEFAULT_maxNumOfOpenDmas (BMI_MAX_NUM_OF_DMAS*3/4) +#define DEFAULT_eccEnable FALSE +#define DEFAULT_dispLimit 0 +#define DEFAULT_prsDispTh 16 +#define DEFAULT_plcrDispTh 16 +#define DEFAULT_kgDispTh 16 +#define DEFAULT_bmiDispTh 16 +#define DEFAULT_qmiEnqDispTh 16 +#define DEFAULT_qmiDeqDispTh 16 +#define DEFAULT_fmCtl1DispTh 16 +#define DEFAULT_fmCtl2DispTh 16 +#define DEFAULT_cacheOverride e_FM_DMA_NO_CACHE_OR +#ifdef FM_PEDANTIC_DMA +#define DEFAULT_aidOverride TRUE +#else +#define DEFAULT_aidOverride FALSE +#endif /* FM_PEDANTIC_DMA */ +#define DEFAULT_aidMode e_FM_DMA_AID_OUT_TNUM +#define DEFAULT_dmaStopOnBusError 
FALSE +#define DEFAULT_stopAtBusError FALSE +#define DEFAULT_axiDbgNumOfBeats 1 +#define DEFAULT_dmaCamNumOfEntries 32 +#define DEFAULT_dmaCommQLow ((DMA_THRESH_MAX_COMMQ+1)/2) +#define DEFAULT_dmaCommQHigh ((DMA_THRESH_MAX_COMMQ+1)*3/4) +#define DEFAULT_dmaReadIntBufLow ((DMA_THRESH_MAX_BUF+1)/2) +#define DEFAULT_dmaReadIntBufHigh ((DMA_THRESH_MAX_BUF+1)*3/4) +#define DEFAULT_dmaWriteIntBufLow ((DMA_THRESH_MAX_BUF+1)/2) +#define DEFAULT_dmaWriteIntBufHigh ((DMA_THRESH_MAX_BUF+1)*3/4) +#define DEFAULT_dmaSosEmergency 0 +#define DEFAULT_dmaDbgCntMode e_FM_DMA_DBG_NO_CNT +#define DEFAULT_catastrophicErr e_FM_CATASTROPHIC_ERR_STALL_PORT +#define DEFAULT_dmaErr e_FM_DMA_ERR_CATASTROPHIC +#define DEFAULT_resetOnInit FALSE +#define DEFAULT_haltOnExternalActivation FALSE /* do not change! if changed, must be disabled for rev1 ! */ +#define DEFAULT_haltOnUnrecoverableEccError FALSE /* do not change! if changed, must be disabled for rev1 ! */ +#define DEFAULT_externalEccRamsEnable FALSE +#define DEFAULT_VerifyUcode FALSE +#define DEFAULT_tnumAgingPeriod 0 +#define DEFAULT_dmaWatchdog 0 /* disabled */ +#define DEFAULT_mtu 9600 + +/**************************************************************************//** + @Description Modules registers offsets +*//***************************************************************************/ +#define FM_MM_MURAM 0x00000000 +#define FM_MM_BMI 0x00080000 +#define FM_MM_QMI 0x00080400 +#define FM_MM_PRS 0x000c7000 +#define FM_MM_KG 0x000C1000 +#define FM_MM_DMA 0x000C2000 +#define FM_MM_FPM 0x000C3000 +#define FM_MM_PLCR 0x000C0000 +#define FM_MM_IMEM 0x000C4000 + +/**************************************************************************//** + @Description Interrupt Enable/Mask +*//***************************************************************************/ + +/**************************************************************************//** + @Description Memory Mapped Registers +*//***************************************************************************/ + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +typedef _Packed struct +{ + volatile uint32_t fpmtnc; /**< FPM TNUM Control */ + volatile uint32_t fpmpr; /**< FPM Port_ID FmCtl Association */ + volatile uint32_t brkc; /**< FPM Breakpoint Control */ + volatile uint32_t fpmflc; /**< FPM Flush Control */ + volatile uint32_t fpmdis1; /**< FPM Dispatch Thresholds1 */ + volatile uint32_t fpmdis2; /**< FPM Dispatch Thresholds2 */ + volatile uint32_t fmepi; /**< FM Error Pending Interrupts */ + volatile uint32_t fmrie; /**< FM Error Interrupt Enable */ + volatile uint32_t fmfpfcev[4]; /**< FPM FMan-Controller Event 1-4 */ + volatile uint8_t res1[16]; /**< reserved */ + volatile uint32_t fmfpfcee[4]; /**< PM FMan-Controller Event 1-4 */ + volatile uint8_t res2[16]; /**< reserved */ + volatile uint32_t fpmtsc1; /**< FPM TimeStamp Control1 */ + volatile uint32_t fpmtsc2; /**< FPM TimeStamp Control2 */ + volatile uint32_t fpmtsp; /**< FPM Time Stamp */ + volatile uint32_t fpmtsf; /**< FPM Time Stamp Fraction */ + volatile uint32_t fmrcr; /**< FM Rams Control */ + volatile uint32_t fpmextc; /**< FPM External Requests Control */ + volatile uint32_t fpmext1; /**< FPM External Requests Config1 */ + volatile uint32_t fpmext2; /**< FPM External Requests Config2 */ + volatile uint32_t fpmdrd[16]; /**< FPM Data_Ram Data 0-15 */ + volatile uint32_t fpmdra; /**< FPM Data Ram Access */ + volatile uint32_t fm_ip_rev_1; /**< FM IP Block Revision 1 */ + volatile uint32_t fm_ip_rev_2; /**< FM IP Block Revision 2 */ + volatile uint32_t fmrstc; /**< FM Reset Command */ + volatile uint32_t fmcld; /**< FM Classifier Debug */ + volatile uint32_t fmnpi; /**< FM Normal Pending Interrupts */ + volatile uint32_t fmfp_exte; /**< FPM External Requests Enable */ + volatile uint32_t fpmem; /**< FPM Event & Mask */ + volatile uint32_t fpmcev[4]; /**< FPM CPU Event 1-4 */ + volatile uint8_t res4[16]; /**< reserved */ + volatile uint32_t fmfp_ps[0x40]; /**< FPM Port Status */ + volatile uint8_t reserved1[0x260]; + volatile uint32_t fpmts[128]; /**< 0x400: FPM Task Status */ +} _PackedType t_FmFpmRegs; + +#define NUM_OF_DBG_TRAPS 3 + +typedef _Packed struct +{ + volatile uint32_t fmbm_init; /**< BMI Initialization */ + volatile uint32_t fmbm_cfg1; /**< BMI Configuration 1 */ + volatile uint32_t fmbm_cfg2; /**< BMI Configuration 2 */ + volatile uint32_t reserved[5]; + volatile uint32_t fmbm_ievr; /**< Interrupt Event Register */ + volatile uint32_t fmbm_ier; /**< Interrupt Enable Register */ + volatile uint32_t fmbm_ifr; /**< Interrupt Force Register */ + volatile uint32_t reserved1[5]; + volatile uint32_t fmbm_arb[8]; /**< BMI Arbitration */ + volatile uint32_t reserved2[12]; + volatile uint32_t fmbm_dtc[NUM_OF_DBG_TRAPS]; /**< BMI Debug Trap Counter */ + volatile uint32_t reserved3; + volatile uint32_t fmbm_dcv[NUM_OF_DBG_TRAPS][4]; /**< BMI Debug Compare Value */ + volatile uint32_t fmbm_dcm[NUM_OF_DBG_TRAPS][4]; /**< BMI Debug Compare Mask */ + volatile uint32_t fmbm_gde; /**< BMI Global Debug Enable */ + volatile uint32_t fmbm_pp[63]; /**< BMI Port Parameters */ + volatile uint32_t reserved4; + volatile uint32_t fmbm_pfs[63]; /**< BMI Port FIFO Size */ + volatile uint32_t reserved5; + volatile uint32_t fmbm_ppid[63]; /**< Port Partition ID */ +} _PackedType t_FmBmiRegs; + +typedef _Packed struct +{ + volatile uint32_t fmqm_gc; /**< General Configuration Register */ + volatile uint32_t Reserved0; + volatile uint32_t fmqm_eie; /**< Error Interrupt Event Register */ + volatile uint32_t fmqm_eien; /**< Error Interrupt Enable Register */ + volatile 
uint32_t fmqm_eif; /**< Error Interrupt Force Register */ + volatile uint32_t fmqm_ie; /**< Interrupt Event Register */ + volatile uint32_t fmqm_ien; /**< Interrupt Enable Register */ + volatile uint32_t fmqm_if; /**< Interrupt Force Register */ + volatile uint32_t fmqm_gs; /**< Global Status Register */ + volatile uint32_t fmqm_ts; /**< Task Status Register */ + volatile uint32_t fmqm_etfc; /**< Enqueue Total Frame Counter */ + volatile uint32_t fmqm_dtfc; /**< Dequeue Total Frame Counter */ + volatile uint32_t fmqm_dc0; /**< Dequeue Counter 0 */ + volatile uint32_t fmqm_dc1; /**< Dequeue Counter 1 */ + volatile uint32_t fmqm_dc2; /**< Dequeue Counter 2 */ + volatile uint32_t fmqm_dc3; /**< Dequeue Counter 3 */ + volatile uint32_t fmqm_dfdc; /**< Dequeue FQID from Default Counter */ + volatile uint32_t fmqm_dfcc; /**< Dequeue FQID from Context Counter */ + volatile uint32_t fmqm_dffc; /**< Dequeue FQID from FD Counter */ + volatile uint32_t fmqm_dcc; /**< Dequeue Confirm Counter */ + volatile uint32_t Reserved1a[7]; + volatile uint32_t fmqm_tapc; /**< Tnum Aging Period Control */ + volatile uint32_t fmqm_dmcvc; /**< Dequeue MAC Command Valid Counter */ + volatile uint32_t fmqm_difdcc; /**< Dequeue Invalid FD Command Counter */ + volatile uint32_t fmqm_da1v; /**< Dequeue A1 Valid Counter */ + volatile uint32_t Reserved1b; + volatile uint32_t fmqm_dtc; /**< 0x0080 Debug Trap Counter */ + volatile uint32_t fmqm_efddd; /**< 0x0084 Enqueue Frame Descriptor Dynamic Debug */ + volatile uint32_t Reserved3[2]; + _Packed struct { + volatile uint32_t fmqm_dtcfg1; /**< 0x0090 Debug Trap Configuration 1 Register */ + volatile uint32_t fmqm_dtval1; /**< Debug Trap Value 1 Register */ + volatile uint32_t fmqm_dtm1; /**< Debug Trap Mask 1 Register */ + volatile uint32_t fmqm_dtc1; /**< Debug Trap Counter 1 Register */ + volatile uint32_t fmqm_dtcfg2; /**< Debug Trap Configuration 2 Register */ + volatile uint32_t fmqm_dtval2; /**< Debug Trap Value 2 Register */ + volatile uint32_t fmqm_dtm2; /**< Debug Trap Mask 2 Register */ + volatile uint32_t Reserved1; + } _PackedType dbgTraps[NUM_OF_DBG_TRAPS]; +} _PackedType t_FmQmiRegs; + +typedef _Packed struct +{ + volatile uint32_t fmdmsr; /**< FM DMA status register 0x04 */ + volatile uint32_t fmdmmr; /**< FM DMA mode register 0x08 */ + volatile uint32_t fmdmtr; /**< FM DMA bus threshold register 0x0c */ + volatile uint32_t fmdmhy; /**< FM DMA bus hysteresis register 0x10 */ + volatile uint32_t fmdmsetr; /**< FM DMA SOS emergency Threshold Register 0x14 */ + volatile uint32_t fmdmtah; /**< FM DMA transfer bus address high register 0x18 */ + volatile uint32_t fmdmtal; /**< FM DMA transfer bus address low register 0x1C */ + volatile uint32_t fmdmtcid; /**< FM DMA transfer bus communication ID register 0x20 */ + volatile uint32_t fmdmra; /**< FM DMA bus internal ram address register 0x24 */ + volatile uint32_t fmdmrd; /**< FM DMA bus internal ram data register 0x28 */ + volatile uint32_t fmdmwcr; /**< FM DMA CAM watchdog counter value 0x2C */ + volatile uint32_t fmdmebcr; /**< FM DMA CAM base in MURAM register 0x30 */ + volatile uint32_t fmdmccqdr; /**< FM DMA CAM and CMD Queue Debug register 0x34 */ + volatile uint32_t fmdmccqvr1; /**< FM DMA CAM and CMD Queue Value register #1 0x38 */ + volatile uint32_t fmdmccqvr2; /**< FM DMA CAM and CMD Queue Value register #2 0x3C */ + volatile uint32_t fmdmcqvr3; /**< FM DMA CMD Queue Value register #3 0x40 */ + volatile uint32_t fmdmcqvr4; /**< FM DMA CMD Queue Value register #4 0x44 */ + volatile uint32_t fmdmcqvr5; /**< 
FM DMA CMD Queue Value register #5 0x48 */ + volatile uint32_t fmdmsefrc; /**< FM DMA Semaphore Entry Full Reject Counter 0x50 */ + volatile uint32_t fmdmsqfrc; /**< FM DMA Semaphore Queue Full Reject Counter 0x54 */ + volatile uint32_t fmdmssrc; /**< FM DMA Semaphore SYNC Reject Counter 0x54 */ + volatile uint32_t fmdmdcr; /**< FM DMA Debug Counter */ + volatile uint32_t fmdmemsr; /**< FM DMA Emrgency Smoother Register */ + volatile uint32_t reserved; + volatile uint32_t fmdmplr[FM_SIZE_OF_LIODN_TABLE/2]; + /**< FM DMA PID-LIODN # register */ +} _PackedType t_FmDmaRegs; + +typedef _Packed struct +{ + volatile uint32_t iadd; /**< FM IRAM instruction address register */ + volatile uint32_t idata; /**< FM IRAM instruction data register */ + volatile uint32_t itcfg; /**< FM IRAM timing config register */ + volatile uint32_t iready; /**< FM IRAM ready register */ + volatile uint8_t res[0x80000-0x10]; +} _PackedType t_FMIramRegs; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/**************************************************************************//** + @Description General defines +*//***************************************************************************/ + +#define FM_DEBUG_STATUS_REGISTER_OFFSET 0x000d1084UL +#define FM_UCODE_DEBUG_INSTRUCTION 0x6ffff805UL + + +/**************************************************************************//** + @Description DMA definitions +*//***************************************************************************/ + +/* masks */ +#define DMA_MODE_AID_OR 0x20000000 +#define DMA_MODE_SBER 0x10000000 +#define DMA_MODE_BER 0x00200000 +#define DMA_MODE_ECC 0x00000020 +#define DMA_MODE_PRIVILEGE_PROT 0x00001000 +#define DMA_MODE_SECURE_PROT 0x00000800 +#define DMA_MODE_EMERGENCY_READ 0x00080000 +#define DMA_MODE_EMERGENCY_WRITE 0x00040000 + +#define DMA_TRANSFER_PORTID_MASK 0xFF000000 +#define DMA_TRANSFER_TNUM_MASK 0x00FF0000 +#define DMA_TRANSFER_LIODN_MASK 0x00000FFF + +#define DMA_HIGH_LIODN_MASK 0x0FFF0000 +#define DMA_LOW_LIODN_MASK 0x00000FFF + +#define DMA_STATUS_CMD_QUEUE_NOT_EMPTY 0x10000000 +#define DMA_STATUS_BUS_ERR 0x08000000 +#define DMA_STATUS_READ_ECC 0x04000000 +#define DMA_STATUS_SYSTEM_WRITE_ECC 0x02000000 +#define DMA_STATUS_FM_WRITE_ECC 0x01000000 +#define DMA_STATUS_SYSTEM_DPEXT_ECC 0x00800000 +#define DMA_STATUS_FM_DPEXT_ECC 0x00400000 +#define DMA_STATUS_SYSTEM_DPDAT_ECC 0x00200000 +#define DMA_STATUS_FM_DPDAT_ECC 0x00100000 +#define DMA_STATUS_FM_SPDAT_ECC 0x00080000 + +#define FM_LIODN_BASE_MASK 0x00000FFF + +/* shifts */ +#define DMA_MODE_CACHE_OR_SHIFT 30 +#define DMA_MODE_BUS_PRI_SHIFT 16 +#define DMA_MODE_AXI_DBG_SHIFT 24 +#define DMA_MODE_CEN_SHIFT 13 +#define DMA_MODE_BUS_PROT_SHIFT 10 +#define DMA_MODE_DBG_SHIFT 7 +#define DMA_MODE_EMERGENCY_LEVEL_SHIFT 6 +#define DMA_MODE_AID_MODE_SHIFT 4 +#define DMA_MODE_MAX_AXI_DBG_NUM_OF_BEATS 16 +#define DMA_MODE_MAX_CAM_NUM_OF_ENTRIES 32 + +#define DMA_THRESH_COMMQ_SHIFT 24 +#define DMA_THRESH_READ_INT_BUF_SHIFT 16 + +#define DMA_LIODN_SHIFT 16 + +#define DMA_TRANSFER_PORTID_SHIFT 24 +#define DMA_TRANSFER_TNUM_SHIFT 16 + +/* sizes */ +#define DMA_MAX_WATCHDOG 0xffffffff + +/* others */ +#define DMA_CAM_SIZEOF_ENTRY 0x40 +#define DMA_CAM_ALIGN 0x1000 +#define DMA_CAM_UNITS 8 + + +/**************************************************************************//** + @Description FPM defines +*//***************************************************************************/ + +/* masks */ +#define 
FPM_EV_MASK_DOUBLE_ECC 0x80000000 +#define FPM_EV_MASK_STALL 0x40000000 +#define FPM_EV_MASK_SINGLE_ECC 0x20000000 +#define FPM_EV_MASK_RELEASE_FM 0x00010000 +#define FPM_EV_MASK_DOUBLE_ECC_EN 0x00008000 +#define FPM_EV_MASK_STALL_EN 0x00004000 +#define FPM_EV_MASK_SINGLE_ECC_EN 0x00002000 +#define FPM_EV_MASK_EXTERNAL_HALT 0x00000008 +#define FPM_EV_MASK_ECC_ERR_HALT 0x00000004 + +#define FPM_RAM_CTL_RAMS_ECC_EN 0x80000000 +#define FPM_RAM_CTL_IRAM_ECC_EN 0x40000000 +#define FPM_RAM_CTL_MURAM_ECC 0x00008000 +#define FPM_RAM_CTL_IRAM_ECC 0x00004000 +#define FPM_RAM_CTL_MURAM_TEST_ECC 0x20000000 +#define FPM_RAM_CTL_IRAM_TEST_ECC 0x10000000 +#define FPM_RAM_CTL_RAMS_ECC_EN_SRC_SEL 0x08000000 + +#define FPM_IRAM_ECC_ERR_EX_EN 0x00020000 +#define FPM_MURAM_ECC_ERR_EX_EN 0x00040000 + +#define FPM_REV1_MAJOR_MASK 0x0000FF00 +#define FPM_REV1_MINOR_MASK 0x000000FF + +#define FPM_REV2_INTEG_MASK 0x00FF0000 +#define FPM_REV2_ERR_MASK 0x0000FF00 +#define FPM_REV2_CFG_MASK 0x000000FF + +#define FPM_TS_FRACTION_MASK 0x0000FFFF +#define FPM_TS_CTL_EN 0x80000000 + +#define FPM_PORT_FM_CTL1 0x00000001 +#define FPM_PORT_FM_CTL2 0x00000002 +#define FPM_PRC_REALSE_STALLED 0x00800000 + +#define FPM_PS_STALLED 0x00800000 +#define FPM_PS_FM_CTL1_SEL 0x80000000 +#define FPM_PS_FM_CTL2_SEL 0x40000000 +#define FPM_PS_FM_CTL_SEL_MASK (FPM_PS_FM_CTL1_SEL | FPM_PS_FM_CTL2_SEL) + +#define FPM_RSTC_FM_RESET 0x80000000 +#define FPM_RSTC_10G0_RESET 0x04000000 +#define FPM_RSTC_1G0_RESET 0x40000000 +#define FPM_RSTC_1G1_RESET 0x20000000 +#define FPM_RSTC_1G2_RESET 0x10000000 +#define FPM_RSTC_1G3_RESET 0x08000000 +#define FPM_RSTC_1G4_RESET 0x02000000 + + +/* shifts */ +#define FPM_DISP_LIMIT_SHIFT 24 + +#define FPM_THR1_PRS_SHIFT 24 +#define FPM_THR1_KG_SHIFT 16 +#define FPM_THR1_PLCR_SHIFT 8 +#define FPM_THR1_BMI_SHIFT 0 + +#define FPM_THR2_QMI_ENQ_SHIFT 24 +#define FPM_THR2_QMI_DEQ_SHIFT 0 +#define FPM_THR2_FM_CTL1_SHIFT 16 +#define FPM_THR2_FM_CTL2_SHIFT 8 + +#define FPM_EV_MASK_CAT_ERR_SHIFT 1 +#define FPM_EV_MASK_DMA_ERR_SHIFT 0 + +#define FPM_REV1_MAJOR_SHIFT 8 +#define FPM_REV1_MINOR_SHIFT 0 + +#define FPM_REV2_INTEG_SHIFT 16 +#define FPM_REV2_ERR_SHIFT 8 +#define FPM_REV2_CFG_SHIFT 0 + +#define FPM_TS_INT_SHIFT 16 + +#define FPM_PORT_FM_CTL_PORTID_SHIFT 24 + +#define FPM_PS_FM_CTL_SEL_SHIFT 30 +#define FPM_PRC_ORA_FM_CTL_SEL_SHIFT 16 + +/* Interrupts defines */ +#define FPM_EVENT_FM_CTL_0 0x00008000 +#define FPM_EVENT_FM_CTL 0x0000FF00 +#define FPM_EVENT_FM_CTL_BRK 0x00000080 + +/* others */ +#define FPM_MAX_DISP_LIMIT 31 + +/**************************************************************************//** + @Description BMI defines +*//***************************************************************************/ +/* masks */ +#define BMI_INIT_START 0x80000000 +#define BMI_ERR_INTR_EN_PIPELINE_ECC 0x80000000 +#define BMI_ERR_INTR_EN_LIST_RAM_ECC 0x40000000 +#define BMI_ERR_INTR_EN_STATISTICS_RAM_ECC 0x20000000 +#define BMI_ERR_INTR_EN_DISPATCH_RAM_ECC 0x10000000 +#define BMI_NUM_OF_TASKS_MASK 0x3F000000 +#define BMI_NUM_OF_EXTRA_TASKS_MASK 0x000F0000 +#define BMI_NUM_OF_DMAS_MASK 0x00000F00 +#define BMI_NUM_OF_EXTRA_DMAS_MASK 0x0000000F +#define BMI_FIFO_SIZE_MASK 0x000003FF +#define BMI_EXTRA_FIFO_SIZE_MASK 0x03FF0000 +#define BMI_CFG2_DMAS_MASK 0x0000003F + +/* shifts */ +#define BMI_CFG2_TASKS_SHIFT 16 +#define BMI_CFG2_DMAS_SHIFT 0 +#define BMI_CFG1_FIFO_SIZE_SHIFT 16 +#define BMI_FIFO_SIZE_SHIFT 0 +#define BMI_EXTRA_FIFO_SIZE_SHIFT 16 +#define BMI_NUM_OF_TASKS_SHIFT 24 +#define BMI_EXTRA_NUM_OF_TASKS_SHIFT 16 
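/* Editor's note: an illustrative sketch only, not part of the original driver
 * sources. The BMI_*_MASK / BMI_*_SHIFT pairs defined around this point are
 * meant for the usual read-modify-write pattern on a 32-bit BMI configuration
 * word; the hypothetical helper below shows how BMI_NUM_OF_TASKS_MASK and
 * BMI_NUM_OF_TASKS_SHIFT would typically be combined (the driver itself
 * performs the equivalent steps inline where needed). */
static __inline__ uint32_t IllustrativeBmiSetNumOfTasks(uint32_t bmiCfgReg, uint8_t numOfTasks)
{
    bmiCfgReg &= ~BMI_NUM_OF_TASKS_MASK;                            /* clear the tasks field */
    bmiCfgReg |= (((uint32_t)numOfTasks << BMI_NUM_OF_TASKS_SHIFT)
                  & BMI_NUM_OF_TASKS_MASK);                         /* insert the new value  */
    return bmiCfgReg;
}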
+#define BMI_NUM_OF_DMAS_SHIFT 8 +#define BMI_EXTRA_NUM_OF_DMAS_SHIFT 0 + +/* others */ +#define BMI_FIFO_ALIGN 0x100 + + +/**************************************************************************//** + @Description QMI defines +*//***************************************************************************/ +/* masks */ +#define QMI_CFG_ENQ_EN 0x80000000 +#define QMI_CFG_DEQ_EN 0x40000000 +#define QMI_CFG_EN_COUNTERS 0x10000000 +#define QMI_CFG_SOFT_RESET 0x01000000 +#define QMI_CFG_DEQ_MASK 0x0000003F +#define QMI_CFG_ENQ_MASK 0x00003F00 + +#define QMI_ERR_INTR_EN_DOUBLE_ECC 0x80000000 +#define QMI_ERR_INTR_EN_DEQ_FROM_DEF 0x40000000 +#define QMI_INTR_EN_SINGLE_ECC 0x80000000 + +/* shifts */ +#define QMI_CFG_ENQ_SHIFT 8 +#define QMI_TAPC_TAP 22 + + +/**************************************************************************//** + @Description IRAM defines +*//***************************************************************************/ +/* masks */ +#define IRAM_IADD_AIE 0x80000000 +#define IRAM_READY 0x80000000 + +typedef struct { + void (*f_Isr) (t_Handle h_Arg, uint32_t event); + t_Handle h_SrcHandle; +} t_FmanCtrlIntrSrc; + + +typedef struct +{ + /* uint8_t numOfPartitions; */ + bool resetOnInit; +#ifdef FM_PARTITION_ARRAY + uint16_t liodnBasePerPort[FM_SIZE_OF_LIODN_TABLE]; +#endif + bool enCounters; + t_FmThresholds thresholds; + e_FmDmaCacheOverride dmaCacheOverride; + e_FmDmaAidMode dmaAidMode; + bool dmaAidOverride; + uint8_t dmaAxiDbgNumOfBeats; + uint8_t dmaCamNumOfEntries; + uint32_t dmaWatchdog; + t_FmDmaThresholds dmaCommQThresholds; + t_FmDmaThresholds dmaWriteBufThresholds; + t_FmDmaThresholds dmaReadBufThresholds; + uint32_t dmaSosEmergency; + e_FmDmaDbgCntMode dmaDbgCntMode; + bool dmaStopOnBusError; + bool dmaEnEmergency; + t_FmDmaEmergency dmaEmergency; + bool dmaEnEmergencySmoother; + uint32_t dmaEmergencySwitchCounter; + bool haltOnExternalActivation; + bool haltOnUnrecoverableEccError; + e_FmCatastrophicErr catastrophicErr; + e_FmDmaErr dmaErr; + bool enMuramTestMode; + bool enIramTestMode; + bool externalEccRamsEnable; + uint16_t tnumAgingPeriod; + t_FmPcdFirmwareParams firmware; + bool fwVerify; +} t_FmDriverParam; + +typedef void (t_FmanCtrlIsr)( t_Handle h_Fm, uint32_t event); + +typedef struct +{ +/***************************/ +/* Master/Guest parameters */ +/***************************/ + uint8_t fmId; + e_FmPortType portsTypes[FM_MAX_NUM_OF_HW_PORT_IDS]; + uint16_t fmClkFreq; +/**************************/ +/* Master Only parameters */ +/**************************/ + bool enabledTimeStamp; + uint8_t count1MicroBit; + uint8_t totalNumOfTasks; + uint32_t totalFifoSize; + uint8_t maxNumOfOpenDmas; + uint8_t accumulatedNumOfTasks; + uint32_t accumulatedFifoSize; + uint8_t accumulatedNumOfOpenDmas; +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + uint8_t accumulatedNumOfDeqTnums; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ +#ifdef FM_LOW_END_RESTRICTION + bool lowEndRestriction; +#endif /* FM_LOW_END_RESTRICTION */ + uint32_t exceptions; + int irq; + int errIrq; + bool ramsEccEnable; + bool explicitEnable; + bool internalCall; + uint8_t ramsEccOwners; + uint32_t extraFifoPoolSize; + uint8_t extraTasksPoolSize; + uint8_t extraOpenDmasPoolSize; +#if defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS) + uint16_t macMaxFrameLengths10G[FM_MAX_NUM_OF_10G_MACS]; +#endif /* defined(FM_MAX_NUM_OF_10G_MACS) && (FM_MAX_NUM_OF_10G_MACS) */ + uint16_t macMaxFrameLengths1G[FM_MAX_NUM_OF_1G_MACS]; +} t_FmStateStruct; + +typedef struct +{ +/***************************/ +/* Master/Guest 
parameters */ +/***************************/ +/* locals for recovery */ + uintptr_t baseAddr; + +/* un-needed for recovery */ + t_Handle h_Pcd; + char fmModuleName[MODULE_NAME_SIZE]; + char fmIpcHandlerModuleName[FM_MAX_NUM_OF_GUESTS][MODULE_NAME_SIZE]; + t_Handle h_IpcSessions[FM_MAX_NUM_OF_GUESTS]; + t_FmIntrSrc intrMng[e_FM_EV_DUMMY_LAST]; /* FM exceptions user callback */ + uint8_t guestId; +/**************************/ +/* Master Only parameters */ +/**************************/ +/* locals for recovery */ + t_FmFpmRegs *p_FmFpmRegs; + t_FmBmiRegs *p_FmBmiRegs; + t_FmQmiRegs *p_FmQmiRegs; + t_FmDmaRegs *p_FmDmaRegs; + t_FmExceptionsCallback *f_Exception; + t_FmBusErrorCallback *f_BusError; + t_Handle h_App; /* Application handle */ + t_Handle h_Spinlock; + bool recoveryMode; + t_FmStateStruct *p_FmStateStruct; + +/* un-needed for recovery */ + t_FmDriverParam *p_FmDriverParam; + t_Handle h_FmMuram; + uint64_t fmMuramPhysBaseAddr; + bool independentMode; + bool hcPortInitialized; + uintptr_t camBaseAddr; /* save for freeing */ + uintptr_t resAddr; + uintptr_t fifoBaseAddr; /* save for freeing */ + t_FmanCtrlIntrSrc fmanCtrlIntr[FM_NUM_OF_FMAN_CTRL_EVENT_REGS]; /* FM exceptions user callback */ + bool usedEventRegs[FM_NUM_OF_FMAN_CTRL_EVENT_REGS]; +} t_Fm; + + +#endif /* __FM_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/fm_ipc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/fm_ipc.h @@ -0,0 +1,449 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_ipc.h + + @Description FM Inter-Partition prototypes, structures and definitions. 
+*//***************************************************************************/ +#ifndef __FM_IPC_H +#define __FM_IPC_H + +#include "error_ext.h" +#include "std_ext.h" + + +/**************************************************************************//** + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_IPC_grp FM Inter-Partition messaging Unit + + @Description FM Inter-Partition messaging unit API definitions and enums. + + @{ +*//***************************************************************************/ + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +/**************************************************************************//** + @Description enum for defining MAC types +*//***************************************************************************/ + +/**************************************************************************//** + @Description A structure of parameters for specifying a MAC. +*//***************************************************************************/ +typedef _Packed struct +{ + uint8_t id; + uint32_t enumType; +} _PackedType t_FmIpcMacParams; + +/**************************************************************************//** + @Description A structure of parameters for specifying a MAC. +*//***************************************************************************/ +typedef _Packed struct +{ + t_FmIpcMacParams macParams; + uint16_t maxFrameLength; +} _PackedType t_FmIpcMacMaxFrameParams; + +/**************************************************************************//** + @Description FM physical Address +*//***************************************************************************/ +typedef _Packed struct t_FmIpcPhysAddr +{ + volatile uint8_t high; + volatile uint32_t low; +} _PackedType t_FmIpcPhysAddr; + +/**************************************************************************//** + @Description Structure for IPC communication during FM_PORT_Init. +*//***************************************************************************/ +typedef _Packed struct t_FmIpcPortInInitParams { + uint8_t hardwarePortId; /**< IN. port Id */ + uint32_t enumPortType; /**< IN. Port type */ + uint8_t boolIndependentMode;/**< IN. TRUE if FM Port operates in independent mode */ + uint16_t liodnOffset; /**< IN. Port's requested resource */ + uint8_t numOfTasks; /**< IN. Port's requested resource */ + uint8_t numOfExtraTasks; /**< IN. Port's requested resource */ + uint8_t numOfOpenDmas; /**< IN. Port's requested resource */ + uint8_t numOfExtraOpenDmas; /**< IN. Port's requested resource */ + uint32_t sizeOfFifo; /**< IN. Port's requested resource */ + uint32_t extraSizeOfFifo; /**< IN. Port's requested resource */ + uint8_t deqPipelineDepth; /**< IN. Port's requested resource */ + uint16_t liodnBase; /**< IN. Irrelevant for P4080 rev 1. + LIODN base for this port, to be + used together with LIODN offset. */ +} _PackedType t_FmIpcPortInInitParams; + + +/**************************************************************************//** + @Description Structure for IPC communication between port and FM + regarding tasks and open DMA resources management. 
+*//***************************************************************************/ +typedef _Packed struct t_FmIpcPortRsrcParams { + uint8_t hardwarePortId; /**< IN. port Id */ + uint32_t val; /**< IN. Port's requested resource */ + uint32_t extra; /**< IN. Port's requested resource */ + uint8_t boolInitialConfig; +} _PackedType t_FmIpcPortRsrcParams; + + +/**************************************************************************//** + @Description Structure for IPC communication between port and FM + regarding tasks and open DMA resources management. +*//***************************************************************************/ +typedef _Packed struct t_FmIpcPortFifoParams { + t_FmIpcPortRsrcParams rsrcParams; + uint32_t enumPortType; + uint8_t boolIndependentMode; + uint8_t deqPipelineDepth; + uint8_t numOfPools; + uint16_t secondLargestBufSize; + uint16_t largestBufSize; + uint8_t boolInitialConfig; +} _PackedType t_FmIpcPortFifoParams; + +/**************************************************************************//** + @Description Structure for port-FM communication during FM_PORT_Free. +*//***************************************************************************/ +typedef _Packed struct t_FmIpcPortFreeParams { + uint8_t hardwarePortId; /**< IN. port Id */ + uint32_t enumPortType; /**< IN. Port type */ +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + uint8_t deqPipelineDepth; /**< IN. Port's requested resource */ +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ +} _PackedType t_FmIpcPortFreeParams; + +/**************************************************************************//** + @Description Structure for defining DMA status +*//***************************************************************************/ +typedef _Packed struct t_FmIpcDmaStatus { + uint8_t boolCmqNotEmpty; /**< Command queue is not empty */ + uint8_t boolBusError; /**< Bus error occurred */ + uint8_t boolReadBufEccError; /**< Double ECC error on buffer Read */ + uint8_t boolWriteBufEccSysError; /**< Double ECC error on buffer write from system side */ + uint8_t boolWriteBufEccFmError; /**< Double ECC error on buffer write from FM side */ +} _PackedType t_FmIpcDmaStatus; + +typedef _Packed struct t_FmIpcRegisterIntr +{ + uint8_t guestId; /* IN */ + uint32_t event; /* IN */ +} _PackedType t_FmIpcRegisterIntr; + +typedef _Packed struct t_FmIpcIsr +{ + uint8_t boolErr; /* IN */ + uint32_t pendingReg; /* IN */ +} _PackedType t_FmIpcIsr; + +/**************************************************************************//** + @Description structure for returning revision information +*//***************************************************************************/ +typedef _Packed struct t_FmIpcRevisionInfo { + uint8_t majorRev; /**< OUT: Major revision */ + uint8_t minorRev; /**< OUT: Minor revision */ +} _PackedType t_FmIpcRevisionInfo; + +/**************************************************************************//** + @Description Structure for defining Fm number of Fman controlers +*//***************************************************************************/ +typedef _Packed struct t_FmIpcPortNumOfFmanCtrls { + uint8_t hardwarePortId; /**< IN. port Id */ + uint8_t numOfFmanCtrls; /**< IN. 
Number of Fman controllers */ +} t_FmIpcPortNumOfFmanCtrls; + +/**************************************************************************//** + @Description Structure for setting Fman controller events +*//***************************************************************************/ +typedef _Packed struct t_FmIpcFmanEvents { + uint8_t eventRegId; /**< IN: Fman controller event register id */ + uint32_t enableEvents; /**< IN/OUT: required enabled events mask */ +} _PackedType t_FmIpcFmanEvents; + +#define FM_IPC_MAX_REPLY_BODY_SIZE 16 +#define FM_IPC_MAX_REPLY_SIZE (FM_IPC_MAX_REPLY_BODY_SIZE + sizeof(uint32_t)) +#define FM_IPC_MAX_MSG_SIZE 30 +typedef _Packed struct t_FmIpcMsg +{ + uint32_t msgId; + uint8_t msgBody[FM_IPC_MAX_MSG_SIZE]; +} _PackedType t_FmIpcMsg; + +typedef _Packed struct t_FmIpcReply +{ + uint32_t error; + uint8_t replyBody[FM_IPC_MAX_REPLY_BODY_SIZE]; +} _PackedType t_FmIpcReply; + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/***************************************************************************/ +/************************ FRONT-END-TO-BACK-END*****************************/ +/***************************************************************************/ + +/**************************************************************************//** + @Function FM_GET_TIMESTAMP_SCALE + + @Description Used by FM front-end. + + @Param[out] uint32_t Pointer +*//***************************************************************************/ +#define FM_GET_TIMESTAMP_SCALE 1 + +/**************************************************************************//** + @Function FM_GET_COUNTER + + @Description Used by FM front-end. + + @Param[in/out] t_FmIpcGetCounter Pointer +*//***************************************************************************/ +#define FM_GET_COUNTER 2 + +/**************************************************************************//** + @Function FM_DUMP_REGS + + @Description Used by FM front-end in order to dump all FM registers in + the master FM module. + + @Param None +*//***************************************************************************/ +#define FM_DUMP_REGS 3 + +/**************************************************************************//** + @Function FM_GET_SET_PORT_PARAMS + + @Description Used by FM front-end for the PORT module in order to set and get + parameters in/from master FM module on FM PORT initialization time. + + @Param[in/out] t_FmIcPortInitParams Pointer +*//***************************************************************************/ +#define FM_GET_SET_PORT_PARAMS 4 + +/**************************************************************************//** + @Function FM_FREE_PORT + + @Description Used by FM front-end for the PORT module when a port is freed + to free all FM PORT resources. + + @Param[in] uint8_t Pointer +*//***************************************************************************/ +#define FM_FREE_PORT 5 + +/**************************************************************************//** + @Function FM_RESET_MAC + + @Description Used by front-end for the MAC module to reset the MAC registers + + @Param[in] t_FmIpcMacParams Pointer
+*//***************************************************************************/ +#define FM_RESET_MAC 6 + +/**************************************************************************//** + @Function FM_RESUME_STALLED_PORT + + @Description Used by FM front-end for the PORT module in order to + release a stalled FM Port. + + @Param[in] uint8_t Pointer +*//***************************************************************************/ +#define FM_RESUME_STALLED_PORT 7 + +/**************************************************************************//** + @Function FM_IS_PORT_STALLED + + @Description Used by FM front-end for the PORT module in order to check whether + an FM port is stalled. + + @Param[in/out] t_FmIcPortIsStalled Pointer +*//***************************************************************************/ +#define FM_IS_PORT_STALLED 8 + +/**************************************************************************//** + @Function FM_DUMP_PORT_REGS + + @Description Used by FM front-end for the PORT module in order to dump + all port registers. + + @Param[in] uint8_t Pointer +*//***************************************************************************/ +#define FM_DUMP_PORT_REGS 9 + +/**************************************************************************//** + @Function FM_GET_REV + + @Description Used by FM front-end in order to get the FM revision + information. + + @Param[in] uint8_t Pointer +*//***************************************************************************/ +#define FM_GET_REV 10 + +/**************************************************************************//** + @Function FM_REGISTER_INTR + + @Description Used by FM front-end to register an interrupt handler to + be called upon interrupt for the guest. + + @Param[out] t_FmIpcRegisterIntr Pointer +*//***************************************************************************/ +#define FM_REGISTER_INTR 11 + +/**************************************************************************//** + @Function FM_GET_CLK_FREQ + + @Description Used by FM front-end to read the FM clock frequency. + + @Param[out] uint32_t Pointer +*//***************************************************************************/ +#define FM_GET_CLK_FREQ 12 + +/**************************************************************************//** + @Function FM_DMA_STAT + + @Description Used by FM front-end to read the FM DMA status. + + @Param[out] t_FmIpcDmaStatus Pointer +*//***************************************************************************/ +#define FM_DMA_STAT 13 + +/**************************************************************************//** + @Function FM_ALLOC_FMAN_CTRL_EVENT_REG + + @Description Used by FM front-end to allocate an event register. + + @Param[out] Event register id Pointer +*//***************************************************************************/ +#define FM_ALLOC_FMAN_CTRL_EVENT_REG 14 + +/**************************************************************************//** + @Function FM_FREE_FMAN_CTRL_EVENT_REG + + @Description Used by FM front-end to free an allocated event register. + + @Param[in] uint8_t Pointer - Event register id +*//***************************************************************************/ +#define FM_FREE_FMAN_CTRL_EVENT_REG 15 + +/**************************************************************************//** + @Function FM_SET_FMAN_CTRL_EVENTS_ENABLE + + @Description Used by FM front-end to enable events in the FPM + Fman controller event register.
+ + @Param[in] t_FmIpcFmanEvents Pointer +*//***************************************************************************/ +#define FM_SET_FMAN_CTRL_EVENTS_ENABLE 16 + +/**************************************************************************//** + @Function FM_GET_FMAN_CTRL_EVENTS_ENABLE + + @Description Used by FM front-end to read back the enabled events in the FPM + Fman controller event register. + + @Param[in/out] t_FmIpcFmanEvents Pointer +*//***************************************************************************/ +#define FM_GET_FMAN_CTRL_EVENTS_ENABLE 17 + +/**************************************************************************//** + @Function FM_SET_MAC_MAX_FRAME + + @Description Used by FM front-end to set a MAC's MTU/RTU in the + back-end. + + @Param[in/out] t_FmIpcMacMaxFrameParams Pointer +*//***************************************************************************/ +#define FM_SET_MAC_MAX_FRAME 18 + +/**************************************************************************//** + @Function FM_GET_PHYS_MURAM_BASE + + @Description Used by FM front-end in order to get the MURAM base address + + @Param[in/out] t_FmIpcPhysAddr Pointer +*//***************************************************************************/ +#define FM_GET_PHYS_MURAM_BASE 19 + +/**************************************************************************//** + @Function FM_MASTER_IS_ALIVE + + @Description Used by FM front-end in order to verify that the Master is up + + @Param[in/out] bool +*//***************************************************************************/ +#define FM_MASTER_IS_ALIVE 20 + +#define FM_ENABLE_RAM_ECC 21 +#define FM_DISABLE_RAM_ECC 22 +#define FM_SET_NUM_OF_FMAN_CTRL 23 +#define FM_SET_SIZE_OF_FIFO 24 +#define FM_SET_NUM_OF_TASKS 25 +#define FM_SET_NUM_OF_OPEN_DMAS 26 + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +#define FM_10G_TX_ECC_WA 100 +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + +/***************************************************************************/ +/************************ BACK-END-TO-FRONT-END*****************************/ +/***************************************************************************/ + +/**************************************************************************//** + @Function FM_GUEST_ISR + + @Description Used by FM back-end to report an interrupt to the front-end. + + @Param[out] t_FmIpcIsr Pointer +*//***************************************************************************/ +#define FM_GUEST_ISR 1 + + + +/** @} */ /* end of FM_IPC_grp group */ +/** @} */ /* end of FM_grp group */ + + +#endif /* __FM_IPC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/fm_muram.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/fm_muram.c @@ -0,0 +1,164 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission.
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File FM_muram.c + + @Description FM MURAM ... +*//***************************************************************************/ +#include "error_ext.h" +#include "std_ext.h" +#include "mm_ext.h" +#include "string_ext.h" +#include "sprint_ext.h" +#include "fm_muram_ext.h" +#include "fm_common.h" + + +#define __ERR_MODULE__ MODULE_FM_MURAM + + +typedef struct +{ + t_Handle h_Mem; + uintptr_t baseAddr; + uint32_t size; +} t_FmMuram; + + +void FmMuramClear(t_Handle h_FmMuram) +{ + t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram; + + SANITY_CHECK_RETURN(h_FmMuram, E_INVALID_HANDLE); + IOMemSet32(UINT_TO_PTR(p_FmMuram->baseAddr), 0, p_FmMuram->size); +} + + +t_Handle FM_MURAM_ConfigAndInit(uintptr_t baseAddress, uint32_t size) +{ + t_Handle h_Mem; + t_FmMuram *p_FmMuram; + + if (!baseAddress) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("baseAddress 0 is not supported")); + return NULL; + } + + if (baseAddress%4) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("baseAddress not 4 bytes aligned!")); + return NULL; + } + + /* Allocate FM MURAM structure */ + p_FmMuram = (t_FmMuram *) XX_Malloc(sizeof(t_FmMuram)); + if (!p_FmMuram) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM MURAM driver structure")); + return NULL; + } + memset(p_FmMuram, 0, sizeof(t_FmMuram)); + + + if ((MM_Init(&h_Mem, baseAddress, size) != E_OK) || (!h_Mem)) + { + XX_Free(p_FmMuram); + REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-MURAM partition!!!")); + return NULL; + } + + /* Initialize FM MURAM parameters which will be kept by the driver */ + p_FmMuram->baseAddr = baseAddress; + p_FmMuram->size = size; + p_FmMuram->h_Mem = h_Mem; + + return p_FmMuram; +} + +t_Error FM_MURAM_Free(t_Handle h_FmMuram) +{ + t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram; + + if (p_FmMuram->h_Mem) + MM_Free(p_FmMuram->h_Mem); + + XX_Free(h_FmMuram); + + return E_OK; +} + +void * FM_MURAM_AllocMem(t_Handle h_FmMuram, uint32_t size, uint32_t align) +{ + t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram; + uintptr_t addr; + + SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, NULL); + + addr = (uintptr_t)MM_Get(p_FmMuram->h_Mem, size, align ,"FM MURAM"); + + if (addr == ILLEGAL_BASE) + return NULL; + + return UINT_TO_PTR(addr); +} + +void * FM_MURAM_AllocMemForce(t_Handle h_FmMuram, uint64_t base, uint32_t size) +{ + t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram; + uintptr_t addr; + + 
SANITY_CHECK_RETURN_VALUE(h_FmMuram, E_INVALID_HANDLE, NULL); + SANITY_CHECK_RETURN_VALUE(p_FmMuram->h_Mem, E_INVALID_HANDLE, NULL); + + addr = (uintptr_t)MM_GetForce(p_FmMuram->h_Mem, base, size, "FM MURAM"); + + if (addr == ILLEGAL_BASE) + return NULL; + + return UINT_TO_PTR(addr); +} + +t_Error FM_MURAM_FreeMem(t_Handle h_FmMuram, void *ptr) +{ + t_FmMuram *p_FmMuram = ( t_FmMuram *)h_FmMuram; + + SANITY_CHECK_RETURN_ERROR(h_FmMuram, E_INVALID_HANDLE); + SANITY_CHECK_RETURN_ERROR(p_FmMuram->h_Mem, E_INVALID_HANDLE); + + if (MM_Put(p_FmMuram->h_Mem, PTR_TO_UINT(ptr)) == 0) + RETURN_ERROR(MINOR, E_INVALID_HANDLE, ("memory pointer!!!")); + + return E_OK; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/inc/fm_common.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/inc/fm_common.h @@ -0,0 +1,1173 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fm_common.h + + @Description FM internal structures and definitions. +*//***************************************************************************/ +#ifndef __FM_COMMON_H +#define __FM_COMMON_H + +#include "error_ext.h" +#include "std_ext.h" +#include "fm_pcd_ext.h" +#include "fm_port_ext.h" + +#define CLS_PLAN_NUM_PER_GRP 8 + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +/**************************************************************************//** + @Description PCD KG scheme registers +*//***************************************************************************/ +typedef _Packed struct t_FmPcdPlcrInterModuleProfileRegs { + volatile uint32_t fmpl_pemode; /* 0x090 FMPL_PEMODE - FM Policer Profile Entry Mode*/ + volatile uint32_t fmpl_pegnia; /* 0x094 FMPL_PEGNIA - FM Policer Profile Entry GREEN Next Invoked Action*/ + volatile uint32_t fmpl_peynia; /* 0x098 FMPL_PEYNIA - FM Policer Profile Entry YELLOW Next Invoked Action*/ + volatile uint32_t fmpl_pernia; /* 0x09C FMPL_PERNIA - FM Policer Profile Entry RED Next Invoked Action*/ + volatile uint32_t fmpl_pecir; /* 0x0A0 FMPL_PECIR - FM Policer Profile Entry Committed Information Rate*/ + volatile uint32_t fmpl_pecbs; /* 0x0A4 FMPL_PECBS - FM Policer Profile Entry Committed Burst Size*/ + volatile uint32_t fmpl_pepepir_eir; /* 0x0A8 FMPL_PEPIR_EIR - FM Policer Profile Entry Peak/Excess Information Rate*/ + volatile uint32_t fmpl_pepbs_ebs; /* 0x0AC FMPL_PEPBS_EBS - FM Policer Profile Entry Peak/Excess Information Rate*/ + volatile uint32_t fmpl_pelts; /* 0x0B0 FMPL_PELTS - FM Policer Profile Entry Last TimeStamp*/ + volatile uint32_t fmpl_pects; /* 0x0B4 FMPL_PECTS - FM Policer Profile Entry Committed Token Status*/ + volatile uint32_t fmpl_pepts_ets; /* 0x0B8 FMPL_PEPTS_ETS - FM Policer Profile Entry Peak/Excess Token Status*/ + volatile uint32_t fmpl_pegpc; /* 0x0BC FMPL_PEGPC - FM Policer Profile Entry GREEN Packet Counter*/ + volatile uint32_t fmpl_peypc; /* 0x0C0 FMPL_PEYPC - FM Policer Profile Entry YELLOW Packet Counter*/ + volatile uint32_t fmpl_perpc; /* 0x0C4 FMPL_PERPC - FM Policer Profile Entry RED Packet Counter */ + volatile uint32_t fmpl_perypc; /* 0x0C8 FMPL_PERYPC - FM Policer Profile Entry Recolored YELLOW Packet Counter*/ + volatile uint32_t fmpl_perrpc; /* 0x0CC FMPL_PERRPC - FM Policer Profile Entry Recolored RED Packet Counter*/ + volatile uint32_t fmpl_res1[12]; /* 0x0D0-0x0FF Reserved */ +} _PackedType t_FmPcdPlcrInterModuleProfileRegs; + +/**************************************************************************//** + @Description PCD KG scheme registers +*//***************************************************************************/ +typedef _Packed struct t_FmPcdKgInterModuleSchemeRegs { + volatile uint32_t kgse_mode; /**< MODE */ + volatile uint32_t kgse_ekfc; /**< Extract Known Fields Command */ + volatile uint32_t kgse_ekdv; /**< Extract Known Default Value */ + volatile uint32_t kgse_bmch; /**< Bit Mask Command High */ + volatile uint32_t kgse_bmcl; /**< Bit Mask Command Low */ + volatile uint32_t kgse_fqb; /**< Frame Queue Base */ + volatile uint32_t kgse_hc; /**< Hash Command */ + volatile uint32_t kgse_ppc; /**< Policer Profile Command */ + volatile uint32_t kgse_gec[FM_PCD_KG_NUM_OF_GENERIC_REGS]; + /**< Generic Extract Command */ + volatile uint32_t kgse_spc; /**< KeyGen Scheme Entry Statistic Packet Counter */ + volatile uint32_t kgse_dv0; /**< KeyGen Scheme Entry Default Value 0 */ + volatile uint32_t kgse_dv1; /**< KeyGen Scheme Entry Default Value 1 */ + volatile uint32_t kgse_ccbs; /**< KeyGen Scheme Entry Coarse Classification Bit*/ + volatile uint32_t kgse_mv; /**< KeyGen Scheme Entry Match vector */ +} _PackedType t_FmPcdKgInterModuleSchemeRegs; + +typedef _Packed struct t_FmPcdCcCapwapReassmTimeoutParams { + volatile uint32_t portIdAndCapwapReassmTbl; + volatile uint32_t fqidForTimeOutFrames; + volatile uint32_t timeoutRequestTime; 
+}_PackedType t_FmPcdCcCapwapReassmTimeoutParams; + + + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +typedef struct { + uint8_t baseEntry; + uint16_t numOfClsPlanEntries; + uint32_t vectors[FM_PCD_MAX_NUM_OF_CLS_PLANS]; +} t_FmPcdKgInterModuleClsPlanSet; + +/**************************************************************************//** + @Description Structure for binding a port to keygen schemes. +*//***************************************************************************/ +typedef struct t_FmPcdKgInterModuleBindPortToSchemes { + uint8_t hardwarePortId; + uint8_t netEnvId; + bool useClsPlan; /**< TRUE if this port uses the clsPlan mechanism */ + uint8_t numOfSchemes; + uint8_t schemesIds[FM_PCD_KG_NUM_OF_SCHEMES]; +} t_FmPcdKgInterModuleBindPortToSchemes; + +typedef struct { + uint32_t nextCcNodeInfo; + t_List node; +} t_CcNodeInfo; + +typedef struct +{ + t_Handle h_CcNode; + uint16_t index; + t_List node; +}t_CcNodeInformation; +#define CC_NODE_F_OBJECT(ptr) LIST_OBJECT(ptr, t_CcNodeInformation, node) + +typedef struct +{ + t_Handle h_Manip; + t_List node; +}t_ManipInfo; +#define CC_NEXT_NODE_F_OBJECT(ptr) LIST_OBJECT(ptr, t_CcNodeInfo, node) + +typedef struct { + uint32_t type; + uint8_t prOffset; + + uint16_t dataOffset; + uint8_t poolIndex; + + uint8_t poolIdForManip; + uint8_t numOfTasks; + + uint8_t hardwarePortId; + +} t_GetCcParams; + +typedef struct { + uint32_t type; + int psoSize; + uint32_t nia; + +} t_SetCcParams; + +typedef struct { + t_GetCcParams getCcParams; + t_SetCcParams setCcParams; +} t_FmPortGetSetCcParams; + + +static __inline__ bool TRY_LOCK(t_Handle h_Spinlock, volatile bool *p_Flag) +{ + uint32_t intFlags; + if (h_Spinlock) + intFlags = XX_LockIntrSpinlock(h_Spinlock); + else + intFlags = XX_DisableAllIntr(); + if (*p_Flag) + { + if (h_Spinlock) + XX_UnlockIntrSpinlock(h_Spinlock, intFlags); + else + XX_RestoreAllIntr(intFlags); + return FALSE; + } + *p_Flag = TRUE; + if (h_Spinlock) + XX_UnlockIntrSpinlock(h_Spinlock, intFlags); + else + XX_RestoreAllIntr(intFlags); + return TRUE; +} + +#define RELEASE_LOCK(_flag) _flag = FALSE; + +/**************************************************************************//** + @Collection Defines used for manipulation CC and BMI + @{ +*//***************************************************************************/ +#define INTERNAL_CONTEXT_OFFSET 0x80000000 +#define OFFSET_OF_PR 0x40000000 +#define BUFFER_POOL_ID_FOR_MANIP 0x20000000 +#define NUM_OF_TASKS 0x10000000 +#define OFFSET_OF_DATA 0x08000000 +#define HW_PORT_ID 0x04000000 + + +#define UPDATE_NIA_PNEN 0x80000000 +#define UPDATE_PSO 0x40000000 +#define UPDATE_NIA_PNDN 0x20000000 +#define UPDATE_FMFP_PRC_WITH_ONE_RISC_ONLY 0x10000000 +/* @} */ + +/**************************************************************************//** + @Collection Defines used for manipulation CC and CC + @{ +*//***************************************************************************/ +#define UPDATE_NIA_ENQ_WITHOUT_DMA 0x80000000 +#define UPDATE_CC_WITH_TREE 0x40000000 +#define UPDATE_CC_WITH_DELETE_TREE 0x20000000 +/* @} */ + +/**************************************************************************//** + @Collection Defines used for enabling/disabling FM interrupts + @{ +*//***************************************************************************/ +typedef uint32_t t_FmBlockErrIntrEnable; + +#define ERR_INTR_EN_DMA 0x00010000 +#define ERR_INTR_EN_FPM 0x80000000 +#define ERR_INTR_EN_BMI 0x00800000 
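/* Editor's note: an illustrative sketch only, not part of the original driver
 * sources. Each ERR_INTR_EN_* flag selects one FM block in the 32-bit
 * error-interrupt enable word (t_FmBlockErrIntrEnable); the hypothetical
 * example below composes such a word for the DMA, FPM and BMI blocks. The
 * driver typically derives the set of enabled blocks from its configured
 * exceptions rather than hard-coding it as done here. */
static __inline__ t_FmBlockErrIntrEnable IllustrativeErrIntrEnableExample(void)
{
    /* OR the per-block flags together to enable error interrupts
       for the DMA, FPM and BMI blocks. */
    return (t_FmBlockErrIntrEnable)(ERR_INTR_EN_DMA | ERR_INTR_EN_FPM | ERR_INTR_EN_BMI);
}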
+#define ERR_INTR_EN_QMI 0x00400000 +#define ERR_INTR_EN_PRS 0x00200000 +#define ERR_INTR_EN_KG 0x00100000 +#define ERR_INTR_EN_PLCR 0x00080000 +#define ERR_INTR_EN_MURAM 0x00040000 +#define ERR_INTR_EN_IRAM 0x00020000 +#define ERR_INTR_EN_10G_MAC0 0x00008000 +#define ERR_INTR_EN_1G_MAC0 0x00004000 +#define ERR_INTR_EN_1G_MAC1 0x00002000 +#define ERR_INTR_EN_1G_MAC2 0x00001000 +#define ERR_INTR_EN_1G_MAC3 0x00000800 +#define ERR_INTR_EN_1G_MAC4 0x00000400 +#define ERR_INTR_EN_MACSEC_MAC0 0x00000200 + + +typedef uint32_t t_FmBlockIntrEnable; + +#define INTR_EN_BMI 0x80000000 +#define INTR_EN_QMI 0x40000000 +#define INTR_EN_PRS 0x20000000 +#define INTR_EN_KG 0x10000000 +#define INTR_EN_PLCR 0x08000000 +#define INTR_EN_1G_MAC0_TMR 0x00080000 +#define INTR_EN_1G_MAC1_TMR 0x00040000 +#define INTR_EN_1G_MAC2_TMR 0x00020000 +#define INTR_EN_1G_MAC3_TMR 0x00010000 +#define INTR_EN_1G_MAC4_TMR 0x00000040 +#define INTR_EN_REV0 0x00008000 +#define INTR_EN_REV1 0x00004000 +#define INTR_EN_REV2 0x00002000 +#define INTR_EN_REV3 0x00001000 +#define INTR_EN_BRK 0x00000080 +#define INTR_EN_TMR 0x01000000 +#define INTR_EN_MACSEC_MAC0 0x00000001 +/* @} */ + +#define FM_MAX_NUM_OF_PORTS (FM_MAX_NUM_OF_OH_PORTS + \ + FM_MAX_NUM_OF_1G_RX_PORTS + \ + FM_MAX_NUM_OF_10G_RX_PORTS + \ + FM_MAX_NUM_OF_1G_TX_PORTS + \ + FM_MAX_NUM_OF_10G_TX_PORTS) + +#define MODULE_NAME_SIZE 30 +#define DUMMY_PORT_ID 0 + +#define FM_LIODN_OFFSET_MASK 0x3FF + +/**************************************************************************//** + @Description NIA Description +*//***************************************************************************/ +#define NIA_ORDER_RESTOR 0x00800000 +#define NIA_ENG_FM_CTL 0x00000000 +#define NIA_ENG_PRS 0x00440000 +#define NIA_ENG_KG 0x00480000 +#define NIA_ENG_PLCR 0x004C0000 +#define NIA_ENG_BMI 0x00500000 +#define NIA_ENG_QMI_ENQ 0x00540000 +#define NIA_ENG_QMI_DEQ 0x00580000 +#define NIA_ENG_MASK 0x007C0000 + +#define NIA_FM_CTL_AC_CC 0x00000006 +#define NIA_FM_CTL_AC_HC 0x0000000C +#define NIA_FM_CTL_AC_IND_MODE_TX 0x00000008 +#define NIA_FM_CTL_AC_IND_MODE_RX 0x0000000A +#define NIA_FM_CTL_AC_FRAG 0x0000000e +#define NIA_FM_CTL_AC_PRE_FETCH 0x00000010 +#define NIA_FM_CTL_AC_POST_FETCH_PCD 0x00000012 +#define NIA_FM_CTL_AC_POST_FETCH_PCD_UDP_LEN 0x00000018 +#define NIA_FM_CTL_AC_POST_FETCH_NO_PCD 0x00000012 +#define NIA_FM_CTL_AC_FRAG_CHECK 0x00000014 +#define NIA_FM_CTL_AC_MASK 0x0000001f + +#define NIA_BMI_AC_ENQ_FRAME 0x00000002 +#define NIA_BMI_AC_TX_RELEASE 0x000002C0 +#define NIA_BMI_AC_RELEASE 0x000000C0 +#define NIA_BMI_AC_DISCARD 0x000000C1 +#define NIA_BMI_AC_TX 0x00000274 +#define NIA_BMI_AC_FETCH 0x00000208 +#define NIA_BMI_AC_MASK 0x000003FF + +#define NIA_KG_DIRECT 0x00000100 +#define NIA_KG_CC_EN 0x00000200 +#define NIA_PLCR_ABSOLUTE 0x00008000 + +#define NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA 0x00000202 + +/**************************************************************************//** + @Description Port Id defines +*//***************************************************************************/ +#define BASE_OH_PORTID 1 +#define BASE_1G_RX_PORTID 8 +#define BASE_10G_RX_PORTID 0x10 +#define BASE_1G_TX_PORTID 0x28 +#define BASE_10G_TX_PORTID 0x30 + +#define FM_PCD_PORT_OH_BASE_INDX 0 +#define FM_PCD_PORT_1G_RX_BASE_INDX (FM_PCD_PORT_OH_BASE_INDX+FM_MAX_NUM_OF_OH_PORTS) +#define FM_PCD_PORT_10G_RX_BASE_INDX (FM_PCD_PORT_1G_RX_BASE_INDX+FM_MAX_NUM_OF_1G_RX_PORTS) +#define FM_PCD_PORT_1G_TX_BASE_INDX (FM_PCD_PORT_10G_RX_BASE_INDX+FM_MAX_NUM_OF_10G_RX_PORTS) +#define 
FM_PCD_PORT_10G_TX_BASE_INDX (FM_PCD_PORT_1G_TX_BASE_INDX+FM_MAX_NUM_OF_1G_TX_PORTS) + +#if (FM_MAX_NUM_OF_OH_PORTS > 0) +#define CHECK_PORT_ID_OH_PORTS(_relativePortId) \ + if ((_relativePortId) >= FM_MAX_NUM_OF_OH_PORTS) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal OH_PORT port id")) +#else +#define CHECK_PORT_ID_OH_PORTS(_relativePortId) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal OH_PORT port id")) +#endif +#if (FM_MAX_NUM_OF_1G_RX_PORTS > 0) +#define CHECK_PORT_ID_1G_RX_PORTS(_relativePortId) \ + if ((_relativePortId) >= FM_MAX_NUM_OF_1G_RX_PORTS) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_RX_PORT port id")) +#else +#define CHECK_PORT_ID_1G_RX_PORTS(_relativePortId) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_RX_PORT port id")) +#endif +#if (FM_MAX_NUM_OF_10G_RX_PORTS > 0) +#define CHECK_PORT_ID_10G_RX_PORTS(_relativePortId) \ + if ((_relativePortId) >= FM_MAX_NUM_OF_10G_RX_PORTS) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_RX_PORT port id")) +#else +#define CHECK_PORT_ID_10G_RX_PORTS(_relativePortId) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_RX_PORT port id")) +#endif +#if (FM_MAX_NUM_OF_1G_TX_PORTS > 0) +#define CHECK_PORT_ID_1G_TX_PORTS(_relativePortId) \ + if ((_relativePortId) >= FM_MAX_NUM_OF_1G_TX_PORTS) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_TX_PORT port id")) +#else +#define CHECK_PORT_ID_1G_TX_PORTS(_relativePortId) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 1G_TX_PORT port id")) +#endif +#if (FM_MAX_NUM_OF_10G_TX_PORTS > 0) +#define CHECK_PORT_ID_10G_TX_PORTS(_relativePortId) \ + if ((_relativePortId) >= FM_MAX_NUM_OF_10G_TX_PORTS) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_TX_PORT port id")) +#else +#define CHECK_PORT_ID_10G_TX_PORTS(_relativePortId) \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 10G_TX_PORT port id")) +#endif + + +#define SW_PORT_ID_TO_HW_PORT_ID(_port, _type, _relativePortId) \ +switch(_type) { \ + case(e_FM_PORT_TYPE_OH_OFFLINE_PARSING): \ + case(e_FM_PORT_TYPE_OH_HOST_COMMAND): \ + CHECK_PORT_ID_OH_PORTS(_relativePortId); \ + _port = (uint8_t)(BASE_OH_PORTID + (_relativePortId)); \ + break; \ + case(e_FM_PORT_TYPE_RX): \ + CHECK_PORT_ID_1G_RX_PORTS(_relativePortId); \ + _port = (uint8_t)(BASE_1G_RX_PORTID + (_relativePortId)); \ + break; \ + case(e_FM_PORT_TYPE_RX_10G): \ + CHECK_PORT_ID_10G_RX_PORTS(_relativePortId); \ + _port = (uint8_t)(BASE_10G_RX_PORTID + (_relativePortId)); \ + break; \ + case(e_FM_PORT_TYPE_TX): \ + CHECK_PORT_ID_1G_TX_PORTS(_relativePortId); \ + _port = (uint8_t)(BASE_1G_TX_PORTID + (_relativePortId)); \ + break; \ + case(e_FM_PORT_TYPE_TX_10G): \ + CHECK_PORT_ID_10G_TX_PORTS(_relativePortId); \ + _port = (uint8_t)(BASE_10G_TX_PORTID + (_relativePortId)); \ + break; \ + default: \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal port type")); \ + _port = 0; \ + break; \ +} + +#define HW_PORT_ID_TO_SW_PORT_ID(_relativePortId, hardwarePortId) \ +{ if (((hardwarePortId) >= BASE_OH_PORTID) && \ + ((hardwarePortId) < BASE_OH_PORTID+FM_MAX_NUM_OF_OH_PORTS)) \ + _relativePortId = (uint8_t)((hardwarePortId)-BASE_OH_PORTID); \ + else if (((hardwarePortId) >= BASE_10G_TX_PORTID) && \ + ((hardwarePortId) < BASE_10G_TX_PORTID+FM_MAX_NUM_OF_10G_TX_PORTS)) \ + _relativePortId = (uint8_t)((hardwarePortId)-BASE_10G_TX_PORTID); \ + else if (((hardwarePortId) >= BASE_1G_TX_PORTID) && \ + ((hardwarePortId) < BASE_1G_TX_PORTID+FM_MAX_NUM_OF_1G_TX_PORTS)) \ + _relativePortId = (uint8_t)((hardwarePortId)-BASE_1G_TX_PORTID); \ + else if (((hardwarePortId) 
>= BASE_10G_RX_PORTID) && \ + ((hardwarePortId) < BASE_10G_RX_PORTID+FM_MAX_NUM_OF_10G_RX_PORTS)) \ + _relativePortId = (uint8_t)((hardwarePortId)-BASE_10G_RX_PORTID); \ + else if (((hardwarePortId) >= BASE_1G_RX_PORTID) && \ + ((hardwarePortId) < BASE_1G_RX_PORTID+FM_MAX_NUM_OF_1G_RX_PORTS)) \ + _relativePortId = (uint8_t)((hardwarePortId)-BASE_1G_RX_PORTID); \ + else { \ + _relativePortId = (uint8_t)DUMMY_PORT_ID; \ + ASSERT_COND(TRUE); \ + } \ +} + +#define HW_PORT_ID_TO_SW_PORT_INDX(swPortIndex, hardwarePortId) \ +do { \ + if (((hardwarePortId) >= BASE_OH_PORTID) && ((hardwarePortId) < BASE_OH_PORTID+FM_MAX_NUM_OF_OH_PORTS)) \ + swPortIndex = (uint8_t)((hardwarePortId)-BASE_OH_PORTID+FM_PCD_PORT_OH_BASE_INDX); \ + else if (((hardwarePortId) >= BASE_1G_RX_PORTID) && \ + ((hardwarePortId) < BASE_1G_RX_PORTID+FM_MAX_NUM_OF_1G_RX_PORTS)) \ + swPortIndex = (uint8_t)((hardwarePortId)-BASE_1G_RX_PORTID+FM_PCD_PORT_1G_RX_BASE_INDX); \ + else if (((hardwarePortId) >= BASE_10G_RX_PORTID) && \ + ((hardwarePortId) < BASE_10G_RX_PORTID+FM_MAX_NUM_OF_10G_RX_PORTS)) \ + swPortIndex = (uint8_t)((hardwarePortId)-BASE_10G_RX_PORTID+FM_PCD_PORT_10G_RX_BASE_INDX); \ + else if (((hardwarePortId) >= BASE_1G_TX_PORTID) && \ + ((hardwarePortId) < BASE_1G_TX_PORTID+FM_MAX_NUM_OF_1G_TX_PORTS)) \ + swPortIndex = (uint8_t)((hardwarePortId)-BASE_1G_TX_PORTID+FM_PCD_PORT_1G_TX_BASE_INDX); \ + else if (((hardwarePortId) >= BASE_10G_TX_PORTID) && \ + ((hardwarePortId) < BASE_10G_TX_PORTID+FM_MAX_NUM_OF_10G_TX_PORTS)) \ + swPortIndex = (uint8_t)((hardwarePortId)-BASE_10G_TX_PORTID+FM_PCD_PORT_10G_TX_BASE_INDX); \ + else ASSERT_COND(FALSE); \ +} while (0) + +#define SW_PORT_INDX_TO_HW_PORT_ID(hardwarePortId, swPortIndex) \ +do { \ + if (((swPortIndex) >= FM_PCD_PORT_OH_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_1G_RX_BASE_INDX)) \ + hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_OH_BASE_INDX+BASE_OH_PORTID); \ + else if (((swPortIndex) >= FM_PCD_PORT_1G_RX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_10G_RX_BASE_INDX)) \ + hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_1G_RX_BASE_INDX+BASE_1G_RX_PORTID); \ + else if (((swPortIndex) >= FM_PCD_PORT_10G_RX_BASE_INDX) && ((swPortIndex) < FM_MAX_NUM_OF_PORTS)) \ + hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_10G_RX_BASE_INDX+BASE_10G_RX_PORTID); \ + else if (((swPortIndex) >= FM_PCD_PORT_1G_TX_BASE_INDX) && ((swPortIndex) < FM_PCD_PORT_10G_TX_BASE_INDX)) \ + hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_1G_TX_BASE_INDX+BASE_1G_TX_PORTID); \ + else if (((swPortIndex) >= FM_PCD_PORT_10G_TX_BASE_INDX) && ((swPortIndex) < FM_MAX_NUM_OF_PORTS)) \ + hardwarePortId = (uint8_t)((swPortIndex)-FM_PCD_PORT_10G_TX_BASE_INDX+BASE_10G_TX_PORTID); \ + else ASSERT_COND(FALSE); \ +} while (0) + +#define BMI_FIFO_UNITS 0x100 + +typedef struct { + void (*f_Isr) (t_Handle h_Arg); + t_Handle h_SrcHandle; + uint8_t guestId; +} t_FmIntrSrc; + +#define ILLEGAL_HDR_NUM 0xFF +#define NO_HDR_NUM FM_PCD_PRS_NUM_OF_HDRS + +#define IS_PRIVATE_HEADER(hdr) (((hdr) == HEADER_TYPE_USER_DEFINED_SHIM1) || \ + ((hdr) == HEADER_TYPE_USER_DEFINED_SHIM2)) +#define IS_SPECIAL_HEADER(hdr) ((hdr) == HEADER_TYPE_MACSEC) + +#define GET_PRS_HDR_NUM(num, hdr) \ +switch(hdr) \ +{ case(HEADER_TYPE_ETH): num = 0; break; \ + case(HEADER_TYPE_LLC_SNAP): num = 1; break; \ + case(HEADER_TYPE_VLAN): num = 2; break; \ + case(HEADER_TYPE_PPPoE): num = 3; break; \ + case(HEADER_TYPE_MPLS): num = 4; break; \ + case(HEADER_TYPE_IPv4): num = 5; break; \ + case(HEADER_TYPE_IPv6): num = 6; break; \ + 
case(HEADER_TYPE_GRE): num = 7; break; \ + case(HEADER_TYPE_MINENCAP): num = 8; break; \ + case(HEADER_TYPE_USER_DEFINED_L3): num = 9; break; \ + case(HEADER_TYPE_TCP): num = 10; break; \ + case(HEADER_TYPE_UDP): num = 11; break; \ + case(HEADER_TYPE_IPSEC_AH): \ + case(HEADER_TYPE_IPSEC_ESP): num = 12; break; \ + case(HEADER_TYPE_SCTP): num = 13; break; \ + case(HEADER_TYPE_DCCP): num = 14; break; \ + case(HEADER_TYPE_USER_DEFINED_L4): num = 15; break; \ + case(HEADER_TYPE_USER_DEFINED_SHIM1): \ + case(HEADER_TYPE_USER_DEFINED_SHIM2): \ + case(HEADER_TYPE_MACSEC): \ + num = NO_HDR_NUM; break; \ + default: \ + REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Unsupported header for parser"));\ + num = ILLEGAL_HDR_NUM; break; \ +} + +/***********************************************************************/ +/* Policer defines */ +/***********************************************************************/ +#define FM_PCD_PLCR_PAR_GO 0x80000000 +#define FM_PCD_PLCR_PAR_PWSEL_MASK 0x0000FFFF +#define FM_PCD_PLCR_PAR_R 0x40000000 + +/* shifts */ +#define FM_PCD_PLCR_PAR_PNUM_SHIFT 16 + + +/***********************************************************************/ +/* Keygen defines */ +/***********************************************************************/ +/* maskes */ +#define KG_SCH_PP_SHIFT_HIGH 0x80000000 +#define KG_SCH_PP_NO_GEN 0x10000000 +#define KG_SCH_PP_SHIFT_LOW 0x0000F000 +#define KG_SCH_MODE_NIA_PLCR 0x40000000 +#define KG_SCH_GEN_EXTRACT_TYPE 0x00008000 +#define KG_SCH_BITMASK_MASK 0x000000FF +#define KG_SCH_GEN_VALID 0x80000000 +#define KG_SCH_GEN_MASK 0x00FF0000 +#define FM_PCD_KG_KGAR_ERR 0x20000000 +#define FM_PCD_KG_KGAR_SEL_CLS_PLAN_ENTRY 0x01000000 +#define FM_PCD_KG_KGAR_SEL_PORT_ENTRY 0x02000000 +#define FM_PCD_KG_KGAR_SEL_PORT_WSEL_SP 0x00008000 +#define FM_PCD_KG_KGAR_SEL_PORT_WSEL_CPP 0x00004000 +#define FM_PCD_KG_KGAR_WSEL_MASK 0x0000FF00 +#define KG_SCH_HASH_CONFIG_NO_FQID 0x80000000 +#define KG_SCH_HASH_CONFIG_SYM 0x40000000 + +#define FM_PCD_KG_KGAR_GO 0x80000000 +#define FM_PCD_KG_KGAR_READ 0x40000000 +#define FM_PCD_KG_KGAR_WRITE 0x00000000 +#define FM_PCD_KG_KGAR_SEL_SCHEME_ENTRY 0x00000000 +#define FM_PCD_KG_KGAR_SCHEME_WSEL_UPDATE_CNT 0x00008000 + + +typedef uint32_t t_KnownFieldsMasks; + +#define KG_SCH_KN_PORT_ID 0x80000000 +#define KG_SCH_KN_MACDST 0x40000000 +#define KG_SCH_KN_MACSRC 0x20000000 +#define KG_SCH_KN_TCI1 0x10000000 +#define KG_SCH_KN_TCI2 0x08000000 +#define KG_SCH_KN_ETYPE 0x04000000 +#define KG_SCH_KN_PPPSID 0x02000000 +#define KG_SCH_KN_PPPID 0x01000000 +#define KG_SCH_KN_MPLS1 0x00800000 +#define KG_SCH_KN_MPLS2 0x00400000 +#define KG_SCH_KN_MPLS_LAST 0x00200000 +#define KG_SCH_KN_IPSRC1 0x00100000 +#define KG_SCH_KN_IPDST1 0x00080000 +#define KG_SCH_KN_PTYPE1 0x00040000 +#define KG_SCH_KN_IPTOS_TC1 0x00020000 +#define KG_SCH_KN_IPV6FL1 0x00010000 +#define KG_SCH_KN_IPSRC2 0x00008000 +#define KG_SCH_KN_IPDST2 0x00004000 +#define KG_SCH_KN_PTYPE2 0x00002000 +#define KG_SCH_KN_IPTOS_TC2 0x00001000 +#define KG_SCH_KN_IPV6FL2 0x00000800 +#define KG_SCH_KN_GREPTYPE 0x00000400 +#define KG_SCH_KN_IPSEC_SPI 0x00000200 +#define KG_SCH_KN_IPSEC_NH 0x00000100 +#define KG_SCH_KN_L4PSRC 0x00000004 +#define KG_SCH_KN_L4PDST 0x00000002 +#define KG_SCH_KN_TFLG 0x00000001 + +typedef uint8_t t_GenericCodes; + +#define KG_SCH_GEN_SHIM1 0x70 +#define KG_SCH_GEN_DEFAULT 0x10 +#define KG_SCH_GEN_PARSE_RESULT_N_FQID 0x20 +#define KG_SCH_GEN_START_OF_FRM 0x40 +#define KG_SCH_GEN_SHIM2 0x71 +#define KG_SCH_GEN_IP_PID_NO_V 0x72 +#define KG_SCH_GEN_ETH 0x03 +#define 
KG_SCH_GEN_ETH_NO_V 0x73 +#define KG_SCH_GEN_SNAP 0x04 +#define KG_SCH_GEN_SNAP_NO_V 0x74 +#define KG_SCH_GEN_VLAN1 0x05 +#define KG_SCH_GEN_VLAN1_NO_V 0x75 +#define KG_SCH_GEN_VLAN2 0x06 +#define KG_SCH_GEN_VLAN2_NO_V 0x76 +#define KG_SCH_GEN_ETH_TYPE 0x07 +#define KG_SCH_GEN_ETH_TYPE_NO_V 0x77 +#define KG_SCH_GEN_PPP 0x08 +#define KG_SCH_GEN_PPP_NO_V 0x78 +#define KG_SCH_GEN_MPLS1 0x09 +#define KG_SCH_GEN_MPLS2 0x19 +#define KG_SCH_GEN_MPLS3 0x29 +#define KG_SCH_GEN_MPLS1_NO_V 0x79 +#define KG_SCH_GEN_MPLS_LAST 0x0a +#define KG_SCH_GEN_MPLS_LAST_NO_V 0x7a +#define KG_SCH_GEN_IPV4 0x0b +#define KG_SCH_GEN_IPV6 0x1b +#define KG_SCH_GEN_L3_NO_V 0x7b +#define KG_SCH_GEN_IPV4_TUNNELED 0x0c +#define KG_SCH_GEN_IPV6_TUNNELED 0x1c +#define KG_SCH_GEN_MIN_ENCAP 0x2c +#define KG_SCH_GEN_IP2_NO_V 0x7c +#define KG_SCH_GEN_GRE 0x0d +#define KG_SCH_GEN_GRE_NO_V 0x7d +#define KG_SCH_GEN_TCP 0x0e +#define KG_SCH_GEN_UDP 0x1e +#define KG_SCH_GEN_IPSEC_AH 0x2e +#define KG_SCH_GEN_SCTP 0x3e +#define KG_SCH_GEN_DCCP 0x4e +#define KG_SCH_GEN_IPSEC_ESP 0x6e +#define KG_SCH_GEN_L4_NO_V 0x7e +#define KG_SCH_GEN_NEXTHDR 0x7f + +/* shifts */ +#define KG_SCH_PP_SHIFT_HIGH_SHIFT 27 +#define KG_SCH_PP_SHIFT_LOW_SHIFT 12 +#define KG_SCH_PP_MASK_SHIFT 16 +#define KG_SCH_MODE_CCOBASE_SHIFT 24 +#define KG_SCH_DEF_MAC_ADDR_SHIFT 30 +#define KG_SCH_DEF_TCI_SHIFT 28 +#define KG_SCH_DEF_ENET_TYPE_SHIFT 26 +#define KG_SCH_DEF_PPP_SESSION_ID_SHIFT 24 +#define KG_SCH_DEF_PPP_PROTOCOL_ID_SHIFT 22 +#define KG_SCH_DEF_MPLS_LABEL_SHIFT 20 +#define KG_SCH_DEF_IP_ADDR_SHIFT 18 +#define KG_SCH_DEF_PROTOCOL_TYPE_SHIFT 16 +#define KG_SCH_DEF_IP_TOS_TC_SHIFT 14 +#define KG_SCH_DEF_IPV6_FLOW_LABEL_SHIFT 12 +#define KG_SCH_DEF_IPSEC_SPI_SHIFT 10 +#define KG_SCH_DEF_L4_PORT_SHIFT 8 +#define KG_SCH_DEF_TCP_FLAG_SHIFT 6 +#define KG_SCH_HASH_CONFIG_SHIFT_SHIFT 24 +#define KG_SCH_GEN_MASK_SHIFT 16 +#define KG_SCH_GEN_HT_SHIFT 8 +#define KG_SCH_GEN_SIZE_SHIFT 24 +#define KG_SCH_GEN_DEF_SHIFT 29 +#define FM_PCD_KG_KGAR_NUM_SHIFT 16 + + +/* others */ +#define NUM_OF_SW_DEFAULTS 3 +#define MAX_PP_SHIFT 15 +#define MAX_KG_SCH_SIZE 16 +#define MASK_FOR_GENERIC_BASE_ID 0x20 +#define MAX_HASH_SHIFT 40 +#define MAX_KG_SCH_FQID_BIT_OFFSET 31 +#define MAX_KG_SCH_PP_BIT_OFFSET 15 +#define MAX_DIST_FQID_SHIFT 23 + +#define GET_MASK_SEL_SHIFT(shift,i) \ +switch(i) { \ + case(0):shift = 26;break; \ + case(1):shift = 20;break; \ + case(2):shift = 10;break; \ + case(3):shift = 4;break; \ + default: \ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);\ +} + +#define GET_MASK_OFFSET_SHIFT(shift,i) \ +switch(i) { \ + case(0):shift = 16;break; \ + case(1):shift = 0;break; \ + case(2):shift = 28;break; \ + case(3):shift = 24;break; \ + default: \ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);\ +} + +#define GET_MASK_SHIFT(shift,i) \ +switch(i) { \ + case(0):shift = 24;break; \ + case(1):shift = 16;break; \ + case(2):shift = 8;break; \ + case(3):shift = 0;break; \ + default: \ + RETURN_ERROR(MAJOR, E_INVALID_VALUE, NO_MSG);\ +} + +#define FM_PCD_MAX_NUM_OF_OPTIONS(clsPlanEntries) ((clsPlanEntries==256)? 8:((clsPlanEntries==128)? 7: ((clsPlanEntries==64)? 6: ((clsPlanEntries==32)? 
5:0)))) + +typedef struct { + uint16_t num; + uint8_t hardwarePortId; + uint16_t plcrProfilesBase; +} t_FmPortPcdInterModulePlcrParams; + +/**************************************************************************//** + @Description A structure for initializing a keygen classification plan group +*//***************************************************************************/ +typedef struct t_FmPcdKgInterModuleClsPlanGrpParams { + uint8_t netEnvId; /* IN */ + bool grpExists; /* OUT (unused in FmPcdKgBuildClsPlanGrp)*/ + uint8_t clsPlanGrpId; /* OUT */ + bool emptyClsPlanGrp; /* OUT */ + uint8_t numOfOptions; /* OUT in FmPcdGetSetClsPlanGrpParams IN in FmPcdKgBuildClsPlanGrp*/ + protocolOpt_t options[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)]; + /* OUT in FmPcdGetSetClsPlanGrpParams IN in FmPcdKgBuildClsPlanGrp*/ + uint32_t optVectors[FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)]; + /* OUT in FmPcdGetSetClsPlanGrpParams IN in FmPcdKgBuildClsPlanGrp*/ +} t_FmPcdKgInterModuleClsPlanGrpParams; + +typedef struct t_FmInterModulePortRxPoolsParams +{ + uint8_t numOfPools; + uint16_t secondLargestBufSize; + uint16_t largestBufSize; +} t_FmInterModulePortRxPoolsParams; + + +typedef t_Error (t_FmPortGetSetCcParamsCallback) (t_Handle h_FmPort, + t_FmPortGetSetCcParams *p_FmPortGetSetCcParams); + + +t_Handle FmPcdGetHcHandle(t_Handle h_FmPcd); +uint32_t FmPcdGetSwPrsOffset(t_Handle h_FmPcd, e_NetHeaderType hdr, uint8_t indexPerHdr); +uint32_t FmPcdGetLcv(t_Handle h_FmPcd, uint32_t netEnvId, uint8_t hdrNum); +uint32_t FmPcdGetMacsecLcv(t_Handle h_FmPcd, uint32_t netEnvId); +void FmPcdIncNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId); +void FmPcdDecNetEnvOwners(t_Handle h_FmPcd, uint8_t netEnvId); +void FmPcdPortRegister(t_Handle h_FmPcd, t_Handle h_FmPort, uint8_t hardwarePortId); +uint32_t FmPcdLock(t_Handle h_FmPcd); +void FmPcdUnlock(t_Handle h_FmPcd, uint32_t intFlags); +bool FmPcdNetEnvIsHdrExist(t_Handle h_FmPcd, uint8_t netEnvId, e_NetHeaderType hdr); +bool FmPcdIsIpFrag(t_Handle h_FmPcd, uint8_t netEnvId); + +t_Error FmPcdCcReleaseModifiedDataStructure(t_Handle h_FmPcd, t_List *h_FmPcdOldPointersLst, t_List *h_FmPcdNewPointersLst, uint16_t numOfGoodChanges, t_Handle *h_Params); +uint32_t FmPcdCcGetNodeAddrOffset(t_Handle h_FmPcd, t_Handle h_Pointer); +t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint8_t keyIndex, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams); +t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPCdCcKeyParams, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_Params); +t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams); +t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_FmPcdCcKeyParams, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams); +t_Error FmPcdCcModifyMissNextEngineParamNode(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,t_List *h_OldPointer, t_List *h_NewPointer,t_Handle *h_AdditionalParams); +t_Error FmPcdCcModifyNextEngineParamTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, t_List *h_OldLst, t_List *h_NewLst, t_Handle *h_AdditionalParams); +t_Error 
FmPcdCcModiyNextEngineParamNode(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, uint8_t keyIndex,t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,t_List *h_OldPointer, t_List *h_NewPointer,t_Handle *h_AdditionalParams); +uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd, t_Handle h_Pointer); +t_Error FmPcdCcTreeTryLock(t_Handle h_FmPcdCcTree); +t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd,t_Handle h_FmPcdCcNode, t_List *p_List); +void FmPcdCcTreeReleaseLock(t_Handle h_FmPcdCcTree); +void FmPcdCcNodeTreeReleaseLock(t_List *p_List); +t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree, uint8_t manipIndx); +void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree, t_Handle h_SavedManipParams, uint8_t manipIndx); + +bool FmPcdKgIsSchemeValidSw(t_Handle h_FmPcd, uint8_t schemeId); +uint8_t FmPcdKgGetClsPlanGrpBase(t_Handle h_FmPcd, uint8_t clsPlanGrp); +uint16_t FmPcdKgGetClsPlanGrpSize(t_Handle h_FmPcd, uint8_t clsPlanGrp); + +t_Error FmPcdKgBuildScheme(t_Handle h_FmPcd, t_FmPcdKgSchemeParams *p_Scheme, t_FmPcdKgInterModuleSchemeRegs *p_SchemeRegs); +t_Error FmPcdKgBuildClsPlanGrp(t_Handle h_FmPcd, t_FmPcdKgInterModuleClsPlanGrpParams *p_Grp, t_FmPcdKgInterModuleClsPlanSet *p_ClsPlanSet); +uint8_t FmPcdKgGetNumOfPartitionSchemes(t_Handle h_FmPcd); +uint8_t FmPcdKgGetPhysicalSchemeId(t_Handle h_FmPcd, uint8_t schemeId); +uint8_t FmPcdKgGetRelativeSchemeId(t_Handle h_FmPcd, uint8_t schemeId); +void FmPcdKgDestroyClsPlanGrp(t_Handle h_FmPcd, uint8_t grpId); +void FmPcdKgValidateSchemeSw(t_Handle h_FmPcd, uint8_t schemeId); +void FmPcdKgInvalidateSchemeSw(t_Handle h_FmPcd, uint8_t schemeId); +t_Error FmPcdKgCheckInvalidateSchemeSw(t_Handle h_FmPcd, uint8_t schemeId); +t_Error FmPcdKgBuildBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPortToSchemes, uint32_t *p_SpReg, bool add); +void FmPcdKgIncSchemeOwners(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort); +void FmPcdKgDecSchemeOwners(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_BindPort); +bool FmPcdKgIsDriverClsPlan(t_Handle h_FmPcd); +bool FmPcdKgHwSchemeIsValid(uint32_t schemeModeReg); +uint32_t FmPcdKgBuildCppReg(t_Handle h_FmPcd, uint8_t clsPlanGrpId); +uint32_t FmPcdKgBuildWriteSchemeActionReg(uint8_t schemeId, bool updateCounter); +uint32_t FmPcdKgBuildReadSchemeActionReg(uint8_t schemeId); +uint32_t FmPcdKgBuildWriteClsPlanBlockActionReg(uint8_t grpId); +uint32_t FmPcdKgBuildReadClsPlanBlockActionReg(uint8_t grpId); +uint32_t FmPcdKgBuildWritePortSchemeBindActionReg(uint8_t hardwarePortId); +uint32_t FmPcdKgBuildReadPortSchemeBindActionReg(uint8_t hardwarePortId); +uint32_t FmPcdKgBuildWritePortClsPlanBindActionReg(uint8_t hardwarePortId); +uint8_t FmPcdKgGetSchemeSwId(t_Handle h_FmPcd, uint8_t schemeHwId); +t_Error FmPcdKgSchemeTryLock(t_Handle h_FmPcd, uint8_t schemeId, bool intr); +void FmPcdKgReleaseSchemeLock(t_Handle h_FmPcd, uint8_t schemeId); +void FmPcdKgUpatePointedOwner(t_Handle h_FmPcd, uint8_t schemeId, bool add); + +t_Error FmPcdKgBindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind); +t_Error FmPcdKgUnbindPortToSchemes(t_Handle h_FmPcd , t_FmPcdKgInterModuleBindPortToSchemes *p_SchemeBind); +uint32_t FmPcdKgGetRequiredAction(t_Handle h_FmPcd, uint8_t schemeId); +uint32_t FmPcdKgGetPointedOwners(t_Handle h_FmPcd, uint8_t schemeId); +e_FmPcdDoneAction FmPcdKgGetDoneAction(t_Handle h_FmPcd, uint8_t schemeId); +e_FmPcdEngine FmPcdKgGetNextEngine(t_Handle h_FmPcd, uint8_t schemeId); +void 
FmPcdKgUpdateRequiredAction(t_Handle h_FmPcd, uint8_t schemeId, uint32_t requiredAction); +bool FmPcdKgIsDirectPlcr(t_Handle h_FmPcd, uint8_t schemeId); +bool FmPcdKgIsDistrOnPlcrProfile(t_Handle h_FmPcd, uint8_t schemeId); +uint16_t FmPcdKgGetRelativeProfileId(t_Handle h_FmPcd, uint8_t schemeId); + +/* FM-PCD parser API routines */ +t_Error FmPcdPrsIncludePortInStatistics(t_Handle p_FmPcd, uint8_t hardwarePortId, bool include); + +/* FM-PCD policer API routines */ +t_Error FmPcdPlcrAllocProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId, uint16_t numOfProfiles); +t_Error FmPcdPlcrFreeProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId); +bool FmPcdPlcrIsProfileValid(t_Handle h_FmPcd, uint16_t absoluteProfileId); +uint16_t FmPcdPlcrGetPortProfilesBase(t_Handle h_FmPcd, uint8_t hardwarePortId); +uint16_t FmPcdPlcrGetPortNumOfProfiles(t_Handle h_FmPcd, uint8_t hardwarePortId); +uint32_t FmPcdPlcrBuildWritePlcrActionRegs(uint16_t absoluteProfileId); +uint32_t FmPcdPlcrBuildCounterProfileReg(e_FmPcdPlcrProfileCounters counter); +uint32_t FmPcdPlcrBuildWritePlcrActionReg(uint16_t absoluteProfileId); +uint32_t FmPcdPlcrBuildReadPlcrActionReg(uint16_t absoluteProfileId); +t_Error FmPcdPlcrBuildProfile(t_Handle h_FmPcd, t_FmPcdPlcrProfileParams *p_Profile, t_FmPcdPlcrInterModuleProfileRegs *p_PlcrRegs); +t_Error FmPcdPlcrGetAbsoluteProfileId(t_Handle h_FmPcd, + e_FmPcdProfileTypeSelection profileType, + t_Handle h_FmPort, + uint16_t relativeProfile, + uint16_t *p_AbsoluteId); +void FmPcdPlcrInvalidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId); +void FmPcdPlcrValidateProfileSw(t_Handle h_FmPcd, uint16_t absoluteProfileId); +bool FmPcdPlcrHwProfileIsValid(uint32_t profileModeReg); +t_Error FmPcdPlcrProfileTryLock(t_Handle h_FmPcd, uint16_t profileId, bool intr); +void FmPcdPlcrReleaseProfileLock(t_Handle h_FmPcd, uint16_t profileId); +uint32_t FmPcdPlcrGetRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId); +uint32_t FmPcdPlcrGetPointedOwners(t_Handle h_FmPcd, uint16_t absoluteProfileId); +void FmPcdPlcrUpatePointedOwner(t_Handle h_FmPcd, uint16_t absoluteProfileId, bool add); +uint32_t FmPcdPlcrBuildNiaProfileReg(bool green, bool yellow, bool red); +void FmPcdPlcrUpdateRequiredAction(t_Handle h_FmPcd, uint16_t absoluteProfileId, uint32_t requiredAction); + +/* FM-PCD Coarse-Classification API routines */ +uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode); +uint8_t FmPcdCcGetOffset(t_Handle h_CcNode); + +t_Error FmPcdManipUpdate(t_Handle h_FmPcd, t_Handle h_FmPort, t_Handle h_Manip, t_Handle h_Ad, bool validate, int level, t_Handle h_FmTree, bool modify); +t_Error FmPortGetSetCcParams(t_Handle h_FmPort, t_FmPortGetSetCcParams *p_FmPortGetSetCcParams); +uint32_t FmPcdManipGetRequiredAction (t_Handle h_Manip); +t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_CcTree, uint32_t *p_Offset,t_Handle h_FmPort); +t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_CcTree); + +t_Error FmPcdPlcrCcGetSetParams(t_Handle h_FmPcd, uint16_t profileIndx,uint32_t requiredAction); +t_Error FmPcdKgCcGetSetParams(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t requiredAction); + +uint8_t FmPortGetNetEnvId(t_Handle h_FmPort); +uint8_t FmPortGetHardwarePortId(t_Handle h_FmPort); +uint32_t FmPortGetPcdEngines(t_Handle h_FmPort); +void FmPortPcdKgSwUnbindClsPlanGrp (t_Handle h_FmPort); +t_Error FmPortAttachPCD(t_Handle h_FmPort); +t_Error FmPcdKgSetOrBindToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t netEnvId, protocolOpt_t *p_OptArray, uint8_t *p_ClsPlanGrpId, bool 
*p_IsEmptyClsPlanGrp); +t_Error FmPcdKgDeleteOrUnbindPortToClsPlanGrp(t_Handle h_FmPcd, uint8_t hardwarePortId, uint8_t clsPlanGrpId); + + +/**************************************************************************//** + @Function FmRegisterIntr + + @Description Used to register an inter-module event handler to be processed by FM + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] mod The module that causes the event + @Param[in] modId Module id - if more than 1 instansiation of this + mode exists,0 otherwise. + @Param[in] intrType Interrupt type (error/normal) selection. + @Param[in] f_Isr The interrupt service routine. + @Param[in] h_Arg Argument to be passed to f_Isr. + + @Return None. +*//***************************************************************************/ +void FmRegisterIntr(t_Handle h_Fm, + e_FmEventModules mod, + uint8_t modId, + e_FmIntrType intrType, + void (*f_Isr) (t_Handle h_Arg), + t_Handle h_Arg); + +/**************************************************************************//** + @Function FmUnregisterIntr + + @Description Used to un-register an inter-module event handler that was processed by FM + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] mod The module that causes the event + @Param[in] modId Module id - if more than 1 instansiation of this + mode exists,0 otherwise. + @Param[in] intrType Interrupt type (error/normal) selection. + + @Return None. +*//***************************************************************************/ +void FmUnregisterIntr(t_Handle h_Fm, + e_FmEventModules mod, + uint8_t modId, + e_FmIntrType intrType); + +/**************************************************************************//** + @Function FmRegisterFmCtlIntr + + @Description Used to register to one of the fmCtl events in the FM module + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] eventRegId FmCtl event id (0-7). + @Param[in] f_Isr The interrupt service routine. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +void FmRegisterFmCtlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Fm, uint32_t event)); + + +/**************************************************************************//** + @Description enum for defining MAC types +*//***************************************************************************/ +typedef enum e_FmMacType { + e_FM_MAC_10G = 0, /**< 10G MAC */ + e_FM_MAC_1G /**< 1G MAC */ +} e_FmMacType; + +/**************************************************************************//** + @Description Structure for port-FM communication during FM_PORT_Init. + Fields commented 'IN' are passed by the port module to be used + by the FM module. + Fields commented 'OUT' will be filled by FM before returning to port. + Some fields are optional (depending on configuration) and + will be analized by the port and FM modules accordingly. +*//***************************************************************************/ +typedef struct t_FmInterModulePortInitParams { + uint8_t hardwarePortId; /**< IN. port Id */ + e_FmPortType portType; /**< IN. Port type */ + bool independentMode; /**< IN. TRUE if FM Port operates in independent mode */ + uint16_t liodnOffset; /**< IN. Port's requested resource */ + uint8_t numOfTasks; /**< IN. Port's requested resource */ + uint8_t numOfExtraTasks; /**< IN. Port's requested resource */ + uint8_t numOfOpenDmas; /**< IN. 
Port's requested resource */ + uint8_t numOfExtraOpenDmas; /**< IN. Port's requested resource */ + uint32_t sizeOfFifo; /**< IN. Port's requested resource */ + uint32_t extraSizeOfFifo; /**< IN. Port's requested resource */ + uint8_t deqPipelineDepth; /**< IN. Port's requested resource */ + uint16_t liodnBase; /**< IN. Irrelevant for P4080 rev 1. + LIODN base for this port, to be + used together with LIODN offset. */ + t_FmPhysAddr fmMuramPhysBaseAddr;/**< OUT. FM-MURAM physical address*/ +} t_FmInterModulePortInitParams; + +/**************************************************************************//** + @Description Structure for port-FM communication during FM_PORT_Free. +*//***************************************************************************/ +typedef struct t_FmInterModulePortFreeParams { + uint8_t hardwarePortId; /**< IN. port Id */ + e_FmPortType portType; /**< IN. Port type */ +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + uint8_t deqPipelineDepth; /**< IN. Port's requested resource */ +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ +} t_FmInterModulePortFreeParams; + +/**************************************************************************//** + @Function FmGetPcdPrsBaseAddr + + @Description Get the base address of the Parser from the FM module + + @Param[in] h_Fm A handle to an FM Module. + + @Return Base address. +*//***************************************************************************/ +uintptr_t FmGetPcdPrsBaseAddr(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmGetPcdKgBaseAddr + + @Description Get the base address of the Keygen from the FM module + + @Param[in] h_Fm A handle to an FM Module. + + @Return Base address. +*//***************************************************************************/ +uintptr_t FmGetPcdKgBaseAddr(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmGetPcdPlcrBaseAddr + + @Description Get the base address of the Policer from the FM module + + @Param[in] h_Fm A handle to an FM Module. + + @Return Base address. +*//***************************************************************************/ +uintptr_t FmGetPcdPlcrBaseAddr(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmGetMuramHandle + + @Description Get the handle of the MURAM from the FM module + + @Param[in] h_Fm A handle to an FM Module. + + @Return MURAM module handle. +*//***************************************************************************/ +t_Handle FmGetMuramHandle(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmGetPhysicalMuramBase + + @Description Get the physical base address of the MURAM from the FM module + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] fmPhysAddr Physical MURAM base + + @Return Physical base address. +*//***************************************************************************/ +void FmGetPhysicalMuramBase(t_Handle h_Fm, t_FmPhysAddr *fmPhysAddr); + +/**************************************************************************//** + @Function FmGetTimeStampScale + + @Description Used internally by other modules in order to get the timeStamp + period as requested by the application. + + @Param[in] h_Fm A handle to an FM Module. + + @Return TimeStamp period in nanoseconds. + + @Cautions Allowed only following FM_Init(). 
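+
+                Illustrative sketch only (not part of the original
+                documentation; tsStart, tsEnd and elapsedNs are
+                hypothetical caller-side variables holding raw
+                timestamp counter values):
+
+                    uint32_t periodNs  = FmGetTimeStampScale(h_Fm);
+                    uint64_t elapsedNs = (uint64_t)(tsEnd - tsStart) * periodNs;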
+*//***************************************************************************/ +uint32_t FmGetTimeStampScale(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmResumeStalledPort + + @Description Used internally by FM port to release a stalled port. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] hardwarePortId HW port id. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FmResumeStalledPort(t_Handle h_Fm, uint8_t hardwarePortId); + +/**************************************************************************//** + @Function FmIsPortStalled + + @Description Used internally by FM port to read the port's status. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] hardwarePortId HW port id. + @Param[in] p_IsStalled A pointer to the boolean port stalled state + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FmIsPortStalled(t_Handle h_Fm, uint8_t hardwarePortId, bool *p_IsStalled); + +/**************************************************************************//** + @Function FmResetMac + + @Description Used by MAC driver to reset the MAC registers + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] type MAC type. + @Param[in] macId MAC id - according to type. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FmResetMac(t_Handle h_Fm, e_FmMacType type, uint8_t macId); + +/**************************************************************************//** + @Function FmGetClockFreq + + @Description Used by MAC driver to get the FM clock frequency + + @Param[in] h_Fm A handle to an FM Module. + + @Return clock-freq on success; 0 otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +uint16_t FmGetClockFreq(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmGetId + + @Description Used by PCD driver to read rhe FM id + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +uint8_t FmGetId(t_Handle h_Fm); + +/**************************************************************************//** + @Function FmGetSetPortParams + + @Description Used by FM-PORT driver to pass and receive parameters between + PORT and FM modules. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in,out] p_PortParams A structure of FM Port parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FmGetSetPortParams(t_Handle h_Fm,t_FmInterModulePortInitParams *p_PortParams); + +/**************************************************************************//** + @Function FmFreePortParams + + @Description Used by FM-PORT driver to free port's resources within the FM. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in,out] p_PortParams A structure of FM Port parameters. 
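+
+                 Illustrative call sequence (hypothetical caller-side
+                 variables, not taken from this header):
+
+                     t_FmInterModulePortFreeParams freeParams;
+                     memset(&freeParams, 0, sizeof(freeParams));
+                     freeParams.hardwarePortId = hardwarePortId;
+                     freeParams.portType       = portType;
+                     FmFreePortParams(h_Fm, &freeParams);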
+ + @Return None. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +void FmFreePortParams(t_Handle h_Fm,t_FmInterModulePortFreeParams *p_PortParams); + +/**************************************************************************//** + @Function FmSetPortToWorkWithOneRiscOnly + + @Description Used by FM-PORT driver to pass parameter between + PORT and FM modules for working with number of RISC.. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in,out] p_PortParams A structure of FM Port parameters. + + @Return None. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FmSetNumOfRiscsPerPort(t_Handle h_Fm, uint8_t hardwarePortId, uint8_t numOfFmanCtrls); + + +void FmRegisterPcd(t_Handle h_Fm, t_Handle h_FmPcd); +void FmUnregisterPcd(t_Handle h_Fm); +t_Handle FmGetPcdHandle(t_Handle h_Fm); +bool FmRamsEccIsExternalCtl(t_Handle h_Fm); +t_Error FmEnableRamsEcc(t_Handle h_Fm); +t_Error FmDisableRamsEcc(t_Handle h_Fm); +void FmGetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo); +t_Error FmAllocFmanCtrlEventReg(t_Handle h_Fm, uint8_t *p_EventId); +void FmFreeFmanCtrlEventReg(t_Handle h_Fm, uint8_t eventId); +void FmSetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, uint32_t enableEvents); +uint32_t FmGetFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId); +void FmRegisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId, void (*f_Isr) (t_Handle h_Fm, uint32_t event), t_Handle h_Arg); +void FmUnregisterFmanCtrlIntr(t_Handle h_Fm, uint8_t eventRegId); +t_Error FmSetMacMaxFrame(t_Handle h_Fm, e_FmMacType type, uint8_t macId, uint16_t mtu); +bool FmIsMaster(t_Handle h_Fm); +uint8_t FmGetGuestId(t_Handle h_Fm); +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +t_Error Fm10GTxEccWorkaround(t_Handle h_Fm, uint8_t macId); +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ + +void FmMuramClear(t_Handle h_FmMuram); +t_Error FmSetNumOfOpenDmas(t_Handle h_Fm, + uint8_t hardwarePortId, + uint8_t numOfOpenDmas, + uint8_t numOfExtraOpenDmas, + bool initialConfig); +t_Error FmSetNumOfTasks(t_Handle h_Fm, + uint8_t hardwarePortId, + uint8_t numOfTasks, + uint8_t numOfExtraTasks, + bool initialConfig); +t_Error FmSetSizeOfFifo(t_Handle h_Fm, + uint8_t hardwarePortId, + e_FmPortType portType, + bool independentMode, + uint32_t *p_SizeOfFifo, + uint32_t extraSizeOfFifo, + uint8_t deqPipelineDepth, + t_FmInterModulePortRxPoolsParams *p_RxPoolsParams, + bool initialConfig); + + +#endif /* __FM_COMMON_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/Peripherals/FM/inc/fm_hc.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/Peripherals/FM/inc/fm_hc.h @@ -0,0 +1,86 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __FM_HC_H +#define __FM_HC_H + +#include "std_ext.h" +#include "error_ext.h" + + +#define __ERR_MODULE__ MODULE_FM_PCD + + +typedef struct t_FmHcParams { + t_Handle h_Fm; + t_Handle h_FmPcd; + t_FmPcdHcParams params; +} t_FmHcParams; + + +t_Handle FmHcConfigAndInit(t_FmHcParams *p_FmHcParams); +void FmHcFree(t_Handle h_FmHc); +t_Error FmHcDumpRegs(t_Handle h_FmHc); + +void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd); + +t_Handle FmHcPcdKgSetScheme(t_Handle h_FmHc, t_FmPcdKgSchemeParams *p_Scheme); +t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme); +t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams ); +t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set); +t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t clsPlanGrpId); + +t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value); +uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme); + +t_Error FmHcPcdCcModifyTreeNextEngine(t_Handle h_FmHc, t_Handle h_CcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams); +t_Error FmHcPcdCcModifyNodeNextEngine(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams); +t_Error FmHcPcdCcModifyNodeMissNextEngine(t_Handle h_FmHc, t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams); +t_Error FmHcPcdCcRemoveKey(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex); +t_Error FmHcPcdCcAddKey(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams); +t_Error FmHcPcdCcModifyKeyAndNextEngine(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams); +t_Error FmHcPcdCcModifyKey(t_Handle h_FmHc, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask); + +t_Handle FmHcPcdPlcrSetProfile(t_Handle h_FmHc,t_FmPcdPlcrProfileParams *p_Profile); +t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile); + +t_Error FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value); +uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter); + +t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add); +t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, 
uint32_t cppReg); + +t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t requiredAction); +t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction); + + +#endif /* __FM_HC_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +obj-y += fsl-ncsw-etc.o + +fsl-ncsw-etc-objs := mm.o memcpy.o sprint.o list.o error.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/error.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/error.c @@ -0,0 +1,118 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + + @File error.c + + @Description General errors and events reporting utilities. 
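+
+ Illustrative note (an assumption about intended use, not stated in
+ this file): the string tables below appear to be indexed by the
+ corresponding debug-level and error-code enums in declaration order,
+ so a report would look roughly like
+
+     XX_Print("[%s] %s\n", dbgLevelStrings[level], errTypeStrings[err]);
+
+ where 'level' and 'err' are hypothetical enum values.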
+*//***************************************************************************/ + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + +const char *dbgLevelStrings[] = +{ + "CRITICAL" + ,"MAJOR" + ,"MINOR" + ,"WARNING" + ,"INFO" + ,"TRACE" +}; + +const char *errTypeStrings[] = +{ + "Invalid State" /* E_INVALID_STATE */ + ,"Invalid Operation" /* E_INVALID_OPERATION */ + ,"Unsupported Operation" /* E_NOT_SUPPORTED */ + ,"No Device" /* E_NO_DEVICE */ + ,"Invalid Handle" /* E_INVALID_HANDLE */ + ,"Invalid ID" /* E_INVALID_ID */ + ,"Unexpected NULL Pointer" /* E_NULL_POINTER */ + ,"Invalid Value" /* E_INVALID_VALUE */ + ,"Invalid Selection" /* E_INVALID_SELECTION */ + ,"Invalid Communication Mode" /* E_INVALID_COMM_MODE */ + ,"Invalid Byte Order" /* E_INVALID_BYTE_ORDER */ + ,"Invalid Memory Type" /* E_INVALID_MEMORY_TYPE */ + ,"Invalid Interrupt Queue" /* E_INVALID_INTR_QUEUE */ + ,"Invalid Priority" /* E_INVALID_PRIORITY */ + ,"Invalid Clock" /* E_INVALID_CLOCK */ + ,"Invalid Rate" /* E_INVALID_RATE */ + ,"Invalid Address" /* E_INVALID_ADDRESS */ + ,"Invalid Bus" /* E_INVALID_BUS */ + ,"Conflict In Bus Selection" /* E_BUS_CONFLICT */ + ,"Conflict In Settings" /* E_CONFLICT */ + ,"Incorrect Alignment" /* E_NOT_ALIGNED */ + ,"Value Out Of Range" /* E_NOT_IN_RANGE */ + ,"Invalid Frame" /* E_INVALID_FRAME */ + ,"Frame Is Empty" /* E_EMPTY_FRAME */ + ,"Buffer Is Empty" /* E_EMPTY_BUFFER */ + ,"Memory Allocation Failed" /* E_NO_MEMORY */ + ,"Resource Not Found" /* E_NOT_FOUND */ + ,"Resource Is Unavailable" /* E_NOT_AVAILABLE */ + ,"Resource Already Exists" /* E_ALREADY_EXISTS */ + ,"Resource Is Full" /* E_FULL */ + ,"Resource Is Empty" /* E_EMPTY */ + ,"Resource Is Busy" /* E_BUSY */ + ,"Resource Already Free" /* E_ALREADY_FREE */ + ,"Read Access Failed" /* E_READ_FAILED */ + ,"Write Access Failed" /* E_WRITE_FAILED */ + ,"Send Operation Failed" /* E_SEND_FAILED */ + ,"Receive Operation Failed" /* E_RECEIVE_FAILED */ + ,"Operation Timed Out" /* E_TIMEOUT */ +}; + + +#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0)) + +const char *eventStrings[] = +{ + "Rx Discard" /* EV_RX_DISCARD */ + ,"Rx Error" /* EV_RX_ERROR */ + ,"Tx Error" /* EV_TX_ERROR */ + ,"No Buffer Objects" /* EV_NO_BUFFERS */ + ,"No MB-Frame Objects" /* EV_NO_MB_FRAMES */ + ,"No SB-Frame Objects" /* EV_NO_SB_FRAMES */ + ,"Tx Queue Is Full" /* EV_TX_QUEUE_FULL */ + ,"Rx Queue Is Full" /* EV_RX_QUEUE_FULL */ + ,"Interrupts Queue Is Full" /* EV_INTR_QUEUE_FULL */ + ,"Data Buffer Is Unavailable" /* EV_NO_DATA_BUFFER */ + ,"Objects Pool Is Empty" /* EV_OBJ_POOL_EMPTY */ + ,"Illegal bus access" /* EV_BUS_ERROR */ + ,"PTP Tx Timestamps Queue Is Full" /* EV_PTP_TXTS_QUEUE_FULL */ + ,"PTP Rx Timestamps Queue Is Full" /* EV_PTP_RXTS_QUEUE_FULL */ +}; + +#endif /* (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0)) */ + +#endif /* (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) */ + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/list.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/list.c @@ -0,0 +1,70 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + + @File list.c + + @Description Implementation of list. +*//***************************************************************************/ +#include "std_ext.h" +#include "list_ext.h" + + +void LIST_Append(t_List *p_NewList, t_List *p_Head) +{ + t_List *p_First = LIST_FIRST(p_NewList); + + if (p_First != p_NewList) + { + t_List *p_Last = LIST_LAST(p_NewList); + t_List *p_Cur = LIST_NEXT(p_Head); + + LIST_PREV(p_First) = p_Head; + LIST_FIRST(p_Head) = p_First; + LIST_NEXT(p_Last) = p_Cur; + LIST_LAST(p_Cur) = p_Last; + } +} + + +int LIST_NumOfObjs(t_List *p_List) +{ + t_List *p_Tmp; + int numOfObjs = 0; + + if (!LIST_IsEmpty(p_List)) + LIST_FOR_EACH(p_Tmp, p_List) + numOfObjs++; + + return numOfObjs; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/memcpy.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/memcpy.c @@ -0,0 +1,665 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#include "std_ext.h" +#include "xx_ext.h" +#include "memcpy_ext.h" + + +#ifdef CORE_8BIT_ACCESS_ERRATA +static void MY_MY_WRITE_UINT8(uint8_t *addr, uint8_t val) +{ + uint32_t newAddr, newVal; + newAddr = (uint32_t)addr & ~0x3L; + switch ((uint32_t)addr%4) + { + case (0): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0x00ffffff) | (((uint32_t)val)<<24); + WRITE_UINT32(*(uint32_t*)newAddr, newVal); + break; + case (1): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0xff00ffff) | (((uint32_t)val)<<16); + WRITE_UINT32(*(uint32_t*)newAddr, newVal); + break; + case (2): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0xffff00ff) | (((uint32_t)val)<<8); + WRITE_UINT32(*(uint32_t*)newAddr, newVal); + break; + case (3): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0xffffff00) | val; + WRITE_UINT32(*(uint32_t*)newAddr, newVal); + break; + } +} + +static uint8_t MY_MY_GET_UINT8(uint8_t *addr) +{ + uint32_t newAddr, newVal=0; + newAddr = (uint32_t)addr & ~0x3L; + switch ((uint32_t)addr%4) + { + case (0): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0xff000000)>>24; + break; + case (1): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0x00ff0000)>>16; + break; + case (2): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0x0000ff00)>>8; + break; + case (3): + newVal = GET_UINT32(*(uint32_t*)newAddr); + newVal = (newVal & 0x000000ff); + break; + } + + return (uint8_t)newVal; +} + +#define MY_WRITE_UINT8(addr,val) MY_MY_WRITE_UINT8(&addr,val) +#define MY_GET_UINT8(addr) MY_MY_GET_UINT8(&addr) +#else +#define MY_WRITE_UINT8 WRITE_UINT8 +#define MY_GET_UINT8 GET_UINT8 +#endif /* CORE_8BIT_ACCESS_ERRATA */ + + +void * MemCpy32(void* pDst,void* pSrc, uint32_t size) +{ + uint32_t leftAlign; + uint32_t rightAlign; + uint32_t lastWord; + uint32_t currWord; + uint32_t *p_Src32; + uint32_t *p_Dst32; + uint8_t *p_Src8; + uint8_t *p_Dst8; + + p_Src8 = (uint8_t*)(pSrc); + p_Dst8 = (uint8_t*)(pDst); + /* first copy byte by byte till the source first alignment + * this step is necessary to ensure we do not even try to access + * data which is before the source buffer, hence it is not ours. 
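+     *
+     * Worked illustration (example addresses only, not part of the
+     * original comment): for MemCpy32(pDst=0x2001, pSrc=0x1003, 12),
+     * one byte is copied to align the source, two more to align the
+     * destination, one 32-bit word is then written by stitching two
+     * misaligned source words together with shifts, and the last
+     * five bytes are copied by the trailing byte loop.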
+ */ + while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ + { + *p_Dst8++ = *p_Src8++; + size--; + } + + /* align destination (possibly disaligning source)*/ + while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ + { + *p_Dst8++ = *p_Src8++; + size--; + } + + /* dest is aligned and source is not necessarily aligned */ + leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ + rightAlign = 32 - leftAlign; + + + if (leftAlign == 0) + { + /* source is also aligned */ + p_Src32 = (uint32_t*)(p_Src8); + p_Dst32 = (uint32_t*)(p_Dst8); + while (size >> 2) /* size >= 4 */ + { + *p_Dst32++ = *p_Src32++; + size -= 4; + } + p_Src8 = (uint8_t*)(p_Src32); + p_Dst8 = (uint8_t*)(p_Dst32); + } + else + { + /* source is not aligned (destination is aligned)*/ + p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); + p_Dst32 = (uint32_t*)(p_Dst8); + lastWord = *p_Src32++; + while(size >> 3) /* size >= 8 */ + { + currWord = *p_Src32; + *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign); + lastWord = currWord; + p_Src32++; + p_Dst32++; + size -= 4; + } + p_Dst8 = (uint8_t*)(p_Dst32); + p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); + } + + /* complete the left overs */ + while (size--) + *p_Dst8++ = *p_Src8++; + + return pDst; +} + +void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size) +{ + uint32_t leftAlign; + uint32_t rightAlign; + uint32_t lastWord; + uint32_t currWord; + uint32_t *p_Src32; + uint32_t *p_Dst32; + uint8_t *p_Src8; + uint8_t *p_Dst8; + + p_Src8 = (uint8_t*)(pSrc); + p_Dst8 = (uint8_t*)(pDst); + /* first copy byte by byte till the source first alignment + * this step is necessary to ensure we do not even try to access + * data which is before the source buffer, hence it is not ours. 
+ */ + while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ + { + MY_WRITE_UINT8(*p_Dst8, MY_GET_UINT8(*p_Src8)); + p_Dst8++;p_Src8++; + size--; + } + + /* align destination (possibly disaligning source)*/ + while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ + { + MY_WRITE_UINT8(*p_Dst8, MY_GET_UINT8(*p_Src8)); + p_Dst8++;p_Src8++; + size--; + } + + /* dest is aligned and source is not necessarily aligned */ + leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ + rightAlign = 32 - leftAlign; + + if (leftAlign == 0) + { + /* source is also aligned */ + p_Src32 = (uint32_t*)(p_Src8); + p_Dst32 = (uint32_t*)(p_Dst8); + while (size >> 2) /* size >= 4 */ + { + WRITE_UINT32(*p_Dst32, GET_UINT32(*p_Src32)); + p_Dst32++;p_Src32++; + size -= 4; + } + p_Src8 = (uint8_t*)(p_Src32); + p_Dst8 = (uint8_t*)(p_Dst32); + } + else + { + /* source is not aligned (destination is aligned)*/ + p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); + p_Dst32 = (uint32_t*)(p_Dst8); + lastWord = GET_UINT32(*p_Src32); + p_Src32++; + while(size >> 3) /* size >= 8 */ + { + currWord = GET_UINT32(*p_Src32); + WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign)); + lastWord = currWord; + p_Src32++;p_Dst32++; + size -= 4; + } + p_Dst8 = (uint8_t*)(p_Dst32); + p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); + } + + /* complete the left overs */ + while (size--) + { + MY_WRITE_UINT8(*p_Dst8, MY_GET_UINT8(*p_Src8)); + p_Dst8++;p_Src8++; + } + + return pDst; +} + +void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size) +{ + uint32_t leftAlign; + uint32_t rightAlign; + uint32_t lastWord; + uint32_t currWord; + uint32_t *p_Src32; + uint32_t *p_Dst32; + uint8_t *p_Src8; + uint8_t *p_Dst8; + + p_Src8 = (uint8_t*)(pSrc); + p_Dst8 = (uint8_t*)(pDst); + /* first copy byte by byte till the source first alignment + * this step is necessary to ensure we do not even try to access + * data which is before the source buffer, hence it is not ours. 
+ */ + while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ + { + MY_WRITE_UINT8(*p_Dst8, *p_Src8); + p_Dst8++;p_Src8++; + size--; + } + + /* align destination (possibly disaligning source)*/ + while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ + { + MY_WRITE_UINT8(*p_Dst8, *p_Src8); + p_Dst8++;p_Src8++; + size--; + } + + /* dest is aligned and source is not necessarily aligned */ + leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ + rightAlign = 32 - leftAlign; + + if (leftAlign == 0) + { + /* source is also aligned */ + p_Src32 = (uint32_t*)(p_Src8); + p_Dst32 = (uint32_t*)(p_Dst8); + while (size >> 2) /* size >= 4 */ + { + WRITE_UINT32(*p_Dst32, *p_Src32); + p_Dst32++;p_Src32++; + size -= 4; + } + p_Src8 = (uint8_t*)(p_Src32); + p_Dst8 = (uint8_t*)(p_Dst32); + } + else + { + /* source is not aligned (destination is aligned)*/ + p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); + p_Dst32 = (uint32_t*)(p_Dst8); + lastWord = *p_Src32++; + while(size >> 3) /* size >= 8 */ + { + currWord = *p_Src32; + WRITE_UINT32(*p_Dst32, (lastWord << leftAlign) | (currWord >> rightAlign)); + lastWord = currWord; + p_Src32++;p_Dst32++; + size -= 4; + } + p_Dst8 = (uint8_t*)(p_Dst32); + p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); + } + + /* complete the left overs */ + while (size--) + { + MY_WRITE_UINT8(*p_Dst8, *p_Src8); + p_Dst8++;p_Src8++; + } + + return pDst; +} + +void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size) +{ + uint32_t leftAlign; + uint32_t rightAlign; + uint32_t lastWord; + uint32_t currWord; + uint32_t *p_Src32; + uint32_t *p_Dst32; + uint8_t *p_Src8; + uint8_t *p_Dst8; + + p_Src8 = (uint8_t*)(pSrc); + p_Dst8 = (uint8_t*)(pDst); + /* first copy byte by byte till the source first alignment + * this step is necessary to ensure we do not even try to access + * data which is before the source buffer, hence it is not ours. 
+ */ + while((PTR_TO_UINT(p_Src8) & 3) && size) /* (pSrc mod 4) > 0 and size > 0 */ + { + *p_Dst8 = MY_GET_UINT8(*p_Src8); + p_Dst8++;p_Src8++; + size--; + } + + /* align destination (possibly disaligning source)*/ + while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ + { + *p_Dst8 = MY_GET_UINT8(*p_Src8); + p_Dst8++;p_Src8++; + size--; + } + + /* dest is aligned and source is not necessarily aligned */ + leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 3) << 3); /* leftAlign = (pSrc mod 4)*8 */ + rightAlign = 32 - leftAlign; + + if (leftAlign == 0) + { + /* source is also aligned */ + p_Src32 = (uint32_t*)(p_Src8); + p_Dst32 = (uint32_t*)(p_Dst8); + while (size >> 2) /* size >= 4 */ + { + *p_Dst32 = GET_UINT32(*p_Src32); + p_Dst32++;p_Src32++; + size -= 4; + } + p_Src8 = (uint8_t*)(p_Src32); + p_Dst8 = (uint8_t*)(p_Dst32); + } + else + { + /* source is not aligned (destination is aligned)*/ + p_Src32 = (uint32_t*)(p_Src8 - (leftAlign >> 3)); + p_Dst32 = (uint32_t*)(p_Dst8); + lastWord = GET_UINT32(*p_Src32); + p_Src32++; + while(size >> 3) /* size >= 8 */ + { + currWord = GET_UINT32(*p_Src32); + *p_Dst32 = (lastWord << leftAlign) | (currWord >> rightAlign); + lastWord = currWord; + p_Src32++;p_Dst32++; + size -= 4; + } + p_Dst8 = (uint8_t*)(p_Dst32); + p_Src8 = (uint8_t*)(p_Src32) - 4 + (leftAlign >> 3); + } + + /* complete the left overs */ + while (size--) + { + *p_Dst8 = MY_GET_UINT8(*p_Src8); + p_Dst8++;p_Src8++; + } + + return pDst; +} + +void * MemCpy64(void* pDst,void* pSrc, uint32_t size) +{ + uint32_t leftAlign; + uint32_t rightAlign; + uint64_t lastWord; + uint64_t currWord; + uint64_t *pSrc64; + uint64_t *pDst64; + uint8_t *p_Src8; + uint8_t *p_Dst8; + + p_Src8 = (uint8_t*)(pSrc); + p_Dst8 = (uint8_t*)(pDst); + /* first copy byte by byte till the source first alignment + * this step is necessarily to ensure we do not even try to access + * data which is before the source buffer, hence it is not ours. 
+ */ + while((PTR_TO_UINT(p_Src8) & 7) && size) /* (pSrc mod 8) > 0 and size > 0 */ + { + *p_Dst8++ = *p_Src8++; + size--; + } + + /* align destination (possibly disaligning source)*/ + while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */ + { + *p_Dst8++ = *p_Src8++; + size--; + } + + /* dest is aligned and source is not necessarily aligned */ + leftAlign = (uint32_t)((PTR_TO_UINT(p_Src8) & 7) << 3); /* leftAlign = (pSrc mod 8)*8 */ + rightAlign = 64 - leftAlign; + + + if (leftAlign == 0) + { + /* source is also aligned */ + pSrc64 = (uint64_t*)(p_Src8); + pDst64 = (uint64_t*)(p_Dst8); + while (size >> 3) /* size >= 8 */ + { + *pDst64++ = *pSrc64++; + size -= 8; + } + p_Src8 = (uint8_t*)(pSrc64); + p_Dst8 = (uint8_t*)(pDst64); + } + else + { + /* source is not aligned (destination is aligned)*/ + pSrc64 = (uint64_t*)(p_Src8 - (leftAlign >> 3)); + pDst64 = (uint64_t*)(p_Dst8); + lastWord = *pSrc64++; + while(size >> 4) /* size >= 16 */ + { + currWord = *pSrc64; + *pDst64 = (lastWord << leftAlign) | (currWord >> rightAlign); + lastWord = currWord; + pSrc64++; + pDst64++; + size -= 8; + } + p_Dst8 = (uint8_t*)(pDst64); + p_Src8 = (uint8_t*)(pSrc64) - 8 + (leftAlign >> 3); + } + + /* complete the left overs */ + while (size--) + *p_Dst8++ = *p_Src8++; + + return pDst; +} + +void * MemSet32(void* pDst, uint8_t val, uint32_t size) +{ + uint32_t val32; + uint32_t *p_Dst32; + uint8_t *p_Dst8; + + p_Dst8 = (uint8_t*)(pDst); + + /* generate four 8-bit val's in 32-bit container */ + val32 = (uint32_t) val; + val32 |= (val32 << 8); + val32 |= (val32 << 16); + + /* align destination to 32 */ + while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ + { + *p_Dst8++ = val; + size--; + } + + /* 32-bit chunks */ + p_Dst32 = (uint32_t*)(p_Dst8); + while (size >> 2) /* size >= 4 */ + { + *p_Dst32++ = val32; + size -= 4; + } + + /* complete the leftovers */ + p_Dst8 = (uint8_t*)(p_Dst32); + while (size--) + *p_Dst8++ = val; + + return pDst; +} + +void * IOMemSet32(void* pDst, uint8_t val, uint32_t size) +{ + uint32_t val32; + uint32_t *p_Dst32; + uint8_t *p_Dst8; + + p_Dst8 = (uint8_t*)(pDst); + + /* generate four 8-bit val's in 32-bit container */ + val32 = (uint32_t) val; + val32 |= (val32 << 8); + val32 |= (val32 << 16); + + /* align destination to 32 */ + while((PTR_TO_UINT(p_Dst8) & 3) && size) /* (pDst mod 4) > 0 and size > 0 */ + { + MY_WRITE_UINT8(*p_Dst8, val); + p_Dst8++; + size--; + } + + /* 32-bit chunks */ + p_Dst32 = (uint32_t*)(p_Dst8); + while (size >> 2) /* size >= 4 */ + { + WRITE_UINT32(*p_Dst32, val32); + p_Dst32++; + size -= 4; + } + + /* complete the leftovers */ + p_Dst8 = (uint8_t*)(p_Dst32); + while (size--) + { + MY_WRITE_UINT8(*p_Dst8, val); + p_Dst8++; + } + + return pDst; +} + +void * MemSet64(void* pDst, uint8_t val, uint32_t size) +{ + uint64_t val64; + uint64_t *pDst64; + uint8_t *p_Dst8; + + p_Dst8 = (uint8_t*)(pDst); + + /* generate four 8-bit val's in 32-bit container */ + val64 = (uint64_t) val; + val64 |= (val64 << 8); + val64 |= (val64 << 16); + val64 |= (val64 << 24); + val64 |= (val64 << 32); + + /* align destination to 64 */ + while((PTR_TO_UINT(p_Dst8) & 7) && size) /* (pDst mod 8) > 0 and size > 0 */ + { + *p_Dst8++ = val; + size--; + } + + /* 64-bit chunks */ + pDst64 = (uint64_t*)(p_Dst8); + while (size >> 4) /* size >= 8 */ + { + *pDst64++ = val64; + size -= 8; + } + + /* complete the leftovers */ + p_Dst8 = (uint8_t*)(pDst64); + while (size--) + *p_Dst8++ = val; + + return pDst; +} + +void MemDisp(uint8_t *p, int size) 
+{ + uint32_t space = (uint32_t)(PTR_TO_UINT(p) & 0x3); + uint8_t *p_Limit; + + if (space) + { + p_Limit = (p - space + 4); + + XX_Print("0x%08X: ", (p - space)); + + while (space--) + { + XX_Print("--"); + } + while (size && (p < p_Limit)) + { + XX_Print("%02x", *(uint8_t*)p); + size--; + p++; + } + + XX_Print(" "); + p_Limit += 12; + + while ((size > 3) && (p < p_Limit)) + { + XX_Print("%08x ", *(uint32_t*)p); + size -= 4; + p += 4; + } + XX_Print("\r\n"); + } + + while (size > 15) + { + XX_Print("0x%08X: %08x %08x %08x %08x\r\n", + p, *(uint32_t *)p, *(uint32_t *)(p + 4), + *(uint32_t *)(p + 8), *(uint32_t *)(p + 12)); + size -= 16; + p += 16; + } + + if (size) + { + XX_Print("0x%08X: ", p); + + while (size > 3) + { + XX_Print("%08x ", *(uint32_t *)p); + size -= 4; + p += 4; + } + while (size) + { + XX_Print("%02x", *(uint8_t *)p); + size--; + p++; + } + + XX_Print("\r\n"); + } +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/mm.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/mm.c @@ -0,0 +1,1109 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "string_ext.h" +#include "error_ext.h" +#include "std_ext.h" +#include "sprint_ext.h" +#include "part_ext.h" +#include "xx_ext.h" + +#include "mm.h" + + + + +/********************************************************************** + * MM internal routines set * + **********************************************************************/ + +/**************************************************************** + * Routine: CreateBusyBlock + * + * Description: + * Initializes a new busy block of "size" bytes and started + * rom "base" address. Each busy block has a name that + * specified the purpose of the memory allocation. 
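+ *
+ * Illustrative call (hypothetical values, documentation only):
+ *     CreateBusyBlock(0x00100000, 0x200, "rx buffer pool");
+ * records base 0x00100000 and end 0x00100200, with the name truncated
+ * to at most MM_MAX_NAME_LEN-1 characters.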
+ * + * Arguments: + * base - base address of the busy block + * size - size of the busy block + * name - name that specified the busy block + * + * Return value: + * A pointer to new created structure returned on success; + * Otherwise, NULL. + ****************************************************************/ +static t_BusyBlock * CreateBusyBlock(uint64_t base, uint64_t size, char *name) +{ + t_BusyBlock *p_BusyBlock; + uint32_t n; + + p_BusyBlock = (t_BusyBlock *)XX_Malloc(sizeof(t_BusyBlock)); + if ( !p_BusyBlock ) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + return NULL; + } + + p_BusyBlock->base = base; + p_BusyBlock->end = base + size; + + n = strlen(name); + if (n >= MM_MAX_NAME_LEN) + n = MM_MAX_NAME_LEN - 1; + strncpy(p_BusyBlock->name, name, MM_MAX_NAME_LEN-1); + p_BusyBlock->name[n] = '\0'; + p_BusyBlock->p_Next = 0; + + return p_BusyBlock; +} + +/**************************************************************** + * Routine: CreateNewBlock + * + * Description: + * Initializes a new memory block of "size" bytes and started + * from "base" address. + * + * Arguments: + * base - base address of the memory block + * size - size of the memory block + * + * Return value: + * A pointer to new created structure returned on success; + * Otherwise, NULL. + ****************************************************************/ +static t_MemBlock * CreateNewBlock(uint64_t base, uint64_t size) +{ + t_MemBlock *p_MemBlock; + + p_MemBlock = (t_MemBlock *)XX_Malloc(sizeof(t_MemBlock)); + if ( !p_MemBlock ) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + return NULL; + } + + p_MemBlock->base = base; + p_MemBlock->end = base+size; + p_MemBlock->p_Next = 0; + + return p_MemBlock; +} + +/**************************************************************** + * Routine: CreateFreeBlock + * + * Description: + * Initializes a new free block of of "size" bytes and + * started from "base" address. + * + * Arguments: + * base - base address of the free block + * size - size of the free block + * + * Return value: + * A pointer to new created structure returned on success; + * Otherwise, NULL. + ****************************************************************/ +static t_FreeBlock * CreateFreeBlock(uint64_t base, uint64_t size) +{ + t_FreeBlock *p_FreeBlock; + + p_FreeBlock = (t_FreeBlock *)XX_Malloc(sizeof(t_FreeBlock)); + if ( !p_FreeBlock ) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + return NULL; + } + + p_FreeBlock->base = base; + p_FreeBlock->end = base + size; + p_FreeBlock->p_Next = 0; + + return p_FreeBlock; +} + +/**************************************************************** + * Routine: AddFree + * + * Description: + * Adds a new free block to the free lists. It updates each + * free list to include a new free block. + * Note, that all free block in each free list are ordered + * by their base address. 
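+ *
+ * Illustrative note (summary of the loop below, not a normative
+ * statement): the same released range is recorded once per supported
+ * power-of-two alignment, with its base rounded up to that alignment,
+ * so that a later aligned request can be served directly from the
+ * matching free list.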
+ * + * Arguments: + * p_MM - pointer to the MM object + * base - base address of a given free block + * end - end address of a given free block + * + * Return value: + * + * + ****************************************************************/ +static t_Error AddFree(t_MM *p_MM, uint64_t base, uint64_t end) +{ + t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB; + uint64_t alignment; + uint64_t alignBase; + int i; + + /* Updates free lists to include a just released block */ + for (i=0; i <= MM_MAX_ALIGNMENT; i++) + { + p_PrevB = p_NewB = 0; + p_CurrB = p_MM->freeBlocks[i]; + + alignment = (uint64_t)(0x1 << i); + alignBase = MAKE_ALIGNED(base, alignment); + + /* Goes to the next free list if there is no block to free */ + if (alignBase >= end) + continue; + + /* Looks for a free block that should be updated */ + while ( p_CurrB ) + { + if ( alignBase <= p_CurrB->end ) + { + if ( end > p_CurrB->end ) + { + t_FreeBlock *p_NextB; + while ( p_CurrB->p_Next && end > p_CurrB->p_Next->end ) + { + p_NextB = p_CurrB->p_Next; + p_CurrB->p_Next = p_CurrB->p_Next->p_Next; + XX_Free(p_NextB); + } + + p_NextB = p_CurrB->p_Next; + if ( !p_NextB || (p_NextB && end < p_NextB->base) ) + { + p_CurrB->end = end; + } + else + { + p_CurrB->end = p_NextB->end; + p_CurrB->p_Next = p_NextB->p_Next; + XX_Free(p_NextB); + } + } + else if ( (end < p_CurrB->base) && ((end-alignBase) >= alignment) ) + { + if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + + p_NewB->p_Next = p_CurrB; + if (p_PrevB) + p_PrevB->p_Next = p_NewB; + else + p_MM->freeBlocks[i] = p_NewB; + break; + } + + if ((alignBase < p_CurrB->base) && (end >= p_CurrB->base)) + { + p_CurrB->base = alignBase; + } + + /* if size of the free block is less then alignment + * deletes that free block from the free list. */ + if ( (p_CurrB->end - p_CurrB->base) < alignment) + { + if ( p_PrevB ) + p_PrevB->p_Next = p_CurrB->p_Next; + else + p_MM->freeBlocks[i] = p_CurrB->p_Next; + XX_Free(p_CurrB); + } + break; + } + else + { + p_PrevB = p_CurrB; + p_CurrB = p_CurrB->p_Next; + } + } + + /* If no free block found to be updated, insert a new free block + * to the end of the free list. + */ + if ( !p_CurrB && ((((uint64_t)(end-base)) & ((uint64_t)(alignment-1))) == 0) ) + { + if ((p_NewB = CreateFreeBlock(alignBase, end-base)) == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + + if (p_PrevB) + p_PrevB->p_Next = p_NewB; + else + p_MM->freeBlocks[i] = p_NewB; + } + + /* Update boundaries of the new free block */ + if ((alignment == 1) && !p_NewB) + { + if ( p_CurrB && base > p_CurrB->base ) + base = p_CurrB->base; + if ( p_CurrB && end < p_CurrB->end ) + end = p_CurrB->end; + } + } + + return (E_OK); +} + +/**************************************************************** + * Routine: CutFree + * + * Description: + * Cuts a free block from holdBase to holdEnd from the free lists. + * That is, it updates all free lists of the MM object do + * not include a block of memory from holdBase to holdEnd. + * For each free lists it seek for a free block that holds + * either holdBase or holdEnd. If such block is found it updates it. + * + * Arguments: + * p_MM - pointer to the MM object + * holdBase - base address of the allocated block + * holdEnd - end address of the allocated block + * + * Return value: + * E_OK is returned on success, + * otherwise returns an error code. 
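+ *      For example (illustrative values), cutting [0x1010, 0x1050) out
+ *      of a free block [0x1000, 0x2000) leaves [0x1000, 0x1010) and
+ *      [0x1050, 0x2000) in the 1-byte-aligned list, while the
+ *      32-byte-aligned list keeps only [0x1060, 0x2000), because the
+ *      0x10-byte piece in front of the hold area is smaller than that
+ *      list's alignment.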
+ * + ****************************************************************/ +static t_Error CutFree(t_MM *p_MM, uint64_t holdBase, uint64_t holdEnd) +{ + t_FreeBlock *p_PrevB, *p_CurrB, *p_NewB; + uint64_t alignBase, base, end; + uint64_t alignment; + int i; + + for (i=0; i <= MM_MAX_ALIGNMENT; i++) + { + p_PrevB = p_NewB = 0; + p_CurrB = p_MM->freeBlocks[i]; + + alignment = (uint64_t)(0x1 << i); + alignBase = MAKE_ALIGNED(holdEnd, alignment); + + while ( p_CurrB ) + { + base = p_CurrB->base; + end = p_CurrB->end; + + if ( (holdBase <= base) && (holdEnd <= end) && (holdEnd > base) ) + { + if ( alignBase >= end || + (alignBase < end && ((end-alignBase) < alignment)) ) + { + if (p_PrevB) + p_PrevB->p_Next = p_CurrB->p_Next; + else + p_MM->freeBlocks[i] = p_CurrB->p_Next; + XX_Free(p_CurrB); + } + else + { + p_CurrB->base = alignBase; + } + break; + } + else if ( (holdBase > base) && (holdEnd <= end) ) + { + if ( (holdBase-base) >= alignment ) + { + if ( (alignBase < end) && ((end-alignBase) >= alignment) ) + { + if ((p_NewB = CreateFreeBlock(alignBase, end-alignBase)) == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + p_NewB->p_Next = p_CurrB->p_Next; + p_CurrB->p_Next = p_NewB; + } + p_CurrB->end = holdBase; + } + else if ( (alignBase < end) && ((end-alignBase) >= alignment) ) + { + p_CurrB->base = alignBase; + } + else + { + if (p_PrevB) + p_PrevB->p_Next = p_CurrB->p_Next; + else + p_MM->freeBlocks[i] = p_CurrB->p_Next; + XX_Free(p_CurrB); + } + break; + } + else + { + p_PrevB = p_CurrB; + p_CurrB = p_CurrB->p_Next; + } + } + } + + return (E_OK); +} + +/**************************************************************** + * Routine: AddBusy + * + * Description: + * Adds a new busy block to the list of busy blocks. Note, + * that all busy blocks are ordered by their base address in + * the busy list. + * + * Arguments: + * MM - handler to the MM object + * p_NewBusyB - pointer to the a busy block + * + * Return value: + * None. + * + ****************************************************************/ +static void AddBusy(t_MM *p_MM, t_BusyBlock *p_NewBusyB) +{ + t_BusyBlock *p_CurrBusyB, *p_PrevBusyB; + + /* finds a place of a new busy block in the list of busy blocks */ + p_PrevBusyB = 0; + p_CurrBusyB = p_MM->busyBlocks; + + while ( p_CurrBusyB && p_NewBusyB->base > p_CurrBusyB->base ) + { + p_PrevBusyB = p_CurrBusyB; + p_CurrBusyB = p_CurrBusyB->p_Next; + } + + /* insert the new busy block into the list of busy blocks */ + if ( p_CurrBusyB ) + p_NewBusyB->p_Next = p_CurrBusyB; + if ( p_PrevBusyB ) + p_PrevBusyB->p_Next = p_NewBusyB; + else + p_MM->busyBlocks = p_NewBusyB; +} + +/**************************************************************** + * Routine: CutBusy + * + * Description: + * Cuts a block from base to end from the list of busy blocks. + * This is done by updating the list of busy blocks do not + * include a given block, that block is going to be free. If a + * given block is a part of some other busy block, so that + * busy block is updated. If there are number of busy blocks + * included in the given block, so all that blocks are removed + * from the busy list and the end blocks are updated. + * If the given block devides some block into two parts, a new + * busy block is added to the busy list. + * + * Arguments: + * p_MM - pointer to the MM object + * base - base address of a given busy block + * end - end address of a given busy block + * + * Return value: + * E_OK on success, E_NOMEMORY otherwise. 
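+ *      For example (illustrative values), cutting [0x40, 0x80) out of a
+ *      busy block [0x00, 0x100) named "alpha" shrinks that block to
+ *      [0x00, 0x40) and inserts a new busy block [0x80, 0x100) that
+ *      keeps the same name.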
+ * + ****************************************************************/ +static t_Error CutBusy(t_MM *p_MM, uint64_t base, uint64_t end) +{ + t_BusyBlock *p_CurrB, *p_PrevB, *p_NewB; + + p_CurrB = p_MM->busyBlocks; + p_PrevB = p_NewB = 0; + + while ( p_CurrB ) + { + if ( base < p_CurrB->end ) + { + if ( end > p_CurrB->end ) + { + t_BusyBlock *p_NextB; + while ( p_CurrB->p_Next && end >= p_CurrB->p_Next->end ) + { + p_NextB = p_CurrB->p_Next; + p_CurrB->p_Next = p_CurrB->p_Next->p_Next; + XX_Free(p_NextB); + } + + p_NextB = p_CurrB->p_Next; + if ( p_NextB && end > p_NextB->base ) + { + p_NextB->base = end; + } + } + + if ( base <= p_CurrB->base ) + { + if ( end < p_CurrB->end && end > p_CurrB->base ) + { + p_CurrB->base = end; + } + else if ( end >= p_CurrB->end ) + { + if ( p_PrevB ) + p_PrevB->p_Next = p_CurrB->p_Next; + else + p_MM->busyBlocks = p_CurrB->p_Next; + XX_Free(p_CurrB); + } + } + else + { + if ( end < p_CurrB->end && end > p_CurrB->base ) + { + if ((p_NewB = CreateBusyBlock(end, + p_CurrB->end-end, + p_CurrB->name)) == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + p_NewB->p_Next = p_CurrB->p_Next; + p_CurrB->p_Next = p_NewB; + } + p_CurrB->end = base; + } + break; + } + else + { + p_PrevB = p_CurrB; + p_CurrB = p_CurrB->p_Next; + } + } + + return (E_OK); +} + +/**************************************************************** + * Routine: MmGetGreaterAlignment + * + * Description: + * Allocates a block of memory according to the given size + * and the alignment. That routine is called from the MM_Get + * routine if the required alignment is greater then MM_MAX_ALIGNMENT. + * In that case, it goes over free blocks of 64 byte align list + * and checks if it has the required size of bytes of the required + * alignment. If no blocks found returns ILLEGAL_BASE. + * After the block is found and data is allocated, it calls + * the internal CutFree routine to update all free lists + * do not include a just allocated block. Of course, each + * free list contains a free blocks with the same alignment. + * It is also creates a busy block that holds + * information about an allocated block. + * + * Arguments: + * MM - handle to the MM object + * size - size of the MM + * alignment - index as a power of two defines + * a required alignment that is greater then 64. + * name - the name that specifies an allocated block. + * + * Return value: + * base address of an allocated block. + * ILLEGAL_BASE if can't allocate a block + * + ****************************************************************/ +static uint64_t MmGetGreaterAlignment(t_MM *p_MM, uint64_t size, uint64_t alignment, char* name) +{ + t_FreeBlock *p_FreeB; + t_BusyBlock *p_NewBusyB; + uint64_t holdBase, holdEnd, alignBase = 0; + + /* goes over free blocks of the 64 byte alignment list + and look for a block of the suitable size and + base address according to the alignment. */ + p_FreeB = p_MM->freeBlocks[MM_MAX_ALIGNMENT]; + + while ( p_FreeB ) + { + alignBase = MAKE_ALIGNED(p_FreeB->base, alignment); + + /* the block is found if the aligned base inside the block + * and has the anough size. 
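+     * e.g. (illustrative values) a free block [0x100, 0x1000) satisfies
+     * a request for 0x200 bytes at 0x400-byte alignment, since
+     * MAKE_ALIGNED(0x100, 0x400) == 0x400 lies inside the block and
+     * leaves 0xc00 bytes up to its end.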
*/ + if ( alignBase >= p_FreeB->base && + alignBase < p_FreeB->end && + size <= (p_FreeB->end - alignBase) ) + break; + else + p_FreeB = p_FreeB->p_Next; + } + + /* If such block isn't found */ + if ( !p_FreeB ) + return (uint64_t)(ILLEGAL_BASE); + + holdBase = alignBase; + holdEnd = alignBase + size; + + /* init a new busy block */ + if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL) + return (uint64_t)(ILLEGAL_BASE); + + /* calls Update routine to update a lists of free blocks */ + if ( CutFree ( p_MM, holdBase, holdEnd ) != E_OK ) + return (uint64_t)(ILLEGAL_BASE); + + /* insert the new busy block into the list of busy blocks */ + AddBusy ( p_MM, p_NewBusyB ); + + return (holdBase); +} + + +/********************************************************************** + * MM API routines set * + **********************************************************************/ + +/*****************************************************************************/ +t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size) +{ + t_MM *p_MM; + uint64_t newBase, newSize; + int i; + + if (!size) + { + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Size (should be positive)")); + } + + /* Initializes a new MM object */ + p_MM = (t_MM *)XX_Malloc(sizeof(t_MM)); + if (!p_MM) + { + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + } + + p_MM->h_Spinlock = XX_InitSpinlock(); + if (!p_MM->h_Spinlock) + { + XX_Free(p_MM); + RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MM spinlock!")); + } + + /* initializes a new memory block */ + if ((p_MM->memBlocks = CreateNewBlock(base, size)) == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + + /* A busy list is empty */ + p_MM->busyBlocks = 0; + + /*Initializes a new free block for each free list*/ + for (i=0; i <= MM_MAX_ALIGNMENT; i++) + { + newBase = MAKE_ALIGNED( base, (0x1 << i) ); + newSize = size - (newBase - base); + + if ((p_MM->freeBlocks[i] = CreateFreeBlock(newBase, newSize)) == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + } + + *h_MM = p_MM; + + return (E_OK); +} + +/*****************************************************************************/ +void MM_Free(t_Handle h_MM) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_MemBlock *p_MemBlock; + t_BusyBlock *p_BusyBlock; + t_FreeBlock *p_FreeBlock; + void *p_Block; + int i; + + ASSERT_COND(p_MM); + + /* release memory allocated for busy blocks */ + p_BusyBlock = p_MM->busyBlocks; + while ( p_BusyBlock ) + { + p_Block = p_BusyBlock; + p_BusyBlock = p_BusyBlock->p_Next; + XX_Free(p_Block); + } + + /* release memory allocated for free blocks */ + for (i=0; i <= MM_MAX_ALIGNMENT; i++) + { + p_FreeBlock = p_MM->freeBlocks[i]; + while ( p_FreeBlock ) + { + p_Block = p_FreeBlock; + p_FreeBlock = p_FreeBlock->p_Next; + XX_Free(p_Block); + } + } + + /* release memory allocated for memory blocks */ + p_MemBlock = p_MM->memBlocks; + while ( p_MemBlock ) + { + p_Block = p_MemBlock; + p_MemBlock = p_MemBlock->p_Next; + XX_Free(p_Block); + } + + if (p_MM->h_Spinlock) + XX_FreeSpinlock(p_MM->h_Spinlock); + + /* release memory allocated for MM object itself */ + XX_Free(p_MM); +} + +/*****************************************************************************/ +uint64_t MM_Get(t_Handle h_MM, uint64_t size, uint64_t alignment, char* name) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_FreeBlock *p_FreeB; + t_BusyBlock *p_NewBusyB; + uint64_t holdBase, holdEnd, j, i = 0; + uint32_t intFlags; + + SANITY_CHECK_RETURN_VALUE(p_MM, E_INVALID_HANDLE, (uint64_t)ILLEGAL_BASE); + + /* checks that alignment value is greater then zero */ + if (alignment == 0) + { + 
alignment = 1; + } + + j = alignment; + + /* checks if alignment is a power of two, if it correct and if the + required size is multiple of the given alignment. */ + while ((j & 0x1) == 0) + { + i++; + j = j >> 1; + } + + /* if the given alignment isn't power of two, returns an error */ + if (j != 1) + { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("alignment (should be power of 2)")); + return (uint64_t)ILLEGAL_BASE; + } + + if (i > MM_MAX_ALIGNMENT) + { + return (MmGetGreaterAlignment(p_MM, size, alignment, name)); + } + + intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock); + /* look for a block of the size greater or equal to the required size. */ + p_FreeB = p_MM->freeBlocks[i]; + while ( p_FreeB && (p_FreeB->end - p_FreeB->base) < size ) + p_FreeB = p_FreeB->p_Next; + + /* If such block is found */ + if ( !p_FreeB ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + holdBase = p_FreeB->base; + holdEnd = holdBase + size; + + /* init a new busy block */ + if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* calls Update routine to update a lists of free blocks */ + if ( CutFree ( p_MM, holdBase, holdEnd ) != E_OK ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* insert the new busy block into the list of busy blocks */ + AddBusy ( p_MM, p_NewBusyB ); + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + + return (holdBase); +} + +/*****************************************************************************/ +uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char* name) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_FreeBlock *p_FreeB; + t_BusyBlock *p_NewBusyB; + uint32_t intFlags; + bool blockIsFree = FALSE; + + ASSERT_COND(p_MM); + + intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock); + p_FreeB = p_MM->freeBlocks[0]; /* The biggest free blocks are in the + free list with alignment 1 */ + + while ( p_FreeB ) + { + if ( base >= p_FreeB->base && (base+size) <= p_FreeB->end ) + { + blockIsFree = TRUE; + break; + } + else + p_FreeB = p_FreeB->p_Next; + } + + if ( !blockIsFree ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* init a new busy block */ + if ((p_NewBusyB = CreateBusyBlock(base, size, name)) == NULL) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* calls Update routine to update a lists of free blocks */ + if ( CutFree ( p_MM, base, base+size ) != E_OK ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* insert the new busy block into the list of busy blocks */ + AddBusy ( p_MM, p_NewBusyB ); + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + + return (base); +} + +/*****************************************************************************/ +uint64_t MM_GetForceMin(t_Handle h_MM, uint64_t size, uint64_t alignment, uint64_t min, char* name) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_FreeBlock *p_FreeB; + t_BusyBlock *p_NewBusyB; + uint64_t holdBase, holdEnd, j = alignment, i=0; + uint32_t intFlags; + + ASSERT_COND(p_MM); + + /* checks if alignment is a power of two, if it correct and if the + required size is multiple of the given alignment. 
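+       e.g. alignment == 8 shifts down to j == 1 with i == 3 and is
+       accepted, while alignment == 12 stops at j == 3 and is rejected
+       as not being a power of two.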
*/ + while ((j & 0x1) == 0) + { + i++; + j = j >> 1; + } + + if ( (j != 1) || (i > MM_MAX_ALIGNMENT) ) + { + return (uint64_t)(ILLEGAL_BASE); + } + + intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock); + p_FreeB = p_MM->freeBlocks[i]; + + /* look for the first block that contains the minimum + base address. If the whole required size may be fit + into it, use that block, otherwise look for the next + block of size greater or equal to the required size. */ + while ( p_FreeB && (min >= p_FreeB->end)) + p_FreeB = p_FreeB->p_Next; + + /* If such block is found */ + if ( !p_FreeB ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* if this block is large enough, use this block */ + holdBase = ( min <= p_FreeB->base ) ? p_FreeB->base : min; + if ((holdBase + size) <= p_FreeB->end ) + { + holdEnd = holdBase + size; + } + else + { + p_FreeB = p_FreeB->p_Next; + while ( p_FreeB && ((p_FreeB->end - p_FreeB->base) < size) ) + p_FreeB = p_FreeB->p_Next; + + /* If such block is found */ + if ( !p_FreeB ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + holdBase = p_FreeB->base; + holdEnd = holdBase + size; + } + + /* init a new busy block */ + if ((p_NewBusyB = CreateBusyBlock(holdBase, size, name)) == NULL) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* calls Update routine to update a lists of free blocks */ + if ( CutFree( p_MM, holdBase, holdEnd ) != E_OK ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(ILLEGAL_BASE); + } + + /* insert the new busy block into the list of busy blocks */ + AddBusy( p_MM, p_NewBusyB ); + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + + return (holdBase); +} + +/*****************************************************************************/ +uint64_t MM_Put(t_Handle h_MM, uint64_t base) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_BusyBlock *p_BusyB, *p_PrevBusyB; + uint64_t size; + uint32_t intFlags; + + ASSERT_COND(p_MM); + + /* Look for a busy block that have the given base value. + * That block will be returned back to the memory. 
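+     * e.g. a block obtained with MM_Get(h_Mem, 0x200, 64, "rx") is
+     * released with MM_Put(h_Mem, base), which returns 0x200, the size
+     * of the released block ("h_Mem" and "rx" are illustrative names).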
+ */ + p_PrevBusyB = 0; + + intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock); + p_BusyB = p_MM->busyBlocks; + while ( p_BusyB && base != p_BusyB->base ) + { + p_PrevBusyB = p_BusyB; + p_BusyB = p_BusyB->p_Next; + } + + if ( !p_BusyB ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(0); + } + + if ( AddFree( p_MM, p_BusyB->base, p_BusyB->end ) != E_OK ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(0); + } + + /* removes a busy block form the list of busy blocks */ + if ( p_PrevBusyB ) + p_PrevBusyB->p_Next = p_BusyB->p_Next; + else + p_MM->busyBlocks = p_BusyB->p_Next; + + size = p_BusyB->end - p_BusyB->base; + + XX_Free(p_BusyB); + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + + return (size); +} + +/*****************************************************************************/ +uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size) +{ + t_MM *p_MM = (t_MM *)h_MM; + uint64_t end = base + size; + uint32_t intFlags; + + ASSERT_COND(p_MM); + + intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock); + if ( CutBusy( p_MM, base, end ) != E_OK ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(0); + } + + if ( AddFree ( p_MM, base, end ) != E_OK ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + return (uint64_t)(0); + } + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + + return (size); +} + +/*****************************************************************************/ +t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_MemBlock *p_MemB, *p_NewMemB; + t_Error errCode; + uint32_t intFlags; + + ASSERT_COND(p_MM); + + /* find a last block in the list of memory blocks to insert a new + * memory block + */ + intFlags = XX_LockIntrSpinlock(p_MM->h_Spinlock); + p_MemB = p_MM->memBlocks; + while ( p_MemB->p_Next ) + { + if ( base >= p_MemB->base && base < p_MemB->end ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG); + } + p_MemB = p_MemB->p_Next; + } + /* check for a last memory block */ + if ( base >= p_MemB->base && base < p_MemB->end ) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + RETURN_ERROR(MAJOR, E_ALREADY_EXISTS, NO_MSG); + } + + /* create a new memory block */ + if ((p_NewMemB = CreateNewBlock(base, size)) == NULL) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + } + + /* append a new memory block to the end of the list of memory blocks */ + p_MemB->p_Next = p_NewMemB; + + /* add a new free block to the free lists */ + errCode = AddFree(p_MM, base, base+size); + if (errCode) + { + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + p_MemB->p_Next = 0; + XX_Free(p_NewMemB); + return ((t_Error)errCode); + } + XX_UnlockIntrSpinlock(p_MM->h_Spinlock, intFlags); + + return (E_OK); +} + +/*****************************************************************************/ +uint64_t MM_GetMemBlock(t_Handle h_MM, int index) +{ + t_MM *p_MM = (t_MM*)h_MM; + t_MemBlock *p_MemBlock; + int i; + + ASSERT_COND(p_MM); + + p_MemBlock = p_MM->memBlocks; + for (i=0; i < index; i++) + p_MemBlock = p_MemBlock->p_Next; + + if ( p_MemBlock ) + return (p_MemBlock->base); + else + return (uint64_t)ILLEGAL_BASE; +} + +/*****************************************************************************/ +uint64_t MM_GetBase(t_Handle h_MM) +{ + t_MM *p_MM = (t_MM*)h_MM; + t_MemBlock *p_MemBlock; + + ASSERT_COND(p_MM); + + p_MemBlock = 
p_MM->memBlocks; + return p_MemBlock->base; +} + +/*****************************************************************************/ +bool MM_InRange(t_Handle h_MM, uint64_t addr) +{ + t_MM *p_MM = (t_MM*)h_MM; + t_MemBlock *p_MemBlock; + + ASSERT_COND(p_MM); + + p_MemBlock = p_MM->memBlocks; + + if ((addr >= p_MemBlock->base) && (addr < p_MemBlock->end)) + return TRUE; + else + return FALSE; +} + +/*****************************************************************************/ +void MM_Dump(t_Handle h_MM, void *buff) +{ + t_MM *p_MM = (t_MM *)h_MM; + t_FreeBlock *p_FreeB; + t_BusyBlock *p_BusyB; + int i; + + p_BusyB = p_MM->busyBlocks; + Sprint(buff, "List of busy blocks:\n"); + while (p_BusyB) + { + Sprint(buff, "\t0x%p: (%s: b=0x%lx, e=0x%lx)\n", + p_BusyB, p_BusyB->name, p_BusyB->base, p_BusyB->end ); + p_BusyB = p_BusyB->p_Next; + } + + Sprint(buff, "\nLists of free blocks according to alignment:\n"); + for (i=0; i <= MM_MAX_ALIGNMENT; i++) + { + Sprint(buff, "%d alignment:\n", (0x1 << i)); + p_FreeB = p_MM->freeBlocks[i]; + while (p_FreeB) + { + Sprint(buff, "\t0x%p: (b=0x%lx, e=0x%lx)\n", + p_FreeB, p_FreeB->base, p_FreeB->end); + p_FreeB = p_FreeB->p_Next; + } + Sprint(buff, "\n"); + } +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/mm.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/mm.h @@ -0,0 +1,101 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************** + * + * File: mm.h + * + * + * Description: + * MM (Memory Management) object definitions. + * It also includes definitions of the Free Block, Busy Block + * and Memory Block structures used by the MM object. 
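+ *          A minimal usage sketch (illustrative only; error handling
+ *          and the surrounding XX_* environment are assumed):
+ *
+ *              t_Handle h_Mem;
+ *              uint64_t base;
+ *
+ *              if (MM_Init(&h_Mem, 0x100000, 0x10000) == E_OK)
+ *              {
+ *                  base = MM_Get(h_Mem, 0x200, 64, "rx buffers");
+ *                  if (base != (uint64_t)ILLEGAL_BASE)
+ *                      MM_Put(h_Mem, base);
+ *                  MM_Free(h_Mem);
+ *              }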
+ * + ****************************************************************/ + +#ifndef __MM_H +#define __MM_H + + +#include "mm_ext.h" + +#define __ERR_MODULE__ MODULE_MM + + +#define MAKE_ALIGNED(addr, align) \ + (((uint64_t)(addr) + ((align) - 1)) & (~(((uint64_t)align) - 1))) + + +/* t_MemBlock data stucutre defines parameters of the Memory Block */ +typedef struct t_MemBlock +{ + struct t_MemBlock *p_Next; /* Pointer to the next memory block */ + + uint64_t base; /* Base address of the memory block */ + uint64_t end; /* End address of the memory block */ +} t_MemBlock; + + +/* t_FreeBlock data stucutre defines parameters of the Free Block */ +typedef struct t_FreeBlock +{ + struct t_FreeBlock *p_Next; /* Pointer to the next free block */ + + uint64_t base; /* Base address of the block */ + uint64_t end; /* End address of the block */ +} t_FreeBlock; + + +/* t_BusyBlock data stucutre defines parameters of the Busy Block */ +typedef struct t_BusyBlock +{ + struct t_BusyBlock *p_Next; /* Pointer to the next free block */ + + uint64_t base; /* Base address of the block */ + uint64_t end; /* End address of the block */ + char name[MM_MAX_NAME_LEN]; /* That block of memory was allocated for + something specified by the Name */ +} t_BusyBlock; + + +/* t_MM data structure defines parameters of the MM object */ +typedef struct t_MM +{ + t_MemBlock *memBlocks; /* List of memory blocks (Memory list) */ + t_BusyBlock *busyBlocks; /* List of busy blocks (Busy list) */ + t_FreeBlock *freeBlocks[MM_MAX_ALIGNMENT + 1]; + /* Alignment lists of free blocks (Free lists) */ + t_Handle h_Spinlock; +} t_MM; + + +#endif /* __MM_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/etc/sprint.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/etc/sprint.c @@ -0,0 +1,81 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*------------------------------------------------------*/ +/* File: sprint.c */ +/* */ +/* Description: */ +/* Debug routines (externals) */ +/*------------------------------------------------------*/ +#include "string_ext.h" +#include "stdlib_ext.h" +#include "ctype_ext.h" +#include "stdarg_ext.h" +#include "sprint_ext.h" +#include "std_ext.h" +#include "xx_ext.h" + + +int Sprint(char * buf, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsprintf(buf,fmt,args); + va_end(args); + return i; +} + +int Snprint(char * buf, uint32_t size, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsnprintf(buf,size,fmt,args); + va_end(args); + return i; +} + +#ifndef NCSW_VXWORKS +int Sscan(const char * buf, const char * fmt, ...) +{ + va_list args; + int i; + + va_start(args,fmt); + i = vsscanf(buf,fmt,args); + va_end(args); + return i; +} +#endif /* NCSW_VXWORKS */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/crc_mac_addr_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/crc_mac_addr_ext.h @@ -0,0 +1,363 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/*------------------------------------------------------*/ +/* */ +/* File: crc_mac_addr_ext.h */ +/* */ +/* Description: */ +/* Define a macro that calculate the crc value of */ +/* an Ethernet MAC address (48 bitd address */ +/*------------------------------------------------------*/ + +#ifndef __crc_mac_addr_ext_h +#define __crc_mac_addr_ext_h + +#include "std_ext.h" + + +static uint32_t crc_table[256] = +{ + 0x00000000, + 0x77073096, + 0xee0e612c, + 0x990951ba, + 0x076dc419, + 0x706af48f, + 0xe963a535, + 0x9e6495a3, + 0x0edb8832, + 0x79dcb8a4, + 0xe0d5e91e, + 0x97d2d988, + 0x09b64c2b, + 0x7eb17cbd, + 0xe7b82d07, + 0x90bf1d91, + 0x1db71064, + 0x6ab020f2, + 0xf3b97148, + 0x84be41de, + 0x1adad47d, + 0x6ddde4eb, + 0xf4d4b551, + 0x83d385c7, + 0x136c9856, + 0x646ba8c0, + 0xfd62f97a, + 0x8a65c9ec, + 0x14015c4f, + 0x63066cd9, + 0xfa0f3d63, + 0x8d080df5, + 0x3b6e20c8, + 0x4c69105e, + 0xd56041e4, + 0xa2677172, + 0x3c03e4d1, + 0x4b04d447, + 0xd20d85fd, + 0xa50ab56b, + 0x35b5a8fa, + 0x42b2986c, + 0xdbbbc9d6, + 0xacbcf940, + 0x32d86ce3, + 0x45df5c75, + 0xdcd60dcf, + 0xabd13d59, + 0x26d930ac, + 0x51de003a, + 0xc8d75180, + 0xbfd06116, + 0x21b4f4b5, + 0x56b3c423, + 0xcfba9599, + 0xb8bda50f, + 0x2802b89e, + 0x5f058808, + 0xc60cd9b2, + 0xb10be924, + 0x2f6f7c87, + 0x58684c11, + 0xc1611dab, + 0xb6662d3d, + 0x76dc4190, + 0x01db7106, + 0x98d220bc, + 0xefd5102a, + 0x71b18589, + 0x06b6b51f, + 0x9fbfe4a5, + 0xe8b8d433, + 0x7807c9a2, + 0x0f00f934, + 0x9609a88e, + 0xe10e9818, + 0x7f6a0dbb, + 0x086d3d2d, + 0x91646c97, + 0xe6635c01, + 0x6b6b51f4, + 0x1c6c6162, + 0x856530d8, + 0xf262004e, + 0x6c0695ed, + 0x1b01a57b, + 0x8208f4c1, + 0xf50fc457, + 0x65b0d9c6, + 0x12b7e950, + 0x8bbeb8ea, + 0xfcb9887c, + 0x62dd1ddf, + 0x15da2d49, + 0x8cd37cf3, + 0xfbd44c65, + 0x4db26158, + 0x3ab551ce, + 0xa3bc0074, + 0xd4bb30e2, + 0x4adfa541, + 0x3dd895d7, + 0xa4d1c46d, + 0xd3d6f4fb, + 0x4369e96a, + 0x346ed9fc, + 0xad678846, + 0xda60b8d0, + 0x44042d73, + 0x33031de5, + 0xaa0a4c5f, + 0xdd0d7cc9, + 0x5005713c, + 0x270241aa, + 0xbe0b1010, + 0xc90c2086, + 0x5768b525, + 0x206f85b3, + 0xb966d409, + 0xce61e49f, + 0x5edef90e, + 0x29d9c998, + 0xb0d09822, + 0xc7d7a8b4, + 0x59b33d17, + 0x2eb40d81, + 0xb7bd5c3b, + 0xc0ba6cad, + 0xedb88320, + 0x9abfb3b6, + 0x03b6e20c, + 0x74b1d29a, + 0xead54739, + 0x9dd277af, + 0x04db2615, + 0x73dc1683, + 0xe3630b12, + 0x94643b84, + 0x0d6d6a3e, + 0x7a6a5aa8, + 0xe40ecf0b, + 0x9309ff9d, + 0x0a00ae27, + 0x7d079eb1, + 0xf00f9344, + 0x8708a3d2, + 0x1e01f268, + 0x6906c2fe, + 0xf762575d, + 0x806567cb, + 0x196c3671, + 0x6e6b06e7, + 0xfed41b76, + 0x89d32be0, + 0x10da7a5a, + 0x67dd4acc, + 0xf9b9df6f, + 0x8ebeeff9, + 0x17b7be43, + 0x60b08ed5, + 0xd6d6a3e8, + 0xa1d1937e, + 0x38d8c2c4, + 0x4fdff252, + 0xd1bb67f1, + 0xa6bc5767, + 0x3fb506dd, + 0x48b2364b, + 0xd80d2bda, + 0xaf0a1b4c, + 0x36034af6, + 0x41047a60, + 0xdf60efc3, + 0xa867df55, + 0x316e8eef, + 0x4669be79, + 0xcb61b38c, + 0xbc66831a, + 0x256fd2a0, + 0x5268e236, + 0xcc0c7795, + 0xbb0b4703, + 0x220216b9, + 0x5505262f, + 0xc5ba3bbe, + 0xb2bd0b28, + 0x2bb45a92, + 0x5cb36a04, + 0xc2d7ffa7, + 0xb5d0cf31, + 0x2cd99e8b, + 0x5bdeae1d, + 0x9b64c2b0, + 0xec63f226, + 0x756aa39c, + 0x026d930a, + 0x9c0906a9, + 0xeb0e363f, + 0x72076785, + 0x05005713, + 0x95bf4a82, + 0xe2b87a14, + 0x7bb12bae, + 0x0cb61b38, + 0x92d28e9b, + 0xe5d5be0d, + 0x7cdcefb7, + 0x0bdbdf21, + 0x86d3d2d4, + 0xf1d4e242, + 0x68ddb3f8, + 0x1fda836e, + 0x81be16cd, + 0xf6b9265b, + 0x6fb077e1, + 0x18b74777, + 0x88085ae6, + 0xff0f6a70, + 0x66063bca, + 0x11010b5c, + 0x8f659eff, + 0xf862ae69, + 0x616bffd3, + 
0x166ccf45, + 0xa00ae278, + 0xd70dd2ee, + 0x4e048354, + 0x3903b3c2, + 0xa7672661, + 0xd06016f7, + 0x4969474d, + 0x3e6e77db, + 0xaed16a4a, + 0xd9d65adc, + 0x40df0b66, + 0x37d83bf0, + 0xa9bcae53, + 0xdebb9ec5, + 0x47b2cf7f, + 0x30b5ffe9, + 0xbdbdf21c, + 0xcabac28a, + 0x53b39330, + 0x24b4a3a6, + 0xbad03605, + 0xcdd70693, + 0x54de5729, + 0x23d967bf, + 0xb3667a2e, + 0xc4614ab8, + 0x5d681b02, + 0x2a6f2b94, + 0xb40bbe37, + 0xc30c8ea1, + 0x5a05df1b, + 0x2d02ef8d +}; + + +#define GET_MAC_ADDR_CRC(addr, crc) \ +{ \ + uint32_t i; \ + uint8_t data; \ + \ + /* CRC calculation */ \ + crc = 0xffffffff; \ + for (i=0; i < 6; i++) \ + { \ + data = (uint8_t)(addr >> ((5-i)*8)); \ + crc = crc^data; \ + crc = crc_table[crc&0xff] ^ (crc>>8); \ + } \ +} \ + +/* Define a macro for getting the mirrored value of */ +/* a byte size number. (0x11010011 --> 0x11001011) */ +/* Sometimes the mirrored value of the CRC is required */ +static __inline__ uint8_t GetMirror(uint8_t n) +{ + uint8_t mirror[16] = + { + 0x00, + 0x08, + 0x04, + 0x0c, + 0x02, + 0x0a, + 0x06, + 0x0e, + 0x01, + 0x09, + 0x05, + 0x0d, + 0x03, + 0x0b, + 0x07, + 0x0f + }; + return ((uint8_t)(((mirror[n & 0x0f] << 4) | (mirror[n >> 4])))); +} + +static __inline__ uint32_t GetMirror32(uint32_t n) +{ + return (((uint32_t)GetMirror((uint8_t)(n))<<24) | + ((uint32_t)GetMirror((uint8_t)(n>>8))<<16) | + ((uint32_t)GetMirror((uint8_t)(n>>16))<<8) | + ((uint32_t)GetMirror((uint8_t)(n>>24)))); +} + +#define MIRROR GetMirror +#define MIRROR_32 GetMirror32 + + +#endif /* __crc_mac_addr_ext_h */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/dpaa_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/dpaa_ext.h @@ -0,0 +1,206 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/****************************************************************************** + @File dpaa_ext.h + + @Description DPAA Application Programming Interface. +*//***************************************************************************/ +#ifndef __DPAA_EXT_H +#define __DPAA_EXT_H + +#include "std_ext.h" +#include "error_ext.h" + + +/**************************************************************************//** + @Group DPAA_grp Data Path Acceleration Architecture API + + @Description DPAA API functions, definitions and enums. + + @{ +*//***************************************************************************/ + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + +/**************************************************************************//** + @Description Frame descriptor +*//***************************************************************************/ +typedef _Packed struct t_DpaaFD { + volatile uint32_t id; /**< FD id */ + volatile uint32_t addrl; /**< Data Address */ + volatile uint32_t length; /**< Frame length */ + volatile uint32_t status; /**< FD status */ +} _PackedType t_DpaaFD; + +/**************************************************************************//** + @Description enum for defining frame format +*//***************************************************************************/ +typedef enum e_DpaaFDFormatType { + e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF = 0x0, /**< Simple frame Single buffer; Offset and + small length (9b OFFSET, 20b LENGTH) */ + e_DPAA_FD_FORMAT_TYPE_LONG_SBSF = 0x2, /**< Simple frame, single buffer; big length + (29b LENGTH ,No OFFSET) */ + e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF = 0x4, /**< Simple frame, Scatter Gather table; Offset + and small length (9b OFFSET, 20b LENGTH) */ + e_DPAA_FD_FORMAT_TYPE_LONG_MBSF = 0x6, /**< Simple frame, Scatter Gather table; + big length (29b LENGTH ,No OFFSET) */ + e_DPAA_FD_FORMAT_TYPE_COMPOUND = 0x1, /**< Compound Frame (29b CONGESTION-WEIGHT + No LENGTH or OFFSET) */ + e_DPAA_FD_FORMAT_TYPE_DUMMY +} e_DpaaFDFormatType; + +/**************************************************************************//** + @Collection Frame descriptor macros +*//***************************************************************************/ +#define DPAA_FD_DD_MASK 0xc0000000 /**< FD DD field mask */ +#define DPAA_FD_PID_MASK 0x3f000000 /**< FD PID field mask */ +#define DPAA_FD_ELIODN_MASK 0x0000f000 /**< FD ELIODN field mask */ +#define DPAA_FD_BPID_MASK 0x00ff0000 /**< FD BPID field mask */ +#define DPAA_FD_ADDRH_MASK 0x000000ff /**< FD ADDRH field mask */ +#define DPAA_FD_ADDRL_MASK 0xffffffff /**< FD ADDRL field mask */ +#define DPAA_FD_FORMAT_MASK 0xe0000000 /**< FD FORMAT field mask */ +#define DPAA_FD_OFFSET_MASK 0x1ff00000 /**< FD OFFSET field mask */ +#define DPAA_FD_LENGTH_MASK 0x000fffff /**< FD LENGTH field mask */ + +#define DPAA_FD_GET_DD(fd) ((((t_DpaaFD *)fd)->id & DPAA_FD_DD_MASK) >> (31-1)) /**< Macro to get FD DD field */ +#define DPAA_FD_GET_PID(fd) (((((t_DpaaFD *)fd)->id & DPAA_FD_PID_MASK) >> (31-7)) | \ + ((((t_DpaaFD *)fd)->id & DPAA_FD_ELIODN_MASK) >> (31-19-6))) /**< Macro to get FD PID field */ +#define DPAA_FD_GET_BPID(fd) ((((t_DpaaFD *)fd)->id & DPAA_FD_BPID_MASK) >> (31-15)) /**< Macro to get FD BPID field */ +#define DPAA_FD_GET_ADDRH(fd) (((t_DpaaFD *)fd)->id & DPAA_FD_ADDRH_MASK) /**< Macro to get FD ADDRH field */ +#define DPAA_FD_GET_ADDRL(fd) ((t_DpaaFD *)fd)->addrl /**< Macro to get FD ADDRL field */ +#define 
DPAA_FD_GET_PHYS_ADDR(fd) ((physAddress_t)(((uint64_t)DPAA_FD_GET_ADDRH(fd) << 32) | (uint64_t)DPAA_FD_GET_ADDRL(fd))) /**< Macro to get FD ADDR field */ +#define DPAA_FD_GET_FORMAT(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_FORMAT_MASK) >> (31-2)) /**< Macro to get FD FORMAT field */ +#define DPAA_FD_GET_OFFSET(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_OFFSET_MASK) >> (31-11)) /**< Macro to get FD OFFSET field */ +#define DPAA_FD_GET_LENGTH(fd) (((t_DpaaFD *)fd)->length & DPAA_FD_LENGTH_MASK) /**< Macro to get FD LENGTH field */ +#define DPAA_FD_GET_STATUS(fd) ((t_DpaaFD *)fd)->status /**< Macro to get FD STATUS field */ +#define DPAA_FD_GET_ADDR(fd) XX_PhysToVirt(DPAA_FD_GET_PHYS_ADDR(fd)) + +#define DPAA_FD_SET_DD(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~DPAA_FD_DD_MASK) | (((val) << (31-1)) & DPAA_FD_DD_MASK ))) /**< Macro to set FD DD field */ + /**< Macro to set FD PID field or LIODN offset*/ +#define DPAA_FD_SET_PID(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~(DPAA_FD_PID_MASK|DPAA_FD_ELIODN_MASK)) | ((((val) << (31-7)) & DPAA_FD_PID_MASK) | ((((val)>>6) << (31-19)) & DPAA_FD_ELIODN_MASK)))) +#define DPAA_FD_SET_BPID(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~DPAA_FD_BPID_MASK) | (((val) << (31-15)) & DPAA_FD_BPID_MASK))) /**< Macro to set FD BPID field */ +#define DPAA_FD_SET_ADDRH(fd,val) (((t_DpaaFD *)fd)->id = ((((t_DpaaFD *)fd)->id & ~DPAA_FD_ADDRH_MASK) | ((val) & DPAA_FD_ADDRH_MASK))) /**< Macro to set FD ADDRH field */ +#define DPAA_FD_SET_ADDRL(fd,val) ((t_DpaaFD *)fd)->addrl = (val) /**< Macro to set FD ADDRL field */ +#define DPAA_FD_SET_ADDR(fd,val) \ +do { \ + uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \ + DPAA_FD_SET_ADDRH(fd, ((uint32_t)(physAddr >> 32))); \ + DPAA_FD_SET_ADDRL(fd, (uint32_t)physAddr); \ +} while (0) /**< Macro to set FD ADDR field */ +#define DPAA_FD_SET_FORMAT(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_FORMAT_MASK) | (((val) << (31-2))& DPAA_FD_FORMAT_MASK))) /**< Macro to set FD FORMAT field */ +#define DPAA_FD_SET_OFFSET(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_OFFSET_MASK) | (((val) << (31-11))& DPAA_FD_OFFSET_MASK) )) /**< Macro to set FD OFFSET field */ +#define DPAA_FD_SET_LENGTH(fd,val) (((t_DpaaFD *)fd)->length = (((t_DpaaFD *)fd)->length & ~DPAA_FD_LENGTH_MASK) | ((val) & DPAA_FD_LENGTH_MASK)) /**< Macro to set FD LENGTH field */ +#define DPAA_FD_SET_STATUS(fd,val) ((t_DpaaFD *)fd)->status = (val) /**< Macro to set FD STATUS field */ +/* @} */ + +/**************************************************************************//** + @Description Frame Scatter/Gather Table Entry +*//***************************************************************************/ +typedef _Packed struct t_DpaaSGTE { + volatile uint32_t addrh; /**< Buffer Address high */ + volatile uint32_t addrl; /**< Buffer Address low */ + volatile uint32_t length; /**< Buffer length */ + volatile uint32_t offset; /**< SGTE offset */ +} _PackedType t_DpaaSGTE; + +#define DPAA_NUM_OF_SG_TABLE_ENTRY 16 + +/**************************************************************************//** + @Description Frame Scatter/Gather Table +*//***************************************************************************/ +typedef _Packed struct t_DpaaSGT { + t_DpaaSGTE tableEntry[DPAA_NUM_OF_SG_TABLE_ENTRY]; + /**< structure that hold the information about + a single S/G entry. 
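+                                                      As an illustrative sketch (sgt, buf, len and i are placeholder names),
+                                                      an entry is filled with DPAA_SGTE_SET_ADDR(&sgt->tableEntry[i], buf)
+                                                      and DPAA_SGTE_SET_LENGTH(&sgt->tableEntry[i], len), and the last
+                                                      entry in the table is marked with DPAA_SGTE_SET_FINAL(&sgt->tableEntry[i], 1).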
*/ +} _PackedType t_DpaaSGT; + +/**************************************************************************//** + @Description Compound Frame Table +*//***************************************************************************/ +typedef _Packed struct t_DpaaCompTbl { + t_DpaaSGTE outputBuffInfo; /**< structure that holds the information about + the compound-frame output buffer; + NOTE: this may point to a S/G table */ + t_DpaaSGTE inputBuffInfo; /**< structure that holds the information about + the compound-frame input buffer; + NOTE: this may point to a S/G table */ +} _PackedType t_DpaaCompTbl; + +/**************************************************************************//** + @Collection Frame Scatter/Gather Table Entry macros +*//***************************************************************************/ +#define DPAA_SGTE_ADDRH_MASK 0x000000ff /**< SGTE ADDRH field mask */ +#define DPAA_SGTE_ADDRL_MASK 0xffffffff /**< SGTE ADDRL field mask */ +#define DPAA_SGTE_E_MASK 0x80000000 /**< SGTE Extension field mask */ +#define DPAA_SGTE_F_MASK 0x40000000 /**< SGTE Final field mask */ +#define DPAA_SGTE_LENGTH_MASK 0x3fffffff /**< SGTE LENGTH field mask */ +#define DPAA_SGTE_BPID_MASK 0x00ff0000 /**< SGTE BPID field mask */ +#define DPAA_SGTE_OFFSET_MASK 0x00001fff /**< SGTE OFFSET field mask */ + +#define DPAA_SGTE_GET_ADDRH(sgte) (((t_DpaaSGTE *)sgte)->addrh & DPAA_SGTE_ADDRH_MASK) /**< Macro to get SGTE ADDRH field */ +#define DPAA_SGTE_GET_ADDRL(sgte) ((t_DpaaSGTE *)sgte)->addrl /**< Macro to get SGTE ADDRL field */ +#define DPAA_SGTE_GET_PHYS_ADDR(sgte) ((physAddress_t)(((uint64_t)DPAA_SGTE_GET_ADDRH(sgte) << 32) | (uint64_t)DPAA_SGTE_GET_ADDRL(sgte))) /**< Macro to get FD ADDR field */ +#define DPAA_SGTE_GET_EXTENSION(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_E_MASK) >> (31-0)) /**< Macro to get SGTE EXTENSION field */ +#define DPAA_SGTE_GET_FINAL(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_F_MASK) >> (31-1)) /**< Macro to get SGTE FINAL field */ +#define DPAA_SGTE_GET_LENGTH(sgte) (((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_LENGTH_MASK) /**< Macro to get SGTE LENGTH field */ +#define DPAA_SGTE_GET_BPID(sgte) ((((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_BPID_MASK) >> (31-15)) /**< Macro to get SGTE BPID field */ +#define DPAA_SGTE_GET_OFFSET(sgte) (((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_OFFSET_MASK) /**< Macro to get SGTE OFFSET field */ +#define DPAA_SGTE_GET_ADDR(sgte) XX_PhysToVirt(DPAA_SGTE_GET_PHYS_ADDR(sgte)) + +#define DPAA_SGTE_SET_ADDRH(sgte,val) (((t_DpaaSGTE *)sgte)->addrh = ((((t_DpaaSGTE *)sgte)->addrh & ~DPAA_SGTE_ADDRH_MASK) | ((val) & DPAA_SGTE_ADDRH_MASK))) /**< Macro to set SGTE ADDRH field */ +#define DPAA_SGTE_SET_ADDRL(sgte,val) ((t_DpaaSGTE *)sgte)->addrl = (val) /**< Macro to set SGTE ADDRL field */ +#define DPAA_SGTE_SET_ADDR(sgte,val) \ +do { \ + uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \ + DPAA_SGTE_SET_ADDRH(sgte, ((uint32_t)(physAddr >> 32))); \ + DPAA_SGTE_SET_ADDRL(sgte, (uint32_t)physAddr); \ +} while (0) /**< Macro to set SGTE ADDR field */ +#define DPAA_SGTE_SET_EXTENSION(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_E_MASK) | (((val) << (31-0))& DPAA_SGTE_E_MASK))) /**< Macro to set SGTE EXTENSION field */ +#define DPAA_SGTE_SET_FINAL(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_F_MASK) | (((val) << (31-1))& DPAA_SGTE_F_MASK))) /**< Macro to set SGTE FINAL field */ +#define DPAA_SGTE_SET_LENGTH(sgte,val) (((t_DpaaSGTE *)sgte)->length = (((t_DpaaSGTE 
*)sgte)->length & ~DPAA_SGTE_LENGTH_MASK) | ((val) & DPAA_SGTE_LENGTH_MASK)) /**< Macro to set SGTE LENGTH field */ +#define DPAA_SGTE_SET_BPID(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_BPID_MASK) | (((val) << (31-15))& DPAA_SGTE_BPID_MASK))) /**< Macro to set SGTE BPID field */ +#define DPAA_SGTE_SET_OFFSET(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_OFFSET_MASK) | (((val) << (31-31))& DPAA_SGTE_OFFSET_MASK) )) /**< Macro to set SGTE OFFSET field */ +/* @} */ + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + +/** @} */ /* end of DPAA_grp group */ + + +#endif /* __DPAA_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_ext.h @@ -0,0 +1,1347 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_ext.h + + @Description FM Application Programming Interface. +*//***************************************************************************/ +#ifndef __FM_EXT +#define __FM_EXT + +#include "error_ext.h" +#include "std_ext.h" +#include "dpaa_ext.h" + + +/**************************************************************************//** + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums. 
+ + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_lib_grp FM library + + @Description FM API functions, definitions and enums + The FM module is the main driver module and is a mandatory module + for FM driver users. Before any further module initialization, + this module must be initialized. + The FM is a "singletone" module. It is responsible of the common + HW modules: FPM, DMA, common QMI, common BMI initializations and + run-time control routines. This module must be initialized always + when working with any of the FM modules. + NOTE - We assumes that the FML will be initialize only by core No. 0! + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description enum for defining port types +*//***************************************************************************/ +typedef enum e_FmPortType { + e_FM_PORT_TYPE_OH_OFFLINE_PARSING = 0, /**< Offline parsing port (id's: 0-6, share id's with + host command, so must have exclusive id) */ + e_FM_PORT_TYPE_OH_HOST_COMMAND, /**< Host command port (id's: 0-6, share id's with + offline parsing ports, so must have exclusive id) */ + e_FM_PORT_TYPE_RX, /**< 1G Rx port (id's: 0-3) */ + e_FM_PORT_TYPE_RX_10G, /**< 10G Rx port (id's: 0) */ + e_FM_PORT_TYPE_TX, /**< 1G Tx port (id's: 0-3) */ + e_FM_PORT_TYPE_TX_10G, /**< 10G Tx port (id's: 0) */ + e_FM_PORT_TYPE_DUMMY +} e_FmPortType; + +/**************************************************************************//** + @Collection General FM defines +*//***************************************************************************/ +#define FM_MAX_NUM_OF_PARTITIONS 64 /**< Maximum number of partitions */ +#define FM_PHYS_ADDRESS_SIZE 6 /**< FM Physical address size */ +/* @} */ + + +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... 
*/ +#define MEM_MAP_START + +/**************************************************************************//** + @Description FM physical Address +*//***************************************************************************/ +typedef _Packed struct t_FmPhysAddr { + volatile uint8_t high; /**< High part of the physical address */ + volatile uint32_t low; /**< Low part of the physical address */ +} _PackedType t_FmPhysAddr; + +/**************************************************************************//** + @Description Parse results memory layout +*//***************************************************************************/ +typedef _Packed struct t_FmPrsResult { + volatile uint8_t lpid; /**< Logical port id */ + volatile uint8_t shimr; /**< Shim header result */ + volatile uint16_t l2r; /**< Layer 2 result */ + volatile uint16_t l3r; /**< Layer 3 result */ + volatile uint8_t l4r; /**< Layer 4 result */ + volatile uint8_t cplan; /**< Classification plan id */ + volatile uint16_t nxthdr; /**< Next Header */ + volatile uint16_t cksum; /**< Checksum */ + volatile uint32_t lcv; /**< LCV */ + volatile uint8_t shim_off[3]; /**< Shim offset */ + volatile uint8_t eth_off; /**< ETH offset */ + volatile uint8_t llc_snap_off; /**< LLC_SNAP offset */ + volatile uint8_t vlan_off[2]; /**< VLAN offset */ + volatile uint8_t etype_off; /**< ETYPE offset */ + volatile uint8_t pppoe_off; /**< PPP offset */ + volatile uint8_t mpls_off[2]; /**< MPLS offset */ + volatile uint8_t ip_off[2]; /**< IP offset */ + volatile uint8_t gre_off; /**< GRE offset */ + volatile uint8_t l4_off; /**< Layer 4 offset */ + volatile uint8_t nxthdr_off; /**< Parser end point */ +} _PackedType t_FmPrsResult; + +/**************************************************************************//** + @Collection FM Parser results +*//***************************************************************************/ +#define FM_PR_L2_VLAN_STACK 0x00000100 /**< Parse Result: VLAN stack */ +#define FM_PR_L2_ETHERNET 0x00008000 /**< Parse Result: Ethernet*/ +#define FM_PR_L2_VLAN 0x00004000 /**< Parse Result: VLAN */ +#define FM_PR_L2_LLC_SNAP 0x00002000 /**< Parse Result: LLC_SNAP */ +#define FM_PR_L2_MPLS 0x00001000 /**< Parse Result: MPLS */ +#define FM_PR_L2_PPPoE 0x00000800 /**< Parse Result: PPPoE */ +/* @} */ + +/**************************************************************************//** + @Collection FM Frame descriptor macros +*//***************************************************************************/ +#define FM_FD_CMD_FCO 0x80000000 /**< Frame queue Context Override */ +#define FM_FD_CMD_RPD 0x40000000 /**< Read Prepended Data */ +#define FM_FD_CMD_UPD 0x20000000 /**< Update Prepended Data */ +#define FM_FD_CMD_DTC 0x10000000 /**< Do L4 Checksum */ +#define FM_FD_CMD_DCL4C 0x10000000 /**< Didn't calculate L4 Checksum */ +#define FM_FD_CMD_CFQ 0x00ffffff /**< Confirmation Frame Queue */ + +#define FM_FD_TX_STATUS_ERR_MASK 0x07000000 /**< TX Error FD bits */ +#define FM_FD_RX_STATUS_ERR_MASK 0x070ee3f8 /**< RX Error FD bits */ +/* @} */ + +/**************************************************************************//** + @Description Context A +*//***************************************************************************/ +typedef _Packed struct t_FmContextA { + volatile uint32_t command; /**< ContextA Command */ + volatile uint8_t res0[4]; /**< ContextA Reserved bits */ +} _PackedType t_FmContextA; + +/**************************************************************************//** + @Description Context B 
+*//***************************************************************************/ +typedef uint32_t t_FmContextB; + +/**************************************************************************//** + @Collection Context A macros +*//***************************************************************************/ +#define FM_CONTEXTA_OVERRIDE_MASK 0x80000000 +#define FM_CONTEXTA_ICMD_MASK 0x40000000 +#define FM_CONTEXTA_A1_VALID_MASK 0x20000000 +#define FM_CONTEXTA_MACCMD_MASK 0x00ff0000 +#define FM_CONTEXTA_MACCMD_VALID_MASK 0x00800000 +#define FM_CONTEXTA_MACCMD_SECURED_MASK 0x00100000 +#define FM_CONTEXTA_MACCMD_SC_MASK 0x000f0000 +#define FM_CONTEXTA_A1_MASK 0x0000ffff + +#define FM_CONTEXTA_GET_OVERRIDE(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_OVERRIDE_MASK) >> (31-0)) +#define FM_CONTEXTA_GET_ICMD(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_ICMD_MASK) >> (31-1)) +#define FM_CONTEXTA_GET_A1_VALID(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_A1_VALID_MASK) >> (31-2)) +#define FM_CONTEXTA_GET_A1(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_A1_MASK) >> (31-31)) +#define FM_CONTEXTA_GET_MACCMD(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_MASK) >> (31-15)) +#define FM_CONTEXTA_GET_MACCMD_VALID(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_VALID_MASK) >> (31-8)) +#define FM_CONTEXTA_GET_MACCMD_SECURED(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_SECURED_MASK) >> (31-11)) +#define FM_CONTEXTA_GET_MACCMD_SECURE_CHANNEL(contextA) ((((t_FmContextA *)contextA)->command & FM_CONTEXTA_MACCMD_SC_MASK) >> (31-15)) + +#define FM_CONTEXTA_SET_OVERRIDE(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_OVERRIDE_MASK) | (((uint32_t)(val) << (31-0)) & FM_CONTEXTA_OVERRIDE_MASK) )) +#define FM_CONTEXTA_SET_ICMD(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_ICMD_MASK) | (((val) << (31-1)) & FM_CONTEXTA_ICMD_MASK) )) +#define FM_CONTEXTA_SET_A1_VALID(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_A1_VALID_MASK) | (((val) << (31-2)) & FM_CONTEXTA_A1_VALID_MASK) )) +#define FM_CONTEXTA_SET_A1(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_A1_MASK) | (((val) << (31-31)) & FM_CONTEXTA_A1_MASK) )) +#define FM_CONTEXTA_SET_MACCMD(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_MASK) | (((val) << (31-15)) & FM_CONTEXTA_MACCMD_MASK) )) +#define FM_CONTEXTA_SET_MACCMD_VALID(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_VALID_MASK) | (((val) << (31-8)) & FM_CONTEXTA_MACCMD_VALID_MASK) )) +#define FM_CONTEXTA_SET_MACCMD_SECURED(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_SECURED_MASK) | (((val) << (31-11)) & FM_CONTEXTA_MACCMD_SECURED_MASK) )) +#define FM_CONTEXTA_SET_MACCMD_SECURE_CHANNEL(contextA,val) (((t_FmContextA *)contextA)->command = (uint32_t)((((t_FmContextA *)contextA)->command & ~FM_CONTEXTA_MACCMD_SC_MASK) | (((val) << (31-15)) & FM_CONTEXTA_MACCMD_SC_MASK) )) +/* @} */ + +/**************************************************************************//** + @Collection Context B 
macros +*//***************************************************************************/ +#define FM_CONTEXTB_FQID_MASK 0x00ffffff + +#define FM_CONTEXTB_GET_FQID(contextB) (*((t_FmContextB *)contextB) & FM_CONTEXTB_FQID_MASK) +#define FM_CONTEXTB_SET_FQID(contextB,val) (*((t_FmContextB *)contextB) = ((*((t_FmContextB *)contextB) & ~FM_CONTEXTB_FQID_MASK) | ((val) & FM_CONTEXTB_FQID_MASK))) +/* @} */ + +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + + +/**************************************************************************//** + @Description FM Exceptions +*//***************************************************************************/ +typedef enum e_FmExceptions { + e_FM_EX_DMA_BUS_ERROR, /**< DMA bus error. */ + e_FM_EX_DMA_READ_ECC, /**< Read Buffer ECC error */ + e_FM_EX_DMA_SYSTEM_WRITE_ECC, /**< Write Buffer ECC error on system side */ + e_FM_EX_DMA_FM_WRITE_ECC, /**< Write Buffer ECC error on FM side */ + e_FM_EX_FPM_STALL_ON_TASKS, /**< Stall of tasks on FPM */ + e_FM_EX_FPM_SINGLE_ECC, /**< Single ECC on FPM. */ + e_FM_EX_FPM_DOUBLE_ECC, /**< Double ECC error on FPM ram access */ + e_FM_EX_QMI_SINGLE_ECC, /**< Single ECC on QMI. */ + e_FM_EX_QMI_DOUBLE_ECC, /**< Double bit ECC occurred on QMI */ + e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,/**< Dequeu from unknown port id */ + e_FM_EX_BMI_LIST_RAM_ECC, /**< Linked List RAM ECC error */ + e_FM_EX_BMI_PIPELINE_ECC, /**< Pipeline Table ECC Error */ + e_FM_EX_BMI_STATISTICS_RAM_ECC, /**< Statistics Count RAM ECC Error Enable */ + e_FM_EX_BMI_DISPATCH_RAM_ECC, /**< Dispatch RAM ECC Error Enable */ + e_FM_EX_IRAM_ECC, /**< Double bit ECC occurred on IRAM*/ + e_FM_EX_MURAM_ECC /**< Double bit ECC occurred on MURAM*/ +} e_FmExceptions; + +/**************************************************************************//** + @Group FM_init_grp FM Initialization Unit + + @Description FM Initialization Unit + + Initialization Flow + Initialization of the FM Module will be carried out by the application + according to the following sequence: + a. Calling the configuration routine with basic parameters. + b. Calling the advance initialization routines to change driver's defaults. + c. Calling the initialization routine. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function t_FmExceptionsCallback + + @Description Exceptions user callback routine, will be called upon an + exception passing the exception identification. + + @Param[in] h_App - User's application descriptor. + @Param[in] exception - The exception. +*//***************************************************************************/ +typedef void (t_FmExceptionsCallback) (t_Handle h_App, + e_FmExceptions exception); + +/**************************************************************************//** + @Function t_FmBusErrorCallback + + @Description Bus error user callback routine, will be called upon a + bus error, passing parameters describing the errors and the owner. + + @Param[in] h_App - User's application descriptor. + @Param[in] portType - Port type (e_FmPortType) + @Param[in] portId - Port id - relative to type. 
+ @Param[in] addr - Address that caused the error + @Param[in] tnum - Owner of error + @Param[in] liodn - Logical IO device number +*//***************************************************************************/ +typedef void (t_FmBusErrorCallback) (t_Handle h_App, + e_FmPortType portType, + uint8_t portId, + uint64_t addr, + uint8_t tnum, + uint16_t liodn); + +/**************************************************************************//** + @Description structure for defining Ucode patch for loading. +*//***************************************************************************/ +typedef struct t_FmPcdFirmwareParams { + uint32_t size; /**< Size of uCode */ + uint32_t *p_Code; /**< A pointer to the uCode */ +} t_FmPcdFirmwareParams; + +/**************************************************************************//** + @Description structure representing FM initialization parameters +*//***************************************************************************/ +#define FM_SIZE_OF_LIODN_TABLE 64 +typedef struct t_FmParams { + uint8_t fmId; /**< Index of the FM */ + + uint8_t guestId; /**< FM Partition Id */ + + uintptr_t baseAddr; /**< Relevant when guestId = NCSW_MASSTER_ID only. + A pointer to base of memory mapped FM registers (virtual); + NOTE that this should include ALL common regs of the FM including + the PCD regs area. */ + t_Handle h_FmMuram; /**< Relevant when guestId = NCSW_MASSTER_ID only. + A handle of an initialized MURAM object, + to be used by the FM */ + uint16_t fmClkFreq; /**< Relevant when guestId = NCSW_MASSTER_ID only. + In Mhz */ +#ifdef FM_PARTITION_ARRAY + uint16_t liodnBasePerPort[FM_SIZE_OF_LIODN_TABLE]; + /**< Relevant when guestId = NCSW_MASSTER_ID only. + For each partition, LIODN should be configured here. */ +#endif /* FM_PARTITION_ARRAY */ + t_FmExceptionsCallback *f_Exception; /**< Relevant when guestId = NCSW_MASSTER_ID only. + An application callback routine to + handle exceptions.*/ + t_FmBusErrorCallback *f_BusError; /**< Relevant when guestId = NCSW_MASSTER_ID only. + An application callback routine to + handle exceptions.*/ + t_Handle h_App; /**< Relevant when guestId = NCSW_MASSTER_ID only. + A handle to an application layer object; This handle will + be passed by the driver upon calling the above callbacks */ + int irq; /**< Relevant when guestId = NCSW_MASSTER_ID only. + FM interrupt source for normal events */ + int errIrq; /**< Relevant when guestId = NCSW_MASSTER_ID only. + FM interrupt source for errors */ + t_FmPcdFirmwareParams firmware; /**< Relevant when guestId = NCSW_MASSTER_ID only. + Ucode */ +} t_FmParams; + + +/**************************************************************************//** + @Function FM_Config + + @Description Creates descriptor for the FM module. + + The routine returns a handle (descriptor) to the FM object. + This descriptor must be passed as first parameter to all other + FM function calls. + + No actual initialization or configuration of FM hardware is + done by this routine. + + @Param[in] p_FmParams - A pointer to data structure of parameters + + @Return Handle to FM object, or NULL for Failure. +*//***************************************************************************/ +t_Handle FM_Config(t_FmParams *p_FmParams); + +/**************************************************************************//** + @Function FM_Init + + @Description Initializes the FM module + + @Param[in] h_Fm - FM module descriptor + + @Return E_OK on success; Error code otherwise. 
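+
+                Example (an illustrative sketch only; all values below are
+                placeholders, the callbacks are user-supplied routines, and the
+                MURAM handle is assumed to have been obtained from the MURAM
+                driver beforehand):
+
+                    t_FmParams  fmParams;
+                    t_Handle    h_Fm;
+
+                    memset(&fmParams, 0, sizeof(fmParams));
+                    fmParams.fmId        = 0;                   // first FM instance
+                    fmParams.guestId     = NCSW_MASTER_ID;      // master partition
+                    fmParams.baseAddr    = fmRegsVirtAddr;      // placeholder virtual address
+                    fmParams.h_FmMuram   = h_Muram;             // handle from the MURAM driver
+                    fmParams.fmClkFreq   = 667;                 // placeholder clock, in MHz
+                    fmParams.f_Exception = MyExceptionsCb;      // t_FmExceptionsCallback (placeholder)
+                    fmParams.f_BusError  = MyBusErrorCb;        // t_FmBusErrorCallback (placeholder)
+                    fmParams.h_App       = h_MyApp;
+                    fmParams.irq         = fmIrq;               // placeholder interrupt sources
+                    fmParams.errIrq      = fmErrIrq;
+
+                    h_Fm = FM_Config(&fmParams);                // a. basic configuration
+                    if (h_Fm)
+                    {
+                        FM_ConfigResetOnInit(h_Fm, TRUE);       // b. optional advanced configuration
+                        FM_Init(h_Fm);                          // c. initialization
+                    }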
+*//***************************************************************************/ +t_Error FM_Init(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_Free + + @Description Frees all resources that were assigned to FM module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_Fm - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_Free(t_Handle h_Fm); + + +/**************************************************************************//** + @Group FM_advanced_init_grp FM Advanced Configuration Unit + + @Description Configuration functions used to change default values; + Note: Advanced init routines are not available for guest partition. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description DMA debug mode +*//***************************************************************************/ +typedef enum e_FmDmaDbgCntMode { + e_FM_DMA_DBG_NO_CNT = 0, /**< No counting */ + e_FM_DMA_DBG_CNT_DONE, /**< Count DONE commands */ + e_FM_DMA_DBG_CNT_COMM_Q_EM, /**< count command queue emergency signals */ + e_FM_DMA_DBG_CNT_INT_READ_EM, /**< Count Internal Read buffer emergency signal */ + e_FM_DMA_DBG_CNT_INT_WRITE_EM, /**< Count Internal Write buffer emergency signal */ + e_FM_DMA_DBG_CNT_FPM_WAIT, /**< Count FPM WAIT signal */ + e_FM_DMA_DBG_CNT_SIGLE_BIT_ECC, /**< Single bit ECC errors. */ + e_FM_DMA_DBG_CNT_RAW_WAR_PROT /**< Number of times there was a need for RAW & WAR protection. */ +} e_FmDmaDbgCntMode; + +/**************************************************************************//** + @Description DMA Cache Override +*//***************************************************************************/ +typedef enum e_FmDmaCacheOverride { + e_FM_DMA_NO_CACHE_OR = 0, /**< No override of the Cache field */ + e_FM_DMA_NO_STASH_DATA, /**< Data should not be stashed in system level cache */ + e_FM_DMA_MAY_STASH_DATA, /**< Data may be stashed in system level cache */ + e_FM_DMA_STASH_DATA /**< Data should be stashed in system level cache */ +} e_FmDmaCacheOverride; + +/**************************************************************************//** + @Description DMA External Bus Priority +*//***************************************************************************/ +typedef enum e_FmDmaExtBusPri { + e_FM_DMA_EXT_BUS_NORMAL = 0, /**< Normal priority */ + e_FM_DMA_EXT_BUS_EBS, /**< AXI extended bus service priority */ + e_FM_DMA_EXT_BUS_SOS, /**< AXI sos priority */ + e_FM_DMA_EXT_BUS_EBS_AND_SOS /**< AXI ebs + sos priority */ +} e_FmDmaExtBusPri; + +/**************************************************************************//** + @Description enum for choosing the field that will be output on AID +*//***************************************************************************/ +typedef enum e_FmDmaAidMode { + e_FM_DMA_AID_OUT_PORT_ID = 0, /**< 4 LSB of PORT_ID */ + e_FM_DMA_AID_OUT_TNUM /**< 4 LSB of TNUM */ +} e_FmDmaAidMode; + +/**************************************************************************//** + @Description FPM Catasrophic error behaviour +*//***************************************************************************/ +typedef enum e_FmCatastrophicErr { + e_FM_CATASTROPHIC_ERR_STALL_PORT = 0, /**< Port_ID is stalled (only reset can release it) */ + 
e_FM_CATASTROPHIC_ERR_STALL_TASK        /**< Only the erroneous task is stalled */
+} e_FmCatastrophicErr;
+
+/**************************************************************************//**
+ @Description   FPM DMA error behaviour
+*//***************************************************************************/
+typedef enum e_FmDmaErr {
+    e_FM_DMA_ERR_CATASTROPHIC = 0,          /**< DMA error is treated as a catastrophic error */
+    e_FM_DMA_ERR_REPORT                     /**< DMA error is only reported */
+} e_FmDmaErr;
+
+/**************************************************************************//**
+ @Description   DMA Emergency level by BMI emergency signal
+*//***************************************************************************/
+typedef enum e_FmDmaEmergencyLevel {
+    e_FM_DMA_EM_EBS = 0,                    /**< EBS emergency */
+    e_FM_DMA_EM_SOS                         /**< SOS emergency */
+} e_FmDmaEmergencyLevel;
+
+/**************************************************************************//**
+ @Collection    DMA emergency options
+*//***************************************************************************/
+typedef uint32_t fmEmergencyBus_t;          /**< DMA emergency options */
+
+#define  FM_DMA_MURAM_READ_EMERGENCY        0x00800000    /**< Enable emergency for MURAM1 */
+#define  FM_DMA_MURAM_WRITE_EMERGENCY       0x00400000    /**< Enable emergency for MURAM2 */
+#define  FM_DMA_EXT_BUS_EMERGENCY           0x00100000    /**< Enable emergency for external bus */
+/* @} */
+
+/**************************************************************************//**
+ @Description   A structure for defining the DMA emergency level
+*//***************************************************************************/
+typedef struct t_FmDmaEmergency {
+    fmEmergencyBus_t        emergencyBusSelect;     /**< An OR of the buses where emergency
+                                                         should be enabled */
+    e_FmDmaEmergencyLevel   emergencyLevel;         /**< EBS/SOS */
+} t_FmDmaEmergency;
+
+/**************************************************************************//**
+ @Description   Structure for defining FM thresholds
+*//***************************************************************************/
+typedef struct t_FmThresholds {
+    uint8_t     dispLimit;              /**< The number of times a frame may
+                                             be passed in the FM before it is assumed to
+                                             be looping.
*/
+    uint8_t     prsDispTh;              /**< The number of packets that may be
+                                             queued in the parser dispatch queue */
+    uint8_t     plcrDispTh;             /**< The number of packets that may be
+                                             queued in the policer dispatch queue */
+    uint8_t     kgDispTh;               /**< The number of packets that may be
+                                             queued in the keygen dispatch queue */
+    uint8_t     bmiDispTh;              /**< The number of packets that may be
+                                             queued in the BMI dispatch queue */
+    uint8_t     qmiEnqDispTh;           /**< The number of packets that may be
+                                             queued in the QMI enqueue dispatch queue */
+    uint8_t     qmiDeqDispTh;           /**< The number of packets that may be
+                                             queued in the QMI dequeue dispatch queue */
+    uint8_t     fmCtl1DispTh;           /**< The number of packets that may be
+                                             queued in the fmCtl1 dispatch queue */
+    uint8_t     fmCtl2DispTh;           /**< The number of packets that may be
+                                             queued in the fmCtl2 dispatch queue */
+} t_FmThresholds;
+
+
+/**************************************************************************//**
+ @Description   Structure for defining DMA thresholds
+*//***************************************************************************/
+typedef struct t_FmDmaThresholds {
+    uint8_t     assertEmergency;        /**< When this value is reached,
+                                             assert emergency (threshold) */
+    uint8_t     clearEmergency;         /**< After emergency is asserted, it is held
+                                             until this value is reached (hysteresis) */
+} t_FmDmaThresholds;
+
+
+/**************************************************************************//**
+ @Function      FM_ConfigResetOnInit
+
+ @Description   Tells the driver whether or not to reset the FM before
+                initialization. It changes the default configuration [FALSE].
+
+ @Param[in]     h_Fm                A handle to an FM Module.
+ @Param[in]     enable              When TRUE, FM will be reset before any initialization.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_Config() and before FM_Init().
+*//***************************************************************************/
+t_Error FM_ConfigResetOnInit(t_Handle h_Fm, bool enable);
+
+/**************************************************************************//**
+ @Function      FM_ConfigTotalNumOfTasks
+
+ @Description   Changes the total number of tasks from its default
+                configuration [BMI_MAX_NUM_OF_TASKS].
+
+ @Param[in]     h_Fm                A handle to an FM Module.
+ @Param[in]     totalNumOfTasks     The selected new value.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_Config() and before FM_Init().
+*//***************************************************************************/
+t_Error FM_ConfigTotalNumOfTasks(t_Handle h_Fm, uint8_t totalNumOfTasks);
+
+/**************************************************************************//**
+ @Function      FM_ConfigTotalFifoSize
+
+ @Description   Changes the total FIFO size from its default
+                configuration [BMI_MAX_FIFO_SIZE].
+
+ @Param[in]     h_Fm                A handle to an FM Module.
+ @Param[in]     totalFifoSize       The selected new value.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_Config() and before FM_Init().
+*//***************************************************************************/
+t_Error FM_ConfigTotalFifoSize(t_Handle h_Fm, uint32_t totalFifoSize);
+
+/**************************************************************************//**
+ @Function      FM_ConfigMaxNumOfOpenDmas
+
+ @Description   Changes the maximum number of open DMAs allowed for this FM from
+                its default configuration [BMI_MAX_NUM_OF_DMAS].
+
+ @Param[in]     h_Fm                A handle to an FM Module.
+ @Param[in]     maxNumOfOpenDmas    The selected new value.
+ + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigMaxNumOfOpenDmas(t_Handle h_Fm, uint8_t maxNumOfOpenDmas); + +/**************************************************************************//** + @Function FM_ConfigThresholds + + @Description Calling this routine changes the internal driver data base + from its default FM threshold configuration: + dispLimit: [0] + prsDispTh: [16] + plcrDispTh: [16] + kgDispTh: [16] + bmiDispTh: [16] + qmiEnqDispTh: [16] + qmiDeqDispTh: [16] + fmCtl1DispTh: [16] + fmCtl2DispTh: [16] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] p_FmThresholds A structure of threshold parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigThresholds(t_Handle h_Fm, t_FmThresholds *p_FmThresholds); + + /**************************************************************************//** + @Function FM_ConfigDmaCacheOverride + + @Description Calling this routine changes the internal driver data base + from its default configuration of cache override mode [e_FM_DMA_NO_CACHE_OR] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] cacheOverride The selected new value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaCacheOverride(t_Handle h_Fm, e_FmDmaCacheOverride cacheOverride); + +/**************************************************************************//** + @Function FM_ConfigDmaAidOverride + + @Description Calling this routine changes the internal driver data base + from its default configuration of aid override mode [TRUE] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] aidOverride The selected new value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaAidOverride(t_Handle h_Fm, bool aidOverride); + +/**************************************************************************//** + @Function FM_ConfigDmaAidMode + + @Description Calling this routine changes the internal driver data base + from its default configuration of aid mode [e_FM_DMA_AID_OUT_TNUM] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] aidMode The selected new value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaAidMode(t_Handle h_Fm, e_FmDmaAidMode aidMode); + +/**************************************************************************//** + @Function FM_ConfigDmaAxiDbgNumOfBeats + + @Description Calling this routine changes the internal driver data base + from its default configuration of axi debug [1] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] axiDbgNumOfBeats The selected new value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). 
+*//***************************************************************************/ +t_Error FM_ConfigDmaAxiDbgNumOfBeats(t_Handle h_Fm, uint8_t axiDbgNumOfBeats); + +/**************************************************************************//** + @Function FM_ConfigDmaCamNumOfEntries + + @Description Calling this routine changes the internal driver data base + from its default configuration of number of CAM entries [32] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] numOfEntries The selected new value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaCamNumOfEntries(t_Handle h_Fm, uint8_t numOfEntries); + +/**************************************************************************//** + @Function FM_ConfigDmaWatchdog + + @Description Calling this routine changes the internal driver data base + from its default watchdog configuration, which is disabled + [0]. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] watchDogValue The selected new value - in microseconds. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaWatchdog(t_Handle h_Fm, uint32_t watchDogValue); + +/**************************************************************************//** + @Function FM_ConfigDmaWriteBufThresholds + + @Description Calling this routine changes the internal driver data base + from its default configuration of DMA write buffer threshold + assertEmergency: [DMA_THRESH_MAX_BUF] + clearEmergency: [DMA_THRESH_MAX_BUF] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior - + When 'assertEmergency' value is reached, emergency is asserted, + then it is held until 'clearEmergency' value is reached. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaWriteBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds); + + /**************************************************************************//** + @Function FM_ConfigDmaCommQThresholds + + @Description Calling this routine changes the internal driver data base + from its default configuration of DMA command queue threshold + assertEmergency: [DMA_THRESH_MAX_COMMQ] + clearEmergency: [DMA_THRESH_MAX_COMMQ] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior - + When 'assertEmergency' value is reached, emergency is asserted, + then it is held until 'clearEmergency' value is reached.. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). 
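+
+                Example (an illustrative sketch only; the values are placeholders
+                and must not exceed DMA_THRESH_MAX_COMMQ):
+
+                    t_FmDmaThresholds commQThresholds;
+
+                    commQThresholds.assertEmergency = 60;   // assert emergency at this fill level
+                    commQThresholds.clearEmergency  = 40;   // release it once the queue drains to this level
+                    FM_ConfigDmaCommQThresholds(h_Fm, &commQThresholds);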
+*//***************************************************************************/ +t_Error FM_ConfigDmaCommQThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds); + +/**************************************************************************//** + @Function FM_ConfigDmaReadBufThresholds + + @Description Calling this routine changes the internal driver data base + from its default configuration of DMA read buffer threshold + assertEmergency: [DMA_THRESH_MAX_BUF] + clearEmergency: [DMA_THRESH_MAX_BUF] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] p_FmDmaThresholds A structure of thresholds to define emergency behavior - + When 'assertEmergency' value is reached, emergency is asserted, + then it is held until 'clearEmergency' value is reached.. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaReadBufThresholds(t_Handle h_Fm, t_FmDmaThresholds *p_FmDmaThresholds); + +/**************************************************************************//** + @Function FM_ConfigDmaSosEmergencyThreshold + + @Description Calling this routine changes the internal driver data base + from its default dma SOS emergency configuration [0] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] dmaSosEmergency The selected new value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaSosEmergencyThreshold(t_Handle h_Fm, uint32_t dmaSosEmergency); + +/**************************************************************************//** + @Function FM_ConfigEnableCounters + + @Description Calling this routine changes the internal driver data base + from its default counters configuration where counters are disabled. + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigEnableCounters(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_ConfigDmaDbgCounter + + @Description Calling this routine changes the internal driver data base + from its default DMA debug counters configuration [e_FM_DMA_DBG_NO_CNT] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] fmDmaDbgCntMode An enum selecting the debug counter mode. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaDbgCounter(t_Handle h_Fm, e_FmDmaDbgCntMode fmDmaDbgCntMode); + +/**************************************************************************//** + @Function FM_ConfigDmaStopOnBusErr + + @Description Calling this routine changes the internal driver data base + from its default selection of bus error behavior [FALSE] + + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] stop TRUE to stop on bus error, FALSE to continue. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). + Only if bus error is enabled. 
+*//***************************************************************************/ +t_Error FM_ConfigDmaStopOnBusErr(t_Handle h_Fm, bool stop); + +/**************************************************************************//** + @Function FM_ConfigDmaEmergency + + @Description Calling this routine changes the internal driver data base + from its default selection of DMA emergency where's it's disabled. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] p_Emergency An OR mask of all required options. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaEmergency(t_Handle h_Fm, t_FmDmaEmergency *p_Emergency); + +/**************************************************************************//** + @Function FM_ConfigDmaEmergencySmoother + + @Description sets the minimum amount of DATA beats transferred on the AXI + READ and WRITE ports before lowering the emergency level. + By default smother is disabled. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] emergencyCnt emergency switching counter. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaEmergencySmoother(t_Handle h_Fm, uint32_t emergencyCnt); + +/**************************************************************************//** + @Function FM_ConfigDmaErr + + @Description Calling this routine changes the internal driver data base + from its default DMA error treatment [e_FM_DMA_ERR_CATASTROPHIC] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] dmaErr The selected new choice. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigDmaErr(t_Handle h_Fm, e_FmDmaErr dmaErr); + +/**************************************************************************//** + @Function FM_ConfigCatastrophicErr + + @Description Calling this routine changes the internal driver data base + from its default behavior on catastrophic error [e_FM_CATASTROPHIC_ERR_STALL_PORT] + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] catastrophicErr The selected new choice. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigCatastrophicErr(t_Handle h_Fm, e_FmCatastrophicErr catastrophicErr); + +/**************************************************************************//** + @Function FM_ConfigEnableMuramTestMode + + @Description Calling this routine changes the internal driver data base + from its default selection of test mode where it's disabled. + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). 
+*//***************************************************************************/ +t_Error FM_ConfigEnableMuramTestMode(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_ConfigEnableIramTestMode + + @Description Calling this routine changes the internal driver data base + from its default selection of test mode where it's disabled. + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigEnableIramTestMode(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_ConfigHaltOnExternalActivation + + @Description Calling this routine changes the internal driver data base + from its default selection of FM behaviour on external halt + activation [FALSE]. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] enable TRUE to enable halt on external halt + activation. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigHaltOnExternalActivation(t_Handle h_Fm, bool enable); + +/**************************************************************************//** + @Function FM_ConfigHaltOnUnrecoverableEccError + + @Description Calling this routine changes the internal driver data base + from its default selection of FM behaviour on unrecoverable + Ecc error [FALSE]. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] enable TRUE to enable halt on unrecoverable Ecc error + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigHaltOnUnrecoverableEccError(t_Handle h_Fm, bool enable); + +/**************************************************************************//** + @Function FM_ConfigException + + @Description Calling this routine changes the internal driver data base + from its default selection of exceptions enablement. + By default all exceptions are enabled. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] exception The exception to be selected. + @Param[in] enable TRUE to enable interrupt, FALSE to mask it. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigException(t_Handle h_Fm, e_FmExceptions exception, bool enable); + +/**************************************************************************//** + @Function FM_ConfigExternalEccRamsEnable + + @Description Calling this routine changes the internal driver data base + from its default [FALSE]. + When this option is enabled Rams ECC enable is not effected + by the FPM RCR bit, but by a JTAG. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] enable TRUE to enable this option. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). 
+*//***************************************************************************/ +t_Error FM_ConfigExternalEccRamsEnable(t_Handle h_Fm, bool enable); + +/**************************************************************************//** + @Function FM_ConfigTnumAgingPeriod + + @Description Calling this routine changes the internal driver data base + from its default configuration for aging of dequeue TNUM's + in the QMI.[0] + Note that this functionality is not available in all chips. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] tnumAgingPeriod Tnum Aging Period in microseconds. + Note that period is recalculated in units of + 64 FM clocks. Driver will pick the closest + possible period. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_ConfigTnumAgingPeriod(t_Handle h_Fm, uint16_t tnumAgingPeriod); + +/** @} */ /* end of FM_advanced_init_grp group */ +/** @} */ /* end of FM_init_grp group */ + + +/**************************************************************************//** + @Group FM_runtime_control_grp FM Runtime Control Unit + + @Description FM Runtime control unit API functions, definitions and enums. + The FM driver provides a set of control routines for each module. + These routines may only be called after the module was fully + initialized (both configuration and initialization routines were + called). They are typically used to get information from hardware + (status, counters/statistics, revision etc.), to modify a current + state or to force/enable a required action. Run-time control may + be called whenever necessary and as many times as needed. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Collection General FM defines. +*//***************************************************************************/ +#define FM_MAX_NUM_OF_VALID_PORTS (FM_MAX_NUM_OF_OH_PORTS + \ + FM_MAX_NUM_OF_1G_RX_PORTS + \ + FM_MAX_NUM_OF_10G_RX_PORTS + \ + FM_MAX_NUM_OF_1G_TX_PORTS + \ + FM_MAX_NUM_OF_10G_TX_PORTS) +/* @} */ + +/**************************************************************************//** + @Description Structure for Port bandwidth requirement. Port is identified + by type and relative id. +*//***************************************************************************/ +typedef struct t_FmPortBandwidth { + e_FmPortType type; /**< FM port type */ + uint8_t relativePortId; /**< Type relative port id */ + uint8_t bandwidth; /**< bandwidth - (in term of percents) */ +} t_FmPortBandwidth; + +/**************************************************************************//** + @Description A Structure containing an array of Port bandwidth requirements. + The user should state the ports requiring bandwidth in terms of + percentage - i.e. all port's bandwidths in the array must add + up to 100. 
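+
+                Example (an illustrative sketch only; the port set and the
+                percentages are placeholders - the only requirement is that
+                they total 100):
+
+                    t_FmPortsBandwidthParams bwParams;
+
+                    bwParams.numOfPorts                         = 2;
+                    bwParams.portsBandwidths[0].type            = e_FM_PORT_TYPE_RX_10G;
+                    bwParams.portsBandwidths[0].relativePortId  = 0;
+                    bwParams.portsBandwidths[0].bandwidth       = 60;
+                    bwParams.portsBandwidths[1].type            = e_FM_PORT_TYPE_TX_10G;
+                    bwParams.portsBandwidths[1].relativePortId  = 0;
+                    bwParams.portsBandwidths[1].bandwidth       = 40;
+                    FM_SetPortsBandwidth(h_Fm, &bwParams);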
+*//***************************************************************************/ +typedef struct t_FmPortsBandwidthParams { + uint8_t numOfPorts; /**< num of ports listed in the array below */ + t_FmPortBandwidth portsBandwidths[FM_MAX_NUM_OF_VALID_PORTS]; + /**< for each port, it's bandwidth (all port's + bandwidths must add up to 100.*/ +} t_FmPortsBandwidthParams; + +/**************************************************************************//** + @Description DMA Emergency control on MURAM +*//***************************************************************************/ +typedef enum e_FmDmaMuramPort { + e_FM_DMA_MURAM_PORT_WRITE, /**< MURAM write port */ + e_FM_DMA_MURAM_PORT_READ /**< MURAM read port */ +} e_FmDmaMuramPort; + +/**************************************************************************//** + @Description enum for defining FM counters +*//***************************************************************************/ +typedef enum e_FmCounters { + e_FM_COUNTERS_ENQ_TOTAL_FRAME = 0, /**< QMI total enqueued frames counter */ + e_FM_COUNTERS_DEQ_TOTAL_FRAME, /**< QMI total dequeued frames counter */ + e_FM_COUNTERS_DEQ_0, /**< QMI 0 frames from QMan counter */ + e_FM_COUNTERS_DEQ_1, /**< QMI 1 frames from QMan counter */ + e_FM_COUNTERS_DEQ_2, /**< QMI 2 frames from QMan counter */ + e_FM_COUNTERS_DEQ_3, /**< QMI 3 frames from QMan counter */ + e_FM_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI dequeue from default queue counter */ + e_FM_COUNTERS_DEQ_FROM_CONTEXT, /**< QMI dequeue from FQ context counter */ + e_FM_COUNTERS_DEQ_FROM_FD, /**< QMI dequeue from FD command field counter */ + e_FM_COUNTERS_DEQ_CONFIRM, /**< QMI dequeue confirm counter */ + e_FM_COUNTERS_SEMAPHOR_ENTRY_FULL_REJECT, /**< DMA semaphor reject due to full entry counter */ + e_FM_COUNTERS_SEMAPHOR_QUEUE_FULL_REJECT, /**< DMA semaphor reject due to full CAM queue counter */ + e_FM_COUNTERS_SEMAPHOR_SYNC_REJECT /**< DMA semaphor reject due to sync counter */ +} e_FmCounters; + +/**************************************************************************//** + @Description structure for returning revision information +*//***************************************************************************/ +typedef struct t_FmRevisionInfo { + uint8_t majorRev; /**< Major revision */ + uint8_t minorRev; /**< Minor revision */ +} t_FmRevisionInfo; + +/**************************************************************************//** + @Description struct for defining DMA status +*//***************************************************************************/ +typedef struct t_FmDmaStatus { + bool cmqNotEmpty; /**< Command queue is not empty */ + bool busError; /**< Bus error occurred */ + bool readBufEccError; /**< Double ECC error on buffer Read */ + bool writeBufEccSysError; /**< Double ECC error on buffer write from system side */ + bool writeBufEccFmError; /**< Double ECC error on buffer write from FM side */ +} t_FmDmaStatus; + + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/**************************************************************************//** + @Function FM_DumpRegs + + @Description Dumps all FM registers + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; + + @Cautions Allowed only FM_Init(). +*//***************************************************************************/ +t_Error FM_DumpRegs(t_Handle h_Fm); +#endif /* (defined(DEBUG_ERRORS) && ... 
*/ + +/**************************************************************************//** + @Function FM_SetException + + @Description Calling this routine enables/disables the specified exception. + Note: Not available for guest partition. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] exception The exception to be selected. + @Param[in] enable TRUE to enable interrupt, FALSE to mask it. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FM_SetException(t_Handle h_Fm, e_FmExceptions exception, bool enable); + +/**************************************************************************//** + @Function FM_SetPortsBandwidth + + @Description Sets relative weights between ports when accessing common resources. + Note: Not available for guest partition. + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] p_PortsBandwidth A structure of ports bandwidths in percentage, i.e. + total must equal 100. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FM_SetPortsBandwidth(t_Handle h_Fm, t_FmPortsBandwidthParams *p_PortsBandwidth); + +/**************************************************************************//** + @Function FM_EnableRamsEcc + + @Description Enables ECC mechanism for all the different FM RAM's; E.g. IRAM, + MURAM, Parser, Keygen, Policer, etc. + Note: + If FM_ConfigExternalEccRamsEnable was called to enable external + setting of ECC, this routine effects IRAM ECC only. + This routine is also called by the driver if an ECC exception is + enabled. + Note: Not available for guest partition. + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_EnableRamsEcc(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_DisableRamsEcc + + @Description Disables ECC mechanism for all the different FM RAM's; E.g. IRAM, + MURAM, Parser, Keygen, Policer, etc. + Note: + If FM_ConfigExternalEccRamsEnable was called to enable external + setting of ECC, this routine effects IRAM ECC only. + In opposed to FM_EnableRamsEcc, this routine must be called + explicitly to disable all Rams ECC. + Note: Not available for guest partition. + + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Config() and before FM_Init(). +*//***************************************************************************/ +t_Error FM_DisableRamsEcc(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_GetRevision + + @Description Returns the FM revision + + @Param[in] h_Fm A handle to an FM Module. + @Param[out] p_FmRevisionInfo A structure of revision information parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). 
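+
+                Example (an illustrative sketch only):
+
+                    t_FmRevisionInfo revInfo;
+
+                    if (FM_GetRevision(h_Fm, &revInfo) == E_OK)
+                    {
+                        // revInfo.majorRev and revInfo.minorRev now hold the FM
+                        // revision, e.g. for display or feature checks.
+                    }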
+*//***************************************************************************/
+t_Error  FM_GetRevision(t_Handle h_Fm, t_FmRevisionInfo *p_FmRevisionInfo);
+
+/**************************************************************************//**
+ @Function      FM_GetCounter
+
+ @Description   Reads one of the FM counters.
+
+ @Param[in]     h_Fm        A handle to an FM Module.
+ @Param[in]     counter     The requested counter.
+
+ @Return        Counter's current value.
+
+ @Cautions      Allowed only following FM_Init().
+                Note that it is the user's responsibility to call this routine
+                only for enabled counters; there is no indication if a
+                disabled counter is accessed.
+*//***************************************************************************/
+uint32_t  FM_GetCounter(t_Handle h_Fm, e_FmCounters counter);
+
+/**************************************************************************//**
+ @Function      FM_ModifyCounter
+
+ @Description   Sets the value of an enabled counter. Use "0" to reset the counter.
+                Note: Not available for guest partition.
+
+ @Param[in]     h_Fm        A handle to an FM Module.
+ @Param[in]     counter     The requested counter.
+ @Param[in]     val         The requested value to be written into the counter.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_Init().
+*//***************************************************************************/
+t_Error  FM_ModifyCounter(t_Handle h_Fm, e_FmCounters counter, uint32_t val);
+
+/**************************************************************************//**
+ @Function      FM_Resume
+
+ @Description   Releases the FM after a halt FM command or after an unrecoverable
+                ECC error.
+                Note: Not available for guest partition.
+
+ @Param[in]     h_Fm        A handle to an FM Module.
+
+ @Return        None.
+*//***************************************************************************/
+void FM_Resume(t_Handle h_Fm);
+
+/**************************************************************************//**
+ @Function      FM_SetDmaEmergency
+
+ @Description   Manually enables or disables DMA emergency for the selected
+                MURAM port.
+                Note: Not available for guest partition.
+
+ @Param[in]     h_Fm        A handle to an FM Module.
+ @Param[in]     muramPort   MURAM direction select.
+ @Param[in]     enable      TRUE to manually enable emergency, FALSE to disable.
+
+ @Return        None.
+
+ @Cautions      Allowed only following FM_Init().
+*//***************************************************************************/
+void FM_SetDmaEmergency(t_Handle h_Fm, e_FmDmaMuramPort muramPort, bool enable);
+
+/**************************************************************************//**
+ @Function      FM_SetDmaExtBusPri
+
+ @Description   Sets the DMA external bus priority.
+                Note: Not available for guest partition.
+
+ @Param[in]     h_Fm        A handle to an FM Module.
+ @Param[in]     pri         External bus priority select
+
+ @Return        None.
+
+ @Cautions      Allowed only following FM_Init().
+*//***************************************************************************/
+void FM_SetDmaExtBusPri(t_Handle h_Fm, e_FmDmaExtBusPri pri);
+
+/**************************************************************************//**
+ @Function      FM_ForceIntr
+
+ @Description   Causes an interrupt event on the requested source.
+                Note: Not available for guest partition.
+
+ @Param[in]     h_Fm        A handle to an FM Module.
+ @Param[in]     exception   An exception to be forced.
+
+ @Return        E_OK on success; Error code if the exception is not enabled,
+                or cannot generate an interrupt.
+
+ @Cautions      Allowed only following FM_Init().
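+
+                Example (an illustrative sketch only; the exception chosen is a
+                placeholder - any e_FmExceptions value that was left enabled may
+                be forced, e.g. to exercise the registered callbacks):
+
+                    t_Error err;
+
+                    err = FM_ForceIntr(h_Fm, e_FM_EX_QMI_SINGLE_ECC);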
+*//***************************************************************************/ +t_Error FM_ForceIntr (t_Handle h_Fm, e_FmExceptions exception); + +/**************************************************************************//** + @Function FM_GetDmaStatus + + @Description Reads the DMA current status + + @Param[in] h_Fm A handle to an FM Module. + @Param[out] p_FmDmaStatus A structure of DMA status parameters. + + @Return None + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +void FM_GetDmaStatus(t_Handle h_Fm, t_FmDmaStatus *p_FmDmaStatus); + +/**************************************************************************//** + @Function FM_GetPcdHandle + + @Description Used by FMC in order to get PCD handle + + @Param[in] h_Fm A handle to an FM Module. + + @Return A handle to the PCD module, NULL if uninitialized. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Handle FM_GetPcdHandle(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_ErrorIsr + Note: Not available for guest partition. + + @Description FM interrupt-service-routine for errors. + + @Param[in] h_Fm A handle to an FM Module. + + @Return E_OK on success; E_EMPTY if no errors found in register, other + error code otherwise. + + @Cautions Allowed only following FM_Init(). + This routine should NOT be called from guest-partition + (i.e. guestId != NCSW_MASTER_ID) +*//***************************************************************************/ +t_Error FM_ErrorIsr(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_EventIsr + Note: Not available for guest partition. + + @Description FM interrupt-service-routine for normal events. + + @Param[in] h_Fm A handle to an FM Module. + + @Cautions Allowed only following FM_Init(). + This routine should NOT be called from guest-partition + (i.e. guestId != NCSW_MASTER_ID) +*//***************************************************************************/ +void FM_EventIsr(t_Handle h_Fm); + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/**************************************************************************//** + @Function FmDumpPortRegs + + @Description Dumps FM port registers which are part of FM common registers + + @Param[in] h_Fm A handle to an FM Module. + @Param[in] hardwarePortId HW port id. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only FM_Init(). +*//***************************************************************************/ +t_Error FmDumpPortRegs(t_Handle h_Fm,uint8_t hardwarePortId); +#endif /* (defined(DEBUG_ERRORS) && ... */ + + +/** @} */ /* end of FM_runtime_control_grp group */ +/** @} */ /* end of FM_lib_grp group */ +/** @} */ /* end of FM_grp group */ + +#endif /* __FM_EXT */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_mac_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_mac_ext.h @@ -0,0 +1,713 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_mac_ext.h + + @Description FM MAC ... +*//***************************************************************************/ +#ifndef __FM_MAC_EXT_H +#define __FM_MAC_EXT_H + +#include "std_ext.h" +#include "enet_ext.h" + + +/**************************************************************************//** + + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_mac_grp FM MAC + + @Description FM MAC API functions, definitions and enums + + @{ +*//***************************************************************************/ + + +/**************************************************************************//** + @Description FM MAC Exceptions +*//***************************************************************************/ +typedef enum e_FmMacExceptions { + e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO = 0 + ,e_FM_MAC_EX_10G_MDIO_CMD_CMPL + ,e_FM_MAC_EX_10G_REM_FAULT + ,e_FM_MAC_EX_10G_LOC_FAULT + ,e_FM_MAC_EX_10G_1TX_ECC_ER + ,e_FM_MAC_EX_10G_TX_FIFO_UNFL + ,e_FM_MAC_EX_10G_TX_FIFO_OVFL + ,e_FM_MAC_EX_10G_TX_ER + ,e_FM_MAC_EX_10G_RX_FIFO_OVFL + ,e_FM_MAC_EX_10G_RX_ECC_ER + ,e_FM_MAC_EX_10G_RX_JAB_FRM + ,e_FM_MAC_EX_10G_RX_OVRSZ_FRM + ,e_FM_MAC_EX_10G_RX_RUNT_FRM + ,e_FM_MAC_EX_10G_RX_FRAG_FRM + ,e_FM_MAC_EX_10G_RX_LEN_ER + ,e_FM_MAC_EX_10G_RX_CRC_ER + ,e_FM_MAC_EX_10G_RX_ALIGN_ER + ,e_FM_MAC_EX_1G_BAB_RX + ,e_FM_MAC_EX_1G_RX_CTL + ,e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET + ,e_FM_MAC_EX_1G_BAB_TX + ,e_FM_MAC_EX_1G_TX_CTL + ,e_FM_MAC_EX_1G_TX_ERR + ,e_FM_MAC_EX_1G_LATE_COL + ,e_FM_MAC_EX_1G_COL_RET_LMT + ,e_FM_MAC_EX_1G_TX_FIFO_UNDRN + ,e_FM_MAC_EX_1G_MAG_PCKT + ,e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET + ,e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET + ,e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET + ,e_FM_MAC_EX_1G_TX_DATA_ERR + ,e_FM_MAC_EX_1G_RX_DATA_ERR + ,e_FM_MAC_EX_1G_1588_TS_RX_ERR + 
,e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL +} e_FmMacExceptions; + +/**************************************************************************//** + @Description TM MAC statistics level +*//***************************************************************************/ +typedef enum e_FmMacStatisticsLevel { + e_FM_MAC_NONE_STATISTICS = 0, /**< No statistics */ + e_FM_MAC_PARTIAL_STATISTICS, /**< Only error counters are available. Optimized for performance */ + e_FM_MAC_FULL_STATISTICS /**< All counters available. Not optimized for performance */ +} e_FmMacStatisticsLevel; + + +/**************************************************************************//** + @Function t_FmMacExceptionCallback + + @Description Fm Mac Exception Callback from FM MAC to the user + + @Param[in] h_App - Handle to the upper layer handler + + @Param[in] exceptions - The exception that occurred + + + @Return void. +*//***************************************************************************/ +typedef void (t_FmMacExceptionCallback)(t_Handle h_App, e_FmMacExceptions exceptions); + + +/**************************************************************************//** + @Description TM MAC statistics rfc3635 +*//***************************************************************************/ +typedef struct t_FmMacStatistics { +/* RMON */ + uint64_t eStatPkts64; /**< r-10G tr-DT 64 byte frame counter */ + uint64_t eStatPkts65to127; /**< r-10G 65 to 127 byte frame counter */ + uint64_t eStatPkts128to255; /**< r-10G 128 to 255 byte frame counter */ + uint64_t eStatPkts256to511; /**< r-10G 256 to 511 byte frame counter */ + uint64_t eStatPkts512to1023; /**< r-10G 512 to 1023 byte frame counter */ + uint64_t eStatPkts1024to1518; /**< r-10G 1024 to 1518 byte frame counter */ + uint64_t eStatPkts1519to1522; /**< r-10G 1519 to 1522 byte good frame count */ +/* */ + uint64_t eStatFragments; /**< Total number of packets that were less than 64 octets long with a wrong CRC.*/ + uint64_t eStatJabbers; /**< Total number of packets longer than valid maximum length octets */ + uint64_t eStatsDropEvents; /**< number of dropped packets due to internal errors of the MAC Client. */ + uint64_t eStatCRCAlignErrors; /**< Incremented when frames of correct length but with CRC error are received.*/ + uint64_t eStatUndersizePkts; /**< Total number of packets that were less than 64 octets long with a good CRC.*/ + uint64_t eStatOversizePkts; /**< T,B.D*/ +/* Pause */ + uint64_t teStatPause; /**< Pause MAC Control received */ + uint64_t reStatPause; /**< Pause MAC Control sent */ + +/* MIB II */ + uint64_t ifInOctets; /**< Total number of byte received. */ + uint64_t ifInPkts; /**< Total number of packets received.*/ + uint64_t ifInMcastPkts; /**< Total number of multicast frame received*/ + uint64_t ifInBcastPkts; /**< Total number of broadcast frame received */ + uint64_t ifInDiscards; /**< Frames received, but discarded due to problems within the MAC RX. */ + uint64_t ifInErrors; /**< Number of frames received with error: + - FIFO Overflow Error + - CRC Error + - Frame Too Long Error + - Alignment Error + - The dedicated Error Code (0xfe, not a code error) was received */ + uint64_t ifOutOctets; /**< Total number of byte sent. 
*/ + uint64_t ifOutPkts; /**< Total number of packets sent .*/ + uint64_t ifOutMcastPkts; /**< Total number of multicast frame sent */ + uint64_t ifOutBcastPkts; /**< Total number of multicast frame sent */ + uint64_t ifOutDiscards; /**< Frames received, but discarded due to problems within the MAC TX N/A!.*/ + uint64_t ifOutErrors; /**< Number of frames transmitted with error: + - FIFO Overflow Error + - FIFO Underflow Error + - Other */ +} t_FmMacStatistics; + + +/**************************************************************************//** + @Group FM_mac_init_grp Initialization Unit + + @Description FM MAC Initialization Unit + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description FM MAC config input +*//***************************************************************************/ +typedef struct t_FmMacParams { + uintptr_t baseAddr; /**< Base of memory mapped FM MAC registers */ + t_EnetAddr addr; /**< MAC address of device; First octet is sent first */ + uint8_t macId; /**< MAC ID <10G 0> */ + e_EnetMode enetMode; /**< Ethernet operation mode (MAC-PHY interface and speed) */ + t_Handle h_Fm; /**< A handle to the FM object this port related to */ + int mdioIrq; /**< MDIO exceptions interrupt source - not valid for all + MACs; MUST be set to 'NO_IRQ' for MACs that don't have + mdio-irq, or for polling */ + t_FmMacExceptionCallback *f_Event; /**< MDIO Events Callback Routine */ + t_FmMacExceptionCallback *f_Exception; /**< Exception Callback Routine */ + t_Handle h_App; /**< A handle to an application layer object; This handle will + be passed by the driver upon calling the above callbacks */ +} t_FmMacParams; + + +/**************************************************************************//** + @Function FM_MAC_Config + + @Description Creates descriptor for the FM MAC module. + + The routine returns a handle (descriptor) to the FM MAC object. + This descriptor must be passed as first parameter to all other + FM MAC function calls. + + No actual initialization or configuration of FM MAC hardware is + done by this routine. + + @Param[in] p_FmMacParam - Pointer to data structure of parameters + + @Retval Handle to FM MAC object, or NULL for Failure. +*//***************************************************************************/ +t_Handle FM_MAC_Config (t_FmMacParams *p_FmMacParam); + +/**************************************************************************//** + @Function FM_MAC_Init + + @Description Initializes the FM MAC module + + @Param[in] h_FmMac - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_MAC_Init (t_Handle h_FmMac); + +/**************************************************************************//** + @Function FM_Free + + @Description Frees all resources that were assigned to FM MAC module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_FmMac - FM module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_MAC_Free (t_Handle h_FmMac); + + +/**************************************************************************//** + @Group FM_mac_advanced_init_grp Advanced Configuration Unit + + @Description Configuration functions used to change default values. 
+ + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_MAC_ConfigResetOnInit + + @Description Tell the driver whether to reset the FM MAC before initialization or + not. It changes the default configuration [FALSE]. + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] enable When TRUE, FM will be reset before any initialization. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigResetOnInit (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_ConfigLoopback + + @Description Enable/Disable internal loopback mode + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] enable TRUE to enable or FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigLoopback (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_ConfigMaxFrameLength + + @Description Setup maximum Frame Length + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] newVal MAX Frame length + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigMaxFrameLength (t_Handle h_FmMac, uint16_t newVal); + +/**************************************************************************//** + @Function FM_MAC_ConfigWan + + @Description ENABLE WAN mode in 10G MAC + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] enable TRUE to enable or FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigWan (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_ConfigPadAndCrc + + @Description Config PAD and CRC mode + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] enable TRUE to enable or FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigPadAndCrc (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_ConfigHalfDuplex + + @Description Config Half Duplex Mode + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] enable TRUE to enable or FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). 
+*//***************************************************************************/ +t_Error FM_MAC_ConfigHalfDuplex (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_ConfigLengthCheck + + @Description Configure thef frame length checking. + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] enable TRUE to enable or FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigLengthCheck (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_ConfigException + + @Description Change Exception selection from default + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] ex Type of the desired exceptions + @Param[in] enable TRUE to enable the specified exception, FALSE to disable it. + + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Config() and before FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ConfigException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable); + +#ifdef FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +t_Error FM_MAC_ConfigSkipFman11Workaround (t_Handle h_FmMac); +#endif /* FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 */ +/** @} */ /* end of FM_mac_advanced_init_grp group */ +/** @} */ /* end of FM_mac_init_grp group */ + + +/**************************************************************************//** + @Group FM_mac_runtime_control_grp Runtime Control Unit + + @Description FM MAC Runtime control unit API functions, definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_MAC_Enable + + @Description Enable the MAC + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] mode Mode of operation (RX, TX, Both) + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_Enable (t_Handle h_FmMac, e_CommMode mode); + +/**************************************************************************//** + @Function FM_MAC_Disable + + @Description DISABLE the MAC + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] mode Define what part to Disable (RX, TX or BOTH) + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_Disable (t_Handle h_FmMac, e_CommMode mode); + +/**************************************************************************//** + @Function FM_MAC_Enable1588TimeStamp + + @Description Enables the TSU operation. + + @Param[in] h_Fm - Handle to the PTP as returned from the FM_MAC_PtpConfig. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). 
+*//***************************************************************************/ +t_Error FM_MAC_Enable1588TimeStamp(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_MAC_Disable1588TimeStamp + + @Description Disables the TSU operation. + + @Param[in] h_Fm - Handle to the PTP as returned from the FM_MAC_PtpConfig. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_Disable1588TimeStamp(t_Handle h_Fm); + +/**************************************************************************//** + @Function FM_MAC_SetTxAutoPauseFrames + + @Description Enable/Disable transmition of Pause-Frames. + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] pauseTime Pause quanta value used with transmitted pause frames. + Each quanta represents a 512 bit-times; Note that '0' + as an input here will be used as disabling the + transmission of the pause-frames. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_SetTxAutoPauseFrames (t_Handle h_FmMac, uint16_t pauseTime); + +/**************************************************************************//** + @Function FM_MAC_SetRxIgnorePauseFrames + + @Description Enable/Disable ignoring of Pause-Frames. + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] en boolean indicates whether to ignore the incoming pause + frames or not. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_SetRxIgnorePauseFrames (t_Handle h_FmMac, bool en); + +/**************************************************************************//** + @Function FM_MAC_ResetCounters + + @Description reset all statistics counters + + @Param[in] h_FmMac A handle to a FM MAC Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ResetCounters (t_Handle h_FmMac); + +/**************************************************************************//** + @Function FM_MAC_SetException + + @Description Enable/Disable a specific Exception + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] ex Type of the desired exceptions + @Param[in] enable TRUE to enable the specified exception, FALSE to disable it. + + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_SetException(t_Handle h_FmMac, e_FmMacExceptions ex, bool enable); + +/**************************************************************************//** + @Function FM_MAC_SetStatistics + + @Description Define Statistics level. + Where applicable, the routine also enables the MIB counters + overflow interrupt in order to keep counters accurate + and account for overflows. + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] statisticsLevel Full statistics level provides all standard counters but may + reduce performance. Partial statistics provides only special + event counters (errors etc.). 
If selected, regular counters (such as + byte/packet) will be invalid and will return -1. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_SetStatistics (t_Handle h_FmMac, e_FmMacStatisticsLevel statisticsLevel); + +/**************************************************************************//** + @Function FM_MAC_GetStatistics + + @Description get all statistics counters + + @Param[in] h_FmMac A handle to a FM MAC Module. + @Param[in] p_Statistics Staructure with statistics + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_Init(). +*//***************************************************************************/ +t_Error FM_MAC_GetStatistics (t_Handle h_FmMac, t_FmMacStatistics *p_Statistics); + +/**************************************************************************//** + @Function FM_MAC_ModifyMacAddr + + @Description Replace the main MAC Address + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] p_EnetAddr - Ethernet Mac address + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_ModifyMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + +/**************************************************************************//** + @Function FM_MAC_AddHashMacAddr + + @Description Add an Address to the hash table. This is for filter purpose only. + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] p_EnetAddr - Ethernet Mac address + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). It is a filter only address. + @Cautions Some address need to be filterd out in upper FM blocks. +*//***************************************************************************/ +t_Error FM_MAC_AddHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + +/**************************************************************************//** + @Function FM_MAC_RemoveHashMacAddr + + @Description Delete an Address to the hash table. This is for filter purpose only. + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] p_EnetAddr - Ethernet Mac address + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_RemoveHashMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + +/**************************************************************************//** + @Function FM_MAC_AddExactMatchMacAddr + + @Description Add a unicast or multicast mac address for exact-match filtering + (8 on dTSEC, 2 for 10G-MAC) + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] p_EnetAddr - MAC Address to ADD + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_AddExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + +/**************************************************************************//** + @Function FM_MAC_RemovelExactMatchMacAddr + + @Description Remove a uni cast or multi cast mac address. + + @Param[in] h_FmMac - A handle to a FM Module. 
+ @Param[in] p_EnetAddr - MAC Address to remove + + @Return E_OK on success; Error code otherwise.. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_RemovelExactMatchMacAddr (t_Handle h_FmMac, t_EnetAddr *p_EnetAddr); + +/**************************************************************************//** + @Function FM_MAC_SetPromiscuous + + @Description Enable/Disable MAC Promiscuous mode for ALL mac addresses. + + @Param[in] h_FmMac - A handle to a FM MAC Module. + @Param[in] enable - TRUE to enable or FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_SetPromiscuous (t_Handle h_FmMac, bool enable); + +/**************************************************************************//** + @Function FM_MAC_AdjustLink + + @Description Adjusts the Ethernet link with new speed/duplex setup. + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] speed - Ethernet speed. + @Param[in] fullDuplex - TRUE for Full-Duplex mode; + FALSE for Half-Duplex mode. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_MAC_AdjustLink(t_Handle h_FmMac, e_EnetSpeed speed, bool fullDuplex); + +/**************************************************************************//** + @Function FM_MAC_GetId + + @Description Return the MAC ID + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[out] p_MacId - MAC ID of device + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_GetId (t_Handle h_FmMac, uint32_t *p_MacId); + +/**************************************************************************//** + @Function FM_MAC_GetVesrion + + @Description Return Mac HW chip version + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[out] p_MacVresion - Mac version as defined by the chip + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_GetVesrion (t_Handle h_FmMac, uint32_t *p_MacVresion); + +/**************************************************************************//** + @Function FM_MAC_MII_WritePhyReg + + @Description Write data into Phy Register + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] phyAddr - Phy Address on the MII bus + @Param[in] reg - Register Number. + @Param[in] data - Data to write. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_MII_WritePhyReg (t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t data); + +/**************************************************************************//** + @Function FM_MAC_MII_ReadPhyReg + + @Description Read data from Phy Register + + @Param[in] h_FmMac - A handle to a FM Module. + @Param[in] phyAddr - Phy Address on the MII bus + @Param[in] reg - Register Number. + @Param[out] p_Data - Data from PHY. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). 
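
The runtime statistics and MII access routines in this unit might be combined as in the following sketch. e_FM_MAC_FULL_STATISTICS, t_FmMacStatistics and FM_MAC_MII_ReadPhyReg() (declared just below) are taken from this header, while the PHY address (0) and register number (1) are placeholders for board-specific values.

    /* Illustrative sketch only: full statistics plus an MII register read after FM_MAC_Init(). */
    #include "fm_mac_ext.h"

    static t_Error my_mac_query(t_Handle h_FmMac)
    {
        t_FmMacStatistics stats;
        uint16_t          phyReg;
        t_Error           err;

        /* All standard counters, at some performance cost (see e_FmMacStatisticsLevel). */
        err = FM_MAC_SetStatistics(h_FmMac, e_FM_MAC_FULL_STATISTICS);
        if (err != E_OK)
            return err;

        err = FM_MAC_GetStatistics(h_FmMac, &stats);
        if (err != E_OK)
            return err;
        /* e.g. stats.ifInOctets and stats.ifInErrors are now valid */

        /* Read register 1 of the PHY at MII address 0 - placeholder values. */
        return FM_MAC_MII_ReadPhyReg(h_FmMac, 0, 1, &phyReg);
    }
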
+*//***************************************************************************/ +t_Error FM_MAC_MII_ReadPhyReg(t_Handle h_FmMac, uint8_t phyAddr, uint8_t reg, uint16_t *p_Data); + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/**************************************************************************//** + @Function FM_MAC_DumpRegs + + @Description Dump internal registers + + @Param[in] h_FmMac - A handle to a FM Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only after FM_MAC_Init(). +*//***************************************************************************/ +t_Error FM_MAC_DumpRegs(t_Handle h_FmMac); +#endif /* (defined(DEBUG_ERRORS) && ... */ + +/** @} */ /* end of FM_mac_runtime_control_grp group */ +/** @} */ /* end of FM_mac_grp group */ +/** @} */ /* end of FM_grp group */ + + + +#endif /* __FM_MAC_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_muram_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_muram_ext.h @@ -0,0 +1,158 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_muram_ext.h + + @Description FM MURAM Application Programming Interface. 
+*//***************************************************************************/ +#ifndef __FM_MURAM_EXT +#define __FM_MURAM_EXT + +#include "error_ext.h" +#include "std_ext.h" + + +/**************************************************************************//** + + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_muram_grp FM MURAM + + @Description FM MURAM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_muram_init_grp FM MURAM Initialization + + @Description FM MURAM initialization API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_MURAM_ConfigAndInit + + @Description Creates partition in the MURAM. + + The routine returns a handle (descriptor) to the MURAM partition. + This descriptor must be passed as first parameter to all other + FM-MURAM function calls. + + No actual initialization or configuration of FM_MURAM hardware is + done by this routine. + + @Param[in] baseAddress - Pointer to base of memory mapped FM-MURAM. + @Param[in] size - Size of the FM-MURAM partition. + + @Return Handle to FM-MURAM object, or NULL for Failure. +*//***************************************************************************/ +t_Handle FM_MURAM_ConfigAndInit(uintptr_t baseAddress, uint32_t size); + +/**************************************************************************//** + @Function FM_MURAM_Free + + @Description Frees all resources that were assigned to FM-MURAM module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_FmMuram - FM-MURAM module descriptor. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_MURAM_Free(t_Handle h_FmMuram); + +/** @} */ /* end of FM_muram_init_grp group */ + + +/**************************************************************************//** + @Group FM_muram_ctrl_grp FM MURAM Control + + @Description FM MURAM control API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_MURAM_AllocMem + + @Description Allocate some memory from FM-MURAM partition. + + @Param[in] h_FmMuram - FM-MURAM module descriptor. + @Param[in] size - size of the memory to be allocated. + @Param[in] align - Alignment of the memory. + + @Return address of the allocated memory; NULL otherwise. +*//***************************************************************************/ +void * FM_MURAM_AllocMem(t_Handle h_FmMuram, uint32_t size, uint32_t align); + +/**************************************************************************//** + @Function FM_MURAM_AllocMemForce + + @Description Allocate some specific memory from FM-MURAM partition (according + to base). + + @Param[in] h_FmMuram - FM-MURAM module descriptor. + @Param[in] base - the desired base-address to be allocated. + @Param[in] size - size of the memory to be allocated. 
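
A minimal sketch of partition setup with the routines above is shown below; the MURAM base address, the 256-byte block size and the 16-byte alignment are placeholder values supplied by platform code, and my_muram_setup is an illustrative name.

    /* Illustrative sketch only: carving a MURAM partition and allocating a block from it. */
    #include "fm_muram_ext.h"

    static t_Handle my_muram_setup(uintptr_t muramBase, uint32_t muramSize)
    {
        t_Handle  h_FmMuram;
        void     *p_Block;

        /* A single call creates and initializes the partition descriptor. */
        h_FmMuram = FM_MURAM_ConfigAndInit(muramBase, muramSize);
        if (!h_FmMuram)
            return NULL;

        /* 256 bytes, 16-byte aligned - placeholder values. */
        p_Block = FM_MURAM_AllocMem(h_FmMuram, 256, 16);
        if (!p_Block) {
            FM_MURAM_Free(h_FmMuram);
            return NULL;
        }

        /* ... use p_Block ...; release it with FM_MURAM_FreeMem(h_FmMuram, p_Block)
         * and the whole partition with FM_MURAM_Free(h_FmMuram) when done. */
        return h_FmMuram;
    }
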
+ + @Return address of the allocated memory; NULL otherwise. +*//***************************************************************************/ +void * FM_MURAM_AllocMemForce(t_Handle h_FmMuram, uint64_t base, uint32_t size); + +/**************************************************************************//** + @Function FM_MURAM_FreeMem + + @Description Free an allocated memory from FM-MURAM partition. + + @Param[in] h_FmMuram - FM-MURAM module descriptor. + @Param[in] ptr - A pointer to an allocated memory. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_MURAM_FreeMem(t_Handle h_FmMuram, void *ptr); + +/** @} */ /* end of FM_muram_ctrl_grp group */ +/** @} */ /* end of FM_muram_grp group */ +/** @} */ /* end of FM_grp group */ + + + +#endif /* __FM_MURAM_EXT */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_pcd_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_pcd_ext.h @@ -0,0 +1,2160 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_pcd_ext.h + + @Description FM PCD ... 
+*//***************************************************************************/ +#ifndef __FM_PCD_EXT +#define __FM_PCD_EXT + +#include "std_ext.h" +#include "net_ext.h" +#include "list_ext.h" +#include "fm_ext.h" + + +/**************************************************************************//** + + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_PCD_grp FM PCD + + @Description FM PCD API functions, definitions and enums + + The FM PCD module is responsible for the initialization of all + global classifying FM modules. This includes the parser general and + common registers, the key generator global and common registers, + and the Policer global and common registers. + In addition, the FM PCD SW module will initialize all required + key generator schemes, coarse classification flows, and Policer + profiles. When An FM module is configured to work with one of these + entities, it will register to it using the FM PORT API. The PCD + module will manage the PCD resources - i.e. resource management of + Keygen schemes, etc. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Collection General PCD defines +*//***************************************************************************/ +typedef uint32_t fmPcdEngines_t; /**< options as defined below: */ + +#define FM_PCD_NONE 0 /**< No PCD Engine indicated */ +#define FM_PCD_PRS 0x80000000 /**< Parser indicated */ +#define FM_PCD_KG 0x40000000 /**< Keygen indicated */ +#define FM_PCD_CC 0x20000000 /**< Coarse classification indicated */ +#define FM_PCD_PLCR 0x10000000 /**< Policer indicated */ +#define FM_PCD_MANIP 0x08000000 /**< Manipulation indicated */ + +#define FM_PCD_MAX_NUM_OF_PRIVATE_HDRS 2 /**< Number of units/headers saved for user */ + +#define FM_PCD_PRS_NUM_OF_HDRS 16 /**< Number of headers supported by HW parser */ +#define FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS (32 - FM_PCD_MAX_NUM_OF_PRIVATE_HDRS) + /**< number of distinction units is limited by + register size (32), - reserved bits for + private headers. */ + +#define FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS 4 /**< Maximum number of interchangeable headers in a distinction unit */ +#define FM_PCD_KG_NUM_OF_GENERIC_REGS 8 /**< Total number of generic KG registers */ +#define FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY 35 /**< Max number allowed on any configuration. + For reason of HW implementation, in most + cases less than this will be allowed. The + driver will return error in initialization + time if resource is overused. */ +#define FM_PCD_KG_NUM_OF_EXTRACT_MASKS 4 /**< Total number of masks allowed on KG extractions. 
*/ +#define FM_PCD_KG_NUM_OF_DEFAULT_GROUPS 16 /**< Number of default value logical groups */ + +#define FM_PCD_PRS_NUM_OF_LABELS 32 /**< Max number of SW parser label */ +#define FM_PCD_SW_PRS_SIZE 0x00000800 /**< Total size of sw parser area */ +#define FM_PCD_PRS_SW_OFFSET 0x00000040 /**< Size of illegal addresses at the beginning + of the SW parser area */ +#define FM_PCD_PRS_SW_PATCHES_SIZE 0x00000200 /**< Number of bytes saved for patches */ +#define FM_PCD_PRS_SW_TAIL_SIZE 4 /**< Number of bytes that must be cleared at + the end of the SW parser area */ +#define FM_SW_PRS_MAX_IMAGE_SIZE (FM_PCD_SW_PRS_SIZE-FM_PCD_PRS_SW_OFFSET-FM_PCD_PRS_SW_TAIL_SIZE-FM_PCD_PRS_SW_PATCHES_SIZE) + /**< Max possible size of SW parser code */ + +#define FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE 128 /**< Max possible size of insertion template for + insert manipulation*/ +/* @} */ + + +/**************************************************************************//** + @Group FM_PCD_init_grp FM PCD Initialization Unit + + @Description FM PCD Initialization Unit + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description PCD counters +*//***************************************************************************/ +typedef enum e_FmPcdCounters { + e_FM_PCD_KG_COUNTERS_TOTAL, /**< Policer counter */ + e_FM_PCD_PLCR_COUNTERS_YELLOW, /**< Policer counter */ + e_FM_PCD_PLCR_COUNTERS_RED, /**< Policer counter */ + e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED, /**< Policer counter */ + e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW, /**< Policer counter */ + e_FM_PCD_PLCR_COUNTERS_TOTAL, /**< Policer counter */ + e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH, /**< Policer counter */ + e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES, /**< Parser counter */ + e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES, /**< MURAM counter */ + e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES, /**< MURAM counter */ + e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES, /**< MURAM counter */ + e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES, /**< MURAM counter */ + e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES /**< FPM counter */ +} e_FmPcdCounters; + +/**************************************************************************//** + @Description PCD interrupts +*//***************************************************************************/ +typedef enum e_FmPcdExceptions { + e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, /**< Keygen ECC error */ + e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC, /**< Read Buffer ECC error */ + e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, /**< Write Buffer ECC error on system side */ + e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR, /**< 
Write Buffer ECC error on FM side */ + e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE, /**< Self init complete */ + e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE, /**< Atomic action complete */ + e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC, /**< Parser ECC error */ + e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC /**< Parser single ECC */ +} e_FmPcdExceptions; + + +/**************************************************************************//** + @Description Exceptions user callback routine, will be called upon an + exception passing the exception identification. + + @Param[in] h_App - User's application descriptor. + @Param[in] exception - The exception. + *//***************************************************************************/ +typedef void (t_FmPcdExceptionCallback) (t_Handle h_App, e_FmPcdExceptions exception); + +/**************************************************************************//** + @Description Exceptions user callback routine, will be called upon an exception + passing the exception identification. + + @Param[in] h_App - User's application descriptor. + @Param[in] exception - The exception. + @Param[in] index - id of the relevant source (may be scheme or profile id). + *//***************************************************************************/ +typedef void (t_FmPcdIdExceptionCallback) ( t_Handle h_App, + e_FmPcdExceptions exception, + uint16_t index); + +/**************************************************************************//** + @Description A callback for enqueuing frame onto a QM queue. + + @Param[in] h_App - User's application descriptor. + @Param[in] p_Fd - Frame descriptor for the frame. + + @Return E_OK on success; Error code otherwise. + *//***************************************************************************/ +typedef t_Error (t_FmPcdQmEnqueueCallback) (t_Handle h_QmArg, void *p_Fd); + +/**************************************************************************//** + @Description A structure for Host-Command + When using Host command for PCD functionalities, a dedicated port + must be used. If this routine is called for a PCD in a single partition + environment, or it is the Master partition in a Multi partition + environment, The port will be initialized by the PCD driver + initialization routine. + *//***************************************************************************/ +typedef struct t_FmPcdHcParams { + uintptr_t portBaseAddr; /**< Host-Command Port Virtual Address of + memory mapped registers.*/ + uint8_t portId; /**< Host-Command Port Id (0-6 relative + to Host-Command/Offline parsing ports) */ + uint16_t liodnBase; /**< Irrelevant for P4080 rev 1. LIODN base for this port, to be + used together with LIODN offset. */ + uint32_t errFqid; /**< Host-Command Port Error Queue Id. */ + uint32_t confFqid; /**< Host-Command Port Confirmation queue Id. */ + uint32_t qmChannel; /**< Host-Command port - QM-channel dedicated to + this port will be used by the FM for dequeue. 
*/ + t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< Call back routine for enqueuing a frame to the QM */ + t_Handle h_QmArg; /**< A handle of the QM module */ +} t_FmPcdHcParams; + +/**************************************************************************//** + @Description The main structure for PCD initialization + *//***************************************************************************/ +typedef struct t_FmPcdParams { + bool prsSupport; /**< TRUE if Parser will be used for any + of the FM ports */ + bool ccSupport; /**< TRUE if Coarse Classification will be used for any + of the FM ports */ + bool kgSupport; /**< TRUE if Keygen will be used for any + of the FM ports */ + bool plcrSupport; /**< TRUE if Policer will be used for any + of the FM ports */ + t_Handle h_Fm; /**< A handle to the FM module */ + uint8_t numOfSchemes; /**< Number of schemes dedicated to this partition. */ + bool useHostCommand; /**< Optional for single partition, Mandatory for Multi partition */ + t_FmPcdHcParams hc; /**< Relevant only if useHostCommand=TRUE. + Host Command parameters. */ + + t_FmPcdExceptionCallback *f_Exception; /**< Relevant for master (or single) partition only: Callback routine + to be called of PCD exception */ + t_FmPcdIdExceptionCallback *f_ExceptionId; /**< Relevant for master (or single) partition only: Callback routine + to be used for a single scheme and + profile exceptions */ + t_Handle h_App; /**< Relevant for master (or single) partition only: A handle to an + application layer object; This handle will + be passed by the driver upon calling the above callbacks */ +} t_FmPcdParams; + + +/**************************************************************************//** + @Function FM_PCD_Config + + @Description Basic configuration of the PCD module. + Creates descriptor for the FM PCD module. + + @Param[in] p_FmPcdParams A structure of parameters for the initialization of PCD. + + @Return A handle to the initialized module. +*//***************************************************************************/ +t_Handle FM_PCD_Config(t_FmPcdParams *p_FmPcdParams); + +/**************************************************************************//** + @Function FM_PCD_Init + + @Description Initialization of the PCD module. + + @Param[in] h_FmPcd - FM PCD module descriptor. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PCD_Init(t_Handle h_FmPcd); + +/**************************************************************************//** + @Function FM_PCD_Free + + @Description Frees all resources that were assigned to FM module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_FmPcd - FM PCD module descriptor. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PCD_Free(t_Handle h_FmPcd); + +/**************************************************************************//** + @Group FM_PCD_advanced_init_grp FM PCD Advanced Configuration Unit + + @Description Configuration functions used to change default values. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_PCD_ConfigPlcrNumOfSharedProfiles + + @Description Calling this routine changes the internal driver data base + from its default selection of exceptions enablement. + [4]. 
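
A possible single-partition PCD bring-up using t_FmPcdParams and the Config/Init/Free routines above is sketched below. The engine-support flags, the scheme count (8) and the my_pcd_* names are placeholders; host-command mode is left disabled, which the structure above describes as optional for a single partition.

    /* Illustrative sketch only: single-partition PCD bring-up with parser and keygen support. */
    #include "fm_pcd_ext.h"

    static void my_pcd_exception(t_Handle h_App, e_FmPcdExceptions exception)
    {
        /* React to e.g. e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC. */
    }

    static t_Handle my_pcd_bring_up(t_Handle h_Fm)
    {
        t_FmPcdParams params = { 0 };
        t_Handle      h_FmPcd;

        params.h_Fm           = h_Fm;
        params.prsSupport     = TRUE;             /* parser used by some port */
        params.kgSupport      = TRUE;             /* keygen used by some port */
        params.ccSupport      = FALSE;
        params.plcrSupport    = FALSE;
        params.numOfSchemes   = 8;                /* placeholder */
        params.useHostCommand = FALSE;            /* optional for a single partition */
        params.f_Exception    = my_pcd_exception;
        params.h_App          = NULL;

        h_FmPcd = FM_PCD_Config(&params);
        if (!h_FmPcd)
            return NULL;

        if (FM_PCD_Init(h_FmPcd) != E_OK) {
            FM_PCD_Free(h_FmPcd);
            return NULL;
        }
        return h_FmPcd;
    }
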
+ + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] numOfSharedPlcrProfiles Number of profiles to + be shared between ports on this partition + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PCD_ConfigPlcrNumOfSharedProfiles(t_Handle h_FmPcd, uint16_t numOfSharedPlcrProfiles); + +/**************************************************************************//** + @Function FM_PCD_ConfigException + + @Description Calling this routine changes the internal driver data base + from its default selection of exceptions enablement. + By default all exceptions are enabled. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] exception The exception to be selected. + @Param[in] enable TRUE to enable interrupt, FALSE to mask it. + + @Return E_OK on success; Error code otherwise. + + @Cautions Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_ConfigException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable); + +/**************************************************************************//** + @Function FM_PCD_ConfigPlcrAutoRefreshMode + + @Description Calling this routine changes the internal driver data base + from its default selection of exceptions enablement. + By default autorefresh is enabled. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] enable TRUE to enable, FALSE to disable + + @Return E_OK on success; Error code otherwise. + + @Cautions Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_ConfigPlcrAutoRefreshMode(t_Handle h_FmPcd, bool enable); + +/**************************************************************************//** + @Function FM_PCD_ConfigPrsMaxCycleLimit + + @Description Calling this routine changes the internal data structure for + the maximum parsing time from its default value + [0]. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] value 0 to disable the mechanism, or new + maximum parsing time. + + @Return E_OK on success; Error code otherwise. + + @Cautions Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_ConfigPrsMaxCycleLimit(t_Handle h_FmPcd,uint16_t value); + +/** @} */ /* end of FM_PCD_advanced_init_grp group */ +/** @} */ /* end of FM_PCD_init_grp group */ + + +/**************************************************************************//** + @Group FM_PCD_Runtime_grp FM PCD Runtime Unit + + @Description FM PCD Runtime Unit + + The runtime control allows creation of PCD infrastructure modules + such as Network Environment Characteristics, Classification Plan + Groups and Coarse Classification Trees. + It also allows on-the-fly initialization, modification and removal + of PCD modules such as Keygen schemes, coarse classification nodes + and Policer profiles. + + + In order to explain the programming model of the PCD driver interface + a few terms should be explained, and will be used below. + * Distinction Header - One of the 16 protocols supported by the FM parser, + or one of the shim headers (1 or 2). May be a header with a special + option (see below). + * Interchangeable Headers Group- This is a group of Headers recognized + by either one of them. 
For example, if in a specific context the user + chooses to treat IPv4 and IPV6 in the same way, they may create an + interchangeable Headers Unit consisting of these 2 headers. + * A Distinction Unit - a Distinction Header or an Interchangeable Headers + Group. + * Header with special option - applies to ethernet, mpls, vlan, ipv4 and + ipv6, includes multicast, broadcast and other protocol specific options. + In terms of hardware it relates to the options available in the classification + plan. + * Network Environment Characteristics - a set of Distinction Units that define + the total recognizable header selection for a certain environment. This is + NOT the list of all headers that will ever appear in a flow, but rather + everything that needs distinction in a flow, where distinction is made by keygen + schemes and coarse classification action descriptors. + + The PCD runtime modules initialization is done in stages. The first stage after + initializing the PCD module itself is to establish a Network Flows Environment + Definition. The application may choose to establish one or more such environments. + Later, when needed, the application will have to state, for some of its modules, + to which single environment it belongs. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description A structure for sw parser labels + *//***************************************************************************/ +typedef struct t_FmPcdPrsLabelParams { + uint32_t instructionOffset; /**< SW parser label instruction offset (2 bytes + resolution), relative to Parser RAM. */ + e_NetHeaderType hdr; /**< The existance of this header will envoke + the sw parser code. */ + uint8_t indexPerHdr; /**< Normally 0, if more than one sw parser + attachments for the same header, use this + index to distinguish between them. */ +} t_FmPcdPrsLabelParams; + +/**************************************************************************//** + @Description A structure for sw parser + *//***************************************************************************/ +typedef struct t_FmPcdPrsSwParams { + bool override; /**< FALSE to invoke a check that nothing else + was loaded to this address, including + internal patches. + TRUE to override any existing code.*/ + uint32_t size; /**< SW parser code size */ + uint16_t base; /**< SW parser base (in instruction counts! + must be larger than 0x20)*/ + uint8_t *p_Code; /**< SW parser code */ + uint32_t swPrsDataParams[FM_PCD_PRS_NUM_OF_HDRS]; + /**< SW parser data (parameters) */ + uint8_t numOfLabels; /**< Number of labels for SW parser. */ + t_FmPcdPrsLabelParams labelsTable[FM_PCD_PRS_NUM_OF_LABELS]; + /**< SW parser labels table, containing + numOfLabels entries */ +} t_FmPcdPrsSwParams; + + +/**************************************************************************//** + @Function FM_PCD_Enable + + @Description This routine should be called after PCD is initialized for enabling all + PCD engines according to their existing configuration. + + @Param[in] h_FmPcd FM PCD module descriptor. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled. 
+*//***************************************************************************/ +t_Error FM_PCD_Enable(t_Handle h_FmPcd); + +/**************************************************************************//** + @Function FM_PCD_Disable + + @Description This routine may be called when PCD is enabled in order to + disable all PCD engines. It may be called + only when none of the ports in the system are using the PCD. + + @Param[in] h_FmPcd FM PCD module descriptor. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init() and when PCD is enabled. +*//***************************************************************************/ +t_Error FM_PCD_Disable(t_Handle h_FmPcd); + + +/**************************************************************************//** + @Function FM_PCD_GetCounter + + @Description Reads one of the FM PCD counters. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] counter The requested counter. + + @Return Counter's current value. + + @Cautions Allowed only following FM_PCD_Init(). + Note that it is user's responsibility to call this routine only + for enabled counters, and there will be no indication if a + disabled counter is accessed. +*//***************************************************************************/ +uint32_t FM_PCD_GetCounter(t_Handle h_FmPcd, e_FmPcdCounters counter); + +/**************************************************************************//** +@Function FM_PCD_PrsLoadSw + +@Description This routine may be called in order to load software parsing code. + + +@Param[in] h_FmPcd FM PCD module descriptor. +@Param[in] p_SwPrs A pointer to a structure of software + parser parameters, including the software + parser image. + +@Return E_OK on success; Error code otherwise. + +@Cautions Allowed only following FM_PCD_Init() and when PCD is disabled. + Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_PrsLoadSw(t_Handle h_FmPcd, t_FmPcdPrsSwParams *p_SwPrs); + +/**************************************************************************//** + @Function FM_PCD_KgSetDfltValue + + @Description Calling this routine sets a global default value to be used + by the keygen when parser does not recognize a required + field/header. + By default default values are 0. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] valueId 0,1 - one of 2 global default values. + @Param[in] value The requested default value. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled. + Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_KgSetDfltValue(t_Handle h_FmPcd, uint8_t valueId, uint32_t value); + +/**************************************************************************//** + @Function FM_PCD_KgSetAdditionalDataAfterParsing + + @Description Calling this routine allows the keygen to access data past + the parser finishing point. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] payloadOffset the number of bytes beyond the parser location. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init() and when PCD is disabled. + Not available for guest partition. 
+*//***************************************************************************/ +t_Error FM_PCD_KgSetAdditionalDataAfterParsing(t_Handle h_FmPcd, uint8_t payloadOffset); + +/**************************************************************************//** + @Function FM_PCD_SetException + + @Description Calling this routine enables/disables PCD interrupts. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] exception The exception to be selected. + @Param[in] enable TRUE to enable interrupt, FALSE to mask it. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). + Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_SetException(t_Handle h_FmPcd, e_FmPcdExceptions exception, bool enable); + +/**************************************************************************//** + @Function FM_PCD_ModifyCounter + + @Description Sets a value to an enabled counter. Use "0" to reset the counter. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] counter The requested counter. + @Param[in] value The requested value to be written into the counter. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). + Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_ModifyCounter(t_Handle h_FmPcd, e_FmPcdCounters counter, uint32_t value); + +/**************************************************************************//** + @Function FM_PCD_SetPlcrStatistics + + @Description This routine may be used to enable/disable policer statistics + counter. By default the statistics is enabled. + + @Param[in] h_FmPcd FM PCD module descriptor + @Param[in] enable TRUE to enable, FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). + Not available for guest partition. +*//***************************************************************************/ +t_Error FM_PCD_SetPlcrStatistics(t_Handle h_FmPcd, bool enable); + +/**************************************************************************//** + @Function FM_PCD_SetPrsStatistics + + @Description Defines whether to gather parser statistics including all ports. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] enable TRUE to enable, FALSE to disable. + + @Return None + + @Cautions Allowed only following FM_PCD_Init(). + Not available for guest partition. +*//***************************************************************************/ +void FM_PCD_SetPrsStatistics(t_Handle h_FmPcd, bool enable); + +/**************************************************************************//** + @Function FM_PCD_ForceIntr + + @Description Causes an interrupt event on the requested source. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] exception An exception to be forced. + + @Return E_OK on success; Error code if the exception is not enabled, + or is not able to create interrupt. + + @Cautions Allowed only following FM_PCD_Init(). + Not available for guest partition. 
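
The runtime counter and exception routines in this unit might be used as in the sketch below; the choice of e_FM_PCD_PLCR_COUNTERS_RED and e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC is only an example, and my_pcd_runtime is an illustrative name.

    /* Illustrative sketch only: enable the PCD engines, mask one exception, poll one counter. */
    #include "fm_pcd_ext.h"

    static t_Error my_pcd_runtime(t_Handle h_FmPcd)
    {
        t_Error  err;
        uint32_t redFrames;

        err = FM_PCD_Enable(h_FmPcd);             /* after all engines are configured */
        if (err != E_OK)
            return err;

        /* Mask the keygen double-ECC interrupt (example only). */
        err = FM_PCD_SetException(h_FmPcd, e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, FALSE);
        if (err != E_OK)
            return err;

        /* Reset, then later read back, the policer 'red' counter. */
        err = FM_PCD_ModifyCounter(h_FmPcd, e_FM_PCD_PLCR_COUNTERS_RED, 0);
        if (err != E_OK)
            return err;

        redFrames = FM_PCD_GetCounter(h_FmPcd, e_FM_PCD_PLCR_COUNTERS_RED);
        (void)redFrames;                          /* e.g. export to statistics */
        return E_OK;
    }
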
+*//***************************************************************************/ +t_Error FM_PCD_ForceIntr (t_Handle h_FmPcd, e_FmPcdExceptions exception); + +/**************************************************************************//** + @Function FM_PCD_HcTxConf + + @Description This routine should be called to confirm frames that were + received on the HC confirmation queue. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] p_Fd Frame descriptor of the received frame. + + @Cautions Allowed only following FM_PCD_Init(). Allowed only if 'useHostCommand' + option was selected in the initialization. +*//***************************************************************************/ +void FM_PCD_HcTxConf(t_Handle h_FmPcd, t_DpaaFD *p_Fd); + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/**************************************************************************//** + @Function FM_PCD_DumpRegs + + @Description Dumps all PCD registers + + @Param[in] h_FmPcd A handle to an FM PCD Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_DumpRegs(t_Handle h_FmPcd); + +/**************************************************************************//** + @Function FM_PCD_KgDumpRegs + + @Description Dumps all PCD KG registers + + @Param[in] h_FmPcd A handle to an FM PCD Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_KgDumpRegs(t_Handle h_FmPcd); + +/**************************************************************************//** + @Function FM_PCD_PlcrDumpRegs + + @Description Dumps all PCD Plcr registers + + @Param[in] h_FmPcd A handle to an FM PCD Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_PlcrDumpRegs(t_Handle h_FmPcd); + +/**************************************************************************//** + @Function FM_PCD_PlcrProfileDumpRegs + + @Description Dumps all PCD Plcr registers + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] h_Profile A handle to a profile. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_PlcrProfileDumpRegs(t_Handle h_FmPcd, t_Handle h_Profile); + +/**************************************************************************//** + @Function FM_PCD_PrsDumpRegs + + @Description Dumps all PCD Prs registers + + @Param[in] h_FmPcd A handle to an FM PCD Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_PrsDumpRegs(t_Handle h_FmPcd); + +/**************************************************************************//** + @Function FM_PCD_HcDumpRegs + + @Description Dumps HC Port registers + + @Param[in] h_FmPcd A handle to an FM PCD Module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). 
+*//***************************************************************************/ +t_Error FM_PCD_HcDumpRegs(t_Handle h_FmPcd); +#endif /* (defined(DEBUG_ERRORS) && ... */ + + + +/**************************************************************************//** + @Group FM_PCD_Runtime_tree_buildgrp FM PCD Tree building Unit + + @Description FM PCD Runtime Unit + + This group contains routines for setting, deleting and modifying + PCD resources, for defining the total PCD tree. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Collection Definitions of coarse classification + parameters as required by keygen (when coarse classification + is the next engine after this scheme). +*//***************************************************************************/ +#define FM_PCD_MAX_NUM_OF_CC_NODES 255 +#define FM_PCD_MAX_NUM_OF_CC_TREES 8 +#define FM_PCD_MAX_NUM_OF_CC_GROUPS 16 +#define FM_PCD_MAX_NUM_OF_CC_UNITS 4 +#define FM_PCD_MAX_NUM_OF_KEYS 256 +#define FM_PCD_MAX_SIZE_OF_KEY 56 +#define FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP 16 +/* @} */ + +/**************************************************************************//** + @Collection A set of definitions to allow protocol + special option description. +*//***************************************************************************/ +typedef uint32_t protocolOpt_t; /**< A general type to define a protocol option. */ + +typedef protocolOpt_t ethProtocolOpt_t; /**< Ethernet protocol options. */ +#define ETH_BROADCAST 0x80000000 /**< Ethernet Broadcast. */ +#define ETH_MULTICAST 0x40000000 /**< Ethernet Multicast. */ + +typedef protocolOpt_t vlanProtocolOpt_t; /**< Vlan protocol options. */ +#define VLAN_STACKED 0x20000000 /**< Vlan Stacked. */ + +typedef protocolOpt_t mplsProtocolOpt_t; /**< MPLS protocol options. */ +#define MPLS_STACKED 0x10000000 /**< MPLS Stacked. */ + +typedef protocolOpt_t ipv4ProtocolOpt_t; /**< IPv4 protocol options. */ +#define IPV4_BROADCAST_1 0x08000000 /**< IPv4 Broadcast. */ +#define IPV4_MULTICAST_1 0x04000000 /**< IPv4 Multicast. */ +#define IPV4_UNICAST_2 0x02000000 /**< Tunneled IPv4 - Unicast. */ +#define IPV4_MULTICAST_BROADCAST_2 0x01000000 /**< Tunneled IPv4 - Broadcast/Multicast. */ + +typedef protocolOpt_t ipv6ProtocolOpt_t; /**< IPv6 protocol options. */ +#define IPV6_MULTICAST_1 0x00800000 /**< IPv6 Multicast. */ +#define IPV6_UNICAST_2 0x00400000 /**< Tunneled IPv6 - Unicast. */ +#define IPV6_MULTICAST_2 0x00200000 /**< Tunneled IPv6 - Multicast. */ +/* @} */ + +/**************************************************************************//** + @Description A type used for returning the order of the key extraction. + each value in this array represents the index of the extraction + command as defined by the user in the initialization extraction array. + The valid size of this array is the user define number of extractions + required (also marked by the second '0' in this array). 
+*//***************************************************************************/ +typedef uint8_t t_FmPcdKgKeyOrder [FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY]; + +/**************************************************************************//** + @Description All PCD engines +*//***************************************************************************/ +typedef enum e_FmPcdEngine { + e_FM_PCD_INVALID = 0, /**< Invalid PCD engine indicated*/ + e_FM_PCD_DONE, /**< No PCD Engine indicated */ + e_FM_PCD_KG, /**< Keygen indicated */ + e_FM_PCD_CC, /**< Coarse classification indicated */ + e_FM_PCD_PLCR, /**< Policer indicated */ + e_FM_PCD_PRS /**< Parser indicated */ +} e_FmPcdEngine; + +/**************************************************************************//** + @Description An enum for selecting extraction by header types +*//***************************************************************************/ +typedef enum e_FmPcdExtractByHdrType { + e_FM_PCD_EXTRACT_FROM_HDR, /**< Extract bytes from header */ + e_FM_PCD_EXTRACT_FROM_FIELD, /**< Extract bytes from header field */ + e_FM_PCD_EXTRACT_FULL_FIELD /**< Extract a full field */ +} e_FmPcdExtractByHdrType; + +/**************************************************************************//** + @Description An enum for selecting extraction source + (when it is not the header) +*//***************************************************************************/ +typedef enum e_FmPcdExtractFrom { + e_FM_PCD_EXTRACT_FROM_FRAME_START, /**< KG & CC: Extract from beginning of frame */ + e_FM_PCD_EXTRACT_FROM_DFLT_VALUE, /**< KG only: Extract from a default value */ + e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE, /**< KG only: Extract from the point where parsing had finished */ + e_FM_PCD_EXTRACT_FROM_KEY, /**< CC only: Field where saved KEY */ + e_FM_PCD_EXTRACT_FROM_HASH, /**< CC only: Field where saved HASH */ + e_FM_PCD_EXTRACT_FROM_PARSE_RESULT, /**< KG & CC: Extract from the parser result */ + e_FM_PCD_EXTRACT_FROM_ENQ_FQID, /**< KG & CC: Extract from enqueue FQID */ + e_FM_PCD_EXTRACT_FROM_FLOW_ID /**< CC only: Field where saved Dequeue FQID */ +} e_FmPcdExtractFrom; + +/**************************************************************************//** + @Description An enum for selecting extraction type +*//***************************************************************************/ +typedef enum e_FmPcdExtractType { + e_FM_PCD_EXTRACT_BY_HDR, /**< Extract according to header */ + e_FM_PCD_EXTRACT_NON_HDR, /**< Extract from data that is not the header */ + e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO /**< Extract private info as specified by user */ +} e_FmPcdExtractType; + +/**************************************************************************//** + @Description An enum for selecting a default +*//***************************************************************************/ +typedef enum e_FmPcdKgExtractDfltSelect { + e_FM_PCD_KG_DFLT_GBL_0, /**< Default selection is KG register 0 */ + e_FM_PCD_KG_DFLT_GBL_1, /**< Default selection is KG register 1 */ + e_FM_PCD_KG_DFLT_PRIVATE_0, /**< Default selection is a per scheme register 0 */ + e_FM_PCD_KG_DFLT_PRIVATE_1, /**< Default selection is a per scheme register 1 */ + e_FM_PCD_KG_DFLT_ILLEGAL /**< Illegal selection */ +} e_FmPcdKgExtractDfltSelect; + +/**************************************************************************//** + @Description An enum defining all default groups - + each group shares a default value, one of 4 user + initialized values. 
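+
+                For illustration only (the pairing below is an arbitrary
+                example, not a requirement): an entry of the
+                t_FmPcdKgExtractDflt array, defined later in this file, binds
+                one of these groups to one of the four default registers,
+                e.g. IP-address extractions falling back to scheme-private
+                register 0:
+
+                    t_FmPcdKgExtractDflt ipDflt;
+
+                    ipDflt.type       = e_FM_PCD_KG_IP_ADDR;
+                    ipDflt.dfltSelect = e_FM_PCD_KG_DFLT_PRIVATE_0;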
+*//***************************************************************************/
+typedef enum e_FmPcdKgKnownFieldsDfltTypes {
+    e_FM_PCD_KG_MAC_ADDR,               /**< MAC Address */
+    e_FM_PCD_KG_TCI,                    /**< TCI field */
+    e_FM_PCD_KG_ENET_TYPE,              /**< ENET Type */
+    e_FM_PCD_KG_PPP_SESSION_ID,         /**< PPP Session id */
+    e_FM_PCD_KG_PPP_PROTOCOL_ID,        /**< PPP Protocol id */
+    e_FM_PCD_KG_MPLS_LABEL,             /**< MPLS label */
+    e_FM_PCD_KG_IP_ADDR,                /**< IP address */
+    e_FM_PCD_KG_PROTOCOL_TYPE,          /**< Protocol type */
+    e_FM_PCD_KG_IP_TOS_TC,              /**< TOS or TC */
+    e_FM_PCD_KG_IPV6_FLOW_LABEL,        /**< IPV6 flow label */
+    e_FM_PCD_KG_IPSEC_SPI,              /**< IPSEC SPI */
+    e_FM_PCD_KG_L4_PORT,                /**< L4 Port */
+    e_FM_PCD_KG_TCP_FLAG,               /**< TCP Flag */
+    e_FM_PCD_KG_GENERIC_FROM_DATA,      /**< Grouping implemented by SW:
+                                             any data extraction that is not the full
+                                             field described above */
+    e_FM_PCD_KG_GENERIC_FROM_DATA_NO_V, /**< Grouping implemented by SW:
+                                             any data extraction without validation */
+    e_FM_PCD_KG_GENERIC_NOT_FROM_DATA   /**< Grouping implemented by SW:
+                                             extraction from parser result or
+                                             direct use of a default value */
+} e_FmPcdKgKnownFieldsDfltTypes;
+
+/**************************************************************************//**
+ @Description   An enum for defining the header index when headers may repeat
+*//***************************************************************************/
+typedef enum e_FmPcdHdrIndex {
+    e_FM_PCD_HDR_INDEX_NONE = 0,        /**< Used when multiple headers are not used; also
+                                             specifies regular IP (not tunneled). */
+    e_FM_PCD_HDR_INDEX_1,               /**< May be used for VLAN, MPLS, tunneled IP */
+    e_FM_PCD_HDR_INDEX_2,               /**< May be used for MPLS, tunneled IP */
+    e_FM_PCD_HDR_INDEX_3,               /**< May be used for MPLS */
+    e_FM_PCD_HDR_INDEX_LAST = 0xFF      /**< May be used for VLAN, MPLS */
+} e_FmPcdHdrIndex;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the policer profile functional type
+*//***************************************************************************/
+typedef enum e_FmPcdProfileTypeSelection {
+    e_FM_PCD_PLCR_PORT_PRIVATE,         /**< Port dedicated profile */
+    e_FM_PCD_PLCR_SHARED                /**< Shared profile (shared within partition) */
+} e_FmPcdProfileTypeSelection;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the policer profile algorithm
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrAlgorithmSelection {
+    e_FM_PCD_PLCR_PASS_THROUGH,         /**< Policer pass-through */
+    e_FM_PCD_PLCR_RFC_2698,             /**< Policer algorithm RFC 2698 */
+    e_FM_PCD_PLCR_RFC_4115              /**< Policer algorithm RFC 4115 */
+} e_FmPcdPlcrAlgorithmSelection;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the policer profile color mode
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrColorMode {
+    e_FM_PCD_PLCR_COLOR_BLIND,          /**< Color blind */
+    e_FM_PCD_PLCR_COLOR_AWARE           /**< Color aware */
+} e_FmPcdPlcrColorMode;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the policer profile color
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrColor {
+    e_FM_PCD_PLCR_GREEN,                /**< Green */
+    e_FM_PCD_PLCR_YELLOW,               /**< Yellow */
+    e_FM_PCD_PLCR_RED,                  /**< Red */
+    e_FM_PCD_PLCR_OVERRIDE              /**< Color override */
+} e_FmPcdPlcrColor;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the policer profile frame length selector
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrFrameLengthSelect {
+  e_FM_PCD_PLCR_L2_FRM_LEN,             /**< L2 frame length */
+  e_FM_PCD_PLCR_L3_FRM_LEN,             /**< L3 frame length */
+  e_FM_PCD_PLCR_L4_FRM_LEN,             /**< L4 frame length */
+  e_FM_PCD_PLCR_FULL_FRM_LEN            /**< Full frame length */
+} e_FmPcdPlcrFrameLengthSelect;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the rollback frame length
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrRollBackFrameSelect {
+  e_FM_PCD_PLCR_ROLLBACK_L2_FRM_LEN,    /**< Rollback L2 frame length */
+  e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN   /**< Rollback full frame length */
+} e_FmPcdPlcrRollBackFrameSelect;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the policer profile packet or byte mode
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrRateMode {
+    e_FM_PCD_PLCR_BYTE_MODE,            /**< Byte mode */
+    e_FM_PCD_PLCR_PACKET_MODE           /**< Packet mode */
+} e_FmPcdPlcrRateMode;
+
+/**************************************************************************//**
+ @Description   An enum for defining the action taken on a frame
+*//***************************************************************************/
+typedef enum e_FmPcdDoneAction {
+    e_FM_PCD_ENQ_FRAME = 0,             /**< Enqueue frame */
+    e_FM_PCD_DROP_FRAME                 /**< Drop frame */
+} e_FmPcdDoneAction;
+
+/**************************************************************************//**
+ @Description   An enum for selecting a policer profile counter
+*//***************************************************************************/
+typedef enum e_FmPcdPlcrProfileCounters {
+    e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER,               /**< Green packets counter */
+    e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER,              /**< Yellow packets counter */
+    e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER,                 /**< Red packets counter */
+    e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER,   /**< Recolored yellow packets counter */
+    e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER       /**< Recolored red packets counter */
+} e_FmPcdPlcrProfileCounters;
+
+/**************************************************************************//**
+ @Description   An enum for selecting an action
+*//***************************************************************************/
+typedef enum e_FmPcdAction {
+    e_FM_PCD_ACTION_NONE,               /**< None */
+    e_FM_PCD_ACTION_EXACT_MATCH,        /**< Exact match on the selected extraction */
+    e_FM_PCD_ACTION_INDEXED_LOOKUP      /**< Indexed lookup on the selected extraction */
+} e_FmPcdAction;
+
+#if defined(FM_CAPWAP_SUPPORT)
+/**************************************************************************//**
+ @Description   An enum for selecting the type of insert manipulation
+*//***************************************************************************/
+typedef enum e_FmPcdManipInsrtType {
+    e_FM_PCD_MANIP_INSRT_NONE = 0,                                  /**< No insertion */
+    e_FM_PCD_MANIP_INSRT_TO_START_OF_FRAME_INT_FRAME_HDR,           /**< Insert internal frame header at start of frame */
+    e_FM_PCD_MANIP_INSRT_TO_START_OF_FRAME_TEMPLATE                 /**< Insert template at start of frame */
+} e_FmPcdManipInsrtType;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the type of remove manipulation
+*//***************************************************************************/
+typedef enum e_FmPcdManipRmvParamsType {
+    e_FM_PCD_MANIP_RMV_NONE = 0,                                        /**< No remove */
+    e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_TILL_SPECIFIC_LOCATION,      /**< Remove from start of frame up to (excluding) the specified indication */
+    e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_INCLUDE_SPECIFIC_LOCATION,   /**< Remove from start of frame up to (including) the specified indication */
+    e_FM_PCD_MANIP_RMV_INT_FRAME_HDR                                    /**< Remove the internal frame header from the start of the frame */
+} e_FmPcdManipRmvParamsType;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the type of location
+*//***************************************************************************/
+typedef enum e_FmPcdManipLocateType {
+    e_FM_PCD_MANIP_LOC_BY_HDR = 0,            /**< Locate according to header */
+    e_FM_PCD_MANIP_LOC_NON_HDR                /**< Locate from data that is not the header */
+} e_FmPcdManipLocateType;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the reassembly timeout mode
+*//***************************************************************************/
+typedef enum e_FmPcdManipReassemTimeOutMode {
+    e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAMES,   /**< Limits the time of the reassembly process from the first fragment to the last */
+    e_FM_PCD_MANIP_TIME_OUT_BETWEEN_FRAG      /**< Limits the time for receiving each fragment */
+} e_FmPcdManipReassemTimeOutMode;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the number of hash ways used by reassembly
+*//***************************************************************************/
+typedef enum e_FmPcdManipReassemWaysNumber {
+    e_FM_PCD_MANIP_ONE_WAY_HASH = 1,          /**< One-way hash */
+    e_FM_PCD_MANIP_TWO_WAYS_HASH,             /**< Two-ways hash */
+    e_FM_PCD_MANIP_THREE_WAYS_HASH,           /**< Three-ways hash */
+    e_FM_PCD_MANIP_FOUR_WAYS_HASH,            /**< Four-ways hash */
+    e_FM_PCD_MANIP_FIVE_WAYS_HASH,            /**< Five-ways hash */
+    e_FM_PCD_MANIP_SIX_WAYS_HASH,             /**< Six-ways hash */
+    e_FM_PCD_MANIP_SEVEN_WAYS_HASH,           /**< Seven-ways hash */
+    e_FM_PCD_MANIP_EIGHT_WAYS_HASH            /**< Eight-ways hash */
+} e_FmPcdManipReassemWaysNumber;
+
+/**************************************************************************//**
+ @Description   An enum for selecting the type of statistics mode
+*//***************************************************************************/
+typedef enum e_FmPcdStatsType {
+    e_FM_PCD_STATS_PER_FLOWID = 0             /**< Statistics indexed by flow ID */
+} e_FmPcdStatsType;
+
+#endif /* FM_CAPWAP_SUPPORT */
+
+
+/**************************************************************************//**
+ @Description   A union of protocol-dependent special options
+*//***************************************************************************/
+typedef union u_FmPcdHdrProtocolOpt {
+    ethProtocolOpt_t    ethOpt;     /**< Ethernet options */
+    vlanProtocolOpt_t   vlanOpt;    /**< VLAN options */
+    mplsProtocolOpt_t   mplsOpt;    /**< MPLS options */
+    ipv4ProtocolOpt_t   ipv4Opt;    /**< IPv4 options */
+    ipv6ProtocolOpt_t   ipv6Opt;    /**< IPv6 options */
+} u_FmPcdHdrProtocolOpt;
+
+/**************************************************************************//**
+ @Description   A union holding all known protocol fields
+*//***************************************************************************/ +typedef union t_FmPcdFields { + headerFieldEth_t eth; /**< eth */ + headerFieldVlan_t vlan; /**< vlan */ + headerFieldLlcSnap_t llcSnap; /**< llcSnap */ + headerFieldPppoe_t pppoe; /**< pppoe */ + headerFieldMpls_t mpls; /**< mpls */ + headerFieldIpv4_t ipv4; /**< ipv4 */ + headerFieldIpv6_t ipv6; /**< ipv6 */ + headerFieldUdp_t udp; /**< udp */ + headerFieldTcp_t tcp; /**< tcp */ + headerFieldSctp_t sctp; /**< sctp */ + headerFieldDccp_t dccp; /**< dccp */ + headerFieldGre_t gre; /**< gre */ + headerFieldMinencap_t minencap; /**< minencap */ + headerFieldIpsecAh_t ipsecAh; /**< ipsecAh */ + headerFieldIpsecEsp_t ipsecEsp; /**< ipsecEsp */ + headerFieldUdpEncapEsp_t udpEncapEsp; /**< udpEncapEsp */ +} t_FmPcdFields; + +/**************************************************************************//** + @Description structure for defining header extraction for key generation +*//***************************************************************************/ +typedef struct t_FmPcdFromHdr { + uint8_t size; /**< Size in byte */ + uint8_t offset; /**< Byte offset */ +} t_FmPcdFromHdr; + +/**************************************************************************//** + @Description structure for defining field extraction for key generation +*//***************************************************************************/ +typedef struct t_FmPcdFromField { + t_FmPcdFields field; /**< Field selection */ + uint8_t size; /**< Size in byte */ + uint8_t offset; /**< Byte offset */ +} t_FmPcdFromField; + +/**************************************************************************//** + @Description A structure of parameters used to define a single network + environment unit. + A unit should be defined if it will later be used by one or + more PCD engines to distinguich between flows. +*//***************************************************************************/ +typedef struct t_FmPcdDistinctionUnit { + struct { + e_NetHeaderType hdr; /**< One of the headers supported by the FM */ + u_FmPcdHdrProtocolOpt opt; /**< only one option !! */ + } hdrs[FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS]; +} t_FmPcdDistinctionUnit; + +/**************************************************************************//** + @Description A structure of parameters used to define the different + units supported by a specific PCD Network Environment + Characteristics module. Each unit represent + a protocol or a group of protocols that may be used later + by the different PCD engined to distinguich between flows. +*//***************************************************************************/ +typedef struct t_FmPcdNetEnvParams { + uint8_t numOfDistinctionUnits; /**< Number of different units to be identified */ + t_FmPcdDistinctionUnit units[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /**< An array of numOfDistinctionUnits of the + different units to be identified */ +} t_FmPcdNetEnvParams; + +/**************************************************************************//** + @Description structure for defining a single extraction action + when creating a key +*//***************************************************************************/ +typedef struct t_FmPcdExtractEntry { + e_FmPcdExtractType type; /**< Extraction type select */ + union { + struct { + e_NetHeaderType hdr; /**< Header selection */ + bool ignoreProtocolValidation; + /**< Ignore protocol validation */ + e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled + IP. 
Otherwise should be cleared.*/ + e_FmPcdExtractByHdrType type; /**< Header extraction type select */ + union { + t_FmPcdFromHdr fromHdr; /**< Extract bytes from header parameters */ + t_FmPcdFromField fromField; /**< Extract bytes from field parameters*/ + t_FmPcdFields fullField; /**< Extract full filed parameters*/ + } extractByHdrType; + } extractByHdr; /**< used when type = e_FM_PCD_KG_EXTRACT_BY_HDR */ + struct { + e_FmPcdExtractFrom src; /**< Non-header extraction source */ + e_FmPcdAction action; /**< Relevant for CC Only */ + uint16_t icIndxMask; /**< Relevant only for CC where + action=e_FM_PCD_ACTION_INDEXED_LOOKUP */ + uint8_t offset; /**< Byte offset */ + uint8_t size; /**< Size in byte */ + } extractNonHdr; /**< used when type = e_FM_PCD_KG_EXTRACT_NON_HDR */ + }; +} t_FmPcdExtractEntry; + +/**************************************************************************//** + @Description A structure for defining masks for each extracted + field in the key. +*//***************************************************************************/ +typedef struct t_FmPcdKgExtractMask { + uint8_t extractArrayIndex; /**< Index in the extraction array, as initialized by user */ + uint8_t offset; /**< Byte offset */ + uint8_t mask; /**< A byte mask (selected bits will be used) */ +} t_FmPcdKgExtractMask; + +/**************************************************************************//** + @Description A structure for defining default selection per groups + of fields +*//***************************************************************************/ +typedef struct t_FmPcdKgExtractDflt { + e_FmPcdKgKnownFieldsDfltTypes type; /**< Default type select*/ + e_FmPcdKgExtractDfltSelect dfltSelect; /**< Default register select */ +} t_FmPcdKgExtractDflt; + +/**************************************************************************//** + @Description A structure for defining all parameters needed for + generation a key and using a hash function +*//***************************************************************************/ +typedef struct t_FmPcdKgKeyExtractAndHashParams { + uint32_t privateDflt0; /**< Scheme default register 0 */ + uint32_t privateDflt1; /**< Scheme default register 1 */ + uint8_t numOfUsedExtracts; /**< defines the valid size of the following array */ + t_FmPcdExtractEntry extractArray [FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY]; /**< An array of extractions definition. */ + uint8_t numOfUsedDflts; /**< defines the valid size of the following array */ + t_FmPcdKgExtractDflt dflts[FM_PCD_KG_NUM_OF_DEFAULT_GROUPS]; + /**< For each extraction used in this scheme, specify the required + default register to be used when header is not found. + types not specified in this array will get undefined value. */ + uint8_t numOfUsedMasks; /**< defines the valid size of the following array */ + t_FmPcdKgExtractMask masks[FM_PCD_KG_NUM_OF_EXTRACT_MASKS]; + uint8_t hashShift; /**< hash result right shift. Select the 24 bits out of the 64 hash + result. 0 means using the 24 LSB's, otherwise use the + 24 LSB's after shifting right.*/ + uint32_t hashDistributionNumOfFqids; /**< must be > 1 and a power of 2. 
Represents the range + of queues for the key and hash functionality */ + uint8_t hashDistributionFqidsShift; /**< selects the FQID bits that will be effected by the hash */ + bool symmetricHash; /**< TRUE to generate the same hash for frames with swapped source and + destination fields on all layers; If TRUE, driver will check that for + all layers, if SRC extraction is selected, DST extraction must also be + selected, and vice versa. */ +} t_FmPcdKgKeyExtractAndHashParams; + +/**************************************************************************//** + @Description A structure of parameters for defining a single + Fqid mask (extracted OR). +*//***************************************************************************/ +typedef struct t_FmPcdKgExtractedOrParams { + e_FmPcdExtractType type; /**< Extraction type select */ + union { + struct { /**< used when type = e_FM_PCD_KG_EXTRACT_BY_HDR */ + e_NetHeaderType hdr; + e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled + IP. Otherwise should be cleared.*/ + bool ignoreProtocolValidation; + /**< continue extraction even if protocol is not recognized */ + } extractByHdr; + e_FmPcdExtractFrom src; /**< used when type = e_FM_PCD_KG_EXTRACT_NON_HDR */ + }; + uint8_t extractionOffset; /**< Offset for extraction (in bytes). */ + e_FmPcdKgExtractDfltSelect dfltValue; /**< Select register from which extraction is taken if + field not found */ + uint8_t mask; /**< Extraction mask (specified bits are used) */ + uint8_t bitOffsetInFqid; /**< 0-31, Selects which bits of the 24 FQID bits to effect using + the extracted byte; Assume byte is placed as the 8 MSB's in + a 32 bit word where the lower bits + are the FQID; i.e if bitOffsetInFqid=1 than its LSB + will effect the FQID MSB, if bitOffsetInFqid=24 than the + extracted byte will effect the 8 LSB's of the FQID, + if bitOffsetInFqid=31 than the byte's MSB will effect + the FQID's LSB; 0 means - no effect on FQID; + Note that one, and only one of + bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e, + extracted byte must effect either FQID or Policer profile).*/ + uint8_t bitOffsetInPlcrProfile; + /**< 0-15, Selects which bits of the 8 policer profile id bits to + effect using the extracted byte; Assume byte is placed + as the 8 MSB's in a 16 bit word where the lower bits + are the policer profile id; i.e if bitOffsetInPlcrProfile=1 + than its LSB will effect the profile MSB, if bitOffsetInFqid=8 + than the extracted byte will effect the whole policer profile id, + if bitOffsetInFqid=15 than the byte's MSB will effect + the Policer Profile id's LSB; + 0 means - no effect on policer profile; Note that one, and only one of + bitOffsetInFqid or bitOffsetInPlcrProfile must be set (i.e, + extracted byte must effect either FQID or Policer profile).*/ +} t_FmPcdKgExtractedOrParams; + +/**************************************************************************//** + @Description A structure for configuring scheme counter +*//***************************************************************************/ +typedef struct t_FmPcdKgSchemeCounter { + bool update; /**< FALSE to keep the current counter state + and continue from that point, TRUE to update/reset + the counter when the scheme is written. */ + uint32_t value; /**< If update=TRUE, this value will be written into the + counter. clear this field to reset the counter. 
*/ +} t_FmPcdKgSchemeCounter; + +/**************************************************************************//** + @Description A structure for defining policer profile + parameters as required by keygen (when policer + is the next engine after this scheme). +*//***************************************************************************/ +typedef struct t_FmPcdKgPlcrProfile { + bool sharedProfile; /**< TRUE if this profile is shared between ports + (i.e. managed by master partition) May not be TRUE + if profile is after Coarse Classification*/ + bool direct; /**< if TRUE, directRelativeProfileId only selects the profile + id, if FALSE fqidOffsetRelativeProfileIdBase is used + together with fqidOffsetShift and numOfProfiles + parameters, to define a range of profiles from + which the keygen result will determine the + destination policer profile. */ + union { + uint16_t directRelativeProfileId; /**< Used if 'direct' is TRUE, to select policer profile. + This parameter should + indicate the policer profile offset within the port's + policer profiles or SHARED window. */ + struct { + uint8_t fqidOffsetShift; /**< shift of KG results without the qid base */ + uint8_t fqidOffsetRelativeProfileIdBase; + /**< OR of KG results without the qid base + This parameter should indicate the policer profile + offset within the port's policer profiles window or + SHARED window depends on sharedProfile */ + uint8_t numOfProfiles; /**< Range of profiles starting at base */ + } indirectProfile; + } profileSelect; +} t_FmPcdKgPlcrProfile; + +/**************************************************************************//** + @Description A structure for CC parameters if CC is the next engine after KG +*//***************************************************************************/ +typedef struct t_FmPcdKgCc { + t_Handle h_CcTree; /**< A handle to a CC Tree */ + uint8_t grpId; /**< CC group id within the CC tree */ + bool plcrNext; /**< TRUE if after CC, in case of data frame, + policing is required. */ + bool bypassPlcrProfileGeneration; + /**< TRUE to bypass keygen policer profile + generation (profile selected is the one selected at + port initialization). */ + t_FmPcdKgPlcrProfile plcrProfile; /**< only if plcrNext=TRUE and bypassPlcrProfileGeneration=FALSE */ +} t_FmPcdKgCc; + +/**************************************************************************//** + @Description A structure for initializing a keygen single scheme +*//***************************************************************************/ +typedef struct t_FmPcdKgSchemeParams { + bool modify; /**< TRUE to change an existing scheme */ + union + { + uint8_t relativeSchemeId; /**< if modify=FALSE:Partition relative scheme id */ + t_Handle h_Scheme; /**< if modify=TRUE: a handle of the existing scheme */ + }id; + bool alwaysDirect; /**< This scheme is reached only directly, i.e. no need for match vector. 
Keygen will ignore + it when matching */ + struct { /**< HL Relevant only if alwaysDirect = FALSE */ + t_Handle h_NetEnv; /**< A handle to the Network environment as returned + by FM_PCD_SetNetEnvCharacteristics() */ + uint8_t numOfDistinctionUnits; /**< Number of netenv units listed in unitIds array */ + uint8_t unitIds[FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; + /**< Indexes as passed to SetNetEnvCharacteristics array*/ + } netEnvParams; + bool useHash; /**< use the KG Hash functionality */ + t_FmPcdKgKeyExtractAndHashParams keyExtractAndHashParams; + /**< used only if useHash = TRUE */ + bool bypassFqidGeneration; /**< Normally - FALSE, TRUE to avoid FQID update in the IC; + In such a case FQID after KG will be the default FQID + defined for the relevant port, or the FQID defined by CC + in cases where CC was the previous engine. */ + uint32_t baseFqid; /**< Base FQID; Relevant only if bypassFqidGeneration = FALSE; + If hash is used and an even distribution is expected + according to hashDistributionNumOfFqids, baseFqid must be aligned to + hashDistributionNumOfFqids. */ + uint8_t numOfUsedExtractedOrs; /**< Number of Fqid masks listed in extractedOrs array*/ + t_FmPcdKgExtractedOrParams extractedOrs[FM_PCD_KG_NUM_OF_GENERIC_REGS]; + /**< IN: FM_PCD_KG_NUM_OF_GENERIC_REGS + registers are shared between qidMasks + functionality and some of the extraction + actions; Normally only some will be used + for qidMask. Driver will return error if + resource is full at initialization time. */ + e_FmPcdEngine nextEngine; /**< may be BMI, PLCR or CC */ + union { /**< depends on nextEngine */ + e_FmPcdDoneAction doneAction; /**< Used when next engine is BMI (done) */ + t_FmPcdKgPlcrProfile plcrProfile; /**< Used when next engine is PLCR */ + t_FmPcdKgCc cc; /**< Used when next engine is CC */ + } kgNextEngineParams; + t_FmPcdKgSchemeCounter schemeCounter; /**< A structure of parameters for updating + the scheme counter */ +} t_FmPcdKgSchemeParams; + +/**************************************************************************//** + @Description A structure for defining CC params when CC is the + next engine after a CC node. +*//***************************************************************************/ +typedef struct t_FmPcdCcNextCcParams { + t_Handle h_CcNode; /**< A handle of the next CC node */ +} t_FmPcdCcNextCcParams; + +/**************************************************************************//** + @Description A structure for defining PLCR params when PLCR is the + next engine after a CC node. 
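+
+                A hedged initialization sketch (all field values below are
+                illustrative only; see the field descriptions that follow):
+
+                    t_FmPcdCcNextPlcrParams plcrNext;
+
+                    plcrNext.overrideParams       = TRUE;   // override the previously decided parameters
+                    plcrNext.sharedProfile        = TRUE;   // profile taken from the SHARED window
+                    plcrNext.newRelativeProfileId = 3;      // illustrative profile offset
+                    plcrNext.newFqid              = 0x540;  // illustrative enqueue FQID
+                    plcrNext.statisticsEn         = FALSE;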
+*//***************************************************************************/ +typedef struct t_FmPcdCcNextPlcrParams { + bool overrideParams; /**< TRUE if CC override previously decided parameters*/ + bool sharedProfile; /**< Relevant only if overrideParams=TRUE: + TRUE if this profile is shared between ports */ + uint16_t newRelativeProfileId; /**< Relevant only if overrideParams=TRUE: + (otherwise profile id is taken from keygen); + This parameter should indicate the policer + profile offset within the port's + policer profiles or from SHARED window.*/ + uint32_t newFqid; /**< Relevant only if overrideParams=TRUE: + FQID for enqueuing the frame; + In earlier chips if policer next engine is KEYGEN, + this parameter can be 0, because the KEYGEN + always decides the enqueue FQID.*/ + bool statisticsEn; /**< In the case of TRUE Statistic counter is + incremented for each received frame passed through + this Coarse Classification entry.*/ +} t_FmPcdCcNextPlcrParams; + +/**************************************************************************//** + @Description A structure for defining enqueue params when BMI is the + next engine after a CC node. +*//***************************************************************************/ +typedef struct t_FmPcdCcNextEnqueueParams { + + e_FmPcdDoneAction action; /**< Action - when next engine is BMI (done) */ + bool overrideFqid; /**< TRUE if CC override previously decided Fqid(by Keygen), + relevant if action = e_FM_PCD_ENQ_FRAME */ + uint32_t newFqid; /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame + (otherwise FQID is taken from keygen), + relevant if action = e_FM_PCD_ENQ_FRAME*/ + bool statisticsEn; /**< In the case of TRUE Statistic counter is + incremented for each received frame passed through + this Coarse Classification entry.*/ +} t_FmPcdCcNextEnqueueParams; + +/**************************************************************************//** + @Description A structure for defining KG params when KG is the + next engine after a CC node. +*//***************************************************************************/ +typedef struct t_FmPcdCcNextKgParams { + bool overrideFqid; /**< TRUE if CC override previously decided Fqid (by keygen), + Note - this parameters irrelevant for earlier chips*/ + uint32_t newFqid; /**< Valid if overrideFqid=TRUE, FQID for enqueuing the frame + (otherwise FQID is taken from keygen), + Note - this parameters irrelevant for earlier chips*/ + t_Handle h_DirectScheme; /**< Direct scheme handle to go to. */ + bool statisticsEn; /**< In the case of TRUE Statistic counter is + incremented for each received frame passed through + this Coarse Classification entry.*/ +} t_FmPcdCcNextKgParams; + +/**************************************************************************//** + @Description A structure for defining next engine params after a CC node. +*//***************************************************************************/ +typedef struct t_FmPcdCcNextEngineParams { + e_FmPcdEngine nextEngine; /**< User has to initialize parameters + according to nextEngine definition */ + union { + t_FmPcdCcNextCcParams ccParams; /**< Parameters in case next engine is CC */ + t_FmPcdCcNextPlcrParams plcrParams; /**< Parameters in case next engine is PLCR */ + t_FmPcdCcNextEnqueueParams enqueueParams; /**< Parameters in case next engine is BMI */ + t_FmPcdCcNextKgParams kgParams; /**< Parameters in case next engine is KG */ + } params; +#if defined(FM_CAPWAP_SUPPORT) + t_Handle h_Manip; /**< Handler to headerManip. 
+ Relevant if next engine of the type result + (e_FM_PCD_PLCR, e_FM_PCD_KG, e_FM_PCD_DONE) */ +#endif /* defined(FM_CAPWAP_SUPPORT) || ... */ +} t_FmPcdCcNextEngineParams; + +/**************************************************************************//** + @Description A structure for defining a single CC Key parameters +*//***************************************************************************/ +typedef struct t_FmPcdCcKeyParams { + uint8_t *p_Key; /**< pointer to the key of the size defined in keySize*/ + uint8_t *p_Mask; /**< pointer to the Mask per key of the size defined + in keySize. p_Key and p_Mask (if defined) has to be + of the same size defined in the keySize */ + t_FmPcdCcNextEngineParams ccNextEngineParams; + /**< parameters for the next for the defined Key in + the p_Key */ +} t_FmPcdCcKeyParams; + +/**************************************************************************//** + @Description A structure for defining CC Keys parameters +*//***************************************************************************/ +typedef struct t_KeysParams { + uint8_t numOfKeys; /**< Number Of relevant Keys */ + uint8_t keySize; /**< size of the key - in the case of the extraction of + the type FULL_FIELD keySize has to be as standard size of the relevant + key. In the another type of extraction keySize has to be as size of extraction. + In the case of action = e_FM_PCD_ACTION_INDEXED_LOOKUP the size of keySize has to be 2*/ + t_FmPcdCcKeyParams keyParams[FM_PCD_MAX_NUM_OF_KEYS]; + /**< it's array with numOfKeys entries each entry in + the array of the type t_FmPcdCcKeyParams */ + t_FmPcdCcNextEngineParams ccNextEngineParamsForMiss; + /**< parameters for the next step of + unfound (or undefined) key . Not relevant in the case + of action = e_FM_PCD_ACTION_INDEXED_LOOKUP*/ +} t_KeysParams; + +/**************************************************************************//** + @Description A structure for defining the CC node params +*//***************************************************************************/ +typedef struct t_FmPcdCcNodeParams { + t_FmPcdExtractEntry extractCcParams; /**< params which defines extraction parameters */ + t_KeysParams keysParams; /**< params which defines Keys parameters of the + extraction defined in extractCcParams */ +} t_FmPcdCcNodeParams; + +/**************************************************************************//** + @Description A structure for defining each CC tree group in term of + NetEnv units and the action to be taken in each case. + the unitIds list must be in order from lower to higher indexes. + + t_FmPcdCcNextEngineParams is a list of 2^numOfDistinctionUnits + structures where each defines the next action to be taken for + each units combination. 
for example: + numOfDistinctionUnits = 2 + unitIds = {1,3} + p_NextEnginePerEntriesInGrp[0] = t_FmPcdCcNextEngineParams for the case that + unit 1 - not found; unit 3 - not found; + p_NextEnginePerEntriesInGrp[1] = t_FmPcdCcNextEngineParams for the case that + unit 1 - not found; unit 3 - found; + p_NextEnginePerEntriesInGrp[2] = t_FmPcdCcNextEngineParams for the case that + unit 1 - found; unit 3 - not found; + p_NextEnginePerEntriesInGrp[3] = t_FmPcdCcNextEngineParams for the case that + unit 1 - found; unit 3 - found; +*//***************************************************************************/ +typedef struct t_FmPcdCcGrpParams { + uint8_t numOfDistinctionUnits; /**< up to 4 */ + uint8_t unitIds[FM_PCD_MAX_NUM_OF_CC_UNITS]; + /**< Indexes of the units as defined in + FM_PCD_SetNetEnvCharacteristics() */ + t_FmPcdCcNextEngineParams nextEnginePerEntriesInGrp[FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP]; + /**< Max size is 16 - if only one group used */ +} t_FmPcdCcGrpParams; + +/**************************************************************************//** + @Description A structure for defining the CC tree groups +*//***************************************************************************/ +typedef struct t_FmPcdCcTreeParams { + t_Handle h_NetEnv; /**< A handle to the Network environment as returned + by FM_PCD_SetNetEnvCharacteristics() */ + uint8_t numOfGrps; /**< Number of CC groups within the CC tree */ + t_FmPcdCcGrpParams ccGrpParams[FM_PCD_MAX_NUM_OF_CC_GROUPS]; /**< Parameters for each group. */ +} t_FmPcdCcTreeParams; + +/**************************************************************************//** + @Description A structure for defining parameters for byte rate +*//***************************************************************************/ +typedef struct t_FmPcdPlcrByteRateModeParams { + e_FmPcdPlcrFrameLengthSelect frameLengthSelection; /**< Frame length selection */ + e_FmPcdPlcrRollBackFrameSelect rollBackFrameSelection; /**< relevant option only e_FM_PCD_PLCR_L2_FRM_LEN, + e_FM_PCD_PLCR_FULL_FRM_LEN */ +} t_FmPcdPlcrByteRateModeParams; + +/**************************************************************************//** + @Description A structure for selcting the policer profile RFC-2698 or + RFC-4115 parameters +*//***************************************************************************/ +typedef struct t_FmPcdPlcrNonPassthroughAlgParams { + e_FmPcdPlcrRateMode rateMode; /**< Byte / Packet */ + t_FmPcdPlcrByteRateModeParams byteModeParams; /**< Valid for Byte NULL for Packet */ + uint32_t comittedInfoRate; /**< KBits/Sec or Packets/Sec */ + uint32_t comittedBurstSize; /**< Bytes/Packets */ + uint32_t peakOrAccessiveInfoRate; /**< KBits/Sec or Packets/Sec */ + uint32_t peakOrAccessiveBurstSize; /**< Bytes/Packets */ +} t_FmPcdPlcrNonPassthroughAlgParams; + +/**************************************************************************//** + @Description A union for defining Policer next engine parameters +*//***************************************************************************/ +typedef union u_FmPcdPlcrNextEngineParams { + e_FmPcdDoneAction action; /**< Action - when next engine is BMI (done) */ + t_Handle h_Profile; /**< Policer profile handle - used when next engine + is PLCR, must be a SHARED profile */ + t_Handle h_DirectScheme; /**< Direct scheme select - when next engine is Keygen */ +} u_FmPcdPlcrNextEngineParams; + +/**************************************************************************//** + @Description A structure for selecting the policer profile entry 
parameters +*//***************************************************************************/ +typedef struct t_FmPcdPlcrProfileParams { + bool modify; /**< TRUE to change an existing profile */ + union { + struct { + e_FmPcdProfileTypeSelection profileType; /**< Type of policer profile */ + t_Handle h_FmPort; /**< Relevant for per-port profiles only */ + uint16_t relativeProfileId; /**< Profile id - relative to shared group or to port */ + } newParams; /**< use it when modify=FALSE */ + t_Handle h_Profile; /**< A handle to a profile - use it when modify=TRUE */ + } id; + e_FmPcdPlcrAlgorithmSelection algSelection; /**< Profile Algorithm PASS_THROUGH, RFC_2698, RFC_4115 */ + e_FmPcdPlcrColorMode colorMode; /**< COLOR_BLIND, COLOR_AWARE */ + + union { + e_FmPcdPlcrColor dfltColor; /**< For Color-Blind Pass-Through mode. the policer will re-color + any incoming packet with the default value. */ + e_FmPcdPlcrColor override; /**< For Color-Aware modes. The profile response to a + pre-color value of 2'b11. */ + } color; + + t_FmPcdPlcrNonPassthroughAlgParams nonPassthroughAlgParams; /**< RFC2698 or RFC4115 params */ + + e_FmPcdEngine nextEngineOnGreen; /**< Green next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnGreen; /**< Green next engine params */ + + e_FmPcdEngine nextEngineOnYellow; /**< Yellow next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnYellow; /**< Yellow next engine params */ + + e_FmPcdEngine nextEngineOnRed; /**< Red next engine type */ + u_FmPcdPlcrNextEngineParams paramsOnRed; /**< Red next engine params */ + + bool trapProfileOnFlowA; /**< Trap on flow A */ + bool trapProfileOnFlowB; /**< Trap on flow B */ + bool trapProfileOnFlowC; /**< Trap on flow C */ +} t_FmPcdPlcrProfileParams; + +#if defined(FM_CAPWAP_SUPPORT) +/**************************************************************************//** + @Description A structure for selecting the location of manipulation +*//***************************************************************************/ +typedef struct t_FmPcdManipLocationParams { + e_FmPcdManipLocateType type; /**< location of manipulation type select */ + struct { /**< used when type = e_FM_PCD_MANIP_BY_HDR */ + e_NetHeaderType hdr; /**< Header selection */ + e_FmPcdHdrIndex hdrIndex; /**< Relevant only for MPLS, VLAN and tunneled + IP. Otherwise should be cleared. */ + bool byField; /**< TRUE if the location of manipulation is according to some field in the specific header*/ + t_FmPcdFields fullField; /**< Relevant only when byField = TRUE: Extract field */ + } manipByHdr; +} t_FmPcdManipLocationParams; + +/**************************************************************************//** + @Description structure for defining insert manipulation + of the type e_FM_PCD_MANIP_INSRT_TO_START_OF_FRAME_TEMPLATE +*//***************************************************************************/ +typedef struct t_FmPcdManipInsrtByTemplateParams { + uint8_t size; /**< size of insert template to the start of the frame. */ + uint8_t hdrTemplate[FM_PCD_MAX_MANIP_INSRT_TEMPLATE_SIZE]; + /**< array of the insertion template. */ + + bool modifyOuterIp; /**< TRUE if user want to modify some fields in outer IP. */ + struct { + uint16_t ipOuterOffset; /**< offset of outer IP in the insert template, relevant if modifyOuterIp = TRUE.*/ + uint16_t dscpEcn; /**< value of dscpEcn in IP outer, relevant if modifyOuterIp = TRUE. 
+ in IPV4 dscpEcn only byte - it has to be adjusted to the right*/ + bool udpPresent; /**< TRUE if UDP is present in the insert template, relevant if modifyOuterIp = TRUE.*/ + uint8_t udpOffset; /**< offset in the insert template of UDP, relevant if modifyOuterIp = TRUE and udpPresent=TRUE.*/ + uint8_t ipIdentGenId; /**< Used by FMan-CTRL to calculate IP-identification field,relevant if modifyOuterIp = TRUE.*/ + bool recalculateLength; /**< TRUE if recalculate length has to be performed due to the engines in the path which can change the frame later, relevant if modifyOuterIp = TRUE.*/ + struct { + uint8_t blockSize; /**< The CAAM block-size; Used by FMan-CTRL to calculate the IP-total-len field.*/ + uint8_t extraBytesAddedAlignedToBlockSize; /**< Used by FMan-CTRL to calculate the IP-total-len field and UDP length*/ + uint8_t extraBytesAddedNotAlignedToBlockSize;/**< Used by FMan-CTRL to calculate the IP-total-len field and UDP length.*/ + } recalculateLengthParams; /**< recalculate length parameters - relevant if modifyOuterIp = TRUE and recalculateLength = TRUE */ + } modifyOuterIpParams; /**< Outer IP modification parameters - ignored if modifyOuterIp is FALSE */ + + bool modifyOuterVlan; /**< TRUE if user wants to modify vpri field in the outer VLAN header*/ + struct { + uint8_t vpri; /**< value of vpri, relevant if modifyOuterVlan = TRUE + vpri only 3 bits, it has to be adjusted to the right*/ + } modifyOuterVlanParams; +} t_FmPcdManipInsrtByTemplateParams; +#endif /* defined(FM_CAPWAP_SUPPORT) || ... */ + + +#ifdef FM_CAPWAP_SUPPORT +/**************************************************************************//** + @Description structure for defining CAPWAP fragmentation +*//***************************************************************************/ +typedef struct t_CapwapFragmentationParams { + uint16_t sizeForFragmentation; /**< if length of the frame is greater than this value, CAPWAP fragmentation will be executed.*/ + bool headerOptionsCompr; /**< TRUE - first fragment include the CAPWAP header options field, + and all other fragments exclude the CAPWAP options field, + FALSE - all fragments include CAPWAP header options field. */ +} t_CapwapFragmentationParams; + +/**************************************************************************//** + @Description structure for defining CAPWAP Re-assembly +*//***************************************************************************/ +typedef struct t_CapwapReassemblyParams { + uint16_t maxNumFramesInProcess; /**< Number of frames which can be processed by Reassembly in the same time. + It has to be power of 2. + In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_FOUR_WAYS_HASH, + maxNumFramesInProcess has to be in the range of 4 - 512, + In the case numOfFramesPerHashEntry == e_FM_PCD_MANIP_EIGHT_WAYS_HASH, + maxNumFramesInProcess has to be in the range of 8 - 2048 */ + bool haltOnDuplicationFrag; /**< In the case of TRUE, Reassembly process halted due to duplicated fragment, + and all processed fragments passed for enqueue with error indication. + In the case of FALSE, only duplicated fragment passed for enqueue with error indication */ + + e_FmPcdManipReassemTimeOutMode timeOutMode; /**< Expiration delay initialized by Reassembly process */ + uint32_t fqidForTimeOutFrames; /**< Fqid in which time out frames will enqueue during Time Out Process */ + uint32_t timeoutRoutineRequestTime; + /**< Represents the time interval in microseconds between consecutive + timeout routine requests It has to be power of 2. 
*/ + uint32_t timeoutThresholdForReassmProcess; + /**< Represents the time interval in microseconds which defines + if opened frame (at least one fragment was processed but not all the fragments)is found as too old*/ + + e_FmPcdManipReassemWaysNumber numOfFramesPerHashEntry;/**< Number of frames per hash entry needed for reassembly process */ +} t_CapwapReassemblyParams; +#endif /* FM_CAPWAP_SUPPORT */ + + +#if defined(FM_CAPWAP_SUPPORT) +/**************************************************************************//** + @Description structure for defining fragmentation/reassembly +*//***************************************************************************/ +typedef struct t_FmPcdManipFragOrReasmParams { + bool frag; /**< TRUE if using the structure for fragmentation, + otherwise this structure is used for reassembly */ + uint8_t extBufPoolIndx; /**< Index of the buffer pool ID which was configured for port + and can be used for manipulation; + NOTE: This field is relevant only for CAPWAP fragmentation + and reassembly */ + e_NetHeaderType hdr; /**< Header selection */ + union { +#ifdef FM_CAPWAP_SUPPORT + t_CapwapFragmentationParams capwapFragParams; /**< Structure for CAPWAP fragmentation, relevant if frag = TRUE, hdr = HEADER_TYPE_CAPWAP */ + t_CapwapReassemblyParams capwapReasmParams; /**< Structure for CAPWAP reassembly, relevant if frag = FALSE, hdr = HEADER_TYPE_CAPWAP */ +#endif /* FM_CAPWAP_SUPPORT */ + }; +} t_FmPcdManipFragOrReasmParams; + +/**************************************************************************//** + @Description structure for defining insert manipulation +*//***************************************************************************/ +typedef struct t_FmPcdManipInsrtParams { + e_FmPcdManipInsrtType type; /**< Type of insert manipulation */ + union { + t_FmPcdManipInsrtByTemplateParams insrtByTemplateParams; + /**< parameters for insert manipulation, relevant if + type = e_FM_PCD_MANIP_INSRT_TO_START_OF_FRAME_TEMPLATE */ + }; +} t_FmPcdManipInsrtParams; + +/**************************************************************************//** + @Description structure for defining remove manipulation +*//***************************************************************************/ +typedef struct t_FmPcdManipRmvParams { + e_FmPcdManipRmvParamsType type; /**< Type of remove manipulation */ + t_FmPcdManipLocationParams rmvSpecificLocationParams; + /**< Specified location of remove manipulation; + This params should be initialized in cases: + - e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_TILL_SPECIFIC_LOCATION + - e_FM_PCD_MANIP_RMV_FROM_START_OF_FRAME_INCLUDE_SPECIFIC_LOCATION */ +} t_FmPcdManipRmvParams; + +/**************************************************************************//** + @Description structure for defining manipulation +*//***************************************************************************/ +typedef struct t_FmPcdManipParams { + bool rmv; /**< TRUE, if defined remove manipulation */ + t_FmPcdManipRmvParams rmvParams; /**< Parameters for remove manipulation, relevant if rmv = TRUE */ + + bool insrt; /**< TRUE, if defined insert manipulation */ + t_FmPcdManipInsrtParams insrtParams; /**< Parameters for insert manipulation, relevant if insrt = TRUE */ + + bool fragOrReasm; /**< TRUE, if defined fragmentation/reassembly manipulation */ + t_FmPcdManipFragOrReasmParams fragOrReasmParams; /**< Parameters for fragmentation/reassembly manipulation, relevant if fragOrReasm = TRUE */ + + /**< General parameters */ + bool treatFdStatusFieldsAsErrors; + /**< 
Set to TRUE when the port that is using this manip is chained + to SEC (i.e. the traffic was forwarded from SEC) */ +} t_FmPcdManipParams; + +/**************************************************************************//** + @Description structure for defining statistics node +*//***************************************************************************/ +typedef struct t_FmPcdStatsParams { + e_FmPcdStatsType type; /**< type of statistics node */ +} t_FmPcdStatsParams; +#endif /* defined(FM_CAPWAP_SUPPORT) || ... */ + + +/**************************************************************************//** + @Function FM_PCD_SetNetEnvCharacteristics + + @Description Define a set of Network Environment Characteristics. + When setting an environment it is important to understand its + application. It is not meant to describe the flows that will run + on the ports using this environment, but what the user means TO DO + with the PCD mechanisms in order to parse-classify-distribute those + frames. + By specifying a distinction unit, the user means it would use that option + for distinction between frames at either a keygen scheme keygen or a coarse + classification action descriptor. Using interchangeable headers to define a + unit means that the user is indifferent to which of the interchangeable + headers is present in the frame, and they want the distinction to be based + on the presence of either one of them. + Depending on context, there are limitations to the use of environments. A + port using the PCD functionality is bound to an environment. Some or even + all ports may share an environment but also an environment per port is + possible. When initializing a scheme, a classification plan group (see below), + or a coarse classification tree, one of the initialized environments must be + stated and related to. When a port is bound to a scheme, a classification + plan group, or a coarse classification tree, it MUST be bound to the same + environment. + The different PCD modules, may relate (for flows definition) ONLY on + distinction units as defined by their environment. When initializing a + scheme for example, it may not choose to select IPV4 as a match for + recognizing flows unless it was defined in the relating environment. In + fact, to guide the user through the configuration of the PCD, each module's + characterization in terms of flows is not done using protocol names, but using + environment indexes. + In terms of HW implementation, the list of distinction units sets the LCV vectors + and later used for match vector, classification plan vectors and coarse classification + indexing. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] p_NetEnvParams A structure of parameters for the initialization of + the network environment. + + @Return A handle to the initialized object on success; NULL code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Handle FM_PCD_SetNetEnvCharacteristics(t_Handle h_FmPcd, t_FmPcdNetEnvParams *p_NetEnvParams); + +/**************************************************************************//** + @Function FM_PCD_DeleteNetEnvCharacteristics + + @Description Deletes a set of Network Environment Characteristics. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] h_NetEnv A handle to the Network environment. + + @Return E_OK on success; Error code otherwise. 
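+
+                Example (an illustrative sketch only): a network environment
+                with a single distinction unit is created with
+                FM_PCD_SetNetEnvCharacteristics() and released here once no
+                scheme, classification plan group or CC tree refers to it.
+                The e_NetHeaderType value 'hdr' is assumed to come from the
+                driver's network-header definitions (not shown in this file),
+                and zeroed entries are assumed to denote unused units/headers.
+
+                    static t_Handle build_single_unit_net_env(t_Handle        h_FmPcd,
+                                                              e_NetHeaderType hdr)
+                    {
+                        t_FmPcdNetEnvParams params;
+
+                        memset(&params, 0, sizeof(params));
+                        params.numOfDistinctionUnits = 1;
+                        params.units[0].hdrs[0].hdr  = hdr;   // option left zeroed: no special protocol options
+
+                        return FM_PCD_SetNetEnvCharacteristics(h_FmPcd, &params);
+                    }
+
+                    // Tear-down, once the environment is no longer referenced:
+                    //     err = FM_PCD_DeleteNetEnvCharacteristics(h_FmPcd, h_NetEnv);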
+*//***************************************************************************/ +t_Error FM_PCD_DeleteNetEnvCharacteristics(t_Handle h_FmPcd, t_Handle h_NetEnv); + +/**************************************************************************//** + @Function FM_PCD_KgSetScheme + + @Description Initializing or modifying and enabling a scheme for the keygen. + This routine should be called for adding or modifying a scheme. + When a scheme needs modifying, the API requires that it will be + rewritten. In such a case 'modify' should be TRUE. If the + routine is called for a valid scheme and 'modify' is FALSE, + it will return error. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in,out] p_Scheme A structure of parameters for defining the scheme + + @Return A handle to the initialized scheme on success; NULL code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Handle FM_PCD_KgSetScheme (t_Handle h_FmPcd, + t_FmPcdKgSchemeParams *p_Scheme); + +/**************************************************************************//** + @Function FM_PCD_KgDeleteScheme + + @Description Deleting an initialized scheme. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSetScheme + + @Return E_OK on success; Error code otherwise. + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_KgDeleteScheme(t_Handle h_FmPcd, t_Handle h_Scheme); + +/**************************************************************************//** + @Function FM_PCD_KgGetSchemeCounter + + @Description Reads scheme packet counter. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSetScheme. + + @Return Counter's current value. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +uint32_t FM_PCD_KgGetSchemeCounter(t_Handle h_FmPcd, t_Handle h_Scheme); + +/**************************************************************************//** + @Function FM_PCD_KgSetSchemeCounter + + @Description Writes scheme packet counter. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] h_Scheme scheme handle as returned by FM_PCD_KgSetScheme. + @Param[in] value New scheme counter value - typically '0' for + resetting the counter. + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_KgSetSchemeCounter(t_Handle h_FmPcd, t_Handle h_Scheme, uint32_t value); + +/**************************************************************************//** + @Function FM_PCD_CcBuildTree + + @Description This routine must be called to define a complete coarse + classification tree. This is the way to define coarse + classification to a certain flow - the keygen schemes + may point only to trees defined in this way. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] p_FmPcdCcTreeParams A structure of parameters to define the tree. + + @Return A handle to the initialized object on success; NULL code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). 
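+
+                Example (an illustrative sketch only): nodes are created
+                bottom-up with FM_PCD_CcSetNode(), and the tree then groups
+                them. The single group below uses no distinction units, so it
+                has exactly one entry (2 to the power of 0) that always passes
+                frames to the given node; 'h_NetEnv' and 'h_CcNode' are
+                assumed to have been created beforehand.
+
+                    static t_Handle build_one_group_tree(t_Handle h_FmPcd,
+                                                         t_Handle h_NetEnv,
+                                                         t_Handle h_CcNode)
+                    {
+                        t_FmPcdCcTreeParams treeParams;
+
+                        memset(&treeParams, 0, sizeof(treeParams));
+                        treeParams.h_NetEnv  = h_NetEnv;
+                        treeParams.numOfGrps = 1;
+                        treeParams.ccGrpParams[0].numOfDistinctionUnits = 0;
+                        treeParams.ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine = e_FM_PCD_CC;
+                        treeParams.ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode = h_CcNode;
+
+                        return FM_PCD_CcBuildTree(h_FmPcd, &treeParams);
+                    }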
+*//***************************************************************************/ +t_Handle FM_PCD_CcBuildTree (t_Handle h_FmPcd, + t_FmPcdCcTreeParams *p_FmPcdCcTreeParams); + +/**************************************************************************//** + @Function FM_PCD_CcDeleteTree + + @Description Deleting an built tree. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] h_CcTree A handle to a CC tree. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_CcDeleteTree(t_Handle h_FmPcd, t_Handle h_CcTree); + +/**************************************************************************//** + @Function FM_PCD_CcSetNode + + @Description This routine should be called for each CC (coarse classification) + node. The whole CC tree should be built bottom up so that each + node points to already defined nodes. + + @Param[in] h_FmPcd FM PCD module descriptor. + @Param[in] p_CcNodeParam A structure of parameters defining the CC node + + @Return A handle to the initialized object on success; NULL code otherwise. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Handle FM_PCD_CcSetNode(t_Handle h_FmPcd, + t_FmPcdCcNodeParams *p_CcNodeParam); + +/**************************************************************************//** + @Function FM_PCD_CcDeleteNode + + @Description Deleting an built node. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] h_CcNode A handle to a CC node. + + @Cautions Allowed only following FM_PCD_Init(). +*//***************************************************************************/ +t_Error FM_PCD_CcDeleteNode(t_Handle h_FmPcd, t_Handle h_CcNode); + +/**************************************************************************//** + @Function FM_PCD_CcTreeModifyNextEngine + + @Description Modify the Next Engine Parameters in the entry of the tree. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] h_CcTree A handle to the tree + @Param[in] grpId A Group index in the tree + @Param[in] index Entry index in the group defined by grpId + @Param[in] p_FmPcdCcNextEngineParams A structure for defining new next engine params + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_CcBuildTree(). +*//***************************************************************************/ +t_Error FM_PCD_CcTreeModifyNextEngine(t_Handle h_FmPcd, t_Handle h_CcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams); + +/**************************************************************************//** + @Function FM_PCD_CcNodeModifyNextEngine + + @Description Modify the Next Engine Parameters in the relevant key entry of the node. + + @Param[in] h_FmPcd A handle to an FM PCD Module. + @Param[in] h_CcNode A handle to the node + @Param[in] keyIndex Key index for Next Engine Params modifications + @Param[in] p_FmPcdCcNextEngineParams A structure for defining new next engine params + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PCD_CcSetNode(). 
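+
+                Example (illustrative only; the key index and the contents of the
+                next-engine structure are assumptions):
+
+                    t_FmPcdCcNextEngineParams  nextEngineParams;
+
+                    memset(&nextEngineParams, 0, sizeof(nextEngineParams));
+                    /* ... select the new next engine for this key entry ... */
+                    FM_PCD_CcNodeModifyNextEngine(h_FmPcd, h_CcNode, 0 /* keyIndex */, &nextEngineParams);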
+*//***************************************************************************/
+t_Error FM_PCD_CcNodeModifyNextEngine(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
+
+/**************************************************************************//**
+ @Function      FM_PCD_CcNodeModifyMissNextEngine
+
+ @Description   Modify the Next Engine Parameters of the Miss case of the node.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_CcNode                    A handle to the node.
+ @Param[in]     p_FmPcdCcNextEngineParams   A structure for defining new next engine parameters.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_CcSetNode().
+*//***************************************************************************/
+t_Error FM_PCD_CcNodeModifyMissNextEngine(t_Handle h_FmPcd, t_Handle h_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams);
+
+/**************************************************************************//**
+ @Function      FM_PCD_CcNodeRemoveKey
+
+ @Description   Remove the key (including its Next Engine Parameters) at the
+                given index of the relevant node.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_CcNode                    A handle to the node.
+ @Param[in]     keyIndex                    Index of the key to remove.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_CcSetNode() of the relevant node and
+                of the node that points to this node.
+*//***************************************************************************/
+t_Error FM_PCD_CcNodeRemoveKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex);
+
+/**************************************************************************//**
+ @Function      FM_PCD_CcNodeAddKey
+
+ @Description   Add a key (including its Next Engine Parameters) at the index
+                defined by keyIndex.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_CcNode                    A handle to the node.
+ @Param[in]     keyIndex                    Index at which the key is added.
+ @Param[in]     keySize                     Size of the added key.
+ @Param[in]     p_KeyParams                 A pointer to the parameters that include the new
+                                            key and its Next Engine Parameters.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_CcSetNode() of the relevant node and
+                of the node that points to this node.
+*//***************************************************************************/
+t_Error FM_PCD_CcNodeAddKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams);
+
+/**************************************************************************//**
+ @Function      FM_PCD_CcNodeModifyKeyAndNextEngine
+
+ @Description   Modify the key and the Next Engine Parameters of this key at the
+                index defined by keyIndex.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_CcNode                    A handle to the node.
+ @Param[in]     keyIndex                    Index of the key to modify.
+ @Param[in]     keySize                     Size of the modified key.
+ @Param[in]     p_KeyParams                 A pointer to the parameters that include the modified
+                                            key and the modified Next Engine Parameters.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_CcSetNode() of the relevant node and
+                of the node that points to this node.
+*//***************************************************************************/
+t_Error FM_PCD_CcNodeModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams);
+
+/**************************************************************************//**
+ @Function      FM_PCD_CcNodeModifyKey
+
+ @Description   Modify the key at the index defined by keyIndex.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_CcNode                    A handle to the node.
+ @Param[in]     keyIndex                    Index of the key to modify.
+ @Param[in]     keySize                     Size of the new key.
+ @Param[in]     p_Key                       A pointer to the new key.
+ @Param[in]     p_Mask                      A pointer to the new mask if relevant, otherwise NULL.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_CcSetNode() of the relevant node and
+                of the node that points to this node.
+*//***************************************************************************/
+t_Error FM_PCD_CcNodeModifyKey(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask);
+
+/**************************************************************************//**
+ @Function      FM_PCD_CcNodeGetKeyCounter
+
+ @Description   This routine may be used to get the counter of a specific key in a
+                CC node; the counter reflects how many passing frames matched
+                this key.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_CcNode                    A handle to the node.
+ @Param[in]     keyIndex                    Index of the key whose counter is read.
+
+ @Return        The specific key counter.
+
+ @Cautions      Allowed only following FM_PCD_CcSetNode() of the relevant node and
+                of the node that points to this node.
+*//***************************************************************************/
+uint32_t FM_PCD_CcNodeGetKeyCounter(t_Handle h_FmPcd, t_Handle h_CcNode, uint8_t keyIndex);
+
+/**************************************************************************//**
+ @Function      FM_PCD_PlcrSetProfile
+
+ @Description   Sets a profile entry in the policer profile table.
+                The routine overrides any existing value.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     p_Profile                   A structure of parameters for defining a
+                                            policer profile entry.
+
+ @Return        A handle to the initialized object on success; NULL code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_Init().
+*//***************************************************************************/
+t_Handle FM_PCD_PlcrSetProfile(t_Handle                  h_FmPcd,
+                               t_FmPcdPlcrProfileParams  *p_Profile);
+
+/**************************************************************************//**
+ @Function      FM_PCD_PlcrDeleteProfile
+
+ @Description   Deletes a profile entry from the policer profile table.
+                The routine sets the entry to invalid.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_Profile                   A handle to the profile.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_Init().
+*//***************************************************************************/
+t_Error FM_PCD_PlcrDeleteProfile(t_Handle h_FmPcd, t_Handle h_Profile);
+
+/**************************************************************************//**
+ @Function      FM_PCD_PlcrGetProfileCounter
+
+ @Description   Reads the value of the selected policer profile counter.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_Profile                   A handle to the profile.
+ @Param[in]     counter                     Counter selector.
+
+ @Return        The selected counter's current value.
+
+ @Cautions      Allowed only following FM_PCD_Init().
+*//***************************************************************************/
+uint32_t FM_PCD_PlcrGetProfileCounter(t_Handle h_FmPcd, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter);
+
+/**************************************************************************//**
+ @Function      FM_PCD_PlcrSetProfileCounter
+
+ @Description   Writes a new value to the selected policer profile counter.
+                The routine overrides any existing value.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_Profile                   A handle to the profile.
+ @Param[in]     counter                     Counter selector.
+ @Param[in]     value                       Value to set the counter to.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_Init().
+*//***************************************************************************/
+t_Error FM_PCD_PlcrSetProfileCounter(t_Handle h_FmPcd, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value);
+
+#if defined(FM_CAPWAP_SUPPORT)
+/**************************************************************************//**
+ @Function      FM_PCD_ManipSetNode
+
+ @Description   This routine should be called for defining a manipulation
+                node. A manipulation node must be defined before the CC node
+                that precedes it.
+
+ @Param[in]     h_FmPcd                     FM PCD module descriptor.
+ @Param[in]     p_FmPcdManipParams          A structure of parameters defining the manipulation.
+
+ @Return        A handle to the initialized object on success; NULL code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_Init().
+*//***************************************************************************/
+t_Handle FM_PCD_ManipSetNode(t_Handle h_FmPcd, t_FmPcdManipParams *p_FmPcdManipParams);
+
+/**************************************************************************//**
+ @Function      FM_PCD_ManipDeleteNode
+
+ @Description   Deletes an existing manipulation node.
+
+ @Param[in]     h_FmPcd                     A handle to an FM PCD Module.
+ @Param[in]     h_HdrManipNode              A handle to a Manip node.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_Init().
+*//***************************************************************************/
+t_Error FM_PCD_ManipDeleteNode(t_Handle h_FmPcd, t_Handle h_HdrManipNode);
+#endif /* defined(FM_CAPWAP_SUPPORT) || ... */
+
+
+#ifdef FM_CAPWAP_SUPPORT
+/**************************************************************************//**
+ @Function      FM_PCD_StatisticsSetNode
+
+ @Description   This routine should be called for defining a statistics
+                node.
+
+ @Param[in]     h_FmPcd                     FM PCD module descriptor.
+ @Param[in]     p_FmPcdstatsParams          A structure of parameters defining the statistics node.
+
+ @Return        A handle to the initialized object on success; NULL code otherwise.
+
+ @Cautions      Allowed only following FM_PCD_Init().
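+
+                Example (illustrative sketch; 'statsType' and the surrounding
+                variables are placeholders):
+
+                    t_FmPcdStatsParams  statsParams;
+                    t_Handle            h_StatsNode;
+
+                    statsParams.type = statsType;   /* an e_FmPcdStatsType value */
+                    h_StatsNode = FM_PCD_StatisticsSetNode(h_FmPcd, &statsParams);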
+*//***************************************************************************/ +t_Handle FM_PCD_StatisticsSetNode(t_Handle h_FmPcd, t_FmPcdStatsParams *p_FmPcdstatsParams); +#endif /* FM_CAPWAP_SUPPORT */ + +/** @} */ /* end of FM_PCD_Runtime_tree_buildgrp group */ +/** @} */ /* end of FM_PCD_Runtime_grp group */ +/** @} */ /* end of FM_PCD_grp group */ +/** @} */ /* end of FM_grp group */ + + + +#endif /* __FM_PCD_EXT */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_port_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_port_ext.h @@ -0,0 +1,2196 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_port_ext.h + + @Description FM-Port Application Programming Interface. +*//***************************************************************************/ +#ifndef __FM_PORT_EXT +#define __FM_PORT_EXT + +#include "error_ext.h" +#include "std_ext.h" +#include "fm_pcd_ext.h" +#include "fm_ext.h" +#include "net_ext.h" + + +/**************************************************************************//** + + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_PORT_grp FM Port + + @Description FM Port API + + The FM uses a general module called "port" to represent a Tx port + (MAC), an Rx port (MAC), offline parsing flow or host command + flow. There may be up to 17 (may change) ports in an FM - 5 Tx + ports (4 for the 1G MACs, 1 for the 10G MAC), 5 Rx Ports, and 7 + Host command/Offline parsing ports. 
The SW driver manages these + ports as sub-modules of the FM, i.e. after an FM is initialized, + its ports may be initialized and operated upon. + + The port is initialized aware of its type, but other functions on + a port may be indifferent to its type. When necessary, the driver + verifies coherency and returns error if applicable. + + On initialization, user specifies the port type and it's index + (relative to the port's type). Host command and Offline parsing + ports share the same id range, I.e user may not initialized host + command port 0 and offline parsing port 0. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description An enum for defining port PCD modes. + This enum defines the superset of PCD engines support - i.e. not + all engines have to be used, but all have to be enabled. The real + flow of a specific frame depends on the PCD configuration and the + frame headers and payload. +*//***************************************************************************/ +typedef enum e_FmPortPcdSupport { + e_FM_PORT_PCD_SUPPORT_NONE = 0, /**< BMI to BMI, PCD is not used */ + e_FM_PORT_PCD_SUPPORT_PRS_ONLY, /**< Use only Parser */ + e_FM_PORT_PCD_SUPPORT_PLCR_ONLY, /**< Use only Policer */ + e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR, /**< Use Parser and Policer */ + e_FM_PORT_PCD_SUPPORT_PRS_AND_KG, /**< Use Parser and Keygen */ + e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC, /**< Use Parser, Keygen and Coarse Classification */ + e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_CC_AND_PLCR, + /**< Use all PCD engines */ + e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR /**< Use Parser, Keygen and Policer */ +#ifdef FM_CAPWAP_SUPPORT + , + e_FM_PORT_PCD_SUPPORT_CC_ONLY, /**< Use only Coarse Classification */ + e_FM_PORT_PCD_SUPPORT_CC_AND_KG, /**< Use Coarse Classification,and Keygen */ + e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR /**< Use Coarse Classification, Keygen and Policer */ +#endif /* FM_CAPWAP_SUPPORT */ +} e_FmPortPcdSupport; + +/**************************************************************************//** + @Description Port interrupts +*//***************************************************************************/ +typedef enum e_FmPortExceptions { + e_FM_PORT_EXCEPTION_IM_BUSY /**< Independent-Mode Rx-BUSY */ +} e_FmPortExceptions; + + +/**************************************************************************//** + @Collection General FM Port defines +*//***************************************************************************/ +#define FM_PORT_PRS_RESULT_NUM_OF_WORDS 8 /**< Number of 4 bytes words in parser result */ +/* @} */ + +/**************************************************************************//** + @Collection FM Frame error +*//***************************************************************************/ +typedef uint32_t fmPortFrameErrSelect_t; /**< typedef for defining Frame Descriptor errors */ + +#define FM_PORT_FRM_ERR_UNSUPPORTED_FORMAT 0x04000000 /**< Offline parsing only! Unsupported Format */ +#define FM_PORT_FRM_ERR_LENGTH 0x02000000 /**< Offline parsing only! 
Length Error */ +#define FM_PORT_FRM_ERR_DMA 0x01000000 /**< DMA Data error */ +#ifdef FM_CAPWAP_SUPPORT +#define FM_PORT_FRM_ERR_NON_FM 0x00400000 /**< non Frame-Manager error; probably come from SEC that + was chained to FM */ +#endif /* FM_CAPWAP_SUPPORT */ +#define FM_PORT_FRM_ERR_PHYSICAL 0x00080000 /**< Rx FIFO overflow, FCS error, code error, running disparity + error (SGMII and TBI modes), FIFO parity error. PHY + Sequence error, PHY error control character detected. */ +#define FM_PORT_FRM_ERR_SIZE 0x00040000 /**< Frame too long OR Frame size exceeds max_length_frame */ +#define FM_PORT_FRM_ERR_CLS_DISCARD 0x00020000 /**< classification discard */ +#define FM_PORT_FRM_ERR_EXTRACTION 0x00008000 /**< Extract Out of Frame */ +#define FM_PORT_FRM_ERR_NO_SCHEME 0x00004000 /**< No Scheme Selected */ +#define FM_PORT_FRM_ERR_KEYSIZE_OVERFLOW 0x00002000 /**< Keysize Overflow */ +#define FM_PORT_FRM_ERR_COLOR_YELLOW 0x00000400 /**< Frame color is yellow */ +#define FM_PORT_FRM_ERR_COLOR_RED 0x00000800 /**< Frame color is red */ +#define FM_PORT_FRM_ERR_ILL_PLCR 0x00000200 /**< Illegal Policer Profile selected */ +#define FM_PORT_FRM_ERR_PLCR_FRAME_LEN 0x00000100 /**< Policer frame length error */ +#define FM_PORT_FRM_ERR_PRS_TIMEOUT 0x00000080 /**< Parser Time out Exceed */ +#define FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT 0x00000040 /**< Invalid Soft Parser instruction */ +#define FM_PORT_FRM_ERR_PRS_HDR_ERR 0x00000020 /**< Header error was identified during parsing */ +#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008 /**< Frame parsed beyind 256 first bytes */ +#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001 /**< FPM Frame Processing Timeout Exceeded */ +/* @} */ + + + +/**************************************************************************//** + @Group FM_PORT_init_grp FM Port Initialization Unit + + @Description FM Port Initialization Unit + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description Exceptions user callback routine, will be called upon an + exception passing the exception identification. + + @Param[in] h_App - User's application descriptor. + @Param[in] exception - The exception. + *//***************************************************************************/ +typedef void (t_FmPortExceptionCallback) (t_Handle h_App, e_FmPortExceptions exception); + +/**************************************************************************//** + @Description User callback function called by driver with received data. + + User provides this function. Driver invokes it. + + @Param[in] h_App Application's handle originally specified to + the API Config function + @Param[in] p_Data A pointer to data received + @Param[in] length length of received data + @Param[in] status receive status and errors + @Param[in] position position of buffer in frame + @Param[in] h_BufContext A handle of the user acossiated with this buffer + + @Retval e_RX_STORE_RESPONSE_CONTINUE - order the driver to continue Rx + operation for all ready data. + @Retval e_RX_STORE_RESPONSE_PAUSE - order the driver to stop Rx operation. 
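+
+                Example - a skeleton callback of this type (the processing done
+                inside it is an illustrative assumption):
+
+                    static e_RxStoreResponse myRxStore(t_Handle h_App,
+                                                       uint8_t  *p_Data,
+                                                       uint16_t length,
+                                                       uint16_t status,
+                                                       uint8_t  position,
+                                                       t_Handle h_BufContext)
+                    {
+                        /* ... hand p_Data/length to the application ... */
+                        return e_RX_STORE_RESPONSE_CONTINUE;
+                    }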
+*//***************************************************************************/ +typedef e_RxStoreResponse (t_FmPortImRxStoreCallback) (t_Handle h_App, + uint8_t *p_Data, + uint16_t length, + uint16_t status, + uint8_t position, + t_Handle h_BufContext); + +/**************************************************************************//** + @Description User callback function called by driver when transmit completed. + + User provides this function. Driver invokes it. + + @Param[in] h_App Application's handle originally specified to + the API Config function + @Param[in] p_Data A pointer to data received + @Param[in] status transmit status and errors + @Param[in] lastBuffer is last buffer in frame + @Param[in] h_BufContext A handle of the user acossiated with this buffer + *//***************************************************************************/ +typedef void (t_FmPortImTxConfCallback) (t_Handle h_App, + uint8_t *p_Data, + uint16_t status, + t_Handle h_BufContext); + +/**************************************************************************//** + @Description A structure of information about each of the external + buffer pools used by the port, +*//***************************************************************************/ +typedef struct t_FmPortExtPoolParams { + uint8_t id; /**< External buffer pool id */ + uint16_t size; /**< External buffer pool buffer size */ +} t_FmPortExtPoolParams; + +/**************************************************************************//** + @Description A structure for informing the driver about the external + buffer pools allocated in the BM and used by this port. +*//***************************************************************************/ +typedef struct t_FmPortExtPools { + uint8_t numOfPoolsUsed; /**< Number of pools use by this port */ + t_FmPortExtPoolParams extBufPool[FM_PORT_MAX_NUM_OF_EXT_POOLS]; + /**< Parameters for each port */ +} t_FmPortExtPools; + +/**************************************************************************//** + @Description structure for additional Rx port parameters +*//***************************************************************************/ +typedef struct t_FmPortRxParams { + uint32_t errFqid; /**< Error Queue Id. */ + uint32_t dfltFqid; /**< Default Queue Id. */ + uint16_t liodnOffset; /**< Port's LIODN offset. */ + t_FmPortExtPools extBufPools; /**< Which external buffer pools are used + (up to FM_PORT_MAX_NUM_OF_EXT_POOLS), and their sizes. */ +} t_FmPortRxParams; + +/**************************************************************************//** + @Description structure for additional non-Rx port parameters +*//***************************************************************************/ +typedef struct t_FmPortNonRxParams { + uint32_t errFqid; /**< Error Queue Id. */ + uint32_t dfltFqid; /**< For Tx and HC - Default Confirmation queue, + 0 means no Tx confirmation for processed + frames. For OP - default Rx queue. */ + uint32_t qmChannel; /**< QM-channel dedicated to this port; will be used + by the FM for dequeue. */ +#ifdef FM_OP_PARTITION_ERRATA_FMANx8 + uint16_t opLiodnOffset; /**< For Offline Parsing ports only. Port's LIODN offset. 
*/ +#endif /* FM_OP_PARTITION_ERRATA_FMANx8 */ +} t_FmPortNonRxParams; + +/**************************************************************************//** + @Description structure for additional Rx port parameters +*//***************************************************************************/ +typedef struct t_FmPortImRxTxParams { + t_Handle h_FmMuram; /**< A handle of the FM-MURAM partition */ + uint16_t liodnOffset; /**< For Rx ports only. Port's LIODN Offset. */ + uint8_t dataMemId; /**< Memory partition ID for data buffers */ + uint32_t dataMemAttributes; /**< Memory attributes for data buffers */ + t_BufferPoolInfo rxPoolParams; /**< For Rx ports only. */ + t_FmPortImRxStoreCallback *f_RxStore; /**< For Rx ports only. */ + t_FmPortImTxConfCallback *f_TxConf; /**< For Tx ports only. */ +} t_FmPortImRxTxParams; + +/**************************************************************************//** + @Description Union for additional parameters depending on port type +*//***************************************************************************/ +typedef union u_FmPortSpecificParams { + t_FmPortImRxTxParams imRxTxParams; /**< Rx/Tx Independent-Mode port parameter structure */ + t_FmPortRxParams rxParams; /**< Rx port parameters structure */ + t_FmPortNonRxParams nonRxParams; /**< Non-Rx port parameters structure */ +} u_FmPortSpecificParams; + +/**************************************************************************//** + @Description structure representing FM initialization parameters +*//***************************************************************************/ +typedef struct t_FmPortParams { + uintptr_t baseAddr; /**< Virtual Address of memory mapped FM Port registers.*/ + t_Handle h_Fm; /**< A handle to the FM object this port related to */ + e_FmPortType portType; /**< Port type */ + uint8_t portId; /**< Port Id - relative to type */ + bool independentModeEnable; + /**< This port is Independent-Mode - Used for Rx/Tx ports only! */ + uint16_t liodnBase; /**< Irrelevant for P4080 rev 1. LIODN base for this port, to be + used together with LIODN offset. */ + u_FmPortSpecificParams specificParams; /**< Additional parameters depending on port + type. */ + + t_FmPortExceptionCallback *f_Exception; /**< Callback routine to be called of PCD exception */ + t_Handle h_App; /**< A handle to an application layer object; This handle will + be passed by the driver upon calling the above callbacks */ +} t_FmPortParams; + + +/**************************************************************************//** + @Function FM_PORT_Config + + @Description Creates descriptor for the FM PORT module. + + The routine returns a handle (descriptor) to the FM PORT object. + This descriptor must be passed as first parameter to all other + FM PORT function calls. + + No actual initialization or configuration of FM hardware is + done by this routine. + + @Param[in] p_FmPortParams - Pointer to data structure of parameters + + @Retval Handle to FM object, or NULL for Failure. +*//***************************************************************************/ +t_Handle FM_PORT_Config(t_FmPortParams *p_FmPortParams); + +/**************************************************************************//** + @Function FM_PORT_Init + + @Description Initializes the FM PORT module + + @Param[in] h_FmPort - FM PORT module descriptor + + @Return E_OK on success; Error code otherwise. 
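+
+                Example - a minimal Rx-port bring-up sketch (the handles, ids and
+                FQIDs below are placeholders, not recommended values):
+
+                    t_FmPortParams  portParams;
+                    t_Handle        h_FmPort;
+
+                    memset(&portParams, 0, sizeof(portParams));
+                    portParams.baseAddr = portBaseAddr;   /* mapped port registers */
+                    portParams.h_Fm     = h_Fm;           /* handle returned by FM init */
+                    portParams.portType = rxPortType;     /* an e_FmPortType value */
+                    portParams.portId   = 0;              /* relative to the port type */
+                    portParams.specificParams.rxParams.errFqid  = errFqid;
+                    portParams.specificParams.rxParams.dfltFqid = dfltFqid;
+                    /* ... external buffer pools, liodnOffset, f_Exception, h_App ... */
+
+                    h_FmPort = FM_PORT_Config(&portParams);
+                    /* optional FM_PORT_Config*() calls go here, before Init */
+                    FM_PORT_Init(h_FmPort);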
+*//***************************************************************************/ +t_Error FM_PORT_Init(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_Free + + @Description Frees all resources that were assigned to FM PORT module. + + Calling this routine invalidates the descriptor. + + @Param[in] h_FmPort - FM PORT module descriptor + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PORT_Free(t_Handle h_FmPort); + + +/**************************************************************************//** + @Group FM_PORT_advanced_init_grp FM Port Advanced Configuration Unit + + @Description Configuration functions used to change default values. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description enum for defining QM frame dequeue +*//***************************************************************************/ +typedef enum e_FmPortDeqType { + e_FM_PORT_DEQ_TYPE1, /**< Dequeue from the SP channel - with priority precedence, + and Intra-Class Scheduling respected. */ + e_FM_PORT_DEQ_TYPE2, /**< Dequeue from the SP channel - with active FQ precedence, + and Intra-Class Scheduling respected. */ + e_FM_PORT_DEQ_TYPE3 /**< Dequeue from the SP channel - with active FQ precedence, + and override Intra-Class Scheduling */ +} e_FmPortDeqType; + +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT +/**************************************************************************//** + @Description enum for defining QM frame dequeue +*//***************************************************************************/ +typedef enum e_FmPortDeqPrefetchOption { + e_FM_PORT_DEQ_NO_PREFETCH, /**< QMI preforms a dequeue action for a single frame + only when a dedicated portID Tnum is waiting. */ + e_FM_PORT_DEQ_PARTIAL_PREFETCH, /**< QMI preforms a dequeue action for 3 frames when + one dedicated portId tnum is waiting. */ + e_FM_PORT_DEQ_FULL_PREFETCH /**< QMI preforms a dequeue action for 3 frames when + no dedicated portId tnums are waiting. */ + +} e_FmPortDeqPrefetchOption; +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + +/**************************************************************************//** + @Description enum for defining port DMA swap mode +*//***************************************************************************/ +typedef enum e_FmPortDmaSwap { + e_FM_PORT_DMA_NO_SWP, /**< No swap, transfer data as is.*/ + e_FM_PORT_DMA_SWP_PPC_LE, /**< The transferred data should be swapped + in PowerPc Little Endian mode. 
*/ + e_FM_PORT_DMA_SWP_BE /**< The transferred data should be swapped + in Big Endian mode */ +} e_FmPortDmaSwap; + +/**************************************************************************//** + @Description enum for defining port DMA cache attributes +*//***************************************************************************/ +typedef enum e_FmPortDmaCache { + e_FM_PORT_DMA_NO_STASH = 0, /**< Cacheable, no Allocate (No Stashing) */ + e_FM_PORT_DMA_STASH = 1 /**< Cacheable and Allocate (Stashing on) */ +} e_FmPortDmaCache; + +/**************************************************************************//** + @Description enum for defining port default color +*//***************************************************************************/ +typedef enum e_FmPortColor { + e_FM_PORT_COLOR_GREEN, /**< Default port color is green */ + e_FM_PORT_COLOR_YELLOW, /**< Default port color is yellow */ + e_FM_PORT_COLOR_RED, /**< Default port color is red */ + e_FM_PORT_COLOR_OVERRIDE /**< Ignore color */ +} e_FmPortColor; + +/**************************************************************************//** + @Description struct for defining Dual Tx rate limiting scale +*//***************************************************************************/ +typedef enum e_FmPortDualRateLimiterScaleDown { + e_FM_PORT_DUAL_RATE_LIMITER_NONE = 0, /**< Use only single rate limiter */ + e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_2, /**< Divide high rate limiter by 2 */ + e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_4, /**< Divide high rate limiter by 4 */ + e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 /**< Divide high rate limiter by 8 */ +} e_FmPortDualRateLimiterScaleDown; + + +/**************************************************************************//** + @Description struct for defining FM port resources +*//***************************************************************************/ +typedef struct t_FmPortRsrc { + uint32_t num; /**< Committed required resource */ + uint32_t extra; /**< Extra (not committed) required resource */ +} t_FmPortRsrc; + +/**************************************************************************//** + @Description struct for defining pool depletion criteria +*//***************************************************************************/ +typedef struct t_FmPortBufPoolDepletion { + bool numberOfPoolsModeEnable; /**< select mode in which pause frames will be sent after + a number of pools are depleted */ + uint8_t numOfPools; /**< the minimum number of depleted pools that will + invoke pause frames transmission. */ + bool poolsToConsider[BM_MAX_NUM_OF_POOLS]; + /**< For each pool, TRUE if it should be considered for + depletion (Note - this pool must be used by this port!) */ + bool singlePoolModeEnable; /**< select mode in which pause frames will be sent after + a single of pools are depleted */ + bool poolsToConsiderForSingleMode[BM_MAX_NUM_OF_POOLS]; + /**< For each pool, TRUE if it should be considered for + depletion (Note - this pool must be used by this port!) 
*/ +} t_FmPortBufPoolDepletion; + +/**************************************************************************//** + @Description struct for defining observed pool depletion +*//***************************************************************************/ +typedef struct t_FmPortObservedBufPoolDepletion { + t_FmPortBufPoolDepletion poolDepletionParams;/**< parameters to define pool depletion */ + t_FmPortExtPools poolsParams; /**< Which external buffer pools are observed + (up to FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS), + and their sizes. */ +} t_FmPortObservedBufPoolDepletion; + +/**************************************************************************//** + @Description struct for defining Tx rate limiting +*//***************************************************************************/ +typedef struct t_FmPortRateLimit { + uint16_t maxBurstSize; /**< in kBytes for Tx ports, in frames + for offline parsing ports. (note that + for early chips burst size is + rounded up to a multiply of 1000 frames).*/ + uint32_t rateLimit; /**< in Kb/sec for Tx ports, in frame/sec for + offline parsing ports. Rate limit refers to + data rate (rather than line rate). */ + e_FmPortDualRateLimiterScaleDown rateLimitDivider; /**< For offline parsing ports only. Not-valid + for some earlier chip revisions */ +} t_FmPortRateLimit; + +/**************************************************************************//** + @Description struct for defining define the parameters of + the Rx port performance counters +*//***************************************************************************/ +typedef struct t_FmPortPerformanceCnt { + uint8_t taskCompVal; /**< Task compare value */ + uint8_t queueCompVal; /**< Rx queue/Tx confirm queue compare + value (unused for H/O) */ + uint8_t dmaCompVal; /**< Dma compare value */ + uint32_t fifoCompVal; /**< Fifo compare value (in bytes) */ +} t_FmPortPerformanceCnt; + +/**************************************************************************//** + @Description struct for defining buffer content. +*//***************************************************************************/ +typedef struct t_FmPortBufferPrefixContent { + uint16_t privDataSize; /**< Number of bytes to be left at the beginning + of the external buffer */ + bool passPrsResult; /**< TRUE to pass the parse result to/from the FM */ + bool passTimeStamp; /**< TRUE to pass the timeStamp to/from the FM */ + bool passHashResult; /**< TRUE to pass the KG hash result to/from the FM */ + bool passAllOtherPCDInfo;/**< Add all other Internal-Context information: + AD, hash-result, key, etc. */ + uint16_t dataAlign; /**< 0 to use driver's default alignment, other value + for selecting a data alignment (must be a + power of 2) */ +#ifdef DEBUG + bool passDebugInfo; /**< Debug-information */ +#endif /* DEBUG */ +#ifdef FM_CAPWAP_SUPPORT + uint8_t manipExtraSpace; /**< Maximum extra size needed (insertion-size minus removal-size) */ +#endif /* FM_CAPWAP_SUPPORT */ +} t_FmPortBufferPrefixContent; + +/**************************************************************************//** + @Description struct for defining backup Bm Pools. +*//***************************************************************************/ +typedef struct t_FmPortBackupBmPools { + uint8_t numOfBackupPools; /**< Number of BM backup pools - + must be smaller than the total number of + pools defined for the specified port.*/ + uint8_t poolIds[FM_PORT_MAX_NUM_OF_EXT_POOLS]; + /**< numOfBackupPools pool id's, specifying which + pools should be used only as backup. 
Pool + id's specified here must be a subset of the + pools used by the specified port.*/ +} t_FmPortBackupBmPools; + + +/**************************************************************************//** + @Function FM_PORT_ConfigDeqHighPriority + + @Description Calling this routine changes the dequeue priority in the + internal driver data base from its default configuration + [TRUE] + + May be used for Non-Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] highPri TRUE to select high priority, FALSE for normal operation. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDeqHighPriority(t_Handle h_FmPort, bool highPri); + +/**************************************************************************//** + @Function FM_PORT_ConfigDeqType + + @Description Calling this routine changes the dequeue type parameter in the + internal driver data base from its default configuration + [e_FM_PORT_DEQ_TYPE1]. + + May be used for Non-Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] deqType According to QM definition. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDeqType(t_Handle h_FmPort, e_FmPortDeqType deqType); + +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT +/**************************************************************************//** + @Function FM_PORT_ConfigDeqPrefetchOption + + @Description Calling this routine changes the dequeue prefetch option parameter in the + internal driver data base from its default configuration + [e_FM_PORT_DEQ_FULL_PREFETCH] + Note: Available for some chips only + + May be used for Non-Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] deqPrefetchOption New option + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDeqPrefetchOption(t_Handle h_FmPort, e_FmPortDeqPrefetchOption deqPrefetchOption); +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + +/**************************************************************************//** + @Function FM_PORT_ConfigDeqByteCnt + + @Description Calling this routine changes the dequeue byte count parameter in + the internal driver data base from its default configuration [2000]. + + May be used for Non-Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] deqByteCnt New byte count + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDeqByteCnt(t_Handle h_FmPort, uint16_t deqByteCnt); + +/**************************************************************************//** + @Function FM_PORT_ConfigTxFifoMinFillLevel + + @Description Calling this routine changes the fifo minimum + fill level parameter in the internal driver data base + from its default configuration [0] + + May be used for Tx ports only + + @Param[in] h_FmPort A handle to a FM Port module. 
+ @Param[in] minFillLevel New value + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigTxFifoMinFillLevel(t_Handle h_FmPort, uint32_t minFillLevel); + +/**************************************************************************//** + @Function FM_PORT_ConfigTxFifoDeqPipelineDepth + + @Description Calling this routine changes the fifo dequeue + pipeline depth parameter in the internal driver data base + + from its default configuration: 1G ports: [2], + 10G port: [8] + + May be used for Tx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] deqPipelineDepth New value + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigTxFifoDeqPipelineDepth(t_Handle h_FmPort, uint8_t deqPipelineDepth); + +/**************************************************************************//** + @Function FM_PORT_ConfigTxFifoLowComfLevel + + @Description Calling this routine changes the fifo low comfort level + parameter in internal driver data base + from its default configuration [5] + + May be used for Tx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] fifoLowComfLevel New value + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigTxFifoLowComfLevel(t_Handle h_FmPort, uint32_t fifoLowComfLevel); + +/**************************************************************************//** + @Function FM_PORT_ConfigRxFifoThreshold + + @Description Calling this routine changes the threshold of the FIFO + fill level parameter in the internal driver data base + from its default configuration [BMI_MAX_FIFO_SIZE] + + If the total number of buffers which are + currently in use and associated with the + specific RX port exceed this threshold, the + BMI will signal the MAC to send a pause frame + over the link. + + May be used for Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] fifoThreshold New value + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigRxFifoThreshold(t_Handle h_FmPort, uint32_t fifoThreshold); + +/**************************************************************************//** + @Function FM_PORT_ConfigRxFifoPriElevationLevel + + @Description Calling this routine changes the priority elevation level + parameter in the internal driver data base from its default + configuration [BMI_MAX_FIFO_SIZE] + + If the total number of buffers which are currently in use and + associated with the specific RX port exceed the amount specified + in priElevationLevel, BMI will signal the main FM's DMA to + elevate the FM priority on the system bus. + + May be used for Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] priElevationLevel New value + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). 
+*//***************************************************************************/
+t_Error FM_PORT_ConfigRxFifoPriElevationLevel(t_Handle h_FmPort, uint32_t priElevationLevel);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigBufferPrefixContent
+
+ @Description   Defines the structure, size and content of the application buffer
+                prefix. The FM reserves the first 'privDataSize' bytes of the
+                buffer and then, depending on 'passPrsResult' and 'passTimeStamp',
+                copies the parse result and the timestamp, followed by the packet
+                itself (in this order), into the application buffer at the
+                resulting offset. For Tx ports, if 'passPrsResult' is set, the
+                application should place these fields at their offsets within the
+                prefix.
+                Calling this routine changes the buffer margins definitions
+                in the internal driver data base from its default
+                configuration: Data size:  [0]
+                               Pass Parser result: [FALSE].
+                               Pass timestamp: [FALSE].
+
+                May be used for all ports
+
+ @Param[in]     h_FmPort                        A handle to a FM Port module.
+ @Param[in,out] p_FmPortBufferPrefixContent     A structure of parameters describing the
+                                                structure of the buffer.
+                                                Out parameter: Start margin - offset
+                                                of data from start of external buffer.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/
+t_Error FM_PORT_ConfigBufferPrefixContent(t_Handle h_FmPort, t_FmPortBufferPrefixContent *p_FmPortBufferPrefixContent);
+
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigCheksumLastBytesIgnore
+
+ @Description   Calling this routine changes the number of checksum bytes to ignore
+                parameter in the internal driver data base from its default configuration
+                [0].
+
+                May be used by Tx & Rx ports only
+
+ @Param[in]     h_FmPort                A handle to a FM Port module.
+ @Param[in]     cheksumLastBytesIgnore  New value
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/
+t_Error FM_PORT_ConfigCheksumLastBytesIgnore(t_Handle h_FmPort, uint8_t cheksumLastBytesIgnore);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigCutBytesFromEnd
+
+ @Description   Calling this routine changes the number of bytes to cut from a
+                frame's end parameter in the internal driver data base
+                from its default configuration [4].
+                Note that if the result of (frame length before chop - cutBytesFromEnd) is
+                less than 14 bytes, the chop operation is not executed.
+
+                May be used for Rx ports only
+
+ @Param[in]     h_FmPort        A handle to a FM Port module.
+ @Param[in]     cutBytesFromEnd New value
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/
+t_Error FM_PORT_ConfigCutBytesFromEnd(t_Handle h_FmPort, uint8_t cutBytesFromEnd);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigPoolDepletion
+
+ @Description   Calling this routine enables pause frame generation depending on the
+                depletion status of BM pools. It also defines the conditions to activate
+                this functionality. By default, this functionality is disabled.
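+
+                Example - an illustrative configuration (the pool id and the
+                number-of-pools threshold are assumptions):
+
+                    t_FmPortBufPoolDepletion  poolDepletion;
+
+                    memset(&poolDepletion, 0, sizeof(poolDepletion));
+                    poolDepletion.numberOfPoolsModeEnable = TRUE;
+                    poolDepletion.numOfPools              = 2;
+                    poolDepletion.poolsToConsider[poolId] = TRUE;   /* a pool used by this port */
+                    FM_PORT_ConfigPoolDepletion(h_FmPort, &poolDepletion);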
+ + May be used for Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_BufPoolDepletion A structure of pool depletion parameters + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigPoolDepletion(t_Handle h_FmPort, t_FmPortBufPoolDepletion *p_BufPoolDepletion); + +/**************************************************************************//** + @Function FM_PORT_ConfigObservedPoolDepletion + + @Description Calling this routine enables a mechanism to stop port enqueue + depending on the depletion status of selected BM pools. + It also defines the conditions to activate + this functionality. By default, this functionality is disabled. + + Note: Available for some chips only + + May be used for Offline Parsing ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPortObservedBufPoolDepletion A structure of parameters for pool depletion. + + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigObservedPoolDepletion(t_Handle h_FmPort, t_FmPortObservedBufPoolDepletion *p_FmPortObservedBufPoolDepletion); + +/**************************************************************************//** + @Function FM_PORT_ConfigExtBufPools + + @Description This routine should be called for offline parsing ports + that internally use BM buffer pools. In such cases, e.g. for fragmentation and + re-assembly, the FM needs new BM buffers. By calling this routine the user + specifies the BM buffer pools that should be used. + + Note: Available for some chips only + + May be used for Offline Parsing ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPortExtPools A structure of parameters for the external pools. + + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigExtBufPools(t_Handle h_FmPort, t_FmPortExtPools *p_FmPortExtPools); + +/**************************************************************************//** + @Function FM_PORT_ConfigBackupPools + + @Description Calling this routine allows the configuration of some of the BM pools + defined for this port as backup pools. + A pool configured to be a backup pool will be used only if all other + enabled non-backup pools are depleted. + + May be used for Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPortBackupBmPools An array of pool id's. All pools specified here will + be defined as backup pools. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). 
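+
+                Example (illustrative only; 'poolId' must be one of the pools
+                already assigned to this port):
+
+                    t_FmPortBackupBmPools  backupPools;
+
+                    backupPools.numOfBackupPools = 1;
+                    backupPools.poolIds[0]       = poolId;
+                    FM_PORT_ConfigBackupPools(h_FmPort, &backupPools);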
+*//***************************************************************************/
+t_Error FM_PORT_ConfigBackupPools(t_Handle h_FmPort, t_FmPortBackupBmPools *p_FmPortBackupBmPools);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigFrmDiscardOverride
+
+ @Description   Calling this routine changes the error frames destination parameter
+                in the internal driver data base from its default configuration:
+                override = [FALSE]
+
+                May be used for Rx and offline parsing ports only
+
+ @Param[in]     h_FmPort        A handle to a FM Port module.
+ @Param[in]     override        TRUE to override discarding of error frames and
+                                enqueue them to the error queue instead.
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/
+t_Error FM_PORT_ConfigFrmDiscardOverride(t_Handle h_FmPort, bool override);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigErrorsToDiscard
+
+ @Description   Calling this routine changes the behaviour-on-error parameter
+                in the internal driver data base from its default configuration:
+                [FM_PORT_FRM_ERR_CLS_DISCARD].
+                If a requested error was previously defined as "ErrorsToEnqueue", its
+                definition will change and the frame will be discarded.
+                Errors that were defined neither as "ErrorsToEnqueue" nor as
+                "ErrorsToDiscard" will be forwarded to the CPU.
+
+                May be used for Rx and offline parsing ports only
+
+ @Param[in]     h_FmPort        A handle to a FM Port module.
+ @Param[in]     errs            A list of errors to discard
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/
+t_Error FM_PORT_ConfigErrorsToDiscard(t_Handle h_FmPort, fmPortFrameErrSelect_t errs);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigDmaSwapData
+
+ @Description   Calling this routine changes the DMA swap data parameter
+                in the internal driver data base from its default
+                configuration [e_FM_PORT_DMA_NO_SWP]
+
+                May be used for all port types
+
+ @Param[in]     h_FmPort        A handle to a FM Port module.
+ @Param[in]     swapData        New selection
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/
+t_Error FM_PORT_ConfigDmaSwapData(t_Handle h_FmPort, e_FmPortDmaSwap swapData);
+
+/**************************************************************************//**
+ @Function      FM_PORT_ConfigDmaIcCacheAttr
+
+ @Description   Calling this routine changes the internal context cache
+                attribute parameter in the internal driver data base
+                from its default configuration [e_FM_PORT_DMA_NO_STASH]
+
+                May be used for all port types
+
+ @Param[in]     h_FmPort                A handle to a FM Port module.
+ @Param[in]     intContextCacheAttr     New selection
+
+ @Return        E_OK on success; Error code otherwise.
+
+ @Cautions      Allowed only following FM_PORT_Config() and before FM_PORT_Init().
+*//***************************************************************************/ +t_Error FM_PORT_ConfigDmaIcCacheAttr(t_Handle h_FmPort, e_FmPortDmaCache intContextCacheAttr); + +/**************************************************************************//** + @Function FM_PORT_ConfigDmaHdrAttr + + @Description Calling this routine changes the header cache + attribute parameter in the internal driver data base + from its default configuration [e_FM_PORT_DMA_NO_STASH] + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] headerCacheAttr New selection + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDmaHdrAttr(t_Handle h_FmPort, e_FmPortDmaCache headerCacheAttr); + +/**************************************************************************//** + @Function FM_PORT_ConfigDmaScatterGatherAttr + + @Description Calling this routine changes the scatter gather cache + attribute parameter in the internal driver data base + from its default configuration [e_FM_PORT_DMA_NO_STASH] + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] scatterGatherCacheAttr New selection + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDmaScatterGatherAttr(t_Handle h_FmPort, e_FmPortDmaCache scatterGatherCacheAttr); + +/**************************************************************************//** + @Function FM_PORT_ConfigDmaWriteOptimize + + @Description Calling this routine changes the write optimization + parameter in the internal driver data base + from its default configuration: optimize = [TRUE] + + May be used for non-Tx port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] optimize TRUE to enable optimization, FALSE for normal operation + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDmaWriteOptimize(t_Handle h_FmPort, bool optimize); + +/**************************************************************************//** + @Function FM_PORT_ConfigDfltColor + + @Description Calling this routine changes the internal default color parameter + in the internal driver data base + from its default configuration [e_FM_PORT_COLOR_GREEN] + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] color New selection + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDfltColor(t_Handle h_FmPort, e_FmPortColor color); + +/**************************************************************************//** + @Function FM_PORT_ConfigSyncReq + + @Description Calling this routine changes the synchronization attribute parameter + in the internal driver data base from its default configuration: + syncReq = [TRUE] + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. 
+ @Param[in] syncReq TRUE to request synchronization, FALSE otherwize. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigSyncReq(t_Handle h_FmPort, bool syncReq); + +/**************************************************************************//** + @Function FM_PORT_ConfigForwardReuseIntContext + + @Description This routine is relevant for Rx ports that are routed to offline + parsing. It changes the internal context reuse option + in the internal driver data base from its default configuration: + reuse = [FALSE] + + May be used for Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] reuse TRUE to reuse internal context on frames + forwarded to offline parsing. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigForwardReuseIntContext(t_Handle h_FmPort, bool reuse); + +/**************************************************************************//** + @Function FM_PORT_ConfigDontReleaseTxBufToBM + + @Description This routine should be called if no Tx confirmation + is done, and yet buffers should not be released to the BM. + Normally, buffers are returned using the Tx confirmation + process. When Tx confirmation is not used (defFqid=0), + buffers are typically released to the BM. This routine + may be called to avoid this behavior and not release the + buffers. + + May be used for Tx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ConfigDontReleaseTxBufToBM(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_ConfigIMMaxRxBufLength + + @Description Changes the maximum receive buffer length from its default + configuration: Closest rounded down power of 2 value of the + data buffer size. + + The maximum receive buffer length directly affects the structure + of received frames (single- or multi-buffered) and the performance + of both the FM and the driver. + + The selection between single- or multi-buffered frames should be + done according to the characteristics of the specific application. + The recommended mode is to use a single data buffer per packet, + as this mode provides the best performance. However, the user can + select to use multiple data buffers per packet. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] newVal Maximum receive buffer length (in bytes). + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). + This routine is to be used only if Independent-Mode is enabled. 
+*//***************************************************************************/ +t_Error FM_PORT_ConfigIMMaxRxBufLength(t_Handle h_FmPort, uint16_t newVal); + +/**************************************************************************//** + @Function FM_PORT_ConfigIMRxBdRingLength + + @Description Changes the receive BD ring length from its default + configuration:[128] + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] newVal The desired BD ring length. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). + This routine is to be used only if Independent-Mode is enabled. +*//***************************************************************************/ +t_Error FM_PORT_ConfigIMRxBdRingLength(t_Handle h_FmPort, uint16_t newVal); + +/**************************************************************************//** + @Function FM_PORT_ConfigIMTxBdRingLength + + @Description Changes the transmit BD ring length from its default + configuration:[16] + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] newVal The desired BD ring length. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). + This routine is to be used only if Independent-Mode is enabled. +*//***************************************************************************/ +t_Error FM_PORT_ConfigIMTxBdRingLength(t_Handle h_FmPort, uint16_t newVal); + +/**************************************************************************//** + @Function FM_PORT_ConfigIMFmanCtrlExternalStructsMemory + + @Description Configures memory partition and attributes for FMan-Controller + data structures (e.g. BD rings). + Calling this routine changes the internal driver data base + from its default configuration + [0 , MEMORY_ATTR_CACHEABLE]. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] memId Memory partition ID. + @Param[in] memAttributes Memory attributes mask (a combination of MEMORY_ATTR_x flags). + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error FM_PORT_ConfigIMFmanCtrlExternalStructsMemory(t_Handle h_FmPort, + uint8_t memId, + uint32_t memAttributes); + +/**************************************************************************//** + @Function FM_PORT_ConfigIMPolling + + @Description Changes the Rx flow from interrupt driven (default) to polling. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). + This routine is to be used only if Independent-Mode is enabled. +*//***************************************************************************/ +t_Error FM_PORT_ConfigIMPolling(t_Handle h_FmPort); + +/** @} */ /* end of FM_PORT_advanced_init_grp group */ +/** @} */ /* end of FM_PORT_init_grp group */ + + +/**************************************************************************//** + @Group FM_PORT_runtime_control_grp FM Port Runtime Control Unit + + @Description FM Port Runtime control unit API functions, definitions and enums. 
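A minimal Independent-Mode bring-up sketch recapping the ConfigIM* routines above (editorial addition, not from the original header), assuming 'h_FmPort' comes from FM_PORT_Config() with Independent-Mode enabled; the ring lengths are arbitrary examples.

/* Sketch: size the IM BD rings and switch Rx to polling before FM_PORT_Init(). */
static t_Error im_port_pre_init(t_Handle h_FmPort)
{
    t_Error err;

    err = FM_PORT_ConfigIMRxBdRingLength(h_FmPort, 256);   /* default is 128 */
    if (err != E_OK)
        return err;

    err = FM_PORT_ConfigIMTxBdRingLength(h_FmPort, 32);    /* default is 16 */
    if (err != E_OK)
        return err;

    /* Poll for received frames (see FM_PORT_ImRx()) instead of using interrupts. */
    return FM_PORT_ConfigIMPolling(h_FmPort);
}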
+ + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description enum for defining FM Port counters +*//***************************************************************************/ +typedef enum e_FmPortCounters { + e_FM_PORT_COUNTERS_CYCLE, /**< BMI performance counter */ + e_FM_PORT_COUNTERS_TASK_UTIL, /**< BMI performance counter */ + e_FM_PORT_COUNTERS_QUEUE_UTIL, /**< BMI performance counter */ + e_FM_PORT_COUNTERS_DMA_UTIL, /**< BMI performance counter */ + e_FM_PORT_COUNTERS_FIFO_UTIL, /**< BMI performance counter */ + e_FM_PORT_COUNTERS_RX_PAUSE_ACTIVATION, /**< BMI Rx only performance counter */ + e_FM_PORT_COUNTERS_FRAME, /**< BMI statistics counter */ + e_FM_PORT_COUNTERS_DISCARD_FRAME, /**< BMI statistics counter */ + e_FM_PORT_COUNTERS_DEALLOC_BUF, /**< BMI deallocate buffer statistics counter */ + e_FM_PORT_COUNTERS_RX_BAD_FRAME, /**< BMI Rx only statistics counter */ + e_FM_PORT_COUNTERS_RX_LARGE_FRAME, /**< BMI Rx only statistics counter */ + e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD, /**< BMI Rx only statistics counter */ + e_FM_PORT_COUNTERS_RX_FILTER_FRAME, /**< BMI Rx & OP only statistics counter */ + e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR, /**< BMI Rx, OP & HC only statistics counter */ + e_FM_PORT_COUNTERS_WRED_DISCARD, /**< BMI OP & HC only statistics counter */ + e_FM_PORT_COUNTERS_LENGTH_ERR, /**< BMI non-Rx statistics counter */ + e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT, /**< BMI non-Rx statistics counter */ + e_FM_PORT_COUNTERS_DEQ_TOTAL, /**< QMI counter */ + e_FM_PORT_COUNTERS_ENQ_TOTAL, /**< QMI counter */ + e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT, /**< QMI counter */ + e_FM_PORT_COUNTERS_DEQ_CONFIRM /**< QMI counter */ +} e_FmPortCounters; + +/**************************************************************************//** + @Description Structure for Port id parameters. + Fields commented 'IN' are passed by the port module to be used + by the FM module. + Fields commented 'OUT' will be filled by FM before returning to port. +*//***************************************************************************/ +typedef struct t_FmPortCongestionGrps { + uint16_t numOfCongestionGrpsToConsider; /**< The number of required congestion groups + to define the size of the following array */ + uint8_t congestionGrpsToConsider[FM_PORT_NUM_OF_CONGESTION_GRPS]; + /**< An array of 'numOfCongestionGrpsToConsider' + describing the groups */ +} t_FmPortCongestionGrps; + + + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/**************************************************************************//** + @Function FM_PORT_DumpRegs + + @Description Dump all regs. + + Calling this routine invalidates the descriptor. + + @Param[in] h_FmPort - FM PORT module descriptor + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_DumpRegs(t_Handle h_FmPort); +#endif /* (defined(DEBUG_ERRORS) && ... */ + +/**************************************************************************//** + @Function FM_PORT_GetBufferDataOffset + + @Description Relevant for Rx ports. + Returns the data offset from the beginning of the data buffer + + @Param[in] h_FmPort - FM PORT module descriptor + + @Return data offset. + + @Cautions Allowed only following FM_PORT_Init(). 
+*//***************************************************************************/ +uint32_t FM_PORT_GetBufferDataOffset(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_GetBufferICInfo + + @Description Returns a pointer to the Internal Context information in the data buffer + + @Param[in] h_FmPort - FM PORT module descriptor + @Param[in] p_Data - A pointer to the data buffer. + + @Return Internal context info pointer on success, NULL if 'allOtherInfo' was not configured for this port. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +uint8_t * FM_PORT_GetBufferICInfo(t_Handle h_FmPort, char *p_Data); + +#ifdef DEBUG +/**************************************************************************//** + @Function FM_PORT_GetBufferDebugInfo + + @Description Returns a pointer to the debug information in the data buffer + + @Param[in] h_FmPort - FM PORT module descriptor + @Param[in] p_Data - A pointer to the data buffer. + + @Return Debug info pointer on success, NULL if 'passDebugInfo' was not configured for this port. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +uint8_t * FM_PORT_GetBufferDebugInfo(t_Handle h_FmPort, char *p_Data); +#endif /* DEBUG */ + +/**************************************************************************//** + @Function FM_PORT_GetBufferPrsResult + + @Description Returns the pointer to the parse result in the data buffer. In Rx ports this is relevant after reception, if parse result is configured to be part of the data passed to the application. For non Rx ports it may be used to get the pointer of the area in the buffer where parse result should be initialized - if so configured. See FM_PORT_ConfigBufferPrefixContent for data buffer prefix configuration. + + @Param[in] h_FmPort - FM PORT module descriptor + @Param[in] p_Data - A pointer to the data buffer. + + @Return Parse result pointer on success, NULL if parse result was not configured for this port. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_FmPrsResult * FM_PORT_GetBufferPrsResult(t_Handle h_FmPort, char *p_Data); + +/**************************************************************************//** + @Function FM_PORT_GetBufferTimeStamp + + @Description Returns the time stamp in the data buffer. Relevant for Rx ports for getting the buffer time stamp. See FM_PORT_ConfigBufferPrefixContent for data buffer prefix configuration. + + @Param[in] h_FmPort - FM PORT module descriptor + @Param[in] p_Data - A pointer to the data buffer. + + @Return A pointer to the time stamp on success, NULL otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +uint64_t * FM_PORT_GetBufferTimeStamp(t_Handle h_FmPort, char *p_Data); + +/**************************************************************************//** + @Function FM_PORT_GetBufferHashResult + + @Description Given a data buffer, on the condition that hash result was defined as a part of the buffer content (see FM_PORT_ConfigBufferPrefixContent) this routine will return the pointer to the hash result location in the buffer prefix.
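For illustration only (editorial addition, not part of the original header): a sketch of how an application might use the buffer-prefix accessors on a received frame. The t_FmPrsResult fields are not dereferenced here because they are defined elsewhere in the PCD headers.

/* Sketch: inspect the prefix of a received data buffer. */
static void inspect_rx_prefix(t_Handle h_FmPort, char *p_Data)
{
    /* Offset of the frame payload from the start of the data buffer. */
    uint32_t dataOffset = FM_PORT_GetBufferDataOffset(h_FmPort);

    /* NULL if the parse result was not configured into the buffer prefix. */
    t_FmPrsResult *p_PrsResult = FM_PORT_GetBufferPrsResult(h_FmPort, p_Data);

    /* NULL if the time stamp was not configured into the buffer prefix. */
    uint64_t *p_TimeStamp = FM_PORT_GetBufferTimeStamp(h_FmPort, p_Data);

    (void)dataOffset; (void)p_PrsResult; (void)p_TimeStamp;
}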
+ + @Param[in] h_FmPort - FM PORT module descriptor + @Param[in] p_Data - A pointer to the data buffer. + + @Return A pointer to the hash result on success, NULL otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +uint8_t * FM_PORT_GetBufferHashResult(t_Handle h_FmPort, char *p_Data); + +/**************************************************************************//** + @Function FM_PORT_Disable + + @Description Gracefully disables an FM port. The port stops accepting new tasks, and the routine returns only after all tasks associated with the port have terminated. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). This is a blocking routine; it returns after the port is gracefully stopped, i.e. the port will not accept new frames, but it will finish all frames or tasks that had already begun. +*//***************************************************************************/ +t_Error FM_PORT_Disable(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_Enable + + @Description A runtime routine provided to allow disable/enable of port. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_Enable(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_SetRateLimit + + @Description Calling this routine enables rate limit algorithm. By default, this functionality is disabled. Note that rate-limit mechanism uses the FM time stamp. The selected rate limit specified here would be rounded DOWN to the nearest 16M. + + May be used for Tx and offline parsing ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_RateLimit A structure of rate limit parameters + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetRateLimit(t_Handle h_FmPort, t_FmPortRateLimit *p_RateLimit); + +/**************************************************************************//** + @Function FM_PORT_DeleteRateLimit + + @Description Calling this routine disables and clears rate limit initialization. + + May be used for Tx and offline parsing ports only + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_DeleteRateLimit(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_SetStatisticsCounters + + @Description Calling this routine enables/disables port's statistics counters. By default, counters are enabled. + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] enable TRUE to enable, FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init().
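A hedged sketch (editorial addition): because FM_PORT_Disable() blocks until in-flight work drains, a common pattern is to bracket a runtime change with disable/enable. The error code used when re-enable fails is a placeholder, not necessarily defined by this API.

/* Sketch: stop traffic, apply a runtime change (e.g. FM_PORT_DeleteRateLimit),
 * then resume. */
static t_Error with_port_stopped(t_Handle h_FmPort, t_Error (*change)(t_Handle))
{
    t_Error err = FM_PORT_Disable(h_FmPort);
    if (err != E_OK)
        return err;

    err = change(h_FmPort);

    /* Re-enable even if the change failed, so the port keeps passing traffic. */
    if (FM_PORT_Enable(h_FmPort) != E_OK && err == E_OK)
        err = E_INVALID_STATE;  /* placeholder error code - assumption */

    return err;
}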
+*//***************************************************************************/ +t_Error FM_PORT_SetStatisticsCounters(t_Handle h_FmPort, bool enable); + +/**************************************************************************//** + @Function FM_PORT_SetFrameQueueCounters + + @Description Calling this routine enables/disables port's enqueue/dequeue counters. By default, counters are enabled. + + May be used for all ports + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] enable TRUE to enable, FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetFrameQueueCounters(t_Handle h_FmPort, bool enable); + +/**************************************************************************//** + @Function FM_PORT_SetPerformanceCounters + + @Description Calling this routine enables/disables port's performance counters. By default, counters are enabled. + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] enable TRUE to enable, FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetPerformanceCounters(t_Handle h_FmPort, bool enable); + +/**************************************************************************//** + @Function FM_PORT_SetPerformanceCountersParams + + @Description Calling this routine defines port's performance counters parameters. + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPortPerformanceCnt A pointer to a structure of performance counters parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetPerformanceCountersParams(t_Handle h_FmPort, t_FmPortPerformanceCnt *p_FmPortPerformanceCnt); + +/**************************************************************************//** + @Function FM_PORT_AnalyzePerformanceParams + + @Description The user may call this routine so that the driver will analyze whether the basic performance parameters are correct and, where possible, suggest improvements; the basic parameters are FIFO sizes, number of DMAs and number of TNUMs for the port. + + May be used for all port types + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_AnalyzePerformanceParams(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_SetNumOfOpenDmas + + @Description Calling this routine updates the number of open DMAs requested for this port. + + May be used for all port types. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_NumOfOpenDmas A structure of resource requested parameters + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init().
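A brief usage sketch (editorial addition, not from the original header): enable the performance counters, let the driver analyse the port's provisioning, then sample one utilisation counter; the counter chosen below is just an example.

/* Sketch: check how busy the port's tasks are. */
static t_Error check_port_utilisation(t_Handle h_FmPort, uint32_t *p_TaskUtil)
{
    t_Error err = FM_PORT_SetPerformanceCounters(h_FmPort, TRUE);
    if (err != E_OK)
        return err;

    /* Driver analyses FIFO size, number of DMAs and TNUMs for this port. */
    err = FM_PORT_AnalyzePerformanceParams(h_FmPort);
    if (err != E_OK)
        return err;

    *p_TaskUtil = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_TASK_UTIL);
    return E_OK;
}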
+*//***************************************************************************/ +t_Error FM_PORT_SetNumOfOpenDmas(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfOpenDmas); + +/**************************************************************************//** + @Function FM_PORT_SetNumOfTasks + + @Description Calling this routine updates the number of tasks requested for + this port. + + May be used for all port types. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_NumOfTasks A structure of resource requested parameters + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetNumOfTasks(t_Handle h_FmPort, t_FmPortRsrc *p_NumOfTasks); + +/**************************************************************************//** + @Function FM_PORT_SetSizeOfFifo + + @Description Calling this routine updates the Fifo size resource requested for + this port. + + May be used for all port types - note that only Rx has 'extra' + fifo size. For other ports 'extra' field must be disabled. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_SizeOfFifo A structure of resource requested parameters + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetSizeOfFifo(t_Handle h_FmPort, t_FmPortRsrc *p_SizeOfFifo); + +/**************************************************************************//** + @Function FM_PORT_SetAllocBufCounter + + @Description Calling this routine enables/disables BM pool allocate + buffer counters. + By default, counters are enabled. + + May be used for Rx ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] poolId BM pool id. + @Param[in] enable TRUE to enable, FALSE to disable. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, bool enable); + +/**************************************************************************//** + @Function FM_PORT_GetCounter + + @Description Reads one of the FM PORT counters. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] fmPortCounter The requested counter. + + @Return Counter's current value. + + @Cautions Allowed only following FM_PORT_Init(). + Note that it is user's responsibility to call this routine only + for enabled counters, and there will be no indication if a + disabled counter is accessed. +*//***************************************************************************/ +uint32_t FM_PORT_GetCounter(t_Handle h_FmPort, e_FmPortCounters fmPortCounter); + +/**************************************************************************//** + @Function FM_PORT_ModifyCounter + + @Description Sets a value to an enabled counter. Use "0" to reset the counter. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] fmPortCounter The requested counter. + @Param[in] value The requested value to be written into the counter. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). 
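An illustrative counter-handling sketch (editorial addition): counters must be enabled (see FM_PORT_SetStatisticsCounters()) before being read, since a disabled counter gives no indication; writing zero resets a counter.

/* Sketch: sample the frame counter, then reset it. */
static uint32_t sample_and_clear_frame_counter(t_Handle h_FmPort)
{
    uint32_t frames = FM_PORT_GetCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME);

    (void)FM_PORT_ModifyCounter(h_FmPort, e_FM_PORT_COUNTERS_FRAME, 0);
    return frames;
}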
+*//***************************************************************************/ +t_Error FM_PORT_ModifyCounter(t_Handle h_FmPort, e_FmPortCounters fmPortCounter, uint32_t value); + +/**************************************************************************//** + @Function FM_PORT_GetAllocBufCounter + + @Description Reads one of the FM PORT buffer counters. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] poolId The requested pool. + + @Return Counter's current value. + + @Cautions Allowed only following FM_PORT_Init(). Note that it is user's responsibility to call this routine only for enabled counters, and there will be no indication if a disabled counter is accessed. +*//***************************************************************************/ +uint32_t FM_PORT_GetAllocBufCounter(t_Handle h_FmPort, uint8_t poolId); + +/**************************************************************************//** + @Function FM_PORT_ModifyAllocBufCounter + + @Description Sets a value to an enabled counter. Use "0" to reset the counter. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] poolId The requested pool. + @Param[in] value The requested value to be written into the counter. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ModifyAllocBufCounter(t_Handle h_FmPort, uint8_t poolId, uint32_t value); + +/**************************************************************************//** + @Function FM_PORT_AddCongestionGrps + + @Description This routine affects the corresponding Tx port. It should be called in order to enable pause frame transmission in case of congestion in one or more of the congestion groups relevant to this port. Each call to this routine may add one or more congestion groups to be considered relevant to this port. + + May be used for Rx, or RX+OP ports only (depending on chip) + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_CongestionGrps A pointer to an array of congestion group ids to consider. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_AddCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps); + +/**************************************************************************//** + @Function FM_PORT_RemoveCongestionGrps + + @Description This routine affects the corresponding Tx port. It should be called when congestion groups were defined for this port and are no longer relevant, or pause frame transmission is no longer required on their behalf. Each call to this routine may remove one or more congestion groups from those considered relevant to this port. + + May be used for Rx, or RX+OP ports only (depending on chip) + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_CongestionGrps A pointer to an array of congestion group ids to consider. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init().
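An illustrative sketch (editorial addition): populating t_FmPortCongestionGrps so the port reacts to two congestion groups; the group ids are arbitrary placeholders, and only the first 'numOfCongestionGrpsToConsider' array entries are read.

/* Sketch: have this Rx port trigger pause frames for two congestion groups. */
static t_Error watch_congestion_groups(t_Handle h_FmPort)
{
    t_FmPortCongestionGrps grps;

    grps.numOfCongestionGrpsToConsider = 2;
    grps.congestionGrpsToConsider[0] = 4;   /* placeholder group id */
    grps.congestionGrpsToConsider[1] = 9;   /* placeholder group id */

    return FM_PORT_AddCongestionGrps(h_FmPort, &grps);
}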
+*//***************************************************************************/ +t_Error FM_PORT_RemoveCongestionGrps(t_Handle h_FmPort, t_FmPortCongestionGrps *p_CongestionGrps); + +/**************************************************************************//** + @Function FM_PORT_IsStalled + + @Description A routine for checking whether the specified port is stalled. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return TRUE if port is stalled, FALSE otherwise + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +bool FM_PORT_IsStalled(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_ReleaseStalled + + @Description This routine may be called in case the port was stalled and may now be released. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_ReleaseStalled(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_SetRxL4ChecksumVerify + + @Description This routine is relevant for Rx ports (1G and 10G). The routine sets/clears the L3/L4 checksum verification (on the Rx side). Note that this takes effect only if the HW parser is enabled! + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] l4Checksum Boolean indicating whether to perform L3/L4 checksum verification on frames or not. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetRxL4ChecksumVerify(t_Handle h_FmPort, bool l4Checksum); + +/**************************************************************************//** + @Function FM_PORT_SetErrorsRoute + + @Description Errors selected for this routine will cause a frame with that error to be enqueued to the error queue. Errors not selected for this routine will cause a frame with that error to be enqueued to one of the other port queues. By default all errors are defined to be enqueued to the error queue. Errors that were configured to be discarded (at initialization) may not be selected here. + + May be used for Rx and offline parsing ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] errs A list of errors to enqueue to the error queue + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Config() and before FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetErrorsRoute(t_Handle h_FmPort, fmPortFrameErrSelect_t errs); + +/**************************************************************************//** + @Function FM_PORT_SetIMExceptions + + @Description Calling this routine enables/disables FM PORT interrupts. Note: Not available for guest partition. + + @Param[in] h_FmPort FM PORT module descriptor. + @Param[in] exception The exception to be selected. + @Param[in] enable TRUE to enable interrupt, FALSE to mask it. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init().
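Two tiny runtime sketches (editorial additions, not part of the original header): recovering a stalled port, and turning on Rx checksum verification, which only has an effect when the HW parser is active for the port.

/* Sketch: release the port if it is currently stalled. */
static t_Error unstall_if_needed(t_Handle h_FmPort)
{
    return FM_PORT_IsStalled(h_FmPort) ? FM_PORT_ReleaseStalled(h_FmPort) : E_OK;
}

/* Sketch: enable L3/L4 checksum verification on received frames. */
static t_Error enable_rx_l4_csum_verify(t_Handle h_FmPort)
{
    return FM_PORT_SetRxL4ChecksumVerify(h_FmPort, TRUE);
}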
+*//***************************************************************************/ +t_Error FM_PORT_SetIMExceptions(t_Handle h_FmPort, e_FmPortExceptions exception, bool enable); + + + +/**************************************************************************//** + @Group FM_PORT_pcd_runtime_control_grp FM Port PCD Runtime Control Unit + + @Description FM Port PCD Runtime control unit API functions, definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description A structure defining the KG scheme after the parser. + This is relevant only to change scheme selection mode - from + direct to indirect and vice versa, or when the scheme is selected directly, + to select the scheme id. + +*//***************************************************************************/ +typedef struct t_FmPcdKgSchemeSelect { + bool direct; /**< TRUE to use 'h_Scheme' directly, FALSE to use LCV.*/ + t_Handle h_DirectScheme; /**< Relevant for 'direct'=TRUE only. + 'h_DirectScheme' selects the scheme after parser. */ +} t_FmPcdKgSchemeSelect; + +/**************************************************************************//** + @Description A structure of scheme parameters +*//***************************************************************************/ +typedef struct t_FmPcdPortSchemesParams { + uint8_t numOfSchemes; /**< Number of schemes for port to be bound to. */ + t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES]; /**< Array of 'numOfSchemes' schemes for the + port to be bound to */ +} t_FmPcdPortSchemesParams; + +/**************************************************************************//** + @Description Union for defining port protocol parameters for parser +*//***************************************************************************/ +typedef union u_FmPcdHdrPrsOpts { + /* MPLS */ + struct { + bool labelInterpretationEnable; /**< When this bit is set, the last MPLS label will be + interpreted as described in HW spec table. When the bit + is cleared, the parser will advance to MPLS next parse */ + e_NetHeaderType nextParse; /**< must be equal or higher than IPv4 */ + } mplsPrsOptions; + /* VLAN */ + struct { + uint16_t tagProtocolId1; /**< User defined Tag Protocol Identifier, to be recognized + on VLAN TAG on top of 0x8100 and 0x88A8 */ + uint16_t tagProtocolId2; /**< User defined Tag Protocol Identifier, to be recognized + on VLAN TAG on top of 0x8100 and 0x88A8 */ + } vlanPrsOptions; + /* PPP */ + struct{ + bool enableMTUCheck; /**< Check validity of MTU according to RFC2516 */ + } pppoePrsOptions; + + /* IPV6 */ + struct{ + bool routingHdrDisable; /**< Disable routing header */ + } ipv6PrsOptions; + + /* UDP */ + struct{ + bool padIgnoreChecksum; /**< TRUE to ignore pad in checksum */ + } udpPrsOptions; + + /* TCP */ + struct { + bool padIgnoreChecksum; /**< TRUE to ignore pad in checksum */ + } tcpPrsOptions; +} u_FmPcdHdrPrsOpts; + +/**************************************************************************//** + @Description A structure for defining each header for the parser +*//***************************************************************************/ +typedef struct t_FmPcdPrsAdditionalHdrParams { + e_NetHeaderType hdr; /**< Selected header */ + bool errDisable; /**< TRUE to disable error indication */ + bool swPrsEnable; /**< Enable jump to SW parser when this + header is recognized by the HW parser. 
*/ + uint8_t indexPerHdr; /**< Normally 0, if more than one sw parser attachment exists for the same header, (in the main sw parser code) use this index to distinguish between them. */ + bool usePrsOpts; /**< TRUE to use parser options. */ + u_FmPcdHdrPrsOpts prsOpts; /**< A union according to header type, defining the parser options selected.*/ +} t_FmPcdPrsAdditionalHdrParams; + +/**************************************************************************//** + @Description struct for defining port PCD parameters +*//***************************************************************************/ +typedef struct t_FmPortPcdPrsParams { + uint8_t prsResultPrivateInfo; /**< The private info provides a method of inserting port information into the parser result. This information may be extracted by Keygen and be used for frames distribution when a per-port distinction is required, it may also be used as a port logical id for analyzing incoming frames. */ + uint8_t parsingOffset; /**< Number of bytes from beginning of packet to start parsing */ + e_NetHeaderType firstPrsHdr; /**< The type of the first header expected at 'parsingOffset' */ + bool includeInPrsStatistics; /**< TRUE to include this port in the parser statistics; NOTE: this field is not valid when the FM is in "guest" mode. */ + uint8_t numOfHdrsWithAdditionalParams; /**< Normally 0, some headers may get special parameters */ + t_FmPcdPrsAdditionalHdrParams additionalParams[FM_PCD_PRS_NUM_OF_HDRS]; /**< 'numOfHdrsWithAdditionalParams' structures of additional parameters for each header that requires them */ + bool setVlanTpid1; /**< TRUE to configure user selection of Ethertype to indicate a VLAN tag (in addition to the TPID values 0x8100 and 0x88A8). */ + uint16_t vlanTpid1; /**< extra tag to use if setVlanTpid1=TRUE. */ + bool setVlanTpid2; /**< TRUE to configure user selection of Ethertype to indicate a VLAN tag (in addition to the TPID values 0x8100 and 0x88A8). */ + uint16_t vlanTpid2; /**< extra tag to use if setVlanTpid2=TRUE. */ +} t_FmPortPcdPrsParams; + +/**************************************************************************//** + @Description struct for defining coarse classification parameters +*//***************************************************************************/ +typedef struct t_FmPortPcdCcParams { + t_Handle h_CcTree; /**< A handle to a CC tree */ +} t_FmPortPcdCcParams; + +/**************************************************************************//** + @Description struct for defining keygen parameters +*//***************************************************************************/ +typedef struct t_FmPortPcdKgParams { + uint8_t numOfSchemes; /**< Number of schemes for port to be bound to.
*/ + t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES]; + /**< Array of 'numOfSchemes' schemes handles for the + port to be bound to */ + bool directScheme; /**< TRUE for going from parser to a specific scheme, + regardless of parser result */ + t_Handle h_DirectScheme; /**< relevant only if direct == TRUE, Scheme handle, + as returned by FM_PCD_KgSetScheme */ +} t_FmPortPcdKgParams; + +/**************************************************************************//** + @Description struct for defining policer parameters +*//***************************************************************************/ +typedef struct t_FmPortPcdPlcrParams { + t_Handle h_Profile; /**< Selected profile handle; Relevant for one of + following cases: + e_FM_PORT_PCD_SUPPORT_PLCR_ONLY or + e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR were selected, + or if any flow uses a KG scheme were policer + profile is not generated + (bypassPlcrProfileGeneration selected) */ +} t_FmPortPcdPlcrParams; + +/**************************************************************************//** + @Description struct for defining port PCD parameters +*//***************************************************************************/ +typedef struct t_FmPortPcdParams { + e_FmPortPcdSupport pcdSupport; /**< Relevant for Rx and offline ports only. + Describes the active PCD engines for this port. */ + t_Handle h_NetEnv; /**< HL Unused in PLCR only mode */ + t_FmPortPcdPrsParams *p_PrsParams; /**< Parser parameters for this port */ + t_FmPortPcdCcParams *p_CcParams; /**< Coarse classification parameters for this port */ + t_FmPortPcdKgParams *p_KgParams; /**< Keygen parameters for this port */ + t_FmPortPcdPlcrParams *p_PlcrParams; /**< Policer parameters for this port */ +} t_FmPortPcdParams; + +/**************************************************************************//** + @Description A structure for defining the Parser starting point +*//***************************************************************************/ +typedef struct t_FmPcdPrsStart { + uint8_t parsingOffset; /**< Number of bytes from beginning of packet to + start parsing */ + e_NetHeaderType firstPrsHdr; /**< The type of the first header axpected at + 'parsingOffset' */ +} t_FmPcdPrsStart; + + +/**************************************************************************//** + @Function FM_PORT_SetPCD + + @Description Calling this routine defines the port's PCD configuration. + It changes it from its default configuration which is PCD + disabled (BMI to BMI) and configures it according to the passed + parameters. + + May be used for Rx and offline parsing ports only + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPortPcd A Structure of parameters defining the port's PCD + configuration. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_SetPCD(t_Handle h_FmPort, t_FmPortPcdParams *p_FmPortPcd); + +/**************************************************************************//** + @Function FM_PORT_DeletePCD + + @Description Calling this routine releases the port's PCD configuration. + The port returns to its default configuration which is PCD + disabled (BMI to BMI) and all PCD configuration is removed. + + May be used for Rx and offline parsing ports which are + in PCD mode only + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. 
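For illustration only (editorial addition): a sketch of binding an Rx port to the HW parser and two Keygen schemes via FM_PORT_SetPCD(). The names e_FM_PORT_PCD_SUPPORT_PRS_AND_KG and HEADER_TYPE_ETHERNET are assumed to exist elsewhere in the PCD headers, the net-environment and scheme handles are assumed to come from the FM_PCD setup calls, and memset() is assumed to be declared by the platform headers.

/* Sketch: attach a parser + Keygen PCD flow to an Rx port. */
static t_Error bind_port_to_pcd(t_Handle h_FmPort, t_Handle h_NetEnv,
                                t_Handle h_Scheme0, t_Handle h_Scheme1)
{
    t_FmPortPcdPrsParams prsParams;
    t_FmPortPcdKgParams  kgParams;
    t_FmPortPcdParams    pcdParams;

    memset(&prsParams, 0, sizeof(prsParams));
    prsParams.parsingOffset = 0;
    prsParams.firstPrsHdr   = HEADER_TYPE_ETHERNET;            /* assumed name */

    memset(&kgParams, 0, sizeof(kgParams));
    kgParams.numOfSchemes = 2;
    kgParams.h_Schemes[0] = h_Scheme0;
    kgParams.h_Schemes[1] = h_Scheme1;
    kgParams.directScheme = FALSE;                             /* scheme chosen by LCV */

    memset(&pcdParams, 0, sizeof(pcdParams));
    pcdParams.pcdSupport  = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG;  /* assumed name */
    pcdParams.h_NetEnv    = h_NetEnv;
    pcdParams.p_PrsParams = &prsParams;
    pcdParams.p_KgParams  = &kgParams;

    return FM_PORT_SetPCD(h_FmPort, &pcdParams);
}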
+ + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_DeletePCD(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_AttachPCD + + @Description This routine may be called after FM_PORT_DetachPCD was called, + to return to the originally configured PCD support flow. + The couple of routines are used to allow PCD configuration changes + that demand that PCD will not be used while changes take place. + + May be used for Rx and offline parsing ports which are + in PCD mode only + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). +*//***************************************************************************/ +t_Error FM_PORT_AttachPCD(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_DetachPCD + + @Description Calling this routine detaches the port from its PCD functionality. + The port returns to its default flow which is BMI to BMI. + + May be used for Rx and offline parsing ports which are + in PCD mode only + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_AttachPCD(). +*//***************************************************************************/ +t_Error FM_PORT_DetachPCD(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_PcdPlcrAllocProfiles + + @Description This routine may be called only for ports that use the Policer in + order to allocate private policer profiles. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] numOfProfiles The number of required policer profiles + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init() and FM_PCD_Init(), and before FM_PORT_SetPCD(). +*//***************************************************************************/ +t_Error FM_PORT_PcdPlcrAllocProfiles(t_Handle h_FmPort, uint16_t numOfProfiles); + +/**************************************************************************//** + @Function FM_PORT_PcdPlcrFreeProfiles + + @Description This routine should be called for freeing private policer profiles. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init() and FM_PCD_Init(), and before FM_PORT_SetPCD(). +*//***************************************************************************/ +t_Error FM_PORT_PcdPlcrFreeProfiles(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_PcdKgModifyInitialScheme + + @Description This routine may be called only for ports that use the keygen in + order to change the initial scheme frame should be routed to. + The change may be of a scheme id (in case of direct mode), + from direct to indirect, or from indirect to direct - specifying the scheme id. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPcdKgScheme A structure of parameters for defining whether + a scheme is direct/indirect, and if direct - scheme id. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD(). 
+*//***************************************************************************/ +t_Error FM_PORT_PcdKgModifyInitialScheme (t_Handle h_FmPort, t_FmPcdKgSchemeSelect *p_FmPcdKgScheme); + +/**************************************************************************//** + @Function FM_PORT_PcdPlcrModifyInitialProfile + + @Description This routine may be called for ports with flows e_FM_PORT_PCD_SUPPORT_PLCR_ONLY or e_FM_PORT_PCD_SUPPORT_PRS_AND_PLCR only, to change the initial Policer profile to which a frame should be routed. The change may be of a profile and/or absolute/direct mode selection. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] h_Profile Policer profile handle + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD(). +*//***************************************************************************/ +t_Error FM_PORT_PcdPlcrModifyInitialProfile (t_Handle h_FmPort, t_Handle h_Profile); + +/**************************************************************************//** + @Function FM_PORT_PcdCcModifyTree + + @Description This routine may be called for ports that use a coarse classification tree if the user wishes to replace the tree. The routine may not be called while the port receives packets using the PCD functionalities; therefore the port must first be detached from the PCD, only then may the routine be called, and afterwards the port may be attached to the PCD again. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] h_CcTree A CC tree that was already built. The tree id as returned from the BuildTree routine. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(), FM_PORT_SetPCD() and FM_PORT_DetachPCD() +*//***************************************************************************/ +t_Error FM_PORT_PcdCcModifyTree (t_Handle h_FmPort, t_Handle h_CcTree); + +/**************************************************************************//** + @Function FM_PORT_PcdKgBindSchemes + + @Description This routine may be called for adding more schemes for the port to be bound to. The selected schemes are not added (they must already exist), just this specific port starts using them. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_PortScheme A structure defining the list of schemes to be added. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD(). +*//***************************************************************************/ +t_Error FM_PORT_PcdKgBindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme); + +/**************************************************************************//** + @Function FM_PORT_PcdKgUnbindSchemes + + @Description This routine may be called for removing schemes from the set the port is bound to. The selected schemes are not removed or invalidated, just this specific port stops using them. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_PortScheme A structure defining the list of schemes to be removed. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init() and FM_PORT_SetPCD().
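A usage sketch (editorial addition, not from the original header): replacing the CC tree of a running port requires bracketing the change with FM_PORT_DetachPCD()/FM_PORT_AttachPCD(); the error code returned when re-attach fails is a placeholder.

/* Sketch: swap the coarse-classification tree while PCD traffic is paused. */
static t_Error replace_cc_tree(t_Handle h_FmPort, t_Handle h_NewCcTree)
{
    t_Error err = FM_PORT_DetachPCD(h_FmPort);
    if (err != E_OK)
        return err;

    err = FM_PORT_PcdCcModifyTree(h_FmPort, h_NewCcTree);

    /* Re-attach regardless, so the port resumes its configured PCD flow. */
    if (FM_PORT_AttachPCD(h_FmPort) != E_OK && err == E_OK)
        err = E_INVALID_STATE;  /* placeholder error code - assumption */

    return err;
}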
+*//***************************************************************************/ +t_Error FM_PORT_PcdKgUnbindSchemes (t_Handle h_FmPort, t_FmPcdPortSchemesParams *p_PortScheme); + +/**************************************************************************//** + @Function FM_PORT_PcdPrsModifyStartOffset + + @Description Runtime change of the parser start offset within the header. The routine may not be called while the port receives packets using the PCD functionalities; therefore the port must first be detached from the PCD, only then may the routine be called, and afterwards the port may be attached to the PCD again. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_FmPcdPrsStart A structure of parameters for defining the start point for the parser. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(), FM_PORT_SetPCD() and FM_PORT_DetachPCD(). +*//***************************************************************************/ +t_Error FM_PORT_PcdPrsModifyStartOffset (t_Handle h_FmPort, t_FmPcdPrsStart *p_FmPcdPrsStart); + +/** @} */ /* end of FM_PORT_pcd_runtime_control_grp group */ +/** @} */ /* end of FM_PORT_runtime_control_grp group */ + + +/**************************************************************************//** + @Group FM_PORT_runtime_data_grp FM Port Runtime Data-path Unit + + @Description FM Port Runtime data unit API functions, definitions and enums. This API is valid only if working in Independent-Mode. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_PORT_ImTx + + @Description Tx function, called to transmit a data buffer on the port. + + @Param[in] h_FmPort A handle to a FM Port module. + @Param[in] p_Data A pointer to an LCP data buffer. + @Param[in] length Size of data for transmission. + @Param[in] lastBuffer Buffer position - TRUE for the last buffer of a frame, including a single buffer frame + @Param[in] h_BufContext A handle of the user associated with this buffer + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). NOTE - This routine can be used only when working in Independent-Mode mode. +*//***************************************************************************/ +t_Error FM_PORT_ImTx( t_Handle h_FmPort, uint8_t *p_Data, uint16_t length, bool lastBuffer, t_Handle h_BufContext); + +/**************************************************************************//** + @Function FM_PORT_ImTxConf + + @Description Tx port confirmation routine, optional, may be called to verify transmission of all frames. The procedure performed by this routine will be performed automatically on next buffer transmission, but if desired, calling this routine will invoke this action on demand. + + @Param[in] h_FmPort A handle to a FM Port module. + + @Cautions Allowed only following FM_PORT_Init(). NOTE - This routine can be used only when working in Independent-Mode mode. +*//***************************************************************************/ +void FM_PORT_ImTxConf(t_Handle h_FmPort); + +/**************************************************************************//** + @Function FM_PORT_ImRx + + @Description Rx function, may be called to poll for received buffers. Normally, Rx process is invoked by the driver on Rx interrupt. Alternatively, this routine may be called on demand.
+ + @Param[in] h_FmPort A handle to a FM Port module. + + @Return E_OK on success; Error code otherwise. + + @Cautions Allowed only following FM_PORT_Init(). + NOTE - This routine can be used only when working in + Independent-Mode mode. +*//***************************************************************************/ +t_Error FM_PORT_ImRx(t_Handle h_FmPort); + +/** @} */ /* end of FM_PORT_runtime_data_grp group */ +/** @} */ /* end of FM_PORT_grp group */ +/** @} */ /* end of FM_grp group */ + + + + +#endif /* __FM_PORT_EXT */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_rtc_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/fm_rtc_ext.h @@ -0,0 +1,592 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File fm_rtc_ext.h + + @Description External definitions and API for FM RTC IEEE1588 Timer Module. + + @Cautions None. +*//***************************************************************************/ + +#ifndef __FM_RTC_EXT_H__ +#define __FM_RTC_EXT_H__ + + +#include "error_ext.h" +#include "std_ext.h" + + +/**************************************************************************//** + + @Group FM_grp Frame Manager API + + @Description FM API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group fm_rtc_grp FM RTC + + @Description FM RTC functions, definitions and enums. 
+ + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group fm_rtc_init_grp FM RTC Initialization Unit + + @Description FM RTC initialization API. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description FM RTC Alarm Polarity Options. +*//***************************************************************************/ +typedef enum e_FmRtcAlarmPolarity +{ + e_FM_RTC_ALARM_POLARITY_ACTIVE_HIGH, /**< Active-high output polarity */ + e_FM_RTC_ALARM_POLARITY_ACTIVE_LOW /**< Active-low output polarity */ +} e_FmRtcAlarmPolarity; + +/**************************************************************************//** + @Description FM RTC Trigger Polarity Options. +*//***************************************************************************/ +typedef enum e_FmRtcTriggerPolarity +{ + e_FM_RTC_TRIGGER_ON_RISING_EDGE, /**< Trigger on rising edge */ + e_FM_RTC_TRIGGER_ON_FALLING_EDGE /**< Trigger on falling edge */ +} e_FmRtcTriggerPolarity; + +/**************************************************************************//** + @Description IEEE1588 Timer Module FM RTC Optional Clock Sources. +*//***************************************************************************/ +typedef enum e_FmSrcClock +{ + e_FM_RTC_SOURCE_CLOCK_EXTERNAL, /**< external high precision timer reference clock */ + e_FM_RTC_SOURCE_CLOCK_SYSTEM, /**< MAC system clock */ + e_FM_RTC_SOURCE_CLOCK_OSCILATOR /**< RTC clock oscilator */ +}e_FmSrcClk; + +/**************************************************************************//** + @Description FM RTC configuration parameters structure. + + This structure should be passed to FM_RTC_Config(). +*//***************************************************************************/ +typedef struct t_FmRtcParams +{ + t_Handle h_Fm; /**< FM Handle*/ + uintptr_t baseAddress; /**< Base address of FM RTC registers */ + t_Handle h_App; /**< A handle to an application layer object; This handle will + be passed by the driver upon calling the above callbacks */ +} t_FmRtcParams; + + +/**************************************************************************//** + @Function FM_RTC_Config + + @Description Configures the FM RTC module according to user's parameters. + + The driver assigns default values to some FM RTC parameters. + These parameters can be overwritten using the advanced + configuration routines. + + @Param[in] p_FmRtcParam - FM RTC configuration parameters. + + @Return Handle to the new FM RTC object; NULL pointer on failure. + + @Cautions None +*//***************************************************************************/ +t_Handle FM_RTC_Config(t_FmRtcParams *p_FmRtcParam); + +/**************************************************************************//** + @Function FM_RTC_Init + + @Description Initializes the FM RTC driver and hardware. + + @Param[in] h_FmRtc - Handle to FM RTC object. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_Init(t_Handle h_FmRtc); + +/**************************************************************************//** + @Function FM_RTC_Free + + @Description Frees the FM RTC object and all allocated resources. 
+ + @Param[in] h_FmRtc - Handle to FM RTC object. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_Free(t_Handle h_FmRtc); + + +/**************************************************************************//** + @Group fm_rtc_adv_config_grp FM RTC Advanced Configuration Unit + + @Description FM RTC advanced configuration functions. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function FM_RTC_ConfigPeriod + + @Description Configures the period of the timestamp if different than + default [1000]. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] period - Period in nano-seconds. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigPeriod(t_Handle h_FmRtc, uint32_t period); + +/**************************************************************************//** + @Function FM_RTC_ConfigSourceClock + + @Description Configures the source clock of the RTC. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] srcClk - Source clock selection. + @Param[in] freqInMhz - the source-clock frequency (in MHz). + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigSourceClock(t_Handle h_FmRtc, + e_FmSrcClk srcClk, + uint32_t freqInMhz); + +/**************************************************************************//** + @Function FM_RTC_ConfigPulseRealignment + + @Description Configures the RTC to automatic FIPER pulse realignment in + response to timer adjustments [FALSE] + + In this mode, the RTC clock is identical to the source clock. + This feature can be useful when the system contains an external + RTC with inherent frequency compensation. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] enable - TRUE to enable automatic realignment. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigPulseRealignment(t_Handle h_FmRtc, bool enable); + +/**************************************************************************//** + @Function FM_RTC_ConfigFrequencyBypass + + @Description Configures the RTC to bypass the frequency compensation + mechanism. [FALSE] + + In this mode, the RTC clock is identical to the source clock. + This feature can be useful when the system contains an external + RTC with inherent frequency compensation. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] enabled - TRUE to bypass frequency compensation; + FALSE otherwise. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). 
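+
+              As an illustrative sketch only: the advanced configuration
+              routines in this group are typically invoked on the handle
+              returned by FM_RTC_Config(), before FM_RTC_Init(); the values
+              below are placeholders, not recommendations:
+
+              @code
+                  FM_RTC_ConfigSourceClock(h_FmRtc, e_FM_RTC_SOURCE_CLOCK_SYSTEM, 600);
+                  FM_RTC_ConfigFrequencyBypass(h_FmRtc, FALSE);
+              @endcode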
+*//***************************************************************************/ +t_Error FM_RTC_ConfigFrequencyBypass(t_Handle h_FmRtc, bool enabled); + +/**************************************************************************//** + @Function FM_RTC_ConfigInvertedInputClockPhase + + @Description Configures the RTC to invert the source clock phase on input. + [FALSE] + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] inverted - TRUE to invert the source clock phase on input. + FALSE otherwise. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigInvertedInputClockPhase(t_Handle h_FmRtc, bool inverted); + +/**************************************************************************//** + @Function FM_RTC_ConfigInvertedOutputClockPhase + + @Description Configures the RTC to invert the output clock phase. + [FALSE] + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] inverted - TRUE to invert the output clock phase. + FALSE otherwise. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigInvertedOutputClockPhase(t_Handle h_FmRtc, bool inverted); + +/**************************************************************************//** + @Function FM_RTC_ConfigOutputClockDivisor + + @Description Configures the divisor for generating the output clock from + the RTC clock. [0x00000002] + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] divisor - Divisor for generation of the output clock. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigOutputClockDivisor(t_Handle h_FmRtc, uint16_t divisor); + +/**************************************************************************//** + @Function FM_RTC_ConfigAlarmPolarity + + @Description Configures the polarity (active-high/active-low) of a specific + alarm signal. [e_FM_RTC_ALARM_POLARITY_ACTIVE_HIGH] + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] alarmId - Alarm ID. + @Param[in] alarmPolarity - Alarm polarity. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). +*//***************************************************************************/ +t_Error FM_RTC_ConfigAlarmPolarity(t_Handle h_FmRtc, + uint8_t alarmId, + e_FmRtcAlarmPolarity alarmPolarity); + +/**************************************************************************//** + @Function FM_RTC_ConfigExternalTriggerPolarity + + @Description Configures the polarity (rising/falling edge) of a specific + external trigger signal. [e_FM_RTC_TRIGGER_ON_FALLING_EDGE] + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] triggerId - Trigger ID. + @Param[in] triggerPolarity - Trigger polarity. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously created using FM_RTC_Config(). 
+*//***************************************************************************/ +t_Error FM_RTC_ConfigExternalTriggerPolarity(t_Handle h_FmRtc, + uint8_t triggerId, + e_FmRtcTriggerPolarity triggerPolarity); + +/** @} */ /* end of fm_rtc_adv_config_grp */ +/** @} */ /* end of fm_rtc_init_grp */ + + +/**************************************************************************//** + @Group fm_rtc_control_grp FM RTC Control Unit + + @Description FM RTC runtime control API. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function t_FmRtcExceptionsCallback + + @Description Exceptions user callback routine, used for RTC different mechanisms. + + @Param[in] h_App - User's application descriptor. + @Param[in] id - source id. +*//***************************************************************************/ +typedef void (t_FmRtcExceptionsCallback) ( t_Handle h_App, uint8_t id); + +/**************************************************************************//** + @Description FM RTC alarm parameters. +*//***************************************************************************/ +typedef struct t_FmRtcAlarmParams { + uint8_t alarmId; /**< 0 or 1 */ + uint64_t alarmTime; /**< In nanoseconds, the time when the alarm + should go off - must be a multiple of + the RTC period */ + t_FmRtcExceptionsCallback *f_AlarmCallback; /**< This routine will be called when RTC + reaches alarmTime */ + bool clearOnExpiration; /**< TRUE to turn off the alarm once expired. */ +} t_FmRtcAlarmParams; + +/**************************************************************************//** + @Description FM RTC Periodic Pulse parameters. +*//***************************************************************************/ +typedef struct t_FmRtcPeriodicPulseParams { + uint8_t periodicPulseId; /**< 0 or 1 */ + uint64_t periodicPulsePeriod; /**< In Nanoseconds. Must be + a multiple of the RTC period */ + t_FmRtcExceptionsCallback *f_PeriodicPulseCallback; /**< This routine will be called every + periodicPulsePeriod. */ +} t_FmRtcPeriodicPulseParams; + +/**************************************************************************//** + @Description FM RTC Periodic Pulse parameters. +*//***************************************************************************/ +typedef struct t_FmRtcExternalTriggerParams { + uint8_t externalTriggerId; /**< 0 or 1 */ + bool usePulseAsInput; /**< Use the pulse interrupt instead of + an external signal */ + t_FmRtcExceptionsCallback *f_ExternalTriggerCallback; /**< This routine will be called every + periodicPulsePeriod. */ +} t_FmRtcExternalTriggerParams; + + +/**************************************************************************//** + @Function FM_RTC_Enable + + @Description Enable the RTC (time count is started). + + The user can select to resume the time count from previous + point, or to restart the time count. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] resetClock - Restart the time count from zero. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_Enable(t_Handle h_FmRtc, bool resetClock); + +/**************************************************************************//** + @Function FM_RTC_Disable + + @Description Disables the RTC (time count is stopped). 
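+
+              A minimal (illustrative) start/stop sketch, assuming an
+              initialized h_FmRtc:
+
+              @code
+                  FM_RTC_Enable(h_FmRtc, TRUE);   /* restart the time count   */
+                  /* ... IEEE1588 timestamping in use ... */
+                  FM_RTC_Disable(h_FmRtc);        /* stop the time count      */
+              @endcode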
+ + @Param[in] h_FmRtc - Handle to FM RTC object. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_Disable(t_Handle h_FmRtc); + +/**************************************************************************//** + @Function FM_RTC_SetClockOffset + + @Description Sets the clock offset (usually relative to another clock). + + The user can pass a negative offset value. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] offset - New clock offset (in nanoseconds). + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_SetClockOffset(t_Handle h_FmRtc, int64_t offset); + +/**************************************************************************//** + @Function FM_RTC_SetAlarm + + @Description Schedules an alarm event to a given RTC time. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] p_FmRtcAlarmParams - Alarm parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). + Must be called only prior to FM_RTC_Enable(). +*//***************************************************************************/ +t_Error FM_RTC_SetAlarm(t_Handle h_FmRtc, t_FmRtcAlarmParams *p_FmRtcAlarmParams); + +/**************************************************************************//** + @Function FM_RTC_SetPeriodicPulse + + @Description Sets a periodic pulse. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] p_FmRtcPeriodicPulseParams - Periodic pulse parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). + Must be called only prior to FM_RTC_Enable(). +*//***************************************************************************/ +t_Error FM_RTC_SetPeriodicPulse(t_Handle h_FmRtc, t_FmRtcPeriodicPulseParams *p_FmRtcPeriodicPulseParams); + +/**************************************************************************//** + @Function FM_RTC_ClearPeriodicPulse + + @Description Clears a periodic pulse. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] periodicPulseId - Periodic pulse id. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_ClearPeriodicPulse(t_Handle h_FmRtc, uint8_t periodicPulseId); + +/**************************************************************************//** + @Function FM_RTC_SetExternalTrigger + + @Description Sets an external trigger indication and define a callback + routine to be called on such event. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] p_FmRtcExternalTriggerParams - External Trigger parameters. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). 
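+
+              An illustrative sketch; MyTriggerCallback is a placeholder
+              routine supplied by the application:
+
+              @code
+                  t_FmRtcExternalTriggerParams trigParams;
+
+                  memset(&trigParams, 0, sizeof(trigParams));
+                  trigParams.externalTriggerId         = 0;
+                  trigParams.usePulseAsInput           = FALSE;
+                  trigParams.f_ExternalTriggerCallback = MyTriggerCallback;
+
+                  FM_RTC_SetExternalTrigger(h_FmRtc, &trigParams);
+              @endcode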
+*//***************************************************************************/ +t_Error FM_RTC_SetExternalTrigger(t_Handle h_FmRtc, t_FmRtcExternalTriggerParams *p_FmRtcExternalTriggerParams); + +/**************************************************************************//** + @Function FM_RTC_ClearExternalTrigger + + @Description Clears external trigger indication. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] id - External Trigger id. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_ClearExternalTrigger(t_Handle h_FmRtc, uint8_t id); + +/**************************************************************************//** + @Function FM_RTC_GetExternalTriggerTimeStamp + + @Description Reads the External Trigger TimeStamp. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] triggerId - External Trigger id. + @Param[out] p_TimeStamp - External Trigger timestamp (in nanoseconds). + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_GetExternalTriggerTimeStamp(t_Handle h_FmRtc, + uint8_t triggerId, + uint64_t *p_TimeStamp); + +/**************************************************************************//** + @Function FM_RTC_GetCurrentTime + + @Description Returns the current RTC time. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[out] p_Ts - returned time stamp (in nanoseconds). + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_GetCurrentTime(t_Handle h_FmRtc, uint64_t *p_Ts); + +/**************************************************************************//** + @Function FM_RTC_SetCurrentTime + + @Description Sets the current RTC time. + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] ts - The new time stamp (in nanoseconds). + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_SetCurrentTime(t_Handle h_FmRtc, uint64_t ts); + +/**************************************************************************//** + @Function FM_RTC_GetFreqCompensation + + @Description TODO + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[out] p_Compensation - A pointer to the returned value of compensation. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). +*//***************************************************************************/ +t_Error FM_RTC_GetFreqCompensation(t_Handle h_FmRtc, uint32_t *p_Compensation); + +/**************************************************************************//** + @Function FM_RTC_SetFreqCompensation + + @Description TODO + + @Param[in] h_FmRtc - Handle to FM RTC object. + @Param[in] freqCompensation - the new desired compensation value to be set. + + @Return E_OK on success; Error code otherwise. + + @Cautions h_FmRtc must have been previously initialized using FM_RTC_Init(). 
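+
+              Since the meaning of the compensation value is hardware
+              specific (see the TODO above), only a neutral read-modify-write
+              sketch is shown here:
+
+              @code
+                  uint32_t comp;
+
+                  if (FM_RTC_GetFreqCompensation(h_FmRtc, &comp) == E_OK)
+                      FM_RTC_SetFreqCompensation(h_FmRtc, comp); /* written back unchanged */
+              @endcode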
+*//***************************************************************************/ +t_Error FM_RTC_SetFreqCompensation(t_Handle h_FmRtc, uint32_t freqCompensation); + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) +/**************************************************************************//** + @Function FM_RTC_DumpRegs + + @Description Dumps all FM registers + + @Param[in] h_FmRtc A handle to an FM RTC Module. + + @Return E_OK on success; + + @Cautions Allowed only FM_Init(). +*//***************************************************************************/ +t_Error FM_RTC_DumpRegs(t_Handle h_FmRtc); +#endif /* (defined(DEBUG_ERRORS) && ... */ + +/** @} */ /* end of fm_rtc_control_grp */ +/** @} */ /* end of fm_rtc_grp */ +/** @} */ /* end of FM_grp group */ + + +#endif /* __FM_RTC_EXT_H__ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/Peripherals/mii_acc_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/Peripherals/mii_acc_ext.h @@ -0,0 +1,75 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef __MII_ACC_EXT_H +#define __MII_ACC_EXT_H + + +/**************************************************************************//** + @Function MII_ReadPhyReg + + @Description This routine is called to read a specified PHY + register value. + + @Param[in] h_MiiAccess - Handle to MII configuration access registers + @Param[in] phyAddr - PHY address (0-31). + @Param[in] reg - PHY register to read + @Param[out] p_Data - Gets the register value. + + @Return Always zero (success). 
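+
+              A minimal (illustrative) read of the IEEE 802.3 basic status
+              register (register 1); the PHY address 0x01 is a placeholder:
+
+              @code
+                  uint16_t bmsr;
+
+                  MII_ReadPhyReg(h_MiiAccess, 0x01, 1, &bmsr);
+              @endcode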
+*//***************************************************************************/ +int MII_ReadPhyReg(t_Handle h_MiiAccess, + uint8_t phyAddr, + uint8_t reg, + uint16_t *p_Data); + +/**************************************************************************//** + @Function MII_WritePhyReg + + @Description This routine is called to write data to a specified PHY + register. + + @Param[in] h_MiiAccess - Handle to MII configuration access registers + @Param[in] phyAddr - PHY address (0-31). + @Param[in] reg - PHY register to write + @Param[in] data - Data to write in register. + + @Return Always zero (success). +*//***************************************************************************/ +int MII_WritePhyReg(t_Handle h_MiiAccess, + uint8_t phyAddr, + uint8_t reg, + uint16_t data); + + +#endif /* __MII_ACC_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/core_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/core_ext.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File core_ext.h + + @Description Generic interface to basic core operations. + + The system integrator must ensure that this interface is + mapped to a specific core implementation, by including the + appropriate header file. +*//***************************************************************************/ +#ifndef __CORE_EXT_H +#define __CORE_EXT_H + + +#ifdef NCSW_PPC_CORE +#include "ppc_ext.h" +#elif defined(NCSW_VXWORKS) +#include "core_vxw_ext.h" +#else +#error "Core is not defined!" +#endif /* NCSW_CORE */ + +#if (!defined(CORE_IS_LITTLE_ENDIAN) && !defined(CORE_IS_BIG_ENDIAN)) +#error "Must define core as little-endian or big-endian!" 
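+/* For example (illustrative), the PowerPC core header included above
+ * provides one of these definitions:
+ *
+ *     #define CORE_IS_BIG_ENDIAN
+ */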
+#endif /* (!defined(CORE_IS_LITTLE_ENDIAN) && ... */ + + +/**************************************************************************//** + @Function CORE_GetId + + @Description Returns the core ID in the system. + + @Return Core ID. +*//***************************************************************************/ +uint32_t CORE_GetId(void); + +/**************************************************************************//** + @Function CORE_MemoryBarrier + + @Description This routine will cause the core to stop executing any commands + until all previous memory read/write commands are completely out + of the core's pipeline. + + @Return None. +*//***************************************************************************/ +void CORE_MemoryBarrier(void); + + +#endif /* __CORE_EXT_H */ + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/cores/e500v2_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/cores/e500v2_ext.h @@ -0,0 +1,413 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File e500v2_ext.h + + @Description E500 external definitions prototypes + This file is not included by the E500 + source file as it is an assembly file. It is used + only for prototypes exposure, for inclusion + by user and other modules. +*//***************************************************************************/ + +#ifndef __E500V2_EXT_H +#define __E500V2_EXT_H + +#include "std_ext.h" + + +/* Layer 1 Cache Manipulations + *============================== + * Should not be called directly by the user. 
+ */ +void L1DCache_Invalidate (void); +void L1ICache_Invalidate(void); +void L1DCache_Enable(void); +void L1ICache_Enable(void); +void L1DCache_Disable(void); +void L1ICache_Disable(void); +void L1DCache_Flush(void); +void L1ICache_Flush(void); +/* + * + */ +uint32_t L1DCache_LineLock(uint32_t addr); +uint32_t L1ICache_LineLock(uint32_t addr); +void L1Cache_BroadCastEnable(void); +void L1Cache_BroadCastDisable(void); + + +#define CORE_DCacheEnable E500_DCacheEnable +#define CORE_ICacheEnable E500_ICacheEnable +#define CORE_DCacheDisable E500_DCacheDisable +#define CORE_ICacheDisable E500_ICacheDisable +#define CORE_GetId E500_GetId +#define CORE_TestAndSet E500_TestAndSet +#define CORE_MemoryBarrier E500_MemoryBarrier +#define CORE_InstructionSync E500_InstructionSync + +#define CORE_SetDozeMode E500_SetDozeMode +#define CORE_SetNapMode E500_SetNapMode +#define CORE_SetSleepMode E500_SetSleepMode +#define CORE_SetJogMode E500_SetJogMode +#define CORE_SetDeepSleepMode E500_SetDeepSleepMode + +#define CORE_RecoverDozeMode E500_RecoverDozeMode +#define CORE_RecoverNapMode E500_RecoverNapMode +#define CORE_RecoverSleepMode E500_RecoverSleepMode +#define CORE_RecoverJogMode E500_RecoverJogMode + +void E500_SetDozeMode(void); +void E500_SetNapMode(void); +void E500_SetSleepMode(void); +void E500_SetJogMode(void); +t_Error E500_SetDeepSleepMode(uint32_t bptrAddress); + +void E500_RecoverDozeMode(void); +void E500_RecoverNapMode(void); +void E500_RecoverSleepMode(void); +void E500_RecoverJogMode(void); + + +/**************************************************************************//** + @Group E500_id E500 Application Programming Interface + + @Description E500 API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group E500_init_grp E500 Initialization Unit + + @Description E500 initialization unit API functions, definitions and enums + + @{ +*//***************************************************************************/ + + +/**************************************************************************//** + @Function E500_DCacheEnable + + @Description Enables the data cache for memory pages that are + not cache inhibited. + + @Return None. +*//***************************************************************************/ +void E500_DCacheEnable(void); + +/**************************************************************************//** + @Function E500_ICacheEnable + + @Description Enables the instruction cache for memory pages that are + not cache inhibited. + + @Return None. +*//***************************************************************************/ +void E500_ICacheEnable(void); + +/**************************************************************************//** + @Function E500_DCacheDisable + + @Description Disables the data cache. + + @Return None. +*//***************************************************************************/ +void E500_DCacheDisable(void); + +/**************************************************************************//** + @Function E500_ICacheDisable + + @Description Disables the instruction cache. + + @Return None. +*//***************************************************************************/ +void E500_ICacheDisable(void); + +/**************************************************************************//** + @Function E500_DCacheFlush + + @Description Flushes the data cache + + @Return None. 
+*//***************************************************************************/ +void E500_DCacheFlush(void); + +/**************************************************************************//** + @Function E500_ICacheFlush + + @Description Flushes the instruction cache. + + @Return None. +*//***************************************************************************/ +void E500_ICacheFlush(void); + +/**************************************************************************//** + @Function E500_DCacheSetStashId + + @Description Set Stash Id for data cache + + @Param[in] stashId the stash id to be set. + + @Return None. +*//***************************************************************************/ +void E500_DCacheSetStashId(uint8_t stashId); + +/**************************************************************************//** + @Description E500mc L2 Cache Operation Mode +*//***************************************************************************/ +typedef enum e_E500mcL2CacheMode +{ + e_L2_CACHE_MODE_DATA_ONLY = 0x00000001, /**< Cache data only */ + e_L2_CACHE_MODE_INST_ONLY = 0x00000002, /**< Cache instructions only */ + e_L2_CACHE_MODE_DATA_AND_INST = 0x00000003 /**< Cache data and instructions */ +} e_E500mcL2CacheMode; + +/**************************************************************************//** + @Function E500_L2CacheEnable + + @Description Enables the cache for memory pages that are not cache inhibited. + + @param[in] mode - L2 cache mode: data only, instruction only or instruction and data. + + @Return None. + + @Cautions This routine must be call only ONCE for both caches. I.e. it is + not possible to call this routine for i-cache and than to call + again for d-cache; The second call will override the first one. +*//***************************************************************************/ +void E500_L2CacheEnable(e_E500mcL2CacheMode mode); + +/**************************************************************************//** + @Function E500_L2CacheDisable + + @Description Disables the cache (data instruction or both). + + @Return None. + +*//***************************************************************************/ +void E500_L2CacheDisable(void); + +/**************************************************************************//** + @Function E500_L2CacheFlush + + @Description Flushes the cache. + + @Return None. +*//***************************************************************************/ +void E500_L2CacheFlush(void); + +/**************************************************************************//** + @Function E500_L2SetStashId + + @Description Set Stash Id + + @Param[in] stashId the stash id to be set. + + @Return None. +*//***************************************************************************/ +void E500_L2SetStashId(uint8_t stashId); + +/**************************************************************************//** + @Function E500_AddressBusStreamingEnable + + @Description Enables address bus streaming on the CCB. + + This setting, along with the ECM streaming configuration + parameters, enables address bus streaming on the CCB. + + @Return None. +*//***************************************************************************/ +void E500_AddressBusStreamingEnable(void); + +/**************************************************************************//** + @Function E500_AddressBusStreamingDisable + + @Description Disables address bus streaming on the CCB. + + @Return None. 
+*//***************************************************************************/ +void E500_AddressBusStreamingDisable(void); + +/**************************************************************************//** + @Function E500_AddressBroadcastEnable + + @Description Enables address broadcast. + + The e500 broadcasts cache management instructions (dcbst, dcblc + (CT = 1), icblc (CT = 1), dcbf, dcbi, mbar, msync, tlbsync, icbi) + based on ABE. ABE must be set to allow management of external + L2 caches. + + @Return None. +*//***************************************************************************/ +void E500_AddressBroadcastEnable(void); + +/**************************************************************************//** + @Function E500_AddressBroadcastDisable + + @Description Disables address broadcast. + + The e500 broadcasts cache management instructions (dcbst, dcblc + (CT = 1), icblc (CT = 1), dcbf, dcbi, mbar, msync, tlbsync, icbi) + based on ABE. ABE must be set to allow management of external + L2 caches. + + @Return None. +*//***************************************************************************/ +void E500_AddressBroadcastDisable(void); + +/**************************************************************************//** + @Function E500_IsTaskletSupported + + @Description Checks if tasklets are supported by the e500 interrupt handler. + + @Retval TRUE - Tasklets are supported. + @Retval FALSE - Tasklets are not supported. +*//***************************************************************************/ +bool E500_IsTaskletSupported(void); + +void E500_EnableTimeBase(void); +void E500_DisableTimeBase(void); + +uint64_t E500_GetTimeBaseTime(void); + +void E500_GenericIntrInit(void); + +t_Error E500_SetIntr(int ppcIntrSrc, + void (* Isr)(t_Handle handle), + t_Handle handle); + +t_Error E500_ClearIntr(int ppcIntrSrc); + +/**************************************************************************//** + @Function E500_GenericIntrHandler + + @Description This is the general e500 interrupt handler. + + It is called by the main assembly interrupt handler + when an exception occurs and no other function has been + assigned to this exception. + + @Param intrEntry - (In) The exception interrupt vector entry. +*//***************************************************************************/ +void E500_GenericIntrHandler(uint32_t intrEntry); + +/**************************************************************************//** + @Function CriticalIntr + + @Description This is the specific critical e500 interrupt handler. + + It is called by the main assembly interrupt handler + when an critical interrupt. + + @Param intrEntry - (In) The exception interrupt vector entry. +*//***************************************************************************/ +void CriticalIntr(uint32_t intrEntry); + + +/**************************************************************************//** + @Function E500_GetId + + @Description Returns the core ID in the system. + + @Return Core ID. +*//***************************************************************************/ +uint32_t E500_GetId(void); + +/**************************************************************************//** + @Function E500_TestAndSet + + @Description This routine tries to atomically test-and-set an integer + in memory to a non-zero value. + + The memory will be set only if it is tested as zero, in which + case the routine returns the new non-zero value; otherwise the + routine returns zero. 
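+
+              A minimal (illustrative) busy-wait lock built on this routine;
+              'lock' is a placeholder variable:
+
+              @code
+                  static volatile int lock = 0;
+
+                  while (!E500_TestAndSet(&lock))
+                      ;               /* spin until the flag was zero and is now set */
+                  /* ... critical section ... */
+                  lock = 0;           /* release */
+              @endcode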
+ + @Param[in] p - pointer to a volatile int in memory, on which test-and-set + operation should be made. + + @Retval Zero - Operation failed - memory was already set. + @Retval Non-zero - Operation succeeded - memory has been set. +*//***************************************************************************/ +int E500_TestAndSet(volatile int *p); + +/**************************************************************************//** + @Function E500_MemoryBarrier + + @Description This routine will cause the core to stop executing any commands + until all previous memory read/write commands are completely out + of the core's pipeline. + + @Return None. +*//***************************************************************************/ +static __inline__ void E500_MemoryBarrier(void) +{ +#ifdef CORE_E500MC + __asm__ ("mbar 1"); +#else + /**** ERRATA WORK AROUND START ****/ + /* ERRATA num: CPU1 */ + /* Description: "mbar MO = 1" instruction fails to order caching-inhibited + guarded loads and stores. */ + + /* "msync" instruction is used instead */ + + __asm__ ("msync"); + + /**** ERRATA WORK AROUND END ****/ +#endif +} + +/**************************************************************************//** + @Function E500_InstructionSync + + @Description This routine will cause the core to wait for previous instructions + (including any interrupts they generate) to complete before the + synchronization command executes, which purges all instructions + from the processor's pipeline and refetches the next instruction. + + @Return None. +*//***************************************************************************/ +static __inline__ void E500_InstructionSync(void) +{ + __asm__ ("isync"); +} + + +/** @} */ /* end of E500_init_grp group */ +/** @} */ /* end of E500_grp group */ + + +#endif /* __E500V2_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/cores/ppc_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/cores/ppc_ext.h @@ -0,0 +1,130 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File ppc_ext.h + + @Description Core API for PowerPC cores + + These routines must be implemented by each specific PowerPC + core driver. +*//***************************************************************************/ +#ifndef __PPC_EXT_H +#define __PPC_EXT_H + +#include "part_ext.h" + + +#define CORE_IS_BIG_ENDIAN + + +/**************************************************************************//** + @Function CORE_TestAndSet + + @Description This routine tries to atomically test-and-set an integer + in memory to a non-zero value. + + The memory will be set only if it is tested as zero, in which + case the routine returns the new non-zero value; otherwise the + routine returns zero. + + @Param[in] p - pointer to a volatile int in memory, on which test-and-set + operation should be made. + + @Retval Zero - Operation failed - memory was already set. + @Retval Non-zero - Operation succeeded - memory has been set. +*//***************************************************************************/ +int CORE_TestAndSet(volatile int *p); + +/**************************************************************************//** + @Function CORE_InstructionSync + + @Description This routine will cause the core to wait for previous instructions + (including any interrupts they generate) to complete before the + synchronization command executes, which purges all instructions + from the processor's pipeline and refetches the next instruction. + + @Return None. +*//***************************************************************************/ +void CORE_InstructionSync(void); + +/**************************************************************************//** + @Function CORE_DCacheEnable + + @Description Enables the data cache for memory pages that are + not cache inhibited. + + @Return None. +*//***************************************************************************/ +void CORE_DCacheEnable(void); + +/**************************************************************************//** + @Function CORE_ICacheEnable + + @Description Enables the instruction cache for memory pages that are + not cache inhibited. + + @Return None. +*//***************************************************************************/ +void CORE_ICacheEnable(void); + +/**************************************************************************//** + @Function CORE_DCacheDisable + + @Description Disables the data cache. + + @Return None. +*//***************************************************************************/ +void CORE_DCacheDisable(void); + +/**************************************************************************//** + @Function CORE_ICacheDisable + + @Description Disables the instruction cache. + + @Return None. 
+*//***************************************************************************/ +void CORE_ICacheDisable(void); + + + + +#if defined(CORE_E300) +#include "e300_ext.h" +#elif defined(CORE_E500V2) || defined(CORE_E500MC) +#include "e500v2_ext.h" +#else +#error "Core not defined!" +#endif + + +#endif /* __PPC_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/ctype_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/ctype_ext.h @@ -0,0 +1,93 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CTYPE_EXT_H +#define __CTYPE_EXT_H + + +#if defined(NCSW_LINUX) && defined(__KERNEL__) +/* + * NOTE! This ctype does not handle EOF like the standard C + * library is required to. 
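+ *
+ * For example, isdigit('7') and isxdigit('f') evaluate to non-zero here,
+ * but, unlike the standard <ctype.h>, passing EOF to these macros is not
+ * supported.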
+ */ + +#define _U 0x01 /* upper */ +#define _L 0x02 /* lower */ +#define _D 0x04 /* digit */ +#define _C 0x08 /* cntrl */ +#define _P 0x10 /* punct */ +#define _S 0x20 /* white space (space/lf/tab) */ +#define _X 0x40 /* hex digit */ +#define _SP 0x80 /* hard space (0x20) */ + +extern unsigned char _ctype[]; + +#define __ismask(x) (_ctype[(int)(unsigned char)(x)]) + +#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) +#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) +#define iscntrl(c) ((__ismask(c)&(_C)) != 0) +#define isdigit(c) ((__ismask(c)&(_D)) != 0) +#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) +#define islower(c) ((__ismask(c)&(_L)) != 0) +#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) +#define ispunct(c) ((__ismask(c)&(_P)) != 0) +#define isspace(c) ((__ismask(c)&(_S)) != 0) +#define isupper(c) ((__ismask(c)&(_U)) != 0) +#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) + +#define isascii(c) (((unsigned char)(c))<=0x7f) +#define toascii(c) (((unsigned char)(c))&0x7f) + +static __inline__ unsigned char __tolower(unsigned char c) +{ + if (isupper(c)) + c -= 'A'-'a'; + return c; +} + +static __inline__ unsigned char __toupper(unsigned char c) +{ + if (islower(c)) + c -= 'a'-'A'; + return c; +} + +#define tolower(c) __tolower(c) +#define toupper(c) __toupper(c) + +#else +#include +#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */ + + +#endif /* __CTYPE_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/debug_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/debug_ext.h @@ -0,0 +1,259 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File debug_ext.h + + @Description Debug mode definitions. 
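+
+              An illustrative register-dump routine using the macros defined
+              below (t_MyRegs and its fields are hypothetical, and the macros
+              are only available when DEBUG_ERRORS > 0):
+
+              @code
+                  void DumpMyRegs(t_MyRegs *p_Regs)
+                  {
+                      DECLARE_DUMP;
+
+                      DUMP_TITLE(p_Regs, ("MyRegs"));
+                      DUMP_VAR(p_Regs, control);
+                      DUMP_VAR(p_Regs, status);
+                  }
+              @endcode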
+*//***************************************************************************/ + +#ifndef __DEBUG_EXT_H +#define __DEBUG_EXT_H + +#include "std_ext.h" +#include "xx_ext.h" +#include "memcpy_ext.h" +#if (DEBUG_ERRORS > 0) +#include "sprint_ext.h" +#include "string_ext.h" +#endif /* DEBUG_ERRORS > 0 */ + + +#if (DEBUG_ERRORS > 0) + +/* Internally used macros */ + +#define DUMP_Print XX_Print +#define DUMP_MAX_LEVELS 6 +#define DUMP_MAX_STR 64 + + +#define _CREATE_DUMP_SUBSTR(phrase) \ + dumpTmpLevel = 0; dumpSubStr[0] = '\0'; \ + sprintf(dumpTmpStr, "%s", #phrase); \ + p_DumpToken = strtok(dumpTmpStr, (dumpIsArr[0] ? "[" : ".")); \ + while (p_DumpToken != NULL) \ + { \ + strcat(dumpSubStr, p_DumpToken); \ + if (dumpIsArr[dumpTmpLevel]) \ + { \ + strcat(dumpSubStr, dumpIdxStr[dumpTmpLevel]); \ + p_DumpToken = strtok(NULL, "."); \ + } \ + if ((p_DumpToken = strtok(NULL, (dumpIsArr[++dumpTmpLevel] ? "[" : "."))) != 0) \ + strcat(dumpSubStr, "."); \ + }\ + + +/**************************************************************************//** + @Group gen_id General Drivers Utilities + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group dump_id Memory and Registers Dump Mechanism + + @Description Macros for dumping memory mapped structures. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description Declaration of dump mechanism variables. + + This macro must be declared at the beginning of each routine + which uses the dump mechanism macros, before the routine's code + starts. +*//***************************************************************************/ +#define DECLARE_DUMP \ + char dumpIdxStr[DUMP_MAX_LEVELS + 1][6] = { "", }; \ + char dumpSubStr[DUMP_MAX_STR] = ""; \ + char dumpTmpStr[DUMP_MAX_STR] = ""; \ + char *p_DumpToken = NULL; \ + int dumpArrIdx = 0, dumpArrSize = 0, dumpVarSize = 0, dumpLevel = 0, dumpTmpLevel = 0; \ + uint8_t dumpIsArr[DUMP_MAX_LEVELS + 1] = { 0 }; \ + /* Prevent warnings if not all used */ \ + UNUSED(dumpIdxStr[0][0]); \ + UNUSED(dumpSubStr[0]); \ + UNUSED(dumpTmpStr[0]); \ + UNUSED(p_DumpToken); \ + UNUSED(dumpArrIdx); \ + UNUSED(dumpArrSize); \ + UNUSED(dumpVarSize); \ + UNUSED(dumpLevel); \ + UNUSED(dumpTmpLevel); \ + UNUSED(dumpIsArr[0]); + + +/**************************************************************************//** + @Description Prints a title for a subsequent dumped structure or memory. + + The inputs for this macro are the structure/memory title and + its base addresses. +*//***************************************************************************/ +#define DUMP_TITLE(addr, msg) \ + DUMP_Print("\r\n"); DUMP_Print msg; \ + DUMP_Print(" (0x%p)\r\n" \ + "---------------------------------------------------------\r\n", \ + (addr)) + +/**************************************************************************//** + @Description Prints a subtitle for a subsequent dumped sub-structure (optional). + + The inputs for this macro are the sub-structure subtitle. + A separating line with this subtitle will be printed. 
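+
+              For example (the subtitle text is a placeholder):
+
+              @code
+                  DUMP_SUBTITLE(("timer registers"));
+              @endcode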
+*//***************************************************************************/ +#define DUMP_SUBTITLE(subtitle) \ + DUMP_Print("----------- "); DUMP_Print subtitle; DUMP_Print("\r\n") + + +/**************************************************************************//** + @Description Dumps a memory region in 4-bytes aligned format. + + The inputs for this macro are the base addresses and size + (in bytes) of the memory region. +*//***************************************************************************/ +#define DUMP_MEMORY(addr, size) \ + MemDisp((uint8_t *)(addr), (int)(size)) + + +/**************************************************************************//** + @Description Declares a dump loop, for dumping a sub-structure array. + + The inputs for this macro are: + - idx: an index variable, for indexing the sub-structure items + inside the loop. This variable must be declared separately + in the beginning of the routine. + - cnt: the number of times to repeat the loop. This number should + equal the number of items in the sub-structures array. + + Note, that the body of the loop must be written inside brackets. +*//***************************************************************************/ +#define DUMP_SUBSTRUCT_ARRAY(idx, cnt) \ + for (idx=0, dumpIsArr[dumpLevel++] = 1; \ + (idx < cnt) && sprintf(dumpIdxStr[dumpLevel-1], "[%d]", idx); \ + idx++, ((idx < cnt) || ((dumpIsArr[--dumpLevel] = 0) == 0))) + + +/**************************************************************************//** + @Description Dumps a structure's member variable. + + The input for this macro is the full reference for the member + variable, where the structure is referenced using a pointer. + + Note, that a members array must be dumped using DUMP_ARR macro, + rather than using this macro. + + If the member variable is part of a sub-structure hierarchy, + the full hierarchy (including array indexing) must be specified. + + Examples: p_Struct->member + p_Struct->sub.member + p_Struct->sub[i].member +*//***************************************************************************/ +#define DUMP_VAR(st, phrase) \ + do { \ + void *addr = (void *)&((st)->phrase); \ + _CREATE_DUMP_SUBSTR(phrase); \ + dumpVarSize = sizeof((st)->phrase); \ + switch (dumpVarSize) \ + { \ + case 1: DUMP_Print("0x%08X: 0x%02x%14s\t%s\r\n", \ + addr, GET_UINT8(*(uint8_t*)addr), "", dumpSubStr); break; \ + case 2: DUMP_Print("0x%08X: 0x%04x%12s\t%s\r\n", \ + addr, GET_UINT16(*(uint16_t*)addr), "", dumpSubStr); break; \ + case 4: DUMP_Print("0x%08X: 0x%08x%8s\t%s\r\n", \ + addr, GET_UINT32(*(uint32_t*)addr), "", dumpSubStr); break; \ + case 8: DUMP_Print("0x%08X: 0x%016llx\t%s\r\n", \ + addr, GET_UINT64(*(uint64_t*)addr), dumpSubStr); break; \ + default: DUMP_Print("Bad size %d (" #st "->" #phrase ")\r\n", dumpVarSize); \ + } \ + } while (0) + + +/**************************************************************************//** + @Description Dumps a structure's members array. + + The input for this macro is the full reference for the members + array, where the structure is referenced using a pointer. + + If the members array is part of a sub-structure hierarchy, + the full hierarchy (including array indexing) must be specified. 
+ + Examples: p_Struct->array + p_Struct->sub.array + p_Struct->sub[i].array +*//***************************************************************************/ +#define DUMP_ARR(st, phrase) \ + do { \ + _CREATE_DUMP_SUBSTR(phrase); \ + dumpArrSize = ARRAY_SIZE((st)->phrase); \ + dumpVarSize = sizeof((st)->phrase[0]); \ + switch (dumpVarSize) \ + { \ + case 1: \ + for (dumpArrIdx=0; dumpArrIdx < dumpArrSize; dumpArrIdx++) { \ + DUMP_Print("0x%08X: 0x%02x%14s\t%s[%d]\r\n", \ + &((st)->phrase[dumpArrIdx]), GET_UINT8((st)->phrase[dumpArrIdx]), "", dumpSubStr, dumpArrIdx); \ + } break; \ + case 2: \ + for (dumpArrIdx=0; dumpArrIdx < dumpArrSize; dumpArrIdx++) { \ + DUMP_Print("0x%08X: 0x%04x%12s\t%s[%d]\r\n", \ + &((st)->phrase[dumpArrIdx]), GET_UINT16((st)->phrase[dumpArrIdx]), "", dumpSubStr, dumpArrIdx); \ + } break; \ + case 4: \ + for (dumpArrIdx=0; dumpArrIdx < dumpArrSize; dumpArrIdx++) { \ + DUMP_Print("0x%08X: 0x%08x%8s\t%s[%d]\r\n", \ + &((st)->phrase[dumpArrIdx]), GET_UINT32((st)->phrase[dumpArrIdx]), "", dumpSubStr, dumpArrIdx); \ + } break; \ + case 8: \ + for (dumpArrIdx=0; dumpArrIdx < dumpArrSize; dumpArrIdx++) { \ + DUMP_Print("0x%08X: 0x%016llx\t%s[%d]\r\n", \ + &((st)->phrase[dumpArrIdx]), GET_UINT64((st)->phrase[dumpArrIdx]), dumpSubStr, dumpArrIdx); \ + } break; \ + default: DUMP_Print("Bad size %d (" #st "->" #phrase "[0])\r\n", dumpVarSize); \ + } \ + } while (0) + + +#endif /* DEBUG_ERRORS > 0 */ + + +/** @} */ /* end of dump_id group */ +/** @} */ /* end of gen_id group */ + + +#endif /* __DEBUG_EXT_H */ + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/endian_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/endian_ext.h @@ -0,0 +1,446 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/**************************************************************************//** + + @File endian_ext.h + + @Description Big/little endian swapping routines. +*//***************************************************************************/ + +#ifndef __ENDIAN_EXT_H +#define __ENDIAN_EXT_H + +#include "std_ext.h" + + +/**************************************************************************//** + @Group gen_id General Drivers Utilities + + @Description General usage API. This API is intended for usage by both the + internal modules and the user's application. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group endian_id Big/Little-Endian Conversion + + @Description Routines and macros for Big/Little-Endian conversion and + general byte swapping. + + All routines and macros are expecting unsigned values as + parameters, but will generate the correct result also for + signed values. Therefore, signed/unsigned casting is allowed. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Collection Byte-Swap Macros + + Macros for swapping byte order. + + @Cautions The parameters of these macros are evaluated multiple times. + For calculated expressions or expressions that contain function + calls it is recommended to use the byte-swap routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description Swaps the byte order of a given 16-bit value. + + @Param[in] val - The 16-bit value to swap. + + @Return The byte-swapped value.. + + @Cautions The given value is evaluated multiple times by this macro. + For calculated expressions or expressions that contain function + calls it is recommended to use the SwapUint16() routine. + + @hideinitializer +*//***************************************************************************/ +#define SWAP_UINT16(val) \ + ((uint16_t)((((val) & 0x00FF) << 8) | (((val) & 0xFF00) >> 8))) + +/**************************************************************************//** + @Description Swaps the byte order of a given 32-bit value. + + @Param[in] val - The 32-bit value to swap. + + @Return The byte-swapped value.. + + @Cautions The given value is evaluated multiple times by this macro. + For calculated expressions or expressions that contain function + calls it is recommended to use the SwapUint32() routine. + + @hideinitializer +*//***************************************************************************/ +#define SWAP_UINT32(val) \ + ((uint32_t)((((val) & 0x000000FF) << 24) | \ + (((val) & 0x0000FF00) << 8) | \ + (((val) & 0x00FF0000) >> 8) | \ + (((val) & 0xFF000000) >> 24))) + +/**************************************************************************//** + @Description Swaps the byte order of a given 64-bit value. + + @Param[in] val - The 64-bit value to swap. + + @Return The byte-swapped value.. + + @Cautions The given value is evaluated multiple times by this macro. + For calculated expressions or expressions that contain function + calls it is recommended to use the SwapUint64() routine. 
+ + @hideinitializer +*//***************************************************************************/ +#define SWAP_UINT64(val) \ + ((uint64_t)((((val) & 0x00000000000000FFULL) << 56) | \ + (((val) & 0x000000000000FF00ULL) << 40) | \ + (((val) & 0x0000000000FF0000ULL) << 24) | \ + (((val) & 0x00000000FF000000ULL) << 8) | \ + (((val) & 0x000000FF00000000ULL) >> 8) | \ + (((val) & 0x0000FF0000000000ULL) >> 24) | \ + (((val) & 0x00FF000000000000ULL) >> 40) | \ + (((val) & 0xFF00000000000000ULL) >> 56))) + +/* @} */ + +/**************************************************************************//** + @Collection Byte-Swap Routines + + Routines for swapping the byte order of a given parameter and + returning the swapped value. + + These inline routines are safer than the byte-swap macros, + because they evaluate the parameter expression only once. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function SwapUint16 + + @Description Returns the byte-swapped value of a given 16-bit value. + + @Param[in] val - The 16-bit value. + + @Return The byte-swapped value of the parameter. +*//***************************************************************************/ +static __inline__ uint16_t SwapUint16(uint16_t val) +{ + return (uint16_t)(((val & 0x00FF) << 8) | + ((val & 0xFF00) >> 8)); +} + +/**************************************************************************//** + @Function SwapUint32 + + @Description Returns the byte-swapped value of a given 32-bit value. + + @Param[in] val - The 32-bit value. + + @Return The byte-swapped value of the parameter. +*//***************************************************************************/ +static __inline__ uint32_t SwapUint32(uint32_t val) +{ + return (uint32_t)(((val & 0x000000FF) << 24) | + ((val & 0x0000FF00) << 8) | + ((val & 0x00FF0000) >> 8) | + ((val & 0xFF000000) >> 24)); +} + +/**************************************************************************//** + @Function SwapUint64 + + @Description Returns the byte-swapped value of a given 64-bit value. + + @Param[in] val - The 64-bit value. + + @Return The byte-swapped value of the parameter. +*//***************************************************************************/ +static __inline__ uint64_t SwapUint64(uint64_t val) +{ + return (uint64_t)(((val & 0x00000000000000FFULL) << 56) | + ((val & 0x000000000000FF00ULL) << 40) | + ((val & 0x0000000000FF0000ULL) << 24) | + ((val & 0x00000000FF000000ULL) << 8) | + ((val & 0x000000FF00000000ULL) >> 8) | + ((val & 0x0000FF0000000000ULL) >> 24) | + ((val & 0x00FF000000000000ULL) >> 40) | + ((val & 0xFF00000000000000ULL) >> 56)); +} + +/* @} */ + +/**************************************************************************//** + @Collection In-place Byte-Swap-And-Set Routines + + Routines for swapping the byte order of a given variable and + setting the swapped value back to the same variable. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function SwapUint16P + + @Description Swaps the byte order of a given 16-bit variable. + + @Param[in] p_Val - Pointer to the 16-bit variable. + + @Return None. 
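(Editorial note, not part of the patch: a small sketch of the macro-versus-routine guidance above. ReadNextStatusWord() is an invented function standing in for any expression with side effects.)

#include "endian_ext.h"

extern uint16_t ReadNextStatusWord(void);   /* invented; has side effects */

static void ByteSwapExamples(void)
{
    uint16_t plain = 0x1234;
    uint16_t a, b;
    uint32_t c;
    uint64_t d;

    /* A simple variable may safely be passed to the macro form. */
    a = SWAP_UINT16(plain);                       /* 0x3412 */

    /* The macro would evaluate ReadNextStatusWord() several times,
     * so the inline routine is the right choice here. */
    b = SwapUint16(ReadNextStatusWord());

    /* The 32- and 64-bit variants follow the same pattern. */
    c = SwapUint32(0x11223344UL);                 /* 0x44332211 */
    d = SWAP_UINT64(0x1122334455667788ULL);       /* 0x8877665544332211 */

    (void)a; (void)b; (void)c; (void)d;
}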
+*//***************************************************************************/ +static __inline__ void SwapUint16P(uint16_t *p_Val) +{ + *p_Val = SwapUint16(*p_Val); +} + +/**************************************************************************//** + @Function SwapUint32P + + @Description Swaps the byte order of a given 32-bit variable. + + @Param[in] p_Val - Pointer to the 32-bit variable. + + @Return None. +*//***************************************************************************/ +static __inline__ void SwapUint32P(uint32_t *p_Val) +{ + *p_Val = SwapUint32(*p_Val); +} + +/**************************************************************************//** + @Function SwapUint64P + + @Description Swaps the byte order of a given 64-bit variable. + + @Param[in] p_Val - Pointer to the 64-bit variable. + + @Return None. +*//***************************************************************************/ +static __inline__ void SwapUint64P(uint64_t *p_Val) +{ + *p_Val = SwapUint64(*p_Val); +} + +/* @} */ + + +/**************************************************************************//** + @Collection Little-Endian Conversion Macros + + These macros convert given parameters to or from Little-Endian + format. Use these macros when you want to read or write a specific + Little-Endian value in memory, without a-priori knowing the CPU + byte order. + + These macros use the byte-swap routines. For conversion of + constants in initialization structures, you may use the CONST + versions of these macros (see below), which are using the + byte-swap macros instead. + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description Converts a given 16-bit value from CPU byte order to + Little-Endian byte order. + + @Param[in] val - The 16-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CPU_TO_LE16(val) SwapUint16(val) + +/**************************************************************************//** + @Description Converts a given 32-bit value from CPU byte order to + Little-Endian byte order. + + @Param[in] val - The 32-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CPU_TO_LE32(val) SwapUint32(val) + +/**************************************************************************//** + @Description Converts a given 64-bit value from CPU byte order to + Little-Endian byte order. + + @Param[in] val - The 64-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CPU_TO_LE64(val) SwapUint64(val) + + +/**************************************************************************//** + @Description Converts a given 16-bit value from Little-Endian byte order to + CPU byte order. + + @Param[in] val - The 16-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define LE16_TO_CPU(val) CPU_TO_LE16(val) + +/**************************************************************************//** + @Description Converts a given 32-bit value from Little-Endian byte order to + CPU byte order. + + @Param[in] val - The 32-bit value to convert. + + @Return The converted value. 
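(Editorial note, not part of the patch: an illustration of the Little-Endian conversion macros above; the descriptor layout is invented for the example.)

#include "endian_ext.h"

/* Invented example: a descriptor whose fields are little-endian in memory. */
typedef struct
{
    uint32_t bufAddr;       /* little-endian */
    uint16_t length;        /* little-endian */
} t_ExampleDesc;

static void ExampleFillDesc(t_ExampleDesc *p_Desc, uint32_t addr, uint16_t len)
{
    /* The conversion macros hide the CPU byte order from the caller. */
    p_Desc->bufAddr = CPU_TO_LE32(addr);
    p_Desc->length  = CPU_TO_LE16(len);
}

static uint16_t ExampleReadDescLength(t_ExampleDesc *p_Desc)
{
    return LE16_TO_CPU(p_Desc->length);
}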
+ + @hideinitializer +*//***************************************************************************/ +#define LE32_TO_CPU(val) CPU_TO_LE32(val) + +/**************************************************************************//** + @Description Converts a given 64-bit value from Little-Endian byte order to + CPU byte order. + + @Param[in] val - The 64-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define LE64_TO_CPU(val) CPU_TO_LE64(val) + +/* @} */ + +/**************************************************************************//** + @Collection Little-Endian Constant Conversion Macros + + These macros convert given constants to or from Little-Endian + format. Use these macros when you want to read or write a specific + Little-Endian constant in memory, without a-priori knowing the + CPU byte order. + + These macros use the byte-swap macros, therefore can be used for + conversion of constants in initialization structures. + + @Cautions The parameters of these macros are evaluated multiple times. + For non-constant expressions, use the non-CONST macro versions. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description Converts a given 16-bit constant from CPU byte order to + Little-Endian byte order. + + @Param[in] val - The 16-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CONST_CPU_TO_LE16(val) SWAP_UINT16(val) + +/**************************************************************************//** + @Description Converts a given 32-bit constant from CPU byte order to + Little-Endian byte order. + + @Param[in] val - The 32-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CONST_CPU_TO_LE32(val) SWAP_UINT32(val) + +/**************************************************************************//** + @Description Converts a given 64-bit constant from CPU byte order to + Little-Endian byte order. + + @Param[in] val - The 64-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CONST_CPU_TO_LE64(val) SWAP_UINT64(val) + + +/**************************************************************************//** + @Description Converts a given 16-bit constant from Little-Endian byte order + to CPU byte order. + + @Param[in] val - The 16-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CONST_LE16_TO_CPU(val) CONST_CPU_TO_LE16(val) + +/**************************************************************************//** + @Description Converts a given 32-bit constant from Little-Endian byte order + to CPU byte order. + + @Param[in] val - The 32-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CONST_LE32_TO_CPU(val) CONST_CPU_TO_LE32(val) + +/**************************************************************************//** + @Description Converts a given 64-bit constant from Little-Endian byte order + to CPU byte order. 
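(Editorial note, not part of the patch: a minimal sketch of the intended use of the CONST_ variants; the table and values are invented. Because they expand to the byte-swap macros rather than to inline routines, they are legal in static initializers.)

#include "endian_ext.h"

static const uint32_t exampleLeTable[] =
{
    CONST_CPU_TO_LE32(0xCAFEBABEUL),    /* stored little-endian in memory */
    CONST_CPU_TO_LE32(0x00000001UL)
};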
+ + @Param[in] val - The 64-bit value to convert. + + @Return The converted value. + + @hideinitializer +*//***************************************************************************/ +#define CONST_LE64_TO_CPU(val) CONST_CPU_TO_LE64(val) + +/* @} */ + + +/** @} */ /* end of endian_id group */ +/** @} */ /* end of gen_id group */ + + +#endif /* __ENDIAN_EXT_H */ + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/enet_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/enet_ext.h @@ -0,0 +1,154 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File enet_ext.h + + @Description Ethernet generic definitions and enums. +*//***************************************************************************/ + +#ifndef __ENET_EXT_H +#define __ENET_EXT_H + + +#define ENET_NUM_OCTETS_PER_ADDRESS 6 /**< Number of octets (8-bit bytes) in an ethernet address */ +#define ENET_GROUP_ADDR 0x01 /**< Group address mask for ethernet addresses */ + + +/**************************************************************************//** + @Description Ethernet Address +*//***************************************************************************/ +typedef uint8_t t_EnetAddr[ENET_NUM_OCTETS_PER_ADDRESS]; + +/**************************************************************************//** + @Description Ethernet Address Type. 
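(Editorial note, not part of the patch: a brief sketch using the Ethernet address definitions above. The group-bit test follows the standard Ethernet convention that the group bit is the least-significant bit of the first octet, which is what the ENET_GROUP_ADDR mask encodes.)

#include "enet_ext.h"

/* Sketch: is this a group (multicast/broadcast) address? */
static int ExampleIsGroupAddr(const t_EnetAddr addr)
{
    return (addr[0] & ENET_GROUP_ADDR) ? 1 : 0;
}

static void ExampleCopyAddr(t_EnetAddr dst, const t_EnetAddr src)
{
    int i;

    for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
        dst[i] = src[i];
}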
+*//***************************************************************************/ +typedef enum e_EnetAddrType +{ + e_ENET_ADDR_TYPE_INDIVIDUAL, /**< Individual (unicast) address */ + e_ENET_ADDR_TYPE_GROUP, /**< Group (multicast) address */ + e_ENET_ADDR_TYPE_BROADCAST /**< Broadcast address */ +} e_EnetAddrType; + + +/**************************************************************************//** + @Description Ethernet MAC-PHY Interface +*//***************************************************************************/ +typedef enum e_EnetInterface +{ + e_ENET_IF_MII = 0x00010000, /**< MII interface */ + e_ENET_IF_RMII = 0x00020000, /**< RMII interface */ + e_ENET_IF_SMII = 0x00030000, /**< SMII interface */ + e_ENET_IF_GMII = 0x00040000, /**< GMII interface */ + e_ENET_IF_RGMII = 0x00050000, /**< RGMII interface */ + e_ENET_IF_TBI = 0x00060000, /**< TBI interface */ + e_ENET_IF_RTBI = 0x00070000, /**< RTBI interface */ + e_ENET_IF_SGMII = 0x00080000, /**< SGMII interface */ + e_ENET_IF_XGMII = 0x00090000, /**< XGMII interface */ + e_ENET_IF_QSGMII= 0x000a0000 /**< QSGMII interface */ +} e_EnetInterface; + +/**************************************************************************//** + @Description Ethernet Duplex Mode +*//***************************************************************************/ +typedef enum e_EnetDuplexMode +{ + e_ENET_HALF_DUPLEX, /**< Half-Duplex mode */ + e_ENET_FULL_DUPLEX /**< Full-Duplex mode */ +} e_EnetDuplexMode; + +/**************************************************************************//** + @Description Ethernet Speed (nominal data rate) +*//***************************************************************************/ +typedef enum e_EnetSpeed +{ + e_ENET_SPEED_10 = 10, /**< 10 Mbps */ + e_ENET_SPEED_100 = 100, /**< 100 Mbps */ + e_ENET_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */ + e_ENET_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */ +} e_EnetSpeed; + +/**************************************************************************//** + @Description Ethernet mode (combination of MAC-PHY interface and speed) +*//***************************************************************************/ +typedef enum e_EnetMode +{ + e_ENET_MODE_INVALID = 0, /**< Invalid Ethernet mode */ + e_ENET_MODE_MII_10 = (e_ENET_IF_MII | e_ENET_SPEED_10), /**< 10 Mbps MII */ + e_ENET_MODE_MII_100 = (e_ENET_IF_MII | e_ENET_SPEED_100), /**< 100 Mbps MII */ + e_ENET_MODE_RMII_10 = (e_ENET_IF_RMII | e_ENET_SPEED_10), /**< 10 Mbps RMII */ + e_ENET_MODE_RMII_100 = (e_ENET_IF_RMII | e_ENET_SPEED_100), /**< 100 Mbps RMII */ + e_ENET_MODE_SMII_10 = (e_ENET_IF_SMII | e_ENET_SPEED_10), /**< 10 Mbps SMII */ + e_ENET_MODE_SMII_100 = (e_ENET_IF_SMII | e_ENET_SPEED_100), /**< 100 Mbps SMII */ + e_ENET_MODE_GMII_1000 = (e_ENET_IF_GMII | e_ENET_SPEED_1000), /**< 1000 Mbps GMII */ + e_ENET_MODE_RGMII_10 = (e_ENET_IF_RGMII | e_ENET_SPEED_10), /**< 10 Mbps RGMII */ + e_ENET_MODE_RGMII_100 = (e_ENET_IF_RGMII | e_ENET_SPEED_100), /**< 100 Mbps RGMII */ + e_ENET_MODE_RGMII_1000 = (e_ENET_IF_RGMII | e_ENET_SPEED_1000), /**< 1000 Mbps RGMII */ + e_ENET_MODE_TBI_1000 = (e_ENET_IF_TBI | e_ENET_SPEED_1000), /**< 1000 Mbps TBI */ + e_ENET_MODE_RTBI_1000 = (e_ENET_IF_RTBI | e_ENET_SPEED_1000), /**< 1000 Mbps RTBI */ + e_ENET_MODE_SGMII_10 = (e_ENET_IF_SGMII | e_ENET_SPEED_10), /**< 10 Mbps SGMII */ + e_ENET_MODE_SGMII_100 = (e_ENET_IF_SGMII | e_ENET_SPEED_100), /**< 100 Mbps SGMII */ + e_ENET_MODE_SGMII_1000 = (e_ENET_IF_SGMII | e_ENET_SPEED_1000), /**< 1000 Mbps SGMII */ + e_ENET_MODE_XGMII_10000 = 
(e_ENET_IF_XGMII | e_ENET_SPEED_10000), /**< 10000 Mbps XGMII */ + e_ENET_MODE_QSGMII_1000 = (e_ENET_IF_QSGMII| e_ENET_SPEED_1000) /**< 1000 Mbps QSGMII */ +} e_EnetMode; + + +#define IS_ENET_MODE_VALID(mode) \ + (((mode) == e_ENET_MODE_MII_10 ) || \ + ((mode) == e_ENET_MODE_MII_100 ) || \ + ((mode) == e_ENET_MODE_RMII_10 ) || \ + ((mode) == e_ENET_MODE_RMII_100 ) || \ + ((mode) == e_ENET_MODE_SMII_10 ) || \ + ((mode) == e_ENET_MODE_SMII_100 ) || \ + ((mode) == e_ENET_MODE_GMII_1000 ) || \ + ((mode) == e_ENET_MODE_RGMII_10 ) || \ + ((mode) == e_ENET_MODE_RGMII_100 ) || \ + ((mode) == e_ENET_MODE_RGMII_1000 ) || \ + ((mode) == e_ENET_MODE_TBI_1000 ) || \ + ((mode) == e_ENET_MODE_RTBI_1000 ) || \ + ((mode) == e_ENET_MODE_SGMII_10 ) || \ + ((mode) == e_ENET_MODE_SGMII_100 ) || \ + ((mode) == e_ENET_MODE_SGMII_1000 ) || \ + ((mode) == e_ENET_MODE_XGMII_10000) || \ + ((mode) == e_ENET_MODE_QSGMII_1000)) + + +#define MAKE_ENET_MODE(_interface, _speed) (e_EnetMode)((_interface) | (_speed)) + +#define ENET_INTERFACE_FROM_MODE(mode) (e_EnetInterface)((mode) & 0xFFFF0000) +#define ENET_SPEED_FROM_MODE(mode) (e_EnetSpeed)((mode) & 0x0000FFFF) + + + +#endif /* __ENET_EXT_H */ + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/error_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/error_ext.h @@ -0,0 +1,553 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + @File error_ext.h + + @Description Error definitions. 
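(Editorial note, not part of the patch: an illustrative sketch of the mode helper macros defined in enet_ext.h above, composing a mode, validating it, and taking it apart again.)

#include "enet_ext.h"

static void ExampleEnetMode(void)
{
    e_EnetMode      mode  = MAKE_ENET_MODE(e_ENET_IF_RGMII, e_ENET_SPEED_1000);
    e_EnetInterface iface = ENET_INTERFACE_FROM_MODE(mode);   /* e_ENET_IF_RGMII  */
    e_EnetSpeed     speed = ENET_SPEED_FROM_MODE(mode);       /* e_ENET_SPEED_1000 */

    if (!IS_ENET_MODE_VALID(mode))
        return;     /* e.g. RMII at 1000 Mbps would be rejected here */

    (void)iface; (void)speed;
}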
+*//***************************************************************************/ + +#ifndef __ERROR_EXT_H +#define __ERROR_EXT_H + +#include "std_ext.h" +#include "xx_ext.h" +#include "core_ext.h" + +/**************************************************************************//** + @Group gen_id General Drivers Utilities + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group gen_error_id Errors, Events and Debug + + @Description External routines. + + @{ +*//***************************************************************************/ + +/****************************************************************************** +The scheme below provides the bits description for error codes: + + 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 +| Reserved (should be zero) | Module ID | + + 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 +| Error Type | +******************************************************************************/ + +#define ERROR_CODE(_err) ((((uint32_t)_err) & 0x0000FFFF) | __ERR_MODULE__) + +#define GET_ERROR_TYPE(_errcode) ((_errcode) & 0x0000FFFF) + /**< Extract module code from error code (#t_Error) */ + +#define GET_ERROR_MODULE(_errcode) ((_errcode) & 0x00FF0000) + /**< Extract error type (#e_ErrorType) from + error code (#t_Error) */ + + +/**************************************************************************//** + @Description Error Type Enumeration +*//***************************************************************************/ +typedef enum e_ErrorType /* Comments / Associated Message Strings */ +{ /* ------------------------------------------------------------ */ + E_OK = 0 /* Never use "RETURN_ERROR" with E_OK; Use "return E_OK;" */ + + /* Invalid Function Calls */ + ,E_INVALID_STATE /**< The operation is not allowed in current module state. */ + /* String: none. */ + ,E_INVALID_OPERATION /**< The operation/command is invalid (unrecognized). */ + /* String: none. */ + ,E_NOT_SUPPORTED /**< The function is not supported or not implemented. */ + /* String: none. */ + ,E_NO_DEVICE /**< The associated device is not initialized. */ + /* String: none. */ + + /* Invalid Parameters */ + ,E_INVALID_HANDLE /**< Invalid handle of module or object. */ + /* String: none, unless the function takes in more than one + handle (in this case add the handle description) */ + ,E_INVALID_ID /**< Invalid module ID (usually enumeration or index). */ + /* String: none, unless the function takes in more than one + ID (in this case add the ID description) */ + ,E_NULL_POINTER /**< Unexpected NULL pointer. */ + /* String: pointer description. */ + ,E_INVALID_VALUE /**< Invalid value. */ + /* Use for non-enumeration parameters, and + only when other error types are not suitable. + String: parameter description + "(should be )", + e.g: "Maximum Rx buffer length (should be divisible by 8)", + "Channel number (should be even)". */ + ,E_INVALID_SELECTION /**< Invalid selection or mode. */ + /* Use for enumeration values, only when other error types + are not suitable. + String: parameter description. */ + ,E_INVALID_COMM_MODE /**< Invalid communication mode. */ + /* String: none, unless the function takes in more than one + communication mode indications (in this case add + parameter description). */ + ,E_INVALID_BYTE_ORDER /**< Invalid byte order. 
*/ + /* String: none, unless the function takes in more than one + byte order indications (in this case add parameter + description). */ + ,E_INVALID_MEMORY_TYPE /**< Invalid memory type. */ + /* String: none, unless the function takes in more than one + memory types (in this case add memory description, + e.g: "Data memory", "Buffer descriptors memory"). */ + ,E_INVALID_INTR_QUEUE /**< Invalid interrupt queue. */ + /* String: none, unless the function takes in more than one + interrupt queues (in this case add queue description, + e.g: "Rx interrupt queue", "Tx interrupt queue"). */ + ,E_INVALID_PRIORITY /**< Invalid priority. */ + /* String: none, unless the function takes in more than one + priority (in this case add priority description). */ + ,E_INVALID_CLOCK /**< Invalid clock. */ + /* String: none, unless the function takes in more than one + clocks (in this case add clock description, + e.g: "Rx clock", "Tx clock"). */ + ,E_INVALID_RATE /**< Invalid rate value. */ + /* String: none, unless the function takes in more than one + rate values (in this case add rate description). */ + ,E_INVALID_ADDRESS /**< Invalid address. */ + /* String: description of the specific violation. */ + ,E_INVALID_BUS /**< Invalid bus type. */ + /* String: none, unless the function takes in more than one + bus parameters (in this case add bus description). */ + ,E_BUS_CONFLICT /**< Bus (or memory) type conflicts with another setting. */ + /* String: description of the conflicting buses/memories. */ + ,E_CONFLICT /**< Some setting conflicts with another setting. */ + /* String: description of the conflicting settings. */ + ,E_NOT_ALIGNED /**< Non-aligned address. */ + /* String: parameter description + "(should be %d-bytes aligned)", + e.g: "Rx data buffer (should be 32-bytes aligned)". */ + ,E_NOT_IN_RANGE /**< Parameter value is out of range. */ + /* Don't use this error for enumeration parameters. + String: parameter description + "(should be %d-%d)", + e.g: "Number of pad characters (should be 0-15)". */ + + /* Frame/Buffer Errors */ + ,E_INVALID_FRAME /**< Invalid frame object (NULL handle or missing buffers). */ + /* String: none. */ + ,E_EMPTY_FRAME /**< Frame object is empty (has no buffers). */ + /* String: none. */ + ,E_EMPTY_BUFFER /**< Buffer object is empty (no data, or zero data length). */ + /* String: none. */ + + /* Resource Errors */ + ,E_NO_MEMORY /**< External memory allocation failed. */ + /* String: description of item for which allocation failed. */ + ,E_NOT_FOUND /**< Requested resource or item was not found. */ + /* Use only when the resource/item is uniquely identified. + String: none, unless the operation is not the main goal + of the function (in this case add item description). */ + ,E_NOT_AVAILABLE /**< Resource is unavailable. */ + /* String: none, unless the operation is not the main goal + of the function (in this case add resource description). */ + ,E_ALREADY_EXISTS /**< Requested resource or item already exists. */ + /* Use when resource duplication or sharing are not allowed. + String: none, unless the operation is not the main goal + of the function (in this case add item description). */ + ,E_FULL /**< Resource is full. */ + /* String: none, unless the operation is not the main goal + of the function (in this case add resource description). */ + ,E_EMPTY /**< Resource is empty. */ + /* String: none, unless the operation is not the main goal + of the function (in this case add resource description). */ + ,E_BUSY /**< Resource or module is busy. 
*/ + /* String: none, unless the operation is not the main goal + of the function (in this case add resource description). */ + ,E_ALREADY_FREE /**< Specified resource or item is already free or deleted. */ + /* String: none, unless the operation is not the main goal + of the function (in this case add item description). */ + + /* Read/Write Access Errors */ + ,E_READ_FAILED /**< Read access failed on memory/device. */ + /* String: none, or device name. */ + ,E_WRITE_FAILED /**< Write access failed on memory/device. */ + /* String: none, or device name. */ + + /* Send/Receive Failures */ + ,E_SEND_FAILED /**< Send operation failed on device. */ + /* String: none, or device name. */ + ,E_RECEIVE_FAILED /**< Receive operation failed on device. */ + /* String: none, or device name. */ + + /* Operation time-out */ + ,E_TIMEOUT /**< The operation timed out. */ + /* String: none. */ + + ,E_DUMMY_LAST /* NEVER USED */ + +} e_ErrorType; + + +/**************************************************************************//** + @Description Event Type Enumeration +*//***************************************************************************/ +typedef enum e_Event /* Comments / Associated Flags and Message Strings */ +{ /* ------------------------------------------------------------ */ + EV_NO_EVENT = 0 /**< No event; Never used. */ + + ,EV_RX_DISCARD /**< Received packet discarded (by the driver, and only for + complete packets); + Flags: error flags in case of error, zero otherwise. */ + /* String: reason for discard, e.g: "Error in frame", + "Disordered frame", "Incomplete frame", "No frame object". */ + ,EV_RX_ERROR /**< Receive error (by hardware/firmware); + Flags: usually status flags from the buffer descriptor. */ + /* String: none. */ + ,EV_TX_ERROR /**< Transmit error (by hardware/firmware); + Flags: usually status flags from the buffer descriptor. */ + /* String: none. */ + ,EV_NO_BUFFERS /**< System ran out of buffer objects; + Flags: zero. */ + /* String: none. */ + ,EV_NO_MB_FRAMES /**< System ran out of multi-buffer frame objects; + Flags: zero. */ + /* String: none. */ + ,EV_NO_SB_FRAMES /**< System ran out of single-buffer frame objects; + Flags: zero. */ + /* String: none. */ + ,EV_TX_QUEUE_FULL /**< Transmit queue is full; + Flags: zero. */ + /* String: none. */ + ,EV_RX_QUEUE_FULL /**< Receive queue is full; + Flags: zero. */ + /* String: none. */ + ,EV_INTR_QUEUE_FULL /**< Interrupt queue overflow; + Flags: zero. */ + /* String: none. */ + ,EV_NO_DATA_BUFFER /**< Data buffer allocation (from higher layer) failed; + Flags: zero. */ + /* String: none. */ + ,EV_OBJ_POOL_EMPTY /**< Objects pool is empty; + Flags: zero. */ + /* String: object description (name). */ + ,EV_BUS_ERROR /**< Illegal access on bus; + Flags: the address (if available) or bus identifier */ + /* String: bus/address/module description. */ + ,EV_PTP_TXTS_QUEUE_FULL /**< PTP Tx timestamps queue is full; + Flags: zero. */ + /* String: none. */ + ,EV_PTP_RXTS_QUEUE_FULL /**< PTP Rx timestamps queue is full; + Flags: zero. */ + /* String: none. */ + ,EV_DUMMY_LAST + +} e_Event; + + +/**************************************************************************//** + @Collection Debug Levels for Errors and Events + + The level description refers to errors only. + For events, classification is done by the user. + + The TRACE, INFO and WARNING levels are allowed only when using + the DBG macro, and are not allowed when using the error macros + (RETURN_ERROR or REPORT_ERROR). 
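(Editorial note, not part of the patch: a small sketch of how the reporting conventions described here are typically combined in driver code. The parameter structure and MY_MAX_LENGTH are invented; t_Error and the E_* codes come from this header, and __ERR_MODULE__ is assumed to be defined by the including module, as ERROR_CODE() above requires.)

#include "error_ext.h"

#define MY_MAX_LENGTH   256                        /* invented for the example */
typedef struct { uint16_t length; } t_MyParams;    /* invented for the example */

static t_Error ExampleCheckParams(t_MyParams *p_Params)
{
    SANITY_CHECK_RETURN_ERROR(p_Params, E_NULL_POINTER);

    if (p_Params->length > MY_MAX_LENGTH)
        /* Error macros take only the CRITICAL/MAJOR/MINOR levels. */
        RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
                     ("length (should be 0-%d)", MY_MAX_LENGTH));

    /* TRACE/INFO/WARNING are reserved for the DBG macro. */
    DBG(TRACE, ("length %d accepted", p_Params->length));

    return E_OK;
}

(As the REPORT_ERROR definition further below shows, GET_ERROR_TYPE() recovers the e_ErrorType part of a composed error code, while GET_ERROR_MODULE() recovers the module bits.)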
+ @{ +*//***************************************************************************/ +#define REPORT_LEVEL_CRITICAL 1 /**< Crasher: Incorrect flow, NULL pointers/handles. */ +#define REPORT_LEVEL_MAJOR 2 /**< Cannot proceed: Invalid operation, parameters or + configuration. */ +#define REPORT_LEVEL_MINOR 3 /**< Recoverable problem: a repeating call with the same + parameters may be successful. */ +#define REPORT_LEVEL_WARNING 4 /**< Something is not exactly right, yet it is not an error. */ +#define REPORT_LEVEL_INFO 5 /**< Messages which may be of interest to user/programmer. */ +#define REPORT_LEVEL_TRACE 6 /**< Program flow messages. */ + +#define EVENT_DISABLED 0xFF /**< Disabled event (not reported at all) */ + +/* @} */ + + + +#define NO_MSG ("") + +#ifndef DEBUG_GLOBAL_LEVEL +#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING +#endif /* DEBUG_GLOBAL_LEVEL */ + +#ifndef ERROR_GLOBAL_LEVEL +#define ERROR_GLOBAL_LEVEL DEBUG_GLOBAL_LEVEL +#endif /* ERROR_GLOBAL_LEVEL */ + +#ifndef EVENT_GLOBAL_LEVEL +#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR +#endif /* EVENT_GLOBAL_LEVEL */ + +#ifdef EVENT_LOCAL_LEVEL +#define EVENT_DYNAMIC_LEVEL EVENT_LOCAL_LEVEL +#else +#define EVENT_DYNAMIC_LEVEL EVENT_GLOBAL_LEVEL +#endif /* EVENT_LOCAL_LEVEL */ + + +#ifndef DEBUG_DYNAMIC_LEVEL +#define DEBUG_USING_STATIC_LEVEL + +#ifdef DEBUG_STATIC_LEVEL +#define DEBUG_DYNAMIC_LEVEL DEBUG_STATIC_LEVEL +#else +#define DEBUG_DYNAMIC_LEVEL DEBUG_GLOBAL_LEVEL +#endif /* DEBUG_STATIC_LEVEL */ + +#else /* DEBUG_DYNAMIC_LEVEL */ +#ifdef DEBUG_STATIC_LEVEL +#error "Please use either DEBUG_STATIC_LEVEL or DEBUG_DYNAMIC_LEVEL (not both)" +#else +int DEBUG_DYNAMIC_LEVEL = DEBUG_GLOBAL_LEVEL; +#endif /* DEBUG_STATIC_LEVEL */ +#endif /* !DEBUG_DYNAMIC_LEVEL */ + + +#ifndef ERROR_DYNAMIC_LEVEL + +#ifdef ERROR_STATIC_LEVEL +#define ERROR_DYNAMIC_LEVEL ERROR_STATIC_LEVEL +#else +#define ERROR_DYNAMIC_LEVEL ERROR_GLOBAL_LEVEL +#endif /* ERROR_STATIC_LEVEL */ + +#else /* ERROR_DYNAMIC_LEVEL */ +#ifdef ERROR_STATIC_LEVEL +#error "Please use either ERROR_STATIC_LEVEL or ERROR_DYNAMIC_LEVEL (not both)" +#else +int ERROR_DYNAMIC_LEVEL = ERROR_GLOBAL_LEVEL; +#endif /* ERROR_STATIC_LEVEL */ +#endif /* !ERROR_DYNAMIC_LEVEL */ + +#define PRINT_FORMAT "[CPU%02d, %s:%d %s]" +#define PRINT_FMT_PARAMS CORE_GetId(), __FILE__, __LINE__, __FUNCTION__ + +#if (!(defined(DEBUG_ERRORS)) || (DEBUG_ERRORS == 0)) +/* No debug/error/event messages at all */ +#define DBG(_level, _vmsg) + +#define REPORT_ERROR(_level, _err, _vmsg) + +#define RETURN_ERROR(_level, _err, _vmsg) \ + return ERROR_CODE(_err) + +#if (REPORT_EVENTS > 0) + +#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) \ + do { \ + if (_ev##_LEVEL <= EVENT_DYNAMIC_LEVEL) { \ + XX_EventById((uint32_t)(_ev), (t_Handle)(_appId), (uint16_t)(_flg), NO_MSG); \ + } \ + } while (0) + +#else + +#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) + +#endif /* (REPORT_EVENTS > 0) */ + + +#else /* DEBUG_ERRORS > 0 */ + +extern const char *dbgLevelStrings[]; +extern const char *errTypeStrings[]; +extern const char *moduleStrings[]; +#if (REPORT_EVENTS > 0) +extern const char *eventStrings[]; +#endif /* (REPORT_EVENTS > 0) */ + + +#if ((defined(DEBUG_USING_STATIC_LEVEL)) && (DEBUG_DYNAMIC_LEVEL < REPORT_LEVEL_WARNING)) +/* No need for DBG macro - debug level is higher anyway */ +#define DBG(_level, _vmsg) +#else +#define DBG(_level, _vmsg) \ + do { \ + if (REPORT_LEVEL_##_level <= DEBUG_DYNAMIC_LEVEL) { \ + XX_Print("> %s (%s) " PRINT_FORMAT ": ", \ + dbgLevelStrings[REPORT_LEVEL_##_level - 1], \ + 
moduleStrings[__ERR_MODULE__ >> 16], \ + PRINT_FMT_PARAMS); \ + XX_Print _vmsg; \ + XX_Print("\r\n"); \ + } \ + } while (0) +#endif /* (defined(DEBUG_USING_STATIC_LEVEL) && (DEBUG_DYNAMIC_LEVEL < WARNING)) */ + + +#define REPORT_ERROR(_level, _err, _vmsg) \ + do { \ + if (REPORT_LEVEL_##_level <= ERROR_DYNAMIC_LEVEL) { \ + XX_Print("! %s %s Error " PRINT_FORMAT ": %s; ", \ + dbgLevelStrings[REPORT_LEVEL_##_level - 1], \ + moduleStrings[__ERR_MODULE__ >> 16], \ + PRINT_FMT_PARAMS, \ + errTypeStrings[(GET_ERROR_TYPE(_err) - E_OK - 1)]); \ + XX_Print _vmsg; \ + XX_Print("\r\n"); \ + } \ + } while (0) + + +#define RETURN_ERROR(_level, _err, _vmsg) \ + do { \ + REPORT_ERROR(_level, (_err), _vmsg); \ + return ERROR_CODE(_err); \ + } while (0) + + +#if (REPORT_EVENTS > 0) + +#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) \ + do { \ + if (_ev##_LEVEL <= EVENT_DYNAMIC_LEVEL) { \ + XX_Print("~ %s %s Event " PRINT_FORMAT ": %s (flags: 0x%04x); ", \ + dbgLevelStrings[_ev##_LEVEL - 1], \ + moduleStrings[__ERR_MODULE__ >> 16], \ + PRINT_FMT_PARAMS, \ + eventStrings[((_ev) - EV_NO_EVENT - 1)], \ + (uint16_t)(_flg)); \ + XX_Print _vmsg; \ + XX_Print("\r\n"); \ + XX_EventById((uint32_t)(_ev), (t_Handle)(_appId), (uint16_t)(_flg), NO_MSG); \ + } \ + } while (0) + +#else /* not REPORT_EVENTS */ + +#define REPORT_EVENT(_ev, _appId, _flg, _vmsg) + +#endif /* (REPORT_EVENTS > 0) */ + +#endif /* (DEBUG_ERRORS > 0) */ + + +/**************************************************************************//** + @Function ASSERT_COND + + @Description Assertion macro. + + @Param[in] _cond - The condition being checked, in positive form; + Failure of the condition triggers the assert. +*//***************************************************************************/ +#ifdef DISABLE_ASSERTIONS +#define ASSERT_COND(_cond) +#else +#define ASSERT_COND(_cond) \ + do { \ + if (!(_cond)) { \ + XX_Print("*** ASSERT_COND failed " PRINT_FORMAT "\r\n", \ + PRINT_FMT_PARAMS); \ + XX_Exit(1); \ + } \ + } while (0) +#endif /* DISABLE_ASSERTIONS */ + + +#ifdef DISABLE_INIT_PARAMETERS_CHECK + +#define CHECK_INIT_PARAMETERS(handle, f_check) +#define CHECK_INIT_PARAMETERS_RETURN_VALUE(handle, f_check, retval) + +#else + +#define CHECK_INIT_PARAMETERS(handle, f_check) \ + do { \ + t_Error err = f_check(handle); \ + if (err != E_OK) { \ + RETURN_ERROR(MAJOR, err, NO_MSG); \ + } \ + } while (0) + +#define CHECK_INIT_PARAMETERS_RETURN_VALUE(handle, f_check, retval) \ + do { \ + t_Error err = f_check(handle); \ + if (err != E_OK) { \ + REPORT_ERROR(MAJOR, err, NO_MSG); \ + return (retval); \ + } \ + } while (0) + +#endif /* DISABLE_INIT_PARAMETERS_CHECK */ + +#ifdef DISABLE_SANITY_CHECKS + +#define SANITY_CHECK_RETURN_ERROR(_cond, _err) +#define SANITY_CHECK_RETURN_VALUE(_cond, _err, retval) +#define SANITY_CHECK_RETURN(_cond, _err) +#define SANITY_CHECK_EXIT(_cond, _err) + +#else /* DISABLE_SANITY_CHECKS */ + +#define SANITY_CHECK_RETURN_ERROR(_cond, _err) \ + do { \ + if (!(_cond)) { \ + RETURN_ERROR(CRITICAL, (_err), NO_MSG); \ + } \ + } while (0) + +#define SANITY_CHECK_RETURN_VALUE(_cond, _err, retval) \ + do { \ + if (!(_cond)) { \ + REPORT_ERROR(CRITICAL, (_err), NO_MSG); \ + return (retval); \ + } \ + } while (0) + +#define SANITY_CHECK_RETURN(_cond, _err) \ + do { \ + if (!(_cond)) { \ + REPORT_ERROR(CRITICAL, (_err), NO_MSG); \ + return; \ + } \ + } while (0) + +#define SANITY_CHECK_EXIT(_cond, _err) \ + do { \ + if (!(_cond)) { \ + REPORT_ERROR(CRITICAL, (_err), NO_MSG); \ + XX_Exit(1); \ + } \ + } while (0) + +#endif /* 
DISABLE_SANITY_CHECKS */ + +/** @} */ /* end of Debug/error Utils group */ + +/** @} */ /* end of General Utils group */ + +#endif /* __ERROR_EXT_H */ + + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/etc/list_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/etc/list_ext.h @@ -0,0 +1,357 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + + @File list_ext.h + + @Description External prototypes for list.c +*//***************************************************************************/ + +#ifndef __LIST_EXT_H +#define __LIST_EXT_H + + +#include "std_ext.h" + + +/**************************************************************************//** + @Group etc_id Utility Library Application Programming Interface + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group list_id List + + @Description List module functions,definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Description List structure. +*//***************************************************************************/ +typedef struct List +{ + struct List *p_Next; /**< A pointer to the next list object */ + struct List *p_Prev; /**< A pointer to the previous list object */ +} t_List; + + +/**************************************************************************//** + @Function LIST_FIRST/LIST_LAST/LIST_NEXT/LIST_PREV + + @Description Macro to get first/last/next/previous entry in a list. 
+ + @Param[in] p_List - A pointer to a list. +*//***************************************************************************/ +#define LIST_FIRST(p_List) (p_List)->p_Next +#define LIST_LAST(p_List) (p_List)->p_Prev +#define LIST_NEXT LIST_FIRST +#define LIST_PREV LIST_LAST + + +/**************************************************************************//** + @Function LIST_INIT + + @Description Macro for initialization of a list struct. + + @Param[in] lst - The t_List object to initialize. +*//***************************************************************************/ +#define LIST_INIT(lst) {&(lst), &(lst)} + + +/**************************************************************************//** + @Function LIST + + @Description Macro to declare of a list. + + @Param[in] listName - The list object name. +*//***************************************************************************/ +#define LIST(listName) t_List listName = LIST_INIT(listName) + + +/**************************************************************************//** + @Function INIT_LIST + + @Description Macro to initialize a list pointer. + + @Param[in] p_List - The list pointer. +*//***************************************************************************/ +#define INIT_LIST(p_List) LIST_FIRST(p_List) = LIST_LAST(p_List) = (p_List) + + +/**************************************************************************//** + @Function LIST_OBJECT + + @Description Macro to get the struct (object) for this entry. + + @Param[in] type - The type of the struct (object) this list is embedded in. + @Param[in] member - The name of the t_List object within the struct. + + @Return The structure pointer for this entry. +*//***************************************************************************/ +#define MEMBER_OFFSET(type, member) (PTR_TO_UINT(&((type *)0)->member)) +#define LIST_OBJECT(p_List, type, member) \ + ((type *)((char *)(p_List)-MEMBER_OFFSET(type, member))) + + +/**************************************************************************//** + @Function LIST_FOR_EACH + + @Description Macro to iterate over a list. + + @Param[in] p_Pos - A pointer to a list to use as a loop counter. + @Param[in] p_Head - A pointer to the head for your list pointer. + + @Cautions You can't delete items with this routine. + For deletion use LIST_FOR_EACH_SAFE(). +*//***************************************************************************/ +#define LIST_FOR_EACH(p_Pos, p_Head) \ + for (p_Pos = LIST_FIRST(p_Head); p_Pos != (p_Head); p_Pos = LIST_NEXT(p_Pos)) + + +/**************************************************************************//** + @Function LIST_FOR_EACH_SAFE + + @Description Macro to iterate over a list safe against removal of list entry. + + @Param[in] p_Pos - A pointer to a list to use as a loop counter. + @Param[in] p_Tmp - Another pointer to a list to use as temporary storage. + @Param[in] p_Head - A pointer to the head for your list pointer. +*//***************************************************************************/ +#define LIST_FOR_EACH_SAFE(p_Pos, p_Tmp, p_Head) \ + for (p_Pos = LIST_FIRST(p_Head), p_Tmp = LIST_FIRST(p_Pos); \ + p_Pos != (p_Head); \ + p_Pos = p_Tmp, p_Tmp = LIST_NEXT(p_Pos)) + + +/**************************************************************************//** + @Function LIST_FOR_EACH_OBJECT_SAFE + + @Description Macro to iterate over list of given type safely. + + @Param[in] p_Pos - A pointer to a list to use as a loop counter. + @Param[in] p_Tmp - Another pointer to a list to use as temporary storage. 
+ @Param[in] type - The type of the struct this is embedded in. + @Param[in] p_Head - A pointer to the head for your list pointer. + @Param[in] member - The name of the list_struct within the struct. + + @Cautions You can't delete items with this routine. + For deletion use LIST_FOR_EACH_SAFE(). +*//***************************************************************************/ +#define LIST_FOR_EACH_OBJECT_SAFE(p_Pos, p_Tmp, p_Head, type, member) \ + for (p_Pos = LIST_OBJECT(LIST_FIRST(p_Head), type, member), \ + p_Tmp = LIST_OBJECT(LIST_FIRST(&p_Pos->member), type, member); \ + &p_Pos->member != (p_Head); \ + p_Pos = p_Tmp, \ + p_Tmp = LIST_OBJECT(LIST_FIRST(&p_Pos->member), type, member)) + +/**************************************************************************//** + @Function LIST_FOR_EACH_OBJECT + + @Description Macro to iterate over list of given type. + + @Param[in] p_Pos - A pointer to a list to use as a loop counter. + @Param[in] type - The type of the struct this is embedded in. + @Param[in] p_Head - A pointer to the head for your list pointer. + @Param[in] member - The name of the list_struct within the struct. + + @Cautions You can't delete items with this routine. + For deletion use LIST_FOR_EACH_SAFE(). +*//***************************************************************************/ +#define LIST_FOR_EACH_OBJECT(p_Pos, type, p_Head, member) \ + for (p_Pos = LIST_OBJECT(LIST_FIRST(p_Head), type, member); \ + &p_Pos->member != (p_Head); \ + p_Pos = LIST_OBJECT(LIST_FIRST(&(p_Pos->member)), type, member)) + + +/**************************************************************************//** + @Function LIST_Add + + @Description Add a new entry to a list. + + Insert a new entry after the specified head. + This is good for implementing stacks. + + @Param[in] p_New - A pointer to a new list entry to be added. + @Param[in] p_Head - A pointer to a list head to add it after. + + @Return none. +*//***************************************************************************/ +static __inline__ void LIST_Add(t_List *p_New, t_List *p_Head) +{ + LIST_PREV(LIST_NEXT(p_Head)) = p_New; + LIST_NEXT(p_New) = LIST_NEXT(p_Head); + LIST_PREV(p_New) = p_Head; + LIST_NEXT(p_Head) = p_New; +} + + +/**************************************************************************//** + @Function LIST_AddToTail + + @Description Add a new entry to a list. + + Insert a new entry before the specified head. + This is useful for implementing queues. + + @Param[in] p_New - A pointer to a new list entry to be added. + @Param[in] p_Head - A pointer to a list head to add it after. + + @Return none. +*//***************************************************************************/ +static __inline__ void LIST_AddToTail(t_List *p_New, t_List *p_Head) +{ + LIST_NEXT(LIST_PREV(p_Head)) = p_New; + LIST_PREV(p_New) = LIST_PREV(p_Head); + LIST_NEXT(p_New) = p_Head; + LIST_PREV(p_Head) = p_New; +} + + +/**************************************************************************//** + @Function LIST_Del + + @Description Deletes entry from a list. + + @Param[in] p_Entry - A pointer to the element to delete from the list. + + @Return none. + + @Cautions LIST_IsEmpty() on entry does not return true after this, + the entry is in an undefined state. 
+*//***************************************************************************/ +static __inline__ void LIST_Del(t_List *p_Entry) +{ + LIST_PREV(LIST_NEXT(p_Entry)) = LIST_PREV(p_Entry); + LIST_NEXT(LIST_PREV(p_Entry)) = LIST_NEXT(p_Entry); +} + + +/**************************************************************************//** + @Function LIST_DelAndInit + + @Description Deletes entry from list and reinitialize it. + + @Param[in] p_Entry - A pointer to the element to delete from the list. + + @Return none. +*//***************************************************************************/ +static __inline__ void LIST_DelAndInit(t_List *p_Entry) +{ + LIST_Del(p_Entry); + INIT_LIST(p_Entry); +} + + +/**************************************************************************//** + @Function LIST_Move + + @Description Delete from one list and add as another's head. + + @Param[in] p_Entry - A pointer to the list entry to move. + @Param[in] p_Head - A pointer to the list head that will precede our entry. + + @Return none. +*//***************************************************************************/ +static __inline__ void LIST_Move(t_List *p_Entry, t_List *p_Head) +{ + LIST_Del(p_Entry); + LIST_Add(p_Entry, p_Head); +} + + +/**************************************************************************//** + @Function LIST_MoveToTail + + @Description Delete from one list and add as another's tail. + + @Param[in] p_Entry - A pointer to the entry to move. + @Param[in] p_Head - A pointer to the list head that will follow our entry. + + @Return none. +*//***************************************************************************/ +static __inline__ void LIST_MoveToTail(t_List *p_Entry, t_List *p_Head) +{ + LIST_Del(p_Entry); + LIST_AddToTail(p_Entry, p_Head); +} + + +/**************************************************************************//** + @Function LIST_IsEmpty + + @Description Tests whether a list is empty. + + @Param[in] p_List - A pointer to the list to test. + + @Return 1 if the list is empty, 0 otherwise. +*//***************************************************************************/ +static __inline__ int LIST_IsEmpty(t_List *p_List) +{ + return (LIST_FIRST(p_List) == p_List); +} + + +/**************************************************************************//** + @Function LIST_Append + + @Description Join two lists. + + @Param[in] p_NewList - A pointer to the new list to add. + @Param[in] p_Head - A pointer to the place to add it in the first list. + + @Return none. +*//***************************************************************************/ +void LIST_Append(t_List *p_NewList, t_List *p_Head); + + +/**************************************************************************//** + @Function LIST_NumOfObjs + + @Description Counts number of objects in the list + + @Param[in] p_List - A pointer to the list which objects are to be counted. + + @Return Number of objects in the list. +*//***************************************************************************/ +int LIST_NumOfObjs(t_List *p_List); + +/** @} */ /* end of list_id group */ +/** @} */ /* end of etc_id group */ + + +#endif /* __LIST_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/etc/mem_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/etc/mem_ext.h @@ -0,0 +1,317 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
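(Editorial note, not part of the patch: a brief sketch of the list API above. The item type is invented; a list head is declared statically with the LIST() macro and can be tested with LIST_IsEmpty().)

#include "etc/list_ext.h"

/* Invented item type with an embedded t_List link. */
typedef struct
{
    uint32_t id;
    t_List   node;
} t_ExampleItem;

static LIST(exampleQueue);                          /* statically initialized head */

static void ExampleEnqueue(t_ExampleItem *p_Item)
{
    LIST_AddToTail(&p_Item->node, &exampleQueue);   /* FIFO order */
}

static uint32_t ExampleSumIds(void)
{
    t_ExampleItem *p_Item;
    uint32_t      sum = 0;

    LIST_FOR_EACH_OBJECT(p_Item, t_ExampleItem, &exampleQueue, node)
        sum += p_Item->id;

    return sum;
}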
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + + @File mem_ext.h + + @Description External prototypes for the memory manager object +*//***************************************************************************/ + +#ifndef __MEM_EXT_H +#define __MEM_EXT_H + +#include "std_ext.h" +#include "part_ext.h" + + +/**************************************************************************//** + @Group etc_id Utility Library Application Programming Interface + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group mem_id Slab Memory Manager + + @Description Slab Memory Manager module functions, definitions and enums. + + @{ +*//***************************************************************************/ + +/* Each block is of the following structure: + * + * + * +-----------+----------+---------------------------+-----------+-----------+ + * | Alignment | Prefix | Data | Postfix | Alignment | + * | field | field | field | field | Padding | + * | | | | | | + * +-----------+----------+---------------------------+-----------+-----------+ + * and at the beginning of all bytes, an additional optional padding might reside + * to ensure that the first blocks data field is aligned as requested. 
+ */ + + +#define MEM_MAX_NAME_LENGTH 8 + +/**************************************************************************//* + @Description Memory Segment structure +*//***************************************************************************/ + +typedef struct +{ + char name[MEM_MAX_NAME_LENGTH]; + /* The segment's name */ + uint8_t **p_Bases; /* Base addresses of the segments */ + uint8_t **p_BlocksStack; /* Array of pointers to blocks */ + t_Handle h_Spinlock; + uint16_t dataSize; /* Size of each data block */ + uint16_t prefixSize; /* How many bytes to reserve before the data */ + uint16_t postfixSize; /* How many bytes to reserve after the data */ + uint16_t alignment; /* Requested alignment for the data field */ + int allocOwner; /* Memory allocation owner */ + uint32_t getFailures; /* Number of times get failed */ + uint32_t num; /* Number of blocks in segment */ + uint32_t current; /* Current block */ + bool consecutiveMem; /* Allocate consecutive data blocks memory */ +#ifdef DEBUG_MEM_LEAKS + void *p_MemDbg; /* MEM debug database (MEM leaks detection) */ + uint32_t blockOffset; + uint32_t blockSize; +#endif /* DEBUG_MEM_LEAKS */ +} t_MemorySegment; + + + +/**************************************************************************//** + @Function MEM_Init + + @Description Create a new memory segment. + + @Param[in] name - Name of memory partition. + @Param[in] p_Handle - Handle to new segment is returned through here. + @Param[in] num - Number of blocks in new segment. + @Param[in] dataSize - Size of blocks in segment. + @Param[in] prefixSize - How many bytes to allocate before the data. + @Param[in] postfixSize - How many bytes to allocate after the data. + @Param[in] alignment - Requested alignment for data field (in bytes). + + @Return E_OK - success, E_NO_MEMORY - out of memory. +*//***************************************************************************/ +t_Error MEM_Init(char name[], + t_Handle *p_Handle, + uint32_t num, + uint16_t dataSize, + uint16_t prefixSize, + uint16_t postfixSize, + uint16_t alignment); + +/**************************************************************************//** + @Function MEM_InitSmart + + @Description Create a new memory segment. + + @Param[in] name - Name of memory partition. + @Param[in] p_Handle - Handle to new segment is returned through here. + @Param[in] num - Number of blocks in new segment. + @Param[in] dataSize - Size of blocks in segment. + @Param[in] prefixSize - How many bytes to allocate before the data. + @Param[in] postfixSize - How many bytes to allocate after the data. + @Param[in] alignment - Requested alignment for data field (in bytes). + @Param[in] memPartitionId - Memory partition ID for allocation. + @Param[in] consecutiveMem - Whether to allocate the memory blocks + continuously or not. + + @Return E_OK - success, E_NO_MEMORY - out of memory. +*//***************************************************************************/ +t_Error MEM_InitSmart(char name[], + t_Handle *p_Handle, + uint32_t num, + uint16_t dataSize, + uint16_t prefixSize, + uint16_t postfixSize, + uint16_t alignment, + uint8_t memPartitionId, + bool consecutiveMem); + +/**************************************************************************//** + @Function MEM_InitByAddress + + @Description Create a new memory segment with a specified base address. + + @Param[in] name - Name of memory partition. + @Param[in] p_Handle - Handle to new segment is returned through here. + @Param[in] num - Number of blocks in new segment. 
+ @Param[in] dataSize - Size of blocks in segment. + @Param[in] prefixSize - How many bytes to allocate before the data. + @Param[in] postfixSize - How many bytes to allocate after the data. + @Param[in] alignment - Requested alignment for data field (in bytes). + @Param[in] address - The required base address. + + @Return E_OK - success, E_NO_MEMORY - out of memory. + *//***************************************************************************/ +t_Error MEM_InitByAddress(char name[], + t_Handle *p_Handle, + uint32_t num, + uint16_t dataSize, + uint16_t prefixSize, + uint16_t postfixSize, + uint16_t alignment, + uint8_t *address); + +/**************************************************************************//** + @Function MEM_Free + + @Description Free a specific memory segment. + + @Param[in] h_Mem - Handle to memory segment. + + @Return None. +*//***************************************************************************/ +void MEM_Free(t_Handle h_Mem); + +/**************************************************************************//** + @Function MEM_Get + + @Description Get a block of memory from a segment. + + @Param[in] h_Mem - Handle to memory segment. + + @Return Pointer to new memory block on success,0 otherwise. +*//***************************************************************************/ +void * MEM_Get(t_Handle h_Mem); + +/**************************************************************************//** + @Function MEM_GetN + + @Description Get up to N blocks of memory from a segment. + + The blocks are assumed to be of a fixed size (one size per segment). + + @Param[in] h_Mem - Handle to memory segment. + @Param[in] num - Number of blocks to allocate. + @Param[out] array - Array of at least num pointers to which the addresses + of the allocated blocks are written. + + @Return The number of blocks actually allocated. + + @Cautions Interrupts are disabled for all of the allocation loop. + Although this loop is very short for each block (several machine + instructions), you should not allocate a very large number + of blocks via this routine. +*//***************************************************************************/ +uint16_t MEM_GetN(t_Handle h_Mem, uint32_t num, void *array[]); + +/**************************************************************************//** + @Function MEM_Put + + @Description Put a block of memory back to a segment. + + @Param[in] h_Mem - Handle to memory segment. + @Param[in] p_Block - The block to return. + + @Return Pointer to new memory block on success,0 otherwise. +*//***************************************************************************/ +t_Error MEM_Put(t_Handle h_Mem, void *p_Block); + +/**************************************************************************//** + @Function MEM_ComputePartitionSize + + @Description calculate a tight upper boundary of the size of a partition with + given attributes. + + The returned value is suitable if one wants to use MEM_InitByAddress(). + + @Param[in] num - The number of blocks in the segment. + @Param[in] dataSize - Size of block to get. + @Param[in] prefixSize - The prefix size + @Param postfixSize - The postfix size + @Param[in] alignment - The requested alignment value (in bytes) + + @Return The memory block size a segment with the given attributes needs. 
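+
+                A typical (illustrative) pairing with MEM_InitByAddress(),
+                assuming the application provides its own buffer of the
+                computed size:
+
+                    size = MEM_ComputePartitionSize(num, dataSize,
+                                                    prefixSize, postfixSize,
+                                                    alignment);
+                    /* obtain 'address' = a buffer of 'size' bytes */
+                    MEM_InitByAddress(name, &h_Mem, num, dataSize,
+                                      prefixSize, postfixSize, alignment,
+                                      address);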
+*//***************************************************************************/ +uint32_t MEM_ComputePartitionSize(uint32_t num, + uint16_t dataSize, + uint16_t prefixSize, + uint16_t postfixSize, + uint16_t alignment); + +#ifdef DEBUG_MEM_LEAKS +#if !(defined(__MWERKS__) && (__dest_os == __ppc_eabi)) +#error "Memory-Leaks-Debug option is supported only for freescale CodeWarrior" +#endif /* !(defined(__MWERKS__) && ... */ + +/**************************************************************************//** + @Function MEM_CheckLeaks + + @Description Report MEM object leaks. + + This routine is automatically called by the MEM_Free() routine, + but it can also be invoked while the MEM object is alive. + + @Param[in] h_Mem - Handle to memory segment. + + @Return None. +*//***************************************************************************/ +void MEM_CheckLeaks(t_Handle h_Mem); + +#else /* not DEBUG_MEM_LEAKS */ +#define MEM_CheckLeaks(h_Mem) +#endif /* not DEBUG_MEM_LEAKS */ + +/**************************************************************************//** + @Description Get base of MEM +*//***************************************************************************/ +#define MEM_GetBase(h_Mem) ((t_MemorySegment *)(h_Mem))->p_Bases[0] + +/**************************************************************************//** + @Description Get size of MEM block +*//***************************************************************************/ +#define MEM_GetSize(h_Mem) ((t_MemorySegment *)(h_Mem))->dataSize + +/**************************************************************************//** + @Description Get prefix size of MEM block +*//***************************************************************************/ +#define MEM_GetPrefixSize(h_Mem) ((t_MemorySegment *)(h_Mem))->prefixSize + +/**************************************************************************//** + @Description Get postfix size of MEM block +*//***************************************************************************/ +#define MEM_GetPostfixSize(h_Mem) ((t_MemorySegment *)(h_Mem))->postfixSize + +/**************************************************************************//** + @Description Get alignment of MEM block (in bytes) +*//***************************************************************************/ +#define MEM_GetAlignment(h_Mem) ((t_MemorySegment *)(h_Mem))->alignment + +/**************************************************************************//** + @Description Get the number of blocks in the segment +*//***************************************************************************/ +#define MEM_GetNumOfBlocks(h_Mem) ((t_MemorySegment *)(h_Mem))->num + +/** @} */ /* end of MEM group */ +/** @} */ /* end of etc_id group */ + + +#endif /* __MEM_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/etc/memcpy_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/etc/memcpy_ext.h @@ -0,0 +1,173 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + + @File memcpy_ext.h + + @Description Efficient functions for copying and setting blocks of memory. +*//***************************************************************************/ + +#ifndef __MEMCPY_EXT_H +#define __MEMCPY_EXT_H + +#include "std_ext.h" + + +/**************************************************************************//** + @Group etc_id Utility Library Application Programming Interface + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group mem_cpy Memory Copy + + @Description Memory Copy module functions,definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function MemCpy32 + + @Description Copies one memory buffer into another one in 4-byte chunks! + Which should be more efficient than byte by byte. + + For large buffers (over 60 bytes) this function is about 4 times + more efficient than the trivial memory copy. For short buffers + it is reduced to the trivial copy and may be a bit worse. + + @Param[in] pDst - The address of the destination buffer. + @Param[in] pSrc - The address of the source buffer. + @Param[in] size - The number of bytes that will be copied from pSrc to pDst. + + @Return pDst (the address of the destination buffer). + + @Cautions There is no parameter or boundary checking! It is up to the user + to supply non-null parameters as source & destination and size + that actually fits into the destination buffer. +*//***************************************************************************/ +void * MemCpy32(void* pDst,void* pSrc, uint32_t size); +void * IO2IOCpy32(void* pDst,void* pSrc, uint32_t size); +void * IO2MemCpy32(void* pDst,void* pSrc, uint32_t size); +void * Mem2IOCpy32(void* pDst,void* pSrc, uint32_t size); + +/**************************************************************************//** + @Function MemCpy64 + + @Description Copies one memory buffer into another one in 8-byte chunks! + Which should be more efficient than byte by byte. 
+ + For large buffers (over 60 bytes) this function is about 8 times + more efficient than the trivial memory copy. For short buffers + it is reduced to the trivial copy and may be a bit worse. + + Some testing suggests that MemCpy32() preforms better than + MemCpy64() over small buffers. On average they break even at + 100 byte buffers. For buffers larger than that MemCpy64 is + superior. + + @Param[in] pDst - The address of the destination buffer. + @Param[in] pSrc - The address of the source buffer. + @Param[in] size - The number of bytes that will be copied from pSrc to pDst. + + @Return pDst (the address of the destination buffer). + + @Cautions There is no parameter or boundary checking! It is up to the user + to supply non null parameters as source & destination and size + that actually fits into their buffer. + + Do not use under Linux. +*//***************************************************************************/ +void * MemCpy64(void* pDst,void* pSrc, uint32_t size); + +/**************************************************************************//** + @Function MemSet32 + + @Description Sets all bytes of a memory buffer to a specific value, in + 4-byte chunks. + + @Param[in] pDst - The address of the destination buffer. + @Param[in] val - Value to set destination bytes to. + @Param[in] size - The number of bytes that will be set to val. + + @Return pDst (the address of the destination buffer). + + @Cautions There is no parameter or boundary checking! It is up to the user + to supply non null parameter as destination and size + that actually fits into the destination buffer. +*//***************************************************************************/ +void * MemSet32(void* pDst, uint8_t val, uint32_t size); +void * IOMemSet32(void* pDst, uint8_t val, uint32_t size); + +/**************************************************************************//** + @Function MemSet64 + + @Description Sets all bytes of a memory buffer to a specific value, in + 8-byte chunks. + + @Param[in] pDst - The address of the destination buffer. + @Param[in] val - Value to set destination bytes to. + @Param[in] size - The number of bytes that will be set to val. + + @Return pDst (the address of the destination buffer). + + @Cautions There is no parameter or boundary checking! It is up to the user + to supply non null parameter as destination and size + that actually fits into the destination buffer. +*//***************************************************************************/ +void * MemSet64(void* pDst, uint8_t val, uint32_t size); + +/**************************************************************************//** + @Function MemDisp + + @Description Displays a block of memory in chunks of 32 bits. + + @Param[in] addr - The address of the memory to display. + @Param[in] size - The number of bytes that will be displayed. + + @Return None. + + @Cautions There is no parameter or boundary checking! It is up to the user + to supply non null parameter as destination and size + that actually fits into the destination buffer. +*//***************************************************************************/ +void MemDisp(uint8_t *addr, int size); + +/** @} */ /* end of mem_cpy group */ +/** @} */ /* end of etc_id group */ + + +#endif /* __MEMCPY_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/etc/mm_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/etc/mm_ext.h @@ -0,0 +1,300 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + /**************************************************************************//** + + @File mm_ext.h + + @Description Memory Manager Application Programming Interface +*//***************************************************************************/ +#ifndef __MM_EXT +#define __MM_EXT + +#include "std_ext.h" + +#define MM_MAX_ALIGNMENT 20 /* Alignments from 2 to 128 are available + where maximum alignment defined as + MM_MAX_ALIGNMENT power of 2 */ + +#define MM_MAX_NAME_LEN 32 + +/**************************************************************************//** + @Group etc_id Utility Library Application Programming Interface + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group mm_grp Flexible Memory Manager + + @Description Flexible Memory Manager module functions,definitions and enums. + (All of the following functions,definitions and enums can be found in mm_ext.h) + + @{ +*//***************************************************************************/ + + +/**************************************************************************//** + @Function MM_Init + + @Description Initializes a new MM object. + + It initializes a new memory block consisting of base address + and size of the available memory by calling to MemBlock_Init + routine. It is also initializes a new free block for each + by calling FreeBlock_Init routine, which is pointed to + the almost all memory started from the required alignment + from the base address and to the end of the memory. + The handle to the new MM object is returned via "MM" + argument (passed by reference). + + @Param[in] h_MM - Handle to the MM object. 
+ @Param[in]     base  - Base address of the MM.
+ @Param[in]     size  - Size of the MM.
+
+ @Return        E_OK is returned on success. E_NOMEMORY is returned if the
+                new MM object or a new free block cannot be initialized.
+*//***************************************************************************/
+t_Error MM_Init(t_Handle *h_MM, uint64_t base, uint64_t size);
+
+/**************************************************************************//**
+ @Function      MM_Get
+
+ @Description   Allocates a block of memory according to the given size and
+                alignment.
+
+                The alignment argument determines from which free list the
+                block is allocated. 2^alignment indicates the alignment that
+                the base address of the allocated block should have, so only
+                the values 1, 2, 4, 8, 16, 32 and 64 are available for the
+                alignment argument.
+                The routine walks the free list for that alignment and picks
+                the first block that has enough memory for the request
+                (best fit).
+                After the block is found and the data is allocated, the
+                internal MM_CutFree routine is called to update all free
+                lists so that they no longer include the just-allocated
+                block (each free list contains free blocks with the same
+                alignment). A busy block holding information about the
+                allocated block is also created.
+
+ @Param[in]     h_MM      - Handle to the MM object.
+ @Param[in]     size      - Size of the requested block.
+ @Param[in]     alignment - Requested alignment of the block (in bytes);
+                            should be 1, 2, 4, 8, 16, 32 or 64.
+ @Param[in]     name      - The name that identifies the allocated block.
+
+ @Return        Base address of the allocated block, or ILLEGAL_BASE if a
+                block cannot be allocated.
+*//***************************************************************************/
+uint64_t MM_Get(t_Handle h_MM, uint64_t size, uint64_t alignment, char *name);
+
+/**************************************************************************//**
+ @Function      MM_GetBase
+
+ @Description   Gets the base address of the MM object.
+
+ @Param[in]     h_MM - Handle to the MM object.
+
+ @Return        Base address of the block.
+*//***************************************************************************/
+uint64_t MM_GetBase(t_Handle h_MM);
+
+/**************************************************************************//**
+ @Function      MM_GetForce
+
+ @Description   Forces a memory allocation.
+
+                A block of memory of the given size is allocated from the
+                given base address. The routine checks that the required
+                block can be allocated (i.e. that it is free) and then calls
+                the internal MM_CutFree routine to update all free lists so
+                that they no longer include that block.
+
+ @Param[in]     h_MM    - Handle to the MM object.
+ @Param[in]     base    - Base address of the requested block.
+ @Param[in]     size    - Size of the requested block.
+ @Param[in]     name    - Name that identifies the allocated block.
+
+ @Return        Base address of the allocated block, or ILLEGAL_BASE if a
+                block cannot be allocated.
+*//***************************************************************************/
+uint64_t MM_GetForce(t_Handle h_MM, uint64_t base, uint64_t size, char *name);
+
+/**************************************************************************//**
+ @Function      MM_GetForceMin
+
+ @Description   Allocates a block of memory according to the given size,
+                alignment and minimum base address.
+
+                The alignment argument determines from which free list the
+                block is allocated. 2^alignment indicates the alignment that
+                the base address of the allocated block should have, so only
+                the values 1, 2, 4, 8, 16, 32 and 64 are available for the
+                alignment argument.
+                The minimum base address forces the block to be located at
+                the given address or above it.
+                The routine walks the free list for that alignment and looks
+                for the first free block whose base address is equal to or
+                smaller than the required minimum address and whose end
+                address is larger than the required base plus the requested
+                size - i.e. a free block that may contain the required block.
+                After the block is found and the data is allocated, the
+                internal MM_CutFree routine is called to update all free
+                lists so that they no longer include the just-allocated
+                block (each free list contains free blocks with the same
+                alignment). A busy block holding information about the
+                allocated block is also created.
+
+ @Param[in]     h_MM      - Handle to the MM object.
+ @Param[in]     size      - Size of the requested block.
+ @Param[in]     alignment - Requested alignment of the block (in bytes);
+                            should be 1, 2, 4, 8, 16, 32 or 64.
+ @Param[in]     min       - The minimum base address of the block.
+ @Param[in]     name      - Name that identifies the allocated block.
+
+ @Return        Base address of the allocated block, or ILLEGAL_BASE if a
+                block cannot be allocated.
+*//***************************************************************************/
+uint64_t MM_GetForceMin(t_Handle h_MM,
+                        uint64_t size,
+                        uint64_t alignment,
+                        uint64_t min,
+                        char *name);
+
+/**************************************************************************//**
+ @Function      MM_Put
+
+ @Description   Returns a block of memory with the given base address back to
+                the memory pool.
+
+                The routine checks whether there is a busy block with the
+                given base address. If not, it returns 0, meaning the block
+                could not be freed. Otherwise, it reads the parameters of the
+                busy block, updates the lists of free blocks, and removes the
+                busy block from the busy list by calling the MM_CutBusy
+                routine. It then calls the MM_AddFree routine to add the new
+                free block to the free lists.
+
+ @Param[in]     h_MM    - Handle to the MM object.
+ @Param[in]     base    - Base address of the block to release.
+
+ @Return        The number of bytes released, or 0 on failure.
+*//***************************************************************************/
+uint64_t MM_Put(t_Handle h_MM, uint64_t base);
+
+/**************************************************************************//**
+ @Function      MM_PutForce
+
+ @Description   Releases a block of memory of the required size from the
+                required base address.
+
+                First, the MM_CutBusy routine is called to cut the block from
+                the busy list; then the MM_AddFree routine is called to add
+                the freed block to the free lists.
+
+ @Param[in]     h_MM    - Handle to the MM object.
+ @Param[in]     base    - Base address of the block to free.
+ @Param[in]     size    - Size of the block to free.
+
+ @Return        The number of bytes released, or 0 on failure.
+*//***************************************************************************/
+uint64_t MM_PutForce(t_Handle h_MM, uint64_t base, uint64_t size);
+
+/**************************************************************************//**
+ @Function      MM_Add
+
+ @Description   Adds a new memory block for memory allocation.
+
+                When a new memory block is initialized and added to the
+                memory list, the MM_AddFree routine is called to add the new
+                free block to the free lists.
+
+ @Param[in]     h_MM    - Handle to the MM object.
+ @Param[in]     base    - Base address of the memory block.
+ @Param[in]     size    - Size of the memory block.
+
+ @Return        E_OK on success, otherwise an error code.
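+
+                A minimal usage sketch (for illustration only), using just
+                the routines declared in this file:
+
+                    MM_Init(&h_MM, base0, size0);
+                    MM_Add(h_MM, base1, size1);     /* optional extra block */
+                    addr = MM_Get(h_MM, len, 64, "example");
+                    if (addr != ILLEGAL_BASE)
+                        MM_Put(h_MM, addr);
+                    MM_Free(h_MM);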
+*//***************************************************************************/ +t_Error MM_Add(t_Handle h_MM, uint64_t base, uint64_t size); + +/**************************************************************************//** + @Function MM_Dump + + @Description Prints results of free and busy lists into the file. + + @Param[in] h_MM - Handle to the MM object. + @Param[in] buff - A pointer to a buffer +*//***************************************************************************/ +void MM_Dump(t_Handle h_MM, void *buff); + +/**************************************************************************//** + @Function MM_Free + + @Description Releases memory allocated for MM object. + + @Param[in] h_MM - Handle of the MM object. +*//***************************************************************************/ +void MM_Free(t_Handle h_MM); + +/**************************************************************************//** + @Function MM_GetMemBlock + + @Description Returns base address of the memory block specified by the index. + + If index is 0, returns base address + of the first memory block, 1 - returns base address + of the second memory block, etc. + Note, those memory blocks are allocated by the + application before MM_Init or MM_Add and have to + be released by the application before or after invoking + the MM_Free routine. + + @Param[in] h_MM - Handle to the MM object. + @Param[in] index - Index of the memory block. + + @Return valid base address or ILLEGAL_BASE if no memory block specified by the index. +*//***************************************************************************/ +uint64_t MM_GetMemBlock(t_Handle h_MM, int index); + +/**************************************************************************//** + @Function MM_InRange + + @Description Checks if a specific address is in the memory range of the passed MM object. + + @Param[in] h_MM - Handle to the MM object. + @Param[in] addr - The address to be checked. + + @Return TRUE if the address is in the address range of the block, FALSE otherwise. +*//***************************************************************************/ +bool MM_InRange(t_Handle h_MM, uint64_t addr); + + +/** @} */ /* end of mm_grp group */ +/** @} */ /* end of etc_id group */ + +#endif /* __MM_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/etc/sprint_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/etc/sprint_ext.h @@ -0,0 +1,125 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + + @File sprint_ext.h + + @Description Debug routines (externals). + +*//***************************************************************************/ + +#ifndef __SPRINT_EXT_H +#define __SPRINT_EXT_H + + +#if defined(NCSW_LINUX) && defined(__KERNEL__) +#include + +#elif defined(NCSW_LINUX_USD) +#include +#include "stdarg_ext.h" +#include "std_ext.h" + +extern int vsscanf(const char *, const char *, va_list); + +#elif defined(NCSW_VXWORKS) +#include "private/stdioP.h" + +#else +#include +#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */ + +#include "std_ext.h" + + +/**************************************************************************//** + @Group etc_id Utility Library Application Programming Interface + + @Description External routines. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group sprint_id Sprint + + @Description Sprint & Sscan module functions,definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Function Sprint + + @Description Format a string and place it in a buffer. + + @Param[in] buff - The buffer to place the result into. + @Param[in] str - The format string to use. + @Param[in] ... - Arguments for the format string. + + @Return Number of bytes formatted. +*//***************************************************************************/ +int Sprint(char *buff, const char *str, ...); + +/**************************************************************************//** + @Function Snprint + + @Description Format a string and place it in a buffer. + + @Param[in] buf - The buffer to place the result into. + @Param[in] size - The size of the buffer, including the trailing null space. + @Param[in] fmt - The format string to use. + @Param[in] ... - Arguments for the format string. + + @Return Number of bytes formatted. +*//***************************************************************************/ +int Snprint(char * buf, uint32_t size, const char *fmt, ...); + +/**************************************************************************//** + @Function Sscan + + @Description Unformat a buffer into a list of arguments. + + @Param[in] buf - input buffer. + @Param[in] fmt - formatting of buffer. + @Param[out] ... - resulting arguments. + + @Return Number of bytes unformatted. 
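+
+                Illustrative round trip with Sprint() (buffer size and format
+                string are examples only; semantics follow the standard
+                sprintf/sscanf conventions):
+
+                    char buf[32];
+                    int  n;
+                    Sprint(buf, "count=%d", 5);
+                    Sscan(buf, "count=%d", &n);   /* n == 5 */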
+*//***************************************************************************/ +int Sscan(const char * buf, const char * fmt, ...); + +/** @} */ /* end of sprint_id group */ +/** @} */ /* end of etc_id group */ + + +#endif /* __SPRINT_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/integrations/P1023/dpaa_integration_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/integrations/P1023/dpaa_integration_ext.h @@ -0,0 +1,328 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + + @File dpaa_integration_ext.h + + @Description P1023 FM external definitions and structures. 
+*//***************************************************************************/ +#ifndef __DPAA_INTEGRATION_EXT_H +#define __DPAA_INTEGRATION_EXT_H + +#include "std_ext.h" + + +typedef enum e_DpaaSwPortal { + e_DPAA_SWPORTAL0 = 0, + e_DPAA_SWPORTAL1, + e_DPAA_SWPORTAL2 +} e_DpaaSwPortal; + +typedef enum { + e_DPAA_DCPORTAL0 = 0, + e_DPAA_DCPORTAL1, + e_DPAA_DCPORTAL2, + e_DPAA_DCPORTAL3 +} e_DpaaDcPortal; + +#define DPAA_MAX_NUM_OF_SW_PORTALS 3 +#define DPAA_MAX_NUM_OF_DC_PORTALS 3 + +/***************************************************************************** + QMAN INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define QM_MAX_NUM_OF_POOL_CHANNELS 3 +#define QM_MAX_NUM_OF_WQ 8 +#define QM_MAX_NUM_OF_SWP_AS 2 +#define QM_MAX_NUM_OF_CGS 64 +#define QM_MAX_NUM_OF_FQIDS (16*MEGABYTE) + +typedef enum { + e_QM_FQ_CHANNEL_SWPORTAL0 = 0, + e_QM_FQ_CHANNEL_SWPORTAL1, + e_QM_FQ_CHANNEL_SWPORTAL2, + + e_QM_FQ_CHANNEL_POOL1 = 0x21, + e_QM_FQ_CHANNEL_POOL2, + e_QM_FQ_CHANNEL_POOL3, + + e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x40, + e_QM_FQ_CHANNEL_FMAN0_SP1, + e_QM_FQ_CHANNEL_FMAN0_SP2, + e_QM_FQ_CHANNEL_FMAN0_SP3, + e_QM_FQ_CHANNEL_FMAN0_SP4, + e_QM_FQ_CHANNEL_FMAN0_SP5, + e_QM_FQ_CHANNEL_FMAN0_SP6, + + + e_QM_FQ_CHANNEL_CAAM = 0x80 +} e_QmFQChannel; + +/***************************************************************************** + BMAN INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define BM_MAX_NUM_OF_POOLS 8 + +/***************************************************************************** + FM INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define INTG_MAX_NUM_OF_FM 1 + +/* Ports defines */ +#define FM_MAX_NUM_OF_1G_RX_PORTS 2 +#define FM_MAX_NUM_OF_10G_RX_PORTS 0 +#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS+FM_MAX_NUM_OF_1G_RX_PORTS) +#define FM_MAX_NUM_OF_1G_TX_PORTS 2 +#define FM_MAX_NUM_OF_10G_TX_PORTS 0 +#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS+FM_MAX_NUM_OF_1G_TX_PORTS) +#define FM_MAX_NUM_OF_OH_PORTS 5 +#define FM_MAX_NUM_OF_1G_MACS (FM_MAX_NUM_OF_1G_RX_PORTS) +#define FM_MAX_NUM_OF_10G_MACS (FM_MAX_NUM_OF_10G_RX_PORTS) +#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS+FM_MAX_NUM_OF_10G_MACS) +#define FM_MAX_NUM_OF_MACSECS 1 + +#if 0 +#define FM_MACSEC_SUPPORT +#define FM_CAPWAP_SUPPORT +#endif + +#define FM_LOW_END_RESTRICTION /* prevents the use of TX port 1 with OP port 0 */ + +#define FM_PORT_MAX_NUM_OF_EXT_POOLS 4 /**< Number of external BM pools per Rx port */ +#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 2 /**< Number of Offline parsing port external BM pools per Rx port */ +#define FM_PORT_NUM_OF_CONGESTION_GRPS 32 /**< Total number of congestion groups in QM */ +#define FM_MAX_NUM_OF_SUB_PORTALS 7 + +/* Rams defines */ +#define FM_MURAM_SIZE (64*KILOBYTE) +#define FM_IRAM_SIZE (32*KILOBYTE) + +/* PCD defines */ +#define FM_PCD_PLCR_NUM_ENTRIES 32 /**< Total number of policer profiles */ +#define FM_PCD_KG_NUM_OF_SCHEMES 16 /**< Total number of KG schemes */ +#define FM_PCD_MAX_NUM_OF_CLS_PLANS 128 /**< Number of classification plan entries. 
*/ + +/* RTC defines */ +#define FM_RTC_NUM_OF_ALARMS 2 +#define FM_RTC_NUM_OF_PERIODIC_PULSES 2 +#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 + +/* QMI defines */ +#define QMI_MAX_NUM_OF_TNUMS 15 +#define MAX_QMI_DEQ_SUBPORTAL 7 + +/* FPM defines */ +#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4 + +/* DMA defines */ +#define DMA_THRESH_MAX_COMMQ 15 +#define DMA_THRESH_MAX_BUF 7 + +/* BMI defines */ +#define BMI_MAX_NUM_OF_TASKS 64 +#define BMI_MAX_NUM_OF_DMAS 16 +#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE) +#define PORT_MAX_WEIGHT 4 + +/**************************************************************************//** + @Description Enum for inter-module interrupts registration +*//***************************************************************************/ +typedef enum e_FmEventModules{ + e_FM_MOD_PRS, /**< Parser event */ + e_FM_MOD_KG, /**< Keygen event */ + e_FM_MOD_PLCR, /**< Policer event */ + e_FM_MOD_10G_MAC, /**< 10G MAC error event */ + e_FM_MOD_1G_MAC, /**< 1G MAC error event */ + e_FM_MOD_TMR, /**< Timer event */ + e_FM_MOD_1G_MAC_TMR, /**< 1G MAC Timer event */ + e_FM_MOD_FMAN_CTRL, /**< FMAN Controller Timer event */ + e_FM_MOD_MACSEC, + e_FM_MOD_DUMMY_LAST +} e_FmEventModules; + +/**************************************************************************//** + @Description Enum for interrupts types +*//***************************************************************************/ +typedef enum e_FmIntrType { + e_FM_INTR_TYPE_ERR, + e_FM_INTR_TYPE_NORMAL +} e_FmIntrType; + +/**************************************************************************//** + @Description Enum for inter-module interrupts registration +*//***************************************************************************/ +typedef enum e_FmInterModuleEvent { + e_FM_EV_PRS, /**< Parser event */ + e_FM_EV_ERR_PRS, /**< Parser error event */ + e_FM_EV_KG, /**< Keygen event */ + e_FM_EV_ERR_KG, /**< Keygen error event */ + e_FM_EV_PLCR, /**< Policer event */ + e_FM_EV_ERR_PLCR, /**< Policer error event */ + e_FM_EV_ERR_10G_MAC0, /**< 10G MAC 0 error event */ + e_FM_EV_ERR_1G_MAC0, /**< 1G MAC 0 error event */ + e_FM_EV_ERR_1G_MAC1, /**< 1G MAC 1 error event */ + e_FM_EV_ERR_1G_MAC2, /**< 1G MAC 2 error event */ + e_FM_EV_ERR_1G_MAC3, /**< 1G MAC 3 error event */ + e_FM_EV_ERR_MACSEC_MAC0, /**< MACSEC MAC 0 error event */ + e_FM_EV_TMR, /**< Timer event */ + e_FM_EV_1G_MAC0_TMR, /**< 1G MAC 0 Timer event */ + e_FM_EV_1G_MAC1_TMR, /**< 1G MAC 1 Timer event */ + e_FM_EV_1G_MAC2_TMR, /**< 1G MAC 2 Timer event */ + e_FM_EV_1G_MAC3_TMR, /**< 1G MAC 3 Timer event */ + e_FM_EV_MACSEC_MAC0, /**< MACSEC MAC 0 event */ + e_FM_EV_FMAN_CTRL_0, /**< Fman controller event 0 */ + e_FM_EV_FMAN_CTRL_1, /**< Fman controller event 1 */ + e_FM_EV_FMAN_CTRL_2, /**< Fman controller event 2 */ + e_FM_EV_FMAN_CTRL_3, /**< Fman controller event 3 */ + e_FM_EV_DUMMY_LAST +} e_FmInterModuleEvent; + +#define GET_FM_MODULE_EVENT(mod, id, intrType, event) \ + switch(mod){ \ + case e_FM_MOD_PRS: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PRS:e_FM_EV_PRS; \ + break; \ + case e_FM_MOD_KG: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_KG:e_FM_EV_DUMMY_LAST; \ + break; \ + case e_FM_MOD_PLCR: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PLCR:e_FM_EV_PLCR; \ + break; \ + case e_FM_MOD_1G_MAC: \ + switch(id){ \ + case(0): event = (intrType == e_FM_INTR_TYPE_ERR) ? 
e_FM_EV_ERR_1G_MAC0:e_FM_EV_DUMMY_LAST; break; \ + case(1): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC1:e_FM_EV_DUMMY_LAST; break; \ + case(2): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC2:e_FM_EV_DUMMY_LAST; break; \ + case(3): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC3:e_FM_EV_DUMMY_LAST; break; \ + } \ + break; \ + case e_FM_MOD_TMR: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST:e_FM_EV_TMR; \ + break; \ + case e_FM_MOD_1G_MAC_TMR: \ + switch(id){ \ + case(0): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST:e_FM_EV_1G_MAC0_TMR; break; \ + case(1): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST:e_FM_EV_1G_MAC1_TMR; break; \ + case(2): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST:e_FM_EV_1G_MAC2_TMR; break; \ + case(3): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST:e_FM_EV_1G_MAC3_TMR; break; \ + } \ + break; \ + case e_FM_MOD_MACSEC: \ + switch(id){ \ + case(0): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_MACSEC_MAC0:e_FM_EV_MACSEC_MAC0; break; \ + } \ + break; \ + case e_FM_MOD_FMAN_CTRL: \ + if (intrType == e_FM_INTR_TYPE_ERR) event = e_FM_EV_DUMMY_LAST; \ + else switch(id){ \ + case(0): event = e_FM_EV_FMAN_CTRL_0; break; \ + case(1): event = e_FM_EV_FMAN_CTRL_1; break; \ + case(2): event = e_FM_EV_FMAN_CTRL_2; break; \ + case(3): event = e_FM_EV_FMAN_CTRL_3; break; \ + } \ + break; \ + default:event = e_FM_EV_DUMMY_LAST; \ + break;} + +/***************************************************************************** + FM MACSEC INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define NUM_OF_RX_SC 16 +#define NUM_OF_TX_SC 16 + +#define NUM_OF_SA_PER_RX_SC 2 +#define NUM_OF_SA_PER_TX_SC 2 + +/**************************************************************************//** + @Description Enum for inter-module interrupts registration +*//***************************************************************************/ + +typedef enum e_FmMacsecEventModules{ + e_FM_MACSEC_MOD_SC_TX, + e_FM_MACSEC_MOD_DUMMY_LAST +} e_FmMacsecEventModules; + +typedef enum e_FmMacsecInterModuleEvent { + e_FM_MACSEC_EV_SC_TX, + e_FM_MACSEC_EV_ERR_SC_TX, + e_FM_MACSEC_EV_DUMMY_LAST +} e_FmMacsecInterModuleEvent; + +#define NUM_OF_INTER_MODULE_EVENTS (NUM_OF_TX_SC * 2) + +#define GET_MACSEC_MODULE_EVENT(mod, id, intrType, event) \ + switch(mod){ \ + case e_FM_MACSEC_MOD_SC_TX: \ + event = (intrType == e_FM_INTR_TYPE_ERR) ? 
\ + e_FM_MACSEC_EV_ERR_SC_TX: \ + e_FM_MACSEC_EV_SC_TX; \ + event += (uint8_t)(2 * id);break; \ + break; \ + default:event = e_FM_MACSEC_EV_DUMMY_LAST; \ + break;} + + +/* 1023 unique features */ +#define FM_QMI_NO_ECC_EXCEPTIONS +#define FM_CSI_CFED_LIMIT +#define FM_PEDANTIC_DMA + +/* FM erratas */ +#define FM_NO_RX_PREAM_ERRATA_DTSECx1 +#define FM_RX_PREAM_4_ERRATA_DTSEC_A001 FM_NO_RX_PREAM_ERRATA_DTSECx1 +#define FM_MAGIC_PACKET_UNRECOGNIZED_ERRATA_DTSEC2 /* No implementation, Out of LLD scope */ + +#define FM_IM_TX_SYNC_SKIP_TNUM_ERRATA_FMAN_A001 /* Implemented by ucode */ +#define FM_HC_DEF_FQID_ONLY_ERRATA_FMAN_A003 /* Implemented by ucode */ +#define FM_IM_TX_SHARED_TNUM_ERRATA_FMAN4 /* Implemented by ucode */ +#define FM_IM_GS_DEADLOCK_ERRATA_FMAN5 /* Implemented by ucode */ +#define FM_IM_DEQ_PIPELINE_DEPTH_ERRATA_FMAN10 /* Implemented by ucode */ +#define FM_CC_GEN6_MISSMATCH_ERRATA_FMAN12 /* Implemented by ucode */ +#define FM_CC_CHANGE_SHARED_TNUM_ERRATA_FMAN13 /* Implemented by ucode */ +#define FM_IM_LARGE_MRBLR_ERRATA_FMAN15 /* Implemented by ucode */ + +/* #define FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 */ + +/* ??? */ +#define FM_GRS_ERRATA_DTSEC_A002 +#define FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 +#define FM_GTS_ERRATA_DTSEC_A004 +#define FM_TX_LOCKUP_ERRATA_DTSEC6 + +#define FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 + +#endif /* __FM_INTEGRATION_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/integrations/P1023/part_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/integrations/P1023/part_ext.h @@ -0,0 +1,81 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/**************************************************************************//** + + @File part_ext.h + + @Description Definitions for the part (integration) module. +*//***************************************************************************/ + +#ifndef __PART_EXT_H +#define __PART_EXT_H + +#include "std_ext.h" +#include "part_integration_ext.h" + + +#if !(defined(MPC8306) || \ + defined(MPC8309) || \ + defined(MPC834x) || \ + defined(MPC836x) || \ + defined(MPC832x) || \ + defined(MPC837x) || \ + defined(MPC8568) || \ + defined(MPC8569) || \ + defined(P1020) || \ + defined(P1021) || \ + defined(P1022) || \ + defined(P1023) || \ + defined(P2020) || \ + defined(P3041) || \ + defined(P4080) || \ + defined(P5020) || \ + defined(MSC814x)) +#error "unable to proceed without chip-definition" +#endif + + +/**************************************************************************//* + @Description Part data structure - must be contained in any integration + data structure. +*//***************************************************************************/ +typedef struct t_Part +{ + uint64_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId); + /**< Returns the address of the module's memory map base. */ + e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uint64_t baseAddress); + /**< Returns the module's ID according to its memory map base. */ +} t_Part; + + +#endif /* __PART_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/integrations/P1023/part_integration_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/integrations/P1023/part_integration_ext.h @@ -0,0 +1,659 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + + @File part_integration_ext.h + + @Description P1023 external definitions and structures. 
+*//***************************************************************************/ +#ifndef __PART_INTEGRATION_EXT_H +#define __PART_INTEGRATION_EXT_H + +#include "std_ext.h" +#ifndef NCSW_LINUX +#include "ddr_std_ext.h" +#endif +#include "dpaa_integration_ext.h" + + +/**************************************************************************//** + @Group 1023_chip_id P1023 Application Programming Interface + + @Description P1023 Chip functions,definitions and enums. + + @{ +*//***************************************************************************/ + +#define INTG_MAX_NUM_OF_CORES 1 + + +/**************************************************************************//** + @Description Module types. +*//***************************************************************************/ +typedef enum e_ModuleId +{ + e_MODULE_ID_LAW, /**< Local Access module */ + e_MODULE_ID_ECM, /**< e500 Coherency Module */ + e_MODULE_ID_DDR, /**< DDR memory controller */ + e_MODULE_ID_I2C_1, /**< I2C 1 */ + e_MODULE_ID_I2C_2, /**< I2C 1 */ + e_MODULE_ID_DUART_1, /**< DUART module 1 */ + e_MODULE_ID_DUART_2, /**< DUART module 2 */ + e_MODULE_ID_LBC, /**< Local bus memory controller module */ + e_MODULE_ID_PCIE_1, /**< PCI Express 1 controller module */ + e_MODULE_ID_PCIE_ATMU_1, /**< PCI 1 ATMU Window */ + e_MODULE_ID_PCIE_2, /**< PCI Express 2 controller module */ + e_MODULE_ID_PCIE_ATMU_2, /**< PCI 2 ATMU Window */ + e_MODULE_ID_PCIE_3, /**< PCI Express 3 controller module */ + e_MODULE_ID_PCIE_ATMU_3, /**< PCI 3 ATMU Window */ + e_MODULE_ID_MSI, /**< MSI registers */ + e_MODULE_ID_L2_SRAM, /**< L2/SRAM Memory-Mapped controller module */ + e_MODULE_ID_DMA_1, /**< DMA controller 1 */ + e_MODULE_ID_DMA_2, /**< DMA controller 2 */ + e_MODULE_ID_EPIC, /**< Programmable interrupt controller */ + e_MODULE_ID_ESPI, /**< ESPI module */ + e_MODULE_ID_GPIO, /**< General Purpose I/O */ + e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */ + e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */ + e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */ + e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */ + e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */ + e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */ + e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */ + e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */ + e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */ + e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */ + e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */ + e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */ + e_MODULE_ID_USB_DR_1, /**< USB 2.0 module 1 */ + e_MODULE_ID_USB_DR_2, /**< USB 2.0 module 2 */ + e_MODULE_ID_ETSEC_MII_MNG, /**< MII MNG registers */ + e_MODULE_ID_ETSEC_1, /**< ETSEC module 1 */ + e_MODULE_ID_ETSEC_2, /**< ETSEC module 2 */ + e_MODULE_ID_GUTS, /**< Serial DMA */ + e_MODULE_ID_PM, /**< Performance Monitor module */ + e_MODULE_ID_QM, /**< Queue manager module */ + e_MODULE_ID_BM, /**< Buffer manager module */ + e_MODULE_ID_QM_CE_PORTAL, + e_MODULE_ID_QM_CI_PORTAL, + e_MODULE_ID_BM_CE_PORTAL, + e_MODULE_ID_BM_CI_PORTAL, + e_MODULE_ID_FM, /**< Frame manager #1 module */ + e_MODULE_ID_FM_RTC, /**< FM Real-Time-Clock */ + e_MODULE_ID_FM_MURAM, /**< FM Multi-User-RAM */ + e_MODULE_ID_FM_BMI, /**< FM BMI block */ + e_MODULE_ID_FM_QMI, /**< FM QMI block */ + e_MODULE_ID_FM_PRS, /**< FM parser block */ + e_MODULE_ID_FM_PORT_HO0, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM_PORT_HO1, /**< FM 
Host-command/offline-parsing port block */ + e_MODULE_ID_FM_PORT_HO2, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM_PORT_HO3, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM_PORT_HO4, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM_PORT_1GRx0, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM_PORT_1GRx1, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM_PORT_1GTx0, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM_PORT_1GTx1, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM_PLCR, /**< FM Policer */ + e_MODULE_ID_FM_KG, /**< FM Keygen */ + e_MODULE_ID_FM_DMA, /**< FM DMA */ + e_MODULE_ID_FM_FPM, /**< FM FPM */ + e_MODULE_ID_FM_IRAM, /**< FM Instruction-RAM */ + e_MODULE_ID_FM_1GMDIO0, /**< FM 1G MDIO MAC 0*/ + e_MODULE_ID_FM_1GMDIO1, /**< FM 1G MDIO MAC 1*/ + e_MODULE_ID_FM_PRS_IRAM, /**< FM SW-parser Instruction-RAM */ + e_MODULE_ID_FM_RISC0, /**< FM risc #0 */ + e_MODULE_ID_FM_RISC1, /**< FM risc #1 */ + e_MODULE_ID_FM_1GMAC0, /**< FM 1G MAC #0 */ + e_MODULE_ID_FM_1GMAC1, /**< FM 1G MAC #1 */ + e_MODULE_ID_FM_MACSEC, /**< FM MACSEC */ + + e_MODULE_ID_DUMMY_LAST +} e_ModuleId; + +#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST + + +#define P1023_OFFSET_LAW 0x00000C08 +#define P1023_OFFSET_ECM 0x00001000 +#define P1023_OFFSET_DDR 0x00002000 +#define P1023_OFFSET_I2C1 0x00003000 +#define P1023_OFFSET_I2C2 0x00003100 +#define P1023_OFFSET_DUART1 0x00004500 +#define P1023_OFFSET_DUART2 0x00004600 +#define P1023_OFFSET_LBC 0x00005000 +#define P1023_OFFSET_ESPI 0x00007000 +#define P1023_OFFSET_PCIE2 0x00009000 +#define P1023_OFFSET_PCIE2_ATMU 0x00009C00 +#define P1023_OFFSET_PCIE1 0x0000A000 +#define P1023_OFFSET_PCIE1_ATMU 0x0000AC00 +#define P1023_OFFSET_PCIE3 0x0000B000 +#define P1023_OFFSET_PCIE3_ATMU 0x0000BC00 +#define P1023_OFFSET_DMA2 0x0000C100 +#define P1023_OFFSET_GPIO 0x0000F000 +#define P1023_OFFSET_L2_SRAM 0x00020000 +#define P1023_OFFSET_DMA1 0x00021100 +#define P1023_OFFSET_USB1 0x00022000 +#define P1023_OFFSET_SEC_GEN 0x00030000 +#define P1023_OFFSET_SEC_JQ0 0x00031000 +#define P1023_OFFSET_SEC_JQ1 0x00032000 +#define P1023_OFFSET_SEC_JQ2 0x00033000 +#define P1023_OFFSET_SEC_JQ3 0x00034000 +#define P1023_OFFSET_SEC_RTIC 0x00036000 +#define P1023_OFFSET_SEC_QI 0x00037000 +#define P1023_OFFSET_SEC_DECO0_CCB0 0x00038000 +#define P1023_OFFSET_SEC_DECO1_CCB1 0x00039000 +#define P1023_OFFSET_SEC_DECO2_CCB2 0x0003a000 +#define P1023_OFFSET_SEC_DECO3_CCB3 0x0003b000 +#define P1023_OFFSET_SEC_DECO4_CCB4 0x0003c000 +#define P1023_OFFSET_PIC 0x00040000 +#define P1023_OFFSET_MSI 0x00041600 +#define P1023_OFFSET_AXI 0x00081000 +#define P1023_OFFSET_QM 0x00088000 +#define P1023_OFFSET_BM 0x0008A000 +#define P1022_OFFSET_PM 0x000E1000 + +#define P1023_OFFSET_GUTIL 0x000E0000 +#define P1023_OFFSET_PM 0x000E1000 +#define P1023_OFFSET_DEBUG 0x000E2000 +#define P1023_OFFSET_SERDES 0x000E3000 +#define P1023_OFFSET_ROM 0x000F0000 +#define P1023_OFFSET_FM 0x00100000 + +#define P1023_OFFSET_FM_MURAM (P1023_OFFSET_FM + 0x00000000) +#define P1023_OFFSET_FM_BMI (P1023_OFFSET_FM + 0x00080000) +#define P1023_OFFSET_FM_QMI (P1023_OFFSET_FM + 0x00080400) +#define P1023_OFFSET_FM_PRS (P1023_OFFSET_FM + 0x00080800) +#define P1023_OFFSET_FM_PORT_HO0 (P1023_OFFSET_FM + 0x00081000) +#define P1023_OFFSET_FM_PORT_HO1 (P1023_OFFSET_FM + 0x00082000) +#define P1023_OFFSET_FM_PORT_HO2 (P1023_OFFSET_FM + 0x00083000) +#define P1023_OFFSET_FM_PORT_HO3 (P1023_OFFSET_FM + 0x00084000) +#define P1023_OFFSET_FM_PORT_HO4 (P1023_OFFSET_FM + 0x00085000) +#define 
P1023_OFFSET_FM_PORT_1GRX0 (P1023_OFFSET_FM + 0x00088000) +#define P1023_OFFSET_FM_PORT_1GRX1 (P1023_OFFSET_FM + 0x00089000) +#define P1023_OFFSET_FM_PORT_1GTX0 (P1023_OFFSET_FM + 0x000A8000) +#define P1023_OFFSET_FM_PORT_1GTX1 (P1023_OFFSET_FM + 0x000A9000) +#define P1023_OFFSET_FM_PLCR (P1023_OFFSET_FM + 0x000C0000) +#define P1023_OFFSET_FM_KG (P1023_OFFSET_FM + 0x000C1000) +#define P1023_OFFSET_FM_DMA (P1023_OFFSET_FM + 0x000C2000) +#define P1023_OFFSET_FM_FPM (P1023_OFFSET_FM + 0x000C3000) +#define P1023_OFFSET_FM_IRAM (P1023_OFFSET_FM + 0x000C4000) +#define P1023_OFFSET_FM_PRS_IRAM (P1023_OFFSET_FM + 0x000C7000) +#define P1023_OFFSET_FM_RISC0 (P1023_OFFSET_FM + 0x000D0000) +#define P1023_OFFSET_FM_RISC1 (P1023_OFFSET_FM + 0x000D0400) +#define P1023_OFFSET_FM_MACSEC (P1023_OFFSET_FM + 0x000D8000) +#define P1023_OFFSET_FM_1GMAC0 (P1023_OFFSET_FM + 0x000E0000) +#define P1023_OFFSET_FM_1GMDIO0 (P1023_OFFSET_FM + 0x000E1120) +#define P1023_OFFSET_FM_1GMAC1 (P1023_OFFSET_FM + 0x000E2000) +#define P1023_OFFSET_FM_1GMDIO1 (P1023_OFFSET_FM + 0x000E3000) +#define P1023_OFFSET_FM_RTC (P1023_OFFSET_FM + 0x000FE000) + +/* Offsets relative to QM or BM portals base */ +#define P1023_OFFSET_PORTALS_CE_AREA 0x00000000 /* cache enabled area */ +#define P1023_OFFSET_PORTALS_CI_AREA 0x00100000 /* cache inhibited area */ + +#define P1023_OFFSET_PORTALS_CE(portal) (P1023_OFFSET_PORTALS_CE_AREA + 0x4000 * (portal)) +#define P1023_OFFSET_PORTALS_CI(portal) (P1023_OFFSET_PORTALS_CI_AREA + 0x1000 * (portal)) + +/**************************************************************************//** + @Description Transaction source ID (for memory controllers error reporting). +*//***************************************************************************/ +typedef enum e_TransSrc +{ + e_TRANS_SRC_PCIE_2 = 0x01, /**< PCIe port 2 */ + e_TRANS_SRC_PCIE_1 = 0x02, /**< PCIe port 1 */ + e_TRANS_SRC_PCIE_3 = 0x03, /**< PCIe port 3 */ + e_TRANS_SRC_LBC = 0x04, /**< Enhanced local bus */ + e_TRANS_SRC_DPAA_SW_PORTALS = 0x0E, /**< DPAA software portals or SRAM */ + e_TRANS_SRC_DDR = 0x0F, /**< DDR controller */ + e_TRANS_SRC_CORE_INS_FETCH = 0x10, /**< Processor (instruction) */ + e_TRANS_SRC_CORE_DATA = 0x11, /**< Processor (data) */ + e_TRANS_SRC_DMA = 0x15 /**< DMA */ +} e_TransSrc; + +/**************************************************************************//** + @Description Local Access Window Target interface ID +*//***************************************************************************/ +typedef enum e_P1023LawTargetId +{ + e_P1023_LAW_TARGET_PCIE_2 = 0x01, /**< PCI Express 2 target interface */ + e_P1023_LAW_TARGET_PCIE_1 = 0x02, /**< PCI Express 1 target interface */ + e_P1023_LAW_TARGET_PCIE_3 = 0x03, /**< PCI Express 3 target interface */ + e_P1023_LAW_TARGET_LBC = 0x04, /**< Local bus target interface */ + e_P1023_LAW_TARGET_QM_PORTALS = 0x0E, /**< Queue Manager Portals */ + e_P1023_LAW_TARGET_BM_PORTALS = 0x0E, /**< Buffer Manager Portals */ + e_P1023_LAW_TARGET_SRAM = 0x0E, /**< SRAM scratchpad */ + e_P1023_LAW_TARGET_DDR = 0x0F, /**< DDR target interface */ + e_P1023_LAW_TARGET_NONE = 0xFF /**< Invalid target interface */ +} e_P1023LawTargetId; + + +/**************************************************************************//** + @Group 1023_init_grp P1023 Initialization Unit + + @Description P1023 initialization unit API functions, definitions and enums + + @{ +*//***************************************************************************/ + 
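+/**************************************************************************//**
+ @Description   Illustrative sketch only (not part of the original header):
+                shows how the P1023 block offsets and portal-offset macros
+                defined above might be combined with virtual base addresses
+                mapped elsewhere. The function names and the 'ccsrBase' /
+                'qmPortalsBase' parameters are hypothetical.
+*//***************************************************************************/
+static __inline__ uintptr_t Example_P1023FmKgBase(uintptr_t ccsrBase)
+{
+    /* Keygen registers sit at the FM block base plus the KG offset */
+    return (uintptr_t)(ccsrBase + P1023_OFFSET_FM_KG);
+}
+
+static __inline__ uintptr_t Example_P1023QmCiPortal(uintptr_t qmPortalsBase, uint32_t portal)
+{
+    /* Each cache-inhibited software portal occupies a 4KB slot within the
+       cache-inhibited area of the QMan portals region */
+    return (uintptr_t)(qmPortalsBase + P1023_OFFSET_PORTALS_CI(portal));
+}
+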
+/**************************************************************************//** + @Description Part ID and revision number +*//***************************************************************************/ +typedef enum e_P1023DeviceName +{ + e_P1023_REV_INVALID = 0x00000000, /**< Invalid revision */ + e_SC1023_REV_1_0 = (int)0x80FC0010, /**< SC1023 rev 1.0 */ + e_SC1023_REV_1_1 = (int)0x80FC0011, /**< SC1023 rev 1.1 */ + e_P1023_REV_1_0 = (int)0x80FE0010, /**< P1023 rev 1.0 with security */ + e_P1023_REV_1_1 = (int)0x80FE0011, /**< P1023 rev 1.1 with security */ + e_P1023_REV_1_0_NO_SEC = (int)0x80F60010, /**< P1023 rev 1.0 without security */ + e_P1023_REV_1_1_NO_SEC = (int)0x80F60011 /**< P1023 rev 1.1 without security */ +} e_P1023DeviceName; + +/**************************************************************************//** + @Description structure representing P1023 initialization parameters +*//***************************************************************************/ +typedef struct t_P1023Params +{ + uintptr_t ccsrBaseAddress; /**< CCSR base address (virtual) */ + uintptr_t bmPortalsBaseAddress; /**< Portals base address (virtual) */ + uintptr_t qmPortalsBaseAddress; /**< Portals base address (virtual) */ +} t_P1023Params; + +/**************************************************************************//** + @Function P1023_ConfigAndInit + + @Description General initiation of the chip registers. + + @Param[in] p_P1023Params - A pointer to data structure of parameters + + @Return A handle to the P1023 data structure. +*//***************************************************************************/ +t_Handle P1023_ConfigAndInit(t_P1023Params *p_P1023Params); + +/**************************************************************************//** + @Function P1023_Free + + @Description Free all resources. + + @Param h_P1023 - (In) The handle of the initialized P1023 object. + + @Return E_OK on success; Other value otherwise. +*//***************************************************************************/ +t_Error P1023_Free(t_Handle h_P1023); + +/**************************************************************************//** + @Function P1023_GetRevInfo + + @Description This routine enables access to chip and revision information. + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + + @Return Part ID and revision. +*//***************************************************************************/ +e_P1023DeviceName P1023_GetRevInfo(uintptr_t gutilBase); + +/**************************************************************************//** + @Function P1023_GetE500Factor + + @Description Returns E500 core clock multiplication factor. + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + @Param[in] coreId - Id of the requested core. + @Param[out] p_E500MulFactor - Returns E500 to CCB multification factor. + @Param[out] p_E500DivFactor - Returns E500 to CCB division factor. + + @Return E_OK on success; Other value otherwise. +* +*//***************************************************************************/ +t_Error P1023_GetE500Factor(uintptr_t gutilBase, + uint32_t coreId, + uint32_t *p_E500MulFactor, + uint32_t *p_E500DivFactor); + +/**************************************************************************//** + @Function P1023_GetFmFactor + + @Description returns FM multiplication factors. (This value is returned using + two parameters to avoid using float parameter). + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. 
+    @Param[out] p_FmMulFactor - Returns the FM clock multiplication factor.
+    @Param[out] p_FmDivFactor - Returns the FM clock division factor.
+
+    @Return     E_OK on success; Other value otherwise.
+*//***************************************************************************/
+t_Error P1023_GetFmFactor(uintptr_t gutilBase, uint32_t *p_FmMulFactor, uint32_t *p_FmDivFactor);
+
+/**************************************************************************//**
+ @Function      P1023_GetCcbFactor
+
+ @Description   Returns the system (CCB) multiplication factor.
+
+ @Param[in]     gutilBase - Base address of P1023 GUTIL registers.
+
+ @Return        System multiplication factor.
+*//***************************************************************************/
+uint32_t P1023_GetCcbFactor(uintptr_t gutilBase);
+
+/**************************************************************************//**
+ @Function      P1023_GetDdrFactor
+
+ @Description   Returns the multiplication and division factors of the input
+                clock for the DDR clock.
+                Note: assumes the ddr_in_clk is identical to the sys_in_clk.
+
+ @Param[in]     gutilBase       - Base address of P1023 GUTIL registers.
+ @Param[out]    p_DdrMulFactor  - Returns the DDR input-clock multiplication factor.
+ @Param[out]    p_DdrDivFactor  - Returns the DDR division factor.
+
+ @Return        E_OK on success; Other value otherwise.
+*//***************************************************************************/
+t_Error P1023_GetDdrFactor( uintptr_t gutilBase,
+                            uint32_t *p_DdrMulFactor,
+                            uint32_t *p_DdrDivFactor);
+
+
+/**************************************************************************//**
+ @Function      P1023_GetDdrType
+
+ @Description   Returns the DDR memory type (DDR1/DDR2/DDR3).
+
+ @Param[in]     gutilBase   - Base address of P1023 GUTIL registers.
+ @Param[out]    p_DdrType   - Returns the DDR type (DDR1/DDR2/DDR3).
+
+ @Return        E_OK on success; Other value otherwise.
+*//***************************************************************************/ +#ifndef NCSW_LINUX +t_Error P1023_GetDdrType(uintptr_t gutilBase, e_DdrType *p_DdrType ); +#endif + + +/** @} */ /* end of 1023_init_grp group */ +/** @} */ /* end of 1023_grp group */ + +#define CORE_E500V2 + +/***************************************************************************** + INTEGRATION-SPECIFIC MODULE CODES +******************************************************************************/ +#define MODULE_UNKNOWN 0x00000000 +#define MODULE_MEM 0x00010000 +#define MODULE_MM 0x00020000 +#define MODULE_CORE 0x00030000 +#define MODULE_P1023 0x00040000 +#define MODULE_MII 0x00050000 +#define MODULE_PM 0x00060000 +#define MODULE_MMU 0x00070000 +#define MODULE_PIC 0x00080000 +#define MODULE_L2_CACHE 0x00090000 +#define MODULE_DUART 0x000a0000 +#define MODULE_SERDES 0x000b0000 +#define MODULE_PIO 0x000c0000 +#define MODULE_QM 0x000d0000 +#define MODULE_BM 0x000e0000 +#define MODULE_SEC 0x000f0000 +#define MODULE_FM 0x00100000 +#define MODULE_FM_MURAM 0x00110000 +#define MODULE_FM_PCD 0x00120000 +#define MODULE_FM_RTC 0x00130000 +#define MODULE_FM_MAC 0x00140000 +#define MODULE_FM_PORT 0x00150000 +#define MODULE_FM_MACSEC 0x00160000 +#define MODULE_FM_MACSEC_SECY 0x00170000 +#define MODULE_ECM 0x00180000 +#define MODULE_DMA 0x00190000 +#define MODULE_DDR 0x001a0000 +#define MODULE_LAW 0x001b0000 +#define MODULE_LBC 0x001c0000 +#define MODULE_I2C 0x001d0000 +#define MODULE_ESPI 0x001e0000 +#define MODULE_PCI 0x001f0000 +#define MODULE_DPA 0x00200000 +#define MODULE_USB 0x00210000 + +/***************************************************************************** + LBC INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +/**************************************************************************//** + @Group lbc_exception_grp LBC Exception Unit + + @Description LBC Exception unit API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Anchor lbc_exbm + + @Collection LBC Errors Bit Mask + + These errors are reported through the exceptions callback.. + The values can be or'ed in any combination in the errors mask + parameter of the errors report structure. + + These errors can also be passed as a bit-mask to + LBC_EnableErrorChecking() or LBC_DisableErrorChecking(), + for enabling or disabling error checking. 
+ @{ +*//***************************************************************************/ +#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */ +#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */ +#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */ +#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */ + +#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \ + LBC_ERR_WRITE_PROTECT | LBC_ERR_CHIP_SELECT) + /**< All possible errors */ +/* @} */ +/** @} */ /* end of lbc_exception_grp group */ + +#define LBC_NUM_OF_BANKS 2 +#define LBC_MAX_CS_SIZE 0x0000000100000000LL +#define LBC_ATOMIC_OPERATION_SUPPORT +#define LBC_PARITY_SUPPORT +#define LBC_ADDRESS_SHIFT_SUPPORT +#define LBC_ADDRESS_HOLD_TIME_CTRL +#define LBC_HIGH_CLK_DIVIDERS +#define LBC_FCM_AVAILABLE + + +/***************************************************************************** + LAW INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define LAW_ARCH_CCB +#define LAW_NUM_OF_WINDOWS 12 +#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4KB */ +#define LAW_MAX_WINDOW_SIZE 0x0000001000000000LL /**< 32GB */ + + +/***************************************************************************** + SPI INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define SPI_NUM_OF_CONTROLLERS 1 + +/***************************************************************************** + PCI/PCIe INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ + +#define PCI_MAX_INBOUND_WINDOWS_NUM 4 +#define PCI_MAX_OUTBOUND_WINDOWS_NUM 5 + +/**************************************************************************//** + @Description Target interface of an inbound window +*//***************************************************************************/ +typedef enum e_PciTargetInterface +{ + e_PCI_TARGET_PCIE_2 = 0x1, /**< PCI Express target interface 2 */ + e_PCI_TARGET_PCIE_1 = 0x2, /**< PCI Express target interface 1 */ + e_PCI_TARGET_PCIE_3 = 0x3, /**< PCI Express target interface 3 */ + e_PCI_TARGET_LOCAL_MEMORY = 0xF /**< Local Memory (DDR SDRAM, Local Bus, SRAM) target interface */ + +} e_PciTargetInterface; + +/***************************************************************************** + DDR INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define DDR_NUM_OF_VALID_CS 2 + +/***************************************************************************** + SEC INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define SEC_ERRATA_STAT_REGS_UNUSABLE + +/***************************************************************************** + DMA INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define DMA_NUM_OF_CONTROLLERS 1 + + + + +/***************************************************************************** + 1588 INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define PTP_V2 + +/**************************************************************************//** + @Function P1023_GetMuxControlReg + + @Description Returns the value of PMUXCR (Alternate Function Signal Multiplex + Control Register) + + @Param[in] gutilBase - Base address of P1023 GUTIL 
registers. + + @Return Value of PMUXCR +*//***************************************************************************/ +uint32_t P1023_GetMuxControlReg(uintptr_t gutilBase); + +/**************************************************************************//** + @Function P1023_SetMuxControlReg + + @Description Sets the value of PMUXCR (Alternate Function Signal Multiplex + Control Register) + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + @Param[in] val - the new value for PMUXCR. + + @Return None +*//***************************************************************************/ +void P1023_SetMuxControlReg(uintptr_t gutilBase, uint32_t val); + +/**************************************************************************//** + @Function P1023_GetPowerMngClkDisableReg + + @Description Returns the value of PMCDR (Power Management Clock Disable Register) + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + + @Return Value of PMCDR +*//***************************************************************************/ +uint32_t P1023_GetPowerMngClkDisableReg(uintptr_t gutilBase); + +/**************************************************************************//** + @Function P1023_SetPowerMngClkDisableReg + + @Description Sets the value of PMCDR ((Power Management Clock Disable Register)) + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + @Param[in] val - the new value for PMCDR. + + @Return None +*//***************************************************************************/ +void P1023_SetPowerMngClkDisableReg(uintptr_t gutilBase, uint32_t val); + +/**************************************************************************//** + @Function P1023_GetDeviceDisableStatusRegister + + @Description Returns the value of DEVDISR (Device Disable Register) + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + + @Return Value of DEVDISR +*//***************************************************************************/ +uint32_t P1023_GetDeviceDisableStatusRegister(uintptr_t gutilBase); + +/**************************************************************************//** + @Function P1023_GetPorDeviceStatusRegister + + @Description Returns the value of POR Device Status Register + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. + + @Return POR Device Status Register +*//***************************************************************************/ +uint32_t P1023_GetPorDeviceStatusRegister(uintptr_t gutilBase); + +/**************************************************************************//** + @Function P1023_GetPorBootModeStatusRegister + + @Description Returns the value of POR Boot Mode Status Register + + @Param[in] gutilBase - Base address of P1023 GUTIL registers. 
+ + @Return POR Boot Mode Status Register value +*//***************************************************************************/ +uint32_t P1023_GetPorBootModeStatusRegister(uintptr_t gutilBase); + + +#define PORDEVSR_SGMII1_DIS 0x10000000 +#define PORDEVSR_SGMII2_DIS 0x08000000 +#define PORDEVSR_ECP1 0x02000000 +#define PORDEVSR_IO_SEL 0x00780000 +#define PORDEVSR_IO_SEL_SHIFT 19 +#define PORBMSR_HA 0x00070000 +#define PORBMSR_HA_SHIFT 16 + +#define DEVDISR_QM_BM 0x80000000 +#define DEVDISR_FM 0x40000000 +#define DEVDISR_PCIE1 0x20000000 +#define DEVDISR_MAC_SEC 0x10000000 +#define DEVDISR_ELBC 0x08000000 +#define DEVDISR_PCIE2 0x04000000 +#define DEVDISR_PCIE3 0x02000000 +#define DEVDISR_CAAM 0x01000000 +#define DEVDISR_USB0 0x00800000 +#define DEVDISR_1588 0x00020000 +#define DEVDISR_CORE0 0x00008000 +#define DEVDISR_TB0 0x00004000 +#define DEVDISR_CORE1 0x00002000 +#define DEVDISR_TB1 0x00001000 +#define DEVDISR_DMA1 0x00000400 +#define DEVDISR_DMA2 0x00000200 +#define DEVDISR_DDR 0x00000010 +#define DEVDISR_TSEC1 0x00000080 +#define DEVDISR_TSEC2 0x00000040 +#define DEVDISR_SPI 0x00000008 +#define DEVDISR_I2C 0x00000004 +#define DEVDISR_DUART 0x00000002 + + +#endif /* __PART_INTEGRATION_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/integrations/P3040_P4080_P5020/dpaa_integration_ext.h @@ -0,0 +1,452 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + + @File dpaa_integration_ext.h + + @Description P4080 FM external definitions and structures. 
+*//***************************************************************************/ +#ifndef __DPAA_INTEGRATION_EXT_H +#define __DPAA_INTEGRATION_EXT_H + +#include "std_ext.h" + + +typedef enum { + e_DPAA_SWPORTAL0 = 0, + e_DPAA_SWPORTAL1, + e_DPAA_SWPORTAL2, + e_DPAA_SWPORTAL3, + e_DPAA_SWPORTAL4, + e_DPAA_SWPORTAL5, + e_DPAA_SWPORTAL6, + e_DPAA_SWPORTAL7, + e_DPAA_SWPORTAL8, + e_DPAA_SWPORTAL9, + e_DPAA_SWPORTAL_DUMMY_LAST +} e_DpaaSwPortal; + +typedef enum { + e_DPAA_DCPORTAL0 = 0, + e_DPAA_DCPORTAL1, + e_DPAA_DCPORTAL2, + e_DPAA_DCPORTAL3, + e_DPAA_DCPORTAL4, + e_DPAA_DCPORTAL_DUMMY_LAST +} e_DpaaDcPortal; + +#define DPAA_MAX_NUM_OF_SW_PORTALS e_DPAA_SWPORTAL_DUMMY_LAST +#define DPAA_MAX_NUM_OF_DC_PORTALS e_DPAA_DCPORTAL_DUMMY_LAST + +/***************************************************************************** + QMan INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define QMAN_PM_DCP_COUNTERS_ERRATA_QMAN1 +#define QMAN_FQD_AVOID_BLK_ERRATA_QMAN2 +#define QMAN_DBG_TRC_EV_ERRATA_QMAN3 +#define QMAN_WQ_CS_CFG_ERRATA_QMAN4 +#define QMAN_SFDR_LEAK_ERRATA_QMAN5 +#define QMAN_FQ_TD_THRESH_ERRATA_QMAN6 +#define QMAN_FQ_INIT_ON_PARKED_ERRATA_QMAN7 +#define QMAN_NESN_ORR_ERRATA_QMAN8 +#define QMAN_ERN_REJ_CODE6_ERRATA_QMAN9 +#define QMAN_ERN_MOULTI_CORE_ERRATA_QMAN10 +#define QMAN_PERFMON_FOR_DCP_FQD_ERRATA_QMAN11 + +#define QM_MAX_NUM_OF_POOL_CHANNELS 15 +#define QM_MAX_NUM_OF_WQ 8 +#define QM_MAX_NUM_OF_SWP_AS 4 +#define QM_MAX_NUM_OF_CGS 256 +#define QM_MAX_NUM_OF_FQIDS (16*MEGABYTE) + +/**************************************************************************//** + @Description Work Queue Channel assignments in QMan. +*//***************************************************************************/ +typedef enum +{ + e_QM_FQ_CHANNEL_SWPORTAL0 = 0, /**< Dedicated channels serviced by software portals 0 to 9 */ + e_QM_FQ_CHANNEL_SWPORTAL1, + e_QM_FQ_CHANNEL_SWPORTAL2, + e_QM_FQ_CHANNEL_SWPORTAL3, + e_QM_FQ_CHANNEL_SWPORTAL4, + e_QM_FQ_CHANNEL_SWPORTAL5, + e_QM_FQ_CHANNEL_SWPORTAL6, + e_QM_FQ_CHANNEL_SWPORTAL7, + e_QM_FQ_CHANNEL_SWPORTAL8, + e_QM_FQ_CHANNEL_SWPORTAL9, + + e_QM_FQ_CHANNEL_POOL1 = 0x21, /**< Pool channels that can be serviced by any of the software portals */ + e_QM_FQ_CHANNEL_POOL2, + e_QM_FQ_CHANNEL_POOL3, + e_QM_FQ_CHANNEL_POOL4, + e_QM_FQ_CHANNEL_POOL5, + e_QM_FQ_CHANNEL_POOL6, + e_QM_FQ_CHANNEL_POOL7, + e_QM_FQ_CHANNEL_POOL8, + e_QM_FQ_CHANNEL_POOL9, + e_QM_FQ_CHANNEL_POOL10, + e_QM_FQ_CHANNEL_POOL11, + e_QM_FQ_CHANNEL_POOL12, + e_QM_FQ_CHANNEL_POOL13, + e_QM_FQ_CHANNEL_POOL14, + e_QM_FQ_CHANNEL_POOL15, + + e_QM_FQ_CHANNEL_FMAN0_SP0 = 0x40, /**< Dedicated channels serviced by Direct Connect Portal 0: + connected to FMan 0; assigned in incrementing order to + each sub-portal (SP) in the portal */ + e_QM_FQ_CHANNEL_FMAN0_SP1, + e_QM_FQ_CHANNEL_FMAN0_SP2, + e_QM_FQ_CHANNEL_FMAN0_SP3, + e_QM_FQ_CHANNEL_FMAN0_SP4, + e_QM_FQ_CHANNEL_FMAN0_SP5, + e_QM_FQ_CHANNEL_FMAN0_SP6, + e_QM_FQ_CHANNEL_FMAN0_SP7, + e_QM_FQ_CHANNEL_FMAN0_SP8, + e_QM_FQ_CHANNEL_FMAN0_SP9, + e_QM_FQ_CHANNEL_FMAN0_SP10, + e_QM_FQ_CHANNEL_FMAN0_SP11, + + e_QM_FQ_CHANNEL_FMAN1_SP0 = 0x60, + e_QM_FQ_CHANNEL_FMAN1_SP1, + e_QM_FQ_CHANNEL_FMAN1_SP2, + e_QM_FQ_CHANNEL_FMAN1_SP3, + e_QM_FQ_CHANNEL_FMAN1_SP4, + e_QM_FQ_CHANNEL_FMAN1_SP5, + e_QM_FQ_CHANNEL_FMAN1_SP6, + e_QM_FQ_CHANNEL_FMAN1_SP7, + e_QM_FQ_CHANNEL_FMAN1_SP8, + e_QM_FQ_CHANNEL_FMAN1_SP9, + e_QM_FQ_CHANNEL_FMAN1_SP10, + e_QM_FQ_CHANNEL_FMAN1_SP11, + + e_QM_FQ_CHANNEL_CAAM = 0x80, /**< Dedicated 
channel serviced by Direct Connect Portal 2: + connected to SEC 4.x */ + + e_QM_FQ_CHANNEL_PME = 0xA0, /**< Dedicated channel serviced by Direct Connect Portal 3: + connected to PME */ +} e_QmFQChannel; + +/* p4080-rev1 unique features */ +#define QM_CGS_NO_FRAME_MODE + +/***************************************************************************** + BMan INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define BM_MAX_NUM_OF_POOLS 64 + +/***************************************************************************** + SEC INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +/* SEC erratas */ +#ifdef UNDER_CONSTRUCTION_IPSEC +#define SEC_IPV6_UDP_CHECKSUM_UPDATE +#define SEC_UDP_LENGTH_UPDATE +#endif /* UNDER_CONSTRUCTION_IPSEC */ + +/***************************************************************************** + FM INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define INTG_MAX_NUM_OF_FM 2 + +/* Ports defines */ +#define FM_MAX_NUM_OF_1G_RX_PORTS 5 +#define FM_MAX_NUM_OF_10G_RX_PORTS 1 +#define FM_MAX_NUM_OF_RX_PORTS (FM_MAX_NUM_OF_10G_RX_PORTS+FM_MAX_NUM_OF_1G_RX_PORTS) +#define FM_MAX_NUM_OF_1G_TX_PORTS 5 +#define FM_MAX_NUM_OF_10G_TX_PORTS 1 +#define FM_MAX_NUM_OF_TX_PORTS (FM_MAX_NUM_OF_10G_TX_PORTS+FM_MAX_NUM_OF_1G_TX_PORTS) +#define FM_MAX_NUM_OF_OH_PORTS 7 +#define FM_MAX_NUM_OF_1G_MACS (FM_MAX_NUM_OF_1G_RX_PORTS) +#define FM_MAX_NUM_OF_10G_MACS (FM_MAX_NUM_OF_10G_RX_PORTS) +#define FM_MAX_NUM_OF_MACS (FM_MAX_NUM_OF_1G_MACS+FM_MAX_NUM_OF_10G_MACS) + + +#define FM_PORT_MAX_NUM_OF_EXT_POOLS 8 /**< Number of external BM pools per Rx port */ +#define FM_PORT_NUM_OF_CONGESTION_GRPS 256 /**< Total number of congestion groups in QM */ +#define FM_MAX_NUM_OF_SUB_PORTALS 12 +#define FM_PORT_MAX_NUM_OF_OBSERVED_EXT_POOLS 0 + +/* RAMs defines */ +#define FM_MURAM_SIZE (160 * KILOBYTE) +#define FM_IRAM_SIZE ( 64 * KILOBYTE) + +/* PCD defines */ +#define FM_PCD_PLCR_NUM_ENTRIES 256 /**< Total number of policer profiles */ +#define FM_PCD_KG_NUM_OF_SCHEMES 32 /**< Total number of KG schemes */ +#define FM_PCD_MAX_NUM_OF_CLS_PLANS 256 /**< Number of classification plan entries. 
*/ + +/* RTC defines */ +#define FM_RTC_NUM_OF_ALARMS 2 /**< RTC number of alarms */ +#define FM_RTC_NUM_OF_PERIODIC_PULSES 2 /**< RTC number of periodic pulses */ +#define FM_RTC_NUM_OF_EXT_TRIGGERS 2 /**< RTC number of external triggers */ + +/* QMI defines */ +#define QMI_MAX_NUM_OF_TNUMS 64 +#define MAX_QMI_DEQ_SUBPORTAL 12 +#define QMI_DEF_TNUMS_THRESH 48 + +/* FPM defines */ +#define FM_NUM_OF_FMAN_CTRL_EVENT_REGS 4 + +/* DMA defines */ +#define DMA_THRESH_MAX_COMMQ 31 +#define DMA_THRESH_MAX_BUF 127 + +/* BMI defines */ +#define BMI_MAX_NUM_OF_TASKS 128 +#define BMI_MAX_NUM_OF_DMAS 32 +#define BMI_MAX_FIFO_SIZE (FM_MURAM_SIZE) +#define PORT_MAX_WEIGHT 16 + +#ifdef UNDER_CONSTRUCTION_FRAG_REASSEMBLY +/* Reassembly defines */ +#define FM_MAX_NUM_OF_REASSEMBLY_PORTS 4 +#endif /* UNDER_CONSTRUCTION_FRAG_REASSEMBLY */ + + +/**************************************************************************//** + @Description Enum for inter-module interrupts registration +*//***************************************************************************/ +typedef enum e_FmEventModules{ + e_FM_MOD_PRS, /**< Parser event */ + e_FM_MOD_KG, /**< Keygen event */ + e_FM_MOD_PLCR, /**< Policer event */ + e_FM_MOD_10G_MAC, /**< 10G MAC error event */ + e_FM_MOD_1G_MAC, /**< 1G MAC error event */ + e_FM_MOD_TMR, /**< Timer event */ + e_FM_MOD_1G_MAC_TMR, /**< 1G MAC timer event */ + e_FM_MOD_FMAN_CTRL, /**< FMAN Controller timer event */ + e_FM_MOD_DUMMY_LAST +} e_FmEventModules; + +/**************************************************************************//** + @Description Enum for interrupts types +*//***************************************************************************/ +typedef enum e_FmIntrType { + e_FM_INTR_TYPE_ERR, + e_FM_INTR_TYPE_NORMAL +} e_FmIntrType; + +/**************************************************************************//** + @Description Enum for inter-module interrupts registration +*//***************************************************************************/ +typedef enum e_FmInterModuleEvent { + e_FM_EV_PRS, /**< Parser event */ + e_FM_EV_ERR_PRS, /**< Parser error event */ + e_FM_EV_KG, /**< Keygen event */ + e_FM_EV_ERR_KG, /**< Keygen error event */ + e_FM_EV_PLCR, /**< Policer event */ + e_FM_EV_ERR_PLCR, /**< Policer error event */ + e_FM_EV_ERR_10G_MAC0, /**< 10G MAC 0 error event */ + e_FM_EV_ERR_1G_MAC0, /**< 1G MAC 0 error event */ + e_FM_EV_ERR_1G_MAC1, /**< 1G MAC 1 error event */ + e_FM_EV_ERR_1G_MAC2, /**< 1G MAC 2 error event */ + e_FM_EV_ERR_1G_MAC3, /**< 1G MAC 3 error event */ + e_FM_EV_ERR_1G_MAC4, /**< 1G MAC 4 error event */ + e_FM_EV_TMR, /**< Timer event */ + e_FM_EV_1G_MAC1, /**< 1G MAC 1 event */ + e_FM_EV_1G_MAC2, /**< 1G MAC 2 event */ + e_FM_EV_1G_MAC3, /**< 1G MAC 3 event */ + e_FM_EV_1G_MAC4, /**< 1G MAC 3 event */ + e_FM_EV_1G_MAC0_TMR, /**< 1G MAC 0 Timer event */ + e_FM_EV_1G_MAC1_TMR, /**< 1G MAC 1 Timer event */ + e_FM_EV_1G_MAC2_TMR, /**< 1G MAC 2 Timer event */ + e_FM_EV_1G_MAC3_TMR, /**< 1G MAC 3 Timer event */ + e_FM_EV_1G_MAC4_TMR, /**< 1G MAC 4 Timer event */ + e_FM_EV_FMAN_CTRL_0, /**< Fman controller event 0 */ + e_FM_EV_FMAN_CTRL_1, /**< Fman controller event 1 */ + e_FM_EV_FMAN_CTRL_2, /**< Fman controller event 2 */ + e_FM_EV_FMAN_CTRL_3, /**< Fman controller event 3 */ + e_FM_EV_DUMMY_LAST +} e_FmInterModuleEvent; + +#define GET_FM_MODULE_EVENT(mod, id, intrType, event) \ + switch(mod){ \ + case e_FM_MOD_PRS: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? 
e_FM_EV_ERR_PRS : e_FM_EV_PRS; \ + break; \ + case e_FM_MOD_KG: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_KG : e_FM_EV_DUMMY_LAST; \ + break; \ + case e_FM_MOD_PLCR: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_PLCR : e_FM_EV_PLCR; \ + break; \ + case e_FM_MOD_10G_MAC: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_10G_MAC0 : e_FM_EV_DUMMY_LAST;\ + break; \ + case e_FM_MOD_1G_MAC: \ + switch(id){ \ + case(0): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC0 : e_FM_EV_DUMMY_LAST; break; \ + case(1): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC1 : e_FM_EV_DUMMY_LAST; break; \ + case(2): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC2 : e_FM_EV_DUMMY_LAST; break; \ + case(3): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC3 : e_FM_EV_DUMMY_LAST; break; \ + case(4): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_ERR_1G_MAC4 : e_FM_EV_DUMMY_LAST; break; \ + } \ + break; \ + case e_FM_MOD_TMR: \ + if (id) event = e_FM_EV_DUMMY_LAST; \ + else event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_TMR; \ + break; \ + case e_FM_MOD_1G_MAC_TMR: \ + switch(id){ \ + case(0): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_1G_MAC0_TMR; break;\ + case(1): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_1G_MAC1_TMR; break;\ + case(2): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_1G_MAC2_TMR; break;\ + case(3): event = (intrType == e_FM_INTR_TYPE_ERR) ? e_FM_EV_DUMMY_LAST : e_FM_EV_1G_MAC3_TMR; break;\ + case(4): event = (intrType == e_FM_INTR_TYPE_ERR) ? 
e_FM_EV_DUMMY_LAST : e_FM_EV_1G_MAC4_TMR; break;\ + } \ + break; \ + case e_FM_MOD_FMAN_CTRL: \ + if (intrType == e_FM_INTR_TYPE_ERR) event = e_FM_EV_DUMMY_LAST; \ + else switch(id){ \ + case(0): event = e_FM_EV_FMAN_CTRL_0; break; \ + case(1): event = e_FM_EV_FMAN_CTRL_1; break; \ + case(2): event = e_FM_EV_FMAN_CTRL_2; break; \ + case(3): event = e_FM_EV_FMAN_CTRL_3; break; \ + } \ + break; \ + default: event = e_FM_EV_DUMMY_LAST; \ + break;} + +#define FM_CHECK_PORT_RESTRICTIONS(__validPorts, __newPortIndx) TRUE + +/* p4080-rev1 unique features */ +#define FM_PARTITION_ARRAY +#define FM_PPPOE_NO_MTU_CHECK + +/* p4080 unique features */ +#ifdef UNDER_CONSTRUCTION_IPSEC +#define FM_ETH_TYPE_FIX +#define FM_DISABLE_SEC_ERRORS +#endif /* UNDER_CONSTRUCTION_IPSEC */ +#define FM_QMI_DEQ_OPTIONS_SUPPORT +#define FM_NO_DISPATCH_RAM_ECC +#define FM_FIFO_ALLOCATION_OLD_ALG +#define FM_NO_WATCHDOG +#define FM_NO_TNUM_AGING +#define FM_NO_TGEC_LOOPBACK +#define FM_KG_NO_BYPASS_FQID_GEN +#define FM_KG_NO_BYPASS_PLCR_PROFILE_GEN +#define FM_NO_BACKUP_POOLS +#define FM_NO_OP_OBSERVED_POOLS +#define FM_NO_ADVANCED_RATE_LIMITER +#define FM_NO_OP_OBSERVED_CGS + +/* FM erratas */ +#define FM_SINGLE_MDIO_ERRATA_GEN8 /* implemented in platform */ +#define FM_HALT_SIG_ERRATA_GEN12 + +#define FM_10G_MDIO_HOLD_ERRATA_XAUI3 /* implemented in platform */ +#define FM_10G_PCS_ALIGNMENT_ERRATA_XAUI4 /* implemented in platform */ + +#define FM_IEEE_BAD_TS_ERRATA_IEEE1588_A001 /* No implementation, Out of LLD scope */ + +#define FM_FALSE_RDRP_ERRATA_10GMAC_A001 /* No implementation, Out of LLD scope */ +#define FM_RX_EXTRA_BYTES_ERRATA_10GMAC_A002 /* No implementation, Out of LLD scope */ +#define FM_TX_PAUSE_ON_ENABLE_ERRATA_10GMAC_A003 /* No implementation, Out of LLD scope */ +#define FM_TX_ECC_FRMS_ERRATA_10GMAC_A004 +#define FM_TX_SHORT_FRAME_BAD_TS_ERRATA_10GMAC_A006 /* No implementation, Out of LLD scope */ +#define FM_TX_FIFO_CORRUPTION_ERRATA_10GMAC_A007 +#define FM_ECC_HALT_NO_SYNC_ERRATA_10GMAC_A008 + +#define FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 + +#define FM_NO_RX_PREAM_ERRATA_DTSECx1 +#define FM_RX_PREAM_4_ERRATA_DTSEC_A001 FM_NO_RX_PREAM_ERRATA_DTSECx1 +#define FM_GRS_ERRATA_DTSEC_A002 +#define FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 +#define FM_GTS_ERRATA_DTSEC_A004 +#define FM_PAUSE_BLOCK_ERRATA_DTSEC_A006 /* do nothing */ +#define FM_RESERVED_ACCESS_TO_DISABLED_DEV_ERRATA_DTSEC_A0011 /* do nothing */ +#define FM_GTS_AFTER_MAC_ABORTED_FRAME_ERRATA_DTSEC_A0012 FM_GTS_ERRATA_DTSEC_A004 + +#define FM_SHORT_PAUSE_TIME_ERRATA_DTSEC1 +#define FM_MAGIC_PACKET_UNRECOGNIZED_ERRATA_DTSEC2 /* No implementation, Out of LLD scope */ +#define FM_10_100_SGMII_NO_TS_ERRATA_DTSEC3 +#define FM_TX_LOCKUP_ERRATA_DTSEC6 + +#define FM_IM_TX_SYNC_SKIP_TNUM_ERRATA_FMAN_A001 /* Implemented by ucode */ +#define FM_RX_PIPELINE_OF_DATA_CORRUPTION_ERRATA_FMAN_A002 /* No implementation, Out of LLD scope */ +#define FM_HC_DEF_FQID_ONLY_ERRATA_FMAN_A003 /* Implemented by ucode */ + +#define FM_1588_SRC_CLK_ERRATA_FMAN1 +#define FM_NO_RUNNING_SUM_FOR_DBG_N_SWPRS_ERRATA_FMAN2 /* No implementation, Out of LLD scope */ +#define FM_IM_TX_SHARED_TNUM_ERRATA_FMAN4 /* Implemented by ucode */ +#define FM_IM_GS_DEADLOCK_ERRATA_FMAN5 /* Implemented by ucode */ +#define FM_PORT_SYNC_ERRATA_FMAN6 +#define FM_RAM_LIST_ERR_IRQ_ERRATA_FMAN8 +#define FM_BMI_PIPELINE_ERR_IRQ_ERRATA_FMAN9 +#define FM_IM_DEQ_PIPELINE_DEPTH_ERRATA_FMAN10 /* Implemented by ucode */ +#define FM_CC_GEN6_MISSMATCH_ERRATA_FMAN12 /* Implemented by ucode */ +#define 
FM_CC_CHANGE_SHARED_TNUM_ERRATA_FMAN13 /* Implemented by ucode */ +#define FM_IM_LARGE_MRBLR_ERRATA_FMAN15 /* Implemented by ucode */ +#define FM_RESET_ERRATA_FMAN16 /* No implementation, Out of LLD scope */ +#define FM_IPV4_HDRLEN0_ERRATA_FMAN17 /* No implementation, Out of LLD scope */ +#define FM_INCORRECT_CS_ERRATA_FMAN18 +#define FM_ILLEGAL_FRM_LEN_ERRATA_FMAN20 /* No implementation, Out of LLD scope */ +#define FM_OP_PORT_QMAN_REJECT_ERRATA_FMAN21 + +#define FM_PRS_L4_SHELL_ERRATA_FMANb +#define FM_BMI_TO_RISC_ENQ_ERRATA_FMANc +#define FM_INVALID_SWPRS_DATA_ERRATA_FMANd /* No implementation, Out of LLD scope */ +//#define FM_PRS_L4_NO_CLEAR_ERRATA_FMANe /* No implementation, No patch yet */ +//#define FM_PRS_MPLS_ERROR_ERRATA_FMANf /* No implementation, No patch yet */ +#define FM_PORT_COUNTERS_ERRATA_FMANg +#define FM_BAD_RX_FD_ERRATA_FMANh /* No implementation, Out of LLD scope */ +//#define FM_PRS_MPLS_SSA_ERRATA_FMANj /* No implementation, No patch yet */ +//#define FM_PRS_INITIAL_PLANID_ERRATA_FMANk /* No implementation, No patch yet */ + + +#define FM_OP_PARTITION_ERRATA_FMANx8 +#define FM_PORT_DISABLED_ERRATA_FMANx9 +#define FM_TX_PORT_IM_OR_ERRATA_FMANx11 /* Implemented by ucode */ +#define FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 +#define FM_PORT_OTF_CHANGES_ERRATA_FMANx12 FM_PORT_EXCESSIVE_BUDGET_ERRATA_FMANx16 +#define FM_SOFT_RESET_ERRATA_FMANx15 /* No implementation, Out of LLD scope */ + +#define FM_UCODE_NOT_RESET_ERRATA_BUGZILLA6173 + +#define FM_PRS_MEM_ERRATA_FMAN_SW003 +#define FM_LEN_CHECK_ERRATA_FMAN_SW002 +#define FM_10G_REM_N_LCL_FLT_EX_ERRATA_10GMAC001 + +#ifdef UNDER_CONSTRUCTION_IPSEC +#define FM_NO_COPY_CTXA_CTXB_ERRATA_FMAN_SW001 +#endif /* UNDER_CONSTRUCTION_IPSEC */ + + +#endif /* __DPAA_INTEGRATION_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/integrations/P3040_P4080_P5020/part_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/integrations/P3040_P4080_P5020/part_ext.h @@ -0,0 +1,83 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + + @File part_ext.h + + @Description Definitions for the part (integration) module. +*//***************************************************************************/ + +#ifndef __PART_EXT_H +#define __PART_EXT_H + +#include "std_ext.h" +#include "part_integration_ext.h" + + +#if !(defined(MPC8306) || \ + defined(MPC8309) || \ + defined(MPC834x) || \ + defined(MPC836x) || \ + defined(MPC832x) || \ + defined(MPC837x) || \ + defined(MPC8568) || \ + defined(MPC8569) || \ + defined(P1020) || \ + defined(P1021) || \ + defined(P1022) || \ + defined(P1023) || \ + defined(P2020) || \ + defined(P2040) || \ + defined(P3041) || \ + defined(P4080) || \ + defined(SC4080) || \ + defined(P5020) || \ + defined(MSC814x)) +#error "unable to proceed without chip-definition" +#endif /* !(defined(MPC834x) || ... */ + + +/**************************************************************************//* + @Description Part data structure - must be contained in any integration + data structure. +*//***************************************************************************/ +typedef struct t_Part +{ + uintptr_t (* f_GetModuleBase)(t_Handle h_Part, e_ModuleId moduleId); + /**< Returns the address of the module's memory map base. */ + e_ModuleId (* f_GetModuleIdByBase)(t_Handle h_Part, uintptr_t baseAddress); + /**< Returns the module's ID according to its memory map base. */ +} t_Part; + + +#endif /* __PART_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/integrations/P3040_P4080_P5020/part_integration_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/integrations/P3040_P4080_P5020/part_integration_ext.h @@ -0,0 +1,331 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + + @File part_integration_ext.h + + @Description P3040/P4080/P5020 external definitions and structures. +*//***************************************************************************/ +#ifndef __PART_INTEGRATION_EXT_H +#define __PART_INTEGRATION_EXT_H + +#include "std_ext.h" +#include "dpaa_integration_ext.h" + + +/**************************************************************************//** + @Group P3040/P4080/P5020_chip_id P5020 Application Programming Interface + + @Description P3040/P4080/P5020 Chip functions,definitions and enums. + + @{ +*//***************************************************************************/ + +#define CORE_E500MC + +#define INTG_MAX_NUM_OF_CORES 1 + + +/**************************************************************************//** + @Description Module types. +*//***************************************************************************/ +typedef enum e_ModuleId +{ + e_MODULE_ID_DUART_1 = 0, + e_MODULE_ID_DUART_2, + e_MODULE_ID_DUART_3, + e_MODULE_ID_DUART_4, + e_MODULE_ID_LAW, + e_MODULE_ID_LBC, + e_MODULE_ID_PAMU, + e_MODULE_ID_QM, /**< Queue manager module */ + e_MODULE_ID_BM, /**< Buffer manager module */ + e_MODULE_ID_QM_CE_PORTAL_0, + e_MODULE_ID_QM_CI_PORTAL_0, + e_MODULE_ID_QM_CE_PORTAL_1, + e_MODULE_ID_QM_CI_PORTAL_1, + e_MODULE_ID_QM_CE_PORTAL_2, + e_MODULE_ID_QM_CI_PORTAL_2, + e_MODULE_ID_QM_CE_PORTAL_3, + e_MODULE_ID_QM_CI_PORTAL_3, + e_MODULE_ID_QM_CE_PORTAL_4, + e_MODULE_ID_QM_CI_PORTAL_4, + e_MODULE_ID_QM_CE_PORTAL_5, + e_MODULE_ID_QM_CI_PORTAL_5, + e_MODULE_ID_QM_CE_PORTAL_6, + e_MODULE_ID_QM_CI_PORTAL_6, + e_MODULE_ID_QM_CE_PORTAL_7, + e_MODULE_ID_QM_CI_PORTAL_7, + e_MODULE_ID_QM_CE_PORTAL_8, + e_MODULE_ID_QM_CI_PORTAL_8, + e_MODULE_ID_QM_CE_PORTAL_9, + e_MODULE_ID_QM_CI_PORTAL_9, + e_MODULE_ID_BM_CE_PORTAL_0, + e_MODULE_ID_BM_CI_PORTAL_0, + e_MODULE_ID_BM_CE_PORTAL_1, + e_MODULE_ID_BM_CI_PORTAL_1, + e_MODULE_ID_BM_CE_PORTAL_2, + e_MODULE_ID_BM_CI_PORTAL_2, + e_MODULE_ID_BM_CE_PORTAL_3, + e_MODULE_ID_BM_CI_PORTAL_3, + e_MODULE_ID_BM_CE_PORTAL_4, + e_MODULE_ID_BM_CI_PORTAL_4, + e_MODULE_ID_BM_CE_PORTAL_5, + e_MODULE_ID_BM_CI_PORTAL_5, + e_MODULE_ID_BM_CE_PORTAL_6, + e_MODULE_ID_BM_CI_PORTAL_6, + e_MODULE_ID_BM_CE_PORTAL_7, + e_MODULE_ID_BM_CI_PORTAL_7, + e_MODULE_ID_BM_CE_PORTAL_8, + e_MODULE_ID_BM_CI_PORTAL_8, + e_MODULE_ID_BM_CE_PORTAL_9, + e_MODULE_ID_BM_CI_PORTAL_9, + e_MODULE_ID_FM1, /**< Frame manager #1 module */ + e_MODULE_ID_FM1_RTC, /**< FM Real-Time-Clock */ + e_MODULE_ID_FM1_MURAM, /**< FM Multi-User-RAM */ + e_MODULE_ID_FM1_BMI, /**< FM BMI block */ + e_MODULE_ID_FM1_QMI, /**< FM QMI block */ + e_MODULE_ID_FM1_PRS, /**< FM parser block */ + e_MODULE_ID_FM1_PORT_HO0, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_HO1, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_HO2, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_HO3, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_HO4, /**< FM 
Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_HO5, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_HO6, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM1_PORT_1GRx0, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GRx1, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GRx2, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GRx3, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GRx4, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_10GRx0, /**< FM Rx 10G MAC port block */ + e_MODULE_ID_FM1_PORT_1GTx0, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GTx1, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GTx2, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GTx3, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_1GTx4, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM1_PORT_10GTx0, /**< FM Tx 10G MAC port block */ + e_MODULE_ID_FM1_PLCR, /**< FM Policer */ + e_MODULE_ID_FM1_KG, /**< FM Keygen */ + e_MODULE_ID_FM1_DMA, /**< FM DMA */ + e_MODULE_ID_FM1_FPM, /**< FM FPM */ + e_MODULE_ID_FM1_IRAM, /**< FM Instruction-RAM */ + e_MODULE_ID_FM1_1GMDIO0, /**< FM 1G MDIO MAC 0*/ + e_MODULE_ID_FM1_1GMDIO1, /**< FM 1G MDIO MAC 1*/ + e_MODULE_ID_FM1_1GMDIO2, /**< FM 1G MDIO MAC 2*/ + e_MODULE_ID_FM1_1GMDIO3, /**< FM 1G MDIO MAC 3*/ + e_MODULE_ID_FM1_10GMDIO, /**< FM 10G MDIO */ + e_MODULE_ID_FM1_PRS_IRAM, /**< FM SW-parser Instruction-RAM */ + e_MODULE_ID_FM1_1GMAC0, /**< FM 1G MAC #0 */ + e_MODULE_ID_FM1_1GMAC1, /**< FM 1G MAC #1 */ + e_MODULE_ID_FM1_1GMAC2, /**< FM 1G MAC #2 */ + e_MODULE_ID_FM1_1GMAC3, /**< FM 1G MAC #3 */ + e_MODULE_ID_FM1_10GMAC0, /**< FM 10G MAC #0 */ + + e_MODULE_ID_FM2, /**< Frame manager #2 module */ + e_MODULE_ID_FM2_RTC, /**< FM Real-Time-Clock */ + e_MODULE_ID_FM2_MURAM, /**< FM Multi-User-RAM */ + e_MODULE_ID_FM2_BMI, /**< FM BMI block */ + e_MODULE_ID_FM2_QMI, /**< FM QMI block */ + e_MODULE_ID_FM2_PRS, /**< FM parser block */ + e_MODULE_ID_FM2_PORT_HO0, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_HO1, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_HO2, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_HO3, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_HO4, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_HO5, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_HO6, /**< FM Host-command/offline-parsing port block */ + e_MODULE_ID_FM2_PORT_1GRx0, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_1GRx1, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_1GRx2, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_1GRx3, /**< FM Rx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_10GRx0, /**< FM Rx 10G MAC port block */ + e_MODULE_ID_FM2_PORT_1GTx0, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_1GTx1, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_1GTx2, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_1GTx3, /**< FM Tx 1G MAC port block */ + e_MODULE_ID_FM2_PORT_10GTx0, /**< FM Tx 10G MAC port block */ + e_MODULE_ID_FM2_PLCR, /**< FM Policer */ + e_MODULE_ID_FM2_KG, /**< FM Keygen */ + e_MODULE_ID_FM2_DMA, /**< FM DMA */ + e_MODULE_ID_FM2_FPM, /**< FM FPM */ + e_MODULE_ID_FM2_IRAM, /**< FM Instruction-RAM */ + e_MODULE_ID_FM2_1GMDIO0, /**< FM 1G MDIO MAC 0*/ + e_MODULE_ID_FM2_1GMDIO1, /**< FM 1G MDIO MAC 1*/ + e_MODULE_ID_FM2_1GMDIO2, /**< FM 1G MDIO MAC 2*/ + 
e_MODULE_ID_FM2_1GMDIO3, /**< FM 1G MDIO MAC 3*/ + e_MODULE_ID_FM2_10GMDIO, /**< FM 10G MDIO */ + e_MODULE_ID_FM2_PRS_IRAM, /**< FM SW-parser Instruction-RAM */ + e_MODULE_ID_FM2_1GMAC0, /**< FM 1G MAC #0 */ + e_MODULE_ID_FM2_1GMAC1, /**< FM 1G MAC #1 */ + e_MODULE_ID_FM2_1GMAC2, /**< FM 1G MAC #2 */ + e_MODULE_ID_FM2_1GMAC3, /**< FM 1G MAC #3 */ + e_MODULE_ID_FM2_10GMAC0, /**< FM 10G MAC #0 */ + + e_MODULE_ID_SEC_GEN, /**< SEC 4.0 General registers */ + e_MODULE_ID_SEC_QI, /**< SEC 4.0 QI registers */ + e_MODULE_ID_SEC_JQ0, /**< SEC 4.0 JQ-0 registers */ + e_MODULE_ID_SEC_JQ1, /**< SEC 4.0 JQ-1 registers */ + e_MODULE_ID_SEC_JQ2, /**< SEC 4.0 JQ-2 registers */ + e_MODULE_ID_SEC_JQ3, /**< SEC 4.0 JQ-3 registers */ + e_MODULE_ID_SEC_RTIC, /**< SEC 4.0 RTIC registers */ + e_MODULE_ID_SEC_DECO0_CCB0, /**< SEC 4.0 DECO-0/CCB-0 registers */ + e_MODULE_ID_SEC_DECO1_CCB1, /**< SEC 4.0 DECO-1/CCB-1 registers */ + e_MODULE_ID_SEC_DECO2_CCB2, /**< SEC 4.0 DECO-2/CCB-2 registers */ + e_MODULE_ID_SEC_DECO3_CCB3, /**< SEC 4.0 DECO-3/CCB-3 registers */ + e_MODULE_ID_SEC_DECO4_CCB4, /**< SEC 4.0 DECO-4/CCB-4 registers */ + + e_MODULE_ID_MPIC, /**< MPIC */ + e_MODULE_ID_GPIO, /**< GPIO */ + e_MODULE_ID_SERDES, /**< SERDES */ + e_MODULE_ID_CPC_1, /**< CoreNet-Platform-Cache 1 */ + e_MODULE_ID_CPC_2, /**< CoreNet-Platform-Cache 2 */ + + e_MODULE_ID_SRIO_PORTS, /**< RapidIO controller */ + e_MODULE_ID_SRIO_MU, /**< RapidIO messaging unit module */ + + e_MODULE_ID_DUMMY_LAST +} e_ModuleId; + +#define NUM_OF_MODULES e_MODULE_ID_DUMMY_LAST + + +/***************************************************************************** + INTEGRATION-SPECIFIC MODULE CODES +******************************************************************************/ +#define MODULE_UNKNOWN 0x00000000 +#define MODULE_MEM 0x00010000 +#define MODULE_MM 0x00020000 +#define MODULE_CORE 0x00030000 +#define MODULE_CHIP 0x00040000 +#define MODULE_PLTFRM 0x00050000 +#define MODULE_PM 0x00060000 +#define MODULE_MMU 0x00070000 +#define MODULE_PIC 0x00080000 +#define MODULE_CPC 0x00090000 +#define MODULE_DUART 0x000a0000 +#define MODULE_SERDES 0x000b0000 +#define MODULE_PIO 0x000c0000 +#define MODULE_QM 0x000d0000 +#define MODULE_BM 0x000e0000 +#define MODULE_SEC 0x000f0000 +#define MODULE_LAW 0x00100000 +#define MODULE_LBC 0x00110000 +#define MODULE_PAMU 0x00120000 +#define MODULE_FM 0x00130000 +#define MODULE_FM_MURAM 0x00140000 +#define MODULE_FM_PCD 0x00150000 +#define MODULE_FM_RTC 0x00160000 +#define MODULE_FM_MAC 0x00170000 +#define MODULE_FM_PORT 0x00180000 +#define MODULE_DPA 0x00190000 +#define MODULE_SRIO 0x00200000 +#define MODULE_DMA 0x00100000 + +/***************************************************************************** + PAMU INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define PAMU_NUM_OF_PARTITIONS 5 + +#define PAMU_PICS_AVICS_ERRATA_PAMU3 + +/***************************************************************************** + LAW INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define LAW_NUM_OF_WINDOWS 32 +#define LAW_MIN_WINDOW_SIZE 0x0000000000001000LL /**< 4KB */ +#define LAW_MAX_WINDOW_SIZE 0x0000002000000000LL /**< 64GB */ + + +/***************************************************************************** + LBC INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ 
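+/**************************************************************************//**
+ @Description   Illustrative sketch only (not part of the original header):
+                validates a candidate Local Access Window size against the
+                LAW limits defined above, assuming (as on other QorIQ parts)
+                that window sizes must be powers of two. The helper name is
+                hypothetical.
+*//***************************************************************************/
+static __inline__ int Example_LawWindowSizeIsValid(uint64_t windowSize)
+{
+    if ((windowSize < LAW_MIN_WINDOW_SIZE) || (windowSize > LAW_MAX_WINDOW_SIZE))
+        return 0;
+    /* Power-of-two check: a valid window size has exactly one bit set */
+    return ((windowSize & (windowSize - 1)) == 0);
+}
+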
+/**************************************************************************//** + @Group lbc_exception_grp LBC Exception Unit + + @Description LBC Exception unit API functions, definitions and enums + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Anchor lbc_exbm + + @Collection LBC Errors Bit Mask + + These errors are reported through the exceptions callback.. + The values can be or'ed in any combination in the errors mask + parameter of the errors report structure. + + These errors can also be passed as a bit-mask to + LBC_EnableErrorChecking() or LBC_DisableErrorChecking(), + for enabling or disabling error checking. + @{ +*//***************************************************************************/ +#define LBC_ERR_BUS_MONITOR 0x80000000 /**< Bus monitor error */ +#define LBC_ERR_PARITY_ECC 0x20000000 /**< Parity error for GPCM/UPM */ +#define LBC_ERR_WRITE_PROTECT 0x04000000 /**< Write protection error */ +#define LBC_ERR_ATOMIC_WRITE 0x00800000 /**< Atomic write error */ +#define LBC_ERR_ATOMIC_READ 0x00400000 /**< Atomic read error */ +#define LBC_ERR_CHIP_SELECT 0x00080000 /**< Unrecognized chip select */ + +#define LBC_ERR_ALL (LBC_ERR_BUS_MONITOR | LBC_ERR_PARITY_ECC | \ + LBC_ERR_WRITE_PROTECT | LBC_ERR_ATOMIC_WRITE | \ + LBC_ERR_ATOMIC_READ | LBC_ERR_CHIP_SELECT) + /**< All possible errors */ +/* @} */ +/** @} */ /* end of lbc_exception_grp group */ + +#define LBC_INCORRECT_ERROR_REPORT_ERRATA + +#define LBC_NUM_OF_BANKS 8 +#define LBC_MAX_CS_SIZE 0x0000000100000000LL +#define LBC_ATOMIC_OPERATION_SUPPORT +#define LBC_PARITY_SUPPORT +#define LBC_ADDRESS_HOLD_TIME_CTRL +#define LBC_HIGH_CLK_DIVIDERS +#define LBC_FCM_AVAILABLE + +/***************************************************************************** + GPIO INTEGRATION-SPECIFIC DEFINITIONS +******************************************************************************/ +#define GPIO_NUM_OF_PORTS 1 /**< Number of ports in GPIO module; + Each port contains up to 32 i/O pins. */ + +#define GPIO_VALID_PIN_MASKS \ + { /* Port A */ 0xFFFFFFFF } + +#define GPIO_VALID_INTR_MASKS \ + { /* Port A */ 0xFFFFFFFF } + +#endif /* __PART_INTEGRATION_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/math_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/math_ext.h @@ -0,0 +1,98 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MATH_EXT_H +#define __MATH_EXT_H + + +#if defined(NCSW_LINUX) && defined(__KERNEL__) +#include + +#elif defined(__MWERKS__) +#define LOW(x) ( sizeof(x)==8 ? *(1+(int32_t*)&x) : (*(int32_t*)&x)) +#define HIGH(x) (*(int32_t*)&x) +#define ULOW(x) ( sizeof(x)==8 ? *(1+(uint32_t*)&x) : (*(uint32_t*)&x)) +#define UHIGH(x) (*(uint32_t*)&x) + +static const double big = 1.0e300; + +/* Macro for checking if a number is a power of 2 */ +static __inline__ double ceil(double x) +{ + int32_t i0,i1,j0; /*- cc 020130 -*/ + uint32_t i,j; /*- cc 020130 -*/ + i0 = HIGH(x); + i1 = LOW(x); + j0 = ((i0>>20)&0x7ff)-0x3ff; + if(j0<20) { + if(j0<0) { /* raise inexact if x != 0 */ + if(big+x>0.0) {/* return 0*sign(x) if |x|<1 */ + if(i0<0) {i0=0x80000000;i1=0;} + else if((i0|i1)!=0) { i0=0x3ff00000;i1=0;} + } + } else { + i = (uint32_t)(0x000fffff)>>j0; + if(((i0&i)|i1)==0) return x; /* x is integral */ + if(big+x>0.0) { /* raise inexact flag */ + if(i0>0) i0 += (0x00100000)>>j0; + i0 &= (~i); i1=0; + } + } + } else if (j0>51) { + if(j0==0x400) return x+x; /* inf or NaN */ + else return x; /* x is integral */ + } else { + i = ((uint32_t)(0xffffffff))>>(j0-20); /*- cc 020130 -*/ + if((i1&i)==0) return x; /* x is integral */ + if(big+x>0.0) { /* raise inexact flag */ + if(i0>0) { + if(j0==20) i0+=1; + else { + j = (uint32_t)(i1 + (1<<(52-j0))); + if(j +#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */ + + +#endif /* __MATH_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/ncsw_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/ncsw_ext.h @@ -0,0 +1,430 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + /**************************************************************************//** + @File ncsw_ext.h + + @Description General NetCommSw Standard Definitions +*//***************************************************************************/ + +#ifndef __NCSW_EXT_H +#define __NCSW_EXT_H + +#include "memcpy_ext.h" + + +#define WRITE_BLOCK IOMemSet32 +#define COPY_BLOCK Mem2IOCpy32 + +#define PTR_TO_UINT(_ptr) ((uintptr_t)(_ptr)) +#define UINT_TO_PTR(_val) ((void*)(uintptr_t)(_val)) + +#define PTR_MOVE(_ptr, _offset) (void*)((uint8_t*)(_ptr) + (_offset)) + + +#define WRITE_UINT8_UINT24(arg, data08, data24) WRITE_UINT32(arg,((uint32_t)(data08)<<24)|((uint32_t)(data24)&0x00FFFFFF)) +#define WRITE_UINT24_UINT8(arg, data24, data08) WRITE_UINT32(arg,((uint32_t)(data24)<< 8)|((uint32_t)(data08)&0x000000FF)) + +/* Little-Endian access macros */ + +#define WRITE_UINT16_LE(arg, data) \ + WRITE_UINT16((arg), SwapUint16(data)) + +#define WRITE_UINT32_LE(arg, data) \ + WRITE_UINT32((arg), SwapUint32(data)) + +#define WRITE_UINT64_LE(arg, data) \ + WRITE_UINT64((arg), SwapUint64(data)) + +#define GET_UINT16_LE(arg) \ + SwapUint16(GET_UINT16(arg)) + +#define GET_UINT32_LE(arg) \ + SwapUint32(GET_UINT32(arg)) + +#define GET_UINT64_LE(arg) \ + SwapUint64(GET_UINT64(arg)) + +/* Write and Read again macros */ +#define WRITE_UINT_SYNC(size, arg, data) \ + do { \ + WRITE_UINT##size((arg), (data)); \ + CORE_MemoryBarrier(); \ + } while (0) + +#define WRITE_UINT8_SYNC(arg, data) WRITE_UINT_SYNC(8, (arg), (data)) + +#define WRITE_UINT16_SYNC(arg, data) WRITE_UINT_SYNC(16, (arg), (data)) +#define WRITE_UINT32_SYNC(arg, data) WRITE_UINT_SYNC(32, (arg), (data)) + +#define MAKE_UINT64(high32, low32) (((uint64_t)high32 << 32) | (low32)) + + +/*----------------------*/ +/* Miscellaneous macros */ +/*----------------------*/ + +#define UNUSED(X) (X=X) + +#define KILOBYTE 0x400UL /* 1024 */ +#define MEGABYTE (KILOBYTE * KILOBYTE) /* 1024*1024 */ +#define GIGABYTE (KILOBYTE * MEGABYTE) /* 1024*1024*1024 */ + +#undef NO_IRQ +#define NO_IRQ (-1) +#define NCSW_MASTER_ID (0) + +/* Macro for checking if a number is a power of 2 */ +#define POWER_OF_2(n) (!((n) & ((n)-1))) + +/* Macro for calculating log of base 2 */ +#define LOG2(num, log2Num) \ + do \ + { \ + uint64_t tmp = (num); \ + log2Num = 0; \ + while (tmp > 1) \ + { \ + log2Num++; \ + tmp >>= 1; \ + } \ + } while (0) + +#define NEXT_POWER_OF_2(_num, _nextPow) \ +do \ +{ \ + if (POWER_OF_2(_num)) \ + _nextPow = (_num); \ + else \ + { \ + uint64_t tmp = (_num); \ + _nextPow = 1; \ + while (tmp) \ + { \ + _nextPow <<= 1; \ + tmp >>= 1; \ + } \ + } \ +} while (0) + +/* Ceiling division - not the fastest way, but safer in terms of overflow */ +#define DIV_CEIL(x,y) (((x)/(y)) + ((((((x)/(y)))*(y)) == (x)) ? 
0 : 1)) + +/* Round up a number to be a multiple of a second number */ +#define ROUND_UP(x,y) ((((x) + (y) - 1) / (y)) * (y)) + +/* Timing macro for converting usec units to number of ticks. */ +/* (number of usec * clock_Hz) / 1,000,000) - since */ +/* clk is in MHz units, no division needed. */ +#define USEC_TO_CLK(usec,clk) ((usec) * (clk)) +#define CYCLES_TO_USEC(cycles,clk) ((cycles) / (clk)) + +/* Timing macros for converting between nsec units and number of clocks. */ +#define NSEC_TO_CLK(nsec,clk) DIV_CEIL(((nsec) * (clk)), 1000) +#define CYCLES_TO_NSEC(cycles,clk) (((cycles) * 1000) / (clk)) + +/* Timing macros for converting between psec units and number of clocks. */ +#define PSEC_TO_CLK(psec,clk) DIV_CEIL(((psec) * (clk)), 1000000) +#define CYCLES_TO_PSEC(cycles,clk) (((cycles) * 1000000) / (clk)) + +/* Min, Max macros */ +#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#define MAX(a,b) ((a) > (b) ? (a) : (b)) +#define IN_RANGE(min,val,max) ((min)<=(val) && (val)<=(max)) + +#define ABS(a) ((a<0)?(a*-1):a) + +#if !(defined(ARRAY_SIZE)) +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif /* !defined(ARRAY_SIZE) */ + + +/* possible alignments */ +#define HALF_WORD_ALIGNMENT 2 +#define WORD_ALIGNMENT 4 +#define DOUBLE_WORD_ALIGNMENT 8 +#define BURST_ALIGNMENT 32 + +#define HALF_WORD_ALIGNED 0x00000001 +#define WORD_ALIGNED 0x00000003 +#define DOUBLE_WORD_ALIGNED 0x00000007 +#define BURST_ALIGNED 0x0000001f +#ifndef IS_ALIGNED +#define IS_ALIGNED(n,align) (!((uint32_t)(n) & (align - 1))) +#endif /* IS_ALIGNED */ + + +#define LAST_BUF 1 +#define FIRST_BUF 2 +#define SINGLE_BUF (LAST_BUF | FIRST_BUF) +#define MIDDLE_BUF 4 + +#define ARRAY_END -1 + +#define ILLEGAL_BASE (~0) + +#define BUF_POSITION(first, last) state[(!!(last))<<1 | !!(first)] +#define DECLARE_POSITION static uint8_t state[4] = { (uint8_t)MIDDLE_BUF, (uint8_t)FIRST_BUF, (uint8_t)LAST_BUF, (uint8_t)SINGLE_BUF }; + + +/**************************************************************************//** + @Description Timers operation mode +*//***************************************************************************/ +typedef enum e_TimerMode +{ + e_TIMER_MODE_INVALID = 0, + e_TIMER_MODE_FREE_RUN, /**< Free run - counter continues to increase + after reaching the reference value. */ + e_TIMER_MODE_PERIODIC, /**< Periodic - counter restarts counting from 0 + after reaching the reference value. */ + e_TIMER_MODE_SINGLE /**< Single (one-shot) - counter stops counting + after reaching the reference value. */ +} e_TimerMode; + + +/**************************************************************************//** + @Description Enumeration (bit flags) of communication modes (Transmit, + receive or both). +*//***************************************************************************/ +typedef enum e_CommMode +{ + e_COMM_MODE_NONE = 0, /**< No transmit/receive communication */ + e_COMM_MODE_RX = 1, /**< Only receive communication */ + e_COMM_MODE_TX = 2, /**< Only transmit communication */ + e_COMM_MODE_RX_AND_TX = 3 /**< Both transmit and receive communication */ +} e_CommMode; + +/**************************************************************************//** + @Description General Diagnostic Mode +*//***************************************************************************/ +typedef enum e_DiagMode +{ + e_DIAG_MODE_NONE = 0, /**< Normal operation; no diagnostic mode */ + e_DIAG_MODE_CTRL_LOOPBACK, /**< Loopback in the controller */ + e_DIAG_MODE_CHIP_LOOPBACK, /**< Loopback in the chip but not in the + controller; e.g. 
IO-pins, SerDes, etc. */ + e_DIAG_MODE_PHY_LOOPBACK, /**< Loopback in the external PHY */ + e_DIAG_MODE_EXT_LOOPBACK, /**< Loopback in the external line (beyond the PHY) */ + e_DIAG_MODE_CTRL_ECHO, /**< Echo incoming data by the controller */ + e_DIAG_MODE_PHY_ECHO /**< Echo incoming data by the PHY */ +} e_DiagMode; + +/**************************************************************************//** + @Description Possible RxStore callback responses. +*//***************************************************************************/ +typedef enum e_RxStoreResponse +{ + e_RX_STORE_RESPONSE_PAUSE /**< Pause invoking callback with received data; + in polling mode, start again invoking callback + only next time user invokes the receive routine; + in interrupt mode, start again invoking callback + only next time a receive event triggers an interrupt; + in all cases, received data that are pending are not + lost, rather, their processing is temporarily deferred; + in all cases, received data are processed in the order + in which they were received. */ + , e_RX_STORE_RESPONSE_CONTINUE /**< Continue invoking callback with received data. */ +} e_RxStoreResponse; + + +/**************************************************************************//** + @Description General Handle +*//***************************************************************************/ +typedef void * t_Handle; /**< handle, used as object's descriptor */ + +/**************************************************************************//** + @Description MUTEX type +*//***************************************************************************/ +typedef uint32_t t_Mutex; + +/**************************************************************************//** + @Description Error Code. + + The high word of the error code is the code of the software + module (driver). The low word is the error type (e_ErrorType). + To get the values from the error code, use GET_ERROR_TYPE() + and GET_ERROR_MODULE(). +*//***************************************************************************/ +typedef uint32_t t_Error; + +/**************************************************************************//** + @Description General prototype of interrupt service routine (ISR). + + @Param[in] handle - Optional handle of the module handling the interrupt. + + @Return None + *//***************************************************************************/ +typedef void (t_Isr)(t_Handle handle); + +/**************************************************************************//** + @Anchor mem_attr + + @Collection Memory Attributes + + Various attributes of memory partitions. These values may be + or'ed together to create a mask of all memory attributes. + @{ +*//***************************************************************************/ +#define MEMORY_ATTR_CACHEABLE 0x00000001 + /**< Memory is cacheable */ +#define MEMORY_ATTR_QE_2ND_BUS_ACCESS 0x00000002 + /**< Memory can be accessed by QUICC Engine + through its secondary bus interface */ + +/* @} */ + + +/**************************************************************************//** + @Function t_GetBufFunction + + @Description User callback function called by driver to get data buffer. + + User provides this function. Driver invokes it. 
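Before the buffer-pool callback parameters below, a brief aside on the helper macros defined earlier in this header (LOG2, NEXT_POWER_OF_2, DIV_CEIL, NSEC_TO_CLK, MAKE_UINT64). LOG2() and NEXT_POWER_OF_2() are statement macros that write their result into the second argument rather than returning a value; the hedged sketch below makes that explicit, and its numeric inputs are purely illustrative.

static void NcswMacroExample(void)
{
    uint64_t log2Num, nextPow;
    uint32_t ticks;

    LOG2(64, log2Num);              // log2Num == 6
    NEXT_POWER_OF_2(100, nextPow);  // nextPow == 128; power-of-2 inputs pass through unchanged

    ticks = (uint32_t)NSEC_TO_CLK(800, 500);     // 800 ns at a 500 MHz clock -> 400 ticks
    ticks = (uint32_t)DIV_CEIL(MEGABYTE, 1500);  // buffers needed for 1 MB of 1500-byte frames (rounds up)
    (void)ticks;

    (void)MAKE_UINT64(0x00000001, 0x00000000);   // 0x100000000 as a 64-bit value
}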
+ + @Param[in] h_BufferPool - A handle to buffer pool manager + @Param[out] p_BufContextHandle - Returns the user's private context that + should be associated with the buffer + + @Return Pointer to data buffer, NULL if error + *//***************************************************************************/ +typedef uint8_t * (t_GetBufFunction)(t_Handle h_BufferPool, + t_Handle *p_BufContextHandle); + +/**************************************************************************//** + @Function t_PutBufFunction + + @Description User callback function called by driver to return data buffer. + + User provides this function. Driver invokes it. + + @Param[in] h_BufferPool - A handle to buffer pool manager + @Param[in] p_Buffer - A pointer to buffer to return + @Param[in] h_BufContext - The user's private context associated with + the returned buffer + + @Return E_OK on success; Error code otherwise + *//***************************************************************************/ +typedef t_Error (t_PutBufFunction)(t_Handle h_BufferPool, + uint8_t *p_Buffer, + t_Handle h_BufContext); + +/**************************************************************************//** + @Function t_PhysToVirt + + @Description Translates a physical address to the matching virtual address. + + @Param[in] addr - The physical address to translate. + + @Return Virtual address. +*//***************************************************************************/ +typedef void * t_PhysToVirt(physAddress_t addr); + +/**************************************************************************//** + @Function t_VirtToPhys + + @Description Translates a virtual address to the matching physical address. + + @Param[in] addr - The virtual address to translate. + + @Return Physical address. +*//***************************************************************************/ +typedef physAddress_t t_VirtToPhys(void *addr); + +/**************************************************************************//** + @Description Buffer Pool Information Structure. +*//***************************************************************************/ +typedef struct t_BufferPoolInfo +{ + t_Handle h_BufferPool; /**< A handle to the buffer pool manager */ + t_GetBufFunction *f_GetBuf; /**< User callback to get a free buffer */ + t_PutBufFunction *f_PutBuf; /**< User callback to return a buffer */ + uint16_t bufferSize; /**< Buffer size (in bytes) */ + + t_PhysToVirt *f_PhysToVirt; /**< User callback to translate pool buffers + physical addresses to virtual addresses */ + t_VirtToPhys *f_VirtToPhys; /**< User callback to translate pool buffers + virtual addresses to physical addresses */ +} t_BufferPoolInfo; + + +/**************************************************************************//** + @Description User callback function called by driver when transmit completed. + + User provides this function. Driver invokes it. 
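Before the transmit-confirmation parameters below, a hedged sketch of how an application might wire the callback typedefs above into a t_BufferPoolInfo. The pool itself is hypothetical (the MyPool* names, the 1600-byte buffer size, and treating the address-translation callbacks as optional are all assumptions); only the typedefs, the structure layout, and XX_Malloc()/XX_Free() from xx_ext.h come from this patch.

static uint8_t * MyPoolGetBuf(t_Handle h_BufferPool, t_Handle *p_BufContextHandle)
{
    (void)h_BufferPool;
    *p_BufContextHandle = NULL;      // no per-buffer context in this sketch
    return XX_Malloc(1600);          // declared in xx_ext.h
}

static t_Error MyPoolPutBuf(t_Handle h_BufferPool, uint8_t *p_Buffer, t_Handle h_BufContext)
{
    (void)h_BufferPool;
    (void)h_BufContext;
    XX_Free(p_Buffer);
    return 0;                        // E_OK is assumed to be 0
}

static void MyPoolSetup(t_BufferPoolInfo *p_Info, t_Handle h_Pool)
{
    p_Info->h_BufferPool = h_Pool;
    p_Info->f_GetBuf     = MyPoolGetBuf;
    p_Info->f_PutBuf     = MyPoolPutBuf;
    p_Info->bufferSize   = 1600;
    p_Info->f_PhysToVirt = NULL;     // assumed optional here; a real pool would
    p_Info->f_VirtToPhys = NULL;     // supply its address-translation callbacks
}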
+ + @Param[in] h_App - Application's handle, as was provided to the + driver by the user + @Param[in] queueId - Transmit queue ID + @Param[in] p_Data - Pointer to the data buffer + @Param[in] h_BufContext - The user's private context associated with + the given data buffer + @Param[in] status - Transmit status and errors + @Param[in] flags - Driver-dependent information + *//***************************************************************************/ +typedef void (t_TxConfFunction)(t_Handle h_App, + uint32_t queueId, + uint8_t *p_Data, + t_Handle h_BufContext, + uint16_t status, + uint32_t flags); + +/**************************************************************************//** + @Description User callback function called by driver with receive data. + + User provides this function. Driver invokes it. + + @Param[in] h_App - Application's handle, as was provided to the + driver by the user + @Param[in] queueId - Receive queue ID + @Param[in] p_Data - Pointer to the buffer with received data + @Param[in] h_BufContext - The user's private context associated with + the given data buffer + @Param[in] length - Length of received data + @Param[in] status - Receive status and errors + @Param[in] position - Position of buffer in frame + @Param[in] flags - Driver-dependent information + + @Retval e_RX_STORE_RESPONSE_CONTINUE - order the driver to continue Rx + operation for all ready data. + @Retval e_RX_STORE_RESPONSE_PAUSE - order the driver to stop Rx operation. + *//***************************************************************************/ +typedef e_RxStoreResponse (t_RxStoreFunction)(t_Handle h_App, + uint32_t queueId, + uint8_t *p_Data, + t_Handle h_BufContext, + uint32_t length, + uint16_t status, + uint8_t position, + uint32_t flags); + + +#endif /* __NCSW_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/net_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/net_ext.h @@ -0,0 +1,388 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File net_ext.h + + @Description This file contains common and general netcomm headers definitions. +*//***************************************************************************/ +#ifndef __NET_EXT_H +#define __NET_EXT_H + +#include "std_ext.h" + + +typedef uint8_t headerFieldPpp_t; + +#define NET_HEADER_FIELD_PPP_PID (1) +#define NET_HEADER_FIELD_PPP_COMPRESSED (NET_HEADER_FIELD_PPP_PID << 1) +#define NET_HEADER_FIELD_PPP_ALL_FIELDS ((NET_HEADER_FIELD_PPP_PID << 2) - 1) + + +typedef uint8_t headerFieldPppoe_t; + +#define NET_HEADER_FIELD_PPPoE_VER (1) +#define NET_HEADER_FIELD_PPPoE_TYPE (NET_HEADER_FIELD_PPPoE_VER << 1) +#define NET_HEADER_FIELD_PPPoE_CODE (NET_HEADER_FIELD_PPPoE_VER << 2) +#define NET_HEADER_FIELD_PPPoE_SID (NET_HEADER_FIELD_PPPoE_VER << 3) +#define NET_HEADER_FIELD_PPPoE_LEN (NET_HEADER_FIELD_PPPoE_VER << 4) +#define NET_HEADER_FIELD_PPPoE_SESSION (NET_HEADER_FIELD_PPPoE_VER << 5) +#define NET_HEADER_FIELD_PPPoE_PID (NET_HEADER_FIELD_PPPoE_VER << 6) +#define NET_HEADER_FIELD_PPPoE_ALL_FIELDS ((NET_HEADER_FIELD_PPPoE_VER << 7) - 1) + +#define NET_HEADER_FIELD_PPPMUX_PID (1) +#define NET_HEADER_FIELD_PPPMUX_CKSUM (NET_HEADER_FIELD_PPPMUX_PID << 1) +#define NET_HEADER_FIELD_PPPMUX_COMPRESSED (NET_HEADER_FIELD_PPPMUX_PID << 2) +#define NET_HEADER_FIELD_PPPMUX_ALL_FIELDS ((NET_HEADER_FIELD_PPPMUX_PID << 3) - 1) + +#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF (1) +#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_LXT (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 1) +#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_LEN (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 2) +#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_PID (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 3) +#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_USE_PID (NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 4) +#define NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS ((NET_HEADER_FIELD_PPPMUX_SUBFRAME_PFF << 5) - 1) + + +typedef uint8_t headerFieldEth_t; + +#define NET_HEADER_FIELD_ETH_DA (1) +#define NET_HEADER_FIELD_ETH_SA (NET_HEADER_FIELD_ETH_DA << 1) +#define NET_HEADER_FIELD_ETH_LENGTH (NET_HEADER_FIELD_ETH_DA << 2) +#define NET_HEADER_FIELD_ETH_TYPE (NET_HEADER_FIELD_ETH_DA << 3) +#define NET_HEADER_FIELD_ETH_FINAL_CKSUM (NET_HEADER_FIELD_ETH_DA << 4) +#define NET_HEADER_FIELD_ETH_PADDING (NET_HEADER_FIELD_ETH_DA << 5) +#define NET_HEADER_FIELD_ETH_ALL_FIELDS ((NET_HEADER_FIELD_ETH_DA << 6) - 1) + + +typedef uint16_t headerFieldIpv4_t; + +#define NET_HEADER_FIELD_IPv4_VER (1) +#define NET_HEADER_FIELD_IPv4_HDR_LEN (NET_HEADER_FIELD_IPv4_VER << 1) +#define NET_HEADER_FIELD_IPv4_TOS (NET_HEADER_FIELD_IPv4_VER << 2) +#define NET_HEADER_FIELD_IPv4_TOTAL_LEN (NET_HEADER_FIELD_IPv4_VER << 3) +#define NET_HEADER_FIELD_IPv4_ID (NET_HEADER_FIELD_IPv4_VER << 4) +#define NET_HEADER_FIELD_IPv4_FLAG_D (NET_HEADER_FIELD_IPv4_VER << 5) +#define NET_HEADER_FIELD_IPv4_FLAG_M (NET_HEADER_FIELD_IPv4_VER << 6) +#define NET_HEADER_FIELD_IPv4_OFFSET (NET_HEADER_FIELD_IPv4_VER << 
7) +#define NET_HEADER_FIELD_IPv4_TTL (NET_HEADER_FIELD_IPv4_VER << 8) +#define NET_HEADER_FIELD_IPv4_PROTO (NET_HEADER_FIELD_IPv4_VER << 9) +#define NET_HEADER_FIELD_IPv4_CKSUM (NET_HEADER_FIELD_IPv4_VER << 10) +#define NET_HEADER_FIELD_IPv4_SRC_IP (NET_HEADER_FIELD_IPv4_VER << 11) +#define NET_HEADER_FIELD_IPv4_DST_IP (NET_HEADER_FIELD_IPv4_VER << 12) +#define NET_HEADER_FIELD_IPv4_OPTS (NET_HEADER_FIELD_IPv4_VER << 13) +#define NET_HEADER_FIELD_IPv4_OPTS_COUNT (NET_HEADER_FIELD_IPv4_VER << 14) +#define NET_HEADER_FIELD_IPv4_ALL_FIELDS ((NET_HEADER_FIELD_IPv4_VER << 15) - 1) + + +typedef uint8_t headerFieldIpv6_t; + +#define NET_HEADER_FIELD_IPv6_VER (1) +#define NET_HEADER_FIELD_IPv6_TC (NET_HEADER_FIELD_IPv6_VER << 1) +#define NET_HEADER_FIELD_IPv6_SRC_IP (NET_HEADER_FIELD_IPv6_VER << 2) +#define NET_HEADER_FIELD_IPv6_DST_IP (NET_HEADER_FIELD_IPv6_VER << 3) +#define NET_HEADER_FIELD_IPv6_NEXT_HDR (NET_HEADER_FIELD_IPv6_VER << 4) +#define NET_HEADER_FIELD_IPv6_FL (NET_HEADER_FIELD_IPv6_VER << 5) +#define NET_HEADER_FIELD_IPv6_HOP_LIMIT (NET_HEADER_FIELD_IPv6_VER << 6) +#define NET_HEADER_FIELD_IPv6_ALL_FIELDS ((NET_HEADER_FIELD_IPv6_VER << 7) - 1) + +#define NET_HEADER_FIELD_ICMP_TYPE (1) +#define NET_HEADER_FIELD_ICMP_CODE (NET_HEADER_FIELD_ICMP_TYPE << 1) +#define NET_HEADER_FIELD_ICMP_CKSUM (NET_HEADER_FIELD_ICMP_TYPE << 2) +#define NET_HEADER_FIELD_ICMP_ID (NET_HEADER_FIELD_ICMP_TYPE << 3) +#define NET_HEADER_FIELD_ICMP_SQ_NUM (NET_HEADER_FIELD_ICMP_TYPE << 4) +#define NET_HEADER_FIELD_ICMP_ALL_FIELDS ((NET_HEADER_FIELD_ICMP_TYPE << 5) - 1) + +#define NET_HEADER_FIELD_IGMP_VERSION (1) +#define NET_HEADER_FIELD_IGMP_TYPE (NET_HEADER_FIELD_IGMP_VERSION << 1) +#define NET_HEADER_FIELD_IGMP_CKSUM (NET_HEADER_FIELD_IGMP_VERSION << 2) +#define NET_HEADER_FIELD_IGMP_DATA (NET_HEADER_FIELD_IGMP_VERSION << 3) +#define NET_HEADER_FIELD_IGMP_ALL_FIELDS ((NET_HEADER_FIELD_IGMP_VERSION << 4) - 1) + + +typedef uint16_t headerFieldTcp_t; + +#define NET_HEADER_FIELD_TCP_PORT_SRC (1) +#define NET_HEADER_FIELD_TCP_PORT_DST (NET_HEADER_FIELD_TCP_PORT_SRC << 1) +#define NET_HEADER_FIELD_TCP_SEQ (NET_HEADER_FIELD_TCP_PORT_SRC << 2) +#define NET_HEADER_FIELD_TCP_ACK (NET_HEADER_FIELD_TCP_PORT_SRC << 3) +#define NET_HEADER_FIELD_TCP_OFFSET (NET_HEADER_FIELD_TCP_PORT_SRC << 4) +#define NET_HEADER_FIELD_TCP_FLAGS (NET_HEADER_FIELD_TCP_PORT_SRC << 5) +#define NET_HEADER_FIELD_TCP_WINDOW (NET_HEADER_FIELD_TCP_PORT_SRC << 6) +#define NET_HEADER_FIELD_TCP_CKSUM (NET_HEADER_FIELD_TCP_PORT_SRC << 7) +#define NET_HEADER_FIELD_TCP_URGPTR (NET_HEADER_FIELD_TCP_PORT_SRC << 8) +#define NET_HEADER_FIELD_TCP_OPTS (NET_HEADER_FIELD_TCP_PORT_SRC << 9) +#define NET_HEADER_FIELD_TCP_OPTS_COUNT (NET_HEADER_FIELD_TCP_PORT_SRC << 10) +#define NET_HEADER_FIELD_TCP_ALL_FIELDS ((NET_HEADER_FIELD_TCP_PORT_SRC << 11) - 1) + + +typedef uint8_t headerFieldSctp_t; + +#define NET_HEADER_FIELD_SCTP_PORT_SRC (1) +#define NET_HEADER_FIELD_SCTP_PORT_DST (NET_HEADER_FIELD_SCTP_PORT_SRC << 1) +#define NET_HEADER_FIELD_SCTP_VER_TAG (NET_HEADER_FIELD_SCTP_PORT_SRC << 2) +#define NET_HEADER_FIELD_SCTP_CKSUM (NET_HEADER_FIELD_SCTP_PORT_SRC << 3) +#define NET_HEADER_FIELD_SCTP_ALL_FIELDS ((NET_HEADER_FIELD_SCTP_PORT_SRC << 4) - 1) + + +typedef uint8_t headerFieldDccp_t; + +#define NET_HEADER_FIELD_DCCP_PORT_SRC (1) +#define NET_HEADER_FIELD_DCCP_PORT_DST (NET_HEADER_FIELD_DCCP_PORT_SRC << 1) +#define NET_HEADER_FIELD_DCCP_ALL_FIELDS ((NET_HEADER_FIELD_DCCP_PORT_SRC << 2) - 1) + + +typedef uint8_t headerFieldUdp_t; + +#define 
NET_HEADER_FIELD_UDP_PORT_SRC (1) +#define NET_HEADER_FIELD_UDP_PORT_DST (NET_HEADER_FIELD_UDP_PORT_SRC << 1) +#define NET_HEADER_FIELD_UDP_LEN (NET_HEADER_FIELD_UDP_PORT_SRC << 2) +#define NET_HEADER_FIELD_UDP_CKSUM (NET_HEADER_FIELD_UDP_PORT_SRC << 3) +#define NET_HEADER_FIELD_UDP_ALL_FIELDS ((NET_HEADER_FIELD_UDP_PORT_SRC << 4) - 1) + +typedef uint8_t headerFieldUdpEncapEsp_t; + +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC (1) +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_DST (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 1) +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_LEN (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 2) +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_CKSUM (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 3) +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SPI (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 4) +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_SEQUENCE_NUM (NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 5) +#define NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS ((NET_HEADER_FIELD_UDP_ENCAP_ESP_PORT_SRC << 6) - 1) + +#define NET_HEADER_FIELD_IPHC_CID (1) +#define NET_HEADER_FIELD_IPHC_CID_TYPE (NET_HEADER_FIELD_IPHC_CID << 1) +#define NET_HEADER_FIELD_IPHC_HCINDEX (NET_HEADER_FIELD_IPHC_CID << 2) +#define NET_HEADER_FIELD_IPHC_GEN (NET_HEADER_FIELD_IPHC_CID << 3) +#define NET_HEADER_FIELD_IPHC_D_BIT (NET_HEADER_FIELD_IPHC_CID << 4) +#define NET_HEADER_FIELD_IPHC_ALL_FIELDS ((NET_HEADER_FIELD_IPHC_CID << 5) - 1) + +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE (1) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_FLAGS (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 1) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_LENGTH (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 2) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_TSN (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 3) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_ID (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 4) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_STREAM_SQN (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 5) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_PAYLOAD_PID (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 6) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_UNORDERED (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 7) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_BEGGINING (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 8) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_END (NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 9) +#define NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS ((NET_HEADER_FIELD_SCTP_CHUNK_DATA_TYPE << 10) - 1) + +#define NET_HEADER_FIELD_L2TPv2_TYPE_BIT (1) +#define NET_HEADER_FIELD_L2TPv2_LENGTH_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 1) +#define NET_HEADER_FIELD_L2TPv2_SEQUENCE_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 2) +#define NET_HEADER_FIELD_L2TPv2_OFFSET_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 3) +#define NET_HEADER_FIELD_L2TPv2_PRIORITY_BIT (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 4) +#define NET_HEADER_FIELD_L2TPv2_VERSION (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 5) +#define NET_HEADER_FIELD_L2TPv2_LEN (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 6) +#define NET_HEADER_FIELD_L2TPv2_TUNNEL_ID (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 7) +#define NET_HEADER_FIELD_L2TPv2_SESSION_ID (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 8) +#define NET_HEADER_FIELD_L2TPv2_NS (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 9) +#define NET_HEADER_FIELD_L2TPv2_NR (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 10) +#define NET_HEADER_FIELD_L2TPv2_OFFSET_SIZE (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 11) +#define NET_HEADER_FIELD_L2TPv2_FIRST_BYTE (NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 12) +#define NET_HEADER_FIELD_L2TPv2_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv2_TYPE_BIT << 
13) - 1) + +#define NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT (1) +#define NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH_BIT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 1) +#define NET_HEADER_FIELD_L2TPv3_CTRL_SEQUENCE_BIT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 2) +#define NET_HEADER_FIELD_L2TPv3_CTRL_VERSION (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 3) +#define NET_HEADER_FIELD_L2TPv3_CTRL_LENGTH (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 4) +#define NET_HEADER_FIELD_L2TPv3_CTRL_CONTROL (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 5) +#define NET_HEADER_FIELD_L2TPv3_CTRL_SENT (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 6) +#define NET_HEADER_FIELD_L2TPv3_CTRL_RECV (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 7) +#define NET_HEADER_FIELD_L2TPv3_CTRL_FIRST_BYTE (NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 8) +#define NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv3_CTRL_TYPE_BIT << 9) - 1) + +#define NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT (1) +#define NET_HEADER_FIELD_L2TPv3_SESS_VERSION (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 1) +#define NET_HEADER_FIELD_L2TPv3_SESS_ID (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 2) +#define NET_HEADER_FIELD_L2TPv3_SESS_COOKIE (NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 3) +#define NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS ((NET_HEADER_FIELD_L2TPv3_SESS_TYPE_BIT << 4) - 1) + + +typedef uint8_t headerFieldVlan_t; + +#define NET_HEADER_FIELD_VLAN_VPRI (1) +#define NET_HEADER_FIELD_VLAN_CFI (NET_HEADER_FIELD_VLAN_VPRI << 1) +#define NET_HEADER_FIELD_VLAN_VID (NET_HEADER_FIELD_VLAN_VPRI << 2) +#define NET_HEADER_FIELD_VLAN_LENGTH (NET_HEADER_FIELD_VLAN_VPRI << 3) +#define NET_HEADER_FIELD_VLAN_TYPE (NET_HEADER_FIELD_VLAN_VPRI << 4) +#define NET_HEADER_FIELD_VLAN_ALL_FIELDS ((NET_HEADER_FIELD_VLAN_VPRI << 5) - 1) + +#define NET_HEADER_FIELD_VLAN_TCI (NET_HEADER_FIELD_VLAN_VPRI | \ + NET_HEADER_FIELD_VLAN_CFI | \ + NET_HEADER_FIELD_VLAN_VID) + + +typedef uint8_t headerFieldLlc_t; + +#define NET_HEADER_FIELD_LLC_DSAP (1) +#define NET_HEADER_FIELD_LLC_SSAP (NET_HEADER_FIELD_LLC_DSAP << 1) +#define NET_HEADER_FIELD_LLC_CTRL (NET_HEADER_FIELD_LLC_DSAP << 2) +#define NET_HEADER_FIELD_LLC_ALL_FIELDS ((NET_HEADER_FIELD_LLC_DSAP << 3) - 1) + +#define NET_HEADER_FIELD_NLPID_NLPID (1) +#define NET_HEADER_FIELD_NLPID_ALL_FIELDS ((NET_HEADER_FIELD_NLPID_NLPID << 1) - 1) + + +typedef uint8_t headerFieldSnap_t; + +#define NET_HEADER_FIELD_SNAP_OUI (1) +#define NET_HEADER_FIELD_SNAP_PID (NET_HEADER_FIELD_SNAP_OUI << 1) +#define NET_HEADER_FIELD_SNAP_ALL_FIELDS ((NET_HEADER_FIELD_SNAP_OUI << 2) - 1) + + +typedef uint8_t headerFieldLlcSnap_t; + +#define NET_HEADER_FIELD_LLC_SNAP_TYPE (1) +#define NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS ((NET_HEADER_FIELD_LLC_SNAP_TYPE << 1) - 1) + +#define NET_HEADER_FIELD_ARP_HTYPE (1) +#define NET_HEADER_FIELD_ARP_PTYPE (NET_HEADER_FIELD_ARP_HTYPE << 1) +#define NET_HEADER_FIELD_ARP_HLEN (NET_HEADER_FIELD_ARP_HTYPE << 2) +#define NET_HEADER_FIELD_ARP_PLEN (NET_HEADER_FIELD_ARP_HTYPE << 3) +#define NET_HEADER_FIELD_ARP_OPER (NET_HEADER_FIELD_ARP_HTYPE << 4) +#define NET_HEADER_FIELD_ARP_SHA (NET_HEADER_FIELD_ARP_HTYPE << 5) +#define NET_HEADER_FIELD_ARP_SPA (NET_HEADER_FIELD_ARP_HTYPE << 6) +#define NET_HEADER_FIELD_ARP_THA (NET_HEADER_FIELD_ARP_HTYPE << 7) +#define NET_HEADER_FIELD_ARP_TPA (NET_HEADER_FIELD_ARP_HTYPE << 8) +#define NET_HEADER_FIELD_ARP_ALL_FIELDS ((NET_HEADER_FIELD_ARP_HTYPE << 9) - 1) + +#define NET_HEADER_FIELD_RFC2684_LLC (1) +#define NET_HEADER_FIELD_RFC2684_NLPID (NET_HEADER_FIELD_RFC2684_LLC << 1) +#define NET_HEADER_FIELD_RFC2684_OUI 
(NET_HEADER_FIELD_RFC2684_LLC << 2) +#define NET_HEADER_FIELD_RFC2684_PID (NET_HEADER_FIELD_RFC2684_LLC << 3) +#define NET_HEADER_FIELD_RFC2684_VPN_OUI (NET_HEADER_FIELD_RFC2684_LLC << 4) +#define NET_HEADER_FIELD_RFC2684_VPN_IDX (NET_HEADER_FIELD_RFC2684_LLC << 5) +#define NET_HEADER_FIELD_RFC2684_ALL_FIELDS ((NET_HEADER_FIELD_RFC2684_LLC << 6) - 1) + +#define NET_HEADER_FIELD_USER_DEFINED_SRCPORT (1) +#define NET_HEADER_FIELD_USER_DEFINED_PCDID (NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 1) +#define NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS ((NET_HEADER_FIELD_USER_DEFINED_SRCPORT << 2) - 1) + +#define NET_HEADER_FIELD_PAYLOAD_BUFFER (1) +#define NET_HEADER_FIELD_PAYLOAD_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 1) +#define NET_HEADER_FIELD_MAX_FRM_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 2) +#define NET_HEADER_FIELD_MIN_FRM_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 3) +#define NET_HEADER_FIELD_PAYLOAD_TYPE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 4) +#define NET_HEADER_FIELD_FRAME_SIZE (NET_HEADER_FIELD_PAYLOAD_BUFFER << 5) +#define NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS ((NET_HEADER_FIELD_PAYLOAD_BUFFER << 6) - 1) + + +typedef uint8_t headerFieldGre_t; + +#define NET_HEADER_FIELD_GRE_TYPE (1) +#define NET_HEADER_FIELD_GRE_ALL_FIELDS ((NET_HEADER_FIELD_GRE_TYPE << 1) - 1) + + +typedef uint8_t headerFieldMinencap_t; + +#define NET_HEADER_FIELD_MINENCAP_SRC_IP (1) +#define NET_HEADER_FIELD_MINENCAP_DST_IP (NET_HEADER_FIELD_MINENCAP_SRC_IP << 1) +#define NET_HEADER_FIELD_MINENCAP_TYPE (NET_HEADER_FIELD_MINENCAP_SRC_IP << 2) +#define NET_HEADER_FIELD_MINENCAP_ALL_FIELDS ((NET_HEADER_FIELD_MINENCAP_SRC_IP << 3) - 1) + + +typedef uint8_t headerFieldIpsecAh_t; + +#define NET_HEADER_FIELD_IPSEC_AH_SPI (1) +#define NET_HEADER_FIELD_IPSEC_AH_NH (NET_HEADER_FIELD_IPSEC_AH_SPI << 1) +#define NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS ((NET_HEADER_FIELD_IPSEC_AH_SPI << 2) - 1) + + +typedef uint8_t headerFieldIpsecEsp_t; + +#define NET_HEADER_FIELD_IPSEC_ESP_SPI (1) +#define NET_HEADER_FIELD_IPSEC_ESP_SEQUENCE_NUM (NET_HEADER_FIELD_IPSEC_ESP_SPI << 1) +#define NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS ((NET_HEADER_FIELD_IPSEC_ESP_SPI << 2) - 1) + + +typedef uint8_t headerFieldMpls_t; + +#define NET_HEADER_FIELD_MPLS_LABEL_STACK (1) +#define NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS ((NET_HEADER_FIELD_MPLS_LABEL_STACK << 1) - 1) + + +typedef uint8_t headerFieldMacsec_t; + +#define NET_HEADER_FIELD_MACSEC_SECTAG (1) +#define NET_HEADER_FIELD_MACSEC_ALL_FIELDS ((NET_HEADER_FIELD_MACSEC_SECTAG << 1) - 1) + + +typedef enum { + HEADER_TYPE_NONE = 0, + HEADER_TYPE_PAYLOAD, + HEADER_TYPE_ETH, + HEADER_TYPE_VLAN, + HEADER_TYPE_IPv4, + HEADER_TYPE_IPv6, + HEADER_TYPE_TCP, + HEADER_TYPE_UDP, + HEADER_TYPE_IPHC, + HEADER_TYPE_SCTP, + HEADER_TYPE_SCTP_CHUNK_DATA, + HEADER_TYPE_PPPoE, + HEADER_TYPE_PPP, + HEADER_TYPE_PPPMUX, + HEADER_TYPE_PPPMUX_SUBFRAME, + HEADER_TYPE_L2TPv2, + HEADER_TYPE_L2TPv3_CTRL, + HEADER_TYPE_L2TPv3_SESS, + HEADER_TYPE_LLC, + HEADER_TYPE_LLC_SNAP, + HEADER_TYPE_NLPID, + HEADER_TYPE_SNAP, + HEADER_TYPE_MPLS, + HEADER_TYPE_IPSEC_AH, + HEADER_TYPE_IPSEC_ESP, + HEADER_TYPE_UDP_ENCAP_ESP, /* RFC 3948 */ + HEADER_TYPE_MACSEC, + HEADER_TYPE_GRE, + HEADER_TYPE_MINENCAP, + HEADER_TYPE_DCCP, + HEADER_TYPE_ICMP, + HEADER_TYPE_IGMP, + HEADER_TYPE_ARP, + HEADER_TYPE_CAPWAP, + HEADER_TYPE_CAPWAP_DTLS, + HEADER_TYPE_RFC2684, + HEADER_TYPE_USER_DEFINED_L2, + HEADER_TYPE_USER_DEFINED_L3, + HEADER_TYPE_USER_DEFINED_L4, + HEADER_TYPE_USER_DEFINED_SHIM1, + HEADER_TYPE_USER_DEFINED_SHIM2, + MAX_HEADER_TYPE_COUNT +} 
e_NetHeaderType; + + +#endif /* __NET_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/std_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/std_ext.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + /**************************************************************************//** + + @File std_ext.h + + @Description General Standard Definitions +*//***************************************************************************/ + +#ifndef __STD_EXT_H +#define __STD_EXT_H + + +#include "types_ext.h" +#include "ncsw_ext.h" + + +#endif /* __STD_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/stdarg_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/stdarg_ext.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __STDARG_EXT_H +#define __STDARG_EXT_H + + +#if defined(NCSW_LINUX) && defined(__KERNEL__) +#include + +#else +#include + +#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */ + +#include "std_ext.h" + + +#endif /* __STDARG_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/stdlib_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/stdlib_ext.h @@ -0,0 +1,161 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + +#ifndef __STDLIB_EXT_H +#define __STDLIB_EXT_H + + +#if (defined(NCSW_LINUX)) && defined(__KERNEL__) +#include "stdarg_ext.h" +#include "std_ext.h" + + +/** + * strtoul - convert a string to an uint32_t + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +uint32_t strtoul(const char *cp,char **endp,uint32_t base); + +/** + * strtol - convert a string to a int32_t + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +long strtol(const char *cp,char **endp,uint32_t base); + +/** + * strtoull - convert a string to an uint64_t + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +uint64_t strtoull(const char *cp,char **endp,uint32_t base); + +/** + * strtoll - convert a string to a int64 long + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +long long strtoll(const char *cp,char **endp,uint32_t base); + +/** + * atoi - convert a character to a int + * @s: The start of the string + */ +int atoi(const char *s); + +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @count: The maximum number of bytes to search + */ +size_t strnlen(const char * s, size_t count); + +/** + * strlen - Find the length of a string + * @s: The string to be sized + */ +size_t strlen(const char * s); + +/** + * strtok - Split a string into tokens + * @s: The string to be searched + * @ct: The characters to search for + * + * WARNING: strtok is deprecated, use strsep instead. + */ +char * strtok(char * s,const char * ct); + +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: The maximum number of bytes to copy + * + * Note that unlike userspace strncpy, this does not %NUL-pad the buffer. + * However, the result is not %NUL-terminated if the source exceeds + * @count bytes. + */ +char * strncpy(char * dest,const char *src,size_t count); + +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + */ +char * strcpy(char * dest,const char *src); + +/** + * vsscanf - Unformat a buffer into a list of arguments + * @buf: input buffer + * @fmt: format of buffer + * @args: arguments + */ +int vsscanf(const char * buf, const char * fmt, va_list args); + +/** + * vsnprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @size: The size of the buffer, including the trailing null space + * @fmt: The format string to use + * @args: Arguments for the format string + * + * Call this function if you are already dealing with a va_list. + * You probably want snprintf instead. + */ +int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); + +/** + * vsprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @fmt: The format string to use + * @args: Arguments for the format string + * + * Call this function if you are already dealing with a va_list. + * You probably want sprintf instead. 
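A hedged sketch of the usual calling pattern for these va_list variants: a printf-style wrapper builds the va_list and passes it on. MyLog() and BUF_SIZE are illustrative names, and the final call simply reuses XX_Print() as declared in xx_ext.h.

#define BUF_SIZE 256                 // illustrative buffer size

static void MyLog(const char *fmt, ...)
{
    char buf[BUF_SIZE];
    va_list args;

    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);   // bounded formatting into buf
    va_end(args);

    XX_Print("%s", buf);
}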
+ */ +int vsprintf(char *buf, const char *fmt, va_list args); + +#else +#include +#include +#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */ + +#include "std_ext.h" + + +#endif /* __STDLIB_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/string_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/string_ext.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __STRING_EXT_H +#define __STRING_EXT_H + + +#if defined(NCSW_LINUX) && defined(__KERNEL__) +#include +#include +extern char * strtok ( char * str, const char * delimiters ); + +#elif defined(__KERNEL__) +#include "linux/types.h" +#include "linux/posix_types.h" +#include "linux/string.h" + +#else +#include + +#endif /* defined(NCSW_LINUX) && defined(__KERNEL__) */ + +#include "std_ext.h" + + +#endif /* __STRING_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/types_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/types_ext.h @@ -0,0 +1,111 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + /**************************************************************************//** + @File types_ext.h + + @Description General types Standard Definitions +*//***************************************************************************/ + +#ifndef __TYPES_EXT_H +#define __TYPES_EXT_H + + +#if defined(NCSW_LINUX) +#include "types_linux.h" + +#elif defined(NCSW_LINUX_USD) +#include "types_linux_usd.h" + +#elif defined(NCSW_VXWORKS) +#include "types_vxworks.h" + +#elif defined(__MWERKS__) && defined(__GNUC__) && defined(__cplusplus) +#include "types_bb_gpp.h" + +#elif defined(__MWERKS__) && defined(__GNUC__) +#include "types_bb_gcc.h" + +#elif defined(__ghs__) +#include "types_ghs.h" + +#else +#include "types_dflt.h" +#endif /* defined (__ROCOO__) */ + + +static __inline__ void TypesChecker(void) +{ +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(push,1) +#endif /* defined(__MWERKS__) && ... */ +#define MEM_MAP_START + _Packed struct strct { + __volatile__ int vi; + } _PackedType; +#define MEM_MAP_END +#if defined(__MWERKS__) && !defined(__GNUC__) +#pragma pack(pop) +#endif /* defined(__MWERKS__) && ... */ + size_t size = 0; + bool tr = TRUE, fls = FALSE; + struct strct *p_Strct = NULL; + physAddress_t addr = 0x100; + + tr = fls; + p_Strct = p_Strct; + size++; + if (tr) size++; + + WRITE_UINT8(*((uint8_t*)((size_t)(addr))), + GET_UINT8(*((uint8_t*)((size_t)(addr))))); + + WRITE_UINT8(*((uint8_t*)((size_t)(UINT8_MAX))), + GET_UINT8(*((uint8_t*)((size_t)(UINT8_MAX))))); + WRITE_UINT16(*((uint16_t*)((size_t)(UINT16_MAX))), + GET_UINT16(*((uint16_t*)((size_t)(UINT16_MAX))))); + WRITE_UINT32(*((uint32_t*)((size_t)(UINT32_MAX))), + GET_UINT32(*((uint32_t*)((size_t)(UINT32_MAX))))); + WRITE_UINT64(*((uint64_t*)((size_t)(UINT64_MAX))), + GET_UINT64(*((uint64_t*)((size_t)(UINT64_MAX))))); + WRITE_UINT8(*((uint8_t*)((size_t)(INT8_MAX))), + GET_UINT8(*((uint8_t*)((size_t)(INT8_MIN))))); + WRITE_UINT16(*((uint16_t*)((size_t)(INT16_MAX))), + GET_UINT16(*((uint16_t*)((size_t)(INT16_MIN))))); + WRITE_UINT32(*((uint32_t*)((size_t)(INT32_MAX))), + GET_UINT32(*((uint32_t*)((size_t)(INT32_MIN))))); + WRITE_UINT64(*((uint64_t*)((size_t)(INT64_MAX))), + GET_UINT64(*((uint64_t*)((size_t)(INT64_MIN))))); +} + + +#endif /* __TYPES_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/inc/xx_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/inc/xx_ext.h @@ -0,0 +1,881 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File xx_ext.h + + @Description Prototypes, externals and typedefs for system-supplied + (external) routines +*//***************************************************************************/ + +#ifndef __XX_EXT_H +#define __XX_EXT_H + +#include "std_ext.h" +#include "part_ext.h" + +#if defined(__MWERKS__) && defined(OPTIMIZED_FOR_SPEED) +#include "xx_integration_ext.h" +#endif /* defined(__MWERKS__) && defined(OPTIMIZED_FOR_SPEED) */ + + +/**************************************************************************//** + @Group xx_id XX Interface (System call hooks) + + @Description Prototypes, externals and typedefs for system-supplied + (external) routines + + @{ +*//***************************************************************************/ + +#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0)) +/**************************************************************************//** + @Function XX_EventById + + @Description Event reporting routine - executed only when REPORT_EVENTS=1. + + @Param[in] event - Event code (e_Event). + @Param[in] appId - Application identifier. + @Param[in] flags - Event flags. + @Param[in] msg - Event message. 
+ + @Return None +*//***************************************************************************/ +void XX_EventById(uint32_t event, t_Handle appId, uint16_t flags, char *msg); + +#else /* not REPORT_EVENTS */ +#define XX_EventById(event, appId, flags, msg) +#endif /* REPORT_EVENTS */ + + + +#ifdef DEBUG_XX_MALLOC +void * XX_MallocDebug(uint32_t size, char *fname, int line); + +void * XX_MallocSmartDebug(uint32_t size, + int memPartitionId, + uint32_t alignment, + char *fname, + int line); + +#define XX_Malloc(sz) \ + XX_MallocDebug((sz), __FILE__, __LINE__) + +#define XX_MallocSmart(sz, memt, al) \ + XX_MallocSmartDebug((sz), (memt), (al), __FILE__, __LINE__) + +#else /* not DEBUG_XX_MALLOC */ +/**************************************************************************//** + @Function XX_Malloc + + @Description allocates contiguous block of memory. + + @Param[in] size - Number of bytes to allocate. + + @Return The address of the newly allocated block on success, NULL on failure. +*//***************************************************************************/ +void * XX_Malloc(uint32_t size); + +/**************************************************************************//** + @Function XX_MallocSmart + + @Description Allocates contiguous block of memory in a specified + alignment and from the specified segment. + + @Param[in] size - Number of bytes to allocate. + @Param[in] memPartitionId - Memory partition ID; The value zero must + be mapped to the default heap partition. + @Param[in] alignment - Required memory alignment (in bytes). + + @Return The address of the newly allocated block on success, NULL on failure. +*//***************************************************************************/ +void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment); +#endif /* not DEBUG_XX_MALLOC */ + +/**************************************************************************//** + @Function XX_FreeSmart + + @Description Frees the memory block pointed to by "p". + Only for memory allocated by XX_MallocSmart + + @Param[in] p_Memory - pointer to the memory block. + + @Return None. +*//***************************************************************************/ +void XX_FreeSmart(void *p_Memory); + +/**************************************************************************//** + @Function XX_Free + + @Description frees the memory block pointed to by "p". + + @Param[in] p_Memory - pointer to the memory block. + + @Return None. +*//***************************************************************************/ +void XX_Free(void *p_Memory); + +#ifndef NCSW_LINUX +/**************************************************************************//** + @Function XX_GetMemPartitionBase + + @Description This routine gets the address of a memory segment according to + the memory type. + + @Param[in] memPartitionId - Memory partition ID; The value zero must + be mapped to the default heap partition. + + @Return The address of the required memory type. +*//***************************************************************************/ +void * XX_GetMemPartitionBase(int memPartitionId); +#endif + +/**************************************************************************//** + @Function XX_Print + + @Description print a string. + + @Param[in] str - string to print. + + @Return None. 
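+
+ Illustrative usage only (a minimal sketch; the "portId" variable and
+ the message text are hypothetical and not part of this API):
+
+ XX_Print("FM port %d: link is up\r\n", portId);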
+*//***************************************************************************/ +void XX_Print(char *str, ...); + +/**************************************************************************//** + @Function XX_GetChar + + @Description Get character from console. + + @Return Character is returned on success. Zero is returned otherwise. +*//***************************************************************************/ +char XX_GetChar(void); + +/**************************************************************************//** + @Function XX_SetIntr + + @Description Set an interrupt service routine for a specific interrupt source. + + @Param[in] irq - Interrupt ID (system-specific number). + @Param[in] f_Isr - Callback routine that will be called when the interrupt occurs. + @Param[in] handle - The argument for the user callback routine. + + @Return E_OK on success; error code otherwise.. +*//***************************************************************************/ +t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle); + +/**************************************************************************//** + @Function XX_FreeIntr + + @Description Free a specific interrupt and a specific callback routine. + + @Param[in] irq - Interrupt ID (system-specific number). + + @Return E_OK on success; error code otherwise.. +*//***************************************************************************/ +t_Error XX_FreeIntr(int irq); + +/**************************************************************************//** + @Function XX_EnableIntr + + @Description Enable a specific interrupt. + + @Param[in] irq - Interrupt ID (system-specific number). + + @Return E_OK on success; error code otherwise.. +*//***************************************************************************/ +t_Error XX_EnableIntr(int irq); + +/**************************************************************************//** + @Function XX_DisableIntr + + @Description Disable a specific interrupt. + + @Param[in] irq - Interrupt ID (system-specific number). + + @Return E_OK on success; error code otherwise.. +*//***************************************************************************/ +t_Error XX_DisableIntr(int irq); + +#if !(defined(__MWERKS__) && defined(OPTIMIZED_FOR_SPEED)) +/**************************************************************************//** + @Function XX_DisableAllIntr + + @Description Disable all interrupts by masking them at the CPU. + + @Return A value that represents the interrupts state before the + operation, and should be passed to the matching + XX_RestoreAllIntr() call. +*//***************************************************************************/ +uint32_t XX_DisableAllIntr(void); + +/**************************************************************************//** + @Function XX_RestoreAllIntr + + @Description Restore previous state of interrupts level at the CPU. + + @Param[in] flags - A value that represents the interrupts state to restore, + as returned by the matching call for XX_DisableAllIntr(). + + @Return None. +*//***************************************************************************/ +void XX_RestoreAllIntr(uint32_t flags); +#endif /* !(defined(__MWERKS__) && defined(OPTIMIZED_FOR_SPEED)) */ + +/**************************************************************************//** + @Function XX_Call + + @Description Call a service in another task. + + Activate the routine "f" via the queue identified by "IntrManagerId". 
The
+ parameter to "f" is Id - the handle of the destination object.
+
+ @Param[in] intrManagerId - Queue ID.
+ @Param[in] f - routine pointer.
+ @Param[in] Id - the parameter to be passed to f().
+ @Param[in] h_App - Application handle.
+ @Param[in] flags - Unused.
+
+ @Return E_OK is returned on success. E_FAIL is returned otherwise (usually an operating system level failure).
+*//***************************************************************************/
+t_Error XX_Call( uint32_t intrManagerId,
+ t_Error (* f)(t_Handle),
+ t_Handle Id,
+ t_Handle h_App,
+ uint16_t flags );
+
+/**************************************************************************//**
+ @Function XX_Exit
+
+ @Description Stop execution and report status (where applicable).
+
+ @Param[in] status - Exit status.
+*//***************************************************************************/
+void XX_Exit(int status);
+
+/*****************************************************************************/
+/* Tasklet Service Routines */
+/*****************************************************************************/
+typedef t_Handle t_TaskletHandle;
+
+/**************************************************************************//**
+ @Function XX_InitTasklet
+
+ @Description Create and initialize a tasklet object.
+
+ @Param[in] routine - A routine to be run as a tasklet.
+ @Param[in] data - An argument to pass to the tasklet.
+
+ @Return Tasklet handle is returned on success. NULL is returned otherwise.
+*//***************************************************************************/
+t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data);
+
+/**************************************************************************//**
+ @Function XX_FreeTasklet
+
+ @Description Free a tasklet object.
+
+ @Param[in] h_Tasklet - A handle to the tasklet to be freed.
+
+ @Return None.
+*//***************************************************************************/
+void XX_FreeTasklet (t_TaskletHandle h_Tasklet);
+
+/**************************************************************************//**
+ @Function XX_ScheduleTask
+
+ @Description Schedule a tasklet object.
+
+ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
+ @Param[in] immediate - Indicates whether to schedule this tasklet on
+ the immediate queue or on the delayed one.
+
+ @Return 0 - on success. Error code - otherwise.
+*//***************************************************************************/
+int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate);
+
+/**************************************************************************//**
+ @Function XX_FlushScheduledTasks
+
+ @Description Flush all tasks in the scheduled-tasks queue.
+
+ @Return None.
+*//***************************************************************************/
+void XX_FlushScheduledTasks(void);
+
+/**************************************************************************//**
+ @Function XX_TaskletIsQueued
+
+ @Description Check whether the tasklet is queued.
+
+ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
+
+ @Return 1 - the tasklet is queued. 0 - otherwise.
+*//***************************************************************************/
+int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet);
+
+/**************************************************************************//**
+ @Function XX_SetTaskletData
+
+ @Description Set the data of a scheduled tasklet. Used to change the data of an
+ already scheduled tasklet.
+
+ @Param[in] h_Tasklet - A handle to a tasklet to be scheduled.
+ @Param[in] data - Data to be set. +*//***************************************************************************/ +void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data); + +/**************************************************************************//** + @Function XX_GetTaskletData + + @Description Get the data of scheduled task. + + @Param[in] h_Tasklet - A handle to a tasklet to be scheduled. + + @Return handle to the data of the task. +*//***************************************************************************/ +t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet); + +/**************************************************************************//** + @Function XX_BottomHalf + + @Description Bottom half implementation, invoked by the interrupt handler. + + This routine handles all bottom-half tasklets with interrupts + enabled. + + @Return None. +*//***************************************************************************/ +void XX_BottomHalf(void); + + +/*****************************************************************************/ +/* Spinlock Service Routines */ +/*****************************************************************************/ + +/**************************************************************************//** + @Function XX_InitSpinlock + + @Description Creates a spinlock. + + @Return Spinlock handle is returned on success; NULL otherwise. +*//***************************************************************************/ +t_Handle XX_InitSpinlock(void); + +/**************************************************************************//** + @Function XX_FreeSpinlock + + @Description Frees the memory allocated for the spinlock creation. + + @Param[in] h_Spinlock - A handle to a spinlock. + + @Return None. +*//***************************************************************************/ +void XX_FreeSpinlock(t_Handle h_Spinlock); + +/**************************************************************************//** + @Function XX_LockSpinlock + + @Description Locks a spinlock. + + @Param[in] h_Spinlock - A handle to a spinlock. + + @Return None. +*//***************************************************************************/ +void XX_LockSpinlock(t_Handle h_Spinlock); + +/**************************************************************************//** + @Function XX_UnlockSpinlock + + @Description Unlocks a spinlock. + + @Param[in] h_Spinlock - A handle to a spinlock. + + @Return None. +*//***************************************************************************/ +void XX_UnlockSpinlock(t_Handle h_Spinlock); + +/**************************************************************************//** + @Function XX_LockIntrSpinlock + + @Description Locks a spinlock (interrupt safe). + + @Param[in] h_Spinlock - A handle to a spinlock. + + @Return A value that represents the interrupts state before the + operation, and should be passed to the matching + XX_UnlockIntrSpinlock() call. +*//***************************************************************************/ +uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock); + +/**************************************************************************//** + @Function XX_UnlockIntrSpinlock + + @Description Unlocks a spinlock (interrupt safe). + + @Param[in] h_Spinlock - A handle to a spinlock. + @Param[in] intrFlags - A value that represents the interrupts state to + restore, as returned by the matching call for + XX_LockIntrSpinlock(). + + @Return None. 
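+
+ Illustrative pairing only (a minimal sketch; the "h_Lock" and "counter"
+ names are hypothetical and not part of this API):
+
+ t_Handle h_Lock = XX_InitSpinlock();
+ uint32_t intrFlags = XX_LockIntrSpinlock(h_Lock);
+ counter++; /* critical section - interrupts masked */
+ XX_UnlockIntrSpinlock(h_Lock, intrFlags);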
+*//***************************************************************************/
+void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags);
+
+
+/*****************************************************************************/
+/* Timers Service Routines */
+/*****************************************************************************/
+
+/**************************************************************************//**
+ @Function XX_CurrentTime
+
+ @Description Returns current system time.
+
+ @Return Current system time (in milliseconds).
+*//***************************************************************************/
+uint32_t XX_CurrentTime(void);
+
+/**************************************************************************//**
+ @Function XX_CreateTimer
+
+ @Description Creates a timer.
+
+ @Return Timer handle is returned on success; NULL otherwise.
+*//***************************************************************************/
+t_Handle XX_CreateTimer(void);
+
+/**************************************************************************//**
+ @Function XX_FreeTimer
+
+ @Description Frees the memory allocated for the timer creation.
+
+ @Param[in] h_Timer - A handle to a timer.
+
+ @Return None.
+*//***************************************************************************/
+void XX_FreeTimer(t_Handle h_Timer);
+
+/**************************************************************************//**
+ @Function XX_StartTimer
+
+ @Description Starts a timer.
+
+ The user can choose to start the timer as a periodic timer or as
+ a one-shot timer. The user should provide a callback routine that
+ will be called when the timer expires.
+
+ @Param[in] h_Timer - A handle to a timer.
+ @Param[in] msecs - Timer expiration period (in milliseconds).
+ @Param[in] periodic - TRUE for a periodic timer;
+ FALSE for a one-shot timer.
+ @Param[in] f_TimerExpired - A callback routine to be called when the
+ timer expires.
+ @Param[in] h_Arg - The argument to pass to the timer-expired
+ callback routine.
+
+ @Return None.
+*//***************************************************************************/
+void XX_StartTimer(t_Handle h_Timer,
+ uint32_t msecs,
+ bool periodic,
+ void (*f_TimerExpired)(t_Handle h_Arg),
+ t_Handle h_Arg);
+
+/**************************************************************************//**
+ @Function XX_StopTimer
+
+ @Description Stops a timer that was previously started by XX_StartTimer().
+
+ @Param[in] h_Timer - A handle to a timer.
+
+ @Return None.
+*//***************************************************************************/
+void XX_StopTimer(t_Handle h_Timer);
+
+/**************************************************************************//**
+ @Function XX_GetExpirationTime
+
+ @Description Returns the time (in milliseconds) remaining until the
+ expiration of a timer.
+
+ @Param[in] h_Timer - A handle to a timer.
+
+ @Return The time left until the timer expires.
+*//***************************************************************************/
+uint32_t XX_GetExpirationTime(t_Handle h_Timer);
+
+/**************************************************************************//**
+ @Function XX_ModTimer
+
+ @Description Updates the expiration time of a timer.
+
+ This routine adds the given time to the current system time,
+ and sets this value as the new expiration time of the timer.
+
+ @Param[in] h_Timer - A handle to a timer.
+ @Param[in] msecs - The new interval until timer expiration
+ (in milliseconds).
+
+ @Return None.
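+
+ Illustrative usage only (a minimal sketch; the "WatchdogExpired" callback
+ and the "h_Dev" handle are hypothetical and not part of this API):
+
+ t_Handle h_Timer = XX_CreateTimer();
+ XX_StartTimer(h_Timer, 100, FALSE, WatchdogExpired, h_Dev);
+ /* activity observed - push the expiration out by another 100 ms */
+ XX_ModTimer(h_Timer, 100);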
+*//***************************************************************************/ +void XX_ModTimer(t_Handle h_Timer, uint32_t msecs); + +/**************************************************************************//** + @Function XX_TimerIsActive + + @Description Checks whether a timer is active (pending) or not. + + @Param[in] h_Timer - A handle to a timer. + + @Return 0 - the timer is inactive; Non-zero value - the timer is active; +*//***************************************************************************/ +int XX_TimerIsActive(t_Handle h_Timer); + +/**************************************************************************//** + @Function XX_Sleep + + @Description Non-busy wait until the desired time (in milliseconds) has passed. + + @Param[in] msecs - The requested sleep time (in milliseconds). + + @Return None. + + @Cautions This routine enables interrupts during its wait time. +*//***************************************************************************/ +uint32_t XX_Sleep(uint32_t msecs); + +/**************************************************************************//** + @Function XX_UDelay + + @Description Busy-wait until the desired time (in microseconds) has passed. + + @Param[in] usecs - The requested delay time (in microseconds). + + @Return None. + + @Cautions It is highly unrecommended to call this routine during interrupt + time, because the system time may not be updated properly during + the delay loop. The behavior of this routine during interrupt + time is unexpected. +*//***************************************************************************/ +void XX_UDelay(uint32_t usecs); + + +/*****************************************************************************/ +/* Other Service Routines */ +/*****************************************************************************/ + +/**************************************************************************//** + @Function XX_PhysToVirt + + @Description Translates a physical address to the matching virtual address. + + @Param[in] addr - The physical address to translate. + + @Return Virtual address. +*//***************************************************************************/ +void * XX_PhysToVirt(physAddress_t addr); + +/**************************************************************************//** + @Function XX_VirtToPhys + + @Description Translates a virtual address to the matching physical address. + + @Param[in] addr - The virtual address to translate. + + @Return Physical address. +*//***************************************************************************/ +physAddress_t XX_VirtToPhys(void *addr); + + +/**************************************************************************//** + @Group xx_ipc XX Inter-Partition-Communication API + + @Description The following API is to be used when working with multiple + partitions configuration. + + @{ +*//***************************************************************************/ + +#define XX_IPC_MAX_ADDR_NAME_LENGTH 16 /**< Maximum length of an endpoint name string; + The IPC service can use this constant to limit + the storage space for IPC endpoint names. */ + + +/**************************************************************************//** + @Function t_IpcMsgCompletion + + @Description Callback function used upon IPC non-blocking transaction completion + to return message buffer to the caller and to forward reply if available. 
+ + This callback function may be attached by the source endpoint to any outgoing + IPC message to indicate a non-blocking send (see also XX_IpcSendMessage() routine). + Upon completion of an IPC transaction (consisting of a message and an optional reply), + the IPC service invokes this callback routine to return the message buffer to the sender + and to provide the received reply, if requested. + + User provides this function. Driver invokes it. + + @Param[in] h_Module - Abstract handle to the sending module - the same handle as was passed + in the XX_IpcSendMessage() function; This handle is typically used to point + to the internal data structure of the source endpoint. + @Param[in] p_Msg - Pointer to original (sent) message buffer; + The source endpoint can free (or reuse) this buffer when message + completion callback is called. + @Param[in] p_Reply - Pointer to (received) reply buffer; + This pointer is the same as was provided by the source endpoint in + XX_IpcSendMessage(). + @Param[in] replyLength - Length (in bytes) of actual data in the reply buffer. + @Param[in] status - Completion status - E_OK or failure indication, e.g. IPC transaction completion + timeout. + + @Return None + *//***************************************************************************/ +typedef void (t_IpcMsgCompletion)(t_Handle h_Module, + uint8_t *p_Msg, + uint8_t *p_Reply, + uint32_t replyLength, + t_Error status); + +/**************************************************************************//** + @Function t_IpcMsgHandler + + @Description Callback function used as IPC message handler. + + The IPC service invokes message handlers for each IPC message received. + The actual function pointer should be registered by each destination endpoint + via the XX_IpcRegisterMsgHandler() routine. + + User provides this function. Driver invokes it. + + @Param[in] h_Module - Abstract handle to the message handling module - the same handle as + was passed in the XX_IpcRegisterMsgHandler() function; this handle is + typically used to point to the internal data structure of the destination + endpoint. + @Param[in] p_Msg - Pointer to message buffer with data received from peer. + @Param[in] msgLength - Length (in bytes) of message data. + @Param[in] p_Reply - Pointer to reply buffer, to be filled by the message handler and then sent + by the IPC service; + The reply buffer is allocated by the IPC service with size equals to the + replyLength parameter provided in message handler registration (see + XX_IpcRegisterMsgHandler() function); + If replyLength was initially specified as zero during message handler registration, + the IPC service may set this pointer to NULL and assume that a reply is not needed; + The IPC service is also responsible for freeing the reply buffer after the + reply has been sent or dismissed. + @Param[in,out] p_ReplyLength - Pointer to reply length, which has a dual role in this function: + [In] equals the replyLength parameter provided in message handler + registration (see XX_IpcRegisterMsgHandler() function), and + [Out] should be updated by message handler to the actual reply length; if + this value is set to zero, the IPC service must assume that a reply should + not be sent; + Note: If p_Reply is not NULL, p_ReplyLength must not be NULL as well. + + @Return E_OK on success; Error code otherwise. 
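+
+ Illustrative handler skeleton only (a minimal sketch; all names below
+ are hypothetical and not part of this API):
+
+ static t_Error MyMsgHandler(t_Handle h_Module, uint8_t *p_Msg,
+ uint32_t msgLength, uint8_t *p_Reply,
+ uint32_t *p_ReplyLength)
+ {
+ /* parse p_Msg[0..msgLength-1] here ... */
+ if (p_Reply && p_ReplyLength && (*p_ReplyLength >= 1))
+ {
+ p_Reply[0] = 0; /* fill reply payload */
+ *p_ReplyLength = 1; /* actual reply length */
+ }
+ else if (p_ReplyLength)
+ *p_ReplyLength = 0; /* no reply to send */
+ return E_OK;
+ }
+
+ Such a handler would typically be registered by the destination
+ endpoint via XX_IpcRegisterMsgHandler().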
+ *//***************************************************************************/ +typedef t_Error (t_IpcMsgHandler)(t_Handle h_Module, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength); + +/**************************************************************************//** + @Function XX_IpcRegisterMsgHandler + + @Description IPC mailbox registration. + + This function is used for registering an IPC message handler in the IPC service. + This function is called by each destination endpoint to indicate that it is ready + to handle incoming messages. The IPC service invokes the message handler upon receiving + a message addressed to the specified destination endpoint. + + @Param[in] addr - The address name string associated with the destination endpoint; + This address must be unique across the IPC service domain to ensure + correct message routing. + @Param[in] f_MsgHandler - Pointer to the message handler callback for processing incoming + message; invoked by the IPC service upon receiving a message + addressed to the destination endpoint specified by the addr + parameter. + @Param[in] h_Module - Abstract handle to the message handling module, passed unchanged + to f_MsgHandler callback function. + @Param[in] replyLength - The maximal data length (in bytes) of any reply that the specified message handler + may generate; the IPC service provides the message handler with buffer + for reply according to the length specified here (refer also to the description + of #t_IpcMsgHandler callback function type); + This size shall be zero if the message handler never generates replies. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH], + t_IpcMsgHandler *f_MsgHandler, + t_Handle h_Module, + uint32_t replyLength); + +/**************************************************************************//** + @Function XX_IpcUnregisterMsgHandler + + @Description Release IPC mailbox routine. + + This function is used for unregistering an IPC message handler from the IPC service. + This function is called by each destination endpoint to indicate that it is no longer + capable of handling incoming messages. + + @Param[in] addr - The address name string associated with the destination endpoint; + This address is the same as was used when the message handler was + registered via XX_IpcRegisterMsgHandler(). + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH]); + +/**************************************************************************//** + @Function XX_IpcInitSession + + @Description This function is used for creating an IPC session between the source endpoint + and the destination endpoint. + + The actual implementation and representation of a session is left for the IPC service. + The function returns an abstract handle to the created session. This handle shall be used + by the source endpoint in subsequent calls to XX_IpcSendMessage(). + The IPC service assumes that before this function is called, no messages are sent from + the specified source endpoint to the specified destination endpoint. + + The IPC service may use a connection-oriented approach or a connectionless approach (or both) + as described below. 
+ + @par Connection-Oriented Approach + + The IPC service may implement a session in a connection-oriented approach - when this function is called, + the IPC service should take the necessary steps to bring up a source-to-destination channel for messages + and a destination-to-source channel for replies. The returned handle should represent the internal + representation of these channels. + + @par Connectionless Approach + + The IPC service may implement a session in a connectionless approach - when this function is called, the + IPC service should not perform any particular steps, but it must store the pair of source and destination + addresses in some session representation and return it as a handle. When XX_IpcSendMessage() shall be + called, the IPC service may use this handle to provide the necessary identifiers for routing the messages + through the connectionless medium. + + @Param[in] destAddr - The address name string associated with the destination endpoint. + @Param[in] srcAddr - The address name string associated with the source endpoint. + + @Return Abstract handle to the initialized session, or NULL on error. +*//***************************************************************************/ +t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH], + char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH]); + +/**************************************************************************//** + @Function XX_IpcFreeSession + + @Description This function is used for terminating an existing IPC session between a source endpoint + and a destination endpoint. + + The IPC service assumes that after this function is called, no messages shall be sent from + the associated source endpoint to the associated destination endpoint. + + @Param[in] h_Session - Abstract handle to the IPC session - the same handle as was originally + returned by the XX_IpcInitSession() function. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error XX_IpcFreeSession(t_Handle h_Session); + +/**************************************************************************//** + @Function XX_IpcSendMessage + + @Description IPC message send routine. + + This function may be used by a source endpoint to send an IPC message to a destination + endpoint. The source endpoint cannot send a message to the destination endpoint without + first initiating a session with that destination endpoint via XX_IpcInitSession() routine. + + The source endpoint must provide the buffer pointer and length of the outgoing message. + Optionally, it may also provide a buffer for an expected reply. In the latter case, the + transaction is not considered complete by the IPC service until the reply has been received. + If the source endpoint does not provide a reply buffer, the transaction is considered + complete after the message has been sent. The source endpoint must keep the message (and + optional reply) buffers valid until the transaction is complete. + + @par Non-blocking mode + + The source endpoint may request a non-blocking send by providing a non-NULL pointer to a message + completion callback function (f_Completion). Upon completion of the IPC transaction (consisting of a + message and an optional reply), the IPC service invokes this callback routine to return the message + buffer to the sender and to provide the received reply, if requested. + + @par Blocking mode + + The source endpoint may request a blocking send by setting f_Completion to NULL. 
The function is + expected to block until the IPC transaction is complete - either the reply has been received or (if no reply + was requested) the message has been sent. + + @Param[in] h_Session - Abstract handle to the IPC session - the same handle as was originally + returned by the XX_IpcInitSession() function. + @Param[in] p_Msg - Pointer to message buffer to send. + @Param[in] msgLength - Length (in bytes) of actual data in the message buffer. + @Param[in] p_Reply - Pointer to reply buffer - if this buffer is not NULL, the IPC service + fills this buffer with the received reply data; + In blocking mode, the reply data must be valid when the function returns; + In non-blocking mode, the reply data is valid when f_Completion is called; + If this pointer is NULL, no reply is expected. + @Param[in,out] p_ReplyLength - Pointer to reply length, which has a dual role in this function: + [In] specifies the maximal length (in bytes) of the reply buffer pointed by + p_Reply, and + [Out] in non-blocking mode this value is updated by the IPC service to the + actual reply length (in bytes). + @Param[in] f_Completion - Pointer to a completion callback to be used in non-blocking send mode; + The completion callback is invoked by the IPC service upon + completion of the IPC transaction (consisting of a message and an optional + reply); + If this pointer is NULL, the function is expected to block until the IPC + transaction is complete. + @Param[in] h_Arg - Abstract handle to the sending module; passed unchanged to the f_Completion + callback function as the first argument. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error XX_IpcSendMessage(t_Handle h_Session, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength, + t_IpcMsgCompletion *f_Completion, + t_Handle h_Arg); + + +/** @} */ /* end of xx_ipc group */ +/** @} */ /* end of xx_id group */ + + +#endif /* __XX_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/integrations/P1023/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/integrations/P1023/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +obj-y += module_strings.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/integrations/P1023/module_strings.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/integrations/P1023/module_strings.c @@ -0,0 +1,71 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +/* Module names for debug messages */ +const char *moduleStrings[] = +{ + "" /* MODULE_UNKNOWN */ + ,"MEM" /* MODULE_ */ + ,"MM" /* MODULE_MM */ + ,"CORE" /* MODULE_CORE */ + ,"P1023" /* MODULE_P1023 */ + ,"MII" /* MODULE_MII */ + ,"PM" /* MODULE_PM */ + ,"MMU" /* MODULE_MMU */ + ,"PIC" /* MODULE_PIC */ + ,"L2 cache" /* MODULE_L2_CACHE */ + ,"DUART" /* MODULE_DUART */ + ,"SerDes" /* MODULE_SERDES */ + ,"PIO" /* MODULE_PIO */ + ,"QM" /* MODULE_QM */ + ,"BM" /* MODULE_BM */ + ,"SEC" /* MODULE_SEC */ + ,"FM" /* MODULE_FM */ + ,"FM-MURAM" /* MODULE_FM_MURAM */ + ,"FM-PCD" /* MODULE_FM_PCD */ + ,"FM-RTC" /* MODULE_FM_RTC */ + ,"FM-MAC" /* MODULE_FM_MAC */ + ,"FM-Port" /* MODULE_FM_PORT */ + ,"FM-MACSEC" /* MODULE_FM_MACSEC */ + ,"FM-MACSEC-SecY" /* MODULE_FM_MACSEC_SECY */ + ,"ECM" /* MODULE_ECM */ + ,"DMA" /* MODULE_DMA */ + ,"DDR" /* MODULE_DDR */ + ,"LAW" /* MODULE_LAW */ + ,"LBC" /* MODULE_LBC */ + ,"I2C" /* MODULE_I2C */ + ,"ESPI" /* MODULE_ESPI */ + ,"PCI" /* MODULE_PCI */ + ,"DPA" /* MODULE_DPA */ + ,"USB" /* MODULE_USB */ +}; --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/integrations/P3040_P4080_P5020/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/integrations/P3040_P4080_P5020/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +obj-y += module_strings.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/integrations/P3040_P4080_P5020/module_strings.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/integrations/P3040_P4080_P5020/module_strings.c @@ -0,0 +1,62 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Module names for debug messages */ +const char *moduleStrings[] = +{ + "???" /* MODULE_UNKNOWN */ + ,"MEM" /* MODULE_ */ + ,"MM" /* MODULE_MM */ + ,"CORE" /* MODULE_CORE */ + ,"P4080" /* MODULE_P4080 */ + ,"P4080-Platform" /* MODULE_P4080_PLTFRM */ + ,"PM" /* MODULE_PM */ + ,"MMU" /* MODULE_MMU */ + ,"PIC" /* MODULE_PIC */ + ,"L3 cache (CPC)" /* MODULE_CPC */ + ,"DUART" /* MODULE_DUART */ + ,"SerDes" /* MODULE_SERDES */ + ,"PIO" /* MODULE_PIO */ + ,"QM" /* MODULE_QM */ + ,"BM" /* MODULE_BM */ + ,"SEC" /* MODULE_SEC */ + ,"LAW" /* MODULE_LAW */ + ,"LBC" /* MODULE_LBC */ + ,"PAMU" /* MODULE_PAMU */ + ,"FM" /* MODULE_FM */ + ,"FM-MURAM" /* MODULE_FM_MURAM */ + ,"FM-PCD" /* MODULE_FM_PCD */ + ,"FM-RTC" /* MODULE_FM_RTC */ + ,"FM-MAC" /* MODULE_FM_MAC */ + ,"FM-Port" /* MODULE_FM_PORT */ + ,"DPA" /* MODULE_DPA */ +}; --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/ncsw_config.mk +++ linux-3.13.0/drivers/net/dpa/NetCommSw/ncsw_config.mk @@ -0,0 +1,34 @@ +# +# Makefile config for the Freescale NetcommSW +# +NET_DPA = $(srctree)/drivers/net/ethernet/freescale +DRV_DPA = $(srctree)/drivers/net/dpa +NCSW = $(srctree)/drivers/net/dpa/NetCommSw + +ifdef CONFIG_FMAN_P3040_P4080_P5020 +EXTRA_CFLAGS +=-include $(NCSW)/p3040_4080_5020_dflags.h +endif +ifdef CONFIG_FMAN_P1023 +EXTRA_CFLAGS +=-include $(NCSW)/p1023_dflags.h +endif + +EXTRA_CFLAGS += -I$(DRV_DPA)/ +EXTRA_CFLAGS += -I$(NCSW)/inc +EXTRA_CFLAGS += -I$(NCSW)/inc/cores +EXTRA_CFLAGS += -I$(NCSW)/inc/etc +EXTRA_CFLAGS += -I$(NCSW)/inc/Peripherals + +ifdef CONFIG_FMAN_P3040_P4080_P5020 +EXTRA_CFLAGS += -I$(NCSW)/inc/integrations/P3040_P4080_P5020 +endif +ifdef CONFIG_FMAN_P1023 +EXTRA_CFLAGS += -I$(NCSW)/inc/integrations/P1023 +endif + +EXTRA_CFLAGS += -I$(NCSW)/src/inc +EXTRA_CFLAGS += -I$(NCSW)/src/inc/system +EXTRA_CFLAGS += -I$(NCSW)/src/inc/wrapper +EXTRA_CFLAGS += -I$(NCSW)/src/inc/xx +EXTRA_CFLAGS += -I$(srctree)/include/linux/fmd +EXTRA_CFLAGS += -I$(srctree)/include/linux/fmd/Peripherals +EXTRA_CFLAGS += -I$(srctree)/include/linux/fmd/integrations --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/p1023_dflags.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/p1023_dflags.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __dflags_h +#define __dflags_h + + +#define NCSW_LINUX +#if 0 +#define DEBUG +#endif + +#define P1023 +#define NCSW_PPC_CORE + +#define DEBUG_ERRORS 1 + +#if defined(DEBUG) +#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO + +#define DEBUG_XX_MALLOC +#define DEBUG_MEM_LEAKS + +#else +#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_WARNING +#endif /* (DEBUG) */ + +#define REPORT_EVENTS 1 +#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR + +#ifdef CONFIG_P4080_SIM +#error "Do not define CONFIG_P4080_SIM..." +#endif + + +#endif /* __dflags_h */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/p3040_4080_5020_dflags.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/p3040_4080_5020_dflags.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __dflags_h +#define __dflags_h + + +#define NCSW_LINUX + +#define P4080 +#define NCSW_PPC_CORE + +#define DEBUG_ERRORS 1 + +#if defined(DEBUG) +#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_INFO + +#define DEBUG_XX_MALLOC +#define DEBUG_MEM_LEAKS + +#else +#define DEBUG_GLOBAL_LEVEL REPORT_LEVEL_MAJOR +#endif /* (DEBUG) */ + +#define REPORT_EVENTS 0 +#define EVENT_GLOBAL_LEVEL REPORT_LEVEL_MINOR + +#ifdef CONFIG_P4080_SIM +#define SIMULATOR +#endif /* CONFIG_P4080_SIM */ + + +#endif /* __dflags_h */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk +# +obj-y += system/ +obj-y += wrapper/ +obj-y += xx/ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/system/sys_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/system/sys_ext.h @@ -0,0 +1,118 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SYS_EXT_H +#define __SYS_EXT_H + +#include "std_ext.h" + + +/**************************************************************************//** + @Group sys_grp System Interfaces + + @Description Linux system programming interfaces. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group sys_gen_grp System General Interface + + @Description General definitions, structures and routines of the linux + system programming interface. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Collection Macros for Advanced Configuration Requests + @{ +*//***************************************************************************/ +#define SYS_MAX_ADV_CONFIG_ARGS 4 + /**< Maximum number of arguments in + an advanced configuration entry */ +/* @} */ + +/**************************************************************************//** + @Description System Object Advanced Configuration Entry + + This structure represents a single request for an advanced + configuration call on the initialized object. An array of such + requests may be contained in the settings structure of the + corresponding object. + + The maximum number of arguments is limited to #SYS_MAX_ADV_CONFIG_ARGS. +*//***************************************************************************/ +typedef struct t_SysObjectAdvConfigEntry +{ + void *p_Function; /**< Pointer to advanced configuration routine */ + + uintptr_t args[SYS_MAX_ADV_CONFIG_ARGS]; + /**< Array of arguments for the specified routine; + All arguments should be casted to uint32_t. */ +} t_SysObjectAdvConfigEntry; + + +/** @} */ /* end of sys_gen_grp */ +/** @} */ /* end of sys_grp */ + +#define PARAMS(_num, _params) ADV_CONFIG_PARAMS_##_num _params + +#define ADV_CONFIG_PARAMS_1(_type) \ + , (_type)p_Entry->args[0] + +#define SET_ADV_CONFIG_ARGS_1(_arg0) \ + p_Entry->args[0] = (uintptr_t )(_arg0); \ + +#define ARGS(_num, _params) SET_ADV_CONFIG_ARGS_##_num _params + +#define ADD_ADV_CONFIG_START(_p_Entries, _maxEntries) \ + { \ + t_SysObjectAdvConfigEntry *p_Entry; \ + t_SysObjectAdvConfigEntry *p_Entrys = (_p_Entries); \ + int i=0, max = (_maxEntries); \ + +#define ADD_ADV_CONFIG_END \ + } + +#define ADV_CONFIG_CHECK_START(_p_Entry) \ + { \ + t_SysObjectAdvConfigEntry *p_Entry = _p_Entry; \ + t_Error errCode; \ + +#define ADV_CONFIG_CHECK(_handle, _func, _params) \ + if (p_Entry->p_Function == _func) \ + { \ + errCode = _func(_handle _params); \ + } else + +#endif /* __SYS_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/system/sys_io_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/system/sys_io_ext.h @@ -0,0 +1,46 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SYS_IO_EXT_H +#define __SYS_IO_EXT_H + +#include "std_ext.h" +#include "error_ext.h" + + +t_Error SYS_RegisterIoMap (uint64_t virtAddr, uint64_t physAddr, uint32_t size); +t_Error SYS_UnregisterIoMap (uint64_t virtAddr); +uint64_t SYS_PhysToVirt (uint64_t addr); +uint64_t SYS_VirtToPhys (uint64_t addr); + + +#endif /* __SYS_IO_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/types_linux.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/types_linux.h @@ -0,0 +1,200 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __TYPES_LINUX_H__ +#define __TYPES_LINUX_H__ + +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ + +#include +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) + #error "This kernel is probably not supported!!!" +#elif (!((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) || \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) || \ + (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)))) + #warning "This kernel is probably not supported!!! You may need to add some fixes." +#endif /* LINUX_VERSION_CODE */ + + +typedef float float_t; /* Single precision floating point */ +typedef double double_t; /* Double precision floating point */ + + +#define _Packed +#define _PackedType __attribute__ ((packed)) + +typedef phys_addr_t physAddress_t; + +#define UINT8_MAX 0xFF +#define UINT8_MIN 0 +#define UINT16_MAX 0xFFFF +#define UINT16_MIN 0 +#define UINT32_MAX 0xFFFFFFFF +#define UINT32_MIN 0 +#define UINT64_MAX 0xFFFFFFFFFFFFFFFFLL +#define UINT64_MIN 0 +#define INT8_MAX 0x7F +#define INT8_MIN 0x80 +#define INT16_MAX 0x7FFF +#define INT16_MIN 0x8000 +#define INT32_MAX 0x7FFFFFFF +#define INT32_MIN 0x80000000 +#define INT64_MAX 0x7FFFFFFFFFFFFFFFLL +#define INT64_MIN 0x8000000000000000LL + +#define ON 1 +#define OFF 0 + +#define FALSE false +#define TRUE true + + +/************************/ +/* memory access macros */ +/************************/ +#define GET_UINT8(arg) *(volatile uint8_t *)(&(arg)) +#define GET_UINT16(arg) in_be16(&(arg))//*(volatile uint16_t*)(&(arg)) +#define GET_UINT32(arg) in_be32(&(arg))//*(volatile uint32_t*)(&(arg)) +#define GET_UINT64(arg) *(volatile uint64_t*)(&(arg)) + +#ifdef VERBOSE_WRITE +void XX_Print(char *str, ...); +#define WRITE_UINT8(arg, data) \ + do { XX_Print("ADDR: 0x%08x, VAL: 0x%02x\r\n", (uint32_t)&(arg), (data)); *(volatile uint8_t *)(&(arg)) = (data); } while (0) +#define WRITE_UINT16(arg, data) \ + do { XX_Print("ADDR: 0x%08x, VAL: 0x%04x\r\n", (uint32_t)&(arg), (data)); out_be16(&(arg), data); /* *(volatile uint16_t*)(&(arg)) = (data);*/ } while (0) +#define WRITE_UINT32(arg, data) \ + do { XX_Print("ADDR: 0x%08x, VAL: 0x%08x\r\n", (uint32_t)&(arg), (data)); out_be32(&(arg), data); /* *(volatile uint32_t*)(&(arg)) = (data);*/ } while (0) +#define WRITE_UINT64(arg, data) \ + do { XX_Print("ADDR: 0x%08x, VAL: 0x%016llx\r\n", (uint32_t)&(arg), (data)); *(volatile uint64_t*)(&(arg)) = (data); } while (0) + +#else /* not VERBOSE_WRITE */ +#define WRITE_UINT8(arg, data) *(volatile uint8_t *)(&(arg)) = (data) +#define WRITE_UINT16(arg, data) out_be16(&(arg), data)//*(volatile uint16_t*)(&(arg)) = (data) +#define WRITE_UINT32(arg, data) out_be32(&(arg), data)//*(volatile unsigned int *)(&(arg)) = (data) +#define WRITE_UINT64(arg, data) *(volatile uint64_t*)(&(arg)) = (data) +#endif /* not VERBOSE_WRITE */ + + +/*****************************************************************************/ +/* General stuff */ 
+/*****************************************************************************/ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#endif /* ARRAY_SIZE */ + +#ifdef MAJOR +#undef MAJOR +#endif /* MAJOR */ + +#ifdef MINOR +#undef MINOR +#endif /* MINOR */ + +#ifdef QE_SIZEOF_BD +#undef QE_SIZEOF_BD +#endif /* QE_SIZEOF_BD */ + +#ifdef BD_BUFFER_CLEAR +#undef BD_BUFFER_CLEAR +#endif /* BD_BUFFER_CLEAR */ + +#ifdef BD_BUFFER +#undef BD_BUFFER +#endif /* BD_BUFFER */ + +#ifdef BD_STATUS_AND_LENGTH_SET +#undef BD_STATUS_AND_LENGTH_SET +#endif /* BD_STATUS_AND_LENGTH_SET */ + +#ifdef BD_STATUS_AND_LENGTH +#undef BD_STATUS_AND_LENGTH +#endif /* BD_STATUS_AND_LENGTH */ + +#ifdef BD_BUFFER_ARG +#undef BD_BUFFER_ARG +#endif /* BD_BUFFER_ARG */ + +#ifdef BD_GET_NEXT +#undef BD_GET_NEXT +#endif /* BD_GET_NEXT */ + +#ifdef QE_SDEBCR_BA_MASK +#undef QE_SDEBCR_BA_MASK +#endif /* QE_SDEBCR_BA_MASK */ + +#ifdef BD_BUFFER_SET +#undef BD_BUFFER_SET +#endif /* BD_BUFFER_SET */ + +#ifdef UPGCR_PROTOCOL +#undef UPGCR_PROTOCOL +#endif /* UPGCR_PROTOCOL */ + +#ifdef UPGCR_TMS +#undef UPGCR_TMS +#endif /* UPGCR_TMS */ + +#ifdef UPGCR_RMS +#undef UPGCR_RMS +#endif /* UPGCR_RMS */ + +#ifdef UPGCR_ADDR +#undef UPGCR_ADDR +#endif /* UPGCR_ADDR */ + +#ifdef UPGCR_DIAG +#undef UPGCR_DIAG +#endif /* UPGCR_DIAG */ + +#ifdef PARAMS +#undef PARAMS +#endif /* PARAMS */ + +#ifdef NO_IRQ +#undef NO_IRQ +#endif /* NO_IRQ */ + +#define PRINT_LINE XX_Print("%s:\n %s [%d]\n",__FILE__,__FUNCTION__,__LINE__); + + +#endif /* __TYPES_LINUX_H__ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/wrapper/fsl_fman.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/wrapper/fsl_fman.h @@ -0,0 +1,279 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/****************************************************************************** + @File fsl_fman.h + + @Description Linux internal kernel API +*//***************************************************************************/ + +#ifndef __FSL_FMAN_H +#define __FSL_FMAN_H + +#include +#include /* struct device */ +#include /* struct qman_fq */ +#include "dpaa_integration_ext.h" + +/**************************************************************************//** + @Group FM_LnxKern_grp Frame Manager Linux wrapper API + + @Description FM API functions, definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_LnxKern_ctrl_grp Control Unit + + @Description Control Unit + + Internal Kernel Control Unit API + @{ +*//***************************************************************************/ + +/*****************************************************************************/ +/* Internal Linux kernel routines */ +/*****************************************************************************/ + +/**************************************************************************//** + @Description A structure .., +*//***************************************************************************/ +struct fm; + +/**************************************************************************//** + @Description A structure .., +*//***************************************************************************/ +struct fm_port; + +typedef int (*alloc_pcd_fqids)(struct device *dev, + uint32_t num, + uint8_t alignment, + uint32_t *base_fqid); + +typedef int (*free_pcd_fqids)(struct device *dev, + uint32_t base_fqid); + +struct fm_port_pcd_param { + alloc_pcd_fqids cba; + free_pcd_fqids cbf; + struct device *dev; +}; + +/**************************************************************************//** + @Description A structure of information about each of the external + buffer pools used by the port, +*//***************************************************************************/ +struct fm_port_pool_param { + uint8_t id; /**< External buffer pool id */ + uint16_t size; /**< External buffer pool buffer size */ +}; + +/**************************************************************************//** + @Description structure for additional Rx port parameters +*//***************************************************************************/ +struct fm_port_rx_params { + uint32_t errq; /**< Error Queue Id. */ + uint32_t defq; /**< Default Queue Id. */ + uint8_t num_pools; /**< Number of pools use by this port */ + struct fm_port_pool_param pool_param[FM_PORT_MAX_NUM_OF_EXT_POOLS]; + /**< Parameters for each pool */ + uint16_t priv_data_size; /**< Area that user may save for his own need (E.g. save the SKB) */ + bool parse_results; /**< Whether to have the parser-results in the Received buffer */ + bool hash_results; /**< Whether to have the hash-results in the Received buffer */ + bool time_stamp; /**< Whether to have the time-stamp in the Received buffer */ +}; + +/**************************************************************************//** + @Description structure for additional non-Rx port parameters +*//***************************************************************************/ +struct fm_port_non_rx_params { + uint32_t errq; /**< Error Queue Id. */ + uint32_t defq; /**< For Tx and HC - Default Confirmation queue, + 0 means no Tx confirmation for processed + frames. 
For OP - default Rx queue. */ + uint16_t priv_data_size; /**< Area that user may save for his own need (E.g. save the SKB) */ + bool parse_results; /**< Whether to put the parser-results in the Transmitted buffer */ + bool hash_results; /**< Whether to have the hash-results in the Received buffer */ + bool time_stamp; /**< Whether to have the time-stamp in the Received buffer */ +}; + + +/**************************************************************************//** + @Function fm_bind + + @Description Bind to a specific FM device. + + @Param[in] fm_dev - the OF handle of the FM device. + + @Return A handle of the FM device. + + @Cautions Allowed only after the port was created. +*//***************************************************************************/ +struct fm * fm_bind (struct device *fm_dev); + +/**************************************************************************//** + @Function fm_unbind + + @Description Un-bind from a specific FM device. + + @Param[in] fm - A handle of the FM device. + + @Cautions Allowed only after the port was created. +*//***************************************************************************/ +void fm_unbind(struct fm *fm); + +void * fm_get_handle(struct fm *fm); +void * fm_get_rtc_handle(struct fm *fm); +struct resource * fm_get_mem_region(struct fm *fm); + +/**************************************************************************//** + @Function fm_port_bind + + @Description Bind to a specific FM-port device (may be Rx or Tx port). + + @Param[in] fm_port_dev - the OF handle of the FM port device. + + @Return A handle of the FM port device. + + @Cautions Allowed only after the port was created. +*//***************************************************************************/ +struct fm_port * fm_port_bind (struct device *fm_port_dev); + +/**************************************************************************//** + @Function fm_port_unbind + + @Description Un-bind from a specific FM-port device (may be Rx or Tx port). + + @Param[in] port - A handle of the FM port device. + + @Cautions Allowed only after the port was created. +*//***************************************************************************/ +void fm_port_unbind(struct fm_port *port); + +/**************************************************************************//** + @Function fm_set_rx_port_params + + @Description Configure parameters for a specific Rx FM-port device. + + @Param[in] port - A handle of the FM port device. + @Param[in] params - Rx port parameters + + @Cautions Allowed only after the port is binded. +*//***************************************************************************/ +void fm_set_rx_port_params(struct fm_port *port, struct fm_port_rx_params *params); + +/**************************************************************************//** + @Function fm_port_pcd_bind + + @Description Bind as a listener on a port PCD. + + @Param[in] port - A handle of the FM port device. + @Param[in] params - PCD port parameters + + @Cautions Allowed only after the port is binded. +*//***************************************************************************/ +void fm_port_pcd_bind (struct fm_port *port, struct fm_port_pcd_param *params); + +/**************************************************************************//** + @Function fm_get_tx_port_channel + + @Description Get qman-channel number for this Tx port. + + @Param[in] port - A handle of the FM port device. + + @Return qman-channel number for this Tx port. + + @Cautions Allowed only after the port is binded. 
+*//***************************************************************************/ +int fm_get_tx_port_channel(struct fm_port *port); + +/**************************************************************************//** + @Function fm_set_tx_port_params + + @Description Configure parameters for a specific Tx FM-port device + + @Param[in] port - A handle of the FM port device. + @Param[in] params - Tx port parameters + + @Cautions Allowed only after the port is binded. +*//***************************************************************************/ +void fm_set_tx_port_params(struct fm_port *port, struct fm_port_non_rx_params *params); + +/**************************************************************************//** + @Function fm_port_enable + + @Description Enable specific FM-port device (may be Rx or Tx port). + + @Param[in] port - A handle of the FM port device. + + @Cautions Allowed only the port is initialized. +*//***************************************************************************/ +int fm_port_enable (struct fm_port *port); + +/**************************************************************************//** + @Function fm_port_disable + + @Description Disable specific FM-port device (may be Rx or Tx port). + + @Param[in] port - A handle of the FM port device. + + @Cautions Allowed only the port is initialized. +*//***************************************************************************/ +void fm_port_disable(struct fm_port *port); + +void * fm_port_get_handle(struct fm_port *port); + +/**************************************************************************//** + @Description Get base address of this port. Useful for accessing + port-specific registers (i.e., not common ones). + + @Param[in] port - A handle of the FM port device. + @Param[out] base_addr - The port's base addr (virtual address). +*//***************************************************************************/ +void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr); + +/**************************************************************************//** + @Description Lock function required before any FMD/LLD call. +*//***************************************************************************/ +void fm_mutex_lock(void); + +/**************************************************************************//** + @Description Unlock function required after any FMD/LLD call. +*//***************************************************************************/ +void fm_mutex_unlock(void); + +/** @} */ /* end of FM_LnxKern_ctrl_grp group */ +/** @} */ /* end of FM_LnxKern_grp group */ + + +#endif /* __FSL_FMAN_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/wrapper/fsl_fman_test.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/wrapper/fsl_fman_test.h @@ -0,0 +1,83 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
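Illustrative sketch (not part of the patch): the fsl_fman.h wrapper API above is used in a bind, configure, enable order, as its @Cautions notes indicate. A minimal Rx port bring-up using only the declared calls; the function name, queue ids and error handling are placeholders for the example.

#include <linux/errno.h>
#include "fsl_fman.h"

/* Example only: bind an Rx FM-port, set its queues, then enable it. */
static int example_rx_port_setup(struct device *fm_port_dev)
{
    struct fm_port *port;
    struct fm_port_rx_params params = {
        .errq      = 0x100,   /* placeholder error FQ id   */
        .defq      = 0x101,   /* placeholder default FQ id */
        .num_pools = 0,
    };

    port = fm_port_bind(fm_port_dev);      /* bind to the OF port device */
    if (!port)
        return -ENODEV;

    fm_set_rx_port_params(port, &params);  /* allowed only after bind */

    return fm_port_enable(port);           /* allowed only once initialized */
}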
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File fsl_fman_test.h + + @Description +*//***************************************************************************/ + +#ifndef __FSL_FMAN_TEST_H +#define __FSL_FMAN_TEST_H + +#include + + +#define FMT_RX_ERR_Q 0xffffffff +#define FMT_RX_DFLT_Q 0xfffffffe +#define FMT_TX_ERR_Q 0xfffffffd +#define FMT_TX_CONF_Q 0xfffffffc + + +/**************************************************************************//** + @Function is_fman_test + + @Description Check if arriving frame belong to the test + + @Param[in] mac_dev - TODO + @Param[in] queueId - TODO + @Param[in] buffer - A pointer to the buffer to check. + @Param[in] size - size of the given buffer. + + @Return true if this buffer belongs to FMan test application; false otherwise. + + @Cautions Allowed only the port is initialized. +*//***************************************************************************/ +bool is_fman_test (void *mac_dev, + uint32_t queueId, + uint8_t *buffer, + uint32_t size); + +/**************************************************************************//** + @Function fman_test_ip_manip + + @Description IP header manipulation + + @Param[in] mac_dev - TODO + @Param[in] data - A pointer to the data (payload) to manipulate. + + @Cautions Allowed only the port is initialized. +*//***************************************************************************/ +void fman_test_ip_manip (void *mac_dev, uint8_t *data); + + +#endif /* __FSL_FMAN_TEST_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/wrapper/lnxwrp_fm_ext.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/wrapper/lnxwrp_fm_ext.h @@ -0,0 +1,162 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
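Illustrative sketch (not part of the patch): the two hooks declared in fsl_fman_test.h are meant to be called from the Ethernet driver's receive path. The caller below and the call ordering are assumptions made for the example; the real call sites live in the dpa Ethernet driver.

#include <linux/types.h>
#include "fsl_fman_test.h"

/* Example only: returns true when the frame was claimed by the FMan test
 * module and must not be passed to the regular network stack. */
static bool example_rx_hook(void *mac_dev, uint32_t qid,
                            uint8_t *buf, uint32_t len)
{
    if (is_fman_test(mac_dev, qid, buf, len))
        return true;

    /* Only rewrites the IP header if the port enabled header manipulation. */
    fman_test_ip_manip(mac_dev, buf);
    return false;
}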
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/****************************************************************************** + @File lnxwrp_fm_ext.h + + @Description TODO +*//***************************************************************************/ + +#ifndef __LNXWRP_FM_EXT_H +#define __LNXWRP_FM_EXT_H + +#include "std_ext.h" +#include "sys_ext.h" +#include "fm_ext.h" +#include "fm_muram_ext.h" +#include "fm_pcd_ext.h" +#include "fm_port_ext.h" +#include "fm_mac_ext.h" +#include "fm_rtc_ext.h" + + +/**************************************************************************//** + @Group FM_LnxKern_grp Frame Manager Linux wrapper API + + @Description FM API functions, definitions and enums. + + @{ +*//***************************************************************************/ + +/**************************************************************************//** + @Group FM_LnxKern_init_grp Initialization Unit + + @Description Initialization Unit + + Initialization Flow: + Initialization of the FM Module will be carried out by the Linux + kernel according to the following sequence: + a. Calling the initialization routine with no parameters. + b. The driver will register to the Device-Tree. + c. The Linux Device-Tree will initiate a call to the driver for + initialization. + d. The driver will read the appropriate information from the Device-Tree + e. [Optional] Calling the advance initialization routines to change + driver's defaults. + f. Initialization of the device will be automatically upon using it. + + @{ +*//***************************************************************************/ + +typedef struct t_WrpFmDevSettings +{ + t_FmParams param; + t_SysObjectAdvConfigEntry *advConfig; +} t_WrpFmDevSettings; + +typedef struct t_WrpFmPcdDevSettings +{ + t_FmPcdParams param; + t_SysObjectAdvConfigEntry *advConfig; +} t_WrpFmPcdDevSettings; + +typedef struct t_WrpFmPortDevSettings +{ + t_FmPortParams param; + t_SysObjectAdvConfigEntry *advConfig; +} t_WrpFmPortDevSettings; + +typedef struct t_WrpFmMacDevSettings +{ + t_FmMacParams param; + t_SysObjectAdvConfigEntry *advConfig; +} t_WrpFmMacDevSettings; + + +/**************************************************************************//** + @Function LNXWRP_FM_Init + + @Description Initialize the FM linux wrapper. 
+ + @Return A handle (descriptor) of the newly created FM Linux wrapper + structure. +*//***************************************************************************/ +t_Handle LNXWRP_FM_Init(void); + +/**************************************************************************//** + @Function LNXWRP_FM_Free + + @Description Free the FM linux wrapper. + + @Param[in] h_LnxWrpFm - A handle to the FM linux wrapper. + + @Return E_OK on success; Error code otherwise. +*//***************************************************************************/ +t_Error LNXWRP_FM_Free(t_Handle h_LnxWrpFm); + +/**************************************************************************//** + @Function LNXWRP_FM_GetMacHandle + + @Description Get the FM-MAC LLD handle from the FM linux wrapper. + + @Param[in] h_LnxWrpFm - A handle to the FM linux wrapper. + @Param[in] fmId - Index of the FM device to get the MAC handle from. + @Param[in] macId - Index of the mac handle. + + @Return A handle of the LLD compressor. +*//***************************************************************************/ +t_Handle LNXWRP_FM_GetMacHandle(t_Handle h_LnxWrpFm, uint8_t fmId, uint8_t macId); + +#ifdef CONFIG_FSL_FMAN_TEST +t_Handle LNXWRP_FM_TEST_Init(void); +t_Error LNXWRP_FM_TEST_Free(t_Handle h_FmTestLnxWrp); +#endif /* CONFIG_FSL_FMAN_TEST */ + +/** @} */ /* end of FM_LnxKern_init_grp group */ + + +/**************************************************************************//** + @Group FM_LnxKern_ctrl_grp Control Unit + + @Description Control Unit + + TODO + @{ +*//***************************************************************************/ + +#include "fsl_fman.h" + +/** @} */ /* end of FM_LnxKern_ctrl_grp group */ +/** @} */ /* end of FM_LnxKern_grp group */ + + +#endif /* __LNXWRP_FM_EXT_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/inc/xx/xx.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/inc/xx/xx.h @@ -0,0 +1,50 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
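Illustrative sketch (not part of the patch): the wrapper lifecycle declared above is init, query, free. The indices and error handling below are placeholders for the example.

#include "lnxwrp_fm_ext.h"

/* Example only: create the FM wrapper, look up one MAC handle, tear down. */
static t_Error example_fm_wrapper_lifecycle(void)
{
    t_Handle h_Wrp, h_Mac;

    h_Wrp = LNXWRP_FM_Init();
    if (!h_Wrp)
        return E_INVALID_HANDLE;                 /* placeholder handling */

    h_Mac = LNXWRP_FM_GetMacHandle(h_Wrp, 0, 0); /* FM 0, MAC 0 (placeholders) */
    (void)h_Mac;

    return LNXWRP_FM_Free(h_Wrp);
}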
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __XX_H +#define __XX_H + +#include "xx_ext.h" + +void * xx_Malloc(uint32_t n); +void xx_Free(void *p); + +void *xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t align); +void xx_FreeSmart(void *p); + +/* never used: */ +#define GetDeviceName(irq) ((char *)NULL) + +int GetDeviceIrqNum(int irq); + + +#endif /* __XX_H */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/system/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/system/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk +# + +obj-y += sys_io.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/system/sys_io.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/system/sys_io.c @@ -0,0 +1,171 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
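Illustrative sketch (not part of the patch): xx.h above declares the driver's internal allocation wrappers, with xx_MallocSmart() taking a memory-partition id and an alignment in addition to the size. The partition id and alignment below are placeholder values for the example.

#include "xx.h"

/* Example only: allocate a 64-byte-aligned buffer from partition 0
 * (assumed to be the default partition) and free it again. */
static void example_aligned_alloc(void)
{
    void *p = xx_MallocSmart(256, 0, 64);

    if (p)
        xx_FreeSmart(p);
}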
+ */ + +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#include +#else +#include +#endif /* LINUX_VERSION_CODE */ +#endif /* MODVERSIONS */ + +#include +#include + +#include + +#include "std_ext.h" +#include "error_ext.h" +#include "string_ext.h" +#include "list_ext.h" +#include "sys_io_ext.h" + + +#define __ERR_MODULE__ MODULE_UNKNOWN + + +typedef struct { + uint64_t virtAddr; + uint64_t physAddr; + uint32_t size; + t_List node; +} t_IoMap; +#define IOMAP_OBJECT(ptr) LIST_OBJECT(ptr, t_IoMap, node) + +LIST(mapsList); + + +static void EnqueueIoMap(t_IoMap *p_IoMap) +{ + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + LIST_AddToTail(&p_IoMap->node, &mapsList); + XX_RestoreAllIntr(intFlags); +} + +static t_IoMap * FindIoMapByVirtAddr(uint64_t addr) +{ + t_IoMap *p_IoMap; + t_List *p_Pos; + + LIST_FOR_EACH(p_Pos, &mapsList) + { + p_IoMap = IOMAP_OBJECT(p_Pos); + if ((addr >= p_IoMap->virtAddr) && (addr < p_IoMap->virtAddr+p_IoMap->size)) + return p_IoMap; + } + + return NULL; +} + +static t_IoMap * FindIoMapByPhysAddr(uint64_t addr) +{ + t_IoMap *p_IoMap; + t_List *p_Pos; + + LIST_FOR_EACH(p_Pos, &mapsList) + { + p_IoMap = IOMAP_OBJECT(p_Pos); + if ((addr >= p_IoMap->physAddr) && (addr < p_IoMap->physAddr+p_IoMap->size)) + return p_IoMap; + } + + return NULL; +} + +t_Error SYS_RegisterIoMap (uint64_t virtAddr, uint64_t physAddr, uint32_t size) +{ + t_IoMap *p_IoMap; + + p_IoMap = (t_IoMap*)XX_Malloc(sizeof(t_IoMap)); + if (!p_IoMap) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!")); + memset(p_IoMap, 0, sizeof(t_IoMap)); + + p_IoMap->virtAddr = virtAddr; + p_IoMap->physAddr = physAddr; + p_IoMap->size = size; + + INIT_LIST(&p_IoMap->node); + EnqueueIoMap(p_IoMap); + + return E_OK; +} + +t_Error SYS_UnregisterIoMap (uint64_t virtAddr) +{ + t_IoMap *p_IoMap = FindIoMapByVirtAddr(virtAddr); + if (!p_IoMap) + RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!")); + + LIST_Del(&p_IoMap->node); + XX_Free(p_IoMap); + + return E_OK; +} + +uint64_t SYS_PhysToVirt(uint64_t addr) +{ + t_IoMap *p_IoMap = FindIoMapByPhysAddr(addr); + if (p_IoMap) + { + /* This is optimization - put the latest in the list-head - like a cache */ + if (mapsList.p_Next != &p_IoMap->node) + { + uint32_t intFlags = XX_DisableAllIntr(); + LIST_DelAndInit(&p_IoMap->node); + LIST_Add(&p_IoMap->node, &mapsList); + XX_RestoreAllIntr(intFlags); + } + return (uint64_t)(addr - p_IoMap->physAddr + p_IoMap->virtAddr); + } + return PTR_TO_UINT(phys_to_virt((unsigned long)addr)); +} + +uint64_t SYS_VirtToPhys(uint64_t addr) +{ + t_IoMap *p_IoMap; + + if (addr == 0) + return 0; + + p_IoMap = FindIoMapByVirtAddr(addr); + if (p_IoMap) + return (uint64_t)(addr - p_IoMap->virtAddr + p_IoMap->physAddr); + return (uint64_t)virt_to_phys(UINT_TO_PTR(addr)); +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/Makefile @@ -0,0 +1,18 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +EXTRA_CFLAGS += -I$(NCSW_FM_INC) +EXTRA_CFLAGS += -I$(NET_DPA) + +obj-y += fsl-ncsw-PFM.o +obj-$(CONFIG_FSL_FMAN_TEST) += fman_test.o + +fsl-ncsw-PFM-objs := lnxwrp_fm.o lnxwrp_fm_port.o lnxwrp_ioctls_fm.o \ + lnxwrp_sysfs.o lnxwrp_sysfs_fm.o lnxwrp_sysfs_fm_port.o \ + lnxwrp_resources.o 
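Illustrative sketch (not part of the patch): sys_io.c above keeps a list of registered virt/phys windows so that SYS_PhysToVirt()/SYS_VirtToPhys() can translate addresses falling inside them, and falls back to phys_to_virt()/virt_to_phys() otherwise. The physical base, size and names below are placeholders for the example.

#include <linux/io.h>
#include "std_ext.h"
#include "error_ext.h"
#include "sys_io_ext.h"

#define EXAMPLE_REGS_PHYS  0xffe000000ULL  /* placeholder physical base */
#define EXAMPLE_REGS_SIZE  0x1000          /* placeholder window size   */

/* Example only: ioremap a register window and register it with the
 * translation helpers implemented above. */
static t_Error example_map_window(void)
{
    void __iomem *virt = ioremap(EXAMPLE_REGS_PHYS, EXAMPLE_REGS_SIZE);

    if (!virt)
        return E_NO_MEMORY;

    return SYS_RegisterIoMap(PTR_TO_UINT(virt),
                             EXAMPLE_REGS_PHYS, EXAMPLE_REGS_SIZE);
}

/* Afterwards, a physical address inside the window can be translated:
 *   regs = UINT_TO_PTR(SYS_PhysToVirt(EXAMPLE_REGS_PHYS + offset));
 */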
+obj-$(CONFIG_COMPAT) += lnxwrp_ioctls_fm_compat.o --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/fman_test.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/fman_test.c @@ -0,0 +1,1076 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +/* + + @File fman_test.c + + @Author Moti Bar + + @Description FM Linux test + +*/ + +/* Linux Headers ------------------- */ +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ + +#include +#include +#include +#include +#include +#include +#include +#include /*struct qman_fq */ +#include +#include +#include /*for mixed environment */ +#include +#include + +/* NetCommSw Headers --------------- */ +#include "std_ext.h" +#include "error_ext.h" +#include "debug_ext.h" +#include "list_ext.h" +#include "fm_ext.h" + +#include "fm_test_ioctls.h" +#include "fsl_fman.h" +#include "fsl_fman_test.h" +#include "fm_port_ext.h" + + +#define __ERR_MODULE__ MODULE_FM + +#define FMT_FRM_WATERMARK 0xdeadbeefdeadbeeaLL + +#ifdef CONFIG_COMPAT +/* Define this for KS64b - US32b */ +#define FMAN_TEST_CONFIG_COMPAT +#warning Please make sure you have the right value for FMAN_TEST_CONFIG_COMPAT +#endif + +typedef struct { + ioc_fmt_buff_desc_t buff; + t_List node; +} t_FmTestFrame; + +#define FMT_FRAME_OBJECT(ptr) LIST_OBJECT(ptr, t_FmTestFrame, node) + +typedef struct t_FmTestFq { + struct qman_fq fq_base; + struct list_head list; + void *port; + bool init; +} t_FmTestFq; + +typedef struct { + bool valid; + uint8_t id; + ioc_fmt_port_type portType; + ioc_diag_mode diag; + bool echo; + bool ip_header_manip; + struct fm_port *p_TxPort; + t_Handle h_TxFmPortDev; + struct fm_port *p_RxPort; + t_Handle h_RxFmPortDev; + t_Handle h_Mac; + uint64_t fmPhysBaseAddr; + t_List rxFrmsQ; + + int numOfTxQs; + struct qman_fq *p_TxFqs[8]; +} t_FmTestPort; + +typedef struct { + int major; + t_FmTestPort ports[IOC_FMT_MAX_NUM_OF_PORTS]; + struct class *fm_test_class; +} t_FmTest; + + +static t_FmTest fmTest; + + +static t_Error Set1GMacIntLoopback(t_FmTestPort *p_FmTestPort, bool en) +{ +#define FM_1GMAC0_OFFSET 0x000e0000 +#define FM_1GMAC_CMD_CONF_CTRL_OFFSET 0x100 +#define MACCFG1_LOOPBACK 0x00000100 + + uint64_t baseAddr, regAddr; + uint32_t tmpVal; + + baseAddr = p_FmTestPort->fmPhysBaseAddr + (FM_1GMAC0_OFFSET + (p_FmTestPort->id*0x2000)); + + baseAddr = PTR_TO_UINT(ioremap(baseAddr, 0x1000)); + + regAddr = baseAddr + FM_1GMAC_CMD_CONF_CTRL_OFFSET; + tmpVal = GET_UINT32(*((uint32_t *)UINT_TO_PTR(regAddr))); + if (en) + tmpVal |= MACCFG1_LOOPBACK; + else + tmpVal &= ~MACCFG1_LOOPBACK; + WRITE_UINT32(*((uint32_t *)UINT_TO_PTR(regAddr)), tmpVal); + + iounmap(UINT_TO_PTR(baseAddr)); + + return E_OK; +} + +#ifndef FM_10G_MAC_NO_CTRL_LOOPBACK +static t_Error Set10GMacIntLoopback(t_FmTestPort *p_FmTestPort, bool en) +{ +#define FM_10GMAC0_OFFSET 0x000f0000 +#define FM_10GMAC_CMD_CONF_CTRL_OFFSET 0x8 +#define CMD_CFG_LOOPBACK_EN 0x00000400 + + uint64_t baseAddr, regAddr; + uint32_t tmpVal; + + baseAddr = p_FmTestPort->fmPhysBaseAddr + (FM_10GMAC0_OFFSET + ((p_FmTestPort->id-FM_MAX_NUM_OF_1G_RX_PORTS)*0x2000)); + + baseAddr = PTR_TO_UINT(ioremap(baseAddr, 0x1000)); + + regAddr = baseAddr + FM_10GMAC_CMD_CONF_CTRL_OFFSET; + tmpVal = GET_UINT32(*((uint32_t *)UINT_TO_PTR(regAddr))); + if (en) + tmpVal |= CMD_CFG_LOOPBACK_EN; + else + tmpVal &= ~CMD_CFG_LOOPBACK_EN; + WRITE_UINT32(*((uint32_t *)UINT_TO_PTR(regAddr)), tmpVal); + + iounmap(UINT_TO_PTR(baseAddr)); + + return E_OK; +} +#endif /* !FM_10G_MAC_NO_CTRL_LOOPBACK */ + +static t_Error SetMacIntLoopback(t_FmTestPort *p_FmTestPort, bool en) +{ + + if (p_FmTestPort->portType == e_IOC_FMT_PORT_T_RXTX) + { + if (p_FmTestPort->id < FM_MAX_NUM_OF_1G_RX_PORTS) + return 
Set1GMacIntLoopback(p_FmTestPort, en); + else if ((p_FmTestPort->id >= FM_MAX_NUM_OF_1G_RX_PORTS) && (p_FmTestPort->id < FM_MAX_NUM_OF_1G_RX_PORTS + FM_MAX_NUM_OF_10G_RX_PORTS)) +#ifndef FM_10G_MAC_NO_CTRL_LOOPBACK + return Set10GMacIntLoopback(p_FmTestPort, en); +#else + RETURN_ERROR(MINOR, E_NOT_SUPPORTED, ("TGEC don't have internal-loopback")); +#endif /* !FM_10G_MAC_NO_CTRL_LOOPBACK */ + } + else if (p_FmTestPort->portType == e_IOC_FMT_PORT_T_OP) + return E_OK; /* no Mac loopback can be set on OH ports */ + else + RETURN_ERROR(MINOR, E_INVALID_VALUE, ("fm-port-test id!")); + RETURN_ERROR(MINOR, E_INVALID_STATE, NO_MSG); +} + +static void EnqueueFrameToRxQ(t_FmTestPort *p_FmTestPort, t_FmTestFrame *p_FmTestFrame) +{ + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + LIST_AddToTail(&p_FmTestFrame->node, &p_FmTestPort->rxFrmsQ); + XX_RestoreAllIntr(intFlags); +} + +static t_FmTestFrame * DequeueFrameFromRxQ(t_FmTestPort *p_FmTestPort) +{ + t_FmTestFrame *p_FmTestFrame = NULL; + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + if (!LIST_IsEmpty(&p_FmTestPort->rxFrmsQ)) + { + p_FmTestFrame = FMT_FRAME_OBJECT(p_FmTestPort->rxFrmsQ.p_Next); + LIST_DelAndInit(&p_FmTestFrame->node); + } + XX_RestoreAllIntr(intFlags); + + return p_FmTestFrame; +} + +static enum qman_cb_dqrr_result egress_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); + return qman_cb_dqrr_consume; +} + +static void egress_ern(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); +} + +static struct qman_fq * FqAlloc(t_FmTestPort *p_FmTestPort, + uint32_t fqid, + uint32_t flags, + uint16_t channel, + uint8_t wq) +{ + int _errno; + struct qman_fq *fq = NULL; + t_FmTestFq *p_FmtFq; + struct qm_mcc_initfq initfq; + + p_FmtFq = (t_FmTestFq *)XX_Malloc(sizeof(t_FmTestFq)); + if (!p_FmtFq) { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!")); + return NULL; + } + + p_FmtFq->fq_base.cb.dqrr = egress_dqrr; + p_FmtFq->fq_base.cb.ern = p_FmtFq->fq_base.cb.dc_ern = p_FmtFq->fq_base.cb.fqs = egress_ern; + p_FmtFq->port = (void *)p_FmTestPort; + if (fqid == 0) { + flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; + flags &= ~QMAN_FQ_FLAG_NO_MODIFY; + } else { + flags &= ~QMAN_FQ_FLAG_DYNAMIC_FQID; + } + + p_FmtFq->init = !(flags & QMAN_FQ_FLAG_NO_MODIFY); + + DBG(TRACE, ("fqid %d, flags 0x%08x, channel %d, wq %d",fqid,flags,channel,wq)); + + _errno = qman_create_fq(fqid, flags, &p_FmtFq->fq_base); + if (unlikely(_errno)) { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj - qman_new_fq!!!")); + XX_Free(p_FmtFq); + return NULL; + } + fq = &p_FmtFq->fq_base; + + if (p_FmtFq->init) { + initfq.we_mask = QM_INITFQ_WE_DESTWQ; + initfq.fqd.dest.channel = channel; + initfq.fqd.dest.wq = wq; + + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj - qman_init_fq!!!")); + qman_destroy_fq(fq, 0); + XX_Free(p_FmtFq); + return NULL; + } + } + + return fq; +} + +static t_Error PortInit (t_FmTestPort *p_FmTestPort, ioc_fmt_port_param_t *p_Params) +{ + struct device_node *fm_node, *fm_port_node; + const uint32_t *uint32_prop; + int _errno=0, lenp; + uint32_t i; + static struct of_device_id fm_node_of_match[] = { + { .compatible = "fsl,fman", }, + { /* end of list */ }, + }; + + INIT_LIST(&p_FmTestPort->rxFrmsQ); + p_FmTestPort->numOfTxQs = 
p_Params->num_tx_queues; + p_FmTestPort->id = p_Params->fm_port_id; + p_FmTestPort->portType = p_Params->fm_port_type; + p_FmTestPort->diag = e_IOC_DIAG_MODE_NONE; + p_FmTestPort->ip_header_manip = FALSE; + + /* Get all the FM nodes */ + for_each_matching_node(fm_node, fm_node_of_match) { + + uint32_prop = (uint32_t *)of_get_property(fm_node, "cell-index", &lenp); + if (unlikely(uint32_prop == NULL)) { + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, cell-index) failed", fm_node->full_name)); + } + if (WARN_ON(lenp != sizeof(uint32_t))) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, cell-index) invalid", fm_node->full_name)); + if (*uint32_prop == p_Params->fm_id) { + struct resource res; + /* Get the FM address */ + _errno = of_address_to_resource(fm_node, 0, &res); + if (unlikely(_errno < 0)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); + + p_FmTestPort->fmPhysBaseAddr = res.start; + + for_each_child_of_node(fm_node, fm_port_node) { + struct platform_device *of_dev; + + uint32_prop = (uint32_t *)of_get_property(fm_port_node, "cell-index", &lenp); + if (uint32_prop == NULL) + continue; + + if (of_device_is_compatible(fm_port_node, "fsl,fman-port-oh") && + (p_FmTestPort->portType == e_IOC_FMT_PORT_T_OP)) { + if (*uint32_prop == p_FmTestPort->id) + { + of_dev = of_find_device_by_node(fm_port_node); + if (unlikely(of_dev == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!")); + p_FmTestPort->p_TxPort = fm_port_bind(&of_dev->dev); + p_FmTestPort->h_TxFmPortDev = (t_Handle)fm_port_get_handle(p_FmTestPort->p_TxPort); + p_FmTestPort->h_RxFmPortDev = p_FmTestPort->h_TxFmPortDev; + p_FmTestPort->h_Mac = NULL; + break; + } + } + else if ((*uint32_prop == p_FmTestPort->id) && + p_FmTestPort->portType == e_IOC_FMT_PORT_T_RXTX) { + of_dev = of_find_device_by_node(fm_port_node); + if (unlikely(of_dev == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!")); + if(of_device_is_compatible(fm_port_node, "fsl,fman-port-1g-tx")) + { + p_FmTestPort->p_TxPort = fm_port_bind(&of_dev->dev); + p_FmTestPort->h_TxFmPortDev = (t_Handle)fm_port_get_handle(p_FmTestPort->p_TxPort); + } + else if(of_device_is_compatible(fm_port_node, "fsl,fman-port-1g-rx")) + { + p_FmTestPort->p_RxPort = fm_port_bind(&of_dev->dev); + p_FmTestPort->h_RxFmPortDev = (t_Handle)fm_port_get_handle(p_FmTestPort->p_RxPort); + } + else if (of_device_is_compatible(fm_port_node, "fsl,fman-1g-mac")) + p_FmTestPort->h_Mac = (typeof(p_FmTestPort->h_Mac))dev_get_drvdata(&of_dev->dev); + else + continue; + if(p_FmTestPort->h_TxFmPortDev && p_FmTestPort->h_RxFmPortDev && p_FmTestPort->h_Mac) + break; + } + else if (((*uint32_prop + FM_MAX_NUM_OF_1G_RX_PORTS )== p_FmTestPort->id) && + p_FmTestPort->portType == e_IOC_FMT_PORT_T_RXTX) { + of_dev = of_find_device_by_node(fm_port_node); + if (unlikely(of_dev == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!")); + if(of_device_is_compatible(fm_port_node, "fsl,fman-port-10g-tx")) + { + p_FmTestPort->p_TxPort = fm_port_bind(&of_dev->dev); + p_FmTestPort->h_TxFmPortDev = (t_Handle)fm_port_get_handle(p_FmTestPort->p_TxPort); + } + else if(of_device_is_compatible(fm_port_node, "fsl,fman-port-10g-rx")) + { + p_FmTestPort->p_RxPort = fm_port_bind(&of_dev->dev); + p_FmTestPort->h_RxFmPortDev = (t_Handle)fm_port_get_handle(p_FmTestPort->p_RxPort); + } + else if (of_device_is_compatible(fm_port_node, "fsl,fman-10g-mac")) + p_FmTestPort->h_Mac = (typeof(p_FmTestPort->h_Mac))dev_get_drvdata(&of_dev->dev); + else + continue; + 
if(p_FmTestPort->h_TxFmPortDev && p_FmTestPort->h_RxFmPortDev && p_FmTestPort->h_Mac) + break; + } + } //for_each_child + } + } //for each matching node + + DBG(TRACE, ("h_TxFmPortDev - 0x%08x, h_RxFmPortDev - 0x%08x, h_Mac - 0x%08x\n", + p_FmTestPort->h_TxFmPortDev,p_FmTestPort->h_RxFmPortDev,p_FmTestPort->h_Mac)); + + if(p_FmTestPort->h_TxFmPortDev == 0 || p_FmTestPort->h_RxFmPortDev == 0) + RETURN_ERROR(MINOR, E_INVALID_ADDRESS, ("Bad pointers!")); + + //init Queues + for (i=0; inumOfTxQs; i++) { + p_FmTestPort->p_TxFqs[i] = + FqAlloc(p_FmTestPort, + 0, + QMAN_FQ_FLAG_TO_DCPORTAL, + fm_get_tx_port_channel(p_FmTestPort->p_TxPort), + i); + if (IS_ERR(p_FmTestPort->p_TxFqs[i])) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("Tx FQs!")); + } + + p_FmTestPort->valid = TRUE; + + return E_OK; +} + + +bool is_fman_test (void *mac_dev, + uint32_t queueId, + uint8_t *buffer, + uint32_t size) +{ + t_FmTest *p_FmTest = &fmTest; + t_FmTestPort *p_FmTestPort=NULL; + t_FmTestFrame *p_FmTestFrame; + uint32_t count=size-7; + uint64_t temp; + uint8_t *temp_buf=buffer, i; + bool fman_test_flag = false; + uint32_t dataOffset; + struct fm_port *fm_port = (struct fm_port *) mac_dev; + +#if 0 + if ((queueId == FMT_TX_CONF_Q) || (queueId == FMT_TX_ERR_Q)) + { + /* Check for the FM-test-port object existence, otherwise we make wrong frees all the time */ + for (i=0; iports[i].h_Mac) + p_FmTestPort = &p_FmTest->ports[i]; + if (!p_FmTestPort) + return false; + + if (buffer != NULL) + XX_Free(buffer); + return true; + } +#endif + + /* Get the FM-test-port object */ + for (i=0; iports[i].h_Mac && mac_dev == p_FmTest->ports[i].h_Mac) || + fm_port == p_FmTest->ports[i].p_TxPort) + { + p_FmTestPort = &p_FmTest->ports[i]; + break; + } + if (!p_FmTestPort) + return false; + + /* Check according to watermark if this frame is for FM-test */ + while(count--) + { + temp = *(uint64_t *)temp_buf; + if (temp == FMT_FRM_WATERMARK) + { + fman_test_flag = true; + break; + } + temp_buf++; + } + + if (fman_test_flag /*|| p_FmTestPort->echo*/) + { + if ((queueId == FMT_TX_CONF_Q) || (queueId == FMT_TX_ERR_Q)) + { + if (buffer != NULL) + XX_Free(buffer); + return true; + } + + DBG(TRACE, ("Port %d got FMUC frame\n", p_FmTestPort->id)); + dataOffset = FM_PORT_GetBufferDataOffset(p_FmTestPort->h_RxFmPortDev); + + p_FmTestFrame = (t_FmTestFrame *)XX_Malloc(sizeof(t_FmTestFrame)); + /* dump frame... 
no more space left on device */ + if(p_FmTestFrame == NULL) + return false; + + memset(p_FmTestFrame, 0, sizeof(t_FmTestFrame)); + INIT_LIST(&p_FmTestFrame->node); + + p_FmTestFrame->buff.p_data = (uint8_t *)XX_Malloc(size * sizeof(uint8_t)); + /* No more space left on device*/ + if(p_FmTestFrame->buff.p_data == NULL){ + XX_Free(p_FmTestFrame); + return false; + } + + p_FmTestFrame->buff.size = size-dataOffset; + p_FmTestFrame->buff.qid = queueId; + + memcpy(p_FmTestFrame->buff.p_data, + (uint8_t *)PTR_MOVE(buffer, dataOffset), + p_FmTestFrame->buff.size); + + memcpy(p_FmTestFrame->buff.buff_context.fm_prs_res, + FM_PORT_GetBufferPrsResult(p_FmTestPort->h_RxFmPortDev, (char*)buffer), + 32); + + EnqueueFrameToRxQ(p_FmTestPort, p_FmTestFrame); + return true; + } + + return false; +} + +void fman_test_ip_manip (void *mac_dev, uint8_t *data) +{ + t_FmTest *p_FmTest = &fmTest; + t_FmTestPort *p_FmTestPort=NULL; + struct iphdr *iph; + uint32_t *p_Data = (uint32_t *)data; + uint32_t net; + uint32_t saddr, daddr; + uint8_t i; + + /* Get the FM-test-port object */ + for (i=0; iports[i].h_Mac) + p_FmTestPort = &p_FmTest->ports[i]; + if (!p_FmTestPort || !p_FmTestPort->ip_header_manip) + return; + + iph = (struct iphdr *)p_Data; + saddr = iph->saddr; + daddr = iph->daddr; + + /* If it is ARP packet ... */ + if (*p_Data == 0x00010800) + { + saddr = *((uint32_t *)PTR_MOVE(p_Data, 14)); + daddr = *((uint32_t *)PTR_MOVE(p_Data, 24)); + } + + DBG(TRACE, + ("\nSrc IP before header-manipulation: %d.%d.%d.%d" + "\nDest IP before header-manipulation: %d.%d.%d.%d", + (int)((saddr & 0xff000000) >> 24), + (int)((saddr & 0x00ff0000) >> 16), + (int)((saddr & 0x0000ff00) >> 8), + (int)((saddr & 0x000000ff) >> 0), + (int)((daddr & 0xff000000) >> 24), + (int)((daddr & 0x00ff0000) >> 16), + (int)((daddr & 0x0000ff00) >> 8), + (int)((daddr & 0x000000ff) >> 0))); + + if ((p_FmTestPort->diag == e_IOC_DIAG_MODE_CTRL_LOOPBACK) || + (p_FmTestPort->diag == e_IOC_DIAG_MODE_CHIP_LOOPBACK) || + (p_FmTestPort->diag == e_IOC_DIAG_MODE_PHY_LOOPBACK) || + (p_FmTestPort->diag == e_IOC_DIAG_MODE_LINE_LOOPBACK)) + { + net = saddr; + saddr = daddr; + daddr = net; + } + else + { + /* We allow only up to 10 eth ports */ + net = ((daddr & 0x000000ff) % 10); + saddr = (uint32_t)((saddr & ~0x0000ff00) | (net << 8)); + daddr = (uint32_t)((daddr & ~0x0000ff00) | (net << 8)); + } + + /* If not ARP ... 
*/ + if (*p_Data != 0x00010800) + { + iph->check = 0; + + iph->saddr = saddr; + iph->daddr = daddr; + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } + else /* The packet is ARP */ + { + *((uint32_t *)PTR_MOVE(p_Data, 14)) = saddr; + *((uint32_t *)PTR_MOVE(p_Data, 24)) = daddr; + } + + DBG(TRACE, + ("\nSrc IP after header-manipulation: %d.%d.%d.%d" + "\nDest IP after header-manipulation: %d.%d.%d.%d", + (int)((saddr & 0xff000000) >> 24), + (int)((saddr & 0x00ff0000) >> 16), + (int)((saddr & 0x0000ff00) >> 8), + (int)((saddr & 0x000000ff) >> 0), + (int)((daddr & 0xff000000) >> 24), + (int)((daddr & 0x00ff0000) >> 16), + (int)((daddr & 0x0000ff00) >> 8), + (int)((daddr & 0x000000ff) >> 0))); +} + + +/*****************************************************************************/ +/* API routines for the FM Linux Device */ +/*****************************************************************************/ + +static int fm_test_open(struct inode *inode, struct file *file) +{ + t_FmTest *p_FmTest = &fmTest; + //unsigned int major = imajor(inode); + unsigned int minor = iminor(inode); + + DBG(TRACE, ("Opening minor - %d - ", minor)); + + if (file->private_data != NULL) + return 0; + + if ((minor >= DEV_FM_TEST_PORTS_MINOR_BASE) && + (minor < DEV_FM_TEST_MAX_MINORS)) + file->private_data = &p_FmTest->ports[minor]; + else + return -ENXIO; + + return 0; +} + +static int fm_test_close(struct inode *inode, struct file *file) +{ + t_FmTestPort *p_FmTestPort; + unsigned int minor = iminor(inode); + int err = 0; + + DBG(TRACE, ("Closing minor - %d - ", minor)); + + p_FmTestPort = file->private_data; + if (!p_FmTestPort) + return -ENODEV; + + p_FmTestPort->valid = FALSE; + + /* Complete!!! */ + return err; +} + +static int fm_test_ioctls(unsigned int minor, struct file *file, unsigned int cmd, unsigned long arg, bool compat) +{ + t_FmTestPort *p_FmTestPort; + + DBG(TRACE, ("IOCTL minor - %d, cmd - 0x%08x, arg - 0x%08x", minor, cmd, arg)); + + p_FmTestPort = file->private_data; + if (!p_FmTestPort) + return -ENODEV; + + switch (cmd) + { + case FMT_PORT_IOC_INIT: + { + ioc_fmt_port_param_t param; + + if (p_FmTestPort->valid) { + REPORT_ERROR(MINOR, E_INVALID_STATE, ("port is already initialized!!!")); + return -EFAULT; + } +#if defined(FMAN_TEST_CONFIG_COMPAT) + if (compat){ + if (copy_from_user(¶m, (ioc_fmt_port_param_t *)compat_ptr(arg), sizeof(ioc_fmt_port_param_t))) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + } +#endif + else{ + if (copy_from_user(¶m, (ioc_fmt_port_param_t *) arg, sizeof(ioc_fmt_port_param_t))) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + } + + return PortInit(p_FmTestPort, ¶m); + } + + case FMT_PORT_IOC_SET_DIAG_MODE: + { + if (get_user(p_FmTestPort->diag, (ioc_diag_mode *)arg)) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + + if (p_FmTestPort->diag == e_IOC_DIAG_MODE_CTRL_LOOPBACK) + return SetMacIntLoopback(p_FmTestPort, TRUE); + else + return SetMacIntLoopback(p_FmTestPort, FALSE); + break; + } + + case FMT_PORT_IOC_SET_DPAECHO_MODE: + { +#if defined(FMAN_TEST_CONFIG_COMPAT) + if (compat){ + if (get_user(p_FmTestPort->echo, (int *)compat_ptr(arg))) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + } + else +#endif + { + if (get_user(p_FmTestPort->echo, (int *)arg)) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + } + break; + } + + case FMT_PORT_IOC_SET_IP_HEADER_MANIP: + { +#if defined(FMAN_TEST_CONFIG_COMPAT) + if (compat){ + if 
(get_user(p_FmTestPort->ip_header_manip, (int *)compat_ptr(arg))) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + } + else +#endif + { + if (get_user(p_FmTestPort->ip_header_manip, (int *)arg)) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -EFAULT; + } + } + break; + } + + default: + REPORT_ERROR(MINOR, E_INVALID_SELECTION, ("IOCTL TEST cmd (0x%08lx):(0x%02lx:0x%02lx)!", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd))); + return -EFAULT; + } + + return 0; +} + +#ifdef FMAN_TEST_CONFIG_COMPAT +static long fm_test_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned int minor = iminor(file->f_path.dentry->d_inode); + + return fm_test_ioctls(minor, file, cmd, arg, true); +} +#endif + +static long fm_test_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned int minor = iminor(file->f_path.dentry->d_inode); + unsigned int res; + + fm_mutex_lock(); + res = fm_test_ioctls(minor, file, cmd, arg, false); + fm_mutex_unlock(); + + return res; +} + +#ifdef FMAN_TEST_CONFIG_COMPAT +void copy_compat_test_frame_buffer(ioc_fmt_buff_desc_t *buff, ioc_fmt_compat_buff_desc_t *compat_buff) +{ + compat_buff->qid = buff->qid; + compat_buff->p_data = ptr_to_compat(buff->p_data); + compat_buff->size = buff->size; + compat_buff->status = buff->status; + + compat_buff->buff_context.p_user_priv = ptr_to_compat(buff->buff_context.p_user_priv); + memcpy(compat_buff->buff_context.fm_prs_res,buff->buff_context.fm_prs_res,FM_PRS_MAX * sizeof(uint8_t)); + memcpy(compat_buff->buff_context.fm_time_stamp,buff->buff_context.fm_time_stamp,FM_TIME_STAMP_MAX * sizeof(uint8_t)); +} +#endif + +ssize_t fm_test_read (struct file *file, char __user *buf, size_t size, loff_t *ppos) +{ + t_FmTestPort *p_FmTestPort; + t_FmTestFrame *p_FmTestFrame; + ssize_t cnt = 0; + + p_FmTestPort = file->private_data; + if (!p_FmTestPort || !p_FmTestPort->valid) + return -ENODEV; + + p_FmTestFrame = DequeueFrameFromRxQ(p_FmTestPort); + if (!p_FmTestFrame) + return 0; + + if (!p_FmTestPort->echo) { +#ifdef FMAN_TEST_CONFIG_COMPAT + cnt = sizeof(ioc_fmt_compat_buff_desc_t); +#else + cnt = sizeof(ioc_fmt_buff_desc_t); +#endif + if (sizebuff.p_data); + XX_Free(p_FmTestFrame); + REPORT_ERROR(MINOR, E_NO_MEMORY, ("Illegal buffer-size!")); + return 0; + } + + /* Copy structure */ +#ifdef FMAN_TEST_CONFIG_COMPAT + { + ioc_fmt_compat_buff_desc_t compat_buff; + copy_compat_test_frame_buffer(&p_FmTestFrame->buff, &compat_buff); + + if (copy_to_user(buf, &compat_buff, cnt)) { + XX_Free(p_FmTestFrame->buff.p_data); + XX_Free(p_FmTestFrame); + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return 0; + } + } + + ((ioc_fmt_compat_buff_desc_t *)buf)->p_data = ptr_to_compat(buf+sizeof(ioc_fmt_compat_buff_desc_t)); + cnt += MIN(p_FmTestFrame->buff.size, size-cnt); +#else + if (copy_to_user(buf, &p_FmTestFrame->buff, cnt)) { + XX_Free(p_FmTestFrame->buff.p_data); + XX_Free(p_FmTestFrame); + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return 0; + } + + ((ioc_fmt_buff_desc_t *)buf)->p_data = buf+sizeof(ioc_fmt_buff_desc_t); + cnt += MIN(p_FmTestFrame->buff.size, size-cnt); +#endif + + if (sizebuff.p_data); + XX_Free(p_FmTestFrame); + REPORT_ERROR(MINOR, E_NO_MEMORY, ("Illegal buffer-size!")); + return 0; + } + + /* copy frame */ +#ifdef FMAN_TEST_CONFIG_COMPAT + if (copy_to_user(buf+sizeof(ioc_fmt_compat_buff_desc_t), p_FmTestFrame->buff.p_data, cnt)) { + XX_Free(p_FmTestFrame->buff.p_data); + XX_Free(p_FmTestFrame); + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return 0; + } +#else + 
if (copy_to_user(buf+sizeof(ioc_fmt_buff_desc_t), p_FmTestFrame->buff.p_data, cnt)) { + XX_Free(p_FmTestFrame->buff.p_data); + XX_Free(p_FmTestFrame); + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return 0; + } +#endif + + XX_Free(p_FmTestFrame->buff.p_data); + XX_Free(p_FmTestFrame); + } + else { + uint8_t *p_Data = NULL; + uint32_t frameSize =0; + t_DpaaFD fd; + int _errno = 0; + + p_Data = (uint8_t *)p_FmTestFrame->buff.p_data; + frameSize = p_FmTestFrame->buff.size; + + /* paranoia ...*/ + if(!p_Data){ + XX_Free(p_FmTestFrame); + return 0; + } + + /* Set frame descriptor... */ + memset(&fd, 0, sizeof(fd)); + DPAA_FD_SET_ADDR(&fd, p_Data); + DPAA_FD_SET_OFFSET(&fd, 0); + DPAA_FD_SET_LENGTH(&fd, frameSize); + + /* Enqueue frame... */ + _errno = qman_enqueue(p_FmTestPort->p_TxFqs[0], (struct qm_fd*)&fd, 0); + if (_errno) { + XX_Free(p_FmTestFrame); + XX_Free(p_Data); + return 0; + } + + XX_Free(p_FmTestFrame); + } + + return cnt; +} + +ssize_t fm_test_write (struct file *file, const char __user *buf, size_t size, loff_t *ppos) +{ + t_FmTestPort *p_FmTestPort; + + ioc_fmt_buff_desc_t buffDesc; +#ifdef FMAN_TEST_CONFIG_COMPAT + ioc_fmt_compat_buff_desc_t compatBuffDesc; +#endif + t_DpaaFD fd; + uint8_t *p_Data; + uint32_t dataOffset; + int _errno; + + p_FmTestPort = file->private_data; + if (!p_FmTestPort || !p_FmTestPort->valid) { + REPORT_ERROR(MINOR, E_INVALID_HANDLE, NO_MSG); + return -1; + } + + /* If Compat (32B UserSpace - 64B KernelSpace) */ +#ifdef FMAN_TEST_CONFIG_COMPAT + if (copy_from_user(&compatBuffDesc, buf, sizeof(ioc_fmt_compat_buff_desc_t))) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -1; + } + + buffDesc.qid = compatBuffDesc.qid; + buffDesc.p_data = compat_ptr(compatBuffDesc.p_data); + buffDesc.size = compatBuffDesc.size; + buffDesc.status = compatBuffDesc.status; + + buffDesc.buff_context.p_user_priv = compat_ptr(compatBuffDesc.buff_context.p_user_priv); + memcpy(buffDesc.buff_context.fm_prs_res, compatBuffDesc.buff_context.fm_prs_res, FM_PRS_MAX * sizeof(uint8_t)); + memcpy(buffDesc.buff_context.fm_time_stamp, compatBuffDesc.buff_context.fm_time_stamp, FM_TIME_STAMP_MAX * sizeof(uint8_t)); +#else + if (copy_from_user(&buffDesc, (ioc_fmt_buff_desc_t *)buf, sizeof(ioc_fmt_buff_desc_t))) { + REPORT_ERROR(MINOR, E_INVALID_STATE, NO_MSG); + return -1; + } +#endif + + dataOffset = FM_PORT_GetBufferDataOffset(p_FmTestPort->h_TxFmPortDev); + p_Data = (uint8_t*)XX_Malloc(buffDesc.size+dataOffset); + if (!p_Data) { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("data buff!")); + return -1; + } + + /* If Compat (32UserSpace - 64KernelSpace) the buffDesc.p_data is ok */ + if (copy_from_user ((uint8_t *)PTR_MOVE(p_Data, dataOffset), + buffDesc.p_data, + buffDesc.size)) { + REPORT_ERROR(MINOR, E_NO_MEMORY, ("data buff!")); + XX_Free(p_Data); + return -1; + } + + memset(&fd, 0, sizeof(fd)); + DPAA_FD_SET_ADDR(&fd, p_Data); + DPAA_FD_SET_OFFSET(&fd, dataOffset); + DPAA_FD_SET_LENGTH(&fd, buffDesc.size); + + DBG(TRACE, ("buffDesc qId %d, fqid %d, frame len %d, fq 0x%8x\n", + buffDesc.qid, qman_fq_fqid(p_FmTestPort->p_TxFqs[buffDesc.qid]), buffDesc.size, p_FmTestPort->p_TxFqs[buffDesc.qid])); + + _errno = qman_enqueue(p_FmTestPort->p_TxFqs[buffDesc.qid], (struct qm_fd*)&fd, 0); + if (_errno) { + buffDesc.status = (uint32_t)_errno; + if (copy_to_user((ioc_fmt_buff_desc_t*)buf, &buffDesc, sizeof(ioc_fmt_buff_desc_t))) { + REPORT_ERROR(MINOR, E_WRITE_FAILED, NO_MSG); + XX_Free(p_Data); + return -1; + } + } + return buffDesc.size; +} + +/* Globals for FM character device */ +static 
struct file_operations fm_test_fops = +{ + owner: THIS_MODULE, +#ifdef FMAN_TEST_CONFIG_COMPAT + compat_ioctl:fm_test_compat_ioctl, +#endif + unlocked_ioctl: fm_test_ioctl, + open: fm_test_open, + release: fm_test_close, + read: fm_test_read, + write: fm_test_write, +}; + +t_Handle LNXWRP_FM_TEST_Init(void) +{ + t_FmTest *p_FmTest = &fmTest; + int id; + + /* Register to the /dev for IOCTL API */ + /* Register dynamically a new major number for the character device: */ + if ((p_FmTest->major = register_chrdev(0, DEV_FM_TEST_NAME, &fm_test_fops)) <= 0) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Failed to allocate a major number for device \"%s\"", DEV_FM_TEST_NAME)); + return NULL; + } + + /* Creating class for FMan_test */ + DBG(TRACE ,("class_create fm_test_class")); + p_FmTest->fm_test_class = class_create(THIS_MODULE, DEV_FM_TEST_NAME); + if (IS_ERR(p_FmTest->fm_test_class)) { + unregister_chrdev(p_FmTest->major, DEV_FM_TEST_NAME); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("class_create error fm_test_class")); + return NULL; + } + + for (id = 0; id < IOC_FMT_MAX_NUM_OF_PORTS; id++) + if(NULL == device_create(p_FmTest->fm_test_class, NULL, MKDEV(p_FmTest->major, DEV_FM_TEST_PORTS_MINOR_BASE + id), NULL, + DEV_FM_TEST_NAME "%d", id)) + { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Error creating device.\n")); + return NULL; + } + + /* Seed the QMan allocator so we'll have enough queues to run PCD with + dinamically fqid-range allocation */ + qman_release_fqid_range(0x100, 0x400); + + return p_FmTest; +} + +t_Error LNXWRP_FM_TEST_Free(t_Handle h_FmTestLnxWrp) +{ + t_FmTest *p_FmTest = (t_FmTest*)h_FmTestLnxWrp; + int id; + + DBG(TRACE, ("destroy fm_test_class")); + for (id = 0; id < IOC_FMT_MAX_NUM_OF_PORTS; id++) + device_destroy(p_FmTest->fm_test_class, MKDEV(p_FmTest->major, DEV_FM_TEST_PORTS_MINOR_BASE + id)); + class_destroy(p_FmTest->fm_test_class); + + return E_OK; +} + +static t_Handle h_FmTestLnxWrp; + +static int __init __cold fm_test_load (void) +{ + if ((h_FmTestLnxWrp = LNXWRP_FM_TEST_Init()) == NULL) + { + printk("Failed to init FM-test wrapper!\n"); + if (h_FmTestLnxWrp) + LNXWRP_FM_TEST_Free(h_FmTestLnxWrp); + return -ENODEV; + } + + printk (KERN_CRIT "Freescale FM test module ("__DATE__ ":"__TIME__")\n"); + + return 0; +} + +static void __exit __cold fm_test_unload (void) +{ + if (h_FmTestLnxWrp) + LNXWRP_FM_TEST_Free(h_FmTestLnxWrp); +} + +module_init (fm_test_load); +module_exit (fm_test_unload); --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_fm.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_fm.c @@ -0,0 +1,1204 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_fm.c + + @Author Shlomi Gridish + + @Description FM Linux wrapper functions. + +*/ + +#include +#include +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For struct qe_firmware */ +#include +#include /* For file access mask */ +#include +#include +#include + +/* NetCommSw Headers --------------- */ +#include "std_ext.h" +#include "error_ext.h" +#include "sprint_ext.h" +#include "debug_ext.h" +#include "sys_io_ext.h" + +#include "fm_ioctls.h" + +#include "lnxwrp_fm.h" +#include "lnxwrp_resources.h" +#include "lnxwrp_sysfs_fm.h" +#include "lnxwrp_sysfs_fm_port.h" + +#define PROC_PRINT(args...) 
offset += sprintf(buf+offset,args) + +#define ADD_ADV_CONFIG_NO_RET(_func, _param) \ + do { \ + if (i<max) { \ + p_Entry = &p_Entrys[i]; \ + p_Entry->p_Function = _func; \ + _param \ + i++; \ + } \ + else \ + REPORT_ERROR(MAJOR, E_INVALID_VALUE,\ + ("Number of advanced-configuration entries exceeded"));\ + } while (0) + +static t_LnxWrpFm lnxWrpFm; + + +static irqreturn_t fm_irq(int irq, void *_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)_dev; + + if (!p_LnxWrpFmDev || !p_LnxWrpFmDev->h_Dev) + return IRQ_NONE; + + FM_EventIsr(p_LnxWrpFmDev->h_Dev); + + return IRQ_HANDLED; +} + +static irqreturn_t fm_err_irq(int irq, void *_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)_dev; + + if (!p_LnxWrpFmDev || !p_LnxWrpFmDev->h_Dev) + return IRQ_NONE; + + if (FM_ErrorIsr(p_LnxWrpFmDev->h_Dev) == E_OK) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +/* used to protect FMD/LLD from concurrent calls in functions fm_mutex_lock / fm_mutex_unlock */ +static struct mutex lnxwrp_mutex; + +static t_LnxWrpFmDev * CreateFmDev(uint8_t id) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + int j; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *)XX_Malloc(sizeof(t_LnxWrpFmDev)); + if (!p_LnxWrpFmDev) + { + REPORT_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + return NULL; + } + + memset(p_LnxWrpFmDev, 0, sizeof(t_LnxWrpFmDev)); + p_LnxWrpFmDev->fmDevSettings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)); + memset(p_LnxWrpFmDev->fmDevSettings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry))); + p_LnxWrpFmDev->fmPcdDevSettings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)); + memset(p_LnxWrpFmDev->fmPcdDevSettings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry))); + p_LnxWrpFmDev->hcPort.settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)); + memset(p_LnxWrpFmDev->hcPort.settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry))); + for (j=0; j<FM_MAX_NUM_OF_RX_PORTS; j++) + { + p_LnxWrpFmDev->rxPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)); + memset(p_LnxWrpFmDev->rxPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry))); + } + for (j=0; j<FM_MAX_NUM_OF_TX_PORTS; j++) + { + p_LnxWrpFmDev->txPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)); + memset(p_LnxWrpFmDev->txPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry))); + } + for (j=0; j<FM_MAX_NUM_OF_OH_PORTS-1; j++) + { + p_LnxWrpFmDev->opPorts[j].settings.advConfig = (t_SysObjectAdvConfigEntry*)XX_Malloc(FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry)); + memset(p_LnxWrpFmDev->opPorts[j].settings.advConfig, 0, (FM_MAX_NUM_OF_ADV_SETTINGS*sizeof(t_SysObjectAdvConfigEntry))); + } + + return p_LnxWrpFmDev; +} + +static void DestroyFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + int j; + + for (j=0; j<FM_MAX_NUM_OF_OH_PORTS-1; j++) + if (p_LnxWrpFmDev->opPorts[j].settings.advConfig) + XX_Free(p_LnxWrpFmDev->opPorts[j].settings.advConfig); + for (j=0; j<FM_MAX_NUM_OF_TX_PORTS; j++) + if (p_LnxWrpFmDev->txPorts[j].settings.advConfig) + XX_Free(p_LnxWrpFmDev->txPorts[j].settings.advConfig); + for (j=0; j<FM_MAX_NUM_OF_RX_PORTS; j++) + if (p_LnxWrpFmDev->rxPorts[j].settings.advConfig) + XX_Free(p_LnxWrpFmDev->rxPorts[j].settings.advConfig); + if (p_LnxWrpFmDev->hcPort.settings.advConfig) + XX_Free(p_LnxWrpFmDev->hcPort.settings.advConfig); + if (p_LnxWrpFmDev->fmPcdDevSettings.advConfig) + XX_Free(p_LnxWrpFmDev->fmPcdDevSettings.advConfig); + if (p_LnxWrpFmDev->fmDevSettings.advConfig) +
XX_Free(p_LnxWrpFmDev->fmDevSettings.advConfig); + + XX_Free(p_LnxWrpFmDev); +} + +static t_Error FillRestFmInfo(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ +#define FM_BMI_PPIDS_OFFSET 0x00080304 +#define FM_DMA_PLR_OFFSET 0x000c2060 +#define FM_FPM_IP_REV_1_OFFSET 0x000c30c4 +#define DMA_HIGH_LIODN_MASK 0x0FFF0000 +#define DMA_LOW_LIODN_MASK 0x00000FFF +#define DMA_LIODN_SHIFT 16 + +typedef _Packed struct { + uint32_t plr[32]; +} _PackedType t_Plr; + +typedef _Packed struct { + volatile uint32_t fmbm_ppid[63]; +} _PackedType t_Ppids; + + t_Plr *p_Plr; + t_Ppids *p_Ppids; + int i,j; + uint32_t fmRev; + + static const uint8_t phys1GRxPortId[] = {0x8,0x9,0xa,0xb,0xc}; + static const uint8_t phys10GRxPortId[] = {0x10}; + static const uint8_t physOhPortId[] = {0x1,0x2,0x3,0x4,0x5,0x6,0x7}; + static const uint8_t phys1GTxPortId[] = {0x28,0x29,0x2a,0x2b,0x2c}; + static const uint8_t phys10GTxPortId[] = {0x30}; + + fmRev = (uint32_t)(*((volatile uint32_t *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_FPM_IP_REV_1_OFFSET))); + fmRev &= 0xffff; + + p_Plr = (t_Plr *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_DMA_PLR_OFFSET); +#ifdef MODULE + for (i=0;iplr[i] = 0; +#endif /* MODULE */ + + for (i=0; iplr[i/2] & DMA_LOW_LIODN_MASK) : + ((p_Plr->plr[i/2] & DMA_HIGH_LIODN_MASK) >> DMA_LIODN_SHIFT)); +#ifdef FM_PARTITION_ARRAY + /* TODO: this was .liodnPerPartition[i] = liodnBase; is the index meaning the same? */ + p_LnxWrpFmDev->fmDevSettings.param.liodnBasePerPort[i] = liodnBase; +#endif /* FM_PARTITION_ARRAY */ + + if ((i >= phys1GRxPortId[0]) && + (i <= phys1GRxPortId[FM_MAX_NUM_OF_1G_RX_PORTS-1])) + { + for (j=0; jrxPorts[j].settings.param.liodnBase = liodnBase; + } + else if (FM_MAX_NUM_OF_10G_RX_PORTS && + (i >= phys10GRxPortId[0]) && + (i <= phys10GRxPortId[FM_MAX_NUM_OF_10G_RX_PORTS-1])) + { + for (j=0; jrxPorts[FM_MAX_NUM_OF_1G_RX_PORTS+j].settings.param.liodnBase = liodnBase; + } + else if ((i >= physOhPortId[0]) && + (i <= physOhPortId[FM_MAX_NUM_OF_OH_PORTS-1])) + { + for (j=0; jhcPort.settings.param.liodnBase = liodnBase; + else + p_LnxWrpFmDev->opPorts[j - 1].settings.param.liodnBase = liodnBase; + } + else if ((i >= phys1GTxPortId[0]) && + (i <= phys1GTxPortId[FM_MAX_NUM_OF_1G_TX_PORTS-1])) + { + for (j=0; jtxPorts[j].settings.param.liodnBase = liodnBase; + } + else if (FM_MAX_NUM_OF_10G_TX_PORTS && + (i >= phys10GTxPortId[0]) && + (i <= phys10GTxPortId[FM_MAX_NUM_OF_10G_TX_PORTS-1])) + { + for (j=0; jtxPorts[FM_MAX_NUM_OF_1G_TX_PORTS+j].settings.param.liodnBase = liodnBase; + } + } + + p_Ppids = (t_Ppids *)UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr+FM_BMI_PPIDS_OFFSET); + + for (i=0; irxPorts[i].settings.param.specificParams.rxParams.liodnOffset = + p_Ppids->fmbm_ppid[phys1GRxPortId[i]-1]; + + for (i=0; irxPorts[FM_MAX_NUM_OF_1G_RX_PORTS+i].settings.param.specificParams.rxParams.liodnOffset = + p_Ppids->fmbm_ppid[phys10GRxPortId[i]-1]; + +#ifdef FM_OP_PARTITION_ERRATA_FMANx8 + for (i=0; iopPorts[i-1].settings.param.specificParams.nonRxParams.opLiodnOffset = + p_Ppids->fmbm_ppid[physOhPortId[i]-1]; + } +#endif /* FM_OP_PARTITION_ERRATA_FMANx8 */ + + return E_OK; +} + +/* The default address for the Fman microcode in flash. Having a default + * allows older systems to continue functioning. 0xEF000000 is the address + * where the firmware is normally on a P4080DS. 
+ */ +#ifdef CONFIG_PHYS_64BIT +static phys_addr_t P4080_UCAddr = 0xfef000000ull; +#else +static phys_addr_t P4080_UCAddr = 0xef000000; +#endif + + +/** + * FmanUcodeAddrParam - process the fman_ucode kernel command-line parameter + * + * This function is called when the kernel encounters a fman_ucode command- + * line parameter. This parameter contains the address of the Fman microcode + * in flash. + */ +static int FmanUcodeAddrParam(char *str) +{ + unsigned long long l; + int ret; + + ret = strict_strtoull(str, 0, &l); + if (!ret) + P4080_UCAddr = (phys_addr_t) l; + + return ret; +} +__setup("fman_ucode=", FmanUcodeAddrParam); + +/** + * FindFmanMicrocode - find the Fman microcode in memory + * + * This function returns a pointer to the QE Firmware blob that holds + * the Fman microcode. We use the QE Firmware structure because Fman microcode + * is similar to QE microcode, so there's no point in defining a new layout. + * + * Current versions of U-Boot embed the Fman firmware into the device tree, + * so we check for that first. Each Fman node in the device tree contains a + * node or a pointer to node that holds the firmware. Technically, we should + * be fetching the firmware node for the current Fman, but we don't have that + * information any more, so we assume that there is only one firmware node in + * the device tree, and that all Fmen use the same firmware. + * + * If we have an older U-Boot, then we assume that the firmware is located in + * flash at physical address 'P4080_UCAddr' + */ +static const struct qe_firmware *FindFmanMicrocode(void) +{ + static const struct qe_firmware *P4080_UCPatch; + struct device_node *np; +#ifdef FMAN_READ_MICROCODE_FROM_NOR_FLASH + unsigned long P4080_UCSize; + const struct qe_header *hdr; +#endif + + if (P4080_UCPatch) + return P4080_UCPatch; + + /* The firmware should be inside the device tree. */ + np = of_find_compatible_node(NULL, NULL, "fsl,fman-firmware"); + if (np) { + P4080_UCPatch = of_get_property(np, "fsl,firmware", NULL); + of_node_put(np); + if (P4080_UCPatch) + return P4080_UCPatch; + else + REPORT_ERROR(WARNING, E_NOT_FOUND, ("firmware node is incomplete")); + } + +#ifdef FMAN_READ_MICROCODE_FROM_NOR_FLASH + /* If not, then we have a legacy U-Boot. The firmware is in flash. */ + /* Only map enough to the get the core structure */ + P4080_UCPatch = ioremap(P4080_UCAddr, sizeof(struct qe_firmware)); + if (!P4080_UCPatch) { + REPORT_ERROR(MAJOR, E_NULL_POINTER, ("ioremap(%llx) returned NULL", (u64) P4080_UCAddr)); + return NULL; + } + /* Make sure it really is a QE Firmware blob */ + hdr = &P4080_UCPatch->header; + if (!hdr || + (hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') || + (hdr->magic[2] != 'F')) { + REPORT_ERROR(MAJOR, E_NOT_FOUND, ("data at %llx is not a Fman microcode", (u64) P4080_UCAddr)); + return NULL; + } + + /* Now we call ioremap again, this time to pick up the whole blob. We never + * iounmap() the memory because we might reset the Fman at any time. 
+ */ + /* TODO: ionumap() should be performed when unloading the driver */ + P4080_UCSize = sizeof(u32) * P4080_UCPatch->microcode[0].count; + iounmap((void *)P4080_UCPatch); + P4080_UCPatch = ioremap(P4080_UCAddr, P4080_UCSize); + if (!P4080_UCPatch) { + REPORT_ERROR(MAJOR, E_NULL_POINTER, ("ioremap(%llx) returned NULL", (u64) P4080_UCAddr)); + return NULL; + } +#else + /* Returning NULL here forces the reuse of the IRAM content */ + P4080_UCPatch = NULL; +#endif /* FMAN_READ_MICROCODE_FROM_NOR_FLASH */ + return P4080_UCPatch; +} + +static t_LnxWrpFmDev * ReadFmDevTreeNode (struct platform_device *of_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + struct device_node *fm_node, *dev_node, *dpa_node; + struct of_device_id name; + struct resource res; + const uint32_t *uint32_prop; + int _errno=0, lenp; + static struct of_device_id dpa_eth_node_of_match[] = { + { .compatible = "fsl,dpa-ethernet", }, + { /* end of list */ }, + }; + + fm_node = of_node_get(of_dev->dev.of_node); + + uint32_prop = (uint32_t *)of_get_property(fm_node, "cell-index", &lenp); + if (unlikely(uint32_prop == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, cell-index) failed", fm_node->full_name)); + return NULL; + } + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + if (*uint32_prop > INTG_MAX_NUM_OF_FM) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!")); + return NULL; + } + p_LnxWrpFmDev = CreateFmDev(*uint32_prop); + if (!p_LnxWrpFmDev) { + REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG); + return NULL; + } + p_LnxWrpFmDev->dev = &of_dev->dev; + p_LnxWrpFmDev->id = *uint32_prop; + + /* Get the FM interrupt */ + p_LnxWrpFmDev->irq = of_irq_to_resource(fm_node, 0, NULL); + if (unlikely(p_LnxWrpFmDev->irq == /*NO_IRQ*/0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ)); + return NULL; + } + + /* Get the FM error interrupt */ + p_LnxWrpFmDev->err_irq = of_irq_to_resource(fm_node, 1, NULL); + /* TODO - un-comment it once there will be err_irq in the DTS */ +#if 0 + if (unlikely(p_LnxWrpFmDev->err_irq == /*NO_IRQ*/0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_irq_to_resource() = %d", NO_IRQ)); + return NULL; + } +#endif /* 0 */ + + /* Get the FM address */ + _errno = of_address_to_resource(fm_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); + return NULL; + } + + p_LnxWrpFmDev->fmBaseAddr = 0; + p_LnxWrpFmDev->fmPhysBaseAddr = res.start; + p_LnxWrpFmDev->fmMemSize = res.end + 1 - res.start; + + uint32_prop = (uint32_t *)of_get_property(fm_node, "clock-frequency", &lenp); + if (unlikely(uint32_prop == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, clock-frequency) failed", fm_node->full_name)); + return NULL; + } + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + p_LnxWrpFmDev->fmDevSettings.param.fmClkFreq = (*uint32_prop + 500000)/1000000; /* In MHz, rounded */ + + /* Get the MURAM base address and size */ + memset(&name, 0, sizeof(struct of_device_id)); + if (WARN_ON(strlen("muram") >= sizeof(name.name))) + return NULL; + strcpy(name.name, "muram"); + if (WARN_ON(strlen("fsl,fman-muram") >= sizeof(name.compatible))) + return NULL; + strcpy(name.compatible, "fsl,fman-muram"); + for_each_child_of_node(fm_node, dev_node) { + if (likely(of_match_node(&name, dev_node) != NULL)) { + _errno = of_address_to_resource(dev_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); + return NULL; + } 
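/*
 * Side note (illustrative, not in the original patch): the lengths computed
 * below as "res.end + 1 - res.start" are exactly what the standard helper in
 * <linux/ioport.h> returns:
 *
 *   static inline resource_size_t resource_size(const struct resource *res)
 *   {
 *           return res->end - res->start + 1;
 *   }
 *
 * so fmMuramMemSize (and the other *MemSize fields derived the same way)
 * could equivalently be filled in with resource_size(&res).
 */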
+ + p_LnxWrpFmDev->fmMuramBaseAddr = 0; + p_LnxWrpFmDev->fmMuramPhysBaseAddr = res.start; + p_LnxWrpFmDev->fmMuramMemSize = res.end + 1 - res.start; + } + } + + /* Get the RTC base address and size */ + memset(&name, 0, sizeof(struct of_device_id)); + if (WARN_ON(strlen("rtc") >= sizeof(name.name))) + return NULL; + strcpy(name.name, "rtc"); + if (WARN_ON(strlen("fsl,fman-rtc") >= sizeof(name.compatible))) + return NULL; + strcpy(name.compatible, "fsl,fman-rtc"); + for_each_child_of_node(fm_node, dev_node) { + if (likely(of_match_node(&name, dev_node) != NULL)) { + _errno = of_address_to_resource(dev_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_address_to_resource() = %d", _errno)); + return NULL; + } + + p_LnxWrpFmDev->fmRtcBaseAddr = 0; + p_LnxWrpFmDev->fmRtcPhysBaseAddr = res.start; + p_LnxWrpFmDev->fmRtcMemSize = res.end + 1 - res.start; + } + } + + /* Get all PCD nodes */ + memset(&name, 0, sizeof(struct of_device_id)); + if (WARN_ON(strlen("parser") >= sizeof(name.name))) + return NULL; + strcpy(name.name, "parser"); + if (WARN_ON(strlen("fsl,fman-parser") >= sizeof(name.compatible))) + return NULL; + strcpy(name.compatible, "fsl,fman-parser"); + for_each_child_of_node(fm_node, dev_node) + if (likely(of_match_node(&name, dev_node) != NULL)) + p_LnxWrpFmDev->prsActive = TRUE; + + memset(&name, 0, sizeof(struct of_device_id)); + if (WARN_ON(strlen("keygen") >= sizeof(name.name))) + return NULL; + strcpy(name.name, "keygen"); + if (WARN_ON(strlen("fsl,fman-keygen") >= sizeof(name.compatible))) + return NULL; + strcpy(name.compatible, "fsl,fman-keygen"); + for_each_child_of_node(fm_node, dev_node) + if (likely(of_match_node(&name, dev_node) != NULL)) + p_LnxWrpFmDev->kgActive = TRUE; + + memset(&name, 0, sizeof(struct of_device_id)); + if (WARN_ON(strlen("cc") >= sizeof(name.name))) + return NULL; + strcpy(name.name, "cc"); + if (WARN_ON(strlen("fsl,fman-cc") >= sizeof(name.compatible))) + return NULL; + strcpy(name.compatible, "fsl,fman-cc"); + for_each_child_of_node(fm_node, dev_node) + if (likely(of_match_node(&name, dev_node) != NULL)) + p_LnxWrpFmDev->ccActive = TRUE; + + memset(&name, 0, sizeof(struct of_device_id)); + if (WARN_ON(strlen("policer") >= sizeof(name.name))) + return NULL; + strcpy(name.name, "policer"); + if (WARN_ON(strlen("fsl,fman-policer") >= sizeof(name.compatible))) + return NULL; + strcpy(name.compatible, "fsl,fman-policer"); + for_each_child_of_node(fm_node, dev_node) + if (likely(of_match_node(&name, dev_node) != NULL)) + p_LnxWrpFmDev->plcrActive = TRUE; + + if (p_LnxWrpFmDev->prsActive || p_LnxWrpFmDev->kgActive || + p_LnxWrpFmDev->ccActive || p_LnxWrpFmDev->plcrActive) + p_LnxWrpFmDev->pcdActive = TRUE; + + if (p_LnxWrpFmDev->pcdActive) + { + const char *str_prop = (char *)of_get_property(fm_node, "fsl,default-pcd", &lenp); + if (str_prop) { + if (strncmp(str_prop, "3-tuple", strlen("3-tuple")) == 0) + p_LnxWrpFmDev->defPcd = e_FM_PCD_3_TUPLE; + } + else + p_LnxWrpFmDev->defPcd = e_NO_PCD; + } + + of_node_put(fm_node); + + for_each_matching_node(dpa_node, dpa_eth_node_of_match) { + struct device_node *mac_node; + const phandle *phandle_prop; + + phandle_prop = (typeof(phandle_prop))of_get_property(dpa_node, "fsl,fman-mac", &lenp); + if (phandle_prop == NULL) + continue; + + if (WARN_ON(lenp != sizeof(phandle))) + return NULL; + + mac_node = of_find_node_by_phandle(*phandle_prop); + if (unlikely(mac_node == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_find_node_by_phandle() failed")); + return NULL; + } + 
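/*
 * Illustrative aside, not part of the original patch: this loop resolves
 * phandle properties ("fsl,fman-mac", "fsl,qman-channel") to device nodes.
 * The generic pattern, assuming <linux/of.h>, looks like:
 *
 *   const phandle *ph = of_get_property(np, "some-phandle-prop", NULL);
 *   struct device_node *target = ph ? of_find_node_by_phandle(*ph) : NULL;
 *   if (target) {
 *           // ... read properties of target ...
 *           of_node_put(target);  // drop the reference taken by the lookup
 *   }
 *
 * "np" and "some-phandle-prop" are hypothetical names for the example; the
 * code below follows the same pairing of of_find_node_by_phandle() /
 * of_get_parent() with of_node_put().
 */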
+ fm_node = of_get_parent(mac_node); + of_node_put(mac_node); + if (unlikely(fm_node == NULL)) { + REPORT_ERROR(MAJOR, E_NO_DEVICE, ("of_get_parent() = %d", _errno)); + return NULL; + } + + uint32_prop = (uint32_t *)of_get_property(fm_node, "cell-index", &lenp); + if (unlikely(uint32_prop == NULL)) { + of_node_put(fm_node); + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, cell-index) failed", fm_node->full_name)); + return NULL; + } + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + of_node_put(fm_node); + + if (*uint32_prop == p_LnxWrpFmDev->id) { + phandle_prop = (typeof(phandle_prop))of_get_property(dpa_node, "fsl,qman-channel", &lenp); + if (unlikely(phandle_prop == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, fsl,qman-channel) failed", dpa_node->full_name)); + return NULL; + } + if (WARN_ON(lenp != sizeof(phandle))) + return NULL; + + dev_node = of_find_node_by_phandle(*phandle_prop); + if (unlikely(dev_node == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_find_node_by_phandle() failed")); + return NULL; + } + + uint32_prop = (typeof(uint32_prop))of_get_property(dev_node, "fsl,qman-channel-id", &lenp); + if (unlikely(uint32_prop == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("of_get_property(%s, fsl,qman-channel-id) failed", dev_node->full_name)); + of_node_put(dev_node); + return NULL; + } + of_node_put(dev_node); + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + p_LnxWrpFmDev->hcCh = *uint32_prop; + break; + } + } + + p_LnxWrpFmDev->active = TRUE; + + return p_LnxWrpFmDev; +} + +static void LnxwrpFmDevExceptionsCb(t_Handle h_App, e_FmExceptions exception) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)h_App; + + ASSERT_COND(p_LnxWrpFmDev); + + DBG(INFO, ("got fm exception %d", exception)); + + /* do nothing */ + UNUSED(exception); +} + +static void LnxwrpFmDevBusErrorCb(t_Handle h_App, + e_FmPortType portType, + uint8_t portId, + uint64_t addr, + uint8_t tnum, + uint16_t liodn) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *)h_App; + + ASSERT_COND(p_LnxWrpFmDev); + + /* do nothing */ + UNUSED(portType);UNUSED(portId);UNUSED(addr);UNUSED(tnum);UNUSED(liodn); +} + +static t_Error ConfigureFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + struct resource *dev_res; + int _errno; + + if (!p_LnxWrpFmDev->active) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM not configured!!!")); + +#ifndef MODULE + _errno = can_request_irq(p_LnxWrpFmDev->irq, 0); + if (unlikely(_errno < 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno)); +#endif + _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, fm_irq, 0, "fman", p_LnxWrpFmDev); + if (unlikely(_errno < 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->irq, _errno)); + + if (p_LnxWrpFmDev->err_irq != 0) { +#ifndef MODULE + _errno = can_request_irq(p_LnxWrpFmDev->err_irq, 0); + if (unlikely(_errno < 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("can_request_irq() = %d", _errno)); +#endif + _errno = devm_request_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->err_irq, fm_err_irq, IRQF_SHARED, "fman-err", p_LnxWrpFmDev); + if (unlikely(_errno < 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_irq(%d) = %d", p_LnxWrpFmDev->err_irq, _errno)); + } + + p_LnxWrpFmDev->res = devm_request_mem_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize, "fman"); + if (unlikely(p_LnxWrpFmDev->res == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("request_mem_region() failed")); + + p_LnxWrpFmDev->fmBaseAddr = 
PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize)); + if (unlikely(p_LnxWrpFmDev->fmBaseAddr == 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed")); + + if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmBaseAddr, (uint64_t)p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM memory map")); + + dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize, "fman-muram"); + if (unlikely(dev_res == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed")); + + p_LnxWrpFmDev->fmMuramBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize)); + if (unlikely(p_LnxWrpFmDev->fmMuramBaseAddr == 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed")); + + if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmMuramBaseAddr, (uint64_t)p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM MURAM memory map")); + + if (p_LnxWrpFmDev->fmRtcPhysBaseAddr) + { + dev_res = __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize, "fman-rtc"); + if (unlikely(dev_res == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("__devm_request_region() failed")); + + p_LnxWrpFmDev->fmRtcBaseAddr = PTR_TO_UINT(devm_ioremap(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize)); + if (unlikely(p_LnxWrpFmDev->fmRtcBaseAddr == 0)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("devm_ioremap() failed")); + + if (SYS_RegisterIoMap((uint64_t)p_LnxWrpFmDev->fmRtcBaseAddr, (uint64_t)p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC memory map")); + } + + p_LnxWrpFmDev->fmDevSettings.param.baseAddr = p_LnxWrpFmDev->fmBaseAddr; + p_LnxWrpFmDev->fmDevSettings.param.fmId = p_LnxWrpFmDev->id; + p_LnxWrpFmDev->fmDevSettings.param.irq = NO_IRQ; + p_LnxWrpFmDev->fmDevSettings.param.errIrq = NO_IRQ; + p_LnxWrpFmDev->fmDevSettings.param.f_Exception = LnxwrpFmDevExceptionsCb; + p_LnxWrpFmDev->fmDevSettings.param.f_BusError = LnxwrpFmDevBusErrorCb; + p_LnxWrpFmDev->fmDevSettings.param.h_App = p_LnxWrpFmDev; + + return FillRestFmInfo(p_LnxWrpFmDev); +} + +static t_Error InitFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + const struct qe_firmware *fw; + + if (!p_LnxWrpFmDev->active) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM not configured!!!")); + + if ((p_LnxWrpFmDev->h_MuramDev = FM_MURAM_ConfigAndInit(p_LnxWrpFmDev->fmMuramBaseAddr, p_LnxWrpFmDev->fmMuramMemSize)) == NULL) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-MURAM!")); + + /* Loading the fman-controller code */ + fw = FindFmanMicrocode(); + + if (!fw) { +#ifdef FMAN_READ_MICROCODE_FROM_NOR_FLASH + /* We already reported an error, so just return NULL*/ + return ERROR_CODE(E_NULL_POINTER); +#else + /* this forces the reuse of the current IRAM content */ + p_LnxWrpFmDev->fmDevSettings.param.firmware.size = 0; + p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = NULL; +#endif + } else { + p_LnxWrpFmDev->fmDevSettings.param.firmware.p_Code = + (void *) fw + fw->microcode[0].code_offset; + p_LnxWrpFmDev->fmDevSettings.param.firmware.size = + sizeof(u32) * fw->microcode[0].count; + DBG(INFO, ("Loading fman-controller code version %d.%d.%d", + fw->microcode[0].major, + 
fw->microcode[0].minor, + fw->microcode[0].revision)); + } + + p_LnxWrpFmDev->fmDevSettings.param.h_FmMuram = p_LnxWrpFmDev->h_MuramDev; + + if ((p_LnxWrpFmDev->h_Dev = FM_Config(&p_LnxWrpFmDev->fmDevSettings.param)) == NULL) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM")); + + if (FM_ConfigMaxNumOfOpenDmas(p_LnxWrpFmDev->h_Dev,BMI_MAX_NUM_OF_DMAS) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); + + if (FM_ConfigResetOnInit(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); + +#ifdef CONFIG_FMAN_P1023 + if (FM_ConfigDmaAidOverride(p_LnxWrpFmDev->h_Dev, TRUE) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); +#endif + + /* Use the entire amount of TNUMS, maybe performance will improve... + for OPEN DMAs - are all by default = 32 and fifosize = MURAM*3/4 and + the rest of it is for PCD */ + FM_ConfigTotalNumOfTasks(p_LnxWrpFmDev->h_Dev, BMI_MAX_NUM_OF_TASKS); + +#if defined(CONFIG_FMAN_RESOURCE_ALLOCATION_ALGORITHM) && defined(CONFIG_FMAN_P3040_P4080_P5020) + /* Enable 14g w/ jumbo frames following HW suggestion. */ + FM_ConfigTotalFifoSize(p_LnxWrpFmDev->h_Dev, 128*KILOBYTE); +#endif + + if (FM_Init(p_LnxWrpFmDev->h_Dev) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM")); + + if (p_LnxWrpFmDev->err_irq == 0) { + FM_SetException(p_LnxWrpFmDev->h_Dev, e_FM_EX_DMA_BUS_ERROR,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_READ_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_SYSTEM_WRITE_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_DMA_FM_WRITE_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_FPM_STALL_ON_TASKS , FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_FPM_DOUBLE_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_IRAM_ECC,FALSE); + /* TODO: FmDisableRamsEcc assert for ramsEccOwners. 
+ * FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_MURAM_ECC,FALSE);*/ + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_QMI_DOUBLE_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_QMI_DEQ_FROM_UNKNOWN_PORTID,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_LIST_RAM_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_PIPELINE_ECC,FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_BMI_STATISTICS_RAM_ECC, FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_FPM_SINGLE_ECC, FALSE); + FM_SetException(p_LnxWrpFmDev->h_Dev,e_FM_EX_QMI_SINGLE_ECC, FALSE); + } + + if (p_LnxWrpFmDev->fmRtcBaseAddr) + { + t_FmRtcParams fmRtcParam; + + memset(&fmRtcParam, 0, sizeof(fmRtcParam)); + fmRtcParam.h_App = p_LnxWrpFmDev; + fmRtcParam.h_Fm = p_LnxWrpFmDev->h_Dev; + fmRtcParam.baseAddress = p_LnxWrpFmDev->fmRtcBaseAddr; + + if(!(p_LnxWrpFmDev->h_RtcDev = FM_RTC_Config(&fmRtcParam))) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-RTC")); + + if (FM_RTC_ConfigPeriod(p_LnxWrpFmDev->h_RtcDev, 10) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC")); + + if (FM_RTC_Init(p_LnxWrpFmDev->h_RtcDev) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, ("FM-RTC")); + } + + return E_OK; +} + +/* TODO: to be moved back here */ +extern void FreeFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev); + +static void FreeFmDev(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + if (!p_LnxWrpFmDev->active) + return; + + FreeFmPcdDev(p_LnxWrpFmDev); + + if (p_LnxWrpFmDev->h_RtcDev) + FM_RTC_Free(p_LnxWrpFmDev->h_RtcDev); + + if (p_LnxWrpFmDev->h_Dev) + FM_Free(p_LnxWrpFmDev->h_Dev); + + if (p_LnxWrpFmDev->h_MuramDev) + FM_MURAM_Free(p_LnxWrpFmDev->h_MuramDev); + + if (p_LnxWrpFmDev->fmRtcBaseAddr) + { + SYS_UnregisterIoMap(p_LnxWrpFmDev->fmRtcBaseAddr); + devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmRtcBaseAddr)); + __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmRtcPhysBaseAddr, p_LnxWrpFmDev->fmRtcMemSize); + } + SYS_UnregisterIoMap(p_LnxWrpFmDev->fmMuramBaseAddr); + devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmMuramBaseAddr)); + __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, p_LnxWrpFmDev->fmMuramPhysBaseAddr, p_LnxWrpFmDev->fmMuramMemSize); + SYS_UnregisterIoMap(p_LnxWrpFmDev->fmBaseAddr); + devm_iounmap(p_LnxWrpFmDev->dev, UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr)); + devm_release_mem_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->fmPhysBaseAddr, p_LnxWrpFmDev->fmMemSize); + if (p_LnxWrpFmDev->err_irq != 0) { + devm_free_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->err_irq, p_LnxWrpFmDev); + } + + devm_free_irq(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->irq, p_LnxWrpFmDev); +} + +/* FMan character device file operations */ +extern struct file_operations fm_fops; + +static int fm_probe(struct platform_device *of_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + + if ((p_LnxWrpFmDev = ReadFmDevTreeNode(of_dev)) == NULL) + return -EIO; + if (ConfigureFmDev(p_LnxWrpFmDev) != E_OK) + return -EIO; + if (InitFmDev(p_LnxWrpFmDev) != E_OK) + return -EIO; + + Sprint (p_LnxWrpFmDev->name, "%s%d", DEV_FM_NAME, p_LnxWrpFmDev->id); + + /* Register to the /dev for IOCTL API */ + /* Register dynamically a new major number for the character device: */ + if ((p_LnxWrpFmDev->major = register_chrdev(0, p_LnxWrpFmDev->name, &fm_fops)) <= 0) { + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Failed to allocate a major number for device \"%s\"", p_LnxWrpFmDev->name)); + return -EIO; + } + + /* Creating classes for FM */ + DBG(TRACE ,("class_create fm_class")); + p_LnxWrpFmDev->fm_class = 
class_create(THIS_MODULE, p_LnxWrpFmDev->name); + if (IS_ERR(p_LnxWrpFmDev->fm_class)) { + unregister_chrdev(p_LnxWrpFmDev->major, p_LnxWrpFmDev->name); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("class_create error fm_class")); + return -EIO; + } + + device_create(p_LnxWrpFmDev->fm_class, NULL, MKDEV(p_LnxWrpFmDev->major, DEV_FM_MINOR_BASE), NULL, + "fm%d", p_LnxWrpFmDev->id); + device_create(p_LnxWrpFmDev->fm_class, NULL, MKDEV(p_LnxWrpFmDev->major, DEV_FM_PCD_MINOR_BASE), NULL, + "fm%d-pcd", p_LnxWrpFmDev->id); + dev_set_drvdata(p_LnxWrpFmDev->dev, p_LnxWrpFmDev); + + /* create sysfs entries for stats and regs */ + if ( fm_sysfs_create(p_LnxWrpFmDev->dev) !=0 ) + { + FreeFmDev(p_LnxWrpFmDev); + REPORT_ERROR(MAJOR, E_INVALID_STATE, ("Unable to create sysfs entry - fm!!!")); + return -EIO; + } + + DBG(TRACE, ("FM%d probed", p_LnxWrpFmDev->id)); + +#if defined(CONFIG_FMAN_RESOURCE_ALLOCATION_ALGORITHM) + /* Precalculate resources for FMAN based on number of + * FMan ports available + */ + if(fm_set_active_fman_ports(of_dev, p_LnxWrpFmDev)!= 0) + return -EIO; + +#if defined(CONFIG_FMAN_P3040_P4080_P5020) + /* 128K MURAM for p3,p4 and p5 */ + if(fm_precalculate_fifosizes( + p_LnxWrpFmDev, + 128*KILOBYTE) + != 0) + return -EIO; +#else + /* for all other platforms: MURAM Space for fifosize=3/4 * MURAM_SIZE*/ + if(fm_precalculate_fifosizes( + p_LnxWrpFmDev, + CEIL_DIV((3*FM_MURAM_SIZE-1),4)) + != 0) + return -EIO; +#endif + if(fm_precalculate_open_dma( + p_LnxWrpFmDev, + BMI_MAX_NUM_OF_DMAS, /* max open dmas:dpaa_integration_ext.h */ + FM_DEFAULT_TX10G_OPENDMA, /* default TX 10g open dmas */ + FM_DEFAULT_RX10G_OPENDMA, /* default RX 10g open dmas */ + FM_10G_OPENDMA_MIN_TRESHOLD,/* TX 10g minimum treshold */ + FM_10G_OPENDMA_MIN_TRESHOLD)/* RX 10g minimum treshold */ + != 0) + return -EIO; + if(fm_precalculate_tnums( + p_LnxWrpFmDev, + BMI_MAX_NUM_OF_TASKS) /* max TNUMS: dpa integration file. 
*/ + != 0) + return -EIO; +#endif + + return 0; +} + +static int fm_remove(struct platform_device *of_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + struct device *dev; + + dev = &of_dev->dev; + p_LnxWrpFmDev = dev_get_drvdata(dev); + + fm_sysfs_destroy(dev); + + DBG(TRACE, ("destroy fm_class")); + device_destroy(p_LnxWrpFmDev->fm_class, MKDEV(p_LnxWrpFmDev->major, DEV_FM_MINOR_BASE)); + device_destroy(p_LnxWrpFmDev->fm_class, MKDEV(p_LnxWrpFmDev->major, DEV_FM_PCD_MINOR_BASE)); + class_destroy(p_LnxWrpFmDev->fm_class); + + /* Destroy chardev */ + unregister_chrdev(p_LnxWrpFmDev->major, p_LnxWrpFmDev->name); + + FreeFmDev(p_LnxWrpFmDev); + + DestroyFmDev(p_LnxWrpFmDev); + + dev_set_drvdata(dev, NULL); + + return 0; +} + +static const struct of_device_id fm_match[] = { + { + .compatible = "fsl,fman" + }, + {} +}; +#ifndef MODULE +MODULE_DEVICE_TABLE(of, fm_match); +#endif /* !MODULE */ + +static struct platform_driver fm_driver = { + .driver = { + .name = "fsl-fman", + .of_match_table = fm_match, + .owner = THIS_MODULE, + }, + .probe = fm_probe, + .remove = fm_remove, +}; + +t_Handle LNXWRP_FM_Init(void) +{ + memset(&lnxWrpFm, 0, sizeof(lnxWrpFm)); + mutex_init(&lnxwrp_mutex); + + /* Register to the DTB for basic FM API */ + platform_driver_register(&fm_driver); + + return &lnxWrpFm; +} + +t_Error LNXWRP_FM_Free(t_Handle h_LnxWrpFm) +{ + platform_driver_unregister(&fm_driver); + mutex_destroy(&lnxwrp_mutex); + + return E_OK; +} + + +struct fm * fm_bind(struct device *fm_dev) +{ + return (struct fm *)(dev_get_drvdata(get_device(fm_dev))); +} +EXPORT_SYMBOL(fm_bind); + +void fm_unbind(struct fm *fm) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm; + + put_device(p_LnxWrpFmDev->dev); +} +EXPORT_SYMBOL(fm_unbind); + +struct resource * fm_get_mem_region(struct fm *fm) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm; + + return p_LnxWrpFmDev->res; +} +EXPORT_SYMBOL(fm_get_mem_region); + +void * fm_get_handle(struct fm *fm) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm; + + return (void *)p_LnxWrpFmDev->h_Dev; +} +EXPORT_SYMBOL(fm_get_handle); + +void * fm_get_rtc_handle(struct fm *fm) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev*)fm; + + return (void *)p_LnxWrpFmDev->h_RtcDev; +} +EXPORT_SYMBOL(fm_get_rtc_handle); + +struct fm_port * fm_port_bind (struct device *fm_port_dev) +{ + return (struct fm_port *)(dev_get_drvdata(get_device(fm_port_dev))); +} +EXPORT_SYMBOL(fm_port_bind); + +void fm_port_unbind(struct fm_port *port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port; + + put_device(p_LnxWrpFmPortDev->dev); +} +EXPORT_SYMBOL(fm_port_unbind); + +void * fm_port_get_handle(struct fm_port *port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port; + + return (void *)p_LnxWrpFmPortDev->h_Dev; +} +EXPORT_SYMBOL(fm_port_get_handle); + +void fm_port_get_base_addr(const struct fm_port *port, uint64_t *base_addr) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)port; + + *base_addr = p_LnxWrpFmPortDev->settings.param.baseAddr; +} +EXPORT_SYMBOL(fm_port_get_base_addr); + +void fm_port_pcd_bind (struct fm_port *port, struct fm_port_pcd_param *params) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port; + + p_LnxWrpFmPortDev->pcd_owner_params.cba = params->cba; + p_LnxWrpFmPortDev->pcd_owner_params.cbf = params->cbf; + p_LnxWrpFmPortDev->pcd_owner_params.dev = params->dev; +} +EXPORT_SYMBOL(fm_port_pcd_bind); + +int fm_get_tx_port_channel(struct fm_port *port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = 
(t_LnxWrpFmPortDev*)port; + + return p_LnxWrpFmPortDev->txCh; +} +EXPORT_SYMBOL(fm_get_tx_port_channel); + +int fm_port_enable (struct fm_port *port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port; + + FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev); + + return 0; +} +EXPORT_SYMBOL(fm_port_enable); + +void fm_port_disable(struct fm_port *port) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)port; + + FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev); +} +EXPORT_SYMBOL(fm_port_disable); + +void fm_mutex_lock(void) +{ + mutex_lock(&lnxwrp_mutex); +} +EXPORT_SYMBOL(fm_mutex_lock); + +void fm_mutex_unlock(void) +{ + mutex_unlock(&lnxwrp_mutex); +} +EXPORT_SYMBOL(fm_mutex_unlock); + +static t_Handle h_FmLnxWrp; + +static int __init __cold fm_load (void) +{ + if ((h_FmLnxWrp = LNXWRP_FM_Init()) == NULL) + { + printk("Failed to init FM wrapper!\n"); + return -ENODEV; + } + + printk (KERN_INFO "Freescale FM module ("__DATE__ ":"__TIME__")\n"); + + return 0; +} + +static void __exit __cold fm_unload (void) +{ + if (h_FmLnxWrp) + LNXWRP_FM_Free(h_FmLnxWrp); +} + +module_init (fm_load); +module_exit (fm_unload); --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_fm.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_fm.h @@ -0,0 +1,257 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_fm.h + + @Author Shlomi Gridish + + @Description FM Linux wrapper functions. 
+ +*/ + +#ifndef __LNXWRP_FM_H__ +#define __LNXWRP_FM_H__ + +#include /* struct qman_fq */ + +#include "std_ext.h" +#include "error_ext.h" +#include "list_ext.h" + +#include "lnxwrp_fm_ext.h" + +#define __ERR_MODULE__ MODULE_FM + +#define FM_MAX_NUM_OF_ADV_SETTINGS 10 + +#define LNXWRP_FM_NUM_OF_SHARED_PROFILES 16 + +#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES) +#define FM_10G_OPENDMA_MIN_TRESHOLD 8 /* 10g minimum treshold if only HC is enabled and no OH port enabled */ +#define FM_OPENDMA_RX_TX_RAPORT 2 /* RX = 2*TX */ +#else +#define FM_10G_OPENDMA_MIN_TRESHOLD 7 /* 10g minimum treshold if 7 OH ports are enabled */ +#define FM_OPENDMA_RX_TX_RAPORT 1 /* RX = TX */ +#endif +#define FM_DEFAULT_TX10G_OPENDMA 8 /* default TX 10g open dmas */ +#define FM_DEFAULT_RX10G_OPENDMA 8 /* default RX 10g open dmas */ + +typedef enum { + e_NO_PCD = 0, + e_FM_PCD_3_TUPLE +} e_LnxWrpFmPortPcdDefUseCase; + + +typedef struct t_FmTestFq { + struct qman_fq fq_base; + t_Handle h_Arg; +} t_FmTestFq; + +typedef struct { + uint8_t id; /* sw port id, see SW_PORT_ID_TO_HW_PORT_ID() in fm_common.h */ + int minor; + char name[20]; + bool active; + uint64_t phys_baseAddr; + uint64_t baseAddr; /* Port's *virtual* address */ + uint32_t memSize; + t_WrpFmPortDevSettings settings; + uint8_t totalNumOfSchemes; + uint8_t schemesBase; + uint8_t numOfSchemesUsed; + uint32_t pcdBaseQ; + uint16_t pcdNumOfQs; + struct fm_port_pcd_param pcd_owner_params; + e_LnxWrpFmPortPcdDefUseCase defPcd; + t_Handle h_DefNetEnv; + t_Handle h_Schemes[FM_PCD_KG_NUM_OF_SCHEMES]; + t_FmPortBufferPrefixContent buffPrefixContent; + t_Handle h_Dev; + t_Handle h_LnxWrpFmDev; + uint16_t txCh; + struct device *dev; + struct device_attribute *dev_attr_stats; + struct device_attribute *dev_attr_regs; +} t_LnxWrpFmPortDev; + +typedef struct { + uint8_t id; + bool active; + uint64_t baseAddr; + uint32_t memSize; + t_WrpFmMacDevSettings settings; + t_Handle h_Dev; + t_Handle h_LnxWrpFmDev; +} t_LnxWrpFmMacDev; + +/* information about all active ports for an FMan. + * !Some ports may be disabled by u-boot, thus will not be available */ +struct fm_active_ports { + uint32_t num_oh_ports; + uint32_t num_tx_ports; + uint32_t num_rx_ports; + uint32_t num_tx25_ports; + uint32_t num_rx25_ports; + uint32_t num_tx10_ports; + uint32_t num_rx10_ports; +}; + +/* FMan resources precalculated at fm probe based + * on available FMan port. 
*/ +struct fm_resource_settings { + /* buffers - fifo sizes */ + uint32_t tx1g_num_buffers; + uint32_t rx1g_num_buffers; + uint32_t tx2g5_num_buffers; /* Not supported yet by LLD */ + uint32_t rx2g5_num_buffers; /* Not supported yet by LLD */ + uint32_t tx10g_num_buffers; + uint32_t rx10g_num_buffers; + uint32_t oh_num_buffers; + uint32_t shared_ext_buffers; + + /* open DMAs */ + uint32_t tx_1g_dmas; + uint32_t rx_1g_dmas; + uint32_t tx_2g5_dmas; /* Not supported yet by LLD */ + uint32_t rx_2g5_dmas; /* Not supported yet by LLD */ + uint32_t tx_10g_dmas; + uint32_t rx_10g_dmas; + uint32_t oh_dmas; + uint32_t shared_ext_open_dma; + + /* Tnums */ + uint32_t tx_1g_tnums; + uint32_t rx_1g_tnums; + uint32_t tx_2g5_tnums; /* Not supported yet by LLD */ + uint32_t rx_2g5_tnums; /* Not supported yet by LLD */ + uint32_t tx_10g_tnums; + uint32_t rx_10g_tnums; + uint32_t oh_tnums; + uint32_t shared_ext_tnums; +}; + +typedef struct { + uint8_t id; + char name[10]; + bool active; + bool pcdActive; + bool prsActive; + bool kgActive; + bool ccActive; + bool plcrActive; + e_LnxWrpFmPortPcdDefUseCase defPcd; + uint32_t usedSchemes; + uint8_t totalNumOfSharedSchemes; + uint8_t sharedSchemesBase; + uint8_t numOfSchemesUsed; + uint8_t defNetEnvId; + uint64_t fmPhysBaseAddr; + uint64_t fmBaseAddr; + uint32_t fmMemSize; + uint64_t fmMuramPhysBaseAddr; + uint64_t fmMuramBaseAddr; + uint32_t fmMuramMemSize; + uint64_t fmRtcPhysBaseAddr; + uint64_t fmRtcBaseAddr; + uint32_t fmRtcMemSize; + int irq; + int err_irq; + t_WrpFmDevSettings fmDevSettings; + t_WrpFmPcdDevSettings fmPcdDevSettings; + t_Handle h_Dev; + uint16_t hcCh; + + t_Handle h_MuramDev; + t_Handle h_PcdDev; + t_Handle h_RtcDev; + + t_LnxWrpFmPortDev hcPort; + t_LnxWrpFmPortDev opPorts[FM_MAX_NUM_OF_OH_PORTS-1]; + t_LnxWrpFmPortDev rxPorts[FM_MAX_NUM_OF_RX_PORTS]; + t_LnxWrpFmPortDev txPorts[FM_MAX_NUM_OF_TX_PORTS]; + t_LnxWrpFmMacDev macs[FM_MAX_NUM_OF_MACS]; + struct fm_active_ports fm_active_ports_info; + struct fm_resource_settings fm_resource_settings_info; + + struct device *dev; + struct resource *res; + int major; + struct class *fm_class; + struct device_attribute *dev_attr_stats; + struct device_attribute *dev_attr_regs; + + struct device_attribute *dev_pcd_attr_stats; + struct device_attribute *dev_pcd_attr_regs; + + struct qman_fq *hc_tx_conf_fq, *hc_tx_err_fq, *hc_tx_fq; +} t_LnxWrpFmDev; + +typedef struct { + t_LnxWrpFmDev *p_FmDevs[INTG_MAX_NUM_OF_FM]; +} t_LnxWrpFm; +#define LNXWRP_FM_OBJECT(ptr) LIST_OBJECT(ptr, t_LnxWrpFm, fms[((t_LnxWrpFmDev *)ptr)->id]) + + +t_Error LnxwrpFmIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat); +t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd, unsigned long arg, bool compat); + + +static __inline__ t_Error AllocSchemesForPort(t_LnxWrpFmDev *p_LnxWrpFmDev, uint8_t numSchemes, uint8_t *p_BaseSchemeNum) +{ + uint32_t schemeMask; + uint8_t i; + + if (!numSchemes) + RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG); + + schemeMask = 0x80000000; + *p_BaseSchemeNum = 0xff; + + for (i=0; schemeMask && numSchemes; schemeMask>>=1, i++) + if ((p_LnxWrpFmDev->usedSchemes & schemeMask) == 0) + { + p_LnxWrpFmDev->usedSchemes |= schemeMask; + numSchemes--; + if (*p_BaseSchemeNum==0xff) + *p_BaseSchemeNum = i; + } + else if (*p_BaseSchemeNum!=0xff) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("Fragmentation on schemes array!!!")); + + if (numSchemes) + RETURN_ERROR(MINOR, E_FULL, ("schemes!!!")); + return E_OK; +} + + +#endif /* __LNXWRP_FM_H__ */ --- 
linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_fm_port.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_fm_port.c @@ -0,0 +1,1183 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_fm_port.c + + @Description FMD wrapper - FMan port functions. 
+ +*/ + +#include +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ +#include +#include +#include +#include +#include +#include +#include + +#include "sprint_ext.h" +#include "fm_port_ext.h" +#include "fm_ioctls.h" +#include "lnxwrp_resources.h" +#include "lnxwrp_sysfs_fm_port.h" + +/* TODO: duplicated, see lnxwrp_fm.c */ +#define ADD_ADV_CONFIG_NO_RET(_func, _param)\ +do {\ + if (i < max) {\ + p_Entry = &p_Entrys[i];\ + p_Entry->p_Function = _func;\ + _param\ + i++;\ + } else {\ + REPORT_ERROR(MAJOR, E_INVALID_VALUE,\ + ("Number of advanced-configuration entries exceeded"));\ + } \ +} while (0) + + +static volatile int hcFrmRcv/* = 0 */; +static spinlock_t lock; + +static enum qman_cb_dqrr_result qm_tx_conf_dqrr_cb(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry + *dq) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = ((t_FmTestFq *) fq)->h_Arg; + unsigned long flags; + + FM_PCD_HcTxConf(p_LnxWrpFmDev->h_PcdDev, (t_DpaaFD *)&dq->fd); + spin_lock_irqsave(&lock, flags); + hcFrmRcv--; + spin_unlock_irqrestore(&lock, flags); + + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result qm_tx_dqrr_cb(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + return qman_cb_dqrr_consume; +} + +static void qm_err_cb(struct qman_portal *portal, + struct qman_fq *fq, const struct qm_mr_entry *msg) +{ + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); +} + +static struct qman_fq *FqAlloc(t_LnxWrpFmDev * p_LnxWrpFmDev, + uint32_t fqid, + uint32_t flags, uint16_t channel, uint8_t wq) +{ + int _errno; + struct qman_fq *fq = NULL; + t_FmTestFq *p_FmtFq; + struct qm_mcc_initfq initfq; + + p_FmtFq = (t_FmTestFq *) XX_Malloc(sizeof(t_FmTestFq)); + if (!p_FmtFq) { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!")); + return NULL; + } + + p_FmtFq->fq_base.cb.dqrr = + (QMAN_FQ_FLAG_NO_ENQUEUE ? 
qm_tx_conf_dqrr_cb : + qm_tx_dqrr_cb); + p_FmtFq->fq_base.cb.ern = qm_err_cb; + p_FmtFq->fq_base.cb.dc_ern = qm_err_cb; + /* p_FmtFq->fq_base.cb.fqs = qm_err_cb; */ + /* qm_err_cb wrongly called when the FQ is parked */ + p_FmtFq->fq_base.cb.fqs = NULL; + p_FmtFq->h_Arg = (t_Handle) p_LnxWrpFmDev; + if (fqid == 0) { + flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; + flags &= ~QMAN_FQ_FLAG_NO_MODIFY; + } else { + flags &= ~QMAN_FQ_FLAG_DYNAMIC_FQID; + } + + if (qman_create_fq(fqid, flags, &p_FmtFq->fq_base)) { + REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj - qman_new_fq!!!")); + XX_Free(p_FmtFq); + return NULL; + } + fq = &p_FmtFq->fq_base; + + if (!(flags & QMAN_FQ_FLAG_NO_MODIFY)) { + initfq.we_mask = QM_INITFQ_WE_DESTWQ; + initfq.fqd.dest.channel = channel; + initfq.fqd.dest.wq = wq; + + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_NO_MEMORY, + ("FQ obj - qman_init_fq!!!")); + qman_destroy_fq(fq, 0); + XX_Free(p_FmtFq); + return NULL; + } + } + + DBG(TRACE, + ("fqid %d, flags 0x%08x, channel %d, wq %d", qman_fq_fqid(fq), + flags, channel, wq)); + + return fq; +} + +static void FqFree(struct qman_fq *fq) +{ + int _errno; + + _errno = qman_retire_fq(fq, NULL); + if (unlikely(_errno < 0)) + printk(KERN_WARNING "qman_retire_fq(%u) = %d\n", qman_fq_fqid(fq), _errno); + + _errno = qman_oos_fq(fq); + if (unlikely(_errno < 0)) + printk(KERN_WARNING "qman_oos_fq(%u) = %d\n", qman_fq_fqid(fq), _errno); + + qman_destroy_fq(fq, 0); + XX_Free((t_FmTestFq *) fq); +} + +static t_Error QmEnqueueCB(t_Handle h_Arg, void *p_Fd) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_Arg; + int _errno, timeout = 1000000; + unsigned long flags; + + ASSERT_COND(p_LnxWrpFmDev); + + spin_lock_irqsave(&lock, flags); + hcFrmRcv++; + spin_unlock_irqrestore(&lock, flags); + + _errno = qman_enqueue(p_LnxWrpFmDev->hc_tx_fq, (struct qm_fd *) p_Fd, + 0); + if (_errno) + RETURN_ERROR(MINOR, E_INVALID_STATE, + ("qman_enqueue() failed")); + + while (hcFrmRcv && --timeout) { + udelay(1); + cpu_relax(); + } + if (timeout == 0) { + dump_stack(); + RETURN_ERROR(MINOR, E_WRITE_FAILED, + ("timeout waiting for Tx confirmation")); + return E_WRITE_FAILED; + } + + return E_OK; +} + +static t_LnxWrpFmPortDev *ReadFmPortDevTreeNode(struct platform_device + *of_dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + struct device_node *fm_node, *port_node; + struct resource res; + const uint32_t *uint32_prop; + int _errno = 0, lenp; +#ifdef CONFIG_FMAN_P1023 + static unsigned char have_oh_port/* = 0 */; +#endif + + port_node = of_node_get(of_dev->dev.of_node); + + /* Get the FM node */ + fm_node = of_get_parent(port_node); + if (unlikely(fm_node == NULL)) { + REPORT_ERROR(MAJOR, E_NO_DEVICE, + ("of_get_parent() = %d", _errno)); + return NULL; + } + + p_LnxWrpFmDev = + dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + of_node_put(fm_node); + + /* if fm_probe() failed, no point in going further with port probing */ + if (p_LnxWrpFmDev == NULL) + return NULL; + + uint32_prop = + (uint32_t *) of_get_property(port_node, "cell-index", &lenp); + if (unlikely(uint32_prop == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", + port_node->full_name)); + return NULL; + } + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + if (of_device_is_compatible(port_node, "fsl,fman-port-oh")) { + if (unlikely(*uint32_prop >= FM_MAX_NUM_OF_OH_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, 
cell-index) failed", + port_node->full_name)); + return NULL; + } + +#ifdef CONFIG_FMAN_P1023 + /* Beware, this can be done when there is only + one FMan to be initialized */ + if (!have_oh_port) { + have_oh_port = 1; /* first OP/HC port + is used for host command */ +#else + /* Here it is hardcoded the use of the OH port 1 + (with cell-index 0) */ + if (*uint32_prop == 0) { +#endif + p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort; + p_LnxWrpFmPortDev->id = 0; + /* + p_LnxWrpFmPortDev->id = *uint32_prop-1; + p_LnxWrpFmPortDev->id = *uint32_prop; + */ + p_LnxWrpFmPortDev->settings.param.portType = + e_FM_PORT_TYPE_OH_HOST_COMMAND; + } else { + p_LnxWrpFmPortDev = + &p_LnxWrpFmDev->opPorts[*uint32_prop - 1]; + p_LnxWrpFmPortDev->id = *uint32_prop - 1; + p_LnxWrpFmPortDev->settings.param.portType = + e_FM_PORT_TYPE_OH_OFFLINE_PARSING; + } + p_LnxWrpFmPortDev->settings.param.portId = *uint32_prop; + + uint32_prop = + (uint32_t *) of_get_property(port_node, + "fsl,qman-channel-id", + &lenp); + if (uint32_prop == NULL) { + /* + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("missing fsl,qman-channel-id")); + */ + XX_Print("FM warning: missing fsl,qman-channel-id" + " for OH port.\n"); + return NULL; + } + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + p_LnxWrpFmPortDev->txCh = *uint32_prop; + + p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams. + qmChannel = p_LnxWrpFmPortDev->txCh; + } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-tx") || + of_device_is_compatible(port_node, "fsl,fman-port-10g-tx")) { + if (unlikely(*uint32_prop >= FM_MAX_NUM_OF_TX_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", + port_node->full_name)); + return NULL; + } + if (of_device_is_compatible + (port_node, "fsl,fman-port-10g-tx")) + p_LnxWrpFmPortDev = + &p_LnxWrpFmDev->txPorts[*uint32_prop + + FM_MAX_NUM_OF_1G_TX_PORTS]; + else + p_LnxWrpFmPortDev = + &p_LnxWrpFmDev->txPorts[*uint32_prop]; + + p_LnxWrpFmPortDev->id = *uint32_prop; + p_LnxWrpFmPortDev->settings.param.portId = + p_LnxWrpFmPortDev->id; + if (of_device_is_compatible + (port_node, "fsl,fman-port-10g-tx")) + p_LnxWrpFmPortDev->settings.param.portType = + e_FM_PORT_TYPE_TX_10G; + else + p_LnxWrpFmPortDev->settings.param.portType = + e_FM_PORT_TYPE_TX; + + uint32_prop = + (uint32_t *) of_get_property(port_node, + "fsl,qman-channel-id", + &lenp); + if (uint32_prop == NULL) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("missing fsl,qman-channel-id")); + return NULL; + } + if (WARN_ON(lenp != sizeof(uint32_t))) + return NULL; + p_LnxWrpFmPortDev->txCh = *uint32_prop; + p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams. 
+ qmChannel = p_LnxWrpFmPortDev->txCh; + } else if (of_device_is_compatible(port_node, "fsl,fman-port-1g-rx") || + of_device_is_compatible(port_node, "fsl,fman-port-10g-rx")) { + if (unlikely(*uint32_prop >= FM_MAX_NUM_OF_RX_PORTS)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_get_property(%s, cell-index) failed", + port_node->full_name)); + return NULL; + } + if (of_device_is_compatible + (port_node, "fsl,fman-port-10g-rx")) + p_LnxWrpFmPortDev = + &p_LnxWrpFmDev->rxPorts[*uint32_prop + + FM_MAX_NUM_OF_1G_RX_PORTS]; + else + p_LnxWrpFmPortDev = + &p_LnxWrpFmDev->rxPorts[*uint32_prop]; + + p_LnxWrpFmPortDev->id = *uint32_prop; + p_LnxWrpFmPortDev->settings.param.portId = + p_LnxWrpFmPortDev->id; + if (of_device_is_compatible + (port_node, "fsl,fman-port-10g-rx")) + p_LnxWrpFmPortDev->settings.param.portType = + e_FM_PORT_TYPE_RX_10G; + else + p_LnxWrpFmPortDev->settings.param.portType = + e_FM_PORT_TYPE_RX; + + if (p_LnxWrpFmDev->pcdActive) + p_LnxWrpFmPortDev->defPcd = p_LnxWrpFmDev->defPcd; + } else { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal port type")); + return NULL; + } + + _errno = of_address_to_resource(port_node, 0, &res); + if (unlikely(_errno < 0)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, + ("of_address_to_resource() = %d", _errno)); + return NULL; + } + + p_LnxWrpFmPortDev->dev = &of_dev->dev; + p_LnxWrpFmPortDev->baseAddr = 0; + p_LnxWrpFmPortDev->phys_baseAddr = res.start; + p_LnxWrpFmPortDev->memSize = res.end + 1 - res.start; + p_LnxWrpFmPortDev->settings.param.h_Fm = p_LnxWrpFmDev->h_Dev; + p_LnxWrpFmPortDev->h_LnxWrpFmDev = (t_Handle) p_LnxWrpFmDev; + + of_node_put(port_node); + + p_LnxWrpFmPortDev->active = TRUE; + +#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES) + /* for performance mode no OH port available. */ + if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_OH_OFFLINE_PARSING) + p_LnxWrpFmPortDev->active = FALSE; +#endif + + return p_LnxWrpFmPortDev; +} + +static t_Error ConfigureFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = + (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + struct resource *dev_res; + + if (!p_LnxWrpFmPortDev->active) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("FM port not configured!!!")); + + dev_res = + __devm_request_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, + p_LnxWrpFmPortDev->phys_baseAddr, + p_LnxWrpFmPortDev->memSize, + "fman-port-hc"); + if (unlikely(dev_res == NULL)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("__devm_request_region() failed")); + p_LnxWrpFmPortDev->baseAddr = + PTR_TO_UINT(devm_ioremap + (p_LnxWrpFmDev->dev, + p_LnxWrpFmPortDev->phys_baseAddr, + p_LnxWrpFmPortDev->memSize)); + if (unlikely(p_LnxWrpFmPortDev->baseAddr == 0)) + REPORT_ERROR(MAJOR, E_INVALID_STATE, + ("devm_ioremap() failed")); + + p_LnxWrpFmPortDev->settings.param.baseAddr = + p_LnxWrpFmPortDev->baseAddr; + + return E_OK; +} + +static t_Error InitFmPort3TupleDefPcd(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = + (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + t_FmPcdNetEnvParams *p_netEnvParam = NULL; + t_FmPcdKgSchemeParams *p_schemeParam = NULL; + t_FmPortPcdParams pcdParam; + t_FmPortPcdPrsParams prsParam; + t_FmPortPcdKgParams kgParam; + uint8_t i, j; + + if (!p_LnxWrpFmDev->kgActive) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("keygen must be enabled for 3-tuple PCD!")); + + if (!p_LnxWrpFmDev->prsActive) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("parser must be enabled for 3-tuple PCD!")); + + if (p_LnxWrpFmPortDev->pcdNumOfQs < 9) + 
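+	/* The two schemes built below consume queues pcdBaseQ..pcdBaseQ+8:
+	 * scheme 0 hash-distributes IPv4 {protocol, src IP, dst IP} over the
+	 * 8 queues starting at pcdBaseQ, and scheme 1 (the garbage collector)
+	 * uses the single queue at pcdBaseQ + 8; that is what the queue-count
+	 * check above is reserving room for. */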
RETURN_ERROR(MINOR, E_INVALID_VALUE, + ("Need to save at least 18 queues for" + "3-tuple PCD!!!")); + + p_LnxWrpFmPortDev->totalNumOfSchemes = + p_LnxWrpFmPortDev->numOfSchemesUsed = 2; + + if (AllocSchemesForPort + (p_LnxWrpFmDev, p_LnxWrpFmPortDev->totalNumOfSchemes, + &p_LnxWrpFmPortDev->schemesBase) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, + ("No schemes for Rx or OP port for" + " 3-tuple PCD!!!")); + + p_netEnvParam = kzalloc(sizeof(*p_netEnvParam), GFP_KERNEL); + if (!p_netEnvParam) { + RETURN_ERROR(MAJOR, E_NO_MEMORY, + ("Failed to allocate p_netEnvParam")); + } + /* set netEnv */ + p_netEnvParam->numOfDistinctionUnits = 2; + p_netEnvParam->units[0].hdrs[0].hdr = + HEADER_TYPE_IPv4; /* no special options */ + p_netEnvParam->units[1].hdrs[0].hdr = HEADER_TYPE_ETH; + p_LnxWrpFmPortDev->h_DefNetEnv = + FM_PCD_SetNetEnvCharacteristics(p_LnxWrpFmDev->h_PcdDev, + p_netEnvParam); + kfree(p_netEnvParam); + if (!p_LnxWrpFmPortDev->h_DefNetEnv) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM PCD!")); + + p_schemeParam = kmalloc(sizeof(*p_schemeParam), GFP_KERNEL); + if (!p_schemeParam) { + RETURN_ERROR(MAJOR, E_NO_MEMORY, + ("Failed to allocate p_schemeParam")); + } + for (i = 0; i < p_LnxWrpFmPortDev->numOfSchemesUsed; i++) { + memset(p_schemeParam, 0, sizeof(*p_schemeParam)); + p_schemeParam->modify = FALSE; + p_schemeParam->id.relativeSchemeId = + i + p_LnxWrpFmPortDev->schemesBase; + p_schemeParam->alwaysDirect = FALSE; + p_schemeParam->netEnvParams.h_NetEnv = + p_LnxWrpFmPortDev->h_DefNetEnv; + p_schemeParam->schemeCounter.update = TRUE; + p_schemeParam->schemeCounter.value = 0; + + switch (i) { + case (0): /* catch IPv4 */ + p_schemeParam->netEnvParams.numOfDistinctionUnits = 1; + p_schemeParam->netEnvParams.unitIds[0] = 0; + p_schemeParam->baseFqid = p_LnxWrpFmPortDev->pcdBaseQ; + p_schemeParam->nextEngine = e_FM_PCD_DONE; + p_schemeParam->numOfUsedExtractedOrs = 0; + p_schemeParam->useHash = TRUE; + p_schemeParam->keyExtractAndHashParams. + numOfUsedExtracts = 3; + for (j = 0; + j < + p_schemeParam->keyExtractAndHashParams. + numOfUsedExtracts; j++) { + p_schemeParam->keyExtractAndHashParams. + extractArray[j].type = + e_FM_PCD_EXTRACT_BY_HDR; + p_schemeParam->keyExtractAndHashParams. + extractArray[j].extractByHdr.hdr = + HEADER_TYPE_IPv4; + p_schemeParam->keyExtractAndHashParams. + extractArray[j].extractByHdr. + ignoreProtocolValidation = FALSE; + p_schemeParam->keyExtractAndHashParams. + extractArray[j].extractByHdr.type = + e_FM_PCD_EXTRACT_FULL_FIELD; + } + p_schemeParam->keyExtractAndHashParams. + extractArray[0].extractByHdr.extractByHdrType. + fullField.ipv4 = NET_HEADER_FIELD_IPv4_PROTO; + p_schemeParam->keyExtractAndHashParams. + extractArray[1].extractByHdr.extractByHdrType. + fullField.ipv4 = NET_HEADER_FIELD_IPv4_SRC_IP; + p_schemeParam->keyExtractAndHashParams. + extractArray[2].extractByHdr.extractByHdrType. + fullField.ipv4 = NET_HEADER_FIELD_IPv4_DST_IP; + + if (p_schemeParam->useHash) { + p_schemeParam->keyExtractAndHashParams. + privateDflt0 = 0x01020304; + p_schemeParam->keyExtractAndHashParams. + privateDflt1 = 0x11121314; + p_schemeParam->keyExtractAndHashParams. + numOfUsedDflts = + FM_PCD_KG_NUM_OF_DEFAULT_GROUPS; + for (j = 0; + j < FM_PCD_KG_NUM_OF_DEFAULT_GROUPS; + j++) { + /* all types */ + p_schemeParam->keyExtractAndHashParams.dflts[j].type = + (e_FmPcdKgKnownFieldsDfltTypes) j; + p_schemeParam-> + keyExtractAndHashParams. + dflts[j].dfltSelect = + e_FM_PCD_KG_DFLT_GBL_0; + } + p_schemeParam->keyExtractAndHashParams. 
+ numOfUsedMasks = 0; + p_schemeParam->keyExtractAndHashParams. + hashShift = 0; + p_schemeParam->keyExtractAndHashParams. + hashDistributionNumOfFqids = 8; + } + break; + + case (1): /* Garbage collector */ + p_schemeParam->netEnvParams.numOfDistinctionUnits = 0; + p_schemeParam->baseFqid = + p_LnxWrpFmPortDev->pcdBaseQ + 8; + break; + + default: + break; + } + + p_LnxWrpFmPortDev->h_Schemes[i] = + FM_PCD_KgSetScheme(p_LnxWrpFmDev->h_PcdDev, + p_schemeParam); + if (!p_LnxWrpFmPortDev->h_Schemes[i]) { + kfree(p_schemeParam); + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, + ("FM_PCD_KgSetScheme failed")); + } + } + kfree(p_schemeParam); + + /* initialize PCD parameters */ + memset(&pcdParam, 0, sizeof(t_FmPortPcdParams)); + pcdParam.h_NetEnv = p_LnxWrpFmPortDev->h_DefNetEnv; + pcdParam.pcdSupport = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG; + + /* initialize Keygen parameters */ + memset(&prsParam, 0, sizeof(t_FmPortPcdPrsParams)); + + prsParam.parsingOffset = 0; + prsParam.firstPrsHdr = HEADER_TYPE_ETH; + pcdParam.p_PrsParams = &prsParam; + + /* initialize Parser parameters */ + memset(&kgParam, 0, sizeof(t_FmPortPcdKgParams)); + kgParam.numOfSchemes = p_LnxWrpFmPortDev->numOfSchemesUsed; + for (i = 0; i < kgParam.numOfSchemes; i++) + kgParam.h_Schemes[i] = p_LnxWrpFmPortDev->h_Schemes[i]; + + pcdParam.p_KgParams = &kgParam; + + return FM_PORT_SetPCD(p_LnxWrpFmPortDev->h_Dev, &pcdParam); +} + +static t_Error InitFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ +#define MY_ADV_CONFIG_CHECK_END \ + RETURN_ERROR(MAJOR, E_INVALID_SELECTION,\ + ("Advanced configuration routine"));\ + if (errCode != E_OK)\ + RETURN_ERROR(MAJOR, errCode, NO_MSG);\ + } + + int i = 0; + + if (!p_LnxWrpFmPortDev->active || p_LnxWrpFmPortDev->h_Dev) + return E_INVALID_STATE; + + p_LnxWrpFmPortDev->h_Dev = + FM_PORT_Config(&p_LnxWrpFmPortDev->settings.param); + if (p_LnxWrpFmPortDev->h_Dev == NULL) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM-port")); + + if ((p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX_10G) + || (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX)) { + t_Error errCode = E_OK; + errCode = + FM_PORT_ConfigDeqHighPriority(p_LnxWrpFmPortDev->h_Dev, + TRUE); + if (errCode != E_OK) + RETURN_ERROR(MAJOR, errCode, NO_MSG); +#ifdef FM_QMI_DEQ_OPTIONS_SUPPORT + errCode = + FM_PORT_ConfigDeqPrefetchOption(p_LnxWrpFmPortDev->h_Dev, + e_FM_PORT_DEQ_FULL_PREFETCH); + if (errCode + != E_OK) + RETURN_ERROR(MAJOR, errCode, NO_MSG); +#endif /* FM_QMI_DEQ_OPTIONS_SUPPORT */ + } + +/* Call the driver's advanced configuration routines, if requested: + Compare the function pointer of each entry to the available routines, + and invoke the matching routine with proper casting of arguments. */ + while (p_LnxWrpFmPortDev->settings.advConfig[i].p_Function + && (i < FM_MAX_NUM_OF_ADV_SETTINGS)) { + ADV_CONFIG_CHECK_START(& + (p_LnxWrpFmPortDev->settings. + advConfig[i])) + + ADV_CONFIG_CHECK(p_LnxWrpFmPortDev->h_Dev, + FM_PORT_ConfigBufferPrefixContent, + PARAMS(1, + (t_FmPortBufferPrefixContent + *))) + + MY_ADV_CONFIG_CHECK_END + /* Advance to next advanced configuration entry */ + i++; + } + + if (FM_PORT_Init(p_LnxWrpFmPortDev->h_Dev) != E_OK) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); +#if defined(CONFIG_FMAN_RESOURCE_ALLOCATION_ALGORITHM) + /* even if these functions return w/ error, do not crash kernel. + Do not return anything because the container function is not + linux complient (it should return -EIO). 
*/ + fm_set_precalculate_fifosize(p_LnxWrpFmPortDev); + fm_set_precalculate_open_dma(p_LnxWrpFmPortDev); + fm_set_precalculate_tnums(p_LnxWrpFmPortDev); +#endif + +/* FMan Fifo sizes behind the scene": + * Using the following formulae (*), under a set of simplifying assumptions (.): + * . all ports are configured in Normal Mode (rather than Independent Mode) + * . the DPAA Eth driver allocates buffers of size: + * . MAXFRM + NET_IP_ALIGN + DPA_PRIV_DATA_SIZE + DPA_PARSE_RESULTS_SIZE + * + DPA_HASH_RESULTS_SIZE, i.e.: + * MAXFRM + 2 + 16 + sizeof(t_FmPrsResult) + 16, i.e.: + * MAXFRM + 66 + * . excessive buffer pools not accounted for + * + * * for Rx ports on P4080: + * . IFSZ = ceil(max(FMBM_EBMPI[PBS]) / 256) * 256 + 7 * 256 + * . no internal frame offset (FMBM_RIM[FOF] == 0) - otherwise, + * add up to 256 to the above + * + * * for Rx ports on P1023: + * . IFSZ = ceil(second_largest(FMBM_EBMPI[PBS] / 256)) * 256 + 7 * 256, + * if at least 2 bpools are configured + * . IFSZ = 8 * 256, if only a single bpool is configured + * + * * for Tx ports: + * . IFSZ = ceil(frame_size / 256) * 256 + 3 * 256 + * + FMBM_TFP[DPDE] * 256, i.e.: + * IFSZ = ceil(MAXFRM / 256) * 256 + 3 x 256 + FMBM_TFP[DPDE] * 256 + * + * * for OH ports on P4080: + * . IFSZ = ceil(frame_size / 256) * 256 + 1 * 256 + FMBM_PP[MXT] * 256 + * * for OH ports on P1023: + * . IFSZ = ceil(frame_size / 256) * 256 + 3 * 256 + FMBM_TFP[DPDE] * 256 + * * for both P4080 and P1023: + * . (conservative decisions, assuming that BMI must bring the entire + * frame, not only the frame header) + * . no internal frame offset (FMBM_OIM[FOF] == 0) - otherwise, + * add up to 256 to the above + * + * . for P4080/P5020/P3041/P2040, DPDE is: + * > 0 or 1, for 1Gb ports, HW default: 0 + * > 2..7 (recommended: 3..7) for 10Gb ports, HW default: 3 + * . for P1023, DPDE should be 1 + * + * . for P1023, MXT is in range (0..31) + * . for P4080, MXT is in range (0..63) + * + */ + + if ((p_LnxWrpFmPortDev->defPcd != e_NO_PCD) && + (InitFmPort3TupleDefPcd(p_LnxWrpFmPortDev) != E_OK)) + RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); + + return E_OK; +} + +void fm_set_rx_port_params(struct fm_port *port, + struct fm_port_rx_params *params) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) port; + int i; + + p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.errFqid = + params->errq; + p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.dfltFqid = + params->defq; + p_LnxWrpFmPortDev->settings.param.specificParams.rxParams.extBufPools. + numOfPoolsUsed = params->num_pools; + for (i = 0; i < params->num_pools; i++) { + p_LnxWrpFmPortDev->settings.param.specificParams.rxParams. + extBufPools.extBufPool[i].id = + params->pool_param[i].id; + p_LnxWrpFmPortDev->settings.param.specificParams.rxParams. 
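+		/* To make the Rx formula in the FIFO-size comment above
+		 * concrete (numbers are purely illustrative, not taken from
+		 * any platform): with a largest external buffer pool of
+		 * 1600-byte buffers on P4080,
+		 *   IFSZ = ceil(1600 / 256) * 256 + 7 * 256
+		 *        = 7 * 256 + 7 * 256 = 3584 bytes.
+		 * The buffer pool sizes copied in this loop feed that
+		 * FMBM_EBMPI[PBS] term. */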
+ extBufPools.extBufPool[i].size = + params->pool_param[i].size; + } + + p_LnxWrpFmPortDev->buffPrefixContent.privDataSize = + params->priv_data_size; + p_LnxWrpFmPortDev->buffPrefixContent.passPrsResult = + params->parse_results; + p_LnxWrpFmPortDev->buffPrefixContent.passHashResult = + params->hash_results; + p_LnxWrpFmPortDev->buffPrefixContent.passTimeStamp = + params->time_stamp; + + ADD_ADV_CONFIG_START(p_LnxWrpFmPortDev->settings.advConfig, + FM_MAX_NUM_OF_ADV_SETTINGS) + + ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigBufferPrefixContent, + ARGS(1, + (&p_LnxWrpFmPortDev-> + buffPrefixContent))); + + ADD_ADV_CONFIG_END InitFmPortDev(p_LnxWrpFmPortDev); +} +EXPORT_SYMBOL(fm_set_rx_port_params); + +void fm_set_tx_port_params(struct fm_port *port, + struct fm_port_non_rx_params *params) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) port; + + p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams.errFqid = + params->errq; + p_LnxWrpFmPortDev->settings.param.specificParams.nonRxParams. + dfltFqid = params->defq; + + p_LnxWrpFmPortDev->buffPrefixContent.privDataSize = + params->priv_data_size; + p_LnxWrpFmPortDev->buffPrefixContent.passPrsResult = + params->parse_results; + p_LnxWrpFmPortDev->buffPrefixContent.passHashResult = + params->hash_results; + p_LnxWrpFmPortDev->buffPrefixContent.passTimeStamp = + params->time_stamp; + + ADD_ADV_CONFIG_START(p_LnxWrpFmPortDev->settings.advConfig, + FM_MAX_NUM_OF_ADV_SETTINGS) + + ADD_ADV_CONFIG_NO_RET(FM_PORT_ConfigBufferPrefixContent, + ARGS(1, + (&p_LnxWrpFmPortDev-> + buffPrefixContent))); + + ADD_ADV_CONFIG_END InitFmPortDev(p_LnxWrpFmPortDev); +} +EXPORT_SYMBOL(fm_set_tx_port_params); + +static void LnxwrpFmPcdDevExceptionsCb(t_Handle h_App, + e_FmPcdExceptions exception) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_App; + + ASSERT_COND(p_LnxWrpFmDev); + + DBG(INFO, ("got fm-pcd exception %d", exception)); + + /* do nothing */ + UNUSED(exception); +} + +static void LnxwrpFmPcdDevIndexedExceptionsCb(t_Handle h_App, + e_FmPcdExceptions exception, + uint16_t index) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = (t_LnxWrpFmDev *) h_App; + + ASSERT_COND(p_LnxWrpFmDev); + + DBG(INFO, + ("got fm-pcd-indexed exception %d, indx %d", exception, index)); + + /* do nothing */ + UNUSED(exception); + UNUSED(index); +} + +static t_Error InitFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + spin_lock_init(&lock); + + if (p_LnxWrpFmDev->pcdActive) { + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort; + t_FmPcdParams fmPcdParams; + t_Error err; + + memset(&fmPcdParams, 0, sizeof(fmPcdParams)); + fmPcdParams.h_Fm = p_LnxWrpFmDev->h_Dev; + fmPcdParams.prsSupport = p_LnxWrpFmDev->prsActive; + fmPcdParams.kgSupport = p_LnxWrpFmDev->kgActive; + fmPcdParams.plcrSupport = p_LnxWrpFmDev->plcrActive; + fmPcdParams.ccSupport = p_LnxWrpFmDev->ccActive; + fmPcdParams.numOfSchemes = FM_PCD_KG_NUM_OF_SCHEMES; + +#ifndef CONFIG_GUEST_PARTITION + fmPcdParams.f_Exception = LnxwrpFmPcdDevExceptionsCb; + if (fmPcdParams.kgSupport) + fmPcdParams.f_ExceptionId = + LnxwrpFmPcdDevIndexedExceptionsCb; + fmPcdParams.h_App = p_LnxWrpFmDev; +#endif /* !CONFIG_GUEST_PARTITION */ + +#ifdef CONFIG_MULTI_PARTITION_SUPPORT + fmPcdParams.numOfSchemes = 0; + fmPcdParams.numOfClsPlanEntries = 0; + fmPcdParams.partitionId = 0; +#endif /* CONFIG_MULTI_PARTITION_SUPPORT */ + fmPcdParams.useHostCommand = TRUE; + + p_LnxWrpFmDev->hc_tx_fq = + FqAlloc(p_LnxWrpFmDev, + 0, + QMAN_FQ_FLAG_TO_DCPORTAL, + p_LnxWrpFmPortDev->txCh, 0); + if (!p_LnxWrpFmDev->hc_tx_fq) + 
RETURN_ERROR(MAJOR, E_NULL_POINTER, + ("Frame queue allocation failed...")); + + p_LnxWrpFmDev->hc_tx_conf_fq = + FqAlloc(p_LnxWrpFmDev, + 0, + QMAN_FQ_FLAG_NO_ENQUEUE, + p_LnxWrpFmDev->hcCh, 7); + if (!p_LnxWrpFmDev->hc_tx_conf_fq) + RETURN_ERROR(MAJOR, E_NULL_POINTER, + ("Frame queue allocation failed...")); + + p_LnxWrpFmDev->hc_tx_err_fq = + FqAlloc(p_LnxWrpFmDev, + 0, + QMAN_FQ_FLAG_NO_ENQUEUE, + p_LnxWrpFmDev->hcCh, 7); + if (!p_LnxWrpFmDev->hc_tx_err_fq) + RETURN_ERROR(MAJOR, E_NULL_POINTER, + ("Frame queue allocation failed...")); + + fmPcdParams.hc.portBaseAddr = p_LnxWrpFmPortDev->baseAddr; + fmPcdParams.hc.portId = + p_LnxWrpFmPortDev->settings.param.portId; + fmPcdParams.hc.liodnBase = + p_LnxWrpFmPortDev->settings.param.liodnBase; + fmPcdParams.hc.errFqid = + qman_fq_fqid(p_LnxWrpFmDev->hc_tx_err_fq); + fmPcdParams.hc.confFqid = + qman_fq_fqid(p_LnxWrpFmDev->hc_tx_conf_fq); + fmPcdParams.hc.qmChannel = p_LnxWrpFmPortDev->txCh; + fmPcdParams.hc.f_QmEnqueue = QmEnqueueCB; + fmPcdParams.hc.h_QmArg = (t_Handle) p_LnxWrpFmDev; + + p_LnxWrpFmDev->h_PcdDev = FM_PCD_Config(&fmPcdParams); + if (!p_LnxWrpFmDev->h_PcdDev) + RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM PCD!")); + + err = + FM_PCD_ConfigPlcrNumOfSharedProfiles(p_LnxWrpFmDev->h_PcdDev, + LNXWRP_FM_NUM_OF_SHARED_PROFILES); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + err = FM_PCD_Init(p_LnxWrpFmDev->h_PcdDev); + if (err != E_OK) + RETURN_ERROR(MAJOR, err, NO_MSG); + + if (p_LnxWrpFmDev->err_irq == 0) { + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_KG_EXCEPTION_DOUBLE_ECC, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_KG_EXCEPTION_KEYSIZE_OVERFLOW, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_PLCR_EXCEPTION_INIT_ENTRY_ERROR, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_PLCR_EXCEPTION_DOUBLE_ECC, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_PRS_EXCEPTION_DOUBLE_ECC, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_PLCR_EXCEPTION_PRAM_SELF_INIT_COMPLETE, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_PLCR_EXCEPTION_ATOMIC_ACTION_COMPLETE, + FALSE); + FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, + e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC, + FALSE); + } + } + + return E_OK; +} + +void FreeFmPcdDev(t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + + if (p_LnxWrpFmDev->h_PcdDev) + FM_PCD_Free(p_LnxWrpFmDev->h_PcdDev); + + if (p_LnxWrpFmDev->hc_tx_err_fq) + FqFree(p_LnxWrpFmDev->hc_tx_err_fq); + + if (p_LnxWrpFmDev->hc_tx_conf_fq) + FqFree(p_LnxWrpFmDev->hc_tx_conf_fq); + + if (p_LnxWrpFmDev->hc_tx_fq) + FqFree(p_LnxWrpFmDev->hc_tx_fq); +} + +static void FreeFmPortDev(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = + (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + + if (!p_LnxWrpFmPortDev->active) + return; + + if (p_LnxWrpFmPortDev->h_Dev) + FM_PORT_Free(p_LnxWrpFmPortDev->h_Dev); + + devm_iounmap(p_LnxWrpFmDev->dev, + UINT_TO_PTR(p_LnxWrpFmPortDev->baseAddr)); + __devm_release_region(p_LnxWrpFmDev->dev, p_LnxWrpFmDev->res, + p_LnxWrpFmPortDev->phys_baseAddr, + p_LnxWrpFmPortDev->memSize); +} + +static int fm_port_probe(struct platform_device *of_dev) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + t_LnxWrpFmDev *p_LnxWrpFmDev; + struct device *dev; + + dev = &of_dev->dev; + + p_LnxWrpFmPortDev = ReadFmPortDevTreeNode(of_dev); + if (p_LnxWrpFmPortDev == NULL) + return -EIO; + /* Port can be inactive, thus will not be probed: + - in performance mode, OH ports are disabled + 
... + */ + if (!p_LnxWrpFmPortDev->active) + return 0; + + if (ConfigureFmPortDev(p_LnxWrpFmPortDev) != E_OK) + return -EIO; + + dev_set_drvdata(dev, p_LnxWrpFmPortDev); + + if ((p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_OH_HOST_COMMAND) + && + (InitFmPcdDev((t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev) + != E_OK)) + return -EIO; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + + if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_RX) { + Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d", + p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id); + p_LnxWrpFmPortDev->minor = + p_LnxWrpFmPortDev->id + DEV_FM_RX_PORTS_MINOR_BASE; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX_10G) { + Sprint(p_LnxWrpFmPortDev->name, "%s-port-rx%d", + p_LnxWrpFmDev->name, + p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_RX_PORTS); + p_LnxWrpFmPortDev->minor = + p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_RX_PORTS + + DEV_FM_RX_PORTS_MINOR_BASE; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX) { + Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d", + p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id); + p_LnxWrpFmPortDev->minor = + p_LnxWrpFmPortDev->id + DEV_FM_TX_PORTS_MINOR_BASE; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX_10G) { + Sprint(p_LnxWrpFmPortDev->name, "%s-port-tx%d", + p_LnxWrpFmDev->name, + p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_TX_PORTS); + p_LnxWrpFmPortDev->minor = + p_LnxWrpFmPortDev->id + FM_MAX_NUM_OF_1G_TX_PORTS + + DEV_FM_TX_PORTS_MINOR_BASE; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_OH_HOST_COMMAND) { + Sprint(p_LnxWrpFmPortDev->name, "%s-port-oh%d", + p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id); + p_LnxWrpFmPortDev->minor = + p_LnxWrpFmPortDev->id + DEV_FM_OH_PORTS_MINOR_BASE; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_OH_OFFLINE_PARSING) { + Sprint(p_LnxWrpFmPortDev->name, "%s-port-oh%d", + p_LnxWrpFmDev->name, p_LnxWrpFmPortDev->id + 1); + p_LnxWrpFmPortDev->minor = + p_LnxWrpFmPortDev->id + 1 + + DEV_FM_OH_PORTS_MINOR_BASE; + } + + device_create(p_LnxWrpFmDev->fm_class, NULL, + MKDEV(p_LnxWrpFmDev->major, p_LnxWrpFmPortDev->minor), + NULL, p_LnxWrpFmPortDev->name); + + /* create sysfs entries for stats and regs */ + + if (fm_port_sysfs_create(dev) != 0) { + FreeFmPortDev(p_LnxWrpFmPortDev); + REPORT_ERROR(MAJOR, E_INVALID_STATE, + ("Unable to create sys entry - fm port!!!")); + return -EIO; + } + +#ifdef FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 + FM_DisableRamsEcc(p_LnxWrpFmDev->h_Dev); +#endif /* FM_TX_INVALID_ECC_ERRATA_10GMAC_A009 */ + + DBG(TRACE, ("%s probed", p_LnxWrpFmPortDev->name)); + + return 0; +} + +static int fm_port_remove(struct platform_device *of_dev) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + t_LnxWrpFmDev *p_LnxWrpFmDev; + struct device *dev; + + dev = &of_dev->dev; + p_LnxWrpFmPortDev = dev_get_drvdata(dev); + + fm_port_sysfs_destroy(dev); + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + device_destroy(p_LnxWrpFmDev->fm_class, + MKDEV(p_LnxWrpFmDev->major, p_LnxWrpFmPortDev->minor)); + + FreeFmPortDev(p_LnxWrpFmPortDev); + + dev_set_drvdata(dev, NULL); + + return 0; +} + +static const struct of_device_id fm_port_match[] = { + { + .compatible = "fsl,fman-port-oh"}, + { + .compatible = "fsl,fman-port-1g-rx"}, + { + .compatible = "fsl,fman-port-10g-rx"}, + { + .compatible = "fsl,fman-port-1g-tx"}, + { + .compatible = "fsl,fman-port-10g-tx"}, + {} 
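+	/* For reference, the kind of device-tree node this table matches
+	 * (a sketch only - the address, size and channel id below are made
+	 * up; the authoritative bindings live in the platform dts files):
+	 *
+	 *	port@a8000 {
+	 *		compatible = "fsl,fman-port-1g-tx";
+	 *		reg = <0xa8000 0x1000>;
+	 *		cell-index = <0>;
+	 *		fsl,qman-channel-id = <0x41>;
+	 *	};
+	 *
+	 * ReadFmPortDevTreeNode() above consumes exactly these properties:
+	 * "cell-index", "fsl,qman-channel-id" (Tx and OH ports only) and the
+	 * "reg" range resolved through of_address_to_resource(). */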
+}; + +#ifndef MODULE +MODULE_DEVICE_TABLE(of, fm_port_match); +#endif /* !MODULE */ + +static struct platform_driver fm_port_driver = { + + .driver = { + .name = "fsl-fman-port", + .of_match_table = fm_port_match, + .owner = THIS_MODULE, + }, + .probe = fm_port_probe, + .remove = fm_port_remove, +}; + + +t_Error LNXWRP_FM_Port_Init(void) +{ + /* Register to the DTB for basic FM port API */ + if (platform_driver_register(&fm_port_driver)) + return E_NO_DEVICE; + + return E_OK; +} + +void LNXWRP_FM_Port_Free(void) +{ + platform_driver_unregister(&fm_port_driver); +} + +static int __init __cold fm_port_load(void) +{ + if (LNXWRP_FM_Port_Init() != E_OK) { + printk(KERN_CRIT "Failed to init FM Ports wrapper!\n"); + return -ENODEV; + } + + printk(KERN_INFO "Freescale FM Ports module (" __DATE__ ":" __TIME__ ")\n"); + + return 0; +} + +static void __exit __cold fm_port_unload(void) +{ + LNXWRP_FM_Port_Free(); +} + +module_init(fm_port_load); +module_exit(fm_port_unload); --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_ioctls_fm.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_ioctls_fm.c @@ -0,0 +1,2648 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_ioctls_fm.c + + @Author Shlomi Gridish + + @Description FM Linux wrapper functions. 
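+
+                 This file implements the FM wrapper ioctl commands. Each
+                 command follows the same general pattern: the user-space
+                 parameter block is copied into a kernel-side wrapper
+                 structure, the matching FM PCD/port API is invoked, and
+                 any returned handle or result is copied back to user
+                 space. When CONFIG_COMPAT is set, 32-bit user-space
+                 layouts are first translated with the compat_copy_*() and
+                 compat_ptr() helpers.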
+ +*/ + +/* Linux Headers ------------------- */ +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_COMPAT) +#include +#endif + +#include "part_ext.h" +#include "fm_ioctls.h" +#include "fm_pcd_ioctls.h" +#include "fm_port_ioctls.h" + +#if defined(CONFIG_COMPAT) +#include "lnxwrp_ioctls_fm_compat.h" +#endif + +#include "lnxwrp_fm.h" + +#define CMP_IOC_DEFINE(def) (IOC_##def != def) + +/* fm_pcd_ioctls.h === fm_pcd_ext.h assertions */ +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_PRIVATE_HDRS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_PRS_NUM_OF_HDRS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_SCHEMES) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS) +#error Error: please synchronize IOC_ defines! +#endif + +/* please check for this one in fm_common.h: */ +#define FM_PCD_MAX_NUM_OF_OPTIONS(clsPlanEntries) ((clsPlanEntries==256)? 8:((clsPlanEntries==128)? 7: ((clsPlanEntries==64)? 6: ((clsPlanEntries==32)? 5:0)))) +#if (IOC_FM_PCD_MAX_NUM_OF_OPTIONS != FM_PCD_MAX_NUM_OF_OPTIONS(FM_PCD_MAX_NUM_OF_CLS_PLANS)) +#error Error: please synchronize IOC_ defines! +#endif +#undef FM_PCD_MAX_NUM_OF_OPTIONS + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_INTERCHANGEABLE_HDRS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_GENERIC_REGS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_KG_MAX_NUM_OF_EXTRACTS_PER_KEY) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CLS_PLANS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_EXTRACT_MASKS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_KG_NUM_OF_DEFAULT_GROUPS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_PRS_NUM_OF_LABELS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_NODES) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_TREES) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_GROUPS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_UNITS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_KEYS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_SIZE_OF_KEY) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP) +#error Error: please synchronize IOC_ defines! +#endif + +/* net_ioctls.h === net_ext.h assertions */ +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPoE_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPMUX_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PPPMUX_SUBFRAME_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! 
+#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ETH_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPv4_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPv6_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ICMP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IGMP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_TCP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SCTP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_DCCP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_UDP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_UDP_ENCAP_ESP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPHC_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SCTP_CHUNK_DATA_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv2_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv3_CTRL_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_L2TPv3_SESS_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_VLAN_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_LLC_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_NLPID_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_SNAP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_LLC_SNAP_ALL_FIELDS) +#warning Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_ARP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_RFC2684_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_USER_DEFINED_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_PAYLOAD_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_GRE_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MINENCAP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPSEC_AH_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_IPSEC_ESP_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MPLS_LABEL_STACK_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! +#endif + +#if CMP_IOC_DEFINE(NET_HEADER_FIELD_MACSEC_ALL_FIELDS) +#error Error: please synchronize IOC_ defines! 
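+/* Each CMP_IOC_DEFINE(x) in this series expands to (IOC_x != x), so a given
+   block only trips its #error when the exported IOC_ copy of a constant has
+   drifted away from the value in the internal NetCommSw headers. */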
+#endif + +/* fm_ioctls.h === fm_ext.h assertions */ +#if CMP_IOC_DEFINE(FM_MAX_NUM_OF_VALID_PORTS) +#error Error: please synchronize IOC_ defines! +#endif + +/* fm_port_ioctls.h === dpaa_integrations_ext.h assertions */ +#if CMP_IOC_DEFINE(FM_PORT_NUM_OF_CONGESTION_GRPS) +#error Error: please synchronize IOC_ defines! +#endif + +#define ASSERT_IOC_NET_ENUM(def) ASSERT_COND((unsigned long)e_IOC_NET_##def == (unsigned long)def) + +static void LnxwrpAssertions(void) +{ + /* sampling checks */ + ASSERT_IOC_NET_ENUM(HEADER_TYPE_MACSEC); + ASSERT_IOC_NET_ENUM(HEADER_TYPE_PPP); + ASSERT_IOC_NET_ENUM(MAX_HEADER_TYPE_COUNT); + ASSERT_COND((unsigned long)e_IOC_FM_PORT_TYPE_DUMMY == (unsigned long)e_FM_PORT_TYPE_DUMMY); + ASSERT_COND((unsigned long)e_IOC_FM_EX_MURAM_ECC == (unsigned long)e_FM_EX_MURAM_ECC); + ASSERT_COND((unsigned long)e_IOC_FM_COUNTERS_SEMAPHOR_SYNC_REJECT == (unsigned long)e_FM_COUNTERS_SEMAPHOR_SYNC_REJECT); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES == (unsigned long)e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS_EXCEPTION_SINGLE_ECC == (unsigned long)e_FM_PCD_PRS_EXCEPTION_SINGLE_ECC); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PRS == (unsigned long)e_FM_PCD_PRS); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_EXTRACT_FULL_FIELD == (unsigned long)e_FM_PCD_EXTRACT_FULL_FIELD); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_EXTRACT_FROM_FLOW_ID == (unsigned long)e_FM_PCD_EXTRACT_FROM_FLOW_ID); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO == (unsigned long)e_FM_PCD_KG_EXTRACT_PORT_PRIVATE_INFO); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_DFLT_ILLEGAL == (unsigned long)e_FM_PCD_KG_DFLT_ILLEGAL); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_KG_GENERIC_NOT_FROM_DATA == (unsigned long)e_FM_PCD_KG_GENERIC_NOT_FROM_DATA); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_HDR_INDEX_3 == (unsigned long)e_FM_PCD_HDR_INDEX_3); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_SHARED == (unsigned long)e_FM_PCD_PLCR_SHARED); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_RFC_4115 == (unsigned long)e_FM_PCD_PLCR_RFC_4115); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_COLOR_AWARE == (unsigned long)e_FM_PCD_PLCR_COLOR_AWARE); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_OVERRIDE == (unsigned long)e_FM_PCD_PLCR_OVERRIDE); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_FULL_FRM_LEN == (unsigned long)e_FM_PCD_PLCR_FULL_FRM_LEN); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN == (unsigned long)e_FM_PCD_PLCR_ROLLBACK_FULL_FRM_LEN); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_PACKET_MODE == (unsigned long)e_FM_PCD_PLCR_PACKET_MODE); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_DROP_FRAME == (unsigned long)e_FM_PCD_DROP_FRAME); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER == (unsigned long)e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER); + ASSERT_COND((unsigned long)e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP == (unsigned long)e_FM_PCD_ACTION_INDEXED_LOOKUP); + ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_PRS_AND_KG_AND_PLCR); +#ifdef FM_CAPWAP_SUPPORT + ASSERT_COND((unsigned long)e_IOC_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR == (unsigned long)e_FM_PORT_PCD_SUPPORT_CC_AND_KG_AND_PLCR); +#endif + ASSERT_COND((unsigned long)e_IOC_FM_PORT_COUNTERS_DEQ_CONFIRM == (unsigned long)e_FM_PORT_COUNTERS_DEQ_CONFIRM); + ASSERT_COND((unsigned 
long)e_IOC_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8 == (unsigned long)e_FM_PORT_DUAL_RATE_LIMITER_SCALE_DOWN_BY_8); +} + +static t_Error LnxwrpFmPcdIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat) +{ + t_Error err = E_READ_FAILED; + + /* can be moved from here */ + LnxwrpAssertions(); + + switch (cmd) + { +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_PRS_LOAD_SW_COMPAT: +#endif + case FM_PCD_IOC_PRS_LOAD_SW: + { + ioc_fm_pcd_prs_sw_params_t *param; + uint8_t *p_code; + + ASSERT_COND(sizeof(ioc_fm_pcd_prs_sw_params_t) == sizeof(t_FmPcdPrsSwParams)); + + param = (ioc_fm_pcd_prs_sw_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_prs_sw_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_prs_sw_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_prs_sw_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_prs_sw_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_prs_sw_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_prs_sw_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_fm_pcd_prs_sw(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_prs_sw_params_t *)arg, + sizeof(ioc_fm_pcd_prs_sw_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + p_code = (uint8_t *) XX_Malloc(param->size); + if (!p_code) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + if (copy_from_user(p_code, param->p_code, param->size)) { + XX_Free(p_code); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + param->p_code = p_code; + + err = FM_PCD_PrsLoadSw(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdPrsSwParams*)param); + XX_Free(p_code); + XX_Free(param); + break; + } + + case FM_PCD_IOC_ENABLE: + return FM_PCD_Enable(p_LnxWrpFmDev->h_PcdDev); + + case FM_PCD_IOC_DISABLE: + return FM_PCD_Disable(p_LnxWrpFmDev->h_PcdDev); + + case FM_PCD_IOC_FORCE_INTR: + { + int exception; + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (get_user(exception, (int *) compat_ptr(arg))) + break; + } + else +#endif + { + if (get_user(exception, (int *)arg)) + break; + } + + return FM_PCD_ForceIntr(p_LnxWrpFmDev->h_PcdDev, (e_FmPcdExceptions)exception); + } + + case FM_PCD_IOC_SET_EXCEPTION: + { + ioc_fm_pcd_exception_params_t *param; + + param = (ioc_fm_pcd_exception_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_exception_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_pcd_exception_params_t *)compat_ptr(arg), + sizeof(ioc_fm_pcd_exception_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_exception_params_t *)arg, + sizeof(ioc_fm_pcd_exception_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_SetException(p_LnxWrpFmDev->h_PcdDev, param->exception, param->enable); + XX_Free(param); + break; + } + + case FM_PCD_IOC_KG_SET_ADDITIONAL_DATA_AFTER_PARSING: + { + uint8_t payloadOffset; + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (get_user(payloadOffset, (uint8_t*) compat_ptr(arg))) + break; + } + else +#endif + { + if (get_user(payloadOffset, (uint8_t*) arg)) + break; + 
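+			/* Same convention as the other commands in this
+			 * handler: the compat branch above widens the 32-bit
+			 * caller's pointer with compat_ptr(arg) before reading
+			 * through it, while the native branch uses 'arg'
+			 * directly. */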
} + + return FM_PCD_KgSetAdditionalDataAfterParsing(p_LnxWrpFmDev->h_PcdDev, payloadOffset); + } + + case FM_PCD_IOC_KG_SET_DFLT_VALUE: + { + ioc_fm_pcd_kg_dflt_value_params_t *param; + + param = (ioc_fm_pcd_kg_dflt_value_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_kg_dflt_value_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_pcd_kg_dflt_value_params_t *)compat_ptr(arg), + sizeof(ioc_fm_pcd_kg_dflt_value_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_kg_dflt_value_params_t *)arg, + sizeof(ioc_fm_pcd_kg_dflt_value_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_KgSetDfltValue(p_LnxWrpFmDev->h_PcdDev, param->valueId, param->value); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_SET_NET_ENV_CHARACTERISTICS_COMPAT: +#endif + case FM_PCD_IOC_SET_NET_ENV_CHARACTERISTICS: + { + ioc_fm_pcd_net_env_params_t *param; + + param = (ioc_fm_pcd_net_env_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_net_env_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_net_env_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_net_env_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_net_env_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_net_env_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_net_env_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_net_env(compat_param, param, COMPAT_US_TO_K); + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_net_env_params_t *) arg, + sizeof(ioc_fm_pcd_net_env_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + param->id = FM_PCD_SetNetEnvCharacteristics(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdNetEnvParams*)param); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_net_env_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_net_env_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_net_env_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + compat_copy_fm_pcd_net_env(compat_param, param, COMPAT_K_TO_US); + + if (param->id && !copy_to_user((ioc_compat_fm_pcd_net_env_params_t *) compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_net_env_params_t))) + err = E_OK; + + XX_Free(compat_param); + } + else +#endif + { + if (param->id && !copy_to_user((ioc_fm_pcd_net_env_params_t *)arg, param, sizeof(ioc_fm_pcd_net_env_params_t))) + err = E_OK; + } + + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_DELETE_NET_ENV_CHARACTERISTICS_COMPAT: +#endif + case FM_PCD_IOC_DELETE_NET_ENV_CHARACTERISTICS: + { + ioc_fm_obj_t id; + + memset(&id, 0 , sizeof(ioc_fm_obj_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_ptr(compat_id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return 
FM_PCD_DeleteNetEnvCharacteristics(p_LnxWrpFmDev->h_PcdDev, id.obj); + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_KG_SET_SCHEME_COMPAT: +#endif + case FM_PCD_IOC_KG_SET_SCHEME: + { + ioc_fm_pcd_kg_scheme_params_t *param; + + ASSERT_COND(sizeof(t_FmPcdKgSchemeParams) + sizeof(void *) == sizeof(ioc_fm_pcd_kg_scheme_params_t)); + param = (ioc_fm_pcd_kg_scheme_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_kg_scheme_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_kg_scheme_params_t *compat_param = NULL; + + compat_param = (ioc_compat_fm_pcd_kg_scheme_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_kg_scheme_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_kg_scheme_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_kg_scheme_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_kg_scheme(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_params_t *)arg, + sizeof(ioc_fm_pcd_kg_scheme_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + param->id = FM_PCD_KgSetScheme(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdKgSchemeParams*)param); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_kg_scheme_params_t *compat_param = NULL; + + compat_param = (ioc_compat_fm_pcd_kg_scheme_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_kg_scheme_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + compat_copy_fm_pcd_kg_scheme(compat_param, param, COMPAT_K_TO_US); + + if (param->id && !copy_to_user((ioc_compat_fm_pcd_kg_scheme_params_t *)compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_kg_scheme_params_t))) + err = E_OK; + XX_Free(compat_param); + } + else +#endif + { + if (param->id && !copy_to_user((ioc_fm_pcd_kg_scheme_params_t *)arg, + param, + sizeof(ioc_fm_pcd_kg_scheme_params_t))) + err = E_OK; + } + + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_KG_DEL_SCHEME_COMPAT: +#endif + case FM_PCD_IOC_KG_DEL_SCHEME: + { + ioc_fm_obj_t id; + + memset(&id, 0 , sizeof(ioc_fm_obj_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_ptr(compat_id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return FM_PCD_KgDeleteScheme(p_LnxWrpFmDev->h_PcdDev, id.obj); + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_SET_NODE_COMPAT: +#endif + case FM_PCD_IOC_CC_SET_NODE: + { + ioc_fm_pcd_cc_node_params_t *param; + uint8_t *keys; + uint8_t *masks; + int i,k; + + ASSERT_COND(sizeof(t_FmPcdCcNodeParams) + sizeof(void *) == sizeof(ioc_fm_pcd_cc_node_params_t)); + + param = (ioc_fm_pcd_cc_node_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_params_t) + + 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(param, 0, sizeof(ioc_fm_pcd_cc_node_params_t) + + 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + + keys = (uint8_t *) (param + 1); + masks = keys + IOC_FM_PCD_MAX_NUM_OF_KEYS 
* IOC_FM_PCD_MAX_SIZE_OF_KEY; + memset(keys, 0, 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_node_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_params_t) + + 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_params_t) + + 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_node_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_node(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_params_t *)arg, sizeof(ioc_fm_pcd_cc_node_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + ASSERT_COND(param->keys_params.num_of_keys <= IOC_FM_PCD_MAX_NUM_OF_KEYS); + ASSERT_COND(param->keys_params.key_size <= IOC_FM_PCD_MAX_SIZE_OF_KEY); + + /* support for indexed lookup */ + if( !(param->extract_cc_params.type == e_IOC_FM_PCD_EXTRACT_NON_HDR && + param->extract_cc_params.extract_params.extract_non_hdr.src == e_IOC_FM_PCD_EXTRACT_FROM_HASH && + param->extract_cc_params.extract_params.extract_non_hdr.action == e_IOC_FM_PCD_ACTION_INDEXED_LOOKUP)) + { + for (i=0, k=0; + i < param->keys_params.num_of_keys; + i++, k += IOC_FM_PCD_MAX_SIZE_OF_KEY) + { + if (copy_from_user(&keys[k], + param->keys_params.key_params[i].p_key, + param->keys_params.key_size)) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + param->keys_params.key_params[i].p_key = &keys[k]; + + if (param->keys_params.key_params[i].p_mask) + { + if (copy_from_user(&masks[k], + param->keys_params.key_params[i].p_mask, + param->keys_params.key_size)) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + param->keys_params.key_params[i].p_mask = &masks[k]; + } + } + } + + param->id = FM_PCD_CcSetNode(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdCcNodeParams*)param); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_params_t *compat_param; + compat_param = (ioc_compat_fm_pcd_cc_node_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_params_t) + + 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_node_params_t) + + 2 * IOC_FM_PCD_MAX_NUM_OF_KEYS * IOC_FM_PCD_MAX_SIZE_OF_KEY); + + /* setup user space structure */ + compat_copy_fm_pcd_cc_node(compat_param, param, COMPAT_K_TO_US); + + compat_param->id = compat_add_ptr2id(param->id); + + if (param->id && !copy_to_user((ioc_compat_fm_pcd_cc_node_params_t *)compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_cc_node_params_t))) + err = E_OK; + + XX_Free(compat_param); + } + else +#endif + { + if (param->id && !copy_to_user((ioc_fm_pcd_cc_node_params_t *)arg, param, sizeof(ioc_fm_pcd_cc_node_params_t))) + err = E_OK; + } + + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_DELETE_NODE_COMPAT: +#endif + case FM_PCD_IOC_CC_DELETE_NODE: + { + ioc_fm_obj_t id; + + memset(&id, 0 , sizeof(ioc_fm_obj_t)); + +#if 
defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_get_id2ptr(compat_id.obj); + compat_del_ptr2id(id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return FM_PCD_CcDeleteNode(p_LnxWrpFmDev->h_PcdDev, id.obj); + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_BUILD_TREE_COMPAT: +#endif + case FM_PCD_IOC_CC_BUILD_TREE: + { + ioc_fm_pcd_cc_tree_params_t *param; + + ASSERT_COND(sizeof(t_FmPcdCcTreeParams) + sizeof(void *) == sizeof(ioc_fm_pcd_cc_tree_params_t)); + + param = (ioc_fm_pcd_cc_tree_params_t *) XX_Malloc(sizeof(ioc_fm_pcd_cc_tree_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + memset(param, 0, sizeof(ioc_fm_pcd_cc_tree_params_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_tree_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tree_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_tree_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_params_t)); + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_tree_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_tree_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_tree(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_tree_params_t *)arg, + sizeof(ioc_fm_pcd_cc_tree_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + param->id = FM_PCD_CcBuildTree(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdCcTreeParams*)param); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_tree_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tree_params_t *) XX_Malloc(sizeof(ioc_compat_fm_pcd_cc_tree_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + memset(compat_param, 0, sizeof(ioc_compat_fm_pcd_cc_tree_params_t)); + + compat_add_ptr2id(param->id); + param->id = (void *)(uint64_t)compat_get_ptr2id(param->id); + + compat_copy_fm_pcd_cc_tree(compat_param, param, COMPAT_K_TO_US); + + if (param->id && !copy_to_user((ioc_compat_fm_pcd_cc_tree_params_t *)compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_cc_tree_params_t))) + err = E_OK; + + XX_Free(compat_param); + } + else +#endif + { + if (param->id && !copy_to_user((ioc_fm_pcd_cc_tree_params_t *)arg, param, sizeof(ioc_fm_pcd_cc_tree_params_t))) + err = E_OK; + } + + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_DELETE_TREE_COMPAT: +#endif + case FM_PCD_IOC_CC_DELETE_TREE: + { + ioc_fm_obj_t id; + + memset(&id, 0 , sizeof(ioc_fm_obj_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_get_id2ptr(compat_id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return FM_PCD_CcDeleteTree(p_LnxWrpFmDev->h_PcdDev, id.obj); + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_PLCR_SET_PROFILE_COMPAT: +#endif + case 
FM_PCD_IOC_PLCR_SET_PROFILE: + { + ioc_fm_pcd_plcr_profile_params_t *param; + + ASSERT_COND(sizeof(t_FmPcdPlcrProfileParams) + sizeof(void *) == sizeof(ioc_fm_pcd_plcr_profile_params_t)); + + param = (ioc_fm_pcd_plcr_profile_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_plcr_profile_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_plcr_profile_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_plcr_profile_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_plcr_profile_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_plcr_profile_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_plcr_profile_params_t))) { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_plcr_profile(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_plcr_profile_params_t *)arg, + sizeof(ioc_fm_pcd_plcr_profile_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + if (!param->modify && + (((t_FmPcdPlcrProfileParams*)param)->id.newParams.profileType != e_FM_PCD_PLCR_SHARED)) + { + t_Handle h_Port; + fm_pcd_port_params_t *port_params; + + port_params = (fm_pcd_port_params_t*) XX_Malloc(sizeof(fm_pcd_port_params_t)); + if (!port_params) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(port_params, (fm_pcd_port_params_t*)((t_FmPcdPlcrProfileParams*)param)->id.newParams.h_FmPort, + sizeof(fm_pcd_port_params_t))) + { + XX_Free(port_params); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + switch(port_params->port_type) + { + case (e_IOC_FM_PORT_TYPE_RX): + h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id].h_Dev; + break; + case (e_IOC_FM_PORT_TYPE_RX_10G): + h_Port = p_LnxWrpFmDev->rxPorts[port_params->port_id + FM_MAX_NUM_OF_1G_RX_PORTS].h_Dev; + break; + case (e_IOC_FM_PORT_TYPE_OFFLINE_PARSING): + if (port_params->port_id) + { + h_Port = p_LnxWrpFmDev->opPorts[port_params->port_id - 1].h_Dev; + break; + } + default: + XX_Free(port_params); + XX_Free(param); + RETURN_ERROR(MINOR, E_INVALID_SELECTION, NO_MSG); + } + + ((t_FmPcdPlcrProfileParams*)param)->id.newParams.h_FmPort = h_Port; + XX_Free(port_params); + } + + param->id = FM_PCD_PlcrSetProfile(p_LnxWrpFmDev->h_PcdDev, (t_FmPcdPlcrProfileParams*)param); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_plcr_profile_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_plcr_profile_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_plcr_profile_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + compat_copy_fm_pcd_plcr_profile(compat_param, param, COMPAT_K_TO_US); + + if (param->id && !copy_to_user((ioc_compat_fm_pcd_plcr_profile_params_t *) compat_ptr(arg), + compat_param, + sizeof(ioc_compat_fm_pcd_plcr_profile_params_t))) + err = E_OK; + + XX_Free(compat_param); + } + else +#endif + { + if (param->id && !copy_to_user((ioc_fm_pcd_plcr_profile_params_t *)arg, param, sizeof(ioc_fm_pcd_plcr_profile_params_t))) + err = E_OK; + } + + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_PLCR_DEL_PROFILE_COMPAT: +#endif + case FM_PCD_IOC_PLCR_DEL_PROFILE: + { + ioc_fm_obj_t id; + + memset(&id, 0 , 
sizeof(ioc_fm_obj_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_ptr(compat_id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return FM_PCD_PlcrDeleteProfile(p_LnxWrpFmDev->h_PcdDev, id.obj); + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_TREE_MODIFY_NEXT_ENGINE_COMPAT: +#endif + case FM_PCD_IOC_CC_TREE_MODIFY_NEXT_ENGINE: + { + ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param; + + ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_engine_params_t) == sizeof(t_FmPcdCcNextEngineParams)); + + param = (ioc_fm_pcd_cc_tree_modify_next_engine_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t))) + { + XX_Free(param); + XX_Free(compat_param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_fm_pcd_cc_tree_modify_next_engine(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_tree_modify_next_engine_params_t *)arg, + sizeof(ioc_fm_pcd_cc_tree_modify_next_engine_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_CcTreeModifyNextEngine(p_LnxWrpFmDev->h_PcdDev, + param->id, + param->grp_indx, + param->indx, + (t_FmPcdCcNextEngineParams*)(¶m->cc_next_engine_params)); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_NODE_MODIFY_NEXT_ENGINE_COMPAT: +#endif + case FM_PCD_IOC_CC_NODE_MODIFY_NEXT_ENGINE: + { + ioc_fm_pcd_cc_node_modify_next_engine_params_t *param; + + ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_engine_params_t) == sizeof(t_FmPcdCcNextEngineParams)); + + param = (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t))) + { + XX_Free(param); + XX_Free(compat_param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_node_modify_next_engine(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_next_engine_params_t *)arg, + sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t))) + { + 
XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_CcNodeModifyNextEngine(p_LnxWrpFmDev->h_PcdDev, + param->id, + param->key_indx, + (t_FmPcdCcNextEngineParams*)(¶m->cc_next_engine_params)); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_NODE_MODIFY_MISS_NEXT_ENGINE_COMPAT: +#endif + case FM_PCD_IOC_CC_NODE_MODIFY_MISS_NEXT_ENGINE: + { + ioc_fm_pcd_cc_node_modify_next_engine_params_t *param; + + ASSERT_COND(sizeof(ioc_fm_pcd_cc_next_engine_params_t) == sizeof(t_FmPcdCcNextEngineParams)); + + param = (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t))) + { + XX_Free(param); + XX_Free(compat_param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_node_modify_next_engine(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_next_engine_params_t *) arg, + sizeof(ioc_fm_pcd_cc_node_modify_next_engine_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_CcNodeModifyMissNextEngine(p_LnxWrpFmDev->h_PcdDev, param->id, + (t_FmPcdCcNextEngineParams*)(¶m->cc_next_engine_params)); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_NODE_REMOVE_KEY_COMPAT: +#endif + case FM_PCD_IOC_CC_NODE_REMOVE_KEY: + { + ioc_fm_pcd_cc_node_remove_key_params_t *param; + + param = (ioc_fm_pcd_cc_node_remove_key_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_remove_key_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_remove_key_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_node_remove_key_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_node_remove_key_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_remove_key_params_t))) + { + XX_Free(param); + XX_Free(compat_param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + param->id = compat_ptr(compat_param->id); + param->key_indx = compat_param->key_indx; + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_remove_key_params_t *) arg, + sizeof(ioc_fm_pcd_cc_node_remove_key_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_CcNodeRemoveKey(p_LnxWrpFmDev->h_PcdDev, param->id, param->key_indx); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_NODE_ADD_KEY_COMPAT: +#endif + case FM_PCD_IOC_CC_NODE_ADD_KEY: + { + ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param; + + 
ASSERT_COND(sizeof(ioc_fm_pcd_cc_key_params_t) == sizeof(t_FmPcdCcKeyParams)); + + param = (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)); + if (!compat_param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t))) + { + XX_Free(param); + XX_Free(compat_param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)arg, + sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_CcNodeAddKey(p_LnxWrpFmDev->h_PcdDev, + param->id, + param->key_indx, + param->key_size, + (t_FmPcdCcKeyParams*)(¶m->key_params)); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_NODE_MODIFY_KEY_AND_NEXT_ENGINE_COMPAT: +#endif + case FM_PCD_IOC_CC_NODE_MODIFY_KEY_AND_NEXT_ENGINE: + { + ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param; + + ASSERT_COND(sizeof(ioc_fm_pcd_cc_key_params_t) == sizeof(t_FmPcdCcKeyParams)); + + param = (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t)); + if (!compat_param) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *)arg, + sizeof(ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + err = FM_PCD_CcNodeModifyKeyAndNextEngine(p_LnxWrpFmDev->h_PcdDev, + param->id, + param->key_indx, + param->key_size, + (t_FmPcdCcKeyParams*)(¶m->key_params)); + XX_Free(param); + break; + } + +#if defined(CONFIG_COMPAT) + case FM_PCD_IOC_CC_NODE_MODIFY_KEY_COMPAT: +#endif + case FM_PCD_IOC_CC_NODE_MODIFY_KEY: + { + ioc_fm_pcd_cc_node_modify_key_params_t *param = NULL; + uint8_t *key = NULL; + uint8_t *mask = NULL; + + param 
= (ioc_fm_pcd_cc_node_modify_key_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_cc_node_modify_key_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param = NULL; + compat_param = (ioc_compat_fm_pcd_cc_node_modify_key_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t)); + if (!param) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + } + + if (copy_from_user(compat_param, (ioc_compat_fm_pcd_cc_node_modify_key_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_cc_node_modify_key_params_t))) + { + XX_Free(compat_param); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + + compat_copy_fm_pcd_cc_node_modify_key(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_cc_node_modify_key_params_t *)arg, + sizeof(ioc_fm_pcd_cc_node_modify_key_params_t))) + { + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + } + + if (param->p_key) + { + key = (uint8_t *) XX_Malloc(sizeof(uint8_t)*IOC_FM_PCD_MAX_SIZE_OF_KEY); + if (!key) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD key")); + } + memset(key, 0, sizeof(uint8_t)*IOC_FM_PCD_MAX_SIZE_OF_KEY); + + if (copy_from_user(key, param->p_key, param->key_size)) + { + XX_Free(key); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + param->p_key = key; + } + + if (param->p_mask) + { + mask = (uint8_t *) XX_Malloc(sizeof(uint8_t)*IOC_FM_PCD_MAX_SIZE_OF_KEY); + if (!mask) + { + if (key) + XX_Free(key); + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD mask")); + } + memset(mask, 0, sizeof(uint8_t)*IOC_FM_PCD_MAX_SIZE_OF_KEY); + + if (copy_from_user(mask, param->p_mask, param->key_size)) + { + if (mask) + XX_Free(mask); + if (key) + XX_Free(key); + XX_Free(param); + RETURN_ERROR(MINOR, err, NO_MSG); + } + param->p_mask = mask; + } + + err = FM_PCD_CcNodeModifyKey(p_LnxWrpFmDev->h_PcdDev, + param->id, + param->key_indx, + param->key_size, + param->p_key, + param->p_mask); + if (mask) + XX_Free(mask); + if (key) + XX_Free(key); + XX_Free(param); + break; + } + + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("IOCTL cmd (0x%08x):(0x%02x:0x%02x)!", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd))); + break; + } + + return err; +} + +t_Error LnxwrpFmIOCTL(t_LnxWrpFmDev *p_LnxWrpFmDev, unsigned int cmd, unsigned long arg, bool compat) +{ + t_Error err = E_READ_FAILED; + + DBG(TRACE, ("p_LnxWrpFmDev - 0x%08lx, cmd - 0x%08x, arg - 0x%08lx \n", (uintptr_t)p_LnxWrpFmDev, cmd, arg)); + + switch (cmd) + { + case FM_IOC_SET_PORTS_BANDWIDTH: + { + ioc_fm_port_bandwidth_params *param; + + ASSERT_COND(sizeof(t_FmPortsBandwidthParams) == sizeof(ioc_fm_port_bandwidth_params)); + + param = (ioc_fm_port_bandwidth_params*) XX_Malloc(sizeof(ioc_fm_port_bandwidth_params)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_port_bandwidth_params*)compat_ptr(arg), sizeof(ioc_fm_port_bandwidth_params))) + { + XX_Free(param); + return err; + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_port_bandwidth_params*)arg, sizeof(ioc_fm_port_bandwidth_params))) + { + XX_Free(param); + return err; + } + } + + err = FM_SetPortsBandwidth(p_LnxWrpFmDev->h_Dev, (t_FmPortsBandwidthParams*) param); + XX_Free(param); + return err; + } + + case FM_IOC_GET_REVISION: + { + 
ioc_fm_revision_info_t *param; + + ASSERT_COND(sizeof(t_FmRevisionInfo) == sizeof(ioc_fm_revision_info_t)); + + param = (ioc_fm_revision_info_t *) XX_Malloc(sizeof(ioc_fm_revision_info_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + + FM_GetRevision(p_LnxWrpFmDev->h_Dev, (t_FmRevisionInfo*)param); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_to_user((ioc_fm_revision_info_t *)compat_ptr(arg), + param, + sizeof(ioc_fm_revision_info_t))) + err = E_WRITE_FAILED; + else + err = E_OK; + } + else +#endif + { + if (copy_to_user((ioc_fm_revision_info_t *)arg, + param, + sizeof(ioc_fm_revision_info_t))) + err = E_WRITE_FAILED; + else + err = E_OK; + } + + XX_Free(param); + return err; + } + + case FM_IOC_SET_COUNTER: + { + ioc_fm_counters_params_t *param; + + param = (ioc_fm_counters_params_t *) XX_Malloc(sizeof(ioc_fm_counters_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_counters_params_t *)compat_ptr(arg), sizeof(ioc_fm_counters_params_t))) + { + XX_Free(param); + return err; + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_counters_params_t *)arg, sizeof(ioc_fm_counters_params_t))) + { + XX_Free(param); + return err; + } + } + + err = FM_ModifyCounter(p_LnxWrpFmDev->h_Dev, param->cnt, param->val); + + XX_Free(param); + return err; + } + + case FM_IOC_GET_COUNTER: + { + ioc_fm_counters_params_t *param; + + param = (ioc_fm_counters_params_t *) XX_Malloc(sizeof(ioc_fm_counters_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PCD")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_counters_params_t *)compat_ptr(arg), sizeof(ioc_fm_counters_params_t))) + { + XX_Free(param); + return err; + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_counters_params_t *)arg, sizeof(ioc_fm_counters_params_t))) + { + XX_Free(param); + return err; + } + } + + param->val = FM_GetCounter(p_LnxWrpFmDev->h_Dev, param->cnt); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_to_user((ioc_fm_counters_params_t *)compat_ptr(arg), param, sizeof(ioc_fm_counters_params_t))) + err = E_WRITE_FAILED; + } + else +#endif + { + if (copy_to_user((ioc_fm_counters_params_t *)arg, param, sizeof(ioc_fm_counters_params_t))) + err = E_WRITE_FAILED; + } + + XX_Free(param); + return err; + } + + case FM_IOC_FORCE_INTR: + { + ioc_fm_exceptions param; + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (get_user(param, (ioc_fm_exceptions*) compat_ptr(arg))) + break; + } + else +#endif + { + if (get_user(param, (ioc_fm_exceptions*)arg)) + break; + } + + return FM_ForceIntr(p_LnxWrpFmDev->h_Dev, (e_FmExceptions)param); + } + + default: + return LnxwrpFmPcdIOCTL(p_LnxWrpFmDev, cmd, arg, compat); + } + + RETURN_ERROR(MINOR, E_INVALID_OPERATION, ("IOCTL FM")); +} + +t_Error LnxwrpFmPortIOCTL(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev, unsigned int cmd, unsigned long arg, bool compat) +{ + t_Error err = E_READ_FAILED; + DBG(TRACE, ("p_LnxWrpFmPortDev - 0x%08lx, cmd - 0x%08x, arg - 0x%08lx", (uintptr_t)p_LnxWrpFmPortDev, cmd, arg)); + + switch (cmd) + { + case FM_PORT_IOC_DISABLE: + FM_PORT_Disable(p_LnxWrpFmPortDev->h_Dev); + return E_OK; + + case FM_PORT_IOC_ENABLE: + FM_PORT_Enable(p_LnxWrpFmPortDev->h_Dev); + return E_OK; + + case FM_PORT_IOC_SET_ERRORS_ROUTE: + { + ioc_fm_port_frame_err_select_t errs; + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (get_user(errs, 
(ioc_fm_port_frame_err_select_t*)compat_ptr(arg))) + break; + } + else +#endif + { + if (get_user(errs, (ioc_fm_port_frame_err_select_t*)arg)) + break; + } + + return FM_PORT_SetErrorsRoute(p_LnxWrpFmPortDev->h_Dev, (fmPortFrameErrSelect_t)errs); + } + + case FM_PORT_IOC_SET_RATE_LIMIT: + { + ioc_fm_port_rate_limit_t *param; + + ASSERT_COND(sizeof(t_FmPortRateLimit) == sizeof(ioc_fm_port_rate_limit_t)); + + param = (ioc_fm_port_rate_limit_t *) XX_Malloc(sizeof(ioc_fm_port_rate_limit_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_port_rate_limit_t *)compat_ptr(arg), sizeof(ioc_fm_port_rate_limit_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_READ_FAILED, NO_MSG); + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_port_rate_limit_t *)arg, sizeof(ioc_fm_port_rate_limit_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_READ_FAILED, NO_MSG); + } + } + + err = FM_PORT_SetRateLimit(p_LnxWrpFmPortDev->h_Dev, (t_FmPortRateLimit *)param); + + XX_Free(param); + return err; + } + + case FM_PORT_IOC_REMOVE_RATE_LIMIT: + FM_PORT_DeleteRateLimit(p_LnxWrpFmPortDev->h_Dev); + return E_OK; + + case FM_PORT_IOC_ALLOC_PCD_FQIDS: + { + ioc_fm_port_pcd_fqids_params_t *param; + + if (!p_LnxWrpFmPortDev->pcd_owner_params.cba) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("No one to listen on this PCD!!!")); + + param = (ioc_fm_port_pcd_fqids_params_t *) XX_Malloc(sizeof(ioc_fm_port_pcd_fqids_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_port_pcd_fqids_params_t *)compat_ptr(arg), + sizeof(ioc_fm_port_pcd_fqids_params_t))) + { + XX_Free(param); + return err; + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_port_pcd_fqids_params_t *)arg, + sizeof(ioc_fm_port_pcd_fqids_params_t))) + { + XX_Free(param); + return err; + } + } + + if (p_LnxWrpFmPortDev->pcd_owner_params.cba(p_LnxWrpFmPortDev->pcd_owner_params.dev, + param->num_fqids, + param->alignment, + ¶m->base_fqid)) + { + XX_Free(param); + RETURN_ERROR(MINOR, E_INVALID_STATE, ("can't allocate fqids for PCD!!!")); + } + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_to_user((ioc_fm_port_pcd_fqids_params_t *)compat_ptr(arg), + param, sizeof(ioc_fm_port_pcd_fqids_params_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + } + } + else +#endif + { + if (copy_to_user((ioc_fm_port_pcd_fqids_params_t *)arg, + param, sizeof(ioc_fm_port_pcd_fqids_params_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + } + } + + XX_Free(param); + return E_OK; + } + + case FM_PORT_IOC_FREE_PCD_FQIDS: + { + uint32_t base_fqid; + + if (!p_LnxWrpFmPortDev->pcd_owner_params.cbf) + RETURN_ERROR(MINOR, E_INVALID_STATE, ("No one to listen on this PCD!!!")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (get_user(base_fqid, (uint32_t*) compat_ptr(arg))) + break; + } + else +#endif + { + if (get_user(base_fqid, (uint32_t*)arg)) + break; + } + + if (p_LnxWrpFmPortDev->pcd_owner_params.cbf(p_LnxWrpFmPortDev->pcd_owner_params.dev, base_fqid)) + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + + return E_OK; + } + +#if defined(CONFIG_COMPAT) + case FM_PORT_IOC_SET_PCD_COMPAT: +#endif + case FM_PORT_IOC_SET_PCD: + { + ioc_fm_port_pcd_params_t *port_pcd_params; + ioc_fm_port_pcd_prs_params_t *port_pcd_prs_params; + ioc_fm_port_pcd_cc_params_t *port_pcd_cc_params; + ioc_fm_port_pcd_kg_params_t 
*port_pcd_kg_params; + ioc_fm_port_pcd_plcr_params_t *port_pcd_plcr_params; + + long copy_fail = 0; + + ASSERT_COND(sizeof(t_FmPortPcdParams) == sizeof(ioc_fm_port_pcd_params_t)); + + port_pcd_params = (ioc_fm_port_pcd_params_t *) XX_Malloc( + sizeof(ioc_fm_port_pcd_params_t) + + sizeof(ioc_fm_port_pcd_prs_params_t) + + sizeof(ioc_fm_port_pcd_cc_params_t) + + sizeof(ioc_fm_port_pcd_kg_params_t) + + sizeof(ioc_fm_port_pcd_plcr_params_t)); + if (!port_pcd_params) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + + port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (port_pcd_params + 1); + port_pcd_cc_params = (ioc_fm_port_pcd_cc_params_t *) (port_pcd_prs_params + 1); + port_pcd_kg_params = (ioc_fm_port_pcd_kg_params_t *) (port_pcd_cc_params + 1); + port_pcd_plcr_params = (ioc_fm_port_pcd_plcr_params_t *) (port_pcd_kg_params + 1); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_port_pcd_params_t *compat_port_pcd_params; + ioc_fm_port_pcd_prs_params_t *same_port_pcd_prs_params; + ioc_compat_fm_port_pcd_cc_params_t *compat_port_pcd_cc_params; + ioc_compat_fm_port_pcd_kg_params_t *compat_port_pcd_kg_params; + ioc_compat_fm_port_pcd_plcr_params_t *compat_port_pcd_plcr_params; + + compat_port_pcd_params = (ioc_compat_fm_port_pcd_params_t *) XX_Malloc( + sizeof(ioc_compat_fm_port_pcd_params_t) + + sizeof(ioc_fm_port_pcd_prs_params_t) + + sizeof(ioc_compat_fm_port_pcd_cc_params_t) + + sizeof(ioc_compat_fm_port_pcd_kg_params_t) + + sizeof(ioc_compat_fm_port_pcd_plcr_params_t)); + if (!compat_port_pcd_params) + { + XX_Free(port_pcd_params); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + } + + same_port_pcd_prs_params = (ioc_fm_port_pcd_prs_params_t *) (compat_port_pcd_params + 1); + compat_port_pcd_cc_params = (ioc_compat_fm_port_pcd_cc_params_t *) (same_port_pcd_prs_params + 1); + compat_port_pcd_kg_params = (ioc_compat_fm_port_pcd_kg_params_t *) (compat_port_pcd_cc_params + 1); + compat_port_pcd_plcr_params = (ioc_compat_fm_port_pcd_plcr_params_t *) (compat_port_pcd_kg_params + 1); + + /* Pseudo-while */ + while (!(copy_fail = copy_from_user(compat_port_pcd_params, + (ioc_compat_fm_port_pcd_params_t *)compat_ptr(arg), + sizeof(ioc_compat_fm_port_pcd_params_t)))) + { + compat_copy_fm_port_pcd(compat_port_pcd_params, port_pcd_params, COMPAT_US_TO_K); + + /* the prs member is the same, no compat structure...memcpy only */ + if (port_pcd_params->p_prs_params && !copy_fail) + { + if(!(copy_fail = copy_from_user(same_port_pcd_prs_params, + port_pcd_params->p_prs_params, + sizeof(ioc_fm_port_pcd_prs_params_t)))) + { + memcpy(port_pcd_prs_params, same_port_pcd_prs_params, sizeof(ioc_fm_port_pcd_prs_params_t)); + port_pcd_params->p_prs_params = port_pcd_prs_params; + } + else + break; + } + + if (port_pcd_params->p_cc_params && !copy_fail) + { + if(!(copy_fail = copy_from_user(compat_port_pcd_cc_params, + port_pcd_params->p_cc_params, + sizeof(ioc_compat_fm_port_pcd_cc_params_t)))) + { + port_pcd_params->p_cc_params = port_pcd_cc_params; + port_pcd_params->p_cc_params->cc_tree_id = compat_get_id2ptr(compat_port_pcd_cc_params->cc_tree_id); + } + else + break; + } + + if (port_pcd_params->p_kg_params && !copy_fail) + { + if(!(copy_fail = copy_from_user(compat_port_pcd_kg_params, + port_pcd_params->p_kg_params, + sizeof(ioc_compat_fm_port_pcd_kg_params_t)))) + { + compat_copy_fm_port_pcd_kg(compat_port_pcd_kg_params, port_pcd_kg_params, COMPAT_US_TO_K); + port_pcd_params->p_kg_params = port_pcd_kg_params; + } + else + break; + } + + if (port_pcd_params->p_plcr_params && 
!copy_fail) + { + if(!(copy_fail = copy_from_user(compat_port_pcd_plcr_params, + port_pcd_params->p_plcr_params, + sizeof(ioc_compat_fm_port_pcd_plcr_params_t)))) + { + port_pcd_params->p_plcr_params = port_pcd_plcr_params; + port_pcd_params->p_plcr_params->plcr_profile_id = compat_ptr(compat_port_pcd_plcr_params->plcr_profile_id); + } + } + + /* always run once! */ + break; + } + + XX_Free(compat_port_pcd_params); + } + else +#endif + { + /* Pseudo-while */ + while (!(copy_fail = copy_from_user(port_pcd_params, + (ioc_fm_port_pcd_params_t *)arg, + sizeof(ioc_fm_port_pcd_params_t)))) + { + if (port_pcd_params->p_prs_params && !copy_fail) + { + if (!(copy_fail = copy_from_user(port_pcd_prs_params, + port_pcd_params->p_prs_params, + sizeof(ioc_fm_port_pcd_prs_params_t)))) + port_pcd_params->p_prs_params = port_pcd_prs_params; + else + break; + } + + if (port_pcd_params->p_cc_params && !copy_fail) + { + if (!(copy_fail = copy_from_user(port_pcd_cc_params, + port_pcd_params->p_cc_params, + sizeof(ioc_fm_port_pcd_cc_params_t)))) + port_pcd_params->p_cc_params = port_pcd_cc_params; + else + break; + } + + if (port_pcd_params->p_kg_params && !copy_fail) + { + if (!(copy_fail = copy_from_user(port_pcd_kg_params, + port_pcd_params->p_kg_params, + sizeof(ioc_fm_port_pcd_kg_params_t)))) + port_pcd_params->p_kg_params = port_pcd_kg_params; + else + break; + } + + if (port_pcd_params->p_plcr_params && !copy_fail) + { + if (!(copy_fail = copy_from_user(port_pcd_plcr_params, + port_pcd_params->p_plcr_params, + sizeof(ioc_fm_port_pcd_plcr_params_t)))) + port_pcd_params->p_plcr_params = port_pcd_plcr_params; + } + + /* always run once! */ + break; + } + } + + if (!copy_fail) + err = FM_PORT_SetPCD(p_LnxWrpFmPortDev->h_Dev, (t_FmPortPcdParams*) port_pcd_params); + else + err = E_READ_FAILED; + + XX_Free(port_pcd_params); + + return err; + } + + case FM_PORT_IOC_DELETE_PCD: + return FM_PORT_DeletePCD(p_LnxWrpFmPortDev->h_Dev); + +#if defined(CONFIG_COMPAT) + case FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME_COMPAT: +#endif + case FM_PORT_IOC_PCD_KG_MODIFY_INITIAL_SCHEME: + { + ioc_fm_pcd_kg_scheme_select_t *param; + + ASSERT_COND(sizeof(t_FmPcdKgSchemeSelect) == sizeof(ioc_fm_pcd_kg_scheme_select_t)); + + param = (ioc_fm_pcd_kg_scheme_select_t *) XX_Malloc( + sizeof(ioc_fm_pcd_kg_scheme_select_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_kg_scheme_select_t *compat_param; + + compat_param = (ioc_compat_fm_pcd_kg_scheme_select_t *) XX_Malloc( + sizeof(ioc_compat_fm_pcd_kg_scheme_select_t)); + if (!compat_param){ + XX_Free(param); + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + } + + if (copy_from_user(compat_param, + (ioc_compat_fm_pcd_kg_scheme_select_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_kg_scheme_select_t))) + { + XX_Free(param); + XX_Free(compat_param); + RETURN_ERROR(MAJOR, E_READ_FAILED, NO_MSG); + } + + compat_copy_fm_pcd_kg_scheme_select(compat_param, param, COMPAT_US_TO_K); + + XX_Free(compat_param); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_kg_scheme_select_t *)arg, + sizeof(ioc_fm_pcd_kg_scheme_select_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_READ_FAILED, NO_MSG); + } + } + + err = FM_PORT_PcdKgModifyInitialScheme(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdKgSchemeSelect *)param); + + XX_Free(param); + return err; + } + +#if defined(CONFIG_COMPAT) + case FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE_COMPAT: +#endif + case FM_PORT_IOC_PCD_PLCR_MODIFY_INITIAL_PROFILE: + { 
+ ioc_fm_obj_t id; + + memset(&id, 0 , sizeof(ioc_fm_obj_t)); +#if defined(CONFIG_COMPAT) + if (compat) { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_ptr(compat_id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return FM_PORT_PcdPlcrModifyInitialProfile(p_LnxWrpFmPortDev->h_Dev, id.obj); + } + +#if defined(CONFIG_COMPAT) + case FM_PORT_IOC_PCD_KG_BIND_SCHEMES_COMPAT: +#endif + case FM_PORT_IOC_PCD_KG_BIND_SCHEMES: + { + ioc_fm_pcd_port_schemes_params_t *param; + + ASSERT_COND(sizeof(t_FmPcdPortSchemesParams) == sizeof(ioc_fm_pcd_port_schemes_params_t)); + + param = (ioc_fm_pcd_port_schemes_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_port_schemes_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + + memset(¶m, 0 , sizeof(ioc_fm_pcd_port_schemes_params_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_port_schemes_params_t compat_param; + + if (copy_from_user(&compat_param, + (ioc_compat_fm_pcd_port_schemes_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_port_schemes_params_t))) + break; + + compat_copy_fm_pcd_kg_schemes_params(&compat_param, param, COMPAT_US_TO_K); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_port_schemes_params_t *) arg, + sizeof(ioc_fm_pcd_port_schemes_params_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + } + } + + err = FM_PORT_PcdKgBindSchemes(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPortSchemesParams *)param); + + XX_Free(param); + return err; + } + +#if defined(CONFIG_COMPAT) + case FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES_COMPAT: +#endif + case FM_PORT_IOC_PCD_KG_UNBIND_SCHEMES: + { + ioc_fm_pcd_port_schemes_params_t *param; + + ASSERT_COND(sizeof(t_FmPcdPortSchemesParams) == sizeof(ioc_fm_pcd_port_schemes_params_t)); + + param = (ioc_fm_pcd_port_schemes_params_t *) XX_Malloc( + sizeof(ioc_fm_pcd_port_schemes_params_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + + memset(¶m, 0 , sizeof(ioc_fm_pcd_port_schemes_params_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_pcd_port_schemes_params_t compat_param; + + if (copy_from_user(&compat_param, + (ioc_compat_fm_pcd_port_schemes_params_t *) compat_ptr(arg), + sizeof(ioc_compat_fm_pcd_port_schemes_params_t))) + break; + + compat_copy_fm_pcd_kg_schemes_params(&compat_param, param, COMPAT_US_TO_K); + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_port_schemes_params_t *) arg, + sizeof(ioc_fm_pcd_port_schemes_params_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + } + } + + err = FM_PORT_PcdKgUnbindSchemes(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPortSchemesParams *)param); + + XX_Free(param); + return err; + } + + case FM_PORT_IOC_PCD_PRS_MODIFY_START_OFFSET: + { + ioc_fm_pcd_prs_start_t *param; + + ASSERT_COND(sizeof(t_FmPcdPrsStart) == sizeof(ioc_fm_pcd_prs_start_t)); + + param = (ioc_fm_pcd_prs_start_t *) XX_Malloc(sizeof(ioc_fm_pcd_prs_start_t)); + if (!param) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("IOCTL FM PORT")); + +#if defined(CONFIG_COMPAT) + if (compat) + { + if (copy_from_user(param, (ioc_fm_pcd_prs_start_t *)compat_ptr(arg), + sizeof(ioc_fm_pcd_prs_start_t))) + { + XX_Free(param); + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + } + } + else +#endif + { + if (copy_from_user(param, (ioc_fm_pcd_prs_start_t *)arg, + sizeof(ioc_fm_pcd_prs_start_t))) + { + 
XX_Free(param); + RETURN_ERROR(MAJOR, E_WRITE_FAILED, NO_MSG); + } + } + err = FM_PORT_PcdPrsModifyStartOffset(p_LnxWrpFmPortDev->h_Dev, (t_FmPcdPrsStart *)param); + + XX_Free(param); + return err; + } + + case FM_PORT_IOC_PCD_PLCR_ALLOC_PROFILES: + { + uint16_t num; + if (get_user(num, (uint16_t*) arg)) + break; + return FM_PORT_PcdPlcrAllocProfiles(p_LnxWrpFmPortDev->h_Dev, num); + } + + case FM_PORT_IOC_PCD_PLCR_FREE_PROFILES: + return FM_PORT_PcdPlcrFreeProfiles(p_LnxWrpFmPortDev->h_Dev); + + case FM_PORT_IOC_DETACH_PCD: + return FM_PORT_DetachPCD(p_LnxWrpFmPortDev->h_Dev); + + case FM_PORT_IOC_ATTACH_PCD: + return FM_PORT_AttachPCD(p_LnxWrpFmPortDev->h_Dev); + +#if defined(CONFIG_COMPAT) + case FM_PORT_IOC_PCD_CC_MODIFY_TREE_COMPAT: +#endif + case FM_PORT_IOC_PCD_CC_MODIFY_TREE: + { + ioc_fm_obj_t id; + + memset(&id, 0 , sizeof(ioc_fm_obj_t)); + +#if defined(CONFIG_COMPAT) + if (compat) + { + ioc_compat_fm_obj_t compat_id; + + if (copy_from_user(&compat_id, (ioc_compat_fm_obj_t *) compat_ptr(arg), sizeof(ioc_compat_fm_obj_t))) + break; + + id.obj = compat_get_id2ptr(compat_id.obj); + } + else +#endif + { + if (copy_from_user(&id, (ioc_fm_obj_t *) arg, sizeof(ioc_fm_obj_t))) + break; + } + + return FM_PORT_PcdCcModifyTree(p_LnxWrpFmPortDev->h_Dev, id.obj); + } + + default: + RETURN_ERROR(MINOR, E_INVALID_SELECTION, ("IOCTL cmd (0x%08x):(0x%02x:0x%02x)!", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd))); + } + + RETURN_ERROR(MINOR, E_INVALID_OPERATION, ("IOCTL port")); +} + +/*****************************************************************************/ +/* API routines for the FM Linux Device */ +/*****************************************************************************/ + +static int fm_open(struct inode *inode, struct file *file) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = NULL; + unsigned int major = imajor(inode); + unsigned int minor = iminor(inode); + struct device_node *fm_node; + static struct of_device_id fm_node_of_match[] = { + { .compatible = "fsl,fman", }, + { /* end of list */ }, + }; + + DBG(TRACE, ("Opening minor - %d - ", minor)); + + if (file->private_data != NULL) + return 0; + + /* Get all the FM nodes */ + for_each_matching_node(fm_node, fm_node_of_match) { + struct platform_device *of_dev; + + of_dev = of_find_device_by_node(fm_node); + if (unlikely(of_dev == NULL)) { + REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("fm id!")); + return -ENXIO; + } + + p_LnxWrpFmDev = (t_LnxWrpFmDev *)fm_bind(&of_dev->dev); + if (p_LnxWrpFmDev->major == major) + break; + fm_unbind((struct fm *)p_LnxWrpFmDev); + p_LnxWrpFmDev = NULL; + } + + if (!p_LnxWrpFmDev) + return -ENODEV; + + if (minor == DEV_FM_MINOR_BASE) + file->private_data = p_LnxWrpFmDev; + else if (minor == DEV_FM_PCD_MINOR_BASE) + file->private_data = p_LnxWrpFmDev; + else { + if (minor == DEV_FM_OH_PORTS_MINOR_BASE) + p_LnxWrpFmPortDev = &p_LnxWrpFmDev->hcPort; + else if ((minor > DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) + p_LnxWrpFmPortDev = &p_LnxWrpFmDev->opPorts[minor-DEV_FM_OH_PORTS_MINOR_BASE-1]; + else if ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) + p_LnxWrpFmPortDev = &p_LnxWrpFmDev->rxPorts[minor-DEV_FM_RX_PORTS_MINOR_BASE]; + else if ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS)) + p_LnxWrpFmPortDev = &p_LnxWrpFmDev->txPorts[minor-DEV_FM_TX_PORTS_MINOR_BASE]; + else + return -EINVAL; + + /* if trying to open port, check if it initialized */ + if (!p_LnxWrpFmPortDev->h_Dev) + return -ENODEV; + + 
p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *)fm_port_bind(p_LnxWrpFmPortDev->dev); + file->private_data = p_LnxWrpFmPortDev; + fm_unbind((struct fm *)p_LnxWrpFmDev); + } + + if (file->private_data == NULL) + return -ENXIO; + + return 0; +} + +static int fm_close(struct inode *inode, struct file *file) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev; + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + unsigned int minor = iminor(inode); + int err = 0; + + DBG(TRACE, ("Closing minor - %d - ", minor)); + + if ((minor == DEV_FM_MINOR_BASE) || + (minor == DEV_FM_PCD_MINOR_BASE)) + { + p_LnxWrpFmDev = (t_LnxWrpFmDev*)file->private_data; + if (!p_LnxWrpFmDev) + return -ENODEV; + fm_unbind((struct fm *)p_LnxWrpFmDev); + } + else if (((minor >= DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) || + ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) || + ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS))) + { + p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev*)file->private_data; + if (!p_LnxWrpFmPortDev) + return -ENODEV; + fm_port_unbind((struct fm_port *)p_LnxWrpFmPortDev); + } + + return err; +} + +static int fm_ioctls(unsigned int minor, struct file *file, unsigned int cmd, unsigned long arg, bool compat) +{ + DBG(TRACE, ("IOCTL minor - %u, cmd - 0x%08x, arg - 0x%08lx \n", minor, cmd, arg)); + + if ((minor == DEV_FM_MINOR_BASE) || + (minor == DEV_FM_PCD_MINOR_BASE)) + { + t_LnxWrpFmDev *p_LnxWrpFmDev = ((t_LnxWrpFmDev*)file->private_data); + if (!p_LnxWrpFmDev) + return -ENODEV; + if (LnxwrpFmIOCTL(p_LnxWrpFmDev, cmd, arg, compat)) + return -EFAULT; + } + else if (((minor >= DEV_FM_OH_PORTS_MINOR_BASE) && (minor < DEV_FM_RX_PORTS_MINOR_BASE)) || + ((minor >= DEV_FM_RX_PORTS_MINOR_BASE) && (minor < DEV_FM_TX_PORTS_MINOR_BASE)) || + ((minor >= DEV_FM_TX_PORTS_MINOR_BASE) && (minor < DEV_FM_MAX_MINORS))) + { + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = ((t_LnxWrpFmPortDev*)file->private_data); + if (!p_LnxWrpFmPortDev) + return -ENODEV; + if (LnxwrpFmPortIOCTL(p_LnxWrpFmPortDev, cmd, arg, compat)) + return -EFAULT; + } + else + { + REPORT_ERROR(MINOR, E_INVALID_VALUE, ("minor")); + return -ENODEV; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long fm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned int minor = iminor(file->f_path.dentry->d_inode); + long res; + + fm_mutex_lock(); + res = fm_ioctls(minor, file, cmd, arg, true); + fm_mutex_unlock(); + + return res; +} +#endif + +static long fm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + unsigned int minor = iminor(file->f_path.dentry->d_inode); + long res; + + fm_mutex_lock(); + res = fm_ioctls(minor, file, cmd, arg, false); + fm_mutex_unlock(); + + return res; +} + +/* Globals for FM character device */ +struct file_operations fm_fops = +{ + .owner = THIS_MODULE, + .unlocked_ioctl = fm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = fm_compat_ioctl, +#endif + .open = fm_open, + .release = fm_close, +}; --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_ioctls_fm_compat.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_ioctls_fm_compat.c @@ -0,0 +1,789 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_fm_compat_ioctls.c + + @Description FM PCD compat functions + +*/ + +#if !defined(CONFIG_COMPAT) +#error "missing COMPAT layer..." +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "part_ext.h" +#include "fm_ioctls.h" +#include "fm_pcd_ioctls.h" +#include "fm_port_ioctls.h" +#include "lnxwrp_ioctls_fm_compat.h" + +/* debug dump */ +#if 0 +static void hex_dump(void * p_addr, unsigned int size) +{ + int i; + + for(i=0; i ", p);*/ + + if(!p) + return 0; + + for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++) + if(compat_ptr2id_array[k] == NULL) + { + compat_ptr2id_array[k] = p; + /*printk("0x%08x\n", k | COMPAT_PTR2ID_WATERMARK);*/ + return k | COMPAT_PTR2ID_WATERMARK; + } + + return 0; +} + +compat_uptr_t compat_get_ptr2id(void *p) +{ + compat_uptr_t k; + + /*printk("\ncompat_get_ptr2id(%p) -> ", p);*/ + + for(k=1; k < COMPAT_PTR2ID_ARRAY_MAX; k++) + if(compat_ptr2id_array[k] == p) { + /*printk("0x%08x\n", k | COMPAT_PTR2ID_WATERMARK);*/ + return k | COMPAT_PTR2ID_WATERMARK; + } + + return 0; +} + +void *compat_get_id2ptr(compat_uptr_t comp) +{ + + /*printk("\ncompat_get_id2ptr(0x%08x) -> ", comp);*/ + + if((COMPAT_PTR2ID_WM_MASK & comp) != COMPAT_PTR2ID_WATERMARK) { + /*printk("Error, invalid watermark!\n\n"); + dump_stack();*/ + return compat_ptr(comp); + } + + comp &= ~COMPAT_PTR2ID_WM_MASK; + + if((0 < comp) && (comp < COMPAT_PTR2ID_ARRAY_MAX) && (compat_ptr2id_array[comp] != NULL)) { + /*printk("%p\n", compat_ptr2id_array[comp]);*/ + return compat_ptr2id_array[comp]; + } + return NULL; +} +/* } maping kernel pointers w/ UserSpace id's */ + +static inline void compat_copy_fm_pcd_plcr_next_engine( + ioc_compat_fm_pcd_plcr_next_engine_params_u *compat_param, + ioc_fm_pcd_plcr_next_engine_params_u *param, + ioc_fm_pcd_engine next_engine, + uint8_t compat) +{ + switch (next_engine) + { + case e_IOC_FM_PCD_PLCR: + if (compat) + param->p_profile = compat_ptr(compat_param->p_profile); + else + compat_param->p_profile = ptr_to_compat(param->p_profile); + break; + + case 
e_IOC_FM_PCD_KG: + if (compat) + param->p_direct_scheme = compat_ptr(compat_param->p_direct_scheme); + else + compat_param->p_direct_scheme = ptr_to_compat(param->p_direct_scheme); + break; + + default: + if (compat) + param->action = compat_param->action; + else + compat_param->action = param->action; + } +} + +void compat_copy_fm_pcd_plcr_profile( + ioc_compat_fm_pcd_plcr_profile_params_t *compat_param, + ioc_fm_pcd_plcr_profile_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->modify = compat_param->modify; + + /* profile_select */ + if (!compat_param->modify) + { + param->profile_select.new_params.profile_type = + compat_param->profile_select.new_params.profile_type; + param->profile_select.new_params.p_port = + compat_ptr(compat_param->profile_select.new_params.p_port); + param->profile_select.new_params.relative_profile_id = + compat_param->profile_select.new_params.relative_profile_id; + } + else + param->profile_select.p_profile = + compat_ptr(compat_param->profile_select.p_profile); + + param->alg_selection = compat_param->alg_selection; + param->color_mode = compat_param->color_mode; + + /* both parameters in the union has the same size, so memcpy works */ + memcpy(¶m->color, &compat_param->color, sizeof(param->color)); + + memcpy(¶m->non_passthrough_alg_param, &compat_param->non_passthrough_alg_param, sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t)); + + param->next_engine_on_green = compat_param->next_engine_on_green; + param->next_engine_on_yellow = compat_param->next_engine_on_yellow; + param->next_engine_on_red = compat_param->next_engine_on_red; + + param->trap_profile_on_flow_A = compat_param->trap_profile_on_flow_A; + param->trap_profile_on_flow_B = compat_param->trap_profile_on_flow_B; + param->trap_profile_on_flow_C = compat_param->trap_profile_on_flow_C; + + param->id = compat_ptr(compat_param->id); + } + else + { + compat_param->modify = param->modify; + + /* profile_select */ + if(!param->modify){ + compat_param->profile_select.new_params.profile_type = + param->profile_select.new_params.profile_type; + compat_param->profile_select.new_params.p_port = + ptr_to_compat(param->profile_select.new_params.p_port); + compat_param->profile_select.new_params.relative_profile_id = + param->profile_select.new_params.relative_profile_id; + } + else + compat_param->profile_select.p_profile = + ptr_to_compat(param->profile_select.p_profile); + + compat_param->alg_selection = param->alg_selection; + compat_param->color_mode = param->color_mode; + + /* both parameters in the union has the same size, so memcpy works */ + memcpy(&compat_param->color, ¶m->color, sizeof(compat_param->color)); + + memcpy(&compat_param->non_passthrough_alg_param, ¶m->non_passthrough_alg_param, sizeof(ioc_fm_pcd_plcr_non_passthrough_alg_param_t)); + + compat_param->next_engine_on_green = param->next_engine_on_green; + compat_param->next_engine_on_yellow = param->next_engine_on_yellow; + compat_param->next_engine_on_red = param->next_engine_on_red; + + compat_param->trap_profile_on_flow_A = param->trap_profile_on_flow_A; + compat_param->trap_profile_on_flow_B = param->trap_profile_on_flow_B; + compat_param->trap_profile_on_flow_C = param->trap_profile_on_flow_C; + + compat_param->id = ptr_to_compat(param->id); + } + + compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_green, + ¶m->params_on_green, param->next_engine_on_green, compat); + + compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_yellow, + ¶m->params_on_yellow, param->next_engine_on_yellow, compat); + + 
compat_copy_fm_pcd_plcr_next_engine(&compat_param->params_on_red,
+            &param->params_on_red, param->next_engine_on_red, compat);
+}
+
+static inline void compat_copy_fm_pcd_cc_next_kg(
+        ioc_compat_fm_pcd_cc_next_kg_params_t *compat_param,
+        ioc_fm_pcd_cc_next_kg_params_t *param,
+        uint8_t compat)
+{
+    if (compat)
+    {
+        param->new_fqid = compat_param->new_fqid;
+        param->override_fqid = compat_param->override_fqid;
+        param->p_direct_scheme = compat_ptr(compat_param->p_direct_scheme);
+    }
+    else
+    {
+        compat_param->new_fqid = param->new_fqid;
+        compat_param->override_fqid = param->override_fqid;
+        compat_param->p_direct_scheme = ptr_to_compat(param->p_direct_scheme);
+    }
+}
+
+static inline void compat_copy_fm_pcd_cc_next_cc(
+        ioc_compat_fm_pcd_cc_next_cc_params_t *compat_param,
+        ioc_fm_pcd_cc_next_cc_params_t *param,
+        uint8_t compat)
+{
+    if (compat)
+        param->cc_node_id = compat_get_id2ptr(compat_param->cc_node_id);
+    else
+        compat_param->cc_node_id = compat_get_ptr2id(param->cc_node_id);
+}
+
+static inline void compat_copy_fm_pcd_cc_next_engine(
+        ioc_compat_fm_pcd_cc_next_engine_params_t *compat_param,
+        ioc_fm_pcd_cc_next_engine_params_t *param,
+        uint8_t compat)
+{
+    if (compat)
+    {
+        param->next_engine = compat_param->next_engine;
+
+        switch (compat_param->next_engine)
+        {
+            case e_IOC_FM_PCD_KG:
+                compat_copy_fm_pcd_cc_next_kg(&compat_param->params.kg_params, &param->params.kg_params, compat);
+                break;
+            case e_IOC_FM_PCD_CC:
+                compat_copy_fm_pcd_cc_next_cc(&compat_param->params.cc_params, &param->params.cc_params, compat);
+                break;
+            default:
+                memcpy(&param->params, &compat_param->params, sizeof(param->params));
+                break;
+        }
+#ifdef FM_PCD_CC_MANIP
+        param->p_manip = compat_ptr(compat_param->p_manip);
+#endif
+    }
+    else
+    {
+        compat_param->next_engine = param->next_engine;
+
+        switch (compat_param->next_engine)
+        {
+            case e_IOC_FM_PCD_KG:
+                compat_copy_fm_pcd_cc_next_kg(&compat_param->params.kg_params, &param->params.kg_params, compat);
+                break;
+            case e_IOC_FM_PCD_CC:
+                compat_copy_fm_pcd_cc_next_cc(&compat_param->params.cc_params, &param->params.cc_params, compat);
+                break;
+            default:
+                memcpy(&compat_param->params, &param->params, sizeof(compat_param->params));
+                break;
+        }
+
+#ifdef FM_PCD_CC_MANIP
+        compat_param->p_manip = ptr_to_compat(param->p_manip);
+#endif
+    }
+}
+
+void compat_copy_fm_pcd_cc_key(
+        ioc_compat_fm_pcd_cc_key_params_t *compat_param,
+        ioc_fm_pcd_cc_key_params_t *param,
+        uint8_t compat)
+{
+    if (compat)
+    {
+        param->p_key = compat_ptr(compat_param->p_key);
+        param->p_mask = compat_ptr(compat_param->p_mask);
+    }
+    else
+    {
+        compat_param->p_key = ptr_to_compat(param->p_key);
+        compat_param->p_mask = ptr_to_compat(param->p_mask);
+    }
+
+    compat_copy_fm_pcd_cc_next_engine(
+            &compat_param->cc_next_engine_params,
+            &param->cc_next_engine_params,
+            compat);
+}
+
+void compat_copy_fm_pcd_cc_node_modify_key_and_next_engine(
+        ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param,
+        ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param,
+        uint8_t compat)
+{
+    if (compat)
+    {
+        param->id = compat_ptr(compat_param->id);
+        param->key_indx = compat_param->key_indx;
+        param->key_size = compat_param->key_size;
+        compat_copy_fm_pcd_cc_key(
+                &compat_param->key_params,
+                &param->key_params,
+                compat);
+    }
+    else
+    {
+        compat_param->id = ptr_to_compat(param->id);
+        compat_param->key_indx = param->key_indx;
+        compat_param->key_size = param->key_size;
+        compat_copy_fm_pcd_cc_key(
+                &compat_param->key_params,
+                &param->key_params,
+                compat);
+    }
+}
+
+void 
compat_copy_fm_pcd_cc_node_modify_next_engine( + ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param, + ioc_fm_pcd_cc_node_modify_next_engine_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->id = compat_ptr(compat_param->id); + param->key_indx = compat_param->key_indx; + param->key_size = compat_param->key_size; + } + else + { + compat_param->id = ptr_to_compat(param->id); + compat_param->key_indx = param->key_indx; + compat_param->key_size = param->key_size; + } + + compat_copy_fm_pcd_cc_next_engine( + &compat_param->cc_next_engine_params, + ¶m->cc_next_engine_params, + compat); +} + +void compat_fm_pcd_cc_tree_modify_next_engine( + ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param, + ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->id = compat_ptr(compat_param->id); + param->grp_indx = compat_param->grp_indx; + param->indx = compat_param->indx; + } + else + { + compat_param->id = ptr_to_compat(param->id); + compat_param->grp_indx = param->grp_indx; + compat_param->indx = param->indx; + } + + compat_copy_fm_pcd_cc_next_engine( + &compat_param->cc_next_engine_params, + ¶m->cc_next_engine_params, + compat); +} + +void compat_copy_fm_pcd_cc_grp( + ioc_compat_fm_pcd_cc_grp_params_t *compat_param, + ioc_fm_pcd_cc_grp_params_t *param, + uint8_t compat) +{ + int k; + + if (compat) + { + param->num_of_distinction_units = compat_param->num_of_distinction_units; + memcpy(param->unit_ids, compat_param->unit_ids, IOC_FM_PCD_MAX_NUM_OF_CC_UNITS); + } + else + { + compat_param->num_of_distinction_units = param->num_of_distinction_units; + memcpy(compat_param->unit_ids, param->unit_ids, IOC_FM_PCD_MAX_NUM_OF_CC_UNITS); + } + + for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP; k++) + compat_copy_fm_pcd_cc_next_engine( + &compat_param->next_engine_per_entries_in_grp[k], + ¶m->next_engine_per_entries_in_grp[k], + compat); +} + +void compat_copy_fm_pcd_cc_tree( + ioc_compat_fm_pcd_cc_tree_params_t *compat_param, + ioc_fm_pcd_cc_tree_params_t *param, + uint8_t compat) +{ + int k; + + if (compat) + { + param->net_env_id = compat_ptr(compat_param->net_env_id); + param->num_of_groups = compat_param->num_of_groups; + + for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS; k++) + compat_copy_fm_pcd_cc_grp( + &compat_param->fm_pcd_cc_group_params[k], + ¶m->fm_pcd_cc_group_params[k], + compat); + param->id = compat_ptr(compat_param->id); + } + else + { + compat_param->net_env_id = ptr_to_compat(param->net_env_id); + compat_param->num_of_groups = param->num_of_groups; + for (k=0; k < IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS; k++) + compat_copy_fm_pcd_cc_grp( + &compat_param->fm_pcd_cc_group_params[k], + ¶m->fm_pcd_cc_group_params[k], + compat); + compat_param->id = ptr_to_compat(param->id); + } +} + +void compat_fm_pcd_prs_sw( + ioc_compat_fm_pcd_prs_sw_params_t *compat_param, + ioc_fm_pcd_prs_sw_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->override = compat_param->override; + param->size = compat_param->size; + param->base = compat_param->base; + param->p_code = compat_ptr(compat_param->p_code); + memcpy(param->sw_prs_data_params,compat_param->sw_prs_data_params,IOC_FM_PCD_PRS_NUM_OF_HDRS*sizeof(uint32_t)); + param->num_of_labels = compat_param->num_of_labels; + memcpy(param->labels_table,compat_param->labels_table,IOC_FM_PCD_PRS_NUM_OF_LABELS*sizeof(ioc_fm_pcd_prs_label_params_t)); + return; + } + + WARN(1, "\n\nFMD: fatal error, feature not implemented!\n\n"); +} + +void compat_copy_fm_pcd_kg_scheme( + 
ioc_compat_fm_pcd_kg_scheme_params_t *compat_param, + ioc_fm_pcd_kg_scheme_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->modify = compat_param->modify; + + /* scm_id */ + if(compat_param->modify) + param->scm_id.scheme_id = compat_ptr(compat_param->scm_id.scheme_id); + else + param->scm_id.relative_scheme_id = compat_param->scm_id.relative_scheme_id; + + param->always_direct = compat_param->always_direct; + /* netEnvParams */ + param->netEnvParams.net_env_id = compat_ptr(compat_param->netEnvParams.net_env_id); + param->netEnvParams.num_of_distinction_units = compat_param->netEnvParams.num_of_distinction_units; + memcpy(param->netEnvParams.unit_ids, + compat_param->netEnvParams.unit_ids, + IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + + param->use_hash = compat_param->use_hash; + memcpy(¶m->key_extract_and_hash_params, + &compat_param->key_extract_and_hash_params, + sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t)); + param->bypass_fqid_generation = compat_param->bypass_fqid_generation; + param->base_fqid = compat_param->base_fqid; + param->numOfUsedExtractedOrs = compat_param->numOfUsedExtractedOrs; + memcpy(param->extracted_ors, compat_param->extracted_ors, IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS * sizeof(ioc_fm_pcd_kg_extracted_or_params_t)); + param->next_engine = compat_param->next_engine; + + /* kg_next_engine_params */ + if(param->next_engine == e_IOC_FM_PCD_CC){ + param->kg_next_engine_params.cc.tree_id = compat_get_id2ptr(compat_param->kg_next_engine_params.cc.tree_id); + param->kg_next_engine_params.cc.grp_id = compat_param->kg_next_engine_params.cc.grp_id; + param->kg_next_engine_params.cc.plcr_next = compat_param->kg_next_engine_params.cc.plcr_next; + param->kg_next_engine_params.cc.bypass_plcr_profile_generation = compat_param->kg_next_engine_params.cc.bypass_plcr_profile_generation; + memcpy(¶m->kg_next_engine_params.cc.plcr_profile, + &compat_param->kg_next_engine_params.cc.plcr_profile, + sizeof(ioc_fm_pcd_kg_plcr_profile_t)); + } + else + memcpy(¶m->kg_next_engine_params, + &compat_param->kg_next_engine_params, + sizeof(param->kg_next_engine_params)); + + memcpy(¶m->scheme_counter, &compat_param->scheme_counter, sizeof(ioc_fm_pcd_kg_scheme_counter_t)); + param->id = compat_ptr(compat_param->id); + } + else + { + compat_param->modify = param->modify; + + /* scm_id */ + if(param->modify) + compat_param->scm_id.scheme_id = ptr_to_compat(param->scm_id.scheme_id); + else + compat_param->scm_id.relative_scheme_id = param->scm_id.relative_scheme_id; + + compat_param->always_direct = param->always_direct; + + /* netEnvParams */ + compat_param->netEnvParams.net_env_id = ptr_to_compat(param->netEnvParams.net_env_id); + compat_param->netEnvParams.num_of_distinction_units = param->netEnvParams.num_of_distinction_units; + memcpy(compat_param->netEnvParams.unit_ids, param->netEnvParams.unit_ids, IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + + compat_param->use_hash = param->use_hash; + memcpy(&compat_param->key_extract_and_hash_params, ¶m->key_extract_and_hash_params, sizeof(ioc_fm_pcd_kg_key_extract_and_hash_params_t)); + compat_param->bypass_fqid_generation = param->bypass_fqid_generation; + compat_param->base_fqid = param->base_fqid; + compat_param->numOfUsedExtractedOrs = param->numOfUsedExtractedOrs; + memcpy(compat_param->extracted_ors, param->extracted_ors, IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS * sizeof(ioc_fm_pcd_kg_extracted_or_params_t)); + compat_param->next_engine = param->next_engine; + + /* kg_next_engine_params */ + if(compat_param->next_engine == e_IOC_FM_PCD_CC){ 
+            compat_param->kg_next_engine_params.cc.tree_id = compat_get_ptr2id(param->kg_next_engine_params.cc.tree_id);
+            compat_param->kg_next_engine_params.cc.grp_id = param->kg_next_engine_params.cc.grp_id;
+            compat_param->kg_next_engine_params.cc.plcr_next = param->kg_next_engine_params.cc.plcr_next;
+            compat_param->kg_next_engine_params.cc.bypass_plcr_profile_generation = param->kg_next_engine_params.cc.bypass_plcr_profile_generation;
+            memcpy(&compat_param->kg_next_engine_params.cc.plcr_profile, &param->kg_next_engine_params.cc.plcr_profile, sizeof(ioc_fm_pcd_kg_plcr_profile_t));
+        }
+        else
+            memcpy(&compat_param->kg_next_engine_params, &param->kg_next_engine_params, sizeof(compat_param->kg_next_engine_params));
+
+        memcpy(&compat_param->scheme_counter, &param->scheme_counter, sizeof(ioc_fm_pcd_kg_scheme_counter_t));
+        compat_param->id = ptr_to_compat(param->id);
+    }
+}
+
+void compat_copy_fm_pcd_kg_scheme_select(
+        ioc_compat_fm_pcd_kg_scheme_select_t *compat_param,
+        ioc_fm_pcd_kg_scheme_select_t *param,
+        uint8_t compat)
+{
+    if (compat){
+        param->direct = compat_param->direct;
+        param->scheme_id = compat_ptr(compat_param->scheme_id);
+    }
+    else {
+        printk(" %s:%u feature not implemented... \n", __func__, __LINE__);
+    }
+}
+
+void compat_copy_fm_pcd_kg_schemes_params(
+        ioc_compat_fm_pcd_port_schemes_params_t *compat_param,
+        ioc_fm_pcd_port_schemes_params_t *param,
+        uint8_t compat)
+{
+    int k;
+
+    if (compat) {
+        param->num_of_schemes = compat_param->num_of_schemes;
+        for(k=0; k < IOC_FM_PCD_KG_NUM_OF_SCHEMES; k++)
+            param->scheme_ids[k] = compat_ptr(compat_param->schemes_ids[k]);
+    }
+    else {
+        printk(" %s:%u feature not implemented... \n", __func__, __LINE__);
+    }
+}
+
+void compat_copy_fm_port_pcd_kg(
+        ioc_compat_fm_port_pcd_kg_params_t *compat_param,
+        ioc_fm_port_pcd_kg_params_t *param,
+        uint8_t compat)
+{
+    if (compat){
+        uint8_t k;
+        param->num_of_schemes = compat_param->num_of_schemes;
+        for(k=0; k < param->num_of_schemes; k++)
+            param->schemes_ids[k] = compat_ptr(compat_param->schemes_ids[k]);
+        param->direct_scheme = compat_param->direct_scheme;
+        param->direct_scheme_id = compat_ptr(compat_param->direct_scheme_id);
+    }
+    else {
+        printk(" %s:%u feature not implemented... \n", __func__, __LINE__);
\n", __func__, __LINE__);; + } +} + +void compat_copy_fm_port_pcd( + ioc_compat_fm_port_pcd_params_t *compat_param, + ioc_fm_port_pcd_params_t *param, + uint8_t compat) +{ + if (compat){ + param->pcd_support = compat_param->pcd_support; + param->net_env_id = compat_ptr(compat_param->net_env_id); + param->p_prs_params = compat_ptr(compat_param->p_prs_params); /* same structure */ + param->p_cc_params = compat_ptr(compat_param->p_cc_params); + param->p_kg_params = compat_ptr(compat_param->p_kg_params); + param->p_plcr_params = compat_ptr(compat_param->p_plcr_params); + } + else { + compat_param->pcd_support = param->pcd_support; + compat_param->net_env_id = ptr_to_compat(param->net_env_id); + compat_param->p_prs_params = ptr_to_compat(param->p_prs_params); /* same structure */ + compat_param->p_cc_params = ptr_to_compat(param->p_cc_params); + compat_param->p_kg_params = ptr_to_compat(param->p_kg_params); + compat_param->p_plcr_params = ptr_to_compat(param->p_plcr_params); + } +} + +void compat_copy_fm_pcd_net_env( + ioc_compat_fm_pcd_net_env_params_t *compat_param, + ioc_fm_pcd_net_env_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->num_of_distinction_units = compat_param->num_of_distinction_units; + memcpy(param->units, compat_param->units, sizeof(ioc_fm_pcd_distinction_unit_t)*IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + param->id = compat_ptr(compat_param->id); + } + else + { + compat_param->num_of_distinction_units = param->num_of_distinction_units; + memcpy(compat_param->units, param->units, sizeof(ioc_fm_pcd_distinction_unit_t)*IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS); + compat_param->id = ptr_to_compat(param->id); + } +} + +void compat_copy_fm_pcd_cc_node_modify_key( + ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param, + ioc_fm_pcd_cc_node_modify_key_params_t *param, + uint8_t compat) +{ + if (compat) + { + param->id = compat_ptr(compat_param->id); + param->key_indx = compat_param->key_indx; + param->key_size = compat_param->key_size; + param->p_key = (uint8_t *)compat_ptr(compat_param->p_key); + param->p_mask = (uint8_t *)compat_ptr(compat_param->p_mask); + } + else + { + compat_param->id = ptr_to_compat(param->id); + compat_param->key_indx = param->key_indx; + compat_param->key_size = param->key_size; + compat_param->p_key = ptr_to_compat((void *)param->p_key); + compat_param->p_mask = ptr_to_compat((void *)param->p_mask); + } +} + +void compat_copy_keys( + ioc_compat_keys_params_t *compat_param, + ioc_keys_params_t *param, + uint8_t compat) +{ + int k = 0; + if (compat){ + param->num_of_keys = compat_param->num_of_keys; + param->key_size = compat_param->key_size; + + for(k=0;kkey_params[k], + ¶m->key_params[k], + compat); + + } + else { + + compat_param->num_of_keys = param->num_of_keys; + compat_param->key_size = param->key_size; + + for(k=0;kkey_params[k], + ¶m->key_params[k], + compat); + } + + compat_copy_fm_pcd_cc_next_engine( + &compat_param->cc_next_engine_params_for_miss, + ¶m->cc_next_engine_params_for_miss, + compat); + +} + +void compat_copy_fm_pcd_cc_node( + ioc_compat_fm_pcd_cc_node_params_t *compat_param, + ioc_fm_pcd_cc_node_params_t *param, + uint8_t compat) +{ + if (compat) + { + /* no pointer inside, so sizeof US and KS are the same - memcpy is ok */ + memcpy(¶m->extract_cc_params, &compat_param->extract_cc_params, sizeof(ioc_fm_pcd_extract_entry_t)); + + compat_copy_keys(&compat_param->keys_params, ¶m->keys_params, compat); + + param->id = compat_get_id2ptr(compat_param->id); + } + else + { + /* no pointer inside, so sizeof US and KS are 
+        memcpy(&compat_param->extract_cc_params, &param->extract_cc_params, sizeof(ioc_fm_pcd_extract_entry_t));
+
+        compat_copy_keys(&compat_param->keys_params, &param->keys_params, compat);
+
+        compat_param->id = ptr_to_compat(param->id);
+    }
+}
+
--- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_ioctls_fm_compat.h
+++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_ioctls_fm_compat.h
@@ -0,0 +1,379 @@
+/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+
+ @File          lnxwrp_ioctls_fm_compat.h
+
+ @Description   FM PCD compat structures definition.
+
+*/
+
+#ifndef __FM_COMPAT_IOCTLS_H
+#define __FM_COMPAT_IOCTLS_H
+
+#include <linux/compat.h>
+
+#define COMPAT_K_TO_US 0 /* copy from Kernel to User */
+#define COMPAT_US_TO_K 1 /* copy from User to Kernel */
+
+#define COMPAT_COPY_K2US(dest, src, type) compat_copy_##type(src, dest, 0)
+#define COMPAT_COPY_US2K(dest, src, type) compat_copy_##type(dest, src, 1)
+
+/* mapping kernel pointers w/ UserSpace id's { */
+/* Because compat_ptr(ptr_to_compat(X)) != X, this way we cannot exchange pointers
+   back and forth (US - KS). compat_ptr is a cast and pointers are broken.
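+
+   For illustration, assuming a small fixed lookup table, the id helpers
+   declared below could be shaped roughly like this (the array name
+   compat_ptr2id_array is hypothetical, only the macro names are real):
+
+       static void *compat_ptr2id_array[COMPAT_PTR2ID_ARRAY_MAX];
+
+       compat_uptr_t compat_add_ptr2id(void *p)
+       {
+           compat_uptr_t i;
+
+           for (i = 1; i < COMPAT_PTR2ID_ARRAY_MAX; i++)
+               if (compat_ptr2id_array[i] == NULL) {
+                   compat_ptr2id_array[i] = p;
+                   return i | COMPAT_PTR2ID_WATERMARK;
+               }
+           return 0;
+       }
+
+       void *compat_get_id2ptr(compat_uptr_t comp)
+       {
+           if ((comp & COMPAT_PTR2ID_WM_MASK) != COMPAT_PTR2ID_WATERMARK)
+               return NULL;
+           return compat_ptr2id_array[comp & ~COMPAT_PTR2ID_WM_MASK];
+       }
+
+   The watermark in the upper 16 bits lets a generated id be told apart from
+   a plain 32-bit user value, and slot 0 is kept unused so that 0 can signal
+   "no id available".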
*/ +#define COMPAT_PTR2ID_ARRAY_MAX (256+1) /* first location is not used */ +#define COMPAT_PTR2ID_WATERMARK 0xface0000 +#define COMPAT_PTR2ID_WM_MASK 0xffff0000 + +void compat_del_ptr2id(void *p); +compat_uptr_t compat_add_ptr2id(void *p); +compat_uptr_t compat_get_ptr2id(void *p); +void *compat_get_id2ptr(compat_uptr_t comp); +/* } maping kernel pointers w/ UserSpace id's */ + +/* pcd compat structures { */ +typedef struct ioc_compat_fm_pcd_cc_node_remove_key_params_t { + compat_uptr_t id; + uint8_t key_indx; +} ioc_compat_fm_pcd_cc_node_remove_key_params_t; + +typedef union ioc_compat_fm_pcd_plcr_next_engine_params_u { + ioc_fm_pcd_done_action action; + compat_uptr_t p_profile; + compat_uptr_t p_direct_scheme; +} ioc_compat_fm_pcd_plcr_next_engine_params_u; + +typedef struct ioc_compat_fm_pcd_plcr_profile_params_t { + bool modify; + union { + struct { + ioc_fm_pcd_profile_type_selection profile_type; + compat_uptr_t p_port; + uint16_t relative_profile_id; + } new_params; + compat_uptr_t p_profile; + } profile_select; + ioc_fm_pcd_plcr_algorithm_selection alg_selection; + ioc_fm_pcd_plcr_color_mode color_mode; + + union { + ioc_fm_pcd_plcr_color dflt_color; + ioc_fm_pcd_plcr_color override; + } color; + + ioc_fm_pcd_plcr_non_passthrough_alg_param_t non_passthrough_alg_param; + + ioc_fm_pcd_engine next_engine_on_green; + ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_green; + + ioc_fm_pcd_engine next_engine_on_yellow; + ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_yellow; + + ioc_fm_pcd_engine next_engine_on_red; + ioc_compat_fm_pcd_plcr_next_engine_params_u params_on_red; + + bool trap_profile_on_flow_A; + bool trap_profile_on_flow_B; + bool trap_profile_on_flow_C; + compat_uptr_t id; +} ioc_compat_fm_pcd_plcr_profile_params_t; + +typedef struct ioc_compat_fm_obj_t { + compat_uptr_t obj; +} ioc_compat_fm_obj_t; + +typedef struct ioc_compat_fm_pcd_kg_scheme_select_t { + bool direct; + compat_uptr_t scheme_id; +} ioc_compat_fm_pcd_kg_scheme_select_t; + +typedef struct ioc_compat_fm_pcd_port_schemes_params_t { + uint8_t num_of_schemes; + compat_uptr_t schemes_ids [IOC_FM_PCD_KG_NUM_OF_SCHEMES]; +} ioc_compat_fm_pcd_port_schemes_params_t; + +typedef struct ioc_compat_fm_pcd_net_env_params_t { + uint8_t num_of_distinction_units; + ioc_fm_pcd_distinction_unit_t units[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; /* same structure*/ + compat_uptr_t id; +} ioc_compat_fm_pcd_net_env_params_t; + +typedef struct ioc_compat_fm_pcd_prs_sw_params_t { + bool override; + uint32_t size; + uint16_t base; + compat_uptr_t p_code; + uint32_t sw_prs_data_params[IOC_FM_PCD_PRS_NUM_OF_HDRS]; + uint8_t num_of_labels; + ioc_fm_pcd_prs_label_params_t labels_table[IOC_FM_PCD_PRS_NUM_OF_LABELS]; +} ioc_compat_fm_pcd_prs_sw_params_t; + +typedef struct ioc_compat_fm_pcd_cc_next_kg_params_t { + bool override_fqid; + uint32_t new_fqid; + compat_uptr_t p_direct_scheme; +} ioc_compat_fm_pcd_cc_next_kg_params_t; + +typedef struct ioc_compat_fm_pcd_cc_next_cc_params_t { + compat_uptr_t cc_node_id; +} ioc_compat_fm_pcd_cc_next_cc_params_t; + +typedef struct ioc_compat_fm_pcd_cc_next_engine_params_t { + ioc_fm_pcd_engine next_engine; + union { + ioc_compat_fm_pcd_cc_next_cc_params_t cc_params; /**< compat structure*/ + ioc_fm_pcd_cc_next_plcr_params_t plcr_params; /**< same structure*/ + ioc_fm_pcd_cc_next_enqueue_params_t enqueue_params; /**< same structure*/ + ioc_compat_fm_pcd_cc_next_kg_params_t kg_params; /**< compat structure*/ + } params; +#ifdef FM_PCD_CC_MANIP + compat_uptr_t p_manip; +#endif +} 
ioc_compat_fm_pcd_cc_next_engine_params_t; + + +typedef struct ioc_compat_fm_pcd_cc_grp_params_t { + uint8_t num_of_distinction_units; /**< up to 4 */ + uint8_t unit_ids [IOC_FM_PCD_MAX_NUM_OF_CC_UNITS]; + /**< Indexes of the units as defined in + FM_PCD_SetNetEnvCharacteristics */ + ioc_compat_fm_pcd_cc_next_engine_params_t next_engine_per_entries_in_grp[IOC_FM_PCD_MAX_NUM_OF_CC_ENTRIES_IN_GRP]; + /**< Max size is 16 - if only one group used */ +} ioc_compat_fm_pcd_cc_grp_params_t; + +typedef struct ioc_compat_fm_pcd_cc_tree_params_t { + compat_uptr_t net_env_id; + uint8_t num_of_groups; + ioc_compat_fm_pcd_cc_grp_params_t fm_pcd_cc_group_params [IOC_FM_PCD_MAX_NUM_OF_CC_GROUPS]; + compat_uptr_t id; +} ioc_compat_fm_pcd_cc_tree_params_t; + +typedef struct ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t { + compat_uptr_t id; + uint8_t grp_indx; + uint8_t indx; + ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params; +} ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t; + +typedef struct ioc_compat_fm_pcd_cc_key_params_t { + compat_uptr_t p_key; + compat_uptr_t p_mask; + ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params; /**< compat structure*/ +} ioc_compat_fm_pcd_cc_key_params_t; + +typedef struct ioc_compat_keys_params_t { + uint8_t num_of_keys; + uint8_t key_size; + ioc_compat_fm_pcd_cc_key_params_t key_params[IOC_FM_PCD_MAX_NUM_OF_KEYS]; /**< compat structure*/ + ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params_for_miss; /**< compat structure*/ +} ioc_compat_keys_params_t; + +typedef struct ioc_compat_fm_pcd_cc_node_params_t { + ioc_fm_pcd_extract_entry_t extract_cc_params; /**< same structure*/ + ioc_compat_keys_params_t keys_params; /**< compat structure*/ + compat_uptr_t id; +} ioc_compat_fm_pcd_cc_node_params_t; + +typedef struct ioc_compat_fm_pcd_cc_node_modify_key_params_t { + compat_uptr_t id; + uint8_t key_indx; + uint8_t key_size; + compat_uptr_t p_key; + compat_uptr_t p_mask; +} ioc_compat_fm_pcd_cc_node_modify_key_params_t; + +typedef struct ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t { + compat_uptr_t id; + uint8_t key_indx; + uint8_t key_size; + ioc_compat_fm_pcd_cc_key_params_t key_params; +} ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t; + +typedef struct ioc_compat_fm_port_pcd_plcr_params_t { + compat_uptr_t plcr_profile_id; +} ioc_compat_fm_port_pcd_plcr_params_t; + +typedef struct ioc_compat_fm_port_pcd_cc_params_t { + compat_uptr_t cc_tree_id; +} ioc_compat_fm_port_pcd_cc_params_t; + +typedef struct ioc_compat_fm_port_pcd_kg_params_t { + uint8_t num_of_schemes; + compat_uptr_t schemes_ids[IOC_FM_PCD_KG_NUM_OF_SCHEMES]; + bool direct_scheme; + compat_uptr_t direct_scheme_id; +} ioc_compat_fm_port_pcd_kg_params_t; + +typedef struct ioc_compat_fm_port_pcd_params_t { + ioc_fm_port_pcd_support pcd_support; + compat_uptr_t net_env_id; + compat_uptr_t p_prs_params; + compat_uptr_t p_cc_params; + compat_uptr_t p_kg_params; + compat_uptr_t p_plcr_params; +} ioc_compat_fm_port_pcd_params_t; + +typedef struct ioc_compat_fm_pcd_kg_cc_t { + compat_uptr_t tree_id; + uint8_t grp_id; + bool plcr_next; + bool bypass_plcr_profile_generation; + ioc_fm_pcd_kg_plcr_profile_t plcr_profile; +} ioc_compat_fm_pcd_kg_cc_t; + +typedef struct ioc_compat_fm_pcd_kg_scheme_params_t { + bool modify; + union + { + uint8_t relative_scheme_id; + compat_uptr_t scheme_id; + } scm_id; + bool always_direct; + struct + { + compat_uptr_t net_env_id; + uint8_t num_of_distinction_units; + uint8_t 
unit_ids[IOC_FM_PCD_MAX_NUM_OF_DISTINCTION_UNITS]; + } netEnvParams; + bool use_hash; + ioc_fm_pcd_kg_key_extract_and_hash_params_t key_extract_and_hash_params; + bool bypass_fqid_generation; + uint32_t base_fqid; + uint8_t numOfUsedExtractedOrs; + ioc_fm_pcd_kg_extracted_or_params_t extracted_ors[IOC_FM_PCD_KG_NUM_OF_GENERIC_REGS]; + ioc_fm_pcd_engine next_engine; + union{ + ioc_fm_pcd_done_action done_action; + ioc_fm_pcd_kg_plcr_profile_t plcr_profile; + ioc_compat_fm_pcd_kg_cc_t cc; + } kg_next_engine_params; + ioc_fm_pcd_kg_scheme_counter_t scheme_counter; + compat_uptr_t id; +} ioc_compat_fm_pcd_kg_scheme_params_t; + +typedef struct ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t { + compat_uptr_t id; + uint8_t key_indx; + uint8_t key_size; + ioc_compat_fm_pcd_cc_next_engine_params_t cc_next_engine_params; +} ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t; +/* } pcd compat structures */ + +/* pcd compat functions { */ +void compat_copy_fm_pcd_plcr_profile( + ioc_compat_fm_pcd_plcr_profile_params_t *compat_param, + ioc_fm_pcd_plcr_profile_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_key( + ioc_compat_fm_pcd_cc_key_params_t *compat_param, + ioc_fm_pcd_cc_key_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_node_modify_key_and_next_engine( + ioc_compat_fm_pcd_cc_node_modify_key_and_next_engine_params_t *compat_param, + ioc_fm_pcd_cc_node_modify_key_and_next_engine_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_node_modify_next_engine( + ioc_compat_fm_pcd_cc_node_modify_next_engine_params_t *compat_param, + ioc_fm_pcd_cc_node_modify_next_engine_params_t *param, + uint8_t compat); + +void compat_fm_pcd_cc_tree_modify_next_engine( + ioc_compat_fm_pcd_cc_tree_modify_next_engine_params_t *compat_param, + ioc_fm_pcd_cc_tree_modify_next_engine_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_grp( + ioc_compat_fm_pcd_cc_grp_params_t *compat_param, + ioc_fm_pcd_cc_grp_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_tree( + ioc_compat_fm_pcd_cc_tree_params_t *compat_param, + ioc_fm_pcd_cc_tree_params_t *param, + uint8_t compat); + +void compat_fm_pcd_prs_sw( + ioc_compat_fm_pcd_prs_sw_params_t *compat_param, + ioc_fm_pcd_prs_sw_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_kg_scheme( + ioc_compat_fm_pcd_kg_scheme_params_t *compat_param, + ioc_fm_pcd_kg_scheme_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_kg_scheme_select( + ioc_compat_fm_pcd_kg_scheme_select_t *compat_param, + ioc_fm_pcd_kg_scheme_select_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_kg_schemes_params( + ioc_compat_fm_pcd_port_schemes_params_t *compat_param, + ioc_fm_pcd_port_schemes_params_t *param, + uint8_t compat); + +void compat_copy_fm_port_pcd_kg( + ioc_compat_fm_port_pcd_kg_params_t *compat_param, + ioc_fm_port_pcd_kg_params_t *param, + uint8_t compat); + +void compat_copy_fm_port_pcd( + ioc_compat_fm_port_pcd_params_t *compat_param, + ioc_fm_port_pcd_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_net_env( + ioc_compat_fm_pcd_net_env_params_t *compat_param, + ioc_fm_pcd_net_env_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_node_modify_key( + ioc_compat_fm_pcd_cc_node_modify_key_params_t *compat_param, + ioc_fm_pcd_cc_node_modify_key_params_t *param, + uint8_t compat); + +void compat_copy_keys( + ioc_compat_keys_params_t *compat_param, + ioc_keys_params_t *param, + uint8_t compat); + +void compat_copy_fm_pcd_cc_node( + 
ioc_compat_fm_pcd_cc_node_params_t *compat_param, + ioc_fm_pcd_cc_node_params_t *param, + uint8_t compat); + +/* } pcd compat functions */ +#endif --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_resources.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_resources.c @@ -0,0 +1,1172 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_resources.c + + @Description FMD wrapper resource allocation functions. + +*/ + +#include +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ +#include +#include +#include +#include + +#include "lnxwrp_resources.h" + +extern int fsl_fman_phy_maxfrm; /* MAC file */ + +static struct device_node *match_mac_to_dpaa_port(struct device_node + *enet_mac_node) +{ + struct device_node *dpaa_node = NULL; + struct device_node *dpaa_itf = NULL; + + /* find DPAA node starting from root */ + dpaa_node = of_find_compatible_node(NULL, NULL, "fsl,dpaa"); + if (dpaa_node) { + /* for all dpaa ports check which one refers this mac node. 
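+
+		   For illustration, the device tree this walk assumes looks roughly
+		   like the following (node and label names are made up; only the
+		   compatibles and properties come from the code):
+
+		       fman0: fman@400000 {
+		           fman0_rx0: port@88000 { compatible = "fsl,fman-port-1g-rx"; };
+		           fman0_tx0: port@a8000 { compatible = "fsl,fman-port-1g-tx"; };
+		           enet0: ethernet@e0000 {
+		               compatible = "fsl,fman-1g-mac";
+		               fsl,port-handles = <&fman0_rx0 &fman0_tx0>;
+		           };
+		       };
+		       dpaa {
+		           compatible = "fsl,dpaa";
+		           ethernet@0 { fsl,fman-mac = <&enet0>; };
+		       };
+
+		   A MAC is matched to the dpaa interface whose fsl,fman-mac phandle
+		   points back at it, and a FMan port is treated as active only when
+		   such an interface exists and is marked available.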
*/ + for_each_child_of_node(dpaa_node, dpaa_itf) { + struct device_node *by_handle_enet_mac_node = NULL; + const phandle *phandle_prop = NULL; + int lenp = 0; + + phandle_prop = + (typeof(phandle_prop)) + of_get_property(dpaa_itf, "fsl,fman-mac", + &lenp); + if (phandle_prop == NULL) + continue; + + if (WARN_ON(lenp != sizeof(phandle))) + return NULL; + + by_handle_enet_mac_node = + of_find_node_by_phandle(*phandle_prop); + if (unlikely(by_handle_enet_mac_node == NULL)) + return NULL; + + /* check */ + if (by_handle_enet_mac_node == enet_mac_node) { + of_node_put(by_handle_enet_mac_node); + return dpaa_itf; + } + + of_node_put(by_handle_enet_mac_node); + } + of_node_put(dpaa_node); + } + + return NULL; +} + +static struct device_node *match_fman_port_to_mac(struct device_node *fm_node, + struct device_node + *fm_port_node) +{ + struct device_node *fm_node_idx = NULL; + + /* for all enet nodes (macs) check which one refers this FMan port. */ + for_each_child_of_node(fm_node, fm_node_idx) { + if (of_device_is_compatible(fm_node_idx, "fsl,fman-1g-mac") || + of_device_is_compatible(fm_node_idx, + "fsl,fman-10g-mac")) { + struct device_node *fman_port_node_rx = NULL; + struct device_node *fman_port_node_tx = NULL; + /* RX is first */ + fman_port_node_rx = of_parse_phandle(fm_node_idx, + "fsl,port-handles", 0); + if (unlikely(fman_port_node_rx == NULL)) + continue; + /* TX is second */ + fman_port_node_tx = of_parse_phandle(fm_node_idx, + "fsl,port-handles", 1); + if (unlikely(fman_port_node_tx == NULL)) { + of_node_put(fman_port_node_rx); + continue; + } + + /* check */ + if (fman_port_node_rx == fm_port_node + || fman_port_node_tx == fm_port_node) { + of_node_put(fman_port_node_rx); + of_node_put(fman_port_node_tx); + return fm_node_idx; + } + + of_node_put(fman_port_node_rx); + of_node_put(fman_port_node_tx); + } + } + + return NULL; +} + +static bool is_fman_port_active(struct device_node *fm_node, + struct device_node *fm_port_node) +{ + struct device_node *enet_mac_node = NULL; + struct device_node *itf_node = NULL; + + /* Which MAC node refers to this FMan port. */ + enet_mac_node = match_fman_port_to_mac(fm_node, fm_port_node); + + if (unlikely(enet_mac_node == NULL)) + return false; + + /* Which dpaa port node refers this MAC node. */ + itf_node = match_mac_to_dpaa_port(enet_mac_node); + of_node_put(enet_mac_node); + + if (unlikely(!itf_node)) + return false; + + /* check if itf (DPAA ports) is available. + * if available, means that the FMan port is + * also available - return true + */ + if (!of_device_is_available(itf_node)) { + of_node_put(itf_node); + return false; + } + of_node_put(itf_node); + + return true; +} + +int fm_set_active_fman_ports(struct platform_device *of_dev, + t_LnxWrpFmDev *p_LnxWrpFmDev) +{ + struct device_node *fm_node = NULL; + struct device_node *fm_port_node = NULL; + + memset(&p_LnxWrpFmDev->fm_active_ports_info, 0, + sizeof(struct fm_active_ports)); + + /* get FMan node */ + fm_node = of_dev->dev.of_node; + + /* for all ports which belong to this FMan, check if they are active. + * If active, set their parameters. 
*/ + for_each_child_of_node(fm_node, fm_port_node) { + + /* OH FMan ports */ + if (of_device_is_compatible(fm_port_node, + "fsl,fman-port-oh")) + /* all oh ports are active */ + p_LnxWrpFmDev->fm_active_ports_info.num_oh_ports++; + + if (!is_fman_port_active(fm_node, fm_port_node)) + continue; + + /* 10g TX FMan ports */ + if (of_device_is_compatible(fm_port_node, + "fsl,fman-port-10g-tx")) + p_LnxWrpFmDev->fm_active_ports_info.num_tx10_ports++; + + /* 10g RX FMan ports */ + else if (of_device_is_compatible(fm_port_node, + "fsl,fman-port-10g-rx")) + p_LnxWrpFmDev->fm_active_ports_info.num_rx10_ports++; + + /* 1G TX FMan ports */ + else if (of_device_is_compatible(fm_port_node, + "fsl,fman-port-1g-tx")) + p_LnxWrpFmDev->fm_active_ports_info.num_tx_ports++; + + /* 1G RX FMan ports */ + else if (of_device_is_compatible(fm_port_node, + "fsl,fman-port-1g-rx")) + p_LnxWrpFmDev->fm_active_ports_info.num_rx_ports++; + } + + /* If performance is needed no oh port is probed + * except the one used for host command. */ +#if defined(CONFIG_FMAN_DISABLE_OH_TO_REUSE_RESOURCES) + if (p_LnxWrpFmDev->fm_active_ports_info.num_oh_ports) + p_LnxWrpFmDev->fm_active_ports_info.num_oh_ports = 1; + + printk(KERN_WARNING "FMAN(%u)-Performance mode - no OH support...\n", + p_LnxWrpFmDev->id); +#endif + + return 0; +} + +#ifdef FM_FIFO_ALLOCATION_OLD_ALG +/* BPOOL size is constant and equal w/ DPA_BP_SIZE */ +static uint32_t get_largest_buf_size(uint32_t max_rx_frame_size, uint32_t buf_size) +{ + uint32_t priv_data_size = 16; /* DPA_PRIV_DATA_SIZE */ + uint32_t hash_results_size = 16; /* DPA_HASH_RESULTS_SIZE */ + uint32_t parse_results_size = + sizeof(t_FmPrsResult); /* DPA_PARSE_RESULTS_SIZE */ + uint32_t bp_head = priv_data_size + hash_results_size + + parse_results_size; /* DPA_BP_HEAD */ + uint32_t bp_size = bp_head + max_rx_frame_size + + NET_IP_ALIGN; /* DPA_BP_SIZE */ + + return CEIL_DIV(bp_size, buf_size); +} +#endif + +/* Calculate the fifosize based on MURAM allocation, number of ports, dpde + value and s/g software support (! Kernel does not suport s/g). + + Algorithm summary: + - Calculate the the minimum fifosize required for every type of port + (TX,RX for 1G, 2.5G and 10G). + - Set TX the minimum fifosize required. + - Distribute the remaining buffers (after all TX were set) to RX ports + based on: + 1G RX = Remaining_buffers * 1/(1+2.5+10) + 2.5G RX = Remaining_buffers * 2.5/(1+2.5+10) + 10G RX = Remaining_buffers * 10/(1+2.5+10) + - if the RX is smaller than the minimum required, then set the minimum + required + - In the end distribuite the leftovers if there are any (due to + unprecise calculus) or if over allocation cat some buffers from all RX + ports w/o pass over minimum required treshold, but if there must be + pass the treshold in order to cat the over allocation ,then this + configuration can not be set - KERN_ALERT. 
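+
+   Worked example (numbers chosen only for illustration): if 280 buffers
+   remain after the TX, OH and shared allocations and one 1G RX plus one
+   10G RX port are active, the throughput weights are 1 and 10 (sum 11),
+   so the 1G RX port gets CEIL(280 * 1/11) = 26 buffers and the 10G RX
+   port CEIL(280 * 10/11) = 255. That is one buffer more than available,
+   so the overflow handling below takes one buffer back from an RX port
+   that is still above its minimum (here the 10G port, leaving 254).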
+*/ +int fm_precalculate_fifosizes(t_LnxWrpFmDev *p_LnxWrpFmDev, int muram_fifo_size) +{ + + /* input parameters */ + struct fm_active_ports *fm_active_ports_info = NULL; + int num_1g_ports = 0; + int num_2g5_ports = 0; + int num_10g_ports = 0; + int num_oh_ports = 0; + + /* output parameters */ + struct fm_resource_settings *fm_resource_settings_info = NULL; + int oh_buff = 0; + int tx_1g_bufs = 0, rx_1g_bufs = 0; + int tx_2g5_bufs = 0, rx_2g5_bufs = 0; + int tx_10g_bufs = 0, rx_10g_bufs = 0; + int err = 0; + + /* throughput parameters: divide it by 10 when used */ + int gb1g = 10, gb2g5 = 25, gb10g = 100, gb_sum = 0; + + /* buffers parameters */ + int buf_size = 0x100; /* Buffer unit size */ + int total_no_buffers = 0; /* Calculus based on MURAM size for + fifos and buf. unit size */ + + int shared_ext_buff = 0; /* External buffers allocated - LLD + boundaries:DEFAULT_PORT_extraSizeOfFifo */ + + int min_tx_1g_2g5_bufs = 0; /* minimum TX1g buffers required + (see refman.) */ + int min_tx_10g_bufs = 0; /* minimum TX10g buffers required + (see refman.) */ + int min_rx_bufs = 0; /* minimum RX buffers required (see refman.) */ + + /* Buffer sizes calculus */ + int max_frame_size = + fsl_fman_phy_maxfrm ? fsl_fman_phy_maxfrm : + CONFIG_DPA_MAX_FRM_SIZE; + int remaining_bufs = 0; + int rx_1g_bufs_ceil = 0, rx_2g5_bufs_ceil = 0, rx_10g_bufs_ceil = 0; + int rx_2g5_max_bufs = 0, rx_10g_max_bufs = 0; + int rx_1g_used = 0, rx_1g_2g5_used = 0, rx_1g_10g_used =0, + rx_2g5_used = 0, rx_2g5_10g_used = 0, rx_1g_2g5_10g_used = 0; + + /* overflow checking */ + int tot_rx_buffs, tot_tx_buffs, tot_oh_buffs, tot_used_buffs, + leftovers = 0; + int overflow = 0; + bool loop = false; + + /* check input parameters correctness */ + ASSERT_COND(p_LnxWrpFmDev != NULL); + fm_active_ports_info = &p_LnxWrpFmDev->fm_active_ports_info; + fm_resource_settings_info = &p_LnxWrpFmDev->fm_resource_settings_info; + ASSERT_COND(fm_active_ports_info != NULL); + ASSERT_COND(fm_resource_settings_info != NULL); + ASSERT_COND(fm_active_ports_info->num_tx_ports == + fm_active_ports_info->num_rx_ports); + ASSERT_COND(fm_active_ports_info->num_tx25_ports == + fm_active_ports_info->num_tx25_ports); + ASSERT_COND(fm_active_ports_info->num_tx10_ports == + fm_active_ports_info->num_tx10_ports); + ASSERT_COND(max_frame_size != 0); + ASSERT_COND(muram_fifo_size != 0); + + /* set input parameters */ + num_1g_ports = fm_active_ports_info->num_tx_ports; + num_2g5_ports = fm_active_ports_info->num_tx25_ports; + num_10g_ports = fm_active_ports_info->num_tx10_ports; + num_oh_ports = fm_active_ports_info->num_oh_ports; + + /* throughput calculus */ + gb_sum = gb1g * num_1g_ports + gb2g5 * num_2g5_ports + + gb10g * num_10g_ports; /* divide it by 10 */ + + /* Base buffer calculus */ + oh_buff = DPDE_1G + 4; /* should be: + get_largest_buf_size(max_frame_size, buf_size), + but LLD: DPDE + 4 */ + total_no_buffers = muram_fifo_size / buf_size; + + min_tx_1g_2g5_bufs = CEIL_DIV(max_frame_size, buf_size) + + DPDE_1G + 3 + 1; /* +1 to handle Jumbo Frames */ + min_tx_10g_bufs = CEIL_DIV(max_frame_size, buf_size) + + DPDE_10G + 3 + 1; /* +1 to handle Jumbo Frames */ + + { +#ifdef FM_FIFO_ALLOCATION_OLD_ALG + uint8_t fm_rev_major = 0; + fm_rev_major = + (uint8_t) ((* + ((volatile uint32_t *) + UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr + + 0x000c30c4)) & 0xff00) >> 8); + + if (fm_rev_major < 4) + min_rx_bufs = + get_largest_buf_size(max_frame_size, + buf_size) + 7; + else +#endif + min_rx_bufs = 8; + } + + shared_ext_buff = num_10g_ports ? 
32 : 16; /* LLD boundaries: + DEFAULT_PORT_extraSizeOfFifo */ + + /* TX ports will have minimum required buffers + Calculus of the remaining buffers for all RX ports */ + tx_1g_bufs = num_1g_ports ? min_tx_1g_2g5_bufs : 0; + tx_2g5_bufs = num_2g5_ports ? min_tx_1g_2g5_bufs : 0; + tx_10g_bufs = num_10g_ports ? min_tx_10g_bufs : 0; + + remaining_bufs = total_no_buffers - + oh_buff * num_oh_ports - + num_1g_ports * min_tx_1g_2g5_bufs - + num_2g5_ports * min_tx_1g_2g5_bufs - + num_10g_ports * min_tx_10g_bufs - shared_ext_buff; + + if (remaining_bufs < 0) { + printk(KERN_ALERT + "This configuration will not work due to low number of" + " buffers (%u buffers)...\n", + total_no_buffers); + err = -1; + goto precalculated_fifosize_out; + } + + /* Per port buffer size calculus + . for TX ports give always minimum required + . for RX ports give whatever left scaled per port type */ + /* ------------------------------------------------------- */ + if (num_1g_ports) { + rx_1g_bufs_ceil = + (gb_sum / + 10) ? CEIL_DIV(((remaining_bufs * gb1g) / 10), + (gb_sum / 10)) : 0; + rx_1g_bufs = MAX(min_rx_bufs, rx_1g_bufs_ceil); + rx_1g_used = rx_1g_bufs - rx_1g_bufs_ceil; /* always >= 0 */ + /* distribute to 2.5g and 10g ports */ + rx_1g_2g5_used = + (num_2g5_ports + + num_10g_ports) ? CEIL_DIV(rx_1g_used * num_1g_ports * + num_2g5_ports, + num_2g5_ports + + num_10g_ports) : 0; + rx_1g_10g_used = + (num_2g5_ports + + num_10g_ports) ? CEIL_DIV(rx_1g_used * num_1g_ports * + num_10g_ports, + num_2g5_ports + + num_10g_ports) : 0; + } + + if (num_2g5_ports) { + rx_2g5_bufs_ceil = + (gb_sum / + 10) ? CEIL_DIV(((remaining_bufs * gb2g5) / 10), + (gb_sum / 10)) : 0; + rx_2g5_max_bufs = MAX(min_rx_bufs, rx_2g5_bufs_ceil); + rx_2g5_bufs = + MAX(min_rx_bufs, rx_2g5_max_bufs - rx_1g_2g5_used); + rx_2g5_used = rx_2g5_bufs - rx_2g5_bufs_ceil; /* always >= 0 */ + /* distribute to 10g ports */ + rx_2g5_10g_used = + num_10g_ports ? CEIL_DIV(rx_2g5_used * num_2g5_ports, + num_10g_ports) : 0; + } + + if (num_10g_ports) { + rx_10g_bufs_ceil = + (gb_sum / + 10) ? 
CEIL_DIV(((remaining_bufs * gb10g) / 10), + (gb_sum / 10)) : 0; + rx_10g_max_bufs = MAX(min_rx_bufs, rx_10g_bufs_ceil); + /* keep count of all distribution */ + rx_1g_2g5_10g_used = rx_1g_10g_used + rx_2g5_10g_used; + rx_10g_bufs = + MAX(min_rx_bufs, + rx_10g_max_bufs - rx_1g_2g5_10g_used); + } + + /* overflow-leftover calculus */ + tot_rx_buffs = rx_1g_bufs * num_1g_ports + + rx_2g5_bufs * num_2g5_ports + rx_10g_bufs * num_10g_ports; + tot_tx_buffs = tx_1g_bufs * num_1g_ports + + tx_2g5_bufs * num_2g5_ports + tx_10g_bufs * num_10g_ports; + tot_oh_buffs = oh_buff * num_oh_ports; + tot_used_buffs = + tot_oh_buffs + tot_tx_buffs + tot_rx_buffs + shared_ext_buff; + + overflow = tot_used_buffs - total_no_buffers; + /* used more than available */ + if (overflow > 0) { + loop = true; + while (overflow > 0 && loop) { + loop = false; + if (overflow && num_10g_ports + && rx_10g_bufs > min_rx_bufs) { + rx_10g_bufs--; + overflow -= num_10g_ports; + loop = true; + } + if (overflow && num_2g5_ports + && rx_2g5_bufs > min_rx_bufs) { + rx_2g5_bufs--; + overflow -= num_2g5_ports; + loop = true; + } + if (overflow && num_1g_ports + && rx_1g_bufs > min_rx_bufs) { + rx_1g_bufs--; + overflow -= num_1g_ports; + loop = true; + } + } + + if (overflow > 0) { + printk(KERN_ALERT + "This configuration will not work due to over" + " buffer allocation (%d buffers)...\n", + overflow); + err = -1; + goto precalculated_fifosize_out; + } + } + /* left a few buffers */ + else if (overflow < 0) { + leftovers = total_no_buffers - tot_used_buffs; + loop = true; + while (leftovers > 0 && loop) { + loop = false; + if (leftovers && num_1g_ports) { + rx_1g_bufs++; + leftovers -= num_1g_ports; + loop = true; + } + + if (leftovers && num_2g5_ports) { + rx_2g5_bufs++; + leftovers -= num_2g5_ports; + loop = true; + } + + if (leftovers && num_10g_ports) { + rx_10g_bufs++; + leftovers -= num_10g_ports; + loop = true; + } + } + } + + /* set fifosizes for this FMan ports */ + fm_resource_settings_info->tx1g_num_buffers = tx_1g_bufs; + fm_resource_settings_info->rx1g_num_buffers = rx_1g_bufs; + fm_resource_settings_info->tx2g5_num_buffers = tx_2g5_bufs; + fm_resource_settings_info->rx2g5_num_buffers = rx_2g5_bufs; + fm_resource_settings_info->tx10g_num_buffers = tx_10g_bufs; + fm_resource_settings_info->rx10g_num_buffers = rx_10g_bufs; + fm_resource_settings_info->oh_num_buffers = oh_buff; + fm_resource_settings_info->shared_ext_buffers = shared_ext_buff; + +precalculated_fifosize_out: + printk(KERN_INFO " FMAN(%u) Fifo size settings:\n", + p_LnxWrpFmDev->id); + printk(KERN_INFO " - Total buffers available(%u - 256B/buffer)\n", + total_no_buffers); + printk(KERN_INFO " - Total throughput(%uGbps)\n", (gb_sum / 10)); + printk(KERN_INFO " - Max frame size(%uB)\n", max_frame_size); + if (num_1g_ports) { + printk(KERN_INFO + " - 1G ports TX %u(%u bufs set (min: %u))\n", + num_1g_ports, tx_1g_bufs, min_tx_1g_2g5_bufs); + printk(KERN_INFO + " - 1G ports RX %u(%u bufs set (min: %u))\n", + num_1g_ports, rx_1g_bufs, min_rx_bufs); + } + if (num_2g5_ports) { + printk(KERN_INFO + " - 2.5G ports TX %u(%u bufs set (min: %u))\n", + num_2g5_ports, tx_2g5_bufs, min_tx_1g_2g5_bufs); + printk(KERN_INFO + " - 2.5G ports RX %u(%u bufs set (min: %u))\n", + num_2g5_ports, rx_2g5_bufs, min_rx_bufs); + } + if (num_10g_ports) { + printk(KERN_INFO + " - 10G ports TX %u(%u bufs set (min: %u))\n", + num_10g_ports, tx_10g_bufs, min_tx_10g_bufs); + printk(KERN_INFO + " - 10G ports RX %u(%u bufs set (min: %u))\n", + num_10g_ports, rx_10g_bufs, min_rx_bufs); + } + if 
(num_oh_ports) + printk(KERN_INFO " - OH-HC ports %u(%u)\n", num_oh_ports, + oh_buff); + printk(KERN_INFO " - Shared extra buffers(%u)\n", shared_ext_buff); + + return err; +} + +int fm_set_precalculate_fifosize(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = + (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + struct fm_resource_settings *fm_resource_settings_info = NULL; + struct fm_active_ports *fm_active_ports_info = NULL; + t_FmPortRsrc portRsrc; + t_Error errCode; + uint32_t buf_size = 0x100; + + ASSERT_COND(p_LnxWrpFmDev != NULL); + fm_resource_settings_info = &p_LnxWrpFmDev->fm_resource_settings_info; + fm_active_ports_info = &p_LnxWrpFmDev->fm_active_ports_info; + + memset(&portRsrc, 0, sizeof(t_FmPortRsrc)); + +/* IF 1G PORT */ + if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX) { + portRsrc.num = + fm_resource_settings_info->tx1g_num_buffers * buf_size; + portRsrc.extra = 0; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX) { + portRsrc.num = + fm_resource_settings_info->rx1g_num_buffers * buf_size; + portRsrc.extra = + fm_resource_settings_info->shared_ext_buffers * + buf_size; + } +/* IF 2.5G PORT */ + /* TODO: Not supported by LLD yet. */ + +/* IF 10G PORT */ + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX_10G) { + portRsrc.num = + fm_resource_settings_info->tx10g_num_buffers * + buf_size; + portRsrc.extra = 0; + } else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX_10G) { + portRsrc.num = + fm_resource_settings_info->rx10g_num_buffers * + buf_size; + portRsrc.extra = + fm_resource_settings_info->shared_ext_buffers * + buf_size; + } else { /* IF OH PORT */ + portRsrc.num = + fm_resource_settings_info->oh_num_buffers * buf_size; + portRsrc.extra = 0; + } + + errCode = FM_PORT_SetSizeOfFifo(p_LnxWrpFmPortDev->h_Dev, &portRsrc); + if (errCode != E_OK) { + printk(KERN_WARNING + "FM_PORT_SetSizeOfFifo failed (errCode:0x%2x)", + errCode); + return -EIO; + } + + return 0; +} + +/* Compute FMan open DMA based on total number of open DMAs and + * number of available FMan ports. + * + * By default 10g ports are set to input parameters. The other ports + * tries to keep the proportion rx=2tx open DMAs or thresholds. + * + * If leftovers, then those will be set as shared. + * + * If after computing overflow appears, then it decrements open DMA + * for all ports w/o cross the thresholds. If the thresholds are meet + * and is still overflow, then it returns error. 
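+ *
+ * Worked example (illustrative numbers, assuming the rx = 2*tx proportion
+ * mentioned above, i.e. FM_OPENDMA_RX_TX_RAPORT == 2): with 32 open DMAs,
+ * four active 1G ports and no 10G or OH ports, the weighted port count is
+ * 4*2 + 4 = 12, so every 1G TX port gets ROUND_DIV(32, 12) = 3 open DMAs
+ * and every 1G RX port ROUND_DIV(2*32, 12) = 5, which adds up to
+ * 4*(3+5) = 32 and needs no trimming.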
+ */ +int fm_precalculate_open_dma(t_LnxWrpFmDev *p_LnxWrpFmDev, + int max_fm_open_dma, + int default_tx_10g_dmas, + int default_rx_10g_dmas, + int min_tx_10g_treshold, int min_rx_10g_treshold) +{ + /* input parameters */ + struct fm_active_ports *fm_active_ports_info = NULL; + int num_1g_ports = 0; + int num_2g5_ports = 0; + int num_10g_ports = 0; + int num_oh_ports = 0; + + /* output parameters */ + struct fm_resource_settings *fm_resource_settings_info = NULL; + int tx_1g_dmas = 0, rx_1g_dmas = 0; + int tx_2g5_dmas = 0, rx_2g5_dmas = 0; + int tx_10g_dmas = 0, rx_10g_dmas = 0; + int oh_dmas = 0; + int shared_ext_open_dma = 0; + int err = 0; + + /* open dma calculus */ + int remaing_dmas = 0; + int rx_tx_raport = + FM_OPENDMA_RX_TX_RAPORT; /* RX = FM_OPENDMA_RX_TX_RAPORT *TX */ + int min_tx_1_2g5_treshold = 1; + int min_rx_1_2g5_treshold = 1; + int max_open_dma_treshold = 16; /* LLD: MAX_NUM_OF_DMAS */ + int max_ext_open_dma_treshold = 8; /* LLD: MAX_NUM_OF_EXTRA_DMAS */ + + int open_dmas_computed = 0; + int weighted_remaining_ports = 0; + int overflow = 0; + bool re_loop = false; + + /* check input parameters correctness */ + ASSERT_COND(p_LnxWrpFmDev != NULL); + fm_active_ports_info = &p_LnxWrpFmDev->fm_active_ports_info; + fm_resource_settings_info = &p_LnxWrpFmDev->fm_resource_settings_info; + ASSERT_COND(fm_active_ports_info != NULL); + ASSERT_COND(fm_resource_settings_info != NULL); + ASSERT_COND(fm_active_ports_info->num_tx_ports == + fm_active_ports_info->num_rx_ports); + ASSERT_COND(fm_active_ports_info->num_tx25_ports == + fm_active_ports_info->num_tx25_ports); + ASSERT_COND(fm_active_ports_info->num_tx10_ports == + fm_active_ports_info->num_tx10_ports); + ASSERT_COND(min_tx_10g_treshold <= max_open_dma_treshold); + ASSERT_COND(min_tx_10g_treshold <= max_open_dma_treshold); + + /* set input parameters */ + num_1g_ports = fm_active_ports_info->num_tx_ports; + num_2g5_ports = fm_active_ports_info->num_tx25_ports; + num_10g_ports = fm_active_ports_info->num_tx10_ports; + num_oh_ports = fm_active_ports_info->num_oh_ports; + + /* compute open DMAs per port */ + /* ------------------------------------------------------- */ + if (num_10g_ports) { + tx_10g_dmas = default_tx_10g_dmas; /* per 10G TX port */ + rx_10g_dmas = default_rx_10g_dmas; /* per 10G RX port */ + } + if (num_oh_ports) + oh_dmas = 1; /* per OH port */ + + /* should this be null? 
or LLD: + DEFAULT_PORT_extraNumOfOpenDmas:10g-8,else 1 */ + shared_ext_open_dma = 0; + + /* based on total number of ports set open DMAs for all other ports */ + remaing_dmas = max_fm_open_dma - + (oh_dmas * num_oh_ports) - + (tx_10g_dmas * num_10g_ports + rx_10g_dmas * num_10g_ports) - + shared_ext_open_dma; + + if (remaing_dmas < 0) { + printk(KERN_ALERT + "This configuration will not work due to low number" + " of open dmas (%u open dmas)...\n", + max_fm_open_dma); + err = -1; + goto precalculated_open_dma_out; + } + + weighted_remaining_ports = + /*tx */ num_1g_ports * rx_tx_raport + /*rx */ num_1g_ports + + /*tx */ num_2g5_ports * rx_tx_raport + /*rx */ num_2g5_ports; + + /* compute the other ports */ + if (num_1g_ports) { + tx_1g_dmas = + MAX(MIN + (ROUND_DIV + (remaing_dmas, weighted_remaining_ports), + max_open_dma_treshold), min_tx_1_2g5_treshold); + rx_1g_dmas = + MAX(MIN + (ROUND_DIV + ((remaing_dmas * rx_tx_raport), + weighted_remaining_ports), + max_open_dma_treshold), min_rx_1_2g5_treshold); + } + if (num_2g5_ports) { + tx_2g5_dmas = + MAX(MIN + (CEIL_DIV(remaing_dmas, weighted_remaining_ports), + max_open_dma_treshold), min_tx_1_2g5_treshold); + rx_2g5_dmas = + MAX(MIN + (CEIL_DIV + ((remaing_dmas * rx_tx_raport), + weighted_remaining_ports), + max_open_dma_treshold), min_rx_1_2g5_treshold); + + } + + /* Check if these settings is not exceding treshold */ + open_dmas_computed = num_1g_ports * tx_1g_dmas + + num_1g_ports * rx_1g_dmas + + num_2g5_ports * tx_2g5_dmas + + num_2g5_ports * rx_2g5_dmas + + num_10g_ports * tx_10g_dmas + + num_10g_ports * rx_10g_dmas + + num_oh_ports * oh_dmas + shared_ext_open_dma; + + /* overflow-leftover calculus */ + overflow = open_dmas_computed - max_fm_open_dma; + re_loop = true; + while (overflow > 0 && re_loop == true) { + re_loop = false; + if (num_1g_ports && overflow + && rx_1g_dmas > min_rx_1_2g5_treshold) { + rx_1g_dmas--; + overflow -= num_1g_ports; + re_loop = true; + } + if (num_2g5_ports && overflow + && rx_2g5_dmas > min_rx_1_2g5_treshold) { + rx_2g5_dmas--; + overflow -= num_2g5_ports; + re_loop = true; + } + if (num_10g_ports && overflow + && rx_10g_dmas > min_rx_10g_treshold) { + rx_10g_dmas--; + overflow -= num_10g_ports; + re_loop = true; + } + + if (num_1g_ports && overflow + && tx_1g_dmas > min_tx_1_2g5_treshold) { + tx_1g_dmas--; + overflow -= num_1g_ports; + re_loop = true; + } + if (num_2g5_ports && overflow + && tx_2g5_dmas > min_tx_1_2g5_treshold) { + tx_2g5_dmas--; + overflow -= num_2g5_ports; + re_loop = true; + } + if (num_10g_ports && overflow + && tx_10g_dmas > min_tx_10g_treshold) { + tx_10g_dmas--; + overflow -= num_10g_ports; + re_loop = true; + } + } + + if (overflow > 0) { + printk(KERN_ALERT + "This configuration will not work due to over open dma" + " allocation (%d open dmas)...\n", + overflow); + err = -1; + goto precalculated_open_dma_out; + } + + /* could remain leftovers... e.g. 
overflow=1, + 2ports => leftover=1 => shared=1 */ + open_dmas_computed = num_1g_ports * tx_1g_dmas + + num_1g_ports * rx_1g_dmas + + num_2g5_ports * tx_2g5_dmas + + num_2g5_ports * rx_2g5_dmas + + num_10g_ports * tx_10g_dmas + + num_10g_ports * rx_10g_dmas + + num_oh_ports * oh_dmas + shared_ext_open_dma; + + if (max_fm_open_dma - open_dmas_computed > 0) + shared_ext_open_dma = + MIN(shared_ext_open_dma + max_fm_open_dma - + open_dmas_computed, max_ext_open_dma_treshold); + + /* set open dmas */ + fm_resource_settings_info->tx_1g_dmas = tx_1g_dmas; + fm_resource_settings_info->rx_1g_dmas = rx_1g_dmas; + fm_resource_settings_info->tx_2g5_dmas = tx_2g5_dmas; + fm_resource_settings_info->rx_2g5_dmas = rx_2g5_dmas; + fm_resource_settings_info->tx_10g_dmas = tx_10g_dmas; + fm_resource_settings_info->rx_10g_dmas = rx_10g_dmas; + fm_resource_settings_info->oh_dmas = oh_dmas; + fm_resource_settings_info->shared_ext_open_dma = shared_ext_open_dma; + +precalculated_open_dma_out: + printk(KERN_INFO " FMAN(%u) open dma settings:\n", + p_LnxWrpFmDev->id); + printk(KERN_INFO " - Total open dma available(%u)\n", + max_fm_open_dma); + if (num_1g_ports) { + printk(KERN_INFO " - 1G ports TX %u(%u)\n", num_1g_ports, + tx_1g_dmas); + printk(KERN_INFO " - 1G ports RX %u(%u)\n", num_1g_ports, + rx_1g_dmas); + } + if (num_2g5_ports) { + printk(KERN_INFO " - 2.5G ports TX %u(%u)\n", num_2g5_ports, + tx_2g5_dmas); + printk(KERN_INFO " - 2.5G ports RX %u(%u)\n", num_2g5_ports, + tx_2g5_dmas); + } + if (num_10g_ports) { + printk(KERN_INFO " - 10G ports TX %u(%u)\n", num_10g_ports, + tx_10g_dmas); + printk(KERN_INFO " - 10G ports RX %u(%u)\n", num_10g_ports, + rx_10g_dmas); + } + if (num_oh_ports) + printk(KERN_INFO " - OH-HC ports %u(%u)\n", num_oh_ports, + oh_dmas); + printk(KERN_INFO " - Shared extra open dma(%u)\n", + shared_ext_open_dma ? shared_ext_open_dma : 0); + + return err; +} + +int fm_set_precalculate_open_dma(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = + (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + struct fm_resource_settings *fm_resource_settings_info = NULL; + t_FmPortRsrc numOfOpenDmas; + t_Error errCode; + + ASSERT_COND(p_LnxWrpFmDev != NULL); + fm_resource_settings_info = &p_LnxWrpFmDev->fm_resource_settings_info; + + memset(&numOfOpenDmas, 0, sizeof(t_FmPortRsrc)); + +/* IF 1G PORT */ + if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX) + numOfOpenDmas.num = fm_resource_settings_info->tx_1g_dmas; + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX) + numOfOpenDmas.num = fm_resource_settings_info->rx_1g_dmas; +/* IF 2.5G PORT*/ + /* TODO: Not supported by LLD yet. */ + +/* IF 10G PORT */ + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX_10G) + numOfOpenDmas.num = fm_resource_settings_info->tx_10g_dmas; + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX_10G) + numOfOpenDmas.num = fm_resource_settings_info->rx_10g_dmas; +/* IF OH PORT */ + else + numOfOpenDmas.num = fm_resource_settings_info->oh_dmas; + + numOfOpenDmas.extra = fm_resource_settings_info->shared_ext_open_dma; + + errCode = FM_PORT_SetNumOfOpenDmas(p_LnxWrpFmPortDev->h_Dev, + &numOfOpenDmas); + if (errCode != E_OK) { + printk(KERN_WARNING + "FM_PORT_SetNumOfOpenDmas failed (errCode:0x%2x)", + errCode); + return -EIO; + } + + return 0; +} + +/* Compute FMan tnums based on available tnums and number of ports. 
+ Set defaults (minim tresholds) and then distribute leftovers.*/ +int fm_precalculate_tnums(t_LnxWrpFmDev *p_LnxWrpFmDev, int max_fm_tnums) +{ + /* input parameters */ + struct fm_active_ports *fm_active_ports_info = NULL; + int num_1g_ports = 0; + int num_2g5_ports = 0; + int num_10g_ports = 0; + int num_oh_ports = 0; + + /* output parameters */ + struct fm_resource_settings *fm_resource_settings_info = NULL; + int tx_1g_tnums = 0, rx_1g_tnums = 0; + int tx_2g5_tnums = 0, rx_2g5_tnums = 0; + int tx_10g_tnums = 0, rx_10g_tnums = 0; + int oh_tnums = 0; + int shared_ext_tnums = 0; + int err = 0; + + /* open dma calculus */ + int default_and_treshold_rx_tx_10g_tnums = 16; /* DPDE_10g */ + int default_and_treshold_rx_tx_1g_2g5_tnums = 4; /* DPDE_1g */ + int default_and_treshold_oh_tnums = 2; /* Hell knows why */ + int max_tnums_treshold = 64; /* LLD: MAX_NUM_OF_TASKS */ + int max_ext_tnums_treshold = 8; /* LLD: MAX_NUM_OF_EXTRA_TASKS */ + int remaing_tnums = 0; + int tnums_computed = 0; + int leftovers = 0; + bool re_loop = true; + + /* check input parameters correctness */ + ASSERT_COND(p_LnxWrpFmDev != NULL); + fm_active_ports_info = &p_LnxWrpFmDev->fm_active_ports_info; + fm_resource_settings_info = &p_LnxWrpFmDev->fm_resource_settings_info; + ASSERT_COND(fm_active_ports_info != NULL); + ASSERT_COND(fm_resource_settings_info != NULL); + ASSERT_COND(fm_active_ports_info->num_tx_ports == + fm_active_ports_info->num_rx_ports); + ASSERT_COND(fm_active_ports_info->num_tx25_ports == + fm_active_ports_info->num_tx25_ports); + ASSERT_COND(fm_active_ports_info->num_tx10_ports == + fm_active_ports_info->num_tx10_ports); + + /* set input parameters */ + num_1g_ports = fm_active_ports_info->num_tx_ports; + num_2g5_ports = fm_active_ports_info->num_tx25_ports; + num_10g_ports = fm_active_ports_info->num_tx10_ports; + num_oh_ports = fm_active_ports_info->num_oh_ports; + + /* compute FMan TNUMs per port */ + /* ------------------------------------------------------- */ + if (num_1g_ports) { + tx_1g_tnums = default_and_treshold_rx_tx_1g_2g5_tnums; + rx_1g_tnums = default_and_treshold_rx_tx_1g_2g5_tnums; + } + if (num_2g5_ports) { + tx_2g5_tnums = default_and_treshold_rx_tx_1g_2g5_tnums; + rx_2g5_tnums = default_and_treshold_rx_tx_1g_2g5_tnums; + } + if (num_10g_ports) { + tx_10g_tnums = default_and_treshold_rx_tx_10g_tnums; + rx_10g_tnums = default_and_treshold_rx_tx_10g_tnums; + } + if (num_oh_ports) + oh_tnums = default_and_treshold_oh_tnums; + + shared_ext_tnums = num_10g_ports ? 
+ max_ext_tnums_treshold : 2; /* DEFAULT_PORT_extraNumOfTasks */ + + /* based on total number of ports set open DMAs for all other ports */ + remaing_tnums = max_fm_tnums - + (oh_tnums * num_oh_ports) - + (tx_1g_tnums * num_1g_ports + rx_1g_tnums * num_1g_ports) - + (tx_2g5_tnums * num_2g5_ports + rx_2g5_tnums * num_2g5_ports) - + (tx_10g_tnums * num_10g_ports + rx_10g_tnums * num_10g_ports) - + shared_ext_tnums; + + if (remaing_tnums < 0) { + printk(KERN_ALERT + "This configuration will not work due to low number" + " of tnums (%u tnums) and number of total ports" + " available...\n", + max_fm_tnums); + err = -1; + goto precalculated_tnums_out; + } + + leftovers = remaing_tnums; + re_loop = true; + while (leftovers > 0 && re_loop == true) { + re_loop = false; + if (num_10g_ports && (leftovers - (int) num_10g_ports) >= 0 + && (rx_10g_tnums < max_tnums_treshold)) { + rx_10g_tnums++; + leftovers -= num_10g_ports; + re_loop = true; + } + + if (num_10g_ports && (leftovers - (int) num_10g_ports) >= 0 + && (tx_10g_tnums < max_tnums_treshold)) { + tx_10g_tnums++; + leftovers -= num_10g_ports; + re_loop = true; + } + + if (num_2g5_ports && (leftovers - (int) num_2g5_ports) >= 0 + && (rx_2g5_tnums < max_tnums_treshold)) { + rx_2g5_tnums++; + leftovers -= num_2g5_ports; + re_loop = true; + } + + if (num_2g5_ports && (leftovers - (int) num_2g5_ports) >= 0 + && (tx_2g5_tnums < max_tnums_treshold)) { + tx_2g5_tnums++; + leftovers -= num_2g5_ports; + re_loop = true; + } + + if (num_1g_ports && (leftovers - (int) num_1g_ports) >= 0 + && (rx_1g_tnums < max_tnums_treshold)) { + rx_1g_tnums++; + leftovers -= num_1g_ports; + re_loop = true; + } + + if (num_1g_ports && (leftovers - (int) num_1g_ports) >= 0 + && (tx_1g_tnums < max_tnums_treshold)) { + tx_1g_tnums++; + leftovers -= num_1g_ports; + re_loop = true; + } + } + + tnums_computed = + num_1g_ports * tx_1g_tnums + + num_1g_ports * rx_1g_tnums + + num_2g5_ports * tx_2g5_tnums + + num_2g5_ports * rx_2g5_tnums + + num_10g_ports * tx_10g_tnums + + num_10g_ports * rx_10g_tnums + + num_oh_ports * oh_tnums + + shared_ext_tnums; + + if (leftovers > 0) + shared_ext_tnums = + MIN(shared_ext_tnums + max_fm_tnums - tnums_computed, + max_ext_tnums_treshold); + + ASSERT_COND((oh_tnums * num_oh_ports) + + (tx_1g_tnums * num_1g_ports + rx_1g_tnums * num_1g_ports) + + (tx_2g5_tnums * num_2g5_ports + + rx_2g5_tnums * num_2g5_ports) + + (tx_10g_tnums * num_10g_ports + + rx_10g_tnums * num_10g_ports) + shared_ext_tnums <= + max_fm_tnums); + + /* set computed tnums */ + fm_resource_settings_info->tx_1g_tnums = tx_1g_tnums; + fm_resource_settings_info->rx_1g_tnums = rx_1g_tnums; + fm_resource_settings_info->tx_2g5_tnums = tx_2g5_tnums; + fm_resource_settings_info->rx_2g5_tnums = rx_2g5_tnums; + fm_resource_settings_info->tx_10g_tnums = tx_10g_tnums; + fm_resource_settings_info->rx_10g_tnums = rx_10g_tnums; + fm_resource_settings_info->oh_tnums = oh_tnums; + fm_resource_settings_info->shared_ext_tnums = shared_ext_tnums; + +precalculated_tnums_out: + printk(KERN_INFO " FMAN(%u) Tnums settings:\n", p_LnxWrpFmDev->id); + printk(KERN_INFO " - Total Tnums available(%u)\n", max_fm_tnums); + if (num_1g_ports) { + printk(KERN_INFO " - 1G ports TX %u(%u)\n", num_1g_ports, + tx_1g_tnums); + printk(KERN_INFO " - 1G ports RX %u(%u)\n", num_1g_ports, + rx_1g_tnums); + } + if (num_2g5_ports) { + printk(KERN_INFO " - 2.5G ports TX %u(%u)\n", num_2g5_ports, + tx_2g5_tnums); + printk(KERN_INFO " - 2.5G ports RX %u(%u)\n", num_2g5_ports, + rx_2g5_tnums); + } + if (num_10g_ports) { + 
printk(KERN_INFO " - 10G ports TX %u(%u)\n", num_10g_ports, + tx_10g_tnums); + printk(KERN_INFO " - 10G ports RX %u(%u)\n", num_10g_ports, + rx_10g_tnums); + } + if (num_oh_ports) + printk(KERN_INFO " - OH-HC ports %u(%u)\n", num_oh_ports, + oh_tnums); + printk(KERN_INFO " - Shared extra tnums(%u)\n", shared_ext_tnums); + + return err; +} + +int fm_set_precalculate_tnums(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = + (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + struct fm_resource_settings *fm_resource_settings_info = NULL; + t_FmPortRsrc numOfTask; + t_Error errCode; + + ASSERT_COND(p_LnxWrpFmDev != NULL); + fm_resource_settings_info = &p_LnxWrpFmDev->fm_resource_settings_info; + + memset(&numOfTask, 0, sizeof(t_FmPortRsrc)); + +/* IF 1G PORT */ + if (p_LnxWrpFmPortDev->settings.param.portType == e_FM_PORT_TYPE_TX) + numOfTask.num = fm_resource_settings_info->tx_1g_tnums; + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX) + numOfTask.num = fm_resource_settings_info->rx_1g_tnums; +/* IF 2.5G PORT*/ + /* TODO: Not supported by LLD yet. */ + +/* IF 10G PORT */ + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_TX_10G) + numOfTask.num = fm_resource_settings_info->tx_10g_tnums; + else if (p_LnxWrpFmPortDev->settings.param.portType == + e_FM_PORT_TYPE_RX_10G) + numOfTask.num = fm_resource_settings_info->rx_10g_tnums; +/* IF OH PORT */ + else + numOfTask.num = fm_resource_settings_info->oh_dmas; + + numOfTask.extra = fm_resource_settings_info->shared_ext_tnums; + + errCode = FM_PORT_SetNumOfTasks(p_LnxWrpFmPortDev->h_Dev, &numOfTask); + if (errCode != E_OK) { + printk(KERN_WARNING + "FM_PORT_SetNumOfTasks failed (errCode:0x%2x)", + errCode); + return -EIO; + } + + return 0; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_resources.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_resources.h @@ -0,0 +1,111 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_resources.h + + @Description FMD wrapper resource allocation functions. + +*/ + +#ifndef LNXWRP_RESOURCES_H_ +#define LNXWRP_RESOURCES_H_ + +#include "lnxwrp_fm.h" + +#define ROUND(X) ((2*(X)+1)/2) +#define CEIL(X) ((X)+1) +/* #define ROUND_DIV(X, Y) (((X)+(Y)/2)/(Y)) */ +#define ROUND_DIV(X, Y) ((2*(X)+(Y))/(2*(Y))) +#define CEIL_DIV(X, Y) (((X)+(Y)-1)/(Y)) + +/* used for resource calculus */ +#define DPDE_1G 2 /* DQDP 1g - from LLD: + DEFAULT_PORT_txFifoDeqPipelineDepth_1G */ +#define DPDE_10G 8 /* DQDP 10g - from LLD: + DEFAULT_PORT_txFifoDeqPipelineDepth_10G */ + +int fm_set_active_fman_ports(struct platform_device *of_dev, + t_LnxWrpFmDev *p_LnxWrpFmDev); + +/* Calculate the fifosize based on MURAM allocation, number of ports, dpde + * value and s/g software support (! Kernel does not suport s/g). + * + * Algorithm summary: + * - Calculate the the minimum fifosize required for every type of port + * (TX,RX for 1G, 2.5G and 10G). + * - Set TX the minimum fifosize required. + * - Distribute the remaining buffers (after all TX were set) to RX ports + * based on: + * 1G RX = Remaining_buffers * 1/(1+2.5+10) + * 2.5G RX = Remaining_buffers * 2.5/(1+2.5+10) + * 10G RX = Remaining_buffers * 10/(1+2.5+10) + * - if the RX is smaller than the minimum required, then set the minimum + * required + * - In the end distribuite the leftovers if there are any (due to + * unprecise calculus) or if over allocation cat some buffers from all RX + * ports w/o pass over minimum required treshold, but if there must be + * pass the treshold in order to cat the over allocation ,then this + * configuration can not be set - KERN_ALERT. +*/ +int fm_precalculate_fifosizes(t_LnxWrpFmDev *p_LnxWrpFmDev, + int muram_fifo_size); + +int fm_set_precalculate_fifosize(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev); + +/* Compute FMan open DMA based on total number of open DMAs and + * number of available fman ports. + * + * By default 10g ports are set to input parameters. The other ports + * tries to keep the proportion rx=2tx open dmas or tresholds. + * + * If leftovers, then those will be set as shared. + * + * If after computing overflow appears, then it decrements open dma + * for all ports w/o cross the tresholds. If the tresholds are meet + * and is still overflow, then it returns error. +*/ +int fm_precalculate_open_dma(t_LnxWrpFmDev *p_LnxWrpFmDev, + int max_fm_open_dma, + int default_tx_10g_dmas, + int default_rx_10g_dmas, + int min_tx_10g_treshold, int min_rx_10g_treshold); + +int fm_set_precalculate_open_dma(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev); + +/* Compute FMan tnums based on available tnums and number of ports. 
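+ * ("tnums" is the per-port FMan task budget: the values computed here are
+ * later programmed into each port by fm_set_precalculate_tnums() through
+ * FM_PORT_SetNumOfTasks(), with the shared/extra tnums passed as the
+ * port's 'extra' tasks.)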
+ * Set defaults (minim tresholds) and then distribute leftovers.*/ +int fm_precalculate_tnums(t_LnxWrpFmDev *p_LnxWrpFmDev, int max_fm_tnums); + +int fm_set_precalculate_tnums(t_LnxWrpFmPortDev *p_LnxWrpFmPortDev); + +#endif /* LNXWRP_RESOURCES_H_ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs.c @@ -0,0 +1,60 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_sysfs.c + + @Description FM wrapper sysfs related functions. + +*/ + +#include +#include "lnxwrp_sysfs.h" + +uint8_t fm_find_statistic_counter_by_name(const char *attr_name, + struct SysfsStats_t *sysfs_stats, + uint8_t *offset) +{ + int i = 0; + + while (sysfs_stats[i].statisticName != NULL) { + if (strcmp(sysfs_stats[i].statisticName, attr_name) == 0) { + if (offset != NULL) + *offset = i; + return sysfs_stats[i].statisticCounter; + } + + i++; + } + WARN(1, "FMD: Should never get here!"); + return 0; +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs.h @@ -0,0 +1,67 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_sysfs.h + + @Description FM sysfs functions. + +*/ + +#ifndef LNXWRP_SYSFS_H_ +#define LNXWRP_SYSFS_H_ + +/* Linux Headers ------------------- */ +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ + +#include +#include +#include +#include + +struct SysfsStats_t { + const char *statisticName; + uint8_t statisticCounter; +}; + +uint8_t fm_find_statistic_counter_by_name(const char *attr_name, + struct SysfsStats_t *sysfs_stats, + uint8_t *offset); + +#endif /* LNXWRP_SYSFS_H_ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm.c @@ -0,0 +1,575 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_sysfs_fm.c + + @Description FM sysfs related functions. + +*/ + +#include "lnxwrp_sysfs.h" +#include "lnxwrp_fm.h" + +enum e_FmDmaMatchStatistics { + e_FM_DMA_COUNTERS_CMQ_NOT_EMPTY, + e_FM_DMA_COUNTERS_BUS_ERROR, + e_FM_DMA_COUNTERS_READ_BUF_ECC_ERROR, + e_FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR, + e_FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR +}; + +static const struct SysfsStats_t fmSysfsStats[] = { + /* FM statistics */ + { + .statisticName = "enq_total_frame", + .statisticCounter = e_FM_COUNTERS_ENQ_TOTAL_FRAME, + }, + { + .statisticName = "deq_total_frame", + .statisticCounter = e_FM_COUNTERS_DEQ_TOTAL_FRAME, + }, + { + .statisticName = "deq_0", + .statisticCounter = e_FM_COUNTERS_DEQ_0, + }, + { + .statisticName = "deq_1", + .statisticCounter = e_FM_COUNTERS_DEQ_1, + }, + { + .statisticName = "deq_2", + .statisticCounter = e_FM_COUNTERS_DEQ_2, + }, + { + .statisticName = "deq_from_default", + .statisticCounter = e_FM_COUNTERS_DEQ_FROM_DEFAULT, + }, + { + .statisticName = "deq_from_context", + .statisticCounter = e_FM_COUNTERS_DEQ_FROM_CONTEXT, + }, + { + .statisticName = "deq_from_fd", + .statisticCounter = e_FM_COUNTERS_DEQ_FROM_FD, + }, + { + .statisticName = "deq_confirm", + .statisticCounter = e_FM_COUNTERS_DEQ_CONFIRM, + }, + /* FM:DMA statistics */ + { + .statisticName = "cmq_not_empty", + .statisticCounter = e_FM_DMA_COUNTERS_CMQ_NOT_EMPTY, + }, + { + .statisticName = "bus_error", + .statisticCounter = e_FM_DMA_COUNTERS_BUS_ERROR, + }, + { + .statisticName = "read_buf_ecc_error", + .statisticCounter = e_FM_DMA_COUNTERS_READ_BUF_ECC_ERROR, + }, + { + .statisticName = "write_buf_ecc_sys_error", + .statisticCounter = e_FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR, + }, + { + .statisticName = "write_buf_ecc_fm_error", + .statisticCounter = e_FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR, + }, + /* FM:PCD statistics */ + { + .statisticName = "pcd_enq_total_frame", + .statisticCounter = e_FM_COUNTERS_ENQ_TOTAL_FRAME, + }, + { + .statisticName = "pcd_kg_total", + .statisticCounter = e_FM_PCD_KG_COUNTERS_TOTAL, + }, + { + .statisticName = "pcd_plcr_yellow", + .statisticCounter = e_FM_PCD_PLCR_COUNTERS_YELLOW, + }, + { + .statisticName = "pcd_plcr_red", + .statisticCounter = e_FM_PCD_PLCR_COUNTERS_RED, + }, + { + .statisticName = "pcd_plcr_recolored_to_red", + .statisticCounter = e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_RED, + }, + { + .statisticName = "pcd_plcr_recolored_to_yellow", + .statisticCounter = e_FM_PCD_PLCR_COUNTERS_RECOLORED_TO_YELLOW, + }, + { + .statisticName = "pcd_plcr_total", + .statisticCounter = e_FM_PCD_PLCR_COUNTERS_TOTAL, + }, + { + .statisticName = "pcd_plcr_length_mismatch", + .statisticCounter = e_FM_PCD_PLCR_COUNTERS_LENGTH_MISMATCH, + }, + { + .statisticName = "pcd_prs_parse_dispatch", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_PARSE_DISPATCH, + }, + { + .statisticName = "pcd_prs_l2_parse_result_returned", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED, + }, + { + .statisticName = 
"pcd_prs_l3_parse_result_returned", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED, + }, + { + .statisticName = "pcd_prs_l4_parse_result_returned", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED, + }, + { + .statisticName = "pcd_prs_shim_parse_result_returned", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED, + }, + { + .statisticName = "pcd_prs_l2_parse_result_returned_with_err", + .statisticCounter = + e_FM_PCD_PRS_COUNTERS_L2_PARSE_RESULT_RETURNED_WITH_ERR, + }, + { + .statisticName = "pcd_prs_l3_parse_result_returned_with_err", + .statisticCounter = + e_FM_PCD_PRS_COUNTERS_L3_PARSE_RESULT_RETURNED_WITH_ERR, + }, + { + .statisticName = "pcd_prs_l4_parse_result_returned_with_err", + .statisticCounter = + e_FM_PCD_PRS_COUNTERS_L4_PARSE_RESULT_RETURNED_WITH_ERR, + }, + { + .statisticName = "pcd_prs_shim_parse_result_returned_with_err", + .statisticCounter = + e_FM_PCD_PRS_COUNTERS_SHIM_PARSE_RESULT_RETURNED_WITH_ERR, + }, + { + .statisticName = "pcd_prs_soft_prs_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_SOFT_PRS_CYCLES, + }, + { + .statisticName = "pcd_prs_soft_prs_stall_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_SOFT_PRS_STALL_CYCLES, + }, + { + .statisticName = "pcd_prs_hard_prs_cycle_incl_stall_cycles", + .statisticCounter = + e_FM_PCD_PRS_COUNTERS_HARD_PRS_CYCLE_INCL_STALL_CYCLES, + }, + { + .statisticName = "pcd_prs_muram_read_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_MURAM_READ_CYCLES, + }, + { + .statisticName = "pcd_prs_muram_read_stall_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_MURAM_READ_STALL_CYCLES, + }, + { + .statisticName = "pcd_prs_muram_write_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_CYCLES, + }, + { + .statisticName = "pcd_prs_muram_write_stall_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_MURAM_WRITE_STALL_CYCLES, + }, + { + .statisticName = "pcd_prs_fpm_command_stall_cycles", + .statisticCounter = e_FM_PCD_PRS_COUNTERS_FPM_COMMAND_STALL_CYCLES, + }, + {} +}; + +/* Fm stats and regs dumps via sysfs */ +static ssize_t show_fm_dma_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; + t_FmDmaStatus fmDmaStatus; + unsigned long flags = 0; + unsigned n = 0; + uint8_t counter_value = 0, counter = 0; + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmDev == NULL)) + return -EINVAL; + + if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev) + return -EIO; + + counter = + fm_find_statistic_counter_by_name(attr->attr.name, + (struct SysfsStats_t *) + &fmSysfsStats[0], NULL); + + local_irq_save(flags); + + memset(&fmDmaStatus, 0, sizeof(fmDmaStatus)); + FM_GetDmaStatus(p_LnxWrpFmDev->h_Dev, &fmDmaStatus); + + switch (counter) { + case e_FM_DMA_COUNTERS_CMQ_NOT_EMPTY: + counter_value = fmDmaStatus.cmqNotEmpty; + break; + case e_FM_DMA_COUNTERS_BUS_ERROR: + counter_value = fmDmaStatus.busError; + break; + case e_FM_DMA_COUNTERS_READ_BUF_ECC_ERROR: + counter_value = fmDmaStatus.readBufEccError; + break; + case e_FM_DMA_COUNTERS_WRITE_BUF_ECC_SYS_ERROR: + counter_value = fmDmaStatus.writeBufEccSysError; + break; + case e_FM_DMA_COUNTERS_WRITE_BUF_ECC_FM_ERROR: + counter_value = fmDmaStatus.writeBufEccFmError; + break; + default: + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + break; + }; + + n = snprintf(buf, PAGE_SIZE, "\tFM %u counter: %c\n", + 
p_LnxWrpFmDev->id, counter_value ? 'T' : 'F'); + + local_irq_restore(flags); + + return n; +} + +static ssize_t show_fm_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; + unsigned long flags = 0; + unsigned n = 0, counter = 0; + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmDev == NULL)) + return -EINVAL; + + if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev) + return -EIO; + + counter = + fm_find_statistic_counter_by_name(attr->attr.name, + (struct SysfsStats_t *) + &fmSysfsStats[0], NULL); + + local_irq_save(flags); + + n = snprintf(buf, PAGE_SIZE, "\tFM %d counter: %d\n", + p_LnxWrpFmDev->id, + FM_GetCounter(p_LnxWrpFmDev->h_Dev, + (e_FmCounters) counter)); + + local_irq_restore(flags); + + return n; +} + +static ssize_t show_fm_pcd_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; + unsigned long flags = 0; + unsigned n = 0, counter = 0; + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmDev == NULL)) + return -EINVAL; + + if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev) + return -EIO; + + counter = + fm_find_statistic_counter_by_name(attr->attr.name, + (struct SysfsStats_t *) + &fmSysfsStats[0], NULL); + + local_irq_save(flags); + + n = snprintf(buf, PAGE_SIZE, "\tFM %d counter: %d\n", + p_LnxWrpFmDev->id, + FM_PCD_GetCounter(p_LnxWrpFmDev->h_PcdDev, + (e_FmPcdCounters) counter)); + + local_irq_restore(flags); + + return n; +} + +/* FM */ +static DEVICE_ATTR(enq_total_frame, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_total_frame, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_0, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_1, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_2, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_from_default, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_from_context, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_from_fd, S_IRUGO, show_fm_stats, NULL); +static DEVICE_ATTR(deq_confirm, S_IRUGO, show_fm_stats, NULL); +/* FM:DMA */ +static DEVICE_ATTR(cmq_not_empty, S_IRUGO, show_fm_dma_stats, NULL); +static DEVICE_ATTR(bus_error, S_IRUGO, show_fm_dma_stats, NULL); +static DEVICE_ATTR(read_buf_ecc_error, S_IRUGO, show_fm_dma_stats, NULL); +static DEVICE_ATTR(write_buf_ecc_sys_error, S_IRUGO, show_fm_dma_stats, NULL); +static DEVICE_ATTR(write_buf_ecc_fm_error, S_IRUGO, show_fm_dma_stats, NULL); +/* FM:PCD */ +static DEVICE_ATTR(pcd_enq_total_frame, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_kg_total, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_plcr_yellow, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_plcr_red, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_plcr_recolored_to_red, S_IRUGO, show_fm_pcd_stats, + NULL); +static DEVICE_ATTR(pcd_plcr_recolored_to_yellow, S_IRUGO, show_fm_pcd_stats, + NULL); +static DEVICE_ATTR(pcd_plcr_total, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_plcr_length_mismatch, S_IRUGO, show_fm_pcd_stats, + NULL); +static DEVICE_ATTR(pcd_prs_parse_dispatch, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_l2_parse_result_returned, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_l3_parse_result_returned, S_IRUGO, + show_fm_pcd_stats, NULL); 
+static DEVICE_ATTR(pcd_prs_l4_parse_result_returned, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_shim_parse_result_returned, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_l2_parse_result_returned_with_err, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_l3_parse_result_returned_with_err, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_l4_parse_result_returned_with_err, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_shim_parse_result_returned_with_err, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_soft_prs_cycles, S_IRUGO, show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_soft_prs_stall_cycles, S_IRUGO, show_fm_pcd_stats, + NULL); +static DEVICE_ATTR(pcd_prs_hard_prs_cycle_incl_stall_cycles, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_muram_read_cycles, S_IRUGO, show_fm_pcd_stats, + NULL); +static DEVICE_ATTR(pcd_prs_muram_read_stall_cycles, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_muram_write_cycles, S_IRUGO, show_fm_pcd_stats, + NULL); +static DEVICE_ATTR(pcd_prs_muram_write_stall_cycles, S_IRUGO, + show_fm_pcd_stats, NULL); +static DEVICE_ATTR(pcd_prs_fpm_command_stall_cycles, S_IRUGO, + show_fm_pcd_stats, NULL); + +static struct attribute *fm_dev_stats_attributes[] = { + &dev_attr_enq_total_frame.attr, + &dev_attr_deq_total_frame.attr, + &dev_attr_deq_0.attr, + &dev_attr_deq_1.attr, + &dev_attr_deq_2.attr, + &dev_attr_deq_from_default.attr, + &dev_attr_deq_from_context.attr, + &dev_attr_deq_from_fd.attr, + &dev_attr_deq_confirm.attr, + &dev_attr_cmq_not_empty.attr, + &dev_attr_bus_error.attr, + &dev_attr_read_buf_ecc_error.attr, + &dev_attr_write_buf_ecc_sys_error.attr, + &dev_attr_write_buf_ecc_fm_error.attr, + &dev_attr_pcd_enq_total_frame.attr, + &dev_attr_pcd_kg_total.attr, + &dev_attr_pcd_plcr_yellow.attr, + &dev_attr_pcd_plcr_red.attr, + &dev_attr_pcd_plcr_recolored_to_red.attr, + &dev_attr_pcd_plcr_recolored_to_yellow.attr, + &dev_attr_pcd_plcr_total.attr, + &dev_attr_pcd_plcr_length_mismatch.attr, + &dev_attr_pcd_prs_parse_dispatch.attr, + &dev_attr_pcd_prs_l2_parse_result_returned.attr, + &dev_attr_pcd_prs_l3_parse_result_returned.attr, + &dev_attr_pcd_prs_l4_parse_result_returned.attr, + &dev_attr_pcd_prs_shim_parse_result_returned.attr, + &dev_attr_pcd_prs_l2_parse_result_returned_with_err.attr, + &dev_attr_pcd_prs_l3_parse_result_returned_with_err.attr, + &dev_attr_pcd_prs_l4_parse_result_returned_with_err.attr, + &dev_attr_pcd_prs_shim_parse_result_returned_with_err.attr, + &dev_attr_pcd_prs_soft_prs_cycles.attr, + &dev_attr_pcd_prs_soft_prs_stall_cycles.attr, + &dev_attr_pcd_prs_hard_prs_cycle_incl_stall_cycles.attr, + &dev_attr_pcd_prs_muram_read_cycles.attr, + &dev_attr_pcd_prs_muram_read_stall_cycles.attr, + &dev_attr_pcd_prs_muram_write_cycles.attr, + &dev_attr_pcd_prs_muram_write_stall_cycles.attr, + &dev_attr_pcd_prs_fpm_command_stall_cycles.attr, + NULL +}; + +static const struct attribute_group fm_dev_stats_attr_grp = { + .name = "statistics", + .attrs = fm_dev_stats_attributes +}; + +static ssize_t show_fm_regs(struct device *dev, struct device_attribute *attr, + char *buf) +{ + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; +#endif + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + if 
(WARN_ON(p_LnxWrpFmDev == NULL)) + return -EINVAL; + + local_irq_save(flags); + + n = snprintf(buf, PAGE_SIZE, "FM driver registers dump.\n"); + + if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev) + return -EIO; + else + FM_DumpRegs(p_LnxWrpFmDev->h_Dev); + + local_irq_restore(flags); +#else + + local_irq_save(flags); + n = snprintf(buf, PAGE_SIZE, + "Debug level is too low to dump registers!!!\n"); + local_irq_restore(flags); +#endif /* (defined(DEBUG_ERRORS) && ... */ + + return n; +} + +static ssize_t show_pcd_regs(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; +#endif + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmDev == NULL)) + return -EINVAL; + + local_irq_save(flags); + n = snprintf(buf, PAGE_SIZE, "FM driver registers dump.\n"); + + if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_PcdDev) + return -EIO; + else + FM_PCD_DumpRegs(p_LnxWrpFmDev->h_PcdDev); + + local_irq_restore(flags); +#else + + local_irq_save(flags); + n = snprintf(buf, PAGE_SIZE, + "Debug level is too low to dump registers!!!\n"); + local_irq_restore(flags); + +#endif /* (defined(DEBUG_ERRORS) && ... */ + + return n; +} + +static DEVICE_ATTR(fm_regs, S_IRUGO, show_fm_regs, NULL); +static DEVICE_ATTR(fm_pcd_regs, S_IRUGO, show_pcd_regs, NULL); + +int fm_sysfs_create(struct device *dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; + + if (dev == NULL) + return -EIO; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + + /* store to remove them when module is disabled */ + p_LnxWrpFmDev->dev_attr_regs = &dev_attr_fm_regs; + p_LnxWrpFmDev->dev_pcd_attr_regs = &dev_attr_fm_pcd_regs; + + /* Create sysfs statistics group for FM module */ + if (sysfs_create_group(&dev->kobj, &fm_dev_stats_attr_grp) != 0) + return -EIO; + + /* Registers dump entry - in future will be moved to debugfs */ + if (device_create_file(dev, &dev_attr_fm_regs) != 0 || + device_create_file(dev, &dev_attr_fm_pcd_regs) != 0) + return -EIO; + + return 0; +} + +void fm_sysfs_destroy(struct device *dev) +{ + t_LnxWrpFmDev *p_LnxWrpFmDev = NULL; + + if (WARN_ON(dev == NULL)) + return; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmDev == NULL)) + return; + + sysfs_remove_group(&dev->kobj, &fm_dev_stats_attr_grp); + device_remove_file(dev, p_LnxWrpFmDev->dev_attr_regs); + device_remove_file(dev, p_LnxWrpFmDev->dev_pcd_attr_regs); +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_sysfs_fm.h + + @Description FM sysfs functions. + +*/ + +#ifndef LNXWRP_SYSFS_FM_H_ +#define LNXWRP_SYSFS_FM_H_ + +#include "lnxwrp_sysfs.h" + +int fm_sysfs_create(struct device *dev); +void fm_sysfs_destroy(struct device *dev); + +#endif /* LNXWRP_SYSFS_FM_H_ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm_port.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm_port.c @@ -0,0 +1,364 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +/* + + @File lnxwrp_sysfs_fm_port.c + + @Description FM port sysfs related functions. + +*/ + +#include "lnxwrp_sysfs.h" +#include "lnxwrp_fm.h" + +static const struct SysfsStats_t portSysfsStats[] = { + /* RX/TX/OH common statistics */ + { + .statisticName = "port_frame", + .statisticCounter = e_FM_PORT_COUNTERS_FRAME, + }, + { + .statisticName = "port_discard_frame", + .statisticCounter = e_FM_PORT_COUNTERS_DISCARD_FRAME, + }, + { + .statisticName = "port_dealloc_buf", + .statisticCounter = e_FM_PORT_COUNTERS_DEALLOC_BUF, + }, + { + .statisticName = "port_enq_total", + .statisticCounter = e_FM_PORT_COUNTERS_ENQ_TOTAL, + }, + /* TX/OH */ + { + .statisticName = "port_length_err", + .statisticCounter = e_FM_PORT_COUNTERS_LENGTH_ERR, + }, + { + .statisticName = "port_unsupprted_format", + .statisticCounter = e_FM_PORT_COUNTERS_UNSUPPRTED_FORMAT, + }, + { + .statisticName = "port_deq_total", + .statisticCounter = e_FM_PORT_COUNTERS_DEQ_TOTAL, + }, + { + .statisticName = "port_deq_from_default", + .statisticCounter = e_FM_PORT_COUNTERS_DEQ_FROM_DEFAULT, + }, + { + .statisticName = "port_deq_confirm", + .statisticCounter = e_FM_PORT_COUNTERS_DEQ_CONFIRM, + }, + /* RX/OH */ + { + .statisticName = "port_rx_bad_frame", + .statisticCounter = e_FM_PORT_COUNTERS_RX_BAD_FRAME, + }, + { + .statisticName = "port_rx_large_frame", + .statisticCounter = e_FM_PORT_COUNTERS_RX_LARGE_FRAME, + }, + { + .statisticName = "port_rx_out_of_buffers_discard", + .statisticCounter = e_FM_PORT_COUNTERS_RX_OUT_OF_BUFFERS_DISCARD, + }, + { + .statisticName = "port_rx_filter_frame", + .statisticCounter = e_FM_PORT_COUNTERS_RX_FILTER_FRAME, + }, + /* TODO: Particular statistics for OH ports */ + {} +}; + +static ssize_t show_fm_port_stats(struct device *dev, + struct device_attribute *attr, char *buf) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + t_LnxWrpFmDev *p_LnxWrpFmDev; + unsigned long flags; + int n = 0; + uint8_t counter = 0; + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + + p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmPortDev == NULL)) + return -EINVAL; + + p_LnxWrpFmDev = (t_LnxWrpFmDev *) p_LnxWrpFmPortDev->h_LnxWrpFmDev; + if (WARN_ON(p_LnxWrpFmDev == NULL)) + return -EINVAL; + + if (!p_LnxWrpFmDev->active || !p_LnxWrpFmDev->h_Dev) + return -EIO; + + if (!p_LnxWrpFmPortDev->h_Dev) { + n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n"); + return n; + } + + counter = + fm_find_statistic_counter_by_name(attr->attr.name, + (struct SysfsStats_t *) & + portSysfsStats[0], NULL); + + if (counter == e_FM_PORT_COUNTERS_RX_LIST_DMA_ERR) { + uint32_t fmRev = 0; + fmRev = 0xffff & ioread32(UINT_TO_PTR(p_LnxWrpFmDev->fmBaseAddr + + 0x000c30c4)); + + if (fmRev == 0x0100) { + local_irq_save(flags); + n = snprintf(buf, PAGE_SIZE, + "counter not available for revision 1\n"); + local_irq_restore(flags); + } + return n; + } + + local_irq_save(flags); + n = snprintf(buf, PAGE_SIZE, "\tFM %d Port %d counter: %d\n", + p_LnxWrpFmDev->id, + p_LnxWrpFmPortDev->id, + FM_PORT_GetCounter(p_LnxWrpFmPortDev->h_Dev, + (e_FmPortCounters) counter)); + local_irq_restore(flags); + + return n; +} + +/* FM PORT RX/TX/OH statistics */ +static DEVICE_ATTR(port_frame, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_discard_frame, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_dealloc_buf, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_enq_total, S_IRUGO, show_fm_port_stats, NULL); +/* FM PORT TX/OH statistics */ +static 
DEVICE_ATTR(port_length_err, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_unsupprted_format, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_deq_total, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_deq_from_default, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_deq_confirm, S_IRUGO, show_fm_port_stats, NULL); +/* FM PORT RX/OH statistics */ +static DEVICE_ATTR(port_rx_bad_frame, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_rx_large_frame, S_IRUGO, show_fm_port_stats, NULL); +static DEVICE_ATTR(port_rx_out_of_buffers_discard, S_IRUGO, + show_fm_port_stats, NULL); +static DEVICE_ATTR(port_rx_filter_frame, S_IRUGO, show_fm_port_stats, NULL); + +/* FM PORT TX statistics */ +static struct attribute *fm_tx_port_dev_stats_attributes[] = { + &dev_attr_port_frame.attr, + &dev_attr_port_discard_frame.attr, + &dev_attr_port_dealloc_buf.attr, + &dev_attr_port_enq_total.attr, + &dev_attr_port_length_err.attr, + &dev_attr_port_unsupprted_format.attr, + &dev_attr_port_deq_total.attr, + &dev_attr_port_deq_from_default.attr, + &dev_attr_port_deq_confirm.attr, + NULL +}; + +static const struct attribute_group fm_tx_port_dev_stats_attr_grp = { + .name = "statistics", + .attrs = fm_tx_port_dev_stats_attributes +}; + +/* FM PORT RX statistics */ +static struct attribute *fm_rx_port_dev_stats_attributes[] = { + &dev_attr_port_frame.attr, + &dev_attr_port_discard_frame.attr, + &dev_attr_port_dealloc_buf.attr, + &dev_attr_port_enq_total.attr, + &dev_attr_port_rx_bad_frame.attr, + &dev_attr_port_rx_large_frame.attr, + &dev_attr_port_rx_out_of_buffers_discard.attr, + &dev_attr_port_rx_filter_frame.attr, + NULL +}; + +static const struct attribute_group fm_rx_port_dev_stats_attr_grp = { + .name = "statistics", + .attrs = fm_rx_port_dev_stats_attributes +}; + +/* TODO: add particular OH ports statistics */ +static struct attribute *fm_oh_port_dev_stats_attributes[] = { + &dev_attr_port_frame.attr, + &dev_attr_port_discard_frame.attr, + &dev_attr_port_dealloc_buf.attr, + &dev_attr_port_enq_total.attr, + /*TX*/ &dev_attr_port_length_err.attr, + &dev_attr_port_unsupprted_format.attr, + &dev_attr_port_deq_total.attr, + &dev_attr_port_deq_from_default.attr, + &dev_attr_port_deq_confirm.attr, + /*RX*/ &dev_attr_port_rx_bad_frame.attr, + &dev_attr_port_rx_large_frame.attr, + &dev_attr_port_rx_out_of_buffers_discard.attr, + /*&dev_attr_port_rx_filter_frame.attr, */ + NULL +}; + +static const struct attribute_group fm_oh_port_dev_stats_attr_grp = { + .name = "statistics", + .attrs = fm_oh_port_dev_stats_attributes +}; + +static ssize_t show_fm_port_regs(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long flags; + unsigned n = 0; +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = + (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); +#endif + + if (attr == NULL || buf == NULL || dev == NULL) + return -EINVAL; + +#if (defined(DEBUG_ERRORS) && (DEBUG_ERRORS > 0)) + local_irq_save(flags); + + if (!p_LnxWrpFmPortDev->h_Dev) { + n = snprintf(buf, PAGE_SIZE, "\tFM Port not configured...\n"); + return n; + } else { + n = snprintf(buf, PAGE_SIZE, + "FM port driver registers dump.\n"); + FM_PORT_DumpRegs(p_LnxWrpFmPortDev->h_Dev); + } + + local_irq_restore(flags); + + return n; +#else + + local_irq_save(flags); + n = snprintf(buf, PAGE_SIZE, + "Debug level is too low to dump registers!!!\n"); + local_irq_restore(flags); + + return n; +#endif +} + +static DEVICE_ATTR(fm_port_regs, 0x644, 
show_fm_port_regs, NULL); + +int fm_port_sysfs_create(struct device *dev) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev; + + if (dev == NULL) + return -EINVAL; + + p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmPortDev == NULL)) + return -EINVAL; + + /* store to remove them when module is disabled */ + p_LnxWrpFmPortDev->dev_attr_regs = &dev_attr_fm_port_regs; + + /* Registers dump entry - in future will be moved to debugfs */ + if (device_create_file(dev, &dev_attr_fm_port_regs) != 0) + return -EIO; + + /* FM Ports statistics */ + switch (p_LnxWrpFmPortDev->settings.param.portType) { + case e_FM_PORT_TYPE_TX: + case e_FM_PORT_TYPE_TX_10G: + if (sysfs_create_group + (&dev->kobj, &fm_tx_port_dev_stats_attr_grp) != 0) + return -EIO; + break; + case e_FM_PORT_TYPE_RX: + case e_FM_PORT_TYPE_RX_10G: + if (sysfs_create_group + (&dev->kobj, &fm_rx_port_dev_stats_attr_grp) != 0) + return -EIO; + break; + case e_FM_PORT_TYPE_OH_OFFLINE_PARSING: + case e_FM_PORT_TYPE_OH_HOST_COMMAND: + if (sysfs_create_group + (&dev->kobj, &fm_oh_port_dev_stats_attr_grp) != 0) + return -EIO; + break; + case e_FM_PORT_TYPE_DUMMY: + default: + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + return -EINVAL; + break; + }; + + return 0; +} + +void fm_port_sysfs_destroy(struct device *dev) +{ + t_LnxWrpFmPortDev *p_LnxWrpFmPortDev = NULL; + + /* this function has never been tested !!! */ + + if (WARN_ON(dev == NULL)) + return; + + p_LnxWrpFmPortDev = (t_LnxWrpFmPortDev *) dev_get_drvdata(dev); + if (WARN_ON(p_LnxWrpFmPortDev == NULL)) + return; + + /* The name attribute will be freed also by these 2 functions? */ + switch (p_LnxWrpFmPortDev->settings.param.portType) { + case e_FM_PORT_TYPE_TX: + case e_FM_PORT_TYPE_TX_10G: + sysfs_remove_group(&dev->kobj, &fm_tx_port_dev_stats_attr_grp); + break; + case e_FM_PORT_TYPE_RX: + case e_FM_PORT_TYPE_RX_10G: + sysfs_remove_group(&dev->kobj, &fm_rx_port_dev_stats_attr_grp); + break; + case e_FM_PORT_TYPE_OH_OFFLINE_PARSING: + case e_FM_PORT_TYPE_OH_HOST_COMMAND: + sysfs_remove_group(&dev->kobj, &fm_oh_port_dev_stats_attr_grp); + break; + case e_FM_PORT_TYPE_DUMMY: + default: + WARN(1, "FMD: failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + break; + }; + + device_remove_file(dev, p_LnxWrpFmPortDev->dev_attr_regs); +} --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm_port.h +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/wrapper/lnxwrp_sysfs_fm_port.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* + + @File lnxwrp_sysfs_fm_port.h + + @Description FM port sysfs functions. + +*/ + +#ifndef LNXWRP_SYSFS_FM_PORT_H_ +#define LNXWRP_SYSFS_FM_PORT_H_ + +#include "lnxwrp_sysfs.h" + +int fm_port_sysfs_create(struct device *dev); +void fm_port_sysfs_destroy(struct device *dev); + +#endif /* LNXWRP_SYSFS_FM_PORT_H_ */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/xx/Makefile +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/xx/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the Freescale Ethernet controllers +# +EXTRA_CFLAGS += -DVERSION=\"\" +# +#Include netcomm SW specific definitions +include $(srctree)/drivers/net/dpa/NetCommSw/ncsw_config.mk + +obj-y += fsl-ncsw-xx.o + +fsl-ncsw-xx-objs := xx_linux.o udivdi3.o stdlib.o + --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/xx/stdlib.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/xx/stdlib.c @@ -0,0 +1,264 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/*------------------------------------------------------*/ +/* */ +/* File: stdlib.c */ +/* */ +/* Description: */ +/* Standard library routines (externals) */ +/* */ +/* Modifications: */ +/* ============== */ +/* */ +/*------------------------------------------------------*/ +#include "stdlib_ext.h" +#include "stdarg_ext.h" +#include "ctype_ext.h" +#include "string_ext.h" +#include "std_ext.h" +#include "xx_ext.h" + + +#ifdef MODULE +/** + * strtoul - convert a string to an uint32_t + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +uint32_t strtoul(const char *cp,char **endp,uint32_t base) +{ + uint32_t result = 0,value; + + if (!base) { + base = 10; + if (*cp == '0') { + base = 8; + cp++; + if ((*cp == 'x') && isxdigit(cp[1])) { + cp++; + base = 16; + } + } + } + while (isxdigit(*cp) && + (value = (uint32_t)(isdigit(*cp) ? *cp-'0' : toupper((uint8_t)(*cp))-'A'+10)) < base) { + result = result*base + value; + cp++; + } + if (endp) + *endp = (char *)cp; + return result; +} + +/** + * strtol - convert a string to a int32_t + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +long strtol(const char *cp,char **endp,uint32_t base) +{ + if(*cp=='-') + return (long)(-strtoul(cp+1,endp,base)); + return (long)strtoul(cp,endp,base); +} + +/** + * strtoull - convert a string to an uint64_t + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +uint64_t strtoull(const char *cp,char **endp,uint32_t base) +{ + uint64_t result = 0,value; + + if (!base) { + base = 10; + if (*cp == '0') { + base = 8; + cp++; + if ((*cp == 'x') && isxdigit(cp[1])) { + cp++; + base = 16; + } + } + } + while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp-'0' : (islower(*cp) + ? 
toupper((uint8_t)(*cp)) : *cp)-'A'+10) < base) { + result = result*base + value; + cp++; + } + if (endp) + *endp = (char *)cp; + return result; +} + +/** + * strtoll - convert a string to a int64 + * @cp: The start of the string + * @endp: A pointer to the end of the parsed string will be placed here + * @base: The number base to use + */ +long long strtoll(const char *cp,char **endp,uint32_t base) +{ + if(*cp=='-') + return (long long)(-strtoull(cp+1,endp,base)); + return (long long)(strtoull(cp,endp,base)); +} + +/** + * atoi - convert a string to a int + * @s: The start of the string + */ +int atoi(const char *s) +{ + int i=0; + const char **tmp_s = &s; + + while (isdigit(**tmp_s)) + i = i*10 + *((*tmp_s)++) - '0'; + return i; +} + +/** + * strlen - Find the length of a string + * @s: The string to be sized + */ +size_t strlen(const char * s) +{ + const char *sc; + + for (sc = s; *sc != '\0'; ++sc) + /* nothing */; + + return sc - s; +} + +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @count: The maximum number of bytes to search + */ +size_t strnlen(const char * s, size_t count) +{ + const char *sc; + + for (sc = s; count-- && *sc != '\0'; ++sc) + /* nothing */; + + return sc - s; +} + +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + */ +char * strcpy(char * dest,const char *src) +{ + char *tmp = dest; + + while ((*dest++ = *src++) != '\0') + /* nothing */; + + return tmp; +} +#endif /* MODULE */ + +/** + * strtok - Split a string into tokens + * @s: The string to be searched + * @ct: The characters to search for + * + * WARNING: strtok is deprecated, use strsep instead. + */ +char *___strtok; + +char * strtok(char * s,const char * ct) +{ + char *sbegin, *send; + + sbegin = s ? s : ___strtok; + if (!sbegin) { + return NULL; + } + sbegin += strspn(sbegin,ct); + if (*sbegin == '\0') { + ___strtok = NULL; + return( NULL ); + } + send = strpbrk( sbegin, ct); + if (send && *send != '\0') + *send++ = '\0'; + ___strtok = send; + return (sbegin); +} + + +#ifdef MODULE +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: The maximum number of bytes to copy + * + * Note that unlike userspace strncpy, this does not %NUL-pad the buffer. + * However, the result is not %NUL-terminated if the source exceeds + * @count bytes. + */ +char * strncpy(char * dest,const char *src,size_t count) +{ + char *tmp = dest; + + while (count-- && (*dest++ = *src++) != '\0') + /* nothing */; + + return tmp; +} + +/** + * vsprintf - Format a string and place it in a buffer + * @buf: The buffer to place the result into + * @fmt: The format string to use + * @args: Arguments for the format string + * + * Call this function if you are already dealing with a va_list. + * You probably want sprintf instead. + */ +int vsprintf(char *buf, const char *fmt, va_list args) +{ + return vsnprintf(buf, INT32_MAX, fmt, args); +} +#endif /* MODULE */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/xx/udivdi3.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/xx/udivdi3.c @@ -0,0 +1,132 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ + +#include +#include +#include + + +#define BITS_PER_UNIT 8 +#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT) + + +typedef unsigned int UQItype __attribute__ ((mode (QI))); +typedef int SItype __attribute__ ((mode (SI))); +typedef unsigned int USItype __attribute__ ((mode (SI))); +typedef int DItype __attribute__ ((mode (DI))); +typedef int word_type __attribute__ ((mode (__word__))); +typedef unsigned int UDItype __attribute__ ((mode (DI))); + +struct DIstruct {SItype low, high;}; + +typedef union +{ + struct DIstruct s; + DItype ll; +} DIunion; + + +/* bit divisor, dividend and result. 
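+ * (when the divisor does not fit in 32 bits, both operands are shifted
+ * right by the same amount so that do_div()'s 64-by-32 division can be
+ * used; the quotient is then only approximate for such divisors.)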
dynamic precision */ +static __inline__ uint64_t _div64_64(uint64_t dividend, uint64_t divisor) +{ + uint32_t d = divisor; + + if (divisor > 0xffffffffULL) + { + unsigned int shift = fls(divisor >> 32); + + d = divisor >> shift; + dividend >>= shift; + } + + /* avoid 64 bit division if possible */ + if (dividend >> 32) + do_div(dividend, d); + else + dividend = (uint32_t) dividend / d; + + return dividend; +} + +UDItype __udivdi3 (UDItype n, UDItype d) +{ + return _div64_64(n, d); +} + +DItype __divdi3 (DItype n, DItype d) +{ + DItype sign = 1; + if (n<0) + { + sign *= -1; + n *= -1; + } + if (d<0) + { + sign *= -1; + d *= -1; + } + return sign*_div64_64((UDItype)n, (UDItype)d); +} + +UDItype __umoddi3 (UDItype n, UDItype d) +{ + return n-(_div64_64(n, d)*d); +} + +#ifdef MODULE +word_type __ucmpdi2 (DItype a, DItype b) +{ + DIunion au, bu; + + au.ll = a, bu.ll = b; + + if ((USItype) au.s.high < (USItype) bu.s.high) + return 0; + else if ((USItype) au.s.high > (USItype) bu.s.high) + return 2; + if ((USItype) au.s.low < (USItype) bu.s.low) + return 0; + else if ((USItype) au.s.low > (USItype) bu.s.low) + return 2; + return 1; +} +#endif /* MODULE */ --- linux-3.13.0.orig/drivers/net/dpa/NetCommSw/src/xx/xx_linux.c +++ linux-3.13.0/drivers/net/dpa/NetCommSw/src/xx/xx_linux.c @@ -0,0 +1,901 @@ +/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/**************************************************************************//** + @File xx_linux.c + + @Description XX routines implementation for Linux. 
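+
+               The XX_* calls are the driver's portable services mapped onto
+               Linux primitives: kmalloc() (or bigphysarea for very large
+               buffers, when enabled) for allocation, printk() for output,
+               request_irq() for interrupt hook-up, and delayed workqueues
+               standing in for tasklets.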
+*//***************************************************************************/ +#include + +#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS) +#define MODVERSIONS +#endif +#ifdef MODVERSIONS +#include +#endif /* MODVERSIONS */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef BIGPHYSAREA_ENABLE +#include +#endif /* BIGPHYSAREA_ENABLE */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "error_ext.h" +#include "std_ext.h" +#include "list_ext.h" +#include "mm_ext.h" +#include "sys_io_ext.h" +#include "xx.h" + + +#define __ERR_MODULE__ MODULE_UNKNOWN + +#ifdef BIGPHYSAREA_ENABLE +#define MAX_ALLOCATION_SIZE 128 * 1024 /* Maximum size allocated with kmalloc is 128K */ + + +/* TODO: large allocations => use big phys area */ +/****************************************************************************** + * routine: get_nr_pages + * + * description: + * calculates the number of memory pages for a given size (in bytes) + * + * arguments: + * size - the number of bytes + * + * return code: + * The number of pages + * + *****************************************************************************/ +static __inline__ uint32_t get_nr_pages (uint32_t size) +{ + return (uint32_t)((size >> PAGE_SHIFT) + (size & PAGE_SHIFT ? 1 : 0)); +} + +static bool in_big_phys_area (uint32_t addr) +{ + uint32_t base, size; + + bigphysarea_get_details (&base, &size); + return ((addr >= base) && (addr < base + size)); +} +#endif /* BIGPHYSAREA_ENABLE */ + +void * xx_Malloc(uint32_t n) +{ + void *a; + uint32_t flags; + + flags = XX_DisableAllIntr(); +#ifdef BIGPHYSAREA_ENABLE + if (n >= MAX_ALLOCATION_SIZE) + a = (void*)bigphysarea_alloc_pages(get_nr_pages(n), 0, GFP_ATOMIC); + else +#endif /* BIGPHYSAREA_ENABLE */ + a = (void *)kmalloc((uint32_t)n, GFP_ATOMIC); + if (!a) + XX_Print("No memory for XX_Malloc\n"); + XX_RestoreAllIntr(flags); + + return a; +} + +void xx_Free(void *p) +{ +#ifdef BIGPHYSAREA_ENABLE + if (in_big_phys_area ((uint32_t)p)) + bigphysarea_free_pages(p); + else +#endif /* BIGPHYSAREA_ENABLE */ + kfree(p); +} + +void XX_Exit(int status) +{ + WARN(1, "\n\nFMD: fatal error, driver can't go on!!!\n\n"); +} + +#define BUF_SIZE 512 +void XX_Print(char *str, ...) +{ + va_list args; +#ifdef CONFIG_SMP + char buf[BUF_SIZE]; +#endif /* CONFIG_SMP */ + + va_start(args, str); +#ifdef CONFIG_SMP + if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE) + printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completelly.\n", BUF_SIZE); + printk (KERN_WARNING "cpu%d/%d: %s",hard_smp_processor_id(), smp_processor_id(), buf); +#else + vprintk(str, args); +#endif /* CONFIG_SMP */ + va_end(args); +} + +void XX_Fprint(void *file, char *str, ...) 
+{ + va_list args; +#ifdef CONFIG_SMP + char buf[BUF_SIZE]; +#endif /* CONFIG_SMP */ + + va_start(args, str); +#ifdef CONFIG_SMP + if (vsnprintf (buf, BUF_SIZE, str, args) >= BUF_SIZE) + printk(KERN_WARNING "Illegal string to print!\n more than %d characters.\n\tString was not printed completelly.\n", BUF_SIZE); + printk (KERN_CRIT "cpu%d/%d: %s",hard_smp_processor_id(), smp_processor_id(), buf); +#else + vprintk(str, args); +#endif /* CONFIG_SMP */ + va_end(args); +} + +#ifdef DEBUG_XX_MALLOC +typedef void (*t_ffn)(void *); +typedef struct { + t_ffn f_free; + void *mem; + char *fname; + int fline; + uint32_t size; + t_List node; +} t_MemDebug; +#define MEMDBG_OBJECT(p_List) LIST_OBJECT(p_List, t_MemDebug, node) + +LIST(memDbgLst); + + +void * XX_MallocDebug(uint32_t size, char *fname, int line) +{ + void *mem; + t_MemDebug *p_MemDbg; + + p_MemDbg = (t_MemDebug *)xx_Malloc(sizeof(t_MemDebug)); + if (p_MemDbg == NULL) + return NULL; + + mem = xx_Malloc(size); + if (mem == NULL) + { + XX_Free(p_MemDbg); + return NULL; + } + + INIT_LIST(&p_MemDbg->node); + p_MemDbg->f_free = xx_Free; + p_MemDbg->mem = mem; + p_MemDbg->fname = fname; + p_MemDbg->fline = line; + p_MemDbg->size = size+sizeof(t_MemDebug); + LIST_AddToTail(&p_MemDbg->node, &memDbgLst); + + return mem; +} + +void * XX_MallocSmartDebug(uint32_t size, + int memPartitionId, + uint32_t align, + char *fname, + int line) +{ + void *mem; + t_MemDebug *p_MemDbg; + + p_MemDbg = (t_MemDebug *)XX_Malloc(sizeof(t_MemDebug)); + if (p_MemDbg == NULL) + return NULL; + + mem = xx_MallocSmart((uint32_t)size, memPartitionId, align); + if (mem == NULL) + { + XX_Free(p_MemDbg); + return NULL; + } + + INIT_LIST(&p_MemDbg->node); + p_MemDbg->f_free = xx_FreeSmart; + p_MemDbg->mem = mem; + p_MemDbg->fname = fname; + p_MemDbg->fline = line; + p_MemDbg->size = size+sizeof(t_MemDebug); + LIST_AddToTail(&p_MemDbg->node, &memDbgLst); + + return mem; +} + +static void debug_free(void *mem) +{ + t_List *p_MemDbgLh = NULL; + t_MemDebug *p_MemDbg; + bool found = FALSE; + + if (LIST_IsEmpty(&memDbgLst)) + { + REPORT_ERROR(MAJOR, E_ALREADY_FREE, ("Unbalanced free (0x%08x)", mem)); + return; + } + + LIST_FOR_EACH(p_MemDbgLh, &memDbgLst) + { + p_MemDbg = MEMDBG_OBJECT(p_MemDbgLh); + if (p_MemDbg->mem == mem) + { + found = TRUE; + break; + } + } + + if (!found) + { + REPORT_ERROR(MAJOR, E_NOT_FOUND, + ("Attempt to free unallocated address (0x%08x)",mem)); + dump_stack(); + return; + } + + LIST_Del(p_MemDbgLh); + p_MemDbg->f_free(mem); + p_MemDbg->f_free(p_MemDbg); +} + +void XX_FreeSmart(void *p) +{ + debug_free(p); +} + + +void XX_Free(void *p) +{ + debug_free(p); +} + +#else /* not DEBUG_XX_MALLOC */ +void * XX_Malloc(uint32_t size) +{ + return xx_Malloc(size); +} + +void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment) +{ + return xx_MallocSmart(size,memPartitionId, alignment); +} + +void XX_FreeSmart(void *p) +{ + xx_FreeSmart(p); +} + + +void XX_Free(void *p) +{ + xx_Free(p); +} +#endif /* not DEBUG_XX_MALLOC */ + + +#if (defined(REPORT_EVENTS) && (REPORT_EVENTS > 0)) +void XX_EventById(uint32_t event, t_Handle appId, uint16_t flags, char *msg) +{ + e_Event eventCode = (e_Event)event; + + UNUSED(eventCode); + UNUSED(appId); + UNUSED(flags); + UNUSED(msg); +} +#endif /* (defined(REPORT_EVENTS) && ... 
*/ + + +uint32_t XX_DisableAllIntr(void) +{ + unsigned long flags; + + local_irq_save(flags); + + return (uint32_t)flags; +} + +void XX_RestoreAllIntr(uint32_t flags) +{ + local_irq_restore((unsigned long)flags); +} + +t_Error XX_Call( uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags ) +{ + UNUSED(qid); + UNUSED(appId); + UNUSED(flags); + + return f(id); +} + +int XX_IsICacheEnable(void) +{ + return TRUE; +} + +int XX_IsDCacheEnable(void) +{ + return TRUE; +} + + +typedef struct { + t_Isr *f_Isr; + t_Handle handle; +} t_InterruptHandler; + + +t_Handle interruptHandlers[0x00010000]; + +static irqreturn_t LinuxInterruptHandler (int irq, void *dev_id) +{ + t_InterruptHandler *p_IntrHndl = (t_InterruptHandler *)dev_id; + p_IntrHndl->f_Isr(p_IntrHndl->handle); + return IRQ_HANDLED; +} + +t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle) +{ + const char *device; + t_InterruptHandler *p_IntrHndl; + + device = GetDeviceName(irq); + if (device == NULL) + RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Interrupt source - %d", irq)); + + p_IntrHndl = (t_InterruptHandler *)XX_Malloc(sizeof(t_InterruptHandler)); + if (p_IntrHndl == NULL) + RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); + p_IntrHndl->f_Isr = f_Isr; + p_IntrHndl->handle = handle; + interruptHandlers[irq] = p_IntrHndl; + + if (request_irq(GetDeviceIrqNum(irq), LinuxInterruptHandler, 0, device, p_IntrHndl) < 0) + RETURN_ERROR(MAJOR, E_BUSY, ("Can't get IRQ %s\n", device)); + disable_irq(GetDeviceIrqNum(irq)); + + return E_OK; +} + +t_Error XX_FreeIntr(int irq) +{ + t_InterruptHandler *p_IntrHndl = interruptHandlers[irq]; + free_irq(GetDeviceIrqNum(irq), p_IntrHndl); + XX_Free(p_IntrHndl); + interruptHandlers[irq] = 0; + return E_OK; +} + +t_Error XX_EnableIntr(int irq) +{ + enable_irq(GetDeviceIrqNum(irq)); + return E_OK; +} + +t_Error XX_DisableIntr(int irq) +{ + disable_irq(GetDeviceIrqNum(irq)); + return E_OK; +} + + +/*****************************************************************************/ +/* Tasklet Service Routines */ +/*****************************************************************************/ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) +typedef struct +{ + t_Handle h_Data; + void (*f_Callback) (void *); + struct delayed_work dwork; +} t_Tasklet; + +static void GenericTaskletCallback(struct work_struct *p_Work) +{ + t_Tasklet *p_Task = container_of(p_Work, t_Tasklet, dwork.work); + + p_Task->f_Callback(p_Task->h_Data); +} +#endif /* LINUX_VERSION_CODE */ + + +t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct work_struct *p_Task; + p_Task = (struct work_struct *)XX_Malloc(sizeof(struct work_struct)); + INIT_WORK(p_Task, routine, data); +#else + t_Tasklet *p_Task = (t_Tasklet *)XX_Malloc(sizeof(t_Tasklet)); + p_Task->h_Data = data; + p_Task->f_Callback = routine; + INIT_DELAYED_WORK(&p_Task->dwork, GenericTaskletCallback); +#endif /* LINUX_VERSION_CODE */ + + return (t_TaskletHandle)p_Task; +} + + +void XX_FreeTasklet (t_TaskletHandle h_Tasklet) +{ + if (h_Tasklet) + XX_Free(h_Tasklet); +} + +int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate) +{ + int ans; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + if (immediate) + ans = schedule_work(h_Tasklet); + else + ans = schedule_delayed_work(h_Tasklet, 1); +#else + if (immediate) + ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, 0); + else + ans = schedule_delayed_work(&((t_Tasklet *)h_Tasklet)->dwork, HZ); +#endif /* LINUX_VERSION_CODE 
*/ + + return ans; +} + +void XX_FlushScheduledTasks(void) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) + flush_scheduled_tasks(); +#else + flush_scheduled_work(); +#endif /* LINUX_VERSION_CODE */ +} + +int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + return (int)(((struct work_struct *)h_Tasklet)->pending); +#else + return (int)delayed_work_pending(&((t_Tasklet *)h_Tasklet)->dwork); +#endif /* LINUX_VERSION_CODE */ +} + +void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) + ((struct tq_struct *)h_Tasklet)->data = data; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + ((struct work_struct *)h_Tasklet)->data = data; +#else + ((t_Tasklet *)h_Tasklet)->h_Data = data; +#endif /* LINUX_VERSION_CODE */ +} + +t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + return (t_Handle)(((struct work_struct *)h_Tasklet)->data); +#else + return ((t_Tasklet *)h_Tasklet)->h_Data; +#endif /* LINUX_VERSION_CODE */ +} + + +/*****************************************************************************/ +/* Spinlock Service Routines */ +/*****************************************************************************/ + +t_Handle XX_InitSpinlock(void) +{ + spinlock_t *p_Spinlock = (spinlock_t *)XX_Malloc(sizeof(spinlock_t)); + if (!p_Spinlock) + return NULL; + + spin_lock_init(p_Spinlock); + + return (t_Handle)p_Spinlock; +} + +void XX_FreeSpinlock(t_Handle h_Spinlock) +{ + if (h_Spinlock) + XX_Free(h_Spinlock); +} + +void XX_LockSpinlock(t_Handle h_Spinlock) +{ + spin_lock((spinlock_t *)h_Spinlock); +} + +void XX_UnlockSpinlock(t_Handle h_Spinlock) +{ + spin_unlock((spinlock_t *)h_Spinlock); +} + +uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock) +{ + unsigned long intrFlags; + spin_lock_irqsave((spinlock_t *)h_Spinlock, intrFlags); + return intrFlags; +} + +void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags) +{ + spin_unlock_irqrestore((spinlock_t *)h_Spinlock, (unsigned long)intrFlags); +} + + +/*****************************************************************************/ +/* Timers Service Routines */ +/*****************************************************************************/ +/* The time now is in mili sec. 
resolution */ +uint32_t XX_CurrentTime(void) +{ + return (jiffies*1000)/HZ; +} + + +t_Handle XX_CreateTimer(void) +{ + struct timer_list *p_Timer = (struct timer_list *)XX_Malloc(sizeof(struct timer_list)); + if (p_Timer) + { + memset(p_Timer, 0, sizeof(struct timer_list)); + init_timer(p_Timer); + } + return (t_Handle)p_Timer; +} + +void XX_FreeTimer(t_Handle h_Timer) +{ + if (h_Timer) + XX_Free(h_Timer); +} + +void XX_StartTimer(t_Handle h_Timer, + uint32_t msecs, + bool periodic, + void (*f_TimerExpired)(t_Handle), + t_Handle h_Arg) +{ + int tmp_jiffies = (msecs*HZ)/1000; + struct timer_list *p_Timer = (struct timer_list *)h_Timer; + + SANITY_CHECK_RETURN((periodic == FALSE), E_NOT_SUPPORTED); + + p_Timer->function = (void (*)(unsigned long))f_TimerExpired; + p_Timer->data = (unsigned long)h_Arg; + if ((msecs*HZ)%1000) + tmp_jiffies++; + p_Timer->expires = (jiffies + tmp_jiffies); + + add_timer((struct timer_list *)h_Timer); +} + +void XX_SetTimerData(t_Handle h_Timer, t_Handle data) +{ + struct timer_list *p_Timer = (struct timer_list *)h_Timer; + + p_Timer->data = (unsigned long)data; +} + +t_Handle XX_GetTimerData(t_Handle h_Timer) +{ + struct timer_list *p_Timer = (struct timer_list *)h_Timer; + + return (t_Handle)p_Timer->data; +} + +uint32_t XX_GetExpirationTime(t_Handle h_Timer) +{ + struct timer_list *p_Timer = (struct timer_list *)h_Timer; + + return (uint32_t)p_Timer->expires; +} + +void XX_StopTimer(t_Handle h_Timer) +{ + del_timer((struct timer_list *)h_Timer); +} + +void XX_ModTimer(t_Handle h_Timer, uint32_t msecs) +{ + int tmp_jiffies = (msecs*HZ)/1000; + + if ((msecs*HZ)%1000) + tmp_jiffies++; + mod_timer((struct timer_list *)h_Timer, jiffies + tmp_jiffies); +} + +int XX_TimerIsActive(t_Handle h_Timer) +{ + return timer_pending((struct timer_list *)h_Timer); +} + +uint32_t XX_Sleep(uint32_t msecs) +{ + int tmp_jiffies = (msecs*HZ)/1000; + + if ((msecs*HZ)%1000) + tmp_jiffies++; + return schedule_timeout(tmp_jiffies); +} + +/*BEWARE!!!!! 
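/*
 * Illustrative sketch, not part of the patch: the milliseconds-to-ticks
 * conversion used above by XX_StartTimer(), XX_ModTimer() and XX_Sleep().
 * It is a ceiling division, so a requested timeout is never shortened by
 * integer truncation; 'hz' stands in for the kernel's HZ constant (the
 * in-kernel helper msecs_to_jiffies() exists for the same purpose).
 */
static unsigned long ms_to_ticks(unsigned long msecs, unsigned long hz)
{
	unsigned long ticks = (msecs * hz) / 1000;

	if ((msecs * hz) % 1000)	/* any fractional tick rounds up */
		ticks++;

	return ticks;
}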
UDelay routine is BUSY WAITTING!!!!!*/ +void XX_UDelay(uint32_t usecs) +{ + udelay(usecs); +} + +/* TODO: verify that these are correct */ +#define MSG_BODY_SIZE 512 +typedef t_Error (t_MsgHandler) (t_Handle h_Mod, uint32_t msgId, uint8_t msgBody[MSG_BODY_SIZE]); +typedef void (t_MsgCompletionCB) (t_Handle h_Arg, uint8_t msgBody[MSG_BODY_SIZE]); +t_Error XX_SendMessage(char *p_DestAddr, + uint32_t msgId, + uint8_t msgBody[MSG_BODY_SIZE], + t_MsgCompletionCB *f_CompletionCB, + t_Handle h_CBArg); + +typedef struct { + char *p_Addr; + t_MsgHandler *f_MsgHandlerCB; + t_Handle h_Mod; + t_List node; +} t_MsgHndlr; +#define MSG_HNDLR_OBJECT(ptr) LIST_OBJECT(ptr, t_MsgHndlr, node) + +LIST(msgHndlrList); + +static void EnqueueMsgHndlr(t_MsgHndlr *p_MsgHndlr) +{ + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + LIST_AddToTail(&p_MsgHndlr->node, &msgHndlrList); + XX_RestoreAllIntr(intFlags); +} +/* TODO: add this for multi-platform support +static t_MsgHndlr * DequeueMsgHndlr(void) +{ + t_MsgHndlr *p_MsgHndlr = NULL; + uint32_t intFlags; + + intFlags = XX_DisableAllIntr(); + if (!LIST_IsEmpty(&msgHndlrList)) + { + p_MsgHndlr = MSG_HNDLR_OBJECT(msgHndlrList.p_Next); + LIST_DelAndInit(&p_MsgHndlr->node); + } + XX_RestoreAllIntr(intFlags); + + return p_MsgHndlr; +} +*/ +static t_MsgHndlr * FindMsgHndlr(char *p_Addr) +{ + t_MsgHndlr *p_MsgHndlr; + t_List *p_Pos; + + LIST_FOR_EACH(p_Pos, &msgHndlrList) + { + p_MsgHndlr = MSG_HNDLR_OBJECT(p_Pos); + if (strstr(p_MsgHndlr->p_Addr, p_Addr)) + return p_MsgHndlr; + } + + return NULL; +} + +t_Error XX_RegisterMessageHandler (char *p_Addr, t_MsgHandler *f_MsgHandlerCB, t_Handle h_Mod) +{ + t_MsgHndlr *p_MsgHndlr; + uint32_t len; + + p_MsgHndlr = (t_MsgHndlr*)XX_Malloc(sizeof(t_MsgHndlr)); + if (!p_MsgHndlr) + RETURN_ERROR(MINOR, E_NO_MEMORY, ("message handler object!!!")); + memset(p_MsgHndlr, 0, sizeof(t_MsgHndlr)); + + len = strlen(p_Addr); + p_MsgHndlr->p_Addr = (char*)XX_Malloc(len+1); + strncpy(p_MsgHndlr->p_Addr,p_Addr, (uint32_t)(len+1)); + + p_MsgHndlr->f_MsgHandlerCB = f_MsgHandlerCB; + p_MsgHndlr->h_Mod = h_Mod; + INIT_LIST(&p_MsgHndlr->node); + EnqueueMsgHndlr(p_MsgHndlr); + + return E_OK; +} + +t_Error XX_UnregisterMessageHandler (char *p_Addr) +{ + t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_Addr); + if (!p_MsgHndlr) + RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!")); + + LIST_Del(&p_MsgHndlr->node); + XX_Free(p_MsgHndlr->p_Addr); + XX_Free(p_MsgHndlr); + + return E_OK; +} + +t_Error XX_SendMessage(char *p_DestAddr, + uint32_t msgId, + uint8_t msgBody[MSG_BODY_SIZE], + t_MsgCompletionCB *f_CompletionCB, + t_Handle h_CBArg) +{ + t_Error ans; + t_MsgHndlr *p_MsgHndlr = FindMsgHndlr(p_DestAddr); + if (!p_MsgHndlr) + RETURN_ERROR(MINOR, E_NO_DEVICE, ("message handler not found in list!!!")); + + ans = p_MsgHndlr->f_MsgHandlerCB(p_MsgHndlr->h_Mod, msgId, msgBody); + + if (f_CompletionCB) + f_CompletionCB(h_CBArg, msgBody); + + return ans; +} + +t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH], + t_IpcMsgHandler *f_MsgHandler, + t_Handle h_Module, + uint32_t replyLength) +{ + UNUSED(addr);UNUSED(f_MsgHandler);UNUSED(h_Module);UNUSED(replyLength); + return E_OK; +} + +t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH]) +{ + UNUSED(addr); + return E_OK; +} + + +t_Error XX_IpcSendMessage(t_Handle h_Session, + uint8_t *p_Msg, + uint32_t msgLength, + uint8_t *p_Reply, + uint32_t *p_ReplyLength, + t_IpcMsgCompletion *f_Completion, + t_Handle h_Arg) +{ + UNUSED(h_Session); UNUSED(p_Msg); 
UNUSED(msgLength); UNUSED(p_Reply); + UNUSED(p_ReplyLength); UNUSED(f_Completion); UNUSED(h_Arg); + return E_OK; +} + +t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH], + char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH]) +{ + UNUSED(destAddr); UNUSED(srcAddr); + return E_OK; +} + +/*Forced to introduce due to PRINT_FMT_PARAMS define*/ +uint32_t E500_GetId(void) +{ + return smp_processor_id(); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) +int GetDeviceIrqNum(int irq) +{ + struct device_node *iPar; + struct irq_domain *irqHost; + uint32_t hwIrq; + + /* Get the interrupt controller */ + iPar = of_find_node_by_name(NULL, "mpic"); + hwIrq = 0; + + ASSERT_COND(iPar != NULL); + /* Get the irq host */ + irqHost = irq_find_host(iPar); + of_node_put(iPar); + + /* Create irq mapping */ + return irq_create_mapping(irqHost, hwIrq); +} +#else +#error "kernel not supported!!!" +#endif /* LINUX_VERSION_CODE */ + +void * XX_PhysToVirt(physAddress_t addr) +{ + return UINT_TO_PTR(SYS_PhysToVirt((uint64_t)addr)); +} + +physAddress_t XX_VirtToPhys(void * addr) +{ + return (physAddress_t)SYS_VirtToPhys(PTR_TO_UINT(addr)); +} + +void * xx_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment) +{ + uintptr_t *returnCode, tmp; + + if (alignment < sizeof(uintptr_t)) + alignment = sizeof(uintptr_t); + size += alignment + sizeof(returnCode); + tmp = (uintptr_t)xx_Malloc(size); + if (tmp == 0) + return NULL; + returnCode = (uintptr_t*)((tmp + alignment + sizeof(returnCode)) & ~((uintptr_t)alignment - 1)); + *(returnCode - 1) = tmp; + + return (void*)returnCode; +} + +void xx_FreeSmart(void *p) +{ + xx_Free((void*)(*((uintptr_t *)(p) - 1))); +} --- linux-3.13.0.orig/drivers/net/dpa/dpa-ethtool.c +++ linux-3.13.0/drivers/net/dpa/dpa-ethtool.c @@ -0,0 +1,201 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
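/*
 * Illustrative sketch, not part of the patch: the aligned-allocation
 * scheme used by xx_MallocSmart()/xx_FreeSmart() above. The block is
 * over-allocated by 'alignment + sizeof(uintptr_t)', the returned
 * address is rounded up to the requested alignment (assumed to be a
 * power of two, as the mask arithmetic requires), and the original
 * pointer is stashed in the word just below the returned address so
 * the matching free can recover it. malloc()/free() stand in for
 * xx_Malloc()/xx_Free().
 */
#include <stdint.h>
#include <stdlib.h>

static void *aligned_malloc(size_t size, uintptr_t alignment)
{
	uintptr_t raw, aligned;

	if (alignment < sizeof(uintptr_t))
		alignment = sizeof(uintptr_t);

	raw = (uintptr_t)malloc(size + alignment + sizeof(uintptr_t));
	if (!raw)
		return NULL;

	aligned = (raw + alignment + sizeof(uintptr_t)) & ~(alignment - 1);
	((uintptr_t *)aligned)[-1] = raw;	/* stash the original pointer */

	return (void *)aligned;
}

static void aligned_free(void *p)
{
	free((void *)((uintptr_t *)p)[-1]);	/* recover and free the original */
}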
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "dpaa_eth.h" + +static int __cold dpa_get_settings(struct net_device *net_dev, struct ethtool_cmd *et_cmd) +{ + int _errno; + struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (priv->mac_dev == NULL) { + cpu_netdev_info(net_dev, "This is a MAC-less interface\n"); + return -ENODEV; + } + if (unlikely(priv->mac_dev->phy_dev == NULL)) { + cpu_netdev_err(net_dev, "phy device not initialized\n"); + return -ENODEV; + } + + _errno = phy_ethtool_gset(priv->mac_dev->phy_dev, et_cmd); + if (unlikely(_errno < 0)) + cpu_netdev_err(net_dev, "phy_ethtool_gset() = %d\n", _errno); + + return _errno; +} + +static int __cold dpa_set_settings(struct net_device *net_dev, struct ethtool_cmd *et_cmd) +{ + int _errno; + struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (priv->mac_dev == NULL) { + cpu_netdev_info(net_dev, "This is a MAC-less interface\n"); + return -ENODEV; + } + if (unlikely(priv->mac_dev->phy_dev == NULL)) { + cpu_netdev_err(net_dev, "phy device not initialized\n"); + return -ENODEV; + } + + _errno = phy_ethtool_sset(priv->mac_dev->phy_dev, et_cmd); + if (unlikely(_errno < 0)) + cpu_netdev_err(net_dev, "phy_ethtool_sset() = %d\n", _errno); + + return _errno; +} + +static void __cold dpa_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *drvinfo) +{ + int _errno; + + strncpy(drvinfo->driver, KBUILD_MODNAME, + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->driver)-1] = 0; + strncpy(drvinfo->version, VERSION, + sizeof(drvinfo->driver) - 1)[sizeof(drvinfo->version)-1] = 0; + _errno = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%X", 0); + + if (unlikely(_errno >= sizeof(drvinfo->fw_version))) { /* Truncated output */ + cpu_netdev_notice(net_dev, "snprintf() = %d\n", _errno); + } else if (unlikely(_errno < 0)) { + cpu_netdev_warn(net_dev, "snprintf() = %d\n", _errno); + memset(drvinfo->fw_version, 0, sizeof(drvinfo->fw_version)); + } + strncpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info) - 1)[sizeof(drvinfo->bus_info)-1] = 0; +} + +uint32_t __cold dpa_get_msglevel(struct net_device *net_dev) +{ + return ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable; +} + +void __cold dpa_set_msglevel(struct net_device *net_dev, uint32_t msg_enable) +{ + ((struct dpa_priv_s *)netdev_priv(net_dev))->msg_enable = msg_enable; +} + +int __cold dpa_nway_reset(struct net_device *net_dev) +{ + int _errno; + struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (priv->mac_dev == NULL) { + cpu_netdev_info(net_dev, "This is a MAC-less interface\n"); + return -ENODEV; + } + if (unlikely(priv->mac_dev->phy_dev == NULL)) { + cpu_netdev_err(net_dev, "phy device not initialized\n"); + return -ENODEV; + } + + _errno = 0; + if (priv->mac_dev->phy_dev->autoneg) { + _errno = phy_start_aneg(priv->mac_dev->phy_dev); + if (unlikely(_errno < 0)) + cpu_netdev_err(net_dev, "phy_start_aneg() = %d\n", + _errno); + } + + return _errno; +} + +void __cold dpa_get_ringparam(struct 
net_device *net_dev, struct ethtool_ringparam *et_ringparam) +{ + et_ringparam->rx_max_pending = 0; + et_ringparam->rx_mini_max_pending = 0; + et_ringparam->rx_jumbo_max_pending = 0; + et_ringparam->tx_max_pending = 0; + + et_ringparam->rx_pending = 0; + et_ringparam->rx_mini_pending = 0; + et_ringparam->rx_jumbo_pending = 0; + et_ringparam->tx_pending = 0; +} + +void __cold dpa_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *et_pauseparam) +{ + struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (priv->mac_dev == NULL) { + cpu_netdev_info(net_dev, "This is a MAC-less interface\n"); + return; + } + if (unlikely(priv->mac_dev->phy_dev == NULL)) { + cpu_netdev_err(net_dev, "phy device not initialized\n"); + return; + } + + et_pauseparam->autoneg = priv->mac_dev->phy_dev->autoneg; +} + +int __cold dpa_set_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *et_pauseparam) +{ + struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (priv->mac_dev == NULL) { + cpu_netdev_info(net_dev, "This is a MAC-less interface\n"); + return -ENODEV; + } + if (unlikely(priv->mac_dev->phy_dev == NULL)) { + cpu_netdev_err(net_dev, "phy device not initialized\n"); + return -ENODEV; + } + + priv->mac_dev->phy_dev->autoneg = et_pauseparam->autoneg; + + return 0; +} + +const struct ethtool_ops dpa_ethtool_ops = { + .get_settings = dpa_get_settings, + .set_settings = dpa_set_settings, + .get_drvinfo = dpa_get_drvinfo, + .get_msglevel = dpa_get_msglevel, + .set_msglevel = dpa_set_msglevel, + .nway_reset = dpa_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = dpa_get_ringparam, + .get_pauseparam = dpa_get_pauseparam, + .set_pauseparam = dpa_set_pauseparam, +}; --- linux-3.13.0.orig/drivers/net/dpa/dpaa_1588.c +++ linux-3.13.0/drivers/net/dpa/dpaa_1588.c @@ -0,0 +1,562 @@ +/* + * drivers/net/dpa/dpaa_1588.c + * + * Copyright (C) 2011 Freescale Semiconductor, Inc. + * Copyright (C) 2009 IXXAT Automation, GmbH + * + * DPAA Ethernet Driver -- IEEE 1588 interface functionality + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "dpaa_eth.h" +#include "dpaa_1588.h" + +static int dpa_ptp_init_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size) +{ + struct circ_buf *circ_buf = &ptp_buf->circ_buf; + + circ_buf->buf = vmalloc(sizeof(struct dpa_ptp_data) * size); + if (!circ_buf->buf) + return 1; + + circ_buf->head = 0; + circ_buf->tail = 0; + ptp_buf->size = size; + spin_lock_init(&ptp_buf->ptp_lock); + + return 0; +} + +static void dpa_ptp_reset_circ(struct dpa_ptp_circ_buf *ptp_buf, u32 size) +{ + struct circ_buf *circ_buf = &ptp_buf->circ_buf; + + circ_buf->head = 0; + circ_buf->tail = 0; + ptp_buf->size = size; +} + +static int dpa_ptp_insert(struct dpa_ptp_circ_buf *ptp_buf, + struct dpa_ptp_data *data) +{ + struct circ_buf *circ_buf = &ptp_buf->circ_buf; + int size = ptp_buf->size; + struct dpa_ptp_data *tmp; + unsigned long flags; + int head, tail; + + spin_lock_irqsave(&ptp_buf->ptp_lock, flags); + + head = circ_buf->head; + tail = circ_buf->tail; + + if (CIRC_SPACE(head, tail, size) <= 0) { + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); + return 1; + } + + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + head; + memcpy(tmp, data, sizeof(struct dpa_ptp_data)); + + circ_buf->head = (head + 1) & (size - 1); + + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); + + return 0; +} + +static int dpa_ptp_is_ident_match(struct dpa_ptp_ident *dst, + struct dpa_ptp_ident *src) +{ + int ret; + + if ((dst->version != src->version) || (dst->msg_type != src->msg_type)) + return 0; + + if ((dst->netw_prot == src->netw_prot) + || src->netw_prot == DPA_PTP_PROT_DONTCARE) { + if (dst->seq_id != src->seq_id) + return 0; + + ret = memcmp(dst->snd_port_id, src->snd_port_id, + DPA_PTP_SOURCE_PORT_LENGTH); + if (ret) + return 0; + else + return 1; + } + + return 0; +} + +static int dpa_ptp_find_and_remove(struct dpa_ptp_circ_buf *ptp_buf, + struct dpa_ptp_ident *ident, + struct dpa_ptp_time *ts) +{ + struct circ_buf *circ_buf = &ptp_buf->circ_buf; + int size = ptp_buf->size; + int head, tail, idx; + unsigned long flags; + struct dpa_ptp_data *tmp; + struct dpa_ptp_ident *tmp_ident; + + spin_lock_irqsave(&ptp_buf->ptp_lock, flags); + + head = circ_buf->head; + tail = idx = circ_buf->tail; + + if (CIRC_CNT_TO_END(head, tail, size) == 0) { + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); + return 1; + } + + while (idx != head) { + tmp = (struct dpa_ptp_data *)(circ_buf->buf) + idx; + tmp_ident = &tmp->ident; + if (dpa_ptp_is_ident_match(tmp_ident, ident)) + break; + idx = (idx + 1) & (size - 1); + } + + if (idx == head) { + circ_buf->tail = head; + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); + return 1; + } + + ts->sec = tmp->ts.sec; + ts->nsec = tmp->ts.nsec; + + circ_buf->tail = (idx + 1) & (size - 1); + + spin_unlock_irqrestore(&ptp_buf->ptp_lock, flags); + + return 0; +} + +static int dpa_ptp_get_time(dma_addr_t fd_addr, u32 *high, u32 *low) +{ + u8 *ts_addr = (u8 *)phys_to_virt(fd_addr); + u32 sec, nsec, mod; + u64 tmp; + + ts_addr += DPA_PTP_TIMESTAMP_OFFSET; + sec = *((u32 *)ts_addr); + nsec = *(((u32 *)ts_addr) + 1); + tmp = ((u64)sec << 32 | nsec) * DPA_PTP_NOMINAL_FREQ_PERIOD; + + mod = do_div(tmp, NANOSEC_PER_SECOND); + *high = (u32)tmp; + *low = mod; + + return 0; +} + +/* + * Parse the PTP packets + * + * The PTP header can be found in an IPv4 packet, IPv6 patcket or in + * an IEEE802.3 ethernet frame. 
This function returns the position of + * the PTP packet or NULL if no PTP found + */ +static u8 *dpa_ptp_parse_packet(struct sk_buff *skb, u16 *eth_type) +{ + u8 *pos = skb->data + ETH_ALEN + ETH_ALEN; + u8 *ptp_loc = NULL; + u8 msg_type; + struct iphdr *iph; + struct udphdr *udph; + struct ipv6hdr *ipv6h; + + *eth_type = *((u16 *)pos); + + /* Check if inner tag is here */ + if (*eth_type == ETH_P_8021Q) { + pos += DPA_VLAN_TAG_LEN; + *eth_type = *((u16 *)pos); + } + + pos += DPA_ETYPE_LEN; + + switch (*eth_type) { + /* Transport of PTP over Ethernet */ + case ETH_P_1588: + ptp_loc = pos; + msg_type = *((u8 *)(ptp_loc + PTP_OFFS_MSG_TYPE)) & 0xf; + if ((msg_type == PTP_MSGTYPE_SYNC) + || (msg_type == PTP_MSGTYPE_DELREQ) + || (msg_type == PTP_MSGTYPE_PDELREQ) + || (msg_type == PTP_MSGTYPE_PDELRESP)) + return ptp_loc; + break; + /* Transport of PTP over IPv4 */ + case ETH_P_IP: + iph = (struct iphdr *)pos; + if (ntohs(iph->protocol) != IPPROTO_UDP) + return NULL; + + pos += iph->ihl * 4; + udph = (struct udphdr *)pos; + if (ntohs(udph->dest) != 319) + return NULL; + ptp_loc = pos + sizeof(struct udphdr); + break; + /* Transport of PTP over IPv6 */ + case ETH_P_IPV6: + ipv6h = (struct ipv6hdr *)pos; + if (ntohs(ipv6h->nexthdr) != IPPROTO_UDP) + return NULL; + + pos += sizeof(struct ipv6hdr); + udph = (struct udphdr *)pos; + if (ntohs(udph->dest) != 319) + return NULL; + ptp_loc = pos + sizeof(struct udphdr); + break; + default: + break; + } + + return ptp_loc; +} + +static int dpa_ptp_store_stamp(struct net_device *dev, struct sk_buff *skb, + dma_addr_t fd_addr, struct dpa_ptp_data *ptp_data) +{ + u32 sec, nsec; + u8 *ptp_loc; + u16 eth_type; + + ptp_loc = dpa_ptp_parse_packet(skb, ð_type); + if (!ptp_loc) + return -EINVAL; + + switch (eth_type) { + case ETH_P_IP: + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV4; + break; + case ETH_P_IPV6: + ptp_data->ident.netw_prot = DPA_PTP_PROT_IPV6; + break; + case ETH_P_1588: + ptp_data->ident.netw_prot = DPA_PTP_PROT_802_3; + break; + default: + return -EINVAL; + } + + ptp_data->ident.version = *(ptp_loc + PTP_OFFS_VER_PTP) & 0xf; + ptp_data->ident.msg_type = *(ptp_loc + PTP_OFFS_MSG_TYPE) & 0xf; + ptp_data->ident.seq_id = *((u16 *)(ptp_loc + PTP_OFFS_SEQ_ID)); + memcpy(ptp_data->ident.snd_port_id, ptp_loc + PTP_OFFS_SRCPRTID, + DPA_PTP_SOURCE_PORT_LENGTH); + + dpa_ptp_get_time(fd_addr, &sec, &nsec); + ptp_data->ts.sec = (u64)sec; + ptp_data->ts.nsec = nsec; + + return 0; +} + +void dpa_ptp_store_txstamp(struct net_device *dev, struct sk_buff *skb, + const struct qm_fd *fd) +{ + struct dpa_priv_s *priv = netdev_priv(dev); + struct dpa_ptp_tsu *tsu = priv->tsu; + struct dpa_ptp_data ptp_tx_data; + dma_addr_t fd_addr = qm_fd_addr(fd); + int ret; + + ret = dpa_ptp_store_stamp(dev, skb, fd_addr, &ptp_tx_data); + if (ret) + return; + dpa_ptp_insert(&tsu->tx_timestamps, &ptp_tx_data); +} + +void dpa_ptp_store_rxstamp(struct net_device *dev, struct sk_buff *skb, + const struct qm_fd *fd) +{ + struct dpa_priv_s *priv = netdev_priv(dev); + struct dpa_ptp_tsu *tsu = priv->tsu; + struct dpa_ptp_data ptp_rx_data; + dma_addr_t fd_addr = qm_fd_addr(fd); + int ret; + + ret = dpa_ptp_store_stamp(dev, skb, fd_addr, &ptp_rx_data); + if (ret) + return; + dpa_ptp_insert(&tsu->rx_timestamps, &ptp_rx_data); +} + +static uint8_t dpa_get_tx_timestamp(struct dpa_ptp_tsu *ptp_tsu, + struct dpa_ptp_ident *ident, + struct dpa_ptp_time *ts) +{ + struct dpa_ptp_tsu *tsu = ptp_tsu; + struct dpa_ptp_time tmp; + int flag; + + flag = dpa_ptp_find_and_remove(&tsu->tx_timestamps, ident, 
&tmp); + if (!flag) { + ts->sec = tmp.sec; + ts->nsec = tmp.nsec; + return 0; + } + + return -1; +} + +static uint8_t dpa_get_rx_timestamp(struct dpa_ptp_tsu *ptp_tsu, + struct dpa_ptp_ident *ident, + struct dpa_ptp_time *ts) +{ + struct dpa_ptp_tsu *tsu = ptp_tsu; + struct dpa_ptp_time tmp; + int flag; + + flag = dpa_ptp_find_and_remove(&tsu->rx_timestamps, ident, &tmp); + if (!flag) { + ts->sec = tmp.sec; + ts->nsec = tmp.nsec; + return 0; + } + + return -1; +} + +static void dpa_set_fiper_alarm(struct dpa_ptp_tsu *tsu, + struct dpa_ptp_time *cnt_time) +{ + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; + u64 tmp, fiper; + + if (mac_dev->fm_rtc_disable) + mac_dev->fm_rtc_disable(tsu->dpa_priv->net_dev); + + /* TMR_FIPER1 will pulse every second after ALARM1 expired */ + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec; + fiper = NANOSEC_PER_SECOND - DPA_PTP_NOMINAL_FREQ_PERIOD; + if (mac_dev->fm_rtc_set_alarm) + mac_dev->fm_rtc_set_alarm(tsu->dpa_priv->net_dev, 0, tmp); + if (mac_dev->fm_rtc_set_fiper) + mac_dev->fm_rtc_set_fiper(tsu->dpa_priv->net_dev, 0, fiper); + + if (mac_dev->fm_rtc_enable) + mac_dev->fm_rtc_enable(tsu->dpa_priv->net_dev); +} + +static void dpa_get_curr_cnt(struct dpa_ptp_tsu *tsu, + struct dpa_ptp_time *curr_time) +{ + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; + u64 tmp; + u32 mod; + + if (mac_dev->fm_rtc_get_cnt) + mac_dev->fm_rtc_get_cnt(tsu->dpa_priv->net_dev, &tmp); + + mod = do_div(tmp, NANOSEC_PER_SECOND); + curr_time->sec = (u32)tmp; + curr_time->nsec = mod; +} + +static void dpa_set_1588cnt(struct dpa_ptp_tsu *tsu, + struct dpa_ptp_time *cnt_time) +{ + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; + u64 tmp; + + tmp = (u64)cnt_time->sec * NANOSEC_PER_SECOND + (u64)cnt_time->nsec; + + if (mac_dev->fm_rtc_set_cnt) + mac_dev->fm_rtc_set_cnt(tsu->dpa_priv->net_dev, tmp); + + /* Restart fiper two seconds later */ + cnt_time->sec += 2; + cnt_time->nsec = 0; + dpa_set_fiper_alarm(tsu, cnt_time); +} + +static void dpa_get_drift(struct dpa_ptp_tsu *tsu, u32 *addend) +{ + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; + u32 drift; + + if (mac_dev->fm_rtc_get_drift) + mac_dev->fm_rtc_get_drift(tsu->dpa_priv->net_dev, &drift); + + *addend = drift; +} + +static void dpa_set_drift(struct dpa_ptp_tsu *tsu, u32 addend) +{ + struct mac_device *mac_dev = tsu->dpa_priv->mac_dev; + + if (mac_dev->fm_rtc_set_drift) + mac_dev->fm_rtc_set_drift(tsu->dpa_priv->net_dev, addend); +} + +static void dpa_flush_timestamp(struct dpa_ptp_tsu *tsu) +{ + dpa_ptp_reset_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ); + dpa_ptp_reset_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ); +} + +int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct dpa_priv_s *priv = netdev_priv(dev); + struct dpa_ptp_tsu *tsu = priv->tsu; + struct mac_device *mac_dev = priv->mac_dev; + struct dpa_ptp_data ptp_data; + struct dpa_ptp_data *ptp_data_user; + struct dpa_ptp_time act_time; + u32 addend; + int retval = 0; + + if (!tsu || !tsu->valid) + return -ENODEV; + + switch (cmd) { + case PTP_ENBL_TXTS_IOCTL: + tsu->hwts_tx_en_ioctl = 1; + if (mac_dev->ptp_enable) + mac_dev->ptp_enable(mac_dev); + break; + case PTP_DSBL_TXTS_IOCTL: + tsu->hwts_tx_en_ioctl = 0; + if (mac_dev->ptp_disable) + mac_dev->ptp_disable(mac_dev); + break; + case PTP_ENBL_RXTS_IOCTL: + tsu->hwts_rx_en_ioctl = 1; + break; + case PTP_DSBL_RXTS_IOCTL: + tsu->hwts_rx_en_ioctl = 0; + break; + case PTP_GET_RX_TIMESTAMP: + ptp_data_user = (struct dpa_ptp_data 
*)ifr->ifr_data; + if (copy_from_user(&ptp_data.ident, + &ptp_data_user->ident, sizeof(ptp_data.ident))) + return -EINVAL; + + if (dpa_get_rx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts)) + return -EAGAIN; + + if (copy_to_user((void __user *)&ptp_data_user->ts, + &ptp_data.ts, sizeof(ptp_data.ts))) + return -EFAULT; + break; + case PTP_GET_TX_TIMESTAMP: + ptp_data_user = (struct dpa_ptp_data *)ifr->ifr_data; + if (copy_from_user(&ptp_data.ident, + &ptp_data_user->ident, sizeof(ptp_data.ident))) + return -EINVAL; + + if (dpa_get_tx_timestamp(tsu, &ptp_data.ident, &ptp_data.ts)) + return -EAGAIN; + + if (copy_to_user((void __user *)&ptp_data_user->ts, + &ptp_data.ts, sizeof(ptp_data.ts))) + return -EFAULT; + break; + case PTP_GET_TIME: + dpa_get_curr_cnt(tsu, &act_time); + if (copy_to_user(ifr->ifr_data, &act_time, sizeof(act_time))) + return -EFAULT; + break; + case PTP_SET_TIME: + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time))) + return -EINVAL; + dpa_set_1588cnt(tsu, &act_time); + break; + case PTP_GET_ADJ: + dpa_get_drift(tsu, &addend); + if (copy_to_user(ifr->ifr_data, &addend, sizeof(addend))) + return -EFAULT; + break; + case PTP_SET_ADJ: + if (copy_from_user(&addend, ifr->ifr_data, sizeof(addend))) + return -EINVAL; + dpa_set_drift(tsu, addend); + break; + case PTP_SET_FIPER_ALARM: + if (copy_from_user(&act_time, ifr->ifr_data, sizeof(act_time))) + return -EINVAL; + dpa_set_fiper_alarm(tsu, &act_time); + break; + case PTP_CLEANUP_TS: + dpa_flush_timestamp(tsu); + break; + default: + return -EINVAL; + } + + return retval; +} + +int dpa_ptp_init(struct dpa_priv_s *priv) +{ + struct dpa_ptp_tsu *tsu; + + /* Allocate memory for PTP structure */ + tsu = kzalloc(sizeof(struct dpa_ptp_tsu), GFP_KERNEL); + if (!tsu) + return -ENOMEM; + + memset(tsu, 0, sizeof(*tsu)); + tsu->valid = TRUE; + tsu->dpa_priv = priv; + + dpa_ptp_init_circ(&tsu->rx_timestamps, DEFAULT_PTP_RX_BUF_SZ); + dpa_ptp_init_circ(&tsu->tx_timestamps, DEFAULT_PTP_TX_BUF_SZ); + + priv->tsu = tsu; + + return 0; +} +EXPORT_SYMBOL(dpa_ptp_init); + +void dpa_ptp_cleanup(struct dpa_priv_s *priv) +{ + struct dpa_ptp_tsu *tsu = priv->tsu; + + tsu->valid = FALSE; + vfree(tsu->rx_timestamps.circ_buf.buf); + vfree(tsu->tx_timestamps.circ_buf.buf); + + kfree(tsu); +} +EXPORT_SYMBOL(dpa_ptp_cleanup); + +static int __init __cold dpa_ptp_load(void) +{ + return 0; +} +module_init(dpa_ptp_load); + +static void __exit __cold dpa_ptp_unload(void) +{ +} +module_exit(dpa_ptp_unload); --- linux-3.13.0.orig/drivers/net/dpa/dpaa_1588.h +++ linux-3.13.0/drivers/net/dpa/dpaa_1588.h @@ -0,0 +1,141 @@ +/* + * drivers/net/dpa/dpaa_1588.h + * + * Copyright (C) 2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
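/*
 * Illustrative sketch, not part of the patch: the power-of-two circular
 * buffer indexing used by the PTP timestamp queues above (dpa_ptp_insert()
 * and dpa_ptp_find_and_remove()). The producer advances 'head', the
 * consumer advances 'tail', and both wrap with a mask, which is why
 * DEFAULT_PTP_RX_BUF_SZ/DEFAULT_PTP_TX_BUF_SZ must be powers of two.
 * Locking and the identity-matching search are omitted here.
 */
#define RING_SIZE 8				/* must be a power of two */

struct ring {
	int head;				/* next slot to write */
	int tail;				/* next slot to read */
	int slot[RING_SIZE];
};

static int ring_space(const struct ring *r)	/* free slots, as CIRC_SPACE() */
{
	return (r->tail - r->head - 1) & (RING_SIZE - 1);
}

static int ring_put(struct ring *r, int val)
{
	if (ring_space(r) == 0)
		return -1;			/* full: caller drops the entry */

	r->slot[r->head] = val;
	r->head = (r->head + 1) & (RING_SIZE - 1);
	return 0;
}

static int ring_get(struct ring *r, int *val)
{
	if (r->head == r->tail)
		return -1;			/* empty */

	*val = r->slot[r->tail];
	r->tail = (r->tail + 1) & (RING_SIZE - 1);
	return 0;
}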
+ * + */ +#ifndef __DPAA_1588_H__ +#define __DPAA_1588_H__ + +#include +#include +#include +#include + +#define DEFAULT_PTP_RX_BUF_SZ 2048 +#define DEFAULT_PTP_TX_BUF_SZ 512 + +/* 1588 private ioctl calls */ +#define PTP_ENBL_TXTS_IOCTL SIOCDEVPRIVATE +#define PTP_DSBL_TXTS_IOCTL (SIOCDEVPRIVATE + 1) +#define PTP_ENBL_RXTS_IOCTL (SIOCDEVPRIVATE + 2) +#define PTP_DSBL_RXTS_IOCTL (SIOCDEVPRIVATE + 3) +#define PTP_GET_TX_TIMESTAMP (SIOCDEVPRIVATE + 4) +#define PTP_GET_RX_TIMESTAMP (SIOCDEVPRIVATE + 5) +#define PTP_SET_TIME (SIOCDEVPRIVATE + 6) +#define PTP_GET_TIME (SIOCDEVPRIVATE + 7) +#define PTP_SET_FIPER_ALARM (SIOCDEVPRIVATE + 8) +#define PTP_SET_ADJ (SIOCDEVPRIVATE + 9) +#define PTP_GET_ADJ (SIOCDEVPRIVATE + 10) +#define PTP_CLEANUP_TS (SIOCDEVPRIVATE + 11) + +/* PTP V2 message type */ +enum { + PTP_MSGTYPE_SYNC = 0x0, + PTP_MSGTYPE_DELREQ = 0x1, + PTP_MSGTYPE_PDELREQ = 0x2, + PTP_MSGTYPE_PDELRESP = 0x3, + PTP_MSGTYPE_FLWUP = 0x8, + PTP_MSGTYPE_DELRESP = 0x9, + PTP_MSGTYPE_PDELRES_FLWUP = 0xA, + PTP_MSGTYPE_ANNOUNCE = 0xB, + PTP_MSGTYPE_SGNLNG = 0xC, + PTP_MSGTYPE_MNGMNT = 0xD, +}; + +/* Byte offset of data in the PTP V2 headers */ +#define PTP_OFFS_MSG_TYPE 0 +#define PTP_OFFS_VER_PTP 1 +#define PTP_OFFS_MSG_LEN 2 +#define PTP_OFFS_DOM_NMB 4 +#define PTP_OFFS_FLAGS 6 +#define PTP_OFFS_CORFIELD 8 +#define PTP_OFFS_SRCPRTID 20 +#define PTP_OFFS_SEQ_ID 30 +#define PTP_OFFS_CTRL 32 +#define PTP_OFFS_LOGMEAN 33 + +#define PTP_IP_OFFS 14 +#define PTP_UDP_OFFS 34 +#define PTP_HEADER_OFFS 42 +#define PTP_MSG_TYPE_OFFS (PTP_HEADER_OFFS + PTP_OFFS_MSG_TYPE) +#define PTP_SPORT_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SRCPRTID) +#define PTP_SEQ_ID_OFFS (PTP_HEADER_OFFS + PTP_OFFS_SEQ_ID) +#define PTP_CTRL_OFFS (PTP_HEADER_OFFS + PTP_OFFS_CTRL) + +/* 1588-2008 network protocol enumeration values */ +#define DPA_PTP_PROT_IPV4 1 +#define DPA_PTP_PROT_IPV6 2 +#define DPA_PTP_PROT_802_3 3 +#define DPA_PTP_PROT_DONTCARE 0xFFFF + +#define DPA_PTP_SOURCE_PORT_LENGTH 10 +#define DPA_PTP_HEADER_SZE 34 +#define DPA_ETYPE_LEN 2 +#define DPA_VLAN_TAG_LEN 4 + +#define DPA_PTP_TIMESTAMP_OFFSET 0x30 +#define DPA_PTP_NOMINAL_FREQ_PERIOD 0xa /* 10ns -> 100M */ +#define NANOSEC_PER_SECOND 1000000000 + +/* Struct needed to identify a timestamp */ +struct dpa_ptp_ident { + u8 version; + u8 msg_type; + u16 netw_prot; + u16 seq_id; + u8 snd_port_id[DPA_PTP_SOURCE_PORT_LENGTH]; +}; + +/* Timestamp format in 1588-2008 */ +struct dpa_ptp_time { + u64 sec; /* just 48 bit used */ + u32 nsec; +}; + +/* needed for timestamp data over ioctl */ +struct dpa_ptp_data { + struct dpa_ptp_ident ident; + struct dpa_ptp_time ts; +}; + +struct dpa_ptp_circ_buf { + struct circ_buf circ_buf; + u32 size; + spinlock_t ptp_lock; +}; + +/* PTP TSU control structure */ +struct dpa_ptp_tsu { + struct dpa_priv_s *dpa_priv; + bool valid; + struct dpa_ptp_circ_buf rx_timestamps; + struct dpa_ptp_circ_buf tx_timestamps; + + /* HW timestamping over ioctl enabled flag */ + int hwts_tx_en_ioctl; + int hwts_rx_en_ioctl; +}; + +extern int dpa_ptp_init(struct dpa_priv_s *priv); +extern void dpa_ptp_cleanup(struct dpa_priv_s *priv); +extern void dpa_ptp_store_txstamp(struct net_device *dev, struct sk_buff *skb, + const struct qm_fd *fd); +extern void dpa_ptp_store_rxstamp(struct net_device *dev, struct sk_buff *skb, + const struct qm_fd *fd); +extern int dpa_ioctl_1588(struct net_device *dev, struct ifreq *ifr, int cmd); +#endif --- linux-3.13.0.orig/drivers/net/dpa/dpaa_eth-common.h +++ linux-3.13.0/drivers/net/dpa/dpaa_eth-common.h @@ -0,0 +1,150 @@ +/* + * 
Copyright 2008-2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DPA_COMMON_H +#define __DPA_COMMON_H + +#include /* pr_*() */ +#include /* dev_*() */ +#include /* smp_processor_id() */ + +/* The basename of the source file is being compiled */ +#define __file__ KBUILD_BASENAME".c" + +#define __hot + +#define cpu_printk(level, format, arg...) \ + pr_##level("cpu%d: " format, smp_processor_id(), ##arg) + +#define cpu_pr_emerg(format, arg...) \ + cpu_printk(emerg, format, ##arg) +#define cpu_pr_alert(format, arg...) \ + cpu_printk(alert, format, ##arg) +#define cpu_pr_crit(format, arg...) \ + cpu_printk(crit, format, ##arg) +#define cpu_pr_err(format, arg...) \ + cpu_printk(err, format, ##arg) +#define cpu_pr_warning(format, arg...) \ + cpu_printk(warning, format, ##arg) +#define cpu_pr_notice(format, arg...) \ + cpu_printk(notice, format, ##arg) +#define cpu_pr_info(format, arg...) \ + cpu_printk(info, format, ##arg) +#define cpu_pr_debug(format, arg...) \ + cpu_printk(debug, format, ##arg) + +/* Keep this in sync with the dev_*() definitions from linux/device.h */ +#define cpu_dev_printk(level, dev, format, arg...) \ + cpu_pr_##level("%s: %s: " format, dev_driver_string(dev), \ + dev_name(dev), ##arg) + +#define cpu_dev_emerg(dev, format, arg...) \ + cpu_dev_printk(emerg, dev, format, ##arg) +#define cpu_dev_alert(dev, format, arg...) \ + cpu_dev_printk(alert, dev, format, ##arg) +#define cpu_dev_crit(dev, format, arg...) \ + cpu_dev_printk(crit, dev, format, ##arg) +#define cpu_dev_err(dev, format, arg...) \ + cpu_dev_printk(err, dev, format, ##arg) +#define cpu_dev_warn(dev, format, arg...) \ + cpu_dev_printk(warning, dev, format, ##arg) +#define cpu_dev_notice(dev, format, arg...) \ + cpu_dev_printk(notice, dev, format, ##arg) +#define cpu_dev_info(dev, format, arg...) 
\ + cpu_dev_printk(info, dev, format, ##arg) +#define cpu_dev_dbg(dev, format, arg...) \ + cpu_dev_printk(debug, dev, format, ##arg) + +#define dpaa_eth_printk(level, dev, format, arg...) \ + cpu_dev_printk(level, dev, "%s:%hu:%s() " format, \ + __file__, __LINE__, __func__, ##arg) + +#define dpaa_eth_emerg(dev, format, arg...) \ + dpaa_eth_printk(emerg, dev, format, ##arg) +#define dpaa_eth_alert(dev, format, arg...) \ + dpaa_eth_printk(alert, dev, format, ##arg) +#define dpaa_eth_crit(dev, format, arg...) \ + dpaa_eth_printk(crit, dev, format, ##arg) +#define dpaa_eth_err(dev, format, arg...) \ + dpaa_eth_printk(err, dev, format, ##arg) +#define dpaa_eth_warning(dev, format, arg...) \ + dpaa_eth_printk(warning, dev, format, ##arg) +#define dpaa_eth_notice(dev, format, arg...) \ + dpaa_eth_printk(notice, dev, format, ##arg) +#define dpaa_eth_info(dev, format, arg...) \ + dpaa_eth_printk(info, dev, format, ##arg) +#define dpaa_eth_debug(dev, format, arg...) \ + dpaa_eth_printk(debug, dev, format, ##arg) + +#define cpu_netdev_emerg(net_dev, format, arg...) \ + dpaa_eth_emerg((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_alert(net_dev, format, arg...) \ + dpaa_eth_alert((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_crit(net_dev, format, arg...) \ + dpaa_eth_crit((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_err(net_dev, format, arg...) \ + dpaa_eth_err((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_warn(net_dev, format, arg...) \ + dpaa_eth_warning((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_notice(net_dev, format, arg...) \ + dpaa_eth_notice((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_info(net_dev, format, arg...) \ + dpaa_eth_info((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) +#define cpu_netdev_dbg(net_dev, format, arg...) \ + dpaa_eth_debug((net_dev)->dev.parent, "%s: " format, \ + (net_dev)->name , ##arg) + +enum {RX, TX}; + +#define DPA_PRIV_DATA_SIZE 16 +#define DPA_PARSE_RESULTS_SIZE sizeof(t_FmPrsResult) +#define DPA_HASH_RESULTS_SIZE 16 + +#define dpaa_eth_init_port(type, port, param, errq_id, defq_id, has_timer) \ +{ \ + param.errq = errq_id; \ + param.defq = defq_id; \ + param.priv_data_size = DPA_PRIV_DATA_SIZE; \ + param.parse_results = true; \ + param.hash_results = true; \ + param.time_stamp = has_timer; \ + fm_set_##type##_port_params(port, ¶m); \ +} + +#endif /* __DPA_COMMON_H */ --- linux-3.13.0.orig/drivers/net/dpa/dpaa_eth.c +++ linux-3.13.0/drivers/net/dpa/dpaa_eth.c @@ -0,0 +1,3237 @@ +/* + * Copyright 2008-2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* arp_hdr_len() */ +#include /* VLAN_HLEN */ +#include /* struct icmphdr */ +#include /* struct iphdr */ +#include /* struct ipv6hdr */ +#include /* struct udphdr */ +#include /* struct tcphdr */ +#include /* net_ratelimit() */ +#include /* ETH_P_IP and ETH_P_IPV6 */ +#include +#include +#include +#include /* get_hard_smp_processor_id() */ +#include +#ifdef CONFIG_DEBUG_FS +#include +#endif +#include + +#include "fsl_fman.h" +#include "fm_ext.h" +#include "fm_port_ext.h" + +#include "mac.h" +#include "dpaa_eth.h" +#include "dpaa_1588.h" + +#define ARRAY2_SIZE(arr) (ARRAY_SIZE(arr) * ARRAY_SIZE((arr)[0])) + +#define DPA_NETIF_FEATURES (NETIF_F_HW_QDISC) +#define DEFAULT_COUNT 64 +#define DEFAULT_BUF_SIZE DPA_BP_SIZE(fsl_fman_phy_maxfrm); +#define DPA_MAX_TX_BACKLOG 512 +#define DPA_NAPI_WEIGHT 64 + +#define DPA_BP_REFILL (1 | (smp_processor_id() << 16)) +#define DPA_BP_FINE ((smp_processor_id() << 16)) +#define DPA_BP_REFILL_NEEDED 1 + +/* Bootarg used to override the Kconfig DPA_MAX_FRM_SIZE value */ +#define FSL_FMAN_PHY_MAXFRM_BOOTARG "fsl_fman_phy_max_frm" + +/* + * Values for the L3R field of the FM Parse Results + */ +/* L3 Type field: First IP Present IPv4 */ +#define FM_L3_PARSE_RESULT_IPV4 0x8000 +/* L3 Type field: First IP Present IPv6 */ +#define FM_L3_PARSE_RESULT_IPV6 0x4000 + +/* + * Values for the L4R field of the FM Parse Results + */ +/* L4 Type field: UDP */ +#define FM_L4_PARSE_RESULT_UDP 0x40 +/* L4 Type field: TCP */ +#define FM_L4_PARSE_RESULT_TCP 0x20 + +/* + * FD status field indicating whether the FM Parser has attempted to validate + * the L4 csum of the frame. + * Note that having this bit set doesn't necessarily imply that the checksum + * is valid. One would have to check the parse results to find that out. + */ +#define FM_FD_STAT_L4CV 0x00000004 + +#define DPA_DESCRIPTION "FSL DPAA Ethernet driver" + +MODULE_LICENSE("Dual BSD/GPL"); + +MODULE_AUTHOR("Andy Fleming "); + +MODULE_DESCRIPTION(DPA_DESCRIPTION); + +static uint8_t debug = -1; +module_param(debug, byte, S_IRUGO); +MODULE_PARM_DESC(debug, "Module/Driver verbosity level"); + +static uint16_t tx_timeout = 1000; +module_param(tx_timeout, ushort, S_IRUGO); +MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dpa_debugfs_root; +#endif + +/* + * Max frame size, across all interfaces. 
+ * Configurable from Kconfig or bootargs, to avoid allocating + * oversized (socket) buffers when not using jumbo frames. + * Must be large enough to accomodate the network MTU, but small enough + * to avoid wasting skb memory. + * + * Could be overridden once, at boot-time, via the + * fsl_fman_phy_set_max_frm() callback. + */ +int fsl_fman_phy_maxfrm = CONFIG_DPA_MAX_FRM_SIZE; + +static const char rtx[][3] = { + [RX] = "RX", + [TX] = "TX" +}; + +struct dpa_fq { + struct qman_fq fq_base; + struct list_head list; + struct net_device *net_dev; + bool init; + uint32_t fqid; + uint32_t flags; + uint16_t channel; + uint8_t wq; +}; + +/* BM */ + +#ifdef DEBUG +#define GFP_DPA_BP (GFP_DMA | __GFP_ZERO | GFP_ATOMIC) +#else +#define GFP_DPA_BP (GFP_DMA | GFP_ATOMIC) +#endif + +#define DPA_BP_HEAD (DPA_PRIV_DATA_SIZE + DPA_PARSE_RESULTS_SIZE + \ + DPA_HASH_RESULTS_SIZE) +#define DPA_BP_SIZE(s) (DPA_BP_HEAD + (s)) + +#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8) + +#define FM_FD_STAT_ERRORS \ + (FM_PORT_FRM_ERR_DMA | FM_PORT_FRM_ERR_PHYSICAL | \ + FM_PORT_FRM_ERR_SIZE | FM_PORT_FRM_ERR_CLS_DISCARD | \ + FM_PORT_FRM_ERR_EXTRACTION | FM_PORT_FRM_ERR_NO_SCHEME | \ + FM_PORT_FRM_ERR_ILL_PLCR | FM_PORT_FRM_ERR_PRS_TIMEOUT | \ + FM_PORT_FRM_ERR_PRS_ILL_INSTRUCT | FM_PORT_FRM_ERR_PRS_HDR_ERR) + +static struct dpa_bp *dpa_bp_array[64]; + +static struct dpa_bp *default_pool; + +static struct dpa_bp *dpa_bpid2pool(int bpid) +{ + return dpa_bp_array[bpid]; +} + +static void dpa_bp_depletion(struct bman_portal *portal, + struct bman_pool *pool, void *cb_ctx, int depleted) +{ + if (net_ratelimit()) + pr_err("Invalid Pool depleted notification!\n"); +} + +static void bmb_free(struct dpa_bp *bp, struct bm_buffer *bmb) +{ + int i; + struct sk_buff **skbh; + struct sk_buff *skb; + + for (i = 0; i < 8; i++) { + dma_addr_t addr = bm_buf_addr(&bmb[i]); + if (!addr) + break; + + skbh = (struct sk_buff **)phys_to_virt(addr); + skb = *skbh; + + dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); + + dev_kfree_skb(skb); + } +} + +static void dpa_bp_add_8(struct dpa_bp *dpa_bp) +{ + struct bm_buffer bmb[8]; + struct sk_buff **skbh; + dma_addr_t addr; + int i; + struct sk_buff *skb; + int err; + int *count_ptr; + + count_ptr = per_cpu_ptr(dpa_bp->percpu_count, smp_processor_id()); + + for (i = 0; i < 8; i++) { + /* + * The buffers tend to be aligned all to the same cache + * index. A standard dequeue operation pulls in 15 packets. + * This means that when it stashes, it evicts half of the + * packets it's stashing. In order to prevent that, we pad + * by a variable number of cache lines, to reduce collisions. + * We always pad by at least 1 cache line, because we want + * a little extra room at the beginning for IPSec and to + * accommodate NET_IP_ALIGN. + */ + int pad = (i + 1) * L1_CACHE_BYTES; + + skb = dev_alloc_skb(dpa_bp->size + pad); + if (unlikely(!skb)) { + printk(KERN_ERR "dev_alloc_skb() failed for %d bytes\n", dpa_bp->size + pad); + bm_buffer_set64(&bmb[i], 0); + break; + } + + skbh = (struct sk_buff **)(skb->head + pad); + *skbh = skb; + + addr = dma_map_single(dpa_bp->dev, skb->head + pad, + dpa_bp->size, DMA_FROM_DEVICE); + + bm_buffer_set64(&bmb[i], addr); + } + + /* Avoid releasing a completely null buffer; bman_release() requires + * at least one buf. 
*/ + if (likely(i)) { + err = bman_release(dpa_bp->pool, bmb, i, 0); + + if (unlikely(err < 0)) + bmb_free(dpa_bp, bmb); + else + *count_ptr += i; + } +} + +static void dpa_make_private_pool(struct dpa_bp *dpa_bp) +{ + int i; + + dpa_bp->percpu_count = __alloc_percpu(sizeof(*dpa_bp->percpu_count), + __alignof__(*dpa_bp->percpu_count)); + + /* Give each cpu an allotment of "count" buffers */ + for_each_online_cpu(i) { + int *thiscount; + int *countptr; + int j; + thiscount = per_cpu_ptr(dpa_bp->percpu_count, + smp_processor_id()); + countptr = per_cpu_ptr(dpa_bp->percpu_count, i); + + for (j = 0; j < dpa_bp->count; j += 8) + dpa_bp_add_8(dpa_bp); + + /* Adjust the counts */ + *countptr = j; + + if (countptr != thiscount) + *thiscount = *thiscount - j; + } +} + + +static void dpaa_eth_seed_pool(struct dpa_bp *bp) +{ + size_t count = bp->count; + size_t addr = bp->paddr; + + while (count) { + struct bm_buffer bufs[8]; + int num_bufs = 0; + + do { + BUG_ON(addr > 0xffffffffffffull); + bufs[num_bufs].bpid = bp->bpid; + bm_buffer_set64(&bufs[num_bufs++], addr); + addr += bp->size; + + } while (--count && (num_bufs < 8)); + + while (bman_release(bp->pool, bufs, num_bufs, 0)) + cpu_relax(); + } +} + +static int dpa_make_shared_pool(struct dpa_bp *bp) +{ + devm_request_mem_region(bp->dev, bp->paddr, bp->size * bp->count, + KBUILD_MODNAME); + bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr, + bp->size * bp->count, 0); + if (bp->vaddr == NULL) { + cpu_pr_err("Could not map memory for pool %d\n", bp->bpid); + return -EIO; + } + + if (bp->seed_pool) + dpaa_eth_seed_pool(bp); + + return 0; +} + +static int __must_check __attribute__((nonnull)) +dpa_bp_alloc(struct dpa_bp *dpa_bp) +{ + int err = 0; + struct bman_pool_params bp_params; + struct platform_device *pdev; + + BUG_ON(dpa_bp->size == 0); + BUG_ON(dpa_bp->count == 0); + + bp_params.flags = BMAN_POOL_FLAG_DEPLETION; + bp_params.cb = dpa_bp_depletion; + bp_params.cb_ctx = dpa_bp; + + /* We support two options. Either a global shared pool, or + * a specified pool. 
If the pool is specified, we only
+ * create one per bpid */
+ if (dpa_bp->kernel_pool && default_pool) {
+ atomic_inc(&default_pool->refs);
+ return 0;
+ }
+
+ if (dpa_bp_array[dpa_bp->bpid]) {
+ atomic_inc(&dpa_bp_array[dpa_bp->bpid]->refs);
+ return 0;
+ }
+
+ if (dpa_bp->bpid == 0)
+ bp_params.flags |= BMAN_POOL_FLAG_DYNAMIC_BPID;
+ else
+ bp_params.bpid = dpa_bp->bpid;
+
+ dpa_bp->pool = bman_new_pool(&bp_params);
+ if (unlikely(dpa_bp->pool == NULL)) {
+ cpu_pr_err("bman_new_pool() failed\n");
+ return -ENODEV;
+ }
+
+ dpa_bp->bpid = bman_get_params(dpa_bp->pool)->bpid;
+
+ pdev = platform_device_register_simple("dpaa_eth_bpool",
+ dpa_bp->bpid, NULL, 0);
+ if (IS_ERR(pdev)) {
+ err = PTR_ERR(pdev);
+ goto pdev_register_failed;
+ }
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)))
+ goto pdev_mask_failed;
+
+ dpa_bp->dev = &pdev->dev;
+
+ if (dpa_bp->kernel_pool) {
+ dpa_make_private_pool(dpa_bp);
+ if (!default_pool)
+ default_pool = dpa_bp;
+ } else {
+ err = dpa_make_shared_pool(dpa_bp);
+ if (err)
+ goto make_shared_pool_failed;
+ }
+
+ dpa_bp_array[dpa_bp->bpid] = dpa_bp;
+
+ atomic_set(&dpa_bp->refs, 1);
+
+ return 0;
+
+make_shared_pool_failed:
+pdev_mask_failed:
+ platform_device_unregister(pdev);
+pdev_register_failed:
+ bman_free_pool(dpa_bp->pool);
+
+ return err;
+}
+
+static void __cold __attribute__((nonnull))
+_dpa_bp_free(struct dpa_bp *dpa_bp)
+{
+ struct dpa_bp *bp = dpa_bpid2pool(dpa_bp->bpid);
+
+ if (!atomic_dec_and_test(&bp->refs))
+ return;
+
+ if (bp->kernel_pool) {
+ int num;
+
+ do {
+ struct bm_buffer bmb[8];
+ int i;
+
+ num = bman_acquire(bp->pool, bmb, 8, 0);
+
+ for (i = 0; i < num; i++) {
+ dma_addr_t addr = bm_buf_addr(&bmb[i]);
+ struct sk_buff **skbh = phys_to_virt(addr);
+ struct sk_buff *skb = *skbh;
+
+ dma_unmap_single(bp->dev, addr, bp->size,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ }
+ } while (num == 8);
+ }
+
+ dpa_bp_array[bp->bpid] = 0;
+ bman_free_pool(bp->pool);
+}
+
+static void __cold __attribute__((nonnull))
+dpa_bp_free(struct dpa_priv_s *priv, struct dpa_bp *dpa_bp)
+{
+ int i;
+
+ for (i = 0; i < priv->bp_count; i++)
+ _dpa_bp_free(&priv->dpa_bp[i]);
+}
+
+/* QM */
+
+static int __must_check __attribute__((nonnull))
+_dpa_fq_alloc(struct list_head *list, struct dpa_fq *dpa_fq)
+{
+ int _errno;
+ const struct dpa_priv_s *priv;
+ struct device *dev;
+ struct qman_fq *fq;
+ struct qm_mcc_initfq initfq;
+ /* Set the QMan taildrop threshold high enough to accommodate
+ * one 64k frame, plus an extra (here, 16k) for
+ * other frames awaiting Tx.
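+ * (0x14000 bytes = 80KiB, i.e. 64KiB + 16KiB.)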
*/ + const u32 qman_taildrop_threshold = 0x14000; + + priv = netdev_priv(dpa_fq->net_dev); + dev = dpa_fq->net_dev->dev.parent; + + if (dpa_fq->fqid == 0) + dpa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; + + dpa_fq->init = !(dpa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY); + + _errno = qman_create_fq(dpa_fq->fqid, dpa_fq->flags, &dpa_fq->fq_base); + if (_errno) { + dpaa_eth_err(dev, "qman_create_fq() failed\n"); + return _errno; + } + fq = &dpa_fq->fq_base; + + if (dpa_fq->init) { + initfq.we_mask = QM_INITFQ_WE_DESTWQ; + initfq.fqd.dest.channel = dpa_fq->channel; + initfq.fqd.dest.wq = dpa_fq->wq; + initfq.we_mask |= QM_INITFQ_WE_TDTHRESH | QM_INITFQ_WE_FQCTRL; + qm_fqd_taildrop_set(&initfq.fqd.td, qman_taildrop_threshold, 1); + initfq.fqd.fq_ctrl = QM_FQCTRL_TDE | QM_FQCTRL_PREFERINCACHE; + if (dpa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) { + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + initfq.fqd.fq_ctrl |= + QM_FQCTRL_CTXASTASHING | QM_FQCTRL_AVOIDBLOCK; + initfq.fqd.context_a.stashing.exclusive = + QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX | + QM_STASHING_EXCL_ANNOTATION; + initfq.fqd.context_a.stashing.data_cl = 2; + initfq.fqd.context_a.stashing.annotation_cl = 1; + initfq.fqd.context_a.stashing.context_cl = + DIV_ROUND_UP(sizeof(struct qman_fq), 64); + }; + + _errno = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq); + if (_errno < 0) { + dpaa_eth_err(dev, "qman_init_fq(%u) = %d\n", + qman_fq_fqid(fq), _errno); + qman_destroy_fq(fq, 0); + return _errno; + } + } + + dpa_fq->fqid = qman_fq_fqid(fq); + list_add_tail(&dpa_fq->list, list); + + return 0; +} + +static int __cold __attribute__((nonnull)) +_dpa_fq_free(struct device *dev, struct qman_fq *fq) +{ + int _errno, __errno; + struct dpa_fq *dpa_fq; + const struct dpa_priv_s *priv; + + _errno = 0; + + dpa_fq = container_of(fq, struct dpa_fq, fq_base); + priv = netdev_priv(dpa_fq->net_dev); + + if (dpa_fq->init) { + _errno = qman_retire_fq(fq, NULL); + if (unlikely(_errno < 0) && netif_msg_drv(priv)) + dpaa_eth_err(dev, "qman_retire_fq(%u) = %d\n", + qman_fq_fqid(fq), _errno); + + __errno = qman_oos_fq(fq); + if (unlikely(__errno < 0) && netif_msg_drv(priv)) { + dpaa_eth_err(dev, "qman_oos_fq(%u) = %d\n", + qman_fq_fqid(fq), __errno); + if (_errno >= 0) + _errno = __errno; + } + } + + qman_destroy_fq(fq, 0); + list_del(&dpa_fq->list); + + return _errno; +} + +static int __cold __attribute__((nonnull)) +dpa_fq_free(struct device *dev, struct list_head *list) +{ + int _errno, __errno; + struct dpa_fq *dpa_fq, *tmp; + + _errno = 0; + list_for_each_entry_safe(dpa_fq, tmp, list, list) { + __errno = _dpa_fq_free(dev, (struct qman_fq *)dpa_fq); + if (unlikely(__errno < 0) && _errno >= 0) + _errno = __errno; + } + + return _errno; +} + + +static inline ssize_t __const __must_check __attribute__((nonnull)) +dpa_fd_length(const struct qm_fd *fd) +{ + return fd->length20; +} + +static inline ssize_t __const __must_check __attribute__((nonnull)) +dpa_fd_offset(const struct qm_fd *fd) +{ + return fd->offset; +} + +static int __must_check __attribute__((nonnull)) +dpa_fd_release(const struct net_device *net_dev, const struct qm_fd *fd) +{ + int _errno, __errno, i, j; + const struct dpa_priv_s *priv; + const struct qm_sg_entry *sgt; + struct dpa_bp *_dpa_bp, *dpa_bp; + struct bm_buffer _bmb, bmb[8]; + + priv = netdev_priv(net_dev); + + _bmb.hi = fd->addr_hi; + _bmb.lo = fd->addr_lo; + + _dpa_bp = dpa_bpid2pool(fd->bpid); + BUG_ON(IS_ERR(_dpa_bp)); + + _errno = 0; + if (fd->format == qm_fd_sg) { + sgt = (phys_to_virt(bm_buf_addr(&_bmb)) + dpa_fd_offset(fd)); + 
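+ /*
+ * Walk the S/G table and hand the buffers back to BMan in batches
+ * of up to 8, grouping consecutive entries that share a bpid; the
+ * last entry is marked by sgt[].final.
+ */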
+ i = 0; + do { + dpa_bp = dpa_bpid2pool(sgt[i].bpid); + BUG_ON(IS_ERR(dpa_bp)); + + j = 0; + do { + BUG_ON(sgt[i].extension); + + bmb[j].hi = sgt[i].addr_hi; + bmb[j].lo = sgt[i].addr_lo; + j++; i++; + } while (j < ARRAY_SIZE(bmb) && + !sgt[i-1].final && + sgt[i-1].bpid == sgt[i].bpid); + + __errno = bman_release(dpa_bp->pool, bmb, j, 0); + if (unlikely(__errno < 0)) { + if (netif_msg_drv(priv) && net_ratelimit()) + cpu_netdev_err(net_dev, + "bman_release(%hu) = %d\n", + dpa_bp->bpid, _errno); + if (_errno >= 0) + _errno = __errno; + } + } while (!sgt[i-1].final); + } + + __errno = bman_release(_dpa_bp->pool, &_bmb, 1, 0); + if (unlikely(__errno < 0)) { + if (netif_msg_drv(priv) && net_ratelimit()) + cpu_netdev_err(net_dev, "bman_release(%hu) = %d\n", + _dpa_bp->bpid, __errno); + if (_errno >= 0) + _errno = __errno; + } + + return _errno; +} + +/* net_device */ + +#define NN_ALLOCATED_SPACE(net_dev) \ + max((size_t)arp_hdr_len(net_dev), sizeof(struct iphdr)) +#define NN_RESERVED_SPACE(net_dev) \ + min((size_t)arp_hdr_len(net_dev), sizeof(struct iphdr)) + +#define TT_ALLOCATED_SPACE(net_dev) \ + max(sizeof(struct icmphdr), max(sizeof(struct udphdr), \ + sizeof(struct tcphdr))) +#define TT_RESERVED_SPACE(net_dev) \ + min(sizeof(struct icmphdr), min(sizeof(struct udphdr), \ + sizeof(struct tcphdr))) + +static struct net_device_stats * __cold +dpa_get_stats(struct net_device *net_dev) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + unsigned long *netstats; + unsigned long *cpustats; + int i, j; + struct dpa_percpu_priv_s *percpu_priv; + int numstats = sizeof(net_dev->stats) / sizeof(unsigned long); + + netstats = (unsigned long *)&net_dev->stats; + + memset(netstats, 0, sizeof(net_dev->stats)); + + for_each_online_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + + cpustats = (unsigned long *)&percpu_priv->stats; + + for (j = 0; j < numstats; j++) + netstats[j] += cpustats[j]; + } + + return &net_dev->stats; +} + +static int dpa_change_mtu(struct net_device *net_dev, int new_mtu) +{ + const struct dpa_priv_s *priv; + const int max_mtu = fsl_fman_phy_maxfrm - (VLAN_ETH_HLEN + ETH_FCS_LEN); + const int min_mtu = 64; + + priv = netdev_priv(net_dev); + + /* Make sure we don't exceed the Ethernet controller's MAXFRM */ + if (new_mtu < min_mtu || new_mtu > max_mtu) { + cpu_netdev_err(net_dev, "Invalid L3 mtu %d " + "(must be between %d and %d).\n", + new_mtu, min_mtu, max_mtu); + return -EINVAL; + } + net_dev->mtu = new_mtu; + + return 0; +} + +static int dpa_set_mac_address(struct net_device *net_dev, void *addr) +{ + const struct dpa_priv_s *priv; + int _errno; + + priv = netdev_priv(net_dev); + + _errno = eth_mac_addr(net_dev, addr); + if (_errno < 0) { + if (netif_msg_drv(priv)) + cpu_netdev_err(net_dev, + "eth_mac_addr() = %d\n", + _errno); + return _errno; + } + + if (!priv->mac_dev) + /* MAC-less interface, so nothing more to do here */ + return 0; + + _errno = priv->mac_dev->change_addr(priv->mac_dev, net_dev->dev_addr); + if (_errno < 0) { + if (netif_msg_drv(priv)) + cpu_netdev_err(net_dev, + "mac_dev->change_addr() = %d\n", + _errno); + return _errno; + } + + return 0; +} + +static void __cold dpa_change_rx_flags(struct net_device *net_dev, int flags) +{ + int _errno; + const struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (!priv->mac_dev) + return; + + if ((flags & IFF_PROMISC) != 0) { + _errno = priv->mac_dev->change_promisc(priv->mac_dev); + if (unlikely(_errno < 0) && netif_msg_drv(priv)) + cpu_netdev_err(net_dev, + "mac_dev->change_promisc() = 
%d\n", + _errno); + } +} + +static void dpa_set_multicast_list(struct net_device *net_dev) +{ + int _errno; + struct dpa_priv_s *priv; + + priv = netdev_priv(net_dev); + + if (!priv->mac_dev) { + if (netif_msg_drv(priv)) + cpu_netdev_warn(net_dev, + "%s() called on MAC-less interface\n", + __func__); + return; + } + + _errno = priv->mac_dev->set_multi(net_dev); + if ((_errno < 0) && netif_msg_drv(priv)) + cpu_netdev_err(net_dev, "mac_dev->set_multi() = %d\n", _errno); +} + +static int dpa_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpa_priv_s *priv = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + +#ifdef CONFIG_FSL_DPA_1588 + if ((cmd >= PTP_ENBL_TXTS_IOCTL) && (cmd <= PTP_CLEANUP_TS)) { + int ret = -ENODEV; + + if (priv->tsu && priv->tsu->valid) + ret = dpa_ioctl_1588(dev, rq, cmd); + + return ret; + } +#endif + + return phy_mii_ioctl(priv->mac_dev->phy_dev, rq, cmd); +} + +/* + * When we put the buffer into the pool, we purposefully added + * some padding to the address so that the buffers wouldn't all + * be page-aligned. But the skb has been reset to a default state, + * so it is pointing up to DPAA_ETH_MAX_PAD - L1_CACHE_BYTES bytes + * before the actual data. We subtract skb->head from the fd addr, + * and then mask off the translated part to get the actual distance. + */ +static int dpa_process_one(struct dpa_percpu_priv_s *percpu_priv, + struct sk_buff *skb, struct dpa_bp *bp, const struct qm_fd *fd) +{ + dma_addr_t addr = qm_fd_addr(fd); + u32 addrlo = lower_32_bits(addr); + u32 skblo = lower_32_bits((unsigned long)skb->head); + u32 pad = (addrlo - skblo) & (PAGE_SIZE - 1); + unsigned int data_start; + + (*percpu_priv->dpa_bp_count)--; + + /* + * The skb is currently pointed at head + NET_SKB_PAD. The packet + * starts at skb->head + pad + fd offset. 
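+ * skb_put() below first opens up data_start + frame-length bytes and
+ * skb_pull() then drops the leading data_start bytes, so on return
+ * skb->data points at the frame and skb->len == dpa_fd_length(fd).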
+ */ + data_start = pad + dpa_fd_offset(fd) - NET_SKB_PAD; + skb_put(skb, dpa_fd_length(fd) + data_start); + skb_pull(skb, data_start); + + return 0; +} + +static void _dpa_rx_error(struct net_device *net_dev, + const struct dpa_priv_s *priv, + struct dpa_percpu_priv_s *percpu_priv, + const struct qm_fd *fd) +{ + int _errno; + + if (netif_msg_hw(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, "FD status = 0x%08x\n", + fd->status & FM_FD_STAT_ERRORS); + + percpu_priv->stats.rx_errors++; + + _errno = dpa_fd_release(net_dev, fd); + if (unlikely(_errno < 0)) { + dump_stack(); + panic("Can't release buffer to the BM during RX\n"); + } +} + +static void _dpa_tx_error(struct net_device *net_dev, + const struct dpa_priv_s *priv, + struct dpa_percpu_priv_s *percpu_priv, + const struct qm_fd *fd) +{ + struct sk_buff *skb; + struct sk_buff **skbh; + dma_addr_t addr = qm_fd_addr(fd); + struct dpa_bp *bp = priv->dpa_bp; + + if (netif_msg_hw(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, "FD status = 0x%08x\n", + fd->status & FM_FD_STAT_ERRORS); + + percpu_priv->stats.tx_errors++; + + skbh = (struct sk_buff **)phys_to_virt(addr); + skb = *skbh; + + dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE); + + dev_kfree_skb(skb); +} + +static void __hot _dpa_rx(struct net_device *net_dev, + const struct dpa_priv_s *priv, + struct dpa_percpu_priv_s *percpu_priv, + const struct qm_fd *fd) +{ + int _errno; + struct dpa_bp *dpa_bp; + struct sk_buff *skb; + struct sk_buff **skbh; + dma_addr_t addr = qm_fd_addr(fd); + + skbh = (struct sk_buff **)phys_to_virt(addr); + + if (unlikely(fd->status & FM_FD_STAT_ERRORS) != 0) { + if (netif_msg_hw(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, "FD status = 0x%08x\n", + fd->status & FM_FD_STAT_ERRORS); + + percpu_priv->stats.rx_errors++; + + goto _return_dpa_fd_release; + } + + if (unlikely(fd->format != qm_fd_contig)) { + percpu_priv->stats.rx_dropped++; + if (netif_msg_rx_status(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, "Dropping a SG frame\n"); + goto _return_dpa_fd_release; + } + + dpa_bp = dpa_bpid2pool(fd->bpid); + + dma_unmap_single(dpa_bp->dev, qm_fd_addr(fd), dpa_bp->size, + DMA_FROM_DEVICE); + + skb = *skbh; + prefetch(skb); + + /* Fill the SKB */ + dpa_process_one(percpu_priv, skb, dpa_bp, fd); + + prefetch(skb_shinfo(skb)); + +#ifdef CONFIG_FSL_DPA_1588 + if (priv->tsu && priv->tsu->valid) + dpa_ptp_store_rxstamp(net_dev, skb, fd); +#endif + + skb->protocol = eth_type_trans(skb, net_dev); + + if (unlikely(skb->len > net_dev->mtu)) { + if ((skb->protocol != ETH_P_8021Q) || + (skb->len > net_dev->mtu + 4)) { + percpu_priv->stats.rx_dropped++; + goto drop_large_frame; + } + } + + /* Check if the FMan Parser has already validated the L4 csum. */ + if (fd->status & FM_FD_STAT_L4CV) { + /* If we're here, the csum must be valid (if it hadn't, + * the frame would have been received on the Error FQ, + * respectively on the _dpa_rx_error() path). 
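+ * Frames whose L4 csum the parser did not validate (FM_FD_STAT_L4CV
+ * clear) fall through to CHECKSUM_NONE below, leaving verification
+ * to the stack.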
*/ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) + percpu_priv->stats.rx_dropped++; + else { + percpu_priv->stats.rx_packets++; + percpu_priv->stats.rx_bytes += dpa_fd_length(fd); + } + + net_dev->last_rx = jiffies; + + return; + +drop_large_frame: + (*percpu_priv->dpa_bp_count)++; +_return_dpa_fd_release: + _errno = dpa_fd_release(net_dev, fd); + if (unlikely(_errno < 0)) { + dump_stack(); + panic("Can't release buffer to the BM during RX\n"); + } +} + +static void dpaa_eth_napi_disable(struct dpa_priv_s *priv) +{ + struct dpa_percpu_priv_s *percpu_priv; + int i; + + if (priv->shared) + return; + + for_each_online_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + napi_disable(&percpu_priv->napi); + } +} + +static void dpaa_eth_napi_enable(struct dpa_priv_s *priv) +{ + struct dpa_percpu_priv_s *percpu_priv; + int i; + + if (priv->shared) + return; + + for_each_online_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + napi_enable(&percpu_priv->napi); + } +} + +static int dpaa_eth_poll(struct napi_struct *napi, int budget) +{ + struct dpa_percpu_priv_s *percpu_priv; + int cleaned = qman_poll_dqrr(budget); + int count; + + percpu_priv = container_of(napi, struct dpa_percpu_priv_s, napi); + + count = *percpu_priv->dpa_bp_count; + + if (count < DEFAULT_COUNT / 4) { + int i; + + for (i = count; i < DEFAULT_COUNT; i += 8) + dpa_bp_add_8(percpu_priv->dpa_bp); + } + + if (cleaned < budget) { + int tmp; + napi_complete(napi); + tmp = qman_irqsource_add(QM_PIRQ_DQRI); + BUG_ON(tmp); + } + + return cleaned; +} + +static void __hot _dpa_tx(struct net_device *net_dev, + const struct dpa_priv_s *priv, + struct dpa_percpu_priv_s *percpu_priv, + const struct qm_fd *fd) +{ + struct sk_buff **skbh; + struct sk_buff *skb; + dma_addr_t addr = qm_fd_addr(fd); + struct dpa_bp *bp = priv->dpa_bp; + + /* This might not perfectly reflect the reality, if the core dequeueing + * the Tx confirmation is different from the one that did the enqueue, + * but at least it'll show up in the total count. */ + percpu_priv->tx_confirm++; + + if (unlikely(fd->status & FM_FD_STAT_ERRORS) != 0) { + if (netif_msg_hw(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, "FD status = 0x%08x\n", + fd->status & FM_FD_STAT_ERRORS); + + percpu_priv->stats.tx_errors++; + } + + skbh = (struct sk_buff **)phys_to_virt(addr); + skb = *skbh; + +#ifdef CONFIG_FSL_DPA_1588 + if (priv->tsu && priv->tsu->valid) + dpa_ptp_store_txstamp(net_dev, skb, fd); +#endif + + dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE); + + dev_kfree_skb(skb); +} + +static struct dpa_bp *dpa_size2pool(struct dpa_priv_s *priv, size_t size) +{ + int i; + + for (i = 0; i < priv->bp_count; i++) + if (DPA_BP_SIZE(size) <= priv->dpa_bp[i].size) + return dpa_bpid2pool(priv->dpa_bp[i].bpid); + return ERR_PTR(-ENODEV); +} + +static inline void * __must_check __attribute__((nonnull)) +dpa_phys2virt(const struct dpa_bp *dpa_bp, dma_addr_t addr) +{ + return dpa_bp->vaddr + (addr - dpa_bp->paddr); +} + +/** + * Turn on HW checksum computation for this outgoing frame. + * If the current protocol is not something we support in this regard + * (or if the stack has already computed the SW checksum), we do nothing. + * + * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value + * otherwise. + * + * Note that this function may modify the fd->cmd field and the skb data buffer + * (the Parse Results area). 
+ */ +static inline int dpa_enable_tx_csum(struct dpa_priv_s *priv, + struct sk_buff *skb, struct qm_fd *fd, char *parse_results) +{ + t_FmPrsResult *parse_result; + struct iphdr *iph; + struct ipv6hdr *ipv6h = NULL; + int l4_proto; + int ethertype = ntohs(skb->protocol); + int retval = 0; + + if (!priv->mac_dev || skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* Note: L3 csum seems to be already computed in sw, but we can't choose + * L4 alone from the FM configuration anyway. */ + + /* Fill in some fields of the Parse Results array, so the FMan + * can find them as if they came from the FMan Parser. */ + parse_result = (t_FmPrsResult *)parse_results; + + /* If we're dealing with VLAN, get the real Ethernet type */ + if (ethertype == ETH_P_8021Q) { + /* We can't always assume the MAC header is set correctly + * by the stack, so reset to beginning of skb->data */ + skb_reset_mac_header(skb); + ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); + } + + /* Fill in the relevant L3 parse result fields + * and read the L4 protocol type */ + switch (ethertype) { + case ETH_P_IP: + parse_result->l3r = FM_L3_PARSE_RESULT_IPV4; + iph = ip_hdr(skb); + BUG_ON(iph == NULL); + l4_proto = ntohs(iph->protocol); + break; + case ETH_P_IPV6: + parse_result->l3r = FM_L3_PARSE_RESULT_IPV6; + ipv6h = ipv6_hdr(skb); + BUG_ON(ipv6h == NULL); + l4_proto = ntohs(ipv6h->nexthdr); + break; + default: + /* We shouldn't even be here */ + if (netif_msg_tx_err(priv) && net_ratelimit()) + cpu_netdev_alert(priv->net_dev, "Can't compute HW csum " + "for L3 proto 0x%x\n", ntohs(skb->protocol)); + retval = -EIO; + goto return_error; + } + + /* Fill in the relevant L4 parse result fields */ + switch (l4_proto) { + case IPPROTO_UDP: + parse_result->l4r = FM_L4_PARSE_RESULT_UDP; + break; + case IPPROTO_TCP: + parse_result->l4r = FM_L4_PARSE_RESULT_TCP; + break; + default: + /* This can as well be a BUG() */ + if (netif_msg_tx_err(priv) && net_ratelimit()) + cpu_netdev_alert(priv->net_dev, "Can't compute HW csum " + "for L4 proto 0x%x\n", l4_proto); + retval = -EIO; + goto return_error; + } + + /* At index 0 is IPOffset_1 as defined in the Parse Results */ + parse_result->ip_off[0] = skb_network_offset(skb); + parse_result->l4_off = skb_transport_offset(skb); + + /* Enable L3 (and L4, if TCP or UDP) HW checksum. 
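+ * (We cannot request the L4 csum alone - see the note at the top of
+ * this function - so both are enabled together here.)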
*/ + fd->cmd |= FM_FD_CMD_RPD | FM_FD_CMD_DTC; + +return_error: + return retval; +} + +static inline int __hot dpa_xmit(struct dpa_priv_s *priv, + struct dpa_percpu_priv_s *percpu, int queue, + struct qm_fd *fd) +{ + int err; + + prefetchw(&percpu->start_tx); + err = qman_enqueue(priv->egress_fqs[queue], fd, 0); + if (unlikely(err < 0)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + cpu_netdev_err(priv->net_dev, "qman_enqueue() = %d\n", + err); + percpu->stats.tx_errors++; + percpu->stats.tx_fifo_errors++; + return err; + } + + percpu->stats.tx_packets++; + percpu->stats.tx_bytes += dpa_fd_length(fd); + + return NETDEV_TX_OK; +} + +static int __hot dpa_shared_tx(struct sk_buff *skb, struct net_device *net_dev) +{ + struct dpa_bp *dpa_bp; + struct bm_buffer bmb; + struct dpa_percpu_priv_s *percpu_priv; + struct dpa_priv_s *priv; + struct device *dev; + struct qm_fd fd; + int queue_mapping; + int err; + void *dpa_bp_vaddr; + + priv = netdev_priv(net_dev); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + dev = net_dev->dev.parent; + + memset(&fd, 0, sizeof(fd)); + fd.format = qm_fd_contig; + + queue_mapping = skb_get_queue_mapping(skb); + + dpa_bp = dpa_size2pool(priv, skb_headlen(skb)); + if (unlikely(IS_ERR(dpa_bp))) { + err = PTR_ERR(dpa_bp); + goto bpools_too_small_error; + } + + err = bman_acquire(dpa_bp->pool, &bmb, 1, 0); + if (unlikely(err <= 0)) { + percpu_priv->stats.tx_errors++; + if (err == 0) + err = -ENOMEM; + goto buf_acquire_failed; + } + fd.bpid = dpa_bp->bpid; + + fd.length20 = skb_headlen(skb); + fd.cmd = FM_FD_CMD_FCO; + fd.addr_hi = bmb.hi; + fd.addr_lo = bmb.lo; + fd.offset = DPA_BP_HEAD; + + dpa_bp_vaddr = dpa_phys2virt(dpa_bp, bm_buf_addr(&bmb)); + + /* Copy the packet payload */ + skb_copy_from_linear_data(skb, dpa_bp_vaddr + dpa_fd_offset(&fd), + dpa_fd_length(&fd)); + + /* Enable L3/L4 hardware checksum computation, if applicable */ + err = dpa_enable_tx_csum(priv, skb, &fd, + dpa_bp_vaddr + DPA_PRIV_DATA_SIZE); + if (unlikely(err < 0)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + cpu_netdev_err(net_dev, "Tx HW csum error: %d\n", err); + percpu_priv->stats.tx_errors++; + goto l3_l4_csum_failed; + } + + err = dpa_xmit(priv, percpu_priv, queue_mapping, &fd); + +l3_l4_csum_failed: +bpools_too_small_error: +buf_acquire_failed: + /* We're done with the skb */ + dev_kfree_skb(skb); + + return err; +} + +static int __hot dpa_tx(struct sk_buff *skb, struct net_device *net_dev) +{ + struct dpa_priv_s *priv; + struct device *dev; + struct qm_fd fd; + unsigned int headroom; + struct dpa_percpu_priv_s *percpu_priv; + struct sk_buff **skbh; + dma_addr_t addr; + struct dpa_bp *dpa_bp; + int queue_mapping; + int err; + unsigned int pad; + + priv = netdev_priv(net_dev); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + dev = net_dev->dev.parent; + + memset(&fd, 0, sizeof(fd)); + fd.format = qm_fd_contig; + + headroom = skb_headroom(skb); + queue_mapping = skb_get_queue_mapping(skb); + + if (headroom < DPA_BP_HEAD) { + struct sk_buff *skb_new; + + skb_new = skb_realloc_headroom(skb, DPA_BP_HEAD); + if (!skb_new) { + percpu_priv->stats.tx_errors++; + kfree_skb(skb); + return NETDEV_TX_OK; + } + kfree_skb(skb); + skb = skb_new; + headroom = skb_headroom(skb); + } + + skb = skb_unshare(skb, GFP_ATOMIC); + + if (!skb) + return NETDEV_TX_OK; + + /* + * We are guaranteed that we have at least DPA_BP_HEAD of headroom. + * Buffers we allocated are padded to improve cache usage. 
In order + * to increase buffer re-use, we aim to keep any such buffers the + * same. This means the address passed to the FM should be DPA_BP_HEAD + * before the data, and we might as well do the same for buffers + * from elsewhere in the kernel. + */ + skbh = (struct sk_buff **)(skb->data - DPA_BP_HEAD); + pad = headroom - DPA_BP_HEAD; + + *skbh = skb; + + dpa_bp = priv->dpa_bp; + + /* Enable L3/L4 hardware checksum computation. + * + * We must do this before dma_map_single(DMA_TO_DEVICE), because we may + * need to write into the skb. */ + err = dpa_enable_tx_csum(priv, skb, &fd, + ((char *)skbh) + DPA_PRIV_DATA_SIZE); + + if (unlikely(err < 0)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + cpu_netdev_err(net_dev, "HW csum error: %d\n", err); + percpu_priv->stats.tx_errors++; + goto l3_l4_csum_failed; + } + +#ifdef CONFIG_FSL_DPA_1588 + if (priv->tsu && priv->tsu->valid) + fd.cmd |= FM_FD_CMD_UPD; +#endif + + fd.length20 = skb->len; + fd.offset = DPA_BP_HEAD; /* This is now guaranteed */ + + addr = dma_map_single(dpa_bp->dev, skbh, dpa_bp->size, DMA_TO_DEVICE); + if (unlikely(addr == 0)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + cpu_netdev_err(net_dev, "dma_map_single() failed\n"); + goto dma_map_failed; + } + + fd.addr_hi = upper_32_bits(addr); + fd.addr_lo = lower_32_bits(addr); + + if (unlikely(dpa_xmit(priv, percpu_priv, queue_mapping, &fd) < 0)) + goto xmit_failed; + + net_dev->trans_start = jiffies; + + return NETDEV_TX_OK; + +xmit_failed: + dma_unmap_single(dev, addr, dpa_bp->size, DMA_TO_DEVICE); + +dma_map_failed: + if (fd.cmd & FM_FD_CMD_FCO) + (*percpu_priv->dpa_bp_count)--; + +l3_l4_csum_failed: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static enum qman_cb_dqrr_result +ingress_rx_error_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + if (dpaa_eth_napi_schedule(percpu_priv)) { + percpu_priv->in_interrupt++; + return qman_cb_dqrr_stop; + } + + _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd); + + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result __hot +shared_rx_dqrr(struct qman_portal *portal, struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv; + int err; + const struct qm_fd *fd = &dq->fd; + struct dpa_bp *dpa_bp; + size_t size; + struct sk_buff *skb; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + if (unlikely(fd->status & FM_FD_STAT_ERRORS) != 0) { + if (netif_msg_hw(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, "FD status = 0x%08x\n", + fd->status & FM_FD_STAT_ERRORS); + + percpu_priv->stats.rx_errors++; + + goto out; + } + + + dpa_bp = dpa_bpid2pool(fd->bpid); + BUG_ON(IS_ERR(dpa_bp)); + + if (fd->format == qm_fd_sg) { + percpu_priv->stats.rx_dropped++; + if (netif_msg_rx_status(priv) && net_ratelimit()) + cpu_netdev_warn(net_dev, + "%s:%hu:%s(): Dropping a SG frame\n", + __file__, __LINE__, __func__); + goto out; + } + + size = dpa_fd_length(fd); + + skb = __netdev_alloc_skb(net_dev, DPA_BP_HEAD + size, GFP_ATOMIC); + if (unlikely(skb == NULL)) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + cpu_netdev_err(net_dev, "Could 
not alloc skb\n"); + + percpu_priv->stats.rx_dropped++; + + goto out; + } + + skb_reserve(skb, DPA_BP_HEAD); + + /* Fill the SKB */ + memcpy(skb_put(skb, dpa_fd_length(fd)), + dpa_phys2virt(dpa_bp, qm_fd_addr(fd)) + + dpa_fd_offset(fd), dpa_fd_length(fd)); + + skb->protocol = eth_type_trans(skb, net_dev); + + if (unlikely(skb->len > net_dev->mtu)) { + if ((skb->protocol != ETH_P_8021Q) || + (skb->len > net_dev->mtu + 4)) { + percpu_priv->stats.rx_dropped++; + dev_kfree_skb_any(skb); + goto out; + } + } + + if (unlikely(netif_rx(skb) != NET_RX_SUCCESS)) + percpu_priv->stats.rx_dropped++; + else { + percpu_priv->stats.rx_packets++; + percpu_priv->stats.rx_bytes += dpa_fd_length(fd); + } + + net_dev->last_rx = jiffies; + +out: + err = dpa_fd_release(net_dev, fd); + if (unlikely(err < 0)) { + dump_stack(); + panic("Can't release buffer to the BM during RX\n"); + } + + return qman_cb_dqrr_consume; +} + + +static enum qman_cb_dqrr_result __hot +ingress_rx_default_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + if (unlikely(dpaa_eth_napi_schedule(percpu_priv))) { + percpu_priv->in_interrupt++; + return qman_cb_dqrr_stop; + } + + prefetchw(&percpu_priv->ingress_calls); + + _dpa_rx(net_dev, priv, percpu_priv, &dq->fd); + + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result +ingress_tx_error_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + if (dpaa_eth_napi_schedule(percpu_priv)) { + percpu_priv->in_interrupt++; + return qman_cb_dqrr_stop; + } + + _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd); + + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result __hot +ingress_tx_default_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + if (dpaa_eth_napi_schedule(percpu_priv)) { + percpu_priv->in_interrupt++; + return qman_cb_dqrr_stop; + } + + _dpa_tx(net_dev, priv, percpu_priv, &dq->fd); + + return qman_cb_dqrr_consume; +} + +static void shared_ern(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + struct net_device *net_dev; + const struct dpa_priv_s *priv; + int err; + struct dpa_percpu_priv_s *percpu_priv; + struct dpa_fq *dpa_fq = (struct dpa_fq *)fq; + + net_dev = dpa_fq->net_dev; + priv = netdev_priv(net_dev); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + err = dpa_fd_release(net_dev, &msg->ern.fd); + if (unlikely(err < 0)) { + dump_stack(); + panic("Can't release buffer to the BM during a TX\n"); + } + + percpu_priv->stats.tx_dropped++; + percpu_priv->stats.tx_fifo_errors++; +} + +static void egress_ern(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + struct net_device *net_dev; + const struct dpa_priv_s 
*priv; + struct sk_buff *skb; + struct sk_buff **skbh; + struct dpa_percpu_priv_s *percpu_priv; + dma_addr_t addr = qm_fd_addr(&msg->ern.fd); + struct dpa_bp *bp; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + bp = priv->dpa_bp; + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + percpu_priv->stats.tx_dropped++; + percpu_priv->stats.tx_fifo_errors++; + + /* + * If we intended this buffer to go into the pool + * when the FM was done, we need to put it in + * manually. + */ + if (msg->ern.fd.cmd & FM_FD_CMD_FCO) { + struct bm_buffer bmb; + + bm_buffer_set64(&bmb, addr); + while (bman_release(bp->pool, &bmb, 1, 0)) + cpu_relax(); + + return; + } + + skbh = (struct sk_buff **)phys_to_virt(addr); + skb = *skbh; + + dma_unmap_single(bp->dev, addr, bp->size, DMA_TO_DEVICE); + + dev_kfree_skb_any(skb); +} + +static const struct qman_fq rx_shared_fq = { + .cb = {shared_rx_dqrr, NULL, NULL, NULL} +}; +static const struct qman_fq rx_private_defq = { + .cb = {ingress_rx_default_dqrr, NULL, NULL, NULL} +}; +static const struct qman_fq rx_private_errq = { + .cb = {ingress_rx_error_dqrr, NULL, NULL, NULL} +}; +static const struct qman_fq tx_private_defq = { + .cb = {ingress_tx_default_dqrr, NULL, NULL, NULL} +}; +static const struct qman_fq tx_private_errq = { + .cb = {ingress_tx_error_dqrr, NULL, NULL, NULL} +}; +static const struct qman_fq dummyq = { + .cb = {NULL, NULL, NULL, NULL} +}; +static const struct qman_fq private_egress_fq = { + .cb = {NULL, egress_ern, NULL, NULL} +}; +static const struct qman_fq shared_egress_fq = { + .cb = {NULL, shared_ern, NULL, NULL} +}; + +#ifdef CONFIG_DPAA_ETH_UNIT_TESTS +static bool tx_unit_test_passed = true; + +static void tx_unit_test_ern(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct sk_buff **skbh; + struct sk_buff *skb; + const struct qm_fd *fd; + dma_addr_t addr; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + tx_unit_test_passed = false; + + fd = &msg->ern.fd; + + addr = qm_fd_addr(fd); + + skbh = (struct sk_buff **)phys_to_virt(addr); + skb = *skbh; + + if (!skb || !is_kernel_addr((unsigned long)skb)) + panic("Corrupt skb in ERN!\n"); + + kfree_skb(skb); +} + +static unsigned char *tx_unit_skb_head; +static unsigned char *tx_unit_skb_end; +static int tx_unit_tested; + +static enum qman_cb_dqrr_result tx_unit_test_dqrr( + struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + struct net_device *net_dev; + struct dpa_priv_s *priv; + struct sk_buff **skbh; + struct sk_buff *skb; + const struct qm_fd *fd; + dma_addr_t addr; + unsigned char *startaddr; + struct dpa_percpu_priv_s *percpu_priv; + + tx_unit_test_passed = false; + + tx_unit_tested++; + + net_dev = ((struct dpa_fq *)fq)->net_dev; + priv = netdev_priv(net_dev); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + fd = &dq->fd; + + addr = qm_fd_addr(fd); + + skbh = (struct sk_buff **)phys_to_virt(addr); + startaddr = (unsigned char *)skbh; + skb = *skbh; + + if (!skb || !is_kernel_addr((unsigned long)skb)) + panic("Invalid skb address in TX Unit Test FD\n"); + + /* Make sure we're dealing with the same skb */ + if (skb->head != tx_unit_skb_head + || skb_end_pointer(skb) != tx_unit_skb_end) + goto out; + + /* + * If we recycled, then there must be enough room between fd.addr + * and skb->end for a new RX buffer + */ + if (fd->cmd & FM_FD_CMD_FCO) { + size_t 
bufsize = skb_end_pointer(skb) - startaddr; + + if (bufsize < fsl_fman_phy_maxfrm) + goto out; + } else { + /* + * If we didn't recycle, but the buffer was big enough, + * increment the counter to put it back + */ + if (skb_end_pointer(skb) - skb->head >= fsl_fman_phy_maxfrm) + (*percpu_priv->dpa_bp_count)++; + + /* If we didn't recycle, the data pointer should be good */ + if (skb->data != startaddr + dpa_fd_offset(fd)) + goto out; + } + + tx_unit_test_passed = true; +out: + /* The skb is no longer needed, and belongs to us */ + kfree_skb(skb); + + return qman_cb_dqrr_consume; +} + +static const struct qman_fq tx_unit_test_fq = { + .cb = {tx_unit_test_dqrr, tx_unit_test_ern, NULL, NULL} +}; + +static struct dpa_fq unit_fq; + +static bool tx_unit_test_ran; /* Starts as false */ + +static int dpa_tx_unit_test(struct net_device *net_dev) +{ + /* Create a new FQ */ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct qman_fq *oldq; + int size, headroom; + struct dpa_percpu_priv_s *percpu_priv; + cpumask_t *oldcpus; + int test_count = 0; + int err = 0; + int tests_failed = 0; + const cpumask_t *cpus = qman_affine_cpus(); + + oldcpus = tsk_cpus_allowed(current); + set_cpus_allowed_ptr(current, cpus); + /* disable bottom halves */ + local_bh_disable(); + + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + qman_irqsource_remove(QM_PIRQ_DQRI); + unit_fq.net_dev = net_dev; + unit_fq.fq_base = tx_unit_test_fq; + + /* Save old queue */ + oldq = priv->egress_fqs[smp_processor_id()]; + + err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &unit_fq.fq_base); + + if (err < 0) { + pr_err("UNIT test FQ create failed: %d\n", err); + goto fq_create_fail; + } + + err = qman_init_fq(&unit_fq.fq_base, + QMAN_INITFQ_FLAG_SCHED | QMAN_INITFQ_FLAG_LOCAL, NULL); + if (err < 0) { + pr_err("UNIT test FQ init failed: %d\n", err); + goto fq_init_fail; + } + + pr_err("TX Unit Test using FQ %d\n", qman_fq_fqid(&unit_fq.fq_base)); + + /* Replace queue 0 with this queue */ + priv->egress_fqs[smp_processor_id()] = &unit_fq.fq_base; + + /* Try packet sizes from 64-bytes to just above the maximum */ + for (size = 64; size <= 9600 + 128; size += 64) { + for (headroom = DPA_BP_HEAD; headroom < 0x800; headroom += 16) { + int ret; + struct sk_buff *skb; + + test_count++; + + skb = dev_alloc_skb(size + headroom); + + if (!skb) { + pr_err("Failed to allocate skb\n"); + err = -ENOMEM; + goto end_test; + } + + if (skb_end_pointer(skb) - skb->head >= + fsl_fman_phy_maxfrm) + (*percpu_priv->dpa_bp_count)--; + + skb_put(skb, size + headroom); + skb_pull(skb, headroom); + + tx_unit_skb_head = skb->head; + tx_unit_skb_end = skb_end_pointer(skb); + + skb_set_queue_mapping(skb, smp_processor_id()); + + /* tx */ + ret = net_dev->netdev_ops->ndo_start_xmit(skb, net_dev); + + if (ret != NETDEV_TX_OK) { + pr_err("Failed to TX with err %d\n", ret); + err = -EIO; + goto end_test; + } + + /* Wait for it to arrive */ + ret = spin_event_timeout(qman_poll_dqrr(1) != 0, + 100000, 1); + + if (!ret) + pr_err("TX Packet never arrived\n"); + + /* Was it good? 
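+ * tx_unit_test_dqrr() sets tx_unit_test_passed only if the frame came
+ * back on the test FQ with the same skb we sent and, when it was
+ * recycled, with enough room left for an RX buffer.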
*/ + if (tx_unit_test_passed == false) { + pr_err("Test failed:\n"); + pr_err("size: %d pad: %d head: %p end: %p\n", + size, headroom, tx_unit_skb_head, + tx_unit_skb_end); + tests_failed++; + } + } + } + +end_test: + err = qman_retire_fq(&unit_fq.fq_base, NULL); + if (unlikely(err < 0)) + pr_err("Could not retire TX Unit Test FQ (%d)\n", err); + + err = qman_oos_fq(&unit_fq.fq_base); + if (unlikely(err < 0)) + pr_err("Could not OOS TX Unit Test FQ (%d)\n", err); + +fq_init_fail: + qman_destroy_fq(&unit_fq.fq_base, 0); + +fq_create_fail: + priv->egress_fqs[smp_processor_id()] = oldq; + local_bh_enable(); + qman_irqsource_add(QM_PIRQ_DQRI); + tx_unit_test_ran = true; + set_cpus_allowed_ptr(current, oldcpus); + + pr_err("Tested %d/%d packets. %d failed\n", test_count, tx_unit_tested, + tests_failed); + + if (tests_failed) + err = -EINVAL; + + return err; +} +#endif + +static int __cold dpa_start(struct net_device *net_dev) +{ + int err, i; + struct dpa_priv_s *priv; + struct mac_device *mac_dev; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + + if (!mac_dev) + goto no_mac; + +#ifdef CONFIG_FSL_DPA_1588 + if (priv->tsu && priv->tsu->valid) { + if (mac_dev->fm_rtc_enable) + mac_dev->fm_rtc_enable(net_dev); + } +#endif + + dpaa_eth_napi_enable(priv); + + err = mac_dev->init_phy(net_dev); + if (err < 0) { + if (netif_msg_ifup(priv)) + cpu_netdev_err(net_dev, "init_phy() = %d\n", err); + goto init_phy_failed; + } + + for_each_port_device(i, mac_dev->port_dev) + fm_port_enable(mac_dev->port_dev[i]); + + err = priv->mac_dev->start(mac_dev); + if (err < 0) { + if (netif_msg_ifup(priv)) + cpu_netdev_err(net_dev, "mac_dev->start() = %d\n", err); + goto mac_start_failed; + } + +no_mac: + netif_tx_start_all_queues(net_dev); + + return 0; + +mac_start_failed: + for_each_port_device(i, mac_dev->port_dev) + fm_port_disable(mac_dev->port_dev[i]); + +init_phy_failed: + dpaa_eth_napi_disable(priv); + + return err; +} + +static int __cold dpa_stop(struct net_device *net_dev) +{ + int _errno, i; + struct dpa_priv_s *priv; + struct mac_device *mac_dev; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + + netif_tx_stop_all_queues(net_dev); + + if (!mac_dev) + return 0; + +#ifdef CONFIG_FSL_DPA_1588 + if (priv->tsu && priv->tsu->valid) { + if (mac_dev->fm_rtc_disable) + mac_dev->fm_rtc_disable(net_dev); + } +#endif + + _errno = mac_dev->stop(mac_dev); + if (unlikely(_errno < 0)) + if (netif_msg_ifdown(priv)) + cpu_netdev_err(net_dev, "mac_dev->stop() = %d\n", + _errno); + + for_each_port_device(i, mac_dev->port_dev) + fm_port_disable(mac_dev->port_dev[i]); + + if (mac_dev->phy_dev) + phy_disconnect(mac_dev->phy_dev); + mac_dev->phy_dev = NULL; + + dpaa_eth_napi_disable(priv); + + return _errno; +} + +static void __cold dpa_timeout(struct net_device *net_dev) +{ + const struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv; + + priv = netdev_priv(net_dev); + percpu_priv = per_cpu_ptr(priv->percpu_priv, smp_processor_id()); + + if (netif_msg_timer(priv)) + cpu_netdev_crit(net_dev, "Transmit timeout latency: %lu ms\n", + (jiffies - net_dev->trans_start) * 1000 / HZ); + + percpu_priv->stats.tx_errors++; +} + +static int dpa_bp_cmp(const void *dpa_bp0, const void *dpa_bp1) +{ + return ((struct dpa_bp *)dpa_bp0)->size - + ((struct dpa_bp *)dpa_bp1)->size; +} + +static struct dpa_bp * __cold __must_check __attribute__((nonnull)) +dpa_bp_probe(struct platform_device *_of_dev, size_t *count) +{ + int i, lenp, na, ns; + struct device *dev; + struct device_node *dev_node; + const phandle 
*phandle_prop; + const uint32_t *bpid; + const uint32_t *bpool_cfg; + struct dpa_bp *dpa_bp; + int has_kernel_pool = 0; + int has_shared_pool = 0; + + dev = &_of_dev->dev; + + /* The default is one, if there's no property */ + *count = 1; + + /* There are three types of buffer pool configuration: + * 1) No bp assignment + * 2) A static assignment to an empty configuration + * 3) A static assignment to one or more configured pools + * + * We don't support using multiple unconfigured pools. + */ + + /* Get the buffer pools to be used */ + phandle_prop = of_get_property(dev->of_node, + "fsl,bman-buffer-pools", &lenp); + + if (phandle_prop) + *count = lenp / sizeof(phandle); + else { + if (default_pool) + return default_pool; + + has_kernel_pool = 1; + } + + dpa_bp = devm_kzalloc(dev, *count * sizeof(*dpa_bp), GFP_KERNEL); + if (unlikely(dpa_bp == NULL)) { + dpaa_eth_err(dev, "devm_kzalloc() failed\n"); + return ERR_PTR(-ENOMEM); + } + + dev_node = of_find_node_by_path("/"); + if (unlikely(dev_node == NULL)) { + dpaa_eth_err(dev, "of_find_node_by_path(/) failed\n"); + return ERR_PTR(-EINVAL); + } + + na = of_n_addr_cells(dev_node); + ns = of_n_size_cells(dev_node); + + for (i = 0; i < *count && phandle_prop; i++) { + of_node_put(dev_node); + dev_node = of_find_node_by_phandle(phandle_prop[i]); + if (unlikely(dev_node == NULL)) { + dpaa_eth_err(dev, "of_find_node_by_phandle() failed\n"); + return ERR_PTR(-EFAULT); + } + + if (unlikely(!of_device_is_compatible(dev_node, "fsl,bpool"))) { + dpaa_eth_err(dev, + "!of_device_is_compatible(%s, fsl,bpool)\n", + dev_node->full_name); + dpa_bp = ERR_PTR(-EINVAL); + goto _return_of_node_put; + } + + bpid = of_get_property(dev_node, "fsl,bpid", &lenp); + if ((bpid == NULL) || (lenp != sizeof(*bpid))) { + dpaa_eth_err(dev, "fsl,bpid property not found.\n"); + dpa_bp = ERR_PTR(-EINVAL); + goto _return_of_node_put; + } + dpa_bp[i].bpid = *bpid; + + bpool_cfg = of_get_property(dev_node, "fsl,bpool-ethernet-cfg", + &lenp); + if (bpool_cfg && (lenp == (2 * ns + na) * sizeof(*bpool_cfg))) { + const uint32_t *seed_pool; + + dpa_bp[i].count = of_read_number(bpool_cfg, ns); + dpa_bp[i].size = of_read_number(bpool_cfg + ns, ns); + dpa_bp[i].paddr = + of_read_number(bpool_cfg + 2 * ns, na); + + seed_pool = of_get_property(dev_node, + "fsl,bpool-ethernet-seeds", &lenp); + dpa_bp[i].seed_pool = !!seed_pool; + + has_shared_pool = 1; + } else { + has_kernel_pool = 1; + } + + if (i > 0) + has_shared_pool = 1; + } + + if (has_kernel_pool && has_shared_pool) { + dpaa_eth_err(dev, "Invalid buffer pool configuration " + "for node %s\n", dev_node->full_name); + dpa_bp = ERR_PTR(-EINVAL); + goto _return_of_node_put; + } else if (has_kernel_pool) { + dpa_bp->count = DEFAULT_COUNT; + dpa_bp->size = DEFAULT_BUF_SIZE; + dpa_bp->kernel_pool = 1; + } + + sort(dpa_bp, *count, sizeof(*dpa_bp), dpa_bp_cmp, NULL); + + return dpa_bp; + +_return_of_node_put: + if (dev_node) + of_node_put(dev_node); + + return dpa_bp; +} + +static int dpa_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp, + size_t count) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + int i; + + if (dpa_bp->kernel_pool) { + priv->shared = 0; + + if (netif_msg_probe(priv)) + cpu_dev_info(net_dev->dev.parent, + "Using private BM buffer pools\n"); + } else { + priv->shared = 1; + } + + priv->dpa_bp = dpa_bp; + priv->bp_count = count; + + for (i = 0; i < count; i++) { + int err; + err = dpa_bp_alloc(&dpa_bp[i]); + if (err < 0) { + dpa_bp_free(priv, dpa_bp); + priv->dpa_bp = NULL; + return err; + } + + /* For 
now, just point to the default pool. + * We can add support for more pools, later + */ + if (dpa_bp->kernel_pool) + priv->dpa_bp = default_pool; + } + + return 0; +} + +static struct mac_device * __cold __must_check +__attribute__((nonnull)) +dpa_mac_probe(struct platform_device *_of_dev) +{ + struct device *dpa_dev, *dev; + struct device_node *mac_node; + int lenp; + const phandle *phandle_prop; + struct platform_device *of_dev; + struct mac_device *mac_dev; +#ifdef CONFIG_FSL_DPA_1588 + struct net_device *net_dev = NULL; + struct dpa_priv_s *priv = NULL; + struct device_node *timer_node; +#endif + + phandle_prop = of_get_property(_of_dev->dev.of_node, "fsl,fman-mac", &lenp); + if (phandle_prop == NULL) + return NULL; + + BUG_ON(lenp != sizeof(phandle)); + + dpa_dev = &_of_dev->dev; + + mac_node = of_find_node_by_phandle(*phandle_prop); + if (unlikely(mac_node == NULL)) { + dpaa_eth_err(dpa_dev, "of_find_node_by_phandle() failed\n"); + return ERR_PTR(-EFAULT); + } + + of_dev = of_find_device_by_node(mac_node); + if (unlikely(of_dev == NULL)) { + dpaa_eth_err(dpa_dev, "of_find_device_by_node(%s) failed\n", + mac_node->full_name); + of_node_put(mac_node); + return ERR_PTR(-EINVAL); + } + of_node_put(mac_node); + + dev = &of_dev->dev; + + mac_dev = dev_get_drvdata(dev); + if (unlikely(mac_dev == NULL)) { + dpaa_eth_err(dpa_dev, "dev_get_drvdata(%s) failed\n", + dev_name(dev)); + return ERR_PTR(-EINVAL); + } + +#ifdef CONFIG_FSL_DPA_1588 + phandle_prop = of_get_property(mac_node, "ptimer-handle", &lenp); + if (phandle_prop && ((mac_dev->phy_if != PHY_INTERFACE_MODE_SGMII) || + ((mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) && + (mac_dev->speed == SPEED_1000)))) { + timer_node = of_find_node_by_phandle(*phandle_prop); + if (timer_node && (net_dev = dev_get_drvdata(dpa_dev))) { + priv = netdev_priv(net_dev); + if (!dpa_ptp_init(priv)) + dpaa_eth_info(dev, "%s: ptp-timer enabled\n", + mac_node->full_name); + } + } +#endif + + return mac_dev; +} + +static const char fsl_qman_frame_queues[][25] = { + [RX] = "fsl,qman-frame-queues-rx", + [TX] = "fsl,qman-frame-queues-tx" +}; + +#ifdef CONFIG_DEBUG_FS +static int __cold dpa_debugfs_show(struct seq_file *file, void *offset) +{ + int i; + struct dpa_priv_s *priv; + struct dpa_percpu_priv_s *percpu_priv, total; + struct dpa_bp *dpa_bp; + unsigned int count_total = 0; + + BUG_ON(offset == NULL); + + priv = netdev_priv((struct net_device *)file->private); + + dpa_bp = priv->dpa_bp; + + memset(&total, 0, sizeof(total)); + + seq_printf(file, "\tirqs\trx\ttx\trecycle\tconfirm\ttx err\trx err" \ + "\tbp count\n"); + for_each_online_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + + total.in_interrupt += percpu_priv->in_interrupt; + total.ingress_calls += percpu_priv->stats.rx_packets; + total.stats.tx_packets += percpu_priv->stats.tx_packets; + total.tx_returned += percpu_priv->tx_returned; + total.tx_confirm += percpu_priv->tx_confirm; + total.stats.tx_errors += percpu_priv->stats.tx_errors; + total.stats.rx_errors += percpu_priv->stats.rx_errors; + count_total += *percpu_priv->dpa_bp_count; + + seq_printf(file, "%hu/%hu\t%u\t%lu\t%lu\t%u\t%u\t%lu\t%lu" \ + "\t%d\n", + get_hard_smp_processor_id(i), i, + percpu_priv->in_interrupt, + percpu_priv->stats.rx_packets, + percpu_priv->stats.tx_packets, + percpu_priv->tx_returned, + percpu_priv->tx_confirm, + percpu_priv->stats.tx_errors, + percpu_priv->stats.rx_errors, + *percpu_priv->dpa_bp_count); + } + seq_printf(file, "Total\t%u\t%u\t%lu\t%u\t%u\t%lu\t%lu\t%d\n", + total.in_interrupt, + 
total.ingress_calls, + total.stats.tx_packets, + total.tx_returned, + total.tx_confirm, + total.stats.tx_errors, + total.stats.rx_errors, + count_total); + + return 0; +} + +static int __cold dpa_debugfs_open(struct inode *inode, struct file *file) +{ + int _errno; + const struct net_device *net_dev; + + _errno = single_open(file, dpa_debugfs_show, inode->i_private); + if (unlikely(_errno < 0)) { + net_dev = (struct net_device *)inode->i_private; + + if (netif_msg_drv((struct dpa_priv_s *)netdev_priv(net_dev))) + cpu_netdev_err(net_dev, "single_open() = %d\n", + _errno); + } + return _errno; +} + +static const struct file_operations dpa_debugfs_fops = { + .open = dpa_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static u16 dpa_select_queue(struct net_device *net_dev, struct sk_buff *skb) +{ + return smp_processor_id(); +} + +static const struct net_device_ops dpa_private_ops = { + .ndo_open = dpa_start, + .ndo_start_xmit = dpa_tx, + .ndo_stop = dpa_stop, + .ndo_change_rx_flags = dpa_change_rx_flags, + .ndo_tx_timeout = dpa_timeout, + .ndo_get_stats = dpa_get_stats, + .ndo_set_mac_address = dpa_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_select_queue = dpa_select_queue, + .ndo_change_mtu = dpa_change_mtu, + .ndo_set_rx_mode = dpa_set_multicast_list, + .ndo_do_ioctl = dpa_ioctl, +}; + +static const struct net_device_ops dpa_shared_ops = { + .ndo_open = dpa_start, + .ndo_start_xmit = dpa_shared_tx, + .ndo_stop = dpa_stop, + .ndo_change_rx_flags = dpa_change_rx_flags, + .ndo_tx_timeout = dpa_timeout, + .ndo_get_stats = dpa_get_stats, + .ndo_set_mac_address = dpa_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = dpa_set_multicast_list, + .ndo_do_ioctl = dpa_ioctl, +}; + +static int dpa_get_channel(struct device *dev, + struct device_node *dpa_node) +{ + struct device_node *dev_node; + const uint32_t *channel_id; + int lenp; + + dev_node = of_parse_phandle(dpa_node, "fsl,qman-channel", 0); + if (dev_node == NULL) { + dpaa_eth_err(dev, "Could not find fsl,qman-channel property\n"); + return -EFAULT; + } + + channel_id = of_get_property(dev_node, "fsl,qman-channel-id", &lenp); + if ((channel_id == NULL) || (lenp < sizeof(*channel_id))) { + dpaa_eth_err(dev, "Could not get fsl,qman-channel-id in %s\n", + dev_node->full_name); + of_node_put(dev_node); + return -EINVAL; + } + of_node_put(dev_node); + return *channel_id; +} + +struct fqid_cell { + uint32_t start; + uint32_t count; +}; + +static const struct fqid_cell default_fqids[][3] = { + [RX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_RX_QUEUES} }, + [TX] = { {0, 1}, {0, 1}, {0, DPAA_ETH_TX_QUEUES} } +}; + +static int +dpa_fq_probe(struct platform_device *_of_dev, struct list_head *list, + struct dpa_fq **defq, struct dpa_fq **errq, + struct dpa_fq **fqs, int ptype) +{ + struct device *dev = &_of_dev->dev; + struct device_node *np = dev->of_node; + const struct fqid_cell *fqids; + int i, j, lenp; + int num_fqids; + struct dpa_fq *dpa_fq; + int err = 0; + + fqids = of_get_property(np, fsl_qman_frame_queues[ptype], &lenp); + if (fqids == NULL) { + fqids = default_fqids[ptype]; + num_fqids = 3; + } else + num_fqids = lenp / sizeof(*fqids); + + for (i = 0; i < num_fqids; i++) { + dpa_fq = devm_kzalloc(dev, sizeof(*dpa_fq) * fqids[i].count, + GFP_KERNEL); + if (dpa_fq == NULL) { + dpaa_eth_err(dev, "devm_kzalloc() failed\n"); + return -ENOMEM; + } + + /* The first queue is the Error queue */ + if (i == 0 && errq) { + *errq = dpa_fq; + + if (fqids[i].count 
!= 1) {
+ dpaa_eth_err(dev, "Too many error queues!\n");
+ err = -EINVAL;
+ goto invalid_error_queues;
+ }
+ }
+
+ /* The second queue is the Default queue */
+ if (i == 1 && defq) {
+ *defq = dpa_fq;
+
+ if (fqids[i].count != 1) {
+ dpaa_eth_err(dev, "Too many default queues!\n");
+ err = -EINVAL;
+ goto invalid_default_queues;
+ }
+ }
+
+ /*
+ * All subsequent queues are gathered together.
+ * The first 8 will be used by the private linux interface
+ * if these are TX queues
+ */
+ if (i == 2 || (!errq && i == 0 && fqs))
+ *fqs = dpa_fq;
+
+#warning We lost the 8-queue enforcement
+
+#define DPA_NUM_WQS 8
+ for (j = 0; j < fqids[i].count; j++) {
+ dpa_fq[j].fqid = fqids[i].start ?
+ fqids[i].start + j : 0;
+ dpa_fq[j].wq = dpa_fq[j].fqid ?
+ dpa_fq[j].fqid % DPA_NUM_WQS : DPA_NUM_WQS - 1;
+ list_add_tail(&dpa_fq[j].list, list);
+ }
+ }
+
+invalid_default_queues:
+invalid_error_queues:
+ return err;
+}
+
+static void dpa_setup_ingress(struct dpa_priv_s *priv, struct dpa_fq *fq,
+ const struct qman_fq *template)
+{
+ fq->fq_base = *template;
+ fq->net_dev = priv->net_dev;
+
+ fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
+ fq->channel = priv->channel;
+}
+
+static void dpa_setup_egress(struct dpa_priv_s *priv,
+ struct list_head *head, struct dpa_fq *fq,
+ struct fm_port *port)
+{
+ struct list_head *ptr = &fq->list;
+ int i = 0;
+
+ while (true) {
+ struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list);
+ if (priv->shared)
+ iter->fq_base = shared_egress_fq;
+ else
+ iter->fq_base = private_egress_fq;
+
+ iter->net_dev = priv->net_dev;
+ priv->egress_fqs[i++] = &iter->fq_base;
+
+ if (port) {
+ iter->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
+ iter->channel = fm_get_tx_port_channel(port);
+ } else
+ iter->flags = QMAN_FQ_FLAG_NO_MODIFY;
+
+ if (list_is_last(ptr, head))
+ break;
+
+ ptr = ptr->next;
+ }
+}
+
+static void dpa_setup_ingress_queues(struct dpa_priv_s *priv,
+ struct list_head *head, struct dpa_fq *fq)
+{
+ struct list_head *ptr = &fq->list;
+ u32 fqid;
+ int portals[NR_CPUS];
+ int num_portals;
+ int i;
+ struct device_node *qm_node;
+ struct device_node *cpu_node;
+ const uint32_t *uint32_prop;
+ const phandle *ph;
+ int lenp;
+ int cpu;
+ bool found;
+ const cpumask_t *affine_cpus = qman_affine_cpus();
+
+ /*
+ * Make a list of the available portals.
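+ * (What we record for each portal is its fsl,qman-channel-id, so the
+ * ingress FQs can be spread across the portals round-robin below.)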
+ * We're only interested in those portals which have an affine core + * and moreover that core is included in the cpumask provided by QMan + */ + num_portals = 0; + for_each_compatible_node(qm_node, NULL, "fsl,qman-portal") { + /* Check if portal has an affine core */ + ph = of_get_property(qm_node, "cpu-handle", &lenp); + if (!ph || (lenp != sizeof(phandle))) + continue; + + /* Get the hardware id of the affine core */ + cpu_node = of_find_node_by_phandle(*ph); + if (!cpu_node) + continue; + uint32_prop = of_get_property(cpu_node, "reg", &lenp); + if (!uint32_prop || (lenp != sizeof(uint32_t))) { + dpaa_eth_err(fq->net_dev->dev.parent, + "failed to get property %s for node %s", + "reg", cpu_node->full_name); + continue; + } + + /* If it's not included in the cpumask we got from QMan, + * skip portal */ + found = false; + for_each_cpu(cpu, affine_cpus) { + if (*uint32_prop == get_hard_smp_processor_id(cpu) + && !of_get_property(qm_node, + "fsl,usdpaa-portal", NULL)) { + found = true; + break; + } + } + if (!found) + continue; + + /* This portal is good, store its sw channel */ + uint32_prop = of_get_property(qm_node, + "fsl,qman-channel-id", &lenp); + if (!uint32_prop || (lenp != sizeof(uint32_t))) { + dpaa_eth_err(fq->net_dev->dev.parent, + "Failed to get property %s for node %s", + "fsl,qman-channel-id", qm_node->full_name); + continue; + } + portals[num_portals++] = *uint32_prop; + } + if (num_portals == 0) { + dpaa_eth_err(fq->net_dev->dev.parent, + "No adequate Qman portals found"); + return; + } + + i = 0; + fqid = 0; + if (priv->mac_dev) + fqid = (priv->mac_dev->res->start & 0x1fffff) >> 6; + + while (true) { + struct dpa_fq *iter = list_entry(ptr, struct dpa_fq, list); + + if (priv->shared) + dpa_setup_ingress(priv, iter, &rx_shared_fq); + else + dpa_setup_ingress(priv, iter, &rx_private_defq); + + if (!iter->fqid) + iter->fqid = fqid++; + + /* Assign the queues to a channel in a round-robin fashion */ + iter->channel = portals[i]; + i = (i + 1) % num_portals; + + if (list_is_last(ptr, head)) + break; + + ptr = ptr->next; + } +} + +static void +dpaa_eth_init_tx_port(struct fm_port *port, struct dpa_fq *errq, + struct dpa_fq *defq, bool has_timer) +{ + struct fm_port_non_rx_params tx_port_param; + + dpaa_eth_init_port(tx, port, tx_port_param, errq->fqid, defq->fqid, + has_timer); +} + +static void +dpaa_eth_init_rx_port(struct fm_port *port, struct dpa_bp *bp, size_t count, + struct dpa_fq *errq, struct dpa_fq *defq, bool has_timer) +{ + struct fm_port_rx_params rx_port_param; + int i; + + count = min(ARRAY_SIZE(rx_port_param.pool_param), count); + rx_port_param.num_pools = count; + for (i = 0; i < count; i++) { + if (i >= rx_port_param.num_pools) + break; + + rx_port_param.pool_param[i].id = bp[i].bpid; + rx_port_param.pool_param[i].size = bp[i].size; + } + + dpaa_eth_init_port(rx, port, rx_port_param, errq->fqid, defq->fqid, + has_timer); +} + +static void dpa_rx_fq_init(struct dpa_priv_s *priv, struct list_head *head, + struct dpa_fq *defq, struct dpa_fq *errq, + struct dpa_fq *fqs) +{ + if (fqs) + dpa_setup_ingress_queues(priv, head, fqs); + + /* Only real devices need default/error queues set up */ + if (!priv->mac_dev) + return; + + if (defq->fqid == 0 && netif_msg_probe(priv)) + cpu_pr_info("Using dynamic RX QM frame queues\n"); + + if (priv->shared) { + dpa_setup_ingress(priv, defq, &rx_shared_fq); + dpa_setup_ingress(priv, errq, &rx_shared_fq); + } else { + dpa_setup_ingress(priv, defq, &rx_private_defq); + dpa_setup_ingress(priv, errq, &rx_private_errq); + } +} + +static 
void dpa_tx_fq_init(struct dpa_priv_s *priv, struct list_head *head, + struct dpa_fq *defq, struct dpa_fq *errq, + struct dpa_fq *fqs, struct fm_port *port) +{ + if (fqs) + dpa_setup_egress(priv, head, fqs, port); + + /* Only real devices need default/error queues set up */ + if (!priv->mac_dev) + return; + + if (defq->fqid == 0 && netif_msg_probe(priv)) + cpu_pr_info("Using dynamic TX QM frame queues\n"); + + /* The shared driver doesn't use tx confirmation */ + if (priv->shared) { + dpa_setup_ingress(priv, defq, &dummyq); + dpa_setup_ingress(priv, errq, &dummyq); + } else { + dpa_setup_ingress(priv, defq, &tx_private_defq); + dpa_setup_ingress(priv, errq, &tx_private_errq); + } +} + +static int dpa_netdev_init(struct device_node *dpa_node, + struct net_device *net_dev) +{ + int err; + const uint8_t *mac_addr; + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + + net_dev->features |= DPA_NETIF_FEATURES; + net_dev->vlan_features |= DPA_NETIF_FEATURES; + + if (!priv->mac_dev) { + /* Get the MAC address */ + mac_addr = of_get_mac_address(dpa_node); + if (mac_addr == NULL) { + if (netif_msg_probe(priv)) + dpaa_eth_err(dev, "No MAC address found!\n"); + return -EINVAL; + } + } else { + net_dev->mem_start = priv->mac_dev->res->start; + net_dev->mem_end = priv->mac_dev->res->end; + + mac_addr = priv->mac_dev->addr; + net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + net_dev->vlan_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + } + + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + + SET_ETHTOOL_OPS(net_dev, &dpa_ethtool_ops); + net_dev->needed_headroom = DPA_BP_HEAD; + net_dev->watchdog_timeo = tx_timeout * HZ / 1000; + + err = register_netdev(net_dev); + if (err < 0) { + dpaa_eth_err(dev, "register_netdev() = %d\n", err); + return err; + } + +#ifdef CONFIG_DEBUG_FS + priv->debugfs_file = debugfs_create_file(net_dev->name, S_IRUGO, + dpa_debugfs_root, net_dev, + &dpa_debugfs_fops); + if (unlikely(priv->debugfs_file == NULL)) { + cpu_netdev_err(net_dev, "debugfs_create_file(%s/%s/%s) = %d\n", + powerpc_debugfs_root->d_iname, + dpa_debugfs_root->d_iname, + net_dev->name, err); + + unregister_netdev(net_dev); + return -ENOMEM; + } +#endif + + return 0; +} + +static int dpa_shared_netdev_init(struct device_node *dpa_node, + struct net_device *net_dev) +{ + net_dev->netdev_ops = &dpa_shared_ops; + + return dpa_netdev_init(dpa_node, net_dev); +} + +static int dpa_private_netdev_init(struct device_node *dpa_node, + struct net_device *net_dev) +{ + int i; + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct dpa_percpu_priv_s *percpu_priv; + + for_each_online_cpu(i) { + percpu_priv = per_cpu_ptr(priv->percpu_priv, i); + percpu_priv->net_dev = net_dev; + + percpu_priv->dpa_bp = priv->dpa_bp; + percpu_priv->dpa_bp_count = + per_cpu_ptr(priv->dpa_bp->percpu_count, i); + netif_napi_add(net_dev, &percpu_priv->napi, dpaa_eth_poll, + DPA_NAPI_WEIGHT); + } + + net_dev->netdev_ops = &dpa_private_ops; + + return dpa_netdev_init(dpa_node, net_dev); +} + +static int dpa_alloc_pcd_fqids(struct device *dev, uint32_t num, + uint8_t alignment, uint32_t *base_fqid) +{ + dpaa_eth_crit(dev, "callback not implemented!\n"); + BUG(); + + return 0; +} + +static int dpa_free_pcd_fqids(struct device *dev, uint32_t base_fqid) +{ + + dpaa_eth_crit(dev, "callback not implemented!\n"); + BUG(); + + return 0; +} + +static ssize_t dpaa_eth_show_addr(struct device *dev, + struct device_attribute *attr, 
char *buf) +{ + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); + struct mac_device *mac_dev = priv->mac_dev; + + if (mac_dev) + return sprintf(buf, "%llx", + (unsigned long long)mac_dev->res->start); + else + return sprintf(buf, "none"); +} + +static DEVICE_ATTR(device_addr, S_IRUGO, dpaa_eth_show_addr, NULL); + +static ssize_t dpaa_eth_show_fqids(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dpa_priv_s *priv = netdev_priv(to_net_dev(dev)); + ssize_t bytes = 0; + int i = 0; + char *str; + struct dpa_fq *fq; + struct dpa_fq *tmp; + struct dpa_fq *prev = NULL; + u32 first_fqid = 0; + u32 last_fqid = 0; + char *prevstr = NULL; + + list_for_each_entry_safe(fq, tmp, &priv->dpa_fq_list, list) { + void *dqrr = fq->fq_base.cb.dqrr; + if (dqrr == ingress_rx_error_dqrr) + str = "error"; + else if (i == 1 && dqrr == ingress_rx_default_dqrr) + str = "default"; + else if (dqrr == ingress_rx_error_dqrr || + dqrr == ingress_rx_default_dqrr) + str = "RX"; + else if (dqrr == ingress_tx_default_dqrr) + str = "TX confirmation"; + else if (dqrr == ingress_tx_error_dqrr) + str = "TX error"; + else if (dqrr == NULL) + str = "TX"; + else + str = "unknown"; + + if (prev && (abs(fq->fqid - prev->fqid) != 1 || + str != prevstr)) { + if (last_fqid == first_fqid) + bytes += sprintf(buf + bytes, + "%s: %d\n", prevstr, prev->fqid); + else + bytes += sprintf(buf + bytes, + "%s: %d - %d\n", prevstr, + first_fqid, last_fqid); + } + + if (prev && abs(fq->fqid - prev->fqid) == 1 && str == prevstr) + last_fqid = fq->fqid; + else + first_fqid = last_fqid = fq->fqid; + + prev = fq; + prevstr = str; + i++; + } + + if (last_fqid == first_fqid) + bytes += sprintf(buf + bytes, "%s: %d\n", prevstr, prev->fqid); + else + bytes += sprintf(buf + bytes, "%s: %d - %d\n", prevstr, + first_fqid, last_fqid); + + return bytes; +} + +static DEVICE_ATTR(fqids, S_IRUGO, dpaa_eth_show_fqids, NULL); + + +static void dpaa_eth_sysfs_init(struct device *dev) +{ + if (device_create_file(dev, &dev_attr_device_addr)) + dev_err(dev, "Error creating dpaa_eth addr file\n"); + if (device_create_file(dev, &dev_attr_fqids)) + dev_err(dev, "Error creating dpaa_eth fqids file\n"); +} +static const struct of_device_id dpa_match[] ; +static int +dpaa_eth_probe(struct platform_device *_of_dev) +{ + int err, i; + struct device *dev; + struct device_node *dpa_node; + struct dpa_bp *dpa_bp; + struct dpa_fq *dpa_fq, *tmp; + struct list_head rxfqlist; + struct list_head txfqlist; + size_t count; + struct net_device *net_dev = NULL; + struct dpa_priv_s *priv = NULL; + struct dpa_fq *rxdefault = NULL; + struct dpa_fq *txdefault = NULL; + struct dpa_fq *rxerror = NULL; + struct dpa_fq *txerror = NULL; + struct dpa_fq *rxextra = NULL; + struct dpa_fq *txfqs = NULL; + struct fm_port *rxport = NULL; + struct fm_port *txport = NULL; + bool has_timer = FALSE; + struct mac_device *mac_dev; + int proxy_enet; + const struct of_device_id *match; + + dev = &_of_dev->dev; + + dpa_node = dev->of_node; + + match = of_match_device(dpa_match, dev); + if (!match) + return -EINVAL; + + if (!of_device_is_available(dpa_node)) + return -ENODEV; + + /* + * If it's not an fsl,dpa-ethernet node, we just serve as a proxy + * initializer driver, and don't do any linux device setup + */ + proxy_enet = strcmp(match->compatible, "fsl,dpa-ethernet"); + + /* + * Allocate this early, so we can store relevant information in + * the private area + */ + if (!proxy_enet) { + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES); + if (!net_dev) { + 
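+ /* alloc_etherdev_mq() failed before any FMan/QMan resources were
+ * claimed, so there is nothing to clean up yet. */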
dpaa_eth_err(dev, "alloc_etherdev_mq() failed\n"); + return -ENOMEM; + } + + /* Do this here, so we can be verbose early */ + SET_NETDEV_DEV(net_dev, dev); + dev_set_drvdata(dev, net_dev); + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + + priv->msg_enable = netif_msg_init(debug, -1); + } + + /* Get the buffer pools assigned to this interface */ + dpa_bp = dpa_bp_probe(_of_dev, &count); + if (IS_ERR(dpa_bp)) { + err = PTR_ERR(dpa_bp); + goto bp_probe_failed; + } + + mac_dev = dpa_mac_probe(_of_dev); + if (IS_ERR(mac_dev)) { + err = PTR_ERR(mac_dev); + goto mac_probe_failed; + } else if (mac_dev) { + rxport = mac_dev->port_dev[RX]; + txport = mac_dev->port_dev[TX]; + } + + INIT_LIST_HEAD(&rxfqlist); + INIT_LIST_HEAD(&txfqlist); + + if (rxport) + err = dpa_fq_probe(_of_dev, &rxfqlist, &rxdefault, &rxerror, + &rxextra, RX); + else + err = dpa_fq_probe(_of_dev, &rxfqlist, NULL, NULL, + &rxextra, RX); + + if (err < 0) + goto rx_fq_probe_failed; + + if (txport) + err = dpa_fq_probe(_of_dev, &txfqlist, &txdefault, &txerror, + &txfqs, TX); + else + err = dpa_fq_probe(_of_dev, &txfqlist, NULL, NULL, &txfqs, TX); + + if (err < 0) + goto tx_fq_probe_failed; + + /* + * Now we have all of the configuration information. + * We support a number of configurations: + * 1) Private interface - An optimized linux ethernet driver with + * a real network connection. + * 2) Shared interface - A device intended for virtual connections + * or for a real interface that is shared between partitions + * 3) Proxy initializer - Just configures the MAC on behalf of + * another partition + */ + + /* bp init */ + if (net_dev) { + err = dpa_bp_create(net_dev, dpa_bp, count); + + if (err < 0) + goto bp_create_failed; + + priv->mac_dev = mac_dev; + + priv->channel = dpa_get_channel(dev, dpa_node); + + if (priv->channel < 0) { + err = priv->channel; + goto get_channel_failed; + } + + dpa_rx_fq_init(priv, &rxfqlist, rxdefault, rxerror, rxextra); + dpa_tx_fq_init(priv, &txfqlist, txdefault, txerror, txfqs, + txport); + + /* Add the FQs to the interface, and make them active */ + INIT_LIST_HEAD(&priv->dpa_fq_list); + + list_for_each_entry_safe(dpa_fq, tmp, &rxfqlist, list) { + err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq); + if (err < 0) + goto fq_alloc_failed; + } + + list_for_each_entry_safe(dpa_fq, tmp, &txfqlist, list) { + err = _dpa_fq_alloc(&priv->dpa_fq_list, dpa_fq); + if (err < 0) + goto fq_alloc_failed; + } + + if (priv->tsu && priv->tsu->valid) + has_timer = TRUE; + } + + /* All real interfaces need their ports initialized */ + if (mac_dev) { + struct fm_port_pcd_param rx_port_pcd_param; + + dpaa_eth_init_rx_port(rxport, dpa_bp, count, rxerror, + rxdefault, has_timer); + dpaa_eth_init_tx_port(txport, txerror, txdefault, has_timer); + + rx_port_pcd_param.cba = dpa_alloc_pcd_fqids; + rx_port_pcd_param.cbf = dpa_free_pcd_fqids; + rx_port_pcd_param.dev = dev; + fm_port_pcd_bind(rxport, &rx_port_pcd_param); + } + + /* + * Proxy interfaces need to be started, and the allocated + * memory freed + */ + if (!net_dev) { + devm_kfree(&_of_dev->dev, dpa_bp); + devm_kfree(&_of_dev->dev, rxdefault); + devm_kfree(&_of_dev->dev, rxerror); + devm_kfree(&_of_dev->dev, txdefault); + devm_kfree(&_of_dev->dev, txerror); + + if (mac_dev) + for_each_port_device(i, mac_dev->port_dev) + fm_port_enable(mac_dev->port_dev[i]); + + return 0; + } + + /* Now we need to initialize either a private or shared interface */ + priv->percpu_priv = __alloc_percpu(sizeof(*priv->percpu_priv), + __alignof__(*priv->percpu_priv)); + if 
(priv->percpu_priv == NULL) { + dpaa_eth_err(dev, "__alloc_percpu() failed\n"); + err = -ENOMEM; + goto alloc_percpu_failed; + } + + if (priv->shared) + err = dpa_shared_netdev_init(dpa_node, net_dev); + else + err = dpa_private_netdev_init(dpa_node, net_dev); + + if (err < 0) + goto netdev_init_failed; + + dpaa_eth_sysfs_init(&net_dev->dev); + +#ifdef CONFIG_DPAA_ETH_UNIT_TESTS + /* The unit test is designed to test private interfaces */ + if (!priv->shared && !tx_unit_test_ran) { + err = dpa_tx_unit_test(net_dev); + + BUG_ON(err); + } +#endif + + return 0; + +netdev_init_failed: + if (net_dev) + free_percpu(priv->percpu_priv); +alloc_percpu_failed: +fq_alloc_failed: + if (net_dev) + dpa_fq_free(dev, &priv->dpa_fq_list); +get_channel_failed: + if (net_dev) + dpa_bp_free(priv, priv->dpa_bp); +bp_create_failed: +tx_fq_probe_failed: +rx_fq_probe_failed: +mac_probe_failed: +bp_probe_failed: + dev_set_drvdata(dev, NULL); + if (net_dev) + free_netdev(net_dev); + + return err; +} + +static const struct of_device_id dpa_match[] = { + { + .compatible = "fsl,dpa-ethernet" + }, + { + .compatible = "fsl,dpa-ethernet-init" + }, + {} +}; +MODULE_DEVICE_TABLE(of, dpa_match); + +static int __cold dpa_remove(struct platform_device *of_dev) +{ + int err; + struct device *dev; + struct net_device *net_dev; + struct dpa_priv_s *priv; + + dev = &of_dev->dev; + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + + dev_set_drvdata(dev, NULL); + unregister_netdev(net_dev); + + err = dpa_fq_free(dev, &priv->dpa_fq_list); + + free_percpu(priv->percpu_priv); + + dpa_bp_free(priv, priv->dpa_bp); + +#ifdef CONFIG_DEBUG_FS + debugfs_remove(priv->debugfs_file); +#endif + +#ifdef CONFIG_FSL_DPA_1588 + if (priv->tsu && priv->tsu->valid) + dpa_ptp_cleanup(priv); +#endif + + free_netdev(net_dev); + + return err; +} + +static struct platform_driver dpa_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = dpa_match, + .owner = THIS_MODULE, + }, + .probe = dpaa_eth_probe, + .remove = dpa_remove, +}; + +static int __init __cold dpa_load(void) +{ + int _errno; + + cpu_pr_info(KBUILD_MODNAME ": " DPA_DESCRIPTION " (" VERSION ")\n"); + +#ifdef CONFIG_DEBUG_FS + dpa_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, + powerpc_debugfs_root); + if (unlikely(dpa_debugfs_root == NULL)) { + _errno = -ENOMEM; + cpu_pr_err(KBUILD_MODNAME ": %s:%hu:%s(): " + "debugfs_create_dir(%s/"KBUILD_MODNAME") = %d\n", + __file__, __LINE__, __func__, + powerpc_debugfs_root->d_iname, _errno); + goto _return; + } +#endif + + _errno = platform_driver_register(&dpa_driver); + if (unlikely(_errno < 0)) { + cpu_pr_err(KBUILD_MODNAME + ": %s:%hu:%s(): platform_driver_register() = %d\n", + __file__, __LINE__, __func__, _errno); + goto _return_debugfs_remove; + } + + goto _return; + +_return_debugfs_remove: +#ifdef CONFIG_DEBUG_FS + debugfs_remove(dpa_debugfs_root); +#endif +_return: + cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__); + + return _errno; +} +module_init(dpa_load); + +static void __exit __cold dpa_unload(void) +{ + cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__); + + platform_driver_unregister(&dpa_driver); + +#ifdef CONFIG_DEBUG_FS + debugfs_remove(dpa_debugfs_root); +#endif + + cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__); +} +module_exit(dpa_unload); + +static int __init fsl_fman_phy_set_max_frm(char *str) +{ + int ret = 0; + + ret = get_option(&str, &fsl_fman_phy_maxfrm); + if (ret != 1) { + /* This will only work if CONFIG_EARLY_PRINTK is compiled in, + * 
and something like "earlyprintk=serial,uart0,115200" is + * specified in the bootargs */ + printk(KERN_WARNING "No suitable %s= prop in bootargs; " + "will use the default DPA_MAX_FRM_SIZE (%d) " + "from Kconfig.\n", + FSL_FMAN_PHY_MAXFRM_BOOTARG, CONFIG_DPA_MAX_FRM_SIZE); + + fsl_fman_phy_maxfrm = CONFIG_DPA_MAX_FRM_SIZE; + return 1; + } + + /* Don't allow invalid bootargs; fallback to the Kconfig value */ + if (fsl_fman_phy_maxfrm < 64 || fsl_fman_phy_maxfrm > 9600) { + printk(KERN_WARNING "Invalid %s=%d in bootargs, valid range is " + "64-9600. Falling back to the DPA_MAX_FRM_SIZE (%d) " + "from Kconfig.\n", + FSL_FMAN_PHY_MAXFRM_BOOTARG, fsl_fman_phy_maxfrm, + CONFIG_DPA_MAX_FRM_SIZE); + + fsl_fman_phy_maxfrm = CONFIG_DPA_MAX_FRM_SIZE; + return 1; + } + + printk(KERN_INFO "Using fsl_fman_phy_maxfrm=%d from bootargs\n", + fsl_fman_phy_maxfrm); + return 0; +} +early_param(FSL_FMAN_PHY_MAXFRM_BOOTARG, fsl_fman_phy_set_max_frm); --- linux-3.13.0.orig/drivers/net/dpa/dpaa_eth.h +++ linux-3.13.0/drivers/net/dpa/dpaa_eth.h @@ -0,0 +1,127 @@ +/* + * Copyright 2008-2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __DPA_H +#define __DPA_H + +#include /* struct ethtool_ops */ +#include +#include /* struct list_head */ +#include /* struct work_struct */ +#include +#include +#ifdef CONFIG_DEBUG_FS +#include /* struct dentry */ +#endif + +#include /* struct qman_fq */ + +#include "dpaa_eth-common.h" + +#include "mac.h" /* struct mac_device */ + + +/* number of Tx queues to FMan */ +#define DPAA_ETH_TX_QUEUES 8 +#define DPAA_ETH_RX_QUEUES 128 + +struct pcd_range { + uint32_t base; + uint32_t count; +}; + +struct dpa_bp { + struct bman_pool *pool; + uint8_t bpid; + struct device *dev; + size_t count; + size_t size; + bool seed_pool; + dma_addr_t paddr; + void *vaddr; + int kernel_pool; + int *percpu_count; + int *needs_refill; + atomic_t refs; +}; + +struct dpa_percpu_priv_s { + struct net_device *net_dev; + int *dpa_bp_count; + struct dpa_bp *dpa_bp; + struct napi_struct napi; + u32 start_tx; + u32 in_interrupt; + u32 ingress_calls; + u32 tx_returned; + u32 tx_confirm; + struct net_device_stats stats; +}; + +struct dpa_priv_s { + struct dpa_bp *dpa_bp; + size_t bp_count; + int shared; + struct net_device *net_dev; + + uint16_t channel; /* "fsl,qman-channel-id" */ + struct list_head dpa_fq_list; + struct qman_fq *egress_fqs[DPAA_ETH_TX_QUEUES]; + + struct mac_device *mac_dev; + + struct dpa_percpu_priv_s *percpu_priv; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_file; +#endif + + uint32_t msg_enable; /* net_device message level */ + struct dpa_ptp_tsu *tsu; +}; + +extern const struct ethtool_ops dpa_ethtool_ops; +extern int fsl_fman_phy_maxfrm; + +static inline int dpaa_eth_napi_schedule(struct dpa_percpu_priv_s *percpu_priv) +{ + if (unlikely(in_irq())) { + /* Disable QMan IRQ and invoke NAPI */ + int ret = qman_irqsource_remove(QM_PIRQ_DQRI); + if (likely(!ret)) { + napi_schedule(&percpu_priv->napi); + return 1; + } + } + return 0; +} + +#endif /* __DPA_H */ --- linux-3.13.0.orig/drivers/net/dpa/mac-api.c +++ linux-3.13.0/drivers/net/dpa/mac-api.c @@ -0,0 +1,674 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include "dpaa_eth-common.h" +#include "dpaa_eth.h" +#include "mac.h" + +#include "error_ext.h" /* GET_ERROR_TYPE, E_OK */ +#include "fm_mac_ext.h" +#include "fm_rtc_ext.h" + +#define MAC_DESCRIPTION "FSL FMan MAC API based driver" + +MODULE_LICENSE("Dual BSD/GPL"); + +MODULE_AUTHOR("Emil Medve "); + +MODULE_DESCRIPTION(MAC_DESCRIPTION); + +struct mac_priv_s { + t_Handle mac; +}; + +const char *mac_driver_description __initconst = MAC_DESCRIPTION; +const size_t mac_sizeof_priv[] = { + [DTSEC] = sizeof(struct mac_priv_s), + [XGMAC] = sizeof(struct mac_priv_s) +}; + +static const e_EnetMode _100[] = +{ + [PHY_INTERFACE_MODE_MII] = e_ENET_MODE_MII_100, + [PHY_INTERFACE_MODE_RMII] = e_ENET_MODE_RMII_100 +}; + +static const e_EnetMode _1000[] = +{ + [PHY_INTERFACE_MODE_GMII] = e_ENET_MODE_GMII_1000, + [PHY_INTERFACE_MODE_SGMII] = e_ENET_MODE_SGMII_1000, + [PHY_INTERFACE_MODE_TBI] = e_ENET_MODE_TBI_1000, + [PHY_INTERFACE_MODE_RGMII] = e_ENET_MODE_RGMII_1000, + [PHY_INTERFACE_MODE_RGMII_ID] = e_ENET_MODE_RGMII_1000, + [PHY_INTERFACE_MODE_RGMII_RXID] = e_ENET_MODE_RGMII_1000, + [PHY_INTERFACE_MODE_RGMII_TXID] = e_ENET_MODE_RGMII_1000, + [PHY_INTERFACE_MODE_RTBI] = e_ENET_MODE_RTBI_1000 +}; + +static e_EnetMode __cold __attribute__((nonnull)) +macdev2enetinterface(const struct mac_device *mac_dev) +{ + switch (mac_dev->max_speed) { + case SPEED_100: + return _100[mac_dev->phy_if]; + case SPEED_1000: + return _1000[mac_dev->phy_if]; + case SPEED_10000: + return e_ENET_MODE_XGMII_10000; + default: + return e_ENET_MODE_MII_100; + } +} + +static void mac_exception(t_Handle _mac_dev, e_FmMacExceptions exception) +{ + struct mac_device *mac_dev; + + mac_dev = (struct mac_device *)_mac_dev; + + if (e_FM_MAC_EX_10G_RX_FIFO_OVFL == exception) { + /* don't flag RX FIFO after the first */ + FM_MAC_SetException( + ((struct mac_priv_s *)macdev_priv(_mac_dev))->mac, + e_FM_MAC_EX_10G_RX_FIFO_OVFL, false); + printk(KERN_ERR "10G MAC got RX FIFO Error = %x\n", exception); + } + + cpu_dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", __file__, __func__, + exception); +} + +static int __cold init(struct mac_device *mac_dev) +{ + int _errno; + t_Error err; + struct mac_priv_s *priv; + t_FmMacParams param; + uint32_t version; + + priv = macdev_priv(mac_dev); + + param.baseAddr = (typeof(param.baseAddr))(uintptr_t)devm_ioremap( + mac_dev->dev, mac_dev->res->start, 0x2000); + param.enetMode = macdev2enetinterface(mac_dev); + memcpy(¶m.addr, mac_dev->addr, min(sizeof(param.addr), + sizeof(mac_dev->addr))); + param.macId = mac_dev->cell_index; + param.h_Fm = (t_Handle)mac_dev->fm; + param.mdioIrq = NO_IRQ; + param.f_Exception = mac_exception; + param.f_Event = mac_exception; + param.h_App = mac_dev; + + priv->mac = FM_MAC_Config(¶m); + if (unlikely(priv->mac == NULL)) { + dpaa_eth_err(mac_dev->dev, "FM_MAC_Config() failed\n"); + _errno = -EINVAL; + goto _return; + } + + err = FM_MAC_ConfigMaxFrameLength(priv->mac, fsl_fman_phy_maxfrm); + _errno = 
-GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_ConfigMaxFrameLength() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) { + /* 10G always works with pad and CRC */ + err = FM_MAC_ConfigPadAndCrc(priv->mac, true); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_ConfigPadAndCrc() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + + err = FM_MAC_ConfigHalfDuplex(priv->mac, mac_dev->half_duplex); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_ConfigHalfDuplex() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + } + else { + err = FM_MAC_ConfigResetOnInit(priv->mac, true); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_ConfigResetOnInit() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + } + + err = FM_MAC_Init(priv->mac); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, "FM_MAC_Init() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + +#ifndef CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN + /* For 1G MAC, disable by default the MIB counters overflow interrupt */ + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) { + err = FM_MAC_SetException(priv->mac, + e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, FALSE); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_SetException() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + } +#endif /* !CONFIG_FMAN_MIB_CNT_OVF_IRQ_EN */ + + /* For 10G MAC, disable Tx ECC exception */ + if (macdev2enetinterface(mac_dev) == e_ENET_MODE_XGMII_10000) { + err = FM_MAC_SetException(priv->mac, + e_FM_MAC_EX_10G_1TX_ECC_ER, FALSE); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_SetException() = 0x%08x\n", err); + goto _return_fm_mac_free; + } + } + + err = FM_MAC_GetVesrion(priv->mac, &version); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) { + dpaa_eth_err(mac_dev->dev, "FM_MAC_GetVesrion() = 0x%08x\n", + err); + goto _return_fm_mac_free; + } + cpu_dev_info(mac_dev->dev, "FMan %s version: 0x%08x\n", + ((macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) ? 
+ "dTSEC" : "XGEC"), version); + + goto _return; + + +_return_fm_mac_free: + err = FM_MAC_Free(priv->mac); + if (unlikely(-GET_ERROR_TYPE(err) < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err); +_return: + return _errno; +} + +static int __cold start(struct mac_device *mac_dev) +{ + int _errno; + t_Error err; + struct phy_device *phy_dev = mac_dev->phy_dev; + + err = FM_MAC_Enable(((struct mac_priv_s *)macdev_priv(mac_dev))->mac, + e_COMM_MODE_RX_AND_TX); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_Enable() = 0x%08x\n", err); + + if (phy_dev) { + if (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000) + phy_start(phy_dev); + else if (phy_dev->drv->read_status) + phy_dev->drv->read_status(phy_dev); + } + + return _errno; +} + +static int __cold stop(struct mac_device *mac_dev) +{ + int _errno; + t_Error err; + + if (mac_dev->phy_dev && + (macdev2enetinterface(mac_dev) != e_ENET_MODE_XGMII_10000)) + phy_stop(mac_dev->phy_dev); + + err = FM_MAC_Disable(((struct mac_priv_s *)macdev_priv(mac_dev))->mac, + e_COMM_MODE_RX_AND_TX); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_Disable() = 0x%08x\n", err); + + return _errno; +} + +static int __cold change_promisc(struct mac_device *mac_dev) +{ + int _errno; + t_Error err; + + err = FM_MAC_SetPromiscuous( + ((struct mac_priv_s *)macdev_priv(mac_dev))->mac, + mac_dev->promisc = !mac_dev->promisc); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, + "FM_MAC_SetPromiscuous() = 0x%08x\n", err); + + return _errno; +} + +static int __cold set_multi(struct net_device *net_dev) +{ + struct dpa_priv_s *priv; + struct mac_device *mac_dev; + struct mac_priv_s *mac_priv; + struct mac_address *old_addr, *tmp; + struct netdev_hw_addr *ha; + int _errno; + t_Error err; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + mac_priv = macdev_priv(mac_dev); + + /* Clear previous address list */ + list_for_each_entry_safe(old_addr, tmp, &mac_dev->mc_addr_list, list) { + err = FM_MAC_RemoveHashMacAddr(mac_priv->mac, + (t_EnetAddr *)old_addr->addr); + _errno = -GET_ERROR_TYPE(err); + if (_errno < 0) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_RemoveHashMacAddr() = 0x%08x\n", err); + return _errno; + } + list_del(&old_addr->list); + kfree(old_addr); + } + + /* Add all the addresses from the new list */ + netdev_for_each_mc_addr(ha, net_dev) { + err = FM_MAC_AddHashMacAddr(mac_priv->mac, + (t_EnetAddr *)ha->addr); + _errno = -GET_ERROR_TYPE(err); + if (_errno < 0) { + dpaa_eth_err(mac_dev->dev, + "FM_MAC_AddHashMacAddr() = 0x%08x\n", err); + return _errno; + } + tmp = kmalloc(sizeof(struct mac_address), GFP_ATOMIC); + if (!tmp) { + dpaa_eth_err(mac_dev->dev, "Out of memory\n"); + return -ENOMEM; + } + memcpy(tmp->addr, ha->addr, ETH_ALEN); + list_add(&tmp->list, &mac_dev->mc_addr_list); + } + return 0; +} + +static int __cold change_addr(struct mac_device *mac_dev, uint8_t *addr) +{ + int _errno; + t_Error err; + + err = FM_MAC_ModifyMacAddr( + ((struct mac_priv_s *)macdev_priv(mac_dev))->mac, + (t_EnetAddr *)addr); + _errno = -GET_ERROR_TYPE(err); + if (_errno < 0) + dpaa_eth_err(mac_dev->dev, + "FM_MAC_ModifyMacAddr() = 0x%08x\n", err); + + return _errno; +} + +static void adjust_link(struct net_device *net_dev) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + struct phy_device *phy_dev = mac_dev->phy_dev; + int _errno; + t_Error err; + + if 
(!phy_dev->link) + return; + + err = FM_MAC_AdjustLink( + ((struct mac_priv_s *)macdev_priv(mac_dev))->mac, + phy_dev->speed, phy_dev->duplex); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_AdjustLink() = 0x%08x\n", + err); + + return; +} + +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. + */ +static int dtsec_init_phy(struct net_device *net_dev) +{ + struct dpa_priv_s *priv; + struct mac_device *mac_dev; + struct phy_device *phy_dev; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + + if (!mac_dev->phy_node) + phy_dev = phy_connect(net_dev, mac_dev->fixed_bus_id, + &adjust_link, mac_dev->phy_if); + else + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, + &adjust_link, 0, mac_dev->phy_if); + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { + cpu_netdev_err(net_dev, "Could not connect to PHY %s\n", + mac_dev->phy_node ? + mac_dev->phy_node->full_name : + mac_dev->fixed_bus_id); + return phy_dev == NULL ? -ENODEV : PTR_ERR(phy_dev); + } + + /* Remove any features not supported by the controller */ + phy_dev->supported &= priv->mac_dev->if_support; + phy_dev->advertising = phy_dev->supported; + + priv->mac_dev->phy_dev = phy_dev; + + return 0; +} + +static int xgmac_init_phy(struct net_device *net_dev) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + struct phy_device *phy_dev; + + if (!mac_dev->phy_node) + phy_dev = phy_attach(net_dev, mac_dev->fixed_bus_id, + mac_dev->phy_if); + else + phy_dev = of_phy_attach(net_dev, mac_dev->phy_node, 0, + mac_dev->phy_if); + if (unlikely(phy_dev == NULL) || IS_ERR(phy_dev)) { + cpu_netdev_err(net_dev, "Could not attach to PHY %s\n", + mac_dev->phy_node ? + mac_dev->phy_node->full_name : + mac_dev->fixed_bus_id); + return phy_dev == NULL ? 
-ENODEV : PTR_ERR(phy_dev); + } + + phy_dev->supported &= priv->mac_dev->if_support; + phy_dev->advertising = phy_dev->supported; + + mac_dev->phy_dev = phy_dev; + + return 0; +} + +static int __cold uninit(struct mac_device *mac_dev) +{ + int _errno, __errno; + t_Error err; + const struct mac_priv_s *priv; + + priv = macdev_priv(mac_dev); + + err = FM_MAC_Disable(priv->mac, e_COMM_MODE_RX_AND_TX); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_Disable() = 0x%08x\n", err); + + err = FM_MAC_Free(priv->mac); + __errno = -GET_ERROR_TYPE(err); + if (unlikely(__errno < 0)) { + dpaa_eth_err(mac_dev->dev, "FM_MAC_Free() = 0x%08x\n", err); + if (_errno < 0) + _errno = __errno; + } + + return _errno; +} + +static int __cold ptp_enable(struct mac_device *mac_dev) +{ + int _errno; + t_Error err; + const struct mac_priv_s *priv; + + priv = macdev_priv(mac_dev); + + err = FM_MAC_Enable1588TimeStamp(priv->mac); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_Enable1588TimeStamp()" + "= 0x%08x\n", err); + return _errno; +} + +static int __cold ptp_disable(struct mac_device *mac_dev) +{ + int _errno; + t_Error err; + const struct mac_priv_s *priv; + + priv = macdev_priv(mac_dev); + + err = FM_MAC_Disable1588TimeStamp(priv->mac); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_MAC_Disable1588TimeStamp()" + "= 0x%08x\n", err); + return _errno; +} + +static int __cold fm_rtc_enable(struct net_device *net_dev) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + int _errno; + t_Error err; + + err = FM_RTC_Enable(fm_get_rtc_handle(mac_dev->fm_dev), 0); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_Enable = 0x%08x\n", err); + + return _errno; +} + +static int __cold fm_rtc_disable(struct net_device *net_dev) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + int _errno; + t_Error err; + + err = FM_RTC_Disable(fm_get_rtc_handle(mac_dev->fm_dev)); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_Disable = 0x%08x\n", err); + + return _errno; +} + +static int __cold fm_rtc_get_cnt(struct net_device *net_dev, uint64_t *ts) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + int _errno; + t_Error err; + + err = FM_RTC_GetCurrentTime(fm_get_rtc_handle(mac_dev->fm_dev), ts); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_GetCurrentTime = 0x%08x\n", + err); + + return _errno; +} + +static int __cold fm_rtc_set_cnt(struct net_device *net_dev, uint64_t ts) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + int _errno; + t_Error err; + + err = FM_RTC_SetCurrentTime(fm_get_rtc_handle(mac_dev->fm_dev), ts); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_SetCurrentTime = 0x%08x\n", + err); + + return _errno; +} + +static int __cold fm_rtc_get_drift(struct net_device *net_dev, uint32_t *drift) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + int _errno; + t_Error err; + + err = FM_RTC_GetFreqCompensation(fm_get_rtc_handle(mac_dev->fm_dev), + drift); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, 
"FM_RTC_GetFreqCompensation =" + "0x%08x\n", err); + + return _errno; +} + +static int __cold fm_rtc_set_drift(struct net_device *net_dev, uint32_t drift) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + int _errno; + t_Error err; + + err = FM_RTC_SetFreqCompensation(fm_get_rtc_handle(mac_dev->fm_dev), + drift); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_SetFreqCompensation =" + "0x%08x\n", err); + + return _errno; +} + +static int __cold fm_rtc_set_alarm(struct net_device *net_dev, uint32_t id, + uint64_t time) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + t_FmRtcAlarmParams alarm; + int _errno; + t_Error err; + + alarm.alarmId = id; + alarm.alarmTime = time; + alarm.f_AlarmCallback = NULL; + err = FM_RTC_SetAlarm(fm_get_rtc_handle(mac_dev->fm_dev), + &alarm); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_SetAlarm =" + "0x%08x\n", err); + + return _errno; +} + +static int __cold fm_rtc_set_fiper(struct net_device *net_dev, uint32_t id, + uint64_t fiper) +{ + struct dpa_priv_s *priv = netdev_priv(net_dev); + struct mac_device *mac_dev = priv->mac_dev; + t_FmRtcPeriodicPulseParams pp; + int _errno; + t_Error err; + + pp.periodicPulseId = id; + pp.periodicPulsePeriod = fiper; + pp.f_PeriodicPulseCallback = NULL; + err = FM_RTC_SetPeriodicPulse(fm_get_rtc_handle(mac_dev->fm_dev), &pp); + _errno = -GET_ERROR_TYPE(err); + if (unlikely(_errno < 0)) + dpaa_eth_err(mac_dev->dev, "FM_RTC_SetPeriodicPulse =" + "0x%08x\n", err); + + return _errno; +} + +static void __cold setup_dtsec(struct mac_device *mac_dev) +{ + mac_dev->init_phy = dtsec_init_phy; + mac_dev->init = init; + mac_dev->start = start; + mac_dev->stop = stop; + mac_dev->change_promisc = change_promisc; + mac_dev->change_addr = change_addr; + mac_dev->set_multi = set_multi; + mac_dev->uninit = uninit; + mac_dev->ptp_enable = ptp_enable; + mac_dev->ptp_disable = ptp_disable; + mac_dev->fm_rtc_enable = fm_rtc_enable; + mac_dev->fm_rtc_disable = fm_rtc_disable; + mac_dev->fm_rtc_get_cnt = fm_rtc_get_cnt; + mac_dev->fm_rtc_set_cnt = fm_rtc_set_cnt; + mac_dev->fm_rtc_get_drift = fm_rtc_get_drift; + mac_dev->fm_rtc_set_drift = fm_rtc_set_drift; + mac_dev->fm_rtc_set_alarm = fm_rtc_set_alarm; + mac_dev->fm_rtc_set_fiper = fm_rtc_set_fiper; +} + +static void __cold setup_xgmac(struct mac_device *mac_dev) +{ + mac_dev->init_phy = xgmac_init_phy; + mac_dev->init = init; + mac_dev->start = start; + mac_dev->stop = stop; + mac_dev->change_promisc = change_promisc; + mac_dev->change_addr = change_addr; + mac_dev->set_multi = set_multi; + mac_dev->uninit = uninit; +} + +void (*const mac_setup[])(struct mac_device *mac_dev) = { + [DTSEC] = setup_dtsec, + [XGMAC] = setup_xgmac +}; --- linux-3.13.0.orig/drivers/net/dpa/mac.c +++ linux-3.13.0/drivers/net/dpa/mac.c @@ -0,0 +1,428 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "dpaa_eth-common.h" + +#include "lnxwrp_fm_ext.h" + +#include "mac.h" + +#define DTSEC_SUPPORTED \ + (SUPPORTED_10baseT_Half \ + | SUPPORTED_10baseT_Full \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_Autoneg \ + | SUPPORTED_MII) + +static const char phy_str[][11] = +{ + [PHY_INTERFACE_MODE_MII] = "mii", + [PHY_INTERFACE_MODE_GMII] = "gmii", + [PHY_INTERFACE_MODE_SGMII] = "sgmii", + [PHY_INTERFACE_MODE_TBI] = "tbi", + [PHY_INTERFACE_MODE_RMII] = "rmii", + [PHY_INTERFACE_MODE_RGMII] = "rgmii", + [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id", + [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid", + [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", + [PHY_INTERFACE_MODE_RTBI] = "rtbi", + [PHY_INTERFACE_MODE_XGMII] = "xgmii" +}; + +static phy_interface_t __pure __attribute__((nonnull)) str2phy(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phy_str); i++) + if (strcmp(str, phy_str[i]) == 0) + return (phy_interface_t)i; + + return PHY_INTERFACE_MODE_MII; +} + +static const uint16_t phy2speed[] = +{ + [PHY_INTERFACE_MODE_MII] = SPEED_100, + [PHY_INTERFACE_MODE_GMII] = SPEED_1000, + [PHY_INTERFACE_MODE_SGMII] = SPEED_1000, + [PHY_INTERFACE_MODE_TBI] = SPEED_1000, + [PHY_INTERFACE_MODE_RMII] = SPEED_100, + [PHY_INTERFACE_MODE_RGMII] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000, + [PHY_INTERFACE_MODE_RTBI] = SPEED_1000, + [PHY_INTERFACE_MODE_XGMII] = SPEED_10000 +}; + +static struct mac_device * __cold +alloc_macdev(struct device *dev, size_t sizeof_priv, void (*setup)(struct mac_device *mac_dev)) +{ + struct mac_device *mac_dev; + + mac_dev = devm_kzalloc(dev, sizeof(*mac_dev) + sizeof_priv, GFP_KERNEL); + if (unlikely(mac_dev == NULL)) + mac_dev = ERR_PTR(-ENOMEM); + else { + mac_dev->dev = dev; + dev_set_drvdata(dev, mac_dev); + setup(mac_dev); + } + + return mac_dev; +} + +static int __cold free_macdev(struct mac_device *mac_dev) +{ + dev_set_drvdata(mac_dev->dev, NULL); + + return mac_dev->uninit(mac_dev); +} + +static const struct of_device_id mac_match[] = { + [DTSEC] = { + .compatible = "fsl,fman-1g-mac" + }, + [XGMAC] = { + .compatible = "fsl,fman-10g-mac" + }, 
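+ /* sentinel */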
+ {} +}; +MODULE_DEVICE_TABLE(of, mac_match); + +static int __cold mac_probe(struct platform_device *_of_dev) +{ + int _errno, i, lenp; + struct device *dev; + struct device_node *mac_node, *dev_node; + struct mac_device *mac_dev; + struct platform_device *of_dev; + struct resource res; + const uint8_t *mac_addr; + const char *char_prop; + const phandle *phandle_prop; + const uint32_t *uint32_prop; + const struct of_device_id *match; + + dev = &_of_dev->dev; + mac_node = dev->of_node; + + match = of_match_device(mac_match, dev); + if (!match) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(mac_match) - 1 && match != mac_match + i; i++); + BUG_ON(i >= ARRAY_SIZE(mac_match) - 1); + + mac_dev = alloc_macdev(dev, mac_sizeof_priv[i], mac_setup[i]); + if (IS_ERR(mac_dev)) { + _errno = PTR_ERR(mac_dev); + dpaa_eth_err(dev, "alloc_macdev() = %d\n", _errno); + goto _return; + } + + INIT_LIST_HEAD(&mac_dev->mc_addr_list); + + /* Get the FM node */ + dev_node = of_get_parent(mac_node); + if (unlikely(dev_node == NULL)) { + dpaa_eth_err(dev, "of_get_parent(%s) failed\n", + mac_node->full_name); + _errno = -EINVAL; + goto _return_dev_set_drvdata; + } + + of_dev = of_find_device_by_node(dev_node); + if (unlikely(of_dev == NULL)) { + dpaa_eth_err(dev, "of_find_device_by_node(%s) failed\n", + dev_node->full_name); + _errno = -EINVAL; + goto _return_of_node_put; + } + + mac_dev->fm_dev = fm_bind(&of_dev->dev); + if (unlikely(mac_dev->fm_dev == NULL)) { + dpaa_eth_err(dev, "fm_bind(%s) failed\n", dev_node->full_name); + _errno = -ENODEV; + goto _return_of_node_put; + } + + mac_dev->fm = (void *)fm_get_handle(mac_dev->fm_dev); + of_node_put(dev_node); + + /* Get the address of the memory mapped registers */ + _errno = of_address_to_resource(mac_node, 0, &res); + if (unlikely(_errno < 0)) { + dpaa_eth_err(dev, "of_address_to_resource(%s) = %d\n", + mac_node->full_name, _errno); + goto _return_dev_set_drvdata; + } + + mac_dev->res = __devm_request_region( + dev, + fm_get_mem_region(mac_dev->fm_dev), + res.start, res.end + 1 - res.start, "mac"); + if (unlikely(mac_dev->res == NULL)) { + dpaa_eth_err(dev, "__devm_request_mem_region(mac) failed\n"); + _errno = -EBUSY; + goto _return_dev_set_drvdata; + } + + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, + mac_dev->res->end + 1 - mac_dev->res->start); + if (unlikely(mac_dev->vaddr == NULL)) { + dpaa_eth_err(dev, "devm_ioremap() failed\n"); + _errno = -EIO; + goto _return_dev_set_drvdata; + } + + /* + * XXX: Warning, future versions of Linux will most likely not even + * call the driver code to allow us to override the TBIPA value, + * we'll need to address this when we move to newer kernel rev + */ +#define TBIPA_OFFSET 0x1c +#define TBIPA_DEFAULT_ADDR 5 + mac_dev->tbi_node = of_parse_phandle(mac_node, "tbi-handle", 0); + if (mac_dev->tbi_node) { + u32 tbiaddr = TBIPA_DEFAULT_ADDR; + + uint32_prop = of_get_property(mac_dev->tbi_node, "reg", NULL); + if (uint32_prop) + tbiaddr = *uint32_prop; + out_be32(mac_dev->vaddr + TBIPA_OFFSET, tbiaddr); + } + + if (!of_device_is_available(mac_node)) { + devm_iounmap(dev, mac_dev->vaddr); + __devm_release_region(dev, fm_get_mem_region(mac_dev->fm_dev), + res.start, res.end + 1 - res.start); + fm_unbind(mac_dev->fm_dev); + devm_kfree(dev, mac_dev); + dev_set_drvdata(dev, NULL); + return -ENODEV; + } + + /* Get the cell-index */ + uint32_prop = of_get_property(mac_node, "cell-index", &lenp); + if (unlikely(uint32_prop == NULL)) { + dpaa_eth_err(dev, "of_get_property(%s, cell-index) failed\n", + mac_node->full_name); + 
_errno = -EINVAL; + goto _return_dev_set_drvdata; + } + BUG_ON(lenp != sizeof(uint32_t)); + mac_dev->cell_index = *uint32_prop; + + /* Get the MAC address */ + mac_addr = of_get_mac_address(mac_node); + if (unlikely(mac_addr == NULL)) { + dpaa_eth_err(dev, "of_get_mac_address(%s) failed\n", + mac_node->full_name); + _errno = -EINVAL; + goto _return_dev_set_drvdata; + } + memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); + + /* Get the port handles */ + phandle_prop = of_get_property(mac_node, "fsl,port-handles", &lenp); + if (unlikely(phandle_prop == NULL)) { + dpaa_eth_err(dev, "of_get_property(%s, port-handles) failed\n", + mac_node->full_name); + _errno = -EINVAL; + goto _return_dev_set_drvdata; + } + BUG_ON(lenp != sizeof(phandle) * ARRAY_SIZE(mac_dev->port_dev)); + + for_each_port_device(i, mac_dev->port_dev) { + /* Find the port node */ + dev_node = of_find_node_by_phandle(phandle_prop[i]); + if (unlikely(dev_node == NULL)) { + dpaa_eth_err(dev, "of_find_node_by_phandle() failed\n"); + _errno = -EINVAL; + goto _return_of_node_put; + } + + of_dev = of_find_device_by_node(dev_node); + if (unlikely(of_dev == NULL)) { + dpaa_eth_err(dev, "of_find_device_by_node(%s) failed\n", + dev_node->full_name); + _errno = -EINVAL; + goto _return_of_node_put; + } + + mac_dev->port_dev[i] = fm_port_bind(&of_dev->dev); + if (unlikely(mac_dev->port_dev[i] == NULL)) { + dpaa_eth_err(dev, "dev_get_drvdata(%s) failed\n", + dev_node->full_name); + _errno = -EINVAL; + goto _return_of_node_put; + } + of_node_put(dev_node); + } + + /* Get the PHY connection type */ + char_prop = (const char *)of_get_property(mac_node, + "phy-connection-type", NULL); + if (unlikely(char_prop == NULL)) { + dpaa_eth_warning(dev, + "of_get_property(%s, phy-connection-type) " + "failed. 
Defaulting to MII\n", + mac_node->full_name); + mac_dev->phy_if = PHY_INTERFACE_MODE_MII; + } else + mac_dev->phy_if = str2phy(char_prop); + + mac_dev->link = false; + mac_dev->half_duplex = false; + mac_dev->speed = phy2speed[mac_dev->phy_if]; + mac_dev->max_speed = mac_dev->speed; + mac_dev->if_support = DTSEC_SUPPORTED; + /* We don't support half-duplex in SGMII mode */ + if (strstr(char_prop, "sgmii")) + mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_100baseT_Half); + + /* Gigabit support (no half-duplex) */ + if (mac_dev->max_speed == 1000) + mac_dev->if_support |= SUPPORTED_1000baseT_Full; + + /* The 10G interface only supports one mode */ + if (strstr(char_prop, "xgmii")) + mac_dev->if_support = SUPPORTED_10000baseT_Full; + + /* Get the rest of the PHY information */ + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); + if (mac_dev->phy_node == NULL) { + int sz; + const u32 *phy_id = of_get_property(mac_node, "fixed-link", + &sz); + if (!phy_id || sz < sizeof(*phy_id)) { + cpu_dev_err(dev, "No PHY (or fixed link) found\n"); + _errno = -EINVAL; + goto _return_dev_set_drvdata; + } + + sprintf(mac_dev->fixed_bus_id, PHY_ID_FMT, "0", phy_id[0]); + } + + _errno = mac_dev->init(mac_dev); + if (unlikely(_errno < 0)) { + dpaa_eth_err(dev, "mac_dev->init() = %d\n", _errno); + goto _return_dev_set_drvdata; + } + + cpu_dev_info(dev, + "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", + mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], + mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); + + goto _return; + +_return_of_node_put: + of_node_put(dev_node); +_return_dev_set_drvdata: + dev_set_drvdata(dev, NULL); +_return: + return _errno; +} + +static int __cold mac_remove(struct platform_device *of_dev) +{ + int i, _errno; + struct device *dev; + struct mac_device *mac_dev; + + dev = &of_dev->dev; + mac_dev = (struct mac_device *)dev_get_drvdata(dev); + + for_each_port_device(i, mac_dev->port_dev) + fm_port_unbind(mac_dev->port_dev[i]); + + fm_unbind(mac_dev->fm_dev); + + _errno = free_macdev(mac_dev); + + return _errno; +} + +static struct platform_driver mac_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = mac_match, + .owner = THIS_MODULE, + }, + .probe = mac_probe, + .remove = mac_remove, +}; + +static int __init __cold mac_load(void) +{ + int _errno; + + cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__); + + cpu_pr_info(KBUILD_MODNAME ": %s (" VERSION ")\n", mac_driver_description); + + _errno = platform_driver_register(&mac_driver); + if (unlikely(_errno < 0)) { + cpu_pr_err(KBUILD_MODNAME ": %s:%hu:%s(): of_register_platform_driver() = %d\n", + __file__, __LINE__, __func__, _errno); + goto _return; + } + + goto _return; + +_return: + cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__); + + return _errno; +} +module_init(mac_load); + +static void __exit __cold mac_unload(void) +{ + cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__); + + platform_driver_unregister(&mac_driver); + + cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__); +} +module_exit(mac_unload); --- linux-3.13.0.orig/drivers/net/dpa/mac.h +++ linux-3.13.0/drivers/net/dpa/mac.h @@ -0,0 +1,113 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __MAC_H +#define __MAC_H + +#include /* struct device, BUS_ID_SIZE */ +#include /* ETH_ALEN */ +#include /* phy_interface_t, struct phy_device */ +#include + +#include "fsl_fman.h" /* struct port_device */ + +#ifndef CONFIG_DPA_MAX_FRM_SIZE +#define CONFIG_DPA_MAX_FRM_SIZE 0 +#endif + +enum {DTSEC, XGMAC}; + +struct mac_device { + struct device *dev; + void *priv; + uint8_t cell_index; + struct resource *res; + void *vaddr; + uint8_t addr[ETH_ALEN]; + bool promisc; + + struct fm *fm_dev; + struct fm_port *port_dev[2]; + + phy_interface_t phy_if; + u32 if_support; + bool link; + bool half_duplex; + uint16_t speed; + uint16_t max_speed; + struct device_node *phy_node; + char fixed_bus_id[MII_BUS_ID_SIZE + 3]; + struct device_node *tbi_node; + struct phy_device *phy_dev; + void *fm; + /* List of multicast addresses */ + struct list_head mc_addr_list; + + int (*init_phy)(struct net_device *net_dev); + int (*init)(struct mac_device *mac_dev); + int (*start)(struct mac_device *mac_dev); + int (*stop)(struct mac_device *mac_dev); + int (*change_promisc)(struct mac_device *mac_dev); + int (*change_addr)(struct mac_device *mac_dev, uint8_t *addr); + int (*set_multi)(struct net_device *net_dev); + int (*uninit)(struct mac_device *mac_dev); + int (*ptp_enable)(struct mac_device *mac_dev); + int (*ptp_disable)(struct mac_device *mac_dev); + int (*fm_rtc_enable)(struct net_device *net_dev); + int (*fm_rtc_disable)(struct net_device *net_dev); + int (*fm_rtc_get_cnt)(struct net_device *net_dev, uint64_t *ts); + int (*fm_rtc_set_cnt)(struct net_device *net_dev, uint64_t ts); + int (*fm_rtc_get_drift)(struct net_device *net_dev, uint32_t *drift); + int (*fm_rtc_set_drift)(struct net_device *net_dev, uint32_t drift); + int (*fm_rtc_set_alarm)(struct net_device *net_dev, uint32_t id, + 
uint64_t time); + int (*fm_rtc_set_fiper)(struct net_device *net_dev, uint32_t id, + uint64_t fiper); +}; + +struct mac_address { + uint8_t addr[ETH_ALEN]; + struct list_head list; +}; + +#define for_each_port_device(i, port_dev) \ + for (i = 0; i < ARRAY_SIZE(port_dev); i++) + +static inline void * __attribute((nonnull)) macdev_priv(const struct mac_device *mac_dev) +{ + return (void *)mac_dev + sizeof(*mac_dev); +} + +extern const char *mac_driver_description; +extern const size_t mac_sizeof_priv[]; +extern void (*const mac_setup[])(struct mac_device *mac_dev); + +#endif /* __MAC_H */ --- linux-3.13.0.orig/drivers/net/dpa/offline_port.c +++ linux-3.13.0/drivers/net/dpa/offline_port.c @@ -0,0 +1,340 @@ +/* + * Copyright 2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Offline Parsing / Host Command port driver for FSL QorIQ FMan. + * Validates device-tree configuration and sets up the offline ports. 
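+ * Ports can be private to this partition or shared with another partition
+ * ("fsl,dpa-oh-shared"); the probe routine decides from the compatible
+ * string whether this partition should initialize a given port.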
+ */ + +#include +#include +#include + +#include "offline_port.h" +#include "dpaa_eth-common.h" + +#define OH_MOD_DESCRIPTION "FSL FMan Offline Parsing port driver" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Bogdan Hamciuc "); +MODULE_DESCRIPTION(OH_MOD_DESCRIPTION); + + +static const struct of_device_id oh_port_match_table[] = { + { + .compatible = "fsl,dpa-oh" + }, + { + .compatible = "fsl,dpa-oh-shared" + }, + {} +}; +MODULE_DEVICE_TABLE(of, oh_port_match_table); + +static int oh_port_remove(struct platform_device *_of_dev); +static int oh_port_probe(struct platform_device *_of_dev); + +static struct platform_driver oh_port_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = oh_port_match_table, + .owner = THIS_MODULE, + }, + .probe = oh_port_probe, + .remove = oh_port_remove, +}; + +/* Allocation code for the OH port's PCD frame queues */ +static int __cold oh_alloc_pcd_fqids(struct device *dev, + uint32_t num, + uint8_t alignment, + uint32_t *base_fqid) +{ + cpu_dev_crit(dev, "callback not implemented!\n"); + BUG(); + + return 0; +} + +static int __cold oh_free_pcd_fqids(struct device *dev, uint32_t base_fqid) +{ + dpaa_eth_crit(dev, "callback not implemented!\n"); + BUG(); + + return 0; +} + +static int +oh_port_probe(struct platform_device *_of_dev) +{ + struct device *dpa_oh_dev; + struct device_node *dpa_oh_node; + int lenp, _errno = 0, fq_idx; + const phandle *oh_port_handle; + struct platform_device *oh_of_dev; + struct device_node *oh_node; + struct device *oh_dev; + struct dpa_oh_config_s *oh_config; + uint32_t *oh_all_queues; + uint32_t queues_count; + uint32_t crt_fqid_base; + uint32_t crt_fq_count; + struct fm_port_non_rx_params oh_port_tx_params; + struct fm_port_pcd_param oh_port_pcd_params; + /* True if the current partition owns the OH port. */ + bool init_oh_port; + const struct of_device_id *match; + + dpa_oh_dev = &_of_dev->dev; + dpa_oh_node = dpa_oh_dev->of_node; + BUG_ON(dpa_oh_node == NULL); + + match = of_match_device(oh_port_match_table, dpa_oh_dev); + if (!match) + return -EINVAL; + + cpu_dev_dbg(dpa_oh_dev, "Probing OH port...\n"); + + /* + * Find the referenced OH node + */ + + oh_port_handle = of_get_property(dpa_oh_node, + "fsl,fman-oh-port", &lenp); + if (oh_port_handle == NULL) { + cpu_dev_err(dpa_oh_dev, "No OH port handle found in node %s\n", + dpa_oh_node->full_name); + return -EINVAL; + } + + BUG_ON(lenp % sizeof(*oh_port_handle)); + if (lenp != sizeof(*oh_port_handle)) { + cpu_dev_err(dpa_oh_dev, "Found %lu OH port bindings in node %s, " + "only 1 phandle is allowed.\n", + (unsigned long int)(lenp / sizeof(*oh_port_handle)), dpa_oh_node->full_name); + return -EINVAL; + } + + /* Read configuration for the OH port */ + oh_node = of_find_node_by_phandle(*oh_port_handle); + if (oh_node == NULL) { + cpu_dev_err(dpa_oh_dev, "Can't find OH node referenced from " + "node %s\n", dpa_oh_node->full_name); + return -EINVAL; + } + cpu_dev_info(dpa_oh_dev, "Found OH node handle compatible with %s.\n", + match->compatible); + + oh_of_dev = of_find_device_by_node(oh_node); + BUG_ON(oh_of_dev == NULL); + oh_dev = &oh_of_dev->dev; + of_node_put(oh_node); + + /* + * The OH port must be initialized exactly once. 
+ * The following scenarios are of interest: + * - the node is Linux-private (will always initialize it); + * - the node is shared between two Linux partitions + * (only one of them will initialize it); + * - the node is shared between a Linux and a LWE partition + * (Linux will initialize it) - "fsl,dpa-oh-shared" + */ + + /* Check if the current partition owns the OH port + * and ought to initialize it. It may be the case that we leave this + * to another (also Linux) partition. */ + init_oh_port = strcmp(match->compatible, "fsl,dpa-oh-shared"); + + /* If we aren't the "owner" of the OH node, we're done here. */ + if (!init_oh_port) { + cpu_dev_dbg(dpa_oh_dev, "Not owning the shared OH port %s, " + "will not initialize it.\n", oh_node->full_name); + return 0; + } + + /* Allocate OH dev private data */ + oh_config = devm_kzalloc(dpa_oh_dev, sizeof(*oh_config), GFP_KERNEL); + if (oh_config == NULL) { + cpu_dev_err(dpa_oh_dev, "Can't allocate private data for " + "OH node %s referenced from node %s!\n", + oh_node->full_name, dpa_oh_node->full_name); + return -ENOMEM; + } + + /* + * Read FQ ids/nums for the DPA OH node + */ + oh_all_queues = (uint32_t *)of_get_property(dpa_oh_node, + "fsl,qman-frame-queues-oh", &lenp); + if (oh_all_queues == NULL) { + cpu_dev_err(dpa_oh_dev, "No frame queues have been " + "defined for OH node %s referenced from node %s\n", + oh_node->full_name, dpa_oh_node->full_name); + _errno = -EINVAL; + goto return_kfree; + } + + /* Check that the OH error and default FQs are there */ + BUG_ON(lenp % (2 * sizeof(*oh_all_queues))); + queues_count = lenp / (2 * sizeof(*oh_all_queues)); + if (queues_count != 2) { + dpaa_eth_err(dpa_oh_dev, "Error and Default queues must be " + "defined for OH node %s referenced from node %s\n", + oh_node->full_name, dpa_oh_node->full_name); + _errno = -EINVAL; + goto return_kfree; + } + + /* Read the FQIDs defined for this OH port */ + cpu_dev_dbg(dpa_oh_dev, "Reading %d queues...\n", queues_count); + fq_idx = 0; + + /* Error FQID - must be present */ + crt_fqid_base = oh_all_queues[fq_idx++]; + crt_fq_count = oh_all_queues[fq_idx++]; + if (crt_fq_count != 1) { + cpu_dev_err(dpa_oh_dev, "Only 1 Error FQ allowed in OH node %s " + "referenced from node %s (read: %d FQIDs).\n", + oh_node->full_name, dpa_oh_node->full_name, + crt_fq_count); + _errno = -EINVAL; + goto return_kfree; + } + oh_config->error_fqid = crt_fqid_base; + cpu_dev_dbg(dpa_oh_dev, "Read Error FQID 0x%x for OH port %s.\n", + oh_config->error_fqid, oh_node->full_name); + + /* Default FQID - must be present */ + crt_fqid_base = oh_all_queues[fq_idx++]; + crt_fq_count = oh_all_queues[fq_idx++]; + if (crt_fq_count != 1) { + cpu_dev_err(dpa_oh_dev, "Only 1 Default FQ allowed " + "in OH node %s referenced from %s (read: %d FQIDs).\n", + oh_node->full_name, dpa_oh_node->full_name, + crt_fq_count); + _errno = -EINVAL; + goto return_kfree; + } + oh_config->default_fqid = crt_fqid_base; + cpu_dev_dbg(dpa_oh_dev, "Read Default FQID 0x%x for OH port %s.\n", + oh_config->default_fqid, oh_node->full_name); + + /* Get a handle to the fm_port so we can set + * its configuration params */ + oh_config->oh_port = fm_port_bind(oh_dev); + if (oh_config->oh_port == NULL) { + cpu_dev_err(dpa_oh_dev, "NULL drvdata from fm port dev %s!\n", + oh_node->full_name); + _errno = -EINVAL; + goto return_kfree; + } + + /* Set Tx params */ + dpaa_eth_init_port(tx, oh_config->oh_port, oh_port_tx_params, + oh_config->error_fqid, oh_config->default_fqid, FALSE); + /* Set PCD params */ + oh_port_pcd_params.cba = 
oh_alloc_pcd_fqids; + oh_port_pcd_params.cbf = oh_free_pcd_fqids; + oh_port_pcd_params.dev = dpa_oh_dev; + fm_port_pcd_bind(oh_config->oh_port, &oh_port_pcd_params); + + dev_set_drvdata(dpa_oh_dev, oh_config); + + /* Enable the OH port */ + fm_port_enable(oh_config->oh_port); + cpu_dev_info(dpa_oh_dev, "OH port %s enabled.\n", oh_node->full_name); + + return 0; + +return_kfree: + devm_kfree(dpa_oh_dev, oh_config); + return _errno; +} + +static int __cold oh_port_remove(struct platform_device *_of_dev) +{ + int _errno = 0; + struct dpa_oh_config_s *oh_config; + + cpu_pr_info("Removing OH port...\n"); + + oh_config = dev_get_drvdata(&_of_dev->dev); + if (oh_config == NULL) { + cpu_pr_err(KBUILD_MODNAME + ": %s:%hu:%s(): No OH config in device private data!\n", + __file__, __LINE__, __func__); + _errno = -ENODEV; + goto return_error; + } + if (oh_config->oh_port == NULL) { + cpu_pr_err(KBUILD_MODNAME + ": %s:%hu:%s(): No fm port in device private data!\n", + __file__, __LINE__, __func__); + _errno = -EINVAL; + goto return_error; + } + + fm_port_disable(oh_config->oh_port); + devm_kfree(&_of_dev->dev, oh_config); + dev_set_drvdata(&_of_dev->dev, NULL); + +return_error: + return _errno; +} + +static int __init __cold oh_port_load(void) +{ + int _errno; + + cpu_pr_info(KBUILD_MODNAME ": " OH_MOD_DESCRIPTION " (" VERSION ")\n"); + + _errno = platform_driver_register(&oh_port_driver); + if (_errno < 0) { + cpu_pr_err(KBUILD_MODNAME + ": %s:%hu:%s(): platform_driver_register() = %d\n", + __file__, __LINE__, __func__, _errno); + } + + cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__); + return _errno; +} +module_init(oh_port_load); + +static void __exit __cold oh_port_unload(void) +{ + cpu_pr_debug(KBUILD_MODNAME ": -> %s:%s()\n", __file__, __func__); + + platform_driver_unregister(&oh_port_driver); + + cpu_pr_debug(KBUILD_MODNAME ": %s:%s() ->\n", __file__, __func__); +} +module_exit(oh_port_unload); --- linux-3.13.0.orig/drivers/net/dpa/offline_port.h +++ linux-3.13.0/drivers/net/dpa/offline_port.h @@ -0,0 +1,45 @@ +/* + * Copyright 2011 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __OFFLINE_PORT_H +#define __OFFLINE_PORT_H + +#include "fsl_fman.h" + +/* OH port configuration */ +struct dpa_oh_config_s { + uint32_t error_fqid; + uint32_t default_fqid; + struct fm_port *oh_port; +}; + +#endif /* __OFFLINE_PORT_H */ --- linux-3.13.0.orig/drivers/net/dummy.c +++ linux-3.13.0/drivers/net/dummy.c @@ -63,10 +63,10 @@ dstats = per_cpu_ptr(dev->dstats, i); do { - start = u64_stats_fetch_begin_bh(&dstats->syncp); + start = u64_stats_fetch_begin_irq(&dstats->syncp); tbytes = dstats->tx_bytes; tpackets = dstats->tx_packets; - } while (u64_stats_fetch_retry_bh(&dstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpackets; } --- linux-3.13.0.orig/drivers/net/ethernet/Kconfig +++ linux-3.13.0/drivers/net/ethernet/Kconfig @@ -23,6 +23,7 @@ source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" source "drivers/net/ethernet/amd/Kconfig" +source "drivers/net/ethernet/apm/Kconfig" source "drivers/net/ethernet/apple/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" --- linux-3.13.0.orig/drivers/net/ethernet/Makefile +++ linux-3.13.0/drivers/net/ethernet/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ obj-$(CONFIG_NET_VENDOR_AMD) += amd/ +obj-$(CONFIG_NET_XGENE) += apm/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_ARC) += arc/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ --- linux-3.13.0.orig/drivers/net/ethernet/allwinner/sun4i-emac.c +++ linux-3.13.0/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -623,8 +623,10 @@ } /* Move data from EMAC */ - skb = dev_alloc_skb(rxlen + 4); - if (good_packet && skb) { + if (good_packet) { + skb = netdev_alloc_skb(dev, rxlen + 4); + if (!skb) + continue; skb_reserve(skb, 2); rdptr = (u8 *) skb_put(skb, rxlen - 4); @@ -726,6 +728,7 @@ ret = emac_mdio_probe(dev); if (ret < 0) { + free_irq(dev->irq, dev); netdev_err(dev, "cannot probe MDIO bus\n"); return ret; } --- linux-3.13.0.orig/drivers/net/ethernet/amd/pcnet32.c +++ linux-3.13.0/drivers/net/ethernet/amd/pcnet32.c @@ -1516,7 +1516,7 @@ { struct pcnet32_private *lp; int i, media; - int fdx, mii, fset, dxsuflo; + int fdx, mii, fset, dxsuflo, sram; int chip_version; char *chipname; struct net_device *dev; @@ -1553,7 +1553,7 @@ } /* initialize variables */ - fdx = mii = fset = dxsuflo = 0; + fdx = mii = fset = dxsuflo = sram = 0; chip_version = (chip_version >> 12) & 0xffff; switch (chip_version) { @@ -1586,6 +1586,7 @@ chipname = "PCnet/FAST III 79C973"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2626: chipname = "PCnet/Home 79C978"; /* PCI */ @@ -1609,6 +1610,7 @@ chipname = "PCnet/FAST III 79C975"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2628: chipname = "PCnet/PRO 79C976"; @@ -1637,6 +1639,31 @@ dxsuflo = 1; } + /* + * The Am79C973/Am79C975 controllers come with 12K of SRAM + * which we can 
use for the Tx/Rx buffers but most importantly, + * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid + * Tx fifo underflows. + */ + if (sram) { + /* + * The SRAM is being configured in two steps. First we + * set the SRAM size in the BCR25:SRAM_SIZE bits. According + * to the datasheet, each bit corresponds to a 512-byte + * page so we can have at most 24 pages. The SRAM_SIZE + * holds the value of the upper 8 bits of the 16-bit SRAM size. + * The low 8-bits start at 0x00 and end at 0xff. So the + * address range is from 0x0000 up to 0x17ff. Therefore, + * the SRAM_SIZE is set to 0x17. The next step is to set + * the BCR26:SRAM_BND midway through so the Tx and Rx + * buffers can share the SRAM equally. + */ + a->write_bcr(ioaddr, 25, 0x17); + a->write_bcr(ioaddr, 26, 0xc); + /* And finally enable the NOUFLO bit */ + a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); + } + dev = alloc_etherdev(sizeof(*lp)); if (!dev) { ret = -ENOMEM; --- linux-3.13.0.orig/drivers/net/ethernet/apm/Kconfig +++ linux-3.13.0/drivers/net/ethernet/apm/Kconfig @@ -0,0 +1 @@ +source "drivers/net/ethernet/apm/xgene/Kconfig" --- linux-3.13.0.orig/drivers/net/ethernet/apm/Makefile +++ linux-3.13.0/drivers/net/ethernet/apm/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for APM X-GENE Ethernet driver. +# + +obj-$(CONFIG_NET_XGENE) += xgene/ --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/Kconfig +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/Kconfig @@ -0,0 +1,9 @@ +config NET_XGENE + tristate "APM X-Gene SoC Ethernet Driver" + select PHYLIB + help + This is the Ethernet driver for the on-chip ethernet interface on the + APM X-Gene SoC. + + To compile this driver as a module, choose M here. This module will + be called xgene_enet. --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/Makefile +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for APM X-Gene Ethernet Driver. +# + +xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \ + xgene_enet_main.o xgene_enet_ethtool.o +obj-$(CONFIG_NET_XGENE) += xgene-enet.o --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -0,0 +1,150 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include "xgene_enet_main.h" + +struct xgene_gstrings_stats { + char name[ETH_GSTRING_LEN]; + int offset; +}; + +#define XGENE_STAT(m) { #m, offsetof(struct xgene_enet_pdata, stats.m) } + +static const struct xgene_gstrings_stats gstrings_stats[] = { + XGENE_STAT(rx_packets), + XGENE_STAT(tx_packets), + XGENE_STAT(rx_bytes), + XGENE_STAT(tx_bytes), + XGENE_STAT(rx_errors), + XGENE_STAT(tx_errors), + XGENE_STAT(rx_length_errors), + XGENE_STAT(rx_crc_errors), + XGENE_STAT(rx_frame_errors), + XGENE_STAT(rx_fifo_errors) +}; + +#define XGENE_STATS_LEN ARRAY_SIZE(gstrings_stats) + +static void xgene_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct platform_device *pdev = pdata->pdev; + + strcpy(info->driver, "xgene_enet"); + strcpy(info->version, XGENE_DRV_VERSION); + snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "N/A"); + sprintf(info->bus_info, "%s", pdev->name); +} + +static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); + } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { + cmd->supported = SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_MII; + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE; + cmd->advertising = cmd->supported; + ethtool_cmd_speed_set(cmd, SPEED_10000); + cmd->duplex = DUPLEX_FULL; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + cmd->autoneg = AUTONEG_DISABLE; + } + + return 0; +} + +static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { + if (phydev == NULL) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); + } + + return -EINVAL; +} + +static void xgene_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < XGENE_STATS_LEN; i++) { + memcpy(p, gstrings_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +} + +static int xgene_get_sset_count(struct net_device *ndev, int sset) +{ + if (sset != ETH_SS_STATS) + return -EINVAL; + + return XGENE_STATS_LEN; +} + +static void xgene_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *dummy, + u64 *data) +{ + void *pdata = netdev_priv(ndev); + int i; + + for (i = 0; i < XGENE_STATS_LEN; i++) + *data++ = *(u64 *)(pdata + gstrings_stats[i].offset); +} + +static const struct ethtool_ops xgene_ethtool_ops = { + .get_drvinfo = xgene_get_drvinfo, + .get_settings = xgene_get_settings, + .set_settings = xgene_set_settings, + .get_link = ethtool_op_get_link, + .get_strings = xgene_get_strings, + .get_sset_count = xgene_get_sset_count, + .get_ethtool_stats = xgene_get_ethtool_stats +}; + +void xgene_enet_set_ethtool_ops(struct net_device *ndev) +{ + ndev->ethtool_ops = &xgene_ethtool_ops; +} --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ 
linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -0,0 +1,759 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" + +static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + u64 addr = ring->dma; + enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize; + + ring_cfg[4] |= (1 << SELTHRSH_POS) & + CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN); + ring_cfg[3] |= ACCEPTLERR; + ring_cfg[2] |= QCOHERENT; + + addr >>= 8; + ring_cfg[2] |= (addr << RINGADDRL_POS) & + CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN); + addr >>= RINGADDRL_LEN; + ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN); + ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) & + CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN); +} + +static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + bool is_bufpool; + u32 val; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + val = (is_bufpool) ? 
RING_BUFPOOL : RING_REGULAR; + ring_cfg[4] |= (val << RINGTYPE_POS) & + CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN); + + if (is_bufpool) { + ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) & + CREATE_MASK(RINGMODE_POS, RINGMODE_LEN); + } +} + +static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) +{ + u32 *ring_cfg = ring->state; + + ring_cfg[3] |= RECOMBBUF; + ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) & + CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN); + ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN); +} + +static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring, + u32 offset, u32 data) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + iowrite32(data, pdata->ring_csr_addr + offset); +} + +static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring, + u32 offset, u32 *data) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + *data = ioread32(pdata->ring_csr_addr + offset); +} + +static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring) +{ + int i; + + xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num); + for (i = 0; i < NUM_RING_CONFIG; i++) { + xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4), + ring->state[i]); + } +} + +static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring) +{ + memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG); + xgene_enet_write_ring_state(ring); +} + +static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring) +{ + xgene_enet_ring_set_type(ring); + + if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0) + xgene_enet_ring_set_recombbuf(ring); + + xgene_enet_ring_init(ring); + xgene_enet_write_ring_state(ring); +} + +static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring) +{ + u32 ring_id_val, ring_id_buf; + bool is_bufpool; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + + ring_id_val = ring->id & GENMASK(9, 0); + ring_id_val |= OVERWRITE; + + ring_id_buf = (ring->num << 9) & GENMASK(18, 9); + ring_id_buf |= PREFETCH_BUF_EN; + if (is_bufpool) + ring_id_buf |= IS_BUFFER_POOL; + + xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val); + xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf); +} + +static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring) +{ + u32 ring_id; + + ring_id = ring->id | OVERWRITE; + xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id); + xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0); +} + +struct xgene_enet_desc_ring *xgene_enet_setup_ring( + struct xgene_enet_desc_ring *ring) +{ + u32 size = ring->size; + u32 i, data; + bool is_bufpool; + + xgene_enet_clr_ring_state(ring); + xgene_enet_set_ring_state(ring); + xgene_enet_set_ring_id(ring); + + ring->slots = xgene_enet_get_numslots(ring->id, size); + + is_bufpool = xgene_enet_is_bufpool(ring->id); + if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) + return ring; + + for (i = 0; i < ring->slots; i++) + xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]); + + xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); + data |= BIT(31 - xgene_enet_ring_bufnum(ring->id)); + xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); + + return ring; +} + +void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring) +{ + u32 data; + bool is_bufpool; + + is_bufpool = xgene_enet_is_bufpool(ring->id); + if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU) + goto out; + + xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data); + data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id)); + 
xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data); + +out: + xgene_enet_clr_desc_ring_id(ring); + xgene_enet_clr_ring_state(ring); +} + +void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, + struct xgene_enet_pdata *pdata, + enum xgene_enet_err_code status) +{ + struct rtnl_link_stats64 *stats = &pdata->stats; + + switch (status) { + case INGRESS_CRC: + stats->rx_crc_errors++; + break; + case INGRESS_CHECKSUM: + case INGRESS_CHECKSUM_COMPUTE: + stats->rx_errors++; + break; + case INGRESS_TRUNC_FRAME: + stats->rx_frame_errors++; + break; + case INGRESS_PKT_LEN: + stats->rx_length_errors++; + break; + case INGRESS_PKT_UNDER: + stats->rx_frame_errors++; + break; + case INGRESS_FIFO_OVERRUN: + stats->rx_fifo_errors++; + break; + default: + break; + } +} + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_ring_if_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->mcx_mac_csr_addr + offset; + + iowrite32(val, addr); +} + +static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, + void __iomem *cmd, void __iomem *cmd_done, + u32 wr_addr, u32 wr_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(wr_addr, addr); + iowrite32(wr_data, wr); + iowrite32(XGENE_ENET_WR_CMD, cmd); + + /* wait for write command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", + wr_addr); +} + +static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->mcx_mac_csr_addr + offset; + + *val = ioread32(addr); +} + +static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, + void __iomem *cmd, void __iomem *cmd_done, + u32 rd_addr, u32 *rd_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(rd_addr, addr); + iowrite32(XGENE_ENET_RD_CMD, cmd); + + /* wait for read command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + *rd_data = ioread32(rd); + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata, + u32 rd_addr, 
u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data)) + netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", + rd_addr); +} + +static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id, + u32 reg, u16 data) +{ + u32 addr = 0, wr_data = 0; + u32 done; + u8 wait = 10; + + PHY_ADDR_SET(&addr, phy_id); + REG_ADDR_SET(&addr, reg); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); + + PHY_CONTROL_SET(&wr_data, data); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data); + do { + usleep_range(5, 10); + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + netdev_err(pdata->ndev, "MII_MGMT write failed\n"); + return -EBUSY; + } + + return 0; +} + +static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata, + u8 phy_id, u32 reg) +{ + u32 addr = 0; + u32 data, done; + u8 wait = 10; + + PHY_ADDR_SET(&addr, phy_id); + REG_ADDR_SET(&addr, reg); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + do { + usleep_range(5, 10); + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + netdev_err(pdata->ndev, "MII_MGMT read failed\n"); + return -EBUSY; + } + + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); + + return data; +} + +static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) +{ + u32 addr0, addr1; + u8 *dev_addr = pdata->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); + + xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0); + xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + u32 data; + u8 wait = 10; + + xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); + do { + usleep_range(100, 110); + xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); + } while ((data != 0xffffffff) && wait--); + + if (data != 0xffffffff) { + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +static void xgene_gmac_reset(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0); +} + +static void xgene_gmac_init(struct xgene_enet_pdata *pdata) +{ + u32 value, mc2; + u32 intf_ctl, rgmii; + u32 icm0, icm2; + + xgene_gmac_reset(pdata); + + xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0); + xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2); + xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl); + xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii); + + switch (pdata->phy_speed) { + case SPEED_10: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + CFG_MACMODE_SET(&icm0, 0); + CFG_WAITASYNCRD_SET(&icm2, 500); + rgmii &= ~CFG_SPEED_1250; + break; + case SPEED_100: + 
ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl |= ENET_LHD_MODE; + CFG_MACMODE_SET(&icm0, 1); + CFG_WAITASYNCRD_SET(&icm2, 80); + rgmii &= ~CFG_SPEED_1250; + break; + default: + ENET_INTERFACE_MODE2_SET(&mc2, 2); + intf_ctl |= ENET_GHD_MODE; + CFG_TXCLK_MUXSEL0_SET(&rgmii, 4); + xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value); + value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value); + break; + } + + mc2 |= FULL_DUPLEX2; + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); + xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); + + xgene_gmac_set_mac_addr(pdata); + + /* Adjust MDC clock frequency */ + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value); + MGMT_CLOCK_SEL_SET(&value, 7); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value); + + /* Enable drop if bufpool not available */ + xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value); + value |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value); + + /* Rtype should be copied from FP */ + xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); + xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); + + /* Rx-Tx traffic resume */ + xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); + + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); + + xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); + value &= ~TX_DV_GATE_EN0; + value &= ~RX_DV_GATE_EN0; + value |= RESUME_RX0; + xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value); + + xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX); +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) +{ + u32 val = 0xffffffff; + + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val); +} + +static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 cb; + u32 fpsel; + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); + cb |= CFG_CLE_BYPASS_EN0; + CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); + + xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); + CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); + CFG_CLE_FPSEL0_SET(&cb, fpsel); + xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb); +} + +static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN); +} + +static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN); +} + +static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN); +} + +static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data); + xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); +} + +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) +{ + if (!ioread32(p->ring_csr_addr + 
CLKEN_ADDR)) + return false; + + if (ioread32(p->ring_csr_addr + SRST_ADDR)) + return false; + + return true; +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) +{ + u32 val; + + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + + clk_prepare_enable(pdata->clk); + clk_disable_unprepare(pdata->clk); + clk_prepare_enable(pdata->clk); + xgene_enet_ecc_init(pdata); + xgene_enet_config_ring_if_assoc(pdata); + + /* Enable auto-incr for scanning */ + xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val); + val |= SCAN_AUTO_INCR; + MGMT_CLOCK_SEL_SET(&val, 1); + xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + + return 0; +} + +static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) +{ + clk_disable_unprepare(pdata->clk); +} + +static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct xgene_enet_pdata *pdata = bus->priv; + u32 val; + + val = xgene_mii_phy_read(pdata, mii_id, regnum); + netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", + mii_id, regnum, val); + + return val; +} + +static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 val) +{ + struct xgene_enet_pdata *pdata = bus->priv; + + netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", + mii_id, regnum, val); + return xgene_mii_phy_write(pdata, mii_id, regnum, val); +} + +static void xgene_enet_adjust_link(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct phy_device *phydev = pdata->phy_dev; + + if (phydev->link) { + if (pdata->phy_speed != phydev->speed) { + pdata->phy_speed = phydev->speed; + xgene_gmac_init(pdata); + xgene_gmac_rx_enable(pdata); + xgene_gmac_tx_enable(pdata); + phy_print_status(phydev); + } + } else { + xgene_gmac_rx_disable(pdata); + xgene_gmac_tx_disable(pdata); + pdata->phy_speed = SPEED_UNKNOWN; + phy_print_status(phydev); + } +} + +static int xgene_enet_phy_connect(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device_node *phy_np; + struct phy_device *phy_dev; + struct device *dev = &pdata->pdev->dev; + + phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); + if (!phy_np) { + netdev_dbg(ndev, "No phy-handle found\n"); + return -ENODEV; + } + + phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, + 0, pdata->phy_mode); + if (!phy_dev) { + netdev_err(ndev, "Could not connect to PHY\n"); + return -ENODEV; + } + + pdata->phy_speed = SPEED_UNKNOWN; + phy_dev->supported &= ~SUPPORTED_10baseT_Half & + ~SUPPORTED_100baseT_Half & + ~SUPPORTED_1000baseT_Half; + phy_dev->advertising = phy_dev->supported; + pdata->phy_dev = phy_dev; + + return 0; +} + +int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct device *dev = &pdata->pdev->dev; + struct device_node *child_np; + struct device_node *mdio_np = NULL; + struct mii_bus *mdio_bus; + int ret; + + for_each_child_of_node(dev->of_node, child_np) { + if (of_device_is_compatible(child_np, "apm,xgene-mdio")) { + mdio_np = child_np; + break; + } + } + + if (!mdio_np) { + netdev_dbg(ndev, "No mdio node in the dts\n"); + return -ENXIO; + } + + mdio_bus = mdiobus_alloc(); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "APM X-Gene MDIO bus"; + mdio_bus->read = xgene_enet_mdio_read; + mdio_bus->write = xgene_enet_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", + ndev->name); + + mdio_bus->priv = pdata; + mdio_bus->parent = &ndev->dev; + + ret = of_mdiobus_register(mdio_bus, 
mdio_np); + if (ret) { + netdev_err(ndev, "Failed to register MDIO bus\n"); + mdiobus_free(mdio_bus); + return ret; + } + pdata->mdio_bus = mdio_bus; + + ret = xgene_enet_phy_connect(ndev); + if (ret) + xgene_enet_mdio_remove(pdata); + + return ret; +} + +void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) +{ + mdiobus_unregister(pdata->mdio_bus); + mdiobus_free(pdata->mdio_bus); + pdata->mdio_bus = NULL; +} + +struct xgene_mac_ops xgene_gmac_ops = { + .init = xgene_gmac_init, + .reset = xgene_gmac_reset, + .rx_enable = xgene_gmac_rx_enable, + .tx_enable = xgene_gmac_tx_enable, + .rx_disable = xgene_gmac_rx_disable, + .tx_disable = xgene_gmac_tx_disable, + .set_mac_addr = xgene_gmac_set_mac_addr, +}; + +struct xgene_port_ops xgene_gport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_cle_bypass, + .shutdown = xgene_gport_shutdown, +}; --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -0,0 +1,329 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_HW_H__ +#define __XGENE_ENET_HW_H__ + +#include "xgene_enet_main.h" + +struct xgene_enet_pdata; +struct xgene_enet_stats; + +/* clears and then set bits */ +static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len) +{ + u32 end = start + len - 1; + u32 mask = GENMASK(end, start); + + *dst &= ~mask; + *dst |= (val << start) & mask; +} + +static inline u32 xgene_get_bits(u32 val, u32 start, u32 end) +{ + return (val & GENMASK(end, start)) >> start; +} + +enum xgene_enet_rm { + RM0, + RM1, + RM3 = 3 +}; + +#define CSR_RING_ID 0x0008 +#define OVERWRITE BIT(31) +#define IS_BUFFER_POOL BIT(20) +#define PREFETCH_BUF_EN BIT(21) +#define CSR_RING_ID_BUF 0x000c +#define CSR_RING_NE_INT_MODE 0x017c +#define CSR_RING_CONFIG 0x006c +#define CSR_RING_WR_BASE 0x0070 +#define NUM_RING_CONFIG 5 +#define BUFPOOL_MODE 3 +#define INC_DEC_CMD_ADDR 0x002c +#define UDP_HDR_SIZE 2 +#define BUF_LEN_CODE_2K 0x5000 + +#define CREATE_MASK(pos, len) GENMASK((pos)+(len)-1, (pos)) +#define CREATE_MASK_ULL(pos, len) GENMASK_ULL((pos)+(len)-1, (pos)) + +/* Empty slot soft signature */ +#define EMPTY_SLOT_INDEX 1 +#define EMPTY_SLOT ~0ULL + +#define WORK_DESC_SIZE 32 +#define BUFPOOL_DESC_SIZE 16 + +#define RING_OWNER_MASK GENMASK(9, 6) +#define RING_BUFNUM_MASK GENMASK(5, 0) + +#define SELTHRSH_POS 3 +#define SELTHRSH_LEN 3 +#define RINGADDRL_POS 5 +#define RINGADDRL_LEN 27 +#define RINGADDRH_POS 0 +#define RINGADDRH_LEN 6 +#define RINGSIZE_POS 23 +#define RINGSIZE_LEN 3 +#define RINGTYPE_POS 19 +#define RINGTYPE_LEN 2 +#define RINGMODE_POS 20 +#define RINGMODE_LEN 3 +#define RECOMTIMEOUTL_POS 28 +#define RECOMTIMEOUTL_LEN 3 +#define RECOMTIMEOUTH_POS 0 +#define RECOMTIMEOUTH_LEN 2 +#define NUMMSGSINQ_POS 1 +#define NUMMSGSINQ_LEN 16 +#define ACCEPTLERR BIT(19) +#define QCOHERENT BIT(4) +#define RECOMBBUF BIT(27) + +#define BLOCK_ETH_CSR_OFFSET 0x2000 +#define BLOCK_ETH_RING_IF_OFFSET 0x9000 +#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 + +#define BLOCK_ETH_MAC_OFFSET 0x0000 +#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 + +#define CLKEN_ADDR 0xc208 +#define SRST_ADDR 0xc200 + +#define MAC_ADDR_REG_OFFSET 0x00 +#define MAC_COMMAND_REG_OFFSET 0x04 +#define MAC_WRITE_REG_OFFSET 0x08 +#define MAC_READ_REG_OFFSET 0x0c +#define MAC_COMMAND_DONE_REG_OFFSET 0x10 + +#define MII_MGMT_CONFIG_ADDR 0x20 +#define MII_MGMT_COMMAND_ADDR 0x24 +#define MII_MGMT_ADDRESS_ADDR 0x28 +#define MII_MGMT_CONTROL_ADDR 0x2c +#define MII_MGMT_STATUS_ADDR 0x30 +#define MII_MGMT_INDICATORS_ADDR 0x34 + +#define BUSY_MASK BIT(0) +#define READ_CYCLE_MASK BIT(0) +#define PHY_CONTROL_SET(dst, val) xgene_set_bits(dst, val, 0, 16) + +#define ENET_SPARE_CFG_REG_ADDR 0x0750 +#define RSIF_CONFIG_REG_ADDR 0x0010 +#define RSIF_RAM_DBG_REG0_ADDR 0x0048 +#define RGMII_REG_0_ADDR 0x07e0 +#define CFG_LINK_AGGR_RESUME_0_ADDR 0x07c8 +#define DEBUG_REG_ADDR 0x0700 +#define CFG_BYPASS_ADDR 0x0294 +#define CLE_BYPASS_REG0_0_ADDR 0x0490 +#define CLE_BYPASS_REG1_0_ADDR 0x0494 +#define CFG_RSIF_FPBUFF_TIMEOUT_EN BIT(31) +#define RESUME_TX BIT(0) +#define CFG_SPEED_1250 BIT(24) +#define TX_PORT0 BIT(0) +#define CFG_BYPASS_UNISEC_TX BIT(2) +#define CFG_BYPASS_UNISEC_RX BIT(1) +#define CFG_CLE_BYPASS_EN0 BIT(31) +#define CFG_TXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 29, 3) + +#define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) +#define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) +#define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) +#define CFG_MACMODE_SET(dst, val) 
xgene_set_bits(dst, val, 18, 2) +#define CFG_WAITASYNCRD_SET(dst, val) xgene_set_bits(dst, val, 0, 16) +#define CFG_CLE_DSTQID0(val) (val & GENMASK(11, 0)) +#define CFG_CLE_FPSEL0(val) ((val << 16) & GENMASK(19, 16)) +#define ICM_CONFIG0_REG_0_ADDR 0x0400 +#define ICM_CONFIG2_REG_0_ADDR 0x0410 +#define RX_DV_GATE_REG_0_ADDR 0x05fc +#define TX_DV_GATE_EN0 BIT(2) +#define RX_DV_GATE_EN0 BIT(1) +#define RESUME_RX0 BIT(0) +#define ENET_CFGSSQMIWQASSOC_ADDR 0xe0 +#define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc +#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0 +#define ENET_CFGSSQMIQMLITEWQASSOC_ADDR 0xf4 +#define ENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70 +#define ENET_BLOCK_MEM_RDY_ADDR 0x74 +#define MAC_CONFIG_1_ADDR 0x00 +#define MAC_CONFIG_2_ADDR 0x04 +#define MAX_FRAME_LEN_ADDR 0x10 +#define INTERFACE_CONTROL_ADDR 0x38 +#define STATION_ADDR0_ADDR 0x40 +#define STATION_ADDR1_ADDR 0x44 +#define PHY_ADDR_SET(dst, val) xgene_set_bits(dst, val, 8, 5) +#define REG_ADDR_SET(dst, val) xgene_set_bits(dst, val, 0, 5) +#define ENET_INTERFACE_MODE2_SET(dst, val) xgene_set_bits(dst, val, 8, 2) +#define MGMT_CLOCK_SEL_SET(dst, val) xgene_set_bits(dst, val, 0, 3) +#define SOFT_RESET1 BIT(31) +#define TX_EN BIT(0) +#define RX_EN BIT(2) +#define ENET_LHD_MODE BIT(25) +#define ENET_GHD_MODE BIT(26) +#define FULL_DUPLEX2 BIT(0) +#define SCAN_AUTO_INCR BIT(5) +#define TBYT_ADDR 0x38 +#define TPKT_ADDR 0x39 +#define TDRP_ADDR 0x45 +#define TFCS_ADDR 0x47 +#define TUND_ADDR 0x4a + +#define TSO_IPPROTO_TCP 1 + +#define USERINFO_POS 0 +#define USERINFO_LEN 32 +#define FPQNUM_POS 32 +#define FPQNUM_LEN 12 +#define LERR_POS 60 +#define LERR_LEN 3 +#define STASH_POS 52 +#define STASH_LEN 2 +#define BUFDATALEN_POS 48 +#define BUFDATALEN_LEN 12 +#define DATAADDR_POS 0 +#define DATAADDR_LEN 42 +#define COHERENT_POS 63 +#define HENQNUM_POS 48 +#define HENQNUM_LEN 12 +#define TYPESEL_POS 44 +#define TYPESEL_LEN 4 +#define ETHHDR_POS 12 +#define ETHHDR_LEN 8 +#define IC_POS 35 /* Insert CRC */ +#define TCPHDR_POS 0 +#define TCPHDR_LEN 6 +#define IPHDR_POS 6 +#define IPHDR_LEN 6 +#define EC_POS 22 /* Enable checksum */ +#define EC_LEN 1 +#define IS_POS 24 /* IP protocol select */ +#define IS_LEN 1 +#define TYPE_ETH_WORK_MESSAGE_POS 44 + +struct xgene_enet_raw_desc { + __le64 m0; + __le64 m1; + __le64 m2; + __le64 m3; +}; + +struct xgene_enet_raw_desc16 { + __le64 m0; + __le64 m1; +}; + +static inline void xgene_enet_mark_desc_slot_empty(void *desc_slot_ptr) +{ + __le64 *desc_slot = desc_slot_ptr; + + desc_slot[EMPTY_SLOT_INDEX] = cpu_to_le64(EMPTY_SLOT); +} + +static inline bool xgene_enet_is_desc_slot_empty(void *desc_slot_ptr) +{ + __le64 *desc_slot = desc_slot_ptr; + + return (desc_slot[EMPTY_SLOT_INDEX] == cpu_to_le64(EMPTY_SLOT)); +} + +enum xgene_enet_ring_cfgsize { + RING_CFGSIZE_512B, + RING_CFGSIZE_2KB, + RING_CFGSIZE_16KB, + RING_CFGSIZE_64KB, + RING_CFGSIZE_512KB, + RING_CFGSIZE_INVALID +}; + +enum xgene_enet_ring_type { + RING_DISABLED, + RING_REGULAR, + RING_BUFPOOL +}; + +enum xgene_ring_owner { + RING_OWNER_ETH0, + RING_OWNER_CPU = 15, + RING_OWNER_INVALID +}; + +enum xgene_enet_ring_bufnum { + RING_BUFNUM_REGULAR = 0x0, + RING_BUFNUM_BUFPOOL = 0x20, + RING_BUFNUM_INVALID +}; + +enum xgene_enet_cmd { + XGENE_ENET_WR_CMD = BIT(31), + XGENE_ENET_RD_CMD = BIT(30) +}; + +enum xgene_enet_err_code { + HBF_READ_DATA = 3, + HBF_LL_READ = 4, + BAD_WORK_MSG = 6, + BUFPOOL_TIMEOUT = 15, + INGRESS_CRC = 16, + INGRESS_CHECKSUM = 17, + INGRESS_TRUNC_FRAME = 18, + INGRESS_PKT_LEN = 19, + INGRESS_PKT_UNDER = 20, + INGRESS_FIFO_OVERRUN = 21, 
+ INGRESS_CHECKSUM_COMPUTE = 26, + ERR_CODE_INVALID +}; + +static inline enum xgene_ring_owner xgene_enet_ring_owner(u16 id) +{ + return (id & RING_OWNER_MASK) >> 6; +} + +static inline u8 xgene_enet_ring_bufnum(u16 id) +{ + return id & RING_BUFNUM_MASK; +} + +static inline bool xgene_enet_is_bufpool(u16 id) +{ + return ((id & RING_BUFNUM_MASK) >= 0x20) ? true : false; +} + +static inline u16 xgene_enet_get_numslots(u16 id, u32 size) +{ + bool is_bufpool = xgene_enet_is_bufpool(id); + + return (is_bufpool) ? size / BUFPOOL_DESC_SIZE : + size / WORK_DESC_SIZE; +} + +struct xgene_enet_desc_ring *xgene_enet_setup_ring( + struct xgene_enet_desc_ring *ring); +void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring); +void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, + struct xgene_enet_pdata *pdata, + enum xgene_enet_err_code status); + +int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); +void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); +bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); + +extern struct xgene_mac_ops xgene_gmac_ops; +extern struct xgene_port_ops xgene_gport_ops; + +#endif /* __XGENE_ENET_HW_H__ */ --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -0,0 +1,1000 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_sgmac.h" +#include "xgene_enet_xgmac.h" + +static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool) +{ + struct xgene_enet_raw_desc16 *raw_desc; + int i; + + for (i = 0; i < buf_pool->slots; i++) { + raw_desc = &buf_pool->raw_desc16[i]; + + /* Hardware expects descriptor in little endian format */ + raw_desc->m0 = cpu_to_le64(i | + SET_VAL(FPQNUM, buf_pool->dst_ring_num) | + SET_VAL(STASH, 3)); + } +} + +static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, + u32 nbuf) +{ + struct sk_buff *skb; + struct xgene_enet_raw_desc16 *raw_desc; + struct net_device *ndev; + struct device *dev; + dma_addr_t dma_addr; + u32 tail = buf_pool->tail; + u32 slots = buf_pool->slots - 1; + u16 bufdatalen, len; + int i; + + ndev = buf_pool->ndev; + dev = ndev_to_dev(buf_pool->ndev); + bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0)); + len = XGENE_ENET_MAX_MTU; + + for (i = 0; i < nbuf; i++) { + raw_desc = &buf_pool->raw_desc16[tail]; + + skb = netdev_alloc_skb_ip_align(ndev, len); + if (unlikely(!skb)) + return -ENOMEM; + buf_pool->rx_skb[tail] = skb; + + dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(ndev, "DMA mapping error\n"); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(BUFDATALEN, bufdatalen) | + SET_BIT(COHERENT)); + tail = (tail + 1) & slots; + } + + iowrite32(nbuf, buf_pool->cmd); + buf_pool->tail = tail; + + return 0; +} + +static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + return ((u16)pdata->rm << 10) | ring->num; +} + +static u8 xgene_enet_hdr_len(const void *data) +{ + const struct ethhdr *eth = data; + + return (eth->h_proto == htons(ETH_P_8021Q)) ? 
VLAN_ETH_HLEN : ETH_HLEN; +} + +static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) +{ + u32 __iomem *cmd_base = ring->cmd_base; + u32 ring_state, num_msgs; + + ring_state = ioread32(&cmd_base[1]); + num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN); + + return num_msgs >> NUMMSGSINQ_POS; +} + +static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) +{ + struct xgene_enet_raw_desc16 *raw_desc; + u32 slots = buf_pool->slots - 1; + u32 tail = buf_pool->tail; + u32 userinfo; + int i, len; + + len = xgene_enet_ring_len(buf_pool); + for (i = 0; i < len; i++) { + tail = (tail - 1) & slots; + raw_desc = &buf_pool->raw_desc16[tail]; + + /* Hardware stores descriptor in little endian format */ + userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); + } + + iowrite32(-len, buf_pool->cmd); + buf_pool->tail = tail; +} + +static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) +{ + struct xgene_enet_desc_ring *rx_ring = data; + + if (napi_schedule_prep(&rx_ring->napi)) { + disable_irq_nosync(irq); + __napi_schedule(&rx_ring->napi); + } + + return IRQ_HANDLED; +} + +static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, + struct xgene_enet_raw_desc *raw_desc) +{ + struct sk_buff *skb; + struct device *dev; + u16 skb_index; + u8 status; + int ret = 0; + + skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + skb = cp_ring->cp_skb[skb_index]; + + dev = ndev_to_dev(cp_ring->ndev); + dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), + GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)), + DMA_TO_DEVICE); + + /* Checking for error */ + status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + if (unlikely(status > 2)) { + xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev), + status); + ret = -EIO; + } + + if (likely(skb)) { + dev_kfree_skb_any(skb); + } else { + netdev_err(cp_ring->ndev, "completion skb is NULL\n"); + ret = -EIO; + } + + return ret; +} + +static u64 xgene_enet_work_msg(struct sk_buff *skb) +{ + struct iphdr *iph; + u8 l3hlen, l4hlen = 0; + u8 csum_enable = 0; + u8 proto = 0; + u8 ethhdr; + u64 hopinfo; + + if (unlikely(skb->protocol != htons(ETH_P_IP)) && + unlikely(skb->protocol != htons(ETH_P_8021Q))) + goto out; + + if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM))) + goto out; + + iph = ip_hdr(skb); + if (unlikely(ip_is_fragment(iph))) + goto out; + + if (likely(iph->protocol == IPPROTO_TCP)) { + l4hlen = tcp_hdrlen(skb) >> 2; + csum_enable = 1; + proto = TSO_IPPROTO_TCP; + } else if (iph->protocol == IPPROTO_UDP) { + l4hlen = UDP_HDR_SIZE; + csum_enable = 1; + } +out: + l3hlen = ip_hdrlen(skb) >> 2; + ethhdr = xgene_enet_hdr_len(skb->data); + hopinfo = SET_VAL(TCPHDR, l4hlen) | + SET_VAL(IPHDR, l3hlen) | + SET_VAL(ETHHDR, ethhdr) | + SET_VAL(EC, csum_enable) | + SET_VAL(IS, proto) | + SET_BIT(IC) | + SET_BIT(TYPE_ETH_WORK_MESSAGE); + + return hopinfo; +} + +static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, + struct sk_buff *skb) +{ + struct device *dev = ndev_to_dev(tx_ring->ndev); + struct xgene_enet_raw_desc *raw_desc; + dma_addr_t dma_addr; + u16 tail = tx_ring->tail; + u64 hopinfo; + + raw_desc = &tx_ring->raw_desc[tail]; + memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc)); + + dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + netdev_err(tx_ring->ndev, "DMA mapping error\n"); + return -EINVAL; + } + + /* Hardware expects descriptor in 
little endian format */ + raw_desc->m0 = cpu_to_le64(tail); + raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) | + SET_VAL(BUFDATALEN, skb->len) | + SET_BIT(COHERENT)); + hopinfo = xgene_enet_work_msg(skb); + raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) | + hopinfo); + tx_ring->cp_ring->cp_skb[tail] = skb; + + return 0; +} + +static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; + struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; + u32 tx_level, cq_level; + + tx_level = xgene_enet_ring_len(tx_ring); + cq_level = xgene_enet_ring_len(cp_ring); + if (unlikely(tx_level > pdata->tx_qcnt_hi || + cq_level > pdata->cp_qcnt_hi)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + if (xgene_enet_setup_tx_desc(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + iowrite32(1, tx_ring->cmd); + skb_tx_timestamp(skb); + tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); + + pdata->stats.tx_packets++; + pdata->stats.tx_bytes += skb->len; + + return NETDEV_TX_OK; +} + +static void xgene_enet_skip_csum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + + if (!ip_is_fragment(iph) || + (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } +} + +static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, + struct xgene_enet_raw_desc *raw_desc) +{ + struct net_device *ndev; + struct xgene_enet_pdata *pdata; + struct device *dev; + struct xgene_enet_desc_ring *buf_pool; + u32 datalen, skb_index; + struct sk_buff *skb; + u8 status; + int ret = 0; + + ndev = rx_ring->ndev; + pdata = netdev_priv(ndev); + dev = ndev_to_dev(rx_ring->ndev); + buf_pool = rx_ring->buf_pool; + + dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)), + XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); + skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); + skb = buf_pool->rx_skb[skb_index]; + + /* checking for error */ + status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + if (unlikely(status > 2)) { + dev_kfree_skb_any(skb); + xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), + status); + pdata->stats.rx_dropped++; + ret = -EIO; + goto out; + } + + /* strip off CRC as HW isn't doing this */ + datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)); + datalen -= 4; + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, datalen); + + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, ndev); + if (likely((ndev->features & NETIF_F_IP_CSUM) && + skb->protocol == htons(ETH_P_IP))) { + xgene_enet_skip_csum(skb); + } + + pdata->stats.rx_packets++; + pdata->stats.rx_bytes += datalen; + napi_gro_receive(&rx_ring->napi, skb); +out: + if (--rx_ring->nbufpool == 0) { + ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL); + rx_ring->nbufpool = NUM_BUFPOOL; + } + + return ret; +} + +static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) +{ + return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? 
true : false; +} + +static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, + int budget) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + struct xgene_enet_raw_desc *raw_desc; + u16 head = ring->head; + u16 slots = ring->slots - 1; + int ret, count = 0; + + do { + raw_desc = &ring->raw_desc[head]; + if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) + break; + + if (is_rx_desc(raw_desc)) + ret = xgene_enet_rx_frame(ring, raw_desc); + else + ret = xgene_enet_tx_completion(ring, raw_desc); + xgene_enet_mark_desc_slot_empty(raw_desc); + + head = (head + 1) & slots; + count++; + + if (ret) + break; + } while (--budget); + + if (likely(count)) { + iowrite32(-count, ring->cmd); + ring->head = head; + + if (netif_queue_stopped(ring->ndev)) { + if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low) + netif_wake_queue(ring->ndev); + } + } + + return count; +} + +static int xgene_enet_napi(struct napi_struct *napi, const int budget) +{ + struct xgene_enet_desc_ring *ring; + int processed; + + ring = container_of(napi, struct xgene_enet_desc_ring, napi); + processed = xgene_enet_process_ring(ring, budget); + + if (processed != budget) { + napi_complete(napi); + enable_irq(ring->irq); + } + + return processed; +} + +static void xgene_enet_timeout(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + + pdata->mac_ops->reset(pdata); +} + +static int xgene_enet_register_irq(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + int ret; + + ret = devm_request_irq(dev, pdata->rx_ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ndev->name, pdata->rx_ring); + if (ret) { + netdev_err(ndev, "rx%d interrupt request failed\n", + pdata->rx_ring->irq); + } + + return ret; +} + +static void xgene_enet_free_irq(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata; + struct device *dev; + + pdata = netdev_priv(ndev); + dev = ndev_to_dev(ndev); + devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); +} + +static int xgene_enet_open(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_mac_ops *mac_ops = pdata->mac_ops; + int ret; + + mac_ops->tx_enable(pdata); + mac_ops->rx_enable(pdata); + + ret = xgene_enet_register_irq(ndev); + if (ret) + return ret; + napi_enable(&pdata->rx_ring->napi); + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + phy_start(pdata->phy_dev); + else + schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); + + netif_start_queue(ndev); + + return ret; +} + +static int xgene_enet_close(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_mac_ops *mac_ops = pdata->mac_ops; + + netif_stop_queue(ndev); + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + phy_stop(pdata->phy_dev); + else + cancel_delayed_work_sync(&pdata->link_work); + + napi_disable(&pdata->rx_ring->napi); + xgene_enet_free_irq(ndev); + xgene_enet_process_ring(pdata->rx_ring, -1); + + mac_ops->tx_disable(pdata); + mac_ops->rx_disable(pdata); + + return 0; +} + +static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata; + struct device *dev; + + pdata = netdev_priv(ring->ndev); + dev = ndev_to_dev(ring->ndev); + + xgene_enet_clear_ring(ring); + dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); +} + +static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_desc_ring *buf_pool; + + if (pdata->tx_ring) 
{ + xgene_enet_delete_ring(pdata->tx_ring); + pdata->tx_ring = NULL; + } + + if (pdata->rx_ring) { + buf_pool = pdata->rx_ring->buf_pool; + xgene_enet_delete_bufpool(buf_pool); + xgene_enet_delete_ring(buf_pool); + xgene_enet_delete_ring(pdata->rx_ring); + pdata->rx_ring = NULL; + } +} + +static int xgene_enet_get_ring_size(struct device *dev, + enum xgene_enet_ring_cfgsize cfgsize) +{ + int size = -EINVAL; + + switch (cfgsize) { + case RING_CFGSIZE_512B: + size = 0x200; + break; + case RING_CFGSIZE_2KB: + size = 0x800; + break; + case RING_CFGSIZE_16KB: + size = 0x4000; + break; + case RING_CFGSIZE_64KB: + size = 0x10000; + break; + case RING_CFGSIZE_512KB: + size = 0x80000; + break; + default: + dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize); + break; + } + + return size; +} + +static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) +{ + struct device *dev; + + if (!ring) + return; + + dev = ndev_to_dev(ring->ndev); + + if (ring->desc_addr) { + xgene_enet_clear_ring(ring); + dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + } + devm_kfree(dev, ring); +} + +static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + + ring = pdata->tx_ring; + if (ring && ring->cp_ring && ring->cp_ring->cp_skb) + devm_kfree(dev, ring->cp_ring->cp_skb); + xgene_enet_free_desc_ring(ring); + + ring = pdata->rx_ring; + if (ring && ring->buf_pool && ring->buf_pool->rx_skb) + devm_kfree(dev, ring->buf_pool->rx_skb); + xgene_enet_free_desc_ring(ring->buf_pool); + xgene_enet_free_desc_ring(ring); +} + +static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( + struct net_device *ndev, u32 ring_num, + enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) +{ + struct xgene_enet_desc_ring *ring; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + u32 size; + + ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring), + GFP_KERNEL); + if (!ring) + return NULL; + + ring->ndev = ndev; + ring->num = ring_num; + ring->cfgsize = cfgsize; + ring->id = ring_id; + + size = xgene_enet_get_ring_size(dev, cfgsize); + ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, + GFP_KERNEL); + if (!ring->desc_addr) { + devm_kfree(dev, ring); + return NULL; + } + ring->size = size; + + ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6); + ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR; + ring = xgene_enet_setup_ring(ring); + netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", + ring->num, ring->size, ring->id, ring->slots); + + return ring; +} + +static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum) +{ + return (owner << 6) | (bufnum & GENMASK(5, 0)); +} + +static int xgene_enet_create_desc_rings(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct device *dev = ndev_to_dev(ndev); + struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; + struct xgene_enet_desc_ring *buf_pool = NULL; + u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM; + u8 bp_bufnum = START_BP_BUFNUM; + u16 ring_id, ring_num = START_RING_NUM; + int ret; + + /* allocate rx descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, ring_id); + if (!rx_ring) { + ret = -ENOMEM; + goto err; + } + + /* allocate buffer pool for receiving packets */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, 
bp_bufnum++); + buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_2KB, ring_id); + if (!buf_pool) { + ret = -ENOMEM; + goto err; + } + + rx_ring->nbufpool = NUM_BUFPOOL; + rx_ring->buf_pool = buf_pool; + rx_ring->irq = pdata->rx_irq; + buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!buf_pool->rx_skb) { + ret = -ENOMEM; + goto err; + } + + buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); + rx_ring->buf_pool = buf_pool; + pdata->rx_ring = rx_ring; + + /* allocate tx descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++); + tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, ring_id); + if (!tx_ring) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring = tx_ring; + + cp_ring = pdata->rx_ring; + cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, + sizeof(struct sk_buff *), GFP_KERNEL); + if (!cp_ring->cp_skb) { + ret = -ENOMEM; + goto err; + } + pdata->tx_ring->cp_ring = cp_ring; + pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + + pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; + pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; + pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; + + return 0; + +err: + xgene_enet_free_desc_rings(pdata); + return ret; +} + +static struct rtnl_link_stats64 *xgene_enet_get_stats64( + struct net_device *ndev, + struct rtnl_link_stats64 *storage) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct rtnl_link_stats64 *stats = &pdata->stats; + + stats->rx_errors += stats->rx_length_errors + + stats->rx_crc_errors + + stats->rx_frame_errors + + stats->rx_fifo_errors; + memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); + + return storage; +} + +static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + int ret; + + ret = eth_mac_addr(ndev, addr); + if (ret) + return ret; + pdata->mac_ops->set_mac_addr(pdata); + + return ret; +} + +static const struct net_device_ops xgene_ndev_ops = { + .ndo_open = xgene_enet_open, + .ndo_stop = xgene_enet_close, + .ndo_start_xmit = xgene_enet_start_xmit, + .ndo_tx_timeout = xgene_enet_timeout, + .ndo_get_stats64 = xgene_enet_get_stats64, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = xgene_enet_set_mac_address, +}; + +static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) +{ + struct platform_device *pdev; + struct net_device *ndev; + struct device *dev; + struct resource *res; + void __iomem *base_addr; + const char *mac; + int ret; + + pdev = pdata->pdev; + dev = &pdev->dev; + ndev = pdata->ndev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "enet_csr"); + if (!res) { + dev_err(dev, "Resource enet_csr not defined\n"); + return -ENODEV; + } + pdata->base_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->base_addr)) { + dev_err(dev, "Unable to retrieve ENET Port CSR region\n"); + return PTR_ERR(pdata->base_addr); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_csr"); + if (!res) { + dev_err(dev, "Resource ring_csr not defined\n"); + return -ENODEV; + } + pdata->ring_csr_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->ring_csr_addr)) { + dev_err(dev, "Unable to retrieve ENET Ring CSR region\n"); + return PTR_ERR(pdata->ring_csr_addr); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ring_cmd"); + if (!res) { + dev_err(dev, "Resource ring_cmd not defined\n"); + return -ENODEV; + } + 
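
The watermark setup just above (tx_qcnt_hi and cp_qcnt_hi at half the ring, cp_qcnt_low at a quarter) is what drives the queue flow control seen earlier in xgene_enet_start_xmit() and xgene_enet_process_ring(): the queue is stopped once occupancy crosses the high mark and only woken again once completions pull it below the low mark. The following stand-alone sketch is an editorial illustration, not part of the patch; struct model_ring and the model_* helpers are invented stand-ins for the ring bookkeeping and netif_{stop,wake}_queue(), and the two-ring check in the driver is collapsed to one ring.

/*
 * Minimal user-space model of the high/low watermark hysteresis used by
 * xgene_enet_start_xmit() / xgene_enet_process_ring(). Thresholds mirror
 * the driver's defaults: high = slots / 2, low = slots / 4.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_ring {
	unsigned int slots;     /* ring size                              */
	unsigned int level;     /* descriptors currently queued           */
	unsigned int qcnt_hi;   /* stop the queue above this occupancy    */
	unsigned int qcnt_low;  /* wake the queue below this occupancy    */
	bool stopped;           /* stand-in for netif_queue_stopped()     */
};

static bool model_xmit(struct model_ring *r)
{
	if (r->level > r->qcnt_hi) {   /* same test as start_xmit()       */
		r->stopped = true;     /* netif_stop_queue()              */
		return false;          /* NETDEV_TX_BUSY                  */
	}
	r->level++;
	return true;
}

static void model_complete(struct model_ring *r, unsigned int done)
{
	r->level = done > r->level ? 0 : r->level - done;
	if (r->stopped && r->level < r->qcnt_low)
		r->stopped = false;    /* netif_wake_queue()              */
}

int main(void)
{
	struct model_ring r = {
		.slots = 1024, .level = 0,
		.qcnt_hi = 1024 / 2, .qcnt_low = 1024 / 4,
	};
	unsigned int sent = 0, i;

	for (i = 0; i < 2000; i++) {
		if (model_xmit(&r))
			sent++;
		if (i % 3 == 0)        /* completions lag behind transmits */
			model_complete(&r, 2);
	}
	printf("sent=%u level=%u stopped=%d\n", sent, r.level, r.stopped);
	return 0;
}

The hysteresis gap between the two marks is the point of the scheme: the queue is not woken the instant occupancy dips under the stop threshold, which avoids rapid stop/wake flapping under sustained load.
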
pdata->ring_cmd_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(pdata->ring_cmd_addr)) { + dev_err(dev, "Unable to retrieve ENET Ring command region\n"); + return PTR_ERR(pdata->ring_cmd_addr); + } + + ret = platform_get_irq(pdev, 0); + if (ret <= 0) { + dev_err(dev, "Unable to get ENET Rx IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->rx_irq = ret; + + mac = of_get_mac_address(dev->of_node); + if (mac) + memcpy(ndev->dev_addr, mac, ndev->addr_len); + else + eth_hw_addr_random(ndev); + memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); + + pdata->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (pdata->phy_mode < 0) { + dev_err(dev, "Unable to get phy-connection-type\n"); + return pdata->phy_mode; + } + if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII && + pdata->phy_mode != PHY_INTERFACE_MODE_SGMII && + pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) { + dev_err(dev, "Incorrect phy-connection-type specified\n"); + return -ENODEV; + } + + pdata->clk = devm_clk_get(&pdev->dev, NULL); + ret = IS_ERR(pdata->clk); + if (IS_ERR(pdata->clk)) { + dev_err(&pdev->dev, "can't get clock\n"); + ret = PTR_ERR(pdata->clk); + return ret; + } + + base_addr = pdata->base_addr; + pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; + pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; + pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || + pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { + pdata->mcx_mac_addr = base_addr + BLOCK_ETH_MAC_OFFSET; + pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET; + } else { + pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET; + pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET; + } + pdata->rx_buff_cnt = NUM_PKT_BUF; + + return 0; +} + +static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + struct xgene_enet_desc_ring *buf_pool; + u16 dst_ring_num; + int ret; + + ret = pdata->port_ops->reset(pdata); + if (ret) + return ret; + + ret = xgene_enet_create_desc_rings(ndev); + if (ret) { + netdev_err(ndev, "Error in ring configuration\n"); + return ret; + } + + /* setup buffer pool */ + buf_pool = pdata->rx_ring->buf_pool; + xgene_enet_init_bufpool(buf_pool); + ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); + if (ret) { + xgene_enet_delete_desc_rings(pdata); + return ret; + } + + dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); + pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); + pdata->mac_ops->init(pdata); + + return ret; +} + +static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) +{ + switch (pdata->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + pdata->mac_ops = &xgene_gmac_ops; + pdata->port_ops = &xgene_gport_ops; + pdata->rm = RM3; + break; + case PHY_INTERFACE_MODE_SGMII: + pdata->mac_ops = &xgene_sgmac_ops; + pdata->port_ops = &xgene_sgport_ops; + pdata->rm = RM1; + break; + default: + pdata->mac_ops = &xgene_xgmac_ops; + pdata->port_ops = &xgene_xgport_ops; + pdata->rm = RM0; + break; + } +} + +static int xgene_enet_probe(struct platform_device *pdev) +{ + struct net_device *ndev; + struct xgene_enet_pdata *pdata; + struct device *dev = &pdev->dev; + struct napi_struct *napi; + struct xgene_mac_ops *mac_ops; + int ret; + + ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); + if (!ndev) + return -ENOMEM; + + pdata = netdev_priv(ndev); + + pdata->pdev = pdev; + pdata->ndev = ndev; + SET_NETDEV_DEV(ndev, dev); + 
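
xgene_enet_setup_ops() above picks one of the MAC/port ops tables at probe time based on phy-connection-type, and every later hardware touch goes through those pointers (xgene_enet_init_hw() calls port_ops->reset and mac_ops->init; open/close call the enable/disable hooks; the SGMII and XGMII tables are supplied by the sgmac and xgmac files later in this patch). The stand-alone sketch below is an editorial illustration of that dispatch pattern only; the demo_* names are invented and are not the driver's structures.

/*
 * Ops-table dispatch reduced to a stand-alone program: pick a table by
 * interface mode, then call through its function pointers.
 */
#include <stdio.h>

enum demo_phy_mode { DEMO_RGMII, DEMO_SGMII, DEMO_XGMII };

struct demo_pdata;

struct demo_mac_ops {
	void (*init)(struct demo_pdata *p);
	void (*tx_enable)(struct demo_pdata *p);
};

struct demo_pdata {
	enum demo_phy_mode phy_mode;
	const struct demo_mac_ops *mac_ops;
};

static void demo_gmac_init(struct demo_pdata *p)       { puts("gmac init");   }
static void demo_gmac_tx_enable(struct demo_pdata *p)  { puts("gmac tx on");  }
static void demo_xgmac_init(struct demo_pdata *p)      { puts("xgmac init");  }
static void demo_xgmac_tx_enable(struct demo_pdata *p) { puts("xgmac tx on"); }

static const struct demo_mac_ops demo_gmac_ops = {
	.init = demo_gmac_init, .tx_enable = demo_gmac_tx_enable,
};
static const struct demo_mac_ops demo_xgmac_ops = {
	.init = demo_xgmac_init, .tx_enable = demo_xgmac_tx_enable,
};

/* mirrors the shape of the switch in xgene_enet_setup_ops(): RGMII gets
 * the 1G table, everything else falls back to the 10G table here */
static void demo_setup_ops(struct demo_pdata *p)
{
	switch (p->phy_mode) {
	case DEMO_RGMII:
		p->mac_ops = &demo_gmac_ops;
		break;
	default:
		p->mac_ops = &demo_xgmac_ops;
		break;
	}
}

int main(void)
{
	struct demo_pdata p = { .phy_mode = DEMO_XGMII };

	demo_setup_ops(&p);
	p.mac_ops->init(&p);       /* cf. xgene_enet_init_hw() */
	p.mac_ops->tx_enable(&p);  /* cf. xgene_enet_open()    */
	return 0;
}

The indirection keeps the data path (ring handling, NAPI, xmit) identical across RGMII, SGMII and XGMII ports; only the table selected here differs.
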
platform_set_drvdata(pdev, pdata); + ndev->netdev_ops = &xgene_ndev_ops; + xgene_enet_set_ethtool_ops(ndev); + ndev->features |= NETIF_F_IP_CSUM | + NETIF_F_GSO | + NETIF_F_GRO; + + ret = xgene_enet_get_resources(pdata); + if (ret) + goto err; + + xgene_enet_setup_ops(pdata); + + ret = register_netdev(ndev); + if (ret) { + netdev_err(ndev, "Failed to register netdev\n"); + goto err; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret) { + netdev_err(ndev, "No usable DMA configuration\n"); + goto err; + } + + ret = xgene_enet_init_hw(pdata); + if (ret) + goto err; + + napi = &pdata->rx_ring->napi; + netif_napi_add(ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + mac_ops = pdata->mac_ops; + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + ret = xgene_enet_mdio_config(pdata); + else + INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); + + return ret; +err: + unregister_netdev(ndev); + free_netdev(ndev); + return ret; +} + +static int xgene_enet_remove(struct platform_device *pdev) +{ + struct xgene_enet_pdata *pdata; + struct xgene_mac_ops *mac_ops; + struct net_device *ndev; + + pdata = platform_get_drvdata(pdev); + mac_ops = pdata->mac_ops; + ndev = pdata->ndev; + + mac_ops->rx_disable(pdata); + mac_ops->tx_disable(pdata); + + netif_napi_del(&pdata->rx_ring->napi); + xgene_enet_mdio_remove(pdata); + xgene_enet_delete_desc_rings(pdata); + unregister_netdev(ndev); + pdata->port_ops->shutdown(pdata); + free_netdev(ndev); + + return 0; +} + +static struct of_device_id xgene_enet_match[] = { + {.compatible = "apm,xgene-enet",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgene_enet_match); + +static struct platform_driver xgene_enet_driver = { + .driver = { + .name = "xgene-enet", + .of_match_table = xgene_enet_match, + }, + .probe = xgene_enet_probe, + .remove = xgene_enet_remove, +}; + +module_platform_driver(xgene_enet_driver); + +MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver"); +MODULE_VERSION(XGENE_DRV_VERSION); +MODULE_AUTHOR("Iyappan Subramanian "); +MODULE_AUTHOR("Keyur Chudgar "); +MODULE_LICENSE("GPL"); --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -0,0 +1,167 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Ravi Patel + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_MAIN_H__ +#define __XGENE_ENET_MAIN_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "xgene_enet_hw.h" + +#define XGENE_DRV_VERSION "v1.0" +#define XGENE_ENET_MAX_MTU 1536 +#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) +#define NUM_PKT_BUF 64 +#define NUM_BUFPOOL 32 +#define START_ETH_BUFNUM 2 +#define START_BP_BUFNUM 0x22 +#define START_RING_NUM 8 + +#define PHY_POLL_LINK_ON (10 * HZ) +#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) + +/* software context of a descriptor ring */ +struct xgene_enet_desc_ring { + struct net_device *ndev; + u16 id; + u16 num; + u16 head; + u16 tail; + u16 slots; + u16 irq; + u32 size; + u32 state[NUM_RING_CONFIG]; + void __iomem *cmd_base; + void __iomem *cmd; + dma_addr_t dma; + u16 dst_ring_num; + u8 nbufpool; + struct sk_buff *(*rx_skb); + struct sk_buff *(*cp_skb); + enum xgene_enet_ring_cfgsize cfgsize; + struct xgene_enet_desc_ring *cp_ring; + struct xgene_enet_desc_ring *buf_pool; + struct napi_struct napi; + union { + void *desc_addr; + struct xgene_enet_raw_desc *raw_desc; + struct xgene_enet_raw_desc16 *raw_desc16; + }; +}; + +struct xgene_mac_ops { + void (*init)(struct xgene_enet_pdata *pdata); + void (*reset)(struct xgene_enet_pdata *pdata); + void (*tx_enable)(struct xgene_enet_pdata *pdata); + void (*rx_enable)(struct xgene_enet_pdata *pdata); + void (*tx_disable)(struct xgene_enet_pdata *pdata); + void (*rx_disable)(struct xgene_enet_pdata *pdata); + void (*set_mac_addr)(struct xgene_enet_pdata *pdata); + void (*link_state)(struct work_struct *work); +}; + +struct xgene_port_ops { + int (*reset)(struct xgene_enet_pdata *pdata); + void (*cle_bypass)(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id); + void (*shutdown)(struct xgene_enet_pdata *pdata); +}; + +/* ethernet private data */ +struct xgene_enet_pdata { + struct net_device *ndev; + struct mii_bus *mdio_bus; + struct phy_device *phy_dev; + int phy_speed; + struct clk *clk; + struct platform_device *pdev; + struct xgene_enet_desc_ring *tx_ring; + struct xgene_enet_desc_ring *rx_ring; + char *dev_name; + u32 rx_buff_cnt; + u32 tx_qcnt_hi; + u32 cp_qcnt_hi; + u32 cp_qcnt_low; + u32 rx_irq; + void __iomem *eth_csr_addr; + void __iomem *eth_ring_if_addr; + void __iomem *eth_diag_csr_addr; + void __iomem *mcx_mac_addr; + void __iomem *mcx_mac_csr_addr; + void __iomem *base_addr; + void __iomem *ring_csr_addr; + void __iomem *ring_cmd_addr; + int phy_mode; + enum xgene_enet_rm rm; + struct rtnl_link_stats64 stats; + struct xgene_mac_ops *mac_ops; + struct xgene_port_ops *port_ops; + struct delayed_work link_work; +}; + +struct xgene_indirect_ctl { + void __iomem *addr; + void __iomem *ctl; + void __iomem *cmd; + void __iomem *cmd_done; +}; + +/* Set the specified value into a bit-field defined by its starting position + * and length within a single u64. + */ +static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val) +{ + return (val & ((1ULL << len) - 1)) << pos; +} + +#define SET_VAL(field, val) \ + xgene_enet_set_field_value(field ## _POS, field ## _LEN, val) + +#define SET_BIT(field) \ + xgene_enet_set_field_value(field ## _POS, 1, 1) + +/* Get the value from a bit-field defined by its starting position + * and length within the specified u64. 
+ */ +static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) +{ + return (src >> pos) & ((1ULL << len) - 1); +} + +#define GET_VAL(field, src) \ + xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) + +static inline struct device *ndev_to_dev(struct net_device *ndev) +{ + return ndev->dev.parent; +} + +void xgene_enet_set_ethtool_ops(struct net_device *netdev); + +#endif /* __XGENE_ENET_MAIN_H__ */ --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -0,0 +1,392 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_sgmac.h" + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) +{ + iowrite32(val, p->eth_csr_addr + offset); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p, + u32 offset, u32 val) +{ + iowrite32(val, p->eth_ring_if_addr + offset); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p, + u32 offset, u32 val) +{ + iowrite32(val, p->eth_diag_csr_addr + offset); +} + +static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl, + u32 wr_addr, u32 wr_data) +{ + int i; + + iowrite32(wr_addr, ctl->addr); + iowrite32(wr_data, ctl->ctl); + iowrite32(XGENE_ENET_WR_CMD, ctl->cmd); + + /* wait for write command to complete */ + for (i = 0; i < 10; i++) { + if (ioread32(ctl->cmd_done)) { + iowrite32(0, ctl->cmd); + return true; + } + udelay(1); + } + + return false; +} + +static void xgene_enet_wr_mac(struct xgene_enet_pdata *p, + u32 wr_addr, u32 wr_data) +{ + struct xgene_indirect_ctl ctl = { + .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET, + .ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET, + .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET, + .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET + }; + + if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data)) + netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr); +} + +static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->eth_csr_addr + offset); +} + +static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->eth_diag_csr_addr + offset); +} + +static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr) +{ + u32 rd_data; + int i; + + iowrite32(rd_addr, ctl->addr); + iowrite32(XGENE_ENET_RD_CMD, ctl->cmd); + + /* wait for read command to complete */ + for (i = 0; i < 10; i++) { + if (ioread32(ctl->cmd_done)) { + rd_data = ioread32(ctl->ctl); + iowrite32(0, ctl->cmd); + + return rd_data; + } + udelay(1); + } + + pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr); + + return 0; +} + +static u32 xgene_enet_rd_mac(struct xgene_enet_pdata 
*p, u32 rd_addr) +{ + struct xgene_indirect_ctl ctl = { + .addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET, + .ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET, + .cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET, + .cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET + }; + + return xgene_enet_rd_indirect(&ctl, rd_addr); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) +{ + struct net_device *ndev = p->ndev; + u32 data; + int i = 0; + + xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); + do { + usleep_range(100, 110); + data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); + if (data == ~0U) + return 0; + } while (++i < 10); + + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) +{ + u32 val = 0xffffffff; + + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val); + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val); +} + +static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id, + u32 reg, u16 data) +{ + u32 addr, wr_data, done; + int i; + + addr = PHY_ADDR(phy_id) | REG_ADDR(reg); + xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); + + wr_data = PHY_CONTROL(data); + xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data); + + for (i = 0; i < 10; i++) { + done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); + if (!(done & BUSY_MASK)) + return; + usleep_range(10, 20); + } + + netdev_err(p->ndev, "MII_MGMT write failed\n"); +} + +static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg) +{ + u32 addr, data, done; + int i; + + addr = PHY_ADDR(phy_id) | REG_ADDR(reg); + xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr); + xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + + for (i = 0; i < 10; i++) { + done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR); + if (!(done & BUSY_MASK)) { + data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR); + xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0); + + return data; + } + usleep_range(10, 20); + } + + netdev_err(p->ndev, "MII_MGMT read failed\n"); + + return 0; +} + +static void xgene_sgmac_reset(struct xgene_enet_pdata *p) +{ + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1); + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0); +} + +static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p) +{ + u32 addr0, addr1; + u8 *dev_addr = p->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0); + + addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR); + addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16); + xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1); +} + +static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) +{ + u32 data; + + data = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + + return data & LINK_UP; +} + +static void xgene_sgmac_init(struct xgene_enet_pdata *p) +{ + u32 data, loop = 10; + + xgene_sgmac_reset(p); + + /* Enable auto-negotiation */ + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); + + while (loop--) { + data = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_STATUS_ADDR >> 2); + if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS)) + break; + usleep_range(10, 20); + } + if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS)) + netdev_err(p->ndev, "Auto-negotiation failed\n"); + + data = xgene_enet_rd_mac(p, 
MAC_CONFIG_2_ADDR); + ENET_INTERFACE_MODE2_SET(&data, 2); + xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2); + xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE); + + data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR); + data |= MPA_IDLE_WITH_QMI_EMPTY; + xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data); + + xgene_sgmac_set_mac_addr(p); + + data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR); + data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data); + + /* Adjust MDC clock frequency */ + data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR); + MGMT_CLOCK_SEL_SET(&data, 7); + xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data); + + /* Enable drop if bufpool not available */ + data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR); + data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data); + + /* Rtype should be copied from FP */ + xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0); + + /* Bypass traffic gating */ + xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); + xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX); + xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0); +} + +static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) +{ + u32 data; + + data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR); + + if (set) + data |= bits; + else + data &= ~bits; + + xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data); +} + +static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, RX_EN, true); +} + +static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, TX_EN, true); +} + +static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, RX_EN, false); +} + +static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) +{ + xgene_sgmac_rxtx(p, TX_EN, false); +} + +static int xgene_enet_reset(struct xgene_enet_pdata *p) +{ + if (!xgene_ring_mgr_init(p)) + return -ENODEV; + + clk_prepare_enable(p->clk); + clk_disable_unprepare(p->clk); + clk_prepare_enable(p->clk); + + xgene_enet_ecc_init(p); + xgene_enet_config_ring_if_assoc(p); + + return 0; +} + +static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 data, fpsel; + + data = CFG_CLE_BYPASS_EN0; + xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data); + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel); + xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data); +} + +static void xgene_enet_shutdown(struct xgene_enet_pdata *p) +{ + clk_disable_unprepare(p->clk); +} + +static void xgene_enet_link_state(struct work_struct *work) +{ + struct xgene_enet_pdata *p = container_of(to_delayed_work(work), + struct xgene_enet_pdata, link_work); + struct net_device *ndev = p->ndev; + u32 link, poll_interval; + + link = xgene_enet_link_status(p); + if (link) { + if (!netif_carrier_ok(ndev)) { + netif_carrier_on(ndev); + xgene_sgmac_init(p); + xgene_sgmac_rx_enable(p); + xgene_sgmac_tx_enable(p); + netdev_info(ndev, "Link is Up - 1Gbps\n"); + } + poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(ndev)) { + xgene_sgmac_rx_disable(p); + xgene_sgmac_tx_disable(p); + netif_carrier_off(ndev); + netdev_info(ndev, "Link is Down\n"); + } + poll_interval = PHY_POLL_LINK_OFF; + } + + schedule_delayed_work(&p->link_work, poll_interval); +} + +struct xgene_mac_ops xgene_sgmac_ops = { + .init = xgene_sgmac_init, + .reset = xgene_sgmac_reset, + .rx_enable = 
xgene_sgmac_rx_enable, + .tx_enable = xgene_sgmac_tx_enable, + .rx_disable = xgene_sgmac_rx_disable, + .tx_disable = xgene_sgmac_tx_disable, + .set_mac_addr = xgene_sgmac_set_mac_addr, + .link_state = xgene_enet_link_state +}; + +struct xgene_port_ops xgene_sgport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_cle_bypass, + .shutdown = xgene_enet_shutdown +}; --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -0,0 +1,41 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __XGENE_ENET_SGMAC_H__ +#define __XGENE_ENET_SGMAC_H__ + +#define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8)) +#define REG_ADDR(src) ((src) & GENMASK(4, 0)) +#define PHY_CONTROL(src) ((src) & GENMASK(15, 0)) +#define INT_PHY_ADDR 0x1e +#define SGMII_TBI_CONTROL_ADDR 0x44 +#define SGMII_CONTROL_ADDR 0x00 +#define SGMII_STATUS_ADDR 0x04 +#define SGMII_BASE_PAGE_ABILITY_ADDR 0x14 +#define AUTO_NEG_COMPLETE BIT(5) +#define LINK_STATUS BIT(2) +#define LINK_UP BIT(15) +#define MPA_IDLE_WITH_QMI_EMPTY BIT(12) +#define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc + +extern struct xgene_mac_ops xgene_sgmac_ops; +extern struct xgene_port_ops xgene_sgport_ops; + +#endif /* __XGENE_ENET_SGMAC_H__ */ --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -0,0 +1,337 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "xgene_enet_main.h" +#include "xgene_enet_hw.h" +#include "xgene_enet_xgmac.h" + +static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_ring_if_addr + offset; + + iowrite32(val, addr); +} + +static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + iowrite32(val, addr); +} + +static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr, + void __iomem *cmd, void __iomem *cmd_done, + u32 wr_addr, u32 wr_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(wr_addr, addr); + iowrite32(wr_data, wr); + iowrite32(XGENE_ENET_WR_CMD, cmd); + + /* wait for write command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, + u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data)) + netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n", + wr_addr); +} + +static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_csr_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata, + u32 offset, u32 *val) +{ + void __iomem *addr = pdata->eth_diag_csr_addr + offset; + + *val = ioread32(addr); +} + +static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd, + void __iomem *cmd, void __iomem *cmd_done, + u32 rd_addr, u32 *rd_data) +{ + u32 done; + u8 wait = 10; + + iowrite32(rd_addr, addr); + iowrite32(XGENE_ENET_RD_CMD, cmd); + + /* wait for read command to complete */ + while (!(done = ioread32(cmd_done)) && wait--) + udelay(1); + + if (!done) + return false; + + *rd_data = ioread32(rd); + iowrite32(0, cmd); + + return true; +} + +static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, + u32 rd_addr, u32 *rd_data) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + + addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET; + rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET; + cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET; + + if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data)) + netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n", + rd_addr); +} + +static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata) +{ + struct net_device *ndev = pdata->ndev; + u32 data; + u8 wait = 10; + + xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); + do { + usleep_range(100, 110); + xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data); + } while ((data != 0xffffffff) && wait--); + + if (data != 0xffffffff) { + netdev_err(ndev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_ring_if(pdata, 
ENET_CFGSSQMIWQASSOC_ADDR, 0); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0); + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0); +} + +static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata) +{ + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0); +} + +static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata) +{ + u32 addr0, addr1; + u8 *dev_addr = pdata->ndev->dev_addr; + + addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) | + (dev_addr[1] << 8) | dev_addr[0]; + addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16); + + xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0); + xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1); +} + +static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data); + + return data; +} + +static void xgene_xgmac_init(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_xgmac_reset(pdata); + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + data |= HSTPPEN; + data &= ~HSTLENCHK; + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data); + + xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600); + xgene_xgmac_set_mac_addr(pdata); + + xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data); + data |= CFG_RSIF_FPBUFF_TIMEOUT_EN; + xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data); + + xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX); + xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0); + xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data); + data |= BIT(12); + xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data); + xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82); +} + +static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN); +} + +static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN); +} + +static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN); +} + +static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) +{ + u32 data; + + xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data); + xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); +} + +static int xgene_enet_reset(struct xgene_enet_pdata *pdata) +{ + if (!xgene_ring_mgr_init(pdata)) + return -ENODEV; + + clk_prepare_enable(pdata->clk); + clk_disable_unprepare(pdata->clk); + clk_prepare_enable(pdata->clk); + + xgene_enet_ecc_init(pdata); + xgene_enet_config_ring_if_assoc(pdata); + + return 0; +} + +static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, + u32 dst_ring_num, u16 bufpool_id) +{ + u32 cb, fpsel; + + xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb); + cb |= CFG_CLE_BYPASS_EN0; + CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb); + + fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20; + xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb); + CFG_CLE_DSTQID0_SET(&cb, dst_ring_num); + CFG_CLE_FPSEL0_SET(&cb, fpsel); + xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb); +} + +static void xgene_enet_shutdown(struct xgene_enet_pdata 
*pdata) +{ + clk_disable_unprepare(pdata->clk); +} + +static void xgene_enet_link_state(struct work_struct *work) +{ + struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work), + struct xgene_enet_pdata, link_work); + struct net_device *ndev = pdata->ndev; + u32 link_status, poll_interval; + + link_status = xgene_enet_link_status(pdata); + if (link_status) { + if (!netif_carrier_ok(ndev)) { + netif_carrier_on(ndev); + xgene_xgmac_init(pdata); + xgene_xgmac_rx_enable(pdata); + xgene_xgmac_tx_enable(pdata); + netdev_info(ndev, "Link is Up - 10Gbps\n"); + } + poll_interval = PHY_POLL_LINK_ON; + } else { + if (netif_carrier_ok(ndev)) { + xgene_xgmac_rx_disable(pdata); + xgene_xgmac_tx_disable(pdata); + netif_carrier_off(ndev); + netdev_info(ndev, "Link is Down\n"); + } + poll_interval = PHY_POLL_LINK_OFF; + } + + schedule_delayed_work(&pdata->link_work, poll_interval); +} + +struct xgene_mac_ops xgene_xgmac_ops = { + .init = xgene_xgmac_init, + .reset = xgene_xgmac_reset, + .rx_enable = xgene_xgmac_rx_enable, + .tx_enable = xgene_xgmac_tx_enable, + .rx_disable = xgene_xgmac_rx_disable, + .tx_disable = xgene_xgmac_tx_disable, + .set_mac_addr = xgene_xgmac_set_mac_addr, + .link_state = xgene_enet_link_state +}; + +struct xgene_port_ops xgene_xgport_ops = { + .reset = xgene_enet_reset, + .cle_bypass = xgene_enet_xgcle_bypass, + .shutdown = xgene_enet_shutdown, +}; --- linux-3.13.0.orig/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ linux-3.13.0/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -0,0 +1,53 @@ +/* Applied Micro X-Gene SoC Ethernet Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Authors: Iyappan Subramanian + * Keyur Chudgar + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_XGMAC_H__ +#define __XGENE_ENET_XGMAC_H__ + +#define BLOCK_AXG_MAC_OFFSET 0x0800 +#define BLOCK_AXG_MAC_CSR_OFFSET 0x2000 + +#define AXGMAC_CONFIG_0 0x0000 +#define AXGMAC_CONFIG_1 0x0004 +#define HSTMACRST BIT(31) +#define HSTTCTLEN BIT(31) +#define HSTTFEN BIT(30) +#define HSTRCTLEN BIT(29) +#define HSTRFEN BIT(28) +#define HSTPPEN BIT(7) +#define HSTDRPLT64 BIT(5) +#define HSTLENCHK BIT(3) +#define HSTMACADR_LSW_ADDR 0x0010 +#define HSTMACADR_MSW_ADDR 0x0014 +#define HSTMAXFRAME_LENGTH_ADDR 0x0020 + +#define XG_RSIF_CONFIG_REG_ADDR 0x00a0 +#define XCLE_BYPASS_REG0_ADDR 0x0160 +#define XCLE_BYPASS_REG1_ADDR 0x0164 +#define XG_CFG_BYPASS_ADDR 0x0204 +#define XG_LINK_STATUS_ADDR 0x0228 +#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c +#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 +#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 + +extern struct xgene_mac_ops xgene_xgmac_ops; +extern struct xgene_port_ops xgene_xgport_ops; + +#endif /* __XGENE_ENET_XGMAC_H__ */ --- linux-3.13.0.orig/drivers/net/ethernet/atheros/alx/main.c +++ linux-3.13.0/drivers/net/ethernet/atheros/alx/main.c @@ -184,15 +184,16 @@ schedule_work(&alx->reset_wk); } -static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) +static int alx_clean_rx_irq(struct alx_priv *alx, int budget) { struct alx_rx_queue *rxq = &alx->rxq; struct alx_rrd *rrd; struct alx_buffer *rxb; struct sk_buff *skb; u16 length, rfd_cleaned = 0; + int work = 0; - while (budget > 0) { + while (work < budget) { rrd = &rxq->rrd[rxq->rrd_read_idx]; if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) break; @@ -203,7 +204,7 @@ ALX_GET_FIELD(le32_to_cpu(rrd->word0), RRD_NOR) != 1) { alx_schedule_reset(alx); - return 0; + return work; } rxb = &rxq->bufs[rxq->read_idx]; @@ -243,7 +244,7 @@ } napi_gro_receive(&alx->napi, skb); - budget--; + work++; next_pkt: if (++rxq->read_idx == alx->rx_ringsz) @@ -258,21 +259,22 @@ if (rfd_cleaned) alx_refill_rx_ring(alx, GFP_ATOMIC); - return budget > 0; + return work; } static int alx_poll(struct napi_struct *napi, int budget) { struct alx_priv *alx = container_of(napi, struct alx_priv, napi); struct alx_hw *hw = &alx->hw; - bool complete = true; unsigned long flags; + bool tx_complete; + int work; - complete = alx_clean_tx_irq(alx) && - alx_clean_rx_irq(alx, budget); + tx_complete = alx_clean_tx_irq(alx); + work = alx_clean_rx_irq(alx, budget); - if (!complete) - return 1; + if (!tx_complete || work == budget) + return budget; napi_complete(&alx->napi); @@ -284,7 +286,7 @@ alx_post_write(hw); - return 0; + return work; } static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/b44.c +++ linux-3.13.0/drivers/net/ethernet/broadcom/b44.c @@ -1654,7 +1654,7 @@ unsigned int start; do { - start = u64_stats_fetch_begin_bh(&hwstat->syncp); + start = u64_stats_fetch_begin_irq(&hwstat->syncp); /* Convert HW stats into rtnl_link_stats64 stats. 
*/ nstat->rx_packets = hwstat->rx_pkts; @@ -1688,7 +1688,7 @@ /* Carrier lost counter seems to be broken for some devices */ nstat->tx_carrier_errors = hwstat->tx_carrier_lost; #endif - } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); return nstat; } @@ -2021,12 +2021,12 @@ do { data_src = &hwstat->tx_good_octets; data_dst = data; - start = u64_stats_fetch_begin_bh(&hwstat->syncp); + start = u64_stats_fetch_begin_irq(&hwstat->syncp); for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) *data_dst++ = *data_src++; - } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); } static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2.c +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2.c @@ -2490,6 +2490,7 @@ bp->fw_wr_seq++; msg_data |= bp->fw_wr_seq; + bp->fw_last_msg = msg_data; bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); @@ -2868,7 +2869,7 @@ sw_cons = BNX2_NEXT_TX_BD(sw_cons); tx_bytes += skb->len; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_pkt++; if (tx_pkt == budget) break; @@ -3982,8 +3983,23 @@ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; } - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); + if (!(bp->flags & BNX2_FLAG_NO_WOL)) { + u32 val; + + wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; + if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { + bnx2_fw_sync(bp, wol_msg, 1, 0); + return; + } + /* Tell firmware not to power down the PHY yet, otherwise + * the chip will take a long time to respond to MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, + val | BNX2_PORT_FEATURE_ASF_ENABLED); + bnx2_fw_sync(bp, wol_msg, 1, 0); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); + } } @@ -4015,9 +4031,22 @@ if (bp->wol) pci_set_power_state(bp->pdev, PCI_D3hot); - } else { - pci_set_power_state(bp->pdev, PCI_D3hot); + break; + } + if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + /* Tell firmware not to power down the PHY yet, + * otherwise the other port may not respond to + * MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + val &= ~BNX2_CONDITION_PM_STATE_MASK; + val |= BNX2_CONDITION_PM_STATE_UNPREP; + bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); + } + pci_set_power_state(bp->pdev, PCI_D3hot); /* No more memory access after this point until * device is brought back to D0. 
@@ -6593,7 +6622,7 @@ mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -6686,7 +6715,7 @@ PCI_DMA_TODEVICE); } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2.h +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2.h @@ -6890,6 +6890,7 @@ u16 fw_wr_seq; u16 fw_drv_pulse_wr_seq; + u32 fw_last_msg; int rx_max_ring; int rx_ring_size; @@ -7396,6 +7397,10 @@ #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 +#define BNX2_CONDITION_PM_STATE_MASK 0x00030000 +#define BNX2_CONDITION_PM_STATE_FULL 0x00030000 +#define BNX2_CONDITION_PM_STATE_PREP 0x00020000 +#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000 #define BNX2_BC_STATE_DEBUG_CMD 0x1dc #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -337,6 +337,7 @@ u8 flags; /* Set on the first BD descriptor when there is a split BD */ #define BNX2X_TSO_SPLIT_BD (1<<0) +#define BNX2X_HAS_SECOND_PBD (1<<1) }; struct sw_rx_page { --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -186,6 +186,12 @@ --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { + /* Skip second parse bd... */ + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; @@ -755,7 +761,8 @@ return; } - bnx2x_frag_free(fp, new_data); + if (new_data) + bnx2x_frag_free(fp, new_data); drop: /* drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, @@ -861,6 +868,18 @@ bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); + /* A rmb() is required to ensure that the CQE is not read + * before it is written by the adapter DMA. PCI ordering + * rules will make sure the other fields are written before + * the marker at the end of struct eth_fast_path_rx_cqe + * but without rmb() a weakly ordered processor can process + * stale data. Without the barrier TPA state-machine might + * enter inconsistent state and kernel stack might be + * provided with incorrect packet description - these lead + * to various kernel crashed. 
+ */ + rmb(); + cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; @@ -1834,7 +1853,7 @@ } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct bnx2x *bp = netdev_priv(dev); @@ -1856,7 +1875,7 @@ } /* select a non-FCoE queue */ - return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); + return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); } void bnx2x_set_num_queues(struct bnx2x *bp) @@ -3081,7 +3100,7 @@ } #endif if (!bnx2x_fp_lock_napi(fp)) - return work_done; + return budget; for_each_cos_in_tx_queue(fp, cos) if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) @@ -3824,6 +3843,9 @@ /* set encapsulation flag in start BD */ SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_TUNNEL_EXIST, 1); + + tx_buf->flags |= BNX2X_HAS_SECOND_PBD; + nbd++; } else if (xmit_type & XMIT_CSUM) { /* Set PBD in checksum offload case w/o encapsulation */ --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -525,7 +525,7 @@ /* select_queue callback */ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + void *accel_priv, select_queue_fallback_t fallback); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -3749,7 +3749,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 lane, i, cl72_ctrl, an_adv = 0; + u16 lane, i, cl72_ctrl, an_adv = 0, val; + u32 wc_lane_config; struct bnx2x *bp = params->bp; static struct bnx2x_reg_set reg_set[] = { {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, @@ -3868,15 +3869,27 @@ /* Enable Auto-Detect to support 1G over CL37 as well */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); - + wc_lane_config = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + shared_hw_config.wc_lane_config)); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val); /* Force cl48 sync_status LOW to avoid getting stuck in CL73 * parallel-detect loop when CL73 and CL37 are enabled. */ - CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, 0); + val |= 1 << 11; + + /* Restore Polarity settings in case it was run over by + * previous link owner + */ + if (wc_lane_config & + (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane)) + val |= 3 << 2; + else + val &= ~(3 << 2); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800); - bnx2x_set_aer_mmd(params, phy); + MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), + val); bnx2x_disable_kr2(params, vars, phy); } --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ linux-3.13.0/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12262,6 +12262,10 @@ /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + + /* Set PCIe reset type to fundamental for EEH recovery */ + pdev->needs_freset = 1; + /* * Clean the following indirect addresses for all functions since it * is not used by the driver. 
@@ -12998,8 +13002,8 @@ netdev_reset_tc(bp->dev); del_timer_sync(&bp->timer); - cancel_delayed_work(&bp->sp_task); - cancel_delayed_work(&bp->period_task); + cancel_delayed_work_sync(&bp->sp_task); + cancel_delayed_work_sync(&bp->period_task); spin_lock_bh(&bp->stats_lock); bp->stats_state = STATS_STATE_DISABLED; --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/tg3.c +++ linux-3.13.0/drivers/net/ethernet/broadcom/tg3.c @@ -326,6 +326,7 @@ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, + {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791), .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY}, {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795), @@ -6577,7 +6578,7 @@ pkts_compl++; bytes_compl += skb->len; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); if (unlikely(tx_bug)) { tg3_tx_recover(tp); @@ -6827,8 +6828,7 @@ work_mask |= opaque_key; - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + if (desc->err_vlan & RXD_ERR_MASK) { drop_it: tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); @@ -6908,8 +6908,9 @@ skb->protocol = eth_type_trans(skb, tp->dev); if (len > (tp->dev->mtu + ETH_HLEN) && - skb->protocol != htons(ETH_P_8021Q)) { - dev_kfree_skb(skb); + skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) { + dev_kfree_skb_any(skb); goto drop_it_no_recycle; } @@ -7792,7 +7793,7 @@ PCI_DMA_TODEVICE); /* Make sure the mapping succeeded */ if (pci_dma_mapping_error(tp->pdev, new_addr)) { - dev_kfree_skb(new_skb); + dev_kfree_skb_any(new_skb); ret = -1; } else { u32 save_entry = *entry; @@ -7807,13 +7808,13 @@ new_skb->len, base_flags, mss, vlan)) { tg3_tx_skb_unmap(tnapi, save_entry, -1); - dev_kfree_skb(new_skb); + dev_kfree_skb_any(new_skb); ret = -1; } } } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); *pskb = new_skb; return ret; } @@ -7856,7 +7857,7 @@ } while (segs); tg3_tso_bug_end: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -7900,8 +7901,6 @@ entry = tnapi->tx_prod; base_flags = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) - base_flags |= TXD_FLAG_TCPUDP_CSUM; mss = skb_shinfo(skb)->gso_size; if (mss) { @@ -7917,6 +7916,13 @@ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + /* HW/FW can not correctly segment packets that have been + * vlan encapsulated. + */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + return tg3_tso_bug(tp, skb); + if (!skb_is_gso_v6(skb)) { iph->check = 0; iph->tot_len = htons(mss + hdr_len); @@ -7963,6 +7969,17 @@ base_flags |= tsflags << 12; } } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* HW/FW can not correctly checksum packets that have been + * vlan encapsulated. 
+ */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) { + if (skb_checksum_help(skb)) + goto drop; + } else { + base_flags |= TXD_FLAG_TCPUDP_CSUM; + } } if (tg3_flag(tp, USE_JUMBO_BDFLAG) && @@ -8078,7 +8095,7 @@ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; drop: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); drop_nofree: tp->tx_dropped++; return NETDEV_TX_OK; @@ -8516,7 +8533,8 @@ if (tnapi->rx_rcb) memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); - if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { + if (tnapi->prodring.rx_std && + tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { tg3_free_rings(tp); return -ENOMEM; } @@ -12209,7 +12227,9 @@ if (tg3_flag(tp, MAX_RXPEND_64) && tp->rx_pending > 63) tp->rx_pending = 63; - tp->rx_jumbo_pending = ering->rx_jumbo_pending; + + if (tg3_flag(tp, JUMBO_RING_ENABLE)) + tp->rx_jumbo_pending = ering->rx_jumbo_pending; for (i = 0; i < tp->irq_max; i++) tp->napi[i].tx_pending = ering->tx_pending; @@ -13965,12 +13985,12 @@ tg3_netif_stop(tp); + tg3_set_mtu(dev, tp, new_mtu); + tg3_full_lock(tp, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_set_mtu(dev, tp, new_mtu); - /* Reset PHY, otherwise the read DMA engine will be in a mode that * breaks all requests to 256 bytes. */ @@ -17486,8 +17506,6 @@ tg3_init_bufmgr_config(tp); - features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - /* 5700 B0 chips do not support checksumming correctly due * to hardware bugs. */ @@ -17519,7 +17537,8 @@ features |= NETIF_F_TSO_ECN; } - dev->features |= features; + dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features |= features; /* @@ -17548,23 +17567,6 @@ goto err_out_apeunmap; } - /* - * Reset chip in case UNDI or EFI driver did not shutdown - * DMA self test will enable WDMAC and we'll see (spurious) - * pending DMA on the PCI bus at that point. - */ - if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || - (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { - tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - } - - err = tg3_test_dma(tp); - if (err) { - dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); - goto err_out_apeunmap; - } - intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; @@ -17609,6 +17611,23 @@ sndmbx += 0xc; } + /* + * Reset chip in case UNDI or EFI driver did not shutdown + * DMA self test will enable WDMAC and we'll see (spurious) + * pending DMA on the PCI bus at that point. 
+ */ + if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || + (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { + tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + } + + err = tg3_test_dma(tp); + if (err) { + dev_err(&pdev->dev, "DMA engine test failed, aborting\n"); + goto err_out_apeunmap; + } + tg3_init_coal(tp); pci_set_drvdata(pdev, dev); --- linux-3.13.0.orig/drivers/net/ethernet/broadcom/tg3.h +++ linux-3.13.0/drivers/net/ethernet/broadcom/tg3.h @@ -2601,7 +2601,11 @@ #define RXD_ERR_TOO_SMALL 0x00400000 #define RXD_ERR_NO_RESOURCES 0x00800000 #define RXD_ERR_HUGE_FRAME 0x01000000 -#define RXD_ERR_MASK 0xffff0000 + +#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ + RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ + RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ + RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) u32 reserved; u32 opaque; --- linux-3.13.0.orig/drivers/net/ethernet/cadence/macb.c +++ linux-3.13.0/drivers/net/ethernet/cadence/macb.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "macb.h" @@ -1755,7 +1754,6 @@ struct phy_device *phydev; u32 config; int err = -ENXIO; - struct pinctrl *pinctrl; const char *mac; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1764,15 +1762,6 @@ goto err_out; } - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (IS_ERR(pinctrl)) { - err = PTR_ERR(pinctrl); - if (err == -EPROBE_DEFER) - goto err_out; - - dev_warn(&pdev->dev, "No pinctrl provided\n"); - } - err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) --- linux-3.13.0.orig/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ linux-3.13.0/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -496,6 +496,7 @@ spinlock_t db_lock; int db_disabled; unsigned short db_pidx; + unsigned short db_pidx_inc; u64 udb; }; --- linux-3.13.0.orig/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ linux-3.13.0/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3563,14 +3563,25 @@ static void disable_txq_db(struct sge_txq *q) { - spin_lock_irq(&q->db_lock); + unsigned long flags; + + spin_lock_irqsave(&q->db_lock, flags); q->db_disabled = 1; - spin_unlock_irq(&q->db_lock); + spin_unlock_irqrestore(&q->db_lock, flags); } -static void enable_txq_db(struct sge_txq *q) +static void enable_txq_db(struct adapter *adap, struct sge_txq *q) { spin_lock_irq(&q->db_lock); + if (q->db_pidx_inc) { + /* Make sure that all writes to the TX descriptors + * are committed before we tell HW about them. 
+ */ + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); + q->db_pidx_inc = 0; + } q->db_disabled = 0; spin_unlock_irq(&q->db_lock); } @@ -3592,11 +3603,32 @@ int i; for_each_ethrxq(&adap->sge, i) - enable_txq_db(&adap->sge.ethtxq[i].q); + enable_txq_db(adap, &adap->sge.ethtxq[i].q); for_each_ofldrxq(&adap->sge, i) - enable_txq_db(&adap->sge.ofldtxq[i].q); + enable_txq_db(adap, &adap->sge.ofldtxq[i].q); for_each_port(adap, i) - enable_txq_db(&adap->sge.ctrlq[i].q); + enable_txq_db(adap, &adap->sge.ctrlq[i].q); +} + +static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) +{ + if (adap->uld_handle[CXGB4_ULD_RDMA]) + ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], + cmd); +} + +static void process_db_full(struct work_struct *work) +{ + struct adapter *adap; + + adap = container_of(work, struct adapter, db_full_task); + + drain_db_fifo(adap, dbfifo_drain_delay); + enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, + DBFIFO_HP_INT | DBFIFO_LP_INT); } static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) @@ -3604,7 +3636,7 @@ u16 hw_pidx, hw_cidx; int ret; - spin_lock_bh(&q->db_lock); + spin_lock_irq(&q->db_lock); ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); if (ret) goto out; @@ -3621,7 +3653,8 @@ } out: q->db_disabled = 0; - spin_unlock_bh(&q->db_lock); + q->db_pidx_inc = 0; + spin_unlock_irq(&q->db_lock); if (ret) CH_WARN(adap, "DB drop recovery failed.\n"); } @@ -3637,29 +3670,6 @@ sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); } -static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) -{ - mutex_lock(&uld_mutex); - if (adap->uld_handle[CXGB4_ULD_RDMA]) - ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], - cmd); - mutex_unlock(&uld_mutex); -} - -static void process_db_full(struct work_struct *work) -{ - struct adapter *adap; - - adap = container_of(work, struct adapter, db_full_task); - - notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); - drain_db_fifo(adap, dbfifo_drain_delay); - t4_set_reg_field(adap, SGE_INT_ENABLE3, - DBFIFO_HP_INT | DBFIFO_LP_INT, - DBFIFO_HP_INT | DBFIFO_LP_INT); - notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); -} - static void process_db_drop(struct work_struct *work) { struct adapter *adap; @@ -3667,11 +3677,13 @@ adap = container_of(work, struct adapter, db_drop_task); if (is_t4(adap->params.chip)) { - disable_dbs(adap); + drain_db_fifo(adap, dbfifo_drain_delay); notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); - drain_db_fifo(adap, 1); + drain_db_fifo(adap, dbfifo_drain_delay); recover_all_queues(adap); + drain_db_fifo(adap, dbfifo_drain_delay); enable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); } else { u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; @@ -3712,6 +3724,8 @@ void t4_db_full(struct adapter *adap) { if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); t4_set_reg_field(adap, SGE_INT_ENABLE3, DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); @@ -3720,8 +3734,11 @@ void t4_db_dropped(struct adapter *adap) { - if (is_t4(adap->params.chip)) - queue_work(workq, &adap->db_drop_task); + if (is_t4(adap->params.chip)) { + disable_dbs(adap); + notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); + } + queue_work(workq, &adap->db_drop_task); } static void uld_attach(struct adapter *adap, unsigned int uld) --- 
linux-3.13.0.orig/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ linux-3.13.0/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -843,9 +843,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) { unsigned int *wr, index; + unsigned long flags; wmb(); /* write descriptors before telling HW */ - spin_lock(&q->db_lock); + spin_lock_irqsave(&q->db_lock, flags); if (!q->db_disabled) { if (is_t4(adap->params.chip)) { t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), @@ -861,9 +862,10 @@ writel(n, adap->bar2 + q->udb + 8); wmb(); } - } + } else + q->db_pidx_inc += n; q->db_pidx = q->pidx; - spin_unlock(&q->db_lock); + spin_unlock_irqrestore(&q->db_lock, flags); } /** --- linux-3.13.0.orig/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ linux-3.13.0/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -116,6 +116,7 @@ CPL_ERR_KEEPALIVE_TIMEDOUT = 34, CPL_ERR_RTX_NEG_ADVICE = 35, CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_KEEPALV_NEG_ADVICE = 37, CPL_ERR_ABORT_FAILED = 42, CPL_ERR_IWARP_FLM = 50, }; --- linux-3.13.0.orig/drivers/net/ethernet/cisco/enic/enic_main.c +++ linux-3.13.0/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1043,10 +1043,14 @@ skb->l4_rxhash = true; } - if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { - skb->csum = htons(checksum); - skb->ip_summed = CHECKSUM_COMPLETE; - } + /* Hardware does not provide whole packet checksum. It only + * provides pseudo checksum. Since hw validates the packet + * checksum but not provide us the checksum value. use + * CHECSUM_UNNECESSARY. + */ + if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok && + ipv4_csum_ok) + skb->ip_summed = CHECKSUM_UNNECESSARY; if (vlan_stripped) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); --- linux-3.13.0.orig/drivers/net/ethernet/emulex/benet/be.h +++ linux-3.13.0/drivers/net/ethernet/emulex/benet/be.h @@ -34,7 +34,7 @@ #include "be_hw.h" #include "be_roce.h" -#define DRV_VER "4.9.224.0u" +#define DRV_VER "10.0.600.0u" #define DRV_NAME "be2net" #define BE_NAME "Emulex BladeEngine2" #define BE3_NAME "Emulex BladeEngine3" @@ -42,7 +42,7 @@ #define OC_NAME_BE OC_NAME "(be3)" #define OC_NAME_LANCER OC_NAME "(Lancer)" #define OC_NAME_SH OC_NAME "(Skyhawk)" -#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver" +#define DRV_DESC "Emulex OneConnect NIC Driver" #define BE_VENDOR_ID 0x19a2 #define EMULEX_VENDOR_ID 0x10df @@ -283,7 +283,6 @@ u32 rss_hash; u16 vlan_tag; u16 pkt_size; - u16 rxq_idx; u16 port; u8 vlanf; u8 num_rcvd; @@ -493,7 +492,7 @@ u16 pvid; struct phy_info phy; u8 wol_cap; - bool wol; + bool wol_en; u32 uc_macs; /* Count of secondary UC MAC programmed */ u16 asic_rev; u16 qnq_vid; --- linux-3.13.0.orig/drivers/net/ethernet/emulex/benet/be_cmds.c +++ linux-3.13.0/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1101,23 +1101,22 @@ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (lancer_chip(adapter)) { - req->hdr.version = 1; - req->cq_id = cpu_to_le16(cq->id); - - AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, - be_encoded_q_len(mccq->len)); - AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, - ctxt, cq->id); - AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, - ctxt, 1); - - } else { + if (BEx_chip(adapter)) { AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct 
amap_mcc_context_be, cq_id, ctxt, cq->id); + } else { + req->hdr.version = 1; + req->cq_id = cpu_to_le16(cq->id); + + AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, + be_encoded_q_len(mccq->len)); + AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, + ctxt, cq->id); + AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, + ctxt, 1); } /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ @@ -1187,7 +1186,7 @@ int status; status = be_cmd_mccq_ext_create(adapter, mccq, cq); - if (status && !lancer_chip(adapter)) { + if (status && BEx_chip(adapter)) { dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " "or newer to avoid conflicting priorities between NIC " "and FCoE traffic"); @@ -2692,6 +2691,13 @@ struct be_cmd_resp_get_fn_privileges *resp = embedded_payload(wrb); *privilege = le32_to_cpu(resp->privilege_mask); + + /* In UMC mode FW does not return right privileges. + * Override with correct privilege equivalent to PF. + */ + if (BEx_chip(adapter) && be_is_mc(adapter) && + be_physfn(adapter)) + *privilege = MAX_PRIVILEGES; } err: @@ -2736,7 +2742,8 @@ * If pmac_id is returned, pmac_id_valid is returned as true */ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_valid, u32 *pmac_id, u8 domain) + bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, + u8 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; @@ -2774,7 +2781,7 @@ req->mac_type = MAC_ADDRESS_TYPE_NETWORK; if (*pmac_id_valid) { req->mac_id = cpu_to_le32(*pmac_id); - req->iface_id = cpu_to_le16(adapter->if_handle); + req->iface_id = cpu_to_le16(if_handle); req->perm_override = 0; } else { req->perm_override = 1; @@ -2827,17 +2834,21 @@ return status; } -int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac, + u32 if_handle, bool active, u32 domain) { - bool active = true; + if (!active) + be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, + if_handle, domain); if (BEx_chip(adapter)) return be_cmd_mac_addr_query(adapter, mac, false, - adapter->if_handle, curr_pmac_id); + if_handle, curr_pmac_id); else /* Fetch the MAC address using pmac_id */ return be_cmd_get_mac_from_list(adapter, mac, &active, - &curr_pmac_id, 0); + &curr_pmac_id, + if_handle, domain); } int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) @@ -2856,7 +2867,7 @@ adapter->if_handle, 0); } else { status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, - NULL, 0); + NULL, adapter->if_handle, 0); } return status; @@ -2917,7 +2928,8 @@ int status; status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, - &pmac_id, dom); + &pmac_id, if_id, dom); + if (!status && active_mac) be_cmd_pmac_del(adapter, if_id, pmac_id, dom); @@ -2997,7 +3009,7 @@ ctxt, intf_id); AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); - if (!BEx_chip(adapter)) { + if (!BEx_chip(adapter) && mode) { AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, adapter->hba_port_num); AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); @@ -3028,14 +3040,16 @@ { struct be_mcc_wrb *wrb; struct be_cmd_req_acpi_wol_magic_config_v1 *req; - int status; - int payload_len = sizeof(*req); + int status = 0; struct be_dma_mem cmd; if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, CMD_SUBSYSTEM_ETH)) return -EPERM; + if (be_is_wol_excluded(adapter)) + return status; + if 
(mutex_lock_interruptible(&adapter->mbox_lock)) return -1; @@ -3060,7 +3074,7 @@ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, - payload_len, wrb, &cmd); + sizeof(*req), wrb, &cmd); req->hdr.version = 1; req->query_options = BE_GET_WOL_CAP; @@ -3070,13 +3084,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; - /* the command could succeed misleadingly on old f/w - * which is not aware of the V1 version. fake an error. */ - if (resp->hdr.response_length < payload_len) { - status = -1; - goto err; - } adapter->wol_cap = resp->wol_settings; + if (adapter->wol_cap & BE_WOL_CAP) + adapter->wol_en = true; } err: mutex_unlock(&adapter->mbox_lock); @@ -3085,6 +3095,76 @@ return status; } + +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status; + int i, j; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, + &extfat_cmd.dma); + if (!extfat_cmd.va) + return -ENOMEM; + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (status) + goto err; + + cfgs = (struct be_fat_conf_params *) + (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); + for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { + u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); + for (j = 0; j < num_modes; j++) { + if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) + cfgs->module[i].trace_lvl[j].dbg_lvl = + cpu_to_le32(level); + } + } + + status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); +err: + pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); + return status; +} + +int be_cmd_get_fw_log_level(struct be_adapter *adapter) +{ + struct be_dma_mem extfat_cmd; + struct be_fat_conf_params *cfgs; + int status, j; + int level = 0; + + memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); + extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); + extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, + &extfat_cmd.dma); + + if (!extfat_cmd.va) { + dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", + __func__); + goto err; + } + + status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); + if (!status) { + cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + + sizeof(struct be_cmd_resp_hdr)); + for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { + if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) + level = cfgs->module[0].trace_lvl[j].dbg_lvl; + } + } + pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); +err: + return level; +} + int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd) { @@ -3608,6 +3688,40 @@ mutex_unlock(&adapter->mbox_lock); return status; } + +/* Uses MBOX */ +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) +{ + struct be_cmd_req_get_active_profile *req; + struct be_mcc_wrb *wrb; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), + wrb, NULL); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct 
be_cmd_resp_get_active_profile *resp = + embedded_payload(wrb); + *profile_id = le16_to_cpu(resp->active_profile_id); + } + +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, int wrb_payload_size, u16 *cmd_status, u16 *ext_status) --- linux-3.13.0.orig/drivers/net/ethernet/emulex/benet/be_cmds.h +++ linux-3.13.0/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -216,6 +216,7 @@ #define OPCODE_COMMON_GET_FUNC_CONFIG 160 #define OPCODE_COMMON_GET_PROFILE_CONFIG 164 #define OPCODE_COMMON_SET_PROFILE_CONFIG 165 +#define OPCODE_COMMON_GET_ACTIVE_PROFILE 167 #define OPCODE_COMMON_SET_HSW_CONFIG 153 #define OPCODE_COMMON_GET_FN_PRIVILEGES 170 #define OPCODE_COMMON_READ_OBJECT 171 @@ -452,7 +453,7 @@ u8 rsvd2[32]; } __packed; -struct amap_mcc_context_lancer { +struct amap_mcc_context_v1 { u8 async_cq_id[16]; u8 ring_size[4]; u8 rsvd0[12]; @@ -476,7 +477,7 @@ u16 num_pages; u16 cq_id; u32 async_event_bitmap[1]; - u8 context[sizeof(struct amap_mcc_context_be) / 8]; + u8 context[sizeof(struct amap_mcc_context_v1) / 8]; struct phys_addr pages[8]; } __packed; @@ -1097,6 +1098,14 @@ u32 function_caps; }; +/* Is BE in a multi-channel mode */ +static inline bool be_is_mc(struct be_adapter *adapter) +{ + return adapter->function_mode & FLEX10_MODE || + adapter->function_mode & VNIC_MODE || + adapter->function_mode & UMC_ENABLED; +} + /******************** RSS Config ****************************************/ /* RSS type Input parameters used to compute RX hash * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4 @@ -1917,6 +1926,17 @@ struct be_cmd_resp_hdr hdr; }; +struct be_cmd_req_get_active_profile { + struct be_cmd_req_hdr hdr; + u32 rsvd; +} __packed; + +struct be_cmd_resp_get_active_profile { + struct be_cmd_resp_hdr hdr; + u16 active_profile_id; + u16 next_profile_id; +} __packed; + struct be_cmd_enable_disable_vf { struct be_cmd_req_hdr hdr; u8 enable; @@ -2037,8 +2057,10 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, u32 vf_num); int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, - bool *pmac_id_active, u32 *pmac_id, u8 domain); -int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac); + bool *pmac_id_active, u32 *pmac_id, + u32 if_handle, u8 domain); +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac, + u32 if_handle, bool active, u32 domain); int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain); @@ -2048,6 +2070,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, u16 intf_id, u8 *mode); int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level); +int be_cmd_get_fw_log_level(struct be_adapter *adapter); int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, struct be_dma_mem *cmd); int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, @@ -2063,6 +2087,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 domain); int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain); +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); --- linux-3.13.0.orig/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ 
linux-3.13.0/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -357,10 +357,10 @@ struct be_rx_stats *stats = rx_stats(rxo); do { - start = u64_stats_fetch_begin_bh(&stats->sync); + start = u64_stats_fetch_begin_irq(&stats->sync); data[base] = stats->rx_bytes; data[base + 1] = stats->rx_pkts; - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { p = (u8 *)stats + et_rx_stats[i].offset; @@ -373,19 +373,19 @@ struct be_tx_stats *stats = tx_stats(txo); do { - start = u64_stats_fetch_begin_bh(&stats->sync_compl); + start = u64_stats_fetch_begin_irq(&stats->sync_compl); data[base] = stats->tx_compl; - } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); + } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start)); do { - start = u64_stats_fetch_begin_bh(&stats->sync); + start = u64_stats_fetch_begin_irq(&stats->sync); for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { p = (u8 *)stats + et_tx_stats[i].offset; data[base + i] = (et_tx_stats[i].size == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - } while (u64_stats_fetch_retry_bh(&stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&stats->sync, start)); base += ETHTOOL_TXSTATS_NUM; } } @@ -713,12 +713,13 @@ { struct be_adapter *adapter = netdev_priv(netdev); - if (be_is_wol_supported(adapter)) { + if (adapter->wol_cap & BE_WOL_CAP) { wol->supported |= WAKE_MAGIC; - if (adapter->wol) + if (adapter->wol_en) wol->wolopts |= WAKE_MAGIC; - } else + } else { wol->wolopts = 0; + } memset(&wol->sopass, 0, sizeof(wol->sopass)); } @@ -730,15 +731,15 @@ if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; - if (!be_is_wol_supported(adapter)) { + if (!(adapter->wol_cap & BE_WOL_CAP)) { dev_warn(&adapter->pdev->dev, "WOL not supported\n"); return -EOPNOTSUPP; } if (wol->wolopts & WAKE_MAGIC) - adapter->wol = true; + adapter->wol_en = true; else - adapter->wol = false; + adapter->wol_en = false; return 0; } @@ -904,73 +905,21 @@ { struct be_adapter *adapter = netdev_priv(netdev); - if (lancer_chip(adapter)) { - dev_err(&adapter->pdev->dev, "Operation not supported\n"); - return -EOPNOTSUPP; - } - return adapter->msg_enable; } -static void be_set_fw_log_level(struct be_adapter *adapter, u32 level) -{ - struct be_dma_mem extfat_cmd; - struct be_fat_conf_params *cfgs; - int status; - int i, j; - - memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); - extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); - if (!extfat_cmd.va) { - dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", - __func__); - goto err; - } - status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); - if (!status) { - cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + - sizeof(struct be_cmd_resp_hdr)); - for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { - u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); - for (j = 0; j < num_modes; j++) { - if (cfgs->module[i].trace_lvl[j].mode == - MODE_UART) - cfgs->module[i].trace_lvl[j].dbg_lvl = - cpu_to_le32(level); - } - } - status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, - cfgs); - if (status) - dev_err(&adapter->pdev->dev, - "Message level set failed\n"); - } else { - dev_err(&adapter->pdev->dev, "Message level get failed\n"); - } - - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); -err: - return; -} - static void be_set_msg_level(struct net_device *netdev, 
u32 level) { struct be_adapter *adapter = netdev_priv(netdev); - if (lancer_chip(adapter)) { - dev_err(&adapter->pdev->dev, "Operation not supported\n"); - return; - } - if (adapter->msg_enable == level) return; if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW)) - be_set_fw_log_level(adapter, level & NETIF_MSG_HW ? - FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); + if (BEx_chip(adapter)) + be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ? + FW_LOG_LEVEL_DEFAULT : + FW_LOG_LEVEL_FATAL); adapter->msg_enable = level; return; --- linux-3.13.0.orig/drivers/net/ethernet/emulex/benet/be_main.c +++ linux-3.13.0/drivers/net/ethernet/emulex/benet/be_main.c @@ -121,12 +121,6 @@ "Unknown" }; -/* Is BE in a multi-channel mode */ -static inline bool be_is_mc(struct be_adapter *adapter) { - return (adapter->function_mode & FLEX10_MODE || - adapter->function_mode & VNIC_MODE || - adapter->function_mode & UMC_ENABLED); -} static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { @@ -258,6 +252,12 @@ if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + /* Proceed further only if, User provided MAC is different + * from active MAC + */ + if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) + return 0; + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. * On BE3, this cmd will always fail if the VF doesn't have the @@ -280,7 +280,8 @@ /* Decide if the new MAC is successfully activated only after * querying the FW */ - status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac); + status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac, + adapter->if_handle, true, 0); if (status) goto err; @@ -590,10 +591,10 @@ for_all_rx_queues(adapter, rxo, i) { const struct be_rx_stats *rx_stats = rx_stats(rxo); do { - start = u64_stats_fetch_begin_bh(&rx_stats->sync); + start = u64_stats_fetch_begin_irq(&rx_stats->sync); pkts = rx_stats(rxo)->rx_pkts; bytes = rx_stats(rxo)->rx_bytes; - } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); stats->rx_packets += pkts; stats->rx_bytes += bytes; stats->multicast += rx_stats(rxo)->rx_mcast_pkts; @@ -604,10 +605,10 @@ for_all_tx_queues(adapter, txo, i) { const struct be_tx_stats *tx_stats = tx_stats(txo); do { - start = u64_stats_fetch_begin_bh(&tx_stats->sync); + start = u64_stats_fetch_begin_irq(&tx_stats->sync); pkts = tx_stats(txo)->tx_pkts; bytes = tx_stats(txo)->tx_bytes; - } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start)); + } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); stats->tx_packets += pkts; stats->tx_bytes += bytes; } @@ -1096,8 +1097,6 @@ dev_info(&adapter->pdev->dev, "Disabling VLAN Promiscuous mode.\n"); adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; - dev_info(&adapter->pdev->dev, - "Re-Enabling HW VLAN filtering\n"); } } } @@ -1105,12 +1104,12 @@ return status; set_vlan_promisc: - dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); + if (adapter->flags & BE_FLAGS_VLAN_PROMISC) + return 0; status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); if (!status) { dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n"); - dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n"); adapter->flags |= BE_FLAGS_VLAN_PROMISC; } else dev_err(&adapter->pdev->dev, @@ -1123,19 +1122,18 @@ struct be_adapter *adapter = netdev_priv(netdev); int status = 0; - /* Packets with VID 0 are always received by Lancer by default */ if 
(lancer_chip(adapter) && vid == 0) goto ret; adapter->vlan_tag[vid] = 1; - if (adapter->vlans_added <= (be_max_vlans(adapter) + 1)) - status = be_vid_config(adapter); + adapter->vlans_added++; - if (!status) - adapter->vlans_added++; - else + status = be_vid_config(adapter); + if (status) { + adapter->vlans_added--; adapter->vlan_tag[vid] = 0; + } ret: return status; } @@ -1150,9 +1148,7 @@ goto ret; adapter->vlan_tag[vid] = 0; - if (adapter->vlans_added <= be_max_vlans(adapter)) - status = be_vid_config(adapter); - + status = be_vid_config(adapter); if (!status) adapter->vlans_added--; else @@ -1374,15 +1370,15 @@ rxo = &adapter->rx_obj[eqo->idx]; do { - start = u64_stats_fetch_begin_bh(&rxo->stats.sync); + start = u64_stats_fetch_begin_irq(&rxo->stats.sync); rx_pkts = rxo->stats.rx_pkts; - } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start)); + } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); txo = &adapter->tx_obj[eqo->idx]; do { - start = u64_stats_fetch_begin_bh(&txo->stats.sync); + start = u64_stats_fetch_begin_irq(&txo->stats.sync); tx_pkts = txo->stats.tx_reqs; - } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start)); + } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); /* Skip, if wrapped around or first calculation */ @@ -1442,12 +1438,12 @@ (rxcp->ip_csum || rxcp->ipv6); } -static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo, - u16 frag_idx) +static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *rx_page_info; struct be_queue_info *rxq = &rxo->q; + u16 frag_idx = rxq->tail; rx_page_info = &rxo->page_info_tbl[frag_idx]; BUG_ON(!rx_page_info->page); @@ -1459,6 +1455,7 @@ rx_page_info->last_page_user = false; } + queue_tail_inc(rxq); atomic_dec(&rxq->used); return rx_page_info; } @@ -1467,15 +1464,13 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo, struct be_rx_compl_info *rxcp) { - struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; u16 i, num_rcvd = rxcp->num_rcvd; for (i = 0; i < num_rcvd; i++) { - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); put_page(page_info->page); memset(page_info, 0, sizeof(*page_info)); - index_inc(&rxcp->rxq_idx, rxq->len); } } @@ -1486,13 +1481,12 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, struct be_rx_compl_info *rxcp) { - struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; u16 i, j; u16 hdr_len, curr_frag_len, remaining; u8 *start; - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); start = page_address(page_info->page) + page_info->page_offset; prefetch(start); @@ -1526,10 +1520,9 @@ } /* More frags present for this completion */ - index_inc(&rxcp->rxq_idx, rxq->len); remaining = rxcp->pkt_size - curr_frag_len; for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); curr_frag_len = min(remaining, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ @@ -1550,7 +1543,6 @@ skb->data_len += curr_frag_len; skb->truesize += rx_frag_size; remaining -= curr_frag_len; - index_inc(&rxcp->rxq_idx, rxq->len); page_info->page = NULL; } BUG_ON(j > MAX_SKB_FRAGS); @@ -1598,7 +1590,6 @@ struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info; struct sk_buff *skb = NULL; - struct be_queue_info *rxq = &rxo->q; u16 remaining, curr_frag_len; u16 i, j; 
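Illustrative note on the u64_stats conversions in the benet hunks above: switching from the _bh to the _irq fetch helpers keeps the same seqcount-style reader loop, which retries the snapshot if the writer updated the counters in between, so 64-bit counters read consistently on 32-bit systems without extra locking. A minimal reader sketch, using the per-ring field names seen in the patch (the helper name example_read_rx_stats is hypothetical), sketch only and not part of the patch:

#include <linux/u64_stats_sync.h>

static void example_read_rx_stats(const struct be_rx_stats *stats,
				  u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		/* take a snapshot; retry if the writer raced with us */
		start = u64_stats_fetch_begin_irq(&stats->sync);
		*pkts = stats->rx_pkts;
		*bytes = stats->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&stats->sync, start));
}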
@@ -1610,7 +1601,7 @@ remaining = rxcp->pkt_size; for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { - page_info = get_rx_page_info(rxo, rxcp->rxq_idx); + page_info = get_rx_page_info(rxo); curr_frag_len = min(remaining, rx_frag_size); @@ -1628,7 +1619,6 @@ skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); skb->truesize += rx_frag_size; remaining -= curr_frag_len; - index_inc(&rxcp->rxq_idx, rxq->len); memset(page_info, 0, sizeof(*page_info)); } BUG_ON(j > MAX_SKB_FRAGS); @@ -1663,8 +1653,6 @@ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); rxcp->ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); - rxcp->rxq_idx = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl); rxcp->num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); rxcp->pkt_type = @@ -1695,8 +1683,6 @@ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); rxcp->ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); - rxcp->rxq_idx = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl); rxcp->num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); rxcp->pkt_type = @@ -1881,7 +1867,7 @@ queue_tail_inc(txq); } while (cur_index != last_index); - kfree_skb(sent_skb); + dev_kfree_skb_any(sent_skb); return num_wrbs; } @@ -1921,7 +1907,6 @@ struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; int flush_wait = 0; - u16 tail; /* Consume pending rx completions. * Wait for the flush completion (identified by zero num_rcvd) @@ -1954,9 +1939,8 @@ be_cq_notify(adapter, rx_cq->id, false, 0); /* Then free posted rx buffers that were not used */ - tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; - for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { - page_info = get_rx_page_info(rxo, tail); + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); put_page(page_info->page); memset(page_info, 0, sizeof(*page_info)); } @@ -2797,7 +2781,7 @@ for_all_evt_queues(adapter, eqo, i) { napi_enable(&eqo->napi); be_enable_busy_poll(eqo); - be_eq_notify(adapter, eqo->q.id, true, false, 0); + be_eq_notify(adapter, eqo->q.id, true, true, 0); } adapter->flags |= BE_FLAGS_NAPI_ENABLED; @@ -2891,14 +2875,11 @@ int status, vf; u8 mac[ETH_ALEN]; struct be_vf_cfg *vf_cfg; - bool active = false; for_all_vfs(adapter, vf_cfg, vf) { - be_cmd_get_mac_from_list(adapter, mac, &active, - &vf_cfg->pmac_id, 0); - - status = be_cmd_mac_addr_query(adapter, mac, false, - vf_cfg->if_handle, 0); + status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id, + mac, vf_cfg->if_handle, + false, vf+1); if (status) return status; memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); @@ -3240,6 +3221,7 @@ /* Routine to query per function resource limits */ static int be_get_config(struct be_adapter *adapter) { + u16 profile_id; int status; status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, @@ -3249,6 +3231,13 @@ if (status) return status; + if (be_physfn(adapter)) { + status = be_cmd_get_active_profile(adapter, &profile_id); + if (!status) + dev_info(&adapter->pdev->dev, + "Using profile 0x%x\n", profile_id); + } + status = be_get_resources(adapter); if (status) return status; @@ -3403,11 +3392,6 @@ goto err; be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); - /* In UMC mode FW does not return right privileges. - * Override with correct privilege equivalent to PF. 
- */ - if (be_is_mc(adapter)) - adapter->cmd_privileges = MAX_PRIVILEGES; status = be_mac_setup(adapter); if (status) @@ -3426,6 +3410,8 @@ be_set_rx_mode(adapter->netdev); + be_cmd_get_acpi_wol_cap(adapter); + be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) @@ -4295,74 +4281,22 @@ free_netdev(adapter->netdev); } -bool be_is_wol_supported(struct be_adapter *adapter) -{ - return ((adapter->wol_cap & BE_WOL_CAP) && - !be_is_wol_excluded(adapter)) ? true : false; -} - -u32 be_get_fw_log_level(struct be_adapter *adapter) -{ - struct be_dma_mem extfat_cmd; - struct be_fat_conf_params *cfgs; - int status; - u32 level = 0; - int j; - - if (lancer_chip(adapter)) - return 0; - - memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); - extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); - - if (!extfat_cmd.va) { - dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", - __func__); - goto err; - } - - status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); - if (!status) { - cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + - sizeof(struct be_cmd_resp_hdr)); - for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { - if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) - level = cfgs->module[0].trace_lvl[j].dbg_lvl; - } - } - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); -err: - return level; -} - static int be_get_initial_config(struct be_adapter *adapter) { - int status; - u32 level; + int status, level; status = be_cmd_get_cntl_attributes(adapter); if (status) return status; - status = be_cmd_get_acpi_wol_cap(adapter); - if (status) { - /* in case of a failure to get wol capabillities - * check the exclusion list to determine WOL capability */ - if (!be_is_wol_excluded(adapter)) - adapter->wol_cap |= BE_WOL_CAP; - } - - if (be_is_wol_supported(adapter)) - adapter->wol = true; - /* Must be a power of 2 or else MODULO will BUG_ON */ adapter->be_get_temp_freq = 64; - level = be_get_fw_log_level(adapter); - adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; + if (BEx_chip(adapter)) { + level = be_cmd_get_fw_log_level(adapter); + adapter->msg_enable = + level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; + } adapter->cfg_num_qs = netif_get_num_default_rss_queues(); return 0; @@ -4625,7 +4559,7 @@ struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - if (adapter->wol) + if (adapter->wol_en) be_setup_wol(adapter, true); be_intr_set(adapter, false); @@ -4681,7 +4615,7 @@ msecs_to_jiffies(1000)); netif_device_attach(netdev); - if (adapter->wol) + if (adapter->wol_en) be_setup_wol(adapter, false); return 0; --- linux-3.13.0.orig/drivers/net/ethernet/freescale/fec_main.c +++ linux-3.13.0/drivers/net/ethernet/freescale/fec_main.c @@ -528,13 +528,6 @@ /* Clear any outstanding interrupt. */ writel(0xffc00000, fep->hwp + FEC_IEVENT); - /* Setup multicast filter. */ - set_multicast_list(ndev); -#ifndef CONFIG_M5272 - writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_HASH_TABLE_LOW); -#endif - /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); @@ -655,6 +648,13 @@ writel(rcntl, fep->hwp + FEC_R_CNTRL); + /* Setup multicast filter. 
*/ + set_multicast_list(ndev); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ ecntl |= (1 << 8); --- linux-3.13.0.orig/drivers/net/ethernet/freescale/xgmac_mdio.c +++ linux-3.13.0/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -32,6 +32,10 @@ __be32 mdio_addr; /* MDIO address */ } __packed; +/* Taken from memac_mdio.c */ +#define MDIO_STAT_ENC (1 << 6) +#define MDIO_STAT_HOLD_15_CLK (7 << 2) + #define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) #define MDIO_STAT_BSY (1 << 0) #define MDIO_STAT_RD_ER (1 << 1) @@ -91,26 +95,39 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) { struct tgec_mdio_controller __iomem *regs = bus->priv; - uint16_t dev_addr = regnum >> 16; + uint16_t dev_addr; + u32 mdio_ctl, mdio_stat; int ret; - /* Setup the MII Mgmt clock speed */ - out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); + mdio_stat = in_be32(®s->mdio_stat); + if (regnum & MII_ADDR_C45) { + /* Clause 45 (ie 10G) */ + dev_addr = (regnum >> 16) & 0x1f; + mdio_stat |= MDIO_STAT_ENC | MDIO_STAT_HOLD_15_CLK; + } else { + /* Clause 22 (ie 1G) */ + dev_addr = regnum & 0x1f; + mdio_stat &= ~MDIO_STAT_ENC; + } + + out_be32(®s->mdio_stat, mdio_stat); ret = xgmac_wait_until_free(&bus->dev, regs); if (ret) return ret; /* Set the port and dev addr */ - out_be32(®s->mdio_ctl, - MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + out_be32(®s->mdio_ctl, mdio_ctl); /* Set the register address */ - out_be32(®s->mdio_addr, regnum & 0xffff); + if (regnum & MII_ADDR_C45) { + out_be32(®s->mdio_addr, regnum & 0xffff); - ret = xgmac_wait_until_free(&bus->dev, regs); - if (ret) - return ret; + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + } /* Write the value to the register */ out_be32(®s->mdio_data, MDIO_DATA(value)); @@ -130,13 +147,22 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { struct tgec_mdio_controller __iomem *regs = bus->priv; - uint16_t dev_addr = regnum >> 16; + uint16_t dev_addr; + uint32_t mdio_stat; uint32_t mdio_ctl; uint16_t value; int ret; - /* Setup the MII Mgmt clock speed */ - out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); + mdio_stat = in_be32(®s->mdio_stat); + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_stat |= MDIO_STAT_ENC | MDIO_STAT_HOLD_15_CLK; + } else { + dev_addr = regnum & 0x1f; + mdio_stat = ~MDIO_STAT_ENC; + } + + out_be32(®s->mdio_stat, mdio_stat); ret = xgmac_wait_until_free(&bus->dev, regs); if (ret) @@ -147,11 +173,13 @@ out_be32(®s->mdio_ctl, mdio_ctl); /* Set the register address */ - out_be32(®s->mdio_addr, regnum & 0xffff); + if (regnum & MII_ADDR_C45) { + out_be32(®s->mdio_addr, regnum & 0xffff); - ret = xgmac_wait_until_free(&bus->dev, regs); - if (ret) - return ret; + ret = xgmac_wait_until_free(&bus->dev, regs); + if (ret) + return ret; + } /* Initiate the read */ out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); @@ -162,7 +190,7 @@ /* Return all Fs if nothing was there */ if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) { - dev_err(&bus->dev, "MDIO read error\n"); + dev_dbg(&bus->dev, "MDIO read error\n"); return 0xffff; } @@ -181,7 +209,7 @@ mutex_lock(&bus->mdio_lock); /* Setup the MII Mgmt clock speed */ - out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); + clrbits32(®s->mdio_stat, MDIO_STAT_ENC); ret = 
xgmac_wait_until_free(&bus->dev, regs); @@ -256,6 +284,9 @@ { .compatible = "fsl,fman-xmdio", }, + { + .compatible = "fsl,fman-memac-mdio", + }, {}, }; MODULE_DEVICE_TABLE(of, xgmac_mdio_match); --- linux-3.13.0.orig/drivers/net/ethernet/ibm/ibmveth.c +++ linux-3.13.0/drivers/net/ethernet/ibm/ibmveth.c @@ -293,6 +293,18 @@ atomic_add(buffers_added, &(pool->available)); } +/* + * The final 8 bytes of the buffer list is a counter of frames dropped + * because there was not a buffer in the buffer list capable of holding + * the frame. + */ +static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) +{ + __be64 *p = adapter->buffer_list_addr + 4096 - 8; + + adapter->rx_no_buffer = be64_to_cpup(p); +} + /* replenish routine */ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) { @@ -308,8 +320,7 @@ ibmveth_replenish_buffer_pool(adapter, pool); } - adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) + - 4096 - 8); + ibmveth_update_rx_no_buffer(adapter); } /* empty and free ana buffer pool - also used to do cleanup in error paths */ @@ -523,10 +534,21 @@ return rc; } +static u64 ibmveth_encode_mac_addr(u8 *mac) +{ + int i; + u64 encoded = 0; + + for (i = 0; i < ETH_ALEN; i++) + encoded = (encoded << 8) | mac[i]; + + return encoded; +} + static int ibmveth_open(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev_priv(netdev); - u64 mac_address = 0; + u64 mac_address; int rxq_entries = 1; unsigned long lpar_rc; int rc; @@ -580,8 +602,7 @@ adapter->rx_queue.num_slots = rxq_entries; adapter->rx_queue.toggle = 1; - memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); - mac_address = mac_address >> 16; + mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; @@ -689,8 +710,7 @@ free_irq(netdev->irq, netdev); - adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + - 4096 - 8); + ibmveth_update_rx_no_buffer(adapter); ibmveth_cleanup(adapter); @@ -1184,8 +1204,8 @@ /* add the addresses to the filter table */ netdev_for_each_mc_addr(ha, netdev) { /* add the multicast address to the filter table */ - unsigned long mcast_addr = 0; - memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); + u64 mcast_addr; + mcast_addr = ibmveth_encode_mac_addr(ha->addr); lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); @@ -1369,9 +1389,6 @@ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); - adapter->mac_addr = 0; - memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN); - netdev->irq = dev->irq; netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; @@ -1380,7 +1397,7 @@ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features |= netdev->hw_features; - memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); + memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; --- linux-3.13.0.orig/drivers/net/ethernet/ibm/ibmveth.h +++ linux-3.13.0/drivers/net/ethernet/ibm/ibmveth.h @@ -139,7 +139,6 @@ struct napi_struct napi; struct net_device_stats stats; unsigned int mcastFilterSize; - unsigned long mac_addr; void * buffer_list_addr; void * filter_list_addr; dma_addr_t buffer_list_dma; --- linux-3.13.0.orig/drivers/net/ethernet/intel/Kconfig +++ linux-3.13.0/drivers/net/ethernet/intel/Kconfig @@ -243,6 +243,7 @@ config I40E tristate "Intel(R) Ethernet Controller XL710 Family support" + select 
PTP_1588_CLOCK depends on PCI ---help--- This driver supports Intel(R) Ethernet Controller XL710 Family of @@ -259,4 +260,44 @@ To compile this driver as a module, choose M here. The module will be called i40e. +config I40E_VXLAN + bool "Virtual eXtensible Local Area Network Support" + default n + depends on I40E && VXLAN && !(I40E=y && VXLAN=m) + ---help--- + This allows one to create VXLAN virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. VXLAN is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to use Virtual eXtensible Local Area Network + (VXLAN) in the driver. + +config I40E_DCB + bool "Data Center Bridging (DCB) Support" + default n + depends on I40E && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + + If unsure, say N. + +config I40EVF + tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" + depends on PCI_MSI + ---help--- + This driver supports Intel(R) XL710 and X710 virtual functions. + For more information on how to identify your adapter, go to the + Adapter & Driver ID Guide at: + + + + For general information and support, go to the Intel support + website at: + + + + To compile this driver as a module, choose M here. The module + will be called i40evf. MSI-X interrupt support is required + for this driver to work correctly. + endif # NET_VENDOR_INTEL --- linux-3.13.0.orig/drivers/net/ethernet/intel/Makefile +++ linux-3.13.0/drivers/net/ethernet/intel/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ +obj-$(CONFIG_I40EVF) += i40evf/ --- linux-3.13.0.orig/drivers/net/ethernet/intel/e100.c +++ linux-3.13.0/drivers/net/ethernet/intel/e100.c @@ -3034,7 +3034,7 @@ *enable_wake = false; } - pci_disable_device(pdev); + pci_clear_master(pdev); } static int __e100_power_off(struct pci_dev *pdev, bool wake) --- linux-3.13.0.orig/drivers/net/ethernet/intel/e1000e/netdev.c +++ linux-3.13.0/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2976,11 +2976,21 @@ u32 rctl, rfctl; u32 pages = 0; - /* Workaround Si errata on PCHx - configure jumbo frame flow */ - if ((hw->mac.type >= e1000_pch2lan) && - (adapter->netdev->mtu > ETH_DATA_LEN) && - e1000_lv_jumbo_workaround_ich8lan(hw, true)) - e_dbg("failed to enable jumbo frame workaround mode\n"); + /* Workaround Si errata on PCHx - configure jumbo frame flow. + * If jumbo frames not set, program related MAC/PHY registers + * to h/w defaults + */ + if (hw->mac.type >= e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable|disable jumbo frame workaround mode\n"); + } /* Program MC offset vector base */ rctl = er32(RCTL); --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/Makefile +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 Intel Corporation. +# Copyright(c) 2013 - 2014 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. 
# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . # # The full GNU General Public License is included in this distribution in # the file called "COPYING". @@ -41,4 +40,7 @@ i40e_debugfs.o \ i40e_diag.o \ i40e_txrx.o \ + i40e_ptp.o \ i40e_virtchnl_pf.o + +i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -29,6 +28,7 @@ #define _I40E_H_ #include +#include #include #include #include @@ -50,11 +50,15 @@ #include #include #include +#include +#include +#include #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_virtchnl.h" #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" +#include "i40e_dcb.h" /* Useful i40e defaults */ #define I40E_BASE_PF_SEID 16 @@ -63,7 +67,7 @@ #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 -#define I40E_MAX_REGISTER 0x0038FFFF +#define I40E_MAX_REGISTER 0x800000 #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 #define I40E_MIN_NUM_DESCRIPTORS 64 @@ -72,6 +76,7 @@ #define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ +#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 @@ -81,12 +86,14 @@ #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_NVM_VERSION_LO_SHIFT 0 -#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_MID_SHIFT 4 -#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT) +#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 #define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +/* The values in here are decimal coded as hex as is the case in the NVM map*/ +#define I40E_CURRENT_NVM_VERSION_HI 0x2 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 + /* magic for getting defines into strings */ #define STRINGIFY(foo) #foo #define XSTRINGIFY(bar) STRINGIFY(bar) @@ -127,7 +134,10 @@ __I40E_PF_RESET_REQUESTED, __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, + __I40E_EMP_RESET_REQUESTED, __I40E_FILTER_OVERFLOW_PROMISC, + __I40E_SUSPENDED, + __I40E_BAD_EEPROM, }; enum 
i40e_interrupt_policy { @@ -144,8 +154,21 @@ }; #define I40E_DEFAULT_ATR_SAMPLE_RATE 20 -#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512 -struct i40e_fdir_data { +#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 +#define I40E_FDIR_BUFFER_FULL_MARGIN 10 +#define I40E_FDIR_BUFFER_HEAD_ROOM 200 + +struct i40e_fdir_filter { + struct hlist_node fdir_node; + /* filter ipnut set */ + u8 flow_type; + u8 ip4_proto; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be32 sctp_v_tag; + /* filter control */ u16 q_index; u8 flex_off; u8 pctype; @@ -154,9 +177,10 @@ u8 fd_status; u16 cnt_index; u32 fd_id; - u8 *raw_packet; }; +#define I40E_ETH_P_LLDP 0x88cc + #define I40E_DCB_PRIO_TYPE_STRICT 0 #define I40E_DCB_PRIO_TYPE_ETS 1 #define I40E_DCB_STRICT_PRIO_CREDITS 127 @@ -186,19 +210,28 @@ bool fc_autoneg_status; u16 eeprom_version; - u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */ + u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ u16 num_req_vfs; /* num vfs requested for this vf */ u16 num_vf_qps; /* num queue pairs per vf */ - u16 num_tc_qps; /* num queue pairs per TC */ u16 num_lan_qps; /* num lan queues this pf has set up */ u16 num_lan_msix; /* num queue vectors for the base pf vsi */ + int queues_left; /* queues left unclaimed */ u16 rss_size; /* num queues in the RSS array */ u16 rss_size_max; /* HW defined max RSS queues */ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */ u8 atr_sample_rate; + bool wol_en; + + struct hlist_head fdir_filter_list; + u16 fdir_pf_active_filters; +#ifdef CONFIG_I40E_VXLAN + __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; + u16 pending_vxlan_bitmap; + +#endif enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; @@ -216,24 +249,27 @@ #define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4) #define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5) #define I40E_FLAG_RSS_ENABLED (u64)(1 << 6) -#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7) -#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8) -#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9) -#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10) -#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13) -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14) -#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15) -#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16) -#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18) -#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19) -#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20) -#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21) -#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22) -#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23) -#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27) +#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8) +#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9) +#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13) +#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14) +#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15) +#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17) +#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18) +#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19) +#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20) +#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21) +#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22) +#define I40E_FLAG_PTP (u64)(1 << 25) +#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26) +#ifdef CONFIG_I40E_VXLAN +#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 
27) +#endif - u16 num_tx_queues; - u16 num_rx_queues; + /* tracks features that get auto disabled by errors */ + u64 auto_disable_flags; bool stat_offsets_loaded; struct i40e_hw_port_stats stats; @@ -247,6 +283,7 @@ u16 globr_count; /* Global reset count */ u16 empr_count; /* EMP reset count */ u16 pfr_count; /* PF reset count */ + u16 sw_int_count; /* SW interrupt count */ struct mutex switch_mutex; u16 lan_vsi; /* our default LAN VSI */ @@ -270,6 +307,8 @@ struct dentry *i40e_dbg_pf; #endif /* CONFIG_DEBUG_FS */ + u16 instance; /* A unique number per i40e_pf instance in the system */ + /* sr-iov config info */ struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ @@ -287,6 +326,20 @@ u32 fcoe_hmc_filt_num; u32 fcoe_hmc_cntx_num; struct i40e_filter_control_settings filter_settings; + + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct sk_buff *ptp_tx_skb; + struct work_struct ptp_tx_work; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + spinlock_t tmreg_lock; /* Used to protect the device time registers. */ + u64 ptp_base_adj; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + bool ptp_tx; + bool ptp_rx; }; struct i40e_mac_filter { @@ -441,15 +494,13 @@ static char buf[32]; snprintf(buf, sizeof(buf), - "f%d.%d a%d.%d n%02d.%02d.%02d e%08x", + "f%d.%d a%d.%d n%02x.%02x e%08x", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.api_maj_ver, hw->aq.api_min_ver, - (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) - >> I40E_NVM_VERSION_HI_SHIFT, - (hw->nvm.version & I40E_NVM_VERSION_MID_MASK) - >> I40E_NVM_VERSION_MID_SHIFT, - (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) - >> I40E_NVM_VERSION_LO_SHIFT, + (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> + I40E_NVM_VERSION_HI_SHIFT, + (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> + I40E_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack); return buf; @@ -495,6 +546,7 @@ void i40e_down(struct i40e_vsi *vsi); extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi); @@ -502,16 +554,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig); -/* needed by i40e_main.c */ -void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data, - struct i40e_ring *tx_ring); -void i40e_add_remove_filter(struct i40e_fdir_data fdir_data, - struct i40e_ring *tx_ring); -void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data, - struct i40e_ring *tx_ring); -int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, struct i40e_pf *pf, bool add); - +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add); +void i40e_fdir_check_and_reenable(struct i40e_pf *pf); +int i40e_get_current_fd_count(struct i40e_pf *pf); +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -524,10 +573,13 @@ int i40e_vsi_release(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type, struct i40e_vsi *start_vsi); +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int 
queue_count); struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc); void i40e_veb_release(struct i40e_veb *veb); +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc); i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid); void i40e_vsi_remove_pvid(struct i40e_vsi *vsi); void i40e_vsi_reset_stats(struct i40e_vsi *vsi); @@ -544,8 +596,10 @@ static inline void i40e_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS*/ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); @@ -555,5 +609,21 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); - +#ifdef CONFIG_I40E_DCB +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *new_cfg); +void i40e_dcbnl_set_all(struct i40e_vsi *vsi); +void i40e_dcbnl_setup(struct i40e_vsi *vsi); +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg); +#endif /* CONFIG_I40E_DCB */ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi); +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); +void i40e_ptp_set_increment(struct i40e_pf *pf); +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +void i40e_ptp_init(struct i40e_pf *pf); +void i40e_ptp_stop(struct i40e_pf *pf); #endif /* _I40E_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
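
The reworked i40e_fw_version_str() in the i40e.h hunk above now prints the NVM version as two hex fields ("n%02x.%02x") instead of three decimal ones. Below is a minimal standalone sketch of that mask-and-shift formatting; the real I40E_NVM_VERSION_* constants are not part of this hunk, so the mask and shift values here are assumptions made only so the example runs.

    /* Illustrative sketch only -- not part of the patch. The mask/shift
     * values are assumed; the driver's real I40E_NVM_VERSION_* constants
     * are defined elsewhere in its headers.
     */
    #include <stdio.h>

    #define NVM_VERSION_HI_MASK  0xf000u   /* assumed field layout */
    #define NVM_VERSION_HI_SHIFT 12
    #define NVM_VERSION_LO_MASK  0x00ffu   /* assumed field layout */
    #define NVM_VERSION_LO_SHIFT 0

    int main(void)
    {
        unsigned int version = 0x1024;     /* example NVM version word */
        unsigned int eetrack = 0x800011a5; /* example EETRACK id */
        char buf[32];

        snprintf(buf, sizeof(buf), "n%02x.%02x e%08x",
                 (version & NVM_VERSION_HI_MASK) >> NVM_VERSION_HI_SHIFT,
                 (version & NVM_VERSION_LO_MASK) >> NVM_VERSION_LO_SHIFT,
                 eetrack);
        printf("%s\n", buf);
        return 0;
    }
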
@@ -31,6 +30,8 @@ #include "i40e_adminq.h" #include "i40e_prototype.h" +static void i40e_resume_aq(struct i40e_hw *hw); + /** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -43,13 +44,17 @@ if (hw->mac.type == I40E_MAC_VF) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; hw->aq.arq.tail = I40E_VF_ARQT1; hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; } else { hw->aq.asq.tail = I40E_PF_ATQT; hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; hw->aq.arq.tail = I40E_PF_ARQT; hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; } } @@ -60,9 +65,8 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) { i40e_status ret_code; - struct i40e_virt_mem mem; - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem, + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, i40e_mem_atq_ring, (hw->aq.num_asq_entries * sizeof(struct i40e_aq_desc)), @@ -70,21 +74,14 @@ if (ret_code) return ret_code; - hw->aq.asq.desc = hw->aq.asq_mem.va; - hw->aq.asq.dma_addr = hw->aq.asq_mem.pa; - - ret_code = i40e_allocate_virt_mem(hw, &mem, + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, (hw->aq.num_asq_entries * sizeof(struct i40e_asq_cmd_details))); if (ret_code) { - i40e_free_dma_mem(hw, &hw->aq.asq_mem); - hw->aq.asq_mem.va = NULL; - hw->aq.asq_mem.pa = 0; + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); return ret_code; } - hw->aq.asq.details = mem.va; - return ret_code; } @@ -96,16 +93,11 @@ { i40e_status ret_code; - ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem, + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, i40e_mem_arq_ring, (hw->aq.num_arq_entries * sizeof(struct i40e_aq_desc)), I40E_ADMINQ_DESC_ALIGNMENT); - if (ret_code) - return ret_code; - - hw->aq.arq.desc = hw->aq.arq_mem.va; - hw->aq.arq.dma_addr = hw->aq.arq_mem.pa; return ret_code; } @@ -119,14 +111,7 @@ **/ static void i40e_free_adminq_asq(struct i40e_hw *hw) { - struct i40e_virt_mem mem; - - i40e_free_dma_mem(hw, &hw->aq.asq_mem); - hw->aq.asq_mem.va = NULL; - hw->aq.asq_mem.pa = 0; - mem.va = hw->aq.asq.details; - i40e_free_virt_mem(hw, &mem); - hw->aq.asq.details = NULL; + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); } /** @@ -138,20 +123,17 @@ **/ static void i40e_free_adminq_arq(struct i40e_hw *hw) { - i40e_free_dma_mem(hw, &hw->aq.arq_mem); - hw->aq.arq_mem.va = NULL; - hw->aq.arq_mem.pa = 0; + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); } /** * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) { i40e_status ret_code; struct i40e_aq_desc *desc; - struct i40e_virt_mem mem; struct i40e_dma_mem *bi; int i; @@ -160,11 +142,11 @@ */ /* buffer_info structures do not need alignment */ - ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries * - sizeof(struct i40e_dma_mem))); + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); if (ret_code) goto alloc_arq_bufs; - hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_arq_entries; i++) { @@ -206,29 +188,27 @@ i--; for (; i >= 0; i--) i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); - mem.va = hw->aq.arq.r.arq_bi; - 
i40e_free_virt_mem(hw, &mem); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); return ret_code; } /** * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) { i40e_status ret_code; - struct i40e_virt_mem mem; struct i40e_dma_mem *bi; int i; /* No mapped memory needed yet, just the buffer info structures */ - ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries * - sizeof(struct i40e_dma_mem))); + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); if (ret_code) goto alloc_asq_bufs; - hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; /* allocate the mapped buffers */ for (i = 0; i < hw->aq.num_asq_entries; i++) { @@ -248,35 +228,36 @@ i--; for (; i >= 0; i--) i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); - mem.va = hw->aq.asq.r.asq_bi; - i40e_free_virt_mem(hw, &mem); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); return ret_code; } /** * i40e_free_arq_bufs - Free receive queue buffer info elements - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static void i40e_free_arq_bufs(struct i40e_hw *hw) { - struct i40e_virt_mem mem; int i; + /* free descriptors */ for (i = 0; i < hw->aq.num_arq_entries; i++) i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); - mem.va = hw->aq.arq.r.arq_bi; - i40e_free_virt_mem(hw, &mem); + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); } /** * i40e_free_asq_bufs - Free send queue buffer info elements - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ static void i40e_free_asq_bufs(struct i40e_hw *hw) { - struct i40e_virt_mem mem; int i; /* only unmap if the address is non-NULL */ @@ -284,14 +265,19 @@ if (hw->aq.asq.r.asq_bi[i].pa) i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); - /* now free the buffer info list */ - mem.va = hw->aq.asq.r.asq_bi; - i40e_free_virt_mem(hw, &mem); + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); } /** * i40e_config_asq_regs - configure ASQ registers - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * Configure base address and length registers for the transmit queue **/ @@ -299,14 +285,18 @@ { if (hw->mac.type == I40E_MAC_VF) { /* configure the transmit queue */ - wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr)); - wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr)); + wr32(hw, I40E_VF_ATQBAH1, + upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_VF_ATQBAL1, + lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries | I40E_VF_ATQLEN1_ATQENABLE_MASK)); } else { /* configure the transmit queue */ - wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr)); - wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr)); + wr32(hw, I40E_PF_ATQBAH, + upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_PF_ATQBAL, + lower_32_bits(hw->aq.asq.desc_buf.pa)); wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | I40E_PF_ATQLEN_ATQENABLE_MASK)); } @@ 
-314,7 +304,7 @@ /** * i40e_config_arq_regs - ARQ register configuration - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * Configure base address and length registers for the receive (event queue) **/ @@ -322,14 +312,18 @@ { if (hw->mac.type == I40E_MAC_VF) { /* configure the receive queue */ - wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr)); - wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr)); + wr32(hw, I40E_VF_ARQBAH1, + upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_VF_ARQBAL1, + lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries | I40E_VF_ARQLEN1_ARQENABLE_MASK)); } else { /* configure the receive queue */ - wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr)); - wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr)); + wr32(hw, I40E_PF_ARQBAH, + upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_PF_ARQBAL, + lower_32_bits(hw->aq.arq.desc_buf.pa)); wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | I40E_PF_ARQLEN_ARQENABLE_MASK)); } @@ -340,7 +334,7 @@ /** * i40e_init_asq - main initialization routine for ASQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * This is the main initialization routine for the Admin Send Queue * Prior to calling this function, drivers *MUST* set the following fields @@ -397,7 +391,7 @@ /** * i40e_init_arq - initialize ARQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * The main initialization routine for the Admin Receive (Event) Queue. * Prior to calling this function, drivers *MUST* set the following fields @@ -454,7 +448,7 @@ /** * i40e_shutdown_asq - shutdown the ASQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Send Queue **/ @@ -466,10 +460,9 @@ return I40E_ERR_NOT_READY; /* Stop firmware AdminQ processing */ - if (hw->mac.type == I40E_MAC_VF) - wr32(hw, I40E_VF_ATQLEN1, 0); - else - wr32(hw, I40E_PF_ATQLEN, 0); + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); /* make sure lock is available */ mutex_lock(&hw->aq.asq_mutex); @@ -478,8 +471,6 @@ /* free ring buffers */ i40e_free_asq_bufs(hw); - /* free the ring descriptors */ - i40e_free_adminq_asq(hw); mutex_unlock(&hw->aq.asq_mutex); @@ -488,7 +479,7 @@ /** * i40e_shutdown_arq - shutdown ARQ - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * The main shutdown routine for the Admin Receive Queue **/ @@ -500,10 +491,9 @@ return I40E_ERR_NOT_READY; /* Stop firmware AdminQ processing */ - if (hw->mac.type == I40E_MAC_VF) - wr32(hw, I40E_VF_ARQLEN1, 0); - else - wr32(hw, I40E_PF_ARQLEN, 0); + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); /* make sure lock is available */ mutex_lock(&hw->aq.arq_mutex); @@ -512,8 +502,6 @@ /* free ring buffers */ i40e_free_arq_bufs(hw); - /* free the ring descriptors */ - i40e_free_adminq_arq(hw); mutex_unlock(&hw->aq.arq_mutex); @@ -522,7 +510,7 @@ /** * i40e_init_adminq - main initialization routine for Admin Queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure * * Prior to calling this function, drivers *MUST* set the following fields * in the hw->aq structure: @@ -533,8 +521,9 @@ **/ i40e_status i40e_init_adminq(struct i40e_hw *hw) { - u16 eetrack_lo, eetrack_hi; i40e_status ret_code; + u16 eetrack_lo, 
eetrack_hi; + int retry = 0; /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || @@ -562,23 +551,41 @@ if (ret_code) goto init_adminq_free_asq; - ret_code = i40e_aq_get_firmware_version(hw, - &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, - &hw->aq.api_maj_ver, &hw->aq.api_min_ver, - NULL); - if (ret_code) + /* There are some cases where the firmware may not be quite ready + * for AdminQ operations, so we retry the AdminQ setup a few times + * if we see timeouts in this first AQ call. + */ + do { + ret_code = i40e_aq_get_firmware_version(hw, + &hw->aq.fw_maj_ver, + &hw->aq.fw_min_ver, + &hw->aq.api_maj_ver, + &hw->aq.api_min_ver, + NULL); + if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) + break; + retry++; + msleep(100); + i40e_resume_aq(hw); + } while (retry < 10); + if (ret_code != I40E_SUCCESS) goto init_adminq_free_arq; - if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR || - hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) { - ret_code = I40E_ERR_FIRMWARE_API_VERSION; - goto init_adminq_free_arq; - } + /* get the NVM version info */ i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; + if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR || + hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) { + ret_code = I40E_ERR_FIRMWARE_API_VERSION; + goto init_adminq_free_arq; + } + + /* pre-emptive resource lock release */ + i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + ret_code = i40e_aq_set_hmc_resource_profile(hw, I40E_HMC_PROFILE_DEFAULT, 0, @@ -600,12 +607,15 @@ /** * i40e_shutdown_adminq - shutdown routine for the Admin Queue - * @hw: pointer to the hardware structure + * @hw: pointer to the hardware structure **/ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) { i40e_status ret_code = 0; + if (i40e_check_asq_alive(hw)) + i40e_aq_queue_shutdown(hw, true); + i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); @@ -616,7 +626,7 @@ /** * i40e_clean_asq - cleans Admin send queue - * @asq: pointer to the adminq send ring + * @hw: pointer to the hardware structure * * returns the number of free desc **/ @@ -637,9 +647,8 @@ desc_cb = *desc; cb_func(hw, &desc_cb); } - memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); - memset((void *)details, 0, - sizeof(struct i40e_asq_cmd_details)); + memset(desc, 0, sizeof(*desc)); + memset(details, 0, sizeof(*details)); ntc++; if (ntc == asq->count) ntc = 0; @@ -659,12 +668,12 @@ * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. **/ -bool i40e_asq_done(struct i40e_hw *hw) +static bool i40e_asq_done(struct i40e_hw *hw) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ - return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use); + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; } @@ -674,7 +683,7 @@ * @desc: prefilled descriptor describing the command (non DMA mem) * @buff: buffer to use for indirect commands * @buff_size: size of buffer for indirect commands - * @opaque: pointer to info to be used in async cleanup + * @cmd_details: pointer to command details structure * * This is the main send command driver routine for the Admin Queue send * queue. 
It runs the queue, cleans the queue, etc @@ -854,7 +863,7 @@ /* zero out the desc */ memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); desc->opcode = cpu_to_le16(opcode); - desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI); + desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); } /** @@ -912,7 +921,7 @@ "AQRX: Event received with error 0x%X.\n", hw->aq.arq_last_status); } else { - memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc)); + e->desc = *desc; datalen = le16_to_cpu(desc->datalen); e->msg_size = min(datalen, e->msg_size); if (e->msg_buf != NULL && (e->msg_size != 0)) @@ -925,6 +934,11 @@ * size */ bi = &hw->aq.arq.r.arq_bi[ntc]; + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); desc->datalen = cpu_to_le16((u16)bi->size); desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); @@ -947,36 +961,16 @@ return ret_code; } -void i40e_resume_aq(struct i40e_hw *hw) +static void i40e_resume_aq(struct i40e_hw *hw) { - u32 reg = 0; - /* Registers are reset after PF reset */ hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; i40e_config_asq_regs(hw); - reg = hw->aq.num_asq_entries; - - if (hw->mac.type == I40E_MAC_VF) { - reg |= I40E_VF_ATQLEN_ATQENABLE_MASK; - wr32(hw, I40E_VF_ATQLEN1, reg); - } else { - reg |= I40E_PF_ATQLEN_ATQENABLE_MASK; - wr32(hw, I40E_PF_ATQLEN, reg); - } hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; i40e_config_arq_regs(hw); - reg = hw->aq.num_arq_entries; - - if (hw->mac.type == I40E_MAC_VF) { - reg |= I40E_VF_ATQLEN_ATQENABLE_MASK; - wr32(hw, I40E_VF_ARQLEN1, reg); - } else { - reg |= I40E_PF_ATQLEN_ATQENABLE_MASK; - wr32(hw, I40E_PF_ARQLEN, reg); - } } --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
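
The i40e_init_adminq() change in the i40e_adminq.c hunks above tolerates firmware that is not yet ready: the first AdminQ command is retried on timeout, with a 100 ms sleep and an AQ resume between attempts, up to ten times. Here is a minimal standalone sketch of that bounded-retry shape; try_get_fw_version() is a stub standing in for i40e_aq_get_firmware_version(), and the error value is a placeholder.

    /* Illustrative sketch only -- models the bounded retry added to
     * i40e_init_adminq(). The stub "succeeds" on the third attempt.
     */
    #include <stdio.h>
    #include <unistd.h>

    #define ERR_TIMEOUT (-1)

    static int attempts;

    static int try_get_fw_version(void)
    {
        return (++attempts < 3) ? ERR_TIMEOUT : 0;
    }

    int main(void)
    {
        int retry = 0;
        int ret;

        do {
            ret = try_get_fw_version();
            if (ret != ERR_TIMEOUT)
                break;              /* success or a hard error: stop */
            retry++;
            usleep(100 * 1000);     /* stand-in for msleep(100) */
            /* the driver would call i40e_resume_aq(hw) here */
        } while (retry < 10);

        printf("ret=%d after %d attempt(s)\n", ret, attempts);
        return ret ? 1 : 0;
    }
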
@@ -32,20 +31,20 @@ #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ - (&(((struct i40e_aq_desc *)((R).desc))[i])) + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) #define I40E_ADMINQ_DESC_ALIGNMENT 4096 struct i40e_adminq_ring { - void *desc; /* Descriptor ring memory */ - void *details; /* ASQ details */ + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ union { struct i40e_dma_mem *asq_bi; struct i40e_dma_mem *arq_bi; } r; - u64 dma_addr; /* Physical address of the ring */ u16 count; /* Number of descriptors */ u16 rx_buf_len; /* Admin Receive Queue buffer length */ @@ -56,6 +55,7 @@ /* used for queue tracking */ u32 head; u32 tail; + u32 len; }; /* ASQ transaction details */ @@ -69,7 +69,7 @@ }; #define I40E_ADMINQ_DETAILS(R, i) \ - (&(((struct i40e_asq_cmd_details *)((R).details))[i])) + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) /* ARQ event information */ struct i40e_arq_event_info { @@ -94,9 +94,6 @@ struct mutex asq_mutex; /* Send queue lock */ struct mutex arq_mutex; /* Receive queue lock */ - struct i40e_dma_mem asq_mem; /* send queue dynamic memory */ - struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */ - /* last status values on send and receive queues */ enum i40e_admin_queue_err asq_last_status; enum i40e_admin_queue_err arq_last_status; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
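
With the ring rework in the i40e_adminq.h hunk above, the descriptor memory lives in a dma_mem-style buffer and I40E_ADMINQ_DESC() indexes it through the kernel virtual address in desc_buf.va. Below is a minimal userspace sketch of that cast-and-index pattern; the mock_* types are simplified stand-ins, not the driver's real structures.

    /* Illustrative sketch only -- mirrors the shape of
     * I40E_ADMINQ_DESC(R, i) after the rework, using mock types.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct mock_desc { unsigned short flags, opcode, datalen, retval; };
    struct mock_dma_mem { void *va; unsigned long long pa; };
    struct mock_ring { struct mock_dma_mem desc_buf; unsigned short count; };

    #define ADMINQ_DESC(R, i) (&(((struct mock_desc *)((R).desc_buf.va))[i]))

    int main(void)
    {
        struct mock_ring asq = { .count = 4 };
        struct mock_desc *d;

        asq.desc_buf.va = calloc(asq.count, sizeof(struct mock_desc));
        if (!asq.desc_buf.va)
            return 1;

        d = ADMINQ_DESC(asq, 2);    /* third descriptor in the ring */
        d->opcode = 0x0001;         /* e.g. an AQ get_version opcode */

        printf("desc[2].opcode = 0x%04x\n", ADMINQ_DESC(asq, 2)->opcode);
        free(asq.desc_buf.va);
        return 0;
    }
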
@@ -35,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0000 +#define I40E_FW_API_VERSION_MINOR 0x0001 struct i40e_aq_desc { __le16 flags; @@ -137,10 +136,13 @@ i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + /* internal switch commands */ i40e_aqc_opc_get_switch_config = 0x0200, i40e_aqc_opc_add_statistics = 0x0201, @@ -317,13 +319,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); -/* Send driver version (direct 0x0002) */ +/* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { u8 driver_major_ver; u8 driver_minor_ver; u8 driver_build_ver; u8 driver_subbuild_ver; - u8 reserved[12]; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); @@ -479,7 +483,7 @@ u8 reserved2[6]; }; -/* Manage MAC Address Read Command (0x0107) */ +/* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; #define I40E_AQC_LAN_ADDR_VALID 0x10 @@ -517,6 +521,16 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + /* Switch configuration commands (0x02xx) */ /* Used by many indirect commands that only pass an seid and a buffer in the @@ -639,13 +653,15 @@ u8 reserved2[6]; }; -/* Add VSI (indirect 0x210) +/* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) * - * Update VSI (indirect 0x211) Get VSI (indirect 0x0212) - * use the generic i40e_aqc_switch_seid descriptor format - * use the same completion and data structure as Add VSI + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { __le16 uplink_seid; @@ -664,7 +680,6 @@ #define I40E_AQ_VSI_TYPE_PF 0x2 #define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 #define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 -#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8 __le32 addr_high; __le32 addr_low; }; @@ -1026,7 +1041,9 @@ #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - u8 reserved[10]; + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1179,33 +1196,46 @@ } v4; struct { u8 data[16]; - } v6; - } ipaddr; + } v6; + } ipaddr; __le16 flags; #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 #define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 -#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002 +/* 0x0002 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007 +/* 0x0007 reserved */ /* 0x0008 reserved */ #define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define 
I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 #define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - __le32 key_low; - __le32 key_high; + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; __le16 queue_number; #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 #define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved[14]; + u8 reserved2[14]; /* response section */ u8 allocation_result; #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 @@ -1548,7 +1578,7 @@ struct i40e_aq_get_phy_abilities_resp { __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum */ + u8 link_speed; /* bitmap using the above enum bit patterns */ u8 abilities; #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 @@ -1582,6 +1612,10 @@ __le32 phy_type; u8 link_speed; u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; __le32 eeer; u8 low_power_ctrl; @@ -1914,22 +1948,33 @@ /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { __le16 udp_port; - u8 header_len; /* in DWords, 1 to 15 */ - u8 protocol_index; -#define I40E_AQC_TUNNEL_TYPE_MAC 0x0 -#define I40E_AQC_TUNNEL_TYPE_UDP 0x1 - u8 reserved[12]; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 + u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { u8 reserved[2]; u8 index; /* 0 to 15 */ - u8 pf_filters; - u8 total_filters; - u8 reserved2[11]; + u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); @@ -1937,28 +1982,32 @@ struct i40e_aqc_del_udp_tunnel_completion { __le16 udp_port; u8 index; /* 0 to 15 */ - u8 multiple_entries; - u8 tunnels_used; - u8 reserved; - u8 tunnels_free; - u8 reserved1[9]; + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); /* tunnel key structure 0x0B10 */ + struct i40e_aqc_tunnel_key_structure { - __le16 key1_off; - __le16 key1_len; - __le16 key2_off; - __le16 key2_len; - __le16 flags; + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 resreved[6]; + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define 
I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); @@ -2052,6 +2101,7 @@ #define I40E_AQ_CLUSTER_ID_DCB 8 #define I40E_AQ_CLUSTER_ID_EMP_MEM 9 #define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { u8 cluster_id; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_common.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
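
The cloud-filter change in the i40e_adminq_cmd.h hunk above replaces the key_low/key_high pair with a tenant_id and adds tunnel-type bits (shift 9, mask 0x1E00) next to the 6-bit filter type in the same flags word. A minimal sketch of packing those two fields follows; the constants are copied from the hunk, while pack_cloud_flags() is only an illustrative helper, not a driver function.

    /* Illustrative sketch only -- packs a cloud-filter type and tunnel
     * type into one 16-bit flags word using the layout from the hunk.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define ADD_CLOUD_FILTER_SHIFT   0
    #define ADD_CLOUD_FILTER_MASK    (0x3F << ADD_CLOUD_FILTER_SHIFT)
    #define ADD_CLOUD_FILTER_IMAC    0x000A
    #define ADD_CLOUD_TNL_TYPE_SHIFT 9
    #define ADD_CLOUD_TNL_TYPE_MASK  0x1E00
    #define ADD_CLOUD_TNL_TYPE_NGE   2

    static uint16_t pack_cloud_flags(uint16_t filter, uint16_t tnl_type)
    {
        uint16_t flags = 0;

        flags |= (filter << ADD_CLOUD_FILTER_SHIFT) & ADD_CLOUD_FILTER_MASK;
        flags |= (tnl_type << ADD_CLOUD_TNL_TYPE_SHIFT) & ADD_CLOUD_TNL_TYPE_MASK;
        return flags;
    }

    int main(void)
    {
        uint16_t flags = pack_cloud_flags(ADD_CLOUD_FILTER_IMAC,
                                          ADD_CLOUD_TNL_TYPE_NGE);

        /* expected: 0x040a -- IMAC filter type with an NGE tunnel */
        printf("flags = 0x%04x\n", flags);
        return 0;
    }
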
@@ -43,20 +42,20 @@ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { switch (hw->device_id) { - case I40E_SFP_XL710_DEVICE_ID: - case I40E_SFP_X710_DEVICE_ID: - case I40E_QEMU_DEVICE_ID: - case I40E_KX_A_DEVICE_ID: - case I40E_KX_B_DEVICE_ID: - case I40E_KX_C_DEVICE_ID: - case I40E_KX_D_DEVICE_ID: - case I40E_QSFP_A_DEVICE_ID: - case I40E_QSFP_B_DEVICE_ID: - case I40E_QSFP_C_DEVICE_ID: + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_SFP_X710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_A: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_KX_D: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: hw->mac.type = I40E_MAC_XL710; break; - case I40E_VF_DEVICE_ID: - case I40E_VF_HV_DEVICE_ID: + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; break; default: @@ -75,7 +74,8 @@ /** * i40e_debug_aq * @hw: debug mask related to admin queue - * @cap: pointer to adminq command descriptor + * @mask: debug mask + * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer * * Dumps debug log about adminq command with descriptor contents. @@ -126,6 +126,409 @@ } /** + * i40e_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40e_check_asq_alive(struct i40e_hw *hw) +{ + return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); +} + +/** + * i40e_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. + **/ +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. 
+ * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + 
I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, 
NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + 
I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + + +/** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * @@ -142,14 +545,6 @@ i40e_status status = 0; u32 reg; - hw->phy.get_link_info = true; - - /* Determine port number */ - reg = rd32(hw, I40E_PFGEN_PORTNUM); - reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> - I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); - hw->port = (u8)reg; - i40e_set_mac_type(hw); switch (hw->mac.type) { @@ -160,6 +555,21 @@ break; } + hw->phy.get_link_info = true; + + /* Determine port number */ + reg = rd32(hw, I40E_PFGEN_PORTNUM); + reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> + I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); + hw->port = (u8)reg; + + /* Determine the PF number based on the PCI fn */ + reg = rd32(hw, I40E_GLPCI_CAPSUP); + if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK) + hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func); + else + hw->pf_id = (u8)hw->bus.func; + status = i40e_init_nvm(hw); return status; } @@ -210,8 +620,11 @@ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_write); cmd_data->command_flags = cpu_to_le16(flags); - memcpy(&cmd_data->mac_sal, &mac_addr[0], 4); - memcpy(&cmd_data->mac_sah, &mac_addr[4], 2); + cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); + cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | + ((u32)mac_addr[3] << 16) | + ((u32)mac_addr[4] << 8) | + mac_addr[5]); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -240,32 +653,53 @@ } /** - * i40e_validate_mac_addr - Validate MAC address - * @mac_addr: pointer to MAC address - * - * Tests a MAC address to ensure it is a valid Individual Address + * i40e_get_media_type - Gets media type + * @hw: pointer to the hardware structure **/ -i40e_status i40e_validate_mac_addr(u8 *mac_addr) +static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) { - i40e_status status = 0; + enum i40e_media_type media; - /* Make sure it is not a multicast address */ - if (I40E_IS_MULTICAST(mac_addr)) { - hw_dbg(hw, "MAC address is multicast\n"); - status = I40E_ERR_INVALID_MAC_ADDR; - /* Not a broadcast address */ - } else if (I40E_IS_BROADCAST(mac_addr)) { - hw_dbg(hw, "MAC address is broadcast\n"); - status = I40E_ERR_INVALID_MAC_ADDR; - /* Reject the zero address */ - } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { - hw_dbg(hw, "MAC address is all zeros\n"); - status = I40E_ERR_INVALID_MAC_ADDR; + switch (hw->phy.link_info.phy_type) { + case I40E_PHY_TYPE_10GBASE_SR: + case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_40GBASE_SR4: + case I40E_PHY_TYPE_40GBASE_LR4: + media = I40E_MEDIA_TYPE_FIBER; + break; + case I40E_PHY_TYPE_100BASE_TX: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_10GBASE_T: + media = I40E_MEDIA_TYPE_BASET; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + media = I40E_MEDIA_TYPE_DA; + break; + case I40E_PHY_TYPE_1000BASE_KX: + case I40E_PHY_TYPE_10GBASE_KX4: + case I40E_PHY_TYPE_10GBASE_KR: + case I40E_PHY_TYPE_40GBASE_KR4: + media = I40E_MEDIA_TYPE_BACKPLANE; + break; + case I40E_PHY_TYPE_SGMII: + case 
I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + default: + media = I40E_MEDIA_TYPE_UNKNOWN; + break; } - return status; + + return media; } +#define I40E_PF_RESET_WAIT_COUNT_A0 200 +#define I40E_PF_RESET_WAIT_COUNT 10 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -275,7 +709,8 @@ **/ i40e_status i40e_pf_reset(struct i40e_hw *hw) { - u32 wait_cnt = 0; + u32 cnt = 0; + u32 cnt1 = 0; u32 reg = 0; u32 grst_del; @@ -285,7 +720,7 @@ */ grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) { + for (cnt = 0; cnt < grst_del + 2; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; @@ -296,17 +731,37 @@ return I40E_ERR_RESET_FAILED; } - /* Determine the PF number based on the PCI fn */ - hw->pf_id = (u8)hw->bus.func; + /* Now Wait for the FW to be ready */ + for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { + reg = rd32(hw, I40E_GLNVM_ULD); + reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); + if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { + hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); + break; + } + usleep_range(10000, 20000); + } + if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | + I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { + hw_dbg(hw, "wait for FW Reset complete timedout\n"); + hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); + return I40E_ERR_RESET_FAILED; + } /* If there was a Global Reset in progress when we got here, * we don't need to do the PF Reset */ - if (!wait_cnt) { + if (!cnt) { + if (hw->revision_id == 0) + cnt = I40E_PF_RESET_WAIT_COUNT_A0; + else + cnt = I40E_PF_RESET_WAIT_COUNT; reg = rd32(hw, I40E_PFGEN_CTRL); wr32(hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); - for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) { + for (; cnt; cnt--) { reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; @@ -319,6 +774,7 @@ } i40e_clear_pxe_mode(hw); + return 0; } @@ -335,10 +791,48 @@ /* Clear single descriptor fetch/write-back mode */ reg = rd32(hw, I40E_GLLAN_RCTL_0); - wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); + + if (hw->revision_id == 0) { + /* As a work around clear PXE_MODE instead of setting it */ + wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); + } else { + wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); + } } /** + * i40e_led_is_mine - helper to find matching led + * @hw: pointer to the hw struct + * @idx: index into GPIO registers + * + * returns: 0 if no match, otherwise the value of the GPIO_CTL register + */ +static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) +{ + u32 gpio_val = 0; + u32 port; + + if (!hw->func_caps.led[idx]) + return 0; + + gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); + port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> + I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + + /* if PRT_NUM_NA is 1 then this LED is not port specific, OR + * if it is not our port then ignore + */ + if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || + (port != hw->port)) + return 0; + + return gpio_val; +} + +#define I40E_LED0 22 +#define I40E_LINK_ACTIVITY 0xC + +/** * i40e_led_get - return current on/off mode * @hw: pointer to the hw struct * @@ -349,24 +843,20 @@ **/ u32 i40e_led_get(struct i40e_hw *hw) { - u32 gpio_val = 0; u32 mode = 
0; - u32 port; int i; - for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) { - if (!hw->func_caps.led[i]) - continue; - - gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i)); - port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) - >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); - if (port != hw->port) + if (!gpio_val) continue; - mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT; + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; } @@ -376,60 +866,45 @@ /** * i40e_led_set - set new on/off mode * @hw: pointer to the hw struct - * @mode: 0=off, else on (see EAS for mode details) + * @mode: 0=off, 0xf=on (else see manual for mode details) + * @blink: true if the LED should blink when on, false if steady + * + * if this function is used to turn on the blink it should + * be used to disable the blink when restoring the original state. **/ -void i40e_led_set(struct i40e_hw *hw, u32 mode) +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { - u32 gpio_val = 0; - u32 led_mode = 0; - u32 port; int i; - for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) { - if (!hw->func_caps.led[i]) - continue; + if (mode & 0xfffffff0) + hw_dbg(hw, "invalid mode passed in %X\n", mode); - gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i)); - port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) - >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + /* as per the documentation GPIO 22-29 are the LED + * GPIO pins named LED0..LED7 + */ + for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { + u32 gpio_val = i40e_led_is_mine(hw, i); - if (port != hw->port) + if (!gpio_val) continue; - led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & - I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; - gpio_val |= led_mode; + /* this & is a bit of paranoia, but serves as a range check */ + gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & + I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + + if (mode == I40E_LINK_ACTIVITY) + blink = false; + + gpio_val |= (blink ? 1 : 0) << + I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT; + wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); + break; } } /* Admin command wrappers */ -/** - * i40e_aq_queue_shutdown - * @hw: pointer to the hw struct - * @unloading: is the driver unloading itself - * - * Tell the Firmware that we're shutting down the AdminQ and whether - * or not the driver is unloading as well. 
- **/ -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_queue_shutdown *cmd = - (struct i40e_aqc_queue_shutdown *)&desc.params.raw; - i40e_status status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_queue_shutdown); - - if (unloading) - cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); - status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); - - return status; -} /** * i40e_aq_set_link_restart_an @@ -490,15 +965,16 @@ goto aq_get_link_info_exit; /* save off old link status information */ - memcpy(&hw->phy.link_info_old, hw_link_info, - sizeof(struct i40e_link_status)); + hw->phy.link_info_old = *hw_link_info; /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; + hw->phy.media_type = i40e_get_media_type(hw); hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; hw_link_info->link_info = resp->link_info; hw_link_info->an_info = resp->an_info; hw_link_info->ext_info = resp->ext_info; + hw_link_info->loopback = resp->loopback; if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE)) hw_link_info->lse_enable = true; @@ -519,7 +995,7 @@ /** * i40e_aq_add_vsi * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Add a VSI context to the hardware. @@ -571,7 +1047,8 @@ * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) + u16 seid, bool set, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -665,7 +1142,7 @@ /** * i40e_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, @@ -673,8 +1150,8 @@ struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; - struct i40e_aqc_switch_seid *cmd = - (struct i40e_aqc_switch_seid *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; struct i40e_aqc_add_get_update_vsi_completion *resp = (struct i40e_aqc_add_get_update_vsi_completion *) &desc.params.raw; @@ -683,7 +1160,7 @@ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_vsi_parameters); - cmd->seid = cpu_to_le16(vsi_ctx->seid); + cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) @@ -707,7 +1184,7 @@ /** * i40e_aq_update_vsi_params * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct * @cmd_details: pointer to command details structure or NULL * * Update a VSI context. 
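/*
 * Illustrative sketch only, not part of the patch above: nearly every AdminQ
 * wrapper touched in i40e_common.c follows the same shape.  The opcode
 * "i40e_aqc_opc_example" and "struct i40e_aqc_example" below are invented
 * placeholders; the fill/send helpers and the BUF/RD/LB flags are the ones
 * used throughout the surrounding diff.
 */
static i40e_status i40e_aq_example(struct i40e_hw *hw, u16 seid,
				   void *buff, u16 buff_size,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_example *cmd =
		(struct i40e_aqc_example *)&desc.params.raw;
	i40e_status status;

	/* stamp the descriptor with the opcode for this command */
	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_example);
	cmd->seid = cpu_to_le16(seid);

	/* indirect commands carry an external buffer: mark it, mark it
	 * readable by firmware for "set"-style commands, and flag large
	 * buffers so the queue uses the large-buffer descriptor format
	 */
	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
	if (buff_size > I40E_AQ_LARGE_BUF)
		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

	/* hand the descriptor (and buffer, if any) to the send queue */
	status = i40e_asq_send_command(hw, &desc, buff, buff_size,
				       cmd_details);

	return status;
}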
@@ -717,13 +1194,13 @@ struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; - struct i40e_aqc_switch_seid *cmd = - (struct i40e_aqc_switch_seid *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi *cmd = + (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_vsi_parameters); - cmd->seid = cpu_to_le16(vsi_ctx->seid); + cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) @@ -810,7 +1287,6 @@ /** * i40e_aq_send_driver_version * @hw: pointer to the hw struct - * @event: driver event: driver ok, start or stop * @dv: driver's major, minor version * @cmd_details: pointer to command details structure or NULL * @@ -873,6 +1349,7 @@ * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: true for default port VSI, false for control port + * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support * @veb_seid: pointer to where to put the resulting VEB SEID * @cmd_details: pointer to command details structure or NULL * @@ -881,7 +1358,8 @@ **/ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, u16 *veb_seid, + bool default_port, bool enable_l2_filtering, + u16 *veb_seid, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -907,6 +1385,10 @@ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; + + if (enable_l2_filtering) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; + cmd->veb_flags = cpu_to_le16(veb_flags); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -922,10 +1404,10 @@ * @hw: pointer to the hw struct * @veb_seid: the SEID of the VEB to query * @switch_id: the uplink switch id - * @floating_veb: set to true if the VEB is floating + * @floating: set to true if the VEB is floating * @statistic_index: index of the stats counter block for this VEB * @vebs_used: number of VEB's used by function - * @vebs_unallocated: total VEB's not reserved by any function + * @vebs_free: total VEB's not reserved by any function * @cmd_details: pointer to command details structure or NULL * * This retrieves the parameters for a particular VEB, specified by @@ -1059,89 +1541,11 @@ } /** - * i40e_aq_add_vlan - Add VLAN ids to the HW filtering - * @hw: pointer to the hw struct - * @seid: VSI for the vlan filters - * @v_list: list of vlan filters to be added - * @count: length of the list - * @cmd_details: pointer to command details structure or NULL - **/ -i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_add_remove_vlan_element_data *v_list, - u8 count, struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_macvlan *cmd = - (struct i40e_aqc_macvlan *)&desc.params.raw; - i40e_status status; - u16 buf_size; - - if (count == 0 || !v_list || !hw) - return I40E_ERR_PARAM; - - buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data); - - /* prep the rest of the request */ - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); - cmd->num_addresses = cpu_to_le16(count); - cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); - cmd->seid[1] = 0; - cmd->seid[2] = 0; - - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - if (buf_size > I40E_AQ_LARGE_BUF) - 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - - status = i40e_asq_send_command(hw, &desc, v_list, buf_size, - cmd_details); - - return status; -} - -/** - * i40e_aq_remove_vlan - Remove VLANs from the HW filtering - * @hw: pointer to the hw struct - * @seid: VSI for the vlan filters - * @v_list: list of macvlans to be removed - * @count: length of the list - * @cmd_details: pointer to command details structure or NULL - **/ -i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, - struct i40e_aqc_add_remove_vlan_element_data *v_list, - u8 count, struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_macvlan *cmd = - (struct i40e_aqc_macvlan *)&desc.params.raw; - i40e_status status; - u16 buf_size; - - if (count == 0 || !v_list || !hw) - return I40E_ERR_PARAM; - - buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data); - - /* prep the rest of the request */ - i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); - cmd->num_addresses = cpu_to_le16(count); - cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); - cmd->seid[1] = 0; - cmd->seid[2] = 0; - - desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); - if (buf_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - - status = i40e_asq_send_command(hw, &desc, v_list, buf_size, - cmd_details); - - return status; -} - -/** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure * @vfid: vf id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code * @msg: pointer to the msg buffer * @msglen: msg length * @cmd_details: pointer to command details @@ -1371,9 +1775,9 @@ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) - p = (struct i40e_hw_capabilities *)&hw->dev_caps; + p = &hw->dev_caps; else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) - p = (struct i40e_hw_capabilities *)&hw->func_caps; + p = &hw->func_caps; else return; @@ -1519,8 +1923,8 @@ struct i40e_asq_cmd_details *cmd_details) { struct i40e_aqc_list_capabilites *cmd; - i40e_status status = 0; struct i40e_aq_desc desc; + i40e_status status = 0; cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; @@ -1681,6 +2085,63 @@ } /** + * i40e_aq_add_udp_tunnel + * @hw: pointer to the hw struct + * @udp_port: the UDP port to add + * @header_len: length of the tunneling header length in DWords + * @protocol_index: protocol index type + * @filter_index: pointer to filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 header_len, + u8 protocol_index, u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_udp_tunnel *cmd = + (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; + struct i40e_aqc_del_udp_tunnel_completion *resp = + (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); + + cmd->udp_port = cpu_to_le16(udp_port); + cmd->protocol_type = protocol_index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status) + *filter_index = resp->index; + + return status; +} + +/** + * i40e_aq_del_udp_tunnel + * @hw: pointer to the hw struct + * @index: filter index + * @cmd_details: pointer to command details structure or NULL + **/ 
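/*
 * Illustrative sketch only, not part of the patch: how a caller might pair
 * the two UDP tunnel helpers added above.  The header length and protocol
 * index values (0 here) are assumptions made for the example; the
 * i40e_aq_add_udp_tunnel()/i40e_aq_del_udp_tunnel() signatures are taken
 * from the diff.
 */
static i40e_status i40e_example_tunnel_port(struct i40e_hw *hw, u16 port)
{
	i40e_status status;
	u8 filter_index;

	/* ask firmware to treat 'port' as a tunnel UDP port; on success it
	 * reports which filter slot it used
	 */
	status = i40e_aq_add_udp_tunnel(hw, port, 0, 0, &filter_index, NULL);
	if (status)
		return status;

	/* ... later, release that slot again using the remembered index */
	return i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
}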
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_remove_udp_tunnel *cmd = + (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); + + cmd->index = index; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_delete_element - Delete switch element * @hw: pointer to the hw struct * @seid: the SEID to delete from the switch @@ -1709,6 +2170,28 @@ } /** + * i40e_aq_dcb_updated - DCB Updated Command + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * EMP will return when the shared RPB settings have been + * recomputed and modified. The retval field in the descriptor + * will be set to 0 when RPB is modified. + **/ +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler * @hw: pointer to the hw struct * @seid: seid for the physical port/switching component/vsi @@ -1787,6 +2270,40 @@ } /** + * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port + * @hw: pointer to the hw struct + * @seid: seid of the switching component connected to Physical Port + * @ets_data: Buffer holding ETS parameters + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, + sizeof(*ets_data), opcode, cmd_details); +} + +/** + * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), + i40e_aqc_opc_configure_switching_comp_bw_config, + cmd_details); +} + +/** * i40e_aq_query_vsi_bw_config - Query VSI BW configuration * @hw: pointer to the hw struct * @seid: seid of the VSI @@ -2039,3 +2556,110 @@ return 0; } + +/** + * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter + * @hw: pointer to the hw struct + * @mac_addr: MAC address to use in the filter + * @ethtype: Ethertype to use in the filter + * @flags: Flags that needs to be applied to the filter + * @vsi_seid: seid of the control VSI + * @queue: VSI queue number to send the packet to + * @is_add: Add control packet filter if True else remove + * @stats: Structure to hold information on control filter counts + * @cmd_details: pointer to command details structure or NULL + * + * This command will Add or Remove control packet filter for a control VSI. 
+ * In return it will update the total number of perfect filter count in + * the stats member. + **/ +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_control_packet_filter *cmd = + (struct i40e_aqc_add_remove_control_packet_filter *) + &desc.params.raw; + struct i40e_aqc_add_remove_control_packet_filter_completion *resp = + (struct i40e_aqc_add_remove_control_packet_filter_completion *) + &desc.params.raw; + i40e_status status; + + if (vsi_seid == 0) + return I40E_ERR_PARAM; + + if (is_add) { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_control_packet_filter); + cmd->queue = cpu_to_le16(queue); + } else { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_control_packet_filter); + } + + if (mac_addr) + memcpy(cmd->mac, mac_addr, ETH_ALEN); + + cmd->etype = cpu_to_le16(ethtype); + cmd->flags = cpu_to_le16(flags); + cmd->seid = cpu_to_le16(vsi_seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (!status && stats) { + stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); + stats->etype_used = le16_to_cpu(resp->etype_used); + stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); + stats->etype_free = le16_to_cpu(resp->etype_free); + } + + return status; +} + +/** + * i40e_set_pci_config_data - store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status word from PCI config space + * + * Stores the PCI bus info (speed, width, type) within the i40e_hw structure + **/ +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) +{ + hw->bus.type = i40e_bus_type_pci_express; + + switch (link_status & PCI_EXP_LNKSTA_NLW) { + case PCI_EXP_LNKSTA_NLW_X1: + hw->bus.width = i40e_bus_width_pcie_x1; + break; + case PCI_EXP_LNKSTA_NLW_X2: + hw->bus.width = i40e_bus_width_pcie_x2; + break; + case PCI_EXP_LNKSTA_NLW_X4: + hw->bus.width = i40e_bus_width_pcie_x4; + break; + case PCI_EXP_LNKSTA_NLW_X8: + hw->bus.width = i40e_bus_width_pcie_x8; + break; + default: + hw->bus.width = i40e_bus_width_unknown; + break; + } + + switch (link_status & PCI_EXP_LNKSTA_CLS) { + case PCI_EXP_LNKSTA_CLS_2_5GB: + hw->bus.speed = i40e_bus_speed_2500; + break; + case PCI_EXP_LNKSTA_CLS_5_0GB: + hw->bus.speed = i40e_bus_speed_5000; + break; + case PCI_EXP_LNKSTA_CLS_8_0GB: + hw->bus.speed = i40e_bus_speed_8000; + break; + default: + hw->bus.speed = i40e_bus_speed_unknown; + break; + } +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -0,0 +1,472 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. 
If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_dcb.h" + +/** + * i40e_get_dcbx_status + * @hw: pointer to the hw struct + * @status: Embedded DCBX Engine Status + * + * Get the DCBX status from the Firmware + **/ +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +{ + u32 reg; + + if (!status) + return I40E_ERR_PARAM; + + reg = rd32(hw, I40E_PRTDCB_GENS); + *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> + I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + + return 0; +} + +/** + * i40e_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + struct i40e_ieee_ets_config *etscfg; + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* First Octet post subtype + * -------------------------- + * |will-|CBS | Re- | Max | + * |ing | |served| TCs | + * -------------------------- + * |1bit | 1bit|3 bits|3bits| + */ + etscfg = &dcbcfg->etscfg; + etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> + I40E_IEEE_ETS_WILLING_SHIFT); + etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> + I40E_IEEE_ETS_CBS_SHIFT); + etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> + I40E_IEEE_ETS_MAXTC_SHIFT); + + /* Move offset to Priority Assignment Table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + etscfg->prioritytable[i * 2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + etscfg->prioritytable[i * 2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + etscfg->tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u16 offset = 0; + u8 priority; + int i; + + /* Move offset to priority table */ + offset++; + + /* Priority Assignment Table (4 octets) + * Octets:| 1 | 2 | 3 | 4 | + * ----------------------------------------- + * 
|pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| + * ----------------------------------------- + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| + * ----------------------------------------- + */ + for (i = 0; i < 4; i++) { + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> + I40E_IEEE_ETS_PRIO_1_SHIFT); + dcbcfg->etsrec.prioritytable[i*2] = priority; + priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> + I40E_IEEE_ETS_PRIO_0_SHIFT); + dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; + offset++; + } + + /* TC Bandwidth Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; + + /* TSA Assignment Table (8 octets) + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | + * --------------------------------- + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| + * --------------------------------- + */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etsrec.tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + + /* ---------------------------------------- + * |will-|MBC | Re- | PFC | PFC Enable | + * |ing | |served| cap | | + * ----------------------------------------- + * |1bit | 1bit|2 bits|4bits| 1 octet | + */ + dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> + I40E_IEEE_PFC_WILLING_SHIFT); + dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> + I40E_IEEE_PFC_MBC_SHIFT); + dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> + I40E_IEEE_PFC_CAP_SHIFT); + dcbcfg->pfc.pfcenable = buf[1]; +} + +/** + * i40e_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + **/ +static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u16 typelength; + u16 offset = 0; + u16 length; + int i = 0; + u8 *buf; + + typelength = ntohs(tlv->typelength); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + buf = tlv->tlvinfo; + + /* The App priority table starts 5 octets after TLV header */ + length -= (sizeof(tlv->ouisubtype) + 1); + + /* Move offset to App Priority Table */ + offset++; + + /* Application Priority Table (3 octets) + * Octets:| 1 | 2 | 3 | + * ----------------------------------------- + * |Priority|Rsrvd| Sel | Protocol ID | + * ----------------------------------------- + * Bits:|23 21|20 19|18 16|15 0| + * ----------------------------------------- + */ + while (offset < length) { + dcbcfg->app[i].priority = (u8)((buf[offset] & + I40E_IEEE_APP_PRIO_MASK) >> + I40E_IEEE_APP_PRIO_SHIFT); + dcbcfg->app[i].selector = (u8)((buf[offset] & + I40E_IEEE_APP_SEL_MASK) >> + I40E_IEEE_APP_SEL_SHIFT); + dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | + buf[offset + 2]; + /* Move to next app */ + offset += 3; + i++; + if (i >= I40E_DCBX_MAX_APPS) + break; + } + + dcbcfg->numapps = i; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void 
i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u8 subtype; + + ouisubtype = ntohl(tlv->ouisubtype); + subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> + I40E_LLDP_TLV_SUBTYPE_SHIFT); + switch (subtype) { + case I40E_IEEE_SUBTYPE_ETS_CFG: + i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_ETS_REC: + i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_PFC_CFG: + i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg); + break; + case I40E_IEEE_SUBTYPE_APP_PRI: + i40e_parse_ieee_app_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + **/ +static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, + struct i40e_dcbx_config *dcbcfg) +{ + u32 ouisubtype; + u32 oui; + + ouisubtype = ntohl(tlv->ouisubtype); + oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> + I40E_LLDP_TLV_OUI_SHIFT); + switch (oui) { + case I40E_IEEE_8021QAZ_OUI: + i40e_parse_ieee_tlv(tlv, dcbcfg); + break; + default: + break; + } +} + +/** + * i40e_lldp_to_dcb_config + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + **/ +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg) +{ + i40e_status ret = 0; + struct i40e_lldp_org_tlv *tlv; + u16 type; + u16 length; + u16 typelength; + u16 offset = 0; + + if (!lldpmib || !dcbcfg) + return I40E_ERR_PARAM; + + /* set to the start of LLDPDU */ + lldpmib += ETH_HLEN; + tlv = (struct i40e_lldp_org_tlv *)lldpmib; + while (1) { + typelength = ntohs(tlv->typelength); + type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> + I40E_LLDP_TLV_TYPE_SHIFT); + length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> + I40E_LLDP_TLV_LEN_SHIFT); + offset += sizeof(typelength) + length; + + /* END TLV or beyond LLDPDU size */ + if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) + break; + + switch (type) { + case I40E_TLV_TYPE_ORG: + i40e_parse_org_tlv(tlv, dcbcfg); + break; + default: + break; + } + + /* Move to next TLV */ + tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + + sizeof(tlv->typelength) + + length); + } + + return ret; +} + +/** + * i40e_aq_get_dcb_config + * @hw: pointer to the hw struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the Firmware + **/ +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg) +{ + i40e_status ret = 0; + struct i40e_virt_mem mem; + u8 *lldpmib; + + /* Allocate the LLDPDU */ + ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); + if (ret) + return ret; + + lldpmib = (u8 *)mem.va; + ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, + (void *)lldpmib, I40E_LLDPDU_SIZE, + NULL, NULL, NULL); + if (ret) + goto free_mem; + + /* Parse LLDP MIB to get dcb configuration */ + ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); + +free_mem: + i40e_free_virt_mem(hw, &mem); + return ret; +} + +/** + * i40e_get_dcb_config + * @hw: pointer to the hw struct + * + * Get DCB configuration from the Firmware + **/ +i40e_status i40e_get_dcb_config(struct i40e_hw *hw) +{ + i40e_status ret = 0; + + /* Get Local DCB Config */ + ret = i40e_aq_get_dcb_config(hw, 
I40E_AQ_LLDP_MIB_LOCAL, 0, + &hw->local_dcbx_config); + if (ret) + goto out; + + /* Get Remote DCB Config */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); +out: + return ret; +} + +/** + * i40e_init_dcb + * @hw: pointer to the hw struct + * + * Update DCB configuration from the Firmware + **/ +i40e_status i40e_init_dcb(struct i40e_hw *hw) +{ + i40e_status ret = 0; + + if (!hw->func_caps.dcb) + return ret; + + /* Get DCBX status */ + ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); + if (ret) + return ret; + + /* Check the DCBX Status */ + switch (hw->dcbx_status) { + case I40E_DCBX_STATUS_DONE: + case I40E_DCBX_STATUS_IN_PROGRESS: + /* Get current DCBX configuration */ + ret = i40e_get_dcb_config(hw); + break; + case I40E_DCBX_STATUS_DISABLED: + return ret; + case I40E_DCBX_STATUS_NOT_STARTED: + case I40E_DCBX_STATUS_MULTIPLE_PEERS: + default: + break; + } + + /* Configure the LLDP MIB change event */ + ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); + if (ret) + return ret; + + return ret; +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -0,0 +1,107 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DCB_H_ +#define _I40E_DCB_H_ + +#include "i40e_type.h" + +#define I40E_DCBX_STATUS_NOT_STARTED 0 +#define I40E_DCBX_STATUS_IN_PROGRESS 1 +#define I40E_DCBX_STATUS_DONE 2 +#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 +#define I40E_DCBX_STATUS_DISABLED 7 + +#define I40E_TLV_TYPE_END 0 +#define I40E_TLV_TYPE_ORG 127 + +#define I40E_IEEE_8021QAZ_OUI 0x0080C2 +#define I40E_IEEE_SUBTYPE_ETS_CFG 9 +#define I40E_IEEE_SUBTYPE_ETS_REC 10 +#define I40E_IEEE_SUBTYPE_PFC_CFG 11 +#define I40E_IEEE_SUBTYPE_APP_PRI 12 + +/* Defines for LLDP TLV header */ +#define I40E_LLDP_TLV_LEN_SHIFT 0 +#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT) +#define I40E_LLDP_TLV_TYPE_SHIFT 9 +#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT) +#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 +#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) +#define I40E_LLDP_TLV_OUI_SHIFT 8 +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) + +/* Defines for IEEE ETS TLV */ +#define I40E_IEEE_ETS_MAXTC_SHIFT 0 +#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) +#define I40E_IEEE_ETS_CBS_SHIFT 6 +#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_WILLING_SHIFT 7 +#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_PRIO_0_SHIFT 0 +#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) +#define I40E_IEEE_ETS_PRIO_1_SHIFT 4 +#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) + +/* Defines for IEEE TSA types */ +#define I40E_IEEE_TSA_STRICT 0 +#define I40E_IEEE_TSA_ETS 2 + +/* Defines for IEEE PFC TLV */ +#define I40E_IEEE_PFC_CAP_SHIFT 0 +#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT) +#define I40E_IEEE_PFC_MBC_SHIFT 6 +#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_WILLING_SHIFT 7 +#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT) + +/* Defines for IEEE APP TLV */ +#define I40E_IEEE_APP_SEL_SHIFT 0 +#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT) +#define I40E_IEEE_APP_PRIO_SHIFT 5 +#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT) + + +#pragma pack(1) + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct i40e_lldp_org_tlv { + __be16 typelength; + __be32 ouisubtype; + u8 tlvinfo[1]; +}; +#pragma pack() + +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, + u16 *status); +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, + struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, + u8 bridgetype, + struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_get_dcb_config(struct i40e_hw *hw); +i40e_status i40e_init_dcb(struct i40e_hw *hw); +#endif /* _I40E_DCB_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -0,0 +1,316 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifdef CONFIG_I40E_DCB +#include "i40e.h" +#include + +/** + * i40e_get_pfc_delay - retrieve PFC Link Delay + * @hw: pointer to hardware struct + * @delay: holds the PFC Link delay value + * + * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA + **/ +static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) +{ + u32 val; + + val = rd32(hw, I40E_PRTDCB_GENC); + *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> + I40E_PRTDCB_GENC_PFCLDA_SHIFT); +} + +/** + * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration + * @netdev: the corresponding netdev + * @ets: structure to hold the ETS information + * + * Returns local IEEE ETS configuration + **/ +static int i40e_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + dcbxcfg = &hw->local_dcbx_config; + ets->willing = dcbxcfg->etscfg.willing; + ets->ets_cap = dcbxcfg->etscfg.maxtcs; + ets->cbs = dcbxcfg->etscfg.cbs; + memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, + sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, + sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, + sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable, + sizeof(ets->prio_tc)); + memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable, + sizeof(ets->tc_reco_bw)); + memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable, + sizeof(ets->tc_reco_tsa)); + memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prioritytable, + sizeof(ets->reco_prio_tc)); + + return 0; +} + +/** + * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration + * @netdev: the corresponding netdev + * @ets: structure to hold the PFC information + * + * Returns local IEEE PFC configuration + **/ +static int i40e_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + int i; + + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + dcbxcfg = &hw->local_dcbx_config; + pfc->pfc_cap = dcbxcfg->pfc.pfccap; + pfc->pfc_en = dcbxcfg->pfc.pfcenable; + pfc->mbc = dcbxcfg->pfc.mbc; + i40e_get_pfc_delay(hw, &pfc->delay); + + /* Get Requests/Indicatiosn */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = pf->stats.priority_xoff_tx[i]; + pfc->indications[i] = pf->stats.priority_xoff_rx[i]; + } + + return 0; +} + +/** + * i40e_dcbnl_getdcbx - retrieve current DCBx capability + * @netdev: the corresponding netdev + * + * Returns DCBx capability features + **/ +static u8 i40e_dcbnl_getdcbx(struct net_device *dev) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + return pf->dcbx_cap; +} + +/** + * 
i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx + * @netdev: the corresponding netdev + * + * Returns the SAN MAC address used for LLDP exchange + **/ +static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev, + u8 *perm_addr) +{ + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + int i, j; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < dev->addr_len; i++) + perm_addr[i] = pf->hw.mac.perm_addr[i]; + + for (j = 0; j < dev->addr_len; j++, i++) + perm_addr[i] = pf->hw.mac.san_addr[j]; +} + +static const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = i40e_dcbnl_ieee_getets, + .ieee_getpfc = i40e_dcbnl_ieee_getpfc, + .getdcbx = i40e_dcbnl_getdcbx, + .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr, +}; + +/** + * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config + * @vsi: the corresponding vsi + * + * Set up all the IEEE APPs in the DCBNL App Table and generate event for + * other settings + **/ +void i40e_dcbnl_set_all(struct i40e_vsi *vsi) +{ + struct net_device *dev = vsi->netdev; + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + struct dcb_app sapp; + u8 prio, tc_map; + int i; + + /* DCB not enabled */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return; + + dcbxcfg = &hw->local_dcbx_config; + + /* Set up all the App TLVs if DCBx is negotiated */ + for (i = 0; i < dcbxcfg->numapps; i++) { + prio = dcbxcfg->app[i].priority; + tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + + /* Add APP only if the TC is enabled for this VSI */ + if (tc_map & vsi->tc_config.enabled_tc) { + sapp.selector = dcbxcfg->app[i].selector; + sapp.protocol = dcbxcfg->app[i].protocolid; + sapp.priority = prio; + dcb_ieee_setapp(dev, &sapp); + } + } + + /* Notify user-space of the changes */ + dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +} + +/** + * i40e_dcbnl_vsi_del_app - Delete APP for given VSI + * @vsi: the corresponding vsi + * @app: APP to delete + * + * Delete given APP from the DCBNL APP table for given + * VSI + **/ +static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, + struct i40e_ieee_app_priority_table *app) +{ + struct net_device *dev = vsi->netdev; + struct dcb_app sapp; + + if (!dev) + return -EINVAL; + + sapp.selector = app->selector; + sapp.protocol = app->protocolid; + sapp.priority = app->priority; + return dcb_ieee_delapp(dev, &sapp); +} + +/** + * i40e_dcbnl_del_app - Delete APP on all VSIs + * @pf: the corresponding pf + * @app: APP to delete + * + * Delete given APP from all the VSIs for given PF + **/ +static void i40e_dcbnl_del_app(struct i40e_pf *pf, + struct i40e_ieee_app_priority_table *app) +{ + int v, err; + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + if (pf->vsi[v] && pf->vsi[v]->netdev) { + err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); + if (err) + dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", + __func__, pf->vsi[v]->seid, + err, app->selector, + app->protocolid, app->priority); + } + } +} + +/** + * i40e_dcbnl_find_app - Search APP in given DCB config + * @cfg: DCBX configuration data + * @app: APP to search for + * + * Find given APP in the DCB configuration + **/ +static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, + struct i40e_ieee_app_priority_table *app) +{ + int i; + + for (i = 0; i < cfg->numapps; i++) { + if (app->selector == cfg->app[i].selector && + app->protocolid == cfg->app[i].protocolid && + app->priority == cfg->app[i].priority) + return true; + } + + 
return false; +} + +/** + * i40e_dcbnl_flush_apps - Delete all removed APPs + * @pf: the corresponding pf + * @new_cfg: new DCBX configuration data + * + * Find and delete all APPs that are not present in the passed + * DCB configuration + **/ +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *new_cfg) +{ + struct i40e_ieee_app_priority_table app; + struct i40e_dcbx_config *dcbxcfg; + struct i40e_hw *hw = &pf->hw; + int i; + + dcbxcfg = &hw->local_dcbx_config; + for (i = 0; i < dcbxcfg->numapps; i++) { + app = dcbxcfg->app[i]; + /* The APP is not available anymore delete it */ + if (!i40e_dcbnl_find_app(new_cfg, &app)) + i40e_dcbnl_del_app(pf, &app); + } +} + +/** + * i40e_dcbnl_setup - DCBNL setup + * @vsi: the corresponding vsi + * + * Set up DCBNL ops and initial APP TLVs + **/ +void i40e_dcbnl_setup(struct i40e_vsi *vsi) +{ + struct net_device *dev = vsi->netdev; + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + /* DCB not enabled */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) + return; + + /* Do not setup DCB NL ops for MFP mode */ + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + dev->dcbnl_ops = &dcbnl_ops; + + /* Set initial IEEE DCB settings */ + i40e_dcbnl_set_all(vsi); +} +#endif /* CONFIG_I40E_DCB */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
@@ -192,12 +191,12 @@ len = (sizeof(struct i40e_aq_desc) * pf->hw.aq.num_asq_entries); - memcpy(p, pf->hw.aq.asq.desc, len); + memcpy(p, pf->hw.aq.asq.desc_buf.va, len); p += len; len = (sizeof(struct i40e_aq_desc) * pf->hw.aq.num_arq_entries); - memcpy(p, pf->hw.aq.arq.desc, len); + memcpy(p, pf->hw.aq.arq.desc_buf.va, len); p += len; i40e_dbg_dump_data_len = buflen; @@ -362,7 +361,7 @@ } /** - * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum + * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum * @pf: the i40e_pf created in command write * @seid: the seid the user put in **/ @@ -516,10 +515,10 @@ rx_ring->stats.bytes, rx_ring->rx_stats.non_eop_descs); dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", + " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n", i, - rx_ring->rx_stats.alloc_rx_page_failed, - rx_ring->rx_stats.alloc_rx_buff_failed); + rx_ring->rx_stats.alloc_page_failed, + rx_ring->rx_stats.alloc_buff_failed); dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, @@ -533,6 +532,7 @@ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; + dev_info(&pf->pdev->dev, " tx_rings[%i]: desc = %p\n", i, tx_ring->desc); @@ -707,8 +707,13 @@ { struct i40e_adminq_ring *ring; struct i40e_hw *hw = &pf->hw; + char hdr[32]; int i; + snprintf(hdr, sizeof(hdr), "%s %s: ", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); + /* first the send (command) ring, then the receive (event) ring */ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); ring = &(hw->aq.asq); @@ -718,14 +723,8 @@ " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, d->cookie_high, d->cookie_low); - dev_info(&pf->pdev->dev, - " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - d->params.raw[0], d->params.raw[1], d->params.raw[2], - d->params.raw[3], d->params.raw[4], d->params.raw[5], - d->params.raw[6], d->params.raw[7], d->params.raw[8], - d->params.raw[9], d->params.raw[10], d->params.raw[11], - d->params.raw[12], d->params.raw[13], - d->params.raw[14], d->params.raw[15]); + print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, + 16, 1, d->params.raw, 16, 0); } dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); @@ -736,14 +735,8 @@ " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, d->cookie_high, d->cookie_low); - dev_info(&pf->pdev->dev, - " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - d->params.raw[0], d->params.raw[1], d->params.raw[2], - d->params.raw[3], d->params.raw[4], d->params.raw[5], - d->params.raw[6], d->params.raw[7], d->params.raw[8], - d->params.raw[9], d->params.raw[10], d->params.raw[11], - d->params.raw[12], d->params.raw[13], - d->params.raw[14], d->params.raw[15]); + print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, + 16, 1, d->params.raw, 16, 0); } } @@ -759,27 +752,25 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, struct i40e_pf *pf, bool is_rx_ring) { - union i40e_rx_desc *ds; + struct i40e_tx_desc *txd; + union i40e_rx_desc *rxd; struct i40e_ring ring; struct i40e_vsi *vsi; int i; vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { - dev_info(&pf->pdev->dev, - "vsi %d not found\n", vsi_seid); - if (is_rx_ring) - 
dev_info(&pf->pdev->dev, "dump desc rx []\n"); - else - dev_info(&pf->pdev->dev, "dump desc tx []\n"); + dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); - if (is_rx_ring) - dev_info(&pf->pdev->dev, "dump desc rx []\n"); - else - dev_info(&pf->pdev->dev, "dump desc tx []\n"); + return; + } + if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { + dev_info(&pf->pdev->dev, + "descriptor rings have not been allocated for vsi %d\n", + vsi_seid); return; } if (is_rx_ring) @@ -790,22 +781,27 @@ dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); for (i = 0; i < ring.count; i++) { - if (is_rx_ring) - ds = I40E_RX_DESC(&ring, i); - else - ds = (union i40e_rx_desc *) - I40E_TX_DESC(&ring, i); - if ((sizeof(union i40e_rx_desc) == - sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring)) + if (!is_rx_ring) { + txd = I40E_TX_DESC(&ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", i, - ds->read.pkt_addr, ds->read.hdr_addr); - else + " d[%03i] = 0x%016llx 0x%016llx\n", + i, txd->buffer_addr, + txd->cmd_type_offset_bsz); + } else if (sizeof(union i40e_rx_desc) == + sizeof(union i40e_16byte_rx_desc)) { + rxd = I40E_RX_DESC(&ring, i); + dev_info(&pf->pdev->dev, + " d[%03i] = 0x%016llx 0x%016llx\n", + i, rxd->read.pkt_addr, + rxd->read.hdr_addr); + } else { + rxd = I40E_RX_DESC(&ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", - i, ds->read.pkt_addr, - ds->read.hdr_addr, - ds->read.rsvd1, ds->read.rsvd2); + i, rxd->read.pkt_addr, + rxd->read.hdr_addr, + rxd->read.rsvd1, rxd->read.rsvd2); + } } } else if (cnt == 3) { if (desc_n >= ring.count || desc_n < 0) { @@ -813,27 +809,29 @@ "descriptor %d not found\n", desc_n); return; } - if (is_rx_ring) - ds = I40E_RX_DESC(&ring, desc_n); - else - ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n); - if ((sizeof(union i40e_rx_desc) == - sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring)) - dev_info(&pf->pdev->dev, - "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", - vsi_seid, is_rx_ring ? 
"rx" : "tx", ring_id, - desc_n, ds->read.pkt_addr, ds->read.hdr_addr); - else + if (!is_rx_ring) { + txd = I40E_TX_DESC(&ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + txd->buffer_addr, txd->cmd_type_offset_bsz); + } else if (sizeof(union i40e_rx_desc) == + sizeof(union i40e_16byte_rx_desc)) { + rxd = I40E_RX_DESC(&ring, desc_n); + dev_info(&pf->pdev->dev, + "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + vsi_seid, ring_id, desc_n, + rxd->read.pkt_addr, rxd->read.hdr_addr); + } else { + rxd = I40E_RX_DESC(&ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", - vsi_seid, ring_id, - desc_n, ds->read.pkt_addr, ds->read.hdr_addr, - ds->read.rsvd1, ds->read.rsvd2); + vsi_seid, ring_id, desc_n, + rxd->read.pkt_addr, rxd->read.hdr_addr, + rxd->read.rsvd1, rxd->read.rsvd2); + } } else { - if (is_rx_ring) - dev_info(&pf->pdev->dev, "dump desc rx []\n"); - else - dev_info(&pf->pdev->dev, "dump desc tx []\n"); + dev_info(&pf->pdev->dev, "dump desc rx/tx []\n"); } } @@ -979,8 +977,7 @@ veb = i40e_dbg_find_veb(pf, seid); if (!veb) { - dev_info(&pf->pdev->dev, - "%d: can't find veb\n", seid); + dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); return; } dev_info(&pf->pdev->dev, @@ -1006,6 +1003,24 @@ } } +/** + * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR + * @pf: the pf that would be altered + * @flag: flag that needs enabling or disabling + * @enable: Enable/disable FD SD/ATR + **/ +static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) +{ + if (enable) { + pf->flags |= flag; + } else { + pf->flags &= ~flag; + pf->auto_disable_flags |= flag; + } + dev_info(&pf->pdev->dev, "requesting a pf reset\n"); + i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); +} + #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum @@ -1022,8 +1037,6 @@ char *cmd_buf, *cmd_buf_tmp; int bytes_not_copied; struct i40e_vsi *vsi; - u8 *print_buf_start; - u8 *print_buf; int vsi_seid; int veb_seid; int cnt; @@ -1048,11 +1061,6 @@ count = cmd_buf_tmp - cmd_buf + 1; } - print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL); - if (!print_buf_start) - goto command_write_done; - print_buf = print_buf_start; - if (strncmp(cmd_buf, "add vsi", 7) == 0) { vsi_seid = -1; cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); @@ -1104,7 +1112,7 @@ vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, - "add relay: vsi VSI %d not found\n", vsi_seid); + "add relay: VSI %d not found\n", vsi_seid); goto command_write_done; } @@ -1461,21 +1469,25 @@ pf->msg_enable); } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { - dev_info(&pf->pdev->dev, "forcing PFR\n"); - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "corer", 5) == 0) { - dev_info(&pf->pdev->dev, "forcing CoreR\n"); - i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED)); + dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { - dev_info(&pf->pdev->dev, "forcing GlobR\n"); - i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + + } else 
if (strncmp(cmd_buf, "empr", 4) == 0) { + dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); + i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; - cnt = sscanf(&cmd_buf[4], "%x", &address); + cnt = sscanf(&cmd_buf[4], "%i", &address); if (cnt != 1) { dev_info(&pf->pdev->dev, "read \n"); goto command_write_done; @@ -1494,7 +1506,7 @@ } else if (strncmp(cmd_buf, "write", 5) == 0) { u32 address, value; - cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value); + cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); if (cnt != 2) { dev_info(&pf->pdev->dev, "write \n"); goto command_write_done; @@ -1512,7 +1524,7 @@ address, value); } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { - cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid); + cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); if (cnt == 0) { int i; for (i = 0; i < pf->hw.func_caps.num_vsis; i++) @@ -1539,32 +1551,152 @@ } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); } + } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { + struct i40e_aq_desc *desc; + i40e_status ret; + + desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + if (!desc) + goto command_write_done; + cnt = sscanf(&cmd_buf[11], + "%hx %hx %hx %hx %x %x %x %x %x %x", + &desc->flags, + &desc->opcode, &desc->datalen, &desc->retval, + &desc->cookie_high, &desc->cookie_low, + &desc->params.internal.param0, + &desc->params.internal.param1, + &desc->params.internal.param2, + &desc->params.internal.param3); + if (cnt != 10) { + dev_info(&pf->pdev->dev, + "send aq_cmd: bad command string, cnt=%d\n", + cnt); + kfree(desc); + desc = NULL; + goto command_write_done; + } + ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); + if (!ret) { + dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); + } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x AQ Error: %d\n", + desc->opcode, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x Status: %d\n", + desc->opcode, ret); + } + dev_info(&pf->pdev->dev, + "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + desc->flags, desc->opcode, desc->datalen, desc->retval, + desc->cookie_high, desc->cookie_low, + desc->params.internal.param0, + desc->params.internal.param1, + desc->params.internal.param2, + desc->params.internal.param3); + kfree(desc); + desc = NULL; + } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { + struct i40e_aq_desc *desc; + i40e_status ret; + u16 buffer_len; + u8 *buff; + + desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); + if (!desc) + goto command_write_done; + cnt = sscanf(&cmd_buf[20], + "%hx %hx %hx %hx %x %x %x %x %x %x %hd", + &desc->flags, + &desc->opcode, &desc->datalen, &desc->retval, + &desc->cookie_high, &desc->cookie_low, + &desc->params.internal.param0, + &desc->params.internal.param1, + &desc->params.internal.param2, + &desc->params.internal.param3, + &buffer_len); + if (cnt != 11) { + dev_info(&pf->pdev->dev, + "send indirect aq_cmd: bad command string, cnt=%d\n", + cnt); + kfree(desc); + desc = NULL; + goto command_write_done; + } + /* Just stub a buffer big enough in case user messed up */ + if (buffer_len == 0) + buffer_len = 1280; + + buff = kzalloc(buffer_len, GFP_KERNEL); + if (!buff) { + kfree(desc); + desc = NULL; + goto command_write_done; + } + desc->flags |= 
cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + ret = i40e_asq_send_command(&pf->hw, desc, buff, + buffer_len, NULL); + if (!ret) { + dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); + } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x AQ Error: %d\n", + desc->opcode, pf->hw.aq.asq_last_status); + } else { + dev_info(&pf->pdev->dev, + "AQ command send failed Opcode %x Status: %d\n", + desc->opcode, ret); + } + dev_info(&pf->pdev->dev, + "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + desc->flags, desc->opcode, desc->datalen, desc->retval, + desc->cookie_high, desc->cookie_low, + desc->params.internal.param0, + desc->params.internal.param1, + desc->params.internal.param2, + desc->params.internal.param3); + print_hex_dump(KERN_INFO, "AQ buffer WB: ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, buffer_len, true); + kfree(buff); + buff = NULL; + kfree(desc); + desc = NULL; } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { - struct i40e_fdir_data fd_data; + struct i40e_fdir_filter fd_data; u16 packet_len, i, j = 0; char *asc_packet; + u8 *raw_packet; bool add = false; int ret; - asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + if (strncmp(cmd_buf, "add", 3) == 0) + add = true; + + if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) + goto command_write_done; + + asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); if (!asc_packet) goto command_write_done; - fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, - GFP_KERNEL); + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, + GFP_KERNEL); - if (!fd_data.raw_packet) { + if (!raw_packet) { kfree(asc_packet); asc_packet = NULL; goto command_write_done; } - if (strncmp(cmd_buf, "add", 3) == 0) - add = true; cnt = sscanf(&cmd_buf[13], - "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s", + "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", &fd_data.q_index, &fd_data.flex_off, &fd_data.pctype, &fd_data.dest_vsi, &fd_data.dest_ctl, @@ -1576,42 +1708,46 @@ cnt); kfree(asc_packet); asc_packet = NULL; - kfree(fd_data.raw_packet); + kfree(raw_packet); goto command_write_done; } /* fix packet length if user entered 0 */ if (packet_len == 0) - packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP; + packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE; /* make sure to check the max as well */ packet_len = min_t(u16, - packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); + packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); - dev_info(&pf->pdev->dev, "FD raw packet:\n"); for (i = 0; i < packet_len; i++) { sscanf(&asc_packet[j], "%2hhx ", - &fd_data.raw_packet[i]); + &raw_packet[i]); j += 3; - snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]); - print_buf += 3; - if ((i % 16) == 15) { - snprintf(print_buf, 1, "\n"); - print_buf++; - } } - dev_info(&pf->pdev->dev, "%s\n", print_buf_start); - ret = i40e_program_fdir_filter(&fd_data, pf, add); + dev_info(&pf->pdev->dev, "FD raw packet dump\n"); + print_hex_dump(KERN_INFO, "FD raw packet: ", + DUMP_PREFIX_OFFSET, 16, 1, + raw_packet, packet_len, true); + ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add); if (!ret) { dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); } else { dev_info(&pf->pdev->dev, "Filter command send failed %d\n", ret); } - kfree(fd_data.raw_packet); - fd_data.raw_packet = NULL; + kfree(raw_packet); + raw_packet = NULL; 
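/*
 * A minimal, self-contained sketch (not part of the patch; helper name and
 * buffer are hypothetical) of the print_hex_dump() pattern the hunks above
 * switch to in place of the old hand-rolled snprintf() hex loops. Only the
 * print_hex_dump() call itself mirrors the kernel API used by the driver.
 */
#include <linux/kernel.h>
#include <linux/printk.h>

static void example_dump_buffer(const void *buf, size_t len)
{
	/* 16 bytes per row, 1-byte groups, offset prefix, ASCII column on */
	print_hex_dump(KERN_INFO, "example buf: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, true);
}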
kfree(asc_packet); asc_packet = NULL; + } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); + } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); + } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false); + } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) { + i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; @@ -1622,8 +1758,35 @@ pf->hw.aq.asq_last_status); goto command_write_done; } + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + I40E_ETH_P_LLDP, 0, + pf->vsi[pf->lan_vsi]->seid, + 0, true, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: Add Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); + goto command_write_done; + } +#ifdef CONFIG_I40E_DCB + pf->dcbx_cap = DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { int ret; + ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, + pf->hw.mac.addr, + I40E_ETH_P_LLDP, 0, + pf->vsi[pf->lan_vsi]->seid, + 0, false, NULL, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: Remove Control Packet Filter AQ command failed =0x%x\n", + __func__, pf->hw.aq.asq_last_status); + /* Continue and start FW LLDP anyways */ + } + ret = i40e_aq_start_lldp(&pf->hw, NULL); if (ret) { dev_info(&pf->pdev->dev, @@ -1631,10 +1794,14 @@ pf->hw.aq.asq_last_status); goto command_write_done; } +#ifdef CONFIG_I40E_DCB + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) { u16 llen, rlen; - int ret, i; + int ret; u8 *buff; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) @@ -1652,22 +1819,15 @@ buff = NULL; goto command_write_done; } - dev_info(&pf->pdev->dev, - "Get LLDP MIB (local) AQ buffer written back:\n"); - for (i = 0; i < I40E_LLDPDU_SIZE; i++) { - snprintf(print_buf, 3, "%02x ", buff[i]); - print_buf += 3; - if ((i % 16) == 15) { - snprintf(print_buf, 1, "\n"); - print_buf++; - } - } - dev_info(&pf->pdev->dev, "%s\n", print_buf_start); + dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); + print_hex_dump(KERN_INFO, "LLDP MIB (local): ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, I40E_LLDPDU_SIZE, true); kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { u16 llen, rlen; - int ret, i; + int ret; u8 *buff; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) @@ -1686,17 +1846,10 @@ buff = NULL; goto command_write_done; } - dev_info(&pf->pdev->dev, - "Get LLDP MIB (remote) AQ buffer written back:\n"); - for (i = 0; i < I40E_LLDPDU_SIZE; i++) { - snprintf(print_buf, 3, "%02x ", buff[i]); - print_buf += 3; - if ((i % 16) == 15) { - snprintf(print_buf, 1, "\n"); - print_buf++; - } - } - dev_info(&pf->pdev->dev, "%s\n", print_buf_start); + dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); + print_hex_dump(KERN_INFO, "LLDP MIB (remote): ", + DUMP_PREFIX_OFFSET, 16, 1, + buff, I40E_LLDPDU_SIZE, true); kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { @@ -1721,7 +1874,7 @@ } } } else if (strncmp(cmd_buf, "nvm read", 8) == 0) { - u16 buffer_len, i, bytes; + u16 buffer_len, bytes; u16 module; u32 offset; u16 *buff; @@ -1775,16 +1928,10 @@ dev_info(&pf->pdev->dev, "Read NVM 
module=0x%x offset=0x%x words=%d\n", module, offset, buffer_len); - for (i = 0; i < buffer_len; i++) { - if ((i % 16) == 0) { - snprintf(print_buf, 11, "\n0x%08x: ", - offset + i); - print_buf += 11; - } - snprintf(print_buf, 5, "%04x ", buff[i]); - print_buf += 5; - } - dev_info(&pf->pdev->dev, "%s\n", print_buf_start); + if (bytes) + print_hex_dump(KERN_INFO, "NVM Dump: ", + DUMP_PREFIX_OFFSET, 16, 2, + buff, bytes, true); } kfree(buff); buff = NULL; @@ -1814,8 +1961,14 @@ dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); + dev_info(&pf->pdev->dev, " send aq_cmd \n"); + dev_info(&pf->pdev->dev, " send indirect aq_cmd \n"); dev_info(&pf->pdev->dev, " add fd_filter \n"); dev_info(&pf->pdev->dev, " rem fd_filter \n"); + dev_info(&pf->pdev->dev, " fd-atr off\n"); + dev_info(&pf->pdev->dev, " fd-atr on\n"); + dev_info(&pf->pdev->dev, " fd-sb off\n"); + dev_info(&pf->pdev->dev, " fd-sb on\n"); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); dev_info(&pf->pdev->dev, " lldp get local\n"); @@ -1828,9 +1981,6 @@ command_write_done: kfree(cmd_buf); cmd_buf = NULL; - kfree(print_buf_start); - print_buf = NULL; - print_buf_start = NULL; return count; } @@ -1937,9 +2087,13 @@ if (!vsi) { dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; - } - if (rtnl_trylock()) { + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", + vsi_seid); + } else if (test_bit(__I40E_DOWN, &vsi->state)) { + dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", + vsi_seid); + } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "tx_timeout called\n"); @@ -1958,9 +2112,10 @@ if (!vsi) { dev_info(&pf->pdev->dev, "change_mtu: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; - } - if (rtnl_trylock()) { + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, mtu); rtnl_unlock(); @@ -1979,9 +2134,10 @@ if (!vsi) { dev_info(&pf->pdev->dev, "set_rx_mode: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; - } - if (rtnl_trylock()) { + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", + vsi_seid); + } else if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "set_rx_mode called\n"); @@ -1999,11 +2155,14 @@ if (!vsi) { dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", vsi_seid); - goto netdev_ops_write_done; + } else if (!vsi->netdev) { + dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", + vsi_seid); + } else { + for (i = 0; i < vsi->num_q_vectors; i++) + napi_schedule(&vsi->q_vectors[i]->napi); + dev_info(&pf->pdev->dev, "napi called\n"); } - for (i = 0; i < vsi->num_q_vectors; i++) - napi_schedule(&vsi->q_vectors[i]->napi); - dev_info(&pf->pdev->dev, "napi called\n"); } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", i40e_dbg_netdev_ops_buf); --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. 
+ * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -68,16 +67,16 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = { /* offset mask elements stride */ - {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, + {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, - {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, - {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, - {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, + {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, + {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, + {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0}, {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0}, - {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, - {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, - {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, + {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, + {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, + {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0}, { 0 } }; @@ -119,7 +118,7 @@ /* read NVM control word and if NVM valid, validate EEPROM checksum*/ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); - if ((!ret_code) && + if (!ret_code && ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) == (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) { ret_code = i40e_validate_nvm_checksum(hw, NULL); --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -31,10 +30,10 @@ #include "i40e_type.h" enum i40e_lb_mode { - I40E_LB_MODE_NONE = 0, - I40E_LB_MODE_PHY_LOCAL, - I40E_LB_MODE_PHY_REMOTE, - I40E_LB_MODE_MAC_LOCAL, + I40E_LB_MODE_NONE = 0x0, + I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL, + I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE, + I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL, }; struct i40e_diag_reg_test_info { --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -63,6 +62,9 @@ I40E_NETDEV_STAT(rx_crc_errors), }; +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd); + /* These PF_STATs might look like duplicates of some NETDEV_STATs, * but they are separate. 
This device supports Virtualization, and * as such might have several netdevs supporting VMDq and FCoE going @@ -85,6 +87,7 @@ I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), + I40E_PF_STAT("tx_timeout", tx_timeout_count), I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), @@ -109,6 +112,13 @@ I40E_PF_STAT("rx_oversize", stats.rx_oversize), I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + /* LPI stats */ + I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), + I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), + I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), + I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), }; #define I40E_QUEUE_STATS_LEN(n) \ @@ -193,28 +203,48 @@ ecmd->supported = SUPPORTED_10000baseKR_Full; ecmd->advertising = ADVERTISED_10000baseKR_Full; break; - case I40E_PHY_TYPE_10GBASE_T: default: - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + if (i40e_is_40G_device(hw->device_id)) { + ecmd->supported = SUPPORTED_40000baseSR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } break; } - /* for now just say autoneg all the time */ ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); - if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) { + switch (hw->phy.media_type) { + case I40E_MEDIA_TYPE_BACKPLANE: ecmd->supported |= SUPPORTED_Backplane; ecmd->advertising |= ADVERTISED_Backplane; ecmd->port = PORT_NONE; - } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) { + break; + case I40E_MEDIA_TYPE_BASET: ecmd->supported |= SUPPORTED_TP; ecmd->advertising |= ADVERTISED_TP; ecmd->port = PORT_TP; - } else { + break; + case I40E_MEDIA_TYPE_DA: + case I40E_MEDIA_TYPE_CX4: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + break; + case I40E_MEDIA_TYPE_FIBER: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_FIBRE; + break; + case I40E_MEDIA_TYPE_UNKNOWN: + default: + ecmd->port = PORT_OTHER; + break; } ecmd->transceiver = XCVR_EXTERNAL; @@ -256,12 +286,14 @@ ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); - pause->rx_pause = 0; - pause->tx_pause = 0; - if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX) + if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { pause->rx_pause = 1; - if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX) + } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { pause->tx_pause = 1; + } else if (hw->fc.current_mode == I40E_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } } static u32 i40e_get_msglevel(struct net_device *netdev) @@ -329,38 +361,56 @@ { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; - int first_word, last_word; - u16 i, eeprom_len; - u16 *eeprom_buff; - int ret_val = 0; - + struct i40e_pf *pf = np->vsi->back; + int ret_val = 0, len; + u8 *eeprom_buff; + u16 i, sectors; + bool last; +#define I40E_NVM_SECTOR_SIZE 4096 if (eeprom->len == 0) return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); - first_word = eeprom->offset >> 1; - last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_len = last_word - first_word + 1; - - eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); + eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); if (!eeprom_buff) return -ENOMEM; - ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len, - eeprom_buff); - if (eeprom_len == 0) { - kfree(eeprom_buff); - return -EACCES; + ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret_val) { + dev_info(&pf->pdev->dev, + "Failed Acquiring NVM resource for read err=%d status=0x%x\n", + ret_val, hw->aq.asq_last_status); + goto free_buff; + } + + sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; + sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; + len = I40E_NVM_SECTOR_SIZE; + last = false; + for (i = 0; i < sectors; i++) { + if (i == (sectors - 1)) { + len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); + last = true; + } + ret_val = i40e_aq_read_nvm(hw, 0x0, + eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), + len, + eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), + last, NULL); + if (ret_val) { + dev_info(&pf->pdev->dev, + "read NVM failed err=%d status=0x%x\n", + ret_val, hw->aq.asq_last_status); + goto release_nvm; + } } - /* Device's eeprom is always little-endian, word addressable */ - for (i = 0; i < eeprom_len; i++) - le16_to_cpus(&eeprom_buff[i]); - - memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); +release_nvm: + i40e_release_nvm(hw); + memcpy(bytes, eeprom_buff, eeprom->len); +free_buff: kfree(eeprom_buff); - return ret_val; } @@ -368,8 +418,14 @@ { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; + u32 val; - return hw->nvm.sr_size * 2; + val = (rd32(hw, I40E_GLPCI_LBARCTRL) + & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) + >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; + /* register returns value in power of 2, 64Kbyte chunks. 
*/ + val = (64 * 1024) * (1 << val); + return val; } static void i40e_get_drvinfo(struct net_device *netdev, @@ -418,15 +474,19 @@ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_tx_count = clamp_t(u32, ring->tx_pending, - I40E_MIN_NUM_DESCRIPTORS, - I40E_MAX_NUM_DESCRIPTORS); - new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); - - new_rx_count = clamp_t(u32, ring->rx_pending, - I40E_MIN_NUM_DESCRIPTORS, - I40E_MAX_NUM_DESCRIPTORS); - new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); + if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || + ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || + ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, + I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); /* if nothing to do return success */ if ((new_tx_count == vsi->tx_rings[0]->count) && @@ -598,18 +658,18 @@ /* process Tx ring statistics */ do { - start = u64_stats_fetch_begin_bh(&tx_ring->syncp); + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); data[i] = tx_ring->stats.packets; data[i + 1] = tx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); /* Rx ring is the 2nd half of the queue pair */ rx_ring = &tx_ring[1]; do { - start = u64_stats_fetch_begin_bh(&rx_ring->syncp); + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); data[i + 2] = rx_ring->stats.packets; data[i + 3] = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); } rcu_read_unlock(); if (vsi == pf->vsi[pf->lan_vsi]) { @@ -699,11 +759,44 @@ static int i40e_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { - return ethtool_op_get_ts_info(dev, info); + struct i40e_pf *pf = i40e_netdev_to_pf(dev); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (pf->ptp_clock) + info->phc_index = ptp_clock_index(pf->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + + return 0; } -static int i40e_link_test(struct i40e_pf *pf, u64 *data) +static int i40e_link_test(struct net_device *netdev, u64 *data) { + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "link test\n"); if (i40e_get_link_status(&pf->hw)) *data = 0; else @@ -712,36 +805,51 @@ return *data; } -static int i40e_reg_test(struct i40e_pf *pf, u64 *data) +static int 
i40e_reg_test(struct net_device *netdev, u64 *data) { - i40e_status ret; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; - ret = i40e_diag_reg_test(&pf->hw); - *data = ret; + netif_info(pf, hw, netdev, "register test\n"); + *data = i40e_diag_reg_test(&pf->hw); - return ret; + return *data; } -static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data) +static int i40e_eeprom_test(struct net_device *netdev, u64 *data) { - i40e_status ret; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; - ret = i40e_diag_eeprom_test(&pf->hw); - *data = ret; + netif_info(pf, hw, netdev, "eeprom test\n"); + *data = i40e_diag_eeprom_test(&pf->hw); - return ret; + return *data; } -static int i40e_intr_test(struct i40e_pf *pf, u64 *data) +static int i40e_intr_test(struct net_device *netdev, u64 *data) { - *data = -ENOSYS; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + u16 swc_old = pf->sw_int_count; + + netif_info(pf, hw, netdev, "interrupt test\n"); + wr32(&pf->hw, I40E_PFINT_DYN_CTL0, + (I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + usleep_range(1000, 2000); + *data = (swc_old == pf->sw_int_count); return *data; } -static int i40e_loopback_test(struct i40e_pf *pf, u64 *data) +static int i40e_loopback_test(struct net_device *netdev, u64 *data) { - *data = -ENOSYS; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + netif_info(pf, hw, netdev, "loopback test not implemented\n"); + *data = 0; return *data; } @@ -752,42 +860,38 @@ struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - set_bit(__I40E_TESTING, &pf->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ + netif_info(pf, drv, netdev, "offline testing starting\n"); - netdev_info(netdev, "offline testing starting\n"); + set_bit(__I40E_TESTING, &pf->state); /* Link test performed before hardware reset * so autoneg doesn't interfere with test result */ - netdev_info(netdev, "link test starting\n"); - if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK])) + if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; - netdev_info(netdev, "register test starting\n"); - if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG])) + if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM])) eth_test->flags |= ETH_TEST_FL_FAILED; - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); - netdev_info(netdev, "eeprom test starting\n"); - if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM])) + if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR])) eth_test->flags |= ETH_TEST_FL_FAILED; - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); - netdev_info(netdev, "interrupt test starting\n"); - if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR])) + if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK])) eth_test->flags |= ETH_TEST_FL_FAILED; - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); - netdev_info(netdev, "loopback test starting\n"); - if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK])) + /* run reg test last, a reset is required after it */ + if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, &pf->state); + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); } else { - netdev_info(netdev, "online test starting\n"); /* Online tests */ - if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK])) + netif_info(pf, 
drv, netdev, "online testing starting\n"); + + if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) eth_test->flags |= ETH_TEST_FL_FAILED; /* Offline only tests, not run in online; pass by default */ @@ -795,16 +899,53 @@ data[I40E_ETH_TEST_EEPROM] = 0; data[I40E_ETH_TEST_INTR] = 0; data[I40E_ETH_TEST_LOOPBACK] = 0; - - clear_bit(__I40E_TESTING, &pf->state); } + + netif_info(pf, drv, netdev, "testing finished\n"); } static void i40e_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { - wol->supported = 0; - wol->wolopts = 0; + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 wol_nvm_bits; + + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if ((1 << hw->port) & wol_nvm_bits) { + wol->supported = 0; + wol->wolopts = 0; + } else { + wol->supported = WAKE_MAGIC; + wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); + } +} + +static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u16 wol_nvm_bits; + + /* NVM bit on means WoL disabled for the port */ + i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); + if (((1 << hw->port) & wol_nvm_bits)) + return -EOPNOTSUPP; + + /* only magic packet is supported */ + if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) + return -EOPNOTSUPP; + + /* is this a new value? */ + if (pf->wol_en != !!wol->wolopts) { + pf->wol_en = !!wol->wolopts; + device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); + } + + return 0; } static int i40e_nway_reset(struct net_device *netdev) @@ -838,13 +979,13 @@ pf->led_status = i40e_led_get(hw); return blink_freq; case ETHTOOL_ID_ON: - i40e_led_set(hw, 0xF); + i40e_led_set(hw, 0xF, false); break; case ETHTOOL_ID_OFF: - i40e_led_set(hw, 0x0); + i40e_led_set(hw, 0x0, false); break; case ETHTOOL_ID_INACTIVE: - i40e_led_set(hw, pf->led_status); + i40e_led_set(hw, pf->led_status, false); break; } @@ -980,6 +1121,84 @@ } /** + * i40e_get_ethtool_fdir_all - Populates the rule count of a command + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * @rule_locs: Array of used rule locations + * + * This function populates both the total and actual rule count of + * the ethtool flow classification command + * + * Returns 0 on success or -EMSGSIZE if entry not found + **/ +static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct i40e_fdir_filter *rule; + struct hlist_node *node2; + int cnt = 0; + + /* report total rule count */ + cmd->data = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = rule->fd_id; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +/** + * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * + * This function looks up a filter based on the Rx flow classification + * command and fills the flow spec info for it if found + * + * Returns 0 on success or -EINVAL if filter not found + **/ +static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, + struct ethtool_rxnfc *cmd) +{ + struct 
ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct i40e_fdir_filter *rule = NULL; + struct hlist_node *node2; + + /* report total rule count */ + cmd->data = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + if (fsp->location <= rule->fd_id) + break; + } + + if (!rule || fsp->location != rule->fd_id) + return -EINVAL; + + fsp->flow_type = rule->flow_type; + fsp->h_u.tcp_ip4_spec.psrc = rule->src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0]; + fsp->ring_cookie = rule->q_index; + + return 0; +} + +/** * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -1003,14 +1222,15 @@ ret = i40e_get_rss_hash_opts(pf, cmd); break; case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = pf->fdir_pf_active_filters; ret = 0; break; case ETHTOOL_GRXCLSRULE: - ret = 0; + ret = i40e_get_ethtool_fdir_entry(pf, cmd); break; case ETHTOOL_GRXCLSRLALL: - cmd->data = 500; - ret = 0; + ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); + break; default: break; } @@ -1141,265 +1361,186 @@ return 0; } -#define IP_HEADER_OFFSET 14 /** - * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it + * i40e_match_fdir_input_set - Match a new filter against an existing one + * @rule: The filter already added + * @input: The new filter to comapre against * - * Returns 0 if the filters were successfully added or removed + * Returns true if the two input set match **/ -static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) +static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, + struct i40e_fdir_filter *input) { - struct i40e_pf *pf = vsi->back; - struct udphdr *udp; - struct iphdr *ip; - bool err = false; - int ret; - int i; - - ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); - udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET - + sizeof(struct iphdr)); - - ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src; - ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; - udp->source = fsp->h_u.tcp_ip4_spec.psrc; - udp->dest = fsp->h_u.tcp_ip4_spec.pdst; - - for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; - i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { - fd_data->pctype = i; - ret = i40e_program_fdir_filter(fd_data, pf, add); - - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - } - } - - return err ? 
-EOPNOTSUPP : 0; + if ((rule->dst_ip[0] != input->dst_ip[0]) || + (rule->src_ip[0] != input->src_ip[0]) || + (rule->dst_port != input->dst_port) || + (rule->src_port != input->src_port)) + return false; + return true; } /** - * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it + * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry + * @vsi: Pointer to the targeted VSI + * @input: The filter to update or NULL to indicate deletion + * @sw_idx: Software index to the filter + * @cmd: The command to get or set Rx flow classification rules * - * Returns 0 if the filters were successfully added or removed + * This function updates (or deletes) a Flow Director entry from + * the hlist of the corresponding PF + * + * Returns 0 on success **/ -static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) +static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, + u16 sw_idx, + struct ethtool_rxnfc *cmd) { + struct i40e_fdir_filter *rule, *parent; struct i40e_pf *pf = vsi->back; - struct tcphdr *tcp; - struct iphdr *ip; - bool err = false; - int ret; - - ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); - tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET - + sizeof(struct iphdr)); + struct hlist_node *node2; + int err = -EINVAL; - ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; - tcp->dest = fsp->h_u.tcp_ip4_spec.pdst; + parent = NULL; + rule = NULL; - fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; - ret = i40e_program_fdir_filter(fd_data, pf, add); + hlist_for_each_entry_safe(rule, node2, + &pf->fdir_filter_list, fdir_node) { + /* hash found, or no matching entry */ + if (rule->fd_id >= sw_idx) + break; + parent = rule; + } - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->fd_id == sw_idx)) { + if (input && !i40e_match_fdir_input_set(rule, input)) + err = i40e_add_del_fdir(vsi, rule, false); + else if (!input) + err = i40e_add_del_fdir(vsi, rule, false); + hlist_del(&rule->fdir_node); + kfree(rule); + pf->fdir_pf_active_filters--; } - ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src; - tcp->source = fsp->h_u.tcp_ip4_spec.psrc; + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; - fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); - ret = i40e_program_fdir_filter(fd_data, pf, add); - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - } + /* add filter to the list */ + if (parent) + hlist_add_after(&parent->fdir_node, &input->fdir_node); + else + hlist_add_head(&input->fdir_node, + &pf->fdir_filter_list); - return err ? 
-EOPNOTSUPP : 0; -} + /* update counts */ + pf->fdir_pf_active_filters++; -/** - * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it - * - * Returns 0 if the filters were successfully added or removed - **/ -static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) -{ - return -EOPNOTSUPP; + return 0; } /** - * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required for the FDir descriptor - * @fsp: the ethtool flow spec - * @add: true adds a filter, false removes it + * i40e_del_fdir_entry - Deletes a Flow Director filter entry + * @vsi: Pointer to the targeted VSI + * @cmd: The command to get or set Rx flow classification rules * - * Returns 0 if the filters were successfully added or removed - **/ -static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, - struct i40e_fdir_data *fd_data, - struct ethtool_rx_flow_spec *fsp, bool add) + * The function removes a Flow Director filter entry from the + * hlist of the corresponding PF + * + * Returns 0 on success + */ +static int i40e_del_fdir_entry(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) { + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; struct i40e_pf *pf = vsi->back; - struct iphdr *ip; - bool err = false; - int ret; - int i; - - ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); + int ret = 0; - ip->saddr = fsp->h_u.usr_ip4_spec.ip4src; - ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst; - ip->protocol = fsp->h_u.usr_ip4_spec.proto; - - for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; - i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { - fd_data->pctype = i; - ret = i40e_program_fdir_filter(fd_data, pf, add); + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); - if (ret) { - dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - err = true; - } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); - } - } - - return err ? -EOPNOTSUPP : 0; + i40e_fdir_check_and_reenable(pf); + return ret; } /** - * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for - * a specific flow spec based on their protocol + * i40e_add_fdir_ethtool - Add/Remove Flow Director filters * @vsi: pointer to the targeted VSI * @cmd: command to get or set RX flow classification rules - * @add: true adds a filter, false removes it * - * Returns 0 if the filters were successfully added or removed + * Add Flow Director filters for a specific flow spec based on their + * protocol. Returns 0 if the filters were successfully added. 
**/ -static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi, - struct ethtool_rxnfc *cmd, bool add) +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, + struct ethtool_rxnfc *cmd) { - struct i40e_fdir_data fd_data; - int ret = -EINVAL; + struct ethtool_rx_flow_spec *fsp; + struct i40e_fdir_filter *input; struct i40e_pf *pf; - struct ethtool_rx_flow_spec *fsp = - (struct ethtool_rx_flow_spec *)&cmd->fs; + int ret = -EINVAL; if (!vsi) return -EINVAL; pf = vsi->back; - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= vsi->num_queue_pairs)) + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return -EOPNOTSUPP; + + if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) + return -ENOSPC; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + + pf->hw.func_caps.fd_filters_guaranteed)) { + return -EINVAL; + } + + if (fsp->ring_cookie >= vsi->num_queue_pairs) return -EINVAL; - /* Populate the Flow Director that we have at the moment - * and allocate the raw packet buffer for the calling functions - */ - fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, - GFP_KERNEL); + input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!fd_data.raw_packet) { - dev_info(&pf->pdev->dev, "Could not allocate memory\n"); + if (!input) return -ENOMEM; - } - fd_data.q_index = fsp->ring_cookie; - fd_data.flex_off = 0; - fd_data.pctype = 0; - fd_data.dest_vsi = vsi->id; - fd_data.dest_ctl = 0; - fd_data.fd_status = 0; - fd_data.cnt_index = 0; - fd_data.fd_id = 0; + input->fd_id = fsp->location; - switch (fsp->flow_type & ~FLOW_EXT) { - case TCP_V4_FLOW: - ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); - break; - case UDP_V4_FLOW: - ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); - break; - case SCTP_V4_FLOW: - ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); - break; - case IPV4_FLOW: - ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); - break; - case IP_USER_FLOW: - switch (fsp->h_u.usr_ip4_spec.proto) { - case IPPROTO_TCP: - ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); - break; - case IPPROTO_UDP: - ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); - break; - case IPPROTO_SCTP: - ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); - break; - default: - ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); - break; - } - break; - default: - dev_info(&pf->pdev->dev, "Could not specify spec type\n"); - ret = -EINVAL; - } + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; + else + input->dest_ctl = + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; - kfree(fd_data.raw_packet); - fd_data.raw_packet = NULL; + input->q_index = fsp->ring_cookie; + input->flex_off = 0; + input->pctype = 0; + input->dest_vsi = vsi->id; + input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; + input->cnt_index = 0; + input->flow_type = fsp->flow_type; + input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; + input->src_port = fsp->h_u.tcp_ip4_spec.psrc; + input->dst_port = fsp->h_u.tcp_ip4_spec.pdst; + input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + + ret = i40e_add_del_fdir(vsi, input, true); + if (ret) + kfree(input); + else + i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); return ret; } + /** * i40e_set_rxnfc - command to set RX flow classification rules * @netdev: network interface device structure @@ -1419,10 +1560,10 @@ ret = i40e_set_rss_hash_opt(pf, cmd); break; 
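/*
 * A standalone sketch (hypothetical struct and function names, not driver
 * code) of the sorted-hlist bookkeeping the ethtool flow director paths
 * above rely on: walk the list to find the insertion point by id, then link
 * the new node after its predecessor or at the head. hlist_add_after() is
 * the v3.13-era list API (renamed hlist_add_behind() in later kernels).
 */
#include <linux/list.h>
#include <linux/types.h>

struct example_rule {
	struct hlist_node node;
	u16 id;
};

static void example_insert_sorted(struct hlist_head *head,
				  struct example_rule *new)
{
	struct example_rule *rule, *parent = NULL;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(rule, tmp, head, node) {
		if (rule->id >= new->id)
			break;
		parent = rule;
	}

	if (parent)
		hlist_add_after(&parent->node, &new->node);
	else
		hlist_add_head(&new->node, head);
}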
case ETHTOOL_SRXCLSRLINS: - ret = i40e_add_del_fdir_ethtool(vsi, cmd, true); + ret = i40e_add_fdir_ethtool(vsi, cmd); break; case ETHTOOL_SRXCLSRLDEL: - ret = i40e_add_del_fdir_ethtool(vsi, cmd, false); + ret = i40e_del_fdir_entry(vsi, cmd); break; default: break; @@ -1431,6 +1572,94 @@ return ret; } +/** + * i40e_max_channels - get Max number of combined channels supported + * @vsi: vsi pointer + **/ +static unsigned int i40e_max_channels(struct i40e_vsi *vsi) +{ + /* TODO: This code assumes DCB and FD is disabled for now. */ + return vsi->alloc_queue_pairs; +} + +/** + * i40e_get_channels - Get the current channels enabled and max supported etc. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * We don't support separate tx and rx queues as channels. The other count + * represents how many queues are being used for control. max_combined counts + * how many queue pairs we can support. They may not be mapped 1 to 1 with + * q_vectors since we support a lot more queue pairs than q_vectors. + **/ +static void i40e_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + /* report maximum channels */ + ch->max_combined = i40e_max_channels(vsi); + + /* report info for other vector */ + ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; + ch->max_other = ch->other_count; + + /* Note: This code assumes DCB is disabled for now. */ + ch->combined_count = vsi->num_queue_pairs; +} + +/** + * i40e_set_channels - Set the new channels count. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * The new channels count may not be the same as requested by the user + * since it gets rounded down to a power of 2 value. + **/ +static int i40e_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + unsigned int count = ch->combined_count; + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + int new_count; + + /* We do not support setting channels for any other VSI at present */ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 
1 : 0)) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > i40e_max_channels(vsi)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + /* TODO: Flow director limit, DCB etc */ + + /* cap RSS limit */ + if (count > pf->rss_size_max) + count = pf->rss_size_max; + + /* use rss_reconfig to rebuild with new queue count and update traffic + * class queue mapping + */ + new_count = i40e_reconfig_rss_queues(pf, count); + if (new_count > 0) + return 0; + else + return -EINVAL; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .get_drvinfo = i40e_get_drvinfo, @@ -1439,6 +1668,7 @@ .nway_reset = i40e_nway_reset, .get_link = ethtool_op_get_link, .get_wol = i40e_get_wol, + .set_wol = i40e_set_wol, .get_eeprom_len = i40e_get_eeprom_len, .get_eeprom = i40e_get_eeprom, .get_ringparam = i40e_get_ringparam, @@ -1455,6 +1685,8 @@ .get_ethtool_stats = i40e_get_ethtool_stats, .get_coalesce = i40e_get_coalesce, .set_coalesce = i40e_set_coalesce, + .get_channels = i40e_get_channels, + .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, }; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -47,10 +46,10 @@ u64 direct_mode_sz) { enum i40e_memory_type mem_type __attribute__((unused)); - i40e_status ret_code = 0; struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; + i40e_status ret_code; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { @@ -90,11 +89,9 @@ sd_entry->u.pd_table.pd_entry = (struct i40e_hmc_pd_entry *) sd_entry->u.pd_table.pd_entry_virt_mem.va; - memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, - sizeof(struct i40e_dma_mem)); + sd_entry->u.pd_table.pd_page_addr = mem; } else { - memcpy(&sd_entry->u.bp.addr, &mem, - sizeof(struct i40e_dma_mem)); + sd_entry->u.bp.addr = mem; sd_entry->u.bp.sd_pd_index = sd_index; } /* initialize the sd entry */ @@ -165,7 +162,7 @@ if (ret_code) goto exit; - memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem)); + pd_entry->bp.addr = mem; pd_entry->bp.sd_pd_index = pd_index; pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED; /* Set page address and valid bit */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. 
+ * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -117,7 +116,6 @@ * @hw: pointer to our hw struct * @pa: pointer to physical address * @sd_index: segment descriptor index - * @hmc_fn_id: hmc function id * @type: if sd entry is direct or paged **/ #define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ @@ -139,7 +137,6 @@ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware * @hw: pointer to our hw struct * @sd_index: segment descriptor index - * @hmc_fn_id: hmc function id * @type: if sd entry is direct or paged **/ #define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ @@ -160,7 +157,6 @@ * @hw: pointer to our hw struct * @sd_idx: segment descriptor index * @pd_idx: page descriptor index - * @hmc_fn_id: hmc function id **/ #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ wr32((hw), I40E_PFHMC_PDINV, \ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -486,8 +485,7 @@ /* Make one big object, a single SD */ info.count = 1; ret_code = i40e_create_lan_hmc_object(hw, &info); - if ((ret_code) && - (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) + if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) goto try_type_paged; else if (ret_code) goto configure_lan_hmc_out; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -113,8 +112,8 @@ #define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 #define I40E_HMC_OBJ_SIZE_TXQ 128 #define I40E_HMC_OBJ_SIZE_RXQ 32 -#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 -#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 enum i40e_hmc_lan_rsrc_type { I40E_HMC_LAN_FULL = 0, --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_main.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -27,6 +26,10 @@ /* Local includes */ #include "i40e.h" +#include "i40e_diag.h" +#ifdef CONFIG_I40E_VXLAN +#include +#endif const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -36,22 +39,24 @@ #define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 36 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN const char i40e_driver_version_str[] = DRV_VERSION; -static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation."; +static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; /* a bit of forward declarations */ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi); static void i40e_handle_reset_warning(struct i40e_pf *pf); static int i40e_add_vsi(struct i40e_vsi *vsi); static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); -static int i40e_setup_pf_switch(struct i40e_pf *pf); +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit); static int i40e_setup_misc_vector(struct i40e_pf *pf); static void i40e_determine_queue_usage(struct i40e_pf *pf); static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fdir_sb_setup(struct i40e_pf *pf); +static int i40e_veb_get_bw_info(struct i40e_veb *veb); /* i40e_pci_tbl - PCI Device ID Table * @@ -61,16 +66,16 @@ * Class, Class Mask, private data (not used) } */ static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = { - {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0}, - {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, /* required last entry */ {0, } }; @@ -301,6 +306,7 @@ break; default: netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); + set_bit(__I40E_DOWN, &vsi->state); i40e_down(vsi); break; } @@ -354,7 +360,7 @@ struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; - if (!vsi->tx_rings) + if (test_bit(__I40E_DOWN, &vsi->state)) return stats; rcu_read_lock(); @@ -368,20 +374,20 @@ continue; do { - start = u64_stats_fetch_begin_bh(&tx_ring->syncp); + start = u64_stats_fetch_begin_irq(&tx_ring->syncp); packets = tx_ring->stats.packets; bytes = tx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; rx_ring = &tx_ring[1]; do { - start = u64_stats_fetch_begin_bh(&rx_ring->syncp); + start = u64_stats_fetch_begin_irq(&rx_ring->syncp); packets = rx_ring->stats.packets; bytes = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; @@ -416,7 +422,7 @@ memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets)); memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats)); memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); - if (vsi->rx_rings) + if (vsi->rx_rings && vsi->rx_rings[0]) { for (i = 0; i < vsi->num_queue_pairs; i++) { memset(&vsi->rx_rings[i]->stats, 
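
The stats hunks above replace the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair with the _irq variants when snapshotting per-ring packet and byte counters. The loop shape is the important part: keep re-reading until the sequence count shows no writer raced the read. A minimal standalone C sketch of that read-retry idiom, using hypothetical structures rather than the driver's rings:

/* build: gcc -std=c99 -Wall -o stats_snapshot stats_snapshot.c */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-ring counters guarded by a sequence count, mimicking
 * the shape of the u64_stats fetch/retry helpers used in the hunks above. */
struct ring_stats {
	unsigned int seq;	/* even = stable, odd = writer in progress */
	uint64_t packets;
	uint64_t bytes;
};

static unsigned int fetch_begin(const struct ring_stats *s)
{
	return s->seq;
}

static int fetch_retry(const struct ring_stats *s, unsigned int start)
{
	/* retry if a writer was active or the counters changed under us */
	return (start & 1) || (s->seq != start);
}

int main(void)
{
	struct ring_stats ring = { .seq = 2, .packets = 1234, .bytes = 567890 };
	uint64_t packets, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&ring);
		packets = ring.packets;
		bytes = ring.bytes;
	} while (fetch_retry(&ring, start));

	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)packets, (unsigned long long)bytes);
	return 0;
}
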
0 , sizeof(vsi->rx_rings[i]->stats)); @@ -427,6 +433,7 @@ memset(&vsi->tx_rings[i]->tx_stats, 0, sizeof(vsi->tx_rings[i]->tx_stats)); } + } vsi->stat_offsets_loaded = false; } @@ -461,7 +468,7 @@ { u64 new_data; - if (hw->device_id == I40E_QEMU_DEVICE_ID) { + if (hw->device_id == I40E_DEV_ID_QEMU) { new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; } else { @@ -577,10 +584,11 @@ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx), veb->stat_offsets_loaded, &oes->tx_discards, &es->tx_discards); - i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), - veb->stat_offsets_loaded, - &oes->rx_unknown_protocol, &es->rx_unknown_protocol); - + if (hw->revision_id > 0) + i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), + veb->stat_offsets_loaded, + &oes->rx_unknown_protocol, + &es->rx_unknown_protocol); i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx), veb->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); @@ -730,6 +738,7 @@ u32 rx_page, rx_buf; u64 rx_p, rx_b; u64 tx_p, tx_b; + u32 val; int i; u16 q; @@ -760,10 +769,10 @@ p = ACCESS_ONCE(vsi->tx_rings[q]); do { - start = u64_stats_fetch_begin_bh(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); tx_b += bytes; tx_p += packets; tx_restart += p->tx_stats.restart_queue; @@ -772,14 +781,14 @@ /* Rx queue is part of the same block as Tx queue */ p = &p[1]; do { - start = u64_stats_fetch_begin_bh(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); packets = p->stats.packets; bytes = p->stats.bytes; - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); rx_b += bytes; rx_p += packets; - rx_buf += p->rx_stats.alloc_rx_buff_failed; - rx_page += p->rx_stats.alloc_rx_page_failed; + rx_buf += p->rx_stats.alloc_buff_failed; + rx_page += p->rx_stats.alloc_page_failed; } rcu_read_unlock(); vsi->tx_restart = tx_restart; @@ -962,6 +971,20 @@ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); + + val = rd32(hw, I40E_PRTPM_EEE_STAT); + nsd->tx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; + nsd->rx_lpi_status = + (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> + I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; + i40e_stat_update32(hw, I40E_PRTPM_TLPIC, + pf->stat_offsets_loaded, + &osd->tx_lpi_count, &nsd->tx_lpi_count); + i40e_stat_update32(hw, I40E_PRTPM_RLPIC, + pf->stat_offsets_loaded, + &osd->rx_lpi_count, &nsd->rx_lpi_count); } pf->stat_offsets_loaded = true; @@ -1065,7 +1088,7 @@ if (!i40e_find_filter(vsi, macaddr, f->vlan, is_vf, is_netdev)) { if (!i40e_add_filter(vsi, macaddr, f->vlan, - is_vf, is_netdev)) + is_vf, is_netdev)) return NULL; } } @@ -1207,6 +1230,10 @@ if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) return 0; + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return -EADDRNOTAVAIL; + if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(&vsi->back->hw, @@ -1260,6 +1287,7 @@ u8 offset; u16 qmap; int i; + u16 num_tc_qps = 0; sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; offset = 0; @@ -1281,6 +1309,9 @@ vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? 
enabled_tc : 1; + /* Number of queues per enabled TC */ + num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc); + num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -1288,30 +1319,25 @@ if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */ int pow, num_qps; - vsi->tc_config.tc_info[i].qoffset = offset; switch (vsi->type) { case I40E_VSI_MAIN: - if (i == 0) - qcount = pf->rss_size; - else - qcount = pf->num_tc_qps; - vsi->tc_config.tc_info[i].qcount = qcount; + qcount = min_t(int, pf->rss_size, num_tc_qps); break; case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: default: - qcount = vsi->alloc_queue_pairs; - vsi->tc_config.tc_info[i].qcount = qcount; + qcount = num_tc_qps; WARN_ON(i != 0); break; } + vsi->tc_config.tc_info[i].qoffset = offset; + vsi->tc_config.tc_info[i].qcount = qcount; /* find the power-of-2 of the number of queue pairs */ - num_qps = vsi->tc_config.tc_info[i].qcount; + num_qps = qcount; pow = 0; - while (num_qps && - ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) { + while (num_qps && ((1 << pow) < qcount)) { pow++; num_qps >>= 1; } @@ -1321,7 +1347,7 @@ (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); - offset += vsi->tc_config.tc_info[i].qcount; + offset += qcount; } else { /* TC is not enabled so set the offset to * default queue and allocate one queue @@ -1497,11 +1523,6 @@ cpu_to_le16((u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan)); - /* vlan0 as wild card to allow packets from all vlans */ - if (f->vlan == I40E_VLAN_ANY || - (vsi->netdev && !(vsi->netdev->features & - NETIF_F_HW_VLAN_CTAG_FILTER))) - cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = cmd_flags; num_del++; @@ -1567,12 +1588,6 @@ add_list[num_add].queue_number = 0; cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; - - /* vlan0 as wild card to allow packets from all vlans */ - if (f->vlan == I40E_VLAN_ANY || (vsi->netdev && - !(vsi->netdev->features & - NETIF_F_HW_VLAN_CTAG_FILTER))) - cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; add_list[num_add].flags = cpu_to_le16(cmd_flags); num_add++; @@ -1638,6 +1653,13 @@ dev_info(&pf->pdev->dev, "set uni promisc failed, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) + dev_info(&pf->pdev->dev, + "set brdcast promisc failed, err %d, aq_err %d\n", + aq_ret, pf->hw.aq.asq_last_status); } clear_bit(__I40E_CONFIG_BUSY, &vsi->state); @@ -1690,6 +1712,27 @@ } /** + * i40e_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return i40e_ptp_get_ts_config(pf, ifr); + case SIOCSHWTSTAMP: + return i40e_ptp_set_ts_config(pf, ifr); + default: + return -EOPNOTSUPP; + } +} + +/** * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI * @vsi: the vsi being adjusted **/ @@ -1771,7 +1814,6 @@ { struct i40e_mac_filter *f, *add_f; bool is_netdev, is_vf; - int ret; is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(vsi->netdev); @@ -1797,13 +1839,6 @@ } } - ret = i40e_sync_vsi_filters(vsi); - if (ret) 
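
The queue-map hunk above now derives a per-TC queue count with rounddown_pow_of_two(alloc_queue_pairs/numtc) and encodes each TC's offset plus log2(queue count) into the contiguous queue-mapping word. A standalone C sketch of that arithmetic; the field shift values below are hypothetical stand-ins for the I40E_AQ_VSI_TC_QUE_* constants:

/* build: gcc -std=c99 -Wall -o tc_qmap tc_qmap.c */
#include <stdio.h>

#define QUE_OFFSET_SHIFT 0	/* hypothetical field positions */
#define QUE_NUMBER_SHIFT 9

static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while ((p << 1) && (p << 1) <= n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int alloc_queue_pairs = 24, numtc = 4;
	unsigned int num_tc_qps = rounddown_pow_of_two(alloc_queue_pairs / numtc);
	unsigned int offset = 0;

	for (unsigned int tc = 0; tc < numtc; tc++) {
		unsigned int qcount = num_tc_qps;
		unsigned int pow = 0, num_qps = qcount;

		/* same loop as the hunk above: smallest pow with 1<<pow >= qcount */
		while (num_qps && (1u << pow) < qcount) {
			pow++;
			num_qps >>= 1;
		}

		unsigned int qmap = (offset << QUE_OFFSET_SHIFT) |
				    (pow << QUE_NUMBER_SHIFT);
		printf("tc%u: offset=%u qcount=%u qmap=0x%04x\n",
		       tc, offset, qcount, qmap);
		offset += qcount;
	}
	return 0;
}
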
{ - dev_info(&vsi->back->pdev->dev, - "Could not sync filters for vid %d\n", vid); - return ret; - } - /* Now if we add a vlan tag, make sure to check if it is the first * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag" * with 0, so we now accept untagged and specified tagged traffic @@ -1824,7 +1859,10 @@ return -ENOMEM; } } + } + /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ + if (vid > 0 && !vsi->info.pvid) { list_for_each_entry(f, &vsi->mac_filter_list, list) { if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev)) { @@ -1840,10 +1878,13 @@ } } } - ret = i40e_sync_vsi_filters(vsi); } - return ret; + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return 0; + + return i40e_sync_vsi_filters(vsi); } /** @@ -1859,7 +1900,6 @@ struct i40e_mac_filter *f, *add_f; bool is_vf, is_netdev; int filter_count = 0; - int ret; is_vf = (vsi->type == I40E_VSI_SRIOV); is_netdev = !!(netdev); @@ -1870,12 +1910,6 @@ list_for_each_entry(f, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); - ret = i40e_sync_vsi_filters(vsi); - if (ret) { - dev_info(&vsi->back->pdev->dev, "Could not sync filters\n"); - return ret; - } - /* go through all the filters for this VSI and if there is only * vid == 0 it means there are no other filters, so vid 0 must * be replaced with -1. This signifies that we should from now @@ -1918,6 +1952,10 @@ } } + if (test_bit(__I40E_DOWN, &vsi->back->state) || + test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) + return 0; + return i40e_sync_vsi_filters(vsi); } @@ -1940,11 +1978,14 @@ netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); - /* If the network stack called us with vid = 0, we should - * indicate to i40e_vsi_add_vlan() that we want to receive - * any traffic (i.e. with any vlan tag, or untagged) + /* If the network stack called us with vid = 0 then + * it is asking to receive priority tagged packets with + * vlan id 0. Our HW receives them by default when configured + * to receive untagged packets so there is no need to add an + * extra filter for vlan 0 tagged packets. */ - ret = i40e_vsi_add_vlan(vsi, vid ? 
vid : I40E_VLAN_ANY); + if (vid) + ret = i40e_vsi_add_vlan(vsi, vid); if (!ret && (vid < VLAN_N_VID)) set_bit(vid, vsi->active_vlans); @@ -1957,7 +1998,7 @@ * @netdev: network interface to be adjusted * @vid: vlan id to be removed * - * net_device_ops implementation for adding vlan ids + * net_device_ops implementation for removing vlan ids **/ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) @@ -2008,8 +2049,9 @@ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi->info.pvid = cpu_to_le16(vid); - vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID; - vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; + vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | + I40E_AQ_VSI_PVLAN_INSERT_PVID | + I40E_AQ_VSI_PVLAN_EMOD_STR; ctxt.seid = vsi->seid; memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); @@ -2032,8 +2074,9 @@ **/ void i40e_vsi_remove_pvid(struct i40e_vsi *vsi) { + i40e_vlan_stripping_disable(vsi); + vsi->info.pvid = 0; - i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features); } /** @@ -2066,8 +2109,11 @@ { int i; + if (!vsi->tx_rings) + return; + for (i = 0; i < vsi->num_queue_pairs; i++) - if (vsi->tx_rings[i]->desc) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) i40e_free_tx_resources(vsi->tx_rings[i]); } @@ -2100,8 +2146,11 @@ { int i; + if (!vsi->rx_rings) + return; + for (i = 0; i < vsi->num_queue_pairs; i++) - if (vsi->rx_rings[i]->desc) + if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) i40e_free_rx_resources(vsi->rx_rings[i]); } @@ -2121,7 +2170,7 @@ u32 qtx_ctl = 0; /* some ATR related tx ring init */ - if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) { + if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; ring->atr_count = 0; } else { @@ -2130,6 +2179,7 @@ /* initialize XPS */ if (ring->q_vector && ring->netdev && + vsi->tc_config.numtc <= 1 && !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, @@ -2141,8 +2191,14 @@ tx_ctx.new_context = 1; tx_ctx.base = (ring->dma / 128); tx_ctx.qlen = ring->count; - tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED | - I40E_FLAG_FDIR_ATR_ENABLED)); + tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED)); + tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); + /* FDIR VSI tx ring can still use RS bit and writebacks */ + if (vsi->type != I40E_VSI_FDIR) + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = ring->dma + + (ring->count * sizeof(struct i40e_tx_desc)); /* As part of VSI creation/update, FW allocates certain * Tx arbitration queue sets for each TC enabled for @@ -2176,7 +2232,10 @@ } /* Now associate this queue with this PCI function */ - qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + if (vsi->type == I40E_VSI_VMDQ2) + qtx_ctl = I40E_QTX_CTL_VM_QUEUE; + else + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); @@ -2243,7 +2302,10 @@ rx_ctx.tphwdesc_ena = 1; rx_ctx.tphdata_ena = 1; rx_ctx.tphhead_ena = 1; - rx_ctx.lrxqthresh = 2; + if (hw->revision_id == 0) + rx_ctx.lrxqthresh = 0; + else + rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; rx_ctx.showiv = 1; @@ -2380,6 +2442,28 @@ } /** + * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters + * @vsi: Pointer to the targeted VSI + * + * This function replays the hlist on the hw where 
all the SB Flow Director + * filters were saved. + **/ +static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) +{ + struct i40e_fdir_filter *filter; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; + + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + + hlist_for_each_entry_safe(filter, node, + &pf->fdir_filter_list, fdir_node) { + i40e_add_del_fdir(vsi, filter, true); + } +} + +/** * i40e_vsi_configure - Set up the VSI for action * @vsi: the VSI being configured **/ @@ -2477,6 +2561,7 @@ I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | + I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | @@ -2485,8 +2570,8 @@ wr32(hw, I40E_PFINT_ICR0_ENA, val); /* SW_ITR_IDX = 0, but don't change INTENA */ - wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK | - I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK); + wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); /* OTHER_ITR_IDX = 0 */ wr32(hw, I40E_PFINT_STAT_CTL0, 0); @@ -2516,7 +2601,7 @@ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); - /* Associate the queue pair to the vector and enable the q int */ + /* Associate the queue pair to the vector and enable the queue int */ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); @@ -2532,6 +2617,19 @@ } /** + * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 + * @pf: board private structure + **/ +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + + wr32(hw, I40E_PFINT_DYN_CTL0, + I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + i40e_flush(hw); +} + +/** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure **/ @@ -2584,23 +2682,6 @@ } /** - * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings - * @irq: interrupt number - * @data: pointer to a q_vector - **/ -static irqreturn_t i40e_fdir_clean_rings(int irq, void *data) -{ - struct i40e_q_vector *q_vector = data; - - if (!q_vector->tx.ring && !q_vector->rx.ring) - return IRQ_HANDLED; - - pr_info("fdir ring cleaning needed\n"); - - return IRQ_HANDLED; -} - -/** * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts * @vsi: the VSI being configured * @basename: name for the vector @@ -2740,20 +2821,21 @@ { struct i40e_pf *pf = (struct i40e_pf *)data; struct i40e_hw *hw = &pf->hw; + irqreturn_t ret = IRQ_NONE; u32 icr0, icr0_remaining; u32 val, ena_mask; icr0 = rd32(hw, I40E_PFINT_ICR0); - - val = rd32(hw, I40E_PFINT_DYN_CTL0); - val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; - wr32(hw, I40E_PFINT_DYN_CTL0, val); + ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* if sharing a legacy IRQ, we might get called w/o an intr pending */ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) - return IRQ_NONE; + goto enable_intr; - ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); + /* if interrupt but no bits showing, must be SWINT */ + if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || + (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) + pf->sw_int_count++; /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { @@ -2793,12 +2875,28 @@ val = rd32(hw, I40E_GLGEN_RSTAT); val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> 
I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; - if (val & I40E_RESET_CORER) + if (val == I40E_RESET_CORER) { pf->corer_count++; - else if (val & I40E_RESET_GLOBR) + } else if (val == I40E_RESET_GLOBR) { pf->globr_count++; - else if (val & I40E_RESET_EMPR) + } else if (val == I40E_RESET_EMPR) { pf->empr_count++; + set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + } + } + + if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { + icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; + dev_info(&pf->pdev->dev, "HMC error interrupt\n"); + } + + if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { + u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + i40e_ptp_tx_hwtstamp(pf); + } } /* If a critical error is pending we have no choice but to reset the @@ -2809,22 +2907,18 @@ if (icr0_remaining) { dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", icr0_remaining); - if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) || - (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || + if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || - (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || - (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { - if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { - dev_info(&pf->pdev->dev, "HMC error interrupt\n"); - } else { - dev_info(&pf->pdev->dev, "device will be reset\n"); - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); - i40e_service_event_schedule(pf); - } + (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { + dev_info(&pf->pdev->dev, "device will be reset\n"); + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); } ena_mask &= ~icr0_remaining; } + ret = IRQ_HANDLED; +enable_intr: /* re-enable interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, &pf->state)) { @@ -2832,6 +2926,94 @@ i40e_irq_dynamic_enable_icr0(pf); } + return ret; +} + +/** + * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. 
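
The GLGEN_RSTAT hunk above switches the reset-cause tests from bitwise AND to equality because, once the RESET_TYPE field has been masked and shifted out, the result is an enumerated code rather than a bitmask, and a bit-test would mis-classify codes that share bits. A standalone C sketch with a hypothetical register layout:

/* build: gcc -std=c99 -Wall -o reset_decode reset_decode.c */
#include <stdio.h>

/* Hypothetical layout: a 2-bit RESET_TYPE field at bits 9:8 of a status
 * register, encoding 0 = power-on, 1 = core, 2 = global, 3 = EMP reset. */
#define RESET_TYPE_SHIFT 8
#define RESET_TYPE_MASK  (0x3u << RESET_TYPE_SHIFT)

enum reset_type { RESET_POR = 0, RESET_CORER = 1, RESET_GLOBR = 2, RESET_EMPR = 3 };

int main(void)
{
	unsigned int reg = (RESET_EMPR << RESET_TYPE_SHIFT) | 0x5a; /* other bits set */
	unsigned int val = (reg & RESET_TYPE_MASK) >> RESET_TYPE_SHIFT;

	/* After the shift, 'val' is an enumerated code, so compare with ==.
	 * A bit-test such as (val & RESET_CORER) would also be true here,
	 * since EMPR (3) shares bit 0 with CORER (1) - exactly the
	 * mis-classification the equality checks avoid. */
	if (val == RESET_CORER)
		printf("core reset\n");
	else if (val == RESET_GLOBR)
		printf("global reset\n");
	else if (val == RESET_EMPR)
		printf("EMP/firmware reset\n");
	else
		printf("power-on reset\n");
	return 0;
}
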
the clean is finished) + **/ +static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + struct i40e_vsi *vsi = tx_ring->vsi; + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_desc; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* if the descriptor isn't done, no work yet to do */ + if (!(eop_desc->cmd_type_offset_bsz & + cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + dma_unmap_len_set(tx_buf, len, 0); + + + /* move to the next desc and buffer to clean */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + i40e_irq_dynamic_enable(vsi, + tx_ring->q_vector->v_idx + vsi->base_vector); + } + return budget > 0; +} + +/** + * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + struct i40e_vsi *vsi; + + if (!q_vector->tx.ring) + return IRQ_HANDLED; + + vsi = q_vector->tx.ring->vsi; + i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); + return IRQ_HANDLED; } @@ -2967,35 +3149,26 @@ pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - j = 1000; - do { - usleep_range(1000, 2000); + for (j = 0; j < 50; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); - } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) - ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); - - if (enable) { - /* is STAT set ? */ - if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { - dev_info(&pf->pdev->dev, - "Tx %d already enabled\n", i); - continue; - } - } else { - /* is !STAT set ? 
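
The new i40e_clean_fdir_tx_irq() above walks the descriptor ring with the index biased by -count, so the wrap check is a cheap test for zero instead of a modulo on every step. A standalone C sketch of the same ring-walk idiom with made-up ring numbers:

/* build: gcc -std=c99 -Wall -o ring_walk ring_walk.c */
#include <stdio.h>

int main(void)
{
	const int count = 8;		/* hypothetical ring size */
	int next_to_clean = 5;		/* hypothetical starting slot */
	int budget = 6;			/* descriptors to process */
	int i = next_to_clean;

	/* Bias the index negative so the wrap test is a cheap "!i"
	 * instead of a modulo, as in the Tx clean loop above. */
	i -= count;

	do {
		printf("clean descriptor %d\n", i + count);

		i++;
		if (!i)			/* walked past the last slot */
			i -= count;	/* wrap back to slot 0 (i.e. -count) */

		budget--;
	} while (budget);

	next_to_clean = i + count;
	printf("next_to_clean=%d\n", next_to_clean);
	return 0;
}
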
*/ - if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { - dev_info(&pf->pdev->dev, - "Tx %d already disabled\n", i); - continue; - } + if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == + ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); } + /* Skip if the queue is already in the requested state */ + if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + continue; + if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + continue; /* turn on/off the queue */ - if (enable) - tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | - I40E_QTX_ENA_QENA_STAT_MASK; - else + if (enable) { + wr32(hw, I40E_QTX_HEAD(pf_q), 0); + tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; + } else { tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; + } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); @@ -3019,6 +3192,9 @@ } } + if (hw->revision_id == 0) + mdelay(50); + return 0; } @@ -3036,12 +3212,13 @@ pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - j = 1000; - do { - usleep_range(1000, 2000); + for (j = 0; j < 50; j++) { rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); - } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) - ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); + if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == + ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) + break; + usleep_range(1000, 2000); + } if (enable) { /* is STAT set ? */ @@ -3055,11 +3232,9 @@ /* turn on/off the queue */ if (enable) - rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | - I40E_QRX_ENA_QENA_STAT_MASK; + rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK; else - rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | - I40E_QRX_ENA_QENA_STAT_MASK); + rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); /* wait for the change to finish */ @@ -3091,9 +3266,9 @@ * @vsi: the VSI being configured * @enable: start or stop the rings **/ -static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) { - int ret; + int ret = 0; /* do rx first for enable and last for disable */ if (request) { @@ -3102,10 +3277,9 @@ return ret; ret = i40e_vsi_control_tx(vsi, request); } else { - ret = i40e_vsi_control_tx(vsi, request); - if (ret) - return ret; - ret = i40e_vsi_control_rx(vsi, request); + /* Ignore return value, we need to shutdown whatever we can */ + i40e_vsi_control_tx(vsi, request); + i40e_vsi_control_rx(vsi, request); } return ret; @@ -3131,7 +3305,8 @@ u16 vector = i + base; /* free only the irqs that were actually requested */ - if (vsi->q_vectors[i]->num_ringpairs == 0) + if (!vsi->q_vectors[i] || + !vsi->q_vectors[i]->num_ringpairs) continue; /* clear the affinity_mask in the IRQ descriptor */ @@ -3543,7 +3718,7 @@ /* Get the VSI level BW configuration per TC */ aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, - NULL); + NULL); if (aq_ret) { dev_info(&pf->pdev->dev, "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", @@ -3597,8 +3772,8 @@ NULL); if (aq_ret) { dev_info(&vsi->back->pdev->dev, - "%s: AQ command Config VSI BW allocation per TC failed = %d\n", - __func__, vsi->back->hw.aq.asq_last_status); + "AQ command Config VSI BW allocation per TC failed = %d\n", + vsi->back->hw.aq.asq_last_status); return -EINVAL; } @@ -3754,6 +3929,149 @@ } /** + * i40e_veb_config_tc - Configure TCs for given VEB + * @veb: given VEB + * @enabled_tc: TC bitmap + * + * Configures given TC bitmap for VEB (switching) element + **/ +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) +{ + struct 
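
The queue enable/disable hunks above replace the open-ended wait with a bounded poll: read the enable register up to 50 times, stop as soon as the request and status bits agree, and skip queues already in the requested state. A standalone C sketch with a simulated register and hypothetical bit positions:

/* build: gcc -std=c99 -Wall -o qena_poll qena_poll.c */
#include <stdio.h>

#define QENA_REQ_SHIFT  0	/* hypothetical bit positions */
#define QENA_STAT_SHIFT 2

/* Simulated register read: pretend the hardware catches up on the 4th poll. */
static unsigned int read_qtx_ena(int poll)
{
	unsigned int req = 1, stat = (poll >= 3) ? 1 : 0;

	return (req << QENA_REQ_SHIFT) | (stat << QENA_STAT_SHIFT);
}

int main(void)
{
	unsigned int reg = 0;
	int j;

	/* Poll until the REQ and STAT bits agree, with a bounded retry
	 * count instead of an open-ended loop, as in the hunks above. */
	for (j = 0; j < 50; j++) {
		reg = read_qtx_ena(j);
		if (((reg >> QENA_REQ_SHIFT) & 1) ==
		    ((reg >> QENA_STAT_SHIFT) & 1))
			break;
		/* the driver sleeps 1-2 ms here via usleep_range() */
	}

	if (j == 50)
		printf("queue never settled\n");
	else
		printf("queue settled after %d polls, reg=0x%x\n", j + 1, reg);
	return 0;
}
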
i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; + struct i40e_pf *pf = veb->pf; + int ret = 0; + int i; + + /* No TCs or already enabled TCs just return */ + if (!enabled_tc || veb->enabled_tc == enabled_tc) + return ret; + + bw_data.tc_valid_bits = enabled_tc; + /* bw_data.absolute_credits is not set (relative) */ + + /* Enable ETS TCs with equal BW Share for now */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (enabled_tc & (1 << i)) + bw_data.tc_bw_share_credits[i] = 1; + } + + ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, + &bw_data, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "veb bw config failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + goto out; + } + + /* Update the BW information */ + ret = i40e_veb_get_bw_info(veb); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed getting veb bw config, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + +out: + return ret; +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs + * @pf: PF struct + * + * Reconfigure VEB/VSIs on a given PF; it is assumed that + * the caller would've quiesce all the VSIs before calling + * this function + **/ +static void i40e_dcb_reconfigure(struct i40e_pf *pf) +{ + u8 tc_map = 0; + int ret; + u8 v; + + /* Enable the TCs available on PF to all VEBs */ + tc_map = i40e_pf_get_tc_map(pf); + for (v = 0; v < I40E_MAX_VEB; v++) { + if (!pf->veb[v]) + continue; + ret = i40e_veb_config_tc(pf->veb[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VEB seid=%d\n", + pf->veb[v]->seid); + /* Will try to configure as many components */ + } + } + + /* Update each VSI */ + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + if (!pf->vsi[v]) + continue; + + /* - Enable all TCs for the LAN VSI + * - For all others keep them at TC0 for now + */ + if (v == pf->lan_vsi) + tc_map = i40e_pf_get_tc_map(pf); + else + tc_map = i40e_pf_get_default_tc(pf); + + ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); + if (ret) { + dev_info(&pf->pdev->dev, + "Failed configuring TC for VSI seid=%d\n", + pf->vsi[v]->seid); + /* Will try to configure as many components */ + } else { + if (pf->vsi[v]->netdev) + i40e_dcbnl_set_all(pf->vsi[v]); + } + } +} + +/** + * i40e_init_pf_dcb - Initialize DCB configuration + * @pf: PF being configured + * + * Query the current DCB configuration and cache it + * in the hardware structure + **/ +static int i40e_init_pf_dcb(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int err = 0; + + if (pf->hw.func_caps.npar_enable) + goto out; + + /* Get the initial DCB configuration */ + err = i40e_init_dcb(hw); + if (!err) { + /* Device/Function is not DCBX capable */ + if ((!hw->func_caps.dcb) || + (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { + dev_info(&pf->pdev->dev, + "DCBX offload is not supported or is disabled for this PF.\n"); + + if (pf->flags & I40E_FLAG_MFP_ENABLED) + goto out; + + } else { + /* When status is not DISABLED then DCBX in FW */ + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | + DCB_CAP_DCBX_VER_IEEE; + pf->flags |= I40E_FLAG_DCB_ENABLED; + } + } + +out: + return err; +} +#endif /* CONFIG_I40E_DCB */ + +/** * i40e_up_complete - Finish the last steps of bringing up a connection * @vsi: the VSI being configured **/ @@ -3784,6 +4102,10 @@ } else if (vsi->netdev) { netdev_info(vsi->netdev, "NIC Link is Down\n"); } + + /* replay FDIR SB filters */ + if (vsi->type == I40E_VSI_FDIR) + i40e_fdir_filter_restore(vsi); i40e_service_event_schedule(pf); return 0; @@ -3930,17 +4252,50 @@ struct 
i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - char int_name[IFNAMSIZ]; int err; - /* disallow open during test */ - if (test_bit(__I40E_TESTING, &pf->state)) + /* disallow open during test or if eeprom is broken */ + if (test_bit(__I40E_TESTING, &pf->state) || + test_bit(__I40E_BAD_EEPROM, &pf->state)) return -EBUSY; netif_carrier_off(netdev); - /* allocate descriptors */ - err = i40e_vsi_setup_tx_resources(vsi); + err = i40e_vsi_open(vsi); + if (err) + return err; + + /* configure global TSO hardware offload settings */ + wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN | + TCP_FLAG_CWR) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); + +#ifdef CONFIG_I40E_VXLAN + vxlan_get_rx_port(netdev); +#endif + + return 0; +} + +/** + * i40e_vsi_open - + * @vsi: the VSI to open + * + * Finish initialization of the VSI. + * + * Returns 0 on success, negative value on failure + **/ +int i40e_vsi_open(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + char int_name[IFNAMSIZ]; + int err; + + /* allocate descriptors */ + err = i40e_vsi_setup_tx_resources(vsi); if (err) goto err_setup_tx; err = i40e_vsi_setup_rx_resources(vsi); @@ -3951,28 +4306,34 @@ if (err) goto err_setup_rx; + if (!vsi->netdev) { + err = EINVAL; + goto err_setup_rx; + } snprintf(int_name, sizeof(int_name) - 1, "%s-%s", - dev_driver_string(&pf->pdev->dev), netdev->name); + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); err = i40e_vsi_request_irq(vsi, int_name); if (err) goto err_setup_rx; + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); + if (err) + goto err_set_queues; + err = i40e_up_complete(vsi); if (err) goto err_up_complete; - if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) { - err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL); - if (err) - netdev_info(netdev, - "couldn't set broadcast err %d aq_err %d\n", - err, pf->hw.aq.asq_last_status); - } - return 0; err_up_complete: i40e_down(vsi); +err_set_queues: i40e_vsi_free_irq(vsi); err_setup_rx: i40e_vsi_free_rx_resources(vsi); @@ -3985,6 +4346,26 @@ } /** + * i40e_fdir_filter_exit - Cleans up the Flow Director accounting + * @pf: Pointer to pf + * + * This function destroys the hlist where all the Flow Director + * filters were saved. + **/ +static void i40e_fdir_filter_exit(struct i40e_pf *pf) +{ + struct i40e_fdir_filter *filter; + struct hlist_node *node2; + + hlist_for_each_entry_safe(filter, node2, + &pf->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + pf->fdir_pf_active_filters = 0; +} + +/** * i40e_close - Disables a network interface * @netdev: network interface device structure * @@ -4037,7 +4418,7 @@ * for the warning interrupt will deal with the shutdown * and recovery of the switch setup. 
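
i40e_fdir_filter_exit() above tears down the saved flow director filters with hlist_for_each_entry_safe(), which caches the next node before the current one is freed. A standalone C sketch of the same delete-while-iterating pattern on a plain singly linked list (not the kernel hlist type):

/* build: gcc -std=c99 -Wall -o list_teardown list_teardown.c */
#include <stdio.h>
#include <stdlib.h>

struct filter {
	int id;
	struct filter *next;
};

int main(void)
{
	struct filter *head = NULL;

	/* build a small hypothetical filter list */
	for (int id = 3; id >= 1; id--) {
		struct filter *f = malloc(sizeof(*f));
		if (!f)
			return 1;
		f->id = id;
		f->next = head;
		head = f;
	}

	/* Free every entry while walking the list: grab the next pointer
	 * before freeing the current node, which is what the *_safe list
	 * iterators do for the driver's hlist in the hunk above. */
	struct filter *f = head, *next;
	while (f) {
		next = f->next;
		printf("deleting filter %d\n", f->id);
		free(f);
		f = next;
	}
	head = NULL;
	return 0;
}
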
*/ - dev_info(&pf->pdev->dev, "GlobalR requested\n"); + dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_GLOBR_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); @@ -4048,12 +4429,30 @@ * * Same as Global Reset, except does *not* include the MAC/PHY */ - dev_info(&pf->pdev->dev, "CoreR requested\n"); + dev_dbg(&pf->pdev->dev, "CoreR requested\n"); val = rd32(&pf->hw, I40E_GLGEN_RTRIG); val |= I40E_GLGEN_RTRIG_CORER_MASK; wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); + } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { + + /* Request a Firmware Reset + * + * Same as Global reset, plus restarting the + * embedded firmware engine. + */ + /* enable EMP Reset */ + val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); + val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; + wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); + + /* force the reset */ + val = rd32(&pf->hw, I40E_GLGEN_RTRIG); + val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; + wr32(&pf->hw, I40E_GLGEN_RTRIG, val); + i40e_flush(&pf->hw); + } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset @@ -4064,7 +4463,7 @@ * the switch, since we need to do all the recovery as * for the Core Reset. */ - dev_info(&pf->pdev->dev, "PFR requested\n"); + dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_handle_reset_warning(pf); } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { @@ -4091,6 +4490,144 @@ } } +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_need_reconfig - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + **/ +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, + struct i40e_dcbx_config *new_cfg) +{ + bool need_reconfig = false; + + /* Check if ETS configuration has changed */ + if (memcmp(&new_cfg->etscfg, + &old_cfg->etscfg, + sizeof(new_cfg->etscfg))) { + /* If Priority Table has changed reconfig is needed */ + if (memcmp(&new_cfg->etscfg.prioritytable, + &old_cfg->etscfg.prioritytable, + sizeof(new_cfg->etscfg.prioritytable))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); + } + + if (memcmp(&new_cfg->etscfg.tcbwtable, + &old_cfg->etscfg.tcbwtable, + sizeof(new_cfg->etscfg.tcbwtable))) + dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + + if (memcmp(&new_cfg->etscfg.tsatable, + &old_cfg->etscfg.tsatable, + sizeof(new_cfg->etscfg.tsatable))) + dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); + } + + /* Check if PFC configuration has changed */ + if (memcmp(&new_cfg->pfc, + &old_cfg->pfc, + sizeof(new_cfg->pfc))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); + } + + /* Check if APP Table has changed */ + if (memcmp(&new_cfg->app, + &old_cfg->app, + sizeof(new_cfg->app))) { + need_reconfig = true; + dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); + } + + return need_reconfig; +} + +/** + * i40e_handle_lldp_event - Handle LLDP Change MIB event + * @pf: board private structure + * @e: event info posted on ARQ + **/ +static int i40e_handle_lldp_event(struct i40e_pf *pf, + struct i40e_arq_event_info *e) +{ + struct i40e_aqc_lldp_get_mib *mib = + (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; + struct i40e_hw *hw = &pf->hw; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; + struct i40e_dcbx_config tmp_dcbx_cfg; + bool need_reconfig = false; + int ret = 0; + u8 type; + + /* Ignore if event is not for Nearest Bridge */ + type = ((mib->type >> 
I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) + & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) + return ret; + + /* Check MIB Type and return if event for Remote MIB update */ + type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; + if (type == I40E_AQ_LLDP_MIB_REMOTE) { + /* Update the remote cached instance and return */ + ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, + I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, + &hw->remote_dcbx_config); + goto exit; + } + + /* Convert/store the DCBX data from LLDPDU temporarily */ + memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); + ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg); + if (ret) { + /* Error in LLDPDU parsing return */ + dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n"); + goto exit; + } + + /* No change detected in DCBX configs */ + if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { + dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); + goto exit; + } + + need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg); + + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg); + + /* Overwrite the new configuration */ + *dcbx_cfg = tmp_dcbx_cfg; + + if (!need_reconfig) + goto exit; + + /* Reconfiguration needed quiesce all VSIs */ + i40e_pf_quiesce_all_vsi(pf); + + /* Changes in configuration update VEB/VSI */ + i40e_dcb_reconfigure(pf); + + i40e_pf_unquiesce_all_vsi(pf); +exit: + return ret; +} +#endif /* CONFIG_I40E_DCB */ + +/** + * i40e_do_reset_safe - Protected reset path for userland calls. + * @pf: board private structure + * @reset_flags: which reset is requested + * + **/ +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) +{ + rtnl_lock(); + i40e_do_reset(pf, reset_flags); + rtnl_unlock(); +} + /** * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event * @pf: board private structure @@ -4110,8 +4647,8 @@ struct i40e_vf *vf; u16 vf_id; - dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", - __func__, queue, qtx_ctl); + dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", + queue, qtx_ctl); /* Queue belongs to VF, find the VF and issue VF reset */ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) @@ -4141,6 +4678,54 @@ } /** + * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW + * @pf: board private structure + **/ +int i40e_get_current_fd_count(struct i40e_pf *pf) +{ + int val, fcnt_prog; + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); + fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> + I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); + return fcnt_prog; +} + +/** + * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled + * @pf: board private structure + **/ +void i40e_fdir_check_and_reenable(struct i40e_pf *pf) +{ + u32 fcnt_prog, fcnt_avail; + + /* Check if, FD SB or ATR was auto disabled and if there is enough room + * to re-enable + */ + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->flags & I40E_FLAG_FD_SB_ENABLED)) + return; + fcnt_prog = i40e_get_current_fd_count(pf); + fcnt_avail = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + } + } + /* Wait for some more 
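
i40e_get_current_fd_count() above reads one status register and sums two packed counters, the guaranteed and the best-effort filter counts. A standalone C sketch of that field extraction; the masks and shifts below are hypothetical stand-ins for the I40E_PFQF_FDSTAT_* definitions:

/* build: gcc -std=c99 -Wall -o fd_count fd_count.c */
#include <stdio.h>

/* Hypothetical packing: guaranteed-filter count in bits 12:0 and
 * best-effort count in bits 28:16. */
#define GUARANT_CNT_MASK 0x00001fffu
#define BEST_CNT_SHIFT   16
#define BEST_CNT_MASK    (0x1fffu << BEST_CNT_SHIFT)

int main(void)
{
	unsigned int fdstat = (25u << BEST_CNT_SHIFT) | 100u;	/* simulated read */
	unsigned int guaranteed = fdstat & GUARANT_CNT_MASK;
	unsigned int best_effort = (fdstat & BEST_CNT_MASK) >> BEST_CNT_SHIFT;

	printf("programmed filters: %u (%u guaranteed + %u best effort)\n",
	       guaranteed + best_effort, guaranteed, best_effort);
	return 0;
}
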
space to be available to turn on ATR */ + if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + } + } +} + +/** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ @@ -4149,11 +4734,14 @@ if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) return; - pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; - /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &pf->state)) return; + i40e_fdir_check_and_reenable(pf); + + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (pf->flags & I40E_FLAG_FD_SB_ENABLED)) + pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; } /** @@ -4245,6 +4833,9 @@ if (pf->vf) i40e_vc_notify_link_state(pf); + + if (pf->flags & I40E_FLAG_PTP) + i40e_ptp_set_increment(pf); } /** @@ -4326,6 +4917,8 @@ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i]) i40e_update_veb_stats(pf->veb[i]); + + i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); } /** @@ -4336,6 +4929,7 @@ { u32 reset_flags = 0; + rtnl_lock(); if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { reset_flags |= (1 << __I40E_REINIT_REQUESTED); clear_bit(__I40E_REINIT_REQUESTED, &pf->state); @@ -4358,7 +4952,7 @@ */ if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { i40e_handle_reset_warning(pf); - return; + goto unlock; } /* If we're already down or resetting, just bail */ @@ -4366,6 +4960,9 @@ !test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_do_reset(pf, reset_flags); + +unlock: + rtnl_unlock(); } /** @@ -4429,6 +5026,7 @@ return; do { + event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */ ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) { dev_info(&pf->pdev->dev, "No ARQ event found\n"); @@ -4453,16 +5051,24 @@ event.msg_size); break; case i40e_aqc_opc_lldp_update_mib: - dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); + dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); +#ifdef CONFIG_I40E_DCB + rtnl_lock(); + ret = i40e_handle_lldp_event(pf, &event); + rtnl_unlock(); +#endif /* CONFIG_I40E_DCB */ break; case i40e_aqc_opc_event_lan_overflow: - dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); + dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); i40e_handle_lan_overflow_event(pf, &event); break; + case i40e_aqc_opc_send_msg_to_peer: + dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); + break; default: dev_info(&pf->pdev->dev, - "ARQ Error: Unknown event %d received\n", - event.desc.opcode); + "ARQ Error: Unknown event 0x%04x received\n", + opcode); break; } } while (pending && (i++ < pf->adminq_work_limit)); @@ -4478,6 +5084,31 @@ } /** + * i40e_verify_eeprom - make sure eeprom is good to use + * @pf: board private structure + **/ +static void i40e_verify_eeprom(struct i40e_pf *pf) +{ + int err; + + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + /* retry in case of garbage read */ + err = i40e_diag_eeprom_test(&pf->hw); + if (err) { + dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", + err); + set_bit(__I40E_BAD_EEPROM, &pf->state); + } + } + + if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { + dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); + clear_bit(__I40E_BAD_EEPROM, &pf->state); + 
} +} + +/** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * @@ -4592,6 +5223,15 @@ } } while (err); + /* increment MSI-X count because current FW skips one */ + pf->hw.func_caps.num_msix_vectors++; + + if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || + (pf->hw.aq.fw_maj_ver < 2)) { + pf->hw.func_caps.num_msix_vectors++; + pf->hw.func_caps.num_msix_vectors_vf++; + } + if (pf->hw.debug_mask & I40E_DEBUG_USER) dev_info(&pf->pdev->dev, "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", @@ -4603,57 +5243,89 @@ pf->hw.func_caps.num_tx_qp, pf->hw.func_caps.num_vsis); +#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + + pf->hw.func_caps.num_vfs) + if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { + dev_info(&pf->pdev->dev, + "got num_vsis %d, setting num_vsis to %d\n", + pf->hw.func_caps.num_vsis, DEF_NUM_VSI); + pf->hw.func_caps.num_vsis = DEF_NUM_VSI; + } + return 0; } +static int i40e_vsi_clear(struct i40e_vsi *vsi); + /** - * i40e_fdir_setup - initialize the Flow Director resources + * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband * @pf: board private structure **/ -static void i40e_fdir_setup(struct i40e_pf *pf) +static void i40e_fdir_sb_setup(struct i40e_pf *pf) { struct i40e_vsi *vsi; bool new_vsi = false; int err, i; - if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED | - I40E_FLAG_FDIR_ATR_ENABLED))) + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; - pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - - /* find existing or make new FDIR VSI */ + /* find existing VSI and see if it needs configuring */ vsi = NULL; - for (i = 0; i < pf->hw.func_caps.num_vsis; i++) - if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) + for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { vsi = pf->vsi[i]; + break; + } + } + + /* create a new VSI if none exists */ if (!vsi) { - vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0); + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, + pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); - pf->flags &= ~I40E_FLAG_FDIR_ENABLED; - return; + goto err_vsi; } new_vsi = true; } - WARN_ON(vsi->base_queue != I40E_FDIR_RING); - i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings); + i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); err = i40e_vsi_setup_tx_resources(vsi); - if (!err) - err = i40e_vsi_setup_rx_resources(vsi); - if (!err) - err = i40e_vsi_configure(vsi); - if (!err && new_vsi) { + if (err) + goto err_setup_tx; + err = i40e_vsi_setup_rx_resources(vsi); + if (err) + goto err_setup_rx; + + if (new_vsi) { char int_name[IFNAMSIZ + 9]; + err = i40e_vsi_configure(vsi); + if (err) + goto err_setup_rx; snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", dev_driver_string(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); - } - if (!err) + if (err) + goto err_setup_rx; err = i40e_up_complete(vsi); + if (err) + goto err_up_complete; + clear_bit(__I40E_NEEDS_RESTART, &vsi->state); + } - clear_bit(__I40E_NEEDS_RESTART, &vsi->state); + return; + +err_up_complete: + i40e_down(vsi); + i40e_vsi_free_irq(vsi); +err_setup_rx: + i40e_vsi_free_rx_resources(vsi); +err_setup_tx: + i40e_vsi_free_tx_resources(vsi); +err_vsi: + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + i40e_vsi_clear(vsi); } /** @@ -4664,6 +5336,7 @@ { int i; + i40e_fdir_filter_exit(pf); for (i = 0; i < pf->hw.func_caps.num_vsis; 
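
The reworked i40e_fdir_sb_setup() above converts the chained if (!err) calls into the usual goto-based unwind: each failure jumps to a label that releases only what was already acquired, in reverse order. A standalone C sketch of that error-unwinding shape with placeholder resources:

/* build: gcc -std=c99 -Wall -o unwind unwind.c */
#include <stdio.h>
#include <stdlib.h>

/* Placeholder resources standing in for the VSI, Tx rings and Rx rings. */
static void *acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return NULL;
	}
	printf("acquired %s\n", what);
	return malloc(1);
}

int main(void)
{
	void *vsi, *tx, *rx;
	int err = 0;

	vsi = acquire("vsi", 0);
	if (!vsi) {
		err = 1;
		goto err_vsi;
	}
	tx = acquire("tx resources", 0);
	if (!tx) {
		err = 1;
		goto err_setup_tx;
	}
	rx = acquire("rx resources", 1);	/* simulate a failure here */
	if (!rx) {
		err = 1;
		goto err_setup_rx;
	}

	printf("setup complete\n");
	free(rx);
	free(tx);
	free(vsi);
	return 0;

err_setup_rx:
	free(tx);	/* unwind in reverse order of acquisition */
err_setup_tx:
	free(vsi);
err_vsi:
	return err;
}
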
i++) { if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_vsi_release(pf->vsi[i]); @@ -4673,26 +5346,25 @@ } /** - * i40e_handle_reset_warning - prep for the core to reset + * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * - * Close up the VFs and other things in prep for a Core Reset, - * then get ready to rebuild the world. - **/ -static void i40e_handle_reset_warning(struct i40e_pf *pf) + * Close up the VFs and other things in prep for pf Reset. + **/ +static int i40e_prep_for_reset(struct i40e_pf *pf) { - struct i40e_driver_version dv; struct i40e_hw *hw = &pf->hw; i40e_status ret; u32 v; clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) - return; + return 0; - dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); + dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); - i40e_vc_notify_reset(pf); + if (i40e_check_asq_alive(hw)) + i40e_vc_notify_reset(pf); /* quiesce the VSIs and their queues that are not already DOWN */ i40e_pf_quiesce_all_vsi(pf); @@ -4704,6 +5376,27 @@ i40e_shutdown_adminq(&pf->hw); + /* call shutdown HMC */ + ret = i40e_shutdown_lan_hmc(hw); + if (ret) { + dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); + clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); + } + return ret; +} + +/** + * i40e_reset_and_rebuild - reset and rebuild using a saved config + * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. + **/ +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) +{ + struct i40e_driver_version dv; + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + u32 v; + /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. @@ -4715,7 +5408,7 @@ if (test_bit(__I40E_DOWN, &pf->state)) goto end_core_reset; - dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); + dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); @@ -4724,6 +5417,12 @@ goto end_core_reset; } + /* re-verify the eeprom if we just had an EMP reset */ + if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { + clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + i40e_verify_eeprom(pf); + } + ret = i40e_get_capabilities(pf); if (ret) { dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", @@ -4731,13 +5430,6 @@ goto end_core_reset; } - /* call shutdown HMC */ - ret = i40e_shutdown_lan_hmc(hw); - if (ret) { - dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); - goto end_core_reset; - } - ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); @@ -4751,8 +5443,16 @@ goto end_core_reset; } +#ifdef CONFIG_I40E_DCB + ret = i40e_init_pf_dcb(pf); + if (ret) { + dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret); + goto end_core_reset; + } +#endif /* CONFIG_I40E_DCB */ + /* do basic switch setup */ - ret = i40e_setup_pf_switch(pf); + ret = i40e_setup_pf_switch(pf, reinit); if (ret) goto end_core_reset; @@ -4764,7 +5464,7 @@ * try to recover minimal use by getting the basic PF VSI working. 
*/ if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { - dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); + dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); /* find the one VEB connected to the MAC, and find orphans */ for (v = 0; v < I40E_MAX_VEB; v++) { if (!pf->veb[v]) @@ -4817,6 +5517,11 @@ /* restart the VSIs that were rebuilt and running before the reset */ i40e_pf_unquiesce_all_vsi(pf); + if (pf->num_alloc_vfs) { + for (v = 0; v < pf->num_alloc_vfs; v++) + i40e_reset_vf(&pf->vf[v], true); + } + /* tell the firmware that we're starting */ dv.major_version = DRV_VERSION_MAJOR; dv.minor_version = DRV_VERSION_MINOR; @@ -4824,13 +5529,29 @@ dv.subbuild_version = 0; i40e_aq_send_driver_version(&pf->hw, &dv, NULL); - dev_info(&pf->pdev->dev, "PF reset done\n"); + dev_info(&pf->pdev->dev, "reset complete\n"); end_core_reset: clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); } /** + * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild + * @pf: board private structure + * + * Close up the VFs and other things in prep for a Core Reset, + * then get ready to rebuild the world. + **/ +static void i40e_handle_reset_warning(struct i40e_pf *pf) +{ + i40e_status ret; + + ret = i40e_prep_for_reset(pf); + if (!ret) + i40e_reset_and_rebuild(pf, false); +} + +/** * i40e_handle_mdd_event * @pf: pointer to the pf structure * @@ -4857,7 +5578,7 @@ u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; dev_info(&pf->pdev->dev, - "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", + "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; @@ -4871,7 +5592,7 @@ u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; dev_info(&pf->pdev->dev, - "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", + "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; @@ -4911,6 +5632,52 @@ i40e_flush(hw); } +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * @pf: board private structure + **/ +static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +{ + const int vxlan_hdr_qwords = 4; + struct i40e_hw *hw = &pf->hw; + i40e_status ret; + u8 filter_index; + __be16 port; + int i; + + if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) + return; + + pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->pending_vxlan_bitmap & (1 << i)) { + pf->pending_vxlan_bitmap &= ~(1 << i); + port = pf->vxlan_ports[i]; + ret = port ? + i40e_aq_add_udp_tunnel(hw, ntohs(port), + vxlan_hdr_qwords, + I40E_AQC_TUNNEL_TYPE_VXLAN, + &filter_index, NULL) + : i40e_aq_del_udp_tunnel(hw, i, NULL); + + if (ret) { + dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n", + port ? "adding" : "deleting", + ntohs(port), port ? i : i); + + pf->vxlan_ports[i] = 0; + } else { + dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n", + port ? "Added" : "Deleted", + ntohs(port), port ? 
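
The VXLAN sync subtask above services only the table slots whose bit is set in a pending bitmap, clearing each bit as the port is added or deleted. A standalone C sketch of that pending-bitmap walk with a hypothetical port table:

/* build: gcc -std=c99 -Wall -o pending_bitmap pending_bitmap.c */
#include <stdio.h>

#define MAX_PORTS 16	/* hypothetical table size */

int main(void)
{
	unsigned int ports[MAX_PORTS] = { [2] = 4789, [5] = 0 };	/* 0 = delete slot */
	unsigned int pending = (1u << 2) | (1u << 5);			/* slots needing sync */

	/* Walk only the slots whose pending bit is set, clearing each bit
	 * as it is serviced - the same shape as the port sync above. */
	for (int i = 0; i < MAX_PORTS; i++) {
		if (!(pending & (1u << i)))
			continue;
		pending &= ~(1u << i);

		if (ports[i])
			printf("slot %d: add UDP tunnel port %u\n", i, ports[i]);
		else
			printf("slot %d: delete UDP tunnel port\n", i);
	}
	return 0;
}
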
i : filter_index); + } + } + } +} + +#endif /** * i40e_service_task - Run the driver's async subtasks * @work: pointer to work_struct containing our data @@ -4929,6 +5696,9 @@ i40e_fdir_reinit_subtask(pf); i40e_check_hang_subtask(pf); i40e_sync_filters_subtask(pf); +#ifdef CONFIG_I40E_VXLAN + i40e_sync_vxlan_filters_subtask(pf); +#endif i40e_clean_adminq_subtask(pf); i40e_service_event_complete(pf); @@ -5006,6 +5776,42 @@ } /** + * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi + * @type: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. + * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) +{ + int size; + int ret = 0; + + /* allocate memory for both Tx and Rx ring pointers */ + size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; + vsi->tx_rings = kzalloc(size, GFP_KERNEL); + if (!vsi->tx_rings) + return -ENOMEM; + vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; + + if (alloc_qvectors) { + /* allocate memory for q_vector pointers */ + size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors; + vsi->q_vectors = kzalloc(size, GFP_KERNEL); + if (!vsi->q_vectors) { + ret = -ENOMEM; + goto err_vectors; + } + } + return ret; + +err_vectors: + kfree(vsi->tx_rings); + return ret; +} + +/** * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF * @pf: board private structure * @type: type of VSI @@ -5017,8 +5823,6 @@ { int ret = -ENODEV; struct i40e_vsi *vsi; - int sz_vectors; - int sz_rings; int vsi_idx; int i; @@ -5068,22 +5872,9 @@ if (ret) goto err_rings; - /* allocate memory for ring pointers */ - sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; - vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL); - if (!vsi->tx_rings) { - ret = -ENOMEM; + ret = i40e_vsi_alloc_arrays(vsi, true); + if (ret) goto err_rings; - } - vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; - - /* allocate memory for q_vector pointers */ - sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors; - vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL); - if (!vsi->q_vectors) { - ret = -ENOMEM; - goto err_vectors; - } /* Setup default MSIX irq handler for VSI */ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); @@ -5092,8 +5883,6 @@ ret = vsi_idx; goto unlock_pf; -err_vectors: - kfree(vsi->tx_rings); err_rings: pf->next_vsi = i - 1; kfree(vsi); @@ -5103,6 +5892,26 @@ } /** + * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI + * @type: VSI pointer + * @free_qvectors: a bool to specify if q_vectors need to be freed. 
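
i40e_vsi_alloc_arrays() above grabs one zeroed allocation sized for both pointer arrays and lets rx_rings alias the second half, so a single free in i40e_vsi_free_arrays() releases both. A standalone C sketch of the same trick:

/* build: gcc -std=c99 -Wall -o paired_arrays paired_arrays.c */
#include <stdio.h>
#include <stdlib.h>

struct ring { int id; };

int main(void)
{
	int alloc_queue_pairs = 4;	/* hypothetical queue pair count */
	struct ring **tx_rings, **rx_rings;

	/* One zeroed allocation holds both pointer arrays; rx_rings simply
	 * aliases the second half, mirroring the hunk above. */
	tx_rings = calloc(2 * alloc_queue_pairs, sizeof(*tx_rings));
	if (!tx_rings)
		return 1;
	rx_rings = &tx_rings[alloc_queue_pairs];

	printf("tx_rings=%p rx_rings=%p (offset %zu pointers)\n",
	       (void *)tx_rings, (void *)rx_rings,
	       (size_t)(rx_rings - tx_rings));

	free(tx_rings);		/* a single free releases both arrays */
	return 0;
}
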
+ * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(vsi->q_vectors); + vsi->q_vectors = NULL; + } + kfree(vsi->tx_rings); + vsi->tx_rings = NULL; + vsi->rx_rings = NULL; +} + +/** * i40e_vsi_clear - Deallocate the VSI provided * @vsi: the VSI being un-configured **/ @@ -5138,9 +5947,7 @@ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); - /* free the ring and vector containers */ - kfree(vsi->q_vectors); - kfree(vsi->tx_rings); + i40e_vsi_free_arrays(vsi, true); pf->vsi[vsi->idx] = NULL; if (vsi->idx < pf->next_vsi) @@ -5158,18 +5965,17 @@ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI * @vsi: the VSI being cleaned **/ -static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi) +static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) { int i; - if (vsi->tx_rings[0]) + if (vsi->tx_rings && vsi->tx_rings[0]) { for (i = 0; i < vsi->alloc_queue_pairs; i++) { kfree_rcu(vsi->tx_rings[i], rcu); vsi->tx_rings[i] = NULL; vsi->rx_rings[i] = NULL; } - - return 0; + } } /** @@ -5186,6 +5992,7 @@ struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; + /* allocate space for both Tx and Rx in one shot */ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); if (!tx_ring) goto err_out; @@ -5234,37 +6041,16 @@ **/ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) { - int err = 0; - - pf->num_msix_entries = 0; - while (vectors >= I40E_MIN_MSIX) { - err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors); - if (err == 0) { - /* good to go */ - pf->num_msix_entries = vectors; - break; - } else if (err < 0) { - /* total failure */ - dev_info(&pf->pdev->dev, - "MSI-X vector reservation failed: %d\n", err); - vectors = 0; - break; - } else { - /* err > 0 is the hint for retry */ - dev_info(&pf->pdev->dev, - "MSI-X vectors wanted %d, retrying with %d\n", - vectors, err); - vectors = err; - } - } - - if (vectors > 0 && vectors < I40E_MIN_MSIX) { + vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, + I40E_MIN_MSIX, vectors); + if (vectors < 0) { dev_info(&pf->pdev->dev, - "Couldn't get enough vectors, only %d available\n", - vectors); + "MSI-X vector reservation failed: %d\n", vectors); vectors = 0; } + pf->num_msix_entries = vectors; + return vectors; } @@ -5289,19 +6075,22 @@ /* The number of vectors we'll request will be comprised of: * - Add 1 for "other" cause for Admin Queue events, etc. * - The number of LAN queue pairs - * already adjusted for the NUMA node - * assumes symmetric Tx/Rx pairing + * - Queues being used for RSS. + * We don't need as many as max_rss_size vectors. + * use rss_size instead in the calculation since that + * is governed by number of cpus in the system. + * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs * Once we count this up, try the request. * * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. 
*/ - pf->num_lan_msix = pf->num_lan_qps; + pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); pf->num_vmdq_msix = pf->num_vmdq_qps; v_budget = 1 + pf->num_lan_msix; v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); - if (pf->flags & I40E_FLAG_FDIR_ENABLED) + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) v_budget++; /* Scale down if necessary, and the rings will share vectors */ @@ -5323,7 +6112,7 @@ } else if (vec == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ - dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); + dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; @@ -5359,13 +6148,13 @@ } /** - * i40e_alloc_q_vector - Allocate memory for a single interrupt vector + * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of the vector in the vsi struct * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ -static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) { struct i40e_q_vector *q_vector; @@ -5391,13 +6180,13 @@ } /** - * i40e_alloc_q_vectors - Allocate memory for interrupt vectors + * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. **/ -static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) +static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int v_idx, num_q_vectors; @@ -5412,7 +6201,7 @@ return -EINVAL; for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = i40e_alloc_q_vector(vsi, v_idx); + err = i40e_vsi_alloc_q_vector(vsi, v_idx); if (err) goto err_out; } @@ -5437,14 +6226,13 @@ if (pf->flags & I40E_FLAG_MSIX_ENABLED) { err = i40e_init_msix(pf); if (err) { - pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_RSS_ENABLED | - I40E_FLAG_MQ_ENABLED | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_FDIR_ENABLED | - I40E_FLAG_FDIR_ATR_ENABLED | - I40E_FLAG_VMDQ_ENABLED); + pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | + I40E_FLAG_RSS_ENABLED | + I40E_FLAG_DCB_ENABLED | + I40E_FLAG_SRIOV_ENABLED | + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_VMDQ_ENABLED); /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); @@ -5453,7 +6241,7 @@ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { - dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); + dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); err = pci_enable_msi(pf->pdev); if (err) { dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); @@ -5462,7 +6250,7 @@ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) - dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); + dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); /* track first vector for misc interrupts */ err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); @@ -5489,7 +6277,8 @@ i40e_intr, 0, pf->misc_int_name, pf); if (err) { dev_info(&pf->pdev->dev, - "request_irq for msix_misc failed: %d\n", err); + "request_irq for %s failed: %d\n", + pf->misc_int_name, err); return -EFAULT; } } @@ -5513,15 +6302,15 @@ **/ static int i40e_config_rss(struct i40e_pf *pf) { - struct i40e_hw 
*hw = &pf->hw; - u32 lut = 0; - int i, j; - u64 hena; /* Set of random keys generated using kernel random number generator */ static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; + struct i40e_hw *hw = &pf->hw; + u32 lut = 0; + int i, j; + u64 hena; /* Fill out hash function seed */ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) @@ -5530,16 +6319,7 @@ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); - hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)| - ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + hena |= I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); @@ -5568,6 +6348,34 @@ } /** + * i40e_reconfig_rss_queues - change number of queues for rss and rebuild + * @pf: board private structure + * @queue_count: the requested queue count for rss. + * + * returns 0 if rss is not enabled, if enabled returns the final rss queue + * count which may be different from the requested queue count. + **/ +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) +{ + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) + return 0; + + queue_count = min_t(int, queue_count, pf->rss_size_max); + queue_count = rounddown_pow_of_two(queue_count); + + if (queue_count != pf->rss_size) { + i40e_prep_for_reset(pf); + + pf->rss_size = queue_count; + + i40e_reset_and_rebuild(pf, true); + i40e_config_rss(pf); + } + dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); + return pf->rss_size; +} + +/** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * @@ -5582,6 +6390,7 @@ pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); + pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { if (I40E_DEBUG_USER & debug) pf->hw.debug_mask = debug; @@ -5593,39 +6402,43 @@ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_RX_PS_ENABLED | - I40E_FLAG_MQ_ENABLED | I40E_FLAG_RX_1BUF_ENABLED; + /* Depending on PF configurations, it is possible that the RSS + * maximum might end up larger than the available queues + */ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; + pf->rss_size_max = min_t(int, pf->rss_size_max, + pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { pf->flags |= I40E_FLAG_RSS_ENABLED; - pf->rss_size = min_t(int, pf->rss_size_max, - nr_cpus_node(numa_node_id())); + pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); + pf->rss_size = rounddown_pow_of_two(pf->rss_size); } else { pf->rss_size = 1; } - if (pf->hw.func_caps.dcb) - pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC; - else - pf->num_tc_qps = 0; + /* MFP mode enabled */ + if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { + pf->flags |= I40E_FLAG_MFP_ENABLED; + 
dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + } - if (pf->hw.func_caps.fd) { - /* FW/NVM is not yet fixed in this regard */ - if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || - (pf->hw.func_caps.fd_filters_best_effort > 0)) { - pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED; - dev_info(&pf->pdev->dev, - "Flow Director ATR mode Enabled\n"); - pf->flags |= I40E_FLAG_FDIR_ENABLED; + /* FW/NVM is not yet fixed in this regard */ + if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || + (pf->hw.func_caps.fd_filters_best_effort > 0)) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; + if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + } else { dev_info(&pf->pdev->dev, - "Flow Director Side Band mode Enabled\n"); - pf->fdir_pf_filter_count = - pf->hw.func_caps.fd_filters_guaranteed; + "Flow Director Sideband mode Disabled in MFP mode\n"); } - } else { - pf->fdir_pf_filter_count = 0; + pf->fdir_pf_filter_count = + pf->hw.func_caps.fd_filters_guaranteed; + pf->hw.fdir_shared_filter_count = + pf->hw.func_caps.fd_filters_best_effort; } if (pf->hw.func_caps.vmdq) { @@ -5634,12 +6447,6 @@ pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; } - /* MFP mode enabled */ - if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { - pf->flags |= I40E_FLAG_MFP_ENABLED; - dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); - } - #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs) { pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; @@ -5661,46 +6468,184 @@ err = -ENOMEM; goto sw_init_done; } - pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; - pf->qp_pile->search_hint = 0; + pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; + pf->qp_pile->search_hint = 0; + + /* set up vector assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + kfree(pf->qp_pile); + err = -ENOMEM; + goto sw_init_done; + } + pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; + pf->irq_pile->search_hint = 0; + + mutex_init(&pf->switch_mutex); + +sw_init_done: + return err; +} + +/** + * i40e_set_ntuple - set the ntuple feature flag and take action + * @pf: board private structure to initialize + * @features: the feature set that the stack is suggesting + * + * returns a bool to indicate if reset needs to happen + **/ +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) +{ + bool need_reset = false; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + if (features & NETIF_F_NTUPLE) { + /* Enable filters and mark for reset */ + if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) + need_reset = true; + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + } else { + /* turn off filters, mark for reset and clear SW filter list */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + need_reset = true; + i40e_fdir_filter_exit(pf); + } + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* if ATR was disabled it can be re-enabled. 
*/ + if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + } + return need_reset; +} + +/** + * i40e_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + **/ +static int i40e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + bool need_reset; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + i40e_vlan_stripping_enable(vsi); + else + i40e_vlan_stripping_disable(vsi); + + need_reset = i40e_set_ntuple(pf, features); + + if (need_reset) + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + return 0; +} + +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * @pf: board private structure + * @port: The UDP port to look up + * + * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found + **/ +static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +{ + u8 i; + + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->vxlan_ports[i] == port) + return i; + } + + return i; +} + +/** + * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: New UDP port number that VXLAN started listening to + **/ +static void i40e_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u8 next_idx; + u8 idx; + + if (sa_family == AF_INET6) + return; + + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); + return; + } - /* set up vector assignment tracking */ - size = sizeof(struct i40e_lump_tracking) - + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); - pf->irq_pile = kzalloc(size, GFP_KERNEL); - if (!pf->irq_pile) { - kfree(pf->qp_pile); - err = -ENOMEM; - goto sw_init_done; + /* Now check if there is space to add the new port */ + next_idx = i40e_get_vxlan_port_idx(pf, 0); + + if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", + ntohs(port)); + return; } - pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; - pf->irq_pile->search_hint = 0; - mutex_init(&pf->switch_mutex); + /* New port: add it and mark its index in the bitmap */ + pf->vxlan_ports[next_idx] = port; + pf->pending_vxlan_bitmap |= (1 << next_idx); -sw_init_done: - return err; + pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; } /** - * i40e_set_features - set the netdev feature flags - * @netdev: ptr to the netdev being adjusted - * @features: the feature set that the stack is suggesting + * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to **/ -static int i40e_set_features(struct net_device *netdev, - netdev_features_t features) +static void i40e_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = 
np->vsi; + struct i40e_pf *pf = vsi->back; + u8 idx; - if (features & NETIF_F_HW_VLAN_CTAG_RX) - i40e_vlan_stripping_enable(vsi); - else - i40e_vlan_stripping_disable(vsi); + if (sa_family == AF_INET6) + return; - return 0; + idx = i40e_get_vxlan_port_idx(pf, port); + + /* Check if port already exists */ + if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->vxlan_ports[idx] = 0; + + pf->pending_vxlan_bitmap |= (1 << idx); + + pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + } else { + netdev_warn(netdev, "Port %d was not found, not deleting\n", + ntohs(port)); + } } +#endif static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -5710,6 +6655,7 @@ .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, + .ndo_do_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, @@ -5722,6 +6668,11 @@ .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, + .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, +#ifdef CONFIG_I40E_VXLAN + .ndo_add_vxlan_port = i40e_add_vxlan_port, + .ndo_del_vxlan_port = i40e_del_vxlan_port, +#endif }; /** @@ -5732,6 +6683,7 @@ **/ static int i40e_config_netdev(struct i40e_vsi *vsi) { + u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_netdev_priv *np; @@ -5748,10 +6700,9 @@ np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features = NETIF_F_IP_CSUM | + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_TSO | - NETIF_F_SG; + NETIF_F_TSO; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | @@ -5763,8 +6714,10 @@ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_IPV6_CSUM | NETIF_F_TSO | + NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | + NETIF_F_NTUPLE | NETIF_F_RXHASH | 0; @@ -5781,6 +6734,7 @@ random_ether_addr(mac_addr); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); } + i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); memcpy(netdev->perm_addr, mac_addr, ETH_ALEN); @@ -5814,10 +6768,6 @@ if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) return; - /* there is no HW VSI for FDIR */ - if (vsi->type == I40E_VSI_FDIR) - return; - i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); return; } @@ -5901,12 +6851,12 @@ break; case I40E_VSI_FDIR: - /* no queue mapping or actual HW VSI needed */ - vsi->info.valid_sections = 0; - vsi->seid = 0; - vsi->id = 0; + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = vsi->uplink_seid; + ctxt.connection_type = 0x1; /* regular data port */ + ctxt.flags = I40E_AQ_VSI_TYPE_PF; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); - return 0; break; case I40E_VSI_VMDQ2: @@ -6027,8 +6977,6 @@ if (vsi->netdev) { /* results in a call to i40e_close() */ unregister_netdev(vsi->netdev); - free_netdev(vsi->netdev); - vsi->netdev = NULL; } } else { if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) @@ -6047,6 +6995,10 @@ i40e_vsi_delete(vsi); i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev) { + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } i40e_vsi_clear_rings(vsi); i40e_vsi_clear(vsi); @@ -6101,13 +7053,12 @@ } if (vsi->base_vector) { - dev_info(&pf->pdev->dev, - "VSI %d has non-zero base vector %d\n", + 
dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", vsi->seid, vsi->base_vector); return -EEXIST; } - ret = i40e_alloc_q_vectors(vsi); + ret = i40e_vsi_alloc_q_vectors(vsi); if (ret) { dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for VSI %d, ret=%d\n", @@ -6121,7 +7072,7 @@ vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, - "failed to get q tracking for VSI %d, err=%d\n", + "failed to get queue tracking for VSI %d, err=%d\n", vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; @@ -6133,6 +7084,69 @@ } /** + * i40e_vsi_reinit_setup - return and reallocate resources for a VSI + * @vsi: pointer to the vsi. + * + * This re-allocates a vsi's queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct + * on success, otherwise returns NULL on failure. + **/ +static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + u8 enabled_tc; + int ret; + + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); + i40e_vsi_clear_rings(vsi); + + i40e_vsi_free_arrays(vsi, false); + i40e_set_num_rings_in_vsi(vsi); + ret = i40e_vsi_alloc_arrays(vsi, false); + if (ret) + goto err_vsi; + + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); + if (ret < 0) { + dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", + vsi->seid, ret); + goto err_vsi; + } + vsi->base_queue = ret; + + /* Update the FW view of the VSI. Force a reset of TC and queue + * layout configurations. + */ + enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; + pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; + pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; + i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + + /* assign it some queues */ + ret = i40e_alloc_rings(vsi); + if (ret) + goto err_rings; + + /* map all of the rings to the q_vectors */ + i40e_vsi_map_rings_to_vectors(vsi); + return vsi; + +err_rings: + i40e_vsi_free_q_vectors(vsi); + if (vsi->netdev_registered) { + vsi->netdev_registered = false; + unregister_netdev(vsi->netdev); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + } + i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); +err_vsi: + i40e_vsi_clear(vsi); + return NULL; +} + +/** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type @@ -6212,6 +7226,8 @@ if (v_idx < 0) goto err_alloc; vsi = pf->vsi[v_idx]; + if (!vsi) + goto err_alloc; vsi->type = type; vsi->veb_idx = (veb ? 
veb->idx : I40E_NO_VEB); @@ -6220,7 +7236,8 @@ else if (type == I40E_VSI_SRIOV) vsi->vf_id = param1; /* assign it some queues */ - ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); + ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, + vsi->idx); if (ret < 0) { dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", vsi->seid, ret); @@ -6246,6 +7263,10 @@ goto err_netdev; vsi->netdev_registered = true; netif_carrier_off(vsi->netdev); +#ifdef CONFIG_I40E_DCB + /* Setup DCB netlink interface */ + i40e_dcbnl_setup(vsi); +#endif /* CONFIG_I40E_DCB */ /* fall through */ case I40E_VSI_FDIR: @@ -6503,12 +7524,14 @@ **/ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { - bool is_default = (vsi->idx == vsi->back->lan_vsi); + bool is_default = false; + bool is_cloud = false; int ret; /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, is_default, &veb->seid, NULL); + veb->enabled_tc, is_default, + is_cloud, &veb->seid, NULL); if (ret) { dev_info(&veb->pf->pdev->dev, "couldn't add VEB, err %d, aq_err %d\n", @@ -6773,11 +7796,13 @@ /** * i40e_setup_pf_switch - Setup the HW switch on startup or after reset * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. * * Returns 0 on success, negative value on failure **/ -static int i40e_setup_pf_switch(struct i40e_pf *pf) +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) { + u32 rxfc = 0, txfc = 0, rxfc_reg; int ret; /* find out what's out there already */ @@ -6790,14 +7815,8 @@ } i40e_pf_reset_stats(pf); - /* fdir VSI must happen first to be sure it gets queue 0, but only - * if there is enough room for the fdir VSI - */ - if (pf->num_lan_qps > 1) - i40e_fdir_setup(pf); - /* first time setup */ - if (pf->lan_vsi == I40E_NO_VSI) { + if (pf->lan_vsi == I40E_NO_VSI || reinit) { struct i40e_vsi *vsi = NULL; u16 uplink_seid; @@ -6808,19 +7827,15 @@ uplink_seid = pf->veb[pf->lan_veb]->seid; else uplink_seid = pf->mac_seid; - - vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + if (pf->lan_vsi == I40E_NO_VSI) + vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + else if (reinit) + vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); if (!vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_fdir_teardown(pf); return -EAGAIN; } - /* accommodate kcompat by copying the main VSI queue count - * into the pf, since this newer code pushes the pf queue - * info down a level into a VSI - */ - pf->num_rx_queues = vsi->alloc_queue_pairs; - pf->num_tx_queues = vsi->alloc_queue_pairs; } else { /* force a reset of TC and queue layout configurations */ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; @@ -6830,6 +7845,8 @@ } i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); + i40e_fdir_sb_setup(pf); + /* Setup static PF queue filter control settings */ ret = i40e_setup_pf_filter_control(pf); if (ret) { @@ -6848,37 +7865,68 @@ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_link_event(pf); - /* Initialize user-specifics link properties */ + /* Initialize user-specific link properties */ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? 
true : false); - pf->hw.fc.requested_mode = I40E_FC_DEFAULT; - if (pf->hw.phy.link_info.an_info & - (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX)) + /* requested_mode is set in probe or by ethtool */ + if (!pf->fc_autoneg_status) + goto no_autoneg; + + if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) && + (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)) pf->hw.fc.current_mode = I40E_FC_FULL; else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) pf->hw.fc.current_mode = I40E_FC_TX_PAUSE; else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) pf->hw.fc.current_mode = I40E_FC_RX_PAUSE; else - pf->hw.fc.current_mode = I40E_FC_DEFAULT; + pf->hw.fc.current_mode = I40E_FC_NONE; - return ret; -} + /* sync the flow control settings with the auto-neg values */ + switch (pf->hw.fc.current_mode) { + case I40E_FC_FULL: + txfc = 1; + rxfc = 1; + break; + case I40E_FC_TX_PAUSE: + txfc = 1; + rxfc = 0; + break; + case I40E_FC_RX_PAUSE: + txfc = 0; + rxfc = 1; + break; + case I40E_FC_NONE: + case I40E_FC_DEFAULT: + txfc = 0; + rxfc = 0; + break; + case I40E_FC_PFC: + /* TBD */ + break; + /* no default case, we have to handle all possibilities here */ + } -/** - * i40e_set_rss_size - helper to set rss_size - * @pf: board private structure - * @queues_left: how many queues - */ -static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left) -{ - int num_tc0; + wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT); + + rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) & + ~I40E_PRTDCB_MFLCN_RFCE_MASK; + rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT); - num_tc0 = min_t(int, queues_left, pf->rss_size_max); - num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id())); - num_tc0 = rounddown_pow_of_two(num_tc0); + wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg); - return num_tc0; + goto fc_complete; + +no_autoneg: + /* disable L2 flow control, user can turn it on if they wish */ + wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0); + wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) & + ~I40E_PRTDCB_MFLCN_RFCE_MASK); + +fc_complete: + i40e_ptp_init(pf); + + return ret; } /** @@ -6887,12 +7935,9 @@ **/ static void i40e_determine_queue_usage(struct i40e_pf *pf) { - int accum_tc_size; int queues_left; pf->num_lan_qps = 0; - pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps); - accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps; /* Find the max queues to be put into basic use. 
We'll always be * using TC0, whether or not DCB is running, and TC0 will get the @@ -6900,99 +7945,45 @@ */ queues_left = pf->hw.func_caps.num_tx_qp; - if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) && - (pf->flags & I40E_FLAG_MQ_ENABLED)) || - !(pf->flags & (I40E_FLAG_RSS_ENABLED | - I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) || - (queues_left == 1)) { - + if ((queues_left == 1) || + !(pf->flags & I40E_FLAG_MSIX_ENABLED) || + !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_DCB_ENABLED))) { /* one qp for PF, no queues for anything else */ queues_left = 0; pf->rss_size = pf->num_lan_qps = 1; /* make sure all the fancies are disabled */ - pf->flags &= ~(I40E_FLAG_RSS_ENABLED | - I40E_FLAG_MQ_ENABLED | - I40E_FLAG_FDIR_ENABLED | - I40E_FLAG_FDIR_ATR_ENABLED | - I40E_FLAG_DCB_ENABLED | - I40E_FLAG_SRIOV_ENABLED | - I40E_FLAG_VMDQ_ENABLED); - - } else if (pf->flags & I40E_FLAG_RSS_ENABLED && - !(pf->flags & I40E_FLAG_FDIR_ENABLED) && - !(pf->flags & I40E_FLAG_DCB_ENABLED)) { - - pf->rss_size = i40e_set_rss_size(pf, queues_left); - - queues_left -= pf->rss_size; - pf->num_lan_qps = pf->rss_size; - - } else if (pf->flags & I40E_FLAG_RSS_ENABLED && - !(pf->flags & I40E_FLAG_FDIR_ENABLED) && - (pf->flags & I40E_FLAG_DCB_ENABLED)) { - - /* save num_tc_qps queues for TCs 1 thru 7 and the rest - * are set up for RSS in TC0 - */ - queues_left -= accum_tc_size; - - pf->rss_size = i40e_set_rss_size(pf, queues_left); - - queues_left -= pf->rss_size; - if (queues_left < 0) { - dev_info(&pf->pdev->dev, "not enough queues for DCB\n"); - return; - } - - pf->num_lan_qps = pf->rss_size + accum_tc_size; - - } else if (pf->flags & I40E_FLAG_RSS_ENABLED && - (pf->flags & I40E_FLAG_FDIR_ENABLED) && - !(pf->flags & I40E_FLAG_DCB_ENABLED)) { - - queues_left -= 1; /* save 1 queue for FD */ - - pf->rss_size = i40e_set_rss_size(pf, queues_left); - - queues_left -= pf->rss_size; - if (queues_left < 0) { - dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n"); - return; - } - - pf->num_lan_qps = pf->rss_size; - - } else if (pf->flags & I40E_FLAG_RSS_ENABLED && - (pf->flags & I40E_FLAG_FDIR_ENABLED) && - (pf->flags & I40E_FLAG_DCB_ENABLED)) { - - /* save 1 queue for TCs 1 thru 7, - * 1 queue for flow director, - * and the rest are set up for RSS in TC0 - */ - queues_left -= 1; - queues_left -= accum_tc_size; + pf->flags &= ~(I40E_FLAG_RSS_ENABLED | + I40E_FLAG_FD_SB_ENABLED | + I40E_FLAG_FD_ATR_ENABLED | + I40E_FLAG_DCB_ENABLED | + I40E_FLAG_SRIOV_ENABLED | + I40E_FLAG_VMDQ_ENABLED); + } else { + /* Not enough queues for all TCs */ + if ((pf->flags & I40E_FLAG_DCB_ENABLED) && + (queues_left < I40E_MAX_TRAFFIC_CLASS)) { + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); + } + pf->num_lan_qps = pf->rss_size_max; + queues_left -= pf->num_lan_qps; + } - pf->rss_size = i40e_set_rss_size(pf, queues_left); - queues_left -= pf->rss_size; - if (queues_left < 0) { - dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n"); - return; + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (queues_left > 1) { + queues_left -= 1; /* save 1 queue for FD */ + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_info(&pf->pdev->dev, "not enough queues for Flow Director. 
Flow Director feature is disabled\n"); } - - pf->num_lan_qps = pf->rss_size + accum_tc_size; - - } else { - dev_info(&pf->pdev->dev, - "Invalid configuration, flags=0x%08llx\n", pf->flags); - return; } if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && pf->num_vf_qps && pf->num_req_vfs && queues_left) { - pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / - pf->num_vf_qps)); + pf->num_req_vfs = min_t(int, pf->num_req_vfs, + (queues_left / pf->num_vf_qps)); queues_left -= (pf->num_req_vfs * pf->num_vf_qps); } @@ -7003,6 +7994,7 @@ queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); } + pf->queues_left = queues_left; return; } @@ -7024,7 +8016,7 @@ settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; /* Flow Director is enabled */ - if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED)) + if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) settings->enable_fdir = true; /* Ethtype and MACVLAN filters enabled for PF */ @@ -7037,6 +8029,44 @@ return 0; } +#define INFO_STRING_LEN 255 +static void i40e_print_features(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + char *buf, *string; + + string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); + if (!string) { + dev_err(&pf->pdev->dev, "Features string allocation failed\n"); + return; + } + + buf = string; + + buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); +#ifdef CONFIG_PCI_IOV + buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); +#endif + buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs); + + if (pf->flags & I40E_FLAG_RSS_ENABLED) + buf += sprintf(buf, "RSS "); + buf += sprintf(buf, "FDir "); + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) + buf += sprintf(buf, "ATR "); + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) + buf += sprintf(buf, "NTUPLE "); + if (pf->flags & I40E_FLAG_DCB_ENABLED) + buf += sprintf(buf, "DCB "); + if (pf->flags & I40E_FLAG_PTP) + buf += sprintf(buf, "PTP "); + + BUG_ON(buf > (string + INFO_STRING_LEN)); + dev_info(&pf->pdev->dev, "%s\n", string); + kfree(string); +} + /** * i40e_probe - Device initialization routine * @pdev: PCI device information struct @@ -7053,6 +8083,8 @@ struct i40e_driver_version dv; struct i40e_pf *pf; struct i40e_hw *hw; + static u16 pfs_found; + u16 link_status; int err = 0; u32 len; @@ -7061,17 +8093,14 @@ return err; /* set up for high or low dma */ - if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { - /* coherent mask for the same size will always succeed if - * dma_set_mask does - */ - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - } else { - dev_err(&pdev->dev, "DMA configuration failed: %d\n", err); - err = -EIO; - goto err_dma; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } } /* set up pci connections */ @@ -7118,6 +8147,18 @@ hw->subsystem_device_id = pdev->subsystem_device; hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + pf->instance = pfs_found; + + /* do a special CORER for clearing PXE mode once at init */ + if (hw->revision_id == 0 && + (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { + wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); + i40e_flush(hw); + msleep(200); + pf->corer_count++; + + i40e_clear_pxe_mode(hw); 
+ } /* Reset here to make sure all is clean and to define PF 'n' */ err = i40e_pf_reset(hw); @@ -7142,6 +8183,9 @@ goto err_pf_reset; } + /* set up a default setting for link flow control */ + pf->hw.fc.requested_mode = I40E_FC_NONE; + err = i40e_init_adminq(hw); dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); if (err) { @@ -7152,6 +8196,9 @@ goto err_pf_reset; } + i40e_verify_eeprom(pf); + + i40e_clear_pxe_mode(hw); err = i40e_get_capabilities(pf); if (err) goto err_adminq_setup; @@ -7178,7 +8225,7 @@ } i40e_get_mac_addr(hw, hw->mac.addr); - if (i40e_validate_mac_addr(hw->mac.addr)) { + if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; goto err_mac_addr; @@ -7188,6 +8235,14 @@ pci_set_drvdata(pdev, pf); pci_save_state(pdev); +#ifdef CONFIG_I40E_DCB + err = i40e_init_pf_dcb(pf); + if (err) { + dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + goto err_init_dcb; + } +#endif /* CONFIG_I40E_DCB */ /* set up periodic task facility */ setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); @@ -7198,6 +8253,10 @@ pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; pf->link_check_timeout = jiffies; + /* WoL defaults to disabled */ + pf->wol_en = false; + device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); + /* set up the main switch operations */ i40e_determine_queue_usage(pf); i40e_init_interrupt_scheme(pf); @@ -7212,7 +8271,7 @@ goto err_switch_setup; } - err = i40e_setup_pf_switch(pf); + err = i40e_setup_pf_switch(pf, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; @@ -7240,7 +8299,8 @@ /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && - (pf->flags & I40E_FLAG_MSIX_ENABLED)) { + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { u32 val; /* disable link interrupts for VFs */ @@ -7248,8 +8308,20 @@ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); i40e_flush(hw); + + if (pci_num_vf(pdev)) { + dev_info(&pdev->dev, + "Active VFs found, allocating resources.\n"); + err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); + if (err) + dev_info(&pdev->dev, + "Error %d allocating resources for existing VFs\n", + err); + } } + pfs_found++; + i40e_dbg_pf_init(pf); /* tell the firmware that we're starting */ @@ -7263,15 +8335,44 @@ mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); + /* Get the negotiated link width and speed from PCI config space */ + pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); + + i40e_set_pci_config_data(hw, link_status); + + dev_info(&pdev->dev, "PCI-Express: %s %s\n", + (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : + hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : + hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : + "Unknown"), + (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : + hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : + hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : + hw->bus.width == i40e_bus_width_pcie_x1 ? 
"Width x1" : + "Unknown")); + + if (hw->bus.width < i40e_bus_width_pcie_x8 || + hw->bus.speed < i40e_bus_speed_8000) { + dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); + dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); + } + + /* print a string summarizing features */ + i40e_print_features(pf); + return 0; /* Unwind what we've done if something failed in the setup */ err_vsis: set_bit(__I40E_DOWN, &pf->state); -err_switch_setup: i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); +err_switch_setup: + i40e_reset_interrupt_capability(pf); del_timer_sync(&pf->service_timer); +#ifdef CONFIG_I40E_DCB +err_init_dcb: +#endif /* CONFIG_I40E_DCB */ err_mac_addr: err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); @@ -7313,16 +8414,18 @@ i40e_dbg_pf_exit(pf); - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { - i40e_free_vfs(pf); - pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; - } + i40e_ptp_stop(pf); /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; + } + i40e_fdir_teardown(pf); /* If there is a switch structure or any orphans, remove them. @@ -7356,7 +8459,6 @@ "Failed to destroy the HMC resources: %d\n", ret_code); /* shutdown the adminq */ - i40e_aq_queue_shutdown(&pf->hw, true); ret_code = i40e_shutdown_adminq(&pf->hw); if (ret_code) dev_warn(&pdev->dev, @@ -7413,7 +8515,11 @@ dev_info(&pdev->dev, "%s: error %d\n", __func__, error); /* shutdown all operations */ - i40e_pf_quiesce_all_vsi(pf); + if (!test_bit(__I40E_SUSPENDED, &pf->state)) { + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + } /* Request a slot reset */ return PCI_ERS_RESULT_NEED_RESET; @@ -7476,9 +8582,103 @@ struct i40e_pf *pf = pci_get_drvdata(pdev); dev_info(&pdev->dev, "%s\n", __func__); + if (test_bit(__I40E_SUSPENDED, &pf->state)) + return; + + rtnl_lock(); i40e_handle_reset_warning(pf); + rtnl_lock(); +} + +/** + * i40e_shutdown - PCI callback for shutting down + * @pdev: PCI device information struct + **/ +static void i40e_shutdown(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; + + set_bit(__I40E_SUSPENDED, &pf->state); + set_bit(__I40E_DOWN, &pf->state); + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + + wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, pf->wol_en); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PM +/** + * i40e_suspend - PCI callback for moving to D3 + * @pdev: PCI device information struct + **/ +static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + struct i40e_hw *hw = &pf->hw; + + set_bit(__I40E_SUSPENDED, &pf->state); + set_bit(__I40E_DOWN, &pf->state); + rtnl_lock(); + i40e_prep_for_reset(pf); + rtnl_unlock(); + + wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); + + pci_wake_from_d3(pdev, pf->wol_en); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +/** + * i40e_resume - PCI callback for waking up from D3 + * @pdev: PCI device information struct + **/ +static int i40e_resume(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* pci_restore_state() clears dev->state_saves, so + * call pci_save_state() again to restore it. + */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, + "%s: Cannot enable PCI device from suspend\n", + __func__); + return err; + } + pci_set_master(pdev); + + /* no wakeup events while running */ + pci_wake_from_d3(pdev, false); + + /* handling the reset will rebuild the device state */ + if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { + clear_bit(__I40E_DOWN, &pf->state); + rtnl_lock(); + i40e_reset_and_rebuild(pf, false); + rtnl_unlock(); + } + + return 0; } +#endif static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, @@ -7490,6 +8690,11 @@ .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, +#ifdef CONFIG_PM + .suspend = i40e_suspend, + .resume = i40e_resume, +#endif + .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, }; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -28,14 +27,14 @@ #include "i40e_prototype.h" /** - * i40e_init_nvm_ops - Initialize NVM function pointers. - * @hw: pointer to the HW structure. + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure * - * Setups the function pointers and the NVM info structure. Should be called - * once per NVM initialization, e.g. inside the i40e_init_shared_code(). - * Please notice that the NVM term is used here (& in all methods covered - * in this file) as an equivalent of the FLASH part mapped into the SR. - * We are accessing FLASH always thru the Shadow RAM. + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always thru the Shadow RAM. 
**/ i40e_status i40e_init_nvm(struct i40e_hw *hw) { @@ -50,16 +49,16 @@ gens = rd32(hw, I40E_GLNVM_GENS); sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> I40E_GLNVM_GENS_SR_SIZE_SHIFT); - /* Switching to words (sr_size contains power of 2KB). */ + /* Switching to words (sr_size contains power of 2KB) */ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; - /* Check if we are in the normal or blank NVM programming mode. */ + /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, I40E_GLNVM_FLA); - if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */ - /* Max NVM timeout. */ + if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ + /* Max NVM timeout */ nvm->timeout = I40E_MAX_NVM_TIMEOUT; nvm->blank_nvm_mode = false; - } else { /* Blank programming mode. */ + } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; ret_code = I40E_ERR_NVM_BLANK_MODE; hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); @@ -69,12 +68,12 @@ } /** - * i40e_acquire_nvm - Generic request for acquiring the NVM ownership. - * @hw: pointer to the HW structure. - * @access: NVM access type (read or write). + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write) * - * This function will request NVM ownership for reading - * via the proper Admin Command. + * This function will request NVM ownership for reading + * via the proper Admin Command. **/ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access) @@ -88,20 +87,20 @@ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 0, &time, NULL); - /* Reading the Global Device Timer. */ + /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); - /* Store the timeout. */ + /* Store the timeout */ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; if (ret_code) { - /* Set the polling timeout. */ + /* Set the polling timeout */ if (time > I40E_MAX_NVM_TIMEOUT) timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; else timeout = hw->nvm.hw_semaphore_timeout; - /* Poll until the current NVM owner timeouts. */ + /* Poll until the current NVM owner timeouts */ while (gtime < timeout) { usleep_range(10000, 20000); ret_code = i40e_aq_request_resource(hw, @@ -129,10 +128,10 @@ } /** - * i40e_release_nvm - Generic request for releasing the NVM ownership. - * @hw: pointer to the HW structure. + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure * - * This function will release NVM resource via the proper Admin Command. + * This function will release NVM resource via the proper Admin Command. **/ void i40e_release_nvm(struct i40e_hw *hw) { @@ -141,17 +140,17 @@ } /** - * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit. - * @hw: pointer to the HW structure. + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure * - * Polls the SRCTL Shadow RAM register done bit. + * Polls the SRCTL Shadow RAM register done bit. **/ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 srctl, wait_cnt; - /* Poll the I40E_GLNVM_SRCTL until the done bit is set. 
*/ + /* Poll the I40E_GLNVM_SRCTL until the done bit is set */ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { srctl = rd32(hw, I40E_GLNVM_SRCTL); if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { @@ -161,20 +160,20 @@ udelay(5); } if (ret_code == I40E_ERR_TIMEOUT) - hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); + hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n"); return ret_code; } /** - * i40e_read_nvm_srctl - Reads Shadow RAM. - * @hw: pointer to the HW structure. - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @data: word read from the Shadow RAM. + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM * - * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ -static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset, - u16 *data) +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; @@ -185,15 +184,15 @@ goto read_nvm_exit; } - /* Poll the done bit first. */ + /* Poll the done bit first */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { - /* Write the address and start reading. */ + /* Write the address and start reading */ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | (1 << I40E_GLNVM_SRCTL_START_SHIFT); wr32(hw, I40E_GLNVM_SRCTL, sr_reg); - /* Poll I40E_GLNVM_SRCTL until the done bit is set. */ + /* Poll I40E_GLNVM_SRCTL until the done bit is set */ ret_code = i40e_poll_sr_srctl_done_bit(hw); if (!ret_code) { sr_reg = rd32(hw, I40E_GLNVM_SRDATA); @@ -211,80 +210,45 @@ } /** - * i40e_read_nvm_word - Reads Shadow RAM word. - * @hw: pointer to the HW structure. - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @data: word read from the Shadow RAM. - * - * Reads 16 bit word from the Shadow RAM. Each read is preceded - * with the NVM ownership taking and followed by the release. - **/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) -{ - i40e_status ret_code = 0; - - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - ret_code = i40e_read_nvm_srctl(hw, offset, data); - i40e_release_nvm(hw); - } - - return ret_code; -} - -/** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer. - * @hw: pointer to the HW structure. - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @words: number of words to read (in) & - * number of words read before the NVM ownership timeout (out). - * @data: words read from the Shadow RAM. - * - * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - * method. The buffer read is preceded by the NVM ownership take - * and followed by the release. + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. 
**/ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { i40e_status ret_code = 0; u16 index, word; - u32 time; - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - /* Loop thru the selected region. */ - for (word = 0; word < *words; word++) { - index = offset + word; - ret_code = i40e_read_nvm_srctl(hw, index, &data[word]); - if (ret_code) - break; - /* Check if we didn't exceeded the semaphore timeout. */ - time = rd32(hw, I40E_GLVFGEN_TIMER); - if (time >= hw->nvm.hw_semaphore_timeout) { - ret_code = I40E_ERR_TIMEOUT; - hw_dbg(hw, "NVM read error: timeout.\n"); - break; - } - } - /* Update the number of words read from the Shadow RAM. */ - *words = word; - /* Release the NVM ownership. */ - i40e_release_nvm(hw); + /* Loop thru the selected region */ + for (word = 0; word < *words; word++) { + index = offset + word; + ret_code = i40e_read_nvm_word(hw, index, &data[word]); + if (ret_code) + break; } + /* Update the number of words read from the Shadow RAM */ + *words = word; + return ret_code; } /** - * i40e_calc_nvm_checksum - Calculates and returns the checksum - * @hw: pointer to hardware structure - * - * This function calculate SW Checksum that covers the whole 64kB shadow RAM - * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD - * is customer specific and unknown. Therefore, this function skips all maximum - * possible size of VPD (1kB). + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum + * + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB). **/ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) @@ -297,14 +261,14 @@ u32 i = 0; /* read pointer to VPD area */ - ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module); + ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } /* read pointer to PCIe Alt Auto-load module */ - ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; @@ -331,7 +295,7 @@ break; } - ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word); + ret_code = i40e_read_nvm_word(hw, (u16)i, &word); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -346,19 +310,19 @@ } /** - * i40e_validate_nvm_checksum - Validate EEPROM checksum - * @hw: pointer to hardware structure - * @checksum: calculated checksum + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum * - * Performs checksum calculation and validates the NVM SW checksum. If the - * caller does not need checksum, the value can be NULL. + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL. 
**/ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { i40e_status ret_code = 0; u16 checksum_sr = 0; - u16 checksum_local; + u16 checksum_local = 0; ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) @@ -371,7 +335,7 @@ /* Do not use i40e_read_nvm_word() because we do not want to take * the synchronization semaphores twice here. */ - i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); /* Verify read checksum from EEPROM is the same as * calculated checksum --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
@@ -51,7 +50,6 @@ void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); -bool i40e_asq_done(struct i40e_hw *hw); /* debug function for adminq */ void i40e_debug_aq(struct i40e_hw *hw, @@ -60,10 +58,11 @@ void *buffer); void i40e_idle_aq(struct i40e_hw *hw); -void i40e_resume_aq(struct i40e_hw *hw); +bool i40e_check_asq_alive(struct i40e_hw *hw); +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); u32 i40e_led_get(struct i40e_hw *hw); -void i40e_led_set(struct i40e_hw *hw, u32 mode); +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); /* admin send queue commands */ @@ -71,8 +70,6 @@ u16 *fw_major_version, u16 *fw_minor_version, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading); i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, @@ -95,9 +92,9 @@ u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); @@ -106,7 +103,8 @@ struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, u16 *pveb_seid, + bool default_port, bool enable_l2_filtering, + u16 *pveb_seid, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, @@ -119,12 +117,6 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id, - struct i40e_aqc_add_remove_vlan_element_data *v_list, - u8 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id, - struct i40e_aqc_add_remove_vlan_element_data *v_list, - u8 count, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); @@ -164,11 +156,19 @@ struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, + u16 udp_port, u8 header_len, + u8 protocol_index, u8 *filter_index, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw, u16 flags, u8 *mac_addr, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); 
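
Among the prototypes added above are i40e_aq_add_udp_tunnel() and i40e_aq_del_udp_tunnel(). A hedged usage sketch in driver context follows; the port is the IANA VXLAN port, while the header_len and protocol_index arguments are illustrative placeholders, not values taken from this patch:

	/* Illustrative sketch only: register a UDP tunnel port with firmware via
	 * the newly exported admin-queue wrapper. Argument encodings marked as
	 * placeholders are assumptions, not defined by this patch.
	 */
	static int example_add_vxlan_port(struct i40e_hw *hw, u8 *filter_index)
	{
		i40e_status ret;

		/* NULL cmd_details follows the pattern used elsewhere in the driver */
		ret = i40e_aq_add_udp_tunnel(hw, 4789 /* VXLAN port (assumed encoding) */,
					     0 /* header_len: placeholder */,
					     0 /* protocol_index: placeholder */,
					     filter_index, NULL);
		if (ret)
			return -EIO;

		/* The returned *filter_index is what a later
		 * i40e_aq_del_udp_tunnel(hw, *filter_index, NULL) would take.
		 */
		return 0;
	}
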
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, enum i40e_aq_hmc_profile profile, u8 pe_vf_enabled_count, @@ -179,6 +179,15 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_ets_data *ets_data, + enum i40e_admin_queue_opc opcode, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_vsi_bw_config_resp *bw_data, @@ -207,8 +216,6 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_validate_mac_addr(u8 *mac_addr); -i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, - struct i40e_lldp_variables *lldp_cfg); /* prototype for functions used for NVM access */ i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_acquire_nvm(struct i40e_hw *hw, @@ -222,6 +229,14 @@ u16 *words, u16 *data); i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, u16 *checksum); +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); + +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40e_ptype_lookup[ptype]; +} /* prototype for functions used for SW locks */ @@ -236,4 +251,9 @@ struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_set_filter_control(struct i40e_hw *hw, struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); #endif /* _I40E_PROTOTYPE_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -0,0 +1,662 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" +#include +#include + +/* The XL710 timesync is very much like Intel's 82599 design when it comes to + * the fundamental clock design. 
However, the clock operations are much simpler + * in the XL710 because the device supports a full 64 bits of nanoseconds. + * Because the field is so wide, we can forgo the cycle counter and just + * operate with the nanosecond field directly without fear of overflow. + * + * Much like the 82599, the update period is dependent upon the link speed: + * At 40Gb link or no link, the period is 1.6ns. + * At 10Gb link, the period is multiplied by 2. (3.2ns) + * At 1Gb link, the period is multiplied by 20. (32ns) + * 1588 functionality is not supported at 100Mbps. + */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL 0x2000000000ULL + +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \ + I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PTP_TX_TIMEOUT (HZ * 15) + +/** + * i40e_ptp_read - Read the PHC time from the device + * @pf: Board private structure + * @ts: timespec structure to hold the current time value + * + * This function reads the PRTTSYN_TIME registers and stores them in a + * timespec. However, since the registers are 64 bits of nanoseconds, we must + * convert the result to a timespec before we can return. + **/ +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) +{ + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + /* The timer latches on the lowest register read. */ + lo = rd32(hw, I40E_PRTTSYN_TIME_L); + hi = rd32(hw, I40E_PRTTSYN_TIME_H); + + ns = (((u64)hi) << 32) | lo; + + *ts = ns_to_timespec(ns); +} + +/** + * i40e_ptp_write - Write the PHC time to the device + * @pf: Board private structure + * @ts: timespec structure that holds the new time value + * + * This function writes the PRTTSYN_TIME registers with the user value. Since + * we receive a timespec from the stack, we must convert that timespec into + * nanoseconds before programming the registers. + **/ +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts) +{ + struct i40e_hw *hw = &pf->hw; + u64 ns = timespec_to_ns(ts); + + /* The timer will not update until the high register is written, so + * write the low register first. + */ + wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); +} + +/** + * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time + * @hwtstamps: Timestamp structure to update + * @timestamp: Timestamp from the hardware + * + * We need to convert the NIC clock value into a hwtstamp which can be used by + * the upper level timestamping functions. Since the timestamp is simply a 64- + * bit nanosecond value, we can call ns_to_ktime directly to handle this. + **/ +static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps, + u64 timestamp) +{ + memset(hwtstamps, 0, sizeof(*hwtstamps)); + + hwtstamps->hwtstamp = ns_to_ktime(timestamp); +} + +/** + * i40e_ptp_adjfreq - Adjust the PHC frequency + * @ptp: The PTP clock structure + * @ppb: Parts per billion adjustment from the base + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct i40e_hw *hw = &pf->hw; + u64 adj, freq, diff; + int neg_adj = 0; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + + smp_mb(); /* Force any pending update before accessing. 
*/ + adj = ACCESS_ONCE(pf->ptp_base_adj); + + freq = adj; + freq *= ppb; + diff = div_u64(freq, 1000000000ULL); + + if (neg_adj) + adj -= diff; + else + adj += diff; + + wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); + + return 0; +} + +/** + * i40e_ptp_adjtime - Adjust the PHC time + * @ptp: The PTP clock structure + * @delta: Offset in nanoseconds to adjust the PHC time by + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + struct timespec now, then = ns_to_timespec(delta); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + + i40e_ptp_read(pf, &now); + now = timespec_add(now, then); + i40e_ptp_write(pf, (const struct timespec *)&now); + + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_gettime - Get the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure to hold the current time value + * + * Read the device clock and return the correct value on ns, after converting it + * into a timespec struct. + **/ +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_read(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_settime - Set the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + **/ +static int i40e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&pf->tmreg_lock, flags); + i40e_ptp_write(pf, ts); + spin_unlock_irqrestore(&pf->tmreg_lock, flags); + + return 0; +} + +/** + * i40e_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a + * Tx timestamp event has occurred, in order to pass the Tx timestamp value up + * the stack in the skb. + */ +static void i40e_ptp_tx_work(struct work_struct *work) +{ + struct i40e_pf *pf = container_of(work, struct i40e_pf, + ptp_tx_work); + struct i40e_hw *hw = &pf->hw; + u32 prttsyn_stat_0; + + if (!pf->ptp_tx_skb) + return; + + if (time_is_before_jiffies(pf->ptp_tx_start + + I40E_PTP_TX_TIMEOUT)) { + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + pf->tx_hwtstamp_timeouts++; + dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n"); + return; + } + + prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0); + if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK) + i40e_ptp_tx_hwtstamp(pf); + else + schedule_work(&pf->ptp_tx_work); +} + +/** + * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem + * @ptp: The PTP clock structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * The XL710 does not support any of the ancillary features of the PHC + * subsystem, so this function may just return. 
+ **/ +static int i40e_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung + * @vsi: The VSI with the rings relevant to 1588 + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_ring *rx_ring; + unsigned long rx_event; + u32 prttsyn_stat; + int n; + + if (pf->flags & I40E_FLAG_PTP) + return; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + /* Unless all four receive timestamp registers are latched, we are not + * concerned about a possible PTP Rx hang, so just update the timeout + * counter and exit. + */ + if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK << + I40E_PRTTSYN_STAT_1_RXT0_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT1_MASK << + I40E_PRTTSYN_STAT_1_RXT1_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT2_MASK << + I40E_PRTTSYN_STAT_1_RXT2_SHIFT) | + (I40E_PRTTSYN_STAT_1_RXT3_MASK << + I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) { + pf->last_rx_ptp_check = jiffies; + return; + } + + /* Determine the most recent watchdog or rx_timestamp event. */ + rx_event = pf->last_rx_ptp_check; + for (n = 0; n < vsi->num_queue_pairs; n++) { + rx_ring = vsi->rx_rings[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* Only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + pf->last_rx_ptp_check = jiffies; + pf->rx_hwtstamp_cleared++; + dev_warn(&vsi->back->pdev->dev, + "%s: clearing Rx timestamp hang\n", + __func__); + } +} + +/** + * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp + * @pf: Board private structure + * + * Read the value of the Tx timestamp from the registers, convert it into a + * value consumable by the stack, and store that result into the shhwtstamps + * struct before returning it up the stack. + **/ +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct i40e_hw *hw = &pf->hw; + u32 hi, lo; + u64 ns; + + lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); + hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns); + skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; +} + +/** + * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp + * @pf: Board private structure + * @skb: Particular skb to send timestamp with + * @index: Index into the receive timestamp registers for the timestamp + * + * The XL710 receives a notification in the receive descriptor with an offset + * into the set of RXTIME registers where the timestamp is for that skb. This + * function goes and fetches the receive timestamp from that offset, if a valid + * one exists. The RXTIME registers are in ns, so we must convert the result + * first. 
+ **/ +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) +{ + u32 prttsyn_stat, hi, lo; + struct i40e_hw *hw; + u64 ns; + + /* Since we cannot turn off the Rx timestamp logic if the device is + * doing Tx timestamping, check if Rx timestamping is configured. + */ + if (!pf->ptp_rx) + return; + + hw = &pf->hw; + + prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + + if (!(prttsyn_stat & (1 << index))) + return; + + lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); + hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); + + ns = (((u64)hi) << 32) | lo; + + i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); +} + +/** + * i40e_ptp_set_increment - Utility function to update clock increment rate + * @pf: Board private structure + * + * During a link change, the DMA frequency that drives the 1588 logic will + * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds, + * we must update the increment value per clock tick. + **/ +void i40e_ptp_set_increment(struct i40e_pf *pf) +{ + struct i40e_link_status *hw_link_info; + struct i40e_hw *hw = &pf->hw; + u64 incval; + + hw_link_info = &hw->phy.link_info; + + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + + switch (hw_link_info->link_speed) { + case I40E_LINK_SPEED_10GB: + incval = I40E_PTP_10GB_INCVAL; + break; + case I40E_LINK_SPEED_1GB: + incval = I40E_PTP_1GB_INCVAL; + break; + case I40E_LINK_SPEED_100MB: + dev_warn(&pf->pdev->dev, + "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n", + __func__); + incval = 0; + break; + case I40E_LINK_SPEED_40GB: + default: + incval = I40E_PTP_40GB_INCVAL; + break; + } + + /* Write the new increment value into the increment register. The + * hardware will not update the clock until both registers have been + * written. + */ + wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); + wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + + /* Update the base adjustement value. */ + ACCESS_ONCE(pf->ptp_base_adj) = incval; + smp_mb(); /* Force the above update. */ +} + +/** + * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Obtain the current hardware timestamping settigs as requested. To do this, + * keep a shadow copy of the timestamp settings rather than attempting to + * deconstruct it from the registers. + **/ +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Respond to the user filter requests and make the appropriate hardware + * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping + * logic, so keep track in software of whether to indicate these timestamps + * or not. + * + * It is permissible to "upgrade" the user request to a broader filter, as long + * as the user receives the timestamps they care about and the user is notified + * the filter has been broadened. + **/ +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ + struct i40e_hw *hw = &pf->hw; + struct hwtstamp_config *config = &pf->tstamp_config; + u32 pf_id, tsyntype, regval; + + if (copy_from_user(config, ifr->ifr_data, sizeof(*config))) + return -EFAULT; + + /* Reserved for future extensions. 
*/ + if (config->flags) + return -EINVAL; + + /* Confirm that 1588 is supported on this PF. */ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + pf->ptp_tx = false; + break; + case HWTSTAMP_TX_ON: + pf->ptp_tx = true; + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + pf->ptp_rx = false; + tsyntype = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V1 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + pf->ptp_rx = true; + tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | + I40E_PRTTSYN_CTL1_TSYNTYPE_V2 | + I40E_PRTTSYN_CTL1_UDP_ENA_MASK; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_ALL: + default: + return -ERANGE; + } + + /* Clear out all 1588-related registers to clear and unlatch them. */ + rd32(hw, I40E_PRTTSYN_STAT_0); + rd32(hw, I40E_PRTTSYN_TXTIME_H); + rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); + rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + + /* Enable/disable the Tx timestamp interrupt based on user input. */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + if (pf->ptp_tx) + regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + else + regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + regval = rd32(hw, I40E_PFINT_ICR0_ENA); + if (pf->ptp_tx) + regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + else + regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, regval); + + /* There is no simple on/off switch for Rx. To "disable" Rx support, + * ignore any received timestamps, rather than turn off the clock. + */ + if (pf->ptp_rx) { + regval = rd32(hw, I40E_PRTTSYN_CTL1); + /* clear everything but the enable bit */ + regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + /* now enable bits for desired Rx timestamps */ + regval |= tsyntype; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + } + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/** + * i40e_ptp_init - Initialize the 1588 support and register the PHC + * @pf: Board private structure + * + * This function registers the device clock as a PHC. If it is successful, it + * starts the clock in the hardware. 
+ **/ +void i40e_ptp_init(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + + strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name)); + pf->ptp_caps.owner = THIS_MODULE; + pf->ptp_caps.max_adj = 999999999; + pf->ptp_caps.n_ext_ts = 0; + pf->ptp_caps.pps = 0; + pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; + pf->ptp_caps.adjtime = i40e_ptp_adjtime; + pf->ptp_caps.gettime = i40e_ptp_gettime; + pf->ptp_caps.settime = i40e_ptp_settime; + pf->ptp_caps.enable = i40e_ptp_enable; + + /* Attempt to register the clock before enabling the hardware. */ + pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); + if (IS_ERR(pf->ptp_clock)) { + pf->ptp_clock = NULL; + dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", + __func__); + } else { + struct timespec ts; + u32 regval; + + spin_lock_init(&pf->tmreg_lock); + INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work); + + dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, + netdev->name); + pf->flags |= I40E_FLAG_PTP; + + /* Ensure the clocks are running. */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + regval = rd32(hw, I40E_PRTTSYN_CTL1); + regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL1, regval); + + /* Set the increment value per clock tick. */ + i40e_ptp_set_increment(pf); + + /* reset the tstamp_config */ + memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config)); + + /* Set the clock value. */ + ts = ktime_to_timespec(ktime_get_real()); + i40e_ptp_settime(&pf->ptp_caps, &ts); + } +} + +/** + * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC + * @pf: Board private structure + * + * This function handles the cleanup work required from the initialization by + * clearing out the important information and unregistering the PHC. + **/ +void i40e_ptp_stop(struct i40e_pf *pf) +{ + pf->flags &= ~I40E_FLAG_PTP; + pf->ptp_tx = false; + pf->ptp_rx = false; + + cancel_work_sync(&pf->ptp_tx_work); + if (pf->ptp_tx_skb) { + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + } + + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, + pf->vsi[pf->lan_vsi]->netdev->name); + } +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_register.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
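
The frequency-adjustment path in i40e_ptp_adjfreq() above boils down to scaling the cached base increment by ppb parts per billion and then splitting the result across the two 32-bit increment registers. A standalone sketch of just that arithmetic, reusing the 40G base increment constant from this patch:

	#include <stdint.h>

	#define PTP_40GB_INCVAL 0x0199999999ULL	/* base increment from the patch */

	/* Scale a base increment by 'ppb' parts per billion, the same arithmetic
	 * i40e_ptp_adjfreq() performs before programming the increment registers.
	 */
	static uint64_t ptp_scale_incval(uint64_t base, int32_t ppb)
	{
		uint64_t diff;
		int neg = 0;

		if (ppb < 0) {
			neg = 1;
			ppb = -ppb;
		}
		diff = (base * (uint64_t)ppb) / 1000000000ULL;
		return neg ? base - diff : base + diff;
	}

	/* The driver then writes the low 32 bits of the result to
	 * I40E_PRTTSYN_INC_L and the high 32 bits to I40E_PRTTSYN_INC_H,
	 * low half first, as in i40e_ptp_adjfreq() above.
	 */
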
@@ -28,6 +27,10 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) #define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) @@ -38,6 +41,11 @@ #define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) #define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) @@ -50,9 +58,14 @@ #define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600 #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) #define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) + #define I40E_PF_ARQBAH 0x00080180 #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 #define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT) @@ -837,7 +850,7 @@ #define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT) #define I40E_GLHMC_PEQ1FLMAX 0x000C2058 #define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) #define I40E_GLHMC_PEQ1MAX 0x000C2054 #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) @@ -903,7 +916,7 @@ #define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT) #define I40E_GLHMC_PEXFFLMAX 0x000C204c #define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) #define I40E_GLHMC_PEXFMAX 0x000C2048 #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) @@ -1636,7 +1649,7 @@ #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 #define 
I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) -#define I40E_VSILAN_QTABLE_MAX_INDEX 15 +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 #define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 @@ -1773,16 +1786,20 @@ #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) #define I40E_GL_MNG_FWSM 0x000B6134 -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6 #define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25 +#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT) #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 @@ -2035,6 +2052,28 @@ #define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT) #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 #define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 +#define 
I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) + #define I40E_GLPCI_BYTCTH 0x0009C484 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) @@ -2170,6 +2209,12 @@ #define I40E_GLPCI_PCIERR 0x000BE4FC #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PCITEST2 0x000BE4BC +#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0 +#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT) +#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1 +#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT) + #define I40E_GLPCI_PKTCT 0x0009C4BC #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) @@ -2380,8 +2425,7 @@ #define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) #define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 #define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17 -#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT) + #define I40E_PFPE_MRTEIDXMASK 0x00008600 #define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 #define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) @@ -2460,8 +2504,6 @@ #define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) #define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 #define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17 -#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT) #define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ #define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 #define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 @@ -3141,30 +3183,6 @@ #define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLPM_DMACR 0x000881F4 -#define I40E_GLPM_DMACR_DMACWT_SHIFT 0 -#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT) -#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29 -#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT) -#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30 -#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT) -#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31 -#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT) -#define I40E_GLPM_LTRC 0x000BE500 -#define I40E_GLPM_LTRC_SLTRV_SHIFT 0 -#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT) -#define I40E_GLPM_LTRC_SSCALE_SHIFT 10 -#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT) -#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15 -#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT) -#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16 -#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT) -#define 
I40E_GLPM_LTRC_NSSCALE_SHIFT 26 -#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT) -#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30 -#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT) -#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31 -#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT) #define I40E_PRTPM_EEE_STAT 0x001E4320 #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 #define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) @@ -3201,9 +3219,6 @@ #define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT) #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 #define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) -#define I40E_PRTPM_HPTC 0x000AC800 -#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0 -#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT) #define I40E_PRTPM_RLPIC 0x001E43A0 #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 #define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT) @@ -3265,8 +3280,8 @@ #define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 #define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) -#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7 -#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT) #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 #define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT) #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 @@ -3416,9 +3431,9 @@ #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 -#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) -#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6 -#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) @@ -3504,7 +3519,7 @@ #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 #define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) #define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) -#define I40E_VSIQF_TCREGION_MAX_INDEX 7 +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 #define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 @@ -3521,10 +3536,7 @@ #define I40E_GL_FCOEDDPC_MAX_INDEX 143 #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 #define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) -#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */ -#define I40E_GL_FCOEDDPEC_MAX_INDEX 143 -#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0 -#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT) +/* _i=0...143 */ #define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ #define I40E_GL_FCOEDIFEC_MAX_INDEX 143 #define 
I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 @@ -4276,46 +4288,10 @@ #define I40E_PFPM_APM 0x000B8080 #define I40E_PFPM_APM_APME_SHIFT 0 #define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128)) -#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7 -#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0 -#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT) #define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 #define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128)) -#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7 -#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0 -#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT) -#define I40E_PFPM_PROXYFC 0x00245A80 -#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0 -#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT) -#define I40E_PFPM_PROXYFC_EX_SHIFT 1 -#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT) -#define I40E_PFPM_PROXYFC_ARP_SHIFT 4 -#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT) -#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5 -#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYFC_NS_SHIFT 9 -#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT) -#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10 -#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYFC_MLD_SHIFT 12 -#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT) -#define I40E_PFPM_PROXYS 0x00245B80 -#define I40E_PFPM_PROXYS_EX_SHIFT 1 -#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT) -#define I40E_PFPM_PROXYS_ARP_SHIFT 4 -#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT) -#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5 -#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYS_NS_SHIFT 9 -#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT) -#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10 -#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYS_MLD_SHIFT 12 -#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT) #define I40E_PFPM_WUC 0x0006B200 #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 #define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT) @@ -4536,21 +4512,21 @@ #define I40E_VFMSIX_PBA 0x00002000 #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 #define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ #define I40E_VFMSIX_TADD_MAX_INDEX 16 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 #define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 #define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ #define I40E_VFMSIX_TMSG_MAX_INDEX 16 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ #define I40E_VFMSIX_TUADD_MAX_INDEX 16 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 #define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 #define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT) @@ -4610,8 +4586,6 @@ #define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17 -#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT) #define I40E_VFPE_MRTEIDXMASK1 0x00009000 #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) @@ -4684,5 +4658,13 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT) - +#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT) #endif --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_status.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
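
The register header edited above keeps a consistent convention: every field gets a _SHIFT and a pre-shifted _MASK, so a field value is extracted as (value & MASK) >> SHIFT. A small sketch in driver context, using the RESET_CNT field added to I40E_GL_MNG_FWSM in this patch (rd32() is the driver's existing register-read helper):

	/* Extract the firmware reset counter following the header's SHIFT/MASK
	 * convention; the masks are already shifted into position, e.g.
	 *   #define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
	 *   #define I40E_GL_MNG_FWSM_RESET_CNT_MASK  (0x7 << ..._SHIFT)
	 */
	static u32 example_read_reset_count(struct i40e_hw *hw)
	{
		u32 fwsm = rd32(hw, I40E_GL_MNG_FWSM);

		return (fwsm & I40E_GL_MNG_FWSM_RESET_CNT_MASK) >>
		       I40E_GL_MNG_FWSM_RESET_CNT_SHIFT;
	}
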
--- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -26,6 +25,7 @@ ******************************************************************************/ #include "i40e.h" +#include "i40e_prototype.h" static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) @@ -40,11 +40,12 @@ #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** * i40e_program_fdir_filter - Program a Flow Director filter - * @fdir_input: Packet data that will be filter parameters + * @fdir_data: Packet data that will be filter parameters + * @raw_packet: the pre-allocated packet buffer for FDir * @pf: The pf pointer * @add: True for add/update, False for remove **/ -int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, struct i40e_pf *pf, bool add) { struct i40e_filter_program_desc *fdir_desc; @@ -69,15 +70,14 @@ tx_ring = vsi->tx_rings[0]; dev = tx_ring->dev; - dma = dma_map_single(dev, fdir_data->raw_packet, - I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); + dma = dma_map_single(dev, raw_packet, + I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto dma_fail; /* grab the next descriptor */ i = tx_ring->next_to_use; fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); - tx_buf = &tx_ring->tx_bi[i]; tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0; @@ -129,14 +129,22 @@ /* Now program a dummy descriptor */ i = tx_ring->next_to_use; tx_desc = I40E_TX_DESC(tx_ring, i); + tx_buf = &tx_ring->tx_bi[i]; tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0; + /* record length, and DMA address */ + dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); + dma_unmap_addr_set(tx_buf, dma, dma); + tx_desc->buffer_addr = cpu_to_le64(dma); td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); + build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); + + /* set the timestamp */ + tx_buf->time_stamp = jiffies; /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only @@ -155,26 +163,329 @@ return -1; } +#define IP_HEADER_OFFSET 14 +#define I40E_UDPIP_DUMMY_PACKET_LEN 42 +/** + * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + struct i40e_pf *pf = vsi->back; + struct udphdr *udp; + struct iphdr *ip; + bool err = false; + int ret; + int i; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + udp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + udp->source = fd_data->src_port; + + for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; + i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { + fd_data->pctype = i; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + } + + return err ? -EOPNOTSUPP : 0; +} + +#define I40E_TCPIP_DUMMY_PACKET_LEN 54 +/** + * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + struct i40e_pf *pf = vsi->back; + struct tcphdr *tcp; + struct iphdr *ip; + bool err = false; + int ret; + /* Dummy packet */ + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11, + 0x0, 0x72, 0, 0, 0, 0}; + + memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN); + + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET + + sizeof(struct iphdr)); + + ip->daddr = fd_data->dst_ip[0]; + tcp->dest = fd_data->dst_port; + ip->saddr = fd_data->src_ip[0]; + tcp->source = fd_data->src_port; + + if (add) { + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + } + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + + fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + + ret = 
i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + + return err ? -EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Always returns -EOPNOTSUPP + **/ +static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + return -EOPNOTSUPP; +} + +#define I40E_IP_DUMMY_PACKET_LEN 34 +/** + * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, + struct i40e_fdir_filter *fd_data, + u8 *raw_packet, bool add) +{ + struct i40e_pf *pf = vsi->back; + struct iphdr *ip; + bool err = false; + int ret; + int i; + static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, + 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0}; + + memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); + ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + + ip->saddr = fd_data->src_ip[0]; + ip->daddr = fd_data->dst_ip[0]; + ip->protocol = 0; + + for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; + i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) { + fd_data->pctype = i; + ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + + if (ret) { + dev_info(&pf->pdev->dev, + "Filter command send failed for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + err = true; + } else { + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d (ret = %d)\n", + fd_data->pctype, ret); + } + } + + return err ? 
-EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir - Build raw packets to add/del fdir filter + * @vsi: pointer to the targeted VSI + * @cmd: command to get or set RX flow classification rules + * @add: true adds a filter, false removes it + * + **/ +int i40e_add_del_fdir(struct i40e_vsi *vsi, + struct i40e_fdir_filter *input, bool add) +{ + struct i40e_pf *pf = vsi->back; + u8 *raw_packet; + int ret; + + /* Populate the Flow Director that we have at the moment + * and allocate the raw packet buffer for the calling functions + */ + raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); + if (!raw_packet) + return -ENOMEM; + + switch (input->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet, + add); + break; + case UDP_V4_FLOW: + ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet, + add); + break; + case SCTP_V4_FLOW: + ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet, + add); + break; + case IPV4_FLOW: + ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet, + add); + break; + case IP_USER_FLOW: + switch (input->ip4_proto) { + case IPPROTO_TCP: + ret = i40e_add_del_fdir_tcpv4(vsi, input, + raw_packet, add); + break; + case IPPROTO_UDP: + ret = i40e_add_del_fdir_udpv4(vsi, input, + raw_packet, add); + break; + case IPPROTO_SCTP: + ret = i40e_add_del_fdir_sctpv4(vsi, input, + raw_packet, add); + break; + default: + ret = i40e_add_del_fdir_ipv4(vsi, input, + raw_packet, add); + break; + } + break; + default: + dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", + input->flow_type); + ret = -EINVAL; + } + + kfree(raw_packet); + return ret; +} + /** * i40e_fd_handle_status - check the Programming Status for FD * @rx_ring: the Rx ring for this descriptor - * @qw: the descriptor data + * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor. * @prog_id: the id originally used for programming * * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. **/ -static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) { - struct pci_dev *pdev = rx_ring->vsi->back->pdev; + struct i40e_pf *pf = rx_ring->vsi->back; + struct pci_dev *pdev = pf->pdev; + u32 fcnt_prog, fcnt_avail; u32 error; + u64 qw; + qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len); error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; - /* for now just print the Status */ - dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n", - prog_id, error); + if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", + rx_desc->wb.qword0.hi_dword.fd_id); + + /* filter programming failed most likely due to table full */ + fcnt_prog = i40e_get_current_fd_count(pf); + fcnt_avail = pf->hw.fdir_shared_filter_count + + pf->fdir_pf_filter_count; + + /* If ATR is running fcnt_prog can quickly change, + * if we are very close to full, it makes sense to disable + * FD ATR/SB and then re-enable it when there is room. 
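+ * (fcnt_prog is the number of filters currently programmed in hardware, + * as returned by i40e_get_current_fd_count(), and fcnt_avail is the sum + * of the shared filter count and the PF's own filter count.)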
+ */ + if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { + /* Turn off ATR first */ + if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); + pf->auto_disable_flags |= + I40E_FLAG_FD_ATR_ENABLED; + pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; + } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + pf->auto_disable_flags |= + I40E_FLAG_FD_SB_ENABLED; + pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; + } + } else { + dev_info(&pdev->dev, "FD filter programming error\n"); + } + } else if (error == + (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n", + rx_desc->wb.qword0.hi_dword.fd_id); + } } /** @@ -309,6 +620,20 @@ } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean * @budget: how many cleans we're allowed @@ -319,6 +644,7 @@ { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; unsigned int total_packets = 0; unsigned int total_bytes = 0; @@ -327,6 +653,8 @@ tx_desc = I40E_TX_DESC(tx_ring, i); i -= tx_ring->count; + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + do { struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; @@ -337,9 +665,8 @@ /* prevent any other reads prior to eop_desc */ read_barrier_depends(); - /* if the descriptor isn't done, no work yet to do */ - if (!(eop_desc->cmd_type_offset_bsz & - cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) break; /* clear next_to_watch to prevent false hangs */ @@ -571,7 +898,7 @@ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) - i40e_fd_handle_status(rx_ring, qw, id); + i40e_fd_handle_status(rx_ring, rx_desc, id); } /** @@ -595,6 +922,10 @@ /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + tx_ring->size += sizeof(u32); tx_ring->size = ALIGN(tx_ring->size, 4096); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); @@ -768,7 +1099,7 @@ skb = netdev_alloc_skb_ip_align(rx_ring->netdev, rx_ring->rx_buf_len); if (!skb) { - rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; } /* initialize queue mapping */ @@ -782,7 +1113,7 @@ rx_ring->rx_buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; goto no_buffers; } @@ -792,7 +1123,7 @@ if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { - rx_ring->rx_stats.alloc_rx_page_failed++; + rx_ring->rx_stats.alloc_page_failed++; goto 
no_buffers; } } @@ -807,7 +1138,7 @@ DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - rx_ring->rx_stats.alloc_rx_page_failed++; + rx_ring->rx_stats.alloc_page_failed++; bi->page_dma = 0; goto no_buffers; } @@ -860,12 +1191,25 @@ * @skb: skb currently being received and modified * @rx_status: status value of last descriptor in packet * @rx_error: error value of last descriptor in packet + * @rx_ptype: ptype value of last descriptor in packet **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, u32 rx_status, - u32 rx_error) + u32 rx_error, + u16 rx_ptype) { + bool ipv4_tunnel, ipv6_tunnel; + __wsum rx_udp_csum; + __sum16 csum; + struct iphdr *iph; + + ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + + skb->encapsulation = ipv4_tunnel || ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; /* Rx csum enabled and ip headers found? */ @@ -873,13 +1217,47 @@ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* IP or L4 checksum error */ + /* likely incorrect csum if alternate IP extension headers found */ + if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + return; + + /* IP or L4 or outmost IP checksum error */ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | - (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) { + (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | + (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { vsi->back->hw_csum_rx_error++; return; } + if (ipv4_tunnel && + !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { + /* If VXLAN traffic has an outer UDPv4 checksum we need to check + * it in the driver, hardware does not do it for us. + * Since L3L4P bit was set we assume a valid IHL value (>=5) + * so the total length of IPv4 header is IHL*4 bytes + */ + skb->transport_header = skb->mac_header + + sizeof(struct ethhdr) + + (ip_hdr(skb)->ihl * 4); + + /* Add 4 bytes for VLAN tagged packets */ + skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + ? 
VLAN_HLEN : 0; + + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic( + iph->saddr, iph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); + + if (udp_hdr(skb)->check != csum) { + vsi->back->hw_csum_rx_error++; + return; + } + } + skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -891,13 +1269,38 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring, union i40e_rx_desc *rx_desc) { - if (ring->netdev->features & NETIF_F_RXHASH) { - if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >> - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & - I40E_RX_DESC_FLTSTAT_RSS_HASH) - return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - } - return 0; + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if ((ring->netdev->features & NETIF_F_RXHASH) && + (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) + return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + else + return 0; +} + +/** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; } /** @@ -917,12 +1320,16 @@ u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + u8 rx_ptype; u64 qword; + if (budget <= 0) + return 0; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) - >> I40E_RXD_QW1_STATUS_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { union i40e_rx_desc *next_rxd; @@ -938,18 +1345,20 @@ skb = rx_bi->skb; prefetch(skb->data); - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) - >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) - >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT; - rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) - >> I40E_RXD_QW1_LENGTH_SPH_SHIFT; + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> + I40E_RXD_QW1_LENGTH_HBUF_SHIFT; + rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> + I40E_RXD_QW1_LENGTH_SPH_SHIFT; - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) - >> I40E_RXD_QW1_ERROR_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; rx_bi->skb = NULL; /* This memory barrier is needed to keep us from reading @@ -1029,14 +1438,23 @@ goto next_desc; } - skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); - i40e_rx_checksum(vsi, skb, rx_status, rx_error); + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, 
(rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; @@ -1059,8 +1477,8 @@ /* use prefetched values */ rx_desc = next_rxd; qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) - >> I40E_RXD_QW1_STATUS_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; } rx_ring->next_to_clean = i; @@ -1173,15 +1591,13 @@ u16 i; /* make sure ATR is enabled */ - if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) + if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; /* if sampling is disabled do nothing */ if (!tx_ring->atr_sample_rate) return; - tx_ring->atr_count++; - /* snag network header to get L4 type and address */ hdr.network = skb_network_header(skb); @@ -1203,8 +1619,17 @@ th = (struct tcphdr *)(hdr.network + hlen); - /* sample on all syn/fin packets or once every atr sample rate */ - if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate)) + /* Due to lack of space, no more new filters can be programmed */ + if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + + tx_ring->atr_count++; + + /* sample on all syn/fin/rst packets or once every atr sample rate */ + if (!th->fin && + !th->syn && + !th->rst && + (tx_ring->atr_count < tx_ring->atr_sample_rate)) return; tx_ring->atr_count = 0; @@ -1228,7 +1653,7 @@ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; - dtype_cmd |= th->fin ? + dtype_cmd |= (th->fin || th->rst) ? (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << I40E_TXD_FLTR_QW1_PCMD_SHIFT) : (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << @@ -1268,7 +1693,7 @@ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; tx_flags |= I40E_TX_FLAGS_HW_VLAN; /* else if it is a SW VLAN, check the next protocol and store the tag */ - } else if (protocol == __constant_htons(ETH_P_8021Q)) { + } else if (protocol == htons(ETH_P_8021Q)) { struct vlan_hdr *vhdr, _vhdr; vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); if (!vhdr) @@ -1288,9 +1713,11 @@ I40E_TX_FLAGS_VLAN_PRIO_SHIFT; if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { struct vlan_ethhdr *vhdr; - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - return -ENOMEM; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI = htons(tx_flags >> I40E_TX_FLAGS_VLAN_SHIFT); @@ -1318,22 +1745,20 @@ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; + struct ipv6hdr *ipv6h; struct tcphdr *tcph; struct iphdr *iph; u32 l4len; int err; - struct ipv6hdr *ipv6h; if (!skb_is_gso(skb)) return 0; - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } + err = skb_cow_head(skb, 0); + if (err < 0) + return err; - if (protocol == __constant_htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; @@ -1359,10 +1784,50 @@ cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) - | ((u64)cd_tso_len - << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) - | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + return 1; +} + +/** + * i40e_tsyn - set up the tsyn context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * + * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen + **/ +static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u64 *cd_type_cmd_tso_mss) +{ + struct i40e_pf *pf; + + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return 0; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (tx_flags & I40E_TX_FLAGS_TSO) + return 0; + + /* only timestamp the outbound packet if the user has requested it and + * we are not already transmitting a packet to be timestamped + */ + pf = i40e_netdev_to_pf(tx_ring->netdev); + if (pf->ptp_tx && !pf->ptp_tx_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + pf->ptp_tx_skb = skb_get(skb); + } else { + return 0; + } + + *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << + I40E_TXD_CTX_QW1_CMD_SHIFT; + + pf->ptp_tx_start = jiffies; + schedule_work(&pf->ptp_tx_work); + return 1; } @@ -1490,7 +1955,8 @@ struct i40e_tx_context_desc *context_desc; int i = tx_ring->next_to_use; - if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) return; /* grab the next descriptor */ @@ -1601,9 +2067,23 @@ tx_bi = &tx_ring->tx_bi[i]; } - tx_desc->cmd_type_offset_bsz = - build_ctob(td_cmd, td_offset, size, td_tag) | - cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), @@ -1660,6 +2140,7 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ smp_mb(); /* Check again in a case another CPU has just made room available. 
*/ @@ -1705,7 +2186,7 @@ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 2 desc gap to keep tail from touching head, + * + 4 desc gap to avoid the cache line where head is, * + 1 desc for context descriptor, * otherwise try next time */ @@ -1716,7 +2197,7 @@ count += skb_shinfo(skb)->nr_frags; #endif count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 3)) { + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return 0; } @@ -1741,6 +2222,7 @@ __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; + int tsyn; int tso; if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; @@ -1756,9 +2238,9 @@ first = &tx_ring->tx_bi[tx_ring->next_to_use]; /* setup IPv4/IPv6 offloads */ - if (protocol == __constant_htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tx_flags |= I40E_TX_FLAGS_IPV4; - else if (protocol == __constant_htons(ETH_P_IPV6)) + else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, @@ -1771,6 +2253,11 @@ skb_tx_timestamp(skb); + tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); + + if (tsyn) + tx_flags |= I40E_TX_FLAGS_TSYN; + /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -25,11 +24,13 @@ * ******************************************************************************/ +#ifndef _I40E_TXRX_H_ +#define _I40E_TXRX_H_ + /* Interrupt Throttling and Rate Limiting (storm control) Goodies */ -#define I40E_MAX_ITR 0x07FF -#define I40E_MIN_ITR 0x0001 -#define I40E_ITR_USEC_RESOLUTION 2 +#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ #define I40E_MAX_IRATE 0x03F #define I40E_MIN_IRATE 0x001 #define I40E_IRATE_USEC_RESOLUTION 4 @@ -49,10 +50,43 @@ #define I40E_QUEUE_END_OF_LIST 0x7FF -#define I40E_ITR_NONE 3 -#define I40E_RX_ITR 0 -#define I40E_TX_ITR 1 -#define I40E_PE_ITR 2 +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
+ */ +enum i40e_dyn_idx_t { + I40E_IDX_ITR0 = 0, + I40E_IDX_ITR1 = 1, + I40E_IDX_ITR2 = 2, + I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR I40E_IDX_ITR0 +#define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_PE_ITR I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_2048 2048 @@ -102,6 +136,7 @@ #define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FSO (u32)(1 << 7) +#define I40E_TX_FLAGS_TSYN (u32)(1 << 8) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -139,8 +174,8 @@ struct i40e_rx_queue_stats { u64 non_eop_descs; - u64 alloc_rx_page_failed; - u64 alloc_rx_buff_failed; + u64 alloc_page_failed; + u64 alloc_buff_failed; }; enum i40e_ring_state_t { @@ -214,6 +249,8 @@ u8 atr_sample_rate; u8 atr_count; + unsigned long last_rx_timestamp; + bool ring_active; /* is ring online or not */ /* stats structs */ @@ -262,3 +299,4 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring); void i40e_free_rx_resources(struct i40e_ring *rx_ring); int i40e_napi_poll(struct napi_struct *napi, int budget); +#endif /* _I40E_TXRX_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_type.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
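The I40E_DEFAULT_RSS_HENA value above enables RSS by setting one bit per supported packet classifier type (PCTYPE) in a 64-bit hash-enable mask. A minimal sketch of how such a mask can be tested, assuming the i40e_filter_pctype enum from i40e_type.h (the helper name is hypothetical and not part of the driver):

static inline bool i40e_hena_has_pctype(u64 hena, enum i40e_filter_pctype pctype)
{
	/* each supported PCTYPE occupies bit 'pctype' of the hena mask */
	return (hena & ((u64)1 << pctype)) != 0;
}

For example, i40e_hena_has_pctype(I40E_DEFAULT_RSS_HENA, I40E_FILTER_PCTYPE_NONF_IPV4_TCP) evaluates to true, while bits for reserved PCTYPE values remain clear.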
@@ -36,38 +35,31 @@ #include "i40e_lan_hmc.h" /* Device IDs */ -#define I40E_SFP_XL710_DEVICE_ID 0x1572 -#define I40E_SFP_X710_DEVICE_ID 0x1573 -#define I40E_QEMU_DEVICE_ID 0x1574 -#define I40E_KX_A_DEVICE_ID 0x157F -#define I40E_KX_B_DEVICE_ID 0x1580 -#define I40E_KX_C_DEVICE_ID 0x1581 -#define I40E_KX_D_DEVICE_ID 0x1582 -#define I40E_QSFP_A_DEVICE_ID 0x1583 -#define I40E_QSFP_B_DEVICE_ID 0x1584 -#define I40E_QSFP_C_DEVICE_ID 0x1585 -#define I40E_VF_DEVICE_ID 0x154C -#define I40E_VF_HV_DEVICE_ID 0x1571 - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0000 +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_SFP_X710 0x1573 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_KX_D 0x1582 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 #define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 -/* Check whether address is multicast. This is little-endian specific check.*/ -#define I40E_IS_MULTICAST(address) \ - (bool)(((u8 *)(address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. */ -#define I40E_IS_BROADCAST(address) \ - ((((u8 *)(address))[0] == ((u8)0xff)) && \ - (((u8 *)(address))[1] == ((u8)0xff))) - /* Switch from mc to the 2usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) @@ -75,8 +67,6 @@ struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); -#define I40E_ETH_LENGTH_OF_ADDRESS 6 - /* Data type manipulation macros. 
*/ #define I40E_DESC_UNUSED(R) \ @@ -85,9 +75,10 @@ /* bitfields for Tx queue mapping in QTX_CTL */ #define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 #define I40E_QTX_CTL_PF_QUEUE 0x2 -/* debug masks */ +/* debug masks - set these bits in hw->debug_mask to control output */ enum i40e_debug_mask { I40E_DEBUG_INIT = 0x00000001, I40E_DEBUG_RELEASE = 0x00000002, @@ -100,11 +91,12 @@ I40E_DEBUG_FLOW = 0x00000200, I40E_DEBUG_DCB = 0x00000400, I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */ + I40E_DEBUG_AQ_MESSAGE = 0x01000000, I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */ + I40E_DEBUG_AQ_COMMAND = 0x06000000, I40E_DEBUG_AQ = 0x0F000000, I40E_DEBUG_USER = 0xF0000000, @@ -134,6 +126,7 @@ I40E_MEDIA_TYPE_BASET, I40E_MEDIA_TYPE_BACKPLANE, I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, I40E_MEDIA_TYPE_VIRTUAL }; @@ -171,6 +164,7 @@ u8 link_info; u8 an_info; u8 ext_info; + u8 loopback; /* is Link Status Event notification to SW enabled */ bool lse_enable; }; @@ -236,9 +230,9 @@ struct i40e_mac_info { enum i40e_mac_type type; - u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS]; - u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; u16 max_fcoeq; }; @@ -465,6 +459,10 @@ union { __le32 rss; /* RSS Hash */ __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; } hi_dword; } qword0; struct { @@ -500,18 +498,26 @@ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, - I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */ + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, I40E_RX_DESC_STATUS_PIF_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ - I40E_RX_DESC_STATUS_LPBK_SHIFT = 14 + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 }; #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \ +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ + I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + enum i40e_rx_desc_fltstat_values { I40E_RX_DESC_FLTSTAT_NO_DATA = 0, I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ @@ -547,28 +553,32 @@ /* Packet type non-ip values */ enum i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21 + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 }; struct i40e_rx_ptype_decoded { @@ -693,7 +703,7 @@ enum i40e_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 }; @@ -852,10 +862,7 @@ /* Packet Classifier Types for filters */ enum i40e_filter_pctype { - /* Note: Value 0-25 are reserved for future use */ - I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26, - I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27, - I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28, + /* Note: Values 0-28 are reserved for future use */ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, @@ -864,8 +871,7 @@ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Value 37 is reserved for future use */ - I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38, + /* Note: Values 37-38 are reserved for future use */ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, @@ -877,7 +883,8 @@ /* Note: Value 47 is reserved for future use */ I40E_FILTER_PCTYPE_FCOE_OX = 48, I40E_FILTER_PCTYPE_FCOE_RX = 49, - /* Note: Value 50-62 are reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, }; @@ -1008,12 +1015,18 @@ u64 tx_size_big; /* ptc9522 */ u64 
mac_short_packet_dropped; /* mspdc */ u64 checksum_error; /* xec */ + /* EEE LPI */ + bool tx_lpi_status; + bool rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ }; /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E @@ -1138,17 +1151,4 @@ I40E_RESET_GLOBR = 2, I40E_RESET_EMPR = 3, }; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0xF -struct i40e_lldp_variables { - u16 length; - u16 adminstatus; - u16 msgfasttx; - u16 msgtxinterval; - u16 txparams; - u16 timers; - u16 crc8; -}; - #endif /* _I40E_TYPE_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". @@ -142,7 +141,7 @@ u16 num_queue_pairs; enum i40e_vsi_type vsi_type; u16 qset_handle; - u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 default_mac_addr[ETH_ALEN]; }; /* VF offload flags */ #define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 @@ -265,7 +264,7 @@ */ struct i40e_virtchnl_ether_addr { - u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 addr[ETH_ALEN]; u8 pad[2]; }; --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
@@ -102,130 +101,6 @@ } /** - * i40e_ctrl_vsi_tx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct - * @vsi_queue_id: vsi relative queue index - * @ctrl: control flags - * - * enable/disable/enable check/disable check - **/ -static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, - u16 vsi_queue_id, - enum i40e_queue_ctrl ctrl) -{ - struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - bool writeback = false; - u16 pf_queue_id; - int ret = 0; - u32 reg; - - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); - reg = rd32(hw, I40E_QTX_ENA(pf_queue_id)); - - switch (ctrl) { - case I40E_QUEUE_CTRL_ENABLE: - reg |= I40E_QTX_ENA_QENA_REQ_MASK; - writeback = true; - break; - case I40E_QUEUE_CTRL_ENABLECHECK: - ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM; - break; - case I40E_QUEUE_CTRL_DISABLE: - reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; - writeback = true; - break; - case I40E_QUEUE_CTRL_DISABLECHECK: - ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0; - break; - case I40E_QUEUE_CTRL_FASTDISABLE: - reg |= I40E_QTX_ENA_FAST_QDIS_MASK; - writeback = true; - break; - case I40E_QUEUE_CTRL_FASTDISABLECHECK: - ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0; - if (!ret) { - reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK; - writeback = true; - } - break; - default: - ret = -EINVAL; - break; - } - - if (writeback) { - wr32(hw, I40E_QTX_ENA(pf_queue_id), reg); - i40e_flush(hw); - } - - return ret; -} - -/** - * i40e_ctrl_vsi_rx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct - * @vsi_queue_id: vsi relative queue index - * @ctrl: control flags - * - * enable/disable/enable check/disable check - **/ -static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, - u16 vsi_queue_id, - enum i40e_queue_ctrl ctrl) -{ - struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - bool writeback = false; - u16 pf_queue_id; - int ret = 0; - u32 reg; - - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); - reg = rd32(hw, I40E_QRX_ENA(pf_queue_id)); - - switch (ctrl) { - case I40E_QUEUE_CTRL_ENABLE: - reg |= I40E_QRX_ENA_QENA_REQ_MASK; - writeback = true; - break; - case I40E_QUEUE_CTRL_ENABLECHECK: - ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM; - break; - case I40E_QUEUE_CTRL_DISABLE: - reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; - writeback = true; - break; - case I40E_QUEUE_CTRL_DISABLECHECK: - ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0; - break; - case I40E_QUEUE_CTRL_FASTDISABLE: - reg |= I40E_QRX_ENA_FAST_QDIS_MASK; - writeback = true; - break; - case I40E_QUEUE_CTRL_FASTDISABLECHECK: - ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 
-EPERM : 0; - if (!ret) { - reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK; - writeback = true; - } - break; - default: - ret = -EINVAL; - break; - } - - if (writeback) { - wr32(hw, I40E_QRX_ENA(pf_queue_id), reg); - i40e_flush(hw); - } - - return ret; -} - -/** * i40e_config_irq_link_list * @vf: pointer to the vf info * @vsi_idx: index of VSI in PF struct @@ -251,8 +126,8 @@ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); else reg_idx = I40E_VPINT_LNKLSTN( - (pf->hw.func_caps.num_msix_vectors_vf - * vf->vf_id) + (vector_id - 1)); + ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + + (vector_id - 1)); if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { /* Special case - No queues mapped on this vector */ @@ -260,23 +135,17 @@ goto irq_list_done; } tempmap = vecmap->rxq_map; - vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (vsi_queue_id < I40E_MAX_VSI_QP) { + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { linklistmap |= (1 << (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id)); - vsi_queue_id = - find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1); } tempmap = vecmap->txq_map; - vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (vsi_queue_id < I40E_MAX_VSI_QP) { + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { linklistmap |= (1 << (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id + 1)); - vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - vsi_queue_id + 1); } next_q = find_first_bit(&linklistmap, @@ -307,7 +176,8 @@ (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES), next_q + 1); - if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + if (next_q < + (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, @@ -360,6 +230,9 @@ tx_ctx.qlen = info->ring_len; tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); tx_ctx.rdylist_act = 0; + tx_ctx.head_wb_ena = 1; + tx_ctx.head_wb_addr = info->dma_ring_addr + + (info->ring_len * sizeof(struct i40e_tx_desc)); /* clear the context in the HMC */ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); @@ -499,7 +372,6 @@ { struct i40e_mac_filter *f = NULL; struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; struct i40e_vsi *vsi; int ret = 0; @@ -513,171 +385,42 @@ goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { + u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; vf->lan_vsi_index = vsi->idx; vf->lan_vsi_id = vsi->id; dev_info(&pf->pdev->dev, - "LAN VSI index %d, VSI id %d\n", - vsi->idx, vsi->id); + "VF %d assigned LAN VSI index %d, VSI id %d\n", + vf->vf_id, vsi->idx, vsi->id); + /* If the port VLAN has been configured and then the + * VF driver was removed then the VSI port VLAN + * configuration was destroyed. Check if there is + * a port VLAN and restore the VSI configuration if + * needed. 
+ */ + if (vf->port_vlan_id) + i40e_vsi_add_pvid(vsi, vf->port_vlan_id); f = i40e_add_filter(vsi, vf->default_lan_addr.addr, - 0, true, false); - } - if (!f) { - dev_err(&pf->pdev->dev, "Unable to add ucast filter\n"); - ret = -ENOMEM; - goto error_alloc_vsi_res; + vf->port_vlan_id, true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not allocate VF MAC addr\n"); + f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, + true, false); + if (!f) + dev_info(&pf->pdev->dev, + "Could not allocate VF broadcast filter\n"); } /* program mac filter */ ret = i40e_sync_vsi_filters(vsi); - if (ret) { + if (ret) dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); - goto error_alloc_vsi_res; - } - - /* accept bcast pkts. by default */ - ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL); - if (ret) { - dev_err(&pf->pdev->dev, - "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n", - vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status); - ret = -EINVAL; - } error_alloc_vsi_res: return ret; } /** - * i40e_reset_vf - * @vf: pointer to the vf structure - * @flr: VFLR was issued or not - * - * reset the vf - **/ -int i40e_reset_vf(struct i40e_vf *vf, bool flr) -{ - int ret = -ENOENT; - struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - u32 reg, reg_idx, msix_vf; - bool rsd = false; - u16 pf_queue_id; - int i, j; - - /* warn the VF */ - wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS); - - clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); - - /* PF triggers VFR only when VF requests, in case of - * VFLR, HW triggers VFR - */ - if (!flr) { - /* reset vf using VPGEN_VFRTRIG reg */ - reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - i40e_flush(hw); - } - - /* poll VPGEN_VFRSTAT reg to make sure - * that reset is complete - */ - for (i = 0; i < 4; i++) { - /* vf reset requires driver to first reset the - * vf & than poll the status register to make sure - * that the requested op was completed - * successfully - */ - udelay(10); - reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); - if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { - rsd = true; - break; - } - } - - if (!rsd) - dev_err(&pf->pdev->dev, "VF reset check timeout %d\n", - vf->vf_id); - - /* fast disable qps */ - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { - ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j, - I40E_QUEUE_CTRL_FASTDISABLE); - ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j, - I40E_QUEUE_CTRL_FASTDISABLE); - } - - /* Queue enable/disable requires driver to - * first reset the vf & than poll the status register - * to make sure that the requested op was completed - * successfully - */ - udelay(10); - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { - ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j, - I40E_QUEUE_CTRL_FASTDISABLECHECK); - if (ret) - dev_info(&pf->pdev->dev, - "Queue control check failed on Tx queue %d of VSI %d VF %d\n", - vf->lan_vsi_index, j, vf->vf_id); - ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j, - I40E_QUEUE_CTRL_FASTDISABLECHECK); - if (ret) - dev_info(&pf->pdev->dev, - "Queue control check failed on Rx queue %d of VSI %d VF %d\n", - vf->lan_vsi_index, j, vf->vf_id); - } - - /* clear the irq settings */ - msix_vf = pf->hw.func_caps.num_msix_vectors_vf; - for (i = 0; i < msix_vf; i++) { - /* format is same for both registers */ - if (0 == i) - reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); - else - reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * - (vf->vf_id)) - + (i - 1)); - reg = 
(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | - I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); - wr32(hw, reg_idx, reg); - i40e_flush(hw); - } - /* disable interrupts so the VF starts in a known state */ - for (i = 0; i < msix_vf; i++) { - /* format is same for both registers */ - if (0 == i) - reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); - else - reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * - (vf->vf_id)) - + (i - 1)); - wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); - i40e_flush(hw); - } - - /* set the defaults for the rqctl & tqctl registers */ - reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK | - I40E_QINT_RQCTL_NEXTQ_TYPE_MASK); - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); - wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg); - wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg); - } - - /* clear the reset bit in the VPGEN_VFRTRIG reg */ - reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); - reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; - wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); - /* tell the VF the reset is done */ - wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); - i40e_flush(hw); - - return ret; -} - -/** * i40e_enable_vf_mappings * @vf: pointer to the vf info * @@ -756,6 +499,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + u32 reg_idx, reg; + int i, msix_vf; /* free vsi & disconnect it from the parent uplink */ if (vf->lan_vsi_index) { @@ -763,6 +509,35 @@ vf->lan_vsi_index = 0; vf->lan_vsi_id = 0; } + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + + /* disable interrupts so the VF starts in a known state */ + for (i = 0; i < msix_vf; i++) { + /* format is same for both registers */ + if (0 == i) + reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); + else + reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * + (vf->vf_id)) + + (i - 1)); + wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + i40e_flush(hw); + } + + /* clear the irq settings */ + for (i = 0; i < msix_vf; i++) { + /* format is same for both registers */ + if (0 == i) + reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); + else + reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * + (vf->vf_id)) + + (i - 1)); + reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); + wr32(hw, reg_idx, reg); + i40e_flush(hw); + } /* reset some of the state varibles keeping * track of the resources */ @@ -804,6 +579,111 @@ return ret; } +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_MASK 0x20 +/** + * i40e_quiesce_vf_pci + * @vf: pointer to the vf structure + * + * Wait for VF PCI transactions to be cleared after reset. Returns -EIO + * if the transactions never clear. 
+ **/ +static int i40e_quiesce_vf_pci(struct i40e_vf *vf) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + int vf_abs_id, i; + u32 reg; + + vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + + wr32(hw, I40E_PF_PCI_CIAA, + VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); + for (i = 0; i < 100; i++) { + reg = rd32(hw, I40E_PF_PCI_CIAD); + if ((reg & VF_TRANS_PENDING_MASK) == 0) + return 0; + udelay(1); + } + return -EIO; +} + +/** + * i40e_reset_vf + * @vf: pointer to the vf structure + * @flr: VFLR was issued or not + * + * reset the vf + **/ +void i40e_reset_vf(struct i40e_vf *vf, bool flr) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + bool rsd = false; + int i; + u32 reg; + + /* warn the VF */ + clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. + */ + if (!flr) { + /* reset vf using VPGEN_VFRTRIG reg */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + i40e_flush(hw); + } + + if (i40e_quiesce_vf_pci(vf)) + dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", + vf->vf_id); + + /* poll VPGEN_VFRSTAT reg to make sure + * that reset is complete + */ + for (i = 0; i < 100; i++) { + /* vf reset requires driver to first reset the + * vf & than poll the status register to make sure + * that the requested op was completed + * successfully + */ + udelay(10); + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { + rsd = true; + break; + } + } + + if (!rsd) + dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", + vf->vf_id); + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); + /* clear the reset bit in the VPGEN_VFRTRIG reg */ + reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); + reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + + /* On initial reset, we won't have any queues */ + if (vf->lan_vsi_index == 0) + goto complete_reset; + + i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); +complete_reset: + /* reallocate vf resources to reset the VSI state */ + i40e_free_vf_res(vf); + i40e_alloc_vf_res(vf); + i40e_enable_vf_mappings(vf); + set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + + /* tell the VF the reset is done */ + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + i40e_flush(hw); +} + /** * i40e_vfs_are_assigned * @pf: pointer to the pf structure @@ -816,7 +696,7 @@ struct pci_dev *vfdev; /* loop through all the VFs to see if we own any that are assigned */ - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL); + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL); while (vfdev) { /* if we don't own it we don't care */ if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) { @@ -826,12 +706,82 @@ } vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, - I40E_VF_DEVICE_ID, + I40E_DEV_ID_VF, vfdev); } return false; } +#ifdef CONFIG_PCI_IOV + +/** + * i40e_enable_pf_switch_lb + * @pf: pointer to the pf structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { 
+ dev_info(&pf->pdev->dev, + "%s couldn't get pf vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} +#endif + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the pf structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get pf vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} /** * i40e_free_vfs @@ -842,17 +792,20 @@ void i40e_free_vfs(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - int i; + u32 reg_idx, bit_idx; + int i, tmp, vf_id; if (!pf->vf) return; /* Disable interrupt 0 so we don't try to handle the VFLR. */ - wr32(hw, I40E_PFINT_DYN_CTL0, 0); - i40e_flush(hw); + i40e_irq_dynamic_disable_icr0(pf); + mdelay(10); /* let any messages in transit get finished up */ /* free up vf resources */ - for (i = 0; i < pf->num_alloc_vfs; i++) { + tmp = pf->num_alloc_vfs; + pf->num_alloc_vfs = 0; + for (i = 0; i < tmp; i++) { if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) i40e_free_vf_res(&pf->vf[i]); /* disable qp mappings */ @@ -861,20 +814,25 @@ kfree(pf->vf); pf->vf = NULL; - pf->num_alloc_vfs = 0; - if (!i40e_vfs_are_assigned(pf)) + if (!i40e_vfs_are_assigned(pf)) { pci_disable_sriov(pf->pdev); - else + /* Acknowledge VFLR for all VFS. Without this, VFs will fail to + * work correctly when SR-IOV gets re-enabled. + */ + for (vf_id = 0; vf_id < tmp; vf_id++) { + reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); + } + i40e_disable_pf_switch_lb(pf); + } else { dev_warn(&pf->pdev->dev, "unable to disable SR-IOV because VFs are assigned.\n"); + } /* Re-enable interrupt 0. 
*/ - wr32(hw, I40E_PFINT_DYN_CTL0, - I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | - (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); - i40e_flush(hw); + i40e_irq_dynamic_enable_icr0(pf); } #ifdef CONFIG_PCI_IOV @@ -885,21 +843,26 @@ * * allocate vf resources **/ -static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) { struct i40e_vf *vfs; int i, ret = 0; - ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); - if (ret) { - dev_err(&pf->pdev->dev, - "pci_enable_sriov failed with error %d!\n", ret); - pf->num_alloc_vfs = 0; - goto err_iov; - } + /* Disable interrupt 0 so we don't try to handle the VFLR. */ + i40e_irq_dynamic_disable_icr0(pf); + /* Check to see if we're just allocating resources for extant VFs */ + if (pci_num_vf(pf->pdev) != num_alloc_vfs) { + ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to enable SR-IOV, error %d.\n", ret); + pf->num_alloc_vfs = 0; + goto err_iov; + } + } /* allocate memory */ - vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL); + vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL); if (!vfs) { ret = -ENOMEM; goto err_alloc; @@ -913,11 +876,8 @@ /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); - - ret = i40e_alloc_vf_res(&vfs[i]); - i40e_reset_vf(&vfs[i], true); - if (ret) - break; + /* vf resources get allocated during reset */ + i40e_reset_vf(&vfs[i], false); /* enable vf vplan_qtable mappings */ i40e_enable_vf_mappings(&vfs[i]); @@ -925,10 +885,13 @@ pf->vf = vfs; pf->num_alloc_vfs = num_alloc_vfs; + i40e_enable_pf_switch_lb(pf); err_alloc: if (ret) i40e_free_vfs(pf); err_iov: + /* Re-enable interrupt 0. 
*/ + i40e_irq_dynamic_enable_icr0(pf); return ret; } @@ -1009,6 +972,7 @@ { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; + int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id; i40e_status aq_ret; /* single place to detect unsuccessful return values */ @@ -1028,8 +992,8 @@ vf->num_valid_msgs++; } - aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, - msg, msglen, NULL); + aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval, + msg, msglen, NULL); if (aq_ret) { dev_err(&pf->pdev->dev, "Unable to send the message to VF %d aq_err %d\n", @@ -1144,12 +1108,10 @@ * unlike other virtchnl messages, pf driver * doesn't send the response back to the vf **/ -static int i40e_vc_reset_vf_msg(struct i40e_vf *vf) +static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) - return -ENOENT; - - return i40e_reset_vf(vf, false); + if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) + i40e_reset_vf(vf, false); } /** @@ -1291,27 +1253,21 @@ /* lookout for the invalid queue index */ tempmap = map->rxq_map; - vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (vsi_queue_id < I40E_MAX_VSI_QP) { + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } - vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - vsi_queue_id + 1); } tempmap = map->txq_map; - vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (vsi_queue_id < I40E_MAX_VSI_QP) { + for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) { if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } - vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - vsi_queue_id + 1); } i40e_config_irq_link_list(vf, vsi_id, map); @@ -1337,8 +1293,6 @@ struct i40e_pf *pf = vf->pf; u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; - unsigned long tempmap; - u16 queue_id; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; @@ -1354,66 +1308,8 @@ aq_ret = I40E_ERR_PARAM; goto error_param; } - - tempmap = vqs->rx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_ENABLE); - - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - - tempmap = vqs->tx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_ENABLE); - - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - - /* Poll the status register to make sure that the - * requested op was completed successfully - */ - udelay(10); - - tempmap = vqs->rx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_ENABLECHECK)) { - dev_err(&pf->pdev->dev, - "Queue control check failed on RX queue %d of VSI %d VF %d\n", - queue_id, vsi_id, vf->vf_id); - } - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - - tempmap = vqs->tx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < 
I40E_MAX_VSI_QP) { - if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_ENABLECHECK)) { - dev_err(&pf->pdev->dev, - "Queue control check failed on TX queue %d of VSI %d VF %d\n", - queue_id, vsi_id, vf->vf_id); - } - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - + if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) + aq_ret = I40E_ERR_TIMEOUT; error_param: /* send the response to the vf */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, @@ -1436,8 +1332,6 @@ struct i40e_pf *pf = vf->pf; u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; - unsigned long tempmap; - u16 queue_id; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; @@ -1453,65 +1347,8 @@ aq_ret = I40E_ERR_PARAM; goto error_param; } - - tempmap = vqs->rx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_DISABLE); - - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - - tempmap = vqs->tx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_DISABLE); - - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - - /* Poll the status register to make sure that the - * requested op was completed successfully - */ - udelay(10); - - tempmap = vqs->rx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_DISABLECHECK)) { - dev_err(&pf->pdev->dev, - "Queue control check failed on RX queue %d of VSI %d VF %d\n", - queue_id, vsi_id, vf->vf_id); - } - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } - - tempmap = vqs->tx_queues; - queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); - while (queue_id < I40E_MAX_VSI_QP) { - if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, - I40E_QUEUE_CTRL_DISABLECHECK)) { - dev_err(&pf->pdev->dev, - "Queue control check failed on TX queue %d of VSI %d VF %d\n", - queue_id, vsi_id, vf->vf_id); - } - queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, - queue_id + 1); - } + if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) + aq_ret = I40E_ERR_TIMEOUT; error_param: /* send the response to the vf */ @@ -1554,7 +1391,7 @@ goto error_param; } i40e_update_eth_stats(vsi); - memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats)); + stats = vsi->eth_stats; error_param: /* send the response back to the vf */ @@ -1563,6 +1400,40 @@ } /** + * i40e_check_vf_permission + * @vf: pointer to the vf info + * @macaddr: pointer to the MAC Address being checked + * + * Check if the VF has permission to add or delete unicast MAC address + * filters and return error code -EPERM if not. Then check if the + * address filter requested is broadcast or zero and if so return + * an invalid MAC address error code. 
+ **/ +static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) +{ + struct i40e_pf *pf = vf->pf; + int ret = 0; + + if (is_broadcast_ether_addr(macaddr) || + is_zero_ether_addr(macaddr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); + ret = I40E_ERR_INVALID_MAC_ADDR; + } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && + !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { + /* If the host VMM administrator has set the VF MAC address + * administratively via the ndo_set_vf_mac command then deny + * permission to the VF to add or delete unicast MAC addresses. + * The VF may request to set the MAC address filter already + * assigned to it so do not return an error in that case. + */ + dev_err(&pf->pdev->dev, + "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); + ret = -EPERM; + } + return ret; +} + +/** * i40e_vc_add_mac_addr_msg * @vf: pointer to the vf info * @msg: pointer to the msg buffer @@ -1577,24 +1448,20 @@ struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = al->vsi_id; - i40e_status aq_ret = 0; + i40e_status ret = 0; int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { - aq_ret = I40E_ERR_PARAM; + ret = I40E_ERR_PARAM; goto error_param; } for (i = 0; i < al->num_elements; i++) { - if (is_broadcast_ether_addr(al->list[i].addr) || - is_zero_ether_addr(al->list[i].addr)) { - dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n", - al->list[i].addr); - aq_ret = I40E_ERR_PARAM; + ret = i40e_check_vf_permission(vf, al->list[i].addr); + if (ret) goto error_param; - } } vsi = pf->vsi[vsi_id]; @@ -1603,7 +1470,7 @@ struct i40e_mac_filter *f; f = i40e_find_mac(vsi, al->list[i].addr, true, false); - if (f) { + if (!f) { if (i40e_is_vsi_in_vlan(vsi)) f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, true, false); @@ -1615,7 +1482,7 @@ if (!f) { dev_err(&pf->pdev->dev, "Unable to add VF MAC filter\n"); - aq_ret = I40E_ERR_PARAM; + ret = I40E_ERR_PARAM; goto error_param; } } @@ -1627,7 +1494,7 @@ error_param: /* send the response to the vf */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - aq_ret); + ret); } /** @@ -1645,15 +1512,25 @@ struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = al->vsi_id; - i40e_status aq_ret = 0; + i40e_status ret = 0; int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { - aq_ret = I40E_ERR_PARAM; + ret = I40E_ERR_PARAM; goto error_param; } + + for (i = 0; i < al->num_elements; i++) { + if (is_broadcast_ether_addr(al->list[i].addr) || + is_zero_ether_addr(al->list[i].addr)) { + dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", + al->list[i].addr); + ret = I40E_ERR_INVALID_MAC_ADDR; + goto error_param; + } + } vsi = pf->vsi[vsi_id]; /* delete addresses from the list */ @@ -1668,7 +1545,7 @@ error_param: /* send the response to the vf */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - aq_ret); + ret); } /** @@ -1777,30 +1654,6 @@ } /** - * i40e_vc_fcoe_msg - * @vf: pointer to the vf info - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * called from the vf for the fcoe msgs - **/ -static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) -{ - i40e_status aq_ret = 0; - - if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) 
|| - !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) { - aq_ret = I40E_ERR_PARAM; - goto error_param; - } - aq_ret = I40E_ERR_NOT_IMPLEMENTED; - -error_param: - /* send the response to the vf */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret); -} - -/** * i40e_vc_validate_vf_msg * @vf: pointer to the vf info * @msg: pointer to the msg buffer @@ -1920,19 +1773,24 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { - struct i40e_vf *vf = &(pf->vf[vf_id]); struct i40e_hw *hw = &pf->hw; + unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; + struct i40e_vf *vf; int ret; pf->vf_aq_requests++; + if (local_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + vf = &(pf->vf[local_vf_id]); /* perform basic checks on the msg */ ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); if (ret) { - dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id); + dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", + local_vf_id, v_opcode, msglen); return ret; } - wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE); + switch (v_opcode) { case I40E_VIRTCHNL_OP_VERSION: ret = i40e_vc_get_version_msg(vf); @@ -1941,7 +1799,8 @@ ret = i40e_vc_get_vf_resources_msg(vf); break; case I40E_VIRTCHNL_OP_RESET_VF: - ret = i40e_vc_reset_vf_msg(vf); + i40e_vc_reset_vf_msg(vf); + ret = 0; break; case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); @@ -1973,13 +1832,10 @@ case I40E_VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_FCOE: - ret = i40e_vc_fcoe_msg(vf, msg, msglen); - break; case I40E_VIRTCHNL_OP_UNKNOWN: default: - dev_err(&pf->pdev->dev, - "Unsupported opcode %d from vf %d\n", v_opcode, vf_id); + dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", + v_opcode, local_vf_id); ret = i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_NOT_IMPLEMENTED); break; @@ -2015,19 +1871,8 @@ /* clear the bit in GLGEN_VFLRSTAT */ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); - if (i40e_reset_vf(vf, true)) - dev_err(&pf->pdev->dev, - "Unable to reset the VF %d\n", vf_id); - /* free up vf resources to destroy vsi state */ - i40e_free_vf_res(vf); - - /* allocate new vf resources with the default state */ - if (i40e_alloc_vf_res(vf)) - dev_err(&pf->pdev->dev, - "Unable to allocate VF resources %d\n", - vf_id); - - i40e_enable_vf_mappings(vf); + if (!test_bit(__I40E_DOWN, &pf->state)) + i40e_reset_vf(vf, true); } } @@ -2078,15 +1923,28 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) { struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf = pf->vf; + struct i40e_link_status *ls = &pf->hw.phy.link_info; + int i; pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; - pfe.event_data.link_event.link_status = - pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; - - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); + for (i = 0; i < pf->num_alloc_vfs; i++) { + if (vf->link_forced) { + pfe.event_data.link_event.link_status = vf->link_up; + pfe.event_data.link_event.link_speed = + (vf->link_up ? 
I40E_LINK_SPEED_40GB : 0); + } else { + pfe.event_data.link_event.link_status = + ls->link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = ls->link_speed; + } + i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), + NULL); + vf++; + } } /** @@ -2183,6 +2041,7 @@ goto error_param; } memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN); + vf->pf_set_mac = true; dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); ret = 0; @@ -2229,6 +2088,20 @@ goto error_pvid; } + if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) + dev_err(&pf->pdev->dev, + "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", + vf_id); + + /* Check for condition where there was already a port VLAN ID + * filter set and now it is being deleted by setting it to zero. + * Before deleting all the old VLAN filters we must add new ones + * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our + * MAC addresses deleted. + */ + if (!(vlan_id || qos) && vsi->info.pvid) + ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); + if (vsi->info.pvid) { /* kill old VLAN */ ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & @@ -2243,7 +2116,7 @@ ret = i40e_vsi_add_pvid(vsi, vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)); else - i40e_vlan_stripping_disable(vsi); + i40e_vsi_remove_pvid(vsi); if (vlan_id) { dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", @@ -2257,12 +2130,20 @@ vsi->back->hw.aq.asq_last_status); goto error_pvid; } + /* Kill non-vlan MAC filters - ignore error return since + * there might not be any non-vlan MAC filters. + */ + i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY); } if (ret) { dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); goto error_pvid; } + /* The Port VLAN needs to be saved across resets the same as the + * default LAN MAC address. 
+ */ + vf->port_vlan_id = le16_to_cpu(vsi->info.pvid); ret = 0; error_pvid: @@ -2294,7 +2175,6 @@ int vf_id, struct ifla_vf_info *ivi) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_mac_filter *f, *ftmp; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_vf *vf; @@ -2318,11 +2198,7 @@ ivi->vf = vf_id; - /* first entry of the list is the default ethernet address */ - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS); - break; - } + memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); ivi->tx_rate = 0; ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK; @@ -2333,3 +2209,64 @@ error_param: return ret; } + +/** + * i40e_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: vf identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_virtchnl_pf_event pfe; + struct i40e_hw *hw = &pf->hw; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + ret = -EINVAL; + goto error_out; + } + + vf = &pf->vf[vf_id]; + + pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + pfe.event_data.link_event.link_status = + pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; + pfe.event_data.link_event.link_speed = + pf->hw.phy.link_info.link_speed; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + pfe.event_data.link_event.link_status = true; + pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + pfe.event_data.link_event.link_status = false; + pfe.event_data.link_event.link_speed = 0; + break; + default: + ret = -EINVAL; + goto error_out; + } + /* Notify the VF of its new link state */ + i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, + 0, (u8 *)&pfe, sizeof(pfe), NULL); + +error_out: + return ret; +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
@@ -82,6 +81,8 @@ struct i40e_virtchnl_ether_addr default_lan_addr; struct i40e_virtchnl_ether_addr default_fcoe_addr; + u16 port_vlan_id; + bool pf_set_mac; /* The VMM admin set the VF MAC address */ /* VSI indices - actual VSI pointers are maintained in the PF structure * When assigned, these will be non-zero, because VSI 0 is always @@ -97,14 +98,17 @@ unsigned long vf_caps; /* vf's adv. capabilities */ unsigned long vf_states; /* vf's runtime states */ + bool link_forced; + bool link_up; /* only valid if vf link is forced */ }; void i40e_free_vfs(struct i40e_pf *pf); int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); -int i40e_reset_vf(struct i40e_vf *vf, bool flr); +void i40e_reset_vf(struct i40e_vf *vf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); /* vf configuration related iplink handlers */ @@ -114,6 +118,8 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); + void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/Makefile +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/Makefile @@ -0,0 +1,33 @@ +################################################################################ +# +# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver +# Copyright(c) 2013 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +## Makefile for the Intel(R) 40GbE VF driver +# +# + +obj-$(CONFIG_I40EVF) += i40evf.o + +i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ + i40e_txrx.o i40e_common.o i40e_adminq.o + --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -0,0 +1,927 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_status.h" +#include "i40e_type.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" + +/** + * i40e_adminq_init_regs - Initialize AdminQ registers + * @hw: pointer to the hardware structure + * + * This assumes the alloc_asq and alloc_arq functions have already been called + **/ +static void i40e_adminq_init_regs(struct i40e_hw *hw) +{ + /* set head and tail registers in our local struct */ + if (hw->mac.type == I40E_MAC_VF) { + hw->aq.asq.tail = I40E_VF_ATQT1; + hw->aq.asq.head = I40E_VF_ATQH1; + hw->aq.asq.len = I40E_VF_ATQLEN1; + hw->aq.arq.tail = I40E_VF_ARQT1; + hw->aq.arq.head = I40E_VF_ARQH1; + hw->aq.arq.len = I40E_VF_ARQLEN1; + } else { + hw->aq.asq.tail = I40E_PF_ATQT; + hw->aq.asq.head = I40E_PF_ATQH; + hw->aq.asq.len = I40E_PF_ATQLEN; + hw->aq.arq.tail = I40E_PF_ARQT; + hw->aq.arq.head = I40E_PF_ARQH; + hw->aq.arq.len = I40E_PF_ARQLEN; + } +} + +/** + * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf, + i40e_mem_atq_ring, + (hw->aq.num_asq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + return ret_code; + + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf, + (hw->aq.num_asq_entries * + sizeof(struct i40e_asq_cmd_details))); + if (ret_code) { + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + return ret_code; + } + + return ret_code; +} + +/** + * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw) +{ + i40e_status ret_code; + + ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf, + i40e_mem_arq_ring, + (hw->aq.num_arq_entries * + sizeof(struct i40e_aq_desc)), + I40E_ADMINQ_DESC_ALIGNMENT); + + return ret_code; +} + +/** + * i40e_free_adminq_asq - Free Admin Queue send rings + * @hw: pointer to the hardware structure + * + * This assumes the posted send buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_asq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); +} + +/** + * i40e_free_adminq_arq - Free Admin Queue receive rings + * @hw: pointer to the hardware structure + * + * This assumes the posted receive buffers have already been cleaned + * and de-allocated + **/ +static void i40e_free_adminq_arq(struct i40e_hw *hw) +{ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); +} + +/** + * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + int i; + + /* We'll be allocating the buffer info memory first, then we can + * allocate the mapped buffers for the event processing + */ + + /* buffer_info structures do not need alignment */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, + 
(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_arq_bufs; + hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_arq_entries; i++) { + bi = &hw->aq.arq.r.arq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_arq_buf, + hw->aq.arq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_arq_bufs; + + /* now configure the descriptors for use */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, i); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->opcode = 0; + /* This is in accordance with Admin queue design, there is no + * register for buffer size configuration + */ + desc->datalen = cpu_to_le16((u16)bi->size); + desc->retval = 0; + desc->cookie_high = 0; + desc->cookie_low = 0; + desc->params.external.addr_high = + cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = + cpu_to_le32(lower_32_bits(bi->pa)); + desc->params.external.param0 = 0; + desc->params.external.param1 = 0; + } + +alloc_arq_bufs: + return ret_code; + +unwind_alloc_arq_bufs: + /* don't try to free the one that failed... */ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); + + return ret_code; +} + +/** + * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue + * @hw: pointer to the hardware structure + **/ +static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw) +{ + i40e_status ret_code; + struct i40e_dma_mem *bi; + int i; + + /* No mapped memory needed yet, just the buffer info structures */ + ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, + (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem))); + if (ret_code) + goto alloc_asq_bufs; + hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va; + + /* allocate the mapped buffers */ + for (i = 0; i < hw->aq.num_asq_entries; i++) { + bi = &hw->aq.asq.r.asq_bi[i]; + ret_code = i40e_allocate_dma_mem(hw, bi, + i40e_mem_asq_buf, + hw->aq.asq_buf_size, + I40E_ADMINQ_DESC_ALIGNMENT); + if (ret_code) + goto unwind_alloc_asq_bufs; + } +alloc_asq_bufs: + return ret_code; + +unwind_alloc_asq_bufs: + /* don't try to free the one that failed... 
*/ + i--; + for (; i >= 0; i--) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); + + return ret_code; +} + +/** + * i40e_free_arq_bufs - Free receive queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_arq_bufs(struct i40e_hw *hw) +{ + int i; + + /* free descriptors */ + for (i = 0; i < hw->aq.num_arq_entries; i++) + i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.arq.dma_head); +} + +/** + * i40e_free_asq_bufs - Free send queue buffer info elements + * @hw: pointer to the hardware structure + **/ +static void i40e_free_asq_bufs(struct i40e_hw *hw) +{ + int i; + + /* only unmap if the address is non-NULL */ + for (i = 0; i < hw->aq.num_asq_entries; i++) + if (hw->aq.asq.r.asq_bi[i].pa) + i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); + + /* free the buffer info list */ + i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + + /* free the descriptor memory */ + i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + + /* free the dma header */ + i40e_free_virt_mem(hw, &hw->aq.asq.dma_head); +} + +/** + * i40e_config_asq_regs - configure ASQ registers + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the transmit queue + **/ +static void i40e_config_asq_regs(struct i40e_hw *hw) +{ + if (hw->mac.type == I40E_MAC_VF) { + /* configure the transmit queue */ + wr32(hw, I40E_VF_ATQBAH1, + upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_VF_ATQBAL1, + lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries | + I40E_VF_ATQLEN1_ATQENABLE_MASK)); + } else { + /* configure the transmit queue */ + wr32(hw, I40E_PF_ATQBAH, + upper_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_PF_ATQBAL, + lower_32_bits(hw->aq.asq.desc_buf.pa)); + wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries | + I40E_PF_ATQLEN_ATQENABLE_MASK)); + } +} + +/** + * i40e_config_arq_regs - ARQ register configuration + * @hw: pointer to the hardware structure + * + * Configure base address and length registers for the receive (event queue) + **/ +static void i40e_config_arq_regs(struct i40e_hw *hw) +{ + if (hw->mac.type == I40E_MAC_VF) { + /* configure the receive queue */ + wr32(hw, I40E_VF_ARQBAH1, + upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_VF_ARQBAL1, + lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries | + I40E_VF_ARQLEN1_ARQENABLE_MASK)); + } else { + /* configure the receive queue */ + wr32(hw, I40E_PF_ARQBAH, + upper_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_PF_ARQBAL, + lower_32_bits(hw->aq.arq.desc_buf.pa)); + wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries | + I40E_PF_ARQLEN_ARQENABLE_MASK)); + } + + /* Update tail in the HW to post pre-allocated buffers */ + wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); +} + +/** + * i40e_init_asq - main initialization routine for ASQ + * @hw: pointer to the hardware structure + * + * This is the main initialization routine for the Admin Send Queue + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_asq(struct i40e_hw *hw) +{ + 
i40e_status ret_code = 0; + + if (hw->aq.asq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_asq_entries == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + hw->aq.asq.count = hw->aq.num_asq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_asq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_asq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + i40e_config_asq_regs(hw); + + /* success! */ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_asq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_init_arq - initialize ARQ + * @hw: pointer to the hardware structure + * + * The main initialization routine for the Admin Receive (Event) Queue. + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.arq_buf_size + * + * Do *NOT* hold the lock when calling this as the memory allocation routines + * called are not going to be atomic context safe + **/ +static i40e_status i40e_init_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count > 0) { + /* queue already initialized */ + ret_code = I40E_ERR_NOT_READY; + goto init_adminq_exit; + } + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.arq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + hw->aq.arq.count = hw->aq.num_arq_entries; + + /* allocate the ring memory */ + ret_code = i40e_alloc_adminq_arq_ring(hw); + if (ret_code) + goto init_adminq_exit; + + /* allocate buffers in the rings */ + ret_code = i40e_alloc_arq_bufs(hw); + if (ret_code) + goto init_adminq_free_rings; + + /* initialize base registers */ + i40e_config_arq_regs(hw); + + /* success! 
*/ + goto init_adminq_exit; + +init_adminq_free_rings: + i40e_free_adminq_arq(hw); + +init_adminq_exit: + return ret_code; +} + +/** + * i40e_shutdown_asq - shutdown the ASQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Send Queue + **/ +static i40e_status i40e_shutdown_asq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.asq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.asq.head, 0); + wr32(hw, hw->aq.asq.tail, 0); + wr32(hw, hw->aq.asq.len, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.asq_mutex); + + hw->aq.asq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_asq_bufs(hw); + + mutex_unlock(&hw->aq.asq_mutex); + + return ret_code; +} + +/** + * i40e_shutdown_arq - shutdown ARQ + * @hw: pointer to the hardware structure + * + * The main shutdown routine for the Admin Receive Queue + **/ +static i40e_status i40e_shutdown_arq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (hw->aq.arq.count == 0) + return I40E_ERR_NOT_READY; + + /* Stop firmware AdminQ processing */ + wr32(hw, hw->aq.arq.head, 0); + wr32(hw, hw->aq.arq.tail, 0); + wr32(hw, hw->aq.arq.len, 0); + + /* make sure lock is available */ + mutex_lock(&hw->aq.arq_mutex); + + hw->aq.arq.count = 0; /* to indicate uninitialized queue */ + + /* free ring buffers */ + i40e_free_arq_bufs(hw); + + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +/** + * i40evf_init_adminq - main initialization routine for Admin Queue + * @hw: pointer to the hardware structure + * + * Prior to calling this function, drivers *MUST* set the following fields + * in the hw->aq structure: + * - hw->aq.num_asq_entries + * - hw->aq.num_arq_entries + * - hw->aq.arq_buf_size + * - hw->aq.asq_buf_size + **/ +i40e_status i40evf_init_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code; + + /* verify input for valid configuration */ + if ((hw->aq.num_arq_entries == 0) || + (hw->aq.num_asq_entries == 0) || + (hw->aq.arq_buf_size == 0) || + (hw->aq.asq_buf_size == 0)) { + ret_code = I40E_ERR_CONFIG; + goto init_adminq_exit; + } + + /* initialize locks */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + + /* Set up register offsets */ + i40e_adminq_init_regs(hw); + + /* allocate the ASQ */ + ret_code = i40e_init_asq(hw); + if (ret_code) + goto init_adminq_destroy_locks; + + /* allocate the ARQ */ + ret_code = i40e_init_arq(hw); + if (ret_code) + goto init_adminq_free_asq; + + /* success! 
*/ + goto init_adminq_exit; + +init_adminq_free_asq: + i40e_shutdown_asq(hw); +init_adminq_destroy_locks: + +init_adminq_exit: + return ret_code; +} + +/** + * i40evf_shutdown_adminq - shutdown routine for the Admin Queue + * @hw: pointer to the hardware structure + **/ +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) +{ + i40e_status ret_code = 0; + + if (i40evf_check_asq_alive(hw)) + i40evf_aq_queue_shutdown(hw, true); + + i40e_shutdown_asq(hw); + i40e_shutdown_arq(hw); + + /* destroy the locks */ + + return ret_code; +} + +/** + * i40e_clean_asq - cleans Admin send queue + * @hw: pointer to the hardware structure + * + * returns the number of free desc + **/ +static u16 i40e_clean_asq(struct i40e_hw *hw) +{ + struct i40e_adminq_ring *asq = &(hw->aq.asq); + struct i40e_asq_cmd_details *details; + u16 ntc = asq->next_to_clean; + struct i40e_aq_desc desc_cb; + struct i40e_aq_desc *desc; + + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + while (rd32(hw, hw->aq.asq.head) != ntc) { + if (details->callback) { + I40E_ADMINQ_CALLBACK cb_func = + (I40E_ADMINQ_CALLBACK)details->callback; + desc_cb = *desc; + cb_func(hw, &desc_cb); + } + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + memset((void *)details, 0, + sizeof(struct i40e_asq_cmd_details)); + ntc++; + if (ntc == asq->count) + ntc = 0; + desc = I40E_ADMINQ_DESC(*asq, ntc); + details = I40E_ADMINQ_DETAILS(*asq, ntc); + } + + asq->next_to_clean = ntc; + + return I40E_DESC_UNUSED(asq); +} + +/** + * i40evf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + **/ +bool i40evf_asq_done(struct i40e_hw *hw) +{ + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; + +} + +/** + * i40evf_asq_send_command - send command to Admin Queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor describing the command (non DMA mem) + * @buff: buffer to use for indirect commands + * @buff_size: size of buffer for indirect commands + * @cmd_details: pointer to command details structure + * + * This is the main send command driver routine for the Admin Queue send + * queue. It runs the queue, cleans the queue, etc + **/ +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status = 0; + struct i40e_dma_mem *dma_buff = NULL; + struct i40e_asq_cmd_details *details; + struct i40e_aq_desc *desc_on_ring; + bool cmd_completed = false; + u16 retval = 0; + + if (hw->aq.asq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Admin queue not initialized.\n"); + status = I40E_ERR_QUEUE_EMPTY; + goto asq_send_command_exit; + } + + details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); + if (cmd_details) { + *details = *cmd_details; + + /* If the cmd_details are defined copy the cookie. 
The + * cpu_to_le32 is not needed here because the data is ignored + * by the FW, only used by the driver + */ + if (details->cookie) { + desc->cookie_high = + cpu_to_le32(upper_32_bits(details->cookie)); + desc->cookie_low = + cpu_to_le32(lower_32_bits(details->cookie)); + } + } else { + memset(details, 0, sizeof(struct i40e_asq_cmd_details)); + } + + /* clear requested flags and then set additional flags if defined */ + desc->flags &= ~cpu_to_le16(details->flags_dis); + desc->flags |= cpu_to_le16(details->flags_ena); + + mutex_lock(&hw->aq.asq_mutex); + + if (buff_size > hw->aq.asq_buf_size) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Invalid buffer size: %d.\n", + buff_size); + status = I40E_ERR_INVALID_SIZE; + goto asq_send_command_error; + } + + if (details->postpone && !details->async) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Async flag not set along with postpone flag"); + status = I40E_ERR_PARAM; + goto asq_send_command_error; + } + + /* call clean and check queue available function to reclaim the + * descriptors that were processed by FW, the function returns the + * number of desc available + */ + /* the clean function called here could be called in a separate thread + * in case of asynchronous completions + */ + if (i40e_clean_asq(hw) == 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Error queue is full.\n"); + status = I40E_ERR_ADMIN_QUEUE_FULL; + goto asq_send_command_error; + } + + /* initialize the temp desc pointer with the right desc */ + desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use); + + /* if the desc is available copy the temp desc to the right place */ + *desc_on_ring = *desc; + + /* if buff is not NULL assume indirect command */ + if (buff != NULL) { + dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]); + /* copy the user buff into the respective DMA buff */ + memcpy(dma_buff->va, buff, buff_size); + desc_on_ring->datalen = cpu_to_le16(buff_size); + + /* Update the address values in the desc with the pa value + * for respective buffer + */ + desc_on_ring->params.external.addr_high = + cpu_to_le32(upper_32_bits(dma_buff->pa)); + desc_on_ring->params.external.addr_low = + cpu_to_le32(lower_32_bits(dma_buff->pa)); + } + + /* bump the tail */ + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff); + (hw->aq.asq.next_to_use)++; + if (hw->aq.asq.next_to_use == hw->aq.asq.count) + hw->aq.asq.next_to_use = 0; + if (!details->postpone) + wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); + + /* if cmd_details are not defined or async flag is not set, + * we need to wait for desc write back + */ + if (!details->async && !details->postpone) { + u32 total_delay = 0; + u32 delay_len = 10; + + do { + /* AQ designers suggest use of head for better + * timing reliability than DD bit + */ + if (i40evf_asq_done(hw)) + break; + /* ugh! 
delay while spin_lock */ + udelay(delay_len); + total_delay += delay_len; + } while (total_delay < I40E_ASQ_CMD_TIMEOUT); + } + + /* if ready, copy the desc back to temp */ + if (i40evf_asq_done(hw)) { + *desc = *desc_on_ring; + if (buff != NULL) + memcpy(buff, dma_buff->va, buff_size); + retval = le16_to_cpu(desc->retval); + if (retval != 0) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Command completed with error 0x%X.\n", + retval); + /* strip off FW internal code */ + retval &= 0xff; + } + cmd_completed = true; + if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK) + status = 0; + else + status = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; + } + + /* update the error if time out occurred */ + if ((!cmd_completed) && + (!details->async && !details->postpone)) { + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } + +asq_send_command_error: + mutex_unlock(&hw->aq.asq_mutex); +asq_send_command_exit: + return status; +} + +/** + * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function + * @desc: pointer to the temp descriptor (non DMA mem) + * @opcode: the opcode can be used to decide which flags to turn off or on + * + * Fill the desc with default values + **/ +void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode) +{ + /* zero out the desc */ + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI); +} + +/** + * i40evf_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *pending) +{ + i40e_status ret_code = 0; + u16 ntc = hw->aq.arq.next_to_clean; + struct i40e_aq_desc *desc; + struct i40e_dma_mem *bi; + u16 desc_idx; + u16 datalen; + u16 flags; + u16 ntu; + + /* take the lock before we start messing with the ring */ + mutex_lock(&hw->aq.arq_mutex); + + /* set next_to_use to head */ + ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + if (ntu == ntc) { + /* nothing to do - shouldn't need to update ring's values */ + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Queue is empty.\n"); + ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; + goto clean_arq_element_out; + } + + /* now clean the next descriptor */ + desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); + desc_idx = ntc; + i40evf_debug_aq(hw, + I40E_DEBUG_AQ_COMMAND, + (void *)desc, + hw->aq.arq.r.arq_bi[desc_idx].va); + + flags = le16_to_cpu(desc->flags); + if (flags & I40E_AQ_FLAG_ERR) { + ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + hw->aq.arq_last_status = + (enum i40e_admin_queue_err)le16_to_cpu(desc->retval); + i40e_debug(hw, + I40E_DEBUG_AQ_MESSAGE, + "AQRX: Event received with error 0x%X.\n", + hw->aq.arq_last_status); + } else { + e->desc = *desc; + datalen = le16_to_cpu(desc->datalen); + e->msg_size = min(datalen, e->msg_size); + if (e->msg_buf != NULL && (e->msg_size != 0)) + memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, + e->msg_size); + } + + /* Restore the original datalen and buffer address in the desc, + * FW updates datalen to indicate the event message + * size + */ + bi = &hw->aq.arq.r.arq_bi[ntc]; + memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + + desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); + if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) + desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB); + desc->datalen = cpu_to_le16((u16)bi->size); + desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa)); + desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); + + /* set tail = the last cleaned desc index. */ + wr32(hw, hw->aq.arq.tail, ntc); + /* ntc is updated to tail + 1 */ + ntc++; + if (ntc == hw->aq.num_arq_entries) + ntc = 0; + hw->aq.arq.next_to_clean = ntc; + hw->aq.arq.next_to_use = ntu; + +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending != NULL) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); + mutex_unlock(&hw->aq.arq_mutex); + + return ret_code; +} + +void i40evf_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -0,0 +1,106 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_H_ +#define _I40E_ADMINQ_H_ + +#include "i40e_osdep.h" +#include "i40e_adminq_cmd.h" + +#define I40E_ADMINQ_DESC(R, i) \ + (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i])) + +#define I40E_ADMINQ_DESC_ALIGNMENT 4096 + +struct i40e_adminq_ring { + struct i40e_virt_mem dma_head; /* space for dma structures */ + struct i40e_dma_mem desc_buf; /* descriptor ring memory */ + struct i40e_virt_mem cmd_buf; /* command buffer memory */ + + union { + struct i40e_dma_mem *asq_bi; + struct i40e_dma_mem *arq_bi; + } r; + + u16 count; /* Number of descriptors */ + u16 rx_buf_len; /* Admin Receive Queue buffer length */ + + /* used for interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + /* used for queue tracking */ + u32 head; + u32 tail; + u32 len; +}; + +/* ASQ transaction details */ +struct i40e_asq_cmd_details { + void *callback; /* cast from type I40E_ADMINQ_CALLBACK */ + u64 cookie; + u16 flags_ena; + u16 flags_dis; + bool async; + bool postpone; +}; + +#define I40E_ADMINQ_DETAILS(R, i) \ + (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i])) + +/* ARQ event information */ +struct i40e_arq_event_info { + struct i40e_aq_desc desc; + u16 msg_size; + u8 *msg_buf; +}; + +/* Admin Queue information */ +struct i40e_adminq_info { + struct i40e_adminq_ring arq; /* receive queue */ + struct i40e_adminq_ring asq; /* send queue */ + u16 num_arq_entries; /* receive queue depth */ + u16 num_asq_entries; /* send queue depth */ + u16 arq_buf_size; /* receive queue buffer size */ + u16 asq_buf_size; /* send queue buffer size */ + u16 fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u16 api_maj_ver; /* api major version */ + u16 api_min_ver; /* api minor version */ + + struct mutex asq_mutex; /* Send queue lock */ + struct mutex arq_mutex; /* Receive queue lock */ + + /* last status values on send and receive queues */ + enum i40e_admin_queue_err asq_last_status; + enum i40e_admin_queue_err arq_last_status; +}; + +/* general information */ +#define I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ + +void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, + u16 opcode); + +#endif /* _I40E_ADMINQ_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -0,0 +1,2153 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0001 +#define I40E_FW_API_VERSION_A0_MINOR 0x0000 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + 
I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + i40e_aqc_opc_set_storm_control_config = 0x0280, + i40e_aqc_opc_get_storm_control_config = 0x0281, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + 
i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_reset = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + + /* debug commands */ + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_read_reg_sg = 0xFF05, + i40e_aqc_opc_debug_write_reg_sg = 0xFF06, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
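+ * For example (illustration only, not part of the original comment): if
+ * struct i40e_aqc_get_version were padded to anything other than 16 bytes,
+ * I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) would expand to a divide by
+ * zero in the enum initializer and the build would fail, instead of the
+ * descriptor overlay silently breaking at run time.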
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define 
I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; +}; + +/* Set NS Proxy Table Entry Command (indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 
reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 +#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8 + __le32 
addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) 
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress table */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define 
I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; 
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_add_remove_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007 +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + 
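+/* Illustrative note (not part of the original header): for the Add/Remove
+ * Cloud Filters commands above, the command descriptor would typically carry
+ * struct i40e_aqc_add_remove_cloud_filters in its params.raw area, with
+ * addr_high/addr_low pointing at a DMA buffer that holds num_filters copies
+ * of struct i40e_aqc_add_remove_cloud_filters_element_data, and with the BUF
+ * flag (plus RD when the buffer carries data for firmware) set in the
+ * descriptor flags.
+ */
+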
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* Set Storm Control Configuration (direct 0x0280) + * Get Storm Control Configuration (direct 0x0281) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_set_get_storm_control_config { + __le32 broadcast_threshold; + __le32 multicast_threshold; + __le32 control_flags; +#define I40E_AQC_STORM_CONTROL_MDIPW 0x01 +#define I40E_AQC_STORM_CONTROL_MDICW 0x02 +#define I40E_AQC_STORM_CONTROL_BDIPW 0x04 +#define I40E_AQC_STORM_CONTROL_BDICW 0x08 +#define I40E_AQC_STORM_CONTROL_BIDU 0x10 +#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8 +#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \ + I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT) + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +/* Configure VSI 
Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_flags; + u8 reserved2[17]; + u8 tc_bw_share_credits[8]; + u8 reserved3[96]; +}; + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +/* Query Switching Component Bandwidth Allocation per Traffic Type + * (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Get and set the active HMC resource profile and status. 
+ * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1B, + I40E_PHY_TYPE_MAX +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_FLAG_AN_SHIFT 3 +#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT) +#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */ +#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01 +#define I40E_AQ_PHY_FLAG_AN_ON 0x02 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; 
+ u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 
0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Reset command (0x0622) */ +struct i40e_aqc_set_phy_reset { + u8 reset_flags; +#define I40E_AQ_PHY_RESET_REQUEST 0x02 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define 
I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Apply MIB changes (0x0A07) + * uses the generic struc as it contains no data + */ + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 header_len; /* in DWords, 1 to 15 */ + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0 +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x3 + u8 variable_udp_length; +#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0 +#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1 + u8 udp_key_index; +#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1 +#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 pf_filters; + u8 total_filters; + u8 reserved2[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved; + u8 tunnels_free; + u8 reserved1[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure_A0 { + __le16 key1_off; + __le16 key1_len; + __le16 key2_off; + 
__le16 key2_len; + __le16 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 resreved[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0); + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + 
__le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_alloc.h @@ -0,0 +1,55 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_ALLOC_H_ +#define _I40E_ALLOC_H_ + +struct i40e_hw; + +/* Memory allocation types */ +enum i40e_memory_type { + i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */ + i40e_mem_asq_buf = 1, + i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */ + i40e_mem_arq_ring = 3, /* ARQ descriptor ring */ + i40e_mem_atq_ring = 4, /* ATQ descriptor ring */ + i40e_mem_pd = 5, /* Page Descriptor */ + i40e_mem_bp = 6, /* Backing Page - 4KB */ + i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */ + i40e_mem_reserved +}; + +/* prototype for functions used for dynamic memory allocation */ +i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + enum i40e_memory_type type, + u64 size, u32 alignment); +i40e_status i40e_free_dma_mem(struct i40e_hw *hw, + struct i40e_dma_mem *mem); +i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem, + u32 size); +i40e_status i40e_free_virt_mem(struct i40e_hw *hw, + struct i40e_virt_mem *mem); + +#endif /* _I40E_ALLOC_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -0,0 +1,619 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_type.h" +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_virtchnl.h" + +/** + * i40e_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + **/ +i40e_status i40e_set_mac_type(struct i40e_hw *hw) +{ + i40e_status status = 0; + + if (hw->vendor_id == PCI_VENDOR_ID_INTEL) { + switch (hw->device_id) { + case I40E_DEV_ID_SFP_XL710: + case I40E_DEV_ID_SFP_X710: + case I40E_DEV_ID_QEMU: + case I40E_DEV_ID_KX_A: + case I40E_DEV_ID_KX_B: + case I40E_DEV_ID_KX_C: + case I40E_DEV_ID_KX_D: + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + hw->mac.type = I40E_MAC_XL710; + break; + case I40E_DEV_ID_VF: + case I40E_DEV_ID_VF_HV: + hw->mac.type = I40E_MAC_VF; + break; + default: + hw->mac.type = I40E_MAC_GENERIC; + break; + } + } else { + status = I40E_ERR_DEVICE_NOT_SUPPORTED; + } + + hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, status); + return status; +} + +/** + * i40evf_debug_aq + * @hw: debug mask related to admin queue + * @mask: debug mask + * @desc: pointer to admin queue descriptor + * @buffer: pointer to command buffer + * + * Dumps debug log about adminq command with descriptor contents. + **/ +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, + void *buffer) +{ + struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u8 *aq_buffer = (u8 *)buffer; + u32 data[4]; + u32 i = 0; + + if ((!(mask & hw->debug_mask)) || (desc == NULL)) + return; + + i40e_debug(hw, mask, + "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + aq_desc->opcode, aq_desc->flags, aq_desc->datalen, + aq_desc->retval); + i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", + aq_desc->cookie_high, aq_desc->cookie_low); + i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", + aq_desc->params.internal.param0, + aq_desc->params.internal.param1); + i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", + aq_desc->params.external.addr_high, + aq_desc->params.external.addr_low); + + if ((buffer != NULL) && (aq_desc->datalen != 0)) { + memset(data, 0, sizeof(data)); + i40e_debug(hw, mask, "AQ CMD Buffer:\n"); + for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { + data[((i % 16) / 4)] |= + ((u32)aq_buffer[i]) << (8 * (i % 4)); + if ((i % 16) == 15) { + i40e_debug(hw, mask, + "\t0x%04X %08X %08X %08X %08X\n", + i - 15, data[0], data[1], data[2], + data[3]); + memset(data, 0, sizeof(data)); + } + } + if ((i % 16) != 0) + i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", + i - (i % 16), data[0], data[1], data[2], + data[3]); + } +} + +/** + * i40evf_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40evf_check_asq_alive(struct i40e_hw *hw) +{ + return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK); +} + +/** + * i40evf_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
+ **/ +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_queue_shutdown *cmd = + (struct i40e_aqc_queue_shutdown *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_queue_shutdown); + + if (unloading) + cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); + status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL); + + return status; +} + + +/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT i40evf_ptype_lookup[ptype].known + * THEN + * Packet is unknown + * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + * Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + * Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ + { PTYPE, \ + 1, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP, \ + I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ + I40E_RX_PTYPE_##OUTER_FRAG, \ + I40E_RX_PTYPE_TUNNEL_##T, \ + I40E_RX_PTYPE_TUNNEL_END_##TE, \ + I40E_RX_PTYPE_##TEF, \ + I40E_RX_PTYPE_INNER_PROT_##I, \ + I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ + { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG +#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { + /* L2 Packet types */ + I40E_PTT_UNUSED_ENTRY(0), + I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), + I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(4), + I40E_PTT_UNUSED_ENTRY(5), + I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT_UNUSED_ENTRY(8), + I40E_PTT_UNUSED_ENTRY(9), + I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), + I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + + /* Non Tunneled IPv4 */ + I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(25), + 
I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(32), + I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(39), + I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(47), + I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(54), + I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(62), + I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(69), + I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(77), + I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, 
NOF, TCP, PAY4), + I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(84), + I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT_UNUSED_ENTRY(91), + I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(98), + I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(105), + I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(113), + I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(120), + I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(128), + I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* 
IPv6 --> GRE/NAT -> MAC -> IPv6 */ + I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(135), + I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(143), + I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + I40E_PTT_UNUSED_ENTRY(150), + I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + I40E_PTT_UNUSED_ENTRY(154), + I40E_PTT_UNUSED_ENTRY(155), + I40E_PTT_UNUSED_ENTRY(156), + I40E_PTT_UNUSED_ENTRY(157), + I40E_PTT_UNUSED_ENTRY(158), + I40E_PTT_UNUSED_ENTRY(159), + + I40E_PTT_UNUSED_ENTRY(160), + I40E_PTT_UNUSED_ENTRY(161), + I40E_PTT_UNUSED_ENTRY(162), + I40E_PTT_UNUSED_ENTRY(163), + I40E_PTT_UNUSED_ENTRY(164), + I40E_PTT_UNUSED_ENTRY(165), + I40E_PTT_UNUSED_ENTRY(166), + I40E_PTT_UNUSED_ENTRY(167), + I40E_PTT_UNUSED_ENTRY(168), + I40E_PTT_UNUSED_ENTRY(169), + + I40E_PTT_UNUSED_ENTRY(170), + I40E_PTT_UNUSED_ENTRY(171), + I40E_PTT_UNUSED_ENTRY(172), + I40E_PTT_UNUSED_ENTRY(173), + I40E_PTT_UNUSED_ENTRY(174), + I40E_PTT_UNUSED_ENTRY(175), + I40E_PTT_UNUSED_ENTRY(176), + I40E_PTT_UNUSED_ENTRY(177), + I40E_PTT_UNUSED_ENTRY(178), + I40E_PTT_UNUSED_ENTRY(179), + + I40E_PTT_UNUSED_ENTRY(180), + I40E_PTT_UNUSED_ENTRY(181), + I40E_PTT_UNUSED_ENTRY(182), + I40E_PTT_UNUSED_ENTRY(183), + I40E_PTT_UNUSED_ENTRY(184), + I40E_PTT_UNUSED_ENTRY(185), + I40E_PTT_UNUSED_ENTRY(186), + I40E_PTT_UNUSED_ENTRY(187), + I40E_PTT_UNUSED_ENTRY(188), + I40E_PTT_UNUSED_ENTRY(189), + + I40E_PTT_UNUSED_ENTRY(190), + I40E_PTT_UNUSED_ENTRY(191), + I40E_PTT_UNUSED_ENTRY(192), + I40E_PTT_UNUSED_ENTRY(193), + I40E_PTT_UNUSED_ENTRY(194), + I40E_PTT_UNUSED_ENTRY(195), + I40E_PTT_UNUSED_ENTRY(196), + I40E_PTT_UNUSED_ENTRY(197), + I40E_PTT_UNUSED_ENTRY(198), + I40E_PTT_UNUSED_ENTRY(199), + + I40E_PTT_UNUSED_ENTRY(200), + I40E_PTT_UNUSED_ENTRY(201), + I40E_PTT_UNUSED_ENTRY(202), + I40E_PTT_UNUSED_ENTRY(203), + I40E_PTT_UNUSED_ENTRY(204), + I40E_PTT_UNUSED_ENTRY(205), + I40E_PTT_UNUSED_ENTRY(206), + I40E_PTT_UNUSED_ENTRY(207), + I40E_PTT_UNUSED_ENTRY(208), + I40E_PTT_UNUSED_ENTRY(209), + + I40E_PTT_UNUSED_ENTRY(210), + I40E_PTT_UNUSED_ENTRY(211), + I40E_PTT_UNUSED_ENTRY(212), + I40E_PTT_UNUSED_ENTRY(213), + I40E_PTT_UNUSED_ENTRY(214), + I40E_PTT_UNUSED_ENTRY(215), + 
I40E_PTT_UNUSED_ENTRY(216), + I40E_PTT_UNUSED_ENTRY(217), + I40E_PTT_UNUSED_ENTRY(218), + I40E_PTT_UNUSED_ENTRY(219), + + I40E_PTT_UNUSED_ENTRY(220), + I40E_PTT_UNUSED_ENTRY(221), + I40E_PTT_UNUSED_ENTRY(222), + I40E_PTT_UNUSED_ENTRY(223), + I40E_PTT_UNUSED_ENTRY(224), + I40E_PTT_UNUSED_ENTRY(225), + I40E_PTT_UNUSED_ENTRY(226), + I40E_PTT_UNUSED_ENTRY(227), + I40E_PTT_UNUSED_ENTRY(228), + I40E_PTT_UNUSED_ENTRY(229), + + I40E_PTT_UNUSED_ENTRY(230), + I40E_PTT_UNUSED_ENTRY(231), + I40E_PTT_UNUSED_ENTRY(232), + I40E_PTT_UNUSED_ENTRY(233), + I40E_PTT_UNUSED_ENTRY(234), + I40E_PTT_UNUSED_ENTRY(235), + I40E_PTT_UNUSED_ENTRY(236), + I40E_PTT_UNUSED_ENTRY(237), + I40E_PTT_UNUSED_ENTRY(238), + I40E_PTT_UNUSED_ENTRY(239), + + I40E_PTT_UNUSED_ENTRY(240), + I40E_PTT_UNUSED_ENTRY(241), + I40E_PTT_UNUSED_ENTRY(242), + I40E_PTT_UNUSED_ENTRY(243), + I40E_PTT_UNUSED_ENTRY(244), + I40E_PTT_UNUSED_ENTRY(245), + I40E_PTT_UNUSED_ENTRY(246), + I40E_PTT_UNUSED_ENTRY(247), + I40E_PTT_UNUSED_ENTRY(248), + I40E_PTT_UNUSED_ENTRY(249), + + I40E_PTT_UNUSED_ENTRY(250), + I40E_PTT_UNUSED_ENTRY(251), + I40E_PTT_UNUSED_ENTRY(252), + I40E_PTT_UNUSED_ENTRY(253), + I40E_PTT_UNUSED_ENTRY(254), + I40E_PTT_UNUSED_ENTRY(255) +}; + + +/** + * i40e_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for + * completion before returning. + **/ +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = cpu_to_le32(v_opcode); + desc.cookie_low = cpu_to_le32(v_retval); + if (msglen) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.datalen = cpu_to_le16(msglen); + } + if (!cmd_details) { + struct i40e_asq_cmd_details details; + memset(&details, 0, sizeof(details)); + details.async = true; + cmd_details = &details; + } + status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details); + return status; +} + +/** + * i40e_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg) +{ + struct i40e_virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = (msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 
1 : 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == I40E_VSI_SRIOV) { + memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, + ETH_ALEN); + memcpy(hw->mac.addr, vsi_res->default_mac_addr, + ETH_ALEN); + } + vsi_res++; + } +} + +/** + * i40e_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. + **/ +i40e_status i40e_vf_reset(struct i40e_hw *hw) +{ + return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, + 0, NULL, 0, NULL); +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -0,0 +1,238 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_HMC_H_ +#define _I40E_HMC_H_ + +#define I40E_HMC_MAX_BP_COUNT 512 + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ +#define I40E_HMC_PD_CNT_IN_SD 512 +#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ +#define I40E_HMC_PAGED_BP_SIZE 4096 +#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40E_FIRST_VF_FPM_ID 16 + +struct i40e_hmc_obj_info { + u64 base; /* base addr in FPM */ + u32 max_cnt; /* max count available for this hmc func */ + u32 cnt; /* count of objects driver actually wants to create */ + u64 size; /* size in bytes of one object */ +}; + +enum i40e_sd_entry_type { + I40E_SD_TYPE_INVALID = 0, + I40E_SD_TYPE_PAGED = 1, + I40E_SD_TYPE_DIRECT = 2 +}; + +struct i40e_hmc_bp { + enum i40e_sd_entry_type entry_type; + struct i40e_dma_mem addr; /* populate to be used by hw */ + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40e_hmc_pd_entry { + struct i40e_hmc_bp bp; + u32 sd_index; + bool valid; +}; + +struct i40e_hmc_pd_table { + struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ + struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ + struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ + + u32 ref_cnt; + u32 sd_index; +}; + +struct i40e_hmc_sd_entry { + enum i40e_sd_entry_type entry_type; + bool valid; + + union { + struct i40e_hmc_pd_table pd_table; + struct i40e_hmc_bp bp; + } u; +}; + +struct i40e_hmc_sd_table { + struct i40e_virt_mem addr; /* used to track sd_entry allocations */ + u32 sd_cnt; + u32 ref_cnt; + struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ +}; + +struct i40e_hmc_info { + u32 signature; + /* equals to pci func num for 
PF and dynamically allocated for VFs */ + u8 hmc_fn_id; + u16 first_sd_index; /* index of the first available SD */ + + /* hmc objects */ + struct i40e_hmc_obj_info *hmc_obj; + struct i40e_virt_mem hmc_obj_virt_mem; + struct i40e_hmc_sd_table sd_table; +}; + +#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware + * @hw: pointer to our hw struct + * @pa: pointer to physical address + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ +{ \ + u32 val1, val2, val3; \ + val1 = (u32)(upper_32_bits(pa)); \ + val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ + (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ + val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware + * @hw: pointer to our hw struct + * @sd_index: segment descriptor index + * @type: if sd entry is direct or paged + **/ +#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ +{ \ + u32 val2, val3; \ + val2 = (I40E_HMC_MAX_BP_COUNT << \ + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ + ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ + val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ + wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ + wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ + wr32((hw), I40E_PFHMC_SDCMD, val3); \ +} + +/** + * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + **/ +#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \ + wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40e_hmc_rsrc_type. 
+ **/ +#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ +{ \ + u64 fpm_addr, fpm_limit; \ + fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (index); \ + fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ + *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ + *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(sd_limit) += 1; \ +} + +/** + * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40e_hmc_rsrc_type. + **/ +#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ +{ \ + u64 fpm_adr, fpm_limit; \ + fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ + (hmc_info)->hmc_obj[(type)].size * (idx); \ + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ + *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ + *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ + /* add one more to the limit to correct our range */ \ + *(pd_limit) += 1; \ +} +i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 sd_index, + enum i40e_sd_entry_type type, + u64 direct_mode_sz); + +i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 pd_index); +i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); +i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, + u32 idx); +i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, + struct i40e_hmc_info *hmc_info, + u32 idx, bool is_pf); + +#endif /* _I40E_HMC_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h @@ -0,0 +1,165 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_LAN_HMC_H_ +#define _I40E_LAN_HMC_H_ + +/* forward-declare the HW struct for the compiler */ +struct i40e_hw; + +/* HMC element context information */ + +/* Rx queue context data */ +struct i40e_hmc_obj_rxq { + u16 head; + u8 cpuid; + u64 base; + u16 qlen; +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + u8 dbuff; +#define I40E_RXQ_CTX_HBUFF_SHIFT 6 + u8 hbuff; + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 fc_ena; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; + u16 rxmax; + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; + u8 lrxqthresh; +}; + +/* Tx queue context data */ +struct i40e_hmc_obj_txq { + u16 head; + u8 new_context; + u64 base; + u8 fc_ena; + u8 timesync_ena; + u8 fd_ena; + u8 alt_vlan_ena; + u16 thead_wb; + u16 cpuid; + u8 head_wb_ena; + u16 qlen; + u8 tphrdesc_ena; + u8 tphrpacket_ena; + u8 tphwdesc_ena; + u64 head_wb_addr; + u32 crc; + u16 rdylist; + u8 rdylist_act; +}; + +/* for hsplit_0 field of Rx HMC context */ +enum i40e_hmc_obj_rx_hsplit_0 { + I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, +}; + +/* fcoe_cntx and fcoe_filt are for debugging purpose only */ +struct i40e_hmc_obj_fcoe_cntx { + u32 rsv[32]; +}; + +struct i40e_hmc_obj_fcoe_filt { + u32 rsv[8]; +}; + +/* Context sizes for LAN objects */ +enum i40e_hmc_lan_object_size { + I40E_HMC_LAN_OBJ_SZ_8 = 0x3, + I40E_HMC_LAN_OBJ_SZ_16 = 0x4, + I40E_HMC_LAN_OBJ_SZ_32 = 0x5, + I40E_HMC_LAN_OBJ_SZ_64 = 0x6, + I40E_HMC_LAN_OBJ_SZ_128 = 0x7, + I40E_HMC_LAN_OBJ_SZ_256 = 0x8, + I40E_HMC_LAN_OBJ_SZ_512 = 0x9, +}; + +#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 +#define I40E_HMC_OBJ_SIZE_TXQ 128 +#define I40E_HMC_OBJ_SIZE_RXQ 32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 + +enum i40e_hmc_lan_rsrc_type { + I40E_HMC_LAN_FULL = 0, + I40E_HMC_LAN_TX = 1, + I40E_HMC_LAN_RX = 2, + I40E_HMC_FCOE_CTX = 3, + I40E_HMC_FCOE_FILT = 4, + I40E_HMC_LAN_MAX = 5 +}; + +enum i40e_hmc_model { + I40E_HMC_MODEL_DIRECT_PREFERRED = 0, + I40E_HMC_MODEL_DIRECT_ONLY = 1, + I40E_HMC_MODEL_PAGED_ONLY = 2, + I40E_HMC_MODEL_UNKNOWN, +}; + +struct i40e_hmc_lan_create_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; + enum i40e_sd_entry_type entry_type; + u64 direct_mode_sz; +}; + +struct i40e_hmc_lan_delete_obj_info { + struct i40e_hmc_info *hmc_info; + u32 rsrc_type; + u32 start_idx; + u32 count; +}; + +i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, + u32 rxq_num, u32 fcoe_cntx_num, + u32 fcoe_filt_num); +i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, + enum i40e_hmc_model model); +i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); + +i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_txq *s); +i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue); +i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, + u16 queue, + struct i40e_hmc_obj_rxq *s); + +#endif /* _I40E_LAN_HMC_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -0,0 +1,72 @@ 
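For illustration, the queue-context helpers declared in i40e_lan_hmc.h above are normally used by zero-filling one of the i40e_hmc_obj_* structures, setting the fields of interest and handing it to the clear/set routines. The sketch below is illustrative rather than part of the patch: the queue number, ring size and DMA address are placeholders, most context fields are left at zero for brevity, and it assumes the LAN HMC has already been brought up via i40e_init_lan_hmc() and i40e_configure_lan_hmc().

/* Illustrative sketch: program a Tx queue context through the LAN HMC API */
#include <linux/string.h>
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"

static i40e_status example_setup_tx_queue(struct i40e_hw *hw, u16 queue,
					  dma_addr_t ring_dma, u16 ring_count)
{
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err;

	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.new_context = 1;			/* first use of this queue */
	tx_ctx.base = (u64)ring_dma / 128;	/* ring base, in 128-byte units as the PF driver programs it */
	tx_ctx.qlen = ring_count;		/* number of descriptors in the ring */
	/* rdylist, cpuid, head write-back, etc. left at zero for brevity */

	err = i40e_clear_lan_tx_queue_context(hw, queue);
	if (err)
		return err;
	return i40e_set_lan_tx_queue_context(hw, queue, &tx_ctx);
}

The same pattern applies on the receive side with struct i40e_hmc_obj_rxq and i40e_set_lan_rx_queue_context().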
+/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_OSDEP_H_ +#define _I40E_OSDEP_H_ + +#include +#include +#include +#include +#include + +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include + +/* File to be the magic between shared code and + * actual OS primitives + */ + +#define hw_dbg(hw, S, A...) do {} while (0) + +#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) +#define rd32(a, reg) readl((a)->hw_addr + (reg)) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) +#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT) + +/* memory allocation tracking */ +struct i40e_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +#define i40e_allocate_dma_mem(h, m, unused, s, a) \ + i40evf_allocate_dma_mem_d(h, m, s, a) +#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m) + +struct i40e_virt_mem { + void *va; + u32 size; +} __packed; +#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) +#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) + +#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__) +extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) + __attribute__ ((format(gnu_printf, 3, 4))); + +typedef enum i40e_status_code i40e_status; +#endif /* _I40E_OSDEP_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -0,0 +1,91 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include "i40e_virtchnl.h" + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. + */ + +/* adminq functions */ +i40e_status i40evf_init_adminq(struct i40e_hw *hw); +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool i40evf_asq_done(struct i40e_hw *hw); + +/* debug function for adminq */ +void i40evf_debug_aq(struct i40e_hw *hw, + enum i40e_debug_mask mask, + void *desc, + void *buffer); + +void i40e_idle_aq(struct i40e_hw *hw); +void i40evf_resume_aq(struct i40e_hw *hw); +bool i40evf_check_asq_alive(struct i40e_hw *hw); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, + bool unloading); + +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + +extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40evf_ptype_lookup[ptype]; +} + +/* prototype for functions used for SW locks */ + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg); +i40e_status i40e_vf_reset(struct i40e_hw *hw); +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_set_filter_control(struct i40e_hw *hw, + struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, + u8 *mac_addr, u16 ethtype, u16 flags, + u16 vsi_seid, u16 queue, bool is_add, + struct i40e_control_filter_stats *stats, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _I40E_PROTOTYPE_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -0,0 +1,4667 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 +#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 +#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) +#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) +#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 +#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) +#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) + +#define I40E_PF_ARQBAH 0x00080180 +#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT) +#define I40E_PF_ARQBAL 0x00080080 +#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT) +#define I40E_PF_ARQH 0x00080380 +#define I40E_PF_ARQH_ARQH_SHIFT 0 +#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT) +#define I40E_PF_ARQLEN 0x00080280 +#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT) +#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 +#define 
I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT) +#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQT 0x00080480 +#define I40E_PF_ARQT_ARQT_SHIFT 0 +#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT) +#define I40E_PF_ATQBAH 0x00080100 +#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT) +#define I40E_PF_ATQBAL 0x00080000 +#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT) +#define I40E_PF_ATQH 0x00080300 +#define I40E_PF_ATQH_ATQH_SHIFT 0 +#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT) +#define I40E_PF_ATQLEN 0x00080200 +#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT) +#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT) +#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQT 0x00080400 +#define I40E_PF_ATQT_ATQT_SHIFT 0 +#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT) +#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ARQBAH_MAX_INDEX 127 +#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ARQBAL_MAX_INDEX 127 +#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT) +#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ARQH_MAX_INDEX 127 +#define I40E_VF_ARQH_ARQH_SHIFT 0 +#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT) +#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ARQLEN_MAX_INDEX 127 +#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ARQT_MAX_INDEX 127 +#define I40E_VF_ARQT_ARQT_SHIFT 0 +#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT) +#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ATQBAH_MAX_INDEX 127 +#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT) 
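Each register above is paired with _SHIFT/_MASK defines for its fields; they are consumed with the rd32()/wr32() accessors from i40e_osdep.h, in the same style as i40evf_check_asq_alive() earlier in i40e_common.c. Below is a small illustrative sketch, not part of the patch, that decodes the PF's per-VF view of the receive admin queue length register; the function name, vf_id parameter and caller are hypothetical.

/* Illustrative sketch: decode I40E_VF_ARQLEN fields with the SHIFT/MASK pairs */
#include "i40e_type.h"
#include "i40e_register.h"

static bool example_vf_arq_enabled(struct i40e_hw *hw, int vf_id, u16 *qlen)
{
	u32 reg = rd32(hw, I40E_VF_ARQLEN(vf_id));

	if (qlen)	/* number of descriptors currently configured */
		*qlen = (reg & I40E_VF_ARQLEN_ARQLEN_MASK) >>
			I40E_VF_ARQLEN_ARQLEN_SHIFT;

	/* bit 31 of ARQLEN is the queue-enable flag */
	return !!(reg & I40E_VF_ARQLEN_ARQENABLE_MASK);
}

Writes follow the inverse pattern: shift each field value into place, OR the fields together and hand the result to wr32().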
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ATQBAL_MAX_INDEX 127 +#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT) +#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ATQH_MAX_INDEX 127 +#define I40E_VF_ATQH_ATQH_SHIFT 0 +#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT) +#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ATQLEN_MAX_INDEX 127 +#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VF_ATQT_MAX_INDEX 127 +#define I40E_VF_ATQT_ATQT_SHIFT 0 +#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT) +#define I40E_PRT_L2TAGSEN 0x001C0B20 +#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 +#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA 0x0010C080 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_LAN_ERRINFO 0x0010C000 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4))/* _pf=0..15 */ +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 +#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) +#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 +#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 +#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) +#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 +#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) +#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16))/* _i=0...3 _pf=0..15 */ +#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 +#define 
I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 +#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT) +#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4))/* _pf=0..15 */ +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 +#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 +#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) +#define I40E_PFCM_PE_ERRDATA 0x00138D00 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_PE_ERRINFO 0x00138C80 +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) +#define I40E_GLDCB_GENC 0x00083044 +#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 +#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT) +#define I40E_GLDCB_RUPTI 0x00122618 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 +#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) +#define I40E_PRTDCB_FCCFG 0x001E4640 +#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 +#define 
I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT) +#define I40E_PRTDCB_FCRTV 0x001E4600 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 +#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) +#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 +#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 +#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 +#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) +#define I40E_PRTDCB_GENC 0x00083000 +#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 +#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT) +#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 +#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 +#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT) +#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 +#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) +#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 +#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT) +#define I40E_PRTDCB_GENS 0x00083020 +#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 +#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) +#define I40E_PRTDCB_MFLCN 0x001E2400 +#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 +#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT) +#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 +#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 +#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT) +#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 +#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT) +#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 +#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) +#define I40E_PRTDCB_RETSC 0x001223E0 +#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 +#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 +#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 +#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) +#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 +#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT) +#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 +#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 +#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 +#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RPPMC 0x001223A0 +#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 +#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 +#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 +#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << 
I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) +#define I40E_PRTDCB_RUP 0x001C0B00 +#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 +#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) +#define I40E_PRTDCB_RUP2TC 0x001C09A0 +#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 +#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 +#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 +#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 +#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 +#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 +#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 +#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) +#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 +#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_TC2PFC 0x001C0980 +#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 +#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) +#define I40E_PRTDCB_TCPMC 0x000A21A0 +#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 +#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT) +#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 +#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT) +#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TDPMC 0x000A0180 +#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 +#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT) +#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 +#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) +#define I40E_PRTDCB_TDPUC 0x00044100 +#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0 +#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT) +#define I40E_PRTDCB_TETSC_TCB 0x000AE060 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) +#define I40E_PRTDCB_TETSC_TPB 0x00098060 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 +#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) +#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 +#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) +#define I40E_PRTDCB_TFCS 0x001E4560 +#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 +#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 +#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 +#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT) 
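
[Editorial note, not part of the patch] The register definitions above all follow the driver's uniform SHIFT/MASK idiom: a field is read by masking the 32-bit register value and shifting it down by the field's bit offset. A minimal sketch of that idiom, extracting the DCBX status field from PRTDCB_GENS; it assumes the usual kernel includes (<linux/types.h>, <linux/io.h>) and a hypothetical ioremap()ed register base 'hw_addr', where the in-tree driver would instead go through its own rd32() accessor:

/* Illustrative only: decode the DCBX engine status from PRTDCB_GENS
 * using the SHIFT/MASK pair defined above.
 */
static inline u32 example_read_dcbx_status(void __iomem *hw_addr)
{
	u32 reg = readl(hw_addr + I40E_PRTDCB_GENS);

	return (reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
	       I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT;
}
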
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 +#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 +#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 +#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 +#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 +#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT) +#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 +#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT) +#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT) +#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 +#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) +#define I40E_GLFCOE_RCTL 0x00269B94 +#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 +#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT) +#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 +#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT) +#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 +#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT) +#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 +#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) +#define I40E_GL_FWSTS 0x00083048 +#define I40E_GL_FWSTS_FWS0B_SHIFT 0 +#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT) +#define I40E_GL_FWSTS_FWRI_SHIFT 9 +#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT) +#define I40E_GL_FWSTS_FWS1B_SHIFT 16 +#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GLGEN_CLKSTAT 0x000B8184 +#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0 +#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT) +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4 +#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8 +#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12 +#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16 +#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT) +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20 +#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT) +#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ +#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << 
I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 +#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10 +#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 +#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT) +#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 +#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17 +#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_SET 0x00088184 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 +#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_STAT 0x0008817C +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 +#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) +#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 +#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) +#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 +#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 +#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT) +#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 +#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT) +#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 +#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT) +#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 +#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT) +#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 +#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT) +#define I40E_GLGEN_I2CCMD_R_SHIFT 29 +#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT) +#define I40E_GLGEN_I2CCMD_E_SHIFT 31 +#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT) +#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 +#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 +#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 +#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 +#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << 
I40E_GLGEN_I2CPARAMS_CLK_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 +#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 +#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 +#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 +#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 +#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 +#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 +#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) +#define I40E_GLGEN_LED_CTL 0x00088178 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 +#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) +#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 +#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 +#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 +#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 +#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) +#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_MSCA_MAX_INDEX 3 +#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 +#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT) +#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 +#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT) +#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 +#define 
I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT) +#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 +#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_GLGEN_MSCA_STCODE_SHIFT 28 +#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 +#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_MSRWD_MAX_INDEX 3 +#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 +#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) +#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 +#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) +#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 +#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 +#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) +#define I40E_GLGEN_PE_ENA 0x000B81A0 +#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0 +#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT) +#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1 +#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT) +#define I40E_GLGEN_RSTAT 0x000B8188 +#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 +#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) +#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 +#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) +#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 +#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT) +#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 +#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 +#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) +#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 +#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) +#define I40E_GLGEN_RSTCTL 0x000B8180 +#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 +#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 +#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) +#define I40E_GLGEN_RSTENA_EMP 0x000B818C +#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 +#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) +#define I40E_GLGEN_RTRIG 0x000B8190 +#define I40E_GLGEN_RTRIG_CORER_SHIFT 0 +#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT) +#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 +#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT) +#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 +#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT) +#define I40E_GLGEN_STAT 0x000B612C +#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 +#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT) +#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 +#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT) +#define I40E_GLGEN_STAT_VTEN_SHIFT 3 +#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << 
I40E_GLGEN_STAT_VTEN_SHIFT) +#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 +#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT) +#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 +#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT) +#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 +#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT) +#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 +#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 +#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) +#define I40E_GLVFGEN_TIMER 0x000881BC +#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 +#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT) +#define I40E_PFGEN_CTRL 0x00092400 +#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 +#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT) +#define I40E_PFGEN_DRUN 0x00092500 +#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 +#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT) +#define I40E_PFGEN_PORTNUM 0x001C0480 +#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_STATE 0x00088000 +#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0 +#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT) +#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 +#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT) +#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 +#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT) +#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 +#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT) +#define I40E_PRTGEN_CNF 0x000B8120 +#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 +#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 +#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 +#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) +#define I40E_PRTGEN_CNF2 0x000B8160 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 +#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) +#define I40E_PRTGEN_STATUS 0x000B8100 +#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 +#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 +#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) +#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 +#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) +#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 +#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 +#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT) +#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 +#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 +#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) +#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ +#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 +#define 
I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 +#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT) +#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ +#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 +#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 +#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT) +#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) +#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_CEQPART_MAX_INDEX 15 +#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 +#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) +#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 +#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) +#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 +#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) +#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 +#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) +#define I40E_GLHMC_FCOEFMAX 0x000C20D0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 +#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) +#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 +#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) +#define I40E_GLHMC_FCOEMAX 0x000C2014 +#define 
I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 +#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) +#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_FSIAVMAX 0x000C2068 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 +#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) +#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) +#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 +#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) +#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) +#define I40E_GLHMC_FSIMCMAX 0x000C2060 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 +#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) +#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 +#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) +#define I40E_GLHMC_LANQMAX 0x000C2008 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 +#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) +#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 +#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) +#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 +#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) +#define I40E_GLHMC_LANRXOBJSZ 0x000C200c +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) +#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 +#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) +#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 +#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT) +#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 +#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 +#define 
I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) +#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 +#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) +#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_PEARPMAX 0x000C2038 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) +#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) +#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_PECQOBJSZ 0x000C2020 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTMAX 0x000C2030 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) +#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_PEMRMAX 0x000C2040 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) +#define I40E_GLHMC_PEMROBJSZ 0x000C203c +#define 
I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) +#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_PEPBLMAX 0x000C206c +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) +#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0 +#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1MAX 0x000C2054 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) +#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) +#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_PEQPOBJSZ 0x000C201c +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ 
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_PESRQMAX 0x000C2028 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) +#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4 +#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT) +#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_PETIMERMAX 0x000C2084 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) +#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) +#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0 +#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT) +#define I40E_GLHMC_PEXFFLMAX 0x000C204c +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFMAX 0x000C2048 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) +#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) +#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4 +#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT) +#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 
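
[Editorial note, not part of the patch] Several of the GLHMC registers above are per-function arrays: the (_i) argument selects the PF, consecutive entries are 4 bytes apart, and the matching *_MAX_INDEX define gives the last valid index. A hedged sketch of that access pattern over the LAN-TX partitioning registers, again using a hypothetical 'hw_addr' base and raw readl() in place of the driver's own wrapper:

/* Illustrative only: walk the per-PF HMC LAN-TX base/count registers. */
static void example_dump_hmc_lantx(void __iomem *hw_addr)
{
	int i;

	for (i = 0; i <= I40E_GLHMC_LANTXBASE_MAX_INDEX; i++) {
		u32 base = readl(hw_addr + I40E_GLHMC_LANTXBASE(i)) &
			   I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK;
		u32 cnt = (readl(hw_addr + I40E_GLHMC_LANTXCNT(i)) &
			   I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK) >>
			  I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT;

		pr_debug("PF %d: FPM LAN TX base 0x%x, count %u\n",
			 i, base, cnt);
	}
}
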
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 +#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) +#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLHMC_SDPART_MAX_INDEX 15 +#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) +#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) +#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29 +#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT) +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 +#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 +#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) +#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define 
I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT) +#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ +#define 
I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT) +#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 +#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) +#define I40E_PFHMC_ERRORDATA 0x000C0500 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 +#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) +#define I40E_PFHMC_ERRORINFO 0x000C0400 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 +#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) +#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 +#define 
I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 +#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 +#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 +#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) +#define I40E_PFHMC_PDINV 0x000C0300 +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD 0x000C0000 +#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATAHIGH 0x000C0200 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) +#define I40E_PFHMC_SDDATALOW 0x000C0100 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) +#define I40E_GL_UFUSE 0x00094008 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 +#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) +#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 +#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) +#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 +#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) +#define I40E_EMPINT_GPIO_ENA 0x00088188 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define 
I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 +#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 +#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << 
I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) +#define I40E_PFINT_AEQCTL 0x00038700 +#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ +#define I40E_PFINT_CEQCTL_MAX_INDEX 511 +#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_PFINT_DYN_CTL0 0x00038480 +#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ +#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << 
I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_PFINT_GPIO_ENA 0x00088080 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 +#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 +#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 +#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 +#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 +#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 +#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 +#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 +#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 +#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 +#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 +#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 +#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 +#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 +#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 +#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 +#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 +#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 +#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 +#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 +#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 +#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) 
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 +#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 +#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 +#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 +#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 +#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 +#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 +#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 +#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 +#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) +#define I40E_PFINT_ICR0 0x00038780 +#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 +#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 +#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 +#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT) +#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8 +#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT) +#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT) +#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT) +#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28 +#define 
I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_SWINT_SHIFT 31 +#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT) +#define I40E_PFINT_ICR0_ENA 0x00038800 +#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16 +#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19 +#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20 +#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT) +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21 +#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT) +#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22 +#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT) +#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23 +#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 +#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT) +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 +#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28 +#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT) +#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29 +#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT) +#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ +#define I40E_PFINT_ITR0_MAX_INDEX 2 +#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT) +#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) +#define I40E_PFINT_ITRN_MAX_INDEX 2 +#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT) +#define I40E_PFINT_LNKLST0 0x00038500 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ +#define I40E_PFINT_LNKLSTN_MAX_INDEX 511 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_PFINT_RATE0 0x00038580 +#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << 
I40E_PFINT_RATE0_INTERVAL_SHIFT) +#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ +#define I40E_PFINT_RATEN_MAX_INDEX 511 +#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT) +#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_PFINT_STAT_CTL0 0x00038400 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QINT_RQCTL_MAX_INDEX 1535 +#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT) +#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QINT_TQCTL_MAX_INDEX 1535 +#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 +#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) +#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 +#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT) +#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) +#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) +#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 +#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT) +#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 +#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 
24 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ +#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 +#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFINT_ICR0_MAX_INDEX 127 +#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_SWINT_SHIFT 31 +#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT) +#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ +#define I40E_VFINT_ITR0_MAX_INDEX 2 +#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 
2048 + (_INTVF) * 4)) +#define I40E_VFINT_ITRN_MAX_INDEX 2 +#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPINT_AEQCTL_MAX_INDEX 127 +#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ +#define I40E_VPINT_CEQCTL_MAX_INDEX 511 +#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 +#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 +#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 +#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 +#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 +#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) +#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 +#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) +#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 +#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT) +#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPINT_LNKLST0_MAX_INDEX 127 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ +#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 +#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 +#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) +#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPINT_RATE0_MAX_INDEX 127 +#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT) +#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT) +#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ +#define 
I40E_VPINT_RATEN_MAX_INDEX 511 +#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 +#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT) +#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 +#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT) +#define I40E_GL_RDPU_CNTRL 0x00051060 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 +#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) +#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 +#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT) +#define I40E_GLLAN_RCTL_0 0x0012A500 +#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 +#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) +#define I40E_GLLAN_TSOMSK_F 0x000442D8 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 +#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) +#define I40E_GLLAN_TSOMSK_L 0x000442E0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 +#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) +#define I40E_GLLAN_TSOMSK_M 0x000442DC +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 +#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_PFLAN_QALLOC 0x001C0400 +#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 +#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) +#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 +#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_SHIFT 31 +#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QRX_ENA_MAX_INDEX 1535 +#define I40E_QRX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT) +#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT) +#define I40E_QRX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT) +#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QRX_TAIL_MAX_INDEX 1535 +#define I40E_QRX_TAIL_TAIL_SHIFT 0 +#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT) +#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QTX_CTL_MAX_INDEX 1535 +#define I40E_QTX_CTL_PFVF_Q_SHIFT 0 +#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT) +#define I40E_QTX_CTL_PF_INDX_SHIFT 2 +#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT) +#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 +#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT) +#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QTX_ENA_MAX_INDEX 1535 +#define I40E_QTX_ENA_QENA_REQ_SHIFT 0 +#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT) +#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 +#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT) +#define I40E_QTX_ENA_QENA_STAT_SHIFT 2 +#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT) +#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QTX_HEAD_MAX_INDEX 1535 +#define I40E_QTX_HEAD_HEAD_SHIFT 0 +#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT) +#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 +#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT) 
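+
+(Hedged aside, not part of the upstream hunk: every field in this header follows the same convention of a *_SHIFT constant giving the bit offset and a *_MASK constant giving the field width already shifted into place. The sketch below shows how such a pair is conventionally consumed; field_get()/field_set()/qtx_head_index() are hypothetical illustration-only helpers, u32 is the kernel's 32-bit type, and only the I40E_QTX_HEAD_HEAD_SHIFT/_MASK macros are taken from the definitions above, assumed visible via this header.)
+
+	/* Hypothetical helpers -- illustrative only, not i40e driver API. */
+	static inline u32 field_get(u32 reg, u32 mask, u32 shift)
+	{
+		/* Isolate the field bits, then right-align them. */
+		return (reg & mask) >> shift;
+	}
+
+	static inline u32 field_set(u32 reg, u32 mask, u32 shift, u32 val)
+	{
+		/* Clear the old field, then merge in the new value. */
+		return (reg & ~mask) | ((val << shift) & mask);
+	}
+
+	/* Example: extract the head index from a raw I40E_QTX_HEAD register value. */
+	static inline u32 qtx_head_index(u32 qtx_head_reg)
+	{
+		return field_get(qtx_head_reg, I40E_QTX_HEAD_HEAD_MASK,
+				 I40E_QTX_HEAD_HEAD_SHIFT);
+	}
+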
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ +#define I40E_QTX_TAIL_MAX_INDEX 1535 +#define I40E_QTX_TAIL_TAIL_SHIFT 0 +#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT) +#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPLAN_MAPENA_MAX_INDEX 127 +#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 +#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) +#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ +#define I40E_VPLAN_QTABLE_MAX_INDEX 15 +#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 +#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT) +#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ +#define I40E_VSILAN_QBASE_MAX_INDEX 383 +#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 +#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT) +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 +#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) +#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) +#define I40E_VSILAN_QTABLE_MAX_INDEX 7 +#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 +#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) +#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 +#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) +#define I40E_PRTGL_SAH 0x001E2140 +#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 +#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT) +#define I40E_PRTGL_SAH_MFS_SHIFT 16 +#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT) +#define I40E_PRTGL_SAL 0x001E2120 +#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 +#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT) +#define I40E_PRTMAC_HLCTLA 0x001E4760 +#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0 +#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT) +#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1 +#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT) +#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2 +#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT) +#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4 +#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT) +#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7 +#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT) +#define 
I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 +#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 +#define 
I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000 +#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 +#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) +#define I40E_PRTMAC_HSECTL1 0x001E3560 +#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0 +#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT) +#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3 +#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT) +#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4 +#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT) +#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7 +#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT) +#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30 +#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT) +#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31 +#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) +#define 
I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 +#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) +#define I40E_GL_MNG_FWSM 0x000B6134 +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6 +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 +#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 +#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 +#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25 +#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 +#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 +#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << 
I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 +#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 +#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) +#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 +#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) +#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ +#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 +#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) +#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) +#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 +#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 +#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) +#define I40E_PRT_MNG_MANC 0x00256A20 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 +#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 +#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 +#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) +#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 +#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 +#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 +#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 +#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) +#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 +#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) +#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 +#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 +#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT) +#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) +#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 +#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 +#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 +#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 +#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 +#define 
I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 +#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 +#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 +#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 +#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) +#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 +#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 +#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 +#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 +#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 +#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 +#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) +#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 +#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) +#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ +#define 
I40E_PRT_MNG_METF_MAX_INDEX 3 +#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 +#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT) +#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 +#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT) +#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ +#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 +#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) +#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 +#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT) +#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 +#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT) +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 +#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) +#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 +#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) +#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ +#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 +#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 +#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) +#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 +#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT) +#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 +#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 +#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT) +#define I40E_PRT_MNG_MNGONLY 0x00256A60 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 +#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) +#define I40E_PRT_MNG_MSFM 0x00256AA0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 +#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 +#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 +#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 +#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 +#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 +#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 +#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 +#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) +#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */ +#define I40E_MSIX_PBA_MAX_INDEX 5 +#define I40E_MSIX_PBA_PENBIT_SHIFT 0 +#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT) +#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ +#define 
I40E_MSIX_TADD_MAX_INDEX 128 +#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT) +#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ +#define I40E_MSIX_TMSG_MAX_INDEX 128 +#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ +#define I40E_MSIX_TUADD_MAX_INDEX 128 +#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ +#define I40E_MSIX_TVCTRL_MAX_INDEX 128 +#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */ +#define I40E_VFMSIX_PBA1_MAX_INDEX 19 +#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ +#define I40E_VFMSIX_TADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ +#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ +#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ +#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 +#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT) +#define I40E_GLNVM_FLA 0x000B6108 +#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 +#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT) +#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 +#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT) +#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 +#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT) +#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 +#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT) +#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 +#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT) +#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 +#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT) +#define I40E_GLNVM_FLA_LOCKED_SHIFT 6 +#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT) +#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 +#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT) +#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 +#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT) +#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 +#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT) +#define I40E_GLNVM_FLASHID 0x000B6104 
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 +#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT) +#define I40E_GLNVM_GENS 0x000B6100 +#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 +#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT) +#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 +#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT) +#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 +#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT) +#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 +#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT) +#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 +#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) +#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ +#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 +#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) +#define I40E_GLNVM_SRCTL 0x000B6110 +#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 +#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT) +#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 +#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT) +#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 +#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT) +#define I40E_GLNVM_SRCTL_START_SHIFT 30 +#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_SHIFT 31 +#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRDATA 0x000B6114 +#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 +#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT) +#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 +#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) + +#define I40E_GLPCI_BYTCTH 0x0009C484 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << 
I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_BYTCTL 0x0009C488 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 +#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) +#define I40E_GLPCI_CAPCTRL 0x000BE4A4 +#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 +#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) +#define I40E_GLPCI_CAPSUP 0x000BE4A8 +#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 +#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) +#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 +#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 +#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 +#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 +#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 +#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 +#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 +#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 +#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 +#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 +#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 +#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 +#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 +#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) +#define I40E_GLPCI_CNF 0x000BE4C0 +#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 +#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT) +#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 +#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) +#define I40E_GLPCI_CNF2 0x000BE494 +#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 +#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT) +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 +#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 +#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) +#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 +#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) +#define I40E_GLPCI_GSCL_1 0x0009C48C +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << 
I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 +#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 +#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 +#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 +#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) +#define I40E_GLPCI_GSCL_2 0x0009C490 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 +#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) +#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 +#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 +#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) +#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ +#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 +#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) +#define I40E_GLPCI_LATCT 0x0009C4B4 +#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 +#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << 
I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 +#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 +#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) +#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 +#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT) +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 +#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 +#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 +#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 +#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) +#define I40E_GLPCI_LINKCAP 0x000BE4AC +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 +#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 +#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 +#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) +#define I40E_GLPCI_PCIERR 0x000BE4FC +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 +#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PCITEST2 0x000BE4BC +#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0 +#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT) +#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1 +#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT) + +#define I40E_GLPCI_PKTCT 0x0009C4BC +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 +#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) +#define I40E_GLPCI_PMSUP 0x000BE4B0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 +#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 +#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 +#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 +#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 +#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) +#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 +#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) +#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 +#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) +#define I40E_GLPCI_PWRDATA 0x000BE490 +#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 +#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 +#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 +#define 
I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) +#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 +#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) +#define I40E_GLPCI_REVID 0x000BE4B4 +#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 +#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT) +#define I40E_GLPCI_SERH 0x000BE49C +#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 +#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT) +#define I40E_GLPCI_SERL 0x000BE498 +#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 +#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT) +#define I40E_GLPCI_SUBSYSID 0x000BE48C +#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0 +#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT) +#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16 +#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT) +#define I40E_GLPCI_UPADD 0x000BE4F8 +#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 +#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT) +#define I40E_GLPCI_VFSUP 0x000BE4B8 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 +#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 +#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_PF_FUNC_RID 0x0009C000 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 +#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 +#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) +#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 +#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) +#define I40E_PF_PCI_CIAA 0x0009C080 +#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 +#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT) +#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 +#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT) +#define I40E_PF_PCI_CIAD 0x0009C100 +#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 +#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT) +#define I40E_PFPCI_CLASS 0x000BE400 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 +#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) +#define I40E_PFPCI_CNF 0x000BE000 +#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 +#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT) +#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 +#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT) +#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 +#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT) +#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 +#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT) +#define I40E_PFPCI_FACTPS 0x0009C180 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 +#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 +#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) +#define I40E_PFPCI_FUNC 0x000BE200 +#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) +#define 
I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 +#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 +#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) +#define I40E_PFPCI_FUNC2 0x000BE180 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 +#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) +#define I40E_PFPCI_ICAUSE 0x0009C200 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 +#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) +#define I40E_PFPCI_IENA 0x0009C280 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 +#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) +#define I40E_PFPCI_PFDEVID 0x000BE080 +#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0 +#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT) +#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16 +#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT) +#define I40E_PFPCI_PM 0x000BE300 +#define I40E_PFPCI_PM_PME_EN_SHIFT 0 +#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT) +#define I40E_PFPCI_STATUS1 0x000BE280 +#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 +#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) +#define I40E_PFPCI_VFDEVID 0x000BE100 +#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0 +#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT) +#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16 +#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT) +#define I40E_PFPCI_VMINDEX 0x0009C300 +#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 +#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) +#define I40E_PFPCI_VMPEND 0x0009C380 +#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 +#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT) +#define I40E_GLPE_CPUSTATUS0 0x0000D040 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) +#define I40E_GLPE_CPUSTATUS1 0x0000D044 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) +#define I40E_GLPE_CPUSTATUS2 0x0000D048 +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) +#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15 +#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK 
(0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 +#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_AEQALLOC 0x00131180 +#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_PFPE_CCQPHIGH 0x00008200 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_PFPE_CCQPLOW 0x00008180 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_PFPE_CCQPSTATUS 0x00008100 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_PFPE_CQACK 0x00131100 +#define I40E_PFPE_CQACK_PECQID_SHIFT 0 +#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT) +#define I40E_PFPE_CQARM 0x00131080 +#define I40E_PFPE_CQARM_PECQID_SHIFT 0 +#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT) +#define I40E_PFPE_CQPDB 0x00008000 +#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_PFPE_CQPERRCODES 0x00008880 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << 
I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_PFPE_CQPTAIL 0x00008080 +#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_IPCONFIG0 0x00008280 +#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) + +#define I40E_PFPE_MRTEIDXMASK 0x00008600 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_PFPE_TCPNOWTIMER 0x00008580 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_PFPE_UDACTRL 0x00008700 +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_PFPE_UDAUCFBQPN 0x00008780 +#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 +#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) +#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31 +#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) +#define I40E_PFPE_WQEALLOC 0x00138C00 +#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 +#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define 
I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CQACK_MAX_INDEX 127 +#define I40E_VFPE_CQACK_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT) +#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CQARM_MAX_INDEX 127 +#define I40E_VFPE_CQARM_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT) +#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CQPDB_MAX_INDEX 127 +#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 +#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 +#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) +#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define 
I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 +#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) +#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) +#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) +#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) +#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) +#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << 
I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define 
I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* 
_i=0...15 */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) +#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) +#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define 
I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) +#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSHI(_i) 
(0x00013A04 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) +#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) +#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) +#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) +#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) +#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) +#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 +#define 
I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) +#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008 +#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0 +#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4)) +#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4)) +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define 
I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4)) +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4)) +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4)) +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4)) +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4)) +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define 
I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4)) +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4)) +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4)) +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4)) +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK 
(0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4)) +#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4)) +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4)) +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4)) +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4)) +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define 
I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* 
_i=0...31 */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) +#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define 
I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_PRTPM_EEE_STAT 0x001E4320 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 +#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 +#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 +#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) +#define I40E_PRTPM_EEEC 0x001E4380 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 +#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 +#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) +#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 +#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) +#define I40E_PRTPM_EEEFWD 0x001E4400 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 +#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) +#define I40E_PRTPM_EEER 0x001E4360 +#define 
I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 +#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) +#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 +#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) +#define I40E_PRTPM_EEETXC 0x001E43E0 +#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 +#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT) +#define I40E_PRTPM_GC 0x000B8140 +#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 +#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) +#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 +#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT) +#define I40E_PRTPM_GC_RATD_SHIFT 2 +#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT) +#define I40E_PRTPM_GC_LCDMP_SHIFT 3 +#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT) +#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 +#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) +#define I40E_PRTPM_RLPIC 0x001E43A0 +#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 +#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT) +#define I40E_PRTPM_TLPIC 0x001E43C0 +#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 +#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GLRPB_DPSS 0x000AC828 +#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 +#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT) +#define I40E_GLRPB_GHW 0x000AC830 +#define I40E_GLRPB_GHW_GHW_SHIFT 0 +#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT) +#define I40E_GLRPB_GLW 0x000AC834 +#define I40E_GLRPB_GLW_GLW_SHIFT 0 +#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT) +#define I40E_GLRPB_PHW 0x000AC844 +#define I40E_GLRPB_PHW_PHW_SHIFT 0 +#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT) +#define I40E_GLRPB_PLW 0x000AC848 +#define I40E_GLRPB_PLW_PLW_SHIFT 0 +#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT) +#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTRPB_DHW_MAX_INDEX 7 +#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 +#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) +#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTRPB_DLW_MAX_INDEX 7 +#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 +#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) +#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTRPB_DPS_MAX_INDEX 7 +#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 +#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) +#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTRPB_SHT_MAX_INDEX 7 +#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 +#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) +#define I40E_PRTRPB_SHW 0x000AC580 +#define I40E_PRTRPB_SHW_SHW_SHIFT 0 +#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT) +#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ +#define I40E_PRTRPB_SLT_MAX_INDEX 7 +#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 +#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) +#define I40E_PRTRPB_SLW 0x000AC6A0 +#define I40E_PRTRPB_SLW_SLW_SHIFT 0 +#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT) +#define I40E_PRTRPB_SPS 0x000AC7C0 +#define 
I40E_PRTRPB_SPS_SPS_SHIFT 0 +#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT) +#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ +#define I40E_GLQF_APBVT_MAX_INDEX 2047 +#define I40E_GLQF_APBVT_APBVT_SHIFT 0 +#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT) +#define I40E_GLQF_CTL 0x00269BA4 +#define I40E_GLQF_CTL_HTOEP_SHIFT 1 +#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT) +#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 +#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) +#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 +#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT) +#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 +#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 +#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT) +#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 +#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT) +#define I40E_GLQF_CTL_FDBEST_SHIFT 17 +#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT) +#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 +#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT) +#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 +#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT) +#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 +#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT) +#define I40E_GLQF_FDCNT_0 0x00269BAC +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 +#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) +#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13 +#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) +#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ +#define I40E_GLQF_HSYM_MAX_INDEX 63 +#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 +#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT) +#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ +#define I40E_GLQF_PCNT_MAX_INDEX 511 +#define I40E_GLQF_PCNT_PCNT_SHIFT 0 +#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT) +#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ +#define I40E_GLQF_SWAP_MAX_INDEX 1 +#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 +#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 +#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 +#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 +#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) +#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 +#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) +#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 +#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT) +#define I40E_PFQF_CTL_0 0x001C0AC0 +#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10 
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14 +#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16 +#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) +#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17 +#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT) +#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18 +#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) +#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 +#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) +#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 +#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) +#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 +#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) +#define I40E_PFQF_CTL_1 0x00245D80 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 +#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) +#define I40E_PFQF_FDALLOC 0x00246280 +#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 +#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT) +#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 +#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT) +#define I40E_PFQF_FDSTAT 0x00246380 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 +#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) +#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 +#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) +#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ +#define I40E_PFQF_HENA_MAX_INDEX 1 +#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ +#define I40E_PFQF_HKEY_MAX_INDEX 12 +#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT) +#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT) +#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT) +#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT) +#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ +#define I40E_PFQF_HLUT_MAX_INDEX 127 +#define I40E_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT) +#define I40E_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT) +#define I40E_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT) +#define I40E_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ +#define I40E_PFQF_HREGION_MAX_INDEX 7 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << 
I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT) +#define I40E_PRTQF_CTL_0 0x00256E60 +#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 +#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) +#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ +#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ +#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 +#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 +#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT) +#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT) +#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ +#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 +#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) +#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) +#define I40E_VFQF_HENA1_MAX_INDEX 1 +#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ +#define I40E_VFQF_HKEY1_MAX_INDEX 12 +#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 +#define 
I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT) +#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT) +#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT) +#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT) +#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ +#define I40E_VFQF_HLUT1_MAX_INDEX 15 +#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT) +#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT) +#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT) +#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT) +#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) +#define I40E_VFQF_HREGION1_MAX_INDEX 7 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT) +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT) +#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VPQF_CTL_MAX_INDEX 127 +#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 +#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << 
I40E_VPQF_CTL_PEHSIZE_SHIFT) +#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 +#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT) +#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 +#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT) +#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 +#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT) +#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ +#define I40E_VSIQF_CTL_MAX_INDEX 383 +#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 +#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT) +#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 +#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 +#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 +#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 +#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) +#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 +#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) +#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) +#define I40E_VSIQF_TCREGION_MAX_INDEX 3 +#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 +#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 +#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) +#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 +#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) +#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 +#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) +#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOECRC_MAX_INDEX 143 +#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 +#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT) +#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDDPC_MAX_INDEX 143 +#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 +#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) +/* _i=0...143 */ +#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDIFRC_MAX_INDEX 143 +#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0 +#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT) +#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 +#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) +#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDIXAC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0 +#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT) +#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 +#define 
I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 +#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) +#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 +#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) +#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 +#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) +#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 +#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) +#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 +#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) +#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 +#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) +#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOELAST_MAX_INDEX 143 +#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 +#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT) +#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEPRC_MAX_INDEX 143 +#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 +#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT) +#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOEPTC_MAX_INDEX 143 +#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 +#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT) +#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ +#define I40E_GL_FCOERPDC_MAX_INDEX 143 +#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 +#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT) +#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_BPRCH_MAX_INDEX 3 +#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_BPRCL_MAX_INDEX 3 +#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_BPTCH_MAX_INDEX 3 +#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_BPTCL_MAX_INDEX 3 +#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 +#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 +#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 +#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) +#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 
8)) /* _i=0...3 */ +#define I40E_GLPRT_GORCH_MAX_INDEX 3 +#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 +#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT) +#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_GORCL_MAX_INDEX 3 +#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 +#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT) +#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_GOTCH_MAX_INDEX 3 +#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT) +#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_GOTCL_MAX_INDEX 3 +#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT) +#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 +#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 +#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) +#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_LDPC_MAX_INDEX 3 +#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 +#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT) +#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) +#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 +#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) +#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 +#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) +#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 +#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) +#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_MLFC_MAX_INDEX 3 +#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 +#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT) +#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_MPRCH_MAX_INDEX 3 +#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT) +#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_MPRCL_MAX_INDEX 3 +#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT) +#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_MPTCH_MAX_INDEX 3 +#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT) +#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_MPTCL_MAX_INDEX 3 +#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT) +#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* 
_i=0...3 */ +#define I40E_GLPRT_MRFC_MAX_INDEX 3 +#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 +#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT) +#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 +#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) +#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 +#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) +#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC127H_MAX_INDEX 3 +#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 +#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT) +#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC127L_MAX_INDEX 3 +#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 +#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT) +#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 +#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC255H_MAX_INDEX 3 +#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 +#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) +#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC255L_MAX_INDEX 3 +#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 +#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT) +#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC511H_MAX_INDEX 3 +#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 +#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT) +#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC511L_MAX_INDEX 3 +#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 +#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT) +#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC64H_MAX_INDEX 3 +#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 +#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT) +#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC64L_MAX_INDEX 3 +#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 +#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT) +#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 +#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) +#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 +#define 
I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) +#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 +#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) +#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 +#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) +#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC127H_MAX_INDEX 3 +#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 +#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT) +#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC127L_MAX_INDEX 3 +#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 +#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT) +#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 +#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) +#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 +#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) +#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC255H_MAX_INDEX 3 +#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 +#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT) +#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC255L_MAX_INDEX 3 +#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 +#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT) +#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC511H_MAX_INDEX 3 +#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 +#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT) +#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC511L_MAX_INDEX 3 +#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 +#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT) +#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC64H_MAX_INDEX 3 +#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 +#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT) +#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC64L_MAX_INDEX 3 +#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 +#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT) +#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 +#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) +#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 +#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 +#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) 
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) +#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) +#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) +#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 +#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) +#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) +#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 +#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) +#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) +#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 +#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) +#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_RDPC_MAX_INDEX 3 +#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 +#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT) +#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_RFC_MAX_INDEX 3 +#define I40E_GLPRT_RFC_RFC_SHIFT 0 +#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT) +#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_RJC_MAX_INDEX 3 +#define I40E_GLPRT_RJC_RJC_SHIFT 0 +#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT) +#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_RLEC_MAX_INDEX 3 +#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 +#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT) +#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_ROC_MAX_INDEX 3 +#define I40E_GLPRT_ROC_ROC_SHIFT 0 +#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT) +#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_RUC_MAX_INDEX 3 +#define I40E_GLPRT_RUC_RUC_SHIFT 0 +#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT) +#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_RUPP_MAX_INDEX 3 +#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 +#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT) +#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) +#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 +#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) +#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_STDC_MAX_INDEX 3 +#define I40E_GLPRT_STDC_STDC_SHIFT 0 +#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT) +#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_TDOLD_MAX_INDEX 3 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 +#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) +#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_TDPC_MAX_INDEX 3 +#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 +#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << 
I40E_GLPRT_TDPC_TDPC_SHIFT) +#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_UPRCH_MAX_INDEX 3 +#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_UPRCL_MAX_INDEX 3 +#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT) +#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_UPTCH_MAX_INDEX 3 +#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT) +#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ +#define I40E_GLPRT_UPTCL_MAX_INDEX 3 +#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 +#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT) +#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_BPRCH_MAX_INDEX 15 +#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT) +#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_BPRCL_MAX_INDEX 15 +#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT) +#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_BPTCH_MAX_INDEX 15 +#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT) +#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_BPTCL_MAX_INDEX 15 +#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT) +#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_GORCH_MAX_INDEX 15 +#define I40E_GLSW_GORCH_GORCH_SHIFT 0 +#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT) +#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_GORCL_MAX_INDEX 15 +#define I40E_GLSW_GORCL_GORCL_SHIFT 0 +#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT) +#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_GOTCH_MAX_INDEX 15 +#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT) +#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_GOTCL_MAX_INDEX 15 +#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT) +#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_MPRCH_MAX_INDEX 15 +#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT) +#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_MPRCL_MAX_INDEX 15 +#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT) +#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_MPTCH_MAX_INDEX 15 +#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT) +#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_MPTCL_MAX_INDEX 15 +#define 
I40E_GLSW_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT) +#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_RUPP_MAX_INDEX 15 +#define I40E_GLSW_RUPP_RUPP_SHIFT 0 +#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT) +#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_TDPC_MAX_INDEX 15 +#define I40E_GLSW_TDPC_TDPC_SHIFT 0 +#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT) +#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_UPRCH_MAX_INDEX 15 +#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT) +#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_UPRCL_MAX_INDEX 15 +#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT) +#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_UPTCH_MAX_INDEX 15 +#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 +#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT) +#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ +#define I40E_GLSW_UPTCL_MAX_INDEX 15 +#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT) +#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_BPRCH_MAX_INDEX 383 +#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT) +#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_BPRCL_MAX_INDEX 383 +#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT) +#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_BPTCH_MAX_INDEX 383 +#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT) +#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_BPTCL_MAX_INDEX 383 +#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT) +#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_GORCH_MAX_INDEX 383 +#define I40E_GLV_GORCH_GORCH_SHIFT 0 +#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT) +#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_GORCL_MAX_INDEX 383 +#define I40E_GLV_GORCL_GORCL_SHIFT 0 +#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT) +#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_GOTCH_MAX_INDEX 383 +#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 +#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT) +#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_GOTCL_MAX_INDEX 383 +#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 +#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT) +#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_MPRCH_MAX_INDEX 383 +#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 +#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT) +#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ +#define 
I40E_GLV_MPRCL_MAX_INDEX 383 +#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 +#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT) +#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_MPTCH_MAX_INDEX 383 +#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 +#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT) +#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_MPTCL_MAX_INDEX 383 +#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 +#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT) +#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_RDPC_MAX_INDEX 383 +#define I40E_GLV_RDPC_RDPC_SHIFT 0 +#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT) +#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_RUPP_MAX_INDEX 383 +#define I40E_GLV_RUPP_RUPP_SHIFT 0 +#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT) +#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ +#define I40E_GLV_TEPC_MAX_INDEX 383 +#define I40E_GLV_TEPC_TEPC_SHIFT 0 +#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT) +#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_UPRCH_MAX_INDEX 383 +#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 +#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT) +#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_UPRCL_MAX_INDEX 383 +#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 +#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT) +#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_UPTCH_MAX_INDEX 383 +#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 +#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT) +#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ +#define I40E_GLV_UPTCL_MAX_INDEX 383 +#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 +#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT) +#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT) +#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 +#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT) +#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ 
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 +#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT) +#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 +#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT) +#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ +#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 +#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 +#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT) +#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 +#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) +#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 +#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) +#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 +#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) +#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 +#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 +#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) +#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 +#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) +#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 +#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) +#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 +#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) +#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ +#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 +#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 +#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) +#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) +#define I40E_GL_MTG_FLU_MSK_L 0x00269F44 +#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0 +#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT) +#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */ 
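
The register map above follows one convention throughout: each register gets an absolute address (or, for register arrays, an address macro that applies a fixed stride per index - e.g. 8 bytes per _i and 64 bytes per _j for the per-TC VEB counters), a *_MAX_INDEX bound, and a *_SHIFT/*_MASK pair for every field. As an illustration only (not taken from the patch itself), the minimal sketch below shows how a 48-bit statistics counter split across a low/high register pair is typically assembled with these macros. It assumes the driver's rd32() register-read helper (the matching wr32() appears later in this patch) and a valid struct i40e_hw handle; the function name is hypothetical.

	/* Sketch: read one VEB/VLAN good-octets counter, idx in
	 * 0..I40E_GLVEBVL_GORCL_MAX_INDEX. The low register holds the lower
	 * 32 bits; the upper 16 bits come from the high register and are
	 * extracted with the field's _MASK/_SHIFT pair.
	 */
	static u64 example_read_vebvl_gorc(struct i40e_hw *hw, unsigned int idx)
	{
		u64 lo = rd32(hw, I40E_GLVEBVL_GORCL(idx));
		u64 hi = (rd32(hw, I40E_GLVEBVL_GORCH(idx)) &
			  I40E_GLVEBVL_GORCH_VLBCH_MASK) >>
			 I40E_GLVEBVL_GORCH_VLBCH_SHIFT;

		return (hi << 32) | lo;
	}
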
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) +#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 +#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) +#define I40E_PRT_MSCCNT 0x00256BA0 +#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0 +#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT) +#define I40E_PRT_SCSTS 0x00256C20 +#define I40E_PRT_SCSTS_BSCA_SHIFT 0 +#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT) +#define I40E_PRT_SCSTS_BSCAP_SHIFT 1 +#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT) +#define I40E_PRT_SCSTS_MSCA_SHIFT 2 +#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT) +#define I40E_PRT_SCSTS_MSCAP_SHIFT 3 +#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT) +#define I40E_PRT_SWT_BSCCNT 0x00256C60 +#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0 +#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT) +#define I40E_PRTTSYN_ADJ 0x001E4280 +#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 +#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) +#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 +#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT) +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 +#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 +#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 +#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 +#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) +#define I40E_PRTTSYN_CTL0 0x001E4200 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 +#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 +#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 +#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 +#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << 
I40E_PRTTSYN_CTL0_PF_ID_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 +#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) +#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) +#define I40E_PRTTSYN_CTL1 0x00085020 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 +#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 +#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 +#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 +#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 +#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 +#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 +#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) +#define I40E_PRTTSYN_INC_H 0x001E4060 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 +#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) +#define I40E_PRTTSYN_INC_L 0x001E4040 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 +#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) +#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) +#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) +#define I40E_PRTTSYN_STAT_0 0x001E4220 +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 +#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 +#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT) +#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 +#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT) +#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 +#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) +#define I40E_PRTTSYN_STAT_1 0x00085140 +#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 +#define 
I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 +#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT) +#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 +#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT) +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 +#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ +#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 +#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) +#define I40E_PRTTSYN_TIME_H 0x001E4120 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 +#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) +#define I40E_PRTTSYN_TIME_L 0x001E4100 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 +#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) +#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) +#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 +#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) +#define I40E_GLSCD_QUANTA 0x000B2080 +#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 +#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) +#define I40E_GL_MDET_RX 0x0012A510 +#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT) +#define I40E_GL_MDET_RX_EVENT_SHIFT 8 +#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT) +#define I40E_GL_MDET_RX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT) +#define I40E_GL_MDET_RX_VALID_SHIFT 31 +#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT) +#define I40E_GL_MDET_TX 0x000E6480 +#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0 +#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT) +#define I40E_GL_MDET_TX_EVENT_SHIFT 8 +#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT) +#define I40E_GL_MDET_TX_QUEUE_SHIFT 17 +#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT) +#define I40E_GL_MDET_TX_VALID_SHIFT 31 +#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT) +#define I40E_PF_MDET_RX 0x0012A400 +#define I40E_PF_MDET_RX_VALID_SHIFT 0 +#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT) +#define I40E_PF_MDET_TX 0x000E6400 +#define I40E_PF_MDET_TX_VALID_SHIFT 0 +#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC 0x001C0500 +#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0 +#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 +#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 +#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ +#define 
I40E_VP_MDET_RX_MAX_INDEX 127 +#define I40E_VP_MDET_RX_VALID_SHIFT 0 +#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT) +#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_VP_MDET_TX_MAX_INDEX 127 +#define I40E_VP_MDET_TX_VALID_SHIFT 0 +#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT) +#define I40E_GLPM_WUMC 0x0006C800 +#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 +#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT) +#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 +#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) +#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 +#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT) +#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 +#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT) +#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 +#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) +#define I40E_PFPM_APM 0x000B8080 +#define I40E_PFPM_APM_APME_SHIFT 0 +#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT) +#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ +#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 +#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) +#define I40E_PFPM_WUC 0x0006B200 +#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 +#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT) +#define I40E_PFPM_WUFC 0x0006B400 +#define I40E_PFPM_WUFC_LNKC_SHIFT 0 +#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT) +#define I40E_PFPM_WUFC_MAG_SHIFT 1 +#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT) +#define I40E_PFPM_WUFC_MNG_SHIFT 3 +#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT) +#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 +#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 +#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 +#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 +#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 +#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 +#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 +#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 +#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT) +#define I40E_PFPM_WUFC_FLX0_SHIFT 16 +#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT) +#define I40E_PFPM_WUFC_FLX1_SHIFT 17 +#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT) +#define I40E_PFPM_WUFC_FLX2_SHIFT 18 +#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT) +#define I40E_PFPM_WUFC_FLX3_SHIFT 19 +#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT) +#define I40E_PFPM_WUFC_FLX4_SHIFT 20 +#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT) +#define I40E_PFPM_WUFC_FLX5_SHIFT 21 +#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT) +#define I40E_PFPM_WUFC_FLX6_SHIFT 22 +#define 
I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT) +#define I40E_PFPM_WUFC_FLX7_SHIFT 23 +#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT) +#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT) +#define I40E_PFPM_WUS 0x0006B600 +#define I40E_PFPM_WUS_LNKC_SHIFT 0 +#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT) +#define I40E_PFPM_WUS_MAG_SHIFT 1 +#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT) +#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 +#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT) +#define I40E_PFPM_WUS_MNG_SHIFT 3 +#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT) +#define I40E_PFPM_WUS_FLX0_SHIFT 16 +#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT) +#define I40E_PFPM_WUS_FLX1_SHIFT 17 +#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT) +#define I40E_PFPM_WUS_FLX2_SHIFT 18 +#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT) +#define I40E_PFPM_WUS_FLX3_SHIFT 19 +#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT) +#define I40E_PFPM_WUS_FLX4_SHIFT 20 +#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT) +#define I40E_PFPM_WUS_FLX5_SHIFT 21 +#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT) +#define I40E_PFPM_WUS_FLX6_SHIFT 22 +#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT) +#define I40E_PFPM_WUS_FLX7_SHIFT 23 +#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT) +#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 +#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT) +#define I40E_PRTPM_FHFHR 0x0006C000 +#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 +#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT) +#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 +#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT) +#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRTPM_SAH_MAX_INDEX 3 +#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 +#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT) +#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 +#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT) +#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 +#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) +#define I40E_PRTPM_SAH_AV_SHIFT 31 +#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT) +#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ +#define I40E_PRTPM_SAL_MAX_INDEX 3 +#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 +#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT) +#define I40E_VF_ARQBAH1 0x00006000 +#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 +#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT) +#define I40E_VF_ARQBAL1 0x00006C00 +#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 +#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT) +#define I40E_VF_ARQH1 0x00007400 +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 +#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 +#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT) +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define 
I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 +#define I40E_VF_ARQT1_ARQT_SHIFT 0 +#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT) +#define I40E_VF_ATQBAH1 0x00007800 +#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 +#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT) +#define I40E_VF_ATQBAL1 0x00007C00 +#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 +#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT) +#define I40E_VF_ATQH1 0x00006400 +#define I40E_VF_ATQH1_ATQH_SHIFT 0 +#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT) +#define I40E_VF_ATQLEN1 0x00006800 +#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 +#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT) +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 +#define I40E_VF_ATQT1_ATQT_SHIFT 0 +#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT) +#define I40E_VFGEN_RSTAT 0x00008800 +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) +#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 +#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define 
I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 +#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) +#define I40E_VFINT_ICR01 0x00004800 +#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 +#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 +#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 +#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 +#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT) +#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 +#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT) +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 +#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) +#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT) +#define I40E_VFINT_ICR01_SWINT_SHIFT 31 +#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT) +#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ +#define I40E_VFINT_ITR01_MAX_INDEX 2 +#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT) +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) +#define I40E_VFINT_ITRN1_MAX_INDEX 2 +#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 +#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT) +#define I40E_VFINT_STAT_CTL01 0x00005400 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 +#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ +#define I40E_QRX_TAIL1_MAX_INDEX 15 +#define I40E_QRX_TAIL1_TAIL_SHIFT 0 +#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT) +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ +#define I40E_QTX_TAIL1_MAX_INDEX 15 +#define I40E_QTX_TAIL1_TAIL_SHIFT 0 +#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT) +#define I40E_VFMSIX_PBA 0x00002000 +#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 +#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT) +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ +#define 
I40E_VFMSIX_TADD_MAX_INDEX 16 +#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 +#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) +#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 +#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT) +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TMSG_MAX_INDEX 16 +#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 +#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TUADD_MAX_INDEX 16 +#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 +#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 +#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 +#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT) +#define I40E_VFCM_PE_ERRDATA 0x0000DC00 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_VFCM_PE_ERRINFO 0x0000D800 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK 
(0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ +#define I40E_VFQF_HENA_MAX_INDEX 1 +#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 +#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT) +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT) +#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT) +#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT) +#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT) +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFQF_HLUT_LUT0_SHIFT 0 +#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT) +#define I40E_VFQF_HLUT_LUT1_SHIFT 8 +#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT) +#define I40E_VFQF_HLUT_LUT2_SHIFT 16 +#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT) +#define I40E_VFQF_HLUT_LUT3_SHIFT 24 +#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT) +#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ +#define I40E_VFQF_HREGION_MAX_INDEX 7 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << 
I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT) +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT) +#endif --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_status.h @@ -0,0 +1,97 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_STATUS_H_ +#define _I40E_STATUS_H_ + +/* Error Codes */ +enum i40e_status_code { + I40E_SUCCESS = 0, + I40E_ERR_NVM = -1, + I40E_ERR_NVM_CHECKSUM = -2, + I40E_ERR_PHY = -3, + I40E_ERR_CONFIG = -4, + I40E_ERR_PARAM = -5, + I40E_ERR_MAC_TYPE = -6, + I40E_ERR_UNKNOWN_PHY = -7, + I40E_ERR_LINK_SETUP = -8, + I40E_ERR_ADAPTER_STOPPED = -9, + I40E_ERR_INVALID_MAC_ADDR = -10, + I40E_ERR_DEVICE_NOT_SUPPORTED = -11, + I40E_ERR_MASTER_REQUESTS_PENDING = -12, + I40E_ERR_INVALID_LINK_SETTINGS = -13, + I40E_ERR_AUTONEG_NOT_COMPLETE = -14, + I40E_ERR_RESET_FAILED = -15, + I40E_ERR_SWFW_SYNC = -16, + I40E_ERR_NO_AVAILABLE_VSI = -17, + I40E_ERR_NO_MEMORY = -18, + I40E_ERR_BAD_PTR = -19, + I40E_ERR_RING_FULL = -20, + I40E_ERR_INVALID_PD_ID = -21, + I40E_ERR_INVALID_QP_ID = -22, + I40E_ERR_INVALID_CQ_ID = -23, + I40E_ERR_INVALID_CEQ_ID = -24, + I40E_ERR_INVALID_AEQ_ID = -25, + I40E_ERR_INVALID_SIZE = -26, + I40E_ERR_INVALID_ARP_INDEX = -27, + I40E_ERR_INVALID_FPM_FUNC_ID = -28, + I40E_ERR_QP_INVALID_MSG_SIZE = -29, + I40E_ERR_QP_TOOMANY_WRS_POSTED = -30, + I40E_ERR_INVALID_FRAG_COUNT = -31, + I40E_ERR_QUEUE_EMPTY = -32, + I40E_ERR_INVALID_ALIGNMENT = -33, + I40E_ERR_FLUSHED_QUEUE = -34, + I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35, + I40E_ERR_INVALID_IMM_DATA_SIZE = -36, + I40E_ERR_TIMEOUT = -37, + I40E_ERR_OPCODE_MISMATCH = -38, + I40E_ERR_CQP_COMPL_ERROR = -39, + I40E_ERR_INVALID_VF_ID = -40, + I40E_ERR_INVALID_HMCFN_ID = -41, + I40E_ERR_BACKING_PAGE_ERROR = -42, + I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + I40E_ERR_INVALID_PBLE_INDEX = -44, + I40E_ERR_INVALID_SD_INDEX = -45, + I40E_ERR_INVALID_PAGE_DESC_INDEX = -46, + I40E_ERR_INVALID_SD_TYPE = -47, + I40E_ERR_MEMCPY_FAILED = -48, + I40E_ERR_INVALID_HMC_OBJ_INDEX = -49, + I40E_ERR_INVALID_HMC_OBJ_COUNT = -50, + I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51, + I40E_ERR_SRQ_ENABLED = -52, + I40E_ERR_ADMIN_QUEUE_ERROR = -53, + I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54, + I40E_ERR_BUF_TOO_SHORT = -55, + I40E_ERR_ADMIN_QUEUE_FULL = -56, + I40E_ERR_ADMIN_QUEUE_NO_WORK = -57, + I40E_ERR_BAD_IWARP_CQE = -58, + I40E_ERR_NVM_BLANK_MODE = -59, + I40E_ERR_NOT_IMPLEMENTED = -60, + I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61, + I40E_ERR_DIAG_TEST_FAILED = -62, + I40E_ERR_NOT_READY = -63, + I40E_NOT_SUPPORTED = -64, + I40E_ERR_FIRMWARE_API_VERSION = -65, +}; + +#endif /* _I40E_STATUS_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -0,0 +1,1633 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include + +#include "i40evf.h" +#include "i40e_prototype.h" + +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) + +/** + * i40e_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, + struct i40e_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ +} + +/** + * i40evf_clean_tx_ring - Free any empty Tx buffers + * @tx_ring: ring to be cleaned + **/ +void i40evf_clean_tx_ring(struct i40e_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_bi) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]); + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index)); +} + +/** + * i40evf_free_tx_resources - Free Tx resources per queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void i40evf_free_tx_resources(struct i40e_ring *tx_ring) +{ + i40evf_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +/** + * i40e_get_tx_pending - how many tx descriptors not processed + * @tx_ring: the ring of descriptors + * + * Since there is no access to the ring head register + * in XL710, we need to use our local copies + **/ +static u32 i40e_get_tx_pending(struct i40e_ring *ring) +{ + u32 ntu = ((ring->next_to_clean <= ring->next_to_use) + ? 
ring->next_to_use + : ring->next_to_use + ring->count); + return ntu - ring->next_to_clean; +} + +/** + * i40e_check_tx_hang - Is there a hang in the Tx queue + * @tx_ring: the ring of descriptors + **/ +static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) +{ + u32 tx_pending = i40e_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * PFC clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && + tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, + &tx_ring->state); + } else { + /* update completed stats and disarm the hang check */ + tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); + } + + return ret; +} + +/** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** + * i40e_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: tx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. 
the clean is finished) + **/ +static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +{ + u16 i = tx_ring->next_to_clean; + struct i40e_tx_buffer *tx_buf; + struct i40e_tx_desc *tx_head; + struct i40e_tx_desc *tx_desc; + unsigned int total_packets = 0; + unsigned int total_bytes = 0; + + tx_buf = &tx_ring->tx_bi[i]; + tx_desc = I40E_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); + + do { + struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + read_barrier_depends(); + + /* we have caught up to head, no work left to do */ + if (tx_head == tx_desc) + break; + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb */ + dev_kfree_skb_any(tx_buf->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buf = tx_ring->tx_bi; + tx_desc = I40E_TX_DESC(tx_ring, 0); + } + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; + + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" + " VSI <%d>\n" + " Tx Queue <%d>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n", + tx_ring->vsi->seid, + tx_ring->queue_index, + tx_ring->next_to_use, i); + dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->tx_bi[i].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + dev_info(tx_ring->dev, + "tx hang detected on queue %d, resetting adapter\n", + tx_ring->queue_index); + + tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } + + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + + return budget > 0; +} + +/** + * i40e_set_new_dynamic_itr - Find new ITR level + * @rc: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte counts during + * the last interrupt. The advantage of per interrupt computation + * is faster updates and more accurate ITR for the current traffic + * pattern. Constants in this function were computed based on + * theoretical maximum wire speed and thresholds were set based on + * testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) +{ + enum i40e_latency_range new_latency_range = rc->latency_range; + u32 new_itr = rc->itr; + int bytes_per_int; + + if (rc->total_packets == 0 || !rc->itr) + return; + + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (8000 ints/s) + */ + bytes_per_int = rc->total_bytes / rc->itr; + switch (rc->itr) { + case I40E_LOWEST_LATENCY: + if (bytes_per_int > 10) + new_latency_range = I40E_LOW_LATENCY; + break; + case I40E_LOW_LATENCY: + if (bytes_per_int > 20) + new_latency_range = I40E_BULK_LATENCY; + else if (bytes_per_int <= 10) + new_latency_range = I40E_LOWEST_LATENCY; + break; + case I40E_BULK_LATENCY: + if (bytes_per_int <= 20) + rc->latency_range = I40E_LOW_LATENCY; + break; + } + + switch (new_latency_range) { + case I40E_LOWEST_LATENCY: + new_itr = I40E_ITR_100K; + break; + case I40E_LOW_LATENCY: + new_itr = I40E_ITR_20K; + break; + case I40E_BULK_LATENCY: + new_itr = I40E_ITR_8K; + break; + default: + break; + } + + if (new_itr != rc->itr) { + /* do an exponential smoothing */ + new_itr = (10 * new_itr * rc->itr) / + ((9 * new_itr) + rc->itr); + rc->itr = new_itr & I40E_MAX_ITR; + } + + rc->total_bytes = 0; + rc->total_packets = 0; +} + +/** + * i40e_update_dynamic_itr - Adjust ITR based on bytes per int + * @q_vector: the vector to adjust + **/ +static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector) +{ + u16 vector = q_vector->vsi->base_vector + q_vector->v_idx; + struct i40e_hw *hw = &q_vector->vsi->back->hw; + u32 reg_addr; + u16 old_itr; + + reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1); + old_itr = q_vector->rx.itr; + i40e_set_new_dynamic_itr(&q_vector->rx); + if (old_itr != q_vector->rx.itr) + wr32(hw, reg_addr, q_vector->rx.itr); + + reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1); + old_itr = q_vector->tx.itr; + i40e_set_new_dynamic_itr(&q_vector->tx); + if (old_itr != q_vector->tx.itr) + wr32(hw, reg_addr, q_vector->tx.itr); +} + +/** + * i40evf_setup_tx_descriptors - Allocate the Tx descriptors + * @tx_ring: the tx ring to set up + * + * Return 0 on success, negative on error + **/ +int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; + tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_bi) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); + /* add u32 for head writeback, align after this takes care of + * guaranteeing this is at least one cache line in size + */ + 
tx_ring->size += sizeof(u32); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + kfree(tx_ring->tx_bi); + tx_ring->tx_bi = NULL; + return -ENOMEM; +} + +/** + * i40evf_clean_rx_ring - Free Rx buffers + * @rx_ring: ring to be cleaned + **/ +void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_bi) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->dma) { + dma_unmap_single(dev, + rx_bi->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + } + if (rx_bi->skb) { + dev_kfree_skb(rx_bi->skb); + rx_bi->skb = NULL; + } + if (rx_bi->page) { + if (rx_bi->page_dma) { + dma_unmap_page(dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + __free_page(rx_bi->page); + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + } + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + memset(rx_ring->rx_bi, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * i40evf_free_rx_resources - Free Rx resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void i40evf_free_rx_resources(struct i40e_ring *rx_ring) +{ + i40evf_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +/** + * i40evf_setup_rx_descriptors - Allocate Rx descriptors + * @rx_ring: Rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int bi_size; + + bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; + rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_bi) + goto err; + + /* Round up to nearest 4K */ + rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) + ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) + : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + rx_ring->size); + goto err; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + return -ENOMEM; +} + +/** + * i40e_release_rx_desc - Store the new tail and head values + * @rx_ring: ring to bump + * @val: new head index + **/ +static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + writel(val, rx_ring->tail); +} + +/** + * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + struct sk_buff *skb; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + skb = bi->skb; + + if (!skb) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + goto no_buffers; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; + } + + if (!bi->dma) { + bi->dma = dma_map_single(rx_ring->dev, + skb->data, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, bi->dma)) { + rx_ring->rx_stats.alloc_buff_failed++; + bi->dma = 0; + goto no_buffers; + } + } + + if (ring_is_ps_enabled(rx_ring)) { + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
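+		 * (The 'read' and 'wb' layouts share one descriptor union,
+		 * so the hardware's write-back overwrites the addresses
+		 * programmed here.)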
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + } else { + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; + } + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; + struct i40e_vsi *vsi = rx_ring->vsi; + u64 flags = vsi->back->flags; + + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + if (flags & I40E_FLAG_IN_NETPOLL) + netif_rx(skb); + else + napi_gro_receive(&q_vector->napi, skb); +} + +/** + * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum + * @vsi: the VSI we care about + * @skb: skb currently being received and modified + * @rx_status: status value of last descriptor in packet + * @rx_error: error value of last descriptor in packet + * @rx_ptype: ptype value of last descriptor in packet + **/ +static inline void i40e_rx_checksum(struct i40e_vsi *vsi, + struct sk_buff *skb, + u32 rx_status, + u32 rx_error, + u16 rx_ptype) +{ + bool ipv4_tunnel, ipv6_tunnel; + __wsum rx_udp_csum; + __sum16 csum; + struct iphdr *iph; + + ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + + skb->encapsulation = ipv4_tunnel || ipv6_tunnel; + skb->ip_summed = CHECKSUM_NONE; + + /* Rx csum enabled and ip headers found? */ + if (!(vsi->netdev->features & NETIF_F_RXCSUM && + rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) + return; + + /* likely incorrect csum if alternate IP extension headers found */ + if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) + return; + + /* IP or L4 or outmost IP checksum error */ + if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | + (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) | + (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) { + vsi->back->hw_csum_rx_error++; + return; + } + + if (ipv4_tunnel && + !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { + /* If VXLAN traffic has an outer UDPv4 checksum we need to check + * it in the driver, hardware does not do it for us. + * Since L3L4P bit was set we assume a valid IHL value (>=5) + * so the total length of IPv4 header is IHL*4 bytes + */ + skb->transport_header = skb->mac_header + + sizeof(struct ethhdr) + + (ip_hdr(skb)->ihl * 4); + + /* Add 4 bytes for VLAN tagged packets */ + skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + ? 
VLAN_HLEN : 0; + + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic( + iph->saddr, iph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); + + if (udp_hdr(skb)->check != csum) { + vsi->back->hw_csum_rx_error++; + return; + } + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/** + * i40e_rx_hash - returns the hash value from the Rx descriptor + * @ring: descriptor ring + * @rx_desc: specific descriptor + **/ +static inline u32 i40e_rx_hash(struct i40e_ring *ring, + union i40e_rx_desc *rx_desc) +{ + const __le64 rss_mask = + cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + + if ((ring->netdev->features & NETIF_F_RXHASH) && + (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) + return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); + else + return 0; +} + +/** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ + struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + + if (!decoded.known) + return PKT_HASH_TYPE_NONE; + + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) + return PKT_HASH_TYPE_L4; + else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && + decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) + return PKT_HASH_TYPE_L3; + else + return PKT_HASH_TYPE_L2; +} + +/** + * i40e_clean_rx_irq - Reclaim resources after receive completes + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. the clean is finished) + **/ +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + const int current_node = numa_node_id(); + struct i40e_vsi *vsi = rx_ring->vsi; + u16 i = rx_ring->next_to_clean; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u8 rx_ptype; + u64 qword; + + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { + union i40e_rx_desc *next_rxd; + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> + I40E_RXD_QW1_LENGTH_HBUF_SHIFT; + rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> + I40E_RXD_QW1_LENGTH_SPH_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * STATUS_DD bit is set + */ + rmb(); + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + if (rx_bi->dma) { + u16 len; + + if (rx_hbo) + len = I40E_RX_HDR_SIZE; + else if 
(rx_sph) + len = rx_header_len; + else if (rx_packet_len) + len = rx_packet_len; /* 1buf/no split found */ + else + len = rx_header_len; /* split always mode */ + + skb_put(skb, len); + dma_unmap_single(rx_ring->dev, + rx_bi->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + } + + /* Get the rest of the data if this was a header split */ + if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset, + rx_packet_len); + + skb->len += rx_packet_len; + skb->data_len += rx_packet_len; + skb->truesize += rx_packet_len; + + if ((page_count(rx_bi->page) == 1) && + (page_to_nid(rx_bi->page) == current_node)) + get_page(rx_bi->page); + else + rx_bi->page = NULL; + + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + rx_bi->page_dma = 0; + } + I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + struct i40e_rx_buffer *next_buffer; + + next_buffer = &rx_ring->rx_bi[i]; + + if (ring_is_ps_enabled(rx_ring)) { + rx_bi->skb = next_buffer->skb; + rx_bi->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + } + rx_ring->rx_stats.non_eop_descs++; + goto next_desc; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + goto next_desc; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + budget--; +next_desc: + rx_desc->wb.qword1.status_error_len = 0; + if (!budget) + break; + + cleaned_count++; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + } + + rx_ring->next_to_clean = i; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + if (cleaned_count) + i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + + return budget > 0; +} + +/** + * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
+ * + * Returns the amount of work done + **/ +int i40evf_napi_poll(struct napi_struct *napi, int budget) +{ + struct i40e_q_vector *q_vector = + container_of(napi, struct i40e_q_vector, napi); + struct i40e_vsi *vsi = q_vector->vsi; + struct i40e_ring *ring; + bool clean_complete = true; + int budget_per_ring; + + if (test_bit(__I40E_DOWN, &vsi->state)) { + napi_complete(napi); + return 0; + } + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + i40e_for_each_ring(ring, q_vector->tx) + clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + + /* We attempt to distribute budget to each Rx queue fairly, but don't + * allow the budget to go below 1 because that would exit polling early. + */ + budget_per_ring = max(budget/q_vector->num_ringpairs, 1); + + i40e_for_each_ring(ring, q_vector->rx) + clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) + return budget; + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete(napi); + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) || + ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + i40e_update_dynamic_itr(q_vector); + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx); + + return 0; +} + +/** + * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * @skb: send buffer + * @tx_ring: ring to send buffer on + * @flags: the tx flags to be set + * + * Checks the skb and set up correspondingly several generic transmit flags + * related to VLAN tagging for the HW, such as VLAN, DCB, etc. + * + * Returns error code indicate the frame should be dropped upon error and the + * otherwise returns 0 to indicate the flags has been set properly. 
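+ *
+ * For example, a hardware-accelerated VLAN tag is placed in the upper
+ * 16 bits of *flags (I40E_TX_FLAGS_VLAN_SHIFT) together with
+ * I40E_TX_FLAGS_HW_VLAN, which i40e_tx_map() later translates into the
+ * IL2TAG1 descriptor command and td_tag.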
+ **/ +static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) +{ + __be16 protocol = skb->protocol; + u32 tx_flags = 0; + + /* if we have a HW VLAN tag being added, default to the HW one */ + if (vlan_tx_tag_present(skb)) { + tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN, check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + return -EINVAL; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; + tx_flags |= I40E_TX_FLAGS_SW_VLAN; + } + + *flags = tx_flags; + return 0; +} + +/** + * i40e_tso - set up the tso context descriptor + * @tx_ring: ptr to the ring to send + * @skb: ptr to the skb we're sending + * @tx_flags: the collected send information + * @protocol: the send protocol + * @hdr_len: ptr to the size of the packet header + * @cd_tunneling: ptr to context descriptor bits + * + * Returns 0 if no TSO can happen, 1 if tso is going, or error + **/ +static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol, u8 *hdr_len, + u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) +{ + u32 cd_cmd, cd_tso_len, cd_mss; + struct ipv6hdr *ipv6h; + struct tcphdr *tcph; + struct iphdr *iph; + u32 l4len; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + if (protocol == htons(ETH_P_IP)) { + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + } else if (skb_is_gso_v6(skb)) { + + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) + : ipv6_hdr(skb); + tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + ipv6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, + 0, IPPROTO_TCP, 0); + } + + l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); + *hdr_len = (skb->encapsulation + ? 
(skb_inner_transport_header(skb) - skb->data) + : skb_transport_offset(skb)) + l4len; + + /* find the field values */ + cd_cmd = I40E_TX_CTX_DESC_TSO; + cd_tso_len = skb->len - *hdr_len; + cd_mss = skb_shinfo(skb)->gso_size; + *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + ((u64)cd_tso_len << + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + return 1; +} + +/** + * i40e_tx_enable_csum - Enable Tx checksum offloads + * @skb: send buffer + * @tx_flags: Tx flags currently set + * @td_cmd: Tx descriptor command bits to set + * @td_offset: Tx descriptor header offsets to set + * @cd_tunneling: ptr to context desc bits + **/ +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) +{ + struct ipv6hdr *this_ipv6_hdr; + unsigned int this_tcp_hdrlen; + struct iphdr *this_ip_hdr; + u32 network_hdr_len; + u8 l4_hdr = 0; + + if (skb->encapsulation) { + network_hdr_len = skb_inner_network_header_len(skb); + this_ip_hdr = inner_ip_hdr(skb); + this_ipv6_hdr = inner_ipv6_hdr(skb); + this_tcp_hdrlen = inner_tcp_hdrlen(skb); + + if (tx_flags & I40E_TX_FLAGS_IPV4) { + + if (tx_flags & I40E_TX_FLAGS_TSO) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + ip_hdr(skb)->check = 0; + } else { + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + } + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + if (tx_flags & I40E_TX_FLAGS_TSO) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + ip_hdr(skb)->check = 0; + } else { + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + } + } + + /* Now set the ctx descriptor fields */ + *cd_tunneling |= (skb_network_header_len(skb) >> 2) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + I40E_TXD_CTX_UDP_TUNNELING | + ((skb_inner_network_offset(skb) - + skb_transport_offset(skb)) >> 1) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + } else { + network_hdr_len = skb_network_header_len(skb); + this_ip_hdr = ip_hdr(skb); + this_ipv6_hdr = ipv6_hdr(skb); + this_tcp_hdrlen = tcp_hdrlen(skb); + } + + /* Enable IP checksum offloads */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { + l4_hdr = this_ip_hdr->protocol; + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. 
+ */ + if (tx_flags & I40E_TX_FLAGS_TSO) { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; + this_ip_hdr->check = 0; + } else { + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; + } + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + l4_hdr = this_ipv6_hdr->nexthdr; + *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + /* Now set the td_offset for IP header length */ + *td_offset = (network_hdr_len >> 2) << + I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + } + /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ + *td_offset |= (skb_network_offset(skb) >> 1) << + I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + + /* Enable L4 checksum offloads */ + switch (l4_hdr) { + case IPPROTO_TCP: + /* enable checksum offloads */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + *td_offset |= (this_tcp_hdrlen >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + *td_offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + case IPPROTO_UDP: + /* enable UDP checksum offload */ + *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + *td_offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + break; + default: + break; + } +} + +/** + * i40e_create_tx_ctx Build the Tx context descriptor + * @tx_ring: ring to create the descriptor on + * @cd_type_cmd_tso_mss: Quad Word 1 + * @cd_tunneling: Quad Word 0 - bits 0-31 + * @cd_l2tag2: Quad Word 0 - bits 32-63 + **/ +static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, + const u64 cd_type_cmd_tso_mss, + const u32 cd_tunneling, const u32 cd_l2tag2) +{ + struct i40e_tx_context_desc *context_desc; + int i = tx_ring->next_to_use; + + if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && + !cd_tunneling && !cd_l2tag2) + return; + + /* grab the next descriptor */ + context_desc = I40E_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* cpu_to_le32 and assign to struct fields */ + context_desc->tunneling_params = cpu_to_le32(cd_tunneling); + context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); + context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); +} + +/** + * i40e_tx_map - Build the Tx descriptor + * @tx_ring: ring to send buffer on + * @skb: send buffer + * @first: first buffer info buffer to use + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * @td_cmd: the command field in the descriptor + * @td_offset: offset for checksum or crc + **/ +static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) +{ + unsigned int data_len = skb->data_len; + unsigned int size = skb_headlen(skb); + struct skb_frag_struct *frag; + struct i40e_tx_buffer *tx_bi; + struct i40e_tx_desc *tx_desc; + u16 i = tx_ring->next_to_use; + u32 td_tag = 0; + dma_addr_t dma; + u16 gso_segs; + + if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { + td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; + td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >> + I40E_TX_FLAGS_VLAN_SHIFT; + } + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) + gso_segs = skb_shinfo(skb)->gso_segs; + else + gso_segs = 1; + + /* multiply data chunks by size of headers */ + first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); + first->gso_segs = gso_segs; + first->skb = skb; + first->tx_flags = tx_flags; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_desc = I40E_TX_DESC(tx_ring, i); + tx_bi = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + tx_desc->buffer_addr = cpu_to_le64(dma); + + while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, + I40E_MAX_DATA_PER_TXD, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + dma += I40E_MAX_DATA_PER_TXD; + size -= I40E_MAX_DATA_PER_TXD; + + tx_desc->buffer_addr = cpu_to_le64(dma); + } + + if (likely(!data_len)) + break; + + tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, + size, td_tag); + + tx_desc++; + i++; + if (i == tx_ring->count) { + tx_desc = I40E_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_bi[i]; + } + + /* Place RS bit on last descriptor of any packet that spans across the + * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. + */ +#define WB_STRIDE 0x3 + if (((i & WB_STRIDE) != WB_STRIDE) && + (first <= &tx_ring->tx_bi[i]) && + (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << + I40E_TXD_QW1_CMD_SHIFT); + } else { + tx_desc->cmd_type_offset_bsz = + build_ctob(td_cmd, td_offset, size, td_tag) | + cpu_to_le64((u64)I40E_TXD_CMD << + I40E_TXD_QW1_CMD_SHIFT); + } + + netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index), + first->bytecount); + + /* set the timestamp */ + first->time_stamp = jiffies; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. 
(Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + /* notify HW of packet */ + writel(i, tx_ring->tail); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_bi[i]; + i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + if (i == 0) + i = tx_ring->count; + i--; + } + + tx_ring->next_to_use = i; +} + +/** + * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40e_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} + +/** + * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
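+ *
+ * For example, assuming 4 KB pages (so PAGE_SIZE < I40E_MAX_DATA_PER_TXD),
+ * each page fragment fits in a single descriptor: the count is nr_frags
+ * plus TXD_USE_COUNT(skb_headlen()), typically 1, and the stop check
+ * below adds the 4 descriptor gap and 1 context descriptor on top.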
+ **/ +static int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ +#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD + unsigned int f; +#endif + int count = 0; + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ +#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + count += TXD_USE_COUNT(skb_headlen(skb)); + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; + return 0; + } + return count; +} + +/** + * i40e_xmit_frame_ring - Sends buffer on Tx ring + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, + struct i40e_ring *tx_ring) +{ + u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT; + u32 cd_tunneling = 0, cd_l2tag2 = 0; + struct i40e_tx_buffer *first; + u32 td_offset = 0; + u32 tx_flags = 0; + __be16 protocol; + u32 td_cmd = 0; + u8 hdr_len = 0; + int tso; + if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + return NETDEV_TX_BUSY; + + /* prepare the xmit flags */ + if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + goto out_drop; + + /* obtain protocol of skb */ + protocol = skb->protocol; + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_bi[tx_ring->next_to_use]; + + /* setup IPv4/IPv6 offloads */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= I40E_TX_FLAGS_IPV4; + else if (protocol == htons(ETH_P_IPV6)) + tx_flags |= I40E_TX_FLAGS_IPV6; + + tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + &cd_type_cmd_tso_mss, &cd_tunneling); + + if (tso < 0) + goto out_drop; + else if (tso) + tx_flags |= I40E_TX_FLAGS_TSO; + + skb_tx_timestamp(skb); + + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + + /* Always offload the checksum, since it's in the data descriptor */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + tx_flags |= I40E_TX_FLAGS_CSUM; + + i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + } + + i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, + cd_tunneling, cd_l2tag2); + + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); + + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); + + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer + * @skb: send buffer + * @netdev: network interface device structure + * + * Returns NETDEV_TX_OK if sent, else an error code + **/ +netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping]; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (unlikely(skb->len < I40E_MIN_TX_LEN)) { + if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) + return NETDEV_TX_OK; + skb->len = I40E_MIN_TX_LEN; + skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); + } + + return i40e_xmit_frame_ring(skb, tx_ring); +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ 
linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -0,0 +1,296 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TXRX_H_ +#define _I40E_TXRX_H_ + +/* Interrupt Throttling and Rate Limiting (storm control) Goodies */ + +#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ +#define I40E_MAX_IRATE 0x03F +#define I40E_MIN_IRATE 0x001 +#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_ITR_100K 0x0005 +#define I40E_ITR_20K 0x0019 +#define I40E_ITR_8K 0x003E +#define I40E_ITR_4K 0x007A +#define I40E_ITR_RX_DEF I40E_ITR_8K +#define I40E_ITR_TX_DEF I40E_ITR_4K +#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ +#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ +#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ +#define I40E_DEFAULT_IRQ_WORK 256 +#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1) +#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC)) +#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1) + +#define I40E_QUEUE_END_OF_LIST 0x7FF + +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
+ */ +enum i40e_dyn_idx_t { + I40E_IDX_ITR0 = 0, + I40E_IDX_ITR1 = 1, + I40E_IDX_ITR2 = 2, + I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR I40E_IDX_ITR0 +#define I40E_TX_ITR I40E_IDX_ITR1 +#define I40E_PE_ITR I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ + ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) + +/* Supported Rx Buffer Sizes */ +#define I40E_RXBUFFER_512 512 /* Used for packet split */ +#define I40E_RXBUFFER_2048 2048 +#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ +#define I40E_RXBUFFER_4096 4096 +#define I40E_RXBUFFER_8192 8192 +#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */ + +/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, + * this adds up to 512 bytes of extra data meaning the smallest allocation + * we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_NEXT_DESC(r, i, n) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + (n) = I40E_RX_DESC((r), (i)); \ + } while (0) + +#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ + do { \ + I40E_RX_NEXT_DESC((r), (i), (n)); \ + prefetch((n)); \ + } while (0) + +#define i40e_rx_desc i40e_32byte_rx_desc + +#define I40E_MIN_TX_LEN 17 +#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */ + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) + +#define I40E_TX_FLAGS_CSUM (u32)(1) +#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) +#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2) +#define I40E_TX_FLAGS_TSO (u32)(1 << 3) +#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4) +#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5) +#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) +#define I40E_TX_FLAGS_FSO (u32)(1 << 7) +#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 +#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 +#define I40E_TX_FLAGS_VLAN_SHIFT 16 + +struct i40e_tx_buffer { + struct i40e_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct i40e_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + dma_addr_t page_dma; + unsigned int page_offset; +}; + +struct i40e_queue_stats { + u64 packets; + u64 bytes; +}; + +struct i40e_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct i40e_rx_queue_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buff_failed; +}; + +enum i40e_ring_state_t { + __I40E_TX_FDIR_INIT_DONE, + __I40E_TX_XPS_INIT_DONE, + __I40E_TX_DETECT_HANG, + __I40E_HANG_CHECK_ARMED, + __I40E_RX_PS_ENABLED, + __I40E_RX_LRO_ENABLED, + __I40E_RX_16BYTE_DESC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ + test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ + set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ + clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) +#define ring_is_lro_enabled(ring) \ + test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state) +#define set_ring_lro_enabled(ring) \ + set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state) +#define clear_ring_lro_enabled(ring) \ + clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state) +#define ring_is_16byte_desc_enabled(ring) \ + test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define set_ring_16byte_desc_enabled(ring) \ + set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +#define clear_ring_16byte_desc_enabled(ring) \ + clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) + +/* struct that defines a descriptor ring, associated with a VSI */ +struct i40e_ring { + struct i40e_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + union { + struct i40e_tx_buffer *tx_bi; + struct i40e_rx_buffer *rx_bi; + }; + unsigned long state; + u16 queue_index; 
/* Queue number of ring */ + u8 dcb_tc; /* Traffic class of ring */ + u8 __iomem *tail; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + u16 rx_hdr_len; + u16 rx_buf_len; + u8 dtype; +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 +#define I40E_RX_DTYPE_HEADER_SPLIT 2 + u8 hsplit; +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + + u8 atr_sample_rate; + u8 atr_count; + + bool ring_active; /* is ring online or not */ + + /* stats structs */ + struct i40e_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct i40e_tx_queue_stats tx_stats; + struct i40e_rx_queue_stats rx_stats; + }; + + unsigned int size; /* length of descriptor ring in bytes */ + dma_addr_t dma; /* physical address of ring */ + + struct i40e_vsi *vsi; /* Backreference to associated VSI */ + struct i40e_q_vector *q_vector; /* Backreference to associated vector */ + + struct rcu_head rcu; /* to avoid race on free */ +} ____cacheline_internodealigned_in_smp; + +enum i40e_latency_range { + I40E_LOWEST_LATENCY = 0, + I40E_LOW_LATENCY = 1, + I40E_BULK_LATENCY = 2, +}; + +struct i40e_ring_container { + /* array of pointers to rings */ + struct i40e_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; + enum i40e_latency_range latency_range; + u16 itr; +}; + +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); +void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); +int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring); +int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); +void i40evf_free_tx_resources(struct i40e_ring *tx_ring); +void i40evf_free_rx_resources(struct i40e_ring *rx_ring); +int i40evf_napi_poll(struct napi_struct *napi, int budget); +#endif /* _I40E_TXRX_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -0,0 +1,1160 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_hmc.h" +#include "i40e_lan_hmc.h" + +/* Device IDs */ +#define I40E_DEV_ID_SFP_XL710 0x1572 +#define I40E_DEV_ID_SFP_X710 0x1573 +#define I40E_DEV_ID_QEMU 0x1574 +#define I40E_DEV_ID_KX_A 0x157F +#define I40E_DEV_ID_KX_B 0x1580 +#define I40E_DEV_ID_KX_C 0x1581 +#define I40E_DEV_ID_KX_D 0x1582 +#define I40E_DEV_ID_QSFP_A 0x1583 +#define I40E_DEV_ID_QSFP_B 0x1584 +#define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_VF_HV 0x1571 + +#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ + (d) == I40E_DEV_ID_QSFP_B || \ + (d) == I40E_DEV_ID_QSFP_C) + +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 + +/* Max default timeout in ms, */ +#define I40E_MAX_NVM_TIMEOUT 18000 + +/* Switch from mc to the 2usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2) + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* PCI Bus Info */ +#define I40E_PCI_LINK_WIDTH_1 0x10 +#define I40E_PCI_LINK_WIDTH_2 0x20 +#define I40E_PCI_LINK_WIDTH_4 0x40 +#define I40E_PCI_LINK_WIDTH_8 0x80 +#define I40E_PCI_LINK_SPEED_2500 0x1 +#define I40E_PCI_LINK_SPEED_5000 0x2 +#define I40E_PCI_LINK_SPEED_8000 0x3 + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. 
+ */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_X710, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_media_type { + I40E_MEDIA_TYPE_UNKNOWN = 0, + I40E_MEDIA_TYPE_FIBER, + I40E_MEDIA_TYPE_BASET, + I40E_MEDIA_TYPE_BACKPLANE, + I40E_MEDIA_TYPE_CX4, + I40E_MEDIA_TYPE_DA, + I40E_MEDIA_TYPE_VIRTUAL +}; + +enum i40e_fc_mode { + I40E_FC_NONE = 0, + I40E_FC_RX_PAUSE, + I40E_FC_TX_PAUSE, + I40E_FC_FULL, + I40E_FC_PFC, + I40E_FC_DEFAULT +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1, + I40E_VSI_VMDQ2, + I40E_VSI_CTRL, + I40E_VSI_FCOE, + I40E_VSI_MIRROR, + I40E_VSI_SRIOV, + I40E_VSI_FDIR, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +struct i40e_link_status { + enum i40e_aq_phy_type phy_type; + enum i40e_aq_link_speed link_speed; + u8 link_info; + u8 an_info; + u8 ext_info; + u8 loopback; + /* is Link Status Event notification to SW enabled */ + bool lse_enable; +}; + +struct i40e_phy_info { + struct i40e_link_status link_info; + struct i40e_link_status link_info_old; + u32 autoneg_advertised; + u32 phy_id; + u32 module_type; + bool get_link_info; + enum i40e_media_type media_type; +}; + +#define I40E_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + u32 switch_mode; +#define I40E_NVM_IMAGE_TYPE_EVB 0x0 +#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 +#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + + u32 management_mode; + u32 npar_enable; + u32 os2bmc; + u32 valid_functions; + bool sr_iov_1_1; + bool vmdq; + bool evb_802_1_qbg; /* Edge Virtual Bridging */ + bool evb_802_1_qbh; /* Bridge Port Extension */ + bool dcb; + bool fcoe; + bool mfp_mode_1; + bool mgmt_cem; + bool ieee_1588; + bool iwarp; + bool fd; + u32 fd_filters_guaranteed; + u32 fd_filters_best_effort; + bool rss; + u32 rss_table_size; + u32 rss_table_entry_width; + bool led[I40E_HW_CAP_MAX_GPIO]; + bool sdp[I40E_HW_CAP_MAX_GPIO]; + u32 nvm_image_type; + u32 num_flow_director_filters; + u32 num_vfs; + u32 vf_base_id; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors; + u32 num_msix_vectors_vf; + u32 led_pin_num; + u32 sdp_pin_num; + u32 mdio_port_num; + u32 mdio_port_mode; + u8 rx_buf_chain_len; + u32 enabled_tcmap; + u32 maxtc; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum i40e_aq_resources_ids { + I40E_NVM_RESOURCE_ID = 1 +}; + +enum i40e_aq_resource_access_type { + I40E_RESOURCE_READ = 1, + I40E_RESOURCE_WRITE +}; + +struct i40e_nvm_info { + u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ + u64 hw_semaphore_wait; /* - || - */ + u32 timeout; /* [ms] */ + u16 sr_size; /* Shadow RAM size in words */ + bool blank_nvm_mode; /* is NVM empty (no FW present)*/ + u16 version; /* NVM package version */ + u32 eetrack; /* NVM data version */ +}; + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths 
*/ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; +}; + +/* Flow control (FC) parameters */ +struct i40e_fc_info { + enum i40e_fc_mode current_mode; /* FC mode in effect */ + enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +#define I40E_DCBX_MAX_APPS 32 +#define I40E_LLDPDU_SIZE 1500 + +/* IEEE 802.1Qaz ETS Configuration data */ +struct i40e_ieee_ets_config { + u8 willing; + u8 cbs; + u8 maxtcs; + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* IEEE 802.1Qaz ETS Recommendation data */ +struct i40e_ieee_ets_recommend { + u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; + u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; + u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; +}; + +/* IEEE 802.1Qaz PFC Configuration data */ +struct i40e_ieee_pfc_config { + u8 willing; + u8 mbc; + u8 pfccap; + u8 pfcenable; +}; + +/* IEEE 802.1Qaz Application Priority data */ +struct i40e_ieee_app_priority_table { + u8 priority; + u8 selector; + u16 protocolid; +}; + +struct i40e_dcbx_config { + u32 numapps; + struct i40e_ieee_ets_config etscfg; + struct i40e_ieee_ets_recommend etsrec; + struct i40e_ieee_pfc_config pfc; + struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; +}; + +/* Port hardware description */ +struct i40e_hw { + u8 __iomem *hw_addr; + void *back; + + /* function pointer structs */ + struct i40e_phy_info phy; + struct i40e_mac_info mac; + struct i40e_bus_info bus; + struct i40e_nvm_info nvm; + struct i40e_fc_info fc; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 port; + bool adapter_stopped; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + struct i40e_hw_capabilities func_caps; + + /* Flow Director shared filter space */ + u16 fdir_shared_filter_count; + + /* device profile info */ + u8 pf_id; + u16 main_vsi_seid; + + /* Closest numa node to the device */ + u16 numa_node; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* HMC info */ + struct i40e_hmc_info hmc; /* HMC info struct */ + + /* LLDP/DCBX Status */ + u16 dcbx_status; + + /* DCBX info */ + struct i40e_dcbx_config local_dcbx_config; + struct i40e_dcbx_config remote_dcbx_config; + + /* debug mask */ + u32 debug_mask; +}; + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; 
/* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT) + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18 +}; + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \ + I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + 
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \ + I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + 
I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \ + I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +struct i40e_filter_program_desc 
{ + __le32 qindex_flex_ptype_vsi; + __le32 rsvd; + __le32 dtype_cmd_cntindex; + __le32 fd_id; +}; +#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 +#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ + I40E_TXD_FLTR_QW0_QINDEX_SHIFT) +#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 +#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ + I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) +#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 +#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ + I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { + /* Note: Values 0-28 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-38 are reserved for future use */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + +enum i40e_filter_program_desc_dest { + I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, + I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, +}; + +enum i40e_filter_program_desc_fd_status { + I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, + I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, +}; + +#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 +#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) + +#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 +#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) + +#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) + +enum i40e_filter_program_desc_pcmd { + I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, + I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, +}; + +#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \ + I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) + +#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ + I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) + +#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 +#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) + +enum i40e_filter_type { + I40E_FLOW_DIRECTOR_FLTR = 0, + I40E_PE_QUAD_HASH_FLTR = 1, + I40E_ETHERTYPE_FLTR, + I40E_FCOE_CTX_FLTR, + I40E_MAC_VLAN_FLTR, + I40E_HASH_FLTR +}; + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 
vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_errors; /* repc */ + u64 rx_missed; /* rmpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +/* Statistics collected by the MAC */ +struct i40e_hw_port_stats { + /* eth stats collected by the port */ + struct i40e_eth_stats eth; + + /* additional port specific stats */ + u64 tx_dropped_link_down; /* tdold */ + u64 crc_errors; /* crcerrs */ + u64 illegal_bytes; /* illerrc */ + u64 error_bytes; /* errbc */ + u64 mac_local_faults; /* mlfc */ + u64 mac_remote_faults; /* mrfc */ + u64 rx_length_errors; /* rlec */ + u64 link_xon_rx; /* lxonrxc */ + u64 link_xoff_rx; /* lxoffrxc */ + u64 priority_xon_rx[8]; /* pxonrxc[8] */ + u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ + u64 link_xon_tx; /* lxontxc */ + u64 link_xoff_tx; /* lxofftxc */ + u64 priority_xon_tx[8]; /* pxontxc[8] */ + u64 priority_xoff_tx[8]; /* pxofftxc[8] */ + u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ + u64 rx_size_64; /* prc64 */ + u64 rx_size_127; /* prc127 */ + u64 rx_size_255; /* prc255 */ + u64 rx_size_511; /* prc511 */ + u64 rx_size_1023; /* prc1023 */ + u64 rx_size_1522; /* prc1522 */ + u64 rx_size_big; /* prc9522 */ + u64 rx_undersize; /* ruc */ + u64 rx_fragments; /* rfc */ + u64 rx_oversize; /* roc */ + u64 rx_jabber; /* rjc */ + u64 tx_size_64; /* ptc64 */ + u64 tx_size_127; /* ptc127 */ + u64 tx_size_255; /* ptc255 */ + u64 tx_size_511; /* ptc511 */ + u64 tx_size_1023; /* ptc1023 */ + u64 tx_size_1522; /* ptc1522 */ + u64 tx_size_big; /* ptc9522 */ + u64 mac_short_packet_dropped; /* mspdc */ + u64 checksum_error; /* xec */ + /* EEE LPI */ + bool tx_lpi_status; + bool rx_lpi_status; + u64 tx_lpi_count; /* etlpic */ + u64 rx_lpi_count; /* erlpic */ +}; + +/* Checksum and Shadow RAM pointers */ +#define I40E_SR_NVM_CONTROL_WORD 0x00 +#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_WAKE_ON_LAN 0x19 +#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 +#define I40E_SR_NVM_EETRACK_LO 0x2D +#define I40E_SR_NVM_EETRACK_HI 0x2E +#define I40E_SR_VPD_PTR 0x2F +#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E +#define I40E_SR_SW_CHECKSUM_WORD 0x3F + +/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ +#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 +#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 +#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 +#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) + +/* Shadow RAM related */ +#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 +#define I40E_SR_WORDS_IN_1KB 512 +/* Checksum should be calculated such that after adding all the words, + * including the checksum word itself, the sum should be 0xBABA. 
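+ * Equivalently, the checksum word stored at I40E_SR_SW_CHECKSUM_WORD can be + * computed as 0xBABA minus the 16-bit sum of all the other words, so a + * verifier only needs to add up every word and compare the low 16 bits of + * the total against 0xBABA.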
+ */ +#define I40E_SR_SW_CHECKSUM_BASE 0xBABA + +#define I40E_SRRD_SRCTL_ATTEMPTS 100000 + +enum i40e_switch_element_types { + I40E_SWITCH_ELEMENT_TYPE_MAC = 1, + I40E_SWITCH_ELEMENT_TYPE_PF = 2, + I40E_SWITCH_ELEMENT_TYPE_VF = 3, + I40E_SWITCH_ELEMENT_TYPE_EMP = 4, + I40E_SWITCH_ELEMENT_TYPE_BMC = 6, + I40E_SWITCH_ELEMENT_TYPE_PE = 16, + I40E_SWITCH_ELEMENT_TYPE_VEB = 17, + I40E_SWITCH_ELEMENT_TYPE_PA = 18, + I40E_SWITCH_ELEMENT_TYPE_VSI = 19, +}; + +/* Supported EtherType filters */ +enum i40e_ether_type_index { + I40E_ETHER_TYPE_1588 = 0, + I40E_ETHER_TYPE_FIP = 1, + I40E_ETHER_TYPE_OUI_EXTENDED = 2, + I40E_ETHER_TYPE_MAC_CONTROL = 3, + I40E_ETHER_TYPE_LLDP = 4, + I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, + I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, + I40E_ETHER_TYPE_QCN_CNM = 7, + I40E_ETHER_TYPE_8021X = 8, + I40E_ETHER_TYPE_ARP = 9, + I40E_ETHER_TYPE_RSV1 = 10, + I40E_ETHER_TYPE_RSV2 = 11, +}; + +/* Filter context base size is 1K */ +#define I40E_HASH_FILTER_BASE_SIZE 1024 +/* Supported Hash filter values */ +enum i40e_hash_filter_size { + I40E_HASH_FILTER_SIZE_1K = 0, + I40E_HASH_FILTER_SIZE_2K = 1, + I40E_HASH_FILTER_SIZE_4K = 2, + I40E_HASH_FILTER_SIZE_8K = 3, + I40E_HASH_FILTER_SIZE_16K = 4, + I40E_HASH_FILTER_SIZE_32K = 5, + I40E_HASH_FILTER_SIZE_64K = 6, + I40E_HASH_FILTER_SIZE_128K = 7, + I40E_HASH_FILTER_SIZE_256K = 8, + I40E_HASH_FILTER_SIZE_512K = 9, + I40E_HASH_FILTER_SIZE_1M = 10, +}; + +/* DMA context base size is 0.5K */ +#define I40E_DMA_CNTX_BASE_SIZE 512 +/* Supported DMA context values */ +enum i40e_dma_cntx_size { + I40E_DMA_CNTX_SIZE_512 = 0, + I40E_DMA_CNTX_SIZE_1K = 1, + I40E_DMA_CNTX_SIZE_2K = 2, + I40E_DMA_CNTX_SIZE_4K = 3, + I40E_DMA_CNTX_SIZE_8K = 4, + I40E_DMA_CNTX_SIZE_16K = 5, + I40E_DMA_CNTX_SIZE_32K = 6, + I40E_DMA_CNTX_SIZE_64K = 7, + I40E_DMA_CNTX_SIZE_128K = 8, + I40E_DMA_CNTX_SIZE_256K = 9, +}; + +/* Supported Hash look up table (LUT) sizes */ +enum i40e_hash_lut_size { + I40E_HASH_LUT_SIZE_128 = 0, + I40E_HASH_LUT_SIZE_512 = 1, +}; + +/* Structure to hold a per PF filter control settings */ +struct i40e_filter_control_settings { + /* number of PE Quad Hash filter buckets */ + enum i40e_hash_filter_size pe_filt_num; + /* number of PE Quad Hash contexts */ + enum i40e_dma_cntx_size pe_cntx_num; + /* number of FCoE filter buckets */ + enum i40e_hash_filter_size fcoe_filt_num; + /* number of FCoE DDP contexts */ + enum i40e_dma_cntx_size fcoe_cntx_num; + /* size of the Hash LUT */ + enum i40e_hash_lut_size hash_lut_size; + /* enable FDIR filters for PF and its VFs */ + bool enable_fdir; + /* enable Ethertype filters for PF and its VFs */ + bool enable_ethtype; + /* enable MAC/VLAN filters for PF and its VFs */ + bool enable_macvlan; +}; + +/* Structure to hold device level control filter counts */ +struct i40e_control_filter_stats { + u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ + u16 etype_used; /* Used perfect EtherType filters */ + u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ + u16 etype_free; /* Un-used perfect EtherType filters */ +}; + +enum i40e_reset_type { + I40E_RESET_POR = 0, + I40E_RESET_CORER = 1, + I40E_RESET_GLOBR = 2, + I40E_RESET_EMPR = 3, +}; +#endif /* _I40E_TYPE_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -0,0 +1,364 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 
Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_VIRTCHNL_H_ +#define _I40E_VIRTCHNL_H_ + +#include "i40e_type.h" + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the various i40e drivers. + * + * Admin queue buffer usage: + * desc->opcode is always i40e_aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * Firmware copies the cookie fields when sending messages between the PF and + * VF, but uses all other fields internally. Due to this limitation, we + * must send all messages as "indirect", i.e. using an external buffer. + * + * All the vsi indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value is of + * i40e_status_code type, defined in the i40e_type.h. + * + * In general, VF driver initialization should roughly follow the order of these + * opcodes. The VF driver must first validate the API version of the PF driver, + * then request a reset, then get resources, then configure queues and + * interrupts. After these operations are complete, the VF driver may start + * its queues, optionally add MAC and VLAN filters, and process traffic. + */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum i40e_virtchnl_ops { +/* VF sends req. to pf for the following + * ops. + */ + I40E_VIRTCHNL_OP_UNKNOWN = 0, + I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + I40E_VIRTCHNL_OP_RESET_VF, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_VIRTCHNL_OP_ENABLE_QUEUES, + I40E_VIRTCHNL_OP_DISABLE_QUEUES, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + I40E_VIRTCHNL_OP_ADD_VLAN, + I40E_VIRTCHNL_OP_DEL_VLAN, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + I40E_VIRTCHNL_OP_GET_STATS, + I40E_VIRTCHNL_OP_FCOE, +/* PF sends status change events to vfs using + * the following op. + */ + I40E_VIRTCHNL_OP_EVENT, +}; + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. 
+ */ + +struct i40e_virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + i40e_status v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +/* Message descriptions and data structures.*/ + +/* I40E_VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define I40E_VIRTCHNL_VERSION_MAJOR 1 +#define I40E_VIRTCHNL_VERSION_MINOR 0 +struct i40e_virtchnl_version_info { + u32 major; + u32 minor; +}; + +/* I40E_VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES + * VF sends this request to PF with no parameters + * PF responds with an indirect message containing + * i40e_virtchnl_vf_resource and one or more + * i40e_virtchnl_vsi_resource structures. + */ + +struct i40e_virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum i40e_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; +/* VF offload flags */ +#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 +#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 + +struct i40e_virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 max_fcoe_contexts; + u32 max_fcoe_filters; + + struct i40e_virtchnl_vsi_resource vsi_res[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of i40e_virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct i40e_virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; + u64 dma_ring_addr; + u64 dma_headwb_addr; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of i40e_virtchnl_rxq_info. + * PF configures requested queue and returns a status code. 
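+ * For instance, a VF describing a single 512-descriptor Rx queue with plain + * 2048-byte buffers and no header split might send something along the lines + * of (illustrative values only; vsi_id and ring_dma are placeholders): + * struct i40e_virtchnl_rxq_info rxq = { + * .vsi_id = vsi_id, + * .queue_id = 0, + * .ring_len = 512, + * .databuffer_size = 2048, + * .max_pkt_size = 1522, + * .dma_ring_addr = ring_dma, + * }; + * as the external buffer for this opcode.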
+ */ + +/* Rx queue config info */ +struct i40e_virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; + u32 databuffer_size; + u32 max_pkt_size; + u64 dma_ring_addr; + enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct i40e_virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct i40e_virtchnl_txq_info txq; + struct i40e_virtchnl_rxq_info rxq; +}; + +struct i40e_virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + struct i40e_virtchnl_queue_pair_info qpair[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct i40e_virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +struct i40e_virtchnl_irq_map_info { + u16 num_vectors; + struct i40e_virtchnl_vector_map vecmap[1]; +}; + +/* I40E_VIRTCHNL_OP_ENABLE_QUEUES + * I40E_VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct i40e_virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct i40e_virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 pad[2]; +}; + +struct i40e_virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct i40e_virtchnl_ether_addr list[1]; +}; + +/* I40E_VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* I40E_VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct i40e_virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. 
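+ * For example, to request unicast promiscuous reception only, the VF sends + * a buffer of { .vsi_id = <its VSI id>, .flags = I40E_FLAG_VF_UNICAST_PROMISC } + * with this opcode; sending flags of 0 turns promiscuous reception back off.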
+ */ +struct i40e_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 +#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* I40E_VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct i40e_eth_stats in an external buffer. + */ + +/* I40E_VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum i40e_virtchnl_event_codes { + I40E_VIRTCHNL_EVENT_UNKNOWN = 0, + I40E_VIRTCHNL_EVENT_LINK_CHANGE, + I40E_VIRTCHNL_EVENT_RESET_IMPENDING, + I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; +#define I40E_PF_EVENT_SEVERITY_INFO 0 +#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct i40e_virtchnl_pf_event { + enum i40e_virtchnl_event_codes event; + union { + struct { + enum i40e_aq_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +/* The following are TBD, not necessary for LAN functionality. + * I40E_VIRTCHNL_OP_FCOE + */ + +/* VF reset states - these are written into the RSTAT register: + * I40E_VFGEN_RSTAT1 on the PF + * I40E_VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum i40e_vfr_states { + I40E_VFR_INPROGRESS = 0, + I40E_VFR_COMPLETED, + I40E_VFR_VFACTIVE, + I40E_VFR_UNKNOWN, +}; + +#endif /* _I40E_VIRTCHNL_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40evf.h +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -0,0 +1,293 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40EVF_H_ +#define _I40EVF_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i40e_type.h" +#include "i40e_virtchnl.h" +#include "i40e_txrx.h" + +#define DEFAULT_DEBUG_LEVEL_SHIFT 3 +#define PFX "i40evf: " +#define DPRINTK(nlevel, klevel, fmt, args...) 
\ + ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ + printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ + __func__ , ## args))) + +/* dummy struct to make common code less painful */ +struct i40e_vsi { + struct i40evf_adapter *back; + struct net_device *netdev; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 seid; + u16 id; + unsigned long state; + int base_vector; + u16 work_limit; + /* high bit set means dynamic, use accessor routines to read/write. + * hardware only supports 2us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. + */ + u16 rx_itr_setting; + u16 tx_itr_setting; +}; + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40EVF_DEFAULT_TXD 512 +#define I40EVF_DEFAULT_RXD 512 +#define I40EVF_MAX_TXD 4096 +#define I40EVF_MIN_TXD 64 +#define I40EVF_MAX_RXD 4096 +#define I40EVF_MIN_RXD 64 +#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8 + +/* Supported Rx Buffer Sizes */ +#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ +#define I40EVF_RXBUFFER_128 128 /* Used for packet split */ +#define I40EVF_RXBUFFER_256 256 /* Used for packet split */ +#define I40EVF_RXBUFFER_2048 2048 +#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ +#define I40EVF_MAX_AQ_BUF_SIZE 4096 +#define I40EVF_AQ_LEN 32 +#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */ + +#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + +#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) +#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) +#define I40E_TX_CTXTDESC(R, i) \ + (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) +#define MAX_RX_QUEUES 8 +#define MAX_TX_QUEUES MAX_RX_QUEUES + +/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. + */ +struct i40e_q_vector { + struct i40evf_adapter *adapter; + struct i40e_vsi *vsi; + struct napi_struct napi; + unsigned long reg_idx; + struct i40e_ring_container rx; + struct i40e_ring_container tx; + u32 ring_mask; + u8 num_ringpairs; /* total number of ring pairs in vector */ + int v_idx; /* vector index in list */ + char name[IFNAMSIZ + 9]; + cpumask_var_t affinity_mask; +}; + +/* Helper macros to switch between ints/sec and what the register uses. + * And yes, it's the same math going both ways. The lowest value + * supported by all of the i40e hardware is 8. + */ +#define EITR_INTS_PER_SEC_TO_REG(_eitr) \ + ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8) +#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG + +#define I40EVF_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +#define I40EVF_RX_DESC_ADV(R, i) \ + (&(((union i40e_adv_rx_desc *)((R).desc))[i])) +#define I40EVF_TX_DESC_ADV(R, i) \ + (&(((union i40e_adv_tx_desc *)((R).desc))[i])) +#define I40EVF_TX_CTXTDESC_ADV(R, i) \ + (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i])) + +#define OTHER_VECTOR 1 +#define NONQ_VECS (OTHER_VECTOR) + +#define MAX_MSIX_Q_VECTORS 4 +#define MAX_MSIX_COUNT 5 + +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS) + +#define I40EVF_QUEUE_END_OF_LIST 0x7FF +#define I40EVF_FREE_VECTOR 0x7FFF +struct i40evf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +struct i40evf_vlan_filter { + struct list_head list; + u16 vlan; + bool remove; /* filter needs to be removed */ + bool add; /* filter needs to be added */ +}; + +/* Driver state. The order of these is important! */ +enum i40evf_state_t { + __I40EVF_STARTUP, /* driver loaded, probe complete */ + __I40EVF_REMOVE, /* driver is being unloaded */ + __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ + __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __I40EVF_INIT_SW, /* got resources, setting up structs */ + __I40EVF_RESETTING, /* in reset */ + /* Below here, watchdog is running */ + __I40EVF_DOWN, /* ready, can be opened */ + __I40EVF_TESTING, /* in ethtool self-test */ + __I40EVF_RUNNING, /* opened, working */ +}; + +enum i40evf_critical_section_t { + __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */ +}; +/* make common code happy */ +#define __I40E_DOWN __I40EVF_DOWN + +/* board specific private data structure */ +struct i40evf_adapter { + struct timer_list watchdog_timer; + struct work_struct reset_task; + struct work_struct adminq_task; + struct delayed_work init_task; + struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct list_head vlan_filter_list; + char misc_vector_name[IFNAMSIZ + 9]; + + /* TX */ + struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; + u32 tx_timeout_count; + struct list_head mac_filter_list; + + /* RX */ + struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; + u64 hw_csum_rx_error; + int num_msix_vectors; + struct msix_entry *msix_entries; + + u32 flags; +#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) +#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) +#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) +#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3) +#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4) +#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) +#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) +#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) +#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8) +#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9) +#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10) +/* duplicates for common code */ +#define I40E_FLAG_FDIR_ATR_ENABLED 0 +#define I40E_FLAG_DCB_ENABLED 0 +#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL +#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED + /* flags for admin queue service task */ + u32 aq_required; + u32 aq_pending; +#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1) +#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) +#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) +#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) +#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) +#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) +#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) +#define
I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) +#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + struct net_device_stats net_stats; + + struct i40e_hw hw; /* defined in i40e_type.h */ + + enum i40evf_state_t state; + volatile unsigned long crit_section; + + struct work_struct watchdog_task; + bool netdev_registered; + bool link_up; + enum i40e_virtchnl_ops current_op; + struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ + struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + u16 msg_enable; + struct i40e_eth_stats current_stats; + struct i40e_vsi vsi; + u32 aq_wait_count; +}; + + +/* needed by i40evf_ethtool.c */ +extern char i40evf_driver_name[]; +extern const char i40evf_driver_version[]; + +int i40evf_up(struct i40evf_adapter *adapter); +void i40evf_down(struct i40evf_adapter *adapter); +void i40evf_reinit_locked(struct i40evf_adapter *adapter); +void i40evf_reset(struct i40evf_adapter *adapter); +void i40evf_set_ethtool_ops(struct net_device *netdev); +void i40evf_update_stats(struct i40evf_adapter *adapter); +void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); +int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); +void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); + +void i40e_napi_add_all(struct i40evf_adapter *adapter); +void i40e_napi_del_all(struct i40evf_adapter *adapter); + +int i40evf_send_api_ver(struct i40evf_adapter *adapter); +int i40evf_verify_api_ver(struct i40evf_adapter *adapter); +int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter); +int i40evf_get_vf_config(struct i40evf_adapter *adapter); +void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush); +void i40evf_configure_queues(struct i40evf_adapter *adapter); +void i40evf_deconfigure_queues(struct i40evf_adapter *adapter); +void i40evf_enable_queues(struct i40evf_adapter *adapter); +void i40evf_disable_queues(struct i40evf_adapter *adapter); +void i40evf_map_queues(struct i40evf_adapter *adapter); +void i40evf_add_ether_addrs(struct i40evf_adapter *adapter); +void i40evf_del_ether_addrs(struct i40evf_adapter *adapter); +void i40evf_add_vlans(struct i40evf_adapter *adapter); +void i40evf_del_vlans(struct i40evf_adapter *adapter); +void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); +void i40evf_request_stats(struct i40evf_adapter *adapter); +void i40evf_request_reset(struct i40evf_adapter *adapter); +void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, u16 msglen); +#endif /* _I40EVF_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -0,0 +1,393 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* ethtool support for i40evf */ +#include "i40evf.h" + +#include + + +struct i40evf_stats { + char stat_string[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define I40EVF_STAT(_name, _stat) { \ + .stat_string = _name, \ + .stat_offset = offsetof(struct i40evf_adapter, _stat) \ +} + +/* All stats are u64, so we don't need to track the size of the field. */ +static const struct i40evf_stats i40evf_gstrings_stats[] = { + I40EVF_STAT("rx_bytes", current_stats.rx_bytes), + I40EVF_STAT("rx_unicast", current_stats.rx_unicast), + I40EVF_STAT("rx_multicast", current_stats.rx_multicast), + I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast), + I40EVF_STAT("rx_discards", current_stats.rx_discards), + I40EVF_STAT("rx_errors", current_stats.rx_errors), + I40EVF_STAT("rx_missed", current_stats.rx_missed), + I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol), + I40EVF_STAT("tx_bytes", current_stats.tx_bytes), + I40EVF_STAT("tx_unicast", current_stats.tx_unicast), + I40EVF_STAT("tx_multicast", current_stats.tx_multicast), + I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast), + I40EVF_STAT("tx_discards", current_stats.tx_discards), + I40EVF_STAT("tx_errors", current_stats.tx_errors), +}; + +#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) +#define I40EVF_QUEUE_STATS_LEN \ + (((struct i40evf_adapter *) \ + netdev_priv(netdev))->vsi_res->num_queue_pairs * 4) +#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN) + +/** + * i40evf_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings. Because this is a VF, we don't know what + * kind of link we really have, so we fake it. + **/ +static int i40evf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + /* In the future the VF will be able to query the PF for + * some information - for now use a dummy value + */ + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->autoneg = AUTONEG_DISABLE; + ecmd->transceiver = XCVR_DUMMY1; + ecmd->port = PORT_NONE; + + return 0; +} + +/** + * i40evf_get_sset_count - Get length of string set + * @netdev: network interface device structure + * @sset: id of string set + * + * Reports size of string table. This driver only supports + * strings for statistics. + **/ +static int i40evf_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset == ETH_SS_STATS) + return I40EVF_STATS_LEN; + else + return -ENOTSUPP; +} + +/** + * i40evf_get_ethtool_stats - report device statistics + * @netdev: network interface device structure + * @stats: ethtool statistics structure + * @data: pointer to data buffer + * + * All statistics are added to the data buffer as an array of u64. 
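+ * (This is the data behind 'ethtool -S ethX'; the strings built by + * i40evf_get_strings() below provide the matching labels and ordering.)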
+ **/ +static void i40evf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int i, j; + char *p; + + for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; + data[i] = *(u64 *)p; + } + for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + data[i++] = adapter->tx_rings[j]->stats.packets; + data[i++] = adapter->tx_rings[j]->stats.bytes; + } + for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + data[i++] = adapter->rx_rings[j]->stats.packets; + data[i++] = adapter->rx_rings[j]->stats.bytes; + } +} + +/** + * i40evf_get_strings - Get string set + * @netdev: network interface device structure + * @sset: id of string set + * @data: buffer for string data + * + * Builds stats string table. + **/ +static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u8 *p = data; + int i; + + if (sset == ETH_SS_STATS) { + for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, i40evf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); + p += ETH_GSTRING_LEN; + snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); + p += ETH_GSTRING_LEN; + } + } +} + +/** + * i40evf_get_msglevel - Get debug message level + * @netdev: network interface device structure + * + * Returns current debug message level. + **/ +static u32 i40evf_get_msglevel(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +/** + * i40evf_set_msglevel - Set debug message level + * @netdev: network interface device structure + * @data: message level + * + * Set current debug message level. Higher values cause the driver to + * be noisier. + **/ +static void i40evf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +/** + * i40evf_get_drvinfo - Get driver info + * @netdev: network interface device structure + * @drvinfo: ethtool driver info structure + * + * Returns information about the driver and device for display to the user. + **/ +static void i40evf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, i40evf_driver_name, 32); + strlcpy(drvinfo->version, i40evf_driver_version, 32); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); +} + +/** + * i40evf_get_ringparam - Get ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Returns current ring parameters. TX and RX rings are reported separately, + * but the number of rings is not reported.
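+ * (This is the information reported by 'ethtool -g ethX'.)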
+ **/ +static void i40evf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_ring *tx_ring = adapter->tx_rings[0]; + struct i40e_ring *rx_ring = adapter->rx_rings[0]; + + ring->rx_max_pending = I40EVF_MAX_RXD; + ring->tx_max_pending = I40EVF_MAX_TXD; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; +} + +/** + * i40evf_set_ringparam - Set ring parameters + * @netdev: network interface device structure + * @ring: ethtool ringparam structure + * + * Sets ring parameters. TX and RX rings are controlled separately, but the + * number of rings is not specified, so all rings get the same settings. + **/ +static int i40evf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + int i; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_tx_count = clamp_t(u32, ring->tx_pending, + I40EVF_MIN_TXD, + I40EVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, + I40EVF_MIN_RXD, + I40EVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_rings[0]->count) && + (new_rx_count == adapter->rx_rings[0]->count)) + return 0; + + /* apply the new size to every ring; all rings share the same count */ + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + adapter->tx_rings[i]->count = new_tx_count; + adapter->rx_rings[i]->count = new_rx_count; + } + + if (netif_running(netdev)) + i40evf_reinit_locked(adapter); + return 0; +} + +/** + * i40evf_get_coalesce - Get interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Returns current coalescing settings. This is referred to elsewhere in the + * driver as Interrupt Throttle Rate, as this is how the hardware describes + * this functionality. + **/ +static int i40evf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_vsi *vsi = &adapter->vsi; + + ec->tx_max_coalesced_frames = vsi->work_limit; + ec->rx_max_coalesced_frames = vsi->work_limit; + + if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) + ec->rx_coalesce_usecs = 1; + else + ec->rx_coalesce_usecs = vsi->rx_itr_setting; + + if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + ec->tx_coalesce_usecs = 1; + else + ec->tx_coalesce_usecs = vsi->tx_itr_setting; + + return 0; +} + +/** + * i40evf_set_coalesce - Set interrupt coalescing settings + * @netdev: network interface device structure + * @ec: ethtool coalesce structure + * + * Change current coalescing settings.
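+ * A value of 0 for rx/tx_coalesce_usecs turns interrupt throttling off for + * that direction, a value of 1 selects the dynamic (adaptive) ITR default, + * and any other value within the supported range is used directly as the + * throttle interval in microseconds, e.g. 'ethtool -C ethX rx-usecs 50'.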
+ **/ +static int i40evf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + struct i40e_vsi *vsi = &adapter->vsi; + struct i40e_q_vector *q_vector; + int i; + + if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames) + vsi->work_limit = ec->tx_max_coalesced_frames; + + switch (ec->rx_coalesce_usecs) { + case 0: + vsi->rx_itr_setting = 0; + break; + case 1: + vsi->rx_itr_setting = (I40E_ITR_DYNAMIC + | ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); + break; + default: + if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || + (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) + return -EINVAL; + vsi->rx_itr_setting = ec->rx_coalesce_usecs; + break; + } + + switch (ec->tx_coalesce_usecs) { + case 0: + vsi->tx_itr_setting = 0; + break; + case 1: + vsi->tx_itr_setting = (I40E_ITR_DYNAMIC + | ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); + break; + default: + if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || + (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) + return -EINVAL; + vsi->tx_itr_setting = ec->tx_coalesce_usecs; + break; + } + + for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) { + q_vector = adapter->q_vector[i]; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr); + i40e_flush(hw); + } + + return 0; +} + +static struct ethtool_ops i40evf_ethtool_ops = { + .get_settings = i40evf_get_settings, + .get_drvinfo = i40evf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = i40evf_get_ringparam, + .set_ringparam = i40evf_set_ringparam, + .get_strings = i40evf_get_strings, + .get_ethtool_stats = i40evf_get_ethtool_stats, + .get_sset_count = i40evf_get_sset_count, + .get_msglevel = i40evf_get_msglevel, + .set_msglevel = i40evf_set_msglevel, + .get_coalesce = i40evf_get_coalesce, + .set_coalesce = i40evf_set_coalesce, +}; + +/** + * i40evf_set_ethtool_ops - Initialize ethtool ops struct + * @netdev: network interface device structure + * + * Sets ethtool ops struct in our netdev so that ethtool can call + * our functions. + **/ +void i40evf_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops); +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -0,0 +1,2456 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40evf.h" +#include "i40e_prototype.h" +static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); +static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); +static int i40evf_close(struct net_device *netdev); + +char i40evf_driver_name[] = "i40evf"; +static const char i40evf_driver_string[] = + "Intel(R) XL710 X710 Virtual Function Network Driver"; + +#define DRV_VERSION "0.9.16" +const char i40evf_driver_version[] = DRV_VERSION; +static const char i40evf_copyright[] = + "Copyright (c) 2013 - 2014 Intel Corporation."; + +/* i40evf_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = { + {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + /* required last entry */ + {0, } +}; + +MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/** + * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + **/ +i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw, + struct i40e_dma_mem *mem, + u64 size, u32 alignment) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + + if (!mem) + return I40E_ERR_PARAM; + + mem->size = ALIGN(size, alignment); + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, + (dma_addr_t *)&mem->pa, GFP_KERNEL); + if (mem->va) + return 0; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40evf_free_dma_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back; + + if (!mem || !mem->va) + return I40E_ERR_PARAM; + dma_free_coherent(&adapter->pdev->dev, mem->size, + mem->va, (dma_addr_t)mem->pa); + return 0; +} + +/** + * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + **/ +i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem, u32 size) +{ + if (!mem) + return I40E_ERR_PARAM; + + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (mem->va) + return 0; + else + return I40E_ERR_NO_MEMORY; +} + +/** + * i40evf_free_virt_mem_d - OS specific memory free for shared code + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + **/ +i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw, + struct i40e_virt_mem *mem) +{ + if (!mem) + return I40E_ERR_PARAM; + + /* it's ok to kfree a NULL pointer */ + kfree(mem->va); + + return 0; +} + +/** + * i40evf_debug_d - OS dependent version of debug printing + * @hw: pointer to the HW structure + * @mask: debug level mask + * @fmt_str: printf-type format description + **/ +void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) 
+{ + char buf[512]; + va_list argptr; + + if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) + return; + + va_start(argptr, fmt_str); + vsnprintf(buf, sizeof(buf), fmt_str, argptr); + va_end(argptr); + + /* the debug string is already formatted with a newline */ + pr_info("%s", buf); +} + +/** + * i40evf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void i40evf_tx_timeout(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + adapter->tx_timeout_count++; + dev_info(&adapter->pdev->dev, "TX timeout detected.\n"); + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } +} + +/** + * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, 0); + + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); + + synchronize_irq(adapter->msix_entries[0].vector); +} + +/** + * i40evf_misc_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); + + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void i40evf_irq_disable(struct i40evf_adapter *adapter) +{ + int i; + struct i40e_hw *hw = &adapter->hw; + + if (!adapter->msix_entries) + return; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); + synchronize_irq(adapter->msix_entries[i].vector); + } + /* read flush */ + rd32(hw, I40E_VFGEN_RSTAT); + +} + +/** + * i40evf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + if (mask & (1 << (i - 1))) { + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), + I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + } + } +} + +/** + * i40evf_fire_sw_int - Generate SW interrupt for specified vectors + * @adapter: board private structure + * @mask: bitmap of vectors to trigger + **/ +static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, + u32 mask) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + uint32_t dyn_ctl; + + for (i = 1; i < adapter->num_msix_vectors; i++) { + if (mask & (1 << i)) { + dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); + dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; + wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); + } + } +} + +/** + * i40evf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush) +{ + struct i40e_hw *hw = &adapter->hw; + + i40evf_irq_enable_queues(adapter, ~0); + + if (flush) + rd32(hw, I40E_VFGEN_RSTAT); +} + +/** + * i40evf_msix_aq - Interrupt handler for vector 0 + * @irq: interrupt number + * 
@data: pointer to netdev + **/ +static irqreturn_t i40evf_msix_aq(int irq, void *data) +{ + struct net_device *netdev = data; + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + u32 val; + u32 ena_mask; + + /* handle non-queue interrupts */ + val = rd32(hw, I40E_VFINT_ICR01); + ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + + + val = rd32(hw, I40E_VFINT_DYN_CTL01); + val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; + wr32(hw, I40E_VFINT_DYN_CTL01, val); + + /* re-enable interrupt causes */ + wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask); + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK); + + /* schedule work on the private workqueue */ + schedule_work(&adapter->adminq_task); + + return IRQ_HANDLED; +} + +/** + * i40evf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) +{ + struct i40e_q_vector *q_vector = data; + + if (!q_vector->tx.ring && !q_vector->rx.ring) + return IRQ_HANDLED; + + napi_schedule(&q_vector->napi); + + return IRQ_HANDLED; +} + +/** + * i40evf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void +i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) +{ + struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; + struct i40e_ring *rx_ring = adapter->rx_rings[r_idx]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + rx_ring->vsi = &adapter->vsi; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; + q_vector->rx.latency_range = I40E_LOW_LATENCY; +} + +/** + * i40evf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void +i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) +{ + struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; + struct i40e_ring *tx_ring = adapter->tx_rings[t_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + tx_ring->vsi = &adapter->vsi; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->tx.latency_range = I40E_LOW_LATENCY; + q_vector->num_ringpairs++; + q_vector->ring_mask |= (1 << t_idx); +} + +/** + * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +{ + int q_vectors; + int v_start = 0; + int rxr_idx = 0, txr_idx = 0; + int rxr_remaining = adapter->vsi_res->num_queue_pairs; + int txr_remaining = adapter->vsi_res->num_queue_pairs; + int i, j; + int rqpv, tqpv; + int err = 0; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + /* The ideal configuration... + * We have enough vectors to map one per queue. 
+ */ + if (q_vectors == (rxr_remaining * 2)) { + for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) + i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); + + for (; txr_idx < txr_remaining; v_start++, txr_idx++) + i40evf_map_vector_to_txq(adapter, v_start, txr_idx); + goto out; + } + + /* If we don't have enough vectors for a 1-to-1 + * mapping, we'll have to group them so there are + * multiple queues per vector. + * Re-adjusting *qpv takes care of the remainder. + */ + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); + for (j = 0; j < rqpv; j++) { + i40evf_map_vector_to_rxq(adapter, i, rxr_idx); + rxr_idx++; + rxr_remaining--; + } + } + for (i = v_start; i < q_vectors; i++) { + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); + for (j = 0; j < tqpv; j++) { + i40evf_map_vector_to_txq(adapter, i, txr_idx); + txr_idx++; + txr_remaining--; + } + } + +out: + adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + + return err; +} + +/** + * i40evf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. + **/ +static int +i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) +{ + int vector, err, q_vectors; + int rx_int_idx = 0, tx_int_idx = 0; + + i40evf_irq_disable(adapter); + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (vector = 0; vector < q_vectors; vector++) { + struct i40e_q_vector *q_vector = adapter->q_vector[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "i40evf-%s-%s-%d", basename, + "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "i40evf-%s-%s-%d", basename, + "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "i40evf-%s-%s-%d", basename, + "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq( + adapter->msix_entries[vector + NONQ_VECS].vector, + i40evf_msix_clean_rings, + 0, + q_vector->name, + q_vector); + if (err) { + dev_info(&adapter->pdev->dev, + "%s: request_irq failed, error: %d\n", + __func__, err); + goto free_queue_irqs; + } + /* assign the mask for this irq */ + irq_set_affinity_hint( + adapter->msix_entries[vector + NONQ_VECS].vector, + q_vector->affinity_mask); + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_set_affinity_hint( + adapter->msix_entries[vector + NONQ_VECS].vector, + NULL); + free_irq(adapter->msix_entries[vector + NONQ_VECS].vector, + adapter->q_vector[vector]); + } + return err; +} + +/** + * i40evf_request_misc_irq - Initialize MSI-X interrupts + * @adapter: board private structure + * + * Allocates MSI-X vector 0 and requests interrupts from the kernel. This + * vector is only for the admin queue, and stays active even when the netdev + * is closed. 
+ **/ +static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + sprintf(adapter->misc_vector_name, "i40evf:mbx"); + err = request_irq(adapter->msix_entries[0].vector, + &i40evf_msix_aq, 0, + adapter->misc_vector_name, netdev); + if (err) { + dev_err(&adapter->pdev->dev, + "request_irq for %s failed: %d\n", + adapter->misc_vector_name, err); + free_irq(adapter->msix_entries[0].vector, netdev); + } + return err; +} + +/** + * i40evf_free_traffic_irqs - Free MSI-X interrupts + * @adapter: board private structure + * + * Frees all MSI-X vectors other than 0. + **/ +static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) +{ + int i; + int q_vectors; + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (i = 0; i < q_vectors; i++) { + irq_set_affinity_hint(adapter->msix_entries[i+1].vector, + NULL); + free_irq(adapter->msix_entries[i+1].vector, + adapter->q_vector[i]); + } +} + +/** + * i40evf_free_misc_irq - Free MSI-X miscellaneous vector + * @adapter: board private structure + * + * Frees MSI-X vector 0. + **/ +static void i40evf_free_misc_irq(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->msix_entries[0].vector, netdev); +} + +/** + * i40evf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void i40evf_configure_tx(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + int i; + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); +} + +/** + * i40evf_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
+ **/ +static void i40evf_configure_rx(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i; + int rx_buf_len; + + + adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE; + adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; + + /* Decide whether to use packet split mode or not */ + if (netdev->mtu > ETH_DATA_LEN) { + if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE) + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + else + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + } else { + if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE) + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + else + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + } + + /* Set the RX buffer length according to the mode */ + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + rx_buf_len = I40E_RX_HDR_SIZE; + } else { + if (netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = I40EVF_RXBUFFER_2048; + else + rx_buf_len = ALIGN(max_frame, 1024); + } + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); + adapter->rx_rings[i]->rx_buf_len = rx_buf_len; + } +} + +/** + * i40evf_find_vlan - Search filter list for specific vlan filter + * @adapter: board private structure + * @vlan: vlan tag + * + * Returns ptr to the filter object or NULL + **/ +static struct +i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (vlan == f->vlan) + return f; + } + return NULL; +} + +/** + * i40evf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. 
+ **/ +static struct +i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + + f = i40evf_find_vlan(adapter, vlan); + if (NULL == f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (NULL == f) { + dev_info(&adapter->pdev->dev, + "%s: no memory for new VLAN filter\n", + __func__); + return NULL; + } + f->vlan = vlan; + + INIT_LIST_HEAD(&f->list); + list_add(&f->list, &adapter->vlan_filter_list); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + } + + return f; +} + +/** + * i40evf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) +{ + struct i40evf_vlan_filter *f; + + f = i40evf_find_vlan(adapter, vlan); + if (f) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + } + return; +} + +/** + * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device + * @netdev: network device struct + * @vid: VLAN tag + **/ +static int i40evf_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + if (i40evf_add_vlan(adapter, vid) == NULL) + return -ENOMEM; + return 0; +} + +/** + * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device + * @netdev: network device struct + * @vid: VLAN tag + **/ +static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + i40evf_del_vlan(adapter, vid); + return 0; +} + +/** + * i40evf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL + **/ +static struct +i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, + u8 *macaddr) +{ + struct i40evf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + return NULL; +} + +/** + * i40e_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. 
+ **/ +static struct +i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, + u8 *macaddr) +{ + struct i40evf_mac_filter *f; + + if (!macaddr) + return NULL; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + mdelay(1); + + f = i40evf_find_filter(adapter, macaddr); + if (NULL == f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (NULL == f) { + dev_info(&adapter->pdev->dev, + "%s: no memory for new filter\n", __func__); + clear_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section); + return NULL; + } + + memcpy(f->macaddr, macaddr, ETH_ALEN); + + list_add(&f->list, &adapter->mac_filter_list); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + } + + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return f; +} + +/** + * i40evf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_set_mac(struct net_device *netdev, void *p) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40e_hw *hw = &adapter->hw; + struct i40evf_mac_filter *f; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return 0; + + f = i40evf_add_filter(adapter, addr->sa_data); + if (f) { + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + } + + return (f == NULL) ? -ENOMEM : 0; +} + +/** + * i40evf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void i40evf_set_rx_mode(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + struct i40evf_mac_filter *f, *ftmp; + struct netdev_hw_addr *uca; + struct netdev_hw_addr *mca; + + /* add addr if not already in the filter list */ + netdev_for_each_uc_addr(uca, netdev) { + i40evf_add_filter(adapter, uca->addr); + } + netdev_for_each_mc_addr(mca, netdev) { + i40evf_add_filter(adapter, mca->addr); + } + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + mdelay(1); + /* remove filter if not in netdev list */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + bool found = false; + + if (f->macaddr[0] & 0x01) { + netdev_for_each_mc_addr(mca, netdev) { + if (ether_addr_equal(mca->addr, f->macaddr)) { + found = true; + break; + } + } + } else { + netdev_for_each_uc_addr(uca, netdev) { + if (ether_addr_equal(uca->addr, f->macaddr)) { + found = true; + break; + } + } + } + if (found) { + f->remove = true; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + } + } + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +} + +/** + * i40evf_napi_enable_all - enable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) +{ + int q_idx; + struct i40e_q_vector *q_vector; + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; + napi = &q_vector->napi; + napi_enable(napi); + } +} + +/** + * i40evf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) +{ + int q_idx; + struct i40e_q_vector 
*q_vector; + int q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = adapter->q_vector[q_idx]; + napi_disable(&q_vector->napi); + } +} + +/** + * i40evf_configure - set up transmit and receive data structures + * @adapter: board private structure + **/ +static void i40evf_configure(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + i40evf_set_rx_mode(netdev); + + i40evf_configure_tx(adapter); + i40evf_configure_rx(adapter); + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + struct i40e_ring *ring = adapter->rx_rings[i]; + i40evf_alloc_rx_buffers(ring, ring->count); + ring->next_to_use = ring->count - 1; + writel(ring->next_to_use, ring->tail); + } +} + +/** + * i40evf_up_complete - Finish the last steps of bringing up a connection + * @adapter: board private structure + **/ +static int i40evf_up_complete(struct i40evf_adapter *adapter) +{ + adapter->state = __I40EVF_RUNNING; + clear_bit(__I40E_DOWN, &adapter->vsi.state); + + i40evf_napi_enable_all(adapter); + + adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + return 0; +} + +/** + * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + i40evf_clean_rx_ring(adapter->rx_rings[i]); +} + +/** + * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + i40evf_clean_tx_ring(adapter->tx_rings[i]); +} + +/** + * i40e_down - Shutdown the connection processing + * @adapter: board private structure + **/ +void i40evf_down(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct i40evf_mac_filter *f; + + /* remove all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->remove = true; + } + /* remove all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->remove = true; + } + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __I40EVF_RESETTING) { + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + /* disable receives */ + adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + msleep(20); + } + netif_tx_disable(netdev); + + netif_tx_stop_all_queues(netdev); + + i40evf_irq_disable(adapter); + + i40evf_napi_disable_all(adapter); + + netif_carrier_off(netdev); + + i40evf_clean_all_tx_rings(adapter); + i40evf_clean_all_rx_rings(adapter); +} + +/** + * i40evf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. 
+ * + * Returns 0 on success, negative on failure + **/ +static int +i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + */ + vector_threshold = MIN_MSIX_COUNT; + + /* The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + while (vectors >= vector_threshold) { + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + vectors); + if (!err) /* Success in acquiring all requested vectors. */ + break; + else if (err < 0) + vectors = 0; /* Nasty failure, quit now */ + else /* err == number of vectors we should try again with */ + vectors = err; + } + + if (vectors < vector_threshold) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n"); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + err = -EIO; + } else { + /* Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NONQ_VECS, or the number of + * vectors we were allocated. + */ + adapter->num_msix_vectors = vectors; + } + return err; +} + +/** + * i40evf_free_queues - Free memory for all rings + * @adapter: board private structure to initialize + * + * Free all of the memory associated with queue pairs. + **/ +static void i40evf_free_queues(struct i40evf_adapter *adapter) +{ + int i; + + if (!adapter->vsi_res) + return; + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + if (adapter->tx_rings[i]) + kfree_rcu(adapter->tx_rings[i], rcu); + adapter->tx_rings[i] = NULL; + adapter->rx_rings[i] = NULL; + } +} + +/** + * i40evf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int i40evf_alloc_queues(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + struct i40e_ring *tx_ring; + struct i40e_ring *rx_ring; + + tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + if (!tx_ring) + goto err_out; + + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = &adapter->pdev->dev; + tx_ring->count = I40EVF_DEFAULT_TXD; + adapter->tx_rings[i] = tx_ring; + + rx_ring = &tx_ring[1]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = &adapter->pdev->dev; + rx_ring->count = I40EVF_DEFAULT_RXD; + adapter->rx_rings[i] = rx_ring; + } + + return 0; + +err_out: + i40evf_free_queues(adapter); + return -ENOMEM; +} + +/** + * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) +{ + int vector, v_budget; + int pairs = 0; + int err = 0; + + if (!adapter->vsi_res) { + err = -EIO; + goto out; + } + pairs = adapter->vsi_res->num_queue_pairs; + + /* It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. 
So let's be conservative and only ask for + * (roughly) twice the number of vectors as there are CPU's. + */ + v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; + v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors); + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. + */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + i40evf_acquire_msix_vectors(adapter, v_budget); + +out: + adapter->netdev->real_num_tx_queues = pairs; + return err; +} + +/** + * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) +{ + int q_idx, num_q_vectors; + struct i40e_q_vector *q_vector; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + if (!q_vector) + goto err_out; + q_vector->adapter = adapter; + q_vector->vsi = &adapter->vsi; + q_vector->v_idx = q_idx; + netif_napi_add(adapter->netdev, &q_vector->napi, + i40evf_napi_poll, 64); + adapter->q_vector[q_idx] = q_vector; + } + + return 0; + +err_out: + while (q_idx) { + q_idx--; + q_vector = adapter->q_vector[q_idx]; + netif_napi_del(&q_vector->napi); + kfree(q_vector); + adapter->q_vector[q_idx] = NULL; + } + return -ENOMEM; +} + +/** + * i40evf_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; + napi_vectors = adapter->vsi_res->num_queue_pairs; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; + + adapter->q_vector[q_idx] = NULL; + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + kfree(q_vector); + } +} + +/** + * i40evf_reset_interrupt_capability - Reset MSIX setup + * @adapter: board private structure + * + **/ +void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + + return; +} + +/** + * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) +{ + int err; + + err = i40evf_set_interrupt_capability(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = i40evf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + err = i40evf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" : + "Disabled", adapter->vsi_res->num_queue_pairs); + + return 0; +err_alloc_queues: + i40evf_free_q_vectors(adapter); +err_alloc_q_vectors: + i40evf_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * i40evf_watchdog_timer - Periodic call-back timer + * @data: pointer to adapter disguised as unsigned long + **/ +static void i40evf_watchdog_timer(unsigned long data) +{ + struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; + schedule_work(&adapter->watchdog_task); + /* timer will be rescheduled in watchdog task */ +} + +/** + * i40evf_watchdog_task - Periodic call-back task + * @work: pointer to work_struct + **/ +static void i40evf_watchdog_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + watchdog_task); + struct i40e_hw *hw = &adapter->hw; + + if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + dev_info(&adapter->pdev->dev, "Checking for redemption\n"); + if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) { + /* A chance for redemption! */ + dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); + adapter->state = __I40EVF_STARTUP; + adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED; + schedule_delayed_work(&adapter->init_task, 10); + clear_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section); + /* Don't reschedule the watchdog, since we've restarted + * the init task. When init_task contacts the PF and + * gets everything set up again, it'll restart the + * watchdog for us. Down, boy. Sit. Stay. Woof. 
+			 */
+			return;
+		}
+		adapter->aq_pending = 0;
+		adapter->aq_required = 0;
+		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+		goto watchdog_done;
+	}
+
+	if ((adapter->state < __I40EVF_DOWN) ||
+	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
+		goto watchdog_done;
+
+	/* check for reset */
+	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
+	    (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+		adapter->state = __I40EVF_RESETTING;
+		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+		dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
+		dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+		schedule_work(&adapter->reset_task);
+		adapter->aq_pending = 0;
+		adapter->aq_required = 0;
+		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+		goto watchdog_done;
+	}
+
+	/* Process admin queue tasks. After init, everything gets done
+	 * here so we don't race on the admin queue.
+	 */
+	if (adapter->aq_pending)
+		goto watchdog_done;
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+		i40evf_map_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+		i40evf_add_ether_addrs(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+		i40evf_add_vlans(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+		i40evf_del_ether_addrs(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+		i40evf_del_vlans(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+		i40evf_disable_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+		i40evf_configure_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+		i40evf_enable_queues(adapter);
+		goto watchdog_done;
+	}
+
+	if (adapter->state == __I40EVF_RUNNING)
+		i40evf_request_stats(adapter);
+
+	i40evf_irq_enable(adapter, true);
+	i40evf_fire_sw_int(adapter, 0xFF);
+
+watchdog_done:
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
+	if (adapter->aq_required)
+		mod_timer(&adapter->watchdog_timer,
+			  jiffies + msecs_to_jiffies(20));
+	else
+		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+	schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * next_queue - increment to next available tx queue
+ * @adapter: board private structure
+ * @j: queue counter
+ *
+ * Helper function for RSS programming to increment through available
+ * queues. Returns the next queue value.
+ **/
+static int next_queue(struct i40evf_adapter *adapter, int j)
+{
+	j += 1;
+
+	return j >= adapter->vsi_res->num_queue_pairs ?
0 : j; +} + +/** + * i40evf_configure_rss - Prepare for RSS if used + * @adapter: board private structure + **/ +static void i40evf_configure_rss(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + u32 lut = 0; + int i, j; + u64 hena; + + /* Set of random keys generated using kernel random number generator */ + static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = { + 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127, + 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0, + 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e, + 0x4954b126 }; + + /* Hash type is configured by the PF - we just supply the key */ + + /* Fill out hash function seed */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed[i]); + + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + hena = I40E_DEFAULT_RSS_HENA; + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + + /* Populate the LUT with max no. of queues in round robin fashion */ + j = adapter->vsi_res->num_queue_pairs; + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + j = next_queue(adapter, j); + lut = j; + j = next_queue(adapter, j); + lut |= j << 8; + j = next_queue(adapter, j); + lut |= j << 16; + j = next_queue(adapter, j); + lut |= j << 24; + wr32(hw, I40E_VFQF_HLUT(i), lut); + } + i40e_flush(hw); +} + +#define I40EVF_RESET_WAIT_MS 100 +#define I40EVF_RESET_WAIT_COUNT 200 +/** + * i40evf_reset_task - Call-back task to handle hardware reset + * @work: pointer to work_struct + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. + **/ +static void i40evf_reset_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = container_of(work, + struct i40evf_adapter, + reset_task); + struct i40e_hw *hw = &adapter->hw; + int i = 0, err; + uint32_t rstat_val; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + udelay(500); + + if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { + dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); + i40evf_request_reset(adapter); + } + + /* poll until we see the reset actually happen */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (rstat_val != I40E_VFR_VFACTIVE) { + dev_info(&adapter->pdev->dev, "Reset now occurring\n"); + break; + } else { + msleep(I40EVF_RESET_WAIT_MS); + } + } + if (i == I40EVF_RESET_WAIT_COUNT) { + dev_err(&adapter->pdev->dev, "Reset was not detected\n"); + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + goto continue_reset; /* act like the reset happened */ + } + + /* wait until the reset is complete and the PF is responding to us */ + for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { + rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (rstat_val == I40E_VFR_VFACTIVE) { + dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n"); + break; + } else { + msleep(I40EVF_RESET_WAIT_MS); + } + } + if (i == I40EVF_RESET_WAIT_COUNT) { + /* reset never finished */ + dev_err(&adapter->pdev->dev, "Reset never finished (%x). 
PF driver is dead, and so am I.\n", + rstat_val); + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + + if (netif_running(adapter->netdev)) + i40evf_close(adapter->netdev); + + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_queues(adapter); + kfree(adapter->vf_res); + i40evf_shutdown_adminq(hw); + adapter->netdev->flags &= ~IFF_UP; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; /* Do not attempt to reinit. It's dead, Jim. */ + } + +continue_reset: + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + + i40evf_down(adapter); + adapter->state = __I40EVF_RESETTING; + + /* kill and reinit the admin queue */ + if (i40evf_shutdown_adminq(hw)) + dev_warn(&adapter->pdev->dev, + "%s: Failed to destroy the Admin Queue resources\n", + __func__); + err = i40evf_init_adminq(hw); + if (err) + dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", + __func__, err); + + adapter->aq_pending = 0; + adapter->aq_required = 0; + i40evf_map_queues(adapter); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + mod_timer(&adapter->watchdog_timer, jiffies + 2); + + if (netif_running(adapter->netdev)) { + /* allocate transmit descriptors */ + err = i40evf_setup_all_tx_resources(adapter); + if (err) + goto reset_err; + + /* allocate receive descriptors */ + err = i40evf_setup_all_rx_resources(adapter); + if (err) + goto reset_err; + + i40evf_configure(adapter); + + err = i40evf_up_complete(adapter); + if (err) + goto reset_err; + + i40evf_irq_enable(adapter, true); + } + return; +reset_err: + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n"); + i40evf_close(adapter->netdev); +} + +/** + * i40evf_adminq_task - worker thread to clean the admin queue + * @work: pointer to work_struct containing our data + **/ +static void i40evf_adminq_task(struct work_struct *work) +{ + struct i40evf_adapter *adapter = + container_of(work, struct i40evf_adapter, adminq_task); + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + struct i40e_virtchnl_msg *v_msg; + i40e_status ret; + u16 pending; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + return; + + event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + if (!event.msg_buf) { + dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n", + __func__); + return; + } + v_msg = (struct i40e_virtchnl_msg *)&event.desc; + do { + ret = i40evf_clean_arq_element(hw, &event, &pending); + if (ret) + break; /* No event to process or error cleaning ARQ */ + + i40evf_virtchnl_completion(adapter, v_msg->v_opcode, + v_msg->v_retval, event.msg_buf, + event.msg_size); + if (pending != 0) { + dev_info(&adapter->pdev->dev, + "%s: ARQ: Pending events %d\n", + __func__, pending); + memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); + } + } while (pending); + + /* re-enable Admin queue interrupt cause */ + i40evf_misc_irq_enable(adapter); + + kfree(event.msg_buf); +} + +/** + * i40evf_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + if (adapter->tx_rings[i]->desc) + i40evf_free_tx_resources(adapter->tx_rings[i]); + +} + +/** + * i40evf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, 
then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); + if (!err) + continue; + dev_err(&adapter->pdev->dev, + "%s: Allocation for Tx Queue %u failed\n", + __func__, i); + break; + } + + return err; +} + +/** + * i40evf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); + if (!err) + continue; + dev_err(&adapter->pdev->dev, + "%s: Allocation for Rx Queue %u failed\n", + __func__, i); + break; + } + return err; +} + +/** + * i40evf_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + if (adapter->rx_rings[i]->desc) + i40evf_free_rx_resources(adapter->rx_rings[i]); +} + +/** + * i40evf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int i40evf_open(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int err; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) { + dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); + return -EIO; + } + if (adapter->state != __I40EVF_DOWN) + return -EBUSY; + + /* allocate transmit descriptors */ + err = i40evf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = i40evf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* clear any pending interrupts, may auto mask */ + err = i40evf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto err_req_irq; + + i40evf_configure(adapter); + + err = i40evf_up_complete(adapter); + if (err) + goto err_req_irq; + + i40evf_irq_enable(adapter, true); + + return 0; + +err_req_irq: + i40evf_down(adapter); + i40evf_free_traffic_irqs(adapter); +err_setup_rx: + i40evf_free_all_rx_resources(adapter); +err_setup_tx: + i40evf_free_all_tx_resources(adapter); + + return err; +} + +/** + * i40evf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. 
The hardware is still under the driver's control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->state <= __I40EVF_DOWN)
+		return 0;
+
+	/* signal that we are down to the interrupt handler */
+	adapter->state = __I40EVF_DOWN;
+
+	set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+	i40evf_down(adapter);
+	i40evf_free_traffic_irqs(adapter);
+
+	i40evf_free_all_tx_resources(adapter);
+	i40evf_free_all_rx_resources(adapter);
+
+	return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	WARN_ON(in_interrupt());
+
+	adapter->state = __I40EVF_RESETTING;
+
+	i40evf_down(adapter);
+
+	/* allocate transmit descriptors */
+	err = i40evf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_reinit;
+
+	/* allocate receive descriptors */
+	err = i40evf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_reinit;
+
+	i40evf_configure(adapter);
+
+	err = i40evf_up_complete(adapter);
+	if (err)
+		goto err_reinit;
+
+	i40evf_irq_enable(adapter, true);
+	return;
+
+err_reinit:
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+	i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+		return -EINVAL;
+
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+	i40evf_reinit_locked(adapter);
+	return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+	.ndo_open		= i40evf_open,
+	.ndo_stop		= i40evf_close,
+	.ndo_start_xmit		= i40evf_xmit_frame,
+	.ndo_get_stats		= i40evf_get_stats,
+	.ndo_set_rx_mode	= i40evf_set_rx_mode,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= i40evf_set_mac,
+	.ndo_change_mtu		= i40evf_change_mtu,
+	.ndo_tx_timeout		= i40evf_tx_timeout,
+	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+	u32 rstat;
+	int i;
+
+	for (i = 0; i < 100; i++) {
+		rstat = rd32(hw, I40E_VFGEN_RSTAT);
+		if (rstat == I40E_VFR_VFACTIVE)
+			return 0;
+		udelay(10);
+	}
+	return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+	struct i40evf_adapter *adapter = container_of(work,
+						      struct i40evf_adapter,
+						      init_task.work);
+	struct net_device *netdev = adapter->netdev;
+	struct i40evf_mac_filter *f;
+	struct i40e_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err, bufsz;
+
+	switch (adapter->state) {
+	case __I40EVF_STARTUP:
+		/* driver loaded, probe complete */
+		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		err = i40e_set_mac_type(hw);
+		if (err) {
+			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+				err);
+			goto err;
+		}
+		err = i40evf_check_reset_complete(hw);
+		if (err) {
+			dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+				err);
+			goto err;
+		}
+		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+		hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+		hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+		hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+		err = i40evf_init_adminq(hw);
+		if (err) {
+			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+				err);
+			goto err;
+		}
+		err = i40evf_send_api_ver(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+			i40evf_shutdown_adminq(hw);
+			goto err;
+		}
+		adapter->state = __I40EVF_INIT_VERSION_CHECK;
+		goto restart;
+		break;
+	case __I40EVF_INIT_VERSION_CHECK:
+		if (!i40evf_asq_done(hw)) {
+			dev_err(&pdev->dev, "Admin queue command never completed.\n");
+			goto err;
+		}
+
+		/* aq msg sent, awaiting reply */
+		err = i40evf_verify_api_ver(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
+				err);
+			goto err;
+		}
+		err = i40evf_send_vf_config_msg(adapter);
+		if (err) {
+			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
+				err);
+			goto err;
+		}
+		adapter->state = __I40EVF_INIT_GET_RESOURCES;
+		goto restart;
+		break;
+	case __I40EVF_INIT_GET_RESOURCES:
+		/* aq msg sent, awaiting reply */
+		if (!adapter->vf_res) {
+			bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+				(I40E_MAX_VF_VSI *
+				 sizeof(struct i40e_virtchnl_vsi_resource));
+			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+			if (!adapter->vf_res)
+				goto err;
+		}
+		err = i40evf_get_vf_config(adapter);
+		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+			goto restart;
+		if (err) {
+			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+				err);
+			goto err_alloc;
+		}
+		adapter->state = __I40EVF_INIT_SW;
+		break;
+	default:
+		goto err_alloc;
+	}
+	/* got VF config message back from PF, now we can parse it */
+	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+	}
+	if (!adapter->vsi_res) {
+		dev_err(&pdev->dev,
"No LAN VSI found\n"); + goto err_alloc; + } + + adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &i40evf_netdev_ops; + i40evf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netdev->features |= NETIF_F_HIGHDMA | + NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_GRO; + + if (adapter->vf_res->vf_offload_flags + & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { + netdev->vlan_features = netdev->features; + netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features; + netdev->hw_features &= ~NETIF_F_RXCSUM; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n", + adapter->hw.mac.addr); + random_ether_addr(adapter->hw.mac.addr); + } + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + INIT_LIST_HEAD(&adapter->mac_filter_list); + INIT_LIST_HEAD(&adapter->vlan_filter_list); + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (NULL == f) + goto err_sw_init; + + memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN); + f->add = true; + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + + list_add(&f->list, &adapter->mac_filter_list); + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = &i40evf_watchdog_timer; + adapter->watchdog_timer.data = (unsigned long)adapter; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + err = i40evf_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + i40evf_map_rings_to_vectors(adapter); + i40evf_configure_rss(adapter); + err = i40evf_request_misc_irq(adapter); + if (err) + goto err_sw_init; + + netif_carrier_off(netdev); + + adapter->vsi.id = adapter->vsi_res->vsi_id; + adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */ + adapter->vsi.back = adapter; + adapter->vsi.base_vector = 1; + adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK; + adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC; + adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC; + adapter->vsi.netdev = adapter->netdev; + + if (!adapter->netdev_registered) { + err = register_netdev(netdev); + if (err) + goto err_register; + } + + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + + dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr); + if (netdev->features & NETIF_F_GRO) + dev_info(&pdev->dev, "GRO is enabled\n"); + + dev_info(&pdev->dev, "%s\n", i40evf_driver_string); + adapter->state = __I40EVF_DOWN; + set_bit(__I40E_DOWN, &adapter->vsi.state); + i40evf_misc_irq_enable(adapter); + return; +restart: + schedule_delayed_work(&adapter->init_task, + msecs_to_jiffies(50)); + return; + +err_register: + i40evf_free_misc_irq(adapter); +err_sw_init: + i40evf_reset_interrupt_capability(adapter); +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + /* Things went into the weeds, so try again later */ + if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) { + dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n"); + adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; + return; /* do not reschedule */ + } + schedule_delayed_work(&adapter->init_task, HZ * 3); + return; +} + +/** + * i40evf_shutdown - Shutdown the device in preparation for a reboot + * @pdev: pci device structure + **/ +static void 
i40evf_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + i40evf_close(netdev); + +#ifdef CONFIG_PM + pci_save_state(pdev); + +#endif + pci_disable_device(pdev); +} + +/** + * i40evf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in i40evf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * i40evf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct i40evf_adapter *adapter = NULL; + struct i40e_hw *hw = NULL; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_regions(pdev, i40evf_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), + MAX_TX_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + + hw = &adapter->hw; + hw->back = adapter; + + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + adapter->state = __I40EVF_STARTUP; + + /* Call save state here because it relies on the adapter struct. */ + pci_save_state(pdev); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw->hw_addr) { + err = -EIO; + goto err_ioremap; + } + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + + INIT_WORK(&adapter->reset_task, i40evf_reset_task); + INIT_WORK(&adapter->adminq_task, i40evf_adminq_task); + INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task); + INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task); + schedule_delayed_work(&adapter->init_task, 10); + + return 0; + +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +#ifdef CONFIG_PM +/** + * i40evf_suspend - Power management suspend routine + * @pdev: PCI device information struct + * @state: unused + * + * Called when the system (VM) is entering sleep/suspend. 
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int retval = 0;
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		i40evf_down(adapter);
+		rtnl_unlock();
+	}
+	i40evf_free_misc_irq(adapter);
+	i40evf_reset_interrupt_capability(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+	struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/* pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	rtnl_lock();
+	err = i40evf_set_interrupt_capability(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+		return err;
+	}
+	err = i40evf_request_misc_irq(adapter);
+	rtnl_unlock();
+	if (err) {
+		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+		return err;
+	}
+
+	schedule_work(&adapter->reset_task);
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	struct i40e_hw *hw = &adapter->hw;
+
+	cancel_delayed_work_sync(&adapter->init_task);
+	cancel_work_sync(&adapter->reset_task);
+
+	if (adapter->netdev_registered) {
+		unregister_netdev(netdev);
+		adapter->netdev_registered = false;
+	}
+	adapter->state = __I40EVF_REMOVE;
+
+	if (adapter->msix_entries) {
+		i40evf_misc_irq_disable(adapter);
+		i40evf_free_misc_irq(adapter);
+		i40evf_reset_interrupt_capability(adapter);
+	}
+
+	del_timer_sync(&adapter->watchdog_timer);
+	flush_scheduled_work();
+
+	if (hw->aq.asq.count)
+		i40evf_shutdown_adminq(hw);
+
+	iounmap(hw->hw_addr);
+	pci_release_regions(pdev);
+
+	i40evf_free_queues(adapter);
+	kfree(adapter->vf_res);
+
+	free_netdev(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+	.name     = i40evf_driver_name,
+	.id_table = i40evf_pci_tbl,
+	.probe    = i40evf_probe,
+	.remove   = i40evf_remove,
+#ifdef CONFIG_PM
+	.suspend  = i40evf_suspend,
+	.resume   = i40evf_resume,
+#endif
+	.shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/ +static int __init i40evf_init_module(void) +{ + int ret; + pr_info("i40evf: %s - version %s\n", i40evf_driver_string, + i40evf_driver_version); + + pr_info("%s\n", i40evf_copyright); + + ret = pci_register_driver(&i40evf_driver); + return ret; +} + +module_init(i40evf_init_module); + +/** + * i40e_exit_module - Driver Exit Cleanup Routine + * + * i40e_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit i40evf_exit_module(void) +{ + pci_unregister_driver(&i40evf_driver); +} + +module_exit(i40evf_exit_module); + +/* i40evf_main.c */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ linux-3.13.0/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -0,0 +1,789 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40evf.h" +#include "i40e_prototype.h" + +/* busy wait delay in msec */ +#define I40EVF_BUSY_WAIT_DELAY 10 +#define I40EVF_BUSY_WAIT_COUNT 50 + +/** + * i40evf_send_pf_msg + * @adapter: adapter structure + * @op: virtual channel opcode + * @msg: pointer to message buffer + * @len: message length + * + * Send message to PF and print status if failure. + **/ +static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, + enum i40e_virtchnl_ops op, u8 *msg, u16 len) +{ + struct i40e_hw *hw = &adapter->hw; + i40e_status err; + + if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + + err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); + if (err) + dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n", + op, err, hw->aq.asq_last_status); + return err; +} + +/** + * i40evf_send_api_ver + * @adapter: adapter structure + * + * Send API version admin queue message to the PF. The reply is not checked + * in this function. Returns 0 if the message was successfully + * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + **/ +int i40evf_send_api_ver(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_version_info vvi; + + vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; + vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; + + return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi, + sizeof(vvi)); +} + +/** + * i40evf_verify_api_ver + * @adapter: adapter structure + * + * Compare API versions with the PF. Must be called after admin queue is + * initialized. Returns 0 if API versions match, -EIO if + * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. 
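The init/exit pair above does nothing beyond registering and unregistering the pci_driver. That boilerplate is commonly collapsed with the module_pci_driver() helper; a sketch under that assumption, with illustrative names and a placeholder device table (the ID shown is only an example):

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_pci_tbl[] = {
		{ PCI_VDEVICE(INTEL, 0x154c) },	/* illustrative VF device ID */
		{ /* sentinel */ }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		return pci_enable_device(pdev);
	}

	static void example_remove(struct pci_dev *pdev)
	{
		pci_disable_device(pdev);
	}

	static struct pci_driver example_driver = {
		.name     = "example_vf",
		.id_table = example_pci_tbl,
		.probe    = example_probe,
		.remove   = example_remove,
	};

	/* Expands to the same module_init()/module_exit() pair written out above. */
	module_pci_driver(example_driver);

	MODULE_LICENSE("GPL");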
+ **/ +int i40evf_verify_api_ver(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_version_info *pf_vvi; + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + i40e_status err; + + event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) + goto out_alloc; + + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + if (err) { + err = -EIO; + goto out_alloc; + } + + if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != + I40E_VIRTCHNL_OP_VERSION) { + err = -EIO; + goto out_alloc; + } + + pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; + if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) || + (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR)) + err = -EIO; + +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + +/** + * i40evf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. + **/ +int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) +{ + return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); +} + +/** + * i40evf_get_vf_config + * @hw: pointer to the hardware structure + * @len: length of buffer + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. + **/ +int i40evf_get_vf_config(struct i40evf_adapter *adapter) +{ + struct i40e_hw *hw = &adapter->hw; + struct i40e_arq_event_info event; + u16 len; + i40e_status err; + + len = sizeof(struct i40e_virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); + event.msg_size = len; + event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + if (!event.msg_buf) { + err = -ENOMEM; + goto out; + } + + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) + goto out_alloc; + + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + if (err) { + dev_err(&adapter->pdev->dev, + "%s: Error returned from PF, %d, %d\n", __func__, + le32_to_cpu(event.desc.cookie_high), + le32_to_cpu(event.desc.cookie_low)); + err = -EIO; + goto out_alloc; + } + + if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != + I40E_VIRTCHNL_OP_GET_VF_RESOURCES) { + dev_err(&adapter->pdev->dev, + "%s: Invalid response from PF, %d, %d\n", __func__, + le32_to_cpu(event.desc.cookie_high), + le32_to_cpu(event.desc.cookie_low)); + err = -EIO; + goto out_alloc; + } + memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len)); + + i40e_vf_parse_hw_config(hw, adapter->vf_res); +out_alloc: + kfree(event.msg_buf); +out: + return err; +} + +/** + * i40evf_configure_queues + * @adapter: adapter structure + * + * Request that the PF set up our (previously allocated) queues. 
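i40evf_verify_api_ver() and i40evf_get_vf_config() share one shape: allocate a reply buffer, pop a single event off the admin receive queue, treat cookie_high as the opcode and cookie_low as the status, and fail with -EIO on any mismatch. A condensed sketch of that shape with made-up types and helpers (aq_reply, example_poll_arq, example_check_reply) standing in for the real i40e_arq_event_info plumbing:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct aq_reply {
		u32 opcode;	/* cookie_high in the real descriptor */
		u32 retval;	/* cookie_low in the real descriptor */
		u16 len;	/* bytes actually returned */
		u8 *buf;
	};

	/* Stand-in for draining one element from the admin receive queue. */
	static int example_poll_arq(struct aq_reply *reply)
	{
		return -EAGAIN;		/* a real implementation reads the AQ ring */
	}

	static int example_check_reply(u32 expected_op, void *out, u16 out_len)
	{
		struct aq_reply reply = { .len = out_len };
		int err;

		reply.buf = kzalloc(out_len, GFP_KERNEL);
		if (!reply.buf)
			return -ENOMEM;

		err = example_poll_arq(&reply);
		if (err)
			goto out;
		if (reply.retval || reply.opcode != expected_op) {
			err = -EIO;	/* wrong message, or the PF reported failure */
			goto out;
		}
		memcpy(out, reply.buf, min_t(u16, reply.len, out_len));
	out:
		kfree(reply.buf);
		return err;
	}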
+ **/ +void i40evf_configure_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_vsi_queue_config_info *vqci; + struct i40e_virtchnl_queue_pair_info *vqpi; + int pairs = adapter->vsi_res->num_queue_pairs; + int i, len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; + len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); + vqci = kzalloc(len, GFP_ATOMIC); + if (!vqci) { + dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n", + __func__); + return; + } + vqci->vsi_id = adapter->vsi_res->vsi_id; + vqci->num_queue_pairs = pairs; + vqpi = vqci->qpair; + /* Size check is not needed here - HW max is 16 queue pairs, and we + * can fit info for 31 of them into the AQ buffer before it overflows. + */ + for (i = 0; i < pairs; i++) { + vqpi->txq.vsi_id = vqci->vsi_id; + vqpi->txq.queue_id = i; + vqpi->txq.ring_len = adapter->tx_rings[i]->count; + vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma; + + vqpi->rxq.vsi_id = vqci->vsi_id; + vqpi->rxq.queue_id = i; + vqpi->rxq.ring_len = adapter->rx_rings[i]->count; + vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; + vqpi->rxq.max_pkt_size = adapter->netdev->mtu + + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; + vqpi++; + } + + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); + kfree(vqci); + adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES; +} + +/** + * i40evf_enable_queues + * @adapter: adapter structure + * + * Request that the PF enable all of our queues. + **/ +void i40evf_enable_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_queue_select vqs; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; + vqs.vsi_id = adapter->vsi_res->vsi_id; + vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.rx_queues = vqs.tx_queues; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); + adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; +} + +/** + * i40evf_disable_queues + * @adapter: adapter structure + * + * Request that the PF disable all of our queues. 
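i40evf_configure_queues() sizes its buffer as a fixed header plus one queue_pair_info per queue and fills the trailing array in place before sending it. The same pattern in miniature, with illustrative types; the driver allocates with GFP_ATOMIC, GFP_KERNEL is assumed here for simplicity:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct example_qpair {
		u16 queue_id;
		u16 ring_len;
		u64 dma_ring_addr;
	};

	struct example_queue_cfg {
		u16 vsi_id;
		u16 num_queue_pairs;
		struct example_qpair qpair[];	/* sized at runtime */
	};

	static struct example_queue_cfg *example_build_queue_cfg(u16 vsi_id,
								  const struct example_qpair *pairs,
								  u16 n)
	{
		struct example_queue_cfg *cfg;
		size_t len = sizeof(*cfg) + n * sizeof(*pairs);

		cfg = kzalloc(len, GFP_KERNEL);
		if (!cfg)
			return NULL;

		cfg->vsi_id = vsi_id;
		cfg->num_queue_pairs = n;
		memcpy(cfg->qpair, pairs, n * sizeof(*pairs));
		return cfg;	/* caller sends the message and kfree()s it */
	}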
+ **/ +void i40evf_disable_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_queue_select vqs; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; + vqs.vsi_id = adapter->vsi_res->vsi_id; + vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.rx_queues = vqs.tx_queues; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); + adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; +} + +/** + * i40evf_map_queues + * @adapter: adapter structure + * + * Request that the PF map queues to interrupt vectors. Misc causes, including + * admin queue, are always mapped to vector 0. + **/ +void i40evf_map_queues(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_irq_map_info *vimi; + int v_idx, q_vectors, len; + struct i40e_q_vector *q_vector; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; + + q_vectors = adapter->num_msix_vectors - NONQ_VECS; + + len = sizeof(struct i40e_virtchnl_irq_map_info) + + (adapter->num_msix_vectors * + sizeof(struct i40e_virtchnl_vector_map)); + vimi = kzalloc(len, GFP_ATOMIC); + if (!vimi) { + dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n", + __func__); + return; + } + + vimi->num_vectors = adapter->num_msix_vectors; + /* Queue vectors first */ + for (v_idx = 0; v_idx < q_vectors; v_idx++) { + q_vector = adapter->q_vector[v_idx]; + vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; + vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS; + vimi->vecmap[v_idx].txq_map = q_vector->ring_mask; + vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask; + } + /* Misc vector last - this is only for AdminQ messages */ + vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id; + vimi->vecmap[v_idx].vector_id = 0; + vimi->vecmap[v_idx].txq_map = 0; + vimi->vecmap[v_idx].rxq_map = 0; + + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vimi, len); + kfree(vimi); + adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS; +} + +/** + * i40evf_add_ether_addrs + * @adapter: adapter structure + * @addrs: the MAC address filters to add (contiguous) + * @count: number of filters + * + * Request that the PF add one or more addresses to our filters. 
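Both the enable and the disable requests select every queue pair with the dense mask (1 << num_queue_pairs) - 1, i.e. bits 0..n-1 set for n queues. A tiny illustration; the shift guard for n >= 32 is an addition for safety, not something the driver needs for its at-most-16 queue pairs:

	#include <linux/types.h>

	/* Dense selection mask covering queues 0..n-1. */
	static u32 example_queue_mask(unsigned int n)
	{
		if (n >= 32)		/* avoid an undefined full-width shift */
			return ~0U;
		return (1U << n) - 1;	/* e.g. n = 4 -> 0x0000000f */
	}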
+ **/ +void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_ether_addr_list *veal; + int len, i = 0, count = 0; + struct i40evf_mac_filter *f; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; + + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_ether_addr_list)) / + sizeof(struct i40e_virtchnl_ether_addr); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n", + __func__); + return; + } + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) { + memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); + i++; + f->add = false; + } + } + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + (u8 *)veal, len); + kfree(veal); + adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + +} + +/** + * i40evf_del_ether_addrs + * @adapter: adapter structure + * @addrs: the MAC address filters to remove (contiguous) + * @count: number of filtes + * + * Request that the PF remove one or more addresses from our filters. 
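When the filter list is longer than one admin queue buffer can carry, the code above clamps the element count so that the header plus the elements still fits in I40EVF_MAX_AQ_BUF_SIZE. The arithmetic in isolation, with a made-up buffer limit and element type (not the real sizes):

	#include <linux/types.h>

	#define EXAMPLE_MAX_BUF	4096	/* stand-in for the real AQ buffer limit */

	struct example_mac_elem { u8 addr[6]; u8 pad[2]; };
	struct example_mac_list {
		u16 vsi_id;
		u16 num_elements;
		struct example_mac_elem list[];
	};

	/* Largest element count whose message still fits in one buffer. */
	static u16 example_clamp_count(u16 wanted)
	{
		size_t room = EXAMPLE_MAX_BUF - sizeof(struct example_mac_list);
		u16 fits = room / sizeof(struct example_mac_elem);

		return wanted > fits ? fits : wanted;
	}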
+ **/ +void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_ether_addr_list *veal; + struct i40evf_mac_filter *f, *ftmp; + int len, i = 0, count = 0; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->remove) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (count * sizeof(struct i40e_virtchnl_ether_addr)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_ether_addr_list)) / + sizeof(struct i40e_virtchnl_ether_addr); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n", + __func__); + return; + } + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + if (f->remove) { + memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN); + i++; + list_del(&f->list); + kfree(f); + } + } + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + (u8 *)veal, len); + kfree(veal); + adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; +} + +/** + * i40evf_add_vlans + * @adapter: adapter structure + * @vlans: the VLANs to add + * @count: number of VLANs + * + * Request that the PF add one or more VLAN filters to our VSI. 
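The delete path walks the filter list with list_for_each_entry_safe() because it frees nodes as it goes; the _safe variant caches the next pointer before the current entry is unlinked. A self-contained sketch of that idiom with an invented filter type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct example_filter {
		struct list_head list;
		bool remove;
	};

	/* Freeing entries while iterating requires the _safe walker. */
	static void example_prune(struct list_head *filters)
	{
		struct example_filter *f, *ftmp;

		list_for_each_entry_safe(f, ftmp, filters, list) {
			if (f->remove) {
				list_del(&f->list);
				kfree(f);
			}
		}
	}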
+ **/ +void i40evf_add_vlans(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_vlan_filter_list *vvfl; + int len, i = 0, count = 0; + struct i40evf_vlan_filter *f; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_vlan_filter_list)) / + sizeof(u16); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n", + __func__); + return; + } + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vvfl->vlan_id[i] = f->vlan; + i++; + f->add = false; + } + } + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; +} + +/** + * i40evf_del_vlans + * @adapter: adapter structure + * @vlans: the VLANs to remove + * @count: number of VLANs + * + * Request that the PF remove one or more VLAN filters from our VSI. + **/ +void i40evf_del_vlans(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_vlan_filter_list *vvfl; + struct i40evf_vlan_filter *f, *ftmp; + int len, i = 0, count = 0; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->remove) + count++; + } + if (!count) { + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; + + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (count * sizeof(u16)); + if (len > I40EVF_MAX_AQ_BUF_SIZE) { + dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n", + __func__); + count = (I40EVF_MAX_AQ_BUF_SIZE - + sizeof(struct i40e_virtchnl_vlan_filter_list)) / + sizeof(u16); + len = I40EVF_MAX_AQ_BUF_SIZE; + } + vvfl = kzalloc(len, GFP_ATOMIC); + if (!vvfl) { + dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n", + __func__); + return; + } + vvfl->vsi_id = adapter->vsi_res->vsi_id; + vvfl->num_elements = count; + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vvfl->vlan_id[i] = f->vlan; + i++; + list_del(&f->list); + kfree(f); + } + } + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); + kfree(vvfl); + adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; +} + +/** + * i40evf_set_promiscuous + * @adapter: adapter structure + * @flags: bitmask to control unicast/multicast promiscuous. + * + * Request that the PF enable promiscuous mode for our VSI. 
+ **/ +void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) +{ + struct i40e_virtchnl_promisc_info vpi; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "%s: command %d pending\n", + __func__, adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + vpi.vsi_id = adapter->vsi_res->vsi_id; + vpi.flags = flags; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&vpi, sizeof(vpi)); +} + +/** + * i40evf_request_stats + * @adapter: adapter structure + * + * Request VSI statistics from PF. + **/ +void i40evf_request_stats(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_queue_select vqs; + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* no error message, this isn't crucial */ + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS; + vqs.vsi_id = adapter->vsi_res->vsi_id; + /* queue maps are ignored for this message - only the vsi is used */ + if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs))) + /* if the request failed, don't lock out others */ + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} +/** + * i40evf_request_reset + * @adapter: adapter structure + * + * Request that the PF reset this VF. No response is expected. + **/ +void i40evf_request_reset(struct i40evf_adapter *adapter) +{ + /* Don't check CURRENT_OP - this is always higher priority */ + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} + +/** + * i40evf_virtchnl_completion + * @adapter: adapter structure + * @v_opcode: opcode sent by PF + * @v_retval: retval sent by PF + * @msg: message sent by PF + * @msglen: message length + * + * Asynchronous completion function for admin queue messages. Rather than busy + * wait, we fire off our requests and assume that no errors will be returned. + * This function handles the reply messages. 
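All of these request helpers refuse to start while adapter->current_op is set, so only one virtchnl command is ever outstanding; the completion handler below clears it, and i40evf_request_stats() clears it itself when the send fails so later requests are not locked out. A minimal sketch of that single-slot serialization with invented names:

	#include <linux/errno.h>
	#include <linux/types.h>

	enum example_op { EXAMPLE_OP_NONE = 0, EXAMPLE_OP_GET_STATS = 14 };

	struct example_channel {
		enum example_op current_op;	/* only one request may be in flight */
	};

	/* Stand-in for posting a message to the PF over the admin queue. */
	static int example_post(struct example_channel *ch, enum example_op op)
	{
		return 0;
	}

	static int example_send(struct example_channel *ch, enum example_op op)
	{
		int err;

		if (ch->current_op != EXAMPLE_OP_NONE)
			return -EBUSY;		/* a previous reply is still outstanding */

		ch->current_op = op;
		err = example_post(ch, op);
		if (err)
			ch->current_op = EXAMPLE_OP_NONE;	/* don't lock out later requests */
		return err;
	}

	/* Called from the reply path once the PF has answered. */
	static void example_complete(struct example_channel *ch)
	{
		ch->current_op = EXAMPLE_OP_NONE;
	}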
+ **/ +void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, + u8 *msg, u16 msglen) +{ + struct net_device *netdev = adapter->netdev; + + if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { + struct i40e_virtchnl_pf_event *vpe = + (struct i40e_virtchnl_pf_event *)msg; + switch (vpe->event) { + case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + adapter->link_up = + vpe->event_data.link_event.link_status; + if (adapter->link_up && !netif_carrier_ok(netdev)) { + dev_info(&adapter->pdev->dev, "NIC Link is Up\n"); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + } else if (!adapter->link_up) { + dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + break; + case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + dev_info(&adapter->pdev->dev, "PF reset warning received\n"); + if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + adapter->flags |= I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); + schedule_work(&adapter->reset_task); + } + break; + default: + dev_err(&adapter->pdev->dev, + "%s: Unknown event %d from pf\n", + __func__, vpe->event); + break; + + } + return; + } + if (v_opcode != adapter->current_op) { + dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n", + __func__, adapter->current_op, v_opcode); + /* We're probably completely screwed at this point, but clear + * the current op and try to carry on.... + */ + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + return; + } + if (v_retval) { + dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n", + __func__, v_retval, v_opcode); + } + switch (v_opcode) { + case I40E_VIRTCHNL_OP_GET_STATS: { + struct i40e_eth_stats *stats = + (struct i40e_eth_stats *)msg; + adapter->net_stats.rx_packets = stats->rx_unicast + + stats->rx_multicast + + stats->rx_broadcast; + adapter->net_stats.tx_packets = stats->tx_unicast + + stats->tx_multicast + + stats->tx_broadcast; + adapter->net_stats.rx_bytes = stats->rx_bytes; + adapter->net_stats.tx_bytes = stats->tx_bytes; + adapter->net_stats.rx_errors = stats->rx_errors; + adapter->net_stats.tx_errors = stats->tx_errors; + adapter->net_stats.rx_dropped = stats->rx_missed; + adapter->net_stats.tx_dropped = stats->tx_discards; + adapter->current_stats = *stats; + } + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER); + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER); + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES); + /* enable transmits */ + i40evf_irq_enable(adapter, true); + netif_tx_start_all_queues(adapter->netdev); + netif_carrier_on(adapter->netdev); + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES); + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); + break; + default: + dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n", + __func__, v_opcode); + break; + } /* switch v_opcode */ + 
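The LINK_CHANGE branch in the completion handler ties carrier state to the transmit queues: carrier on plus waking the queues when the PF reports link up, carrier off plus stopping them when it reports link down. The same logic reduced to a helper (illustrative name only):

	#include <linux/netdevice.h>

	static void example_link_change(struct net_device *netdev, bool link_up)
	{
		if (link_up && !netif_carrier_ok(netdev)) {
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		} else if (!link_up && netif_carrier_ok(netdev)) {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}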
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_82575.c +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1403,6 +1403,13 @@ s32 ret_val; u16 i, rar_count = mac->rar_entry_count; + if ((hw->mac.type >= e1000_i210) && + !(igb_get_flash_presence_i210(hw))) { + ret_val = igb_pll_workaround_i210(hw); + if (ret_val) + return ret_val; + } + /* Initialize identification LED */ ret_val = igb_id_led_init(hw); if (ret_val) { --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_defines.h +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -46,14 +46,15 @@ /* Extended Device Control */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */ /* Physical Func Reset Done Indication */ -#define E1000_CTRL_EXT_PFRSTD 0x00004000 -#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 -#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 -#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 -#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 -#define E1000_CTRL_EXT_EIAME 0x01000000 -#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 /* Interrupt delay cancellation */ /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 @@ -62,6 +63,7 @@ /* packet buffer parity error detection enabled */ /* descriptor FIFO parity error detection enable */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 #define E1000_I2CCMD_REG_ADDR_SHIFT 16 #define E1000_I2CCMD_PHY_ADDR_SHIFT 24 #define E1000_I2CCMD_OPCODE_READ 0x08000000 --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_hw.h +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -569,4 +569,7 @@ /* These functions must be implemented by drivers */ s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); + +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); #endif /* _E1000_HW_H_ */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_i210.c +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -364,7 +364,7 @@ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); if (word_address == address) { *data = INVM_DWORD_TO_WORD_DATA(invm_dword); - hw_dbg("Read INVM Word 0x%02x = %x", + hw_dbg("Read INVM Word 0x%02x = %x\n", address, *data); status = E1000_SUCCESS; break; @@ -835,3 +835,69 @@ } return ret_val; } + +/** + * igb_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. 
+ **/ +s32 igb_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = rd32(E1000_WUC); + mdicnfg = rd32(E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + wr32(E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = 0; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = rd32(E1000_CTRL); + wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = rd32(E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + wr32(E1000_CTRL_EXT, ctrl_ext); + + wr32(E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + wr32(E1000_EEARBC_I210, reg_val); + + igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + usleep_range(1000, 2000); + pci_word &= ~E1000_PCI_PMCSR_D3; + igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + wr32(E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + wr32(E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + wr32(E1000_MDICNFG, mdicnfg); + return ret_val; +} --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_i210.h +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -46,6 +46,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); s32 igb_init_nvm_params_i210(struct e1000_hw *hw); bool igb_get_flash_presence_i210(struct e1000_hw *hw); +s32 igb_pll_workaround_i210(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #define E1000_EEPROM_FLASH_SIZE_WORD 0x11 @@ -91,4 +92,15 @@ #define NVM_LED_1_CFG_DEFAULT_I211 0x0184 #define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + #endif --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_mac.c +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -930,11 +930,10 @@ */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; - hw_dbg("Flow Control = FULL.\r\n"); + hw_dbg("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = RX PAUSE frames only.\n"); } } /* For receiving PAUSE frames ONLY. @@ -949,7 +948,7 @@ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; - hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = TX PAUSE frames only.\n"); } /* For transmitting PAUSE frames ONLY. 
* @@ -963,7 +962,7 @@ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = RX PAUSE frames only.\n"); } /* Per the IEEE spec, at this point flow control should be * disabled. However, we want to consider that we could @@ -989,10 +988,10 @@ (hw->fc.requested_mode == e1000_fc_tx_pause) || (hw->fc.strict_ieee)) { hw->fc.current_mode = e1000_fc_none; - hw_dbg("Flow Control = NONE.\r\n"); + hw_dbg("Flow Control = NONE.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = RX PAUSE frames only.\n"); } /* Now we need to do one last check... If we auto- --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/e1000_regs.h +++ linux-3.13.0/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -69,6 +69,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ linux-3.13.0/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2270,15 +2270,15 @@ ring = adapter->tx_ring[j]; do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); data[i] = ring->tx_stats.packets; data[i+1] = ring->tx_stats.bytes; data[i+2] = ring->tx_stats.restart_queue; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp2); restart2 = ring->tx_stats.restart_queue2; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start)); data[i+2] += restart2; i += IGB_TX_QUEUE_STATS_LEN; @@ -2286,13 +2286,13 @@ for (j = 0; j < adapter->num_rx_queues; j++) { ring = adapter->rx_ring[j]; do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); data[i] = ring->rx_stats.packets; data[i+1] = ring->rx_stats.bytes; data[i+2] = ring->rx_stats.drops; data[i+3] = ring->rx_stats.csum_err; data[i+4] = ring->rx_stats.alloc_failed; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); i += IGB_RX_QUEUE_STATS_LEN; } spin_unlock(&adapter->stats64_lock); --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/igb_main.c +++ linux-3.13.0/drivers/net/ethernet/intel/igb/igb_main.c @@ -4909,8 +4909,10 @@ rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); if (rqdpc) { ring->rx_stats.drops += rqdpc; @@ -4918,10 +4920,10 @@ } do { - start = u64_stats_fetch_begin_bh(&ring->rx_syncp); + start = u64_stats_fetch_begin_irq(&ring->rx_syncp); _bytes = ring->rx_stats.bytes; _packets = ring->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); + } while 
(u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -4934,10 +4936,10 @@ for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *ring = adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->tx_syncp); + start = u64_stats_fetch_begin_irq(&ring->tx_syncp); _bytes = ring->tx_stats.bytes; _packets = ring->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start)); bytes += _bytes; packets += _packets; } @@ -6912,6 +6914,20 @@ } } +void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_read_config_word(adapter->pdev, reg, value); +} + +void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + struct igb_adapter *adapter = hw->back; + + pci_write_config_word(adapter->pdev, reg, *value); +} + s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) { struct igb_adapter *adapter = hw->back; @@ -7841,6 +7857,8 @@ if (netif_running(netdev)) igb_close(netdev); + else + igb_reset(adapter); igb_clear_interrupt_scheme(adapter); --- linux-3.13.0.orig/drivers/net/ethernet/intel/igb/igb_ptp.c +++ linux-3.13.0/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -387,7 +387,7 @@ dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; adapter->tx_hwtstamp_timeouts++; - dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); return; } @@ -454,7 +454,7 @@ rd32(E1000_RXSTMPH); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared++; - dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); } } --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1521,12 +1521,12 @@ int tso; if (test_bit(__IXGB_DOWN, &adapter->flags)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (skb->len <= 0) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1543,7 +1543,7 @@ tso = ixgb_tso(adapter, skb); if (tso < 0) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1208,7 +1208,7 @@ */ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; - hw_dbg(hw, "Detected EEPROM page size = %d words.", + hw_dbg(hw, "Detected EEPROM page size = %d words.\n", hw->eeprom.word_page_size); out: return status; --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1127,10 +1127,10 @@ } do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; #ifdef BP_EXTENDED_STATS data[i] = ring->stats.yields; @@ -1155,10 +1155,10 @@ } do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); data[i] = ring->stats.packets; data[i+1] = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); i += 2; #ifdef BP_EXTENDED_STATS data[i] = 
ring->stats.yields; --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6828,7 +6828,7 @@ } static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; #ifdef IXGBE_FCOE @@ -6854,7 +6854,7 @@ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) break; default: - return __netdev_pick_tx(dev, skb); + return fallback(dev, skb); } f = &adapter->ring_feature[RING_F_FCOE]; @@ -6867,7 +6867,7 @@ return txq + f->offset; #else - return __netdev_pick_tx(dev, skb); + return fallback(dev, skb); #endif } @@ -7181,10 +7181,10 @@ if (ring) { do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; } @@ -7197,10 +7197,10 @@ if (ring) { do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); packets = ring->stats.packets; bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->tx_packets += packets; stats->tx_bytes += bytes; } --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -501,7 +501,7 @@ if (time_out == max_time_out) { status = IXGBE_ERR_LINK_SETUP; - hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n"); } return status; @@ -706,7 +706,7 @@ if (time_out == max_time_out) { status = IXGBE_ERR_LINK_SETUP; - hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n"); } return status; @@ -1132,7 +1132,7 @@ status = 0; } else { if (hw->allow_unsupported_sfp) { - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules."); + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); status = 0; } else { hw_dbg(hw, --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -460,7 +460,7 @@ IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; - e_warn(drv, "clearing RX Timestamp hang"); + e_warn(drv, "clearing RX Timestamp hang\n"); } } @@ -518,7 +518,7 @@ if (timeout) { dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; - e_warn(drv, "clearing Tx Timestamp hang"); + e_warn(drv, "clearing Tx Timestamp hang\n"); return; } --- linux-3.13.0.orig/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ linux-3.13.0/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3351,10 +3351,10 @@ for (i = 0; i < adapter->num_rx_queues; i++) { ring = &adapter->rx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); bytes = ring->total_bytes; packets = ring->total_packets; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_bytes += bytes; stats->rx_packets += packets; } @@ -3362,10 +3362,10 @@ for (i = 0; i < adapter->num_tx_queues; i++) { ring = &adapter->tx_ring[i]; do { - start = u64_stats_fetch_begin_bh(&ring->syncp); + start = u64_stats_fetch_begin_irq(&ring->syncp); bytes = ring->total_bytes; packets = ring->total_packets; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->tx_bytes += bytes; stats->tx_packets += packets; } --- linux-3.13.0.orig/drivers/net/ethernet/lantiq_etop.c +++ linux-3.13.0/drivers/net/ethernet/lantiq_etop.c @@ -620,7 +620,7 @@ static u16 ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { /* we are currently only using the first queue */ return 0; --- linux-3.13.0.orig/drivers/net/ethernet/marvell/mvneta.c +++ linux-3.13.0/drivers/net/ethernet/marvell/mvneta.c @@ -101,16 +101,56 @@ #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) + +/* Exception Interrupt Port/Queue Cause register */ + #define MVNETA_INTR_NEW_CAUSE 0x25a0 -#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) #define MVNETA_INTR_NEW_MASK 0x25a4 + +/* bits 0..7 = TXQ SENT, one bit per queue. + * bits 8..15 = RXQ OCCUP, one bit per queue. + * bits 16..23 = RXQ FREE, one bit per queue. + * bit 29 = OLD_REG_SUM, see old reg ? 
+ * bit 30 = TX_ERR_SUM, one bit for 4 ports + * bit 31 = MISC_SUM, one bit for 4 ports + */ +#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) +#define MVNETA_TX_INTR_MASK_ALL (0xff << 0) +#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) +#define MVNETA_RX_INTR_MASK_ALL (0xff << 8) + #define MVNETA_INTR_OLD_CAUSE 0x25a8 #define MVNETA_INTR_OLD_MASK 0x25ac + +/* Data Path Port/Queue Cause Register */ #define MVNETA_INTR_MISC_CAUSE 0x25b0 #define MVNETA_INTR_MISC_MASK 0x25b4 + +#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) +#define MVNETA_CAUSE_LINK_CHANGE BIT(1) +#define MVNETA_CAUSE_PTP BIT(4) + +#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) +#define MVNETA_CAUSE_RX_OVERRUN BIT(8) +#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) +#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) +#define MVNETA_CAUSE_TX_UNDERUN BIT(11) +#define MVNETA_CAUSE_PRBS_ERR BIT(12) +#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) +#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) + +#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 +#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) +#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) + +#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 +#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) +#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) + #define MVNETA_INTR_ENABLE 0x25b8 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 -#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 +#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF + #define MVNETA_RXQ_CMD 0x2680 #define MVNETA_RXQ_DISABLE_SHIFT 8 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff @@ -121,7 +161,7 @@ #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 -#define MVNETA_GMAC2_PSC_ENABLE BIT(3) +#define MVNETA_GMAC2_PCS_ENABLE BIT(3) #define MVNETA_GMAC2_PORT_RGMII BIT(4) #define MVNETA_GMAC2_PORT_RESET BIT(6) #define MVNETA_GMAC_STATUS 0x2c10 @@ -172,13 +212,10 @@ /* Various constants */ /* Coalescing */ -#define MVNETA_TXDONE_COAL_PKTS 16 +#define MVNETA_TXDONE_COAL_PKTS 1 #define MVNETA_RX_COAL_PKTS 32 #define MVNETA_RX_COAL_USEC 100 -/* Timer */ -#define MVNETA_TX_DONE_TIMER_PERIOD 10 - /* Napi polling weight */ #define MVNETA_RX_POLL_WEIGHT 64 @@ -221,10 +258,12 @@ #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) -struct mvneta_stats { +struct mvneta_pcpu_stats { struct u64_stats_sync syncp; - u64 packets; - u64 bytes; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; }; struct mvneta_port { @@ -232,16 +271,11 @@ void __iomem *base; struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; - struct timer_list tx_done_timer; struct net_device *dev; u32 cause_rx_tx; struct napi_struct napi; - /* Flags */ - unsigned long flags; -#define MVNETA_F_TX_DONE_TIMER_BIT 0 - /* Napi weight */ int weight; @@ -250,8 +284,7 @@ u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; - struct mvneta_stats tx_stats; - struct mvneta_stats rx_stats; + struct mvneta_pcpu_stats *stats; struct mii_bus *mii_bus; struct phy_device *phy_dev; @@ -461,21 +494,29 @@ { struct mvneta_port *pp = netdev_priv(dev); unsigned int start; + int cpu; - memset(stats, 0, sizeof(struct rtnl_link_stats64)); - - do { - start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp); - stats->rx_packets = pp->rx_stats.packets; - stats->rx_bytes = pp->rx_stats.bytes; - } while 
(u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start)); - - - do { - start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp); - stats->tx_packets = pp->tx_stats.packets; - stats->tx_bytes = pp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start)); + for_each_possible_cpu(cpu) { + struct mvneta_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(pp->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } stats->rx_errors = dev->stats.rx_errors; stats->rx_dropped = dev->stats.rx_dropped; @@ -688,7 +729,7 @@ u32 val; val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val |= MVNETA_GMAC2_PSC_ENABLE; + val |= MVNETA_GMAC2_PCS_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); @@ -1100,17 +1141,6 @@ txq->done_pkts_coal = value; } -/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */ -static void mvneta_add_tx_done_timer(struct mvneta_port *pp) -{ - if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) { - pp->tx_done_timer.expires = jiffies + - msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD); - add_timer(&pp->tx_done_timer); - } -} - - /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */ static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, u32 phys_addr, u32 cookie) @@ -1182,7 +1212,7 @@ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; - if (l3_proto == swab16(ETH_P_IP)) + if (l3_proto == htons(ETH_P_IP)) command |= MVNETA_TXD_IP_CSUM; else command |= MVNETA_TX_L3_IP6; @@ -1391,6 +1421,8 @@ { struct net_device *dev = pp->dev; int rx_done, rx_filled; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; /* Get number of received packets */ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); @@ -1428,10 +1460,8 @@ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); - u64_stats_update_begin(&pp->rx_stats.syncp); - pp->rx_stats.packets++; - pp->rx_stats.bytes += rx_bytes; - u64_stats_update_end(&pp->rx_stats.syncp); + rcvd_pkts++; + rcvd_bytes += rx_bytes; /* Linux processing */ skb_reserve(skb, MVNETA_MH_SIZE); @@ -1452,6 +1482,15 @@ } } + if (rcvd_pkts) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + /* Update rxq management counters */ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); @@ -1523,6 +1562,7 @@ u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; struct mvneta_tx_desc *tx_desc; + int len = skb->len; struct netdev_queue *nq; int frags = 0; u32 tx_cmd; @@ -1582,25 +1622,17 @@ out: if (frags > 0) { - u64_stats_update_begin(&pp->tx_stats.syncp); - pp->tx_stats.packets++; - pp->tx_stats.bytes += skb->len; - u64_stats_update_end(&pp->tx_stats.syncp); + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); } else { dev->stats.tx_dropped++; 
dev_kfree_skb_any(skb); } - if (txq->count >= MVNETA_TXDONE_COAL_PKTS) - mvneta_txq_done(pp, txq); - - /* If after calling mvneta_txq_done, count equals - * frags, we need to set the timer - */ - if (txq->count == frags && frags > 0) - mvneta_add_tx_done_timer(pp); - return NETDEV_TX_OK; } @@ -1876,14 +1908,22 @@ /* Read cause register */ cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) & - MVNETA_RX_INTR_MASK(rxq_number); + (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); + + /* Release Tx descriptors */ + if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { + int tx_todo = 0; + + mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL), &tx_todo); + cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; + } /* For the case where the last mvneta_poll did not process all * RX packets */ cause_rx_tx |= pp->cause_rx_tx; if (rxq_number > 1) { - while ((cause_rx_tx != 0) && (budget > 0)) { + while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) { int count; struct mvneta_rx_queue *rxq; /* get rx queue number from cause_rx_tx */ @@ -1915,7 +1955,7 @@ napi_complete(napi); local_irq_save(flags); mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number)); + MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); local_irq_restore(flags); } @@ -1923,26 +1963,6 @@ return rx_done; } -/* tx done timer callback */ -static void mvneta_tx_done_timer_callback(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct mvneta_port *pp = netdev_priv(dev); - int tx_done = 0, tx_todo = 0; - - if (!netif_running(dev)) - return ; - - clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); - - tx_done = mvneta_tx_done_gbe(pp, - (((1 << txq_number) - 1) & - MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK), - &tx_todo); - if (tx_todo > 0) - mvneta_add_tx_done_timer(pp); -} - /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int num) @@ -2192,7 +2212,7 @@ /* Unmask interrupts */ mvreg_write(pp, MVNETA_INTR_NEW_MASK, - MVNETA_RX_INTR_MASK(rxq_number)); + MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); phy_start(pp->phy_dev); netif_tx_start_all_queues(pp->dev); @@ -2225,16 +2245,6 @@ mvneta_rx_reset(pp); } -/* tx timeout callback - display a message and stop/start the network device */ -static void mvneta_tx_timeout(struct net_device *dev) -{ - struct mvneta_port *pp = netdev_priv(dev); - - netdev_info(dev, "tx timeout\n"); - mvneta_stop_dev(pp); - mvneta_start_dev(pp); -} - /* Return positive if MTU is valid */ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu) { @@ -2358,7 +2368,7 @@ if (phydev->speed == SPEED_1000) val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else + else if (phydev->speed == SPEED_100) val |= MVNETA_GMAC_CONFIG_MII_SPEED; mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); @@ -2478,8 +2488,6 @@ free_irq(dev->irq, pp); mvneta_cleanup_rxqs(pp); mvneta_cleanup_txqs(pp); - del_timer(&pp->tx_done_timer); - clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); return 0; } @@ -2615,7 +2623,6 @@ .ndo_set_rx_mode = mvneta_set_rx_mode, .ndo_set_mac_address = mvneta_set_mac_addr, .ndo_change_mtu = mvneta_change_mtu, - .ndo_tx_timeout = mvneta_tx_timeout, .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, }; @@ -2751,6 +2758,7 @@ const char *mac_from; int phy_mode; int err; + int cpu; /* Our multiqueue support is not complete, so for now, only * allow the usage of the first RX queue @@ -2792,9 +2800,6 @@ pp = 
netdev_priv(dev); - u64_stats_init(&pp->tx_stats.syncp); - u64_stats_init(&pp->rx_stats.syncp); - pp->weight = MVNETA_RX_POLL_WEIGHT; pp->phy_node = phy_node; pp->phy_interface = phy_mode; @@ -2813,6 +2818,19 @@ goto err_clk; } + /* Alloc per-cpu stats */ + pp->stats = alloc_percpu(struct mvneta_pcpu_stats); + if (!pp->stats) { + err = -ENOMEM; + goto err_unmap; + } + + for_each_possible_cpu(cpu) { + struct mvneta_pcpu_stats *stats; + stats = per_cpu_ptr(pp->stats, cpu); + u64_stats_init(&stats->syncp); + } + dt_mac_addr = of_get_mac_address(dn); if (dt_mac_addr) { mac_from = "device tree"; @@ -2828,11 +2846,6 @@ } } - pp->tx_done_timer.data = (unsigned long)dev; - pp->tx_done_timer.function = mvneta_tx_done_timer_callback; - init_timer(&pp->tx_done_timer); - clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags); - pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; @@ -2842,7 +2855,7 @@ err = mvneta_init(pp, phy_addr); if (err < 0) { dev_err(&pdev->dev, "can't init eth hal\n"); - goto err_unmap; + goto err_free_stats; } mvneta_port_power_up(pp, phy_mode); @@ -2872,6 +2885,8 @@ err_deinit: mvneta_deinit(pp); +err_free_stats: + free_percpu(pp->stats); err_unmap: iounmap(pp->base); err_clk: @@ -2892,6 +2907,7 @@ unregister_netdev(dev); mvneta_deinit(pp); clk_disable_unprepare(pp->clk); + free_percpu(pp->stats); iounmap(pp->base); irq_dispose_mapping(dev->irq); free_netdev(dev); --- linux-3.13.0.orig/drivers/net/ethernet/marvell/sky2.c +++ linux-3.13.0/drivers/net/ethernet/marvell/sky2.c @@ -3906,19 +3906,19 @@ u64 _bytes, _packets; do { - start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp); _bytes = sky2->rx_stats.bytes; _packets = sky2->rx_stats.packets; - } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start)); stats->rx_packets = _packets; stats->rx_bytes = _bytes; do { - start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp); _bytes = sky2->tx_stats.bytes; _packets = sky2->tx_stats.packets; - } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start)); stats->tx_packets = _packets; stats->tx_bytes = _bytes; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -3,9 +3,10 @@ # config MLX4_EN - tristate "Mellanox Technologies 10Gbit Ethernet support" + tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" depends on PCI select MLX4_CORE + select PTP_1588_CLOCK ---help--- This driver supports Mellanox Technologies ConnectX Ethernet devices. @@ -22,6 +23,13 @@ If unsure, set to Y +config MLX4_EN_VXLAN + bool "VXLAN offloads Support" + default y + depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m) + ---help--- + Say Y here if you want to use VXLAN offloads in the driver. 
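The mvneta conversion above, and the many _bh-to-_irq changes in the igb/ixgbe/ixgbevf/sky2 hunks, revolve around one idiom: per-CPU counters protected by u64_stats_sync, updated locklessly on the hot path and folded together with a begin/retry loop when ndo_get_stats64 runs. A condensed sketch of both sides with illustrative names; the __percpu pointer is assumed to come from alloc_percpu(), with u64_stats_init() called on each CPU's syncp during setup:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/u64_stats_sync.h>
	#include <linux/netdevice.h>

	struct example_pcpu_stats {
		struct u64_stats_sync syncp;
		u64 rx_packets;
		u64 rx_bytes;
	};

	/* Writer side: hot path, per-CPU, no locks. */
	static void example_rx_account(struct example_pcpu_stats __percpu *stats,
				       unsigned int len)
	{
		struct example_pcpu_stats *s = this_cpu_ptr(stats);

		u64_stats_update_begin(&s->syncp);
		s->rx_packets++;
		s->rx_bytes += len;
		u64_stats_update_end(&s->syncp);
	}

	/* Reader side: per-CPU retry loop, then fold into the global counters. */
	static void example_fold_stats(struct example_pcpu_stats __percpu *stats,
				       struct rtnl_link_stats64 *out)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
			unsigned int start;
			u64 packets, bytes;

			do {
				start = u64_stats_fetch_begin_irq(&s->syncp);
				packets = s->rx_packets;
				bytes = s->rx_bytes;
			} while (u64_stats_fetch_retry_irq(&s->syncp, start));

			out->rx_packets += packets;
			out->rx_bytes += bytes;
		}
	}

The retry loop re-reads whenever a writer raced with the reader, which is why the 64-bit counters stay consistent on 32-bit machines without a per-packet lock.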
+ config MLX4_CORE tristate depends on PCI --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -71,9 +71,9 @@ return obj; } -void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr) { - mlx4_bitmap_free_range(bitmap, obj, 1); + mlx4_bitmap_free_range(bitmap, obj, 1, use_rr); } u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) @@ -118,11 +118,17 @@ return bitmap->avail; } -void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) +void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, + int use_rr) { obj &= bitmap->max + bitmap->reserved_top - 1; spin_lock(&bitmap->lock); + if (!use_rr) { + bitmap->last = min(bitmap->last, obj); + bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) + & bitmap->mask; + } bitmap_clear(bitmap->table, obj, cnt); bitmap->avail += cnt; spin_unlock(&bitmap->lock); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -800,16 +800,7 @@ vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } -static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, - struct mlx4_vhcr *vhcr, - struct mlx4_cmd_mailbox *inbox, - struct mlx4_cmd_mailbox *outbox, - struct mlx4_cmd_info *cmd) -{ - return -EPERM; -} - -static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave, +static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, @@ -964,6 +955,15 @@ .wrapper = NULL }, { + .opcode = MLX4_CMD_CONFIG_DEV, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, + { .opcode = MLX4_CMD_ALLOC_RES, .has_inbox = false, .has_outbox = false, @@ -1253,12 +1253,12 @@ }, { .opcode = MLX4_CMD_UPDATE_QP, - .has_inbox = false, + .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = MLX4_CMD_UPDATE_QP_wrapper + .wrapper = mlx4_UPDATE_QP_wrapper }, { .opcode = MLX4_CMD_GET_OP_REQ, @@ -1267,7 +1267,7 @@ .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = MLX4_CMD_GET_OP_REQ_wrapper, + .wrapper = mlx4_CMD_EPERM_wrapper, }, { .opcode = MLX4_CMD_CONF_SPECIAL_QP, @@ -1296,6 +1296,15 @@ .verify = NULL, .wrapper = mlx4_QUERY_IF_STAT_wrapper }, + { + .opcode = MLX4_CMD_ACCESS_REG, + .has_inbox = true, + .has_outbox = true, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_ACCESS_REG_wrapper, + }, /* Native multicast commands are not available for guests */ { .opcode = MLX4_CMD_QP_ATTACH, @@ -1371,6 +1380,15 @@ .verify = NULL, .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper }, + { + .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, @@ -1634,8 +1652,16 @@ int port, err; struct mlx4_vport_state *vp_admin; struct mlx4_vport_oper_state *vp_oper; - - for (port = 1; port <= MLX4_MAX_PORTS; port++) { + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int 
max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; vp_oper->state = *vp_admin; @@ -1676,8 +1702,17 @@ { int port; struct mlx4_vport_oper_state *vp_oper; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports( + &priv->dev, slave); + int min_port = find_first_bit(actv_ports.ports, + priv->dev.caps.num_ports) + 1; + int max_port = min_port - 1 + + bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports); + - for (port = 1; port <= MLX4_MAX_PORTS; port++) { + for (port = min_port; port <= max_port; port++) { + if (!test_bit(port - 1, actv_ports.ports)) + continue; vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (NO_INDX != vp_oper->vlan_idx) { __mlx4_unregister_vlan(&priv->dev, @@ -2225,6 +2260,112 @@ return vf+1; } +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave) +{ + if (slave < 1 || slave > dev->num_vfs) { + mlx4_err(dev, + "Bad slave number:%d (number of activated slaves: %lu)\n", + slave, dev->num_slaves); + return -EINVAL; + } + return slave - 1; +} + +struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + int vf; + + bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS); + + if (slave == 0) { + bitmap_fill(actv_ports.ports, dev->caps.num_ports); + return actv_ports; + } + + vf = mlx4_get_vf_indx(dev, slave); + if (vf < 0) + return actv_ports; + + bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1, + min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports, + dev->caps.num_ports)); + + return actv_ports; +} +EXPORT_SYMBOL_GPL(mlx4_get_active_ports); + +int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port) +{ + unsigned n; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port <= 0 || port > m) + return -EINVAL; + + n = find_first_bit(actv_ports.ports, dev->caps.num_ports); + if (port <= n) + port = n + 1; + + return port; +} +EXPORT_SYMBOL_GPL(mlx4_slave_convert_port); + +int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + if (test_bit(port - 1, actv_ports.ports)) + return port - + find_first_bit(actv_ports.ports, dev->caps.num_ports); + + return -1; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev, + int port) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + if (port <= 0 || port > dev->caps.num_ports) + return slaves_pport; + + for (i = 0; i < dev->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if (test_bit(port - 1, actv_ports.ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport); + +struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( + struct mlx4_dev *dev, + const struct mlx4_active_ports *crit_ports) +{ + unsigned i; + struct mlx4_slaves_pport slaves_pport; + + bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX); + + for (i = 0; i < dev->num_vfs + 1; i++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, i); + if 
(bitmap_equal(crit_ports->ports, actv_ports.ports, + dev->caps.num_ports)) + set_bit(i, slaves_pport.slaves); + } + + return slaves_pport; +} +EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2280,6 +2421,30 @@ } EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); + /* mlx4_get_slave_default_vlan - + * return true if VST ( default vlan) + * if VST, will return vlan & qos (if not NULL) + */ +bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, + u16 *vlan, u8 *qos) +{ + struct mlx4_vport_oper_state *vp_oper; + struct mlx4_priv *priv; + + priv = mlx4_priv(dev); + vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + + if (MLX4_VGT != vp_oper->state.default_vlan) { + if (vlan) + *vlan = vp_oper->state.default_vlan; + if (qos) + *qos = vp_oper->state.default_qos; + return true; + } + return false; +} +EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan); + int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) { struct mlx4_priv *priv = mlx4_priv(dev); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/cq.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -187,7 +187,7 @@ mlx4_table_put(dev, &cq_table->table, *cqn); err_out: - mlx4_bitmap_free(&cq_table->bitmap, *cqn); + mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR); return err; } @@ -217,7 +217,7 @@ mlx4_table_put(dev, &cq_table->cmpt_table, cqn); mlx4_table_put(dev, &cq_table->table, cqn); - mlx4_bitmap_free(&cq_table->bitmap, cqn); + mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR); } static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) @@ -294,6 +294,7 @@ atomic_set(&cq->refcount, 1); init_completion(&cq->free); + cq->irq = priv->eq_table.eq[cq->vector].irq; return 0; err_radix: --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -35,48 +35,6 @@ #include "mlx4_en.h" -int mlx4_en_timestamp_config(struct net_device *dev, int tx_type, int rx_filter) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - int port_up = 0; - int err = 0; - - mutex_lock(&mdev->state_lock); - if (priv->port_up) { - port_up = 1; - mlx4_en_stop_port(dev, 1); - } - - mlx4_en_free_resources(priv); - - en_warn(priv, "Changing Time Stamp configuration\n"); - - priv->hwtstamp_config.tx_type = tx_type; - priv->hwtstamp_config.rx_filter = rx_filter; - - if (rx_filter != HWTSTAMP_FILTER_NONE) - dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; - else - dev->features |= NETIF_F_HW_VLAN_CTAG_RX; - - err = mlx4_en_alloc_resources(priv); - if (err) { - en_err(priv, "Failed reallocating port resources\n"); - goto out; - } - if (port_up) { - err = mlx4_en_start_port(dev); - if (err) - en_err(priv, "Failed starting port\n"); - } - -out: - mutex_unlock(&mdev->state_lock); - netdev_features_change(dev); - return err; -} - /* mlx4_en_read_clock - read raw cycle counter (to be used by time counter) */ static cycle_t mlx4_en_read_clock(const struct cyclecounter *tc) @@ -103,19 +61,191 @@ struct skb_shared_hwtstamps *hwts, u64 timestamp) { + unsigned long flags; u64 nsec; + read_lock_irqsave(&mdev->clock_lock, flags); nsec = timecounter_cyc2time(&mdev->clock, timestamp); + read_unlock_irqrestore(&mdev->clock_lock, flags); memset(hwts, 0, sizeof(struct skb_shared_hwtstamps)); hwts->hwtstamp = ns_to_ktime(nsec); } +/** + * mlx4_en_remove_timestamp - disable PTP device + * @mdev: board private 
structure + * + * Stop the PTP support. + **/ +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev) +{ + if (mdev->ptp_clock) { + ptp_clock_unregister(mdev->ptp_clock); + mdev->ptp_clock = NULL; + mlx4_info(mdev, "removed PHC\n"); + } +} + +void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) +{ + bool timeout = time_is_before_jiffies(mdev->last_overflow_check + + mdev->overflow_period); + unsigned long flags; + + if (timeout) { + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_read(&mdev->clock); + write_unlock_irqrestore(&mdev->clock_lock, flags); + mdev->last_overflow_check = jiffies; + } +} + +/** + * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + u64 adj; + u32 diff, mult; + int neg_adj = 0; + unsigned long flags; + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + mult = mdev->nominal_c_mult; + adj = mult; + adj *= delta; + diff = div_u64(adj, 1000000000ULL); + + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_read(&mdev->clock); + mdev->cycles.mult = neg_adj ? mult - diff : mult + diff; + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. + **/ +static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + unsigned long flags; + s64 now; + + write_lock_irqsave(&mdev->clock_lock, flags); + now = timecounter_read(&mdev->clock); + now += delta; + timecounter_init(&mdev->clock, &mdev->cycles, now); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. + **/ +static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + unsigned long flags; + u32 remainder; + u64 ns; + + write_lock_irqsave(&mdev->clock_lock, flags); + ns = timecounter_read(&mdev->clock); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +/** + * mlx4_en_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. 
+ **/ +static int mlx4_en_phc_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev, + ptp_clock_info); + u64 ns = timespec_to_ns(ts); + unsigned long flags; + + /* reset the timecounter */ + write_lock_irqsave(&mdev->clock_lock, flags); + timecounter_init(&mdev->clock, &mdev->cycles, ns); + write_unlock_irqrestore(&mdev->clock_lock, flags); + + return 0; +} + +/** + * mlx4_en_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info mlx4_en_ptp_clock_info = { + .owner = THIS_MODULE, + .max_adj = 100000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjfreq = mlx4_en_phc_adjfreq, + .adjtime = mlx4_en_phc_adjtime, + .gettime = mlx4_en_phc_gettime, + .settime = mlx4_en_phc_settime, + .enable = mlx4_en_phc_enable, +}; + void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) { struct mlx4_dev *dev = mdev->dev; + unsigned long flags; u64 ns; + rwlock_init(&mdev->clock_lock); + memset(&mdev->cycles, 0, sizeof(mdev->cycles)); mdev->cycles.read = mlx4_en_read_clock; mdev->cycles.mask = CLOCKSOURCE_MASK(48); @@ -127,9 +257,12 @@ mdev->cycles.shift = 14; mdev->cycles.mult = clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); + mdev->nominal_c_mult = mdev->cycles.mult; + write_lock_irqsave(&mdev->clock_lock, flags); timecounter_init(&mdev->clock, &mdev->cycles, ktime_to_ns(ktime_get_real())); + write_unlock_irqrestore(&mdev->clock_lock, flags); /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least once every wrap around. @@ -137,15 +270,18 @@ ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask); do_div(ns, NSEC_PER_SEC / 2 / HZ); mdev->overflow_period = ns; -} - -void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev) -{ - bool timeout = time_is_before_jiffies(mdev->last_overflow_check + - mdev->overflow_period); - if (timeout) { - timecounter_read(&mdev->clock); - mdev->last_overflow_check = jiffies; + /* Configure the PHC */ + mdev->ptp_clock_info = mlx4_en_ptp_clock_info; + snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp"); + + mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info, + &mdev->pdev->dev); + if (IS_ERR(mdev->ptp_clock)) { + mdev->ptp_clock = NULL; + mlx4_err(mdev, "ptp_clock_register failed\n"); + } else { + mlx4_info(mdev, "registered PHC clock\n"); } + } --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -31,6 +31,7 @@ * */ +#include #include #include #include @@ -66,7 +67,6 @@ cq->ring = ring; cq->is_tx = mode; - spin_lock_init(&cq->lock); /* Allocate HW buffers on provided NUMA node. * dev->numa_node is used in mtt range allocation flow. 
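
As an illustrative aside (not part of the patch): the mlx4_en_phc_adjfreq callback added in the en_clock.c hunk above scales the nominal cyclecounter multiplier by the requested offset in parts per billion, i.e. diff = mult * |delta| / 1e9, then adds or subtracts diff. The same arithmetic in a standalone, userspace-style helper (hypothetical name, plain C) would be:

    #include <stdint.h>

    /* Scale a nominal cyclecounter multiplier by delta parts-per-billion,
     * mirroring the adjfreq computation in the hunk above. */
    static uint32_t scale_mult_ppb(uint32_t nominal_mult, int32_t delta_ppb)
    {
            uint64_t adj;
            uint32_t diff;
            int neg = 0;

            if (delta_ppb < 0) {
                    neg = 1;
                    delta_ppb = -delta_ppb;
            }
            adj = (uint64_t)nominal_mult * (uint32_t)delta_ppb;
            diff = (uint32_t)(adj / 1000000000ULL);  /* mult * ppb / 1e9 */

            return neg ? nominal_mult - diff : nominal_mult + diff;
    }
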
@@ -130,11 +130,16 @@ "%s ,Falling back to legacy EQ's\n", name); } + } } else { cq->vector = (cq->ring + 1 + priv->port) % mdev->dev->caps.num_comp_vectors; } + + cq->irq_desc = + irq_to_desc(mlx4_eq_get_irq(mdev->dev, + cq->vector)); } else { /* For TX we use the same irq per ring we assigned for the RX */ @@ -161,12 +166,23 @@ cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; - if (!cq->is_tx) { + if (cq->is_tx) { + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, + NAPI_POLL_WEIGHT); + } else { + struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; + + err = irq_set_affinity_hint(cq->mcq.irq, + ring->affinity_mask); + if (err) + mlx4_warn(mdev, "Failed setting affinity hint\n"); + netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); napi_hash_add(&cq->napi); - napi_enable(&cq->napi); } + napi_enable(&cq->napi); + return 0; } @@ -177,8 +193,9 @@ mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - if (priv->mdev->dev->caps.comp_pool && cq->vector) + if (priv->mdev->dev->caps.comp_pool && cq->vector) { mlx4_release_eq(priv->mdev->dev, cq->vector); + } cq->vector = 0; cq->buf_size = 0; cq->buf = NULL; @@ -188,12 +205,13 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { + napi_disable(&cq->napi); if (!cq->is_tx) { - napi_disable(&cq->napi); napi_hash_del(&cq->napi); synchronize_rcu(); - netif_napi_del(&cq->napi); + irq_set_affinity_hint(cq->mcq.irq, NULL); } + netif_napi_del(&cq->napi); mlx4_cq_free(priv->mdev->dev, &cq->mcq); } --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -62,7 +62,7 @@ int has_ets_tc = 0; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (ets->prio_tc[i] > MLX4_EN_NUM_UP) { + if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) { en_err(priv, "Bad priority in UP <=> TC mapping. 
TC: %d, UP: %d\n", i, ets->prio_tc[i]); return -EINVAL; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -98,6 +99,10 @@ drvinfo->eedump_len = 0; } +static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = { + "blueflame", +}; + static const char main_strings[][ETH_GSTRING_LEN] = { "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", @@ -235,6 +240,8 @@ case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx4_en_priv_flags); default: return -EOPNOTSUPP; } @@ -358,10 +365,311 @@ #endif } break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, + mlx4_en_priv_flags[i]); + break; + } } -static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static u32 mlx4_en_autoneg_get(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + u32 autoneg = AUTONEG_DISABLE; + + if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) && + (priv->port_state.flags & MLX4_EN_PORT_ANE)) + autoneg = AUTONEG_ENABLE; + + return autoneg; +} + +static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return SUPPORTED_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) +{ + u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper); + + if (!eth_proto) /* link down */ + eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) + | MLX4_PROT_MASK(MLX4_1000BASE_T) + | MLX4_PROT_MASK(MLX4_100BASE_TX))) { + return PORT_TP; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR) + | MLX4_PROT_MASK(MLX4_56GBASE_SR4) + | MLX4_PROT_MASK(MLX4_40GBASE_SR4) + | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + | MLX4_PROT_MASK(MLX4_56GBASE_CR4) + | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) { + return PORT_DA; + } + + if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + | MLX4_PROT_MASK(MLX4_40GBASE_KR4) + | MLX4_PROT_MASK(MLX4_20GBASE_KR2) + | MLX4_PROT_MASK(MLX4_10GBASE_KR) + | MLX4_PROT_MASK(MLX4_10GBASE_KX4) + | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { + return PORT_NONE; + } + return PORT_OTHER; +} + +#define MLX4_LINK_MODES_SZ \ + (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) + +enum ethtool_report { + SUPPORTED = 0, + ADVERTISED = 1, + SPEED = 2 +}; + +/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ +static 
u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { + [MLX4_100BASE_TX] = { + SUPPORTED_100baseT_Full, + ADVERTISED_100baseT_Full, + SPEED_100 + }, + + [MLX4_1000BASE_T] = { + SUPPORTED_1000baseT_Full, + ADVERTISED_1000baseT_Full, + SPEED_1000 + }, + [MLX4_1000BASE_CX_SGMII] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + [MLX4_1000BASE_KX] = { + SUPPORTED_1000baseKX_Full, + ADVERTISED_1000baseKX_Full, + SPEED_1000 + }, + + [MLX4_10GBASE_T] = { + SUPPORTED_10000baseT_Full, + ADVERTISED_10000baseT_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KX4] = { + SUPPORTED_10000baseKX4_Full, + ADVERTISED_10000baseKX4_Full, + SPEED_10000 + }, + [MLX4_10GBASE_KR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_CR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + [MLX4_10GBASE_SR] = { + SUPPORTED_10000baseKR_Full, + ADVERTISED_10000baseKR_Full, + SPEED_10000 + }, + + [MLX4_20GBASE_KR2] = { + SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, + ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, + SPEED_20000 + }, + + [MLX4_40GBASE_CR4] = { + SUPPORTED_40000baseCR4_Full, + ADVERTISED_40000baseCR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_KR4] = { + SUPPORTED_40000baseKR4_Full, + ADVERTISED_40000baseKR4_Full, + SPEED_40000 + }, + [MLX4_40GBASE_SR4] = { + SUPPORTED_40000baseSR4_Full, + ADVERTISED_40000baseSR4_Full, + SPEED_40000 + }, + + [MLX4_56GBASE_KR4] = { + SUPPORTED_56000baseKR4_Full, + ADVERTISED_56000baseKR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_CR4] = { + SUPPORTED_56000baseCR4_Full, + ADVERTISED_56000baseCR4_Full, + SPEED_56000 + }, + [MLX4_56GBASE_SR4] = { + SUPPORTED_56000baseSR4_Full, + ADVERTISED_56000baseSR4_Full, + SPEED_56000 + }, +}; + +static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +{ + int i; + u32 link_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (eth_proto & MLX4_PROT_MASK(i)) + link_modes |= ptys2ethtool_map[i][report]; + } + return link_modes; +} + +static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][report] & link_modes) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +/* Convert actual speed (SPEED_XXX) to ptys link modes */ +static u32 speed2ptys_link_modes(u32 speed) +{ + int i; + u32 ptys_modes = 0; + + for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { + if (ptys2ethtool_map[i][SPEED] == speed) + ptys_modes |= 1 << i; + } + return ptys_modes; +} + +static int ethtool_get_ptys_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + u32 eth_proto; + int ret; + + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)", + ret); + return ret; + } + en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n", + ptys_reg.proto_mask); + en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n", + be32_to_cpu(ptys_reg.eth_proto_cap)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n", + be32_to_cpu(ptys_reg.eth_proto_admin)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n", + 
be32_to_cpu(ptys_reg.eth_proto_oper)); + en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", + be32_to_cpu(ptys_reg.eth_proto_lp_adv)); + + cmd->supported = 0; + cmd->advertising = 0; + + cmd->supported |= ptys_get_supported_port(&ptys_reg); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); + cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); + cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0; + + cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? + ADVERTISED_Asym_Pause : 0; + + cmd->port = ptys_get_active_port(&ptys_reg); + cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? + XCVR_EXTERNAL : XCVR_INTERNAL; + + if (mlx4_en_autoneg_get(dev)) { + cmd->supported |= SUPPORTED_Autoneg; + cmd->advertising |= ADVERTISED_Autoneg; + } + + cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; + + eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); + cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); + + cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + ADVERTISED_Autoneg : 0; + + cmd->phy_address = 0; + cmd->mdio_support = 0; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return ret; +} + +static void ethtool_get_default_settings(struct net_device *dev, + struct ethtool_cmd *cmd) { struct mlx4_en_priv *priv = netdev_priv(dev); int trans_type; @@ -369,18 +677,7 @@ cmd->autoneg = AUTONEG_DISABLE; cmd->supported = SUPPORTED_10000baseT_Full; cmd->advertising = ADVERTISED_10000baseT_Full; - - if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) - return -ENOMEM; - - trans_type = priv->port_state.transciver; - if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); - cmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(cmd, -1); - cmd->duplex = -1; - } + trans_type = priv->port_state.transceiver; if (trans_type > 0 && trans_type <= 0xC) { cmd->port = PORT_FIBRE; @@ -396,17 +693,118 @@ cmd->port = -1; cmd->transceiver = -1; } +} + +static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int ret = -EINVAL; + + if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) + return -ENOMEM; + + en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n", + priv->port_state.flags & MLX4_EN_PORT_ANC, + priv->port_state.flags & MLX4_EN_PORT_ANE); + + if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) + ret = ethtool_get_ptys_settings(dev, cmd); + if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ + ethtool_get_default_settings(dev, cmd); + + if (netif_carrier_ok(dev)) { + ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); + cmd->duplex = DUPLEX_FULL; + } else { + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; + } return 0; } +/* Calculate PTYS admin according ethtool speed (SPEED_XXX) */ +static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, + __be32 proto_cap) +{ + __be32 proto_admin = 0; + + if (!speed) { /* Speed = 0 ==> Reset Link modes */ + proto_admin = proto_cap; + en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n", + be32_to_cpu(proto_cap)); + } else { + u32 ptys_link_modes = 
speed2ptys_link_modes(speed); + + proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap; + en_info(priv, "Setting Speed to %d\n", speed); + } + return proto_admin; +} + static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { - if ((cmd->autoneg == AUTONEG_ENABLE) || - (ethtool_cmd_speed(cmd) != SPEED_10000) || - (cmd->duplex != DUPLEX_FULL)) + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_ptys_reg ptys_reg; + __be32 proto_admin; + int ret; + + u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); + int speed = ethtool_cmd_speed(cmd); + + en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", + speed, cmd->advertising, cmd->autoneg, cmd->duplex); + + if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (cmd->duplex == DUPLEX_HALF)) return -EINVAL; - /* Nothing to change */ + memset(&ptys_reg, 0, sizeof(ptys_reg)); + ptys_reg.local_port = priv->port; + ptys_reg.proto_mask = MLX4_PTYS_EN; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, + MLX4_ACCESS_REG_QUERY, &ptys_reg); + if (ret) { + en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n", + ret); + return 0; + } + + proto_admin = cpu_to_be32(ptys_adv); + if (speed >= 0 && speed != priv->port_state.link_speed) + /* If speed was set then speed decides :-) */ + proto_admin = speed_set_ptys_admin(priv, speed, + ptys_reg.eth_proto_cap); + + proto_admin &= ptys_reg.eth_proto_cap; + + if (proto_admin == ptys_reg.eth_proto_admin) + return 0; /* Nothing to change */ + + if (!proto_admin) { + en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); + return -EINVAL; /* nothing to change due to bad input */ + } + + en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", + be32_to_cpu(proto_admin)); + + ptys_reg.eth_proto_admin = proto_admin; + ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE, + &ptys_reg); + if (ret) { + en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)", + be32_to_cpu(ptys_reg.eth_proto_admin), ret); + return ret; + } + + en_warn(priv, "Port link mode changed, restarting port...\n"); + mutex_lock(&priv->mdev->state_lock); + if (priv->port_up) { + mlx4_en_stop_port(dev, 1); + if (mlx4_en_start_port(dev)) + en_err(priv, "Failed restarting port %d\n", priv->port); + } + mutex_unlock(&priv->mdev->state_lock); return 0; } @@ -1193,11 +1591,137 @@ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); + + if (mdev->ptp_clock) + info->phc_index = ptp_clock_index(mdev->ptp_clock); } return ret; } +int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME); + int i; + + if (bf_enabled_new == bf_enabled_old) + return 0; /* Nothing to do */ + + if (bf_enabled_new) { + bool bf_supported = true; + + for (i = 0; i < priv->tx_ring_num; i++) + bf_supported &= priv->tx_ring[i]->bf_alloced; + + if (!bf_supported) { + en_err(priv, "BlueFlame is not supported\n"); + return -EINVAL; + } + + priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } + + for (i = 0; i < priv->tx_ring_num; i++) + priv->tx_ring[i]->bf_enabled = bf_enabled_new; + + en_info(priv, "BlueFlame %s\n", + bf_enabled_new ? 
"Enabled" : "Disabled"); + + return 0; +} + +u32 mlx4_en_get_priv_flags(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->pflags; +} + +static int mlx4_en_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int ret; + u8 data[4]; + + /* Read first 2 bytes to get Module & REV ID */ + ret = mlx4_get_module_info(mdev->dev, priv->port, + 0/*offset*/, 2/*size*/, data); + if (ret < 2) + return -EIO; + + switch (data[0] /* identifier */) { + case MLX4_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX4_MODULE_ID_QSFP_PLUS: + if (data[1] >= 0x3) { /* revision id */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX4_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + case MLX4_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -ENOSYS; + } + + return 0; +} + +static int mlx4_en_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int offset = ee->offset; + int i = 0, ret; + + if (ee->len == 0) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + en_dbg(DRV, priv, + "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", + i, offset, ee->len - i); + + ret = mlx4_get_module_info(mdev->dev, priv->port, + offset, ee->len - i, data + i); + + if (!ret) /* Done reading */ + return 0; + + if (ret < 0) { + en_err(priv, + "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", + i, offset, ee->len - i, ret); + return 0; + } + + i += ret; + offset += ret; + } + return 0; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@ -1225,6 +1749,10 @@ .get_channels = mlx4_en_get_channels, .set_channels = mlx4_en_set_channels, .get_ts_info = mlx4_en_get_ts_info, + .set_priv_flags = mlx4_en_set_priv_flags, + .get_priv_flags = mlx4_en_get_priv_flags, + .get_module_info = mlx4_en_get_module_info, + .get_module_eeprom = mlx4_en_get_module_eeprom }; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." " Per priority bit mask"); +MLX4_EN_PARM_INT(inline_thold, MAX_INLINE, + "Threshold for using inline data (range: 17-104, default: 104)"); + +#define MAX_PFC_TX 0xff +#define MAX_PFC_RX 0xff + int en_print(const char *level, const struct mlx4_en_priv *priv, const char *format, ...) { @@ -123,8 +129,10 @@ int i; params->udp_rss = udp_rss; - params->num_tx_rings_p_up = min_t(int, num_online_cpus(), - MLX4_EN_MAX_TX_RING_P_UP); + params->num_tx_rings_p_up = mlx4_low_memory_profile() ? 
+ MLX4_EN_MIN_TX_RING_P_UP : + min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP); + if (params->udp_rss && !(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) { mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); @@ -140,6 +148,7 @@ params->prof[i].tx_ring_num = params->num_tx_rings_p_up * MLX4_EN_NUM_UP; params->prof[i].rss_rings = 0; + params->prof[i].inline_thold = inline_thold; } return 0; @@ -174,6 +183,9 @@ mlx4_err(mdev, "Internal error detected, restarting device\n"); break; + case MLX4_DEV_EVENT_SLAVE_INIT: + case MLX4_DEV_EVENT_SLAVE_SHUTDOWN: + break; default: if (port < 1 || port > dev->caps.num_ports || !mdev->pndev[port]) @@ -196,6 +208,9 @@ if (mdev->pndev[i]) mlx4_en_destroy_netdev(mdev->pndev[i]); + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) + mlx4_en_remove_timestamp(mdev); + flush_workqueue(mdev->workqueue); destroy_workqueue(mdev->workqueue); (void) mlx4_mr_free(dev, &mdev->mr); @@ -268,19 +283,8 @@ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) mlx4_en_init_timestamp(mdev); - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { - if (!dev->caps.comp_pool) { - mdev->profile.prof[i].rx_ring_num = - rounddown_pow_of_two(max_t(int, MIN_RX_RINGS, - min_t(int, - dev->caps.num_comp_vectors, - DEF_RX_RINGS))); - } else { - mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two( - min_t(int, dev->caps.comp_pool/ - dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1)); - } - } + /* Set default number of RX rings*/ + mlx4_en_set_num_rx_rings(mdev); /* Create our own workqueue for reset/multicast tasks * Note: we cannot use the shared workqueue because of deadlocks caused @@ -330,8 +334,31 @@ .protocol = MLX4_PROT_ETH, }; +static void mlx4_en_verify_params(void) +{ + if (pfctx > MAX_PFC_TX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfctx, MAX_PFC_TX); + pfctx = 0; + } + + if (pfcrx > MAX_PFC_RX) { + pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n", + pfcrx, MAX_PFC_RX); + pfcrx = 0; + } + + if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) { + pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n", + inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE); + inline_thold = MAX_INLINE; + } +} + static int __init mlx4_en_init(void) { + mlx4_en_verify_params(); + return mlx4_register_interface(&mlx4_en_interface); } --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -468,6 +469,53 @@ memset(&dst_mac[ETH_ALEN], 0, 2); } + +static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr, + int qpn, u64 *reg_id) +{ + int err; + struct mlx4_spec_list spec_eth_outer = { {NULL} }; + struct mlx4_spec_list spec_vxlan = { {NULL} }; + struct mlx4_spec_list spec_eth_inner = { {NULL} }; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_REGULAR, + .priority = MLX4_DOMAIN_NIC, + }; + + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return 0; /* do nothing */ + + rule.port = priv->port; + rule.qpn = qpn; + INIT_LIST_HEAD(&rule.list); + + spec_eth_outer.id = 
MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); + memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + + spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ + spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ + + list_add_tail(&spec_eth_outer.list, &rule.list); + list_add_tail(&spec_vxlan.list, &rule.list); + list_add_tail(&spec_eth_inner.list, &rule.list); + + err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id); + if (err) { + en_err(priv, "failed to add vxlan steering rule, err %d\n", err); + return err; + } + en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id); + return 0; +} + + static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, unsigned char *mac, int *qpn, u64 *reg_id) { @@ -556,7 +604,7 @@ int err = 0; u64 reg_id; int *qpn = &priv->base_qpn; - u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", priv->dev->dev_addr); @@ -585,12 +633,18 @@ if (err) goto steer_err; + err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn, + &priv->tunnel_reg_id); + if (err) + goto tunnel_err; + entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { err = -ENOMEM; goto alloc_err; } memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac)); + memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac)); entry->reg_id = reg_id; hlist_add_head_rcu(&entry->hlist, @@ -599,6 +653,9 @@ return 0; alloc_err: + if (priv->tunnel_reg_id) + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); +tunnel_err: mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id); steer_err: @@ -617,7 +674,7 @@ u64 mac; if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { - mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); + mac = mlx4_mac_to_u64(priv->dev->dev_addr); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", priv->dev->dev_addr); mlx4_unregister_mac(dev, priv->port, mac); @@ -630,7 +687,7 @@ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { bucket = &priv->mac_hash[i]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { - mac = mlx4_en_mac_to_u64(entry->mac); + mac = mlx4_mac_to_u64(entry->mac); en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, @@ -642,6 +699,11 @@ } } + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", priv->port, qpn); mlx4_qp_release_range(dev, qpn, 1); @@ -655,14 +717,14 @@ struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_dev *dev = mdev->dev; int err = 0; - u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac); + u64 new_mac_u64 = mlx4_mac_to_u64(new_mac); if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { struct hlist_head *bucket; unsigned int mac_hash; struct mlx4_mac_entry *entry; struct hlist_node *tmp; - u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); + u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac); bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { @@ -682,6 +744,14 @@ err = mlx4_en_uc_steer_add(priv, new_mac, &qpn, &entry->reg_id); + if (err) + return err; + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, + &priv->tunnel_reg_id); return err; } } @@ -691,33 +761,23 @@ 
return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); } -u64 mlx4_en_mac_to_u64(u8 *addr) -{ - u64 mac = 0; - int i; - - for (i = 0; i < ETH_ALEN; i++) { - mac <<= 8; - mac |= addr[i]; - } - return mac; -} - -static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) +static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv, + unsigned char new_mac[ETH_ALEN + 2]) { int err = 0; if (priv->port_up) { /* Remove old MAC and insert the new one */ err = mlx4_en_replace_mac(priv, priv->base_qpn, - priv->dev->dev_addr, priv->prev_mac); + new_mac, priv->current_mac); if (err) en_err(priv, "Failed changing HW MAC address\n"); - memcpy(priv->prev_mac, priv->dev->dev_addr, - sizeof(priv->prev_mac)); } else en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); + if (!err) + memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac)); + return err; } @@ -726,15 +786,17 @@ struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; struct sockaddr *saddr = addr; + unsigned char new_mac[ETH_ALEN + 2]; int err; if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); - mutex_lock(&mdev->state_lock); - err = mlx4_en_do_set_mac(priv); + memcpy(new_mac, saddr->sa_data, ETH_ALEN); + err = mlx4_en_do_set_mac(priv, new_mac); + if (!err) + memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); mutex_unlock(&mdev->state_lock); return err; @@ -782,7 +844,7 @@ list_for_each_entry(dst_tmp, dst, list) { found = false; list_for_each_entry(src_tmp, src, list) { - if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { + if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { found = true; break; } @@ -797,7 +859,7 @@ list_for_each_entry(src_tmp, src, list) { found = false; list_for_each_entry(dst_tmp, dst, list) { - if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { + if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) { dst_tmp->action = MCLIST_NONE; found = true; break; @@ -1021,7 +1083,7 @@ mlx4_en_cache_mclist(dev); netif_addr_unlock_bh(dev); list_for_each_entry(mclist, &priv->mc_list, list) { - mcast_addr = mlx4_en_mac_to_u64(mclist->addr); + mcast_addr = mlx4_mac_to_u64(mclist->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } @@ -1044,6 +1106,12 @@ if (err) en_err(priv, "Fail to detach multicast address\n"); + if (mclist->tunnel_reg_id) { + err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); + if (err) + en_err(priv, "Failed to detach multicast address\n"); + } + /* remove from list */ list_del(&mclist->list); kfree(mclist); @@ -1061,6 +1129,10 @@ if (err) en_err(priv, "Fail to attach multicast address\n"); + err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn, + &mclist->tunnel_reg_id); + if (err) + en_err(priv, "Failed to attach multicast address\n"); } } } @@ -1099,11 +1171,12 @@ } /* MAC address of the port is not in uc list */ - if (ether_addr_equal_64bits(entry->mac, dev->dev_addr)) + if (ether_addr_equal_64bits(entry->mac, + priv->current_mac)) found = true; if (!found) { - mac = mlx4_en_mac_to_u64(entry->mac); + mac = mlx4_mac_to_u64(entry->mac); mlx4_en_uc_steer_release(priv, entry->mac, priv->base_qpn, entry->reg_id); @@ -1146,7 +1219,7 @@ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; break; } - mac = mlx4_en_mac_to_u64(ha->addr); + mac = mlx4_mac_to_u64(ha->addr); memcpy(entry->mac, ha->addr, ETH_ALEN); err = mlx4_register_mac(mdev->dev, priv->port, mac); if (err < 0) { @@ -1237,15 +1310,11 @@ { struct mlx4_en_priv *priv = netdev_priv(dev); 
struct mlx4_en_cq *cq; - unsigned long flags; int i; for (i = 0; i < priv->rx_ring_num; i++) { cq = priv->rx_cq[i]; - spin_lock_irqsave(&cq->lock, flags); - napi_synchronize(&cq->napi); - mlx4_en_process_rx_cq(dev, cq, 0); - spin_unlock_irqrestore(&cq->lock, flags); + napi_schedule(&cq->napi); } } #endif @@ -1413,7 +1482,7 @@ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); } if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { - mlx4_en_do_set_mac(priv); + mlx4_en_do_set_mac(priv, priv->current_mac); mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; } mutex_unlock(&mdev->state_lock); @@ -1463,6 +1532,27 @@ mutex_unlock(&mdev->state_lock); } +static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) +{ + struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; + int numa_node = priv->mdev->dev->numa_node; + int ret = 0; + + if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + + ret = cpumask_set_cpu_local_first(ring_idx, numa_node, + ring->affinity_mask); + if (ret) + free_cpumask_var(ring->affinity_mask); + + return ret; +} + +static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) +{ + free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); +} int mlx4_en_start_port(struct net_device *dev) { @@ -1504,9 +1594,16 @@ mlx4_en_cq_init_lock(cq); + err = mlx4_en_init_affinity_hint(priv, i); + if (err) { + en_err(priv, "Failed preparing IRQ affinity hint\n"); + goto cq_err; + } + err = mlx4_en_activate_cq(priv, cq, i); if (err) { en_err(priv, "Failed activating Rx CQ\n"); + mlx4_en_free_affinity_hint(priv, i); goto cq_err; } for (j = 0; j < cq->size; j++) @@ -1515,6 +1612,7 @@ if (err) { en_err(priv, "Failed setting cq moderation parameters"); mlx4_en_deactivate_cq(priv, cq); + mlx4_en_free_affinity_hint(priv, i); goto cq_err; } mlx4_en_arm_cq(priv, cq); @@ -1598,6 +1696,15 @@ goto tx_err; } + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); + if (err) { + en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", + err); + goto tx_err; + } + } + /* Init port */ en_dbg(HW, priv, "Initializing port\n"); err = mlx4_INIT_PORT(mdev->dev, priv->port); @@ -1622,6 +1729,10 @@ mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); +#ifdef CONFIG_MLX4_EN_VXLAN + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + vxlan_get_rx_port(dev); +#endif priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -1639,8 +1750,10 @@ mac_err: mlx4_en_put_qp(priv); cq_err: - while (rx_index--) + while (rx_index--) { mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); + mlx4_en_free_affinity_hint(priv, i); + } for (i = 0; i < priv->rx_ring_num; i++) mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); @@ -1713,6 +1826,8 @@ mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, mclist->reg_id); + if (mclist->tunnel_reg_id) + mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); } mlx4_en_clear_list(dev); list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { @@ -1769,6 +1884,8 @@ msleep(1); mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); mlx4_en_deactivate_cq(priv, cq); + + mlx4_en_free_affinity_hint(priv, i); } } @@ -1910,8 +2027,10 @@ prof->tx_ring_size, i, TX, node)) goto err; - if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i, - prof->tx_ring_size, 
TXBB_SIZE, node)) + if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], + priv->base_tx_qpn + i, + prof->tx_ring_size, TXBB_SIZE, + node, i)) goto err; } @@ -2075,7 +2194,7 @@ return -ERANGE; } - if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) { + if (mlx4_en_reset_config(dev, config, dev->features)) { config.tx_type = HWTSTAMP_TX_OFF; config.rx_filter = HWTSTAMP_FILTER_NONE; } @@ -2098,6 +2217,16 @@ netdev_features_t features) { struct mlx4_en_priv *priv = netdev_priv(netdev); + int ret = 0; + + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + en_info(priv, "Turn %s RX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF"); + ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config, + features); + if (ret) + return ret; + } if (features & NETIF_F_LOOPBACK) priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); @@ -2115,7 +2244,7 @@ { struct mlx4_en_priv *en_priv = netdev_priv(dev); struct mlx4_en_dev *mdev = en_priv->mdev; - u64 mac_u64 = mlx4_en_mac_to_u64(mac); + u64 mac_u64 = mlx4_mac_to_u64(mac); if (!is_valid_ether_addr(mac)) return -EINVAL; @@ -2154,6 +2283,104 @@ return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); } + +#define PORT_ID_BYTE_LEN 8 +static int mlx4_en_get_phys_port_id(struct net_device *dev, + struct netdev_phys_port_id *ppid) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_dev *mdev = priv->mdev->dev; + int i; + u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; + + if (!phys_port_id) + return -EOPNOTSUPP; + + ppid->id_len = sizeof(phys_port_id); + for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) { + ppid->id[i] = phys_port_id & 0xff; + phys_port_id >>= 8; + } + return 0; +} + +#ifdef CONFIG_MLX4_EN_VXLAN +static void mlx4_en_add_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_add_task); + + ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); + if (ret) + goto out; + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 1); +out: + if (ret) + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); +} + +static void mlx4_en_del_vxlan_offloads(struct work_struct *work) +{ + int ret; + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, + vxlan_del_task); + + ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, 0); + if (ret) + en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); + + priv->vxlan_port = 0; +} + +static void mlx4_en_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port && current_port != port) { + en_warn(priv, "vxlan port %d configured, can't add port %d\n", + ntohs(current_port), ntohs(port)); + return; + } + + priv->vxlan_port = port; + queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); +} + +static void mlx4_en_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 current_port; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + return; + + if (sa_family == AF_INET6) + return; + + current_port = priv->vxlan_port; + if (current_port 
!= port) { + en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); + return; + } + + queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); +} +#endif + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2179,6 +2406,11 @@ #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = mlx4_en_low_latency_recv, #endif + .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, +#endif }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2207,6 +2439,11 @@ #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif + .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, +#endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, @@ -2242,6 +2479,7 @@ priv->port = port; priv->port_up = false; priv->flags = prof->flags; + priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | MLX4_WQE_CTRL_SOLICITED); priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; @@ -2261,6 +2499,7 @@ } priv->rx_ring_num = prof->rx_ring_num; priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; + priv->cqe_size = mdev->dev->caps.cqe_size; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); @@ -2269,6 +2508,10 @@ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); +#ifdef CONFIG_MLX4_EN_VXLAN + INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); + INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); +#endif #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) { @@ -2293,7 +2536,7 @@ if (mlx4_is_slave(priv->mdev->dev)) { eth_hw_addr_random(dev); en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); - mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr); + mac_u64 = mlx4_mac_to_u64(dev->dev_addr); mdev->dev->caps.def_mac[priv->port] = mac_u64; } else { en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", @@ -2303,7 +2546,7 @@ } } - memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac)); + memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac)); priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); @@ -2356,7 +2599,8 @@ dev->features = dev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features |= NETIF_F_LOOPBACK; + dev->hw_features |= NETIF_F_LOOPBACK | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) @@ -2365,18 +2609,18 @@ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) dev->priv_flags |= IFF_UNICAST_FLT; + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL; + } + mdev->pndev[port] = dev; netif_carrier_off(dev); mlx4_en_set_default_moderation(priv); - err = register_netdev(dev); - if (err) { - en_err(priv, "Netdev 
registration failed for port %d\n", port); - goto out; - } - priv->registered = 1; - en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); @@ -2394,6 +2638,15 @@ goto out; } + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); + if (err) { + en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n", + err); + goto out; + } + } + /* Init port */ en_warn(priv, "Initializing port\n"); err = mlx4_INIT_PORT(mdev->dev, priv->port); @@ -2407,6 +2660,14 @@ queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); + err = register_netdev(dev); + if (err) { + en_err(priv, "Netdev registration failed for port %d\n", port); + goto out; + } + + priv->registered = 1; + return 0; out: @@ -2414,3 +2675,79 @@ return err; } +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t features) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + int port_up = 0; + int err = 0; + + if (priv->hwtstamp_config.tx_type == ts_config.tx_type && + priv->hwtstamp_config.rx_filter == ts_config.rx_filter && + !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) + return 0; /* Nothing to change */ + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) && + (features & NETIF_F_HW_VLAN_CTAG_RX) && + (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) { + en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n"); + return -EINVAL; + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + mlx4_en_free_resources(priv); + + en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", + ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + priv->hwtstamp_config.tx_type = ts_config.tx_type; + priv->hwtstamp_config.rx_filter = ts_config.rx_filter; + + if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) { + /* RX time-stamping is OFF, update the RX vlan offload + * to the latest wanted state + */ + if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX) + dev->features |= NETIF_F_HW_VLAN_CTAG_RX; + else + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + /* RX vlan offload and RX time-stamping can't co-exist ! 
+ * Regardless of the caller's choice, + * Turn Off RX vlan offload in case of time-stamping is ON + */ + if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n"); + dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + err = mlx4_en_alloc_resources(priv); + if (err) { + en_err(priv, "Failed reallocating port resources\n"); + goto out; + } + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) + en_err(priv, "Failed starting port\n"); + } + +out: + mutex_unlock(&mdev->state_lock); + netdev_features_change(dev); + return err; +} --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -91,21 +91,37 @@ * already synchronized, no need in locking */ state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK); switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) { + case MLX4_EN_100M_SPEED: + state->link_speed = SPEED_100; + break; case MLX4_EN_1G_SPEED: - state->link_speed = 1000; + state->link_speed = SPEED_1000; break; case MLX4_EN_10G_SPEED_XAUI: case MLX4_EN_10G_SPEED_XFI: - state->link_speed = 10000; + state->link_speed = SPEED_10000; + break; + case MLX4_EN_20G_SPEED: + state->link_speed = SPEED_20000; break; case MLX4_EN_40G_SPEED: - state->link_speed = 40000; + state->link_speed = SPEED_40000; + break; + case MLX4_EN_56G_SPEED: + state->link_speed = SPEED_56000; break; default: state->link_speed = -1; break; } - state->transciver = qport_context->transceiver; + + state->transceiver = qport_context->transceiver; + + state->flags = 0; /* Reset and recalculate the port flags */ + state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ? + MLX4_EN_PORT_ANC : 0; + state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ? 
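mlx4_en_reset_config() above enforces that RX VLAN stripping and RX hardware time-stamping are never enabled together: enabling a time-stamp RX filter forces the strip offload off, and turning the strip offload on is refused while such a filter is active. The effective state reduces to a one-line rule; this is only an illustration of that rule:

#include <stdbool.h>
#include <stdio.h>

/* true == RX VLAN stripping ends up enabled */
static bool effective_rx_vlan_strip(bool wanted_strip, bool rx_tstamp_on)
{
	return wanted_strip && !rx_tstamp_on;
}

int main(void)
{
	printf("%d %d %d\n",
	       effective_rx_vlan_strip(true, false),	/* 1 */
	       effective_rx_vlan_strip(true, true),	/* 0: forced off */
	       effective_rx_vlan_strip(false, true));	/* 0 */
	return 0;
}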
+ MLX4_EN_PORT_ANE : 0; out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); @@ -148,10 +164,16 @@ stats->tx_packets = 0; stats->tx_bytes = 0; priv->port_stats.tx_chksum_offload = 0; + priv->port_stats.queue_stopped = 0; + priv->port_stats.wake_queue = 0; + for (i = 0; i < priv->tx_ring_num; i++) { stats->tx_packets += priv->tx_ring[i]->packets; stats->tx_bytes += priv->tx_ring[i]->bytes; priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; + priv->port_stats.queue_stopped += + priv->tx_ring[i]->queue_stopped; + priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue; } stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_port.h +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_port.h @@ -53,22 +53,49 @@ MLX4_MCAST_ENABLE = 2, }; +enum mlx4_link_mode { + MLX4_1000BASE_CX_SGMII = 0, + MLX4_1000BASE_KX = 1, + MLX4_10GBASE_CX4 = 2, + MLX4_10GBASE_KX4 = 3, + MLX4_10GBASE_KR = 4, + MLX4_20GBASE_KR2 = 5, + MLX4_40GBASE_CR4 = 6, + MLX4_40GBASE_KR4 = 7, + MLX4_56GBASE_KR4 = 8, + MLX4_10GBASE_CR = 12, + MLX4_10GBASE_SR = 13, + MLX4_40GBASE_SR4 = 15, + MLX4_56GBASE_CR4 = 17, + MLX4_56GBASE_SR4 = 18, + MLX4_100BASE_TX = 24, + MLX4_1000BASE_T = 25, + MLX4_10GBASE_T = 26, +}; + +#define MLX4_PROT_MASK(link_mode) (1<db_rec_addr = cpu_to_be64(priv->res.db.dma << 2); if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX)) context->param3 |= cpu_to_be32(1 << 30); + + if (!is_tx && !rss && + (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) { + en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); + context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */ + } } --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "mlx4_en.h" @@ -318,6 +319,32 @@ } } +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) +{ + int i; + int num_of_eqs; + int num_rx_rings; + struct mlx4_dev *dev = mdev->dev; + + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + if (!dev->caps.comp_pool) + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + dev->caps.num_comp_vectors, + DEF_RX_RINGS)); + else + num_of_eqs = min_t(int, MAX_MSIX_P_PORT, + dev->caps.comp_pool/ + dev->caps.num_ports) - 1; + + num_rx_rings = mlx4_low_memory_profile() ? 
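The MLX4_PROT_MASK() definition above is truncated in this copy of the patch; judging by its name and the mlx4_link_mode enum it accompanies, it is most plausibly a one-bit mask over the link-mode values. A sketch of that reading, with _EX suffixes marking the illustrative stand-ins:

#include <stdio.h>

enum mlx4_link_mode_ex {
	MLX4_10GBASE_KR_EX = 4,
	MLX4_40GBASE_CR4_EX = 6,
};

#define MLX4_PROT_MASK_EX(link_mode) (1 << (link_mode))

int main(void)
{
	unsigned int supported = MLX4_PROT_MASK_EX(MLX4_10GBASE_KR_EX) |
				 MLX4_PROT_MASK_EX(MLX4_40GBASE_CR4_EX);

	printf("0x%x %d\n", supported,
	       !!(supported & MLX4_PROT_MASK_EX(MLX4_40GBASE_CR4_EX)));
	/* prints 0x50 1 */
	return 0;
}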
MIN_RX_RINGS : + min_t(int, num_of_eqs, + netif_get_num_default_rss_queues()); + mdev->profile.prof[i].rx_ring_num = + rounddown_pow_of_two(num_rx_rings); + } +} + int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node) @@ -631,15 +658,19 @@ int ip_summed; int factor = priv->cqe_factor; u64 timestamp; + bool l2_tunnel; if (!priv->port_up) return 0; + if (budget <= 0) + return polled; + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ index = cq->mcq.cons_index & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, @@ -709,6 +740,8 @@ length -= ring->fcs_del; ring->bytes += length; ring->packets++; + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); if (likely(dev->features & NETIF_F_RXCSUM)) { if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && @@ -721,7 +754,7 @@ * - not an IP fragment * - no LLS polling in progress */ - if (!mlx4_en_cq_ll_polling(cq) && + if (!mlx4_en_cq_busy_polling(cq) && (dev->features & NETIF_F_GRO)) { struct sk_buff *gro_skb = napi_get_frags(&cq->napi); if (!gro_skb) @@ -738,6 +771,8 @@ gro_skb->data_len = length; gro_skb->ip_summed = CHECKSUM_UNNECESSARY; + if (l2_tunnel) + gro_skb->encapsulation = 1; if ((cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { @@ -788,6 +823,9 @@ skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); + if (l2_tunnel) + skb->encapsulation = 1; + if (dev->features & NETIF_F_RXHASH) skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid); @@ -804,8 +842,10 @@ skb_mark_napi_id(skb, &cq->napi); - /* Push it up the stack */ - netif_receive_skb(skb); + if (!mlx4_en_cq_busy_polling(cq)) + napi_gro_receive(&cq->napi, skb); + else + netif_receive_skb(skb); next: for (nr = 0; nr < priv->num_frags; nr++) @@ -813,7 +853,7 @@ ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; if (++polled == budget) goto out; } @@ -856,9 +896,25 @@ mlx4_en_cq_unlock_napi(cq); /* If we used up all the quota - we're probably not done yet... */ - if (done == budget) + if (done == budget) { + int cpu_curr; + const struct cpumask *aff; + INC_PERF_COUNTER(priv->pstats.napi_quota); - else { + + cpu_curr = smp_processor_id(); + aff = irq_desc_get_irq_data(cq->irq_desc)->affinity; + + if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) { + /* Current cpu is not according to smp_irq_affinity - + * probably affinity changed. 
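A worked example of the ring-count arithmetic in mlx4_en_set_num_rx_rings() above, as standalone C. The MIN_RX_RINGS/DEF_RX_RINGS constants and the EQ/RSS counts below are assumptions for illustration, not values taken from the driver headers.

#include <stdio.h>

#define MIN_EX(a, b) ((a) < (b) ? (a) : (b))
#define MAX_EX(a, b) ((a) > (b) ? (a) : (b))

static unsigned int rounddown_pow_of_two_ex(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	int min_rx_rings = 4, def_rx_rings = 16;	/* assumed constants */
	int num_comp_vectors = 12;			/* hypothetical EQ count */
	int default_rss = 8;	/* netif_get_num_default_rss_queues() result */
	int num_of_eqs = MAX_EX(min_rx_rings,
				MIN_EX(num_comp_vectors, def_rx_rings));
	int num_rx_rings = MIN_EX(num_of_eqs, default_rss);

	printf("%u\n", rounddown_pow_of_two_ex(num_rx_rings));	/* prints 8 */
	return 0;
}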
need to stop this NAPI + * poll, and restart it on the right CPU + */ + napi_complete(napi); + mlx4_en_arm_cq(priv, cq); + return 0; + } + } else { /* Done for now */ napi_complete(napi); mlx4_en_arm_cq(priv, cq); @@ -1053,6 +1109,12 @@ rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6; rss_context->base_qpn_udp = rss_context->default_qpn; } + + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { + en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n"); + rss_mask |= MLX4_RSS_BY_INNER_HEADERS; + } + rss_context->flags = rss_mask; rss_context->hash_fn = MLX4_RSS_HASH_TOP; for (i = 0; i < 10; i++) --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -129,9 +129,15 @@ if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) return -ENOMEM; - /* The device currently only supports 10G speed */ - if (priv->port_state.link_speed != SPEED_10000) + /* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */ + if (priv->port_state.link_speed != SPEED_100 && + priv->port_state.link_speed != SPEED_1000 && + priv->port_state.link_speed != SPEED_10000 && + priv->port_state.link_speed != SPEED_20000 && + priv->port_state.link_speed != SPEED_40000 && + priv->port_state.link_speed != SPEED_56000) return priv->port_state.link_speed; + return 0; } --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -39,23 +39,14 @@ #include #include #include +#include #include #include "mlx4_en.h" -enum { - MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ - MAX_BF = 256, -}; - -static int inline_thold __read_mostly = MAX_INLINE; - -module_param_named(inline_thold, inline_thold, int, 0444); -MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); - int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, int qpn, u32 size, - u16 stride, int node) + u16 stride, int node, int queue_index) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_tx_ring *ring; @@ -74,8 +65,7 @@ ring->size = size; ring->size_mask = size - 1; ring->stride = stride; - - inline_thold = min(inline_thold, MAX_INLINE); + ring->inline_thold = priv->prof->inline_thold; tmp = size * sizeof(struct mlx4_en_tx_info); ring->tx_info = vmalloc_node(tmp, node); @@ -136,10 +126,19 @@ ring->bf.uar = &mdev->priv_uar; ring->bf.uar->map = mdev->uar_map; ring->bf_enabled = false; - } else - ring->bf_enabled = true; + ring->bf_alloced = false; + priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME; + } else { + ring->bf_alloced = true; + ring->bf_enabled = !!(priv->pflags & + MLX4_EN_PRIV_FLAGS_BLUEFLAME); + } ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; + ring->queue_index = queue_index; + + if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) + cpumask_set_cpu(queue_index, &ring->affinity_mask); *pring = ring; return 0; @@ -167,7 +166,7 @@ struct mlx4_en_tx_ring *ring = *pring; en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); - if (ring->bf_enabled) + if (ring->bf_alloced) mlx4_bf_free(mdev->dev, &ring->bf); mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); @@ -201,11 +200,14 @@ mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, ring->cqn, user_prio, &ring->context); - if (ring->bf_enabled) + if (ring->bf_alloced) ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, &ring->qp, 
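The check above compares the CPU running the RX poll against the CQ interrupt's affinity mask and, on a mismatch, completes NAPI and re-arms the CQ so polling resumes on an affine CPU after the next interrupt. Conceptually it is just a bitmask membership test (a plain unsigned long stands in for the kernel cpumask here):

#include <stdio.h>

int main(void)
{
	unsigned long irq_affinity = 0x0c;	/* hypothetical: CPUs 2 and 3 */
	int cpu_curr = 1;			/* CPU currently running the poll */

	if (!(irq_affinity & (1UL << cpu_curr)))
		printf("CPU %d not in affinity mask: stop poll, re-arm CQ\n",
		       cpu_curr);
	return 0;
}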
&ring->qp_state); + if (!user_prio && cpu_online(ring->queue_index)) + netif_set_xps_queue(priv->dev, &ring->affinity_mask, + ring->queue_index); return err; } @@ -354,7 +356,9 @@ return cnt; } -static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) +static int mlx4_en_process_tx_cq(struct net_device *dev, + struct mlx4_en_cq *cq, + int budget) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; @@ -372,18 +376,19 @@ u32 bytes = 0; int factor = priv->cqe_factor; u64 timestamp = 0; + int done = 0; if (!priv->port_up) - return; + return 0; index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; ring_index = ring->cons & size_mask; stamp_index = ring_index; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, - cons_index & size)) { + cons_index & size) && (done < budget)) { /* * make sure we read the CQE after we read the * ownership bit @@ -421,11 +426,11 @@ txbbs_stamp = txbbs_skipped; packets++; bytes += ring->tx_info[ring_index].nr_bytes; - } while (ring_index != new_index); + } while ((++done < budget) && (ring_index != new_index)); ++cons_index; index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; } @@ -445,8 +450,9 @@ */ if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; + ring->wake_queue++; } + return done; } void mlx4_en_tx_irq(struct mlx4_cq *mcq) @@ -454,10 +460,31 @@ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); struct mlx4_en_priv *priv = netdev_priv(cq->dev); - mlx4_en_process_tx_cq(cq->dev, cq); - mlx4_en_arm_cq(priv, cq); + if (priv->port_up) + napi_schedule(&cq->napi); + else + mlx4_en_arm_cq(priv, cq); } +/* TX CQ polling - called by NAPI */ +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) +{ + struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); + struct net_device *dev = cq->dev; + struct mlx4_en_priv *priv = netdev_priv(dev); + int done; + + done = mlx4_en_process_tx_cq(dev, cq, budget); + + /* If we used up all the quota - we're probably not done yet... 
*/ + if (done < budget) { + /* Done for now */ + napi_complete(napi); + mlx4_en_arm_cq(priv, cq); + return done; + } + return budget; +} static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, @@ -487,7 +514,7 @@ return ring->buf + index * TXBB_SIZE; } -static int is_inline(struct sk_buff *skb, void **pfrag) +static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag) { void *ptr; @@ -528,7 +555,10 @@ int real_size; if (skb_is_gso(skb)) { - *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->encapsulation) + *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb); + else + *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb); real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE + ALIGN(*lso_header_size + 4, DS_SIZE); if (unlikely(*lso_header_size != skb_headlen(skb))) { @@ -544,7 +574,7 @@ } } else { *lso_header_size = 0; - if (!is_inline(skb, NULL)) + if (!is_inline(priv->prof->inline_thold, skb, NULL)) real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; else real_size = inline_size(skb); @@ -560,7 +590,13 @@ int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; if (skb->len <= spc) { - inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + if (likely(skb->len >= MIN_PKT_LEN)) { + inl->byte_count = cpu_to_be32(1 << 31 | skb->len); + } else { + inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN); + memset(((void *)(inl + 1)) + skb->len, 0, + MIN_PKT_LEN - skb->len); + } skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); if (skb_shinfo(skb)->nr_frags) memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, @@ -593,7 +629,7 @@ } u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; @@ -605,7 +641,7 @@ if (vlan_tx_tag_present(skb)) up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT; - return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up; + return fallback(dev, skb) % rings_p_up + up * rings_p_up; } static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt) @@ -660,7 +696,7 @@ ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - priv->port_stats.queue_stopped++; + ring->queue_stopped++; /* If queue was emptied after the if, and before the * stop_queue - need to wake the queue, or else it will remain @@ -673,7 +709,7 @@ if (unlikely(((int)(ring->prod - ring->cons)) <= ring->size - HEADROOM - MAX_DESC_TXBBS)) { netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; + ring->wake_queue++; } else { return NETDEV_TX_BUSY; } @@ -711,11 +747,11 @@ tx_info->data_offset = (void *)data - (void *)tx_desc; tx_info->linear = (lso_header_size < skb_headlen(skb) && - !is_inline(skb, NULL)) ? 1 : 0; + !is_inline(ring->inline_thold, skb, NULL)) ? 
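The transmit queue selection above groups TX rings into per-priority blocks: the core's fallback pick is folded modulo rings_per_up and shifted by up * rings_per_up. A quick numeric check; the ring count and priority are made up:

#include <stdio.h>

int main(void)
{
	int rings_p_up = 8;	/* TX rings per user priority (assumed) */
	int fallback_q = 13;	/* queue chosen by the core fallback hash */
	int up = 2;		/* VLAN priority from the tag */

	printf("%d\n", fallback_q % rings_p_up + up * rings_p_up);
	/* prints 21: ring 5 inside priority 2's block of 8 rings */
	return 0;
}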
1 : 0; data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; - if (is_inline(skb, &fragptr)) { + if (is_inline(ring->inline_thold, skb, &fragptr)) { tx_info->inl = 1; } else { /* Map fragments */ @@ -828,6 +864,14 @@ tx_info->inl = 1; } + if (skb->encapsulation) { + struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); + if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP) + op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP); + else + op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP); + } + ring->prod += nr_txbb; /* If we used a bounce buffer then copy descriptor back into place */ @@ -837,7 +881,8 @@ skb_tx_timestamp(skb); if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { - *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); + tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn); + op_own |= htonl((bf_index & 0xffff) << 8); /* Ensure new descirptor hits memory * before setting ownership of this descriptor to HW */ --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/eq.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -102,21 +102,24 @@ mb(); } -static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor) +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, + u8 eqe_size) { /* (entry & (eq->nent - 1)) gives us a cyclic array */ - unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor); - /* CX3 is capable of extending the EQE from 32 to 64 bytes. - * When this feature is enabled, the first (in the lower addresses) + unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; + /* CX3 is capable of extending the EQE from 32 to 64 bytes with + * strides of 64B,128B and 256B. + * When 64B EQE is used, the first (in the lower addresses) * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes * contain the legacy EQE information. + * In all other cases, the first 32B contains the legacy EQE info. */ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; } -static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor) +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) { - struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor); + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? 
NULL : eqe; } @@ -272,7 +275,10 @@ { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; - if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return SLAVE_PORT_DOWN; @@ -286,8 +292,10 @@ { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); - if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return -1; @@ -301,9 +309,13 @@ { int i; enum slave_port_gen_event gen_event; + struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, + port); - for (i = 0; i < dev->num_slaves; i++) - set_and_calc_slave_port_state(dev, i, port, event, &gen_event); + for (i = 0; i < dev->num_vfs + 1; i++) + if (test_bit(i, slaves_pport.slaves)) + set_and_calc_slave_port_state(dev, i, port, + event, &gen_event); } /************************************************************************** The function get as input the new event to that port, @@ -322,12 +334,14 @@ struct mlx4_slave_state *ctx = NULL; unsigned long flags; int ret = -1; + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); enum slave_port_state cur_state = mlx4_get_slave_port_state(dev, slave, port); *gen_event = SLAVE_PORT_GEN_EVENT_NONE; - if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { + if (slave >= dev->num_slaves || port > dev->caps.num_ports || + port <= 0 || !test_bit(port - 1, actv_ports.ports)) { pr_err("%s: Error: asking for slave:%d, port:%d\n", __func__, slave, port); return ret; @@ -450,8 +464,9 @@ enum slave_port_gen_event gen_event; unsigned long flags; struct mlx4_vport_state *s_info; + int eqe_size = dev->caps.eqe_size; - while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { + while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. 
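The validation repeated in the slave wrappers above (port within range and present in the slave's active-port bitmap) boils down to a small predicate. Illustration only; a plain unsigned long stands in for the kernel bitmap and test_bit():

#include <stdio.h>

static int slave_port_valid(int port, int num_ports, unsigned long actv_ports)
{
	return port > 0 && port <= num_ports &&
	       (actv_ports & (1UL << (port - 1))) != 0;
}

int main(void)
{
	unsigned long actv = 0x2;	/* this slave owns physical port 2 only */

	printf("%d %d %d\n",
	       slave_port_valid(1, 2, actv),	/* 0 */
	       slave_port_valid(2, 2, actv),	/* 1 */
	       slave_port_valid(3, 2, actv));	/* 0 */
	return 0;
}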
@@ -543,15 +558,19 @@ be64_to_cpu(eqe->event.cmd.out_param)); break; - case MLX4_EVENT_TYPE_PORT_CHANGE: + case MLX4_EVENT_TYPE_PORT_CHANGE: { + struct mlx4_slaves_pport slaves_port; port = be32_to_cpu(eqe->event.port_change.port) >> 28; + slaves_port = mlx4_phys_to_slaves_pport(dev, port); if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, port); mlx4_priv(dev)->sense.do_sense_port[port] = 1; if (!mlx4_is_master(dev)) break; - for (i = 0; i < dev->num_slaves; i++) { + for (i = 0; i < dev->num_vfs + 1; i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { if (i == mlx4_master_func_num(dev)) continue; @@ -559,8 +578,13 @@ " to slave: %d, port:%d\n", __func__, i, port); s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; - if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); mlx4_slave_event(dev, i, eqe); + } } else { /* IB port */ set_and_calc_slave_port_state(dev, i, port, MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, @@ -581,12 +605,19 @@ if (!mlx4_is_master(dev)) break; if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - for (i = 0; i < dev->num_slaves; i++) { + for (i = 0; i < dev->num_vfs + 1; i++) { + if (!test_bit(i, slaves_port.slaves)) + continue; if (i == mlx4_master_func_num(dev)) continue; s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; - if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( + (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) + | (mlx4_phys_to_slave_port(dev, i, port) << 28)); mlx4_slave_event(dev, i, eqe); + } } else /* IB port */ /* port-up event will be sent to a slave when the @@ -595,6 +626,7 @@ set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); } break; + } case MLX4_EVENT_TYPE_CQ_ERROR: mlx4_warn(dev, "CQ %s on CQN %06x\n", @@ -886,8 +918,10 @@ eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B. 
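The EQE rewrite in the port-change handler above keeps the low 28 bits of the port field and replaces the top nibble with the port number as the slave sees it. As plain arithmetic, with hypothetical values and host byte order for readability:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port_field = (2u << 28) | 0x0000abcu;	/* physical port 2 */
	int slave_port = 1;		/* same port, renumbered for the slave */
	uint32_t rewritten = (port_field & 0x0FFFFFFF) |
			     ((uint32_t)slave_port << 28);

	printf("0x%08x\n", rewritten);	/* prints 0x10000abc */
	return 0;
}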
+ */ + npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; eq->page_list = kmalloc(npages * sizeof *eq->page_list, GFP_KERNEL); @@ -963,7 +997,7 @@ mlx4_mtt_cleanup(dev, &eq->mtt); err_out_free_eq: - mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); err_out_free_pages: for (i = 0; i < npages; ++i) @@ -989,8 +1023,10 @@ struct mlx4_cmd_mailbox *mailbox; int err; int i; - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B + */ + int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) @@ -1018,7 +1054,7 @@ eq->page_list[i].map); kfree(eq->page_list); - mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn); + mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); mlx4_free_cmd_mailbox(dev, mailbox); } @@ -1345,6 +1381,7 @@ continue; /*we dont want to break here*/ } + eq_set_ci(&priv->eq_table.eq[vec], 1); } } @@ -1360,6 +1397,14 @@ } EXPORT_SYMBOL(mlx4_assign_eq); +int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + return priv->eq_table.eq[vec].irq; +} +EXPORT_SYMBOL(mlx4_eq_get_irq); + void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/fw.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -129,12 +129,19 @@ [0] = "RSS support", [1] = "RSS Toeplitz Hash Function support", [2] = "RSS XOR Hash Function support", - [3] = "Device manage flow steering support", + [3] = "Device managed flow steering support", [4] = "Automatic MAC reassignment support", [5] = "Time stamping support", [6] = "VST (control vlan insertion/stripping) support", [7] = "FSM (MAC anti-spoofing) support", - [8] = "Dynamic QP updates support" + [8] = "Dynamic QP updates support", + [9] = "Device managed flow steering IPoIB support", + [10] = "TCP/IP offloads/flow-steering for VXLAN support", + [11] = "MAD DEMUX (Secure-Host) support", + [12] = "Large cache line (>64B) CQE stride support", + [13] = "Large cache line (>64B) EQE stride support", + [14] = "Ethernet protocol control support", + [15] = "Ethernet Backplane autoneg support" }; int i; @@ -207,29 +214,41 @@ /* when opcode modifier = 1 */ #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 -#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8 -#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc +#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8 +#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc #define QUERY_FUNC_CAP_QP0_TUNNEL 0x10 #define QUERY_FUNC_CAP_QP0_PROXY 0x14 #define QUERY_FUNC_CAP_QP1_TUNNEL 0x18 #define QUERY_FUNC_CAP_QP1_PROXY 0x1c +#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28 -#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40 -#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80 +#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40 +#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80 +#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10 -#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80 +#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 if (vhcr->op_modifier == 1) { - field = 0; - /* ensure force vlan and force mac bits are not set */ - MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - /* ensure that phy_wqe_gid bit is not set */ - MLX4_PUT(outbox->buf, field, 
QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); - - field = vhcr->in_modifier; /* phys-port = logical-port */ + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); + int converted_port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier); + + if (converted_port < 0) + return -EINVAL; + + vhcr->in_modifier = converted_port; + /* Set nic_info bit to mark new fields support */ + field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; + MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); + + /* phys-port = logical-port */ + field = vhcr->in_modifier - + find_first_bit(actv_ports.ports, dev->caps.num_ports); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); + field = vhcr->in_modifier; /* size is now the QP number */ size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); @@ -243,13 +262,20 @@ size += 2; MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); + MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier], + QUERY_FUNC_CAP_PHYS_PORT_ID); + } else if (vhcr->op_modifier == 0) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, slave); /* enable rdma and ethernet interfaces, and new quota locations */ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | QUERY_FUNC_CAP_FLAG_QUOTAS); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); - field = dev->caps.num_ports; + field = min( + bitmap_weight(actv_ports.ports, dev->caps.num_ports), + dev->caps.num_ports); MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); size = dev->caps.function_caps; /* set PF behaviours */ @@ -391,22 +417,22 @@ goto out; } + MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET); if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) { - MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET); - if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) { + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_OFFSET) { mlx4_err(dev, "VLAN is enforced on this port\n"); err = -EPROTONOSUPPORT; goto out; } - if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) { + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) { mlx4_err(dev, "Force mac is enabled on this port\n"); err = -EPROTONOSUPPORT; goto out; } } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { - MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET); - if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) { + MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); + if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { mlx4_err(dev, "phy_wqe_gid is " "enforced on this ib port\n"); err = -EPROTONOSUPPORT; @@ -433,6 +459,10 @@ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY); func_cap->qp1_proxy_qpn = size & 0xFFFFFF; + if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO) + MLX4_GET(func_cap->phys_port_id, outbox, + QUERY_FUNC_CAP_PHYS_PORT_ID); + /* All other resources are allocated by the master, but we still report * 'num' and 'reserved' capabilities as follows: * - num remains the maximum resource index @@ -513,8 +543,11 @@ #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70 +#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 +#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a +#define QUERY_DEV_CAP_ETH_PROT_CTRL_OFFSET 0x7a #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define 
QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -528,7 +561,9 @@ #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 +#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d +#define QUERY_DEV_CAP_VXLAN 0x9e dev_cap->flags2 = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -603,6 +638,9 @@ if (field & 0x80) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB; MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); dev_cap->fs_max_num_qp_per_entry = field; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); @@ -686,14 +724,26 @@ dev_cap->max_rq_sg = field; MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; - + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 5)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL; + if (field & (1 << 6)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + if (field & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); MLX4_GET(dev_cap->reserved_lkey, outbox, QUERY_DEV_CAP_RSVD_LKEY_OFFSET); + MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); + if (field32 & (1 << 0)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; + MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); + if (field & 1<<3) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS; MLX4_GET(dev_cap->max_icm_sz, outbox, QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET); if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS) @@ -823,6 +873,10 @@ int err = 0; u8 field; u32 bmme_flags; + int real_port; + int slave_port; + int first_port; + struct mlx4_active_ports actv_ports; err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); @@ -835,20 +889,43 @@ MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; + actv_ports = mlx4_get_active_ports(dev, slave); + first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports); + for (slave_port = 0, real_port = first_port; + real_port < first_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + ++real_port, ++slave_port) { + if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port)) + flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port; + else + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); + } + for (; slave_port < dev->caps.num_ports; ++slave_port) + flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port); MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET); + field &= ~0x0F; + field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET); + /* For guests, disable timestamp */ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); + /* For guests, disable vxlan tunneling */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); + 
field &= 0xf7; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); + /* For guests, report Blueflame disabled */ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); /* For guests, disable mw type 2 */ - MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); @@ -860,6 +937,12 @@ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); } + + /* turn off ipoib managed steering for guests */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + field &= ~0x80; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + return 0; } @@ -875,12 +958,20 @@ u16 short_field; int err; int admin_link_state; + int port = mlx4_slave_convert_port(dev, slave, + vhcr->in_modifier & 0xFF); #define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 #define MLX4_PORT_LINK_UP_MASK 0x80 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c #define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e + if (port < 0) + return -EINVAL; + + vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | + (port & 0xFF); + err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); @@ -907,7 +998,10 @@ MLX4_PUT(outbox->buf, port_type, QUERY_PORT_SUPPORTED_TYPE_OFFSET); - short_field = 1; /* slave max gids */ + if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH) + short_field = mlx4_get_slave_num_gids(dev, slave, port); + else + short_field = 1; /* slave max gids */ MLX4_PUT(outbox->buf, short_field, QUERY_PORT_CUR_MAX_GID_OFFSET); @@ -1267,6 +1361,7 @@ #define INIT_HCA_IN_SIZE 0x200 #define INIT_HCA_VERSION_OFFSET 0x000 #define INIT_HCA_VERSION 2 +#define INIT_HCA_VXLAN_OFFSET 0x0c #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e #define INIT_HCA_FLAGS_OFFSET 0x014 #define INIT_HCA_QPC_OFFSET 0x020 @@ -1277,6 +1372,7 @@ #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) +#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) @@ -1353,11 +1449,25 @@ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { + dev->caps.eqe_size = cache_line_size(); + dev->caps.cqe_size = cache_line_size(); + dev->caps.eqe_factor = 0; + MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | + (ilog2(dev->caps.eqe_size) - 5)), + INIT_HCA_EQE_CQE_STRIDE_OFFSET); + + /* User still need to know to support CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); @@ -1425,8 +1535,14 @@ MLX4_PUT(inbox, param->uar_page_sz, 
INIT_HCA_UAR_PAGE_SZ_OFFSET); MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); - err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, - MLX4_CMD_NATIVE); + /* set parser VXLAN attributes */ + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) { + u8 parser_params = 0; + MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); + } + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) mlx4_err(dev, "INIT_HCA returns %d\n", err); @@ -1511,6 +1627,17 @@ if (byte_field & 0x40) /* 64-bytes cqe enabled */ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + /* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); + if (byte_field) { + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + param->cqe_size = 1 << ((byte_field & + MLX4_CQE_SIZE_MASK_STRIDE) + 5); + param->eqe_size = 1 << (((byte_field & + MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); + } + /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); @@ -1550,9 +1677,12 @@ struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); - int port = vhcr->in_modifier; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); int err; + if (port < 0) + return -EINVAL; + if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) return 0; @@ -1642,9 +1772,12 @@ struct mlx4_cmd_info *cmd) { struct mlx4_priv *priv = mlx4_priv(dev); - int port = vhcr->in_modifier; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier); int err; + if (port < 0) + return -EINVAL; + if (!(priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))) return 0; @@ -1652,7 +1785,7 @@ if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { if (priv->mfunc.master.init_port_ref[port] == 1) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, - 1000, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; } @@ -1663,7 +1796,7 @@ if (!priv->mfunc.master.qp0_state[port].qp0_active && priv->mfunc.master.qp0_state[port].port_active) { err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, - 1000, MLX4_CMD_NATIVE); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) return err; priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); @@ -1678,17 +1811,57 @@ int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) { - return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, - MLX4_CMD_WRAPPED); + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) { - return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, - MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); } +struct mlx4_config_dev { + __be32 update_flags; + __be32 rsdv1[3]; + __be16 vxlan_udp_dport; + __be16 rsvd2; +}; + +#define MLX4_VXLAN_UDP_DPORT (1 << 0) + +static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) +{ + int err; + struct mlx4_cmd_mailbox *mailbox; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memcpy(mailbox->buf, config_dev, sizeof(*config_dev)); + + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, 
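A worked example of the stride byte written at INIT_HCA_EQE_CQE_STRIDE_OFFSET (encoded above, decoded again in mlx4_QUERY_HCA just before this point), assuming a 128-byte cache line and low/high nibble masks for the CQE/EQE fields; the exact MLX4_*_SIZE_MASK_STRIDE values are not shown in this hunk, so 0x0f/0xf0 are assumptions:

#include <stdio.h>

static int ilog2_ex(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned int eqe_size = 128, cqe_size = 128;	/* cache_line_size() */
	unsigned char byte = (unsigned char)(((ilog2_ex(eqe_size) - 5) << 4) |
					     (ilog2_ex(cqe_size) - 5));

	/* decode as QUERY_HCA does: size = 1 << (nibble + 5) */
	printf("byte 0x%02x cqe %u eqe %u\n", byte,
	       1u << ((byte & 0x0f) + 5), 1u << (((byte & 0xf0) >> 4) + 5));
	/* prints: byte 0x22 cqe 128 eqe 128 */
	return 0;
}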
mailbox); + return err; +} + +int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT); + config_dev.vxlan_udp_dport = udp_port; + + return mlx4_CONFIG_DEV(dev, &config_dev); +} +EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); + + int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0, @@ -1710,7 +1883,45 @@ int mlx4_NOP(struct mlx4_dev *dev) { /* Input modifier of 0x1f means "finish as soon as possible." */ - return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); +} + +int mlx4_get_phys_port_id(struct mlx4_dev *dev) +{ + u8 port; + u32 *outbox; + struct mlx4_cmd_mailbox *mailbox; + u32 in_mod; + u32 guid_hi, guid_lo; + int err, ret = 0; +#define MOD_STAT_CFG_PORT_OFFSET 8 +#define MOD_STAT_CFG_GUID_H 0X14 +#define MOD_STAT_CFG_GUID_L 0X1c + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + for (port = 1; port <= dev->caps.num_ports; port++) { + in_mod = port << MOD_STAT_CFG_PORT_OFFSET; + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2, + MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Fail to get port %d uplink guid\n", + port); + ret = err; + } else { + MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H); + MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L); + dev->caps.phys_port_id[port] = (u64)guid_lo | + (u64)guid_hi << 32; + } + } + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; } #define MLX4_WOL_SETUP_MODE (5 << 28) @@ -1819,7 +2030,8 @@ err = EINVAL; break; } - err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), + err = mlx4_cmd(dev, 0, ((u32) err | + (__force u32)cpu_to_be32(token) << 16), 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) { @@ -1834,3 +2046,142 @@ out: mlx4_free_cmd_mailbox(dev, mailbox); } + +/* Access Reg commands */ +enum mlx4_access_reg_masks { + MLX4_ACCESS_REG_STATUS_MASK = 0x7f, + MLX4_ACCESS_REG_METHOD_MASK = 0x7f, + MLX4_ACCESS_REG_LEN_MASK = 0x7ff +}; + +struct mlx4_access_reg { + __be16 constant1; + u8 status; + u8 resrvd1; + __be16 reg_id; + u8 method; + u8 constant2; + __be32 resrvd2[2]; + __be16 len_const; + __be16 resrvd3; +#define MLX4_ACCESS_REG_HEADER_SIZE (20) + u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE]; +} __attribute__((__packed__)); + +/** + * mlx4_ACCESS_REG - Generic access reg command. + * @dev: mlx4_dev. + * @reg_id: register ID to access. + * @method: Access method Read/Write. + * @reg_len: register length to Read/Write in bytes. + * @reg_data: reg_data pointer to Read/Write From/To. + * + * Access ConnectX registers FW command. + * Returns 0 on success and copies outbox mlx4_access_reg data + * field into reg_data or a negative error code. 
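The uplink GUID assembly in mlx4_get_phys_port_id() above simply concatenates the two 32-bit words read via MOD_STAT_CFG; for example, with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t guid_hi = 0x0002c903;	/* hypothetical MOD_STAT_CFG_GUID_H */
	uint32_t guid_lo = 0x00fed670;	/* hypothetical MOD_STAT_CFG_GUID_L */
	uint64_t phys_port_id = (uint64_t)guid_lo | ((uint64_t)guid_hi << 32);

	printf("0x%016llx\n", (unsigned long long)phys_port_id);
	/* prints 0x0002c90300fed670 */
	return 0;
}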
+ */ +static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id, + enum mlx4_access_reg_method method, + u16 reg_len, void *reg_data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_access_reg *inbuf, *outbuf; + int err; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inbuf = inbox->buf; + outbuf = outbox->buf; + + inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4); + inbuf->constant2 = 0x1; + inbuf->reg_id = cpu_to_be16(reg_id); + inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK; + + reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data))); + inbuf->len_const = + cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) | + ((0x3) << 12)); + + memcpy(inbuf->reg_data, reg_data, reg_len); + err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0, + MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_WRAPPED); + if (err) + goto out; + + if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) { + err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK; + mlx4_err(dev, + "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n", + reg_id, err); + goto out; + } + + memcpy(reg_data, outbuf->reg_data, reg_len); +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return err; +} + +/* ConnectX registers IDs */ +enum mlx4_reg_id { + MLX4_REG_ID_PTYS = 0x5004, +}; + +/** + * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed) + * register + * @dev: mlx4_dev. + * @method: Access method Read/Write. + * @ptys_reg: PTYS register data pointer. + * + * Access ConnectX PTYS register, to Read/Write Port Type/Speed + * configuration + * Returns 0 on success or a negative error code. 
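The len_const field built in mlx4_ACCESS_REG() above packs the register length, converted to dwords plus one, into the low 11 bits and ORs in a constant 0x3 at bits 12-13. For a 64-byte register (the PTYS register size is assumed here, not taken from the header):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t reg_len = 64;		/* assumed sizeof(struct mlx4_ptys_reg) */
	uint16_t len_const = (uint16_t)(((reg_len / 4 + 1) & 0x7ff) |
					(0x3 << 12));

	printf("0x%04x\n", len_const);	/* prints 0x3011 */
	return 0;
}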
+ */ +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg) +{ + return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS, + method, sizeof(*ptys_reg), ptys_reg); +} +EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG); + +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + struct mlx4_access_reg *inbuf = inbox->buf; + u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK; + u16 reg_id = be16_to_cpu(inbuf->reg_id); + + if (slave != mlx4_master_func_num(dev) && + method == MLX4_ACCESS_REG_WRITE) + return -EPERM; + + if (reg_id == MLX4_REG_ID_PTYS) { + struct mlx4_ptys_reg *ptys_reg = + (struct mlx4_ptys_reg *)inbuf->reg_data; + + ptys_reg->local_port = + mlx4_slave_convert_port(dev, slave, + ptys_reg->local_port); + } + + return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier, + 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); +} --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/fw.h +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -140,6 +140,8 @@ u32 qp1_proxy_qpn; u8 physical_port; u8 port_flags; + u8 flags1; + u64 phys_port_id; }; struct mlx4_adapter { @@ -175,6 +177,8 @@ u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; + u16 cqe_size; /* For use only when CQE stride feature enabled */ + u16 eqe_size; /* For use only when EQE stride feature enabled */ }; struct mlx4_init_ib_param { --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/main.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/main.c @@ -41,7 +41,6 @@ #include #include #include -#include #include #include @@ -78,13 +77,17 @@ #endif /* CONFIG_PCI_MSI */ -static int num_vfs; -module_param(num_vfs, int, 0444); -MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); - -static int probe_vf; -module_param(probe_vf, int, 0644); -MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); +static uint8_t num_vfs[3] = {0, 0, 0}; +static int num_vfs_argc = 3; +module_param_array(num_vfs, byte , &num_vfs_argc, 0444); +MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" + "num_vfs=port1,port2,port1+2"); + +static uint8_t probe_vf[3] = {0, 0, 0}; +static int probe_vfs_argc = 3; +module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); +MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" + "probe_vf=port1,port2,port1+2"); int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; module_param_named(log_num_mgm_entry_size, @@ -96,14 +99,15 @@ " To activate device managed" " flow steering when available, set to -1"); -static bool enable_64b_cqe_eqe; +static bool enable_64b_cqe_eqe = true; module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, - "Enable 64 byte CQEs/EQEs when the FW supports this"); + "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); #define HCA_GLOBAL_CAP_MASK 0 -#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE +#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ + MLX4_FUNC_CAP_EQE_CQE_STRIDE) static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" @@ -119,6 +123,16 @@ .num_mtt = 1 << 20, /* It is really num mtt segements */ }; +static struct mlx4_profile low_mem_profile = { + .num_qp = 1 << 17, + .num_srq = 1 << 6, + .rdmarc_per_qp = 1 << 4, + .num_cq = 
1 << 8, + .num_mcg = 1 << 8, + .num_mpt = 1 << 9, + .num_mtt = 1 << 7, +}; + static int log_num_mac = 7; module_param_named(log_num_mac, log_num_mac, int, 0444); MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); @@ -128,6 +142,8 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); /* Log2 max number of VLANs per ETH port (0-7) */ #define MLX4_LOG_NUM_VLANS 7 +#define MLX4_MIN_LOG_NUM_VLANS 0 +#define MLX4_MIN_LOG_NUM_MAC 1 static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); @@ -150,6 +166,8 @@ struct pci_dev *pdev; }; +static atomic_t pf_loading = ATOMIC_INIT(0); + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -183,6 +201,40 @@ dev->caps.port_mask[i] = dev->caps.port_type[i]; } +static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) +{ + struct mlx4_caps *dev_cap = &dev->caps; + + /* FW not supporting or cancelled by user */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || + !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) + return; + + /* Must have 64B CQE_EQE enabled by FW to use bigger stride + * When FW has NCSI it may decide not to report 64B CQE/EQEs + */ + if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || + !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + return; + } + + if (cache_line_size() == 128 || cache_line_size() == 256) { + mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n"); + /* Changing the real data inside CQE size to 32B */ + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + + if (mlx4_is_master(dev)) + dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; + } else { + mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } +} + static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; @@ -289,9 +341,14 @@ if (mlx4_is_mfunc(dev)) dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; - dev->caps.log_num_macs = log_num_mac; - dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; dev->caps.log_num_prios = use_prio ? 
3 : 0; + if (mlx4_low_memory_profile()) { + dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC; + dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS; + } else { + dev->caps.log_num_macs = log_num_mac; + dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; + } for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; @@ -379,6 +436,14 @@ dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; } + + if (dev_cap->flags2 & + (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | + MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { + mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } } if ((dev->caps.flags & @@ -386,8 +451,89 @@ mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; + if (!mlx4_is_slave(dev)) + mlx4_enable_cqe_eqe_stride(dev); + return 0; } + +static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + u32 lnkcap1, lnkcap2; + int err1, err2; + +#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */ + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1); + err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2); + if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + *speed = PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + if (!err1) { + *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT; + if (!lnkcap2) { /* pre-r3.0 */ + if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB) + *speed = PCIE_SPEED_5_0GT; + else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB) + *speed = PCIE_SPEED_2_5GT; + } + } + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) { + return err1 ? err1 : + err2 ? err2 : -EINVAL; + } + return 0; +} + +static void mlx4_check_pcie_caps(struct mlx4_dev *dev) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + int err; + +#define PCIE_SPEED_STR(speed) \ + (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \ + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \ + speed == PCIE_SPEED_2_5GT ? 
"2.5GT/s" : \ + "Unknown") + + err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap); + if (err) { + mlx4_warn(dev, + "Unable to determine PCIe device BW capabilities\n"); + return; + } + + err = pcie_get_minimum_link(dev->pdev, &speed, &width); + if (err || speed == PCI_SPEED_UNKNOWN || + width == PCIE_LNK_WIDTH_UNKNOWN) { + mlx4_warn(dev, + "Unable to determine PCI device chain minimum BW\n"); + return; + } + + if (width != width_cap || speed != speed_cap) + mlx4_warn(dev, + "PCIe BW is different than device's capability\n"); + + mlx4_info(dev, "PCIe link speed is %s, device supports %s\n", + PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap)); + mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n", + width, width_cap); + return; +} + /*The function checks if there are live vf, return the num of them*/ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) { @@ -606,6 +752,7 @@ dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn; dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn; dev->caps.port_mask[i] = dev->caps.port_type[i]; + dev->caps.phys_port_id[i] = func_cap.phys_port_id; if (mlx4_get_slave_pkey_gid_tbl_len(dev, i, &dev->caps.gid_table_len[i], &dev->caps.pkey_table_len[i])) @@ -632,11 +779,22 @@ if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param.eqe_size; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param.cqe_size; + /* User still need to know when CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); @@ -670,10 +828,10 @@ has_eth_port = true; } - if (has_ib_port) - request_module_nowait(IB_DRV_NAME); if (has_eth_port) request_module_nowait(EN_DRV_NAME); + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + request_module_nowait(IB_DRV_NAME); } /* @@ -1328,6 +1486,11 @@ u32 slave_read; u32 cmd_channel_ver; + if (atomic_read(&pf_loading)) { + mlx4_warn(dev, "PF is not ready. 
Deferring probe\n"); + return -EPROBE_DEFER; + } + mutex_lock(&priv->cmd.slave_cmd_mutex); priv->cmd.max_cmds = 1; mlx4_warn(dev, "Sending reset\n"); @@ -1384,7 +1547,11 @@ int i; for (i = 1; i <= dev->caps.num_ports; i++) { - dev->caps.gid_table_len[i] = 1; + if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) + dev->caps.gid_table_len[i] = + mlx4_get_slave_num_gids(dev, 0, i); + else + dev->caps.gid_table_len[i] = 1; dev->caps.pkey_table_len[i] = dev->phys_caps.pkey_phys_table_len[i] - 1; } @@ -1409,7 +1576,7 @@ if (mlx4_log_num_mgm_entry_size == -1 && dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && (!mlx4_is_mfunc(dev) || - (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && + (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) && choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= MLX4_MIN_MGM_LOG_ENTRY_SIZE) { dev->oper_log_mgm_entry_size = @@ -1443,6 +1610,19 @@ mlx4_log_num_mgm_entry_size); } +static void choose_tunnel_offload_mode(struct mlx4_dev *dev, + struct mlx4_dev_cap *dev_cap) +{ + if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; + else + dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; + + mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode + == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); +} + static int mlx4_init_hca(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -1483,11 +1663,21 @@ } choose_steering_mode(dev, &dev_cap); + choose_tunnel_offload_mode(dev, &dev_cap); + + err = mlx4_get_phys_port_id(dev); + if (err) + mlx4_err(dev, "Fail to get physical port id\n"); if (mlx4_is_master(dev)) mlx4_parav_master_pf_caps(dev); - profile = default_profile; + if (mlx4_low_memory_profile()) { + mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); + profile = low_mem_profile; + } else { + profile = default_profile; + } if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) profile.num_mcg = MLX4_FS_NUM_MCG; @@ -1654,7 +1844,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) { - mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); + mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); return; } @@ -1877,7 +2067,7 @@ struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; int nreq = min_t(int, dev->caps.num_ports * - min_t(int, netif_get_num_default_rss_queues() + 1, + min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); int err; int i; @@ -1944,6 +2134,7 @@ if (!mlx4_is_slave(dev)) { mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); + mlx4_init_roce_gid_table(dev, &info->gid_table); info->base_qpn = mlx4_get_base_qpn(dev, port); } @@ -2097,6 +2288,13 @@ struct mlx4_dev *dev; int err; int port; + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { + {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; + unsigned total_vfs = 0; + int sriov_initialized = 0; + unsigned int i; pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); @@ -2111,17 +2309,40 @@ * per port, we must limit the number of VFs to 63 (since their are * 128 MACs) */ - if (num_vfs >= MLX4_MAX_NUM_VF) { + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; + total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { + nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; + if (nvfs[i] < 0) { + dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); + return -EINVAL; + } + } + for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; + i++) { + prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; + if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { + dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); + return -EINVAL; + } + } + if (total_vfs >= MLX4_MAX_NUM_VF) { dev_err(&pdev->dev, "Requested more VF's (%d) than allowed (%d)\n", - num_vfs, MLX4_MAX_NUM_VF - 1); + total_vfs, MLX4_MAX_NUM_VF - 1); return -EINVAL; } - if (num_vfs < 0) { - pr_err("num_vfs module parameter cannot be negative\n"); - return -EINVAL; + for (i = 0; i < MLX4_MAX_PORTS; i++) { + if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) { + dev_err(&pdev->dev, + "Requested more VF's (%d) for port (%d) than allowed (%d)\n", + nvfs[i] + nvfs[2], i + 1, + MLX4_MAX_NUM_VF_P_PORT - 1); + return -EINVAL; + } } + + /* * Check for BARs. */ @@ -2171,13 +2392,8 @@ /* Allow large DMA segments, up to the firmware limit of 1 GB */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - err = -ENOMEM; - goto err_release_regions; - } - - dev = &priv->dev; + dev = pci_get_drvdata(pdev); + priv = mlx4_priv(dev); dev->pdev = pdev; INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); @@ -2196,11 +2412,23 @@ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { /* When acting as pf, we normally skip vfs unless explicitly * requested to probe them. 
*/ - if (num_vfs && extended_func_num(pdev) > probe_vf) { - mlx4_warn(dev, "Skipping virtual function:%d\n", - extended_func_num(pdev)); - err = -ENODEV; - goto err_free_dev; + if (total_vfs) { + unsigned vfs_offset = 0; + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && + vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset += nvfs[i], i++) + ; + if (i == sizeof(nvfs)/sizeof(nvfs[0])) { + err = -ENODEV; + goto err_free_dev; + } + if ((extended_func_num(pdev) - vfs_offset) + > prb_vf[i]) { + mlx4_warn(dev, "Skipping virtual function:%d\n", + extended_func_num(pdev)); + err = -ENODEV; + goto err_free_dev; + } } mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); dev->flags |= MLX4_FLAG_SLAVE; @@ -2220,18 +2448,30 @@ } } - if (num_vfs) { - mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); - err = pci_enable_sriov(pdev, num_vfs); - if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", - err); + if (total_vfs) { + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", + total_vfs); + dev->dev_vfs = kzalloc( + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); + if (NULL == dev->dev_vfs) { + mlx4_err(dev, "Failed to allocate memory for VFs\n"); err = 0; } else { - mlx4_warn(dev, "Running in master mode\n"); - dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; - dev->num_vfs = num_vfs; + atomic_inc(&pf_loading); + err = pci_enable_sriov(pdev, total_vfs); + if (err) { + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", + err); + atomic_dec(&pf_loading); + err = 0; + } else { + mlx4_warn(dev, "Running in master mode\n"); + dev->flags |= MLX4_FLAG_SRIOV | + MLX4_FLAG_MASTER; + dev->num_vfs = total_vfs; + sriov_initialized = 1; + } } } @@ -2287,15 +2527,45 @@ goto err_mfunc; } + /* check if the device is functioning at its maximum possible speed. + * No return code for this call, just warn the user in case of PCI + * express device capabilities are under-satisfied by the bus. + */ + if (!mlx4_is_slave(dev)) + mlx4_check_pcie_caps(dev); + /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ if (mlx4_is_master(dev)) { + unsigned sum = 0; err = mlx4_multi_func_init(dev); if (err) { mlx4_err(dev, "Failed to init master mfunc" "interface, aborting.\n"); goto err_close; } + if (sriov_initialized) { + int ib_ports = 0; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + ib_ports++; + + if (ib_ports && + (num_vfs_argc > 1 || probe_vfs_argc > 1)) { + mlx4_err(dev, + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); + err = -EINVAL; + goto err_master_mfunc; + } + for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { + unsigned j; + for (j = 0; j < nvfs[i]; ++sum, ++j) { + dev->dev_vfs[sum].min_port = + i < 2 ? i + 1 : 1; + dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : + dev->caps.num_ports; + } + } + } } err = mlx4_alloc_eq_table(dev); @@ -2350,8 +2620,10 @@ mlx4_sense_init(dev); mlx4_start_sense(dev); - priv->pci_dev_data = pci_dev_data; - pci_set_drvdata(pdev, dev); + priv->removed = 0; + + if (mlx4_is_master(dev) && dev->num_vfs) + atomic_dec(&pf_loading); return 0; @@ -2403,6 +2675,11 @@ if (!mlx4_is_slave(dev)) mlx4_free_ownership(dev); + if (mlx4_is_master(dev) && dev->num_vfs) + atomic_dec(&pf_loading); + + kfree(priv->dev.dev_vfs); + err_free_dev: kfree(priv); @@ -2417,84 +2694,110 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { + struct mlx4_priv *priv; + struct mlx4_dev *dev; + printk_once(KERN_INFO "%s", mlx4_version); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev = &priv->dev; + pci_set_drvdata(pdev, dev); + priv->pci_dev_data = id->driver_data; + return __mlx4_init_one(pdev, id->driver_data); } -static void mlx4_remove_one(struct pci_dev *pdev) +static void __mlx4_remove_one(struct pci_dev *pdev) { struct mlx4_dev *dev = pci_get_drvdata(pdev); struct mlx4_priv *priv = mlx4_priv(dev); + int pci_dev_data; int p; - if (dev) { - /* in SRIOV it is not allowed to unload the pf's - * driver while there are alive vf's */ - if (mlx4_is_master(dev)) { - if (mlx4_how_many_lives_vf(dev)) - printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); - } - mlx4_stop_sense(dev); - mlx4_unregister_device(dev); + if (priv->removed) + return; - for (p = 1; p <= dev->caps.num_ports; p++) { - mlx4_cleanup_port_info(&priv->port[p]); - mlx4_CLOSE_PORT(dev, p); - } + pci_dev_data = priv->pci_dev_data; - if (mlx4_is_master(dev)) - mlx4_free_resource_tracker(dev, - RES_TR_FREE_SLAVES_ONLY); + /* in SRIOV it is not allowed to unload the pf's + * driver while there are alive vf's */ + if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) + printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); + mlx4_stop_sense(dev); + mlx4_unregister_device(dev); + + for (p = 1; p <= dev->caps.num_ports; p++) { + mlx4_cleanup_port_info(&priv->port[p]); + mlx4_CLOSE_PORT(dev, p); + } - mlx4_cleanup_counters_table(dev); - mlx4_cleanup_qp_table(dev); - mlx4_cleanup_srq_table(dev); - mlx4_cleanup_cq_table(dev); - mlx4_cmd_use_polling(dev); - mlx4_cleanup_eq_table(dev); - mlx4_cleanup_mcg_table(dev); - mlx4_cleanup_mr_table(dev); - mlx4_cleanup_xrcd_table(dev); - mlx4_cleanup_pd_table(dev); + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_SLAVES_ONLY); - if (mlx4_is_master(dev)) - mlx4_free_resource_tracker(dev, - RES_TR_FREE_STRUCTS_ONLY); + mlx4_cleanup_counters_table(dev); + mlx4_cleanup_qp_table(dev); + mlx4_cleanup_srq_table(dev); + mlx4_cleanup_cq_table(dev); + mlx4_cmd_use_polling(dev); + mlx4_cleanup_eq_table(dev); + mlx4_cleanup_mcg_table(dev); + mlx4_cleanup_mr_table(dev); + mlx4_cleanup_xrcd_table(dev); + mlx4_cleanup_pd_table(dev); - iounmap(priv->kar); - mlx4_uar_free(dev, &priv->driver_uar); - mlx4_cleanup_uar_table(dev); - if (!mlx4_is_slave(dev)) - mlx4_clear_steering(dev); - mlx4_free_eq_table(dev); - if (mlx4_is_master(dev)) - mlx4_multi_func_cleanup(dev); - mlx4_close_hca(dev); - if (mlx4_is_slave(dev)) - mlx4_multi_func_cleanup(dev); - mlx4_cmd_cleanup(dev); - - if (dev->flags & MLX4_FLAG_MSI_X) - pci_disable_msix(pdev); - if (dev->flags & MLX4_FLAG_SRIOV) { - mlx4_warn(dev, "Disabling SR-IOV\n"); - pci_disable_sriov(pdev); - } - - if (!mlx4_is_slave(dev)) - mlx4_free_ownership(dev); - - kfree(dev->caps.qp0_tunnel); - kfree(dev->caps.qp0_proxy); 
- kfree(dev->caps.qp1_tunnel); - kfree(dev->caps.qp1_proxy); - - kfree(priv); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + if (mlx4_is_master(dev)) + mlx4_free_resource_tracker(dev, + RES_TR_FREE_STRUCTS_ONLY); + + iounmap(priv->kar); + mlx4_uar_free(dev, &priv->driver_uar); + mlx4_cleanup_uar_table(dev); + if (!mlx4_is_slave(dev)) + mlx4_clear_steering(dev); + mlx4_free_eq_table(dev); + if (mlx4_is_master(dev)) + mlx4_multi_func_cleanup(dev); + mlx4_close_hca(dev); + if (mlx4_is_slave(dev)) + mlx4_multi_func_cleanup(dev); + mlx4_cmd_cleanup(dev); + + if (dev->flags & MLX4_FLAG_MSI_X) + pci_disable_msix(pdev); + if (dev->flags & MLX4_FLAG_SRIOV) { + mlx4_warn(dev, "Disabling SR-IOV\n"); + pci_disable_sriov(pdev); + dev->num_vfs = 0; } + + if (!mlx4_is_slave(dev)) + mlx4_free_ownership(dev); + + kfree(dev->caps.qp0_tunnel); + kfree(dev->caps.qp0_proxy); + kfree(dev->caps.qp1_tunnel); + kfree(dev->caps.qp1_proxy); + kfree(dev->dev_vfs); + + pci_release_regions(pdev); + pci_disable_device(pdev); + memset(priv, 0, sizeof(*priv)); + priv->pci_dev_data = pci_dev_data; + priv->removed = 1; +} + +static void mlx4_remove_one(struct pci_dev *pdev) +{ + struct mlx4_dev *dev = pci_get_drvdata(pdev); + struct mlx4_priv *priv = mlx4_priv(dev); + + __mlx4_remove_one(pdev); + kfree(priv); + pci_set_drvdata(pdev, NULL); } int mlx4_restart_one(struct pci_dev *pdev) @@ -2504,7 +2807,7 @@ int pci_dev_data; pci_dev_data = priv->pci_dev_data; - mlx4_remove_one(pdev); + __mlx4_remove_one(pdev); return __mlx4_init_one(pdev, pci_dev_data); } @@ -2559,7 +2862,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { - mlx4_remove_one(pdev); + __mlx4_remove_one(pdev); return state == pci_channel_io_perm_failure ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; @@ -2567,7 +2870,11 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) { - int ret = __mlx4_init_one(pdev, 0); + struct mlx4_dev *dev = pci_get_drvdata(pdev); + struct mlx4_priv *priv = mlx4_priv(dev); + int ret; + + ret = __mlx4_init_one(pdev, priv->pci_dev_data); return ret ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } @@ -2581,6 +2888,7 @@ .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, + .shutdown = __mlx4_remove_one, .remove = mlx4_remove_one, .err_handler = &mlx4_err_handler, }; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -125,9 +125,14 @@ enum mlx4_steer_type steer, u32 qpn) { - struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; + struct mlx4_steer *s_steer; struct mlx4_promisc_qp *pqp; + if (port < 1 || port > dev->caps.num_ports) + return NULL; + + s_steer = &mlx4_priv(dev)->steer[port - 1]; + list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { if (pqp->qpn == qpn) return pqp; @@ -154,6 +159,9 @@ u32 prot; int err; + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + s_steer = &mlx4_priv(dev)->steer[port - 1]; new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); if (!new_entry) @@ -238,6 +246,9 @@ struct mlx4_promisc_qp *pqp; struct mlx4_promisc_qp *dqp; + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + s_steer = &mlx4_priv(dev)->steer[port - 1]; pqp = get_promisc_qp(dev, port, steer, qpn); @@ -283,6 +294,9 @@ struct mlx4_steer_index *tmp_entry, *entry = NULL; struct mlx4_promisc_qp *dqp, *tmp_dqp; + if (port < 1 || port > dev->caps.num_ports) + return NULL; + s_steer = &mlx4_priv(dev)->steer[port - 1]; /* if qp is not promisc, it cannot be duplicated */ @@ -324,6 +338,9 @@ bool ret = false; int i; + if (port < 1 || port > dev->caps.num_ports) + return NULL; + s_steer = &mlx4_priv(dev)->steer[port - 1]; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -378,6 +395,9 @@ int err; struct mlx4_priv *priv = mlx4_priv(dev); + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + s_steer = &mlx4_priv(dev)->steer[port - 1]; mutex_lock(&priv->mcg_table.mutex); @@ -484,6 +504,9 @@ int loc, i; int err; + if (port < 1 || port > dev->caps.num_ports) + return -EINVAL; + s_steer = &mlx4_priv(dev)->steer[port - 1]; mutex_lock(&priv->mcg_table.mutex); @@ -674,7 +697,8 @@ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, - [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 + [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006, + [MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008 }; int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, @@ -699,7 +723,9 @@ [MLX4_NET_TRANS_RULE_ID_TCP] = sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), [MLX4_NET_TRANS_RULE_ID_UDP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_VXLAN] = + sizeof(struct mlx4_net_trans_rule_hw_vxlan) }; int mlx4_hw_rule_sz(struct mlx4_dev *dev, @@ -764,6 +790,13 @@ rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; break; + case MLX4_NET_TRANS_RULE_ID_VXLAN: + rule_hw->vxlan.vni = + cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8); + rule_hw->vxlan.vni_mask = + cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8); + break; + default: return -EINVAL; } @@ -895,6 +928,23 @@ } EXPORT_SYMBOL_GPL(mlx4_flow_detach); +int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, + u32 max_range_qpn) +{ + int err; + u64 in_param; + + in_param = ((u64) min_range_qpn) << 32; + in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF; + + err = mlx4_cmd(dev, in_param, 0, 0, + MLX4_FLOW_STEERING_IB_UC_QP_RANGE, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + + return err; +} +EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE); + int 
mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer) @@ -996,7 +1046,7 @@ index, dev->caps.num_mgms); else mlx4_bitmap_free(&priv->mcg_table.bitmap, - index - dev->caps.num_mgms); + index - dev->caps.num_mgms, MLX4_USE_RR); } mutex_unlock(&priv->mcg_table.mutex); @@ -1087,7 +1137,7 @@ index, amgm_index, dev->caps.num_mgms); else mlx4_bitmap_free(&priv->mcg_table.bitmap, - amgm_index - dev->caps.num_mgms); + amgm_index - dev->caps.num_mgms, MLX4_USE_RR); } } else { /* Remove entry from AMGM */ @@ -1107,7 +1157,7 @@ prev, index, dev->caps.num_mgms); else mlx4_bitmap_free(&priv->mcg_table.bitmap, - index - dev->caps.num_mgms); + index - dev->caps.num_mgms, MLX4_USE_RR); } out: @@ -1337,9 +1387,12 @@ struct mlx4_cmd_info *cmd) { u32 qpn = (u32) vhcr->in_param & 0xffffffff; - u8 port = vhcr->in_param >> 62; + int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62); enum mlx4_steer_type steer = vhcr->in_modifier; + if (port < 0) + return -EINVAL; + /* Promiscuous unicast is not allowed in mfunc */ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) return 0; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -51,8 +51,8 @@ #define DRV_NAME "mlx4_core" #define PFX DRV_NAME ": " -#define DRV_VERSION "1.1" -#define DRV_RELDATE "Dec, 2011" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb, 2014" #define MLX4_FS_UDP_UC_EN (1 << 1) #define MLX4_FS_TCP_UC_EN (1 << 2) @@ -282,6 +282,9 @@ #define MLX4_MPT_STATUS_SW 0xF0 #define MLX4_MPT_STATUS_HW 0x00 +#define MLX4_CQE_SIZE_MASK_STRIDE 0x3 +#define MLX4_EQE_SIZE_MASK_STRIDE 0x30 + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ @@ -695,6 +698,17 @@ int max; }; +#define MLX4_ROCE_GID_ENTRY_SIZE 16 + +struct mlx4_roce_gid_entry { + u8 raw[MLX4_ROCE_GID_ENTRY_SIZE]; +}; + +struct mlx4_roce_gid_table { + struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS]; + struct mutex mutex; +}; + #define MLX4_MAX_VLAN_NUM 128 #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) @@ -758,6 +772,7 @@ struct device_attribute port_mtu_attr; struct mlx4_mac_table mac_table; struct mlx4_vlan_table vlan_table; + struct mlx4_roce_gid_table gid_table; int base_qpn; }; @@ -783,6 +798,11 @@ MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, }; +enum { + MLX4_NO_RR = 0, + MLX4_USE_RR = 1, +}; + struct mlx4_priv { struct mlx4_dev dev; @@ -791,6 +811,7 @@ spinlock_t ctx_lock; int pci_dev_data; + int removed; struct list_head pgdir_list; struct mutex pgdir_mutex; @@ -844,9 +865,10 @@ extern struct workqueue_struct *mlx4_wq; u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); -void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); +void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr); u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); -void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); +void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, + int use_rr); u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved_bot, u32 resetrved_top); @@ -1128,6 +1150,8 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table); void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); @@ -1137,6 +1161,7 @@ enum mlx4_resource resource_type, u64 resource_id, int *slave); void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave); int mlx4_init_resource_tracker(struct mlx4_dev *dev); void mlx4_free_resource_tracker(struct mlx4_dev *dev, @@ -1183,6 +1208,12 @@ struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1236,6 +1267,11 @@ struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); @@ -1271,4 +1307,8 @@ void mlx4_init_quotas(struct mlx4_dev *dev); +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); +/* Returns the VF index of slave */ +int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); + #endif /* MLX4_H */ --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -45,6 +45,7 @@ #include #endif #include +#include #include #include @@ -56,8 +57,8 @@ #include "en_port.h" #define DRV_NAME "mlx4_en" -#define DRV_VERSION "2.0" 
-#define DRV_RELDATE "Dec 2011" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb 2014" #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) @@ -92,6 +93,8 @@ * OS related constants and tunables */ +#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1 + #define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ) /* Use the maximum between 16384 and a single page */ @@ -118,6 +121,7 @@ #define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE) #define MLX4_EN_SMALL_PKT_SIZE 64 +#define MLX4_EN_MIN_TX_RING_P_UP 1 #define MLX4_EN_MAX_TX_RING_P_UP 32 #define MLX4_EN_NUM_UP 8 #define MLX4_EN_DEF_TX_RING_SIZE 512 @@ -186,6 +190,13 @@ #define GET_AVG_PERF_COUNTER(cnt) (0) #endif /* MLX4_EN_PERF_STAT */ +/* Constants for TX flow */ +enum { + MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ + MAX_BF = 256, + MIN_PKT_LEN = 17, +}; + /* * Configurables */ @@ -255,6 +266,8 @@ u16 poll_cnt; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; + u8 queue_index; + cpumask_t affinity_mask; u32 last_nr_txbb; struct mlx4_qp qp; struct mlx4_qp_context context; @@ -264,10 +277,14 @@ unsigned long bytes; unsigned long packets; unsigned long tx_csum; + unsigned long queue_stopped; + unsigned long wake_queue; struct mlx4_bf bf; bool bf_enabled; + bool bf_alloced; struct netdev_queue *tx_queue; int hwtstamp_tx_type; + int inline_thold; }; struct mlx4_en_rx_desc { @@ -300,13 +317,13 @@ unsigned long csum_ok; unsigned long csum_none; int hwtstamp_rx_filter; + cpumask_var_t affinity_mask; }; struct mlx4_en_cq { struct mlx4_cq mcq; struct mlx4_hwq_resources wqres; int ring; - spinlock_t lock; struct net_device *dev; struct napi_struct napi; int size; @@ -330,6 +347,7 @@ #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) spinlock_t poll_lock; /* protects from LLS/napi conflicts */ #endif /* CONFIG_NET_RX_BUSY_POLL */ + struct irq_desc *irq_desc; }; struct mlx4_en_port_profile { @@ -343,6 +361,7 @@ u8 tx_pause; u8 tx_ppp; int rss_rings; + int inline_thold; }; struct mlx4_en_profile { @@ -373,10 +392,14 @@ u32 priv_pdn; spinlock_t uar_lock; u8 mac_removed[MLX4_MAX_PORTS + 1]; + rwlock_t clock_lock; + u32 nominal_c_mult; struct cyclecounter cycles; struct timecounter clock; unsigned long last_overflow_check; unsigned long overflow_period; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; }; @@ -388,10 +411,16 @@ enum mlx4_qp_state indir_state; }; +enum mlx4_en_port_flag { + MLX4_EN_PORT_ANC = 1<<0, /* Auto-negotiation complete */ + MLX4_EN_PORT_ANE = 1<<1, /* Auto-negotiation enabled */ +}; + struct mlx4_en_port_state { int link_state; int link_speed; - int transciver; + int transceiver; + u32 flags; }; struct mlx4_en_pkt_stats { @@ -434,6 +463,7 @@ enum mlx4_en_mclist_act action; u8 addr[ETH_ALEN]; u64 reg_id; + u64 tunnel_reg_id; }; struct mlx4_en_frag_info { @@ -513,11 +543,12 @@ int registered; int allocated; int stride; - unsigned char prev_mac[ETH_ALEN + 2]; + unsigned char current_mac[ETH_ALEN + 2]; int mac_index; unsigned max_mtu; int base_qpn; int cqe_factor; + int cqe_size; struct mlx4_en_rss_map rss_map; __be32 ctrl_flags; @@ -540,6 +571,10 @@ struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; +#ifdef CONFIG_MLX4_EN_VXLAN + struct work_struct vxlan_add_task; + struct work_struct vxlan_del_task; +#endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_port_stats port_stats; @@ -565,7 +600,10 @@ struct list_head filters; struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT]; #endif + u64 tunnel_reg_id; + __be16 
vxlan_port; + u32 pflags; }; enum mlx4_en_wol { @@ -580,6 +618,11 @@ struct rcu_head rcu; }; +static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) +{ + return buf + idx * cqe_sz; +} + #ifdef CONFIG_NET_RX_BUSY_POLL static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) { @@ -653,7 +696,7 @@ } /* true if a socket is polling, even if it did not get the lock */ -static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) { WARN_ON(!(cq->state & MLX4_CQ_LOCKED)); return cq->state & CQ_USER_PEND; @@ -683,7 +726,7 @@ return false; } -static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) +static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) { return false; } @@ -715,12 +758,13 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv); + void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, - int qpn, u32 size, u16 stride, int node); + int qpn, u32 size, u16 stride, + int node, int queue_index); void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring); int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, @@ -728,7 +772,7 @@ int cq, int user_prio); void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); - +void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride, int node); @@ -742,6 +786,7 @@ struct mlx4_en_cq *cq, int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); +int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context); @@ -776,9 +821,15 @@ #define MLX4_EN_NUM_SELF_TEST 5 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); -u64 mlx4_en_mac_to_u64(u8 *addr); void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); +#define DEV_FEATURE_CHANGED(dev, new_features, feature) \ + ((dev->features & feature) ^ (new_features & feature)) + +int mlx4_en_reset_config(struct net_device *dev, + struct hwtstamp_config ts_config, + netdev_features_t new_features); + /* * Functions for time stamping */ @@ -787,9 +838,7 @@ struct skb_shared_hwtstamps *hwts, u64 timestamp); void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev); -int mlx4_en_timestamp_config(struct net_device *dev, - int tx_type, - int rx_filter); +void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev); /* Globals */ --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/mr.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -346,7 +346,7 @@ { struct mlx4_priv *priv = mlx4_priv(dev); - mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR); } static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) @@ -991,7 +991,7 @@ int mlx4_SYNC_TPT(struct mlx4_dev *dev) { - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, - MLX4_CMD_NATIVE); + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); } EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/pd.c +++ 
linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -59,7 +59,7 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn) { - mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn); + mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR); } EXPORT_SYMBOL_GPL(mlx4_pd_free); @@ -96,7 +96,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) { - mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn); + mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR); } void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) @@ -164,7 +164,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar) { - mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index); + mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR); } EXPORT_SYMBOL_GPL(mlx4_uar_free); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/port.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/port.c @@ -75,6 +75,16 @@ table->total = 0; } +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) + memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); +} + static int validate_index(struct mlx4_dev *dev, struct mlx4_mac_table *table, int index) { @@ -123,6 +133,26 @@ return err; } +int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + struct mlx4_mac_table *table = &info->mac_table; + int i; + + for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { + if (!table->refs[i]) + continue; + + if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { + *idx = i; + return 0; + } + } + + return -ENOENT; +} +EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); + int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) { struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; @@ -485,6 +515,162 @@ mlx4_free_cmd_mailbox(dev, outmailbox); return err; } +static struct mlx4_roce_gid_entry zgid_entry; + +int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port) +{ + int vfs; + int slave_gid = slave; + unsigned i; + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return MLX4_ROCE_PF_GIDS; + + /* Slave is a VF */ + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs)) + return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1; + return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs; +} + +int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port) +{ + int gids; + unsigned i; + int slave_gid = slave; + int vfs; + + struct mlx4_slaves_pport slaves_pport; + struct mlx4_active_ports actv_ports; + unsigned max_port_p_one; + + if (slave == 0) + return 0; + + slaves_pport = 
mlx4_phys_to_slaves_pport(dev, port); + actv_ports = mlx4_get_active_ports(dev, slave); + max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + + for (i = 1; i < max_port_p_one; i++) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_slaves_pport slaves_pport_actv; + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + if (i == port) + continue; + slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid -= bitmap_weight(slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + if (slave_gid <= gids % vfs) + return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1); + + return MLX4_ROCE_PF_GIDS + (gids % vfs) + + ((gids / vfs) * (slave_gid - 1)); +} +EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix); + +static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, + int port, struct mlx4_cmd_mailbox *mailbox) +{ + struct mlx4_roce_gid_entry *gid_entry_mbox; + struct mlx4_priv *priv = mlx4_priv(dev); + int num_gids, base, offset; + int i, err; + + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + mutex_lock(&(priv->port[port].gid_table.mutex)); + /* Zero-out gids belonging to that slave in the port GID table */ + for (i = 0, offset = base; i < num_gids; offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, mailbox->dma, + ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; +} + + +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + struct mlx4_cmd_mailbox *mailbox; + int num_eth_ports, err; + int i; + + if (slave < 0 || slave > dev->num_vfs) + return; + + actv_ports = mlx4_get_active_ports(dev, slave); + + for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + num_eth_ports++; + } + } + + if (!num_eth_ports) + return; + + /* have ETH ports. 
Alloc mailbox for SET_PORT command */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return; + + for (i = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); + if (err) + mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", + slave, i + 1, err); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return; +} static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) @@ -495,14 +681,18 @@ struct mlx4_slave_state *slave_st = &master->slave_state[slave]; struct mlx4_set_port_rqp_calc_context *qpn_context; struct mlx4_set_port_general_context *gen_context; + struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1; int reset_qkey_viols; int port; int is_eth; + int num_gids; + int base; u32 in_modifier; u32 promisc; u16 mtu, prev_mtu; int err; - int i; + int i, j; + int offset; __be32 agg_cap_mask; __be32 slave_cap_mask; __be32 new_cap_mask; @@ -515,7 +705,8 @@ /* Slaves cannot perform SET_PORT operations except changing MTU */ if (is_eth) { if (slave != dev->caps.function && - in_modifier != MLX4_SET_PORT_GENERAL) { + in_modifier != MLX4_SET_PORT_GENERAL && + in_modifier != MLX4_SET_PORT_GID_TABLE) { mlx4_warn(dev, "denying SET_PORT for slave:%d\n", slave); return -EINVAL; @@ -561,8 +752,80 @@ gen_context->mtu = cpu_to_be16(master->max_mtu[port]); break; + case MLX4_SET_PORT_GID_TABLE: + /* change to MULTIPLE entries: number of guest's gids + * need a FOR-loop here over number of gids the guest has. + * 1. Check no duplicates in gids passed by slave + */ + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < num_gids; gid_entry_mbox++, i++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + gid_entry_mb1 = gid_entry_mbox + 1; + for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) { + if (!memcmp(gid_entry_mb1->raw, + zgid_entry.raw, sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw, + sizeof(gid_entry_mbox->raw))) { + /* found duplicate */ + return -EINVAL; + } + } + } + + /* 2. 
Check that do not have duplicates in OTHER + * entries in the port GID table + */ + + mutex_lock(&(priv->port[port].gid_table.mutex)); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (i >= base && i < base + num_gids) + continue; /* don't compare to slave's current gids */ + gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; + if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) + continue; + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (j = 0; j < num_gids; gid_entry_mbox++, j++) { + if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw, + sizeof(zgid_entry))) + continue; + if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, + sizeof(gid_entry_tbl->raw))) { + /* found duplicate */ + mlx4_warn(dev, "requested gid entry for slave:%d " + "is a duplicate of gid at index %d\n", + slave, i); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return -EINVAL; + } + } + } + + /* insert slave GIDs with memcpy, starting at slave's base index */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to current mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; } - return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, + + return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } @@ -626,6 +889,15 @@ struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { + int port = mlx4_slave_convert_port( + dev, slave, vhcr->in_modifier & 0xFF); + + if (port < 0) + return -EINVAL; + + vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) | + (port & 0xFF); + return mlx4_common_set_port(dev, slave, vhcr->in_modifier, vhcr->op_modifier, inbox); } @@ -800,6 +1072,48 @@ } EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); +enum { + VXLAN_ENABLE_MODIFY = 1 << 7, + VXLAN_STEERING_MODIFY = 1 << 6, + + VXLAN_ENABLE = 1 << 7, +}; + +struct mlx4_set_port_vxlan_context { + u32 reserved1; + u8 modify_flags; + u8 reserved2; + u8 enable_flags; + u8 steering; +}; + +int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable) +{ + int err; + u32 in_mod; + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_set_port_vxlan_context *context; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + context = mailbox->buf; + memset(context, 0, sizeof(*context)); + + context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY; + if (enable) + context->enable_flags = VXLAN_ENABLE; + context->steering = steering; + + in_mod = MLX4_SET_PORT_VXLAN << 8 | port; + err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN); + int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -866,3 +1180,271 @@ *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; } 
EXPORT_SYMBOL(mlx4_set_stats_bitmap); + +int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, + int *slave_id) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + int i, found_ix = -1; + int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS; + struct mlx4_slaves_pport slaves_pport; + unsigned num_vfs; + int slave_gid; + + if (!mlx4_is_mfunc(dev)) + return -EINVAL; + + slaves_pport = mlx4_phys_to_slaves_pport(dev, port); + num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1; + + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { + if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, + MLX4_ROCE_GID_ENTRY_SIZE)) { + found_ix = i; + break; + } + } + + if (found_ix >= 0) { + /* Calculate a slave_gid which is the slave number in the gid + * table and not a globally unique slave number. + */ + if (found_ix < MLX4_ROCE_PF_GIDS) + slave_gid = 0; + else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * + (vf_gids / num_vfs + 1)) + slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) / + (vf_gids / num_vfs + 1)) + 1; + else + slave_gid = + ((found_ix - MLX4_ROCE_PF_GIDS - + ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / + (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; + + /* Calculate the globally unique slave id */ + if (slave_gid) { + struct mlx4_active_ports exclusive_ports; + struct mlx4_active_ports actv_ports; + struct mlx4_slaves_pport slaves_pport_actv; + unsigned max_port_p_one; + int num_vfs_before = 0; + int candidate_slave_gid; + + /* Calculate how many VFs are on the previous port, if exists */ + for (i = 1; i < port; i++) { + bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + num_vfs_before += bitmap_weight( + slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + + /* candidate_slave_gid isn't necessarily the correct slave, but + * it has the same number of ports and is assigned to the same + * ports as the real slave we're looking for. On dual port VF, + * slave_gid = [single port VFs on port ] + + * [offset of the current slave from the first dual port VF] + + * 1 (for the PF). + */ + candidate_slave_gid = slave_gid + num_vfs_before; + + actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid); + max_port_p_one = find_first_bit( + actv_ports.ports, dev->caps.num_ports) + + bitmap_weight(actv_ports.ports, + dev->caps.num_ports) + 1; + + /* Calculate the real slave number */ + for (i = 1; i < max_port_p_one; i++) { + if (i == port) + continue; + bitmap_zero(exclusive_ports.ports, + dev->caps.num_ports); + set_bit(i - 1, exclusive_ports.ports); + slaves_pport_actv = + mlx4_phys_to_slaves_pport_actv( + dev, &exclusive_ports); + slave_gid += bitmap_weight( + slaves_pport_actv.slaves, + dev->num_vfs + 1); + } + } + *slave_id = slave_gid; + } + + return (found_ix >= 0) ? 
0 : -EINVAL; +} +EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid); + +int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, + u8 *gid) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + if (!mlx4_is_master(dev)) + return -EINVAL; + + memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + return 0; +} +EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); + +/* Cable Module Info */ +#define MODULE_INFO_MAX_READ 48 + +#define I2C_ADDR_LOW 0x50 +#define I2C_ADDR_HIGH 0x51 +#define I2C_PAGE_SIZE 256 + +/* Module Info Data */ +struct mlx4_cable_info { + u8 i2c_addr; + u8 page_num; + __be16 dev_mem_address; + __be16 reserved1; + __be16 size; + __be32 reserved2[2]; + u8 data[MODULE_INFO_MAX_READ]; +}; + +enum cable_info_err { + CABLE_INF_INV_PORT = 0x1, + CABLE_INF_OP_NOSUP = 0x2, + CABLE_INF_NOT_CONN = 0x3, + CABLE_INF_NO_EEPRM = 0x4, + CABLE_INF_PAGE_ERR = 0x5, + CABLE_INF_INV_ADDR = 0x6, + CABLE_INF_I2C_ADDR = 0x7, + CABLE_INF_QSFP_VIO = 0x8, + CABLE_INF_I2C_BUSY = 0x9, +}; + +#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF) + +static inline const char *cable_info_mad_err_str(u16 mad_status) +{ + u8 err = MAD_STATUS_2_CABLE_ERR(mad_status); + + switch (err) { + case CABLE_INF_INV_PORT: + return "invalid port selected"; + case CABLE_INF_OP_NOSUP: + return "operation not supported for this port (the port is of type CX4 or internal)"; + case CABLE_INF_NOT_CONN: + return "cable is not connected"; + case CABLE_INF_NO_EEPRM: + return "the connected cable has no EPROM (passive copper cable)"; + case CABLE_INF_PAGE_ERR: + return "page number is greater than 15"; + case CABLE_INF_INV_ADDR: + return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)"; + case CABLE_INF_I2C_ADDR: + return "invalid I2C slave address"; + case CABLE_INF_QSFP_VIO: + return "at least one cable violates the QSFP specification and ignores the modsel signal"; + case CABLE_INF_I2C_BUSY: + return "I2C bus is constantly busy"; + } + return "Unknown Error"; +} + +/** + * mlx4_get_module_info - Read cable module eeprom data + * @dev: mlx4_dev. + * @port: port number. + * @offset: byte offset in eeprom to start reading data from. + * @size: num of bytes to read. + * @data: output buffer to put the requested data into. + * + * Reads cable module eeprom data, puts the outcome data into + * data pointer parameter. + * Returns num of read bytes on success or a negative error + * code. 
+ */ +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data) +{ + struct mlx4_cmd_mailbox *inbox, *outbox; + struct mlx4_mad_ifc *inmad, *outmad; + struct mlx4_cable_info *cable_info; + u16 i2c_addr; + int ret; + + if (size > MODULE_INFO_MAX_READ) + size = MODULE_INFO_MAX_READ; + + inbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(inbox)) + return PTR_ERR(inbox); + + outbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(outbox)) { + mlx4_free_cmd_mailbox(dev, inbox); + return PTR_ERR(outbox); + } + + inmad = (struct mlx4_mad_ifc *)(inbox->buf); + outmad = (struct mlx4_mad_ifc *)(outbox->buf); + + inmad->method = 0x1; /* Get */ + inmad->class_version = 0x1; + inmad->mgmt_class = 0x1; + inmad->base_version = 0x1; + inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */ + + if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE) + /* Cross-page reads are not allowed; + * read until offset 256 in low page + */ + size -= offset + size - I2C_PAGE_SIZE; + + i2c_addr = I2C_ADDR_LOW; + if (offset >= I2C_PAGE_SIZE) { + /* Reset offset to high page */ + i2c_addr = I2C_ADDR_HIGH; + offset -= I2C_PAGE_SIZE; + } + + cable_info = (struct mlx4_cable_info *)inmad->data; + cable_info->dev_mem_address = cpu_to_be16(offset); + cable_info->page_num = 0; + cable_info->i2c_addr = i2c_addr; + cable_info->size = cpu_to_be16(size); + + ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3, + MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, + MLX4_CMD_NATIVE); + if (ret) + goto out; + + if (be16_to_cpu(outmad->status)) { + /* Mad returned with bad status */ + ret = be16_to_cpu(outmad->status); + mlx4_warn(dev, + "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n", + 0xFF60, port, i2c_addr, offset, size, + ret, cable_info_mad_err_str(ret)); + + if (i2c_addr == I2C_ADDR_HIGH && + MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR) + /* Some SFP cables do not support i2c slave + * address 0x51 (high page), abort silently. 
+ */ + ret = 0; + else + ret = -ret; + goto out; + } + cable_info = (struct mlx4_cable_info *)outmad->data; + memcpy(data, cable_info->data, size); + ret = size; +out: + mlx4_free_cmd_mailbox(dev, inbox); + mlx4_free_cmd_mailbox(dev, outbox); + return ret; +} +EXPORT_SYMBOL(mlx4_get_module_info); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/qp.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -250,7 +250,7 @@ if (mlx4_is_qp_reserved(dev, (u32) base_qpn)) return; - mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); + mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, MLX4_USE_RR); } void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) @@ -390,6 +390,41 @@ EXPORT_SYMBOL_GPL(mlx4_qp_alloc); +#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + cmd->qp_context.pri_path.grh_mylmc = params->smac_index; + } + + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + + err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_update_qp); + void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -52,6 +52,8 @@ struct mac_res { struct list_head list; u64 mac; + int ref_count; + u8 smac_index; u8 port; }; @@ -219,6 +221,11 @@ int qpn; }; +static int mlx4_is_eth(struct mlx4_dev *dev, int port) +{ + return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1; +} + static void *res_tracker_lookup(struct rb_root *root, u64 res_id) { struct rb_node *node = root->rb_node; @@ -461,6 +468,8 @@ spin_lock_init(&res_alloc->alloc_lock); for (t = 0; t < dev->num_vfs + 1; t++) { + struct mlx4_active_ports actv_ports = + mlx4_get_active_ports(dev, t); switch (i) { case RES_QP: initialize_res_quotas(dev, res_alloc, RES_QP, @@ -490,10 +499,27 @@ break; case RES_MAC: if (t == mlx4_master_func_num(dev)) { - res_alloc->quota[t] = MLX4_MAX_MAC_NUM; + int max_vfs_pport = 0; + /* Calculate the max vfs per port for */ + /* both ports. 
*/ + for (j = 0; j < dev->caps.num_ports; + j++) { + struct mlx4_slaves_pport slaves_pport = + mlx4_phys_to_slaves_pport(dev, j + 1); + unsigned current_slaves = + bitmap_weight(slaves_pport.slaves, + dev->caps.num_ports) - 1; + if (max_vfs_pport < current_slaves) + max_vfs_pport = + current_slaves; + } + res_alloc->quota[t] = + MLX4_MAX_MAC_NUM - + 2 * max_vfs_pport; res_alloc->guaranteed[t] = 2; for (j = 0; j < MLX4_MAX_PORTS; j++) - res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; + res_alloc->res_port_free[j] = + MLX4_MAX_MAC_NUM; } else { res_alloc->quota[t] = MLX4_MAX_MAC_NUM; res_alloc->guaranteed[t] = 2; @@ -521,9 +547,10 @@ break; } if (i == RES_MAC || i == RES_VLAN) { - for (j = 0; j < MLX4_MAX_PORTS; j++) - res_alloc->res_port_rsvd[j] += - res_alloc->guaranteed[t]; + for (j = 0; j < dev->caps.num_ports; j++) + if (test_bit(j, actv_ports.ports)) + res_alloc->res_port_rsvd[j] += + res_alloc->guaranteed[t]; } else { res_alloc->res_reserved += res_alloc->guaranteed[t]; } @@ -559,6 +586,7 @@ } /* free master's vlans */ i = dev->caps.function; + mlx4_reset_roce_gids(dev, i); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); rem_slave_vlans(dev, i); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); @@ -600,15 +628,37 @@ struct mlx4_qp_context *qp_ctx = inbox->buf + 8; enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; + int port; - if (MLX4_QP_ST_UD == ts) - qp_ctx->pri_path.mgid_index = 0x80 | slave; + if (MLX4_QP_ST_UD == ts) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) + qp_ctx->pri_path.mgid_index = + mlx4_get_base_gid_ix(dev, slave, port) | 0x80; + else + qp_ctx->pri_path.mgid_index = slave | 0x80; - if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) { - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) - qp_ctx->pri_path.mgid_index = slave & 0x7F; - if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) - qp_ctx->alt_path.mgid_index = slave & 0x7F; + } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) { + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->pri_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->pri_path.mgid_index &= 0x7f; + } else { + qp_ctx->pri_path.mgid_index = slave & 0x7F; + } + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port)) { + qp_ctx->alt_path.mgid_index += + mlx4_get_base_gid_ix(dev, slave, port); + qp_ctx->alt_path.mgid_index &= 0x7f; + } else { + qp_ctx->alt_path.mgid_index = slave & 0x7F; + } + } } } @@ -619,7 +669,6 @@ struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; - u32 qp_type; int port; port = (qpc->pri_path.sched_queue & 0x40) ? 
2 : 1; @@ -627,12 +676,6 @@ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (MLX4_VGT != vp_oper->state.default_vlan) { - qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; - if (MLX4_QP_ST_RC == qp_type || - (MLX4_QP_ST_UD == qp_type && - !mlx4_is_qp_reserved(dev, qpn))) - return -EINVAL; - /* the reserved QPs (special, proxy, tunnel) * do not operate over vlans */ @@ -1340,43 +1383,29 @@ spin_lock_irq(mlx4_tlock(dev)); r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); - if (!r) + if (!r) { err = -ENOENT; - else if (r->com.owner != slave) + } else if (r->com.owner != slave) { err = -EPERM; - else { - switch (state) { - case RES_CQ_BUSY: - err = -EBUSY; - break; - - case RES_CQ_ALLOCATED: - if (r->com.state != RES_CQ_HW) - err = -EINVAL; - else if (atomic_read(&r->ref_count)) - err = -EBUSY; - else - err = 0; - break; - - case RES_CQ_HW: - if (r->com.state != RES_CQ_ALLOCATED) - err = -EINVAL; - else - err = 0; - break; - - default: + } else if (state == RES_CQ_ALLOCATED) { + if (r->com.state != RES_CQ_HW) err = -EINVAL; - } + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + else + err = 0; + } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) { + err = -EINVAL; + } else { + err = 0; + } - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_CQ_BUSY; - if (cq) - *cq = r; - } + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_CQ_BUSY; + if (cq) + *cq = r; } spin_unlock_irq(mlx4_tlock(dev)); @@ -1385,7 +1414,7 @@ } static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, - enum res_cq_states state, struct res_srq **srq) + enum res_srq_states state, struct res_srq **srq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; @@ -1394,39 +1423,25 @@ spin_lock_irq(mlx4_tlock(dev)); r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); - if (!r) + if (!r) { err = -ENOENT; - else if (r->com.owner != slave) + } else if (r->com.owner != slave) { err = -EPERM; - else { - switch (state) { - case RES_SRQ_BUSY: - err = -EINVAL; - break; - - case RES_SRQ_ALLOCATED: - if (r->com.state != RES_SRQ_HW) - err = -EINVAL; - else if (atomic_read(&r->ref_count)) - err = -EBUSY; - break; - - case RES_SRQ_HW: - if (r->com.state != RES_SRQ_ALLOCATED) - err = -EINVAL; - break; - - default: + } else if (state == RES_SRQ_ALLOCATED) { + if (r->com.state != RES_SRQ_HW) err = -EINVAL; - } + else if (atomic_read(&r->ref_count)) + err = -EBUSY; + } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) { + err = -EINVAL; + } - if (!err) { - r->com.from_state = r->com.state; - r->com.to_state = state; - r->com.state = RES_SRQ_BUSY; - if (srq) - *srq = r; - } + if (!err) { + r->com.from_state = r->com.state; + r->com.to_state = state; + r->com.state = RES_SRQ_BUSY; + if (srq) + *srq = r; } spin_unlock_irq(mlx4_tlock(dev)); @@ -1484,7 +1499,7 @@ switch (op) { case RES_OP_RESERVE: - count = get_param_l(&in_param); + count = get_param_l(&in_param) & 0xffffff; align = get_param_h(&in_param); err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); if (err) @@ -1687,11 +1702,39 @@ return err; } -static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) +static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port, + u8 smac_index, u64 *mac) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct 
mac_res *res; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->smac_index == smac_index && res->port == (u8) port) { + *mac = res->mac; + return 0; + } + } + return -ENOENT; +} + +static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *mac_list = + &tracker->slave_list[slave].res_list[RES_MAC]; + struct mac_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, mac_list, list) { + if (res->mac == mac && res->port == (u8) port) { + /* mac found. update ref count */ + ++res->ref_count; + return 0; + } + } if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) return -EINVAL; @@ -1702,6 +1745,8 @@ } res->mac = mac; res->port = (u8) port; + res->smac_index = smac_index; + res->ref_count = 1; list_add_tail(&res->list, &tracker->slave_list[slave].res_list[RES_MAC]); return 0; @@ -1718,9 +1763,11 @@ list_for_each_entry_safe(res, tmp, mac_list, list) { if (res->mac == mac && res->port == (u8) port) { - list_del(&res->list); - mlx4_release_resource(dev, slave, RES_MAC, 1, port); - kfree(res); + if (!--res->ref_count) { + list_del(&res->list); + mlx4_release_resource(dev, slave, RES_MAC, 1, port); + kfree(res); + } break; } } @@ -1733,10 +1780,13 @@ struct list_head *mac_list = &tracker->slave_list[slave].res_list[RES_MAC]; struct mac_res *res, *tmp; + int i; list_for_each_entry_safe(res, tmp, mac_list, list) { list_del(&res->list); - __mlx4_unregister_mac(dev, res->port, res->mac); + /* dereference the mac the num times the slave referenced it */ + for (i = 0; i < res->ref_count; i++) + __mlx4_unregister_mac(dev, res->port, res->mac); mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); kfree(res); } @@ -1748,21 +1798,28 @@ int err = -EINVAL; int port; u64 mac; + u8 smac_index; if (op != RES_OP_RESERVE_AND_MAP) return err; port = !in_port ? get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; mac = in_param; err = __mlx4_register_mac(dev, port, mac); if (err >= 0) { + smac_index = err; set_param_l(out_param, err); err = 0; } if (!err) { - err = mac_add_to_slave(dev, slave, mac, port); + err = mac_add_to_slave(dev, slave, mac, port, smac_index); if (err) __mlx4_unregister_mac(dev, port, mac); } @@ -1859,6 +1916,11 @@ if (!port || op != RES_OP_RESERVE_AND_MAP) return -EINVAL; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ if (!in_port && port > 0 && port <= dev->caps.num_ports) { slave_state[slave].old_vlan_api = true; @@ -2156,6 +2218,11 @@ switch (op) { case RES_OP_RESERVE_AND_MAP: port = !in_port ? 
get_param_l(out_param) : in_port; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; mac_del_from_slave(dev, slave, in_param, port); __mlx4_unregister_mac(dev, port, in_param); break; @@ -2175,6 +2242,11 @@ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; int err = 0; + port = mlx4_slave_convert_port( + dev, slave, port); + + if (port < 0) + return -EINVAL; switch (op) { case RES_OP_RESERVE_AND_MAP: if (slave_state[slave].old_vlan_api) @@ -2762,6 +2834,8 @@ u32 qp_type; struct mlx4_qp_context *qp_ctx; enum mlx4_qp_optpar optpar; + int port; + int num_gids; qp_ctx = inbox->buf + 8; qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; @@ -2769,6 +2843,7 @@ switch (qp_type) { case MLX4_QP_ST_RC: + case MLX4_QP_ST_XRC: case MLX4_QP_ST_UC: switch (transition) { case QP_TRANS_INIT2RTR: @@ -2777,13 +2852,24 @@ case QP_TRANS_SQD2SQD: case QP_TRANS_SQD2RTS: if (slave != mlx4_master_func_num(dev)) - /* slaves have only gid index 0 */ - if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) - if (qp_ctx->pri_path.mgid_index) + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { + port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->pri_path.mgid_index >= num_gids) return -EINVAL; - if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) - if (qp_ctx->alt_path.mgid_index) + } + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1; + if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + else + num_gids = 1; + if (qp_ctx->alt_path.mgid_index >= num_gids) return -EINVAL; + } break; default: break; @@ -3296,6 +3382,58 @@ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } +static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf); + u8 pri_sched_queue; + int port = mlx4_slave_convert_port( + dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1; + + if (port < 0) + return -EINVAL; + + pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) | + ((port & 1) << 6); + + if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH || + mlx4_is_eth(dev, port + 1)) { + qpc->pri_path.sched_queue = pri_sched_queue; + } + + if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { + port = mlx4_slave_convert_port( + dev, slave, (qpc->alt_path.sched_queue >> 6 & 1) + + 1) - 1; + if (port < 0) + return -EINVAL; + qpc->alt_path.sched_queue = + (qpc->alt_path.sched_queue & ~(1 << 6)) | + (port & 1) << 6; + } + return 0; +} + +static int roce_verify_mac(struct mlx4_dev *dev, int slave, + struct mlx4_qp_context *qpc, + struct mlx4_cmd_mailbox *inbox) +{ + u64 mac; + int port; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + u8 sched = *(u8 *)(inbox->buf + 64); + u8 smac_ix; + + port = (sched >> 6 & 1) + 1; + if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) { + smac_ix = qpc->pri_path.grh_mylmc & 0x7f; + if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac)) + return -ENOENT; + } + return 0; +} + int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3314,10 +3452,16 @@ u8 orig_vlan_index = qpc->pri_path.vlan_index; u8 orig_feup = qpc->pri_path.feup; + err = adjust_qp_sched_queue(dev, slave, qpc, inbox); + if (err) + return 
err; err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); if (err) return err; + if (roce_verify_mac(dev, slave, qpc, inbox)) + return -EINVAL; + update_pkey_index(dev, slave, inbox); update_gid(dev, inbox, (u8)slave); adjust_proxy_tun_qkey(dev, vhcr, qpc); @@ -3362,6 +3506,9 @@ int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); if (err) return err; @@ -3381,6 +3528,9 @@ int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); if (err) return err; @@ -3399,6 +3549,9 @@ struct mlx4_cmd_info *cmd) { struct mlx4_qp_context *context = inbox->buf + 8; + int err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; adjust_proxy_tun_qkey(dev, vhcr, context); return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } @@ -3412,6 +3565,9 @@ int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); if (err) return err; @@ -3431,6 +3587,9 @@ int err; struct mlx4_qp_context *context = inbox->buf + 8; + err = adjust_qp_sched_queue(dev, slave, context, inbox); + if (err) + return err; err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); if (err) return err; @@ -3534,16 +3693,26 @@ return err; } -static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - int block_loopback, enum mlx4_protocol prot, +static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, + u8 gid[16], int block_loopback, enum mlx4_protocol prot, enum mlx4_steer_type type, u64 *reg_id) { switch (dev->caps.steering_mode) { - case MLX4_STEERING_MODE_DEVICE_MANAGED: - return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5], + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, block_loopback, prot, reg_id); + } case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) { + int port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (port < 0) + return port; + gid[5] = port; + } return mlx4_qp_attach_common(dev, qp, gid, block_loopback, prot, type); default: @@ -3551,9 +3720,9 @@ } } -static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - enum mlx4_protocol prot, enum mlx4_steer_type type, - u64 reg_id) +static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, + u8 gid[16], enum mlx4_protocol prot, + enum mlx4_steer_type type, u64 reg_id) { switch (dev->caps.steering_mode) { case MLX4_STEERING_MODE_DEVICE_MANAGED: @@ -3565,6 +3734,25 @@ } } +static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, + u8 *gid, enum mlx4_protocol prot) +{ + int real_port; + + if (prot != MLX4_PROT_ETH) + return 0; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || + dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + real_port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (real_port < 0) + return -EINVAL; + gid[5] = real_port; + } + + return 0; +} + int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3590,7 +3778,7 @@ qp.qpn = qpn; if (attach) { - err = 
qp_attach(dev, &qp, gid, block_loopback, prot, + err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, type, ®_id); if (err) { pr_err("Fail to attach rule to qp 0x%x\n", qpn); @@ -3600,6 +3788,10 @@ if (err) goto ex_detach; } else { + err = mlx4_adjust_port(dev, slave, gid, prot); + if (err) + goto ex_put; + err = rem_mcg_res(dev, slave, rqp, gid, prot, type, ®_id); if (err) goto ex_put; @@ -3634,7 +3826,7 @@ !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { list_for_each_entry_safe(res, tmp, rlist, list) { be_mac = cpu_to_be64(res->mac << 16); - if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN)) + if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac)) return 0; } pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", @@ -3704,6 +3896,60 @@ } +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context *)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. 
smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } + + err = mlx4_cmd(dev, inbox->dma, + vhcr->in_modifier, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); + goto err_mac; + } + +err_mac: + put_res(dev, slave, qpn, RES_QP); + return err; +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3726,6 +3972,9 @@ return -EOPNOTSUPP; ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); + if (ctrl->port <= 0) + return -EINVAL; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { @@ -4433,7 +4682,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); - + mlx4_reset_roce_gids(dev, slave); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); rem_slave_vlans(dev, slave); rem_slave_macs(dev, slave); --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx4/srq.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -117,7 +117,7 @@ mlx4_table_put(dev, &srq_table->table, *srqn); err_out: - mlx4_bitmap_free(&srq_table->bitmap, *srqn); + mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR); return err; } @@ -145,7 +145,7 @@ mlx4_table_put(dev, &srq_table->cmpt_table, srqn); mlx4_table_put(dev, &srq_table->table, srqn); - mlx4_bitmap_free(&srq_table->bitmap, srqn); + mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR); } static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -4,5 +4,5 @@ config MLX5_CORE tristate - depends on PCI && X86 + depends on PCI default n --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -201,10 +201,23 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - int type, struct mlx5_cq_modify_params *params) + struct mlx5_modify_cq_mbox_in *in, int in_sz) { - return -ENOSYS; + struct mlx5_modify_cq_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); + err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return mlx5_cmd_status_to_err(&out.hdr); + + return 0; } +EXPORT_SYMBOL(mlx5_core_modify_cq); int mlx5_init_cq_table(struct mlx5_core_dev *dev) { --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -275,7 +275,7 @@ } static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - int index) + int index, int *is_str) { struct mlx5_query_qp_mbox_out *out; struct mlx5_qp_context *ctx; @@ -293,19 +293,40 @@ goto out; } + *is_str = 0; ctx = &out->ctx; switch (index) { case QP_PID: param = qp->pid; break; case QP_STATE: - param = be32_to_cpu(ctx->flags) >> 28; + param = (u64)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28); + *is_str = 1; break; case QP_XPORT: - param = (be32_to_cpu(ctx->flags) >> 16) & 0xff; + param = (u64)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff); + *is_str = 1; break; case QP_MTU: - param = ctx->mtu_msgmax >> 5; + switch (ctx->mtu_msgmax >> 5) { + case IB_MTU_256: + param = 256; + break; + 
case IB_MTU_512: + param = 512; + break; + case IB_MTU_1024: + param = 1024; + break; + case IB_MTU_2048: + param = 2048; + break; + case IB_MTU_4096: + param = 4096; + break; + default: + param = 0; + } break; case QP_N_RECV: param = 1 << ((ctx->rq_size_stride >> 3) & 0xf); @@ -414,6 +435,7 @@ struct mlx5_field_desc *desc; struct mlx5_rsc_debug *d; char tbuf[18]; + int is_str = 0; u64 field; int ret; @@ -424,7 +446,7 @@ d = (void *)(desc - desc->i) - sizeof(*d); switch (d->type) { case MLX5_DBG_RSC_QP: - field = qp_read_field(d->dev, d->object, desc->i); + field = qp_read_field(d->dev, d->object, desc->i, &is_str); break; case MLX5_DBG_RSC_EQ: @@ -440,7 +462,12 @@ return -EINVAL; } - ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + + if (is_str) + ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)field); + else + ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); + if (ret > 0) { if (copy_to_user(buf, tbuf, ret)) return -EFAULT; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -46,8 +46,8 @@ #include "mlx5_core.h" #define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "1.0" -#define DRIVER_RELDATE "June 2013" +#define DRIVER_VERSION "2.2-1" +#define DRIVER_RELDATE "Feb 2014" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); @@ -460,7 +460,10 @@ err_stop_poll: mlx5_stop_health_poll(dev); - mlx5_cmd_teardown_hca(dev); + if (mlx5_cmd_teardown_hca(dev)) { + dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); + return err; + } err_pagealloc_stop: mlx5_pagealloc_stop(dev); @@ -503,7 +506,10 @@ mlx5_eq_cleanup(dev); mlx5_disable_msix(dev); mlx5_stop_health_poll(dev); - mlx5_cmd_teardown_hca(dev); + if (mlx5_cmd_teardown_hca(dev)) { + dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); + return; + } mlx5_pagealloc_stop(dev); mlx5_reclaim_startup_pages(dev); mlx5_core_disable_hca(dev); @@ -531,7 +537,6 @@ return 0; - mlx5_health_cleanup(); err_debug: mlx5_unregister_debugfs(); return err; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -99,7 +99,7 @@ enum { MLX5_MAX_RECLAIM_TIME_MILI = 5000, - MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / 4096, + MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, }; static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) @@ -192,10 +192,8 @@ struct fw_page *fp; unsigned n; - if (list_empty(&dev->priv.free_list)) { + if (list_empty(&dev->priv.free_list)) return -ENOMEM; - mlx5_core_warn(dev, "\n"); - } fp = list_entry(dev->priv.free_list.next, struct fw_page, list); n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); @@ -208,7 +206,7 @@ if (!fp->free_count) list_del(&fp->list); - *addr = fp->addr + n * 4096; + *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE; return 0; } @@ -224,14 +222,15 @@ return; } - n = (addr & ~PAGE_MASK) % 4096; + n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT; fwp->free_count++; set_bit(n, &fwp->bitmask); if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) { rb_erase(&fwp->rb_node, &dev->priv.page_root); if (fwp->free_count != 1) list_del(&fwp->list); - dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE, + DMA_BIDIRECTIONAL); __free_page(fwp->page); kfree(fwp); } else if (fwp->free_count == 1) { --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/port.c 
+++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -57,7 +57,7 @@ in->arg = cpu_to_be32(arg); in->register_id = cpu_to_be16(reg_num); err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out, - sizeof(out) + size_out); + sizeof(*out) + size_out); if (err) goto ex2; --- linux-3.13.0.orig/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ linux-3.13.0/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -74,7 +74,7 @@ struct mlx5_destroy_qp_mbox_out dout; int err; - memset(&dout, 0, sizeof(dout)); + memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); @@ -84,7 +84,8 @@ } if (out.hdr.status) { - pr_warn("current num of QPs 0x%x\n", atomic_read(&dev->num_qps)); + mlx5_core_warn(dev, "current num of QPs 0x%x\n", + atomic_read(&dev->num_qps)); return mlx5_cmd_status_to_err(&out.hdr); } --- linux-3.13.0.orig/drivers/net/ethernet/micrel/ks8851.c +++ linux-3.13.0/drivers/net/ethernet/micrel/ks8851.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -83,6 +84,7 @@ * @rc_rxqcr: Cached copy of KS_RXQCR. * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. + * @vdd_reg: Optional regulator supplying the chip * * The @lock ensures that the chip is protected when certain operations are * in progress. When the read or write packet transfer is in progress, most @@ -130,6 +132,7 @@ struct spi_transfer spi_xfer2[2]; struct eeprom_93cx6 eeprom; + struct regulator *vdd_reg; }; static int msg_enable; @@ -1414,6 +1417,21 @@ ks->spidev = spi; ks->tx_space = 6144; + ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); + if (IS_ERR(ks->vdd_reg)) { + ret = PTR_ERR(ks->vdd_reg); + if (ret == -EPROBE_DEFER) + goto err_reg; + } else { + ret = regulator_enable(ks->vdd_reg); + if (ret) { + dev_err(&spi->dev, "regulator enable fail: %d\n", + ret); + goto err_reg_en; + } + } + + mutex_init(&ks->lock); spin_lock_init(&ks->statelock); @@ -1508,8 +1526,14 @@ err_netdev: free_irq(ndev->irq, ks); -err_id: err_irq: +err_id: + if (!IS_ERR(ks->vdd_reg)) + regulator_disable(ks->vdd_reg); +err_reg_en: + if (!IS_ERR(ks->vdd_reg)) + regulator_put(ks->vdd_reg); +err_reg: free_netdev(ndev); return ret; } @@ -1523,6 +1547,10 @@ unregister_netdev(priv->netdev); free_irq(spi->irq, priv); + if (!IS_ERR(priv->vdd_reg)) { + regulator_disable(priv->vdd_reg); + regulator_put(priv->vdd_reg); + } free_netdev(priv->netdev); return 0; --- linux-3.13.0.orig/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ linux-3.13.0/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -872,6 +872,10 @@ return -ENOMEM; dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); + if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { + __free_page(dmatest_page); + return -ENOMEM; + } /* Run a small DMA test. 
* The magic multipliers to the length tell the firmware @@ -1293,6 +1297,7 @@ int bytes, int watchdog) { struct page *page; + dma_addr_t bus; int idx; #if MYRI10GE_ALLOC_SIZE > 4096 int end_offset; @@ -1317,11 +1322,21 @@ rx->watchdog_needed = 1; return; } + + bus = pci_map_page(mgp->pdev, page, 0, + MYRI10GE_ALLOC_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + __free_pages(page, MYRI10GE_ALLOC_ORDER); + if (rx->fill_cnt - rx->cnt < 16) + rx->watchdog_needed = 1; + return; + } + rx->page = page; rx->page_offset = 0; - rx->bus = pci_map_page(mgp->pdev, page, 0, - MYRI10GE_ALLOC_SIZE, - PCI_DMA_FROMDEVICE); + rx->bus = bus; + } rx->info[idx].page = rx->page; rx->info[idx].page_offset = rx->page_offset; @@ -2765,6 +2780,35 @@ mb(); } +static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, + struct myri10ge_tx_buf *tx, int idx) +{ + unsigned int len; + int last_idx; + + /* Free any DMA resources we've alloced and clear out the skb slot */ + last_idx = (idx + 1) & tx->mask; + idx = tx->req & tx->mask; + do { + len = dma_unmap_len(&tx->info[idx], len); + if (len) { + if (tx->info[idx].skb != NULL) + pci_unmap_single(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + else + pci_unmap_page(mgp->pdev, + dma_unmap_addr(&tx->info[idx], + bus), len, + PCI_DMA_TODEVICE); + dma_unmap_len_set(&tx->info[idx], len, 0); + tx->info[idx].skb = NULL; + } + idx = (idx + 1) & tx->mask; + } while (idx != last_idx); +} + /* * Transmit a packet. We need to split the packet so that a single * segment does not cross myri10ge->tx_boundary, so this makes segment @@ -2788,7 +2832,7 @@ u32 low; __be32 high_swapped; unsigned int len; - int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments; + int idx, avail, frag_cnt, frag_idx, count, mss, max_segments; u16 pseudo_hdr_offset, cksum_offset, queue; int cum_len, seglen, boundary, rdma_count; u8 flags, odd_flag; @@ -2885,9 +2929,12 @@ /* map the skb for DMA */ len = skb_headlen(skb); + bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) + goto drop; + idx = tx->req & tx->mask; tx->info[idx].skb = skb; - bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); dma_unmap_addr_set(&tx->info[idx], bus, bus); dma_unmap_len_set(&tx->info[idx], len, len); @@ -2986,12 +3033,16 @@ break; /* map next fragment for DMA */ - idx = (count + tx->req) & tx->mask; frag = &skb_shinfo(skb)->frags[frag_idx]; frag_idx++; len = skb_frag_size(frag); bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + myri10ge_unmap_tx_dma(mgp, tx, idx); + goto drop; + } + idx = (count + tx->req) & tx->mask; dma_unmap_addr_set(&tx->info[idx], bus, bus); dma_unmap_len_set(&tx->info[idx], len, len); } @@ -3022,31 +3073,8 @@ return NETDEV_TX_OK; abort_linearize: - /* Free any DMA resources we've alloced and clear out the skb - * slot so as to not trip up assertions, and to avoid a - * double-free if linearizing fails */ + myri10ge_unmap_tx_dma(mgp, tx, idx); - last_idx = (idx + 1) & tx->mask; - idx = tx->req & tx->mask; - tx->info[idx].skb = NULL; - do { - len = dma_unmap_len(&tx->info[idx], len); - if (len) { - if (tx->info[idx].skb != NULL) - pci_unmap_single(mgp->pdev, - dma_unmap_addr(&tx->info[idx], - bus), len, - PCI_DMA_TODEVICE); - else - pci_unmap_page(mgp->pdev, - dma_unmap_addr(&tx->info[idx], - bus), len, - PCI_DMA_TODEVICE); - dma_unmap_len_set(&tx->info[idx], 
len, 0); - tx->info[idx].skb = NULL; - } - idx = (idx + 1) & tx->mask; - } while (idx != last_idx); if (skb_is_gso(skb)) { netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n"); goto drop; --- linux-3.13.0.orig/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ linux-3.13.0/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3135,12 +3135,12 @@ u64 packets, bytes, multicast; do { - start = u64_stats_fetch_begin_bh(&rxstats->syncp); + start = u64_stats_fetch_begin_irq(&rxstats->syncp); packets = rxstats->rx_frms; multicast = rxstats->rx_mcast; bytes = rxstats->rx_bytes; - } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start)); net_stats->rx_packets += packets; net_stats->rx_bytes += bytes; @@ -3150,11 +3150,11 @@ net_stats->rx_dropped += rxstats->rx_dropped; do { - start = u64_stats_fetch_begin_bh(&txstats->syncp); + start = u64_stats_fetch_begin_irq(&txstats->syncp); packets = txstats->tx_frms; bytes = txstats->tx_bytes; - } while (u64_stats_fetch_retry_bh(&txstats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&txstats->syncp, start)); net_stats->tx_packets += packets; net_stats->tx_bytes += bytes; @@ -4206,6 +4206,9 @@ return ret; } +#define VXGE_PXE_FIRMWARE "vxge/X3fw-pxe.ncf" +#define VXGE_FIRMWARE "vxge/X3fw.ncf" + static int vxge_probe_fw_update(struct vxgedev *vdev) { u32 maj, min, bld; @@ -4248,9 +4251,9 @@ } } if (gpxe) - fw_name = "vxge/X3fw-pxe.ncf"; + fw_name = VXGE_PXE_FIRMWARE; else - fw_name = "vxge/X3fw.ncf"; + fw_name = VXGE_FIRMWARE; ret = vxge_fw_upgrade(vdev, fw_name, 0); /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on @@ -4855,3 +4858,5 @@ } module_init(vxge_starter); module_exit(vxge_closer); +MODULE_FIRMWARE(VXGE_PXE_FIRMWARE); +MODULE_FIRMWARE(VXGE_FIRMWARE); --- linux-3.13.0.orig/drivers/net/ethernet/nvidia/forcedeth.c +++ linux-3.13.0/drivers/net/ethernet/nvidia/forcedeth.c @@ -1755,19 +1755,19 @@ /* software stats */ do { - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); storage->rx_packets = np->stat_rx_packets; storage->rx_bytes = np->stat_rx_bytes; storage->rx_dropped = np->stat_rx_dropped; storage->rx_missed_errors = np->stat_rx_missed_errors; - } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); + } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); do { - syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); + syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); storage->tx_packets = np->stat_tx_packets; storage->tx_bytes = np->stat_tx_bytes; storage->tx_dropped = np->stat_tx_dropped; - } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); + } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); /* If the nic supports hw counters then retrieve latest values */ if (np->driver_data & DEV_HAS_STATISTICS_V123) { --- linux-3.13.0.orig/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ linux-3.13.0/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2392,7 +2392,10 @@ work_done = netxen_process_rcv_ring(sds_ring, budget); - if ((work_done < budget) && tx_complete) { + if (!tx_complete) + work_done = budget; + + if (work_done < budget) { napi_complete(&sds_ring->napi); if (test_bit(__NX_DEV_UP, &adapter->state)) netxen_nic_enable_int(sds_ring); --- linux-3.13.0.orig/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ linux-3.13.0/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ 
-1047,6 +1047,7 @@ struct qlcnic_dcb_cee *peer; int i; + memset(info, 0, sizeof(*info)); *app_count = 0; if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state)) --- linux-3.13.0.orig/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ linux-3.13.0/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2352,23 +2352,29 @@ { struct ql_adapter *qdev = netdev_priv(ndev); int status = 0; + bool need_restart = netif_running(ndev); - status = ql_adapter_down(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring down the adapter\n"); - return status; + if (need_restart) { + status = ql_adapter_down(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring down the adapter\n"); + return status; + } } /* update the features with resent change */ ndev->features = features; - status = ql_adapter_up(qdev); - if (status) { - netif_err(qdev, link, qdev->ndev, - "Failed to bring up the adapter\n"); - return status; + if (need_restart) { + status = ql_adapter_up(qdev); + if (status) { + netif_err(qdev, link, qdev->ndev, + "Failed to bring up the adapter\n"); + return status; + } } + return status; } --- linux-3.13.0.orig/drivers/net/ethernet/realtek/8139cp.c +++ linux-3.13.0/drivers/net/ethernet/realtek/8139cp.c @@ -899,7 +899,7 @@ return NETDEV_TX_OK; out_dma_error: - kfree_skb(skb); + dev_kfree_skb_any(skb); cp->dev->stats.tx_dropped++; goto out_unlock; } --- linux-3.13.0.orig/drivers/net/ethernet/realtek/8139too.c +++ linux-3.13.0/drivers/net/ethernet/realtek/8139too.c @@ -1717,9 +1717,9 @@ if (len < ETH_ZLEN) memset(tp->tx_buf[entry], 0, ETH_ZLEN); skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); } else { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } @@ -2522,16 +2522,16 @@ netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); stats->rx_packets = tp->rx_stats.packets; stats->rx_bytes = tp->rx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); return stats; } --- linux-3.13.0.orig/drivers/net/ethernet/realtek/r8169.c +++ linux-3.13.0/drivers/net/ethernet/realtek/r8169.c @@ -5835,7 +5835,7 @@ tp->TxDescArray + entry); if (skb) { tp->dev->stats.tx_dropped++; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_skb->skb = NULL; } } @@ -6060,7 +6060,7 @@ err_dma_1: rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); err_dma_0: - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); err_update_stats: dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -6143,7 +6143,7 @@ tp->tx_stats.packets++; tp->tx_stats.bytes += tx_skb->skb->len; u64_stats_update_end(&tp->tx_stats.syncp); - dev_kfree_skb(tx_skb->skb); + dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } dirty_tx++; @@ -6591,17 +6591,17 @@ rtl8169_rx_missed(dev, ioaddr); do { - start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); stats->rx_packets = tp->rx_stats.packets; stats->rx_bytes = tp->rx_stats.bytes; - } while 
(u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); stats->tx_packets = tp->tx_stats.packets; stats->tx_bytes = tp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; --- linux-3.13.0.orig/drivers/net/ethernet/renesas/sh_eth.c +++ linux-3.13.0/drivers/net/ethernet/renesas/sh_eth.c @@ -247,6 +247,27 @@ }; static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + [EDMR] = 0x0000, + [EDTRR] = 0x0004, + [EDRRR] = 0x0008, + [TDLAR] = 0x000c, + [RDLAR] = 0x0010, + [EESR] = 0x0014, + [EESIPR] = 0x0018, + [TRSCER] = 0x001c, + [RMFCR] = 0x0020, + [TFTR] = 0x0024, + [FDR] = 0x0028, + [RMCR] = 0x002c, + [EDOCR] = 0x0030, + [FCFTR] = 0x0034, + [RPADIR] = 0x0038, + [TRIMD] = 0x003c, + [RBWAR] = 0x0040, + [RDFAR] = 0x0044, + [TBRAR] = 0x004c, + [TDFAR] = 0x0050, + [ECMR] = 0x0160, [ECSR] = 0x0164, [ECSIPR] = 0x0168, @@ -483,7 +504,6 @@ .register_type = SH_ETH_REG_FAST_SH4, .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, - .rmcr_value = RMCR_RNC, .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | @@ -561,7 +581,6 @@ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .fdr_value = 0x0000072f, - .rmcr_value = RMCR_RNC, .irq_flags = IRQF_SHARED, .apr = 1, @@ -689,7 +708,6 @@ EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .fdr_value = 0x0000070f, - .rmcr_value = RMCR_RNC, .apr = 1, .mpr = 1, @@ -738,9 +756,6 @@ if (!cd->fdr_value) cd->fdr_value = DEFAULT_FDR_INIT; - if (!cd->rmcr_value) - cd->rmcr_value = DEFAULT_RMCR_VALUE; - if (!cd->tx_check) cd->tx_check = DEFAULT_TX_CHECK; @@ -1193,8 +1208,8 @@ sh_eth_write(ndev, mdp->cd->fdr_value, FDR); sh_eth_write(ndev, 0, TFTR); - /* Frame recv control */ - sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); + /* Frame recv control (enable multiple-packets per rx irq) */ + sh_eth_write(ndev, RMCR_RNC, RMCR); sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); --- linux-3.13.0.orig/drivers/net/ethernet/renesas/sh_eth.h +++ linux-3.13.0/drivers/net/ethernet/renesas/sh_eth.h @@ -324,7 +324,6 @@ enum RMCR_BIT { RMCR_RNC = 0x00000001, }; -#define DEFAULT_RMCR_VALUE 0x00000000 /* ECMR */ enum FELIC_MODE_BIT { @@ -473,7 +472,6 @@ unsigned long fdr_value; unsigned long fcftr_value; unsigned long rpadir_value; - unsigned long rmcr_value; /* interrupt checking mask */ unsigned long tx_check; --- linux-3.13.0.orig/drivers/net/ethernet/sfc/ef10.c +++ linux-3.13.0/drivers/net/ethernet/sfc/ef10.c @@ -555,10 +555,17 @@ * several of each (in fact that's the only option if host * page size is >4K). So we may allocate some extra VIs just * for writing PIO buffers through. + * + * The UC mapping contains (min_vis - 1) complete VIs and the + * first half of the next VI. Then the WC mapping begins with + * the second half of this last VI. */ uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF); if (nic_data->n_piobufs) { + /* pio_write_vi_base rounds down to give the number of complete + * VIs inside the UC mapping. 
+ */ pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE; wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + nic_data->n_piobufs) * --- linux-3.13.0.orig/drivers/net/ethernet/sfc/efx.c +++ linux-3.13.0/drivers/net/ethernet/sfc/efx.c @@ -1514,6 +1514,8 @@ if (rc) goto fail1; + efx_set_channels(efx); + rc = efx->type->dimension_resources(efx); if (rc) goto fail2; @@ -1524,7 +1526,6 @@ efx->rx_indir_table[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); - efx_set_channels(efx); netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); --- linux-3.13.0.orig/drivers/net/ethernet/sfc/efx.h +++ linux-3.13.0/drivers/net/ethernet/sfc/efx.h @@ -66,6 +66,9 @@ #define EFX_RXQ_MIN_ENT 128U #define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) +#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ + EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) + /* Filters */ /** --- linux-3.13.0.orig/drivers/net/ethernet/sfc/ethtool.c +++ linux-3.13.0/drivers/net/ethernet/sfc/ethtool.c @@ -583,7 +583,7 @@ struct efx_nic *efx = netdev_priv(net_dev); ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; - ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; + ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx); ring->rx_pending = efx->rxq_entries; ring->tx_pending = efx->txq_entries; } @@ -596,7 +596,7 @@ if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || - ring->tx_pending > EFX_MAX_DMAQ_SIZE) + ring->tx_pending > EFX_TXQ_MAX_ENT(efx)) return -EINVAL; if (ring->rx_pending < EFX_RXQ_MIN_ENT) { --- linux-3.13.0.orig/drivers/net/ethernet/sfc/io.h +++ linux-3.13.0/drivers/net/ethernet/sfc/io.h @@ -66,10 +66,17 @@ #define EFX_USE_QWORD_IO 1 #endif +/* Hardware issue requires that only 64-bit naturally aligned writes + * are seen by hardware. Its not strictly necessary to restrict to + * x86_64 arch, but done for safety since unusual write combining behaviour + * can break PIO. + */ +#ifdef CONFIG_X86_64 /* PIO is a win only if write-combining is possible */ #ifdef ARCH_HAS_IOREMAP_WC #define EFX_USE_PIO 1 #endif +#endif #ifdef EFX_USE_QWORD_IO static inline void _efx_writeq(struct efx_nic *efx, __le64 value, --- linux-3.13.0.orig/drivers/net/ethernet/sfc/nic.c +++ linux-3.13.0/drivers/net/ethernet/sfc/nic.c @@ -156,13 +156,15 @@ efx->net_dev->rx_cpu_rmap = NULL; #endif - /* Disable MSI/MSI-X interrupts */ - efx_for_each_channel(channel, efx) - free_irq(channel->irq, &efx->msi_context[channel->channel]); - - /* Disable legacy interrupt */ - if (efx->legacy_irq) + if (EFX_INT_MODE_USE_MSI(efx)) { + /* Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + free_irq(channel->irq, + &efx->msi_context[channel->channel]); + } else { + /* Disable legacy interrupt */ free_irq(efx->legacy_irq, efx); + } } /* Register dump */ --- linux-3.13.0.orig/drivers/net/ethernet/sfc/ptp.c +++ linux-3.13.0/drivers/net/ethernet/sfc/ptp.c @@ -1360,6 +1360,13 @@ struct efx_ptp_data *ptp = efx->ptp_data; int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + if (!ptp) { + if (net_ratelimit()) + netif_warn(efx, drv, efx->net_dev, + "Received PTP event but PTP not set up\n"); + return; + } + if (!ptp->enabled) return; --- linux-3.13.0.orig/drivers/net/ethernet/sfc/tx.c +++ linux-3.13.0/drivers/net/ethernet/sfc/tx.c @@ -189,6 +189,18 @@ u8 buf[L1_CACHE_BYTES]; }; +/* Copy in explicit 64-bit writes. 
*/ +static void efx_memcpy_64(void __iomem *dest, void *src, size_t len) +{ + u64 *src64 = src; + u64 __iomem *dest64 = dest; + size_t l64 = len / 8; + size_t i; + + for (i = 0; i < l64; i++) + writeq(src64[i], &dest64[i]); +} + /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. * Advances piobuf pointer. Leaves additional data in the copy buffer. */ @@ -198,7 +210,7 @@ { int block_len = len & ~(sizeof(copy_buf->buf) - 1); - memcpy_toio(*piobuf, data, block_len); + efx_memcpy_64(*piobuf, data, block_len); *piobuf += block_len; len -= block_len; @@ -230,7 +242,7 @@ if (copy_buf->used < sizeof(copy_buf->buf)) return; - memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); + efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); *piobuf += sizeof(copy_buf->buf); data += copy_to_buf; len -= copy_to_buf; @@ -245,7 +257,7 @@ { /* if there's anything in it, write the whole buffer, including junk */ if (copy_buf->used) - memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); + efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); } /* Traverse skb structure and copy fragments in to PIO buffer. @@ -304,8 +316,8 @@ */ BUILD_BUG_ON(L1_CACHE_BYTES > SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - memcpy_toio(tx_queue->piobuf, skb->data, - ALIGN(skb->len, L1_CACHE_BYTES)); + efx_memcpy_64(tx_queue->piobuf, skb->data, + ALIGN(skb->len, L1_CACHE_BYTES)); } EFX_POPULATE_QWORD_5(buffer->option, --- linux-3.13.0.orig/drivers/net/ethernet/smsc/smsc911x.c +++ linux-3.13.0/drivers/net/ethernet/smsc/smsc911x.c @@ -1342,6 +1342,42 @@ spin_unlock(&pdata->mac_lock); } +static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) +{ + int rc = 0; + + if (!pdata->phy_dev) + return rc; + + /* If the internal PHY is in General Power-Down mode, all, except the + * management interface, is powered-down and stays in that condition as + * long as Phy register bit 0.11 is HIGH. + * + * In that case, clear the bit 0.11, so the PHY powers up and we can + * access to the phy registers. + */ + rc = phy_read(pdata->phy_dev, MII_BMCR); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); + return rc; + } + + /* If the PHY general power-down bit is not set is not necessary to + * disable the general power down-mode. + */ + if (rc & BMCR_PDOWN) { + rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + if (rc < 0) { + SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); + return rc; + } + + usleep_range(1000, 1500); + } + + return 0; +} + static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) { int rc = 0; @@ -1415,6 +1451,16 @@ int ret; /* + * Make sure to power-up the PHY chip before doing a reset, otherwise + * the reset fails. + */ + ret = smsc911x_phy_general_power_up(pdata); + if (ret) { + SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip"); + return ret; + } + + /* * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that * are initialized in a Energy Detect Power-Down mode that prevents * the MAC chip to be software reseted. 
So we have to wakeup the PHY --- linux-3.13.0.orig/drivers/net/ethernet/sun/sunvnet.c +++ linux-3.13.0/drivers/net/ethernet/sun/sunvnet.c @@ -656,7 +656,7 @@ spin_lock_irqsave(&port->vio.lock, flags); dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - if (unlikely(vnet_tx_dring_avail(dr) < 2)) { + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); @@ -704,7 +704,7 @@ dev->stats.tx_bytes += skb->len; dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); - if (unlikely(vnet_tx_dring_avail(dr) < 2)) { + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { netif_stop_queue(dev); if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) netif_wake_queue(dev); @@ -1083,6 +1083,24 @@ return vp; } +static void vnet_cleanup(void) +{ + struct vnet *vp; + struct net_device *dev; + + mutex_lock(&vnet_list_mutex); + while (!list_empty(&vnet_list)) { + vp = list_first_entry(&vnet_list, struct vnet, list); + list_del(&vp->list); + dev = vp->dev; + /* vio_unregister_driver() should have cleaned up port_list */ + BUG_ON(!list_empty(&vp->port_list)); + unregister_netdev(dev); + free_netdev(dev); + } + mutex_unlock(&vnet_list_mutex); +} + static const char *local_mac_prop = "local-mac-address"; static struct vnet *vnet_find_parent(struct mdesc_handle *hp, @@ -1240,7 +1258,6 @@ kfree(port); - unregister_netdev(vp->dev); } return 0; } @@ -1268,6 +1285,7 @@ static void __exit vnet_exit(void) { vio_unregister_driver(&vnet_port_driver); + vnet_cleanup(); } module_init(vnet_init); --- linux-3.13.0.orig/drivers/net/ethernet/ti/Kconfig +++ linux-3.13.0/drivers/net/ethernet/ti/Kconfig @@ -62,6 +62,8 @@ select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO select TI_CPSW_PHY_SEL + select MFD_SYSCON + select REGMAP ---help--- This driver supports TI's CPSW Ethernet Switch. --- linux-3.13.0.orig/drivers/net/ethernet/ti/cpsw.c +++ linux-3.13.0/drivers/net/ethernet/ti/cpsw.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include @@ -637,6 +639,14 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; + int value = irq - priv->irqs_table[0]; + + /* NOTICE: Ending IRQ here. The trick with the 'value' variable above + * is to make sure we will always write the correct value to the EOI + * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 + * for TX Interrupt and 3 for MISC Interrupt. 
+ */ + cpdma_ctlr_eoi(priv->dma, value); cpsw_intr_disable(priv); if (priv->irq_enabled == true) { @@ -666,8 +676,6 @@ int num_tx, num_rx; num_tx = cpdma_chan_process(priv->txch, 128); - if (num_tx) - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { @@ -675,7 +683,6 @@ napi_complete(napi); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { prim_cpsw->irq_enabled = true; @@ -1169,6 +1176,10 @@ cpsw_set_coalesce(ndev, &coal); } + napi_enable(&priv->napi); + cpdma_ctlr_start(priv->dma); + cpsw_intr_enable(priv); + prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { @@ -1177,12 +1188,6 @@ } } - napi_enable(&priv->napi); - cpdma_ctlr_start(priv->dma); - cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - if (priv->data.dual_emac) priv->slaves[priv->emac_port].open_stat = true; return 0; @@ -1430,9 +1435,6 @@ cpdma_chan_start(priv->txch); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) @@ -1478,9 +1480,6 @@ cpsw_interrupt(ndev->irq, priv); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - } #endif @@ -1524,6 +1523,19 @@ if (vid == priv->data.default_vlan) return 0; + if (priv->data.dual_emac) { + /* In dual EMAC, reserved VLAN id should not be used for + * creating VLAN interfaces as this can break the dual + * EMAC port separation + */ + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); return cpsw_add_vlan_ale_entry(priv, vid); } @@ -1537,6 +1549,15 @@ if (vid == priv->data.default_vlan) return 0; + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (vid == priv->slaves[i].port_vlan) + return -EINVAL; + } + } + dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); ret = cpsw_ale_del_vlan(priv->ale, vid, 0); if (ret != 0) @@ -1700,6 +1721,36 @@ slave->port_vlan = data->dual_emac_res_vlan; } +#define AM33XX_CTRL_MAC_LO_REG(id) (0x630 + 0x8 * id) +#define AM33XX_CTRL_MAC_HI_REG(id) (0x630 + 0x8 * id + 0x4) + +static int cpsw_am33xx_cm_get_macid(struct device *dev, int slave, + u8 *mac_addr) +{ + u32 macid_lo; + u32 macid_hi; + struct regmap *syscon; + + syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); + if (IS_ERR(syscon)) { + if (PTR_ERR(syscon) == -ENODEV) + return 0; + return PTR_ERR(syscon); + } + + regmap_read(syscon, AM33XX_CTRL_MAC_LO_REG(slave), &macid_lo); + regmap_read(syscon, AM33XX_CTRL_MAC_HI_REG(slave), &macid_hi); + + mac_addr[5] = (macid_lo >> 8) & 0xff; + mac_addr[4] = macid_lo & 0xff; + mac_addr[3] = (macid_hi >> 24) & 0xff; + mac_addr[2] = (macid_hi >> 16) & 0xff; + mac_addr[1] = (macid_hi >> 8) & 0xff; + mac_addr[0] = macid_hi & 0xff; + + return 0; +} + static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *pdev) { @@ -1807,8 +1858,16 @@ PHY_ID_FMT, mdio->name, phyid); mac_addr = of_get_mac_address(slave_node); - if (mac_addr) + if (mac_addr) { memcpy(slave_data->mac_addr, 
mac_addr, ETH_ALEN); + } else { + if (of_machine_is_compatible("ti,am33xx")) { + ret = cpsw_am33xx_cm_get_macid(&pdev->dev, i, + slave_data->mac_addr); + if (ret) + return ret; + } + } slave_data->phy_if = of_get_phy_mode(slave_node); @@ -1937,6 +1996,7 @@ priv->irq_enabled = true; if (!priv->cpts) { pr_err("error allocating cpts\n"); + ret = -ENOMEM; goto clean_ndev_ret; } @@ -2133,10 +2193,6 @@ goto clean_ale_ret; } - if (cpts_register(&pdev->dev, priv->cpts, - data->cpts_clock_mult, data->cpts_clock_shift)) - dev_err(priv->dev, "error registering cpts device\n"); - cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", ss_res->start, ndev->irq); --- linux-3.13.0.orig/drivers/net/ethernet/ti/cpsw.h +++ linux-3.13.0/drivers/net/ethernet/ti/cpsw.h @@ -15,6 +15,7 @@ #define __CPSW_H__ #include +#include struct cpsw_slave_data { char phy_id[MII_BUS_ID_SIZE]; --- linux-3.13.0.orig/drivers/net/ethernet/ti/davinci_emac.c +++ linux-3.13.0/drivers/net/ethernet/ti/davinci_emac.c @@ -1532,7 +1532,7 @@ struct device *emac_dev = &ndev->dev; u32 cnt; struct resource *res; - int ret; + int q, m, ret; int i = 0; int k = 0; struct emac_priv *priv = netdev_priv(ndev); @@ -1567,8 +1567,7 @@ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { for (i = res->start; i <= res->end; i++) { - if (devm_request_irq(&priv->pdev->dev, i, emac_irq, - 0, ndev->name, ndev)) + if (request_irq(i, emac_irq, 0, ndev->name, ndev)) goto rollback; } k++; @@ -1641,7 +1640,15 @@ rollback: - dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); + dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); + + for (q = k; k >= 0; k--) { + for (m = i; m >= res->start; m--) + free_irq(m, ndev); + res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1); + m = res->end; + } + ret = -EBUSY; err: pm_runtime_put(&priv->pdev->dev); @@ -1659,6 +1666,9 @@ */ static int emac_dev_stop(struct net_device *ndev) { + struct resource *res; + int i = 0; + int irq_num; struct emac_priv *priv = netdev_priv(ndev); struct device *emac_dev = &ndev->dev; @@ -1674,6 +1684,13 @@ if (priv->phydev) phy_disconnect(priv->phydev); + /* Free IRQ */ + while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { + for (irq_num = res->start; irq_num <= res->end; irq_num++) + free_irq(irq_num, priv->ndev); + i++; + } + if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); --- linux-3.13.0.orig/drivers/net/ethernet/tile/tilegx.c +++ linux-3.13.0/drivers/net/ethernet/tile/tilegx.c @@ -2081,7 +2081,7 @@ /* Return subqueue id on this core (one per core). 
*/ static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { return smp_processor_id(); } --- linux-3.13.0.orig/drivers/net/ethernet/tile/tilepro.c +++ linux-3.13.0/drivers/net/ethernet/tile/tilepro.c @@ -2068,14 +2068,14 @@ cpu_stats = &priv->cpu[i]->stats; do { - start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); trx_packets = cpu_stats->rx_packets; ttx_packets = cpu_stats->tx_packets; trx_bytes = cpu_stats->rx_bytes; ttx_bytes = cpu_stats->tx_bytes; trx_errors = cpu_stats->rx_errors; trx_dropped = cpu_stats->rx_dropped; - } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); rx_packets += trx_packets; tx_packets += ttx_packets; --- linux-3.13.0.orig/drivers/net/ethernet/via/via-rhine.c +++ linux-3.13.0/drivers/net/ethernet/via/via-rhine.c @@ -2070,16 +2070,16 @@ netdev_stats_to_stats64(stats, &dev->stats); do { - start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); + start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); stats->rx_packets = rp->rx_stats.packets; stats->rx_bytes = rp->rx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); do { - start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); + start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); stats->tx_packets = rp->tx_stats.packets; stats->tx_bytes = rp->tx_stats.bytes; - } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); + } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); return stats; } --- linux-3.13.0.orig/drivers/net/hyperv/hyperv_net.h +++ linux-3.13.0/drivers/net/hyperv/hyperv_net.h @@ -463,7 +463,8 @@ #define NETVSC_MTU 65536 -#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */ +#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */ +#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */ #define NETVSC_RECEIVE_BUFFER_ID 0xcafe --- linux-3.13.0.orig/drivers/net/hyperv/netvsc.c +++ linux-3.13.0/drivers/net/hyperv/netvsc.c @@ -137,8 +137,7 @@ if (net_device->recv_buf) { /* Free up the receive buffer */ - free_pages((unsigned long)net_device->recv_buf, - get_order(net_device->recv_buf_size)); + vfree(net_device->recv_buf); net_device->recv_buf = NULL; } @@ -164,9 +163,7 @@ return -ENODEV; ndev = net_device->ndev; - net_device->recv_buf = - (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, - get_order(net_device->recv_buf_size)); + net_device->recv_buf = vzalloc(net_device->recv_buf_size); if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); @@ -362,6 +359,11 @@ goto cleanup; /* Post the big receive buffer to NetVSP */ + if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) + net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; + else + net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; + ret = netvsc_init_recv_buf(device); cleanup: @@ -911,7 +913,6 @@ ndev = net_device->ndev; /* Initialize the NetVSC channel extension */ - net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; spin_lock_init(&net_device->recv_pkt_list_lock); INIT_LIST_HEAD(&net_device->recv_pkt_list); --- linux-3.13.0.orig/drivers/net/hyperv/netvsc_drv.c +++ linux-3.13.0/drivers/net/hyperv/netvsc_drv.c @@ -89,8 +89,12 @@ { struct net_device_context *net_device_ctx = netdev_priv(net); struct 
hv_device *device_obj = net_device_ctx->device_ctx; + struct netvsc_device *nvdev; + struct rndis_device *rdev; int ret = 0; + netif_carrier_off(net); + /* Open up the device */ ret = rndis_filter_open(device_obj); if (ret != 0) { @@ -100,6 +104,11 @@ netif_start_queue(net); + nvdev = hv_get_drvdata(device_obj); + rdev = nvdev->extension; + if (!rdev->link_state) + netif_carrier_on(net); + return ret; } @@ -138,6 +147,7 @@ struct hv_netvsc_packet *packet; int ret; unsigned int i, num_pages, npg_data; + u32 skb_length = skb->len; /* Add multipages for skb->data and additional 2 for RNDIS */ npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1) @@ -208,7 +218,7 @@ ret = rndis_filter_send(net_device_ctx->device_ctx, packet); if (ret == 0) { - net->stats.tx_bytes += skb->len; + net->stats.tx_bytes += skb_length; net->stats.tx_packets++; } else { kfree(packet); @@ -230,23 +240,24 @@ struct net_device *net; struct net_device_context *ndev_ctx; struct netvsc_device *net_device; + struct rndis_device *rdev; net_device = hv_get_drvdata(device_obj); + rdev = net_device->extension; + + rdev->link_state = status != 1; + net = net_device->ndev; - if (!net) { - netdev_err(net, "got link status but net device " - "not initialized yet\n"); + if (!net || net->reg_state != NETREG_REGISTERED) return; - } + ndev_ctx = netdev_priv(net); if (status == 1) { - netif_carrier_on(net); - ndev_ctx = netdev_priv(net); schedule_delayed_work(&ndev_ctx->dwork, 0); schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); } else { - netif_carrier_off(net); + schedule_delayed_work(&ndev_ctx->dwork, 0); } } @@ -389,17 +400,35 @@ * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add * another netif_notify_peers() into a delayed work, otherwise GARP packet * will not be sent after quick migration, and cause network disconnection. + * Also, we update the carrier status here. 
*/ -static void netvsc_send_garp(struct work_struct *w) +static void netvsc_link_change(struct work_struct *w) { struct net_device_context *ndev_ctx; struct net_device *net; struct netvsc_device *net_device; + struct rndis_device *rdev; + bool notify; + + rtnl_lock(); ndev_ctx = container_of(w, struct net_device_context, dwork.work); net_device = hv_get_drvdata(ndev_ctx->device_ctx); + rdev = net_device->extension; net = net_device->ndev; - netdev_notify_peers(net); + + if (rdev->link_state) { + netif_carrier_off(net); + notify = false; + } else { + netif_carrier_on(net); + notify = true; + } + + rtnl_unlock(); + + if (notify) + netdev_notify_peers(net); } @@ -415,13 +444,10 @@ if (!net) return -ENOMEM; - /* Set initial state */ - netif_carrier_off(net); - net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; hv_set_drvdata(dev, net); - INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); + INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); net->netdev_ops = &device_ops; @@ -444,8 +470,6 @@ } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); - netif_carrier_on(net); - ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); --- linux-3.13.0.orig/drivers/net/ieee802154/fakehard.c +++ linux-3.13.0/drivers/net/ieee802154/fakehard.c @@ -376,17 +376,20 @@ err = wpan_phy_register(phy); if (err) - goto out; + goto err_phy_reg; err = register_netdev(dev); - if (err < 0) - goto out; + if (err) + goto err_netdev_reg; dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); return 0; -out: - unregister_netdev(dev); +err_netdev_reg: + wpan_phy_unregister(phy); +err_phy_reg: + free_netdev(dev); + wpan_phy_free(phy); return err; } --- linux-3.13.0.orig/drivers/net/ifb.c +++ linux-3.13.0/drivers/net/ifb.c @@ -136,18 +136,18 @@ unsigned int start; do { - start = u64_stats_fetch_begin_bh(&dp->rsync); + start = u64_stats_fetch_begin_irq(&dp->rsync); stats->rx_packets = dp->rx_packets; stats->rx_bytes = dp->rx_bytes; - } while (u64_stats_fetch_retry_bh(&dp->rsync, start)); + } while (u64_stats_fetch_retry_irq(&dp->rsync, start)); do { - start = u64_stats_fetch_begin_bh(&dp->tsync); + start = u64_stats_fetch_begin_irq(&dp->tsync); stats->tx_packets = dp->tx_packets; stats->tx_bytes = dp->tx_bytes; - } while (u64_stats_fetch_retry_bh(&dp->tsync, start)); + } while (u64_stats_fetch_retry_irq(&dp->tsync, start)); stats->rx_dropped = dev->stats.rx_dropped; stats->tx_dropped = dev->stats.tx_dropped; --- linux-3.13.0.orig/drivers/net/loopback.c +++ linux-3.13.0/drivers/net/loopback.c @@ -112,10 +112,10 @@ lb_stats = per_cpu_ptr(dev->lstats, i); do { - start = u64_stats_fetch_begin_bh(&lb_stats->syncp); + start = u64_stats_fetch_begin_irq(&lb_stats->syncp); tbytes = lb_stats->bytes; tpackets = lb_stats->packets; - } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start)); bytes += tbytes; packets += tpackets; } --- linux-3.13.0.orig/drivers/net/macvlan.c +++ linux-3.13.0/drivers/net/macvlan.c @@ -263,11 +263,9 @@ const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; - __u8 ip_summed = skb->ip_summed; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; - skb->ip_summed = CHECKSUM_UNNECESSARY; /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { @@ -285,7 +283,6 @@ } xmit_world: - 
skb->ip_summed = ip_summed; skb->dev = vlan->lowerdev; return dev_queue_xmit(skb); } @@ -462,8 +459,10 @@ struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (change & IFF_ALLMULTI) - dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + } } static void macvlan_set_mac_lists(struct net_device *dev) @@ -507,6 +506,9 @@ static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; +#define ALWAYS_ON_FEATURES \ + (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) + #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ @@ -516,6 +518,11 @@ #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) +static int macvlan_get_nest_level(struct net_device *dev) +{ + return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; +} + static void macvlan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) @@ -526,8 +533,9 @@ static void macvlan_set_lockdep_class(struct net_device *dev) { - lockdep_set_class(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key, + macvlan_get_nest_level(dev)); netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); } @@ -540,7 +548,8 @@ dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; - dev->features |= NETIF_F_LLTX; + dev->features |= ALWAYS_ON_FEATURES; + dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->gso_max_size = lowerdev->gso_max_size; dev->iflink = lowerdev->ifindex; dev->hard_header_len = lowerdev->hard_header_len; @@ -587,13 +596,13 @@ for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->pcpu_stats, i); do { - start = u64_stats_fetch_begin_bh(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); rx_packets = p->rx_packets; rx_bytes = p->rx_bytes; rx_multicast = p->rx_multicast; tx_packets = p->tx_packets; tx_bytes = p->tx_bytes; - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -700,7 +709,8 @@ features = netdev_increment_features(vlan->lowerdev->features, features, mask); - features |= NETIF_F_LLTX; + features |= ALWAYS_ON_FEATURES; + features &= ~NETIF_F_NETNS_LOCAL; return features; } @@ -729,6 +739,7 @@ .ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, + .ndo_get_lock_subclass = macvlan_get_nest_level, }; void macvlan_common_setup(struct net_device *dev) @@ -864,6 +875,7 @@ vlan->receive = receive; vlan->forward = forward; vlan->set_features = MACVLAN_FEATURES; + vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) @@ -887,14 +899,15 @@ dev->priv_flags |= IFF_MACVLAN; err = netdev_upper_dev_link(lowerdev, dev); if (err) - goto destroy_port; - + goto unregister_netdev; list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; +unregister_netdev: + unregister_netdevice(dev); destroy_port: port->count -= 1; if 
(!port->count) @@ -1042,7 +1055,6 @@ list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); unregister_netdevice_many(&list_kill); - list_del(&list_kill); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ --- linux-3.13.0.orig/drivers/net/macvtap.c +++ linux-3.13.0/drivers/net/macvtap.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -108,17 +109,15 @@ return err; } +/* Requires RTNL */ static int macvtap_set_queue(struct net_device *dev, struct file *file, struct macvtap_queue *q) { struct macvlan_dev *vlan = netdev_priv(dev); - int err = -EBUSY; - rtnl_lock(); if (vlan->numqueues == MAX_MACVTAP_QUEUES) - goto out; + return -EBUSY; - err = 0; rcu_assign_pointer(q->vlan, vlan); rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); sock_hold(&q->sk); @@ -132,9 +131,7 @@ vlan->numvtaps++; vlan->numqueues++; -out: - rtnl_unlock(); - return err; + return 0; } static int macvtap_disable_queue(struct macvtap_queue *q) @@ -219,7 +216,7 @@ goto out; /* Check if we can use flow to select a queue */ - rxq = skb_get_rxhash(skb); + rxq = skb_get_hash(skb); if (rxq) { tap = rcu_dereference(vlan->taps[rxq % numvtaps]); goto out; @@ -315,6 +312,15 @@ segs = nskb; } } else { + /* If we receive a partial checksum and the tap side + * doesn't support checksum offload, compute the checksum. + * Note: it doesn't matter which checksum feature to + * check, we either support them all or none. + */ + if (skb->ip_summed == CHECKSUM_PARTIAL && + !(features & NETIF_F_ALL_CSUM) && + skb_checksum_help(skb)) + goto drop; skb_queue_tail(&q->sk.sk_receive_queue, skb); } @@ -441,11 +447,12 @@ static int macvtap_open(struct inode *inode, struct file *file) { struct net *net = current->nsproxy->net_ns; - struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); + struct net_device *dev; struct macvtap_queue *q; - int err; + int err = -ENODEV; - err = -ENODEV; + rtnl_lock(); + dev = dev_get_by_macvtap_minor(iminor(inode)); if (!dev) goto out; @@ -485,6 +492,7 @@ if (dev) dev_put(dev); + rtnl_unlock(); return err; } @@ -559,6 +567,8 @@ break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; + if (skb->protocol == htons(ETH_P_IPV6)) + ipv6_proxy_select_ident(skb); break; default: return -EINVAL; @@ -615,6 +625,8 @@ if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; vnet_hdr->csum_start = skb_checksum_start_offset(skb); + if (vlan_tx_tag_present(skb)) + vnet_hdr->csum_start += VLAN_HLEN; vnet_hdr->csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; @@ -623,12 +635,15 @@ return 0; } +/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ +#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) + /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, const struct iovec *iv, unsigned long total_len, size_t count, int noblock) { - int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); + int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long len = total_len; @@ -687,7 +702,7 @@ linear = vnet_hdr.hdr_len; } - skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, linear, noblock, &err); if (!skb) goto err; --- linux-3.13.0.orig/drivers/net/nlmon.c +++ linux-3.13.0/drivers/net/nlmon.c @@ -99,10 +99,10 @@ nl_stats = 
per_cpu_ptr(dev->lstats, i); do { - start = u64_stats_fetch_begin_bh(&nl_stats->syncp); + start = u64_stats_fetch_begin_irq(&nl_stats->syncp); tbytes = nl_stats->bytes; tpackets = nl_stats->packets; - } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start)); packets += tpackets; bytes += tbytes; --- linux-3.13.0.orig/drivers/net/phy/fixed.c +++ linux-3.13.0/drivers/net/phy/fixed.c @@ -5,6 +5,7 @@ * Anton Vorontsov * * Copyright (c) 2006-2007 MontaVista Software, Inc. + * Copyright 2009 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -17,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -55,6 +57,9 @@ bmcr |= BMCR_FULLDPLX; switch (fp->status.speed) { + case 10000: + fp->regs[MDIO_STAT2] = MDIO_STAT2_DEVPRST_VAL; + break; case 1000: bmsr |= BMSR_ESTATEN; bmcr |= BMCR_SPEED1000; @@ -75,6 +80,9 @@ } } else { switch (fp->status.speed) { + case 10000: + fp->regs[MDIO_STAT2] = MDIO_STAT2_DEVPRST_VAL; + break; case 1000: bmsr |= BMSR_ESTATEN; bmcr |= BMCR_SPEED1000; --- linux-3.13.0.orig/drivers/net/phy/phy.c +++ linux-3.13.0/drivers/net/phy/phy.c @@ -203,6 +203,25 @@ } /** + * phy_check_valid - check if there is a valid PHY setting which matches + * speed, duplex, and feature mask + * @speed: speed to match + * @duplex: duplex to match + * @features: A mask of the valid settings + * + * Description: Returns true if there is a valid setting, false otherwise. + */ +static inline bool phy_check_valid(int speed, int duplex, u32 features) +{ + unsigned int idx; + + idx = phy_find_valid(phy_find_setting(speed, duplex), features); + + return settings[idx].speed == speed && settings[idx].duplex == duplex && + (settings[idx].setting & features); +} + +/** * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex * @phydev: the target phy_device struct * @@ -1016,7 +1035,7 @@ (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; - int idx, status; + int status; /* Read phy status to properly get the right settings */ status = phy_read_status(phydev); @@ -1048,8 +1067,7 @@ adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); - idx = phy_find_setting(phydev->speed, phydev->duplex); - if (!(lp & adv & settings[idx].setting)) + if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) goto eee_exit; if (clk_stop_enable) { --- linux-3.13.0.orig/drivers/net/phy/phy_device.c +++ linux-3.13.0/drivers/net/phy/phy_device.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -54,15 +55,13 @@ } static struct phy_driver genphy_driver; +static struct phy_driver gen10g_driver; extern int mdio_bus_init(void); extern void mdio_bus_exit(void); static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); -static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, - u32 flags, phy_interface_t interface); - /* * Creates a new phy_fixup and adds it to the list * @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID) @@ -520,13 +519,13 @@ * * Description: Called by drivers to attach to a particular PHY * device. The phy_device is found, and properly hooked up - * to the phy_driver. If no driver is attached, then the - * genphy_driver is used. The phy_device is given a ptr to + * to the phy_driver. 
If no driver is attached, then a + * generic driver is used. The phy_device is given a ptr to * the attaching device, and given a callback for link status * change. The phy_device is returned to the attaching driver. */ -static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, - u32 flags, phy_interface_t interface) +int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, + u32 flags, phy_interface_t interface) { struct device *d = &phydev->dev; int err; @@ -534,12 +533,10 @@ /* Assume that if there is no driver, that it doesn't * exist, and we should use the genphy driver. */ if (NULL == d->driver) { - if (phydev->is_c45) { - pr_err("No driver for phy %x\n", phydev->phy_id); - return -ENODEV; - } - - d->driver = &genphy_driver.driver; + if (phydev->is_c45) + d->driver = &gen10g_driver.driver; + else + d->driver = &genphy_driver.driver; err = d->driver->probe(d); if (err >= 0) @@ -572,6 +569,7 @@ return err; } +EXPORT_SYMBOL(phy_attach_direct); /** * phy_attach - attach a network device to a particular PHY device @@ -622,6 +620,8 @@ * real driver could be loaded */ if (phydev->dev.driver == &genphy_driver.driver) device_release_driver(&phydev->dev); + else if (phydev->dev.driver == &gen10g_driver.driver) + device_release_driver(&phydev->dev); } EXPORT_SYMBOL(phy_detach); @@ -689,6 +689,13 @@ return changed; } +int gen10g_config_advert(struct phy_device *dev) +{ + return 0; +} +EXPORT_SYMBOL(gen10g_config_advert); + + /** * genphy_setup_forced - configures/forces speed/duplex from @phydev * @phydev: target phy_device struct @@ -742,6 +749,12 @@ } EXPORT_SYMBOL(genphy_restart_aneg); +int gen10g_restart_aneg(struct phy_device *phydev) +{ + return 0; +} +EXPORT_SYMBOL(gen10g_restart_aneg); + /** * genphy_config_aneg - restart auto-negotiation or write BMCR @@ -784,6 +797,13 @@ } EXPORT_SYMBOL(genphy_config_aneg); +int gen10g_config_aneg(struct phy_device *phydev) +{ + return 0; +} +EXPORT_SYMBOL(gen10g_config_aneg); + + /** * genphy_update_link - update link status in @phydev * @phydev: target phy_device struct @@ -867,6 +887,16 @@ lpa &= adv; + err = phy_read(phydev, MII_BMSR); + + if (err < 0) + return err; + + /* if the link changed while reading speed and duplex + * abort the speed and duplex update */ + if (((err & BMSR_LSTATUS) == 0) != (phydev->link == 0)) + return 0; + phydev->speed = SPEED_10; phydev->duplex = DUPLEX_HALF; phydev->pause = phydev->asym_pause = 0; @@ -913,6 +943,34 @@ } EXPORT_SYMBOL(genphy_read_status); +int gen10g_read_status(struct phy_device *phydev) +{ + int devad, reg; + u32 mmd_mask = phydev->c45_ids.devices_in_package; + + phydev->link = 1; + + /* For now just lie and say it's 10G all the time */ + phydev->speed = 10000; + phydev->duplex = DUPLEX_FULL; + + for (devad = 0; mmd_mask; devad++, mmd_mask = mmd_mask >> 1) { + if (!mmd_mask & 1) + continue; + + /* Read twice because link state is latched and a + * read moves the current state into the register */ + phy_read_mmd(phydev, devad, MDIO_STAT1); + reg = phy_read_mmd(phydev, devad, MDIO_STAT1); + if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) + phydev->link = 0; + } + + return 0; +} +EXPORT_SYMBOL(gen10g_read_status); + + static int genphy_config_init(struct phy_device *phydev) { int val; @@ -959,6 +1017,15 @@ return 0; } + +static int gen10g_config_init(struct phy_device *phydev) +{ + /* Temporarily just say we support everything */ + phydev->supported = phydev->advertising = SUPPORTED_10000baseT_Full; + + return 0; +} + int genphy_suspend(struct phy_device *phydev) { int 
value; @@ -974,6 +1041,13 @@ } EXPORT_SYMBOL(genphy_suspend); +int gen10g_suspend(struct phy_device *phydev) +{ + return 0; +} +EXPORT_SYMBOL(gen10g_suspend); + + int genphy_resume(struct phy_device *phydev) { int value; @@ -989,6 +1063,13 @@ } EXPORT_SYMBOL(genphy_resume); +int gen10g_resume(struct phy_device *phydev) +{ + return 0; +} +EXPORT_SYMBOL(gen10g_resume); + + /** * phy_probe - probe and init a PHY device * @dev: device to probe and init @@ -1129,6 +1210,20 @@ .driver = {.owner= THIS_MODULE, }, }; +static struct phy_driver gen10g_driver = { + .phy_id = 0xffffffff, + .phy_id_mask = 0xffffffff, + .name = "Generic 10G PHY", + .config_init = gen10g_config_init, + .features = 0, + .config_aneg = gen10g_config_aneg, + .read_status = gen10g_read_status, + .suspend = gen10g_suspend, + .resume = gen10g_resume, + .driver = {.owner = THIS_MODULE, }, +}; + + static int __init phy_init(void) { int rc; @@ -1139,13 +1234,25 @@ rc = phy_driver_register(&genphy_driver); if (rc) - mdio_bus_exit(); + goto genphy_register_failed; + + rc = phy_driver_register(&gen10g_driver); + if (rc) + goto gen10g_register_failed; + + return rc; + +gen10g_register_failed: + phy_driver_unregister(&genphy_driver); +genphy_register_failed: + mdio_bus_exit(); return rc; } static void __exit phy_exit(void) { + phy_driver_unregister(&gen10g_driver); phy_driver_unregister(&genphy_driver); mdio_bus_exit(); } --- linux-3.13.0.orig/drivers/net/phy/vitesse.c +++ linux-3.13.0/drivers/net/phy/vitesse.c @@ -74,7 +74,7 @@ MODULE_AUTHOR("Kriston Carson"); MODULE_LICENSE("GPL"); -static int vsc824x_add_skew(struct phy_device *phydev) +int vsc824x_add_skew(struct phy_device *phydev) { int err; int extcon; @@ -94,6 +94,7 @@ return err; } +EXPORT_SYMBOL(vsc824x_add_skew); static int vsc824x_config_init(struct phy_device *phydev) { --- linux-3.13.0.orig/drivers/net/ppp/ppp_deflate.c +++ linux-3.13.0/drivers/net/ppp/ppp_deflate.c @@ -246,7 +246,7 @@ /* * See if we managed to reduce the size of the packet. 
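The phy_init() rework above registers a second generic driver and, when any step fails, unwinds the earlier steps in reverse order through labelled gotos instead of the old single mdio_bus_exit() call. Reduced to a sketch, with placeholder bus and driver names (my_bus_init, first_drv and friends are not the real symbols):

#include <linux/init.h>

struct my_driver { const char *name; };

int my_bus_init(void);				/* stand-ins for the real */
void my_bus_exit(void);				/* bus and driver hooks   */
int my_driver_register(struct my_driver *drv);
void my_driver_unregister(struct my_driver *drv);

static struct my_driver first_drv  = { .name = "generic A" };
static struct my_driver second_drv = { .name = "generic B" };

static int __init my_init(void)
{
	int rc;

	rc = my_bus_init();			/* step 1 */
	if (rc)
		return rc;

	rc = my_driver_register(&first_drv);	/* step 2 */
	if (rc)
		goto err_first;

	rc = my_driver_register(&second_drv);	/* step 3 */
	if (rc)
		goto err_second;

	return 0;

err_second:
	my_driver_unregister(&first_drv);	/* undo step 2 */
err_first:
	my_bus_exit();				/* undo step 1 */
	return rc;
}

static void __exit my_exit(void)
{
	/* Tear down in the opposite order of my_init(). */
	my_driver_unregister(&second_drv);
	my_driver_unregister(&first_drv);
	my_bus_exit();
}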
*/ - if (olen < isize) { + if (olen < isize && olen <= osize) { state->stats.comp_bytes += olen; state->stats.comp_packets++; } else { --- linux-3.13.0.orig/drivers/net/ppp/ppp_generic.c +++ linux-3.13.0/drivers/net/ppp/ppp_generic.c @@ -601,7 +601,7 @@ if (file == ppp->owner) ppp_shutdown_interface(ppp); } - if (atomic_long_read(&file->f_count) <= 2) { + if (atomic_long_read(&file->f_count) < 2) { ppp_release(NULL, file); err = 0; } else --- linux-3.13.0.orig/drivers/net/ppp/pppoe.c +++ linux-3.13.0/drivers/net/ppp/pppoe.c @@ -675,7 +675,7 @@ po->chan.hdrlen = (sizeof(struct pppoe_hdr) + dev->hard_header_len); - po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; --- linux-3.13.0.orig/drivers/net/ppp/pptp.c +++ linux-3.13.0/drivers/net/ppp/pptp.c @@ -281,7 +281,7 @@ nf_reset(skb); skb->ip_summed = CHECKSUM_NONE; - ip_select_ident(skb, &rt->dst, NULL); + ip_select_ident(skb, NULL); ip_send_check(iph); ip_local_out(skb); @@ -506,7 +506,9 @@ int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; - sp.sa_family = AF_PPPOX; + memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); + + sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_PPTP; sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; --- linux-3.13.0.orig/drivers/net/slip/slip.c +++ linux-3.13.0/drivers/net/slip/slip.c @@ -83,6 +83,7 @@ #include #include #include +#include #include "slip.h" #ifdef CONFIG_INET #include @@ -416,34 +417,44 @@ #endif } -/* - * Called by the driver when there's room for more data. If we have - * more packets to send, we send them here. - */ -static void slip_write_wakeup(struct tty_struct *tty) +/* Write out any remaining transmit buffer. Scheduled when tty is writable */ +static void slip_transmit(struct work_struct *work) { + struct slip *sl = container_of(work, struct slip, tx_work); int actual; - struct slip *sl = tty->disc_data; + spin_lock_bh(&sl->lock); /* First make sure we're connected. */ - if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) + if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) { + spin_unlock_bh(&sl->lock); return; + } - spin_lock(&sl->lock); if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - spin_unlock(&sl->lock); + clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); + spin_unlock_bh(&sl->lock); sl_unlock(sl); return; } - actual = tty->ops->write(tty, sl->xhead, sl->xleft); + actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; - spin_unlock(&sl->lock); + spin_unlock_bh(&sl->lock); +} + +/* + * Called by the driver when there's room for more data. + * Schedule the transmit. 
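The slip rework here takes the actual tty write out of the write-wakeup callback and defers it to a work item, so the buffer flush runs in process context where it can take the driver lock with softirqs disabled and be flushed synchronously on close. Roughly, with placeholder names (my_dev, tx_work):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/workqueue.h>

struct my_dev {
	struct tty_struct *tty;
	spinlock_t lock;
	struct work_struct tx_work;	/* flushes the remaining TX buffer */
	unsigned char *xhead;		/* next byte to send */
	int xleft;			/* bytes still queued */
};

static void my_transmit(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, tx_work);
	int actual;

	spin_lock_bh(&d->lock);
	if (!d->tty || d->xleft <= 0) {		/* torn down, or nothing left */
		spin_unlock_bh(&d->lock);
		return;
	}
	actual = d->tty->ops->write(d->tty, d->xhead, d->xleft);
	d->xleft -= actual;
	d->xhead += actual;
	spin_unlock_bh(&d->lock);
}

/* Line discipline callback; may run in atomic context, so just defer. */
static void my_write_wakeup(struct tty_struct *tty)
{
	struct my_dev *d = tty->disc_data;

	schedule_work(&d->tx_work);
}

The work is initialised with INIT_WORK() when the channel is set up, and the close path clears the tty pointers under the lock and then calls flush_work() so no transmit can still be running afterwards.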
+ */ +static void slip_write_wakeup(struct tty_struct *tty) +{ + struct slip *sl = tty->disc_data; + + schedule_work(&sl->tx_work); } static void sl_tx_timeout(struct net_device *dev) @@ -749,6 +760,7 @@ sl->magic = SLIP_MAGIC; sl->dev = dev; spin_lock_init(&sl->lock); + INIT_WORK(&sl->tx_work, slip_transmit); sl->mode = SL_MODE_DEFAULT; #ifdef CONFIG_SLIP_SMART /* initialize timer_list struct */ @@ -872,8 +884,12 @@ if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty) return; + spin_lock_bh(&sl->lock); tty->disc_data = NULL; sl->tty = NULL; + spin_unlock_bh(&sl->lock); + + flush_work(&sl->tx_work); /* VSV = very important to remove timers */ #ifdef CONFIG_SLIP_SMART --- linux-3.13.0.orig/drivers/net/slip/slip.h +++ linux-3.13.0/drivers/net/slip/slip.h @@ -53,6 +53,7 @@ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ spinlock_t lock; + struct work_struct tx_work; /* Flushes transmit buffer */ #ifdef SL_INCLUDE_CSLIP struct slcompress *slcomp; /* for header compression */ --- linux-3.13.0.orig/drivers/net/team/team.c +++ linux-3.13.0/drivers/net/team/team.c @@ -42,9 +42,7 @@ static struct team_port *team_port_get_rcu(const struct net_device *dev) { - struct team_port *port = rcu_dereference(dev->rx_handler_data); - - return team_port_exists(dev) ? port : NULL; + return rcu_dereference(dev->rx_handler_data); } static struct team_port *team_port_get_rtnl(const struct net_device *dev) @@ -629,6 +627,7 @@ static void team_notify_peers_work(struct work_struct *work) { struct team *team; + int val; team = container_of(work, struct team, notify_peers.dw.work); @@ -636,9 +635,14 @@ schedule_delayed_work(&team->notify_peers.dw, 0); return; } + val = atomic_dec_if_positive(&team->notify_peers.count_pending); + if (val < 0) { + rtnl_unlock(); + return; + } call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); rtnl_unlock(); - if (!atomic_dec_and_test(&team->notify_peers.count_pending)) + if (val) schedule_delayed_work(&team->notify_peers.dw, msecs_to_jiffies(team->notify_peers.interval)); } @@ -647,7 +651,7 @@ { if (!team->notify_peers.count || !netif_running(team->dev)) return; - atomic_set(&team->notify_peers.count_pending, team->notify_peers.count); + atomic_add(team->notify_peers.count, &team->notify_peers.count_pending); schedule_delayed_work(&team->notify_peers.dw, 0); } @@ -669,6 +673,7 @@ static void team_mcast_rejoin_work(struct work_struct *work) { struct team *team; + int val; team = container_of(work, struct team, mcast_rejoin.dw.work); @@ -676,9 +681,14 @@ schedule_delayed_work(&team->mcast_rejoin.dw, 0); return; } + val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); + if (val < 0) { + rtnl_unlock(); + return; + } call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); rtnl_unlock(); - if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) + if (val) schedule_delayed_work(&team->mcast_rejoin.dw, msecs_to_jiffies(team->mcast_rejoin.interval)); } @@ -687,7 +697,7 @@ { if (!team->mcast_rejoin.count || !netif_running(team->dev)) return; - atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count); + atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending); schedule_delayed_work(&team->mcast_rejoin.dw, 0); } @@ -1648,7 +1658,7 @@ } static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { /* * This helper function exists to help dev_pick_tx get the correct @@ -1713,11 +1723,11 @@ if (dev->type 
== ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) if (team->ops.port_change_dev_addr) team->ops.port_change_dev_addr(team, port); - rcu_read_unlock(); + mutex_unlock(&team->lock); return 0; } @@ -1732,6 +1742,7 @@ * to traverse list in reverse under rcu_read_lock */ mutex_lock(&team->lock); + team->port_mtu_change_allowed = true; list_for_each_entry(port, &team->port_list, list) { err = dev_set_mtu(port->dev, new_mtu); if (err) { @@ -1740,6 +1751,7 @@ goto unwind; } } + team->port_mtu_change_allowed = false; mutex_unlock(&team->lock); dev->mtu = new_mtu; @@ -1749,6 +1761,7 @@ unwind: list_for_each_entry_continue_reverse(port, &team->port_list, list) dev_set_mtu(port->dev, dev->mtu); + team->port_mtu_change_allowed = false; mutex_unlock(&team->lock); return err; @@ -1767,13 +1780,13 @@ for_each_possible_cpu(i) { p = per_cpu_ptr(team->pcpu_stats, i); do { - start = u64_stats_fetch_begin_bh(&p->syncp); + start = u64_stats_fetch_begin_irq(&p->syncp); rx_packets = p->rx_packets; rx_bytes = p->rx_bytes; rx_multicast = p->rx_multicast; tx_packets = p->tx_packets; tx_bytes = p->tx_bytes; - } while (u64_stats_fetch_retry_bh(&p->syncp, start)); + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -2853,7 +2866,9 @@ break; case NETDEV_CHANGEMTU: /* Forbid to change mtu of underlaying device */ - return NOTIFY_BAD; + if (!port->team->port_mtu_change_allowed) + return NOTIFY_BAD; + break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid to change type of underlaying device */ return NOTIFY_BAD; --- linux-3.13.0.orig/drivers/net/team/team_mode_loadbalance.c +++ linux-3.13.0/drivers/net/team/team_mode_loadbalance.c @@ -432,9 +432,9 @@ struct lb_stats tmp; do { - start = u64_stats_fetch_begin_bh(syncp); + start = u64_stats_fetch_begin_irq(syncp); tmp.tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_bh(syncp, start)); + } while (u64_stats_fetch_retry_irq(syncp, start)); acc_stats->tx_bytes += tmp.tx_bytes; } --- linux-3.13.0.orig/drivers/net/tun.c +++ linux-3.13.0/drivers/net/tun.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include @@ -349,7 +350,7 @@ * hope the rxq no. may help here. 
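In the team hunks above, the notify-peers and mcast-rejoin workers now claim one unit from their pending counter with atomic_dec_if_positive() before emitting the notifier, and the arming side uses atomic_add() rather than atomic_set(), so a re-arm racing with the worker can only extend the run instead of truncating or double-counting it. A condensed sketch of the worker side, with assumed field names (count_pending, interval, dw):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_notify {
	atomic_t count_pending;		/* notifications still owed */
	unsigned int interval;		/* milliseconds between rounds */
	struct delayed_work dw;
};

static void my_notify_work(struct work_struct *work)
{
	struct my_notify *n = container_of(work, struct my_notify, dw.work);
	int val;

	if (!rtnl_trylock()) {		/* retry shortly instead of blocking */
		schedule_delayed_work(&n->dw, 0);
		return;
	}

	/* Claim one pending round; bail out if it was already drained. */
	val = atomic_dec_if_positive(&n->count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}

	/* ... emit the netdevice notifier here, under RTNL ... */

	rtnl_unlock();

	if (val)			/* more rounds still owed */
		schedule_delayed_work(&n->dw, msecs_to_jiffies(n->interval));
}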
*/ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct tun_struct *tun = netdev_priv(dev); struct tun_flow_entry *e; @@ -359,7 +360,7 @@ rcu_read_lock(); numqueues = ACCESS_ONCE(tun->numqueues); - txq = skb_get_rxhash(skb); + txq = skb_get_hash(skb); if (txq) { e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); if (e) @@ -1104,6 +1105,8 @@ break; } + skb_reset_network_header(skb); + if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { pr_debug("GSO!\n"); switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -1115,6 +1118,8 @@ break; case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + if (skb->protocol == htons(ETH_P_IPV6)) + ipv6_proxy_select_ident(skb); break; default: tun->dev->stats.rx_frame_errors++; @@ -1144,10 +1149,9 @@ skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; } - skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); - rxhash = skb_get_rxhash(skb); + rxhash = skb_get_hash(skb); netif_rx_ni(skb); tun->dev->stats.rx_packets++; @@ -1186,6 +1190,10 @@ struct tun_pi pi = { 0, skb->protocol }; ssize_t total = 0; int vlan_offset = 0, copied; + int vlan_hlen = 0; + + if (vlan_tx_tag_present(skb)) + vlan_hlen = VLAN_HLEN; if (!(tun->flags & TUN_NO_PI)) { if ((len -= sizeof(pi)) < 0) @@ -1237,7 +1245,8 @@ if (skb->ip_summed == CHECKSUM_PARTIAL) { gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - gso.csum_start = skb_checksum_start_offset(skb); + gso.csum_start = skb_checksum_start_offset(skb) + + vlan_hlen; gso.csum_offset = skb->csum_offset; } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; @@ -1250,10 +1259,9 @@ } copied = total; - total += skb->len; - if (!vlan_tx_tag_present(skb)) { - len = min_t(int, skb->len, len); - } else { + len = min_t(int, skb->len + vlan_hlen, len); + total += skb->len + vlan_hlen; + if (vlan_hlen) { int copy, ret; struct { __be16 h_vlan_proto; @@ -1264,8 +1272,6 @@ veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); - len = min_t(int, skb->len + VLAN_HLEN, len); - total += VLAN_HLEN; copy = min_t(int, vlan_offset, len); ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); @@ -1651,7 +1657,9 @@ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->features = dev->hw_features; - dev->vlan_features = dev->features; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, file, false); --- linux-3.13.0.orig/drivers/net/usb/asix_devices.c +++ linux-3.13.0/drivers/net/usb/asix_devices.c @@ -918,7 +918,8 @@ .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | + FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, .tx_fixup = asix_tx_fixup, }; --- linux-3.13.0.orig/drivers/net/usb/ax88179_178a.c +++ linux-3.13.0/drivers/net/usb/ax88179_178a.c @@ -697,6 +697,7 @@ { struct usbnet *dev = netdev_priv(net); struct sockaddr *addr = p; + int ret; if (netif_running(net)) return -EBUSY; @@ -706,8 +707,12 @@ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); /* Set the MAC address */ - return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, + ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN, net->dev_addr); + if (ret < 0) + return ret; + + return 0; } 
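ax88179_write_cmd(), like the usbnet control helpers it is built on, may return a positive byte count on success, so returning its result straight from an ndo_set_mac_address handler would hand the stack a nonzero value where 0 is expected. The hunk above normalizes that; the general shape is simply the following sketch, where write_cmd() is a stand-in for the real helper:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical helper: <0 on error, number of bytes written on success. */
int write_cmd(struct net_device *net, const void *buf, int len);

static int my_set_mac_address(struct net_device *net, void *p)
{
	struct sockaddr *addr = p;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);

	ret = write_cmd(net, net->dev_addr, ETH_ALEN);
	if (ret < 0)
		return ret;	/* propagate the real error code */

	return 0;		/* never leak the positive byte count */
}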
static const struct net_device_ops ax88179_netdev_ops = { @@ -1030,20 +1035,12 @@ dev->mii.phy_id = 0x03; dev->mii.supports_gmii = 1; - if (usb_device_no_sg_constraint(dev->udev)) - dev->can_dma_sg = 1; - dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; - if (dev->can_dma_sg) { - dev->net->features |= NETIF_F_SG | NETIF_F_TSO; - dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO; - } - /* Enable checksum offload */ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6; @@ -1119,6 +1116,10 @@ u16 hdr_off; u32 *pkt_hdr; + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + skb_trim(skb, skb->len - 4); memcpy(&rx_hdr, skb_tail_pointer(skb), 4); le32_to_cpus(&rx_hdr); @@ -1418,6 +1419,19 @@ .tx_fixup = ax88179_tx_fixup, }; +static const struct driver_info lenovo_info = { + .description = "Lenovo OneLinkDock Gigabit LAN", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ @@ -1435,6 +1449,10 @@ /* Samsung USB Ethernet Adapter */ USB_DEVICE(0x04e8, 0xa100), .driver_info = (unsigned long)&samsung_info, +}, { + /* Lenovo OneLinkDock Gigabit LAN */ + USB_DEVICE(0x17ef, 0x304b), + .driver_info = (unsigned long)&lenovo_info, }, { }, }; --- linux-3.13.0.orig/drivers/net/usb/cdc_mbim.c +++ linux-3.13.0/drivers/net/usb/cdc_mbim.c @@ -120,6 +120,16 @@ cdc_ncm_unbind(dev, intf); } +/* verify that the ethernet protocol is IPv4 or IPv6 */ +static bool is_ip_proto(__be16 proto) +{ + switch (proto) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + return true; + } + return false; +} static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { @@ -128,6 +138,7 @@ struct cdc_ncm_ctx *ctx = info->ctx; __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); u16 tci = 0; + bool is_ip; u8 *c; if (!ctx) @@ -137,25 +148,32 @@ if (skb->len <= ETH_HLEN) goto error; + /* Some applications using e.g. packet sockets will + * bypass the VLAN acceleration and create tagged + * ethernet frames directly. 
We primarily look for + * the accelerated out-of-band tag, but fall back if + * required + */ + skb_reset_mac_header(skb); + if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN && + __vlan_get_tag(skb, &tci) == 0) { + is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); + skb_pull(skb, VLAN_ETH_HLEN); + } else { + is_ip = is_ip_proto(eth_hdr(skb)->h_proto); + skb_pull(skb, ETH_HLEN); + } + /* mapping VLANs to MBIM sessions: * no tag => IPS session <0> * 1 - 255 => IPS session * 256 - 511 => DSS session * 512 - 4095 => unsupported, drop */ - vlan_get_tag(skb, &tci); - switch (tci & 0x0f00) { case 0x0000: /* VLAN ID 0 - 255 */ - /* verify that datagram is IPv4 or IPv6 */ - skb_reset_mac_header(skb); - switch (eth_hdr(skb)->h_proto) { - case htons(ETH_P_IP): - case htons(ETH_P_IPV6): - break; - default: + if (!is_ip) goto error; - } c = (u8 *)&sign; c[3] = tci; break; @@ -169,7 +187,6 @@ "unsupported tci=0x%04x\n", tci); goto error; } - skb_pull(skb, ETH_HLEN); } spin_lock_bh(&ctx->mtx); @@ -204,17 +221,23 @@ return; /* need to send the NA on the VLAN dev, if any */ - if (tci) + rcu_read_lock(); + if (tci) { netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), tci); - else + if (!netdev) { + rcu_read_unlock(); + return; + } + } else { netdev = dev->net; - if (!netdev) - return; + } + dev_hold(netdev); + rcu_read_unlock(); in6_dev = in6_dev_get(netdev); if (!in6_dev) - return; + goto out; is_router = !!in6_dev->cnf.forwarding; in6_dev_put(in6_dev); @@ -224,6 +247,8 @@ true /* solicited */, false /* override */, true /* inc_opt */); +out: + dev_put(netdev); } static bool is_neigh_solicit(u8 *buf, size_t len) --- linux-3.13.0.orig/drivers/net/usb/cdc_ncm.c +++ linux-3.13.0/drivers/net/usb/cdc_ncm.c @@ -69,7 +69,6 @@ static int cdc_ncm_setup(struct usbnet *dev) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - struct usb_cdc_ncm_ntb_parameters ncm_parm; u32 val; u8 flags; u8 iface_no; @@ -83,22 +82,22 @@ err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, USB_TYPE_CLASS | USB_DIR_IN |USB_RECIP_INTERFACE, - 0, iface_no, &ncm_parm, - sizeof(ncm_parm)); + 0, iface_no, &ctx->ncm_parm, + sizeof(ctx->ncm_parm)); if (err < 0) { dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); return err; /* GET_NTB_PARAMETERS is required */ } /* read correct set of parameters according to device mode */ - ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize); - ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize); - ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder); - ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor); - ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment); + ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); + ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); + ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); + ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); + ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); /* devices prior to NCM Errata shall set this field to zero */ - ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams); - ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported); + ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); + ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); /* there are some minor differences in NCM and MBIM defaults */ if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { @@ -147,7 +146,7 @@ } /* inform device about NTB input size changes */ - if 
(ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) { + if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, @@ -163,14 +162,6 @@ dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", CDC_NCM_NTB_MAX_SIZE_TX); ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; - - /* Adding a pad byte here simplifies the handling in - * cdc_ncm_fill_tx_frame, by making tx_max always - * represent the real skb max size. - */ - if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) - ctx->tx_max++; - } /* @@ -440,6 +431,10 @@ goto error2; } + /* initialize data interface */ + if (cdc_ncm_setup(dev)) + goto error2; + /* configure data interface */ temp = usb_set_interface(dev->udev, iface_no, data_altsetting); if (temp) { @@ -454,12 +449,6 @@ goto error2; } - /* initialize data interface */ - if (cdc_ncm_setup(dev)) { - dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n"); - goto error2; - } - usb_set_intfdata(ctx->data, dev); usb_set_intfdata(ctx->control, dev); @@ -476,6 +465,15 @@ dev->hard_mtu = ctx->tx_max; dev->rx_urb_size = ctx->rx_max; + /* cdc_ncm_setup will override dwNtbOutMaxSize if it is + * outside the sane range. Adding a pad byte here if necessary + * simplifies the handling in cdc_ncm_fill_tx_frame, making + * tx_max always represent the real skb max size. + */ + if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && + ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) + ctx->tx_max++; + return 0; error2: @@ -771,7 +769,7 @@ skb_out->len > CDC_NCM_MIN_TX_PKT) memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len); - else if ((skb_out->len % dev->maxpacket) == 0) + else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) *skb_put(skb_out, 1) = 0; /* force short packet */ /* set final frame length */ --- linux-3.13.0.orig/drivers/net/usb/cx82310_eth.c +++ linux-3.13.0/drivers/net/usb/cx82310_eth.c @@ -302,9 +302,18 @@ .tx_fixup = cx82310_tx_fixup, }; +#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_DEV_INFO, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bDeviceClass = (cl), \ + .bDeviceSubClass = (sc), \ + .bDeviceProtocol = (pr) + static const struct usb_device_id products[] = { { - USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), + USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, --- linux-3.13.0.orig/drivers/net/usb/gl620a.c +++ linux-3.13.0/drivers/net/usb/gl620a.c @@ -86,6 +86,10 @@ u32 size; u32 count; + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + header = (struct gl_header *) skb->data; // get the packet count of the received skb --- linux-3.13.0.orig/drivers/net/usb/huawei_cdc_ncm.c +++ linux-3.13.0/drivers/net/usb/huawei_cdc_ncm.c @@ -84,12 +84,13 @@ ctx = drvstate->ctx; if (usbnet_dev->status) - /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 - * decimal (0x100)" + /* The wMaxCommand buffer must be big enough to hold + * any message from the modem. 
Experience has shown + * that some replies are more than 256 bytes long */ subdriver = usb_cdc_wdm_register(ctx->control, &usbnet_dev->status->desc, - 256, /* wMaxCommand */ + 1024, /* wMaxCommand */ huawei_cdc_ncm_wdm_manage_power); if (IS_ERR(subdriver)) { ret = PTR_ERR(subdriver); @@ -206,6 +207,9 @@ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), .driver_info = (unsigned long)&huawei_cdc_ncm_info, }, + { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16), + .driver_info = (unsigned long)&huawei_cdc_ncm_info, + }, /* Terminating entry */ { --- linux-3.13.0.orig/drivers/net/usb/mcs7830.c +++ linux-3.13.0/drivers/net/usb/mcs7830.c @@ -528,8 +528,9 @@ { u8 status; - if (skb->len == 0) { - dev_err(&dev->udev->dev, "unexpected empty rx frame\n"); + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) { + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); return 0; } --- linux-3.13.0.orig/drivers/net/usb/net1080.c +++ linux-3.13.0/drivers/net/usb/net1080.c @@ -366,6 +366,10 @@ struct nc_trailer *trailer; u16 hdr_len, packet_len; + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + if (!(skb->len & 0x01)) { netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", skb->len, dev->net->hard_header_len, dev->hard_mtu, --- linux-3.13.0.orig/drivers/net/usb/plusb.c +++ linux-3.13.0/drivers/net/usb/plusb.c @@ -136,6 +136,11 @@ }, { USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ .driver_info = (unsigned long) &prolific_info, +}, { + USB_DEVICE(0x3923, 0x7825), /* National Instruments USB + * Host-to-Host Cable + */ + .driver_info = (unsigned long) &prolific_info, }, { }, // END --- linux-3.13.0.orig/drivers/net/usb/qmi_wwan.c +++ linux-3.13.0/drivers/net/usb/qmi_wwan.c @@ -80,10 +80,10 @@ { __be16 proto; - /* usbnet rx_complete guarantees that skb->len is at least - * hard_header_len, so we can inspect the dest address without - * checking skb->len - */ + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + switch (skb->data[0] & 0xf0) { case 0x40: proto = htons(ETH_P_IP); @@ -660,8 +660,25 @@ {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, + {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ + {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */ + {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */ + {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */ + {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */ + {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */ + {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */ + {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */ + {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */ + {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */ + {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */ + {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */ + {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */ + {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, @@ -717,19 +734,40 @@ 
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ + {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ + {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ + {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ + {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ + {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ - {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ + {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ + {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */ + {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ + {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ --- linux-3.13.0.orig/drivers/net/usb/rndis_host.c +++ linux-3.13.0/drivers/net/usb/rndis_host.c @@ -494,6 +494,10 @@ */ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + /* peripheral may have batched packets to us... 
*/ while (likely(skb->len)) { struct rndis_data_hdr *hdr = (void *)skb->data; --- linux-3.13.0.orig/drivers/net/usb/smsc75xx.c +++ linux-3.13.0/drivers/net/usb/smsc75xx.c @@ -2108,6 +2108,10 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, align_count, size; struct sk_buff *ax_skb; --- linux-3.13.0.orig/drivers/net/usb/smsc95xx.c +++ linux-3.13.0/drivers/net/usb/smsc95xx.c @@ -1725,6 +1725,10 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { + /* This check is no longer done by usbnet */ + if (skb->len < dev->net->hard_header_len) + return 0; + while (skb->len > 0) { u32 header, align_count; struct sk_buff *ax_skb; --- linux-3.13.0.orig/drivers/net/usb/usbnet.c +++ linux-3.13.0/drivers/net/usb/usbnet.c @@ -543,17 +543,19 @@ } // else network stack removes extra byte if we forced a short packet - if (skb->len) { - /* all data was already cloned from skb inside the driver */ - if (dev->driver_info->flags & FLAG_MULTI_PACKET) - dev_kfree_skb_any(skb); - else - usbnet_skb_return(dev, skb); + /* all data was already cloned from skb inside the driver */ + if (dev->driver_info->flags & FLAG_MULTI_PACKET) + goto done; + + if (skb->len < ETH_HLEN) { + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); + } else { + usbnet_skb_return(dev, skb); return; } - netif_dbg(dev, rx_err, dev->net, "drop\n"); - dev->net->stats.rx_errors++; done: skb_queue_tail(&dev->done, skb); } @@ -575,13 +577,6 @@ switch (urb_status) { /* success */ case 0: - if (skb->len < dev->net->hard_header_len) { - state = rx_cleanup; - dev->net->stats.rx_errors++; - dev->net->stats.rx_length_errors++; - netif_dbg(dev, rx_err, dev->net, - "rx length %d\n", skb->len); - } break; /* stalls need manual reset. this is rare ... 
except that @@ -758,14 +753,12 @@ // precondition: never called in_interrupt static void usbnet_terminate_urbs(struct usbnet *dev) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); DECLARE_WAITQUEUE(wait, current); int temp; /* ensure there are no more active urbs */ - add_wait_queue(&unlink_wakeup, &wait); + add_wait_queue(&dev->wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); - dev->wait = &unlink_wakeup; temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); @@ -779,15 +772,14 @@ "waited for %d urb completions\n", temp); } set_current_state(TASK_RUNNING); - dev->wait = NULL; - remove_wait_queue(&unlink_wakeup, &wait); + remove_wait_queue(&dev->wait, &wait); } int usbnet_stop (struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct driver_info *info = dev->driver_info; - int retval; + int retval, pm; clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); @@ -797,6 +789,8 @@ net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); + /* to not race resume */ + pm = usb_autopm_get_interface(dev->intf); /* allow minidriver to stop correctly (wireless devices to turn off * radio etc) */ if (info->stop) { @@ -823,6 +817,9 @@ dev->flags = 0; del_timer_sync (&dev->delay); tasklet_kill (&dev->bh); + if (!pm) + usb_autopm_put_interface(dev->intf); + if (info->manage_power && !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) info->manage_power(dev, 0); @@ -1443,11 +1440,12 @@ /* restart RX again after disabling due to high error rate */ clear_bit(EVENT_RX_KILL, &dev->flags); - // waiting for all pending urbs to complete? - if (dev->wait) { - if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { - wake_up (dev->wait); - } + /* waiting for all pending urbs to complete? + * only then can we forgo submitting anew + */ + if (waitqueue_active(&dev->wait)) { + if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0) + wake_up_all(&dev->wait); // or are we maybe short a few urbs? 
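The usbnet changes above replace the on-stack DECLARE_WAIT_QUEUE_HEAD_ONSTACK() plus dev->wait pointer handshake with a wait_queue_head_t embedded in the device and initialised once at setup; the BH side can then always test it with waitqueue_active() and wake it with wake_up_all(), without racing against a stack frame that may no longer exist. The two halves look roughly like this, with a placeholder device structure rather than the real struct usbnet:

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_usbdev {
	wait_queue_head_t wait;		/* init_waitqueue_head() at setup time */
	int pending;			/* e.g. txq.qlen + rxq.qlen + done.qlen */
};

/* Stop path: sleep until every in-flight URB has been given back. */
static void my_wait_for_urbs(struct my_usbdev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&dev->wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (dev->pending) {
		schedule_timeout(msecs_to_jiffies(250));
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dev->wait, &wait);
}

/* Completion path: only wake if somebody is actually waiting. */
static void my_urb_done(struct my_usbdev *dev)
{
	if (waitqueue_active(&dev->wait) && dev->pending == 0)
		wake_up_all(&dev->wait);
}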
} else if (netif_running (dev->net) && @@ -1586,6 +1584,7 @@ dev->driver_name = name; dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); + init_waitqueue_head(&dev->wait); skb_queue_head_init (&dev->rxq); skb_queue_head_init (&dev->txq); skb_queue_head_init (&dev->done); @@ -1797,9 +1796,10 @@ spin_unlock_irq(&dev->txq.lock); if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { - /* handle remote wakeup ASAP */ - if (!dev->wait && - netif_device_present(dev->net) && + /* handle remote wakeup ASAP + * we cannot race against stop + */ + if (netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) rx_alloc_submit(dev, GFP_NOIO); --- linux-3.13.0.orig/drivers/net/veth.c +++ linux-3.13.0/drivers/net/veth.c @@ -155,10 +155,10 @@ unsigned int start; do { - start = u64_stats_fetch_begin_bh(&stats->syncp); + start = u64_stats_fetch_begin_irq(&stats->syncp); packets = stats->packets; bytes = stats->bytes; - } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); result->packets += packets; result->bytes += bytes; } @@ -285,7 +285,8 @@ dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; dev->features |= VETH_FEATURES; - dev->vlan_features = dev->features; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); dev->destructor = veth_dev_free; dev->hw_features = VETH_FEATURES; --- linux-3.13.0.orig/drivers/net/virtio_net.c +++ linux-3.13.0/drivers/net/virtio_net.c @@ -633,8 +633,7 @@ } while (rq->vq->num_free); if (unlikely(rq->num > rq->max)) rq->max = rq->num; - if (unlikely(!virtqueue_kick(rq->vq))) - return false; + virtqueue_kick(rq->vq); return !oom; } @@ -840,7 +839,7 @@ err = xmit_skb(sq, skb); /* This should not happen! */ - if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) { + if (unlikely(err)) { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, @@ -849,6 +848,7 @@ kfree_skb(skb); return NETDEV_TX_OK; } + virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); @@ -968,16 +968,16 @@ u64 tpackets, tbytes, rpackets, rbytes; do { - start = u64_stats_fetch_begin_bh(&stats->tx_syncp); + start = u64_stats_fetch_begin_irq(&stats->tx_syncp); tpackets = stats->tx_packets; tbytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); do { - start = u64_stats_fetch_begin_bh(&stats->rx_syncp); + start = u64_stats_fetch_begin_irq(&stats->rx_syncp); rpackets = stats->rx_packets; rbytes = stats->rx_bytes; - } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start)); + } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); tot->rx_packets += rpackets; tot->tx_packets += tpackets; @@ -1645,7 +1645,8 @@ /* If we can receive ANY GSO packets, we must allocate large ones. 
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) --- linux-3.13.0.orig/drivers/net/vmxnet3/vmxnet3_drv.c +++ linux-3.13.0/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1760,11 +1760,20 @@ { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_all_intrs(adapter); - - vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); - vmxnet3_enable_all_intrs(adapter); + switch (adapter->intr.type) { +#ifdef CONFIG_PCI_MSI + case VMXNET3_IT_MSIX: { + int i; + for (i = 0; i < adapter->num_rx_queues; i++) + vmxnet3_msix_rx(0, &adapter->rx_queue[i]); + break; + } +#endif + case VMXNET3_IT_MSI: + default: + vmxnet3_intr(0, adapter->netdev); + break; + } } #endif /* CONFIG_NET_POLL_CONTROLLER */ --- linux-3.13.0.orig/drivers/net/vxlan.c +++ linux-3.13.0/drivers/net/vxlan.c @@ -40,6 +40,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_IPV6) #include #include @@ -278,13 +279,15 @@ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); } -/* Find VXLAN socket based on network namespace and UDP port */ -static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) +/* Find VXLAN socket based on network namespace, address family and UDP port */ +static struct vxlan_sock *vxlan_find_sock(struct net *net, + sa_family_t family, __be16 port) { struct vxlan_sock *vs; hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { - if (inet_sk(vs->sock->sk)->inet_sport == port) + if (inet_sk(vs->sock->sk)->inet_sport == port && + inet_sk(vs->sock->sk)->sk.sk_family == family) return vs; } return NULL; @@ -303,11 +306,12 @@ } /* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) +static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, + sa_family_t family, __be16 port) { struct vxlan_sock *vs; - vs = vxlan_find_sock(net, port); + vs = vxlan_find_sock(net, family, port); if (!vs) return NULL; @@ -554,13 +558,104 @@ return 1; } +static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff *skb) +{ + struct sk_buff *p, **pp = NULL; + struct vxlanhdr *vh, *vh2; + struct ethhdr *eh, *eh2; + unsigned int hlen, off_vx, off_eth; + const struct packet_offload *ptype; + __be16 type; + int flush = 1; + + off_vx = skb_gro_offset(skb); + hlen = off_vx + sizeof(*vh); + vh = skb_gro_header_fast(skb, off_vx); + if (skb_gro_header_hard(skb, hlen)) { + vh = skb_gro_header_slow(skb, hlen, off_vx); + if (unlikely(!vh)) + goto out; + } + skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ + + off_eth = skb_gro_offset(skb); + hlen = off_eth + sizeof(*eh); + eh = skb_gro_header_fast(skb, off_eth); + if (skb_gro_header_hard(skb, hlen)) { + eh = skb_gro_header_slow(skb, hlen, off_eth); + if (unlikely(!eh)) + goto out; + } + + flush = 0; + + for (p = *head; p; p = p->next) { + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + vh2 = (struct vxlanhdr *)(p->data + off_vx); + eh2 = (struct ethhdr *)(p->data + off_eth); + if (vh->vx_vni != vh2->vx_vni || compare_ether_header(eh, eh2)) { + NAPI_GRO_CB(p)->same_flow = 0; + continue; + } + } + + type = eh->h_proto; + + rcu_read_lock(); + ptype = gro_find_receive_by_type(type); + if (ptype == 
NULL) { + flush = 1; + goto out_unlock; + } + + skb_gro_pull(skb, sizeof(*eh)); /* pull inner eth header */ + pp = ptype->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} + +static int vxlan_gro_complete(struct sk_buff *skb, int nhoff) +{ + struct ethhdr *eh; + struct packet_offload *ptype; + __be16 type; + int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); + int err = -ENOSYS; + + eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); + type = eh->h_proto; + + rcu_read_lock(); + ptype = gro_find_complete_by_type(type); + if (ptype != NULL) + err = ptype->callbacks.gro_complete(skb, nhoff + vxlan_len); + + rcu_read_unlock(); + return err; +} + /* Notify netdevs that UDP port started listening */ -static void vxlan_notify_add_rx_port(struct sock *sk) +static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) { struct net_device *dev; + struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); sa_family_t sa_family = sk->sk_family; __be16 port = inet_sk(sk)->inet_sport; + int err; + + if (sa_family == AF_INET) { + err = udp_add_offload(&vs->udp_offloads); + if (err) + pr_warn("vxlan: udp_add_offload failed with status %d\n", err); + } rcu_read_lock(); for_each_netdev_rcu(net, dev) { @@ -572,9 +667,10 @@ } /* Notify netdevs that UDP port is no more listening */ -static void vxlan_notify_del_rx_port(struct sock *sk) +static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) { struct net_device *dev; + struct sock *sk = vs->sock->sk; struct net *net = sock_net(sk); sa_family_t sa_family = sk->sk_family; __be16 port = inet_sk(sk)->inet_sport; @@ -586,6 +682,9 @@ port); } rcu_read_unlock(); + + if (sa_family == AF_INET) + udp_del_offload(&vs->udp_offloads); } /* Add new entry to forwarding table -- assumes lock held */ @@ -777,6 +876,9 @@ if (err) return err; + if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family) + return -EAFNOSUPPORT; + spin_lock_bh(&vxlan->hash_lock); err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, port, vni, ifindex, ndm->ndm_flags); @@ -949,7 +1051,7 @@ spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); rcu_assign_sk_user_data(vs->sock->sk, NULL); - vxlan_notify_del_rx_port(sk); + vxlan_notify_del_rx_port(vs); spin_unlock(&vn->sock_lock); queue_work(vxlan_wq, &vs->del_work); @@ -1047,6 +1149,15 @@ if (!vs) goto drop; + /* If the NIC driver gave us an encapsulated packet + * with the encapsulation mark, the device checksummed it + * for us. Otherwise force the upper layers to verify it. + */ + if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation) + skb->ip_summed = CHECKSUM_NONE; + + skb->encapsulation = 0; + vs->rcv(vs, skb, vxh->vx_vni); return 0; @@ -1110,8 +1221,8 @@ * leave the CHECKSUM_UNNECESSARY, the device checksummed it * for us. Otherwise force the upper layers to verify it. 
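The vxlan receive hunks here only keep a hardware checksum verdict when the NIC explicitly marked the skb as an encapsulated packet it understood, and (in the device-level hunk that follows) when RX checksum offload is still enabled on the vxlan device; anything else is downgraded to CHECKSUM_NONE so the stack re-verifies the inner packet itself. A condensed, illustrative sketch of that policy check:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Decide whether to trust the NIC's checksum verdict for a tunnelled frame. */
static void my_tunnel_rx_csum_fixup(struct sk_buff *skb, struct net_device *dev)
{
	bool hw_verified = skb->ip_summed == CHECKSUM_UNNECESSARY ||
			   skb->ip_summed == CHECKSUM_PARTIAL;

	if (!hw_verified ||			/* nothing useful from the NIC */
	    !skb->encapsulation ||		/* verdict covers the outer frame only */
	    !(dev->features & NETIF_F_RXCSUM))	/* RX checksumming disabled */
		skb->ip_summed = CHECKSUM_NONE;

	/* From here on the skb describes the inner packet. */
	skb->encapsulation = 0;
}

The socket-level check earlier in these hunks is stricter (CHECKSUM_UNNECESSARY only, with no feature test), since at that point there is no vxlan device to consult yet.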
*/ - if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation || - !(vxlan->dev->features & NETIF_F_RXCSUM)) + if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) || + !skb->encapsulation || !(vxlan->dev->features & NETIF_F_RXCSUM)) skb->ip_summed = CHECKSUM_NONE; skb->encapsulation = 0; @@ -1210,6 +1321,9 @@ neigh_release(n); + if (reply == NULL) + goto out; + skb_reset_mac_header(reply); __skb_pull(reply, skb_network_offset(reply)); reply->ip_summed = CHECKSUM_UNNECESSARY; @@ -1220,7 +1334,7 @@ } else if (vxlan->flags & VXLAN_F_L3MISS) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = tip, - .sa.sa_family = AF_INET, + .sin.sin_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); @@ -1231,40 +1345,126 @@ } #if IS_ENABLED(CONFIG_IPV6) + +static struct sk_buff *vxlan_na_create(struct sk_buff *request, + struct neighbour *n, bool isrouter) +{ + struct net_device *dev = request->dev; + struct sk_buff *reply; + struct nd_msg *ns, *na; + struct ipv6hdr *pip6; + u8 *daddr; + int na_olen = 8; /* opt hdr + ETH_ALEN for target */ + int ns_olen; + int i, len; + + if (dev == NULL) + return NULL; + + len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + + sizeof(*na) + na_olen + dev->needed_tailroom; + reply = alloc_skb(len, GFP_ATOMIC); + if (reply == NULL) + return NULL; + + reply->protocol = htons(ETH_P_IPV6); + reply->dev = dev; + skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); + skb_push(reply, sizeof(struct ethhdr)); + skb_set_mac_header(reply, 0); + + ns = (struct nd_msg *)skb_transport_header(request); + + daddr = eth_hdr(request)->h_source; + ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); + for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { + if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { + daddr = ns->opt + i + sizeof(struct nd_opt_hdr); + break; + } + } + + /* Ethernet header */ + memcpy(eth_hdr(reply)->h_dest, daddr, ETH_ALEN); + memcpy(eth_hdr(reply)->h_source, n->ha, ETH_ALEN); + eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); + reply->protocol = htons(ETH_P_IPV6); + + skb_pull(reply, sizeof(struct ethhdr)); + skb_set_network_header(reply, 0); + skb_put(reply, sizeof(struct ipv6hdr)); + + /* IPv6 header */ + + pip6 = ipv6_hdr(reply); + memset(pip6, 0, sizeof(struct ipv6hdr)); + pip6->version = 6; + pip6->priority = ipv6_hdr(request)->priority; + pip6->nexthdr = IPPROTO_ICMPV6; + pip6->hop_limit = 255; + pip6->daddr = ipv6_hdr(request)->saddr; + pip6->saddr = *(struct in6_addr *)n->primary_key; + + skb_pull(reply, sizeof(struct ipv6hdr)); + skb_set_transport_header(reply, 0); + + na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); + + /* Neighbor Advertisement */ + memset(na, 0, sizeof(*na)+na_olen); + na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; + na->icmph.icmp6_router = isrouter; + na->icmph.icmp6_override = 1; + na->icmph.icmp6_solicited = 1; + na->target = ns->target; + memcpy(&na->opt[2], n->ha, ETH_ALEN); + na->opt[0] = ND_OPT_TARGET_LL_ADDR; + na->opt[1] = na_olen >> 3; + + na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, + &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, + csum_partial(na, sizeof(*na)+na_olen, 0)); + + pip6->payload_len = htons(sizeof(*na)+na_olen); + + skb_push(reply, sizeof(struct ipv6hdr)); + + reply->ip_summed = CHECKSUM_UNNECESSARY; + + return reply; +} + static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); - struct neighbour *n; - union vxlan_addr ipa; + struct nd_msg *msg; const struct ipv6hdr *iphdr; const 
struct in6_addr *saddr, *daddr; - struct nd_msg *msg; - struct inet6_dev *in6_dev = NULL; + struct neighbour *n; + struct inet6_dev *in6_dev; in6_dev = __in6_dev_get(dev); if (!in6_dev) goto out; - if (!pskb_may_pull(skb, skb->len)) - goto out; - iphdr = ipv6_hdr(skb); saddr = &iphdr->saddr; daddr = &iphdr->daddr; - if (ipv6_addr_loopback(daddr) || - ipv6_addr_is_multicast(daddr)) - goto out; - msg = (struct nd_msg *)skb_transport_header(skb); if (msg->icmph.icmp6_code != 0 || msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) goto out; - n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); + if (ipv6_addr_loopback(daddr) || + ipv6_addr_is_multicast(&msg->target)) + goto out; + + n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); if (n) { struct vxlan_fdb *f; + struct sk_buff *reply; if (!(n->nud_state & NUD_CONNECTED)) { neigh_release(n); @@ -1278,13 +1478,23 @@ goto out; } - ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, - !!in6_dev->cnf.forwarding, - true, false, false); + reply = vxlan_na_create(skb, n, + !!(f ? f->flags & NTF_ROUTER : 0)); + neigh_release(n); + + if (reply == NULL) + goto out; + + if (netif_rx_ni(reply) == NET_RX_DROP) + dev->stats.rx_dropped++; + } else if (vxlan->flags & VXLAN_F_L3MISS) { - ipa.sin6.sin6_addr = *daddr; - ipa.sa.sa_family = AF_INET6; + union vxlan_addr ipa = { + .sin6.sin6_addr = msg->target, + .sin6.sin6_family = AF_INET6, + }; + vxlan_ip_miss(dev, &ipa); } @@ -1315,7 +1525,7 @@ if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin.sin_addr.s_addr = pip->daddr, - .sa.sa_family = AF_INET, + .sin.sin_family = AF_INET, }; vxlan_ip_miss(dev, &ipa); @@ -1336,7 +1546,7 @@ if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { union vxlan_addr ipa = { .sin6.sin6_addr = pip6->daddr, - .sa.sa_family = AF_INET6, + .sin6.sin6_family = AF_INET6, }; vxlan_ip_miss(dev, &ipa); @@ -1390,7 +1600,7 @@ unsigned int range = (port_max - port_min) + 1; u32 hash; - hash = skb_get_rxhash(skb); + hash = skb_get_hash(skb); if (!hash) hash = jhash(skb->data, 2 * ETH_ALEN, (__force u32) skb->protocol); @@ -1576,6 +1786,8 @@ struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; + struct net_device *dev = skb->dev; + int len = skb->len; skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; @@ -1597,16 +1809,16 @@ u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; - tx_stats->tx_bytes += skb->len; + tx_stats->tx_bytes += len; u64_stats_update_end(&tx_stats->syncp); if (netif_rx(skb) == NET_RX_SUCCESS) { u64_stats_update_begin(&rx_stats->syncp); rx_stats->rx_packets++; - rx_stats->rx_bytes += skb->len; + rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); } else { - skb->dev->stats.rx_dropped++; + dev->stats.rx_dropped++; } } @@ -1677,7 +1889,8 @@ struct vxlan_dev *dst_vxlan; ip_rt_put(rt); - dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); + dst_vxlan = vxlan_find_vni(dev_net(dev), vni, + dst->sa.sa_family, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@ -1730,7 +1943,8 @@ struct vxlan_dev *dst_vxlan; dst_release(ndst); - dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); + dst_vxlan = vxlan_find_vni(dev_net(dev), vni, + dst->sa.sa_family, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@ -1781,7 +1995,8 @@ return arp_reduce(dev, skb); #if IS_ENABLED(CONFIG_IPV6) else if (ntohs(eth->h_proto) == ETH_P_IPV6 && - skb->len >= 
sizeof(struct ipv6hdr) + sizeof(struct nd_msg) && + pskb_may_pull(skb, sizeof(struct ipv6hdr) + + sizeof(struct nd_msg)) && ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { struct nd_msg *msg; @@ -1790,6 +2005,7 @@ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return neigh_reduce(dev, skb); } + eth = eth_hdr(skb); #endif } @@ -1879,6 +2095,7 @@ { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); + bool ipv6 = vxlan->flags & VXLAN_F_IPV6; struct vxlan_sock *vs; int i; @@ -1894,10 +2111,10 @@ spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); - if (vs) { + vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET, + vxlan->dst_port); + if (vs && atomic_add_unless(&vs->refcnt, 1, 0)) { /* If we have a socket with same port already, reuse it */ - atomic_inc(&vs->refcnt); vxlan_vs_add_dev(vs, vxlan); } else { /* otherwise make new socket outside of RTNL */ @@ -2058,9 +2275,9 @@ eth_hw_addr_random(dev); ether_setup(dev); if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6) - dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM; + dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM; else - dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM; + dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM; dev->netdev_ops = &vxlan_netdev_ops; dev->destructor = free_netdev; @@ -2278,7 +2495,7 @@ struct sock *sk; unsigned int h; - vs = kmalloc(sizeof(*vs), GFP_KERNEL); + vs = kzalloc(sizeof(*vs), GFP_KERNEL); if (!vs) return ERR_PTR(-ENOMEM); @@ -2303,9 +2520,14 @@ vs->data = data; rcu_assign_sk_user_data(vs->sock->sk, vs); + /* Initialize the vxlan udp offloads structure */ + vs->udp_offloads.port = port; + vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive; + vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete; + spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); - vxlan_notify_add_rx_port(sk); + vxlan_notify_add_rx_port(vs); spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. */ @@ -2336,13 +2558,10 @@ return vs; spin_lock(&vn->sock_lock); - vs = vxlan_find_sock(net, port); - if (vs) { - if (vs->rcv == rcv) - atomic_inc(&vs->refcnt); - else + vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); + if (vs && ((vs->rcv != rcv) || + !atomic_add_unless(&vs->refcnt, 1, 0))) vs = ERR_PTR(-EBUSY); - } spin_unlock(&vn->sock_lock); if (!vs) @@ -2386,9 +2605,10 @@ vni = nla_get_u32(data[IFLA_VXLAN_ID]); dst->remote_vni = vni; + /* Unless IPv6 is explicitly requested, assume IPv4 */ + dst->remote_ip.sa.sa_family = AF_INET; if (data[IFLA_VXLAN_GROUP]) { dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]); - dst->remote_ip.sa.sa_family = AF_INET; } else if (data[IFLA_VXLAN_GROUP6]) { if (!IS_ENABLED(CONFIG_IPV6)) return -EPFNOSUPPORT; @@ -2437,8 +2657,7 @@ if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); - /* update header length based on lower device */ - dev->hard_header_len = lowerdev->hard_header_len + + dev->needed_headroom = lowerdev->hard_header_len + (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); } else if (use_ipv6) vxlan->flags |= VXLAN_F_IPV6; @@ -2482,7 +2701,8 @@ if (data[IFLA_VXLAN_PORT]) vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); - if (vxlan_find_vni(net, vni, vxlan->dst_port)) { + if (vxlan_find_vni(net, vni, use_ipv6 ? 
AF_INET6 : AF_INET, + vxlan->dst_port)) { pr_info("duplicate VNI %u\n", vni); return -EEXIST; } --- linux-3.13.0.orig/drivers/net/wireless/ath/ar5523/ar5523.c +++ linux-3.13.0/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1091,7 +1091,8 @@ return ret; } -static void ar5523_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct ar5523 *ar = hw->priv; @@ -1765,7 +1766,7 @@ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */ - AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108 + AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108 (CyberTAN Technology) */ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */ --- linux-3.13.0.orig/drivers/net/wireless/ath/ath10k/mac.c +++ linux-3.13.0/drivers/net/wireless/ath/ath10k/mac.c @@ -2939,7 +2939,8 @@ return ret; } -static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct ath10k *ar = hw->priv; bool skip; --- linux-3.13.0.orig/drivers/net/wireless/ath/ath5k/qcu.c +++ linux-3.13.0/drivers/net/wireless/ath/ath5k/qcu.c @@ -225,13 +225,7 @@ } else { switch (queue_type) { case AR5K_TX_QUEUE_DATA: - for (queue = AR5K_TX_QUEUE_ID_DATA_MIN; - ah->ah_txq[queue].tqi_type != - AR5K_TX_QUEUE_INACTIVE; queue++) { - - if (queue > AR5K_TX_QUEUE_ID_DATA_MAX) - return -EINVAL; - } + queue = queue_info->tqi_subtype; break; case AR5K_TX_QUEUE_UAPSD: queue = AR5K_TX_QUEUE_ID_UAPSD; --- linux-3.13.0.orig/drivers/net/wireless/ath/ath5k/reset.c +++ linux-3.13.0/drivers/net/wireless/ath/ath5k/reset.c @@ -478,7 +478,7 @@ regval = ioread32(reg); iowrite32(regval | val, reg); regval = ioread32(reg); - usleep_range(100, 150); + udelay(100); /* NB: should be atomic */ /* Bring BB/MAC out of reset */ iowrite32(regval & ~val, reg); --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -646,6 +646,19 @@ else ah->enabled_cals &= ~TX_CL_CAL; } + + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { + if (ah->is_clk_25mhz) { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); + } else { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); + } + udelay(100); + } } static void ar9003_hw_prog_ini(struct ath_hw *ah, --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -56,7 +56,7 @@ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, @@ -95,7 +95,7 @@ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 
0x00000000}, {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, }; --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -34,6 +34,10 @@ module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444); MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); +static int ath9k_ps_enable; +module_param_named(ps_enable, ath9k_ps_enable, int, 0444); +MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); + #define CHAN2G(_freq, _idx) { \ .center_freq = (_freq), \ .hw_value = (_idx), \ @@ -725,12 +729,14 @@ IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_HAS_RATE_CONTROL | IEEE80211_HW_RX_INCLUDES_FCS | - IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_MFP_CAPABLE | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; + if (ath9k_ps_enable) + hw->flags |= IEEE80211_HW_SUPPORTS_PS; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1315,21 +1315,22 @@ struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_rate trate; + if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED)) + return; + mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { - memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); - ath9k_htc_setup_rate(priv, sta, &trate); - if (!ath9k_htc_send_rate_cmd(priv, &trate)) - ath_dbg(common, CONFIG, - "Supported rates for sta: %pM updated, rate caps: 0x%X\n", - sta->addr, be32_to_cpu(trate.capflags)); - else - ath_dbg(common, CONFIG, - "Unable to update supported rates for sta: %pM\n", - sta->addr); - } + memset(&trate, 0, sizeof(struct ath9k_htc_target_rate)); + ath9k_htc_setup_rate(priv, sta, &trate); + if (!ath9k_htc_send_rate_cmd(priv, &trate)) + ath_dbg(common, CONFIG, + "Supported rates for sta: %pM updated, rate caps: 0x%X\n", + sta->addr, be32_to_cpu(trate.capflags)); + else + ath_dbg(common, CONFIG, + "Unable to update supported rates for sta: %pM\n", + sta->addr); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/hw.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/hw.c @@ -923,19 +923,6 @@ udelay(RTC_PLL_SETTLE_DELAY); REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); - - if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { - if (ah->is_clk_25mhz) { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); - } else { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); - } - udelay(100); - } } static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, @@ -2608,13 +2595,6 @@ ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; - /* - * Fast channel change across bands is available - * only for AR9462 and AR9565. 
- */ - if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) - pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH; - return 0; } --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/hw.h +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/hw.h @@ -215,8 +215,8 @@ #define AH_WOW_BEACON_MISS BIT(3) enum ath_hw_txq_subtype { - ATH_TXQ_AC_BE = 0, - ATH_TXQ_AC_BK = 1, + ATH_TXQ_AC_BK = 0, + ATH_TXQ_AC_BE = 1, ATH_TXQ_AC_VI = 2, ATH_TXQ_AC_VO = 3, }; --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/init.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/init.c @@ -57,6 +57,10 @@ module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444); MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity"); +static int ath9k_ps_enable; +module_param_named(ps_enable, ath9k_ps_enable, int, 0444); +MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); + bool is_ath9k_unloaded; /* We use the hw_value as an index into our private channel structure */ @@ -890,13 +894,15 @@ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_PS_NULLFUNC_STACK | IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_RC_TABLE | IEEE80211_HW_SUPPORTS_HT_CCK_RATES; + if (ath9k_ps_enable) + hw->flags |= IEEE80211_HW_SUPPORTS_PS; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/mac.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/mac.c @@ -311,14 +311,7 @@ q = ATH9K_NUM_TX_QUEUES - 3; break; case ATH9K_TX_QUEUE_DATA: - for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++) - if (ah->txq[q].tqi_type == - ATH9K_TX_QUEUE_INACTIVE) - break; - if (q == ATH9K_NUM_TX_QUEUES) { - ath_err(common, "No available TX queue\n"); - return -1; - } + q = qinfo->tqi_subtype; break; default: ath_err(common, "Invalid TX queue type: %u\n", type); --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/main.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/main.c @@ -1818,7 +1818,8 @@ mutex_unlock(&sc->mutex); } -static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/recv.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/recv.c @@ -733,11 +733,18 @@ return NULL; /* - * mark descriptor as zero-length and set the 'more' - * flag to ensure that both buffers get discarded + * Re-check previous descriptor, in case it has been filled + * in the mean time. 
*/ - rs->rs_datalen = 0; - rs->rs_more = true; + ret = ath9k_hw_rxprocdesc(ah, ds, rs); + if (ret == -EINPROGRESS) { + /* + * mark descriptor as zero-length and set the 'more' + * flag to ensure that both buffers get discarded + */ + rs->rs_datalen = 0; + rs->rs_more = true; + } } list_del(&bf->list); @@ -851,20 +858,15 @@ enum ieee80211_band band; unsigned int i = 0; struct ath_softc __maybe_unused *sc = common->priv; + struct ath_hw *ah = sc->sc_ah; - band = hw->conf.chandef.chan->band; + band = ah->curchan->chan->band; sband = hw->wiphy->bands[band]; - switch (hw->conf.chandef.width) { - case NL80211_CHAN_WIDTH_5: + if (IS_CHAN_QUARTER_RATE(ah->curchan)) rxs->flag |= RX_FLAG_5MHZ; - break; - case NL80211_CHAN_WIDTH_10: + else if (IS_CHAN_HALF_RATE(ah->curchan)) rxs->flag |= RX_FLAG_10MHZ; - break; - default: - break; - } if (rx_stats->rs_rate & 0x80) { /* HT rate */ @@ -1171,32 +1173,32 @@ struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hdr *hdr; bool discard_current = sc->rx.discard_next; - int ret = 0; /* * Discard corrupt descriptors which are marked in * ath_get_next_rx_buf(). */ - sc->rx.discard_next = rx_stats->rs_more; if (discard_current) - return -EINVAL; + goto corrupt; + + sc->rx.discard_next = false; /* * Discard zero-length packets. */ if (!rx_stats->rs_datalen) { RX_STAT_INC(rx_len_err); - return -EINVAL; + goto corrupt; } - /* - * rs_status follows rs_datalen so if rs_datalen is too large - * we can take a hint that hardware corrupted it, so ignore - * those frames. - */ + /* + * rs_status follows rs_datalen so if rs_datalen is too large + * we can take a hint that hardware corrupted it, so ignore + * those frames. + */ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { RX_STAT_INC(rx_len_err); - return -EINVAL; + goto corrupt; } /* Only use status info from the last fragment */ @@ -1210,10 +1212,8 @@ * This is different from the other corrupt descriptor * condition handled above. */ - if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { - ret = -EINVAL; - goto exit; - } + if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) + goto corrupt; hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); @@ -1229,18 +1229,15 @@ if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) RX_STAT_INC(rx_spectral); - ret = -EINVAL; - goto exit; + return -EINVAL; } /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. */ - if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { - ret = -EINVAL; - goto exit; - } + if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) + return -EINVAL; rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr); if (rx_stats->is_mybeacon) { @@ -1248,15 +1245,19 @@ ath_start_rx_poll(sc, 3); } - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { - ret =-EINVAL; - goto exit; - } + /* + * This shouldn't happen, but have a safety check anyway. 
+ */ + if (WARN_ON(!ah->curchan)) + return -EINVAL; + + if (ath9k_process_rate(common, hw, rx_stats, rx_status)) + return -EINVAL; ath9k_process_rssi(common, hw, rx_stats, rx_status); - rx_status->band = hw->conf.chandef.chan->band; - rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = ah->curchan->chan->band; + rx_status->freq = ah->curchan->chan->center_freq; rx_status->antenna = rx_stats->rs_antenna; rx_status->flag |= RX_FLAG_MACTIME_END; @@ -1266,9 +1267,11 @@ sc->rx.num_pkts++; #endif -exit: - sc->rx.discard_next = false; - return ret; + return 0; + +corrupt: + sc->rx.discard_next = rx_stats->rs_more; + return -EINVAL; } static void ath9k_rx_skb_postprocess(struct ath_common *common, --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/reg.h +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/reg.h @@ -810,6 +810,9 @@ #define AR_SREV_VERSION_9565 0x2C0 #define AR_SREV_REVISION_9565_10 0 #define AR_SREV_VERSION_9550 0x400 +#define AR_SREV_VERSION_9531 0x500 +#define AR_SREV_REVISION_9531_10 0 +#define AR_SREV_REVISION_9531_11 1 #define AR_SREV_5416(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ @@ -938,11 +941,19 @@ #define AR_SREV_9580(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10)) - #define AR_SREV_9580_10(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9580_10)) +#define AR_SREV_9531(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531)) +#define AR_SREV_9531_10(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_10)) +#define AR_SREV_9531_11(_ah) \ + (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_11)) + /* NOTE: When adding chips newer than Peacock, add chip check here */ #define AR_SREV_9580_10_OR_LATER(_ah) \ (AR_SREV_9580(_ah)) --- linux-3.13.0.orig/drivers/net/wireless/ath/ath9k/xmit.c +++ linux-3.13.0/drivers/net/wireless/ath/ath9k/xmit.c @@ -904,6 +904,15 @@ tx_info = IEEE80211_SKB_CB(skb); tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; + + /* + * No aggregation session is running, but there may be frames + * from a previous session or a failed attempt in the queue. 
+ * Send them out as normal data frames + */ + if (!tid->active) + tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU; + if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { bf->bf_state.bf_type = 0; return bf; @@ -1458,14 +1467,16 @@ for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - if (!tid->sched) - continue; - ac = tid->ac; txq = ac->txq; ath_txq_lock(sc, txq); + if (!tid->sched) { + ath_txq_unlock(sc, txq); + continue; + } + buffered = ath_tid_has_buffered(tid); tid->sched = false; @@ -1710,7 +1721,7 @@ ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); - qi.tqi_readyTime = (cur_conf->beacon_interval * + qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) * ATH_CABQ_READY_TIME) / 100; ath_txq_update(sc, qnum, &qi); @@ -2072,7 +2083,7 @@ ATH_TXBUF_RESET(bf); - if (tid) { + if (tid && ieee80211_is_data_present(hdr->frame_control)) { fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; seqno = tid->seq_next; hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); @@ -2195,14 +2206,15 @@ txq->stopped = true; } + if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) + tid = ath_get_skb_tid(sc, txctl->an, skb); + if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { ath_txq_unlock(sc, txq); txq = sc->tx.uapsdq; ath_txq_lock(sc, txq); } else if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) { - tid = ath_get_skb_tid(sc, txctl->an, skb); - WARN_ON(tid->ac->txq != txctl->txq); if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) --- linux-3.13.0.orig/drivers/net/wireless/ath/carl9170/carl9170.h +++ linux-3.13.0/drivers/net/wireless/ath/carl9170/carl9170.h @@ -256,6 +256,7 @@ atomic_t rx_work_urbs; atomic_t rx_pool_urbs; kernel_ulong_t features; + bool usb_ep_cmd_is_bulk; /* firmware settings */ struct completion fw_load_wait; --- linux-3.13.0.orig/drivers/net/wireless/ath/carl9170/main.c +++ linux-3.13.0/drivers/net/wireless/ath/carl9170/main.c @@ -1708,7 +1708,9 @@ return 0; } -static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void carl9170_op_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, bool drop) { struct ar9170 *ar = hw->priv; unsigned int vid; --- linux-3.13.0.orig/drivers/net/wireless/ath/carl9170/usb.c +++ linux-3.13.0/drivers/net/wireless/ath/carl9170/usb.c @@ -621,9 +621,16 @@ goto err_free; } - usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev, - AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4, - carl9170_usb_cmd_complete, ar, 1); + if (ar->usb_ep_cmd_is_bulk) + usb_fill_bulk_urb(urb, ar->udev, + usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD), + cmd, cmd->hdr.len + 4, + carl9170_usb_cmd_complete, ar); + else + usb_fill_int_urb(urb, ar->udev, + usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD), + cmd, cmd->hdr.len + 4, + carl9170_usb_cmd_complete, ar, 1); if (free_buf) urb->transfer_flags |= URB_FREE_BUFFER; @@ -1032,9 +1039,10 @@ static int carl9170_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_endpoint_descriptor *ep; struct ar9170 *ar; struct usb_device *udev; - int err; + int i, err; err = usb_reset_device(interface_to_usbdev(intf)); if (err) @@ -1050,6 +1058,21 @@ ar->intf = intf; ar->features = id->driver_info; + /* We need to remember the type of endpoint 4 because it differs + * between high- and full-speed configuration. The high-speed + * configuration specifies it as interrupt and the full-speed + * configuration as bulk endpoint. This information is required + * later when sending urbs to that endpoint. 
+ */ + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) { + ep = &intf->cur_altsetting->endpoint[i].desc; + + if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD && + usb_endpoint_dir_out(ep) && + usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK) + ar->usb_ep_cmd_is_bulk = true; + } + usb_set_intfdata(intf, ar); SET_IEEE80211_DEV(ar->hw, &intf->dev); --- linux-3.13.0.orig/drivers/net/wireless/b43/b43.h +++ linux-3.13.0/drivers/net/wireless/b43/b43.h @@ -731,8 +731,6 @@ struct b43_request_fw_context { /* The device we are requesting the fw for. */ struct b43_wldev *dev; - /* a completion event structure needed if this call is asynchronous */ - struct completion fw_load_complete; /* a pointer to the firmware object */ const struct firmware *blob; /* The type of firmware to request. */ @@ -809,6 +807,8 @@ struct b43_wldev { struct b43_bus_dev *dev; struct b43_wl *wl; + /* a completion event structure needed if this call is asynchronous */ + struct completion fw_load_complete; /* The device initialization status. * Use b43_status() to query. */ --- linux-3.13.0.orig/drivers/net/wireless/b43/main.c +++ linux-3.13.0/drivers/net/wireless/b43/main.c @@ -2070,6 +2070,7 @@ static void b43_release_firmware(struct b43_wldev *dev) { + complete(&dev->fw_load_complete); b43_do_release_fw(&dev->fw.ucode); b43_do_release_fw(&dev->fw.pcm); b43_do_release_fw(&dev->fw.initvals); @@ -2095,7 +2096,7 @@ struct b43_request_fw_context *ctx = context; ctx->blob = firmware; - complete(&ctx->fw_load_complete); + complete(&ctx->dev->fw_load_complete); } int b43_do_request_fw(struct b43_request_fw_context *ctx, @@ -2142,7 +2143,7 @@ } if (async) { /* do this part asynchronously */ - init_completion(&ctx->fw_load_complete); + init_completion(&ctx->dev->fw_load_complete); err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname, ctx->dev->dev->dev, GFP_KERNEL, ctx, b43_fw_cb); @@ -2150,12 +2151,11 @@ pr_err("Unable to load firmware\n"); return err; } - /* stall here until fw ready */ - wait_for_completion(&ctx->fw_load_complete); + wait_for_completion(&ctx->dev->fw_load_complete); if (ctx->blob) goto fw_ready; /* On some ARM systems, the async request will fail, but the next sync - * request works. For this reason, we dall through here + * request works. For this reason, we fall through here */ } err = request_firmware(&ctx->blob, ctx->fwname, @@ -2424,6 +2424,7 @@ static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl); static void b43_one_core_detach(struct b43_bus_dev *dev); +static int b43_rng_init(struct b43_wl *wl); static void b43_request_firmware(struct work_struct *work) { @@ -2475,6 +2476,10 @@ goto err_one_core_detach; wl->hw_registred = true; b43_leds_register(wl->current_dev); + + /* Register HW RNG driver */ + b43_rng_init(wl); + goto out; err_one_core_detach: @@ -4636,9 +4641,6 @@ if (!dev || b43_status(dev) != B43_STAT_INITIALIZED) return; - /* Unregister HW RNG driver */ - b43_rng_exit(dev->wl); - b43_set_status(dev, B43_STAT_UNINIT); /* Stop the microcode PSM. */ @@ -4795,9 +4797,6 @@ b43_set_status(dev, B43_STAT_INITIALIZED); - /* Register HW RNG driver */ - b43_rng_init(dev->wl); - out: return err; @@ -5464,6 +5463,9 @@ b43_one_core_detach(wldev->dev); + /* Unregister HW RNG driver */ + b43_rng_exit(wl); + b43_leds_unregister(wl); ieee80211_free_hw(wl->hw); @@ -5541,6 +5543,9 @@ b43_one_core_detach(dev); + /* Unregister HW RNG driver */ + b43_rng_exit(wl); + if (list_empty(&wl->devlist)) { b43_leds_unregister(wl); /* Last core on the chip unregistered. 
--- linux-3.13.0.orig/drivers/net/wireless/b43/phy_n.c +++ linux-3.13.0/drivers/net/wireless/b43/phy_n.c @@ -5176,22 +5176,22 @@ int ch = new_channel->hw_value; u16 old_band_5ghz; - u32 tmp32; + u16 tmp16; old_band_5ghz = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { - tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); + tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) { b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); - tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); + tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR); + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4); b43_phy_mask(dev, B43_PHY_B_BBCFG, 0x3FFF); - b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); + b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16); } b43_chantab_phy_upload(dev, e); --- linux-3.13.0.orig/drivers/net/wireless/b43/xmit.c +++ linux-3.13.0/drivers/net/wireless/b43/xmit.c @@ -810,9 +810,13 @@ break; case B43_PHYTYPE_G: status.band = IEEE80211_BAND_2GHZ; - /* chanid is the radio channel cookie value as used - * to tune the radio. */ - status.freq = chanid + 2400; + /* Somewhere between 478.104 and 508.1084 firmware for G-PHY + * has been modified to be compatible with N-PHY and others. + */ + if (dev->fw.rev >= 508) + status.freq = ieee80211_channel_to_frequency(chanid, status.band); + else + status.freq = chanid + 2400; break; case B43_PHYTYPE_N: case B43_PHYTYPE_LP: @@ -821,10 +825,10 @@ * channel number in b43. */ if (chanstat & B43_RX_CHAN_5GHZ) { status.band = IEEE80211_BAND_5GHZ; - status.freq = b43_freq_to_channel_5ghz(chanid); + status.freq = b43_channel_to_freq_5ghz(chanid); } else { status.band = IEEE80211_BAND_2GHZ; - status.freq = b43_freq_to_channel_2ghz(chanid); + status.freq = b43_channel_to_freq_2ghz(chanid); } break; default: --- linux-3.13.0.orig/drivers/net/wireless/b43legacy/main.c +++ linux-3.13.0/drivers/net/wireless/b43legacy/main.c @@ -3919,6 +3919,7 @@ * as the ieee80211 unreg will destroy the workqueue. 
*/ cancel_work_sync(&wldev->restart_work); cancel_work_sync(&wl->firmware_load); + complete(&wldev->fw_load_complete); B43legacy_WARN_ON(!wl); if (!wldev->fw.ucode) --- linux-3.13.0.orig/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ linux-3.13.0/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -426,6 +426,12 @@ bool blocked; int err; + if (!wl->ucode.bcm43xx_bomminor) { + err = brcms_request_fw(wl, wl->wlc->hw->d11core); + if (err) + return -ENOENT; + } + ieee80211_wake_queues(hw); spin_lock_bh(&wl->lock); blocked = brcms_rfkill_set_hw_state(wl); @@ -433,14 +439,6 @@ if (!blocked) wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); - if (!wl->ucode.bcm43xx_bomminor) { - err = brcms_request_fw(wl, wl->wlc->hw->d11core); - if (err) { - brcms_remove(wl->wlc->hw->d11core); - return -ENOENT; - } - } - spin_lock_bh(&wl->lock); /* avoid acknowledging frames before a non-monitor device is added */ wl->mute_tx = true; @@ -899,7 +897,8 @@ return result; } -static void brcms_ops_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void brcms_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct brcms_info *wl = hw->priv; int ret; --- linux-3.13.0.orig/drivers/net/wireless/cw1200/sta.c +++ linux-3.13.0/drivers/net/wireless/cw1200/sta.c @@ -935,7 +935,8 @@ return ret; } -void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct cw1200_common *priv = hw->priv; --- linux-3.13.0.orig/drivers/net/wireless/cw1200/sta.h +++ linux-3.13.0/drivers/net/wireless/cw1200/sta.h @@ -40,7 +40,8 @@ int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value); -void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop); +void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list); --- linux-3.13.0.orig/drivers/net/wireless/hostap/hostap_hw.c +++ linux-3.13.0/drivers/net/wireless/hostap/hostap_hw.c @@ -69,7 +69,7 @@ module_param_string(essid, essid, sizeof(essid), 0444); MODULE_PARM_DESC(essid, "Host AP's ESSID"); -static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_MASTER, DEF_INTS }; +static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_INFRA, DEF_INTS }; module_param_array(iw_mode, int, NULL, 0444); MODULE_PARM_DESC(iw_mode, "Initial operation mode"); --- linux-3.13.0.orig/drivers/net/wireless/iwlegacy/common.c +++ linux-3.13.0/drivers/net/wireless/iwlegacy/common.c @@ -4702,7 +4702,8 @@ } EXPORT_SYMBOL(il_mac_change_interface); -void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct il_priv *il = hw->priv; unsigned long timeout = jiffies + msecs_to_jiffies(500); --- linux-3.13.0.orig/drivers/net/wireless/iwlegacy/common.h +++ linux-3.13.0/drivers/net/wireless/iwlegacy/common.h @@ -1722,7 +1722,8 @@ struct ieee80211_vif *vif); int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype newtype, bool newp2p); -void il_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop); +void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); int il_alloc_txq_mem(struct il_priv *il); void il_free_txq_mem(struct il_priv *il); --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/commands.h +++ 
linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/commands.h @@ -966,21 +966,21 @@ /* WiFi queues mask */ -#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0)) -#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1)) -#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2)) -#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3)) -#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3)) +#define IWL_SCD_BK_MSK BIT(0) +#define IWL_SCD_BE_MSK BIT(1) +#define IWL_SCD_VI_MSK BIT(2) +#define IWL_SCD_VO_MSK BIT(3) +#define IWL_SCD_MGMT_MSK BIT(3) /* PAN queues mask */ -#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4)) -#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5)) -#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6)) -#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7)) -#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7)) -#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8)) +#define IWL_PAN_SCD_BK_MSK BIT(4) +#define IWL_PAN_SCD_BE_MSK BIT(5) +#define IWL_PAN_SCD_VI_MSK BIT(6) +#define IWL_PAN_SCD_VO_MSK BIT(7) +#define IWL_PAN_SCD_MGMT_MSK BIT(7) +#define IWL_PAN_SCD_MULTICAST_MSK BIT(8) -#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) +#define IWL_AGG_TX_QUEUE_MSK 0xffc00 #define IWL_DROP_ALL BIT(1) @@ -1005,12 +1005,17 @@ * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable. * 2: Dump all FIFO */ -struct iwl_txfifo_flush_cmd { +struct iwl_txfifo_flush_cmd_v3 { __le32 queue_control; __le16 flush_control; __le16 reserved; } __packed; +struct iwl_txfifo_flush_cmd_v2 { + __le16 queue_control; + __le16 flush_control; +} __packed; + /* * REPLY_WEP_KEY = 0x20 */ --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/dev.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -708,7 +708,6 @@ unsigned long reload_jiffies; int reload_count; bool ucode_loaded; - bool init_ucode_run; /* Don't run init uCode again */ u8 plcp_delta_threshold; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/lib.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -138,38 +138,38 @@ */ int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk) { - struct iwl_txfifo_flush_cmd flush_cmd; - struct iwl_host_cmd cmd = { - .id = REPLY_TXFIFO_FLUSH, - .len = { sizeof(struct iwl_txfifo_flush_cmd), }, - .flags = CMD_SYNC, - .data = { &flush_cmd, }, + struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = { + .flush_control = cpu_to_le16(IWL_DROP_ALL), + }; + struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = { + .flush_control = cpu_to_le16(IWL_DROP_ALL), }; - memset(&flush_cmd, 0, sizeof(flush_cmd)); + u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | + IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK; - flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | - IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | - IWL_SCD_MGMT_MSK; if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) - flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK | - IWL_PAN_SCD_VI_MSK | - IWL_PAN_SCD_BE_MSK | - IWL_PAN_SCD_BK_MSK | - IWL_PAN_SCD_MGMT_MSK | - IWL_PAN_SCD_MULTICAST_MSK; + queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK | + IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK | + IWL_PAN_SCD_MGMT_MSK | + IWL_PAN_SCD_MULTICAST_MSK; if (priv->nvm_data->sku_cap_11n_enable) - flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK; + queue_control |= IWL_AGG_TX_QUEUE_MSK; if (scd_q_msk) - flush_cmd.queue_control = cpu_to_le32(scd_q_msk); - - IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", - flush_cmd.queue_control); - flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL); + queue_control = scd_q_msk; - return iwl_dvm_send_cmd(priv, &cmd); + IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control); + 
flush_cmd_v3.queue_control = cpu_to_le32(queue_control); + flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control); + + if (IWL_UCODE_API(priv->fw->ucode_ver) > 2) + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, + sizeof(flush_cmd_v3), + &flush_cmd_v3); + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, + sizeof(flush_cmd_v2), &flush_cmd_v2); } void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -704,6 +704,24 @@ return ret; } +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* disabled by default */ + return false; +} + static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -725,7 +743,7 @@ switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + if (!iwl_enable_rx_ampdu(priv->cfg)) break; IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); @@ -737,7 +755,7 @@ case IEEE80211_AMPDU_TX_START: if (!priv->trans->ops->txq_enable) break; - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + if (!iwl_enable_tx_ampdu(priv->cfg)) break; IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); @@ -1081,9 +1099,11 @@ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; } -static void iwlagn_mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 scd_queues; mutex_lock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "enter\n"); @@ -1097,18 +1117,19 @@ goto done; } - /* - * mac80211 will not push any more frames for transmit - * until the flush is completed - */ - if (drop) { - IWL_DEBUG_MAC80211(priv, "send flush command\n"); - if (iwlagn_txfifo_flush(priv, 0)) { - IWL_ERR(priv, "flush request fail\n"); - goto done; - } + scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1; + scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | + BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); + + if (vif) + scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); + + IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); + if (iwlagn_txfifo_flush(priv, scd_queues)) { + IWL_ERR(priv, "flush request fail\n"); + goto done; } - IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); + IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); iwl_trans_wait_tx_queue_empty(priv->trans); done: mutex_unlock(&priv->mutex); --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/main.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/main.c @@ -252,13 +252,17 @@ struct iwl_priv *priv = container_of(work, struct iwl_priv, bt_runtime_config); + mutex_lock(&priv->mutex); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; + goto out; /* dont send host command if rf-kill is on */ if (!iwl_is_ready_rf(priv)) - return; + goto out; + iwlagn_send_advance_bt_config(priv); +out: + mutex_unlock(&priv->mutex); } static void iwl_bg_bt_full_concurrency(struct work_struct *work) --- 
linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/sta.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -590,6 +590,7 @@ sizeof(priv->tid_data[sta_id][tid])); priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; priv->num_stations--; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/tx.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1291,8 +1291,6 @@ struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; struct iwl_ht_agg *agg; struct sk_buff_head reclaimed_skbs; - struct ieee80211_tx_info *info; - struct ieee80211_hdr *hdr; struct sk_buff *skb; int sta_id; int tid; @@ -1379,22 +1377,28 @@ freed = 0; skb_queue_walk(&reclaimed_skbs, skb) { - hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(1); - info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. + */ + info->flags |= IEEE80211_TX_STAT_ACK; + if (freed == 1) { /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ info = IEEE80211_SKB_CB(skb); memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_AMPDU; info->status.ampdu_ack_len = ba_resp->txed_2_done; info->status.ampdu_len = ba_resp->txed; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -424,9 +424,6 @@ if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) return 0; - if (priv->init_ucode_run) - return 0; - iwl_init_notification_wait(&priv->notif_wait, &calib_wait, calib_complete, ARRAY_SIZE(calib_complete), iwlagn_wait_calib, priv); @@ -446,8 +443,6 @@ */ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, UCODE_CALIB_TIMEOUT); - if (!ret) - priv->init_ucode_run = true; goto out; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-7000.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -223,3 +223,4 @@ MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); +MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-config.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-config.h @@ -119,6 +119,8 @@ #define IWL_LONG_WD_TIMEOUT 10000 #define IWL_MAX_WD_TIMEOUT 120000 +#define IWL_DEFAULT_MAX_TX_POWER 22 + /* Antenna presence definitions */ #define ANT_NONE 0x0 #define ANT_A BIT(0) --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-drv.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -1247,7 +1247,7 @@ MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO); MODULE_PARM_DESC(11n_disable, - "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); + "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, int, S_IRUGO); MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); --- 
linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-fw.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -88,7 +88,6 @@ * connection when going back to D0 * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version) * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version) - * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan. * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command * containing CAM (Continuous Active Mode) indication. @@ -110,7 +109,6 @@ IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API = BIT(14), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL = BIT(15), IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE = BIT(16), - IWL_UCODE_TLV_FLAGS_SCHED_SCAN = BIT(17), IWL_UCODE_TLV_FLAGS_STA_KEY_CMD = BIT(19), IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD = BIT(20), IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24), --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-modparams.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-modparams.h @@ -79,9 +79,12 @@ IWL_POWER_NUM }; -#define IWL_DISABLE_HT_ALL BIT(0) -#define IWL_DISABLE_HT_TXAGG BIT(1) -#define IWL_DISABLE_HT_RXAGG BIT(2) +enum iwl_disable_11n { + IWL_DISABLE_HT_ALL = BIT(0), + IWL_DISABLE_HT_TXAGG = BIT(1), + IWL_DISABLE_HT_RXAGG = BIT(2), + IWL_ENABLE_HT_TXAGG = BIT(3), +}; /** * struct iwl_mod_params @@ -90,7 +93,7 @@ * * @sw_crypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, - * use IWL_DISABLE_HT_* constants + * use IWL_[DIS,EN]ABLE_HT_* constants * @amsdu_size_8K: enable 8K amsdu size, default = 0 * @restart_fw: restart firmware, default = 1 * @wd_disable: enable stuck queue check, default = 0 --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -118,8 +118,6 @@ #define LAST_2GHZ_HT_PLUS 9 #define LAST_5GHZ_HT 161 -#define DEFAULT_MAX_TX_POWER 16 - /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, @@ -182,6 +180,11 @@ for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) { ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); + + if (ch_idx >= NUM_2GHZ_CHANNELS && + !data->sku_cap_band_52GHz_enable) + ch_flags &= ~NVM_CHANNEL_VALID; + if (!(ch_flags & NVM_CHANNEL_VALID)) { IWL_DEBUG_EEPROM(dev, "Ch. %d Flags %x [%sGHz] - No traffic\n", @@ -237,7 +240,7 @@ * Default value - highest tx power value. max_power * is not used in mvm, and is used for backwards compatibility */ - channel->max_power = DEFAULT_MAX_TX_POWER; + channel->max_power = IWL_DEFAULT_MAX_TX_POWER; is_5ghz = channel->band == IEEE80211_BAND_5GHZ; IWL_DEBUG_EEPROM(dev, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-prph.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -274,4 +274,8 @@ /*********************** END TX SCHEDULER *************************************/ +/* Oscillator clock */ +#define OSC_CLK (0xa04068) +#define OSC_CLK_FORCE_CONTROL (0x8) + #endif /* __iwl_prph_h__ */ --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/iwl-trans.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -484,6 +484,7 @@ * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. 
* @pm_support: set to true in start_hw if link pm is supported + * @ltr_enabled: set to true if the LTR is enabled * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. * @dev_cmd_headroom: room needed for the transport's private use before the @@ -508,6 +509,7 @@ u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; bool pm_support; + bool ltr_enabled; /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -871,8 +871,11 @@ lockdep_assert_held(&mvm->mutex); - /* Rssi update while not associated ?! */ - if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) + /* + * Rssi update while not associated - can happen since the statistics + * are handled asynchronously + */ + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) return; /* No BT - reports should be disabled */ --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -63,6 +63,7 @@ #include "mvm.h" #include "sta.h" #include "iwl-io.h" +#include "fw-error-dump.h" struct iwl_dbgfs_mvm_ctx { struct iwl_mvm *mvm; @@ -139,6 +140,48 @@ return ret; } +static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + int ret; + + if (!mvm) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (!mvm->fw_error_dump) { + ret = -ENODATA; + goto out; + } + + file->private_data = mvm->fw_error_dump; + mvm->fw_error_dump = NULL; + ret = 0; + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_fw_error_dump_file *dump_file = file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, + dump_file, + le32_to_cpu(dump_file->file_len)); +} + +static int iwl_dbgfs_fw_error_dump_release(struct inode *inode, + struct file *file) +{ + vfree(file->private_data); + + return 0; +} + static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1170,6 +1213,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain); +static const struct file_operations iwl_dbgfs_fw_error_dump_ops = { + .open = iwl_dbgfs_fw_error_dump_open, + .read = iwl_dbgfs_fw_error_dump_read, + .release = iwl_dbgfs_fw_error_dump_release, +}; + #ifdef CONFIG_PM_SLEEP MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram); #endif @@ -1189,6 +1238,7 @@ MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD) --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -66,13 +66,46 @@ /* Power Management Commands, Responses, Notifications */ +/** + * enum iwl_ltr_config_flags - masks for LTR config command flags + * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status + * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow + * memory access + * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on 
ANY LTR + * reg change + * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from + * D0 to D3 + * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register + * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register + * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD + */ +enum iwl_ltr_config_flags { + LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), + LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), + LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), + LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), + LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), + LTR_CFG_FLAG_SW_SET_LONG = BIT(5), + LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), +}; + +/** + * struct iwl_ltr_config_cmd - configures the LTR + * @flags: See %enum iwl_ltr_config_flags + */ +struct iwl_ltr_config_cmd { + __le32 flags; + __le32 static_long; + __le32 static_short; +} __packed; + /* Radio LP RX Energy Threshold measured in dBm */ #define POWER_LPRX_RSSI_THRESHOLD 75 #define POWER_LPRX_RSSI_THRESHOLD_MAX 94 #define POWER_LPRX_RSSI_THRESHOLD_MIN 30 /** - * enum iwl_scan_flags - masks for power table command flags + * enum iwl_power_flags - masks for power table command flags * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -141,6 +141,7 @@ /* Power - legacy power table command */ POWER_TABLE_CMD = 0x77, + LTR_CONFIG = 0xee, /* Thermal Throttling*/ REPLY_THERMAL_MNG_BACKOFF = 0x7e, --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h @@ -0,0 +1,106 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __fw_error_dump_h__ +#define __fw_error_dump_h__ + +#include + +#define IWL_FW_ERROR_DUMP_BARKER 0x14789632 + +/** + * enum iwl_fw_error_dump_type - types of data in the dump file + * @IWL_FW_ERROR_DUMP_SRAM: + * @IWL_FW_ERROR_DUMP_REG: + */ +enum iwl_fw_error_dump_type { + IWL_FW_ERROR_DUMP_SRAM = 0, + IWL_FW_ERROR_DUMP_REG = 1, + + IWL_FW_ERROR_DUMP_MAX, +}; + +/** + * struct iwl_fw_error_dump_data - data for one type + * @type: %enum iwl_fw_error_dump_type + * @len: the length starting from %data - must be a multiplier of 4. + * @data: the data itself padded to be a multiplier of 4. + */ +struct iwl_fw_error_dump_data { + __le32 type; + __le32 len; + __u8 data[]; +} __packed __aligned(4); + +/** + * struct iwl_fw_error_dump_file - the layout of the header of the file + * @barker: must be %IWL_FW_ERROR_DUMP_BARKER + * @file_len: the length of all the file starting from %barker + * @data: array of %struct iwl_fw_error_dump_data + */ +struct iwl_fw_error_dump_file { + __le32 barker; + __le32 file_len; + u8 data[0]; +} __packed __aligned(4); + +#endif /* __fw_error_dump_h__ */ --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/fw.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -431,6 +431,15 @@ goto error; } + if (mvm->trans->ltr_enabled) { + struct iwl_ltr_config_cmd cmd = { + .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), + }; + + WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, + sizeof(cmd), &cmd)); + } + ret = iwl_mvm_power_update_device_mode(mvm); if (ret) goto error; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -566,13 +566,8 @@ if (vif->bss_conf.qos) cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - /* Don't use cts to self as the fw doesn't support it currently. 
*/ - if (vif->bss_conf.use_cts_prot) { + if (vif->bss_conf.use_cts_prot) cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8) - cmd->protection_flags |= - cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN); - } /* * I think that we should enable these 2 flags regardless the HT PROT @@ -1153,10 +1148,18 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { - u16 *id = _data; + struct iwl_missed_beacons_notif *missed_beacons = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (mvmvif->id == *id) + if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id)) + return; + + /* + * TODO: the threshold should be adjusted based on latency conditions, + * and/or in case of a CS flow on one of the other AP vifs. + */ + if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) > + IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); } @@ -1165,12 +1168,19 @@ struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data; - u16 id = (u16)le32_to_cpu(missed_beacons->mac_id); + struct iwl_missed_beacons_notif *mb = (void *)pkt->data; + + IWL_DEBUG_INFO(mvm, + "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", + le32_to_cpu(mb->mac_id), + le32_to_cpu(mb->consec_missed_beacons), + le32_to_cpu(mb->consec_missed_beacons_since_last_rx), + le32_to_cpu(mb->num_recvd_beacons), + le32_to_cpu(mb->num_expected_beacons)); ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_beacon_loss_iterator, - &id); + mb); return 0; } --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -179,7 +179,7 @@ !iwlwifi_mod_params.sw_crypto) hw->flags |= IEEE80211_HW_MFP_CAPABLE; - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { + if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; hw->uapsd_queues = IWL_UAPSD_AC_INFO; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; @@ -246,13 +246,13 @@ else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { + if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) { hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; /* we create the 802.11 header and zero length SSID IE. 
*/ hw->wiphy->max_sched_scan_ie_len = - SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; + SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; } hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | @@ -321,6 +321,24 @@ ieee80211_free_txskb(hw, skb); } +static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* enabled by default */ + return true; +} + static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -340,7 +358,7 @@ switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) { + if (!iwl_enable_rx_ampdu(mvm->cfg)) { ret = -EINVAL; break; } @@ -350,7 +368,7 @@ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); break; case IEEE80211_AMPDU_TX_START: - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) { + if (!iwl_enable_tx_ampdu(mvm->cfg)) { ret = -EINVAL; break; } @@ -385,9 +403,6 @@ mvmvif->uploaded = false; mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; - /* does this make sense at all? */ - mvmvif->color++; - spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); @@ -397,6 +412,14 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL }; + + /* notify the userspace about the error we had */ + if (iwl_mvm_fw_error_dump(mvm)) + kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env); +#endif + iwl_trans_stop_device(mvm->trans); iwl_trans_stop_hw(mvm->trans, false); @@ -601,7 +624,7 @@ if (ret) goto out_remove_mac; - if (!mvm->bf_allowed_vif && + if (!mvm->bf_allowed_vif && false && vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){ mvm->bf_allowed_vif = mvmvif; @@ -802,7 +825,7 @@ memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN); - return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, + return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, sizeof(mcast_filter_cmd), &mcast_filter_cmd); } --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -68,6 +68,7 @@ #include #include #include +#include #include "iwl-op-mode.h" #include "iwl-trans.h" @@ -81,6 +82,7 @@ #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 +#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_BK = 0, @@ -151,7 +153,7 @@ IWL_POWER_SCHEME_LP }; -#define IWL_CONN_MAX_LISTEN_INTERVAL 70 +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ @@ -517,6 +519,9 @@ /* -1 for always, 0 for never, >0 for that many times */ s8 restart_fw; + void *fw_error_dump; + void *fw_error_sram; + u32 fw_error_sram_len; struct led_classdev led; @@ -600,7 +605,10 @@ struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx); void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm); -void iwl_mvm_dump_sram(struct iwl_mvm *mvm); +#ifdef CONFIG_IWLWIFI_DEBUGFS +bool 
iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); +void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm); +#endif u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/ops.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -61,6 +61,7 @@ * *****************************************************************************/ #include +#include #include #include "iwl-notif-wait.h" @@ -78,6 +79,7 @@ #include "iwl-prph.h" #include "rs.h" #include "fw-api-scan.h" +#include "fw-error-dump.h" #include "time-event.h" /* @@ -310,6 +312,7 @@ CMD(REPLY_BEACON_FILTERING_CMD), CMD(REPLY_THERMAL_MNG_BACKOFF), CMD(MAC_PM_POWER_TABLE), + CMD(LTR_CONFIG), CMD(BT_COEX_CI), }; #undef CMD @@ -470,6 +473,7 @@ out_unregister: ieee80211_unregister_hw(mvm->hw); + iwl_mvm_leds_exit(mvm); out_free: iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); @@ -491,6 +495,8 @@ ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); + vfree(mvm->fw_error_dump); + kfree(mvm->fw_error_sram); #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) kfree(mvm->d3_resume_sram); @@ -751,13 +757,66 @@ } } +#ifdef CONFIG_IWLWIFI_DEBUGFS +/* Returns true if a dump was created */ +bool iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) +{ + struct iwl_fw_error_dump_file *dump_file; + struct iwl_fw_error_dump_data *dump_data; + u32 file_len; + + lockdep_assert_held(&mvm->mutex); + + /* + * Don't create an error dump if we've already got one, but return + * true to trigger a (re-?)notification to userspace that there's + * something there to read. + */ + if (mvm->fw_error_dump) + return true; + + if (!mvm->fw_error_sram) + return false; + + file_len = mvm->fw_error_sram_len + + sizeof(*dump_file) + + sizeof(*dump_data); + + dump_file = vmalloc(file_len); + if (!dump_file) + return false; + + mvm->fw_error_dump = dump_file; + + dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); + dump_file->file_len = cpu_to_le32(file_len); + dump_data = (void *)dump_file->data; + dump_data->type = IWL_FW_ERROR_DUMP_SRAM; + dump_data->len = cpu_to_le32(mvm->fw_error_sram_len); + + /* + * No need for lock since at this stage the FW isn't loaded. So it + * can't assert - we are the only one who can possibly be accessing + * mvm->fw_error_sram right now. + */ + memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len); + kfree(mvm->fw_error_sram); + mvm->fw_error_sram = NULL; + mvm->fw_error_sram_len = 0; + + return true; +} +#endif + static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); iwl_mvm_dump_nic_error_log(mvm); - if (!mvm->restart_fw) - iwl_mvm_dump_sram(mvm); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_mvm_fw_error_sram_dump(mvm); +#endif iwl_mvm_nic_restart(mvm); } --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/scan.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -325,7 +325,8 @@ iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ?
1 : 0); - cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL); + cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | + TX_CMD_FLG_BT_DIS); cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); cmd->tx_cmd.rate_n_flags = --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/tx.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -822,16 +822,12 @@ struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data; - struct ieee80211_tx_info *info; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; - struct ieee80211_hdr *hdr; struct sk_buff *skb; int sta_id, tid, freed; - /* "flow" corresponds to Tx queue */ u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); - /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); @@ -839,6 +835,11 @@ sta_id = ba_notif->sta_id; tid = ba_notif->tid; + if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || + tid >= IWL_MAX_TID_COUNT, + "sta_id %d tid %d", sta_id, tid)) + return 0; + rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); @@ -888,22 +889,26 @@ freed = 0; skb_queue_walk(&reclaimed_skbs, skb) { - hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(1); - info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. 
+ */ + info->flags |= IEEE80211_TX_STAT_ACK; + if (freed == 1) { /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ - info = IEEE80211_SKB_CB(skb); - memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_AMPDU; info->status.ampdu_ack_len = ba_notif->txed_2_done; info->status.ampdu_len = ba_notif->txed; --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/mvm/utils.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -411,6 +411,8 @@ mvm->status, table.valid); } + IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version); + trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.data3, table.blink1, table.blink2, table.ilink1, @@ -453,27 +455,26 @@ IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler); } -void iwl_mvm_dump_sram(struct iwl_mvm *mvm) +void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm) { const struct fw_img *img; - int ofs, len = 0; - u8 *buf; + u32 ofs, sram_len; + void *sram; - if (!mvm->ucode_loaded) + if (!mvm->ucode_loaded || mvm->fw_error_sram) return; img = &mvm->fw->img[mvm->cur_ucode]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; - len = img->sec[IWL_UCODE_SECTION_DATA].len; + sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; - buf = kzalloc(len, GFP_ATOMIC); - if (!buf) + sram = kzalloc(sram_len, GFP_ATOMIC); + if (!sram) return; - iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len); - iwl_print_hex_error(mvm->trans, buf, len); - - kfree(buf); + iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len); + mvm->fw_error_sram = sram; + mvm->fw_error_sram_len = sram_len; } /** --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/pcie/drv.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -272,6 +272,8 @@ {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)}, @@ -297,6 +299,9 @@ {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)}, @@ -312,6 +317,8 @@ {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)}, {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)}, @@ -350,25 +357,34 @@ {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, 
iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/pcie/trans.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -121,6 +121,7 @@ { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 lctl; + u16 cap; /* * HW bug W/A for instability in PCIe bus L0S->L1 transition. @@ -131,16 +132,17 @@ * power savings, even without L1. */ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { - /* L1-ASPM enabled; disable(!) L0S */ + if (lctl & PCI_EXP_LNKCTL_ASPM_L1) iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - dev_info(trans->dev, "L1 Enabled; Disabling L0S\n"); - } else { - /* L1-ASPM disabled; enable(!) L0S */ + else iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - dev_info(trans->dev, "L1 Disabled; Enabling L0S\n"); - } trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); + + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); + trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; + dev_info(trans->dev, "L1 %sabled - LTR %sabled\n", + (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", + trans->ltr_enabled ? "En" : "Dis"); } /* @@ -206,6 +208,28 @@ goto out; } + if (trans->cfg->host_interrupt_operation_mode) { + /* + * This is a bit of an abuse - This is needed for 7260 / 3160 + * only check host_interrupt_operation_mode even if this is + * not related to host_interrupt_operation_mode. + * + * Enable the oscillator to count wake up time for L1 exit. This + * consumes slightly more power (100uA) - but allows to be sure + * that we wake up from L1 on time. + * + * This looks weird: read twice the same register, discard the + * value, set a bit, and yet again, read that same register + * just to discard the value. But that's the way the hardware + * seems to like it. + */ + iwl_read_prph(trans, OSC_CLK); + iwl_read_prph(trans, OSC_CLK); + iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); + iwl_read_prph(trans, OSC_CLK); + iwl_read_prph(trans, OSC_CLK); + } + /* * Enable DMA clock and wait for it to stabilize. 
* @@ -326,6 +350,7 @@ { int ret; int t = 0; + int iter; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); @@ -334,18 +359,23 @@ if (ret >= 0) return 0; - /* If HW is not ready, prepare the conditions to check again */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE); + for (iter = 0; iter < 10; iter++) { + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + do { + ret = iwl_pcie_set_hw_ready(trans); + if (ret >= 0) + return 0; + + usleep_range(200, 1000); + t += 200; + } while (t < 150000); + msleep(25); + } - do { - ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; - - usleep_range(200, 1000); - t += 200; - } while (t < 150000); + IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter); return ret; } --- linux-3.13.0.orig/drivers/net/wireless/iwlwifi/pcie/tx.c +++ linux-3.13.0/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -729,7 +729,12 @@ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); - iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr); + /* + * Send 0 as the scd_base_addr since the device may have been reset + * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will + * contain garbage. + */ + iwl_pcie_tx_start(trans, 0); } /* --- linux-3.13.0.orig/drivers/net/wireless/mac80211_hwsim.c +++ linux-3.13.0/drivers/net/wireless/mac80211_hwsim.c @@ -1466,7 +1466,9 @@ return 0; } -static void mac80211_hwsim_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void mac80211_hwsim_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, bool drop) { /* Not implemented, queues only on kernel side */ } --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/11ac.c +++ linux-3.13.0/drivers/net/wireless/mwifiex/11ac.c @@ -189,8 +189,7 @@ vht_cap->header.len = cpu_to_le16(sizeof(struct ieee80211_vht_cap)); memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), - (u8 *)bss_desc->bcn_vht_cap + - sizeof(struct ieee_types_header), + (u8 *)bss_desc->bcn_vht_cap, le16_to_cpu(vht_cap->header.len)); mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band); --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/11n.c +++ linux-3.13.0/drivers/net/wireless/mwifiex/11n.c @@ -308,8 +308,7 @@ ht_cap->header.len = cpu_to_le16(sizeof(struct ieee80211_ht_cap)); memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), - (u8 *) bss_desc->bcn_ht_cap + - sizeof(struct ieee_types_header), + (u8 *)bss_desc->bcn_ht_cap, le16_to_cpu(ht_cap->header.len)); mwifiex_fill_cap_info(priv, radio_type, ht_cap); --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/fw.h +++ linux-3.13.0/drivers/net/wireless/mwifiex/fw.h @@ -226,7 +226,7 @@ /* HW_SPEC fw_cap_info */ -#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14))) +#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13))) #define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3) #define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3) --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/main.c +++ linux-3.13.0/drivers/net/wireless/mwifiex/main.c @@ -646,6 +646,7 @@ } tx_info = MWIFIEX_SKB_TXCB(skb); + memset(tx_info, 0, sizeof(*tx_info)); tx_info->bss_num = priv->bss_num; tx_info->bss_type = priv->bss_type; @@ -747,7 +748,7 @@ static u16 mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { skb->priority =
cfg80211_classify8021d(skb); return mwifiex_1d_to_wmm_queue[skb->priority]; --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/pcie.c +++ linux-3.13.0/drivers/net/wireless/mwifiex/pcie.c @@ -1211,6 +1211,12 @@ rd_index = card->rxbd_rdptr & reg->rx_mask; skb_data = card->rx_buf_list[rd_index]; + /* If skb allocation was failed earlier for Rx packet, + * rx_buf_list[rd_index] would have been left with a NULL. + */ + if (!skb_data) + return -ENOMEM; + MWIFIEX_SKB_PACB(skb_data, &buf_pa); pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, PCI_DMA_FROMDEVICE); @@ -1525,6 +1531,14 @@ if (adapter->ps_state == PS_STATE_SLEEP_CFM) { mwifiex_process_sleep_confirm_resp(adapter, skb->data, skb->len); + mwifiex_pcie_enable_host_int(adapter); + if (mwifiex_write_reg(adapter, + PCIE_CPU_INT_EVENT, + CPU_INTR_SLEEP_CFM_DONE)) { + dev_warn(adapter->dev, + "Write register failed\n"); + return -1; + } while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); @@ -1993,23 +2007,9 @@ adapter->int_status |= pcie_ireg; spin_unlock_irqrestore(&adapter->int_lock, flags); - if (pcie_ireg & HOST_INTR_CMD_DONE) { - if ((adapter->ps_state == PS_STATE_SLEEP_CFM) || - (adapter->ps_state == PS_STATE_SLEEP)) { - mwifiex_pcie_enable_host_int(adapter); - if (mwifiex_write_reg(adapter, - PCIE_CPU_INT_EVENT, - CPU_INTR_SLEEP_CFM_DONE) - ) { - dev_warn(adapter->dev, - "Write register failed\n"); - return; - - } - } - } else if (!adapter->pps_uapsd_mode && - adapter->ps_state == PS_STATE_SLEEP && - mwifiex_pcie_ok_to_access_hw(adapter)) { + if (!adapter->pps_uapsd_mode && + adapter->ps_state == PS_STATE_SLEEP && + mwifiex_pcie_ok_to_access_hw(adapter)) { /* Potentially for PCIe we could get other * interrupts like shared. Don't change power * state until cookie is set */ --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/scan.c +++ linux-3.13.0/drivers/net/wireless/mwifiex/scan.c @@ -1681,7 +1681,7 @@ const u8 *ie_buf; size_t ie_len; u16 channel = 0; - u64 fw_tsf = 0; + __le64 fw_tsf = 0; u16 beacon_size = 0; u32 curr_bcn_bytes; u32 freq; @@ -1815,7 +1815,7 @@ ie_buf, ie_len, rssi, GFP_KERNEL); bss_priv = (struct mwifiex_bss_priv *)bss->priv; bss_priv->band = band; - bss_priv->fw_tsf = fw_tsf; + bss_priv->fw_tsf = le64_to_cpu(fw_tsf); if (priv->media_connected && !memcmp(bssid, priv->curr_bss_params.bss_descriptor @@ -2101,12 +2101,12 @@ curr_bss->ht_info_offset); if (curr_bss->bcn_vht_cap) - curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + - curr_bss->vht_cap_offset); + curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf + + curr_bss->vht_cap_offset); if (curr_bss->bcn_vht_oper) - curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + - curr_bss->vht_info_offset); + curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf + + curr_bss->vht_info_offset); if (curr_bss->bcn_bss_co_2040) curr_bss->bcn_bss_co_2040 = --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/usb.c +++ linux-3.13.0/drivers/net/wireless/mwifiex/usb.c @@ -511,13 +511,6 @@ MWIFIEX_BSS_ROLE_ANY), MWIFIEX_ASYNC_CMD); -#ifdef CONFIG_PM - /* Resume handler may be called due to remote wakeup, - * force to exit suspend anyway - */ - usb_disable_autosuspend(card->udev); -#endif /* CONFIG_PM */ - return 0; } @@ -557,7 +550,6 @@ .id_table = mwifiex_usb_table, .suspend = mwifiex_usb_suspend, .resume = mwifiex_usb_resume, - .supports_autosuspend = 1, }; static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) --- linux-3.13.0.orig/drivers/net/wireless/mwifiex/wmm.c +++ 
linux-3.13.0/drivers/net/wireless/mwifiex/wmm.c @@ -559,7 +559,8 @@ mwifiex_wmm_delete_all_ralist(priv); memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); - if (priv->adapter->if_ops.clean_pcie_ring) + if (priv->adapter->if_ops.clean_pcie_ring && + !priv->adapter->surprise_removed) priv->adapter->if_ops.clean_pcie_ring(priv->adapter); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } --- linux-3.13.0.orig/drivers/net/wireless/p54/main.c +++ linux-3.13.0/drivers/net/wireless/p54/main.c @@ -670,7 +670,8 @@ return total; } -static void p54_flush(struct ieee80211_hw *dev, u32 queues, bool drop) +static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct p54_common *priv = dev->priv; unsigned int total, i; --- linux-3.13.0.orig/drivers/net/wireless/p54/txrx.c +++ linux-3.13.0/drivers/net/wireless/p54/txrx.c @@ -587,7 +587,7 @@ chan = priv->curchan; if (chan) { struct survey_info *survey = &priv->survey[chan->hw_value]; - survey->noise = clamp_t(s8, priv->noise, -128, 127); + survey->noise = clamp(priv->noise, -128, 127); survey->channel_time = priv->survey_raw.active; survey->channel_time_tx = priv->survey_raw.tx; survey->channel_time_busy = priv->survey_raw.tx + --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2500pci.c +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1684,8 +1684,13 @@ /* * Detect if this device has an hardware controlled radio. */ - if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) { __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); + /* + * On this device RFKILL initialized during probe does not work. + */ + __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags); + } /* * Check if the BBP tuning should be enabled. --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2800.h +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2800.h @@ -2041,7 +2041,7 @@ * 2 - drop tx power by 12dBm, * 3 - increase tx power by 6dBm */ -#define BBP1_TX_POWER_CTRL FIELD8(0x07) +#define BBP1_TX_POWER_CTRL FIELD8(0x03) #define BBP1_TX_ANTENNA FIELD8(0x18) /* --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2800usb.c +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1064,6 +1064,7 @@ /* Ovislink */ { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, + { USB_DEVICE(0x1b75, 0xa200) }, /* Para */ { USB_DEVICE(0x20b8, 0x8888) }, /* Pegatron */ --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2x00.h +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2x00.h @@ -695,6 +695,7 @@ REQUIRE_SW_SEQNO, REQUIRE_HT_TX_DESC, REQUIRE_PS_AUTOWAKE, + REQUIRE_DELAYED_RFKILL, /* * Capabilities @@ -1450,7 +1451,8 @@ struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params); void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); -void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop); +void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); void rt2x00mac_get_ringparam(struct ieee80211_hw *hw, --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2x00dev.c +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -1128,9 +1128,10 @@ return; /* - * Unregister extra components. + * Stop rfkill polling. 
*/ - rt2x00rfkill_unregister(rt2x00dev); + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_unregister(rt2x00dev); /* * Allow the HW to uninitialize. @@ -1168,6 +1169,12 @@ set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); + /* + * Start rfkill polling. + */ + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_register(rt2x00dev); + return 0; } @@ -1377,7 +1384,12 @@ rt2x00link_register(rt2x00dev); rt2x00leds_register(rt2x00dev); rt2x00debug_register(rt2x00dev); - rt2x00rfkill_register(rt2x00dev); + + /* + * Start rfkill polling. + */ + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_register(rt2x00dev); return 0; @@ -1393,6 +1405,12 @@ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* + * Stop rfkill polling. + */ + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_unregister(rt2x00dev); + + /* * Disable radio. */ rt2x00lib_disable_radio(rt2x00dev); --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2x00mac.c +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -489,6 +489,8 @@ crypto.cipher = rt2x00crypto_key_to_cipher(key); if (crypto.cipher == CIPHER_NONE) return -EOPNOTSUPP; + if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev)) + return -EOPNOTSUPP; crypto.cmd = cmd; @@ -623,20 +625,18 @@ bss_conf->bssid); /* - * Update the beacon. This is only required on USB devices. PCI - * devices fetch beacons periodically. - */ - if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev)) - rt2x00queue_update_beacon(rt2x00dev, vif); - - /* * Start/stop beaconing. */ if (changes & BSS_CHANGED_BEACON_ENABLED) { if (!bss_conf->enable_beacon && intf->enable_beacon) { - rt2x00queue_clear_beacon(rt2x00dev, vif); rt2x00dev->intf_beaconing--; intf->enable_beacon = false; + /* + * Clear beacon in the H/W for this vif. This is needed + * to disable beaconing on this particular interface + * and keep it running on other interfaces. + */ + rt2x00queue_clear_beacon(rt2x00dev, vif); if (rt2x00dev->intf_beaconing == 0) { /* @@ -647,11 +647,15 @@ rt2x00queue_stop_queue(rt2x00dev->bcn); mutex_unlock(&intf->beacon_skb_mutex); } - - } else if (bss_conf->enable_beacon && !intf->enable_beacon) { rt2x00dev->intf_beaconing++; intf->enable_beacon = true; + /* + * Upload beacon to the H/W. This is only required on + * USB devices. PCI devices fetch beacons periodically. + */ + if (rt2x00_is_usb(rt2x00dev)) + rt2x00queue_update_beacon(rt2x00dev, vif); if (rt2x00dev->intf_beaconing == 1) { /* @@ -749,7 +753,8 @@ } EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); -void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; --- linux-3.13.0.orig/drivers/net/wireless/rt2x00/rt2x00queue.c +++ linux-3.13.0/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -160,55 +160,29 @@ skb_trim(skb, frame_length); } -void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) +/* + * H/W needs L2 padding between the header and the paylod if header size + * is not 4 bytes aligned. + */ +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - unsigned int payload_length = skb->len - header_length; - unsigned int header_align = ALIGN_SIZE(skb, 0); - unsigned int payload_align = ALIGN_SIZE(skb, header_length); - unsigned int l2pad = payload_length ? 
L2PAD_SIZE(header_length) : 0; - - /* - * Adjust the header alignment if the payload needs to be moved more - * than the header. - */ - if (payload_align > header_align) - header_align += 4; + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; - /* There is nothing to do if no alignment is needed */ - if (!header_align) + if (!l2pad) return; - /* Reserve the amount of space needed in front of the frame */ - skb_push(skb, header_align); - - /* - * Move the header. - */ - memmove(skb->data, skb->data + header_align, header_length); - - /* Move the payload, if present and if required */ - if (payload_length && payload_align) - memmove(skb->data + header_length + l2pad, - skb->data + header_length + l2pad + payload_align, - payload_length); - - /* Trim the skb to the correct size */ - skb_trim(skb, header_length + l2pad + payload_length); + skb_push(skb, l2pad); + memmove(skb->data, skb->data + l2pad, hdr_len); } -void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - /* - * L2 padding is only present if the skb contains more than just the - * IEEE 802.11 header. - */ - unsigned int l2pad = (skb->len > header_length) ? - L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; - memmove(skb->data + l2pad, skb->data, header_length); + memmove(skb->data + l2pad, skb->data, hdr_len); skb_pull(skb, l2pad); } --- linux-3.13.0.orig/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ linux-3.13.0/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h @@ -15,6 +15,8 @@ #ifndef RTL8187_H #define RTL8187_H +#include + #include "rtl818x.h" #include "leds.h" @@ -139,7 +141,10 @@ u8 aifsn[4]; u8 rfkill_mask; struct { - __le64 buf; + union { + __le64 buf; + u8 dummy1[L1_CACHE_BYTES]; + } ____cacheline_aligned; struct sk_buff_head queue; } b_tx_status; /* This queue is used by both -b and non-b devices */ struct mutex io_mutex; @@ -147,7 +152,8 @@ u8 bits8; __le16 bits16; __le32 bits32; - } *io_dmabuf; + u8 dummy2[L1_CACHE_BYTES]; + } *io_dmabuf ____cacheline_aligned; bool rfkill_off; u16 seqno; }; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/Kconfig +++ linux-3.13.0/drivers/net/wireless/rtlwifi/Kconfig @@ -5,7 +5,7 @@ ---help--- This option will enable support for the Realtek mac80211-based wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de, - rtl8723eu, and rtl8188eu share some common code. + rtl8723ae, rtl8723be, and rtl8188ae share some common code. if RTL_CARDS @@ -48,12 +48,27 @@ depends on PCI select RTLWIFI select RTLWIFI_PCI + select RTL8723_COMMON + select RTLBTCOEXIST ---help--- This is the driver for Realtek RTL8723AE 802.11n PCIe wireless network adapters. If you choose to build it as a module, it will be called rtl8723ae +config RTL8723BE + tristate "Realtek RTL8723BE PCIe Wireless Network Adapter" + depends on PCI + select RTLWIFI + select RTLWIFI_PCI + select RTL8723_COMMON + select RTLBTCOEXIST + ---help--- + This is the driver for Realtek RTL8723BE 802.11n PCIe + wireless network adapters. 
+ + If you choose to build it as a module, it will be called rtl8723be + config RTL8188EE tristate "Realtek RTL8188EE Wireless Network Adapter" depends on PCI @@ -101,4 +116,14 @@ depends on RTL8192CE || RTL8192CU default y +config RTL8723_COMMON + tristate + depends on RTL8723AE || RTL8723BE + default y + +config RTLBTCOEXIST + tristate + depends on RTL8723AE || RTL8723BE + default y + endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/Makefile +++ linux-3.13.0/drivers/net/wireless/rtlwifi/Makefile @@ -24,6 +24,9 @@ obj-$(CONFIG_RTL8192SE) += rtl8192se/ obj-$(CONFIG_RTL8192DE) += rtl8192de/ obj-$(CONFIG_RTL8723AE) += rtl8723ae/ +obj-$(CONFIG_RTL8723BE) += rtl8723be/ obj-$(CONFIG_RTL8188EE) += rtl8188ee/ +obj-$(CONFIG_RTLBTCOEXIST) += btcoexist/ +obj-$(CONFIG_RTL8723_COMMON) += rtl8723com/ ccflags-y += -D__CHECK_ENDIAN__ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/base.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/base.c @@ -1437,7 +1437,8 @@ /* if we can't recv beacon for 6s, we should * reconnect this AP */ - if (rtlpriv->link_info.roam_times >= 3) { + if ((rtlpriv->link_info.roam_times >= 3) && + !is_zero_ether_addr(rtlpriv->mac80211.bssid)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AP off, try to reconnect now\n"); rtlpriv->link_info.roam_times = 0; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/Makefile +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/Makefile @@ -0,0 +1,7 @@ +btcoexist-objs := halbtc8723b2ant.o \ + halbtcoutsrc.o \ + rtl_btc.o + +obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o + +ccflags-y += -D__CHECK_ENDIAN__ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h @@ -0,0 +1,87 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * Larry Finger + * + ******************************************************************************/ + +#ifndef __HALBT_PRECOMP_H__ +#define __HALBT_PRECOMP_H__ +/************************************************************* + * include files + *************************************************************/ +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../regd.h" +#include "../cam.h" +#include "../ps.h" +#include "../pci.h" + +#include "halbtcoutsrc.h" + +#include "halbtc8723b2ant.h" + +#define BIT0 0x00000001 +#define BIT1 0x00000002 +#define BIT2 0x00000004 +#define BIT3 0x00000008 +#define BIT4 0x00000010 +#define BIT5 0x00000020 +#define BIT6 0x00000040 +#define BIT7 0x00000080 +#define BIT8 0x00000100 +#define BIT9 0x00000200 +#define BIT10 0x00000400 +#define BIT11 0x00000800 +#define BIT12 0x00001000 +#define BIT13 0x00002000 +#define BIT14 0x00004000 +#define BIT15 0x00008000 +#define BIT16 0x00010000 +#define BIT17 0x00020000 +#define BIT18 0x00040000 +#define BIT19 0x00080000 +#define BIT20 0x00100000 +#define BIT21 0x00200000 +#define BIT22 0x00400000 +#define BIT23 0x00800000 +#define BIT24 0x01000000 +#define BIT25 0x02000000 +#define BIT26 0x04000000 +#define BIT27 0x08000000 +#define BIT28 0x10000000 +#define BIT29 0x20000000 +#define BIT30 0x40000000 +#define BIT31 0x80000000 + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#endif /* __HALBT_PRECOMP_H__ */ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -0,0 +1,3698 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ +/*************************************************************** + * Description: + * + * This file is for RTL8723B Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. 
+ * + **************************************************************/ +/************************************************************** + * include files + **************************************************************/ +#include "halbt_precomp.h" +/************************************************************** + * Global variables, these are static variables + **************************************************************/ +static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant; +static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant; +static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant; +static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant; + +static const char *const glbt_info_src_8723b_2ant[] = { + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +static u32 glcoex_ver_date_8723b_2ant = 20130731; +static u32 glcoex_ver_8723b_2ant = 0x3b; + +/************************************************************** + * local function proto type if needed + **************************************************************/ +/************************************************************** + * local function start with btc8723b2ant_ + **************************************************************/ +static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, + u8 rssi_thresh1) +{ + s32 bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to High\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Low\n"); + } + } else { + if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (bt_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to High\n"); + } else if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Low\n"); + 
} else { + bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Medium\n"); + } + } else { + if (bt_rssi < rssi_thresh1) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, + u8 rssi_thresh, u8 rssi_thresh1) +{ + s32 wifi_rssi = 0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to High\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Low\n"); + } + } else { + if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifi_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to High\n"); + } else if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Medium\n"); + } + } else { + if (wifi_rssi < rssi_thresh1) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Medium\n"); + } else { + wifi_rssi_state = 
BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at High\n"); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u32tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0; + u32 reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u32tmp & MASKLWORD; + reg_hp_rx = (u32tmp & MASKHWORD) >> 16; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u32tmp & MASKLWORD; + reg_lp_rx = (u32tmp & MASKHWORD) >> 16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy; + static bool pre_under_4way; + static bool pre_bt_hs_on; + bool wifi_busy = false, under_4way = false, bt_hs_on = false; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + + if (bt_hs_on != pre_bt_hs_on) { + pre_bt_hs_on = bt_hs_on; + return true; + } + } + + return false; +} + +static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist) +{ + /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + +#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */ + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. 
*/ + if (bt_hs_on) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } +#else /* profile from bt stack */ + bt_link_info->bt_link_exist = stack_info->bt_link_exist; + bt_link_info->sco_exist = stack_info->sco_exist; + bt_link_info->a2dp_exist = stack_info->a2dp_exist; + bt_link_info->pan_exist = stack_info->pan_exist; + bt_link_info->hid_exist = stack_info->hid_exist; + + /*for win-8 stack HID report error*/ + if (!stack_info->hid_exist) + stack_info->hid_exist = coex_sta->hid_exist; + /*sync BTInfo with BT firmware and stack*/ + /* when stack HID report error, here we use the info from bt fw.*/ + if (!stack_info->bt_link_exist) + stack_info->bt_link_exist = coex_sta->bt_link_exist; +#endif + /* check if Sco only */ + if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED; + u8 num_of_diff_profile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + num_of_diff_profile++; + if (bt_link_info->hid_exist) + num_of_diff_profile++; + if (bt_link_info->pan_exist) + num_of_diff_profile++; + if (bt_link_info->a2dp_exist) + num_of_diff_profile++; + + if (num_of_diff_profile == 1) { + if (bt_link_info->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO only\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID only\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP only\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], PAN(HS) only\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], PAN(EDR) only\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (num_of_diff_profile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP ==> SCO\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else if 
(bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + PAN(HS)\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + PAN(HS)\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP + PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex],A2DP + PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (num_of_diff_profile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP" + " ==> HID\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + " + "PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + " + "PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP + " + "PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP + " + "PAN(EDR) ==> HID\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP + " + "PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP + " + "PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (num_of_diff_profile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Error!!! 
SCO + HID" + " + A2DP + PAN(HS)\n"); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP +" + " PAN(EDR)==>PAN(EDR)+HID\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + return algorithm; +} + +static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist) +{ + bool ret = false; + bool bt_hs_on = false, wifi_connected = false; + s32 bt_hs_rssi = 0; + u8 bt_rssi_state; + + if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on)) + return false; + if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected)) + return false; + if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) + return false; + + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + if (wifi_connected) { + if (bt_hs_on) { + if (bt_hs_rssi > 37) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt " + "power for HS mode!!\n"); + ret = true; + } + } else { + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt " + "power for Wifi is connected!!\n"); + ret = true; + } + } + } + + return ret; +} + +static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, + u8 dac_swing_lvl) +{ + u8 h2c_parameter[1] = {0}; + + /* There are several type of dacswing + * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 + */ + h2c_parameter[0] = dac_swing_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, + bool dec_bt_pwr) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (dec_bt_pwr) + h2c_parameter[0] |= BIT1; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", + (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); +} + +static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, + bool force_exec, bool dec_bt_pwr) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power = %s\n", + (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF")); + coex_dm->cur_dec_bt_pwr = dec_bt_pwr; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + + if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) + return; + } + btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec ? 
"force to" : ""), fw_dac_swing_lvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], preFwDacSwingLvl=%d, " + "curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); + + if (coex_dm->pre_fw_dac_swing_lvl == + coex_dm->cur_fw_dac_swing_lvl) + return; + } + + btc8723b2ant_set_fw_dac_swing_level(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, + bool rx_rf_shrink_on) +{ + if (rx_rf_shrink_on) { + /* Shrink RF Rx LPF corner */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, 0xffffc); + } else { + /* Resume RF Rx LPF corner */ + /* After initialized, we can use coex_dm->btRf0x1eBackup */ + if (btcoexist->initilized) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, + coex_dm->bt_rf0x1e_backup); + } + } +} + +static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec ? "force to" : ""), (rx_rf_shrink_on ? + "ON" : "OFF")); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreRfRxLpfShrink=%d, " + "bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); + + if (coex_dm->pre_rf_rx_lpf_shrink == + coex_dm->cur_rf_rx_lpf_shrink) + return; + } + btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist, + coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] = {0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /*normal rate except MCS7/6/5, OFDM54/48/36*/ + h2c_parameter[2] = 0x00; + h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/ + h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/ + h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" : "OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + /*return; */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec ? "force to" : ""), (low_penalty_ra ? 
+ "ON" : "OFF")); + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreLowPenaltyRa=%d, " + "bCurLowPenaltyRa=%d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); + + if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + btc8723b_set_penalty_txrate(btcoexist, coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist, + u32 level) +{ + u8 val = (u8) level; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); +} + +static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex, + bool sw_dac_swing_on, + u32 sw_dac_swing_lvl) +{ + if (sw_dac_swing_on) + btc8723b2ant_set_dac_swing_reg(btcoex, sw_dac_swing_lvl); + else + btc8723b2ant_set_dac_swing_reg(btcoex, 0x18); +} + + +static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", + (force_exec ? "force to" : ""), + (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl); + coex_dm->cur_dac_swing_on = dac_swing_on; + coex_dm->cur_dac_swing_lvl = dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x," + " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + + if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) + return; + } + mdelay(30); + btc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on, + dac_swing_lvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, + bool agc_table_en) +{ + u8 rssi_adjust_val = 0; + + /* BB AGC Gain Table */ + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table On!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table Off!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001); + } + + + /* RF Gain */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); + btcoexist->btc_set_rf_reg(btcoexist, 
BTC_RF_A, 0x3b, + 0xfffff, 0x38fff); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x38ffe); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table Off!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x380c3); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x28ce6); + } + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1); + + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x38fff); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x38ffe); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table Off!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x380c3); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x28ce6); + } + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0); + + /* set rssiAdjustVal for wifi module. */ + if (agc_table_en) + rssi_adjust_val = 8; + btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, + &rssi_adjust_val); +} + +static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist, + bool force_exec, bool agc_table_en) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + (agc_table_en ? "Enable" : "Disable")); + coex_dm->cur_agc_table_en = agc_table_en; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); + + if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) + return; + } + btc8723b2ant_set_agc_table(btcoexist, agc_table_en); + + coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; +} + +static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, + u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0=0x%x," + " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + (force_exec ? 
"force to" : ""), val0x6c0, + val0x6c4, val0x6c8, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], preVal0x6c0=0x%x, " + "preVal0x6c4=0x%x, preVal0x6c8=0x%x, " + "preVal0x6cc=0x%x !!\n", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], curVal0x6c0=0x%x, " + "curVal0x6c4=0x%x, curVal0x6c8=0x%x, " + "curVal0x6cc=0x%x !!\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + btc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffff, 0x3); + break; + case 1: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5afa5afa, 0xffff, 0x3); + break; + case 2: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffff, 0x3); + break; + case 3: + btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa, + 0xaaaaaaaa, 0xffff, 0x3); + break; + case 4: + btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff, + 0xffffffff, 0xffff, 0x3); + break; + case 5: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0x5fff5fff, 0xffff, 0x3); + break; + case 6: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + break; + case 7: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); + break; + case 8: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea, + 0x5aea5aea, 0xffff, 0x3); + break; + case 9: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5aea5aea, 0xffff, 0x3); + break; + case 10: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5aff5aff, 0xffff, 0x3); + break; + case 11: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); + break; + case 12: + btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5f5f5f5f, 0xffff, 0x3); + break; + default: + break; + } +} + +static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] = {0}; + + if (enable) + h2c_parameter[0] |= BIT0;/* function enable*/ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, " + "FW write 0x63=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d, " + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + btc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, + u8 byte2, u8 byte3, u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5]; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool bt_lna_constrain) +{ + btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); + btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); +} + +static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_backoff, + bool sw_dac_swing, u32 dac_swing_lvl) +{ + btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift); + btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, + dac_swing_lvl); +} + +static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, + bool turn_on, u8 type) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec ? "force to" : ""), + (turn_on ? 
"ON" : "OFF"), type); + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + case 1: + default: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 2: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 3: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); + break; + case 4: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x03, 0xf1, 0x90); + break; + case 5: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 6: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 7: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); + break; + case 8: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10, + 0x3, 0x70, 0x90); + break; + case 9: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 10: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 11: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0xa, 0xe1, 0x90); + break; + case 12: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 13: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 14: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 15: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0xa, 0x60, 0x90); + break; + case 16: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, + 0x5, 0x60, 0x90); + break; + case 17: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f, + 0x2f, 0x60, 0x90); + break; + case 18: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 19: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x25, 0xe1, 0x90); + break; + case 20: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x25, 0x60, 0x90); + break; + case 21: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, 0x90); + break; + case 71: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + } + } else { + /* disable PS tdma */ + switch (type) { + case 0: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); + break; + case 1: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x48, 0x0); + break; + default: + btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); + break; + } + } + + /* update pre state */ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist) +{ + /* fw all off */ + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + /* sw all off */ + btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + + /* hw all off 
*/ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); +} + +static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* force to reset coex mechanism*/ + + btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false); + + btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); +} + +static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist) +{ + bool wifi_connected = false; + bool low_pwr_disable = true; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + if (wifi_connected) { + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + } else { + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } + btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + + coex_dm->need_recover_0x948 = true; + coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948); + + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); +} + +static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist) +{ + bool common = false, wifi_connected = false; + bool wifi_busy = false; + bool bt_hs_on = false, low_pwr_disable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!wifi_connected) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non-connected idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, + false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); + + common = true; + } else { + if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + " + "BT non connected-idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 0xb); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + common = true; + } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = true; + 
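+			/* BT connected-idle: ask the platform to keep its
+			 * low-power mode off before applying the common
+			 * (mostly-off) coex settings below.
+			 */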
btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (bt_hs_on) + return false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + " + "BT connected-idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 0xb); + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + common = true; + } else { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy + " + "BT Busy!!\n"); + common = false; + } else { + if (bt_hs_on) + return false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle + " + "BT Busy!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, + 0x1, 0xfffff, 0x0); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, + 7); + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 21); + btc8723b2ant_fw_dac_swing_lvl(btcoexist, + NORMAL_EXEC, + 0xb); + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, + true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, + false); + btc8723b2ant_sw_mechanism1(btcoexist, false, + false, false, + false); + btc8723b2ant_sw_mechanism2(btcoexist, false, + false, false, + 0x18); + common = true; + } + } + } + + return common; +} + +static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause, + s32 result) +{ + /* Set PS TDMA for max interval == 1 */ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + + if (coex_dm->cur_ps_tdma == 71) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } 
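+			/* types 13-15 step up the same way as 5-7 above */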
else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71); + coex_dm->tdma_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + coex_dm->tdma_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); + coex_dm->tdma_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + int tmp = coex_dm->cur_ps_tdma; + switch (tmp) { + case 4: + case 3: + case 2: + case 12: + case 11: + case 10: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, tmp - 1); + coex_dm->tdma_adj_type = tmp - 1; + 
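+				/* result == 1: give WiFi more air time by
+				 * stepping one TDMA type down
+				 */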
break; + case 1: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 71); + coex_dm->tdma_adj_type = 71; + break; + } + } + } +} + +static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause, + s32 result) +{ + /* Set PS TDMA for max interval == 2 */ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16); + coex_dm->tdma_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + coex_dm->tdma_adj_type = 3; + } else if 
(coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); + coex_dm->tdma_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } + } + } +} + +static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause, + s32 result) +{ + /* Set PS TDMA for max interval == 3 */ + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 9) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16); + 
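+			/* record the type just programmed so it can be put
+			 * back after scan/link/roam temporarily overrides it
+			 */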
coex_dm->tdma_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 8); + coex_dm->tdma_adj_type = 8; + } else if (coex_dm->cur_ps_tdma == 13) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 16); + coex_dm->tdma_adj_type = 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 7) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 6) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 16) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 15) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 14) { + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + switch (coex_dm->cur_ps_tdma) { + case 5: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 6: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 7: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 8: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4); + coex_dm->tdma_adj_type = 4; + break; + case 13: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 14: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 15: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 16: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12); + coex_dm->tdma_adj_type = 12; + break; + } + if (result == -1) { + switch (coex_dm->cur_ps_tdma) { + case 1: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 2: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 3: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 4); + coex_dm->tdma_adj_type = 4; + break; + case 9: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 10: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 11: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 12); + coex_dm->tdma_adj_type = 12; + break; + } + } else if (result == 1) { + switch (coex_dm->cur_ps_tdma) { + case 4: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + break; + 
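+			/* for max interval 3, types 2-4 settle on type 3 and
+			 * types 10-12 settle on type 11
+			 */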
case 3: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 2: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + break; + case 12: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 11: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + break; + case 10: + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } + } +} + +static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) +{ + static s32 up, dn, m, n, wait_count; + /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/ + s32 result; + u8 retry_count = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + if (sco_hid) { + if (tx_pause) { + if (max_interval == 1) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->tdma_adj_type = 13; + } else if (max_interval == 2) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->tdma_adj_type = 14; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } else { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->tdma_adj_type = 15; + } + } else { + if (max_interval == 1) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->tdma_adj_type = 9; + } else if (max_interval == 2) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->tdma_adj_type = 10; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } else { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->tdma_adj_type = 11; + } + } + } else { + if (tx_pause) { + if (max_interval == 1) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->tdma_adj_type = 5; + } else if (max_interval == 2) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->tdma_adj_type = 6; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } else { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->tdma_adj_type = 7; + } + } else { + if (max_interval == 1) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->tdma_adj_type = 1; + } else if (max_interval == 2) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->tdma_adj_type = 2; + } else if (max_interval == 3) { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } else { + btc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->tdma_adj_type = 3; + } + } + } + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } else { + /*accquire the BT TRx retry count from BT_Info byte2*/ + retry_count = coex_sta->bt_retry_cnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_count = %d\n", retry_count); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", + up, dn, m, n, wait_count); + result = 0; + 
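+		/* Not the first run: drive result from the BT retry count.
+		 * A clean (retry-free) period counts toward result = 1 once
+		 * 'up' reaches n; 1-3 retries push toward result = -1 after
+		 * roughly two such periods; more than 3 retries force -1 at
+		 * once, and n = 3 * m grows so the next increase is earned
+		 * more slowly.
+		 */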
wait_count++; + /* no retry in the last 2-second duration*/ + if (retry_count == 0) { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi " + "duration!!\n"); + } /* <=3 retry in the last 2-second duration*/ + } else if (retry_count <= 3) { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + if (wait_count <= 2) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration " + "for retry_counter<3!!\n"); + } + } else { + if (wait_count == 1) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration " + "for retry_counter>3!!\n"); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); + if (max_interval == 1) + set_tdma_int1(btcoexist, tx_pause, result); + else if (max_interval == 2) + set_tdma_int2(btcoexist, tx_pause, result); + else if (max_interval == 3) + set_tdma_int3(btcoexist, tx_pause, result); + } + + /*if current PsTdma not match with the recorded one (when scan, dhcp..), + *then we have to adjust it back to the previous recorded one. + */ + if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) { + bool scan = false, link = false, roam = false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, " + "curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (!scan && !link && !roam) + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, + coex_dm->tdma_adj_type); + else + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under" + " progress, will adjust next time!!!\n"); + } +} + +/* SCO only or SCO+PAN(HS) */ +static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + /*for SCO quality at 11b/g mode*/ + if (BTC_WIFI_BW_LEGACY == wifi_bw) + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2); + else /*for SCO quality & wifi performance balance at 11n mode*/ + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8); + + /*for voice quality */ + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x4); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, 
false, + true, 0x4); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x4); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + true, 0x4); + } + } +} + +static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) /*/for HID at 11b/g mode*/ + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + else /*for HID quality & wifi performance balance at 11n mode*/ + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + else + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/ +static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8723b2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + else + btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + 
false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + + btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + else + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } 
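+		/* the 20 MHz path below is identical except that RF Rx LPF
+		 * shrink is left off
+		 */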
+ } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*PAN(HS) only*/ +static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + + btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*PAN(EDR)+A2DP*/ +static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12); + if (BTC_WIFI_BW_HT40 == wifi_bw) + btc8723b2ant_tdma_duration_adjust(btcoexist, false, + true, 3); + else + btc8723b2ant_tdma_duration_adjust(btcoexist, false, + false, 3); + } else { + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + 
false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (BTC_WIFI_BW_HT40 == wifi_bw) { + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 3); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x780); + } else { + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 6); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + } + btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } else { + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* HID+A2DP+PAN(EDR) */ +static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (BTC_WIFI_BW_HT40 == wifi_bw) + btc8723b2ant_tdma_duration_adjust(btcoexist, true, + true, 
2); + else + btc8723b2ant_tdma_duration_adjust(btcoexist, true, + false, 3); + } else { + btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (btc8723b_need_dec_pwr(btcoexist)) + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2); + else + btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + btc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + btc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), " + "return for Manual CTRL <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); + return; + } + + algorithm = btc8723b2ant_action_algorithm(btcoexist); + if (coex_sta->c2h_bt_inquiry_page && + (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); + 
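/* BT is busy with inquiry/page scan: run the dedicated inquiry action and skip the per-profile algorithms below */ +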
btc8723b2ant_action_bt_inquiry(btcoexist); + return; + } else { + if (coex_dm->need_recover_0x948) { + coex_dm->need_recover_0x948 = false; + btcoexist->btc_write_2byte(btcoexist, 0x948, + coex_dm->backup_0x948); + } + } + + coex_dm->cur_algorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n", + coex_dm->cur_algorithm); + + if (btc8723b2ant_is_common_action(btcoexist)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant common.\n"); + coex_dm->auto_tdma_adjust = false; + } else { + if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], preAlgorithm=%d, " + "curAlgorithm=%d\n", coex_dm->pre_algorithm, + coex_dm->cur_algorithm); + coex_dm->auto_tdma_adjust = false; + } + switch (coex_dm->cur_algorithm) { + case BT_8723B_2ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); + btc8723b2ant_action_sco(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID.\n"); + btc8723b2ant_action_hid(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = A2DP.\n"); + btc8723b2ant_action_a2dp(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = A2DP+PAN(HS).\n"); + btc8723b2ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = PAN(EDR).\n"); + btc8723b2ant_action_pan_edr(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = HS mode.\n"); + btc8723b2ant_action_pan_hs(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = PAN+A2DP.\n"); + btc8723b2ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = PAN(EDR)+HID.\n"); + btc8723b2ant_action_pan_edr_hid(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = HID+A2DP+PAN.\n"); + btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = HID+A2DP.\n"); + btc8723b2ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = coexist All Off!!\n"); + btc8723b2ant_coex_alloff(btcoexist); + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + + + +/********************************************************************* + * work around function start with wa_btc8723b2ant_ + *********************************************************************/ +/********************************************************************* + * extern function start with EXbtc8723b2ant_ + *********************************************************************/ +void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 u32tmp = 0, fw_ver; + u8 
u8tmp = 0; + u8 h2c_parameter[2] = {0}; + + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); + + /* backup rf 0x1e value */ + coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist, + BTC_RF_A, 0x1e, + 0xfffff); + + /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); + btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); + + /* Antenna switch control parameter */ + /* btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);*/ + + /*Force GNT_BT to low*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + + /* 0x790[5:0]=0x5 */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u8tmp &= 0xc0; + u8tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); + + + /*Antenna config */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + + /*ext switch for fw ver < 0xc */ + if (fw_ver < 0xc00) { + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x1); + /*Main Ant to BT for IPS case 0x4c[23]=1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, + 0x1); + + /*tell firmware "no antenna inverse"*/ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; /* ext switch type */ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x2); + /*Aux Ant to BT for IPS case 0x4c[23]=1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, + 0x0); + + /*tell firmware "antenna inverse"*/ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; /*ext switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } else { + /*ext switch always at s1 (if exist) */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1); + /*Main Ant to BT for IPS case 0x4c[23]=1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1); + + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { + /*tell firmware "no antenna inverse"*/ + h2c_parameter[0] = 0; + h2c_parameter[1] = 0; /*ext switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /*tell firmware "antenna inverse"*/ + h2c_parameter[0] = 1; + h2c_parameter[1] = 0; /*ext switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* PTA parameter */ + btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0); + + /* Enable counter statistics */ + /*0x76e[3] =1, WLAN_Act control by PTA*/ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); +} + +void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + btc8723b2ant_init_coex_dm(btcoexist); +} + +void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + u8 *cli_buf = 
btcoexist->cli_buf; + u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0; + u32 u32tmp[4]; + bool roam = false, scan = false; + bool link = false, wifi_under_5g = false; + bool bt_hs_on = false, wifi_busy = false; + s32 wifi_rssi = 0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cli_buf); + + if (btcoexist->manual_control) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ==========[Under Manual Control]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + + if (!board_info->bt_exist) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cli_buf); + return; + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], + coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : + (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? 
+ "uplink" : "downlink"))); + CL_PRINTF(cli_buf); + + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, bt_link_info->hid_exist, + bt_link_info->pan_exist, bt_link_info->a2dp_exist); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate"); + CL_PRINTF(cli_buf); + + for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) { + if (coex_sta->bt_info_c2h_cnt[i]) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x " + "%02x %02x %02x %02x(%d)", + glbt_info_src_8723b_2ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + CL_PRINTF(cli_buf); + } + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + /* Sw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s", "============[Sw mechanism]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + CL_PRINTF(cli_buf); + + /* Fw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Fw mechanism]============"); + CL_PRINTF(cli_buf); + + ps_tdma_case = coex_dm->cur_ps_tdma; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + ps_tdma_case, coex_dm->auto_tdma_adjust); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, + coex_dm->cur_ignore_wlan_act); + CL_PRINTF(cli_buf); + + /* Hw setting */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Hw setting]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x778/0x880[29:25]", u8tmp[0], + (u32tmp[0]&0x3e000000) >> 25); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]); + CL_PRINTF(cli_buf); + + u32tmp[0] = 
btcoexist->btc_read_4byte(btcoexist, 0x92c); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3); + CL_PRINTF(cli_buf); + + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8)>>3), u8tmp[1], + ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8); + u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); + + fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) + + ((u32tmp[1]&0xffff0000) >> 16) + + (u32tmp[1] & 0xffff) + + (u32tmp[2] & 0xffff) + + ((u32tmp[3]&0xffff0000) >> 16) + + (u32tmp[3] & 0xffff); + fa_cck = (u8tmp[0] << 8) + u8tmp[1]; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "OFDM-CCA/OFDM-FA/CCK-FA", + u32tmp[0]&0xffff, fa_ofdm, fa_cck); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); + CL_PRINTF(cli_buf); +#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) + btc8723b2ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, + BTC_DBG_DISP_COEX_STATISTICS); +} + + +void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + btc8723b2ant_coex_alloff(btcoexist); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + } +} + +void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_LPS_ENABLE == type) { + 
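/* record that WiFi has entered firmware LPS so the coexist state machine and debug output reflect it */ +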
BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_SCAN_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + else if (BTC_SCAN_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); +} + +void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_ASSOCIATE_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + else if (BTC_ASSOCIATE_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); +} + +void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] = {0}; + u32 wifi_bw; + u8 wifi_central_chnl; + + if (BTC_MEDIA_CONNECT == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + + /* only 2.4G we need to inform bt the chnl mask */ + btcoexist->btc_get(btcoexist, + BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl); + if ((BTC_MEDIA_CONNECT == type) && + (wifi_central_chnl <= 14)) { + h2c_parameter[0] = 0x1; + h2c_parameter[1] = wifi_central_chnl; + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66=0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + if (type == BTC_PACKET_DHCP) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); +} + +void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length) +{ + u8 bt_info = 0; + u8 i, rsp_source = 0; + bool bt_busy = false, limited_dig = false; + bool wifi_connected = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rsp_source = tmpbuf[0]&0xf; + if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX) + rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data=[", + rsp_source, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i]; + if (i == 1) + bt_info = tmpbuf[i]; + if (i == length-1) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmpbuf[i]); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x, ", tmpbuf[i]); + } + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "return for Manual CTRL<===\n"); + return; + } + + if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) { + coex_sta->bt_retry_cnt = /* [3:0]*/ + coex_sta->bt_info_c2h[rsp_source][2] & 0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10; + + coex_sta->bt_info_ext = + 
coex_sta->bt_info_c2h[rsp_source][4]; + + /* Here we need to resend some wifi info to BT + * because bt is reset and loss of the info. + */ + if ((coex_sta->bt_info_ext & BIT1)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit1 check," + " send wifi BW&Chnl to BT!!\n"); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (wifi_connected) + btc8723b_med_stat_notify(btcoexist, + BTC_MEDIA_CONNECT); + else + btc8723b_med_stat_notify(btcoexist, + BTC_MEDIA_DISCONNECT); + } + + if ((coex_sta->bt_info_ext & BIT3)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, " + "set BT NOT to ignore Wlan active!!\n"); + btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, + false); + } else { + /* BT already NOT ignore Wlan active, do nothing here.*/ + } +#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) + if ((coex_sta->bt_info_ext & BIT4)) { + /* BT auto report already enabled, do nothing*/ + } else { + btc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC, + true); + } +#endif + } + + /* check BIT2 first ==> check if bt is under inquiry or page scan*/ + if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE) + coex_sta->c2h_bt_inquiry_page = true; + else + coex_sta->c2h_bt_inquiry_page = false; + + /* set link exist status*/ + if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + } else { /* connection exists */ + coex_sta->bt_link_exist = true; + if (bt_info & BT_INFO_8723B_2ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (bt_info & BT_INFO_8723B_2ANT_B_A2DP) + coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (bt_info & BT_INFO_8723B_2ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + } + + btc8723b2ant_update_bt_link_info(btcoexist); + + if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "BT Non-Connected idle!!!\n"); + /* connection exists but no busy */ + } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) || + (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + } else if (bt_info & BT_INFO_8723B_2ANT_B_ACL_BUSY) { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "BT Non-Defined state!!!\n"); + } + + if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) { + bt_busy = true; + limited_dig = true; + } else { + bt_busy = false; + limited_dig = false; + } + + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, 
&bt_busy); + + coex_dm->limited_dig = limited_dig; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + btc8723b2ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist, + u8 type) +{ + if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex],StackOP Inquiry/page/pair start notify\n"); + else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex],StackOP Inquiry/page/pair finish notify\n"); +} + +void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + + btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + btc8723b_med_stat_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + static u8 dis_ver_info_cnt; + u32 fw_ver = 0, bt_patch_ver = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], ==========================" + "Periodical===========================\n"); + + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************" + "************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ " + "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num, + board_info->btdm_ant_num, board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer = " + "%d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], *****************************" + "***********************************\n"); + } + +#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) + btc8723b2ant_query_bt_info(btcoexist); + btc8723b2ant_monitor_bt_ctr(btcoexist); + btc8723b2ant_monitor_bt_enable_disable(btcoexist); +#else + if (btc8723b2ant_is_wifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) + btc8723b2ant_run_coexist_mechanism(btcoexist); +#endif +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h @@ -0,0 +1,173 @@ +/****************************************************************************** + * + * Copyright(c) 2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 
2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ +#ifndef _HAL8723B_2_ANT +#define _HAL8723B_2_ANT + +/************************************************************************ + * The following is for 8723B 2Ant BT Co-exist definition + ************************************************************************/ +#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1 + +#define BT_INFO_8723B_2ANT_B_FTP BIT7 +#define BT_INFO_8723B_2ANT_B_A2DP BIT6 +#define BT_INFO_8723B_2ANT_B_HID BIT5 +#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0 + +#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2 + +enum BT_INFO_SRC_8723B_2ANT { + BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1, + BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8723B_2ANT_MAX +}; + +enum BT_8723B_2ANT_BT_STATUS { + BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8723B_2ANT_BT_STATUS_MAX +}; + +enum BT_8723B_2ANT_COEX_ALGO { + BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8723B_2ANT_COEX_ALGO_SCO = 0x1, + BT_8723B_2ANT_COEX_ALGO_HID = 0x2, + BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3, + BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5, + BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6, + BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8723B_2ANT_COEX_ALGO_MAX = 0xb, +}; + +struct coex_dm_8723b_2ant { + /* fw mechanism */ + bool pre_dec_bt_pwr; + bool cur_dec_bt_pwr; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 tdma_adj_type; + bool reset_tdma_adjust; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + + /* sw mechanism */ + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_back_off; + bool cur_adc_back_off; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + bool need_recover_0x948; + u16 backup_0x948; +}; + +struct coex_sta_8723b_2ant { + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_lps; + bool under_ips; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX]; + bool c2h_bt_inquiry_page; 
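+ /* parsed from the most recent BT info C2H report */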
+ u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/********************************************************************* + * The following is interface which will notify coex module. + *********************************************************************/ +void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c @@ -0,0 +1,1011 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + ******************************************************************************/ + +#include "halbt_precomp.h" + +/*********************************************** + * Global variables + ***********************************************/ + +struct btc_coexist gl_bt_coexist; + +u32 btc_dbg_type[BTC_MSG_MAX]; +static u8 btc_dbg_buf[100]; + +/*************************************************** + * Debug related function + ***************************************************/ +static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist) +{ + if (!btcoexist->binded || NULL == btcoexist->adapter) + return false; + + return true; +} + +static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv) +{ + if (rtlpriv->link_info.busytraffic) + return true; + else + return false; +} + +static void halbtc_dbg_init(void) +{ + u8 i; + + for (i = 0; i < BTC_MSG_MAX; i++) + btc_dbg_type[i] = 0; + + btc_dbg_type[BTC_MSG_INTERFACE] = +/* INTF_INIT | */ +/* INTF_NOTIFY | */ + 0; + + btc_dbg_type[BTC_MSG_ALGORITHM] = +/* ALGO_BT_RSSI_STATE | */ +/* ALGO_WIFI_RSSI_STATE | */ +/* ALGO_BT_MONITOR | */ +/* ALGO_TRACE | */ +/* ALGO_TRACE_FW | */ +/* ALGO_TRACE_FW_DETAIL | */ +/* ALGO_TRACE_FW_EXEC | */ +/* ALGO_TRACE_SW | */ +/* ALGO_TRACE_SW_DETAIL | */ +/* ALGO_TRACE_SW_EXEC | */ + 0; +} + +static bool halbtc_is_bt40(struct rtl_priv *adapter) +{ + struct rtl_priv *rtlpriv = adapter; + struct rtl_phy *rtlphy = &(rtlpriv->phy); + bool is_ht40 = true; + enum ht_channel_width bw = rtlphy->current_chan_bw; + + if (bw == HT_CHANNEL_WIDTH_20) + is_ht40 = false; + else if (bw == HT_CHANNEL_WIDTH_20_40) + is_ht40 = true; + + return is_ht40; +} + +static bool halbtc_legacy(struct rtl_priv *adapter) +{ + struct rtl_priv *rtlpriv = adapter; + struct rtl_mac *mac = rtl_mac(rtlpriv); + + bool is_legacy = false; + + if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B)) + is_legacy = true; + + return is_legacy; +} + +bool halbtc_is_wifi_uplink(struct rtl_priv *adapter) +{ + struct rtl_priv *rtlpriv = adapter; + + if (rtlpriv->link_info.tx_busy_traffic) + return true; + else + return false; +} + +static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = + (struct rtl_priv *)btcoexist->adapter; + u32 wifi_bw = BTC_WIFI_BW_HT20; + + if (halbtc_is_bt40(rtlpriv)) { + wifi_bw = BTC_WIFI_BW_HT40; + } else { + if (halbtc_legacy(rtlpriv)) + wifi_bw = BTC_WIFI_BW_LEGACY; + else + wifi_bw = BTC_WIFI_BW_HT20; + } + return wifi_bw; +} + +static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 chnl = 1; + + if (rtlphy->current_channel != 0) + chnl = rtlphy->current_channel; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "static halbtc_get_wifi_central_chnl:%d\n", chnl); + return chnl; +} + +static void halbtc_leave_lps(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv; + struct rtl_ps_ctl *ppsc; + bool ap_enable = false; + + rtlpriv = btcoexist->adapter; + ppsc = rtl_psc(rtlpriv); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + pr_info("halbtc_leave_lps()<--dont leave lps under AP mode\n"); + return; + } + + btcoexist->bt_info.bt_ctrl_lps = true; + btcoexist->bt_info.bt_lps_on = false; +} + +static void halbtc_enter_lps(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv; + struct rtl_ps_ctl *ppsc; + bool ap_enable = false; + + rtlpriv = btcoexist->adapter; + ppsc = 
rtl_psc(rtlpriv); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + pr_info("halbtc_enter_lps()<--dont enter lps under AP mode\n"); + return; + } + + btcoexist->bt_info.bt_ctrl_lps = true; + btcoexist->bt_info.bt_lps_on = false; +} + +static void halbtc_normal_lps(struct btc_coexist *btcoexist) +{ + if (btcoexist->bt_info.bt_ctrl_lps) { + btcoexist->bt_info.bt_lps_on = false; + btcoexist->bt_info.bt_ctrl_lps = false; + } +} + +static void halbtc_leave_low_power(void) +{ +} + +static void halbtc_nomal_low_power(void) +{ +} + +static void halbtc_disable_low_power(void) +{ +} + +static void halbtc_aggregation_check(void) +{ +} + +static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist) +{ + return 0; +} + +static s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter) +{ + struct rtl_priv *rtlpriv = adapter; + s32 undec_sm_pwdb = 0; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) + undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; + else /* associated entry pwdb */ + undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; + return undec_sm_pwdb; +} + +static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist; + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + bool *bool_tmp = (bool *)out_buf; + int *s32_tmp = (int *)out_buf; + u32 *u32_tmp = (u32 *)out_buf; + u8 *u8_tmp = (u8 *)out_buf; + bool tmp = false; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return false; + + switch (get_type) { + case BTC_GET_BL_HS_OPERATION: + *bool_tmp = false; + break; + case BTC_GET_BL_HS_CONNECTING: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_CONNECTED: + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) + tmp = true; + *bool_tmp = tmp; + break; + case BTC_GET_BL_WIFI_BUSY: + if (halbtc_is_wifi_busy(rtlpriv)) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_SCAN: + if (mac->act_scanning) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_LINK: + if (mac->link_state == MAC80211_LINKING) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_ROAM: /*TODO*/ + if (mac->link_state == MAC80211_LINKING) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/ + *bool_tmp = false; + + break; + case BTC_GET_BL_WIFI_UNDER_5G: + *bool_tmp = false; /*TODO*/ + + case BTC_GET_BL_WIFI_DHCP: /*TODO*/ + break; + case BTC_GET_BL_WIFI_SOFTAP_IDLE: + *bool_tmp = true; + break; + case BTC_GET_BL_WIFI_SOFTAP_LINKING: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_AP_MODE_ENABLE: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION: + if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm) + *bool_tmp = false; + else + *bool_tmp = true; + break; + case BTC_GET_BL_WIFI_UNDER_B_MODE: + *bool_tmp = false; /*TODO*/ + break; + case BTC_GET_BL_EXT_SWITCH: + *bool_tmp = false; + break; + case BTC_GET_S4_WIFI_RSSI: + *s32_tmp = halbtc_get_wifi_rssi(rtlpriv); + break; + case BTC_GET_S4_HS_RSSI: /*TODO*/ + *s32_tmp = halbtc_get_wifi_rssi(rtlpriv); + break; + case BTC_GET_U4_WIFI_BW: + *u32_tmp = halbtc_get_wifi_bw(btcoexist); + break; + case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION: + if (halbtc_is_wifi_uplink(rtlpriv)) + *u32_tmp = 
BTC_WIFI_TRAFFIC_TX; + else + *u32_tmp = BTC_WIFI_TRAFFIC_RX; + break; + case BTC_GET_U4_WIFI_FW_VER: + *u32_tmp = rtlhal->fw_version; + break; + case BTC_GET_U4_BT_PATCH_VER: + *u32_tmp = halbtc_get_bt_patch_version(btcoexist); + break; + case BTC_GET_U1_WIFI_DOT11_CHNL: + *u8_tmp = rtlphy->current_channel; + break; + case BTC_GET_U1_WIFI_CENTRAL_CHNL: + *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist); + break; + case BTC_GET_U1_WIFI_HS_CHNL: + *u8_tmp = 1;/*BT_OperateChnl(rtlpriv);*/ + break; + case BTC_GET_U1_MAC_PHY_MODE: + *u8_tmp = BTC_MP_UNKNOWN; + break; + + /************* 1Ant **************/ + case BTC_GET_U1_LPS_MODE: + *u8_tmp = btcoexist->pwr_mode_val[0]; + break; + + default: + break; + } + + return true; +} + +static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist; + bool *bool_tmp = (bool *)in_buf; + u8 *u8_tmp = (u8 *)in_buf; + u32 *u32_tmp = (u32 *)in_buf; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return false; + + switch (set_type) { + /* set some bool type variables. */ + case BTC_SET_BL_BT_DISABLE: + btcoexist->bt_info.bt_disabled = *bool_tmp; + break; + case BTC_SET_BL_BT_TRAFFIC_BUSY: + btcoexist->bt_info.bt_busy = *bool_tmp; + break; + case BTC_SET_BL_BT_LIMITED_DIG: + btcoexist->bt_info.limited_dig = *bool_tmp; + break; + case BTC_SET_BL_FORCE_TO_ROAM: + btcoexist->bt_info.force_to_roam = *bool_tmp; + break; + case BTC_SET_BL_TO_REJ_AP_AGG_PKT: + btcoexist->bt_info.reject_agg_pkt = *bool_tmp; + break; + case BTC_SET_BL_BT_CTRL_AGG_SIZE: + btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp; + break; + case BTC_SET_BL_INC_SCAN_DEV_NUM: + btcoexist->bt_info.increase_scan_dev_num = *bool_tmp; + break; + /* set some u1Byte type variables. 
*/ + case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON: + btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp; + break; + case BTC_SET_U1_AGG_BUF_SIZE: + btcoexist->bt_info.agg_buf_size = *u8_tmp; + break; + /* the following are some action which will be triggered */ + case BTC_SET_ACT_GET_BT_RSSI: + /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/ + break; + case BTC_SET_ACT_AGGREGATE_CTRL: + halbtc_aggregation_check(); + break; + + /* 1Ant */ + case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE: + btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp; + break; + case BTC_SET_UI_SCAN_SIG_COMPENSATION: + /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */ + break; + case BTC_SET_U1_1ANT_LPS: + btcoexist->bt_info.lps_1ant = *u8_tmp; + break; + case BTC_SET_U1_1ANT_RPWM: + btcoexist->bt_info.rpwm_1ant = *u8_tmp; + break; + /* the following are some action which will be triggered */ + case BTC_SET_ACT_LEAVE_LPS: + halbtc_leave_lps(btcoexist); + break; + case BTC_SET_ACT_ENTER_LPS: + halbtc_enter_lps(btcoexist); + break; + case BTC_SET_ACT_NORMAL_LPS: + halbtc_normal_lps(btcoexist); + break; + case BTC_SET_ACT_DISABLE_LOW_POWER: + halbtc_disable_low_power(); + break; + case BTC_SET_ACT_UPDATE_ra_mask: + btcoexist->bt_info.ra_mask = *u32_tmp; + break; + case BTC_SET_ACT_SEND_MIMO_PS: + break; + case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT: + btcoexist->bt_info.force_exec_pwr_cmd_cnt++; + break; + case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/ + break; + case BTC_SET_ACT_CTRL_BT_COEX: + break; + default: + break; + } + + return true; +} + +static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist) +{ +} + +static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist) +{ +} + +static void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist) +{ +} + +static void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist) +{ +} + +/************************************************************ + * IO related function + ************************************************************/ +static u8 halbtc_read_1byte(void *bt_context, u32 reg_addr) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_read_byte(rtlpriv, reg_addr); +} + +static u16 halbtc_read_2byte(void *bt_context, u32 reg_addr) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_read_word(rtlpriv, reg_addr); +} + +static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_read_dword(rtlpriv, reg_addr); +} + +static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_write_byte(rtlpriv, reg_addr, data); +} + +static void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr, + u32 bit_mask, u8 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 original_value, bit_shift = 0; + u8 i; + + if (bit_mask != MASKDWORD) {/*if not "double word" write*/ + original_value = rtl_read_byte(rtlpriv, reg_addr); + for (i = 0; i <= 7; i++) { + if ((bit_mask>>i) & 0x1) + break; + } + bit_shift = i; + data = (original_value & (~bit_mask)) | + ((data << bit_shift) & bit_mask); + } + rtl_write_byte(rtlpriv, 
reg_addr, data); +} + +static void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_write_word(rtlpriv, reg_addr, data); +} + +static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data) +{ + struct btc_coexist *btcoexist = + (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_write_dword(rtlpriv, reg_addr, data); +} + +static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask, + u32 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data); +} + +static u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask); +} + +static void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, + u32 bit_mask, u32 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data); +} + +static u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, + u32 bit_mask) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask); +} + +static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id, + u32 cmd_len, u8 *cmd_buf) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id, + cmd_len, cmd_buf); +} + +static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + switch (disp_type) { + case BTC_DBG_DISP_COEX_STATISTICS: + halbtc_display_coex_statistics(btcoexist); + break; + case BTC_DBG_DISP_BT_LINK_INFO: + halbtc_display_bt_link_info(btcoexist); + break; + case BTC_DBG_DISP_BT_FW_VER: + halbtc_display_bt_fw_info(btcoexist); + break; + case BTC_DBG_DISP_FW_PWR_MODE_CMD: + halbtc_display_fw_pwr_mode_cmd(btcoexist); + break; + default: + break; + } +} + +/***************************************************************** + * Extern functions called by other module + *****************************************************************/ +bool exhalbtc_initlize_variables(struct rtl_priv *adapter) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + btcoexist->statistics.cnt_bind++; + + halbtc_dbg_init(); + + if (btcoexist->binded) + return false; + else + btcoexist->binded = true; + +#if (defined(CONFIG_PCI_HCI)) + btcoexist->chip_interface = BTC_INTF_PCI; +#elif (defined(CONFIG_USB_HCI)) + btcoexist->chip_interface = BTC_INTF_USB; +#elif (defined(CONFIG_SDIO_HCI)) + btcoexist->chip_interface = BTC_INTF_SDIO; +#elif (defined(CONFIG_GSPI_HCI)) + btcoexist->chip_interface = BTC_INTF_GSPI; +#else + btcoexist->chip_interface = BTC_INTF_UNKNOWN; +#endif + + if (NULL == btcoexist->adapter) + btcoexist->adapter = adapter; + + btcoexist->stack_info.profile_notified = false; + + btcoexist->btc_read_1byte = halbtc_read_1byte; + btcoexist->btc_write_1byte = halbtc_write_1byte; + 
btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte; + btcoexist->btc_read_2byte = halbtc_read_2byte; + btcoexist->btc_write_2byte = halbtc_write_2byte; + btcoexist->btc_read_4byte = halbtc_read_4byte; + btcoexist->btc_write_4byte = halbtc_write_4byte; + + btcoexist->btc_set_bb_reg = halbtc_set_bbreg; + btcoexist->btc_get_bb_reg = halbtc_get_bbreg; + + btcoexist->btc_set_rf_reg = halbtc_set_rfreg; + btcoexist->btc_get_rf_reg = halbtc_get_rfreg; + + btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd; + btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg; + + btcoexist->btc_get = halbtc_get; + btcoexist->btc_set = halbtc_set; + + btcoexist->cli_buf = &btc_dbg_buf[0]; + + btcoexist->bt_info.b_bt_ctrl_buf_size = false; + btcoexist->bt_info.agg_buf_size = 5; + + btcoexist->bt_info.increase_scan_dev_num = false; + return true; +} + +void exhalbtc_init_hw_config(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->statistics.cnt_init_hw_config++; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_init_hwconfig(btcoexist); +} + +void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->statistics.cnt_init_coex_dm++; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_init_coex_dm(btcoexist); + + btcoexist->initilized = true; +} + +void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 ips_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_ips_notify++; + if (btcoexist->manual_control) + return; + + if (ERFOFF == type) + ips_type = BTC_IPS_ENTER; + else + ips_type = BTC_IPS_LEAVE; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type); + + halbtc_nomal_low_power(); +} + +void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 lps_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_lps_notify++; + if (btcoexist->manual_control) + return; + + if (EACTIVE == type) + lps_type = BTC_LPS_DISABLE; + else + lps_type = BTC_LPS_ENABLE; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type); +} + +void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 scan_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_scan_notify++; + if (btcoexist->manual_control) + return; + + if (type) + scan_type = BTC_SCAN_START; + else + scan_type = BTC_SCAN_FINISH; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type); + + halbtc_nomal_low_power(); +} + +void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 asso_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + 
return; + btcoexist->statistics.cnt_connect_notify++; + if (btcoexist->manual_control) + return; + + if (action) + asso_type = BTC_ASSOCIATE_START; + else + asso_type = BTC_ASSOCIATE_FINISH; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type); +} + +void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, + enum _RT_MEDIA_STATUS media_status) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 status; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_media_status_notify++; + if (btcoexist->manual_control) + return; + + if (RT_MEDIA_CONNECT == media_status) + status = BTC_MEDIA_CONNECT; + else + status = BTC_MEDIA_DISCONNECT; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + btc8723b_med_stat_notify(btcoexist, status); + + halbtc_nomal_low_power(); +} + +void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 packet_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_special_packet_notify++; + if (btcoexist->manual_control) + return; + + packet_type = BTC_PACKET_DHCP; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_special_packet_notify(btcoexist, + packet_type); + + halbtc_nomal_low_power(); +} + +void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_bt_info_notify++; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length); +} + +void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 stack_op_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_stack_operation_notify++; + if (btcoexist->manual_control) + return; + + stack_op_type = BTC_STACK_OP_NONE; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_stack_operation_notify(btcoexist, + stack_op_type); + + halbtc_nomal_low_power(); +} + +void exhalbtc_halt_notify(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_halt_notify(btcoexist); +} + +void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +{ + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; +} + +void exhalbtc_periodical(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_periodical++; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_periodical(btcoexist); + + halbtc_nomal_low_power(); +} + +void exhalbtc_dbg_control(struct btc_coexist *btcoexist, + u8 code, u8 len, u8 *data) +{ + if 
(!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_dbg_ctrl++; +} + +void exhalbtc_stack_update_profile_info(void) +{ +} + +void exhalbtc_update_min_bt_rssi(char bt_rssi) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->stack_info.min_bt_rssi = bt_rssi; +} + +void exhalbtc_set_hci_version(u16 hci_version) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->stack_info.hci_version = hci_version; +} + +void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->bt_info.bt_real_fw_ver = bt_patch_version; + btcoexist->bt_info.bt_hci_ver = bt_hci_version; +} + +void exhalbtc_set_bt_exist(bool bt_exist) +{ + gl_bt_coexist.board_info.bt_exist = bt_exist; +} + +void exhalbtc_set_chip_type(u8 chip_type) +{ + switch (chip_type) { + default: + case BT_2WIRE: + case BT_ISSC_3WIRE: + case BT_ACCEL: + case BT_RTL8756: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF; + break; + case BT_CSR_BC4: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4; + break; + case BT_CSR_BC8: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8; + break; + case BT_RTL8723A: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A; + break; + case BT_RTL8821: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821; + break; + case BT_RTL8723B: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B; + break; + } +} + +void exhalbtc_set_ant_num(u8 type, u8 ant_num) +{ + if (BT_COEX_ANT_TYPE_PG == type) { + gl_bt_coexist.board_info.pg_ant_num = ant_num; + gl_bt_coexist.board_info.btdm_ant_num = ant_num; + } else if (BT_COEX_ANT_TYPE_ANTDIV == type) { + gl_bt_coexist.board_info.btdm_ant_num = ant_num; + } +} + +void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) + ex_halbtc8723b2ant_display_coex_info(btcoexist); +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h @@ -0,0 +1,559 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2012 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ +#ifndef __HALBTC_OUT_SRC_H__ +#define __HALBTC_OUT_SRC_H__ + +#include "../wifi.h" + +#define NORMAL_EXEC false +#define FORCE_EXEC true + +#define BTC_RF_A RF90_PATH_A +#define BTC_RF_B RF90_PATH_B +#define BTC_RF_C RF90_PATH_C +#define BTC_RF_D RF90_PATH_D + +#define BTC_SMSP SINGLEMAC_SINGLEPHY +#define BTC_DMDP DUALMAC_DUALPHY +#define BTC_DMSP DUALMAC_SINGLEPHY +#define BTC_MP_UNKNOWN 0xff + +#define IN +#define OUT + +#define BT_TMP_BUF_SIZE 100 + +#define BT_COEX_ANT_TYPE_PG 0 +#define BT_COEX_ANT_TYPE_ANTDIV 1 +#define BT_COEX_ANT_TYPE_DETECTED 2 + +#define BTC_MIMO_PS_STATIC 0 +#define BTC_MIMO_PS_DYNAMIC 1 + +#define BTC_RATE_DISABLE 0 +#define BTC_RATE_ENABLE 1 + +#define BTC_ANT_PATH_WIFI 0 +#define BTC_ANT_PATH_BT 1 +#define BTC_ANT_PATH_PTA 2 + +enum btc_chip_interface { + BTC_INTF_UNKNOWN = 0, + BTC_INTF_PCI = 1, + BTC_INTF_USB = 2, + BTC_INTF_SDIO = 3, + BTC_INTF_GSPI = 4, + BTC_INTF_MAX +}; + +enum BTC_CHIP_TYPE { + BTC_CHIP_UNDEF = 0, + BTC_CHIP_CSR_BC4 = 1, + BTC_CHIP_CSR_BC8 = 2, + BTC_CHIP_RTL8723A = 3, + BTC_CHIP_RTL8821 = 4, + BTC_CHIP_RTL8723B = 5, + BTC_CHIP_MAX +}; + +enum BTC_MSG_TYPE { + BTC_MSG_INTERFACE = 0x0, + BTC_MSG_ALGORITHM = 0x1, + BTC_MSG_MAX +}; +extern u32 btc_dbg_type[]; + +/* following is for BTC_MSG_INTERFACE */ +#define INTF_INIT BIT0 +#define INTF_NOTIFY BIT2 + +/* following is for BTC_ALGORITHM */ +#define ALGO_BT_RSSI_STATE BIT0 +#define ALGO_WIFI_RSSI_STATE BIT1 +#define ALGO_BT_MONITOR BIT2 +#define ALGO_TRACE BIT3 +#define ALGO_TRACE_FW BIT4 +#define ALGO_TRACE_FW_DETAIL BIT5 +#define ALGO_TRACE_FW_EXEC BIT6 +#define ALGO_TRACE_SW BIT7 +#define ALGO_TRACE_SW_DETAIL BIT8 +#define ALGO_TRACE_SW_EXEC BIT9 + +#define BT_COEX_ANT_TYPE_PG 0 +#define BT_COEX_ANT_TYPE_ANTDIV 1 +#define BT_COEX_ANT_TYPE_DETECTED 2 +#define BTC_MIMO_PS_STATIC 0 +#define BTC_MIMO_PS_DYNAMIC 1 +#define BTC_RATE_DISABLE 0 +#define BTC_RATE_ENABLE 1 +#define BTC_ANT_PATH_WIFI 0 +#define BTC_ANT_PATH_BT 1 +#define BTC_ANT_PATH_PTA 2 + + +#define CL_SPRINTF snprintf +#define CL_PRINTF printk + +#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \ + do { \ + if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\ + printk(printstr, ##__VA_ARGS__); \ + } \ + } while (0) + +#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...) \ + do { \ + if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\ + pr_info("%s: ", __func__); \ + printk(printstr, ##__VA_ARGS__); \ + } \ + } while (0) + +#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr) \ + do { \ + if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \ + int __i; \ + u8 *__ptr = (u8 *)_ptr; \ + printk printstr; \ + for (__i = 0; __i < 6; __i++) \ + printk("%02X%s", __ptr[__i], (__i == 5) ? \ + "" : "-"); \ + pr_info("\n"); \ + } \ + } while (0) + +#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \ + do { \ + if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) { \ + int __i; \ + u8 *__ptr = (u8 *)_hexdata; \ + printk(_titlestring); \ + for (__i = 0; __i < (int)_hexdatalen; __i++) { \ + printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \ + == 0) ? 
" " : " ");\ + if (((__i + 1) % 16) == 0) \ + printk("\n"); \ + } \ + pr_debug("\n"); \ + } \ + } while (0) + +#define BTC_ANT_PATH_WIFI 0 +#define BTC_ANT_PATH_BT 1 +#define BTC_ANT_PATH_PTA 2 + +enum btc_power_save_type { + BTC_PS_WIFI_NATIVE = 0, + BTC_PS_LPS_ON = 1, + BTC_PS_LPS_OFF = 2, + BTC_PS_LPS_MAX +}; + +struct btc_board_info { + /* The following is some board information */ + u8 bt_chip_type; + u8 pg_ant_num; /* pg ant number */ + u8 btdm_ant_num; /* ant number for btdm */ + u8 btdm_ant_pos; + bool bt_exist; +}; + +enum btc_dbg_opcode { + BTC_DBG_SET_COEX_NORMAL = 0x0, + BTC_DBG_SET_COEX_WIFI_ONLY = 0x1, + BTC_DBG_SET_COEX_BT_ONLY = 0x2, + BTC_DBG_MAX +}; + +enum btc_rssi_state { + BTC_RSSI_STATE_HIGH = 0x0, + BTC_RSSI_STATE_MEDIUM = 0x1, + BTC_RSSI_STATE_LOW = 0x2, + BTC_RSSI_STATE_STAY_HIGH = 0x3, + BTC_RSSI_STATE_STAY_MEDIUM = 0x4, + BTC_RSSI_STATE_STAY_LOW = 0x5, + BTC_RSSI_MAX +}; + +enum btc_wifi_role { + BTC_ROLE_STATION = 0x0, + BTC_ROLE_AP = 0x1, + BTC_ROLE_IBSS = 0x2, + BTC_ROLE_HS_MODE = 0x3, + BTC_ROLE_MAX +}; + +enum btc_wifi_bw_mode { + BTC_WIFI_BW_LEGACY = 0x0, + BTC_WIFI_BW_HT20 = 0x1, + BTC_WIFI_BW_HT40 = 0x2, + BTC_WIFI_BW_MAX +}; + +enum btc_wifi_traffic_dir { + BTC_WIFI_TRAFFIC_TX = 0x0, + BTC_WIFI_TRAFFIC_RX = 0x1, + BTC_WIFI_TRAFFIC_MAX +}; + +enum btc_wifi_pnp { + BTC_WIFI_PNP_WAKE_UP = 0x0, + BTC_WIFI_PNP_SLEEP = 0x1, + BTC_WIFI_PNP_MAX +}; + + +enum btc_get_type { + /* type bool */ + BTC_GET_BL_HS_OPERATION, + BTC_GET_BL_HS_CONNECTING, + BTC_GET_BL_WIFI_CONNECTED, + BTC_GET_BL_WIFI_BUSY, + BTC_GET_BL_WIFI_SCAN, + BTC_GET_BL_WIFI_LINK, + BTC_GET_BL_WIFI_DHCP, + BTC_GET_BL_WIFI_SOFTAP_IDLE, + BTC_GET_BL_WIFI_SOFTAP_LINKING, + BTC_GET_BL_WIFI_IN_EARLY_SUSPEND, + BTC_GET_BL_WIFI_ROAM, + BTC_GET_BL_WIFI_4_WAY_PROGRESS, + BTC_GET_BL_WIFI_UNDER_5G, + BTC_GET_BL_WIFI_AP_MODE_ENABLE, + BTC_GET_BL_WIFI_ENABLE_ENCRYPTION, + BTC_GET_BL_WIFI_UNDER_B_MODE, + BTC_GET_BL_EXT_SWITCH, + + /* type s4Byte */ + BTC_GET_S4_WIFI_RSSI, + BTC_GET_S4_HS_RSSI, + + /* type u32 */ + BTC_GET_U4_WIFI_BW, + BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + BTC_GET_U4_WIFI_FW_VER, + BTC_GET_U4_BT_PATCH_VER, + + /* type u1Byte */ + BTC_GET_U1_WIFI_DOT11_CHNL, + BTC_GET_U1_WIFI_CENTRAL_CHNL, + BTC_GET_U1_WIFI_HS_CHNL, + BTC_GET_U1_MAC_PHY_MODE, + + /* for 1Ant */ + BTC_GET_U1_LPS_MODE, + BTC_GET_BL_BT_SCO_BUSY, + + /* for test mode */ + BTC_GET_DRIVER_TEST_CFG, + BTC_GET_MAX +}; + + +enum btc_set_type { + /* type bool */ + BTC_SET_BL_BT_DISABLE, + BTC_SET_BL_BT_TRAFFIC_BUSY, + BTC_SET_BL_BT_LIMITED_DIG, + BTC_SET_BL_FORCE_TO_ROAM, + BTC_SET_BL_TO_REJ_AP_AGG_PKT, + BTC_SET_BL_BT_CTRL_AGG_SIZE, + BTC_SET_BL_INC_SCAN_DEV_NUM, + + /* type u1Byte */ + BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, + BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, + BTC_SET_UI_SCAN_SIG_COMPENSATION, + BTC_SET_U1_AGG_BUF_SIZE, + + /* type trigger some action */ + BTC_SET_ACT_GET_BT_RSSI, + BTC_SET_ACT_AGGREGATE_CTRL, + + /********* for 1Ant **********/ + /* type bool */ + BTC_SET_BL_BT_SCO_BUSY, + /* type u1Byte */ + BTC_SET_U1_1ANT_LPS, + BTC_SET_U1_1ANT_RPWM, + /* type trigger some action */ + BTC_SET_ACT_LEAVE_LPS, + BTC_SET_ACT_ENTER_LPS, + BTC_SET_ACT_NORMAL_LPS, + BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, + BTC_SET_ACT_DISABLE_LOW_POWER, + BTC_SET_ACT_UPDATE_ra_mask, + BTC_SET_ACT_SEND_MIMO_PS, + /* BT Coex related */ + BTC_SET_ACT_CTRL_BT_INFO, + BTC_SET_ACT_CTRL_BT_COEX, + /***************************/ + BTC_SET_MAX +}; + +enum btc_dbg_disp_type { + BTC_DBG_DISP_COEX_STATISTICS = 0x0, + BTC_DBG_DISP_BT_LINK_INFO = 0x1, + 
BTC_DBG_DISP_BT_FW_VER = 0x2, + BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3, + BTC_DBG_DISP_MAX +}; + +enum btc_notify_type_ips { + BTC_IPS_LEAVE = 0x0, + BTC_IPS_ENTER = 0x1, + BTC_IPS_MAX +}; + +enum btc_notify_type_lps { + BTC_LPS_DISABLE = 0x0, + BTC_LPS_ENABLE = 0x1, + BTC_LPS_MAX +}; + +enum btc_notify_type_scan { + BTC_SCAN_FINISH = 0x0, + BTC_SCAN_START = 0x1, + BTC_SCAN_MAX +}; + +enum btc_notify_type_associate { + BTC_ASSOCIATE_FINISH = 0x0, + BTC_ASSOCIATE_START = 0x1, + BTC_ASSOCIATE_MAX +}; + +enum btc_notify_type_media_status { + BTC_MEDIA_DISCONNECT = 0x0, + BTC_MEDIA_CONNECT = 0x1, + BTC_MEDIA_MAX +}; + +enum btc_notify_type_special_packet { + BTC_PACKET_UNKNOWN = 0x0, + BTC_PACKET_DHCP = 0x1, + BTC_PACKET_ARP = 0x2, + BTC_PACKET_EAPOL = 0x3, + BTC_PACKET_MAX +}; + +enum btc_notify_type_stack_operation { + BTC_STACK_OP_NONE = 0x0, + BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1, + BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2, + BTC_STACK_OP_MAX +}; + + +typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr); + +typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr); + +typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr); + +typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data); + +typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr, + u32 bit_mask, u8 data1b); + +typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data); + +typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data); + +typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr, + u8 bit_mask, u8 data); + +typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr, + u32 bit_mask, u32 data); + +typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr, + u32 bit_mask); + +typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr, + u32 bit_mask, u32 data); + +typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path, + u32 reg_addr, u32 bit_mask); + +typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id, + u32 cmd_len, u8 *cmd_buffer); + +typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf); + +typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf); + +typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type); + +struct btc_bt_info { + bool bt_disabled; + u8 rssi_adjust_for_agc_table_on; + u8 rssi_adjust_for_1ant_coex_type; + bool bt_busy; + u8 agg_buf_size; + bool limited_dig; + bool reject_agg_pkt; + bool b_bt_ctrl_buf_size; + bool increase_scan_dev_num; + u16 bt_hci_ver; + u16 bt_real_fw_ver; + u8 bt_fw_ver; + + /* the following is for 1Ant solution */ + bool bt_ctrl_lps; + bool bt_pwr_save_mode; + bool bt_lps_on; + bool force_to_roam; + u8 force_exec_pwr_cmd_cnt; + u8 lps_1ant; + u8 rpwm_1ant; + u32 ra_mask; +}; + +struct btc_stack_info { + bool profile_notified; + u16 hci_version; /* stack hci version */ + u8 num_of_link; + bool bt_link_exist; + bool sco_exist; + bool acl_exist; + bool a2dp_exist; + bool hid_exist; + u8 num_of_hid; + bool pan_exist; + bool unknown_acl_exist; + char min_bt_rssi; +}; + +struct btc_statistics { + u32 cnt_bind; + u32 cnt_init_hw_config; + u32 cnt_init_coex_dm; + u32 cnt_ips_notify; + u32 cnt_lps_notify; + u32 cnt_scan_notify; + u32 cnt_connect_notify; + u32 cnt_media_status_notify; + u32 cnt_special_packet_notify; + u32 cnt_bt_info_notify; + u32 cnt_periodical; + u32 cnt_stack_operation_notify; + u32 cnt_dbg_ctrl; +}; + +struct btc_bt_link_info { + bool bt_link_exist; + bool sco_exist; + bool sco_only; + bool 
a2dp_exist; + bool a2dp_only; + bool hid_exist; + bool hid_only; + bool pan_exist; + bool pan_only; +}; + +enum btc_antenna_pos { + BTC_ANTENNA_AT_MAIN_PORT = 0x1, + BTC_ANTENNA_AT_AUX_PORT = 0x2, +}; + +struct btc_coexist { + /* make sure only one adapter can bind the data context */ + bool binded; + /* default adapter */ + void *adapter; + struct btc_board_info board_info; + /* some bt info referenced by non-bt module */ + struct btc_bt_info bt_info; + struct btc_stack_info stack_info; + enum btc_chip_interface chip_interface; + struct btc_bt_link_info bt_link_info; + + bool initilized; + bool stop_coex_dm; + bool manual_control; + u8 *cli_buf; + struct btc_statistics statistics; + u8 pwr_mode_val[10]; + + /* function pointers - io related */ + bfp_btc_r1 btc_read_1byte; + bfp_btc_w1 btc_write_1byte; + bfp_btc_w1_bit_mak btc_write_1byte_bitmask; + bfp_btc_r2 btc_read_2byte; + bfp_btc_w2 btc_write_2byte; + bfp_btc_r4 btc_read_4byte; + bfp_btc_w4 btc_write_4byte; + + bfp_btc_set_bb_reg btc_set_bb_reg; + bfp_btc_get_bb_reg btc_get_bb_reg; + + + bfp_btc_set_rf_reg btc_set_rf_reg; + bfp_btc_get_rf_reg btc_get_rf_reg; + + bfp_btc_fill_h2c btc_fill_h2c; + + bfp_btc_disp_dbg_msg btc_disp_dbg_msg; + + bfp_btc_get btc_get; + bfp_btc_set btc_set; +}; + +bool halbtc_is_wifi_uplink(struct rtl_priv *adapter); + +extern struct btc_coexist gl_bt_coexist; + +bool exhalbtc_initlize_variables(struct rtl_priv *adapter); +void exhalbtc_init_hw_config(struct btc_coexist *btcoexist); +void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist); +void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action); +void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, + enum _RT_MEDIA_STATUS media_status); +void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type); +void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, + u8 length); +void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_halt_notify(struct btc_coexist *btcoexist); +void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); +void exhalbtc_periodical(struct btc_coexist *btcoexist); +void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len, + u8 *data); +void exhalbtc_stack_update_profile_info(void); +void exhalbtc_set_hci_version(u16 hci_version); +void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version); +void exhalbtc_update_min_bt_rssi(char bt_rssi); +void exhalbtc_set_bt_exist(bool bt_exist); +void exhalbtc_set_chip_type(u8 chip_type); +void exhalbtc_set_ant_num(u8 type, u8 ant_num); +void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist); +void exhalbtc_signal_compensation(struct btc_coexist *btcoexist, + u8 *rssi_wifi, u8 *rssi_bt); +void exhalbtc_lps_leave(struct btc_coexist *btcoexist); +void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c @@ -0,0 +1,218 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2013 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "rtl_btc.h" +#include "halbt_precomp.h" + +#include +#include + +static struct rtl_btc_ops rtl_btc_operation = { + .btc_init_variables = rtl_btc_init_variables, + .btc_init_hal_vars = rtl_btc_init_hal_vars, + .btc_init_hw_config = rtl_btc_init_hw_config, + .btc_ips_notify = rtl_btc_ips_notify, + .btc_scan_notify = rtl_btc_scan_notify, + .btc_connect_notify = rtl_btc_connect_notify, + .btc_mediastatus_notify = rtl_btc_mediastatus_notify, + .btc_periodical = rtl_btc_periodical, + .btc_halt_notify = rtl_btc_halt_notify, + .btc_btinfo_notify = rtl_btc_btinfo_notify, + .btc_is_limited_dig = rtl_btc_is_limited_dig, + .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, + .btc_is_bt_disabled = rtl_btc_is_bt_disabled, +}; + +void rtl_btc_init_variables(struct rtl_priv *rtlpriv) +{ + exhalbtc_initlize_variables(rtlpriv); +} + +void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) +{ + u8 ant_num; + u8 bt_exist; + u8 bt_type; + + ant_num = rtl_get_hwpg_ant_num(rtlpriv); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "%s, antNum is %d\n", __func__, ant_num); + + bt_exist = rtl_get_hwpg_bt_exist(rtlpriv); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "%s, bt_exist is %d\n", __func__, bt_exist); + exhalbtc_set_bt_exist(bt_exist); + + bt_type = rtl_get_hwpg_bt_type(rtlpriv); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_type is %d\n", + __func__, bt_type); + exhalbtc_set_chip_type(bt_type); + + exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num); +} + +void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) +{ + exhalbtc_init_hw_config(&gl_bt_coexist); + exhalbtc_init_coex_dm(&gl_bt_coexist); +} + +void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type) +{ + exhalbtc_ips_notify(&gl_bt_coexist, type); +} + +void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype) +{ + exhalbtc_scan_notify(&gl_bt_coexist, scantype); +} + +void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action) +{ + exhalbtc_connect_notify(&gl_bt_coexist, action); +} + +void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, + enum _RT_MEDIA_STATUS mstatus) +{ + exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus); +} + +void rtl_btc_periodical(struct rtl_priv *rtlpriv) +{ + exhalbtc_periodical(&gl_bt_coexist); +} + +void rtl_btc_halt_notify(void) +{ + exhalbtc_halt_notify(&gl_bt_coexist); +} + +void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) +{ + exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); +} + +bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) +{ + return gl_bt_coexist.bt_info.limited_dig; +} + +bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv) +{ + bool bt_change_edca = false; + u32 cur_edca_val; + u32 edca_bt_hs_uplink = 0x5ea42b, 
edca_bt_hs_downlink = 0x5ea42b; + u32 edca_hs; + u32 edca_addr = 0x504; + + cur_edca_val = rtl_read_dword(rtlpriv, edca_addr); + if (halbtc_is_wifi_uplink(rtlpriv)) { + if (cur_edca_val != edca_bt_hs_uplink) { + edca_hs = edca_bt_hs_uplink; + bt_change_edca = true; + } + } else { + if (cur_edca_val != edca_bt_hs_downlink) { + edca_hs = edca_bt_hs_downlink; + bt_change_edca = true; + } + } + + if (bt_change_edca) + rtl_write_dword(rtlpriv, edca_addr, edca_hs); + + return true; +} + +bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv) +{ + if (gl_bt_coexist.bt_info.bt_disabled) + return true; + else + return false; +} + +struct rtl_btc_ops *rtl_btc_get_ops_pointer(void) +{ + return &rtl_btc_operation; +} +EXPORT_SYMBOL(rtl_btc_get_ops_pointer); + +u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) +{ + u8 num; + + if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) + num = 2; + else + num = 1; + + return num; +} + +enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT; + + u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; + + if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) + m_status = RT_MEDIA_CONNECT; + + return m_status; +} + +u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv) +{ + return rtlpriv->btcoexist.btc_info.btcoexist; +} + +u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) +{ + return rtlpriv->btcoexist.btc_info.bt_type; +} + +MODULE_AUTHOR("Page He "); +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_AUTHOR("Larry Finger "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); + +static int __init rtl_btcoexist_module_init(void) +{ + return 0; +} + +static void __exit rtl_btcoexist_module_exit(void) +{ + return; +} + +module_init(rtl_btcoexist_module_init); +module_exit(rtl_btcoexist_module_exit); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h @@ -0,0 +1,52 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_BTC_H__ +#define __RTL_BTC_H__ + +#include "halbt_precomp.h" + +void rtl_btc_init_variables(struct rtl_priv *rtlpriv); +void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv); +void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv); +void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type); +void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype); +void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action); +void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, + enum _RT_MEDIA_STATUS mstatus); +void rtl_btc_periodical(struct rtl_priv *rtlpriv); +void rtl_btc_halt_notify(void); +void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length); +bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); +bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); +bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv); + +struct rtl_btc_ops *rtl_btc_get_ops_pointer(void); + +u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv); +enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/core.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/core.c @@ -46,10 +46,20 @@ "Firmware callback routine entered!\n"); complete(&rtlpriv->firmware_loading_complete); if (!firmware) { + if (rtlpriv->cfg->alt_fw_name) { + err = request_firmware(&firmware, + rtlpriv->cfg->alt_fw_name, + rtlpriv->io.dev); + pr_info("Loading alternative firmware %s\n", + rtlpriv->cfg->alt_fw_name); + if (!err) + goto found_alt; + } pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name); rtlpriv->max_fw_size = 0; return; } +found_alt: if (firmware->size > rtlpriv->max_fw_size) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Firmware is too big!\n"); @@ -727,6 +737,11 @@ rtlpriv->cfg->ops->linked_set_reg(hw); rcu_read_lock(); sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid); + if (!sta) { + pr_err("ieee80211_find_sta returned NULL\n"); + rcu_read_unlock(); + goto out; + } if (vif->type == NL80211_IFTYPE_STATION && sta) rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); @@ -881,7 +896,7 @@ mac->basic_rates = basic_rates; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, - (u8 *) (&basic_rates)); + (u8 *)(&basic_rates)); } rcu_read_unlock(); } @@ -895,6 +910,11 @@ if (bss_conf->assoc) { if (ppsc->fwctrl_lps) { u8 mstatus = RT_MEDIA_CONNECT; + u8 keep_alive = 10; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_KEEP_ALIVE, + (u8 *)(&keep_alive)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_JOINBSSRPT, &mstatus); @@ -1298,7 +1318,8 @@ * before switch channel or power save, or tx buffer packet * maybe send after offchannel or rf sleep, this may cause * dis-association by AP */ -static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void rtl_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct rtl_priv *rtlpriv = rtl_priv(hw); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/pci.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/pci.c @@ -1068,7 +1068,7 @@ mac->current_ampdu_factor = 3; /*QOS*/ - rtlpci->acm_method = eAcmWay2_SW; + rtlpci->acm_method = EACMWAY2_SW; /*task */ tasklet_init(&rtlpriv->works.irq_tasklet, @@ -1843,6 +1843,65 @@ return true; } +static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = 
rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + int ret; + + ret = pci_enable_msi(rtlpci->pdev); + if (ret < 0) + return ret; + + ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, + IRQF_SHARED, KBUILD_MODNAME, hw); + if (ret < 0) { + pci_disable_msi(rtlpci->pdev); + return ret; + } + + rtlpci->using_msi = true; + + RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG, + "MSI Interrupt Mode!\n"); + return 0; +} + +static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + int ret; + + ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, + IRQF_SHARED, KBUILD_MODNAME, hw); + if (ret < 0) + return ret; + + rtlpci->using_msi = false; + RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG, + "Pin-based Interrupt Mode!\n"); + return 0; +} + +static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + int ret; + + if (rtlpci->msi_support) { + ret = rtl_pci_intr_mode_msi(hw); + if (ret < 0) + ret = rtl_pci_intr_mode_legacy(hw); + } else { + ret = rtl_pci_intr_mode_legacy(hw); + } + return ret; +} + int rtl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -1985,8 +2044,7 @@ } rtlpci = rtl_pcidev(pcipriv); - err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, - IRQF_SHARED, KBUILD_MODNAME, hw); + err = rtl_pci_intr_mode_decide(hw); if (err) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s: failed to register IRQ handler\n", @@ -2054,6 +2112,9 @@ rtlpci->irq_alloc = 0; } + if (rtlpci->using_msi) + pci_disable_msi(rtlpci->pdev); + list_del(&rtlpriv->list); if (rtlpriv->io.pci_mem_start != 0) { pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/pci.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/pci.h @@ -199,6 +199,10 @@ u16 shortretry_limit; u16 longretry_limit; + + /* MSI support */ + bool msi_support; + bool using_msi; }; struct mp_adapter { --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/ps.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/ps.c @@ -48,7 +48,7 @@ /*<2> Enable Adapter */ if (rtlpriv->cfg->ops->hw_init(hw)) - return 1; + return false; RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); /*<3> Enable Interrupt */ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c @@ -235,7 +235,7 @@ u8 pwr_val = 0; u8 cck_base = rtldm->swing_idx_cck_base; u8 cck_val = rtldm->swing_idx_cck; - u8 ofdm_base = rtldm->swing_idx_ofdm_base; + u8 ofdm_base = rtldm->swing_idx_ofdm_base[0]; u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A]; if (type == 0) { @@ -726,7 +726,7 @@ static u64 last_rx; long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; - if (rtlhal->oem_id == RT_CID_819x_HP) { + if (rtlhal->oem_id == RT_CID_819X_HP) { u64 cur_txok_cnt = 0; u64 cur_rxok_cnt = 0; cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok; @@ -912,7 +912,7 @@ for (i = 0; i < OFDM_TABLE_LENGTH; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { ofdm_old[0] = (u8) i; - rtldm->swing_idx_ofdm_base = (u8)i; + rtldm->swing_idx_ofdm_base[0] = (u8)i; RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n", ROFDM0_XATXIQIMBAL, @@ -1078,7 +1078,7 @@ rtldm->swing_flag_ofdm = true; } - if 
(rtldm->swing_idx_cck != rtldm->swing_idx_cck) { + if (rtldm->swing_idx_cck_cur != rtldm->swing_idx_cck) { rtldm->swing_idx_cck_cur = rtldm->swing_idx_cck; rtldm->swing_flag_cck = true; } --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c @@ -509,7 +509,7 @@ u8 e_aci = *((u8 *)val); rtl88e_dm_init_edca_turbo(hw); - if (rtlpci->acm_method != eAcmWay2_SW) + if (rtlpci->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, (u8 *)(&e_aci)); break; } @@ -1025,9 +1025,20 @@ bool rtstatus = true; int err = 0; u8 tmp_u1b, u1byte; + unsigned long flags; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n"); rtlpriv->rtlhal.being_init_adapter = true; + /* As this function can take a very long time (up to 350 ms) + * and can be called with irqs disabled, reenable the irqs + * to let the other devices continue being serviced. + * + * It is safe doing so since our own interrupts will only be enabled + * in a subsequent step. + */ + local_save_flags(flags); + local_irq_enable(); + rtlpriv->intf_ops->disable_aspm(hw); tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1); @@ -1043,7 +1054,7 @@ if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); err = 1; - return err; + goto exit; } err = rtl88e_download_fw(hw, false); @@ -1051,8 +1062,7 @@ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. Init HW without FW now..\n"); err = 1; - rtlhal->fw_ready = false; - return err; + goto exit; } else { rtlhal->fw_ready = true; } @@ -1097,7 +1107,7 @@ if (ppsc->rfpwr_state == ERFON) { if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) || ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) && - (rtlhal->oem_id == RT_CID_819x_HP))) { + (rtlhal->oem_id == RT_CID_819X_HP))) { rtl88e_phy_set_rfpath_switch(hw, true); rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT; } else { @@ -1135,10 +1145,12 @@ } rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128)); rtl88e_dm_init(hw); +exit: + local_irq_restore(flags); rtlpriv->rtlhal.being_init_adapter = false; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n", err); - return 0; + return err; } static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw) @@ -1872,15 +1884,15 @@ case EEPROM_CID_DEFAULT: if (rtlefuse->eeprom_did == 0x8179) { if (rtlefuse->eeprom_svid == 0x1025) { - rtlhal->oem_id = RT_CID_819x_Acer; + rtlhal->oem_id = RT_CID_819X_ACER; } else if ((rtlefuse->eeprom_svid == 0x10EC && rtlefuse->eeprom_smid == 0x0179) || (rtlefuse->eeprom_svid == 0x17AA && rtlefuse->eeprom_smid == 0x0179)) { - rtlhal->oem_id = RT_CID_819x_Lenovo; + rtlhal->oem_id = RT_CID_819X_LENOVO; } else if (rtlefuse->eeprom_svid == 0x103c && rtlefuse->eeprom_smid == 0x197d) { - rtlhal->oem_id = RT_CID_819x_HP; + rtlhal->oem_id = RT_CID_819X_HP; } else { rtlhal->oem_id = RT_CID_DEFAULT; } @@ -1892,7 +1904,7 @@ rtlhal->oem_id = RT_CID_TOSHIBA; break; case EEPROM_CID_QMI: - rtlhal->oem_id = RT_CID_819x_QMI; + rtlhal->oem_id = RT_CID_819X_QMI; break; case EEPROM_CID_WHQL: default: @@ -1911,14 +1923,14 @@ pcipriv->ledctl.led_opendrain = true; switch (rtlhal->oem_id) { - case RT_CID_819x_HP: + case RT_CID_819X_HP: pcipriv->ledctl.led_opendrain = true; break; - case RT_CID_819x_Lenovo: + case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: case RT_CID_TOSHIBA: case RT_CID_CCX: - case RT_CID_819x_Acer: + case RT_CID_819X_ACER: case RT_CID_WHQL: default: break; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c 
+++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c @@ -1002,7 +1002,7 @@ } } - if (rtlhal->oem_id == RT_CID_819x_HP) + if (rtlhal->oem_id == RT_CID_819X_HP) rtl88_config_s(hw, 0x52, 0x7E4BD); break; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c @@ -93,6 +93,7 @@ u8 tid; rtl8188ee_bt_reg_init(hw); + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; @@ -266,6 +267,7 @@ .inactiveps = true, .swctrl_lps = false, .fwctrl_lps = true, + .msi_support = false, .debug = DBG_EMERG, }; @@ -382,10 +384,12 @@ module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl88ee_mod_params.msi_support, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c @@ -293,7 +293,7 @@ u8 *psaddr; __le16 fc; u16 type, ufc; - bool match_bssid, packet_toself, packet_beacon, addr; + bool match_bssid, packet_toself, packet_beacon = false, addr; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; @@ -452,7 +452,7 @@ /* During testing, hdr was NULL */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -158,6 +158,42 @@ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} }; +static u32 power_index_reg[6] = {0xc90, 0xc91, 0xc92, 0xc98, 0xc99, 0xc9a}; + +void dm_restorepowerindex(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 index; + + for (index = 0; index < 6; index++) + rtl_write_byte(rtlpriv, power_index_reg[index], + rtlpriv->dm.powerindex_backup[index]); +} +EXPORT_SYMBOL_GPL(dm_restorepowerindex); + +void dm_writepowerindex(struct ieee80211_hw *hw, u8 value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 index; + + for (index = 0; index < 6; index++) + rtl_write_byte(rtlpriv, power_index_reg[index], value); +} +EXPORT_SYMBOL_GPL(dm_writepowerindex); + +void dm_savepowerindex(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 index; + u8 tmp; + + for (index = 0; index < 6; index++) { + tmp = rtl_read_byte(rtlpriv, power_index_reg[index]); + rtlpriv->dm.powerindex_backup[index] = tmp; + } +} +EXPORT_SYMBOL_GPL(dm_savepowerindex); + static void rtl92c_dm_diginit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -180,7 +216,12 @@ dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX; - dm_digtable->cur_cck_pd_state = 
CCK_PD_STAGE_MAX; + dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LowRssi; + + dm_digtable->forbidden_igi = DM_DIG_MIN; + dm_digtable->large_fa_hit = 0; + dm_digtable->recover_cnt = 0; + dm_digtable->dig_dynamic_min = 0x25; } static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) @@ -206,7 +247,9 @@ rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb; } - return (u8) rssi_val_min; + if (rssi_val_min > 100) + rssi_val_min = 100; + return (u8)rssi_val_min; } static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) @@ -224,9 +267,17 @@ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); + + ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD); + falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff); + falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16); + falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + - falsealm_cnt->cnt_rate_illegal + - falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail; + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail; rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); @@ -271,12 +322,14 @@ value_igi++; else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2) value_igi += 2; + if (value_igi > DM_DIG_FA_UPPER) value_igi = DM_DIG_FA_UPPER; else if (value_igi < DM_DIG_FA_LOWER) value_igi = DM_DIG_FA_LOWER; + if (rtlpriv->falsealm_cnt.cnt_all > 10000) - value_igi = 0x32; + value_igi = DM_DIG_FA_UPPER; dm_digtable->cur_igvalue = value_igi; rtl92c_dm_write_dig(hw); @@ -286,32 +339,80 @@ { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *digtable = &rtlpriv->dm_digtable; + u32 isbt; - if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) { - if ((digtable->back_val - 2) < digtable->back_range_min) - digtable->back_val = digtable->back_range_min; - else - digtable->back_val -= 2; - } else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) { - if ((digtable->back_val + 2) > digtable->back_range_max) - digtable->back_val = digtable->back_range_max; - else - digtable->back_val += 2; + /* modify DIG lower bound, deal with abnorally large false alarm */ + if (rtlpriv->falsealm_cnt.cnt_all > 10000) { + digtable->large_fa_hit++; + if (digtable->forbidden_igi < digtable->cur_igvalue) { + digtable->forbidden_igi = digtable->cur_igvalue; + digtable->large_fa_hit = 1; + } + + if (digtable->large_fa_hit >= 3) { + if ((digtable->forbidden_igi + 1) > + digtable->rx_gain_max) + digtable->rx_gain_min = digtable->rx_gain_max; + else + digtable->rx_gain_min = (digtable->forbidden_igi + 1); + digtable->recover_cnt = 3600; /* 3600=2hr */ + } + } else { + /* Recovery mechanism for IGI lower bound */ + if (digtable->recover_cnt != 0) { + digtable->recover_cnt--; + } else { + if (digtable->large_fa_hit == 0) { + if ((digtable->forbidden_igi-1) < DM_DIG_MIN) { + digtable->forbidden_igi = DM_DIG_MIN; + digtable->rx_gain_min = DM_DIG_MIN; + } else { + digtable->forbidden_igi--; + digtable->rx_gain_min = digtable->forbidden_igi + 1; + } + } else if (digtable->large_fa_hit == 3) { + digtable->large_fa_hit = 0; + } + } } + if (rtlpriv->falsealm_cnt.cnt_all < 250) { + isbt = rtl_read_byte(rtlpriv, 0x4fd) & 0x01; - if ((digtable->rssi_val_min + 10 - digtable->back_val) > - digtable->rx_gain_max) + if (!isbt) { + if (rtlpriv->falsealm_cnt.cnt_all > + 
digtable->fa_lowthresh) { + if ((digtable->back_val - 2) < + digtable->back_range_min) + digtable->back_val = digtable->back_range_min; + else + digtable->back_val -= 2; + } else if (rtlpriv->falsealm_cnt.cnt_all < + digtable->fa_lowthresh) { + if ((digtable->back_val + 2) > + digtable->back_range_max) + digtable->back_val = digtable->back_range_max; + else + digtable->back_val += 2; + } + } else { + digtable->back_val = DM_DIG_BACKOFF_DEFAULT; + } + } else { + /* Adjust initial gain by false alarm */ + if (rtlpriv->falsealm_cnt.cnt_all > 1000) + digtable->cur_igvalue = digtable->pre_igvalue + 2; + else if (rtlpriv->falsealm_cnt.cnt_all > 750) + digtable->cur_igvalue = digtable->pre_igvalue + 1; + else if (rtlpriv->falsealm_cnt.cnt_all < 500) + digtable->cur_igvalue = digtable->pre_igvalue - 1; + } + + /* Check initial gain by upper/lower bound */ + if (digtable->cur_igvalue > digtable->rx_gain_max) digtable->cur_igvalue = digtable->rx_gain_max; - else if ((digtable->rssi_val_min + 10 - - digtable->back_val) < digtable->rx_gain_min) - digtable->cur_igvalue = digtable->rx_gain_min; - else - digtable->cur_igvalue = digtable->rssi_val_min + 10 - - digtable->back_val; - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - "rssi_val_min = %x back_val %x\n", - digtable->rssi_val_min, digtable->back_val); + if (digtable->cur_igvalue < digtable->rx_gain_min) + digtable->cur_igvalue = digtable->rx_gain_min; rtl92c_dm_write_dig(hw); } @@ -329,7 +430,7 @@ multi_sta = true; if (!multi_sta || - dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) { + dm_digtable->cursta_cstate == DIG_STA_DISCONNECT) { initialized = false; dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; return; @@ -375,7 +476,6 @@ RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "presta_cstate = %x, cursta_cstate = %x\n", dm_digtable->presta_cstate, dm_digtable->cursta_cstate); - if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate || dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT || dm_digtable->cursta_cstate == DIG_STA_CONNECT) { @@ -383,6 +483,8 @@ if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) { dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); + if (dm_digtable->rssi_val_min > 100) + dm_digtable->rssi_val_min = 100; rtl92c_dm_ctrl_initgain_by_rssi(hw); } } else { @@ -398,11 +500,12 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) { dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); + if (dm_digtable->rssi_val_min > 100) + dm_digtable->rssi_val_min = 100; if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { if (dm_digtable->rssi_val_min <= 25) @@ -424,48 +527,14 @@ } if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) { - if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { - if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) - dm_digtable->cur_cck_fa_state = - CCK_FA_STAGE_High; - else - dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low; - - if (dm_digtable->pre_cck_fa_state != - dm_digtable->cur_cck_fa_state) { - if (dm_digtable->cur_cck_fa_state == - CCK_FA_STAGE_Low) - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, - 0x83); - else - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, - 0xcd); - - dm_digtable->pre_cck_fa_state = - dm_digtable->cur_cck_fa_state; - } - - rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); - - if (IS_92C_SERIAL(rtlhal->version)) - 
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, - MASKBYTE2, 0xd7); - } else { + if ((dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) || + (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_MAX)) + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83); + else rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); - rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47); - if (IS_92C_SERIAL(rtlhal->version)) - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, - MASKBYTE2, 0xd3); - } dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state; } - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n", - dm_digtable->cur_cck_pd_state); - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n", - IS_92C_SERIAL(rtlhal->version)); } static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) @@ -482,6 +551,8 @@ else dm_digtable->cursta_cstate = DIG_STA_DISCONNECT; + dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT; + rtl92c_dm_initial_gain_sta(hw); rtl92c_dm_initial_gain_multi_sta(hw); rtl92c_dm_cck_packet_detection_thresh(hw); @@ -493,23 +564,26 @@ static void rtl92c_dm_dig(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct dig_t *dm_digtable = &rtlpriv->dm_digtable; if (rtlpriv->dm.dm_initialgain_enable == false) return; - if (dm_digtable->dig_enable_flag == false) + if (!rtlpriv->dm.dm_flag & DYNAMIC_FUNC_DIG) return; rtl92c_dm_ctrl_initgain_by_twoport(hw); - } static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - rtlpriv->dm.dynamic_txpower_enable = false; - + if (rtlpriv->rtlhal.interface == INTF_USB && + rtlpriv->rtlhal.board_type & 0x1) { + dm_savepowerindex(hw); + rtlpriv->dm.dynamic_txpower_enable = true; + } else { + rtlpriv->dm.dynamic_txpower_enable = false; + } rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; } @@ -524,9 +598,14 @@ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, dm_digtable->back_val); - dm_digtable->cur_igvalue += 2; - if (dm_digtable->cur_igvalue > 0x3f) - dm_digtable->cur_igvalue = 0x3f; + if (rtlpriv->rtlhal.interface == INTF_USB && + !dm_digtable->dig_enable_flag) { + dm_digtable->pre_igvalue = 0x17; + return; + } + dm_digtable->cur_igvalue -= 1; + if (dm_digtable->cur_igvalue < DM_DIG_MIN) + dm_digtable->cur_igvalue = DM_DIG_MIN; if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) { rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, @@ -536,11 +615,47 @@ dm_digtable->pre_igvalue = dm_digtable->cur_igvalue; } + RT_TRACE(rtlpriv, COMP_DIG, DBG_WARNING, + "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, + dm_digtable->rssi_val_min, dm_digtable->back_val, + dm_digtable->rx_gain_max, dm_digtable->rx_gain_min, + dm_digtable->large_fa_hit, dm_digtable->forbidden_igi); } EXPORT_SYMBOL(rtl92c_dm_write_dig); static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff; + + if (mac->link_state != MAC80211_LINKED) + return; + + if (mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_AP) { + /* TODO: Handle ADHOC and AP Mode */ + } + + if (tmpentry_max_pwdb != 0) + rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb; + else + rtlpriv->dm.entry_max_undec_sm_pwdb = 0; + + if (tmpentry_min_pwdb != 0xff) + rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb; + else + rtlpriv->dm.entry_min_undec_sm_pwdb = 0; 
+ +/* TODO: + * if (mac->opmode == NL80211_IFTYPE_STATION) { + * if (rtlpriv->rtlhal.fw_ready) { + * u32 param = (u32)(rtlpriv->dm.undec_sm_pwdb << 16); + * rtl8192c_set_rssi_cmd(hw, param); + * } + * } + */ } void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) @@ -750,6 +865,7 @@ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; rtlpriv->dm.cck_index = cck_index_old; } + /* Handle USB High PA boards */ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? (thermalvalue - rtlpriv->dm.thermalvalue) : @@ -1140,22 +1256,22 @@ { struct rtl_priv *rtlpriv = rtl_priv(hw); struct ps_t *dm_pstable = &rtlpriv->dm_pstable; - static u8 initialize; - static u32 reg_874, reg_c70, reg_85c, reg_a74; - if (initialize == 0) { - reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - MASKDWORD) & 0x1CC000) >> 14; + if (!rtlpriv->reg_init) { + rtlpriv->reg_874 = (rtl_get_bbreg(hw, + RFPGA0_XCD_RFINTERFACESW, + MASKDWORD) & 0x1CC000) >> 14; - reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1, - MASKDWORD) & BIT(3)) >> 3; + rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1, + MASKDWORD) & BIT(3)) >> 3; - reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, - MASKDWORD) & 0xFF000000) >> 24; + rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, + MASKDWORD) & 0xFF000000) >> 24; - reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12; + rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & + 0xF000) >> 12; - initialize = 1; + rtlpriv->reg_init = true; } if (!bforce_in_normal) { @@ -1192,12 +1308,12 @@ rtl_set_bbreg(hw, 0x818, BIT(28), 0x1); } else { rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - 0x1CC000, reg_874); + 0x1CC000, rtlpriv->reg_874); rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), - reg_c70); + rtlpriv->reg_c70); rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000, - reg_85c); - rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74); + rtlpriv->reg_85c); + rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74); rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); } @@ -1213,6 +1329,7 @@ struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + /* Determine the minimum RSSI */ if (((mac->link_state == MAC80211_NOLINK)) && (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { dm_pstable->rssi_val_min = 0; @@ -1241,6 +1358,7 @@ dm_pstable->rssi_val_min); } + /* Power Saving for 92C */ if (IS_92C_SERIAL(rtlhal->version)) ;/* rtl92c_dm_1r_cca(hw); */ else @@ -1252,12 +1370,23 @@ struct rtl_priv *rtlpriv = rtl_priv(hw); rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + rtlpriv->dm.dm_flag = DYNAMIC_FUNC_DISABLE | DYNAMIC_FUNC_DIG; + rtlpriv->dm.undec_sm_pwdb = -1; + rtlpriv->dm.undec_sm_cck = -1; + rtlpriv->dm.dm_initialgain_enable = true; rtl92c_dm_diginit(hw); + + rtlpriv->dm.dm_flag |= HAL_DM_HIPWR_DISABLE; rtl92c_dm_init_dynamic_txpower(hw); + rtl92c_dm_init_edca_turbo(hw); rtl92c_dm_init_rate_adaptive_mask(hw); + rtlpriv->dm.dm_flag |= DYNAMIC_FUNC_SS; rtl92c_dm_initialize_txpower_tracking(hw); rtl92c_dm_init_dynamic_bb_powersaving(hw); + + rtlpriv->dm.ofdm_pkt_cnt = 0; + rtlpriv->dm.dm_rssi_sel = RSSI_DEFAULT; } EXPORT_SYMBOL(rtl92c_dm_init); @@ -1308,7 +1437,7 @@ } if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { - rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2; RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"); } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && @@ -1328,8 +1457,16 @@ "PHY_SetTxPowerLevel8192S() Channel = %d\n", 
rtlphy->current_channel); rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); + if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_NORMAL) + dm_restorepowerindex(hw); + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL1) + dm_writepowerindex(hw, 0x14); + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL2) + dm_writepowerindex(hw, 0x10); } - rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; } @@ -1400,12 +1537,6 @@ else curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW); - /* Set Tx Power according to BT status. */ - if (undec_sm_pwdb >= 30) - curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW; - else if (undec_sm_pwdb < 25) - curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW); - /* Check BT state related to BT_Idle in B/G mode. */ if (undec_sm_pwdb < 15) curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h @@ -91,6 +91,17 @@ #define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 #define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 +#define DYNAMIC_FUNC_DISABLE 0x0 +#define DYNAMIC_FUNC_DIG BIT(0) +#define DYNAMIC_FUNC_HP BIT(1) +#define DYNAMIC_FUNC_SS BIT(2) /*Tx Power Tracking*/ +#define DYNAMIC_FUNC_BT BIT(3) +#define DYNAMIC_FUNC_ANT_DIV BIT(4) + +#define RSSI_CCK 0 +#define RSSI_OFDM 1 +#define RSSI_DEFAULT 2 + struct swat_t { u8 failure_cnt; u8 try_flag; @@ -167,5 +178,8 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery); void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw); void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw); +void dm_savepowerindex(struct ieee80211_hw *hw); +void dm_writepowerindex(struct ieee80211_hw *hw, u8 value); +void dm_restorepowerindex(struct ieee80211_hw *hw); #endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -319,7 +319,7 @@ u8 e_aci = *(val); rtl92c_dm_init_edca_turbo(hw); - if (rtlpci->acm_method != eAcmWay2_SW) + if (rtlpci->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, (&e_aci)); @@ -937,14 +937,27 @@ bool is92c; int err; u8 tmp_u1b; + unsigned long flags; rtlpci->being_init_adapter = true; + + /* Since this function can take a very long time (up to 350 ms) + * and can be called with irqs disabled, reenable the irqs + * to let the other devices continue being serviced. + * + * It is safe doing so since our own interrupts will only be enabled + * in a subsequent step. + */ + local_save_flags(flags); + local_irq_enable(); + + rtlhal->fw_ready = false; rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl92ce_init_mac(hw); if (!rtstatus) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); err = 1; - return err; + goto exit; } err = rtl92c_download_fw(hw); @@ -952,9 +965,10 @@ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. 
Init HW without FW now..\n"); err = 1; - return err; + goto exit; } + rtlhal->fw_ready = true; rtlhal->last_hmeboxnum = 0; rtl92c_phy_mac_config(hw); /* because last function modify RCR, so we update @@ -1032,6 +1046,8 @@ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); } rtl92c_dm_init(hw); +exit: + local_irq_restore(flags); rtlpci->being_init_adapter = false; return err; } @@ -1720,7 +1736,7 @@ if (rtlefuse->eeprom_did == 0x8176) { if ((rtlefuse->eeprom_svid == 0x103C && rtlefuse->eeprom_smid == 0x1629)) - rtlhal->oem_id = RT_CID_819x_HP; + rtlhal->oem_id = RT_CID_819X_HP; else rtlhal->oem_id = RT_CID_DEFAULT; } else { @@ -1731,7 +1747,7 @@ rtlhal->oem_id = RT_CID_TOSHIBA; break; case EEPROM_CID_QMI: - rtlhal->oem_id = RT_CID_819x_QMI; + rtlhal->oem_id = RT_CID_819X_QMI; break; case EEPROM_CID_WHQL: default: @@ -1750,14 +1766,14 @@ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); switch (rtlhal->oem_id) { - case RT_CID_819x_HP: + case RT_CID_819X_HP: pcipriv->ledctl.led_opendrain = true; break; - case RT_CID_819x_Lenovo: + case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: case RT_CID_TOSHIBA: case RT_CID_CCX: - case RT_CID_819x_Acer: + case RT_CID_819X_ACER: case RT_CID_WHQL: default: break; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -393,7 +393,7 @@ /* In testing, hdr was NULL here */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c @@ -101,6 +101,15 @@ "PHY_SetTxPowerLevel8192S() Channel = %d\n", rtlphy->current_channel); rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); + if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_NORMAL) + dm_restorepowerindex(hw); + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL1) + dm_writepowerindex(hw, 0x14); + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL2) + dm_writepowerindex(hw, 0x10); } rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h @@ -30,3 +30,6 @@ #include "../rtl8192ce/dm.h" void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw); +void dm_savepowerindex(struct ieee80211_hw *hw); +void dm_writepowerindex(struct ieee80211_hw *hw, u8 value); +void dm_restorepowerindex(struct ieee80211_hw *hw); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -394,7 +394,7 @@ if (rtlefuse->eeprom_did == 0x8176) { if ((rtlefuse->eeprom_svid == 0x103C && rtlefuse->eeprom_smid == 0x1629)) - rtlhal->oem_id = RT_CID_819x_HP; + rtlhal->oem_id = RT_CID_819X_HP; else rtlhal->oem_id = RT_CID_DEFAULT; } else { @@ -405,7 +405,7 @@ rtlhal->oem_id = RT_CID_TOSHIBA; break; case EEPROM_CID_QMI: - rtlhal->oem_id = RT_CID_819x_QMI; + rtlhal->oem_id = RT_CID_819X_QMI; break; case EEPROM_CID_WHQL: default: @@ -423,14 +423,14 @@ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); switch (rtlhal->oem_id) { - case RT_CID_819x_HP: + case RT_CID_819X_HP: usb_priv->ledctl.led_opendrain = true; break; - case RT_CID_819x_Lenovo: + case RT_CID_819X_LENOVO: case RT_CID_DEFAULT: case RT_CID_TOSHIBA: case RT_CID_CCX: - case RT_CID_819x_Acer: + case 
RT_CID_819X_ACER: case RT_CID_WHQL: default: break; @@ -985,19 +985,30 @@ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); int err = 0; static bool iqk_initialized; + unsigned long flags; + + /* As this function can take a very long time (up to 350 ms) + * and can be called with irqs disabled, reenable the irqs + * to let the other devices continue being serviced. + * + * It is safe doing so since our own interrupts will only be enabled + * in a subsequent step. + */ + local_save_flags(flags); + local_irq_enable(); rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; err = _rtl92cu_init_mac(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); - return err; + goto exit; } err = rtl92c_download_fw(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. Init HW without FW now..\n"); err = 1; - return err; + goto exit; } rtlhal->last_hmeboxnum = 0; /* h2c */ _rtl92cu_phy_param_tab_init(hw); @@ -1034,6 +1045,8 @@ _InitPABias(hw); _update_mac_setting(hw); rtl92c_dm_init(hw); +exit: + local_irq_restore(flags); return err; } @@ -1795,7 +1808,7 @@ e_aci); break; } - if (rtlusb->acm_method != eAcmWay2_SW) + if (rtlusb->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, &e_aci); break; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c @@ -85,17 +85,15 @@ if (mac->act_scanning) { tx_agc[RF90_PATH_A] = 0x3f3f3f3f; tx_agc[RF90_PATH_B] = 0x3f3f3f3f; - if (turbo_scanoff) { - for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { - tx_agc[idx1] = ppowerlevel[idx1] | - (ppowerlevel[idx1] << 8) | - (ppowerlevel[idx1] << 16) | - (ppowerlevel[idx1] << 24); - if (rtlhal->interface == INTF_USB) { - if (tx_agc[idx1] > 0x20 && - rtlefuse->external_pa) - tx_agc[idx1] = 0x20; - } + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + if (rtlhal->interface == INTF_USB) { + if (tx_agc[idx1] > 0x20 && + rtlefuse->external_pa) + tx_agc[idx1] = 0x20; } } } else { @@ -107,7 +105,7 @@ TXHIGHPWRLEVEL_LEVEL2) { tx_agc[RF90_PATH_A] = 0x00000000; tx_agc[RF90_PATH_B] = 0x00000000; - } else{ + } else { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | @@ -373,7 +371,12 @@ regoffset == RTXAGC_B_MCS07_MCS04) regoffset = 0xc98; for (i = 0; i < 3; i++) { - writeVal = (writeVal > 6) ? (writeVal - 6) : 0; + if (i != 2) + writeVal = (writeVal > 8) ? + (writeVal - 8) : 0; + else + writeVal = (writeVal > 6) ? 
+ (writeVal - 6) : 0; rtl_write_byte(rtlpriv, (u32)(regoffset + i), (u8)writeVal); } --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -50,6 +50,9 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless"); MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin"); static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) { @@ -69,14 +72,21 @@ "Can't alloc buffer for fw\n"); return 1; } - + if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) && + !IS_92C_SERIAL(rtlpriv->rtlhal.version)) { + rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin"; + } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) { + rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin"; + } else { + rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; + } + /* provide name of alternative file */ + rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name); rtlpriv->max_fw_size = 0x4000; err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); - - return err; } @@ -307,6 +317,8 @@ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */ + {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ /* HP - Lite-On ,8188CUS Slim Combo */ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192de/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192de/hw.c @@ -318,7 +318,7 @@ case HW_VAR_AC_PARAM: { u8 e_aci = *val; rtl92d_dm_init_edca_turbo(hw); - if (rtlpci->acm_method != eAcmWay2_SW) + if (rtlpci->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, &e_aci); break; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192se/hw.c @@ -251,7 +251,7 @@ u8 e_aci = *val; rtl92s_dm_init_edca_turbo(hw); - if (rtlpci->acm_method != eAcmWay2_SW) + if (rtlpci->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, &e_aci); @@ -955,7 +955,7 @@ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 tmp_byte = 0; - + unsigned long flags; bool rtstatus = true; u8 tmp_u1b; int err = false; @@ -967,6 +967,16 @@ rtlpci->being_init_adapter = true; + /* As this function can take a very long time (up to 350 ms) + * and can be called with irqs disabled, reenable the irqs + * to let the other devices continue being serviced. + * + * It is safe doing so since our own interrupts will only be enabled + * in a subsequent step. + */ + local_save_flags(flags); + local_irq_enable(); + rtlpriv->intf_ops->disable_aspm(hw); /* 1. MAC Initialize */ @@ -984,7 +994,8 @@ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. Init HW without FW now... " "Please copy FW into /lib/firmware/rtlwifi\n"); - return 1; + err = 1; + goto exit; } /* After FW download, we have to reset MAC register */ @@ -997,7 +1008,8 @@ /* 3. 
Initialize MAC/PHY Config by MACPHY_reg.txt */ if (!rtl92s_phy_mac_config(hw)) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n"); - return rtstatus; + err = rtstatus; + goto exit; } /* because last function modify RCR, so we update @@ -1016,7 +1028,8 @@ /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */ if (!rtl92s_phy_bb_config(hw)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n"); - return rtstatus; + err = rtstatus; + goto exit; } /* 5. Initiailze RF RAIO_A.txt RF RAIO_B.txt */ @@ -1033,7 +1046,8 @@ if (!rtl92s_phy_rf_config(hw)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n"); - return rtstatus; + err = rtstatus; + goto exit; } /* After read predefined TXT, we must set BB/MAC/RF @@ -1122,8 +1136,9 @@ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON); rtl92s_dm_init(hw); +exit: + local_irq_restore(flags); rtlpci->being_init_adapter = false; - return err; } --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -49,6 +49,12 @@ if (ieee80211_is_nullfunc(fc)) return QSLT_HIGH; + /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use + * queue V0 at priority 7; however, the RTL8192SE appears to have + * that queue at priority 6 + */ + if (skb->priority == 7) + return QSLT_VO; return skb->priority; } @@ -310,7 +316,7 @@ /* during testing, hdr was NULL here */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/def.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/def.h @@ -46,11 +46,6 @@ #define E_CUT_VERSION BIT(14) #define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28)) -enum version_8723e { - VERSION_TEST_UMC_CHIP_8723 = 0x0081, - VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089, - VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089, -}; /* MASK */ #define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c @@ -35,6 +35,7 @@ #include "def.h" #include "phy.h" #include "dm.h" +#include "../rtl8723com/dm_common.h" #include "fw.h" #include "hal_btc.h" @@ -483,16 +484,6 @@ rtl8723ae_dm_ctrl_initgain_by_twoport(hw); } -static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.dynamic_txpower_enable = false; - - rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; - rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; -} - static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -585,19 +576,6 @@ } } -static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw) -{ -} - -void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.current_turbo_edca = false; - rtlpriv->dm.is_any_nonbepkts = false; - rtlpriv->dm.is_cur_rdlstate = false; -} - static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -778,17 +756,6 @@ } } -static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm_pstable.pre_ccastate = CCA_MAX; - rtlpriv->dm_pstable.cur_ccasate = CCA_MAX; - rtlpriv->dm_pstable.pre_rfstate = RF_MAX; - 
rtlpriv->dm_pstable.cur_rfstate = RF_MAX; - rtlpriv->dm_pstable.rssi_val_min = 0; -} - void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -905,11 +872,11 @@ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; rtl8723ae_dm_diginit(hw); - rtl8723ae_dm_init_dynamic_txpower(hw); - rtl8723ae_dm_init_edca_turbo(hw); + rtl8723_dm_init_dynamic_txpower(hw); + rtl8723_dm_init_edca_turbo(hw); rtl8723ae_dm_init_rate_adaptive_mask(hw); rtl8723ae_dm_initialize_txpower_tracking(hw); - rtl8723ae_dm_init_dynamic_bpowersaving(hw); + rtl8723_dm_init_dynamic_bb_powersaving(hw); } void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw) @@ -930,7 +897,6 @@ if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { - rtl8723ae_dm_pwdmonitor(hw); rtl8723ae_dm_dig(hw); rtl8723ae_dm_false_alarm_counter_statistics(hw); rtl8723ae_dm_dynamic_bpowersaving(hw); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h @@ -147,7 +147,6 @@ void rtl8723ae_dm_init(struct ieee80211_hw *hw); void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw); void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw); -void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw); void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c @@ -34,199 +34,7 @@ #include "reg.h" #include "def.h" #include "fw.h" - -static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 tmp; - if (enable) { - tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); - - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); - rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01); - - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); - rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); - } else { - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); - rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); - - rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00); - } -} - -static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw, - const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 blockSize = sizeof(u32); - u8 *bufferPtr = (u8 *) buffer; - u32 *pu4BytePtr = (u32 *) buffer; - u32 i, offset, blockCount, remainSize; - - blockCount = size / blockSize; - remainSize = size % blockSize; - - for (i = 0; i < blockCount; i++) { - offset = i * blockSize; - rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset), - *(pu4BytePtr + i)); - } - - if (remainSize) { - offset = blockCount * blockSize; - bufferPtr += offset; - for (i = 0; i < remainSize; i++) { - rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS + - offset + i), *(bufferPtr + i)); - } - } -} - -static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw, - u32 page, const u8 *buffer, u32 size) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value8; - u8 u8page = (u8) (page & 0x07); - - value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; - - rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); - _rtl8723ae_fw_block_write(hw, buffer, size); -} - -static void _rtl8723ae_write_fw(struct ieee80211_hw *hw, - enum version_8723e version, u8 *buffer, - u32 size) 
-{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 *bufferPtr = (u8 *) buffer; - u32 page_nums, remain_size; - u32 page, offset; - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); - - page_nums = size / FW_8192C_PAGE_SIZE; - remain_size = size % FW_8192C_PAGE_SIZE; - - if (page_nums > 6) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Page numbers should not be greater then 6\n"); - } - - for (page = 0; page < page_nums; page++) { - offset = page * FW_8192C_PAGE_SIZE; - _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset), - FW_8192C_PAGE_SIZE); - } - - if (remain_size) { - offset = page_nums * FW_8192C_PAGE_SIZE; - page = page_nums; - _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset), - remain_size); - } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n"); -} - -static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - int err = -EIO; - u32 counter = 0; - u32 value32; - - do { - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) && - (!(value32 & FWDL_ChkSum_rpt))); - - if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "chksum report faill ! REG_MCUFWDL:0x%08x .\n", - value32); - goto exit; - } - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); - - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - value32 |= MCUFWDL_RDY; - value32 &= ~WINTINI_RDY; - rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); - - counter = 0; - - do { - value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); - if (value32 & WINTINI_RDY) { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", - value32); - err = 0; - goto exit; - } - - mdelay(FW_8192C_POLLING_DELAY); - - } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); - - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Polling FW ready fail!! 
REG_MCUFWDL:0x%08x .\n", value32); - -exit: - return err; -} - -int rtl8723ae_download_fw(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl8723ae_firmware_header *pfwheader; - u8 *pfwdata; - u32 fwsize; - int err; - enum version_8723e version = rtlhal->version; - - if (!rtlhal->pfirmware) - return 1; - - pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware; - pfwdata = (u8 *) rtlhal->pfirmware; - fwsize = rtlhal->fwsize; - - if (IS_FW_HEADER_EXIST(pfwheader)) { - RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "Firmware Version(%d), Signature(%#x),Size(%d)\n", - pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl8723ae_firmware_header)); - - pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header); - fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header); - } - - if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) { - rtl8723ae_firmware_selfreset(hw); - rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); - } - _rtl8723ae_enable_fw_download(hw, true); - _rtl8723ae_write_fw(hw, version, pfwdata, fwsize); - _rtl8723ae_enable_fw_download(hw, false); - - err = _rtl8723ae_fw_free_to_go(hw); - if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "Firmware is not ready to run!\n"); - } else { - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "Firmware is ready to run!\n"); - } - return 0; -} +#include "../rtl8723com/fw_common.h" static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) { @@ -463,50 +271,6 @@ return; } -void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw) -{ - u8 u1tmp; - u8 delay = 100; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - - while (u1tmp & BIT(2)) { - delay--; - if (delay == 0) - break; - udelay(50); - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - } - if (delay == 0) { - u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2))); - } -} - -void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 u1_h2c_set_pwrmode[3] = { 0 }; - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - - RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); - - SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); - SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, - (rtlpriv->mac80211.p2p) ? 
- ppsc->smart_ps : 1); - SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, - ppsc->reg_max_lps_awakeintvl); - - RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n", - u1_h2c_set_pwrmode, 3); - rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); - -} - static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) { @@ -812,7 +576,6 @@ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); p2p_ps_offload->offload_en = 1; - if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { p2p_ps_offload->role = 1; p2p_ps_offload->allstasleep = 0; @@ -836,3 +599,24 @@ } rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); } + +void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 u1_h2c_set_pwrmode[3] = { 0 }; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); + + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode); + SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(u1_h2c_set_pwrmode, + (rtlpriv->mac80211.p2p) ? + ppsc->smart_ps : 1); + SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode, + ppsc->reg_max_lps_awakeintvl); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n", + u1_h2c_set_pwrmode, 3); + rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h @@ -34,7 +34,7 @@ #define FW_8192C_END_ADDRESS 0x3FFF #define FW_8192C_PAGE_SIZE 4096 #define FW_8192C_POLLING_DELAY 5 -#define FW_8192C_POLLING_TIMEOUT_COUNT 1000 +#define FW_8192C_POLLING_TIMEOUT_COUNT 6000 #define BEACON_PG 0 #define PSPOLL_PG 2 @@ -65,21 +65,9 @@ u32 rsvd5; }; -enum rtl8192c_h2c_cmd { - H2C_AP_OFFLOAD = 0, - H2C_SETPWRMODE = 1, - H2C_JOINBSSRPT = 2, - H2C_RSVDPAGE = 3, - H2C_RSSI_REPORT = 4, - H2C_P2P_PS_CTW_CMD = 5, - H2C_P2P_PS_OFFLOAD = 6, - H2C_RA_MASK = 7, - MAX_H2CCMD -}; - #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) -#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \ +#define SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) #define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) @@ -92,10 +80,8 @@ #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) -int rtl8723ae_download_fw(struct ieee80211_hw *hw); void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); -void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw); void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c @@ -31,6 +31,8 @@ #include "../pci.h" #include "dm.h" #include "fw.h" +#include "../rtl8723com/fw_common.h" +#include "../rtl8723com/fw_common.h" #include "phy.h" #include "reg.h" #include "hal_btc.h" --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c @@ 
-30,7 +30,9 @@ #include "hal_btc.h" #include "../pci.h" #include "phy.h" +#include "../rtl8723com/phy_common.h" #include "fw.h" +#include "../rtl8723com/fw_common.h" #include "reg.h" #include "def.h" @@ -391,13 +393,13 @@ if (sw_dac_swing_on) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl); - rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, - sw_dac_swing_lvl); + rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, + sw_dac_swing_lvl); rtlpcipriv->bt_coexist.sw_coexist_all_off = false; } else { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "[BTCoex], SwDacSwing Off!\n"); - rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0); + rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0); } } --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c @@ -38,7 +38,9 @@ #include "def.h" #include "phy.h" #include "dm.h" +#include "../rtl8723com/dm_common.h" #include "fw.h" +#include "../rtl8723com/fw_common.h" #include "led.h" #include "hw.h" #include "pwrseqcmd.h" @@ -304,9 +306,9 @@ break; } case HW_VAR_AC_PARAM:{ u8 e_aci = *((u8 *) val); - rtl8723ae_dm_init_edca_turbo(hw); + rtl8723_dm_init_edca_turbo(hw); - if (rtlpci->acm_method != eAcmWay2_SW) + if (rtlpci->acm_method != EACMWAY2_SW) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, (u8 *) (&e_aci)); @@ -880,23 +882,33 @@ bool rtstatus = true; int err; u8 tmp_u1b; + unsigned long flags; rtlpriv->rtlhal.being_init_adapter = true; + /* As this function can take a very long time (up to 350 ms) + * and can be called with irqs disabled, reenable the irqs + * to let the other devices continue being serviced. + * + * It is safe doing so since our own interrupts will only be enabled + * in a subsequent step. + */ + local_save_flags(flags); + local_irq_enable(); + rtlpriv->intf_ops->disable_aspm(hw); rtstatus = _rtl8712e_init_mac(hw); if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); err = 1; - return err; + goto exit; } - err = rtl8723ae_download_fw(hw); + err = rtl8723_download_fw(hw, false); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Failed to download FW. 
Init HW without FW now..\n"); err = 1; - rtlhal->fw_ready = false; - return err; + goto exit; } else { rtlhal->fw_ready = true; } @@ -971,6 +983,8 @@ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); } rtl8723ae_dm_init(hw); +exit: + local_irq_restore(flags); rtlpriv->rtlhal.being_init_adapter = false; return err; } @@ -1153,7 +1167,7 @@ { struct rtl_priv *rtlpriv = rtl_priv(hw); - rtl8723ae_dm_init_edca_turbo(hw); + rtl8723_dm_init_edca_turbo(hw); switch (aci) { case AC1_BK: rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); @@ -1655,7 +1669,7 @@ CHK_SVID_SMID(0x10EC, 0x9185)) rtlhal->oem_id = RT_CID_TOSHIBA; else if (rtlefuse->eeprom_svid == 0x1025) - rtlhal->oem_id = RT_CID_819x_Acer; + rtlhal->oem_id = RT_CID_819X_ACER; else if (CHK_SVID_SMID(0x10EC, 0x6191) || CHK_SVID_SMID(0x10EC, 0x6192) || CHK_SVID_SMID(0x10EC, 0x6193) || @@ -1665,7 +1679,7 @@ CHK_SVID_SMID(0x10EC, 0x8191) || CHK_SVID_SMID(0x10EC, 0x8192) || CHK_SVID_SMID(0x10EC, 0x8193)) - rtlhal->oem_id = RT_CID_819x_SAMSUNG; + rtlhal->oem_id = RT_CID_819X_SAMSUNG; else if (CHK_SVID_SMID(0x10EC, 0x8195) || CHK_SVID_SMID(0x10EC, 0x9195) || CHK_SVID_SMID(0x10EC, 0x7194) || @@ -1673,24 +1687,24 @@ CHK_SVID_SMID(0x10EC, 0x8201) || CHK_SVID_SMID(0x10EC, 0x8202) || CHK_SVID_SMID(0x10EC, 0x9200)) - rtlhal->oem_id = RT_CID_819x_Lenovo; + rtlhal->oem_id = RT_CID_819X_LENOVO; else if (CHK_SVID_SMID(0x10EC, 0x8197) || CHK_SVID_SMID(0x10EC, 0x9196)) - rtlhal->oem_id = RT_CID_819x_CLEVO; + rtlhal->oem_id = RT_CID_819X_CLEVO; else if (CHK_SVID_SMID(0x1028, 0x8194) || CHK_SVID_SMID(0x1028, 0x8198) || CHK_SVID_SMID(0x1028, 0x9197) || CHK_SVID_SMID(0x1028, 0x9198)) - rtlhal->oem_id = RT_CID_819x_DELL; + rtlhal->oem_id = RT_CID_819X_DELL; else if (CHK_SVID_SMID(0x103C, 0x1629)) - rtlhal->oem_id = RT_CID_819x_HP; + rtlhal->oem_id = RT_CID_819X_HP; else if (CHK_SVID_SMID(0x1A32, 0x2315)) - rtlhal->oem_id = RT_CID_819x_QMI; + rtlhal->oem_id = RT_CID_819X_QMI; else if (CHK_SVID_SMID(0x10EC, 0x8203)) - rtlhal->oem_id = RT_CID_819x_PRONETS; + rtlhal->oem_id = RT_CID_819X_PRONETS; else if (CHK_SVID_SMID(0x1043, 0x84B5)) rtlhal->oem_id = - RT_CID_819x_Edimax_ASUS; + RT_CID_819X_EDIMAX_ASUS; else rtlhal->oem_id = RT_CID_DEFAULT; } else if (rtlefuse->eeprom_did == 0x8178) { @@ -1712,12 +1726,12 @@ CHK_SVID_SMID(0x10EC, 0x9185)) rtlhal->oem_id = RT_CID_TOSHIBA; else if (rtlefuse->eeprom_svid == 0x1025) - rtlhal->oem_id = RT_CID_819x_Acer; + rtlhal->oem_id = RT_CID_819X_ACER; else if (CHK_SVID_SMID(0x10EC, 0x8186)) - rtlhal->oem_id = RT_CID_819x_PRONETS; + rtlhal->oem_id = RT_CID_819X_PRONETS; else if (CHK_SVID_SMID(0x1043, 0x8486)) rtlhal->oem_id = - RT_CID_819x_Edimax_ASUS; + RT_CID_819X_EDIMAX_ASUS; else rtlhal->oem_id = RT_CID_DEFAULT; } else { @@ -1731,7 +1745,7 @@ rtlhal->oem_id = RT_CID_CCX; break; case EEPROM_CID_QMI: - rtlhal->oem_id = RT_CID_819x_QMI; + rtlhal->oem_id = RT_CID_819X_QMI; break; case EEPROM_CID_WHQL: break; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c @@ -36,6 +36,7 @@ #include "rf.h" #include "dm.h" #include "table.h" +#include "../rtl8723com/phy_common.h" /* static forward definitions */ static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw, @@ -43,72 +44,17 @@ static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data); -static u32 _phy_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset); -static void _phy_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path 
rfpath, u32 offset, u32 data); -static u32 _phy_calculate_bit_shift(u32 bitmask); static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw); static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw); static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype); static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype); -static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw); -static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, - u32 cmdtableidx, u32 cmdtablesz, - enum swchnlcmd_id cmdid, - u32 para1, u32 para2, - u32 msdelay); static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, u8 *stage, u8 *step, u32 *delay); static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, enum wireless_mode wirelessmode, long power_indbm); -static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, u8 txpwridx); static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw); -u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, - u32 bitmask) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 returnvalue, originalvalue, bitshift; - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask); - originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _phy_calculate_bit_shift(bitmask); - returnvalue = (originalvalue & bitmask) >> bitshift; - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr, - originalvalue); - - return returnvalue; -} - -void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, u32 data) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 originalvalue, bitshift; - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, - bitmask, data); - - if (bitmask != MASKDWORD) { - originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _phy_calculate_bit_shift(bitmask); - data = ((originalvalue & (~bitmask)) | (data << bitshift)); - } - - rtl_write_dword(rtlpriv, regaddr, data); - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, - "regaddr(%#x), bitmask(%#x), data(%#x)\n", - regaddr, bitmask, data); -} - u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask) { @@ -124,11 +70,11 @@ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); if (rtlphy->rf_mode != RF_OP_BY_FW) - original_value = _phy_rf_serial_read(hw, rfpath, regaddr); + original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr); else original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr); - bitshift = _phy_calculate_bit_shift(bitmask); + bitshift = rtl8723_phy_calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); @@ -157,19 +103,19 @@ if (rtlphy->rf_mode != RF_OP_BY_FW) { if (bitmask != RFREG_OFFSET_MASK) { - original_value = _phy_rf_serial_read(hw, rfpath, - regaddr); - bitshift = _phy_calculate_bit_shift(bitmask); + original_value = rtl8723_phy_rf_serial_read(hw, rfpath, + regaddr); + bitshift = rtl8723_phy_calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); } - _phy_rf_serial_write(hw, rfpath, regaddr, data); + rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data); } else { if (bitmask != RFREG_OFFSET_MASK) { original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr); - bitshift = _phy_calculate_bit_shift(bitmask); + bitshift = rtl8723_phy_calculate_bit_shift(bitmask); data = 
((original_value & (~bitmask)) | (data << bitshift)); } @@ -197,87 +143,6 @@ RT_ASSERT(false, "deprecated!\n"); } -static u32 _phy_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - u32 newoffset; - u32 tmplong, tmplong2; - u8 rfpi_enable = 0; - u32 retvalue; - - offset &= 0x3f; - newoffset = offset; - if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n"); - return 0xFFFFFFFF; - } - tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); - if (rfpath == RF90_PATH_A) - tmplong2 = tmplong; - else - tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); - tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | - (newoffset << 23) | BLSSIREADEDGE; - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong & (~BLSSIREADEDGE)); - mdelay(1); - rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); - mdelay(1); - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong | BLSSIREADEDGE); - mdelay(1); - if (rfpath == RF90_PATH_A) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); - else if (rfpath == RF90_PATH_B) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, - BIT(8)); - if (rfpi_enable) - retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi, - BLSSIREADBACKDATA); - else - retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb, - BLSSIREADBACKDATA); - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rf_rb, retvalue); - return retvalue; -} - -static void _phy_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset, u32 data) -{ - u32 data_and_addr; - u32 newoffset; - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - - if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); - return; - } - offset &= 0x3f; - newoffset = offset; - data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; - rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rf3wire_offset, data_and_addr); -} - -static u32 _phy_calculate_bit_shift(u32 bitmask) -{ - u32 i; - - for (i = 0; i <= 31; i++) { - if (((bitmask >> i) & 0x1) == 1) - break; - } - return i; -} - static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw) { rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2); @@ -307,7 +172,7 @@ u8 tmpu1b; u8 reg_hwparafile = 1; - _phy_init_bb_rf_reg_def(hw); + rtl8723_phy_init_bb_rf_reg_def(hw); /* 1. 
0x28[1] = 1 */ tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL); @@ -690,92 +555,6 @@ ROFDM0_RXDETECTOR3, rtlphy->framesync); } -static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; - - rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; - - rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; - rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; - - rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; - rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; - - rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = - RFPGA0_XA_LSSIPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = - RFPGA0_XB_LSSIPARAMETER; - - rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; - - rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; - - rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; - rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; - - rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; - rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; - - rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; - - rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; - - rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; - - rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE; - rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE; - - rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; - rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; - rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = 
ROFDM0_XDRXAFE; - - rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE; - - rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; - rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; - rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; - - rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK; - - rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK; - rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK; -} - void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -785,17 +564,17 @@ long txpwr_dbm; txpwr_level = rtlphy->cur_cck_txpwridx; - txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level); + txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level); txpwr_level = rtlphy->cur_ofdm24g_txpwridx + rtlefuse->legacy_ht_txpowerdiff; - if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm) - txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm) + txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level); txpwr_level = rtlphy->cur_ofdm24g_txpwridx; - if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) > + if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) > txpwr_dbm) - txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, - txpwr_level); + txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); *powerlevel = txpwr_dbm; } @@ -912,28 +691,6 @@ return txpwridx; } -static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, u8 txpwridx) -{ - long offset; - long pwrout_dbm; - - switch (wirelessmode) { - case WIRELESS_MODE_B: - offset = -7; - break; - case WIRELESS_MODE_G: - case WIRELESS_MODE_N_24G: - offset = -8; - break; - default: - offset = -8; - break; - } - pwrout_dbm = txpwridx / 2 + offset; - return pwrout_dbm; -} - void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1117,26 +874,26 @@ u8 num_total_rfpath = rtlphy->num_total_rfpath; precommoncmdcnt = 0; - _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, - 0, 0, 0); - _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, + 0, 0, 0); + rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); postcommoncmdcnt = 0; - _phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, - MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); + rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, + MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); rfdependcmdcnt = 0; RT_ASSERT((channel >= 1 && channel <= 14), "illegal channel for Zebra: %d\n", channel); - _phy_set_sw_chnl_cmdarray(rfdependcmd, 
rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, + rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, RF_CHNLBW, channel, 10); - _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0); + rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0); do { switch (*stage) { @@ -1204,29 +961,6 @@ return false; } -static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, - u32 cmdtableidx, u32 cmdtablesz, - enum swchnlcmd_id cmdid, u32 para1, - u32 para2, u32 msdelay) -{ - struct swchnlcmd *pcmd; - - if (cmdtable == NULL) { - RT_ASSERT(false, "cmdtable cannot be NULL.\n"); - return false; - } - - if (cmdtableidx >= cmdtablesz) - return false; - - pcmd = cmdtable + cmdtableidx; - pcmd->cmdid = cmdid; - pcmd->para1 = para1; - pcmd->para2 = para2; - pcmd->msdelay = msdelay; - return true; -} - static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) { u32 reg_eac, reg_e94, reg_e9c, reg_ea4; @@ -1297,136 +1031,6 @@ return result; } -static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok, - long result[][8], u8 final_candidate, - bool btxonly) -{ - u32 oldval_0, x, tx0_a, reg; - long y, tx0_c; - - if (final_candidate == 0xFF) { - return; - } else if (iqk_ok) { - oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD) >> 22) & 0x3FF; - x = result[final_candidate][0]; - if ((x & 0x00000200) != 0) - x = x | 0xFFFFFC00; - tx0_a = (x * oldval_0) >> 8; - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), - ((x * oldval_0 >> 7) & 0x1)); - y = result[final_candidate][1]; - if ((y & 0x00000200) != 0) - y = y | 0xFFFFFC00; - tx0_c = (y * oldval_0) >> 8; - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, - ((tx0_c & 0x3C0) >> 6)); - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, - (tx0_c & 0x3F)); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), - ((y * oldval_0 >> 7) & 0x1)); - if (btxonly) - return; - reg = result[final_candidate][2]; - rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); - reg = result[final_candidate][3] & 0x3F; - rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); - reg = (result[final_candidate][3] >> 6) & 0xF; - rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg); - } -} - -static void phy_save_adda_regs(struct ieee80211_hw *hw, - u32 *addareg, u32 *addabackup, - u32 registernum) -{ - u32 i; - - for (i = 0; i < registernum; i++) - addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD); -} - -static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg, - u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); - macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); -} - -static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg, - u32 *addabackup, u32 regiesternum) -{ - u32 i; - - for (i = 0; i < regiesternum; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]); -} - -static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg, - u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]); - rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); -} - -static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw, - u32 *addareg, bool is_patha_on, - bool 
is2t) -{ - u32 pathOn; - u32 i; - - pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; - if (false == is2t) { - pathOn = 0x0bdb25a0; - rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); - } else { - rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn); - } - - for (i = 1; i < IQK_ADDA_REG_NUM; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn); -} - -static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw, - u32 *macreg, u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i = 0; - - rtl_write_byte(rtlpriv, macreg[i], 0x3F); - - for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) - rtl_write_byte(rtlpriv, macreg[i], - (u8) (macbackup[i] & (~BIT(3)))); - rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5)))); -} - -static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw) -{ - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0); - rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); -} - -static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode) -{ - u32 mode; - - mode = pi_mode ? 0x01000100 : 0x01000000; - rtl_set_bbreg(hw, 0x820, MASKDWORD, mode); - rtl_set_bbreg(hw, 0x828, MASKDWORD, mode); -} - static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8], u8 c1, u8 c2) { @@ -1498,10 +1102,12 @@ const u32 retrycount = 2; if (t == 0) { - phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16); - phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); + rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup, + 16); + rtl8723_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); } - _rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t); + rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t); if (t == 0) { rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, @@ -1509,7 +1115,7 @@ } if (!rtlphy->rfpi_enable) - _rtl8723ae_phy_pi_mode_switch(hw, true); + rtl8723_phy_pi_mode_switch(hw, true); if (t == 0) { rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD); rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD); @@ -1522,7 +1128,7 @@ rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000); } - _rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000); if (is2t) @@ -1552,8 +1158,8 @@ } if (is2t) { - _rtl8723ae_phy_path_a_standby(hw); - _rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t); + rtl8723_phy_path_a_standby(hw); + rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t); for (i = 0; i < retrycount; i++) { pathb_ok = _rtl8723ae_phy_path_b_iqk(hw); if (pathb_ok == 0x03) { @@ -1588,9 +1194,11 @@ rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3); if (t != 0) { if (!rtlphy->rfpi_enable) - _rtl8723ae_phy_pi_mode_switch(hw, false); - phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16); - phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup); + rtl8723_phy_pi_mode_switch(hw, false); + rtl8723_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); } } @@ -1691,7 +1299,8 @@ }; if (recovery) { - phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); + rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 10); return; } if (start_conttx || singletone) @@ -1756,9 +1365,10 @@ rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; } if (reg_e94 != 0) 
/*&&(reg_ea4 != 0) */ - phy_path_a_fill_iqk_matrix(hw, patha_ok, result, - final_candidate, (reg_ea4 == 0)); - phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); + rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result, + final_candidate, + (reg_ea4 == 0)); + rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10); } void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw) --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h @@ -76,23 +76,6 @@ #define RTL92C_MAX_PATH_NUM 2 -enum swchnlcmd_id { - CMDID_END, - CMDID_SET_TXPOWEROWER_LEVEL, - CMDID_BBREGWRITE10, - CMDID_WRITEPORT_ULONG, - CMDID_WRITEPORT_USHORT, - CMDID_WRITEPORT_UCHAR, - CMDID_RF_WRITEREG, -}; - -struct swchnlcmd { - enum swchnlcmd_id cmdid; - u32 para1; - u32 para2; - u32 msdelay; -}; - enum hw90_block_e { HW90_BLOCK_MAC = 0, HW90_BLOCK_PHY0 = 1, @@ -183,10 +166,6 @@ u32 mcs_original_offset[4][16]; }; -u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask); -void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, u32 data); u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c @@ -37,8 +37,11 @@ #include "reg.h" #include "def.h" #include "phy.h" +#include "../rtl8723com/phy_common.h" #include "dm.h" #include "hw.h" +#include "fw.h" +#include "../rtl8723com/fw_common.h" #include "sw.h" #include "trx.h" #include "led.h" @@ -193,6 +196,11 @@ } } +static bool is_fw_header(struct rtl92c_firmware_header *hdr) +{ + return (hdr->signature & 0xfff0) == 0x2300; +} + static struct rtl_hal_ops rtl8723ae_hal_ops = { .init_sw_vars = rtl8723ae_init_sw_vars, .deinit_sw_vars = rtl8723ae_deinit_sw_vars, @@ -231,13 +239,14 @@ .set_key = rtl8723ae_set_key, .init_sw_leds = rtl8723ae_init_sw_leds, .allow_all_destaddr = rtl8723ae_allow_all_destaddr, - .get_bbreg = rtl8723ae_phy_query_bb_reg, - .set_bbreg = rtl8723ae_phy_set_bb_reg, + .get_bbreg = rtl8723_phy_query_bb_reg, + .set_bbreg = rtl8723_phy_set_bb_reg, .get_rfreg = rtl8723ae_phy_query_rf_reg, .set_rfreg = rtl8723ae_phy_set_rf_reg, .c2h_command_handle = rtl_8723e_c2h_command_handle, .bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify, .bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps, + .is_fw_header = is_fw_header, }; static struct rtl_mod_params rtl8723ae_mod_params = { --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c @@ -334,7 +334,7 @@ /* during testing, hdr could be NULL here */ return false; } - if ((ieee80211_is_robust_mgmt_frame(hdr)) && + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && (ieee80211_has_protected(hdr->frame_control))) rx_status->flag &= ~RX_FLAG_DECRYPTED; else --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h @@ -521,12 +521,6 @@ memset(__pdesc, 0, _size); \ } while (0) -#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \ - ((rxmcs) == DESC92_RATE1M || \ - (rxmcs) == DESC92_RATE2M || \ - (rxmcs) == DESC92_RATE5_5M || \ - (rxmcs) == DESC92_RATE11M) - struct rx_fwinfo_8723e { u8 gain_trsw[4]; u8 pwdb_all; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/Makefile +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/Makefile @@ -0,0 +1,20 @@ +obj-m 
:= rtl8723be.o + + +rtl8723be-objs := \ + dm.o \ + fw.o \ + hw.o \ + led.o \ + phy.o \ + pwrseq.o \ + pwrseqcmd.o \ + rf.o \ + sw.o \ + table.o \ + trx.o \ + + +obj-$(CONFIG_RTL8723BE) += rtl8723be.o + +ccflags-y += -D__CHECK_ENDIAN__ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/def.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/def.h @@ -0,0 +1,248 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_DEF_H__ +#define __RTL8723BE_DEF_H__ + +#define HAL_RETRY_LIMIT_INFRA 48 +#define HAL_RETRY_LIMIT_AP_ADHOC 7 + +#define RESET_DELAY_8185 20 + +#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) +#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) + +#define NUM_OF_FIRMWARE_QUEUE 10 +#define NUM_OF_PAGES_IN_FW 0x100 +#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 + +#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 + +#define MAX_LINES_HWCONFIG_TXT 1000 +#define MAX_BYTES_LINE_HWCONFIG_TXT 256 + +#define SW_THREE_WIRE 0 +#define HW_THREE_WIRE 2 + +#define BT_DEMO_BOARD 0 +#define BT_QA_BOARD 1 +#define BT_FPGA 2 + +#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 +#define HAL_PRIME_CHNL_OFFSET_LOWER 1 +#define HAL_PRIME_CHNL_OFFSET_UPPER 2 + +#define MAX_H2C_QUEUE_NUM 10 + +#define RX_MPDU_QUEUE 0 +#define RX_CMD_QUEUE 1 +#define RX_MAX_QUEUE 2 +#define AC2QUEUEID(_AC) (_AC) + +#define C2H_RX_CMD_HDR_LEN 8 +#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 0, 16) +#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 16, 8) +#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 24, 7) +#define GET_C2H_CMD_CONTINUE(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 31, 1) +#define GET_C2H_CMD_CONTENT(__prxhdr) \ + ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN) + +#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) +#define 
GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) +#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) + +#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) +#define CHIP_BONDING_92C_1T2R 0x1 + +#define CHIP_8723 BIT(0) +#define CHIP_8723B (BIT(1) | BIT(2)) +#define NORMAL_CHIP BIT(3) +#define RF_TYPE_1T1R (~(BIT(4) | BIT(5) | BIT(6))) +#define RF_TYPE_1T2R BIT(4) +#define RF_TYPE_2T2R BIT(5) +#define CHIP_VENDOR_UMC BIT(7) +#define B_CUT_VERSION BIT(12) +#define C_CUT_VERSION BIT(13) +#define D_CUT_VERSION ((BIT(12) | BIT(13))) +#define E_CUT_VERSION BIT(14) +#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28)) + +/* MASK */ +#define IC_TYPE_MASK (BIT(0) | BIT(1) | BIT(2)) +#define CHIP_TYPE_MASK BIT(3) +#define RF_TYPE_MASK (BIT(4) | BIT(5) | BIT(6)) +#define MANUFACTUER_MASK BIT(7) +#define ROM_VERSION_MASK (BIT(11) | BIT(10) | BIT(9) | BIT(8)) +#define CUT_VERSION_MASK (BIT(15) | BIT(14) | BIT(13) | BIT(12)) + +/* Get element */ +#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK) +#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK) +#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK) +#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK) +#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK) +#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) + +#define IS_92C_SERIAL(version) ((IS_81XXC(version) && IS_2T2R(version)) ?\ + true : false) +#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\ + true : false) +#define IS_8723_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8723) ?\ + true : false) +#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version)) ? false : true) +#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\ + ? true : false) +#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\ + ? 
true : false) +enum rf_optype { + RF_OP_BY_SW_3WIRE = 0, + RF_OP_BY_FW, + RF_OP_MAX +}; + +enum rf_power_state { + RF_ON, + RF_OFF, + RF_SLEEP, + RF_SHUT_DOWN, +}; + +enum power_save_mode { + POWER_SAVE_MODE_ACTIVE, + POWER_SAVE_MODE_SAVE, +}; + +enum power_polocy_config { + POWERCFG_MAX_POWER_SAVINGS, + POWERCFG_GLOBAL_POWER_SAVINGS, + POWERCFG_LOCAL_POWER_SAVINGS, + POWERCFG_LENOVO, +}; + +enum interface_select_pci { + INTF_SEL1_MINICARD = 0, + INTF_SEL0_PCIE = 1, + INTF_SEL2_RSV = 2, + INTF_SEL3_RSV = 3, +}; + +enum rtl_desc_qsel { + QSLT_BK = 0x2, + QSLT_BE = 0x0, + QSLT_VI = 0x5, + QSLT_VO = 0x7, + QSLT_BEACON = 0x10, + QSLT_HIGH = 0x11, + QSLT_MGNT = 0x12, + QSLT_CMD = 0x13, +}; + +enum rtl_desc8723e_rate { + DESC92C_RATE1M = 0x00, + DESC92C_RATE2M = 0x01, + DESC92C_RATE5_5M = 0x02, + DESC92C_RATE11M = 0x03, + + DESC92C_RATE6M = 0x04, + DESC92C_RATE9M = 0x05, + DESC92C_RATE12M = 0x06, + DESC92C_RATE18M = 0x07, + DESC92C_RATE24M = 0x08, + DESC92C_RATE36M = 0x09, + DESC92C_RATE48M = 0x0a, + DESC92C_RATE54M = 0x0b, + + DESC92C_RATEMCS0 = 0x0c, + DESC92C_RATEMCS1 = 0x0d, + DESC92C_RATEMCS2 = 0x0e, + DESC92C_RATEMCS3 = 0x0f, + DESC92C_RATEMCS4 = 0x10, + DESC92C_RATEMCS5 = 0x11, + DESC92C_RATEMCS6 = 0x12, + DESC92C_RATEMCS7 = 0x13, + DESC92C_RATEMCS8 = 0x14, + DESC92C_RATEMCS9 = 0x15, + DESC92C_RATEMCS10 = 0x16, + DESC92C_RATEMCS11 = 0x17, + DESC92C_RATEMCS12 = 0x18, + DESC92C_RATEMCS13 = 0x19, + DESC92C_RATEMCS14 = 0x1a, + DESC92C_RATEMCS15 = 0x1b, + DESC92C_RATEMCS15_SG = 0x1c, + DESC92C_RATEMCS32 = 0x20, +}; + +enum rx_packet_type { + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, +}; + +struct phy_sts_cck_8723e_t { + u8 adc_pwdb_X[4]; + u8 sq_rpt; + u8 cck_agc_rpt; +}; + +struct h2c_cmd_8723e { + u8 element_id; + u32 cmd_len; + u8 *p_cmdbuffer; +}; + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/dm.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/dm.c @@ -0,0 +1,1325 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../base.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "../rtl8723com/dm_common.h" +#include "fw.h" +#include "../rtl8723com/fw_common.h" +#include "trx.h" +#include "../btcoexist/rtl_btc.h" + +static const u32 ofdmswing_table[] = { + 0x0b40002d, /* 0, -15.0dB */ + 0x0c000030, /* 1, -14.5dB */ + 0x0cc00033, /* 2, -14.0dB */ + 0x0d800036, /* 3, -13.5dB */ + 0x0e400039, /* 4, -13.0dB */ + 0x0f00003c, /* 5, -12.5dB */ + 0x10000040, /* 6, -12.0dB */ + 0x11000044, /* 7, -11.5dB */ + 0x12000048, /* 8, -11.0dB */ + 0x1300004c, /* 9, -10.5dB */ + 0x14400051, /* 10, -10.0dB */ + 0x15800056, /* 11, -9.5dB */ + 0x16c0005b, /* 12, -9.0dB */ + 0x18000060, /* 13, -8.5dB */ + 0x19800066, /* 14, -8.0dB */ + 0x1b00006c, /* 15, -7.5dB */ + 0x1c800072, /* 16, -7.0dB */ + 0x1e400079, /* 17, -6.5dB */ + 0x20000080, /* 18, -6.0dB */ + 0x22000088, /* 19, -5.5dB */ + 0x24000090, /* 20, -5.0dB */ + 0x26000098, /* 21, -4.5dB */ + 0x288000a2, /* 22, -4.0dB */ + 0x2ac000ab, /* 23, -3.5dB */ + 0x2d4000b5, /* 24, -3.0dB */ + 0x300000c0, /* 25, -2.5dB */ + 0x32c000cb, /* 26, -2.0dB */ + 0x35c000d7, /* 27, -1.5dB */ + 0x390000e4, /* 28, -1.0dB */ + 0x3c8000f2, /* 29, -0.5dB */ + 0x40000100, /* 30, +0dB */ + 0x43c0010f, /* 31, +0.5dB */ + 0x47c0011f, /* 32, +1.0dB */ + 0x4c000130, /* 33, +1.5dB */ + 0x50800142, /* 34, +2.0dB */ + 0x55400155, /* 35, +2.5dB */ + 0x5a400169, /* 36, +3.0dB */ + 0x5fc0017f, /* 37, +3.5dB */ + 0x65400195, /* 38, +4.0dB */ + 0x6b8001ae, /* 39, +4.5dB */ + 0x71c001c7, /* 40, +5.0dB */ + 0x788001e2, /* 41, +5.5dB */ + 0x7f8001fe /* 42, +6.0dB */ +}; + +static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { + {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /* 0, -16.0dB */ + {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 1, -15.5dB */ + {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 2, -15.0dB */ + {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 3, -14.5dB */ + {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 4, -14.0dB */ + {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 5, -13.5dB */ + {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 6, -13.0dB */ + {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 7, -12.5dB */ + {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 8, -12.0dB */ + {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 9, -11.5dB */ + {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */ + {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */ + {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */ + {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */ + {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */ + {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */ + {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */ + {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */ + {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */ + {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */ + {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */ + {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */ + {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */ + {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */ + {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, 
-4.0dB */ + {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */ + {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */ + {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */ + {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */ + {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */ + {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */ + {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */ + {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} /* 32, +0dB */ +}; + +static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { + {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /* 0, -16.0dB */ + {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 1, -15.5dB */ + {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 2, -15.0dB */ + {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 3, -14.5dB */ + {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 4, -14.0dB */ + {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 5, -13.5dB */ + {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 6, -13.0dB */ + {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 7, -12.5dB */ + {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 8, -12.0dB */ + {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 9, -11.5dB */ + {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */ + {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */ + {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */ + {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */ + {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */ + {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */ + {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */ + {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */ + {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */ + {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */ + {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */ + {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */ + {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */ + {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */ + {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */ + {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */ + {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */ + {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */ + {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */ + {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */ + {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */ + {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */ + {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} /* 32, +0dB */ +}; + +static const u32 edca_setting_dl[PEER_MAX] = { + 0xa44f, /* 0 UNKNOWN */ + 0x5ea44f, /* 1 REALTEK_90 */ + 0x5e4322, /* 2 REALTEK_92SE */ + 0x5ea42b, /* 3 BROAD */ + 0xa44f, /* 4 RAL */ + 0xa630, /* 5 ATH */ + 0x5ea630, /* 6 CISCO */ + 0x5ea42b, /* 7 MARVELL */ +}; + +static const u32 edca_setting_ul[PEER_MAX] = { + 0x5e4322, /* 0 UNKNOWN */ + 0xa44f, /* 1 REALTEK_90 */ + 0x5ea44f, /* 2 REALTEK_92SE */ + 0x5ea32b, /* 3 BROAD */ + 0x5ea422, /* 4 RAL */ + 0x5ea322, /* 5 ATH */ + 0x3ea430, /* 6 CISCO */ + 0x5ea44f, /* 7 MARV */ +}; + +void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type, + u8 *pdirection, u32 
*poutwrite_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 pwr_val = 0; + u8 ofdm_base = rtlpriv->dm.swing_idx_ofdm_base[RF90_PATH_A]; + u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A]; + u8 cck_base = rtldm->swing_idx_cck_base; + u8 cck_val = rtldm->swing_idx_cck; + + if (type == 0) { + if (ofdm_val <= ofdm_base) { + *pdirection = 1; + pwr_val = ofdm_base - ofdm_val; + } else { + *pdirection = 2; + pwr_val = ofdm_val - ofdm_base; + } + } else if (type == 1) { + if (cck_val <= cck_base) { + *pdirection = 1; + pwr_val = cck_base - cck_val; + } else { + *pdirection = 2; + pwr_val = cck_val - cck_base; + } + } + + if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1)) + pwr_val = TXPWRTRACK_MAX_IDX; + + *poutwrite_val = pwr_val | (pwr_val << 8) | + (pwr_val << 16) | (pwr_val << 24); +} + +static void rtl8723be_dm_diginit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + + dm_digtable->dig_enable_flag = true; + dm_digtable->cur_igvalue = rtl_get_bbreg(hw, + ROFDM0_XAAGCCORE1, 0x7f); + dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; + dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; + dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + dm_digtable->rx_gain_max = DM_DIG_MAX; + dm_digtable->rx_gain_min = DM_DIG_MIN; + dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX; + dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN; + dm_digtable->pre_cck_cca_thres = 0xff; + dm_digtable->cur_cck_cca_thres = 0x83; + dm_digtable->forbidden_igi = DM_DIG_MIN; + dm_digtable->large_fa_hit = 0; + dm_digtable->recover_cnt = 0; + dm_digtable->dig_min_0 = DM_DIG_MIN; + dm_digtable->dig_min_1 = DM_DIG_MIN; + dm_digtable->media_connect_0 = false; + dm_digtable->media_connect_1 = false; + rtlpriv->dm.dm_initialgain_enable = true; + dm_digtable->bt30_cur_igi = 0x32; +} + +void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *ra = &(rtlpriv->ra); + + ra->ratr_state = DM_RATR_STA_INIT; + ra->pre_ratr_state = DM_RATR_STA_INIT; + + if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) + rtlpriv->dm.useramask = true; + else + rtlpriv->dm.useramask = false; + + ra->high_rssi_thresh_for_ra = 50; + ra->low_rssi_thresh_for_ra40m = 20; +} + +static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.txpower_tracking = true; + rtlpriv->dm.txpower_track_control = true; + rtlpriv->dm.thermalvalue = 0; + + rtlpriv->dm.ofdm_index[0] = 30; + rtlpriv->dm.cck_index = 20; + + rtlpriv->dm.swing_idx_cck_base = rtlpriv->dm.cck_index; + + rtlpriv->dm.swing_idx_ofdm_base[0] = rtlpriv->dm.ofdm_index[0]; + rtlpriv->dm.delta_power_index[RF90_PATH_A] = 0; + rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0; + rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + " rtlpriv->dm.txpower_tracking = %d\n", + rtlpriv->dm.txpower_tracking); +} + +static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap; + rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, 0x800); + rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL; +} + +void rtl8723be_dm_init(struct ieee80211_hw *hw) +{ + struct 
rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + rtl8723be_dm_diginit(hw); + rtl8723be_dm_init_rate_adaptive_mask(hw); + rtl8723_dm_init_edca_turbo(hw); + rtl8723_dm_init_dynamic_bb_powersaving(hw); + rtl8723_dm_init_dynamic_txpower(hw); + rtl8723be_dm_init_txpower_tracking(hw); + rtl8723be_dm_init_dynamic_atc_switch(hw); +} + +static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct dig_t *rtl_dm_dig = &(rtlpriv->dm_digtable); + struct rtl_mac *mac = rtl_mac(rtlpriv); + + /* Determine the minimum RSSI */ + if ((mac->link_state < MAC80211_LINKED) && + (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) { + rtl_dm_dig->min_undec_pwdb_for_dm = 0; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "Not connected to any\n"); + } + if (mac->link_state >= MAC80211_LINKED) { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Client PWDB = 0x%lx\n", + rtlpriv->dm.entry_min_undec_sm_pwdb); + } else { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "STA Default Port PWDB = 0x%x\n", + rtl_dm_dig->min_undec_pwdb_for_dm); + } + } else { + rtl_dm_dig->min_undec_pwdb_for_dm = + rtlpriv->dm.entry_min_undec_sm_pwdb; + RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, + "AP Ext Port or disconnet PWDB = 0x%x\n", + rtl_dm_dig->min_undec_pwdb_for_dm); + } + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", + rtl_dm_dig->min_undec_pwdb_for_dm); +} + +static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *drv_priv; + u8 h2c_parameter[3] = { 0 }; + long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; + + /* AP & ADHOC & MESH */ + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + if (drv_priv->rssi_stat.undec_sm_pwdb < + tmp_entry_min_pwdb) + tmp_entry_min_pwdb = + drv_priv->rssi_stat.undec_sm_pwdb; + if (drv_priv->rssi_stat.undec_sm_pwdb > + tmp_entry_max_pwdb) + tmp_entry_max_pwdb = + drv_priv->rssi_stat.undec_sm_pwdb; + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + /* If associated entry is found */ + if (tmp_entry_max_pwdb != 0) { + rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "EntryMaxPWDB = 0x%lx(%ld)\n", + tmp_entry_max_pwdb, tmp_entry_max_pwdb); + } else { + rtlpriv->dm.entry_max_undec_sm_pwdb = 0; + } + /* If associated entry is found */ + if (tmp_entry_min_pwdb != 0xff) { + rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "EntryMinPWDB = 0x%lx(%ld)\n", + tmp_entry_min_pwdb, tmp_entry_min_pwdb); + } else { + rtlpriv->dm.entry_min_undec_sm_pwdb = 0; + } + /* Indicate Rx signal strength to FW. 
*/ + if (rtlpriv->dm.useramask) { + h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF); + h2c_parameter[1] = 0x20; + h2c_parameter[0] = 0; + rtl8723be_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); + } else { + rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb); + } + rtl8723be_dm_find_minimum_rssi(hw); + rtlpriv->dm_digtable.rssi_val_min = + rtlpriv->dm_digtable.min_undec_pwdb_for_dm; +} + +void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->dm_digtable.cur_igvalue != current_igi) { + rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi); + if (rtlpriv->phy.rf_type != RF_1T1R) + rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi); + } + rtlpriv->dm_digtable.pre_igvalue = rtlpriv->dm_digtable.cur_igvalue; + rtlpriv->dm_digtable.cur_igvalue = current_igi; +} + +static void rtl8723be_dm_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct dig_t *dm_digtable = &(rtlpriv->dm_digtable); + u8 dig_dynamic_min, dig_maxofmin; + bool firstconnect, firstdisconnect; + u8 dm_dig_max, dm_dig_min; + u8 current_igi = dm_digtable->cur_igvalue; + u8 offset; + + /* AP, BT */ + if (mac->act_scanning) + return; + + dig_dynamic_min = dm_digtable->dig_min_0; + firstconnect = (mac->link_state >= MAC80211_LINKED) && + !dm_digtable->media_connect_0; + firstdisconnect = (mac->link_state < MAC80211_LINKED) && + dm_digtable->media_connect_0; + + dm_dig_max = 0x5a; + dm_dig_min = DM_DIG_MIN; + dig_maxofmin = DM_DIG_MAX_AP; + + if (mac->link_state >= MAC80211_LINKED) { + if ((dm_digtable->rssi_val_min + 10) > dm_dig_max) + dm_digtable->rx_gain_max = dm_dig_max; + else if ((dm_digtable->rssi_val_min + 10) < dm_dig_min) + dm_digtable->rx_gain_max = dm_dig_min; + else + dm_digtable->rx_gain_max = + dm_digtable->rssi_val_min + 10; + + if (rtlpriv->dm.one_entry_only) { + offset = 12; + if (dm_digtable->rssi_val_min - offset < dm_dig_min) + dig_dynamic_min = dm_dig_min; + else if (dm_digtable->rssi_val_min - offset > + dig_maxofmin) + dig_dynamic_min = dig_maxofmin; + else + dig_dynamic_min = + dm_digtable->rssi_val_min - offset; + } else { + dig_dynamic_min = dm_dig_min; + } + } else { + dm_digtable->rx_gain_max = dm_dig_max; + dig_dynamic_min = dm_dig_min; + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n"); + } + + if (rtlpriv->falsealm_cnt.cnt_all > 10000) { + if (dm_digtable->large_fa_hit != 3) + dm_digtable->large_fa_hit++; + if (dm_digtable->forbidden_igi < current_igi) { + dm_digtable->forbidden_igi = current_igi; + dm_digtable->large_fa_hit = 1; + } + + if (dm_digtable->large_fa_hit >= 3) { + if ((dm_digtable->forbidden_igi + 1) > + dm_digtable->rx_gain_max) + dm_digtable->rx_gain_min = + dm_digtable->rx_gain_max; + else + dm_digtable->rx_gain_min = + dm_digtable->forbidden_igi + 1; + dm_digtable->recover_cnt = 3600; + } + } else { + if (dm_digtable->recover_cnt != 0) { + dm_digtable->recover_cnt--; + } else { + if (dm_digtable->large_fa_hit < 3) { + if ((dm_digtable->forbidden_igi - 1) < + dig_dynamic_min) { + dm_digtable->forbidden_igi = + dig_dynamic_min; + dm_digtable->rx_gain_min = + dig_dynamic_min; + } else { + dm_digtable->forbidden_igi--; + dm_digtable->rx_gain_min = + dm_digtable->forbidden_igi + 1; + } + } else { + dm_digtable->large_fa_hit = 0; + } + } + } + if (dm_digtable->rx_gain_min > dm_digtable->rx_gain_max) + dm_digtable->rx_gain_min = dm_digtable->rx_gain_max; + + if (mac->link_state >= MAC80211_LINKED) { + if 
(firstconnect) { + if (dm_digtable->rssi_val_min <= dig_maxofmin) + current_igi = dm_digtable->rssi_val_min; + else + current_igi = dig_maxofmin; + + dm_digtable->large_fa_hit = 0; + } else { + if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2) + current_igi += 4; + else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1) + current_igi += 2; + else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) + current_igi -= 2; + } + } else { + if (firstdisconnect) { + current_igi = dm_digtable->rx_gain_min; + } else { + if (rtlpriv->falsealm_cnt.cnt_all > 10000) + current_igi += 4; + else if (rtlpriv->falsealm_cnt.cnt_all > 8000) + current_igi += 2; + else if (rtlpriv->falsealm_cnt.cnt_all < 500) + current_igi -= 2; + } + } + + if (current_igi > dm_digtable->rx_gain_max) + current_igi = dm_digtable->rx_gain_max; + else if (current_igi < dm_digtable->rx_gain_min) + current_igi = dm_digtable->rx_gain_min; + + rtl8723be_dm_write_dig(hw, current_igi); + dm_digtable->media_connect_0 = + ((mac->link_state >= MAC80211_LINKED) ? true : false); + dm_digtable->dig_min_0 = dig_dynamic_min; +} + +static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +{ + u32 ret_value; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); + + rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE1_11N, MASKDWORD); + falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff; + falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16; + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE2_11N, MASKDWORD); + falsealm_cnt->cnt_ofdm_cca = ret_value & 0xffff; + falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16; + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE3_11N, MASKDWORD); + falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff; + falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16; + + ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE4_11N, MASKDWORD); + falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff; + + falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail; + + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(12), 1); + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(14), 1); + + ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_RST_11N, MASKBYTE0); + falsealm_cnt->cnt_cck_fail = ret_value; + + ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_MSB_11N, MASKBYTE3); + falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; + + ret_value = rtl_get_bbreg(hw, DM_REG_CCK_CCA_CNT_11N, MASKDWORD); + falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) | + ((ret_value & 0xff00) >> 8); + + falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail + + falsealm_cnt->cnt_sb_search_fail + + falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_cck_fail; + + falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca + + falsealm_cnt->cnt_cck_cca; + + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 1); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 0); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 1); + rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 0); + + rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0); + rtl_set_bbreg(hw, 
DM_REG_OFDM_FA_RSTD_11N, BIT(31), 0); + + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0); + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2); + + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0); + rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + "cnt_parity_fail = %d, cnt_rate_illegal = %d, " + "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", + falsealm_cnt->cnt_parity_fail, + falsealm_cnt->cnt_rate_illegal, + falsealm_cnt->cnt_crc8_fail, + falsealm_cnt->cnt_mcs_fail); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + "cnt_ofdm_fail = %x, cnt_cck_fail = %x," + " cnt_all = %x\n", + falsealm_cnt->cnt_ofdm_fail, + falsealm_cnt->cnt_cck_fail, + falsealm_cnt->cnt_all); +} + +static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw) +{ + /* 8723BE does not support ODM_BB_DYNAMIC_TXPWR*/ + return; +} + +static void rtl8723be_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index, + u8 rfpath, long iqk_result_x, + long iqk_result_y) +{ + long ele_a = 0, ele_d, ele_c = 0, value32; + + if (ofdm_index >= 43) + ofdm_index = 43 - 1; + + ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000) >> 22; + + if (iqk_result_x != 0) { + if ((iqk_result_x & 0x00000200) != 0) + iqk_result_x = iqk_result_x | 0xFFFFFC00; + ele_a = ((iqk_result_x * ele_d) >> 8) & 0x000003FF; + + if ((iqk_result_y & 0x00000200) != 0) + iqk_result_y = iqk_result_y | 0xFFFFFC00; + ele_c = ((iqk_result_y * ele_d) >> 8) & 0x000003FF; + + switch (rfpath) { + case RF90_PATH_A: + value32 = (ele_d << 22) | + ((ele_c & 0x3F) << 16) | ele_a; + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, + value32); + value32 = (ele_c & 0x000003C0) >> 6; + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); + value32 = ((iqk_result_x * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), + value32); + break; + default: + break; + } + } else { + switch (rfpath) { + case RF90_PATH_A: + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, + ofdmswing_table[ofdm_index]); + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00); + break; + default: + break; + } + } +} + +static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rfpath, u8 idx) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 swing_idx_ofdm_limit = 36; + + if (method == TXAGC) { + rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel); + } else if (method == BBSWING) { + if (rtldm->swing_idx_cck >= CCK_TABLE_SIZE) + rtldm->swing_idx_cck = CCK_TABLE_SIZE - 1; + + if (!rtldm->cck_inch14) { + rtl_write_byte(rtlpriv, 0xa22, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][0]); + rtl_write_byte(rtlpriv, 0xa23, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][1]); + rtl_write_byte(rtlpriv, 0xa24, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][2]); + rtl_write_byte(rtlpriv, 0xa25, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][3]); + rtl_write_byte(rtlpriv, 0xa26, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][4]); + rtl_write_byte(rtlpriv, 0xa27, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][5]); + rtl_write_byte(rtlpriv, 0xa28, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][6]); + rtl_write_byte(rtlpriv, 0xa29, + cckswing_table_ch1ch13[rtldm->swing_idx_cck][7]); + } else { + rtl_write_byte(rtlpriv, 0xa22, + cckswing_table_ch14[rtldm->swing_idx_cck][0]); + 
rtl_write_byte(rtlpriv, 0xa23, + cckswing_table_ch14[rtldm->swing_idx_cck][1]); + rtl_write_byte(rtlpriv, 0xa24, + cckswing_table_ch14[rtldm->swing_idx_cck][2]); + rtl_write_byte(rtlpriv, 0xa25, + cckswing_table_ch14[rtldm->swing_idx_cck][3]); + rtl_write_byte(rtlpriv, 0xa26, + cckswing_table_ch14[rtldm->swing_idx_cck][4]); + rtl_write_byte(rtlpriv, 0xa27, + cckswing_table_ch14[rtldm->swing_idx_cck][5]); + rtl_write_byte(rtlpriv, 0xa28, + cckswing_table_ch14[rtldm->swing_idx_cck][6]); + rtl_write_byte(rtlpriv, 0xa29, + cckswing_table_ch14[rtldm->swing_idx_cck][7]); + } + + if (rfpath == RF90_PATH_A) { + if (rtldm->swing_idx_ofdm[RF90_PATH_A] < + swing_idx_ofdm_limit) + swing_idx_ofdm_limit = + rtldm->swing_idx_ofdm[RF90_PATH_A]; + + rtl8723be_set_iqk_matrix(hw, + rtldm->swing_idx_ofdm[rfpath], rfpath, + rtlphy->iqk_matrix[idx].value[0][0], + rtlphy->iqk_matrix[idx].value[0][1]); + } else if (rfpath == RF90_PATH_B) { + if (rtldm->swing_idx_ofdm[RF90_PATH_B] < + swing_idx_ofdm_limit) + swing_idx_ofdm_limit = + rtldm->swing_idx_ofdm[RF90_PATH_B]; + + rtl8723be_set_iqk_matrix(hw, + rtldm->swing_idx_ofdm[rfpath], rfpath, + rtlphy->iqk_matrix[idx].value[0][4], + rtlphy->iqk_matrix[idx].value[0][5]); + } + } else { + return; + } +} + +static void txpwr_track_cb_therm(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 thermalvalue = 0, delta, delta_lck, delta_iqk; + u8 thermalvalue_avg_count = 0; + u32 thermalvalue_avg = 0; + int i = 0; + + u8 ofdm_min_index = 6; + u8 index = 0; + + char delta_swing_table_idx_tup_a[] = { + 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, + 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, + 10, 11, 11, 12, 12, 13, 14, 15}; + char delta_swing_table_idx_tdown_a[] = { + 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, + 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, + 9, 10, 10, 11, 12, 13, 14, 15}; + + /*Initilization ( 7 steps in total)*/ + rtlpriv->dm.txpower_trackinginit = true; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "rtl8723be_dm_txpower_tracking" + "_callback_thermalmeter\n"); + + thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00); + if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 || + rtlefuse->eeprom_thermalmeter == 0xFF) + return; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x " + "eeprom_thermalmeter 0x%x\n", + thermalvalue, rtldm->thermalvalue, + rtlefuse->eeprom_thermalmeter); + /*3 Initialize ThermalValues of RFCalibrateInfo*/ + if (!rtldm->thermalvalue) { + rtlpriv->dm.thermalvalue_lck = thermalvalue; + rtlpriv->dm.thermalvalue_iqk = thermalvalue; + } + + /*4 Calculate average thermal meter*/ + rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue; + rtldm->thermalvalue_avg_index++; + if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8723BE) + rtldm->thermalvalue_avg_index = 0; + + for (i = 0; i < AVG_THERMAL_NUM_8723BE; i++) { + if (rtldm->thermalvalue_avg[i]) { + thermalvalue_avg += rtldm->thermalvalue_avg[i]; + thermalvalue_avg_count++; + } + } + + if (thermalvalue_avg_count) + thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count); + + /* 5 Calculate delta, delta_LCK, delta_IQK.*/ + delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? + (thermalvalue - rtlpriv->dm.thermalvalue) : + (rtlpriv->dm.thermalvalue - thermalvalue); + delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? 
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) : + (rtlpriv->dm.thermalvalue_lck - thermalvalue); + delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? + (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : + (rtlpriv->dm.thermalvalue_iqk - thermalvalue); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Readback Thermal Meter = 0x%x pre thermal meter 0x%x " + "eeprom_thermalmeter 0x%x delta 0x%x " + "delta_lck 0x%x delta_iqk 0x%x\n", + thermalvalue, rtlpriv->dm.thermalvalue, + rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk); + /* 6 If necessary, do LCK.*/ + if (delta_lck >= IQK_THRESHOLD) { + rtlpriv->dm.thermalvalue_lck = thermalvalue; + rtl8723be_phy_lc_calibrate(hw); + } + + /* 7 If necessary, move the index of + * swing table to adjust Tx power. + */ + if (delta > 0 && rtlpriv->dm.txpower_track_control) { + delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ? + (thermalvalue - rtlefuse->eeprom_thermalmeter) : + (rtlefuse->eeprom_thermalmeter - thermalvalue); + + if (delta >= TXSCALE_TABLE_SIZE) + delta = TXSCALE_TABLE_SIZE - 1; + /* 7.1 Get the final CCK_index and + * OFDM_index for each swing table. + */ + if (thermalvalue > rtlefuse->eeprom_thermalmeter) { + rtldm->delta_power_index_last[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = + delta_swing_table_idx_tup_a[delta]; + } else { + rtldm->delta_power_index_last[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = + -1 * delta_swing_table_idx_tdown_a[delta]; + } + + /* 7.2 Handle boundary conditions of index.*/ + if (rtldm->delta_power_index[RF90_PATH_A] == + rtldm->delta_power_index_last[RF90_PATH_A]) + rtldm->power_index_offset[RF90_PATH_A] = 0; + else + rtldm->power_index_offset[RF90_PATH_A] = + rtldm->delta_power_index[RF90_PATH_A] - + rtldm->delta_power_index_last[RF90_PATH_A]; + + rtldm->ofdm_index[0] = + rtldm->swing_idx_ofdm_base[RF90_PATH_A] + + rtldm->power_index_offset[RF90_PATH_A]; + rtldm->cck_index = rtldm->swing_idx_cck_base + + rtldm->power_index_offset[RF90_PATH_A]; + + rtldm->swing_idx_cck = rtldm->cck_index; + rtldm->swing_idx_ofdm[0] = rtldm->ofdm_index[0]; + + if (rtldm->ofdm_index[0] > OFDM_TABLE_SIZE - 1) + rtldm->ofdm_index[0] = OFDM_TABLE_SIZE - 1; + else if (rtldm->ofdm_index[0] < ofdm_min_index) + rtldm->ofdm_index[0] = ofdm_min_index; + + if (rtldm->cck_index > CCK_TABLE_SIZE - 1) + rtldm->cck_index = CCK_TABLE_SIZE - 1; + else if (rtldm->cck_index < 0) + rtldm->cck_index = 0; + } else { + rtldm->power_index_offset[RF90_PATH_A] = 0; + } + + if ((rtldm->power_index_offset[RF90_PATH_A] != 0) && + (rtldm->txpower_track_control)) { + rtldm->done_txpower = true; + if (thermalvalue > rtlefuse->eeprom_thermalmeter) + rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, + index); + else + rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0, + index); + + rtldm->swing_idx_cck_base = rtldm->swing_idx_cck; + rtldm->swing_idx_ofdm_base[RF90_PATH_A] = + rtldm->swing_idx_ofdm[0]; + rtldm->thermalvalue = thermalvalue; + } + + if (delta_iqk >= IQK_THRESHOLD) { + rtldm->thermalvalue_iqk = thermalvalue; + rtl8723be_phy_iq_calibrate(hw, false); + } + + rtldm->txpowercount = 0; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n"); +} + +void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + static u8 tm_trigger; + + if (!rtlpriv->dm.txpower_tracking) + return; + + if (!tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | 
BIT(16), + 0x03); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Trigger 8723be Thermal Meter!!\n"); + tm_trigger = 1; + return; + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Schedule TxPowerTracking !!\n"); + txpwr_track_cb_therm(hw); + tm_trigger = 0; + } +} + +static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *ra = &(rtlpriv->ra); + struct ieee80211_sta *sta = NULL; + u32 low_rssithresh_for_ra = ra->low2high_rssi_thresh_for_ra40m; + u32 high_rssithresh_for_ra = ra->high_rssi_thresh_for_ra; + u8 go_up_gap = 5; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "driver is going to unload\n"); + return; + } + + if (!rtlpriv->dm.useramask) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "driver does not control rate adaptive mask\n"); + return; + } + + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + switch (ra->pre_ratr_state) { + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra += go_up_gap; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra += go_up_gap; + low_rssithresh_for_ra += go_up_gap; + break; + default: + break; + } + + if (rtlpriv->dm.undec_sm_pwdb > + (long)high_rssithresh_for_ra) + ra->ratr_state = DM_RATR_STA_HIGH; + else if (rtlpriv->dm.undec_sm_pwdb > + (long)low_rssithresh_for_ra) + ra->ratr_state = DM_RATR_STA_MIDDLE; + else + ra->ratr_state = DM_RATR_STA_LOW; + + if (ra->pre_ratr_state != ra->ratr_state) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI = %ld\n", + rtlpriv->dm.undec_sm_pwdb); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI_LEVEL = %d\n", ra->ratr_state); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "PreState = %d, CurState = %d\n", + ra->pre_ratr_state, ra->ratr_state); + + rcu_read_lock(); + sta = rtl_find_sta(hw, mac->bssid); + if (sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, + ra->ratr_state); + rcu_read_unlock(); + + ra->pre_ratr_state = ra->ratr_state; + } + } +} + +static bool rtl8723be_dm_is_edca_turbo_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) { + if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv)) + return true; + } + if (rtlpriv->mac80211.mode == WIRELESS_MODE_B) + return true; + + return false; +} + +static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + static u64 last_txok_cnt; + static u64 last_rxok_cnt; + u64 cur_txok_cnt = 0; + u64 cur_rxok_cnt = 0; + u32 edca_be_ul = 0x6ea42b; + u32 edca_be_dl = 0x6ea42b;/*not sure*/ + u32 edca_be = 0x5ea42b; + u32 iot_peer = 0; + bool is_cur_rdlstate; + bool last_is_cur_rdlstate = false; + bool bias_on_rx = false; + bool edca_turbo_on = false; + + last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate; + + cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; + cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; + + iot_peer = rtlpriv->mac80211.vendor; + bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ? + true : false; + edca_turbo_on = ((!rtlpriv->dm.is_any_nonbepkts) && + (!rtlpriv->dm.disable_framebursting)) ? 
+ true : false; + + if ((iot_peer == PEER_CISCO) && + (mac->mode == WIRELESS_MODE_N_24G)) { + edca_be_dl = edca_setting_dl[iot_peer]; + edca_be_ul = edca_setting_ul[iot_peer]; + } + if (rtl8723be_dm_is_edca_turbo_disable(hw)) + goto exit; + + if (edca_turbo_on) { + if (bias_on_rx) + is_cur_rdlstate = (cur_txok_cnt > cur_rxok_cnt * 4) ? + false : true; + else + is_cur_rdlstate = (cur_rxok_cnt > cur_txok_cnt * 4) ? + true : false; + + edca_be = (is_cur_rdlstate) ? edca_be_dl : edca_be_ul; + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be); + rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate; + rtlpriv->dm.current_turbo_edca = true; + } else { + if (rtlpriv->dm.current_turbo_edca) { + u8 tmp = AC0_BE; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + (u8 *)(&tmp)); + } + rtlpriv->dm.current_turbo_edca = false; + } + +exit: + rtlpriv->dm.is_any_nonbepkts = false; + last_txok_cnt = rtlpriv->stats.txbytesunicast; + last_rxok_cnt = rtlpriv->stats.rxbytesunicast; +} + +static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 cur_cck_cca_thresh; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + if (rtlpriv->dm_digtable.rssi_val_min > 25) { + cur_cck_cca_thresh = 0xcd; + } else if ((rtlpriv->dm_digtable.rssi_val_min <= 25) && + (rtlpriv->dm_digtable.rssi_val_min > 10)) { + cur_cck_cca_thresh = 0x83; + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + + if (rtlpriv->dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh) + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh); + + rtlpriv->dm_digtable.pre_cck_cca_thres = rtlpriv->dm_digtable.cur_cck_cca_thres; + rtlpriv->dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh; + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + "CCK cca thresh hold =%x\n", + rtlpriv->dm_digtable.cur_cck_cca_thres); +} + +static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 reg_c50, reg_c58; + bool fw_current_in_ps_mode = false; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_in_ps_mode)); + if (fw_current_in_ps_mode) + return; + + reg_c50 = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + reg_c58 = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + + if (reg_c50 > 0x28 && reg_c58 > 0x28) { + if (!rtlpriv->rtlhal.pre_edcca_enable) { + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x03); + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x00); + } + } else if (reg_c50 < 0x25 && reg_c58 < 0x25) { + if (rtlpriv->rtlhal.pre_edcca_enable) { + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x7f); + rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x7f); + } + } +} + +static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 crystal_cap; + u32 packet_count; + int cfo_khz_a, cfo_khz_b, cfo_ave = 0, adjust_xtal = 0; + int cfo_ave_diff; + + if (rtlpriv->mac80211.link_state < MAC80211_LINKED) { + if (rtldm->atc_status == ATC_STATUS_OFF) { + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11), + ATC_STATUS_ON); + rtldm->atc_status = ATC_STATUS_ON; + } + if (rtlpriv->cfg->ops->get_btc_status()) { + if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) { + RT_TRACE(rtlpriv, COMP_BT_COEXIST, 
DBG_LOUD, + "odm_DynamicATCSwitch(): Disable" + " CFO tracking for BT!!\n"); + return; + } + } + + if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) { + rtldm->crystal_cap = rtlpriv->efuse.crystalcap; + crystal_cap = rtldm->crystal_cap & 0x3f; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystal_cap | (crystal_cap << 6))); + } + } else { + cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280; + cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280; + packet_count = rtldm->packet_count; + + if (packet_count == rtldm->packet_count_pre) + return; + + rtldm->packet_count_pre = packet_count; + + if (rtlpriv->phy.rf_type == RF_1T1R) + cfo_ave = cfo_khz_a; + else + cfo_ave = (int)(cfo_khz_a + cfo_khz_b) >> 1; + + cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ? + (rtldm->cfo_ave_pre - cfo_ave) : + (cfo_ave - rtldm->cfo_ave_pre); + + if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) { + rtldm->large_cfo_hit = 1; + return; + } else { + rtldm->large_cfo_hit = 0; + } + + rtldm->cfo_ave_pre = cfo_ave; + + if (cfo_ave >= -rtldm->cfo_threshold && + cfo_ave <= rtldm->cfo_threshold && rtldm->is_freeze == 0) { + if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) { + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10; + rtldm->is_freeze = 1; + } else { + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL; + } + } + + if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f) + adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 1) + 1; + else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) && + rtlpriv->dm.crystal_cap > 0) + adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 1) - 1; + + if (adjust_xtal != 0) { + rtldm->is_freeze = 0; + rtldm->crystal_cap += adjust_xtal; + + if (rtldm->crystal_cap > 0x3f) + rtldm->crystal_cap = 0x3f; + else if (rtldm->crystal_cap < 0) + rtldm->crystal_cap = 0; + + crystal_cap = rtldm->crystal_cap & 0x3f; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystal_cap | (crystal_cap << 6))); + } + + if (cfo_ave < CFO_THRESHOLD_ATC && + cfo_ave > -CFO_THRESHOLD_ATC) { + if (rtldm->atc_status == ATC_STATUS_ON) { + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11), + ATC_STATUS_OFF); + rtldm->atc_status = ATC_STATUS_OFF; + } + } else { + if (rtldm->atc_status == ATC_STATUS_OFF) { + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11), + ATC_STATUS_ON); + rtldm->atc_status = ATC_STATUS_ON; + } + } + } +} + +static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *drv_priv; + u8 cnt = 0; + + rtlpriv->dm.one_entry_only = false; + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION && + rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + rtlpriv->dm.one_entry_only = true; + return; + } + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + cnt++; + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + if (cnt == 1) + rtlpriv->dm.one_entry_only = true; + } +} + +void rtl8723be_dm_watchdog(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool fw_current_inpsmode = false; + bool fw_ps_awake = true; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inpsmode)); + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, + (u8 *)(&fw_ps_awake)); + + if (ppsc->p2p_ps_info.p2p_ps_mode) + 
fw_ps_awake = false; + + if ((ppsc->rfpwr_state == ERFON) && + ((!fw_current_inpsmode) && fw_ps_awake) && + (!ppsc->rfchange_inprogress)) { + rtl8723be_dm_common_info_self_update(hw); + rtl8723be_dm_false_alarm_counter_statistics(hw); + rtl8723be_dm_check_rssi_monitor(hw); + rtl8723be_dm_dig(hw); + rtl8723be_dm_dynamic_edcca(hw); + rtl8723be_dm_cck_packet_detection_thresh(hw); + rtl8723be_dm_refresh_rate_adaptive_mask(hw); + rtl8723be_dm_check_edca_turbo(hw); + rtl8723be_dm_dynamic_atc_switch(hw); + rtl8723be_dm_check_txpower_tracking(hw); + rtl8723be_dm_dynamic_txpower(hw); + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); + } + rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0; +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/dm.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/dm.h @@ -0,0 +1,310 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_DM_H__ +#define __RTL8723BE_DM_H__ + +#define MAIN_ANT 0 +#define AUX_ANT 1 +#define MAIN_ANT_CG_TRX 1 +#define AUX_ANT_CG_TRX 0 +#define MAIN_ANT_CGCS_RX 0 +#define AUX_ANT_CGCS_RX 1 + +#define TXSCALE_TABLE_SIZE 30 + +/*RF REG LIST*/ +#define DM_REG_RF_MODE_11N 0x00 +#define DM_REG_RF_0B_11N 0x0B +#define DM_REG_CHNBW_11N 0x18 +#define DM_REG_T_METER_11N 0x24 +#define DM_REG_RF_25_11N 0x25 +#define DM_REG_RF_26_11N 0x26 +#define DM_REG_RF_27_11N 0x27 +#define DM_REG_RF_2B_11N 0x2B +#define DM_REG_RF_2C_11N 0x2C +#define DM_REG_RXRF_A3_11N 0x3C +#define DM_REG_T_METER_92D_11N 0x42 +#define DM_REG_T_METER_88E_11N 0x42 + +/*BB REG LIST*/ +/*PAGE 8 */ +#define DM_REG_BB_CTRL_11N 0x800 +#define DM_REG_RF_PIN_11N 0x804 +#define DM_REG_PSD_CTRL_11N 0x808 +#define DM_REG_TX_ANT_CTRL_11N 0x80C +#define DM_REG_BB_PWR_SAV5_11N 0x818 +#define DM_REG_CCK_RPT_FORMAT_11N 0x824 +#define DM_REG_RX_DEFUALT_A_11N 0x858 +#define DM_REG_RX_DEFUALT_B_11N 0x85A +#define DM_REG_BB_PWR_SAV3_11N 0x85C +#define DM_REG_ANTSEL_CTRL_11N 0x860 +#define DM_REG_RX_ANT_CTRL_11N 0x864 +#define DM_REG_PIN_CTRL_11N 0x870 +#define DM_REG_BB_PWR_SAV1_11N 0x874 +#define DM_REG_ANTSEL_PATH_11N 0x878 +#define DM_REG_BB_3WIRE_11N 0x88C +#define DM_REG_SC_CNT_11N 0x8C4 +#define DM_REG_PSD_DATA_11N 0x8B4 +/*PAGE 9*/ +#define DM_REG_ANT_MAPPING1_11N 0x914 +#define DM_REG_ANT_MAPPING2_11N 0x918 +/*PAGE A*/ +#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00 +#define DM_REG_CCK_CCA_11N 0xA0A +#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C +#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10 +#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14 +#define DM_REG_CCK_FILTER_PARA1_11N 0xA22 +#define DM_REG_CCK_FILTER_PARA2_11N 0xA23 +#define DM_REG_CCK_FILTER_PARA3_11N 0xA24 +#define DM_REG_CCK_FILTER_PARA4_11N 0xA25 +#define DM_REG_CCK_FILTER_PARA5_11N 0xA26 +#define 
DM_REG_CCK_FILTER_PARA6_11N 0xA27 +#define DM_REG_CCK_FILTER_PARA7_11N 0xA28 +#define DM_REG_CCK_FILTER_PARA8_11N 0xA29 +#define DM_REG_CCK_FA_RST_11N 0xA2C +#define DM_REG_CCK_FA_MSB_11N 0xA58 +#define DM_REG_CCK_FA_LSB_11N 0xA5C +#define DM_REG_CCK_CCA_CNT_11N 0xA60 +#define DM_REG_BB_PWR_SAV4_11N 0xA74 +/*PAGE B */ +#define DM_REG_LNA_SWITCH_11N 0xB2C +#define DM_REG_PATH_SWITCH_11N 0xB30 +#define DM_REG_RSSI_CTRL_11N 0xB38 +#define DM_REG_CONFIG_ANTA_11N 0xB68 +#define DM_REG_RSSI_BT_11N 0xB9C +/*PAGE C */ +#define DM_REG_OFDM_FA_HOLDC_11N 0xC00 +#define DM_REG_RX_PATH_11N 0xC04 +#define DM_REG_TRMUX_11N 0xC08 +#define DM_REG_OFDM_FA_RSTC_11N 0xC0C +#define DM_REG_RXIQI_MATRIX_11N 0xC14 +#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C +#define DM_REG_IGI_A_11N 0xC50 +#define DM_REG_ANTDIV_PARA2_11N 0xC54 +#define DM_REG_IGI_B_11N 0xC58 +#define DM_REG_ANTDIV_PARA3_11N 0xC5C +#define DM_REG_BB_PWR_SAV2_11N 0xC70 +#define DM_REG_RX_OFF_11N 0xC7C +#define DM_REG_TXIQK_MATRIXA_11N 0xC80 +#define DM_REG_TXIQK_MATRIXB_11N 0xC88 +#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94 +#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C +#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0 +#define DM_REG_ANTDIV_PARA1_11N 0xCA4 +#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0 +/*PAGE D */ +#define DM_REG_OFDM_FA_RSTD_11N 0xD00 +#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0 +#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4 +#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8 +/*PAGE E */ +#define DM_REG_TXAGC_A_6_18_11N 0xE00 +#define DM_REG_TXAGC_A_24_54_11N 0xE04 +#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08 +#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10 +#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14 +#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18 +#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C +#define DM_REG_FPGA0_IQK_11N 0xE28 +#define DM_REG_TXIQK_TONE_A_11N 0xE30 +#define DM_REG_RXIQK_TONE_A_11N 0xE34 +#define DM_REG_TXIQK_PI_A_11N 0xE38 +#define DM_REG_RXIQK_PI_A_11N 0xE3C +#define DM_REG_TXIQK_11N 0xE40 +#define DM_REG_RXIQK_11N 0xE44 +#define DM_REG_IQK_AGC_PTS_11N 0xE48 +#define DM_REG_IQK_AGC_RSP_11N 0xE4C +#define DM_REG_BLUETOOTH_11N 0xE6C +#define DM_REG_RX_WAIT_CCA_11N 0xE70 +#define DM_REG_TX_CCK_RFON_11N 0xE74 +#define DM_REG_TX_CCK_BBON_11N 0xE78 +#define DM_REG_OFDM_RFON_11N 0xE7C +#define DM_REG_OFDM_BBON_11N 0xE80 +#define DM_REG_TX2RX_11N 0xE84 +#define DM_REG_TX2TX_11N 0xE88 +#define DM_REG_RX_CCK_11N 0xE8C +#define DM_REG_RX_OFDM_11N 0xED0 +#define DM_REG_RX_WAIT_RIFS_11N 0xED4 +#define DM_REG_RX2RX_11N 0xED8 +#define DM_REG_STANDBY_11N 0xEDC +#define DM_REG_SLEEP_11N 0xEE0 +#define DM_REG_PMPD_ANAEN_11N 0xEEC + +/*MAC REG LIST*/ +#define DM_REG_BB_RST_11N 0x02 +#define DM_REG_ANTSEL_PIN_11N 0x4C +#define DM_REG_EARLY_MODE_11N 0x4D0 +#define DM_REG_RSSI_MONITOR_11N 0x4FE +#define DM_REG_EDCA_VO_11N 0x500 +#define DM_REG_EDCA_VI_11N 0x504 +#define DM_REG_EDCA_BE_11N 0x508 +#define DM_REG_EDCA_BK_11N 0x50C +#define DM_REG_TXPAUSE_11N 0x522 +#define DM_REG_RESP_TX_11N 0x6D8 +#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0 +#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4 + +/*DIG Related*/ +#define DM_BIT_IGI_11N 0x0000007F + +#define HAL_DM_DIG_DISABLE BIT(0) +#define HAL_DM_HIPWR_DISABLE BIT(1) + +#define OFDM_TABLE_LENGTH 43 +#define CCK_TABLE_LENGTH 33 + +#define OFDM_TABLE_SIZE 37 +#define CCK_TABLE_SIZE 33 + +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + +#define DM_DIG_THRESH_HIGH 40 +#define DM_DIG_THRESH_LOW 35 + +#define DM_FALSEALARM_THRESH_LOW 400 +#define DM_FALSEALARM_THRESH_HIGH 1000 + +#define DM_DIG_MAX 0x3e +#define DM_DIG_MIN 0x1e + +#define 
DM_DIG_MAX_AP 0x32 +#define DM_DIG_MIN_AP 0x20 + +#define DM_DIG_FA_UPPER 0x3e +#define DM_DIG_FA_LOWER 0x1e +#define DM_DIG_FA_TH0 0x200 +#define DM_DIG_FA_TH1 0x300 +#define DM_DIG_FA_TH2 0x400 + +#define DM_DIG_BACKOFF_MAX 12 +#define DM_DIG_BACKOFF_MIN -4 +#define DM_DIG_BACKOFF_DEFAULT 10 + +#define RXPATHSELECTION_DIFF_TH 18 + +#define DM_RATR_STA_INIT 0 +#define DM_RATR_STA_HIGH 1 +#define DM_RATR_STA_MIDDLE 2 +#define DM_RATR_STA_LOW 3 + +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define TXHIGHPWRLEVEL_NORMAL 0 +#define TXHIGHPWRLEVEL_LEVEL1 1 +#define TXHIGHPWRLEVEL_LEVEL2 2 +#define TXHIGHPWRLEVEL_BT1 3 +#define TXHIGHPWRLEVEL_BT2 4 + +#define DM_TYPE_BYFW 0 +#define DM_TYPE_BYDRIVER 1 + +#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 +#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 +#define TXPWRTRACK_MAX_IDX 6 + +/* Dynamic ATC switch */ +#define ATC_STATUS_OFF 0x0 /* enable */ +#define ATC_STATUS_ON 0x1 /* disable */ +#define CFO_THRESHOLD_XTAL 10 /* kHz */ +#define CFO_THRESHOLD_ATC 80 /* kHz */ + +enum FAT_STATE { + FAT_NORMAL_STATE = 0, + FAT_TRAINING_STATE = 1, +}; + +enum tag_dynamic_init_gain_operation_type_definition { + DIG_TYPE_THRESH_HIGH = 0, + DIG_TYPE_THRESH_LOW = 1, + DIG_TYPE_BACKOFF = 2, + DIG_TYPE_RX_GAIN_MIN = 3, + DIG_TYPE_RX_GAIN_MAX = 4, + DIG_TYPE_ENABLE = 5, + DIG_TYPE_DISABLE = 6, + DIG_OP_TYPE_MAX +}; + +enum dm_1r_cca_e { + CCA_1R = 0, + CCA_2R = 1, + CCA_MAX = 2, +}; + +enum dm_rf_e { + RF_SAVE = 0, + RF_NORMAL = 1, + RF_MAX = 2, +}; + +enum dm_sw_ant_switch_e { + ANS_ANTENNA_B = 1, + ANS_ANTENNA_A = 2, + ANS_ANTENNA_MAX = 3, +}; + +enum dm_dig_ext_port_alg_e { + DIG_EXT_PORT_STAGE_0 = 0, + DIG_EXT_PORT_STAGE_1 = 1, + DIG_EXT_PORT_STAGE_2 = 2, + DIG_EXT_PORT_STAGE_3 = 3, + DIG_EXT_PORT_STAGE_MAX = 4, +}; + +enum dm_dig_connect_e { + DIG_STA_DISCONNECT = 0, + DIG_STA_CONNECT = 1, + DIG_STA_BEFORE_CONNECT = 2, + DIG_MULTISTA_DISCONNECT = 3, + DIG_MULTISTA_CONNECT = 4, + DIG_CONNECT_MAX +}; + +enum pwr_track_control_method { + BBSWING, + TXAGC +}; + +#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) +#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) +#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) +#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) +#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) + +void rtl8723be_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc, + u32 mac_id); +void rtl8723be_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux, + u32 mac_id, u32 rx_pwdb_all); +void rtl8723be_dm_fast_antenna_trainning_callback(unsigned long data); +void rtl8723be_dm_init(struct ieee80211_hw *hw); +void rtl8723be_dm_watchdog(struct ieee80211_hw *hw); +void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi); +void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw); +void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type, + u8 *pdirection, u32 *poutwrite_val); +void rtl8723be_dm_init_edca_turbo(struct ieee80211_hw *hw); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/fw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/fw.c @@ -0,0 +1,628 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "reg.h" +#include "def.h" +#include "fw.h" +#include "../rtl8723com/fw_common.h" + +static bool _rtl8723be_check_fw_read_last_h2c(struct ieee80211_hw *hw, + u8 boxnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val_hmetfr; + bool result = false; + + val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); + if (((val_hmetfr >> boxnum) & BIT(0)) == 0) + result = true; + return result; +} + +static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 boxnum; + u16 box_reg = 0, box_extreg = 0; + u8 u1b_tmp; + bool isfw_read = false; + u8 buf_index = 0; + bool bwrite_sucess = false; + u8 wait_h2c_limit = 100; + u8 wait_writeh2c_limit = 100; + u8 boxcontent[4], boxextcontent[4]; + u32 h2c_waitcounter = 0; + unsigned long flag; + u8 idx; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n"); + + while (true) { + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + if (rtlhal->h2c_setinprogress) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C set in progress! Wait to set.." 
+ "element_id(%d).\n", element_id); + + while (rtlhal->h2c_setinprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, + flag); + h2c_waitcounter++; + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Wait 100 us (%d times)...\n", + h2c_waitcounter); + udelay(100); + + if (h2c_waitcounter > 1000) + return; + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, + flag); + } + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + } else { + rtlhal->h2c_setinprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + break; + } + } + while (!bwrite_sucess) { + wait_writeh2c_limit--; + if (wait_writeh2c_limit == 0) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Write H2C fail because no trigger " + "for FW INT!\n"); + break; + } + boxnum = rtlhal->last_hmeboxnum; + switch (boxnum) { + case 0: + box_reg = REG_HMEBOX_0; + box_extreg = REG_HMEBOX_EXT_0; + break; + case 1: + box_reg = REG_HMEBOX_1; + box_extreg = REG_HMEBOX_EXT_1; + break; + case 2: + box_reg = REG_HMEBOX_2; + box_extreg = REG_HMEBOX_EXT_2; + break; + case 3: + box_reg = REG_HMEBOX_3; + box_extreg = REG_HMEBOX_EXT_3; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not processed\n"); + break; + } + isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum); + while (!isfw_read) { + wait_h2c_limit--; + if (wait_h2c_limit == 0) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Wating too long for FW read " + "clear HMEBox(%d)!\n", boxnum); + break; + } + udelay(10); + + isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, + boxnum); + u1b_tmp = rtl_read_byte(rtlpriv, 0x130); + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Wating for FW read clear HMEBox(%d)!!! 0x130 = %2x\n", + boxnum, u1b_tmp); + } + if (!isfw_read) { + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Write H2C register BOX[%d] fail!!!!! 
" + "Fw do not read.\n", boxnum); + break; + } + memset(boxcontent, 0, sizeof(boxcontent)); + memset(boxextcontent, 0, sizeof(boxextcontent)); + boxcontent[0] = element_id; + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "Write element_id box_reg(%4x) = %2x\n", + box_reg, element_id); + + switch (cmd_len) { + case 1: + case 2: + case 3: + /*boxcontent[0] &= ~(BIT(7));*/ + memcpy((u8 *)(boxcontent) + 1, + p_cmdbuffer + buf_index, cmd_len); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + case 4: + case 5: + case 6: + case 7: + /*boxcontent[0] |= (BIT(7));*/ + memcpy((u8 *)(boxextcontent), + p_cmdbuffer + buf_index+3, cmd_len-3); + memcpy((u8 *)(boxcontent) + 1, + p_cmdbuffer + buf_index, 3); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_extreg + idx, + boxextcontent[idx]); + } + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + bwrite_sucess = true; + + rtlhal->last_hmeboxnum = boxnum + 1; + if (rtlhal->last_hmeboxnum == 4) + rtlhal->last_hmeboxnum = 0; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, + "pHalData->last_hmeboxnum = %d\n", + rtlhal->last_hmeboxnum); + } + if (!rtlpriv) { + pr_err("rtlpriv bad\n"); + return; + } + if (!rtlhal) { + pr_err("rtlhal bad\n"); + return; + } + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + rtlhal->h2c_setinprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + + RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n"); +} + +void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 tmp_cmdbuf[2]; + + if (!rtlhal->fw_ready) { + RT_ASSERT(false, + "return H2C cmd because of Fw download fail!!!\n"); + return; + } + memset(tmp_cmdbuf, 0, 8); + memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len); + _rtl8723be_fill_h2c_command(hw, element_id, cmd_len, + (u8 *)&tmp_cmdbuf); + return; +} + +void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 u1_h2c_set_pwrmode[H2C_8723BE_PWEMODE_LENGTH] = { 0 }; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 rlbm, power_state = 0; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode); + + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0)); + rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM = 2.*/ + SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm); + SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, + (rtlpriv->mac80211.p2p) ? 
+ ppsc->smart_ps : 1); + SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, + ppsc->reg_max_lps_awakeintvl); + SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0); + if (mode == FW_PS_ACTIVE_MODE) + power_state |= FW_PWR_STATE_ACTIVE; + else + power_state |= FW_PWR_STATE_RF_OFF; + SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode\n", + u1_h2c_set_pwrmode, H2C_8723BE_PWEMODE_LENGTH); + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_SETPWRMODE, + H2C_8723BE_PWEMODE_LENGTH, + u1_h2c_set_pwrmode); +} + +static bool _rtl8723be_cmd_send_packet(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + struct rtl_tx_desc *pdesc; + struct sk_buff *pskb = NULL; + u8 own; + unsigned long flags; + + ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + pskb = __skb_dequeue(&ring->queue); + if (pskb) + kfree_skb(pskb); + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + + pdesc = &ring->desc[0]; + own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN); + + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + + __skb_queue_tail(&ring->queue, skb); + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); + + return true; +} +#define BEACON_PG 0 /* ->1 */ +#define PSPOLL_PG 2 +#define NULL_PG 3 +#define PROBERSP_PG 4 /* ->5 */ + +#define TOTAL_RESERVED_PKT_LEN 768 + +static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = { + /* page 0 beacon */ + 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78, + 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65, + 0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B, + 0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06, + 0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32, + 0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, + 0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C, + 0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50, + 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, + 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00, + + /* page 1 beacon */ + 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 2 ps-poll */ + 0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B, + 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 3 null */ + 0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B, + 0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78, + 0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x72, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 4 probe_resp */ + 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10, + 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42, + 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00, + 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00, + 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69, + 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C, + 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96, + 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A, + 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C, + 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18, + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02, + 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + /* page 5 probe_resp */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool dl_finished) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct sk_buff *skb = NULL; + + u32 totalpacketlen; + bool rtstatus; + u8 u1rsvdpageloc[5] = { 0 }; + bool dlok = false; + + u8 *beacon; + u8 *p_pspoll; + u8 *nullfunc; + u8 *p_probersp; + /*--------------------------------------------------------- + * (1) beacon + 
*--------------------------------------------------------- + */ + beacon = &reserved_page_packet[BEACON_PG * 128]; + SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); + SET_80211_HDR_ADDRESS3(beacon, mac->bssid); + + /*------------------------------------------------------- + * (2) ps-poll + *------------------------------------------------------- + */ + p_pspoll = &reserved_page_packet[PSPOLL_PG * 128]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); + + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); + + /*-------------------------------------------------------- + * (3) null data + *-------------------------------------------------------- + */ + nullfunc = &reserved_page_packet[NULL_PG * 128]; + SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); + SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); + SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); + + /*--------------------------------------------------------- + * (4) probe response + *--------------------------------------------------------- + */ + p_probersp = &reserved_page_packet[PROBERSP_PG * 128]; + SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); + SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); + + totalpacketlen = TOTAL_RESERVED_PKT_LEN; + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "rtl8723be_set_fw_rsvdpagepkt(): " + "HW_VAR_SET_TX_CMD: ALL\n", + &reserved_page_packet[0], totalpacketlen); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl8723be_set_fw_rsvdpagepkt(): " + "HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3); + + + skb = dev_alloc_skb(totalpacketlen); + memcpy((u8 *)skb_put(skb, totalpacketlen), + &reserved_page_packet, totalpacketlen); + + rtstatus = _rtl8723be_cmd_send_packet(hw, skb); + + if (rtstatus) + dlok = true; + + if (dlok) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Set RSVD page location to Fw.\n"); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n", + u1rsvdpageloc, 3); + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RSVDPAGE, + sizeof(u1rsvdpageloc), u1rsvdpageloc); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set RSVD page location to Fw FAIL!!!!!!.\n"); + } +} + +/*Should check FW support p2p or not.*/ +static void rtl8723be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, + u8 ctwindow) +{ + u8 u1_ctwindow_period[1] = {ctwindow}; + + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_CTW_CMD, 1, + u1_ctwindow_period); +} + +void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, + u8 p2p_ps_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info); + struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload; + u8 i; + u16 ctwindow; + u32 start_time, tsf_low; + + switch (p2p_ps_state) { + case P2P_PS_DISABLE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n"); + memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t)); + break; + case P2P_PS_ENABLE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n"); + /* update CTWindow value. 
*/ + if (p2pinfo->ctwindow > 0) { + p2p_ps_offload->ctwindow_en = 1; + ctwindow = p2pinfo->ctwindow; + rtl8723be_set_p2p_ctw_period_cmd(hw, ctwindow); + } + /* hw only support 2 set of NoA */ + for (i = 0; i < p2pinfo->noa_num; i++) { + /* To control the register setting + * for which NOA + */ + rtl_write_byte(rtlpriv, 0x5cf, (i << 4)); + if (i == 0) + p2p_ps_offload->noa0_en = 1; + else + p2p_ps_offload->noa1_en = 1; + + /* config P2P NoA Descriptor Register */ + rtl_write_dword(rtlpriv, 0x5E0, + p2pinfo->noa_duration[i]); + rtl_write_dword(rtlpriv, 0x5E4, + p2pinfo->noa_interval[i]); + + /*Get Current TSF value */ + tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + start_time = p2pinfo->noa_start_time[i]; + if (p2pinfo->noa_count_type[i] != 1) { + while (start_time <= (tsf_low + (50 * 1024))) { + start_time += p2pinfo->noa_interval[i]; + if (p2pinfo->noa_count_type[i] != 255) + p2pinfo->noa_count_type[i]--; + } + } + rtl_write_dword(rtlpriv, 0x5E8, start_time); + rtl_write_dword(rtlpriv, 0x5EC, + p2pinfo->noa_count_type[i]); + } + if ((p2pinfo->opp_ps == 1) || + (p2pinfo->noa_num > 0)) { + /* rst p2p circuit */ + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4)); + + p2p_ps_offload->offload_en = 1; + + if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) { + p2p_ps_offload->role = 1; + p2p_ps_offload->allstasleep = 0; + } else { + p2p_ps_offload->role = 0; + } + p2p_ps_offload->discovery = 0; + } + break; + case P2P_PS_SCAN: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n"); + p2p_ps_offload->discovery = 1; + break; + case P2P_PS_SCAN_DONE: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n"); + p2p_ps_offload->discovery = 0; + p2pinfo->p2p_ps_state = P2P_PS_ENABLE; + break; + default: + break; + } + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_OFFLOAD, 1, + (u8 *)p2p_ps_offload); +} + +void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) +{ + u8 u1_joinbssrpt_parm[1] = { 0 }; + + SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); + + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_JOINBSSRPT, 1, + u1_joinbssrpt_parm); +} + +void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, + u8 ap_offload_enable) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 u1_apoffload_parm[H2C_8723BE_AP_OFFLOAD_LENGTH] = { 0 }; + + SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable); + SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid); + SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0); + + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_AP_OFFLOAD, + H2C_8723BE_AP_OFFLOAD_LENGTH, u1_apoffload_parm); +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/fw.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/fw.h @@ -0,0 +1,248 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 
2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE__FW__H__ +#define __RTL8723BE__FW__H__ + +#define FW_8192C_SIZE 0x8000 +#define FW_8192C_START_ADDRESS 0x1000 +#define FW_8192C_END_ADDRESS 0x5FFF +#define FW_8192C_PAGE_SIZE 4096 +#define FW_8192C_POLLING_DELAY 5 +#define FW_8192C_POLLING_TIMEOUT_COUNT 6000 + +#define IS_FW_HEADER_EXIST(_pfwhdr) \ + ((_pfwhdr->signature&0xFFF0) == 0x5300) +#define USE_OLD_WOWLAN_DEBUG_FW 0 + +#define H2C_8723BE_RSVDPAGE_LOC_LEN 5 +#define H2C_8723BE_PWEMODE_LENGTH 5 +#define H2C_8723BE_JOINBSSRPT_LENGTH 1 +#define H2C_8723BE_AP_OFFLOAD_LENGTH 3 +#define H2C_8723BE_WOWLAN_LENGTH 3 +#define H2C_8723BE_KEEP_ALIVE_CTRL_LENGTH 3 +#if (USE_OLD_WOWLAN_DEBUG_FW == 0) +#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 1 +#else +#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN 3 +#endif +#define H2C_8723BE_AOAC_GLOBAL_INFO_LEN 2 +#define H2C_8723BE_AOAC_RSVDPAGE_LOC_LEN 7 + + +/* Fw PS state for RPWM. +*BIT[2:0] = HW state +*BIT[3] = Protocol PS state, 1: register active state , 0: register sleep state +*BIT[4] = sub-state +*/ +#define FW_PS_GO_ON BIT(0) +#define FW_PS_TX_NULL BIT(1) +#define FW_PS_RF_ON BIT(2) +#define FW_PS_REGISTER_ACTIVE BIT(3) + +#define FW_PS_DPS BIT(0) +#define FW_PS_LCLK (FW_PS_DPS) +#define FW_PS_RF_OFF BIT(1) +#define FW_PS_ALL_ON BIT(2) +#define FW_PS_ST_ACTIVE BIT(3) +#define FW_PS_ISR_ENABLE BIT(4) +#define FW_PS_IMR_ENABLE BIT(5) + + +#define FW_PS_ACK BIT(6) +#define FW_PS_TOGGLE BIT(7) + + /* 88E RPWM value*/ + /* BIT[0] = 1: 32k, 0: 40M*/ +#define FW_PS_CLOCK_OFF BIT(0) /* 32k*/ +#define FW_PS_CLOCK_ON 0 /*40M*/ + +#define FW_PS_STATE_MASK (0x0F) +#define FW_PS_STATE_HW_MASK (0x07) +/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/ +#define FW_PS_STATE_INT_MASK (0x3F) + +#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x)) +#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x)) +#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x)) +#define FW_PS_ISR_VAL(x) ((x) & 0x70) +#define FW_PS_IMR_MASK(x) ((x) & 0xDF) +#define FW_PS_KEEP_IMR(x) ((x) & 0x20) + + +#define FW_PS_STATE_S0 (FW_PS_DPS) +#define FW_PS_STATE_S1 (FW_PS_LCLK) +#define FW_PS_STATE_S2 (FW_PS_RF_OFF) +#define FW_PS_STATE_S3 (FW_PS_ALL_ON) +#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON)) + +/* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/ +#define FW_PS_STATE_ALL_ON_88E (FW_PS_CLOCK_ON) +/* (FW_PS_RF_ON)*/ +#define FW_PS_STATE_RF_ON_88E (FW_PS_CLOCK_ON) +/* 0x0*/ +#define FW_PS_STATE_RF_OFF_88E (FW_PS_CLOCK_ON) +/* (FW_PS_STATE_RF_OFF)*/ +#define FW_PS_STATE_RF_OFF_LOW_PWR_88E (FW_PS_CLOCK_OFF) + +#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4) +#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3) +#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2) +#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1) + + +/* For 88E H2C PwrMode Cmd ID 5.*/ +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK) +#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON)) +#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON)) +#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE)) +#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40) + +#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0))) + +#define IS_IN_LOW_POWER_STATE_88E(fwpsstate) \ + (FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF) + +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define 
FW_PWR_STATE_RF_OFF 0 + +#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0)) + +#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 4, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 5, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 6, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val) +#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) + + +#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val) +#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val) +#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val) +#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd) \ + LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8) + +#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) + +/* AP_OFFLOAD */ +#define SET_H2CCMD_AP_OFFLOAD_ON(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) + +/* Keep Alive Control*/ +#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) +#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__ph2ccmd, __val)\ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) +#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) + +/*REMOTE_WAKE_CTRL */ +#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val) +#if (USE_OLD_WOWLAN_DEBUG_FW == 0) +#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__ph2ccmd, __val)\ + 
SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val) +#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__ph2ccmd, __val)\ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val) +#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__ph2ccmd, __val)\ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val) +#else +#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#endif + +/* GTK_OFFLOAD */ +#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) + +/* AOAC_RSVDPAGE_LOC */ +#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_REM_WAKE_CTRL_INFO(__ph2ccmd, __val)\ + SET_BITS_TO_LE_1BYTE((__ph2ccmd), 0, 8, __val) +#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) +#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val) +#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val) + +void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, + u8 ap_offload_enable); +void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); +void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw); +void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, + bool dl_finished); +void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +int rtl8723be_download_fw(struct ieee80211_hw *hw, + bool buse_wake_on_wlan_fw); +void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, + u8 p2p_ps_state); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/hw.c @@ -0,0 +1,2529 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../regd.h" +#include "../cam.h" +#include "../ps.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "../rtl8723com/dm_common.h" +#include "fw.h" +#include "../rtl8723com/fw_common.h" +#include "led.h" +#include "hw.h" +#include "pwrseqcmd.h" +#include "pwrseq.h" +#include "../btcoexist/rtl_btc.h" + +#define LLT_CONFIG 5 + +static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + while (skb_queue_len(&ring->queue)) { + struct rtl_tx_desc *entry = &ring->desc[ring->idx]; + struct sk_buff *skb = __skb_dequeue(&ring->queue); + + pci_unmap_single(rtlpci->pdev, + rtlpriv->cfg->ops->get_desc( + (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } +} + +static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpci->reg_bcn_ctrl_val |= set_bits; + rtlpci->reg_bcn_ctrl_val &= ~clear_bits; + + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); +} + +static void _rtl8723be_stop_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6))); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} + +static void _rtl8723be_resume_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte |= BIT(1); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} + +static void _rtl8723be_enable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(1)); +} + +static void _rtl8723be_disable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(1), 0); +} + +static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val, + bool need_turn_off_ckk) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool support_remote_wake_up; + u32 count = 0, isr_regaddr, content; + bool schedule_timer = need_turn_off_ckk; + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *)(&support_remote_wake_up)); + + if (!rtlhal->fw_ready) + return; + if (!rtlpriv->psc.fw_current_inpsmode) + return; + + while (1) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (rtlhal->fw_clk_change_in_progress) { + while (rtlhal->fw_clk_change_in_progress) { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + count++; + udelay(100); + if (count > 1000) + return; + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + } + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + 
rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + break; + } + } + if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) { + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + if (FW_PS_IS_ACK(rpwm_val)) { + isr_regaddr = REG_HISR; + content = rtl_read_dword(rtlpriv, isr_regaddr); + while (!(content & IMR_CPWM) && (count < 500)) { + udelay(50); + count++; + content = rtl_read_dword(rtlpriv, isr_regaddr); + } + + if (content & IMR_CPWM) { + rtl_write_word(rtlpriv, isr_regaddr, 0x0100); + rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Receive CPWM INT!!! Set " + "pHalData->FwPSState = %X\n", + rtlhal->fw_ps_state); + } + } + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + if (schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + } else { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } +} + +static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + enum rf_pwrstate rtstate; + bool schedule_timer = false; + u8 queue; + + if (!rtlhal->fw_ready) + return; + if (!rtlpriv->psc.fw_current_inpsmode) + return; + if (!rtlhal->allow_sw_to_change_hwclc) + return; + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate)); + if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF) + return; + + for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) { + ring = &rtlpci->tx_ring[queue]; + if (skb_queue_len(&ring->queue)) { + schedule_timer = true; + break; + } + } + if (schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + return; + } + if (FW_PS_STATE(rtlhal->fw_ps_state) != + FW_PS_STATE_RF_OFF_LOW_PWR_88E) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (!rtlhal->fw_clk_change_in_progress) { + rtlhal->fw_clk_change_in_progress = true; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val); + rtl_write_word(rtlpriv, REG_HISR, 0x0100); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + } +} + +static void _rtl8723be_set_fw_ps_rf_on(struct ieee80211_hw *hw) +{ + u8 rpwm_val = 0; + rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK); + _rtl8723be_set_fw_clock_on(hw, rpwm_val, true); +} + +static void _rtl8723be_fwlps_leave(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inps = false; + u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE; + + if (ppsc->low_power_enable) { + rpwm_val = (FW_PS_STATE_ALL_ON_88E | FW_PS_ACK);/* RF on */ + _rtl8723be_set_fw_clock_on(hw, rpwm_val, false); + rtlhal->allow_sw_to_change_hwclc = false; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, 
HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } else { + rpwm_val = FW_PS_STATE_ALL_ON_88E; /* RF on */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + } +} + +static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool fw_current_inps = true; + u8 rpwm_val; + + if (ppsc->low_power_enable) { + rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + rtlhal->allow_sw_to_change_hwclc = true; + _rtl8723be_set_fw_clock_off(hw, rpwm_val); + + } else { + rpwm_val = FW_PS_STATE_RF_OFF_88E; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *)(&fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&ppsc->fwctrl_psmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *)(&rpwm_val)); + } +} + +void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + switch (variable) { + case HW_VAR_RCR: + *((u32 *)(val)) = rtlpci->receive_config; + break; + case HW_VAR_RF_STATE: + *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; + break; + case HW_VAR_FWLPS_RF_ON: { + enum rf_pwrstate rfstate; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { + *((bool *)(val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *)(val)) = false; + else + *((bool *)(val)) = true; + } + break; } + case HW_VAR_FW_PSMODE_STATUS: + *((bool *)(val)) = ppsc->fw_current_inpsmode; + break; + case HW_VAR_CORRECT_TSF: { + u64 tsf; + u32 *ptsf_low = (u32 *)&tsf; + u32 *ptsf_high = ((u32 *)&tsf) + 1; + + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + *((u64 *)(val)) = tsf; + + break; } + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process %x\n", variable); + break; + } +} + +void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 idx; + + switch (variable) { + case HW_VAR_ETHER_ADDR: + for (idx = 0; idx < ETH_ALEN; idx++) + rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]); + break; + case HW_VAR_BASIC_RATE: { + u16 rate_cfg = ((u16 *)val)[0]; + u8 rate_index = 0; + rate_cfg = rate_cfg & 0x15f; + rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff); + while (rate_cfg > 0x1) { + rate_cfg = (rate_cfg >> 1); + rate_index++; + } + rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index); + break; } + case HW_VAR_BSSID: + for (idx = 0; idx < ETH_ALEN; idx++) + rtl_write_byte(rtlpriv, (REG_BSSID 
+ idx), val[idx]); + break; + case HW_VAR_SIFS: + rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); + + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + + if (!mac->ht_enable) + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e); + else + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + *((u16 *)val)); + break; + case HW_VAR_SLOT_TIME: { + u8 e_aci; + + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "HW_VAR_SLOT_TIME %x\n", val[0]); + + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + + for (e_aci = 0; e_aci < AC_MAX; e_aci++) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + (u8 *)(&e_aci)); + } + break; } + case HW_VAR_ACK_PREAMBLE: { + u8 reg_tmp; + u8 short_preamble = (bool) (*(u8 *)val); + reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL + 2); + if (short_preamble) { + reg_tmp |= 0x02; + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); + } else { + reg_tmp &= 0xFD; + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); + } + break; } + case HW_VAR_WPA_CONFIG: + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val)); + break; + case HW_VAR_AMPDU_MIN_SPACE: { + u8 min_spacing_to_set; + u8 sec_min_space; + + min_spacing_to_set = *((u8 *)val); + if (min_spacing_to_set <= 7) { + sec_min_space = 0; + + if (min_spacing_to_set < sec_min_space) + min_spacing_to_set = sec_min_space; + + mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) | + min_spacing_to_set); + + *val = min_spacing_to_set; + + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", + mac->min_space_cfg); + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + } + break; } + case HW_VAR_SHORTGI_DENSITY: { + u8 density_to_set; + + density_to_set = *((u8 *)val); + mac->min_space_cfg |= (density_to_set << 3); + + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_SHORTGI_DENSITY: %#x\n", + mac->min_space_cfg); + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + break; } + case HW_VAR_AMPDU_FACTOR: { + u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9}; + u8 factor_toset; + u8 *p_regtoset = NULL; + u8 index = 0; + + p_regtoset = regtoset_normal; + + factor_toset = *((u8 *)val); + if (factor_toset <= 3) { + factor_toset = (1 << (factor_toset + 2)); + if (factor_toset > 0xf) + factor_toset = 0xf; + + for (index = 0; index < 4; index++) { + if ((p_regtoset[index] & 0xf0) > + (factor_toset << 4)) + p_regtoset[index] = + (p_regtoset[index] & 0x0f) | + (factor_toset << 4); + + if ((p_regtoset[index] & 0x0f) > factor_toset) + p_regtoset[index] = + (p_regtoset[index] & 0xf0) | + (factor_toset); + + rtl_write_byte(rtlpriv, + (REG_AGGLEN_LMT + index), + p_regtoset[index]); + } + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + "Set HW_VAR_AMPDU_FACTOR: %#x\n", + factor_toset); + } + break; } + case HW_VAR_AC_PARAM: { + u8 e_aci = *((u8 *)val); + rtl8723_dm_init_edca_turbo(hw); + + if (rtlpci->acm_method != EACMWAY2_SW) + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL, + (u8 *)(&e_aci)); + break; } + case HW_VAR_ACM_CTRL: { + u8 e_aci = *((u8 *)val); + union aci_aifsn *p_aci_aifsn = + (union aci_aifsn *)(&(mac->ac[0].aifs)); + u8 acm = p_aci_aifsn->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + acm_ctrl = + acm_ctrl | ((rtlpci->acm_method == 2) ? 
0x0 : 0x1); + + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= ACMHW_BEQEN; + break; + case AC2_VI: + acm_ctrl |= ACMHW_VIQEN; + break; + case AC3_VO: + acm_ctrl |= ACMHW_VOQEN; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "HW_VAR_ACM_CTRL acm set " + "failed: eACI is %d\n", acm); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~ACMHW_BEQEN); + break; + case AC2_VI: + acm_ctrl &= (~ACMHW_VIQEN); + break; + case AC3_VO: + acm_ctrl &= (~ACMHW_BEQEN); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + } + RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, + "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] " + "Write 0x%X\n", acm_ctrl); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + break; } + case HW_VAR_RCR: + rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]); + rtlpci->receive_config = ((u32 *)(val))[0]; + break; + case HW_VAR_RETRY_LIMIT: { + u8 retry_limit = ((u8 *)(val))[0]; + + rtl_write_word(rtlpriv, REG_RL, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + break; } + case HW_VAR_DUAL_TSF_RST: + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); + break; + case HW_VAR_EFUSE_BYTES: + rtlefuse->efuse_usedbytes = *((u16 *)val); + break; + case HW_VAR_EFUSE_USAGE: + rtlefuse->efuse_usedpercentage = *((u8 *)val); + break; + case HW_VAR_IO_CMD: + rtl8723be_phy_set_io_cmd(hw, (*(enum io_type *)val)); + break; + case HW_VAR_SET_RPWM: { + u8 rpwm_val; + + rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); + udelay(1); + + if (rpwm_val & BIT(7)) { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, (*(u8 *)val)); + } else { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + ((*(u8 *)val) | BIT(7))); + } + break; } + case HW_VAR_H2C_FW_PWRMODE: + rtl8723be_set_fw_pwrmode_cmd(hw, (*(u8 *)val)); + break; + case HW_VAR_FW_PSMODE_STATUS: + ppsc->fw_current_inpsmode = *((bool *)val); + break; + case HW_VAR_RESUME_CLK_ON: + _rtl8723be_set_fw_ps_rf_on(hw); + break; + case HW_VAR_FW_LPS_ACTION: { + bool enter_fwlps = *((bool *)val); + + if (enter_fwlps) + _rtl8723be_fwlps_enter(hw); + else + _rtl8723be_fwlps_leave(hw); + + break; } + case HW_VAR_H2C_FW_JOINBSSRPT: { + u8 mstatus = (*(u8 *)val); + u8 tmp_regcr, tmp_reg422, bcnvalid_reg; + u8 count = 0, dlbcn_count = 0; + bool recover = false; + + if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL); + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr | BIT(0))); + + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0); + + tmp_reg422 = rtl_read_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 & (~BIT(6))); + if (tmp_reg422 & BIT(6)) + recover = true; + + do { + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL + 2); + rtl_write_byte(rtlpriv, REG_TDECTRL + 2, + (bcnvalid_reg | BIT(0))); + _rtl8723be_return_beacon_queue_skb(hw); + + rtl8723be_set_fw_rsvdpagepkt(hw, 0); + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL + 2); + count = 0; + while (!(bcnvalid_reg & BIT(0)) && count < 20) { + count++; + udelay(10); + bcnvalid_reg = rtl_read_byte(rtlpriv, + REG_TDECTRL + 2); + } + dlbcn_count++; + } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5); + + if (bcnvalid_reg & BIT(0)) + rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0)); + + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4)); + + if (recover) { + 
rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422); + } + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr & ~(BIT(0)))); + } + rtl8723be_set_fw_joinbss_report_cmd(hw, (*(u8 *)val)); + break; } + case HW_VAR_H2C_FW_P2P_PS_OFFLOAD: + rtl8723be_set_p2p_ps_offload_cmd(hw, (*(u8 *)val)); + break; + case HW_VAR_AID: { + u16 u2btmp; + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, + (u2btmp | mac->assoc_id)); + break; } + case HW_VAR_CORRECT_TSF: { + u8 btype_ibss = ((u8 *)(val))[0]; + + if (btype_ibss) + _rtl8723be_stop_tx_beacon(hw); + + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3)); + + rtl_write_dword(rtlpriv, REG_TSFTR, + (u32) (mac->tsf & 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32) ((mac->tsf >> 32) & 0xffffffff)); + + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0); + + if (btype_ibss) + _rtl8723be_resume_tx_beacon(hw); + break; } + case HW_VAR_KEEP_ALIVE: { + u8 array[2]; + array[0] = 0xff; + array[1] = *((u8 *)val); + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_KEEP_ALIVE_CTRL, + 2, array); + break; } + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process %x\n", + variable); + break; + } +} + +static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool status = true; + int count = 0; + u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | + _LLT_OP(_LLT_WRITE_ACCESS); + + rtl_write_dword(rtlpriv, REG_LLT_INIT, value); + + do { + value = rtl_read_dword(rtlpriv, REG_LLT_INIT); + if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) + break; + + if (count > POLLING_LLT_THRESHOLD) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Failed to polling write LLT done at " + "address %d!\n", address); + status = false; + break; + } + } while (++count); + + return status; +} + +static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned short i; + u8 txpktbuf_bndy; + u8 maxpage; + bool status; + + maxpage = 255; + txpktbuf_bndy = 245; + + rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, + (0x27FF0000 | txpktbuf_bndy)); + rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_PBP, 0x31); + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); + + for (i = 0; i < (txpktbuf_bndy - 1); i++) { + status = _rtl8723be_llt_write(hw, i, i + 1); + if (!status) + return status; + } + status = _rtl8723be_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + + if (!status) + return status; + + for (i = txpktbuf_bndy; i < maxpage; i++) { + status = _rtl8723be_llt_write(hw, i, (i + 1)); + if (!status) + return status; + } + status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy); + if (!status) + return status; + + rtl_write_dword(rtlpriv, REG_RQPN, 0x80e40808); + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00); + + return true; +} + +static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0); + + if (rtlpriv->rtlhal.up_first_time) + return; + + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) + rtl8723be_sw_led_on(hw, pled0); + else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) + 
rtl8723be_sw_led_on(hw, pled0); + else + rtl8723be_sw_led_off(hw, pled0); +} + +static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + unsigned char bytetmp; + unsigned short wordtmp; + u16 retry = 0; + bool mac_func_enable; + + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); + + /*Auto Power Down to CHIP-off State*/ + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7)); + rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp); + + bytetmp = rtl_read_byte(rtlpriv, REG_CR); + if (bytetmp == 0xFF) + mac_func_enable = true; + else + mac_func_enable = false; + + /* HW Power on sequence */ + if (!rtlbe_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, + PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, + RTL8723_NIC_ENABLE_FLOW)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "init MAC Fail as power on failure\n"); + return false; + } + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4); + rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp); + + bytetmp = rtl_read_byte(rtlpriv, REG_CR); + bytetmp = 0xff; + rtl_write_byte(rtlpriv, REG_CR, bytetmp); + mdelay(2); + + bytetmp = rtl_read_byte(rtlpriv, REG_HWSEQ_CTRL); + bytetmp |= 0x7f; + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp); + mdelay(2); + + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3); + if (bytetmp & BIT(0)) { + bytetmp = rtl_read_byte(rtlpriv, 0x7c); + bytetmp |= BIT(6); + rtl_write_byte(rtlpriv, 0x7c, bytetmp); + } + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR); + bytetmp |= BIT(3); + rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp); + bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1); + bytetmp &= ~BIT(4); + rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp); + + bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+3); + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+3, bytetmp | 0x77); + + rtl_write_word(rtlpriv, REG_CR, 0x2ff); + + if (!mac_func_enable) { + if (!_rtl8723be_llt_table_init(hw)) + return false; + } + rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff); + + /* Enable FW Beamformer Interrupt */ + bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3); + rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6)); + + wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL); + wordtmp &= 0xf; + wordtmp |= 0xF5B1; + rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp); + + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F); + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF); + rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config); + + rtl_write_byte(rtlpriv, 0x4d0, 0x0); + + rtl_write_dword(rtlpriv, REG_BCNQ_DESA, + ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_MGQ_DESA, + (u64) rtlpci->tx_ring[MGNT_QUEUE].dma & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VOQ_DESA, + (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VIQ_DESA, + (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BEQ_DESA, + (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BKQ_DESA, + (u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_HQ_DESA, + (u64) rtlpci->tx_ring[HIGH_QUEUE].dma & + DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_RX_DESA, + (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma & + DMA_BIT_MASK(32)); + + bytetmp = rtl_read_byte(rtlpriv, 
REG_PCIE_CTRL_REG + 3); + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, bytetmp | 0x77); + + rtl_write_dword(rtlpriv, REG_INT_MIG, 0); + + bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); + rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6)); + + rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3); + + do { + retry++; + bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL); + } while ((retry < 200) && (bytetmp & BIT(7))); + + _rtl8723be_gen_refresh_led_state(hw); + + rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0); + + bytetmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & ~BIT(2)); + + return true; +} + +static void _rtl8723be_hw_configure(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 reg_bw_opmode; + u32 reg_ratr, reg_prsr; + + reg_bw_opmode = BW_OPMODE_20MHZ; + reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | + RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS; + reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; + + rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr); + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF); +} + +static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + rtl_write_byte(rtlpriv, 0x34b, 0x93); + rtl_write_word(rtlpriv, 0x350, 0x870c); + rtl_write_byte(rtlpriv, 0x352, 0x1); + + if (ppsc->support_backdoor) + rtl_write_byte(rtlpriv, 0x349, 0x1b); + else + rtl_write_byte(rtlpriv, 0x349, 0x03); + + rtl_write_word(rtlpriv, 0x350, 0x2718); + rtl_write_byte(rtlpriv, 0x352, 0x1); +} + +void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 sec_reg_value; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm); + + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "not open hw encryption\n"); + return; + } + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; + + if (rtlpriv->sec.use_defaultkey) { + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; + } + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); + + rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); + + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR-value %x\n", + sec_reg_value); + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); +} + +int rtl8723be_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + bool rtstatus = true; + int err; + u8 tmp_u1b; + unsigned long flags; + + /* reenable interrupts to not interfere with other devices */ + local_save_flags(flags); + local_irq_enable(); + + rtlpriv->rtlhal.being_init_adapter = true; + rtlpriv->intf_ops->disable_aspm(hw); + rtstatus = _rtl8723be_init_mac(hw); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n"); + err = 1; + goto exit; + } + tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG); + tmp_u1b &= 0x7F; + rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b); + + err = rtl8723_download_fw(hw, true); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Failed to download FW. 
Init HW without FW now..\n"); + err = 1; + rtlhal->fw_ready = false; + goto exit; + } else { + rtlhal->fw_ready = true; + } + rtlhal->last_hmeboxnum = 0; + rtl8723be_phy_mac_config(hw); + /* because the last function modifies RCR, we update the + * rcr variable here; otherwise TP will be unstable because + * receive_config is wrong: RX RCR_ACRC32 will cause unstable TP + * and RCR_APP_ICV will cause mac80211 unassoc for Cisco 1252 + */ + rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR); + rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV); + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + + rtl8723be_phy_bb_config(hw); + rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; + rtl8723be_phy_rf_config(hw); + + rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, + RF_CHNLBW, RFREG_OFFSET_MASK); + rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1, + RF_CHNLBW, RFREG_OFFSET_MASK); + rtlphy->rfreg_chnlval[0] &= 0xFFF03FF; + rtlphy->rfreg_chnlval[0] |= (BIT(10) | BIT(11)); + + rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1); + rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); + _rtl8723be_hw_configure(hw); + rtl_cam_reset_all_entry(hw); + rtl8723be_enable_hw_security_config(hw); + + ppsc->rfpwr_state = ERFON; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + _rtl8723be_enable_aspm_back_door(hw); + rtlpriv->intf_ops->enable_aspm(hw); + + rtl8723be_bt_hw_init(hw); + + rtl_set_bbreg(hw, 0x64, BIT(20), 0); + rtl_set_bbreg(hw, 0x64, BIT(24), 0); + + rtl_set_bbreg(hw, 0x40, BIT(4), 0); + rtl_set_bbreg(hw, 0x40, BIT(3), 1); + + rtl_set_bbreg(hw, 0x944, BIT(0)|BIT(1), 0x3); + rtl_set_bbreg(hw, 0x930, 0xff, 0x77); + + rtl_set_bbreg(hw, 0x38, BIT(11), 0x1); + + rtl_set_bbreg(hw, 0xb2c, 0xffffffff, 0x80000000); + + if (ppsc->rfpwr_state == ERFON) { + rtl8723be_dm_check_txpower_tracking(hw); + rtl8723be_phy_lc_calibrate(hw); + } + tmp_u1b = efuse_read_1byte(hw, 0x1FA); + if (!(tmp_u1b & BIT(0))) { + rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n"); + } + if (!(tmp_u1b & BIT(4))) { + tmp_u1b = rtl_read_byte(rtlpriv, 0x16); + tmp_u1b &= 0x0F; + rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80); + udelay(10); + rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n"); + } + rtl8723be_dm_init(hw); +exit: + local_irq_restore(flags); + rtlpriv->rtlhal.being_init_adapter = false; + return err; +} + +static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + enum version_8723e version = VERSION_UNKNOWN; + u8 count = 0; + u8 value8; + u32 value32; + + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0); + + value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 2); + rtl_write_byte(rtlpriv, REG_APS_FSMCO + 2, value8 | BIT(0)); + + value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); + rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, value8 | BIT(0)); + + value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); + while (((value8 & BIT(0))) && (count++ < 100)) { + udelay(10); + value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1); + } + count = 0; + value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION); + while ((value8 == 0) && (count++ < 50)) { + value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION); + mdelay(1); + } + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1); + if ((value32 & (CHIP_8723B)) != CHIP_8723B) + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unknown 
chip version\n"); + else + version = (enum version_8723e) VERSION_TEST_CHIP_1T1R_8723B; + + rtlphy->rf_type = RF_1T1R; + + value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION); + if (value8 >= 0x02) + version |= BIT(3); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ? + "RF_2T2R" : "RF_1T1R"); + + return version; +} + +static int _rtl8723be_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc; + enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + + rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0); + RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD, + "clear 0x550 when setting HW_VAR_MEDIA_STATUS\n"); + + if (type == NL80211_IFTYPE_UNSPECIFIED || + type == NL80211_IFTYPE_STATION) { + _rtl8723be_stop_tx_beacon(hw); + _rtl8723be_enable_bcn_sub_func(hw); + } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) { + _rtl8723be_resume_tx_beacon(hw); + _rtl8723be_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: " + "No such media status(%x).\n", type); + } + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + bt_msr |= MSR_NOLINK; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to NO LINK!\n"); + break; + case NL80211_IFTYPE_ADHOC: + bt_msr |= MSR_ADHOC; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to Ad Hoc!\n"); + break; + case NL80211_IFTYPE_STATION: + bt_msr |= MSR_INFRA; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to STA!\n"); + break; + case NL80211_IFTYPE_AP: + bt_msr |= MSR_AP; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to AP!\n"); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Network type %d not supported!\n", type); + return 1; + } + rtl_write_byte(rtlpriv, (MSR), bt_msr); + rtlpriv->cfg->ops->led_control(hw, ledaction); + if ((bt_msr & 0x03) == MSR_AP) + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); + else + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); + return 0; +} + +void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rcr = rtlpci->receive_config; + + if (rtlpriv->psc.rfpwr_state != ERFON) + return; + + if (check_bssid) { + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(&reg_rcr)); + _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4)); + } else if (!check_bssid) { + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(&reg_rcr)); + } +} + +int rtl8723be_set_network_type(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (_rtl8723be_set_media_status(hw, type)) + return -EOPNOTSUPP; + + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { + if (type != NL80211_IFTYPE_AP) + rtl8723be_set_check_bssid(hw, true); + } else { + rtl8723be_set_check_bssid(hw, false); + } + return 0; +} + +/* don't set REG_EDCA_BE_PARAM here + * because mac80211 will send pkt when scanning + */ +void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl8723_dm_init_edca_turbo(hw); + switch (aci) { + case AC1_BK: + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); + break; + case AC0_BE: + 
break; + case AC2_VI: + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); + break; + case AC3_VO: + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); + break; + default: + RT_ASSERT(false, "invalid aci: %d !\n", aci); + break; + } +} + +void rtl8723be_enable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; + /* there are some C2H CMDs have been sent + * before system interrupt is enabled, e.g., C2H, CPWM. + * So we need to clear all C2H events that FW has notified, + * otherwise FW won't schedule any commands anymore. + */ + rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0); + /*enable system interrupt*/ + rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF); +} + +void rtl8723be_disable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED); + rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED); + rtlpci->irq_enabled = false; + synchronize_irq(rtlpci->pdev->irq); +} + +static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1b_tmp; + + /* Combo (PCIe + USB) Card and PCIe-MF Card */ + /* 1. Run LPS WL RFOFF flow */ + rtlbe_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW); + + /* 2. 0x1F[7:0] = 0 */ + /* turn off RF */ + rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && + rtlhal->fw_ready) + rtl8723be_firmware_selfreset(hw); + + /* Reset MCU. Suggested by Filen. */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2)))); + + /* g. MCUFWDL 0x80[1:0]= 0 */ + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + + /* HW card disable configuration. */ + rtlbe_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW); + + /* Reset MCU IO Wrapper */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0)); + + /* 7. 
RSV_CTRL 0x1C[7:0] = 0x0E */ + /* lock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); +} + +void rtl8723be_card_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum nl80211_iftype opmode; + + mac->link_state = MAC80211_NOLINK; + opmode = NL80211_IFTYPE_UNSPECIFIED; + _rtl8723be_set_media_status(hw, opmode); + if (rtlpriv->rtlhal.driver_is_goingto_unload || + ppsc->rfoff_reason > RF_CHANGE_BY_PS) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + _rtl8723be_poweroff_adapter(hw); + + /* after power off we should do iqk again */ + rtlpriv->phy.iqk_initialized = false; +} + +void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, *p_inta); + + *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & + rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); +} + +void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval, atim_window; + + bcn_interval = mac->beacon_interval; + atim_window = 2; /*FIX MERGE */ + rtl8723be_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18); + rtl_write_byte(rtlpriv, 0x606, 0x30); + rtl8723be_enable_interrupt(hw); +} + +void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval = mac->beacon_interval; + + RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, + "beacon_interval:%d\n", bcn_interval); + rtl8723be_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl8723be_enable_interrupt(hw); +} + +void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, + "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr); + + if (add_msr) + rtlpci->irq_mask[0] |= add_msr; + if (rm_msr) + rtlpci->irq_mask[0] &= (~rm_msr); + rtl8723be_disable_interrupt(hw); + rtl8723be_enable_interrupt(hw); +} + +static u8 _rtl8723be_get_chnl_group(u8 chnl) +{ + u8 group; + + if (chnl < 3) + group = 0; + else if (chnl < 9) + group = 1; + else + group = 2; + return group; +} + +static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw, + struct txpower_info_2g *pw2g, + struct txpower_info_5g *pw5g, + bool autoload_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "hal_ReadPowerValueFromPROM8723BE(): " + "PROMContent[0x%x]= 0x%x\n", + (addr + 1), hwinfo[addr + 1]); + if (0xFF == hwinfo[addr + 1]) /*YJ, add, 120316*/ + autoload_fail = true; + + if (autoload_fail) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "auto load fail : Use Default value!\n"); + for (path = 0; path < 
MAX_RF_PATH; path++) { + /* 2.4G default value */ + for (group = 0; group < MAX_CHNL_GROUP_24G; group++) { + pw2g->index_cck_base[path][group] = 0x2D; + pw2g->index_bw40_base[path][group] = 0x2D; + } + for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) { + if (cnt == 0) { + pw2g->bw20_diff[path][0] = 0x02; + pw2g->ofdm_diff[path][0] = 0x04; + } else { + pw2g->bw20_diff[path][cnt] = 0xFE; + pw2g->bw40_diff[path][cnt] = 0xFE; + pw2g->cck_diff[path][cnt] = 0xFE; + pw2g->ofdm_diff[path][cnt] = 0xFE; + } + } + } + return; + } + for (path = 0; path < MAX_RF_PATH; path++) { + /*2.4G default value*/ + for (group = 0; group < MAX_CHNL_GROUP_24G; group++) { + pw2g->index_cck_base[path][group] = hwinfo[addr++]; + if (pw2g->index_cck_base[path][group] == 0xFF) + pw2g->index_cck_base[path][group] = 0x2D; + } + for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) { + pw2g->index_bw40_base[path][group] = hwinfo[addr++]; + if (pw2g->index_bw40_base[path][group] == 0xFF) + pw2g->index_bw40_base[path][group] = 0x2D; + } + for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) { + if (cnt == 0) { + pw2g->bw40_diff[path][cnt] = 0; + if (hwinfo[addr] == 0xFF) { + pw2g->bw20_diff[path][cnt] = 0x02; + } else { + pw2g->bw20_diff[path][cnt] = + (hwinfo[addr] & 0xf0) >> 4; + /*bit sign number to 8 bit sign number*/ + if (pw2g->bw20_diff[path][cnt] & BIT(3)) + pw2g->bw20_diff[path][cnt] |= 0xF0; + } + if (hwinfo[addr] == 0xFF) { + pw2g->ofdm_diff[path][cnt] = 0x04; + } else { + pw2g->ofdm_diff[path][cnt] = + (hwinfo[addr] & 0x0f); + /*bit sign number to 8 bit sign number*/ + if (pw2g->ofdm_diff[path][cnt] & BIT(3)) + pw2g->ofdm_diff[path][cnt] |= + 0xF0; + } + pw2g->cck_diff[path][cnt] = 0; + addr++; + } else { + if (hwinfo[addr] == 0xFF) { + pw2g->bw40_diff[path][cnt] = 0xFE; + } else { + pw2g->bw40_diff[path][cnt] = + (hwinfo[addr] & 0xf0) >> 4; + if (pw2g->bw40_diff[path][cnt] & BIT(3)) + pw2g->bw40_diff[path][cnt] |= + 0xF0; + } + if (hwinfo[addr] == 0xFF) { + pw2g->bw20_diff[path][cnt] = 0xFE; + } else { + pw2g->bw20_diff[path][cnt] = + (hwinfo[addr] & 0x0f); + if (pw2g->bw20_diff[path][cnt] & BIT(3)) + pw2g->bw20_diff[path][cnt] |= + 0xF0; + } + addr++; + + if (hwinfo[addr] == 0xFF) { + pw2g->ofdm_diff[path][cnt] = 0xFE; + } else { + pw2g->ofdm_diff[path][cnt] = + (hwinfo[addr] & 0xf0) >> 4; + if (pw2g->ofdm_diff[path][cnt] & BIT(3)) + pw2g->ofdm_diff[path][cnt] |= + 0xF0; + } + if (hwinfo[addr] == 0xFF) { + pw2g->cck_diff[path][cnt] = 0xFE; + } else { + pw2g->cck_diff[path][cnt] = + (hwinfo[addr] & 0x0f); + if (pw2g->cck_diff[path][cnt] & BIT(3)) + pw2g->cck_diff[path][cnt] |= + 0xF0; + } + addr++; + } + } + /*5G default value*/ + for (group = 0; group < MAX_CHNL_GROUP_5G; group++) { + pw5g->index_bw40_base[path][group] = hwinfo[addr++]; + if (pw5g->index_bw40_base[path][group] == 0xFF) + pw5g->index_bw40_base[path][group] = 0xFE; + } + for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) { + if (cnt == 0) { + pw5g->bw40_diff[path][cnt] = 0; + + if (hwinfo[addr] == 0xFF) { + pw5g->bw20_diff[path][cnt] = 0; + } else { + pw5g->bw20_diff[path][0] = + (hwinfo[addr] & 0xf0) >> 4; + if (pw5g->bw20_diff[path][cnt] & BIT(3)) + pw5g->bw20_diff[path][cnt] |= + 0xF0; + } + if (hwinfo[addr] == 0xFF) { + pw5g->ofdm_diff[path][cnt] = 0x04; + } else { + pw5g->ofdm_diff[path][0] = + (hwinfo[addr] & 0x0f); + if (pw5g->ofdm_diff[path][cnt] & BIT(3)) + pw5g->ofdm_diff[path][cnt] |= + 0xF0; + } + addr++; + } else { + if (hwinfo[addr] == 0xFF) { + pw5g->bw40_diff[path][cnt] = 0xFE; + } else { + pw5g->bw40_diff[path][cnt] = + (hwinfo[addr] & 0xf0) >> 4; + if 
(pw5g->bw40_diff[path][cnt] & BIT(3)) + pw5g->bw40_diff[path][cnt] |= 0xF0; + } + if (hwinfo[addr] == 0xFF) { + pw5g->bw20_diff[path][cnt] = 0xFE; + } else { + pw5g->bw20_diff[path][cnt] = + (hwinfo[addr] & 0x0f); + if (pw5g->bw20_diff[path][cnt] & BIT(3)) + pw5g->bw20_diff[path][cnt] |= 0xF0; + } + addr++; + } + } + if (hwinfo[addr] == 0xFF) { + pw5g->ofdm_diff[path][1] = 0xFE; + pw5g->ofdm_diff[path][2] = 0xFE; + } else { + pw5g->ofdm_diff[path][1] = (hwinfo[addr] & 0xf0) >> 4; + pw5g->ofdm_diff[path][2] = (hwinfo[addr] & 0x0f); + } + addr++; + + if (hwinfo[addr] == 0xFF) + pw5g->ofdm_diff[path][3] = 0xFE; + else + pw5g->ofdm_diff[path][3] = (hwinfo[addr] & 0x0f); + addr++; + + for (cnt = 1; cnt < MAX_TX_COUNT; cnt++) { + if (pw5g->ofdm_diff[path][cnt] == 0xFF) + pw5g->ofdm_diff[path][cnt] = 0xFE; + else if (pw5g->ofdm_diff[path][cnt] & BIT(3)) + pw5g->ofdm_diff[path][cnt] |= 0xF0; + } + } +} + +static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct txpower_info_2g pw2g; + struct txpower_info_5g pw5g; + u8 rf_path, index; + u8 i; + + _rtl8723be_read_power_value_fromprom(hw, &pw2g, &pw5g, autoload_fail, + hwinfo); + + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < 14; i++) { + index = _rtl8723be_get_chnl_group(i+1); + + rtlefuse->txpwrlevel_cck[rf_path][i] = + pw2g.index_cck_base[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pw2g.index_bw40_base[rf_path][index]; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + rtlefuse->txpwr_ht20diff[rf_path][i] = + pw2g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_ht40diff[rf_path][i] = + pw2g.bw40_diff[rf_path][i]; + rtlefuse->txpwr_legacyhtdiff[rf_path][i] = + pw2g.ofdm_diff[rf_path][i]; + } + for (i = 0; i < 14; i++) { + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + "RF(%d)-Ch(%d) [CCK / HT40_1S ] = " + "[0x%x / 0x%x ]\n", rf_path, i, + rtlefuse->txpwrlevel_cck[rf_path][i], + rtlefuse->txpwrlevel_ht40_1s[rf_path][i]); + } + } + if (!autoload_fail) + rtlefuse->eeprom_thermalmeter = + hwinfo[EEPROM_THERMAL_METER_88E]; + else + rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; + + if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) { + rtlefuse->apk_thermalmeterignore = true; + rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER; + } + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter); + + if (!autoload_fail) { + rtlefuse->eeprom_regulatory = + hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/ + if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF) + rtlefuse->eeprom_regulatory = 0; + } else { + rtlefuse->eeprom_regulatory = 0; + } + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory); +} + +static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, + bool pseudo_test) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u16 i, usvalue; + u8 hwinfo[HWSET_MAX_SIZE]; + u16 eeprom_id; + bool is_toshiba_smid1 = false; + bool is_toshiba_smid2 = false; + bool is_samsung_smid = false; + bool is_lenovo_smid = false; + u16 toshiba_smid1[] = { + 0x6151, 0x6152, 0x6154, 0x6155, 0x6177, 0x6178, 0x6179, 0x6180, + 0x7151, 0x7152, 0x7154, 0x7155, 0x7177, 0x7178, 0x7179, 0x7180, + 0x8151, 0x8152, 0x8154, 
0x8155, 0x8181, 0x8182, 0x8184, 0x8185, + 0x9151, 0x9152, 0x9154, 0x9155, 0x9181, 0x9182, 0x9184, 0x9185 + }; + u16 toshiba_smid2[] = { + 0x6181, 0x6184, 0x6185, 0x7181, 0x7182, 0x7184, 0x7185, 0x8181, + 0x8182, 0x8184, 0x8185, 0x9181, 0x9182, 0x9184, 0x9185 + }; + u16 samsung_smid[] = { + 0x6191, 0x6192, 0x6193, 0x7191, 0x7192, 0x7193, 0x8191, 0x8192, + 0x8193, 0x9191, 0x9192, 0x9193 + }; + u16 lenovo_smid[] = { + 0x8195, 0x9195, 0x7194, 0x8200, 0x8201, 0x8202, 0x9199, 0x9200 + }; + + if (pseudo_test) { + /* needs to be added */ + return; + } + if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + rtl_efuse_shadow_map_update(hw); + + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE); + } else if (rtlefuse->epromtype == EEPROM_93C46) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "RTL819X Not boot from eeprom, check it !!"); + } + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"), + hwinfo, HWSET_MAX_SIZE); + + eeprom_id = *((u16 *)&hwinfo[0]); + if (eeprom_id != RTL8723BE_EEPROM_ID) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "EEPROM ID(%#x) is invalid!!\n", eeprom_id); + rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + } + if (rtlefuse->autoload_failflag) + return; + + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; + rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; + rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROMId = 0x%4x\n", eeprom_id); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); + + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; + *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n", + rtlefuse->dev_addr); + + /*parse xtal*/ + rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE]; + if (rtlefuse->crystalcap == 0xFF) + rtlefuse->crystalcap = 0x20; + + _rtl8723be_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, + hwinfo); + + rtl8723be_read_bt_coexist_info_from_hwpg(hw, + rtlefuse->autoload_failflag, + hwinfo); + + rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; + rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; + rtlefuse->txpwr_fromeprom = true; + rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); + + /* set channel plan to world wide 13 */ + rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + + if (rtlhal->oem_id == RT_CID_DEFAULT) { + /* Does this one have a Toshiba SMID from group 1? */ + for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) { + if (rtlefuse->eeprom_smid == toshiba_smid1[i]) { + is_toshiba_smid1 = true; + break; + } + } + /* Does this one have a Toshiba SMID from group 2? */ + for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) { + if (rtlefuse->eeprom_smid == toshiba_smid2[i]) { + is_toshiba_smid2 = true; + break; + } + } + /* Does this one have a Samsung SMID? 
*/ + for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) { + if (rtlefuse->eeprom_smid == samsung_smid[i]) { + is_samsung_smid = true; + break; + } + } + /* Does this one have a Lenovo SMID? */ + for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) { + if (rtlefuse->eeprom_smid == lenovo_smid[i]) { + is_lenovo_smid = true; + break; + } + } + switch (rtlefuse->eeprom_oemid) { + case EEPROM_CID_DEFAULT: + if (rtlefuse->eeprom_did == 0x8176) { + if (rtlefuse->eeprom_svid == 0x10EC && + is_toshiba_smid1) { + rtlhal->oem_id = RT_CID_TOSHIBA; + } else if (rtlefuse->eeprom_svid == 0x1025) { + rtlhal->oem_id = RT_CID_819X_ACER; + } else if (rtlefuse->eeprom_svid == 0x10EC && + is_samsung_smid) { + rtlhal->oem_id = RT_CID_819X_SAMSUNG; + } else if (rtlefuse->eeprom_svid == 0x10EC && + is_lenovo_smid) { + rtlhal->oem_id = RT_CID_819X_LENOVO; + } else if ((rtlefuse->eeprom_svid == 0x10EC && + rtlefuse->eeprom_smid == 0x8197) || + (rtlefuse->eeprom_svid == 0x10EC && + rtlefuse->eeprom_smid == 0x9196)) { + rtlhal->oem_id = RT_CID_819X_CLEVO; + } else if ((rtlefuse->eeprom_svid == 0x1028 && + rtlefuse->eeprom_smid == 0x8194) || + (rtlefuse->eeprom_svid == 0x1028 && + rtlefuse->eeprom_smid == 0x8198) || + (rtlefuse->eeprom_svid == 0x1028 && + rtlefuse->eeprom_smid == 0x9197) || + (rtlefuse->eeprom_svid == 0x1028 && + rtlefuse->eeprom_smid == 0x9198)) { + rtlhal->oem_id = RT_CID_819X_DELL; + } else if ((rtlefuse->eeprom_svid == 0x103C && + rtlefuse->eeprom_smid == 0x1629)) { + rtlhal->oem_id = RT_CID_819X_HP; + } else if ((rtlefuse->eeprom_svid == 0x1A32 && + rtlefuse->eeprom_smid == 0x2315)) { + rtlhal->oem_id = RT_CID_819X_QMI; + } else if ((rtlefuse->eeprom_svid == 0x10EC && + rtlefuse->eeprom_smid == 0x8203)) { + rtlhal->oem_id = RT_CID_819X_PRONETS; + } else if ((rtlefuse->eeprom_svid == 0x1043 && + rtlefuse->eeprom_smid == 0x84B5)) { + rtlhal->oem_id = RT_CID_819X_EDIMAX_ASUS; + } else { + rtlhal->oem_id = RT_CID_DEFAULT; + } + } else if (rtlefuse->eeprom_did == 0x8178) { + if (rtlefuse->eeprom_svid == 0x10EC && + is_toshiba_smid2) + rtlhal->oem_id = RT_CID_TOSHIBA; + else if (rtlefuse->eeprom_svid == 0x1025) + rtlhal->oem_id = RT_CID_819X_ACER; + else if ((rtlefuse->eeprom_svid == 0x10EC && + rtlefuse->eeprom_smid == 0x8186)) + rtlhal->oem_id = RT_CID_819X_PRONETS; + else if ((rtlefuse->eeprom_svid == 0x1043 && + rtlefuse->eeprom_smid == 0x84B6)) + rtlhal->oem_id = + RT_CID_819X_EDIMAX_ASUS; + else + rtlhal->oem_id = RT_CID_DEFAULT; + } else { + rtlhal->oem_id = RT_CID_DEFAULT; + } + break; + case EEPROM_CID_TOSHIBA: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case EEPROM_CID_CCX: + rtlhal->oem_id = RT_CID_CCX; + break; + case EEPROM_CID_QMI: + rtlhal->oem_id = RT_CID_819X_QMI; + break; + case EEPROM_CID_WHQL: + break; + default: + rtlhal->oem_id = RT_CID_DEFAULT; + break; + } + } +} + +static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + pcipriv->ledctl.led_opendrain = true; + switch (rtlhal->oem_id) { + case RT_CID_819X_HP: + pcipriv->ledctl.led_opendrain = true; + break; + case RT_CID_819X_LENOVO: + case RT_CID_DEFAULT: + case RT_CID_TOSHIBA: + case RT_CID_CCX: + case RT_CID_819X_ACER: + case RT_CID_WHQL: + default: + break; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + "RT Customized ID: 0x%02X\n", rtlhal->oem_id); +} + +void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = 
rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_u1b; + + rtlhal->version = _rtl8723be_read_chip_version(hw); + if (get_rf_type(rtlphy) == RF_1T1R) + rtlpriv->dm.rfpath_rxenable[0] = true; + else + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n", + rtlhal->version); + tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); + if (tmp_u1b & BIT(4)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n"); + rtlefuse->epromtype = EEPROM_93C46; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n"); + rtlefuse->epromtype = EEPROM_BOOT_EFUSE; + } + if (tmp_u1b & BIT(5)) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + _rtl8723be_read_adapter_info(hw, false); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n"); + } + _rtl8723be_hal_customized_behavior(hw); +} + +static void rtl8723be_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 ratr_value; + u8 ratr_index = 0; + u8 nmode = mac->ht_enable; + u8 mimo_ps = IEEE80211_SMPS_OFF; + u16 shortgi_rate; + u32 tmp_ratr_value; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; + enum wireless_mode wirelessmode = mac->mode; + + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_value = sta->supp_rates[1] << 4; + else + ratr_value = sta->supp_rates[0]; + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_value = 0xfff; + ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); + switch (wirelessmode) { + case WIRELESS_MODE_B: + if (ratr_value & 0x0000000c) + ratr_value &= 0x0000000d; + else + ratr_value &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_value &= 0x00000FF5; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + nmode = 1; + if (mimo_ps == IEEE80211_SMPS_STATIC) { + ratr_value &= 0x0007F005; + } else { + u32 ratr_mask; + + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) + ratr_mask = 0x000ff005; + else + ratr_mask = 0x0f0ff005; + ratr_value &= ratr_mask; + } + break; + default: + if (rtlphy->rf_type == RF_1T2R) + ratr_value &= 0x000ff0ff; + else + ratr_value &= 0x0f0ff0ff; + break; + } + if ((rtlpriv->btcoexist.bt_coexistence) && + (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && + (rtlpriv->btcoexist.bt_cur_state) && + (rtlpriv->btcoexist.bt_ant_isolation) && + ((rtlpriv->btcoexist.bt_service == BT_SCO) || + (rtlpriv->btcoexist.bt_service == BT_BUSY))) + ratr_value &= 0x0fffcfc0; + else + ratr_value &= 0x0FFFFFFF; + + if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz))) { + ratr_value |= 0x10000000; + tmp_ratr_value = (ratr_value >> 12); + + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { + if ((1 << shortgi_rate) & tmp_ratr_value) + break; + } + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | + (shortgi_rate << 4) | (shortgi_rate); + } + rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + 
"%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)); +} + +static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, + u8 rate_index) +{ + u8 ret = 0; + + switch (rate_index) { + case RATR_INX_WIRELESS_NGB: + ret = 1; + break; + case RATR_INX_WIRELESS_N: + case RATR_INX_WIRELESS_NG: + ret = 5; + break; + case RATR_INX_WIRELESS_NB: + ret = 3; + break; + case RATR_INX_WIRELESS_GB: + ret = 6; + break; + case RATR_INX_WIRELESS_G: + ret = 7; + break; + case RATR_INX_WIRELESS_B: + ret = 8; + break; + default: + ret = 0; + break; + } + return ret; +} + +static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_sta_info *sta_entry = NULL; + u32 ratr_bitmap; + u8 ratr_index; + u8 curtxbw_40mhz = (sta->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0; + u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; + enum wireless_mode wirelessmode = 0; + bool shortgi = false; + u8 rate_mask[7]; + u8 macid = 0; + u8 mimo_ps = IEEE80211_SMPS_OFF; + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + wirelessmode = sta_entry->wireless_mode; + if (mac->opmode == NL80211_IFTYPE_STATION || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + curtxbw_40mhz = mac->bw_40; + else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) + macid = sta->aid + 1; + + ratr_bitmap = sta->supp_rates[0]; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_bitmap = 0xfff; + + ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); + switch (wirelessmode) { + case WIRELESS_MODE_B: + ratr_index = RATR_INX_WIRELESS_B; + if (ratr_bitmap & 0x0000000c) + ratr_bitmap &= 0x0000000d; + else + ratr_bitmap &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_index = RATR_INX_WIRELESS_GB; + + if (rssi_level == 1) + ratr_bitmap &= 0x00000f00; + else if (rssi_level == 2) + ratr_bitmap &= 0x00000ff0; + else + ratr_bitmap &= 0x00000ff5; + break; + case WIRELESS_MODE_A: + ratr_index = RATR_INX_WIRELESS_A; + ratr_bitmap &= 0x00000ff0; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + ratr_index = RATR_INX_WIRELESS_NGB; + + if (mimo_ps == IEEE80211_SMPS_STATIC || + mimo_ps == IEEE80211_SMPS_DYNAMIC) { + if (rssi_level == 1) + ratr_bitmap &= 0x00070000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0007f000; + else + ratr_bitmap &= 0x0007f005; + } else { + if (rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } + } else { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f8ff000; + else + ratr_bitmap &= 0x0f8ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x0f8f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f8ff000; + else + ratr_bitmap &= 0x0f8ff005; + } + } + } + if ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz)) { + if (macid == 0) + shortgi = true; + else if (macid == 1) + shortgi = false; + } + break; + default: + ratr_index = 
RATR_INX_WIRELESS_NGB; + + if (rtlphy->rf_type == RF_1T2R) + ratr_bitmap &= 0x000ff0ff; + else + ratr_bitmap &= 0x0f0ff0ff; + break; + } + sta_entry->ratr_index = ratr_index; + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "ratr_bitmap :%x\n", ratr_bitmap); + *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | + (ratr_index << 28)); + rate_mask[0] = macid; + rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) | + (shortgi ? 0x80 : 0x00); + rate_mask[2] = curtxbw_40mhz; + /* if (prox_priv->proxim_modeinfo->power_output > 0) + * rate_mask[2] |= BIT(6); + */ + + rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff); + rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8); + rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16); + rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24); + + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, + "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n", + ratr_index, ratr_bitmap, + rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], + rate_mask[4], rate_mask[5], + rate_mask[6]); + rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RA_MASK, 7, rate_mask); + _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0); +} + +void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (rtlpriv->dm.useramask) + rtl8723be_update_hal_rate_mask(hw, sta, rssi_level); + else + rtl8723be_update_hal_rate_table(hw, sta); +} + +void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 sifs_timer; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + (u8 *)&mac->slot_time); + if (!mac->ht_enable) + sifs_timer = 0x0a0a; + else + sifs_timer = 0x0e0e; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); +} + +bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; + u8 u1tmp; + bool actuallyset = false; + + if (rtlpriv->rtlhal.being_init_adapter) + return false; + + if (ppsc->swrf_processing) + return false; + + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (ppsc->rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + return false; + } else { + ppsc->rfchange_inprogress = true; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + cur_rfstate = ppsc->rfpwr_state; + + rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2, + rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1))); + + u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2); + + if (rtlphy->polarity_ctl) + e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON; + else + e_rfpowerstate_toset = (u1tmp & BIT(1)) ? 
ERFON : ERFOFF; + + if (ppsc->hwradiooff && + (e_rfpowerstate_toset == ERFON)) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "GPIOChangeRF - HW Radio ON, RF ON\n"); + + e_rfpowerstate_toset = ERFON; + ppsc->hwradiooff = false; + actuallyset = true; + } else if (!ppsc->hwradiooff && + (e_rfpowerstate_toset == ERFOFF)) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "GPIOChangeRF - HW Radio OFF, RF OFF\n"); + + e_rfpowerstate_toset = ERFOFF; + ppsc->hwradiooff = true; + actuallyset = true; + } + if (actuallyset) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } else { + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + *valid = 1; + return !ppsc->hwradiooff; +} + +void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 *macaddr = p_macaddr; + u32 entry_id = 0; + bool is_pairwise = false; + + static u8 cam_const_addr[4][6] = { + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} + }; + static u8 cam_const_broad[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + if (clear_all) { + u8 idx = 0; + u8 cam_offset = 0; + u8 clear_number = 5; + + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n"); + + for (idx = 0; idx < clear_number; idx++) { + rtl_cam_mark_invalid(hw, cam_offset + idx); + rtl_cam_empty_entry(hw, cam_offset + idx); + + if (idx < 5) { + memset(rtlpriv->sec.key_buf[idx], 0, + MAX_KEY_LEN); + rtlpriv->sec.key_len[idx] = 0; + } + } + } else { + switch (enc_algo) { + case WEP40_ENCRYPTION: + enc_algo = CAM_WEP40; + break; + case WEP104_ENCRYPTION: + enc_algo = CAM_WEP104; + break; + case TKIP_ENCRYPTION: + enc_algo = CAM_TKIP; + break; + case AESCCMP_ENCRYPTION: + enc_algo = CAM_AES; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + enc_algo = CAM_TKIP; + break; + } + + if (is_wepkey || rtlpriv->sec.use_defaultkey) { + macaddr = cam_const_addr[key_index]; + entry_id = key_index; + } else { + if (is_group) { + macaddr = cam_const_broad; + entry_id = key_index; + } else { + if (mac->opmode == NL80211_IFTYPE_AP) { + entry_id = rtl_cam_get_free_entry(hw, + p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { + RT_TRACE(rtlpriv, COMP_SEC, + DBG_EMERG, + "Can not find free" + " hw security cam " + "entry\n"); + return; + } + } else { + entry_id = CAM_PAIRWISE_KEY_POSITION; + } + key_index = PAIRWISE_KEYIDX; + is_pairwise = true; + } + } + if (rtlpriv->sec.key_len[key_index] == 0) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "delete one entry, entry_id is %d\n", + entry_id); + if (mac->opmode == NL80211_IFTYPE_AP) + rtl_cam_del_entry(hw, p_macaddr); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "add one entry\n"); + if (is_pairwise) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set Pairwise key\n"); + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + "set group key\n"); + + 
if (mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); + } + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + } + } +} + +void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + u32 tmpu_32; + + if (!auto_load_fail) { + tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); + if (tmpu_32 & BIT(18)) + rtlpriv->btcoexist.btc_info.btcoexist = 1; + else + rtlpriv->btcoexist.btc_info.btcoexist = 0; + value = hwinfo[RF_OPTION4]; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; + rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); + } else { + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; + } +} + +void rtl8723be_bt_reg_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* 0:Low, 1:High, 2:From Efuse. */ + rtlpriv->btcoexist.reg_bt_iso = 2; + /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ + rtlpriv->btcoexist.reg_bt_sco = 3; + /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ + rtlpriv->btcoexist.reg_bt_sco = 0; +} + +void rtl8723be_bt_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv); +} + +void rtl8723be_suspend(struct ieee80211_hw *hw) +{ +} + +void rtl8723be_resume(struct ieee80211_hw *hw) +{ +} + +/* Turn on AAP (RCR:bit 0) for promicuous mode. */ +void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da, + bool write_into_reg) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + if (allow_all_da) /* Set BIT0 */ + rtlpci->receive_config |= RCR_AAP; + else /* Clear BIT0 */ + rtlpci->receive_config &= ~RCR_AAP; + + if (write_into_reg) + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + + RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD, + "receive_config = 0x%08X, write_into_reg =%d\n", + rtlpci->receive_config, write_into_reg); +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/hw.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/hw.h @@ -0,0 +1,64 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_HW_H__ +#define __RTL8723BE_HW_H__ + +void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw); + +void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb); +int rtl8723be_hw_init(struct ieee80211_hw *hw); +void rtl8723be_card_disable(struct ieee80211_hw *hw); +void rtl8723be_enable_interrupt(struct ieee80211_hw *hw); +void rtl8723be_disable_interrupt(struct ieee80211_hw *hw); +int rtl8723be_set_network_type(struct ieee80211_hw *hw, + enum nl80211_iftype type); +void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci); +void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw); +void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level); +void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); +void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, u8 *hwinfo); +void rtl8723be_bt_reg_init(struct ieee80211_hw *hw); +void rtl8723be_bt_hw_init(struct ieee80211_hw *hw); +void rtl8723be_suspend(struct ieee80211_hw *hw); +void rtl8723be_resume(struct ieee80211_hw *hw); +void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da, + bool write_into_reg); +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/led.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/led.c @@ -0,0 +1,153 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "reg.h" +#include "led.h" + +static void _rtl8723be_init_led(struct ieee80211_hw *hw, struct rtl_led *pled, + enum rtl_led_pin ledpin) +{ + pled->hw = hw; + pled->ledpin = ledpin; + pled->ledon = false; +} + +void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u8 ledcfg; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + ledcfg &= ~BIT(6); + rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5)); + break; + case LED_PIN_LED1: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + pled->ledon = true; +} + +void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u8 ledcfg; + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin); + + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg &= 0xf0; + if (pcipriv->ledctl.led_opendrain) { + ledcfg &= 0x90; /* Set to software control. */ + rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); + ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); + ledcfg &= 0xFE; + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg); + } else { + ledcfg &= ~BIT(6); + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(3) | BIT(5))); + } + break; + case LED_PIN_LED1: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + ledcfg &= 0x10; /* Set to software control. 
*/ + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3)); + + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not processed\n"); + break; + } + pled->ledon = false; +} + +void rtl8723be_init_sw_leds(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); + _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); +} + +static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0); + switch (ledaction) { + case LED_CTL_POWER_ON: + case LED_CTL_LINK: + case LED_CTL_NO_LINK: + rtl8723be_sw_led_on(hw, pled0); + break; + case LED_CTL_POWER_OFF: + rtl8723be_sw_led_off(hw, pled0); + break; + default: + break; + } +} + +void rtl8723be_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && + (ledaction == LED_CTL_TX || + ledaction == LED_CTL_RX || + ledaction == LED_CTL_SITE_SURVEY || + ledaction == LED_CTL_LINK || + ledaction == LED_CTL_NO_LINK || + ledaction == LED_CTL_START_TO_LINK || + ledaction == LED_CTL_POWER_ON)) { + return; + } + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction); + _rtl8723be_sw_led_control(hw, ledaction); +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/led.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/led.h @@ -0,0 +1,35 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_LED_H__ +#define __RTL8723BE_LED_H__ + +void rtl8723be_init_sw_leds(struct ieee80211_hw *hw); +void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8723be_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/phy.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/phy.c @@ -0,0 +1,2175 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../ps.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "../rtl8723com/phy_common.h" +#include "rf.h" +#include "dm.h" +#include "table.h" +#include "trx.h" + +static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw); +static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, + u8 *step, u32 *delay); +static void _rtl8723be_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data); + +static bool _rtl8723be_check_condition(struct ieee80211_hw *hw, + const u32 condition) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 _board = rtlefuse->board_type; /*need efuse define*/ + u32 _interface = rtlhal->interface; + u32 _platform = 0x08;/*SupportPlatform */ + u32 cond = condition; + + if (condition == 0xCDCDCDCD) + return true; + + cond = condition & 0xFF; + if ((_board & cond) == 0 && cond != 0x1F) + return false; + + cond = condition & 0xFF00; + cond = cond >> 8; + if ((_interface & cond) == 0 && cond != 0x07) + return false; + + cond = condition & 0xFF0000; + cond = cond >> 16; + if ((_platform & cond) == 0 && cond != 0x0F) + return false; + return true; +} + +static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n"); + arraylength = RTL8723BEMAC_1T_ARRAYLEN; + ptrarray = RTL8723BEMAC_1T_ARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength); + for (i = 0; i < arraylength; i = i + 2) + rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]); + return true; +} + +static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) +{ + #define READ_NEXT_PAIR(v1, v2, i) \ + do { \ + i += 2; \ + v1 = array_table[i];\ + v2 = array_table[i+1]; \ + } while (0) + + int i; + u32 *array_table; + u16 arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + arraylen = RTL8723BEPHY_REG_1TARRAYLEN; + array_table = RTL8723BEPHY_REG_1TARRAY; + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl8723be_config_bb_reg(hw, v1, v2); + } else {/*This line is the start line of branch.*/ + if (!_rtl8723be_check_condition(hw, array_table[i])) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + READ_NEXT_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + /* Configure matched pairs and + * skip to end of if-else. 
+ */ + } else { + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + _rtl8723be_config_bb_reg(hw, + v1, v2); + READ_NEXT_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + } + } + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + arraylen = RTL8723BEAGCTAB_1TARRAYLEN; + array_table = RTL8723BEAGCTAB_1TARRAY; + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xCDCDCDCD) { + rtl_set_bbreg(hw, array_table[i], + MASKDWORD, + array_table[i + 1]); + udelay(1); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8723be_check_condition(hw, array_table[i])) { + /* Discard the following + * (offset, data) pairs + */ + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + READ_NEXT_PAIR(v1, v2, i); + } + i -= 2; /* prevent from for-loop += 2*/ + /*Configure matched pairs and + *skip to end of if-else. + */ + } else { + READ_NEXT_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < arraylen - 2) { + rtl_set_bbreg(hw, array_table[i], + MASKDWORD, + array_table[i + 1]); + udelay(1); + READ_NEXT_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen - 2) + READ_NEXT_PAIR(v1, v2, i); + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "The agctab_array_table[0] is " + "%x Rtl818EEPHY_REGArray[1] is %x\n", + array_table[i], array_table[i + 1]); + } + } + return true; +} + +static u8 _rtl8723be_get_rate_section_index(u32 regaddr) +{ + u8 index = 0; + + switch (regaddr) { + case RTXAGC_A_RATE18_06: + case RTXAGC_B_RATE18_06: + index = 0; + break; + case RTXAGC_A_RATE54_24: + case RTXAGC_B_RATE54_24: + index = 1; + break; + case RTXAGC_A_CCK1_MCS32: + case RTXAGC_B_CCK1_55_MCS32: + index = 2; + break; + case RTXAGC_B_CCK11_A_CCK2_11: + index = 3; + break; + case RTXAGC_A_MCS03_MCS00: + case RTXAGC_B_MCS03_MCS00: + index = 4; + break; + case RTXAGC_A_MCS07_MCS04: + case RTXAGC_B_MCS07_MCS04: + index = 5; + break; + case RTXAGC_A_MCS11_MCS08: + case RTXAGC_B_MCS11_MCS08: + index = 6; + break; + case RTXAGC_A_MCS15_MCS12: + case RTXAGC_B_MCS15_MCS12: + index = 7; + break; + default: + regaddr &= 0xFFF; + if (regaddr >= 0xC20 && regaddr <= 0xC4C) + index = (u8) ((regaddr - 0xC20) / 4); + else if (regaddr >= 0xE20 && regaddr <= 0xE4C) + index = (u8) ((regaddr - 0xE20) / 4); + break; + }; + return index; +} + +u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, readback_value, bitshift; + unsigned long flags; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n", + regaddr, rfpath, bitmask); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr); + bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + readback_value = (original_value & bitmask) >> bitshift; + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), rfpath(%#x), " + "bitmask(%#x), original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value); + + return readback_value; +} + +void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, bitshift; + unsigned long 
flags; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, path); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + if (bitmask != RFREG_OFFSET_MASK) { + original_value = rtl8723_phy_rf_serial_read(hw, path, + regaddr); + bitshift = rtl8723_phy_calculate_bit_shift(bitmask); + data = ((original_value & (~bitmask)) | + (data << bitshift)); + } + + rtl8723_phy_rf_serial_write(hw, path, regaddr, data); + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, path); +} + +bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool rtstatus = _rtl8723be_phy_config_mac_with_headerfile(hw); + + rtl_write_byte(rtlpriv, 0x04CA, 0x0B); + return rtstatus; +} + +bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw) +{ + bool rtstatus = true; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 regval; + u8 reg_hwparafile = 1; + u32 tmp; + u8 crystalcap = rtlpriv->efuse.crystalcap; + rtl8723_phy_init_bb_rf_reg_def(hw); + regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); + rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, + regval | BIT(13) | BIT(0) | BIT(1)); + + rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, + FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE | + FEN_BB_GLB_RSTN | FEN_BBRSTB); + tmp = rtl_read_dword(rtlpriv, 0x4c); + rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23)); + + rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); + + if (reg_hwparafile == 1) + rtstatus = _rtl8723be_phy_bb8723b_config_parafile(hw); + + crystalcap = crystalcap & 0x3F; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, + (crystalcap | crystalcap << 6)); + + return rtstatus; +} + +bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw) +{ + return rtl8723be_phy_rf6052_config(hw); +} + +static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr, + u32 data, enum radio_path rfpath, + u32 regaddr) +{ + if (addr == 0xfe || addr == 0xffe) { + mdelay(50); + } else { + rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data); + udelay(1); + } +} + +static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1000; /*RF Content: radio_a_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl8723be_config_rf_reg(hw, addr, data, RF90_PATH_A, + addr | maskforphyset); +} + +static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + u8 band, path, txnum, section; + + for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band) + for (path = 0; path < TX_PWR_BY_RATE_NUM_RF; ++path) + for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum) + for (section = 0; + section < TX_PWR_BY_RATE_NUM_SECTION; + ++section) + rtlphy->tx_power_by_rate_offset[band] + [path][txnum][section] = 0; +} + +static void _rtl8723be_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if (addr == 0xfe) { + mdelay(50); + } else if (addr == 0xfd) { + mdelay(5); + } else if (addr == 0xfc) { + mdelay(1); + } else if (addr == 0xfb) { + udelay(50); + } else if (addr == 0xfa) { + udelay(5); + } else if (addr == 0xf9) { + udelay(1); + } else { + rtl_set_bbreg(hw, addr, MASKDWORD, data); + udelay(1); + } +} + +static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, + u8 path, u8 
rate_section, + u8 txnum, u8 value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (path > RF90_PATH_D) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", + path); + return; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value; + break; + case OFDM: + rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value; + break; + case HT_MCS0_MCS7: + rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value; + break; + case HT_MCS8_MCS15: + rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in Band 2.4G, Rf Path" + " %d, %dTx in PHY_SetTxPowerByRateBase()\n", + rate_section, path, txnum); + break; + }; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Band %d in PHY_SetTxPowerByRateBase()\n", + band); + } +} + +static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path, + u8 txnum, u8 rate_section) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 value = 0; + if (path > RF90_PATH_D) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", + path); + return 0; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0]; + break; + case OFDM: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1]; + break; + case HT_MCS0_MCS7: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2]; + break; + case HT_MCS8_MCS15: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3]; + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid RateSection %d in Band 2.4G, Rf Path" + " %d, %dTx in PHY_GetTxPowerByRateBase()\n", + rate_section, path, txnum); + break; + }; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", + band); + } + + return value; +} + +static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u16 raw_value = 0; + u8 base = 0, path = 0; + + for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { + if (path == RF90_PATH_A) { + raw_value = (u16) (rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF; + base = (raw_value >> 4) * 10 + (raw_value & 0xF); + phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, CCK, + RF_1TX, base); + } else if (path == RF90_PATH_B) { + raw_value = (u16) (rtlphy->tx_power_by_rate_offset + [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF; + base = (raw_value >> 4) * 10 + (raw_value & 0xF); + phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, + CCK, RF_1TX, base); + } + raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [path][RF_1TX][1] >> 24) & 0xFF; + base = (raw_value >> 4) * 10 + (raw_value & 0xF); + phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, + base); + + raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [path][RF_1TX][5] >> 24) & 0xFF; + base = (raw_value >> 4) * 10 + (raw_value & 0xF); + phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, + RF_1TX, base); + + raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [path][RF_2TX][7] >> 24) & 0xFF; + base = (raw_value >> 4) * 10 + (raw_value & 0xF); + phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, 
path, + HT_MCS8_MCS15, RF_2TX, base); + } +} + +static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val) +{ + char i = 0; + u8 temp_value = 0; + u32 temp_data = 0; + + for (i = 3; i >= 0; --i) { + if (i >= start && i <= end) { + /* Get the exact value */ + temp_value = (u8) (*data >> (i * 8)) & 0xF; + temp_value += ((u8) ((*data >> (i*8 + 4)) & 0xF)) * 10; + + /* Change the value to a relative value */ + temp_value = (temp_value > base_val) ? + temp_value - base_val : + base_val - temp_value; + } else { + temp_value = (u8) (*data >> (i * 8)) & 0xFF; + } + temp_data <<= 8; + temp_data |= temp_value; + } + *data = temp_data; +} + +static void conv_dbm_to_rel(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 base = 0, rfpath = RF90_PATH_A; + + base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, + RF_1TX, CCK); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_1TX][2]), 1, 1, base); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_1TX][3]), 1, 3, base); + + base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, + RF_1TX, OFDM); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_1TX][0]), 0, 3, base); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_1TX][1]), 0, 3, base); + + base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, + RF_1TX, HT_MCS0_MCS7); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_1TX][4]), 0, 3, base); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_1TX][5]), 0, 3, base); + + base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath, + RF_2TX, HT_MCS8_MCS15); + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_2TX][6]), 0, 3, base); + + phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G] + [rfpath][RF_2TX][7]), 0, 3, base); + + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + "<=== conv_dbm_to_rel()\n"); +} + +static void _rtl8723be_phy_txpower_by_rate_configuration( + struct ieee80211_hw *hw) +{ + _rtl8723be_phy_store_txpower_by_rate_base(hw); + conv_dbm_to_rel(hw); +} + +static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + bool rtstatus; + + rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_PHY_REG); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + return false; + } + _rtl8723be_phy_init_tx_power_by_rate(hw); + if (!rtlefuse->autoload_failflag) { + rtlphy->pwrgroup_cnt = 0; + rtstatus = _rtl8723be_phy_config_bb_with_pgheaderfile(hw, + BASEBAND_CONFIG_PHY_REG); + } + _rtl8723be_phy_txpower_by_rate_configuration(hw); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + return false; + } + rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_AGC_TAB); + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n"); + return false; + } + rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + 0x200)); + return true; +} + +static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw, + u32 band, u32 rfpath, + u32 txnum, u32 regaddr, + u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = 
rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 rate_section = _rtl8723be_get_rate_section_index(regaddr); + + if (band != BAND_ON_2_4G && band != BAND_ON_5G) + RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR, + "Invalid Band %d\n", band); + + if (rfpath > MAX_RF_PATH) + RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR, + "Invalid RfPath %d\n", rfpath); + + if (txnum > MAX_RF_PATH) + RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR, + "Invalid TxNum %d\n", txnum); + + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = + data; +} + +static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i; + u32 *phy_regarray_table_pg; + u16 phy_regarray_pg_len; + u32 v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0; + + phy_regarray_pg_len = RTL8723BEPHY_REG_ARRAY_PGLEN; + phy_regarray_table_pg = RTL8723BEPHY_REG_ARRAY_PG; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_regarray_pg_len; i = i + 6) { + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + v4 = phy_regarray_table_pg[i+3]; + v5 = phy_regarray_table_pg[i+4]; + v6 = phy_regarray_table_pg[i+5]; + + if (v1 < 0xcdcdcdcd) { + if (phy_regarray_table_pg[i] == 0xfe || + phy_regarray_table_pg[i] == 0xffe) + mdelay(50); + else + _rtl8723be_store_tx_power_by_rate(hw, + v1, v2, v3, v4, v5, v6); + continue; + } else { + /*don't need the hw_body*/ + if (!_rtl8723be_check_condition(hw, + phy_regarray_table_pg[i])) { + i += 2; /* skip the pair of expression*/ + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + while (v2 != 0xDEAD) { + i += 3; + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + } + } + } + } + } else { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "configtype != BaseBand_Config_PHY_REG\n"); + } + return true; +} + +bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath) +{ + #define READ_NEXT_RF_PAIR(v1, v2, i) \ + do { \ + i += 2; \ + v1 = radioa_array_table[i]; \ + v2 = radioa_array_table[i+1]; \ + } while (0) + + int i; + bool rtstatus = true; + u32 *radioa_array_table; + u16 radioa_arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 v1 = 0, v2 = 0; + + radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN; + radioa_array_table = RTL8723BE_RADIOA_1TARRAY; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); + rtstatus = true; + switch (rfpath) { + case RF90_PATH_A: + for (i = 0; i < radioa_arraylen; i = i + 2) { + v1 = radioa_array_table[i]; + v2 = radioa_array_table[i+1]; + if (v1 < 0xcdcdcdcd) { + _rtl8723be_config_rf_radio_a(hw, v1, v2); + } else { /*This line is the start line of branch.*/ + if (!_rtl8723be_check_condition(hw, + radioa_array_table[i])) { + /* Discard the following + * (offset, data) pairs + */ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < radioa_arraylen - 2) + READ_NEXT_RF_PAIR(v1, v2, i); + i -= 2; /* prevent from for-loop += 2*/ + } else { + /* Configure matched pairs + * and skip to end of if-else. 
+ */ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && + i < radioa_arraylen - 2) { + _rtl8723be_config_rf_radio_a(hw, + v1, v2); + READ_NEXT_RF_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && + i < radioa_arraylen - 2) { + READ_NEXT_RF_PAIR(v1, v2, i); + } + } + } + } + + if (rtlhal->oem_id == RT_CID_819X_HP) + _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD); + + break; + case RF90_PATH_B: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + case RF90_PATH_C: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + case RF90_PATH_D: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + return true; +} + +void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->default_initialgain[0] = + (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default initial gain (c50 = 0x%x, " + "c58 = 0x%x, c60 = 0x%x, c68 = 0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3]); + + rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, + MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, + MASKDWORD); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Default framesync (0x%x) = 0x%x\n", + ROFDM0_RXDETECTOR3, rtlphy->framesync); +} + +void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 txpwr_level; + long txpwr_dbm; + + txpwr_level = rtlphy->cur_cck_txpwridx; + txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > + txpwr_dbm) + txpwr_dbm = + rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); + *powerlevel = txpwr_dbm; +} + +static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path, + u8 rate) +{ + u8 rate_section = 0; + + switch (rate) { + case DESC92C_RATE1M: + rate_section = 2; + break; + case DESC92C_RATE2M: + case DESC92C_RATE5_5M: + if (path == RF90_PATH_A) + rate_section = 3; + else if (path == RF90_PATH_B) + rate_section = 2; + break; + case DESC92C_RATE11M: + rate_section = 3; + break; + case DESC92C_RATE6M: + case DESC92C_RATE9M: + case DESC92C_RATE12M: + case DESC92C_RATE18M: + rate_section = 0; + break; + case DESC92C_RATE24M: + case DESC92C_RATE36M: + case DESC92C_RATE48M: + case DESC92C_RATE54M: + rate_section = 1; + break; + case DESC92C_RATEMCS0: + case DESC92C_RATEMCS1: + case DESC92C_RATEMCS2: + case DESC92C_RATEMCS3: + rate_section = 4; + break; + case DESC92C_RATEMCS4: + case DESC92C_RATEMCS5: + case DESC92C_RATEMCS6: + case DESC92C_RATEMCS7: + rate_section = 5; + break; + case DESC92C_RATEMCS8: + 
case DESC92C_RATEMCS9: + case DESC92C_RATEMCS10: + case DESC92C_RATEMCS11: + rate_section = 6; + break; + case DESC92C_RATEMCS12: + case DESC92C_RATEMCS13: + case DESC92C_RATEMCS14: + case DESC92C_RATEMCS15: + rate_section = 7; + break; + default: + RT_ASSERT(true, "Rate_Section is Illegal\n"); + break; + } + return rate_section; +} + +static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw, + enum band_type band, + enum radio_path rfpath, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 shift = 0, rate_section, tx_num; + char tx_pwr_diff = 0; + + rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath, + rate); + tx_num = RF_TX_NUM_NONIMPLEMENT; + + if (tx_num == RF_TX_NUM_NONIMPLEMENT) { + if (rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS15) + tx_num = RF_2TX; + else + tx_num = RF_1TX; + } + + switch (rate) { + case DESC92C_RATE6M: + case DESC92C_RATE24M: + case DESC92C_RATEMCS0: + case DESC92C_RATEMCS4: + case DESC92C_RATEMCS8: + case DESC92C_RATEMCS12: + shift = 0; + break; + case DESC92C_RATE1M: + case DESC92C_RATE2M: + case DESC92C_RATE9M: + case DESC92C_RATE36M: + case DESC92C_RATEMCS1: + case DESC92C_RATEMCS5: + case DESC92C_RATEMCS9: + case DESC92C_RATEMCS13: + shift = 8; + break; + case DESC92C_RATE5_5M: + case DESC92C_RATE12M: + case DESC92C_RATE48M: + case DESC92C_RATEMCS2: + case DESC92C_RATEMCS6: + case DESC92C_RATEMCS10: + case DESC92C_RATEMCS14: + shift = 16; + break; + case DESC92C_RATE11M: + case DESC92C_RATE18M: + case DESC92C_RATE54M: + case DESC92C_RATEMCS3: + case DESC92C_RATEMCS7: + case DESC92C_RATEMCS11: + case DESC92C_RATEMCS15: + shift = 24; + break; + default: + RT_ASSERT(true, "Rate_Section is Illegal\n"); + break; + } + tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num] + [rate_section] >> shift) & 0xff; + + return tx_pwr_diff; +} + +static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, + u8 rate, u8 bandwidth, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 index = (channel - 1); + u8 txpower; + u8 power_diff_byrate = 0; + + if (channel > 14 || channel < 1) { + index = 0; + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "Illegal channel!\n"); + } + if (RTL8723E_RX_HAL_IS_CCK_RATE(rate)) + txpower = rtlefuse->txpwrlevel_cck[path][index]; + else if (DESC92C_RATE6M <= rate) + txpower = rtlefuse->txpwrlevel_ht40_1s[path][index]; + else + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + "invalid rate\n"); + + if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M && + !RTL8723E_RX_HAL_IS_CCK_RATE(rate)) + txpower += rtlefuse->txpwr_legacyhtdiff[0][TX_1S]; + + if (bandwidth == HT_CHANNEL_WIDTH_20) { + if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15) + txpower += rtlefuse->txpwr_ht20diff[0][TX_1S]; + if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15) + txpower += rtlefuse->txpwr_ht20diff[0][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) { + if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15) + txpower += rtlefuse->txpwr_ht40diff[0][TX_1S]; + if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15) + txpower += rtlefuse->txpwr_ht40diff[0][TX_2S]; + } + if (rtlefuse->eeprom_regulatory != 2) + power_diff_byrate = _rtl8723be_get_txpower_by_rate(hw, + BAND_ON_2_4G, + path, rate); + + txpower += power_diff_byrate; + + if (txpower > MAX_POWER_INDEX) + txpower = MAX_POWER_INDEX; + + return txpower; +} + +static void 
_rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw, + u8 power_index, u8 path, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (path == RF90_PATH_A) { + switch (rate) { + case DESC92C_RATE1M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_CCK1_MCS32, + MASKBYTE1, power_index); + break; + case DESC92C_RATE2M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11, + MASKBYTE1, power_index); + break; + case DESC92C_RATE5_5M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11, + MASKBYTE2, power_index); + break; + case DESC92C_RATE11M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11, + MASKBYTE3, power_index); + break; + case DESC92C_RATE6M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06, + MASKBYTE0, power_index); + break; + case DESC92C_RATE9M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06, + MASKBYTE1, power_index); + break; + case DESC92C_RATE12M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06, + MASKBYTE2, power_index); + break; + case DESC92C_RATE18M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06, + MASKBYTE3, power_index); + break; + case DESC92C_RATE24M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24, + MASKBYTE0, power_index); + break; + case DESC92C_RATE36M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24, + MASKBYTE1, power_index); + break; + case DESC92C_RATE48M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24, + MASKBYTE2, power_index); + break; + case DESC92C_RATE54M: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24, + MASKBYTE3, power_index); + break; + case DESC92C_RATEMCS0: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE0, power_index); + break; + case DESC92C_RATEMCS1: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE1, power_index); + break; + case DESC92C_RATEMCS2: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE2, power_index); + break; + case DESC92C_RATEMCS3: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00, + MASKBYTE3, power_index); + break; + case DESC92C_RATEMCS4: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE0, power_index); + break; + case DESC92C_RATEMCS5: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE1, power_index); + break; + case DESC92C_RATEMCS6: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE2, power_index); + break; + case DESC92C_RATEMCS7: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04, + MASKBYTE3, power_index); + break; + case DESC92C_RATEMCS8: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE0, power_index); + break; + case DESC92C_RATEMCS9: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE1, power_index); + break; + case DESC92C_RATEMCS10: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE2, power_index); + break; + case DESC92C_RATEMCS11: + rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08, + MASKBYTE3, power_index); + break; + default: + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + "Invalid Rate!!\n"); + break; + } + } else { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n"); + } +} + +void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M, + DESC92C_RATE5_5M, DESC92C_RATE11M}; + u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M, + DESC92C_RATE12M, DESC92C_RATE18M, + DESC92C_RATE24M, DESC92C_RATE36M, + DESC92C_RATE48M, DESC92C_RATE54M}; + u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1, + DESC92C_RATEMCS2, DESC92C_RATEMCS3, + DESC92C_RATEMCS4, DESC92C_RATEMCS5, + 
DESC92C_RATEMCS6, DESC92C_RATEMCS7}; + u8 i, size; + u8 power_index; + + if (!rtlefuse->txpwr_fromeprom) + return; + + size = sizeof(cck_rates) / sizeof(u8); + for (i = 0; i < size; i++) { + power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A, + cck_rates[i], + rtl_priv(hw)->phy.current_chan_bw, + channel); + _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A, + cck_rates[i]); + } + size = sizeof(ofdm_rates) / sizeof(u8); + for (i = 0; i < size; i++) { + power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A, + ofdm_rates[i], + rtl_priv(hw)->phy.current_chan_bw, + channel); + _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A, + ofdm_rates[i]); + } + size = sizeof(ht_rates_1t) / sizeof(u8); + for (i = 0; i < size; i++) { + power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A, + ht_rates_1t[i], + rtl_priv(hw)->phy.current_chan_bw, + channel); + _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A, + ht_rates_1t[i]); + } +} + +void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum io_type iotype; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP: + iotype = IO_CMD_PAUSE_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD, + (u8 *)&iotype); + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD, + (u8 *)&iotype); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Unknown Scan Backup operation.\n"); + break; + } + } +} + +void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 reg_bw_opmode; + u8 reg_prsr_rsc; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "Switch to %s bandwidth\n", + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : "40MHz"); + + if (is_hal_stop(rtlhal)) { + rtlphy->set_bwmode_inprogress = false; + return; + } + + reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE); + reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2); + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + reg_bw_opmode |= BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + break; + case HT_CHANNEL_WIDTH_20_40: + reg_bw_opmode &= ~BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + reg_prsr_rsc = (reg_prsr_rsc & 0x90) | + (mac->cur_40_prime_sc << 5); + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + break; + } + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND, + (mac->cur_40_prime_sc >> 1)); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc); + rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)), + (mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "unknown bandwidth: %#X\n", rtlphy->current_chan_bw); + break; + } + rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtlphy->set_bwmode_inprogress = false; + RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n"); +} + +void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_bw = rtlphy->current_chan_bw; + + if (rtlphy->set_bwmode_inprogress) + return; + rtlphy->set_bwmode_inprogress = true; + if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { + rtl8723be_phy_set_bw_mode_callback(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "false driver sleep or unload\n"); + rtlphy->set_bwmode_inprogress = false; + rtlphy->current_chan_bw = tmp_bw; + } +} + +void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 delay; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + "switch to channel%d\n", rtlphy->current_channel); + if (is_hal_stop(rtlhal)) + return; + do { + if (!rtlphy->sw_chnl_inprogress) + break; + if (!rtl8723be_phy_sw_chn_step_by_step(hw, + rtlphy->current_channel, + &rtlphy->sw_chnl_stage, + &rtlphy->sw_chnl_step, + &delay)) { + if (delay > 0) + mdelay(delay); + else + continue; + } else { + rtlphy->sw_chnl_inprogress = false; + } + break; + } while (true); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n"); +} + +u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlphy->sw_chnl_inprogress) + return 0; + if (rtlphy->set_bwmode_inprogress) + return 0; + RT_ASSERT((rtlphy->current_channel <= 14), + "WIRELESS_MODE_G but channel>14"); + rtlphy->sw_chnl_inprogress = true; + rtlphy->sw_chnl_stage = 0; + rtlphy->sw_chnl_step = 0; + if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { + rtl8723be_phy_sw_chnl_callback(hw); + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + "sw_chnl_inprogress false schdule " + "workitem current channel %d\n", + rtlphy->current_channel); + rtlphy->sw_chnl_inprogress = false; + } else { + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + "sw_chnl_inprogress false driver sleep or" + " unload\n"); + rtlphy->sw_chnl_inprogress = false; + } + return 1; +} + +static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, + u8 *step, u32 *delay) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; + u32 precommoncmdcnt; + struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; + u32 postcommoncmdcnt; + struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT]; + u32 rfdependcmdcnt; + struct swchnlcmd *currentcmd = NULL; + u8 rfpath; + u8 num_total_rfpath = rtlphy->num_total_rfpath; + + precommoncmdcnt = 0; + rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, + CMDID_SET_TXPOWEROWER_LEVEL, + 0, 0, 0); + rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + postcommoncmdcnt = 0; + rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, + MAX_POSTCMD_CNT, CMDID_END, + 0, 0, 0); + rfdependcmdcnt = 0; + + RT_ASSERT((channel >= 1 && 
channel <= 14), + "illegal channel for Zebra: %d\n", channel); + + rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, + CMDID_RF_WRITEREG, + RF_CHNLBW, channel, 10); + + rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, + CMDID_END, 0, 0, 0); + + do { + switch (*stage) { + case 0: + currentcmd = &precommoncmd[*step]; + break; + case 1: + currentcmd = &rfdependcmd[*step]; + break; + case 2: + currentcmd = &postcommoncmd[*step]; + break; + } + + if (currentcmd->cmdid == CMDID_END) { + if ((*stage) == 2) { + return true; + } else { + (*stage)++; + (*step) = 0; + continue; + } + } + + switch (currentcmd->cmdid) { + case CMDID_SET_TXPOWEROWER_LEVEL: + rtl8723be_phy_set_txpower_level(hw, channel); + break; + case CMDID_WRITEPORT_ULONG: + rtl_write_dword(rtlpriv, currentcmd->para1, + currentcmd->para2); + break; + case CMDID_WRITEPORT_USHORT: + rtl_write_word(rtlpriv, currentcmd->para1, + (u16) currentcmd->para2); + break; + case CMDID_WRITEPORT_UCHAR: + rtl_write_byte(rtlpriv, currentcmd->para1, + (u8) currentcmd->para2); + break; + case CMDID_RF_WRITEREG: + for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { + rtlphy->rfreg_chnlval[rfpath] = + ((rtlphy->rfreg_chnlval[rfpath] & + 0xfffffc00) | currentcmd->para2); + + rtl_set_rfreg(hw, (enum radio_path)rfpath, + currentcmd->para1, + RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[rfpath]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + + break; + } while (true); + + (*delay) = currentcmd->msdelay; + (*step)++; + return false; +} + +static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) +{ + u32 reg_eac, reg_e94, reg_e9c, reg_ea4; + u8 result = 0x00; + + rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c); + rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c); + rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a); + rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000); + + rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911); + rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + return result; +} + +static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8], + u8 c1, u8 c2) +{ + u32 i, j, diff, simularity_bitmap, bound; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 final_candidate[2] = { 0xFF, 0xFF }; + bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version); + + if (is2t) + bound = 8; + else + bound = 4; + + simularity_bitmap = 0; + + for (i = 0; i < bound; i++) { + diff = (result[c1][i] > result[c2][i]) ? 
+ (result[c1][i] - result[c2][i]) : + (result[c2][i] - result[c1][i]); + + if (diff > MAX_TOLERANCE) { + if ((i == 2 || i == 6) && !simularity_bitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + final_candidate[(i / 4)] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + final_candidate[(i / 4)] = c1; + else + simularity_bitmap |= (1 << i); + } else { + simularity_bitmap |= (1 << i); + } + } + } + + if (simularity_bitmap == 0) { + for (i = 0; i < (bound / 4); i++) { + if (final_candidate[i] != 0xFF) { + for (j = i * 4; j < (i + 1) * 4 - 2; j++) + result[3][j] = + result[final_candidate[i]][j]; + bresult = false; + } + } + return bresult; + } else if (!(simularity_bitmap & 0x0F)) { + for (i = 0; i < 4; i++) + result[3][i] = result[c1][i]; + return false; + } else if (!(simularity_bitmap & 0xF0) && is2t) { + for (i = 4; i < 8; i++) + result[3][i] = result[c1][i]; + return false; + } else { + return false; + } +} + +static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, + long result[][8], u8 t, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 i; + u8 patha_ok; + u32 adda_reg[IQK_ADDA_REG_NUM] = { + 0x85c, 0xe6c, 0xe70, 0xe74, + 0xe78, 0xe7c, 0xe80, 0xe84, + 0xe88, 0xe8c, 0xed0, 0xed4, + 0xed8, 0xedc, 0xee0, 0xeec + }; + + u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { + 0x522, 0x550, 0x551, 0x040 + }; + u32 iqk_bb_reg[IQK_BB_REG_NUM] = { + ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR, + RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c, + 0x870, 0x860, + 0x864, 0x800 + }; + const u32 retrycount = 2; + u32 path_sel_bb, path_sel_rf; + u8 tmp_reg_c50, tmp_reg_c58; + + tmp_reg_c50 = rtl_get_bbreg(hw, 0xc50, MASKBYTE0); + tmp_reg_c58 = rtl_get_bbreg(hw, 0xc58, MASKBYTE0); + + if (t == 0) { + rtl8723_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + rtl8723_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + rtl8723_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + } + rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t); + if (t == 0) { + rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + } + if (!rtlphy->rfpi_enable) + rtl8723_phy_pi_mode_switch(hw, true); + + path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD); + path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff); + + /*BB Setting*/ + rtl_set_bbreg(hw, 0x800, BIT(24), 0x00); + rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600); + rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4); + rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000); + + rtl_set_bbreg(hw, 0x870, BIT(10), 0x01); + rtl_set_bbreg(hw, 0x870, BIT(26), 0x01); + rtl_set_bbreg(hw, 0x860, BIT(10), 0x00); + rtl_set_bbreg(hw, 0x864, BIT(10), 0x00); + + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASKDWORD, 0x10000); + rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000); + + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); + rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800); + for (i = 0; i < retrycount; i++) { + patha_ok = _rtl8723be_phy_path_a_iqk(hw, is2t); + if (patha_ok == 0x01) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path A Tx IQK Success!!\n"); + result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & + 0x3FF0000) >> 16; + break; + } + } + + if (0 == patha_ok) + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Path A IQK Success!!\n"); + 
if (is2t) { + rtl8723_phy_path_a_standby(hw); + rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t); + } + + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0); + + if (t != 0) { + if (!rtlphy->rfpi_enable) + rtl8723_phy_pi_mode_switch(hw, false); + rtl8723_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + + rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb); + rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf); + + rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50); + rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_reg_c50); + if (is2t) { + rtl_set_bbreg(hw, 0xc58, MASKBYTE0, 0x50); + rtl_set_bbreg(hw, 0xc58, MASKBYTE0, tmp_reg_c58); + } + rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00); + rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00); + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n"); +} + +static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmpreg; + u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + + tmpreg = rtl_read_byte(rtlpriv, 0xd03); + + if ((tmpreg & 0x70) != 0) + rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F); + else + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + + if ((tmpreg & 0x70) != 0) { + rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS); + + if (is2t) + rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00, + MASK12BITS); + + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, + (rf_a_mode & 0x8FFFF) | 0x10000); + + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, + (rf_b_mode & 0x8FFFF) | 0x10000); + } + lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); + + rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0); + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a); + + mdelay(100); + + rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0); + + if ((tmpreg & 0x70) != 0) { + rtl_write_byte(rtlpriv, 0xd03, tmpreg); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode); + + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, + MASK12BITS, rf_b_mode); + } else { + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); +} + +static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, + bool bmain, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n"); + + if (is_hal_stop(rtlhal)) { + u8 u1btmp; + u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0); + rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7)); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + } + if (is2t) { + if (bmain) + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(6), 0x1); + else + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(6), 0x2); + } else { + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0); + rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201); + + /* We use the RF definition of MAIN and AUX, + * left antenna and right antenna repectively. + * Default output at AUX. 
+ */ + if (bmain) { + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(14) | BIT(13) | BIT(12), 0); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(4) | BIT(3), 0); + if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 0); + } else { + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(14) | BIT(13) | BIT(12), 1); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(4) | BIT(3), 1); + if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) + rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 1); + } + } +} + +#undef IQK_ADDA_REG_NUM +#undef IQK_DELAY_TIME + +void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + long result[4][8]; + u8 i, final_candidate; + bool patha_ok, pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, + reg_ecc, reg_tmp = 0; + bool is12simular, is13simular, is23simular; + u32 iqk_bb_reg[9] = { + ROFDM0_XARXIQIMBALANCE, + ROFDM0_XBRXIQIMBALANCE, + ROFDM0_ECCATHRESHOLD, + ROFDM0_AGCRSSITABLE, + ROFDM0_XATXIQIMBALANCE, + ROFDM0_XBTXIQIMBALANCE, + ROFDM0_XCTXAFE, + ROFDM0_XDTXAFE, + ROFDM0_RXIQEXTANTA + }; + + if (recovery) { + rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 9); + return; + } + + for (i = 0; i < 8; i++) { + result[0][i] = 0; + result[1][i] = 0; + result[2][i] = 0; + result[3][i] = 0; + } + final_candidate = 0xff; + patha_ok = false; + pathb_ok = false; + is12simular = false; + is23simular = false; + is13simular = false; + for (i = 0; i < 3; i++) { + if (get_rf_type(rtlphy) == RF_2T2R) + _rtl8723be_phy_iq_calibrate(hw, result, i, true); + else + _rtl8723be_phy_iq_calibrate(hw, result, i, false); + if (i == 1) { + is12simular = phy_similarity_cmp(hw, result, 0, 1); + if (is12simular) { + final_candidate = 0; + break; + } + } + if (i == 2) { + is13simular = phy_similarity_cmp(hw, result, 0, 2); + if (is13simular) { + final_candidate = 0; + break; + } + is23simular = phy_similarity_cmp(hw, result, 1, 2); + if (is23simular) { + final_candidate = 1; + } else { + for (i = 0; i < 8; i++) + reg_tmp += result[3][i]; + + if (reg_tmp != 0) + final_candidate = 3; + else + final_candidate = 0xFF; + } + } + } + for (i = 0; i < 4; i++) { + reg_e94 = result[i][0]; + reg_e9c = result[i][1]; + reg_ea4 = result[i][2]; + reg_eac = result[i][3]; + reg_eb4 = result[i][4]; + reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; + } + if (final_candidate != 0xff) { + reg_e94 = result[final_candidate][0]; + rtlphy->reg_e94 = reg_e94; + reg_e9c = result[final_candidate][1]; + rtlphy->reg_e9c = reg_e9c; + reg_ea4 = result[final_candidate][2]; + reg_eac = result[final_candidate][3]; + reg_eb4 = result[final_candidate][4]; + rtlphy->reg_eb4 = reg_eb4; + reg_ebc = result[final_candidate][5]; + rtlphy->reg_ebc = reg_ebc; + reg_ec4 = result[final_candidate][6]; + reg_ecc = result[final_candidate][7]; + patha_ok = true; + pathb_ok = true; + } else { + rtlphy->reg_e94 = 0x100; + rtlphy->reg_eb4 = 0x100; + rtlphy->reg_e9c = 0x0; + rtlphy->reg_ebc = 0x0; + } + if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ + rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result, + final_candidate, + (reg_ea4 == 0)); + if (final_candidate != 0xFF) { + for (i = 0; i < IQK_MATRIX_REG_NUM; i++) + rtlphy->iqk_matrix[0].value[0][i] = + result[final_candidate][i]; + rtlphy->iqk_matrix[0].iqk_done = true; + } + rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9); +} + 
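Editor's note (not part of the patch): rtl8723be_phy_iq_calibrate() above runs the full IQK pass up to three times and only keeps a result when two passes agree, otherwise falling back to row 3 of the result table (filled in by phy_similarity_cmp()) or to the neutral 0x100/0x0 defaults. The standalone C sketch below restates just that candidate-selection policy, with the hardware pass and the similarity test abstracted behind callbacks; the names (pick_iqk_candidate, iqk_run_fn, iqk_cmp_fn) are illustrative and do not exist in the driver.

	/* Illustrative only: mirrors the selection logic in
	 * rtl8723be_phy_iq_calibrate(); all hardware access is
	 * hidden behind the two callbacks.
	 */
	typedef void (*iqk_run_fn)(long result[][8], unsigned char pass);
	typedef int (*iqk_cmp_fn)(long result[][8],
				  unsigned char c1, unsigned char c2);

	/* Returns the row of result[] to program, or 0xFF if no run is
	 * considered trustworthy.
	 */
	static unsigned char pick_iqk_candidate(long result[4][8],
						iqk_run_fn run,
						iqk_cmp_fn similar)
	{
		unsigned char i, j;
		long sum = 0;

		for (i = 0; i < 4; i++)
			for (j = 0; j < 8; j++)
				result[i][j] = 0;

		for (i = 0; i < 3; i++) {
			run(result, i);		/* one complete IQK pass */

			if (i == 1 && similar(result, 0, 1))
				return 0;	/* first two passes agree */

			if (i == 2) {
				if (similar(result, 0, 2))
					return 0;
				if (similar(result, 1, 2))
					return 1;
				/* No two passes agree; the compare step may
				 * have merged usable values into row 3, so
				 * use that row only if it is non-empty.
				 */
				for (j = 0; j < 8; j++)
					sum += result[3][j];
				return sum ? 3 : 0xFF;
			}
		}
		return 0xFF;	/* not reached; silences the compiler */
	}

A caller would treat a 0xFF return the same way the driver function does: skip writing back calibration data and program the defaults (reg_e94 = reg_eb4 = 0x100, reg_e9c = reg_ebc = 0x0) instead.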
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); + u32 timeout = 2000, timecount = 0; + + while (rtlpriv->mac80211.act_scanning && timecount < timeout) { + udelay(50); + timecount += 50; + } + + rtlphy->lck_inprogress = true; + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + "LCK:Start!!! currentband %x delay %d ms\n", + rtlhal->current_bandtype, timecount); + + _rtl8723be_phy_lc_calibrate(hw, false); + + rtlphy->lck_inprogress = false; +} + +void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (rtlphy->apk_done) + return; + + return; +} + +void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) +{ + _rtl8723be_phy_set_rfpath_switch(hw, bmain, false); +} + +static void rtl8723be_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + rtlpriv->dm_digtable.cur_igvalue = + rtlphy->initgain_backup.xaagccore1; + /*rtl92c_dm_write_dig(hw);*/ + rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel); + rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83); + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + rtlphy->initgain_backup.xaagccore1 = + rtlpriv->dm_digtable.cur_igvalue; + rtlpriv->dm_digtable.cur_igvalue = 0x17; + rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "(%#x)\n", rtlphy->current_io_type); +} + +bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + bool postprocessing = false; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "-->IO Cmd(%#x), set_io_inprogress(%d)\n", + iotype, rtlphy->set_io_inprogress); + do { + switch (iotype) { + case IO_CMD_RESUME_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Resume DM after scan.\n"); + postprocessing = true; + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + "[IO CMD] Pause DM before scan.\n"); + postprocessing = true; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + break; + } + } while (false); + if (postprocessing && !rtlphy->set_io_inprogress) { + rtlphy->set_io_inprogress = true; + rtlphy->current_io_type = iotype; + } else { + return false; + } + rtl8723be_phy_set_io(hw); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype); + return true; +} + +static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); +} + +static void _rtl8723be_phy_set_rf_sleep(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 
0x00); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); +} + +static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool bresult = true; + u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; + + switch (rfpwr_state) { + case ERFON: + if ((ppsc->rfpwr_state == ERFOFF) && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + bool rtstatus; + u32 initialize_count = 0; + do { + initialize_count++; + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic enable\n"); + rtstatus = rtl_ps_enable_nic(hw); + } while (!rtstatus && (initialize_count < 10)); + RT_CLEAR_PS_LEVEL(ppsc, + RT_RF_OFF_LEVL_HALT_NIC); + } else { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "Set ERFON sleeped:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc->last_sleep_jiffies)); + ppsc->last_awake_jiffies = jiffies; + rtl8723be_phy_set_rf_on(hw); + } + if (mac->link_state == MAC80211_LINKED) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK); + else + rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK); + break; + case ERFOFF: + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "eRf Off/Sleep: %d times " + "TcbBusyQueue[%d] =%d before " + "doze!\n", (i + 1), queue_id, + skb_queue_len(&ring->queue)); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "\n ERFSLEEP: %d times " + "TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); + break; + } + } + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic disable\n"); + rtl_ps_disable_nic(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_POWER_OFF); + } + } + break; + case ERFSLEEP: + if (ppsc->rfpwr_state == ERFOFF) + break; + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "eRf Off/Sleep: %d times " + "TcbBusyQueue[%d] =%d before " + "doze!\n", (i + 1), queue_id, + skb_queue_len(&ring->queue)); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "\n ERFSLEEP: %d times " + "TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue)); + break; + } + } + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + "Set ERFSLEEP awaked:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies)); + ppsc->last_sleep_jiffies = jiffies; + _rtl8723be_phy_set_rf_sleep(hw); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "switch case not process\n"); + bresult = false; + break; + } + if (bresult) + ppsc->rfpwr_state = rfpwr_state; + return bresult; +} + +bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_ps_ctl *ppsc = 
rtl_psc(rtl_priv(hw)); + + bool bresult = false; + + if (rfpwr_state == ppsc->rfpwr_state) + return bresult; + bresult = _rtl8723be_phy_set_rf_power_state(hw, rfpwr_state); + return bresult; +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/phy.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/phy.h @@ -0,0 +1,217 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_PHY_H__ +#define __RTL8723BE_PHY_H__ + +/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/ +#define MAX_TX_COUNT 4 +#define TX_1S 0 +#define TX_2S 1 + +#define MAX_POWER_INDEX 0x3F + +#define MAX_PRECMD_CNT 16 +#define MAX_RFDEPENDCMD_CNT 16 +#define MAX_POSTCMD_CNT 16 + +#define MAX_DOZE_WAITING_TIMES_9x 64 + +#define RT_CANNOT_IO(hw) false +#define HIGHPOWER_RADIOA_ARRAYLEN 22 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_BB_REG_NUM 9 +#define MAX_TOLERANCE 5 +#define IQK_DELAY_TIME 10 +#define index_mapping_NUM 15 + +#define APK_BB_REG_NUM 5 +#define APK_AFE_REG_NUM 16 +#define APK_CURVE_REG_NUM 4 +#define PATH_NUM 1 + +#define LOOP_LIMIT 5 +#define MAX_STALL_TIME 50 +#define ANTENNADIVERSITYVALUE 0x80 +#define MAX_TXPWR_IDX_NMODE_92S 63 +#define RESET_CNT_LIMIT 3 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_MAC_REG_NUM 4 + +#define RF6052_MAX_PATH 2 + +#define CT_OFFSET_MAC_ADDR 0X16 + +#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A +#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 +#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66 +#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69 +#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C + +#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F +#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72 + +#define CT_OFFSET_CHANNEL_PLAH 0x75 +#define CT_OFFSET_THERMAL_METER 0x78 +#define CT_OFFSET_RF_OPTION 0x79 +#define CT_OFFSET_VERSION 0x7E +#define CT_OFFSET_CUSTOMER_ID 0x7F + +#define RTL92C_MAX_PATH_NUM 2 + +enum hw90_block_e { + HW90_BLOCK_MAC = 0, + HW90_BLOCK_PHY0 = 1, + HW90_BLOCK_PHY1 = 2, + HW90_BLOCK_RF = 3, + HW90_BLOCK_MAXIMUM = 4, +}; + +enum baseband_config_type { + BASEBAND_CONFIG_PHY_REG = 0, + BASEBAND_CONFIG_AGC_TAB = 1, +}; + +enum ra_offset_area { + RA_OFFSET_LEGACY_OFDM1, + RA_OFFSET_LEGACY_OFDM2, + RA_OFFSET_HT_OFDM1, + RA_OFFSET_HT_OFDM2, + RA_OFFSET_HT_OFDM3, + RA_OFFSET_HT_OFDM4, + RA_OFFSET_HT_CCK, +}; + +enum antenna_path { + ANTENNA_NONE, + ANTENNA_D, + ANTENNA_C, + ANTENNA_CD, + ANTENNA_B, + ANTENNA_BD, + ANTENNA_BC, + ANTENNA_BCD, + ANTENNA_A, + ANTENNA_AD, + ANTENNA_AC, + ANTENNA_ACD, + ANTENNA_AB, + ANTENNA_ABD, + ANTENNA_ABC, + ANTENNA_ABCD +}; + +struct r_antenna_select_ofdm { + u32 r_tx_antenna:4; + u32 r_ant_l:4; + u32 r_ant_non_ht:4; + u32 r_ant_ht1:4; + u32 
r_ant_ht2:4; + u32 r_ant_ht_s1:4; + u32 r_ant_non_ht_s1:4; + u32 ofdm_txsc:2; + u32 reserved:2; +}; + +struct r_antenna_select_cck { + u8 r_cckrx_enable_2:2; + u8 r_cckrx_enable:2; + u8 r_ccktx_enable:4; +}; + + +struct efuse_contents { + u8 mac_addr[ETH_ALEN]; + u8 cck_tx_power_idx[6]; + u8 ht40_1s_tx_power_idx[6]; + u8 ht40_2s_tx_power_idx_diff[3]; + u8 ht20_tx_power_idx_diff[3]; + u8 ofdm_tx_power_idx_diff[3]; + u8 ht40_max_power_offset[3]; + u8 ht20_max_power_offset[3]; + u8 channel_plan; + u8 thermal_meter; + u8 rf_option[5]; + u8 version; + u8 oem_id; + u8 regulatory; +}; + +struct tx_power_struct { + u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_txpowerdiff; + u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 pwrgroup_cnt; + u32 mcs_original_offset[4][16]; +}; + +enum _ANT_DIV_TYPE { + NO_ANTDIV = 0xFF, + CG_TRX_HW_ANTDIV = 0x01, + CGCS_RX_HW_ANTDIV = 0x02, + FIXED_HW_ANTDIV = 0x03, + CG_TRX_SMART_ANTDIV = 0x04, + CGCS_RX_SW_ANTDIV = 0x05, +}; + +u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask); +void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data); +bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw); +bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw); +bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw); +void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, + long *powerlevel); +void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, + u8 channel); +void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); +void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw); +u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw); +void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, + bool b_recovery); +void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); +bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); +bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c @@ -0,0 +1,106 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "pwrseqcmd.h" +#include "pwrseq.h" + + +/* drivers should parse below arrays and do the corresponding actions */ +/*3 Power on Array*/ +struct wlan_pwr_cfg rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_CARDEMU_TO_ACT + RTL8723B_TRANS_END +}; + +/*3Radio off GPIO Array */ +struct wlan_pwr_cfg rtl8723B_radio_off_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_ACT_TO_CARDEMU + RTL8723B_TRANS_END +}; + +/*3Card Disable Array*/ +struct wlan_pwr_cfg rtl8723B_card_disable_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_ACT_TO_CARDEMU + RTL8723B_TRANS_CARDEMU_TO_CARDDIS + RTL8723B_TRANS_END +}; + +/*3 Card Enable Array*/ +struct wlan_pwr_cfg rtl8723B_card_enable_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_CARDDIS_TO_CARDEMU + RTL8723B_TRANS_CARDEMU_TO_ACT + RTL8723B_TRANS_END +}; + +/*3Suspend Array*/ +struct wlan_pwr_cfg rtl8723B_suspend_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_ACT_TO_CARDEMU + RTL8723B_TRANS_CARDEMU_TO_SUS + RTL8723B_TRANS_END +}; + +/*3 Resume Array*/ +struct wlan_pwr_cfg rtl8723B_resume_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_SUS_TO_CARDEMU + RTL8723B_TRANS_CARDEMU_TO_ACT + RTL8723B_TRANS_END +}; + +/*3HWPDN Array*/ +struct wlan_pwr_cfg rtl8723B_hwpdn_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723B_TRANS_END_STEPS] = { + RTL8723B_TRANS_ACT_TO_CARDEMU + RTL8723B_TRANS_CARDEMU_TO_PDN + RTL8723B_TRANS_END +}; + +/*3 Enter LPS */ +struct wlan_pwr_cfg rtl8723B_enter_lps_flow[RTL8723B_TRANS_ACT_TO_LPS_STEPS + + RTL8723B_TRANS_END_STEPS] = { + /*FW behavior*/ + RTL8723B_TRANS_ACT_TO_LPS + RTL8723B_TRANS_END +}; + +/*3 Leave LPS */ +struct wlan_pwr_cfg rtl8723B_leave_lps_flow[RTL8723B_TRANS_LPS_TO_ACT_STEPS + + RTL8723B_TRANS_END_STEPS] = { + /*FW behavior*/ + RTL8723B_TRANS_LPS_TO_ACT + RTL8723B_TRANS_END +}; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h @@ -0,0 +1,305 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. 
+ * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_PWRSEQ_H__ +#define __RTL8723BE_PWRSEQ_H__ + +#include "pwrseqcmd.h" +/* Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd + * There are 6 HW Power States: + * 0: POFF--Power Off + * 1: PDN--Power Down + * 2: CARDEMU--Card Emulation + * 3: ACT--Active Mode + * 4: LPS--Low Power State + * 5: SUS--Suspend + * + * The transition from different states are defined below + * TRANS_CARDEMU_TO_ACT + * TRANS_ACT_TO_CARDEMU + * TRANS_CARDEMU_TO_SUS + * TRANS_SUS_TO_CARDEMU + * TRANS_CARDEMU_TO_PDN + * TRANS_ACT_TO_LPS + * TRANS_LPS_TO_ACT + * + * TRANS_END + */ +#define RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS 23 +#define RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS 15 +#define RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS 15 +#define RTL8723B_TRANS_SUS_TO_CARDEMU_STEPS 15 +#define RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS 15 +#define RTL8723B_TRANS_PDN_TO_CARDEMU_STEPS 15 +#define RTL8723B_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8723B_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8723B_TRANS_END_STEPS 1 + +#define RTL8723B_TRANS_CARDEMU_TO_ACT \ + {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS}, \ + {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)|BIT(2)), 0}, \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \ + {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, \ + {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x005A, 
PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3), BIT(3)}, \ + {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)}, + +#define RTL8723B_TRANS_ACT_TO_CARDEMU \ + {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \ + {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \ + {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), 0}, \ + {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, BIT(5), BIT(5)}, \ + {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, BIT(0), 0}, + +#define RTL8723B_TRANS_CARDEMU_TO_SUS \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), (BIT(4) | BIT(3))}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, + +#define RTL8723B_TRANS_SUS_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, + +#define RTL8723B_TRANS_CARDEMU_TO_CARDDIS \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)}, \ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + 
PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, + +#define RTL8723B_TRANS_CARDDIS_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, + +#define RTL8723B_TRANS_CARDEMU_TO_PDN \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \ + PWR_INTF_SDIO_MSK | PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, \ + PWR_CMD_WRITE, 0xFF, 0x20}, \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, + +#define RTL8723B_TRANS_PDN_TO_CARDEMU \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, + +#define RTL8723B_TRANS_ACT_TO_LPS \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03}, \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0}, \ + {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00}, \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, + +#define RTL8723B_TRANS_LPS_TO_ACT \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \ + PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, \ + {0xFE58, 
PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \ + {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \ + PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, + +#define RTL8723B_TRANS_END \ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \ + PWR_CMD_END, 0, 0}, + +extern struct wlan_pwr_cfg rtl8723B_power_on_flow + [RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_radio_off_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_card_disable_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_card_enable_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_suspend_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_resume_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_hwpdn_flow + [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_enter_lps_flow + [RTL8723B_TRANS_ACT_TO_LPS_STEPS + + RTL8723B_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8723B_leave_lps_flow + [RTL8723B_TRANS_LPS_TO_ACT_STEPS + + RTL8723B_TRANS_END_STEPS]; + +/* RTL8723 Power Configuration CMDs for PCIe interface */ +#define RTL8723_NIC_PWR_ON_FLOW rtl8723B_power_on_flow +#define RTL8723_NIC_RF_OFF_FLOW rtl8723B_radio_off_flow +#define RTL8723_NIC_DISABLE_FLOW rtl8723B_card_disable_flow +#define RTL8723_NIC_ENABLE_FLOW rtl8723B_card_enable_flow +#define RTL8723_NIC_SUSPEND_FLOW rtl8723B_suspend_flow +#define RTL8723_NIC_RESUME_FLOW rtl8723B_resume_flow +#define RTL8723_NIC_PDN_FLOW rtl8723B_hwpdn_flow +#define RTL8723_NIC_LPS_ENTER_FLOW rtl8723B_enter_lps_flow +#define RTL8723_NIC_LPS_LEAVE_FLOW rtl8723B_leave_lps_flow + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c @@ -0,0 +1,140 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "pwrseq.h" + +/* Description: + * This routine deal with the Power Configuration CMDs + * parsing for RTL8723/RTL8188E Series IC. + * Assumption: + * We should follow specific format which was released from HW SD. + * + * 2011.07.07, added by Roger. + */ +bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, + u8 fab_version, u8 interface_type, + struct wlan_pwr_cfg pwrcfgcmd[]) + +{ + struct wlan_pwr_cfg pwr_cfg_cmd = {0}; + bool b_polling_bit = false; + u32 ary_idx = 0; + u8 value = 0; + u32 offset = 0; + u32 polling_count = 0; + u32 max_polling_cnt = 5000; + + do { + pwr_cfg_cmd = pwrcfgcmd[ary_idx]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtlbe_hal_pwrseqcmdparsing(): " + "offset(%#x),cut_msk(%#x), fab_msk(%#x)," + "interface_msk(%#x), base(%#x), " + "cmd(%#x), msk(%#x), value(%#x)\n", + GET_PWR_CFG_OFFSET(pwr_cfg_cmd), + GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd), + GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd), + GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd), + GET_PWR_CFG_BASE(pwr_cfg_cmd), + GET_PWR_CFG_CMD(pwr_cfg_cmd), + GET_PWR_CFG_MASK(pwr_cfg_cmd), + GET_PWR_CFG_VALUE(pwr_cfg_cmd)); + + if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) && + (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) && + (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) { + switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) { + case PWR_CMD_READ: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtlbe_hal_pwrseqcmdparsing(): " + "PWR_CMD_READ\n"); + break; + case PWR_CMD_WRITE: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtlbe_hal_pwrseqcmdparsing(): " + "PWR_CMD_WRITE\n"); + offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd); + + /*Read the value from system register*/ + value = rtl_read_byte(rtlpriv, offset); + value &= (~(GET_PWR_CFG_MASK(pwr_cfg_cmd))); + value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd) + & GET_PWR_CFG_MASK(pwr_cfg_cmd)); + + /*Write the value back to sytem register*/ + rtl_write_byte(rtlpriv, offset, value); + break; + case PWR_CMD_POLLING: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtlbe_hal_pwrseqcmdparsing(): " + "PWR_CMD_POLLING\n"); + b_polling_bit = false; + offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd); + + do { + value = rtl_read_byte(rtlpriv, offset); + + value &= GET_PWR_CFG_MASK(pwr_cfg_cmd); + if (value == + (GET_PWR_CFG_VALUE(pwr_cfg_cmd) & + GET_PWR_CFG_MASK(pwr_cfg_cmd))) + b_polling_bit = true; + else + udelay(10); + + if (polling_count++ > max_polling_cnt) + return false; + + } while (!b_polling_bit); + break; + case PWR_CMD_DELAY: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtlbe_hal_pwrseqcmdparsing(): " + "PWR_CMD_DELAY\n"); + if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) == + PWRSEQ_DELAY_US) + udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd)); + else + mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd)); + break; + case 
PWR_CMD_END: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "rtlbe_hal_pwrseqcmdparsing(): " + "PWR_CMD_END\n"); + return true; + break; + default: + RT_ASSERT(false, + "rtlbe_hal_pwrseqcmdparsing(): " + "Unknown CMD!!\n"); + break; + } + } + + ary_idx++; + } while (1); + + return true; +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h @@ -0,0 +1,95 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_PWRSEQCMD_H__ +#define __RTL8723BE_PWRSEQCMD_H__ + +#include "../wifi.h" +/*---------------------------------------------*/ +/*The value of cmd: 4 bits */ +/*---------------------------------------------*/ +#define PWR_CMD_READ 0x00 +#define PWR_CMD_WRITE 0x01 +#define PWR_CMD_POLLING 0x02 +#define PWR_CMD_DELAY 0x03 +#define PWR_CMD_END 0x04 + +/* define the base address of each block */ +#define PWR_BASEADDR_MAC 0x00 +#define PWR_BASEADDR_USB 0x01 +#define PWR_BASEADDR_PCIE 0x02 +#define PWR_BASEADDR_SDIO 0x03 + +#define PWR_INTF_SDIO_MSK BIT(0) +#define PWR_INTF_USB_MSK BIT(1) +#define PWR_INTF_PCI_MSK BIT(2) +#define PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +#define PWR_FAB_TSMC_MSK BIT(0) +#define PWR_FAB_UMC_MSK BIT(1) +#define PWR_FAB_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +#define PWR_CUT_TESTCHIP_MSK BIT(0) +#define PWR_CUT_A_MSK BIT(1) +#define PWR_CUT_B_MSK BIT(2) +#define PWR_CUT_C_MSK BIT(3) +#define PWR_CUT_D_MSK BIT(4) +#define PWR_CUT_E_MSK BIT(5) +#define PWR_CUT_F_MSK BIT(6) +#define PWR_CUT_G_MSK BIT(7) +#define PWR_CUT_ALL_MSK 0xFF + + +enum pwrseq_delay_unit { + PWRSEQ_DELAY_US, + PWRSEQ_DELAY_MS, +}; + +struct wlan_pwr_cfg { + u16 offset; + u8 cut_msk; + u8 fab_msk:4; + u8 interface_msk:4; + u8 base:4; + u8 cmd:4; + u8 msk; + u8 value; + +}; + +#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset +#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk +#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk +#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk +#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base +#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd +#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk +#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value + +bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, + u8 fab_version, u8 interface_type, + struct wlan_pwr_cfg pwrcfgcmd[]); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/reg.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/reg.h @@ -0,0 +1,2293 @@ +/****************************************************************************** + * 
+ * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_REG_H__ +#define __RTL8723BE_REG_H__ + +#define TXPKT_BUF_SELECT 0x69 +#define RXPKT_BUF_SELECT 0xA5 +#define DISABLE_TRXPKT_BUF_ACCESS 0x0 + +#define REG_SYS_ISO_CTRL 0x0000 +#define REG_SYS_FUNC_EN 0x0002 +#define REG_APS_FSMCO 0x0004 +#define REG_SYS_CLKR 0x0008 +#define REG_9346CR 0x000A +#define REG_EE_VPD 0x000C +#define REG_AFE_MISC 0x0010 +#define REG_SPS0_CTRL 0x0011 +#define REG_SPS_OCP_CFG 0x0018 +#define REG_RSV_CTRL 0x001C +#define REG_RF_CTRL 0x001F +#define REG_LDOA15_CTRL 0x0020 +#define REG_LDOV12D_CTRL 0x0021 +#define REG_LDOHCI12_CTRL 0x0022 +#define REG_LPLDO_CTRL 0x0023 +#define REG_AFE_XTAL_CTRL 0x0024 +/* 1.5v for 8188EE test chip, 1.4v for MP chip */ +#define REG_AFE_LDO_CTRL 0x0027 +#define REG_AFE_PLL_CTRL 0x0028 +#define REG_MAC_PHY_CTRL 0x002c +#define REG_EFUSE_CTRL 0x0030 +#define REG_EFUSE_TEST 0x0034 +#define REG_PWR_DATA 0x0038 +#define REG_CAL_TIMER 0x003C +#define REG_ACLK_MON 0x003E +#define REG_GPIO_MUXCFG 0x0040 +#define REG_GPIO_IO_SEL 0x0042 +#define REG_MAC_PINMUX_CFG 0x0043 +#define REG_GPIO_PIN_CTRL 0x0044 +#define REG_GPIO_INTM 0x0048 +#define REG_LEDCFG0 0x004C +#define REG_LEDCFG1 0x004D +#define REG_LEDCFG2 0x004E +#define REG_LEDCFG3 0x004F +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c +#define REG_GPIO_PIN_CTRL_2 0x0060 +#define REG_GPIO_IO_SEL_2 0x0062 +#define REG_MULTI_FUNC_CTRL 0x0068 +#define REG_GPIO_OUTPUT 0x006c +#define REG_AFE_XTAL_CTRL_EXT 0x0078 +#define REG_XCK_OUT_CTRL 0x007c +#define REG_MCUFWDL 0x0080 +#define REG_WOL_EVENT 0x0081 +#define REG_MCUTSTCFG 0x0084 + + +#define REG_HIMR 0x00B0 +#define REG_HISR 0x00B4 +#define REG_HIMRE 0x00B8 +#define REG_HISRE 0x00BC + +#define REG_EFUSE_ACCESS 0x00CF + +#define REG_BIST_SCAN 0x00D0 +#define REG_BIST_RPT 0x00D4 +#define REG_BIST_ROM_RPT 0x00D8 +#define REG_USB_SIE_INTF 0x00E0 +#define REG_PCIE_MIO_INTF 0x00E4 +#define REG_PCIE_MIO_INTD 0x00E8 +#define REG_HPON_FSM 0x00EC +#define REG_SYS_CFG 0x00F0 +#define REG_GPIO_OUTSTS 0x00F4 +#define REG_SYS_CFG1 0x00F0 +#define REG_ROM_VERSION 0x00FD + +#define REG_CR 0x0100 +#define REG_PBP 0x0104 +#define REG_PKT_BUFF_ACCESS_CTRL 0x0106 +#define REG_TRXDMA_CTRL 0x010C +#define REG_TRXFF_BNDY 0x0114 +#define REG_TRXFF_STATUS 0x0118 +#define REG_RXFF_PTR 0x011C + +#define REG_CPWM 0x012F +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 +#define REG_PKTBUF_DBG_CTRL 0x0140 +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 +#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL + 2) + +#define REG_TC0_CTRL 0x0150 +#define REG_TC1_CTRL 0x0154 +#define REG_TC2_CTRL 0x0158 +#define REG_TC3_CTRL 0x015C 
+#define REG_TC4_CTRL 0x0160 +#define REG_TCUNIT_BASE 0x0164 +#define REG_MBIST_START 0x0174 +#define REG_MBIST_DONE 0x0178 +#define REG_MBIST_FAIL 0x017C +#define REG_32K_CTRL 0x0194 +#define REG_C2HEVT_MSG_NORMAL 0x01A0 +#define REG_C2HEVT_CLEAR 0x01AF +#define REG_C2HEVT_MSG_TEST 0x01B8 +#define REG_MCUTST_1 0x01c0 +#define REG_FMETHR 0x01C8 +#define REG_HMETFR 0x01CC +#define REG_HMEBOX_0 0x01D0 +#define REG_HMEBOX_1 0x01D4 +#define REG_HMEBOX_2 0x01D8 +#define REG_HMEBOX_3 0x01DC + +#define REG_LLT_INIT 0x01E0 +#define REG_BB_ACCEESS_CTRL 0x01E8 +#define REG_BB_ACCESS_DATA 0x01EC + +#define REG_HMEBOX_EXT_0 0x01F0 +#define REG_HMEBOX_EXT_1 0x01F4 +#define REG_HMEBOX_EXT_2 0x01F8 +#define REG_HMEBOX_EXT_3 0x01FC + +#define REG_RQPN 0x0200 +#define REG_FIFOPAGE 0x0204 +#define REG_TDECTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 0x020C +#define REG_TXDMA_STATUS 0x0210 +#define REG_RQPN_NPQ 0x0214 + +#define REG_RXDMA_AGG_PG_TH 0x0280 +/* FW shall update this register before FW write RXPKT_RELEASE_POLL to 1 */ +#define REG_FW_UPD_RDPTR 0x0284 +/* Control the RX DMA.*/ +#define REG_RXDMA_CONTROL 0x0286 +/* The number of packets in RXPKTBUF. */ +#define REG_RXPKT_NUM 0x0287 + +#define REG_PCIE_CTRL_REG 0x0300 +#define REG_INT_MIG 0x0304 +#define REG_BCNQ_DESA 0x0308 +#define REG_HQ_DESA 0x0310 +#define REG_MGQ_DESA 0x0318 +#define REG_VOQ_DESA 0x0320 +#define REG_VIQ_DESA 0x0328 +#define REG_BEQ_DESA 0x0330 +#define REG_BKQ_DESA 0x0338 +#define REG_RX_DESA 0x0340 + +#define REG_DBI 0x0348 +#define REG_MDIO 0x0354 +#define REG_DBG_SEL 0x0360 +#define REG_PCIE_HRPWM 0x0361 +#define REG_PCIE_HCPWM 0x0363 +#define REG_UART_CTRL 0x0364 +#define REG_WATCH_DOG 0x0368 +#define REG_UART_TX_DESA 0x0370 +#define REG_UART_RX_DESA 0x0378 + + +#define REG_HDAQ_DESA_NODEF 0x0000 +#define REG_CMDQ_DESA_NODEF 0x0000 + +#define REG_VOQ_INFORMATION 0x0400 +#define REG_VIQ_INFORMATION 0x0404 +#define REG_BEQ_INFORMATION 0x0408 +#define REG_BKQ_INFORMATION 0x040C +#define REG_MGQ_INFORMATION 0x0410 +#define REG_HGQ_INFORMATION 0x0414 +#define REG_BCNQ_INFORMATION 0x0418 +#define REG_TXPKT_EMPTY 0x041A + + +#define REG_CPU_MGQ_INFORMATION 0x041C +#define REG_FWHW_TXQ_CTRL 0x0420 +#define REG_HWSEQ_CTRL 0x0423 +#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 +#define REG_TXPKTBUF_MGQ_BDNY 0x0425 +#define REG_MULTI_BCNQ_EN 0x0426 +#define REG_MULTI_BCNQ_OFFSET 0x0427 +#define REG_SPEC_SIFS 0x0428 +#define REG_RL 0x042A +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RRSR 0x0440 +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x0448 +#define REG_ARFR2 0x044C +#define REG_ARFR3 0x0450 +#define REG_AMPDU_MAX_TIME 0x0456 +#define REG_AGGLEN_LMT 0x0458 +#define REG_AMPDU_MIN_SPACE 0x045C +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D +#define REG_FAST_EDCA_CTRL 0x0460 +#define REG_RD_RESP_PKT_TH 0x0463 +#define REG_INIRTS_RATE_SEL 0x0480 +#define REG_INIDATA_RATE_SEL 0x0484 +#define REG_POWER_STATUS 0x04A4 +#define REG_POWER_STAGE1 0x04B4 +#define REG_POWER_STAGE2 0x04B8 +#define REG_PKT_LIFE_TIME 0x04C0 +#define REG_STBC_SETTING 0x04C4 +#define REG_PROT_MODE_CTRL 0x04C8 +#define REG_BAR_MODE_CTRL 0x04CC +#define REG_RA_TRY_RATE_AGG_LMT 0x04CF +#define REG_EARLY_MODE_CONTROL 0x04D0 +#define REG_NQOS_SEQ 0x04DC +#define REG_QOS_SEQ 0x04DE +#define REG_NEED_CPU_HANDLE 0x04E0 +#define REG_PKT_LOSE_RPT 0x04E1 +#define REG_PTCL_ERR_STATUS 0x04E2 +#define REG_TX_RPT_CTRL 0x04EC +#define REG_TX_RPT_TIME 0x04F0 +#define REG_DUMMY 0x04FC + +#define REG_EDCA_VO_PARAM 0x0500 +#define REG_EDCA_VI_PARAM 0x0504 +#define 
REG_EDCA_BE_PARAM 0x0508 +#define REG_EDCA_BK_PARAM 0x050C +#define REG_BCNTCFG 0x0510 +#define REG_PIFS 0x0512 +#define REG_RDG_PIFS 0x0513 +#define REG_SIFS_CTX 0x0514 +#define REG_SIFS_TRX 0x0516 +#define REG_AGGR_BREAK_TIME 0x051A +#define REG_SLOT 0x051B +#define REG_TX_PTCL_CTRL 0x0520 +#define REG_TXPAUSE 0x0522 +#define REG_DIS_TXREQ_CLR 0x0523 +#define REG_RD_CTRL 0x0524 +#define REG_TBTT_PROHIBIT 0x0540 +#define REG_RD_NAV_NXT 0x0544 +#define REG_NAV_PROT_LEN 0x0546 +#define REG_BCN_CTRL 0x0550 +#define REG_USTIME_TSF 0x0551 +#define REG_MBID_NUM 0x0552 +#define REG_DUAL_TSF_RST 0x0553 +#define REG_BCN_INTERVAL 0x0554 +#define REG_MBSSID_BCN_SPACE 0x0554 +#define REG_DRVERLYINT 0x0558 +#define REG_BCNDMATIM 0x0559 +#define REG_ATIMWND 0x055A +#define REG_BCN_MAX_ERR 0x055D +#define REG_RXTSF_OFFSET_CCK 0x055E +#define REG_RXTSF_OFFSET_OFDM 0x055F +#define REG_TSFTR 0x0560 +#define REG_INIT_TSFTR 0x0564 +#define REG_SECONDARY_CCA_CTRL 0x0577 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 +#define REG_ACMHWCTRL 0x05C0 +#define REG_ACMRSTCTRL 0x05C1 +#define REG_ACMAVG 0x05C2 +#define REG_VO_ADMTIME 0x05C4 +#define REG_VI_ADMTIME 0x05C6 +#define REG_BE_ADMTIME 0x05C8 +#define REG_EDCA_RANDOM_GEN 0x05CC +#define REG_SCH_TXCMD 0x05D0 + +#define REG_APSD_CTRL 0x0600 +#define REG_BWOPMODE 0x0603 +#define REG_TCR 0x0604 +#define REG_RCR 0x0608 +#define REG_RX_PKT_LIMIT 0x060C +#define REG_RX_DLK_TIME 0x060D +#define REG_RX_DRVINFO_SZ 0x060F + +#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 +#define REG_MBIDCAMCFG 0x0628 + +#define REG_USTIME_EDCA 0x0638 +#define REG_MAC_SPEC_SIFS 0x063A +#define REG_RESP_SIFS_CCK 0x063C +#define REG_RESP_SIFS_OFDM 0x063E +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 + +#define REG_NAV_CTRL 0x0650 +#define REG_BACAMCMD 0x0654 +#define REG_BACAMCONTENT 0x0658 +#define REG_LBDLY 0x0660 +#define REG_FWDLY 0x0661 +#define REG_RXERR_RPT 0x0664 +#define REG_TRXPTCL_CTL 0x0668 + +#define REG_CAMCMD 0x0670 +#define REG_CAMWRITE 0x0674 +#define REG_CAMREAD 0x0678 +#define REG_CAMDBG 0x067C +#define REG_SECCFG 0x0680 + +#define REG_WOW_CTRL 0x0690 +#define REG_PSSTATUS 0x0691 +#define REG_PS_RX_INFO 0x0692 +#define REG_UAPSD_TID 0x0693 +#define REG_LPNAV_CTRL 0x0694 +#define REG_WKFMCAM_NUM 0x0698 +#define REG_WKFMCAM_RWD 0x069C +#define REG_RXFLTMAP0 0x06A0 +#define REG_RXFLTMAP1 0x06A2 +#define REG_RXFLTMAP2 0x06A4 +#define REG_BCN_PSR_RPT 0x06A8 +#define REG_CALB32K_CTRL 0x06AC +#define REG_PKT_MON_CTRL 0x06B4 +#define REG_BT_COEX_TABLE 0x06C0 +#define REG_WMAC_RESP_TXINFO 0x06D8 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_TEST_USB_TXQS 0xFE48 +#define REG_TEST_SIE_VID 0xFE60 +#define REG_TEST_SIE_PID 0xFE62 +#define REG_TEST_SIE_OPTIONAL 0xFE64 +#define REG_TEST_SIE_CHIRP_K 0xFE65 +#define REG_TEST_SIE_PHY 0xFE66 +#define REG_TEST_SIE_MAC_ADDR 0xFE70 +#define REG_TEST_SIE_STRING 0xFE80 + +#define REG_NORMAL_SIE_VID 0xFE60 +#define REG_NORMAL_SIE_PID 0xFE62 +#define REG_NORMAL_SIE_OPTIONAL 0xFE64 +#define REG_NORMAL_SIE_EP 0xFE65 +#define REG_NORMAL_SIE_PHY 0xFE68 +#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 +#define REG_NORMAL_SIE_STRING 0xFE80 + +#define CR9346 REG_9346CR +#define MSR (REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR + +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) + +#define PBP REG_PBP + +#define 
IDR0 MACIDR0 +#define IDR4 MACIDR4 + +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER + +#define INVALID_BBRF_VALUE 0x12345678 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN | CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN + +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) + +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL + 1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL + 2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL + 3) + +/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */ +#define HSIMR_GPIO12_0_INT_EN BIT(0) +#define HSIMR_SPS_OCP_INT_EN BIT(5) +#define HSIMR_RON_INT_EN BIT(6) +#define HSIMR_PDN_INT_EN BIT(7) +#define HSIMR_GPIO9_INT_EN BIT(25) + +/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) */ + +#define HSISR_GPIO12_0_INT BIT(0) +#define HSISR_SPS_OCP_INT BIT(5) +#define HSISR_RON_INT_EN BIT(6) +#define HSISR_PDNINT BIT(7) +#define HSISR_GPIO9_INT BIT(25) + +#define MSR_NOLINK 0x00 +#define MSR_ADHOC 0x01 +#define MSR_INFRA 0x02 +#define MSR_AP 0x03 + +#define RRSR_RSC_OFFSET 21 +#define RRSR_SHORT_OFFSET 23 +#define RRSR_RSC_BW_40M 0x600000 +#define RRSR_RSC_UPSUBCHNL 0x400000 +#define RRSR_RSC_LOWSUBCHNL 0x200000 +#define RRSR_SHORT 0x800000 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) +#define BRSR_ACKSHORTPMB BIT(23) + +#define RATR_1M 0x00000001 +#define RATR_2M 0x00000002 +#define RATR_55M 0x00000004 +#define RATR_11M 0x00000008 +#define RATR_6M 0x00000010 +#define RATR_9M 0x00000020 +#define RATR_12M 0x00000040 +#define RATR_18M 0x00000080 +#define RATR_24M 0x00000100 +#define RATR_36M 0x00000200 +#define RATR_48M 0x00000400 +#define RATR_54M 0x00000800 +#define RATR_MCS0 0x00001000 +#define RATR_MCS1 0x00002000 +#define RATR_MCS2 0x00004000 +#define RATR_MCS3 0x00008000 +#define RATR_MCS4 0x00010000 +#define RATR_MCS5 0x00020000 +#define RATR_MCS6 0x00040000 +#define RATR_MCS7 0x00080000 +#define RATR_MCS8 0x00100000 +#define RATR_MCS9 0x00200000 +#define RATR_MCS10 0x00400000 +#define RATR_MCS11 0x00800000 +#define RATR_MCS12 0x01000000 +#define RATR_MCS13 0x02000000 +#define RATR_MCS14 0x04000000 +#define RATR_MCS15 0x08000000 + +#define RATE_1M BIT(0) +#define RATE_2M BIT(1) +#define RATE_5_5M BIT(2) +#define RATE_11M BIT(3) +#define RATE_6M BIT(4) +#define RATE_9M BIT(5) +#define RATE_12M BIT(6) +#define RATE_18M BIT(7) +#define RATE_24M BIT(8) +#define RATE_36M BIT(9) +#define RATE_48M BIT(10) +#define RATE_54M BIT(11) +#define RATE_MCS0 BIT(12) +#define RATE_MCS1 BIT(13) +#define RATE_MCS2 BIT(14) +#define RATE_MCS3 BIT(15) +#define RATE_MCS4 BIT(16) +#define RATE_MCS5 BIT(17) +#define RATE_MCS6 BIT(18) +#define RATE_MCS7 BIT(19) +#define RATE_MCS8 BIT(20) +#define RATE_MCS9 BIT(21) +#define RATE_MCS10 BIT(22) +#define RATE_MCS11 BIT(23) +#define RATE_MCS12 BIT(24) +#define 
RATE_MCS13 BIT(25) +#define RATE_MCS14 BIT(26) +#define RATE_MCS15 BIT(27) + +#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M) +#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\ + RATR_24M | RATR_36M | RATR_48M | RATR_54M) +#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\ + RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\ + RATR_MCS6 | RATR_MCS7) +#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\ + RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\ + RATR_MCS14 | RATR_MCS15) + +#define BW_OPMODE_20MHZ BIT(2) +#define BW_OPMODE_5G BIT(1) +#define BW_OPMODE_11J BIT(0) + +#define CAM_VALID BIT(15) +#define CAM_NOTVALID 0x0000 +#define CAM_USEDK BIT(5) + +#define CAM_NONE 0x0 +#define CAM_WEP40 0x01 +#define CAM_TKIP 0x02 +#define CAM_AES 0x04 +#define CAM_WEP104 0x05 + +#define TOTAL_CAM_ENTRY 32 +#define HALF_CAM_ENTRY 16 + +#define CAM_WRITE BIT(16) +#define CAM_READ 0x00000000 +#define CAM_POLLINIG BIT(31) + +#define SCR_USEDK 0x01 +#define SCR_TXSEC_ENABLE 0x02 +#define SCR_RXSEC_ENABLE 0x04 + +#define WOW_PMEN BIT(0) +#define WOW_WOMEN BIT(1) +#define WOW_MAGIC BIT(2) +#define WOW_UWF BIT(3) + +/********************************************* +* 8723BE IMR/ISR bits +**********************************************/ +#define IMR_DISABLED 0x0 +/* IMR DW0(0x0060-0063) Bit 0-31 */ +#define IMR_TXCCK BIT(30) /* TXRPT interrupt when + * CCX bit of the packet is set + */ +#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */ +#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires, + * this bit is set to 1 + */ +#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires, + * this bit is set to 1 + */ +#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */ +#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */ +#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle + * indication interrupt + */ +#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */ +#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */ +#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is + * true, this bit is set to 1) + */ +#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt + * Extension for Win7 + */ +#define IMR_ATIMEND BIT(12) /* CTWidnow End or ATIM Window End */ +#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is + * true, this bit is set to 1) + */ +#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status, + * Write 1 clear + */ +#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status, + * Write 1 clear + */ +#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status, + * Write 1 clear + */ +#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */ +#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */ +#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */ +#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */ +#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */ +#define IMR_VODOK BIT(2) /* AC_VO DMA OK */ +#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */ +#define IMR_ROK BIT(0) /* Receive DMA OK */ + +/* IMR DW1(0x00B4-00B7) Bit 0-31 */ +#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */ +#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */ +#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */ +#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */ +#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */ +#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */ +#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */ +#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrup 7 */ +#define 
IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrup 6 */ +#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrup 5 */ +#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrup 4 */ +#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrup 3 */ +#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK Interrup 2 */ +#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrup 1 */ +#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */ +#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status, + * write 1 clear. + */ +#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status, + * Write 1 clear + */ +#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */ +#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */ + +#define HWSET_MAX_SIZE 512 +#define EFUSE_MAX_SECTION 64 +#define EFUSE_REAL_CONTENT_LEN 256 +#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data exclude header, + * dummy 7 bytes frome CP test + * and reserved 1byte. + */ + +#define EEPROM_DEFAULT_TSSI 0x0 +#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_BOARDTYPE 0x02 +#define EEPROM_DEFAULT_TXPOWER 0x1010 +#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 + +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_THERMALMETER 0x18 +#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 +#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 +#define EEPROM_DEFAULT_HT20_DIFF 2 +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 +#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 + +#define RF_OPTION1 0x79 +#define RF_OPTION2 0x7A +#define RF_OPTION3 0x7B +#define RF_OPTION4 0xC3 + +#define EEPROM_DEFAULT_PID 0x1234 +#define EEPROM_DEFAULT_VID 0x5678 +#define EEPROM_DEFAULT_CUSTOMERID 0xAB +#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD +#define EEPROM_DEFAULT_VERSION 0 + +#define EEPROM_CHANNEL_PLAN_FCC 0x0 +#define EEPROM_CHANNEL_PLAN_IC 0x1 +#define EEPROM_CHANNEL_PLAN_ETSI 0x2 +#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 +#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 +#define EEPROM_CHANNEL_PLAN_MKK 0x5 +#define EEPROM_CHANNEL_PLAN_MKK1 0x6 +#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 +#define EEPROM_CHANNEL_PLAN_TELEC 0x8 +#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 +#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA +#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 + +#define EEPROM_CID_DEFAULT 0x0 +#define EEPROM_CID_TOSHIBA 0x4 +#define EEPROM_CID_CCX 0x10 +#define EEPROM_CID_QMI 0x0D +#define EEPROM_CID_WHQL 0xFE + +#define RTL8723BE_EEPROM_ID 0x8129 + +#define EEPROM_HPON 0x02 +#define EEPROM_CLK 0x06 +#define EEPROM_TESTR 0x08 + + +#define EEPROM_TXPOWERCCK 0x10 +#define EEPROM_TXPOWERHT40_1S 0x16 +#define EEPROM_TXPOWERHT20DIFF 0x1B +#define EEPROM_TXPOWER_OFDMDIFF 0x1B + + + +#define EEPROM_TX_PWR_INX 0x10 + +#define EEPROM_CHANNELPLAN 0xB8 +#define EEPROM_XTAL_8723BE 0xB9 +#define EEPROM_THERMAL_METER_88E 0xBA +#define EEPROM_IQK_LCK_88E 0xBB + +#define EEPROM_RF_BOARD_OPTION_88E 0xC1 +#define EEPROM_RF_FEATURE_OPTION_88E 0xC2 +#define EEPROM_RF_BT_SETTING_88E 0xC3 +#define EEPROM_VERSION 0xC4 +#define EEPROM_CUSTOMER_ID 0xC5 +#define EEPROM_RF_ANTENNA_OPT_88E 0xC9 + +#define EEPROM_MAC_ADDR 0xD0 +#define EEPROM_VID 0xD6 +#define EEPROM_DID 0xD8 +#define EEPROM_SVID 0xDA +#define EEPROM_SMID 0xDC + +#define STOPBECON BIT(6) +#define STOPHIGHT BIT(5) +#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define 
STOPBK BIT(0) + +#define RCR_APPFCS BIT(31) +#define RCR_APP_MIC BIT(30) +#define RCR_APP_ICV BIT(29) +#define RCR_APP_PHYST_RXFF BIT(28) +#define RCR_APP_BA_SSN BIT(27) +#define RCR_ENMBID BIT(24) +#define RCR_LSIGEN BIT(23) +#define RCR_MFBEN BIT(22) +#define RCR_HTC_LOC_CTRL BIT(14) +#define RCR_AMF BIT(13) +#define RCR_ACF BIT(12) +#define RCR_ADF BIT(11) +#define RCR_AICV BIT(9) +#define RCR_ACRC32 BIT(8) +#define RCR_CBSSID_BCN BIT(7) +#define RCR_CBSSID_DATA BIT(6) +#define RCR_CBSSID RCR_CBSSID_DATA +#define RCR_APWRMGT BIT(5) +#define RCR_ADD3 BIT(4) +#define RCR_AB BIT(3) +#define RCR_AM BIT(2) +#define RCR_APM BIT(1) +#define RCR_AAP BIT(0) +#define RCR_MXDMA_OFFSET 8 +#define RCR_FIFO_OFFSET 13 + +#define RSV_CTRL 0x001C +#define RD_CTRL 0x0524 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_USB_VID 0xFE60 +#define REG_USB_PID 0xFE62 +#define REG_USB_OPTIONAL 0xFE64 +#define REG_USB_CHIRP_K 0xFE65 +#define REG_USB_PHY 0xFE66 +#define REG_USB_MAC_ADDR 0xFE70 +#define REG_USB_HRPWM 0xFE58 +#define REG_USB_HCPWM 0xFE57 + +#define SW18_FPWM BIT(3) + +#define ISO_MD2PP BIT(0) +#define ISO_UA2USB BIT(1) +#define ISO_UD2CORE BIT(2) +#define ISO_PA2PCIE BIT(3) +#define ISO_PD2CORE BIT(4) +#define ISO_IP2MAC BIT(5) +#define ISO_DIOP BIT(6) +#define ISO_DIOE BIT(7) +#define ISO_EB2CORE BIT(8) +#define ISO_DIOR BIT(9) + +#define PWC_EV25V BIT(14) +#define PWC_EV12V BIT(15) + +#define FEN_BBRSTB BIT(0) +#define FEN_BB_GLB_RSTN BIT(1) +#define FEN_USBA BIT(2) +#define FEN_UPLL BIT(3) +#define FEN_USBD BIT(4) +#define FEN_DIO_PCIE BIT(5) +#define FEN_PCIEA BIT(6) +#define FEN_PPLL BIT(7) +#define FEN_PCIED BIT(8) +#define FEN_DIOE BIT(9) +#define FEN_CPUEN BIT(10) +#define FEN_DCORE BIT(11) +#define FEN_ELDR BIT(12) +#define FEN_DIO_RF BIT(13) +#define FEN_HWPDN BIT(14) +#define FEN_MREGEN BIT(15) + +#define PFM_LDALL BIT(0) +#define PFM_ALDN BIT(1) +#define PFM_LDKP BIT(2) +#define PFM_WOWL BIT(3) +#define ENPDN BIT(4) +#define PDN_PL BIT(5) +#define APFM_ONMAC BIT(8) +#define APFM_OFF BIT(9) +#define APFM_RSM BIT(10) +#define AFSM_HSUS BIT(11) +#define AFSM_PCIE BIT(12) +#define APDM_MAC BIT(13) +#define APDM_HOST BIT(14) +#define APDM_HPDN BIT(15) +#define RDY_MACON BIT(16) +#define SUS_HOST BIT(17) +#define ROP_ALD BIT(20) +#define ROP_PWR BIT(21) +#define ROP_SPS BIT(22) +#define SOP_MRST BIT(25) +#define SOP_FUSE BIT(26) +#define SOP_ABG BIT(27) +#define SOP_AMB BIT(28) +#define SOP_RCK BIT(29) +#define SOP_A8M BIT(30) +#define XOP_BTCK BIT(31) + +#define ANAD16V_EN BIT(0) +#define ANA8M BIT(1) +#define MACSLP BIT(4) +#define LOADER_CLK_EN BIT(5) +#define _80M_SSC_DIS BIT(7) +#define _80M_SSC_EN_HO BIT(8) +#define PHY_SSC_RSTB BIT(9) +#define SEC_CLK_EN BIT(10) +#define MAC_CLK_EN BIT(11) +#define SYS_CLK_EN BIT(12) +#define RING_CLK_EN BIT(13) + +#define BOOT_FROM_EEPROM BIT(4) +#define EEPROM_EN BIT(5) + +#define AFE_BGEN BIT(0) +#define AFE_MBEN BIT(1) +#define MAC_ID_EN BIT(7) + +#define WLOCK_ALL BIT(0) +#define WLOCK_00 BIT(1) +#define WLOCK_04 BIT(2) +#define WLOCK_08 BIT(3) +#define WLOCK_40 BIT(4) +#define R_DIS_PRST_0 BIT(5) +#define R_DIS_PRST_1 BIT(6) +#define LOCK_ALL_EN BIT(7) + +#define RF_EN BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) + +#define LDA15_EN BIT(0) +#define LDA15_STBY BIT(1) +#define LDA15_OBUF BIT(2) +#define LDA15_REG_VOS BIT(3) +#define _LDA15_VOADJ(x) (((x) & 0x7) << 4) + +#define LDV12_EN BIT(0) +#define 
LDV12_SDBY BIT(1) +#define LPLDO_HSM BIT(2) +#define LPLDO_LSM_DIS BIT(3) +#define _LDV12_VADJ(x) (((x) & 0xF) << 4) + +#define XTAL_EN BIT(0) +#define XTAL_BSEL BIT(1) +#define _XTAL_BOSC(x) (((x) & 0x3) << 2) +#define _XTAL_CADJ(x) (((x) & 0xF) << 4) +#define XTAL_GATE_USB BIT(8) +#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9) +#define XTAL_GATE_AFE BIT(11) +#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12) +#define XTAL_RF_GATE BIT(14) +#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15) +#define XTAL_GATE_DIG BIT(17) +#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18) +#define XTAL_BT_GATE BIT(20) +#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21) +#define _XTAL_GPIO(x) (((x) & 0x7) << 23) + +#define CKDLY_AFE BIT(26) +#define CKDLY_USB BIT(27) +#define CKDLY_DIG BIT(28) +#define CKDLY_BT BIT(29) + +#define APLL_EN BIT(0) +#define APLL_320_EN BIT(1) +#define APLL_FREF_SEL BIT(2) +#define APLL_EDGE_SEL BIT(3) +#define APLL_WDOGB BIT(4) +#define APLL_LPFEN BIT(5) + +#define APLL_REF_CLK_13MHZ 0x1 +#define APLL_REF_CLK_19_2MHZ 0x2 +#define APLL_REF_CLK_20MHZ 0x3 +#define APLL_REF_CLK_25MHZ 0x4 +#define APLL_REF_CLK_26MHZ 0x5 +#define APLL_REF_CLK_38_4MHZ 0x6 +#define APLL_REF_CLK_40MHZ 0x7 + +#define APLL_320EN BIT(14) +#define APLL_80EN BIT(15) +#define APLL_1MEN BIT(24) + +#define ALD_EN BIT(18) +#define EF_PD BIT(19) +#define EF_FLAG BIT(31) + +#define EF_TRPT BIT(7) +#define LDOE25_EN BIT(31) + +#define RSM_EN BIT(0) +#define TIMER_EN BIT(4) + +#define TRSW0EN BIT(2) +#define TRSW1EN BIT(3) +#define EROM_EN BIT(4) +#define ENBT BIT(5) +#define ENUART BIT(8) +#define UART_910 BIT(9) +#define ENPMAC BIT(10) +#define SIC_SWRST BIT(11) +#define ENSIC BIT(12) +#define SIC_23 BIT(13) +#define ENHDP BIT(14) +#define SIC_LBK BIT(15) + +#define LED0PL BIT(4) +#define LED1PL BIT(12) +#define LED0DIS BIT(7) + +#define MCUFWDL_EN BIT(0) +#define MCUFWDL_RDY BIT(1) +#define FWDL_CHKSUM_RPT BIT(2) +#define MACINI_RDY BIT(3) +#define BBINI_RDY BIT(4) +#define RFINI_RDY BIT(5) +#define WINTINI_RDY BIT(6) +#define CPRST BIT(23) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define CHIP_VER_RTL_MASK 0xF000 +#define CHIP_VER_RTL_SHIFT 12 + +#define REG_LBMODE (REG_CR + 3) + +#define HCI_TXDMA_EN BIT(0) +#define HCI_RXDMA_EN BIT(1) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define _NETTYPE(x) (((x) & 0x3) << 16) +#define MASK_NETTYPE 0x30000 +#define NT_NO_LINK 0x0 +#define NT_LINK_AD_HOC 0x1 +#define NT_LINK_AP 0x2 +#define NT_AS_AP 0x3 + +#define _LBMODE(x) (((x) & 0xF) << 24) +#define MASK_LBMODE 0xF000000 +#define LOOPBACK_NORMAL 0x0 +#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_MAC_DELAY 0x3 +#define LOOPBACK_PHY 0x1 +#define LOOPBACK_DMA 0x7 + +#define GET_RX_PAGE_SIZE(value) ((value) & 0xF) +#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) +#define _PSRX_MASK 0xF +#define _PSTX_MASK 0xF0 +#define _PSRX(x) (x) +#define _PSTX(x) ((x) << 4) + +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 
+#define PBP_1024 0x4 + +#define RXDMA_ARBBW_EN BIT(0) +#define RXSHFT_EN BIT(1) +#define RXDMA_AGG_EN BIT(2) +#define QS_VO_QUEUE BIT(8) +#define QS_VI_QUEUE BIT(9) +#define QS_BE_QUEUE BIT(10) +#define QS_BK_QUEUE BIT(11) +#define QS_MANAGER_QUEUE BIT(12) +#define QS_HIGH_QUEUE BIT(13) + +#define HQSEL_VOQ BIT(0) +#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) + +#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) +#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) +#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10) +#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8) +#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6) +#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4) + +#define QUEUE_LOW 1 +#define QUEUE_NORMAL 2 +#define QUEUE_HIGH 3 + +#define _LLT_NO_ACTIVE 0x0 +#define _LLT_WRITE_ACCESS 0x1 +#define _LLT_READ_ACCESS 0x2 + +#define _LLT_INIT_DATA(x) ((x) & 0xFF) +#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8) +#define _LLT_OP(x) (((x) & 0x3) << 30) +#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3) + +#define BB_WRITE_READ_MASK (BIT(31) | BIT(30)) +#define BB_WRITE_EN BIT(30) +#define BB_READ_EN BIT(31) + +#define _HPQ(x) ((x) & 0xFF) +#define _LPQ(x) (((x) & 0xFF) << 8) +#define _PUBQ(x) (((x) & 0xFF) << 16) +#define _NPQ(x) ((x) & 0xFF) + +#define HPQ_PUBLIC_DIS BIT(24) +#define LPQ_PUBLIC_DIS BIT(25) +#define LD_RQPN BIT(31) + +#define BCN_VALID BIT(16) +#define BCN_HEAD(x) (((x) & 0xFF) << 8) +#define BCN_HEAD_MASK 0xFF00 + +#define BLK_DESC_NUM_SHIFT 4 +#define BLK_DESC_NUM_MASK 0xF + +#define DROP_DATA_EN BIT(9) + +#define EN_AMPDU_RTY_NEW BIT(7) + +#define _INIRTSMCS_SEL(x) ((x) & 0x3F) + +#define _SPEC_SIFS_CCK(x) ((x) & 0xFF) +#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8) + +#define RATE_REG_BITMAP_ALL 0xFFFFF + +#define _RRSC_BITMAP(x) ((x) & 0xFFFFF) + +#define _RRSR_RSC(x) (((x) & 0x3) << 21) +#define RRSR_RSC_RESERVED 0x0 +#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 +#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 +#define RRSR_RSC_DUPLICATE_MODE 0x3 + +#define USE_SHORT_G1 BIT(20) + +#define _AGGLMT_MCS0(x) ((x) & 0xF) +#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4) +#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8) +#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12) +#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16) +#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20) +#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24) +#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28) + +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_LONG_SHIFT 0 + +#define _DARF_RC1(x) ((x) & 0x1F) +#define _DARF_RC2(x) (((x) & 0x1F) << 8) +#define _DARF_RC3(x) (((x) & 0x1F) << 16) +#define _DARF_RC4(x) (((x) & 0x1F) << 24) +#define _DARF_RC5(x) ((x) & 0x1F) +#define _DARF_RC6(x) (((x) & 0x1F) << 8) +#define _DARF_RC7(x) (((x) & 0x1F) << 16) +#define _DARF_RC8(x) (((x) & 0x1F) << 24) + +#define _RARF_RC1(x) ((x) & 0x1F) +#define _RARF_RC2(x) (((x) & 0x1F) << 8) +#define _RARF_RC3(x) (((x) & 0x1F) << 16) +#define _RARF_RC4(x) (((x) & 0x1F) << 24) +#define _RARF_RC5(x) ((x) & 0x1F) +#define _RARF_RC6(x) (((x) & 0x1F) << 8) +#define _RARF_RC7(x) (((x) & 0x1F) << 16) +#define _RARF_RC8(x) (((x) & 0x1F) << 24) + +#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_ECW_MAX_OFFSET 12 +#define AC_PARAM_ECW_MIN_OFFSET 8 +#define AC_PARAM_AIFS_OFFSET 0 + +#define _AIFS(x) (x) +#define _ECW_MAX_MIN(x) ((x) << 8) +#define _TXOP_LIMIT(x) ((x) << 16) + +#define _BCNIFS(x) ((x) & 0xFF) +#define _BCNECW(x) ((((x) & 0xF)) << 8) + +#define _LRL(x) ((x) & 0x3F) +#define _SRL(x) (((x) & 0x3F) << 8) + +#define _SIFS_CCK_CTX(x) ((x) & 
0xFF) +#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8) + +#define _SIFS_OFDM_CTX(x) ((x) & 0xFF) +#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8) + +#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) + +#define DIS_EDCA_CNT_DWN BIT(11) + +#define EN_MBSSID BIT(1) +#define EN_TXBCN_RPT BIT(2) +#define EN_BCN_FUNCTION BIT(3) + +#define TSFTR_RST BIT(0) +#define TSFTR1_RST BIT(1) + +#define STOP_BCNQ BIT(6) + +#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) +#define DIS_TSF_UDT0_TEST_CHIP BIT(5) + +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) +#define ACMHW_BEQSTATUS BIT(4) +#define ACMHW_VIQSTATUS BIT(5) +#define ACMHW_VOQSTATUS BIT(6) + +#define APSDOFF BIT(6) +#define APSDOFF_STATUS BIT(7) + +#define BW_20MHZ BIT(2) + +#define RATE_BITMAP_ALL 0xFFFFF + +#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 + +#define TSFRST BIT(0) +#define DIS_GCLK BIT(1) +#define PAD_SEL BIT(2) +#define PWR_ST BIT(6) +#define PWRBIT_OW_EN BIT(7) +#define ACRC BIT(8) +#define CFENDFORM BIT(9) +#define ICV BIT(10) + +#define AAP BIT(0) +#define APM BIT(1) +#define AM BIT(2) +#define AB BIT(3) +#define ADD3 BIT(4) +#define APWRMGT BIT(5) +#define CBSSID BIT(6) +#define CBSSID_DATA BIT(6) +#define CBSSID_BCN BIT(7) +#define ACRC32 BIT(8) +#define AICV BIT(9) +#define ADF BIT(11) +#define ACF BIT(12) +#define AMF BIT(13) +#define HTC_LOC_CTRL BIT(14) +#define UC_DATA_EN BIT(16) +#define BM_DATA_EN BIT(17) +#define MFBEN BIT(22) +#define LSIGEN BIT(23) +#define ENMBID BIT(24) +#define APP_BASSN BIT(27) +#define APP_PHYSTS BIT(28) +#define APP_ICV BIT(29) +#define APP_MIC BIT(30) +#define APP_FCS BIT(31) + +#define _MIN_SPACE(x) ((x) & 0x7) +#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) + +#define RXERR_TYPE_OFDM_PPDU 0 +#define RXERR_TYPE_OFDM_FALSE_ALARM 1 +#define RXERR_TYPE_OFDM_MPDU_OK 2 +#define RXERR_TYPE_OFDM_MPDU_FAIL 3 +#define RXERR_TYPE_CCK_PPDU 4 +#define RXERR_TYPE_CCK_FALSE_ALARM 5 +#define RXERR_TYPE_CCK_MPDU_OK 6 +#define RXERR_TYPE_CCK_MPDU_FAIL 7 +#define RXERR_TYPE_HT_PPDU 8 +#define RXERR_TYPE_HT_FALSE_ALARM 9 +#define RXERR_TYPE_HT_MPDU_TOTAL 10 +#define RXERR_TYPE_HT_MPDU_OK 11 +#define RXERR_TYPE_HT_MPDU_FAIL 12 +#define RXERR_TYPE_RX_FULL_DROP 15 + +#define RXERR_COUNTER_MASK 0xFFFFF +#define RXERR_RPT_RST BIT(27) +#define _RXERR_RPT_SEL(type) ((type) << 28) + +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) +#define SCR_TXENCENABLE BIT(2) +#define SCR_RXDECENABLE BIT(3) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) +#define SCR_TXBCUSEDK BIT(6) +#define SCR_RXBCUSEDK BIT(7) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define BT_FUNC BIT(16) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define USB_IS_HIGH_SPEED 0 +#define USB_IS_FULL_SPEED 1 +#define USB_SPEED_MASK BIT(5) + +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 + +#define USB_TEST_EP_MASK 0x30 +#define USB_TEST_EP_SHIFT 4 + +#define USB_AGG_EN BIT(3) + +#define MAC_ADDR_LEN 6 +#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/ + +#define POLLING_LLT_THRESHOLD 20 +#define POLLING_READY_TIMEOUT_COUNT 3000 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 
0x0A + +#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) +#define EPROM_CMD_CONFIG 0x3 +#define EPROM_CMD_LOAD 1 + +#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE + +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) + +#define RPMAC_RESET 0x100 +#define RPMAC_TXSTART 0x104 +#define RPMAC_TXLEGACYSIG 0x108 +#define RPMAC_TXHTSIG1 0x10c +#define RPMAC_TXHTSIG2 0x110 +#define RPMAC_PHYDEBUG 0x114 +#define RPMAC_TXPACKETNUM 0x118 +#define RPMAC_TXIDLE 0x11c +#define RPMAC_TXMACHEADER0 0x120 +#define RPMAC_TXMACHEADER1 0x124 +#define RPMAC_TXMACHEADER2 0x128 +#define RPMAC_TXMACHEADER3 0x12c +#define RPMAC_TXMACHEADER4 0x130 +#define RPMAC_TXMACHEADER5 0x134 +#define RPMAC_TXDADATYPE 0x138 +#define RPMAC_TXRANDOMSEED 0x13c +#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPHEADER 0x144 +#define RPMAC_CCKCRC16 0x148 +#define RPMAC_OFDMRXCRC32OK 0x170 +#define RPMAC_OFDMRXCRC32ER 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC8ER 0x17c +#define RPMAC_CCKCRXRC16ER 0x180 +#define RPMAC_CCKCRXRC32ER 0x184 +#define RPMAC_CCKCRXRC32OK 0x188 +#define RPMAC_TXSTATUS 0x18c + +#define RFPGA0_RFMOD 0x800 + +#define RFPGA0_TXINFO 0x804 +#define RFPGA0_PSDFUNCTION 0x808 + +#define RFPGA0_TXGAINSTAGE 0x80c + +#define RFPGA0_RFTIMING1 0x810 +#define RFPGA0_RFTIMING2 0x814 + +#define RFPGA0_XA_HSSIPARAMETER1 0x820 +#define RFPGA0_XA_HSSIPARAMETER2 0x824 +#define RFPGA0_XB_HSSIPARAMETER1 0x828 +#define RFPGA0_XB_HSSIPARAMETER2 0x82c + +#define RFPGA0_XA_LSSIPARAMETER 0x840 +#define RFPGA0_XB_LSSIPARAMETER 0x844 + +#define RFPGA0_RFWAKEUPPARAMETER 0x850 +#define RFPGA0_RFSLEEPUPPARAMETER 0x854 + +#define RFPGA0_XAB_SWITCHCONTROL 0x858 +#define RFPGA0_XCD_SWITCHCONTROL 0x85c + +#define RFPGA0_XA_RFINTERFACEOE 0x860 +#define RFPGA0_XB_RFINTERFACEOE 0x864 + +#define RFPGA0_XAB_RFINTERFACESW 0x870 +#define RFPGA0_XCD_RFINTERFACESW 0x874 + +#define RFPGA0_XAB_RFPARAMETER 0x878 +#define RFPGA0_XCD_RFPARAMETER 0x87c + +#define RFPGA0_ANALOGPARAMETER1 0x880 +#define RFPGA0_ANALOGPARAMETER2 0x884 +#define RFPGA0_ANALOGPARAMETER3 0x888 +#define RFPGA0_ANALOGPARAMETER4 0x88c + +#define RFPGA0_XA_LSSIREADBACK 0x8a0 +#define RFPGA0_XB_LSSIREADBACK 0x8a4 +#define RFPGA0_XC_LSSIREADBACK 0x8a8 +#define RFPGA0_XD_LSSIREADBACK 0x8ac + +#define RFPGA0_PSDREPORT 0x8b4 +#define TRANSCEIVEA_HSPI_READBACK 0x8b8 +#define TRANSCEIVEB_HSPI_READBACK 0x8bc +#define REG_SC_CNT 0x8c4 +#define RFPGA0_XAB_RFINTERFACERB 0x8e0 +#define RFPGA0_XCD_RFINTERFACERB 0x8e4 + +#define RFPGA1_RFMOD 0x900 + +#define RFPGA1_TXBLOCK 0x904 +#define RFPGA1_DEBUGSELECT 0x908 +#define RFPGA1_TXINFO 0x90c + +#define RCCK0_SYSTEM 0xa00 + +#define RCCK0_AFESETTING 0xa04 +#define RCCK0_CCA 0xa08 + +#define RCCK0_RXAGC1 0xa0c +#define RCCK0_RXAGC2 0xa10 + +#define RCCK0_RXHP 0xa14 + +#define RCCK0_DSPPARAMETER1 0xa18 +#define RCCK0_DSPPARAMETER2 0xa1c + +#define RCCK0_TXFILTER1 0xa20 +#define RCCK0_TXFILTER2 0xa24 +#define RCCK0_DEBUGPORT 0xa28 +#define RCCK0_FALSEALARMREPORT 0xa2c +#define RCCK0_TRSSIREPORT 0xa50 +#define RCCK0_RXREPORT 0xa54 +#define RCCK0_FACOUNTERLOWER 0xa5c +#define RCCK0_FACOUNTERUPPER 0xa58 +#define RCCK0_CCA_CNT 0xa60 + + +/* PageB(0xB00) */ +#define RPDP_ANTA 0xb00 +#define RPDP_ANTA_4 0xb04 +#define RPDP_ANTA_8 0xb08 +#define RPDP_ANTA_C 0xb0c +#define RPDP_ANTA_10 0xb10 +#define RPDP_ANTA_14 0xb14 +#define RPDP_ANTA_18 0xb18 +#define RPDP_ANTA_1C 0xb1c +#define RPDP_ANTA_20 0xb20 +#define RPDP_ANTA_24 0xb24 + +#define RCONFIG_PMPD_ANTA 0xb28 +#define CONFIG_RAM64X16 0xb2c + +#define RBNDA 0xb30 +#define RHSSIPAR 0xb34 + 
+#define RCONFIG_ANTA 0xb68 +#define RCONFIG_ANTB 0xb6c + +#define RPDP_ANTB 0xb70 +#define RPDP_ANTB_4 0xb74 +#define RPDP_ANTB_8 0xb78 +#define RPDP_ANTB_C 0xb7c +#define RPDP_ANTB_10 0xb80 +#define RPDP_ANTB_14 0xb84 +#define RPDP_ANTB_18 0xb88 +#define RPDP_ANTB_1C 0xb8c +#define RPDP_ANTB_20 0xb90 +#define RPDP_ANTB_24 0xb94 + +#define RCONFIG_PMPD_ANTB 0xb98 + +#define RBNDB 0xba0 + +#define RAPK 0xbd8 +#define RPM_RX0_ANTA 0xbdc +#define RPM_RX1_ANTA 0xbe0 +#define RPM_RX2_ANTA 0xbe4 +#define RPM_RX3_ANTA 0xbe8 +#define RPM_RX0_ANTB 0xbec +#define RPM_RX1_ANTB 0xbf0 +#define RPM_RX2_ANTB 0xbf4 +#define RPM_RX3_ANTB 0xbf8 + +/*Page C*/ +#define ROFDM0_LSTF 0xc00 + +#define ROFDM0_TRXPATHENABLE 0xc04 +#define ROFDM0_TRMUXPAR 0xc08 +#define ROFDM0_TRSWISOLATION 0xc0c + +#define ROFDM0_XARXAFE 0xc10 +#define ROFDM0_XARXIQIMBALANCE 0xc14 +#define ROFDM0_XBRXAFE 0xc18 +#define ROFDM0_XBRXIQIMBALANCE 0xc1c +#define ROFDM0_XCRXAFE 0xc20 +#define ROFDM0_XCRXIQIMBANLANCE 0xc24 +#define ROFDM0_XDRXAFE 0xc28 +#define ROFDM0_XDRXIQIMBALANCE 0xc2c + +#define ROFDM0_RXDETECTOR1 0xc30 +#define ROFDM0_RXDETECTOR2 0xc34 +#define ROFDM0_RXDETECTOR3 0xc38 +#define ROFDM0_RXDETECTOR4 0xc3c + +#define ROFDM0_RXDSP 0xc40 +#define ROFDM0_CFOANDDAGC 0xc44 +#define ROFDM0_CCADROPTHRESHOLD 0xc48 +#define ROFDM0_ECCATHRESHOLD 0xc4c + +#define ROFDM0_XAAGCCORE1 0xc50 +#define ROFDM0_XAAGCCORE2 0xc54 +#define ROFDM0_XBAGCCORE1 0xc58 +#define ROFDM0_XBAGCCORE2 0xc5c +#define ROFDM0_XCAGCCORE1 0xc60 +#define ROFDM0_XCAGCCORE2 0xc64 +#define ROFDM0_XDAGCCORE1 0xc68 +#define ROFDM0_XDAGCCORE2 0xc6c + +#define ROFDM0_AGCPARAMETER1 0xc70 +#define ROFDM0_AGCPARAMETER2 0xc74 +#define ROFDM0_AGCRSSITABLE 0xc78 +#define ROFDM0_HTSTFAGC 0xc7c + +#define ROFDM0_XATXIQIMBALANCE 0xc80 +#define ROFDM0_XATXAFE 0xc84 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 +#define ROFDM0_XBTXAFE 0xc8c +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 +#define ROFDM0_XDTXAFE 0xc9c + +#define ROFDM0_RXIQEXTANTA 0xca0 +#define ROFDM0_TXCOEFF1 0xca4 +#define ROFDM0_TXCOEFF2 0xca8 +#define ROFDM0_TXCOEFF3 0xcac +#define ROFDM0_TXCOEFF4 0xcb0 +#define ROFDM0_TXCOEFF5 0xcb4 +#define ROFDM0_TXCOEFF6 0xcb8 + +#define ROFDM0_RXHPPARAMETER 0xce0 +#define ROFDM0_TXPSEUDONOISEWGT 0xce4 +#define ROFDM0_FRAMESYNC 0xcf0 +#define ROFDM0_DFSREPORT 0xcf4 + + +#define ROFDM1_LSTF 0xd00 +#define ROFDM1_TRXPATHENABLE 0xd04 + +#define ROFDM1_CF0 0xd08 +#define ROFDM1_CSI1 0xd10 +#define ROFDM1_SBD 0xd14 +#define ROFDM1_CSI2 0xd18 +#define ROFDM1_CFOTRACKING 0xd2c +#define ROFDM1_TRXMESAURE1 0xd34 +#define ROFDM1_INTFDET 0xd3c +#define ROFDM1_PSEUDONOISESTATEAB 0xd50 +#define ROFDM1_PSEUDONOISESTATECD 0xd54 +#define ROFDM1_RXPSEUDONOISEWGT 0xd58 + +#define ROFDM_PHYCOUNTER1 0xda0 +#define ROFDM_PHYCOUNTER2 0xda4 +#define ROFDM_PHYCOUNTER3 0xda8 + +#define ROFDM_SHORTCFOAB 0xdac +#define ROFDM_SHORTCFOCD 0xdb0 +#define ROFDM_LONGCFOAB 0xdb4 +#define ROFDM_LONGCFOCD 0xdb8 +#define ROFDM_TAILCF0AB 0xdbc +#define ROFDM_TAILCF0CD 0xdc0 +#define ROFDM_PWMEASURE1 0xdc4 +#define ROFDM_PWMEASURE2 0xdc8 +#define ROFDM_BWREPORT 0xdcc +#define ROFDM_AGCREPORT 0xdd0 +#define ROFDM_RXSNR 0xdd4 +#define ROFDM_RXEVMCSI 0xdd8 +#define ROFDM_SIGREPORT 0xddc + +#define RTXAGC_A_RATE18_06 0xe00 +#define RTXAGC_A_RATE54_24 0xe04 +#define RTXAGC_A_CCK1_MCS32 0xe08 +#define RTXAGC_A_MCS03_MCS00 0xe10 +#define RTXAGC_A_MCS07_MCS04 0xe14 +#define RTXAGC_A_MCS11_MCS08 0xe18 +#define RTXAGC_A_MCS15_MCS12 0xe1c + +#define RTXAGC_B_RATE18_06 
0x830 +#define RTXAGC_B_RATE54_24 0x834 +#define RTXAGC_B_CCK1_55_MCS32 0x838 +#define RTXAGC_B_MCS03_MCS00 0x83c +#define RTXAGC_B_MCS07_MCS04 0x848 +#define RTXAGC_B_MCS11_MCS08 0x84c +#define RTXAGC_B_MCS15_MCS12 0x868 +#define RTXAGC_B_CCK11_A_CCK2_11 0x86c + +#define RFPGA0_IQK 0xe28 +#define RTX_IQK_TONE_A 0xe30 +#define RRX_IQK_TONE_A 0xe34 +#define RTX_IQK_PI_A 0xe38 +#define RRX_IQK_PI_A 0xe3c + +#define RTX_IQK 0xe40 +#define RRX_IQK 0xe44 +#define RIQK_AGC_PTS 0xe48 +#define RIQK_AGC_RSP 0xe4c +#define RTX_IQK_TONE_B 0xe50 +#define RRX_IQK_TONE_B 0xe54 +#define RTX_IQK_PI_B 0xe58 +#define RRX_IQK_PI_B 0xe5c +#define RIQK_AGC_CONT 0xe60 + +#define RBLUE_TOOTH 0xe6c +#define RRX_WAIT_CCA 0xe70 +#define RTX_CCK_RFON 0xe74 +#define RTX_CCK_BBON 0xe78 +#define RTX_OFDM_RFON 0xe7c +#define RTX_OFDM_BBON 0xe80 +#define RTX_TO_RX 0xe84 +#define RTX_TO_TX 0xe88 +#define RRX_CCK 0xe8c + +#define RTX_POWER_BEFORE_IQK_A 0xe94 +#define RTX_POWER_AFTER_IQK_A 0xe9c + +#define RRX_POWER_BEFORE_IQK_A 0xea0 +#define RRX_POWER_BEFORE_IQK_A_2 0xea4 +#define RRX_POWER_AFTER_IQK_A 0xea8 +#define RRX_POWER_AFTER_IQK_A_2 0xeac + +#define RTX_POWER_BEFORE_IQK_B 0xeb4 +#define RTX_POWER_AFTER_IQK_B 0xebc + +#define RRX_POWER_BEFORE_IQK_B 0xec0 +#define RRX_POWER_BEFORE_IQK_B_2 0xec4 +#define RRX_POWER_AFTER_IQK_B 0xec8 +#define RRX_POWER_AFTER_IQK_B_2 0xecc + +#define RRX_OFDM 0xed0 +#define RRX_WAIT_RIFS 0xed4 +#define RRX_TO_RX 0xed8 +#define RSTANDBY 0xedc +#define RSLEEP 0xee0 +#define RPMPD_ANAEN 0xeec + +#define RZEBRA1_HSSIENABLE 0x0 +#define RZEBRA1_TRXENABLE1 0x1 +#define RZEBRA1_TRXENABLE2 0x2 +#define RZEBRA1_AGC 0x4 +#define RZEBRA1_CHARGEPUMP 0x5 +#define RZEBRA1_CHANNEL 0x7 + +#define RZEBRA1_TXGAIN 0x8 +#define RZEBRA1_TXLPF 0x9 +#define RZEBRA1_RXLPF 0xb +#define RZEBRA1_RXHPFCORNER 0xc + +#define RGLOBALCTRL 0 +#define RRTL8256_TXLPF 19 +#define RRTL8256_RXLPF 11 +#define RRTL8258_TXLPF 0x11 +#define RRTL8258_RXLPF 0x13 +#define RRTL8258_RSSILPF 0xa + +#define RF_AC 0x00 + +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 + +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 + +#define RF_TXM_IDAC 0x08 +#define RF_BS_IQGEN 0x0F + +#define RF_MODE1 0x10 +#define RF_MODE2 0x11 + +#define RF_RX_AGC_HP 0x12 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 +#define RF_POW_ABILITY 0x17 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x42 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define RF_TX_BIAS_A 0x35 +#define RF_TX_BIAS_D 0x36 +#define RF_LOBF_9 0x38 +#define RF_RXRF_A3 0x3C +#define RF_TRSW 0x3F + +#define RF_TXRF_A2 0x41 +#define RF_TXPA_G4 0x46 +#define RF_TXPA_A4 0x4B + +#define RF_WE_LUT 0xEF + +#define BBBRESETB 0x100 +#define BGLOBALRESETB 0x200 +#define BOFDMTXSTART 0x4 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 +#define BPMACLOOPBACK 0x10 +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf +#define BOFDMTXRESERVED 0x10 +#define BOFDMTXLENGTH 0x1ffe0 +#define BOFDMTXPARITY 
0x20000 +#define BTXHTSIG1 0xffffff +#define BTXHTMCSRATE 0x7f +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff +#define BTXHTSMOOTHING 0x1 +#define BTXHTSOUNDING 0x2 +#define BTXHTRESERVED 0x4 +#define BTXHTAGGREATION 0x8 +#define BTXHTSTBC 0x30 +#define BTXHTADVANCECODING 0x40 +#define BTXHTSHORTGI 0x80 +#define BTXHTNUMBERHT_LTF 0x300 +#define BTXHTCRC8 0x3fc00 +#define BCOUNTERRESET 0x10000 +#define BNUMOFOFDMTX 0xffff +#define BNUMOFCCKTX 0xffff0000 +#define BTXIDLEINTERVAL 0xffff +#define BOFDMSERVICE 0xffff0000 +#define BTXMACHEADER 0xffffffff +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 +#define BTXRANDOMSEED 0xffffffff +#define BCCKTXPREAMBLE 0x1 +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff +#define BCCKTXSERVICE 0xff00 +#define BCCKLENGTHEXT 0x8000 +#define BCCKTXLENGHT 0xffff0000 +#define BCCKTXCRC16 0xffff +#define BCCKTXSTATUS 0x1 +#define BOFDMTXSTATUS 0x2 +#define IS_BB_REG_OFFSET_92S(_offset) \ + ((_offset >= 0x800) && (_offset <= 0xfff)) + +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +#define BCCKEN 0x1000000 +#define BOFDMEN 0x2000000 + +#define BOFDMRXADCPHASE 0x10000 +#define BOFDMTXDACPHASE 0x40000 +#define BXATXAGC 0x3f + +#define BXBTXAGC 0xf00 +#define BXCTXAGC 0xf000 +#define BXDTXAGC 0xf0000 + +#define BPASTART 0xf0000000 +#define BTRSTART 0x00f00000 +#define BRFSTART 0x0000f000 +#define BBBSTART 0x000000f0 +#define BBBCCKSTART 0x0000000f +#define BPAEND 0xf +#define BTREND 0x0f000000 +#define BRFEND 0x000f0000 +#define BCCAMASK 0x000000f0 +#define BR2RCCAMASK 0x00000f00 +#define BHSSI_R2TDELAY 0xf8000000 +#define BHSSI_T2RDELAY 0xf80000 +#define BCONTXHSSI 0x400 +#define BIGFROMCCK 0x200 +#define BAGCADDRESS 0x3f +#define BRXHPTX 0x7000 +#define BRXHP2RX 0x38000 +#define BRXHPCCKINI 0xc0000 +#define BAGCTXCODE 0xc00000 +#define BAGCRXCODE 0x300000 + +#define B3WIREDATALENGTH 0x800 +#define B3WIREADDREAALENGTH 0x400 + +#define B3WIRERFPOWERDOWN 0x1 +#define B5GPAPEPOLARITY 0x40000000 +#define B2GPAPEPOLARITY 0x80000000 +#define BRFSW_TXDEFAULTANT 0x3 +#define BRFSW_TXOPTIONANT 0x30 +#define BRFSW_RXDEFAULTANT 0x300 +#define BRFSW_RXOPTIONANT 0x3000 +#define BRFSI_3WIREDATA 0x1 +#define BRFSI_3WIRECLOCK 0x2 +#define BRFSI_3WIRELOAD 0x4 +#define BRFSI_3WIRERW 0x8 +#define BRFSI_3WIRE 0xf + +#define BRFSI_RFENV 0x10 + +#define BRFSI_TRSW 0x20 +#define BRFSI_TRSWB 0x40 +#define BRFSI_ANTSW 0x100 +#define BRFSI_ANTSWB 0x200 +#define BRFSI_PAPE 0x400 +#define BRFSI_PAPE5G 0x800 +#define BBANDSELECT 0x1 +#define BHTSIG2_GI 0x80 +#define BHTSIG2_SMOOTHING 0x01 +#define BHTSIG2_SOUNDING 0x02 +#define BHTSIG2_AGGREATON 0x08 +#define BHTSIG2_STBC 0x30 +#define BHTSIG2_ADVCODING 0x40 +#define BHTSIG2_NUMOFHTLTF 0x300 +#define BHTSIG2_CRC8 0x3fc +#define BHTSIG1_MCS 0x7f +#define BHTSIG1_BANDWIDTH 0x80 +#define BHTSIG1_HTLENGTH 0xffff +#define BLSIG_RATE 0xf +#define BLSIG_RESERVED 0x10 +#define BLSIG_LENGTH 0x1fffe +#define BLSIG_PARITY 0x20 +#define BCCKRXPHASE 0x4 + +#define BLSSIREADADDRESS 0x7f800000 +#define BLSSIREADEDGE 0x80000000 + +#define BLSSIREADBACKDATA 0xfffff + +#define BLSSIREADOKFLAG 0x1000 +#define BCCKSAMPLERATE 0x8 +#define BREGULATOR0STANDBY 0x1 +#define BREGULATORPLLSTANDBY 0x2 +#define BREGULATOR1STANDBY 0x4 +#define BPLLPOWERUP 0x8 +#define BDPLLPOWERUP 0x10 +#define BDA10POWERUP 0x20 +#define BAD7POWERUP 0x200 +#define BDA6POWERUP 0x2000 +#define BXTALPOWERUP 0x4000 +#define B40MDCLKPOWERUP 0x8000 +#define BDA6DEBUGMODE 0x20000 +#define BDA6SWING 0x380000 
+ +#define BADCLKPHASE 0x4000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 + +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 +#define BXTALCAP92X 0x0f000000 +#define BXTALCAP 0x0f000000 + +#define BINTDIFCLKENABLE 0x400 +#define BEXTSIGCLKENABLE 0x800 +#define BBANDGAP_MBIAS_POWERUP 0x10000 +#define BAD11SH_GAIN 0xc0000 +#define BAD11NPUT_RANGE 0x700000 +#define BAD110P_CURRENT 0x3800000 +#define BLPATH_LOOPBACK 0x4000000 +#define BQPATH_LOOPBACK 0x8000000 +#define BAFE_LOOPBACK 0x10000000 +#define BDA10_SWING 0x7e0 +#define BDA10_REVERSE 0x800 +#define BDA_CLK_SOURCE 0x1000 +#define BDA7INPUT_RANGE 0x6000 +#define BDA7_GAIN 0x38000 +#define BDA7OUTPUT_CM_MODE 0x40000 +#define BDA7INPUT_CM_MODE 0x380000 +#define BDA7CURRENT 0xc00000 +#define BREGULATOR_ADJUST 0x7000000 +#define BAD11POWERUP_ATTX 0x1 +#define BDA10PS_ATTX 0x10 +#define BAD11POWERUP_ATRX 0x100 +#define BDA10PS_ATRX 0x1000 +#define BCCKRX_AGC_FORMAT 0x200 +#define BPSDFFT_SAMPLE_POINT 0xc000 +#define BPSD_AVERAGE_NUM 0x3000 +#define BIQPATH_CONTROL 0xc00 +#define BPSD_FREQ 0x3ff +#define BPSD_ANTENNA_PATH 0x30 +#define BPSD_IQ_SWITCH 0x40 +#define BPSD_RX_TRIGGER 0x400000 +#define BPSD_TX_TRIGGER 0x80000000 +#define BPSD_SINE_TONE_SCALE 0x7f000000 +#define BPSD_REPORT 0xffff + +#define BOFDM_TXSC 0x30000000 +#define BCCK_TXON 0x1 +#define BOFDM_TXON 0x2 +#define BDEBUG_PAGE 0xfff +#define BDEBUG_ITEM 0xff +#define BANTL 0x10 +#define BANT_NONHT 0x100 +#define BANT_HT1 0x1000 +#define BANT_HT2 0x10000 +#define BANT_HT1S1 0x100000 +#define BANT_NONHTS1 0x1000000 + +#define BCCK_BBMODE 0x3 +#define BCCK_TXPOWERSAVING 0x80 +#define BCCK_RXPOWERSAVING 0x40 + +#define BCCK_SIDEBAND 0x10 + +#define BCCK_SCRAMBLE 0x8 +#define BCCK_ANTDIVERSITY 0x8000 +#define BCCK_CARRIER_RECOVERY 0x4000 +#define BCCK_TXRATE 0x3000 +#define BCCK_DCCANCEL 0x0800 +#define BCCK_ISICANCEL 0x0400 +#define BCCK_MATCH_FILTER 0x0200 +#define BCCK_EQUALIZER 0x0100 +#define BCCK_PREAMBLE_DETECT 0x800000 +#define BCCK_FAST_FALSECCA 0x400000 +#define BCCK_CH_ESTSTART 0x300000 +#define BCCK_CCA_COUNT 0x080000 +#define BCCK_CS_LIM 0x070000 +#define BCCK_BIST_MODE 0x80000000 +#define BCCK_CCAMASK 0x40000000 +#define BCCK_TX_DAC_PHASE 0x4 +#define BCCK_RX_ADC_PHASE 0x20000000 +#define BCCKR_CP_MODE 0x0100 +#define BCCK_TXDC_OFFSET 0xf0 +#define BCCK_RXDC_OFFSET 0xf +#define BCCK_CCA_MODE 0xc000 +#define BCCK_FALSECS_LIM 0x3f00 +#define BCCK_CS_RATIO 0xc00000 +#define BCCK_CORGBIT_SEL 0x300000 +#define BCCK_PD_LIM 0x0f0000 +#define BCCK_NEWCCA 0x80000000 +#define BCCK_RXHP_OF_IG 0x8000 +#define BCCK_RXIG 0x7f00 +#define BCCK_LNA_POLARITY 0x800000 +#define BCCK_RX1ST_BAIN 0x7f0000 +#define BCCK_RF_EXTEND 0x20000000 +#define BCCK_RXAGC_SATLEVEL 0x1f000000 +#define BCCK_RXAGC_SATCOUNT 0xe0 +#define BCCKRXRFSETTLE 0x1f +#define BCCK_FIXED_RXAGC 0x8000 +#define BCCK_ANTENNA_POLARITY 0x2000 +#define BCCK_TXFILTER_TYPE 0x0c00 +#define BCCK_RXAGC_REPORTTYPE 0x0300 +#define BCCK_RXDAGC_EN 0x80000000 +#define BCCK_RXDAGC_PERIOD 0x20000000 +#define BCCK_RXDAGC_SATLEVEL 0x1f000000 +#define BCCK_TIMING_RECOVERY 0x800000 +#define BCCK_TXC0 0x3f0000 +#define BCCK_TXC1 0x3f000000 +#define BCCK_TXC2 0x3f +#define BCCK_TXC3 0x3f00 +#define BCCK_TXC4 0x3f0000 +#define BCCK_TXC5 0x3f000000 +#define BCCK_TXC6 0x3f +#define BCCK_TXC7 0x3f00 +#define BCCK_DEBUGPORT 0xff0000 +#define BCCK_DAC_DEBUG 0x0f000000 +#define BCCK_FALSEALARM_ENABLE 0x8000 +#define BCCK_FALSEALARM_READ 0x4000 +#define BCCK_TRSSI 0x7f +#define BCCK_RXAGC_REPORT 0xfe +#define 
BCCK_RXREPORT_ANTSEL 0x80000000 +#define BCCK_RXREPORT_MFOFF 0x40000000 +#define BCCK_RXREPORT_SQLOSS 0x20000000 +#define BCCK_RXREPORT_PKTLOSS 0x10000000 +#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 +#define BCCK_RXREPORT_RATEERROR 0x04000000 +#define BCCK_RXREPORT_RXRATE 0x03000000 +#define BCCK_RXFA_COUNTER_LOWER 0xff +#define BCCK_RXFA_COUNTER_UPPER 0xff000000 +#define BCCK_RXHPAGC_START 0xe000 +#define BCCK_RXHPAGC_FINAL 0x1c00 +#define BCCK_RXFALSEALARM_ENABLE 0x8000 +#define BCCK_FACOUNTER_FREEZE 0x4000 +#define BCCK_TXPATH_SEL 0x10000000 +#define BCCK_DEFAULT_RXPATH 0xc000000 +#define BCCK_OPTION_RXPATH 0x3000000 + +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 +#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 +#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 +#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 +#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_AGC_FREEZE_THRES 0xc0000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 +#define BRXAGC_FREEZE_THRES_MODE 0x40000000 +#define BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 
0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 +#define BRXSEARCHRANGE_GI2_EARLY 0x700000 +#define BRXFRAME_FUARD_COUNTER_L 0x3800000 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 +#define BCFO_ANTSUM_ID 0x200 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 +#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define 
BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 + +#define BNOISE_LVL_TOP_SET 0x3 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 + +#define BRX_PESUDO_NOISE_ON 0x20000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISESTATE_A 0xffff +#define BRX_PESUDO_NOISESTATE_B 0xffff0000 +#define BRX_PESUDO_NOISESTATE_C 0xffff +#define BRX_PESUDO_NOISESTATE_D 0xffff0000 + +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 + +#define BRTL8256REG_MODE_CTRL1 0x100 +#define BRTL8256REG_MODE_CTRL0 0x40 +#define BRTL8256REG_TXLPFBW 0x18 +#define BRTL8256REG_RXLPFBW 0x600 + +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 +#define BBYTE1 0x2 +#define BBYTE2 0x4 +#define BBYTE3 0x8 +#define BWORD0 0x3 +#define BWORD1 0xc +#define BWORD 0xf + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#define MASK4BITS 0x0f +#define MASK20BITS 0xfffff +#define RFREG_OFFSET_MASK 0xfffff + +#define BENABLE 0x1 +#define BDISABLE 0x0 + +#define LEFT_ANTENNA 0x0 +#define RIGHT_ANTENNA 0x1 + +#define TCHECK_TXSTATUS 500 +#define TUPDATE_RXCOUNTER 100 + +#define REG_UN_used_register 0x01bf + +/* WOL bit information */ +#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0) +#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1) +#define HAL92C_WOL_DISASSOC_EVENT BIT(2) +#define HAL92C_WOL_DEAUTH_EVENT BIT(3) +#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4) + +#define WOL_REASON_PTK_UPDATE BIT(0) +#define WOL_REASON_GTK_UPDATE BIT(1) +#define WOL_REASON_DISASSOC BIT(2) +#define WOL_REASON_DEAUTH BIT(3) +#define WOL_REASON_FW_DISCONNECT BIT(4) + +/* 2 EFUSE_TEST (For RTL8723 partially) */ +#define EFUSE_SEL(x) (((x) & 0x3) << 8) +#define EFUSE_SEL_MASK 0x300 +#define EFUSE_WIFI_SEL_0 0x0 + +#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/ +#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/ + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/rf.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/rf.c @@ -0,0 +1,504 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" + +static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + +void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + switch (bandwidth) { + case HT_CHANNEL_WIDTH_20: + rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & + 0xfffff3ff) | BIT(10) | BIT(11)); + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + break; + case HT_CHANNEL_WIDTH_20_40: + rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & + 0xfffff3ff) | BIT(10)); + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + "unknown bandwidth: %#X\n", bandwidth); + break; + } +} + +void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 tx_agc[2] = {0, 0}, tmpval; + bool turbo_scanoff = false; + u8 idx1, idx2; + u8 *ptr; + u8 direction; + u32 pwrtrac_value; + + if (rtlefuse->eeprom_regulatory != 0) + turbo_scanoff = true; + + if (mac->act_scanning) { + tx_agc[RF90_PATH_A] = 0x3f3f3f3f; + tx_agc[RF90_PATH_B] = 0x3f3f3f3f; + + if (turbo_scanoff) { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + } + } else { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + if (rtlefuse->eeprom_regulatory == 0) { + tmpval = + (rtlphy->mcs_offset[0][6]) + + (rtlphy->mcs_offset[0][7] << 8); + tx_agc[RF90_PATH_A] += tmpval; + + tmpval = (rtlphy->mcs_offset[0][14]) + + (rtlphy->mcs_offset[0][15] << + 24); + tx_agc[RF90_PATH_B] += tmpval; + } + } + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + ptr = (u8 *)(&(tx_agc[idx1])); + for (idx2 = 0; idx2 < 4; idx2++) { + if (*ptr > RF6052_MAX_TX_PWR) + *ptr = RF6052_MAX_TX_PWR; + ptr++; + } + } + rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value); + if (direction == 1) { + tx_agc[0] += pwrtrac_value; + tx_agc[1] += pwrtrac_value; + } else if (direction == 2) { + tx_agc[0] -= pwrtrac_value; + tx_agc[1] -= pwrtrac_value; + } + tmpval = tx_agc[RF90_PATH_A] & 0xff; + rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, 
MASKBYTE1, tmpval); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_A_CCK1_MCS32); + + tmpval = tx_agc[RF90_PATH_A] >> 8; + + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK11_A_CCK2_11); + + tmpval = tx_agc[RF90_PATH_B] >> 24; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK11_A_CCK2_11); + + tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; + rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK1_55_MCS32); +} + +static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, + u8 channel, u32 *ofdmbase, + u32 *mcsbase) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 powerbase0, powerbase1; + u8 i, powerlevel[2]; + + for (i = 0; i < 2; i++) { + powerbase0 = ppowerlevel_ofdm[i]; + + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + " [OFDM power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)); + } + + for (i = 0; i < 2; i++) { + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) + powerlevel[i] = ppowerlevel_bw20[i]; + else + powerlevel[i] = ppowerlevel_bw40[i]; + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) | + (powerbase1 << 8) | powerbase1; + + *(mcsbase + i) = powerbase1; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + " [MCS power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(mcsbase + i)); + } +} + +static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, + u32 *powerbase0, u32 *powerbase1, + u32 *p_outwriteval) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 i, chnlgroup = 0, pwr_diff_limit[4]; + u8 pwr_diff = 0, customer_pwr_diff; + u32 writeval, customer_limit, rf; + + for (rf = 0; rf < 2; rf++) { + switch (rtlefuse->eeprom_regulatory) { + case 0: + chnlgroup = 0; + + writeval = + rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "RTK better performance, " + "writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); + break; + case 1: + if (rtlphy->pwrgroup_cnt == 1) { + chnlgroup = 0; + } else { + if (channel < 3) + chnlgroup = 0; + else if (channel < 6) + chnlgroup = 1; + else if (channel < 9) + chnlgroup = 2; + else if (channel < 12) + chnlgroup = 3; + else if (channel < 14) + chnlgroup = 4; + else if (channel == 14) + chnlgroup = 5; + } + writeval = rtlphy->mcs_offset[chnlgroup] + [index + (rf ? 8 : 0)] + ((index < 2) ? + powerbase0[rf] : + powerbase1[rf]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Realtek regulatory, 20MHz, " + "writeval(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); + + break; + case 2: + writeval = + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Better regulatory, " + "writeval(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeval); + break; + case 3: + chnlgroup = 0; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "customer's limit, 40MHz " + "rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht40[rf] + [channel-1]); + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "customer's limit, 20MHz " + "rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht20[rf] + [channel-1]); + } + + if (index < 2) + pwr_diff = + rtlefuse->txpwr_legacyhtdiff[rf][channel-1]; + else if (rtlphy->current_chan_bw == + HT_CHANNEL_WIDTH_20) + pwr_diff = + rtlefuse->txpwr_ht20diff[rf][channel-1]; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) + customer_pwr_diff = + rtlefuse->pwrgroup_ht40[rf][channel-1]; + else + customer_pwr_diff = + rtlefuse->pwrgroup_ht20[rf][channel-1]; + + if (pwr_diff > customer_pwr_diff) + pwr_diff = 0; + else + pwr_diff = customer_pwr_diff - pwr_diff; + + for (i = 0; i < 4; i++) { + pwr_diff_limit[i] = + (u8)((rtlphy->mcs_offset + [chnlgroup][index + (rf ? 8 : 0)] & + (0x7f << (i * 8))) >> (i * 8)); + + if (pwr_diff_limit[i] > pwr_diff) + pwr_diff_limit[i] = pwr_diff; + } + + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | + (pwr_diff_limit[0]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Customer's limit rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), customer_limit); + + writeval = customer_limit + ((index < 2) ? + powerbase0[rf] : + powerbase1[rf]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Customer, writeval rf(%c)= 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeval); + break; + default: + chnlgroup = 0; + writeval = + rtlphy->mcs_offset[chnlgroup] + [index + (rf ? 8 : 0)] + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "RTK better performance, writeval " + "rf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeval); + break; + } + + if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) + writeval = writeval - 0x06060606; + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_BT2) + writeval = writeval - 0x0c0c0c0c; + *(p_outwriteval + rf) = writeval; + } +} + +static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 regoffset_a[6] = { + RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, + RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, + RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 + }; + u16 regoffset_b[6] = { + RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, + RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, + RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 + }; + u8 i, rf, pwr_val[4]; + u32 writeval; + u16 regoffset; + + for (rf = 0; rf < 2; rf++) { + writeval = value[rf]; + for (i = 0; i < 4; i++) { + pwr_val[i] = (u8) ((writeval & (0x7f << + (i * 8))) >> (i * 8)); + + if (pwr_val[i] > RF6052_MAX_TX_PWR) + pwr_val[i] = RF6052_MAX_TX_PWR; + } + writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + (pwr_val[1] << 8) | pwr_val[0]; + + if (rf == 0) + regoffset = regoffset_a[index]; + else + regoffset = regoffset_b[index]; + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "Set 0x%x = %08x\n", regoffset, writeval); + } +} + +void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, u8 channel) +{ + u32 writeval[2], powerbase0[2], powerbase1[2]; + u8 index; + u8 direction; + u32 pwrtrac_value; + + rtl8723be_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20, + ppowerlevel_bw40, channel, + &powerbase0[0], &powerbase1[0]); + + rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value); + + for (index = 0; index < 6; index++) { + txpwr_by_regulatory(hw, channel, index, &powerbase0[0], + &powerbase1[0], &writeval[0]); + if (direction == 1) { + writeval[0] += pwrtrac_value; + writeval[1] += pwrtrac_value; + } else if (direction == 2) { + writeval[0] -= pwrtrac_value; + writeval[1] -= pwrtrac_value; + } + _rtl8723be_write_ofdm_power_reg(hw, index, &writeval[0]); + } +} + +bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + + return _rtl8723be_phy_rf6052_config_parafile(hw); +} + +static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct bb_reg_def *pphyreg; + u32 u4_regvalue = 0; + u8 rfpath; + bool rtstatus = true; + + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + pphyreg = &rtlphy->phyreg_def[rfpath]; + + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV); + break; + case RF90_PATH_B: + case RF90_PATH_D: + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16); + break; + } + + rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); + udelay(1); + + rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); + udelay(1); + + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, + B3WIREADDREAALENGTH, 0x0); + udelay(1); + + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); + udelay(1); + + switch (rfpath) { + case RF90_PATH_A: + rtstatus = 
rtl8723be_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + case RF90_PATH_B: + rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + case RF90_PATH_C: + break; + case RF90_PATH_D: + break; + } + + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV, u4_regvalue); + break; + case RF90_PATH_B: + case RF90_PATH_D: + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16, u4_regvalue); + break; + } + + if (!rtstatus) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "Radio[%d] Fail!!", rfpath); + return false; + } + } + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); + return rtstatus; +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/rf.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/rf.h @@ -0,0 +1,43 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_RF_H__ +#define __RTL8723BE_RF_H__ + +#define RF6052_MAX_TX_PWR 0x3F +#define RF6052_MAX_REG 0x3F + +void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, + u8 bandwidth); +void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, + u8 channel); +bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -0,0 +1,388 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "dm.h"
+#include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "../btcoexist/rtl_btc.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
+{
+        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+        /* close ASPM for AMD by default */
+        rtlpci->const_amdpci_aspm = 0;
+
+        /* ASPM PS mode.
+         * 0 - Disable ASPM,
+         * 1 - Enable ASPM without Clock Req,
+         * 2 - Enable ASPM with Clock Req,
+         * 3 - Always Enable ASPM with Clock Req,
+         * 4 - Always Enable ASPM without Clock Req.
+         * set default to RTL8192CE:3 RTL8192E:2
+         */
+        rtlpci->const_pci_aspm = 3;
+
+        /* Setting for PCI-E device */
+        rtlpci->const_devicepci_aspm_setting = 0x03;
+
+        /* Setting for PCI-E bridge */
+        rtlpci->const_hostpci_aspm_setting = 0x02;
+
+        /* In Hw/Sw Radio Off situation.
+         * 0 - Default,
+         * 1 - From ASPM setting without low Mac Pwr,
+         * 2 - From ASPM setting with low Mac Pwr,
+         * 3 - Bus D3
+         * set default to RTL8192CE:0 RTL8192SE:2
+         */
+        rtlpci->const_hwsw_rfoff_d3 = 0;
+
+        /* This setting works for those devices with
+         * backdoor ASPM setting such as EPHY setting.
+         * 0 - Not support ASPM,
+         * 1 - Support ASPM,
+         * 2 - According to chipset.
+         */
+        rtlpci->const_support_pciaspm = 1;
+}
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+{
+        int err = 0;
+        struct rtl_priv *rtlpriv = rtl_priv(hw);
+        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+        rtl8723be_bt_reg_init(hw);
+        rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+        rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+        rtlpriv->dm.dm_initialgain_enable = 1;
+        rtlpriv->dm.dm_flag = 0;
+        rtlpriv->dm.disable_framebursting = 0;
+        rtlpriv->dm.thermalvalue = 0;
+        rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+        mac->ht_enable = true;
+
+        /* compatible 5G band 88ce just 2.4G band & smsp */
+        rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+        rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
+        rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+        rtlpci->receive_config = (RCR_APPFCS |
+                                  RCR_APP_MIC |
+                                  RCR_APP_ICV |
+                                  RCR_APP_PHYST_RXFF |
+                                  RCR_HTC_LOC_CTRL |
+                                  RCR_AMF |
+                                  RCR_ACF |
+                                  RCR_ADF |
+                                  RCR_AICV |
+                                  RCR_AB |
+                                  RCR_AM |
+                                  RCR_APM |
+                                  0);
+
+        rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT |
+                                     IMR_HSISR_IND_ON_INT |
+                                     IMR_C2HCMD |
+                                     IMR_HIGHDOK |
+                                     IMR_MGNTDOK |
+                                     IMR_BKDOK |
+                                     IMR_BEDOK |
+                                     IMR_VIDOK |
+                                     IMR_VODOK |
+                                     IMR_RDU |
+                                     IMR_ROK |
+                                     0);
+
+        rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
+
+        /* for debug level */
+        rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+        /* for LPS & IPS */
+        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+        rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+        rtlpriv->psc.reg_fwctrl_lps = 3;
+        rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+        /* for ASPM, you can disable ASPM by setting
+         * const_support_pciaspm = 0
+         */
+        rtl8723be_init_aspm_vars(hw);
+
+        if (rtlpriv->psc.reg_fwctrl_lps == 1)
+                rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+        else if (rtlpriv->psc.reg_fwctrl_lps == 2)
+                rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+        else
if (rtlpriv->psc.reg_fwctrl_lps == 3) + rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; + + /* for firmware buf */ + rtlpriv->rtlhal.pfirmware = vzalloc(0x8000); + if (!rtlpriv->rtlhal.pfirmware) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Can't alloc buffer for fw.\n"); + return 1; + } + + rtlpriv->max_fw_size = 0x8000; + pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); + err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name, + rtlpriv->io.dev, GFP_KERNEL, hw, + rtl_fw_cb); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Failed to request firmware!\n"); + return 1; + } + return 0; +} + +void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) + rtlpriv->btcoexist.btc_ops->btc_halt_notify(); + if (rtlpriv->rtlhal.pfirmware) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } +} + +/* get bt coexist status */ +bool rtl8723be_get_btc_status(void) +{ + return true; +} + +static bool is_fw_header(struct rtl92c_firmware_header *hdr) +{ + return (hdr->signature & 0xfff0) == 0x5300; +} + +static struct rtl_hal_ops rtl8723be_hal_ops = { + .init_sw_vars = rtl8723be_init_sw_vars, + .deinit_sw_vars = rtl8723be_deinit_sw_vars, + .read_eeprom_info = rtl8723be_read_eeprom_info, + .interrupt_recognized = rtl8723be_interrupt_recognized, + .hw_init = rtl8723be_hw_init, + .hw_disable = rtl8723be_card_disable, + .hw_suspend = rtl8723be_suspend, + .hw_resume = rtl8723be_resume, + .enable_interrupt = rtl8723be_enable_interrupt, + .disable_interrupt = rtl8723be_disable_interrupt, + .set_network_type = rtl8723be_set_network_type, + .set_chk_bssid = rtl8723be_set_check_bssid, + .set_qos = rtl8723be_set_qos, + .set_bcn_reg = rtl8723be_set_beacon_related_registers, + .set_bcn_intv = rtl8723be_set_beacon_interval, + .update_interrupt_mask = rtl8723be_update_interrupt_mask, + .get_hw_reg = rtl8723be_get_hw_reg, + .set_hw_reg = rtl8723be_set_hw_reg, + .update_rate_tbl = rtl8723be_update_hal_rate_tbl, + .fill_tx_desc = rtl8723be_tx_fill_desc, + .fill_tx_cmddesc = rtl8723be_tx_fill_cmddesc, + .query_rx_desc = rtl8723be_rx_query_desc, + .set_channel_access = rtl8723be_update_channel_access_setting, + .radio_onoff_checking = rtl8723be_gpio_radio_on_off_checking, + .set_bw_mode = rtl8723be_phy_set_bw_mode, + .switch_channel = rtl8723be_phy_sw_chnl, + .dm_watchdog = rtl8723be_dm_watchdog, + .scan_operation_backup = rtl8723be_phy_scan_operation_backup, + .set_rf_power_state = rtl8723be_phy_set_rf_power_state, + .led_control = rtl8723be_led_control, + .set_desc = rtl8723be_set_desc, + .get_desc = rtl8723be_get_desc, + .is_tx_desc_closed = rtl8723be_is_tx_desc_closed, + .tx_polling = rtl8723be_tx_polling, + .enable_hw_sec = rtl8723be_enable_hw_security_config, + .set_key = rtl8723be_set_key, + .init_sw_leds = rtl8723be_init_sw_leds, + .allow_all_destaddr = rtl8723be_allow_all_destaddr, + .get_bbreg = rtl8723_phy_query_bb_reg, + .set_bbreg = rtl8723_phy_set_bb_reg, + .get_rfreg = rtl8723be_phy_query_rf_reg, + .set_rfreg = rtl8723be_phy_set_rf_reg, + .fill_h2c_cmd = rtl8723be_fill_h2c_cmd, + .get_btc_status = rtl8723be_get_btc_status, + .is_fw_header = is_fw_header, +}; + +static struct rtl_mod_params rtl8723be_mod_params = { + .sw_crypto = false, + .inactiveps = true, + .swctrl_lps = false, + .fwctrl_lps = true, + .msi_support = false, + .debug = DBG_EMERG, +}; + +static struct rtl_hal_cfg rtl8723be_hal_cfg = { + .bar_id = 2, + .write_readback = true, + .name = "rtl8723be_pci", + 
.fw_name = "rtlwifi/rtl8723befw.bin", + .ops = &rtl8723be_hal_ops, + .mod_params = &rtl8723be_mod_params, + .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, + .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, + .maps[SYS_CLK] = REG_SYS_CLKR, + .maps[MAC_RCR_AM] = AM, + .maps[MAC_RCR_AB] = AB, + .maps[MAC_RCR_ACRC32] = ACRC32, + .maps[MAC_RCR_ACF] = ACF, + .maps[MAC_RCR_AAP] = AAP, + + .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, + + .maps[EFUSE_TEST] = REG_EFUSE_TEST, + .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_CLK] = 0, + .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_PWC_EV12V] = PWC_EV12V, + .maps[EFUSE_FEN_ELDR] = FEN_ELDR, + .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, + .maps[EFUSE_ANA8M] = ANA8M, + .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, + .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, + .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, + .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, + + .maps[RWCAM] = REG_CAMCMD, + .maps[WCAMI] = REG_CAMWRITE, + .maps[RCAMO] = REG_CAMREAD, + .maps[CAMDBG] = REG_CAMDBG, + .maps[SECR] = REG_SECCFG, + .maps[SEC_CAM_NONE] = CAM_NONE, + .maps[SEC_CAM_WEP40] = CAM_WEP40, + .maps[SEC_CAM_TKIP] = CAM_TKIP, + .maps[SEC_CAM_AES] = CAM_AES, + .maps[SEC_CAM_WEP104] = CAM_WEP104, + + .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, + .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, + .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, + .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, + .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, + .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, + .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, + .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, + .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, + .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, + .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, + .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, + .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, + + .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, + .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, + .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0, + .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, + .maps[RTL_IMR_RDU] = IMR_RDU, + .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, + .maps[RTL_IMR_BDOK] = IMR_BCNDOK0, + .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, + .maps[RTL_IMR_TBDER] = IMR_TBDER, + .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, + .maps[RTL_IMR_TBDOK] = IMR_TBDOK, + .maps[RTL_IMR_BKDOK] = IMR_BKDOK, + .maps[RTL_IMR_BEDOK] = IMR_BEDOK, + .maps[RTL_IMR_VIDOK] = IMR_VIDOK, + .maps[RTL_IMR_VODOK] = IMR_VODOK, + .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), + + .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M, + .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M, + + .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, +}; + +static DEFINE_PCI_DEVICE_TABLE(rtl8723be_pci_id) = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)}, + {}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8723be_pci_id); + +MODULE_AUTHOR("PageHe "); +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless"); +MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin"); + 
+module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444); +module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); +module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); +module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); +module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); +MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); +MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n"); +MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n"); +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); +MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); + +static const SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); + +static struct pci_driver rtl8723be_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8723be_pci_id, + .probe = rtl_pci_probe, + .remove = rtl_pci_disconnect, + + .driver.pm = &rtlwifi_pm_ops, +}; + +module_pci_driver(rtl8723be_driver); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/sw.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/sw.h @@ -0,0 +1,35 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_SW_H__ +#define __RTL8723BE_SW_H__ + +int rtl8723be_init_sw_vars(struct ieee80211_hw *hw); +void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw); +void rtl8723be_init_var_map(struct ieee80211_hw *hw); +bool rtl8723be_get_btc_status(void); + + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/table.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/table.c @@ -0,0 +1,572 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#include "table.h" +u32 RTL8723BEPHY_REG_1TARRAY[] = { + 0x800, 0x80040000, + 0x804, 0x00000003, + 0x808, 0x0000FC00, + 0x80C, 0x0000000A, + 0x810, 0x10001331, + 0x814, 0x020C3D10, + 0x818, 0x02200385, + 0x81C, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390204, + 0x828, 0x00000000, + 0x82C, 0x00000000, + 0x830, 0x00000000, + 0x834, 0x00000000, + 0x838, 0x00000000, + 0x83C, 0x00000000, + 0x840, 0x00010000, + 0x844, 0x00000000, + 0x848, 0x00000000, + 0x84C, 0x00000000, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569A11A9, + 0x85C, 0x01000014, + 0x860, 0x66F60110, + 0x864, 0x061F0649, + 0x868, 0x00000000, + 0x86C, 0x27272700, + 0x870, 0x07000760, + 0x874, 0x25004000, + 0x878, 0x00000808, + 0x87C, 0x00000000, + 0x880, 0xB0000C1C, + 0x884, 0x00000001, + 0x888, 0x00000000, + 0x88C, 0xCCC000C0, + 0x890, 0x00000800, + 0x894, 0xFFFFFFFE, + 0x898, 0x40302010, + 0x89C, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90C, 0x81121111, + 0x910, 0x00000002, + 0x914, 0x00000201, + 0x948, 0x00000000, + 0xA00, 0x00D047C8, + 0xA04, 0x80FF000C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7F120F, + 0xA10, 0x9500BB78, + 0xA14, 0x1114D028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00D30000, + 0xA70, 0x101FBF00, + 0xA74, 0x00000007, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x21806490, + 0xB2C, 0x00000000, + 0xC00, 0x48071D40, + 0xC04, 0x03A05611, + 0xC08, 0x000000E4, + 0xC0C, 0x6C6C6C6C, + 0xC10, 0x08800000, + 0xC14, 0x40000100, + 0xC18, 0x08800000, + 0xC1C, 0x40000100, + 0xC20, 0x00000000, + 0xC24, 0x00000000, + 0xC28, 0x00000000, + 0xC2C, 0x00000000, + 0xC30, 0x69E9AC44, + 0xC34, 0x469652AF, + 0xC38, 0x49795994, + 0xC3C, 0x0A97971C, + 0xC40, 0x1F7C403F, + 0xC44, 0x000100B7, + 0xC48, 0xEC020107, + 0xC4C, 0x007F037F, + 0xC50, 0x69553420, + 0xC54, 0x43BC0094, + 0xC58, 0x00023169, + 0xC5C, 0x00250492, + 0xC60, 0x00000000, + 0xC64, 0x7112848B, + 0xC68, 0x47C00BFF, + 0xC6C, 0x00000036, + 0xC70, 0x2C7F000D, + 0xC74, 0x020610DB, + 0xC78, 0x0000001F, + 0xC7C, 0x00B91612, + 0xC80, 0x390000E4, + 0xC84, 0x20F60000, + 0xC88, 0x40000100, + 0xC8C, 0x20200000, + 0xC90, 0x00020E1A, + 0xC94, 0x00000000, + 0xC98, 0x00020E1A, + 0xC9C, 0x00007F7F, + 0xCA0, 0x00000000, + 0xCA4, 0x000300A0, + 0xCA8, 0x00000000, + 0xCAC, 0x00000000, + 0xCB0, 0x00000000, + 0xCB4, 0x00000000, + 0xCB8, 0x00000000, + 0xCBC, 0x28000000, + 0xCC0, 0x00000000, + 0xCC4, 0x00000000, + 0xCC8, 0x00000000, + 0xCCC, 0x00000000, + 0xCD0, 0x00000000, + 0xCD4, 0x00000000, + 0xCD8, 0x64B22427, + 0xCDC, 0x00766932, + 0xCE0, 0x00222222, + 0xCE4, 0x00000000, + 0xCE8, 0x37644302, + 0xCEC, 0x2F97D40C, + 0xD00, 0x00000740, + 0xD04, 0x40020401, + 0xD08, 0x0000907F, + 0xD0C, 0x20010201, + 0xD10, 0xA0633333, + 0xD14, 0x3333BC53, + 0xD18, 0x7A8F5B6F, + 0xD2C, 0xCC979975, + 0xD30, 0x00000000, + 0xD34, 0x80608000, + 0xD38, 0x00000000, + 0xD3C, 0x00127353, + 0xD40, 0x00000000, + 0xD44, 0x00000000, + 0xD48, 0x00000000, + 0xD4C, 0x00000000, + 0xD50, 0x6437140A, + 0xD54, 0x00000000, + 0xD58, 0x00000282, + 0xD5C, 0x30032064, + 0xD60, 0x4653DE68, + 0xD64, 0x04518A3C, + 0xD68, 0x00002101, + 0xD6C, 0x2A201C16, + 0xD70, 0x1812362E, + 0xD74, 0x322C2220, + 0xD78, 0x000E3C24, + 0xE00, 0x2D2D2D2D, + 0xE04, 0x2D2D2D2D, + 0xE08, 0x0390272D, + 0xE10, 0x2D2D2D2D, + 0xE14, 0x2D2D2D2D, + 0xE18, 0x2D2D2D2D, + 0xE1C, 0x2D2D2D2D, + 0xE28, 
0x00000000, + 0xE30, 0x1000DC1F, + 0xE34, 0x10008C1F, + 0xE38, 0x02140102, + 0xE3C, 0x681604C2, + 0xE40, 0x01007C00, + 0xE44, 0x01004800, + 0xE48, 0xFB000000, + 0xE4C, 0x000028D1, + 0xE50, 0x1000DC1F, + 0xE54, 0x10008C1F, + 0xE58, 0x02140102, + 0xE5C, 0x28160D05, + 0xE60, 0x00000008, + 0xE68, 0x001B2556, + 0xE6C, 0x00C00096, + 0xE70, 0x00C00096, + 0xE74, 0x01000056, + 0xE78, 0x01000014, + 0xE7C, 0x01000056, + 0xE80, 0x01000014, + 0xE84, 0x00C00096, + 0xE88, 0x01000056, + 0xE8C, 0x00C00096, + 0xED0, 0x00C00096, + 0xED4, 0x00C00096, + 0xED8, 0x00C00096, + 0xEDC, 0x000000D6, + 0xEE0, 0x000000D6, + 0xEEC, 0x01C00016, + 0xF14, 0x00000003, + 0xF4C, 0x00000000, + 0xF00, 0x00000300, + 0x820, 0x01000100, + 0x800, 0x83040000, +}; + +u32 RTL8723BEPHY_REG_ARRAY_PG[] = { + 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000, + 0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800, + 0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646, + 0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840, + 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244, + 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436 +}; + +u32 RTL8723BE_RADIOA_1TARRAY[] = { + 0x000, 0x00010000, + 0x0B0, 0x000DFFE0, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0B1, 0x00000018, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0B2, 0x00084C00, + 0x0B5, 0x0000D2CC, + 0x0B6, 0x000925AA, + 0x0B7, 0x00000010, + 0x0B8, 0x0000907F, + 0x05C, 0x00000002, + 0x07C, 0x00000002, + 0x07E, 0x00000005, + 0x08B, 0x0006FC00, + 0x0B0, 0x000FF9F0, + 0x01C, 0x000739D2, + 0x01E, 0x00000000, + 0x0DF, 0x00000780, + 0x050, 0x00067435, + 0x051, 0x0006B04E, + 0x052, 0x000007D2, + 0x053, 0x00000000, + 0x054, 0x00050400, + 0x055, 0x0004026E, + 0x0DD, 0x0000004C, + 0x070, 0x00067435, + 0x071, 0x0006B04E, + 0x072, 0x000007D2, + 0x073, 0x00000000, + 0x074, 0x00050400, + 0x075, 0x0004026E, + 0x0EF, 0x00000100, + 0x034, 0x0000ADD7, + 0x035, 0x00005C00, + 0x034, 0x00009DD4, + 0x035, 0x00005000, + 0x034, 0x00008DD1, + 0x035, 0x00004400, + 0x034, 0x00007DCE, + 0x035, 0x00003800, + 0x034, 0x00006CD1, + 0x035, 0x00004400, + 0x034, 0x00005CCE, + 0x035, 0x00003800, + 0x034, 0x000048CE, + 0x035, 0x00004400, + 0x034, 0x000034CE, + 0x035, 0x00003800, + 0x034, 0x00002451, + 0x035, 0x00004400, + 0x034, 0x0000144E, + 0x035, 0x00003800, + 0x034, 0x00000051, + 0x035, 0x00004400, + 0x0EF, 0x00000000, + 0x0EF, 0x00000100, + 0x0ED, 0x00000010, + 0x044, 0x0000ADD7, + 0x044, 0x00009DD4, + 0x044, 0x00008DD1, + 0x044, 0x00007DCE, + 0x044, 0x00006CC1, + 0x044, 0x00005CCE, + 0x044, 0x000044D1, + 0x044, 0x000034CE, + 0x044, 0x00002451, + 0x044, 0x0000144E, + 0x044, 0x00000051, + 0x0EF, 0x00000000, + 0x0ED, 0x00000000, + 0x0EF, 0x00002000, + 0x03B, 0x000380EF, + 0x03B, 0x000302FE, + 0x03B, 0x00028CE6, + 0x03B, 0x000200BC, + 0x03B, 0x000188A5, + 0x03B, 0x00010FBC, + 0x03B, 0x00008F71, + 0x03B, 0x00000900, + 0x0EF, 0x00000000, + 0x0ED, 0x00000001, + 0x040, 0x000380EF, + 0x040, 0x000302FE, + 0x040, 0x00028CE6, + 0x040, 0x000200BC, + 0x040, 0x000188A5, + 0x040, 0x00010FBC, + 0x040, 0x00008F71, + 0x040, 0x00000900, + 0x0ED, 0x00000000, + 0x082, 0x00080000, + 0x083, 0x00008000, + 0x084, 0x00048D80, + 0x085, 0x00068000, + 0x0A2, 0x00080000, + 0x0A3, 0x00008000, + 0x0A4, 0x00048D80, + 0x0A5, 0x00068000, + 0x000, 0x00033D80, +}; + +u32 RTL8723BEMAC_1T_ARRAY[] = { + 0x02F, 0x00000030, + 0x035, 0x00000000, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 
0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, +}; + +u32 RTL8723BEAGCTAB_1TARRAY[] = { + 0xC78, 0xFD000001, + 0xC78, 0xFC010001, + 0xC78, 0xFB020001, + 0xC78, 0xFA030001, + 0xC78, 0xF9040001, + 0xC78, 0xF8050001, + 0xC78, 0xF7060001, + 0xC78, 0xF6070001, + 0xC78, 0xF5080001, + 0xC78, 0xF4090001, + 0xC78, 0xF30A0001, + 0xC78, 0xF20B0001, + 0xC78, 0xF10C0001, + 0xC78, 0xF00D0001, + 0xC78, 0xEF0E0001, + 0xC78, 0xEE0F0001, + 0xC78, 0xED100001, + 0xC78, 0xEC110001, + 0xC78, 0xEB120001, + 0xC78, 0xEA130001, + 0xC78, 0xE9140001, + 0xC78, 0xE8150001, + 0xC78, 0xE7160001, + 0xC78, 0xAA170001, + 0xC78, 0xA9180001, + 0xC78, 0xA8190001, + 0xC78, 0xA71A0001, + 0xC78, 0xA61B0001, + 0xC78, 0xA51C0001, + 0xC78, 0xA41D0001, + 0xC78, 0xA31E0001, + 0xC78, 0x671F0001, + 0xC78, 0x66200001, + 0xC78, 0x65210001, + 0xC78, 0x64220001, + 0xC78, 0x63230001, + 0xC78, 0x62240001, + 0xC78, 0x61250001, + 0xC78, 0x47260001, + 0xC78, 0x46270001, + 0xC78, 0x45280001, + 0xC78, 0x44290001, + 0xC78, 0x432A0001, + 0xC78, 0x422B0001, + 0xC78, 0x292C0001, + 0xC78, 0x282D0001, + 0xC78, 0x272E0001, + 0xC78, 0x262F0001, + 0xC78, 0x25300001, + 0xC78, 0x24310001, + 0xC78, 0x09320001, + 0xC78, 0x08330001, + 0xC78, 0x07340001, + 0xC78, 0x06350001, + 0xC78, 0x05360001, + 0xC78, 0x04370001, + 0xC78, 0x03380001, + 0xC78, 0x02390001, + 0xC78, 0x013A0001, + 0xC78, 0x003B0001, + 0xC78, 0x003C0001, + 0xC78, 0x003D0001, + 0xC78, 0x003E0001, + 0xC78, 0x003F0001, + 0xC78, 0xFC400001, + 0xC78, 0xFB410001, + 0xC78, 0xFA420001, + 0xC78, 0xF9430001, + 0xC78, 0xF8440001, + 0xC78, 0xF7450001, + 0xC78, 0xF6460001, + 0xC78, 0xF5470001, + 0xC78, 0xF4480001, + 0xC78, 0xF3490001, + 0xC78, 0xF24A0001, + 0xC78, 0xF14B0001, + 0xC78, 0xF04C0001, + 0xC78, 0xEF4D0001, + 0xC78, 0xEE4E0001, + 0xC78, 0xED4F0001, + 0xC78, 0xEC500001, + 0xC78, 0xEB510001, + 0xC78, 
0xEA520001, + 0xC78, 0xE9530001, + 0xC78, 0xE8540001, + 0xC78, 0xE7550001, + 0xC78, 0xE6560001, + 0xC78, 0xE5570001, + 0xC78, 0xAA580001, + 0xC78, 0xA9590001, + 0xC78, 0xA85A0001, + 0xC78, 0xA75B0001, + 0xC78, 0xA65C0001, + 0xC78, 0xA55D0001, + 0xC78, 0xA45E0001, + 0xC78, 0x675F0001, + 0xC78, 0x66600001, + 0xC78, 0x65610001, + 0xC78, 0x64620001, + 0xC78, 0x63630001, + 0xC78, 0x62640001, + 0xC78, 0x61650001, + 0xC78, 0x47660001, + 0xC78, 0x46670001, + 0xC78, 0x45680001, + 0xC78, 0x44690001, + 0xC78, 0x436A0001, + 0xC78, 0x426B0001, + 0xC78, 0x296C0001, + 0xC78, 0x286D0001, + 0xC78, 0x276E0001, + 0xC78, 0x266F0001, + 0xC78, 0x25700001, + 0xC78, 0x24710001, + 0xC78, 0x09720001, + 0xC78, 0x08730001, + 0xC78, 0x07740001, + 0xC78, 0x06750001, + 0xC78, 0x05760001, + 0xC78, 0x04770001, + 0xC78, 0x03780001, + 0xC78, 0x02790001, + 0xC78, 0x017A0001, + 0xC78, 0x007B0001, + 0xC78, 0x007C0001, + 0xC78, 0x007D0001, + 0xC78, 0x007E0001, + 0xC78, 0x007F0001, + 0xC50, 0x69553422, + 0xC50, 0x69553420, +}; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/table.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/table.h @@ -0,0 +1,43 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_TABLE__H_ +#define __RTL8723BE_TABLE__H_ + +#include +#define RTL8723BEPHY_REG_1TARRAYLEN 388 +extern u32 RTL8723BEPHY_REG_1TARRAY[]; +#define RTL8723BEPHY_REG_ARRAY_PGLEN 36 +extern u32 RTL8723BEPHY_REG_ARRAY_PG[]; +#define RTL8723BE_RADIOA_1TARRAYLEN 206 +extern u32 RTL8723BE_RADIOA_1TARRAY[]; +#define RTL8723BEMAC_1T_ARRAYLEN 194 +extern u32 RTL8723BEMAC_1T_ARRAY[]; +#define RTL8723BEAGCTAB_1TARRAYLEN 260 +extern u32 RTL8723BEAGCTAB_1TARRAY[]; + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/trx.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/trx.c @@ -0,0 +1,959 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 
2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../stats.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "trx.h" +#include "led.h" +#include "dm.h" +#include "phy.h" + +static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) +{ + __le16 fc = rtl_get_fc(skb); + + if (unlikely(ieee80211_is_beacon(fc))) + return QSLT_BEACON; + if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) + return QSLT_MGNT; + + return skb->priority; +} + +/* mac80211's rate_idx is like this: + * + * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * + * B/G rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + * + * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * A rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15 + */ +static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw, + bool isht, u8 desc_rate) +{ + int rate_idx; + + if (!isht) { + if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { + switch (desc_rate) { + case DESC92C_RATE1M: + rate_idx = 0; + break; + case DESC92C_RATE2M: + rate_idx = 1; + break; + case DESC92C_RATE5_5M: + rate_idx = 2; + break; + case DESC92C_RATE11M: + rate_idx = 3; + break; + case DESC92C_RATE6M: + rate_idx = 4; + break; + case DESC92C_RATE9M: + rate_idx = 5; + break; + case DESC92C_RATE12M: + rate_idx = 6; + break; + case DESC92C_RATE18M: + rate_idx = 7; + break; + case DESC92C_RATE24M: + rate_idx = 8; + break; + case DESC92C_RATE36M: + rate_idx = 9; + break; + case DESC92C_RATE48M: + rate_idx = 10; + break; + case DESC92C_RATE54M: + rate_idx = 11; + break; + default: + rate_idx = 0; + break; + } + } else { + switch (desc_rate) { + case DESC92C_RATE6M: + rate_idx = 0; + break; + case DESC92C_RATE9M: + rate_idx = 1; + break; + case DESC92C_RATE12M: + rate_idx = 2; + break; + case DESC92C_RATE18M: + rate_idx = 3; + break; + case DESC92C_RATE24M: + rate_idx = 4; + break; + case DESC92C_RATE36M: + rate_idx = 5; + break; + case DESC92C_RATE48M: + rate_idx = 6; + break; + case DESC92C_RATE54M: + rate_idx = 7; + break; + default: + rate_idx = 0; + break; + } + } + } else { + switch (desc_rate) { + case DESC92C_RATEMCS0: + rate_idx = 0; + break; + case DESC92C_RATEMCS1: + rate_idx = 1; + break; + case DESC92C_RATEMCS2: + rate_idx = 2; + break; + case DESC92C_RATEMCS3: + rate_idx = 3; + break; + case DESC92C_RATEMCS4: + rate_idx = 4; + break; + case DESC92C_RATEMCS5: + rate_idx = 5; + break; + case DESC92C_RATEMCS6: + rate_idx = 6; + break; + case DESC92C_RATEMCS7: + rate_idx = 7; + break; + case DESC92C_RATEMCS8: + rate_idx = 8; + break; + case DESC92C_RATEMCS9: + rate_idx = 9; + break; + case DESC92C_RATEMCS10: + rate_idx = 10; + break; + case DESC92C_RATEMCS11: + rate_idx = 11; + break; + case DESC92C_RATEMCS12: + rate_idx = 12; + break; + case DESC92C_RATEMCS13: + rate_idx = 13; + break; + case DESC92C_RATEMCS14: + rate_idx = 14; + break; + case DESC92C_RATEMCS15: + rate_idx = 15; + break; + default: + rate_idx = 0; + break; + } + } + return rate_idx; +} + +static void _rtl8723be_query_rxphystatus(struct 
ieee80211_hw *hw, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo_8723be *p_drvinfo, + bool packet_match_bssid, + bool packet_toself, + bool packet_beacon) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct phy_sts_cck_8723e_t *cck_buf; + struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + char rx_pwr_all = 0, rx_pwr[4]; + u8 rf_rx_num = 0, evm, pwdb_all; + u8 i, max_spatial_stream; + u32 rssi, total_rssi = 0; + bool is_cck = pstatus->is_cck; + u8 lan_idx, vga_idx; + + /* Record it for next packet processing */ + pstatus->packet_matchbssid = packet_match_bssid; + pstatus->packet_toself = packet_toself; + pstatus->packet_beacon = packet_beacon; + pstatus->rx_mimo_sig_qual[0] = -1; + pstatus->rx_mimo_sig_qual[1] = -1; + + if (is_cck) { + u8 cck_highpwr; + u8 cck_agc_rpt; + /* CCK Driver info Structure is not the same as OFDM packet. */ + cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo; + cck_agc_rpt = cck_buf->cck_agc_rpt; + + /* (1)Hardware does not provide RSSI for CCK + * (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) + */ + if (ppsc->rfpwr_state == ERFON) + cck_highpwr = (u8) rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + BIT(9)); + else + cck_highpwr = false; + + lan_idx = ((cck_agc_rpt & 0xE0) >> 5); + vga_idx = (cck_agc_rpt & 0x1f); + switch (lan_idx) { + case 7: + if (vga_idx <= 27)/*VGA_idx = 27~2*/ + rx_pwr_all = -100 + 2 * (27 - vga_idx); + else + rx_pwr_all = -100; + break; + case 6:/*VGA_idx = 2~0*/ + rx_pwr_all = -48 + 2 * (2 - vga_idx); + break; + case 5:/*VGA_idx = 7~5*/ + rx_pwr_all = -42 + 2 * (7 - vga_idx); + break; + case 4:/*VGA_idx = 7~4*/ + rx_pwr_all = -36 + 2 * (7 - vga_idx); + break; + case 3:/*VGA_idx = 7~0*/ + rx_pwr_all = -24 + 2 * (7 - vga_idx); + break; + case 2: + if (cck_highpwr)/*VGA_idx = 5~0*/ + rx_pwr_all = -12 + 2 * (5 - vga_idx); + else + rx_pwr_all = -6 + 2 * (5 - vga_idx); + break; + case 1: + rx_pwr_all = 8 - 2 * vga_idx; + break; + case 0: + rx_pwr_all = 14 - 2 * vga_idx; + break; + default: + break; + } + rx_pwr_all += 6; + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + /* CCK gain is smaller than OFDM/MCS gain, */ + /* so we add gain diff by experiences, + * the val is 6 + */ + pwdb_all += 6; + if (pwdb_all > 100) + pwdb_all = 100; + /* modify the offset to make the same gain index with OFDM. 
*/ + if (pwdb_all > 34 && pwdb_all <= 42) + pwdb_all -= 2; + else if (pwdb_all > 26 && pwdb_all <= 34) + pwdb_all -= 6; + else if (pwdb_all > 14 && pwdb_all <= 26) + pwdb_all -= 8; + else if (pwdb_all > 4 && pwdb_all <= 14) + pwdb_all -= 4; + if (!cck_highpwr) { + if (pwdb_all >= 80) + pwdb_all = ((pwdb_all - 80) << 1) + + ((pwdb_all - 80) >> 1) + 80; + else if ((pwdb_all <= 78) && (pwdb_all >= 20)) + pwdb_all += 3; + if (pwdb_all > 100) + pwdb_all = 100; + } + + pstatus->rx_pwdb_all = pwdb_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3) Get Signal Quality (EVM) */ + if (packet_match_bssid) { + u8 sq; + + if (pstatus->rx_pwdb_all > 40) { + sq = 100; + } else { + sq = cck_buf->sq_rpt; + if (sq > 64) + sq = 0; + else if (sq < 20) + sq = 100; + else + sq = ((64 - sq) * 100) / 44; + } + + pstatus->signalquality = sq; + pstatus->rx_mimo_sig_qual[0] = sq; + pstatus->rx_mimo_sig_qual[1] = -1; + } + } else { + rtlpriv->dm.rfpath_rxenable[0] = true; + rtlpriv->dm.rfpath_rxenable[1] = true; + + /* (1)Get RSSI for HT rate */ + for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { + /* we will judge RF RX path now. */ + if (rtlpriv->dm.rfpath_rxenable[i]) + rf_rx_num++; + + rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110; + + /* Translate DBM to percentage. */ + rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); + total_rssi += rssi; + + /* Get Rx snr value in DB */ + rtlpriv->stats.rx_snr_db[i] = + (long)(p_drvinfo->rxsnr[i] / 2); + + /* Record Signal Strength for next packet */ + if (packet_match_bssid) + pstatus->rx_mimo_signalstrength[i] = (u8) rssi; + } + + /* (2)PWDB, Avg cacluated by hardware (for rate adaptive) */ + rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; + + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + pstatus->rx_pwdb_all = pwdb_all; + pstatus->rxpower = rx_pwr_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3)EVM of HT rate */ + if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 && + pstatus->rate <= DESC92C_RATEMCS15) + max_spatial_stream = 2; + else + max_spatial_stream = 1; + + for (i = 0; i < max_spatial_stream; i++) { + evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); + + if (packet_match_bssid) { + /* Fill value in RFD, Get the first + * spatial stream only + */ + if (i == 0) + pstatus->signalquality = + (u8) (evm & 0xff); + pstatus->rx_mimo_sig_qual[i] = + (u8) (evm & 0xff); + } + } + if (packet_match_bssid) { + for (i = RF90_PATH_A; i <= RF90_PATH_B; i++) + rtl_priv(hw)->dm.cfo_tail[i] = + (char)p_phystrpt->path_cfotail[i]; + + rtl_priv(hw)->dm.packet_count++; + if (rtl_priv(hw)->dm.packet_count == 0xffffffff) + rtl_priv(hw)->dm.packet_count = 0; + } + } + + /* UI BSS List signal strength(in percentage), + * make it good looking, from 0~100. 
+ */ + if (is_cck) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + pwdb_all)); + else if (rf_rx_num != 0) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + total_rssi /= rf_rx_num)); + /*HW antenna diversity*/ + rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->ant_sel; + rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->ant_sel_b; + rtldm->fat_table.antsel_rx_keep_2 = p_phystrpt->antsel_rx_keep_2; +} + +static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstatus, + u8 *pdesc, + struct rx_fwinfo_8723be *p_drvinfo) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct ieee80211_hdr *hdr; + u8 *tmp_buf; + u8 *praddr; + u8 *psaddr; + u16 fc, type; + bool packet_matchbssid, packet_toself, packet_beacon; + + tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; + + hdr = (struct ieee80211_hdr *)tmp_buf; + fc = le16_to_cpu(hdr->frame_control); + type = WLAN_FC_GET_TYPE(hdr->frame_control); + praddr = hdr->addr1; + psaddr = ieee80211_get_SA(hdr); + memcpy(pstatus->psaddr, psaddr, ETH_ALEN); + + packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && + (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ? + hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? + hdr->addr2 : hdr->addr3)) && + (!pstatus->hwerror) && + (!pstatus->crc) && (!pstatus->icv)); + + packet_toself = packet_matchbssid && + (!ether_addr_equal(praddr, rtlefuse->dev_addr)); + + /* YP: packet_beacon is not initialized, + * this assignment is neccesary, + * otherwise it counld be true in this case + * the situation is much worse in Kernel 3.10 + */ + if (ieee80211_is_beacon(hdr->frame_control)) + packet_beacon = true; + else + packet_beacon = false; + + if (packet_beacon && packet_matchbssid) + rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++; + + _rtl8723be_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, + packet_matchbssid, + packet_toself, + packet_beacon); + + rtl_process_phyinfo(hw, tmp_buf, pstatus); +} + +static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, + u8 *virtualaddress) +{ + u32 dwtmp = 0; + memset(virtualaddress, 0, 8); + + SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); + if (ptcb_desc->empkt_num == 1) { + dwtmp = ptcb_desc->empkt_len[0]; + } else { + dwtmp = ptcb_desc->empkt_len[0]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[1]; + } + SET_EARLYMODE_LEN0(virtualaddress, dwtmp); + + if (ptcb_desc->empkt_num <= 3) { + dwtmp = ptcb_desc->empkt_len[2]; + } else { + dwtmp = ptcb_desc->empkt_len[2]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[3]; + } + SET_EARLYMODE_LEN1(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 5) { + dwtmp = ptcb_desc->empkt_len[4]; + } else { + dwtmp = ptcb_desc->empkt_len[4]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[5]; + } + SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); + SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); + if (ptcb_desc->empkt_num <= 7) { + dwtmp = ptcb_desc->empkt_len[6]; + } else { + dwtmp = ptcb_desc->empkt_len[6]; + dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[7]; + } + SET_EARLYMODE_LEN3(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 9) { + dwtmp = ptcb_desc->empkt_len[8]; + } else { + dwtmp = ptcb_desc->empkt_len[8]; + dwtmp += ((dwtmp % 4) ? 
(4 - dwtmp % 4) : 0) + 4; + dwtmp += ptcb_desc->empkt_len[9]; + } + SET_EARLYMODE_LEN4(virtualaddress, dwtmp); +} + +bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rx_fwinfo_8723be *p_drvinfo; + struct ieee80211_hdr *hdr; + + u32 phystatus = GET_RX_DESC_PHYST(pdesc); + status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc); + if (status->packet_report_type == TX_REPORT2) + status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc); + else + status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); + status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); + status->icv = (u16) GET_RX_DESC_ICV(pdesc); + status->crc = (u16) GET_RX_DESC_CRC32(pdesc); + status->hwerror = (status->crc | status->icv); + status->decrypted = !GET_RX_DESC_SWDEC(pdesc); + status->rate = (u8) GET_RX_DESC_RXMCS(pdesc); + status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); + status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); + status->isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); + if (status->packet_report_type == NORMAL_RX) + status->timestamp_low = GET_RX_DESC_TSFL(pdesc); + status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); + + status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate); + + status->macid = GET_RX_DESC_MACID(pdesc); + if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) + status->wake_match = BIT(2); + else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) + status->wake_match = BIT(1); + else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) + status->wake_match = BIT(0); + else + status->wake_match = 0; + if (status->wake_match) + RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, + "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n", + status->wake_match); + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; + + + hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size + + status->rx_bufshift); + + if (status->crc) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (status->rx_is40Mhzpacket) + rx_status->flag |= RX_FLAG_40MHZ; + + if (status->is_ht) + rx_status->flag |= RX_FLAG_HT; + + rx_status->flag |= RX_FLAG_MACTIME_START; + + /* hw will set status->decrypted true, if it finds the + * frame is open data frame or mgmt frame. 
+ * So hw will not decryption robust managment frame + * for IEEE80211w but still set status->decrypted + * true, so here we should set it back to undecrypted + * for IEEE80211w frame, and mac80211 sw will help + * to decrypt it + */ + if (status->decrypted) { + if (!hdr) { + WARN_ON_ONCE(true); + pr_err("decrypted is true but hdr NULL in skb %p\n", + rtl_get_hdr(skb)); + return false; + } + + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && + (ieee80211_has_protected(hdr->frame_control))) + rx_status->flag &= ~RX_FLAG_DECRYPTED; + else + rx_status->flag |= RX_FLAG_DECRYPTED; + } + + /* rate_idx: index of data rate into band's + * supported rates or MCS index if HT rates + * are use (RX_FLAG_HT) + * Notice: this is diff with windows define + */ + rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht, + status->rate); + + rx_status->mactime = status->timestamp_low; + if (phystatus) { + p_drvinfo = (struct rx_fwinfo_8723be *)(skb->data + + status->rx_bufshift); + + _rtl8723be_translate_rx_signal_stuff(hw, skb, status, + pdesc, p_drvinfo); + } + + /*rx_status->qual = status->signal; */ + rx_status->signal = status->recvsignalpower + 10; + if (status->packet_report_type == TX_REPORT2) { + status->macid_valid_entry[0] = + GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + status->macid_valid_entry[1] = + GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + } + return true; +} + +void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 *pdesc = (u8 *)pdesc_tx; + u16 seq_number; + __le16 fc = hdr->frame_control; + unsigned int buf_len = 0; + unsigned int skb_len = skb->len; + u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue); + bool firstseg = ((hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + bool lastseg = ((hdr->frame_control & + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + dma_addr_t mapping; + u8 bw_40 = 0; + u8 short_gi = 0; + + if (mac->opmode == NL80211_IFTYPE_STATION) { + bw_40 = mac->bw_40; + } else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (sta) + bw_40 = sta->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } + seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); + /* reserve 8 byte for AMPDU early mode */ + if (rtlhal->earlymode_enable) { + skb_push(skb, EM_HDR_LEN); + memset(skb->data, 0, EM_HDR_LEN); + } + buf_len = skb->len; + mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error"); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be)); + if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { + firstseg = true; + lastseg = true; + } + if (firstseg) { + if (rtlhal->earlymode_enable) { + SET_TX_DESC_PKT_OFFSET(pdesc, 1); + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + + EM_HDR_LEN); + if (ptcb_desc->empkt_num) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "Insert 8 byte.pTcb->EMPktNum:%d\n", + ptcb_desc->empkt_num); + _rtl8723be_insert_emcontent(ptcb_desc, + (u8 *)(skb->data)); + } + } else { + SET_TX_DESC_OFFSET(pdesc, 
USB_HWDESC_HEADER_LEN); + } + + /* ptcb_desc->use_driver_rate = true; */ + SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + if (ptcb_desc->hw_rate > DESC92C_RATEMCS0) + short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; + else + short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0; + + SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + SET_TX_DESC_AGG_ENABLE(pdesc, 1); + SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + } + SET_TX_DESC_SEQ(pdesc, seq_number); + SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable && + !ptcb_desc->cts_enable) ? + 1 : 0)); + SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0); + SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? + 1 : 0)); + + SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); + + SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); + SET_TX_DESC_RTS_SHORT(pdesc, + ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ? + (ptcb_desc->rts_use_shortpreamble ? 1 : 0) : + (ptcb_desc->rts_use_shortgi ? 1 : 0))); + + if (ptcb_desc->btx_enable_sw_calc_duration) + SET_TX_DESC_NAV_USE_HDR(pdesc, 1); + + if (bw_40) { + if (ptcb_desc->packet_bw) { + SET_TX_DESC_DATA_BW(pdesc, 1); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + } else { + SET_TX_DESC_DATA_BW(pdesc, 0); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc); + } + } else { + SET_TX_DESC_DATA_BW(pdesc, 0); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + } + + SET_TX_DESC_LINIP(pdesc, 0); + SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len); + if (sta) { + u8 ampdu_density = sta->ht_cap.ampdu_density; + SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + } + if (info->control.hw_key) { + struct ieee80211_key_conf *keyconf = + info->control.hw_key; + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + break; + case WLAN_CIPHER_SUITE_CCMP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + break; + default: + SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + break; + } + } + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); + SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); + SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); + SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? + 1 : 0); + SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + + if (ieee80211_is_data_qos(fc)) { + if (mac->rdg_en) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "Enable RDG function.\n"); + SET_TX_DESC_RDG_ENABLE(pdesc, 1); + SET_TX_DESC_HTC(pdesc, 1); + } + } + } + + SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); + SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len); + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + + if (!ieee80211_is_data_qos(fc)) { + SET_TX_DESC_HWSEQ_EN(pdesc, 1); + SET_TX_DESC_HWSEQ_SEL(pdesc, 0); + } + SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 
0 : 1)); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { + SET_TX_DESC_BMC(pdesc, 1); + } + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n"); +} + +void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + bool b_firstseg, bool b_lastseg, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 fw_queue = QSLT_BEACON; + + dma_addr_t mapping = pci_map_single(rtlpci->pdev, + skb->data, skb->len, + PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + "DMA mapping error"); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + + SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); + + SET_TX_DESC_SEQ(pdesc, 0); + + SET_TX_DESC_LINIP(pdesc, 0); + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len)); + + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); + + SET_TX_DESC_RATE_ID(pdesc, 0); + SET_TX_DESC_MACID(pdesc, 0); + + SET_TX_DESC_OWN(pdesc, 1); + + SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len)); + + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + + SET_TX_DESC_USE_RATE(pdesc, 1); +} + +void rtl8723be_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val) +{ + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + SET_TX_DESC_OWN(pdesc, 1); + break; + case HW_DESC_TX_NEXTDESC_ADDR: + SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); + break; + default: + RT_ASSERT(false, "ERR txdesc :%d not process\n", + desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_RXOWN: + SET_RX_DESC_OWN(pdesc, 1); + break; + case HW_DESC_RXBUFF_ADDR: + SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); + break; + case HW_DESC_RXPKT_LEN: + SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); + break; + case HW_DESC_RXERO: + SET_RX_DESC_EOR(pdesc, 1); + break; + default: + RT_ASSERT(false, "ERR rxdesc :%d not process\n", + desc_name); + break; + } + } +} + +u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) +{ + u32 ret = 0; + + if (istx) { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_TX_DESC_OWN(pdesc); + break; + case HW_DESC_TXBUFF_ADDR: + ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); + break; + default: + RT_ASSERT(false, "ERR txdesc :%d not process\n", + desc_name); + break; + } + } else { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_RX_DESC_OWN(pdesc); + break; + case HW_DESC_RXPKT_LEN: + ret = GET_RX_DESC_PKT_LEN(pdesc); + break; + default: + RT_ASSERT(false, "ERR rxdesc :%d not process\n", + desc_name); + break; + } + } + return ret; +} + +bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8) rtl8723be_get_desc(entry, true, HW_DESC_OWN); + + /*beacon packet will only use the first + *descriptor by default, and the own may not + *be cleared by the hardware + */ + if (own) + return false; + else + return true; +} + +void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (hw_queue == BEACON_QUEUE) { + rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4)); + } else { + rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, + BIT(0) << 
(hw_queue)); + } +} --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723be/trx.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723be/trx.h @@ -0,0 +1,616 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8723BE_TRX_H__ +#define __RTL8723BE_TRX_H__ + +#define TX_DESC_SIZE 40 +#define TX_DESC_AGGR_SUBFRAME_SIZE 32 + +#define RX_DESC_SIZE 32 +#define RX_DRV_INFO_SIZE_UNIT 8 + +#define TX_DESC_NEXT_DESC_OFFSET 40 +#define USB_HWDESC_HEADER_LEN 40 +#define CRCLENGTH 4 + +#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) +#define SET_TX_DESC_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) +#define SET_TX_DESC_BMC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) +#define SET_TX_DESC_HTC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) +#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) +#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) +#define SET_TX_DESC_LINIP(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) +#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) +#define SET_TX_DESC_GF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_TX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_TX_DESC_PKT_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 16) +#define GET_TX_DESC_OFFSET(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 8) +#define GET_TX_DESC_BMC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 1) +#define GET_TX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 25, 1) +#define GET_TX_DESC_LAST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_TX_DESC_FIRST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_TX_DESC_LINIP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_TX_DESC_NO_ACM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_TX_DESC_GF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_TX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_TX_DESC_MACID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val) +#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) +#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) +#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) +#define SET_TX_DESC_PIFS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) +#define SET_TX_DESC_RATE_ID(__pdesc, 
__val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val) +#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) +#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) +#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val) + + +#define SET_TX_DESC_PAID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val) +#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val) +#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val) +#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val) +#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) +#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val) +#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) +#define SET_TX_DESC_RAW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) +#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) +#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) +#define SET_TX_DESC_BT_INT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val) +#define SET_TX_DESC_GID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val) + + +#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val) +#define SET_TX_DESC_CHK_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val) +#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val) +#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val) +#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val) +#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val) +#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val) +#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val) +#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val) +#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val) +#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val) +#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val) +#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val) +#define SET_TX_DESC_NDPA(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val) +#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val) + + +#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val) +#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val) +#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val) +#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val) +#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val) +#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val) + + +#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val) +#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val) +#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val) +#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) +#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val) +#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val) +#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val) +#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) + + +#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) + +#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) + +#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val) + +#define SET_TX_DESC_SEQ(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val) + +#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) + +#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) + + +#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val) + +#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+48, 0, 32) + +#define GET_RX_DESC_PKT_LEN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 14) +#define GET_RX_DESC_CRC32(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 14, 1) +#define GET_RX_DESC_ICV(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 15, 1) +#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 4) +#define GET_RX_DESC_SECURITY(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 20, 3) +#define GET_RX_DESC_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 23, 1) +#define GET_RX_DESC_SHIFT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 2) +#define GET_RX_DESC_PHYST(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_RX_DESC_SWDEC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_RX_DESC_LS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_RX_DESC_FS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_RX_DESC_EOR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_RX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) +#define SET_RX_DESC_EOR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_RX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_RX_DESC_MACID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 0, 7) +#define GET_RX_DESC_TID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 8, 4) +#define GET_RX_DESC_AMSDU(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) +#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) +#define GET_RX_DESC_PAGGR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) +#define GET_RX_DESC_A1_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) +#define GET_RX_DESC_CHKERR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) +#define GET_RX_DESC_IPVER(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) +#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 22, 1) +#define 
GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 23, 1) +#define GET_RX_DESC_PAM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) +#define GET_RX_DESC_PWR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) +#define GET_RX_DESC_MD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) +#define GET_RX_DESC_MF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) +#define GET_RX_DESC_TYPE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) +#define GET_RX_DESC_MC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) +#define GET_RX_DESC_BC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) + + +#define GET_RX_DESC_SEQ(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) +#define GET_RX_DESC_FRAG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) +#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) +#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 18, 6) +#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 28, 1) + + +#define GET_RX_DESC_RXMCS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) +#define GET_RX_DESC_RXHT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) +#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 7, 1) +#define GET_RX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) +#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 11, 1) +#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 12, 2) + +#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 29, 1) +#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 30, 1) +#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 31, 1) + +#define GET_RX_DESC_SPLCP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 0, 1) +#define GET_RX_STATUS_DESC_LDPC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 1, 1) +#define GET_RX_STATUS_DESC_STBC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 2, 1) +#define GET_RX_DESC_BW(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 4, 2) + +#define GET_RX_DESC_TSFL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) + +#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) +#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) + +#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) +#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) + + +/* TX report 2 format in Rx desc*/ + +#define GET_RX_RPT2_DESC_PKT_LEN(__rxstatusdesc) \ + LE_BITS_TO_4BYTE(__rxstatusdesc, 0, 9) +#define GET_RX_RPT2_DESC_MACID_VALID_1(__rxstatusdesc) \ + LE_BITS_TO_4BYTE(__rxstatusdesc+16, 0, 32) +#define GET_RX_RPT2_DESC_MACID_VALID_2(__rxstatusdesc) \ + LE_BITS_TO_4BYTE(__rxstatusdesc+20, 0, 32) + +#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value) +#define SET_EARLYMODE_LEN0(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value) +#define SET_EARLYMODE_LEN1(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value) +#define SET_EARLYMODE_LEN2_1(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value) +#define SET_EARLYMODE_LEN2_2(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value) +#define SET_EARLYMODE_LEN3(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value) +#define SET_EARLYMODE_LEN4(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value) + +#define 
CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ +do { \ + if (_size > TX_DESC_NEXT_DESC_OFFSET) \ + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ + else \ + memset(__pdesc, 0, _size); \ +} while (0) + +struct phy_rx_agc_info_t { + #ifdef __LITTLE_ENDIAN + u8 gain:7, trsw:1; + #else + u8 trsw:1, gain:7; + #endif +}; +struct phy_status_rpt { + struct phy_rx_agc_info_t path_agc[2]; + u8 ch_corr[2]; + u8 cck_sig_qual_ofdm_pwdb_all; + u8 cck_agc_rpt_ofdm_cfosho_a; + u8 cck_rpt_b_ofdm_cfosho_b; + u8 rsvd_1;/* ch_corr_msb; */ + u8 noise_power_db_msb; + char path_cfotail[2]; + u8 pcts_mask[2]; + char stream_rxevm[2]; + u8 path_rxsnr[2]; + u8 noise_power_db_lsb; + u8 rsvd_2[3]; + u8 stream_csi[2]; + u8 stream_target_csi[2]; + u8 sig_evm; + u8 rsvd_3; +#ifdef __LITTLE_ENDIAN + u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ + u8 sgi_en:1; + u8 rxsc:2; + u8 idle_long:1; + u8 r_ant_train_en:1; + u8 ant_sel_b:1; + u8 ant_sel:1; +#else /* _BIG_ENDIAN_ */ + u8 ant_sel:1; + u8 ant_sel_b:1; + u8 r_ant_train_en:1; + u8 idle_long:1; + u8 rxsc:2; + u8 sgi_en:1; + u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ +#endif +} __packed; + +struct rx_fwinfo_8723be { + u8 gain_trsw[4]; + u8 pwdb_all; + u8 cfosho[4]; + u8 cfotail[4]; + char rxevm[2]; + char rxsnr[4]; + u8 pdsnr[2]; + u8 csi_current[2]; + u8 csi_target[2]; + u8 sigevm; + u8 max_ex_pwr; + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +} __packed; + +struct tx_desc_8723be { + u32 pktsize:16; + u32 offset:8; + u32 bmc:1; + u32 htc:1; + u32 lastseg:1; + u32 firstseg:1; + u32 linip:1; + u32 noacm:1; + u32 gf:1; + u32 own:1; + + u32 macid:6; + u32 rsvd0:2; + u32 queuesel:5; + u32 rd_nav_ext:1; + u32 lsig_txop_en:1; + u32 pifs:1; + u32 rateid:4; + u32 nav_usehdr:1; + u32 en_descid:1; + u32 sectype:2; + u32 pktoffset:8; + + u32 rts_rc:6; + u32 data_rc:6; + u32 agg_en:1; + u32 rdg_en:1; + u32 bar_retryht:2; + u32 agg_break:1; + u32 morefrag:1; + u32 raw:1; + u32 ccx:1; + u32 ampdudensity:3; + u32 bt_int:1; + u32 ant_sela:1; + u32 ant_selb:1; + u32 txant_cck:2; + u32 txant_l:2; + u32 txant_ht:2; + + u32 nextheadpage:8; + u32 tailpage:8; + u32 seq:12; + u32 cpu_handle:1; + u32 tag1:1; + u32 trigger_int:1; + u32 hwseq_en:1; + + u32 rtsrate:5; + u32 apdcfe:1; + u32 qos:1; + u32 hwseq_ssn:1; + u32 userrate:1; + u32 dis_rtsfb:1; + u32 dis_datafb:1; + u32 cts2self:1; + u32 rts_en:1; + u32 hwrts_en:1; + u32 portid:1; + u32 pwr_status:3; + u32 waitdcts:1; + u32 cts2ap_en:1; + u32 txsc:2; + u32 stbc:2; + u32 txshort:1; + u32 txbw:1; + u32 rtsshort:1; + u32 rtsbw:1; + u32 rtssc:2; + u32 rtsstbc:2; + + u32 txrate:6; + u32 shortgi:1; + u32 ccxt:1; + u32 txrate_fb_lmt:5; + u32 rtsrate_fb_lmt:4; + u32 retrylmt_en:1; + u32 txretrylmt:6; + u32 usb_txaggnum:8; + + u32 txagca:5; + u32 txagcb:5; + u32 usemaxlen:1; + u32 maxaggnum:5; + u32 mcsg1maxlen:4; + u32 mcsg2maxlen:4; + u32 mcsg3maxlen:4; + u32 mcs7sgimaxlen:4; + + u32 txbuffersize:16; + u32 sw_offset30:8; + u32 sw_offset31:4; + u32 rsvd1:1; + u32 antsel_c:1; + u32 null_0:1; + u32 null_1:1; + + u32 txbuffaddr; + u32 txbufferaddr64; + u32 nextdescaddress; + u32 nextdescaddress64; + + u32 reserve_pass_pcie_mm_limit[4]; +} __packed; + +struct rx_desc_8723be { + u32 length:14; + u32 crc32:1; + u32 icverror:1; + u32 drv_infosize:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phystatus:1; + u32 swdec:1; + u32 lastseg:1; + u32 firstseg:1; + u32 eor:1; + u32 own:1; + + u32 macid:6; + u32 tid:4; + u32 hwrsvd:5; + u32 paggr:1; + u32 faggr:1; + u32 a1_fit:4; + u32 a2_fit:4; + u32 pam:1; + u32 pwr:1; + u32 moredata:1; + u32 
morefrag:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + + u32 seq:12; + u32 frag:4; + u32 nextpktlen:14; + u32 nextind:1; + u32 rsvd:1; + + u32 rxmcs:6; + u32 rxht:1; + u32 amsdu:1; + u32 splcp:1; + u32 bandwidth:1; + u32 htc:1; + u32 tcpchk_rpt:1; + u32 ipcchk_rpt:1; + u32 tcpchk_valid:1; + u32 hwpcerr:1; + u32 hwpcind:1; + u32 iv0:16; + + u32 iv1; + + u32 tsfl; + + u32 bufferaddress; + u32 bufferaddress64; + +} __packed; + +void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); +bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl8723be_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val); +u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); +void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); +void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + bool b_firstseg, bool b_lastseg, + struct sk_buff *skb); +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/Makefile +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/Makefile @@ -0,0 +1,9 @@ +rtl8723-common-objs := \ + main.o \ + dm_common.o \ + fw_common.o \ + phy_common.o + +obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o + +ccflags-y += -D__CHECK_ENDIAN__ --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c @@ -0,0 +1,65 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "dm_common.h"
+#include "../rtl8723ae/dm.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723bE */
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.dynamic_txpower_enable = false;
+
+	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_txpower);
+
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm.current_turbo_edca = false;
+	rtlpriv->dm.is_any_nonbepkts = false;
+	rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_edca_turbo);
+
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
+	rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
+	rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
+	rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
+	rtlpriv->dm_pstable.rssi_val_min = 0;
+	rtlpriv->dm_pstable.initialize = 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_bb_powersaving);
--- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
+++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#ifndef __DM_COMMON_H__
+#define __DM_COMMON_H__
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw);
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw);
+
+#endif
--- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
+++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "fw_common.h"
+#include <linux/module.h>
+
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 tmp;
+
+	if (enable) {
+		tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+	} else {
+		tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+		rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+		rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+	}
+}
+EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
+
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+			    const u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 blocksize = sizeof(u32);
+	u8 *bufferptr = (u8 *)buffer;
+	u32 *pu4byteptr = (u32 *)buffer;
+	u32 i, offset, blockcount, remainsize;
+
+	blockcount = size / blocksize;
+	remainsize = size % blocksize;
+
+	for (i = 0; i < blockcount; i++) {
+		offset = i * blocksize;
+		rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+				*(pu4byteptr + i));
+	}
+	if (remainsize) {
+		offset = blockcount * blocksize;
+		bufferptr += offset;
+		for (i = 0; i < remainsize; i++) {
+			rtl_write_byte(rtlpriv,
+				       (FW_8192C_START_ADDRESS + offset + i),
+				       *(bufferptr + i));
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
+
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+			   u32 page, const u8 *buffer, u32 size)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 value8;
+	u8 u8page = (u8) (page & 0x07);
+
+	value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+	rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+	rtl8723_fw_block_write(hw, buffer, size);
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
+
+static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+	u32 fwlen = *pfwlen;
+	u8 remain = (u8) (fwlen % 4);
+
+	remain = (remain == 0) ?
0 : (4 - remain); + + while (remain > 0) { + pfwbuf[fwlen] = 0; + fwlen++; + remain--; + } + *pfwlen = fwlen; +} + +void rtl8723_write_fw(struct ieee80211_hw *hw, + enum version_8723e version, + u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *bufferptr = (u8 *)buffer; + u32 pagenums, remainsize; + u32 page, offset; + + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size); + + rtl8723_fill_dummy(bufferptr, &size); + + pagenums = size / FW_8192C_PAGE_SIZE; + remainsize = size % FW_8192C_PAGE_SIZE; + + if (pagenums > 8) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Page numbers should not greater then 8\n"); + } + for (page = 0; page < pagenums; page++) { + offset = page * FW_8192C_PAGE_SIZE; + rtl8723_fw_page_write(hw, page, (bufferptr + offset), + FW_8192C_PAGE_SIZE); + } + if (remainsize) { + offset = pagenums * FW_8192C_PAGE_SIZE; + page = pagenums; + rtl8723_fw_page_write(hw, page, (bufferptr + offset), + remainsize); + } +} +EXPORT_SYMBOL_GPL(rtl8723_write_fw); + +void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw) +{ + u8 u1tmp; + u8 delay = 100; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20); + u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + + while (u1tmp & BIT(2)) { + delay--; + if (delay == 0) + break; + udelay(50); + u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + } + if (delay == 0) { + u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2))); + } +} +EXPORT_SYMBOL_GPL(rtl8723ae_firmware_selfreset); + +void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw) +{ + u8 u1b_tmp; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2)))); + udelay(50); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0))); + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2))); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + " _8051Reset8723be(): 8051 reset success .\n"); +} +EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset); + +int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err = -EIO; + u32 counter = 0; + u32 value32; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) && + (!(value32 & FWDL_CHKSUM_RPT))); + + if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "chksum report fail ! REG_MCUFWDL:0x%08x .\n", + value32); + goto exit; + } + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32); + + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY; + value32 &= ~WINTINI_RDY; + rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); + + if (is_8723be) + rtl8723be_firmware_selfreset(hw); + counter = 0; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + if (value32 & WINTINI_RDY) { + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "Polling FW ready success!! 
" + "REG_MCUFWDL:0x%08x .\n", + value32); + err = 0; + goto exit; + } + udelay(FW_8192C_POLLING_DELAY); + + } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT); + + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", + value32); + +exit: + return err; +} +EXPORT_SYMBOL_GPL(rtl8723_fw_free_to_go); + +int rtl8723_download_fw(struct ieee80211_hw *hw, + bool is_8723be) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl92c_firmware_header *pfwheader; + u8 *pfwdata; + u32 fwsize; + int err; + enum version_8723e version = rtlhal->version; + + if (!rtlhal->pfirmware) + return 1; + + pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + pfwdata = (u8 *)rtlhal->pfirmware; + fwsize = rtlhal->fwsize; + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, + "normal Firmware SIZE %d\n", fwsize); + + if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) { + RT_TRACE(rtlpriv, COMP_FW, DBG_EMERG, + "Firmware Version(%d), Signature(%#x), Size(%d)\n", + pfwheader->version, pfwheader->signature, + (int)sizeof(struct rtl92c_firmware_header)); + + pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); + fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + } + if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); + if (is_8723be) + rtl8723be_firmware_selfreset(hw); + else + rtl8723ae_firmware_selfreset(hw); + } + rtl8723_enable_fw_download(hw, true); + rtl8723_write_fw(hw, version, pfwdata, fwsize); + rtl8723_enable_fw_download(hw, false); + + err = rtl8723_fw_free_to_go(hw, is_8723be); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "Firmware is not ready to run!\n"); + } else { + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, + "Firmware is ready to run!\n"); + } + return 0; +} +EXPORT_SYMBOL_GPL(rtl8723_download_fw); + +bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + struct rtl_tx_desc *pdesc; + struct sk_buff *pskb = NULL; + u8 own; + unsigned long flags; + + ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + pskb = __skb_dequeue(&ring->queue); + if (pskb) + kfree_skb(pskb); + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + + pdesc = &ring->desc[0]; + own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN); + + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb); + + __skb_queue_tail(&ring->queue, skb); + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); + + return true; +} +EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h @@ -0,0 +1,126 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __FW_COMMON_H__ +#define __FW_COMMON_H__ + +#define REG_SYS_FUNC_EN 0x0002 +#define REG_MCUFWDL 0x0080 +#define FW_8192C_START_ADDRESS 0x1000 +#define FW_8192C_PAGE_SIZE 4096 +#define FW_8192C_POLLING_TIMEOUT_COUNT 6000 +#define FW_8192C_POLLING_DELAY 5 + +#define MCUFWDL_RDY BIT(1) +#define FWDL_CHKSUM_RPT BIT(2) +#define WINTINI_RDY BIT(6) + +#define REG_RSV_CTRL 0x001C +#define REG_HMETFR 0x01CC + +enum version_8723e { + VERSION_TEST_UMC_CHIP_8723 = 0x0081, + VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089, + VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089, + VERSION_TEST_CHIP_1T1R_8723B = 0x0106, + VERSION_NORMAL_SMIC_CHIP_1T1R_8723B = 0x010E, + VERSION_UNKNOWN = 0xFF, +}; + +enum rtl8723ae_h2c_cmd { + H2C_AP_OFFLOAD = 0, + H2C_SETPWRMODE = 1, + H2C_JOINBSSRPT = 2, + H2C_RSVDPAGE = 3, + H2C_RSSI_REPORT = 4, + H2C_P2P_PS_CTW_CMD = 5, + H2C_P2P_PS_OFFLOAD = 6, + H2C_RA_MASK = 7, + MAX_H2CCMD +}; + +enum rtl8723be_cmd { + H2C_8723BE_RSVDPAGE = 0, + H2C_8723BE_JOINBSSRPT = 1, + H2C_8723BE_SCAN = 2, + H2C_8723BE_KEEP_ALIVE_CTRL = 3, + H2C_8723BE_DISCONNECT_DECISION = 4, + H2C_8723BE_INIT_OFFLOAD = 6, + H2C_8723BE_AP_OFFLOAD = 8, + H2C_8723BE_BCN_RSVDPAGE = 9, + H2C_8723BE_PROBERSP_RSVDPAGE = 10, + + H2C_8723BE_SETPWRMODE = 0x20, + H2C_8723BE_PS_TUNING_PARA = 0x21, + H2C_8723BE_PS_TUNING_PARA2 = 0x22, + H2C_8723BE_PS_LPS_PARA = 0x23, + H2C_8723BE_P2P_PS_OFFLOAD = 0x24, + + H2C_8723BE_WO_WLAN = 0x80, + H2C_8723BE_REMOTE_WAKE_CTRL = 0x81, + H2C_8723BE_AOAC_GLOBAL_INFO = 0x82, + H2C_8723BE_AOAC_RSVDPAGE = 0x83, + H2C_8723BE_RSSI_REPORT = 0x42, + H2C_8723BE_RA_MASK = 0x40, + H2C_8723BE_SELECTIVE_SUSPEND_ROF_CMD, + H2C_8723BE_P2P_PS_MODE, + H2C_8723BE_PSD_RESULT, + /*Not defined CTW CMD for P2P yet*/ + H2C_8723BE_P2P_PS_CTW_CMD, + MAX_8723BE_H2CCMD +}; + +struct rtl92c_firmware_header { + u16 signature; + u8 category; + u8 function; + u16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + u16 ramcodesize; + u16 rsvd2; + u32 svnindex; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; +}; + +void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw); +void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw); +void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable); +void rtl8723_fw_block_write(struct ieee80211_hw *hw, + const u8 *buffer, u32 size); +void rtl8723_fw_page_write(struct ieee80211_hw *hw, + u32 page, const u8 *buffer, u32 size); +void rtl8723_write_fw(struct ieee80211_hw *hw, + enum version_8723e version, + u8 *buffer, u32 size); +int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be); +int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be); +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/main.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/main.c @@ -0,0 +1,33 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include <linux/module.h>
+
+
+MODULE_AUTHOR("Realtek WlanFAE ");
+MODULE_AUTHOR("Larry Finger ");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek RTL8723AE/RTL8723BE 802.11n PCI wireless common routines");
--- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
+++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
@@ -0,0 +1,434 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "phy_common.h"
+#include "../rtl8723ae/reg.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723bE */
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+			     u32 regaddr, u32 bitmask)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 returnvalue, originalvalue, bitshift;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+	originalvalue = rtl_read_dword(rtlpriv, regaddr);
+	bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+	returnvalue = (originalvalue & bitmask) >> bitshift;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n",
+		 bitmask, regaddr, originalvalue);
+
+	return returnvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
+
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+			    u32 bitmask, u32 data)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 originalvalue, bitshift;
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+		 regaddr, bitmask, data);
+
+	if (bitmask != MASKDWORD) {
+		originalvalue = rtl_read_dword(rtlpriv, regaddr);
+		bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+		data = ((originalvalue & (~bitmask)) | (data << bitshift));
+	}
+
+	rtl_write_dword(rtlpriv, regaddr, data);
+
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+		 regaddr, bitmask, data);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
+
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
+{
+	u32 i;
+
+	for (i = 0; i <= 31; i++) {
+		if (((bitmask >> i) & 0x1) == 1)
+			break;
+	}
+	return i;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
+
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+			       enum radio_path rfpath, u32 offset)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+	u32 newoffset;
+	u32 tmplong, tmplong2;
+	u8 rfpi_enable = 0;
+	u32 retvalue;
+
+	offset &= 0xff;
+	newoffset = offset;
+	if (RT_CANNOT_IO(hw)) {
+		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+		return 0xFFFFFFFF;
+	}
+	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+	if (rfpath == RF90_PATH_A)
+		tmplong2 = tmplong;
+	else
+		tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+		   (newoffset << 23) | BLSSIREADEDGE;
+	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+		      tmplong & (~BLSSIREADEDGE));
+	mdelay(1);
+	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+	mdelay(2);
+	if (rfpath == RF90_PATH_A)
+		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+						 BIT(8));
+	else if (rfpath == RF90_PATH_B)
+		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+						 BIT(8));
+	if (rfpi_enable)
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+					 BLSSIREADBACKDATA);
+	else
+		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+					 BLSSIREADBACKDATA);
+	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+		 "RFR-%d Addr[0x%x]= 0x%x\n",
+		 rfpath, pphyreg->rf_rb, retvalue);
+	return retvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
+
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+				 enum radio_path rfpath,
+				 u32 offset, u32 data)
+{
+	u32 data_and_addr;
+	u32 newoffset;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	struct bb_reg_def *pphyreg
= &rtlphy->phyreg_def[rfpath]; + + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n"); + return; + } + offset &= 0xff; + newoffset = offset; + data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + "RFW-%d Addr[0x%x]= 0x%x\n", rfpath, + pphyreg->rf3wire_offset, data_and_addr); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write); + +long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx) +{ + long offset; + long pwrout_dbm; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case WIRELESS_MODE_N_24G: + default: + offset = -8; + break; + } + pwrout_dbm = txpwridx / 2 + offset; + return pwrout_dbm; +} +EXPORT_SYMBOL_GPL(rtl8723_phy_txpwr_idx_to_dbm); + +void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = + RFPGA0_XA_LSSIPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = + RFPGA0_XB_LSSIPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; + + rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL; + + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; + + 
rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; + + rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE; + rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE; + + rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; + + rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE; + + rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK; + + rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK; + rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK; +} +EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def); + +bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, + u32 cmdtablesz, + enum swchnlcmd_id cmdid, + u32 para1, u32 para2, + u32 msdelay) +{ + struct swchnlcmd *pcmd; + + if (cmdtable == NULL) { + RT_ASSERT(false, "cmdtable cannot be NULL.\n"); + return false; + } + + if (cmdtableidx >= cmdtablesz) + return false; + + pcmd = cmdtable + cmdtableidx; + pcmd->cmdid = cmdid; + pcmd->para1 = para1; + pcmd->para2 = para2; + pcmd->msdelay = msdelay; + return true; +} +EXPORT_SYMBOL_GPL(rtl8723_phy_set_sw_chnl_cmdarray); + +void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, + long result[][8], + u8 final_candidate, + bool btxonly) +{ + u32 oldval_0, x, tx0_a, reg; + long y, tx0_c; + + if (final_candidate == 0xFF) { + return; + } else if (iqk_ok) { + oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD) >> 22) & 0x3FF; + x = result[final_candidate][0]; + if ((x & 0x00000200) != 0) + x = x | 0xFFFFFC00; + tx0_a = (x * oldval_0) >> 8; + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), + ((x * oldval_0 >> 7) & 0x1)); + y = result[final_candidate][1]; + if ((y & 0x00000200) != 0) + y = y | 0xFFFFFC00; + tx0_c = (y * oldval_0) >> 8; + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, + ((tx0_c & 0x3C0) >> 6)); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, + (tx0_c & 0x3F)); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), + ((y * oldval_0 >> 7) & 0x1)); + if (btxonly) + return; + reg = result[final_candidate][2]; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][3] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); + reg 
= (result[final_candidate][3] >> 6) & 0xF; + rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg); + } +} +EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_fill_iqk_matrix); + +void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg, + u32 *addabackup, u32 registernum) +{ + u32 i; + + for (i = 0; i < registernum; i++) + addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD); +} +EXPORT_SYMBOL_GPL(rtl8723_save_adda_registers); + +void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); + macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_save_mac_registers); + +void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 regiesternum) +{ + u32 i; + + for (i = 0; i < regiesternum; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_reload_adda_registers); + +void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]); + rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers); + +void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, + bool is_patha_on, bool is2t) +{ + u32 pathon; + u32 i; + + pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; + if (!is2t) { + pathon = 0x0bdb25a0; + rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); + } else { + rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); + } + + for (i = 1; i < IQK_ADDA_REG_NUM; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_path_adda_on); + +void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i = 0; + + rtl_write_byte(rtlpriv, macreg[i], 0x3F); + + for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], + (u8) (macbackup[i] & (~BIT(3)))); + rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5)))); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_mac_setting_calibration); + +void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw) +{ + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0); + rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_standby); + +void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode) +{ + u32 mode; + + mode = pi_mode ? 0x01000100 : 0x01000000; + rtl_set_bbreg(hw, 0x820, MASKDWORD, mode); + rtl_set_bbreg(hw, 0x828, MASKDWORD, mode); +} +EXPORT_SYMBOL_GPL(rtl8723_phy_pi_mode_switch); --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h @@ -0,0 +1,89 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2014 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __PHY_COMMON__ +#define __PHY_COMMON__ + +#define RT_CANNOT_IO(hw) false + +enum swchnlcmd_id { + CMDID_END, + CMDID_SET_TXPOWEROWER_LEVEL, + CMDID_BBREGWRITE10, + CMDID_WRITEPORT_ULONG, + CMDID_WRITEPORT_USHORT, + CMDID_WRITEPORT_UCHAR, + CMDID_RF_WRITEREG, +}; + +struct swchnlcmd { + enum swchnlcmd_id cmdid; + u32 para1; + u32 para2; + u32 msdelay; +}; + +u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask); +void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, + u32 bitmask, u32 data); +u32 rtl8723_phy_calculate_bit_shift(u32 bitmask); +u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 offset, u32 data); +long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx); +void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw); +bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, + u32 cmdtablesz, + enum swchnlcmd_id cmdid, + u32 para1, u32 para2, + u32 msdelay); +void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, + long result[][8], + u8 final_candidate, + bool btxonly); +void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg, + u32 *addabackup, u32 registernum); +void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup); +void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 regiesternum); +void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup); +void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, + bool is_patha_on, bool is2t); +void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup); +void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw); +void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode); + +#endif --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/stats.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/stats.c @@ -176,6 +176,7 @@ struct rtl_sta_info *drv_priv = NULL; struct ieee80211_sta *sta = NULL; long undec_sm_pwdb; + long undec_sm_cck; rcu_read_lock(); if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) @@ -185,12 +186,16 @@ if (sta) { drv_priv = (struct rtl_sta_info *) sta->drv_priv; undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb; + undec_sm_cck = drv_priv->rssi_stat.undec_sm_cck; } else { undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb; + undec_sm_cck = rtlpriv->dm.undec_sm_cck; } if (undec_sm_pwdb < 0) undec_sm_pwdb = pstatus->rx_pwdb_all; + if (undec_sm_cck < 0) + undec_sm_cck = pstatus->rx_pwdb_all; if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) { undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) + @@ -200,6 +205,15 @@ undec_sm_pwdb = (((undec_sm_pwdb) * 
(RX_SMOOTH_FACTOR - 1)) + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); } + if (pstatus->rx_pwdb_all > (u32) undec_sm_cck) { + undec_sm_cck = (((undec_sm_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + undec_sm_cck = undec_sm_cck + 1; + } else { + undec_sm_pwdb = (((undec_sm_cck) * (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + } if (sta) { drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb; --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/usb.c +++ linux-3.13.0/drivers/net/wireless/rtlwifi/usb.c @@ -410,7 +410,7 @@ mac->current_ampdu_factor = 3; /* QOS */ - rtlusb->acm_method = eAcmWay2_SW; + rtlusb->acm_method = EACMWAY2_SW; /* IRQ */ /* HIMR - turn all on */ @@ -483,6 +483,8 @@ if (unicast) rtlpriv->link_info.num_rx_inperiod++; } + /* static bcn for roaming */ + rtl_beacon_statistic(hw, skb); } } @@ -553,7 +555,7 @@ } } -#define __RX_SKB_MAX_QUEUED 32 +#define __RX_SKB_MAX_QUEUED 64 static void _rtl_rx_work(unsigned long param) { --- linux-3.13.0.orig/drivers/net/wireless/rtlwifi/wifi.h +++ linux-3.13.0/drivers/net/wireless/rtlwifi/wifi.h @@ -49,6 +49,7 @@ #define IQK_ADDA_REG_NUM 16 #define IQK_MAC_REG_NUM 4 +#define IQK_THRESHOLD 8 #define MAX_KEY_LEN 61 #define KEY_BUF_SIZE 5 @@ -96,6 +97,7 @@ #define CHANNEL_MAX_NUMBER_2G 14 #define AVG_THERMAL_NUM 8 #define AVG_THERMAL_NUM_88E 4 +#define AVG_THERMAL_NUM_8723BE 4 #define MAX_TID_COUNT 9 /* for early mode */ @@ -107,6 +109,19 @@ #define MAX_CHNL_GROUP_24G 6 #define MAX_CHNL_GROUP_5G 14 +#define TX_PWR_BY_RATE_NUM_BAND 2 +#define TX_PWR_BY_RATE_NUM_RF 4 +#define TX_PWR_BY_RATE_NUM_SECTION 12 +#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6 +#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5 + +enum rf_tx_num { + RF_1TX = 0, + RF_2TX, + RF_MAX_TX_NUM, + RF_TX_NUM_NONIMPLEMENT, +}; + struct txpower_info_2g { u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; @@ -115,6 +130,8 @@ u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT]; u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT]; u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT]; }; struct txpower_info_5g { @@ -125,6 +142,15 @@ u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT]; }; +enum rate_section { + CCK = 0, + OFDM, + HT_MCS0_MCS7, + HT_MCS8_MCS15, + VHT_1SSMCS0_1SSMCS9, + VHT_2SSMCS0_2SSMCS9, +}; + enum intf_type { INTF_PCI = 0, INTF_USB = 1, @@ -158,6 +184,7 @@ HARDWARE_TYPE_RTL8192DU, HARDWARE_TYPE_RTL8723AE, HARDWARE_TYPE_RTL8723U, + HARDWARE_TYPE_RTL8723BE, HARDWARE_TYPE_RTL8188EE, /* keep it last */ @@ -195,6 +222,12 @@ _pdesc->rxmcs == DESC92_RATE5_5M || \ _pdesc->rxmcs == DESC92_RATE11M) +#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \ + ((rxmcs) == DESC92_RATE1M || \ + (rxmcs) == DESC92_RATE2M || \ + (rxmcs) == DESC92_RATE5_5M || \ + (rxmcs) == DESC92_RATE11M) + enum scan_operation_backup_opt { SCAN_OPT_BACKUP = 0, SCAN_OPT_RESTORE, @@ -330,6 +363,7 @@ HAL_DEF_WOWLAN, HW_VAR_MRC, + HW_VAR_KEEP_ALIVE, HW_VAR_MGT_FILTER, HW_VAR_CTRL_FILTER, @@ -348,34 +382,34 @@ RT_CID_8187_HW_LED = 3, RT_CID_8187_NETGEAR = 4, RT_CID_WHQL = 5, - RT_CID_819x_CAMEO = 6, - RT_CID_819x_RUNTOP = 7, - RT_CID_819x_Senao = 8, + RT_CID_819X_CAMEO = 6, + RT_CID_819X_RUNTOP = 7, + RT_CID_819X_SENAO = 8, RT_CID_TOSHIBA = 9, - RT_CID_819x_Netcore = 10, - RT_CID_Nettronix = 11, + RT_CID_819X_NETCORE = 10, + RT_CID_NETTRONIX = 11, RT_CID_DLINK = 12, RT_CID_PRONET = 13, RT_CID_COREGA = 14, - RT_CID_819x_ALPHA = 15, - RT_CID_819x_Sitecom = 16, + RT_CID_819X_ALPHA = 
15, + RT_CID_819X_SITECOM = 16, RT_CID_CCX = 17, - RT_CID_819x_Lenovo = 18, - RT_CID_819x_QMI = 19, - RT_CID_819x_Edimax_Belkin = 20, - RT_CID_819x_Sercomm_Belkin = 21, - RT_CID_819x_CAMEO1 = 22, - RT_CID_819x_MSI = 23, - RT_CID_819x_Acer = 24, - RT_CID_819x_HP = 27, - RT_CID_819x_CLEVO = 28, - RT_CID_819x_Arcadyan_Belkin = 29, - RT_CID_819x_SAMSUNG = 30, - RT_CID_819x_WNC_COREGA = 31, - RT_CID_819x_Foxcoon = 32, - RT_CID_819x_DELL = 33, - RT_CID_819x_PRONETS = 34, - RT_CID_819x_Edimax_ASUS = 35, + RT_CID_819X_LENOVO = 18, + RT_CID_819X_QMI = 19, + RT_CID_819X_EDIMAX_BELKIN = 20, + RT_CID_819X_SERCOMM_BELKIN = 21, + RT_CID_819X_CAMEO1 = 22, + RT_CID_819X_MSI = 23, + RT_CID_819X_ACER = 24, + RT_CID_819X_HP = 27, + RT_CID_819X_CLEVO = 28, + RT_CID_819X_ARCADYAN_BELKIN = 29, + RT_CID_819X_SAMSUNG = 30, + RT_CID_819X_WNC_COREGA = 31, + RT_CID_819X_FOXCOON = 32, + RT_CID_819X_DELL = 33, + RT_CID_819X_PRONETS = 34, + RT_CID_819X_EDIMAX_ASUS = 35, RT_CID_NETGEAR = 36, RT_CID_PLANEX = 37, RT_CID_CC_C = 38, @@ -608,7 +642,7 @@ enum acm_method { eAcmWay0_SwAndHw = 0, eAcmWay1_HW = 1, - eAcmWay2_SW = 2, + EACMWAY2_SW = 2, }; enum macphy_mode { @@ -817,9 +851,9 @@ u32 high_rssi_thresh_for_ra; u32 high2low_rssi_thresh_for_ra; u8 low2high_rssi_thresh_for_ra40m; - u32 low_rssi_thresh_for_ra40M; + u32 low_rssi_thresh_for_ra40m; u8 low2high_rssi_thresh_for_ra20m; - u32 low_rssi_thresh_for_ra20M; + u32 low_rssi_thresh_for_ra20m; u32 upper_rssi_threshold_ratr; u32 middleupper_rssi_threshold_ratr; u32 middle_rssi_threshold_ratr; @@ -986,6 +1020,13 @@ u8 cck_high_power; /* MAX_PG_GROUP groups of pwr diff by rates */ u32 mcs_offset[MAX_PG_GROUP][16]; + u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND] + [TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_SECTION]; + u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_RF] + [MAX_BASE_NUM_IN_PHY_REG_PG_24G]; u8 default_initialgain[4]; /* the current Tx power level */ @@ -1033,6 +1074,7 @@ struct rssi_sta { long undec_sm_pwdb; + long undec_sm_cck; }; struct rtl_tid_data { @@ -1212,6 +1254,7 @@ bool being_init_adapter; bool bbrf_ready; bool mac_func_enable; + bool pre_edcca_enable; struct bt_coexist_8723 hal_coex_8723; enum intf_type interface; @@ -1320,11 +1363,23 @@ bool becomelinked; }; +struct dm_phy_dbg_info { + char rx_snrdb[4]; + u64 num_qry_phy_status; + u64 num_qry_phy_status_cck; + u64 num_qry_phy_status_ofdm; + u16 num_qry_beacon_pkt; + u16 num_non_be_pkt; + s32 rx_evm[4]; +}; + struct rtl_dm { /*PHY status for Dynamic Management */ long entry_min_undec_sm_pwdb; + long undec_sm_cck; long undec_sm_pwdb; /*out dm */ long entry_max_undec_sm_pwdb; + s32 ofdm_pkt_cnt; bool dm_initialgain_enable; bool dynamic_txpower_enable; bool current_turbo_edca; @@ -1339,6 +1394,7 @@ bool inform_fw_driverctrldm; bool current_mrc_switch; u8 txpowercount; + u8 powerindex_backup[6]; u8 thermalvalue_rxgain; u8 thermalvalue_iqk; @@ -1350,20 +1406,36 @@ bool done_txpower; u8 dynamic_txhighpower_lvl; /*Tx high power level */ u8 dm_flag; /*Indicate each dynamic mechanism's status. 
*/ + u8 dm_flag_tmp; u8 dm_type; + u8 dm_rssi_sel; u8 txpower_track_control; bool interrupt_migration; bool disable_tx_int; char ofdm_index[2]; char cck_index; - char delta_power_index; - char delta_power_index_last; - char power_index_offset; + char delta_power_index[MAX_RF_PATH]; + char delta_power_index_last[MAX_RF_PATH]; + char power_index_offset[MAX_RF_PATH]; + + bool one_entry_only; + struct dm_phy_dbg_info dbginfo; + + /* Dynamic ATC switch */ + bool atc_status; + bool large_cfo_hit; + bool is_freeze; + int cfo_tail[2]; + int cfo_ave_pre; + int crystal_cap; + u8 cfo_threshold; + u32 packet_count; + u32 packet_count_pre; /*88e tx power tracking*/ u8 swing_idx_ofdm[2]; u8 swing_idx_ofdm_cur; - u8 swing_idx_ofdm_base; + u8 swing_idx_ofdm_base[MAX_RF_PATH]; bool swing_flag_ofdm; u8 swing_idx_cck; u8 swing_idx_cck_cur; @@ -1416,12 +1488,14 @@ u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */ u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX]; u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX]; - u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G]; - u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX]; - u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX]; + u8 eeprom_chnlarea_txpwr_cck[MAX_RF_PATH][CHANNEL_GROUP_MAX_2G]; + u8 eeprom_chnlarea_txpwr_ht40_1s[MAX_RF_PATH][CHANNEL_GROUP_MAX]; + u8 eprom_chnl_txpwr_ht40_2sdf[MAX_RF_PATH][CHANNEL_GROUP_MAX]; u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G]; - u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */ - u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */ + /* For HT 40MHZ pwr */ + u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + u8 txpwr_ht40diff[MAX_RF_PATH][MAX_TX_COUNT];/*BW40_24G_Diff*/ u8 internal_pa_5g[2]; /* pathA / pathB */ u8 eeprom_c9; @@ -1638,6 +1712,8 @@ bool btx_enable_sw_calc_duration; }; +struct rtl92c_firmware_header; + struct rtl_hal_ops { int (*init_sw_vars) (struct ieee80211_hw *hw); void (*deinit_sw_vars) (struct ieee80211_hw *hw); @@ -1694,6 +1770,8 @@ enum led_ctl_mode ledaction); void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val); u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name); + bool (*is_tx_desc_closed) (struct ieee80211_hw *hw, + u8 hw_queue, u16 index); void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue); void (*enable_hw_sec) (struct ieee80211_hw *hw); void (*set_key) (struct ieee80211_hw *hw, u32 key_index, @@ -1732,6 +1810,8 @@ void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); + bool (*get_btc_status) (void); + bool (*is_fw_header) (struct rtl92c_firmware_header *hdr); }; struct rtl_intf_ops { @@ -1774,6 +1854,10 @@ /* default: 1 = using linked fw power save */ bool fwctrl_lps; + + /* default: 0 = not using MSI interrupts mode */ + /* submodules should set their own defalut value */ + bool msi_support; }; struct rtl_hal_usbint_cfg { @@ -1804,6 +1888,7 @@ bool write_readback; char *name; char *fw_name; + char *alt_fw_name; struct rtl_hal_ops *ops; struct rtl_mod_params *mod_params; struct rtl_hal_usbint_cfg *usb_interface_cfg; @@ -1908,6 +1993,7 @@ u8 cur_ccasate; u8 pre_rfstate; u8 cur_rfstate; + u8 initialize; long rssi_val_min; }; @@ -1948,6 +2034,7 @@ u8 pre_ccastate; u8 cur_ccasate; u8 large_fa_hit; + u8 dig_dynamic_min; u8 forbidden_igi; u8 dig_state; u8 dig_highpwrstate; @@ -1964,6 +2051,7 @@ char backoffval_range_min; u8 dig_min_0; u8 dig_min_1; + u8 bt30_cur_igi; bool media_connect_0; bool 
media_connect_1; @@ -1978,6 +2066,96 @@ spinlock_t glb_list_lock; }; +struct rtl_btc_info { + u8 bt_type; + u8 btcoexist; + u8 ant_num; +}; + +struct bt_coexist_info { + struct rtl_btc_ops *btc_ops; + struct rtl_btc_info btc_info; + /* EEPROM BT info. */ + u8 eeprom_bt_coexist; + u8 eeprom_bt_type; + u8 eeprom_bt_ant_num; + u8 eeprom_bt_ant_isol; + u8 eeprom_bt_radio_shared; + + u8 bt_coexistence; + u8 bt_ant_num; + u8 bt_coexist_type; + u8 bt_state; + u8 bt_cur_state; /* 0:on, 1:off */ + u8 bt_ant_isolation; /* 0:good, 1:bad */ + u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */ + u8 bt_service; + u8 bt_radio_shared_type; + u8 bt_rfreg_origin_1e; + u8 bt_rfreg_origin_1f; + u8 bt_rssi_state; + u32 ratio_tx; + u32 ratio_pri; + u32 bt_edca_ul; + u32 bt_edca_dl; + + bool init_set; + bool bt_busy_traffic; + bool bt_traffic_mode_set; + bool bt_non_traffic_mode_set; + + bool fw_coexist_all_off; + bool sw_coexist_all_off; + bool hw_coexist_all_off; + u32 cstate; + u32 previous_state; + u32 cstate_h; + u32 previous_state_h; + + u8 bt_pre_rssi_state; + u8 bt_pre_rssi_state1; + + u8 reg_bt_iso; + u8 reg_bt_sco; + bool balance_on; + u8 bt_active_zero_cnt; + bool cur_bt_disabled; + bool pre_bt_disabled; + + u8 bt_profile_case; + u8 bt_profile_action; + bool bt_busy; + bool hold_for_bt_operation; + u8 lps_counter; +}; + +struct rtl_btc_ops { + void (*btc_init_variables) (struct rtl_priv *rtlpriv); + void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); + void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); + void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); + void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype); + void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action); + void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv, + enum _RT_MEDIA_STATUS mstatus); + void (*btc_periodical) (struct rtl_priv *rtlpriv); + void (*btc_halt_notify) (void); + void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, + u8 *tmp_buf, u8 length); + bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); + bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); + bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); +}; + +struct proxim { + bool proxim_on; + + void *proximity_priv; + int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status, + struct sk_buff *skb); + u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type); +}; + struct rtl_priv { struct ieee80211_hw *hw; struct completion firmware_loading_complete; @@ -2028,25 +2206,32 @@ struct dig_t dm_digtable; struct ps_t dm_pstable; - /* section shared by individual drivers */ - union { - struct { /* data buffer pointer for USB reads */ - __le32 *usb_data; - int usb_data_index; - bool initialized; - }; - struct { /* section for 8723ae */ - bool reg_init; /* true if regs saved */ - u32 reg_874; - u32 reg_c70; - u32 reg_85c; - u32 reg_a74; - bool bt_operation_on; - }; - }; + u32 reg_874; + u32 reg_c70; + u32 reg_85c; + u32 reg_a74; + bool reg_init; /* true if regs saved */ + bool bt_operation_on; + __le32 *usb_data; + int usb_data_index; + bool initialized; bool enter_ps; /* true when entering PS */ u8 rate_mask[5]; + /* intel Proximity, should be alloc mem + * in intel Proximity module and can only + * be used in intel Proximity mode + */ + struct proxim proximity; + + /*for bt coexist use*/ + struct bt_coexist_info btcoexist; + + /* separate 92ee from other ICs, + * 92ee use new trx flow. 
+ */ + bool use_new_trx_flow; + /*This must be the last item so that it points to the data allocated beyond this structure like: @@ -2078,6 +2263,9 @@ BT_CSR_BC8 = 4, BT_RTL8756 = 5, BT_RTL8723A = 6, + BT_RTL8821 = 7, + BT_RTL8723B = 8, + BT_RTL8192E = 9, }; enum bt_cur_state { @@ -2103,62 +2291,6 @@ BT_RADIO_INDIVIDUAL = 1, }; -struct bt_coexist_info { - - /* EEPROM BT info. */ - u8 eeprom_bt_coexist; - u8 eeprom_bt_type; - u8 eeprom_bt_ant_num; - u8 eeprom_bt_ant_isol; - u8 eeprom_bt_radio_shared; - - u8 bt_coexistence; - u8 bt_ant_num; - u8 bt_coexist_type; - u8 bt_state; - u8 bt_cur_state; /* 0:on, 1:off */ - u8 bt_ant_isolation; /* 0:good, 1:bad */ - u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */ - u8 bt_service; - u8 bt_radio_shared_type; - u8 bt_rfreg_origin_1e; - u8 bt_rfreg_origin_1f; - u8 bt_rssi_state; - u32 ratio_tx; - u32 ratio_pri; - u32 bt_edca_ul; - u32 bt_edca_dl; - - bool init_set; - bool bt_busy_traffic; - bool bt_traffic_mode_set; - bool bt_non_traffic_mode_set; - - bool fw_coexist_all_off; - bool sw_coexist_all_off; - bool hw_coexist_all_off; - u32 cstate; - u32 previous_state; - u32 cstate_h; - u32 previous_state_h; - - u8 bt_pre_rssi_state; - u8 bt_pre_rssi_state1; - - u8 reg_bt_iso; - u8 reg_bt_sco; - bool balance_on; - u8 bt_active_zero_cnt; - bool cur_bt_disabled; - bool pre_bt_disabled; - - u8 bt_profile_case; - u8 bt_profile_action; - bool bt_busy; - bool hold_for_bt_operation; - u8 lps_counter; -}; - /**************************************** mem access macro define start --- linux-3.13.0.orig/drivers/net/wireless/ti/wlcore/main.c +++ linux-3.13.0/drivers/net/wireless/ti/wlcore/main.c @@ -5147,7 +5147,8 @@ mutex_unlock(&wl->mutex); } -static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct wl1271 *wl = hw->priv; --- linux-3.13.0.orig/drivers/net/xen-netback/common.h +++ linux-3.13.0/drivers/net/xen-netback/common.h @@ -113,6 +113,11 @@ domid_t domid; unsigned int handle; + /* Is this interface disabled? True when backend discovers + * frontend is rogue. + */ + bool disabled; + /* Use NAPI for guest TX */ struct napi_struct napi; /* When feature-split-event-channels = 0, tx_irq = rx_irq. */ --- linux-3.13.0.orig/drivers/net/xen-netback/interface.c +++ linux-3.13.0/drivers/net/xen-netback/interface.c @@ -67,6 +67,15 @@ struct xenvif *vif = container_of(napi, struct xenvif, napi); int work_done; + /* This vif is rogue, we pretend we've there is nothing to do + * for this vif to deschedule it from NAPI. But this interface + * will be turned off in thread context later. + */ + if (unlikely(vif->disabled)) { + napi_complete(napi); + return 0; + } + work_done = xenvif_tx_action(vif, budget); if (work_done < budget) { @@ -323,6 +332,8 @@ vif->ip_csum = 1; vif->dev = dev; + vif->disabled = false; + vif->credit_bytes = vif->remaining_credit = ~0UL; vif->credit_usec = 0UL; init_timer(&vif->credit_timeout); --- linux-3.13.0.orig/drivers/net/xen-netback/netback.c +++ linux-3.13.0/drivers/net/xen-netback/netback.c @@ -203,8 +203,8 @@ * into multiple copies tend to give large frags their * own buffers as before. 
*/ - if ((offset + size > MAX_BUFFER_OFFSET) && - (size <= MAX_BUFFER_OFFSET) && offset && !head) + BUG_ON(size > MAX_BUFFER_OFFSET); + if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head) return true; return false; @@ -338,7 +338,7 @@ struct gnttab_copy *copy_gop; struct xenvif_rx_meta *meta; unsigned long bytes; - int gso_type; + int gso_type = XEN_NETIF_GSO_TYPE_NONE; /* Data must not cross a page boundary. */ BUG_ON(size + offset > PAGE_SIZE<gso_type & SKB_GSO_TCPV4) - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - else - gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } if (*head && ((1 << gso_type) & vif->gso_mask)) vif->rx.req_cons++; @@ -436,19 +436,15 @@ int head = 1; int old_meta_prod; int gso_type; - int gso_size; old_meta_prod = npo->meta_prod; - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - gso_size = skb_shinfo(skb)->gso_size; - } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - gso_size = skb_shinfo(skb)->gso_size; - } else { - gso_type = XEN_NETIF_GSO_TYPE_NONE; - gso_size = 0; + gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; } /* Set up a GSO prefix descriptor, if necessary */ @@ -456,7 +452,7 @@ req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; - meta->gso_size = gso_size; + meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; meta->id = req->id; } @@ -466,7 +462,7 @@ if ((1 << gso_type) & vif->gso_mask) { meta->gso_type = gso_type; - meta->gso_size = gso_size; + meta->gso_size = skb_shinfo(skb)->gso_size; } else { meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; @@ -756,7 +752,8 @@ static void xenvif_fatal_tx_err(struct xenvif *vif) { netdev_err(vif->dev, "fatal error; disabling device\n"); - xenvif_carrier_off(vif); + vif->disabled = true; + xenvif_kick_thread(vif); } static int xenvif_count_requests(struct xenvif *vif, @@ -1483,7 +1480,7 @@ vif->tx.sring->req_prod, vif->tx.req_cons, XEN_NETIF_TX_RING_SIZE); xenvif_fatal_tx_err(vif); - continue; + break; } work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx); @@ -1877,7 +1874,18 @@ while (!kthread_should_stop()) { wait_event_interruptible(vif->wq, rx_work_todo(vif) || + vif->disabled || kthread_should_stop()); + + /* This frontend is found to be rogue, disable it in + * kthread context. Currently this is only set when + * netback finds out frontend sends malformed packet, + * but we cannot disable the interface in softirq + * context so we defer it here. 
+ */ + if (unlikely(vif->disabled && netif_carrier_ok(vif->dev))) + xenvif_carrier_off(vif); + if (kthread_should_stop()) break; --- linux-3.13.0.orig/drivers/net/xen-netfront.c +++ linux-3.13.0/drivers/net/xen-netfront.c @@ -117,6 +117,7 @@ } tx_skbs[NET_TX_RING_SIZE]; grant_ref_t gref_tx_head; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; + struct page *grant_tx_page[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; spinlock_t rx_lock ____cacheline_aligned_in_smp; @@ -396,6 +397,7 @@ gnttab_release_grant_reference( &np->gref_tx_head, np->grant_tx_ref[id]); np->grant_tx_ref[id] = GRANT_INVALID_REF; + np->grant_tx_page[id] = NULL; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id); dev_kfree_skb_irq(skb); } @@ -452,6 +454,7 @@ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + np->grant_tx_page[id] = virt_to_page(data); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; @@ -466,9 +469,6 @@ len = skb_frag_size(frag); offset = frag->page_offset; - /* Data must not cross a page boundary. */ - BUG_ON(len + offset > PAGE_SIZE<> PAGE_SHIFT; offset &= ~PAGE_MASK; @@ -476,8 +476,6 @@ while (len > 0) { unsigned long bytes; - BUG_ON(offset >= PAGE_SIZE); - bytes = PAGE_SIZE - offset; if (bytes > len) bytes = len; @@ -497,6 +495,7 @@ np->xbdev->otherend_id, mfn, GNTMAP_readonly); + np->grant_tx_page[id] = page; tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = bytes; @@ -569,9 +568,13 @@ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + xennet_count_skb_frag_slots(skb); if (unlikely(slots > MAX_SKB_FRAGS + 1)) { - net_alert_ratelimited( - "xennet: skb rides the rocket: %d slots\n", slots); - goto drop; + net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", + slots, skb->len); + if (skb_linearize(skb)) + goto drop; + data = skb->data; + offset = offset_in_page(data); + len = skb_headlen(skb); } spin_lock_irqsave(&np->tx_lock, flags); @@ -596,6 +599,7 @@ mfn = virt_to_mfn(data); gnttab_grant_foreign_access_ref( ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); + np->grant_tx_page[id] = virt_to_page(data); tx->gref = np->grant_tx_ref[id] = ref; tx->offset = offset; tx->size = len; @@ -1070,8 +1074,7 @@ static int xennet_change_mtu(struct net_device *dev, int mtu) { - int max = xennet_can_sg(dev) ? - XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; + int max = xennet_can_sg(dev) ? 
XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; if (mtu > max) return -EINVAL; @@ -1091,13 +1094,13 @@ unsigned int start; do { - start = u64_stats_fetch_begin_bh(&stats->syncp); + start = u64_stats_fetch_begin_irq(&stats->syncp); rx_packets = stats->rx_packets; tx_packets = stats->tx_packets; rx_bytes = stats->rx_bytes; tx_bytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_bh(&stats->syncp, start)); + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; @@ -1122,10 +1125,11 @@ continue; skb = np->tx_skbs[i].skb; - gnttab_end_foreign_access_ref(np->grant_tx_ref[i], - GNTMAP_readonly); - gnttab_release_grant_reference(&np->gref_tx_head, - np->grant_tx_ref[i]); + get_page(np->grant_tx_page[i]); + gnttab_end_foreign_access(np->grant_tx_ref[i], + GNTMAP_readonly, + (unsigned long)page_address(np->grant_tx_page[i])); + np->grant_tx_page[i] = NULL; np->grant_tx_ref[i] = GRANT_INVALID_REF; add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); dev_kfree_skb_irq(skb); @@ -1134,78 +1138,35 @@ static void xennet_release_rx_bufs(struct netfront_info *np) { - struct mmu_update *mmu = np->rx_mmu; - struct multicall_entry *mcl = np->rx_mcl; - struct sk_buff_head free_list; - struct sk_buff *skb; - unsigned long mfn; - int xfer = 0, noxfer = 0, unused = 0; int id, ref; - dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n", - __func__); - return; - - skb_queue_head_init(&free_list); - spin_lock_bh(&np->rx_lock); for (id = 0; id < NET_RX_RING_SIZE; id++) { - ref = np->grant_rx_ref[id]; - if (ref == GRANT_INVALID_REF) { - unused++; - continue; - } + struct sk_buff *skb; + struct page *page; skb = np->rx_skbs[id]; - mfn = gnttab_end_foreign_transfer_ref(ref); - gnttab_release_grant_reference(&np->gref_rx_head, ref); - np->grant_rx_ref[id] = GRANT_INVALID_REF; - - if (0 == mfn) { - skb_shinfo(skb)->nr_frags = 0; - dev_kfree_skb(skb); - noxfer++; + if (!skb) continue; - } - - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - /* Remap the page. */ - const struct page *page = - skb_frag_page(&skb_shinfo(skb)->frags[0]); - unsigned long pfn = page_to_pfn(page); - void *vaddr = page_address(page); - MULTI_update_va_mapping(mcl, (unsigned long)vaddr, - mfn_pte(mfn, PAGE_KERNEL), - 0); - mcl++; - mmu->ptr = ((u64)mfn << PAGE_SHIFT) - | MMU_MACHPHYS_UPDATE; - mmu->val = pfn; - mmu++; + ref = np->grant_rx_ref[id]; + if (ref == GRANT_INVALID_REF) + continue; - set_phys_to_machine(pfn, mfn); - } - __skb_queue_tail(&free_list, skb); - xfer++; - } + page = skb_frag_page(&skb_shinfo(skb)->frags[0]); - dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n", - __func__, xfer, noxfer, unused); + /* gnttab_end_foreign_access() needs a page ref until + * foreign access is ended (which may be deferred). + */ + get_page(page); + gnttab_end_foreign_access(ref, 0, + (unsigned long)page_address(page)); + np->grant_rx_ref[id] = GRANT_INVALID_REF; - if (xfer) { - if (!xen_feature(XENFEAT_auto_translated_physmap)) { - /* Do all the remapping work and M2P updates. 
*/ - MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu, - NULL, DOMID_SELF); - mcl++; - HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl); - } + kfree_skb(skb); } - __skb_queue_purge(&free_list); - spin_unlock_bh(&np->rx_lock); } @@ -1358,6 +1319,7 @@ for (i = 0; i < NET_RX_RING_SIZE; i++) { np->rx_skbs[i] = NULL; np->grant_rx_ref[i] = GRANT_INVALID_REF; + np->grant_tx_page[i] = NULL; } /* A grant for every tx ring slot */ @@ -1393,8 +1355,6 @@ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops); SET_NETDEV_DEV(netdev, &dev->dev); - netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); - np->netdev = netdev; netif_carrier_off(netdev); @@ -2115,7 +2075,7 @@ if (!xen_domain()) return -ENODEV; - if (xen_hvm_domain() && !xen_platform_pci_unplug) + if (!xen_has_pv_nic_devices()) return -ENODEV; pr_info("Initialising Xen virtual ethernet driver\n"); --- linux-3.13.0.orig/drivers/of/address.c +++ linux-3.13.0/drivers/of/address.c @@ -99,11 +99,12 @@ static int of_bus_pci_match(struct device_node *np) { /* + * "pciex" is PCI Express * "vci" is for the /chaos bridge on 1st-gen PCI powermacs * "ht" is hypertransport */ - return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") || - !strcmp(np->type, "ht"); + return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") || + !strcmp(np->type, "vci") || !strcmp(np->type, "ht"); } static void of_bus_pci_count_cells(struct device_node *np, @@ -400,6 +401,21 @@ return NULL; } +static int of_empty_ranges_quirk(void) +{ + if (IS_ENABLED(CONFIG_PPC)) { + /* To save cycles, we cache the result */ + static int quirk_state = -1; + + if (quirk_state < 0) + quirk_state = + of_machine_is_compatible("Power Macintosh") || + of_machine_is_compatible("MacRISC"); + return quirk_state; + } + return false; +} + static int of_translate_one(struct device_node *parent, struct of_bus *bus, struct of_bus *pbus, __be32 *addr, int na, int ns, int pna, const char *rprop) @@ -425,12 +441,10 @@ * This code is only enabled on powerpc. --gcl */ ranges = of_get_property(parent, rprop, &rlen); -#if !defined(CONFIG_PPC) - if (ranges == NULL) { + if (ranges == NULL && !of_empty_ranges_quirk()) { pr_err("OF: no ranges; cannot translate\n"); return 1; } -#endif /* !defined(CONFIG_PPC) */ if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); --- linux-3.13.0.orig/drivers/of/base.c +++ linux-3.13.0/drivers/of/base.c @@ -17,6 +17,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#include #include #include #include @@ -34,6 +35,7 @@ struct device_node *of_chosen; struct device_node *of_aliases; static struct device_node *of_stdout; +static const char *of_stdout_options; DEFINE_MUTEX(of_aliases_mutex); @@ -342,27 +344,72 @@ } EXPORT_SYMBOL(of_get_cpu_node); -/** Checks if the given "compat" string matches one of the strings in - * the device's "compatible" property +/** + * __of_device_is_compatible() - Check if the node matches given constraints + * @device: pointer to node + * @compat: required compatible string, NULL or "" for any match + * @type: required device_type value, NULL or "" for any match + * @name: required node name, NULL or "" for any match + * + * Checks if the given @compat, @type and @name strings match the + * properties of the given @device. A constraints can be skipped by + * passing NULL or an empty string as the constraint. + * + * Returns 0 for no match, and a positive integer on match. 
The return + * value is a relative score with larger values indicating better + * matches. The score is weighted for the most specific compatible value + * to get the highest score. Matching type is next, followed by matching + * name. Practically speaking, this results in the following priority + * order for matches: + * + * 1. specific compatible && type && name + * 2. specific compatible && type + * 3. specific compatible && name + * 4. specific compatible + * 5. general compatible && type && name + * 6. general compatible && type + * 7. general compatible && name + * 8. general compatible + * 9. type && name + * 10. type + * 11. name */ static int __of_device_is_compatible(const struct device_node *device, - const char *compat) + const char *compat, const char *type, const char *name) { - const char* cp; - int cplen, l; + struct property *prop; + const char *cp; + int index = 0, score = 0; + + /* Compatible match has highest priority */ + if (compat && compat[0]) { + prop = __of_find_property(device, "compatible", NULL); + for (cp = of_prop_next_string(prop, NULL); cp; + cp = of_prop_next_string(prop, cp), index++) { + if (of_compat_cmp(cp, compat, strlen(compat)) == 0) { + score = INT_MAX/2 - (index << 2); + break; + } + } + if (!score) + return 0; + } - cp = __of_get_property(device, "compatible", &cplen); - if (cp == NULL) - return 0; - while (cplen > 0) { - if (of_compat_cmp(cp, compat, strlen(compat)) == 0) - return 1; - l = strlen(cp) + 1; - cp += l; - cplen -= l; + /* Matching type is better than matching name */ + if (type && type[0]) { + if (!device->type || of_node_cmp(type, device->type)) + return 0; + score += 2; } - return 0; + /* Matching name is a bit better than not */ + if (name && name[0]) { + if (!device->name || of_node_cmp(name, device->name)) + return 0; + score++; + } + + return score; } /** Checks if the given "compat" string matches one of the strings in @@ -375,7 +422,7 @@ int res; raw_spin_lock_irqsave(&devtree_lock, flags); - res = __of_device_is_compatible(device, compat); + res = __of_device_is_compatible(device, compat, NULL, NULL); raw_spin_unlock_irqrestore(&devtree_lock, flags); return res; } @@ -497,6 +544,22 @@ } EXPORT_SYMBOL(of_get_next_parent); +static struct device_node *__of_get_next_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + + next = prev ? prev->sibling : node->child; + for (; next; next = next->sibling) + if (of_node_get(next)) + break; + of_node_put(prev); + return next; +} +#define __for_each_child_of_node(parent, child) \ + for (child = __of_get_next_child(parent, NULL); child != NULL; \ + child = __of_get_next_child(parent, child)) + /** * of_get_next_child - Iterate a node childs * @node: parent node @@ -512,11 +575,7 @@ unsigned long flags; raw_spin_lock_irqsave(&devtree_lock, flags); - next = prev ? 
prev->sibling : node->child; - for (; next; next = next->sibling) - if (of_node_get(next)) - break; - of_node_put(prev); + next = __of_get_next_child(node, prev); raw_spin_unlock_irqrestore(&devtree_lock, flags); return next; } @@ -573,28 +632,95 @@ } EXPORT_SYMBOL(of_get_child_by_name); +static struct device_node *__of_find_node_by_path(struct device_node *parent, + const char *path) +{ + struct device_node *child; + int len = strchrnul(path, '/') - path; + int term; + + if (!len) + return NULL; + + term = strchrnul(path, ':') - path; + if (term < len) + len = term; + + __for_each_child_of_node(parent, child) { + const char *name = strrchr(child->full_name, '/'); + if (WARN(!name, "malformed device_node %s\n", child->full_name)) + continue; + name++; + if (strncmp(path, name, len) == 0 && (strlen(name) == len)) + return child; + } + return NULL; +} + /** - * of_find_node_by_path - Find a node matching a full OF path - * @path: The full path to match + * of_find_node_opts_by_path - Find a node matching a full OF path + * @path: Either the full path to match, or if the path does not + * start with '/', the name of a property of the /aliases + * node (an alias). In the case of an alias, the node + * matching the alias' value will be returned. + * @opts: Address of a pointer into which to store the start of + * an options string appended to the end of the path with + * a ':' separator. + * + * Valid paths: + * /foo/bar Full path + * foo Valid alias + * foo/bar Valid alias + relative path * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ -struct device_node *of_find_node_by_path(const char *path) +struct device_node *of_find_node_opts_by_path(const char *path, const char **opts) { - struct device_node *np = of_allnodes; + struct device_node *np = NULL; + struct property *pp; unsigned long flags; + const char *separator = strchr(path, ':'); + + if (opts) + *opts = separator ? separator + 1 : NULL; + + if (strcmp(path, "/") == 0) + return of_node_get(of_allnodes); + + /* The path could begin with an alias */ + if (*path != '/') { + char *p = strchrnul(path, '/'); + int len = separator ? separator - path : p - path; + + /* of_aliases must not be NULL */ + if (!of_aliases) + return NULL; + + for_each_property_of_node(of_aliases, pp) { + if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) { + np = of_find_node_by_path(pp->value); + break; + } + } + if (!np) + return NULL; + path = p; + } + /* Step down the tree matching path components */ raw_spin_lock_irqsave(&devtree_lock, flags); - for (; np; np = np->allnext) { - if (np->full_name && (of_node_cmp(np->full_name, path) == 0) - && of_node_get(np)) - break; + if (!np) + np = of_node_get(of_allnodes); + while (np && *path == '/') { + path++; /* Increment past '/' delimiter */ + np = __of_find_node_by_path(np, path); + path = strchrnul(path, '/'); } raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; } -EXPORT_SYMBOL(of_find_node_by_path); +EXPORT_SYMBOL(of_find_node_opts_by_path); /** * of_find_node_by_name - Find a node by its "name" property @@ -678,10 +804,7 @@ raw_spin_lock_irqsave(&devtree_lock, flags); np = from ? 
from->allnext : of_allnodes; for (; np; np = np->allnext) { - if (type - && !(np->type && (of_node_cmp(np->type, type) == 0))) - continue; - if (__of_device_is_compatible(np, compatible) && + if (__of_device_is_compatible(np, compatible, type, NULL) && of_node_get(np)) break; } @@ -731,25 +854,22 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches, const struct device_node *node) { + const struct of_device_id *best_match = NULL; + int score, best_score = 0; + if (!matches) return NULL; - while (matches->name[0] || matches->type[0] || matches->compatible[0]) { - int match = 1; - if (matches->name[0]) - match &= node->name - && !strcmp(matches->name, node->name); - if (matches->type[0]) - match &= node->type - && !strcmp(matches->type, node->type); - if (matches->compatible[0]) - match &= __of_device_is_compatible(node, - matches->compatible); - if (match) - return matches; - matches++; + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { + score = __of_device_is_compatible(node, matches->compatible, + matches->type, matches->name); + if (score > best_score) { + best_match = matches; + best_score = score; + } } - return NULL; + + return best_match; } /** @@ -1075,52 +1195,6 @@ EXPORT_SYMBOL_GPL(of_property_read_string); /** - * of_property_read_string_index - Find and read a string from a multiple - * strings property. - * @np: device node from which the property value is to be read. - * @propname: name of the property to be searched. - * @index: index of the string in the list of strings - * @out_string: pointer to null terminated return string, modified only if - * return value is 0. - * - * Search for a property in a device tree node and retrieve a null - * terminated string value (pointer to data, not a copy) in the list of strings - * contained in that property. - * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if - * property does not have a value, and -EILSEQ if the string is not - * null-terminated within the length of the property data. - * - * The out_string pointer is modified only if a valid string can be decoded. - */ -int of_property_read_string_index(struct device_node *np, const char *propname, - int index, const char **output) -{ - struct property *prop = of_find_property(np, propname, NULL); - int i = 0; - size_t l = 0, total = 0; - const char *p; - - if (!prop) - return -EINVAL; - if (!prop->value) - return -ENODATA; - if (strnlen(prop->value, prop->length) >= prop->length) - return -EILSEQ; - - p = prop->value; - - for (i = 0; total < prop->length; total += l, p += l) { - l = strlen(p) + 1; - if (i++ == index) { - *output = p; - return 0; - } - } - return -ENODATA; -} -EXPORT_SYMBOL_GPL(of_property_read_string_index); - -/** * of_property_match_string() - Find string in a list and return index * @np: pointer to node containing string list property * @propname: string list property name @@ -1146,7 +1220,7 @@ end = p + prop->length; for (i = 0; p < end; i++, p += l) { - l = strlen(p) + 1; + l = strnlen(p, end - p) + 1; if (p + l > end) return -EILSEQ; pr_debug("comparing %s with %s\n", string, p); @@ -1158,39 +1232,41 @@ EXPORT_SYMBOL_GPL(of_property_match_string); /** - * of_property_count_strings - Find and return the number of strings from a - * multiple strings property. + * of_property_read_string_util() - Utility helper for parsing string properties * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. 
+ * @out_strs: output array of string pointers. + * @sz: number of array elements to read. + * @skip: Number of strings to skip over at beginning of list. * - * Search for a property in a device tree node and retrieve the number of null - * terminated string contain in it. Returns the number of strings on - * success, -EINVAL if the property does not exist, -ENODATA if property - * does not have a value, and -EILSEQ if the string is not null-terminated - * within the length of the property data. + * Don't call this function directly. It is a utility helper for the + * of_property_read_string*() family of functions. */ -int of_property_count_strings(struct device_node *np, const char *propname) +int of_property_read_string_helper(struct device_node *np, const char *propname, + const char **out_strs, size_t sz, int skip) { struct property *prop = of_find_property(np, propname, NULL); - int i = 0; - size_t l = 0, total = 0; - const char *p; + int l = 0, i = 0; + const char *p, *end; if (!prop) return -EINVAL; if (!prop->value) return -ENODATA; - if (strnlen(prop->value, prop->length) >= prop->length) - return -EILSEQ; - p = prop->value; + end = p + prop->length; - for (i = 0; total < prop->length; total += l, p += l, i++) - l = strlen(p) + 1; - - return i; + for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) { + l = strnlen(p, end - p) + 1; + if (p + l > end) + return -EILSEQ; + if (out_strs && i >= skip) + *out_strs++ = p; + } + i -= skip; + return i <= 0 ? -ENODATA : i; } -EXPORT_SYMBOL_GPL(of_property_count_strings); +EXPORT_SYMBOL_GPL(of_property_read_string_helper); void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) { @@ -1777,11 +1853,11 @@ of_chosen = of_find_node_by_path("/chosen@0"); if (of_chosen) { - const char *name; - - name = of_get_property(of_chosen, "linux,stdout-path", NULL); + const char *name = of_get_property(of_chosen, "stdout-path", NULL); + if (!name) + name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name) - of_stdout = of_find_node_by_path(name); + of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); } of_aliases = of_find_node_by_path("/aliases"); @@ -1895,20 +1971,23 @@ EXPORT_SYMBOL_GPL(of_prop_next_string); /** - * of_device_is_stdout_path - check if a device node matches the - * linux,stdout-path property - * - * Check if this device node matches the linux,stdout-path property - * in the chosen node. return true if yes, false otherwise. + * of_console_check() - Test and setup console for DT setup + * @dn - Pointer to device node + * @name - Name to use for preferred console without index. ex. "ttyS" + * @index - Index to use for preferred console. + * + * Check if the given device node matches the stdout-path property in the + * /chosen node. If it does then register it as the preferred console and return + * TRUE. Otherwise return FALSE. 
*/ -int of_device_is_stdout_path(struct device_node *dn) +bool of_console_check(struct device_node *dn, char *name, int index) { - if (!of_stdout) + if (!dn || dn != of_stdout || console_set_on_cmdline) return false; - - return of_stdout == dn; + return !add_preferred_console(name, index, + kstrdup(of_stdout_options, GFP_KERNEL)); } -EXPORT_SYMBOL_GPL(of_device_is_stdout_path); +EXPORT_SYMBOL_GPL(of_console_check); /** * of_find_next_cache_node - Find a node's subsidiary cache --- linux-3.13.0.orig/drivers/of/irq.c +++ linux-3.13.0/drivers/of/irq.c @@ -287,7 +287,7 @@ struct device_node *p; const __be32 *intspec, *tmp, *addr; u32 intsize, intlen; - int i, res = -EINVAL; + int i, res; pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index); @@ -298,16 +298,17 @@ /* Get the reg property (if any) */ addr = of_get_property(device, "reg", NULL); + /* Try the new-style interrupts-extended first */ + res = of_parse_phandle_with_args(device, "interrupts-extended", + "#interrupt-cells", index, out_irq); + if (!res) + return of_irq_parse_raw(addr, out_irq); + /* Get the interrupts property */ intspec = of_get_property(device, "interrupts", &intlen); - if (intspec == NULL) { - /* Try the new-style interrupts-extended */ - res = of_parse_phandle_with_args(device, "interrupts-extended", - "#interrupt-cells", index, out_irq); - if (res) - return -EINVAL; - return of_irq_parse_raw(addr, out_irq); - } + if (intspec == NULL) + return -EINVAL; + intlen /= sizeof(*intspec); pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen); @@ -319,15 +320,19 @@ /* Get size of interrupt specifier */ tmp = of_get_property(p, "#interrupt-cells", NULL); - if (tmp == NULL) + if (tmp == NULL) { + res = -EINVAL; goto out; + } intsize = be32_to_cpu(*tmp); pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); /* Check index */ - if ((index + 1) * intsize > intlen) + if ((index + 1) * intsize > intlen) { + res = -EINVAL; goto out; + } /* Copy intspec into irq structure */ intspec += index * intsize; @@ -377,6 +382,54 @@ EXPORT_SYMBOL_GPL(of_irq_to_resource); /** + * of_irq_get - Decode a node's IRQ and return it as a Linux irq number + * @dev: pointer to device tree node + * @index: zero-based index of the irq + * + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain + * is not yet created. + * + */ +int of_irq_get(struct device_node *dev, int index) +{ + int rc; + struct of_phandle_args oirq; + struct irq_domain *domain; + + rc = of_irq_parse_one(dev, index, &oirq); + if (rc) + return rc; + + domain = irq_find_host(oirq.np); + if (!domain) + return -EPROBE_DEFER; + + return irq_create_of_mapping(&oirq); +} + +/** + * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number + * @dev: pointer to device tree node + * @name: irq name + * + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain + * is not yet created, or error code in case of any other failure. + */ +int of_irq_get_byname(struct device_node *dev, const char *name) +{ + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(dev, "interrupt-names", name); + if (index < 0) + return index; + + return of_irq_get(dev, index); +} + +/** * of_irq_count - Count the number of IRQs a node uses * @dev: pointer to device tree node */ --- linux-3.13.0.orig/drivers/of/of_mdio.c +++ linux-3.13.0/drivers/of/of_mdio.c @@ -247,3 +247,17 @@ return IS_ERR(phy) ? 
NULL : phy; } EXPORT_SYMBOL(of_phy_connect_fixed_link); + +/* XXX add comment */ +struct phy_device *of_phy_attach(struct net_device *dev, + struct device_node *phy_np, u32 flags, + phy_interface_t iface) +{ + struct phy_device *phy = of_phy_find_device(phy_np); + + if (!phy) + return NULL; + + return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; +} +EXPORT_SYMBOL(of_phy_attach); --- linux-3.13.0.orig/drivers/of/of_net.c +++ linux-3.13.0/drivers/of/of_net.c @@ -13,8 +13,8 @@ /** * It maps 'enum phy_interface_t' found in include/linux/phy.h - * into the device tree binding of 'phy-mode', so that Ethernet - * device driver can get phy interface from device tree. + * into the device tree binding of 'phy-mode' or 'phy-connection-type', + * so that Ethernet device driver can get phy interface from device tree. */ static const char *phy_modes[] = { [PHY_INTERFACE_MODE_NA] = "", @@ -30,14 +30,16 @@ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid", [PHY_INTERFACE_MODE_RTBI] = "rtbi", [PHY_INTERFACE_MODE_SMII] = "smii", + [PHY_INTERFACE_MODE_XGMII] = "xgmii", }; /** * of_get_phy_mode - Get phy mode for given device_node * @np: Pointer to the given device_node * - * The function gets phy interface string from property 'phy-mode', - * and return its index in phy_modes table, or errno in error case. + * The function gets phy interface string from property 'phy-mode' or + * 'phy-connection-type', and return its index in phy_modes table, or errno in + * error case. */ int of_get_phy_mode(struct device_node *np) { @@ -46,6 +48,8 @@ err = of_property_read_string(np, "phy-mode", &pm); if (err < 0) + err = of_property_read_string(np, "phy-connection-type", &pm); + if (err < 0) return err; for (i = 0; i < ARRAY_SIZE(phy_modes); i++) --- linux-3.13.0.orig/drivers/of/platform.c +++ linux-3.13.0/drivers/of/platform.c @@ -168,7 +168,9 @@ rc = of_address_to_resource(np, i, res); WARN_ON(rc); } - WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); + if (of_irq_to_resource_table(np, res, num_irq) != num_irq) + pr_debug("not all legacy IRQ resources mapped for %s\n", + np->name); } dev->dev.of_node = of_node_get(np); --- linux-3.13.0.orig/drivers/of/selftest.c +++ linux-3.13.0/drivers/of/selftest.c @@ -132,8 +132,9 @@ selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); } -static void __init of_selftest_property_match_string(void) +static void __init of_selftest_property_string(void) { + const char *strings[4]; struct device_node *np; int rc; @@ -150,13 +151,66 @@ rc = of_property_match_string(np, "phandle-list-names", "third"); selftest(rc == 2, "third expected:0 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "fourth"); - selftest(rc == -ENODATA, "unmatched string; rc=%i", rc); + selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); rc = of_property_match_string(np, "missing-property", "blah"); - selftest(rc == -EINVAL, "missing property; rc=%i", rc); + selftest(rc == -EINVAL, "missing property; rc=%i\n", rc); rc = of_property_match_string(np, "empty-property", "blah"); - selftest(rc == -ENODATA, "empty property; rc=%i", rc); + selftest(rc == -ENODATA, "empty property; rc=%i\n", rc); rc = of_property_match_string(np, "unterminated-string", "blah"); - selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc); + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + + /* of_property_count_strings() tests */ + rc = of_property_count_strings(np, "string-property"); + selftest(rc == 1, "Incorrect string count; rc=%i\n", rc); + rc = 
of_property_count_strings(np, "phandle-list-names"); + selftest(rc == 3, "Incorrect string count; rc=%i\n", rc); + rc = of_property_count_strings(np, "unterminated-string"); + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + rc = of_property_count_strings(np, "unterminated-string-list"); + selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); + + /* of_property_read_string_index() tests */ + rc = of_property_read_string_index(np, "string-property", 0, strings); + selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "string-property", 1, strings); + selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 0, strings); + selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 1, strings); + selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 2, strings); + selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "phandle-list-names", 3, strings); + selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "unterminated-string", 0, strings); + selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings); + selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */ + selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + strings[1] = NULL; + + /* of_property_read_string_array() tests */ + rc = of_property_read_string_array(np, "string-property", strings, 4); + selftest(rc == 1, "Incorrect string count; rc=%i\n", rc); + rc = of_property_read_string_array(np, "phandle-list-names", strings, 4); + selftest(rc == 3, "Incorrect string count; rc=%i\n", rc); + rc = of_property_read_string_array(np, "unterminated-string", strings, 4); + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + /* -- An incorrectly formed string should cause a failure */ + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4); + selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); + /* -- parsing the correctly formed strings should still work: */ + strings[2] = NULL; + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2); + selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc); + strings[1] = NULL; + rc = of_property_read_string_array(np, "phandle-list-names", strings, 1); + selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]); } static void __init of_selftest_parse_interrupts(void) @@ -313,7 +367,7 @@ pr_info("start of selftest - you will see error messages\n"); of_selftest_parse_phandle_with_args(); - 
of_selftest_property_match_string(); + of_selftest_property_string(); of_selftest_parse_interrupts(); of_selftest_parse_interrupts_extended(); pr_info("end of selftest - %i passed, %i failed\n", --- linux-3.13.0.orig/drivers/parport/Kconfig +++ linux-3.13.0/drivers/parport/Kconfig @@ -5,6 +5,12 @@ # Parport configuration. # +config ARCH_MIGHT_HAVE_PC_PARPORT + bool + help + Select this config option from the architecture Kconfig if + the architecture might have PC parallel port hardware. + menuconfig PARPORT tristate "Parallel port support" depends on HAS_IOMEM @@ -31,12 +37,6 @@ If unsure, say Y. -config ARCH_MIGHT_HAVE_PC_PARPORT - bool - help - Select this config option from the architecture Kconfig if - the architecture might have PC parallel port hardware. - if PARPORT config PARPORT_PC --- linux-3.13.0.orig/drivers/parport/parport_pc.c +++ linux-3.13.0/drivers/parport/parport_pc.c @@ -2600,8 +2600,6 @@ syba_2p_epp, syba_1p_ecp, titan_010l, - titan_1284p1, - titan_1284p2, avlab_1p, avlab_2p, oxsemi_952, @@ -2660,8 +2658,6 @@ /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } }, /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } }, /* titan_010l */ { 1, { { 3, -1 }, } }, - /* titan_1284p1 */ { 1, { { 0, 1 }, } }, - /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } }, /* avlab_1p */ { 1, { { 0, 1}, } }, /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} }, /* The Oxford Semi cards are unusual: 954 doesn't support ECP, @@ -2677,8 +2673,8 @@ /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, - /* netmos_9805 */ { 1, { { 0, -1 }, } }, - /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, + /* netmos_9805 */ { 1, { { 0, 1 }, } }, + /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } }, /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, @@ -2722,8 +2718,6 @@ PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L, PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l }, - { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 }, - { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 }, /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ /* AFAVLAB_TK9902 */ { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, --- linux-3.13.0.orig/drivers/pci/Makefile +++ linux-3.13.0/drivers/pci/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o obj-$(CONFIG_M68K) += setup-bus.o setup-irq.o +obj-$(CONFIG_ARM64) += setup-bus.o setup-irq.o # # ACPI Related PCI FW Functions --- linux-3.13.0.orig/drivers/pci/host/Kconfig +++ linux-3.13.0/drivers/pci/host/Kconfig @@ -33,4 +33,18 @@ There are 3 internal PCI controllers available with a single built-in EHCI/OHCI host controller present on each one. +config PCI_XGENE_MSI + bool + +config PCI_XGENE + bool "X-Gene PCIe controller" + depends on ARCH_XGENE + depends on OF + select PCIEPORTBUS + select PCI_XGENE_MSI if PCI_MSI + help + Say Y here if you want internal PCI support on APM X-Gene SoC. + There are 5 internal PCIe ports available. Each port is GEN3 capable + and have varied lanes from x1 to x8. 
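For reference, the PCI_XGENE_MSI symbol above has no prompt of its own; it is brought in by the "select PCI_XGENE_MSI if PCI_MSI" clause, so a typical X-Gene build only needs to enable the visible options. A minimal, hypothetical .config sketch, assuming only the symbols named in this hunk (PCI_XGENE, PCI_XGENE_MSI, PCIEPORTBUS) plus the pre-existing ARCH_XGENE, PCI and PCI_MSI options it depends on:

	# Hypothetical .config fragment for an APM X-Gene platform build
	CONFIG_ARCH_XGENE=y
	CONFIG_PCI=y
	# With PCI_MSI enabled, "select PCI_XGENE_MSI if PCI_MSI" pulls in the MSI driver
	CONFIG_PCI_MSI=y
	# Also selects PCIEPORTBUS (and PCI_XGENE_MSI, as above)
	CONFIG_PCI_XGENE=y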
+ endmenu --- linux-3.13.0.orig/drivers/pci/host/Makefile +++ linux-3.13.0/drivers/pci/host/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o +obj-$(CONFIG_PCI_XGENE) += pci-xgene.o +obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o --- linux-3.13.0.orig/drivers/pci/host/pci-mvebu.c +++ linux-3.13.0/drivers/pci/host/pci-mvebu.c @@ -60,14 +60,6 @@ #define PCIE_DEBUG_CTRL 0x1a60 #define PCIE_DEBUG_SOFT_RESET BIT(20) -/* - * This product ID is registered by Marvell, and used when the Marvell - * SoC is not the root complex, but an endpoint on the PCIe bus. It is - * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI - * bridge. - */ -#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846 - /* PCI configuration space of a PCI-to-PCI bridge */ struct mvebu_sw_pci_bridge { u16 vendor; @@ -294,6 +286,58 @@ return PCIBIOS_SUCCESSFUL; } +/* + * Remove windows, starting from the largest ones to the smallest + * ones. + */ +static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, + phys_addr_t base, size_t size) +{ + while (size) { + size_t sz = 1 << (fls(size) - 1); + + mvebu_mbus_del_window(base, sz); + base += sz; + size -= sz; + } +} + +/* + * MBus windows can only have a power of two size, but PCI BARs do not + * have this constraint. Therefore, we have to split the PCI BAR into + * areas each having a power of two size. We start from the largest + * one (i.e highest order bit set in the size). + */ +static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + phys_addr_t base, size_t size, + phys_addr_t remap) +{ + size_t size_mapped = 0; + + while (size) { + size_t sz = 1 << (fls(size) - 1); + int ret; + + ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, + sz, remap); + if (ret) { + dev_err(&port->pcie->pdev->dev, + "Could not create MBus window at 0x%x, size 0x%x: %d\n", + base, sz, ret); + mvebu_pcie_del_windows(port, base - size_mapped, + size_mapped); + return; + } + + size -= sz; + size_mapped += sz; + base += sz; + if (remap != MVEBU_MBUS_NO_REMAP) + remap += sz; + } +} + static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { phys_addr_t iobase; @@ -304,8 +348,8 @@ /* If a window was configured, remove it */ if (port->iowin_base) { - mvebu_mbus_del_window(port->iowin_base, - port->iowin_size); + mvebu_pcie_del_windows(port, port->iowin_base, + port->iowin_size); port->iowin_base = 0; port->iowin_size = 0; } @@ -325,11 +369,11 @@ port->iowin_base = port->pcie->io.start + iobase; port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | (port->bridge.iolimitupper << 16)) - - iobase); + iobase) + 1; - mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, - port->iowin_base, port->iowin_size, - iobase); + mvebu_pcie_add_windows(port, port->io_target, port->io_attr, + port->iowin_base, port->iowin_size, + iobase); pci_ioremap_io(iobase, port->iowin_base); } @@ -341,8 +385,8 @@ /* If a window was configured, remove it */ if (port->memwin_base) { - mvebu_mbus_del_window(port->memwin_base, - port->memwin_size); + mvebu_pcie_del_windows(port, port->memwin_base, + port->memwin_size); port->memwin_base = 0; port->memwin_size = 0; } @@ -359,10 +403,11 @@ port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); port->memwin_size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - - port->memwin_base; + port->memwin_base + 1; - 
mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, - port->memwin_base, port->memwin_size); + mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, + port->memwin_base, port->memwin_size, + MVEBU_MBUS_NO_REMAP); } /* @@ -377,7 +422,8 @@ bridge->class = PCI_CLASS_BRIDGE_PCI; bridge->vendor = PCI_VENDOR_ID_MARVELL; - bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID; + bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; + bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; bridge->header_type = PCI_HEADER_TYPE_BRIDGE; bridge->cache_line_size = 0x10; @@ -702,14 +748,21 @@ /* * On the PCI-to-PCI bridge side, the I/O windows must have at - * least a 64 KB size and be aligned on their size, and the - * memory windows must have at least a 1 MB size and be - * aligned on their size + * least a 64 KB size and the memory windows must have at + * least a 1 MB size. Moreover, MBus windows need to have a + * base address aligned on their size, and their size must be + * a power of two. This means that if the BAR doesn't have a + * power of two size, several MBus windows will actually be + * created. We need to ensure that the biggest MBus window + * (which will be the first one) is aligned on its size, which + * explains the rounddown_pow_of_two() being done here. */ if (res->flags & IORESOURCE_IO) - return round_up(start, max((resource_size_t)SZ_64K, size)); + return round_up(start, max((resource_size_t)SZ_64K, + rounddown_pow_of_two(size))); else if (res->flags & IORESOURCE_MEM) - return round_up(start, max((resource_size_t)SZ_1M, size)); + return round_up(start, max((resource_size_t)SZ_1M, + rounddown_pow_of_two(size))); else return start; } @@ -771,9 +824,9 @@ rangesz = pna + na + ns; nranges = rlen / sizeof(__be32) / rangesz; - for (i = 0; i < nranges; i++) { + for (i = 0; i < nranges; i++, range += rangesz) { u32 flags = of_read_number(range, 1); - u32 slot = of_read_number(range, 2); + u32 slot = of_read_number(range + 1, 1); u64 cpuaddr = of_read_number(range + na, pna); unsigned long rtype; @@ -781,14 +834,14 @@ rtype = IORESOURCE_IO; else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) rtype = IORESOURCE_MEM; + else + continue; if (slot == PCI_SLOT(devfn) && type == rtype) { *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); *attr = DT_CPUADDR_TO_ATTR(cpuaddr); return 0; } - - range += rangesz; } return -ENOENT; --- linux-3.13.0.orig/drivers/pci/host/pci-xgene-msi.c +++ linux-3.13.0/drivers/pci/host/pci-xgene-msi.c @@ -0,0 +1,567 @@ +/* + * XGene MSI Driver + * + * Copyright (c) 2010, Applied Micro Circuits Corporation + * Author: Tanmay Inamdar + * Tuan Phan + * Jim Hull + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NR_MSI_REG 16 +#define IRQS_PER_MSI_INDEX 32 +#define IRQS_PER_MSI_REG 256 +#define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG) + +#define XGENE_PIC_IP_MASK 0x0000000F +#define XGENE_PIC_IP_GIC 0x00000001 + +/* PCIe MSI Index Registers */ +#define MSI0IR0 0x000000 +#define MSIFIR7 0x7F0000 + +/* PCIe MSI Interrupt Register */ +#define MSI1INT0 0x800000 +#define MSI1INTF 0x8F0000 + +struct xgene_msi { + struct irq_domain *irqhost; + unsigned long cascade_irq; + u32 msiir_offset; + u32 msi_addr_lo; + u32 msi_addr_hi; + void __iomem *msi_regs; + u32 feature; + int msi_virqs[NR_MSI_REG]; + struct msi_bitmap bitmap; + struct list_head list; /* support multiple MSI banks */ + phandle phandle; +}; + +#ifdef CONFIG_ARCH_MSLIM +static inline u64 xgene_pcie_get_iof_addr(u64 addr) +{ + return mslim_pa_to_iof_axi(lower_32_bits(addr)); +} +#else +#define xgene_pcie_get_iof_addr(addr) addr +#endif + +#define MSI_DRIVER_VERSION "0.1" + +struct xgene_msi_feature { + u32 xgene_pic_ip; + u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */ +}; + +struct xgene_msi_cascade_data { + struct xgene_msi *msi_data; + int index; +}; + +LIST_HEAD(msi_head); + +static const struct xgene_msi_feature gic_msi_feature = { + .xgene_pic_ip = XGENE_PIC_IP_GIC, + .msiir_offset = 0, +}; + +irq_hw_number_t virq_to_hw(unsigned int virq) +{ + struct irq_data *irq_data = irq_get_irq_data(virq); + return WARN_ON(!irq_data) ? 0 : irq_data->hwirq; +} + +static inline u32 xgene_msi_intr_read(phys_addr_t __iomem *base, + unsigned int reg) +{ + u32 irq_reg = MSI1INT0 + (reg << 16); + return readl((void *)((phys_addr_t) base + irq_reg)); +} + +static inline u32 xgene_msi_read(phys_addr_t __iomem *base, unsigned int group, + unsigned int reg) +{ + u32 irq_reg = MSI0IR0 + (group << 19) + (reg << 16); + return readl((void *)((phys_addr_t) base + irq_reg)); +} + +/* + * We do not need this actually. The MSIR register has been read once + * in the cascade interrupt. 
So, this MSI interrupt has been acked +*/ +static void xgene_msi_end_irq(struct irq_data *d) +{ +} + +#ifdef CONFIG_SMP +static int xgene_msi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, bool force) +{ + u64 virt_msir; + + virt_msir = (u64)irq_get_handler_data(d->irq); + return irq_set_affinity(virt_msir, mask_val); +} +#endif + +static struct irq_chip xgene_msi_chip = { + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, + .irq_ack = xgene_msi_end_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = xgene_msi_set_affinity, +#endif + .name = "xgene-msi", +}; + +static int xgene_msi_host_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct xgene_msi *msi_data = h->host_data; + struct irq_chip *chip = &xgene_msi_chip; + + pr_debug("\nENTER %s, virq=%u\n", __func__, virq); + irq_set_status_flags(virq, IRQ_TYPE_LEVEL_HIGH); + irq_set_chip_data(virq, msi_data); + irq_set_chip_and_handler(virq, chip, handle_level_irq); + + return 0; +} + +static const struct irq_domain_ops xgene_msi_host_ops = { + .map = xgene_msi_host_map, +}; + +static int xgene_msi_init_allocator(struct xgene_msi *msi_data) +{ +#ifdef CONFIG_ACPI + if (efi_enabled(EFI_BOOT)) + return msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, NULL); + else +#endif + return msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, + msi_data->irqhost->of_node); +} + +int arch_msi_check_device(struct pci_dev *dev, int nvec, int type) +{ + pr_debug("ENTER %s\n", __func__); + return 0; +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *entry; + struct xgene_msi *msi_data; + + pr_debug("ENTER %s\n", __func__); + list_for_each_entry(entry, &dev->msi_list, list) { + if (entry->irq == 0) + continue; + + msi_data = irq_get_chip_data(entry->irq); + irq_set_msi_desc(entry->irq, NULL); + msi_bitmap_free_hwirqs(&msi_data->bitmap, + virq_to_hw(entry->irq), 1); + irq_dispose_mapping(entry->irq); + } +} + +static void xgene_compose_msi_msg(struct pci_dev *dev, int hwirq, + struct msi_msg *msg, + struct xgene_msi *msi_data) +{ + int reg_set, group; + + group = hwirq % NR_MSI_REG; + reg_set = hwirq / (NR_MSI_REG * IRQS_PER_MSI_INDEX); + + pr_debug("group = %d, reg_set : 0x%x\n", group, reg_set); + msg->address_lo = msi_data->msi_addr_lo + + (((8 * group) + reg_set) << 16); + msg->address_hi = msi_data->msi_addr_hi; + msg->data = (hwirq / NR_MSI_REG) % IRQS_PER_MSI_INDEX; + pr_debug("addr : 0x%08x, data : 0x%x\n", msg->address_lo, msg->data); +} + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + struct device_node *np; + struct msi_desc *entry; + struct msi_msg msg; + u64 gic_irq; + unsigned int virq; + struct xgene_msi *msi_data; + phandle phandle = 0; + int rc = 0; + int hwirq = -1; + + pr_debug("ENTER %s - nvec = %d, type = %d\n", __func__, nvec, type); +#ifdef CONFIG_ACPI + if (!efi_enabled(EFI_BOOT)) +#endif + { + np = pci_device_to_OF_node(pdev); + /* + * If the PCI node has an xgene,msi property, + * then we need to use it to find the specific MSI. 
+ */ + np = of_parse_phandle(np, "xgene,msi", 0); + if (np) { + if (of_device_is_compatible(np, + "xgene,gic-msi-cascade")) + phandle = np->phandle; + else { + dev_err(&pdev->dev, + "node %s has an invalid xgene,msi phandle %u\n", + np->full_name, np->phandle); + rc = -EINVAL; + goto exit; + } + } else + dev_info(&pdev->dev, "Found no xgene,msi phandle\n"); + } + + list_for_each_entry(entry, &pdev->msi_list, list) { + pr_debug("Loop over MSI devices\n"); + /* + * Loop over all the MSI devices until we find one that has an + * available interrupt. + */ + list_for_each_entry(msi_data, &msi_head, list) { + if (phandle && (phandle != msi_data->phandle)) + continue; + pr_debug("Xgene msi pointer : %p\n", msi_data); + hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); + break; + } + + if (hwirq < 0) { + dev_err(&pdev->dev, + "could not allocate MSI interrupt\n"); + rc = -ENOSPC; + goto exit; + } + + virq = irq_create_mapping(msi_data->irqhost, hwirq); + if (virq == 0) { + dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq); + msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); + rc = -ENOSPC; + goto exit; + } + + gic_irq = msi_data->msi_virqs[hwirq % NR_MSI_REG]; + pr_debug("Created Mapping HWIRQ %d on GIC IRQ %llu " + "TO VIRQ %d\n", + hwirq, gic_irq, virq); + /* chip_data is msi_data via host->hostdata in host->map() */ + irq_set_msi_desc(virq, entry); + xgene_compose_msi_msg(pdev, hwirq, &msg, msi_data); + irq_set_handler_data(virq, (void *)gic_irq); + write_msi_msg(virq, &msg); + } + +exit: + return rc; +} + +static void xgene_msi_cascade(unsigned int irq, struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct xgene_msi_cascade_data *cascade_data; + struct xgene_msi *msi_data; + unsigned int cascade_irq; + int msir_index = -1; + u32 msir_value = 0; + u32 intr_index = 0; + u32 msi_intr_reg_value = 0; + u32 msi_intr_reg; + + chained_irq_enter(chip, desc); + cascade_data = irq_get_handler_data(irq); + msi_data = cascade_data->msi_data; + + msi_intr_reg = cascade_data->index; + + if (msi_intr_reg >= NR_MSI_REG) + cascade_irq = 0; + + switch (msi_data->feature & XGENE_PIC_IP_MASK) { + case XGENE_PIC_IP_GIC: + msi_intr_reg_value = xgene_msi_intr_read(msi_data->msi_regs, + msi_intr_reg); + break; + } + + while (msi_intr_reg_value) { + msir_index = ffs(msi_intr_reg_value) - 1; + msir_value = xgene_msi_read(msi_data->msi_regs, msi_intr_reg, + msir_index); + while (msir_value) { + intr_index = ffs(msir_value) - 1; + cascade_irq = irq_linear_revmap(msi_data->irqhost, + msir_index * IRQS_PER_MSI_INDEX * NR_MSI_REG + + intr_index * NR_MSI_REG + msi_intr_reg); + if (cascade_irq != 0) + generic_handle_irq(cascade_irq); + msir_value &= ~(1 << intr_index); + } + msi_intr_reg_value &= ~(1 << msir_index); + } + + chained_irq_exit(chip, desc); +} + +static int xgene_msi_remove(struct platform_device *pdev) +{ + int virq, i; + struct xgene_msi *msi = platform_get_drvdata(pdev); + + pr_debug("ENTER %s\n", __func__); + for (i = 0; i < NR_MSI_REG; i++) { + virq = msi->msi_virqs[i]; + if (virq != 0) + irq_dispose_mapping(virq); + } + + if (msi->bitmap.bitmap) + msi_bitmap_free(&msi->bitmap); + + return 0; +} + +static int xgene_msi_setup_hwirq(struct xgene_msi *msi, + struct platform_device *pdev, + int offset, int irq_index) +{ + int virt_msir; + cpumask_var_t mask; + struct xgene_msi_cascade_data *cascade_data = NULL; + + virt_msir = platform_get_irq(pdev, irq_index); + if (virt_msir < 0) { + dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", + irq_index); + return 
-EINVAL; + } + + cascade_data = devm_kzalloc(&pdev->dev, + sizeof(struct xgene_msi_cascade_data), GFP_KERNEL); + if (!cascade_data) { + dev_err(&pdev->dev, "No memory for MSI cascade data\n"); + return -ENOMEM; + } + + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_setall(mask); + irq_set_affinity(virt_msir, mask); + free_cpumask_var(mask); + } + + msi->msi_virqs[irq_index] = virt_msir; + cascade_data->index = offset; + cascade_data->msi_data = msi; + irq_set_handler_data(virt_msir, cascade_data); + irq_set_chained_handler(virt_msir, xgene_msi_cascade); + pr_debug("mapped phys irq %d\n", virt_msir); + + return 0; +} + +static int xgene_msi_get_param(struct platform_device *pdev, const char *name, + u32 *buf, int count) +{ + int rc = 0; + +#ifdef CONFIG_ACPI + if (efi_enabled(EFI_BOOT)) { + struct acpi_dsm_entry entry; + + if (acpi_dsm_lookup_value(ACPI_HANDLE(&pdev->dev), + name, 0, &entry) || !entry.value) + return -EINVAL; + + if (count == 2) + sscanf(entry.value, "%d %d", &buf[0], &buf[1]); + else + rc = -EINVAL; + + kfree(entry.key); + kfree(entry.value); + } else +#endif + if (of_property_read_u32_array(pdev->dev.of_node, name, + buf, count)) + rc = -EINVAL; + + return rc; +} + +static int xgene_msi_probe(struct platform_device *pdev) +{ + struct xgene_msi *msi; + struct resource *res; + phys_addr_t msiir_offset; + int rc, j, irq_index, count; + u32 offset; + u32 buf[] = { 0, NR_MSI_IRQS}; + + msi = devm_kzalloc(&pdev->dev, sizeof(struct xgene_msi), GFP_KERNEL); + if (!msi) { + dev_err(&pdev->dev, "No memory for MSI structure\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, msi); + +#ifdef CONFIG_ACPI + if (efi_enabled(EFI_BOOT)) + msi->irqhost = irq_domain_add_linear(NULL, + NR_MSI_IRQS, &xgene_msi_host_ops, msi); + else +#endif + msi->irqhost = irq_domain_add_linear(pdev->dev.of_node, + NR_MSI_IRQS, &xgene_msi_host_ops, msi); + if (msi->irqhost == NULL) { + dev_err(&pdev->dev, "No memory for MSI irqhost\n"); + rc = -ENOMEM; + goto error; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(msi->msi_regs)) { + dev_err(&pdev->dev, "no reg space\n"); + rc = -EINVAL; + goto error; + } + + pr_debug("mapped 0x%08llx to 0x%p Size : 0x%08llx\n", res->start, + msi->msi_regs, resource_size(res)); + + msiir_offset = lower_32_bits(xgene_pcie_get_iof_addr(res->start)); + msi->msiir_offset = gic_msi_feature.msiir_offset + + (msiir_offset & 0xfffff); + msi->msi_addr_hi = upper_32_bits(res->start); + msi->msi_addr_lo = gic_msi_feature.msiir_offset + + (msiir_offset & 0xffffffff); + msi->feature = gic_msi_feature.xgene_pic_ip; + +#ifdef CONFIG_ACPI + if (efi_enabled(EFI_BOOT)) + msi->phandle = 0; + else +#endif + msi->phandle = pdev->dev.of_node->phandle; + + rc = xgene_msi_init_allocator(msi); + if (rc) { + dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); + goto error; + } + + rc = xgene_msi_get_param(pdev, "msi-available-ranges", buf, 2); + if (rc) { + dev_err(&pdev->dev, "Error getting MSI ranges\n"); + goto error; + } + + pr_debug("buf[0] = 0x%x buf[1] = 0x%x\n", buf[0], buf[1]); + if (buf[0] % IRQS_PER_MSI_REG || buf[1] % IRQS_PER_MSI_REG) { + pr_err_once("msi available range of" + "%u at %u is not IRQ-aligned\n", + buf[1], buf[0]); + rc = -EINVAL; + goto error; + } + + offset = buf[0] / IRQS_PER_MSI_REG; + count = buf[1] / IRQS_PER_MSI_REG; + pr_debug("offset = %d count = %d\n", offset, count); + + for (irq_index = 0, j = 0; j < count; j++, irq_index++) { + rc = xgene_msi_setup_hwirq(msi, pdev, 
offset + j, irq_index); + if (rc) + goto error; + } + + list_add_tail(&msi->list, &msi_head); + + pr_info("XGene: PCIe MSI driver v%s\n", MSI_DRIVER_VERSION); + + return 0; + +error: + xgene_msi_remove(pdev); + return rc; +} + +static const struct of_device_id xgene_msi_of_ids[] = { + { + .compatible = "xgene,gic-msi", + }, + {} +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_msi_acpi_ids[] = { + {"APMC0D0E", 0}, + {}, +}; +#endif + +static struct platform_driver xgene_msi_driver = { + .driver = { + .name = "xgene-msi", + .owner = THIS_MODULE, + .of_match_table = xgene_msi_of_ids, +#ifdef CONFIG_ACPI + .acpi_match_table = ACPI_PTR(xgene_msi_acpi_ids), +#endif + }, + .probe = xgene_msi_probe, + .remove = xgene_msi_remove, +}; + +static __init int xgene_msi_init(void) +{ + return platform_driver_register(&xgene_msi_driver); +} + +subsys_initcall_sync(xgene_msi_init); --- linux-3.13.0.orig/drivers/pci/host/pci-xgene.c +++ linux-3.13.0/drivers/pci/host/pci-xgene.c @@ -0,0 +1,784 @@ +/** + * APM X-Gene PCIe Driver + * + * Copyright (c) 2013 Applied Micro Circuits Corporation. + * + * Author: Tanmay Inamdar . + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PCIECORE_LTSSM 0x4c +#define PCIECORE_CTLANDSTATUS 0x50 +#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) +#define INTXSTATUSMASK 0x6c +#define PIM1_1L 0x80 +#define IBAR2 0x98 +#define IR2MSK 0x9c +#define PIM2_1L 0xa0 +#define IBAR3L 0xb4 +#define IR3MSKL 0xbc +#define PIM3_1L 0xc4 +#define OMR1BARL 0x100 +#define OMR2BARL 0x118 +#define CFGBARL 0x154 +#define CFGBARH 0x158 +#define CFGCTL 0x15c +#define RTDID 0x160 +#define BRIDGE_CFG_0 0x2000 +#define BRIDGE_CFG_1 0x2004 +#define BRIDGE_CFG_4 0x2010 +#define BRIDGE_CFG_32 0x2030 +#define BRIDGE_CFG_14 0x2038 +#define BRIDGE_CTRL_1 0x2204 +#define BRIDGE_CTRL_2 0x2208 +#define BRIDGE_CTRL_5 0x2214 +#define BRIDGE_STATUS_0 0x2600 +#define MEM_RAM_SHUTDOWN 0xd070 +#define BLOCK_MEM_RDY 0xd074 + +#define DEVICE_PORT_TYPE_MASK 0x03c00000 +#define PM_FORCE_RP_MODE_MASK 0x00000400 +#define SWITCH_PORT_MODE_MASK 0x00000800 +#define CLASS_CODE_MASK 0xffffff00 +#define LINK_UP_MASK 0x00000100 +#define AER_OPTIONAL_ERROR_EN 0xffc00000 +#define XGENE_PCIE_DEV_CTRL 0x2f02 +#define AXI_EP_CFG_ACCESS 0x10000 +#define ENABLE_ASPM 0x08000000 +#define XGENE_PORT_TYPE_RC 0x05000000 +#define BLOCK_MEM_RDY_VAL 0xFFFFFFFF +#define EN_COHERENCY 0xF0000000 +#define EN_REG 0x00000001 +#define OB_LO_IO 0x00000002 +#define XGENE_PCIE_VENDORID 0xE008 +#define XGENE_PCIE_DEVICEID 0xE004 +#define XGENE_PCIE_TIMEOUT (500*1000) /* us */ +#define XGENE_LTSSM_DETECT_WAIT 20 +#define XGENE_LTSSM_L0_WAIT 4 +#define SZ_1T (SZ_1G*1024ULL) + +struct xgene_res_cfg { + struct resource res; + u64 pci_addr; +}; + +struct xgene_pcie_port { + struct device_node *node; + struct device *dev; + struct clk *clk; + struct xgene_res_cfg mem; + struct xgene_res_cfg io; + struct resource realio; + 
void __iomem *csr_base; + void __iomem *cfg_base; + u8 link_up; +}; + +static inline u32 pcie_bar_low_val(u32 addr, u32 flags) +{ + return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; +} + +static inline struct xgene_pcie_port * +xgene_pcie_bus_to_port(struct pci_bus *bus) +{ + struct pci_sys_data *sys = bus->sysdata; + return sys->private_data; +} + +/* PCIE Configuration Out/In */ +static inline void xgene_pcie_cfg_out32(void __iomem *addr, u32 val) +{ + writel(val, addr); +} + +static inline void xgene_pcie_cfg_out16(void __iomem *addr, u16 val) +{ + u64 temp_addr = (u64)addr & ~0x3; + u32 val32 = readl((void __iomem *)temp_addr); + + switch ((u64) addr & 0x3) { + case 2: + val32 &= ~0xFFFF0000; + val32 |= (u32) val << 16; + break; + case 0: + default: + val32 &= ~0xFFFF; + val32 |= val; + break; + } + writel(val32, (void __iomem *)temp_addr); +} + +static inline void xgene_pcie_cfg_out8(void __iomem *addr, u8 val) +{ + phys_addr_t temp_addr = (u64) addr & ~0x3; + u32 val32 = readl((void __iomem *)temp_addr); + + switch ((u64) addr & 0x3) { + case 0: + val32 &= ~0xFF; + val32 |= val; + break; + case 1: + val32 &= ~0xFF00; + val32 |= (u32) val << 8; + break; + case 2: + val32 &= ~0xFF0000; + val32 |= (u32) val << 16; + break; + case 3: + default: + val32 &= ~0xFF000000; + val32 |= (u32) val << 24; + break; + } + writel(val32, (void __iomem *)temp_addr); +} + +static inline void xgene_pcie_cfg_in32(void __iomem *addr, u32 *val) +{ + *val = readl(addr); +} + +static inline void xgene_pcie_cfg_in16(void __iomem *addr, u16 *val) +{ + u64 temp_addr = (u64)addr & ~0x3; + u32 val32; + + val32 = readl((void __iomem *)temp_addr); + + switch ((u64)addr & 0x3) { + case 2: + *val = val32 >> 16; + break; + case 0: + default: + *val = val32; + break; + } +} + +static inline void xgene_pcie_cfg_in8(void __iomem *addr, u8 *val) +{ + u64 temp_addr = (u64)addr & ~0x3; + u32 val32; + + val32 = readl((void __iomem *)temp_addr); + + switch ((u64)addr & 0x3) { + case 3: + *val = val32 >> 24; + break; + case 2: + *val = val32 >> 16; + break; + case 1: + *val = val32 >> 8; + break; + case 0: + default: + *val = val32; + break; + } +} + +/* When the address bit [17:16] is 2'b01, the Configuration access will be + * treated as Type 1 and it will be forwarded to external PCIe device. + */ +static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) +{ + struct xgene_pcie_port *port = xgene_pcie_bus_to_port(bus); + + if (bus->number >= (bus->primary + 1)) + return port->cfg_base + AXI_EP_CFG_ACCESS; + + return port->cfg_base; +} + +/* For Configuration request, RTDID register is used as Bus Number, + * Device Number and Function number of the header fields. 
+ */ +static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) +{ + struct xgene_pcie_port *port = xgene_pcie_bus_to_port(bus); + unsigned int b, d, f; + u32 rtdid_val = 0; + + b = bus->number; + d = PCI_SLOT(devfn); + f = PCI_FUNC(devfn); + + if (!pci_is_root_bus(bus)) + rtdid_val = (b << 8) | (d << 3) | f; + + writel(rtdid_val, port->csr_base + RTDID); + /* read the register back to ensure flush */ + readl(port->csr_base + RTDID); +} + +static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val) +{ + struct xgene_pcie_port *port = xgene_pcie_bus_to_port(bus); + void __iomem *addr; + u8 val8; + u16 val16; + + if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + xgene_pcie_set_rtdid_reg(bus, devfn); + addr = xgene_pcie_get_cfg_base(bus); + switch (len) { + case 1: + xgene_pcie_cfg_in8(addr + offset, &val8); + *val = val8; + break; + case 2: + xgene_pcie_cfg_in16(addr + offset, &val16); + *val = val16; + break; + default: + xgene_pcie_cfg_in32(addr + offset, val); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val) +{ + struct xgene_pcie_port *port = xgene_pcie_bus_to_port(bus); + void __iomem *addr; + + if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + + xgene_pcie_set_rtdid_reg(bus, devfn); + addr = xgene_pcie_get_cfg_base(bus); + switch (len) { + case 1: + xgene_pcie_cfg_out8(addr + offset, (u8) val); + break; + case 2: + xgene_pcie_cfg_out16(addr + offset, (u16) val); + break; + default: + xgene_pcie_cfg_out32(addr + offset, val); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops xgene_pcie_ops = { + .read = xgene_pcie_read_config, + .write = xgene_pcie_write_config +}; + +static void xgene_pcie_program_core(void __iomem *csr_base) +{ + u32 val; + + val = readl(csr_base + BRIDGE_CFG_0); + val |= AER_OPTIONAL_ERROR_EN; + writel(val, csr_base + BRIDGE_CFG_0); + writel(0x0, csr_base + INTXSTATUSMASK); + val = readl(csr_base + BRIDGE_CTRL_1); + val = (val & ~0xffff) | XGENE_PCIE_DEV_CTRL; + writel(val, csr_base + BRIDGE_CTRL_1); +} + +static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr, + u32 flags, u64 size) +{ + u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; + u32 val32 = 0; + u32 val; + + val32 = readl(csr_base + addr); + val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); + writel(val, csr_base + addr); + + val32 = readl(csr_base + addr + 0x04); + val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); + writel(val, csr_base + addr + 0x04); + + val32 = readl(csr_base + addr + 0x04); + val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); + writel(val, csr_base + addr + 0x04); + + val32 = readl(csr_base + addr + 0x08); + val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16); + writel(val, csr_base + addr + 0x08); + + return mask; +} + +static void xgene_pcie_poll_linkup(struct xgene_pcie_port *port, + u32 *lanes, u32 *speed) +{ + void __iomem *csr_base = port->csr_base; + u32 val32; + u64 start_time, time; + + /* + * A component enters the LTSSM Detect state within + * 20ms of the end of fundamental core reset. 
+ */ + msleep(XGENE_LTSSM_DETECT_WAIT); + port->link_up = 0; + start_time = jiffies; + do { + val32 = readl(csr_base + PCIECORE_CTLANDSTATUS); + if (val32 & LINK_UP_MASK) { + port->link_up = 1; + *speed = PIPE_PHY_RATE_RD(val32); + val32 = readl(csr_base + BRIDGE_STATUS_0); + *lanes = val32 >> 26; + } + time = jiffies_to_msecs(jiffies - start_time); + } while ((!port->link_up) && (time <= XGENE_LTSSM_L0_WAIT)); +} + +static void xgene_pcie_setup_root_complex(struct xgene_pcie_port *port) +{ + void __iomem *csr_base = port->csr_base; + u32 val; + + val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID; + writel(val, csr_base + BRIDGE_CFG_0); + + val = readl(csr_base + BRIDGE_CFG_1); + val &= ~CLASS_CODE_MASK; + val |= PCI_CLASS_BRIDGE_PCI << 16; + writel(val, csr_base + BRIDGE_CFG_1); + + val = readl(csr_base + BRIDGE_CFG_14); + val |= SWITCH_PORT_MODE_MASK; + val &= ~PM_FORCE_RP_MODE_MASK; + writel(val, csr_base + BRIDGE_CFG_14); + + val = readl(csr_base + BRIDGE_CTRL_5); + val &= ~DEVICE_PORT_TYPE_MASK; + val |= XGENE_PORT_TYPE_RC; + writel(val, csr_base + BRIDGE_CTRL_5); + + val = readl(csr_base + BRIDGE_CTRL_2); + val |= ENABLE_ASPM; + writel(val, csr_base + BRIDGE_CTRL_2); + + val = readl(csr_base + BRIDGE_CFG_32); + writel(val | (1 << 19), csr_base + BRIDGE_CFG_32); +} + +/* Return 0 on success */ +static int xgene_pcie_init_ecc(struct xgene_pcie_port *port) +{ + void __iomem *csr_base = port->csr_base; + int timeout = XGENE_PCIE_TIMEOUT; + u32 val; + + val = readl(csr_base + MEM_RAM_SHUTDOWN); + if (val == 0) + return 0; + writel(0x0, csr_base + MEM_RAM_SHUTDOWN); + do { + val = readl(csr_base + BLOCK_MEM_RDY); + udelay(1); + } while ((val != BLOCK_MEM_RDY_VAL) && timeout--); + + return !(timeout > 0); +} + +static int xgene_pcie_init_port(struct xgene_pcie_port *port) +{ + int rc; + + port->clk = clk_get(port->dev, NULL); + if (IS_ERR_OR_NULL(port->clk)) { + dev_err(port->dev, "clock not available\n"); + return -ENODEV; + } + + rc = clk_prepare_enable(port->clk); + if (rc) { + dev_err(port->dev, "clock enable failed\n"); + return rc; + } + + rc = xgene_pcie_init_ecc(port); + if (rc) { + dev_err(port->dev, "memory init failed\n"); + return rc; + } + + return 0; +} + +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct xgene_pcie_port *port = xgene_pcie_bus_to_port(bus); + + return of_node_get(port->node); +} + +static void xgene_pcie_fixup_bridge(struct pci_dev *dev) +{ + int i; + + /* Hide the PCI host BARs from the kernel as their content doesn't + * fit well in the resource management + */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + dev_info(&dev->dev, "Hiding X-Gene pci host bridge resources %s\n", + pci_name(dev)); +} +DECLARE_PCI_FIXUP_HEADER(XGENE_PCIE_VENDORID, XGENE_PCIE_DEVICEID, + xgene_pcie_fixup_bridge); + +static int xgene_pcie_map_reg(struct xgene_pcie_port *port, + struct platform_device *pdev, u64 *cfg_addr) +{ + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); + port->csr_base = devm_ioremap_resource(port->dev, res); + if (IS_ERR(port->csr_base)) + return PTR_ERR(port->csr_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + port->cfg_base = devm_ioremap_resource(port->dev, res); + if (IS_ERR(port->cfg_base)) + return PTR_ERR(port->cfg_base); + *cfg_addr = res->start; + + return 0; +} + +static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, + u32 addr, u32 restype) +{ + 
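/*
 * Editor's note (illustrative, not part of the patch): the writes that follow
 * program one outbound translation window. From the code below, the register
 * block at csr_base + addr is laid out as:
 *   +0x00 / +0x04  CPU (AXI) address, low / high 32 bits
 *   +0x08 / +0x0c  address mask: ~(size - 1) | EN_REG (| OB_LO_IO for I/O)
 *   +0x10 / +0x14  PCI bus address, low / high 32 bits
 * If the resource is smaller than the minimum (128 MB for memory, 128 bytes
 * for I/O) the mask is left at 0, so the window is effectively not enabled.
 */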
struct resource *res; + void __iomem *base = port->csr_base + addr; + resource_size_t size; + u64 cpu_addr, pci_addr; + u64 mask = 0; + u32 min_size; + u32 flag = EN_REG; + + if (restype == IORESOURCE_MEM) { + res = &port->mem.res; + pci_addr = port->mem.pci_addr; + min_size = SZ_128M; + } else { + res = &port->io.res; + pci_addr = port->io.pci_addr; + min_size = 128; + flag |= OB_LO_IO; + } + size = resource_size(res); + if (size >= min_size) + mask = ~(size - 1) | flag; + else + dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n", + (u64)size, min_size); + cpu_addr = res->start; + writel(lower_32_bits(cpu_addr), base); + writel(upper_32_bits(cpu_addr), base + 0x04); + writel(lower_32_bits(mask), base + 0x08); + writel(upper_32_bits(mask), base + 0x0c); + writel(lower_32_bits(pci_addr), base + 0x10); + writel(upper_32_bits(pci_addr), base + 0x14); +} + +static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr) +{ + writel(lower_32_bits(addr), csr_base + CFGBARL); + writel(upper_32_bits(addr), csr_base + CFGBARH); + writel(EN_REG, csr_base + CFGCTL); +} + +static int xgene_pcie_parse_map_ranges(struct xgene_pcie_port *port, + u64 cfg_addr) +{ + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; + + if (of_pci_range_parser_init(&parser, np)) { + dev_err(dev, "missing ranges property\n"); + return -EINVAL; + } + + /* Get the I/O, memory ranges from DT */ + for_each_of_pci_range(&parser, &range) { + struct resource *res = NULL; + u64 restype = range.flags & IORESOURCE_TYPE_BITS; + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + + switch (restype) { + case IORESOURCE_IO: + res = &port->io.res; + port->io.pci_addr = range.pci_addr; + of_pci_range_to_resource(&range, np, res); + xgene_pcie_setup_ob_reg(port, OMR1BARL, restype); + break; + case IORESOURCE_MEM: + res = &port->mem.res; + port->mem.pci_addr = range.pci_addr; + of_pci_range_to_resource(&range, np, res); + xgene_pcie_setup_ob_reg(port, OMR2BARL, restype); + break; + default: + dev_err(dev, "invalid io resource!"); + return -EINVAL; + } + } + xgene_pcie_setup_cfg_reg(port->csr_base, cfg_addr); + return 0; +} + +static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size) +{ + writel(lower_32_bits(pim), addr); + writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04); + writel(lower_32_bits(size), addr + 0x10); + writel(upper_32_bits(size), addr + 0x14); +} + +/* + * X-Gene PCIe support maximum 3 inbound memory regions + * This function helps to select a region based on size of region + */ +static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) +{ + if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) { + *ib_reg_mask |= (1 << 1); + return 1; + } + + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { + *ib_reg_mask |= (1 << 0); + return 0; + } + + if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) { + *ib_reg_mask |= (1 << 2); + return 2; + } + return -EINVAL; +} + +static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, + struct of_pci_range *range, u8 *ib_reg_mask) +{ + void __iomem *csr_base = port->csr_base; + void __iomem *cfg_base = port->cfg_base; + void *bar_addr; + void *pim_addr; + u64 restype = range->flags & IORESOURCE_TYPE_BITS; + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; + u64 mask = 
~(size - 1) | EN_REG; + u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; + u32 bar_low; + int region; + + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); + if (region < 0) { + dev_warn(port->dev, "invalid pcie dma-range config\n"); + return; + } + + if (restype == PCI_BASE_ADDRESS_MEM_PREFETCH) + flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; + + bar_low = pcie_bar_low_val((u32)cpu_addr, flags); + switch (region) { + case 0: + xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size); + bar_addr = cfg_base + PCI_BASE_ADDRESS_0; + writel(bar_low, bar_addr); + writel(upper_32_bits(cpu_addr), bar_addr + 0x4); + pim_addr = csr_base + PIM1_1L; + break; + case 1: + bar_addr = csr_base + IBAR2; + writel(bar_low, bar_addr); + writel(lower_32_bits(mask), csr_base + IR2MSK); + pim_addr = csr_base + PIM2_1L; + break; + case 2: + bar_addr = csr_base + IBAR3L; + writel(bar_low, bar_addr); + writel(upper_32_bits(cpu_addr), bar_addr + 0x4); + writel(lower_32_bits(mask), csr_base + IR3MSKL); + writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4); + pim_addr = csr_base + PIM3_1L; + break; + } + + xgene_pcie_setup_pims(pim_addr, pci_addr, size); +} + +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "ib-ranges", &rlen); + if (!parser->range) + return -ENOENT; + + parser->end = parser->range + rlen / sizeof(__be32); + return 0; +} + +static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) +{ + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; + u8 ib_reg_mask = 0; + + if (pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing ib-ranges property\n"); + return -EINVAL; + } + + /* Get the ib-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } + return 0; +} + +static int xgene_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct xgene_pcie_port *pp = sys->private_data; + struct resource *io = &pp->realio; + + io->start = sys->domain * SZ_64K; + io->end = io->start + SZ_64K; + io->flags = pp->io.res.flags; + io->name = "PCI IO"; + pci_ioremap_io(io->start, pp->io.res.start); + + pci_add_resource_offset(&sys->resources, io, sys->io_offset); + sys->mem_offset = pp->mem.res.start - pp->mem.pci_addr; + pci_add_resource_offset(&sys->resources, &pp->mem.res, + sys->mem_offset); + return 1; +} + +static int xgene_pcie_probe_bridge(struct platform_device *pdev) +{ + struct device_node *np = of_node_get(pdev->dev.of_node); + struct xgene_pcie_port *port; + struct hw_pci xgene_pcie_hw; + u32 lanes = 0, speed = 0; + u64 cfg_addr = 0; + static u32 domain; + int ret; + + port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + port->node = np; + port->dev = &pdev->dev; + + ret = xgene_pcie_map_reg(port, pdev, &cfg_addr); + if (ret) + return ret; + + ret = xgene_pcie_init_port(port); + if (ret) + goto skip; + xgene_pcie_program_core(port->csr_base); + xgene_pcie_setup_root_complex(port); + ret = xgene_pcie_parse_map_ranges(port, cfg_addr); + if (ret) + goto skip; + ret = xgene_pcie_parse_map_dma_ranges(port); 
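/*
 * Editor's note (hypothetical example, not part of the patch): the
 * "ib-ranges" property consumed by the call above uses the same per-entry
 * cell layout as a standard OF PCI dma-ranges entry:
 *   <pci-flags pci-addr-hi pci-addr-lo> <cpu-addr (parent #address-cells)> <size-hi size-lo>
 * With a 2-cell parent address, a single 4 GB 64-bit prefetchable inbound
 * window mapped 1:1 at 0x80000000 might look like:
 *   ib-ranges = <0x43000000 0x0 0x80000000  0x0 0x80000000  0x1 0x00000000>;
 * (values are illustrative only; 0x43000000 is the usual OF encoding for
 * 64-bit prefetchable memory space)
 */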
+ if (ret) + goto skip; + xgene_pcie_poll_linkup(port, &lanes, &speed); +skip: + if (!port->link_up) + dev_info(port->dev, "(rc) link down\n"); + else + dev_info(port->dev, "(rc) x%d gen-%d link up\n", + lanes, speed + 1); + platform_set_drvdata(pdev, port); + memset(&xgene_pcie_hw, 0, sizeof(xgene_pcie_hw)); + xgene_pcie_hw.domain = domain++; + xgene_pcie_hw.private_data = (void **)&port; + xgene_pcie_hw.nr_controllers = 1; + xgene_pcie_hw.setup = xgene_pcie_setup; + xgene_pcie_hw.map_irq = of_irq_parse_and_map_pci; + xgene_pcie_hw.ops = &xgene_pcie_ops; + pci_common_init(&xgene_pcie_hw); + return 0; +} + +static const struct of_device_id xgene_pcie_match_table[] = { + {.compatible = "apm,xgene-pcie",}, + {}, +}; + +static struct platform_driver xgene_pcie_driver = { + .driver = { + .name = "xgene-pcie", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(xgene_pcie_match_table), + }, + .probe = xgene_pcie_probe_bridge, +}; +module_platform_driver(xgene_pcie_driver); + +MODULE_AUTHOR("Tanmay Inamdar "); +MODULE_DESCRIPTION("APM X-Gene PCIe driver"); +MODULE_LICENSE("GPL v2"); --- linux-3.13.0.orig/drivers/pci/host/pcie-designware.c +++ linux-3.13.0/drivers/pci/host/pcie-designware.c @@ -498,13 +498,13 @@ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE); dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE); dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, PCIE_ATU_LIMIT); dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); } static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) @@ -513,7 +513,6 @@ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE); dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE); dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, @@ -521,6 +520,7 @@ dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); } static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) @@ -529,7 +529,6 @@ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE); dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE); dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1, @@ -537,6 +536,7 @@ dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), PCIE_ATU_UPPER_TARGET); + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); } static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, @@ -773,7 +773,7 @@ /* setup RC BARs */ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0); - dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1); + dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1); /* setup interrupt pins */ 
dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val); --- linux-3.13.0.orig/drivers/pci/hotplug/acpiphp_glue.c +++ linux-3.13.0/drivers/pci/hotplug/acpiphp_glue.c @@ -706,6 +706,17 @@ return (unsigned int)sta; } +static inline bool device_status_valid(unsigned int sta) +{ + /* + * ACPI spec says that _STA may return bit 0 clear with bit 3 set + * if the device is valid but does not require a device driver to be + * loaded (Section 6.3.7 of ACPI 5.0A). + */ + unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING; + return (sta & mask) == mask; +} + /** * trim_stale_devices - remove PCI devices that are not responding. * @dev: PCI device to start walking the hierarchy from. @@ -721,7 +732,7 @@ unsigned long long sta; status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); - alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL) + alive = (ACPI_SUCCESS(status) && device_status_valid(sta)) || acpiphp_no_hotplug(handle); } if (!alive) { @@ -764,7 +775,7 @@ mutex_lock(&slot->crit_sect); if (slot_no_hotplug(slot)) { ; /* do nothing */ - } else if (get_slot_status(slot) == ACPI_STA_ALL) { + } else if (device_status_valid(get_slot_status(slot))) { /* remove stale devices if any */ list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) --- linux-3.13.0.orig/drivers/pci/hotplug/cpci_hotplug_pci.c +++ linux-3.13.0/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -282,12 +282,13 @@ } parent = slot->dev->bus; - list_for_each_entry(dev, &parent->devices, bus_list) + list_for_each_entry(dev, &parent->devices, bus_list) { if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) continue; if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) pci_hp_add_bridge(dev); + } pci_assign_unassigned_bridge_resources(parent->self); --- linux-3.13.0.orig/drivers/pci/hotplug/shpchp_ctrl.c +++ linux-3.13.0/drivers/pci/hotplug/shpchp_ctrl.c @@ -282,8 +282,8 @@ return WRONG_BUS_FREQUENCY; } - bsp = ctrl->pci_dev->bus->cur_bus_speed; - msp = ctrl->pci_dev->bus->max_bus_speed; + bsp = ctrl->pci_dev->subordinate->cur_bus_speed; + msp = ctrl->pci_dev->subordinate->max_bus_speed; /* Check if there are other slots or devices on the same bus */ if (!list_empty(&ctrl->pci_dev->subordinate->devices)) --- linux-3.13.0.orig/drivers/pci/msi.c +++ linux-3.13.0/drivers/pci/msi.c @@ -563,6 +563,20 @@ return ret; } +static int msi_verify_entries(struct pci_dev *dev) +{ + struct msi_desc *entry; + + list_for_each_entry(entry, &dev->msi_list, list) { + if (!dev->no_64bit_msi || !entry->msg.address_hi) + continue; + dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" + " tried to assign one above 4G\n"); + return -EIO; + } + return 0; +} + /** * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function @@ -616,6 +630,13 @@ return ret; } + ret = msi_verify_entries(dev); + if (ret) { + msi_mask_irq(entry, mask, ~mask); + free_msi_irqs(dev); + return ret; + } + ret = populate_msi_sysfs(dev); if (ret) { msi_mask_irq(entry, mask, ~mask); @@ -731,6 +752,11 @@ if (ret) goto error; + /* Check if all MSI entries honor device restrictions */ + ret = msi_verify_entries(dev); + if (ret) + goto error; + /* * Some devices require MSI-X to be enabled before we can touch the * MSI-X registers. 
We need to mask all the vectors to prevent @@ -1079,3 +1105,77 @@ if (dev->msix_cap) msix_set_enable(dev, 0); } + +/** + * pci_enable_msi_range - configure device's MSI capability structure + * @dev: device to configure + * @minvec: minimal number of interrupts to configure + * @maxvec: maximum number of interrupts to configure + * + * This function tries to allocate a maximum possible number of interrupts in a + * range between @minvec and @maxvec. It returns a negative errno if an error + * occurs. If it succeeds, it returns the actual number of interrupts allocated + * and updates the @dev's irq member to the lowest new interrupt number; + * the other interrupt numbers allocated to this device are consecutive. + **/ +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +EXPORT_SYMBOL(pci_enable_msi_range); + +/** + * pci_enable_msix_range - configure device's MSI-X capability structure + * @dev: pointer to the pci_dev data structure of MSI-X device function + * @entries: pointer to an array of MSI-X entries + * @minvec: minimum number of MSI-X irqs requested + * @maxvec: maximum number of MSI-X irqs requested + * + * Setup the MSI-X capability structure of device function with a maximum + * possible number of interrupts in the range between @minvec and @maxvec + * upon its software driver call to request for MSI-X mode enabled on its + * hardware device function. It returns a negative errno if an error occurs. + * If it succeeds, it returns the actual number of interrupts allocated and + * indicates the successful configuration of MSI-X capability structure + * with new allocated MSI-X interrupts. + **/ +int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +EXPORT_SYMBOL(pci_enable_msix_range); --- linux-3.13.0.orig/drivers/pci/pci-driver.c +++ linux-3.13.0/drivers/pci/pci-driver.c @@ -1324,7 +1324,7 @@ if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) return -ENOMEM; - if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", + if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), --- linux-3.13.0.orig/drivers/pci/pci-sysfs.c +++ linux-3.13.0/drivers/pci/pci-sysfs.c @@ -178,7 +178,7 @@ { struct pci_dev *pci_dev = to_pci_dev(dev); - return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n", + return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n", pci_dev->vendor, pci_dev->device, pci_dev->subsystem_vendor, pci_dev->subsystem_device, (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), @@ -186,7 +186,7 @@ } static DEVICE_ATTR_RO(modalias); -static ssize_t enabled_store(struct device *dev, +static ssize_t enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -212,7 +212,7 @@ return result < 0 ? 
result : count; } -static ssize_t enabled_show(struct device *dev, +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev; @@ -220,7 +220,7 @@ pdev = to_pci_dev (dev); return sprintf (buf, "%u\n", atomic_read(&pdev->enable_cnt)); } -static DEVICE_ATTR_RW(enabled); +static DEVICE_ATTR_RW(enable); #ifdef CONFIG_NUMA static ssize_t @@ -531,7 +531,7 @@ #endif &dev_attr_dma_mask_bits.attr, &dev_attr_consistent_dma_mask_bits.attr, - &dev_attr_enabled.attr, + &dev_attr_enable.attr, &dev_attr_broken_parity_status.attr, &dev_attr_msi_bus.attr, #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) --- linux-3.13.0.orig/drivers/pci/pci.c +++ linux-3.13.0/drivers/pci/pci.c @@ -782,12 +782,6 @@ if (!__pci_complete_power_transition(dev, state)) error = 0; - /* - * When aspm_policy is "powersave" this call ensures - * that ASPM is configured. - */ - if (!error && dev->bus->self) - pcie_aspm_powersave_config_link(dev->bus->self); return error; } @@ -1120,15 +1114,34 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) { int err; + struct pci_dev *bridge; + u16 cmd; + u8 pin; err = pci_set_power_state(dev, PCI_D0); if (err < 0 && err != -EIO) return err; + + bridge = pci_upstream_bridge(dev); + if (bridge) + pcie_aspm_powersave_config_link(bridge); + err = pcibios_enable_device(dev, bars); if (err < 0) return err; pci_fixup_device(pci_fixup_enable, dev); + if (dev->msi_enabled || dev->msix_enabled) + return 0; + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (pin) { + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & PCI_COMMAND_INTX_DISABLE) + pci_write_config_word(dev, PCI_COMMAND, + cmd & ~PCI_COMMAND_INTX_DISABLE); + } + return 0; } --- linux-3.13.0.orig/drivers/pci/pcie/aspm.c +++ linux-3.13.0/drivers/pci/pcie/aspm.c @@ -782,24 +782,6 @@ } EXPORT_SYMBOL(pci_disable_link_state); -void pcie_clear_aspm(struct pci_bus *bus) -{ - struct pci_dev *child; - - if (aspm_force) - return; - - /* - * Clear any ASPM setup that the firmware has carried out on this bus - */ - list_for_each_entry(child, &bus->devices, bus_list) { - __pci_disable_link_state(child, PCIE_LINK_STATE_L0S | - PCIE_LINK_STATE_L1 | - PCIE_LINK_STATE_CLKPM, - false, true); - } -} - static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp) { int i; --- linux-3.13.0.orig/drivers/pci/probe.c +++ linux-3.13.0/drivers/pci/probe.c @@ -214,14 +214,17 @@ res->flags |= IORESOURCE_SIZEALIGN; if (res->flags & IORESOURCE_IO) { l &= PCI_BASE_ADDRESS_IO_MASK; + sz &= PCI_BASE_ADDRESS_IO_MASK; mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT; } else { l &= PCI_BASE_ADDRESS_MEM_MASK; + sz &= PCI_BASE_ADDRESS_MEM_MASK; mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; } } else { res->flags |= (l & IORESOURCE_ROM_ENABLE); l &= PCI_ROM_ADDRESS_MASK; + sz &= PCI_ROM_ADDRESS_MASK; mask = (u32)PCI_ROM_ADDRESS_MASK; } --- linux-3.13.0.orig/drivers/pci/quirks.c +++ linux-3.13.0/drivers/pci/quirks.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* isa_dma_bridge_buggy */ #include "pci.h" @@ -40,6 +41,21 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); +/* The BAR0 ~ BAR4 of Marvell 9125 device can't be accessed +* by IO resource file, and need to skip the files +*/ +static void quirk_marvell_mask_bar(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < 5; i++) + if (dev->resource[i].start) + dev->resource[i].start = + dev->resource[i].end = 0; +} 
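/*
 * Editor's note (clarification, not part of the patch): the quirk above zeroes
 * resources 0-4 so that BAR0-BAR4 of the Marvell 9125 report a zero length;
 * the kernel then skips creating and using the corresponding sysfs resource
 * files for them. The header fixup registered just below runs early, during
 * device enumeration, before those resources are claimed.
 */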
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125, + quirk_marvell_mask_bar); + /* The Mellanox Tavor device gives false positive parity errors * Mark this device with a broken_parity_status, to allow * PCI scanning code to "skip" this now blacklisted device. @@ -287,6 +303,25 @@ } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); +/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */ +static void quirk_extend_bar_to_page(struct pci_dev *dev) +{ + int i; + + for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + struct resource *r = &dev->resource[i]; + + if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { + r->end = PAGE_SIZE - 1; + r->start = 0; + r->flags |= IORESOURCE_UNSET; + dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n", + i, r); + } + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page); + /* * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. * If it's needed, re-allocate the region. @@ -303,19 +338,52 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); +static void quirk_io(struct pci_dev *dev, int pos, unsigned size, + const char *name) +{ + u32 region; + struct pci_bus_region bus_region; + struct resource *res = dev->resource + pos; + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion); + + if (!region) + return; + + res->name = pci_name(dev); + res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; + res->flags |= + (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); + region &= ~(size - 1); + + /* Convert from PCI bus to resource space */ + bus_region.start = region; + bus_region.end = region + size - 1; + pcibios_bus_to_resource(dev, res, &bus_region); + + dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", + name, PCI_BASE_ADDRESS_0 + (pos << 2), res); +} + /* * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS * ver. 1.33 20070103) don't set the correct ISA PCI region header info. * BAR0 should be 8 bytes; instead, it may be set to something like 8k * (which conflicts w/ BAR1's memory range). + * + * CS553x's ISA PCI BARs may also be read-only (ref: + * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). 
*/ static void quirk_cs5536_vsa(struct pci_dev *dev) { + static char *name = "CS5536 ISA bridge"; + if (pci_resource_len(dev, 0) != 8) { - struct resource *res = &dev->resource[0]; - res->end = res->start + 8 - 1; - dev_info(&dev->dev, "CS5536 ISA bridge bug detected " - "(incorrect header); workaround applied.\n"); + quirk_io(dev, 0, 8, name); /* SMB */ + quirk_io(dev, 1, 256, name); /* GPIO */ + quirk_io(dev, 2, 64, name); /* MFGPT */ + dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", + name); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); @@ -2949,6 +3017,7 @@ } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); /* * PCI devices which are on Intel chips can skip the 10ms delay --- linux-3.13.0.orig/drivers/pci/rom.c +++ linux-3.13.0/drivers/pci/rom.c @@ -69,6 +69,7 @@ { void __iomem *image; int last_image; + unsigned length; image = rom; do { @@ -91,9 +92,9 @@ if (readb(pds + 3) != 'R') break; last_image = readb(pds + 21) & 0x80; - /* this length is reliable */ - image += readw(pds + 16) * 512; - } while (!last_image); + length = readw(pds + 16); + image += length * 512; + } while (length && !last_image); /* never return a size larger than the PCI resource window */ /* there are known ROMs that get the size wrong */ --- linux-3.13.0.orig/drivers/pci/xen-pcifront.c +++ linux-3.13.0/drivers/pci/xen-pcifront.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #define INVALID_GRANT_REF (0) @@ -1138,6 +1139,9 @@ if (!xen_pv_domain() || xen_initial_domain()) return -ENODEV; + if (!xen_has_pv_devices()) + return -ENODEV; + pci_frontend_registrar(1 /* enable */); return xenbus_register_frontend(&xenpci_driver); --- linux-3.13.0.orig/drivers/phy/Kconfig +++ linux-3.13.0/drivers/phy/Kconfig @@ -51,4 +51,11 @@ help Support for Display Port PHY found on Samsung EXYNOS SoCs. +config PHY_XGENE + tristate "APM X-Gene 15Gbps PHY support" + depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST) + select GENERIC_PHY + help + This option enables support for APM X-Gene SoC multi-purpose PHY. 
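# Editor's note (not part of the patch): PHY_XGENE is tristate, so it can be
# built into the kernel (y) or as a module (m); "select GENERIC_PHY" pulls in
# the generic PHY framework through which consumer drivers look up this PHY.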
+ endmenu --- linux-3.13.0.orig/drivers/phy/Makefile +++ linux-3.13.0/drivers/phy/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO) += phy-exynos-mipi-video.o obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o +obj-$(CONFIG_PHY_XGENE) += phy-xgene.o --- linux-3.13.0.orig/drivers/phy/phy-core.c +++ linux-3.13.0/drivers/phy/phy-core.c @@ -50,7 +50,9 @@ static int devm_phy_match(struct device *dev, void *res, void *match_data) { - return res == match_data; + struct phy **phy = res; + + return *phy == match_data; } static struct phy *phy_lookup(struct device *device, const char *port) @@ -161,6 +163,8 @@ dev_err(&phy->dev, "phy init failed --> %d\n", ret); goto out; } + } else { + ret = 0; /* Override possible ret == -ENOTSUPP */ } out: @@ -209,6 +213,8 @@ dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); goto out; } + } else { + ret = 0; /* Override possible ret == -ENOTSUPP */ } out: @@ -477,8 +483,9 @@ return phy; put_dev: - put_device(&phy->dev); - ida_remove(&phy_ida, phy->id); + put_device(&phy->dev); /* calls phy_release() which frees resources */ + return ERR_PTR(ret); + free_phy: kfree(phy); return ERR_PTR(ret); @@ -662,7 +669,7 @@ phy = to_phy(dev); dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); - ida_remove(&phy_ida, phy->id); + ida_simple_remove(&phy_ida, phy->id); kfree(phy); } --- linux-3.13.0.orig/drivers/phy/phy-twl4030-usb.c +++ linux-3.13.0/drivers/phy/phy-twl4030-usb.c @@ -560,7 +560,15 @@ */ omap_musb_mailbox(status); } - sysfs_notify(&twl->dev->kobj, NULL, "vbus"); + + /* don't schedule during sleep - irq works right then */ + if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) { + cancel_delayed_work(&twl->id_workaround_work); + schedule_delayed_work(&twl->id_workaround_work, HZ); + } + + if (irq) + sysfs_notify(&twl->dev->kobj, NULL, "vbus"); return IRQ_HANDLED; } @@ -569,29 +577,8 @@ { struct twl4030_usb *twl = container_of(work, struct twl4030_usb, id_workaround_work.work); - enum omap_musb_vbus_id_status status; - bool status_changed = false; - - status = twl4030_usb_linkstat(twl); - - spin_lock_irq(&twl->lock); - if (status >= 0 && status != twl->linkstat) { - twl->linkstat = status; - status_changed = true; - } - spin_unlock_irq(&twl->lock); - if (status_changed) { - dev_dbg(twl->dev, "handle missing status change to %d\n", - status); - omap_musb_mailbox(status); - } - - /* don't schedule during sleep - irq works right then */ - if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) { - cancel_delayed_work(&twl->id_workaround_work); - schedule_delayed_work(&twl->id_workaround_work, HZ); - } + twl4030_usb_irq(0, twl); } static int twl4030_phy_init(struct phy *phy) --- linux-3.13.0.orig/drivers/phy/phy-xgene.c +++ linux-3.13.0/drivers/phy/phy-xgene.c @@ -0,0 +1,1768 @@ +/* + * AppliedMicro X-Gene Multi-purpose PHY driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Loc Ho + * Tuan Phan + * Suman Tripathi + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * The APM X-Gene PHY consists of two PLL clock macro's (CMU) and lanes. + * The first PLL clock macro is used for internal reference clock. The second + * PLL clock macro is used to generate the clock for the PHY. This driver + * configures the first PLL CMU, the second PLL CMU, and programs the PHY to + * operate according to the mode of operation. The first PLL CMU is only + * required if internal clock is enabled. + * + * Logical Layer Out Of HW module units: + * + * ----------------- + * | Internal | |------| + * | Ref PLL CMU |----| | ------------- --------- + * ------------ ---- | MUX |-----|PHY PLL CMU|----| Serdes| + * | | | | --------- + * External Clock ------| | ------------- + * |------| + * + * The Ref PLL CMU CSR (Configuration System Registers) is accessed + * indirectly from the SDS offset at 0x2000. It is only required for + * internal reference clock. + * The PHY PLL CMU CSR is accessed indirectly from the SDS offset at 0x0000. + * The Serdes CSR is accessed indirectly from the SDS offset at 0x0400. + * + * The Ref PLL CMU can be located within the same PHY IP or outside the PHY IP + * due to shared Ref PLL CMU. For PHY with Ref PLL CMU shared with another IP, + * it is located outside the PHY IP. This is the case for the PHY located + * at 0x1f23a000 (SATA Port 4/5). For such PHY, another resource is required + * to located the SDS/Ref PLL CMU module and its clock for that IP enabled. + * + * Currently, this driver only supports Gen3 SATA mode with external clock. + */ +#include +#include +#include +#include +#include +#include + +/* Max 2 lanes per a PHY unit */ +#define MAX_LANE 2 + +/* Register offset inside the PHY */ +#define SERDES_PLL_INDIRECT_OFFSET 0x0000 +#define SERDES_PLL_REF_INDIRECT_OFFSET 0x2000 +#define SERDES_INDIRECT_OFFSET 0x0400 +#define SERDES_LANE_STRIDE 0x0200 + +/* Some default Serdes parameters */ +#define DEFAULT_SATA_TXBOOST_GAIN { 0x1e, 0x1e, 0x1e } +#define DEFAULT_SATA_TXEYEDIRECTION { 0x0, 0x0, 0x0 } +#define DEFAULT_SATA_TXEYETUNING { 0xa, 0xa, 0xa } +#define DEFAULT_SATA_SPD_SEL { 0x1, 0x3, 0x7 } +#define DEFAULT_SATA_TXAMP { 0x8, 0x8, 0x8 } +#define DEFAULT_SATA_TXCN1 { 0x2, 0x2, 0x2 } +#define DEFAULT_SATA_TXCN2 { 0x0, 0x0, 0x0 } +#define DEFAULT_SATA_TXCP1 { 0xa, 0xa, 0xa } + +#define SATA_SPD_SEL_GEN3 0x7 +#define SATA_SPD_SEL_GEN2 0x3 +#define SATA_SPD_SEL_GEN1 0x1 + +#define SSC_DISABLE 0 +#define SSC_ENABLE 1 + +#define FBDIV_VAL_50M 0x77 +#define REFDIV_VAL_50M 0x1 +#define FBDIV_VAL_100M 0x3B +#define REFDIV_VAL_100M 0x0 + +/* SATA Clock/Reset CSR */ +#define SATACLKENREG 0x00000000 +#define SATA0_CORE_CLKEN 0x00000002 +#define SATA1_CORE_CLKEN 0x00000004 +#define SATASRESETREG 0x00000004 +#define SATA_MEM_RESET_MASK 0x00000020 +#define SATA_MEM_RESET_RD(src) (((src) & 0x00000020) >> 5) +#define SATA_SDS_RESET_MASK 0x00000004 +#define SATA_CSR_RESET_MASK 0x00000001 +#define SATA_CORE_RESET_MASK 0x00000002 +#define SATA_PMCLK_RESET_MASK 0x00000010 +#define SATA_PCLK_RESET_MASK 0x00000008 + +/* SDS CSR used for PHY Indirect access */ +#define SATA_ENET_SDS_PCS_CTL0 0x00000000 +#define REGSPEC_CFG_I_TX_WORDMODE0_SET(dst, src) \ + (((dst) & ~0x00070000) | (((u32) (src) << 16) & 0x00070000)) +#define REGSPEC_CFG_I_RX_WORDMODE0_SET(dst, src) \ + (((dst) & ~0x00e00000) | (((u32) (src) << 21) & 0x00e00000)) +#define SATA_ENET_SDS_CTL0 0x0000000c +#define REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(dst, src) \ + (((dst) & 
~0x00007fff) | (((u32) (src)) & 0x00007fff)) +#define SATA_ENET_SDS_CTL1 0x00000010 +#define CFG_I_SPD_SEL_CDR_OVR1_SET(dst, src) \ + (((dst) & ~0x0000000f) | (((u32) (src)) & 0x0000000f)) +#define SATA_ENET_SDS_RST_CTL 0x00000024 +#define SATA_ENET_SDS_IND_CMD_REG 0x0000003c +#define CFG_IND_WR_CMD_MASK 0x00000001 +#define CFG_IND_RD_CMD_MASK 0x00000002 +#define CFG_IND_CMD_DONE_MASK 0x00000004 +#define CFG_IND_ADDR_SET(dst, src) \ + (((dst) & ~0x003ffff0) | (((u32) (src) << 4) & 0x003ffff0)) +#define SATA_ENET_SDS_IND_RDATA_REG 0x00000040 +#define SATA_ENET_SDS_IND_WDATA_REG 0x00000044 +#define SATA_ENET_CLK_MACRO_REG 0x0000004c +#define I_RESET_B_SET(dst, src) \ + (((dst) & ~0x00000001) | (((u32) (src)) & 0x00000001)) +#define I_PLL_FBDIV_SET(dst, src) \ + (((dst) & ~0x001ff000) | (((u32) (src) << 12) & 0x001ff000)) +#define I_CUSTOMEROV_SET(dst, src) \ + (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80)) +#define O_PLL_LOCK_RD(src) (((src) & 0x40000000) >> 30) +#define O_PLL_READY_RD(src) (((src) & 0x80000000) >> 31) + +/* PLL Clock Macro Unit (CMU) CSR accessing from SDS indirectly */ +#define CMU_REG0 0x00000 +#define CMU_REG0_PLL_REF_SEL_MASK 0x00002000 +#define CMU_REG0_PLL_REF_SEL_SET(dst, src) \ + (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000)) +#define CMU_REG0_PDOWN_MASK 0x00004000 +#define CMU_REG0_CAL_COUNT_RESOL_SET(dst, src) \ + (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0)) +#define CMU_REG1 0x00002 +#define CMU_REG1_PLL_CP_SET(dst, src) \ + (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00)) +#define CMU_REG1_PLL_MANUALCAL_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define CMU_REG1_PLL_CP_SEL_SET(dst, src) \ + (((dst) & ~0x000003e0) | (((u32) (src) << 5) & 0x000003e0)) +#define CMU_REG1_REFCLK_CMOS_SEL_MASK 0x00000001 +#define CMU_REG1_REFCLK_CMOS_SEL_SET(dst, src) \ + (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) +#define CMU_REG2 0x00004 +#define CMU_REG2_PLL_REFDIV_SET(dst, src) \ + (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) +#define CMU_REG2_PLL_LFRES_SET(dst, src) \ + (((dst) & ~0x0000001e) | (((u32) (src) << 1) & 0x0000001e)) +#define CMU_REG2_PLL_FBDIV_SET(dst, src) \ + (((dst) & ~0x00003fe0) | (((u32) (src) << 5) & 0x00003fe0)) +#define CMU_REG3 0x00006 +#define CMU_REG3_VCOVARSEL_SET(dst, src) \ + (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f)) +#define CMU_REG3_VCO_MOMSEL_INIT_SET(dst, src) \ + (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0)) +#define CMU_REG3_VCO_MANMOMSEL_SET(dst, src) \ + (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00)) +#define CMU_REG4 0x00008 +#define CMU_REG5 0x0000a +#define CMU_REG5_PLL_LFSMCAP_SET(dst, src) \ + (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) +#define CMU_REG5_PLL_LOCK_RESOLUTION_SET(dst, src) \ + (((dst) & ~0x0000000e) | (((u32) (src) << 1) & 0x0000000e)) +#define CMU_REG5_PLL_LFCAP_SET(dst, src) \ + (((dst) & ~0x00003000) | (((u32) (src) << 12) & 0x00003000)) +#define CMU_REG5_PLL_RESETB_MASK 0x00000001 +#define CMU_REG6 0x0000c +#define CMU_REG6_PLL_VREGTRIM_SET(dst, src) \ + (((dst) & ~0x00000600) | (((u32) (src) << 9) & 0x00000600)) +#define CMU_REG6_MAN_PVT_CAL_SET(dst, src) \ + (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) +#define CMU_REG7 0x0000e +#define CMU_REG7_PLL_CALIB_DONE_RD(src) ((0x00004000 & (u32) (src)) >> 14) +#define CMU_REG7_VCO_CAL_FAIL_RD(src) ((0x00000c00 & (u32) (src)) >> 10) +#define CMU_REG8 0x00010 +#define CMU_REG9 
0x00012 +#define CMU_REG9_WORD_LEN_8BIT 0x000 +#define CMU_REG9_WORD_LEN_10BIT 0x001 +#define CMU_REG9_WORD_LEN_16BIT 0x002 +#define CMU_REG9_WORD_LEN_20BIT 0x003 +#define CMU_REG9_WORD_LEN_32BIT 0x004 +#define CMU_REG9_WORD_LEN_40BIT 0x005 +#define CMU_REG9_WORD_LEN_64BIT 0x006 +#define CMU_REG9_WORD_LEN_66BIT 0x007 +#define CMU_REG9_TX_WORD_MODE_CH1_SET(dst, src) \ + (((dst) & ~0x00000380) | (((u32) (src) << 7) & 0x00000380)) +#define CMU_REG9_TX_WORD_MODE_CH0_SET(dst, src) \ + (((dst) & ~0x00000070) | (((u32) (src) << 4) & 0x00000070)) +#define CMU_REG9_PLL_POST_DIVBY2_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define CMU_REG9_VBG_BYPASSB_SET(dst, src) \ + (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) +#define CMU_REG9_IGEN_BYPASS_SET(dst, src) \ + (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) +#define CMU_REG10 0x00014 +#define CMU_REG10_VREG_REFSEL_SET(dst, src) \ + (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) +#define CMU_REG11 0x00016 +#define CMU_REG12 0x00018 +#define CMU_REG12_STATE_DELAY9_SET(dst, src) \ + (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0)) +#define CMU_REG13 0x0001a +#define CMU_REG14 0x0001c +#define CMU_REG15 0x0001e +#define CMU_REG16 0x00020 +#define CMU_REG16_PVT_DN_MAN_ENA_MASK 0x00000001 +#define CMU_REG16_PVT_UP_MAN_ENA_MASK 0x00000002 +#define CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(dst, src) \ + (((dst) & ~0x0000001c) | (((u32) (src) << 2) & 0x0000001c)) +#define CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(dst, src) \ + (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) +#define CMU_REG16_BYPASS_PLL_LOCK_SET(dst, src) \ + (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020)) +#define CMU_REG17 0x00022 +#define CMU_REG17_PVT_CODE_R2A_SET(dst, src) \ + (((dst) & ~0x00007f00) | (((u32) (src) << 8) & 0x00007f00)) +#define CMU_REG17_RESERVED_7_SET(dst, src) \ + (((dst) & ~0x000000e0) | (((u32) (src) << 5) & 0x000000e0)) +#define CMU_REG17_PVT_TERM_MAN_ENA_MASK 0x00008000 +#define CMU_REG18 0x00024 +#define CMU_REG19 0x00026 +#define CMU_REG20 0x00028 +#define CMU_REG21 0x0002a +#define CMU_REG22 0x0002c +#define CMU_REG23 0x0002e +#define CMU_REG24 0x00030 +#define CMU_REG25 0x00032 +#define CMU_REG26 0x00034 +#define CMU_REG26_FORCE_PLL_LOCK_SET(dst, src) \ + (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) +#define CMU_REG27 0x00036 +#define CMU_REG28 0x00038 +#define CMU_REG29 0x0003a +#define CMU_REG30 0x0003c +#define CMU_REG30_LOCK_COUNT_SET(dst, src) \ + (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006)) +#define CMU_REG30_PCIE_MODE_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define CMU_REG31 0x0003e +#define CMU_REG32 0x00040 +#define CMU_REG32_FORCE_VCOCAL_START_MASK 0x00004000 +#define CMU_REG32_PVT_CAL_WAIT_SEL_SET(dst, src) \ + (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006)) +#define CMU_REG32_IREF_ADJ_SET(dst, src) \ + (((dst) & ~0x00000180) | (((u32) (src) << 7) & 0x00000180)) +#define CMU_REG33 0x00042 +#define CMU_REG34 0x00044 +#define CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(dst, src) \ + (((dst) & ~0x0000000f) | (((u32) (src) << 0) & 0x0000000f)) +#define CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(dst, src) \ + (((dst) & ~0x00000f00) | (((u32) (src) << 8) & 0x00000f00)) +#define CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(dst, src) \ + (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0)) +#define CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(dst, src) \ + (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 
0x0000f000)) +#define CMU_REG35 0x00046 +#define CMU_REG35_PLL_SSC_MOD_SET(dst, src) \ + (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00)) +#define CMU_REG36 0x00048 +#define CMU_REG36_PLL_SSC_EN_SET(dst, src) \ + (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010)) +#define CMU_REG36_PLL_SSC_VSTEP_SET(dst, src) \ + (((dst) & ~0x0000ffc0) | (((u32) (src) << 6) & 0x0000ffc0)) +#define CMU_REG36_PLL_SSC_DSMSEL_SET(dst, src) \ + (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020)) +#define CMU_REG37 0x0004a +#define CMU_REG38 0x0004c +#define CMU_REG39 0x0004e + +/* PHY lane CSR accessing from SDS indirectly */ +#define RXTX_REG0 0x000 +#define RXTX_REG0_CTLE_EQ_HR_SET(dst, src) \ + (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) +#define RXTX_REG0_CTLE_EQ_QR_SET(dst, src) \ + (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0)) +#define RXTX_REG0_CTLE_EQ_FR_SET(dst, src) \ + (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e)) +#define RXTX_REG1 0x002 +#define RXTX_REG1_RXACVCM_SET(dst, src) \ + (((dst) & ~0x0000f000) | (((u32) (src) << 12) & 0x0000f000)) +#define RXTX_REG1_CTLE_EQ_SET(dst, src) \ + (((dst) & ~0x00000f80) | (((u32) (src) << 7) & 0x00000f80)) +#define RXTX_REG1_RXVREG1_SET(dst, src) \ + (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060)) +#define RXTX_REG1_RXIREF_ADJ_SET(dst, src) \ + (((dst) & ~0x00000006) | (((u32) (src) << 1) & 0x00000006)) +#define RXTX_REG2 0x004 +#define RXTX_REG2_VTT_ENA_SET(dst, src) \ + (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) +#define RXTX_REG2_TX_FIFO_ENA_SET(dst, src) \ + (((dst) & ~0x00000020) | (((u32) (src) << 5) & 0x00000020)) +#define RXTX_REG2_VTT_SEL_SET(dst, src) \ + (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0)) +#define RXTX_REG4 0x008 +#define RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK 0x00000040 +#define RXTX_REG4_TX_DATA_RATE_SET(dst, src) \ + (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) +#define RXTX_REG4_TX_WORD_MODE_SET(dst, src) \ + (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) +#define RXTX_REG5 0x00a +#define RXTX_REG5_TX_CN1_SET(dst, src) \ + (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) +#define RXTX_REG5_TX_CP1_SET(dst, src) \ + (((dst) & ~0x000007e0) | (((u32) (src) << 5) & 0x000007e0)) +#define RXTX_REG5_TX_CN2_SET(dst, src) \ + (((dst) & ~0x0000001f) | (((u32) (src) << 0) & 0x0000001f)) +#define RXTX_REG6 0x00c +#define RXTX_REG6_TXAMP_CNTL_SET(dst, src) \ + (((dst) & ~0x00000780) | (((u32) (src) << 7) & 0x00000780)) +#define RXTX_REG6_TXAMP_ENA_SET(dst, src) \ + (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) +#define RXTX_REG6_RX_BIST_ERRCNT_RD_SET(dst, src) \ + (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) +#define RXTX_REG6_TX_IDLE_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define RXTX_REG6_RX_BIST_RESYNC_SET(dst, src) \ + (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) +#define RXTX_REG7 0x00e +#define RXTX_REG7_RESETB_RXD_MASK 0x00000100 +#define RXTX_REG7_RESETB_RXA_MASK 0x00000080 +#define RXTX_REG7_BIST_ENA_RX_SET(dst, src) \ + (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) +#define RXTX_REG7_RX_WORD_MODE_SET(dst, src) \ + (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) +#define RXTX_REG8 0x010 +#define RXTX_REG8_CDR_LOOP_ENA_SET(dst, src) \ + (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000)) +#define RXTX_REG8_CDR_BYPASS_RXLOS_SET(dst, src) \ + (((dst) & ~0x00000800) | (((u32) 
(src) << 11) & 0x00000800)) +#define RXTX_REG8_SSC_ENABLE_SET(dst, src) \ + (((dst) & ~0x00000200) | (((u32) (src) << 9) & 0x00000200)) +#define RXTX_REG8_SD_VREF_SET(dst, src) \ + (((dst) & ~0x000000f0) | (((u32) (src) << 4) & 0x000000f0)) +#define RXTX_REG8_SD_DISABLE_SET(dst, src) \ + (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) +#define RXTX_REG7 0x00e +#define RXTX_REG7_RESETB_RXD_SET(dst, src) \ + (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) +#define RXTX_REG7_RESETB_RXA_SET(dst, src) \ + (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080)) +#define RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK 0x00004000 +#define RXTX_REG7_LOOP_BACK_ENA_CTLE_SET(dst, src) \ + (((dst) & ~0x00004000) | (((u32) (src) << 14) & 0x00004000)) +#define RXTX_REG11 0x016 +#define RXTX_REG11_PHASE_ADJUST_LIMIT_SET(dst, src) \ + (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) +#define RXTX_REG12 0x018 +#define RXTX_REG12_LATCH_OFF_ENA_SET(dst, src) \ + (((dst) & ~0x00002000) | (((u32) (src) << 13) & 0x00002000)) +#define RXTX_REG12_SUMOS_ENABLE_SET(dst, src) \ + (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) +#define RXTX_REG12_RX_DET_TERM_ENABLE_MASK 0x00000002 +#define RXTX_REG12_RX_DET_TERM_ENABLE_SET(dst, src) \ + (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) +#define RXTX_REG13 0x01a +#define RXTX_REG14 0x01c +#define RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(dst, src) \ + (((dst) & ~0x0000003f) | (((u32) (src) << 0) & 0x0000003f)) +#define RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(dst, src) \ + (((dst) & ~0x00000040) | (((u32) (src) << 6) & 0x00000040)) +#define RXTX_REG26 0x034 +#define RXTX_REG26_PERIOD_ERROR_LATCH_SET(dst, src) \ + (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) +#define RXTX_REG26_BLWC_ENA_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define RXTX_REG21 0x02a +#define RXTX_REG21_DO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) +#define RXTX_REG21_XO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) +#define RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(src) ((0x0000000f & (u32)(src))) +#define RXTX_REG22 0x02c +#define RXTX_REG22_SO_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) +#define RXTX_REG22_EO_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) +#define RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(src) ((0x0000000f & (u32)(src))) +#define RXTX_REG23 0x02e +#define RXTX_REG23_DE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) +#define RXTX_REG23_XE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) +#define RXTX_REG24 0x030 +#define RXTX_REG24_EE_LATCH_CALOUT_RD(src) ((0x0000fc00 & (u32) (src)) >> 10) +#define RXTX_REG24_SE_LATCH_CALOUT_RD(src) ((0x000003f0 & (u32) (src)) >> 4) +#define RXTX_REG27 0x036 +#define RXTX_REG28 0x038 +#define RXTX_REG31 0x03e +#define RXTX_REG38 0x04c +#define RXTX_REG38_CUSTOMER_PINMODE_INV_SET(dst, src) \ + (((dst) & 0x0000fffe) | (((u32) (src) << 1) & 0x0000fffe)) +#define RXTX_REG39 0x04e +#define RXTX_REG40 0x050 +#define RXTX_REG41 0x052 +#define RXTX_REG42 0x054 +#define RXTX_REG43 0x056 +#define RXTX_REG44 0x058 +#define RXTX_REG45 0x05a +#define RXTX_REG46 0x05c +#define RXTX_REG47 0x05e +#define RXTX_REG48 0x060 +#define RXTX_REG49 0x062 +#define RXTX_REG50 0x064 +#define RXTX_REG51 0x066 +#define RXTX_REG52 0x068 +#define RXTX_REG53 0x06a +#define RXTX_REG54 0x06c +#define RXTX_REG55 0x06e +#define RXTX_REG61 0x07a +#define RXTX_REG61_ISCAN_INBERT_SET(dst, src) \ + (((dst) & ~0x00000010) | (((u32) (src) << 4) & 0x00000010)) 
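/*
 * Editor's note (not part of the patch): every *_SET(dst, src) helper above
 * and below follows the same read-modify-write pattern: clear the field's
 * bits in dst, then OR in src shifted to the field position and masked to the
 * field width. For example:
 *   RXTX_REG61_SPD_SEL_CDR_SET(0, 0x7) == (0x7 << 10) & 0x00003c00 == 0x1c00
 * The matching *_RD(src) helpers extract a field by masking and shifting down.
 */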
+#define RXTX_REG61_LOADFREQ_SHIFT_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(dst, src) \ + (((dst) & ~0x000000c0) | (((u32) (src) << 6) & 0x000000c0)) +#define RXTX_REG61_SPD_SEL_CDR_SET(dst, src) \ + (((dst) & ~0x00003c00) | (((u32) (src) << 10) & 0x00003c00)) +#define RXTX_REG62 0x07c +#define RXTX_REG62_PERIOD_H1_QLATCH_SET(dst, src) \ + (((dst) & ~0x00003800) | (((u32) (src) << 11) & 0x00003800)) +#define RXTX_REG81 0x0a2 +#define RXTX_REG89_MU_TH7_SET(dst, src) \ + (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) +#define RXTX_REG89_MU_TH8_SET(dst, src) \ + (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0)) +#define RXTX_REG89_MU_TH9_SET(dst, src) \ + (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e)) +#define RXTX_REG96 0x0c0 +#define RXTX_REG96_MU_FREQ1_SET(dst, src) \ + (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) +#define RXTX_REG96_MU_FREQ2_SET(dst, src) \ + (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0)) +#define RXTX_REG96_MU_FREQ3_SET(dst, src) \ + (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e)) +#define RXTX_REG99 0x0c6 +#define RXTX_REG99_MU_PHASE1_SET(dst, src) \ + (((dst) & ~0x0000f800) | (((u32) (src) << 11) & 0x0000f800)) +#define RXTX_REG99_MU_PHASE2_SET(dst, src) \ + (((dst) & ~0x000007c0) | (((u32) (src) << 6) & 0x000007c0)) +#define RXTX_REG99_MU_PHASE3_SET(dst, src) \ + (((dst) & ~0x0000003e) | (((u32) (src) << 1) & 0x0000003e)) +#define RXTX_REG102 0x0cc +#define RXTX_REG102_FREQLOOP_LIMIT_SET(dst, src) \ + (((dst) & ~0x00000060) | (((u32) (src) << 5) & 0x00000060)) +#define RXTX_REG114 0x0e4 +#define RXTX_REG121 0x0f2 +#define RXTX_REG121_SUMOS_CAL_CODE_RD(src) ((0x0000003e & (u32)(src)) >> 0x1) +#define RXTX_REG125 0x0fa +#define RXTX_REG125_PQ_REG_SET(dst, src) \ + (((dst) & ~0x0000fe00) | (((u32) (src) << 9) & 0x0000fe00)) +#define RXTX_REG125_SIGN_PQ_SET(dst, src) \ + (((dst) & ~0x00000100) | (((u32) (src) << 8) & 0x00000100)) +#define RXTX_REG125_SIGN_PQ_2C_SET(dst, src) \ + (((dst) & ~0x00000080) | (((u32) (src) << 7) & 0x00000080)) +#define RXTX_REG125_PHZ_MANUALCODE_SET(dst, src) \ + (((dst) & ~0x0000007c) | (((u32) (src) << 2) & 0x0000007c)) +#define RXTX_REG125_PHZ_MANUAL_SET(dst, src) \ + (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) +#define RXTX_REG127 0x0fe +#define RXTX_REG127_FORCE_SUM_CAL_START_MASK 0x00000002 +#define RXTX_REG127_FORCE_LAT_CAL_START_MASK 0x00000004 +#define RXTX_REG127_FORCE_SUM_CAL_START_SET(dst, src) \ + (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) +#define RXTX_REG127_FORCE_LAT_CAL_START_SET(dst, src) \ + (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) +#define RXTX_REG127_LATCH_MAN_CAL_ENA_SET(dst, src) \ + (((dst) & ~0x00000008) | (((u32) (src) << 3) & 0x00000008)) +#define RXTX_REG127_DO_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00)) +#define RXTX_REG127_XO_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0)) +#define RXTX_REG128 0x100 +#define RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(dst, src) \ + (((dst) & ~0x0000000c) | (((u32) (src) << 2) & 0x0000000c)) +#define RXTX_REG128_EO_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00)) +#define RXTX_REG128_SO_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0)) +#define RXTX_REG129 0x102 +#define RXTX_REG129_DE_LATCH_MANCAL_SET(dst, src) 
\ + (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00)) +#define RXTX_REG129_XE_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0)) +#define RXTX_REG130 0x104 +#define RXTX_REG130_EE_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x0000fc00) | (((u32) (src) << 10) & 0x0000fc00)) +#define RXTX_REG130_SE_LATCH_MANCAL_SET(dst, src) \ + (((dst) & ~0x000003f0) | (((u32) (src) << 4) & 0x000003f0)) +#define RXTX_REG145 0x122 +#define RXTX_REG145_TX_IDLE_SATA_SET(dst, src) \ + (((dst) & ~0x00000001) | (((u32) (src) << 0) & 0x00000001)) +#define RXTX_REG145_RXES_ENA_SET(dst, src) \ + (((dst) & ~0x00000002) | (((u32) (src) << 1) & 0x00000002)) +#define RXTX_REG145_RXDFE_CONFIG_SET(dst, src) \ + (((dst) & ~0x0000c000) | (((u32) (src) << 14) & 0x0000c000)) +#define RXTX_REG145_RXVWES_LATENA_SET(dst, src) \ + (((dst) & ~0x00000004) | (((u32) (src) << 2) & 0x00000004)) +#define RXTX_REG147 0x126 +#define RXTX_REG148 0x128 + +/* Clock macro type */ +enum cmu_type_t { + REF_CMU = 0, /* Clock macro is the internal reference clock */ + PHY_CMU = 1, /* Clock macro is the PLL for the Serdes */ +}; + +enum mux_type_t { + MUX_SELECT_ATA = 0, /* Switch the MUX to ATA */ + MUX_SELECT_SGMMII = 0, /* Switch the MUX to SGMII */ +}; + +enum clk_type_t { + CLK_EXT_DIFF = 0, /* External differential */ + CLK_INT_DIFF = 1, /* Internal differential */ + CLK_INT_SING = 2, /* Internal single ended */ +}; + +enum phy_mode { + MODE_SATA = 0, /* List them for simple reference */ + MODE_SGMII = 1, + MODE_PCIE = 2, + MODE_USB = 3, + MODE_XFI = 4, + MODE_MAX +}; + +struct xgene_sata_override_param { + u32 speed[MAX_LANE]; /* Index for override parameter per lane */ + u32 txspeed[3]; /* Tx speed */ + u32 txboostgain[MAX_LANE*3]; /* Tx freq boost and gain control */ + u32 txeyetuning[MAX_LANE*3]; /* Tx eye tuning */ + u32 txeyedirection[MAX_LANE*3]; /* Tx eye tuning direction */ + u32 txamplitude[MAX_LANE*3]; /* Tx amplitude control */ + u32 txprecursor_cn1[MAX_LANE*3]; /* Tx emphasis taps 1st pre-cursor */ + u32 txprecursor_cn2[MAX_LANE*3]; /* Tx emphasis taps 2nd pre-cursor */ + u32 txpostcursor_cp1[MAX_LANE*3]; /* Tx emphasis taps post-cursor */ +}; + +struct xgene_phy_ctx { + struct device *dev; + struct phy *phy; + enum phy_mode mode; /* Mode of operation */ + enum clk_type_t clk_type; /* Input clock selection */ + void __iomem *sds_base; /* PHY CSR base addr */ + struct clk *clk; /* Optional clock */ + + /* Override Serdes parameters */ + struct xgene_sata_override_param sata_param; +}; + +/* + * For chip earlier than A3 version, enable this flag. + * To enable, pass boot argument phy_xgene.preA3Chip=1 + */ +static int preA3Chip; +MODULE_PARM_DESC(preA3Chip, "Enable pre-A3 chip support (1=enable 0=disable)"); +module_param_named(preA3Chip, preA3Chip, int, 0444); + +static int is_custom_eval_board(void) +{ + struct device_node *root = of_find_node_by_path("/"); + const char *model; + + if (!root) + return 0; + model = of_get_property(root, "model", NULL); + if (model == NULL) + return 0; + return strstr(model, "Mustang") ? 
0 : 1; +} + +static void sds_wr(void __iomem *csr_base, u32 indirect_cmd_reg, + u32 indirect_data_reg, u32 addr, u32 data) +{ + unsigned long deadline = jiffies + HZ; + u32 val; + u32 cmd; + + cmd = CFG_IND_WR_CMD_MASK | CFG_IND_CMD_DONE_MASK; + cmd = CFG_IND_ADDR_SET(cmd, addr); + writel(data, csr_base + indirect_data_reg); + readl(csr_base + indirect_data_reg); /* Force a barrier */ + writel(cmd, csr_base + indirect_cmd_reg); + readl(csr_base + indirect_cmd_reg); /* Force a barrier */ + do { + val = readl(csr_base + indirect_cmd_reg); + } while (!(val & CFG_IND_CMD_DONE_MASK) && + time_before(jiffies, deadline)); + if (!(val & CFG_IND_CMD_DONE_MASK)) + pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n", + csr_base + indirect_cmd_reg, addr, data); +} + +static void sds_rd(void __iomem *csr_base, u32 indirect_cmd_reg, + u32 indirect_data_reg, u32 addr, u32 *data) +{ + unsigned long deadline = jiffies + HZ; + u32 val; + u32 cmd; + + cmd = CFG_IND_RD_CMD_MASK | CFG_IND_CMD_DONE_MASK; + cmd = CFG_IND_ADDR_SET(cmd, addr); + writel(cmd, csr_base + indirect_cmd_reg); + readl(csr_base + indirect_cmd_reg); /* Force a barrier */ + do { + val = readl(csr_base + indirect_cmd_reg); + } while (!(val & CFG_IND_CMD_DONE_MASK) && + time_before(jiffies, deadline)); + *data = readl(csr_base + indirect_data_reg); + if (!(val & CFG_IND_CMD_DONE_MASK)) + pr_err("SDS WR timeout at 0x%p offset 0x%08X value 0x%08X\n", + csr_base + indirect_cmd_reg, addr, *data); +} + +static void cmu_wr(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, + u32 reg, u32 data) +{ + void __iomem *sds_base = ctx->sds_base; + u32 val; + + if (cmu_type == REF_CMU) + reg += SERDES_PLL_REF_INDIRECT_OFFSET; + else + reg += SERDES_PLL_INDIRECT_OFFSET; + sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG, + SATA_ENET_SDS_IND_WDATA_REG, reg, data); + sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG, + SATA_ENET_SDS_IND_RDATA_REG, reg, &val); + pr_debug("CMU WR addr 0x%X value 0x%08X <-> 0x%08X\n", reg, data, val); +} + +static void cmu_rd(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, + u32 reg, u32 *data) +{ + void __iomem *sds_base = ctx->sds_base; + + if (cmu_type == REF_CMU) + reg += SERDES_PLL_REF_INDIRECT_OFFSET; + else + reg += SERDES_PLL_INDIRECT_OFFSET; + sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG, + SATA_ENET_SDS_IND_RDATA_REG, reg, data); + pr_debug("CMU RD addr 0x%X value 0x%08X\n", reg, *data); +} + +static void cmu_toggle1to0(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, + u32 reg, u32 bits) +{ + u32 val; + + cmu_rd(ctx, cmu_type, reg, &val); + val |= bits; + cmu_wr(ctx, cmu_type, reg, val); + cmu_rd(ctx, cmu_type, reg, &val); + val &= ~bits; + cmu_wr(ctx, cmu_type, reg, val); +} + +static void cmu_clrbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, + u32 reg, u32 bits) +{ + u32 val; + + cmu_rd(ctx, cmu_type, reg, &val); + val &= ~bits; + cmu_wr(ctx, cmu_type, reg, val); +} + +static void cmu_setbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, + u32 reg, u32 bits) +{ + u32 val; + + cmu_rd(ctx, cmu_type, reg, &val); + val |= bits; + cmu_wr(ctx, cmu_type, reg, val); +} + +static void serdes_wr(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 data) +{ + void __iomem *sds_base = ctx->sds_base; + u32 val; + + reg += SERDES_INDIRECT_OFFSET; + reg += lane * SERDES_LANE_STRIDE; + sds_wr(sds_base, SATA_ENET_SDS_IND_CMD_REG, + SATA_ENET_SDS_IND_WDATA_REG, reg, data); + sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG, + SATA_ENET_SDS_IND_RDATA_REG, reg, &val); + pr_debug("SERDES WR addr 0x%X value 0x%08X <-> 
0x%08X\n", reg, data, + val); +} + +static void serdes_rd(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 *data) +{ + void __iomem *sds_base = ctx->sds_base; + + reg += SERDES_INDIRECT_OFFSET; + reg += lane * SERDES_LANE_STRIDE; + sds_rd(sds_base, SATA_ENET_SDS_IND_CMD_REG, + SATA_ENET_SDS_IND_RDATA_REG, reg, data); + pr_debug("SERDES RD addr 0x%X value 0x%08X\n", reg, *data); +} + +static void serdes_clrbits(struct xgene_phy_ctx *ctx, int lane, u32 reg, + u32 bits) +{ + u32 val; + + serdes_rd(ctx, lane, reg, &val); + val &= ~bits; + serdes_wr(ctx, lane, reg, val); +} + +static void serdes_setbits(struct xgene_phy_ctx *ctx, int lane, u32 reg, + u32 bits) +{ + u32 val; + + serdes_rd(ctx, lane, reg, &val); + val |= bits; + serdes_wr(ctx, lane, reg, val); +} + +static void xgene_phy_cfg_cmu_clk_type(struct xgene_phy_ctx *ctx, + enum cmu_type_t cmu_type, + enum clk_type_t clk_type) +{ + u32 val; + + /* Set the reset sequence delay for TX ready assertion */ + cmu_rd(ctx, cmu_type, CMU_REG12, &val); + val = CMU_REG12_STATE_DELAY9_SET(val, 0x1); + cmu_wr(ctx, cmu_type, CMU_REG12, val); + /* Set the programmable stage delays between various enable stages */ + cmu_wr(ctx, cmu_type, CMU_REG13, 0x0222); + cmu_wr(ctx, cmu_type, CMU_REG14, 0x2225); + + /* Configure clock type */ + if (clk_type == CLK_EXT_DIFF) { + /* Select external clock mux */ + cmu_rd(ctx, cmu_type, CMU_REG0, &val); + val = CMU_REG0_PLL_REF_SEL_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG0, val); + /* Select CMOS as reference clock */ + cmu_rd(ctx, cmu_type, CMU_REG1, &val); + val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG1, val); + dev_dbg(ctx->dev, "Set external reference clock\n"); + } else if (clk_type == CLK_INT_DIFF) { + /* Select internal clock mux */ + cmu_rd(ctx, cmu_type, CMU_REG0, &val); + val = CMU_REG0_PLL_REF_SEL_SET(val, 0x1); + cmu_wr(ctx, cmu_type, CMU_REG0, val); + /* Select CMOS as reference clock */ + cmu_rd(ctx, cmu_type, CMU_REG1, &val); + val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1); + cmu_wr(ctx, cmu_type, CMU_REG1, val); + dev_dbg(ctx->dev, "Set internal reference clock\n"); + } else if (clk_type == CLK_INT_SING) { + /* + * NOTE: This clock type is NOT support for controller + * whose internal clock shared in the PCIe controller + * + * Select internal clock mux + */ + cmu_rd(ctx, cmu_type, CMU_REG1, &val); + val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x1); + cmu_wr(ctx, cmu_type, CMU_REG1, val); + /* Select CML as reference clock */ + cmu_rd(ctx, cmu_type, CMU_REG1, &val); + val = CMU_REG1_REFCLK_CMOS_SEL_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG1, val); + dev_dbg(ctx->dev, + "Set internal single ended reference clock\n"); + } +} + +static void xgene_phy_sata_cfg_cmu_core(struct xgene_phy_ctx *ctx, + enum cmu_type_t cmu_type, + enum clk_type_t clk_type) +{ + u32 val; + int ref_100MHz; + + if (cmu_type == REF_CMU) { + /* Set VCO calibration voltage threshold */ + cmu_rd(ctx, cmu_type, CMU_REG34, &val); + val = CMU_REG34_VCO_CAL_VTH_LO_MAX_SET(val, 0x7); + val = CMU_REG34_VCO_CAL_VTH_HI_MAX_SET(val, 0xc); + val = CMU_REG34_VCO_CAL_VTH_LO_MIN_SET(val, 0x3); + val = CMU_REG34_VCO_CAL_VTH_HI_MIN_SET(val, 0x8); + cmu_wr(ctx, cmu_type, CMU_REG34, val); + } + + /* Set the VCO calibration counter */ + cmu_rd(ctx, cmu_type, CMU_REG0, &val); + if (cmu_type == REF_CMU || preA3Chip) + val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x4); + else + val = CMU_REG0_CAL_COUNT_RESOL_SET(val, 0x7); + cmu_wr(ctx, cmu_type, CMU_REG0, val); + + /* Configure PLL for calibration */ + cmu_rd(ctx, 
cmu_type, CMU_REG1, &val); + val = CMU_REG1_PLL_CP_SET(val, 0x1); + if (cmu_type == REF_CMU || preA3Chip) + val = CMU_REG1_PLL_CP_SEL_SET(val, 0x5); + else + val = CMU_REG1_PLL_CP_SEL_SET(val, 0x3); + if (cmu_type == REF_CMU) + val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0); + else + val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x1); + cmu_wr(ctx, cmu_type, CMU_REG1, val); + + if (cmu_type != REF_CMU) + cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); + + /* Configure the PLL for either 100MHz or 50MHz */ + cmu_rd(ctx, cmu_type, CMU_REG2, &val); + if (cmu_type == REF_CMU) { + val = CMU_REG2_PLL_LFRES_SET(val, 0xa); + ref_100MHz = 1; + } else { + val = CMU_REG2_PLL_LFRES_SET(val, 0x3); + if (clk_type == CLK_EXT_DIFF) + ref_100MHz = 0; + else + ref_100MHz = 1; + } + if (ref_100MHz) { + val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_100M); + val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_100M); + } else { + val = CMU_REG2_PLL_FBDIV_SET(val, FBDIV_VAL_50M); + val = CMU_REG2_PLL_REFDIV_SET(val, REFDIV_VAL_50M); + } + cmu_wr(ctx, cmu_type, CMU_REG2, val); + + /* Configure the VCO */ + cmu_rd(ctx, cmu_type, CMU_REG3, &val); + if (cmu_type == REF_CMU) { + val = CMU_REG3_VCOVARSEL_SET(val, 0x3); + val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x10); + } else { + val = CMU_REG3_VCOVARSEL_SET(val, 0xF); + if (preA3Chip) + val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x15); + else + val = CMU_REG3_VCO_MOMSEL_INIT_SET(val, 0x1a); + val = CMU_REG3_VCO_MANMOMSEL_SET(val, 0x15); + } + cmu_wr(ctx, cmu_type, CMU_REG3, val); + + /* Disable force PLL lock */ + cmu_rd(ctx, cmu_type, CMU_REG26, &val); + val = CMU_REG26_FORCE_PLL_LOCK_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG26, val); + + /* Setup PLL loop filter */ + cmu_rd(ctx, cmu_type, CMU_REG5, &val); + val = CMU_REG5_PLL_LFSMCAP_SET(val, 0x3); + val = CMU_REG5_PLL_LFCAP_SET(val, 0x3); + if (cmu_type == REF_CMU || !preA3Chip) + val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x7); + else + val = CMU_REG5_PLL_LOCK_RESOLUTION_SET(val, 0x4); + cmu_wr(ctx, cmu_type, CMU_REG5, val); + + /* Enable or disable manual calibration */ + cmu_rd(ctx, cmu_type, CMU_REG6, &val); + val = CMU_REG6_PLL_VREGTRIM_SET(val, preA3Chip ? 0x0 : 0x2); + val = CMU_REG6_MAN_PVT_CAL_SET(val, preA3Chip ? 
0x1 : 0x0); + cmu_wr(ctx, cmu_type, CMU_REG6, val); + + /* Configure lane for 20-bits */ + if (cmu_type == PHY_CMU) { + cmu_rd(ctx, cmu_type, CMU_REG9, &val); + val = CMU_REG9_TX_WORD_MODE_CH1_SET(val, + CMU_REG9_WORD_LEN_20BIT); + val = CMU_REG9_TX_WORD_MODE_CH0_SET(val, + CMU_REG9_WORD_LEN_20BIT); + val = CMU_REG9_PLL_POST_DIVBY2_SET(val, 0x1); + if (!preA3Chip) { + val = CMU_REG9_VBG_BYPASSB_SET(val, 0x0); + val = CMU_REG9_IGEN_BYPASS_SET(val , 0x0); + } + cmu_wr(ctx, cmu_type, CMU_REG9, val); + + if (!preA3Chip) { + cmu_rd(ctx, cmu_type, CMU_REG10, &val); + if (!is_custom_eval_board()) { + val = CMU_REG10_VREG_REFSEL_SET(val, 0x1); + } else { + val = CMU_REG10_VREG_REFSEL_SET(val, 0x0); + } + cmu_wr(ctx, cmu_type, CMU_REG10, val); + } + } + + cmu_rd(ctx, cmu_type, CMU_REG16, &val); + val = CMU_REG16_CALIBRATION_DONE_OVERRIDE_SET(val, 0x1); + val = CMU_REG16_BYPASS_PLL_LOCK_SET(val, 0x1); + if (cmu_type == REF_CMU || preA3Chip) + val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x4); + else + val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7); + cmu_wr(ctx, cmu_type, CMU_REG16, val); + + /* Configure for SATA */ + cmu_rd(ctx, cmu_type, CMU_REG30, &val); + val = CMU_REG30_PCIE_MODE_SET(val, 0x0); + val = CMU_REG30_LOCK_COUNT_SET(val, 0x3); + cmu_wr(ctx, cmu_type, CMU_REG30, val); + + /* Disable state machine bypass */ + cmu_wr(ctx, cmu_type, CMU_REG31, 0xF); + + cmu_rd(ctx, cmu_type, CMU_REG32, &val); + val = CMU_REG32_PVT_CAL_WAIT_SEL_SET(val, 0x3); + if (cmu_type == REF_CMU || preA3Chip || is_custom_eval_board()) { + val = CMU_REG32_IREF_ADJ_SET(val, 0x3); + } else { + val = CMU_REG32_IREF_ADJ_SET(val, 0x1); + } + cmu_wr(ctx, cmu_type, CMU_REG32, val); + + /* Set VCO calibration threshold */ + if (cmu_type != REF_CMU && preA3Chip) + cmu_wr(ctx, cmu_type, CMU_REG34, 0x8d27); + else + cmu_wr(ctx, cmu_type, CMU_REG34, 0x873c); + + /* Set CTLE Override and override waiting from state machine */ + cmu_wr(ctx, cmu_type, CMU_REG37, 0xF00F); +} + +static void xgene_phy_ssc_enable(struct xgene_phy_ctx *ctx, + enum cmu_type_t cmu_type) +{ + u32 val; + + /* Set SSC modulation value */ + cmu_rd(ctx, cmu_type, CMU_REG35, &val); + val = CMU_REG35_PLL_SSC_MOD_SET(val, 98); + cmu_wr(ctx, cmu_type, CMU_REG35, val); + + /* Enable SSC, set vertical step and DSM value */ + cmu_rd(ctx, cmu_type, CMU_REG36, &val); + val = CMU_REG36_PLL_SSC_VSTEP_SET(val, 30); + val = CMU_REG36_PLL_SSC_EN_SET(val, 1); + val = CMU_REG36_PLL_SSC_DSMSEL_SET(val, 1); + cmu_wr(ctx, cmu_type, CMU_REG36, val); + + /* Reset the PLL */ + cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); + cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); + + /* Force VCO calibration to restart */ + cmu_toggle1to0(ctx, cmu_type, CMU_REG32, + CMU_REG32_FORCE_VCOCAL_START_MASK); +} + +static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx) +{ + u32 val; + u32 reg; + int i; + int lane; + + for (lane = 0; lane < MAX_LANE; lane++) { + serdes_wr(ctx, lane, RXTX_REG147, 0x6); + + /* Set boost control for quarter, half, and full rate */ + serdes_rd(ctx, lane, RXTX_REG0, &val); + val = RXTX_REG0_CTLE_EQ_HR_SET(val, 0x10); + val = RXTX_REG0_CTLE_EQ_QR_SET(val, 0x10); + val = RXTX_REG0_CTLE_EQ_FR_SET(val, 0x10); + serdes_wr(ctx, lane, RXTX_REG0, val); + + /* Set boost control value */ + serdes_rd(ctx, lane, RXTX_REG1, &val); + val = RXTX_REG1_RXACVCM_SET(val, 0x7); + val = RXTX_REG1_CTLE_EQ_SET(val, + ctx->sata_param.txboostgain[lane * 3 + + ctx->sata_param.speed[lane]]); + serdes_wr(ctx, lane, RXTX_REG1, val); + + /* Latch VTT 
value based on the termination to ground and + enable TX FIFO */ + serdes_rd(ctx, lane, RXTX_REG2, &val); + val = RXTX_REG2_VTT_ENA_SET(val, 0x1); + val = RXTX_REG2_VTT_SEL_SET(val, 0x1); + val = RXTX_REG2_TX_FIFO_ENA_SET(val, 0x1); + serdes_wr(ctx, lane, RXTX_REG2, val); + + /* Configure Tx for 20-bits */ + serdes_rd(ctx, lane, RXTX_REG4, &val); + val = RXTX_REG4_TX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT); + serdes_wr(ctx, lane, RXTX_REG4, val); + + if (!preA3Chip) { + serdes_rd(ctx, lane, RXTX_REG1, &val); + val = RXTX_REG1_RXVREG1_SET(val, 0x2); + val = RXTX_REG1_RXIREF_ADJ_SET(val, 0x2); + serdes_wr(ctx, lane, RXTX_REG1, val); + } + + /* Set pre-emphasis first 1 and 2, and post-emphasis values */ + serdes_rd(ctx, lane, RXTX_REG5, &val); + val = RXTX_REG5_TX_CN1_SET(val, + ctx->sata_param.txprecursor_cn1[lane * 3 + + ctx->sata_param.speed[lane]]); + val = RXTX_REG5_TX_CP1_SET(val, + ctx->sata_param.txpostcursor_cp1[lane * 3 + + ctx->sata_param.speed[lane]]); + val = RXTX_REG5_TX_CN2_SET(val, + ctx->sata_param.txprecursor_cn2[lane * 3 + + ctx->sata_param.speed[lane]]); + serdes_wr(ctx, lane, RXTX_REG5, val); + + /* Set TX amplitude value */ + serdes_rd(ctx, lane, RXTX_REG6, &val); + val = RXTX_REG6_TXAMP_CNTL_SET(val, + ctx->sata_param.txamplitude[lane * 3 + + ctx->sata_param.speed[lane]]); + val = RXTX_REG6_TXAMP_ENA_SET(val, 0x1); + val = RXTX_REG6_TX_IDLE_SET(val, 0x0); + val = RXTX_REG6_RX_BIST_RESYNC_SET(val, 0x0); + val = RXTX_REG6_RX_BIST_ERRCNT_RD_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG6, val); + + /* Configure Rx for 20-bits */ + serdes_rd(ctx, lane, RXTX_REG7, &val); + val = RXTX_REG7_BIST_ENA_RX_SET(val, 0x0); + val = RXTX_REG7_RX_WORD_MODE_SET(val, CMU_REG9_WORD_LEN_20BIT); + serdes_wr(ctx, lane, RXTX_REG7, val); + + /* Set CDR and LOS values and enable Rx SSC */ + serdes_rd(ctx, lane, RXTX_REG8, &val); + val = RXTX_REG8_CDR_LOOP_ENA_SET(val, 0x1); + val = RXTX_REG8_CDR_BYPASS_RXLOS_SET(val, 0x0); + val = RXTX_REG8_SSC_ENABLE_SET(val, 0x1); + val = RXTX_REG8_SD_DISABLE_SET(val, 0x0); + val = RXTX_REG8_SD_VREF_SET(val, 0x4); + serdes_wr(ctx, lane, RXTX_REG8, val); + + /* Set phase adjust upper/lower limits */ + serdes_rd(ctx, lane, RXTX_REG11, &val); + val = RXTX_REG11_PHASE_ADJUST_LIMIT_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG11, val); + + /* Enable Latch Off; disable SUMOS and Tx termination */ + serdes_rd(ctx, lane, RXTX_REG12, &val); + val = RXTX_REG12_LATCH_OFF_ENA_SET(val, 0x1); + val = RXTX_REG12_SUMOS_ENABLE_SET(val, 0x0); + val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG12, val); + + /* Set period error latch to 512T and enable BWL */ + serdes_rd(ctx, lane, RXTX_REG26, &val); + val = RXTX_REG26_PERIOD_ERROR_LATCH_SET(val, 0x0); + val = RXTX_REG26_BLWC_ENA_SET(val, 0x1); + serdes_wr(ctx, lane, RXTX_REG26, val); + + serdes_wr(ctx, lane, RXTX_REG28, 0x0); + + /* Set DFE loop preset value */ + serdes_wr(ctx, lane, RXTX_REG31, 0x0); + + /* Set Eye Monitor counter width to 12-bit */ + serdes_rd(ctx, lane, RXTX_REG61, &val); + val = RXTX_REG61_ISCAN_INBERT_SET(val, 0x1); + val = RXTX_REG61_LOADFREQ_SHIFT_SET(val, 0x0); + val = RXTX_REG61_EYE_COUNT_WIDTH_SEL_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG61, val); + + serdes_rd(ctx, lane, RXTX_REG62, &val); + val = RXTX_REG62_PERIOD_H1_QLATCH_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG62, val); + + /* Set BW select tap X for DFE loop */ + for (i = 0; i < 9; i++) { + reg = RXTX_REG81 + i * 2; + serdes_rd(ctx, lane, reg, &val); + val = RXTX_REG89_MU_TH7_SET(val, 0xe); + val 
= RXTX_REG89_MU_TH8_SET(val, 0xe); + val = RXTX_REG89_MU_TH9_SET(val, 0xe); + serdes_wr(ctx, lane, reg, val); + } + + /* Set BW select tap X for frequency adjust loop */ + for (i = 0; i < 3; i++) { + reg = RXTX_REG96 + i * 2; + serdes_rd(ctx, lane, reg, &val); + val = RXTX_REG96_MU_FREQ1_SET(val, 0x10); + val = RXTX_REG96_MU_FREQ2_SET(val, 0x10); + val = RXTX_REG96_MU_FREQ3_SET(val, 0x10); + serdes_wr(ctx, lane, reg, val); + } + + /* Set BW select tap X for phase adjust loop */ + for (i = 0; i < 3; i++) { + reg = RXTX_REG99 + i * 2; + serdes_rd(ctx, lane, reg, &val); + val = RXTX_REG99_MU_PHASE1_SET(val, 0x7); + val = RXTX_REG99_MU_PHASE2_SET(val, 0x7); + val = RXTX_REG99_MU_PHASE3_SET(val, 0x7); + serdes_wr(ctx, lane, reg, val); + } + + serdes_rd(ctx, lane, RXTX_REG102, &val); + val = RXTX_REG102_FREQLOOP_LIMIT_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG102, val); + + serdes_wr(ctx, lane, RXTX_REG114, 0xffe0); + + serdes_rd(ctx, lane, RXTX_REG125, &val); + val = RXTX_REG125_SIGN_PQ_SET(val, + ctx->sata_param.txeyedirection[lane * 3 + + ctx->sata_param.speed[lane]]); + val = RXTX_REG125_PQ_REG_SET(val, + ctx->sata_param.txeyetuning[lane * 3 + + ctx->sata_param.speed[lane]]); + val = RXTX_REG125_PHZ_MANUAL_SET(val, 0x1); + serdes_wr(ctx, lane, RXTX_REG125, val); + + serdes_rd(ctx, lane, RXTX_REG127, &val); + val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x0); + serdes_wr(ctx, lane, RXTX_REG127, val); + + serdes_rd(ctx, lane, RXTX_REG128, &val); + val = RXTX_REG128_LATCH_CAL_WAIT_SEL_SET(val, 0x3); + serdes_wr(ctx, lane, RXTX_REG128, val); + + serdes_rd(ctx, lane, RXTX_REG145, &val); + val = RXTX_REG145_RXDFE_CONFIG_SET(val, 0x3); + val = RXTX_REG145_TX_IDLE_SATA_SET(val, 0x0); + if (preA3Chip) { + val = RXTX_REG145_RXES_ENA_SET(val, 0x1); + val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x1); + } else { + val = RXTX_REG145_RXES_ENA_SET(val, 0x0); + val = RXTX_REG145_RXVWES_LATENA_SET(val, 0x0); + } + serdes_wr(ctx, lane, RXTX_REG145, val); + + /* + * Set Rx LOS filter clock rate, sample rate, and threshold + * windows + */ + for (i = 0; i < 4; i++) { + reg = RXTX_REG148 + i * 2; + serdes_wr(ctx, lane, reg, 0xFFFF); + } + } +} + +static int xgene_phy_cal_rdy_chk(struct xgene_phy_ctx *ctx, + enum cmu_type_t cmu_type, + enum clk_type_t clk_type) +{ + void __iomem *csr_serdes = ctx->sds_base; + int loop; + u32 val; + + /* Release PHY main reset */ + writel(0xdf, csr_serdes + SATA_ENET_SDS_RST_CTL); + readl(csr_serdes + SATA_ENET_SDS_RST_CTL); /* Force a barrier */ + + if (cmu_type != REF_CMU) { + cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); + /* + * As per PHY design spec, the PLL reset requires a minimum + * of 800us. + */ + usleep_range(800, 1000); + + cmu_rd(ctx, cmu_type, CMU_REG1, &val); + val = CMU_REG1_PLL_MANUALCAL_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG1, val); + /* + * As per PHY design spec, the PLL auto calibration requires + * a minimum of 800us. + */ + usleep_range(800, 1000); + + cmu_toggle1to0(ctx, cmu_type, CMU_REG32, + CMU_REG32_FORCE_VCOCAL_START_MASK); + /* + * As per PHY design spec, the PLL requires a minimum of + * 800us to settle. + */ + usleep_range(800, 1000); + } + + if (!preA3Chip) + goto skip_manual_cal; + + /* + * Configure the termination resister calibration + * The serial receive pins, RXP/RXN, have TERMination resistor + * that is required to be calibrated. 
+ */ + cmu_rd(ctx, cmu_type, CMU_REG17, &val); + val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x12); + val = CMU_REG17_RESERVED_7_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG17, val); + cmu_toggle1to0(ctx, cmu_type, CMU_REG17, + CMU_REG17_PVT_TERM_MAN_ENA_MASK); + /* + * The serial transmit pins, TXP/TXN, have Pull-UP and Pull-DOWN + * resistors that are required to the calibrated. + * Configure the pull DOWN calibration + */ + cmu_rd(ctx, cmu_type, CMU_REG17, &val); + val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x29); + val = CMU_REG17_RESERVED_7_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG17, val); + cmu_toggle1to0(ctx, cmu_type, CMU_REG16, + CMU_REG16_PVT_DN_MAN_ENA_MASK); + /* Configure the pull UP calibration */ + cmu_rd(ctx, cmu_type, CMU_REG17, &val); + val = CMU_REG17_PVT_CODE_R2A_SET(val, 0x28); + val = CMU_REG17_RESERVED_7_SET(val, 0x0); + cmu_wr(ctx, cmu_type, CMU_REG17, val); + cmu_toggle1to0(ctx, cmu_type, CMU_REG16, + CMU_REG16_PVT_UP_MAN_ENA_MASK); + +skip_manual_cal: + /* Poll the PLL calibration completion status for at least 1 ms */ + loop = 100; + do { + cmu_rd(ctx, cmu_type, CMU_REG7, &val); + if (CMU_REG7_PLL_CALIB_DONE_RD(val)) + break; + /* + * As per PHY design spec, PLL calibration status requires + * a minimum of 10us to be updated. + */ + usleep_range(10, 100); + } while (--loop > 0); + + cmu_rd(ctx, cmu_type, CMU_REG7, &val); + dev_dbg(ctx->dev, "PLL calibration %s\n", + CMU_REG7_PLL_CALIB_DONE_RD(val) ? "done" : "failed"); + if (CMU_REG7_VCO_CAL_FAIL_RD(val)) { + dev_err(ctx->dev, + "PLL calibration failed due to VCO failure\n"); + return -1; + } + dev_dbg(ctx->dev, "PLL calibration successful\n"); + + cmu_rd(ctx, cmu_type, CMU_REG15, &val); + dev_dbg(ctx->dev, "PHY Tx is %sready\n", val & 0x300 ? "" : "not "); + return 0; +} + +static void xgene_phy_pdwn_force_vco(struct xgene_phy_ctx *ctx, + enum cmu_type_t cmu_type, + enum clk_type_t clk_type) +{ + u32 val; + + dev_dbg(ctx->dev, "Reset VCO and re-start again\n"); + if (cmu_type == PHY_CMU) { + cmu_rd(ctx, cmu_type, CMU_REG16, &val); + val = CMU_REG16_VCOCAL_WAIT_BTW_CODE_SET(val, 0x7); + cmu_wr(ctx, cmu_type, CMU_REG16, val); + } + + cmu_toggle1to0(ctx, cmu_type, CMU_REG0, CMU_REG0_PDOWN_MASK); + cmu_toggle1to0(ctx, cmu_type, CMU_REG32, + CMU_REG32_FORCE_VCOCAL_START_MASK); +} + +static int xgene_phy_hw_init_sata(struct xgene_phy_ctx *ctx, + enum clk_type_t clk_type, int ssc_enable) +{ + void __iomem *sds_base = ctx->sds_base; + u32 val; + int i; + + /* Configure the PHY for operation */ + dev_dbg(ctx->dev, "Reset PHY\n"); + /* Place PHY into reset */ + writel(0x0, sds_base + SATA_ENET_SDS_RST_CTL); + val = readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */ + /* Release PHY lane from reset (active high) */ + writel(0x20, sds_base + SATA_ENET_SDS_RST_CTL); + readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */ + /* Release all PHY module out of reset except PHY main reset */ + writel(0xde, sds_base + SATA_ENET_SDS_RST_CTL); + readl(sds_base + SATA_ENET_SDS_RST_CTL); /* Force a barrier */ + + /* Set the operation speed */ + val = readl(sds_base + SATA_ENET_SDS_CTL1); + val = CFG_I_SPD_SEL_CDR_OVR1_SET(val, + ctx->sata_param.txspeed[ctx->sata_param.speed[0]]); + writel(val, sds_base + SATA_ENET_SDS_CTL1); + + dev_dbg(ctx->dev, "Set the customer pin mode to SATA\n"); + val = readl(sds_base + SATA_ENET_SDS_CTL0); + val = REGSPEC_CFG_I_CUSTOMER_PIN_MODE0_SET(val, 0x4421); + writel(val, sds_base + SATA_ENET_SDS_CTL0); + + /* Configure the clock macro unit (CMU) clock type */ + 
xgene_phy_cfg_cmu_clk_type(ctx, PHY_CMU, clk_type); + + /* Configure the clock macro */ + xgene_phy_sata_cfg_cmu_core(ctx, PHY_CMU, clk_type); + + /* Enable SSC if enabled */ + if (ssc_enable) + xgene_phy_ssc_enable(ctx, PHY_CMU); + + /* Configure PHY lanes */ + xgene_phy_sata_cfg_lanes(ctx); + + /* Set Rx/Tx 20-bit */ + val = readl(sds_base + SATA_ENET_SDS_PCS_CTL0); + val = REGSPEC_CFG_I_RX_WORDMODE0_SET(val, 0x3); + val = REGSPEC_CFG_I_TX_WORDMODE0_SET(val, 0x3); + writel(val, sds_base + SATA_ENET_SDS_PCS_CTL0); + + /* Start PLL calibration and try for three times */ + i = 10; + do { + if (!xgene_phy_cal_rdy_chk(ctx, PHY_CMU, clk_type)) + break; + /* If failed, toggle the VCO power signal and start again */ + xgene_phy_pdwn_force_vco(ctx, PHY_CMU, clk_type); + } while (--i > 0); + /* Even on failure, allow to continue any way */ + if (i <= 0) + dev_err(ctx->dev, "PLL calibration failed\n"); + + return 0; +} + +static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx, + enum clk_type_t clk_type, + int ssc_enable) +{ + int rc; + + dev_dbg(ctx->dev, "PHY init clk type %d\n", clk_type); + + if (ctx->mode == MODE_SATA) { + rc = xgene_phy_hw_init_sata(ctx, clk_type, ssc_enable); + if (rc) + return rc; + } else { + dev_err(ctx->dev, "Un-supported customer pin mode %d\n", + ctx->mode); + return -ENODEV; + } + + return 0; +} + +/* + * Receiver Offset Calibration: + * + * Calibrate the receiver signal path offset in two steps - summar and + * latch calibrations + */ +static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane) +{ + int i; + struct { + u32 reg; + u32 val; + } serdes_reg[] = { + {RXTX_REG38, 0x0}, + {RXTX_REG39, 0xff00}, + {RXTX_REG40, 0xffff}, + {RXTX_REG41, 0xffff}, + {RXTX_REG42, 0xffff}, + {RXTX_REG43, 0xffff}, + {RXTX_REG44, 0xffff}, + {RXTX_REG45, 0xffff}, + {RXTX_REG46, 0xffff}, + {RXTX_REG47, 0xfffc}, + {RXTX_REG48, 0x0}, + {RXTX_REG49, 0x0}, + {RXTX_REG50, 0x0}, + {RXTX_REG51, 0x0}, + {RXTX_REG52, 0x0}, + {RXTX_REG53, 0x0}, + {RXTX_REG54, 0x0}, + {RXTX_REG55, 0x0}, + }; + + /* Start SUMMER calibration */ + serdes_setbits(ctx, lane, RXTX_REG127, + RXTX_REG127_FORCE_SUM_CAL_START_MASK); + /* + * As per PHY design spec, the Summer calibration requires a minimum + * of 100us to complete. + */ + usleep_range(100, 500); + serdes_clrbits(ctx, lane, RXTX_REG127, + RXTX_REG127_FORCE_SUM_CAL_START_MASK); + /* + * As per PHY design spec, the auto calibration requires a minimum + * of 100us to complete. + */ + usleep_range(100, 500); + + /* Start latch calibration */ + serdes_setbits(ctx, lane, RXTX_REG127, + RXTX_REG127_FORCE_LAT_CAL_START_MASK); + /* + * As per PHY design spec, the latch calibration requires a minimum + * of 100us to complete. + */ + usleep_range(100, 500); + serdes_clrbits(ctx, lane, RXTX_REG127, + RXTX_REG127_FORCE_LAT_CAL_START_MASK); + + /* Configure the PHY lane for calibration */ + serdes_wr(ctx, lane, RXTX_REG28, 0x7); + serdes_wr(ctx, lane, RXTX_REG31, 0x7e00); + serdes_clrbits(ctx, lane, RXTX_REG4, + RXTX_REG4_TX_LOOPBACK_BUF_EN_MASK); + serdes_clrbits(ctx, lane, RXTX_REG7, + RXTX_REG7_LOOP_BACK_ENA_CTLE_MASK); + for (i = 0; i < ARRAY_SIZE(serdes_reg); i++) + serdes_wr(ctx, lane, serdes_reg[i].reg, + serdes_reg[i].val); +} + +static void xgene_phy_reset_rxd(struct xgene_phy_ctx *ctx, int lane) +{ + /* Reset digital Rx */ + serdes_clrbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK); + /* As per PHY design spec, the reset requires a minimum of 100us. 
*/ + usleep_range(100, 150); + serdes_setbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK); +} + +static int xgene_phy_get_avg(int accum, int samples) +{ + return (accum + (samples / 2)) / samples; +} + +static void xgene_phy_gen_avg_val(struct xgene_phy_ctx *ctx, int lane) +{ + int max_loop = 10; + int avg_loop = 0; + int lat_do = 0, lat_xo = 0, lat_eo = 0, lat_so = 0; + int lat_de = 0, lat_xe = 0, lat_ee = 0, lat_se = 0; + int sum_cal = 0; + int lat_do_itr, lat_xo_itr, lat_eo_itr, lat_so_itr; + int lat_de_itr, lat_xe_itr, lat_ee_itr, lat_se_itr; + int sum_cal_itr; + int fail_even; + int fail_odd; + u32 val; + + dev_dbg(ctx->dev, "Generating avg calibration value for lane %d\n", + lane); + + /* Enable RX Hi-Z termination */ + serdes_setbits(ctx, lane, RXTX_REG12, + RXTX_REG12_RX_DET_TERM_ENABLE_MASK); + /* Turn off DFE */ + serdes_wr(ctx, lane, RXTX_REG28, 0x0000); + /* DFE Presets to zero */ + serdes_wr(ctx, lane, RXTX_REG31, 0x0000); + + /* + * Receiver Offset Calibration: + * Calibrate the receiver signal path offset in two steps - summar + * and latch calibration. + * Runs the "Receiver Offset Calibration multiple times to determine + * the average value to use. + */ + while (avg_loop < max_loop) { + /* Start the calibration */ + xgene_phy_force_lat_summer_cal(ctx, lane); + + serdes_rd(ctx, lane, RXTX_REG21, &val); + lat_do_itr = RXTX_REG21_DO_LATCH_CALOUT_RD(val); + lat_xo_itr = RXTX_REG21_XO_LATCH_CALOUT_RD(val); + fail_odd = RXTX_REG21_LATCH_CAL_FAIL_ODD_RD(val); + + serdes_rd(ctx, lane, RXTX_REG22, &val); + lat_eo_itr = RXTX_REG22_EO_LATCH_CALOUT_RD(val); + lat_so_itr = RXTX_REG22_SO_LATCH_CALOUT_RD(val); + fail_even = RXTX_REG22_LATCH_CAL_FAIL_EVEN_RD(val); + + serdes_rd(ctx, lane, RXTX_REG23, &val); + lat_de_itr = RXTX_REG23_DE_LATCH_CALOUT_RD(val); + lat_xe_itr = RXTX_REG23_XE_LATCH_CALOUT_RD(val); + + serdes_rd(ctx, lane, RXTX_REG24, &val); + lat_ee_itr = RXTX_REG24_EE_LATCH_CALOUT_RD(val); + lat_se_itr = RXTX_REG24_SE_LATCH_CALOUT_RD(val); + + serdes_rd(ctx, lane, RXTX_REG121, &val); + sum_cal_itr = RXTX_REG121_SUMOS_CAL_CODE_RD(val); + + /* Check for failure. 
If passed, sum them for averaging */ + if ((fail_even == 0 || fail_even == 1) && + (fail_odd == 0 || fail_odd == 1)) { + lat_do += lat_do_itr; + lat_xo += lat_xo_itr; + lat_eo += lat_eo_itr; + lat_so += lat_so_itr; + lat_de += lat_de_itr; + lat_xe += lat_xe_itr; + lat_ee += lat_ee_itr; + lat_se += lat_se_itr; + sum_cal += sum_cal_itr; + + dev_dbg(ctx->dev, "Iteration %d:\n", avg_loop); + dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n", + lat_do_itr, lat_xo_itr, lat_eo_itr, + lat_so_itr); + dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n", + lat_de_itr, lat_xe_itr, lat_ee_itr, + lat_se_itr); + dev_dbg(ctx->dev, "SUM 0x%x\n", sum_cal_itr); + ++avg_loop; + } else { + dev_err(ctx->dev, + "Receiver calibration failed at %d loop\n", + avg_loop); + } + xgene_phy_reset_rxd(ctx, lane); + } + + /* Update latch manual calibration with average value */ + serdes_rd(ctx, lane, RXTX_REG127, &val); + val = RXTX_REG127_DO_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_do, max_loop)); + val = RXTX_REG127_XO_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_xo, max_loop)); + serdes_wr(ctx, lane, RXTX_REG127, val); + + serdes_rd(ctx, lane, RXTX_REG128, &val); + val = RXTX_REG128_EO_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_eo, max_loop)); + val = RXTX_REG128_SO_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_so, max_loop)); + serdes_wr(ctx, lane, RXTX_REG128, val); + + serdes_rd(ctx, lane, RXTX_REG129, &val); + val = RXTX_REG129_DE_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_de, max_loop)); + val = RXTX_REG129_XE_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_xe, max_loop)); + serdes_wr(ctx, lane, RXTX_REG129, val); + + serdes_rd(ctx, lane, RXTX_REG130, &val); + val = RXTX_REG130_EE_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_ee, max_loop)); + val = RXTX_REG130_SE_LATCH_MANCAL_SET(val, + xgene_phy_get_avg(lat_se, max_loop)); + serdes_wr(ctx, lane, RXTX_REG130, val); + + /* Update SUMMER calibration with average value */ + serdes_rd(ctx, lane, RXTX_REG14, &val); + val = RXTX_REG14_CLTE_LATCAL_MAN_PROG_SET(val, + xgene_phy_get_avg(sum_cal, max_loop)); + serdes_wr(ctx, lane, RXTX_REG14, val); + + dev_dbg(ctx->dev, "Average Value:\n"); + dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n", + xgene_phy_get_avg(lat_do, max_loop), + xgene_phy_get_avg(lat_xo, max_loop), + xgene_phy_get_avg(lat_eo, max_loop), + xgene_phy_get_avg(lat_so, max_loop)); + dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n", + xgene_phy_get_avg(lat_de, max_loop), + xgene_phy_get_avg(lat_xe, max_loop), + xgene_phy_get_avg(lat_ee, max_loop), + xgene_phy_get_avg(lat_se, max_loop)); + dev_dbg(ctx->dev, "SUM 0x%x\n", + xgene_phy_get_avg(sum_cal, max_loop)); + + serdes_rd(ctx, lane, RXTX_REG14, &val); + val = RXTX_REG14_CTLE_LATCAL_MAN_ENA_SET(val, 0x1); + serdes_wr(ctx, lane, RXTX_REG14, val); + dev_dbg(ctx->dev, "Enable Manual Summer calibration\n"); + + serdes_rd(ctx, lane, RXTX_REG127, &val); + val = RXTX_REG127_LATCH_MAN_CAL_ENA_SET(val, 0x1); + dev_dbg(ctx->dev, "Enable Manual Latch calibration\n"); + serdes_wr(ctx, lane, RXTX_REG127, val); + + /* Disable RX Hi-Z termination */ + serdes_rd(ctx, lane, RXTX_REG12, &val); + val = RXTX_REG12_RX_DET_TERM_ENABLE_SET(val, 0); + serdes_wr(ctx, lane, RXTX_REG12, val); + /* Turn on DFE */ + serdes_wr(ctx, lane, RXTX_REG28, 0x0007); + /* Set DFE preset */ + serdes_wr(ctx, lane, RXTX_REG31, 0x7e00); +} + +static int xgene_phy_hw_init(struct phy *phy) +{ + struct xgene_phy_ctx *ctx = phy_get_drvdata(phy); + int rc; + int i; + + rc = xgene_phy_hw_initialize(ctx, CLK_EXT_DIFF, SSC_DISABLE); 
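
The xgene_phy_get_avg() helper used above averages the accumulated calibration read-backs with rounding to nearest rather than plain truncation: adding half the sample count before the integer division bumps the result up whenever the remainder reaches samples/2. A tiny standalone sketch of the same arithmetic (avg_round_nearest is an illustrative name, not the driver's):

#include <stdio.h>

/* Same idea as xgene_phy_get_avg(): add half the divisor before dividing
 * so the integer result is rounded to nearest instead of truncated. */
static int avg_round_nearest(int accum, int samples)
{
        return (accum + (samples / 2)) / samples;
}

int main(void)
{
        /* 17/10 truncates to 1; the rounded average is 2 */
        printf("truncated %d, rounded %d\n", 17 / 10, avg_round_nearest(17, 10));
        return 0;
}

With max_loop set to 10 in xgene_phy_gen_avg_val(), this is exactly the divide-by-10 case shown above.
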
+ if (rc) { + dev_err(ctx->dev, "PHY initialize failed %d\n", rc); + return rc; + } + + /* Setup clock properly after PHY configuration */ + if (!IS_ERR(ctx->clk)) { + /* HW requires an toggle of the clock */ + clk_prepare_enable(ctx->clk); + clk_disable_unprepare(ctx->clk); + clk_prepare_enable(ctx->clk); + } + + /* Compute average value */ + for (i = 0; i < MAX_LANE; i++) + xgene_phy_gen_avg_val(ctx, i); + + dev_dbg(ctx->dev, "PHY initialized\n"); + return 0; +} + +static const struct phy_ops xgene_phy_ops = { + .init = xgene_phy_hw_init, + .owner = THIS_MODULE, +}; + +static struct phy *xgene_phy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct xgene_phy_ctx *ctx = dev_get_drvdata(dev); + + if (args->args_count <= 0) + return ERR_PTR(-EINVAL); + if (args->args[0] < MODE_SATA || args->args[0] >= MODE_MAX) + return ERR_PTR(-EINVAL); + + ctx->mode = args->args[0]; + return ctx->phy; +} + +static void xgene_phy_get_param(struct platform_device *pdev, + const char *name, u32 *buffer, + int count, u32 *default_val, + u32 conv_factor) +{ + int i; + + if (!of_property_read_u32_array(pdev->dev.of_node, name, buffer, + count)) { + for (i = 0; i < count; i++) + buffer[i] /= conv_factor; + return; + } + /* Does not exist, load default */ + for (i = 0; i < count; i++) + buffer[i] = default_val[i % 3]; +} + +static int xgene_phy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct xgene_phy_ctx *ctx; + struct resource *res; + int rc = 0; + u32 default_spd[] = DEFAULT_SATA_SPD_SEL; + u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN; + u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION; + u32 default_txeye_tuning[] = DEFAULT_SATA_TXEYETUNING; + u32 default_txamp[] = DEFAULT_SATA_TXAMP; + u32 default_txcn1[] = DEFAULT_SATA_TXCN1; + u32 default_txcn2[] = DEFAULT_SATA_TXCN2; + u32 default_txcp1[] = DEFAULT_SATA_TXCP1; + int i; + + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ctx->sds_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctx->sds_base)) { + rc = PTR_ERR(ctx->sds_base); + goto error; + } + + /* Retrieve optional clock */ + ctx->clk = clk_get(&pdev->dev, NULL); + + /* Load override paramaters */ + xgene_phy_get_param(pdev, "apm,tx-eye-tuning", + ctx->sata_param.txeyetuning, 6, default_txeye_tuning, 1); + xgene_phy_get_param(pdev, "apm,tx-eye-direction", + ctx->sata_param.txeyedirection, 6, default_txeye_direction, 1); + xgene_phy_get_param(pdev, "apm,tx-boost-gain", + ctx->sata_param.txboostgain, 6, default_txboost_gain, 1); + xgene_phy_get_param(pdev, "apm,tx-amplitude", + ctx->sata_param.txamplitude, 6, default_txamp, 13300); + xgene_phy_get_param(pdev, "apm,tx-pre-cursor1", + ctx->sata_param.txprecursor_cn1, 6, default_txcn1, 18200); + xgene_phy_get_param(pdev, "apm,tx-pre-cursor2", + ctx->sata_param.txprecursor_cn2, 6, default_txcn2, 18200); + xgene_phy_get_param(pdev, "apm,tx-post-cursor", + ctx->sata_param.txpostcursor_cp1, 6, default_txcp1, 18200); + xgene_phy_get_param(pdev, "apm,tx-speed", + ctx->sata_param.txspeed, 3, default_spd, 1); + for (i = 0; i < MAX_LANE; i++) + ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ + + ctx->dev = &pdev->dev; + platform_set_drvdata(pdev, ctx); + + ctx->phy = devm_phy_create(ctx->dev, &xgene_phy_ops, NULL); + if (IS_ERR(ctx->phy)) { + dev_dbg(&pdev->dev, "Failed to create PHY\n"); + rc = PTR_ERR(ctx->phy); + goto error; + } + 
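
The xgene_phy_get_param() calls above fill the override tables that the lane configuration code earlier in the file indexes as [lane * 3 + speed] (see the RXTX_REG1/REG5/REG6 programming). When the optional DT property is present, every value is divided by the caller's conversion factor; when it is absent, the 3-entry default table, apparently one entry per supported speed, is repeated for every lane via the i % 3 index. A minimal userspace sketch of that fallback fill (fill_defaults and the values are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the "property missing" branch of xgene_phy_get_param():
 * repeat the 3 per-speed defaults across however many lanes the table covers. */
static void fill_defaults(uint32_t *buffer, int count, const uint32_t *default_val)
{
        int i;

        for (i = 0; i < count; i++)
                buffer[i] = default_val[i % 3];
}

int main(void)
{
        const uint32_t per_speed_defaults[3] = { 0xa, 0xb, 0xc }; /* made-up values */
        uint32_t table[6];
        int i;

        fill_defaults(table, 6, per_speed_defaults);
        for (i = 0; i < 6; i++)
                printf("entry %d (lane %d, speed %d) = 0x%x\n",
                       i, i / 3, i % 3, table[i]);
        return 0;
}
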
phy_set_drvdata(ctx->phy, ctx); + + phy_provider = devm_of_phy_provider_register(ctx->dev, + xgene_phy_xlate); + if (IS_ERR(phy_provider)) { + rc = PTR_ERR(phy_provider); + goto error; + } + + return 0; + +error: + return rc; +} + +static const struct of_device_id xgene_phy_of_match[] = { + {.compatible = "apm,xgene-phy",}, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_phy_of_match); + +static struct platform_driver xgene_phy_driver = { + .probe = xgene_phy_probe, + .driver = { + .name = "xgene-phy", + .owner = THIS_MODULE, + .of_match_table = xgene_phy_of_match, + }, +}; +module_platform_driver(xgene_phy_driver); + +MODULE_DESCRIPTION("APM X-Gene Multi-Purpose PHY driver"); +MODULE_AUTHOR("Loc Ho "); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.1"); --- linux-3.13.0.orig/drivers/pinctrl/core.c +++ linux-3.13.0/drivers/pinctrl/core.c @@ -851,7 +851,9 @@ kref_init(&p->users); /* Add the pinctrl handle to the global list */ + mutex_lock(&pinctrl_list_mutex); list_add_tail(&p->node, &pinctrl_list); + mutex_unlock(&pinctrl_list_mutex); return p; } @@ -1808,14 +1810,15 @@ if (pctldev == NULL) return; - mutex_lock(&pinctrldev_list_mutex); mutex_lock(&pctldev->mutex); - pinctrl_remove_device_debugfs(pctldev); + mutex_unlock(&pctldev->mutex); if (!IS_ERR(pctldev->p)) pinctrl_put(pctldev->p); + mutex_lock(&pinctrldev_list_mutex); + mutex_lock(&pctldev->mutex); /* TODO: check that no pinmuxes are still active? */ list_del(&pctldev->node); /* Destroy descriptor tree */ --- linux-3.13.0.orig/drivers/pinctrl/pinctrl-at91.c +++ linux-3.13.0/drivers/pinctrl/pinctrl-at91.c @@ -1260,22 +1260,22 @@ switch (type) { case IRQ_TYPE_EDGE_RISING: - irq_set_handler(d->irq, handle_simple_irq); + __irq_set_handler_locked(d->irq, handle_simple_irq); writel_relaxed(mask, pio + PIO_ESR); writel_relaxed(mask, pio + PIO_REHLSR); break; case IRQ_TYPE_EDGE_FALLING: - irq_set_handler(d->irq, handle_simple_irq); + __irq_set_handler_locked(d->irq, handle_simple_irq); writel_relaxed(mask, pio + PIO_ESR); writel_relaxed(mask, pio + PIO_FELLSR); break; case IRQ_TYPE_LEVEL_LOW: - irq_set_handler(d->irq, handle_level_irq); + __irq_set_handler_locked(d->irq, handle_level_irq); writel_relaxed(mask, pio + PIO_LSR); writel_relaxed(mask, pio + PIO_FELLSR); break; case IRQ_TYPE_LEVEL_HIGH: - irq_set_handler(d->irq, handle_level_irq); + __irq_set_handler_locked(d->irq, handle_level_irq); writel_relaxed(mask, pio + PIO_LSR); writel_relaxed(mask, pio + PIO_REHLSR); break; @@ -1284,7 +1284,7 @@ * disable additional interrupt modes: * fall back to default behavior */ - irq_set_handler(d->irq, handle_simple_irq); + __irq_set_handler_locked(d->irq, handle_simple_irq); writel_relaxed(mask, pio + PIO_AIMDR); return 0; case IRQ_TYPE_NONE: --- linux-3.13.0.orig/drivers/pinctrl/pinctrl-baytrail.c +++ linux-3.13.0/drivers/pinctrl/pinctrl-baytrail.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include #include @@ -485,9 +484,6 @@ irq_set_handler_data(hwirq, vg); irq_set_chained_handler(hwirq, byt_gpio_irq_handler); - - /* Register interrupt handlers for gpio signaled acpi events */ - acpi_gpiochip_request_interrupts(gc); } pm_runtime_enable(dev); --- linux-3.13.0.orig/drivers/pinctrl/pinctrl-imx.c +++ linux-3.13.0/drivers/pinctrl/pinctrl-imx.c @@ -365,7 +365,7 @@ const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id]; unsigned long config; - if (!pin_reg || !pin_reg->conf_reg) { + if (!pin_reg || pin_reg->conf_reg == -1) { seq_printf(s, "N/A"); return; } --- linux-3.13.0.orig/drivers/pinctrl/pinctrl-imx1-core.c +++ 
linux-3.13.0/drivers/pinctrl/pinctrl-imx1-core.c @@ -45,7 +45,7 @@ #define MX1_DDIR 0x00 #define MX1_OCR 0x04 #define MX1_ICONFA 0x0c -#define MX1_ICONFB 0x10 +#define MX1_ICONFB 0x14 #define MX1_GIUS 0x20 #define MX1_GPR 0x38 #define MX1_PUEN 0x40 @@ -97,13 +97,13 @@ u32 old_val; u32 new_val; - dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n", - reg, offset, value); - /* Use the next register if the pin's port pin number is >=16 */ if (pin_id % 32 >= 16) reg += 0x04; + dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n", + reg, offset, value); + /* Get current state of pins */ old_val = readl(reg); old_val &= mask; @@ -139,7 +139,7 @@ u32 reg_offset) { void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset; - int offset = pin_id % 16; + int offset = (pin_id % 16) * 2; /* Use the next register if the pin's port pin number is >=16 */ if (pin_id % 32 >= 16) --- linux-3.13.0.orig/drivers/pinctrl/pinctrl-sunxi.c +++ linux-3.13.0/drivers/pinctrl/pinctrl-sunxi.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -469,12 +470,6 @@ return val; } -static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - return pinctrl_gpio_direction_output(chip->base + offset); -} - static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { @@ -498,6 +493,13 @@ spin_unlock_irqrestore(&pctl->lock, flags); } +static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int value) +{ + sunxi_pinctrl_gpio_set(chip, offset, value); + return pinctrl_gpio_direction_output(chip->base + offset); +} + static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags) @@ -664,6 +666,7 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_get_chip(irq); struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); @@ -673,10 +676,12 @@ if (reg) { int irqoffset; + chained_irq_enter(chip, desc); for_each_set_bit(irqoffset, ®, SUNXI_IRQ_NUMBER) { int pin_irq = irq_find_mapping(pctl->domain, irqoffset); generic_handle_irq(pin_irq); } + chained_irq_exit(chip, desc); } } --- linux-3.13.0.orig/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ linux-3.13.0/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -276,7 +276,20 @@ if (!configs) return -ENOMEM; - configs[0] = pull; + switch (pull) { + case 0: + configs[0] = PIN_CONFIG_BIAS_DISABLE; + break; + case 1: + configs[0] = PIN_CONFIG_BIAS_PULL_DOWN; + break; + case 2: + configs[0] = PIN_CONFIG_BIAS_PULL_UP; + break; + default: + configs[0] = PIN_CONFIG_BIAS_DISABLE; + dev_err(data->dev, "invalid pull state %d - disabling\n", pull); + } map->type = PIN_MAP_TYPE_CONFIGS_PIN; map->data.configs.group_or_pin = data->groups[group]; --- linux-3.13.0.orig/drivers/platform/x86/Kconfig +++ linux-3.13.0/drivers/platform/x86/Kconfig @@ -197,6 +197,17 @@ To compile this driver as a module, choose M here: the module will be called hp_accel. +config HP_WIRELESS + tristate "HP wireless button" + depends on ACPI + depends on INPUT + help + This driver provides supports for new HP wireless button for Windows 8. + On such systems the driver should load automatically (via ACPI alias). + + To compile this driver as a module, choose M here: the module will + be called hp-wireless. 
+ config HP_WMI tristate "HP WMI extras" depends on ACPI_WMI --- linux-3.13.0.orig/drivers/platform/x86/Makefile +++ linux-3.13.0/drivers/platform/x86/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ACERHDF) += acerhdf.o obj-$(CONFIG_HP_ACCEL) += hp_accel.o +obj-$(CONFIG_HP_WIRELESS) += hp-wireless.o obj-$(CONFIG_HP_WMI) += hp-wmi.o obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o --- linux-3.13.0.orig/drivers/platform/x86/acer-wmi.c +++ linux-3.13.0/drivers/platform/x86/acer-wmi.c @@ -572,6 +572,25 @@ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), }, }, + { + .callback = video_set_backlight_video_vendor, + .ident = "Acer Aspire 5741", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), + }, + }, + { + /* + * Note no video_set_backlight_video_vendor, we must use the + * acer interface, as there is no native backlight interface. + */ + .ident = "Acer KAV80", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"), + }, + }, {} }; --- linux-3.13.0.orig/drivers/platform/x86/dell-laptop.c +++ linux-3.13.0/drivers/platform/x86/dell-laptop.c @@ -559,6 +559,29 @@ } static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); +static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, + struct serio *port) +{ + static bool extended; + + if (str & 0x20) + return false; + + if (unlikely(data == 0xe0)) { + extended = true; + return false; + } else if (unlikely(extended)) { + switch (data) { + case 0x8: + schedule_delayed_work(&dell_rfkill_work, + round_jiffies_relative(HZ / 4)); + break; + } + extended = false; + } + + return false; +} static int __init dell_setup_rfkill(void) { @@ -633,7 +656,16 @@ goto err_wwan; } + ret = i8042_install_filter(dell_laptop_i8042_filter); + if (ret) { + pr_warn("Unable to install key filter\n"); + goto err_filter; + } + return 0; +err_filter: + if (wwan_rfkill) + rfkill_unregister(wwan_rfkill); err_wwan: rfkill_destroy(wwan_rfkill); if (bluetooth_rfkill) @@ -755,30 +787,6 @@ led_classdev_unregister(&touchpad_led); } -static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, - struct serio *port) -{ - static bool extended; - - if (str & 0x20) - return false; - - if (unlikely(data == 0xe0)) { - extended = true; - return false; - } else if (unlikely(extended)) { - switch (data) { - case 0x8: - schedule_delayed_work(&dell_rfkill_work, - round_jiffies_relative(HZ / 4)); - break; - } - extended = false; - } - - return false; -} - static int __init dell_init(void) { int max_intensity = 0; @@ -828,12 +836,6 @@ goto fail_rfkill; } - ret = i8042_install_filter(dell_laptop_i8042_filter); - if (ret) { - pr_warn("Unable to install key filter\n"); - goto fail_filter; - } - if (quirks && quirks->touchpad_led) touchpad_led_init(&platform_device->dev); @@ -885,7 +887,6 @@ fail_backlight: i8042_remove_filter(dell_laptop_i8042_filter); cancel_delayed_work_sync(&dell_rfkill_work); -fail_filter: dell_cleanup_rfkill(); fail_rfkill: free_page((unsigned long)bufferpage); --- linux-3.13.0.orig/drivers/platform/x86/dell-wmi.c +++ linux-3.13.0/drivers/platform/x86/dell-wmi.c @@ -166,7 +166,7 @@ u16 *buffer_entry = (u16 *)obj->buffer.pointer; if (dell_new_hk_type && (buffer_entry[1] != 0x10)) { - pr_info("Received unknown WMI event (0x%x)\n", + pr_debug("Received unknown WMI event (0x%x)\n", buffer_entry[1]); kfree(obj); return; --- linux-3.13.0.orig/drivers/platform/x86/hp-wireless.c +++ 
linux-3.13.0/drivers/platform/x86/hp-wireless.c @@ -0,0 +1,132 @@ +/* + * hp-wireless button for Windows 8 + * + * Copyright (C) 2014 Alex Hung + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include +#include +#include +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alex Hung"); +MODULE_ALIAS("acpi*:HPQ6001:*"); + +static struct input_dev *hpwl_input_dev; + +static const struct acpi_device_id hpwl_ids[] = { + {"HPQ6001", 0}, + {"", 0}, +}; + +static int hp_wireless_input_setup(void) +{ + int err; + + hpwl_input_dev = input_allocate_device(); + if (!hpwl_input_dev) + return -ENOMEM; + + hpwl_input_dev->name = "HP Wireless hotkeys"; + hpwl_input_dev->phys = "hpq6001/input0"; + hpwl_input_dev->id.bustype = BUS_HOST; + hpwl_input_dev->evbit[0] = BIT(EV_KEY); + set_bit(KEY_RFKILL, hpwl_input_dev->keybit); + + err = input_register_device(hpwl_input_dev); + if (err) + goto err_free_dev; + + return 0; + +err_free_dev: + input_free_device(hpwl_input_dev); + return err; +} + +static void hp_wireless_input_destroy(void) +{ + input_unregister_device(hpwl_input_dev); +} + +static void hpwl_notify(struct acpi_device *acpi_dev, u32 event) +{ + if (event != 0x80) { + pr_info("Received unknown event (0x%x)\n", event); + return; + } + + input_report_key(hpwl_input_dev, KEY_RFKILL, 1); + input_sync(hpwl_input_dev); + input_report_key(hpwl_input_dev, KEY_RFKILL, 0); + input_sync(hpwl_input_dev); +} + +static int hpwl_add(struct acpi_device *device) +{ + int err; + + err = hp_wireless_input_setup(); + return err; +} + +static int hpwl_remove(struct acpi_device *device) +{ + hp_wireless_input_destroy(); + return 0; +} + +static struct acpi_driver hpwl_driver = { + .name = "hp-wireless", + .owner = THIS_MODULE, + .ids = hpwl_ids, + .ops = { + .add = hpwl_add, + .remove = hpwl_remove, + .notify = hpwl_notify, + }, +}; + +static int __init hpwl_init(void) +{ + int err; + + pr_info("Initializing HPQ6001 module\n"); + err = acpi_bus_register_driver(&hpwl_driver); + if (err) { + pr_err("Unable to register HP wireless control driver.\n"); + goto error_acpi_register; + } + + return 0; + +error_acpi_register: + return err; +} + +static void __exit hpwl_exit(void) +{ + pr_info("Exiting HPQ6001 module\n"); + acpi_bus_unregister_driver(&hpwl_driver); +} + +module_init(hpwl_init); +module_exit(hpwl_exit); --- linux-3.13.0.orig/drivers/platform/x86/hp_accel.c +++ linux-3.13.0/drivers/platform/x86/hp_accel.c @@ -77,6 +77,7 @@ static struct acpi_device_id lis3lv02d_device_ids[] = { {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */ {"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */ + {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */ {"", 0}, }; MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids); @@ -236,6 +237,7 @@ AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap), 
AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap), AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted), + AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted), { NULL, } /* Laptop models without axis info (yet): * "NC6910" "HP Compaq 6910" --- linux-3.13.0.orig/drivers/platform/x86/intel_ips.c +++ linux-3.13.0/drivers/platform/x86/intel_ips.c @@ -74,6 +74,7 @@ #include #include #include +#include #include #include #include "intel_ips.h" @@ -250,6 +251,8 @@ static const int IPS_ADJUST_PERIOD = 5000; /* ms */ static bool late_i915_load = false; +int i915_bdw_enabled = 0; +EXPORT_SYMBOL(i915_bdw_enabled); /* For initial average collection */ static const int IPS_SAMPLE_PERIOD = 200; /* ms */ @@ -1421,8 +1424,43 @@ * enable graphics turbo, otherwise we must disable it to avoid exceeding * thermal and power limits in the MCP. */ +static bool ips_get_i915_bdw_syms(struct ips_driver *ips) +{ + ips->read_mch_val = symbol_get(i915_bdw_read_mch_val); + if (!ips->read_mch_val) + goto out_err; + ips->gpu_raise = symbol_get(i915_bdw_gpu_raise); + if (!ips->gpu_raise) + goto out_put_mch; + ips->gpu_lower = symbol_get(i915_bdw_gpu_lower); + if (!ips->gpu_lower) + goto out_put_raise; + ips->gpu_busy = symbol_get(i915_bdw_gpu_busy); + if (!ips->gpu_busy) + goto out_put_lower; + ips->gpu_turbo_disable = symbol_get(i915_bdw_gpu_turbo_disable); + if (!ips->gpu_turbo_disable) + goto out_put_busy; + + return true; + +out_put_busy: + symbol_put(i915_bdw_gpu_busy); +out_put_lower: + symbol_put(i915_bdw_gpu_lower); +out_put_raise: + symbol_put(i915_bdw_gpu_raise); +out_put_mch: + symbol_put(i915_bdw_read_mch_val); +out_err: + return false; +} + static bool ips_get_i915_syms(struct ips_driver *ips) { + if (ips_get_i915_bdw_syms(ips) != false) + return true; + ips->read_mch_val = symbol_get(i915_read_mch_val); if (!ips->read_mch_val) goto out_err; @@ -1501,6 +1539,14 @@ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"), }, }, + { + .callback = ips_blacklist_callback, + .ident = "G60JX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "G60JX"), + }, + }, { } /* terminating entry */ }; --- linux-3.13.0.orig/drivers/platform/x86/samsung-laptop.c +++ linux-3.13.0/drivers/platform/x86/samsung-laptop.c @@ -1534,6 +1534,16 @@ }, .driver_data = &samsung_broken_acpi_video, }, + { + .callback = samsung_dmi_matched, + .ident = "NC210", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"), + DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"), + }, + .driver_data = &samsung_broken_acpi_video, + }, { }, }; MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); --- linux-3.13.0.orig/drivers/platform/x86/sony-laptop.c +++ linux-3.13.0/drivers/platform/x86/sony-laptop.c @@ -2917,6 +2917,10 @@ struct sonypi_event *events; }; +struct sony_pic_quirk_entry { + u8 set_wwan_power; +}; + struct sony_pic_dev { struct acpi_device *acpi_dev; struct sony_pic_irq *cur_irq; @@ -2927,6 +2931,7 @@ struct sonypi_eventtypes *event_types; int (*handle_irq)(const u8, const u8); int model; + struct sony_pic_quirk_entry *quirks; u16 evport_offset; u8 camera_power; u8 bluetooth_power; @@ -4362,6 +4367,12 @@ if (result) goto err_remove_pf; + if (spic_dev.quirks && spic_dev.quirks->set_wwan_power) { + /* + * Power isn't enabled by default. 
+ */ + __sony_pic_set_wwanpower(1); + } return 0; err_remove_pf: @@ -4436,6 +4447,16 @@ .drv.pm = &sony_pic_pm, }; +static struct sony_pic_quirk_entry sony_pic_vaio_vgn = { + .set_wwan_power = 1, +}; + +static int dmi_matched(const struct dmi_system_id *dmi) +{ + spic_dev.quirks = dmi->driver_data; + return 0; +} + static struct dmi_system_id __initdata sonypi_dmi_table[] = { { .ident = "Sony Vaio", @@ -4450,6 +4471,8 @@ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), }, + .callback = dmi_matched, + .driver_data = &sony_pic_vaio_vgn, }, { } }; --- linux-3.13.0.orig/drivers/platform/x86/thinkpad_acpi.c +++ linux-3.13.0/drivers/platform/x86/thinkpad_acpi.c @@ -3440,7 +3440,107 @@ delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj); hotkey_dev_attributes = NULL; - return (res < 0)? res : 1; + return (res < 0) ? res : 1; +} + +/* Thinkpad X1 Carbon support 5 modes including Home mode, Web browser + * mode, Web conference mode, Function mode and Lay-flat mode. + * We support Home mode and Function mode currently. + * + * Will consider support rest of modes in future. + * + */ +enum ADAPTIVE_KEY_MODE { + HOME_MODE, + WEB_BROWSER_MODE, + WEB_CONFERENCE_MODE, + FUNCTION_MODE, + LAYFLAT_MODE +}; + +const int adaptive_keyboard_modes[] = { + HOME_MODE, +/* WEB_BROWSER_MODE = 2, + WEB_CONFERENCE_MODE = 3, */ + FUNCTION_MODE +}; + +#define DFR_CHANGE_ROW 0x101 +#define DFR_SHOW_QUICKVIEW_ROW 0x102 + +/* press Fn key a while second, it will switch to Function Mode. Then + * release Fn key, previous mode be restored. + */ +static bool adaptive_keyboard_mode_is_saved; +static int adaptive_keyboard_prev_mode; + +static int adaptive_keyboard_get_next_mode(int mode) +{ + size_t i; + size_t max_mode = ARRAY_SIZE(adaptive_keyboard_modes) - 1; + + for (i = 0; i <= max_mode; i++) { + if (adaptive_keyboard_modes[i] == mode) + break; + } + + if (i >= max_mode) + i = 0; + else + i++; + + return adaptive_keyboard_modes[i]; +} + +static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode) +{ + u32 current_mode = 0; + int new_mode = 0; + + switch (scancode) { + case DFR_CHANGE_ROW: + if (adaptive_keyboard_mode_is_saved) { + new_mode = adaptive_keyboard_prev_mode; + adaptive_keyboard_mode_is_saved = false; + } else { + if (!acpi_evalf( + hkey_handle, ¤t_mode, + "GTRW", "dd", 0)) { + pr_err("Cannot read adaptive keyboard mode\n"); + return false; + } else { + new_mode = adaptive_keyboard_get_next_mode( + current_mode); + } + } + + if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) { + pr_err("Cannot set adaptive keyboard mode\n"); + return false; + } + + return true; + + case DFR_SHOW_QUICKVIEW_ROW: + if (!acpi_evalf(hkey_handle, + &adaptive_keyboard_prev_mode, + "GTRW", "dd", 0)) { + pr_err("Cannot read adaptive keyboard mode\n"); + return false; + } else { + adaptive_keyboard_mode_is_saved = true; + + if (!acpi_evalf(hkey_handle, + NULL, "STRW", "vd", FUNCTION_MODE)) { + pr_err("Cannot set adaptive keyboard mode\n"); + return false; + } + } + return true; + + default: + return false; + } } static bool hotkey_notify_hotkey(const u32 hkey, @@ -3462,6 +3562,8 @@ *ignore_acpi_ev = true; } return true; + } else { + return adaptive_keyboard_hotkey_notify_hotkey(scancode); } return false; } @@ -3734,13 +3836,28 @@ static void hotkey_suspend(void) { + int hkeyv; + /* Do these on suspend, we get the events on early resume! 
*/ hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE; hotkey_autosleep_ack = 0; + + /* save previous mode of adaptive keyboard of X1 Carbon */ + if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { + if ((hkeyv >> 8) == 2) { + if (!acpi_evalf(hkey_handle, + &adaptive_keyboard_prev_mode, + "GTRW", "dd", 0)) { + pr_err("Cannot read adaptive keyboard mode.\n"); + } + } + } } static void hotkey_resume(void) { + int hkeyv; + tpacpi_disable_brightness_delay(); if (hotkey_status_set(true) < 0 || @@ -3753,6 +3870,18 @@ hotkey_wakeup_reason_notify_change(); hotkey_wakeup_hotunplug_complete_notify_change(); hotkey_poll_setup_safe(false); + + /* restore previous mode of adapive keyboard of X1 Carbon */ + if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { + if ((hkeyv >> 8) == 2) { + if (!acpi_evalf(hkey_handle, + NULL, + "STRW", "vd", + adaptive_keyboard_prev_mode)) { + pr_err("Cannot set adaptive keyboard mode.\n"); + } + } + } } /* procfs -------------------------------------------------------------- */ @@ -4447,7 +4576,7 @@ str_supported(video_supported != TPACPI_VIDEO_NONE), video_supported); - return (video_supported != TPACPI_VIDEO_NONE)? 0 : 1; + return (video_supported != TPACPI_VIDEO_NONE) ? 0 : 1; } static void video_exit(void) @@ -4540,7 +4669,7 @@ return -ENOSYS; } - return (res)? 0 : -EIO; + return (res) ? 0 : -EIO; } static int video_autosw_get(void) @@ -4566,7 +4695,7 @@ static int video_autosw_set(int enable) { - if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable)? 1 : 0)) + if (!acpi_evalf(vid_handle, NULL, "_DOS", "vd", (enable) ? 1 : 0)) return -EIO; return 0; } @@ -4601,20 +4730,20 @@ return -EIO; } - return (res)? 0 : -EIO; + return (res) ? 0 : -EIO; } static int video_expand_toggle(void) { switch (video_supported) { case TPACPI_VIDEO_570: - return acpi_evalf(ec_handle, NULL, "_Q17", "v")? + return acpi_evalf(ec_handle, NULL, "_Q17", "v") ? 0 : -EIO; case TPACPI_VIDEO_770: - return acpi_evalf(vid_handle, NULL, "VEXP", "v")? + return acpi_evalf(vid_handle, NULL, "VEXP", "v") ? 0 : -EIO; case TPACPI_VIDEO_NEW: - return acpi_evalf(NULL, NULL, "\\VEXP", "v")? + return acpi_evalf(NULL, NULL, "\\VEXP", "v") ? 0 : -EIO; default: return -ENOSYS; @@ -4758,14 +4887,14 @@ if (tp_features.light) { if (cmos_handle) { rc = acpi_evalf(cmos_handle, NULL, NULL, "vd", - (status)? + (status) ? TP_CMOS_THINKLIGHT_ON : TP_CMOS_THINKLIGHT_OFF); } else { rc = acpi_evalf(lght_handle, NULL, NULL, "vd", - (status)? 1 : 0); + (status) ? 1 : 0); } - return (rc)? 0 : -EIO; + return (rc) ? 0 : -EIO; } return -ENXIO; @@ -4794,7 +4923,7 @@ static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev) { - return (light_get_status() == 1)? LED_FULL : LED_OFF; + return (light_get_status() == 1) ? LED_FULL : LED_OFF; } static struct tpacpi_led_classdev tpacpi_led_thinklight = { @@ -4916,7 +5045,7 @@ return -EINVAL; res = issue_thinkpad_cmos_command(cmos_cmd); - return (res)? res : count; + return (res) ? res : count; } static struct device_attribute dev_attr_cmos_command = @@ -4940,7 +5069,7 @@ if (res) return res; - return (cmos_handle)? 0 : 1; + return (cmos_handle) ? 0 : 1; } static void cmos_exit(void) @@ -5050,9 +5179,9 @@ if (!acpi_evalf(ec_handle, &status, "GLED", "dd", 1 << led)) return -EIO; - led_s = (status == 0)? + led_s = (status == 0) ? TPACPI_LED_OFF : - ((status == 1)? + ((status == 1) ? TPACPI_LED_ON : TPACPI_LED_BLINK); tpacpi_led_state_cache[led] = led_s; @@ -5449,7 +5578,7 @@ tp_features.beep_needs_two_args = !!(quirks & TPACPI_BEEP_Q1); - return (beep_handle)? 
0 : 1; + return (beep_handle) ? 0 : 1; } static int beep_read(struct seq_file *m) @@ -6398,7 +6527,7 @@ if (!rc && ibm_backlight_device) backlight_force_update(ibm_backlight_device, BACKLIGHT_UPDATE_SYSFS); - return (rc == -EINTR)? -ERESTARTSYS : rc; + return (rc == -EINTR) ? -ERESTARTSYS : rc; } static struct ibm_struct brightness_driver_data = { @@ -7855,7 +7984,7 @@ } mutex_unlock(&fan_mutex); - return (rc)? rc : count; + return (rc) ? rc : count; } static struct device_attribute dev_attr_fan_pwm1 = @@ -8453,9 +8582,21 @@ tpacpi_led_set(i, false); } +static void mute_led_resume(void) +{ + int i; + + for (i = 0; i < TPACPI_LED_MAX; i++) { + struct tp_led_table *t = &led_tables[i]; + if (t->state >= 0) + mute_led_on_off(t, t->state); + } +} + static struct ibm_struct mute_led_driver_data = { .name = "mute_led", .exit = mute_led_exit, + .resume = mute_led_resume, }; /**************************************************************************** @@ -8521,7 +8662,7 @@ { static char text_unsupported[] __initdata = "not supported"; - return (is_supported)? &text_unsupported[4] : &text_unsupported[0]; + return (is_supported) ? &text_unsupported[4] : &text_unsupported[0]; } #endif /* CONFIG_THINKPAD_ACPI_DEBUG */ @@ -8642,7 +8783,7 @@ ibm->name, ret); ibm_exit(ibm); - return (ret < 0)? ret : 0; + return (ret < 0) ? ret : 0; } /* Probing */ @@ -8652,17 +8793,31 @@ return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'); } -/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lacks (#.##c) */ -static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, +static bool __pure __init tpacpi_is_valid_fw_id(const char * const s, const char t) { - return s && strlen(s) >= 8 && + /* + * Most models: xxyTkkWW (#.##c) + * Ancient 570/600 and -SL lacks (#.##c) + */ + if (s && strlen(s) >= 8 && tpacpi_is_fw_digit(s[0]) && tpacpi_is_fw_digit(s[1]) && s[2] == t && (s[3] == 'T' || s[3] == 'N') && tpacpi_is_fw_digit(s[4]) && - tpacpi_is_fw_digit(s[5]); + tpacpi_is_fw_digit(s[5])) + return true; + + /* New models: xxxyTkkW (#.##c); T550 and some others */ + return s && strlen(s) >= 8 && + tpacpi_is_fw_digit(s[0]) && + tpacpi_is_fw_digit(s[1]) && + tpacpi_is_fw_digit(s[2]) && + s[3] == t && + (s[4] == 'T' || s[4] == 'N') && + tpacpi_is_fw_digit(s[5]) && + tpacpi_is_fw_digit(s[6]); } /* returns 0 - probe ok, or < 0 - probe error. 
--- linux-3.13.0.orig/drivers/pnp/isapnp/core.c +++ linux-3.13.0/drivers/pnp/isapnp/core.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include "../base.h" @@ -990,7 +991,7 @@ .disable = isapnp_disable_resources, }; -static int __init isapnp_init(void) +static int __init real_isapnp_init(void) { int cards; struct pnp_card *card; @@ -1084,6 +1085,16 @@ return 0; } +static void __init async_isapnp_init(void *unused, async_cookie_t cookie) +{ + (void)real_isapnp_init(); +} + +static int __init isapnp_init(void) +{ + async_schedule(async_isapnp_init, NULL); + return 0; +} device_initcall(isapnp_init); /* format is: noisapnp */ --- linux-3.13.0.orig/drivers/pnp/pnpacpi/rsparser.c +++ linux-3.13.0/drivers/pnp/pnpacpi/rsparser.c @@ -183,9 +183,7 @@ struct resource r = {0}; int i, flags; - if (acpi_dev_resource_memory(res, &r) - || acpi_dev_resource_io(res, &r) - || acpi_dev_resource_address_space(res, &r) + if (acpi_dev_resource_address_space(res, &r) || acpi_dev_resource_ext_address_space(res, &r)) { pnp_add_resource(dev, &r); return AE_OK; @@ -217,6 +215,17 @@ } switch (res->type) { + case ACPI_RESOURCE_TYPE_MEMORY24: + case ACPI_RESOURCE_TYPE_MEMORY32: + case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: + if (acpi_dev_resource_memory(res, &r)) + pnp_add_resource(dev, &r); + break; + case ACPI_RESOURCE_TYPE_IO: + case ACPI_RESOURCE_TYPE_FIXED_IO: + if (acpi_dev_resource_io(res, &r)) + pnp_add_resource(dev, &r); + break; case ACPI_RESOURCE_TYPE_DMA: dma = &res->data.dma; if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) --- linux-3.13.0.orig/drivers/power/88pm860x_charger.c +++ linux-3.13.0/drivers/power/88pm860x_charger.c @@ -711,6 +711,7 @@ return 0; out_irq: + power_supply_unregister(&info->usb); while (--i >= 0) free_irq(info->irq[i], info); out: --- linux-3.13.0.orig/drivers/power/bq24190_charger.c +++ linux-3.13.0/drivers/power/bq24190_charger.c @@ -929,7 +929,7 @@ charger->properties = bq24190_charger_properties; charger->num_properties = ARRAY_SIZE(bq24190_charger_properties); charger->supplied_to = bq24190_charger_supplied_to; - charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to); + charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to); charger->get_property = bq24190_charger_get_property; charger->set_property = bq24190_charger_set_property; charger->property_is_writeable = bq24190_charger_property_is_writeable; --- linux-3.13.0.orig/drivers/power/charger-manager.c +++ linux-3.13.0/drivers/power/charger-manager.c @@ -86,6 +86,7 @@ static bool is_batt_present(struct charger_manager *cm) { union power_supply_propval val; + struct power_supply *psy; bool present = false; int i, ret; @@ -102,10 +103,17 @@ present = true; break; case CM_CHARGER_STAT: - for (i = 0; cm->charger_stat[i]; i++) { - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_PRESENT, &val); + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { + psy = power_supply_get_by_name( + cm->desc->psy_charger_stat[i]); + if (!psy) { + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", + cm->desc->psy_charger_stat[i]); + continue; + } + + ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT, + &val); if (ret == 0 && val.intval) { present = true; break; @@ -128,14 +136,20 @@ static bool is_ext_pwr_online(struct charger_manager *cm) { union power_supply_propval val; + struct power_supply *psy; bool online = false; int i, ret; /* If at least one of them has one, it's yes. 
*/ - for (i = 0; cm->charger_stat[i]; i++) { - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_ONLINE, &val); + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { + psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); + if (!psy) { + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", + cm->desc->psy_charger_stat[i]); + continue; + } + + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); if (ret == 0 && val.intval) { online = true; break; @@ -178,6 +192,7 @@ { int i, ret; bool charging = false; + struct power_supply *psy; union power_supply_propval val; /* If there is no battery, it cannot be charged */ @@ -185,17 +200,22 @@ return false; /* If at least one of the charger is charging, return yes */ - for (i = 0; cm->charger_stat[i]; i++) { + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { /* 1. The charger sholuld not be DISABLED */ if (cm->emergency_stop) continue; if (!cm->charger_enabled) continue; + psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); + if (!psy) { + dev_err(cm->dev, "Cannot find power supply \"%s\"\n", + cm->desc->psy_charger_stat[i]); + continue; + } + /* 2. The charger should be online (ext-power) */ - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_ONLINE, &val); + ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val); if (ret) { dev_warn(cm->dev, "Cannot read ONLINE value from %s\n", cm->desc->psy_charger_stat[i]); @@ -208,9 +228,7 @@ * 3. The charger should not be FULL, DISCHARGING, * or NOT_CHARGING. */ - ret = cm->charger_stat[i]->get_property( - cm->charger_stat[i], - POWER_SUPPLY_PROP_STATUS, &val); + ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val); if (ret) { dev_warn(cm->dev, "Cannot read STATUS value from %s\n", cm->desc->psy_charger_stat[i]); @@ -1501,19 +1519,21 @@ return -EINVAL; } + if (!desc->psy_fuel_gauge) { + dev_err(&pdev->dev, "No fuel gauge power supply defined\n"); + return -EINVAL; + } + /* Counting index only */ while (desc->psy_charger_stat[i]) i++; - cm->charger_stat = devm_kzalloc(&pdev->dev, - sizeof(struct power_supply *) * i, GFP_KERNEL); - if (!cm->charger_stat) - return -ENOMEM; - + /* Check if charger's supplies are present at probe */ for (i = 0; desc->psy_charger_stat[i]; i++) { - cm->charger_stat[i] = power_supply_get_by_name( - desc->psy_charger_stat[i]); - if (!cm->charger_stat[i]) { + struct power_supply *psy; + + psy = power_supply_get_by_name(desc->psy_charger_stat[i]); + if (!psy) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_charger_stat[i]); return -ENODEV; @@ -1843,8 +1863,8 @@ int i; bool found = false; - for (i = 0; cm->charger_stat[i]; i++) { - if (psy == cm->charger_stat[i]) { + for (i = 0; cm->desc->psy_charger_stat[i]; i++) { + if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) { found = true; break; } --- linux-3.13.0.orig/drivers/power/max17040_battery.c +++ linux-3.13.0/drivers/power/max17040_battery.c @@ -148,7 +148,7 @@ { struct max17040_chip *chip = i2c_get_clientdata(client); - if (chip->pdata->battery_online) + if (chip->pdata && chip->pdata->battery_online) chip->online = chip->pdata->battery_online(); else chip->online = 1; @@ -158,7 +158,8 @@ { struct max17040_chip *chip = i2c_get_clientdata(client); - if (!chip->pdata->charger_online || !chip->pdata->charger_enable) { + if (!chip->pdata || !chip->pdata->charger_online + || !chip->pdata->charger_enable) { chip->status = POWER_SUPPLY_STATUS_UNKNOWN; return; } --- 
linux-3.13.0.orig/drivers/power/reset/Kconfig +++ linux-3.13.0/drivers/power/reset/Kconfig @@ -51,3 +51,11 @@ depends on POWER_RESET help Reboot support for the APM SoC X-Gene Eval boards. + +config POWER_RESET_SYSCON + bool "Generic SYSCON regmap reset driver" + depends on MFD_SYSCON + depends on POWER_RESET + depends on ARCH_XGENE + help + Reboot support for generic SYSCON mapped register reset. --- linux-3.13.0.orig/drivers/power/reset/Makefile +++ linux-3.13.0/drivers/power/reset/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o +obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o --- linux-3.13.0.orig/drivers/power/reset/syscon-reboot.c +++ linux-3.13.0/drivers/power/reset/syscon-reboot.c @@ -0,0 +1,100 @@ +/* + * Generic Syscon Reboot Driver + * + * Copyright (c) 2013, Applied Micro Circuits Corporation + * Author: Feng Kan + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + * This driver provides system reboot functionality for APM X-Gene SoC. + * For system shutdown, this is board specify. If a board designer + * implements GPIO shutdown, use the gpio-poweroff.c driver. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct syscon_reboot_context { + struct regmap *map; + u32 offset; + u32 mask; +}; + +static struct syscon_reboot_context *syscon_reboot_ctx; + +static void syscon_restart(enum reboot_mode reboot_mode, const char *cmd) +{ + struct syscon_reboot_context *ctx = syscon_reboot_ctx; + unsigned long timeout; + + /* Issue the reboot */ + if (ctx->map) + regmap_write(ctx->map, ctx->offset, ctx->mask); + + timeout = jiffies + HZ; + while (time_before(jiffies, timeout)) + cpu_relax(); + + pr_emerg("Unable to restart system\n"); +} + +static int syscon_reboot_probe(struct platform_device *pdev) +{ + struct syscon_reboot_context *ctx; + struct device *dev = &pdev->dev; + + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + dev_err(&pdev->dev, "out of memory for context\n"); + return -ENOMEM; + } + + ctx->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap"); + if (IS_ERR(ctx->map)) + return PTR_ERR(ctx->map); + + if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset)) + return -EINVAL; + + if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask)) + return -EINVAL; + + arm_pm_restart = syscon_restart; + syscon_reboot_ctx = ctx; + + return 0; +} + +static struct of_device_id syscon_reboot_of_match[] = { + { .compatible = "syscon-reboot" }, + {} +}; + +static struct platform_driver syscon_reboot_driver = { + .probe = syscon_reboot_probe, + .driver = { + .name = "syscon-reboot", + .of_match_table = syscon_reboot_of_match, + }, +}; +module_platform_driver(syscon_reboot_driver); --- linux-3.13.0.orig/drivers/rapidio/devices/tsi721.h +++ linux-3.13.0/drivers/rapidio/devices/tsi721.h @@ -678,6 +678,7 @@ struct list_head free_list; dma_cookie_t completed_cookie; struct tasklet_struct tasklet; + bool active; }; #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ --- linux-3.13.0.orig/drivers/rapidio/devices/tsi721_dma.c +++ linux-3.13.0/drivers/rapidio/devices/tsi721_dma.c @@ -206,8 +206,8 @@ { /* Disable BDMA channel interrupts */ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); - - tasklet_schedule(&bdma_chan->tasklet); + if (bdma_chan->active) + tasklet_schedule(&bdma_chan->tasklet); } #ifdef CONFIG_PCI_MSI @@ -287,6 +287,12 @@ "desc %p not ACKed\n", tx_desc); } + if (ret == NULL) { + dev_dbg(bdma_chan->dchan.device->dev, + "%s: unable to obtain tx descriptor\n", __func__); + goto err_out; + } + i = bdma_chan->wr_count_next % bdma_chan->bd_num; if (i == bdma_chan->bd_num - 1) { i = 0; @@ -297,7 +303,7 @@ tx_desc->txd.phys = bdma_chan->bd_phys + i * sizeof(struct tsi721_dma_desc); tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i]; - +err_out: spin_unlock_bh(&bdma_chan->lock); return ret; @@ -562,7 +568,7 @@ } #endif /* CONFIG_PCI_MSI */ - tasklet_enable(&bdma_chan->tasklet); + bdma_chan->active = true; tsi721_bdma_interrupt_enable(bdma_chan, 1); return bdma_chan->bd_num - 1; @@ -576,9 +582,7 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); -#ifdef CONFIG_PCI_MSI struct tsi721_device *priv = to_tsi721(dchan->device); -#endif LIST_HEAD(list); dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); @@ -589,14 +593,25 @@ BUG_ON(!list_empty(&bdma_chan->active_list)); BUG_ON(!list_empty(&bdma_chan->queue)); - tasklet_disable(&bdma_chan->tasklet); + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + +#ifdef CONFIG_PCI_MSI + if (priv->flags & 
TSI721_USING_MSIX) { + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector); + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + + bdma_chan->id].vector); + } else +#endif + synchronize_irq(priv->pdev->irq); + + tasklet_kill(&bdma_chan->tasklet); spin_lock_bh(&bdma_chan->lock); list_splice_init(&bdma_chan->free_list, &list); spin_unlock_bh(&bdma_chan->lock); - tsi721_bdma_interrupt_enable(bdma_chan, 0); - #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_DMA0_DONE + @@ -790,6 +805,7 @@ bdma_chan->dchan.cookie = 1; bdma_chan->dchan.chan_id = i; bdma_chan->id = i; + bdma_chan->active = false; spin_lock_init(&bdma_chan->lock); @@ -799,7 +815,6 @@ tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, (unsigned long)bdma_chan); - tasklet_disable(&bdma_chan->tasklet); list_add_tail(&bdma_chan->dchan.device_node, &mport->dma.channels); } --- linux-3.13.0.orig/drivers/regulator/arizona-ldo1.c +++ linux-3.13.0/drivers/regulator/arizona-ldo1.c @@ -141,8 +141,6 @@ .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .get_bypass = regulator_get_bypass_regmap, - .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_desc arizona_ldo1 = { @@ -153,11 +151,9 @@ .vsel_reg = ARIZONA_LDO1_CONTROL_1, .vsel_mask = ARIZONA_LDO1_VSEL_MASK, - .bypass_reg = ARIZONA_LDO1_CONTROL_1, - .bypass_mask = ARIZONA_LDO1_BYPASS, .min_uV = 900000, - .uV_step = 50000, - .n_voltages = 7, + .uV_step = 25000, + .n_voltages = 13, .enable_time = 500, .owner = THIS_MODULE, @@ -203,6 +199,7 @@ */ switch (arizona->type) { case WM5102: + case WM8997: desc = &arizona_ldo1_hc; ldo1->init_data = arizona_ldo1_dvfs; break; --- linux-3.13.0.orig/drivers/regulator/core.c +++ linux-3.13.0/drivers/regulator/core.c @@ -953,6 +953,8 @@ return 0; } +static int _regulator_do_enable(struct regulator_dev *rdev); + /** * set_machine_constraints - sets regulator constraints * @rdev: regulator source @@ -1013,10 +1015,9 @@ /* If the constraints say the regulator should be on at this point * and we have control then make sure it is enabled. 
*/ - if ((rdev->constraints->always_on || rdev->constraints->boot_on) && - ops->enable) { - ret = ops->enable(rdev); - if (ret < 0) { + if (rdev->constraints->always_on || rdev->constraints->boot_on) { + ret = _regulator_do_enable(rdev); + if (ret < 0 && ret != -EINVAL) { rdev_err(rdev, "failed to enable\n"); goto out; } @@ -1272,6 +1273,8 @@ if (r->dev.parent && node == r->dev.of_node) return r; + *ret = -EPROBE_DEFER; + return NULL; } else { /* * If we couldn't even get the node then it's @@ -1312,7 +1315,7 @@ struct regulator_dev *rdev; struct regulator *regulator = ERR_PTR(-EPROBE_DEFER); const char *devname = NULL; - int ret = -EPROBE_DEFER; + int ret; if (id == NULL) { pr_err("get() with no identifier\n"); @@ -1322,6 +1325,11 @@ if (dev) devname = dev_name(dev); + if (have_full_constraints()) + ret = -ENODEV; + else + ret = -EPROBE_DEFER; + mutex_lock(®ulator_list_mutex); rdev = regulator_dev_lookup(dev, id, &ret); @@ -1352,7 +1360,7 @@ rdev = dummy_regulator_rdev; goto found; } else { - dev_err(dev, "dummy supplies not allowed\n"); + dev_warn(dev, "dummy supplies not allowed\n"); } mutex_unlock(®ulator_list_mutex); @@ -1471,7 +1479,7 @@ } EXPORT_SYMBOL_GPL(regulator_get_optional); -/* Locks held by regulator_put() */ +/* regulator_list_mutex lock held by regulator_put() */ static void _regulator_put(struct regulator *regulator) { struct regulator_dev *rdev; @@ -1486,12 +1494,14 @@ /* remove any sysfs entries */ if (regulator->dev) sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); + mutex_lock(&rdev->mutex); kfree(regulator->supply_name); list_del(®ulator->list); kfree(regulator); rdev->open_count--; rdev->exclusive = 0; + mutex_unlock(&rdev->mutex); module_put(rdev->owner); } @@ -1754,10 +1764,12 @@ trace_regulator_enable(rdev_get_name(rdev)); if (rdev->ena_pin) { - ret = regulator_ena_gpio_ctrl(rdev, true); - if (ret < 0) - return ret; - rdev->ena_gpio_state = 1; + if (!rdev->ena_gpio_state) { + ret = regulator_ena_gpio_ctrl(rdev, true); + if (ret < 0) + return ret; + rdev->ena_gpio_state = 1; + } } else if (rdev->desc->ops->enable) { ret = rdev->desc->ops->enable(rdev); if (ret < 0) @@ -1887,10 +1899,12 @@ trace_regulator_disable(rdev_get_name(rdev)); if (rdev->ena_pin) { - ret = regulator_ena_gpio_ctrl(rdev, false); - if (ret < 0) - return ret; - rdev->ena_gpio_state = 0; + if (rdev->ena_gpio_state) { + ret = regulator_ena_gpio_ctrl(rdev, false); + if (ret < 0) + return ret; + rdev->ena_gpio_state = 0; + } } else if (rdev->desc->ops->disable) { ret = rdev->desc->ops->disable(rdev); @@ -1900,8 +1914,6 @@ trace_regulator_disable_complete(rdev_get_name(rdev)); - _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, - NULL); return 0; } @@ -1925,6 +1937,8 @@ rdev_err(rdev, "failed to disable\n"); return ret; } + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, + NULL); } rdev->use_count = 0; @@ -1977,20 +1991,16 @@ { int ret = 0; - /* force disable */ - if (rdev->desc->ops->disable) { - /* ah well, who wants to live forever... 
*/ - ret = rdev->desc->ops->disable(rdev); - if (ret < 0) { - rdev_err(rdev, "failed to force disable\n"); - return ret; - } - /* notify other consumers that power has been forced off */ - _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | - REGULATOR_EVENT_DISABLE, NULL); + ret = _regulator_do_disable(rdev); + if (ret < 0) { + rdev_err(rdev, "failed to force disable\n"); + return ret; } - return ret; + _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | + REGULATOR_EVENT_DISABLE, NULL); + + return 0; } /** @@ -3448,12 +3458,6 @@ config->ena_gpio, ret); goto wash; } - - if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH) - rdev->ena_gpio_state = 1; - - if (config->ena_gpio_invert) - rdev->ena_gpio_state = !rdev->ena_gpio_state; } /* set regulator constraints */ @@ -3623,23 +3627,20 @@ mutex_lock(®ulator_list_mutex); list_for_each_entry(rdev, ®ulator_list, list) { - struct regulator_ops *ops = rdev->desc->ops; - mutex_lock(&rdev->mutex); - if ((rdev->use_count > 0 || rdev->constraints->always_on) && - ops->enable) { - error = ops->enable(rdev); - if (error) - ret = error; + if (rdev->use_count > 0 || rdev->constraints->always_on) { + if (!_regulator_is_enabled(rdev)) { + error = _regulator_do_enable(rdev); + if (error) + ret = error; + } } else { if (!have_full_constraints()) goto unlock; - if (!ops->disable) - goto unlock; if (!_regulator_is_enabled(rdev)) goto unlock; - error = ops->disable(rdev); + error = _regulator_do_disable(rdev); if (error) ret = error; } @@ -3813,7 +3814,7 @@ ops = rdev->desc->ops; c = rdev->constraints; - if (!ops->disable || (c && c->always_on)) + if (c && c->always_on) continue; mutex_lock(&rdev->mutex); @@ -3834,7 +3835,7 @@ /* We log since this may kill the system if it * goes wrong. */ rdev_info(rdev, "disabling\n"); - ret = ops->disable(rdev); + ret = _regulator_do_disable(rdev); if (ret != 0) { rdev_err(rdev, "couldn't disable: %d\n", ret); } --- linux-3.13.0.orig/drivers/regulator/da9063-regulator.c +++ linux-3.13.0/drivers/regulator/da9063-regulator.c @@ -1,3 +1,4 @@ + /* * Regulator driver for DA9063 PMIC series * @@ -60,7 +61,8 @@ .desc.ops = &da9063_ldo_ops, \ .desc.min_uV = (min_mV) * 1000, \ .desc.uV_step = (step_mV) * 1000, \ - .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \ + .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \ + + (DA9063_V##regl_name##_BIAS)), \ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \ .desc.enable_mask = DA9063_LDO_EN, \ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \ --- linux-3.13.0.orig/drivers/regulator/max77693.c +++ linux-3.13.0/drivers/regulator/max77693.c @@ -231,7 +231,7 @@ struct max77693_pmic_dev *max77693_pmic; struct max77693_regulator_data *rdata = NULL; int num_rdata, i; - struct regulator_config config; + struct regulator_config config = { }; num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata); if (!rdata || num_rdata <= 0) { --- linux-3.13.0.orig/drivers/rtc/Kconfig +++ linux-3.13.0/drivers/rtc/Kconfig @@ -726,7 +726,7 @@ config RTC_DRV_EFI tristate "EFI RTC" - depends on IA64 + depends on EFI && !X86 help If you say yes here you will get support for the EFI Real Time Clock. @@ -1286,6 +1286,15 @@ This driver can also be built as a module. If so, the module will be called rtc-moxart +config RTC_DRV_XGENE + tristate "APM X-Gene RTC" + help + If you say yes here you get support for the APM X-Gene SoC real time + clock. + + This driver can also be built as a module, if so, the module + will be called "rtc-xgene". 
+ comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME --- linux-3.13.0.orig/drivers/rtc/Makefile +++ linux-3.13.0/drivers/rtc/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_CLASS) += rtc-core.o rtc-core-y := class.o interface.o +ifdef CONFIG_RTC_DRV_EFI +rtc-core-y += rtc-efi-platform.o +endif + rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o @@ -131,5 +135,6 @@ obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o +obj-$(CONFIG_RTC_DRV_XGENE) += rtc-xgene.o obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o --- linux-3.13.0.orig/drivers/rtc/rtc-at91rm9200.c +++ linux-3.13.0/drivers/rtc/rtc-at91rm9200.c @@ -48,6 +48,7 @@ static const struct at91_rtc_config *at91_rtc_config; static DECLARE_COMPLETION(at91_rtc_updated); +static DECLARE_COMPLETION(at91_rtc_upd_rdy); static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; @@ -161,6 +162,8 @@ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); + wait_for_completion(&at91_rtc_upd_rdy); + /* Stop Time/Calendar from counting */ cr = at91_rtc_read(AT91_RTC_CR); at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); @@ -183,7 +186,9 @@ /* Restart Time/Calendar */ cr = at91_rtc_read(AT91_RTC_CR); + at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV); at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM)); + at91_rtc_write_ier(AT91_RTC_SECEV); return 0; } @@ -290,8 +295,10 @@ if (rtsr) { /* this interrupt is shared! Is it ours? */ if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF); - if (rtsr & AT91_RTC_SECEV) - events |= (RTC_UF | RTC_IRQF); + if (rtsr & AT91_RTC_SECEV) { + complete(&at91_rtc_upd_rdy); + at91_rtc_write_idr(AT91_RTC_SECEV); + } if (rtsr & AT91_RTC_ACKUPD) complete(&at91_rtc_updated); @@ -413,6 +420,11 @@ return PTR_ERR(rtc); platform_set_drvdata(pdev, rtc); + /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy + * completion. + */ + at91_rtc_write_ier(AT91_RTC_SECEV); + dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n"); return 0; } --- linux-3.13.0.orig/drivers/rtc/rtc-cmos.c +++ linux-3.13.0/drivers/rtc/rtc-cmos.c @@ -34,11 +34,11 @@ #include #include #include -#include #include #include #include #include +#include /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include @@ -377,6 +377,51 @@ return 0; } +/* + * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes. + */ +static bool alarm_disable_quirk; + +static int __init set_alarm_disable_quirk(const struct dmi_system_id *id) +{ + alarm_disable_quirk = true; + pr_info("rtc-cmos: BIOS has alarm-disable quirk. 
"); + pr_info("RTC alarms disabled\n"); + return 0; +} + +static const struct dmi_system_id rtc_quirks[] __initconst = { + /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */ + { + .callback = set_alarm_disable_quirk, + .ident = "IBM Truman", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "4852570"), + }, + }, + /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */ + { + .callback = set_alarm_disable_quirk, + .ident = "Gigabyte GA-990XA-UD3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"), + }, + }, + /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */ + { + .callback = set_alarm_disable_quirk, + .ident = "Toshiba Satellite L300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"), + }, + }, + {} +}; + static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct cmos_rtc *cmos = dev_get_drvdata(dev); @@ -385,6 +430,9 @@ if (!is_valid_irq(cmos->irq)) return -EINVAL; + if (alarm_disable_quirk) + return 0; + spin_lock_irqsave(&rtc_lock, flags); if (enabled) @@ -1157,6 +1205,8 @@ platform_driver_registered = true; } + dmi_check_system(rtc_quirks); + if (retval == 0) return 0; --- linux-3.13.0.orig/drivers/rtc/rtc-efi-platform.c +++ linux-3.13.0/drivers/rtc/rtc-efi-platform.c @@ -0,0 +1,31 @@ +/* + * Moved from arch/ia64/kernel/time.c + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * Stephane Eranian + * David Mosberger + * Copyright (C) 1999 Don Dugger + * Copyright (C) 1999-2000 VA Linux Systems + * Copyright (C) 1999-2000 Walt Drummond + */ +#include +#include +#include +#include +#include + +static struct platform_device rtc_efi_dev = { + .name = "rtc-efi", + .id = -1, +}; + +static int __init rtc_init(void) +{ + if (efi_enabled(EFI_RUNTIME_SERVICES)) + if (platform_device_register(&rtc_efi_dev) < 0) + pr_err("unable to register rtc device...\n"); + + /* not necessarily an error */ + return 0; +} +module_init(rtc_init); --- linux-3.13.0.orig/drivers/rtc/rtc-max8907.c +++ linux-3.13.0/drivers/rtc/rtc-max8907.c @@ -51,7 +51,7 @@ { struct max8907_rtc *rtc = data; - regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0); + regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0); rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); @@ -64,7 +64,7 @@ bcd2bin(regs[RTC_YEAR1]) - 1900; tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1; tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f); - tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1; + tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07); if (regs[RTC_HOUR] & HOUR_12) { tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f); if (tm->tm_hour == 12) @@ -88,7 +88,7 @@ regs[RTC_YEAR1] = bin2bcd(low); regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1); regs[RTC_DATE] = bin2bcd(tm->tm_mday); - regs[RTC_WEEKDAY] = tm->tm_wday + 1; + regs[RTC_WEEKDAY] = tm->tm_wday; regs[RTC_HOUR] = bin2bcd(tm->tm_hour); regs[RTC_MIN] = bin2bcd(tm->tm_min); regs[RTC_SEC] = bin2bcd(tm->tm_sec); @@ -153,7 +153,7 @@ tm_to_regs(&alrm->time, regs); /* Disable alarm while we update the target time */ - ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0); + ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0); if (ret < 0) return ret; @@ -163,8 +163,7 @@ return ret; if (alrm->enabled) - ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, - 0x7f, 0x7f); + ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77); return ret; } --- 
linux-3.13.0.orig/drivers/rtc/rtc-omap.c +++ linux-3.13.0/drivers/rtc/rtc-omap.c @@ -379,6 +379,8 @@ rtc_writel(KICK1_VALUE, OMAP_RTC_KICK1_REG); } + device_init_wakeup(&pdev->dev, true); + rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &omap_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -441,8 +443,6 @@ * is write-only, and always reads as zero...) */ - device_init_wakeup(&pdev->dev, true); - if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT) pr_info("%s: split power mode\n", pdev->name); @@ -452,6 +452,7 @@ return 0; fail0: + device_init_wakeup(&pdev->dev, false); if (id_entry && (id_entry->driver_data & OMAP_RTC_HAS_KICKER)) rtc_writel(0, OMAP_RTC_KICK0_REG); pm_runtime_put_sync(&pdev->dev); --- linux-3.13.0.orig/drivers/rtc/rtc-s5m.c +++ linux-3.13.0/drivers/rtc/rtc-s5m.c @@ -665,6 +665,7 @@ static const struct platform_device_id s5m_rtc_id[] = { { "s5m-rtc", 0 }, + { }, }; static struct platform_driver s5m_rtc_driver = { --- linux-3.13.0.orig/drivers/rtc/rtc-sirfsoc.c +++ linux-3.13.0/drivers/rtc/rtc-sirfsoc.c @@ -290,14 +290,6 @@ rtc_div = ((32768 / RTC_HZ) / 2) - 1; sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV); - rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &sirfsoc_rtc_ops, THIS_MODULE); - if (IS_ERR(rtcdrv->rtc)) { - err = PTR_ERR(rtcdrv->rtc); - dev_err(&pdev->dev, "can't register RTC device\n"); - return err; - } - /* 0x3 -> RTC_CLK */ sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK, rtcdrv->rtc_base + RTC_CLOCK_SWITCH); @@ -312,6 +304,14 @@ rtcdrv->overflow_rtc = sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); + rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &sirfsoc_rtc_ops, THIS_MODULE); + if (IS_ERR(rtcdrv->rtc)) { + err = PTR_ERR(rtcdrv->rtc); + dev_err(&pdev->dev, "can't register RTC device\n"); + return err; + } + rtcdrv->irq = platform_get_irq(pdev, 0); err = devm_request_irq( &pdev->dev, --- linux-3.13.0.orig/drivers/rtc/rtc-xgene.c +++ linux-3.13.0/drivers/rtc/rtc-xgene.c @@ -0,0 +1,278 @@ +/* + * APM X-Gene SoC Real Time Clock Driver + * + * Copyright (c) 2014, Applied Micro Circuits Corporation + * Author: Rameshwar Prasad Sahu + * Loc Ho + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* RTC CSR Registers */ +#define RTC_CCVR 0x00 +#define RTC_CMR 0x04 +#define RTC_CLR 0x08 +#define RTC_CCR 0x0C +#define RTC_CCR_IE BIT(0) +#define RTC_CCR_MASK BIT(1) +#define RTC_CCR_EN BIT(2) +#define RTC_CCR_WEN BIT(3) +#define RTC_STAT 0x10 +#define RTC_STAT_BIT BIT(0) +#define RTC_RSTAT 0x14 +#define RTC_EOI 0x18 +#define RTC_VER 0x1C + +struct xgene_rtc_dev { + struct rtc_device *rtc; + struct device *dev; + unsigned long alarm_time; + void __iomem *csr_base; + struct clk *clk; + unsigned int irq_wake; +}; + +static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); + + rtc_time_to_tm(readl(pdata->csr_base + RTC_CCVR), tm); + return rtc_valid_tm(tm); +} + +static int xgene_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); + + /* + * NOTE: After the following write, the RTC_CCVR is only reflected + * after the update cycle of 1 seconds. + */ + writel((u32) secs, pdata->csr_base + RTC_CLR); + readl(pdata->csr_base + RTC_CLR); /* Force a barrier */ + + return 0; +} + +static int xgene_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); + + rtc_time_to_tm(pdata->alarm_time, &alrm->time); + alrm->enabled = readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE; + + return 0; +} + +static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled) +{ + struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); + u32 ccr; + + ccr = readl(pdata->csr_base + RTC_CCR); + if (enabled) { + ccr &= ~RTC_CCR_MASK; + ccr |= RTC_CCR_IE; + } else { + ccr &= ~RTC_CCR_IE; + ccr |= RTC_CCR_MASK; + } + writel(ccr, pdata->csr_base + RTC_CCR); + + return 0; +} + +static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); + unsigned long rtc_time; + unsigned long alarm_time; + + rtc_time = readl(pdata->csr_base + RTC_CCVR); + rtc_tm_to_time(&alrm->time, &alarm_time); + + pdata->alarm_time = alarm_time; + writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR); + + xgene_rtc_alarm_irq_enable(dev, alrm->enabled); + + return 0; +} + +static const struct rtc_class_ops xgene_rtc_ops = { + .read_time = xgene_rtc_read_time, + .set_mmss = xgene_rtc_set_mmss, + .read_alarm = xgene_rtc_read_alarm, + .set_alarm = xgene_rtc_set_alarm, + .alarm_irq_enable = xgene_rtc_alarm_irq_enable, +}; + +static irqreturn_t xgene_rtc_interrupt(int irq, void *id) +{ + struct xgene_rtc_dev *pdata = (struct xgene_rtc_dev *) id; + + /* Check if interrupt asserted */ + if (!(readl(pdata->csr_base + RTC_STAT) & RTC_STAT_BIT)) + return IRQ_NONE; + + /* Clear interrupt */ + readl(pdata->csr_base + RTC_EOI); + + rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF); + + return IRQ_HANDLED; +} + +static int xgene_rtc_probe(struct platform_device *pdev) +{ + struct xgene_rtc_dev *pdata; + struct resource *res; + int ret; + int irq; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + platform_set_drvdata(pdev, pdata); + pdata->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdata->csr_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdata->csr_base)) + return PTR_ERR(pdata->csr_base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return irq; + } + ret 
= devm_request_irq(&pdev->dev, irq, xgene_rtc_interrupt, 0, + dev_name(&pdev->dev), pdata); + if (ret) { + dev_err(&pdev->dev, "Could not request IRQ\n"); + return ret; + } + + pdata->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pdata->clk)) { + dev_err(&pdev->dev, "Couldn't get the clock for RTC\n"); + return -ENODEV; + } + clk_prepare_enable(pdata->clk); + + /* Turn on the clock and the crystal */ + writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR); + + device_init_wakeup(&pdev->dev, 1); + + pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &xgene_rtc_ops, THIS_MODULE); + if (IS_ERR(pdata->rtc)) { + clk_disable_unprepare(pdata->clk); + return PTR_ERR(pdata->rtc); + } + + /* HW does not support update faster than 1 seconds */ + pdata->rtc->uie_unsupported = 1; + + return 0; +} + +static int xgene_rtc_remove(struct platform_device *pdev) +{ + struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev); + + xgene_rtc_alarm_irq_enable(&pdev->dev, 0); + device_init_wakeup(&pdev->dev, 0); + clk_disable_unprepare(pdata->clk); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int xgene_rtc_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev); + int irq; + + irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { + if (!enable_irq_wake(irq)) + pdata->irq_wake = 1; + } else { + xgene_rtc_alarm_irq_enable(dev, 0); + clk_disable(pdata->clk); + } + + return 0; +} + +static int xgene_rtc_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev); + int irq; + + irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { + if (pdata->irq_wake) { + disable_irq_wake(irq); + pdata->irq_wake = 0; + } + } else { + clk_enable(pdata->clk); + xgene_rtc_alarm_irq_enable(dev, 1); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume); + +#ifdef CONFIG_OF +static const struct of_device_id xgene_rtc_of_match[] = { + {.compatible = "apm,xgene-rtc" }, + { } +}; +MODULE_DEVICE_TABLE(of, xgene_rtc_of_match); +#endif + +static struct platform_driver xgene_rtc_driver = { + .probe = xgene_rtc_probe, + .remove = xgene_rtc_remove, + .driver = { + .owner = THIS_MODULE, + .name = "xgene-rtc", + .pm = &xgene_rtc_pm_ops, + .of_match_table = of_match_ptr(xgene_rtc_of_match), + }, +}; + +module_platform_driver(xgene_rtc_driver); + +MODULE_DESCRIPTION("APM X-Gene SoC RTC driver"); +MODULE_AUTHOR("Rameshwar Sahu "); +MODULE_LICENSE("GPL"); --- linux-3.13.0.orig/drivers/s390/char/con3215.c +++ linux-3.13.0/drivers/s390/char/con3215.c @@ -922,7 +922,7 @@ raw3215_freelist = req; } - cdev = ccw_device_probe_console(); + cdev = ccw_device_probe_console(&raw3215_ccw_driver); if (IS_ERR(cdev)) return -ENODEV; --- linux-3.13.0.orig/drivers/s390/char/con3270.c +++ linux-3.13.0/drivers/s390/char/con3270.c @@ -576,7 +576,6 @@ static int __init con3270_init(void) { - struct ccw_device *cdev; struct raw3270 *rp; void *cbuf; int i; @@ -591,10 +590,7 @@ cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } - cdev = ccw_device_probe_console(); - if (IS_ERR(cdev)) - return -ENODEV; - rp = raw3270_setup_console(cdev); + rp = raw3270_setup_console(); if (IS_ERR(rp)) return PTR_ERR(rp); --- linux-3.13.0.orig/drivers/s390/char/raw3270.c +++ linux-3.13.0/drivers/s390/char/raw3270.c @@ -776,16 +776,24 @@ } #ifdef CONFIG_TN3270_CONSOLE +/* Tentative definition - see below for actual 
definition. */ +static struct ccw_driver raw3270_ccw_driver; + /* * Setup 3270 device configured as console. */ -struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) +struct raw3270 __init *raw3270_setup_console(void) { + struct ccw_device *cdev; unsigned long flags; struct raw3270 *rp; char *ascebc; int rc; + cdev = ccw_device_probe_console(&raw3270_ccw_driver); + if (IS_ERR(cdev)) + return ERR_CAST(cdev); + rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); ascebc = kzalloc(256, GFP_KERNEL); rc = raw3270_setup_device(cdev, rp, ascebc); --- linux-3.13.0.orig/drivers/s390/char/raw3270.h +++ linux-3.13.0/drivers/s390/char/raw3270.h @@ -190,7 +190,7 @@ wake_up(&raw3270_wait_queue); } -struct raw3270 *raw3270_setup_console(struct ccw_device *cdev); +struct raw3270 *raw3270_setup_console(void); void raw3270_wait_cons_dev(struct raw3270 *); /* Notifier for device addition/removal */ --- linux-3.13.0.orig/drivers/s390/cio/chsc.c +++ linux-3.13.0/drivers/s390/cio/chsc.c @@ -560,18 +560,27 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) { - do { + static int ntsm_unsupported; + + while (true) { memset(sei, 0, sizeof(*sei)); sei->request.length = 0x0010; sei->request.code = 0x000e; - sei->ntsm = ntsm; + if (!ntsm_unsupported) + sei->ntsm = ntsm; if (chsc(sei)) break; if (sei->response.code != 0x0001) { - CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", - sei->response.code); + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", + sei->response.code, sei->ntsm); + + if (sei->response.code == 3 && sei->ntsm) { + /* Fallback for old firmware. */ + ntsm_unsupported = 1; + continue; + } break; } @@ -587,7 +596,10 @@ CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); break; } - } while (sei->u.nt0_area.flags & 0x80); + + if (!(sei->u.nt0_area.flags & 0x80)) + break; + } } /* --- linux-3.13.0.orig/drivers/s390/cio/device.c +++ linux-3.13.0/drivers/s390/cio/device.c @@ -1610,7 +1610,7 @@ return rc; } -struct ccw_device *ccw_device_probe_console(void) +struct ccw_device *ccw_device_probe_console(struct ccw_driver *drv) { struct io_subchannel_private *io_priv; struct ccw_device *cdev; @@ -1632,6 +1632,7 @@ kfree(io_priv); return cdev; } + cdev->drv = drv; set_io_private(sch, io_priv); ret = ccw_device_console_enable(cdev, sch); if (ret) { --- linux-3.13.0.orig/drivers/s390/crypto/ap_bus.c +++ linux-3.13.0/drivers/s390/crypto/ap_bus.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "ap_bus.h" @@ -71,7 +72,7 @@ MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \ "Copyright IBM Corp. 
2006, 2012"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("z90crypt"); +MODULE_ALIAS_CRYPTO("z90crypt"); /* * Module parameter --- linux-3.13.0.orig/drivers/sbus/char/bbc_envctrl.c +++ linux-3.13.0/drivers/sbus/char/bbc_envctrl.c @@ -452,6 +452,9 @@ if (!tp) return; + INIT_LIST_HEAD(&tp->bp_list); + INIT_LIST_HEAD(&tp->glob_list); + tp->client = bbc_i2c_attach(bp, op); if (!tp->client) { kfree(tp); @@ -497,6 +500,9 @@ if (!fp) return; + INIT_LIST_HEAD(&fp->bp_list); + INIT_LIST_HEAD(&fp->glob_list); + fp->client = bbc_i2c_attach(bp, op); if (!fp->client) { kfree(fp); --- linux-3.13.0.orig/drivers/sbus/char/bbc_i2c.c +++ linux-3.13.0/drivers/sbus/char/bbc_i2c.c @@ -301,13 +301,18 @@ if (!bp) return NULL; + INIT_LIST_HEAD(&bp->temps); + INIT_LIST_HEAD(&bp->fans); + bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); if (!bp->i2c_control_regs) goto fail; - bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); - if (!bp->i2c_bussel_reg) - goto fail; + if (op->num_resources == 2) { + bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); + if (!bp->i2c_bussel_reg) + goto fail; + } bp->waiting = 0; init_waitqueue_head(&bp->wq); --- linux-3.13.0.orig/drivers/scsi/NCR5380.c +++ linux-3.13.0/drivers/scsi/NCR5380.c @@ -2655,14 +2655,14 @@ * * Purpose : abort a command * - * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the - * host byte of the result field to, if zero DID_ABORTED is + * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the + * host byte of the result field to, if zero DID_ABORTED is * used. * - * Returns : 0 - success, -1 on failure. + * Returns : SUCCESS - success, FAILED on failure. * - * XXX - there is no way to abort the command that is currently - * connected, you have to wait for it to complete. If this is + * XXX - there is no way to abort the command that is currently + * connected, you have to wait for it to complete. If this is * a problem, we could implement longjmp() / setjmp(), setjmp() * called where the loop started in NCR5380_main(). * @@ -2712,7 +2712,7 @@ * aborted flag and get back into our main loop. */ - return 0; + return SUCCESS; } #endif --- linux-3.13.0.orig/drivers/scsi/aha1740.c +++ linux-3.13.0/drivers/scsi/aha1740.c @@ -531,7 +531,7 @@ * quiet as possible... */ - return 0; + return SUCCESS; } static struct scsi_host_template aha1740_template = { --- linux-3.13.0.orig/drivers/scsi/atari_NCR5380.c +++ linux-3.13.0/drivers/scsi/atari_NCR5380.c @@ -2613,7 +2613,7 @@ * host byte of the result field to, if zero DID_ABORTED is * used. * - * Returns : 0 - success, -1 on failure. + * Returns : SUCCESS - success, FAILED on failure. * * XXX - there is no way to abort the command that is currently * connected, you have to wait for it to complete. 
If this is --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be.h +++ linux-3.13.0/drivers/scsi/be2iscsi/be.h @@ -83,9 +83,20 @@ /*ISCSI */ +struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + bool enable; + u32 min_eqd; /* in usecs */ + u32 max_eqd; /* in usecs */ + u32 prev_eqd; /* in usecs */ + u32 et_eqd; /* configured val when aic is off */ + ulong jiffs; + u64 eq_prev; /* Used to calculate eqe */ +}; + struct be_eq_obj { bool todo_mcc_cq; bool todo_cq; + u32 cq_count; struct be_queue_info q; struct beiscsi_hba *phba; struct be_queue_info *cq; @@ -98,6 +109,14 @@ struct be_queue_info cq; }; +struct beiscsi_mcc_tag_state { +#define MCC_TAG_STATE_COMPLETED 0x00 +#define MCC_TAG_STATE_RUNNING 0x01 +#define MCC_TAG_STATE_TIMEOUT 0x02 + uint8_t tag_state; + struct be_dma_mem tag_mem_state; +}; + struct be_ctrl_info { u8 __iomem *csr; u8 __iomem *db; /* Door Bell */ @@ -122,6 +141,8 @@ unsigned short mcc_alloc_index; unsigned short mcc_free_index; unsigned int mcc_tag_available; + + struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1]; }; #include "be_cmds.h" @@ -129,6 +150,7 @@ #define PAGE_SHIFT_4K 12 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) #define mcc_timeout 120000 /* 12s timeout */ +#define BEISCSI_LOGOUT_SYNC_DELAY 250 /* Returns number of pages spanned by the data starting at the given addr */ #define PAGES_4K_SPANNED(_address, size) \ --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_cmds.c +++ linux-3.13.0/drivers/scsi/be2iscsi/be_cmds.c @@ -138,7 +138,7 @@ * @phba: Driver private structure * @tag: Tag for the MBX Command * @wrb: the WRB used for the MBX Command - * @cmd_hdr: IOCTL Hdr for the MBX Cmd + * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd * * Waits for MBX completion with the passed TAG. * @@ -148,21 +148,26 @@ **/ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, - void *cmd_hdr) + struct be_dma_mem *mbx_cmd_mem) { int rc = 0; uint32_t mcc_tag_response; uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_mcc_wrb *temp_wrb; - struct be_cmd_req_hdr *ioctl_hdr; - struct be_cmd_resp_hdr *ioctl_resp_hdr; + struct be_cmd_req_hdr *mbx_hdr; + struct be_cmd_resp_hdr *mbx_resp_hdr; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; if (beiscsi_error(phba)) { free_mcc_tag(&phba->ctrl, tag); - return -EIO; + return -EPERM; } + /* Set MBX Tag state to Active */ + spin_lock(&phba->ctrl.mbox_lock); + phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING; + spin_unlock(&phba->ctrl.mbox_lock); + /* wait for the mccq completion */ rc = wait_event_interruptible_timeout( phba->ctrl.mcc_wait[tag], @@ -171,56 +176,71 @@ BEISCSI_HOST_MBX_TIMEOUT)); if (rc <= 0) { + struct be_dma_mem *tag_mem; + /* Set MBX Tag state to timeout */ + spin_lock(&phba->ctrl.mbox_lock); + phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT; + spin_unlock(&phba->ctrl.mbox_lock); + + /* Store resource addr to be freed later */ + tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; + if (mbx_cmd_mem) { + tag_mem->size = mbx_cmd_mem->size; + tag_mem->va = mbx_cmd_mem->va; + tag_mem->dma = mbx_cmd_mem->dma; + } else + tag_mem->size = 0; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Completion timed out\n"); - rc = -EBUSY; - - /* decrement the mccq used count */ - atomic_dec(&phba->ctrl.mcc_obj.q.used); - - goto release_mcc_tag; - } else + return -EBUSY; + } else { rc = 0; + /* Set MBX Tag state to completed */ + spin_lock(&phba->ctrl.mbox_lock); + 
phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; + spin_unlock(&phba->ctrl.mbox_lock); + } mcc_tag_response = phba->ctrl.mcc_numtag[tag]; status = (mcc_tag_response & CQE_STATUS_MASK); addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT); - if (cmd_hdr) { - ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr; + if (mbx_cmd_mem) { + mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va; } else { wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT; temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num); - ioctl_hdr = embedded_payload(temp_wrb); + mbx_hdr = embedded_payload(temp_wrb); if (wrb) *wrb = temp_wrb; } if (status || addl_status) { - beiscsi_log(phba, KERN_ERR, + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : MBX Cmd Failed for " "Subsys : %d Opcode : %d with " "Status : %d and Extd_Status : %d\n", - ioctl_hdr->subsystem, - ioctl_hdr->opcode, + mbx_hdr->subsystem, + mbx_hdr->opcode, status, addl_status); if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { - ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; + mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT | BEISCSI_LOG_EH | BEISCSI_LOG_CONFIG, "BC_%d : Insufficent Buffer Error " "Resp_Len : %d Actual_Resp_Len : %d\n", - ioctl_resp_hdr->response_length, - ioctl_resp_hdr->actual_resp_len); + mbx_resp_hdr->response_length, + mbx_resp_hdr->actual_resp_len); rc = -EAGAIN; goto release_mcc_tag; @@ -255,6 +275,19 @@ ASYNC_EVENT_CODE_LINK_STATE); } +static bool is_iscsi_evt(u32 trailer) +{ + return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & + ASYNC_TRAILER_EVENT_CODE_MASK) == + ASYNC_EVENT_CODE_ISCSI; +} + +static int iscsi_evt_type(u32 trailer) +{ + return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & + ASYNC_TRAILER_EVENT_TYPE_MASK; +} + static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) { if (compl->flags != 0) { @@ -319,6 +352,7 @@ int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl, struct be_mcc_compl *compl) { + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); u16 compl_status, extd_status; unsigned short tag; @@ -338,7 +372,32 @@ ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000); ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8; ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF); - wake_up_interruptible(&ctrl->mcc_wait[tag]); + + if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) { + wake_up_interruptible(&ctrl->mcc_wait[tag]); + } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) { + struct be_dma_mem *tag_mem; + tag_mem = &ctrl->ptag_state[tag].tag_mem_state; + + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | + BEISCSI_LOG_CONFIG, + "BC_%d : MBX Completion for timeout Command " + "from FW\n"); + /* Check if memory needs to be freed */ + if (tag_mem->size) + pci_free_consistent(ctrl->pdev, tag_mem->size, + tag_mem->va, tag_mem->dma); + + /* Change tag state */ + spin_lock(&phba->ctrl.mbox_lock); + ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; + spin_unlock(&phba->ctrl.mbox_lock); + + /* Free MCC Tag */ + free_mcc_tag(ctrl, tag); + } + return 0; } @@ -354,8 +413,23 @@ return NULL; } -static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) +/** + * be2iscsi_fail_session(): Closing session with appropriate error + * @cls_session: ptr to session + * + * Depending on adapter state appropriate error flag is passed. 
+ **/ +void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) { + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + uint32_t iscsi_err_flag; + + if (phba->state & BE_ADAPTER_STATE_SHUTDOWN) + iscsi_err_flag = ISCSI_ERR_INVALID_HOST; + else + iscsi_err_flag = ISCSI_ERR_CONN_FAILED; + iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); } @@ -377,7 +451,7 @@ } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { - phba->state = BE_ADAPTER_LINK_UP; + phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT; beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, @@ -386,18 +460,6 @@ } } -static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm, - u16 num_popped) -{ - u32 val = 0; - val |= qid & DB_CQ_RING_ID_MASK; - if (arm) - val |= 1 << DB_CQ_REARM_SHIFT; - val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; - iowrite32(val, phba->db_va + DB_CQ_OFFSET); -} - - int beiscsi_process_mcc(struct beiscsi_hba *phba) { struct be_mcc_compl *compl; @@ -412,7 +474,28 @@ /* Interpret compl as a async link evt */ beiscsi_async_link_state_process(phba, (struct be_async_event_link_state *) compl); - else + else if (is_iscsi_evt(compl->flags)) { + switch (iscsi_evt_type(compl->flags)) { + case ASYNC_EVENT_NEW_ISCSI_TGT_DISC: + case ASYNC_EVENT_NEW_ISCSI_CONN: + case ASYNC_EVENT_NEW_TCP_CONN: + phba->state |= BE_ADAPTER_CHECK_BOOT; + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | + BEISCSI_LOG_MBOX, + "BC_%d : Async iscsi Event," + " flags handled = 0x%08x\n", + compl->flags); + break; + default: + beiscsi_log(phba, KERN_ERR, + BEISCSI_LOG_CONFIG | + BEISCSI_LOG_MBOX, + "BC_%d : Unsupported Async" + " Event, flags = 0x%08x\n", + compl->flags); + } + } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -428,7 +511,7 @@ } if (num) - beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num); + hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0); spin_unlock_bh(&phba->ctrl.mcc_cq_lock); return status; --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_cmds.h +++ linux-3.13.0/drivers/scsi/be2iscsi/be_cmds.h @@ -26,9 +26,9 @@ * The commands are serviced by the ARM processor in the OneConnect's MPU. 
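The is_iscsi_evt()/iscsi_evt_type() helpers and the switch above are plain shift-and-mask decoding of the 32-bit async-event trailer. A standalone sketch of that decoding, using the shift/mask values the be_cmds.h hunk below defines and a made-up trailer carrying a "new iSCSI connection" event:

#include <stdio.h>
#include <stdint.h>

/* Field layout from the patch: event code in bits 8-15, event type above it. */
#define EVT_CODE_SHIFT   8
#define EVT_CODE_MASK    0xFF
#define EVT_TYPE_SHIFT   16
#define EVT_TYPE_MASK    0xF

#define EVT_CODE_ISCSI   0x4
#define EVT_NEW_CONN     0x5

int main(void)
{
	/* Hypothetical trailer carrying an iSCSI "new connection" event. */
	uint32_t trailer = (EVT_NEW_CONN << EVT_TYPE_SHIFT) |
			   (EVT_CODE_ISCSI << EVT_CODE_SHIFT);

	uint32_t code = (trailer >> EVT_CODE_SHIFT) & EVT_CODE_MASK;
	uint32_t type = (trailer >> EVT_TYPE_SHIFT) & EVT_TYPE_MASK;

	printf("trailer=0x%08x code=0x%x type=0x%x\n", trailer, code, type);
	/* prints: trailer=0x00050400 code=0x4 type=0x5 */
	return 0;
}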
*/ struct be_sge { - u32 pa_lo; - u32 pa_hi; - u32 len; + __le32 pa_lo; + __le32 pa_hi; + __le32 len; }; #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ @@ -71,6 +71,7 @@ #define BEISCSI_FW_MBX_TIMEOUT 100 /* MBOX Command VER */ +#define MBX_CMD_VER1 0x01 #define MBX_CMD_VER2 0x02 struct be_mcc_compl { @@ -103,7 +104,7 @@ /********** MCC door bell ************/ #define DB_MCCQ_OFFSET 0x140 -#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */ +#define DB_MCCQ_RING_ID_MASK 0xFFFF /* bits 0 - 15 */ /* Number of entries posted */ #define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */ @@ -117,6 +118,14 @@ #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF #define ASYNC_EVENT_CODE_LINK_STATE 0x1 +#define ASYNC_EVENT_CODE_ISCSI 0x4 + +#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */ +#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF +#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4 +#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5 +#define ASYNC_EVENT_NEW_TCP_CONN 0x7 + struct be_async_event_trailer { u32 code; }; @@ -271,6 +280,12 @@ u16 rsvd0; /* sword */ } __packed; +struct be_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +} __packed; + struct mgmt_chap_format { u32 flags; u8 intr_chap_name[256]; @@ -617,12 +632,12 @@ /******************** Modify EQ Delay *******************/ struct be_cmd_req_modify_eq_delay { struct be_cmd_req_hdr hdr; - u32 num_eq; + __le32 num_eq; struct { - u32 eq_id; - u32 phase; - u32 delay_multiplier; - } delay[8]; + __le32 eq_id; + __le32 phase; + __le32 delay_multiplier; + } delay[MAX_CPUS]; } __packed; /******************** Get MAC ADDR *******************/ @@ -708,8 +723,11 @@ void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, + int num); int beiscsi_mccq_compl(struct beiscsi_hba *phba, - uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); + uint32_t tag, struct be_mcc_wrb **wrb, + struct be_dma_mem *mbx_cmd_mem); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); @@ -1004,6 +1022,26 @@ u8 rsvd0[3]; } __packed; +struct tcp_connect_and_offload_in_v1 { + struct be_cmd_req_hdr hdr; + struct ip_addr_format ip_address; + u16 tcp_port; + u16 cid; + u16 cq_id; + u16 defq_id; + struct phys_addr dataout_template_pa; + u16 hdr_ring_id; + u16 data_ring_id; + u8 do_offload; + u8 ifd_state; + u8 rsvd0[2]; + u16 tcp_window_size; + u8 tcp_window_scale_count; + u8 rsvd1; + u32 tcp_mss:24; + u8 rsvd2; +} __packed; + struct tcp_connect_and_offload_out { struct be_cmd_resp_hdr hdr; u32 connection_handle; @@ -1017,8 +1055,8 @@ int *users_final_status; } __packed; -#define DB_DEF_PDU_RING_ID_MASK 0x3FF /* bits 0 - 9 */ -#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 0 - 9 */ +#define DB_DEF_PDU_RING_ID_MASK 0x3FFF /* bits 0 - 13 */ +#define DB_DEF_PDU_CQPROC_MASK 0x3FFF /* bits 16 - 29 */ #define DB_DEF_PDU_REARM_SHIFT 14 #define DB_DEF_PDU_EVENT_SHIFT 15 #define DB_DEF_PDU_CQPROC_SHIFT 16 @@ -1317,4 +1355,5 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, u8 subsystem, u8 opcode, int cmd_len); +void be2iscsi_fail_session(struct iscsi_cls_session *cls_session); #endif /* !BEISCSI_CMDS_H */ --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_iscsi.c +++ linux-3.13.0/drivers/scsi/be2iscsi/be_iscsi.c @@ -541,10 +541,8 @@ ip_type = BE2_IPV6; len = mgmt_get_if_info(phba, ip_type, &if_info); - if (len) { - kfree(if_info); + if (len) return len; - } switch (param) 
{ case ISCSI_NET_PARAM_IPV4_ADDR: @@ -569,7 +567,7 @@ break; case ISCSI_NET_PARAM_VLAN_ID: if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) - return -EINVAL; + len = -EINVAL; else len = sprintf(buf, "%d\n", (if_info->vlan_priority & @@ -577,7 +575,7 @@ break; case ISCSI_NET_PARAM_VLAN_PRIORITY: if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) - return -EINVAL; + len = -EINVAL; else len = sprintf(buf, "%d\n", ((if_info->vlan_priority >> 13) & @@ -1108,7 +1106,7 @@ struct beiscsi_hba *phba = beiscsi_ep->phba; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; - unsigned int tag; + unsigned int tag, req_memsize; int ret = -ENOMEM; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, @@ -1129,8 +1127,14 @@ (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; + + if (is_chip_be2_be3r(phba)) + req_memsize = sizeof(struct tcp_connect_and_offload_in); + else + req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); + nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(struct tcp_connect_and_offload_in), + req_memsize, &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { @@ -1141,7 +1145,7 @@ beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } - nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); + nonemb_cmd.size = req_memsize; memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); if (tag <= 0) { @@ -1155,16 +1159,18 @@ return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); + ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BS_%d : mgmt_open_connection Failed"); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + if (ret != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); - return -EBUSY; + return ret; } ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; @@ -1268,6 +1274,31 @@ } /** + * beiscsi_flush_cq()- Flush the CQ created. + * @phba: ptr device priv structure. 
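The beiscsi_get_if_param() changes above follow a single-exit shape: the early "return -EINVAL" becomes "len = -EINVAL" so control still reaches the point where the if_info buffer is released (the be_mgmt.c hunk later in this patch makes the same move explicit with its "exit:" label and kfree). A small sketch of the pattern, with invented names and malloc/free in place of the driver allocation:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Single-exit pattern: every early "return -EINVAL" becomes "len = -EINVAL",
 * so the buffer obtained up front is always released on the way out. */
static int report_param(int param, int vlan_disabled, char *buf)
{
	char *info = malloc(32);         /* stands in for the if_info allocation */
	int len;

	if (!info)
		return -ENOMEM;

	switch (param) {
	case 0:                          /* e.g. VLAN id */
		if (vlan_disabled)
			len = -EINVAL;       /* was: return -EINVAL (leaked info) */
		else
			len = sprintf(buf, "%d\n", 42);
		break;
	default:
		len = -ENOSYS;
		break;
	}

	free(info);                      /* reached on every path now */
	return len;
}

int main(void)
{
	char buf[16];
	printf("%d\n", report_param(0, 1, buf));   /* -EINVAL, no leak */
	return 0;
}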
+ * + * Before the connection resource are freed flush + * all the CQ enteries + **/ +static void beiscsi_flush_cq(struct beiscsi_hba *phba) +{ + uint16_t i; + struct be_eq_obj *pbe_eq; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + beiscsi_process_cq(pbe_eq); + blk_iopoll_enable(&pbe_eq->iopoll); + } +} + +/** * beiscsi_close_conn - Upload the connection * @ep: The iscsi endpoint * @flag: The type of connection closure @@ -1288,6 +1319,10 @@ } ret = beiscsi_mccq_compl(phba, tag, NULL, NULL); + + /* Flush the CQ entries */ + beiscsi_flush_cq(phba); + return ret; } @@ -1361,6 +1396,7 @@ beiscsi_mccq_compl(phba, tag, NULL, NULL); beiscsi_close_conn(beiscsi_ep, tcp_upload_flag); free_ep: + msleep(BEISCSI_LOGOUT_SYNC_DELAY); beiscsi_free_ep(beiscsi_ep); beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_main.c +++ linux-3.13.0/drivers/scsi/be2iscsi/be_main.c @@ -228,6 +228,7 @@ struct invalidate_command_table *inv_tbl; struct be_dma_mem nonemb_cmd; unsigned int cid, tag, num_invalidate; + int rc; cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; @@ -285,9 +286,11 @@ return FAILED; } - beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); + return iscsi_eh_abort(sc); } @@ -303,6 +306,7 @@ struct invalidate_command_table *inv_tbl; struct be_dma_mem nonemb_cmd; unsigned int cid, tag, i, num_invalidate; + int rc; /* invalidate iocbs */ cls_session = starget_to_session(scsi_target(sc->device)); @@ -363,9 +367,10 @@ return FAILED; } - beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); - pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, - nonemb_cmd.va, nonemb_cmd.dma); + rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); + if (rc != -EBUSY) + pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, + nonemb_cmd.va, nonemb_cmd.dma); return iscsi_eh_device_reset(sc); } @@ -581,7 +586,6 @@ "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); return NULL; } - shost->dma_boundary = pcidev->dma_mask; shost->max_id = BE2_MAX_SESSIONS; shost->max_channel = 0; shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; @@ -594,15 +598,7 @@ pci_set_drvdata(pcidev, phba); phba->interface_handle = 0xFFFFFFFF; - if (iscsi_host_add(shost, &phba->pcidev->dev)) - goto free_devices; - return phba; - -free_devices: - pci_dev_put(phba->pcidev); - iscsi_host_free(phba->shost); - return NULL; } static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) @@ -674,8 +670,19 @@ } pci_set_master(pcidev); - if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { - ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); + if (ret) { + ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); + pci_disable_device(pcidev); + return ret; + } else { + ret = pci_set_consistent_dma_mask(pcidev, + DMA_BIT_MASK(32)); + } + } else { + ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); if (ret) { 
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); pci_disable_device(pcidev); @@ -804,14 +811,23 @@ unsigned char rearm, unsigned char event) { u32 val = 0; - val |= id & DB_EQ_RING_ID_MASK; + if (rearm) val |= 1 << DB_EQ_REARM_SHIFT; if (clr_interrupt) val |= 1 << DB_EQ_CLR_SHIFT; if (event) val |= 1 << DB_EQ_EVNT_SHIFT; + val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; + /* Setting lower order EQ_ID Bits */ + val |= (id & DB_EQ_RING_ID_LOW_MASK); + + /* Setting Higher order EQ_ID Bits */ + val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & + DB_EQ_RING_ID_HIGH_MASK) + << DB_EQ_HIGH_SET_SHIFT); + iowrite32(val, phba->db_va + DB_EQ_OFFSET); } @@ -873,7 +889,6 @@ struct be_queue_info *cq; unsigned int num_eq_processed; struct be_eq_obj *pbe_eq; - unsigned long flags; pbe_eq = dev_id; eq = &pbe_eq->q; @@ -882,31 +897,15 @@ phba = pbe_eq->phba; num_eq_processed = 0; - if (blk_iopoll_enabled) { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - num_eq_processed++; - } - } else { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - num_eq_processed++; - } + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); - if (pbe_eq->todo_cq) - queue_work(phba->wq, &pbe_eq->work_cqs); + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + num_eq_processed++; } if (num_eq_processed) @@ -927,7 +926,6 @@ struct hwi_context_memory *phwi_context; struct be_eq_entry *eqe = NULL; struct be_queue_info *eq; - struct be_queue_info *cq; struct be_queue_info *mcc; unsigned long flags, index; unsigned int num_mcceq_processed, num_ioeq_processed; @@ -953,72 +951,40 @@ num_ioeq_processed = 0; num_mcceq_processed = 0; - if (blk_iopoll_enabled) { - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - if (((eqe->dw[offsetof(struct amap_eq_entry, - resource_id) / 32] & - EQE_RESID_MASK) >> 16) == mcc->id) { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_mcc_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - num_mcceq_processed++; - } else { - if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) - blk_iopoll_sched(&pbe_eq->iopoll); - num_ioeq_processed++; - } - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); - } - if (num_ioeq_processed || num_mcceq_processed) { - if (pbe_eq->todo_mcc_cq) - queue_work(phba->wq, &pbe_eq->work_cqs); - - if ((num_mcceq_processed) && (!num_ioeq_processed)) - hwi_ring_eq_db(phba, eq->id, 0, - (num_ioeq_processed + - num_mcceq_processed) , 1, 1); - else - hwi_ring_eq_db(phba, eq->id, 0, - (num_ioeq_processed + - num_mcceq_processed), 0, 1); - - return IRQ_HANDLED; - } else - return IRQ_NONE; - } else { - cq = &phwi_context->be_cq[0]; - while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] - & EQE_VALID_MASK) { - - if (((eqe->dw[offsetof(struct amap_eq_entry, - resource_id) / 32] & - EQE_RESID_MASK) >> 16) != cq->id) { - spin_lock_irqsave(&phba->isr_lock, flags); 
- pbe_eq->todo_mcc_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - } else { - spin_lock_irqsave(&phba->isr_lock, flags); - pbe_eq->todo_cq = true; - spin_unlock_irqrestore(&phba->isr_lock, flags); - } - AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); - queue_tail_inc(eq); - eqe = queue_tail_node(eq); + while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] + & EQE_VALID_MASK) { + if (((eqe->dw[offsetof(struct amap_eq_entry, + resource_id) / 32] & + EQE_RESID_MASK) >> 16) == mcc->id) { + spin_lock_irqsave(&phba->isr_lock, flags); + pbe_eq->todo_mcc_cq = true; + spin_unlock_irqrestore(&phba->isr_lock, flags); + num_mcceq_processed++; + } else { + if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) + blk_iopoll_sched(&pbe_eq->iopoll); num_ioeq_processed++; } - if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq) + AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); + queue_tail_inc(eq); + eqe = queue_tail_node(eq); + } + if (num_ioeq_processed || num_mcceq_processed) { + if (pbe_eq->todo_mcc_cq) queue_work(phba->wq, &pbe_eq->work_cqs); - if (num_ioeq_processed) { + if ((num_mcceq_processed) && (!num_ioeq_processed)) hwi_ring_eq_db(phba, eq->id, 0, - num_ioeq_processed, 1, 1); - return IRQ_HANDLED; - } else - return IRQ_NONE; - } + (num_ioeq_processed + + num_mcceq_processed) , 1, 1); + else + hwi_ring_eq_db(phba, eq->id, 0, + (num_ioeq_processed + + num_mcceq_processed), 0, 1); + + return IRQ_HANDLED; + } else + return IRQ_NONE; } static int beiscsi_init_irqs(struct beiscsi_hba *phba) @@ -1093,15 +1059,25 @@ return ret; } -static void hwi_ring_cq_db(struct beiscsi_hba *phba, +void hwi_ring_cq_db(struct beiscsi_hba *phba, unsigned int id, unsigned int num_processed, unsigned char rearm, unsigned char event) { u32 val = 0; - val |= id & DB_CQ_RING_ID_MASK; + if (rearm) val |= 1 << DB_CQ_REARM_SHIFT; + val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; + + /* Setting lower order CQ_ID Bits */ + val |= (id & DB_CQ_RING_ID_LOW_MASK); + + /* Setting Higher order CQ_ID Bits */ + val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & + DB_CQ_RING_ID_HIGH_MASK) + << DB_CQ_HIGH_SET_SHIFT); + iowrite32(val, phba->db_va + DB_CQ_OFFSET); } @@ -1342,8 +1318,10 @@ resid = csol_cqe->res_cnt; if (!task->sc) { - if (io_task->scsi_cmnd) + if (io_task->scsi_cmnd) { scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } return; } @@ -1380,6 +1358,7 @@ conn->rxdata_octets += resid; unmap: scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); } @@ -2088,7 +2067,7 @@ * return * Number of Completion Entries processed. 
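Both doorbell helpers above now split the ring id across two fields because ids can exceed the old 9/10-bit masks: the low bits stay where they were and the remaining bits land at bit 11. A standalone sketch of the CQ variant, using the DB_CQ_* mask/shift values the be_main.h hunk further below defines and a made-up ring id:

#include <stdio.h>
#include <stdint.h>

/* CQ doorbell layout from the patch: id low bits 0-9, id high bits 11-15,
 * completions popped in bits 16-28, rearm bit omitted here. */
#define CQ_ID_LOW_MASK      0x3FF
#define CQ_ID_HIGH_MASK     0x1F
#define CQ_ID_HIGH_SHIFT    10      /* bits of the id above the low field */
#define CQ_ID_HIGH_SET      11      /* where those bits land in the register */
#define CQ_NUM_POPPED_SHIFT 16

static uint32_t cq_doorbell(uint32_t id, uint32_t num_popped)
{
	uint32_t val = 0;

	val |= id & CQ_ID_LOW_MASK;                               /* low 10 bits */
	val |= ((id >> CQ_ID_HIGH_SHIFT) & CQ_ID_HIGH_MASK)
		<< CQ_ID_HIGH_SET;                                /* next 5 bits */
	val |= num_popped << CQ_NUM_POPPED_SHIFT;
	return val;
}

int main(void)
{
	/* id 0x5A3 no longer fits in 10 bits; bit 10 of the id moves to bit 11. */
	printf("0x%05x\n", (unsigned)cq_doorbell(0x5A3, 3));   /* prints 0x309a3 */
	return 0;
}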
**/ -static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) { struct be_queue_info *cq; struct sol_cqe *sol; @@ -2130,6 +2109,18 @@ cri_index = BE_GET_CRI_FROM_CID(cid); ep = phba->ep_array[cri_index]; + + if (ep == NULL) { + /* connection has already been freed + * just move on to next one + */ + beiscsi_log(phba, KERN_WARNING, + BEISCSI_LOG_INIT, + "BM_%d : proc cqe of disconn ep: cid %d\n", + cid); + goto proc_next_cqe; + } + beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2239,6 +2230,7 @@ break; } +proc_next_cqe: AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); queue_tail_inc(cq); sol = queue_tail_node(cq); @@ -2291,6 +2283,7 @@ pbe_eq = container_of(iop, struct be_eq_obj, iopoll); ret = beiscsi_process_cq(pbe_eq); + pbe_eq->cq_count += ret; if (ret < budget) { phba = pbe_eq->phba; blk_iopoll_complete(iop); @@ -3704,7 +3697,7 @@ struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct hwi_async_pdu_context *pasync_ctx; - int i, eq_num, ulp_num; + int i, eq_for_mcc, ulp_num; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; @@ -3741,16 +3734,17 @@ if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); } + + be_mcc_queues_destroy(phba); if (phba->msix_enabled) - eq_num = 1; + eq_for_mcc = 1; else - eq_num = 0; - for (i = 0; i < (phba->num_cpus + eq_num); i++) { + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { q = &phwi_context->be_eq[i].q; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } - be_mcc_queues_destroy(phba); be_cmd_fw_uninit(ctrl); } @@ -3845,9 +3839,9 @@ phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - phwi_context->max_eqd = 0; + phwi_context->max_eqd = 128; phwi_context->min_eqd = 0; - phwi_context->cur_eqd = 64; + phwi_context->cur_eqd = 0; be_cmd_fw_initialize(&phba->ctrl); status = beiscsi_create_eqs(phba, phwi_context); @@ -4216,6 +4210,8 @@ kfree(phba->ep_array); phba->ep_array = NULL; ret = -ENOMEM; + + goto free_memory; } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { @@ -4360,12 +4356,16 @@ goto boot_freemem; } - ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); + ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BM_%d : beiscsi_get_session_info Failed"); - goto boot_freemem; + + if (ret != -EBUSY) + goto boot_freemem; + else + return ret; } session_resp = nonemb_cmd.va ; @@ -4391,6 +4391,10 @@ { struct iscsi_boot_kobj *boot_kobj; + /* it has been created previously */ + if (phba->boot_kset) + return 0; + /* get boot info using mgmt cmd */ if (beiscsi_get_boot_info(phba)) /* Try to see if we can carry on without this */ @@ -4625,6 +4629,11 @@ spin_unlock(&phba->io_sgl_lock); io_task->psgl_handle = NULL; } + + if (io_task->scsi_cmnd) { + scsi_dma_unmap(io_task->scsi_cmnd); + io_task->scsi_cmnd = NULL; + } } else { if (!beiscsi_conn->login_in_progress) beiscsi_free_mgmt_task_handles(beiscsi_conn, task); @@ -5215,12 +5224,12 @@ free_irq(phba->pcidev->irq, phba); } pci_disable_msix(phba->pcidev); + cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); - if (blk_iopoll_enabled) - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); - } + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + } if (unload_state == BEISCSI_CLEAN_UNLOAD) { destroy_workqueue(phba->wq); 
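The new per-EQ cq_count above feeds the adaptive interrupt coalescing update (be_eqd_update) added a little further down: completions per second are turned into an EQ delay, clamped to the min_eqd/max_eqd values this hunk now configures (0 and 128), and reported to firmware as eqd * 65 / 100. A standalone sketch of just that arithmetic with two sample loads:

#include <stdio.h>

/* EQ-delay heuristic used by the AIC update added later in this patch:
 * completions/sec -> delay value, clamped to [min_eqd, max_eqd]. */
static unsigned int pick_eqd(unsigned int cqe_delta, unsigned int delta_ms,
			     unsigned int min_eqd, unsigned int max_eqd)
{
	unsigned int pps = (cqe_delta * 1000u) / delta_ms;  /* completions/sec */
	unsigned int eqd = (pps / 1500u) << 2;

	if (eqd < 8)
		eqd = 0;                 /* light load: keep interrupts immediate */
	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	return eqd;
}

int main(void)
{
	/* 90000 completions over 1000 ms -> 90k/s -> 60 << 2 = 240 -> clamped to 128 */
	printf("%u\n", pick_eqd(90000, 1000, 0, 128));   /* prints 128 */
	/* 3000 completions over 1000 ms -> 3k/s -> 2 << 2 = 8 -> stays 8 */
	printf("%u\n", pick_eqd(3000, 1000, 0, 128));    /* prints 8 */
	return 0;
}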
@@ -5237,7 +5246,6 @@ hwi_cleanup(phba); } - cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); } static void beiscsi_remove(struct pci_dev *pcidev) @@ -5273,6 +5281,8 @@ return; } + phba->state = BE_ADAPTER_STATE_SHUTDOWN; + iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); pci_disable_device(pcidev); } @@ -5292,6 +5302,65 @@ return; } +static void be_eqd_update(struct beiscsi_hba *phba) +{ + struct be_set_eqd set_eqd[MAX_CPUS]; + struct be_aic_obj *aic; + struct be_eq_obj *pbe_eq; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + int eqd, i, num = 0; + ulong now; + u32 pps, delta; + unsigned int tag; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i <= phba->num_cpus; i++) { + aic = &phba->aic_obj[i]; + pbe_eq = &phwi_context->be_eq[i]; + now = jiffies; + if (!aic->jiffs || time_before(now, aic->jiffs) || + pbe_eq->cq_count < aic->eq_prev) { + aic->jiffs = now; + aic->eq_prev = pbe_eq->cq_count; + continue; + } + delta = jiffies_to_msecs(now - aic->jiffs); + pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); + eqd = (pps / 1500) << 2; + + if (eqd < 8) + eqd = 0; + eqd = min_t(u32, eqd, phwi_context->max_eqd); + eqd = max_t(u32, eqd, phwi_context->min_eqd); + + aic->jiffs = now; + aic->eq_prev = pbe_eq->cq_count; + + if (eqd != aic->prev_eqd) { + set_eqd[num].delay_multiplier = (eqd * 65)/100; + set_eqd[num].eq_id = pbe_eq->q.id; + aic->prev_eqd = eqd; + num++; + } + } + if (num) { + tag = be_cmd_modify_eq_delay(phba, set_eqd, num); + if (tag) + beiscsi_mccq_compl(phba, tag, NULL, NULL); + } +} + +static void be_check_boot_session(struct beiscsi_hba *phba) +{ + if (beiscsi_setup_boot_info(phba)) + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Could not set up " + "iSCSI boot info on async event.\n"); +} + /* * beiscsi_hw_health_check()- Check adapter health * @work: work item to check HW health @@ -5305,6 +5374,13 @@ container_of(work, struct beiscsi_hba, beiscsi_hw_check_task.work); + be_eqd_update(phba); + + if (phba->state & BE_ADAPTER_CHECK_BOOT) { + phba->state &= ~BE_ADAPTER_CHECK_BOOT; + be_check_boot_session(phba); + } + beiscsi_ue_detect(phba); schedule_delayed_work(&phba->beiscsi_hw_check_task, @@ -5429,32 +5505,18 @@ phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - if (blk_iopoll_enabled) { - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, - be_iopoll); - blk_iopoll_enable(&pbe_eq->iopoll); - } - - i = (phba->msix_enabled) ? i : 0; - /* Work item for MCC handling */ + for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); - } else { - if (phba->msix_enabled) { - for (i = 0; i <= phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - INIT_WORK(&pbe_eq->work_cqs, - beiscsi_process_all_cqs); - } - } else { - pbe_eq = &phwi_context->be_eq[0]; - INIT_WORK(&pbe_eq->work_cqs, - beiscsi_process_all_cqs); - } + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); } + i = (phba->msix_enabled) ? 
i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); + ret = beiscsi_init_irqs(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -5594,6 +5656,8 @@ phba->ctrl.mcc_tag[i] = i + 1; phba->ctrl.mcc_numtag[i + 1] = 0; phba->ctrl.mcc_tag_available++; + memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, + sizeof(struct be_dma_mem)); } phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; @@ -5614,32 +5678,18 @@ phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - if (blk_iopoll_enabled) { - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, - be_iopoll); - blk_iopoll_enable(&pbe_eq->iopoll); - } - - i = (phba->msix_enabled) ? i : 0; - /* Work item for MCC handling */ + for (i = 0; i < phba->num_cpus; i++) { pbe_eq = &phwi_context->be_eq[i]; - INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); - } else { - if (phba->msix_enabled) { - for (i = 0; i <= phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - INIT_WORK(&pbe_eq->work_cqs, - beiscsi_process_all_cqs); - } - } else { - pbe_eq = &phwi_context->be_eq[0]; - INIT_WORK(&pbe_eq->work_cqs, - beiscsi_process_all_cqs); - } + blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, + be_iopoll); + blk_iopoll_enable(&pbe_eq->iopoll); } + i = (phba->msix_enabled) ? i : 0; + /* Work item for MCC handling */ + pbe_eq = &phwi_context->be_eq[i]; + INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); + ret = beiscsi_init_irqs(phba); if (ret < 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -5649,6 +5699,9 @@ } hwi_enable_intr(phba); + if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) + goto free_blkenbld; + if (beiscsi_setup_boot_info(phba)) /* * log error but continue, because we may not be using @@ -5668,11 +5721,10 @@ free_blkenbld: destroy_workqueue(phba->wq); - if (blk_iopoll_enabled) - for (i = 0; i < phba->num_cpus; i++) { - pbe_eq = &phwi_context->be_eq[i]; - blk_iopoll_disable(&pbe_eq->iopoll); - } + for (i = 0; i < phba->num_cpus; i++) { + pbe_eq = &phwi_context->be_eq[i]; + blk_iopoll_disable(&pbe_eq->iopoll); + } free_twq: beiscsi_clean_port(phba); beiscsi_free_mem(phba); @@ -5685,9 +5737,9 @@ hba_free: if (phba->msix_enabled) pci_disable_msix(phba->pcidev); - iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); + pci_set_drvdata(pcidev, NULL); disable_pci: pci_disable_device(pcidev); return ret; --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_main.h +++ linux-3.13.0/drivers/scsi/be2iscsi/be_main.h @@ -71,8 +71,8 @@ #define BEISCSI_SGLIST_ELEMENTS 30 -#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ -#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ +#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ +#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ @@ -97,9 +97,15 @@ #define INVALID_SESS_HANDLE 0xFFFFFFFF +/** + * Adapter States + **/ #define BE_ADAPTER_LINK_UP 0x001 #define BE_ADAPTER_LINK_DOWN 0x002 #define BE_ADAPTER_PCI_ERR 0x004 +#define BE_ADAPTER_STATE_SHUTDOWN 0x008 +#define BE_ADAPTER_CHECK_BOOT 0x010 + #define BEISCSI_CLEAN_UNLOAD 0x01 #define BEISCSI_EEH_UNLOAD 0x02 @@ -135,11 +141,15 @@ #define DB_RXULP0_OFFSET 0xA0 /********* Event Q door bell *************/ #define DB_EQ_OFFSET DB_CQ_OFFSET -#define 
DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ +#define DB_EQ_RING_ID_LOW_MASK 0x1FF /* bits 0 - 8 */ /* Clear the interrupt for this eq */ #define DB_EQ_CLR_SHIFT (9) /* bit 9 */ /* Must be 1 */ #define DB_EQ_EVNT_SHIFT (10) /* bit 10 */ +/* Higher Order EQ_ID bit */ +#define DB_EQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */ +#define DB_EQ_HIGH_SET_SHIFT 11 +#define DB_EQ_HIGH_FEILD_SHIFT 9 /* Number of event entries processed */ #define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ /* Rearm bit */ @@ -147,7 +157,12 @@ /********* Compl Q door bell *************/ #define DB_CQ_OFFSET 0x120 -#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_CQ_RING_ID_LOW_MASK 0x3FF /* bits 0 - 9 */ +/* Higher Order CQ_ID bit */ +#define DB_CQ_RING_ID_HIGH_MASK 0x1F /* bits 11 - 15 */ +#define DB_CQ_HIGH_SET_SHIFT 11 +#define DB_CQ_HIGH_FEILD_SHIFT 10 + /* Number of event entries processed */ #define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ /* Rearm bit */ @@ -413,6 +428,7 @@ struct mgmt_session_info boot_sess; struct invalidate_command_table inv_tbl[128]; + struct be_aic_obj aic_obj[MAX_CPUS]; unsigned int attr_log_enable; int (*iotask_fn)(struct iscsi_task *, struct scatterlist *sg, @@ -821,6 +837,12 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, struct iscsi_task *task); +void hwi_ring_cq_db(struct beiscsi_hba *phba, + unsigned int id, unsigned int num_processed, + unsigned char rearm, unsigned char event); + +unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq); + static inline bool beiscsi_error(struct beiscsi_hba *phba) { return phba->ue_detected || phba->fw_timeout; --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_mgmt.c +++ linux-3.13.0/drivers/scsi/be2iscsi/be_mgmt.c @@ -155,6 +155,43 @@ } } +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *set_eqd, int num) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_req_modify_eq_delay *req; + unsigned int tag = 0; + int i; + + spin_lock(&ctrl->mbox_lock); + tag = alloc_mcc_tag(phba); + if (!tag) { + spin_unlock(&ctrl->mbox_lock); + return tag; + } + + wrb = wrb_from_mccq(phba); + req = embedded_payload(wrb); + + wrb->tag0 |= tag; + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); + + req->num_eq = cpu_to_le32(num); + for (i = 0; i < num; i++) { + req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); + req->delay[i].phase = 0; + req->delay[i].delay_multiplier = + cpu_to_le32(set_eqd[i].delay_multiplier); + } + + be_mcc_notify(phba); + spin_unlock(&ctrl->mbox_lock); + return tag; +} + /** * mgmt_reopen_session()- Reopen a session based on reopen_type * @phba: Device priv structure instance @@ -447,8 +484,8 @@ struct be_dma_mem *nonemb_cmd) { struct be_cmd_resp_hdr *resp; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); - struct be_sge *mcc_sge = nonembedded_sgl(wrb); + struct be_mcc_wrb *wrb; + struct be_sge *mcc_sge; unsigned int tag = 0; struct iscsi_bsg_request *bsg_req = job->request; struct be_bsg_vendor_cmd *req = nonemb_cmd->va; @@ -465,7 +502,6 @@ req->sector = sector; req->offset = offset; spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case BEISCSI_WRITE_FLASH: @@ -495,6 +531,8 @@ return tag; } + wrb = wrb_from_mccq(phba); + mcc_sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, job->request_payload.sg_cnt); mcc_sge->pa_hi = 
cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); @@ -525,7 +563,6 @@ int status = 0; spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, @@ -675,7 +712,7 @@ struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; - struct tcp_connect_and_offload_in *req; + struct tcp_connect_and_offload_in_v1 *req; unsigned short def_hdr_id; unsigned short def_data_id; struct phys_addr template_address = { 0, 0 }; @@ -702,17 +739,16 @@ return tag; } wrb = wrb_from_mccq(phba); - memset(wrb, 0, sizeof(*wrb)); sge = nonembedded_sgl(wrb); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); wrb->tag0 |= tag; - be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, - sizeof(*req)); + nonemb_cmd->size); if (dst_addr->sa_family == PF_INET) { __be32 s_addr = daddr_in->sin_addr.s_addr; req->ip_address.ip_type = BE2_IPV4; @@ -758,6 +794,13 @@ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); + + if (!is_chip_be2_be3r(phba)) { + req->hdr.version = MBX_CMD_VER1; + req->tcp_window_size = 0; + req->tcp_window_scale_count = 2; + } + be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; @@ -804,7 +847,7 @@ int resp_buf_len) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); + struct be_mcc_wrb *wrb; struct be_sge *sge; unsigned int tag; int rc = 0; @@ -816,7 +859,8 @@ rc = -ENOMEM; goto free_cmd; } - memset(wrb, 0, sizeof(*wrb)); + + wrb = wrb_from_mccq(phba); wrb->tag0 |= tag; sge = nonembedded_sgl(wrb); @@ -828,22 +872,25 @@ be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); - rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); + rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd); + + if (resp_buf) + memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); + if (rc) { - /* Check if the IOCTL needs to be re-issued */ + /* Check if the MBX Cmd needs to be re-issued */ if (rc == -EAGAIN) return rc; - beiscsi_log(phba, KERN_ERR, + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, "BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); - goto free_cmd; + if (rc != -EBUSY) + goto free_cmd; + else + return rc; } - - if (resp_buf) - memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); - free_cmd: pci_free_consistent(ctrl->pdev, nonemb_cmd->size, nonemb_cmd->va, nonemb_cmd->dma); @@ -897,17 +944,20 @@ if (ip_action == IP_ACTION_ADD) { memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value, - ip_param->len); + sizeof(req->ip_params.ip_record.ip_addr.addr)); if (subnet_param) memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, - subnet_param->value, subnet_param->len); + subnet_param->value, + sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); } else { memcpy(req->ip_params.ip_record.ip_addr.addr, - if_info->ip_addr.addr, ip_param->len); + if_info->ip_addr.addr, + sizeof(req->ip_params.ip_record.ip_addr.addr)); memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, - if_info->ip_addr.subnet_mask, ip_param->len); + if_info->ip_addr.subnet_mask, + sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); } rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); @@ -935,7 +985,7 @@ req->action = gtway_action; req->ip_addr.ip_type = BE2_IPV4; - 
memcpy(req->ip_addr.addr, gt_addr, param_len); + memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr)); return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } @@ -961,16 +1011,14 @@ BE2_IPV6 : BE2_IPV4 ; rc = mgmt_get_if_info(phba, ip_type, &if_info); - if (rc) { - kfree(if_info); + if (rc) return rc; - } if (boot_proto == ISCSI_BOOTPROTO_DHCP) { if (if_info->dhcp_state) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : DHCP Already Enabled\n"); - return 0; + goto exit; } /* The ip_param->len is 1 in DHCP case. Setting proper IP len as this it is used while @@ -988,7 +1036,7 @@ sizeof(*reldhcp)); if (rc) - return rc; + goto exit; reldhcp = nonemb_cmd.va; reldhcp->interface_hndl = phba->interface_handle; @@ -999,7 +1047,7 @@ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Delete existing dhcp\n"); - return rc; + goto exit; } } } @@ -1009,7 +1057,7 @@ rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, IP_ACTION_DEL); if (rc) - return rc; + goto exit; } /* Delete the Gateway settings if mode change is to DHCP */ @@ -1019,7 +1067,7 @@ if (rc) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to Get Gateway Addr\n"); - return rc; + goto exit; } if (gtway_addr_set.ip_addr.addr[0]) { @@ -1031,7 +1079,7 @@ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : Failed to clear Gateway Addr Set\n"); - return rc; + goto exit; } } } @@ -1042,7 +1090,7 @@ OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR, sizeof(*dhcpreq)); if (rc) - return rc; + goto exit; dhcpreq = nonemb_cmd.va; dhcpreq->flags = BLOCKING; @@ -1050,12 +1098,14 @@ dhcpreq->interface_hndl = phba->interface_handle; dhcpreq->ip_type = BE2_DHCP_V4; - return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); + rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); } else { - return mgmt_static_ip_modify(phba, if_info, ip_param, + rc = mgmt_static_ip_modify(phba, if_info, ip_param, subnet_param, IP_ACTION_ADD); } +exit: + kfree(if_info); return rc; } @@ -1348,7 +1398,6 @@ { int rc; unsigned int tag; - struct be_mcc_wrb *wrb = NULL; tag = be_cmd_set_vlan(phba, vlan_tag); if (!tag) { @@ -1358,7 +1407,7 @@ return -EBUSY; } - rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + rc = beiscsi_mccq_compl(phba, tag, NULL, NULL); if (rc) { beiscsi_log(phba, KERN_ERR, (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), --- linux-3.13.0.orig/drivers/scsi/be2iscsi/be_mgmt.h +++ linux-3.13.0/drivers/scsi/be2iscsi/be_mgmt.h @@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle); void beiscsi_ue_detect(struct beiscsi_hba *phba); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *, int num); #endif --- linux-3.13.0.orig/drivers/scsi/bfa/bfa_ioc.h +++ linux-3.13.0/drivers/scsi/bfa/bfa_ioc.h @@ -72,7 +72,7 @@ } while (0) #define bfa_swap_words(_x) ( \ - ((_x) << 32) | ((_x) >> 32)) + ((u64)(_x) << 32) | ((u64)(_x) >> 32)) #ifdef __BIG_ENDIAN #define bfa_sge_to_be(_x) --- linux-3.13.0.orig/drivers/scsi/bfa/bfad.c +++ linux-3.13.0/drivers/scsi/bfa/bfad.c @@ -1802,7 +1802,7 @@ static u32 * bfad_load_fwimg(struct pci_dev *pdev) { - if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { + if (bfa_asic_id_ct2(pdev->device)) { if (bfi_image_ct2_size == 0) bfad_read_firmware(pdev, &bfi_image_ct2, &bfi_image_ct2_size, BFAD_FW_FILE_CT2); @@ -1812,12 +1812,14 @@ bfad_read_firmware(pdev, &bfi_image_ct, &bfi_image_ct_size, BFAD_FW_FILE_CT); return bfi_image_ct; - } else { + } else if (bfa_asic_id_cb(pdev->device)) { if 
(bfi_image_cb_size == 0) bfad_read_firmware(pdev, &bfi_image_cb, &bfi_image_cb_size, BFAD_FW_FILE_CB); return bfi_image_cb; } + + return NULL; } static void --- linux-3.13.0.orig/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ linux-3.13.0/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -411,6 +411,7 @@ struct fc_frame_header *fh; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; + struct sk_buff *tmp_skb; unsigned short oxid; interface = container_of(ptype, struct bnx2fc_interface, @@ -423,6 +424,12 @@ goto err; } + tmp_skb = skb_share_check(skb, GFP_ATOMIC); + if (!tmp_skb) + goto err; + + skb = tmp_skb; + if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); goto err; --- linux-3.13.0.orig/drivers/scsi/esas2r/esas2r_main.c +++ linux-3.13.0/drivers/scsi/esas2r/esas2r_main.c @@ -1057,7 +1057,7 @@ cmd->scsi_done(cmd); - return 0; + return SUCCESS; } spin_lock_irqsave(&a->queue_lock, flags); --- linux-3.13.0.orig/drivers/scsi/hpsa.c +++ linux-3.13.0/drivers/scsi/hpsa.c @@ -117,9 +117,15 @@ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -163,9 +169,15 @@ {0x21C3103C, "Smart Array", &SA5_access}, {0x21C4103C, "Smart Array", &SA5_access}, {0x21C5103C, "Smart Array", &SA5_access}, + {0x21C6103C, "Smart Array", &SA5_access}, {0x21C7103C, "Smart Array", &SA5_access}, {0x21C8103C, "Smart Array", &SA5_access}, {0x21C9103C, "Smart Array", &SA5_access}, + {0x21CA103C, "Smart Array", &SA5_access}, + {0x21CB103C, "Smart Array", &SA5_access}, + {0x21CC103C, "Smart Array", &SA5_access}, + {0x21CD103C, "Smart Array", &SA5_access}, + {0x21CE103C, "Smart Array", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; @@ -3139,7 +3151,7 @@ } if (ioc->Request.Type.Direction == XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { - status = -ENOMEM; + status = -EFAULT; goto cleanup1; } } else --- linux-3.13.0.orig/drivers/scsi/ibmvscsi/ibmvscsi.c +++ linux-3.13.0/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ { struct vio_dev *vdev = to_vio_dev(hostdata->dev); + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. 
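The ibmvscsi hunks above add a read barrier after testing the CRQ valid bit and a full barrier before handing a command to the VIOS, so neither side can observe the flag without the payload it guards. A minimal userspace sketch of the same pairing; the C11 fences stand in for the kernel's rmb()/mb(), and a real cross-CPU version would also want the flag itself to be an atomic:

#include <stdatomic.h>
#include <string.h>
#include <stdio.h>

struct crq_entry {
	unsigned char valid;     /* set last by the producer */
	char payload[15];
};

/* Consumer: only read the payload after the valid flag, with an acquire
 * fence standing in for the rmb() the patch adds. */
static int crq_read(struct crq_entry *e, char *out, size_t len)
{
	if (!(e->valid & 0x80))
		return 0;
	atomic_thread_fence(memory_order_acquire);   /* rmb() in the driver */
	memcpy(out, e->payload, len);
	return 1;
}

/* Producer: publish the payload first, fence, then set valid -- the mirror
 * of the mb() the patch adds before handing a command to the VIOS. */
static void crq_write(struct crq_entry *e, const char *msg)
{
	strncpy(e->payload, msg, sizeof(e->payload) - 1);
	atomic_thread_fence(memory_order_release);   /* mb() in the driver */
	e->valid = 0x80;
}

int main(void)
{
	struct crq_entry e = { 0, "" };
	char buf[15] = "";

	crq_write(&e, "hello");
	if (crq_read(&e, buf, sizeof(buf)))
		printf("%s\n", buf);
	return 0;
}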
+ */ + mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } @@ -797,7 +807,8 @@ evt->hostdata->dev); if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); - } else if (evt->done) + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); --- linux-3.13.0.orig/drivers/scsi/ipr.c +++ linux-3.13.0/drivers/scsi/ipr.c @@ -683,6 +683,7 @@ ipr_reinit_ipr_cmnd(ipr_cmd); ipr_cmd->u.scratch = 0; ipr_cmd->sibling = NULL; + ipr_cmd->eh_comp = NULL; ipr_cmd->fast_done = fast_done; init_timer(&ipr_cmd->timer); } @@ -848,6 +849,8 @@ scsi_dma_unmap(ipr_cmd->scsi_cmd); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } @@ -3630,16 +3633,14 @@ return strlen(buf); } - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); } spin_lock_irqsave(shost->host_lock, lock_flags); ioa_cfg->iopoll_weight = user_iopoll_weight; - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); @@ -4805,6 +4806,84 @@ return rc; } +/** + * ipr_match_lun - Match function for specified LUN + * @ipr_cmd: ipr command struct + * @device: device to match (sdev) + * + * Returns: + * 1 if command matches sdev / 0 if command does not match sdev + **/ +static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) +{ + if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device) + return 1; + return 0; +} + +/** + * ipr_wait_for_ops - Wait for matching commands to complete + * @ipr_cmd: ipr command struct + * @device: device to match (sdev) + * @match: match function to use + * + * Returns: + * SUCCESS / FAILED + **/ +static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, + int (*match)(struct ipr_cmnd *, void *)) +{ + struct ipr_cmnd *ipr_cmd; + int wait; + unsigned long flags; + struct ipr_hrr_queue *hrrq; + signed long timeout = IPR_ABORT_TASK_TIMEOUT; + DECLARE_COMPLETION_ONSTACK(comp); + + ENTER; + do { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = ∁ + wait++; + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) { + timeout = wait_for_completion_timeout(&comp, timeout); + + if (!timeout) { + wait = 0; + + for_each_hrrq(hrrq, ioa_cfg) { + spin_lock_irqsave(hrrq->lock, flags); + list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = NULL; + wait++; + } + } + spin_unlock_irqrestore(hrrq->lock, flags); + } + + if (wait) + dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n"); + LEAVE; + return wait ? 
FAILED : SUCCESS; + } + } + } while (wait); + + LEAVE; + return SUCCESS; +} + static int ipr_eh_host_reset(struct scsi_cmnd *cmd) { struct ipr_ioa_cfg *ioa_cfg; @@ -5023,11 +5102,17 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) { int rc; + struct ipr_ioa_cfg *ioa_cfg; + + ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irq(cmd->device->host->host_lock); rc = __ipr_eh_dev_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); + return rc; } @@ -5205,13 +5290,18 @@ { unsigned long flags; int rc; + struct ipr_ioa_cfg *ioa_cfg; ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags); rc = ipr_cancel_op(scsi_cmd); spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags); + if (rc == SUCCESS) + rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun); LEAVE; return rc; } @@ -5484,8 +5574,7 @@ return IRQ_NONE; } - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == hrrq->toggle_bit) { if (!blk_iopoll_sched_prep(&hrrq->iopoll)) @@ -9859,8 +9948,7 @@ ioa_cfg->host->max_channel = IPR_VSET_BUS; ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { for (i = 1; i < ioa_cfg->hrrq_num; i++) { blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll, ioa_cfg->iopoll_weight, ipr_iopoll); @@ -9889,8 +9977,7 @@ int i; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && - ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { + if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { ioa_cfg->iopoll_weight = 0; for (i = 1; i < ioa_cfg->hrrq_num; i++) blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); --- linux-3.13.0.orig/drivers/scsi/ipr.h +++ linux-3.13.0/drivers/scsi/ipr.h @@ -1585,6 +1585,7 @@ struct scsi_device *sdev; } u; + struct completion *eh_comp; struct ipr_hrr_queue *hrrq; struct ipr_ioa_cfg *ioa_cfg; }; --- linux-3.13.0.orig/drivers/scsi/isci/host.h +++ linux-3.13.0/drivers/scsi/isci/host.h @@ -311,9 +311,8 @@ } #define for_each_isci_host(id, ihost, pdev) \ - for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ - id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ - ihost = to_pci_info(pdev)->hosts[++id]) + for (id = 0; id < SCI_MAX_CONTROLLERS && \ + (ihost = to_pci_info(pdev)->hosts[id]); id++) static inline void wait_for_start(struct isci_host *ihost) { --- linux-3.13.0.orig/drivers/scsi/isci/port_config.c +++ linux-3.13.0/drivers/scsi/isci/port_config.c @@ -615,13 +615,6 @@ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); } else { /* the phy is already the part of the port */ - u32 port_state = iport->sm.current_state_id; - - /* if the PORT'S state is resetting then the link up is from - * port hard reset in this case, we need to tell the port - * that link up is recieved - */ - BUG_ON(port_state != SCI_PORT_RESETTING); port_agent->phy_ready_mask |= 1 << phy_index; sci_port_link_up(iport, iphy); } --- linux-3.13.0.orig/drivers/scsi/isci/task.c +++ linux-3.13.0/drivers/scsi/isci/task.c @@ -801,7 +801,7 @@ /* XXX: need to cleanup any ireqs targeting this * domain_device */ - ret = 
TMF_RESP_FUNC_COMPLETE; + ret = -ENODEV; goto out; } --- linux-3.13.0.orig/drivers/scsi/libiscsi.c +++ linux-3.13.0/drivers/scsi/libiscsi.c @@ -717,11 +717,21 @@ return NULL; } + if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN); + return NULL; + } + task = conn->login_task; } else { if (session->state != ISCSI_STATE_LOGGED_IN) return NULL; + if (data_size != 0) { + iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode); + return NULL; + } + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); --- linux-3.13.0.orig/drivers/scsi/libsas/sas_discover.c +++ linux-3.13.0/drivers/scsi/libsas/sas_discover.c @@ -500,6 +500,7 @@ struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; struct sas_ha_struct *ha = port->ha; + struct domain_device *ddev = port->port_dev; /* prevent revalidation from finding sata links in recovery */ mutex_lock(&ha->disco_mutex); @@ -514,8 +515,9 @@ SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, task_pid_nr(current)); - if (port->port_dev) - res = sas_ex_revalidate_domain(port->port_dev); + if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || + ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) + res = sas_ex_revalidate_domain(ddev); SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", port->id, task_pid_nr(current), res); --- linux-3.13.0.orig/drivers/scsi/megaraid.c +++ linux-3.13.0/drivers/scsi/megaraid.c @@ -1967,7 +1967,7 @@ cmd->device->id, cmd->device->lun); if(list_empty(&adapter->pending_list)) - return FALSE; + return FAILED; list_for_each_safe(pos, next, &adapter->pending_list) { @@ -1990,7 +1990,7 @@ (aor==SCB_ABORT) ? 
"ABORTING":"RESET", scb->idx); - return FALSE; + return FAILED; } else { @@ -2015,12 +2015,12 @@ list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); - return TRUE; + return SUCCESS; } } } - return FALSE; + return FAILED; } static inline int --- linux-3.13.0.orig/drivers/scsi/megaraid/megaraid_sas.h +++ linux-3.13.0/drivers/scsi/megaraid/megaraid_sas.h @@ -1527,7 +1527,7 @@ u32 *reply_queue; dma_addr_t reply_queue_h; - unsigned long base_addr; + resource_size_t base_addr; struct megasas_register_set __iomem *reg_set; u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; --- linux-3.13.0.orig/drivers/scsi/megaraid/megaraid_sas_base.c +++ linux-3.13.0/drivers/scsi/megaraid/megaraid_sas_base.c @@ -953,7 +953,7 @@ cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; - cmd->cmd_status = 0xFF; + cmd->cmd_status = ENODATA; instance->instancet->issue_dcmd(instance, cmd); --- linux-3.13.0.orig/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ linux-3.13.0/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -92,6 +92,8 @@ { struct megasas_register_set __iomem *regs; regs = instance->reg_set; + + instance->mask_interrupts = 0; /* For Thunderbolt/Invader also clear intr on enable */ writel(~0, ®s->outbound_intr_status); readl(®s->outbound_intr_status); @@ -100,7 +102,6 @@ /* Dummy readl to force pci flush */ readl(®s->outbound_intr_mask); - instance->mask_interrupts = 0; } /** --- linux-3.13.0.orig/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ linux-3.13.0/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -8293,7 +8293,6 @@ mpt2sas_base_free_resources(ioc); pci_save_state(pdev); - pci_disable_device(pdev); pci_set_power_state(pdev, device_state); return 0; } --- linux-3.13.0.orig/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ linux-3.13.0/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -1006,12 +1006,9 @@ &mpt2sas_phy->remote_identify); _transport_add_phy_to_an_existing_port(ioc, sas_node, mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); - } else { + } else memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); - _transport_del_phy_from_an_existing_port(ioc, sas_node, - mpt2sas_phy); - } if (mpt2sas_phy->phy) mpt2sas_phy->phy->negotiated_linkrate = --- linux-3.13.0.orig/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ linux-3.13.0/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1003,12 +1003,9 @@ &mpt3sas_phy->remote_identify); _transport_add_phy_to_an_existing_port(ioc, sas_node, mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); - } else { + } else memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct sas_identify)); - _transport_del_phy_from_an_existing_port(ioc, sas_node, - mpt3sas_phy); - } if (mpt3sas_phy->phy) mpt3sas_phy->phy->negotiated_linkrate = --- linux-3.13.0.orig/drivers/scsi/qla2xxx/qla_def.h +++ linux-3.13.0/drivers/scsi/qla2xxx/qla_def.h @@ -2993,8 +2993,7 @@ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ IS_QLA8044(ha)) #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) -#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ - IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) +#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) --- linux-3.13.0.orig/drivers/scsi/qla2xxx/qla_isr.c +++ linux-3.13.0/drivers/scsi/qla2xxx/qla_isr.c @@ -2829,6 +2829,7 @@ 
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) { #define MIN_MSIX_COUNT 2 +#define ATIO_VECTOR 2 int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; @@ -2885,34 +2886,47 @@ } /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < ha->msix_count; i++) { + for (i = 0; i < 2; i++) { qentry = &ha->msix_entries[i]; - if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { - ret = request_irq(qentry->vector, - qla83xx_msix_entries[i].handler, - 0, qla83xx_msix_entries[i].name, rsp); - } else if (IS_P3P_TYPE(ha)) { + if (IS_P3P_TYPE(ha)) ret = request_irq(qentry->vector, qla82xx_msix_entries[i].handler, 0, qla82xx_msix_entries[i].name, rsp); - } else { + else ret = request_irq(qentry->vector, msix_entries[i].handler, 0, msix_entries[i].name, rsp); - } - if (ret) { - ql_log(ql_log_fatal, vha, 0x00cb, - "MSI-X: unable to register handler -- %x/%d.\n", - qentry->vector, ret); - qla24xx_disable_msix(ha); - ha->mqenable = 0; - goto msix_out; - } + if (ret) + goto msix_register_fail; qentry->have_irq = 1; qentry->rsp = rsp; rsp->msix = qentry; } + /* + * If target mode is enable, also request the vector for the ATIO + * queue. + */ + if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { + qentry = &ha->msix_entries[ATIO_VECTOR]; + ret = request_irq(qentry->vector, + qla83xx_msix_entries[ATIO_VECTOR].handler, + 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); + qentry->have_irq = 1; + qentry->rsp = rsp; + rsp->msix = qentry; + } + +msix_register_fail: + if (ret) { + ql_log(ql_log_fatal, vha, 0x00cb, + "MSI-X: unable to register handler -- %x/%d.\n", + qentry->vector, ret); + qla24xx_disable_msix(ha); + ha->mqenable = 0; + goto msix_out; + } + /* Enable MSI-X vector for response queue update for queue 0 */ if (IS_QLA83XX(ha)) { if (ha->msixbase && ha->mqiobase && --- linux-3.13.0.orig/drivers/scsi/qla2xxx/qla_target.c +++ linux-3.13.0/drivers/scsi/qla2xxx/qla_target.c @@ -1361,12 +1361,10 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, uint32_t req_cnt) { - struct qla_hw_data *ha = vha->hw; - device_reg_t __iomem *reg = ha->iobase; uint32_t cnt; if (vha->req->cnt < (req_cnt + 2)) { - cnt = (uint16_t)RD_REG_DWORD(®->isp24.req_q_out); + cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out); ql_dbg(ql_dbg_tgt, vha, 0xe00a, "Request ring circled: cnt=%d, vha->->ring_index=%d, " @@ -3186,7 +3184,8 @@ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c, "SRR cmd %p (se_cmd %p, tag %d, op %x), " "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag, - se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); + se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, + cmd->sg_cnt, cmd->offset); qlt_handle_srr(vha, sctio, imm); --- linux-3.13.0.orig/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ linux-3.13.0/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -740,7 +740,16 @@ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); - WARN_ON(node && (node != se_nacl)); + if (WARN_ON(node && (node != se_nacl))) { + /* + * The nacl no longer matches what we think it should be. + * Most likely a new dynamic acl has been added while + * someone dropped the hardware lock. It clearly is a + * bug elsewhere, but this bit can't make things worse. 
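The tcm_qla2xxx hunk above turns a bare WARN_ON into remove, verify, restore: if the node pulled out of the btree is not the one being unregistered, it is put back rather than silently dropped. A toy sketch of that shape, with a fixed-size array standing in for the 32-bit btree:

#include <stdio.h>

#define MAP_SIZE 8
static void *map[MAP_SIZE];              /* toy stand-in for the btree */

static void *map_remove(unsigned key)
{
	void *old = map[key % MAP_SIZE];
	map[key % MAP_SIZE] = NULL;
	return old;
}

static void map_insert(unsigned key, void *val)
{
	map[key % MAP_SIZE] = val;
}

/* Remove-and-verify: if the stored node is not the one we expected, put it
 * back instead of silently dropping someone else's entry. */
static void unreg_node(unsigned key, void *expected)
{
	void *node = map_remove(key);

	if (node && node != expected) {
		fprintf(stderr, "key %u held a different node; restoring it\n", key);
		map_insert(key, node);       /* leave the newer entry in place */
	}
}

int main(void)
{
	int a, b;

	map_insert(5, &b);                   /* a newer entry replaced ours */
	unreg_node(5, &a);                   /* must not drop &b */
	printf("%s\n", map[5] == &b ? "preserved" : "lost");
	return 0;
}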
+ */ + btree_insert32(&lport->lport_fcport_map, nacl->nport_id, + node, GFP_ATOMIC); + } pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", se_nacl, nacl->nport_wwnn, nacl->nport_id); @@ -1445,7 +1454,7 @@ /* * Finally register the new FC Nexus with TCM */ - __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); + transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess); return 0; } --- linux-3.13.0.orig/drivers/scsi/qla4xxx/ql4_os.c +++ linux-3.13.0/drivers/scsi/qla4xxx/ql4_os.c @@ -802,6 +802,7 @@ int type; int rem = len; int rc = 0; + int size; memset(&chap_rec, 0, sizeof(chap_rec)); @@ -816,12 +817,14 @@ chap_rec.chap_type = param_info->value[0]; break; case ISCSI_CHAP_PARAM_USERNAME: - memcpy(chap_rec.username, param_info->value, - param_info->len); + size = min_t(size_t, sizeof(chap_rec.username), + param_info->len); + memcpy(chap_rec.username, param_info->value, size); break; case ISCSI_CHAP_PARAM_PASSWORD: - memcpy(chap_rec.password, param_info->value, - param_info->len); + size = min_t(size_t, sizeof(chap_rec.password), + param_info->len); + memcpy(chap_rec.password, param_info->value, size); break; case ISCSI_CHAP_PARAM_PASSWORD_LEN: chap_rec.password_length = param_info->value[0]; --- linux-3.13.0.orig/drivers/scsi/scsi_devinfo.c +++ linux-3.13.0/drivers/scsi/scsi_devinfo.c @@ -202,6 +202,7 @@ {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, {"INSITE", "I325VM", NULL, BLIST_KEY}, + {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, @@ -210,6 +211,7 @@ {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, {"MICROP", "4110", NULL, BLIST_NOTQ}, + {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC}, {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, @@ -222,6 +224,7 @@ {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, {"Promise", "", NULL, BLIST_SPARSELUN}, {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, --- linux-3.13.0.orig/drivers/scsi/scsi_error.c +++ linux-3.13.0/drivers/scsi/scsi_error.c @@ -161,7 +161,7 @@ else if (host->hostt->eh_timed_out) rtn = host->hostt->eh_timed_out(scmd); - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); if (unlikely(rtn == BLK_EH_NOT_HANDLED && !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) @@ -1860,8 +1860,10 @@ * is no point trying to lock the door of an off-line device. 
*/ shost_for_each_device(sdev, shost) { - if (scsi_device_online(sdev) && sdev->locked) + if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) { scsi_eh_lock_door(sdev); + sdev->was_reset = 0; + } } /* --- linux-3.13.0.orig/drivers/scsi/scsi_lib.c +++ linux-3.13.0/drivers/scsi/scsi_lib.c @@ -831,6 +831,14 @@ scsi_next_command(cmd); return; } + } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { + /* + * Certain non BLOCK_PC requests are commands that don't + * actually transfer anything (FLUSH), so cannot use + * good_bytes != blk_rq_bytes(req) as the signal for an error. + * This sets the error explicitly for the problem case. + */ + error = __scsi_error_from_host_byte(cmd, result); } /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ @@ -1250,9 +1258,11 @@ "rejecting I/O to dead device\n"); ret = BLKPREP_KILL; break; - case SDEV_QUIESCE: case SDEV_BLOCK: case SDEV_CREATED_BLOCK: + ret = BLKPREP_DEFER; + break; + case SDEV_QUIESCE: /* * If the devices is blocked we defer normal commands. */ @@ -1684,7 +1694,7 @@ host_dev = scsi_get_device(shost); if (host_dev && host_dev->dma_mask) - bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT; + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; return bounce_limit; } --- linux-3.13.0.orig/drivers/scsi/scsi_netlink.c +++ linux-3.13.0/drivers/scsi/scsi_netlink.c @@ -77,7 +77,7 @@ goto next_msg; } - if (!capable(CAP_SYS_ADMIN)) { + if (!netlink_capable(skb, CAP_SYS_ADMIN)) { err = -EPERM; goto next_msg; } --- linux-3.13.0.orig/drivers/scsi/scsi_scan.c +++ linux-3.13.0/drivers/scsi/scsi_scan.c @@ -320,6 +320,7 @@ struct Scsi_Host *shost = dev_to_shost(dev->parent); unsigned long flags; + starget->state = STARGET_DEL; transport_destroy_device(dev); spin_lock_irqsave(shost->host_lock, flags); if (shost->hostt->target_destroy) @@ -371,6 +372,37 @@ } /** + * scsi_target_reap_ref_release - remove target from visibility + * @kref: the reap_ref in the target being released + * + * Called on last put of reap_ref, which is the indication that no device + * under this target is visible anymore, so render the target invisible in + * sysfs. Note: we have to be in user context here because the target reaps + * should be done in places where the scsi device visibility is being removed. + */ +static void scsi_target_reap_ref_release(struct kref *kref) +{ + struct scsi_target *starget + = container_of(kref, struct scsi_target, reap_ref); + + /* + * if we get here and the target is still in the CREATED state that + * means it was allocated but never made visible (because a scan + * turned up no LUNs), so don't call device_del() on it. 
+ */ + if (starget->state != STARGET_CREATED) { + transport_remove_device(&starget->dev); + device_del(&starget->dev); + } + scsi_target_destroy(starget); +} + +static void scsi_target_reap_ref_put(struct scsi_target *starget) +{ + kref_put(&starget->reap_ref, scsi_target_reap_ref_release); +} + +/** * scsi_alloc_target - allocate a new or find an existing target * @parent: parent of the target (need not be a scsi host) * @channel: target channel number (zero if no channels) @@ -392,7 +424,7 @@ + shost->transportt->target_size; struct scsi_target *starget; struct scsi_target *found_target; - int error; + int error, ref_got; starget = kzalloc(size, GFP_KERNEL); if (!starget) { @@ -401,7 +433,7 @@ } dev = &starget->dev; device_initialize(dev); - starget->reap_ref = 1; + kref_init(&starget->reap_ref); dev->parent = get_device(parent); dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); dev->bus = &scsi_bus_type; @@ -441,29 +473,36 @@ return starget; found: - found_target->reap_ref++; + /* + * release routine already fired if kref is zero, so if we can still + * take the reference, the target must be alive. If we can't, it must + * be dying and we need to wait for a new target + */ + ref_got = kref_get_unless_zero(&found_target->reap_ref); + spin_unlock_irqrestore(shost->host_lock, flags); - if (found_target->state != STARGET_DEL) { + if (ref_got) { put_device(dev); return found_target; } - /* Unfortunately, we found a dying target; need to - * wait until it's dead before we can get a new one */ + /* + * Unfortunately, we found a dying target; need to wait until it's + * dead before we can get a new one. There is an anomaly here. We + * *should* call scsi_target_reap() to balance the kref_get() of the + * reap_ref above. However, since the target being released, it's + * already invisible and the reap_ref is irrelevant. If we call + * scsi_target_reap() we might spuriously do another device_del() on + * an already invisible target. + */ put_device(&found_target->dev); - flush_scheduled_work(); + /* + * length of time is irrelevant here, we just want to yield the CPU + * for a tick to avoid busy waiting for the target to die. 
+ */ + msleep(1); goto retry; } -static void scsi_target_reap_usercontext(struct work_struct *work) -{ - struct scsi_target *starget = - container_of(work, struct scsi_target, ew.work); - - transport_remove_device(&starget->dev); - device_del(&starget->dev); - scsi_target_destroy(starget); -} - /** * scsi_target_reap - check to see if target is in use and destroy if not * @starget: target to be checked @@ -474,28 +513,13 @@ */ void scsi_target_reap(struct scsi_target *starget) { - struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); - unsigned long flags; - enum scsi_target_state state; - int empty = 0; - - spin_lock_irqsave(shost->host_lock, flags); - state = starget->state; - if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { - empty = 1; - starget->state = STARGET_DEL; - } - spin_unlock_irqrestore(shost->host_lock, flags); - - if (!empty) - return; - - BUG_ON(state == STARGET_DEL); - if (state == STARGET_CREATED) - scsi_target_destroy(starget); - else - execute_in_process_context(scsi_target_reap_usercontext, - &starget->ew); + /* + * serious problem if this triggers: STARGET_DEL is only set in the if + * the reap_ref drops to zero, so we're trying to do another final put + * on an already released kref + */ + BUG_ON(starget->state == STARGET_DEL); + scsi_target_reap_ref_put(starget); } /** @@ -898,6 +922,12 @@ if (*bflags & BLIST_USE_10_BYTE_MS) sdev->use_10_for_ms = 1; + /* some devices don't like REPORT SUPPORTED OPERATION CODES + * and will simply timeout causing sd_mod init to take a very + * very long time */ + if (*bflags & BLIST_NO_RSOC) + sdev->no_report_opcodes = 1; + /* set the device running here so that slave configure * may do I/O */ ret = scsi_device_set_state(sdev, SDEV_RUNNING); @@ -926,7 +956,9 @@ sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; - if (*bflags & BLIST_SKIP_VPD_PAGES) + if (*bflags & BLIST_TRY_VPD_PAGES) + sdev->try_vpd_pages = 1; + else if (*bflags & BLIST_SKIP_VPD_PAGES) sdev->skip_vpd_pages = 1; transport_configure_device(&sdev->sdev_gendev); @@ -1532,6 +1564,10 @@ } mutex_unlock(&shost->scan_mutex); scsi_autopm_put_target(starget); + /* + * paired with scsi_alloc_target(). Target will be destroyed unless + * scsi_probe_and_add_lun made an underlying device visible + */ scsi_target_reap(starget); put_device(&starget->dev); @@ -1612,8 +1648,10 @@ out_reap: scsi_autopm_put_target(starget); - /* now determine if the target has any children at all - * and if not, nuke it */ + /* + * paired with scsi_alloc_target(): determine if the target has + * any children at all and if not, nuke it + */ scsi_target_reap(starget); put_device(&starget->dev); --- linux-3.13.0.orig/drivers/scsi/scsi_sysfs.c +++ linux-3.13.0/drivers/scsi/scsi_sysfs.c @@ -369,17 +369,14 @@ { struct scsi_device *sdev; struct device *parent; - struct scsi_target *starget; struct list_head *this, *tmp; unsigned long flags; sdev = container_of(work, struct scsi_device, ew.work); parent = sdev->sdev_gendev.parent; - starget = to_scsi_target(parent); spin_lock_irqsave(sdev->host->host_lock, flags); - starget->reap_ref++; list_del(&sdev->siblings); list_del(&sdev->same_target_siblings); list_del(&sdev->starved_entry); @@ -399,8 +396,6 @@ /* NULL queue means the device can't be used */ sdev->request_queue = NULL; - scsi_target_reap(scsi_target(sdev)); - kfree(sdev->inquiry); kfree(sdev); @@ -1057,6 +1052,13 @@ sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(dev); + /* + * Paired with the kref_get() in scsi_sysfs_initialize(). 
We have + * remoed sysfs visibility from the device, so make the target + * invisible if this was the last device underneath it. + */ + scsi_target_reap(scsi_target(sdev)); + put_device(dev); } @@ -1119,7 +1121,7 @@ continue; if (starget->dev.parent == dev || &starget->dev == dev) { /* assuming new targets arrive at the end */ - starget->reap_ref++; + kref_get(&starget->reap_ref); spin_unlock_irqrestore(shost->host_lock, flags); if (last) scsi_target_reap(last); @@ -1203,6 +1205,12 @@ list_add_tail(&sdev->same_target_siblings, &starget->devices); list_add_tail(&sdev->siblings, &shost->__devices); spin_unlock_irqrestore(shost->host_lock, flags); + /* + * device can now only be removed via __scsi_remove_device() so hold + * the target. Target will be held in CREATED state until something + * beneath it becomes visible (in which case it moves to RUNNING) + */ + kref_get(&starget->reap_ref); } int scsi_is_sdev_device(const struct device *dev) --- linux-3.13.0.orig/drivers/scsi/scsi_transport_sas.c +++ linux-3.13.0/drivers/scsi/scsi_transport_sas.c @@ -1621,8 +1621,6 @@ list_del(&rphy->list); mutex_unlock(&sas_host->lock); - sas_bsg_remove(shost, rphy); - transport_destroy_device(dev); put_device(dev); @@ -1681,6 +1679,7 @@ } sas_rphy_unlink(rphy); + sas_bsg_remove(NULL, rphy); transport_remove_device(dev); device_del(dev); } --- linux-3.13.0.orig/drivers/scsi/sd.c +++ linux-3.13.0/drivers/scsi/sd.c @@ -830,7 +830,7 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) { - rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER; + rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; rq->retries = SD_MAX_RETRIES; rq->cmd[0] = SYNCHRONIZE_CACHE; rq->cmd_len = 10; @@ -1463,8 +1463,8 @@ sd_print_sense_hdr(sdkp, &sshdr); /* we need to evaluate the error return */ if (scsi_sense_valid(&sshdr) && - /* 0x3a is medium not present */ - sshdr.asc == 0x3a) + (sshdr.asc == 0x3a || /* medium not present */ + sshdr.asc == 0x20)) /* invalid command */ /* this is no error here */ return 0; @@ -2454,7 +2454,10 @@ } sdkp->DPOFUA = (data.device_specific & 0x10) != 0; - if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + if (sdp->broken_fua) { + sd_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { sd_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; @@ -2688,6 +2691,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp) { + /* Attempt VPD inquiry if the device blacklist explicitly calls + * for it. + */ + if (sdp->try_vpd_pages) + return 1; /* * Although VPD inquiries can go to SCSI-2 type devices, * some USB ones crash on receiving them, and the pages --- linux-3.13.0.orig/drivers/scsi/sg.c +++ linux-3.13.0/drivers/scsi/sg.c @@ -522,7 +522,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; - int err = 0; + int err = 0, err2; int len; if (count < SZ_SG_IO_HDR) { @@ -551,8 +551,8 @@ goto err_out; } err_out: - err = sg_finish_rem_req(srp); - return (0 == err) ? count : err; + err2 = sg_finish_rem_req(srp); + return err ? : err2 ? 
: count; } static ssize_t --- linux-3.13.0.orig/drivers/scsi/storvsc_drv.c +++ linux-3.13.0/drivers/scsi/storvsc_drv.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -326,21 +327,23 @@ */ static int storvsc_timeout = 180; +static int msft_blist_flags = BLIST_TRY_VPD_PAGES; + #define STORVSC_MAX_IO_REQUESTS 200 static void storvsc_on_channel_callback(void *context); -/* - * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In - * reality, the path/target is not used (ie always set to 0) so our - * scsi host adapter essentially has 1 bus with 1 target that contains - * up to 256 luns. - */ -#define STORVSC_MAX_LUNS_PER_TARGET 64 -#define STORVSC_MAX_TARGETS 1 -#define STORVSC_MAX_CHANNELS 1 - - +#define STORVSC_MAX_LUNS_PER_TARGET 255 +#define STORVSC_MAX_TARGETS 2 +#define STORVSC_MAX_CHANNELS 8 + +#define STORVSC_FC_MAX_LUNS_PER_TARGET 255 +#define STORVSC_FC_MAX_TARGETS 128 +#define STORVSC_FC_MAX_CHANNELS 8 + +#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 +#define STORVSC_IDE_MAX_TARGETS 1 +#define STORVSC_IDE_MAX_CHANNELS 1 struct storvsc_cmd_request { struct list_head entry; @@ -1017,6 +1020,13 @@ case ATA_12: set_host_byte(scmnd, DID_PASSTHROUGH); break; + /* + * On Some Windows hosts TEST_UNIT_READY command can return + * SRB_STATUS_ERROR, let the upper level code deal with it + * based on the sense information. + */ + case TEST_UNIT_READY: + break; default: set_host_byte(scmnd, DID_TARGET_FAILURE); } @@ -1419,6 +1429,9 @@ { struct stor_mem_pools *memp = sdevice->hostdata; + if (!memp) + return; + mempool_destroy(memp->request_mempool); kmem_cache_destroy(memp->request_pool); kfree(memp); @@ -1438,6 +1451,27 @@ sdevice->no_write_same = 1; + /* + * Add blist flags to permit the reading of the VPD pages even when + * the target may claim SPC-2 compliance. MSFT targets currently + * claim SPC-2 compliance while they implement post SPC-2 features. + * With this patch we can correctly handle WRITE_SAME_16 issues. + */ + sdevice->sdev_bflags |= msft_blist_flags; + + /* + * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3 + * if the device is a MSFT virtual device. + */ + if (!strncmp(sdevice->vendor, "Msft", 4)) { + switch (vmbus_proto_version) { + case VERSION_WIN8: + case VERSION_WIN8_1: + sdevice->scsi_level = SCSI_SPC_3; + break; + } + } + return 0; } @@ -1515,6 +1549,16 @@ return SUCCESS; } +/* + * The host guarantees to respond to each command, although I/O latencies might + * be unbounded on Azure. Reset the timer unconditionally to give the host a + * chance to perform EH. + */ +static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) +{ + return BLK_EH_RESET_TIMER; +} + static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd) { bool allowed = true; @@ -1550,9 +1594,19 @@ struct vmscsi_request *vm_srb; struct stor_mem_pools *memp = scmnd->device->hostdata; - if (!storvsc_scsi_cmd_ok(scmnd)) { - scmnd->scsi_done(scmnd); - return 0; + if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) { + /* + * On legacy hosts filter unimplemented commands. + * Future hosts are expected to correctly handle + * unsupported commands. Furthermore, it is + * possible that some of the currently + * unsupported commands maybe supported in + * future versions of the host. 
+ */ + if (!storvsc_scsi_cmd_ok(scmnd)) { + scmnd->scsi_done(scmnd); + return 0; + } } request_size = sizeof(struct storvsc_cmd_request); @@ -1577,26 +1631,24 @@ vm_srb = &cmd_request->vstor_packet.vm_srb; vm_srb->win8_extension.time_out_value = 60; + vm_srb->win8_extension.srb_flags |= + (SRB_FLAGS_QUEUE_ACTION_ENABLE | + SRB_FLAGS_DISABLE_SYNCH_TRANSFER); /* Build the SRB */ switch (scmnd->sc_data_direction) { case DMA_TO_DEVICE: vm_srb->data_in = WRITE_TYPE; vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT; - vm_srb->win8_extension.srb_flags |= - (SRB_FLAGS_QUEUE_ACTION_ENABLE | - SRB_FLAGS_DISABLE_SYNCH_TRANSFER); break; case DMA_FROM_DEVICE: vm_srb->data_in = READ_TYPE; vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN; - vm_srb->win8_extension.srb_flags |= - (SRB_FLAGS_QUEUE_ACTION_ENABLE | - SRB_FLAGS_DISABLE_SYNCH_TRANSFER); break; default: vm_srb->data_in = UNKNOWN_TYPE; - vm_srb->win8_extension.srb_flags = 0; + vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | + SRB_FLAGS_DATA_OUT); break; } @@ -1661,13 +1713,12 @@ if (ret == -EAGAIN) { /* no more space */ - if (cmd_request->bounce_sgl_count) { + if (cmd_request->bounce_sgl_count) destroy_bounce_buffer(cmd_request->bounce_sgl, cmd_request->bounce_sgl_count); - ret = SCSI_MLQUEUE_DEVICE_BUSY; - goto queue_error; - } + ret = SCSI_MLQUEUE_DEVICE_BUSY; + goto queue_error; } return 0; @@ -1684,11 +1735,11 @@ .bios_param = storvsc_get_chs, .queuecommand = storvsc_queuecommand, .eh_host_reset_handler = storvsc_host_reset_handler, + .eh_timed_out = storvsc_eh_timed_out, .slave_alloc = storvsc_device_alloc, .slave_destroy = storvsc_device_destroy, .slave_configure = storvsc_device_configure, - .cmd_per_lun = 1, - /* 64 max_queue * 1 target */ + .cmd_per_lun = 255, .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS, .this_id = -1, /* no use setting to 0 since ll_blk_rw reset it to 1 */ @@ -1740,19 +1791,25 @@ * set state to properly communicate with the host. 
*/ - if (vmbus_proto_version == VERSION_WIN8) { - sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; - vmscsi_size_delta = 0; - vmstor_current_major = VMSTOR_WIN8_MAJOR; - vmstor_current_minor = VMSTOR_WIN8_MINOR; - } else { + switch (vmbus_proto_version) { + case VERSION_WS2008: + case VERSION_WIN7: sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); vmstor_current_major = VMSTOR_WIN7_MAJOR; vmstor_current_minor = VMSTOR_WIN7_MINOR; + break; + default: + sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE; + vmscsi_size_delta = 0; + vmstor_current_major = VMSTOR_WIN8_MAJOR; + vmstor_current_minor = VMSTOR_WIN8_MINOR; + break; } - + if (dev_id->driver_data == SFC_GUID) + scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS * + STORVSC_FC_MAX_TARGETS); host = scsi_host_alloc(&scsi_driver, sizeof(struct hv_host_device)); if (!host) @@ -1786,12 +1843,25 @@ host_dev->path = stor_device->path_id; host_dev->target = stor_device->target_id; - /* max # of devices per target */ - host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; - /* max # of targets per channel */ - host->max_id = STORVSC_MAX_TARGETS; - /* max # of channels */ - host->max_channel = STORVSC_MAX_CHANNELS - 1; + switch (dev_id->driver_data) { + case SFC_GUID: + host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_FC_MAX_TARGETS; + host->max_channel = STORVSC_FC_MAX_CHANNELS - 1; + break; + + case SCSI_GUID: + host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_MAX_TARGETS; + host->max_channel = STORVSC_MAX_CHANNELS - 1; + break; + + default: + host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_IDE_MAX_TARGETS; + host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1; + break; + } /* max cmd length */ host->max_cmd_len = STORVSC_MAX_CMD_LEN; --- linux-3.13.0.orig/drivers/scsi/sun3_NCR5380.c +++ linux-3.13.0/drivers/scsi/sun3_NCR5380.c @@ -2597,15 +2597,15 @@ * Purpose : abort a command * * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the - * host byte of the result field to, if zero DID_ABORTED is + * host byte of the result field to, if zero DID_ABORTED is * used. * - * Returns : 0 - success, -1 on failure. + * Returns : SUCCESS - success, FAILED on failure. * - * XXX - there is no way to abort the command that is currently - * connected, you have to wait for it to complete. If this is + * XXX - there is no way to abort the command that is currently + * connected, you have to wait for it to complete. If this is * a problem, we could implement longjmp() / setjmp(), setjmp() - * called where the loop started in NCR5380_main(). + * called where the loop started in NCR5380_main(). 
*/ static int NCR5380_abort(struct scsi_cmnd *cmd) --- linux-3.13.0.orig/drivers/scsi/virtio_scsi.c +++ linux-3.13.0/drivers/scsi/virtio_scsi.c @@ -273,6 +273,16 @@ virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; @@ -291,6 +301,8 @@ virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; +static void virtscsi_handle_event(struct work_struct *work); + static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { @@ -298,6 +310,7 @@ struct scatterlist sg; unsigned long flags; + INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); @@ -415,7 +428,6 @@ { struct virtio_scsi_event_node *event_node = buf; - INIT_WORK(&event_node->work, virtscsi_handle_event); schedule_work(&event_node->work); } @@ -605,6 +617,18 @@ cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. + */ + virtscsi_poll_requests(vscsi); + out: mempool_free(cmd, virtscsi_cmd_pool); return ret; @@ -750,8 +774,12 @@ vscsi->affinity_hint_set = true; } else { - for (i = 0; i < vscsi->num_queues; i++) + for (i = 0; i < vscsi->num_queues; i++) { + if (!vscsi->req_vqs[i].vq) + continue; + virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); + } vscsi->affinity_hint_set = false; } @@ -956,6 +984,10 @@ #ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + unregister_hotcpu_notifier(&vscsi->nb); virtscsi_remove_vqs(vdev); return 0; } @@ -964,8 +996,17 @@ { struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); + int err; + + err = virtscsi_init(vdev, vscsi); + if (err) + return err; + + err = register_hotcpu_notifier(&vscsi->nb); + if (err) + vdev->config->del_vqs(vdev); - return virtscsi_init(vdev, vscsi); + return err; } #endif --- linux-3.13.0.orig/drivers/spi/spi-ath79.c +++ linux-3.13.0/drivers/spi/spi-ath79.c @@ -132,9 +132,9 @@ flags = GPIOF_DIR_OUT; if (spi->mode & SPI_CS_HIGH) - flags |= GPIOF_INIT_HIGH; - else flags |= GPIOF_INIT_LOW; + else + flags |= GPIOF_INIT_HIGH; status = gpio_request_one(cdata->gpio, flags, dev_name(&spi->dev)); --- linux-3.13.0.orig/drivers/spi/spi-bcm63xx.c +++ linux-3.13.0/drivers/spi/spi-bcm63xx.c @@ -169,8 +169,6 @@ transfer_list); } - len -= prepend_len; - init_completion(&bs->done); /* Fill in the Message control register */ --- linux-3.13.0.orig/drivers/spi/spi-coldfire-qspi.c +++ linux-3.13.0/drivers/spi/spi-coldfire-qspi.c @@ -539,7 +539,8 @@ #ifdef CONFIG_PM_RUNTIME static int mcfqspi_runtime_suspend(struct device *dev) { - struct mcfqspi *mcfqspi = dev_get_drvdata(dev); + struct spi_master *master = dev_get_drvdata(dev); + struct 
mcfqspi *mcfqspi = spi_master_get_devdata(master); clk_disable(mcfqspi->clk); @@ -548,7 +549,8 @@ static int mcfqspi_runtime_resume(struct device *dev) { - struct mcfqspi *mcfqspi = dev_get_drvdata(dev); + struct spi_master *master = dev_get_drvdata(dev); + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); clk_enable(mcfqspi->clk); --- linux-3.13.0.orig/drivers/spi/spi-dw-mid.c +++ linux-3.13.0/drivers/spi/spi-dw-mid.c @@ -89,7 +89,10 @@ static void mid_spi_dma_exit(struct dw_spi *dws) { + dmaengine_terminate_all(dws->txchan); dma_release_channel(dws->txchan); + + dmaengine_terminate_all(dws->rxchan); dma_release_channel(dws->rxchan); } @@ -136,7 +139,7 @@ txconf.dst_addr = dws->dma_addr; txconf.dst_maxburst = LNW_DMA_MSIZE_16; txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + txconf.dst_addr_width = dws->dma_width; txconf.device_fc = false; txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, @@ -159,7 +162,7 @@ rxconf.src_addr = dws->dma_addr; rxconf.src_maxburst = LNW_DMA_MSIZE_16; rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + rxconf.src_addr_width = dws->dma_width; rxconf.device_fc = false; rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, @@ -216,7 +219,6 @@ iounmap(clk_reg); dws->num_cs = 16; - dws->fifo_len = 40; /* FIFO has 40 words buffer */ #ifdef CONFIG_SPI_DW_MID_DMA dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); --- linux-3.13.0.orig/drivers/spi/spi-dw.c +++ linux-3.13.0/drivers/spi/spi-dw.c @@ -394,9 +394,6 @@ chip = dws->cur_chip; spi = message->spi; - if (unlikely(!chip->clk_div)) - chip->clk_div = dws->max_freq / chip->speed_hz; - if (message->state == ERROR_STATE) { message->status = -EIO; goto early_exit; @@ -438,7 +435,7 @@ if (transfer->speed_hz) { speed = chip->speed_hz; - if (transfer->speed_hz != speed) { + if ((transfer->speed_hz != speed) || (!chip->clk_div)) { speed = transfer->speed_hz; if (speed > dws->max_freq) { printk(KERN_ERR "MRST SPI0: unsupported" @@ -658,7 +655,6 @@ dev_err(&spi->dev, "No max speed HZ parameter\n"); return -EINVAL; } - chip->speed_hz = spi->max_speed_hz; chip->tmode = 0; /* Tx & Rx */ /* Default SPI mode is SCPOL = 0, SCPH = 0 */ @@ -675,6 +671,7 @@ { struct chip_data *chip = spi_get_ctldata(spi); kfree(chip); + spi_set_ctldata(spi, NULL); } static int init_queue(struct dw_spi *dws) @@ -765,13 +762,13 @@ */ if (!dws->fifo_len) { u32 fifo; - for (fifo = 2; fifo <= 257; fifo++) { + for (fifo = 1; fifo < 256; fifo++) { dw_writew(dws, DW_SPI_TXFLTR, fifo); if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) break; } - dws->fifo_len = (fifo == 257) ? 0 : fifo; + dws->fifo_len = (fifo == 1) ? 
0 : fifo; dw_writew(dws, DW_SPI_TXFLTR, 0); } } --- linux-3.13.0.orig/drivers/spi/spi-efm32.c +++ linux-3.13.0/drivers/spi/spi-efm32.c @@ -487,6 +487,9 @@ static const struct of_device_id efm32_spi_dt_ids[] = { { + .compatible = "energymicro,efm32-spi", + }, { + /* doesn't follow the "vendor,device" scheme, don't use */ .compatible = "efm32,spi", }, { /* sentinel */ --- linux-3.13.0.orig/drivers/spi/spi-fsl-dspi.c +++ linux-3.13.0/drivers/spi/spi-fsl-dspi.c @@ -45,7 +45,7 @@ #define SPI_TCR 0x08 -#define SPI_CTAR(x) (0x0c + (x * 4)) +#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4)) #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27) #define SPI_CTAR_CPOL(x) ((x) << 26) #define SPI_CTAR_CPHA(x) ((x) << 25) @@ -69,7 +69,7 @@ #define SPI_PUSHR 0x34 #define SPI_PUSHR_CONT (1 << 31) -#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28) +#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28) #define SPI_PUSHR_EOQ (1 << 27) #define SPI_PUSHR_CTCNT (1 << 26) #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16) @@ -336,7 +336,7 @@ /* Only alloc on first setup */ chip = spi_get_ctldata(spi); if (chip == NULL) { - chip = kcalloc(1, sizeof(struct chip_data), GFP_KERNEL); + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM; } @@ -347,7 +347,6 @@ fmsz = spi->bits_per_word - 1; } else { pr_err("Invalid wordsize\n"); - kfree(chip); return -ENODEV; } @@ -379,6 +378,16 @@ return dspi_setup_transfer(spi, NULL); } +static void dspi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); + + dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n", + spi->master->bus_num, spi->chip_select); + + kfree(chip); +} + static irqreturn_t dspi_interrupt(int irq, void *dev_id) { struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; @@ -421,7 +430,6 @@ static int dspi_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); struct fsl_dspi *dspi = spi_master_get_devdata(master); @@ -457,6 +465,7 @@ dspi->bitbang.master->setup = dspi_setup; dspi->bitbang.master->dev.of_node = pdev->dev.of_node; + master->cleanup = dspi_cleanup; master->mode_bits = SPI_CPOL | SPI_CPHA; master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | SPI_BPW_MASK(16); @@ -505,7 +514,7 @@ clk_prepare_enable(dspi->clk); init_waitqueue_head(&dspi->waitq); - platform_set_drvdata(pdev, dspi); + platform_set_drvdata(pdev, master); ret = spi_bitbang_start(&dspi->bitbang); if (ret != 0) { @@ -526,7 +535,8 @@ static int dspi_remove(struct platform_device *pdev) { - struct fsl_dspi *dspi = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct fsl_dspi *dspi = spi_master_get_devdata(master); /* Disconnect from the SPI framework */ spi_bitbang_stop(&dspi->bitbang); --- linux-3.13.0.orig/drivers/spi/spi-fsl-espi.c +++ linux-3.13.0/drivers/spi/spi-fsl-espi.c @@ -456,16 +456,16 @@ int retval; u32 hw_mode; u32 loop_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); if (!spi->max_speed_hz) return -EINVAL; if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); + cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return -ENOMEM; - spi->controller_state = cs; + spi_set_ctldata(spi, cs); } mpc8xxx_spi = spi_master_get_devdata(spi->master); @@ -500,6 +500,14 @@ return 0; } +static void fsl_espi_cleanup(struct spi_device *spi) +{ + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); + + kfree(cs); + spi_set_ctldata(spi, NULL); +} + void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 
events) { struct fsl_espi_reg *reg_base = mspi->reg_base; @@ -607,6 +615,7 @@ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->setup = fsl_espi_setup; + master->cleanup = fsl_espi_cleanup; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; --- linux-3.13.0.orig/drivers/spi/spi-fsl-spi.c +++ linux-3.13.0/drivers/spi/spi-fsl-spi.c @@ -420,16 +420,16 @@ struct fsl_spi_reg *reg_base; int retval; u32 hw_mode; - struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); if (!spi->max_speed_hz) return -EINVAL; if (!cs) { - cs = kzalloc(sizeof *cs, GFP_KERNEL); + cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return -ENOMEM; - spi->controller_state = cs; + spi_set_ctldata(spi, cs); } mpc8xxx_spi = spi_master_get_devdata(spi->master); @@ -491,9 +491,13 @@ static void fsl_spi_cleanup(struct spi_device *spi) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi); if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio)) gpio_free(spi->cs_gpio); + + kfree(cs); + spi_set_ctldata(spi, NULL); } static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) --- linux-3.13.0.orig/drivers/spi/spi-imx.c +++ linux-3.13.0/drivers/spi/spi-imx.c @@ -925,8 +925,8 @@ spi_bitbang_stop(&spi_imx->bitbang); writel(0, spi_imx->base + MXC_CSPICTRL); - clk_disable_unprepare(spi_imx->clk_ipg); - clk_disable_unprepare(spi_imx->clk_per); + clk_unprepare(spi_imx->clk_ipg); + clk_unprepare(spi_imx->clk_per); spi_master_put(master); return 0; --- linux-3.13.0.orig/drivers/spi/spi-nuc900.c +++ linux-3.13.0/drivers/spi/spi-nuc900.c @@ -363,6 +363,8 @@ init_completion(&hw->done); master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + if (hw->pdata->lsb) + master->mode_bits |= SPI_LSB_FIRST; master->num_chipselect = hw->pdata->num_cs; master->bus_num = hw->pdata->bus_num; hw->bitbang.master = hw->master; --- linux-3.13.0.orig/drivers/spi/spi-omap2-mcspi.c +++ linux-3.13.0/drivers/spi/spi-omap2-mcspi.c @@ -320,7 +320,8 @@ disable_fifo: if (t->rx_buf != NULL) chconf &= ~OMAP2_MCSPI_CHCONF_FFER; - else + + if (t->tx_buf != NULL) chconf &= ~OMAP2_MCSPI_CHCONF_FFET; mcspi_write_chconf0(spi, chconf); --- linux-3.13.0.orig/drivers/spi/spi-orion.c +++ linux-3.13.0/drivers/spi/spi-orion.c @@ -404,8 +404,6 @@ struct resource *r; unsigned long tclk_hz; int status = 0; - const u32 *iprop; - int size; master = spi_alloc_master(&pdev->dev, sizeof(*spi)); if (master == NULL) { @@ -416,10 +414,10 @@ if (pdev->id != -1) master->bus_num = pdev->id; if (pdev->dev.of_node) { - iprop = of_get_property(pdev->dev.of_node, "cell-index", - &size); - if (iprop && size == sizeof(*iprop)) - master->bus_num = *iprop; + u32 cell_index; + if (!of_property_read_u32(pdev->dev.of_node, "cell-index", + &cell_index)) + master->bus_num = cell_index; } /* we support only mode 0, and no options */ --- linux-3.13.0.orig/drivers/spi/spi-pl022.c +++ linux-3.13.0/drivers/spi/spi-pl022.c @@ -503,12 +503,12 @@ pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; - spi_finalize_current_message(pl022->master); /* disable the SPI/SSP operation */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + spi_finalize_current_message(pl022->master); } /** @@ -1075,7 +1075,7 @@ pl022->sgt_tx.nents, DMA_TO_DEVICE); err_tx_sgmap: dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, - pl022->sgt_tx.nents, DMA_FROM_DEVICE); + 
pl022->sgt_rx.nents, DMA_FROM_DEVICE); err_rx_sgmap: sg_free_table(&pl022->sgt_tx); err_alloc_tx_sg: --- linux-3.13.0.orig/drivers/spi/spi-pxa2xx.c +++ linux-3.13.0/drivers/spi/spi-pxa2xx.c @@ -400,8 +400,8 @@ cs_deassert(drv_data); } - spi_finalize_current_message(drv_data->master); drv_data->cur_chip = NULL; + spi_finalize_current_message(drv_data->master); } static void reset_sccr1(struct driver_data *drv_data) @@ -1066,6 +1066,8 @@ pdata->num_chipselect = 1; pdata->enable_dma = true; + pdata->tx_chan_id = -1; + pdata->rx_chan_id = -1; return pdata; } @@ -1076,6 +1078,7 @@ { "INT3430", 0 }, { "INT3431", 0 }, { "80860F0E", 0 }, + { "8086228E", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); @@ -1277,7 +1280,9 @@ if (status != 0) return status; write_SSCR0(0, drv_data->ioaddr); - clk_disable_unprepare(ssp->clk); + + if (!pm_runtime_suspended(dev)) + clk_disable_unprepare(ssp->clk); return 0; } @@ -1291,7 +1296,8 @@ pxa2xx_spi_dma_resume(drv_data); /* Enable the SSP clock */ - clk_prepare_enable(ssp->clk); + if (!pm_runtime_suspended(dev)) + clk_prepare_enable(ssp->clk); /* Restore LPSS private register bits */ lpss_ssp_setup(drv_data); --- linux-3.13.0.orig/drivers/spi/spi-sirf.c +++ linux-3.13.0/drivers/spi/spi-sirf.c @@ -366,7 +366,9 @@ writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN | SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN | SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN | - SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN); + SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN | + SIRFSOC_SPI_RX_IO_DMA_INT_EN, + sspi->base + SIRFSOC_SPI_INT_EN); } writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN); --- linux-3.13.0.orig/drivers/spi/spi.c +++ linux-3.13.0/drivers/spi/spi.c @@ -829,13 +829,14 @@ "failed to unprepare message: %d\n", ret); } } + + trace_spi_message_done(mesg); + master->cur_msg_prepared = false; mesg->state = NULL; if (mesg->complete) mesg->complete(mesg->context); - - trace_spi_message_done(mesg); } EXPORT_SYMBOL_GPL(spi_finalize_current_message); @@ -1551,7 +1552,7 @@ */ int spi_setup(struct spi_device *spi) { - unsigned bad_bits; + unsigned bad_bits, ugly_bits; int status = 0; /* check mode to prevent that DUAL and QUAD set at the same time @@ -1571,6 +1572,15 @@ * that aren't supported with their current master */ bad_bits = spi->mode & ~spi->master->mode_bits; + ugly_bits = bad_bits & + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); + if (ugly_bits) { + dev_warn(&spi->dev, + "setup: ignoring unsupported mode bits %x\n", + ugly_bits); + spi->mode &= ~ugly_bits; + bad_bits &= ~ugly_bits; + } if (bad_bits) { dev_err(&spi->dev, "setup: unsupported mode bits %x\n", bad_bits); --- linux-3.13.0.orig/drivers/staging/Kconfig +++ linux-3.13.0/drivers/staging/Kconfig @@ -52,6 +52,8 @@ source "drivers/staging/rtl8188eu/Kconfig" +source "drivers/staging/rtl8821ae/Kconfig" + source "drivers/staging/rts5139/Kconfig" source "drivers/staging/frontier/Kconfig" @@ -150,4 +152,8 @@ source "drivers/staging/dgap/Kconfig" +source "drivers/staging/fsl_qbman/Kconfig" + +source "drivers/staging/fsl_pme2/Kconfig" + endif # STAGING --- linux-3.13.0.orig/drivers/staging/Makefile +++ linux-3.13.0/drivers/staging/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/ obj-$(CONFIG_R8712U) += rtl8712/ obj-$(CONFIG_R8188EU) += rtl8188eu/ +obj-$(CONFIG_R8821AE) += rtl8821ae/ obj-$(CONFIG_RTS5139) += rts5139/ obj-$(CONFIG_TRANZPORT) += frontier/ obj-$(CONFIG_IDE_PHISON) += phison/ @@ -67,3 
+68,5 @@ obj-$(CONFIG_DGNC) += dgnc/ obj-$(CONFIG_DGAP) += dgap/ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ +obj-$(CONFIG_FSL_DPA) += fsl_qbman/ +obj-$(CONFIG_FSL_PME2) += fsl_pme2/ --- linux-3.13.0.orig/drivers/staging/android/binder.c +++ linux-3.13.0/drivers/staging/android/binder.c @@ -2904,7 +2904,7 @@ refs++; if (!ref->death) - goto out; + continue; death++; @@ -2917,7 +2917,6 @@ BUG(); } -out: binder_debug(BINDER_DEBUG_DEAD_BINDER, "node %d now dead, refs %d, death %d\n", node->debug_id, refs, death); --- linux-3.13.0.orig/drivers/staging/bcm/Bcmnet.c +++ linux-3.13.0/drivers/staging/bcm/Bcmnet.c @@ -40,7 +40,7 @@ } static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { return ClassifyPacket(netdev_priv(dev), skb); } --- linux-3.13.0.orig/drivers/staging/comedi/comedi_compat32.c +++ linux-3.13.0/drivers/staging/comedi/comedi_compat32.c @@ -262,7 +262,7 @@ { struct comedi_cmd __user *cmd; struct comedi32_cmd_struct __user *cmd32; - int rc; + int rc, err; cmd32 = compat_ptr(arg); cmd = compat_alloc_user_space(sizeof(*cmd)); @@ -271,7 +271,15 @@ if (rc) return rc; - return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd); + rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd); + if (rc == -EAGAIN) { + /* Special case: copy cmd back to user. */ + err = put_compat_cmd(cmd32, cmd); + if (err) + rc = err; + } + + return rc; } /* Handle 32-bit COMEDI_CMDTEST ioctl. */ --- linux-3.13.0.orig/drivers/staging/comedi/comedi_fops.c +++ linux-3.13.0/drivers/staging/comedi/comedi_fops.c @@ -1425,6 +1425,7 @@ async->cmd.chanlist_len * sizeof(int)); if (IS_ERR(async->cmd.chanlist)) { ret = PTR_ERR(async->cmd.chanlist); + async->cmd.chanlist = NULL; DPRINTK("memdup_user failed with code %d\n", ret); goto cleanup; } @@ -1547,6 +1548,7 @@ cmd.chanlist_len * sizeof(int)); if (IS_ERR(chanlist)) { ret = PTR_ERR(chanlist); + chanlist = NULL; DPRINTK("memdup_user exited with code %d", ret); goto cleanup; } --- linux-3.13.0.orig/drivers/staging/comedi/drivers/8255_pci.c +++ linux-3.13.0/drivers/staging/comedi/drivers/8255_pci.c @@ -56,6 +56,7 @@ #include "../comedidev.h" #include "8255.h" +#include "mite.h" enum pci_8255_boardid { BOARD_ADLINK_PCI7224, @@ -79,6 +80,7 @@ const char *name; int dio_badr; int n_8255; + unsigned int has_mite:1; }; static const struct pci_8255_boardinfo pci_8255_boards[] = { @@ -126,36 +128,43 @@ .name = "ni_pci-dio-96", .dio_badr = 1, .n_8255 = 4, + .has_mite = 1, }, [BOARD_NI_PCIDIO96B] = { .name = "ni_pci-dio-96b", .dio_badr = 1, .n_8255 = 4, + .has_mite = 1, }, [BOARD_NI_PXI6508] = { .name = "ni_pxi-6508", .dio_badr = 1, .n_8255 = 4, + .has_mite = 1, }, [BOARD_NI_PCI6503] = { .name = "ni_pci-6503", .dio_badr = 1, .n_8255 = 1, + .has_mite = 1, }, [BOARD_NI_PCI6503B] = { .name = "ni_pci-6503b", .dio_badr = 1, .n_8255 = 1, + .has_mite = 1, }, [BOARD_NI_PCI6503X] = { .name = "ni_pci-6503x", .dio_badr = 1, .n_8255 = 1, + .has_mite = 1, }, [BOARD_NI_PXI_6503] = { .name = "ni_pxi-6503", .dio_badr = 1, .n_8255 = 1, + .has_mite = 1, }, }; @@ -163,6 +172,25 @@ void __iomem *mmio_base; }; +static int pci_8255_mite_init(struct pci_dev *pcidev) +{ + void __iomem *mite_base; + u32 main_phys_addr; + + /* ioremap the MITE registers (BAR 0) temporarily */ + mite_base = pci_ioremap_bar(pcidev, 0); + if (!mite_base) + return -ENOMEM; + + /* set data window to main registers (BAR 1) */ + main_phys_addr = pci_resource_start(pcidev, 1); + writel(main_phys_addr | WENAB, mite_base + 
MITE_IODWBSR); + + /* finished with MITE registers */ + iounmap(mite_base); + return 0; +} + static int pci_8255_mmio(int dir, int port, int data, unsigned long iobase) { void __iomem *mmio_base = (void __iomem *)iobase; @@ -201,6 +229,12 @@ if (ret) return ret; + if (board->has_mite) { + ret = pci_8255_mite_init(pcidev); + if (ret) + return ret; + } + is_mmio = (pci_resource_flags(pcidev, board->dio_badr) & IORESOURCE_MEM) != 0; if (is_mmio) { --- linux-3.13.0.orig/drivers/staging/comedi/drivers/addi_apci_1032.c +++ linux-3.13.0/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -325,8 +325,8 @@ s = &dev->subdevices[1]; if (dev->irq) { dev->read_subdev = s; - s->type = COMEDI_SUBD_DI | SDF_CMD_READ; - s->subdev_flags = SDF_READABLE; + s->type = COMEDI_SUBD_DI; + s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 1; s->maxdata = 1; s->range_table = &range_digital; --- linux-3.13.0.orig/drivers/staging/comedi/drivers/adl_pci9111.c +++ linux-3.13.0/drivers/staging/comedi/drivers/adl_pci9111.c @@ -859,7 +859,7 @@ pci9111_reset(dev); if (pcidev->irq > 0) { - ret = request_irq(dev->irq, pci9111_interrupt, + ret = request_irq(pcidev->irq, pci9111_interrupt, IRQF_SHARED, dev->board_name, dev); if (ret) return ret; --- linux-3.13.0.orig/drivers/staging/comedi/drivers/adv_pci1710.c +++ linux-3.13.0/drivers/staging/comedi/drivers/adv_pci1710.c @@ -489,6 +489,7 @@ struct comedi_insn *insn, unsigned int *data) { struct pci1710_private *devpriv = dev->private; + unsigned int val; int n, chan, range, ofs; chan = CR_CHAN(insn->chanspec); @@ -504,11 +505,14 @@ outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF); ofs = PCI171x_DA1; } + val = devpriv->ao_data[chan]; - for (n = 0; n < insn->n; n++) - outw(data[n], dev->iobase + ofs); + for (n = 0; n < insn->n; n++) { + val = data[n]; + outw(val, dev->iobase + ofs); + } - devpriv->ao_data[chan] = data[n]; + devpriv->ao_data[chan] = val; return n; @@ -674,6 +678,7 @@ struct comedi_insn *insn, unsigned int *data) { struct pci1710_private *devpriv = dev->private; + unsigned int val; int n, rangereg, chan; chan = CR_CHAN(insn->chanspec); @@ -683,13 +688,15 @@ outb(rangereg, dev->iobase + PCI1720_RANGE); devpriv->da_ranges = rangereg; } + val = devpriv->ao_data[chan]; for (n = 0; n < insn->n; n++) { - outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1)); + val = data[n]; + outw(val, dev->iobase + PCI1720_DA0 + (chan << 1)); outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */ } - devpriv->ao_data[chan] = data[n]; + devpriv->ao_data[chan] = val; return n; } --- linux-3.13.0.orig/drivers/staging/fsl_pme2/Kconfig +++ linux-3.13.0/drivers/staging/fsl_pme2/Kconfig @@ -0,0 +1,215 @@ +config FSL_PME2 + bool "Freescale Datapath Pattern Matcher support" + depends on HAS_FSL_PME && FSL_QMAN_PORTAL + default y + +menu "Freescale Datapath PME options" + depends on FSL_PME2 + +config FSL_PME2_CTRL + bool "Freescale PME2 (p4080, etc) device control" + default y + ---help--- + This compiles device support for the Freescale PME2 pattern matching + part contained in datapath-enabled SoCs (ie. accessed via Qman and + Bman portal functionality). At least one guest operating system must + have this driver support, together with the appropriate device-tree + entry, for PME2 functionality to be available. It is responsible for + allocating system memory to the device and configuring it for + operation. For this reason, it must be built into the kernel and will + initialise during early kernel boot. 
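For orientation: the table-size options that follow (FSL_PME2_PDSRSIZE and FSL_PME2_SRESIZE) are entry counts, not byte sizes. The control driver converts them to bytes at boot exactly as its DEFAULT_PDSR_SZ (entries << 7, i.e. 128-byte PDSR entries) and DEFAULT_SRE_SZ (entries << 5, i.e. 32-byte SRE entries) macros do in pme2_ctrl.c. A minimal standalone sketch of that arithmetic, assuming nothing beyond the defaults quoted in the help text (table_bytes() is an illustrative helper, not a driver symbol):

#include <stdio.h>

/* Sketch: how the Kconfig entry counts become byte sizes at boot. Mirrors
 * DEFAULT_PDSR_SZ (entries << 7) and DEFAULT_SRE_SZ (entries << 5) from
 * pme2_ctrl.c; table_bytes() is an illustrative helper, not a driver symbol.
 */
static unsigned long long table_bytes(unsigned long long entries,
				      unsigned int shift)
{
	return entries << shift;
}

int main(void)
{
	/* Defaults quoted in the help text: 131072 PDSR entries (~16 MB)
	 * and 327680 SRE entries (~10 MB).
	 */
	printf("PDSR: %llu entries x 128 B = %llu bytes\n",
	       131072ULL, table_bytes(131072ULL, 7));
	printf("SRE:  %llu entries x  32 B = %llu bytes\n",
	       327680ULL, table_bytes(327680ULL, 5));
	return 0;
}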
+ +config FSL_PME2_PDSRSIZE + int "Pattern Description and Stateful Rule default table size" + depends on FSL_PME2_CTRL + range 74240 1048573 + default 131072 + help + Select the default size of the Pattern Description and Stateful Rule + table as the number of 128 byte entries. This only takes effect if + the device tree node doesn't have the 'fsl,pme-pdsr' property. + range 74240-1048573 (9.5MB-134MB) + default 131072 (16MB) + +if FSL_PME2_CTRL +comment "Statefule Rule Engine" +endif + +config FSL_PME2_SRESIZE + int "SRE Session Context Entries table default table size" + depends on FSL_PME2_CTRL + range 0 134217727 + default 327680 + help + Select the default size of the SRE Context Table as the number of 32 + byte entries. This only takes effect if the device tree node doesn't + have the 'fsl,pme-sre' property. + range 0-134217727 (0-4GB) + default 327680 (10MB) + +config FSL_PME2_SRE_AIM + bool "Alternate Inconclusive Mode" + depends on FSL_PME2_CTRL + default n + help + Select the inconclusive match mode treatment. When true the + “alternate” inconclusive mode is used. When false the “default” + inconclusive mode is used. + +config FSL_PME2_SRE_ESR + bool "End of SUI Simple Report" + depends on FSL_PME2_CTRL + default n + help + Select if an End of SUI will produce a Simple End of SUI report. + +config FSL_PME2_SRE_CTX_SIZE_PER_SESSION + int "Default SRE Context Size per Session (16 => 64KB, 17 => 128KB)" + depends on FSL_PME2_CTRL + range 5 17 + default 17 + help + Select SRE context size per session as a power of 2. + range 5-17 + Examples: + 5 => 32 B + 6 => 64 B + 7 => 128 B + 8 => 256 B + 9 => 512 B + 10 => 1 KB + 11 => 2 KB + 12 => 4 KB + 13 => 8 KB + 14 => 16 KB + 15 => 32 KB + 16 => 64 KB + 17 => 128 KB + +config FSL_PME2_SRE_CNR + int "Configured Number of Stateful Rules as a multiple of 256 (128 => 32768 )" + depends on FSL_PME2_CTRL + range 0 128 + default 128 + help + Select number of stateful rules as a multiple of 256. + range 0-128 + Examples: + 0 => 0 + 1 => 256 + 2 => 512 + ... + 127 => 32512 + 128 => 32768 + +config FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT + int "Maximum number of SRE instructions to be executed per reaction." + depends on FSL_PME2_CTRL + range 0 65535 + default 65535 + help + Select the maximum number of SRE instructions to be executed per + reaction. + range 0 65535 + +config FSL_PME2_SRE_MAX_BLOCK_NUMBER + int "Maximum number of Reaction Head blocks to be traversed per pattern match event" + depends on FSL_PME2_CTRL + range 0 32767 + default 32767 + help + Select the maximum number of reaction head blocks to be traversed per + pattern match event (e.g. a matched pattern or an End of SUI event). + range 0-32767 + +config FSL_PME2_PORTAL + tristate "Freescale PME2 (p4080, etc) device usage" + default y + ---help--- + This compiles I/O support for the Freescale PME2 pattern matching + part contained in datapath-enabled SoCs (ie. accessed via Qman and + Bman portal functionality). + +if FSL_PME2_PORTAL + +config FSL_PME2_TEST_HIGH + tristate "PME2 high-level self-test" + default n + ---help--- + This uses the high-level Qman driver (and the cpu-affine portals it + manages) to perform high-level PME2 API testing with it. + +config FSL_PME2_TEST_SCAN + tristate "PME2 scan self-test" + default n + ---help--- + This uses the high-level Qman driver (and the cpu-affine portals it + manages) to perform scan PME2 API testing with it. 
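The per-session SRE context size above is given as a power-of-two exponent: the driver computes sre_session_ctx_size as 1 << CONFIG_FSL_PME2_SRE_CTX_SIZE_PER_SESSION and, in its probe routine later in this patch, programs the SREC context-size field with ilog2(size) - 4. A minimal standalone check of that mapping, assuming only what the Kconfig table and the probe code state (ilog2_u32() below is a local stand-in for the kernel's ilog2() helper):

#include <stdio.h>

/* Sketch: the power-of-two mapping documented in the Kconfig table above
 * (exponent 5 -> 32 B ... exponent 17 -> 128 KB) and the register code the
 * probe derives from it (ilog2(size) - 4).
 */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int code;

	for (code = 5; code <= 17; code++) {
		unsigned int size = 1u << code;              /* sre_session_ctx_size */
		unsigned int reg_code = ilog2_u32(size) - 4; /* srecontextsize_code  */

		printf("Kconfig %2u -> %6u bytes -> SREC code %2u\n",
		       code, size, reg_code);
	}
	return 0;
}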
+ +config FSL_PME2_TEST_SCAN_WITH_BPID + bool "PME2 scan self-test with buffer pool" + depends on FSL_PME2_TEST_SCAN && FSL_BMAN_PORTAL + default y + ---help--- + This uses a buffer pool id for scan test + +config FSL_PME2_TEST_SCAN_WITH_BPID_SIZE + int "Buffer Pool size." + depends on FSL_PME2_TEST_SCAN_WITH_BPID + range 0 11 + default 3 + ---help--- + This uses the specified buffer pool size. + +config FSL_PME2_DB + tristate "PME2 Database support" + depends on FSL_PME2_CTRL + default y + ---help--- + This compiles the database driver for PME2. + +config FSL_PME2_DB_QOSOUT_PRIORITY + int "PME DB output frame queue priority." + depends on FSL_PME2_DB + range 0 7 + default 2 + ---help--- + The PME DB has a scheduled output frame queue. The qos priority level is configurable. + range 0-7 + 0 => High Priority 0 + 1 => High Priority 1 + 2 => Medium Priority + 3 => Medium Priority + 4 => Medium Priority + 5 => Low Priority + 6 => Low Priority + 7 => Low Priority + +config FSL_PME2_SCAN + tristate "PME2 Scan support" + default y + ---help--- + This compiles the scan driver for PME2. + +config FSL_PME2_SCAN_DEBUG + bool "Debug Statements" + default n + depends on FSL_PME2_SCAN + ---help--- + The PME2_SCAN driver can optionally trace with more verbosity + of verbosity. + +endif + +config FSL_PME2_STAT_ACCUMULATOR_UPDATE_INTERVAL + int "Configure the pme2 statistics update interval in milliseconds" + depends on FSL_PME2_CTRL + range 0 10000 + default 3400 + help + The pme accumulator reads the current device statistics and add it + to a running counter. The frequency of these updates may be + controlled. If 0 is specified, no automatic updates is done. + range 0-10000 + +endmenu --- linux-3.13.0.orig/drivers/staging/fsl_pme2/Makefile +++ linux-3.13.0/drivers/staging/fsl_pme2/Makefile @@ -0,0 +1,10 @@ +# PME +obj-$(CONFIG_FSL_PME2_CTRL) += pme2_ctrl.o pme2_sysfs.o +obj-$(CONFIG_FSL_PME2_PORTAL) += pme2.o +pme2-y := pme2_low.o pme2_high.o +obj-$(CONFIG_FSL_PME2_TEST_HIGH) += pme2_test_high.o +obj-$(CONFIG_FSL_PME2_TEST_SCAN) += pme2_test_scanning.o +pme2_test_scanning-y = pme2_test_scan.o pme2_sample_db.o +obj-$(CONFIG_FSL_PME2_DB) += pme2_db.o +obj-$(CONFIG_FSL_PME2_SCAN) += pme2_scan.o + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_ctrl.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_ctrl.c @@ -0,0 +1,1335 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "pme2_private.h" +#include "pme2_regs.h" + +/* PME HW Revision */ +#define PME_REV(rev1_reg) (rev1_reg & 0x0000FFFF) +#define PME_REV_2_0 0x00000200 +#define PME_REV_2_1 0x00000201 +#define DEC1_MAX_REV_2_0 0x000FFFFC +#define DEC1_MAX_REV_2_1 0x0007FFFC + + +/* Driver Name is used in naming the sysfs directory + * /sys/bus/of_platform/drivers/DRV_NAME + */ +#define DRV_NAME "fsl-pme" + +#define DEFAULT_PDSR_SZ (CONFIG_FSL_PME2_PDSRSIZE << 7) +#define DEFAULT_SRE_SZ (CONFIG_FSL_PME2_SRESIZE << 5) +#define PDSR_TBL_ALIGN (1 << 7) +#define SRE_TBL_ALIGN (1 << 5) +#define DEFAULT_SRFCC 400 + +/* Defaults */ +#define DEFAULT_DEC0_MTE 0x3FFF +#define DEFAULT_DLC_MPM 0xFFFF +#define DEFAULT_DLC_MPE 0xFFFF +/* Boot parameters */ +DECLARE_GLOBAL(max_test_line_per_pat, unsigned int, uint, + DEFAULT_DEC0_MTE, + "Maximum allowed Test Line Executions per pattern, " + "scaled by a factor of 8"); +DECLARE_GLOBAL(max_pat_eval_per_sui, unsigned int, uint, + DEFAULT_DLC_MPE, + "Maximum Pattern Evaluations per SUI, scaled by a factor of 8") +DECLARE_GLOBAL(max_pat_matches_per_sui, unsigned int, uint, + DEFAULT_DLC_MPM, + "Maximum Pattern Matches per SUI"); +/* SRE */ +DECLARE_GLOBAL(sre_rule_num, unsigned int, uint, + CONFIG_FSL_PME2_SRE_CNR, + "Configured Number of Stateful Rules"); +DECLARE_GLOBAL(sre_session_ctx_size, unsigned int, uint, + 1 << CONFIG_FSL_PME2_SRE_CTX_SIZE_PER_SESSION, + "SRE Context Size per Session"); + +/************ + * Section 1 + ************ + * This code is called during kernel early-boot and could never be made + * loadable. + */ +static dma_addr_t dxe_a, sre_a; +static size_t dxe_sz = DEFAULT_PDSR_SZ, sre_sz = DEFAULT_SRE_SZ; + +/* Parse the property to extract the memory location and size and + * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default size. 
*/ +static __init int parse_mem_property(struct device_node *node, const char *name, + dma_addr_t *addr, size_t *sz, u64 align, int zero) +{ + const u32 *pint; + int ret; + + pint = of_get_property(node, name, &ret); + if (!pint || (ret != 16)) { + pr_info("pme: No %s property '%s', using memblock_alloc(0x%016zx)\n", + node->full_name, name, *sz); + *addr = memblock_alloc(*sz, align); + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + return 0; + } + pr_info("pme: Using %s property '%s'\n", node->full_name, name); + /* If using a "zero-pma", don't try to zero it, even if you asked */ + if (zero && of_find_property(node, "zero-pma", &ret)) { + pr_info(" it's a 'zero-pma', not zeroing from s/w\n"); + zero = 0; + } + *addr = ((u64)pint[0] << 32) | (u64)pint[1]; + *sz = ((u64)pint[2] << 32) | (u64)pint[3]; + if((u64)*addr & (align - 1)) { + pr_err("pme: Invalid alignment, address %016llx\n",(u64)*addr); + return -EINVAL; + } + /* Keep things simple, it's either all in the DRAM range or it's all + * outside. */ + if (*addr < memblock_end_of_DRAM()) { + if ((u64)*addr + (u64)*sz > memblock_end_of_DRAM()){ + pr_err("pme: outside DRAM range\n"); + return -EINVAL; + } + if (memblock_reserve(*addr, *sz) < 0) { + pr_err("pme: Failed to reserve %s\n", name); + return -ENOMEM; + } + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + } else if (zero) { + /* map as cacheable, non-guarded */ + void *tmpp = ioremap_prot(*addr, *sz, 0); + memset(tmpp, 0, *sz); + iounmap(tmpp); + } + return 0; +} + +/* No errors/interrupts. Physical addresses are assumed <= 32bits. */ +static int __init fsl_pme2_init(struct device_node *node) +{ + const char *s; + int ret = 0; + + s = of_get_property(node, "fsl,hv-claimable", &ret); + if (s && !strcmp(s, "standby")) { + pr_info(" -> in standby mode\n"); + return 0; + } + /* Check if pdsr memory already allocated */ + if (dxe_a) { + pr_err("pme: Error fsl_pme2_init already done\n"); + return -EINVAL; + } + ret = parse_mem_property(node, "fsl,pme-pdsr", &dxe_a, &dxe_sz, + PDSR_TBL_ALIGN, 0); + if (ret) + return ret; + ret = parse_mem_property(node, "fsl,pme-sre", &sre_a, &sre_sz, + SRE_TBL_ALIGN, 0); + return ret; +} + +__init void pme2_init_early(void) +{ + struct device_node *dn; + int ret; + for_each_compatible_node(dn, NULL, "fsl,pme") { + ret = fsl_pme2_init(dn); + if (ret) + pr_err("pme: Error fsl_pme2_init\n"); + } +} + +/************ + * Section 2 + *********** + * This code is called during driver initialisation. It doesn't do anything with + * the device-tree entries nor the PME device, it simply creates the sysfs stuff + * and gives the user something to hold. This could be made loadable, if there + * was any benefit to doing so - but as the device is already "bound" by static + * code, there's little point to hiding the fact. + */ + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL PME2 (p4080) device control"); + +/* Opaque pointer target used to represent the PME CCSR map, ... */ +struct pme; + +/* ... and the instance of it. 
*/ +static struct pme *global_pme; +static int pme_err_irq; + +static inline void __pme_out(struct pme *p, u32 offset, u32 val) +{ + u32 __iomem *regs = (void *)p; + out_be32(regs + (offset >> 2), val); +} +#define pme_out(p, r, v) __pme_out(p, PME_REG_##r, v) +static inline u32 __pme_in(struct pme *p, u32 offset) +{ + u32 __iomem *regs = (void *)p; + return in_be32(regs + (offset >> 2)); +} +#define pme_in(p, r) __pme_in(p, PME_REG_##r) + +#define PME_EFQC(en, fq) \ + ({ \ + /* Assume a default delay of 64 cycles */ \ + u8 __i419 = 0x1; \ + u32 __fq419 = (fq) & 0x00ffffff; \ + ((en) ? 0x80000000 : 0) | (__i419 << 28) | __fq419; \ + }) + +#define PME_FACONF_ENABLE 0x00000002 +#define PME_FACONF_RESET 0x00000001 + +/* pme stats accumulator work */ +static void accumulator_update(struct work_struct *work); +void accumulator_update_interval(u32 interval); +static DECLARE_DELAYED_WORK(accumulator_work, accumulator_update); +u32 pme_stat_interval = CONFIG_FSL_PME2_STAT_ACCUMULATOR_UPDATE_INTERVAL; +#define PME_SBE_ERR 0x01000000 +#define PME_DBE_ERR 0x00080000 +#define PME_PME_ERR 0x00000100 +#define PME_ALL_ERR (PME_SBE_ERR | PME_DBE_ERR | PME_PME_ERR) + +static struct of_device_id of_fsl_pme_ids[] = { + { + .compatible = "fsl,pme", + }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_pme_ids); + +/* Pme interrupt handler */ +static irqreturn_t pme_isr(int irq, void *ptr) +{ + static u32 last_isrstate; + u32 isrstate = pme_in(global_pme, ISR) ^ last_isrstate; + + /* What new ISR state has been raise */ + if (!isrstate) + return IRQ_NONE; + if (isrstate & PME_SBE_ERR) + pr_crit("PME: SBE detected\n"); + if (isrstate & PME_DBE_ERR) + pr_crit("PME: DBE detected\n"); + if (isrstate & PME_PME_ERR) + pr_crit("PME: PME serious detected\n"); + /* Clear the ier interrupt bit */ + last_isrstate |= isrstate; + pme_out(global_pme, IER, ~last_isrstate); + return IRQ_HANDLED; +} + +static int of_fsl_pme_remove(struct platform_device *ofdev) +{ + /* Cancel pme accumulator */ + accumulator_update_interval(0); + cancel_delayed_work_sync(&accumulator_work); + /* Disable PME..TODO need to wait till it's quiet */ + pme_out(global_pme, FACONF, PME_FACONF_RESET); + /* Release interrupt */ + if (likely(pme_err_irq != NO_IRQ)) + free_irq(pme_err_irq, &ofdev->dev); + /* Remove sysfs attribute */ + pme2_remove_sysfs_dev_files(ofdev); + /* Unmap controller region */ + iounmap(global_pme); + global_pme = NULL; + return 0; +} + +static int of_fsl_pme_probe(struct platform_device *ofdev) +{ + int ret, err = 0; + void __iomem *regs; + struct device *dev = &ofdev->dev; + struct device_node *nprop = dev->of_node; + u32 clkfreq = DEFAULT_SRFCC * 1000000; + const u32 *value; + const char *s; + int srec_aim = 0, srec_esr = 0; + u32 srecontextsize_code; + u32 dec1; + + /* TODO: This standby handling won't work properly after failover, it's + * just to allow bring up for now. 
*/ + s = of_get_property(nprop, "fsl,hv-claimable", &ret); + if (s && !strcmp(s, "standby")) + return 0; + pme_err_irq = of_irq_to_resource(nprop, 0, NULL); + if (unlikely(pme_err_irq == NO_IRQ)) + dev_warn(dev, "Can't get %s property '%s'\n", nprop->full_name, + "interrupts"); + + /* Get configuration properties from device tree */ + /* First, get register page */ + regs = of_iomap(nprop, 0); + if (regs == NULL) { + dev_err(dev, "of_iomap() failed\n"); + err = -EINVAL; + goto out; + } + + /* Global configuration, leave pme disabled */ + global_pme = (struct pme *)regs; + pme_out(global_pme, FACONF, 0); + pme_out(global_pme, EFQC, PME_EFQC(0, 0)); + + /* TODO: these coherency settings for PMFA, DXE, and SRE force all + * transactions to snoop, as the kernel does not yet support flushing in + * dma_map_***() APIs (ie. h/w can not treat otherwise coherent memory + * in a non-coherent manner, temporarily or otherwise). When the kernel + * supports this, we should tune these settings back to; + * FAMCR = 0x00010001 + * DMCR = 0x00000000 + * SMCR = 0x00000000 + */ + /* PME HW rev 2.1: Added TWC field in FAMCR */ + pme_out(global_pme, FAMCR, 0x11010101); + pme_out(global_pme, DMCR, 0x00000001); + pme_out(global_pme, SMCR, 0x00000211); + + if (likely(pme_err_irq != NO_IRQ)) { + /* Register the pme ISR handler */ + err = request_irq(pme_err_irq, pme_isr, IRQF_SHARED, "pme-err", + dev); + if (err) { + dev_err(dev, "request_irq() failed\n"); + goto out_unmap_ctrl_region; + } + } + +#ifdef CONFIG_FSL_PME2_SRE_AIM + srec_aim = 1; +#endif +#ifdef CONFIG_FSL_PME2_SRE_ESR + srec_esr = 1; +#endif + /* Validate some parameters */ + if (!sre_session_ctx_size || !is_power_of_2(sre_session_ctx_size) || + (sre_session_ctx_size < 32) || + (sre_session_ctx_size > (131072))) { + dev_err(dev, "invalid sre_session_ctx_size\n"); + err = -EINVAL; + goto out_free_irq; + } + srecontextsize_code = ilog2(sre_session_ctx_size); + srecontextsize_code -= 4; + + /* Configure Clock Frequency */ + value = of_get_property(nprop, "clock-frequency", NULL); + if (value) + clkfreq = *value; + pme_out(global_pme, SFRCC, DIV_ROUND_UP(clkfreq, 1000000)); + + pme_out(global_pme, PDSRBAH, upper_32_bits(dxe_a)); + pme_out(global_pme, PDSRBAL, lower_32_bits(dxe_a)); + pme_out(global_pme, SCBARH, upper_32_bits(sre_a)); + pme_out(global_pme, SCBARL, lower_32_bits(sre_a)); + /* Maximum allocated index into the PDSR table available to the DXE + * Rev 2.0: Max 0xF_FFFC + * Rev 2.1: Max 0x7_FFFC + */ + if (PME_REV(pme_in(global_pme, PM_IP_REV1)) == PME_REV_2_0) { + if (((dxe_sz/PDSR_TBL_ALIGN)-1) > DEC1_MAX_REV_2_0) + dec1 = DEC1_MAX_REV_2_0; + else + dec1 = (dxe_sz/PDSR_TBL_ALIGN)-1; + } else { + if (((dxe_sz/PDSR_TBL_ALIGN)-1) > DEC1_MAX_REV_2_1) + dec1 = DEC1_MAX_REV_2_1; + else + dec1 = (dxe_sz/PDSR_TBL_ALIGN)-1; + } + pme_out(global_pme, DEC1, dec1); + /* Maximum allocated index into the PDSR table available to the SRE */ + pme_out(global_pme, SEC2, dec1); + /* Maximum allocated 32-byte offset into SRE Context Table.*/ + if (sre_sz) + pme_out(global_pme, SEC3, (sre_sz/SRE_TBL_ALIGN)-1); + /* Max test line execution */ + pme_out(global_pme, DEC0, max_test_line_per_pat); + pme_out(global_pme, DLC, + (max_pat_eval_per_sui << 16) | max_pat_matches_per_sui); + + /* SREC - SRE Config */ + pme_out(global_pme, SREC, + /* Number of rules in database */ + (sre_rule_num << 0) | + /* Simple Report Enabled */ + ((srec_esr ? 1 : 0) << 18) | + /* Context Size per Session */ + (srecontextsize_code << 19) | + /* Alternate Inclusive Mode */ + ((srec_aim ? 
1 : 0) << 29)); + pme_out(global_pme, SEC1, + (CONFIG_FSL_PME2_SRE_MAX_INSTRUCTION_LIMIT << 16) | + CONFIG_FSL_PME2_SRE_MAX_BLOCK_NUMBER); + + /* Setup Accumulator */ + if (pme_stat_interval) + schedule_delayed_work(&accumulator_work, + msecs_to_jiffies(pme_stat_interval)); + /* Create sysfs entries */ + err = pme2_create_sysfs_dev_files(ofdev); + if (err) + goto out_stop_accumulator; + + /* Enable interrupts */ + pme_out(global_pme, IER, PME_ALL_ERR); + dev_info(dev, "ver: 0x%08x\n", pme_in(global_pme, PM_IP_REV1)); + + /* Enable pme */ + pme_out(global_pme, FACONF, PME_FACONF_ENABLE); + return 0; + +out_stop_accumulator: + if (pme_stat_interval) { + accumulator_update_interval(0); + cancel_delayed_work_sync(&accumulator_work); + } +out_free_irq: + if (likely(pme_err_irq != NO_IRQ)) + free_irq(pme_err_irq, &ofdev->dev); +out_unmap_ctrl_region: + pme_out(global_pme, FACONF, PME_FACONF_RESET); + iounmap(global_pme); + global_pme = NULL; +out: + return err; +} + +static struct platform_driver of_fsl_pme_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = of_fsl_pme_ids, + }, + .probe = of_fsl_pme_probe, + .remove = of_fsl_pme_remove, +}; + +static int pme2_ctrl_init(void) +{ + return platform_driver_register(&of_fsl_pme_driver); +} + +static void pme2_ctrl_exit(void) +{ + platform_driver_unregister(&of_fsl_pme_driver); +} + +module_init(pme2_ctrl_init); +module_exit(pme2_ctrl_exit); + +/************ + * Section 3 + ************ + * These APIs are the only functional hooks into the control driver, besides the + * sysfs attributes. + */ + +int pme2_have_control(void) +{ + return global_pme ? 1 : 0; +} +EXPORT_SYMBOL(pme2_have_control); + +int pme2_exclusive_set(struct qman_fq *fq) +{ + if (!pme2_have_control()) + return -ENODEV; + pme_out(global_pme, EFQC, PME_EFQC(1, qman_fq_fqid(fq))); + return 0; +} +EXPORT_SYMBOL(pme2_exclusive_set); + +int pme2_exclusive_unset(void) +{ + if (!pme2_have_control()) + return -ENODEV; + pme_out(global_pme, EFQC, PME_EFQC(0, 0)); + return 0; +} +EXPORT_SYMBOL(pme2_exclusive_unset); + +int pme_attr_set(enum pme_attr attr, u32 val) +{ + u32 mask; + u32 attr_val; + + if (!pme2_have_control()) + return -ENODEV; + + /* Check if Buffer size configuration */ + if (attr >= pme_attr_bsc_first && attr <= pme_attr_bsc_last) { + u32 bsc_pool_id = attr - pme_attr_bsc_first; + u32 bsc_pool_offset = bsc_pool_id % 8; + u32 bsc_pool_mask = ~(0xF << ((7-bsc_pool_offset)*4)); + /* range for val 0..0xB */ + if (val > 0xb) + return -EINVAL; + /* calculate which sky-blue reg */ + /* 0..7 -> bsc_(0..7), PME_REG_BSC0 */ + /* 8..15 -> bsc_(8..15) PME_REG_BSC1*/ + /* ... 
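+ * (i.e. pool N maps to register BSC(N/8) and occupies the 4-bit field at
+ * bit offset (7 - N%8)*4; for example, pool 13 lands in bits 11..8 of
+ * BSC1)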
*/ + /* 56..63 -> bsc_(56..63) PME_REG_BSC7*/ + attr_val = pme_in(global_pme, BSC0 + ((bsc_pool_id/8)*4)); + /* Now mask in the new value */ + attr_val = attr_val & bsc_pool_mask; + attr_val = attr_val | (val << ((7-bsc_pool_offset)*4)); + pme_out(global_pme, BSC0 + ((bsc_pool_id/8)*4), attr_val); + return 0; + } + + switch (attr) { + case pme_attr_efqc_int: + if (val > 4) + return -EINVAL; + mask = 0x8FFFFFFF; + attr_val = pme_in(global_pme, EFQC); + /* clear efqc_int */ + attr_val &= mask; + val <<= 28; + val |= attr_val; + pme_out(global_pme, EFQC, val); + break; + + case pme_attr_sw_db: + pme_out(global_pme, SWDB, val); + break; + + case pme_attr_dmcr: + pme_out(global_pme, DMCR, val); + break; + + case pme_attr_smcr: + pme_out(global_pme, SMCR, val); + break; + + case pme_attr_famcr: + pme_out(global_pme, FAMCR, val); + break; + + case pme_attr_kvlts: + if (val < 2 || val > 16) + return -EINVAL; + /* HW range: 1..15, SW range: 2..16 */ + pme_out(global_pme, KVLTS, --val); + break; + + case pme_attr_max_chain_length: + if (val > 0x7FFF) + val = 0x7FFF; + pme_out(global_pme, KEC, val); + break; + + case pme_attr_pattern_range_counter_idx: + if (val > 0x1FFFF) + val = 0x1FFFF; + pme_out(global_pme, DRCIC, val); + break; + + case pme_attr_pattern_range_counter_mask: + if (val > 0x1FFFF) + val = 0x1FFFF; + pme_out(global_pme, DRCMC, val); + break; + + case pme_attr_max_allowed_test_line_per_pattern: + if (val > 0x3FFF) + val = 0x3FFF; + pme_out(global_pme, DEC0, val); + break; + + case pme_attr_max_pattern_matches_per_sui: + /* mpe, mpm */ + if (val > 0xFFFF) + val = 0xFFFF; + mask = 0xFFFF0000; + attr_val = pme_in(global_pme, DLC); + /* clear mpm */ + attr_val &= mask; + val &= ~mask; + val |= attr_val; + pme_out(global_pme, DLC, val); + break; + + case pme_attr_max_pattern_evaluations_per_sui: + /* mpe, mpm */ + if (val > 0xFFFF) + val = 0xFFFF; + mask = 0x0000FFFF; + attr_val = pme_in(global_pme, DLC); + /* clear mpe */ + attr_val &= mask; + /* clear unwanted bits in val*/ + val &= mask; + val <<= 16; + val |= attr_val; + pme_out(global_pme, DLC, val); + break; + + case pme_attr_report_length_limit: + if (val > 0xFFFF) + val = 0xFFFF; + pme_out(global_pme, RLL, val); + break; + + case pme_attr_end_of_simple_sui_report: + /* bit 13 */ + mask = 0x00040000; + attr_val = pme_in(global_pme, SREC); + if (val) + attr_val |= mask; + else + attr_val &= ~mask; + pme_out(global_pme, SREC, attr_val); + break; + + case pme_attr_aim: + /* bit 2 */ + mask = 0x20000000; + attr_val = pme_in(global_pme, SREC); + if (val) + attr_val |= mask; + else + attr_val &= ~mask; + pme_out(global_pme, SREC, attr_val); + break; + + case pme_attr_end_of_sui_reaction_ptr: + if (val > 0xFFFFF) + val = 0xFFFFF; + pme_out(global_pme, ESRP, val); + break; + + case pme_attr_sre_pscl: + pme_out(global_pme, SFRCC, val); + break; + + case pme_attr_sre_max_block_num: + /* bits 17..31 */ + if (val > 0x7FFF) + val = 0x7FFF; + mask = 0xFFFF8000; + attr_val = pme_in(global_pme, SEC1); + /* clear mbn */ + attr_val &= mask; + /* clear unwanted bits in val*/ + val &= ~mask; + val |= attr_val; + pme_out(global_pme, SEC1, val); + break; + + case pme_attr_sre_max_instruction_limit: + /* bits 0..15 */ + if (val > 0xFFFF) + val = 0xFFFF; + mask = 0x0000FFFF; + attr_val = pme_in(global_pme, SEC1); + /* clear mil */ + attr_val &= mask; + /* clear unwanted bits in val*/ + val &= mask; + val <<= 16; + val |= attr_val; + pme_out(global_pme, SEC1, val); + break; + + case pme_attr_srrv0: + pme_out(global_pme, SRRV0, val); + break; + case 
pme_attr_srrv1: + pme_out(global_pme, SRRV1, val); + break; + case pme_attr_srrv2: + pme_out(global_pme, SRRV2, val); + break; + case pme_attr_srrv3: + pme_out(global_pme, SRRV3, val); + break; + case pme_attr_srrv4: + pme_out(global_pme, SRRV4, val); + break; + case pme_attr_srrv5: + pme_out(global_pme, SRRV5, val); + break; + case pme_attr_srrv6: + pme_out(global_pme, SRRV6, val); + break; + case pme_attr_srrv7: + pme_out(global_pme, SRRV7, val); + break; + case pme_attr_srrfi: + pme_out(global_pme, SRRFI, val); + break; + case pme_attr_srri: + pme_out(global_pme, SRRI, val); + break; + case pme_attr_srrwc: + pme_out(global_pme, SRRWC, val); + break; + case pme_attr_srrr: + pme_out(global_pme, SRRR, val); + break; + case pme_attr_tbt0ecc1th: + pme_out(global_pme, TBT0ECC1TH, val); + break; + case pme_attr_tbt1ecc1th: + pme_out(global_pme, TBT1ECC1TH, val); + break; + case pme_attr_vlt0ecc1th: + pme_out(global_pme, VLT0ECC1TH, val); + break; + case pme_attr_vlt1ecc1th: + pme_out(global_pme, VLT1ECC1TH, val); + break; + case pme_attr_cmecc1th: + pme_out(global_pme, CMECC1TH, val); + break; + case pme_attr_dxcmecc1th: + pme_out(global_pme, DXCMECC1TH, val); + break; + case pme_attr_dxemecc1th: + pme_out(global_pme, DXEMECC1TH, val); + break; + case pme_attr_esr: + pme_out(global_pme, ESR, val); + break; + case pme_attr_pehd: + pme_out(global_pme, PEHD, val); + break; + case pme_attr_ecc1bes: + pme_out(global_pme, ECC1BES, val); + break; + case pme_attr_ecc2bes: + pme_out(global_pme, ECC2BES, val); + break; + case pme_attr_miace: + pme_out(global_pme, MIA_CE, val); + break; + case pme_attr_miacr: + pme_out(global_pme, MIA_CR, val); + break; + case pme_attr_cdcr: + pme_out(global_pme, CDCR, val); + break; + case pme_attr_pmtr: + pme_out(global_pme, PMTR, val); + break; + + default: + pr_err("pme: Unknown attr %u\n", attr); + return -EINVAL; + }; + return 0; +} +EXPORT_SYMBOL(pme_attr_set); + +int pme_attr_get(enum pme_attr attr, u32 *val) +{ + u32 mask; + u32 attr_val; + + if (!pme2_have_control()) + return -ENODEV; + + /* Check if Buffer size configuration */ + if (attr >= pme_attr_bsc_first && attr <= pme_attr_bsc_last) { + u32 bsc_pool_id = attr - pme_attr_bsc_first; + u32 bsc_pool_offset = bsc_pool_id % 8; + /* calculate which sky-blue reg */ + /* 0..7 -> bsc_(0..7), PME_REG_BSC0 */ + /* 8..15 -> bsc_(8..15) PME_REG_BSC1*/ + /* ... 
*/ + /* 56..63 -> bsc_(56..63) PME_REG_BSC7*/ + attr_val = pme_in(global_pme, BSC0 + ((bsc_pool_id/8)*4)); + attr_val = attr_val >> ((7-bsc_pool_offset)*4); + attr_val = attr_val & 0x0000000F; + *val = attr_val; + return 0; + } + + switch (attr) { + case pme_attr_efqc_int: + mask = 0x8FFFFFFF; + attr_val = pme_in(global_pme, EFQC); + attr_val &= ~mask; + attr_val >>= 28; + break; + + case pme_attr_sw_db: + attr_val = pme_in(global_pme, SWDB); + break; + + case pme_attr_dmcr: + attr_val = pme_in(global_pme, DMCR); + break; + + case pme_attr_smcr: + attr_val = pme_in(global_pme, SMCR); + break; + + case pme_attr_famcr: + attr_val = pme_in(global_pme, FAMCR); + break; + + case pme_attr_kvlts: + /* bit 28-31 */ + attr_val = pme_in(global_pme, KVLTS); + attr_val &= 0x0000000F; + /* HW range: 1..15, SW range: 2..16 */ + attr_val += 1; + break; + + case pme_attr_max_chain_length: + /* bit 17-31 */ + attr_val = pme_in(global_pme, KEC); + attr_val &= 0x00007FFF; + break; + + case pme_attr_pattern_range_counter_idx: + /* bit 15-31 */ + attr_val = pme_in(global_pme, DRCIC); + attr_val &= 0x0001FFFF; + break; + + case pme_attr_pattern_range_counter_mask: + /* bit 15-31 */ + attr_val = pme_in(global_pme, DRCMC); + attr_val &= 0x0001FFFF; + break; + + case pme_attr_max_allowed_test_line_per_pattern: + /* bit 18-31 */ + attr_val = pme_in(global_pme, DEC0); + attr_val &= 0x00003FFF; + break; + + case pme_attr_max_pdsr_index: + /* bit 12-31 */ + attr_val = pme_in(global_pme, DEC1); + attr_val &= 0x000FFFFF; + break; + + case pme_attr_max_pattern_matches_per_sui: + attr_val = pme_in(global_pme, DLC); + attr_val &= 0x0000FFFF; + break; + + case pme_attr_max_pattern_evaluations_per_sui: + attr_val = pme_in(global_pme, DLC); + attr_val >>= 16; + break; + + case pme_attr_report_length_limit: + attr_val = pme_in(global_pme, RLL); + /* clear unwanted bits in val*/ + attr_val &= 0x0000FFFF; + break; + + case pme_attr_end_of_simple_sui_report: + /* bit 13 */ + attr_val = pme_in(global_pme, SREC); + attr_val >>= 18; + /* clear unwanted bits in val*/ + attr_val &= 0x00000001; + break; + + case pme_attr_aim: + /* bit 2 */ + attr_val = pme_in(global_pme, SREC); + attr_val >>= 29; + /* clear unwanted bits in val*/ + attr_val &= 0x00000001; + break; + + case pme_attr_sre_context_size: + /* bits 9..12 */ + attr_val = pme_in(global_pme, SREC); + attr_val >>= 19; + /* clear unwanted bits in val*/ + attr_val &= 0x0000000F; + attr_val += 4; + attr_val = 1 << attr_val; + break; + + case pme_attr_sre_rule_num: + /* bits 24..31 */ + attr_val = pme_in(global_pme, SREC); + /* clear unwanted bits in val*/ + attr_val &= 0x000000FF; + /* Multiply by 256 */ + attr_val <<= 8; + break; + + case pme_attr_sre_session_ctx_num: { + u32 ctx_sz = 0; + /* = sre_table_size / sre_session_ctx_size */ + attr_val = pme_in(global_pme, SEC3); + /* clear unwanted bits in val*/ + attr_val &= 0x07FFFFFF; + attr_val += 1; + attr_val *= 32; + ctx_sz = pme_in(global_pme, SREC); + ctx_sz >>= 19; + /* clear unwanted bits in val*/ + ctx_sz &= 0x0000000F; + ctx_sz += 4; + attr_val /= (1 << ctx_sz); + } + break; + + case pme_attr_end_of_sui_reaction_ptr: + /* bits 12..31 */ + attr_val = pme_in(global_pme, ESRP); + /* clear unwanted bits in val*/ + attr_val &= 0x000FFFFF; + break; + + case pme_attr_sre_pscl: + /* bits 22..31 */ + attr_val = pme_in(global_pme, SFRCC); + break; + + case pme_attr_sre_max_block_num: + /* bits 17..31 */ + attr_val = pme_in(global_pme, SEC1); + /* clear unwanted bits in val*/ + attr_val &= 0x00007FFF; + break; + + case 
pme_attr_sre_max_instruction_limit: + /* bits 0..15 */ + attr_val = pme_in(global_pme, SEC1); + attr_val >>= 16; + break; + + case pme_attr_sre_max_index_size: + /* bits 12..31 */ + attr_val = pme_in(global_pme, SEC2); + /* clear unwanted bits in val*/ + attr_val &= 0x000FFFFF; + break; + + case pme_attr_sre_max_offset_ctrl: + /* bits 5..31 */ + attr_val = pme_in(global_pme, SEC3); + /* clear unwanted bits in val*/ + attr_val &= 0x07FFFFFF; + break; + + case pme_attr_src_id: + /* bits 24..31 */ + attr_val = pme_in(global_pme, SRCIDR); + /* clear unwanted bits in val*/ + attr_val &= 0x000000FF; + break; + + case pme_attr_liodnr: + /* bits 20..31 */ + attr_val = pme_in(global_pme, LIODNR); + /* clear unwanted bits in val*/ + attr_val &= 0x00000FFF; + break; + + case pme_attr_rev1: + /* bits 0..31 */ + attr_val = pme_in(global_pme, PM_IP_REV1); + break; + + case pme_attr_rev2: + /* bits 0..31 */ + attr_val = pme_in(global_pme, PM_IP_REV2); + break; + + case pme_attr_srrr: + attr_val = pme_in(global_pme, SRRR); + break; + + case pme_attr_trunci: + attr_val = pme_in(global_pme, TRUNCI); + break; + + case pme_attr_rbc: + attr_val = pme_in(global_pme, RBC); + break; + + case pme_attr_tbt0ecc1ec: + attr_val = pme_in(global_pme, TBT0ECC1EC); + break; + + case pme_attr_tbt1ecc1ec: + attr_val = pme_in(global_pme, TBT1ECC1EC); + break; + + case pme_attr_vlt0ecc1ec: + attr_val = pme_in(global_pme, VLT0ECC1EC); + break; + + case pme_attr_vlt1ecc1ec: + attr_val = pme_in(global_pme, VLT1ECC1EC); + break; + + case pme_attr_cmecc1ec: + attr_val = pme_in(global_pme, CMECC1EC); + break; + + case pme_attr_dxcmecc1ec: + attr_val = pme_in(global_pme, DXCMECC1EC); + break; + + case pme_attr_dxemecc1ec: + attr_val = pme_in(global_pme, DXEMECC1EC); + break; + + case pme_attr_tbt0ecc1th: + attr_val = pme_in(global_pme, TBT0ECC1TH); + break; + + case pme_attr_tbt1ecc1th: + attr_val = pme_in(global_pme, TBT1ECC1TH); + break; + + case pme_attr_vlt0ecc1th: + attr_val = pme_in(global_pme, VLT0ECC1TH); + break; + + case pme_attr_vlt1ecc1th: + attr_val = pme_in(global_pme, VLT1ECC1TH); + break; + + case pme_attr_cmecc1th: + attr_val = pme_in(global_pme, CMECC1TH); + break; + + case pme_attr_dxcmecc1th: + attr_val = pme_in(global_pme, DXCMECC1TH); + break; + + case pme_attr_dxemecc1th: + attr_val = pme_in(global_pme, DXEMECC1TH); + break; + + case pme_attr_stnib: + attr_val = pme_in(global_pme, STNIB); + break; + + case pme_attr_stnis: + attr_val = pme_in(global_pme, STNIS); + break; + + case pme_attr_stnth1: + attr_val = pme_in(global_pme, STNTH1); + break; + + case pme_attr_stnth2: + attr_val = pme_in(global_pme, STNTH2); + break; + + case pme_attr_stnthv: + attr_val = pme_in(global_pme, STNTHV); + break; + + case pme_attr_stnths: + attr_val = pme_in(global_pme, STNTHS); + break; + + case pme_attr_stnch: + attr_val = pme_in(global_pme, STNCH); + break; + + case pme_attr_stnpm: + attr_val = pme_in(global_pme, STNPM); + break; + + case pme_attr_stns1m: + attr_val = pme_in(global_pme, STNS1M); + break; + + case pme_attr_stnpmr: + attr_val = pme_in(global_pme, STNPMR); + break; + + case pme_attr_stndsr: + attr_val = pme_in(global_pme, STNDSR); + break; + + case pme_attr_stnesr: + attr_val = pme_in(global_pme, STNESR); + break; + + case pme_attr_stns1r: + attr_val = pme_in(global_pme, STNS1R); + break; + + case pme_attr_stnob: + attr_val = pme_in(global_pme, STNOB); + break; + + case pme_attr_mia_byc: + attr_val = pme_in(global_pme, MIA_BYC); + break; + + case pme_attr_mia_blc: + attr_val = pme_in(global_pme, MIA_BLC); + 
break; + + case pme_attr_isr: + attr_val = pme_in(global_pme, ISR); + break; + + case pme_attr_ecr0: + attr_val = pme_in(global_pme, ECR0); + break; + + case pme_attr_ecr1: + attr_val = pme_in(global_pme, ECR1); + break; + + case pme_attr_esr: + attr_val = pme_in(global_pme, ESR); + break; + + case pme_attr_pmstat: + attr_val = pme_in(global_pme, PMSTAT); + break; + + case pme_attr_pehd: + attr_val = pme_in(global_pme, PEHD); + break; + + case pme_attr_ecc1bes: + attr_val = pme_in(global_pme, ECC1BES); + break; + + case pme_attr_ecc2bes: + attr_val = pme_in(global_pme, ECC2BES); + break; + + case pme_attr_eccaddr: + attr_val = pme_in(global_pme, ECCADDR); + break; + + case pme_attr_ecccode: + attr_val = pme_in(global_pme, ECCCODE); + break; + + case pme_attr_miace: + attr_val = pme_in(global_pme, MIA_CE); + break; + + case pme_attr_miacr: + attr_val = pme_in(global_pme, MIA_CR); + break; + + case pme_attr_cdcr: + attr_val = pme_in(global_pme, CDCR); + break; + + case pme_attr_pmtr: + attr_val = pme_in(global_pme, PMTR); + break; + + case pme_attr_faconf: + attr_val = pme_in(global_pme, FACONF); + break; + + case pme_attr_pdsrbah: + attr_val = pme_in(global_pme, PDSRBAH); + break; + + case pme_attr_pdsrbal: + attr_val = pme_in(global_pme, PDSRBAL); + break; + + case pme_attr_scbarh: + attr_val = pme_in(global_pme, SCBARH); + break; + + case pme_attr_scbarl: + attr_val = pme_in(global_pme, SCBARL); + break; + + case pme_attr_srrv0: + attr_val = pme_in(global_pme, SRRV0); + break; + + case pme_attr_srrv1: + attr_val = pme_in(global_pme, SRRV1); + break; + + case pme_attr_srrv2: + attr_val = pme_in(global_pme, SRRV2); + break; + + case pme_attr_srrv3: + attr_val = pme_in(global_pme, SRRV3); + break; + + case pme_attr_srrv4: + attr_val = pme_in(global_pme, SRRV4); + break; + + case pme_attr_srrv5: + attr_val = pme_in(global_pme, SRRV5); + break; + + case pme_attr_srrv6: + attr_val = pme_in(global_pme, SRRV6); + break; + + case pme_attr_srrv7: + attr_val = pme_in(global_pme, SRRV7); + break; + + case pme_attr_srrfi: + attr_val = pme_in(global_pme, SRRFI); + break; + + case pme_attr_srri: + attr_val = pme_in(global_pme, SRRI); + break; + + case pme_attr_srrwc: + attr_val = pme_in(global_pme, SRRWC); + break; + + default: + pr_err("pme: Unknown attr %u\n", attr); + return -EINVAL; + }; + *val = attr_val; + return 0; +} +EXPORT_SYMBOL(pme_attr_get); + +static enum pme_attr stat_list[] = { + pme_attr_trunci, + pme_attr_rbc, + pme_attr_tbt0ecc1ec, + pme_attr_tbt1ecc1ec, + pme_attr_vlt0ecc1ec, + pme_attr_vlt1ecc1ec, + pme_attr_cmecc1ec, + pme_attr_dxcmecc1ec, + pme_attr_dxemecc1ec, + pme_attr_stnib, + pme_attr_stnis, + pme_attr_stnth1, + pme_attr_stnth2, + pme_attr_stnthv, + pme_attr_stnths, + pme_attr_stnch, + pme_attr_stnpm, + pme_attr_stns1m, + pme_attr_stnpmr, + pme_attr_stndsr, + pme_attr_stnesr, + pme_attr_stns1r, + pme_attr_stnob, + pme_attr_mia_byc, + pme_attr_mia_blc +}; + +static u64 pme_stats[sizeof(stat_list)/sizeof(enum pme_attr)]; +static DEFINE_SPINLOCK(stat_lock); + +int pme_stat_get(enum pme_attr stat, u64 *value, int reset) +{ + int i, ret = 0; + int value_set = 0; + u32 val; + + spin_lock_irq(&stat_lock); + for (i = 0; i < sizeof(stat_list)/sizeof(enum pme_attr); i++) { + if (stat_list[i] == stat) { + ret = pme_attr_get(stat_list[i], &val); + /* Do I need to check ret */ + pme_stats[i] += val; + *value = pme_stats[i]; + value_set = 1; + if (reset) + pme_stats[i] = 0; + break; + } + } + if (!value_set) { + pr_err("pme: Invalid stat request %d\n", stat); + ret = -EINVAL; + } + 
spin_unlock_irq(&stat_lock); + return ret; +} +EXPORT_SYMBOL(pme_stat_get); + +void accumulator_update_interval(u32 interval) +{ + int schedule = 0; + + spin_lock_irq(&stat_lock); + if (!pme_stat_interval && interval) + schedule = 1; + pme_stat_interval = interval; + spin_unlock_irq(&stat_lock); + if (schedule) + schedule_delayed_work(&accumulator_work, + msecs_to_jiffies(interval)); +} + +static void accumulator_update(struct work_struct *work) +{ + int i, ret; + u32 local_interval; + u32 val; + + spin_lock_irq(&stat_lock); + local_interval = pme_stat_interval; + for (i = 0; i < sizeof(stat_list)/sizeof(enum pme_attr); i++) { + ret = pme_attr_get(stat_list[i], &val); + pme_stats[i] += val; + } + spin_unlock_irq(&stat_lock); + if (local_interval) + schedule_delayed_work(&accumulator_work, + msecs_to_jiffies(local_interval)); +} + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_db.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_db.c @@ -0,0 +1,572 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_private.h" +#include + +/* Forward declaration */ +static struct miscdevice fsl_pme2_db_dev; + +/* Global spinlock for handling exclusive inc/dec */ +static DEFINE_SPINLOCK(exclusive_lock); + +/* Private structure that is allocated for each open that is done on the + * pme_db device. 
This is used to maintain the state of a database session */ +struct db_session { + /* The ctx that is needed to communicate with the pme high level */ + struct pme_ctx ctx; + /* Used to track the EXCLUSIVE_INC and EXCLUSIVE_DEC ioctls */ + unsigned int exclusive_counter; +}; + +struct cmd_token { + /* pme high level token */ + struct pme_ctx_token hl_token; + /* data */ + struct qm_fd rx_fd; + /* Completion interface */ + struct completion cb_done; + u8 ern; +}; + +#ifdef CONFIG_COMPAT +static void compat_to_db(struct pme_db *dst, struct compat_pme_db *src) +{ + dst->flags = src->flags; + dst->status = src->status; + dst->input.data = compat_ptr(src->input.data); + dst->input.size = src->input.size; + dst->output.data = compat_ptr(src->output.data); + dst->output.size = src->output.size; +} + +static void db_to_compat(struct compat_pme_db *dst, struct pme_db *src) +{ + dst->flags = src->flags; + dst->status = src->status; + dst->output.data = ptr_to_compat(src->output.data); + dst->output.size = src->output.size; + dst->input.data = ptr_to_compat(src->input.data); + dst->input.size = src->input.size; +} +#endif + +/* PME Compound Frame Index */ +#define INPUT_FRM 1 +#define OUTPUT_FRM 0 + +/* Callback for database operations */ +static void db_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_token *ctx_token) +{ + struct cmd_token *token = (struct cmd_token *)ctx_token; + token->rx_fd = *fd; + complete(&token->cb_done); +} + +static void db_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr, + struct pme_ctx_token *ctx_token) +{ + struct cmd_token *token = (struct cmd_token *)ctx_token; + token->ern = 1; + token->rx_fd = mr->ern.fd; + complete(&token->cb_done); +} + +struct ctrl_op { + struct pme_ctx_ctrl_token ctx_ctr; + struct completion cb_done; + enum pme_status cmd_status; + u8 res_flag; + u8 ern; +}; + +static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_ctrl_token *token) +{ + struct ctrl_op *ctrl = (struct ctrl_op *)token; + ctrl->cmd_status = pme_fd_res_status(fd); + ctrl->res_flag = pme_fd_res_flags(fd) & PME_STATUS_UNRELIABLE; + complete(&ctrl->cb_done); +} + +static void ctrl_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr, + struct pme_ctx_ctrl_token *token) +{ + struct ctrl_op *ctrl = (struct ctrl_op *)token; + ctrl->ern = 1; + complete(&ctrl->cb_done); +} + +static int exclusive_inc(struct file *fp, struct db_session *db) +{ + int ret; + + BUG_ON(!db); + BUG_ON(!(db->ctx.flags & PME_CTX_FLAG_EXCLUSIVE)); + spin_lock(&exclusive_lock); + ret = pme_ctx_exclusive_inc(&db->ctx, + (PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT)); + if (!ret) + db->exclusive_counter++; + spin_unlock(&exclusive_lock); + return ret; +} + +static int exclusive_dec(struct file *fp, struct db_session *db) +{ + int ret = 0; + + BUG_ON(!db); + BUG_ON(!(db->ctx.flags & PME_CTX_FLAG_EXCLUSIVE)); + spin_lock(&exclusive_lock); + if (!db->exclusive_counter) { + PMEPRERR("exclusivity counter already zero\n"); + ret = -EINVAL; + } else { + pme_ctx_exclusive_dec(&db->ctx); + db->exclusive_counter--; + } + spin_unlock(&exclusive_lock); + return ret; +} + +static int execute_cmd(struct file *fp, struct db_session *db, + struct pme_db *db_cmd) +{ + int ret = 0; + struct cmd_token token; + struct qm_sg_entry tx_comp[2]; + struct qm_fd tx_fd; + void *tx_data = NULL; + void *rx_data = NULL; + u32 src_sz, dst_sz; + dma_addr_t dma_addr; + + memset(&token, 0, sizeof(struct cmd_token)); + memset(tx_comp, 0, sizeof(tx_comp)); + memset(&tx_fd, 0, sizeof(struct qm_fd)); + 
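/* A PMTCC database command travels to the hardware as a QMan frame: the
+ * user buffer is copied into tx_data and described by tx_comp[INPUT_FRM];
+ * when a reply buffer is supplied, tx_comp[OUTPUT_FRM] describes it and
+ * both entries are enqueued as a compound frame, otherwise a single
+ * sg_big frame carrying only the input is used. Completion (or an
+ * enqueue rejection) is signalled from db_cb()/db_ern_cb() through
+ * token.cb_done. */ +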
init_completion(&token.cb_done); + + PMEPRINFO("Received User Space Contiguous mem\n"); + PMEPRINFO("length = %d\n", db_cmd->input.size); + tx_data = kmalloc(db_cmd->input.size, GFP_KERNEL); + if (!tx_data) { + PMEPRERR("Err alloc %zd byte\n", db_cmd->input.size); + return -ENOMEM; + } + + if (copy_from_user(tx_data, + (void __user *)db_cmd->input.data, + db_cmd->input.size)) { + PMEPRERR("Error copying contigous user data\n"); + ret = -EFAULT; + goto free_tx_data; + } + + /* Setup input frame */ + tx_comp[INPUT_FRM].final = 1; + tx_comp[INPUT_FRM].length = db_cmd->input.size; + dma_addr = pme_map(tx_data); + if (pme_map_error(dma_addr)) { + PMEPRERR("Error pme_map_error\n"); + ret = -EIO; + goto free_tx_data; + } + set_sg_addr(&tx_comp[INPUT_FRM], dma_addr); + /* setup output frame, if output is expected */ + if (db_cmd->output.size) { + PMEPRINFO("expect output %d\n", db_cmd->output.size); + rx_data = kmalloc(db_cmd->output.size, GFP_KERNEL); + if (!rx_data) { + PMEPRERR("Err alloc %zd byte", db_cmd->output.size); + ret = -ENOMEM; + goto unmap_input_frame; + } + /* Setup output frame */ + tx_comp[OUTPUT_FRM].length = db_cmd->output.size; + dma_addr = pme_map(rx_data); + if (pme_map_error(dma_addr)) { + PMEPRERR("Error pme_map_error\n"); + ret = -EIO; + goto comp_frame_free_rx; + } + set_sg_addr(&tx_comp[OUTPUT_FRM], dma_addr); + tx_fd.format = qm_fd_compound; + /* Build compound frame */ + dma_addr = pme_map(tx_comp); + if (pme_map_error(dma_addr)) { + PMEPRERR("Error pme_map_error\n"); + ret = -EIO; + goto comp_frame_unmap_output; + } + set_fd_addr(&tx_fd, dma_addr); + } else { + tx_fd.format = qm_fd_sg_big; + tx_fd.length29 = db_cmd->input.size; + /* Build sg frame */ + dma_addr = pme_map(&tx_comp[INPUT_FRM]); + if (pme_map_error(dma_addr)) { + PMEPRERR("Error pme_map_error\n"); + ret = -EIO; + goto unmap_input_frame; + } + set_fd_addr(&tx_fd, dma_addr); + } + ret = pme_ctx_pmtcc(&db->ctx, PME_CTX_OP_WAIT, &tx_fd, + (struct pme_ctx_token *)&token); + if (unlikely(ret)) { + PMEPRINFO("pme_ctx_pmtcc error %d\n", ret); + goto unmap_frame; + } + PMEPRINFO("Wait for completion\n"); + /* Wait for the command to complete */ + wait_for_completion(&token.cb_done); + + if (token.ern) { + ret = -EIO; + goto unmap_frame; + } + + PMEPRINFO("pme2_db: process_completed_token\n"); + PMEPRINFO("pme2_db: received %d frame type\n", token.rx_fd.format); + if (token.rx_fd.format == qm_fd_compound) { + /* Need to copy output */ + src_sz = tx_comp[OUTPUT_FRM].length; + dst_sz = db_cmd->output.size; + PMEPRINFO("pme gen %u data, have space for %u\n", + src_sz, dst_sz); + db_cmd->output.size = min(dst_sz, src_sz); + /* Doesn't make sense we generated more than available space + * should have got truncation. 
+ */ + BUG_ON(dst_sz < src_sz); + if (copy_to_user((void __user *)db_cmd->output.data, rx_data, + db_cmd->output.size)) { + PMEPRERR("Error copying to user data\n"); + ret = -EFAULT; + goto comp_frame_unmap_cf; + } + } else if (token.rx_fd.format == qm_fd_sg_big) + db_cmd->output.size = 0; + else + panic("unexpected frame type received %d\n", + token.rx_fd.format); + + db_cmd->flags = pme_fd_res_flags(&token.rx_fd); + db_cmd->status = pme_fd_res_status(&token.rx_fd); + +unmap_frame: + if (token.rx_fd.format == qm_fd_sg_big) + goto single_frame_unmap_frame; + +comp_frame_unmap_cf: +comp_frame_unmap_output: +comp_frame_free_rx: + kfree(rx_data); + goto unmap_input_frame; +single_frame_unmap_frame: +unmap_input_frame: +free_tx_data: + kfree(tx_data); + + return ret; +} + +static int execute_nop(struct file *fp, struct db_session *db) +{ + int ret = 0; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb + }; + init_completion(&ctx_ctrl.cb_done); + + ret = pme_ctx_ctrl_nop(&db->ctx, PME_CTX_OP_WAIT|PME_CTX_OP_WAIT_INT, + &ctx_ctrl.ctx_ctr); + if (!ret) + wait_for_completion(&ctx_ctrl.cb_done); + + if (ctx_ctrl.ern) + ret = -EIO; + return ret; +} + +static atomic_t sre_reset_lock = ATOMIC_INIT(1); +static int ioctl_sre_reset(unsigned long arg) +{ + struct pme_db_sre_reset reset_vals; + int i; + u32 srrr_val; + int ret = 0; + + if (copy_from_user(&reset_vals, (struct pme_db_sre_reset __user *)arg, + sizeof(struct pme_db_sre_reset))) + return -EFAULT; + PMEPRINFO("sre_reset:\n"); + PMEPRINFO(" rule_index = 0x%x:\n", reset_vals.rule_index); + PMEPRINFO(" rule_increment = 0x%x:\n", reset_vals.rule_increment); + PMEPRINFO(" rule_repetitions = 0x%x:\n", reset_vals.rule_repetitions); + PMEPRINFO(" rule_reset_interval = 0x%x:\n", + reset_vals.rule_reset_interval); + PMEPRINFO(" rule_reset_priority = 0x%x:\n", + reset_vals.rule_reset_priority); + + /* Validate ranges */ + if ((reset_vals.rule_index >= PME_PMFA_SRE_INDEX_MAX) || + (reset_vals.rule_increment > PME_PMFA_SRE_INC_MAX) || + (reset_vals.rule_repetitions >= PME_PMFA_SRE_REP_MAX) || + (reset_vals.rule_reset_interval >= + PME_PMFA_SRE_INTERVAL_MAX)) + return -ERANGE; + /* Check and make sure only one caller is present */ + if (!atomic_dec_and_test(&sre_reset_lock)) { + /* Someone else is already in this call */ + atomic_inc(&sre_reset_lock); + return -EBUSY; + }; + /* All validated. Run the command */ + for (i = 0; i < PME_SRE_RULE_VECTOR_SIZE; i++) + pme_attr_set(pme_attr_srrv0 + i, reset_vals.rule_vector[i]); + pme_attr_set(pme_attr_srrfi, reset_vals.rule_index); + pme_attr_set(pme_attr_srri, reset_vals.rule_increment); + pme_attr_set(pme_attr_srrwc, + (0xFFF & reset_vals.rule_reset_interval) << 1 | + (reset_vals.rule_reset_priority ? 1 : 0)); + /* Need to set SRRR last */ + pme_attr_set(pme_attr_srrr, reset_vals.rule_repetitions); + do { + mdelay(PME_PMFA_SRE_POLL_MS); + ret = pme_attr_get(pme_attr_srrr, &srrr_val); + if (ret) { + PMEPRCRIT("pme2: Error reading srrr\n"); + /* bail */ + break; + } + /* Check for error */ + else if (srrr_val & 0x10000000) { + PMEPRERR("pme2: Error in SRRR\n"); + ret = -EIO; + } + PMEPRINFO("pme2: srrr count %d\n", srrr_val); + } while (srrr_val); + atomic_inc(&sre_reset_lock); + return ret; +} + +/** + * fsl_pme2_db_open - open the driver + * + * Open the driver and prepare for requests. + * + * Every time an application opens the driver, we create a db_session object + * for that file handle. 
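+ * The session wraps a dedicated pme_ctx, initialised below with the
+ * EXCLUSIVE, PMTCC, DIRECT and LOCAL flags, together with a count of
+ * outstanding EXCLUSIVE_INC requests that is unwound again on close.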
+ */ +static int fsl_pme2_db_open(struct inode *node, struct file *fp) +{ + int ret; + struct db_session *db = NULL; + + db = kzalloc(sizeof(struct db_session), GFP_KERNEL); + if (!db) + return -ENOMEM; + fp->private_data = db; + db->ctx.cb = db_cb; + db->ctx.ern_cb = db_ern_cb; + + ret = pme_ctx_init(&db->ctx, + PME_CTX_FLAG_EXCLUSIVE | + PME_CTX_FLAG_PMTCC | + PME_CTX_FLAG_DIRECT| + PME_CTX_FLAG_LOCAL, + 0, 4, CONFIG_FSL_PME2_DB_QOSOUT_PRIORITY, 0, NULL); + if (ret) { + PMEPRERR("pme_ctx_init %d\n", ret); + goto free_data; + } + + /* enable the context */ + ret = pme_ctx_enable(&db->ctx); + if (ret) { + PMEPRERR("error enabling ctx %d\n", ret); + pme_ctx_finish(&db->ctx); + goto free_data; + } + PMEPRINFO("pme2_db: Finish pme_db open %d\n", smp_processor_id()); + return 0; +free_data: + kfree(fp->private_data); + fp->private_data = NULL; + return ret; +} + +static int fsl_pme2_db_close(struct inode *node, struct file *fp) +{ + int ret = 0; + struct db_session *db = fp->private_data; + + PMEPRINFO("Start pme_db close\n"); + while (db->exclusive_counter) { + pme_ctx_exclusive_dec(&db->ctx); + db->exclusive_counter--; + } + + /* Disable context. */ + ret = pme_ctx_disable(&db->ctx, PME_CTX_OP_WAIT, NULL); + if (ret) + PMEPRCRIT("Error disabling ctx %d\n", ret); + pme_ctx_finish(&db->ctx); + kfree(db); + PMEPRINFO("Finish pme_db close\n"); + return 0; +} + +/* Main switch loop for ioctl operations */ +static long fsl_pme2_db_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + struct db_session *db = fp->private_data; + int ret = 0; + + switch (cmd) { + + case PMEIO_PMTCC: { + int ret; + struct pme_db db_cmd; + + /* Copy the command to kernel space */ + if (copy_from_user(&db_cmd, (void __user *)arg, + sizeof(db_cmd))) + return -EFAULT; + ret = execute_cmd(fp, db, &db_cmd); + if (!ret) + ret = copy_to_user((struct pme_db __user *)arg, + &db_cmd, sizeof(db_cmd)); + return ret; + } + break; + + case PMEIO_EXL_INC: + return exclusive_inc(fp, db); + case PMEIO_EXL_DEC: + return exclusive_dec(fp, db); + case PMEIO_EXL_GET: + BUG_ON(!db); + BUG_ON(!(db->ctx.flags & PME_CTX_FLAG_EXCLUSIVE)); + if (copy_to_user((void __user *)arg, + &db->exclusive_counter, + sizeof(db->exclusive_counter))) + ret = -EFAULT; + return ret; + case PMEIO_NOP: + return execute_nop(fp, db); + case PMEIO_SRE_RESET: + return ioctl_sre_reset(arg); + +#ifdef CONFIG_COMPAT + case PMEIO_PMTCC32: { + int ret; + struct pme_db db_cmd; + struct compat_pme_db db_cmd32; + struct compat_pme_db __user *user_db_cmd = compat_ptr(arg); + + /* Copy the command to kernel space */ + if (copy_from_user(&db_cmd32, user_db_cmd, sizeof(db_cmd32))) + return -EFAULT; + /* Convert to 64-bit struct */ + compat_to_db(&db_cmd, &db_cmd32); + ret = execute_cmd(fp, db, &db_cmd); + if (!ret) { + /* Convert to compat struct */ + db_to_compat(&db_cmd32, &db_cmd); + ret = copy_to_user(user_db_cmd, &db_cmd32, + sizeof(*user_db_cmd)); + } + return ret; + } + break; +#endif + } + pr_info("Unknown pme_db ioctl cmd %u\n", cmd); + return -EINVAL; +} + +static const struct file_operations fsl_pme2_db_fops = { + .owner = THIS_MODULE, + .open = fsl_pme2_db_open, + .release = fsl_pme2_db_close, + .unlocked_ioctl = fsl_pme2_db_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = fsl_pme2_db_ioctl, +#endif +}; + +static struct miscdevice fsl_pme2_db_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = PME_DEV_DB_NODE, + .fops = &fsl_pme2_db_fops +}; + +static int __init fsl_pme2_db_init(void) +{ + int err = 0; + + pr_info("Freescale pme2 db driver\n"); + if 
(!pme2_have_control()) { + PMEPRERR("not on ctrl-plane\n"); + return -ENODEV; + } + err = misc_register(&fsl_pme2_db_dev); + if (err) { + PMEPRERR("cannot register device\n"); + return err; + } + PMEPRINFO("device %s registered\n", fsl_pme2_db_dev.name); + return 0; +} + +static void __exit fsl_pme2_db_exit(void) +{ + int err = misc_deregister(&fsl_pme2_db_dev); + if (err) { + PMEPRERR("Failed to deregister device %s, " + "code %d\n", fsl_pme2_db_dev.name, err); + return; + } + PMEPRINFO("device %s deregistered\n", fsl_pme2_db_dev.name); +} + +module_init(fsl_pme2_db_init); +module_exit(fsl_pme2_db_exit); + +MODULE_AUTHOR("Freescale Semiconductor - OTC"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL PME2 db driver"); --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_high.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_high.c @@ -0,0 +1,944 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_private.h" + +/* The pme_ctx state machine is described via the following list of + * internal PME_CTX_FLAG_*** bits and cross-referenced to the APIs (and + * functionality) they track. + * + * DEAD: set at any point, an error has been hit, doesn't "cause" disabling or + * any autonomous ref-decrement (been there, hit the gotchas, won't do it + * again). + * + * DISABLING: set by pme_ctx_disable() at any point that is not already + * disabling, disabled, or in ctrl, and the ref is decremented. DISABLING is + * unset by pme_ctx_enable(). + * + * DISABLED: once pme_ctx_disable() has set DISABLING and refs==0, DISABLED is + * set before returning. (Any failure will clear DISABLING and increment the ref + * count.) DISABLING is unset by pme_ctx_enable(). 
+ * + * ENABLING: set by pme_ctx_enable() provided the context is disabled, not dead, + * not in RECONFIG, and not already enabling. Once set, the ref is incremented + * and the tx FQ is scheduled (for non-exclusive flows). If this fails, the ref + * is decremented and the context is re-disabled. ENABLING is unset once + * pme_ctx_enable() completes. + * + * RECONFIG: set by pme_ctx_reconfigure_[rt]x() provided the context is + * disabled, not dead, and not already in reconfig. RECONFIG is cleared prior to + * the function returning. + * + * Simplifications: the do_flag() wrapper provides synchronised modifications of + * the ctx 'flags', and callers can rely on the following implications to reduce + * the number of flags in the masks being passed in; + * DISABLED implies DISABLING (and enable will clear both) + */ + +/* Internal-only ctx flags, mustn't conflict with exported ones */ +#define PME_CTX_FLAG_DEAD 0x80000000 +#define PME_CTX_FLAG_DISABLING 0x40000000 +#define PME_CTX_FLAG_DISABLED 0x20000000 +#define PME_CTX_FLAG_ENABLING 0x10000000 +#define PME_CTX_FLAG_RECONFIG 0x08000000 +#define PME_CTX_FLAG_PRIVATE 0xf8000000 /* mask of them all */ + +/* Internal-only cmd flags, musn't conflict with exported ones */ +#define PME_CTX_OP_INSIDE_DISABLE 0x80000000 +#define PME_CTX_OP_PRIVATE 0x80000000 /* mask of them all */ + +struct pme_nostash { + struct qman_fq fqin; + struct pme_ctx *parent; +}; + +/* This wrapper simplifies conditional (and locked) read-modify-writes to + * 'flags'. Inlining should allow the compiler to optimise it based on the + * parameters, eg. if 'must_be_set'/'must_not_be_set' are zero it will + * degenerate to an unconditional read-modify-write, if 'to_set'/'to_unset' are + * zero it will degenerate to a read-only flag-check, etc. */ +static inline int do_flags(struct pme_ctx *ctx, + u32 must_be_set, u32 must_not_be_set, + u32 to_set, u32 to_unset) +{ + int err = -EBUSY; + unsigned long irqflags; + + spin_lock_irqsave(&ctx->lock, irqflags); + if (((ctx->flags & must_be_set) == must_be_set) && + !(ctx->flags & must_not_be_set)) { + ctx->flags |= to_set; + ctx->flags &= ~to_unset; + err = 0; + } + spin_unlock_irqrestore(&ctx->lock, irqflags); + return err; +} + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, struct qman_fq *, + const struct qm_dqrr_entry *); +static void cb_ern(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); +static void cb_dc_ern(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); +static void cb_fqs(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); +static const struct qman_fq_cb pme_fq_base_in = { + .fqs = cb_fqs, + .ern = cb_ern +}; +static const struct qman_fq_cb pme_fq_base_out = { + .dqrr = cb_dqrr, + .dc_ern = cb_dc_ern, + .fqs = cb_fqs +}; + +/* Globals related to competition for PME_EFQC, ie. 
exclusivity */ +static DECLARE_WAIT_QUEUE_HEAD(exclusive_queue); +static spinlock_t exclusive_lock = __SPIN_LOCK_UNLOCKED(exclusive_lock); +static unsigned int exclusive_refs; +static struct pme_ctx *exclusive_ctx; + +/* Index 0..255, bools do indicated which errors are serious + * 0x40, 0x41, 0x48, 0x49, 0x4c, 0x4e, 0x4f, 0x50, 0x51, 0x59, 0x5a, 0x5b, + * 0x5c, 0x5d, 0x5f, 0x60, 0x80, 0xc0, 0xc1, 0xc2, 0xc4, 0xd2, + * 0xd4, 0xd5, 0xd7, 0xd9, 0xda, 0xe0, 0xe7 + */ +static u8 serious_error_vec[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x01, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x01, 0x01, 0x00, 0x01, 0x00, 0x01, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +/* TODO: this is hitting the rx FQ with a large blunt instrument, ie. park() + * does a retire, query, oos, and (re)init. It's possible to force-eligible the + * rx FQ instead, then use a DCA_PK within the cb_dqrr() callback to park it. + * Implement this optimisation later if it's an issue (and incur the additional + * complexity in the state-machine). 
*/ +static int park(struct qman_fq *fq, struct qm_mcc_initfq *initfq) +{ + int ret; + u32 flags; + + ret = qman_retire_fq(fq, &flags); + if (ret) + return ret; + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + /* We can't revert from now on */ + ret = qman_query_fq(fq, &initfq->fqd); + BUG_ON(ret); + ret = qman_oos_fq(fq); + BUG_ON(ret); + /* can't set QM_INITFQ_WE_OAC and QM_INITFQ_WE_TDTHRESH + * at the same time */ + initfq->we_mask = QM_INITFQ_WE_MASK & ~QM_INITFQ_WE_TDTHRESH; + ret = qman_init_fq(fq, 0, initfq); + BUG_ON(ret); + initfq->we_mask = QM_INITFQ_WE_TDTHRESH; + ret = qman_init_fq(fq, 0, initfq); + BUG_ON(ret); + return 0; +} + +static inline int reconfigure_rx(struct pme_ctx *ctx, int to_park, u8 qosout, + enum qm_channel dest, + const struct qm_fqd_stashing *stashing) +{ + struct qm_mcc_initfq initfq; + u32 flags = QMAN_INITFQ_FLAG_SCHED; + int ret; + + ret = do_flags(ctx, PME_CTX_FLAG_DISABLED, + PME_CTX_FLAG_DEAD | PME_CTX_FLAG_RECONFIG, + PME_CTX_FLAG_RECONFIG, 0); + if (ret) + return ret; + if (to_park) { + ret = park(&ctx->fq, &initfq); + if (ret) + goto done; + } + initfq.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL; + initfq.fqd.dest.wq = qosout; + if (stashing) { + initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + initfq.fqd.context_a.stashing = *stashing; + initfq.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + } else + initfq.fqd.fq_ctrl = 0; /* disable stashing */ + if (ctx->flags & PME_CTX_FLAG_LOCAL) + flags |= QMAN_INITFQ_FLAG_LOCAL; + else { + initfq.fqd.dest.channel = dest; + /* Set hold-active *IFF* it's a pool channel */ + if (dest >= qm_channel_pool1) + initfq.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE; + } + ret = qman_init_fq(&ctx->fq, flags, &initfq); +done: + do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_RECONFIG); + return ret; +} + +/* this code is factored out of pme_ctx_disable() and get_ctrl() */ +static int empty_pipeline(struct pme_ctx *ctx, __maybe_unused u32 flags) +{ + int ret; +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & PME_CTX_OP_WAIT) { + if (flags & PME_CTX_OP_WAIT_INT) { + ret = -EINTR; + wait_event_interruptible(ctx->queue, + !(ret = atomic_read(&ctx->refs))); + } else + wait_event(ctx->queue, + !(ret = atomic_read(&ctx->refs))); + } else +#endif + ret = atomic_read(&ctx->refs); + if (ret) + /* convert a +ve ref-count to a -ve error code */ + ret = -EBUSY; + return ret; +} + +int pme_ctx_init(struct pme_ctx *ctx, u32 flags, u32 bpid, u8 qosin, + u8 qosout, enum qm_channel dest, + const struct qm_fqd_stashing *stashing) +{ + u32 fqid_rx = 0, fqid_tx = 0; + int rxinit = 0, ret = -ENOMEM, fqin_inited = 0; + + ctx->fq.cb = pme_fq_base_out; + atomic_set(&ctx->refs, 0); + ctx->flags = (flags & ~PME_CTX_FLAG_PRIVATE) | PME_CTX_FLAG_DISABLED | + PME_CTX_FLAG_DISABLING; + if (ctx->flags & PME_CTX_FLAG_PMTCC) + ctx->flags |= PME_CTX_FLAG_DIRECT | PME_CTX_FLAG_EXCLUSIVE; + spin_lock_init(&ctx->lock); + init_waitqueue_head(&ctx->queue); + INIT_LIST_HEAD(&ctx->tokens); + ctx->hw_flow = NULL; + ctx->hw_residue = NULL; + + ctx->us_data = kzalloc(sizeof(struct pme_nostash), GFP_KERNEL); + if (!ctx->us_data) + goto err; + ctx->us_data->parent = ctx; + fqid_rx = qm_fq_new(); + fqid_tx = qm_fq_new(); + if (!fqid_rx || !fqid_tx || !ctx->us_data) + goto err; + ctx->us_data->fqin.cb = pme_fq_base_in; + if (qman_create_fq(fqid_rx, QMAN_FQ_FLAG_TO_DCPORTAL | + ((flags & PME_CTX_FLAG_LOCKED) ? + QMAN_FQ_FLAG_LOCKED : 0), + &ctx->us_data->fqin)) + goto err; + fqin_inited = 1; + if (qman_create_fq(fqid_tx, QMAN_FQ_FLAG_NO_ENQUEUE | + ((flags & PME_CTX_FLAG_LOCKED) ? 
+ QMAN_FQ_FLAG_LOCKED : 0), &ctx->fq)) + goto err; + rxinit = 1; + /* Input FQ */ + if (!(flags & PME_CTX_FLAG_DIRECT)) { + ctx->hw_flow = pme_hw_flow_new(); + if (!ctx->hw_flow) + goto err; + } + ret = pme_ctx_reconfigure_tx(ctx, bpid, qosin); + if (ret) + goto err; + /* Output FQ */ + ret = reconfigure_rx(ctx, 0, qosout, dest, stashing); + if (ret) { + /* Need to OOS the FQ before it gets free'd */ + ret = qman_oos_fq(&ctx->us_data->fqin); + BUG_ON(ret); + goto err; + } + return 0; +err: + if (fqid_rx) + qm_fq_free(fqid_rx); + if (fqid_tx) + qm_fq_free(fqid_tx); + if (ctx->hw_flow) + pme_hw_flow_free(ctx->hw_flow); + if (ctx->us_data) { + if (fqin_inited) + qman_destroy_fq(&ctx->us_data->fqin, 0); + kfree(ctx->us_data); + } + if (rxinit) + qman_destroy_fq(&ctx->fq, 0); + return ret; +} +EXPORT_SYMBOL(pme_ctx_init); + +/* NB, we don't lock here because there must be no other callers (even if we + * locked, what does the loser do after we win?) */ +void pme_ctx_finish(struct pme_ctx *ctx) +{ + u32 flags, fqid_rx, fqid_tx; + int ret; + + ret = do_flags(ctx, PME_CTX_FLAG_DISABLED, PME_CTX_FLAG_RECONFIG, 0, 0); + BUG_ON(ret); + /* Rx/Tx are empty (coz ctx is disabled) so retirement should be + * immediate */ + ret = qman_retire_fq(&ctx->us_data->fqin, &flags); + BUG_ON(ret); + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + ret = qman_retire_fq(&ctx->fq, &flags); + BUG_ON(ret); + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + /* OOS and free (don't kfree fq, it's a static ctx member) */ + ret = qman_oos_fq(&ctx->us_data->fqin); + BUG_ON(ret); + ret = qman_oos_fq(&ctx->fq); + BUG_ON(ret); + fqid_rx = qman_fq_fqid(&ctx->us_data->fqin); + fqid_tx = qman_fq_fqid(&ctx->fq); + qman_destroy_fq(&ctx->us_data->fqin, 0); + qman_destroy_fq(&ctx->fq, 0); + qm_fq_free(fqid_rx); + qm_fq_free(fqid_tx); + kfree(ctx->us_data); + if (ctx->hw_flow) + pme_hw_flow_free(ctx->hw_flow); + if (ctx->hw_residue) + pme_hw_residue_free(ctx->hw_residue); +} +EXPORT_SYMBOL(pme_ctx_finish); + +int pme_ctx_is_disabled(struct pme_ctx *ctx) +{ + return (ctx->flags & PME_CTX_FLAG_DISABLED); +} +EXPORT_SYMBOL(pme_ctx_is_disabled); + +int pme_ctx_is_dead(struct pme_ctx *ctx) +{ + return (ctx->flags & PME_CTX_FLAG_DEAD); +} +EXPORT_SYMBOL(pme_ctx_is_dead); + +/* predeclare this here because pme_ctx_disable() may invoke it in "privileged + * mode". The code is down with the other ctrl commands, where it belongs. */ +static inline int __update_flow(struct pme_ctx *ctx, u32 flags, + struct pme_flow *params, struct pme_ctx_ctrl_token *token, + int is_disabling); + +/* This gets invoked by pme_ctx_disable() if it runs to completion, otherwise + * it's called from cb_helper. */ +static inline void __disable_done(struct pme_ctx *ctx) +{ + struct qm_mcc_initfq initfq; + int ret = 0; + if (!(ctx->flags & PME_CTX_FLAG_EXCLUSIVE)) { + /* Park fqin (exclusive is always parked) */ + ret = park(&ctx->us_data->fqin, &initfq); + /* All the conditions for park() to succeed should be met. If + * this fails, there's a bug (s/w or h/w). */ + if (ret) + pr_crit("pme2: park() should never fail! 
(%d)\n", ret); + } + do_flags(ctx, 0, 0, PME_CTX_FLAG_DISABLED, 0); +} + +int pme_ctx_disable(struct pme_ctx *ctx, u32 flags, + struct pme_ctx_ctrl_token *token) +{ + int ret; + + /* We must not (already) be DISABLING */ + ret = do_flags(ctx, 0, PME_CTX_FLAG_DISABLING, + PME_CTX_FLAG_DISABLING, 0); + if (ret) + return ret; + /* Make sure the pipeline is empty */ + atomic_dec(&ctx->refs); + ret = empty_pipeline(ctx, flags); + if (ret) + goto err; + /* We're idle, but is the flow context flushed from PME onboard cache? + * If it's not flushed when the system deallocates it, that 32 bytes + * could be in use later when PME decides to flush a write to it. Need + * to make it coherent again... */ + if (!(ctx->flags & PME_CTX_FLAG_DIRECT)) { + /* Pass on wait flags (if any) but cancel any flow-context field + * writes (this is not the pme_ctx_ctrl_update_flow() API). */ + ret = __update_flow(ctx, flags & ~PME_CMD_FCW_ALL, NULL, + token, 1); + if (ret) + goto err; + return 1; + } + __disable_done(ctx); + return 0; +err: + atomic_inc(&ctx->refs); + do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_DISABLING); + wake_up(&ctx->queue); + return ret; +} +EXPORT_SYMBOL(pme_ctx_disable); + +int pme_ctx_enable(struct pme_ctx *ctx) +{ + int ret; + ret = do_flags(ctx, PME_CTX_FLAG_DISABLED, + PME_CTX_FLAG_DEAD | PME_CTX_FLAG_RECONFIG | + PME_CTX_FLAG_ENABLING, + PME_CTX_FLAG_ENABLING, 0); + if (ret) + return ret; + if (!(ctx->flags & PME_CTX_FLAG_EXCLUSIVE)) { + ret = qman_init_fq(&ctx->us_data->fqin, + QMAN_INITFQ_FLAG_SCHED, NULL); + if (ret) { + do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_ENABLING); + return ret; + } + } + atomic_inc(&ctx->refs); + do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_DISABLED | PME_CTX_FLAG_DISABLING | + PME_CTX_FLAG_ENABLING); + return 0; +} +EXPORT_SYMBOL(pme_ctx_enable); + +int pme_ctx_reconfigure_tx(struct pme_ctx *ctx, u32 bpid, u8 qosin) +{ + struct qm_mcc_initfq initfq; + int ret; + + ret = do_flags(ctx, PME_CTX_FLAG_DISABLED, + PME_CTX_FLAG_DEAD | PME_CTX_FLAG_RECONFIG, + PME_CTX_FLAG_RECONFIG, 0); + if (ret) + return ret; + memset(&initfq,0,sizeof(initfq)); + pme_initfq(&initfq, ctx->hw_flow, qosin, bpid, qman_fq_fqid(&ctx->fq)); + ret = qman_init_fq(&ctx->us_data->fqin, 0, &initfq); + do_flags(ctx, 0, 0, 0, PME_CTX_FLAG_RECONFIG); + return ret; +} +EXPORT_SYMBOL(pme_ctx_reconfigure_tx); + +int pme_ctx_reconfigure_rx(struct pme_ctx *ctx, u8 qosout, + enum qm_channel dest, const struct qm_fqd_stashing *stashing) +{ + return reconfigure_rx(ctx, 1, qosout, dest, stashing); +} +EXPORT_SYMBOL(pme_ctx_reconfigure_rx); + +/* Helpers for 'ctrl' and 'work' APIs. These are used when the 'ctx' in question + * is EXCLUSIVE. 
*/ +static inline void release_exclusive(__maybe_unused struct pme_ctx *ctx) +{ + unsigned long irqflags; + + BUG_ON(exclusive_ctx != ctx); + BUG_ON(!exclusive_refs); + spin_lock_irqsave(&exclusive_lock, irqflags); + if (!(--exclusive_refs)) { + exclusive_ctx = NULL; + pme2_exclusive_unset(); + wake_up(&exclusive_queue); + } + spin_unlock_irqrestore(&exclusive_lock, irqflags); +} +static int __try_exclusive(struct pme_ctx *ctx) +{ + int ret = 0; + unsigned long irqflags; + + spin_lock_irqsave(&exclusive_lock, irqflags); + if (exclusive_refs) { + /* exclusivity already held, continue if we're the owner */ + if (exclusive_ctx != ctx) + ret = -EBUSY; + } else { + /* it's not currently held */ + ret = pme2_exclusive_set(&ctx->us_data->fqin); + if (!ret) + exclusive_ctx = ctx; + } + if (!ret) + exclusive_refs++; + spin_unlock_irqrestore(&exclusive_lock, irqflags); + return ret; +} +/* Use this macro as the wait expression because we don't want to continue + * looping if the reason we're failing is that we don't have CCSR access + * (-ENODEV). */ +#define try_exclusive(ret, ctx) \ + (!(ret = __try_exclusive(ctx)) || (ret == -ENODEV)) +static inline int get_exclusive(struct pme_ctx *ctx, __maybe_unused u32 flags) +{ + int ret; +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & PME_CTX_OP_WAIT) { + if (flags & PME_CTX_OP_WAIT_INT) { + ret = -EINTR; + wait_event_interruptible(exclusive_queue, + try_exclusive(ret, ctx)); + } else + wait_event(exclusive_queue, + try_exclusive(ret, ctx)); + } else +#endif + ret = __try_exclusive(ctx); + return ret; +} + +/* Used for 'work' APIs, convert PME->QMAN wait flags. The PME and + * QMAN "wait" flags have been aligned so that the below conversion should + * compile with good straight-line speed. */ +static inline u32 ctrl2eq(u32 flags) +{ +#ifdef CONFIG_FSL_DPA_CAN_WAIT + return flags & (QMAN_ENQUEUE_FLAG_WAIT | QMAN_ENQUEUE_FLAG_WAIT_INT); +#else + return flags; +#endif +} + +static inline void release_work(struct pme_ctx *ctx) +{ + if (atomic_dec_and_test(&ctx->refs)) + wake_up(&ctx->queue); +} + +#define BLOCK_NORMAL_WORK (PME_CTX_FLAG_DEAD | PME_CTX_FLAG_DISABLING) +static int try_work(struct pme_ctx *ctx, u32 flags) +{ + atomic_inc(&ctx->refs); + if (unlikely(!(flags & PME_CTX_OP_INSIDE_DISABLE) && + (ctx->flags & BLOCK_NORMAL_WORK))) { + release_work(ctx); + return -EIO; + } + return 0; +} + +static int get_work(struct pme_ctx *ctx, u32 flags) +{ + int ret = 0; +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & PME_CTX_OP_WAIT) { + if (flags & PME_CTX_OP_WAIT_INT) { + ret = -EINTR; + wait_event_interruptible(ctx->queue, + !(ret = try_work(ctx, flags))); + } else + wait_event(ctx->queue, !try_work(ctx, flags)); + } else +#endif + ret = try_work(ctx, flags); + return ret; +} + +static inline int do_work(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, + struct pme_ctx_token *token, struct qman_fq *orp_fq, u16 seqnum) +{ + unsigned long irqflags; + int ret = get_work(ctx, flags); + if (ret) + return ret; + if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE) { + ret = get_exclusive(ctx, flags); + if (ret) { + release_work(ctx); + return ret; + } + } + BUG_ON(sizeof(*fd) != sizeof(token->blob)); + memcpy(&token->blob, fd, sizeof(*fd)); + + spin_lock_irqsave(&ctx->lock, irqflags); + list_add_tail(&token->node, &ctx->tokens); + spin_unlock_irqrestore(&ctx->lock, irqflags); + + if (!orp_fq) + ret = qman_enqueue(&ctx->us_data->fqin, fd, ctrl2eq(flags)); + else + ret = qman_enqueue_orp(&ctx->us_data->fqin, fd, ctrl2eq(flags), + orp_fq, seqnum); + if (ret) { + 
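+ /* The enqueue failed: unhook the token again and drop the work (and
+ * exclusivity) references taken above before returning the error. */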
spin_lock_irqsave(&ctx->lock, irqflags); + list_del(&token->node); + spin_unlock_irqrestore(&ctx->lock, irqflags); + if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE) + release_exclusive(ctx); + release_work(ctx); + } + return ret; +} + +static inline int __update_flow(struct pme_ctx *ctx, u32 flags, + struct pme_flow *params, struct pme_ctx_ctrl_token *token, + int is_disabling) +{ + struct qm_fd fd; + int ret; + int hw_res_used = 0; + struct pme_hw_residue *hw_res = pme_hw_residue_new(); + unsigned long irqflags; + + BUG_ON(ctx->flags & PME_CTX_FLAG_DIRECT); + if (!hw_res) + return -ENOMEM; + token->internal_flow_ptr = pme_hw_flow_new(); + if (!token->internal_flow_ptr) { + pme_hw_residue_free(hw_res); + return -ENOMEM; + } + token->base_token.cmd_type = pme_cmd_flow_write; + + flags &= ~PME_CTX_OP_PRIVATE; + /* The callback will want to know this */ + token->base_token.is_disable_flush = is_disabling ? 1 : 0; + flags |= (is_disabling ? PME_CTX_OP_INSIDE_DISABLE : 0); + spin_lock_irqsave(&ctx->lock, irqflags); + if (flags & PME_CTX_OP_RESETRESLEN) { + if (ctx->hw_residue) { + params->ren = 1; + flags |= PME_CMD_FCW_RES; + } else + flags &= ~PME_CMD_FCW_RES; + } + /* allocate residue memory if it is being added */ + if ((flags & PME_CMD_FCW_RES) && params->ren && !ctx->hw_residue) { + ctx->hw_residue = hw_res; + hw_res_used = 1; + } + spin_unlock_irqrestore(&ctx->lock, irqflags); + if (!hw_res_used) + pme_hw_residue_free(hw_res); + /* enqueue the FCW command to PME */ + memset(&fd, 0, sizeof(fd)); + if (params) + memcpy(token->internal_flow_ptr, params, + sizeof(struct pme_flow)); + pme_fd_cmd_fcw(&fd, flags & PME_CMD_FCW_ALL, + (struct pme_flow *)token->internal_flow_ptr, + ctx->hw_residue); + ret = do_work(ctx, flags, &fd, &token->base_token, NULL, 0); + return ret; +} + +int pme_ctx_ctrl_update_flow(struct pme_ctx *ctx, u32 flags, + struct pme_flow *params, struct pme_ctx_ctrl_token *token) +{ + return __update_flow(ctx, flags, params, token, 0); +} +EXPORT_SYMBOL(pme_ctx_ctrl_update_flow); + +int pme_ctx_ctrl_read_flow(struct pme_ctx *ctx, u32 flags, + struct pme_flow *params, struct pme_ctx_ctrl_token *token) +{ + struct qm_fd fd; + + BUG_ON(ctx->flags & (PME_CTX_FLAG_DIRECT | PME_CTX_FLAG_PMTCC)); + token->base_token.cmd_type = pme_cmd_flow_read; + /* enqueue the FCR command to PME */ + token->usr_flow_ptr = params; + token->internal_flow_ptr = pme_hw_flow_new(); + if (!token->internal_flow_ptr) + return -ENOMEM; + memset(&fd, 0, sizeof(fd)); + pme_fd_cmd_fcr(&fd, (struct pme_flow *)token->internal_flow_ptr); + return do_work(ctx, flags, &fd, &token->base_token, NULL, 0); +} +EXPORT_SYMBOL(pme_ctx_ctrl_read_flow); + +int pme_ctx_ctrl_nop(struct pme_ctx *ctx, u32 flags, + struct pme_ctx_ctrl_token *token) +{ + struct qm_fd fd; + + token->base_token.cmd_type = pme_cmd_nop; + /* enqueue the NOP command to PME */ + memset(&fd, 0, sizeof(fd)); + qm_fd_addr_set64(&fd, (unsigned long)token); + pme_fd_cmd_nop(&fd); + return do_work(ctx, flags, &fd, &token->base_token, NULL, 0); +} +EXPORT_SYMBOL(pme_ctx_ctrl_nop); + +static inline void __prep_scan(__maybe_unused struct pme_ctx *ctx, + struct qm_fd *fd, u32 args, struct pme_ctx_token *token) +{ + BUG_ON(ctx->flags & PME_CTX_FLAG_PMTCC); + token->cmd_type = pme_cmd_scan; + pme_fd_cmd_scan(fd, args); +} + +int pme_ctx_scan(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, u32 args, + struct pme_ctx_token *token) +{ + __prep_scan(ctx, fd, args, token); + return do_work(ctx, flags, fd, token, NULL, 0); +} +EXPORT_SYMBOL(pme_ctx_scan); + +int 
pme_ctx_scan_orp(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, u32 args, + struct pme_ctx_token *token, struct qman_fq *orp_fq, u16 seqnum) +{ + __prep_scan(ctx, fd, args, token); + return do_work(ctx, flags, fd, token, orp_fq, seqnum); +} +EXPORT_SYMBOL(pme_ctx_scan_orp); + +int pme_ctx_pmtcc(struct pme_ctx *ctx, u32 flags, struct qm_fd *fd, + struct pme_ctx_token *token) +{ + BUG_ON(!(ctx->flags & PME_CTX_FLAG_PMTCC)); + token->cmd_type = pme_cmd_pmtcc; + pme_fd_cmd_pmtcc(fd); + return do_work(ctx, flags, fd, token, NULL, 0); +} +EXPORT_SYMBOL(pme_ctx_pmtcc); + +int pme_ctx_exclusive_inc(struct pme_ctx *ctx, u32 flags) +{ + return get_exclusive(ctx, flags); +} +EXPORT_SYMBOL(pme_ctx_exclusive_inc); + +void pme_ctx_exclusive_dec(struct pme_ctx *ctx) +{ + release_exclusive(ctx); +} +EXPORT_SYMBOL(pme_ctx_exclusive_dec); + +/* The 99.99% case is that enqueues happen in order or they get order-restored + * by the ORP, and so dequeues of responses happen in order too, so our FIFO + * linked-list of tokens is append-on-enqueue and pop-on-dequeue, and all's + * well. + * + * *EXCEPT*, if ever an enqueue gets rejected ... what then happens is that we + * have dequeues and ERNs to deal with, and the order we see them in is not + * necessarily the linked-list order. So we need to handle this in DQRR and MR + * callbacks, without sacrificing fast-path performance. Ouch. + * + * We use pop_matching_token() to take care of the mess (inlined, of course). */ +#define MATCH(fd1,fd2) \ + ((qm_fd_addr_get64(fd1) == qm_fd_addr_get64(fd2)) && \ + ((fd1)->opaque == (fd2)->opaque)) +static inline struct pme_ctx_token *pop_matching_token(struct pme_ctx *ctx, + const struct qm_fd *fd) +{ + struct pme_ctx_token *token; + const struct qm_fd *t_fd; + unsigned long irqflags; + + /* The fast-path case is that the for() loop actually degenerates into; + * token = list_first_entry(); + * if (likely(MATCH())) + * [done] + * The penalty of the slow-path case is the for() loop plus the fact + * we're optimising for a "likely" match first time, which might hurt + * when that assumption is wrong a few times in succession. */ + spin_lock_irqsave(&ctx->lock, irqflags); + list_for_each_entry(token, &ctx->tokens, node) { + t_fd = (const struct qm_fd *)&token->blob[0]; + if (likely(MATCH(t_fd, fd))) { + list_del(&token->node); + goto found; + } + } + token = NULL; + pr_err("PME2 Could not find matching token!\n"); + BUG(); +found: + spin_unlock_irqrestore(&ctx->lock, irqflags); + return token; +} + +static inline void cb_helper(__always_unused struct qman_portal *portal, + struct pme_ctx *ctx, const struct qm_fd *fd, int error) +{ + struct pme_ctx_token *token; + struct pme_ctx_ctrl_token *ctrl_token; + + /* Resist the urge to use "unlikely" - 'error' is a constant param to an + * inline fn, so the compiler can collapse this completely. */ + if (error) + do_flags(ctx, 0, 0, PME_CTX_FLAG_DEAD, 0); + token = pop_matching_token(ctx, fd); + if (likely(token->cmd_type == pme_cmd_scan)) + ctx->cb(ctx, fd, token); + else if (token->cmd_type == pme_cmd_pmtcc) + ctx->cb(ctx, fd, token); + else { + /* outcast ctx and call supplied callback */ + ctrl_token = container_of(token, struct pme_ctx_ctrl_token, + base_token); + if (token->cmd_type == pme_cmd_flow_write) { + /* Release the allocated flow context */ + pme_hw_flow_free(ctrl_token->internal_flow_ptr); + /* Is this pme_ctx_disable() completion? 
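+ * (i.e. the flow-context flush that __update_flow() issued on behalf of
+ * pme_ctx_disable(); if so, the disable can be completed now)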
*/ + if (token->is_disable_flush) + __disable_done(ctx); + } else if (token->cmd_type == pme_cmd_flow_read) { + /* Copy read result */ + memcpy(ctrl_token->usr_flow_ptr, + ctrl_token->internal_flow_ptr, + sizeof(struct pme_flow)); + /* Release the allocated flow context */ + pme_hw_flow_free(ctrl_token->internal_flow_ptr); + } + ctrl_token->cb(ctx, fd, ctrl_token); + } + /* Consume the frame */ + if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE) + release_exclusive(ctx); + if (atomic_dec_and_test(&ctx->refs)) + wake_up(&ctx->queue); +} + +/* TODO: this scheme does not allow PME receivers to use held-active at all. E.g. + * there's no configuration of held-active for 'fq', and if there was, there's + * (a) nothing in the cb_dqrr() to support "park" or "defer" logic, and (b) + * nothing in cb_fqs() to support a delayed FQPN (DCAP_PK) notification. */ +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *portal, + struct qman_fq *fq, const struct qm_dqrr_entry *dq) +{ + u8 status = (u8)pme_fd_res_status(&dq->fd); + u8 flags = pme_fd_res_flags(&dq->fd); + struct pme_ctx *ctx = (struct pme_ctx *)fq; + + /* Put context into dead state if an unreliable or serious error is + * received + */ + if (unlikely(flags & PME_STATUS_UNRELIABLE)) + cb_helper(portal, ctx, &dq->fd, 1); + else if (unlikely((serious_error_vec[status]))) + cb_helper(portal, ctx, &dq->fd, 1); + else + cb_helper(portal, ctx, &dq->fd, 0); + + return qman_cb_dqrr_consume; +} + +static void cb_ern(__always_unused struct qman_portal *portal, + struct qman_fq *fq, const struct qm_mr_entry *mr) +{ + struct pme_ctx *ctx; + struct pme_nostash *data; + struct pme_ctx_token *token; + + data = container_of(fq, struct pme_nostash, fqin); + ctx = data->parent; + + token = pop_matching_token(ctx, &mr->ern.fd); + if (likely(token->cmd_type == pme_cmd_scan)) { + BUG_ON(!ctx->ern_cb); + ctx->ern_cb(ctx, mr, token); + } else if (token->cmd_type == pme_cmd_pmtcc) { + BUG_ON(!ctx->ern_cb); + ctx->ern_cb(ctx, mr, token); + } else { + struct pme_ctx_ctrl_token *ctrl_token; + /* outcast ctx and call supplied callback */ + ctrl_token = container_of(token, struct pme_ctx_ctrl_token, + base_token); + if (token->cmd_type == pme_cmd_flow_write) { + /* Release the allocated flow context */ + pme_hw_flow_free(ctrl_token->internal_flow_ptr); + } else if (token->cmd_type == pme_cmd_flow_read) { + /* Copy read result */ + memcpy(ctrl_token->usr_flow_ptr, + ctrl_token->internal_flow_ptr, + sizeof(struct pme_flow)); + /* Release the allocated flow context */ + pme_hw_flow_free(ctrl_token->internal_flow_ptr); + } + BUG_ON(!ctrl_token->ern_cb); + ctrl_token->ern_cb(ctx, mr, ctrl_token); + } + /* Consume the frame */ + if (ctx->flags & PME_CTX_FLAG_EXCLUSIVE) + release_exclusive(ctx); + if (atomic_dec_and_test(&ctx->refs)) + wake_up(&ctx->queue); +} + +static void cb_dc_ern(struct qman_portal *portal, struct qman_fq *fq, + const struct qm_mr_entry *mr) +{ + struct pme_ctx *ctx = (struct pme_ctx *)fq; + /* This, umm, *shouldn't* happen. It's pretty bad. Things are expected + * to fall apart here, but we'll continue long enough to get out of + * interrupt context and let the user unwind whatever they can.
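+ * (The rejected frame is passed to cb_helper() with 'error' set, which also
+ * marks the ctx dead via PME_CTX_FLAG_DEAD.)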
*/ + pr_err("PME2 h/w enqueue rejection - expect catastrophe!\n"); + cb_helper(portal, ctx, &mr->dcern.fd, 1); +} + +static void cb_fqs(__always_unused struct qman_portal *portal, + __always_unused struct qman_fq *fq, + const struct qm_mr_entry *mr) +{ + u8 verb = mr->verb & QM_MR_VERB_TYPE_MASK; + if (verb == QM_MR_VERB_FQRNI) + return; + /* nothing else is supposed to occur */ + BUG(); +} + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_low.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_low.c @@ -0,0 +1,276 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_private.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL PME2 (p4080) device usage"); + +#define PME_RESIDUE_SIZE 128 +#define PME_RESIDUE_ALIGN 64 +#define PME_FLOW_SIZE sizeof(struct pme_flow) +#define PME_FLOW_ALIGN 32 +static struct kmem_cache *slab_residue; +static struct kmem_cache *slab_flow; +static struct kmem_cache *slab_fq; + +/* Hack to support "pme_map()". The point of this is that dma_map_single() now + * requires a non-NULL device, so the idea is that address mapping must be + * device-sensitive. Now the PAMU IO-MMU already takes care of this, as can be + * seen by the device-tree structure generated by the hypervisor (each portal + * node has sub-nodes for each h/w end-point it provides access to, and each + * sub-node has its own LIODN configuration). So we just need to map cpu + * pointers to (guest-)physical address and the PAMU takes care of the rest, so + * this doesn't need to be portal-sensitive nor device-sensitive. 
*/ +static struct platform_device *pdev; + +static int pme2_low_init(void) +{ + int ret = -ENOMEM; + + slab_residue = kmem_cache_create("pme2_residue", PME_RESIDUE_SIZE, + PME_RESIDUE_ALIGN, SLAB_HWCACHE_ALIGN, NULL); + if (!slab_residue) + goto end; + slab_flow = kmem_cache_create("pme2_flow", PME_FLOW_SIZE, + PME_FLOW_ALIGN, 0, NULL); + if (!slab_flow) + goto end; + slab_fq = kmem_cache_create("pme2_fqslab", sizeof(struct qman_fq), + __alignof__(struct qman_fq), SLAB_HWCACHE_ALIGN, NULL); + if (!slab_fq) + goto end; + ret = -ENODEV; + pdev = platform_device_alloc("pme", -1); + if (!pdev) + goto end; + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40))) + goto end; + if (platform_device_add(pdev)) + goto end; + return 0; +end: + if (pdev) { + platform_device_put(pdev); + pdev = NULL; + } + if (slab_flow) { + kmem_cache_destroy(slab_flow); + slab_flow = NULL; + } + if (slab_residue) { + kmem_cache_destroy(slab_residue); + slab_residue = NULL; + } + if (slab_fq) { + kmem_cache_destroy(slab_fq); + slab_fq = NULL; + } + return ret; +} + +static void pme2_low_exit(void) +{ + platform_device_del(pdev); + platform_device_put(pdev); + pdev = NULL; + kmem_cache_destroy(slab_fq); + kmem_cache_destroy(slab_flow); + kmem_cache_destroy(slab_residue); + slab_fq = slab_flow = slab_residue = NULL; +} + +module_init(pme2_low_init); +module_exit(pme2_low_exit); + +struct qman_fq *slabfq_alloc(void) +{ + return kmem_cache_alloc(slab_fq, GFP_KERNEL); +} + +void slabfq_free(struct qman_fq *fq) +{ + kmem_cache_free(slab_fq, fq); +} + +/***********************/ +/* low-level functions */ +/***********************/ + +struct pme_hw_residue *pme_hw_residue_new(void) +{ + return kmem_cache_alloc(slab_residue, GFP_KERNEL); +} +EXPORT_SYMBOL(pme_hw_residue_new); + +void pme_hw_residue_free(struct pme_hw_residue *p) +{ + kmem_cache_free(slab_residue, p); +} +EXPORT_SYMBOL(pme_hw_residue_free); + +struct pme_hw_flow *pme_hw_flow_new(void) +{ + struct pme_flow *flow = kmem_cache_zalloc(slab_flow, GFP_KERNEL); + return (struct pme_hw_flow *)flow; +} +EXPORT_SYMBOL(pme_hw_flow_new); + +void pme_hw_flow_free(struct pme_hw_flow *p) +{ + kmem_cache_free(slab_flow, p); +} +EXPORT_SYMBOL(pme_hw_flow_free); + +static const struct pme_flow default_sw_flow = { + .sos = 1, + .srvm = 0, + .esee = 1, + .ren = 0, + .rlen = 0, + .seqnum_hi = 0, + .seqnum_lo = 0, + .sessionid = 0x7ffffff, + .rptr_hi = 0, + .rptr_lo = 0, + .clim = 0xffff, + .mlim = 0xffff +}; + +void pme_sw_flow_init(struct pme_flow *flow) +{ + memcpy(flow, &default_sw_flow, sizeof(*flow)); +} +EXPORT_SYMBOL(pme_sw_flow_init); + +void pme_initfq(struct qm_mcc_initfq *initfq, struct pme_hw_flow *flow, u8 qos, + u8 rbpid, u32 rfqid) +{ + struct pme_context_a *pme_a = + (struct pme_context_a *)&initfq->fqd.context_a; + struct pme_context_b *pme_b = + (struct pme_context_b *)&initfq->fqd.context_b; + + initfq->we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA | + QM_INITFQ_WE_CONTEXTB; + initfq->fqd.dest.channel = qm_channel_pme; + initfq->fqd.dest.wq = qos; + if (flow) { + dma_addr_t fcp = flow_map((struct pme_flow *)flow); + pme_a->mode = pme_mode_flow; + pme_context_a_set64(pme_a, fcp); + } else { + pme_a->mode = pme_mode_direct; + pme_context_a_set64(pme_a, 0); + } + pme_b->rbpid = rbpid; + pme_b->rfqid = rfqid; +} +EXPORT_SYMBOL(pme_initfq); + +void pme_fd_cmd_nop(struct qm_fd *fd) +{ + struct pme_cmd_nop *nop = (struct pme_cmd_nop *)&fd->cmd; + nop->cmd = pme_cmd_nop; +} +EXPORT_SYMBOL(pme_fd_cmd_nop); + +void pme_fd_cmd_fcw(struct qm_fd *fd, u8 flags, struct 
pme_flow *flow, + struct pme_hw_residue *residue) +{ + dma_addr_t f; + struct pme_cmd_flow_write *fcw = (struct pme_cmd_flow_write *)&fd->cmd; + + BUG_ON(!flow); + BUG_ON((unsigned long)flow & 31); + fcw->cmd = pme_cmd_flow_write; + fcw->flags = flags; + if (flags & PME_CMD_FCW_RES) { + if (residue) { + dma_addr_t rptr = residue_map(residue); + BUG_ON(!residue); + BUG_ON((unsigned long)residue & 63); + pme_flow_rptr_set64(flow, rptr); + } else + pme_flow_rptr_set64(flow, 0); + } + f = flow_map(flow); + qm_fd_addr_set64(fd, f); + fd->format = qm_fd_contig; + fd->offset = 0; + fd->length20 = sizeof(*flow); +} +EXPORT_SYMBOL(pme_fd_cmd_fcw); + +void pme_fd_cmd_fcr(struct qm_fd *fd, struct pme_flow *flow) +{ + dma_addr_t f; + struct pme_cmd_flow_read *fcr = (struct pme_cmd_flow_read *)&fd->cmd; + + BUG_ON(!flow); + BUG_ON((unsigned long)flow & 31); + fcr->cmd = pme_cmd_flow_read; + f = flow_map(flow); + qm_fd_addr_set64(fd, f); + fd->format = qm_fd_contig; + fd->offset = 0; + fd->length20 = sizeof(*flow); +} +EXPORT_SYMBOL(pme_fd_cmd_fcr); + +void pme_fd_cmd_pmtcc(struct qm_fd *fd) +{ + struct pme_cmd_pmtcc *pmtcc = (struct pme_cmd_pmtcc *)&fd->cmd; + pmtcc->cmd = pme_cmd_pmtcc; +} +EXPORT_SYMBOL(pme_fd_cmd_pmtcc); + +void pme_fd_cmd_scan(struct qm_fd *fd, u32 args) +{ + struct pme_cmd_scan *scan = (struct pme_cmd_scan *)&fd->cmd; + fd->cmd = args; + scan->cmd = pme_cmd_scan; +} +EXPORT_SYMBOL(pme_fd_cmd_scan); + +dma_addr_t pme_map(void *ptr) +{ + return dma_map_single(&pdev->dev, ptr, 1, DMA_BIDIRECTIONAL); +} +EXPORT_SYMBOL(pme_map); + +int pme_map_error(dma_addr_t dma_addr) +{ + return dma_mapping_error(&pdev->dev, dma_addr); +} +EXPORT_SYMBOL(pme_map_error); + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_private.h +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_private.h @@ -0,0 +1,180 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_sys.h" +#include + +#undef PME2_DEBUG + +#ifdef PME2_DEBUG +#define PMEPRINFO(fmt, args...) pr_info("PME2: %s: " fmt, __func__, ## args) +#else +#define PMEPRINFO(fmt, args...) +#endif + +#define PMEPRERR(fmt, args...) pr_err("PME2: %s: " fmt, __func__, ## args) +#define PMEPRCRIT(fmt, args...) pr_crit("PME2: %s: " fmt, __func__, ## args) + +#ifdef CONFIG_FSL_PME2_CTRL +/* Hooks */ +int pme2_create_sysfs_dev_files(struct platform_device *ofdev); +void pme2_remove_sysfs_dev_files(struct platform_device *ofdev); +void accumulator_update_interval(u32 interval); +#endif + +static inline void set_fd_addr(struct qm_fd *fd, dma_addr_t addr) +{ + qm_fd_addr_set64(fd, addr); +} +static inline dma_addr_t get_fd_addr(const struct qm_fd *fd) +{ + return (dma_addr_t)qm_fd_addr_get64(fd); +} +static inline void set_sg_addr(struct qm_sg_entry *sg, dma_addr_t addr) +{ + qm_sg_entry_set64(sg, addr); +} +static inline dma_addr_t get_sg_addr(const struct qm_sg_entry *sg) +{ + return (dma_addr_t)qm_sg_entry_get64(sg); +} + +/******************/ +/* Datapath types */ +/******************/ + +enum pme_mode { + pme_mode_direct = 0x00, + pme_mode_flow = 0x80 +}; + +struct pme_context_a { + enum pme_mode mode:8; + u8 __reserved; + /* Flow Context pointer (48-bit), ignored if mode==direct */ + u16 flow_hi; + u32 flow_lo; +} __packed; +static inline u64 pme_context_a_get64(const struct pme_context_a *p) +{ + return ((u64)p->flow_hi << 32) | (u64)p->flow_lo; +} +/* Macro, so we compile better if 'v' isn't always 64-bit */ +#define pme_context_a_set64(p, v) \ + do { \ + struct pme_context_a *__p931 = (p); \ + __p931->flow_hi = upper_32_bits(v); \ + __p931->flow_lo = lower_32_bits(v); \ + } while (0) + +struct pme_context_b { + u32 rbpid:8; + u32 rfqid:24; +} __packed; + + +/* This is the 32-bit frame "cmd/status" field, sent to PME */ +union pme_cmd { + struct pme_cmd_nop { + enum pme_cmd_type cmd:3; + } nop; + struct pme_cmd_flow_read { + enum pme_cmd_type cmd:3; + } fcr; + struct pme_cmd_flow_write { + enum pme_cmd_type cmd:3; + u8 __reserved:5; + u8 flags; /* See PME_CMD_FCW_*** */ + } __packed fcw; + struct pme_cmd_pmtcc { + enum pme_cmd_type cmd:3; + } pmtcc; + struct pme_cmd_scan { + union { + struct { + enum pme_cmd_type cmd:3; + u8 flags:5; /* See PME_CMD_SCAN_*** */ + } __packed; + }; + u8 set; + u16 subset; + } __packed scan; +}; + +/* The exported macro forms a "scan_args" u32 from 3 inputs, these private + * inlines do the inverse, if you need to crack one apart. 
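+ * E.g., judging by the shifts below, the packing is
+ * (flags << 24) | (set << 16) | subset, so for args == 0x05AA1234:
+ * scan_args_get_flags() == 0x05, scan_args_get_set() == 0xAA and
+ * scan_args_get_subset() == 0x1234.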
*/ +static inline u8 scan_args_get_flags(u32 args) +{ + return args >> 24; +} +static inline u8 scan_args_get_set(u32 args) +{ + return (args >> 16) & 0xff; +} +static inline u16 scan_args_get_subset(u32 args) +{ + return args & 0xffff; +} + +/* Hook from pme2_high to pme2_low */ +struct qman_fq *slabfq_alloc(void); +void slabfq_free(struct qman_fq *fq); + +/* Hook from pme2_high to pme2_ctrl */ +int pme2_have_control(void); +int pme2_exclusive_set(struct qman_fq *fq); +int pme2_exclusive_unset(void); + +#define DECLARE_GLOBAL(name, t, mt, def, desc) \ + static t name = def; \ + module_param(name, mt, 0644); \ + MODULE_PARM_DESC(name, desc ", default: " __stringify(def)); + +/* Constants used by the SRE ioctl. */ +#define PME_PMFA_SRE_POLL_MS 100 +#define PME_PMFA_SRE_INDEX_MAX (1 << 27) +#define PME_PMFA_SRE_INC_MAX (1 << 12) +#define PME_PMFA_SRE_REP_MAX (1 << 28) +#define PME_PMFA_SRE_INTERVAL_MAX (1 << 12) + +/* Encapsulations for mapping */ +#define flow_map(flow) \ +({ \ + struct pme_flow *__f913 = (flow); \ + pme_map(__f913); \ +}) + +#define residue_map(residue) \ +({ \ + struct pme_hw_residue *__f913 = (residue); \ + pme_map(__f913); \ +}) + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_regs.h +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_regs.h @@ -0,0 +1,173 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef PME2_REGS_H +#define PME2_REGS_H + +#define PME_REG_ISR 0x000 +#define PME_REG_IER 0x004 +#define PME_REG_ISDR 0x008 +#define PME_REG_IIR 0x00C +#define PME_REG_RLL 0x014 +#define PME_REG_CDCR 0x018 +#define PME_REG_TRUNCI 0x024 +#define PME_REG_RBC 0x028 +#define PME_REG_ESR 0x02C +#define PME_REG_ECR0 0x030 +#define PME_REG_ECR1 0x034 +#define PME_REG_EFQC 0x050 +#define PME_REG_FACONF 0x060 +#define PME_REG_PMSTAT 0x064 +#define PME_REG_FAMCR 0x068 +#define PME_REG_PMTR 0x06C +#define PME_REG_PEHD 0x074 +#define PME_REG_BSC0 0x080 +#define PME_REG_BSC1 0x084 +#define PME_REG_BSC2 0x088 +#define PME_REG_BSC3 0x08C +#define PME_REG_BSC4 0x090 +#define PME_REG_BSC5 0x094 +#define PME_REG_BSC6 0x098 +#define PME_REG_BSC7 0x09C +#define PME_REG_QMBFD0 0x0E0 +#define PME_REG_QMBFD1 0x0E4 +#define PME_REG_QMBFD2 0x0E8 +#define PME_REG_QMBFD3 0x0EC +#define PME_REG_QMBCTXTAH 0x0F0 +#define PME_REG_QMBCTXTAL 0x0F4 +#define PME_REG_QMBCTXTB 0x0F8 +#define PME_REG_QMBCTL 0x0FC +#define PME_REG_ECC1BES 0x100 +#define PME_REG_ECC2BES 0x104 +#define PME_REG_ECCADDR 0x110 +#define PME_REG_ECCCODE 0x118 +#define PME_REG_TBT0ECC1TH 0x180 +#define PME_REG_TBT0ECC1EC 0x184 +#define PME_REG_TBT1ECC1TH 0x188 +#define PME_REG_TBT1ECC1EC 0x18C +#define PME_REG_VLT0ECC1TH 0x190 +#define PME_REG_VLT0ECC1EC 0x194 +#define PME_REG_VLT1ECC1TH 0x198 +#define PME_REG_VLT1ECC1EC 0x19C +#define PME_REG_CMECC1TH 0x1A0 +#define PME_REG_CMECC1EC 0x1A4 +#define PME_REG_DXCMECC1TH 0x1B0 +#define PME_REG_DXCMECC1EC 0x1B4 +#define PME_REG_DXEMECC1TH 0x1C0 +#define PME_REG_DXEMECC1EC 0x1C4 +#define PME_REG_STNIB 0x200 +#define PME_REG_STNIS 0x204 +#define PME_REG_STNTH1 0x208 +#define PME_REG_STNTH2 0x20C +#define PME_REG_STNTHV 0x210 +#define PME_REG_STNTHS 0x214 +#define PME_REG_STNCH 0x218 +#define PME_REG_SWDB 0x21C +#define PME_REG_KVLTS 0x220 +#define PME_REG_KEC 0x224 +#define PME_REG_STNPM 0x280 +#define PME_REG_STNS1M 0x284 +#define PME_REG_DRCIC 0x288 +#define PME_REG_DRCMC 0x28C +#define PME_REG_STNPMR 0x290 +#define PME_REG_PDSRBAH 0x2A0 +#define PME_REG_PDSRBAL 0x2A4 +#define PME_REG_DMCR 0x2A8 +#define PME_REG_DEC0 0x2AC +#define PME_REG_DEC1 0x2B0 +#define PME_REG_DLC 0x2C0 +#define PME_REG_STNDSR 0x300 +#define PME_REG_STNESR 0x304 +#define PME_REG_STNS1R 0x308 +#define PME_REG_STNOB 0x30C +#define PME_REG_SCBARH 0x310 +#define PME_REG_SCBARL 0x314 +#define PME_REG_SMCR 0x318 +#define PME_REG_SREC 0x320 +#define PME_REG_ESRP 0x328 +#define PME_REG_SRRV0 0x338 +#define PME_REG_SRRV1 0x33C +#define PME_REG_SRRV2 0x340 +#define PME_REG_SRRV3 0x344 +#define PME_REG_SRRV4 0x348 +#define PME_REG_SRRV5 0x34C +#define PME_REG_SRRV6 0x350 +#define PME_REG_SRRV7 0x354 +#define PME_REG_SRRFI 0x358 +#define PME_REG_SRRI 0x360 +#define PME_REG_SRRR 0x364 +#define PME_REG_SRRWC 0x368 +#define PME_REG_SFRCC 0x36C +#define PME_REG_SEC1 0x370 +#define PME_REG_SEC2 0x374 +#define PME_REG_SEC3 0x378 +#define PME_REG_MIA_BYC 0x380 +#define PME_REG_MIA_BLC 0x384 +#define PME_REG_MIA_CE 0x388 +#define PME_REG_MIA_CR 0x390 +#define PME_REG_PPIDMR0 0x800 +#define PME_REG_PPIDMR1 0x804 +#define PME_REG_PPIDMR2 0x808 +#define PME_REG_PPIDMR3 0x80C +#define PME_REG_PPIDMR4 0x810 +#define PME_REG_PPIDMR5 0x814 +#define PME_REG_PPIDMR6 0x818 +#define PME_REG_PPIDMR7 0x81C +#define PME_REG_PPIDMR8 0x820 +#define PME_REG_PPIDMR9 0x824 +#define PME_REG_PPIDMR10 0x828 +#define PME_REG_PPIDMR11 0x82C +#define PME_REG_PPIDMR12 0x830 +#define PME_REG_PPIDMR13 0x834 +#define PME_REG_PPIDMR14 0x838 +#define PME_REG_PPIDMR15 0x83C 
+#define PME_REG_PPIDMR16 0x840 +#define PME_REG_PPIDMR17 0x844 +#define PME_REG_PPIDMR18 0x848 +#define PME_REG_PPIDMR19 0x84C +#define PME_REG_PPIDMR20 0x850 +#define PME_REG_PPIDMR21 0x854 +#define PME_REG_PPIDMR22 0x858 +#define PME_REG_PPIDMR23 0x85C +#define PME_REG_PPIDMR24 0x860 +#define PME_REG_PPIDMR25 0x864 +#define PME_REG_PPIDMR26 0x868 +#define PME_REG_PPIDMR27 0x86C +#define PME_REG_PPIDMR28 0x870 +#define PME_REG_PPIDMR29 0x874 +#define PME_REG_PPIDMR30 0x878 +#define PME_REG_PPIDMR31 0x87C +#define PME_REG_SRCIDR 0xA00 +#define PME_REG_LIODNR 0xA0C +#define PME_REG_PM_IP_REV1 0xBF8 +#define PME_REG_PM_IP_REV2 0xBFC + +#endif /* REGS_H */ --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_sample_db.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_sample_db.c @@ -0,0 +1,426 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "pme2_test.h" + +static u8 pme_db[] = { + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* Rev 2.1 */ + 0x00, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x90, 0x41, 0x40, 0x20, 0x00, 0x11, +/* Rev 2.0 */ +/* 0x00, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, + 0x20, 0x41, 0x40, 0x20, 0x00, 0x11, */ + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* Rev 2.1 */ + 0x00, 0x0d, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x90, 0x41, 0xff, 0x81, 0x00, 0x00, +/* Rev 2.0 */ +/* 0x00, 0x0d, 0x00, 0x00, 0x00, 0x04, 0x00, 0x01, + 0x20, 0x41, 0xff, 0x81, 0x00, 0x00, */ + 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x01, + 0x01, 0xff, 0x80, 0x00, 0x41, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x18, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, + 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, + 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, + 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, + 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, + 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, + 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, + 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x41, 0x42, 0x43, + 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, + 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, + 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x7b, + 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, + 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, + 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, + 
0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, + 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, + 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, + 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, + 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, + 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, + 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, + 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, + 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, + 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, + 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, + 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, + 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, + 0xfc, 0xfd, 0xfe, 0xff +}; + +static u8 db_read[] = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, +/* Rev 2.1 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x90, 0x41 +/* Rev 2.0 */ +/* 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, + 0x20, 0x41 */ +}; + +static u8 db_read_expected_result[] = { + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, +/* Rev 2.1 */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, + 0x90, 0x41, 0x40, 0x20, 0x00, 0x11 +/* Rev 2.0 */ +/* 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, + 0x20, 0x41, 0x40, 0x20, 0x00, 0x11*/ +}; + +struct pmtcc_ctx { + struct pme_ctx base_ctx; + struct qm_fd result_fd; + struct completion done; + u8 ern; +}; + +static void pmtcc_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_token *ctx_token) +{ + struct pmtcc_ctx *my_ctx = (struct pmtcc_ctx *)ctx; + memcpy(&my_ctx->result_fd, fd, sizeof(*fd)); + complete(&my_ctx->done); +} + +static void pmtcc_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr, + struct pme_ctx_token *ctx_token) +{ + struct pmtcc_ctx *my_ctx = (struct pmtcc_ctx *)ctx; + my_ctx->result_fd = mr->ern.fd; + my_ctx->ern = 1; + complete(&my_ctx->done); +} + +#define FIRST_PMTCC 56 +int pme2_clear_sample_db(void) +{ + struct pmtcc_ctx ctx = { + .base_ctx.cb = pmtcc_cb, + .base_ctx.ern_cb = pmtcc_ern_cb, + .ern = 0 + }; + struct qm_fd fd; + int ret = 0; + enum pme_status status; + struct pme_ctx_token token; + void *mem; + struct cpumask backup_mask = current->cpus_allowed; + struct cpumask new_mask = *qman_affine_cpus(); + + cpumask_and(&new_mask, &new_mask, bman_affine_cpus()); + ret = set_cpus_allowed_ptr(current, &new_mask); + if (ret) { + pr_info("cleanr_sample_db: can't set cpumask\n"); + goto _clear_0; + } + init_completion(&ctx.done); + ret = pme_ctx_init(&ctx.base_ctx, + PME_CTX_FLAG_EXCLUSIVE | + PME_CTX_FLAG_PMTCC | + PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL); + if (ret) { + pr_err("clear_sample_db: can't init ctx\n"); + goto _clear_1; + } + + /* enable the context */ + ret = pme_ctx_enable(&ctx.base_ctx); + if (ret) { + pr_err("clear_sample_db: can't enable ctx\n"); + goto _clear_2; + } + + /* Write the database */ + memset(&fd, 0, sizeof(struct qm_fd)); + mem = kmalloc(FIRST_PMTCC, GFP_KERNEL); + if (!mem) + goto _clear_3; + memcpy(mem, pme_db, FIRST_PMTCC); + + fd.length20 = FIRST_PMTCC; + qm_fd_addr_set64(&fd, pme_map(mem)); + + ret = pme_ctx_pmtcc(&ctx.base_ctx, PME_CTX_OP_WAIT, &fd, &token); + if (ret == -ENODEV) { + pr_err("clear_sample_db: not the control plane, bailing\n"); + goto _clear_4; + } + if (ret) { + pr_err("clear_sample_db: error with pmtcc\n"); + goto _clear_4; + } + wait_for_completion(&ctx.done); + if (ctx.ern) { + pr_err("clear_sample_db: Rx ERN from pmtcc\n"); + goto _clear_4; + } + status = pme_fd_res_status(&ctx.result_fd); + if (status) { 
+ pr_info("clear_sample_db: PMTCC write status failed %d\n", + status); + goto _clear_4; + } +_clear_4: + kfree(mem); +_clear_3: + /* Disable */ + ret = pme_ctx_disable(&ctx.base_ctx, + PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT, NULL); +_clear_2: + pme_ctx_finish(&ctx.base_ctx); +_clear_1: + ret = set_cpus_allowed_ptr(current, &backup_mask); + if (ret) + pr_info("clear_sample_db: can't restore cpumask"); +_clear_0: + if (!ret) + pr_info("clear_sample_db: Done\n"); + else + pr_info("clear_sample_db: Error 0x%x\n", ret); + return ret; + +} + +int pme2_sample_db(void) +{ + struct pmtcc_ctx ctx = { + .base_ctx.cb = pmtcc_cb, + .base_ctx.ern_cb = pmtcc_ern_cb, + .ern = 0 + }; + struct qm_fd fd; + struct qm_sg_entry *sg_table = NULL; + int ret = 0; + enum pme_status status; + struct pme_ctx_token token; + void *mem = NULL, *mem_result = NULL; + u32 pme_rev; + struct cpumask backup_mask = current->cpus_allowed; + struct cpumask new_mask = *qman_affine_cpus(); + + cpumask_and(&new_mask, &new_mask, bman_affine_cpus()); + ret = set_cpus_allowed_ptr(current, &new_mask); + if (ret) { + pr_info("sample_db: can't set cpumask\n"); + goto _finish_0; + } + ret = pme_attr_get(pme_attr_rev1, &pme_rev); + if (ret) { + pr_err("sample_db: can't read pme revision %d\n", ret); + goto _finish_1; + } + /* If Rev 2.0...update database */ + if ((pme_rev & 0x0000FFFF) == 0x00000200) { + pr_info("sample_db: db for pme ver 2.0\n"); + pme_db[133] = 0x01; + pme_db[134] = 0x20; + pme_db[161] = 0x01; + pme_db[162] = 0x20; + db_read[21] = 0x01; + db_read[22] = 0x20; + db_read_expected_result[21] = 0x01; + db_read_expected_result[22] = 0x20; + } else + pr_info("sample_db: db for pme ver 2.1 or greater\n"); + init_completion(&ctx.done); + ret = pme_ctx_init(&ctx.base_ctx, + PME_CTX_FLAG_EXCLUSIVE | + PME_CTX_FLAG_PMTCC | + PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL); + if (ret) { + pr_err("sample_db: can't init ctx\n"); + goto _finish_1; + } + + /* enable the context */ + ret = pme_ctx_enable(&ctx.base_ctx); + if (ret) { + pr_err("sample_db: can't enable ctx\n"); + goto _finish_2; + } + + /* Write the database */ + memset(&fd, 0, sizeof(struct qm_fd)); + mem = kmalloc(sizeof(pme_db), GFP_KERNEL); + if (!mem) + goto _finish_3; + memcpy(mem, pme_db, sizeof(pme_db)); + + fd.length20 = sizeof(pme_db); + qm_fd_addr_set64(&fd, pme_map(mem)); + + ret = pme_ctx_pmtcc(&ctx.base_ctx, PME_CTX_OP_WAIT, &fd, &token); + if (ret == -ENODEV) { + pr_err("sample_db: not the control plane, bailing\n"); + goto _finish_4; + } + if (ret) { + pr_err("sample_db: error with pmtcc\n"); + goto _finish_4; + } + wait_for_completion(&ctx.done); + if (ctx.ern) { + pr_err("sample_db: Rx ERN from pmtcc\n"); + goto _finish_4; + } + status = pme_fd_res_status(&ctx.result_fd); + if (status) { + pr_info("sample_db: PMTCC write status failed %d\n", status); + goto _finish_4; + } + kfree(mem); + mem = NULL; + /* Read back the database */ + init_completion(&ctx.done); + memset(&fd, 0, sizeof(struct qm_fd)); + sg_table = kzalloc(2 * sizeof(*sg_table), GFP_KERNEL | GFP_DMA); + mem_result = kmalloc(28, GFP_KERNEL); + mem = kmalloc(sizeof(db_read), GFP_KERNEL); + if (!sg_table || !mem || !mem_result) { + pr_err("sample_db: out of memory\n"); + ret = -ENOMEM; + goto _finish_4; + } + memcpy(mem, db_read, sizeof(db_read)); + qm_sg_entry_set64(&sg_table[0], pme_map(mem_result)); + sg_table[0].length = 28; + qm_sg_entry_set64(&sg_table[1], pme_map(mem)); + sg_table[1].length = sizeof(db_read); + sg_table[1].final = 1; + fd.format = qm_fd_compound; + qm_fd_addr_set64(&fd, 
pme_map(sg_table)); + ret = pme_ctx_pmtcc(&ctx.base_ctx, PME_CTX_OP_WAIT, &fd, &token); + if (ret) { + pr_err("sample_db: error with pmtcc\n"); + goto _finish_4; + } + wait_for_completion(&ctx.done); + if (ctx.ern) { + ret = -EINVAL; + pr_err("sample_db: Rx ERN from pmtcc\n"); + goto _finish_4; + } + status = pme_fd_res_status(&ctx.result_fd); + if (status) { + ret = -EINVAL; + pr_err("sample_db: PMTCC read status failed %d\n", status); + goto _finish_4; + } + if (pme_fd_res_flags(&ctx.result_fd) & PME_STATUS_UNRELIABLE) { + pr_err("sample_db: flags result set %x\n", + pme_fd_res_flags(&ctx.result_fd)); + ret = -EINVAL; + goto _finish_4; + } + if (memcmp(db_read_expected_result, mem_result, 28) != 0) { + pr_err("sample_db: DB read result not expected\n"); + pr_err("Expected\n"); + hexdump(db_read_expected_result, + sizeof(db_read_expected_result)); + pr_info("Received\n"); + hexdump(mem_result, 28); + ret = -EINVAL; + } +_finish_4: + kfree(mem_result); + kfree(sg_table); + kfree(mem); +_finish_3: + /* Disable */ + ret = pme_ctx_disable(&ctx.base_ctx, + PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT, NULL); +_finish_2: + pme_ctx_finish(&ctx.base_ctx); +_finish_1: + if (ret) + set_cpus_allowed_ptr(current, &backup_mask); + else { + ret = set_cpus_allowed_ptr(current, &backup_mask); + if (ret) + pr_info("sample_db: can't restore cpumask"); + } + +_finish_0: + if (!ret) + pr_info("pme: sample DB initialised\n"); + else + pr_info("pme: Error during sample DB 0x%x\n", ret); + return ret; +} + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_scan.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_scan.c @@ -0,0 +1,1111 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "pme2_private.h" +#include +#include +#include +#include + +#define WAIT_AND_INTERRUPTABLE (PME_CTX_OP_WAIT|PME_CTX_OP_WAIT_INT) +#define INPUT_FRM 1 +#define OUTPUT_FRM 0 +/* Private structure that is allocated for each open that is done on the + * pme_scan device. */ +struct scan_session { + /* The ctx that is needed to communicate with the pme high level */ + struct pme_ctx ctx; + /* Locks completed_commands */ + spinlock_t set_subset_lock; + __u8 set; + __u16 subset; + /* For asynchronous processing */ + wait_queue_head_t waiting_for_completion; + struct list_head completed_commands; + /* Locks completed_commands */ + spinlock_t completed_commands_lock; + u32 completed_count; +}; + +/* Command Token for scan operations. One of these is created for every + * operation on a context. When the context operation is complete cleanup + * is done */ +struct cmd_token { + /* pme high level token */ + struct pme_ctx_token hl_token; + /* The kernels copy of the user op structure */ + struct pme_scan_cmd kernel_op; + /* Set to non zero if this is a synchronous request */ + u8 synchronous; + /* data */ + struct qm_fd tx_fd; + struct qm_sg_entry tx_comp[2]; + struct qm_fd rx_fd; + void *tx_data; + size_t tx_size; + void *rx_data; + size_t rx_size; + /* For blocking requests, we need a wait point and condition */ + wait_queue_head_t *queue; + /* List management for completed async requests */ + struct list_head completed_list; + u8 done; + u8 ern; +}; + +struct ctrl_op { + struct pme_ctx_ctrl_token ctx_ctr; + struct completion cb_done; + enum pme_status cmd_status; + u8 res_flag; + u8 ern; +}; + +#ifdef CONFIG_COMPAT +static void compat_to_scan_cmd(struct pme_scan_cmd *dst, + struct compat_pme_scan_cmd *src) +{ + dst->flags = src->flags; + dst->opaque = compat_ptr(src->opaque); + dst->input.data = compat_ptr(src->input.data); + dst->input.size = src->input.size; + dst->output.data = compat_ptr(src->output.data); + dst->output.size = src->output.size; +} + +static void scan_result_to_compat(struct compat_pme_scan_result *dst, + struct pme_scan_result *src) +{ + dst->flags = src->flags; + dst->opaque = ptr_to_compat(src->opaque); + dst->status = src->status; + dst->output.data = ptr_to_compat(src->output.data); + dst->output.size = src->output.size; +} + +static void compat_to_scan_result(struct pme_scan_result *dst, + struct compat_pme_scan_result *src) +{ + dst->flags = src->flags; + dst->opaque = compat_ptr(src->opaque); + dst->status = src->status; + dst->output.data = compat_ptr(src->output.data); + dst->output.size = src->output.size; +} +#endif + +static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_ctrl_token *token) +{ + struct ctrl_op *ctrl = (struct ctrl_op *)token; + ctrl->cmd_status = pme_fd_res_status(fd); + ctrl->res_flag = pme_fd_res_flags(fd) & PME_STATUS_UNRELIABLE; + complete(&ctrl->cb_done); +} + +static void ctrl_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr, + struct pme_ctx_ctrl_token *token) +{ + struct ctrl_op *ctrl = (struct ctrl_op *)token; + ctrl->ern = 1; + complete(&ctrl->cb_done); +} + +static inline int scan_data_empty(struct scan_session *session) +{ + return list_empty(&session->completed_commands); +} + +/* Cleanup for the execute_cmd method */ +static inline void cleanup_token(struct cmd_token *token_p) +{ + kfree(token_p->tx_data); + kfree(token_p->rx_data); + return; +} + +/* Callback for scan operations */ +static void scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_token *ctx_token) +{ 
+ struct cmd_token *token = (struct cmd_token *)ctx_token; + struct scan_session *session = (struct scan_session *)ctx; + + token->rx_fd = *fd; + /* If this is an asynchronous command, queue the token */ + if (!token->synchronous) { + spin_lock(&session->completed_commands_lock); + list_add_tail(&token->completed_list, + &session->completed_commands); + session->completed_count++; + spin_unlock(&session->completed_commands_lock); + } + /* Wake up the thread that's waiting for us */ + token->done = 1; + wake_up(token->queue); + return; +} + +static void scan_ern_cb(struct pme_ctx *ctx, const struct qm_mr_entry *mr, + struct pme_ctx_token *ctx_token) +{ + struct cmd_token *token = (struct cmd_token *)ctx_token; + struct scan_session *session = (struct scan_session *)ctx; + + token->ern = 1; + token->rx_fd = mr->ern.fd; + /* If this is an asynchronous command, queue the token */ + if (!token->synchronous) { + spin_lock(&session->completed_commands_lock); + list_add_tail(&token->completed_list, + &session->completed_commands); + session->completed_count++; + spin_unlock(&session->completed_commands_lock); + } + /* Wake up the thread that's waiting for us */ + token->done = 1; + wake_up(token->queue); + return; +} + +static int process_completed_token(struct file *fp, struct cmd_token *token_p, + struct pme_scan_result *scan_result) +{ + int ret = 0; + u32 src_sz, dst_sz; + + memset(scan_result, 0, sizeof(struct pme_scan_result)); + if (token_p->ern) { + ret = -EIO; + goto done; + } + scan_result->output.data = token_p->kernel_op.output.data; + + if (token_p->rx_fd.format == qm_fd_compound) { + /* Need to copy output */ + src_sz = token_p->tx_comp[OUTPUT_FRM].length; + dst_sz = token_p->kernel_op.output.size; + scan_result->output.size = min(dst_sz, src_sz); + /* It doesn't make sense that we generated more than the available + * space; we should have got truncation.
+ */ + BUG_ON(dst_sz < src_sz); + if (copy_to_user(scan_result->output.data, token_p->rx_data, + scan_result->output.size)) { + pr_err("Error copying to user data\n"); + cleanup_token(token_p); + return -EFAULT; + } + } else if (token_p->rx_fd.format == qm_fd_sg_big) + scan_result->output.size = 0; + else + pr_err("pme2_scan: unexpected frame type received\n"); + + scan_result->flags |= pme_fd_res_flags(&token_p->rx_fd); + scan_result->status |= pme_fd_res_status(&token_p->rx_fd); +done: + scan_result->opaque = token_p->kernel_op.opaque; + cleanup_token(token_p); + return ret; +} + +static int getscan_cmd(struct file *fp, struct scan_session *session, + struct pme_scan_params __user *user_scan_params) +{ + int ret = 0; + struct pme_flow params; + struct pme_scan_params local_scan_params; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb, + .cmd_status = 0, + .res_flag = 0, + .ern = 0 + }; + init_completion(&ctx_ctrl.cb_done); + + memset(&local_scan_params, 0, sizeof(local_scan_params)); + + /* must be enabled */ + if (pme_ctx_is_disabled(&session->ctx)) { + pr_err("pme2_scan: ctx is disabled\n"); + ret = -EINVAL; + goto done; + } + ret = pme_ctx_ctrl_read_flow(&session->ctx, WAIT_AND_INTERRUPTABLE, + &params, &ctx_ctrl.ctx_ctr); + if (ret) { + PMEPRINFO("read flow error %d\n", ret); + goto done; + } + wait_for_completion(&ctx_ctrl.cb_done); + + if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + PMEPRINFO("read flow error %d\n", ctx_ctrl.cmd_status); + ret = -EFAULT; + goto done; + } + local_scan_params.residue.enable = params.ren; + local_scan_params.residue.length = params.rlen; + local_scan_params.sre.sessionid = params.sessionid; + local_scan_params.sre.verbose = params.srvm; + local_scan_params.sre.esee = params.esee; + local_scan_params.dxe.clim = params.clim; + local_scan_params.dxe.mlim = params.mlim; + spin_lock(&session->set_subset_lock); + local_scan_params.pattern.set = session->set; + local_scan_params.pattern.subset = session->subset; + spin_unlock(&session->set_subset_lock); + + if (copy_to_user(user_scan_params, &local_scan_params, + sizeof(local_scan_params))) { + pr_err("Error copying to user data\n"); + ret = -EFAULT; + } +done: + return ret; +} + +static int setscan_cmd(struct file *fp, struct scan_session *session, + struct pme_scan_params __user *user_params) +{ + int ret = 0; + u32 flag = WAIT_AND_INTERRUPTABLE; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb, + .cmd_status = 0, + .res_flag = 0, + .ern = 0 + }; + struct pme_flow params; + struct pme_scan_params local_params; + + pme_sw_flow_init(&params); + init_completion(&ctx_ctrl.cb_done); + if (copy_from_user(&local_params, user_params, sizeof(local_params))) + return -EFAULT; + + /* must be enabled */ + if (pme_ctx_is_disabled(&session->ctx)) { + ret = -EINVAL; + goto done; + } + /* Only send a flw_ctx_w if PME_SCAN_PARAMS_{RESIDUE, SRE or DXE} + * is being done */ + if (local_params.flags == PME_SCAN_PARAMS_PATTERN) + goto set_subset; + if (local_params.flags & PME_SCAN_PARAMS_RESIDUE) + flag |= PME_CMD_FCW_RES; + if (local_params.flags & PME_SCAN_PARAMS_SRE) + flag |= PME_CMD_FCW_SRE; + if (local_params.flags & PME_SCAN_PARAMS_DXE) + flag |= PME_CMD_FCW_DXE; + params.ren = local_params.residue.enable; + params.sessionid = local_params.sre.sessionid; + params.srvm = local_params.sre.verbose; + params.esee = local_params.sre.esee; + params.clim = local_params.dxe.clim; + params.mlim = local_params.dxe.mlim; + + ret =
pme_ctx_ctrl_update_flow(&session->ctx, flag, &params, + &ctx_ctrl.ctx_ctr); + if (ret) { + PMEPRINFO("update flow error %d\n", ret); + goto done; + } + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + PMEPRINFO("update flow err %d\n", ctx_ctrl.cmd_status); + ret = -EFAULT; + goto done; + } + +set_subset: + if (local_params.flags & PME_SCAN_PARAMS_PATTERN) { + spin_lock(&session->set_subset_lock); + session->set = local_params.pattern.set; + session->subset = local_params.pattern.subset; + spin_unlock(&session->set_subset_lock); + goto done; + } +done: + return ret; +} + +static int resetseq_cmd(struct file *fp, struct scan_session *session) +{ + int ret = 0; + struct pme_flow params; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb, + .cmd_status = 0, + .res_flag = 0, + .ern = 0 + }; + init_completion(&ctx_ctrl.cb_done); + pme_sw_flow_init(&params); + + /* must be enabled */ + if (pme_ctx_is_disabled(&session->ctx)) { + pr_err("pme2_scan: ctx is disabled\n"); + ret = -EINVAL; + goto done; + } + pme_flow_seqnum_set64(&params, 0); + params.sos = 1; + + ret = pme_ctx_ctrl_update_flow(&session->ctx, PME_CMD_FCW_SEQ, &params, + &ctx_ctrl.ctx_ctr); + if (ret) { + pr_err("pme2_scan: update flow error %d\n", ret); + return ret; + } + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + PMEPRINFO("update flow err %d\n", ctx_ctrl.cmd_status); + ret = -EFAULT; + } +done: + return ret; +} + +static int resetresidue_cmd(struct file *fp, struct scan_session *session) +{ + int ret = 0; + struct pme_flow params; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb, + .cmd_status = 0, + .res_flag = 0, + .ern = 0 + }; + + init_completion(&ctx_ctrl.cb_done); + pme_sw_flow_init(&params); + /* must be enabled */ + if (pme_ctx_is_disabled(&session->ctx)) { + pr_err("pme2_scan: ctx is disabled\n"); + ret = -EINVAL; + goto done; + } + params.rlen = 0; + ret = pme_ctx_ctrl_update_flow(&session->ctx, + WAIT_AND_INTERRUPTABLE | PME_CTX_OP_RESETRESLEN, + &params, &ctx_ctrl.ctx_ctr); + if (ret) + pr_info("pme2_scan: update flow error %d\n", ret); + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + PMEPRINFO("update flow err %d\n", ctx_ctrl.cmd_status); + ret = -EFAULT; + } +done: + return ret; +} + +static int process_scan_cmd( + struct file *fp, + struct scan_session *session, + struct pme_scan_cmd *user_cmd, + struct pme_scan_result *user_ret, + u8 synchronous) +{ + int ret = 0; + struct cmd_token local_token; + struct cmd_token *token_p = NULL; + DECLARE_WAIT_QUEUE_HEAD(local_waitqueue); + u8 scan_flags = 0; + + BUG_ON(synchronous && !user_ret); + + /* If synchronous, use a local token (from the stack) + * If asynchronous, allocate a token to use */ + if (synchronous) + token_p = &local_token; + else { + token_p = kmalloc(sizeof(*token_p), GFP_KERNEL); + if (!token_p) + return -ENOMEM; + } + memset(token_p, 0, sizeof(*token_p)); + /* Copy the command to kernel space */ + memcpy(&token_p->kernel_op, user_cmd, sizeof(struct pme_scan_cmd)); + /* Copy the input */ + token_p->synchronous = synchronous; + token_p->tx_size = token_p->kernel_op.input.size; + token_p->tx_data = kmalloc(token_p->kernel_op.input.size, GFP_KERNEL); + if (!token_p->tx_data) { + pr_err("pme2_scan: Err alloc %zd byte", token_p->tx_size); + cleanup_token(token_p); + return -ENOMEM; + } + if (copy_from_user(token_p->tx_data, +
token_p->kernel_op.input.data, + token_p->kernel_op.input.size)) { + pr_err("Error copying contigous user data\n"); + cleanup_token(token_p); + return -EFAULT; + } + /* Setup input frame */ + token_p->tx_comp[INPUT_FRM].final = 1; + token_p->tx_comp[INPUT_FRM].length = token_p->tx_size; + qm_sg_entry_set64(&token_p->tx_comp[INPUT_FRM], + pme_map(token_p->tx_data)); + /* setup output frame, if output is expected */ + if (token_p->kernel_op.output.size) { + token_p->rx_size = token_p->kernel_op.output.size; + PMEPRINFO("pme2_scan: expect output %d\n", token_p->rx_size); + token_p->rx_data = kmalloc(token_p->rx_size, GFP_KERNEL); + if (!token_p->rx_data) { + pr_err("pme2_scan: Err alloc %zd byte", + token_p->rx_size); + cleanup_token(token_p); + return -ENOMEM; + } + /* Setup output frame */ + token_p->tx_comp[OUTPUT_FRM].length = token_p->rx_size; + qm_sg_entry_set64(&token_p->tx_comp[OUTPUT_FRM], + pme_map(token_p->rx_data)); + token_p->tx_fd.format = qm_fd_compound; + /* Build compound frame */ + qm_fd_addr_set64(&token_p->tx_fd, + pme_map(token_p->tx_comp)); + } else { + token_p->tx_fd.format = qm_fd_sg_big; + /* Build sg frame */ + qm_fd_addr_set64(&token_p->tx_fd, + pme_map(&token_p->tx_comp[INPUT_FRM])); + token_p->tx_fd.length29 = token_p->tx_size; + } + + /* use the local wait queue if synchronous, the shared + * queue if asynchronous */ + if (synchronous) + token_p->queue = &local_waitqueue; + else + token_p->queue = &session->waiting_for_completion; + token_p->done = 0; + + if (token_p->kernel_op.flags & PME_SCAN_CMD_STARTRESET) + scan_flags |= PME_CMD_SCAN_SR; + if (token_p->kernel_op.flags & PME_SCAN_CMD_END) + scan_flags |= PME_CMD_SCAN_E; + ret = pme_ctx_scan(&session->ctx, WAIT_AND_INTERRUPTABLE, + &token_p->tx_fd, + PME_SCAN_ARGS(scan_flags, session->set, session->subset), + &token_p->hl_token); + if (unlikely(ret)) { + cleanup_token(token_p); + return ret; + } + + if (!synchronous) + /* Don't wait. The command is away */ + return 0; + + PMEPRINFO("Wait for completion\n"); + /* Wait for the command to complete */ + /* TODO: Should this be wait_event_interruptible ? + * If so, will need logic to indicate */ + wait_event(*token_p->queue, token_p->done == 1); + return process_completed_token(fp, token_p, user_ret); +} + +/** + * fsl_pme2_scan_open - open the driver + * + * Open the driver and prepare for requests. + * + * Every time an application opens the driver, we create a scan_session object + * for that file handle. 
+ */ +static int fsl_pme2_scan_open(struct inode *node, struct file *fp) +{ + int ret; + struct scan_session *session; + struct pme_flow flow; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb, + .cmd_status = 0, + .res_flag = 0, + .ern = 0 + }; + + pme_sw_flow_init(&flow); + init_completion(&ctx_ctrl.cb_done); + PMEPRINFO("pme2_scan: open %d\n", smp_processor_id()); + fp->private_data = kzalloc(sizeof(*session), GFP_KERNEL); + if (!fp->private_data) + return -ENOMEM; + session = (struct scan_session *)fp->private_data; + /* Set up the structures used for asynchronous requests */ + init_waitqueue_head(&session->waiting_for_completion); + INIT_LIST_HEAD(&session->completed_commands); + spin_lock_init(&session->completed_commands_lock); + spin_lock_init(&session->set_subset_lock); + PMEPRINFO("kmalloc session %p\n", fp->private_data); + session = fp->private_data; + session->ctx.cb = scan_cb; + session->ctx.ern_cb = scan_ern_cb; + + /* qosin, qosout should be driver attributes */ + ret = pme_ctx_init(&session->ctx, PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL); + if (ret) { + pr_err("pme2_scan: pme_ctx_init %d\n", ret); + goto exit; + } + /* enable the context */ + ret = pme_ctx_enable(&session->ctx); + if (ret) { + PMEPRINFO("error enabling ctx %d\n", ret); + pme_ctx_finish(&session->ctx); + goto exit; + } + /* Update flow to set sane defaults in the flow context */ + ret = pme_ctx_ctrl_update_flow(&session->ctx, + PME_CTX_OP_WAIT | PME_CMD_FCW_ALL, &flow, &ctx_ctrl.ctx_ctr); + if (!ret) { + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.ern || ctx_ctrl.cmd_status || ctx_ctrl.res_flag) + ret = -EFAULT; + } + if (ret) { + int my_ret; + PMEPRINFO("error updating flow ctx %d\n", ret); + my_ret = pme_ctx_disable(&session->ctx, PME_CTX_OP_WAIT, + &ctx_ctrl.ctx_ctr); + if (my_ret > 0) + wait_for_completion(&ctx_ctrl.cb_done); + else if (my_ret < 0) + PMEPRINFO("error disabling ctx %d\n", ret); + pme_ctx_finish(&session->ctx); + goto exit; + } + /* Set up the structures used for asynchronous requests */ + PMEPRINFO("pme2_scan: Finish pme_scan open %d\n", smp_processor_id()); + return 0; +exit: + kfree(fp->private_data); + fp->private_data = NULL; + return ret; +} + +static int fsl_pme2_scan_close(struct inode *node, struct file *fp) +{ + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .ctx_ctr.ern_cb = ctrl_ern_cb, + .cmd_status = 0, + .res_flag = 0, + .ern = 0 + }; + int ret = 0; + struct scan_session *session = fp->private_data; + + init_completion(&ctx_ctrl.cb_done); + /* Before disabling check to see if it's already disabled. 
This can + * happen if a pme serious error has occurred for instance.*/ + if (!pme_ctx_is_disabled(&session->ctx)) { + ret = pme_ctx_disable(&session->ctx, PME_CTX_OP_WAIT, + &ctx_ctrl.ctx_ctr); + if (ret > 0) { + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.ern) + PMEPRCRIT("Unexpected ERN\n"); + } else if (ret < 0) { + pr_err("pme2_scan: Error disabling ctx %d\n", ret); + return ret; + } + } + pme_ctx_finish(&session->ctx); + kfree(session); + PMEPRINFO("pme2_scan: Finish pme_session close\n"); + return 0; +} + +static unsigned int fsl_pme2_scan_poll(struct file *fp, + struct poll_table_struct *wait) +{ + struct scan_session *session; + unsigned int mask = POLLOUT | POLLWRNORM; + + if (!fp->private_data) + return -EINVAL; + + session = (struct scan_session *)fp->private_data; + + poll_wait(fp, &session->waiting_for_completion, wait); + + if (!scan_data_empty(session)) + mask |= (POLLIN | POLLRDNORM); + return mask; +} + + +/* Main switch loop for ioctl operations */ +static long fsl_pme2_scan_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + struct scan_session *session = fp->private_data; + int ret = 0; + + switch (cmd) { + + case PMEIO_GETSCAN: + return getscan_cmd(fp, session, (struct pme_scan_params *)arg); + break; + + case PMEIO_SETSCAN: + return setscan_cmd(fp, session, (struct pme_scan_params *)arg); + break; + + case PMEIO_RESETSEQ: + return resetseq_cmd(fp, session); + break; + + case PMEIO_RESETRES: + return resetresidue_cmd(fp, session); + break; + + case PMEIO_SCAN: + { + int ret; + struct pme_scan scan; + + if (copy_from_user(&scan, (void __user *)arg, sizeof(scan))) + return -EFAULT; + ret = process_scan_cmd(fp, session, &scan.cmd, &scan.result, 1); + if (!ret) { + struct pme_scan_result __user *user_result = + &((struct pme_scan __user *)arg)->result; + ret = copy_to_user(user_result, &scan.result, + sizeof(*user_result)); + } + return ret; + } + break; + + case PMEIO_SCAN_W1: + { + struct pme_scan_cmd scan_cmd; + + if (copy_from_user(&scan_cmd, (void __user *)arg, + sizeof(scan_cmd))) + return -EFAULT; + return process_scan_cmd(fp, session, &scan_cmd, NULL, 0); + } + break; + + case PMEIO_SCAN_R1: + { + struct pme_scan_result result; + struct cmd_token *completed_cmd = NULL; + struct pme_scan_result __user *ur = + (struct pme_scan_result __user *)arg; + int ret; + + if (copy_from_user(&result, (void __user *)arg, + sizeof(result))) + return -EFAULT; + + /* Check to see if any results */ + spin_lock(&session->completed_commands_lock); + if (!list_empty(&session->completed_commands)) { + completed_cmd = list_first_entry( + &session->completed_commands, + struct cmd_token, + completed_list); + list_del(&completed_cmd->completed_list); + session->completed_count--; + } + spin_unlock(&session->completed_commands_lock); + if (completed_cmd) { + ret = process_completed_token(fp, completed_cmd, + &result); + if (!ret) + ret = copy_to_user(ur, &result, sizeof(result)); + return ret; + } else + return -EIO; + } + break; + + case PMEIO_SCAN_Wn: + { + struct pme_scan_cmds scan_cmds; + int i, ret = 0; + + /* Copy the command to kernel space */ + if (copy_from_user(&scan_cmds, (void __user *)arg, + sizeof(scan_cmds))) + return -EFAULT; + PMEPRINFO("Received Wn for %d cmds\n", scan_cmds.num); + for (i = 0; i < scan_cmds.num; i++) { + struct pme_scan_cmd scan_cmd; + + if (copy_from_user(&scan_cmd, &scan_cmds.cmds[i], + sizeof(scan_cmd))) { + pr_err("pme2_scan: Err with %d\n", i); + scan_cmds.num = i; + if (copy_to_user((void __user *)arg, &scan_cmds, + 
sizeof(scan_cmds))) { + return -EFAULT; + } + return -EFAULT; + } + ret = process_scan_cmd(fp, session, &scan_cmd, NULL, 0); + if (ret) { + pr_err("pme2_scan: Err with %d cmd %d\n", + i, ret); + scan_cmds.num = i; + if (copy_to_user((void *)arg, &scan_cmds, + sizeof(scan_cmds))) { + pr_err("Error copying to user data\n"); + return -EFAULT; + } + return -EINTR; + } + } + return ret; + } + break; + + case PMEIO_SCAN_Rn: + { + struct pme_scan_results results; + struct pme_scan_result result; + struct pme_scan_result __user *ur; + int i = 0, ret = 0; + struct cmd_token *completed_cmd = NULL; + + /* Copy the command to kernel space */ + if (copy_from_user(&results, (void __user *)arg, + sizeof(results))) + return -EFAULT; + ur = ((struct pme_scan_results __user *)arg)->results + PMEPRINFO("pme2_scan: Received Rn for %d res\n", results.num); + if (!results.num) + return 0; + do { + completed_cmd = NULL; + ret = 0; + /* Check to see if any results */ + spin_lock(&session->completed_commands_lock); + if (!list_empty(&session->completed_commands)) { + /* Move to a different list */ + PMEPRINFO("pme2_scan: Pop response\n"); + completed_cmd = list_first_entry( + &session->completed_commands, + struct cmd_token, + completed_list); + list_del(&completed_cmd->completed_list); + session->completed_count--; + } + spin_unlock(&session->completed_commands_lock); + if (completed_cmd) { + if (copy_from_user(&result, (void __user *)ur+i, + sizeof(result))) + return -EFAULT; + ret = process_completed_token(fp, completed_cmd, + &result); + if (!ret) + ret = copy_to_user(ur, &result, + sizeof(struct pme_scan_result)); + if (!ret) { + i++; + ur++; + } + } + } while (!ret && completed_cmd && (i != results.num)); + + if (i != results.num) { + PMEPRINFO("pme2_scan: Only filled %d responses\n", i); + results.num = i; + PMEPRINFO("pme2_scan: results.num = %d\n", results.num); + if (copy_to_user((void __user *)arg, &results, + sizeof(struct pme_scan_results))) { + pr_err("Error copying to user data\n"); + return -EFAULT; + } + } + return ret; + } + break; + + case PMEIO_RELEASE_BUFS: + return -EINVAL; + break; + +#ifdef CONFIG_COMPAT + case PMEIO_SCAN32: + { + int ret; + struct compat_pme_scan scan32; + struct compat_pme_scan __user *user_scan = compat_ptr(arg); + struct pme_scan scan; + + if (copy_from_user(&scan32, user_scan, sizeof(scan32))) + return -EFAULT; + /* Convert to 64-bit structs */ + compat_to_scan_cmd(&scan.cmd, &scan32.cmd); + compat_to_scan_result(&scan.result, &scan32.result); + + ret = process_scan_cmd(fp, session, &scan.cmd, &scan.result, 1); + if (!ret) { + struct compat_pme_scan_result __user *user_result = + &user_scan->result; + /* Convert to 32-bit struct */ + scan_result_to_compat(&scan32.result, &scan.result); + ret = copy_to_user(user_result, &scan32.result, + sizeof(*user_result)); + } + return ret; + } + break; + + case PMEIO_SCAN_W132: + { + struct compat_pme_scan_cmd scan_cmd32; + struct pme_scan_cmd scan_cmd; + + if (copy_from_user(&scan_cmd32, compat_ptr(arg), + sizeof(scan_cmd32))) + return -EFAULT; + /* Convert to 64-bit struct */ + compat_to_scan_cmd(&scan_cmd, &scan_cmd32); + return process_scan_cmd(fp, session, &scan_cmd, NULL, 0); + } + break; + + case PMEIO_SCAN_R132: + { + struct compat_pme_scan_result result32; + struct pme_scan_result result; + struct cmd_token *completed_cmd = NULL; + struct compat_pme_scan_result __user *ur = compat_ptr(arg); + int ret; + + if (copy_from_user(&result32, (void __user *)arg, + sizeof(result32))) + return -EFAULT; + /* copy to 64-bit structure 
*/ + compat_to_scan_result(&result, &result32); + + /* Check to see if any results */ + spin_lock(&session->completed_commands_lock); + if (!list_empty(&session->completed_commands)) { + completed_cmd = list_first_entry( + &session->completed_commands, + struct cmd_token, + completed_list); + list_del(&completed_cmd->completed_list); + session->completed_count--; + } + spin_unlock(&session->completed_commands_lock); + if (completed_cmd) { + ret = process_completed_token(fp, completed_cmd, + &result); + scan_result_to_compat(&result32, &result); + ret = copy_to_user(ur, &result32, sizeof(result32)); + } else + return -EIO; + } + break; + + case PMEIO_SCAN_Wn32: + { + struct compat_pme_scan_cmds scan_cmds32; + int i, ret = 0; + + /* Copy the command to kernel space */ + if (copy_from_user(&scan_cmds32, compat_ptr(arg), + sizeof(scan_cmds32))) + return -EFAULT; + PMEPRINFO("Received Wn for %d cmds\n", scan_cmds32.num); + for (i = 0; i < scan_cmds32.num; i++) { + struct pme_scan_cmd scan_cmd; + struct compat_pme_scan_cmd __user *u_scan_cmd32; + struct compat_pme_scan_cmd scan_cmd32; + + u_scan_cmd32 = compat_ptr(scan_cmds32.cmds); + u_scan_cmd32 += i; + + if (copy_from_user(&scan_cmd32, u_scan_cmd32, + sizeof(scan_cmd32))) { + pr_err("pme2_scan: Err with %d\n", i); + scan_cmds32.num = i; + if (copy_to_user(compat_ptr(arg), &scan_cmds32, + sizeof(scan_cmds32))) + return -EFAULT; + return -EFAULT; + } + compat_to_scan_cmd(&scan_cmd, &scan_cmd32); + ret = process_scan_cmd(fp, session, &scan_cmd, NULL, 0); + if (ret) { + pr_err("pme2_scan: Err with %d cmd %d\n", + i, ret); + scan_cmds32.num = i; + if (copy_to_user(compat_ptr(arg), &scan_cmds32, + sizeof(scan_cmds32))) + return -EFAULT; + return -EINTR; + } + } + return ret; + } + break; + + case PMEIO_SCAN_Rn32: + { + struct compat_pme_scan_results results32; + int i = 0, ret = 0; + struct cmd_token *completed_cmd = NULL; + struct compat_pme_scan_result __user *ur; + + /* Copy the command to kernel space */ + if (copy_from_user(&results32, compat_ptr(arg), + sizeof(results32))) + return -EFAULT; + ur = compat_ptr(results32.results); + PMEPRINFO("pme2_scan: Rx Rn for %d res\n", results32.num); + if (!results32.num) + return 0; + do { + completed_cmd = NULL; + ret = 0; + /* Check to see if any results */ + spin_lock(&session->completed_commands_lock); + if (!list_empty(&session->completed_commands)) { + /* Move to a different list */ + PMEPRINFO("pme2_scan: Pop response\n"); + completed_cmd = list_first_entry( + &session->completed_commands, + struct cmd_token, + completed_list); + list_del(&completed_cmd->completed_list); + session->completed_count--; + } + spin_unlock(&session->completed_commands_lock); + if (completed_cmd) { + struct compat_pme_scan_result l_result32; + struct pme_scan_result result; + + if (copy_from_user(&l_result32, ur+i, + sizeof(l_result32))) + return -EFAULT; + compat_to_scan_result(&result, &l_result32); + ret = process_completed_token(fp, completed_cmd, + &result); + scan_result_to_compat(&l_result32, &result); + ret = copy_to_user(ur+i, &l_result32, + sizeof(l_result32)); + if (!ret) + i++; + } + } while (!ret && completed_cmd && (i != results32.num)); + + if (i != results32.num) { + PMEPRINFO("pme2_scan: Only filled %d responses\n", i); + results32.num = i; + PMEPRINFO("pme2_scan: results32.num = %d\n", + results32.num); + if (copy_to_user(compat_ptr(arg), &results32, + sizeof(struct pme_scan_results))) { + pr_err("Error copying to user data\n"); + return -EFAULT; + } + } + return ret; + } + break; +#endif /* 
CONFIG_COMPAT */ + + default: + pr_err("UNKNOWN IOCTL cmd 0x%x\n", cmd); + return -EINVAL; + break; + } + + return ret; +} + +static const struct file_operations fsl_pme2_scan_fops = { + .owner = THIS_MODULE, + .open = fsl_pme2_scan_open, + .release = fsl_pme2_scan_close, + .poll = fsl_pme2_scan_poll, + .unlocked_ioctl = fsl_pme2_scan_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = fsl_pme2_scan_ioctl, +#endif +}; + +static struct miscdevice fsl_pme2_scan_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = PME_DEV_SCAN_NODE, + .fops = &fsl_pme2_scan_fops +}; + +static int __init fsl_pme2_scan_init(void) +{ + int err = 0; + + pr_info("Freescale pme2 scan driver\n"); + err = misc_register(&fsl_pme2_scan_dev); + if (err) { + pr_err("fsl-pme2-scan: cannot register device\n"); + return err; + } + pr_info("fsl-pme2-scan: device %s registered\n", + fsl_pme2_scan_dev.name); + return 0; +} + +static void __exit fsl_pme2_scan_exit(void) +{ + int err = misc_deregister(&fsl_pme2_scan_dev); + if (err) + pr_err("fsl-pme2-scan: Failed to deregister device %s, " + "code %d\n", fsl_pme2_scan_dev.name, err); + pr_info("fsl-pme2-scan: device %s deregistered\n", + fsl_pme2_scan_dev.name); +} + +module_init(fsl_pme2_scan_init); +module_exit(fsl_pme2_scan_exit); + +MODULE_AUTHOR("Jeffrey Ladouceur "); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Freescale PME2 scan driver"); --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_sys.h +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_sys.h @@ -0,0 +1,64 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int pme2_create_sysfs_dev_files(struct platform_device *ofdev); +void pme2_remove_sysfs_dev_files(struct platform_device *ofdev); +void accumulator_update_interval(u32 interval); + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_sysfs.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_sysfs.c @@ -0,0 +1,565 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include "pme2_regs.h" +#include "pme2_private.h" + +#define MAX_ACCUMULATOR_INTERVAL 10000 +extern u32 pme_stat_interval; + +/* The pme sysfs contains the following types of attributes + * 1) root level: general pme confuration + * 2) bsc: bufferpool size configuration + * 3) stats: pme statistics + */ +static ssize_t pme_store(struct device *dev, struct device_attribute *dev_attr, + const char *buf, size_t count, enum pme_attr attr) +{ + unsigned long val; + size_t ret; + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n",buf); + return -EINVAL; + } + ret = pme_attr_set(attr, val); + if (ret) { + dev_err(dev, "attr_set err attr=%u, val=%lu\n", attr, val); + return ret; + } + return count; +} + +static ssize_t pme_show(struct device *dev, struct device_attribute *dev_attr, + char *buf, enum pme_attr attr, const char *fmt) +{ + u32 data; + int ret; + + ret = pme_attr_get(attr, &data); + if (!ret) + return snprintf(buf, PAGE_SIZE, fmt, data); + return ret; +} + + +static ssize_t pme_stat_show(struct device *dev, + struct device_attribute *dev_attr, char *buf, enum pme_attr attr) +{ + u64 data = 0; + int ret = 0; + + ret = pme_stat_get(attr, &data, 0); + if (!ret) + return snprintf(buf, PAGE_SIZE, "%llu\n", data); + else + return ret; +} + +static ssize_t pme_stat_store(struct device *dev, + struct device_attribute *dev_attr, const char *buf, + size_t count, enum pme_attr attr) +{ + unsigned long val; + u64 data = 0; + size_t ret = 0; + if (strict_strtoul(buf, 0, &val)) { + pr_err("pme: invalid input %s\n", buf); + return -EINVAL; + } + if (val) { + pr_err("pme: invalid input %s\n", buf); + return -EINVAL; + } + ret = pme_stat_get(attr, &data, 1); + return count; +} + + +#define PME_SYSFS_ATTR(pme_attr, perm, showhex) \ +static ssize_t pme_store_##pme_attr(struct device *dev, \ + struct device_attribute *attr, const char *buf, size_t count) \ +{ \ + return pme_store(dev, attr, buf, count, pme_attr_##pme_attr);\ +} \ +static ssize_t pme_show_##pme_attr(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return pme_show(dev, attr, buf, pme_attr_##pme_attr, showhex);\ +} \ +static DEVICE_ATTR( pme_attr, perm, pme_show_##pme_attr, pme_store_##pme_attr); + + +#define PME_SYSFS_STAT_ATTR(pme_attr, perm) \ +static ssize_t pme_store_##pme_attr(struct device *dev, \ + struct device_attribute *attr, const char *buf, size_t count) \ +{ \ + return pme_stat_store(dev, attr, buf, count, pme_attr_##pme_attr);\ +} \ +static ssize_t pme_show_##pme_attr(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return pme_stat_show(dev, attr, buf, pme_attr_##pme_attr);\ +} \ +static DEVICE_ATTR(pme_attr, perm, pme_show_##pme_attr, pme_store_##pme_attr); + + +#define PME_SYSFS_BSC_ATTR(bsc_id, perm, showhex) \ +static ssize_t pme_store_bsc_##bsc_id(struct device *dev,\ + struct device_attribute *attr, const char *buf, size_t count) \ +{ \ + return pme_store(dev, attr, buf, count, pme_attr_bsc(bsc_id));\ +} \ +static ssize_t pme_show_bsc_##bsc_id(struct device *dev,\ + struct device_attribute *attr, char *buf) \ +{ \ + return pme_show(dev, attr, buf, pme_attr_bsc(bsc_id), showhex);\ +} \ +static DEVICE_ATTR(bsc_id, perm, pme_show_bsc_##bsc_id, \ + pme_store_bsc_##bsc_id); + +/* Statistics Ctrl: update interval */ +static ssize_t pme_store_update_interval(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long val; + + if (!pme2_have_control()) { + PMEPRERR("not on ctrl-plane\n"); + return -ENODEV; + 
} + if (strict_strtoul(buf, 0, &val)) { + dev_info(dev, "invalid input %s\n", buf); + return -EINVAL; + } + if (val > MAX_ACCUMULATOR_INTERVAL) { + dev_info(dev, "invalid input %s\n", buf); + return -ERANGE; + } + accumulator_update_interval(val); + return count; +} +static ssize_t pme_show_update_interval(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (!pme2_have_control()) + return -ENODEV; + return snprintf(buf, PAGE_SIZE, "%u\n", pme_stat_interval); +} + +#define FMT_0HEX "0x%08x\n" +#define FMT_HEX "0x%x\n" +#define FMT_DEC "%u\n" +#define PRIV_RO S_IRUSR +#define PRIV_RW (S_IRUSR | S_IWUSR) + +/* Register Interfaces */ +/* read-write; */ +PME_SYSFS_ATTR(efqc_int, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(sw_db, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(dmcr, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(smcr, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(famcr, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(kvlts, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(max_chain_length, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(pattern_range_counter_idx, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(pattern_range_counter_mask, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(max_allowed_test_line_per_pattern, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(max_pattern_matches_per_sui, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(max_pattern_evaluations_per_sui, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(report_length_limit, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(end_of_simple_sui_report, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(aim, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(end_of_sui_reaction_ptr, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(sre_pscl, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(sre_max_block_num, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(sre_max_instruction_limit, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(esr, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(pehd, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(ecc1bes, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(ecc2bes, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(miace, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(miacr, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(cdcr, PRIV_RW, FMT_0HEX); +PME_SYSFS_ATTR(pmtr, PRIV_RW, FMT_DEC); + +/* read-only; */ +PME_SYSFS_ATTR(max_pdsr_index, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(sre_context_size, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(sre_rule_num, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(sre_session_ctx_num, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(sre_max_index_size, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(sre_max_offset_ctrl, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(src_id, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(liodnr, PRIV_RO, FMT_DEC); +PME_SYSFS_ATTR(rev1, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(rev2, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(isr, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(ecr0, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(ecr1, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(pmstat, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(eccaddr, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(ecccode, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(faconf, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(pdsrbah, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(pdsrbal, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(scbarh, PRIV_RO, FMT_0HEX); +PME_SYSFS_ATTR(scbarl, PRIV_RO, FMT_0HEX); + + +/* Buffer Pool Size Configuration */ +PME_SYSFS_BSC_ATTR(0, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(1, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(2, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(3, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(4, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(5, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(6, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(7, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(8, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(9, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(10, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(11, 
PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(12, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(13, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(14, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(15, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(16, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(17, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(18, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(19, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(20, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(21, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(22, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(23, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(24, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(25, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(26, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(27, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(28, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(29, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(30, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(31, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(32, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(33, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(34, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(35, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(36, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(37, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(38, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(39, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(40, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(41, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(42, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(43, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(44, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(45, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(46, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(47, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(48, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(49, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(50, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(51, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(52, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(53, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(54, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(55, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(56, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(57, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(58, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(59, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(60, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(61, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(62, PRIV_RW, FMT_DEC); +PME_SYSFS_BSC_ATTR(63, PRIV_RW, FMT_DEC); + +/* Stats Counters*/ +PME_SYSFS_STAT_ATTR(trunci, PRIV_RW); +PME_SYSFS_STAT_ATTR(rbc, PRIV_RW); +PME_SYSFS_STAT_ATTR(tbt0ecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(tbt1ecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(vlt0ecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(vlt1ecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(cmecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(dxcmecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(dxemecc1ec, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnib, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnis, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnth1, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnth2, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnthv, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnths, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnch, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnpm, PRIV_RW); +PME_SYSFS_STAT_ATTR(stns1m, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnpmr, PRIV_RW); +PME_SYSFS_STAT_ATTR(stndsr, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnesr, PRIV_RW); +PME_SYSFS_STAT_ATTR(stns1r, PRIV_RW); +PME_SYSFS_STAT_ATTR(stnob, PRIV_RW); +PME_SYSFS_STAT_ATTR(mia_byc, PRIV_RW); +PME_SYSFS_STAT_ATTR(mia_blc, PRIV_RW); + +/* Stats Control */ +PME_SYSFS_ATTR(tbt0ecc1th, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(tbt1ecc1th, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(vlt0ecc1th, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(vlt1ecc1th, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(cmecc1th, PRIV_RW, FMT_DEC); 
+PME_SYSFS_ATTR(dxcmecc1th, PRIV_RW, FMT_DEC); +PME_SYSFS_ATTR(dxemecc1th, PRIV_RW, FMT_DEC); + +static DEVICE_ATTR(update_interval, (S_IRUSR | S_IWUSR), + pme_show_update_interval, pme_store_update_interval); + +static struct attribute *pme_dev_bsc_attributes[] = { + &dev_attr_0.attr, + &dev_attr_1.attr, + &dev_attr_2.attr, + &dev_attr_3.attr, + &dev_attr_4.attr, + &dev_attr_5.attr, + &dev_attr_6.attr, + &dev_attr_7.attr, + &dev_attr_8.attr, + &dev_attr_9.attr, + &dev_attr_10.attr, + &dev_attr_11.attr, + &dev_attr_12.attr, + &dev_attr_13.attr, + &dev_attr_14.attr, + &dev_attr_15.attr, + &dev_attr_16.attr, + &dev_attr_17.attr, + &dev_attr_18.attr, + &dev_attr_19.attr, + &dev_attr_20.attr, + &dev_attr_21.attr, + &dev_attr_22.attr, + &dev_attr_23.attr, + &dev_attr_24.attr, + &dev_attr_25.attr, + &dev_attr_26.attr, + &dev_attr_27.attr, + &dev_attr_28.attr, + &dev_attr_29.attr, + &dev_attr_30.attr, + &dev_attr_31.attr, + &dev_attr_32.attr, + &dev_attr_33.attr, + &dev_attr_34.attr, + &dev_attr_35.attr, + &dev_attr_36.attr, + &dev_attr_37.attr, + &dev_attr_38.attr, + &dev_attr_39.attr, + &dev_attr_40.attr, + &dev_attr_41.attr, + &dev_attr_42.attr, + &dev_attr_43.attr, + &dev_attr_44.attr, + &dev_attr_45.attr, + &dev_attr_46.attr, + &dev_attr_47.attr, + &dev_attr_48.attr, + &dev_attr_49.attr, + &dev_attr_50.attr, + &dev_attr_51.attr, + &dev_attr_52.attr, + &dev_attr_53.attr, + &dev_attr_54.attr, + &dev_attr_55.attr, + &dev_attr_56.attr, + &dev_attr_57.attr, + &dev_attr_58.attr, + &dev_attr_59.attr, + &dev_attr_60.attr, + &dev_attr_61.attr, + &dev_attr_62.attr, + &dev_attr_63.attr, + NULL +}; + +static struct attribute *pme_dev_attributes[] = { + &dev_attr_efqc_int.attr, + &dev_attr_sw_db.attr, + &dev_attr_dmcr.attr, + &dev_attr_smcr.attr, + &dev_attr_famcr.attr, + &dev_attr_kvlts.attr, + &dev_attr_max_chain_length.attr, + &dev_attr_pattern_range_counter_idx.attr, + &dev_attr_pattern_range_counter_mask.attr, + &dev_attr_max_allowed_test_line_per_pattern.attr, + &dev_attr_max_pdsr_index.attr, + &dev_attr_max_pattern_matches_per_sui.attr, + &dev_attr_max_pattern_evaluations_per_sui.attr, + &dev_attr_report_length_limit.attr, + &dev_attr_end_of_simple_sui_report.attr, + &dev_attr_aim.attr, + &dev_attr_sre_context_size.attr, + &dev_attr_sre_rule_num.attr, + &dev_attr_sre_session_ctx_num.attr, + &dev_attr_end_of_sui_reaction_ptr.attr, + &dev_attr_sre_pscl.attr, + &dev_attr_sre_max_block_num.attr, + &dev_attr_sre_max_instruction_limit.attr, + &dev_attr_sre_max_index_size.attr, + &dev_attr_sre_max_offset_ctrl.attr, + &dev_attr_src_id.attr, + &dev_attr_liodnr.attr, + &dev_attr_rev1.attr, + &dev_attr_rev2.attr, + &dev_attr_isr.attr, + &dev_attr_ecr0.attr, + &dev_attr_ecr1.attr, + &dev_attr_esr.attr, + &dev_attr_pmstat.attr, + &dev_attr_pehd.attr, + &dev_attr_ecc1bes.attr, + &dev_attr_ecc2bes.attr, + &dev_attr_eccaddr.attr, + &dev_attr_ecccode.attr, + &dev_attr_miace.attr, + &dev_attr_miacr.attr, + &dev_attr_cdcr.attr, + &dev_attr_pmtr.attr, + &dev_attr_faconf.attr, + &dev_attr_pdsrbah.attr, + &dev_attr_pdsrbal.attr, + &dev_attr_scbarh.attr, + &dev_attr_scbarl.attr, + NULL +}; + +static struct attribute *pme_dev_stats_counter_attributes[] = { + &dev_attr_trunci.attr, + &dev_attr_rbc.attr, + &dev_attr_tbt0ecc1ec.attr, + &dev_attr_tbt1ecc1ec.attr, + &dev_attr_vlt0ecc1ec.attr, + &dev_attr_vlt1ecc1ec.attr, + &dev_attr_cmecc1ec.attr, + &dev_attr_dxcmecc1ec.attr, + &dev_attr_dxemecc1ec.attr, + &dev_attr_stnib.attr, + &dev_attr_stnis.attr, + &dev_attr_stnth1.attr, + &dev_attr_stnth2.attr, + &dev_attr_stnthv.attr, 
+ &dev_attr_stnths.attr, + &dev_attr_stnch.attr, + &dev_attr_stnpm.attr, + &dev_attr_stns1m.attr, + &dev_attr_stnpmr.attr, + &dev_attr_stndsr.attr, + &dev_attr_stnesr.attr, + &dev_attr_stns1r.attr, + &dev_attr_stnob.attr, + &dev_attr_mia_byc.attr, + &dev_attr_mia_blc.attr, + NULL +}; + +static struct attribute *pme_dev_stats_ctrl_attributes[] = { + &dev_attr_update_interval.attr, + &dev_attr_tbt0ecc1th.attr, + &dev_attr_tbt1ecc1th.attr, + &dev_attr_vlt0ecc1th.attr, + &dev_attr_vlt1ecc1th.attr, + &dev_attr_cmecc1th.attr, + &dev_attr_dxcmecc1th.attr, + &dev_attr_dxemecc1th.attr, + NULL +}; + +/* root level */ +static const struct attribute_group pme_dev_attr_grp = { + .name = NULL, /* put in device directory */ + .attrs = pme_dev_attributes +}; + +/* root/bsc */ +static struct attribute_group pme_dev_bsc_attr_grp = { + .name = "bsc", + .attrs = pme_dev_bsc_attributes +}; + +/* root/stats */ +static struct attribute_group pme_dev_stats_counters_attr_grp = { + .name = "stats", + .attrs = pme_dev_stats_counter_attributes +}; + +/* root/stats_ctrl */ +static struct attribute_group pme_dev_stats_ctrl_attr_grp = { + .name = "stats_ctrl", + .attrs = pme_dev_stats_ctrl_attributes +}; + + +int pme2_create_sysfs_dev_files(struct platform_device *ofdev) +{ + int ret; + + ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_attr_grp); + if (ret) + goto done; + ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_bsc_attr_grp); + if (ret) + goto del_group_1; + ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_stats_counters_attr_grp); + if (ret) + goto del_group_2; + ret = sysfs_create_group(&ofdev->dev.kobj, &pme_dev_stats_ctrl_attr_grp); + if (ret) + goto del_group_3; + goto done; +del_group_3: + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_stats_counters_attr_grp); +del_group_2: + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_bsc_attr_grp); +del_group_1: + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_attr_grp); +done: + if (ret) + dev_err(&ofdev->dev, + "Cannot create dev attributes ret=%d\n", ret); + return ret; +} + +void pme2_remove_sysfs_dev_files(struct platform_device *ofdev) +{ + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_stats_ctrl_attr_grp); + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_stats_counters_attr_grp); + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_bsc_attr_grp); + sysfs_remove_group(&ofdev->dev.kobj, &pme_dev_attr_grp); +} + + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_test.h +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_test.h @@ -0,0 +1,74 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. 
+ * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_sys.h" + +static inline void __hexdump(unsigned long start, unsigned long end, + unsigned long p, size_t sz, const unsigned char *c) +{ + while (start < end) { + unsigned int pos = 0; + char buf[64]; + int nl = 0; + pos += sprintf(buf + pos, "%08lx: ", start); + do { + if ((start < p) || (start >= (p + sz))) + pos += sprintf(buf + pos, ".."); + else + pos += sprintf(buf + pos, "%02x", *(c++)); + if (!(++start & 15)) { + buf[pos++] = '\n'; + nl = 1; + } else { + nl = 0; + if(!(start & 1)) + buf[pos++] = ' '; + if(!(start & 3)) + buf[pos++] = ' '; + } + } while (start & 15); + if (!nl) + buf[pos++] = '\n'; + buf[pos] = '\0'; + pr_info("%s", buf); + } +} +static inline void hexdump(const void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)15; + unsigned long end = (p + sz + 15) & ~(unsigned long)15; + const unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} + +int pme2_sample_db(void); +int pme2_clear_sample_db(void); --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_test_high.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_test_high.c @@ -0,0 +1,238 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("FSL PME2 (p4080) high-level self-test"); + +/* Default Flow Context State */ +static u8 fl_ctx_exp[]={ + 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xe0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; + +void scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_token *token) +{ + hexdump(fd, sizeof(*fd)); +} + +struct ctrl_op { + struct pme_ctx_ctrl_token ctx_ctr; + struct completion cb_done; + enum pme_status cmd_status; + u8 res_flag; +}; + +static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_ctrl_token *token) +{ + struct ctrl_op *ctrl = (struct ctrl_op *)token; + pr_info("pme2_test_high: ctrl_cb() invoked, fd;!\n"); + ctrl->cmd_status = pme_fd_res_status(fd); + ctrl->res_flag = pme_fd_res_flags(fd); + hexdump(fd, sizeof(*fd)); + complete(&ctrl->cb_done); +} + + +#define POST_CTRL(val) \ +do { \ + if (ret) \ + val = -1;\ + else if (pme_ctx_is_dead(&ctx))\ + val = -1;\ + else if (ctx_ctrl.cmd_status)\ + val = -1;\ + else if (ctx_ctrl.res_flag)\ + val = -1;\ +} while (0) + +void pme2_test_high(void) +{ + int post_ctrl = 0; + struct pme_flow flow; + struct qm_fqd_stashing stashing; + struct pme_ctx ctx = { + .cb = scan_cb + }; + int ret; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .cmd_status = 0, + .res_flag = 0 + }; + struct cpumask backup_mask = current->cpus_allowed; + struct cpumask new_mask = *qman_affine_cpus(); + + pr_info("PME2: high-level test starting\n"); + + cpumask_and(&new_mask, &new_mask, bman_affine_cpus()); + ret = set_cpus_allowed_ptr(current, &new_mask); + if (ret) { + post_ctrl = -1; + pr_info("PME2: test high: can't set cpumask\n"); + goto done; + } + + pme_sw_flow_init(&flow); + init_completion(&ctx_ctrl.cb_done); + ret = pme_ctx_init(&ctx, PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL); + POST_CTRL(post_ctrl); + if (post_ctrl) + goto restore_mask; + + /* enable the context */ + pme_ctx_enable(&ctx); + pr_info("PME2: pme_ctx_enable done\n"); + ret = pme_ctx_ctrl_update_flow(&ctx, PME_CTX_OP_WAIT | PME_CMD_FCW_ALL, + &flow, &ctx_ctrl.ctx_ctr); + pr_info("PME2: pme_ctx_ctrl_update_flow done\n"); + wait_for_completion(&ctx_ctrl.cb_done); + POST_CTRL(post_ctrl); + if (post_ctrl) + goto disable_ctx; + /* read back flow settings */ + ret = pme_ctx_ctrl_read_flow(&ctx, PME_CTX_OP_WAIT, &flow, + &ctx_ctrl.ctx_ctr); + pr_info("PME2: pme_ctx_ctrl_read_flow done\n"); + wait_for_completion(&ctx_ctrl.cb_done); + POST_CTRL(post_ctrl); + if (post_ctrl) + goto disable_ctx; + if (memcmp(&flow, fl_ctx_exp, sizeof(flow))) { + pr_info("Default Flow Context Read FAIL\n"); + pr_info("Expected:\n"); + hexdump(fl_ctx_exp, sizeof(fl_ctx_exp)); + pr_info("Received:\n"); + hexdump(&flow, sizeof(flow)); + post_ctrl = -1; + goto disable_ctx; + } else + pr_info("Default Flow Context Read OK\n"); + /* start a 
NOP */ + ret = pme_ctx_ctrl_nop(&ctx, 0, &ctx_ctrl.ctx_ctr); + pr_info("PME2: pme_ctx_ctrl_nop done\n"); + wait_for_completion(&ctx_ctrl.cb_done); + POST_CTRL(post_ctrl); + if (post_ctrl) + goto disable_ctx; + /* start an update to add residue to the context */ + flow.ren = 1; + ret = pme_ctx_ctrl_update_flow(&ctx, PME_CTX_OP_WAIT | PME_CMD_FCW_RES, + &flow, &ctx_ctrl.ctx_ctr); + pr_info("PME2: pme_ctx_ctrl_update_flow done\n"); + wait_for_completion(&ctx_ctrl.cb_done); + POST_CTRL(post_ctrl); + if (post_ctrl) + goto disable_ctx; + /* start a blocking disable */ + ret = pme_ctx_disable(&ctx, PME_CTX_OP_WAIT, &ctx_ctrl.ctx_ctr); + if (ret < 1) { + post_ctrl = -1; + goto finish_ctx; + } + wait_for_completion(&ctx_ctrl.cb_done); + /* do some reconfiguration */ + ret = pme_ctx_reconfigure_tx(&ctx, 63, 7); + if (ret) { + post_ctrl = -1; + goto finish_ctx; + } + stashing.exclusive = 0; + stashing.annotation_cl = 0; + stashing.data_cl = 2; + stashing.context_cl = 2; + ret = pme_ctx_reconfigure_rx(&ctx, 7, 0, &stashing); + if (ret) { + post_ctrl = -1; + goto finish_ctx; + } + /* reenable */ + ret = pme_ctx_enable(&ctx); + if (ret) { + post_ctrl = -1; + goto finish_ctx; + } + /* read back flow settings */ + ret = pme_ctx_ctrl_read_flow(&ctx, + PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT | PME_CMD_FCW_RES, &flow, + &ctx_ctrl.ctx_ctr); + pr_info("PME2: pme_ctx_ctrl_read_flow done\n"); + wait_for_completion(&ctx_ctrl.cb_done); + POST_CTRL(post_ctrl); + if (post_ctrl) + goto disable_ctx; + /* blocking NOP */ + ret = pme_ctx_ctrl_nop(&ctx, PME_CTX_OP_WAIT | PME_CTX_OP_WAIT_INT, + &ctx_ctrl.ctx_ctr); + pr_info("PME2: pme_ctx_ctrl_nop done\n"); + wait_for_completion(&ctx_ctrl.cb_done); + POST_CTRL(post_ctrl); + /* Disable, and done */ +disable_ctx: + ret = pme_ctx_disable(&ctx, PME_CTX_OP_WAIT, &ctx_ctrl.ctx_ctr); + BUG_ON(ret < 1); + wait_for_completion(&ctx_ctrl.cb_done); +finish_ctx: + pme_ctx_finish(&ctx); +restore_mask: + ret = set_cpus_allowed_ptr(current, &backup_mask); + if (ret) { + pr_err("PME2 test high: can't restore cpumask"); + post_ctrl = -1; + } +done: + if (post_ctrl) + pr_info("PME2: high-level test failed\n"); + else + pr_info("PME2: high-level test passed\n"); +} + +static int pme2_test_high_init(void) +{ + int big_loop = 2; + while (big_loop--) + pme2_test_high(); + return 0; +} + +static void pme2_test_high_exit(void) +{ +} + +module_init(pme2_test_high_init); +module_exit(pme2_test_high_exit); + --- linux-3.13.0.orig/drivers/staging/fsl_pme2/pme2_test_scan.c +++ linux-3.13.0/drivers/staging/fsl_pme2/pme2_test_scan.c @@ -0,0 +1,653 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "pme2_test.h" + +enum scan_ctrl_mode { + no_scan = 0, + do_scan = 1, +}; + +enum db_ctrl_mode { + create_destroy = 0, + create = 1, + destroy = 2, + nothing = 3 +}; + +MODULE_AUTHOR("Jeffrey Ladouceur"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("PME scan testing"); + +static enum db_ctrl_mode db_ctrl; +module_param(db_ctrl, uint, 0644); +MODULE_PARM_DESC(db_ctrl, "PME Database control"); + +static enum scan_ctrl_mode scan_ctrl = 1; +module_param(scan_ctrl, uint, 0644); +MODULE_PARM_DESC(scan_ctrl, "Scan control"); + +static u8 scan_result_direct_mode_inc_mode[] = { + 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 +}; + +static u8 fl_ctx_exp[] = { + 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xe0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; + +/* same again with 'sos' bit cleared */ +static u8 fl_ctx_exp_post_scan[] = { + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xe0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 +}; + +struct scan_ctx { + struct pme_ctx base_ctx; + struct qm_fd result_fd; +}; + +struct ctrl_op { + struct pme_ctx_ctrl_token ctx_ctr; + struct completion cb_done; + enum pme_status cmd_status; + u8 res_flag; +}; + +static void ctrl_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_ctrl_token *token) +{ + struct ctrl_op *ctrl = (struct ctrl_op *)token; + ctrl->cmd_status = pme_fd_res_status(fd); + ctrl->res_flag = pme_fd_res_flags(fd) & PME_STATUS_UNRELIABLE; + /* hexdump(fd, sizeof(*fd)); */ + complete(&ctrl->cb_done); +} + +static DECLARE_COMPLETION(scan_comp); + +static void scan_cb(struct pme_ctx *ctx, const struct qm_fd *fd, + struct pme_ctx_token *ctx_token) +{ + struct scan_ctx *my_ctx = (struct scan_ctx *)ctx; + memcpy(&my_ctx->result_fd, fd, sizeof(*fd)); + complete(&scan_comp); +} + +#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID + +static struct bman_pool *pool; +static u32 pme_bpid; +static void *bman_buffers_virt_base; +static dma_addr_t bman_buffers_phys_base; + +static void release_buffer(dma_addr_t addr) +{ + struct bm_buffer bufs_in; + bm_buffer_set64(&bufs_in, addr); + if (bman_release(pool, &bufs_in, 1, BMAN_RELEASE_FLAG_WAIT)) + panic("bman_release() failed\n"); +} + +static void empty_buffer(void) +{ + struct bm_buffer bufs_in; + int ret; + + do { + ret = 
bman_acquire(pool, &bufs_in, 1, 0); + } while (!ret); +} +#endif /*CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID*/ + +static int scan_test_direct(int trunc, int use_bp) +{ + struct scan_ctx a_scan_ctx = { + .base_ctx = { + .cb = scan_cb + } + }; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .cmd_status = 0, + .res_flag = 0 + }; + struct qm_fd fd; + struct qm_sg_entry sg_table[2]; + int ret; + enum pme_status status; + struct pme_ctx_token token; + u8 *scan_result; + u32 scan_result_size; + u8 scan_data[] = { + 0x41, 0x42, 0x43, 0x44, 0x45 + }; + u8 result_data[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + }; + + init_completion(&ctx_ctrl.cb_done); + scan_result = scan_result_direct_mode_inc_mode; + scan_result_size = sizeof(scan_result_direct_mode_inc_mode); + + ret = pme_ctx_init(&a_scan_ctx.base_ctx, + PME_CTX_FLAG_DIRECT | PME_CTX_FLAG_LOCAL, + 0, 4, 4, 0, NULL); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + return ret; + } + /* enable the context */ + ret = pme_ctx_enable(&a_scan_ctx.base_ctx); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_finish; + } + + /* Do a pre-built output, scan with match test */ + /* Build a frame descriptor */ + memset(&fd, 0, sizeof(struct qm_fd)); + memset(&sg_table, 0, sizeof(sg_table)); + + if (trunc) { + fd.length20 = sizeof(scan_data); + qm_fd_addr_set64(&fd, pme_map(scan_data)); + } else { + /* build the result */ + qm_sg_entry_set64(&sg_table[0], pme_map(result_data)); + sg_table[0].length = sizeof(result_data); + qm_sg_entry_set64(&sg_table[1], pme_map(scan_data)); + sg_table[1].length = sizeof(scan_data); + sg_table[1].final = 1; + fd._format2 = qm_fd_compound; + qm_fd_addr_set64(&fd, pme_map(sg_table)); + } + + ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd, + PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00), + &token); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_disable; + } + wait_for_completion(&scan_comp); + + status = pme_fd_res_status(&a_scan_ctx.result_fd); + if (status) { + pr_err("pme scan test failed 0x%x\n", status); + goto ctx_disable; + } + if (trunc) { + int res_flag = pme_fd_res_flags(&a_scan_ctx.result_fd); + /* Check the response...expect truncation bit to be set */ + if (!(res_flag & PME_STATUS_TRUNCATED)) { + pr_err("pme scan test failed, expected truncation\n"); + goto ctx_disable; + } + } else { + if (memcmp(scan_result, result_data, scan_result_size) != 0) { + pr_err("pme scan test result not expected\n"); + hexdump(scan_result, scan_result_size); + pr_err("Received...\n"); + hexdump(result_data, sizeof(result_data)); + goto ctx_disable; + } + } + + ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_finish; + } + if (!use_bp) { + pme_ctx_finish(&a_scan_ctx.base_ctx); + return 0; + } + /* use buffer pool */ + /* Check with bman */ + /* reconfigure */ + +#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID + ret = pme_ctx_reconfigure_tx(&a_scan_ctx.base_ctx, pme_bpid, 5); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_finish; + } + ret = pme_ctx_enable(&a_scan_ctx.base_ctx); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_finish; + } + /* Do a pre-built output, scan with match test */ + /* Build a frame descriptor */ + memset(&fd, 0, sizeof(struct qm_fd)); + memset(&sg_table, 0, sizeof(sg_table)); + + /* build the result */ + /* result is all zero...use bman 
*/ + qm_sg_entry_set64(&sg_table[1], pme_map(scan_data)); + sg_table[1].length = sizeof(scan_data); + sg_table[1].final = 1; + + fd._format2 = qm_fd_compound; + qm_fd_addr_set64(&fd, pme_map(sg_table)); + + ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd, + PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00), + &token); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_disable; + } + wait_for_completion(&scan_comp); + + status = pme_fd_res_status(&a_scan_ctx.result_fd); + if (status) { + pr_err("pme scan test failed 0x%x\n", status); + goto ctx_disable; + } + /* sg result should point to bman buffer */ + if (!qm_sg_entry_get64(&sg_table[0])) { + pr_err("pme scan test failed, sg result not bman buffer\n"); + goto ctx_disable; + } + if (memcmp(scan_result, bman_buffers_virt_base, scan_result_size) + != 0) { + pr_err("pme scan test not expected, Expected\n"); + hexdump(scan_result, scan_result_size); + pr_err("Received...\n"); + hexdump(bman_buffers_virt_base, scan_result_size); + release_buffer(qm_sg_entry_get64(&sg_table[0])); + goto ctx_disable; + } + release_buffer(qm_sg_entry_get64(&sg_table[0])); + ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto ctx_finish; + } + pme_ctx_finish(&a_scan_ctx.base_ctx); + return 0; +#endif + +/* failure path */ +ctx_disable: + ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL); +ctx_finish: + pme_ctx_finish(&a_scan_ctx.base_ctx); + return (!ret) ? -EINVAL : ret; +} + +static int scan_test_flow(void) +{ + struct pme_flow flow; + struct pme_flow rb_flow; + struct scan_ctx a_scan_ctx = { + .base_ctx = { + .cb = scan_cb + } + }; + struct ctrl_op ctx_ctrl = { + .ctx_ctr.cb = ctrl_cb, + .cmd_status = 0, + .res_flag = 0 + }; + struct qm_fd fd; + struct qm_sg_entry sg_table[2]; + int ret; + enum pme_status status; + struct pme_ctx_token token; + u8 *scan_result; + u32 scan_result_size; + u8 scan_data[] = { + 0x41, 0x42, 0x43, 0x44, 0x45 + }; + u8 result_data[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00 + }; + + pme_sw_flow_init(&flow); + init_completion(&ctx_ctrl.cb_done); + scan_result = scan_result_direct_mode_inc_mode; + scan_result_size = sizeof(scan_result_direct_mode_inc_mode); + + ret = pme_ctx_init(&a_scan_ctx.base_ctx, + PME_CTX_FLAG_LOCAL, 0, 4, 4, 0, NULL); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + return ret; + } + /* enable the context */ + ret = pme_ctx_enable(&a_scan_ctx.base_ctx); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_finish; + } + ret = pme_ctx_ctrl_update_flow(&a_scan_ctx.base_ctx, + PME_CTX_OP_WAIT | PME_CMD_FCW_ALL, &flow, &ctx_ctrl.ctx_ctr); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + /* read back flow settings */ + ret = pme_ctx_ctrl_read_flow(&a_scan_ctx.base_ctx, + PME_CTX_OP_WAIT, &rb_flow, &ctx_ctrl.ctx_ctr); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + if (memcmp(&rb_flow, fl_ctx_exp, sizeof(rb_flow)) != 0) { + pr_err("pme scan test Flow Context Read FAIL\n"); + 
pr_err("Expected\n"); + hexdump(fl_ctx_exp, sizeof(fl_ctx_exp)); + pr_err("Received...\n"); + hexdump(&rb_flow, sizeof(rb_flow)); + goto flow_ctx_disable; + } + + /* Do a pre-built output, scan with match test */ + /* Build a frame descriptor */ + memset(&fd, 0, sizeof(struct qm_fd)); + memset(&sg_table, 0, sizeof(sg_table)); + + /* build the result */ + qm_sg_entry_set64(&sg_table[0], pme_map(result_data)); + sg_table[0].length = sizeof(result_data); + qm_sg_entry_set64(&sg_table[1], pme_map(scan_data)); + sg_table[1].length = sizeof(scan_data); + sg_table[1].final = 1; + + fd._format2 = qm_fd_compound; + qm_fd_addr_set64(&fd, pme_map(sg_table)); + + ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd, + PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00), + &token); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + wait_for_completion(&scan_comp); + + status = pme_fd_res_status(&a_scan_ctx.result_fd); + if (status) { + pr_err("pme scan test failed 0x%x\n", status); + goto flow_ctx_disable; + } + + if (memcmp(scan_result, result_data, scan_result_size) != 0) { + pr_err("pme scan test result not expected\n"); + hexdump(scan_result, scan_result_size); + pr_err("Received...\n"); + hexdump(result_data, sizeof(result_data)); + goto flow_ctx_disable; + } + + /* read back flow settings */ + ret = pme_ctx_ctrl_read_flow(&a_scan_ctx.base_ctx, + PME_CTX_OP_WAIT, &rb_flow, &ctx_ctrl.ctx_ctr); + if (ret) { + pr_err("pme scan test failed 0x%x\n", status); + goto flow_ctx_disable; + } + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + if (memcmp(&rb_flow, fl_ctx_exp_post_scan, sizeof(rb_flow)) != 0) { + pr_err("pme scan test Flow Context Read FAIL\n"); + pr_err("Expected\n"); + hexdump(fl_ctx_exp_post_scan, sizeof(fl_ctx_exp_post_scan)); + pr_err("Received\n"); + hexdump(&rb_flow, sizeof(rb_flow)); + goto flow_ctx_disable; + } + + /* Test truncation test */ + /* Build a frame descriptor */ + memset(&fd, 0, sizeof(struct qm_fd)); + + fd.length20 = sizeof(scan_data); + qm_fd_addr_set64(&fd, pme_map(scan_data)); + + ret = pme_ctx_scan(&a_scan_ctx.base_ctx, 0, &fd, + PME_SCAN_ARGS(PME_CMD_SCAN_SR | PME_CMD_SCAN_E, 0, 0xff00), + &token); + if (ret) { + pr_err("pme scan test failed 0x%x\n", status); + goto flow_ctx_disable; + } + wait_for_completion(&scan_comp); + + status = pme_fd_res_status(&a_scan_ctx.result_fd); + if (status) { + pr_err("pme scan test failed 0x%x\n", status); + goto flow_ctx_disable; + } + /* Check the response...expect truncation bit to be set */ + if (!(pme_fd_res_flags(&a_scan_ctx.result_fd) & PME_STATUS_TRUNCATED)) { + pr_err("st: Scan result failed...expected trunc\n"); + goto flow_ctx_disable; + } + + /* read back flow settings */ + ret = pme_ctx_ctrl_read_flow(&a_scan_ctx.base_ctx, + PME_CTX_OP_WAIT, &rb_flow, &ctx_ctrl.ctx_ctr); + if (ret) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + wait_for_completion(&ctx_ctrl.cb_done); + if (ctx_ctrl.cmd_status || ctx_ctrl.res_flag) { + pr_err("pme scan test failed: 0x%x\n", ret); + goto flow_ctx_disable; + } + if (memcmp(&rb_flow, fl_ctx_exp_post_scan, sizeof(rb_flow)) != 0) { + pr_err("pme scan test Flow Context Read FAIL\n"); + pr_err("Expected\n"); + hexdump(fl_ctx_exp_post_scan, sizeof(fl_ctx_exp_post_scan)); + pr_err("Received\n"); + hexdump(&rb_flow, sizeof(rb_flow)); + goto flow_ctx_disable; + } + + /* Disable */ + ret = 
pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, + &ctx_ctrl.ctx_ctr); + if (ret < 1) { + pr_err("pme scan test failed 0x%x\n", ret); + goto flow_ctx_finish; + } + wait_for_completion(&ctx_ctrl.cb_done); + pme_ctx_finish(&a_scan_ctx.base_ctx); + return 0; + /* error path */ +/* failure path */ +flow_ctx_disable: + ret = pme_ctx_disable(&a_scan_ctx.base_ctx, PME_CTX_OP_WAIT, NULL); +flow_ctx_finish: + pme_ctx_finish(&a_scan_ctx.base_ctx); + return (!ret) ? -EINVAL : ret; +} + +void pme2_test_scan(void) +{ + int ret; + + ret = scan_test_direct(0, 0); + if (ret) + goto done; + ret = scan_test_direct(1, 0); + if (ret) + goto done; +#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID + ret = scan_test_direct(0, 1); + if (ret) + goto done; +#endif + ret = scan_test_flow(); +done: + if (ret) + pr_info("pme scan test FAILED 0x%x\n", ret); + else + pr_info("pme Scan Test Passed\n"); +} + +static int setup_buffer_pool(void) +{ +#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID + u32 bpid_size = CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID_SIZE; + struct bman_pool_params pparams = { + .flags = BMAN_POOL_FLAG_DYNAMIC_BPID, + .thresholds = { + 0, + 0, + 0, + 0 + } + }; + + if (!pme2_have_control()) { + pr_err("pme scan test: Not the ctrl-plane\n"); + return -EINVAL; + } + pool = bman_new_pool(&pparams); + if (!pool) { + pr_err("pme scan test: can't get buffer pool\n"); + return -EINVAL; + } + pme_bpid = bman_get_params(pool)->bpid; + bman_buffers_virt_base = kmalloc(1<<(bpid_size+5), GFP_KERNEL); + bman_buffers_phys_base = pme_map(bman_buffers_virt_base); + if (pme_map_error(bman_buffers_phys_base)) { + pr_info("pme scan test: pme_map_error\n"); + bman_free_pool(pool); + kfree(bman_buffers_virt_base); + return -ENODEV; + } + release_buffer(bman_buffers_phys_base); + /* Configure the buffer pool */ + pme_attr_set(pme_attr_bsc(pme_bpid), bpid_size); + /* realease to the specified buffer pool */ + return 0; +#endif + return 0; +} + +static int teardown_buffer_pool(void) +{ +#ifdef CONFIG_FSL_PME2_TEST_SCAN_WITH_BPID + pme_attr_set(pme_attr_bsc(pme_bpid), 0); + empty_buffer(); + bman_free_pool(pool); + kfree(bman_buffers_virt_base); +#endif + return 0; +} + +static int pme2_test_scan_init(void) +{ + int big_loop = 2; + int ret = 0; + struct cpumask backup_mask = current->cpus_allowed; + struct cpumask new_mask = *qman_affine_cpus(); + + cpumask_and(&new_mask, &new_mask, bman_affine_cpus()); + ret = set_cpus_allowed_ptr(current, &new_mask); + if (ret) { + pr_info("pme scan test: can't set cpumask\n"); + goto done_all; + } + + ret = setup_buffer_pool(); + if (ret) + goto done_cpu_mask; + + /* create sample database */ + if (db_ctrl == create_destroy || db_ctrl == create) { + if (!pme2_have_control()) { + pr_err("pme scan test: Not the ctrl-plane\n"); + ret = -EINVAL; + goto done_scan; + } + if (pme2_sample_db()) { + pr_err("pme scan test: error creating db\n"); + goto done_scan; + } + } + + if (scan_ctrl == do_scan) { + while (big_loop--) + pme2_test_scan(); + } + + if (db_ctrl == create_destroy || db_ctrl == destroy) { + /* Clear database */ + if (pme2_clear_sample_db()) + pr_err("pme scan test: error clearing db\n"); + } + +done_scan: + teardown_buffer_pool(); +done_cpu_mask: + ret = set_cpus_allowed_ptr(current, &backup_mask); + if (ret) + pr_err("PME2 test high: can't restore cpumask"); +done_all: + return ret; +} + +static void pme2_test_scan_exit(void) +{ +} + +module_init(pme2_test_scan_init); +module_exit(pme2_test_scan_exit); --- linux-3.13.0.orig/drivers/staging/fsl_qbman/Kconfig +++ 
linux-3.13.0/drivers/staging/fsl_qbman/Kconfig @@ -0,0 +1,278 @@ +config FSL_DPA + bool "Freescale Datapath Queue and Buffer management" + depends on HAS_FSL_QBMAN + default y + select FSL_QMAN_FQ_LOOKUP if PPC64 + +menu "Freescale Datapath QMan/BMan options" + depends on FSL_DPA + +config FSL_DPA_CHECKING + bool "additional driver checking" + default n + ---help--- + Compiles in additional checks to sanity-check the drivers and any + use of it by other code. Not recommended for performance. + +config FSL_DPA_HAVE_IRQ + bool + default y + +config FSL_DPA_CAN_WAIT + bool + default y + +config FSL_DPA_CAN_WAIT_SYNC + bool + default y + +config FSL_DPA_PIRQ_FAST + bool "fast-path processing via IRQ" + depends on FSL_DPA_HAVE_IRQ + default y + ---help--- + By default, configure fast-path handling to be triggered by IRQ. + Drivers can modify this at run-time, but for linux it makes sense + for all handling to be triggered by IRQ, unless a special + run-to-completion application is being built. + +config FSL_DPA_PIRQ_SLOW + bool "slow-path processing via IRQ" + depends on FSL_DPA_HAVE_IRQ + default y + ---help--- + By default, configure slow-path handling to be triggered by IRQ. + Drivers can modify this at run-time, but for linux it makes sense + for all handling to be triggered by IRQ, unless a special + run-to-completion application is being built. + +config FSL_DPA_PORTAL_SHARE + bool "allow portals to be affine to multiple CPUs" + default y + ---help--- + If the kernel, via its device-tree, will be required to use a + single Qman or Bman portal for multiple CPUs, then select this + option. Note however that compiling with this option will result + in a very slight (but non-zero) overhead even when the feature + is not being used. + +config FSL_DPA_UIO + tristate "Export USDPAA portals via UIO" + depends on UIO + default y + ---help--- + Portals are exported as UIO devices for use by USDPAA (User + Space DataPath Acceleration Architecture) applications. + +config FSL_BMAN + bool "Freescale Buffer Manager support" + default y + +if FSL_BMAN + +config FSL_BMAN_PORTAL + bool "Bman portal support" + default y + ---help--- + Compiles support to detect and support Bman software corenet portals + (as provided by the device-tree). + +config FSL_BMAN_CONFIG + bool "Bman device management" + default y + ---help--- + If this linux image is running natively, you need this option. If this + linux image is running as a guest OS under the hypervisor, only one + guest OS ("the control plane") needs this option. + +config FSL_BMAN_TEST + tristate "Bman self-tests" + depends on FSL_BMAN_PORTAL + default n + ---help--- + This option compiles self-test code for Bman. + +config FSL_BMAN_TEST_HIGH + bool "Bman high-level self-test" + depends on FSL_BMAN_TEST + default y + ---help--- + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine to + the cpu(s) the test executes on). + +config FSL_BMAN_TEST_THRESH + bool "Bman threshold test" + depends on FSL_BMAN_TEST + default y + ---help--- + Multi-threaded (SMP) test of Bman pool depletion. A pool is seeded + before multiple threads (one per cpu) create pool objects to track + depletion state changes. The pool is then drained to empty by a + "drainer" thread, and the other threads that they observe exactly + the depletion state changes that are expected. 
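The FSL_BMAN_TEST_THRESH entry above describes a seed-then-drain flow: a pool is filled, per-cpu threads watch its depletion state, and a "drainer" empties it. As a rough, hedged sketch of just the seed/drain half of that flow, the fragment below strings together the Bman calls that appear elsewhere in this patch (bman_new_pool(), bm_buffer_set64(), bman_release(), bman_acquire(), bman_free_pool()). The buffer stride, the assumption that bman_acquire() returns the number of buffers acquired (or a negative error), and the helper name are illustrative only; the per-cpu observer threads and depletion callbacks of the real test are not shown in this hunk and are omitted here.

#include <linux/types.h>
#include <linux/errno.h>
/* assumed: the Bman portal API header from this patch (not shown in this hunk) */

/* Sketch: seed a dynamically-allocated pool with 'count' buffers, then drain it.
 * Thresholds are left at zero, as in setup_buffer_pool() above. */
static int bpool_seed_drain_sketch(dma_addr_t base, unsigned int count)
{
	struct bman_pool_params pparams = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID,
		.thresholds = { 0, 0, 0, 0 }
	};
	struct bman_pool *pool = bman_new_pool(&pparams);
	struct bm_buffer buf;
	unsigned int i;
	int ret = 0;

	if (!pool)
		return -ENOMEM;
	/* seed: release 'count' buffers, 64 bytes apart (arbitrary stride) */
	for (i = 0; i < count; i++) {
		bm_buffer_set64(&buf, base + i * 64);
		ret = bman_release(pool, &buf, 1, BMAN_RELEASE_FLAG_WAIT);
		if (ret)
			break;
	}
	/* drain: keep acquiring single buffers until the pool reports empty */
	if (!ret)
		while (bman_acquire(pool, &buf, 1, 0) > 0)
			;
	bman_free_pool(pool);
	return ret;
}

In the full threshold test, the observer threads would additionally register for depletion-state notifications and check the entry/exit transitions against the configured thresholds; that callback interface is not part of this hunk, so it is left out of the sketch.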
+ +config FSL_BMAN_DEBUGFS + tristate "Bman debugfs interface" + depends on FSL_BMAN_PORTAL && DEBUG_FS + default y + ---help--- + This option compiles bman debugfs code for Bman. + +endif # FSL_BMAN + +config FSL_QMAN + bool "Freescale Queue Manager support" + default y + +if FSL_QMAN + +config FSL_QMAN_PORTAL + bool "Qman portal support" + default y + ---help--- + Compiles support to detect and support Qman software corenet portals + (as provided by the device-tree). + +config FSL_QMAN_BUG_AND_FEATURE_REV1 + bool "workarounds for errata and missing features in p4080 rev1" + depends on FSL_QMAN_PORTAL + default y + ---help--- + If this option is selected, the driver will be compiled with + workarounds for errata as well as feature limitations (relative to + more recent parts) of p4080 rev1. On unaffected revisions, this + support incurs only a negligible overhead, typically only a couple of + instructions per non-fast-path operation (the fast-path operations are + unaffected). + + If in doubt, say Y. + +config FSL_QMAN_POLL_LIMIT + int + default 32 + +config FSL_QMAN_PORTAL_DISABLEAUTO_DCA + bool "disable discrete-consumption support on cpu-affine portals" + default n + ---help--- + By default, auto-initialised cpu-affine portals support + discrete-consumption acknowledgements, but this may be unimplemented + in the simulation model. + +config FSL_QMAN_NULL_FQ_DEMUX + bool "support NULL demux handlers" + default y + ---help--- + Normally, incoming frame dequeues and messages from Qman to a software + portal provide a direct demux to the owner of the corresponding FQ. + However, exotic "zero-conf" possibilities can be supported if other + cpus (or operating systems) can schedule "NULL" FQs to a receiver's + portal. If this option is selected, the driver will support this + feature, but it adds a small overhead to the hottest path in the + driver, so it should be disabled unless needed. + +config FSL_QMAN_DQRR_PREFETCHING + bool "support prefetching of DQRR (if stashing disabled)" + default y + ---help--- + Normally, portals are configured to stash DQRR entries, but if this is + not the case, then s/w needs to invalidate and prefetch ring entries. + Selecting this option supports both cases and chooses the best one at + run-time, but introduces a small overhead in some hot paths of the + driver, so disable it if you know stashing will be configured. + +config FSL_QMAN_CONFIG + bool "Qman device management" + default y + ---help--- + If this linux image is running natively, you need this option. If this + linux image is running as a guest OS under the hypervisor, only one + guest OS ("the control plane") needs this option. + +config FSL_QMAN_TEST + tristate "Qman self-tests" + depends on FSL_QMAN_PORTAL + default n + ---help--- + This option compiles self-test code for Qman. + +config FSL_QMAN_TEST_STASH_POTATO + bool "Qman 'hot potato' data-stashing self-test" + depends on FSL_QMAN_TEST + default y + ---help--- + This performs a "hot potato" style test enqueuing/dequeuing a frame + across a series of FQs scheduled to different portals (and cpus), with + DQRR, data and context stashing always on. + +config FSL_QMAN_TEST_HIGH + bool "Qman high-level self-test" + depends on FSL_QMAN_TEST + default y + ---help--- + This requires the presence of cpu-affine portals, and performs + high-level API testing with them (whichever portal(s) are affine to + the cpu(s) the test executes on). 
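FSL_BMAN_TEST_HIGH and FSL_QMAN_TEST_HIGH both require the test to execute on a cpu that has an affine portal. The scan test earlier in this patch handles that by saving its cpumask, restricting itself to the portal-affine cpus, and restoring the mask when done; the fragment below condenses that pattern using only calls visible in this patch (qman_affine_cpus(), bman_affine_cpus(), set_cpus_allowed_ptr()) and the 3.13-era current->cpus_allowed field. It is a sketch of the pattern with a hypothetical helper name, not code taken from the driver.

#include <linux/cpumask.h>
#include <linux/sched.h>
/* assumed: Qman/Bman headers declaring qman_affine_cpus()/bman_affine_cpus() */

/* Run a portal-dependent test only on CPUs that have affine portals,
 * mirroring the save/restrict/restore pattern of pme2_test_scan_init(). */
static int run_on_portal_cpus(void (*test_fn)(void))
{
	struct cpumask backup_mask = current->cpus_allowed;
	struct cpumask new_mask = *qman_affine_cpus();
	int ret;

	/* only run where both a Qman and a Bman portal are cpu-affine */
	cpumask_and(&new_mask, &new_mask, bman_affine_cpus());
	ret = set_cpus_allowed_ptr(current, &new_mask);
	if (ret)
		return ret;
	test_fn();
	/* best-effort restore of the caller's original affinity */
	return set_cpus_allowed_ptr(current, &backup_mask);
}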
+ +config FSL_QMAN_TEST_ERRATA + bool "Qman errata-handling self-test" + depends on FSL_QMAN_TEST + default y + ---help--- + This requires the presence of cpu-affine portals, and performs + testing that handling for known hardware-errata is correct. + +config FSL_QMAN_DEBUGFS + tristate "Qman debugfs interface" + depends on FSL_QMAN_PORTAL + default y + ---help--- + This option compiles qman debugfs code for Qman. + +# H/w settings that can be hard-coded for now. +config FSL_QMAN_FQD_SZ + int "Size of Frame Queue Descriptor region" + default 9 + ---help--- + This is the size of the FQD region defined as: PAGE_SIZE * (2^value) + ex: 9 => PAGE_SIZE * (2^9) + +# Corenet initiator settings. Stash request queues are 4-deep to match cores' +# ability to snart. Stash priority is 3, other priorities are 2. +config FSL_QMAN_CI_SCHED_CFG_SRCCIV + int + depends on FSL_QMAN_CONFIG + default 4 +config FSL_QMAN_CI_SCHED_CFG_SRQ_W + int + depends on FSL_QMAN_CONFIG + default 3 +config FSL_QMAN_CI_SCHED_CFG_RW_W + int + depends on FSL_QMAN_CONFIG + default 2 +config FSL_QMAN_CI_SCHED_CFG_BMAN_W + int + depends on FSL_QMAN_CONFIG + default 2 + +# portal interrupt settings +config FSL_QMAN_PIRQ_DQRR_ITHRESH + int + default 12 +config FSL_QMAN_PIRQ_MR_ITHRESH + int + default 4 +config FSL_QMAN_PIRQ_IPERIOD + int + default 100 + +# 64 bit kernel support +config FSL_QMAN_FQ_LOOKUP + bool + default n + +endif # FSL_QMAN + +endmenu --- linux-3.13.0.orig/drivers/staging/fsl_qbman/Makefile +++ linux-3.13.0/drivers/staging/fsl_qbman/Makefile @@ -0,0 +1,23 @@ +# Bman +obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o +obj-$(CONFIG_FSL_BMAN_PORTAL) += bman_high.o +obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o +obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o +bman_tester-y = bman_test.o +bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o +bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o +bman_debugfs_interface-y = bman_debugfs.o + +# Qman +obj-$(CONFIG_FSL_QMAN) += qman_utility.o qman_fqalloc.o +obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o +obj-$(CONFIG_FSL_QMAN_PORTAL) += qman_high.o +obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o +qman_tester-y = qman_test.o qman_test_hotpotato.o \ + qman_test_high.o +qman_tester-$(CONFIG_FSL_QMAN_TEST_ERRATA) += qman_test_errata.o +obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o +qman_debugfs_interface-y = qman_debugfs.o + +# USDPAA +obj-$(CONFIG_FSL_DPA_UIO) += dpa_uio.o --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_config.c +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_config.c @@ -0,0 +1,690 @@ +/* Copyright (c) 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CONFIG_SMP +#include /* get_hard_smp_processor_id() */ +#endif +#include +#include + +#include "bman_private.h" + +/* Last updated for v00.79 of the BG */ + +struct bman; + +/* Register offsets */ +#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04)) +#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04)) +#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04)) +#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04)) +#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04)) +#define REG_FBPR_FPC 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FBPR_BARE 0x0c00 +#define REG_FBPR_BAR 0x0c04 +#define REG_FBPR_AR 0x0c10 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */ + +/* Used by all error interrupt registers except 'inhibit' */ +#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */ +#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */ +#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */ +#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */ +#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */ + +/* BMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI) + +union bman_ecir { + u32 ecir_raw; + struct { + u32 __reserved1:4; + u32 portal_num:4; + u32 __reserved2:12; + u32 numb:4; + u32 __reserved3:2; + u32 pid:6; + } __packed info; +}; + +union bman_eadr { + u32 eadr_raw; + struct { + u32 __reserved1:5; + u32 memid:3; + u32 __reserved2:14; + u32 eadr:10; + } __packed info; +}; + +struct bman_hwerr_txt { + u32 mask; + const char *txt; +}; + +#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b } + +static const struct bman_hwerr_txt bman_hwerr_txts[] = { + BMAN_HWE_TXT(IVCI, "Invalid Command Verb"), + BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"), + BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), + BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), + BMAN_HWE_TXT(BSCN, "Pool State Change Notification"), +}; +#define BMAN_HWE_COUNT (sizeof(bman_hwerr_txts)/sizeof(struct bman_hwerr_txt)) + +struct bman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} +static const struct bman_error_info_mdata error_mdata[] = { + BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"), + BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"), 
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"), +}; +#define BMAN_ERR_MDATA_COUNT \ + (sizeof(error_mdata)/sizeof(struct bman_error_info_mdata)) + +/* Add this in Kconfig */ +#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI) + +/** + * bm_err_isr__ - Manipulate global interrupt registers + * @v: for accessors that write values, this is the 32-bit value + * + * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All + * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of + * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means + * "write the enable register" rather than "enable the write register"! + */ +#define bm_err_isr_status_read(bm) __bm_err_isr_read(bm, bm_isr_status) +#define bm_err_isr_status_clear(bm, m) __bm_err_isr_write(bm, bm_isr_status,m) +#define bm_err_isr_enable_read(bm) __bm_err_isr_read(bm, bm_isr_enable) +#define bm_err_isr_enable_write(bm, v) __bm_err_isr_write(bm, bm_isr_enable,v) +#define bm_err_isr_disable_read(bm) __bm_err_isr_read(bm, bm_isr_disable) +#define bm_err_isr_disable_write(bm, v) __bm_err_isr_write(bm, bm_isr_disable,v) +#define bm_err_isr_inhibit(bm) __bm_err_isr_write(bm, bm_isr_inhibit,1) +#define bm_err_isr_uninhibit(bm) __bm_err_isr_write(bm, bm_isr_inhibit,0) + +/* + * TODO: unimplemented registers + * + * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT, + * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ + */ + +/* Encapsulate "struct bman *" as a cast of the register space address. */ + +static struct bman *bm_create(void *regs) +{ + return (struct bman *)regs; +} + +static inline u32 __bm_in(struct bman *bm, u32 offset) +{ + return in_be32((void *)bm + offset); +} +static inline void __bm_out(struct bman *bm, u32 offset, u32 val) +{ + out_be32((void *)bm + offset, val); +} +#define bm_in(reg) __bm_in(bm, REG_##reg) +#define bm_out(reg, val) __bm_out(bm, REG_##reg, val) + +static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n) +{ + return __bm_in(bm, REG_ERR_ISR + (n << 2)); +} + +static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val) +{ + __bm_out(bm, REG_ERR_ISR + (n << 2), val); +} + +#if 0 +static void bm_get_details(struct bman *bm, u8 *int_options, u8 *errata, + u8 *conf_options) +{ + u32 v = bm_in(IP_REV_1); + *int_options = (v >> 16) & 0xff; + *errata = (v >> 8) & 0xff; + *conf_options = v & 0xff; +} + +static u8 bm_get_corenet_sourceid(struct bman *bm) +{ + return bm_in(SRCIDR); +} + +static void bm_set_liodn(struct bman *bm, u16 liodn) +{ + bm_out(LIODNR, liodn & 0xfff); +} + +#endif + +static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor) +{ + u32 v = bm_in(IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +static u32 __generate_thresh(u32 val, int roundup) +{ + u32 e = 0; /* co-efficient, exponent */ + int oddbit = 0; + while(val > 0xff) { + oddbit = val & 1; + val >>= 1; + e++; + if(roundup && oddbit) + val++; + } + DPA_ASSERT(e < 0x10); + return (val | (e << 8)); +} + +static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt, + u32 hwdet, u32 hwdxt) +{ + DPA_ASSERT(pool < bman_pool_max); + bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0)); + bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1)); + bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0)); + bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1)); +} + +static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size) +{ + u32 exp = ilog2(size); + /* choke if size isn't within range */ + DPA_ASSERT((size 
>= 4096) && (size <= 1073741824) && + is_power_of_2(size)); + /* choke if '[e]ba' has lower-alignment than 'size' */ + DPA_ASSERT(!(ba & (size - 1))); + bm_out(FBPR_BARE, upper_32_bits(ba)); + bm_out(FBPR_BAR, lower_32_bits(ba)); + bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1)); +} + +/*****************/ +/* Config driver */ +/*****************/ + +/* We support only one of these. */ +static struct bman *bm; +static struct device_node *bm_node; + +/* TODO: Kconfig these? */ +#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12) + +/* Parse the property to extract the memory location and size and + * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default size. */ +static __init int parse_mem_property(struct device_node *node, const char *name, + dma_addr_t *addr, size_t *sz, int zero) +{ + const u32 *pint; + int ret; + + pint = of_get_property(node, name, &ret); + if (!pint || (ret != 16)) { + pr_info("No %s property '%s', using memblock_alloc(%016zx)\n", + node->full_name, name, *sz); + *addr = memblock_alloc(*sz, *sz); + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + return 0; + } + pr_info("Using %s property '%s'\n", node->full_name, name); + /* If using a "zero-pma", don't try to zero it, even if you asked */ + if (zero && of_find_property(node, "zero-pma", &ret)) { + pr_info(" it's a 'zero-pma', not zeroing from s/w\n"); + zero = 0; + } + *addr = ((u64)pint[0] << 32) | (u64)pint[1]; + *sz = ((u64)pint[2] << 32) | (u64)pint[3]; + /* Keep things simple, it's either all in the DRAM range or it's all + * outside. */ + if (*addr < memblock_end_of_DRAM()) { + BUG_ON((u64)*addr + (u64)*sz > memblock_end_of_DRAM()); + if (memblock_reserve(*addr, *sz) < 0) { + pr_err("Failed to reserve %s\n", name); + return -ENOMEM; + } + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + } else if (zero) { + /* map as cacheable, non-guarded */ + void *tmpp = ioremap_prot(*addr, *sz, 0); + memset(tmpp, 0, *sz); + iounmap(tmpp); + } + return 0; +} + +static int __init fsl_bman_init(struct device_node *node) +{ + struct resource res; + u32 __iomem *regs; + const char *s; + dma_addr_t fbpr_a = 0; /* gcc doesn't know this is unnecessary */ + size_t fbpr_sz = DEFAULT_FBPR_SZ; + int ret, standby = 0; + u16 id; + u8 major, minor; + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("Can't get %s property 'reg'\n", + node->full_name); + return ret; + } + s = of_get_property(node, "fsl,hv-claimable", &ret); + if (s && !strcmp(s, "standby")) + standby = 1; + if (!standby) { + ret = parse_mem_property(node, "fsl,bman-fbpr", + &fbpr_a, &fbpr_sz, 0); + BUG_ON(ret); + } + /* Global configuration */ + regs = ioremap(res.start, res.end - res.start + 1); + bm = bm_create(regs); + BUG_ON(!bm); + bm_node = node; + bm_get_version(bm, &id, &major, &minor); + pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor); + if ((major == 1) && (minor == 0)) { + bman_ip_rev = BMAN_REV10; + bman_pool_max = 64; + } else if ((major == 2) && (minor == 0)) { + bman_ip_rev = BMAN_REV20; + bman_pool_max = 8; + } else { + pr_warning("unknown Bman version, default to rev1.0\n"); + } + + if (standby) { + pr_info(" -> in standby mode\n"); + return 0; + } + /* FBPR memory */ + bm_set_memory(bm, fbpr_a, 0, fbpr_sz); + return 0; +} + +int bman_have_ccsr(void) +{ + return (bm ? 
1 : 0); +} + +int bm_pool_set(u32 bpid, const u32 *thresholds) +{ + if (!bm) + return -ENODEV; + bm_set_pool(bm, bpid, thresholds[0], thresholds[1], + thresholds[2], thresholds[3]); + return 0; +} +EXPORT_SYMBOL(bm_pool_set); + +__init void bman_init_early(void) +{ + struct device_node *dn; + for_each_compatible_node(dn, NULL, "fsl,bman") { + if (bm) + pr_err("%s: only one 'fsl,bman' allowed\n", + dn->full_name); + else { + int ret = fsl_bman_init(dn); + BUG_ON(ret); + } + } +} + +static void log_edata_bits(u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + pr_warning("Bman ErrInt, EDATA:\n"); + i = bit_count/32; + if (bit_count%32) { + i++; + mask = ~(mask << bit_count%32); + } + j = 16-i; + pr_warning(" 0x%08x\n", bm_in(EDATA(j)) & mask); + j++; + for (; j < 16; j++) + pr_warning(" 0x%08x\n", bm_in(EDATA(j))); +} + +static void log_additional_error_info(u32 isr_val, u32 ecsr_val) +{ + union bman_ecir ecir_val; + union bman_eadr eadr_val; + + ecir_val.ecir_raw = bm_in(ECIR); + /* Is portal info valid */ + if (ecsr_val & PORTAL_ECSR_ERR) { + pr_warning("Bman ErrInt: SWP id %d, numb %d, pid %d\n", + ecir_val.info.portal_num, ecir_val.info.numb, + ecir_val.info.pid); + } + if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) { + eadr_val.eadr_raw = bm_in(EADR); + pr_warning("Bman ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[eadr_val.info.memid].txt, + error_mdata[eadr_val.info.memid].addr_mask + & eadr_val.info.eadr); + log_edata_bits(error_mdata[eadr_val.info.memid].bits); + } +} + +/* Bman interrupt handler */ +static irqreturn_t bman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + + ier_val = bm_err_isr_enable_read(bm); + isr_val = bm_err_isr_status_read(bm); + ecsr_val = bm_in(ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + for (i = 0; i < BMAN_HWE_COUNT; i++) { + if (bman_hwerr_txts[i].mask & isr_mask) { + pr_warning("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt); + if (bman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(isr_mask, ecsr_val); + /* Re-arm error capture registers */ + bm_out(ECSR, ecsr_val); + } + if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) { + pr_devel("Bman un-enabling error 0x%x\n", + bman_hwerr_txts[i].mask); + ier_val &= ~bman_hwerr_txts[i].mask; + bm_err_isr_enable_write(bm, ier_val); + } + } + } + bm_err_isr_status_clear(bm, isr_val); + return IRQ_HANDLED; +} + +static int __bind_irq(void) +{ + int ret, err_irq; + + err_irq = of_irq_to_resource(bm_node, 0, NULL); + if (err_irq == NO_IRQ) { + pr_info("Can't get %s property '%s'\n", bm_node->full_name, + "interrupts"); + return -ENODEV; + } + ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node); + if (ret) { + pr_err("request_irq() failed %d for '%s'\n", ret, + bm_node->full_name); + return -ENODEV; + } + /* Disable Buffer Pool State Change */ + bm_err_isr_disable_write(bm, BM_EIRQ_BSCN); + /* Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
*/ + bm_err_isr_status_clear(bm, 0xffffffff); + /* Enable Error Interrupts */ + bm_err_isr_enable_write(bm, 0xffffffff); + return 0; +} + +/* Initialise Error Interrupt Handler */ +int bman_init_error_int(struct device_node *node) +{ + if (!bman_have_ccsr()) + return 0; + if (node != bm_node) + return -EINVAL; + return __bind_irq(); +} + +#ifdef CONFIG_SYSFS + +#define DRV_NAME "fsl-bman" + +static ssize_t show_fbpr_fpc(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC)); +}; + +static ssize_t show_pool_count(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + u32 data; + int i; + + if (!sscanf(dev_attr->attr.name, "%d", &i)) + return -EINVAL; + data = bm_in(POOL_CONTENT(i)); + return snprintf(buf, PAGE_SIZE, "%d\n", data); +}; + +static ssize_t show_err_isr(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR)); +}; + +static ssize_t show_sbec(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + int i; + + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) + return -EINVAL; + return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i))); +}; + +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); +static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL); + +/* Didn't use DEVICE_ATTR as 64 of this would be required. + * Initialize them when needed. */ +static char *name_attrs_pool_count; /* "xx" + null-terminator */ +static struct device_attribute *dev_attr_buffer_pool_count; + +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); + +static struct attribute *bman_dev_attributes[] = { + &dev_attr_fbpr_fpc.attr, + &dev_attr_err_isr.attr, + NULL +}; + +static struct attribute *bman_dev_ecr_attributes[] = { + &dev_attr_sbec_0.attr, + &dev_attr_sbec_1.attr, + NULL +}; + +static struct attribute **bman_dev_pool_count_attributes; + + +/* root level */ +static const struct attribute_group bman_dev_attr_grp = { + .name = NULL, + .attrs = bman_dev_attributes +}; +static const struct attribute_group bman_dev_ecr_grp = { + .name = "error_capture", + .attrs = bman_dev_ecr_attributes +}; +static struct attribute_group bman_dev_pool_countent_grp = { + .name = "pool_count", +}; + +static int of_fsl_bman_remove(struct platform_device *ofdev) +{ + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); + return 0; +}; + +static int of_fsl_bman_probe(struct platform_device *ofdev) +{ + int ret, i; + + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp); + if (ret) + goto done; + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); + if (ret) + goto del_group_0; + + name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3, + GFP_KERNEL); + if (!name_attrs_pool_count) { + pr_err("Can't alloc name_attrs_pool_count\n"); + goto del_group_1; + } + + dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) * + bman_pool_max, GFP_KERNEL); + if (!dev_attr_buffer_pool_count) { + pr_err("Can't alloc dev_attr-buffer_pool_count\n"); + goto del_group_2; + } + + bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) * + (bman_pool_max + 1), GFP_KERNEL); + if (!bman_dev_pool_count_attributes) { + pr_err("can't alloc bman_dev_pool_count_attributes\n"); + goto del_group_3; + } + + for (i = 0; i < (bman_pool_max + 1); i++) { + bman_dev_pool_count_attributes[i] = + kmalloc(sizeof(struct attribute), GFP_KERNEL); + if 
(!bman_dev_pool_count_attributes[i]) { + pr_err("cannot alloc for each" + " bman_dev_pool_count_attributes\n"); + goto del_group_3; + } + } + + for (i = 0; i < bman_pool_max; i++) { + ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i); + if (!ret) + goto del_group_4; + dev_attr_buffer_pool_count[i].attr.name = + (name_attrs_pool_count + i * 3); + dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR; + dev_attr_buffer_pool_count[i].show = show_pool_count; + bman_dev_pool_count_attributes[i] = + &dev_attr_buffer_pool_count[i].attr; + } + bman_dev_pool_count_attributes[bman_pool_max] = NULL; + + bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes; + + ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp); + if (ret) + goto del_group_4; + + goto done; + +del_group_4: + for (i = 0; i < (bman_pool_max + 1); i++) + kfree(bman_dev_pool_count_attributes[i]); + kfree(bman_dev_pool_count_attributes); +del_group_3: + kfree(dev_attr_buffer_pool_count); +del_group_2: + kfree(name_attrs_pool_count); +del_group_1: + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp); +del_group_0: + sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp); +done: + if (ret) + dev_err(&ofdev->dev, + "Cannot create dev attributes ret=%d\n", ret); + return ret; +}; + +static struct of_device_id of_fsl_bman_ids[] = { + { + .compatible = "fsl,bman", + }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_bman_ids); + +static struct platform_driver of_fsl_bman_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = of_fsl_bman_ids, + }, + .probe = of_fsl_bman_probe, + .remove = of_fsl_bman_remove, +}; + +static int bman_ctrl_init(void) +{ + return platform_driver_register(&of_fsl_bman_driver); +} + +static void bman_ctrl_exit(void) +{ + platform_driver_unregister(&of_fsl_bman_driver); +} + +module_init(bman_ctrl_init); +module_exit(bman_ctrl_exit); + +#endif /* CONFIG_SYSFS */ --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_debugfs.c +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_debugfs.c @@ -0,0 +1,121 @@ +/* Copyright 2010-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include + +static struct dentry *dfs_root; /* debugfs root directory */ + +/******************************************************************************* + * Query Buffer Pool State + ******************************************************************************/ +static int query_bp_state_show(struct seq_file *file, void *offset) +{ + int ret; + struct bm_pool_state state; + int i, j; + u32 mask; + + memset(&state, 0, sizeof(struct bm_pool_state)); + ret = bman_query_pools(&state); + if (ret) { + seq_printf(file, "Error %d\n", ret); + return 0; + } + seq_printf(file, "bp_id free_buffers_avail bp_depleted\n"); + for (i = 0; i < 2; i++) { + mask = 0x80000000; + for (j = 0; j < 32; j++) { + seq_printf(file, + " %-2u %-3s %-3s\n", + (i*32)+j, + (state.as.state.__state[i] & mask) ? "no" : "yes", + (state.ds.state.__state[i] & mask) ? "yes" : "no"); + mask >>= 1; + } + } + return 0; +} + +static int query_bp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, query_bp_state_show, NULL); +} + +static const struct file_operations query_bp_state_fops = { + .owner = THIS_MODULE, + .open = query_bp_state_open, + .read = seq_read, + .release = single_release, +}; + +static int __init bman_debugfs_module_init(void) +{ + int ret = 0; + struct dentry *d; + + dfs_root = debugfs_create_dir("bman", NULL); + + if (dfs_root == NULL) { + ret = -ENOMEM; + pr_err("Cannot create bman debugfs dir\n"); + goto _return; + } + d = debugfs_create_file("query_bp_state", + S_IRUGO, + dfs_root, + NULL, + &query_bp_state_fops); + if (d == NULL) { + ret = -ENOMEM; + pr_err("Cannot create query_bp_state\n"); + goto _return; + } + return 0; + +_return: + if (dfs_root) + debugfs_remove_recursive(dfs_root); + return ret; +} + +static void __exit bman_debugfs_module_exit(void) +{ + debugfs_remove_recursive(dfs_root); +} + + +module_init(bman_debugfs_module_init); +module_exit(bman_debugfs_module_exit); +MODULE_LICENSE("Dual BSD/GPL"); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_driver.c +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_driver.c @@ -0,0 +1,588 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include "bman_private.h" + +/* + * Global variables of the max portal/pool number this bman version supported + */ +u16 bman_ip_rev; +EXPORT_SYMBOL(bman_ip_rev); +u16 bman_pool_max; +EXPORT_SYMBOL(bman_pool_max); + +/*****************/ +/* Portal driver */ +/*****************/ + +/* Compatibility behaviour (when no bpool-range is present) is that; + * (a) on a control plane, all pools that aren't explicitly mentioned in the dtb + * are available for allocation, + * (b) on a non-control plane, there is never any allocation possible at all. + * + * New behaviour is that if any "fsl,bpool-range" nodes are declared, they + * declare what is available for allocation, and this is independent of which + * pools are/aren't mentioned in the dtb. Eg. to stipulate that no allocation is + * possible, a fsl,bpool-range should be specified with zero items in it. + * + * This "pools" struct contains the allocator, and "explicit allocator" + * indicates whether the range is seeded explicitly (via at least one range) or + * implicitly (by being the set of pools that aren't declared). 
+ */ +static struct bman_depletion pools; +static u8 num_pools; +static DEFINE_SPINLOCK(pools_lock); +static int explicit_allocator; + +static struct dpa_uio_class bman_uio = { + .list = LIST_HEAD_INIT(bman_uio.list), + .dev_prefix = "bman-uio-" +}; +const struct dpa_uio_class *dpa_uio_bman(void) +{ + return &bman_uio; +} +EXPORT_SYMBOL(dpa_uio_bman); + +static int __bm_pool_add(u32 bpid, u32 *cfg, int triplets) +{ + u64 total = 0; + BUG_ON(bpid >= bman_pool_max); +#ifdef CONFIG_FSL_BMAN_PORTAL + while (triplets--) { + struct bman_pool_params params = { + .bpid = bpid, + .flags = BMAN_POOL_FLAG_ONLY_RELEASE + }; + u64 c = ((u64)cfg[0] << 32) | cfg[1]; + u64 d = ((u64)cfg[2] << 32) | cfg[3]; + u64 b = ((u64)cfg[4] << 32) | cfg[5]; + struct bman_pool *pobj = bman_new_pool(¶ms); + if (!pobj) + return -ENOMEM; + while (c) { + struct bm_buffer bufs[8]; + int ret, num_bufs = 0; + do { + BUG_ON(b > 0xffffffffffffull); + bufs[num_bufs].bpid = bpid; + bm_buffer_set64(&bufs[num_bufs++], b); + b += d; + } while (--c && (num_bufs < 8)); + ret = bman_release(pobj, bufs, num_bufs, + BMAN_RELEASE_FLAG_WAIT); + if (ret) + panic("Seeding reserved buffer pool failed\n"); + total += num_bufs; + } + bman_free_pool(pobj); + cfg += 6; + } +#endif + /* Remove this pool from the allocator (by treating its declaration as + * an implicit "reservation") iff the allocator is *not* being set up + * explicitly defined via "bpool-range" nodes. */ + if (!explicit_allocator && !bman_depletion_get(&pools, bpid)) { + bman_depletion_set(&pools, bpid); + num_pools++; + } + if (total) + pr_info("Bman: reserved bpid %d, seeded %lld items\n", bpid, + total); + else + pr_info("Bman: reserved bpid %d\n", bpid); + return 0; +} + +int bm_pool_new(u32 *bpid) +{ + int ret = 0, b = bman_pool_max; + spin_lock(&pools_lock); + if (num_pools >= bman_pool_max) + ret = -ENOMEM; + else { + while (b-- && bman_depletion_get(&pools, b)) + ; + BUG_ON(b < 0); + bman_depletion_set(&pools, b); + *bpid = b; + num_pools++; + } + spin_unlock(&pools_lock); + return ret; +} +EXPORT_SYMBOL(bm_pool_new); + +void bm_pool_free(u32 bpid) +{ + spin_lock(&pools_lock); + BUG_ON(bpid >= bman_pool_max); + BUG_ON(!bman_depletion_get(&pools, bpid)); + bman_depletion_unset(&pools, bpid); + num_pools--; + spin_unlock(&pools_lock); +} +EXPORT_SYMBOL(bm_pool_free); + +#ifdef CONFIG_FSL_BMAN_PORTAL +/* To understand the use of this structure and the flow of operation for all + * this portal-setup code, please see qman_driver.c. The Bman case is much the + * same, but simpler (no Qman-specific fiddly bits). 
*/ +struct affine_portal_data { + struct completion done; + const struct bm_portal_config *pconfig; + struct bman_portal *redirect; + int recovery_mode; + struct bman_portal *portal; +}; + +static __init int thread_init_affine_portal(void *__data) +{ + struct affine_portal_data *data = __data; + const struct bm_portal_config *pconfig = data->pconfig; + if (data->redirect) + data->portal = bman_create_affine_slave(data->redirect); + else { + data->portal = bman_create_affine_portal(pconfig, + data->recovery_mode); +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW + if (data->portal) + bman_irqsource_add(BM_PIRQ_RCRI | BM_PIRQ_BSCN); +#endif + } + complete(&data->done); + return 0; +} + +static __init struct bman_portal *init_affine_portal( + struct bm_portal_config *pconfig, + int cpu, struct bman_portal *redirect, + int recovery_mode) +{ + struct affine_portal_data data = { + .done = COMPLETION_INITIALIZER_ONSTACK(data.done), + .pconfig = pconfig, + .redirect = redirect, + .recovery_mode = recovery_mode, + .portal = NULL + }; + struct task_struct *k = kthread_create(thread_init_affine_portal, &data, + "bman_affine%d", cpu); + int ret; + if (IS_ERR(k)) { + pr_err("Failed to init %sBman affine portal for cpu %d\n", + redirect ? "(slave) " : "", cpu); + return NULL; + } + kthread_bind(k, cpu); + wake_up_process(k); + wait_for_completion(&data.done); + ret = kthread_stop(k); + if (ret) { + pr_err("Bman portal initialisation failed, cpu %d, code %d\n", + cpu, ret); + return NULL; + } + if (data.portal) + pr_info("Bman portal %sinitialised, cpu %d\n", + redirect ? "(slave) " : + pconfig->public_cfg.is_shared ? "(shared) " : "", cpu); + return data.portal; +} +#endif + +static struct bm_portal_config * __init fsl_bman_portal_init( + struct device_node *node) +{ + struct bm_portal_config *pcfg; + const u32 *index; + const phandle *ph = NULL; + int irq, ret; + + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) { + pr_err("can't allocate portal config"); + return NULL; + } + + if (of_device_is_compatible(node, "fsl,bman-portal-1.0")) { + bman_ip_rev = BMAN_REV10; + bman_pool_max = 64; + } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0")) { + bman_ip_rev = BMAN_REV20; + bman_pool_max = 8; + } + + ret = of_address_to_resource(node, BM_ADDR_CE, + &pcfg->addr_phys[BM_ADDR_CE]); + if (ret) { + pr_err("Can't get %s property 'reg::CE'\n", node->full_name); + goto err; + } + ret = of_address_to_resource(node, BM_ADDR_CI, + &pcfg->addr_phys[BM_ADDR_CI]); + if (ret) { + pr_err("Can't get %s property 'reg::CI'\n", node->full_name); + goto err; + } + index = of_get_property(node, "cell-index", &ret); + if (!index || (ret != 4)) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "cell-index"); + goto err; + } + ph = of_get_property(node, "cpu-handle", &ret); + if (ph) { + if (ret != sizeof(phandle)) { + pr_err("Malformed %s property '%s'\n", node->full_name, + "cpu-handle"); + goto err; + } + ret = check_cpu_phandle(*ph); + if (ret < 0) + goto err; + pcfg->public_cfg.cpu = ret; + } else + pcfg->public_cfg.cpu = -1; + + irq = irq_of_parse_and_map(node, 0); + if (irq == NO_IRQ) { + pr_err("Can't get %s property 'interrupts'\n", node->full_name); + goto err; + } + pcfg->public_cfg.irq = irq; + pcfg->public_cfg.index = *index; + bman_depletion_fill(&pcfg->public_cfg.mask); + + if (of_get_property(node, "fsl,usdpaa-portal", &ret)) { + struct dpa_uio_portal *u = kmalloc(sizeof(*u), GFP_KERNEL); + if (!u) + goto err; + u->type = dpa_uio_portal_bman; + u->bm_cfg = pcfg; + list_add_tail(&u->node, 
&bman_uio.list); + /* Return NULL, otherwise the kernel may share it on CPUs that + * don't have their own portals, which would be ... *bad*. */ + return NULL; + } + + /* Map the portals now we know they aren't for UIO (the UIO code doesn't + * need the CE mapping, and so will do its own CI-only mapping). */ + pcfg->addr_virt[BM_ADDR_CE] = ioremap_prot( + pcfg->addr_phys[BM_ADDR_CE].start, + resource_size(&pcfg->addr_phys[BM_ADDR_CE]), + 0); + pcfg->addr_virt[BM_ADDR_CI] = ioremap_prot( + pcfg->addr_phys[BM_ADDR_CI].start, + resource_size(&pcfg->addr_phys[BM_ADDR_CI]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + return pcfg; +err: + kfree(pcfg); + return NULL; +} + +static void __init fsl_bman_portal_destroy(struct bm_portal_config *pcfg) +{ + iounmap(pcfg->addr_virt[BM_ADDR_CE]); + iounmap(pcfg->addr_virt[BM_ADDR_CI]); + kfree(pcfg); +} + +static int __init fsl_bpool_init(struct device_node *node) +{ + int ret; + u32 *cfg = NULL, *thresh; + struct device_node *tmp_node; + u32 *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret); + if (!bpid || (ret!= 4)) { + pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name); + return -ENODEV; + } + thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret); + if (thresh) { + if (ret != 16) { + pr_err("Invalid %s property '%s'\n", + node->full_name, "fsl,bpool-thresholds"); + return -ENODEV; + } +#ifndef CONFIG_FSL_BMAN_CONFIG + pr_err("Ignoring %s property '%s', no CCSR support\n", + node->full_name, "fsl,bpool-thresholds"); +#endif + } + /* If rebooted, we should not re-seed any pools via bpool-cfg. */ + /* TODO: parsing hypervisor fields to determine qualitative things like + * "was I rebooted" should probably be wrapped in fsl_hypervisor.h. */ + tmp_node = of_find_node_by_name(NULL, "hypervisor"); + if (!tmp_node || !of_find_property(tmp_node, "fsl,hv-stopped-by", + &ret)) + cfg = (u32 *)of_get_property(node, "fsl,bpool-cfg", &ret); + if (cfg && (!ret || (ret % 24))) { + pr_err("Invalid %s property '%s'\n", node->full_name, + "fsl,bpool-cfg"); + return -ENODEV; + } + if (cfg) + ret = __bm_pool_add(*bpid, cfg, ret / 24); + else + ret = __bm_pool_add(*bpid, NULL, 0); + if (ret) { + pr_err("Can't reserve bpid %d from node %s\n", *bpid, + node->full_name); + return ret; + } +#ifdef CONFIG_FSL_BMAN_CONFIG + if (thresh) { + ret = bm_pool_set(*bpid, thresh); + if (ret) + pr_err("No CCSR node for %s property '%s'\n", + node->full_name, "fsl,bpool-thresholds"); + } +#endif + return ret; +} + +static int __init fsl_bpool_range_init(struct device_node *node, + int recovery_mode) +{ + int ret, warned = 0; + u32 bpid; + u32 *range = (u32 *)of_get_property(node, "fsl,bpool-range", &ret); + if (!range) { + pr_err("No 'fsl,bpool-range' property in node %s\n", + node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("'fsl,bpool-range' is not a 2-cell range in node %s\n", + node->full_name); + return -EINVAL; + } + for (bpid = range[0]; bpid < (range[0] + range[1]); bpid++) { + if (bpid >= bman_pool_max) { + pr_err("BPIDs out of range in node %s\n", + node->full_name); + return -EINVAL; + } + if (!bman_depletion_get(&pools, bpid)) { + if (!warned) { + warned = 1; + pr_err("BPID overlap in node %s, ignoring\n", + node->full_name); + } + } else { + bman_depletion_unset(&pools, bpid); + num_pools--; + } + } +#ifdef CONFIG_FSL_BMAN_PORTAL + /* If in recovery mode *and* we are using a private BPID allocation + * range, then automatically clean up all BPIDs in that range so we can + * automatically exit recovery mode too. 
*/ + if (recovery_mode) { + for (bpid = range[0]; bpid < (range[0] + range[1]); bpid++) { + ret = bman_recovery_cleanup_bpid(bpid); + if (ret) { + pr_err("Failed to recovery BPID %d\n", bpid); + return ret; + } + } + } +#else + BUG_ON(recovery_mode); +#endif + pr_info("Bman: BPID allocator includes range %d:%d%s\n", + range[0], range[1], recovery_mode ? " (recovered)" : ""); + return 0; +} + +#ifdef CONFIG_FSL_BMAN_PORTAL +static __init int __leave_recovery(void *__data) +{ + struct completion *done = __data; + bman_recovery_exit_local(); + complete(done); + return 0; +} + +int bman_recovery_exit(void) +{ + struct completion done = COMPLETION_INITIALIZER_ONSTACK(done); + unsigned int cpu; + + for_each_cpu(cpu, bman_affine_cpus()) { + struct task_struct *k = kthread_create(__leave_recovery, &done, + "bman_recovery"); + int ret; + if (IS_ERR(k)) { + pr_err("Thread failure (recovery) on cpu %d\n", cpu); + return -ENOMEM; + } + kthread_bind(k, cpu); + wake_up_process(k); + wait_for_completion(&done); + ret = kthread_stop(k); + if (ret) { + pr_err("Failed to exit recovery on cpu %d\n", cpu); + return ret; + } + pr_info("Bman portal exited recovery, cpu %d\n", cpu); + } + return 0; +} +EXPORT_SYMBOL(bman_recovery_exit); +#endif + +static __init int bman_init(void) +{ +#ifdef CONFIG_FSL_BMAN_PORTAL + struct cpumask primary_cpus = *cpu_none_mask; + struct cpumask slave_cpus = *cpu_online_mask; + struct bman_portal *sharing_portal = NULL; + int sharing_cpu = -1; +#endif + struct device_node *dn; + struct bm_portal_config *pcfg; + int ret, recovery_mode = 0; + LIST_HEAD(cfg_list); + + for_each_compatible_node(dn, NULL, "fsl,bman") { + if (!bman_init_error_int(dn)) + pr_info("Bman err interrupt handler present\n"); + else + pr_err("Bman err interrupt handler missing\n"); + } + if (!bman_have_ccsr()) { + /* If there's no CCSR, our bpid allocator is empty unless + * fsl,bpool-range nodes are used. */ + bman_depletion_fill(&pools); + num_pools = bman_pool_max; + } +#ifdef CONFIG_FSL_BMAN_PORTAL + if (fsl_dpa_should_recover()) + recovery_mode = 1; + for_each_compatible_node(dn, NULL, "fsl,bman-portal") { + pcfg = fsl_bman_portal_init(dn); + if (pcfg) { + if (pcfg->public_cfg.cpu >= 0) { + cpumask_set_cpu(pcfg->public_cfg.cpu, + &primary_cpus); + list_add(&pcfg->list, &cfg_list); + } else + fsl_bman_portal_destroy(pcfg); + } + } + /* only consider "online" CPUs */ + cpumask_and(&primary_cpus, &primary_cpus, cpu_online_mask); + if (cpumask_empty(&primary_cpus)) + /* No portals, we're done */ + return 0; + if (!cpumask_subset(cpu_online_mask, &primary_cpus)) { + /* Need to do some sharing. In lieu of anything more scientific + * (or configurable), we pick the last-most CPU that has a + * portal and share that one. */ + int next = cpumask_first(&primary_cpus); + while (next < nr_cpu_ids) { + sharing_cpu = next; + next = cpumask_next(next, &primary_cpus); + } + } + /* Parsing is done and sharing decisions are made, now initialise the + * portals and determine which "slave" CPUs are left over. */ + list_for_each_entry(pcfg, &cfg_list, list) { + struct bman_portal *p; + int is_shared = (!sharing_portal && (sharing_cpu >= 0) && + (pcfg->public_cfg.cpu == sharing_cpu)); + pcfg->public_cfg.is_shared = is_shared; + /* If it's not mapped to a CPU, or another portal is already + * initialised to the same CPU, skip this portal. 
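 */

	/*
	 * init_affine_portal(), used just below, and bman_recovery_exit()
	 * above both lean on the same idiom for running one-shot setup code
	 * on a specific CPU: create a kthread, bind it to the target CPU
	 * before waking it, wait on an on-stack completion, then reap it with
	 * kthread_stop(). Stripped to its essentials (run_on_cpu(),
	 * thread_fn() and do_per_cpu_work() are placeholder names, not driver
	 * symbols):
	 *
	 *	static int thread_fn(void *data)
	 *	{
	 *		do_per_cpu_work();
	 *		complete((struct completion *)data);
	 *		return 0;
	 *	}
	 *
	 *	static int run_on_cpu(int cpu)
	 *	{
	 *		struct completion done;
	 *		struct task_struct *k;
	 *
	 *		init_completion(&done);
	 *		k = kthread_create(thread_fn, &done, "example/%d", cpu);
	 *		if (IS_ERR(k))
	 *			return PTR_ERR(k);
	 *		kthread_bind(k, cpu);
	 *		wake_up_process(k);
	 *		wait_for_completion(&done);
	 *		return kthread_stop(k);
	 *	}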
*/ + if (pcfg->public_cfg.cpu < 0 || !cpumask_test_cpu( + pcfg->public_cfg.cpu, &slave_cpus)) + continue; + p = init_affine_portal(pcfg, pcfg->public_cfg.cpu, NULL, + recovery_mode); + if (p) { + if (is_shared) + sharing_portal = p; + cpumask_clear_cpu(pcfg->public_cfg.cpu, &slave_cpus); + } + } + + if (sharing_portal) { + int loop; + for_each_cpu(loop, &slave_cpus) { + struct bman_portal *p = init_affine_portal(NULL, loop, + sharing_portal, recovery_mode); + if (!p) + pr_err("Failed slave Bman portal for cpu %d\n", + loop); + } + } +#else + for_each_compatible_node(dn, NULL, "fsl,bman-portal") { + pcfg = fsl_bman_portal_init(dn); + if (pcfg) + /* No kernel portal support, so if USDPAA didn't consume + * the portal, we've no other use for it. */ + fsl_bman_portal_destroy(pcfg); + } +#endif + for_each_compatible_node(dn, NULL, "fsl,bpool-range") { + if (!explicit_allocator) { + explicit_allocator = 1; + bman_depletion_fill(&pools); + num_pools = 64; + } + ret = fsl_bpool_range_init(dn, recovery_mode); + if (ret) + return ret; + } +#ifdef CONFIG_FSL_BMAN_PORTAL + /* If using private BPID allocation, exit recovery mode automatically + * (ie. after automatic recovery) */ + if (recovery_mode && explicit_allocator) { + ret = bman_recovery_exit(); + if (ret) + return ret; + } +#endif + for_each_compatible_node(dn, NULL, "fsl,bpool") { + ret = fsl_bpool_init(dn); + if (ret) + return ret; + } + pr_info("Bman portals initialised\n"); + return 0; +} +subsys_initcall(bman_init); --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_high.c +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_high.c @@ -0,0 +1,1043 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_low.h" + +/* Compilation constants */ +#define RCR_THRESH 2 /* reread h/w CI when running out of space */ +#define IRQNAME "BMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ + +struct bman_portal { + struct bm_portal p; + /* 2-element array. pools[0] is mask, pools[1] is snapshot. */ + struct bman_depletion *pools; + int thresh_set; + unsigned long irq_sources; + u32 slowpoll; /* only used when interrupts are off */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */ +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + spinlock_t sharing_lock; /* only used if is_shared */ + int is_shared; + struct bman_portal *sharing_redirect; +#endif + /* When the cpu-affine portal is activated, this is non-NULL */ + const struct bm_portal_config *config; + /* 64-entry hash-table of pool objects that are tracking depletion + * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so + * we're not fussy about cache-misses and so forth - whereas the above + * members should all fit in one cacheline. + * BTW, with 64 entries in the hash table and 64 buffer pools to track, + * you'll never guess the hash-function ... */ + struct bman_pool *cb[64]; + char irqname[MAX_IRQNAME]; +}; + +/* For an explanation of the locking, redirection, or affine-portal logic, + * please consult the Qman driver for details. This is the same, only simpler + * (no fiddly Qman-specific bits.) */ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +#define PORTAL_IRQ_LOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ + else \ + local_irq_save(irqflags); \ + } while (0) +#define PORTAL_IRQ_UNLOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + spin_unlock_irqrestore(&(p)->sharing_lock, irqflags); \ + else \ + local_irq_restore(irqflags); \ + } while (0) +#else +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) +#endif + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal); +static inline struct bman_portal *get_raw_affine_portal(void) +{ + return &get_cpu_var(bman_affine_portal); +} +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +static inline struct bman_portal *get_affine_portal(void) +{ + struct bman_portal *p = get_raw_affine_portal(); + if (p->sharing_redirect) + return p->sharing_redirect; + return p; +} +#else +#define get_affine_portal() get_raw_affine_portal() +#endif +static inline void put_affine_portal(void) +{ + put_cpu_var(bman_affine_portal); +} + +/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be + * more than one such object per Bman buffer pool, eg. if different users of the + * pool are operating via different portals. */ +struct bman_pool { + struct bman_pool_params params; + /* Used for hash-table admin when using depletion notifications. 
*/ + struct bman_portal *portal; + struct bman_pool *next; + /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */ + struct bm_buffer *sp; + unsigned int sp_fill; +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_t in_use; +#endif +}; + +/* (De)Registration of depletion notification callbacks */ +static void depletion_link(struct bman_portal *portal, struct bman_pool *pool) +{ + __maybe_unused unsigned long irqflags; + pool->portal = portal; + PORTAL_IRQ_LOCK(portal, irqflags); + pool->next = portal->cb[pool->params.bpid]; + portal->cb[pool->params.bpid] = pool; + if (!pool->next) + /* First object for that bpid on this portal, enable the BSCN + * mask bit. */ + bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1); + PORTAL_IRQ_UNLOCK(portal, irqflags); +} +static void depletion_unlink(struct bman_pool *pool) +{ + struct bman_pool *it, *last = NULL; + struct bman_pool **base = &pool->portal->cb[pool->params.bpid]; + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(pool->portal, irqflags); + it = *base; /* <-- gotcha, don't do this prior to the irq_save */ + while (it != pool) { + last = it; + it = it->next; + } + if (!last) + *base = pool->next; + else + last->next = pool->next; + if (!last && !pool->next) { + /* Last object for that bpid on this portal, disable the BSCN + * mask bit. */ + bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0); + /* And "forget" that we last saw this pool as depleted */ + bman_depletion_unset(&pool->portal->pools[1], + pool->params.bpid); + } + PORTAL_IRQ_UNLOCK(pool->portal, irqflags); +} + +/* In the case that the application's core loop calls qman_poll() and + * bman_poll(), we ought to balance how often we incur the overheads of the + * slow-path poll. We'll use two decrementer sources. The idle decrementer + * constant is used when the last slow-poll detected no work to do, and the busy + * decrementer constant when the last slow-poll had work to do. */ +#define SLOW_POLL_IDLE 1000 +#define SLOW_POLL_BUSY 10 +static u32 __poll_portal_slow(struct bman_portal *p, u32 is); + +#ifdef CONFIG_FSL_DPA_HAVE_IRQ +/* Portal interrupt handler */ +static irqreturn_t portal_isr(__always_unused int irq, void *ptr) +{ + struct bman_portal *p = ptr; + u32 clear = p->irq_sources; + u32 is = bm_isr_status_read(&p->p) & p->irq_sources; + clear |= __poll_portal_slow(p, is); + bm_isr_status_clear(&p->p, clear); + return IRQ_HANDLED; +} +#endif + +struct bman_portal *bman_create_affine_portal( + const struct bm_portal_config *config, + int recovery_mode __maybe_unused) +{ + struct bman_portal *portal = get_raw_affine_portal(); + struct bm_portal *__p = &portal->p; + const struct bman_depletion *pools = &config->public_cfg.mask; + int ret; + + /* prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... 
*/ + __p->addr.addr_ce = config->addr_virt[BM_ADDR_CE]; + __p->addr.addr_ci = config->addr_virt[BM_ADDR_CI]; + if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) { + pr_err("Bman RCR initialisation failed\n"); + goto fail_rcr; + } + if (bm_mc_init(__p)) { + pr_err("Bman MC initialisation failed\n"); + goto fail_mc; + } + if (bm_isr_init(__p)) { + pr_err("Bman ISR initialisation failed\n"); + goto fail_isr; + } + if (!pools) + portal->pools = NULL; + else { + u8 bpid = 0; + portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL); + if (!portal->pools) + goto fail_pools; + portal->pools[0] = *pools; + bman_depletion_init(portal->pools + 1); + while (bpid < bman_pool_max) { + /* Default to all BPIDs disabled, we enable as required + * at run-time. */ + bm_isr_bscn_mask(__p, bpid, 0); + bpid++; + } + } + portal->slowpoll = 0; +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + portal->rcri_owned = NULL; +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + spin_lock_init(&portal->sharing_lock); + portal->is_shared = config->public_cfg.is_shared; + portal->sharing_redirect = NULL; +#endif + memset(&portal->cb, 0, sizeof(portal->cb)); + /* Write-to-clear any stale interrupt status bits */ + bm_isr_disable_write(__p, 0xffffffff); + portal->irq_sources = 0; + bm_isr_enable_write(__p, portal->irq_sources); + bm_isr_status_clear(__p, 0xffffffff); +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); + if (request_irq(config->public_cfg.irq, portal_isr, + IRQF_NOBALANCING | IRQF_DISABLED, portal->irqname, + portal)) { + pr_err("request_irq() failed\n"); + goto fail_irq; + } + if (config->public_cfg.cpu != -1) { + disable_irq(config->public_cfg.irq); + irq_set_affinity(config->public_cfg.irq, + cpumask_of(config->public_cfg.cpu)); + } + enable_irq(config->public_cfg.irq); + /* Enable the bits that make sense */ + if (!recovery_mode) + bm_isr_uninhibit(__p); +#endif + /* Need RCR to be empty before continuing */ + bm_isr_disable_write(__p, ~BM_PIRQ_RCRI); + ret = bm_rcr_get_fill(__p); + if (ret) { + pr_err("Bman RCR unclean, need recovery\n"); + goto fail_rcr_empty; + } + /* Success */ + portal->config = config; + spin_lock(&affine_mask_lock); + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + bm_isr_disable_write(__p, 0); + put_affine_portal(); + return portal; +fail_rcr_empty: +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + free_irq(config->public_cfg.irq, portal); +fail_irq: +#endif + if (portal->pools) + kfree(portal->pools); +fail_pools: + bm_isr_finish(__p); +fail_isr: + bm_mc_finish(__p); +fail_mc: + bm_rcr_finish(__p); +fail_rcr: + put_affine_portal(); + return NULL; +} + +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect) +{ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + struct bman_portal *p = get_raw_affine_portal(); + BUG_ON(p->config); + BUG_ON(p->is_shared); + BUG_ON(!redirect->config->public_cfg.is_shared); + p->irq_sources = 0; + p->sharing_redirect = redirect; + put_affine_portal(); + return p; +#else + BUG(); + return NULL; +#endif +} + +const struct bm_portal_config *bman_destroy_affine_portal(void) +{ + struct bman_portal *bm = get_raw_affine_portal(); + const struct bm_portal_config *pcfg; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (bm->sharing_redirect) { + bm->sharing_redirect = NULL; + put_affine_portal(); + return NULL; + } + bm->is_shared = 0; +#endif + pcfg = bm->config; + bm_rcr_cce_update(&bm->p); + bm_rcr_cce_update(&bm->p); +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + free_irq(pcfg->public_cfg.irq, bm); +#endif + 
kfree(bm->pools); + bm_isr_finish(&bm->p); + bm_mc_finish(&bm->p); + bm_rcr_finish(&bm->p); + bm->config = NULL; + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + put_affine_portal(); + return pcfg; +} + +/* When release logic waits on available RCR space, we need a global waitqueue + * in the case of "affine" use (as the waits wake on different cpus which means + * different portals - so we can't wait on any per-portal waitqueue). */ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static u32 __poll_portal_slow(struct bman_portal *p, u32 is) +{ + struct bman_depletion tmp; + u32 ret = is; + + /* There is a gotcha to be aware of. If we do the query before clearing + * the status register, we may miss state changes that occur between the + * two. If we write to clear the status register before the query, the + * cache-enabled query command may overtake the status register write + * unless we use a heavyweight sync (which we don't want). Instead, we + * write-to-clear the status register then *read it back* before doing + * the query, hence the odd while loop with the 'is' accumulation. */ + if (is & BM_PIRQ_BSCN) { + struct bm_mc_result *mcr; + __maybe_unused unsigned long irqflags; + unsigned int i, j; + u32 __is; + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); + while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) { + is |= __is; + bm_isr_status_clear(&p->p, BM_PIRQ_BSCN); + } + is &= ~BM_PIRQ_BSCN; + PORTAL_IRQ_LOCK(p, irqflags); + bm_mc_start(&p->p); + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); + while (!(mcr = bm_mc_result(&p->p))) + cpu_relax(); + tmp = mcr->query.ds.state; + PORTAL_IRQ_UNLOCK(p, irqflags); + for (i = 0; i < 2; i++) { + int idx = i * 32; + /* tmp is a mask of currently-depleted pools. + * pools[0] is mask of those we care about. + * pools[1] is our previous view (we only want to + * be told about changes). */ + tmp.__state[i] &= p->pools[0].__state[i]; + if (tmp.__state[i] == p->pools[1].__state[i]) + /* fast-path, nothing to see, move along */ + continue; + for (j = 0; j <= 31; j++, idx++) { + struct bman_pool *pool = p->cb[idx]; + int b4 = bman_depletion_get(&p->pools[1], idx); + int af = bman_depletion_get(&tmp, idx); + if (b4 == af) + continue; + while (pool) { + pool->params.cb(p, pool, + pool->params.cb_ctx, af); + pool = pool->next; + } + } + } + p->pools[1] = tmp; + } + + if (is & BM_PIRQ_RCRI) { + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + bm_rcr_cce_update(&p->p); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + /* If waiting for sync, we only cancel the interrupt threshold + * when the ring utilisation hits zero. 
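 */

	/*
	 * The BSCN block above boils down to edge detection on a pair of
	 * bitmaps: the fresh hardware state is masked to the pools this
	 * portal cares about, compared with the previous snapshot, and the
	 * callbacks run only for pools whose state actually changed. The same
	 * comparison in isolation, using plain LSB-first bit numbering for
	 * clarity (the driver's own bman_depletion_*() helpers define their
	 * own bit layout; notify() stands in for the callback-chain walk):
	 *
	 *	unsigned long now = hw_state & mask;
	 *	unsigned long changed = now ^ prev;
	 *	int i;
	 *
	 *	for_each_set_bit(i, &changed, BITS_PER_LONG)
	 *		notify(i, test_bit(i, &now));
	 *	prev = now;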
*/ + if (p->rcri_owned) { + if (!bm_rcr_get_fill(&p->p)) { + p->rcri_owned = NULL; + bm_rcr_set_ithresh(&p->p, 0); + } + } else +#endif + bm_rcr_set_ithresh(&p->p, 0); + PORTAL_IRQ_UNLOCK(p, irqflags); + wake_up(&affine_queue); + bm_isr_status_clear(&p->p, BM_PIRQ_RCRI); + is &= ~BM_PIRQ_RCRI; + } + + /* There should be no status register bits left undefined */ + DPA_ASSERT(!is); + return ret; +} + +const struct bman_portal_config *bman_get_portal_config(void) +{ + struct bman_portal *p = get_affine_portal(); + const struct bman_portal_config *ret = &p->config->public_cfg; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(bman_get_portal_config); + +u32 bman_irqsource_get(void) +{ + struct bman_portal *p = get_raw_affine_portal(); + u32 ret = p->irq_sources & BM_PIRQ_VISIBLE; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(bman_irqsource_get); + +int bman_irqsource_add(__maybe_unused u32 bits) +{ +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + struct bman_portal *p = get_raw_affine_portal(); + int ret = 0; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) + ret = -EINVAL; + else +#endif + { + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources); + bm_isr_enable_write(&p->p, p->irq_sources); + PORTAL_IRQ_UNLOCK(p, irqflags); + } + put_affine_portal(); + return ret; +#else + pr_err("No Bman portal IRQ support, mustn't specify IRQ flags!"); + return -EINVAL; +#endif +} +EXPORT_SYMBOL(bman_irqsource_add); + +int bman_irqsource_remove(u32 bits) +{ +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + struct bman_portal *p = get_raw_affine_portal(); + __maybe_unused unsigned long irqflags; + u32 ier; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) { + put_affine_portal(); + return -EINVAL; + } +#endif + /* Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. */ + PORTAL_IRQ_LOCK(p, irqflags); + bits &= BM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + bm_isr_enable_write(&p->p, p->irq_sources); + ier = bm_isr_enable_read(&p->p); + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
*/ + bm_isr_status_clear(&p->p, ~ier); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +#else + pr_err("No Bman portal IRQ support, mustn't specify IRQ flags!"); + return -EINVAL; +#endif +} +EXPORT_SYMBOL(bman_irqsource_remove); + +const cpumask_t *bman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(bman_affine_cpus); + +u32 bman_poll_slow(void) +{ + struct bman_portal *p = get_raw_affine_portal(); + u32 ret; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + ret = (u32)-1; + else +#endif + { + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; + ret = __poll_portal_slow(p, is); + bm_isr_status_clear(&p->p, ret); + } + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(bman_poll_slow); + +/* Legacy wrapper */ +void bman_poll(void) +{ + struct bman_portal *p = get_raw_affine_portal(); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + goto done; +#endif + if (!(p->slowpoll--)) { + u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources; + u32 active = __poll_portal_slow(p, is); + if (active) + p->slowpoll = SLOW_POLL_BUSY; + else + p->slowpoll = SLOW_POLL_IDLE; + } +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +done: +#endif + put_affine_portal(); +} +EXPORT_SYMBOL(bman_poll); + +int bman_recovery_cleanup_bpid(u32 bpid) +{ + struct bman_pool pool = { + .params = { + .bpid = bpid + } + }; + struct bm_buffer bufs[8]; + int ret = 0; + unsigned int num_bufs = 0; + do { + /* Acquire is all-or-nothing, so we drain in 8s, then in + * 1s for the remainder. */ + if (ret != 1) + ret = bman_acquire(&pool, bufs, 8, 0); + if (ret < 8) + ret = bman_acquire(&pool, bufs, 1, 0); + if (ret > 0) + num_bufs += ret; + } while (ret > 0); + if (num_bufs) + pr_info("Bman: BPID %d recovered (%d bufs)\n", bpid, num_bufs); + return 0; +} +EXPORT_SYMBOL(bman_recovery_cleanup_bpid); + +/* called from bman_driver.c::bman_recovery_exit() only (if exporting, use + * get_raw_affine_portal() and check for the "SLAVE" bit). 
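 */

/*
 * bman_recovery_cleanup_bpid() above and bman_free_pool() below drain a pool
 * with the same shape of loop: acquires are all-or-nothing, so 8 buffers at a
 * time are requested until that fails, then single buffers mop up the
 * remainder. A stand-alone version of that loop is sketched here for
 * reference; bman_drain_count() is illustrative only, not part of the driver
 * API.
 */
static unsigned int __maybe_unused bman_drain_count(struct bman_pool *pool)
{
	struct bm_buffer bufs[8];
	unsigned int drained = 0;
	int ret = 0;

	do {
		/* Try a batch of 8 first, fall back to singles */
		if (ret != 1)
			ret = bman_acquire(pool, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(pool, bufs, 1, 0);
		if (ret > 0)
			drained += ret;
	} while (ret > 0);
	return drained;
}

/*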
*/ +void bman_recovery_exit_local(void) +{ + struct bman_portal *p = get_affine_portal(); + bm_isr_status_clear(&p->p, 0xffffffff); + bm_isr_uninhibit(&p->p); + put_affine_portal(); +} + +static const u32 zero_thresholds[4] = {0, 0, 0, 0}; + +struct bman_pool *bman_new_pool(const struct bman_pool_params *params) +{ + struct bman_pool *pool = NULL; + u32 bpid; + + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { + int ret = bm_pool_new(&bpid); + if (ret) + return NULL; + } else { + if (params->bpid >= bman_pool_max) + return NULL; + bpid = params->bpid; + } +#ifdef CONFIG_FSL_BMAN_CONFIG + if (params->flags & BMAN_POOL_FLAG_THRESH) { + int ret = bm_pool_set(bpid, params->thresholds); + if (ret) + goto err; + } +#else + if (params->flags & BMAN_POOL_FLAG_THRESH) + goto err; +#endif + pool = kmalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + goto err; + pool->sp = NULL; + pool->sp_fill = 0; + pool->params = *params; +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_set(&pool->in_use, 1); +#endif + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) + pool->params.bpid = bpid; + if (params->flags & BMAN_POOL_FLAG_STOCKPILE) { + pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ, + GFP_KERNEL); + if (!pool->sp) + goto err; + } + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) { + struct bman_portal *p = get_affine_portal(); + if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) { + pr_err("Depletion events disabled for bpid %d\n", bpid); + goto err; + } + depletion_link(p, pool); + put_affine_portal(); + } + return pool; +err: +#ifdef CONFIG_FSL_BMAN_CONFIG + if (params->flags & BMAN_POOL_FLAG_THRESH) + bm_pool_set(bpid, zero_thresholds); +#endif + if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) + bm_pool_free(bpid); + if (pool) { + if (pool->sp) + kfree(pool->sp); + kfree(pool); + } + return NULL; +} +EXPORT_SYMBOL(bman_new_pool); + +void bman_free_pool(struct bman_pool *pool) +{ +#ifdef CONFIG_FSL_BMAN_CONFIG + if (pool->params.flags & BMAN_POOL_FLAG_THRESH) + bm_pool_set(pool->params.bpid, zero_thresholds); +#endif + if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) + depletion_unlink(pool); + if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) { + if (pool->sp_fill) + pr_err("Stockpile not flushed, has %u in bpid %u.\n", + pool->sp_fill, pool->params.bpid); + kfree(pool->sp); + pool->sp = NULL; + pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE; + } + if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { + /* When releasing a BPID to the dynamic allocator, that pool + * must be *empty*. This code makes it so by dropping everything + * into the bit-bucket. This ignores whether or not it was a + * mistake (or a leak) on the caller's part not to drain the + * pool beforehand. */ + struct bm_buffer bufs[8]; + int ret = 0; + do { + /* Acquire is all-or-nothing, so we drain in 8s, then in + * 1s for the remainder. 
*/ + if (ret != 1) + ret = bman_acquire(pool, bufs, 8, 0); + if (ret < 8) + ret = bman_acquire(pool, bufs, 1, 0); + } while (ret > 0); + bm_pool_free(pool->params.bpid); + } + kfree(pool); +} +EXPORT_SYMBOL(bman_free_pool); + +const struct bman_pool_params *bman_get_params(const struct bman_pool *pool) +{ + return &pool->params; +} +EXPORT_SYMBOL(bman_get_params); + +static noinline void update_rcr_ci(struct bman_portal *p, u8 avail) +{ + if (avail) + bm_rcr_cce_prefetch(&p->p); + else + bm_rcr_cce_update(&p->p); +} + +int bman_rcr_is_empty(void) +{ + __maybe_unused unsigned long irqflags; + struct bman_portal *p = get_affine_portal(); + u8 avail; + + PORTAL_IRQ_LOCK(p, irqflags); + update_rcr_ci(p, 0); + avail = bm_rcr_get_fill(&p->p); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return (avail == 0); +} +EXPORT_SYMBOL(bman_rcr_is_empty); + +static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p, +#ifdef CONFIG_FSL_DPA_CAN_WAIT + __maybe_unused struct bman_pool *pool, +#endif + __maybe_unused unsigned long *irqflags, + __maybe_unused u32 flags) +{ + struct bm_rcr_entry *r; + u8 avail; + + *p = get_affine_portal(); + PORTAL_IRQ_LOCK(*p, (*irqflags)); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { + if ((*p)->rcri_owned) { + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + return NULL; + } + (*p)->rcri_owned = pool; + } +#endif + avail = bm_rcr_get_avail(&(*p)->p); + if (avail < 2) + update_rcr_ci(*p, avail); + r = bm_rcr_start(&(*p)->p); + if (unlikely(!r)) { +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) + (*p)->rcri_owned = NULL; +#endif + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + } + return r; +} + +#ifdef CONFIG_FSL_DPA_CAN_WAIT +static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p, + struct bman_pool *pool, + __maybe_unused unsigned long *irqflags, + u32 flags) +{ + struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags); + if (!rcr) + bm_rcr_set_ithresh(&(*p)->p, 1); + return rcr; +} + +static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p, + struct bman_pool *pool, + __maybe_unused unsigned long *irqflags, + u32 flags) +{ + struct bm_rcr_entry *rcr; +#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC + pool = NULL; +#endif + if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (rcr = __wait_rel_start(p, pool, irqflags, flags))); + else + wait_event(affine_queue, + (rcr = __wait_rel_start(p, pool, irqflags, flags))); + return rcr; +} +#endif + +/* to facilitate better copying of bufs into the ring without either (a) copying + * noise into the first byte (prematurely triggering the command), nor (b) being + * very inefficient by copying small fields using read-modify-write */ +struct overlay_bm_buffer { + u32 first; + u32 second; +}; + +static inline int __bman_release(struct bman_pool *pool, + const struct bm_buffer *bufs, u8 num, u32 flags) +{ + struct bman_portal *p; + struct bm_rcr_entry *r; + struct overlay_bm_buffer *o_dest; + struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0]; + __maybe_unused unsigned long irqflags; + u32 i = num - 1; + +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & BMAN_RELEASE_FLAG_WAIT) + r = wait_rel_start(&p, pool, &irqflags, flags); + else + r = try_rel_start(&p, pool, &irqflags, flags); +#else + r = try_rel_start(&p, &irqflags, flags); 
+#endif + if (!r) + return -EBUSY; + /* We can copy all but the first entry, as this can trigger badness + * with the valid-bit. Use the overlay to mask the verb byte. */ + o_dest = (struct overlay_bm_buffer *)&r->bufs[0]; + o_dest->first = (o_src->first & 0x0000ffff) | + (((u32)pool->params.bpid << 16) & 0x00ff0000); + o_dest->second = o_src->second; + if (i) + copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0])); + bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE | + (num & BM_RCR_VERB_BUFCOUNT_MASK)); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + /* if we wish to sync we need to set the threshold after h/w sees the + * new ring entry. As we're mixing cache-enabled and cache-inhibited + * accesses, this requires a heavy-weight sync. */ + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { + hwsync(); + bm_rcr_set_ithresh(&p->p, 1); + } +#endif + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) && + (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) { + if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (p->rcri_owned != pool)); + else + wait_event(affine_queue, (p->rcri_owned != pool)); + } +#endif + return 0; +} + +int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num, + u32 flags) +{ + int ret = 0; +#ifdef CONFIG_FSL_DPA_CHECKING + if (!num || (num > 8)) + return -EINVAL; + if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE) + return -EINVAL; + if (!atomic_dec_and_test(&pool->in_use)) { + pr_crit("Parallel attempts to enter bman_released() detected."); + panic("only one instance of bman_released/acquired allowed"); + } +#endif + /* Without stockpile, this API is a pass-through to the h/w operation */ + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) { + ret = __bman_release(pool, bufs, num, flags); + goto release_done; + } + /* This needs some explanation. Adding the given buffers may take the + * stockpile over the threshold, but in fact the stockpile may already + * *be* over the threshold if a previous release-to-hw attempt had + * failed. So we have 3 cases to cover; + * 1. we add to the stockpile and don't hit the threshold, + * 2. we add to the stockpile, hit the threshold and release-to-hw, + * 3. we have to release-to-hw before adding to the stockpile + * (not enough room in the stockpile for case 2). + * Our constraints on thresholds guarantee that in case 3, there must be + * at least 8 bufs already in the stockpile, so all release-to-hw ops + * are for 8 bufs. Despite all this, the API must indicate whether the + * given buffers were taken off the caller's hands, irrespective of + * whether a release-to-hw was attempted. */ + while (num) { + /* Add buffers to stockpile if they fit */ + if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) { + copy_words(pool->sp + pool->sp_fill, bufs, + sizeof(struct bm_buffer) * num); + pool->sp_fill += num; + num = 0; /* --> will return success no matter what */ + } + /* Do hw op if hitting the high-water threshold */ + if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) { + ret = __bman_release(pool, + pool->sp + (pool->sp_fill - 8), 8, flags); + if (ret) { + ret = (num ? 
ret : 0); + goto release_done; + } + pool->sp_fill -= 8; + } + } +release_done: +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_inc(&pool->in_use); +#endif + return ret; +} +EXPORT_SYMBOL(bman_release); + +static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, + u8 num) +{ + struct bman_portal *p = get_affine_portal(); + struct bm_mc_command *mcc; + struct bm_mc_result *mcr; + __maybe_unused unsigned long irqflags; + int ret; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = bm_mc_start(&p->p); + mcc->acquire.bpid = pool->params.bpid; + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | + (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); + while (!(mcr = bm_mc_result(&p->p))) + cpu_relax(); + ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; + if (bufs) + copy_words(&bufs[0], &mcr->acquire.bufs[0], + num * sizeof(bufs[0])); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (ret != num) + ret = -ENOMEM; + return ret; +} + +int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num, + u32 flags) +{ + int ret = 0; +#ifdef CONFIG_FSL_DPA_CHECKING + if (!num || (num > 8)) + return -EINVAL; + if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE) + return -EINVAL; + if (!atomic_dec_and_test(&pool->in_use)) { + pr_crit("Parallel attempts to enter bman_acquire() detected."); + panic("only one instance of bman_released/acquired allowed"); + } +#endif + /* Without stockpile, this API is a pass-through to the h/w operation */ + if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE)) { + ret = __bman_acquire(pool, bufs, num); + goto acquire_done; + } + /* Only need a h/w op if we'll hit the low-water thresh */ + if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) && + (pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) { + /* refill stockpile with max amount, but if max amount + * isn't available, try amount the user wants */ + int bufcount = 8; + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount); + if (ret < 0 && bufcount != num) { + bufcount = num; + /* Maybe buffer pool has less than 8 */ + ret = __bman_acquire(pool, pool->sp + pool->sp_fill, + bufcount); + } + if (ret < 0) + goto hw_starved; + DPA_ASSERT(ret == bufcount); + pool->sp_fill += bufcount; + } else { +hw_starved: + if (pool->sp_fill < num) { + ret = -ENOMEM; + goto acquire_done; + } + } + copy_words(bufs, pool->sp + (pool->sp_fill - num), + sizeof(struct bm_buffer) * num); + pool->sp_fill -= num; + ret = num; +acquire_done: +#ifdef CONFIG_FSL_DPA_CHECKING + atomic_inc(&pool->in_use); +#endif + return ret; +} +EXPORT_SYMBOL(bman_acquire); + +int bman_flush_stockpile(struct bman_pool *pool, u32 flags) +{ + u8 num; + int ret; + + while (pool->sp_fill) { + num = ((pool->sp_fill > 8) ? 
8 : pool->sp_fill); + ret = __bman_release(pool, pool->sp + (pool->sp_fill - num), + num, flags); + if (ret) + return ret; + pool->sp_fill -= num; + } + return 0; +} +EXPORT_SYMBOL(bman_flush_stockpile); + +int bman_query_pools(struct bm_pool_state *state) +{ + struct bman_portal *p = get_affine_portal(); + struct bm_mc_result *mcr; + __maybe_unused unsigned long irqflags; + + PORTAL_IRQ_LOCK(p, irqflags); + bm_mc_start(&p->p); + bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); + while (!(mcr = bm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY); + *state = mcr->query; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(bman_query_pools); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_low.h +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_low.h @@ -0,0 +1,494 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "bman_private.h" + +/***************************/ +/* Portal register assists */ +/***************************/ + +/* Cache-inhibited register offsets */ +#define REG_RCR_PI_CINH 0x0000 +#define REG_RCR_CI_CINH 0x0004 +#define REG_RCR_ITR 0x0008 +#define REG_CFG 0x0100 +#define REG_SCN(n) (0x0200 + ((n) << 2)) +#define REG_ISR 0x0e00 + +/* Cache-enabled register offsets */ +#define CL_CR 0x0000 +#define CL_RR0 0x0100 +#define CL_RR1 0x0140 +#define CL_RCR 0x1000 +#define CL_RCR_PI_CENA 0x3000 +#define CL_RCR_CI_CENA 0x3100 + +/* BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses via lwsync(), hwsync(), and + * data-dependencies. Use of barrier()s or other order-preserving primitives + * simply degrade performance. 
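 */

/*
 * A concrete instance of that discipline is the valid-bit commit used by
 * bm_rcr_pvb_commit() and bm_mc_commit() below: the ring entry is filled in
 * first, lwsync() orders those stores, and only then is the verb byte (which
 * carries the valid-bit) written and the cacheline flushed, so hardware can
 * never see a valid verb in front of a half-written entry. Reduced to a
 * stand-alone sketch (example_desc/example_commit are illustrative, not real
 * portal structures):
 */
struct example_desc {
	u8 verb;		/* valid-bit lives in here, written last */
	u8 payload[63];
};

static inline void example_commit(struct example_desc *d, const u8 *data,
				  u8 verb)
{
	memcpy(d->payload, data, sizeof(d->payload));
	lwsync();	/* order the payload stores before the verb store */
	d->verb = verb;
	dcbf(d);	/* push the updated cacheline towards the hardware */
}

/*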
Hence the use of the __raw_*() interfaces, which + * simply ensure that the compiler treats the portal registers as volatile (ie. + * non-coherent). */ + +/* Cache-inhibited register access. */ +#define __bm_in(bm, o) __raw_readl((bm)->addr_ci + (o)) +#define __bm_out(bm, o, val) __raw_writel((val), (bm)->addr_ci + (o)) +#define bm_in(reg) __bm_in(&portal->addr, REG_##reg) +#define bm_out(reg, val) __bm_out(&portal->addr, REG_##reg, val) + +/* Cache-enabled (index) register access */ +#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o)) +#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o)) +#define __bm_cl_in(bm, o) __raw_readl((bm)->addr_ce + (o)) +#define __bm_cl_out(bm, o, val) \ + do { \ + u32 *__tmpclout = (bm)->addr_ce + (o); \ + __raw_writel((val), __tmpclout); \ + dcbf(__tmpclout); \ + } while (0) +#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o)) +#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, CL_##reg##_CENA) +#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, CL_##reg##_CENA) +#define bm_cl_in(reg) __bm_cl_in(&portal->addr, CL_##reg##_CENA) +#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, CL_##reg##_CENA, val) +#define bm_cl_invalidate(reg) __bm_cl_invalidate(&portal->addr, CL_##reg##_CENA) + +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf + * analysis, look at using the "extra" bit in the ring index registers to avoid + * cyclic issues. */ +static inline u8 cyc_diff(u8 ringsize, u8 first, u8 last) +{ + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + return ringsize + last - first; +} + +/* Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * Enum values use 3 letter codes. 
First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + */ +enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */ + bm_rcr_pci = 0, /* PI index, cache-inhibited */ + bm_rcr_pce = 1, /* PI index, cache-enabled */ + bm_rcr_pvb = 2 /* valid-bit */ +}; +enum bm_rcr_cmode { /* s/w-only */ + bm_rcr_cci, /* CI index, cache-inhibited */ + bm_rcr_cce /* CI index, cache-enabled */ +}; + + +/* ------------------------- */ +/* --- Portal structures --- */ + +#define BM_RCR_SIZE 8 + +struct bm_rcr { + struct bm_rcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + u32 busy; + enum bm_rcr_pmode pmode; + enum bm_rcr_cmode cmode; +#endif +}; + +struct bm_mc { + struct bm_mc_command *cr; + struct bm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum { + /* Can only be _mc_start()ed */ + mc_idle, + /* Can only be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif +}; + +struct bm_addr { + void __iomem *addr_ce; /* cache-enabled */ + void __iomem *addr_ci; /* cache-inhibited */ +}; + +struct bm_portal { + struct bm_addr addr; + struct bm_rcr rcr; + struct bm_mc mc; + struct bm_portal_config config; +} ____cacheline_aligned; + + +/* --------------- */ +/* --- RCR API --- */ + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +#define RCR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6))) + +/* Bit-wise logic to convert a ring pointer to a ring index */ +static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e) +{ + return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1); +} + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void RCR_INC(struct bm_rcr *rcr) +{ + /* NB: this is odd-looking, but experiments show that it generates + * fast code with essentially no branching overheads. We increment to + * the next RCR pointer and handle overflow and 'vbit'. */ + struct bm_rcr_entry *partial = rcr->cursor + 1; + rcr->cursor = RCR_CARRYCLEAR(partial); + if (partial != rcr->cursor) + rcr->vbit ^= BM_RCR_VERB_VBIT; +} + +static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode, + __maybe_unused enum bm_rcr_cmode cmode) +{ + /* This use of 'register', as well as all other occurances, is because + * it has been observed to generate much faster code with gcc than is + * otherwise the case. */ + register struct bm_rcr *rcr = &portal->rcr; + u32 cfg; + u8 pi; + + rcr->ring = portal->addr.addr_ce + CL_RCR; + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); + pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); + rcr->cursor = rcr->ring + pi; + rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? 
BM_RCR_VERB_VBIT : 0; + rcr->available = BM_RCR_SIZE - 1 - cyc_diff(BM_RCR_SIZE, rcr->ci, pi); + rcr->ithresh = bm_in(RCR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; + rcr->pmode = pmode; + rcr->cmode = cmode; +#endif + cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */ + bm_out(CFG, cfg); + return 0; +} + +static inline void bm_rcr_finish(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1); + u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); + DPA_ASSERT(!rcr->busy); + if (pi != RCR_PTR2IDX(rcr->cursor)) + pr_crit("losing uncommited RCR entries\n"); + if (ci != rcr->ci) + pr_crit("missing existing RCR completions\n"); + if (rcr->ci != RCR_PTR2IDX(rcr->cursor)) + pr_crit("RCR destroyed unquiesced\n"); +} + +static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(!rcr->busy); + if (!rcr->available) + return NULL; +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 1; +#endif + dcbz_64(rcr->cursor); + return rcr->cursor; +} + +static inline void bm_rcr_abort(struct bm_portal *portal) +{ + __maybe_unused register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline struct bm_rcr_entry *bm_rcr_pend_and_next( + struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode != bm_rcr_pvb); + if (rcr->available == 1) + return NULL; + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; + dcbf_64(rcr->cursor); + RCR_INC(rcr); + rcr->available--; + dcbz_64(rcr->cursor); + return rcr->cursor; +} + +static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode == bm_rcr_pci); + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; + RCR_INC(rcr); + rcr->available--; + hwsync(); + bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline void bm_rcr_pce_prefetch(struct bm_portal *portal) +{ + __maybe_unused register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->pmode == bm_rcr_pce); + bm_cl_invalidate(RCR_PI); + bm_cl_touch_rw(RCR_PI); +} + +static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode == bm_rcr_pce); + rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit; + RCR_INC(rcr); + rcr->available--; + lwsync(); + bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_rcr *rcr = &portal->rcr; + struct bm_rcr_entry *rcursor; + DPA_ASSERT(rcr->busy); + DPA_ASSERT(rcr->pmode == bm_rcr_pvb); + lwsync(); + rcursor = rcr->cursor; + rcursor->__dont_write_directly__verb = myverb | rcr->vbit; + dcbf_64(rcursor); + RCR_INC(rcr); + rcr->available--; +#ifdef CONFIG_FSL_DPA_CHECKING + rcr->busy = 0; +#endif +} + +static inline u8 bm_rcr_cci_update(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + DPA_ASSERT(rcr->cmode == bm_rcr_cci); + rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1); + diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + 
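	/*
	 * Worked example of the cyc_diff() arithmetic used here: with
	 * BM_RCR_SIZE == 8, a consumer index that has moved from 6 to 2 has
	 * wrapped, so cyc_diff(8, 6, 2) = 8 + 2 - 6 = 4 entries were consumed
	 * and 'available' grows by 4. Only BM_RCR_SIZE - 1 entries are ever
	 * exposed (see bm_rcr_init()), the usual one-slot gap that keeps a
	 * full ring distinguishable from an empty one.
	 */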
return diff; +} + +static inline void bm_rcr_cce_prefetch(struct bm_portal *portal) +{ + __maybe_unused register struct bm_rcr *rcr = &portal->rcr; + DPA_ASSERT(rcr->cmode == bm_rcr_cce); + bm_cl_touch_ro(RCR_CI); +} + +static inline u8 bm_rcr_cce_update(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + u8 diff, old_ci = rcr->ci; + DPA_ASSERT(rcr->cmode == bm_rcr_cce); + rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1); + bm_cl_invalidate(RCR_CI); + diff = cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci); + rcr->available += diff; + return diff; +} + +static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + return rcr->ithresh; +} + +static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) +{ + register struct bm_rcr *rcr = &portal->rcr; + rcr->ithresh = ithresh; + bm_out(RCR_ITR, ithresh); +} + +static inline u8 bm_rcr_get_avail(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + return rcr->available; +} + +static inline u8 bm_rcr_get_fill(struct bm_portal *portal) +{ + register struct bm_rcr *rcr = &portal->rcr; + return BM_RCR_SIZE - 1 - rcr->available; +} + + +/* ------------------------------ */ +/* --- Management command API --- */ + +static inline int bm_mc_init(struct bm_portal *portal) +{ + register struct bm_mc *mc = &portal->mc; + mc->cr = portal->addr.addr_ce + CL_CR; + mc->rr = portal->addr.addr_ce + CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & + BM_MCC_VERB_VBIT) ? 0 : 1; + mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return 0; +} + +static inline void bm_mc_finish(struct bm_portal *portal) +{ + __maybe_unused register struct bm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + if (mc->state != mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal) +{ + register struct bm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_user; +#endif + dcbz_64(mc->cr); + return mc->cr; +} + +static inline void bm_mc_abort(struct bm_portal *portal) +{ + __maybe_unused register struct bm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_user); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif +} + +static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb) +{ + register struct bm_mc *mc = &portal->mc; + struct bm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_user); + lwsync(); + mc->cr->__dont_write_directly__verb = myverb | mc->vbit; + dcbf(mc->cr); + dcbit_ro(rr); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_hw; +#endif +} + +static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal) +{ + register struct bm_mc *mc = &portal->mc; + struct bm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_hw); + /* The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... 
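 */

	/*
	 * Callers rely on exactly this property to poll for completion; the
	 * management-command users in bman_high.c all take the shape below,
	 * with cpu_relax() keeping the busy-wait polite:
	 *
	 *	struct bm_mc_result *mcr;
	 *
	 *	bm_mc_start(&p->p);
	 *	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
	 *	while (!(mcr = bm_mc_result(&p->p)))
	 *		cpu_relax();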
*/ + if (!__raw_readb(&rr->verb)) { + dcbit_ro(rr); + return NULL; + } + mc->rridx ^= 1; + mc->vbit ^= BM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} + + +/* ------------------------------------- */ +/* --- Portal interrupt register API --- */ + +static inline int bm_isr_init(__always_unused struct bm_portal *portal) +{ + return 0; +} + +static inline void bm_isr_finish(__always_unused struct bm_portal *portal) +{ +} + +#define SCN_REG(bpid) REG_SCN((bpid) / 32) +#define SCN_BIT(bpid) (0x80000000 >> (bpid & 31)) +static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid, + int enable) +{ + u32 val; + DPA_ASSERT(bpid < bman_pool_max); + /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */ + val = __bm_in(&portal->addr, SCN_REG(bpid)); + if (enable) + val |= SCN_BIT(bpid); + else + val &= ~SCN_BIT(bpid); + __bm_out(&portal->addr, SCN_REG(bpid), val); +} + +static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n) +{ + return __bm_in(&portal->addr, REG_ISR + (n << 2)); +} + +static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n, + u32 val) +{ + __bm_out(&portal->addr, REG_ISR + (n << 2), val); +} --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_private.h +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_private.h @@ -0,0 +1,149 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "dpa_sys.h" +#include + +/* Revision info (for errata and feature handling) */ +#define BMAN_REV10 0x0100 +#define BMAN_REV20 0x0200 +extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ + +/* + * Global variables of the max portal/pool number this bman version supported + */ +extern u16 bman_pool_max; + +/* used by CCSR and portal interrupt code */ +enum bm_isr_reg { + bm_isr_status = 0, + bm_isr_enable = 1, + bm_isr_disable = 2, + bm_isr_inhibit = 3 +}; + +#define BM_ADDR_CE 0 +#define BM_ADDR_CI 1 +struct bm_portal_config { + /* Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. */ + __iomem void *addr_virt[2]; + struct resource addr_phys[2]; + /* Allow these to be joined in lists */ + struct list_head list; + /* User-visible portal configuration settings */ + struct bman_portal_config public_cfg; +}; + +#ifdef CONFIG_FSL_BMAN_CONFIG +/* Hooks from bman_driver.c to bman_config.c */ +int bman_init_error_int(struct device_node *node); +#endif + +/* Hooks from bman_driver.c in to bman_high.c */ +struct bman_portal *bman_create_affine_portal( + const struct bm_portal_config *config, + int recovery_mode); +struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect); +const struct bm_portal_config *bman_destroy_affine_portal(void); +void bman_recovery_exit_local(void); + +/* Pool logic in the portal driver, during initialisation, needs to know if + * there's access to CCSR or not (if not, it'll cripple the pool allocator). */ +#ifdef CONFIG_FSL_BMAN_CONFIG +int bman_have_ccsr(void); +#else +#define bman_have_ccsr() 0 +#endif + +/* Stockpile build constants. The _LOW value: when bman_acquire() is called and + * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it + * might fail (if the buffer pool is depleted). So this value provides some + * "stagger" in that the bman_acquire() function will only fail if lots of bufs + * are requested at once or if h/w has been tested a couple of times without + * luck. The _HIGH value: when bman_release() is called and the stockpile + * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if + * the release ring is full). So this value provides some "stagger" so that + * ring-access is retried a couple of times prior to the API returning a + * failure. The following *must* be true; + * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8 + * (to avoid thrashing) + * BMAN_STOCKPILE_SZ >= 16 + * (as the release logic expects to either send 8 buffers to hw prior to + * adding the given buffers to the stockpile or add the buffers to the + * stockpile before sending 8 to hw, as the API must be an all-or-nothing + * success/fail.) + */ +#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */ +#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */ +#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */ + +/*************************************************/ +/* BMan s/w corenet portal, low-level i/face */ +/*************************************************/ + +/* Used by all portal interrupt registers except 'inhibit'. NB, some of these + * definitions are exported for use by the bman_irqsource_***() APIs, so are + * commented-out here. 
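 */

/*
 * The stockpile constants above reduce to a pair of watermark tests, shown
 * here in isolation: an acquire refills from hardware only once the local
 * cache would drop to the LOW mark, and a release pushes a batch of 8 back to
 * hardware only once the HIGH mark would be reached. These helpers are
 * illustrative only (the real tests live in bman_acquire()/bman_release() in
 * bman_high.c); sp_fill is the current stockpile fill level.
 */
static inline int stockpile_wants_refill(unsigned int sp_fill,
					 unsigned int want)
{
	return sp_fill <= (BMAN_STOCKPILE_LOW + want);
}

static inline int stockpile_wants_flush(unsigned int sp_fill,
					unsigned int adding)
{
	return (sp_fill + adding) >= BMAN_STOCKPILE_HIGH;
}

/*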
*/
+#if 0
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+#define BM_PIRQ_BSCN 0x00000001 /* Buffer depletion State Change */
+#endif
+/* This mask contains all the "irqsource" bits visible to API users */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write". */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
+
+/* Allocate/release an unreserved buffer pool id */
+int bm_pool_new(u32 *bpid);
+void bm_pool_free(u32 bpid);
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to Bman CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree). */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+#define BM_POOL_THRESH_SW_ENTER 0
+#define BM_POOL_THRESH_SW_EXIT 1
+#define BM_POOL_THRESH_HW_ENTER 2
+#define BM_POOL_THRESH_HW_EXIT 3
+
+#endif /* CONFIG_FSL_BMAN_CONFIG */
+
--- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_test.c
+++ linux-3.13.0/drivers/staging/fsl_qbman/bman_test.c
@@ -0,0 +1,57 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include "bman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Bman testing"); + +static int test_init(void) +{ +#ifdef CONFIG_FSL_BMAN_TEST_HIGH + int loop = 1; + while (loop--) + bman_test_high(); +#endif +#ifdef CONFIG_FSL_BMAN_TEST_THRESH + bman_test_thresh(); +#endif + return 0; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_test.h +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_test.h @@ -0,0 +1,92 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +void bman_test_high(void); +void bman_test_thresh(void); + +static inline void __hexdump(unsigned long start, unsigned long end, + unsigned long p, size_t sz, unsigned char *c) +{ + while (start < end) { + unsigned int pos = 0; + char buf[64]; + int nl = 0; + pos += sprintf(buf + pos, "%08lx: ", start); + do { + if ((start < p) || (start >= (p + sz))) + pos += sprintf(buf + pos, ".."); + else + pos += sprintf(buf + pos, "%02x", *(c++)); + if (!(++start & 15)) { + buf[pos++] = '\n'; + nl = 1; + } else { + nl = 0; + if(!(start & 1)) + buf[pos++] = ' '; + if(!(start & 3)) + buf[pos++] = ' '; + } + } while (start & 15); + if (!nl) + buf[pos++] = '\n'; + buf[pos] = '\0'; + pr_info("%s", buf); + } +} +static inline void hexdump(void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)15; + unsigned long end = (p + sz + 15) & ~(unsigned long)15; + unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} +static inline void hexdump_by_cl(void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)63; + unsigned long end = (p + sz + 63) & ~(unsigned long)63; + unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_test_high.c +++ linux-3.13.0/drivers/staging/fsl_qbman/bman_test_high.c @@ -0,0 +1,181 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_test.h" +#include "bman_private.h" + +/*************/ +/* constants */ +/*************/ + +#define PORTAL_OPAQUE (void *)0xf00dbeef +#define POOL_OPAQUE (void *)0xdeadabba +#define NUM_BUFS 93 +#define LOOPS 3 +#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU + +/***************/ +/* global vars */ +/***************/ + +static struct bman_pool *pool; +static int depleted; +static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned; +static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned; +static int bufs_received; + +/* Predeclare the callback so we can instantiate pool parameters */ +static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int); + +/**********************/ +/* internal functions */ +/**********************/ + +static void bufs_init(void) +{ + int i; + for (i = 0; i < NUM_BUFS; i++) + bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i); + bufs_received = 0; +} + +static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b) +{ + if (bman_ip_rev == BMAN_REV20) { + /* On SoCs with Bman revison 2.0, Bman only respects the 40 + * LS-bits of buffer addresses, masking off the upper 8-bits on + * release commands. The API provides for 48-bit addresses + * because some SoCs support all 48-bits. When generating + * garbage addresses for testing, we either need to zero the + * upper 8-bits when releasing to Bman (otherwise we'll be + * disappointed when the buffers we acquire back from Bman + * don't match), or we need to mask the upper 8-bits off when + * comparing. We do the latter. + */ + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) + < (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return -1; + if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) + > (bm_buffer_get64(b) & BMAN_TOKEN_MASK)) + return 1; + } else { + if (bm_buffer_get64(a) < bm_buffer_get64(b)) + return -1; + if (bm_buffer_get64(a) > bm_buffer_get64(b)) + return 1; + } + + return 0; +} + +static void bufs_confirm(void) +{ + int i, j; + for (i = 0; i < NUM_BUFS; i++) { + int matches = 0; + for (j = 0; j < NUM_BUFS; j++) + if (!bufs_cmp(&bufs_in[i], &bufs_out[j])) + matches++; + BUG_ON(matches != 1); + } +} + +/********/ +/* test */ +/********/ + +static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool, + void *pool_ctx, int __depleted) +{ + BUG_ON(__pool != pool); + BUG_ON(pool_ctx != POOL_OPAQUE); + depleted = __depleted; +} + +void bman_test_high(void) +{ + struct bman_pool_params pparams = { + .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID, + .cb = depletion_cb, + .cb_ctx = POOL_OPAQUE, + }; + int i, loops = LOOPS; + + bufs_init(); + + pr_info("BMAN: --- starting high-level test ---\n"); + + pool = bman_new_pool(&pparams); + BUG_ON(!pool); + + /*******************/ + /* Release buffers */ + /*******************/ +do_loop: + i = 0; + while (i < NUM_BUFS) { + u32 flags = BMAN_RELEASE_FLAG_WAIT; + int num = 8; + if ((i + num) > NUM_BUFS) + num = NUM_BUFS - i; + if ((i + num) == NUM_BUFS) + flags |= BMAN_RELEASE_FLAG_WAIT_SYNC; + if (bman_release(pool, bufs_in + i, num, flags)) + panic("bman_release() failed\n"); + i += num; + } + + /*******************/ + /* Acquire buffers */ + /*******************/ + while (i > 0) { + int tmp, num = 8; + if (num > i) + num = i; + tmp = bman_acquire(pool, bufs_out + i - num, num, 0); + BUG_ON(tmp != num); + i -= num; + } + i = bman_acquire(pool, NULL, 1, 0); + BUG_ON(i > 0); + + bufs_confirm(); + + if (--loops) + goto do_loop; + + /************/ + /* Clean up */ + /************/ + 
bman_free_pool(pool);
+ pr_info("BMAN: --- finished high-level test ---\n");
+}
+
--- linux-3.13.0.orig/drivers/staging/fsl_qbman/bman_test_thresh.c
+++ linux-3.13.0/drivers/staging/fsl_qbman/bman_test_thresh.c
@@ -0,0 +1,197 @@
+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+/* Test constants */
+#define TEST_NUMBUFS 129728
+#define TEST_EXIT 129536
+#define TEST_ENTRY 129024
+
+struct affine_test_data {
+ struct task_struct *t;
+ int cpu;
+ int expect_affinity;
+ int drain;
+ int num_enter;
+ int num_exit;
+ struct list_head node;
+ struct completion wakethread;
+ struct completion wakeparent;
+};
+
+static void cb_depletion(struct bman_portal *portal,
+ struct bman_pool *pool,
+ void *opaque,
+ int depleted)
+{
+ struct affine_test_data *data = opaque;
+ int c = smp_processor_id();
+ pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
+ bman_get_params(pool)->bpid, depleted, c, data->cpu);
+ /* We should be executing on the CPU of the thread that owns the pool if
+ * and only if that CPU has an affine portal (ie. it isn't slaved).
*/ + BUG_ON((c != data->cpu) && data->expect_affinity); + BUG_ON((c == data->cpu) && !data->expect_affinity); + if (depleted) + data->num_enter++; + else + data->num_exit++; +} + +/* Params used to set up a pool, this also dynamically allocates a BPID */ +struct bman_pool_params params_nocb = { + .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH, + .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 } +}; + +/* Params used to set up each cpu's pool with callbacks enabled */ +struct bman_pool_params params_cb = { + .bpid = 0, /* will be replaced to match pool_nocb */ + .flags = BMAN_POOL_FLAG_DEPLETION, + .cb = cb_depletion +}; + +static struct bman_pool *pool_nocb; +static LIST_HEAD(threads); + +static int affine_test(void *__data) +{ + struct bman_pool *pool; + struct affine_test_data *data = __data; + struct bman_pool_params my_params = params_cb; + + pr_info("thread %d: starting\n", data->cpu); + /* create the pool */ + my_params.cb_ctx = data; + pool = bman_new_pool(&my_params); + BUG_ON(!pool); + complete(&data->wakeparent); + wait_for_completion(&data->wakethread); + init_completion(&data->wakethread); + + /* if we're the drainer, we get signalled for that */ + if (data->drain) { + struct bm_buffer buf; + int ret; + pr_info("thread %d: draining...\n", data->cpu); + do { + ret = bman_acquire(pool, &buf, 1, 0); + } while (ret > 0); + pr_info("thread %d: draining done.\n", data->cpu); + complete(&data->wakeparent); + wait_for_completion(&data->wakethread); + init_completion(&data->wakethread); + } + + /* cleanup */ + bman_free_pool(pool); + while (!kthread_should_stop()) + cpu_relax(); + pr_info("thread %d: exiting\n", data->cpu); + return 0; +} + +static struct affine_test_data *start_affine_test(int cpu, int drain) +{ + struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL); + + if (!data) + return NULL; + data->cpu = cpu; + data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus()); + data->drain = drain; + data->num_enter = 0; + data->num_exit = 0; + init_completion(&data->wakethread); + init_completion(&data->wakeparent); + list_add_tail(&data->node, &threads); + data->t = kthread_create(affine_test, data, "threshtest%d", cpu); + BUG_ON(IS_ERR(data->t)); + kthread_bind(data->t, cpu); + wake_up_process(data->t); + return data; +} + +void bman_test_thresh(void) +{ + int loop = TEST_NUMBUFS; + int ret, num_cpus = 0; + struct affine_test_data *data, *drainer = NULL; + + pr_info("bman_test_thresh: start\n"); + + /* allocate a BPID and seed it */ + pool_nocb = bman_new_pool(¶ms_nocb); + BUG_ON(!pool_nocb); + while (loop--) { + struct bm_buffer buf; + bm_buffer_set64(&buf, 0x0badbeef + loop); + ret = bman_release(pool_nocb, &buf, 1, + BMAN_RELEASE_FLAG_WAIT); + BUG_ON(ret); + } + while (!bman_rcr_is_empty()) + cpu_relax(); + pr_info("bman_test_thresh: buffers are in\n"); + + /* create threads and wait for them to create pools */ + params_cb.bpid = bman_get_params(pool_nocb)->bpid; + for_each_cpu(loop, cpu_online_mask) { + data = start_affine_test(loop, drainer ? 
0 : 1); + BUG_ON(!data); + if (!drainer) + drainer = data; + num_cpus++; + wait_for_completion(&data->wakeparent); + } + + /* signal the drainer to start draining */ + complete(&drainer->wakethread); + wait_for_completion(&drainer->wakeparent); + init_completion(&drainer->wakeparent); + + /* tear down */ + list_for_each_entry_safe(data, drainer, &threads, node) { + complete(&data->wakethread); + ret = kthread_stop(data->t); + BUG_ON(ret); + list_del(&data->node); + /* check that we get the expected callbacks (and no others) */ + BUG_ON(data->num_enter != 1); + BUG_ON(data->num_exit != 0); + kfree(data); + } + bman_free_pool(pool_nocb); + + pr_info("bman_test_thresh: done\n"); +} + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/dpa_sys.h +++ linux-3.13.0/drivers/staging/fsl_qbman/dpa_sys.h @@ -0,0 +1,324 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DPA_SYS_H +#define DPA_SYS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* When copying aligned words or shorts, try to avoid memcpy() */ +#define CONFIG_TRY_BETTER_MEMCPY + +/* This takes a "phandle" and dereferences to the cpu device-tree node, + * returning the cpu index. Returns negative error codes. 
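+ *
+ * Editor's sketch of the intended call pattern (illustrative only; "node" is
+ * whatever device-tree node the caller is parsing, and the "cpu-handle"
+ * property name is taken from the error string below):
+ *
+ *	int len;
+ *	const phandle *ph = of_get_property(node, "cpu-handle", &len);
+ *	int cpu = ph ? check_cpu_phandle(*ph) : -ENODEV;
+ *	if (cpu < 0)
+ *		return cpu;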
*/ +static inline int check_cpu_phandle(phandle ph) +{ + const u32 *cpu_val; + struct device_node *tmp_node = of_find_node_by_phandle(ph); + int cpu, ret; + + if (!tmp_node) { + pr_err("Bad 'cpu-handle'\n"); + return -EINVAL; + } + cpu_val = of_get_property(tmp_node, "reg", &ret); + if (!cpu_val || (ret != sizeof(*cpu_val))) { + pr_err("Can't get %s property 'reg'\n", tmp_node->full_name); + return -ENODEV; + } + for_each_present_cpu(cpu) { + if (*cpu_val == get_hard_smp_processor_id(cpu)) + goto done; + } + pr_err("Invalid cpu index %d in %s\n", *cpu_val, tmp_node->full_name); + return -ENODEV; +done: + of_node_put(tmp_node); + return cpu; +} + +/* Handle portals destined for USDPAA (user-space). + * + * The UIO layer is mostly Qman/Bman-agnostic, however the rest of the driver is + * separated along interface and implementation lines. So each Qman/Bman driver + * instantiates a dpa_uio_class and links dpa_uio_portal objects into it. If + * the dpa_uio driver module is built, it queries these two classes and creates + * the UIO devices accordingly. + */ +struct dpa_uio_class { + struct list_head list; + const char *dev_prefix; +}; +struct dpa_uio_portal { + enum { + dpa_uio_portal_bman, + dpa_uio_portal_qman, + } type; + union { + const struct bm_portal_config *bm_cfg; + const struct qm_portal_config *qm_cfg; + }; + struct list_head node; +}; +const struct dpa_uio_class *dpa_uio_bman(void); +const struct dpa_uio_class *dpa_uio_qman(void); + +/* These stubs are re-mapped to hypervisor+failover features in kernel trees + * that contain that support. */ +static inline int fsl_dpa_should_recover(void) +{ + return 0; +} +static inline int pamu_enable_liodn(struct device_node *n, int i) +{ + return 0; +} +/***********************/ +/* Misc inline assists */ +/***********************/ + +/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler + * barriers and that dcb*() won't fall victim to compiler or execution + * reordering with respect to other code/instructions that manipulate the same + * cacheline. 
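+ *
+ * (Editor's note: the two build variants below differ only in cacheline size.
+ * On e500mc a single dcbzl/dcbf covers the 64-byte line, while the
+ * non-e500mc fallbacks issue two 32-byte dcbz/dcbf/dcbi/dcbt operations to
+ * cover the same 64 bytes.)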
*/ +#define hwsync() \ + do { \ + __asm__ __volatile__ ("sync" : : : "memory"); \ + } while(0) +#define lwsync() \ + do { \ + __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory"); \ + } while(0) +#define dcbf(p) \ + do { \ + __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory"); \ + } while(0) +#define dcbt_ro(p) \ + do { \ + __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p)); \ + } while(0) +#define dcbt_rw(p) \ + do { \ + __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p)); \ + } while(0) +#define dcbi(p) dcbf(p) +#ifdef CONFIG_PPC_E500MC +#define dcbzl(p) \ + do { \ + __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p)); \ + } while (0) +#define dcbz_64(p) \ + do { \ + dcbzl(p); \ + } while (0) +#define dcbf_64(p) \ + do { \ + dcbf(p); \ + } while (0) +/* Commonly used combo */ +#define dcbit_ro(p) \ + do { \ + dcbi(p); \ + dcbt_ro(p); \ + } while (0) +#else +#define dcbz(p) \ + do { \ + __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p)); \ + } while (0) +#define dcbz_64(p) \ + do { \ + dcbz((u32)p + 32); \ + dcbz(p); \ + } while (0) +#define dcbf_64(p) \ + do { \ + dcbf((u32)p + 32); \ + dcbf(p); \ + } while (0) +/* Commonly used combo */ +#define dcbit_ro(p) \ + do { \ + dcbi(p); \ + dcbi((u32)p + 32); \ + dcbt_ro(p); \ + dcbt_ro((u32)p + 32); \ + } while (0) +#endif /* CONFIG_PPC_E500MC */ + +static inline u64 mfatb(void) +{ + u32 hi, lo, chk; + do { + hi = mfspr(SPRN_ATBU); + lo = mfspr(SPRN_ATBL); + chk = mfspr(SPRN_ATBU); + } while (unlikely(hi != chk)); + return ((u64)hi << 32) | (u64)lo; +} + +#ifdef CONFIG_FSL_DPA_CHECKING +#define DPA_ASSERT(x) \ + do { \ + if (!(x)) { \ + pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \ + __stringify_1(x)); \ + dump_stack(); \ + panic("assertion failure"); \ + } \ + } while(0) +#else +#define DPA_ASSERT(x) +#endif + +/* memcpy() stuff - when you know alignments in advance */ +#ifdef CONFIG_TRY_BETTER_MEMCPY +static inline void copy_words(void *dest, const void *src, size_t sz) +{ + u32 *__dest = dest; + const u32 *__src = src; + size_t __sz = sz >> 2; + BUG_ON((unsigned long)dest & 0x3); + BUG_ON((unsigned long)src & 0x3); + BUG_ON(sz & 0x3); + while (__sz--) + *(__dest++) = *(__src++); +} +static inline void copy_shorts(void *dest, const void *src, size_t sz) +{ + u16 *__dest = dest; + const u16 *__src = src; + size_t __sz = sz >> 1; + BUG_ON((unsigned long)dest & 0x1); + BUG_ON((unsigned long)src & 0x1); + BUG_ON(sz & 0x1); + while (__sz--) + *(__dest++) = *(__src++); +} +static inline void copy_bytes(void *dest, const void *src, size_t sz) +{ + u8 *__dest = dest; + const u8 *__src = src; + while (sz--) + *(__dest++) = *(__src++); +} +#else +#define copy_words memcpy +#define copy_shorts memcpy +#define copy_bytes memcpy +#endif + +/************/ +/* RB-trees */ +/************/ + +/* We encapsulate RB-trees so that its easier to use non-linux forms in + * non-linux systems. This also encapsulates the extra plumbing that linux code + * usually provides when using RB-trees. This encapsulation assumes that the + * data type held by the tree is u32. 
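+ *
+ * Editor's sketch of how the IMPLEMENT_DPA_RBTREE() macro below is meant to
+ * be instantiated (the fq/fqid/fqtree names are hypothetical, chosen only
+ * for illustration):
+ *
+ *	struct fq {
+ *		struct rb_node node;
+ *		u32 fqid;
+ *	};
+ *	IMPLEMENT_DPA_RBTREE(fqtree, struct fq, node, fqid);
+ *
+ *	struct dpa_rbtree tree = DPA_RBTREE;
+ *	ret = fqtree_push(&tree, f);       (-EBUSY if f->fqid already present)
+ *	f = fqtree_find(&tree, fqid);      (NULL if fqid is not in the tree)
+ *	fqtree_del(&tree, f);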
*/ + +struct dpa_rbtree { + struct rb_root root; +}; +#define DPA_RBTREE { .root = RB_ROOT } + +static inline void dpa_rbtree_init(struct dpa_rbtree *tree) +{ + tree->root = RB_ROOT; +} + +#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \ +static inline int name##_push(struct dpa_rbtree *tree, type *obj) \ +{ \ + struct rb_node *parent = NULL, **p = &tree->root.rb_node; \ + while (*p) { \ + u32 item; \ + parent = *p; \ + item = rb_entry(parent, type, node_field)->val_field; \ + if (obj->val_field < item) \ + p = &parent->rb_left; \ + else if (obj->val_field > item) \ + p = &parent->rb_right; \ + else \ + return -EBUSY; \ + } \ + rb_link_node(&obj->node_field, parent, p); \ + rb_insert_color(&obj->node_field, &tree->root); \ + return 0; \ +} \ +static inline void name##_del(struct dpa_rbtree *tree, type *obj) \ +{ \ + rb_erase(&obj->node_field, &tree->root); \ +} \ +static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \ +{ \ + type *ret; \ + struct rb_node *p = tree->root.rb_node; \ + while (p) { \ + ret = rb_entry(p, type, node_field); \ + if (val < ret->val_field) \ + p = p->rb_left; \ + else if (val > ret->val_field) \ + p = p->rb_right; \ + else \ + return ret; \ + } \ + return NULL; \ +} + +#endif /* DPA_SYS_H */ + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/dpa_uio.c +++ linux-3.13.0/drivers/staging/fsl_qbman/dpa_uio.c @@ -0,0 +1,231 @@ +/* Copyright 2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "bman_private.h" +#include "qman_private.h" + +static const char dpa_uio_version[] = "USDPAA UIO portal driver v0.2"; + +static LIST_HEAD(uio_portal_list); + +struct dpa_uio_info { + atomic_t ref; /* exclusive, only one open() at a time */ + struct uio_info uio; + void *addr_ci; + char name[16]; /* big enough for "qman-uio-xx" */ + struct platform_device *pdev; + struct list_head node; +}; + +static int dpa_uio_open(struct uio_info *info, struct inode *inode) +{ + struct dpa_uio_info *i = container_of(info, struct dpa_uio_info, uio); + if (!atomic_dec_and_test(&i->ref)) { + atomic_inc(&i->ref); + return -EBUSY; + } + return 0; +} + +static int dpa_uio_release(struct uio_info *info, struct inode *inode) +{ + struct dpa_uio_info *i = container_of(info, struct dpa_uio_info, uio); + atomic_inc(&i->ref); + return 0; +} + +static int dpa_uio_mmap(struct uio_info *info, struct vm_area_struct *vma) +{ + struct uio_mem *mem; + struct dpa_uio_info *i = container_of(info, struct dpa_uio_info, uio); + + if (vma->vm_pgoff == 0) { + /* CENA */ + mem = &i->uio.mem[0]; + vma->vm_page_prot &= + ~(_PAGE_GUARDED | _PAGE_NO_CACHE | _PAGE_COHERENT); + } else if (vma->vm_pgoff == 1) { + /* CINH */ + mem = &i->uio.mem[1]; + vma->vm_page_prot |= _PAGE_GUARDED | _PAGE_NO_CACHE; + } else { + pr_err("%s: unknown mmap offset %d, rejecting\n", + i->name, (int)vma->vm_pgoff); + return -EINVAL; + } + if ((vma->vm_end - vma->vm_start) != mem->size) { + pr_err("%s: invalid mmap() size %d, expect %d\n", + i->name, (int)(vma->vm_end - vma->vm_start), + (int)mem->size); + return -EINVAL; + } + /* FIXME: UIO appears not to support sizeof(phys_addr_t) > sizeof(void*) + * as mem->addr is 32-bit. Also, it would have been more natural (and in + * keeping with UIO's design intent) to have used the UIO_MEM_PHYS type + * for our two memory regions, and to rely on UIO's own mmap() handler + * (by not declaring our own). Unfortunately UIO does not allow any + * specification of pgprots and assumes cache-inhibited mappings for + * anything physical (see drivers/uio/uio.c, eg. uio_mmap_physical()). + * So UIO could use a couple of improvements as it is not saving us much + * on the kernel nor the user side. The first would be to use PFN + * instead of a raw base address in the uio_mem structs (same reason as + * everywhere else, this covers 4096 times as much address space, and + * why waste lower bits given it has to be page-aligned anyway?). The + * second is to add a pgprot field to uio_mem to be used with _PHYS + * mappings. (Or use a new _PHYS_PGPROT type, for backwards + * compatibility?) */ + /* Normally, we'd ">>PAGE_SHIFT" the mem->addr value here, but due to + * the 36-bit issue, it is already stored as a PFN. */ + return io_remap_pfn_range(vma, vma->vm_start, mem->addr, mem->size, + vma->vm_page_prot); +} + +static irqreturn_t dpa_uio_irq_handler(int irq, struct uio_info *info) +{ + struct dpa_uio_info *i = container_of(info, struct dpa_uio_info, uio); + /* This is the only code outside the regular portal driver that + * manipulates any portal register, so rather than breaking that + * encapsulation I am simply hard-coding the offset to the inhibit + * register here. 
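+ *
+ * (Editor's note: assuming the portal's cache-inhibited interrupt registers
+ * start at offset 0xe00, the magic 0xe0c is that base plus
+ * (bm_isr_inhibit << 2), i.e. the same inhibit register the portal driver
+ * itself writes through __bm_isr_write(portal, bm_isr_inhibit, ...).)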
*/ + out_be32(i->addr_ci + 0xe0c, ~(u32)0); + return IRQ_HANDLED; +} + +static void __init dpa_uio_portal_init(struct dpa_uio_portal *p, + const struct dpa_uio_class *c) +{ + struct dpa_uio_info *info; + const struct resource *res; + u32 index; + int irq, ret; + + /* allocate 'info' */ + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return; + atomic_set(&info->ref, 1); + if (p->type == dpa_uio_portal_bman) { + res = &p->bm_cfg->addr_phys[0]; + index = p->bm_cfg->public_cfg.index; + irq = p->bm_cfg->public_cfg.irq; + } else { + res = &p->qm_cfg->addr_phys[0]; + index = p->qm_cfg->public_cfg.index; + irq = p->qm_cfg->public_cfg.irq; + } + /* We need to map the cache-inhibited region in the kernel for + * interrupt-handling purposes. */ + info->addr_ci = ioremap_prot(res[BM_ADDR_CI].start, + resource_size(&res[BM_ADDR_CI]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + /* Name the UIO device according to the cell-index. It's supposed to be + * unique for each device class (Qman/Bman), and is also a convenient + * way for user-space to find the UIO device that corresponds to a given + * portal device-tree node. */ + sprintf(info->name, "%s%x", c->dev_prefix, index); + info->pdev = platform_device_alloc(info->name, -1); + if (!info->pdev) { + iounmap(info->addr_ci); + kfree(info); + pr_err("dpa_uio_portal: platform_device_alloc() failed\n"); + return; + } + ret = platform_device_add(info->pdev); + if (ret) { + platform_device_put(info->pdev); + iounmap(info->addr_ci); + kfree(info); + pr_err("dpa_uio_portal: platform_device_add() failed\n"); + return; + } + info->uio.name = info->name; + info->uio.version = dpa_uio_version; + /* Work around the 36-bit UIO issue by bit-shifting the addresses */ + info->uio.mem[BM_ADDR_CE].name = "cena"; + info->uio.mem[BM_ADDR_CE].addr = res[BM_ADDR_CE].start >> PAGE_SHIFT; + info->uio.mem[BM_ADDR_CE].size = resource_size(&res[BM_ADDR_CE]); + info->uio.mem[BM_ADDR_CI].name = "cinh"; + info->uio.mem[BM_ADDR_CI].addr = res[BM_ADDR_CI].start >> PAGE_SHIFT; + info->uio.mem[BM_ADDR_CI].size = resource_size(&res[BM_ADDR_CI]); + info->uio.irq = irq; + info->uio.handler = dpa_uio_irq_handler; + info->uio.mmap = dpa_uio_mmap; + info->uio.open = dpa_uio_open; + info->uio.release = dpa_uio_release; + ret = uio_register_device(&info->pdev->dev, &info->uio); + if (ret) { + platform_device_del(info->pdev); + platform_device_put(info->pdev); + iounmap(info->addr_ci); + kfree(info); + pr_err("dpa_uio_portal: UIO registration failed\n"); + return; + } + list_add_tail(&info->node, &uio_portal_list); + pr_info("USDPAA portal initialised, %s\n", info->name); +} + +static int __init dpa_uio_init(void) +{ + const struct dpa_uio_class *classes[3], **c = classes; + classes[0] = dpa_uio_bman(); + classes[1] = dpa_uio_qman(); + classes[2] = NULL; + while (*c) { + struct dpa_uio_portal *p; + list_for_each_entry(p, &(*c)->list, node) + dpa_uio_portal_init(p, *c); + c++; + } + pr_info("USDPAA portal layer loaded\n"); + return 0; +} + +static void __exit dpa_uio_exit(void) +{ + struct dpa_uio_info *info, *tmp; + list_for_each_entry_safe(info, tmp, &uio_portal_list, node) { + list_del(&info->node); + uio_unregister_device(&info->uio); + platform_device_del(info->pdev); + platform_device_put(info->pdev); + iounmap(info->addr_ci); + pr_info("USDPAA portal removed, %s\n", info->name); + kfree(info); + } + pr_info("USDPAA portal layer unloaded\n"); +} + + +module_init(dpa_uio_init) +module_exit(dpa_uio_exit) +MODULE_LICENSE("GPL"); + --- 
linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_config.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_config.c @@ -0,0 +1,1013 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CONFIG_SMP +#include /* get_hard_smp_processor_id() */ +#endif + +#include +#include + +#include "qman_private.h" + +/* Last updated for v00.800 of the BG */ + +/* Register offsets */ +#define REG_QCSP_PID_CFG(n) (0x0000 + ((n) * 0x10)) +#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10)) +#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10)) +#define REG_DD_CFG 0x0200 +#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10)) +#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10)) +#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10)) +#define REG_PFDR_FPC 0x0400 +#define REG_PFDR_FP_HEAD 0x0404 +#define REG_PFDR_FP_TAIL 0x0408 +#define REG_PFDR_FP_LWIT 0x0410 +#define REG_PFDR_CFG 0x0414 +#define REG_SFDR_CFG 0x0500 +#define REG_SFDR_IN_USE 0x0504 +#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04)) +#define REG_WQ_DEF_ENC_WQID 0x0630 +#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04)) +#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04)) +#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04)) +#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04)) +#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */ +#define REG_CM_CFG 0x0800 +#define REG_ECSR 0x0a00 +#define REG_ECIR 0x0a04 +#define REG_EADR 0x0a08 +#define REG_EDATA(n) (0x0a10 + ((n) * 0x04)) +#define REG_SBEC(n) (0x0a80 + ((n) * 0x04)) +#define REG_MCR 0x0b00 +#define REG_MCP(n) (0x0b04 + ((n) * 0x04)) +#define REG_HID_CFG 0x0bf0 +#define REG_IDLE_STAT 0x0bf4 +#define REG_IP_REV_1 0x0bf8 +#define REG_IP_REV_2 0x0bfc +#define REG_FQD_BARE 0x0c00 +#define REG_PFDR_BARE 0x0c20 +#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */ +#define REG_QCSP_BARE 0x0c80 +#define REG_QCSP_BAR 0x0c84 +#define REG_CI_SCHED_CFG 0x0d00 +#define REG_SRCIDR 0x0d04 +#define REG_LIODNR 0x0d08 +#define REG_CI_RLM_AVG 0x0d14 +#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */ + +/* Assists for QMAN_MCR */ +#define MCR_INIT_PFDR 0x01000000 +#define MCR_get_rslt(v) (u8)((v) >> 24) +#define MCR_rslt_idle(r) (!rslt || (rslt >= 0xf0)) +#define MCR_rslt_ok(r) (rslt == 0xf0) +#define MCR_rslt_eaccess(r) (rslt == 0xf8) +#define MCR_rslt_inval(r) (rslt == 0xff) + +struct qman; + +/* Follows WQ_CS_CFG0-5 */ +enum qm_wq_class { + qm_wq_portal = 0, + qm_wq_pool = 1, + qm_wq_fman0 = 2, + qm_wq_fman1 = 3, + qm_wq_caam = 4, + qm_wq_pme = 5, + qm_wq_first = qm_wq_portal, + qm_wq_last = qm_wq_pme +}; + +/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */ +enum qm_memory { + qm_memory_fqd, + qm_memory_pfdr +}; + +/* Used by all error interrupt registers except 'inhibit' */ +#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */ +#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */ +#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */ +#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */ +#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */ +#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */ +#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */ +#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */ +#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */ +#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */ +#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */ +#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */ +#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */ +#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */ +#define QM_EIRQ_IECI 0x00000002 /* Invalid 
Enqueue Channel */ +#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */ + +/* QMAN_ECIR valid error bit */ +#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \ + QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \ + QM_EIRQ_IDDI | QM_EIRQ_ICVI) +#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \ + QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI) + +union qman_ecir { + u32 ecir_raw; + struct { + u32 __reserved:2; + u32 portal_type:1; + u32 portal_num:5; + u32 fqid:24; + } __packed info; +}; + +union qman_eadr { + u32 eadr_raw; + struct { + u32 __reserved1:4; + u32 memid:4; + u32 __reserved2:12; + u32 eadr:12; + } __packed info; +}; + +struct qman_hwerr_txt { + u32 mask; + const char *txt; +}; + +#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b } + +static const struct qman_hwerr_txt qman_hwerr_txts[] = { + QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"), + QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"), + QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"), + QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"), + QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"), + QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"), + QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"), + QMAN_HWE_TXT(ICVI, "Invalid Command Verb"), + QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"), + QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"), + QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"), + QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"), + QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"), + QMAN_HWE_TXT(IESI, "Invalid Enqueue State"), + QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"), + QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue") +}; +#define QMAN_HWE_COUNT (sizeof(qman_hwerr_txts)/sizeof(struct qman_hwerr_txt)) + +struct qman_error_info_mdata { + u16 addr_mask; + u16 bits; + const char *txt; +}; + +#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c} +static const struct qman_error_info_mdata error_mdata[] = { + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"), + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"), + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"), + QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"), + QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"), + QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"), + QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"), + QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"), + QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"), + QMAN_ERR_MDATA(0x01FF, 256, "SW portal ring memory"), +}; +#define QMAN_ERR_MDATA_COUNT \ + (sizeof(error_mdata)/sizeof(struct qman_error_info_mdata)) + +/* Add this in Kconfig */ +#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI) + +/** + * qm_err_isr__ - Manipulate global interrupt registers + * @v: for accessors that write values, this is the 32-bit value + * + * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All + * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of + * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means + * "write the enable register" rather than "enable the write register"! 
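+ *
+ * For example (editor's cross-reference, not part of the original comment):
+ * the init path later in this file clears a stale PFDR-enqueues-blocked bit
+ * with qm_err_isr_status_clear(qm, QM_EIRQ_PEBI), and the error ISR pairs
+ * qm_err_isr_status_read(qm) with qm_err_isr_enable_read(qm) to compute the
+ * set of enabled, asserted error sources.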
+ */ +#define qm_err_isr_status_read(qm) __qm_err_isr_read(qm, qm_isr_status) +#define qm_err_isr_status_clear(qm, m) __qm_err_isr_write(qm, qm_isr_status,m) +#define qm_err_isr_enable_read(qm) __qm_err_isr_read(qm, qm_isr_enable) +#define qm_err_isr_enable_write(qm, v) __qm_err_isr_write(qm, qm_isr_enable,v) +#define qm_err_isr_disable_read(qm) __qm_err_isr_read(qm, qm_isr_disable) +#define qm_err_isr_disable_write(qm, v) __qm_err_isr_write(qm, qm_isr_disable,v) +#define qm_err_isr_inhibit(qm) __qm_err_isr_write(qm, qm_isr_inhibit,1) +#define qm_err_isr_uninhibit(qm) __qm_err_isr_write(qm, qm_isr_inhibit,0) + +/* + * TODO: unimplemented registers + * + * Keeping a list here of Qman registers I have not yet covered; + * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR, + * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG, + * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12 + */ + +/* Encapsulate "struct qman *" as a cast of the register space address. */ + +static struct qman *qm_create(void *regs) +{ + return (struct qman *)regs; +} + +static inline u32 __qm_in(struct qman *qm, u32 offset) +{ + return in_be32((void *)qm + offset); +} +static inline void __qm_out(struct qman *qm, u32 offset, u32 val) +{ + out_be32((void *)qm + offset, val); +} +#define qm_in(reg) __qm_in(qm, REG_##reg) +#define qm_out(reg, val) __qm_out(qm, REG_##reg, val) + +static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n) +{ + return __qm_in(qm, REG_ERR_ISR + (n << 2)); +} + +static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val) +{ + __qm_out(qm, REG_ERR_ISR + (n << 2), val); +} + +#if 0 + +static void qm_set_portal(struct qman *qm, u8 swportalID, + u16 ec_tp_cfg, u16 ecd_tp_cfg) +{ + qm_out(QCSP_DD_CFG(swportalID), + ((ec_tp_cfg & 0x1ff) << 16) | (ecd_tp_cfg & 0x1ff)); +} + +static void qm_set_ddebug(struct qman *qm, u8 mdd, u8 m_cfg) +{ + qm_out(DD_CFG, ((mdd & 0x3) << 4) | (m_cfg & 0xf)); +} + +static void qm_set_dc_ddebug(struct qman *qm, enum qm_dc_portal portal, u16 ecd_tp_cfg) +{ + qm_out(DCP_DD_CFG(portal), ecd_tp_cfg & 0x1ff); +} + +static u32 qm_get_pfdr_free_pool_count(struct qman *qm) +{ + return qm_in(PFDR_FPC); +} + +static void qm_get_pfdr_free_pool(struct qman *qm, u32 *head, u32 *tail) +{ + *head = qm_in(PFDR_FP_HEAD); + *tail = qm_in(PFDR_FP_TAIL); +} + +static void qm_set_default_wq(struct qman *qm, u16 wqid) +{ + qm_out(WQ_DEF_ENC_WQID, wqid); +} + +static void qm_set_channel_ddebug(struct qman *qm, enum qm_channel channel, + u16 tp_cfg) +{ + u32 offset; + int upperhalf = 0; + if ((channel >= qm_channel_swportal0) && + (channel <= qm_channel_swportal9)) { + offset = (channel - qm_channel_swportal0); + upperhalf = offset & 0x1; + offset = REG_WQ_SC_DD_CFG(offset / 2); + } else if ((channel >= qm_channel_pool1) && + (channel <= qm_channel_pool15)) { + offset = (channel + 1 - qm_channel_pool1); + upperhalf = offset & 0x1; + offset = REG_WQ_PC_DD_CFG(offset / 2); + } else if ((channel >= qm_channel_fman0_sp0) && + (channel <= qm_channel_fman0_sp11)) { + offset = (channel - qm_channel_fman0_sp0); + upperhalf = offset & 0x1; + offset = REG_WQ_DC0_DD_CFG(offset / 2); + } + else if ((channel >= qm_channel_fman1_sp0) && + (channel <= qm_channel_fman1_sp11)) { + offset = (channel - qm_channel_fman1_sp0); + upperhalf = offset & 0x1; + offset = REG_WQ_DC1_DD_CFG(offset / 2); + } + else if (channel == qm_channel_caam) + offset = REG_WQ_DCn_DD_CFG(2); + else if (channel == qm_channel_pme) + offset = REG_WQ_DCn_DD_CFG(3); + else { + pr_crit("Illegal qm_channel type %d\n", channel); + 
return; + } + __qm_out(qm, offset, upperhalf ? ((u32)tp_cfg << 16) : tp_cfg); +} + +static void qm_get_details(struct qman *qm, u8 *int_options, u8 *errata, + u8 *conf_options) +{ + u32 v = qm_in(IP_REV_1); + *int_options = (v >> 16) & 0xff; + *errata = (v >> 8) & 0xff; + *conf_options = v & 0xff; +} + +static void qm_set_corenet_bar(struct qman *qm, u16 eba, u32 ba) +{ + /* choke if 'ba' isn't properly aligned */ + DPA_ASSERT(!(ba & 0x001fffff)); + qm_out(QCSP_BARE, eba); + qm_out(QCSP_BAR, ba); +} + +static u8 qm_get_corenet_sourceid(struct qman *qm) +{ + return qm_in(SRCIDR); +} + +static u16 qm_get_liodn(struct qman *qm) +{ + return qm_in(LIODNR); +} + +static void qm_set_congestion_config(struct qman *qm, u16 pres) +{ + qm_out(CM_CFG, pres); +} + +#endif + +static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal, + int ed, u8 sernd) +{ + DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) || + (portal == qm_dc_portal_fman1)); + qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f)); +} + +static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class, + u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5, + u8 csw6, u8 csw7) +{ +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 +#define csw(x) \ +do { \ + if (++x == 8) \ + x = 7; \ +} while (0) + if (qman_ip_rev == QMAN_REV10) { + csw(csw2); + csw(csw3); + csw(csw4); + csw(csw5); + csw(csw6); + csw(csw7); + } +#endif + qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) | + ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) | + ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) | + ((csw6 & 0x7) << 4) | (csw7 & 0x7)); +} + +static void qm_set_hid(struct qman *qm) +{ +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if (qman_ip_rev == QMAN_REV10) + qm_out(HID_CFG, 3); + else +#endif + qm_out(HID_CFG, 0); +} + +static void qm_set_corenet_initiator(struct qman *qm) +{ + qm_out(CI_SCHED_CFG, + 0x80000000 | /* write srcciv enable */ + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) | + (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) | + (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) | + CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W); +} + +static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor) +{ + u32 v = qm_in(IP_REV_1); + *id = (v >> 16); + *major = (v >> 8) & 0xff; + *minor = v & 0xff; +} + +static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba, + int enable, int prio, int stash, u32 size) +{ + u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE; + u32 exp = ilog2(size); + /* choke if size isn't within range */ + DPA_ASSERT((size >= 4096) && (size <= 1073741824) && + is_power_of_2(size)); + /* choke if 'ba' has lower-alignment than 'size' */ + DPA_ASSERT(!(ba & (size - 1))); + __qm_out(qm, offset, upper_32_bits(ba)); + __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba)); + __qm_out(qm, offset + REG_offset_AR, + (enable ? 0x80000000 : 0) | + (prio ? 0x40000000 : 0) | + (stash ? 
0x20000000 : 0) | + (exp - 1)); +} + +static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k) +{ + qm_out(PFDR_FP_LWIT, th & 0xffffff); + qm_out(PFDR_CFG, k); +} + +static void qm_set_sfdr_threshold(struct qman *qm, u16 th) +{ + qm_out(SFDR_CFG, th & 0x3ff); +} + +static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num) +{ + u8 rslt = MCR_get_rslt(qm_in(MCR)); + + DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num); + /* Make sure the command interface is 'idle' */ + if(!MCR_rslt_idle(rslt)) + panic("QMAN_MCR isn't idle"); + + /* Write the MCR command params then the verb */ + qm_out(MCP(0), pfdr_start ); + /* TODO: remove this - it's a workaround for a model bug that is + * corrected in more recent versions. We use the workaround until + * everyone has upgraded. */ + qm_out(MCP(1), (pfdr_start + num - 16)); + lwsync(); + qm_out(MCR, MCR_INIT_PFDR); + + /* Poll for the result */ + do { + rslt = MCR_get_rslt(qm_in(MCR)); + } while(!MCR_rslt_idle(rslt)); + if (MCR_rslt_ok(rslt)) + return 0; + if (MCR_rslt_eaccess(rslt)) + return -EACCES; + if (MCR_rslt_inval(rslt)) + return -EINVAL; + pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt); + return -ENOSYS; +} + +/*****************/ +/* Config driver */ +/*****************/ + +/* TODO: Kconfig these? */ +#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ) +#define DEFAULT_PFDR_SZ (PAGE_SIZE << 12) + +/* We support only one of these */ +static struct qman *qm; +static struct device_node *qm_node; + +/* Parse the property to extract the memory location and size and + * memblock_reserve() it. If it isn't supplied, memblock_alloc() the default size. */ +static __init int parse_mem_property(struct device_node *node, const char *name, + dma_addr_t *addr, size_t *sz, int zero) +{ + const u32 *pint; + int ret; + + pint = of_get_property(node, name, &ret); + if (!pint || (ret != 16)) { + pr_info("No %s property '%s', using memblock_alloc(%016zx)\n", + node->full_name, name, *sz); + *addr = memblock_alloc(*sz, *sz); + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + return 0; + } + pr_info("Using %s property '%s'\n", node->full_name, name); + /* If using a "zero-pma", don't try to zero it, even if you asked */ + if (zero && of_find_property(node, "zero-pma", &ret)) { + pr_info(" it's a 'zero-pma', not zeroing from s/w\n"); + zero = 0; + } + *addr = ((u64)pint[0] << 32) | (u64)pint[1]; + *sz = ((u64)pint[2] << 32) | (u64)pint[3]; + /* Keep things simple, it's either all in the DRAM range or it's all + * outside. */ + if (*addr < memblock_end_of_DRAM()) { + BUG_ON((u64)*addr + (u64)*sz > memblock_end_of_DRAM()); + if (memblock_reserve(*addr, *sz) < 0) { + pr_err("Failed to reserve %s\n", name); + return -ENOMEM; + } + if (zero) + memset(phys_to_virt(*addr), 0, *sz); + } else if (zero) { + /* map as cacheable, non-guarded */ + void *tmpp = ioremap_prot(*addr, *sz, 0); + memset(tmpp, 0, *sz); + iounmap(tmpp); + } + return 0; +} + +/* TODO: + * - there is obviously no handling of errors, + * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for + * both memory resources to zero. 
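+ * (Editor's note: concretely, the two qm_set_memory() calls below pass
+ * enable=1, prio=0, stash=0 for both the FQD and PFDR memory regions.)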
+ */ +static int __init fsl_qman_init(struct device_node *node) +{ + struct resource res; + u32 __iomem *regs; + const char *s; + dma_addr_t fqd_a = 0, pfdr_a = 0; + size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ; + int ret, standby = 0; + u16 id; + u8 major, minor; + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("Can't get %s property '%s'\n", node->full_name, "reg"); + return ret; + } + s = of_get_property(node, "fsl,hv-claimable", &ret); + if (s && !strcmp(s, "standby")) + standby = 1; + if (!standby) { + ret = parse_mem_property(node, "fsl,qman-fqd", + &fqd_a, &fqd_sz, 1); + BUG_ON(ret); + ret = parse_mem_property(node, "fsl,qman-pfdr", + &pfdr_a, &pfdr_sz, 0); + BUG_ON(ret); + } + /* Global configuration */ + regs = ioremap(res.start, res.end - res.start + 1); + qm = qm_create(regs); + qm_node = node; + qm_get_version(qm, &id, &major, &minor); + pr_info("Qman ver:%04x,%02x,%02x\n", id, major, minor); + if (!qman_ip_rev) { + if ((major == 1) && (minor == 0)) + qman_ip_rev = QMAN_REV10; + else if ((major == 1) && (minor == 1)) + qman_ip_rev = QMAN_REV11; + else if ((major == 1) && (minor == 2)) + qman_ip_rev = QMAN_REV12; + else if ((major == 2) && (minor == 0)) + qman_ip_rev = QMAN_REV20; + else { + pr_warning("unknown Qman version, default to rev1.1\n"); + qman_ip_rev = QMAN_REV11; + } + } + + if (standby) { + pr_info(" -> in standby mode\n"); + return 0; + } + /* FQD memory */ + qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz); + /* PFDR memory */ + qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz); + qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8); + /* thresholds */ + qm_set_pfdr_threshold(qm, 512, 64); + qm_set_sfdr_threshold(qm, 128); + /* clear stale PEBI bit from interrupt status register */ + qm_err_isr_status_clear(qm, QM_EIRQ_PEBI); + /* corenet initiator settings */ + qm_set_corenet_initiator(qm); + /* HID settings */ + qm_set_hid(qm); + /* Set scheduling weights to defaults */ + for (ret = qm_wq_first; ret <= qm_wq_last; ret++) + qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0); + /* We are not prepared to accept ERNs for hardware enqueues */ + qm_set_dc(qm, qm_dc_portal_fman0, 1, 0); + qm_set_dc(qm, qm_dc_portal_fman1, 1, 0); + return 0; +} + +int qman_have_ccsr(void) +{ + return qm ? 1 : 0; +} + +__init void qman_init_early(void) +{ + struct device_node *dn; + for_each_compatible_node(dn, NULL, "fsl,qman") { + if (qm) + pr_err("%s: only one 'fsl,qman' allowed\n", + dn->full_name); + else { + int ret = fsl_qman_init(dn); + BUG_ON(ret); + } + } +} + +static void log_edata_bits(u32 bit_count) +{ + u32 i, j, mask = 0xffffffff; + + pr_warning("Qman ErrInt, EDATA:\n"); + i = bit_count/32; + if (bit_count%32) { + i++; + mask = ~(mask << bit_count%32); + } + j = 16-i; + pr_warning(" 0x%08x\n", qm_in(EDATA(j)) & mask); + j++; + for (; j < 16; j++) + pr_warning(" 0x%08x\n", qm_in(EDATA(j))); +} + +static void log_additional_error_info(u32 isr_val, u32 ecsr_val) +{ + union qman_ecir ecir_val; + union qman_eadr eadr_val; + + ecir_val.ecir_raw = qm_in(ECIR); + /* Is portal info valid */ + if (ecsr_val & PORTAL_ECSR_ERR) { + pr_warning("Qman ErrInt: %s id %d\n", + (ecir_val.info.portal_type) ? 
+ "DCP" : "SWP", ecir_val.info.portal_num); + } + if (ecsr_val & FQID_ECSR_ERR) { + pr_warning("Qman ErrInt: ecir.fqid 0x%x\n", + ecir_val.info.fqid); + } + if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) { + eadr_val.eadr_raw = qm_in(EADR); + pr_warning("Qman ErrInt: EADR Memory: %s, 0x%x\n", + error_mdata[eadr_val.info.memid].txt, + error_mdata[eadr_val.info.memid].addr_mask + & eadr_val.info.eadr); + log_edata_bits(error_mdata[eadr_val.info.memid].bits); + } +} + +/* Qman interrupt handler */ +static irqreturn_t qman_isr(int irq, void *ptr) +{ + u32 isr_val, ier_val, ecsr_val, isr_mask, i; + + ier_val = qm_err_isr_enable_read(qm); + isr_val = qm_err_isr_status_read(qm); + ecsr_val = qm_in(ECSR); + isr_mask = isr_val & ier_val; + + if (!isr_mask) + return IRQ_NONE; + for (i = 0; i < QMAN_HWE_COUNT; i++) { + if (qman_hwerr_txts[i].mask & isr_mask) { + pr_warning("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt); + if (qman_hwerr_txts[i].mask & ecsr_val) { + log_additional_error_info(isr_mask, ecsr_val); + /* Re-arm error capture registers */ + qm_out(ECSR, ecsr_val); + } + if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) { + pr_devel("Qman un-enabling error 0x%x\n", + qman_hwerr_txts[i].mask); + ier_val &= ~qman_hwerr_txts[i].mask; + qm_err_isr_enable_write(qm, ier_val); + } + } + } + qm_err_isr_status_clear(qm, isr_val); + return IRQ_HANDLED; +} + +static int __bind_irq(void) +{ + int ret, err_irq; + + err_irq = of_irq_to_resource(qm_node, 0, NULL); + if (err_irq == NO_IRQ) { + pr_info("Can't get %s property '%s'\n", qm_node->full_name, + "interrupts"); + return -ENODEV; + } + ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node); + if (ret) { + pr_err("request_irq() failed %d for '%s'\n", ret, + qm_node->full_name); + return -ENODEV; + } + /* Write-to-clear any stale bits, (eg. starvation being asserted prior + * to resource allocation during driver init). 
*/ + qm_err_isr_status_clear(qm, 0xffffffff); + /* Enable Error Interrupts */ + qm_err_isr_enable_write(qm, 0xffffffff); + return 0; +} + +/* Initialise Error Interrupt Handler */ +int qman_init_error_int(struct device_node *node) +{ + if (!qman_have_ccsr()) + return 0; + if (node != qm_node) + return -EINVAL; + return __bind_irq(); +} + +#define PID_CFG_LIODN_MASK 0x0fff0000 +void qman_liodn_fixup(enum qm_channel channel) +{ + static int done; + static u32 liodn_offset; + u32 before, after; + int idx = channel - qm_channel_swportal0; + + if (!qman_have_ccsr()) + return; + before = qm_in(QCSP_PID_CFG(idx)); + if (!done) { + liodn_offset = before & PID_CFG_LIODN_MASK; + done = 1; + return; + } + after = (before & (~PID_CFG_LIODN_MASK)) | liodn_offset; + qm_out(QCSP_PID_CFG(idx), after); +} + +#ifdef CONFIG_SYSFS + +#define DRV_NAME "fsl-qman" + +static ssize_t show_pfdr_fpc(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC)); +}; + +static ssize_t show_dlm_avg(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + u32 data; + int i; + + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) + return -EINVAL; + data = qm_in(DCP_DLM_AVG(i)); + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, + (data & 0x000000ff)*390625); +}; + +static ssize_t set_dlm_avg(struct device *dev, + struct device_attribute *dev_attr, const char *buf, size_t count) +{ + unsigned long val; + int i; + + if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i)) + return -EINVAL; + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n", buf); + return -EINVAL; + } + qm_out(DCP_DLM_AVG(i), val); + return count; +}; + +static ssize_t show_pfdr_cfg(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG)); +}; + +static ssize_t set_pfdr_cfg(struct device *dev, + struct device_attribute *dev_attr, const char *buf, size_t count) +{ + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n", buf); + return -EINVAL; + } + qm_out(PFDR_CFG, val); + return count; +}; + +static ssize_t show_sfdr_in_use(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE)); +}; + +static ssize_t show_idle_stat(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT)); +}; + +static ssize_t show_ci_rlm_avg(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + u32 data = qm_in(CI_RLM_AVG); + return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8, + (data & 0x000000ff)*390625); +}; + +static ssize_t set_ci_rlm_avg(struct device *dev, + struct device_attribute *dev_attr, const char *buf, size_t count) +{ + unsigned long val; + + if (strict_strtoul(buf, 0, &val)) { + dev_dbg(dev, "invalid input %s\n", buf); + return -EINVAL; + } + qm_out(CI_RLM_AVG, val); + return count; +}; + +static ssize_t show_err_isr(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR)); +}; + + +static ssize_t show_sbec(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + int i; + + if (!sscanf(dev_attr->attr.name, "sbec_%d", &i)) + return -EINVAL; + return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i))); +}; + +static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL); +static 
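The DLM/RLM averages read above are fixed-point values: the upper bits are the integer part and the low 8 bits a fraction in 1/256 steps, which is why the fraction is scaled by 390625 (that is, 10^8/256) and printed into an 8-digit field. A minimal sketch with a made-up register value:

	u32 raw = 0x340;			/* assumed sample: 3 + 0x40/256 */
	u32 ipart = raw >> 8;			/* 3 */
	u32 frac = (raw & 0xff) * 390625;	/* 64 * 390625 = 25000000 */
	pr_info("%u.%08u\n", ipart, frac);	/* prints "3.25000000" */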
DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg); +static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL); +static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUGO), + show_ci_rlm_avg, set_ci_rlm_avg); +static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL); +static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL); + +static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); +static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); +static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); +static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUGO), show_dlm_avg, set_dlm_avg); + +static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL); +static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL); + + +static struct attribute *qman_dev_attributes[] = { + &dev_attr_pfdr_fpc.attr, + &dev_attr_pfdr_cfg.attr, + &dev_attr_idle_stat.attr, + &dev_attr_ci_rlm_avg.attr, + &dev_attr_err_isr.attr, + &dev_attr_dcp0_dlm_avg.attr, + &dev_attr_dcp1_dlm_avg.attr, + &dev_attr_dcp2_dlm_avg.attr, + &dev_attr_dcp3_dlm_avg.attr, + /* sfdr_in_use will be added if necessary */ + NULL +}; + +static struct attribute *qman_dev_ecr_attributes[] = { + &dev_attr_sbec_0.attr, + &dev_attr_sbec_1.attr, + &dev_attr_sbec_2.attr, + &dev_attr_sbec_3.attr, + &dev_attr_sbec_4.attr, + &dev_attr_sbec_5.attr, + &dev_attr_sbec_6.attr, + NULL +}; + +/* root level */ +static const struct attribute_group qman_dev_attr_grp = { + .name = NULL, + .attrs = qman_dev_attributes +}; +static const struct attribute_group qman_dev_ecr_grp = { + .name = "error_capture", + .attrs = qman_dev_ecr_attributes +}; + +static int of_fsl_qman_remove(struct platform_device *ofdev) +{ + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); + return 0; +}; + +static int of_fsl_qman_probe(struct platform_device *ofdev) +{ + int ret; + + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp); + if (ret) + goto done; + if (qman_ip_rev != QMAN_REV10) { + ret = sysfs_add_file_to_group(&ofdev->dev.kobj, + &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name); + if (ret) + goto del_group_0; + } + ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp); + if (ret) + goto del_group_0; + + goto done; + +del_group_0: + sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp); +done: + if (ret) + dev_err(&ofdev->dev, + "Cannot create dev attributes ret=%d\n", ret); + return ret; +}; + +static struct of_device_id of_fsl_qman_ids[] = { + { + .compatible = "fsl,qman", + }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_qman_ids); + +static struct platform_driver of_fsl_qman_driver = { + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + .of_match_table = of_fsl_qman_ids, + }, + .probe = of_fsl_qman_probe, + .remove = of_fsl_qman_remove, +}; + +static int qman_ctrl_init(void) +{ + return platform_driver_register(&of_fsl_qman_driver); +} + +static void qman_ctrl_exit(void) +{ + platform_driver_unregister(&of_fsl_qman_driver); +} + +module_init(qman_ctrl_init); +module_exit(qman_ctrl_exit); + +#endif /* CONFIG_SYSFS */ --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_debugfs.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_debugfs.c @@ -0,0 +1,1314 @@ +/* Copyright 2010-2011 Freescale Semiconductor, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "qman_private.h" + +#define MAX_FQID (0x00ffffff) +#define QM_FQD_BLOCK_SIZE 64 +#define QM_FQD_AR (0xC10) + +static u32 fqid_max; +static u64 qman_ccsr_start; +static u64 qman_ccsr_size; + +static const char *state_txt[] = { + "Out of Service", + "Retired", + "Tentatively Scheduled", + "Truly Scheduled", + "Parked", + "Active, Active Held or Held Suspended", + "Unknown State 6", + "Unknown State 7", + NULL, +}; + +static const u8 fqd_states[] = { + QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED, + QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED, + QM_MCR_NP_STATE_ACTIVE}; +static const u32 fqd_states_count = sizeof(fqd_states)/sizeof(u8); + +struct mask_to_text { + u16 mask; + const char *txt; +}; + +struct mask_filter_s { + u16 mask; + u8 filter; +}; + +static const struct mask_filter_s mask_filter[] = { + {QM_FQCTRL_PREFERINCACHE, 0}, + {QM_FQCTRL_PREFERINCACHE, 1}, + {QM_FQCTRL_HOLDACTIVE, 0}, + {QM_FQCTRL_HOLDACTIVE, 1}, + {QM_FQCTRL_AVOIDBLOCK, 0}, + {QM_FQCTRL_AVOIDBLOCK, 1}, + {QM_FQCTRL_FORCESFDR, 0}, + {QM_FQCTRL_FORCESFDR, 1}, + {QM_FQCTRL_CPCSTASH, 0}, + {QM_FQCTRL_CPCSTASH, 1}, + {QM_FQCTRL_CTXASTASHING, 0}, + {QM_FQCTRL_CTXASTASHING, 1}, + {QM_FQCTRL_ORP, 0}, + {QM_FQCTRL_ORP, 1}, + {QM_FQCTRL_TDE, 0}, + {QM_FQCTRL_TDE, 1}, + {QM_FQCTRL_CGE, 0}, + {QM_FQCTRL_CGE, 1} +}; +static const u32 mask_filter_count = + sizeof(mask_filter)/sizeof(struct mask_filter_s); + +static const struct mask_to_text fq_ctrl_text_list[] = { + { + .mask = QM_FQCTRL_PREFERINCACHE, + .txt = "Prefer in cache", + }, + { + .mask = QM_FQCTRL_HOLDACTIVE, + .txt = "Hold active in portal", + }, + { + .mask = QM_FQCTRL_AVOIDBLOCK, + .txt = "Avoid Blocking", + }, + { + .mask = QM_FQCTRL_FORCESFDR, + .txt = "High-priority SFDRs", + }, + { + .mask = 
QM_FQCTRL_CPCSTASH, + .txt = "CPC Stash Enable", + }, + { + .mask = QM_FQCTRL_CTXASTASHING, + .txt = "Context-A stashing", + }, + { + .mask = QM_FQCTRL_ORP, + .txt = "ORP Enable", + }, + { + .mask = QM_FQCTRL_TDE, + .txt = "Tail-Drop Enable", + }, + { + .mask = QM_FQCTRL_CGE, + .txt = "Congestion Group Enable", + }, + { + .mask = 0, + .txt = NULL, + } +}; + +static const char *get_fqd_ctrl_text(u16 mask) +{ + int i = 0; + + while (fq_ctrl_text_list[i].txt != NULL) { + if (fq_ctrl_text_list[i].mask == mask) + return fq_ctrl_text_list[i].txt; + i++; + } + return NULL; +} + +static const struct mask_to_text stashing_text_list[] = { + { + .mask = QM_STASHING_EXCL_CTX, + .txt = "FQ Ctx Stash" + }, + { + .mask = QM_STASHING_EXCL_DATA, + .txt = "Frame Data Stash", + }, + { + .mask = QM_STASHING_EXCL_ANNOTATION, + .txt = "Frame Annotation Stash", + }, + { + .mask = 0, + .txt = NULL, + }, +}; + +static int user_input_convert(const char __user *user_buf, size_t count, + unsigned long *val) +{ + char buf[12]; + + if (count > sizeof(buf) - 1) + return -EINVAL; + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + buf[count] = '\0'; + if (strict_strtoul(buf, 0, val)) + return -EINVAL; + return 0; +} + +struct line_buffer_fq { + u32 buf[8]; + u32 buf_cnt; + int line_cnt; +}; + +static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid, + struct seq_file *file) +{ + line_buf->buf[line_buf->buf_cnt] = fqid; + line_buf->buf_cnt++; + if (line_buf->buf_cnt == 8) { + /* Buffer is full, flush it */ + if (line_buf->line_cnt != 0) + seq_printf(file, ",\n"); + seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x," + "0x%06x,0x%06x,0x%06x", + line_buf->buf[0], line_buf->buf[1], line_buf->buf[2], + line_buf->buf[3], line_buf->buf[4], line_buf->buf[5], + line_buf->buf[6], line_buf->buf[7]); + line_buf->buf_cnt = 0; + line_buf->line_cnt++; + } +} + +static void flush_line_buffer(struct line_buffer_fq *line_buf, + struct seq_file *file) +{ + if (line_buf->buf_cnt) { + int y = 0; + if (line_buf->line_cnt != 0) + seq_printf(file, ",\n"); + while (y != line_buf->buf_cnt) { + if (y+1 == line_buf->buf_cnt) + seq_printf(file, "0x%06x", line_buf->buf[y]); + else + seq_printf(file, "0x%06x,", line_buf->buf[y]); + y++; + } + line_buf->line_cnt++; + } + if (line_buf->line_cnt) + seq_printf(file, "\n"); +} + +static struct dentry *dfs_root; /* debugfs root directory */ + +/******************************************************************************* + * Query Frame Queue Non Programmable Fields + ******************************************************************************/ +struct query_fq_np_fields_data_s { + u32 fqid; +}; +static struct query_fq_np_fields_data_s query_fq_np_fields_data = { + .fqid = 1, +}; + +static int query_fq_np_fields_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_queryfq_np np; + struct qman_fq fq; + + fq.fqid = query_fq_np_fields_data.fqid; + ret = qman_query_fq_np(&fq, &np); + if (ret) + return ret; + /* Print state */ + seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n", + fq.fqid); + seq_printf(file, " force eligible pending: %s\n", + (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no"); + seq_printf(file, " retirement pending: %s\n", + (np.state & QM_MCR_NP_STATE_R) ? 
"yes" : "no"); + seq_printf(file, " state: %s\n", + state_txt[np.state & QM_MCR_NP_STATE_MASK]); + seq_printf(file, " fq_link: 0x%x\n", np.fqd_link); + seq_printf(file, " odp_seq: %u\n", np.odp_seq); + seq_printf(file, " orp_nesn: %u\n", np.orp_nesn); + seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq); + seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq); + seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr); + seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr); + seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr); + seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr); + seq_printf(file, " is: ics_surp contains a %s\n", + (np.is) ? "deficit" : "surplus"); + seq_printf(file, " ics_surp: %u\n", np.ics_surp); + seq_printf(file, " byte_cnt: %u\n", np.byte_cnt); + seq_printf(file, " frm_cnt: %u\n", np.frm_cnt); + seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr); + seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr); + seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr); + seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr); + seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr); + return 0; +} + +static int query_fq_np_fields_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_fq_np_fields_show, NULL); +} + +static ssize_t query_fq_np_fields_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > MAX_FQID) + return -EINVAL; + query_fq_np_fields_data.fqid = (u32)val; + return count; +} + +static const struct file_operations query_fq_np_fields_fops = { + .owner = THIS_MODULE, + .open = query_fq_np_fields_open, + .read = seq_read, + .write = query_fq_np_fields_write, + .release = single_release, +}; + +/******************************************************************************* + * Frame Queue Programmable Fields + ******************************************************************************/ +struct query_fq_fields_data_s { + u32 fqid; +}; + +static struct query_fq_fields_data_s query_fq_fields_data = { + .fqid = 1, +}; + +static int query_fq_fields_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_fqd fqd; + struct qman_fq fq; + int i = 0; + + memset(&fqd, 0, sizeof(struct qm_fqd)); + fq.fqid = query_fq_fields_data.fqid; + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n", + fq.fqid); + seq_printf(file, " orprws: %u\n", fqd.orprws); + seq_printf(file, " oa: %u\n", fqd.oa); + seq_printf(file, " olws: %u\n", fqd.olws); + + seq_printf(file, " cgid: %u\n", fqd.cgid); + + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0) + seq_printf(file, " fq_ctrl: None\n"); + else { + i = 0; + seq_printf(file, " fq_ctrl:\n"); + while (fq_ctrl_text_list[i].txt != NULL) { + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & + fq_ctrl_text_list[i].mask) + seq_printf(file, " %s\n", + fq_ctrl_text_list[i].txt); + i++; + } + } + seq_printf(file, " dest_channel: %u\n", fqd.dest.channel); + seq_printf(file, " dest_wq: %u\n", fqd.dest.wq); + seq_printf(file, " ics_cred: %u\n", fqd.ics_cred); + seq_printf(file, " td_mant: %u\n", fqd.td.mant); + seq_printf(file, " td_exp: %u\n", fqd.td.exp); + + seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b); + + seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd)); + /* Any stashing configured */ + if ((fqd.context_a.stashing.exclusive & 0x7) == 0) + seq_printf(file, " ctx_a_stash_exclusive: None\n"); + else { + 
seq_printf(file, " ctx_a_stash_exclusive:\n"); + i = 0; + while (stashing_text_list[i].txt != NULL) { + if ((fqd.fq_ctrl & 0x7) & stashing_text_list[i].mask) + seq_printf(file, " %s\n", + stashing_text_list[i].txt); + i++; + } + } + seq_printf(file, " ctx_a_stash_annotation_cl: %u\n", + fqd.context_a.stashing.annotation_cl); + seq_printf(file, " ctx_a_stash_data_cl: %u\n", + fqd.context_a.stashing.data_cl); + seq_printf(file, " ctx_a_stash_context_cl: %u\n", + fqd.context_a.stashing.context_cl); + return 0; +} + +static int query_fq_fields_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_fq_fields_show, NULL); +} + +static ssize_t query_fq_fields_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > MAX_FQID) + return -EINVAL; + query_fq_fields_data.fqid = (u32)val; + return count; +} + +static const struct file_operations query_fq_fields_fops = { + .owner = THIS_MODULE, + .open = query_fq_fields_open, + .read = seq_read, + .write = query_fq_fields_write, + .release = single_release, +}; + +/******************************************************************************* + * Query WQ lengths + ******************************************************************************/ +struct query_wq_lengths_data_s { + union { + u16 channel_wq; /* ignores wq (3 lsbits) */ + struct { + u16 id:13; /* enum qm_channel */ + u16 __reserved:3; + } __packed channel; + }; +}; +static struct query_wq_lengths_data_s query_wq_lengths_data; +static int query_wq_lengths_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_querywq wq; + int i; + + memset(&wq, 0, sizeof(struct qm_mcr_querywq)); + wq.channel.id = query_wq_lengths_data.channel.id; + ret = qman_query_wq(0, &wq); + if (ret) + return ret; + seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id); + for (i = 0; i < 8; i++) + /* mask out upper 4 bits since they are not part of length */ + seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff); + return 0; +} + +static int query_wq_lengths_open(struct inode *inode, + struct file *file) +{ + return single_open(file, query_wq_lengths_show, NULL); +} + +static ssize_t query_wq_lengths_write(struct file *f, + const char __user *buf, size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xfff8) + return -EINVAL; + query_wq_lengths_data.channel.id = (u16)val; + return count; +} + +static const struct file_operations query_wq_lengths_fops = { + .owner = THIS_MODULE, + .open = query_wq_lengths_open, + .read = seq_read, + .write = query_wq_lengths_write, + .release = single_release, +}; + +/******************************************************************************* + * Query CGR + ******************************************************************************/ +struct query_cgr_s { + u8 cgid; +}; +static struct query_cgr_s query_cgr_data; + +static int query_cgr_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_querycgr cgrd; + struct qman_cgr cgr; + + memset(&cgr, 0, sizeof(struct qm_mcr_querycgr)); + cgr.cgrid = query_cgr_data.cgid; + ret = qman_query_cgr(&cgr, &cgrd); + if (ret) + return ret; + seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid); + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn, + cgrd.cgr.wr_parm_g.SA, 
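The query_* debugfs entries above follow a write-then-read convention: write the id of interest (FQID, channel or CGR id), then read back the formatted report. A small user-space sketch; the mount point /sys/kernel/debug and the FQID are assumptions, while the qman/query_fq_np_fields names match those created later in qman_debugfs_module_init:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		const char *path = "/sys/kernel/debug/qman/query_fq_np_fields";
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, "0x100", 5) < 0)	/* select FQID 0x100 */
			return 1;
		close(fd);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);	/* dump the report */
		close(fd);
		return 0;
	}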
cgrd.cgr.wr_parm_g.Sn, + cgrd.cgr.wr_parm_g.Pn); + + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn, + cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn, + cgrd.cgr.wr_parm_y.Pn); + + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn, + cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn, + cgrd.cgr.wr_parm_r.Pn); + + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", + cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r); + + seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en); + seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ); + seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en); + seq_printf(file, " cs: %u\n", cgrd.cgr.cs); + + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", + cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn); + + if (qman_ip_rev != QMAN_REV10) { + seq_printf(file, " mode: %s\n", + (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ? + "frame count" : "byte count"); + } + seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd)); + seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd)); + + return 0; +} + +static int query_cgr_open(struct inode *inode, struct file *file) +{ + return single_open(file, query_cgr_show, NULL); +} + +static ssize_t query_cgr_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xff) + return -EINVAL; + query_cgr_data.cgid = (u8)val; + return count; +} + +static const struct file_operations query_cgr_fops = { + .owner = THIS_MODULE, + .open = query_cgr_open, + .read = seq_read, + .write = query_cgr_write, + .release = single_release, +}; + +/******************************************************************************* + * Test Write CGR + ******************************************************************************/ +struct test_write_cgr_s { + u64 i_bcnt; + u8 cgid; +}; +static struct test_write_cgr_s test_write_cgr_data; + +static int testwrite_cgr_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_cgrtestwrite result; + struct qman_cgr cgr; + u64 i_bcnt; + + memset(&cgr, 0, sizeof(struct qman_cgr)); + memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite)); + cgr.cgrid = test_write_cgr_data.cgid; + i_bcnt = test_write_cgr_data.i_bcnt; + ret = qman_testwrite_cgr(&cgr, i_bcnt, &result); + if (ret) + return ret; + seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid); + seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn, + result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn, + result.cgr.wr_parm_g.Pn); + seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn, + result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn, + result.cgr.wr_parm_y.Pn); + seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n", + result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn, + result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn, + result.cgr.wr_parm_r.Pn); + seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n", + result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r); + seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en); + seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ); + seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en); + seq_printf(file, " cs: %u\n", 
result.cgr.cs); + seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n", + result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn); + + /* Add Mode for Si 2 */ + if (qman_ip_rev != QMAN_REV10) { + seq_printf(file, " mode: %s\n", + (result.cgr.mode & QMAN_CGR_MODE_FRAME) ? + "frame count" : "byte count"); + } + + seq_printf(file, " i_bcnt: %llu\n", + qm_mcr_cgrtestwrite_i_get64(&result)); + seq_printf(file, " a_bcnt: %llu\n", + qm_mcr_cgrtestwrite_a_get64(&result)); + seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g); + seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y); + seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r); + return 0; +} + +static int testwrite_cgr_open(struct inode *inode, struct file *file) +{ + return single_open(file, testwrite_cgr_show, NULL); +} + +static const struct file_operations testwrite_cgr_fops = { + .owner = THIS_MODULE, + .open = testwrite_cgr_open, + .read = seq_read, + .release = single_release, +}; + + +static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt); + return 0; +} +static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file) +{ + return single_open(file, testwrite_cgr_ibcnt_show, NULL); +} + +static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + test_write_cgr_data.i_bcnt = val; + return count; +} + +static const struct file_operations teswrite_cgr_ibcnt_fops = { + .owner = THIS_MODULE, + .open = testwrite_cgr_ibcnt_open, + .read = seq_read, + .write = testwrite_cgr_ibcnt_write, + .release = single_release, +}; + +static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid); + return 0; +} +static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file) +{ + return single_open(file, testwrite_cgr_cgrid_show, NULL); +} + +static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xff) + return -EINVAL; + test_write_cgr_data.cgid = (u8)val; + return count; +} + +static const struct file_operations teswrite_cgr_cgrid_fops = { + .owner = THIS_MODULE, + .open = testwrite_cgr_cgrid_open, + .read = seq_read, + .write = testwrite_cgr_cgrid_write, + .release = single_release, +}; + +/******************************************************************************* + * Query Congestion State + ******************************************************************************/ +static int query_congestion_show(struct seq_file *file, void *offset) +{ + int ret; + struct qm_mcr_querycongestion cs; + int i, j, in_cong = 0; + u32 mask; + + memset(&cs, 0, sizeof(struct qm_mcr_querycongestion)); + ret = qman_query_congestion(&cs); + if (ret) + return ret; + seq_printf(file, "Query Congestion Result\n"); + for (i = 0; i < 8; i++) { + mask = 0x80000000; + for (j = 0; j < 32; j++) { + if (cs.state.__state[i] & mask) { + in_cong = 1; + seq_printf(file, " cg %u: %s\n", (i*32)+j, + "in congestion"); + } + mask >>= 1; + } + } + if (!in_cong) + seq_printf(file, " All congestion groups not congested.\n"); + return 0; +} + +static int query_congestion_open(struct inode *inode, struct file *file) +{ + return single_open(file, query_congestion_show, 
NULL); +} + +static const struct file_operations query_congestion_fops = { + .owner = THIS_MODULE, + .open = query_congestion_open, + .read = seq_read, + .release = single_release, +}; + +/******************************************************************************* + * QMan register + ******************************************************************************/ +struct qman_register_s { + u32 val; +}; +static struct qman_register_s qman_register_data; + +static void init_ccsrmempeek(void) +{ + struct device_node *dn; + const u32 *regaddr_p; + + dn = of_find_compatible_node(NULL, NULL, "fsl,qman"); + if (!dn) { + pr_info("No fsl,qman node\n"); + return; + } + regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL); + if (!regaddr_p) { + of_node_put(dn); + return; + } + qman_ccsr_start = of_translate_address(dn, regaddr_p); + of_node_put(dn); +} +/* This function provides access to QMan ccsr memory map */ +static int qman_ccsrmempeek(u32 *val, u32 offset) +{ + void __iomem *addr; + u64 phys_addr; + + if (!qman_ccsr_start) + return -EINVAL; + + if (offset > (qman_ccsr_size - sizeof(u32))) + return -EINVAL; + + phys_addr = qman_ccsr_start + offset; + addr = ioremap(phys_addr, sizeof(u32)); + if (!addr) { + pr_err("ccsrmempeek, ioremap failed\n"); + return -EINVAL; + } + *val = in_be32(addr); + iounmap(addr); + return 0; +} + +static int qman_ccsrmempeek_show(struct seq_file *file, void *offset) +{ + u32 b; + + qman_ccsrmempeek(&b, qman_register_data.val); + seq_printf(file, "QMan register offset = 0x%x\n", + qman_register_data.val); + seq_printf(file, "value = 0x%08x\n", b); + + return 0; +} + +static int qman_ccsrmempeek_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_ccsrmempeek_show, NULL); +} + +static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + /* multiple of 4 */ + if (val > (qman_ccsr_size - sizeof(u32))) { + pr_info("Input 0x%lx > 0x%llx\n", + val, (qman_ccsr_size - sizeof(u32))); + return -EINVAL; + } + if (val & 0x3) { + pr_info("Input 0x%lx not multiple of 4\n", val); + return -EINVAL; + } + qman_register_data.val = val; + return count; +} + +static const struct file_operations qman_ccsrmempeek_fops = { + .owner = THIS_MODULE, + .open = qman_ccsrmempeek_open, + .read = seq_read, + .write = qman_ccsrmempeek_write, +}; + +/******************************************************************************* + * QMan state + ******************************************************************************/ +static int qman_fqd_state_show(struct seq_file *file, void *offset) +{ + struct qm_mcr_queryfq_np np; + struct qman_fq fq; + struct line_buffer_fq line_buf; + int ret, i; + u8 *state = file->private; + u32 qm_fq_state_cnt[fqd_states_count]; + + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt)); + memset(&line_buf, 0, sizeof(line_buf)); + + seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + ret = qman_query_fq_np(&fq, &np); + if (ret) + return ret; + if (*state == (np.state & QM_MCR_NP_STATE_MASK)) + add_to_line_buffer(&line_buf, fq.fqid, file); + /* Keep a summary count of all states */ + if ((np.state & QM_MCR_NP_STATE_MASK) < fqd_states_count) + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; + } + flush_line_buffer(&line_buf, file); + + for (i = 0; i < fqd_states_count; i++) { + seq_printf(file, "%s count = 
%u\n", state_txt[i], + qm_fq_state_cnt[i]); + } + return 0; +} + +static int qman_fqd_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_state_show, inode->i_private); +} + +static const struct file_operations qman_fqd_state_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_state_open, + .read = seq_read, +}; + +static int qman_fqd_ctrl_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + u32 fq_en_cnt = 0, fq_di_cnt = 0; + int ret, i; + struct mask_filter_s *data = file->private; + const char *ctrl_txt = get_fqd_ctrl_text(data->mask); + struct line_buffer_fq line_buf; + + memset(&line_buf, 0, sizeof(line_buf)); + seq_printf(file, "List of fq ids with: %s :%s\n", + ctrl_txt, (data->filter) ? "enabled" : "disabled"); + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + memset(&fqd, 0, sizeof(struct qm_fqd)); + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + if (data->filter) { + if (fqd.fq_ctrl & data->mask) + add_to_line_buffer(&line_buf, fq.fqid, file); + } else { + if (!(fqd.fq_ctrl & data->mask)) + add_to_line_buffer(&line_buf, fq.fqid, file); + } + if (fqd.fq_ctrl & data->mask) + fq_en_cnt++; + else + fq_di_cnt++; + } + flush_line_buffer(&line_buf, file); + + seq_printf(file, "Total FQD with: %s : enabled = %u\n", + ctrl_txt, fq_en_cnt); + seq_printf(file, "Total FQD with: %s : disabled = %u\n", + ctrl_txt, fq_di_cnt); + return 0; +} + +/******************************************************************************* + * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE + ******************************************************************************/ +static int qman_fqd_ctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_ctrl_show, inode->i_private); +} + +static const struct file_operations qman_fqd_ctrl_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_ctrl_open, + .read = seq_read, +}; + +/******************************************************************************* + * QMan ctrl summary + ******************************************************************************/ +/******************************************************************************* + * QMan summary state + ******************************************************************************/ +static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset) +{ + struct qm_mcr_queryfq_np np; + struct qman_fq fq; + int ret, i; + u32 qm_fq_state_cnt[fqd_states_count]; + + memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt)); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + ret = qman_query_fq_np(&fq, &np); + if (ret) + return ret; + /* Keep a summary count of all states */ + if ((np.state & QM_MCR_NP_STATE_MASK) < fqd_states_count) + qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++; + } + + for (i = 0; i < fqd_states_count; i++) { + seq_printf(file, "%s count = %u\n", state_txt[i], + qm_fq_state_cnt[i]); + } + return 0; +} + +static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + int ret, i , j; + u32 qm_prog_cnt[mask_filter_count/2]; + + memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt)); + + for (i = 1; i < fqid_max; i++) { + memset(&fqd, 0, sizeof(struct qm_fqd)); + fq.fqid = i; + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + /* Keep a summary count of all states */ + for (j = 0; j < mask_filter_count; j += 2) + if ((fqd.fq_ctrl & QM_FQCTRL_MASK) & + mask_filter[j].mask) + qm_prog_cnt[j/2]++; + 
} + for (i = 0; i < mask_filter_count/2; i++) { + seq_printf(file, "%s count = %u\n", + get_fqd_ctrl_text(mask_filter[i*2].mask), + qm_prog_cnt[i]); + } + return 0; +} + +static int qman_fqd_summary_show(struct seq_file *file, void *offset) +{ + int ret; + + /* Display summary of non programmable fields */ + ret = qman_fqd_non_prog_summary_show(file, offset); + if (ret) + return ret; + seq_printf(file, "-----------------------------------------\n"); + /* Display programmable fields */ + ret = qman_fqd_prog_summary_show(file, offset); + if (ret) + return ret; + return 0; +} + +static int qman_fqd_summary_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_summary_show, NULL); +} + +static const struct file_operations qman_fqd_summary_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_summary_open, + .read = seq_read, +}; + +/******************************************************************************* + * QMan destination work queue + ******************************************************************************/ +struct qman_dest_wq_s { + u16 wq_id; +}; +static struct qman_dest_wq_s qman_dest_wq_data = { + .wq_id = 0, +}; + +static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + int ret, i; + u16 *wq, wq_id = qman_dest_wq_data.wq_id; + struct line_buffer_fq line_buf; + + memset(&line_buf, 0, sizeof(line_buf)); + /* use vmalloc : need to allocate large memory region and don't + * require the memory to be physically contiguous. */ + wq = vmalloc(sizeof(u16) * (0xFFFF+1)); + if (!wq) + return -ENOMEM; + memset(wq, 0, sizeof(u16) * (0xFFFF+1)); + + seq_printf(file, "List of fq ids with destination work queue id" + " = 0x%x\n", wq_id); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + memset(&fqd, 0, sizeof(struct qm_fqd)); + ret = qman_query_fq(&fq, &fqd); + if (ret) { + vfree(wq); + return ret; + } + if (wq_id == fqd.dest_wq) + add_to_line_buffer(&line_buf, fq.fqid, file); + wq[fqd.dest_wq]++; + } + flush_line_buffer(&line_buf, file); + + seq_printf(file, "Summary of all FQD destination work queue values\n"); + for (i = 0; i < 0xFFFF; i++) { + if (wq[i]) + seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, " + "count = %u\n", i >> 3, i & 0x3, i, wq[i]); + } + vfree(wq); + return 0; +} + +static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf, + size_t count, loff_t *off) +{ + int ret; + unsigned long val; + + ret = user_input_convert(buf, count, &val); + if (ret) + return ret; + if (val > 0xFFFF) + return -EINVAL; + qman_dest_wq_data.wq_id = val; + return count; +} + +static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_dest_wq_show, NULL); +} + +static const struct file_operations qman_fqd_dest_wq_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_dest_wq_open, + .read = seq_read, + .write = qman_fqd_dest_wq_write, +}; + +/******************************************************************************* + * QMan Intra-Class Scheduling Credit + ******************************************************************************/ +static int qman_fqd_cred_show(struct seq_file *file, void *offset) +{ + struct qm_fqd fqd; + struct qman_fq fq; + int ret, i; + u32 fq_cnt = 0; + struct line_buffer_fq line_buf; + + memset(&line_buf, 0, sizeof(line_buf)); + seq_printf(file, "List of fq ids with Intra-Class Scheduling Credit > 0" + "\n"); + + for (i = 1; i < fqid_max; i++) { + fq.fqid = i; + memset(&fqd, 0, sizeof(struct 
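The dest_wq value compared above packs the destination as a 13-bit channel id with the work-queue number in the 3 least significant bits (the same split the query_wq_lengths structure earlier declares), so a (channel, wq) pair maps onto the field as in this sketch (the helper name is made up):

	/* dest_wq layout: bits [15:3] = channel id, bits [2:0] = work queue */
	static inline u16 make_dest_wq(u16 channel, u8 wq)
	{
		return (channel << 3) | (wq & 0x7);
	}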
qm_fqd)); + ret = qman_query_fq(&fq, &fqd); + if (ret) + return ret; + if (fqd.ics_cred > 0) { + add_to_line_buffer(&line_buf, fq.fqid, file); + fq_cnt++; + } + } + flush_line_buffer(&line_buf, file); + + seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt); + return 0; +} + +static int qman_fqd_cred_open(struct inode *inode, struct file *file) +{ + return single_open(file, qman_fqd_cred_show, NULL); +} + +static const struct file_operations qman_fqd_cred_fops = { + .owner = THIS_MODULE, + .open = qman_fqd_cred_open, + .read = seq_read, +}; + +/* helper macros used in qman_debugfs_module_init */ +#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \ + do { \ + d = debugfs_create_file(name, \ + mode, parent, \ + data, \ + fops); \ + if (d == NULL) { \ + ret = -ENOMEM; \ + goto _return; \ + } \ + } while (0) + +/* dfs_root as parent */ +#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \ + QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops) + +/* fqd_root as parent */ +#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \ + QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops) + +/* fqd state */ +#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \ + QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \ + (void *)&mask_filter[index], &qman_fqd_ctrl_fops) + +static int __init qman_debugfs_module_init(void) +{ + int ret = 0; + struct dentry *d, *fqd_root; + u32 reg; + + fqid_max = 0; + init_ccsrmempeek(); + if (qman_ccsr_start) { + if (!qman_ccsrmempeek(®, QM_FQD_AR)) { + /* extract the size of the FQD window */ + reg = reg & 0x3f; + /* calculate valid frame queue descriptor range */ + fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE; + } + } + dfs_root = debugfs_create_dir("qman", NULL); + fqd_root = debugfs_create_dir("fqd", dfs_root); + if (dfs_root == NULL || fqd_root == NULL) { + ret = -ENOMEM; + pr_err("Cannot create qman/fqd debugfs dir\n"); + goto _return; + } + if (fqid_max) { + QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO, + NULL, &qman_ccsrmempeek_fops); + } + QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO, + &query_fq_np_fields_data, &query_fq_np_fields_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO, + &query_fq_fields_data, &query_fq_fields_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO, + &query_wq_lengths_data, &query_wq_lengths_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO, + &query_cgr_data, &query_cgr_fops); + + QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO, + NULL, &query_congestion_fops); + + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO, + NULL, &testwrite_cgr_fops); + + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO, + NULL, &teswrite_cgr_cgrid_fops); + + QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO, + NULL, &teswrite_cgr_ibcnt_fops); + + /* Create files with fqd_root as parent */ + + QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_PARKED], + &qman_fqd_state_fops); + + 
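The fqid_max calculation above treats the low 6 bits of QM_FQD_AR as a power-of-two encoding of the FQD memory window; with 64-byte descriptors, an assumed encoding of 20 works out as follows:

	/* size field 20 -> window of 1 << (20 + 1) = 2 MiB,
	 * 2 MiB / QM_FQD_BLOCK_SIZE (64) = 32768 valid frame queue ids */
	u32 enc = 20;					/* assumed readback value */
	u32 max = (1 << (enc + 1)) / QM_FQD_BLOCK_SIZE;	/* 32768 */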
QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO, + (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE], + &qman_fqd_state_fops); + + QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17); + + QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16); + + QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15); + + QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14); + + QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13); + + QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12); + + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11); + + QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10); + + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9); + + QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8); + + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7); + + QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6); + + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5); + + QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4); + + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3); + + QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2); + + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1); + + QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0); + + QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO, + NULL, &qman_fqd_summary_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO, + NULL, &qman_fqd_dest_wq_fops); + + QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO, + NULL, &qman_fqd_cred_fops); + + return 0; + +_return: + if (dfs_root) + debugfs_remove_recursive(dfs_root); + return ret; +} + +static void __exit qman_debugfs_module_exit(void) +{ + debugfs_remove_recursive(dfs_root); +} + +module_init(qman_debugfs_module_init); +module_exit(qman_debugfs_module_exit); +MODULE_LICENSE("Dual BSD/GPL"); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_driver.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_driver.c @@ -0,0 +1,544 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "qman_private.h" + +/* Global variable containing revision id (even on non-control plane systems + * where CCSR isn't available) */ +u16 qman_ip_rev; +EXPORT_SYMBOL(qman_ip_rev); + +/* size of the fqd region in bytes */ +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP +static u32 fqd_size = (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ); +#endif + +/*****************/ +/* Portal driver */ +/*****************/ + +static struct dpa_uio_class qman_uio = { + .list = LIST_HEAD_INIT(qman_uio.list), + .dev_prefix = "qman-uio-" +}; +const struct dpa_uio_class *dpa_uio_qman(void) +{ + return &qman_uio; +} +EXPORT_SYMBOL(dpa_uio_qman); + +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX +/* Handlers for NULL portal callbacks (ie. where the contextB field, normally + * pointing to the corresponding FQ object, is NULL). */ +static enum qman_cb_dqrr_result null_cb_dqrr(struct qman_portal *qm, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + pr_warning("Ignoring unowned DQRR frame on portal %p.\n", qm); + return qman_cb_dqrr_consume; +} +static void null_cb_mr(struct qman_portal *qm, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + pr_warning("Ignoring unowned MR msg on portal %p, verb 0x%02x.\n", + qm, msg->verb); +} +static const struct qman_fq_cb null_cb = { + .dqrr = null_cb_dqrr, + .ern = null_cb_mr, + .dc_ern = null_cb_mr, + .fqs = null_cb_mr +}; +#endif + +#ifdef CONFIG_FSL_QMAN_PORTAL +/* This structure carries parameters from the device-tree handling code that + * wants to set up a portal for use on 1 or more CPUs, and each temporary thread + * created to run on those CPUs. The 'portal' member is the return value. */ +struct affine_portal_data { + struct completion done; + const struct qm_portal_config *pconfig; + struct qman_portal *redirect; + int recovery_mode; + struct qman_portal *portal; +}; + +/* This function is called in a temporary thread for each CPU, to initialise the + * "affine" portal that the CPU should use. The thread is created and run from + * the init_affine_portal() bootstrapper. If the CPU has not been assigned its + * own portal, "redirect" will be non-NULL indicating it should share another + * CPU's portal (it becomes a "slave"). */ +static __init int thread_init_affine_portal(void *__data) +{ + struct affine_portal_data *data = __data; + const struct qm_portal_config *pconfig = data->pconfig; + if (data->redirect) + data->portal = qman_create_affine_slave(data->redirect); + else { + /* TODO: cgrs ?? 
*/ + data->portal = qman_create_affine_portal(pconfig, NULL, +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX + &null_cb, +#endif + data->recovery_mode); + if (data->portal) { + u32 irq_sources = 0; + /* default: enable all (available) pool channels */ + qman_static_dequeue_add(~0); + /* Determine what should be interrupt-vs-poll driven */ +#ifdef CONFIG_FSL_DPA_PIRQ_SLOW + irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | + QM_PIRQ_MRI | QM_PIRQ_CSCI; +#endif +#ifdef CONFIG_FSL_DPA_PIRQ_FAST + irq_sources |= QM_PIRQ_DQRI; +#endif + qman_irqsource_add(irq_sources); + } + } + complete(&data->done); + return 0; +} + +/* This function is just a bootstrap for running thread_init_affine_portal() on + * a given CPU. The parameters are passed in via the (void*) thread-arg (and + * results are received back) using the affine_portal_data struct. */ +static __init struct qman_portal *init_affine_portal( + const struct qm_portal_config *pconfig, + int cpu, struct qman_portal *redirect, + int recovery_mode) +{ + struct affine_portal_data data = { + .done = COMPLETION_INITIALIZER_ONSTACK(data.done), + .pconfig = pconfig, + .redirect = redirect, + .recovery_mode = recovery_mode, + .portal = NULL + }; + struct task_struct *k = kthread_create(thread_init_affine_portal, &data, + "qman_affine%d", cpu); + int ret; + if (IS_ERR(k)) { + pr_err("Failed to init %sQman affine portal for cpu %d\n", + redirect ? "(slave) " : "", cpu); + return NULL; + } + kthread_bind(k, cpu); + wake_up_process(k); + wait_for_completion(&data.done); + ret = kthread_stop(k); + if (ret) { + pr_err("Qman portal initialisation failed, cpu %d, code %d\n", + cpu, ret); + return NULL; + } + if (data.portal) + pr_info("Qman portal %sinitialised, cpu %d\n", + redirect ? "(slave) " : + pconfig->public_cfg.is_shared ? "(shared) " : "", cpu); + return data.portal; +} +#endif + +/* Parses the device-tree node, extracts the configuration, and if appropriate + * initialises the portal for use on one or more CPUs. 
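init_affine_portal() above runs the per-portal setup on its target CPU by creating a kthread, binding it before the first wake-up and waiting on an on-stack completion; a stripped-down sketch of that pattern (names are illustrative, error handling trimmed):

	#include <linux/completion.h>
	#include <linux/kthread.h>

	static int pinned_fn(void *data)
	{
		struct completion *done = data;
		/* per-CPU work runs here, pinned to the chosen CPU */
		complete(done);
		return 0;
	}

	static int run_pinned(int cpu)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct task_struct *k = kthread_create(pinned_fn, &done,
						       "pinned/%d", cpu);

		if (IS_ERR(k))
			return PTR_ERR(k);
		kthread_bind(k, cpu);		/* must bind before waking */
		wake_up_process(k);
		wait_for_completion(&done);
		return kthread_stop(k);		/* reap it, get the return code */
	}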
*/ +static __init struct qm_portal_config *fsl_qman_portal_init( + struct device_node *node) +{ + struct qm_portal_config *pcfg; + const u32 *index, *channel; + const phandle *ph; + struct device_node *tmp_node; + int irq, ret, numpools; + u16 ip_rev = 0; + + pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL); + if (!pcfg) { + pr_err("can't allocate portal config"); + return NULL; + } + + if (of_device_is_compatible(node, "fsl,qman-portal-1.0")) + ip_rev = QMAN_REV10; + else if (of_device_is_compatible(node, "fsl,qman-portal-1.1")) + ip_rev = QMAN_REV11; + else if (of_device_is_compatible(node, "fsl,qman-portal-1.2")) + ip_rev = QMAN_REV12; + else if (of_device_is_compatible(node, "fsl,qman-portal-2.0")) + ip_rev = QMAN_REV20; + + if (!qman_ip_rev) { + if (ip_rev) + qman_ip_rev = ip_rev; + else { + pr_warning("unknown Qman version, default to rev1.1\n"); + qman_ip_rev = QMAN_REV11; + } + } else if (ip_rev && (qman_ip_rev != ip_rev)) + pr_warning("Revision=0x%04x, but portal '%s' has 0x%04x\n", + qman_ip_rev, node->full_name, ip_rev); + + ret = of_address_to_resource(node, QM_ADDR_CE, + &pcfg->addr_phys[QM_ADDR_CE]); + if (ret) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "reg::CE"); + goto err; + } + ret = of_address_to_resource(node, QM_ADDR_CI, + &pcfg->addr_phys[QM_ADDR_CI]); + if (ret) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "reg::CI"); + goto err; + } + index = of_get_property(node, "cell-index", &ret); + if (!index || (ret != 4)) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "cell-index"); + goto err; + } + channel = of_get_property(node, "fsl,qman-channel-id", &ret); + if (!channel || (ret != 4)) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "fsl,qman-channel-id"); + goto err; + } + if (*channel != (*index + qm_channel_swportal0)) + pr_err("Warning: node %s has mismatched %s and %s\n", + node->full_name, "cell-index", "fsl,qman-channel-id"); + pcfg->public_cfg.channel = *channel; + /* Parse cpu associations for this portal. This involves dereferencing + * to the cpu device-tree nodes, but it also ensures we only try to work + * with CPUs that exist. (Eg. under a hypervisor.) */ + ph = of_get_property(node, "cpu-handle", &ret); + if (ph) { + if (ret != sizeof(phandle)) { + pr_err("Malformed %s property '%s'\n", node->full_name, + "cpu-handle"); + return NULL; + } + ret = check_cpu_phandle(*ph); + if (ret < 0) + return NULL; + pcfg->public_cfg.cpu = ret; + } else + pcfg->public_cfg.cpu = -1; + + ph = of_get_property(node, "fsl,qman-pool-channels", &ret); + if (ph && (ret % sizeof(phandle))) { + pr_err("Malformed %s property '%s'\n", node->full_name, + "fsl,qman-pool-channels"); + goto err; + } + numpools = ph ? 
(ret / sizeof(phandle)) : 0; + irq = irq_of_parse_and_map(node, 0); + if (irq == NO_IRQ) { + pr_err("Can't get %s property '%s'\n", node->full_name, + "interrupts"); + goto err; + } + pcfg->public_cfg.irq = irq; + pcfg->public_cfg.index = *index; + if (of_get_property(node, "fsl,hv-dma-handle", &ret)) + pcfg->public_cfg.has_stashing = 1; + else if (qman_ip_rev == QMAN_REV20) + pcfg->public_cfg.has_stashing = 1; + else +#ifdef CONFIG_FSL_PAMU + pcfg->public_cfg.has_stashing = 1; +#else + pcfg->public_cfg.has_stashing = 0; +#endif + pcfg->public_cfg.pools = 0; + pcfg->node = node; +#ifdef CONFIG_FSL_QMAN_CONFIG + /* We need the same LIODN offset for all portals */ + qman_liodn_fixup(pcfg->public_cfg.channel); +#endif + + if (of_get_property(node, "fsl,usdpaa-portal", &ret)) { + struct dpa_uio_portal *u = kmalloc(sizeof(*u), GFP_KERNEL); + if (!u) + goto err; + u->type = dpa_uio_portal_qman; + u->qm_cfg = pcfg; + list_add_tail(&u->node, &qman_uio.list); + /* Return NULL, otherwise the kernel may share it on CPUs that + * don't have their own portals, which would be ... *bad*. */ + return NULL; + } + + /* Map the portals now we know they aren't for UIO (the UIO code doesn't + * need the CE mapping, and so will do its own CI-only mapping). */ + pcfg->addr_virt[QM_ADDR_CE] = ioremap_prot( + pcfg->addr_phys[QM_ADDR_CE].start, + resource_size(&pcfg->addr_phys[QM_ADDR_CE]), + 0); + pcfg->addr_virt[QM_ADDR_CI] = ioremap_prot( + pcfg->addr_phys[QM_ADDR_CI].start, + resource_size(&pcfg->addr_phys[QM_ADDR_CI]), + _PAGE_GUARDED | _PAGE_NO_CACHE); + + while (numpools--) { + for_each_compatible_node(tmp_node, NULL, + "fsl,qman-pool-channel") { + phandle *lph = (phandle *)of_get_property(tmp_node, + "linux,phandle", &ret); + if (*lph == *ph) { + u32 *index = (u32 *)of_get_property(tmp_node, + "cell-index", &ret); + pcfg->public_cfg.pools |= + QM_SDQCR_CHANNELS_POOL(*index); + } + } + ph++; + } + if (pcfg->public_cfg.pools == 0) + panic("Unrecoverable error linking pool channels"); + + return pcfg; +err: + kfree(pcfg); + return NULL; +} + +static void __init fsl_qman_portal_destroy(struct qm_portal_config *pcfg) +{ + iounmap(pcfg->addr_virt[QM_ADDR_CE]); + iounmap(pcfg->addr_virt[QM_ADDR_CI]); + kfree(pcfg); +} + +static __init int fsl_fqid_range_init(struct device_node *node, + int recovery_mode) +{ + int ret; + u32 *range = (u32 *)of_get_property(node, "fsl,fqid-range", &ret); + if (!range) { + pr_err("No 'fsl,fqid-range' property in node %s\n", + node->full_name); + return -EINVAL; + } + if (ret != 8) { + pr_err("'fsl,fqid-range' is not a 2-cell range in node %s\n", + node->full_name); + return -EINVAL; + } + qman_release_fqid_range(range[0], range[1]); +#ifdef CONFIG_FSL_QMAN_PORTAL + /* If in recovery mode *and* we are using a private FQ allocation range, + * then automatically clean up all FQs in that range so we can + * automatically exit recovery mode too. */ + if (recovery_mode) { + u32 fqid; + for (fqid = range[0]; fqid < (range[0] + range[1]); fqid++) { + ret = qman_recovery_cleanup_fq(fqid); + if (ret) { + pr_err("Failed to recovery FQID %d\n", fqid); + return ret; + } + } + } +#else + BUG_ON(recovery_mode); +#endif + pr_info("Qman: FQID allocator includes range %d:%d%s\n", + range[0], range[1], recovery_mode ? 
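For reference, the "fsl,fqid-range" property read above is a plain 2-cell <base count> pair handed straight to the FQID allocator; with made-up values:

	/* e.g. fsl,fqid-range = <0x100 0x400>; 1024 FQIDs starting at 0x100 */
	const u32 range[2] = { 0x100, 0x400 };
	qman_release_fqid_range(range[0], range[1]);	/* seeds the allocator */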
" (recovered)" : ""); + return 0; +} + +#ifdef CONFIG_FSL_QMAN_PORTAL +static __init int __leave_recovery(void *__data) +{ + struct completion *done = __data; + qman_recovery_exit_local(); + complete(done); + return 0; +} + +int qman_recovery_exit(void) +{ + struct completion done = COMPLETION_INITIALIZER_ONSTACK(done); + unsigned int cpu; + + for_each_cpu(cpu, qman_affine_cpus()) { + struct task_struct *k = kthread_create(__leave_recovery, &done, + "qman_recovery"); + int ret; + if (IS_ERR(k)) { + pr_err("Thread failure (recovery) on cpu %d\n", cpu); + return -ENOMEM; + } + kthread_bind(k, cpu); + wake_up_process(k); + wait_for_completion(&done); + ret = kthread_stop(k); + if (ret) { + pr_err("Failed to exit recovery on cpu %d\n", cpu); + return ret; + } + pr_info("Qman portal exited recovery, cpu %d\n", cpu); + } + return 0; +} +EXPORT_SYMBOL(qman_recovery_exit); +#endif + +/***************/ +/* Driver load */ +/***************/ + +static __init int qman_init(void) +{ +#ifdef CONFIG_FSL_QMAN_PORTAL + struct qman_cgr cgr; + struct cpumask primary_cpus = *cpu_none_mask; + struct cpumask slave_cpus = *cpu_online_mask; + struct qman_portal *sharing_portal = NULL; + int sharing_cpu = -1; +#endif + struct device_node *dn; + struct qm_portal_config *pcfg; + int ret, use_bpid0 = 1, recovery_mode = 0; + LIST_HEAD(cfg_list); + + for_each_compatible_node(dn, NULL, "fsl,qman") { + if (!qman_init_error_int(dn)) + pr_info("Qman err interrupt handler present\n"); + else + pr_err("Qman err interrupt handler missing\n"); + } +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + ret = qman_setup_fq_lookup_table(fqd_size/64); + if (ret) + return ret; +#endif +#ifdef CONFIG_FSL_QMAN_PORTAL + if (fsl_dpa_should_recover()) + recovery_mode = 1; + for_each_compatible_node(dn, NULL, "fsl,qman-portal") { + pcfg = fsl_qman_portal_init(dn); + if (pcfg) { + if (pcfg->public_cfg.cpu >= 0) { + cpumask_set_cpu(pcfg->public_cfg.cpu, + &primary_cpus); + list_add(&pcfg->list, &cfg_list); + } else + fsl_qman_portal_destroy(pcfg); + } + } + /* only consider "online" CPUs */ + cpumask_and(&primary_cpus, &primary_cpus, cpu_online_mask); + if (cpumask_empty(&primary_cpus)) + /* No portals, we're done */ + return 0; + if (!cpumask_subset(cpu_online_mask, &primary_cpus)) { + /* Need to do some sharing. In lieu of anything more scientific + * (or configurable), we pick the last-most CPU that has a + * portal and share that one. */ + int next = cpumask_first(&primary_cpus); + while (next < nr_cpu_ids) { + sharing_cpu = next; + next = cpumask_next(next, &primary_cpus); + } + } + /* Parsing is done and sharing decisions are made, now initialise the + * portals and determine which "slave" CPUs are left over. */ + list_for_each_entry(pcfg, &cfg_list, list) { + struct qman_portal *p; + int is_shared = (!sharing_portal && (sharing_cpu >= 0) && + (pcfg->public_cfg.cpu == sharing_cpu)); + pcfg->public_cfg.is_shared = is_shared; + /* If it's not mapped to a CPU, or another portal is already + * initialised to the same CPU, skip this portal. 
*/ + if (pcfg->public_cfg.cpu < 0 || !cpumask_test_cpu( + pcfg->public_cfg.cpu, &slave_cpus)) + continue; + p = init_affine_portal(pcfg, pcfg->public_cfg.cpu, NULL, + recovery_mode); + if (p) { + if (is_shared) + sharing_portal = p; + cpumask_clear_cpu(pcfg->public_cfg.cpu, &slave_cpus); + } + } + if (sharing_portal) { + int loop; + for_each_cpu(loop, &slave_cpus) { + struct qman_portal *p = init_affine_portal(NULL, loop, + sharing_portal, recovery_mode); + if (!p) + pr_err("Failed slave Qman portal for cpu %d\n", + loop); + } + } +#else + for_each_compatible_node(dn, NULL, "fsl,qman-portal") { + pcfg = fsl_qman_portal_init(dn); + if (pcfg) + /* No kernel portal support, so if USDPAA didn't consume + * the portal, we've no other use for it. */ + fsl_qman_portal_destroy(pcfg); + } +#endif + for_each_compatible_node(dn, NULL, "fsl,fqid-range") { + use_bpid0 = 0; + ret = fsl_fqid_range_init(dn, recovery_mode); + if (ret) + return ret; + } +#ifdef CONFIG_FSL_QMAN_PORTAL + /* If using private FQ allocation, exit recovery mode automatically (ie. + * after automatic recovery) */ + if (recovery_mode && !use_bpid0) { + ret = qman_recovery_exit(); + if (ret) + return ret; + } + for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) { + /* This is to ensure h/w-internal CGR memory is zeroed out. Note + * that we do this for all conceivable CGRIDs, not all of which + * are necessarily available on the underlying hardware version. + * We ignore any errors for this reason. */ + qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL); + } +#endif + ret = fqalloc_init(use_bpid0); + if (ret) + return ret; + pr_info("Qman portals initialised\n"); + return 0; +} +subsys_initcall(qman_init); --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_fqalloc.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_fqalloc.c @@ -0,0 +1,264 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_private.h" + +#include + +/****************/ +/* FQ allocator */ +/****************/ + +/* Global flag: use BPID==0 (fq_pool), or use the range-allocator? */ +static int use_bman; + +#ifdef CONFIG_FSL_BMAN_PORTAL +static struct bman_pool *fq_pool; +static const struct bman_pool_params fq_pool_params; +#endif + +__init int fqalloc_init(int __use_bman) +{ + use_bman = __use_bman; +#ifdef CONFIG_FSL_BMAN_PORTAL + if (use_bman) { + fq_pool = bman_new_pool(&fq_pool_params); + if (!fq_pool) + return -ENOMEM; + } +#else + BUG_ON(use_bman); +#endif + return 0; +} + +u32 qm_fq_new(void) +{ +#ifdef CONFIG_FSL_BMAN_PORTAL + struct bm_buffer buf; + int ret; +#endif + + if (!use_bman) { + u32 result; + if (qman_alloc_fqid(&result) < 0) + return 0; + return result; + } +#ifdef CONFIG_FSL_BMAN_PORTAL + BUG_ON(!fq_pool); + ret = bman_acquire(fq_pool, &buf, 1, 0); + if (ret != 1) + return 0; + return (u32)bm_buffer_get64(&buf); +#else + BUG(); +#endif +} +EXPORT_SYMBOL(qm_fq_new); + +int qm_fq_free_flags(u32 fqid, __maybe_unused u32 flags) +{ +#ifdef CONFIG_FSL_BMAN_PORTAL + struct bm_buffer buf; + u32 bflags = 0; + int ret; + bm_buffer_set64(&buf, fqid); +#endif + + if (!use_bman) { + qman_release_fqid(fqid); + return 0; + } +#ifdef CONFIG_FSL_BMAN_PORTAL +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QM_FQ_FREE_WAIT) { + bflags |= BMAN_RELEASE_FLAG_WAIT; + if (flags & BMAN_RELEASE_FLAG_WAIT_INT) + bflags |= BMAN_RELEASE_FLAG_WAIT_INT; + if (flags & BMAN_RELEASE_FLAG_WAIT_SYNC) + bflags |= BMAN_RELEASE_FLAG_WAIT_SYNC; + } +#endif + ret = bman_release(fq_pool, &buf, 1, bflags); + return ret; +#else + BUG(); +#endif +} +EXPORT_SYMBOL(qm_fq_free_flags); + +/* Global state for the allocator */ +static DEFINE_SPINLOCK(alloc_lock); +static LIST_HEAD(alloc_list); + +/* The allocator is a (possibly-empty) list of these; */ +struct alloc_node { + struct list_head list; + u32 base; + u32 num; +}; + +/* #define FQRANGE_DEBUG */ + +#ifdef FQRANGE_DEBUG +#define DPRINT pr_info +static void DUMP(void) +{ + int off = 0; + char buf[256]; + struct alloc_node *p; + list_for_each_entry(p, &alloc_list, list) { + if (off < 255) + off += snprintf(buf + off, 255-off, "{%d,%d}", + p->base, p->base + p->num - 1); + } + pr_info("%s\n", buf); +} +#else +#define DPRINT(x...) 
do { ; } while(0) +#define DUMP() do { ; } while(0) +#endif + +int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial) +{ + struct alloc_node *i = NULL, *next_best = NULL; + u32 base, next_best_base = 0, num = 0, next_best_num = 0; + struct alloc_node *margin_left, *margin_right; + + *result = (u32)-1; + DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial); + DUMP(); + /* If 'align' is 0, it should behave as though it was 1 */ + if (!align) + align = 1; + margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL); + if (!margin_left) + goto err; + margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL); + if (!margin_right) { + kfree(margin_left); + goto err; + } + spin_lock_irq(&alloc_lock); + list_for_each_entry(i, &alloc_list, list) { + base = (i->base + align - 1) / align; + base *= align; + if ((base - i->base) >= i->num) + /* alignment is impossible, regardless of count */ + continue; + num = i->num - (base - i->base); + if (num >= count) { + /* this one will do nicely */ + num = count; + goto done; + } + if (num > next_best_num) { + next_best = i; + next_best_base = base; + next_best_num = num; + } + } + if (partial && next_best) { + i = next_best; + base = next_best_base; + num = next_best_num; + } else + i = NULL; +done: + if (i) { + if (base != i->base) { + margin_left->base = i->base; + margin_left->num = base - i->base; + list_add_tail(&margin_left->list, &i->list); + } else + kfree(margin_left); + if ((base + num) < (i->base + i->num)) { + margin_right->base = base + num; + margin_right->num = (i->base + i->num) - + (base + num); + list_add(&margin_right->list, &i->list); + } else + kfree(margin_right); + list_del(&i->list); + kfree(i); + *result = base; + } + spin_unlock_irq(&alloc_lock); +err: + DPRINT("returning %d\n", i ? num : -ENOMEM); + DUMP(); + return i ? (int)num : -ENOMEM; +} +EXPORT_SYMBOL(qman_alloc_fqid_range); + +void qman_release_fqid_range(u32 fqid, u32 count) +{ + struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_KERNEL); + DPRINT("release_range(%d,%d)\n", fqid, count); + DUMP(); + spin_lock_irq(&alloc_lock); + node->base = fqid; + node->num = count; + list_for_each_entry(i, &alloc_list, list) { + if (i->base >= node->base) { + list_add_tail(&node->list, &i->list); + goto done; + } + } + list_add_tail(&node->list, &alloc_list); +done: + /* Merge to the left */ + i = list_entry(node->list.prev, struct alloc_node, list); + if (node->list.prev != &alloc_list) { + BUG_ON((i->base + i->num) > node->base); + if ((i->base + i->num) == node->base) { + node->base = i->base; + node->num += i->num; + list_del(&i->list); + kfree(i); + } + } + /* Merge to the right */ + i = list_entry(node->list.next, struct alloc_node, list); + if (node->list.next != &alloc_list) { + BUG_ON((node->base + node->num) > i->base); + if ((node->base + node->num) == i->base) { + node->num += i->num; + list_del(&i->list); + kfree(i); + } + } + spin_unlock_irq(&alloc_lock); + DUMP(); +} +EXPORT_SYMBOL(qman_release_fqid_range); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_high.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_high.c @@ -0,0 +1,2360 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_low.h" + +/* Compilation constants */ +#define DQRR_MAXFILL 15 +#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */ +#define IRQNAME "QMan portal %d" +#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */ + +/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about + * inter-processor locking only. Note, FQLOCK() is always called either under a + * local_irq_save() or from interrupt context - hence there's no need for irq + * protection (and indeed, attempting to nest irq-protection doesn't work, as + * the "irq en/disable" machinery isn't recursive...). 
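+ * As an illustrative usage sketch (see e.g. fq_state_change() below),
+ * callers bracket FQ state updates like so:
+ *	FQLOCK(fq);
+ *	fq_set(fq, QMAN_FQ_STATE_NE);
+ *	FQUNLOCK(fq);
+ * The spinlock is only actually taken if the FQ was created with
+ * QMAN_FQ_FLAG_LOCKED.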
*/ +#define FQLOCK(fq) \ + do { \ + struct qman_fq *__fq478 = (fq); \ + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ + spin_lock(&__fq478->fqlock); \ + } while(0) +#define FQUNLOCK(fq) \ + do { \ + struct qman_fq *__fq478 = (fq); \ + if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \ + spin_unlock(&__fq478->fqlock); \ + } while(0) + +static inline void fq_set(struct qman_fq *fq, u32 mask) +{ + set_bits(mask, &fq->flags); +} +static inline void fq_clear(struct qman_fq *fq, u32 mask) +{ + clear_bits(mask, &fq->flags); +} +static inline int fq_isset(struct qman_fq *fq, u32 mask) +{ + return fq->flags & mask; +} +static inline int fq_isclear(struct qman_fq *fq, u32 mask) +{ + return !(fq->flags & mask); +} + +#define PORTAL_BITS_CI_PREFETCH 0x00020000 /* EQCR::CI prefetched */ +#define PORTAL_BITS_RECOVERY 0x00040000 /* recovery mode */ + +struct qman_portal { + struct qm_portal p; + unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */ + unsigned long irq_sources; + u32 slowpoll; /* only used when interrupts are off */ + struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */ +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + spinlock_t sharing_lock; /* only used if is_shared */ + int is_shared; + struct qman_portal *sharing_redirect; +#endif + u32 sdqcr; + int dqrr_disable_ref; +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX + /* If we receive a DQRR or MR ring entry for a "null" FQ, ie. for which + * FQD::contextB is NULL rather than pointing to a FQ object, we use + * these handlers. (This is not considered a fast-path mechanism.) */ + struct qman_fq_cb null_cb; +#endif + /* When the cpu-affine portal is activated, this is non-NULL */ + const struct qm_portal_config *config; + /* This is needed for providing a non-NULL device to dma_map_***() */ + struct platform_device *pdev; + struct dpa_rbtree retire_table; + char irqname[MAX_IRQNAME]; + /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */ + struct qman_cgrs *cgrs; + /* 256-element array, each is a linked-list of CSCN handlers. */ + struct list_head cgr_cbs[256]; + /* list lock */ + spinlock_t cgr_lock; +}; + +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +#define PORTAL_IRQ_LOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + spin_lock_irqsave(&(p)->sharing_lock, irqflags); \ + else \ + local_irq_save(irqflags); \ + } while (0) +#define PORTAL_IRQ_UNLOCK(p, irqflags) \ + do { \ + if ((p)->is_shared) \ + spin_unlock_irqrestore(&(p)->sharing_lock, irqflags); \ + else \ + local_irq_restore(irqflags); \ + } while (0) +#else +#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags) +#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags) +#endif + +static cpumask_t affine_mask; +static DEFINE_SPINLOCK(affine_mask_lock); +static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal); +/* "raw" gets the cpu-local struct whether it's a redirect or not. 
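+ * Users that must not be redirected to a shared portal (e.g. the
+ * "irqsource" and "poll" APIs further down) use this raw variant and
+ * check sharing_redirect themselves; other code should prefer
+ * get_affine_portal().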
*/ +static inline struct qman_portal *get_raw_affine_portal(void) +{ + return &get_cpu_var(qman_affine_portal); +} +/* For ops that can redirect, this obtains the portal to use */ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +static inline struct qman_portal *get_affine_portal(void) +{ + struct qman_portal *p = get_raw_affine_portal(); + if (p->sharing_redirect) + return p->sharing_redirect; + return p; +} +#else +#define get_affine_portal() get_raw_affine_portal() +#endif +/* For every "get", there must be a "put" */ +static inline void put_affine_portal(void) +{ + put_cpu_var(qman_affine_portal); +} + +/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux + * retirement notifications (the fact they are sometimes h/w-consumed means that + * contextB isn't always a s/w demux - and as we can't know which case it is + * when looking at the notification, we have to use the slow lookup for all of + * them). NB, it's possible to have multiple FQ objects refer to the same FQID + * (though at most one of them should be the consumer), so this table isn't for + * all FQs - FQs are added when retirement commands are issued, and removed when + * they complete, which also massively reduces the size of this table. */ +IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid); + +/* This is what everything can wait on, even if it migrates to a different cpu + * to the one whose affine portal it is waiting on. */ +static DECLARE_WAIT_QUEUE_HEAD(affine_queue); + +static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq) +{ + int ret = fqtree_push(&p->retire_table, fq); + if (ret) + pr_err("ERROR: double FQ-retirement %d\n", fq->fqid); + return ret; +} + +static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq) +{ + fqtree_del(&p->retire_table, fq); +} + +static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid) +{ + return fqtree_find(&p->retire_table, fqid); +} + +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP +static void **qman_fq_lookup_table; +static size_t qman_fq_lookup_table_size; + +int qman_setup_fq_lookup_table(size_t num_entries) +{ + num_entries++; + /* Allocate 1 more entry since the first entry is not used */ + qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *))); + if (!qman_fq_lookup_table) { + pr_err("QMan: Could not allocate fq lookup table\n"); + return -ENOMEM; + } + memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *)); + qman_fq_lookup_table_size = num_entries; + pr_info("QMan: Allocated lookup table at %p, entry count %lu\n", + qman_fq_lookup_table, + (unsigned long)qman_fq_lookup_table_size); + return 0; +} + +/* global structure that maintains fq object mapping */ +static DEFINE_SPINLOCK(fq_hash_table_lock); + +static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq) +{ + u32 i; + + spin_lock(&fq_hash_table_lock); + /* Can't use index zero because this has special meaning + * in context_b field. 
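+	 * (A context_b of 0 is treated by the dequeue/MR paths as "no FQ
+	 * object"; with CONFIG_FSL_QMAN_NULL_FQ_DEMUX set it falls back to
+	 * the portal's null callbacks, so entry 0 must never be handed out.)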
*/ + for (i = 1; i < qman_fq_lookup_table_size; i++) { + if (qman_fq_lookup_table[i] == NULL) { + *entry = i; + qman_fq_lookup_table[i] = fq; + spin_unlock(&fq_hash_table_lock); + return 0; + } + } + spin_unlock(&fq_hash_table_lock); + return -ENOMEM; +} + +static void clear_fq_table_entry(u32 entry) +{ + spin_lock(&fq_hash_table_lock); + BUG_ON(entry >= qman_fq_lookup_table_size); + qman_fq_lookup_table[entry] = NULL; + spin_unlock(&fq_hash_table_lock); +} + +static inline struct qman_fq *get_fq_table_entry(u32 entry) +{ + BUG_ON(entry >= qman_fq_lookup_table_size); + return qman_fq_lookup_table[entry]; +} +#endif + +/* In the case that slow- and fast-path handling are both done by qman_poll() + * (ie. because there is no interrupt handling), we ought to balance how often + * we do the fast-path poll versus the slow-path poll. We'll use two decrementer + * sources, so we call the fast poll 'n' times before calling the slow poll + * once. The idle decrementer constant is used when the last slow-poll detected + * no work to do, and the busy decrementer constant when the last slow-poll had + * work to do. */ +#define SLOW_POLL_IDLE 1000 +#define SLOW_POLL_BUSY 10 +static u32 __poll_portal_slow(struct qman_portal *p, u32 is); +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit); + +#ifdef CONFIG_FSL_DPA_HAVE_IRQ +/* Portal interrupt handler */ +static irqreturn_t portal_isr(__always_unused int irq, void *ptr) +{ + struct qman_portal *p = ptr; + u32 clear = QM_DQAVAIL_MASK | p->irq_sources; + u32 is = qm_isr_status_read(&p->p) & p->irq_sources; + /* DQRR-handling if it's interrupt-driven */ + if (is & QM_PIRQ_DQRI) + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); + /* Handling of anything else that's interrupt-driven */ + clear |= __poll_portal_slow(p, is); + qm_isr_status_clear(&p->p, clear); + return IRQ_HANDLED; +} +#endif + +/* This inner version is used privately by qman_create_affine_portal(), as well + * as by the exported qman_stop_dequeues(). */ +static inline void qman_stop_dequeues_ex(struct qman_portal *p) +{ + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + if (!(p->dqrr_disable_ref++)) + qm_dqrr_set_maxfill(&p->p, 0); + PORTAL_IRQ_UNLOCK(p, irqflags); +} + +static int drain_mr_fqrni(struct qm_portal *p) +{ + const struct qm_mr_entry *msg; +loop: + msg = qm_mr_current(p); + if (!msg) { + /* if MR was full and h/w had other FQRNI entries to produce, we + * need to allow it time to produce those entries once the + * existing entries are consumed. A worst-case situation + * (fully-loaded system) means h/w sequencers may have to do 3-4 + * other things before servicing the portal's MR pump, each of + * which (if slow) may take ~50 qman cycles (which is ~200 + * processor cycles). So rounding up and then multiplying this + * worst-case estimate by a factor of 10, just to be + * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume + * one entry at a time, so h/w has an opportunity to produce new + * entries well before the ring has been fully consumed, so + * we're being *really* paranoid here. 
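+		 * The loop below implements that wait: spin on the timebase
+		 * (mfatb()) for 10,000 cycles, then re-check MR once more
+		 * before giving up.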
*/ + u64 now, then = mfatb(); + do { + now = mfatb(); + } while ((then + 10000) > now); + msg = qm_mr_current(p); + if (!msg) + return 0; + } + if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) + /* We aren't draining anything but FQRNIs */ + return -1; + qm_mr_next(p); + qm_mr_cci_consume(p, 1); + goto loop; +} + +/* this is called from qman_create_affine_portal() if not initialising in + * recovery mode, otherwise from qman_recovery_exit_local() after recovery is + * done. */ +static void post_recovery(struct qman_portal *p __always_unused, + const struct qm_portal_config *config) +{ + struct device_node *tmp_node, *node = config->node; + /* Enable DMA on portal LIODNs (stashing) and those of its sub-nodes + * (Fman TX and SEC/PME accelerators, where available). */ + if (pamu_enable_liodn(node, -1)) + /* If there's a PAMU problem, best to continue anyway and let + * the corresponding traffic hit whatever problems it will hit, + * than to fail portal initialisation and trigger a crash in + * dependent code that has no relationship to the PAMU issue. */ + pr_err("Failed to enable portal LIODN %s\n", + node->full_name); + for_each_child_of_node(node, tmp_node) + if (pamu_enable_liodn(tmp_node, -1)) + pr_err("Failed to enable portal LIODN %s\n", + tmp_node->full_name); +} + +struct qman_portal *qman_create_affine_portal( + const struct qm_portal_config *config, + const struct qman_cgrs *cgrs, + const struct qman_fq_cb *null_cb, + int recovery_mode) +{ + struct qman_portal *portal = get_raw_affine_portal(); + struct qm_portal *__p = &portal->p; + char buf[16]; + int ret; + u32 isdr; + + /* A criteria for calling this function (from qman_driver.c) is that + * we're already affine to the cpu and won't schedule onto another cpu. + * This means we can put_affine_portal() and yet continue to use + * "portal", which in turn means aspects of this routine can sleep. */ + put_affine_portal(); +#ifndef CONFIG_FSL_QMAN_NULL_FQ_DEMUX + if (null_cb) { + pr_err("Driver does not support 'NULL FQ' callbacks\n"); + return NULL; + } +#endif + /* prep the low-level portal struct with the mapped addresses from the + * config, everything that follows depends on it and "config" is more + * for (de)reference... */ + __p->addr.addr_ce = config->addr_virt[QM_ADDR_CE]; + __p->addr.addr_ci = config->addr_virt[QM_ADDR_CI]; + if (qm_eqcr_init(__p, qm_eqcr_pvb, qm_eqcr_cce)) { + pr_err("Qman EQCR initialisation failed\n"); + goto fail_eqcr; + } +#ifdef CONFIG_FSL_QMAN_PORTAL_DISABLEAUTO_DCA +#define QM_DQRR_CMODE qm_dqrr_cci +#else +#define QM_DQRR_CMODE qm_dqrr_cdc +#endif + /* for recovery mode, don't enable stashing yet */ + if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb, + recovery_mode ? qm_dqrr_cci : QM_DQRR_CMODE, + DQRR_MAXFILL, recovery_mode)) { + pr_err("Qman DQRR initialisation failed\n"); + goto fail_dqrr; + } + if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) { + pr_err("Qman MR initialisation failed\n"); + goto fail_mr; + } + /* for recovery mode, quiesce SDQCR/VDQCR and drain DQRR+MR until h/w + * wraps up anything it was doing (5ms is ample idle time). */ + if (recovery_mode) { + const struct qm_dqrr_entry *dq; + const struct qm_mr_entry *msg; + int idle = 0; + /* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it + * was doing (5ms is more than enough to ensure it's done). 
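+		 * Concretely, "done" means five consecutive 1ms polls in which
+		 * neither DQRR nor MR produced an entry; anything found in the
+		 * meantime is dumped with a warning, consumed, and the idle
+		 * count restarts.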
*/ + qm_dqrr_sdqcr_set(__p, 0); + qm_dqrr_vdqcr_set(__p, 0); +drain_loop: + qm_dqrr_pvb_update(__p, 0); + dq = qm_dqrr_current(__p); + qm_mr_pvb_update(__p); + msg = qm_mr_current(__p); + if (dq) { + pr_warning("DQRR recovery: dumping dqrr %02x:%02x for " + "FQID %d\n", dq->verb & QM_DQRR_VERB_MASK, + dq->stat, dq->fqid); + qm_dqrr_next(__p); + qm_dqrr_cci_consume(__p, 1); + } + if (msg) { + pr_warning("MR recovery: dumping msg 0x%02x for " + "FQID %d\n", msg->verb & QM_MR_VERB_TYPE_MASK, + msg->fq.fqid); + qm_mr_next(__p); + qm_mr_cci_consume(__p, 1); + } + if (!dq && !msg) { + if (++idle < 5) { + msleep(1); + goto drain_loop; + } + } else { + idle = 0; + goto drain_loop; + } + } + if (qm_mc_init(__p)) { + pr_err("Qman MC initialisation failed\n"); + goto fail_mc; + } + if (qm_isr_init(__p)) { + pr_err("Qman ISR initialisation failed\n"); + goto fail_isr; + } + /* static interrupt-gating controls */ + qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH); + qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH); + qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD); + portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL); + if (!portal->cgrs) + goto fail_cgrs; + /* initial snapshot is no-depletion */ + qman_cgrs_init(&portal->cgrs[1]); + if (cgrs) + portal->cgrs[0] = *cgrs; + else + /* if the given mask is NULL, assume all CGRs can be seen */ + qman_cgrs_fill(&portal->cgrs[0]); + for (ret = 0; ret < __CGR_NUM; ret++) + INIT_LIST_HEAD(&portal->cgr_cbs[ret]); + spin_lock_init(&portal->cgr_lock); + portal->bits = recovery_mode ? PORTAL_BITS_RECOVERY : 0; + portal->slowpoll = 0; +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + portal->eqci_owned = NULL; +#endif +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + spin_lock_init(&portal->sharing_lock); + portal->is_shared = config->public_cfg.is_shared; + portal->sharing_redirect = NULL; +#endif + portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 | + QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS | + QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED; + portal->dqrr_disable_ref = 0; +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX + if (null_cb) + portal->null_cb = *null_cb; + else + memset(&portal->null_cb, 0, sizeof(*null_cb)); +#endif + sprintf(buf, "qportal-%d", config->public_cfg.channel); + portal->pdev = platform_device_alloc(buf, -1); + if (!portal->pdev) + goto fail_devalloc; + if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40))) + goto fail_devadd; + ret = platform_device_add(portal->pdev); + if (ret) + goto fail_devadd; + dpa_rbtree_init(&portal->retire_table); + isdr = 0xffffffff; + qm_isr_disable_write(__p, isdr); + portal->irq_sources = 0; + qm_isr_enable_write(__p, portal->irq_sources); + qm_isr_status_clear(__p, 0xffffffff); +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu); + if (request_irq(config->public_cfg.irq, portal_isr, + IRQF_NOBALANCING | IRQF_DISABLED, portal->irqname, + portal)) { + pr_err("request_irq() failed\n"); + goto fail_irq; + } + if (config->public_cfg.cpu != -1) { + disable_irq(config->public_cfg.irq); + irq_set_affinity(config->public_cfg.irq, + cpumask_of(config->public_cfg.cpu)); + } + enable_irq(config->public_cfg.irq); + + if (recovery_mode) { + qm_isr_inhibit(__p); + } else { + post_recovery(portal, config); + qm_isr_uninhibit(__p); + } +#endif + /* Need EQCR to be empty before continuing */ + isdr ^= QM_PIRQ_EQCI; + qm_isr_disable_write(__p, isdr); + ret = qm_eqcr_get_fill(__p); + if (ret) { + pr_err("Qman EQCR unclean, need recovery\n"); + 
goto fail_eqcr_empty; + } + isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI); + qm_isr_disable_write(__p, isdr); + if (qm_dqrr_current(__p) != NULL) { + pr_err("Qman DQRR unclean, need recovery\n"); + goto fail_dqrr_mr_empty; + } + if (qm_mr_current(__p) != NULL) { + /* special handling, drain just in case it's a few FQRNIs */ + if (drain_mr_fqrni(__p)) { + pr_err("Qman MR unclean, need recovery\n"); + goto fail_dqrr_mr_empty; + } + } + /* Success */ + portal->config = config; + spin_lock(&affine_mask_lock); + cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + qm_isr_disable_write(__p, 0); + /* Write a sane SDQCR */ + qm_dqrr_sdqcr_set(__p, recovery_mode ? 0 : portal->sdqcr); + return portal; +fail_dqrr_mr_empty: +fail_eqcr_empty: +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + free_irq(config->public_cfg.irq, portal); +fail_irq: +#endif + platform_device_del(portal->pdev); +fail_devadd: + platform_device_put(portal->pdev); +fail_devalloc: + if (portal->cgrs) + kfree(portal->cgrs); +fail_cgrs: + qm_isr_finish(__p); +fail_isr: + qm_mc_finish(__p); +fail_mc: + qm_mr_finish(__p); +fail_mr: + qm_dqrr_finish(__p); +fail_dqrr: + qm_eqcr_finish(__p); +fail_eqcr: + put_affine_portal(); + return NULL; +} + +/* These checks are BUG_ON()s because the driver is already supposed to avoid + * these cases. */ +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect) +{ +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + struct qman_portal *p = get_raw_affine_portal(); + /* Check that we don't already have our own portal */ + BUG_ON(p->config); + /* Check that we aren't already slaving to another portal */ + BUG_ON(p->is_shared); + /* Check that 'redirect' is prepared to have us */ + BUG_ON(!redirect->config->public_cfg.is_shared); + /* These are the only elements to initialise when redirecting */ + p->irq_sources = 0; + p->sharing_redirect = redirect; + put_affine_portal(); + return p; +#else + BUG(); + return NULL; +#endif +} + +const struct qm_portal_config *qman_destroy_affine_portal(void) +{ + /* We don't want to redirect if we're a slave, use "raw" */ + struct qman_portal *qm = get_raw_affine_portal(); + const struct qm_portal_config *pcfg; + int cpu; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (qm->sharing_redirect) { + qm->sharing_redirect = NULL; + put_affine_portal(); + return NULL; + } + qm->is_shared = 0; +#endif + pcfg = qm->config; + cpu = pcfg->public_cfg.cpu; + /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or + * something related to QM_PIRQ_EQCI, this may need fixing. + * Also, due to the prefetching model used for CI updates in the enqueue + * path, this update will only invalidate the CI cacheline *after* + * working on it, so we need to call this twice to ensure a full update + * irrespective of where the enqueue processing was at when the teardown + * began. 
*/ + qm_eqcr_cce_update(&qm->p); + qm_eqcr_cce_update(&qm->p); +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + free_irq(pcfg->public_cfg.irq, qm); +#endif + kfree(qm->cgrs); + qm_isr_finish(&qm->p); + qm_mc_finish(&qm->p); + qm_mr_finish(&qm->p); + qm_dqrr_finish(&qm->p); + qm_eqcr_finish(&qm->p); + qm->config = NULL; + spin_lock(&affine_mask_lock); + cpumask_clear_cpu(cpu, &affine_mask); + spin_unlock(&affine_mask_lock); + put_affine_portal(); + return pcfg; +} + +const struct qman_portal_config *qman_get_portal_config(void) +{ + struct qman_portal *p = get_affine_portal(); + const struct qman_portal_config *ret = &p->config->public_cfg; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_get_portal_config); + +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX +void qman_get_null_cb(struct qman_fq_cb *null_cb) +{ + struct qman_portal *p = get_affine_portal(); + *null_cb = p->null_cb; + put_affine_portal(); +} +EXPORT_SYMBOL(qman_get_null_cb); + +void qman_set_null_cb(const struct qman_fq_cb *null_cb) +{ + struct qman_portal *p = get_affine_portal(); + p->null_cb = *null_cb; + put_affine_portal(); +} +EXPORT_SYMBOL(qman_set_null_cb); +#endif + +/* Inline helper to reduce nesting in __poll_portal_slow() */ +static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg, u8 verb) +{ + FQLOCK(fq); + switch(verb) { + case QM_MR_VERB_FQRL: + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL)); + fq_clear(fq, QMAN_FQ_STATE_ORL); + table_del_fq(p, fq); + break; + case QM_MR_VERB_FQRN: + DPA_ASSERT((fq->state == qman_fq_state_parked) || + (fq->state == qman_fq_state_sched)); + DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING)); + fq_clear(fq, QMAN_FQ_STATE_CHANGING); + if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + else + table_del_fq(p, fq); + fq->state = qman_fq_state_retired; + break; + case QM_MR_VERB_FQPN: + DPA_ASSERT(fq->state == qman_fq_state_sched); + DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING)); + fq->state = qman_fq_state_parked; + } + FQUNLOCK(fq); +} + +static u32 __poll_portal_slow(struct qman_portal *p, u32 is) +{ + const struct qm_mr_entry *msg; + + BUG_ON(p->bits & PORTAL_BITS_RECOVERY); + + if (is & QM_PIRQ_CSCI) { + struct qman_cgrs rr, c; + struct qm_mc_result *mcr; + struct qman_cgr *cgr; + int i; + unsigned long irqflags __maybe_unused; + + spin_lock_irqsave(&p->cgr_lock, irqflags); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + /* mask out the ones I'm not interested in */ + qman_cgrs_and(&rr, (const struct qman_cgrs *) + &mcr->querycongestion.state, &p->cgrs[0]); + /* check previous snapshot for delta, enter/exit congestion */ + qman_cgrs_xor(&c, &rr, &p->cgrs[1]); + /* update snapshot */ + qman_cgrs_cp(&p->cgrs[1], &rr); + /* Invoke callback */ + qman_cgrs_for_each_1(i, &c) + list_for_each_entry(cgr, &p->cgr_cbs[i], node) { + if (cgr->cb) + cgr->cb(p, cgr, qman_cgrs_get(&rr, i)); + } + spin_unlock_irqrestore(&p->cgr_lock, irqflags); + } + +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (is & QM_PIRQ_EQCI) { + unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + p->eqci_owned = NULL; + PORTAL_IRQ_UNLOCK(p, irqflags); + wake_up(&affine_queue); + } +#endif + + if (is & QM_PIRQ_EQRI) { + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + qm_eqcr_cce_update(&p->p); + qm_eqcr_set_ithresh(&p->p, 0); + PORTAL_IRQ_UNLOCK(p, irqflags); + 
wake_up(&affine_queue); + } + + if (is & QM_PIRQ_MRI) { + u8 num = 0; +mr_loop: + qm_mr_pvb_update(&p->p); + msg = qm_mr_current(&p->p); + if (msg) { + u8 verb = msg->verb & QM_MR_VERB_TYPE_MASK; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + struct qman_fq *fq; +#else + struct qman_fq *fq = (void *)(uintptr_t)msg->ern.tag; +#endif + if (verb == QM_MR_VERB_FQRNI) { + ; /* nada, we drop FQRNIs on the floor */ + } else if ((verb == QM_MR_VERB_FQRN) || + (verb == QM_MR_VERB_FQRL)) { + /* It's retirement related - need a lookup */ + fq = table_find_fq(p, msg->fq.fqid); + if (!fq) + panic("unexpected FQ retirement"); + fq_state_change(p, fq, msg, verb); + if (fq->cb.fqs) + fq->cb.fqs(p, fq, msg); +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + } else if (likely(msg->ern.tag)) { + fq = get_fq_table_entry(msg->ern.tag); +#else + } else if (likely(fq)) { +#endif + /* As per the header note, this is the way to + * determine if it's a s/w ERN or not. */ + if (likely(!(verb & QM_MR_VERB_DC_ERN))) + fq->cb.ern(p, fq, msg); + else + fq->cb.dc_ern(p, fq, msg); + } +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX + else { + /* use portal default handlers for 'null's */ + if (likely(!(verb & QM_MR_VERB_DC_ERN))) + p->null_cb.ern(p, NULL, msg); + else if (verb == QM_MR_VERB_DC_ERN) + p->null_cb.dc_ern(p, NULL, msg); + else if (p->null_cb.fqs) + p->null_cb.fqs(p, NULL, msg); + } +#endif + num++; + qm_mr_next(&p->p); + goto mr_loop; + } + qm_mr_cci_consume(&p->p, num); + } + + return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI); +} + +/* remove some slowish-path stuff from the "fast path" and make sure it isn't + * inlined. */ +static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq) +{ + p->vdqcr_owned = NULL; + FQLOCK(fq); + fq_clear(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + wake_up(&affine_queue); +} + +/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states + * that would conflict with other things if they ran at the same time on the + * same cpu are; + * + * (i) setting/clearing vdqcr_owned, and + * (ii) clearing the NE (Not Empty) flag. + * + * Both are safe. Because; + * + * (i) this clearing can only occur after qman_volatile_dequeue() has set the + * vdqcr_owned field (which it does before setting VDQCR), and + * qman_volatile_dequeue() blocks interrupts and preemption while this is + * done so that we can't interfere. + * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as + * with (i) that API prevents us from interfering until it's safe. + * + * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far + * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett + * advantage comes from this function not having to "lock" anything at all. + * + * Note also that the callbacks are invoked at points which are safe against the + * above potential conflicts, but that this function itself is not re-entrant + * (this is because the function tracks one end of each FIFO in the portal and + * we do *not* want to lock that). So the consequence is that it is safe for + * user callbacks to call into any Qman API *except* qman_poll() (as that's the + * sole API that could be invoking the callback through this function). 
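+ * The return value is the number of DQRR entries processed. Callbacks
+ * steer the loop through their qman_cb_dqrr_result: 'stop' exits without
+ * consuming the current entry, 'park' requests park-on-consume, and (in
+ * the DCA-based consume mode) 'defer' leaves consumption to the caller,
+ * e.g. via a later qman_dca().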
+ */ +static inline unsigned int __poll_portal_fast(struct qman_portal *p, + unsigned int poll_limit) +{ + const struct qm_dqrr_entry *dq; + struct qman_fq *fq; + enum qman_cb_dqrr_result res; +#ifdef CONFIG_FSL_QMAN_DQRR_PREFETCHING + int coherent = (p->config->public_cfg.has_stashing); +#endif + unsigned int limit = 0; + + BUG_ON(p->bits & PORTAL_BITS_RECOVERY); +loop: +#ifdef CONFIG_FSL_QMAN_DQRR_PREFETCHING + qm_dqrr_pvb_update(&p->p, coherent); +#else + qm_dqrr_pvb_update(&p->p, 1); +#endif + dq = qm_dqrr_current(&p->p); + if (!dq) + goto done; + if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { + /* VDQCR: don't trust contextB as the FQ may have been + * configured for h/w consumption and we're draining it + * post-retirement. */ + fq = p->vdqcr_owned; + /* We only set QMAN_FQ_STATE_NE when retiring, so we only need + * to check for clearing it when doing volatile dequeues. It's + * one less thing to check in the critical path (SDQCR). */ + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) + fq_clear(fq, QMAN_FQ_STATE_NE); + /* this is duplicated from the SDQCR code, but we have stuff to + * do before *and* after this callback, and we don't want + * multiple if()s in the critical path (SDQCR). */ + res = fq->cb.dqrr(p, fq, dq); + if (res == qman_cb_dqrr_stop) + goto done; + /* Check for VDQCR completion */ + if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) + clear_vdqcr(p, fq); + } else { + /* SDQCR: contextB points to the FQ */ +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + fq = get_fq_table_entry(dq->contextB); +#else + fq = (void *)(uintptr_t)dq->contextB; +#endif +#ifdef CONFIG_FSL_QMAN_NULL_FQ_DEMUX + if (unlikely(!fq)) { + /* use portal default handlers */ + res = p->null_cb.dqrr(p, NULL, dq); + DPA_ASSERT(res == qman_cb_dqrr_consume); + res = qman_cb_dqrr_consume; + } else +#endif + { + /* Now let the callback do its stuff */ + res = fq->cb.dqrr(p, fq, dq); + /* The callback can request that we exit without + * consuming this entry nor advancing; */ + if (res == qman_cb_dqrr_stop) + goto done; + } + } + /* Interpret 'dq' from a driver perspective. */ + /* Parking isn't possible unless HELDACTIVE was set. NB, + * FORCEELIGIBLE implies HELDACTIVE, so we only need to + * check for HELDACTIVE to cover both. */ + DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || + (res != qman_cb_dqrr_park)); +#ifdef CONFIG_FSL_QMAN_PORTAL_DISABLEAUTO_DCA + if (res == qman_cb_dqrr_park) + /* The only thing to do for non-DCA is the park-request */ + qm_dqrr_park_current(&p->p); + /* Move forward */ + qm_dqrr_next(&p->p); + qm_dqrr_cci_consume(&p->p, 1); +#else + /* Defer just means "skip it, I'll consume it myself later on" */ + if (res != qman_cb_dqrr_defer) + qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park)); + /* Move forward */ + qm_dqrr_next(&p->p); +#endif + /* Entry processed and consumed, increment our counter. The callback can + * request that we exit after consuming the entry, and we also exit if + * we reach our processing limit, so loop back only if neither of these + * conditions is met. */ + if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop)) + goto loop; +done: + return limit; +} + +u32 qman_irqsource_get(void) +{ + /* "irqsource" and "poll" APIs mustn't redirect when sharing, they + * should shut the user out if they are not the primary CPU hosting the + * portal. That's why we use the "raw" interface. 
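+	 * (For such "slave" users, qman_irqsource_add()/remove() below simply
+	 * return -EINVAL.)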
*/ + struct qman_portal *p = get_raw_affine_portal(); + u32 ret = p->irq_sources & QM_PIRQ_VISIBLE; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_irqsource_get); + +int qman_irqsource_add(u32 bits __maybe_unused) +{ +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + struct qman_portal *p = get_raw_affine_portal(); + int ret = 0; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) + ret = -EINVAL; + else +#endif + { + __maybe_unused unsigned long irqflags; + PORTAL_IRQ_LOCK(p, irqflags); + set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources); + qm_isr_enable_write(&p->p, p->irq_sources); + PORTAL_IRQ_UNLOCK(p, irqflags); + } + put_affine_portal(); + return ret; +#else + pr_err("No Qman portal IRQ support, mustn't specify IRQ flags!"); + return -EINVAL; +#endif +} +EXPORT_SYMBOL(qman_irqsource_add); + +int qman_irqsource_remove(u32 bits) +{ +#ifdef CONFIG_FSL_DPA_HAVE_IRQ + struct qman_portal *p = get_raw_affine_portal(); + __maybe_unused unsigned long irqflags; + u32 ier; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (p->sharing_redirect) { + put_affine_portal(); + return -EINVAL; + } +#endif + /* Our interrupt handler only processes+clears status register bits that + * are in p->irq_sources. As we're trimming that mask, if one of them + * were to assert in the status register just before we remove it from + * the enable register, there would be an interrupt-storm when we + * release the IRQ lock. So we wait for the enable register update to + * take effect in h/w (by reading it back) and then clear all other bits + * in the status register. Ie. we clear them from ISR once it's certain + * IER won't allow them to reassert. */ + PORTAL_IRQ_LOCK(p, irqflags); + bits &= QM_PIRQ_VISIBLE; + clear_bits(bits, &p->irq_sources); + qm_isr_enable_write(&p->p, p->irq_sources); + ier = qm_isr_enable_read(&p->p); + /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a + * data-dependency, ie. to protect against re-ordering. 
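+	 * (The status-clear's operand depends on the value read back from
+	 * IER, so neither the compiler nor the CPU can reorder the clear
+	 * ahead of that read.)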
*/ + qm_isr_status_clear(&p->p, ~ier); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +#else + pr_err("No Qman portal IRQ support, mustn't specify IRQ flags!"); + return -EINVAL; +#endif +} +EXPORT_SYMBOL(qman_irqsource_remove); + +const cpumask_t *qman_affine_cpus(void) +{ + return &affine_mask; +} +EXPORT_SYMBOL(qman_affine_cpus); + +int qman_poll_dqrr(unsigned int limit) +{ + /* We need to fail when called for a "slave", so use "raw" */ + struct qman_portal *p = get_raw_affine_portal(); + int ret; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + ret = -EINVAL; + else +#endif + { + BUG_ON(p->irq_sources & QM_PIRQ_DQRI); + ret = __poll_portal_fast(p, limit); + } + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_poll_dqrr); + +u32 qman_poll_slow(void) +{ + /* We need to fail when called for a "slave", so use "raw" */ + struct qman_portal *p = get_raw_affine_portal(); + u32 ret; +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + ret = (u32)-1; + else +#endif + { + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; + ret = __poll_portal_slow(p, is); + qm_isr_status_clear(&p->p, ret); + } + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_poll_slow); + +/* Legacy wrapper */ +void qman_poll(void) +{ + struct qman_portal *p = get_raw_affine_portal(); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE + if (unlikely(p->sharing_redirect)) + goto done; +#endif + if ((~p->irq_sources) & QM_PIRQ_SLOW) { + if (!(p->slowpoll--)) { + u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources; + u32 active = __poll_portal_slow(p, is); + if (active) { + qm_isr_status_clear(&p->p, active); + p->slowpoll = SLOW_POLL_BUSY; + } else + p->slowpoll = SLOW_POLL_IDLE; + } + } + if ((~p->irq_sources) & QM_PIRQ_DQRI) + __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT); +#ifdef CONFIG_FSL_DPA_PORTAL_SHARE +done: +#endif + put_affine_portal(); +} +EXPORT_SYMBOL(qman_poll); + +/* Recovery processing. */ +static int recovery_poll_mr(struct qman_portal *p, u32 fqid) +{ + const struct qm_mr_entry *msg; + enum { + wait_for_fqrn, + wait_for_fqrl, + done + } state = wait_for_fqrn; + u8 v, fqs = 0; + +loop: + qm_mr_pvb_update(&p->p); + msg = qm_mr_current(&p->p); + if (!msg) { + cpu_relax(); + goto loop; + } + v = msg->verb & QM_MR_VERB_TYPE_MASK; + /* all MR messages have "fqid" in the same place */ + if (msg->fq.fqid != fqid) { +ignore_msg: + pr_warning("recovery_poll_mr(), ignoring msg 0x%02x for " + "FQID %d\n", v, msg->fq.fqid); + goto next_msg; + } + if (state == wait_for_fqrn) { + if ((v != QM_MR_VERB_FQRN) && (v != QM_MR_VERB_FQRNI)) + goto ignore_msg; + fqs = msg->fq.fqs; + if (!(fqs & QM_MR_FQS_ORLPRESENT)) + state = done; + else + state = wait_for_fqrl; + } else { + if (v != QM_MR_VERB_FQRL) + goto ignore_msg; + state = done; + } +next_msg: + qm_mr_next(&p->p); + qm_mr_cci_consume(&p->p, 1); + if (state != done) + goto loop; + return (fqs & QM_MR_FQS_NOTEMPTY) ? 
1 : 0; +} +static unsigned int recovery_poll_dqrr(struct qman_portal *p, u32 fqid) +{ + const struct qm_dqrr_entry *dq; + u8 empty = 0, num_fds = 0; + +loop: + qm_dqrr_pvb_update(&p->p, 0); + dq = qm_dqrr_current(&p->p); + if (!dq) { + cpu_relax(); + goto loop; + } + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED)) { +ignore_dqrr: + pr_warning("recovery_poll_dqrr(), ignoring dqrr %02x:%02x " + "for FQID %d\n", + dq->verb & QM_DQRR_VERB_MASK, dq->stat, dq->fqid); + goto next_dqrr; + } + if (dq->fqid != fqid) + goto ignore_dqrr; + if (dq->stat & QM_DQRR_STAT_FD_VALID) + num_fds++; + if (dq->stat & QM_DQRR_STAT_FQ_EMPTY) + empty = 1; +next_dqrr: + qm_dqrr_next(&p->p); + qm_dqrr_cci_consume(&p->p, 1); + if (!empty) + goto loop; + return num_fds; +} +int qman_recovery_cleanup_fq(u32 fqid) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + int ret = 0; + unsigned int num_fds = 0; + const char *s; + u8 state; + + /* Lock this whole flow down via the portal's "vdqcr" */ + PORTAL_IRQ_LOCK(p, irqflags); + BUG_ON(!(p->bits & PORTAL_BITS_RECOVERY)); + if (p->vdqcr_owned) + ret = -EBUSY; + else + p->vdqcr_owned = (void *)1; + PORTAL_IRQ_UNLOCK(p, irqflags); + if (ret) + goto out; + + /* Query the FQ's state */ + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + if (mcr->result != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; + + /* OOS: nothing to do */ + if (state == QM_MCR_NP_STATE_OOS) + goto out; + /* Otherwise: must be retired */ + if (state != QM_MCR_NP_STATE_RETIRED) { + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCR_VERB_ALTER_RETIRE); + if ((mcr->result != QM_MCR_RESULT_OK) && + (mcr->result != QM_MCR_RESULT_PENDING)) { + ret = -EIO; + goto out; + } + ret = recovery_poll_mr(p, fqid); + if (!ret) + /* FQ empty */ + goto oos; + } + /* Drain till empty */ + qm_dqrr_vdqcr_set(&p->p, fqid & 0x00ffffff); + num_fds = recovery_poll_dqrr(p, fqid); + +oos: + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); + if (mcr->result != QM_MCR_RESULT_OK) + ret = -EIO; + /* done */ + s = (state == QM_MCR_NP_STATE_RETIRED) ? "retired" : + (state == QM_MCR_NP_STATE_PARKED) ? "parked" : "scheduled"; + pr_info("Qman: %s FQID %d recovered (%d frames)\n", s, fqid, num_fds); +out: + PORTAL_IRQ_LOCK(p, irqflags); + p->vdqcr_owned = NULL; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_recovery_cleanup_fq); + +/* called from qman_driver.c::qman_recovery_exit() only (if exporting, use + * get_raw_affine_portal() and check for the "SLAVE" bit). 
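+ * It reinitialises DQRR with the normal (non-recovery) settings,
+ * re-enables stashing via post_recovery(), restores the portal's SDQCR
+ * and uninhibits portal interrupts; qman_driver.c runs it on each
+ * affine cpu from a kthread bound to that cpu.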
*/ +void qman_recovery_exit_local(void) +{ + struct qman_portal *p = get_affine_portal(); + BUG_ON(!(p->bits & PORTAL_BITS_RECOVERY)); + /* Reinitialise DQRR using expected settings */ + qm_dqrr_finish(&p->p); + post_recovery(p, p->config); + clear_bits(PORTAL_BITS_RECOVERY, &p->bits); + if (qm_dqrr_init(&p->p, p->config, qm_dqrr_dpush, qm_dqrr_pvb, + QM_DQRR_CMODE, DQRR_MAXFILL, 0)) + panic("Qman DQRR initialisation failed, recovery broken"); + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + qm_isr_status_clear(&p->p, 0xffffffff); + qm_isr_uninhibit(&p->p); + put_affine_portal(); +} + +void qman_stop_dequeues(void) +{ + struct qman_portal *p = get_affine_portal(); + qman_stop_dequeues_ex(p); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_stop_dequeues); + +void qman_start_dequeues(void) +{ + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + DPA_ASSERT(p->dqrr_disable_ref > 0); + if (!(--p->dqrr_disable_ref)) + qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_start_dequeues); + +void qman_static_dequeue_add(u32 pools) +{ + unsigned long irqflags __maybe_unused; + struct qman_portal *p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + pools &= p->config->public_cfg.pools; + p->sdqcr |= pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_static_dequeue_add); + +void qman_static_dequeue_del(u32 pools) +{ + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + PORTAL_IRQ_LOCK(p, irqflags); + pools &= p->config->public_cfg.pools; + p->sdqcr &= ~pools; + qm_dqrr_sdqcr_set(&p->p, p->sdqcr); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_static_dequeue_del); + +u32 qman_static_dequeue_get(void) +{ + struct qman_portal *p = get_affine_portal(); + u32 ret = p->sdqcr; + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_static_dequeue_get); + +void qman_dca(struct qm_dqrr_entry *dq, int park_request) +{ + struct qman_portal *p = get_affine_portal(); + qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request); + put_affine_portal(); +} +EXPORT_SYMBOL(qman_dca); + +/*******************/ +/* Frame queue API */ +/*******************/ + +static const char *mcr_result_str(u8 result) +{ + switch (result) { + case QM_MCR_RESULT_NULL: + return "QM_MCR_RESULT_NULL"; + case QM_MCR_RESULT_OK: + return "QM_MCR_RESULT_OK"; + case QM_MCR_RESULT_ERR_FQID: + return "QM_MCR_RESULT_ERR_FQID"; + case QM_MCR_RESULT_ERR_FQSTATE: + return "QM_MCR_RESULT_ERR_FQSTATE"; + case QM_MCR_RESULT_ERR_NOTEMPTY: + return "QM_MCR_RESULT_ERR_NOTEMPTY"; + case QM_MCR_RESULT_PENDING: + return "QM_MCR_RESULT_PENDING"; + case QM_MCR_RESULT_ERR_BADCOMMAND: + return "QM_MCR_RESULT_ERR_BADCOMMAND"; + } + return ""; +} + +int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) +{ + struct qm_fqd fqd; + struct qm_mcr_queryfq_np np; + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { + fqid = qm_fq_new(); + if (!fqid) + return -ENOMEM; + } + spin_lock_init(&fq->fqlock); + fq->fqid = fqid; + fq->flags = flags; + fq->state = qman_fq_state_oos; + fq->cgr_groupid = 0; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) + return -ENOMEM; +#endif + if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) + 
return 0; + /* Everything else is AS_IS support */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); + if (mcr->result != QM_MCR_RESULT_OK) { + pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result)); + goto err; + } + fqd = mcr->queryfq.fqd; + mcc = qm_mc_start(&p->p); + mcc->queryfq_np.fqid = fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); + if (mcr->result != QM_MCR_RESULT_OK) { + pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result)); + goto err; + } + np = mcr->queryfq_np; + /* Phew, have queryfq and queryfq_np results, stitch together + * the FQ object from those. */ + fq->cgr_groupid = fqd.cgid; + switch (np.state & QM_MCR_NP_STATE_MASK) { + case QM_MCR_NP_STATE_OOS: + break; + case QM_MCR_NP_STATE_RETIRED: + fq->state = qman_fq_state_retired; + if (np.frm_cnt) + fq_set(fq, QMAN_FQ_STATE_NE); + break; + case QM_MCR_NP_STATE_TEN_SCHED: + case QM_MCR_NP_STATE_TRU_SCHED: + case QM_MCR_NP_STATE_ACTIVE: + fq->state = qman_fq_state_sched; + if (np.state & QM_MCR_NP_STATE_R) + fq_set(fq, QMAN_FQ_STATE_CHANGING); + break; + case QM_MCR_NP_STATE_PARKED: + fq->state = qman_fq_state_parked; + break; + default: + DPA_ASSERT(NULL == "invalid FQ state"); + } + if (fqd.fq_ctrl & QM_FQCTRL_CGE) + fq->state |= QMAN_FQ_STATE_CGR_EN; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +err: + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) + qm_fq_free(fqid); + return -EIO; +} +EXPORT_SYMBOL(qman_create_fq); + +void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused) +{ + /* We don't need to lock the FQ as it is a pre-condition that the FQ be + * quiesced. Instead, run some checks. */ + switch (fq->state) { + case qman_fq_state_parked: + DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED); + case qman_fq_state_oos: + if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) + qm_fq_free(fq->fqid); +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + clear_fq_table_entry(fq->key); +#endif + return; + default: + break; + } + DPA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!"); +} +EXPORT_SYMBOL(qman_destroy_fq); + +u32 qman_fq_fqid(struct qman_fq *fq) +{ + return fq->fqid; +} +EXPORT_SYMBOL(qman_fq_fqid); + +void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags) +{ + if (state) + *state = fq->state; + if (flags) + *flags = fq->flags; +} +EXPORT_SYMBOL(qman_fq_state); + +int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED; + + DPA_ASSERT((fq->state == qman_fq_state_oos) || + (fq->state == qman_fq_state_parked)); +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) { + /* OAC not supported on rev1.0 */ + if (unlikely(qman_ip_rev == QMAN_REV10)) + return -EINVAL; + /* And can't be set at the same time as TDTHRESH */ + if (opts->we_mask & QM_INITFQ_WE_TDTHRESH) + return -EINVAL; + } + /* Issue an INITFQ_[PARKED|SCHED] management command */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + ((fq->state != qman_fq_state_oos) && + (fq->state != qman_fq_state_parked)))) { + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return -EBUSY; + } + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initfq = *opts; + mcc->initfq.fqid = fq->fqid; + mcc->initfq.count = 0; + /* If INITFQ_FLAG_NULL is passed, contextB is set to zero. Otherwise, + * if the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a + * demux pointer. Otherwise, TO_DCPORTAL is set, so the caller-provided + * value is allowed to stand, don't overwrite it. */ + if ((flags & QMAN_INITFQ_FLAG_NULL) || + fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) { + dma_addr_t phys_fq; + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + mcc->initfq.fqd.context_b = (flags & QMAN_INITFQ_FLAG_NULL) ? + 0 : fq->key; +#else + mcc->initfq.fqd.context_b = (flags & QMAN_INITFQ_FLAG_NULL) ? + 0 : (u32)(uintptr_t)fq; +#endif + /* and the physical address - NB, if the user wasn't trying to + * set CONTEXTA, clear the stashing settings. */ + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA; + memset(&mcc->initfq.fqd.context_a, 0, + sizeof(mcc->initfq.fqd.context_a)); + } else { + phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq), + DMA_TO_DEVICE); + qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq); + } + } + if (flags & QMAN_INITFQ_FLAG_LOCAL) { + mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel; + if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) { + mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ; + mcc->initfq.fqd.dest.wq = 4; + } + } + qm_mc_commit(&p->p, myverb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return -EIO; + } + if (opts) { + if (opts->we_mask & QM_INITFQ_WE_FQCTRL) { + if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE) + fq_set(fq, QMAN_FQ_STATE_CGR_EN); + else + fq_clear(fq, QMAN_FQ_STATE_CGR_EN); + } + if (opts->we_mask & QM_INITFQ_WE_CGID) + fq->cgr_groupid = opts->fqd.cgid; + } + fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
+ qman_fq_state_sched : qman_fq_state_parked; + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return 0; +} +EXPORT_SYMBOL(qman_init_fq); + +int qman_schedule_fq(struct qman_fq *fq) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int ret = 0; + u8 res; + + DPA_ASSERT(fq->state == qman_fq_state_parked); +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + /* Issue an ALTERFQ_SCHED management command */ + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + (fq->state != qman_fq_state_parked))) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_sched; +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_schedule_fq); + +int qman_retire_fq(struct qman_fq *fq, u32 *flags) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int rval; + u8 res; + + DPA_ASSERT((fq->state == qman_fq_state_parked) || + (fq->state == qman_fq_state_sched)); +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) || + (fq->state == qman_fq_state_retired) || + (fq->state == qman_fq_state_oos))) { + rval = -EBUSY; + goto out; + } + rval = table_push_fq(p, fq); + if (rval) + goto out; + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE); + res = mcr->result; + /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING, + * and defer the flags until FQRNI or FQRN (respectively) show up. But + * "Friendly" is to process OK immediately, and not set CHANGING. We do + * friendly, otherwise the caller doesn't necessarily have a fully + * "retired" FQ on return even if the retirement was immediate. However + * this does mean some code duplication between here and + * fq_state_change(). */ + if (likely(res == QM_MCR_RESULT_OK)) { + rval = 0; + /* Process 'fq' right away, we'll ignore FQRNI */ + if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) + fq_set(fq, QMAN_FQ_STATE_NE); + if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) + fq_set(fq, QMAN_FQ_STATE_ORL); + else + table_del_fq(p, fq); + if (flags) + *flags = fq->flags; + fq->state = qman_fq_state_retired; + if (fq->cb.fqs) { + /* Another issue with supporting "immediate" retirement + * is that we're forced to drop FQRNIs, because by the + * time they're seen it may already be "too late" (the + * fq may have been OOS'd and free()'d already). But if + * the upper layer wants a callback whether it's + * immediate or not, we have to fake an "MR" entry to + * look like an FQRNI... 
*/ + struct qm_mr_entry msg; + msg.verb = QM_MR_VERB_FQRNI; + msg.fq.fqs = mcr->alterfq.fqs; + msg.fq.fqid = fq->fqid; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + msg.fq.contextB = fq->key; +#else + msg.fq.contextB = (u32)(uintptr_t)fq; +#endif + fq->cb.fqs(p, fq, &msg); + } + } else if (res == QM_MCR_RESULT_PENDING) { + rval = 1; + fq_set(fq, QMAN_FQ_STATE_CHANGING); + } else { + rval = -EIO; + table_del_fq(p, fq); + } +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return rval; +} +EXPORT_SYMBOL(qman_retire_fq); + +int qman_oos_fq(struct qman_fq *fq) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p; + unsigned long irqflags __maybe_unused; + int ret = 0; + u8 res; + + DPA_ASSERT(fq->state == qman_fq_state_retired); +#ifdef CONFIG_FSL_DPA_CHECKING + if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))) + return -EINVAL; +#endif + p = get_affine_portal(); + PORTAL_IRQ_LOCK(p, irqflags); + FQLOCK(fq); + if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) || + (fq->state != qman_fq_state_retired))) { + ret = -EBUSY; + goto out; + } + mcc = qm_mc_start(&p->p); + mcc->alterfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); + res = mcr->result; + if (res != QM_MCR_RESULT_OK) { + ret = -EIO; + goto out; + } + fq->state = qman_fq_state_oos; +out: + FQUNLOCK(fq); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_oos_fq); + +int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *fqd = mcr->queryfq.fqd; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) + return -EIO; + return 0; +} +EXPORT_SYMBOL(qman_query_fq); + +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->queryfq.fqid = fq->fqid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *np = mcr->queryfq_np; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) + return -EIO; + return 0; +} +EXPORT_SYMBOL(qman_query_fq_np); + +int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res, myverb; + + PORTAL_IRQ_LOCK(p, irqflags); + myverb = (query_dedicated) ? 
QM_MCR_VERB_QUERYWQ_DEDICATED : + QM_MCR_VERB_QUERYWQ; + mcc = qm_mc_start(&p->p); + mcc->querywq.channel.id = wq->channel.id; + qm_mc_commit(&p->p, myverb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *wq = mcr->querywq; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("QUERYWQ failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_query_wq); + +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, + struct qm_mcr_cgrtestwrite *result) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->cgrtestwrite.cgid = cgr->cgrid; + mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32); + mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt; + qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *result = mcr->cgrtestwrite; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_testwrite_cgr); + +int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + mcc->querycgr.cgid = cgr->cgrid; + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *cgrd = mcr->querycgr; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_query_cgr); + +int qman_query_congestion(struct qm_mcr_querycongestion *congestion) +{ + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + + PORTAL_IRQ_LOCK(p, irqflags); + qm_mc_start(&p->p); + qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == + QM_MCC_VERB_QUERYCONGESTION); + res = mcr->result; + if (res == QM_MCR_RESULT_OK) + *congestion = mcr->querycongestion; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + if (res != QM_MCR_RESULT_OK) { + pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res)); + return -EIO; + } + return 0; +} +EXPORT_SYMBOL(qman_query_congestion); + +/* internal function used as a wait_event() expression */ +static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr) +{ + unsigned long irqflags __maybe_unused; + int ret = -EBUSY; + *p = get_affine_portal(); + PORTAL_IRQ_LOCK(*p, irqflags); + if (!(*p)->vdqcr_owned) { + FQLOCK(fq); + if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) + goto escape; + fq_set(fq, QMAN_FQ_STATE_VDQCR); + FQUNLOCK(fq); + (*p)->vdqcr_owned = fq; + ret = 0; + } +escape: + PORTAL_IRQ_UNLOCK(*p, irqflags); + if (!ret) + 
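/* ownership of this portal's VDQCR is now established, and the IRQ lock has already been dropped, so issue the volatile dequeue command */ +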
qm_dqrr_vdqcr_set(&(*p)->p, vdqcr); + put_affine_portal(); + return ret; +} + +#ifdef CONFIG_FSL_DPA_CAN_WAIT +static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq, + u32 vdqcr, u32 flags) +{ + int ret = 0; + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + ret = wait_event_interruptible(affine_queue, + !(ret = set_vdqcr(p, fq, vdqcr))); + else + wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr))); + return ret; +} +#endif + +int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused, + u32 vdqcr) +{ + struct qman_portal *p; + int ret; + + DPA_ASSERT(!fq || (fq->state == qman_fq_state_parked) || + (fq->state == qman_fq_state_retired)); + DPA_ASSERT(!fq || !(vdqcr & QM_VDQCR_FQID_MASK)); + DPA_ASSERT(!fq || !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + if (fq) + vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid; +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_VOLATILE_FLAG_WAIT) + ret = wait_vdqcr_start(&p, fq, vdqcr, flags); + else +#endif + ret = set_vdqcr(&p, fq, vdqcr); + if (ret) + return ret; + /* VDQCR is set */ +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_VOLATILE_FLAG_FINISH) { + if (flags & QMAN_VOLATILE_FLAG_WAIT_INT) + /* NB: don't propagate any error - the caller wouldn't + * know whether the VDQCR was issued or not. A signal + * could arrive after returning anyway, so the caller + * can check signal_pending() if that's an issue. */ + wait_event_interruptible(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + else + wait_event(affine_queue, + !fq_isset(fq, QMAN_FQ_STATE_VDQCR)); + } +#endif + return 0; +} +EXPORT_SYMBOL(qman_volatile_dequeue); + +static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail) +{ + if (avail) + qm_eqcr_cce_prefetch(&p->p); + else + qm_eqcr_cce_update(&p->p); +} + +int qman_eqcr_is_empty(void) +{ + unsigned long irqflags __maybe_unused; + struct qman_portal *p = get_affine_portal(); + u8 avail; + + PORTAL_IRQ_LOCK(p, irqflags); + update_eqcr_ci(p, 0); + avail = qm_eqcr_get_fill(&p->p); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return (avail == 0); +} +EXPORT_SYMBOL(qman_eqcr_is_empty); + +static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p, + unsigned long *irqflags __maybe_unused, + struct qman_fq *fq, + const struct qm_fd *fd, + u32 flags) +{ + struct qm_eqcr_entry *eq; + u8 avail; + + *p = get_affine_portal(); + PORTAL_IRQ_LOCK(*p, (*irqflags)); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { + if ((*p)->eqci_owned) { + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + return NULL; + } + (*p)->eqci_owned = fq; + } +#endif + avail = qm_eqcr_get_avail(&(*p)->p); + if (avail < 2) + update_eqcr_ci(*p, avail); + eq = qm_eqcr_start(&(*p)->p); + if (unlikely(!eq)) { +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) + (*p)->eqci_owned = NULL; +#endif + PORTAL_IRQ_UNLOCK(*p, (*irqflags)); + put_affine_portal(); + return NULL; + } + if (flags & QMAN_ENQUEUE_FLAG_DCA) + eq->dca = QM_EQCR_DCA_ENABLE | + ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ? 
+ QM_EQCR_DCA_PARK : 0) | + ((flags >> 8) & QM_EQCR_DCA_IDXMASK); + eq->fqid = fq->fqid; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + eq->tag = fq->key; +#else + eq->tag = (u32)(uintptr_t)fq; +#endif + /* From p4080 rev1 -> rev2, the FD struct's address went from 48-bit to + * 40-bit but rev1 chips will still interpret it as 48-bit, meaning we + * have to scrub the upper 8-bits, just in case the user left noise in + * there. Doing this selectively via a run-time check of the h/w + * revision (as we do for most errata, for example) is too slow in this + * critical path code. The most inexpensive way to handle this is just + * to reinterpret the FD as 4 32-bit words and to mask the first word + * appropriately, irrespective of the h/w revision. The struct fields + * corresponding to this word are; + * u8 dd:2; + * u8 liodn_offset:6; + * u8 bpid; + * u8 eliodn_offset:4; + * u8 __reserved:4; + * u8 addr_hi; + * So we mask this word with 0xc0ff00ff, which implicitly scrubs out + * liodn_offset, eliodn_offset, and __reserved - the latter two fields + * are interpreted as the 8 msbits of the 48-bit address in the case of + * rev1. + */ + { + const u32 *src = (const u32 *)fd; + u32 *dest = (u32 *)&eq->fd; + dest[0] = src[0] & 0xc0ff00ff; + dest[1] = src[1]; + dest[2] = src[2]; + dest[3] = src[3]; + } + return eq; +} + +#ifdef CONFIG_FSL_DPA_CAN_WAIT +static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p, + unsigned long *irqflags __maybe_unused, + struct qman_fq *fq, + const struct qm_fd *fd, + u32 flags) +{ + struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags); + if (!eq) + qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH); + return eq; +} +static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p, + unsigned long *irqflags __maybe_unused, + struct qman_fq *fq, + const struct qm_fd *fd, + u32 flags) +{ + struct qm_eqcr_entry *eq; + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); + else + wait_event(affine_queue, + (eq = __wait_eq_start(p, irqflags, fq, fd, flags))); + return eq; +} +#endif + +int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags) +{ + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags __maybe_unused; + +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_ENQUEUE_FLAG_WAIT) + eq = wait_eq_start(&p, &irqflags, fq, fd, flags); + else +#endif + eq = try_eq_start(&p, &irqflags, fq, fd, flags); + if (!eq) + return -EBUSY; + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE | + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); + /* Factor the below out, it's used from qman_enqueue_orp() too */ + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (p->eqci_owned != fq)); + else + wait_event(affine_queue, (p->eqci_owned != fq)); + } +#endif + return 0; +} +EXPORT_SYMBOL(qman_enqueue); + +int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags, + struct qman_fq *orp, u16 orp_seqnum) +{ + struct qman_portal *p; + struct qm_eqcr_entry *eq; + unsigned long irqflags __maybe_unused; + +#ifdef CONFIG_FSL_DPA_CAN_WAIT + if (flags & QMAN_ENQUEUE_FLAG_WAIT) + eq = wait_eq_start(&p, &irqflags, fq, fd, flags); + else 
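+ /* no wait requested: fall through to a single non-blocking attempt, returning -EBUSY if no EQCR entry is available */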
+#endif + eq = try_eq_start(&p, &irqflags, fq, fd, flags); + if (!eq) + return -EBUSY; + /* Process ORP-specifics here */ + if (flags & QMAN_ENQUEUE_FLAG_NLIS) + orp_seqnum |= QM_EQCR_SEQNUM_NLIS; + else { + orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS; + if (flags & QMAN_ENQUEUE_FLAG_NESN) + orp_seqnum |= QM_EQCR_SEQNUM_NESN; + else + /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */ + orp_seqnum &= ~QM_EQCR_SEQNUM_NESN; + } + eq->seqnum = orp_seqnum; + eq->orp = orp->fqid; + /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */ + qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP | + ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ? + 0 : QM_EQCR_VERB_CMD_ENQUEUE) | + (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))); + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); +#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC + if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) && + (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) { + if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT) + wait_event_interruptible(affine_queue, + (p->eqci_owned != fq)); + else + wait_event(affine_queue, (p->eqci_owned != fq)); + } +#endif + return 0; +} +EXPORT_SYMBOL(qman_enqueue_orp); + +int qman_modify_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + struct qm_mc_command *mcc; + struct qm_mc_result *mcr; + struct qman_portal *p = get_affine_portal(); + unsigned long irqflags __maybe_unused; + u8 res; + u8 verb = QM_MCC_VERB_MODIFYCGR; + + /* frame mode not supported on rev1.0 */ + if (unlikely(qman_ip_rev == QMAN_REV10)) { + if (opts && (opts->we_mask & QM_CGR_WE_MODE) && + opts->cgr.mode == QMAN_CGR_MODE_FRAME) { + put_affine_portal(); + return -EIO; + } + } + PORTAL_IRQ_LOCK(p, irqflags); + mcc = qm_mc_start(&p->p); + if (opts) + mcc->initcgr = *opts; + mcc->initcgr.cgid = cgr->cgrid; + if (flags & QMAN_CGR_FLAG_USE_INIT) + verb = QM_MCC_VERB_INITCGR; + qm_mc_commit(&p->p, verb); + while (!(mcr = qm_mc_result(&p->p))) + cpu_relax(); + DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb); + res = mcr->result; + PORTAL_IRQ_UNLOCK(p, irqflags); + put_affine_portal(); + return (res == QM_MCR_RESULT_OK) ? 0 : -EIO; +} +EXPORT_SYMBOL(qman_modify_cgr); + +#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \ + qm_channel_swportal0)) + +int qman_create_cgr(struct qman_cgr *cgr, u32 flags, + struct qm_mcc_initcgr *opts) +{ + unsigned long irqflags __maybe_unused; + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts; + int ret; + struct qman_portal *p; + + /* We have to check that the provided CGRID is within the limits of the + * data-structures, for obvious reasons. However we'll let h/w take + * care of determining whether it's within the limits of what exists on + * the SoC. 
*/ + if (cgr->cgrid >= __CGR_NUM) + return -EINVAL; + + p = get_affine_portal(); + + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); + cgr->chan = p->config->public_cfg.channel; + spin_lock_irqsave(&p->cgr_lock, irqflags); + + /* if no opts specified and I'm not the first for this portal, just add + * to the list */ + if ((opts == NULL) && !list_empty(&p->cgr_cbs[cgr->cgrid])) + goto add_list; + + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) + goto release_lock; + if (opts) + local_opts = *opts; + /* Overwrite TARG */ + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ | TARG_MASK(p); + local_opts.we_mask |= QM_CGR_WE_CSCN_TARG; + + /* send init if flags indicate so */ + if (opts && (flags & QMAN_CGR_FLAG_USE_INIT)) + ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts); + else + ret = qman_modify_cgr(cgr, 0, &local_opts); + if (ret) + goto release_lock; +add_list: + list_add(&cgr->node, &p->cgr_cbs[cgr->cgrid]); + + /* Determine if newly added object requires its callback to be called */ + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* we can't go back, so proceed and return success, but scream + * and wail to the log file */ + pr_crit("CGR HW state partially modified\n"); + ret = 0; + goto release_lock; + } + if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1], + cgr->cgrid)) + cgr->cb(p, cgr, 1); +release_lock: + spin_unlock_irqrestore(&p->cgr_lock, irqflags); + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_create_cgr); + +int qman_delete_cgr(struct qman_cgr *cgr) +{ + unsigned long irqflags __maybe_unused; + struct qm_mcr_querycgr cgr_state; + struct qm_mcc_initcgr local_opts; + int ret = 0; + struct qman_portal *p = get_affine_portal(); + + if (cgr->chan != p->config->public_cfg.channel) { + pr_crit("Attempting to delete cgr from different portal " + "than it was created on: create 0x%x, delete 0x%x\n", + cgr->chan, p->config->public_cfg.channel); + ret = -EINVAL; + goto put_portal; + } + memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr)); + spin_lock_irqsave(&p->cgr_lock, irqflags); + list_del(&cgr->node); + /* If last in list, CSCN_TARG must be set accordingly */ + if (!list_empty(&p->cgr_cbs[cgr->cgrid])) + goto release_lock; + ret = qman_query_cgr(cgr, &cgr_state); + if (ret) { + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs[cgr->cgrid]); + goto release_lock; + } + /* Overwrite TARG */ + local_opts.we_mask = QM_CGR_WE_CSCN_TARG; + local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ & ~(TARG_MASK(p)); + ret = qman_modify_cgr(cgr, 0, &local_opts); + if (ret) + /* add back to the list */ + list_add(&cgr->node, &p->cgr_cbs[cgr->cgrid]); +release_lock: + spin_unlock_irqrestore(&p->cgr_lock, irqflags); +put_portal: + put_affine_portal(); + return ret; +} +EXPORT_SYMBOL(qman_delete_cgr); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_low.h +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_low.h @@ -0,0 +1,1186 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_private.h" + +/***************************/ +/* Portal register assists */ +/***************************/ + +/* Cache-inhibited register offsets */ +#define REG_EQCR_PI_CINH 0x0000 +#define REG_EQCR_CI_CINH 0x0004 +#define REG_EQCR_ITR 0x0008 +#define REG_DQRR_PI_CINH 0x0040 +#define REG_DQRR_CI_CINH 0x0044 +#define REG_DQRR_ITR 0x0048 +#define REG_DQRR_DCAP 0x0050 +#define REG_DQRR_SDQCR 0x0054 +#define REG_DQRR_VDQCR 0x0058 +#define REG_DQRR_PDQCR 0x005c +#define REG_MR_PI_CINH 0x0080 +#define REG_MR_CI_CINH 0x0084 +#define REG_MR_ITR 0x0088 +#define REG_CFG 0x0100 +#define REG_ISR 0x0e00 +#define REG_ITPR 0x0e14 + +/* Cache-enabled register offsets */ +#define CL_EQCR 0x0000 +#define CL_DQRR 0x1000 +#define CL_MR 0x2000 +#define CL_EQCR_PI_CENA 0x3000 +#define CL_EQCR_CI_CENA 0x3100 +#define CL_DQRR_PI_CENA 0x3200 +#define CL_DQRR_CI_CENA 0x3300 +#define CL_MR_PI_CENA 0x3400 +#define CL_MR_CI_CENA 0x3500 +#define CL_CR 0x3800 +#define CL_RR0 0x3900 +#define CL_RR1 0x3940 + +/* BTW, the drivers (and h/w programming model) already obtain the required + * synchronisation for portal accesses via lwsync(), hwsync(), and + * data-dependencies. Use of barrier()s or other order-preserving primitives + * simply degrade performance. Hence the use of the __raw_*() interfaces, which + * simply ensure that the compiler treats the portal registers as volatile (ie. + * non-coherent). */ + +/* Cache-inhibited register access. 
*/ +#define __qm_in(qm, o) __raw_readl((qm)->addr_ci + (o)) +#define __qm_out(qm, o, val) __raw_writel((val), (qm)->addr_ci + (o)) +#define qm_in(reg) __qm_in(&portal->addr, REG_##reg) +#define qm_out(reg, val) __qm_out(&portal->addr, REG_##reg, val) + +/* Cache-enabled (index) register access */ +#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o)) +#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o)) +#define __qm_cl_in(qm, o) __raw_readl((qm)->addr_ce + (o)) +#define __qm_cl_out(qm, o, val) \ + do { \ + u32 *__tmpclout = (qm)->addr_ce + (o); \ + __raw_writel((val), __tmpclout); \ + dcbf(__tmpclout); \ + } while (0) +#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o)) +#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, CL_##reg##_CENA) +#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, CL_##reg##_CENA) +#define qm_cl_in(reg) __qm_cl_in(&portal->addr, CL_##reg##_CENA) +#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, CL_##reg##_CENA, val) +#define qm_cl_invalidate(reg) __qm_cl_invalidate(&portal->addr, CL_##reg##_CENA) + +/* Cache-enabled ring access */ +#define qm_cl(base, idx) ((void *)base + ((idx) << 6)) + +/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf + * analysis, look at using the "extra" bit in the ring index registers to avoid + * cyclic issues. */ +static inline u8 cyc_diff(u8 ringsize, u8 first, u8 last) +{ + /* 'first' is included, 'last' is excluded */ + if (first <= last) + return last - first; + return ringsize + last - first; +} + +/* Portal modes. + * Enum types; + * pmode == production mode + * cmode == consumption mode, + * dmode == h/w dequeue mode. + * Enum values use 3 letter codes. First letter matches the portal mode, + * remaining two letters indicate; + * ci == cache-inhibited portal register + * ce == cache-enabled portal register + * vb == in-band valid-bit (cache-enabled) + * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only + * As for "enum qm_dqrr_dmode", it should be self-explanatory. 
+ */ +enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */ + qm_eqcr_pci = 0, /* PI index, cache-inhibited */ + qm_eqcr_pce = 1, /* PI index, cache-enabled */ + qm_eqcr_pvb = 2 /* valid-bit */ +}; +enum qm_eqcr_cmode { /* s/w-only */ + qm_eqcr_cci, /* CI index, cache-inhibited */ + qm_eqcr_cce /* CI index, cache-enabled */ +}; +enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */ + qm_dqrr_dpush = 0, /* SDQCR + VDQCR */ + qm_dqrr_dpull = 1 /* PDQCR */ +}; +enum qm_dqrr_pmode { /* s/w-only */ + qm_dqrr_pci, /* reads DQRR_PI_CINH */ + qm_dqrr_pce, /* reads DQRR_PI_CENA */ + qm_dqrr_pvb /* reads valid-bit */ +}; +enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */ + qm_dqrr_cci = 0, /* CI index, cache-inhibited */ + qm_dqrr_cce = 1, /* CI index, cache-enabled */ + qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */ +}; +enum qm_mr_pmode { /* s/w-only */ + qm_mr_pci, /* reads MR_PI_CINH */ + qm_mr_pce, /* reads MR_PI_CENA */ + qm_mr_pvb /* reads valid-bit */ +}; +enum qm_mr_cmode { /* matches QCSP_CFG::MM */ + qm_mr_cci = 0, /* CI index, cache-inhibited */ + qm_mr_cce = 1 /* CI index, cache-enabled */ +}; + + +/* ------------------------- */ +/* --- Portal structures --- */ + +#define QM_EQCR_SIZE 8 +#define QM_DQRR_SIZE 16 +#define QM_MR_SIZE 8 + +struct qm_eqcr { + struct qm_eqcr_entry *ring, *cursor; + u8 ci, available, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + u32 busy; + enum qm_eqcr_pmode pmode; + enum qm_eqcr_cmode cmode; +#endif +}; + +struct qm_dqrr { + const struct qm_dqrr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING +#define QM_DQRR_FLAG_RE 0x01 /* Stash ring entries */ +#define QM_DQRR_FLAG_SE 0x02 /* Stash data */ + u8 flags; + enum qm_dqrr_dmode dmode; + enum qm_dqrr_pmode pmode; + enum qm_dqrr_cmode cmode; +#endif +}; + +struct qm_mr { + const struct qm_mr_entry *ring, *cursor; + u8 pi, ci, fill, ithresh, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum qm_mr_pmode pmode; + enum qm_mr_cmode cmode; +#endif +}; + +struct qm_mc { + struct qm_mc_command *cr; + struct qm_mc_result *rr; + u8 rridx, vbit; +#ifdef CONFIG_FSL_DPA_CHECKING + enum { + /* Can be _mc_start()ed */ + mc_idle, + /* Can be _mc_commit()ed or _mc_abort()ed */ + mc_user, + /* Can only be _mc_retry()ed */ + mc_hw + } state; +#endif +}; + +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 +/* For workarounds that require storage. The struct alignment is required for + * cases where operations on "shadow" structs need the same alignment as is + * present on the corresponding h/w data structs (specifically, there is a + * zero-bit present above the range required to address the ring, so that + * iteration can be achieved by incrementing a ring pointer and clearing the + * carry-bit). The "portal" struct needs the same alignment because this type + * goes at its head, so it has a more radical alignment requirement if this + * structure is used. 
(NB: "64" instead of "L1_CACHE_BYTES", because this + * alignment relates to the h/w interface, not the CPU cache granularity!)*/ +#define QM_PORTAL_ALIGNMENT __attribute__((aligned(32 * 64))) +struct qm_portal_bugs { + /* shadow MR ring, for QMAN9 workaround, 8-CL-aligned */ + struct qm_mr_entry mr[QM_MR_SIZE]; + /* shadow MC result, for QMAN6 and QMAN7 workarounds, CL-aligned */ + struct qm_mc_result result; + /* boolean switch for QMAN7 workaround */ + int initfq_and_sched; +} QM_PORTAL_ALIGNMENT; +#else +#define QM_PORTAL_ALIGNMENT ____cacheline_aligned +#endif + +struct qm_addr { + void __iomem *addr_ce; /* cache-enabled */ + void __iomem *addr_ci; /* cache-inhibited */ +}; + +struct qm_portal { +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + struct qm_portal_bugs bugs; +#endif + /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to + * and including 'mc' fits within a cacheline (yay!). The 'config' part + * is setup-only, so isn't a cause for a concern. In other words, don't + * rearrange this structure on a whim, there be dragons ... */ + struct qm_addr addr; + struct qm_eqcr eqcr; + struct qm_dqrr dqrr; + struct qm_mr mr; + struct qm_mc mc; +} QM_PORTAL_ALIGNMENT; + + +/* ---------------- */ +/* --- EQCR API --- */ + +/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ +#define EQCR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6))) + +/* Bit-wise logic to convert a ring pointer to a ring index */ +static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e) +{ + return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1); +} + +/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ +static inline void EQCR_INC(struct qm_eqcr *eqcr) +{ + /* NB: this is odd-looking, but experiments show that it generates fast + * code with essentially no branching overheads. We increment to the + * next EQCR pointer and handle overflow and 'vbit'. */ + struct qm_eqcr_entry *partial = eqcr->cursor + 1; + eqcr->cursor = EQCR_CARRYCLEAR(partial); + if (partial != eqcr->cursor) + eqcr->vbit ^= QM_EQCR_VERB_VBIT; +} + +static inline int qm_eqcr_init(struct qm_portal *portal, + enum qm_eqcr_pmode pmode, + __maybe_unused enum qm_eqcr_cmode cmode) +{ + /* This use of 'register', as well as all other occurances, is because + * it has been observed to generate much faster code with gcc than is + * otherwise the case. */ + register struct qm_eqcr *eqcr = &portal->eqcr; + u32 cfg; + u8 pi; + + eqcr->ring = portal->addr.addr_ce + CL_EQCR; + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(EQCR_CI); + pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + eqcr->cursor = eqcr->ring + pi; + eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ? 
+ QM_EQCR_VERB_VBIT : 0; + eqcr->available = QM_EQCR_SIZE - 1 - + cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi); + eqcr->ithresh = qm_in(EQCR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; + eqcr->pmode = pmode; + eqcr->cmode = cmode; +#endif + cfg = (qm_in(CFG) & 0x00ffffff) | + ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */ + qm_out(CFG, cfg); + return 0; +} + +static inline void qm_eqcr_finish(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + u8 pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1); + u8 ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + + DPA_ASSERT(!eqcr->busy); + if (pi != EQCR_PTR2IDX(eqcr->cursor)) + pr_crit("losing uncommitted EQCR entries\n"); + if (ci != eqcr->ci) + pr_crit("missing existing EQCR completions\n"); + if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor)) + pr_crit("EQCR destroyed unquiesced\n"); +} + +static inline struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(!eqcr->busy); + if (!eqcr->available) + return NULL; +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 1; +#endif + dcbz_64(eqcr->cursor); + return eqcr->cursor; +} + +static inline void qm_eqcr_abort(struct qm_portal *portal) +{ + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->busy); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next( + struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->busy); + DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb); + if (eqcr->available == 1) + return NULL; + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; + dcbf(eqcr->cursor); + EQCR_INC(eqcr); + eqcr->available--; + dcbz_64(eqcr->cursor); + return eqcr->cursor; +} + +#define EQCR_COMMIT_CHECKS(eqcr) \ +do { \ + DPA_ASSERT(eqcr->busy); \ + DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \ + DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \ +} while(0) + +static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + EQCR_COMMIT_CHECKS(eqcr); + DPA_ASSERT(eqcr->pmode == qm_eqcr_pci); + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; + EQCR_INC(eqcr); + eqcr->available--; + dcbf(eqcr->cursor); + hwsync(); + qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); + qm_cl_invalidate(EQCR_PI); + qm_cl_touch_rw(EQCR_PI); +} + +static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + EQCR_COMMIT_CHECKS(eqcr); + DPA_ASSERT(eqcr->pmode == qm_eqcr_pce); + eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit; + EQCR_INC(eqcr); + eqcr->available--; + dcbf(eqcr->cursor); + lwsync(); + qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor)); +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + struct qm_eqcr_entry *eqcursor; + EQCR_COMMIT_CHECKS(eqcr); + DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb); + lwsync(); + eqcursor = eqcr->cursor; + eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit; + dcbf(eqcursor); + 
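/* flushing the verb/valid-bit write above is what publishes the entry to QMan; only then advance the cursor and the 'available' accounting */ +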
EQCR_INC(eqcr); + eqcr->available--; +#ifdef CONFIG_FSL_DPA_CHECKING + eqcr->busy = 0; +#endif +} + +static inline u8 qm_eqcr_cci_update(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci = eqcr->ci; + DPA_ASSERT(eqcr->cmode == qm_eqcr_cci); + eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1); + diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + return diff; +} + +static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr; + DPA_ASSERT(eqcr->cmode == qm_eqcr_cce); + qm_cl_touch_ro(EQCR_CI); +} + +static inline u8 qm_eqcr_cce_update(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + u8 diff, old_ci = eqcr->ci; + DPA_ASSERT(eqcr->cmode == qm_eqcr_cce); + eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1); + qm_cl_invalidate(EQCR_CI); + diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci); + eqcr->available += diff; + return diff; +} + +static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + return eqcr->ithresh; +} + +static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + eqcr->ithresh = ithresh; + qm_out(EQCR_ITR, ithresh); +} + +static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + return eqcr->available; +} + +static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) +{ + register struct qm_eqcr *eqcr = &portal->eqcr; + return QM_EQCR_SIZE - 1 - eqcr->available; +} + + +/* ---------------- */ +/* --- DQRR API --- */ + +/* FIXME: many possible improvements; + * - look at changing the API to use pointer rather than index parameters now + * that 'cursor' is a pointer, + * - consider moving other parameters to pointer if it could help (ci) + */ + +#define DQRR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6))) + +static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e) +{ + return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1); +} + +static inline const struct qm_dqrr_entry *DQRR_INC( + const struct qm_dqrr_entry *e) +{ + return DQRR_CARRYCLEAR(e + 1); +} + +static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) +{ + qm_out(CFG, (qm_in(CFG) & 0xff0fffff) | + ((mf & (QM_DQRR_SIZE - 1)) << 20)); +} + +static inline int qm_dqrr_init(struct qm_portal *portal, + const struct qm_portal_config *config, + enum qm_dqrr_dmode dmode, + __maybe_unused enum qm_dqrr_pmode pmode, + enum qm_dqrr_cmode cmode, u8 max_fill, + int disable_stash) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + u32 cfg; + + if ((config->public_cfg.has_stashing) && (config->public_cfg.cpu == -1)) + return -EINVAL; + /* Make sure the DQRR will be idle when we enable */ + qm_out(DQRR_SDQCR, 0); + qm_out(DQRR_VDQCR, 0); + qm_out(DQRR_PDQCR, 0); + dqrr->ring = portal->addr.addr_ce + CL_DQRR; + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); + dqrr->cursor = dqrr->ring + dqrr->ci; + dqrr->fill = cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi); + dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ? 
+ QM_DQRR_VERB_VBIT : 0; + dqrr->ithresh = qm_in(DQRR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + dqrr->dmode = dmode; + dqrr->pmode = pmode; + dqrr->cmode = cmode; + dqrr->flags = 0; + if (!disable_stash) + dqrr->flags |= QM_DQRR_FLAG_RE | QM_DQRR_FLAG_SE; +#endif + cfg = (qm_in(CFG) & 0xff000f00) | + ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */ + ((dmode & 1) << 18) | /* DP */ + ((cmode & 3) << 16) | /* DCM */ + (disable_stash ? 0 : /* RE+SE */ + config->public_cfg.has_stashing ? 0xa0 : 0) | + (0 ? 0x40 : 0) | /* Ignore RP */ + (0 ? 0x10 : 0); /* Ignore SP */ + qm_out(CFG, cfg); + qm_dqrr_set_maxfill(portal, max_fill); + return 0; +} + +static inline void qm_dqrr_finish(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; +#ifdef CONFIG_FSL_DPA_CHECKING + if ((dqrr->cmode != qm_dqrr_cdc) && + (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))) + pr_crit("Ignoring completed DQRR entries\n"); +#endif +} + +static inline const struct qm_dqrr_entry *qm_dqrr_current( + struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + if (!dqrr->fill) + return NULL; + return dqrr->cursor; +} + +static inline u8 qm_dqrr_cursor(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + return DQRR_PTR2IDX(dqrr->cursor); +} + +static inline u8 qm_dqrr_next(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->fill); + dqrr->cursor = DQRR_INC(dqrr->cursor); + return --dqrr->fill; +} + +static inline u8 qm_dqrr_pci_update(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + u8 diff, old_pi = dqrr->pi; + DPA_ASSERT(dqrr->pmode == qm_dqrr_pci); + dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1); + diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); + dqrr->fill += diff; + return diff; +} + +static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); + qm_cl_invalidate(DQRR_PI); + qm_cl_touch_ro(DQRR_PI); +} + +static inline u8 qm_dqrr_pce_update(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + u8 diff, old_pi = dqrr->pi; + DPA_ASSERT(dqrr->pmode == qm_dqrr_pce); + dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1); + diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi); + dqrr->fill += diff; + return diff; +} + +static inline void qm_dqrr_pvb_update(struct qm_portal *portal, int coherent) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi); + DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb); + /* when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". 
*/ + if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) { + dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); + if (!dqrr->pi) + dqrr->vbit ^= QM_DQRR_VERB_VBIT; + dqrr->fill++; + if (!coherent) { + DPA_ASSERT(!(dqrr->flags & QM_DQRR_FLAG_RE)); + dcbit_ro(DQRR_INC(res)); + } + } else if (!coherent) { + DPA_ASSERT(!(dqrr->flags & QM_DQRR_FLAG_RE)); + dcbit_ro(res); + } +} + +static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); + qm_out(DQRR_CI_CINH, dqrr->ci); +} + +static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cci); + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); + qm_out(DQRR_CI_CINH, dqrr->ci); +} + +static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); + qm_cl_invalidate(DQRR_CI); + qm_cl_touch_rw(DQRR_CI); +} + +static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); + dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1); + qm_cl_out(DQRR_CI, dqrr->ci); +} + +static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cce); + dqrr->ci = DQRR_PTR2IDX(dqrr->cursor); + qm_cl_out(DQRR_CI, dqrr->ci); +} + +static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx, + int park) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(DQRR_DCAP, (0 << 8) | /* S */ + ((park ? 1 : 0) << 6) | /* PK */ + idx); /* DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, + const struct qm_dqrr_entry *dq, + int park) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + u8 idx = DQRR_PTR2IDX(dq); + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + DPA_ASSERT((dqrr->ring + idx) == dq); + DPA_ASSERT(idx < QM_DQRR_SIZE); + qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */ + ((park ? 
1 : 0) << 6) | /* DQRR_DCAP::PK */ + idx); /* DQRR_DCAP::DCAP_CI */ +} + +static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */ + ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */ +} + +static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1); +} + +static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + qm_cl_invalidate(DQRR_CI); + qm_cl_touch_ro(DQRR_CI); +} + +static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc); + return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1); +} + +static inline u8 qm_dqrr_get_ci(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); + return dqrr->ci; +} + +static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx) +{ + __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); + qm_out(DQRR_DCAP, (0 << 8) | /* S */ + (1 << 6) | /* PK */ + (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */ +} + +static inline void qm_dqrr_park_current(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc); + qm_out(DQRR_DCAP, (0 << 8) | /* S */ + (1 << 6) | /* PK */ + DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */ +} + +static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr) +{ + qm_out(DQRR_SDQCR, sdqcr); +} + +static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal) +{ + return qm_in(DQRR_SDQCR); +} + +static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr) +{ + qm_out(DQRR_VDQCR, vdqcr); +} + +static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal) +{ + return qm_in(DQRR_VDQCR); +} + +static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr) +{ + qm_out(DQRR_PDQCR, pdqcr); +} + +static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal) +{ + return qm_in(DQRR_PDQCR); +} + +static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal) +{ + register struct qm_dqrr *dqrr = &portal->dqrr; + return dqrr->ithresh; +} + +static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(DQRR_ITR, ithresh); +} + +static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal) +{ + return (qm_in(CFG) & 0x00f00000) >> 20; +} + + +/* -------------- */ +/* --- MR API --- */ + +#define MR_CARRYCLEAR(p) \ + (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6))) + +static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e) +{ + return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1); +} + +static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e) +{ + return MR_CARRYCLEAR(e + 1); +} + +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 +static inline void __mr_copy_and_fixup(struct qm_portal *p, u8 idx) +{ + if (qman_ip_rev == QMAN_REV10) { + struct qm_mr_entry *shadow = qm_cl(p->bugs.mr, idx); + struct qm_mr_entry *res = qm_cl(p->mr.ring, idx); + copy_words(shadow, res, sizeof(*res)); + /* Bypass the QM_MR_RC_*** definitions, and check the byte value + * 
directly to handle the erratum. */ + if (shadow->ern.rc == 0x06) + shadow->ern.rc = 0x60; + } +} +#else +#define __mr_copy_and_fixup(p, idx) do { ; } while (0) +#endif + +static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode, + enum qm_mr_cmode cmode) +{ + register struct qm_mr *mr = &portal->mr; + u32 cfg; + int loop; + +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if ((qman_ip_rev == QMAN_REV10) && (pmode != qm_mr_pvb)) { + pr_err("Qman is rev1, so QMAN9 workaround requires 'pvb'\n"); + return -EINVAL; + } +#endif + mr->ring = portal->addr.addr_ce + CL_MR; + mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1); + mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1); +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if (qman_ip_rev == QMAN_REV10) + /* Situate the cursor in the shadow ring */ + mr->cursor = portal->bugs.mr + mr->ci; + else +#endif + mr->cursor = mr->ring + mr->ci; + mr->fill = cyc_diff(QM_MR_SIZE, mr->ci, mr->pi); + mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0; + mr->ithresh = qm_in(MR_ITR); +#ifdef CONFIG_FSL_DPA_CHECKING + mr->pmode = pmode; + mr->cmode = cmode; +#endif + /* Only new entries get the copy-and-fixup treatment from + * qm_mr_pvb_update(), so perform it here for any stale entries. */ + for (loop = 0; loop < mr->fill; loop++) + __mr_copy_and_fixup(portal, (mr->ci + loop) & (QM_MR_SIZE - 1)); + cfg = (qm_in(CFG) & 0xfffff0ff) | + ((cmode & 1) << 8); /* QCSP_CFG:MM */ + qm_out(CFG, cfg); + return 0; +} + +static inline void qm_mr_finish(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + if (mr->ci != MR_PTR2IDX(mr->cursor)) + pr_crit("Ignoring completed MR entries\n"); +} + +static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + if (!mr->fill) + return NULL; + return mr->cursor; +} + +static inline u8 qm_mr_cursor(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + return MR_PTR2IDX(mr->cursor); +} + +static inline u8 qm_mr_next(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->fill); + mr->cursor = MR_INC(mr->cursor); + return --mr->fill; +} + +static inline u8 qm_mr_pci_update(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + u8 diff, old_pi = mr->pi; + DPA_ASSERT(mr->pmode == qm_mr_pci); + mr->pi = qm_in(MR_PI_CINH); + diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi); + mr->fill += diff; + return diff; +} + +static inline void qm_mr_pce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->pmode == qm_mr_pce); + qm_cl_invalidate(MR_PI); + qm_cl_touch_ro(MR_PI); +} + +static inline u8 qm_mr_pce_update(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + u8 diff, old_pi = mr->pi; + DPA_ASSERT(mr->pmode == qm_mr_pce); + mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1); + diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi); + mr->fill += diff; + return diff; +} + +static inline void qm_mr_pvb_update(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi); + DPA_ASSERT(mr->pmode == qm_mr_pvb); + /* when accessing 'verb', use __raw_readb() to ensure that compiler + * inlining doesn't try to optimise out "excess reads". 
*/ + if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) { + __mr_copy_and_fixup(portal, mr->pi); + mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1); + if (!mr->pi) + mr->vbit ^= QM_MR_VERB_VBIT; + mr->fill++; + res = MR_INC(res); + } + dcbit_ro(res); +} + +static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_out(MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cci); + mr->ci = MR_PTR2IDX(mr->cursor); + qm_out(MR_CI_CINH, mr->ci); +} + +static inline void qm_mr_cce_prefetch(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cce); + qm_cl_invalidate(MR_CI); + qm_cl_touch_rw(MR_CI); +} + +static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cce); + mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1); + qm_cl_out(MR_CI, mr->ci); +} + +static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + DPA_ASSERT(mr->cmode == qm_mr_cce); + mr->ci = MR_PTR2IDX(mr->cursor); + qm_cl_out(MR_CI, mr->ci); +} + +static inline u8 qm_mr_get_ci(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + return mr->ci; +} + +static inline u8 qm_mr_get_ithresh(struct qm_portal *portal) +{ + register struct qm_mr *mr = &portal->mr; + return mr->ithresh; +} + +static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh) +{ + qm_out(MR_ITR, ithresh); +} + + +/* ------------------------------ */ +/* --- Management command API --- */ + +static inline int qm_mc_init(struct qm_portal *portal) +{ + register struct qm_mc *mc = &portal->mc; + mc->cr = portal->addr.addr_ce + CL_CR; + mc->rr = portal->addr.addr_ce + CL_RR0; + mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) & + QM_MCC_VERB_VBIT) ? 0 : 1; + mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return 0; +} + +static inline void qm_mc_finish(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + if (mc->state != mc_idle) + pr_crit("Losing incomplete MC command\n"); +#endif +} + +static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal) +{ + register struct qm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_idle); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_user; +#endif + dcbz_64(mc->cr); + return mc->cr; +} + +static inline void qm_mc_abort(struct qm_portal *portal) +{ + __maybe_unused register struct qm_mc *mc = &portal->mc; + DPA_ASSERT(mc->state == mc_user); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif +} + +static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb) +{ + register struct qm_mc *mc = &portal->mc; + struct qm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_user); + lwsync(); +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if ((qman_ip_rev == QMAN_REV10) && ((myverb & QM_MCC_VERB_MASK) == + QM_MCC_VERB_INITFQ_SCHED)) { + u32 fqid = mc->cr->initfq.fqid; + /* Do two commands to avoid the hw bug. 
Note, we poll locally + * rather than using qm_mc_result() because from a DPA_CHECKING + * perspective, we don't want to appear to have "finished" until + * both commands are done. */ + mc->cr->__dont_write_directly__verb = mc->vbit | + QM_MCC_VERB_INITFQ_PARKED; + dcbf(mc->cr); + portal->bugs.initfq_and_sched = 1; + do { + dcbit_ro(rr); + } while (!__raw_readb(&rr->verb)); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + if (rr->result != QM_MCR_RESULT_OK) { +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_hw; +#endif + return; + } + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; + rr = mc->rr + mc->rridx; + dcbz_64(mc->cr); + mc->cr->alterfq.fqid = fqid; + lwsync(); + myverb = QM_MCC_VERB_ALTER_SCHED; + } else + portal->bugs.initfq_and_sched = 0; +#endif + mc->cr->__dont_write_directly__verb = myverb | mc->vbit; + dcbf(mc->cr); + dcbit_ro(rr); +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_hw; +#endif +} + +static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal) +{ + register struct qm_mc *mc = &portal->mc; + struct qm_mc_result *rr = mc->rr + mc->rridx; + DPA_ASSERT(mc->state == mc_hw); + /* The inactive response register's verb byte always returns zero until + * its command is submitted and completed. This includes the valid-bit, + * in case you were wondering... */ + if (!__raw_readb(&rr->verb)) { + dcbit_ro(rr); + return NULL; + } +#ifdef CONFIG_FSL_QMAN_BUG_AND_FEATURE_REV1 + if (qman_ip_rev == QMAN_REV10) { + if ((__raw_readb(&rr->verb) & QM_MCR_VERB_MASK) == + QM_MCR_VERB_QUERYFQ) { + void *misplaced = (void *)rr + 50; + copy_words(&portal->bugs.result, rr, sizeof(*rr)); + rr = &portal->bugs.result; + copy_shorts(&rr->queryfq.fqd.td, misplaced, + sizeof(rr->queryfq.fqd.td)); + } else if (portal->bugs.initfq_and_sched) { + /* We split the user-requested command, make the final + * result match the requested type. */ + copy_words(&portal->bugs.result, rr, sizeof(*rr)); + rr = &portal->bugs.result; + rr->verb = (rr->verb & QM_MCR_VERB_RRID) | + QM_MCR_VERB_INITFQ_SCHED; + } + } +#endif + mc->rridx ^= 1; + mc->vbit ^= QM_MCC_VERB_VBIT; +#ifdef CONFIG_FSL_DPA_CHECKING + mc->state = mc_idle; +#endif + return rr; +} + + +/* ------------------------------------- */ +/* --- Portal interrupt register API --- */ + +static inline int qm_isr_init(__always_unused struct qm_portal *portal) +{ + return 0; +} + +static inline void qm_isr_finish(__always_unused struct qm_portal *portal) +{ +} + +static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod) +{ + qm_out(ITPR, iperiod); +} + +static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n) +{ + return __qm_in(&portal->addr, REG_ISR + (n << 2)); +} + +static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, + u32 val) +{ + __qm_out(&portal->addr, REG_ISR + (n << 2), val); +} + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_private.h +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_private.h @@ -0,0 +1,292 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "dpa_sys.h" +#include + +#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64) +#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP" +#endif + + /* ----------------- */ + /* Congestion Groups */ + /* ----------------- */ +/* This wrapper represents a bit-array for the state of the 256 Qman congestion + * groups. Is also used as a *mask* for congestion groups, eg. so we ignore + * those that don't concern us. We harness the structure and accessor details + * already used in the management command to query congestion groups. 
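+ * The 256 groups span eight 32-bit words, most-significant bit first; for
+ * example, assuming __CGR_WORD(n) == n / 32 and __CGR_SHIFT(n) == n % 32,
+ * CGR 37 would map to bit (0x80000000 >> 5) of word 1.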
*/ +struct qman_cgrs { + struct __qm_mcr_querycongestion q; +}; +static inline void qman_cgrs_init(struct qman_cgrs *c) +{ + memset(c, 0, sizeof(*c)); +} +static inline void qman_cgrs_fill(struct qman_cgrs *c) +{ + memset(c, 0xff, sizeof(*c)); +} +static inline int qman_cgrs_get(struct qman_cgrs *c, int num) +{ + return QM_MCR_QUERYCONGESTION(&c->q, num); +} +static inline void qman_cgrs_set(struct qman_cgrs *c, int num) +{ + c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num)); +} +static inline void qman_cgrs_unset(struct qman_cgrs *c, int num) +{ + c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num)); +} +static inline int qman_cgrs_next(struct qman_cgrs *c, int num) +{ + while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num)) + ; + return num; +} +static inline void qman_cgrs_cp(struct qman_cgrs *dest, + const struct qman_cgrs *src) +{ + memcpy(dest, src, sizeof(*dest)); +} +static inline void qman_cgrs_and(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.__state; + const u32 *_a = a->q.__state; + const u32 *_b = b->q.__state; + for (ret = 0; ret < 8; ret++) + *(_d++) = *(_a++) & *(_b++); +} +static inline void qman_cgrs_xor(struct qman_cgrs *dest, + const struct qman_cgrs *a, const struct qman_cgrs *b) +{ + int ret; + u32 *_d = dest->q.__state; + const u32 *_a = a->q.__state; + const u32 *_b = b->q.__state; + for (ret = 0; ret < 8; ret++) + *(_d++) = *(_a++) ^ *(_b++); +} + +#define qman_cgrs_for_each_1(cgr, cgrs) \ + for ((cgr) = -1; (cgr) = qman_cgrs_next((cgrs), (cgr)),\ + (cgr) < __CGR_NUM;) + +/* used by CCSR and portal interrupt code */ +enum qm_isr_reg { + qm_isr_status = 0, + qm_isr_enable = 1, + qm_isr_disable = 2, + qm_isr_inhibit = 3 +}; + +#define QM_ADDR_CE 0 +#define QM_ADDR_CI 1 +struct qm_portal_config { + /* Corenet portal addresses; + * [0]==cache-enabled, [1]==cache-inhibited. */ + __iomem void *addr_virt[2]; + struct resource addr_phys[2]; + struct device_node *node; + /* Allow these to be joined in lists */ + struct list_head list; + /* User-visible portal configuration settings */ + struct qman_portal_config public_cfg; +}; + +/* Hooks for driver initialisation */ +__init int fqalloc_init(int use_bman); + +/* Revision info (for errata and feature handling) */ +#define QMAN_REV10 0x0100 +#define QMAN_REV11 0x0101 +#define QMAN_REV12 0x0102 +#define QMAN_REV20 0x0200 +extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */ + +#ifdef CONFIG_FSL_QMAN_CONFIG +/* Hooks from qman_driver.c to qman_config.c */ +int qman_init_error_int(struct device_node *node); +void qman_liodn_fixup(enum qm_channel channel); +#endif + +/* Hooks from qman_driver.c in to qman_high.c */ +struct qman_portal *qman_create_affine_portal( + const struct qm_portal_config *config, + const struct qman_cgrs *cgrs, + const struct qman_fq_cb *null_cb, + int recovery_mode); +struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect); +const struct qm_portal_config *qman_destroy_affine_portal(void); +void qman_recovery_exit_local(void); + +/* This CGR feature is supported by h/w and required by unit-tests and the + * debugfs hooks, so is implemented in the driver. However it allows an explicit + * corruption of h/w fields by s/w that are usually incorruptible (because the + * counters are usually maintained entirely within h/w). As such, we declare + * this API internally. 
*/ +int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt, + struct qm_mcr_cgrtestwrite *result); + +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP +/* If the fq object pointer is greater than the size of context_b field, + * than a lookup table is required. */ +int qman_setup_fq_lookup_table(size_t num_entries); +#endif + +/*************************************************/ +/* QMan s/w corenet portal, low-level i/face */ +/*************************************************/ + +/* Note: most functions are only used by the high-level interface, so are + * inlined from qman_low.h. The stuff below is for use by other parts of the + * driver. */ + +/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one + * dequeue TYPE. Choose TOKEN (8-bit). + * If SOURCE == CHANNELS, + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n). + * You can choose DEDICATED_PRECEDENCE if the portal channel should have + * priority. + * If SOURCE == SPECIFICWQ, + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the + * same value. + */ +#define QM_SDQCR_SOURCE_CHANNELS 0x0 +#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000 +#define QM_SDQCR_COUNT_EXACT1 0x0 +#define QM_SDQCR_COUNT_UPTO3 0x20000000 +#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000 +#define QM_SDQCR_TYPE_MASK 0x03000000 +#define QM_SDQCR_TYPE_NULL 0x0 +#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000 +#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000 +#define QM_SDQCR_TYPE_ACTIVE 0x03000000 +#define QM_SDQCR_TOKEN_MASK 0x00ff0000 +#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16) +#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff) +#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000 +#if 0 /* These are defined in the external fsl_qman.h API */ +#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff +#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +#endif +#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7 +#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000 +#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4) +#define QM_SDQCR_SPECIFICWQ_WQ(n) (n) + +/* For qm_dqrr_vdqcr_set(); Choose one PRECEDENCE. EXACT is optional. Use + * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use + * FQID(n) to fill in the frame queue ID. */ +#if 0 /* These are defined in the external fsl_qman.h API */ +#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 +#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 +#define QM_VDQCR_EXACT 0x40000000 +#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 +#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) +#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) +#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) +#endif +#define QM_VDQCR_FQID_MASK 0x00ffffff +#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK) + +/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT. + * If MODE==SCHEDULED + * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE. + * If CHANNELS, + * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels. + * You can choose DEDICATED_PRECEDENCE if the portal channel should have + * priority. + * If SPECIFICWQ, + * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the + * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the + * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the + * same value. + * If MODE==UNSCHEDULED + * Choose FQID(). 
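+ * For example (illustrative only), an unscheduled single-frame pull of
+ * FQID 0x42 could be encoded as:
+ *   QM_PDQCR_MODE_UNSCHEDULED | QM_PDQCR_COUNT_EXACT1 | QM_PDQCR_FQID(0x42)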
+ */ +#define QM_PDQCR_MODE_SCHEDULED 0x0 +#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000 +#define QM_PDQCR_SCHEDULED_CHANNELS 0x0 +#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000 +#define QM_PDQCR_COUNT_EXACT1 0x0 +#define QM_PDQCR_COUNT_UPTO3 0x20000000 +#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000 +#define QM_PDQCR_TYPE_MASK 0x03000000 +#define QM_PDQCR_TYPE_NULL 0x0 +#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000 +#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000 +#define QM_PDQCR_TYPE_ACTIVE 0x03000000 +#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000 +#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n)) +#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7 +#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000 +#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4) +#define QM_PDQCR_SPECIFICWQ_WQ(n) (n) +#define QM_PDQCR_FQID(n) ((n) & 0xffffff) + +/* Used by all portal interrupt registers except 'inhibit'. NB, some of these + * definitions are exported for use by the qman_irqsource_***() APIs, so are + * commented-out here. */ +#define QM_PIRQ_DQAVAIL 0x0000ffff /* Channels with frame availability */ +#if 0 +#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */ +#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */ +#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */ +#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */ +#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */ +/* This mask contains all the interrupt sources that need handling except DQRI, + * ie. that if present should trigger slow-path processing. */ +#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \ + QM_PIRQ_MRI) +#endif +/* The DQAVAIL interrupt fields break down into these bits; */ +#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */ +#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */ +#define QM_DQAVAIL_MASK 0xffff +/* This mask contains all the "irqsource" bits visible to API users */ +#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI) + +/* These are qm__(). So for example, qm_disable_write() means "write + * the disable register" rather than "disable the ability to write". */ +#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status) +#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m) +#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable) +#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v) +#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable) +#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v) +/* TODO: unfortunate name-clash here, reword? */ +#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1) +#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0) --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_test.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_test.c @@ -0,0 +1,61 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +MODULE_AUTHOR("Geoff Thorpe"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Qman testing"); + +static int test_init(void) +{ + int loop = 1; + while(loop--) { +#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO + qman_test_hotpotato(); +#endif +#ifdef CONFIG_FSL_QMAN_TEST_HIGH + qman_test_high(); +#endif +#ifdef CONFIG_FSL_QMAN_TEST_ERRATA + qman_test_errata(); +#endif + } + return 0; +} + +static void test_exit(void) +{ +} + +module_init(test_init); +module_exit(test_exit); + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_test.h +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_test.h @@ -0,0 +1,85 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +void qman_test_hotpotato(void); +void qman_test_high(void); +void qman_test_errata(void); +void qman_test_fqrange(void); + +static inline void __hexdump(unsigned long start, unsigned long end, + unsigned long p, size_t sz, const unsigned char *c) +{ + while (start < end) { + unsigned int pos = 0; + char buf[64]; + int nl = 0; + pos += sprintf(buf + pos, "%08lx: ", start); + do { + if ((start < p) || (start >= (p + sz))) + pos += sprintf(buf + pos, ".."); + else + pos += sprintf(buf + pos, "%02x", *(c++)); + if (!(++start & 15)) { + buf[pos++] = '\n'; + nl = 1; + } else { + nl = 0; + if(!(start & 1)) + buf[pos++] = ' '; + if(!(start & 3)) + buf[pos++] = ' '; + } + } while (start & 15); + if (!nl) + buf[pos++] = '\n'; + buf[pos] = '\0'; + pr_info("%s", buf); + } +} +static inline void hexdump(const void *ptr, size_t sz) +{ + unsigned long p = (unsigned long)ptr; + unsigned long start = p & ~(unsigned long)15; + unsigned long end = (p + sz + 15) & ~(unsigned long)15; + const unsigned char *c = ptr; + __hexdump(start, end, p, sz, c); +} + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_test_errata.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_test_errata.c @@ -0,0 +1,248 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +/* Waiting on a model fix from virtutech */ +#if 0 +/*********************/ +/* generic utilities */ +/*********************/ + +static int do_enqueues(struct qman_fq *fq, const struct qm_fd *fds, int num) +{ + int ret = 0; + u32 flags = QMAN_ENQUEUE_FLAG_WAIT; + while (num-- && !ret) { + if (!num) + flags |= QMAN_ENQUEUE_FLAG_WAIT_SYNC; + pr_info("about to enqueue\n"); + ret = qman_enqueue(fq, fds++, flags); + } + return ret; +} + +/***************************/ +/* "tdthresh" test (QMAN6) */ +/***************************/ + +/* First thresh == 201 * (2^21) == 421527552 (0x19200000) */ +#define THRESH_MANT 201 +#define THRESH_EXP 21 + +/* first three equal thresh, fourth takes us over */ +static const struct qm_fd td_eq[] = { + QM_FD_FMT_20(0, 0x34, 0x87654321, QM_FD_SG, 0, 79321), + QM_FD_FMT_29(0, 0x34, 0x87654321, QM_FD_COMPOUND, 29923679), + QM_FD_FMT_29(0, 0x0d, 0xacadabba, QM_FD_CONTIG_BIG, 391524552), + QM_FD_FMT_20(0, 0x0b, 0x0fa10ada, QM_FD_CONTIG, 0, 1), + QM_FD_FMT_20(0, 0x0b, 0x0fa10ada, QM_FD_CONTIG, 0, 1), +}; + +struct tdthresh_fq { + struct qman_fq fq; + int got_ern; + int num_dqrr; +}; + +static enum qman_cb_dqrr_result cb_dqrr_tdthresh(struct qman_portal *p, + struct qman_fq *__fq, + const struct qm_dqrr_entry *dqrr) +{ + struct tdthresh_fq *t = (void *)__fq; + t->num_dqrr++; + return qman_cb_dqrr_consume; +} + +static void cb_ern_tdthresh(struct qman_portal *p, struct qman_fq *__fq, + const struct qm_mr_entry *mr) +{ + struct tdthresh_fq *t = (void *)__fq; + t->got_ern = 1; +} + +static void test_tdthresh(void) +{ + struct tdthresh_fq tdfq = { + .fq = { + .cb = { + .dqrr = cb_dqrr_tdthresh, + .ern = cb_ern_tdthresh + } + }, + .got_ern = 0, + .num_dqrr = 0 + }; + struct qman_fq *fq = &tdfq.fq; + struct qm_mcc_initfq opts = { + .we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_TDTHRESH, + .fqd = { + .fq_ctrl = QM_FQCTRL_TDE, + .td = { + .exp = THRESH_EXP, + .mant = THRESH_MANT, + } + } + }; + struct qm_fqd fqd; + u32 flags; + int ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq); + BUG_ON(ret); + /* leave it parked, and set it for local dequeue (loopback) */ + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, &opts); + BUG_ON(ret); + /* query it back and confirm everything is ok */ + ret = qman_query_fq(fq, &fqd); + BUG_ON(ret); + if (fqd.fq_ctrl != opts.fqd.fq_ctrl) { + pr_err("queried fq_ctrl=%x, should be=%x\n", fqd.fq_ctrl, + opts.fqd.fq_ctrl); + panic("fail"); + } + if (memcmp(&fqd.td, &opts.fqd.td, sizeof(fqd.td))) { + pr_err("queried td_thresh=%x:%x, should be=%x:%x\n", + fqd.td.exp, fqd.td.mant, + opts.fqd.td.exp, opts.fqd.td.mant); + panic("fail"); + } + ret = do_enqueues(fq, td_eq, 3); + BUG_ON(ret); + pr_info(" tdthresh: eq[0..2] complete\n"); + /* enqueues are flushed, so if Qman is going to throw an ERN, the irq + * assertion will already be on its way. 
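+	 * (The first three frame lengths - 79321 + 29923679 + 391524552 -
+	 * sum to exactly the 201 << 21 == 421527552 byte threshold, so no
+	 * tail-drop ERN is expected yet.)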
*/ + msleep(500); + BUG_ON(tdfq.got_ern); + pr_info(" tdthresh: eq <= thresh OK\n"); + ret = do_enqueues(fq, td_eq + 3, 1); + BUG_ON(ret); + pr_info(" tdthresh: eq[3] complete\n"); + /* enqueues are flushed, so if Qman is going to throw an ERN, the irq + * assertion will already be on its way. */ + msleep(500); + BUG_ON(tdfq.got_ern); + pr_info(" tdthresh: eq <= thresh OK\n"); + ret = do_enqueues(fq, td_eq + 4, 1); + BUG_ON(ret); + pr_info(" tdthresh: eq[4] complete\n"); + /* enqueues are flushed, so if Qman is going to throw an ERN, the irq + * assertion will already be on its way. */ + msleep(500); + BUG_ON(!tdfq.got_ern); + pr_info(" tdthresh: eq > thresh OK\n"); + ret = qman_volatile_dequeue(fq, + QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH, + QM_VDQCR_NUMFRAMES_TILLEMPTY); + BUG_ON(ret); + BUG_ON(tdfq.num_dqrr != 4); + ret = qman_retire_fq(fq, &flags); + BUG_ON(ret); + BUG_ON(flags); + ret = qman_oos_fq(fq); + BUG_ON(ret); +} + +/****************************/ +/* "ern code6" test (QMAN9) */ +/****************************/ + +/* Dummy FD to enqueue out-of-sequence and generate an ERN */ +static const struct qm_fd c6_eq = + QM_FD_FMT_29(0, 0xba, 0xdeadbeef, QM_FD_CONTIG_BIG, 1234); + +struct code6_fq { + struct qman_fq fq; + struct qm_mr_entry mr; + struct completion got_ern; +}; + +static void cb_ern_code6(struct qman_portal *p, struct qman_fq *__fq, + const struct qm_mr_entry *mr) +{ + struct code6_fq *c = (void *)__fq; + memcpy(&c->mr, mr, sizeof(*mr)); + complete(&c->got_ern); +} + +static void test_ern_code6(void) +{ + struct code6_fq c6fq = { + .fq = { + .cb = { + .ern = cb_ern_code6 + } + }, + .got_ern = COMPLETION_INITIALIZER(c6fq.got_ern) + }; + struct qman_fq *fq = &c6fq.fq; + struct qm_mcc_initfq opts = { + .we_mask = QM_INITFQ_WE_FQCTRL, + .fqd = { + .fq_ctrl = QM_FQCTRL_ORP + } + }; + u32 flags; + int ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq); + BUG_ON(ret); + /* leave it parked, and set it for local dequeue (loopback) */ + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, &opts); + BUG_ON(ret); + /* enqueue with ORP using a "too early" sequence number */ + ret = qman_enqueue_orp(fq, &c6_eq, + QMAN_ENQUEUE_FLAG_WAIT | QMAN_ENQUEUE_FLAG_WAIT_SYNC, fq, 5); + BUG_ON(ret); + pr_info(" code6: eq complete\n"); + ret = qman_retire_fq(fq, &flags); + BUG_ON(ret); + pr_info(" code6: retire complete, flags=%08x\n", flags); + BUG_ON(flags != QMAN_FQ_STATE_ORL); + wait_for_completion(&c6fq.got_ern); + pr_info(" code6: ERN, VERB=0x%02x, RC==0x%02x\n", + c6fq.mr.verb, c6fq.mr.ern.rc); + BUG_ON(c6fq.mr.verb & 0x20); + BUG_ON((c6fq.mr.ern.rc & QM_MR_RC_MASK) != QM_MR_RC_ORPWINDOW_RETIRED); + ret = qman_oos_fq(fq); + BUG_ON(ret); +} + +void qman_test_errata(void) +{ + pr_info("Testing Qman errata handling ...\n"); + test_tdthresh(); + test_ern_code6(); + pr_info(" ... SUCCESS!\n"); +} +#else +void qman_test_errata(void) +{ + pr_info("Qman errata-handling test disabled, waiting on model fix\n"); +} +#endif + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_test_high.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_test_high.c @@ -0,0 +1,222 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_test.h" + +/*************/ +/* constants */ +/*************/ + +#define CGR_ID 27 +#define POOL_ID 2 +#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID +#define NUM_ENQUEUES 10 +#define NUM_PARTIAL 4 +#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \ + QM_SDQCR_TYPE_PRIO_QOS | \ + QM_SDQCR_TOKEN_SET(0x98) | \ + QM_SDQCR_CHANNELS_DEDICATED | \ + QM_SDQCR_CHANNELS_POOL(POOL_ID)) +#define PORTAL_OPAQUE (void *)0xf00dbeef +#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH) + +/*************************************/ +/* Predeclarations (eg. for fq_base) */ +/*************************************/ + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *, + struct qman_fq *, + const struct qm_dqrr_entry *); +static void cb_ern(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); +static void cb_dc_ern(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); +static void cb_fqs(struct qman_portal *, struct qman_fq *, + const struct qm_mr_entry *); + +/***************/ +/* global vars */ +/***************/ + +static struct qm_fd fd, fd_dq; +static struct qman_fq fq_base = { + .cb.dqrr = cb_dqrr, + .cb.ern = cb_ern, + .cb.dc_ern = cb_dc_ern, + .cb.fqs = cb_fqs +}; +static DECLARE_WAIT_QUEUE_HEAD(waitqueue); +static int retire_complete, sdqcr_complete; + +/**********************/ +/* internal functions */ +/**********************/ + +/* Helpers for initialising and "incrementing" a frame descriptor */ +static void fd_init(struct qm_fd *__fd) +{ + qm_fd_addr_set64(__fd, 0xabdeadbeefLLU); + __fd->format = qm_fd_contig_big; + __fd->length29 = 0x0000ffff; + __fd->cmd = 0xfeedf00d; +} + +static void fd_inc(struct qm_fd *__fd) +{ + u64 t = qm_fd_addr_get64(__fd); + int z = t >> 40; + t <<= 1; + if (z) + t |= 1; + qm_fd_addr_set64(__fd, t); + __fd->length29--; + __fd->cmd++; +} + +/* The only part of the 'fd' we can't memcmp() is the ppid */ +static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b) +{ + int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 
0 : -1; + if (!r) + r = a->format - b->format; + if (!r) + r = a->opaque - b->opaque; + if (!r) + r = a->cmd - b->cmd; + return r; +} + +/********/ +/* test */ +/********/ + +static void do_enqueues(struct qman_fq *fq) +{ + unsigned int loop; + for (loop = 0; loop < NUM_ENQUEUES; loop++) { + if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT | + (((loop + 1) == NUM_ENQUEUES) ? + QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0))) + panic("qman_enqueue() failed\n"); + fd_inc(&fd); + } +} + +void qman_test_high(void) +{ + int flags, res; + struct qman_fq *fq = &fq_base; + + pr_info("qman_test_high starting\n"); + fd_init(&fd); + fd_init(&fd_dq); + + /* Initialise (parked) FQ */ + if (qman_create_fq(0, FQ_FLAGS, fq)) + panic("qman_create_fq() failed\n"); + if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL)) + panic("qman_init_fq() failed\n"); + + /* Do enqueues + VDQCR, twice. (Parked FQ) */ + do_enqueues(fq); + pr_info("VDQCR (till-empty);\n"); + if (qman_volatile_dequeue(fq, VDQCR_FLAGS, + QM_VDQCR_NUMFRAMES_TILLEMPTY)) + panic("qman_volatile_dequeue() failed\n"); + do_enqueues(fq); + pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES); + if (qman_volatile_dequeue(fq, VDQCR_FLAGS, + QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL))) + panic("qman_volatile_dequeue() failed\n"); + pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL, + NUM_ENQUEUES); + if (qman_volatile_dequeue(fq, VDQCR_FLAGS, + QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL))) + panic("qman_volatile_dequeue() failed\n"); + + do_enqueues(fq); + pr_info("scheduled dequeue (till-empty)\n"); + if (qman_schedule_fq(fq)) + panic("qman_schedule_fq() failed\n"); + wait_event(waitqueue, sdqcr_complete); + + /* Retire and OOS the FQ */ + res = qman_retire_fq(fq, &flags); + if (res < 0) + panic("qman_retire_fq() failed\n"); + wait_event(waitqueue, retire_complete); + if (flags & QMAN_FQ_STATE_BLOCKOOS) + panic("leaking frames\n"); + if (qman_oos_fq(fq)) + panic("qman_oos_fq() failed\n"); + qman_destroy_fq(fq, 0); + pr_info("qman_test_high finished\n"); +} + +static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p, + struct qman_fq *fq, + const struct qm_dqrr_entry *dq) +{ + if (fd_cmp(&fd_dq, &dq->fd)) { + pr_err("BADNESS: dequeued frame doesn't match;\n"); + BUG(); + } + fd_inc(&fd_dq); + if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) { + sdqcr_complete = 1; + wake_up(&waitqueue); + } + return qman_cb_dqrr_consume; +} + +static void cb_ern(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + panic("cb_ern() unimplemented"); +} + +static void cb_dc_ern(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + panic("cb_dc_ern() unimplemented"); +} + +static void cb_fqs(struct qman_portal *p, struct qman_fq *fq, + const struct qm_mr_entry *msg) +{ + u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK); + if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) + panic("unexpected FQS message"); + pr_info("Retirement message received\n"); + retire_complete = 1; + wake_up(&waitqueue); +} + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_test_hotpotato.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_test_hotpotato.c @@ -0,0 +1,497 @@ +/* Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include "qman_test.h" + +/* Algorithm: + * + * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates + * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The + * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will + * shuttle a "hot potato" frame around them such that every forwarding action + * moves it from one cpu to another. (The use of more than one handler per cpu + * is to allow enough handlers/FQs to truly test the significance of caching - + * ie. when cache-expiries are occuring.) + * + * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the + * first and last words of the frame data will undergo a transformation step on + * each forwarding action. To achieve this, each handler will be assigned a + * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is + * received by a handler, the mixer of the expected sender is XOR'd into all + * words of the entire frame, which is then validated against the original + * values. Then, before forwarding, the entire frame is XOR'd with the mixer of + * the current handler. Apart from validating that the frame is taking the + * expected path, this also provides some quasi-realistic overheads to each + * forwarding action - dereferencing *all* the frame data, computation, and + * conditional branching. There is a "special" handler designated to act as the + * instigator of the test by creating an enqueuing the "hot potato" frame, and + * to determine when the test has completed by counting HP_LOOPS iterations. + * + * Init phases: + * + * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them + * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU + * handlers and link-list them (but do no other handler setup). + * + * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each + * hp_cpu's 'iterator' to point to its first handler. 
With each loop, + * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler + * and advance the iterator for the next loop. This includes a final fixup, + * which connects the last handler to the first (and which is why phase 2 + * and 3 are separate). + * + * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each + * hp_cpu's 'iterator' to point to its first handler. With each loop, + * initialise FQ objects and advance the iterator for the next loop. + * Moreover, do this initialisation on the cpu it applies to so that Rx FQ + * initialisation targets the correct cpu. + */ + +/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes + * the fn from irq context, which is too restrictive). */ +struct bstrap { + void (*fn)(void); + atomic_t started; +}; +static int bstrap_fn(void *__bstrap) +{ + struct bstrap *bstrap = __bstrap; + atomic_inc(&bstrap->started); + bstrap->fn(); + while (!kthread_should_stop()) + msleep(1); + return 0; +} +static int on_all_cpus(void (*fn)(void)) +{ + int cpu; + for_each_cpu(cpu, cpu_online_mask) { + struct bstrap bstrap = { + .fn = fn, + .started = ATOMIC_INIT(0) + }; + struct task_struct *k = kthread_create(bstrap_fn, &bstrap, + "hotpotato%d", cpu); + int ret; + if (IS_ERR(k)) + return -ENOMEM; + kthread_bind(k, cpu); + wake_up_process(k); + /* If we call kthread_stop() before the "wake up" has had an + * effect, then the thread may exit with -EINTR without ever + * running the function. So poll until it's started before + * requesting it to stop. */ + while (!atomic_read(&bstrap.started)) + msleep(10); + ret = kthread_stop(k); + if (ret) + return ret; + } + return 0; +} + +struct hp_handler { + + /* The following data is stashed when 'rx' is dequeued; */ + /* -------------- */ + /* The Rx FQ, dequeues of which will stash the entire hp_handler */ + struct qman_fq rx; + /* The Tx FQ we should forward to */ + struct qman_fq tx; + /* The value we XOR post-dequeue, prior to validating */ + u32 rx_mixer; + /* The value we XOR pre-enqueue, after validating */ + u32 tx_mixer; + /* what the hotpotato address should be on dequeue */ + dma_addr_t addr; + u32 *frame_ptr; + + /* The following data isn't (necessarily) stashed on dequeue; */ + /* -------------- */ + u32 fqid_rx, fqid_tx; + /* list node for linking us into 'hp_cpu' */ + struct list_head node; + /* Just to check ... */ + unsigned int processor_id; +} ____cacheline_aligned; + +struct hp_cpu { + /* identify the cpu we run on; */ + unsigned int processor_id; + /* root node for the per-cpu list of handlers */ + struct list_head handlers; + /* list node for linking us into 'hp_cpu_list' */ + struct list_head node; + /* when repeatedly scanning 'hp_list', each time linking the n'th + * handlers together, this is used as per-cpu iterator state */ + struct hp_handler *iterator; +}; + +/* Each cpu has one of these */ +static DEFINE_PER_CPU(struct hp_cpu, hp_cpus); + +/* links together the hp_cpu structs, in first-come first-serve order. */ +static LIST_HEAD(hp_cpu_list); +static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock); + +static unsigned int hp_cpu_list_length; + +/* the "special" handler, that starts and terminates the test. */ +static struct hp_handler *special_handler; +static int loop_counter; + +/* handlers are allocated out of this, so they're properly aligned. 
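+ * Dequeues on each handler's Rx FQ stash the hp_handler itself, so the
+ * cacheline alignment keeps that stashed context within the STASH_CTX_CL
+ * cachelines computed further down.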
*/ +static struct kmem_cache *hp_handler_slab; + +/* this is the frame data */ +static void *__frame_ptr; +static u32 *frame_ptr; +static dma_addr_t frame_dma; + +/* the main function waits on this */ +static DECLARE_WAIT_QUEUE_HEAD(queue); + +#define HP_PER_CPU 2 +#define HP_LOOPS 8 +/* 80 bytes, like a small ethernet frame, and bleeds into a second cacheline */ +#define HP_NUM_WORDS 80 +/* First word of the LFSR-based frame data */ +#define HP_FIRST_WORD 0xabbaf00d + +static inline u32 do_lfsr(u32 prev) +{ + return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u); +} + +static void allocate_frame_data(void) +{ + u32 lfsr = HP_FIRST_WORD; + int loop; + struct platform_device *pdev = platform_device_alloc("foobar", -1); + if (!pdev) + panic("platform_device_alloc() failed"); + if (platform_device_add(pdev)) + panic("platform_device_add() failed"); + __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL); + if (!__frame_ptr) + panic("kmalloc() failed"); + frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) & + ~(unsigned long)63); + for (loop = 0; loop < HP_NUM_WORDS; loop++) { + frame_ptr[loop] = lfsr; + lfsr = do_lfsr(lfsr); + } + frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS, + DMA_BIDIRECTIONAL); + platform_device_del(pdev); + platform_device_put(pdev); +} + +static void deallocate_frame_data(void) +{ + kfree(__frame_ptr); +} + +static inline void process_frame_data(struct hp_handler *handler, + const struct qm_fd *fd) +{ + u32 *p = handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop; + if (qm_fd_addr_get64(fd) != handler->addr) + panic("bad frame address"); + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + *p ^= handler->rx_mixer; + if (*p != lfsr) + panic("corrupt frame data"); + *p ^= handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } +} + +static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + struct hp_handler *handler = (struct hp_handler *)fq; + + process_frame_data(handler, &dqrr->fd); + if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) + panic("qman_enqueue() failed"); + return qman_cb_dqrr_consume; +} + +static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal, + struct qman_fq *fq, + const struct qm_dqrr_entry *dqrr) +{ + struct hp_handler *handler = (struct hp_handler *)fq; + + process_frame_data(handler, &dqrr->fd); + if (++loop_counter < HP_LOOPS) { + if (qman_enqueue(&handler->tx, &dqrr->fd, 0)) + panic("qman_enqueue() failed"); + } else { + pr_info("Received final (%dth) frame\n", loop_counter); + wake_up(&queue); + } + return qman_cb_dqrr_consume; +} + +static void create_per_cpu_handlers(void) +{ + struct hp_handler *handler; + int loop; + struct hp_cpu *hp_cpu = &__get_cpu_var(hp_cpus); + + hp_cpu->processor_id = smp_processor_id(); + spin_lock(&hp_lock); + list_add_tail(&hp_cpu->node, &hp_cpu_list); + hp_cpu_list_length++; + spin_unlock(&hp_lock); + INIT_LIST_HEAD(&hp_cpu->handlers); + for (loop = 0; loop < HP_PER_CPU; loop++) { + handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL); + if (!handler) + panic("kmem_cache_alloc() failed"); + handler->processor_id = hp_cpu->processor_id; + handler->addr = frame_dma; + handler->frame_ptr = frame_ptr; + list_add_tail(&handler->node, &hp_cpu->handlers); + } +} + +static void destroy_per_cpu_handlers(void) +{ + struct list_head *loop, *tmp; + struct hp_cpu *hp_cpu = &__get_cpu_var(hp_cpus); + + spin_lock(&hp_lock); + list_del(&hp_cpu->node); + spin_unlock(&hp_lock); + list_for_each_safe(loop, tmp, 
&hp_cpu->handlers) { + u32 flags; + struct hp_handler *handler = list_entry(loop, struct hp_handler, + node); + if (qman_retire_fq(&handler->rx, &flags)) + panic("qman_retire_fq(rx) failed"); + BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS); + if (qman_oos_fq(&handler->rx)) + panic("qman_oos_fq(rx) failed"); + qman_destroy_fq(&handler->rx, 0); + qman_destroy_fq(&handler->tx, 0); + qm_fq_free(handler->fqid_rx); + list_del(&handler->node); + kmem_cache_free(hp_handler_slab, handler); + } +} + +static inline u8 num_cachelines(u32 offset) +{ + u8 res = (offset + (L1_CACHE_BYTES - 1)) + / (L1_CACHE_BYTES); + if (res > 3) + return 3; + return res; +} +#define STASH_DATA_CL \ + num_cachelines(HP_NUM_WORDS * 4) +#define STASH_CTX_CL \ + num_cachelines(offsetof(struct hp_handler,fqid_rx)) + +static void init_handler(void *__handler) +{ + struct qm_mcc_initfq opts; + struct hp_handler *handler = __handler; + BUG_ON(handler->processor_id != smp_processor_id()); + /* Set up rx */ + memset(&handler->rx, 0, sizeof(handler->rx)); + if (handler == special_handler) + handler->rx.cb.dqrr = special_dqrr; + else + handler->rx.cb.dqrr = normal_dqrr; + if (qman_create_fq(handler->fqid_rx, 0, &handler->rx)) + panic("qman_create_fq(rx) failed"); + memset(&opts, 0, sizeof(opts)); + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING; + opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL; + opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL; + if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED | + QMAN_INITFQ_FLAG_LOCAL, &opts)) + panic("qman_init_fq(rx) failed"); + /* Set up tx */ + memset(&handler->tx, 0, sizeof(handler->tx)); + if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY, + &handler->tx)) + panic("qman_create_fq(tx) failed"); +} + +static void init_phase2(void) +{ + int loop; + u32 fqid = 0; + u32 lfsr = 0xdeadbeef; + struct hp_cpu *hp_cpu; + struct hp_handler *handler; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + /* Rx FQID is the previous handler's Tx FQID */ + hp_cpu->iterator->fqid_rx = fqid; + /* Allocate new FQID for Tx */ + fqid = qm_fq_new(); + if (!fqid) + panic("qm_fq_new() failed"); + hp_cpu->iterator->fqid_tx = fqid; + /* Rx mixer is the previous handler's Tx mixer */ + hp_cpu->iterator->rx_mixer = lfsr; + /* Get new mixer for Tx */ + lfsr = do_lfsr(lfsr); + hp_cpu->iterator->tx_mixer = lfsr; + } + } + /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */ + hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node); + handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node); + BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef)); + handler->fqid_rx = fqid; + handler->rx_mixer = lfsr; + /* and tag it as our "special" handler */ + special_handler = handler; +} + +static void init_phase3(void) +{ + int loop; + struct hp_cpu *hp_cpu; + + for (loop = 0; loop < HP_PER_CPU; loop++) { + list_for_each_entry(hp_cpu, &hp_cpu_list, node) { + if (!loop) + hp_cpu->iterator = list_first_entry( + &hp_cpu->handlers, + struct hp_handler, node); + else + hp_cpu->iterator = list_entry( + hp_cpu->iterator->node.next, + struct hp_handler, node); + preempt_disable(); + if (hp_cpu->processor_id == smp_processor_id()) + init_handler(hp_cpu->iterator); + else + 
smp_call_function_single(hp_cpu->processor_id, + init_handler, hp_cpu->iterator, 1); + preempt_enable(); + } + } +} + +static void send_first_frame(void *ignore) +{ + u32 *p = special_handler->frame_ptr; + u32 lfsr = HP_FIRST_WORD; + int loop; + struct qm_fd fd; + + BUG_ON(special_handler->processor_id != smp_processor_id()); + memset(&fd, 0, sizeof(fd)); + qm_fd_addr_set64(&fd, special_handler->addr); + fd.format = qm_fd_contig_big; + fd.length29 = HP_NUM_WORDS * 4; + for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) { + if (*p != lfsr) + panic("corrupt frame data"); + *p ^= special_handler->tx_mixer; + lfsr = do_lfsr(lfsr); + } + pr_info("Sending first frame\n"); + if (qman_enqueue(&special_handler->tx, &fd, 0)) + panic("qman_enqueue() failed"); +} + +void qman_test_hotpotato(void) +{ + if (cpumask_weight(cpu_online_mask) < 2) { + pr_info("qman_test_hotpotato, skip - only 1 CPU\n"); + return; + } + + pr_info("qman_test_hotpotato starting\n"); + + hp_cpu_list_length = 0; + loop_counter = 0; + hp_handler_slab = kmem_cache_create("hp_handler_slab", + sizeof(struct hp_handler), L1_CACHE_BYTES, + SLAB_HWCACHE_ALIGN, NULL); + if (!hp_handler_slab) + panic("kmem_cache_create() failed"); + + allocate_frame_data(); + + /* Init phase 1 */ + pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU); + if (on_all_cpus(create_per_cpu_handlers)) + panic("on_each_cpu() failed"); + pr_info("Number of cpus: %d, total of %d handlers\n", + hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU); + + init_phase2(); + + init_phase3(); + + preempt_disable(); + if (special_handler->processor_id == smp_processor_id()) + send_first_frame(NULL); + else + smp_call_function_single(special_handler->processor_id, + send_first_frame, NULL, 1); + preempt_enable(); + + wait_event(queue, loop_counter == HP_LOOPS); + deallocate_frame_data(); + if (on_all_cpus(destroy_per_cpu_handlers)) + panic("on_each_cpu() failed"); + kmem_cache_destroy(hp_handler_slab); + pr_info("qman_test_hotpotato finished\n"); +} + --- linux-3.13.0.orig/drivers/staging/fsl_qbman/qman_utility.c +++ linux-3.13.0/drivers/staging/fsl_qbman/qman_utility.c @@ -0,0 +1,131 @@ +/* Copyright 2008-2011 Freescale Semiconductor, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "qman_private.h" + +/* ----------------- */ +/* --- FQID Pool --- */ + +struct qman_fqid_pool { + /* Base and size of the FQID range */ + u32 fqid_base; + u32 total; + /* Number of FQIDs currently "allocated" */ + u32 used; + /* Allocation optimisation. When 'usedfqid_base = fqid_start; + pool->total = num; + pool->used = 0; + pool->next = 0; + pool->bits = kmalloc(QNUM_BYTES(num), GFP_KERNEL); + if (!pool->bits) { + kfree(pool); + return NULL; + } + memset(pool->bits, 0, QNUM_BYTES(num)); + /* If num is not an even multiple of QLONG_BITS (or even 8, for + * byte-oriented searching) then we fill the trailing bits with 1, to + * make them look allocated (permanently). */ + for (i = num + 1; i < QNUM_BITS(num); i++) + set_bit(i, pool->bits); + return pool; +} +EXPORT_SYMBOL(qman_fqid_pool_create); + +int qman_fqid_pool_destroy(struct qman_fqid_pool *pool) +{ + int ret = pool->used; + kfree(pool->bits); + kfree(pool); + return ret; +} +EXPORT_SYMBOL(qman_fqid_pool_destroy); + +int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid) +{ + int ret; + if (pool->used == pool->total) + return -ENOMEM; + *fqid = pool->fqid_base + pool->next; + ret = test_and_set_bit(pool->next, pool->bits); + BUG_ON(ret); + if (++pool->used == pool->total) + return 0; + pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next); + if (pool->next >= pool->total) + pool->next = find_first_zero_bit(pool->bits, pool->total); + BUG_ON(pool->next >= pool->total); + return 0; +} +EXPORT_SYMBOL(qman_fqid_pool_alloc); + +void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid) +{ + int ret; + + fqid -= pool->fqid_base; + ret = test_and_clear_bit(fqid, pool->bits); + BUG_ON(!ret); + if (pool->used-- == pool->total) + pool->next = fqid; +} +EXPORT_SYMBOL(qman_fqid_pool_free); + +u32 qman_fqid_pool_used(struct qman_fqid_pool *pool) +{ + return pool->used; +} +EXPORT_SYMBOL(qman_fqid_pool_used); + --- linux-3.13.0.orig/drivers/staging/iio/adc/ad7291.c +++ linux-3.13.0/drivers/staging/iio/adc/ad7291.c @@ -465,7 +465,7 @@ struct ad7291_platform_data *pdata = client->dev.platform_data; struct ad7291_chip_info *chip; struct iio_dev *indio_dev; - int ret = 0; + int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); if (!indio_dev) @@ -475,7 +475,7 @@ if (pdata && pdata->use_external_ref) { chip->reg = devm_regulator_get(&client->dev, "vref"); if (IS_ERR(chip->reg)) - return ret; + return PTR_ERR(chip->reg); ret = regulator_enable(chip->reg); if (ret) --- linux-3.13.0.orig/drivers/staging/iio/adc/ad799x_core.c +++ linux-3.13.0/drivers/staging/iio/adc/ad799x_core.c @@ -393,7 +393,7 @@ }, { .type = IIO_EV_TYPE_THRESH, .dir = IIO_EV_DIR_FALLING, - .mask_separate = BIT(IIO_EV_INFO_VALUE), + .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), }, { .type = IIO_EV_TYPE_THRESH, @@ -588,7 +588,8 @@ return 0; error_free_irq: - free_irq(client->irq, indio_dev); + if (client->irq > 0) + free_irq(client->irq, indio_dev); error_cleanup_ring: 
ad799x_ring_cleanup(indio_dev); error_disable_reg: --- linux-3.13.0.orig/drivers/staging/iio/adc/mxs-lradc.c +++ linux-3.13.0/drivers/staging/iio/adc/mxs-lradc.c @@ -156,11 +156,17 @@ struct completion completion; /* - * Touchscreen LRADC channels receives a private slot in the CTRL4 - * register, the slot #7. Therefore only 7 slots instead of 8 in the - * CTRL4 register can be mapped to LRADC channels when using the - * touchscreen. - * + * When the touchscreen is enabled, we give it two private virtual + * channels: #6 and #7. This means that only 6 virtual channels (instead + * of 8) will be available for buffered capture. + */ +#define TOUCHSCREEN_VCHANNEL1 7 +#define TOUCHSCREEN_VCHANNEL2 6 +#define BUFFER_VCHANS_LIMITED 0x3f +#define BUFFER_VCHANS_ALL 0xff + u8 buffer_vchans; + + /* * Furthermore, certain LRADC channels are shared between touchscreen * and/or touch-buttons and generic LRADC block. Therefore when using * either of these, these channels are not available for the regular @@ -283,6 +289,9 @@ #define LRADC_CTRL4 0x140 #define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4)) #define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4) +#define LRADC_CTRL4_LRADCSELECT(n, x) \ + (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \ + LRADC_CTRL4_LRADCSELECT_MASK(n)) #define LRADC_RESOLUTION 12 #define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1) @@ -364,6 +373,14 @@ LRADC_STATUS_TOUCH_DETECT_RAW); } +static void mxs_lradc_map_channel(struct mxs_lradc *lradc, unsigned vch, + unsigned ch) +{ + mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(vch), + LRADC_CTRL4); + mxs_lradc_reg_set(lradc, LRADC_CTRL4_LRADCSELECT(vch, ch), LRADC_CTRL4); +} + static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch) { /* @@ -391,12 +408,8 @@ LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), LRADC_DELAY(3)); - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | - LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | - LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch), LRADC_CTRL1); - /* wake us again, when the complete conversion is done */ - mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch), LRADC_CTRL1); /* * after changing the touchscreen plates setting * the signals need some initial time to settle. Start the @@ -449,12 +462,8 @@ LRADC_DELAY_DELAY(lradc->over_sample_delay - 1), LRADC_DELAY(3)); - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(2) | - LRADC_CTRL1_LRADC_IRQ(3) | LRADC_CTRL1_LRADC_IRQ(4) | - LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(ch2), LRADC_CTRL1); - /* wake us again, when the conversions are done */ - mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(ch2), LRADC_CTRL1); /* * after changing the touchscreen plates setting * the signals need some initial time to settle. 
Start the @@ -519,36 +528,6 @@ #define TS_CH_XM 4 #define TS_CH_YM 5 -static int mxs_lradc_read_ts_channel(struct mxs_lradc *lradc) -{ - u32 reg; - int val; - - reg = readl(lradc->base + LRADC_CTRL1); - - /* only channels 3 to 5 are of interest here */ - if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YP)) { - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YP) | - LRADC_CTRL1_LRADC_IRQ(TS_CH_YP), LRADC_CTRL1); - val = mxs_lradc_read_raw_channel(lradc, TS_CH_YP); - } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_XM)) { - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_XM) | - LRADC_CTRL1_LRADC_IRQ(TS_CH_XM), LRADC_CTRL1); - val = mxs_lradc_read_raw_channel(lradc, TS_CH_XM); - } else if (reg & LRADC_CTRL1_LRADC_IRQ(TS_CH_YM)) { - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(TS_CH_YM) | - LRADC_CTRL1_LRADC_IRQ(TS_CH_YM), LRADC_CTRL1); - val = mxs_lradc_read_raw_channel(lradc, TS_CH_YM); - } else { - return -EIO; - } - - mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); - mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); - - return val; -} - /* * YP(open)--+-------------+ * | |--+ @@ -592,7 +571,8 @@ mxs_lradc_reg_set(lradc, mxs_lradc_drive_x_plate(lradc), LRADC_CTRL0); lradc->cur_plate = LRADC_SAMPLE_X; - mxs_lradc_setup_ts_channel(lradc, TS_CH_YP); + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YP); + mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); } /* @@ -613,7 +593,8 @@ mxs_lradc_reg_set(lradc, mxs_lradc_drive_y_plate(lradc), LRADC_CTRL0); lradc->cur_plate = LRADC_SAMPLE_Y; - mxs_lradc_setup_ts_channel(lradc, TS_CH_XM); + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_XM); + mxs_lradc_setup_ts_channel(lradc, TOUCHSCREEN_VCHANNEL1); } /* @@ -634,7 +615,10 @@ mxs_lradc_reg_set(lradc, mxs_lradc_drive_pressure(lradc), LRADC_CTRL0); lradc->cur_plate = LRADC_SAMPLE_PRESSURE; - mxs_lradc_setup_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL1, TS_CH_YM); + mxs_lradc_map_channel(lradc, TOUCHSCREEN_VCHANNEL2, TS_CH_XP); + mxs_lradc_setup_ts_pressure(lradc, TOUCHSCREEN_VCHANNEL2, + TOUCHSCREEN_VCHANNEL1); } static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc) @@ -647,6 +631,19 @@ mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); } +static void mxs_lradc_start_touch_event(struct mxs_lradc *lradc) +{ + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, + LRADC_CTRL1); + mxs_lradc_reg_set(lradc, + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); + /* + * start with the Y-pos, because it uses nearly the same plate + * settings like the touch detection + */ + mxs_lradc_prepare_y_pos(lradc); +} + static void mxs_lradc_report_ts_event(struct mxs_lradc *lradc) { input_report_abs(lradc->ts_input, ABS_X, lradc->ts_x_pos); @@ -664,10 +661,12 @@ * start a dummy conversion to burn time to settle the signals * note: we are not interested in the conversion's value */ - mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(5)); - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ(5), LRADC_CTRL1); - mxs_lradc_reg_set(lradc, LRADC_CTRL1_LRADC_IRQ_EN(5), LRADC_CTRL1); - mxs_lradc_reg_wrt(lradc, LRADC_DELAY_TRIGGER(1 << 5) | + mxs_lradc_reg_wrt(lradc, 0, LRADC_CH(TOUCHSCREEN_VCHANNEL1)); + mxs_lradc_reg_clear(lradc, + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); + mxs_lradc_reg_wrt(lradc, + LRADC_DELAY_TRIGGER(1 << TOUCHSCREEN_VCHANNEL1) | LRADC_DELAY_KICK | LRADC_DELAY_DELAY(10), /* waste 5 ms */ LRADC_DELAY(2)); } @@ -698,59 
+697,46 @@ } /* if it is released, wait for the next touch via IRQ */ - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); + lradc->cur_plate = LRADC_TOUCH; + mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2)); + mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3)); + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ | + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1), LRADC_CTRL1); mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1); } /* touchscreen's state machine */ static void mxs_lradc_handle_touch(struct mxs_lradc *lradc) { - int val; - switch (lradc->cur_plate) { case LRADC_TOUCH: - /* - * start with the Y-pos, because it uses nearly the same plate - * settings like the touch detection - */ - if (mxs_lradc_check_touch_event(lradc)) { - mxs_lradc_reg_clear(lradc, - LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, - LRADC_CTRL1); - mxs_lradc_prepare_y_pos(lradc); - } + if (mxs_lradc_check_touch_event(lradc)) + mxs_lradc_start_touch_event(lradc); mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1); return; case LRADC_SAMPLE_Y: - val = mxs_lradc_read_ts_channel(lradc); - if (val < 0) { - mxs_lradc_enable_touch_detection(lradc); /* re-start */ - return; - } - lradc->ts_y_pos = val; + lradc->ts_y_pos = mxs_lradc_read_raw_channel(lradc, + TOUCHSCREEN_VCHANNEL1); mxs_lradc_prepare_x_pos(lradc); return; case LRADC_SAMPLE_X: - val = mxs_lradc_read_ts_channel(lradc); - if (val < 0) { - mxs_lradc_enable_touch_detection(lradc); /* re-start */ - return; - } - lradc->ts_x_pos = val; + lradc->ts_x_pos = mxs_lradc_read_raw_channel(lradc, + TOUCHSCREEN_VCHANNEL1); mxs_lradc_prepare_pressure(lradc); return; case LRADC_SAMPLE_PRESSURE: - lradc->ts_pressure = - mxs_lradc_read_ts_pressure(lradc, TS_CH_XP, TS_CH_YM); + lradc->ts_pressure = mxs_lradc_read_ts_pressure(lradc, + TOUCHSCREEN_VCHANNEL2, + TOUCHSCREEN_VCHANNEL1); mxs_lradc_complete_touch_event(lradc); return; case LRADC_SAMPLE_VALID: - val = mxs_lradc_read_ts_channel(lradc); /* ignore the value */ mxs_lradc_finish_touch_event(lradc, 1); break; } @@ -791,9 +777,9 @@ * used if doing raw sampling. */ if (lradc->soc == IMX28_LRADC) - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, + mxs_lradc_reg_clear(lradc, LRADC_CTRL1_LRADC_IRQ_EN(0), LRADC_CTRL1); - mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); + mxs_lradc_reg_clear(lradc, 0x1, LRADC_CTRL0); /* Clean the slot's previous content, then set new one. */ mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0), LRADC_CTRL4); @@ -843,9 +829,8 @@ { /* stop all interrupts from firing */ mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN | - LRADC_CTRL1_LRADC_IRQ_EN(2) | LRADC_CTRL1_LRADC_IRQ_EN(3) | - LRADC_CTRL1_LRADC_IRQ_EN(4) | LRADC_CTRL1_LRADC_IRQ_EN(5), - LRADC_CTRL1); + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL1) | + LRADC_CTRL1_LRADC_IRQ_EN(TOUCHSCREEN_VCHANNEL2), LRADC_CTRL1); /* Power-down touchscreen touch-detect circuitry. 
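The reworked mxs_lradc_handle_touch() above is an interrupt-driven state machine: each completed conversion moves cur_plate from touch detection through Y, X and pressure sampling before the event is reported and touch detection is re-armed. Below is a compact standalone sketch of that sequencing with hypothetical names and made-up sample values, intended only to make the flow easier to follow than the register-level code.

#include <stdio.h>

enum ts_state { TS_IDLE, TS_SAMPLE_Y, TS_SAMPLE_X, TS_SAMPLE_PRESSURE, TS_DONE };

struct ts { enum ts_state state; int x, y, pressure; };

/* Called once per completed conversion; 'val' is the raw sample. */
static void ts_advance(struct ts *t, int pen_down, int val)
{
	switch (t->state) {
	case TS_IDLE:
		if (pen_down)
			t->state = TS_SAMPLE_Y;	/* Y first: plate setup is closest to touch detect */
		break;
	case TS_SAMPLE_Y:
		t->y = val;
		t->state = TS_SAMPLE_X;
		break;
	case TS_SAMPLE_X:
		t->x = val;
		t->state = TS_SAMPLE_PRESSURE;
		break;
	case TS_SAMPLE_PRESSURE:
		t->pressure = val;
		t->state = TS_DONE;		/* report the event, then check the pen */
		break;
	case TS_DONE:
		t->state = pen_down ? TS_SAMPLE_Y : TS_IDLE;	/* loop or re-arm detect */
		break;
	}
}

int main(void)
{
	struct ts t = { TS_IDLE, 0, 0, 0 };
	int pen[]    = { 1,   1,   1,  1, 0 };
	int sample[] = { 0, 812, 530, 97, 0 };	/* made-up raw values */

	for (int i = 0; i < 5; i++)
		ts_advance(&t, pen[i], sample[i]);
	printf("x=%d y=%d pressure=%d\n", t.x, t.y, t.pressure);
	return 0;
}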
*/ mxs_lradc_reg_clear(lradc, mxs_lradc_plate_mask(lradc), LRADC_CTRL0); @@ -911,25 +896,31 @@ struct iio_dev *iio = data; struct mxs_lradc *lradc = iio_priv(iio); unsigned long reg = readl(lradc->base + LRADC_CTRL1); + uint32_t clr_irq = mxs_lradc_irq_mask(lradc); const uint32_t ts_irq_mask = LRADC_CTRL1_TOUCH_DETECT_IRQ | - LRADC_CTRL1_LRADC_IRQ(2) | - LRADC_CTRL1_LRADC_IRQ(3) | - LRADC_CTRL1_LRADC_IRQ(4) | - LRADC_CTRL1_LRADC_IRQ(5); + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2); if (!(reg & mxs_lradc_irq_mask(lradc))) return IRQ_NONE; - if (lradc->use_touchscreen && (reg & ts_irq_mask)) + if (lradc->use_touchscreen && (reg & ts_irq_mask)) { mxs_lradc_handle_touch(lradc); - if (iio_buffer_enabled(iio)) - iio_trigger_poll(iio->trig, iio_get_time_ns()); - else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) + /* Make sure we don't clear the next conversion's interrupt. */ + clr_irq &= ~(LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL1) | + LRADC_CTRL1_LRADC_IRQ(TOUCHSCREEN_VCHANNEL2)); + } + + if (iio_buffer_enabled(iio)) { + if (reg & lradc->buffer_vchans) + iio_trigger_poll(iio->trig, iio_get_time_ns()); + } else if (reg & LRADC_CTRL1_LRADC_IRQ(0)) { complete(&lradc->completion); + } - mxs_lradc_reg_clear(lradc, reg & mxs_lradc_irq_mask(lradc), LRADC_CTRL1); + mxs_lradc_reg_clear(lradc, reg & clr_irq, LRADC_CTRL1); return IRQ_HANDLED; } @@ -1040,9 +1031,10 @@ } if (lradc->soc == IMX28_LRADC) - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, - LRADC_CTRL1); - mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); + mxs_lradc_reg_clear(lradc, + lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, + LRADC_CTRL1); + mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); @@ -1075,10 +1067,11 @@ mxs_lradc_reg_clear(lradc, LRADC_DELAY_TRIGGER_LRADCS_MASK | LRADC_DELAY_KICK, LRADC_DELAY(0)); - mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0); + mxs_lradc_reg_clear(lradc, lradc->buffer_vchans, LRADC_CTRL0); if (lradc->soc == IMX28_LRADC) - mxs_lradc_reg_clear(lradc, LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK, - LRADC_CTRL1); + mxs_lradc_reg_clear(lradc, + lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET, + LRADC_CTRL1); kfree(lradc->buffer); mutex_unlock(&lradc->lock); @@ -1104,7 +1097,7 @@ if (lradc->use_touchbutton) rsvd_chans++; if (lradc->use_touchscreen) - rsvd_chans++; + rsvd_chans += 2; /* Test for attempts to map channels with special mode of operation. 
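The interrupt-handler hunk above acknowledges only the status bits it has actually consumed: when the touchscreen path handled the interrupt, the two virtual-channel bits (6 and 7) are masked out of clr_irq so the next conversion's completion flag is not cleared by accident. The sketch below shows that mask arithmetic with a hypothetical bit layout; it is not the driver's actual register map.

#include <stdint.h>
#include <stdio.h>

#define IRQ_TOUCH	(1u << 8)
#define IRQ_CH(n)	(1u << (n))
#define IRQ_ALL_HANDLED	(IRQ_TOUCH | 0xFFu)	/* bits this handler owns */

static uint32_t irq_ack_mask(uint32_t status, int touchscreen_busy)
{
	uint32_t clr = status & IRQ_ALL_HANDLED;

	/* Leave the touchscreen virtual channels (6 and 7 here) pending so
	 * the state machine sees their completion on the next interrupt. */
	if (touchscreen_busy)
		clr &= ~(IRQ_CH(6) | IRQ_CH(7));
	return clr;
}

int main(void)
{
	uint32_t status = IRQ_TOUCH | IRQ_CH(0) | IRQ_CH(7);

	printf("ack 0x%x (ts idle)\n", irq_ack_mask(status, 0));	/* 0x181 */
	printf("ack 0x%x (ts busy)\n", irq_ack_mask(status, 1));	/* 0x101 */
	return 0;
}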
*/ if (bitmap_intersects(mask, &rsvd_mask, LRADC_MAX_TOTAL_CHANS)) @@ -1303,17 +1296,24 @@ touch_ret = mxs_lradc_probe_touchscreen(lradc, node); + if (touch_ret == 0) + lradc->buffer_vchans = BUFFER_VCHANS_LIMITED; + else + lradc->buffer_vchans = BUFFER_VCHANS_ALL; + /* Grab all IRQ sources */ for (i = 0; i < of_cfg->irq_count; i++) { lradc->irq[i] = platform_get_irq(pdev, i); - if (lradc->irq[i] < 0) - return -EINVAL; + if (lradc->irq[i] < 0) { + ret = lradc->irq[i]; + goto err_clk; + } ret = devm_request_irq(dev, lradc->irq[i], mxs_lradc_handle_irq, 0, of_cfg->irq_name[i], iio); if (ret) - return ret; + goto err_clk; } platform_set_drvdata(pdev, iio); @@ -1333,7 +1333,7 @@ &mxs_lradc_trigger_handler, &mxs_lradc_buffer_ops); if (ret) - return ret; + goto err_clk; ret = mxs_lradc_trigger_init(iio); if (ret) @@ -1368,6 +1368,8 @@ mxs_lradc_trigger_remove(iio); err_trig: iio_triggered_buffer_cleanup(iio); +err_clk: + clk_disable_unprepare(lradc->clk); return ret; } --- linux-3.13.0.orig/drivers/staging/iio/impedance-analyzer/ad5933.c +++ linux-3.13.0/drivers/staging/iio/impedance-analyzer/ad5933.c @@ -115,6 +115,7 @@ .channel = 0, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), .address = AD5933_REG_TEMP_DATA, + .scan_index = -1, .scan_type = { .sign = 's', .realbits = 14, @@ -124,9 +125,7 @@ .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, - .extend_name = "real_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "real", .address = AD5933_REG_REAL_DATA, .scan_index = 0, .scan_type = { @@ -138,9 +137,7 @@ .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, - .extend_name = "imag_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "imag", .address = AD5933_REG_IMAG_DATA, .scan_index = 1, .scan_type = { @@ -629,7 +626,7 @@ struct iio_buffer *buffer; buffer = iio_kfifo_allocate(indio_dev); - if (buffer) + if (!buffer) return -ENOMEM; iio_device_attach_buffer(indio_dev, buffer); @@ -748,14 +745,14 @@ indio_dev->name = id->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = ad5933_channels; - indio_dev->num_channels = 1; /* only register temp0_input */ + indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); ret = ad5933_register_ring_funcs_and_init(indio_dev); if (ret) goto error_disable_reg; - /* skip temp0_input, register in0_(real|imag)_raw */ - ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2); + ret = iio_buffer_register(indio_dev, ad5933_channels, + ARRAY_SIZE(ad5933_channels)); if (ret) goto error_unreg_ring; --- linux-3.13.0.orig/drivers/staging/iio/light/tsl2x7x_core.c +++ linux-3.13.0/drivers/staging/iio/light/tsl2x7x_core.c @@ -667,9 +667,13 @@ chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] = chip->tsl2x7x_settings.prox_pulse_count; chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] = - chip->tsl2x7x_settings.prox_thres_low; + (chip->tsl2x7x_settings.prox_thres_low) & 0xFF; + chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] = + (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF; chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] = - chip->tsl2x7x_settings.prox_thres_high; + (chip->tsl2x7x_settings.prox_thres_high) & 0xFF; + chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] = + (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF; /* and make sure we're not already on */ if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) { --- linux-3.13.0.orig/drivers/staging/iio/meter/ade7758.h +++ linux-3.13.0/drivers/staging/iio/meter/ade7758.h @@ -119,7 +119,6 @@ u8 *tx; u8 *rx; struct mutex 
buf_lock; - const struct iio_chan_spec *ade7758_ring_channels; struct spi_transfer ring_xfer[4]; struct spi_message ring_msg; /* --- linux-3.13.0.orig/drivers/staging/iio/meter/ade7758_core.c +++ linux-3.13.0/drivers/staging/iio/meter/ade7758_core.c @@ -630,9 +630,6 @@ .type = IIO_VOLTAGE, .indexed = 1, .channel = 0, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), .scan_index = 0, .scan_type = { @@ -644,9 +641,6 @@ .type = IIO_CURRENT, .indexed = 1, .channel = 0, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), .scan_index = 1, .scan_type = { @@ -658,9 +652,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 0, - .extend_name = "apparent_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "apparent", .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), .scan_index = 2, .scan_type = { @@ -672,9 +664,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 0, - .extend_name = "active_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "active", .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), .scan_index = 3, .scan_type = { @@ -686,9 +676,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 0, - .extend_name = "reactive_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "reactive", .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), .scan_index = 4, .scan_type = { @@ -700,9 +688,6 @@ .type = IIO_VOLTAGE, .indexed = 1, .channel = 1, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), .scan_index = 5, .scan_type = { @@ -714,9 +699,6 @@ .type = IIO_CURRENT, .indexed = 1, .channel = 1, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), .scan_index = 6, .scan_type = { @@ -728,9 +710,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 1, - .extend_name = "apparent_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "apparent", .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), .scan_index = 7, .scan_type = { @@ -742,9 +722,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 1, - .extend_name = "active_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "active", .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), .scan_index = 8, .scan_type = { @@ -756,9 +734,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 1, - .extend_name = "reactive_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "reactive", .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), .scan_index = 9, .scan_type = { @@ -770,9 +746,6 @@ .type = IIO_VOLTAGE, .indexed = 1, .channel = 2, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), .scan_index 
= 10, .scan_type = { @@ -784,9 +757,6 @@ .type = IIO_CURRENT, .indexed = 1, .channel = 2, - .extend_name = "raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), .scan_index = 11, .scan_type = { @@ -798,9 +768,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 2, - .extend_name = "apparent_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "apparent", .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), .scan_index = 12, .scan_type = { @@ -812,9 +780,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 2, - .extend_name = "active_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "active", .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), .scan_index = 13, .scan_type = { @@ -826,9 +792,7 @@ .type = IIO_POWER, .indexed = 1, .channel = 2, - .extend_name = "reactive_raw", - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .extend_name = "reactive", .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), .scan_index = 14, .scan_type = { @@ -869,13 +833,14 @@ goto error_free_rx; } st->us = spi; - st->ade7758_ring_channels = &ade7758_channels[0]; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &ade7758_info; indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = ade7758_channels; + indio_dev->num_channels = ARRAY_SIZE(ade7758_channels); ret = ade7758_configure_ring(indio_dev); if (ret) --- linux-3.13.0.orig/drivers/staging/iio/meter/ade7758_ring.c +++ linux-3.13.0/drivers/staging/iio/meter/ade7758_ring.c @@ -85,17 +85,16 @@ **/ static int ade7758_ring_preenable(struct iio_dev *indio_dev) { - struct ade7758_state *st = iio_priv(indio_dev); unsigned channel; - if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) + if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) return -EINVAL; channel = find_first_bit(indio_dev->active_scan_mask, indio_dev->masklength); ade7758_write_waveform_type(&indio_dev->dev, - st->ade7758_ring_channels[channel].address); + indio_dev->channels[channel].address); return 0; } --- linux-3.13.0.orig/drivers/staging/iio/meter/ade7758_trigger.c +++ linux-3.13.0/drivers/staging/iio/meter/ade7758_trigger.c @@ -85,7 +85,7 @@ ret = iio_trigger_register(st->trig); /* select default trigger */ - indio_dev->trig = st->trig; + indio_dev->trig = iio_trigger_get(st->trig); if (ret) goto error_free_irq; --- linux-3.13.0.orig/drivers/staging/imx-drm/ipuv3-plane.c +++ linux-3.13.0/drivers/staging/imx-drm/ipuv3-plane.c @@ -269,7 +269,8 @@ ipu_idmac_put(ipu_plane->ipu_ch); ipu_dmfc_put(ipu_plane->dmfc); - ipu_dp_put(ipu_plane->dp); + if (ipu_plane->dp) + ipu_dp_put(ipu_plane->dp); } } --- linux-3.13.0.orig/drivers/staging/lustre/lustre/llite/dcache.c +++ linux-3.13.0/drivers/staging/lustre/lustre/llite/dcache.c @@ -278,7 +278,7 @@ inode->i_ino, inode->i_generation, inode); ll_lock_dcache(inode); - ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) { + ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) { CDEBUG(D_DENTRY, "dentry in drop %.*s (%p) parent %p " "inode %p flags %d\n", dentry->d_name.len, dentry->d_name.name, dentry, dentry->d_parent, --- linux-3.13.0.orig/drivers/staging/lustre/lustre/llite/dir.c +++ 
linux-3.13.0/drivers/staging/lustre/lustre/llite/dir.c @@ -1086,7 +1086,7 @@ break; case Q_GETQUOTA: if (((type == USRQUOTA && - uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || + !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) || (type == GRPQUOTA && !in_egroup_p(make_kgid(&init_user_ns, id)))) && (!cfs_capable(CFS_CAP_SYS_ADMIN) || --- linux-3.13.0.orig/drivers/staging/lustre/lustre/llite/llite_lib.c +++ linux-3.13.0/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -665,7 +665,7 @@ return; list_for_each(tmp, &dentry->d_subdirs) { - struct dentry *d = list_entry(tmp, struct dentry, d_u.d_child); + struct dentry *d = list_entry(tmp, struct dentry, d_child); lustre_dump_dentry(d, recur - 1); } } --- linux-3.13.0.orig/drivers/staging/lustre/lustre/llite/namei.c +++ linux-3.13.0/drivers/staging/lustre/lustre/llite/namei.c @@ -175,14 +175,14 @@ struct ll_d_hlist_node *p; ll_lock_dcache(dir); - ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_alias) { + ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) { spin_lock(&dentry->d_lock); if (!list_empty(&dentry->d_subdirs)) { struct dentry *child; list_for_each_entry_safe(child, tmp_subdir, &dentry->d_subdirs, - d_u.d_child) { + d_child) { if (child->d_inode == NULL) d_lustre_invalidate(child, 1); } @@ -363,7 +363,7 @@ discon_alias = invalid_alias = NULL; ll_lock_dcache(inode); - ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) { + ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) { LASSERT(alias != dentry); spin_lock(&alias->d_lock); @@ -953,7 +953,7 @@ { struct dentry *parent, *child; - parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_alias); + parent = ll_d_hlist_entry(dir->i_dentry, struct dentry, d_u.d_alias); child = d_lookup(parent, name); if (child) { if (child->d_inode) --- linux-3.13.0.orig/drivers/staging/lustre/lustre/llite/vvp_io.c +++ linux-3.13.0/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -601,7 +601,7 @@ return 0; } - if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) { + if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address); return -EFAULT; } --- linux-3.13.0.orig/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ linux-3.13.0/drivers/staging/lustre/lustre/obdclass/class_obd.c @@ -35,7 +35,7 @@ */ #define DEBUG_SUBSYSTEM S_CLASS -# include +# include #include #include --- linux-3.13.0.orig/drivers/staging/lustre/lustre/ptlrpc/niobuf.c +++ linux-3.13.0/drivers/staging/lustre/lustre/ptlrpc/niobuf.c @@ -179,7 +179,7 @@ LNET_UNLINK, LNET_INS_AFTER, &me_h); if (rc != 0) { CERROR("%s: LNetMEAttach failed x"LPU64"/%d: rc = %d\n", - desc->bd_export->exp_obd->obd_name, xid, + desc->bd_import->imp_obd->obd_name, xid, posted_md, rc); break; } @@ -189,7 +189,7 @@ &desc->bd_mds[posted_md]); if (rc != 0) { CERROR("%s: LNetMDAttach failed x"LPU64"/%d: rc = %d\n", - desc->bd_export->exp_obd->obd_name, xid, + desc->bd_import->imp_obd->obd_name, xid, posted_md, rc); rc2 = LNetMEUnlink(me_h); LASSERT(rc2 == 0); @@ -219,7 +219,7 @@ /* Holler if peer manages to touch buffers before he knows the xid */ if (desc->bd_md_count != total_md) CWARN("%s: Peer %s touched %d buffers while I registered\n", - desc->bd_export->exp_obd->obd_name, libcfs_id2str(peer), + desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer), total_md - desc->bd_md_count); spin_unlock(&desc->bd_lock); --- linux-3.13.0.orig/drivers/staging/mt29f_spinand/mt29f_spinand.c +++ linux-3.13.0/drivers/staging/mt29f_spinand/mt29f_spinand.c 
@@ -924,6 +924,7 @@ static const struct of_device_id spinand_dt[] = { { .compatible = "spinand,mt29f", }, + {} }; /* --- linux-3.13.0.orig/drivers/staging/netlogic/xlr_net.c +++ linux-3.13.0/drivers/staging/netlogic/xlr_net.c @@ -307,7 +307,7 @@ } static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { return (u16)smp_processor_id(); } --- linux-3.13.0.orig/drivers/staging/rtl8188eu/core/rtw_recv.c +++ linux-3.13.0/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -558,21 +558,19 @@ /* set the security information in the recv_frame */ static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame) { - u8 *psta_addr = NULL, *ptr; + u8 *psta_addr, *ptr; uint auth_alg; struct recv_frame_hdr *pfhdr; struct sta_info *psta; struct sta_priv *pstapriv; union recv_frame *prtnframe; - u16 ether_type = 0; + u16 ether_type; u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */ struct rx_pkt_attrib *pattrib; - __be16 be_tmp; _func_enter_; pstapriv = &adapter->stapriv; - psta = rtw_get_stainfo(pstapriv, psta_addr); auth_alg = adapter->securitypriv.dot11AuthAlgrthm; @@ -580,24 +578,23 @@ pfhdr = &precv_frame->u.hdr; pattrib = &pfhdr->attrib; psta_addr = pattrib->ta; + psta = rtw_get_stainfo(pstapriv, psta_addr); prtnframe = NULL; RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm=%d\n", adapter->securitypriv.dot11AuthAlgrthm)); if (auth_alg == 2) { + /* get ether_type */ + ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE; + memcpy(ðer_type, ptr, 2); + ether_type = ntohs((unsigned short)ether_type); + if ((psta != NULL) && (psta->ieee8021x_blocked)) { /* blocked */ /* only accept EAPOL frame */ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==1\n")); - prtnframe = precv_frame; - - /* get ether_type */ - ptr = ptr+pfhdr->attrib.hdrlen+pfhdr->attrib.iv_len+LLC_HEADER_SIZE; - memcpy(&be_tmp, ptr, 2); - ether_type = ntohs(be_tmp); - if (ether_type == eapol_type) { prtnframe = precv_frame; } else { --- linux-3.13.0.orig/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ linux-3.13.0/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -653,7 +653,7 @@ } static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv) + void *accel_priv, select_queue_fallback_t fallback) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; --- linux-3.13.0.orig/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ linux-3.13.0/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -53,8 +53,12 @@ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ /*=== Customer ID ===*/ /****** 8188EUS ********/ - {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */ + {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */ + {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {} /* Terminating entry */ }; --- linux-3.13.0.orig/drivers/staging/rtl8712/rtl871x_recv.c +++ linux-3.13.0/drivers/staging/rtl8712/rtl871x_recv.c @@ -254,7 +254,7 @@ struct sta_info *psta; struct sta_priv *pstapriv; union recv_frame *prtnframe; - u16 ether_type = 0; + u16 ether_type; pstapriv = &adapter->stapriv; ptr = get_recvframe_data(precv_frame); @@ -263,15 +263,14 @@ 
psta = r8712_get_stainfo(pstapriv, psta_addr); auth_alg = adapter->securitypriv.AuthAlgrthm; if (auth_alg == 2) { + /* get ether_type */ + ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE; + memcpy(ðer_type, ptr, 2); + ether_type = ntohs((unsigned short)ether_type); + if ((psta != NULL) && (psta->ieee8021x_blocked)) { /* blocked * only accept EAPOL frame */ - prtnframe = precv_frame; - /*get ether_type */ - ptr = ptr + pfhdr->attrib.hdrlen + - pfhdr->attrib.iv_len + LLC_HEADER_SIZE; - memcpy(ðer_type, ptr, 2); - ether_type = ntohs((unsigned short)ether_type); if (ether_type == 0x888e) prtnframe = precv_frame; else { --- linux-3.13.0.orig/drivers/staging/rtl8712/usb_intf.c +++ linux-3.13.0/drivers/staging/rtl8712/usb_intf.c @@ -353,6 +353,10 @@ } } +static const struct device_type wlan_type = { + .name = "wlan", +}; + /* * drv_init() - a device potentially for us * @@ -388,6 +392,7 @@ padapter->pusb_intf = pusb_intf; usb_set_intfdata(pusb_intf, pnetdev); SET_NETDEV_DEV(pnetdev, &pusb_intf->dev); + pnetdev->dev.type = &wlan_type; /* step 2. */ padapter->dvobj_init = &r8712_usb_dvobj_init; padapter->dvobj_deinit = &r8712_usb_dvobj_deinit; --- linux-3.13.0.orig/drivers/staging/rtl8821ae/Kconfig +++ linux-3.13.0/drivers/staging/rtl8821ae/Kconfig @@ -0,0 +1,11 @@ +config R8821AE + tristate "RealTek RTL8821AE Wireless LAN NIC driver" + depends on PCI && WLAN && MAC80211 + depends on m + select WIRELESS_EXT + select WEXT_PRIV + select EEPROM_93CX6 + select CRYPTO + default N + ---help--- + If built as a module, it will be called r8821ae.ko. --- linux-3.13.0.orig/drivers/staging/rtl8821ae/Makefile +++ linux-3.13.0/drivers/staging/rtl8821ae/Makefile @@ -0,0 +1,35 @@ +PCI_MAIN_OBJS := base.o \ + rc.o \ + debug.o \ + regd.o \ + efuse.o \ + cam.o \ + ps.o \ + core.o \ + stats.o \ + pci.o \ + +BT_COEXIST_OBJS:= btcoexist/halbtc8192e2ant.o\ + btcoexist/halbtc8723b1ant.o\ + btcoexist/halbtc8723b2ant.o\ + btcoexist/halbtcoutsrc.o\ + btcoexist/rtl_btc.o \ + +PCI_8821AE_HAL_OBJS:= \ + rtl8821ae/hw.o \ + rtl8821ae/table.o \ + rtl8821ae/sw.o \ + rtl8821ae/trx.o \ + rtl8821ae/led.o \ + rtl8821ae/fw.o \ + rtl8821ae/phy.o \ + rtl8821ae/rf.o \ + rtl8821ae/dm.o \ + rtl8821ae/pwrseq.o \ + rtl8821ae/pwrseqcmd.o \ + rtl8821ae/hal_btc.o \ + rtl8821ae/hal_bt_coexist.o \ + +rtl8821ae-objs += $(BT_COEXIST_OBJS) $(PCI_MAIN_OBJS) $(PCI_8821AE_HAL_OBJS) + +obj-$(CONFIG_R8821AE) += rtl8821ae.o --- linux-3.13.0.orig/drivers/staging/rtl8821ae/TODO +++ linux-3.13.0/drivers/staging/rtl8821ae/TODO @@ -0,0 +1,10 @@ +Realtek 8821AE PCI wifi driver TODO: + - remove built-in btcoexist module when the "real" one gets upstream + - remove built-in rtlwifi code by porting driver to use the "real" one + in the drivers/net/ directory. + - fix up coding style issues + +Please send any patches for this driver to: + Greg Kroah-Hartman +and the mailing list. + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/base.c +++ linux-3.13.0/drivers/staging/rtl8821ae/base.c @@ -0,0 +1,1873 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include +#include +#include "wifi.h" +#include "rc.h" +#include "base.h" +#include "efuse.h" +#include "cam.h" +#include "ps.h" +#include "regd.h" +#include "pci.h" + +/* + *NOTICE!!!: This file will be very big, we hsould + *keep it clear under follwing roles: + * + *This file include follwing part, so, if you add new + *functions into this file, please check which part it + *should includes. or check if you should add new part + *for this file: + * + *1) mac80211 init functions + *2) tx information functions + *3) functions called by core.c + *4) wq & timer callback functions + *5) frame process functions + *6) IOT functions + *7) sysfs functions + *8) vif functions + *9) ... + */ + +/********************************************************* + * + * mac80211 init functions + * + *********************************************************/ +static struct ieee80211_channel rtl_channeltable_2g[] = { + {.center_freq = 2412,.hw_value = 1,}, + {.center_freq = 2417,.hw_value = 2,}, + {.center_freq = 2422,.hw_value = 3,}, + {.center_freq = 2427,.hw_value = 4,}, + {.center_freq = 2432,.hw_value = 5,}, + {.center_freq = 2437,.hw_value = 6,}, + {.center_freq = 2442,.hw_value = 7,}, + {.center_freq = 2447,.hw_value = 8,}, + {.center_freq = 2452,.hw_value = 9,}, + {.center_freq = 2457,.hw_value = 10,}, + {.center_freq = 2462,.hw_value = 11,}, + {.center_freq = 2467,.hw_value = 12,}, + {.center_freq = 2472,.hw_value = 13,}, + {.center_freq = 2484,.hw_value = 14,}, +}; + +static struct ieee80211_channel rtl_channeltable_5g[] = { + {.center_freq = 5180,.hw_value = 36,}, + {.center_freq = 5200,.hw_value = 40,}, + {.center_freq = 5220,.hw_value = 44,}, + {.center_freq = 5240,.hw_value = 48,}, + {.center_freq = 5260,.hw_value = 52,}, + {.center_freq = 5280,.hw_value = 56,}, + {.center_freq = 5300,.hw_value = 60,}, + {.center_freq = 5320,.hw_value = 64,}, + {.center_freq = 5500,.hw_value = 100,}, + {.center_freq = 5520,.hw_value = 104,}, + {.center_freq = 5540,.hw_value = 108,}, + {.center_freq = 5560,.hw_value = 112,}, + {.center_freq = 5580,.hw_value = 116,}, + {.center_freq = 5600,.hw_value = 120,}, + {.center_freq = 5620,.hw_value = 124,}, + {.center_freq = 5640,.hw_value = 128,}, + {.center_freq = 5660,.hw_value = 132,}, + {.center_freq = 5680,.hw_value = 136,}, + {.center_freq = 5700,.hw_value = 140,}, + {.center_freq = 5745,.hw_value = 149,}, + {.center_freq = 5765,.hw_value = 153,}, + {.center_freq = 5785,.hw_value = 157,}, + {.center_freq = 5805,.hw_value = 161,}, + {.center_freq = 5825,.hw_value = 165,}, +}; + +static struct ieee80211_rate rtl_ratetable_2g[] = { + {.bitrate = 10,.hw_value = 0x00,}, + {.bitrate = 20,.hw_value = 0x01,}, + {.bitrate = 55,.hw_value = 0x02,}, + {.bitrate = 110,.hw_value = 0x03,}, + {.bitrate = 60,.hw_value = 0x04,}, + {.bitrate = 90,.hw_value = 0x05,}, + {.bitrate = 120,.hw_value = 0x06,}, + {.bitrate = 180,.hw_value = 0x07,}, + {.bitrate = 
240,.hw_value = 0x08,}, + {.bitrate = 360,.hw_value = 0x09,}, + {.bitrate = 480,.hw_value = 0x0a,}, + {.bitrate = 540,.hw_value = 0x0b,}, +}; + +static struct ieee80211_rate rtl_ratetable_5g[] = { + {.bitrate = 60,.hw_value = 0x04,}, + {.bitrate = 90,.hw_value = 0x05,}, + {.bitrate = 120,.hw_value = 0x06,}, + {.bitrate = 180,.hw_value = 0x07,}, + {.bitrate = 240,.hw_value = 0x08,}, + {.bitrate = 360,.hw_value = 0x09,}, + {.bitrate = 480,.hw_value = 0x0a,}, + {.bitrate = 540,.hw_value = 0x0b,}, +}; + +static const struct ieee80211_supported_band rtl_band_2ghz = { + .band = IEEE80211_BAND_2GHZ, + + .channels = rtl_channeltable_2g, + .n_channels = ARRAY_SIZE(rtl_channeltable_2g), + + .bitrates = rtl_ratetable_2g, + .n_bitrates = ARRAY_SIZE(rtl_ratetable_2g), + + .ht_cap = {0}, +}; + +static struct ieee80211_supported_band rtl_band_5ghz = { + .band = IEEE80211_BAND_5GHZ, + + .channels = rtl_channeltable_5g, + .n_channels = ARRAY_SIZE(rtl_channeltable_5g), + + .bitrates = rtl_ratetable_5g, + .n_bitrates = ARRAY_SIZE(rtl_ratetable_5g), + + .ht_cap = {0}, +}; + +static const u8 tid_to_ac[] = { + 2, /* IEEE80211_AC_BE */ + 3, /* IEEE80211_AC_BK */ + 3, /* IEEE80211_AC_BK */ + 2, /* IEEE80211_AC_BE */ + 1, /* IEEE80211_AC_VI */ + 1, /* IEEE80211_AC_VI */ + 0, /* IEEE80211_AC_VO */ + 0, /* IEEE80211_AC_VO */ +}; + +u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid) +{ + return tid_to_ac[tid]; +} + +static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, + struct ieee80211_sta_ht_cap *ht_cap) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + ht_cap->ht_supported = true; + ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_SGI_40 | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU; + + if (rtlpriv->rtlhal.disable_amsdu_8k) + ht_cap->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU; + + /* + *Maximum length of AMPDU that the STA can receive. 
+ *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + + /*Minimum MPDU start spacing , */ + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + + /* + *hw->wiphy->bands[IEEE80211_BAND_2GHZ] + *base on ant_num + *rx_mask: RX mask + *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7 + *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15 + *if rx_ant >=3 rx_mask[2]=0xff; + *if BW_40 rx_mask[4]=0x01; + *highest supported RX rate + */ + if (rtlpriv->dm.supp_phymode_switch) { + RT_TRACE(COMP_INIT, DBG_EMERG, ("Support phy mode switch\n")); + + ht_cap->mcs.rx_mask[0] = 0xFF; + ht_cap->mcs.rx_mask[1] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + + ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15; + } else { + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_2T2R) { + + RT_TRACE(COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n")); + + ht_cap->mcs.rx_mask[0] = 0xFF; + ht_cap->mcs.rx_mask[1] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + + ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15; + } else if (get_rf_type(rtlphy) == RF_1T1R) { + + RT_TRACE(COMP_INIT, DBG_DMESG, ("1T1R\n")); + + ht_cap->mcs.rx_mask[0] = 0xFF; + ht_cap->mcs.rx_mask[1] = 0x00; + ht_cap->mcs.rx_mask[4] = 0x01; + + ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7; + } + } +} + +static void _rtl_init_mac80211(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct ieee80211_supported_band *sband; + + + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && + rtlhal->bandset == BAND_ON_BOTH) { + /* 1: 2.4 G bands */ + /* <1> use mac->bands as mem for hw->wiphy->bands */ + sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); + + /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] + * to default value(1T1R) */ + memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz, + sizeof(struct ieee80211_supported_band)); + + /* <3> init ht cap base on ant_num */ + _rtl_init_hw_ht_capab(hw, &sband->ht_cap); + + /* <4> set mac->sband to wiphy->sband */ + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + + /* 2: 5 G bands */ + /* <1> use mac->bands as mem for hw->wiphy->bands */ + sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]); + + /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] + * to default value(1T1R) */ + memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz, + sizeof(struct ieee80211_supported_band)); + + /* <3> init ht cap base on ant_num */ + _rtl_init_hw_ht_capab(hw, &sband->ht_cap); + + /* <4> set mac->sband to wiphy->sband */ + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + } else { + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + /* <1> use mac->bands as mem for hw->wiphy->bands */ + sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]); + + /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ] + * to default value(1T1R) */ + memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), + &rtl_band_2ghz, + sizeof(struct ieee80211_supported_band)); + + /* <3> init ht cap base on ant_num */ + _rtl_init_hw_ht_capab(hw, &sband->ht_cap); + + /* <4> set mac->sband to wiphy->sband */ + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; + } else if (rtlhal->current_bandtype == BAND_ON_5G) { + /* <1> use mac->bands as mem for hw->wiphy->bands */ + sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]); + + /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ] + * to default value(1T1R) */ + 
memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), + &rtl_band_5ghz, + sizeof(struct ieee80211_supported_band)); + + /* <3> init ht cap base on ant_num */ + _rtl_init_hw_ht_capab(hw, &sband->ht_cap); + + /* <4> set mac->sband to wiphy->sband */ + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; + } else { + RT_TRACE(COMP_INIT, DBG_EMERG, ("Err BAND %d\n", + rtlhal->current_bandtype)); + } + } + /* <5> set hw caps */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_RX_INCLUDES_FCS | +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)) + IEEE80211_HW_BEACON_FILTER | +#endif + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_CONNECTION_MONITOR | + /* IEEE80211_HW_SUPPORTS_CQM_RSSI | */ + IEEE80211_HW_MFP_CAPABLE | 0; + + /* swlps or hwlps has been set in diff chip in init_sw_vars */ + if (rtlpriv->psc.b_swctrl_lps) + hw->flags |= IEEE80211_HW_SUPPORTS_PS | + IEEE80211_HW_PS_NULLFUNC_STACK | + /* IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */ + 0; +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); +#else +/**/ + hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_MESH_POINT) ; +/**/ +#endif +/**/ +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39)) + hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) + hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; +#endif + + hw->wiphy->rts_threshold = 2347; + + hw->queues = AC_MAX; + hw->extra_tx_headroom = RTL_TX_HEADER_SIZE; + + /* TODO: Correct this value for our hw */ + /* TODO: define these hard code value */ + hw->max_listen_interval = 10; + hw->max_rate_tries = 4; + /* hw->max_rates = 1; */ + hw->sta_data_size = sizeof(struct rtl_sta_info); +#ifdef VIF_TODO + hw->vif_data_size = sizeof(struct rtl_vif_info); +#endif + + /* <6> mac address */ + if (is_valid_ether_addr(rtlefuse->dev_addr)) { + SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); + } else { + u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; + get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1); + SET_IEEE80211_PERM_ADDR(hw, rtlmac); + } + +} + +static void _rtl_init_deferred_work(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* <1> timer */ + init_timer(&rtlpriv->works.watchdog_timer); + setup_timer(&rtlpriv->works.watchdog_timer, + rtl_watch_dog_timer_callback, (unsigned long)hw); + init_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer); + setup_timer(&rtlpriv->works.dualmac_easyconcurrent_retrytimer, + rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); + /* <2> work queue */ + rtlpriv->works.hw = hw; +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +/**/ + rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); +/**/ +#else + rtlpriv->works.rtl_wq = create_workqueue(rtlpriv->cfg->name); +#endif +/**/ + INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, + (void *)rtl_watchdog_wq_callback); + INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, + (void *)rtl_ips_nic_off_wq_callback); + INIT_DELAYED_WORK(&rtlpriv->works.ps_work, + (void *)rtl_swlps_wq_callback); + INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq, + (void *)rtl_swlps_rfon_wq_callback); + INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, + (void *)rtl_fwevt_wq_callback); + +} + +void 
rtl_deinit_deferred_work(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + del_timer_sync(&rtlpriv->works.watchdog_timer); + + cancel_delayed_work(&rtlpriv->works.watchdog_wq); + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + cancel_delayed_work(&rtlpriv->works.ps_work); + cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); + cancel_delayed_work(&rtlpriv->works.fwevt_wq); +} + +void rtl_init_rfkill(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + bool radio_state; + bool blocked; + u8 valid = 0; + + /*set init state to on */ + rtlpriv->rfkill.rfkill_state = 1; + wiphy_rfkill_set_hw_state(hw->wiphy, 0); + + radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid); + + if (valid) { + printk(KERN_INFO "rtlwifi: wireless switch is %s\n", + rtlpriv->rfkill.rfkill_state ? "on" : "off"); + + rtlpriv->rfkill.rfkill_state = radio_state; + + blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1; + wiphy_rfkill_set_hw_state(hw->wiphy, blocked); + } + + wiphy_rfkill_start_polling(hw->wiphy); +} + +void rtl_deinit_rfkill(struct ieee80211_hw *hw) +{ + wiphy_rfkill_stop_polling(hw->wiphy); +} + +#ifdef VIF_TODO +static void rtl_init_vif(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + INIT_LIST_HEAD(&rtlpriv->vif_priv.vif_list); + + rtlpriv->vif_priv.vifs = 0; +} +#endif + +int rtl_init_core(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); + + /* <1> init mac80211 */ + _rtl_init_mac80211(hw); + rtlmac->hw = hw; + rtlmac->link_state = MAC80211_NOLINK; + + /* <2> rate control register */ + hw->rate_control_algorithm = "rtl_rc"; + + /* + * <3> init CRDA must come after init + * mac80211 hw in _rtl_init_mac80211. + */ + if (rtl_regd_init(hw, rtl_reg_notifier)) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("REGD init failed\n")); + return 1; + } + + /* <4> locks */ + mutex_init(&rtlpriv->locks.conf_mutex); + spin_lock_init(&rtlpriv->locks.ips_lock); + spin_lock_init(&rtlpriv->locks.irq_th_lock); + spin_lock_init(&rtlpriv->locks.h2c_lock); + spin_lock_init(&rtlpriv->locks.rf_ps_lock); + spin_lock_init(&rtlpriv->locks.rf_lock); + spin_lock_init(&rtlpriv->locks.lps_lock); + spin_lock_init(&rtlpriv->locks.waitq_lock); + spin_lock_init(&rtlpriv->locks.entry_list_lock); + spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock); + spin_lock_init(&rtlpriv->locks.check_sendpkt_lock); + spin_lock_init(&rtlpriv->locks.fw_ps_lock); + spin_lock_init(&rtlpriv->locks.iqk_lock); + /* <5> init list */ + INIT_LIST_HEAD(&rtlpriv->entry_list); + + /* <6> init deferred work */ + _rtl_init_deferred_work(hw); + + /* <7> */ +#ifdef VIF_TODO + rtl_init_vif(hw); +#endif + + return 0; +} + +void rtl_deinit_core(struct ieee80211_hw *hw) +{ +} + +void rtl_init_rx_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf)); +} + +/********************************************************* + * + * tx information functions + * + *********************************************************/ +static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw, + struct rtl_tcb_desc *tcb_desc, + struct ieee80211_tx_info *info) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 rate_flag = info->control.rates[0].flags; + + tcb_desc->use_shortpreamble = false; + + /* 1M can only use Long Preamble. 
11B spec */ + if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M]) + return; + else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + tcb_desc->use_shortpreamble = true; + + return; +} + +static void _rtl_query_shortgi(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct rtl_tcb_desc *tcb_desc, + struct ieee80211_tx_info *info) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 rate_flag = info->control.rates[0].flags; + u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0; + tcb_desc->use_shortgi = false; + + if (sta == NULL) + return; + + sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; + + if (!(sta->ht_cap.ht_supported)) + return; + + if (!sgi_40 && !sgi_20) + return; + + if (mac->opmode == NL80211_IFTYPE_STATION) + bw_40 = mac->bw_40; + else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + + if ((bw_40 == true) && sgi_40) + tcb_desc->use_shortgi = true; + else if ((bw_40 == false) && sgi_20) + tcb_desc->use_shortgi = true; + + if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI)) + tcb_desc->use_shortgi = false; +} + +static void _rtl_query_protection_mode(struct ieee80211_hw *hw, + struct rtl_tcb_desc *tcb_desc, + struct ieee80211_tx_info *info) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 rate_flag = info->control.rates[0].flags; + + /* Common Settings */ + tcb_desc->b_rts_stbc = false; + tcb_desc->b_cts_enable = false; + tcb_desc->rts_sc = 0; + tcb_desc->b_rts_bw = false; + tcb_desc->b_rts_use_shortpreamble = false; + tcb_desc->b_rts_use_shortgi = false; + + if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) { + /* Use CTS-to-SELF in protection mode. */ + tcb_desc->b_rts_enable = true; + tcb_desc->b_cts_enable = true; + tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; + } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { + /* Use RTS-CTS in protection mode. 
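_rtl_query_protection_mode() above chooses between CTS-to-self and a full RTS/CTS exchange from the rate-control flags mac80211 passes down with each frame. The standalone sketch below mirrors that decision; the flag constants are local stand-ins for mac80211's IEEE80211_TX_RC_USE_CTS_PROTECT and IEEE80211_TX_RC_USE_RTS_CTS, and the result struct is hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define RC_USE_CTS_PROTECT	(1u << 0)
#define RC_USE_RTS_CTS		(1u << 1)

struct protection { bool rts_enable; bool cts_to_self; };

static struct protection query_protection(unsigned int rate_flags)
{
	struct protection p = { false, false };

	if (rate_flags & RC_USE_CTS_PROTECT) {
		/* CTS-to-self: one frame to our own address reserves the medium */
		p.rts_enable = true;
		p.cts_to_self = true;
	} else if (rate_flags & RC_USE_RTS_CTS) {
		/* full RTS/CTS handshake with the peer */
		p.rts_enable = true;
	}
	return p;
}

int main(void)
{
	struct protection p = query_protection(RC_USE_RTS_CTS);

	printf("rts=%d cts2self=%d\n", p.rts_enable, p.cts_to_self);
	return 0;
}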
*/ + tcb_desc->b_rts_enable = true; + tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; + } +} + +static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct rtl_tcb_desc *tcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_sta_info *sta_entry = NULL; + u8 ratr_index = 7; + + if (sta) { + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + ratr_index = sta_entry->ratr_index; + } + if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) { + if (mac->opmode == NL80211_IFTYPE_STATION) { + tcb_desc->ratr_index = 0; + } else if (mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + if (tcb_desc->b_multicast || tcb_desc->b_broadcast) { + tcb_desc->hw_rate = + rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; + tcb_desc->use_driver_rate = 1; + tcb_desc->ratr_index = RATR_INX_WIRELESS_MC; + } else { + tcb_desc->ratr_index = ratr_index; + } + } else if (mac->opmode == NL80211_IFTYPE_AP) { + tcb_desc->ratr_index = ratr_index; + } + } + + if (rtlpriv->dm.b_useramask) { + tcb_desc->ratr_index = ratr_index; + /* TODO we will differentiate adhoc and station futrue */ + if (mac->opmode == NL80211_IFTYPE_STATION || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + tcb_desc->mac_id = 0; + + if (mac->mode == WIRELESS_MODE_N_24G) { + tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB; + } else if (mac->mode == WIRELESS_MODE_N_5G) { + tcb_desc->ratr_index = RATR_INX_WIRELESS_NG; + } else if (mac->mode & WIRELESS_MODE_G) { + tcb_desc->ratr_index = RATR_INX_WIRELESS_GB; + } else if (mac->mode & WIRELESS_MODE_B) { + tcb_desc->ratr_index = RATR_INX_WIRELESS_B; + } else if (mac->mode & WIRELESS_MODE_A) { + tcb_desc->ratr_index = RATR_INX_WIRELESS_G; + } + } else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (NULL != sta) { + if (sta->aid > 0) { + tcb_desc->mac_id = sta->aid + 1; + } else { + tcb_desc->mac_id = 1; + } + } else { + tcb_desc->mac_id = 0; + } + } + } +} + +static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct rtl_tcb_desc *tcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + tcb_desc->b_packet_bw = false; + if (!sta) + return; + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC || + mac->opmode == NL80211_IFTYPE_MESH_POINT) { + if (!(sta->ht_cap.ht_supported) || + !(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + return; + } else if (mac->opmode == NL80211_IFTYPE_STATION) { + if (!mac->bw_40 || !(sta->ht_cap.ht_supported)) + return; + } + if (tcb_desc->b_multicast || tcb_desc->b_broadcast) + return; + + /*use legency rate, shall use 20MHz */ + if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]) + return; + + tcb_desc->b_packet_bw = true; +} + +static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 hw_rate; + + if ((get_rf_type(rtlphy) == RF_2T2R) && (sta->ht_cap.mcs.rx_mask[1]!=0)) + hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15]; + else + hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7]; + + return hw_rate; +} + +void rtl_get_tcb_desc(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *rtlmac = 
rtl_mac(rtl_priv(hw)); + struct ieee80211_hdr *hdr = rtl_get_hdr(skb); + struct ieee80211_rate *txrate; + u16 fc = rtl_get_fc(skb); + + txrate = ieee80211_get_tx_rate(hw, info); + if (txrate != NULL) + tcb_desc->hw_rate = txrate->hw_value; + + if (ieee80211_is_data(fc)) { + /* + *we set data rate INX 0 + *in rtl_rc.c if skb is special data or + *mgt which need low data rate. + */ + + /* + *So tcb_desc->hw_rate is just used for + *special data and mgt frames + */ + if (info->control.rates[0].idx == 0 || + ieee80211_is_nullfunc(fc)) { + tcb_desc->use_driver_rate = true; + tcb_desc->ratr_index = RATR_INX_WIRELESS_MC; + + tcb_desc->disable_ratefallback = 1; + } else { + /* + *because hw will nerver use hw_rate + *when tcb_desc->use_driver_rate = false + *so we never set highest N rate here, + *and N rate will all be controled by FW + *when tcb_desc->use_driver_rate = false + */ + if (sta && (sta->ht_cap.ht_supported)) { + tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw, sta); + } else { + if(rtlmac->mode == WIRELESS_MODE_B) { + tcb_desc->hw_rate = + rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M]; + } else { + tcb_desc->hw_rate = + rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]; + } + } + } + + if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) + tcb_desc->b_multicast = 1; + else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + tcb_desc->b_broadcast = 1; + + _rtl_txrate_selectmode(hw, sta, tcb_desc); + _rtl_query_bandwidth_mode(hw, sta, tcb_desc); + _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info); + _rtl_query_shortgi(hw, sta, tcb_desc, info); + _rtl_query_protection_mode(hw, tcb_desc, info); + } else { + tcb_desc->use_driver_rate = true; + tcb_desc->ratr_index = RATR_INX_WIRELESS_MC; + tcb_desc->disable_ratefallback = 1; + tcb_desc->mac_id = 0; + tcb_desc->b_packet_bw = false; + } +} +//EXPORT_SYMBOL(rtl_get_tcb_desc); + +bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 fc = rtl_get_fc(skb); + + if (rtlpriv->dm.supp_phymode_switch && + mac->link_state < MAC80211_LINKED && + (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) { + if (rtlpriv->cfg->ops->check_switch_to_dmdp) + rtlpriv->cfg->ops->check_switch_to_dmdp(hw); + } + if (ieee80211_is_auth(fc)) { + RT_TRACE(COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); + rtl_ips_nic_on(hw); + + mac->link_state = MAC80211_LINKING; + /* Dul mac */ + rtlpriv->phy.b_need_iqk = true; + + } + + return true; +} + +struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, u8 *sa, + u8 *bssid, u16 tid); +bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct ieee80211_hdr *hdr = rtl_get_hdr(skb); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 fc = rtl_get_fc(skb); + u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); + u8 category; + + if (!ieee80211_is_action(fc)) + return true; + + category = *act; + act++; + switch (category) { + case ACT_CAT_BA: + switch (*act) { + case ACT_ADDBAREQ: + if (mac->act_scanning) + return false; + + RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG, + ("%s ACT_ADDBAREQ From :%pM\n", + is_tx ? 
"Tx" : "Rx", hdr->addr2)); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("req \n"), + skb->data, skb->len); + if (!is_tx) { + struct ieee80211_sta *sta = NULL; + struct rtl_sta_info *sta_entry = NULL; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u16 capab = 0, tid = 0; + struct rtl_tid_data *tid_data; + struct sk_buff *skb_delba = NULL; + struct ieee80211_rx_status rx_status = { 0 }; + + rcu_read_lock(); + sta = rtl_find_sta(hw, hdr->addr3); + if (sta == NULL) { + RT_TRACE((COMP_SEND | COMP_RECV), + DBG_EMERG, ("sta is NULL\n")); + rcu_read_unlock(); + return true; + } + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + if (!sta_entry) { + rcu_read_unlock(); + return true; + } + capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; + tid_data = &sta_entry->tids[tid]; + if (tid_data->agg.rx_agg_state == + RTL_RX_AGG_START) { + skb_delba = rtl_make_del_ba(hw, + hdr->addr2, + hdr->addr3, + tid); + if (skb_delba) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + rx_status.freq = hw->conf.chandef.chan->center_freq; + rx_status.band = hw->conf.chandef.chan->band; +#else + rx_status.freq = hw->conf.channel->center_freq; + rx_status.band = hw->conf.channel->band; +#endif + rx_status.flag |= RX_FLAG_DECRYPTED; + rx_status.flag |= RX_FLAG_MACTIME_MPDU; + rx_status.rate_idx = 0; + rx_status.signal = 50 + 10; + memcpy(IEEE80211_SKB_RXCB(skb_delba), &rx_status, + sizeof(rx_status)); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, + ("fake del\n"), skb_delba->data, + skb_delba->len); + ieee80211_rx_irqsafe(hw, skb_delba); + } + } + rcu_read_unlock(); + } + break; + case ACT_ADDBARSP: + RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG, + ("%s ACT_ADDBARSP From :%pM\n", + is_tx ? "Tx" : "Rx", hdr->addr2)); + break; + case ACT_DELBA: + RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG, + ("ACT_ADDBADEL From :%pM\n", hdr->addr2)); + break; + } + break; + default: + break; + } + + return true; +} + +/*should call before software enc*/ +u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u16 fc = rtl_get_fc(skb); + u16 ether_type; + u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb); + const struct iphdr *ip; + + if (!ieee80211_is_data(fc)) + goto end; + + + ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + + SNAP_SIZE + PROTOC_TYPE_SIZE); + ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); + ether_type = ntohs(ether_type); + + if (ETH_P_IP == ether_type) { + if (IPPROTO_UDP == ip->protocol) { + struct udphdr *udp = (struct udphdr *)((u8 *) ip + + (ip->ihl << 2)); + if (((((u8 *) udp)[1] == 68) && + (((u8 *) udp)[3] == 67)) || + ((((u8 *) udp)[1] == 67) && + (((u8 *) udp)[3] == 68))) { + /* + * 68 : UDP BOOTP client + * 67 : UDP BOOTP server + */ + RT_TRACE((COMP_SEND | COMP_RECV), + DBG_DMESG, ("dhcp %s !!\n", + (is_tx) ? "Tx" : "Rx")); + + if (is_tx) { + rtlpriv->ra.is_special_data = true; + rtl_lps_leave(hw); + ppsc->last_delaylps_stamp_jiffies = + jiffies; + } + + return true; + } + } + } else if (ETH_P_ARP == ether_type) { + if (is_tx) { + rtlpriv->ra.is_special_data = true; + rtl_lps_leave(hw); + ppsc->last_delaylps_stamp_jiffies = jiffies; + } + + return true; + } else if (ETH_P_PAE == ether_type) { + RT_TRACE((COMP_SEND | COMP_RECV), DBG_DMESG, + ("802.1X %s EAPOL pkt!!\n", (is_tx) ? 
"Tx" : "Rx")); + + if (is_tx) { + rtlpriv->ra.is_special_data = true; + rtl_lps_leave(hw); + ppsc->last_delaylps_stamp_jiffies = jiffies; + } + + return true; + } else if (0x86DD == ether_type) { + return true; + } + +end: + rtlpriv->ra.is_special_data = false; + return false; +} + +/********************************************************* + * + * functions called by core.c + * + *********************************************************/ +int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tid_data *tid_data; + struct rtl_sta_info *sta_entry = NULL; + + if (sta == NULL) + return -EINVAL; + + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + if (!sta_entry) + return -ENXIO; + tid_data = &sta_entry->tids[tid]; + + RT_TRACE(COMP_SEND, DBG_DMESG, + ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid, + tid_data->seq_number)); + + *ssn = tid_data->seq_number; + tid_data->agg.agg_state = RTL_AGG_START; + + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + return 0; +} + +int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tid_data *tid_data; + struct rtl_sta_info *sta_entry = NULL; + + if (sta == NULL) + return -EINVAL; + + if (!sta->addr) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n")); + return -EINVAL; + } + + RT_TRACE(COMP_SEND, DBG_DMESG, + ("on ra = %pM tid = %d\n", sta->addr, tid)); + + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + tid_data = &sta_entry->tids[tid]; + sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP; + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + return 0; +} + +int rtl_rx_agg_start(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tid_data *tid_data; + struct rtl_sta_info *sta_entry = NULL; + + if (sta == NULL) + return -EINVAL; + + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + if (!sta_entry) + return -ENXIO; + tid_data = &sta_entry->tids[tid]; + + RT_TRACE(COMP_RECV, DBG_DMESG, + ("on ra = %pM tid = %d seq:%d\n", sta->addr, tid, + tid_data->seq_number)); + + tid_data->agg.rx_agg_state = RTL_RX_AGG_START; + return 0; +} + +int rtl_rx_agg_stop(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tid_data *tid_data; + struct rtl_sta_info *sta_entry = NULL; + + if (sta == NULL) + return -EINVAL; + + if (!sta->addr) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n")); + return -EINVAL; + } + + RT_TRACE(COMP_SEND, DBG_DMESG, + ("on ra = %pM tid = %d\n", sta->addr, tid)); + + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + tid_data = &sta_entry->tids[tid]; + sta_entry->tids[tid].agg.rx_agg_state = RTL_RX_AGG_STOP; + + return 0; +} +int rtl_tx_agg_oper(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tid_data *tid_data; + struct rtl_sta_info *sta_entry = NULL; + + if (sta == NULL) + return -EINVAL; + + if (!sta->addr) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("ra = NULL\n")); + return -EINVAL; + } + + RT_TRACE(COMP_SEND, DBG_DMESG, + ("on ra = %pM tid = %d\n", 
sta->addr, tid));
+
+    if (unlikely(tid >= MAX_TID_COUNT))
+        return -EINVAL;
+
+    sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+    tid_data = &sta_entry->tids[tid];
+    sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
+
+    return 0;
+}
+
+/*********************************************************
+ *
+ * wq & timer callback functions
+ *
+ *********************************************************/
+/* this function is used for roaming */
+void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+    struct rtl_priv *rtlpriv = rtl_priv(hw);
+    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+    if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+        return;
+
+    if (rtlpriv->mac80211.link_state < MAC80211_LINKED)
+        return;
+
+    /* check if this really is a beacon */
+    if (!ieee80211_is_beacon(hdr->frame_control) &&
+        !ieee80211_is_probe_resp(hdr->frame_control))
+        return;
+
+    /* min. beacon length + FCS_LEN */
+    if (skb->len <= 40 + FCS_LEN)
+        return;
+
+    /* and only beacons from the associated BSSID, please */
+    if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+        return;
+
+    rtlpriv->link_info.bcn_rx_inperiod++;
+}
+
+void rtl_watchdog_wq_callback(void *data)
+{
+    struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+                                 struct rtl_works,
+                                 watchdog_wq);
+    struct ieee80211_hw *hw = rtlworks->hw;
+    struct rtl_priv *rtlpriv = rtl_priv(hw);
+    struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+    struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+    bool b_busytraffic = false;
+    bool b_tx_busy_traffic = false;
+    bool b_rx_busy_traffic = false;
+    bool b_higher_busytraffic = false;
+    bool b_higher_busyrxtraffic = false;
+    u8 idx, tid;
+    u32 rx_cnt_inp4eriod = 0;
+    u32 tx_cnt_inp4eriod = 0;
+    u32 aver_rx_cnt_inperiod = 0;
+    u32 aver_tx_cnt_inperiod = 0;
+    u32 aver_tidtx_inperiod[MAX_TID_COUNT] = {0};
+    u32 tidtx_inp4eriod[MAX_TID_COUNT] = {0};
+    bool benter_ps = false;
+
+    if (is_hal_stop(rtlhal))
+        return;
+
+    /* <1> Determine if action frame is allowed */
+    if (mac->link_state > MAC80211_NOLINK) {
+        if (mac->cnt_after_linked < 20)
+            mac->cnt_after_linked++;
+    } else {
+        mac->cnt_after_linked = 0;
+    }
+
+    /* <2> to check if traffic busy, if
+     * busytraffic we don't change channel */
+    if (mac->link_state >= MAC80211_LINKED) {
+
+        /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
+        for (idx = 0; idx <= 2; idx++) {
+            rtlpriv->link_info.num_rx_in4period[idx] =
+                rtlpriv->link_info.num_rx_in4period[idx + 1];
+            rtlpriv->link_info.num_tx_in4period[idx] =
+                rtlpriv->link_info.num_tx_in4period[idx + 1];
+        }
+        rtlpriv->link_info.num_rx_in4period[3] =
+            rtlpriv->link_info.num_rx_inperiod;
+        rtlpriv->link_info.num_tx_in4period[3] =
+            rtlpriv->link_info.num_tx_inperiod;
+        for (idx = 0; idx <= 3; idx++) {
+            rx_cnt_inp4eriod +=
+                rtlpriv->link_info.num_rx_in4period[idx];
+            tx_cnt_inp4eriod +=
+                rtlpriv->link_info.num_tx_in4period[idx];
+        }
+        aver_rx_cnt_inperiod = rx_cnt_inp4eriod / 4;
+        aver_tx_cnt_inperiod = tx_cnt_inp4eriod / 4;
+
+        /* (2) check traffic busy */
+        if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) {
+            b_busytraffic = true;
+            if (aver_rx_cnt_inperiod > aver_tx_cnt_inperiod)
+                b_rx_busy_traffic = true;
+            else
+                b_tx_busy_traffic = false;
+        }
+
+        /* Higher Tx/Rx data. */
+        if (aver_rx_cnt_inperiod > 4000 ||
+            aver_tx_cnt_inperiod > 4000) {
+            b_higher_busytraffic = true;
+
+            /* Extremely high Rx data.
*/ + if (aver_rx_cnt_inperiod > 5000) + b_higher_busyrxtraffic = true; + } + + /* check every tid's tx traffic */ + for (tid = 0; tid <= 7; tid++) { + for (idx = 0; idx <= 2; idx++) + rtlpriv->link_info.tidtx_in4period[tid][idx] = + rtlpriv->link_info.tidtx_in4period[tid] + [idx + 1]; + rtlpriv->link_info.tidtx_in4period[tid][3] = + rtlpriv->link_info.tidtx_inperiod[tid]; + + for (idx = 0; idx <= 3; idx++) + tidtx_inp4eriod[tid] += + rtlpriv->link_info.tidtx_in4period[tid][idx]; + aver_tidtx_inperiod[tid] = tidtx_inp4eriod[tid] / 4; + if (aver_tidtx_inperiod[tid] > 5000) + rtlpriv->link_info.higher_busytxtraffic[tid] = + true; + else + rtlpriv->link_info.higher_busytxtraffic[tid] = + false; + } + + if (((rtlpriv->link_info.num_rx_inperiod + + rtlpriv->link_info.num_tx_inperiod) > 8) || + (rtlpriv->link_info.num_rx_inperiod > 2)) + benter_ps = false; + else + benter_ps = true; + + /* LeisurePS only work in infra mode. */ + if (benter_ps) + rtl_lps_enter(hw); + else + rtl_lps_leave(hw); + } + + rtlpriv->link_info.num_rx_inperiod = 0; + rtlpriv->link_info.num_tx_inperiod = 0; + for (tid = 0; tid <= 7; tid++) + rtlpriv->link_info.tidtx_inperiod[tid] = 0; + + rtlpriv->link_info.b_busytraffic = b_busytraffic; + rtlpriv->link_info.b_rx_busy_traffic = b_rx_busy_traffic; + rtlpriv->link_info.b_tx_busy_traffic = b_tx_busy_traffic; + rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic; + rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic; + + /* <3> DM */ + rtlpriv->cfg->ops->dm_watchdog(hw); + + /* <4> roaming */ + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + if ((rtlpriv->link_info.bcn_rx_inperiod + + rtlpriv->link_info.num_rx_inperiod) == 0) { + rtlpriv->link_info.roam_times++; + RT_TRACE(COMP_ERR, DBG_DMESG, ("AP off for %d s\n", + (rtlpriv->link_info.roam_times * 2))); + + /* if we can't recv beacon for 10s, + * we should reconnect this AP */ + if (rtlpriv->link_info.roam_times >= 5) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("AP off, try to reconnect now\n")); + rtlpriv->link_info.roam_times = 0; + ieee80211_connection_loss(rtlpriv->mac80211.vif); + } + } else { + rtlpriv->link_info.roam_times = 0; + } + } + rtlpriv->link_info.bcn_rx_inperiod = 0; +} + +void rtl_watch_dog_timer_callback(unsigned long data) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + queue_delayed_work(rtlpriv->works.rtl_wq, + &rtlpriv->works.watchdog_wq, 0); + + mod_timer(&rtlpriv->works.watchdog_timer, + jiffies + MSECS(RTL_WATCH_DOG_TIME)); +} +void rtl_fwevt_wq_callback(void *data) +{ + struct rtl_works *rtlworks = + container_of_dwork_rtl(data, struct rtl_works, fwevt_wq); + struct ieee80211_hw *hw = rtlworks->hw; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->cfg->ops->c2h_command_handle(hw); +} +void rtl_easy_concurrent_retrytimer_callback(unsigned long data) +{ + struct ieee80211_hw *hw = (struct ieee80211_hw *)data; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_priv *buddy_priv = rtlpriv->buddy_priv; + + if(buddy_priv == NULL) + return; + + rtlpriv->cfg->ops->dualmac_easy_concurrent(hw); +} +/********************************************************* + * + * frame process functions + * + *********************************************************/ +u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie) +{ + struct ieee80211_mgmt *mgmt = (void *)data; + u8 *pos, *end; + + pos = (u8 *)mgmt->u.beacon.variable; + end = data + len; + while (pos < end) { + if (pos + 2 + pos[1] > end) + 
return NULL; + + if (pos[0] == ie) + return pos; + + pos += 2 + pos[1]; + } + return NULL; +} + +/* when we use 2 rx ants we send IEEE80211_SMPS_OFF */ +/* when we use 1 rx ant we send IEEE80211_SMPS_STATIC */ +struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw, + enum ieee80211_smps_mode smps, + u8 *da, u8 *bssid) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct sk_buff *skb; + struct ieee80211_mgmt_compat *action_frame; + + /* 27 = header + category + action + smps mode */ + skb = dev_alloc_skb(27 + hw->extra_tx_headroom); + if (!skb) + return NULL; + + skb_reserve(skb, hw->extra_tx_headroom); + action_frame = (void *)skb_put(skb, 27); + memset(action_frame, 0, 27); + memcpy(action_frame->da, da, ETH_ALEN); + memcpy(action_frame->sa, rtlefuse->dev_addr, ETH_ALEN); + memcpy(action_frame->bssid, bssid, ETH_ALEN); + action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + action_frame->u.action.category = WLAN_CATEGORY_HT; + action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS; + switch (smps) { + case IEEE80211_SMPS_AUTOMATIC:/* 0 */ + case IEEE80211_SMPS_NUM_MODES:/* 4 */ + WARN_ON(1); + case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/ + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */ + break; + case IEEE80211_SMPS_STATIC:/* 2 */ /*MIMO_PS_STATIC*/ + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_STATIC;/* 1 */ + break; + case IEEE80211_SMPS_DYNAMIC:/* 3 */ /*MIMO_PS_DYNAMIC*/ + action_frame->u.action.u.ht_smps.smps_control = + WLAN_HT_SMPS_CONTROL_DYNAMIC;/* 3 */ + break; + } + + return skb; +} + +int rtl_send_smps_action(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + enum ieee80211_smps_mode smps) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct sk_buff *skb = NULL; + struct rtl_tcb_desc tcb_desc; + u8 bssid[ETH_ALEN] = {0}; + + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + + if (rtlpriv->mac80211.act_scanning) + goto err_free; + + if (!sta) + goto err_free; + + if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) + goto err_free; + + if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) + goto err_free; + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) + memcpy(bssid, rtlpriv->efuse.dev_addr, ETH_ALEN); + else + memcpy(bssid, rtlpriv->mac80211.bssid, ETH_ALEN); + + skb = rtl_make_smps_action(hw, smps, sta->addr, bssid); + /* this is a type = mgmt * stype = action frame */ + if (skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rtl_sta_info *sta_entry = + (struct rtl_sta_info *) sta->drv_priv; + sta_entry->mimo_ps = smps; + /* rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); */ + + info->control.rates[0].idx = 0; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + info->band = hw->conf.chandef.chan->band; +#else + info->band = hw->conf.channel->band; +#endif +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + info->control.sta = sta; + rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); +#else +/**/ + rtlpriv->intf_ops->adapter_tx(hw, sta, skb, &tcb_desc); +/**/ +#endif +/**/ + } + return 1; + +err_free: + return 0; +} +//EXPORT_SYMBOL(rtl_send_smps_action); + +/* because mac80211 have issues when can receive del ba + * so here we just make a fake del_ba if we receive a ba_req + * but rx_agg was opened to let mac80211 release some ba + * related resources, so please this 
del_ba for tx */ +struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw, + u8 *sa, u8 *bssid, u16 tid) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct sk_buff *skb; + struct ieee80211_mgmt *action_frame; + u16 params; + + /* 27 = header + category + action + smps mode */ + skb = dev_alloc_skb(34 + hw->extra_tx_headroom); + if (!skb) + return NULL; + + skb_reserve(skb, hw->extra_tx_headroom); + action_frame = (void *)skb_put(skb, 34); + memset(action_frame, 0, 34); + memcpy(action_frame->sa, sa, ETH_ALEN); + memcpy(action_frame->da, rtlefuse->dev_addr, ETH_ALEN); + memcpy(action_frame->bssid, bssid, ETH_ALEN); + action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + action_frame->u.action.category = WLAN_CATEGORY_BACK; + action_frame->u.action.u.delba.action_code = WLAN_ACTION_DELBA; + params = (u16)(1 << 11); /* bit 11 initiator */ + params |= (u16)(tid << 12); /* bit 15:12 TID number */ + + action_frame->u.action.u.delba.params = cpu_to_le16(params); + action_frame->u.action.u.delba.reason_code = + cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT); + + return skb; +} + +/********************************************************* + * + * IOT functions + * + *********************************************************/ +static bool rtl_chk_vendor_ouisub(struct ieee80211_hw *hw, + struct octet_string vendor_ie) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool matched = false; + static u8 athcap_1[] = { 0x00, 0x03, 0x7F }; + static u8 athcap_2[] = { 0x00, 0x13, 0x74 }; + static u8 broadcap_1[] = { 0x00, 0x10, 0x18 }; + static u8 broadcap_2[] = { 0x00, 0x0a, 0xf7 }; + static u8 broadcap_3[] = { 0x00, 0x05, 0xb5 }; + static u8 racap[] = { 0x00, 0x0c, 0x43 }; + static u8 ciscocap[] = { 0x00, 0x40, 0x96 }; + static u8 marvcap[] = { 0x00, 0x50, 0x43 }; + + if (memcmp(vendor_ie.octet, athcap_1, 3) == 0 || + memcmp(vendor_ie.octet, athcap_2, 3) == 0) { + rtlpriv->mac80211.vendor = PEER_ATH; + matched = true; + } else if (memcmp(vendor_ie.octet, broadcap_1, 3) == 0 || + memcmp(vendor_ie.octet, broadcap_2, 3) == 0 || + memcmp(vendor_ie.octet, broadcap_3, 3) == 0) { + rtlpriv->mac80211.vendor = PEER_BROAD; + matched = true; + } else if (memcmp(vendor_ie.octet, racap, 3) == 0) { + rtlpriv->mac80211.vendor = PEER_RAL; + matched = true; + } else if (memcmp(vendor_ie.octet, ciscocap, 3) == 0) { + rtlpriv->mac80211.vendor = PEER_CISCO; + matched = true; + } else if (memcmp(vendor_ie.octet, marvcap, 3) == 0) { + rtlpriv->mac80211.vendor = PEER_MARV; + matched = true; + } + + return matched; +} + +bool rtl_find_221_ie(struct ieee80211_hw *hw, u8 *data, + unsigned int len) +{ + struct ieee80211_mgmt *mgmt = (void *)data; + struct octet_string vendor_ie; + u8 *pos, *end; + + pos = (u8 *)mgmt->u.beacon.variable; + end = data + len; + while (pos < end) { + if (pos[0] == 221) { + vendor_ie.length = pos[1]; + vendor_ie.octet = &pos[2]; + if (rtl_chk_vendor_ouisub(hw, vendor_ie)) + return true; + } + + if (pos + 2 + pos[1] > end) + return false; + + pos += 2 + pos[1]; + } + return false; +} + +void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct ieee80211_hdr *hdr = (void *)data; + u32 vendor = PEER_UNKNOWN; + + static u8 ap3_1[3] = { 0x00, 0x14, 0xbf }; + static u8 ap3_2[3] = { 0x00, 0x1a, 0x70 }; + static u8 ap3_3[3] = { 0x00, 0x1d, 0x7e }; + static u8 ap4_1[3] = { 0x00, 0x90, 0xcc }; + static u8 ap4_2[3] = { 0x00, 0x0e, 0x2e }; + static u8 
ap4_3[3] = { 0x00, 0x18, 0x02 };
+    static u8 ap4_4[3] = { 0x00, 0x17, 0x3f };
+    static u8 ap4_5[3] = { 0x00, 0x1c, 0xdf };
+    static u8 ap5_1[3] = { 0x00, 0x1c, 0xf0 };
+    static u8 ap5_2[3] = { 0x00, 0x21, 0x91 };
+    static u8 ap5_3[3] = { 0x00, 0x24, 0x01 };
+    static u8 ap5_4[3] = { 0x00, 0x15, 0xe9 };
+    static u8 ap5_5[3] = { 0x00, 0x17, 0x9A };
+    static u8 ap5_6[3] = { 0x00, 0x18, 0xE7 };
+    static u8 ap6_1[3] = { 0x00, 0x17, 0x94 };
+    static u8 ap7_1[3] = { 0x00, 0x14, 0xa4 };
+
+    if (mac->opmode != NL80211_IFTYPE_STATION)
+        return;
+
+    if (mac->link_state == MAC80211_NOLINK) {
+        mac->vendor = PEER_UNKNOWN;
+        return;
+    }
+
+    if (mac->cnt_after_linked > 2)
+        return;
+
+    /* check if this really is a beacon */
+    if (!ieee80211_is_beacon(hdr->frame_control))
+        return;
+
+    /* min. beacon length + FCS_LEN */
+    if (len <= 40 + FCS_LEN)
+        return;
+
+    /* and only beacons from the associated BSSID, please */
+    if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
+        return;
+
+    if (rtl_find_221_ie(hw, data, len)) {
+        vendor = mac->vendor;
+    }
+
+    if ((memcmp(mac->bssid, ap5_1, 3) == 0) ||
+        (memcmp(mac->bssid, ap5_2, 3) == 0) ||
+        (memcmp(mac->bssid, ap5_3, 3) == 0) ||
+        (memcmp(mac->bssid, ap5_4, 3) == 0) ||
+        (memcmp(mac->bssid, ap5_5, 3) == 0) ||
+        (memcmp(mac->bssid, ap5_6, 3) == 0) ||
+        vendor == PEER_ATH) {
+        vendor = PEER_ATH;
+        RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ath find\n"));
+    } else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
+        (memcmp(mac->bssid, ap4_5, 3) == 0) ||
+        (memcmp(mac->bssid, ap4_1, 3) == 0) ||
+        (memcmp(mac->bssid, ap4_2, 3) == 0) ||
+        (memcmp(mac->bssid, ap4_3, 3) == 0) ||
+        vendor == PEER_RAL) {
+        RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>ral find\n"));
+        vendor = PEER_RAL;
+    } else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
+        vendor == PEER_CISCO) {
+        vendor = PEER_CISCO;
+        RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>cisco find\n"));
+    } else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
+        (memcmp(mac->bssid, ap3_2, 3) == 0) ||
+        (memcmp(mac->bssid, ap3_3, 3) == 0) ||
+        vendor == PEER_BROAD) {
+        RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>broad find\n"));
+        vendor = PEER_BROAD;
+    } else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
+        vendor == PEER_MARV) {
+        vendor = PEER_MARV;
+        RT_TRACE(COMP_MAC80211, DBG_LOUD, ("=>marv find\n"));
+    }
+
+    mac->vendor = vendor;
+}
+
+/*********************************************************
+ *
+ * sysfs functions
+ *
+ *********************************************************/
+static ssize_t rtl_show_debug_level(struct device *d,
+                    struct device_attribute *attr, char *buf)
+{
+    struct ieee80211_hw *hw = dev_get_drvdata(d);
+    struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+    return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
+}
+
+static ssize_t rtl_store_debug_level(struct device *d,
+                     struct device_attribute *attr,
+                     const char *buf, size_t count)
+{
+    struct ieee80211_hw *hw = dev_get_drvdata(d);
+    struct rtl_priv *rtlpriv = rtl_priv(hw);
+    unsigned long val;
+    int ret;
+
+    ret = strict_strtoul(buf, 0, &val);
+    if (ret) {
+        printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
+    } else {
+        rtlpriv->dbg.global_debuglevel = val;
+        printk(KERN_DEBUG "debuglevel:%x\n",
+               rtlpriv->dbg.global_debuglevel);
+    }
+
+    return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
+           rtl_show_debug_level, rtl_store_debug_level);
+
+static struct attribute *rtl_sysfs_entries[] = {
+
+    &dev_attr_debug_level.attr,
+
+    NULL
+};
+
+/*
+ * "name" is the folder name which will be
+ * put in the device directory like :
+ *
sys/devices/pci0000:00/0000:00:1c.4/ + * 0000:06:00.0/rtl_sysfs + */ +struct attribute_group rtl_attribute_group = { + .name = "rtlsysfs", + .attrs = rtl_sysfs_entries, +}; + +#ifdef VIF_TODO +/********************************************************* + * + * vif functions + * + *********************************************************/ +static inline struct ieee80211_vif * +rtl_get_vif(struct rtl_vif_info *vif_priv) +{ + return container_of((void *)vif_priv, struct ieee80211_vif, drv_priv); +} + +/* Protected by ar->mutex or RCU */ +struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_vif_info *cvif; + + list_for_each_entry_rcu(cvif, &rtlpriv->vif_priv.vif_list, list) { + if (cvif->active) + return rtl_get_vif(cvif); + } + + return NULL; +} + +static inline bool is_main_vif(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + bool ret; + + rcu_read_lock(); + ret = (rtl_get_main_vif(hw) == vif); + rcu_read_unlock(); + return ret; +} + +bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct rtl_vif_info *vif_info = (void *) vif->drv_priv; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int vif_id = -1; + + if (rtlpriv->vif_priv.vifs >= MAX_VIRTUAL_MAC) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("vif number can not bigger than %d, now vifs is:%d\n", + MAX_VIRTUAL_MAC, rtlpriv->vif_priv.vifs)); + return false; + } + + rcu_read_lock(); + vif_id = bitmap_find_free_region(&rtlpriv->vif_priv.vif_bitmap, + MAX_VIRTUAL_MAC, 0); + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("%s vid_id:%d\n", __func__, vif_id)); + + if (vif_id < 0) { + rcu_read_unlock(); + return false; + } + + BUG_ON(rtlpriv->vif_priv.vif[vif_id].id != vif_id); + vif_info->active = true; + vif_info->id = vif_id; + vif_info->enable_beacon = false; + rtlpriv->vif_priv.vifs++; + if (rtlpriv->vif_priv.vifs > 1) { + rtlpriv->psc.b_inactiveps = false; + rtlpriv->psc.b_swctrl_lps = false; + rtlpriv->psc.b_fwctrl_lps = false; + } + + list_add_tail_rcu(&vif_info->list, &rtlpriv->vif_priv.vif_list); + rcu_assign_pointer(rtlpriv->vif_priv.vif[vif_id].vif, vif); + + RT_TRACE(COMP_MAC80211, DBG_DMESG, ("vifaddress:%p %p %p\n", + rtlpriv->vif_priv.vif[vif_id].vif, vif, rtl_get_main_vif(hw))); + + rcu_read_unlock(); + + return true; +} +#endif + + +#if 0 +MODULE_AUTHOR("lizhaoming "); +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_AUTHOR("Larry Finger "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); +#endif +struct rtl_global_var global_var = {}; + +int rtl_core_module_init(void) +{ + if (rtl_rate_control_register()) + printk(KERN_DEBUG "rtl: Unable to register rtl_rc," + "use default RC !!\n"); + + /* add proc for debug */ + rtl_proc_add_topdir(); + + /* init some global vars */ + INIT_LIST_HEAD(&global_var.glb_priv_list); + spin_lock_init(&global_var.glb_list_lock); + + return 0; +} + +void rtl_core_module_exit(void) +{ + /*RC*/ + rtl_rate_control_unregister(); + + /* add proc for debug */ + rtl_proc_remove_topdir(); +} + +#if 0 +module_init(rtl_core_module_init); +module_exit(rtl_core_module_exit); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/base.h +++ linux-3.13.0/drivers/staging/rtl8821ae/base.h @@ -0,0 +1,159 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_BASE_H__ +#define __RTL_BASE_H__ + +#include "compat.h" + +enum ap_peer { + PEER_UNKNOWN = 0, + PEER_RTL = 1, + PEER_RTL_92SE = 2, + PEER_BROAD = 3, + PEER_RAL = 4, + PEER_ATH = 5, + PEER_CISCO = 6, + PEER_MARV = 7, + PEER_AIRGO = 9, + PEER_MAX = 10, +} ; + +#define RTL_DUMMY_OFFSET 0 +#define RTL_DUMMY_UNIT 8 +#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT) +#define RTL_TX_DESC_SIZE 32 +#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE) + +#define HT_AMSDU_SIZE_4K 3839 +#define HT_AMSDU_SIZE_8K 7935 + +#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */ +#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */ + +#define RTL_RATE_COUNT_LEGACY 12 +#define RTL_CHANNEL_COUNT 14 + +#define FRAME_OFFSET_FRAME_CONTROL 0 +#define FRAME_OFFSET_DURATION 2 +#define FRAME_OFFSET_ADDRESS1 4 +#define FRAME_OFFSET_ADDRESS2 10 +#define FRAME_OFFSET_ADDRESS3 16 +#define FRAME_OFFSET_SEQUENCE 22 +#define FRAME_OFFSET_ADDRESS4 24 + +#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \ + WRITEEF2BYTE(_hdr, _val) +#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \ + WRITEEF1BYTE(_hdr, _val) +#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \ + SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val) +#define SET_80211_HDR_TO_DS(_hdr, _val) \ + SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) + +#define SET_80211_PS_POLL_AID(_hdr, _val) \ + WRITEEF2BYTE(((u8*)(_hdr))+2, _val) +#define SET_80211_PS_POLL_BSSID(_hdr, _val) \ + CP_MACADDR(((u8*)(_hdr))+4, (u8*)(_val)) +#define SET_80211_PS_POLL_TA(_hdr, _val) \ + CP_MACADDR(((u8*)(_hdr))+10, (u8*)(_val)) + +#define SET_80211_HDR_DURATION(_hdr, _val) \ + WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_DURATION, _val) +#define SET_80211_HDR_ADDRESS1(_hdr, _val) \ + CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val)) +#define SET_80211_HDR_ADDRESS2(_hdr, _val) \ + CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS2, (u8*)(_val)) +#define SET_80211_HDR_ADDRESS3(_hdr, _val) \ + CP_MACADDR((u8*)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8*)(_val)) +#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \ + WRITEEF2BYTE((u8*)(_hdr)+FRAME_OFFSET_SEQUENCE, _val) + +#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \ + WRITEEF4BYTE(((u8*)(__phdr)) + 24, __val) +#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \ + WRITEEF4BYTE(((u8*)(__phdr)) + 28, __val) +#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \ + WRITEEF2BYTE(((u8*)(__phdr)) + 32, __val) +#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \ + READEF2BYTE(((u8*)(__phdr)) + 34) +#define 
SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \ + WRITEEF2BYTE(((u8*)(__phdr)) + 34, __val) +#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \ + SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \ + (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val)))) + +int rtl_init_core(struct ieee80211_hw *hw); +void rtl_deinit_core(struct ieee80211_hw *hw); +void rtl_init_rx_config(struct ieee80211_hw *hw); +void rtl_init_rfkill(struct ieee80211_hw *hw); +void rtl_deinit_rfkill(struct ieee80211_hw *hw); + +void rtl_watch_dog_timer_callback(unsigned long data); +void rtl_deinit_deferred_work(struct ieee80211_hw *hw); + +bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); +bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); +u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); + +void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb); +void rtl_watch_dog_timer_callback(unsigned long data); +int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 * ssn); +int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int rtl_tx_agg_oper(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid); +int rtl_rx_agg_start(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid); +int rtl_rx_agg_stop(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tid); +void rtl_watchdog_wq_callback(void *data); +void rtl_fwevt_wq_callback(void *data); + +void rtl_get_tcb_desc(struct ieee80211_hw *hw, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc); + +int rtl_send_smps_action(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + enum ieee80211_smps_mode smps); +u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); +void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); +u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid); +extern struct attribute_group rtl_attribute_group; +void rtl_easy_concurrent_retrytimer_callback(unsigned long data); +extern struct rtl_global_var global_var; + +#ifdef VIF_TODO +struct ieee80211_vif *rtl_get_main_vif(struct ieee80211_hw *hw); +bool rtl_set_vif_info(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +#endif +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.c @@ -0,0 +1,3976 @@ +//============================================================ +// Description: +// +// This file is for 8812a1ant Co-exist mechanism +// +// History +// 2012/11/15 Cosa first check in. 
+// +//============================================================ + +//============================================================ +// include files +//============================================================ +#include "halbt_precomp.h" +#if 1 +//============================================================ +// Global variables, these are static variables +//============================================================ +static COEX_DM_8812A_1ANT GLCoexDm8812a1Ant; +static PCOEX_DM_8812A_1ANT coex_dm=&GLCoexDm8812a1Ant; +static COEX_STA_8812A_1ANT GLCoexSta8812a1Ant; +static PCOEX_STA_8812A_1ANT coex_sta=&GLCoexSta8812a1Ant; + +const char *const GLBtInfoSrc8812a1Ant[]={ + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +//============================================================ +// local function proto type if needed +//============================================================ +//============================================================ +// local function start with halbtc8812a1ant_ +//============================================================ +#if 0 +void +halbtc8812a1ant_Reg0x550Bit3( + PBTC_COEXIST btcoexist, + BOOLEAN bSet + ) +{ + u1Byte u1tmp=0; + + u1tmp = btcoexist->btc_read_1byte(btcoexist, 0x550); + if(bSet) + { + u1tmp |= BIT3; + } + else + { + u1tmp &= ~BIT3; + } + btcoexist->btc_write_1byte(btcoexist, 0x550, u1tmp); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 1:0))); +} +#endif +u1Byte +halbtc8812a1ant_BtRssiState( + u1Byte level_num, + u1Byte rssi_thresh, + u1Byte rssi_thresh1 + ) +{ + s4Byte bt_rssi=0; + u1Byte bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if(level_num == 2) + { + if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) + { + if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT)) + { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n")); + } + else + { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n")); + } + } + else + { + if(bt_rssi < rssi_thresh) + { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n")); + } + else + { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n")); + } + } + } + else if(level_num == 3) + { + if(rssi_thresh > rssi_thresh1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n")); + return coex_sta->pre_bt_rssi_state; + } + + if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) + { + if(bt_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT)) + { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n")); + } + else + { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n")); + } + } + else if( (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) + { + if(bt_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT)) + { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 
("[BTCoex], BT Rssi state switch to High\n")); + } + else if(bt_rssi < rssi_thresh) + { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n")); + } + else + { + bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n")); + } + } + else + { + if(bt_rssi < rssi_thresh1) + { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n")); + } + else + { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n")); + } + } + } + + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +u1Byte +halbtc8812a1ant_WifiRssiState( + PBTC_COEXIST btcoexist, + u1Byte index, + u1Byte level_num, + u1Byte rssi_thresh, + u1Byte rssi_thresh1 + ) +{ + s4Byte wifi_rssi=0; + u1Byte wifi_rssi_state; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if(level_num == 2) + { + if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) + { + if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT)) + { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n")); + } + else + { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n")); + } + } + else + { + if(wifi_rssi < rssi_thresh) + { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n")); + } + else + { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n")); + } + } + } + else if(level_num == 3) + { + if(rssi_thresh > rssi_thresh1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n")); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_LOW)) + { + if(wifi_rssi >= (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT)) + { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n")); + } + else + { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n")); + } + } + else if( (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == BTC_RSSI_STATE_STAY_MEDIUM)) + { + if(wifi_rssi >= (rssi_thresh1+BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT)) + { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n")); + } + else if(wifi_rssi < rssi_thresh) + { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n")); + } + else + { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n")); + } + } + else + { + if(wifi_rssi < rssi_thresh1) + { + wifi_rssi_state = 
BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n")); + } + else + { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n")); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +void +halbtc8812a1ant_MonitorBtEnableDisable( + PBTC_COEXIST btcoexist + ) +{ + static BOOLEAN pre_bt_disabled=false; + static u4Byte bt_disable_cnt=0; + BOOLEAN bt_active=true, bt_disable_by68=false, bt_disabled=false; + u4Byte u4_tmp=0; + + // This function check if bt is disabled + + if( coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) + { + bt_active = false; + } + if( coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) + { + bt_active = false; + } + if(bt_active) + { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n")); + } + else + { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", + bt_disable_cnt)); + if(bt_disable_cnt >= 2 ||bt_disable_by68) + { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n")); + } + } + if(pre_bt_disabled != bt_disabled) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled":"enabled"), + (bt_disabled ? 
"disabled":"enabled"))); + pre_bt_disabled = bt_disabled; + if(!bt_disabled) + { + } + else + { + } + } +} + +void +halbtc8812a1ant_MonitorBtCtr( + PBTC_COEXIST btcoexist + ) +{ + u4Byte reg_hp_tx_rx, reg_lp_tx_rx, u4_tmp; + u4Byte reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0; + u1Byte u1_tmp; + + reg_hp_tx_rx = 0x770; + reg_lp_tx_rx = 0x774; + + u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_tx_rx); + reg_hp_tx = u4_tmp & bMaskLWord; + reg_hp_rx = (u4_tmp & bMaskHWord)>>16; + + u4_tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_tx_rx); + reg_lp_tx = u4_tmp & bMaskLWord; + reg_lp_rx = (u4_tmp & bMaskHWord)>>16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", + reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", + reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx)); + + // reset counter + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +void +halbtc8812a1ant_QueryBtInfo( + PBTC_COEXIST btcoexist + ) +{ + u1Byte dataLen=3; + u1Byte buf[5] = {0}; + static u4Byte btInfoCnt=0; + + if(!btInfoCnt || + (coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP]-btInfoCnt)>2) + { + buf[0] = dataLen; + buf[1] = 0x1; // polling enable, 1=enable, 0=disable + buf[2] = 0x2; // polling time in seconds + buf[3] = 0x1; // auto report enable, 1=enable, 0=disable + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_INFO, (PVOID)&buf[0]); + } + btInfoCnt = coex_sta->bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_BT_RSP]; +} +u1Byte +halbtc8812a1ant_ActionAlgorithm( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN bt_hs_on=false; + u1Byte algorithm=BT_8812A_1ANT_COEX_ALGO_UNDEFINED; + u1Byte num_of_diff_profile=0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if(!stack_info->bt_link_exist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n")); + return algorithm; + } + + if(stack_info->sco_exist) + num_of_diff_profile++; + if(stack_info->hid_exist) + num_of_diff_profile++; + if(stack_info->pan_exist) + num_of_diff_profile++; + if(stack_info->a2dp_exist) + num_of_diff_profile++; + + if(num_of_diff_profile == 1) + { + if(stack_info->sco_exist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_SCO; + } + else + { + if(stack_info->hid_exist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID; + } + else if(stack_info->a2dp_exist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP; + } + else if(stack_info->pan_exist) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANHS; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR; + } + } + } + } + else if(num_of_diff_profile == 2) + { + if(stack_info->sco_exist) + { + if(stack_info->hid_exist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID; + } + else if(stack_info->a2dp_exist) + { 
+ BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_SCO; + } + else if(stack_info->pan_exist) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_SCO; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + else + { + if( stack_info->hid_exist && + stack_info->a2dp_exist ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP; + } + else if( stack_info->hid_exist && + stack_info->pan_exist ) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + else if( stack_info->pan_exist && + stack_info->a2dp_exist ) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } + else if(num_of_diff_profile == 3) + { + if(stack_info->sco_exist) + { + if( stack_info->hid_exist && + stack_info->a2dp_exist ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID; + } + else if( stack_info->hid_exist && + stack_info->pan_exist ) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + else if( stack_info->pan_exist && + stack_info->a2dp_exist ) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_SCO; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + else + { + if( stack_info->hid_exist && + stack_info->pan_exist && + stack_info->a2dp_exist ) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } + else if(num_of_diff_profile >= 3) + { + if(stack_info->sco_exist) + { + if( stack_info->hid_exist && + stack_info->pan_exist && + stack_info->a2dp_exist ) + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! 
SCO + HID + A2DP + PAN(HS)\n")); + + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n")); + algorithm = BT_8812A_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +BOOLEAN +halbtc8812a1ant_NeedToDecBtPwr( + PBTC_COEXIST btcoexist + ) +{ + BOOLEAN ret=false; + BOOLEAN bt_hs_on=false, wifi_connected=false; + s4Byte bt_hs_rssi=0; + + if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on)) + return false; + if(!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected)) + return false; + if(!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) + return false; + + if(wifi_connected) + { + if(bt_hs_on) + { + if(bt_hs_rssi > 37) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n")); + ret = true; + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n")); + ret = true; + } + } + + return ret; +} + +void +halbtc8812a1ant_SetFwDacSwingLevel( + PBTC_COEXIST btcoexist, + u1Byte dac_swing_lvl + ) +{ + u1Byte h2c_parameter[1] ={0}; + + // There are several type of dacswing + // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 + h2c_parameter[0] = dac_swing_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0])); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +void +halbtc8812a1ant_SetFwDecBtPwr( + PBTC_COEXIST btcoexist, + BOOLEAN dec_bt_pwr + ) +{ + u1Byte dataLen=3; + u1Byte buf[5] = {0}; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s\n", + (dec_bt_pwr? "Yes!!":"No!!"))); + + buf[0] = dataLen; + buf[1] = 0x3; // OP_Code + buf[2] = 0x1; // OP_Code_Length + if(dec_bt_pwr) + buf[3] = 0x1; // OP_Code_Content + else + buf[3] = 0x0; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]); +} + +void +halbtc8812a1ant_DecBtPwr( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN dec_bt_pwr + ) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n", + (force_exec? "force to":""), ((dec_bt_pwr)? "ON":"OFF"))); + coex_dm->cur_dec_bt_pwr = dec_bt_pwr; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_dec_bt_pwr=%d, cur_dec_bt_pwr=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr)); + + if(coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) + return; + } + halbtc8812a1ant_SetFwDecBtPwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +void +halbtc8812a1ant_SetFwBtLnaConstrain( + PBTC_COEXIST btcoexist, + BOOLEAN bt_lna_cons_on + ) +{ + u1Byte dataLen=3; + u1Byte buf[5] = {0}; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s\n", + (bt_lna_cons_on? "ON!!":"OFF!!"))); + + buf[0] = dataLen; + buf[1] = 0x2; // OP_Code + buf[2] = 0x1; // OP_Code_Length + if(bt_lna_cons_on) + buf[3] = 0x1; // OP_Code_Content + else + buf[3] = 0x0; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]); +} + +void +halbtc8812a1ant_SetBtLnaConstrain( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN bt_lna_cons_on + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n", + (force_exec? "force":""), ((bt_lna_cons_on)? 
"ON":"OFF"))); + coex_dm->bCurBtLnaConstrain = bt_lna_cons_on; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n", + coex_dm->bPreBtLnaConstrain, coex_dm->bCurBtLnaConstrain)); + + if(coex_dm->bPreBtLnaConstrain == coex_dm->bCurBtLnaConstrain) + return; + } + halbtc8812a1ant_SetFwBtLnaConstrain(btcoexist, coex_dm->bCurBtLnaConstrain); + + coex_dm->bPreBtLnaConstrain = coex_dm->bCurBtLnaConstrain; +} + +void +halbtc8812a1ant_SetFwBtPsdMode( + PBTC_COEXIST btcoexist, + u1Byte bt_psd_mode + ) +{ + u1Byte dataLen=3; + u1Byte buf[5] = {0}; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x\n", + bt_psd_mode)); + + buf[0] = dataLen; + buf[1] = 0x4; // OP_Code + buf[2] = 0x1; // OP_Code_Length + buf[3] = bt_psd_mode; // OP_Code_Content + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]); +} + + +void +halbtc8812a1ant_SetBtPsdMode( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + u1Byte bt_psd_mode + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n", + (force_exec? "force":""), bt_psd_mode)); + coex_dm->bCurBtPsdMode = bt_psd_mode; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n", + coex_dm->bPreBtPsdMode, coex_dm->bCurBtPsdMode)); + + if(coex_dm->bPreBtPsdMode == coex_dm->bCurBtPsdMode) + return; + } + halbtc8812a1ant_SetFwBtPsdMode(btcoexist, coex_dm->bCurBtPsdMode); + + coex_dm->bPreBtPsdMode = coex_dm->bCurBtPsdMode; +} + + +void +halbtc8812a1ant_SetBtAutoReport( + PBTC_COEXIST btcoexist, + BOOLEAN enable_auto_report + ) +{ +#if 0 + u1Byte h2c_parameter[1] ={0}; + + h2c_parameter[0] = 0; + + if(enable_auto_report) + { + h2c_parameter[0] |= BIT0; + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", + (enable_auto_report? "Enabled!!":"Disabled!!"), h2c_parameter[0])); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +#else + +#endif +} + +void +halbtc8812a1ant_BtAutoReport( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN enable_auto_report + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n", + (force_exec? "force to":""), ((enable_auto_report)? "Enabled":"Disabled"))); + coex_dm->cur_bt_auto_report = enable_auto_report; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_bt_auto_report=%d, cur_bt_auto_report=%d\n", + coex_dm->pre_bt_auto_report, coex_dm->cur_bt_auto_report)); + + if(coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8812a1ant_SetBtAutoReport(btcoexist, coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +void +halbtc8812a1ant_FwDacSwingLvl( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + u1Byte fw_dac_swing_lvl + ) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec? 
"force to":""), fw_dac_swing_lvl)); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_fw_dac_swing_lvl=%d, cur_fw_dac_swing_lvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, coex_dm->cur_fw_dac_swing_lvl)); + + if(coex_dm->pre_fw_dac_swing_lvl == coex_dm->cur_fw_dac_swing_lvl) + return; + } + + halbtc8812a1ant_SetFwDacSwingLevel(btcoexist, coex_dm->cur_fw_dac_swing_lvl); + + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +void +halbtc8812a1ant_SetSwRfRxLpfCorner( + PBTC_COEXIST btcoexist, + BOOLEAN rx_rf_shrink_on + ) +{ + if(rx_rf_shrink_on) + { + //Shrink RF Rx LPF corner + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n")); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7); + } + else + { + //Resume RF Rx LPF corner + // After initialized, we can use coex_dm->bt_rf0x1e_backup + if(btcoexist->bInitilized) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n")); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff, coex_dm->bt_rf0x1e_backup); + } + } +} + +void +halbtc8812a1ant_RfShrink( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN rx_rf_shrink_on + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec? "force to":""), ((rx_rf_shrink_on)? "ON":"OFF"))); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_rf_rx_lpf_shrink=%d, cur_rf_rx_lpf_shrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, coex_dm->cur_rf_rx_lpf_shrink)); + + if(coex_dm->pre_rf_rx_lpf_shrink == coex_dm->cur_rf_rx_lpf_shrink) + return; + } + halbtc8812a1ant_SetSwRfRxLpfCorner(btcoexist, coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +void +halbtc8812a1ant_SetSwPenaltyTxRateAdaptive( + PBTC_COEXIST btcoexist, + BOOLEAN low_penalty_ra + ) +{ + u1Byte u1_tmp; + + u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4fd); + u1_tmp |= BIT0; + if(low_penalty_ra) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n")); + u1_tmp &= ~BIT2; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n")); + u1_tmp |= BIT2; + } + + btcoexist->btc_write_1byte(btcoexist, 0x4fd, u1_tmp); +} + +void +halbtc8812a1ant_LowPenaltyRa( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN low_penalty_ra + ) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec? "force to":""), ((low_penalty_ra)? 
"ON":"OFF"))); + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_low_penalty_ra=%d, cur_low_penalty_ra=%d\n", + coex_dm->pre_low_penalty_ra, coex_dm->cur_low_penalty_ra)); + + if(coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + halbtc8812a1ant_SetSwPenaltyTxRateAdaptive(btcoexist, coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +void +halbtc8812a1ant_SetDacSwingReg( + PBTC_COEXIST btcoexist, + u4Byte level + ) +{ + u1Byte val=(u1Byte)level; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level)); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val); +} + +void +halbtc8812a1ant_SetSwFullTimeDacSwing( + PBTC_COEXIST btcoexist, + BOOLEAN sw_dac_swing_on, + u4Byte sw_dac_swing_lvl + ) +{ + if(sw_dac_swing_on) + { + halbtc8812a1ant_SetDacSwingReg(btcoexist, sw_dac_swing_lvl); + } + else + { + halbtc8812a1ant_SetDacSwingReg(btcoexist, 0x18); + } +} + + +void +halbtc8812a1ant_DacSwing( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN dac_swing_on, + u4Byte dac_swing_lvl + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", + (force_exec? "force to":""), ((dac_swing_on)? "ON":"OFF"), dac_swing_lvl)); + coex_dm->cur_dac_swing_on = dac_swing_on; + coex_dm->cur_dac_swing_lvl = dac_swing_lvl; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_dac_swing_on=%d, pre_dac_swing_lvl=0x%x, cur_dac_swing_on=%d, cur_dac_swing_lvl=0x%x\n", + coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl)); + + if( (coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl) ) + return; + } + delay_ms(30); + halbtc8812a1ant_SetSwFullTimeDacSwing(btcoexist, dac_swing_on, dac_swing_lvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +void +halbtc8812a1ant_SetAdcBackOff( + PBTC_COEXIST btcoexist, + BOOLEAN adc_back_off + ) +{ + if(adc_back_off) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n")); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n")); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); + } +} + +void +halbtc8812a1ant_AdcBackOff( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN adc_back_off + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec? "force to":""), ((adc_back_off)? 
"ON":"OFF"))); + coex_dm->cur_adc_back_off = adc_back_off; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_adc_back_off=%d, cur_adc_back_off=%d\n", + coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off)); + + if(coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) + return; + } + halbtc8812a1ant_SetAdcBackOff(btcoexist, coex_dm->cur_adc_back_off); + + coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off; +} + +void +halbtc8812a1ant_SetAgcTable( + PBTC_COEXIST btcoexist, + BOOLEAN agc_table_en + ) +{ + u1Byte rssi_adjust_val=0; + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); + if(agc_table_en) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n")); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58); + rssi_adjust_val = 8; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n")); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258); + } + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0); + + // set rssi_adjust_val for wifi module. + btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssi_adjust_val); +} + + +void +halbtc8812a1ant_AgcTable( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN agc_table_en + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n", + (force_exec? "force to":""), ((agc_table_en)? "Enable":"Disable"))); + coex_dm->cur_agc_table_en = agc_table_en; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_agc_table_en=%d, cur_agc_table_en=%d\n", + coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en)); + + if(coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) + return; + } + halbtc8812a1ant_SetAgcTable(btcoexist, agc_table_en); + + coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; +} + +void +halbtc8812a1ant_SetCoexTable( + PBTC_COEXIST btcoexist, + u4Byte val0x6c0, + u4Byte val0x6c4, + u4Byte val0x6c8, + u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0)); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4)); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8)); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc)); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +void +halbtc8812a1ant_CoexTable( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + u4Byte val0x6c0, + u4Byte val0x6c4, + u4Byte val0x6c8, + u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + (force_exec? 
"force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc)); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], pre_val0x6c0=0x%x, pre_val0x6c4=0x%x, pre_val0x6c8=0x%x, pre_val0x6cc=0x%x !!\n", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], cur_val0x6c0=0x%x, cur_val0x6c4=0x%x, cur_val0x6c8=0x%x, cur_val0x6cc=0x%x !!\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc)); + + if( (coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc) ) + return; + } + halbtc8812a1ant_SetCoexTable(btcoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +void +halbtc8812a1ant_SetFwIgnoreWlanAct( + PBTC_COEXIST btcoexist, + BOOLEAN enable + ) +{ + u1Byte dataLen=3; + u1Byte buf[5] = {0}; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], %s BT Ignore Wlan_Act\n", + (enable? "Enable":"Disable"))); + + buf[0] = dataLen; + buf[1] = 0x1; // OP_Code + buf[2] = 0x1; // OP_Code_Length + if(enable) + buf[3] = 0x1; // OP_Code_Content + else + buf[3] = 0x0; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]); +} + +void +halbtc8812a1ant_IgnoreWlanAct( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN enable + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec? "force to":""), (enable? 
"ON":"OFF"))); + coex_dm->cur_ignore_wlan_act = enable; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n", + coex_dm->pre_ignore_wlan_act, coex_dm->cur_ignore_wlan_act)); + + if(coex_dm->pre_ignore_wlan_act == coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8812a1ant_SetFwIgnoreWlanAct(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +void +halbtc8812a1ant_SetFwPstdma( + PBTC_COEXIST btcoexist, + u1Byte byte1, + u1Byte byte2, + u1Byte byte3, + u1Byte byte4, + u1Byte byte5 + ) +{ + u1Byte h2c_parameter[5] ={0}; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1]<<24|h2c_parameter[2]<<16|h2c_parameter[3]<<8|h2c_parameter[4])); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +void +halbtc8812a1ant_SetLpsRpwm( + PBTC_COEXIST btcoexist, + u1Byte lps_val, + u1Byte rpwm_val + ) +{ + u1Byte lps=lps_val; + u1Byte rpwm=rpwm_val; + + btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps); + btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm); + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, NULL); +} + +void +halbtc8812a1ant_LpsRpwm( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + u1Byte lps_val, + u1Byte rpwm_val + ) +{ + BOOLEAN bForceExecPwrCmd=false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n", + (force_exec? 
"force to":""), lps_val, rpwm_val)); + coex_dm->cur_lps = lps_val; + coex_dm->cur_rpwm = rpwm_val; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_lps/cur_lps=0x%x/0x%x, pre_rpwm/cur_rpwm=0x%x/0x%x!!\n", + coex_dm->pre_lps, coex_dm->cur_lps, coex_dm->pre_rpwm, coex_dm->cur_rpwm)); + + if( (coex_dm->pre_lps == coex_dm->cur_lps) && + (coex_dm->pre_rpwm == coex_dm->cur_rpwm) ) + { + return; + } + } + halbtc8812a1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val); + + coex_dm->pre_lps = coex_dm->cur_lps; + coex_dm->pre_rpwm = coex_dm->cur_rpwm; +} + +void +halbtc8812a1ant_SwMechanism1( + PBTC_COEXIST btcoexist, + BOOLEAN shrink_rx_lpf, + BOOLEAN low_penalty_ra, + BOOLEAN limited_dig, + BOOLEAN bt_lna_constrain + ) +{ + //halbtc8812a1ant_RfShrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); + //halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, low_penalty_ra); + + //no limited DIG + //halbtc8812a1ant_SetBtLnaConstrain(btcoexist, NORMAL_EXEC, bt_lna_constrain); +} + +void +halbtc8812a1ant_SwMechanism2( + PBTC_COEXIST btcoexist, + BOOLEAN agc_table_shift, + BOOLEAN adc_back_off, + BOOLEAN sw_dac_swing, + u4Byte dac_swing_lvl + ) +{ + //halbtc8812a1ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift); + //halbtc8812a1ant_AdcBackOff(btcoexist, NORMAL_EXEC, adc_back_off); + //halbtc8812a1ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, dac_swing_lvl); +} + +void +halbtc8812a1ant_PsTdma( + PBTC_COEXIST btcoexist, + BOOLEAN force_exec, + BOOLEAN turn_on, + u1Byte type + ) +{ + BOOLEAN bTurnOnByCnt=false; + u1Byte psTdmaTypeByCnt=0, rssi_adjust_val=0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec? "force to":""), (turn_on? "ON":"OFF"), type)); + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if(!force_exec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma)); + + if( (coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma) ) + return; + } + if(turn_on) + { + switch(type) + { + default: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x58); + break; + case 1: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x48); + rssi_adjust_val = 11; + break; + case 2: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x12, 0x0, 0x48); + rssi_adjust_val = 14; + break; + case 3: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x40); + break; + case 4: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0); + rssi_adjust_val = 17; + break; + case 5: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x61, 0x15, 0x3, 0x31, 0x0); + break; + case 6: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x0, 0x0); + break; + case 7: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xc, 0x5, 0x0, 0x0); + break; + case 8: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0); + break; + case 9: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0xa, 0xa, 0x0, 0x48); + rssi_adjust_val = 18; + break; + case 10: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0xa, 0x0, 0x40); + break; + case 11: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x5, 0x5, 0x0, 0x48); + rssi_adjust_val = 20; + break; + case 12: + 
halbtc8812a1ant_SetFwPstdma(btcoexist, 0xeb, 0xa, 0x3, 0x31, 0x18); + break; + + case 15: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0xa, 0x3, 0x8, 0x0); + break; + case 16: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x10, 0x0); + rssi_adjust_val = 18; + break; + + case 18: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x25, 0x3, 0x10, 0x0); + rssi_adjust_val = 14; + break; + + case 20: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x25, 0x25, 0x0, 0x0); + break; + case 21: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x20, 0x3, 0x10, 0x40); + break; + case 22: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x13, 0x8, 0x8, 0x0, 0x40); + break; + case 23: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 24: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x15, 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 25: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 26: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0xa, 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 27: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x25, 0x3, 0x31, 0x98); + rssi_adjust_val = 22; + break; + case 28: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x69, 0x25, 0x3, 0x31, 0x0); + break; + case 29: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0x1a, 0x1a, 0x1, 0x8); + break; + case 30: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x93, 0x15, 0x3, 0x14, 0x0); + break; + case 31: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0, 0x58); + break; + case 32: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xab, 0xa, 0x3, 0x31, 0x88); + break; + case 33: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xa3, 0x25, 0x3, 0x30, 0x88); + break; + case 34: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x8); + break; + case 35: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x8); + break; + case 36: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0xd3, 0x12, 0x3, 0x14, 0x58); + break; + } + } + else + { + // disable PS tdma + switch(type) + { + case 8: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, 0x0, 0x0); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4); + break; + case 0: + default: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0); + delay_ms(5); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); + break; + case 9: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x0, 0x0); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4); + break; + case 10: + halbtc8812a1ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, 0x8, 0x0); + delay_ms(5); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); + break; + } + } + rssi_adjust_val =0; + btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssi_adjust_val); + + // update pre state + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +void +halbtc8812a1ant_CoexAllOff( + PBTC_COEXIST btcoexist + ) +{ + // fw all off + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + // sw all off + halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false); + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + + // hw all off + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); +} + +void +halbtc8812a1ant_WifiParaAdjust( + PBTC_COEXIST btcoexist, + BOOLEAN enable + ) +{ + if(enable) + { + 
halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, true); + } + else + { + halbtc8812a1ant_LowPenaltyRa(btcoexist, NORMAL_EXEC, false); + } +} + +BOOLEAN +halbtc8812a1ant_IsCommonAction( + PBTC_COEXIST btcoexist + ) +{ + BOOLEAN common=false, wifi_connected=false, wifi_busy=false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + //halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + + if(!wifi_connected && + BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n")); + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false); + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + common = true; + } + else if(wifi_connected && + (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n")); + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + + halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false); + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + common = true; + } + else if(!wifi_connected && + (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n")); + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false); + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + common = true; + } + else if(wifi_connected && + (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n")); + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true); + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + common = true; + } + else if(!wifi_connected && + (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE != coex_dm->bt_status) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n")); + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false); + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + common = true; + } + else + { + halbtc8812a1ant_SwMechanism1(btcoexist,true,true,true,true); + + common = false; + } + + return common; +} + + +void +halbtc8812a1ant_TdmaDurationAdjustForAcl( + PBTC_COEXIST btcoexist + ) +{ + static s4Byte up,dn,m,n,wait_count; + s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration + u1Byte retry_count=0, bt_info_ext; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], halbtc8812a1ant_TdmaDurationAdjustForAcl()\n")); + if(coex_dm->reset_tdma_adjust) + { + coex_dm->reset_tdma_adjust = false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run 
TdmaDurationAdjust()!!\n")); + + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + //============ + up = 0; + dn = 0; + m = 1; + n= 3; + result = 0; + wait_count = 0; + } + else + { + // acquire the BT TRx retry count from BT_Info byte2 + retry_count = coex_sta->bt_retry_cnt; + bt_info_ext = coex_sta->bt_info_ext; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retry_count = %d\n", retry_count)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", + up, dn, m, n, wait_count)); + result = 0; + wait_count++; + + if(retry_count == 0) // no retry in the last 2-second duration + { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if(up >= n) // if retry count is 0 for n consecutive 2-second intervals, widen the WiFi duration + { + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n")); + } + } + else if (retry_count <= 3) // <=3 retry in the last 2-second duration + { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) // if retry count < 3 for 2 consecutive 2-second intervals, narrow the WiFi duration + { + if (wait_count <= 2) + m++; // avoid bouncing back and forth between two levels + else + m = 1; + + if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n")); + } + } + else // retry count > 3: if retry count > 3 even once, narrow the WiFi duration + { + if (wait_count == 1) + m++; // avoid bouncing back and forth between two levels + else + m = 1; + + if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n")); + } + + if(result == -1) + { + if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } + else if(coex_dm->cur_ps_tdma == 1) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } + else if(coex_dm->cur_ps_tdma == 2) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } + else if(coex_dm->cur_ps_tdma == 9) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } + } + else if(result == 1) + { + if( (BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) ||(coex_dm->cur_ps_tdma == 2)) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } + else if(coex_dm->cur_ps_tdma == 11) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } + else if(coex_dm->cur_ps_tdma == 9) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } + else if(coex_dm->cur_ps_tdma == 2) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 1); + coex_dm->ps_tdma_du_adj_type = 1; + } + } + + if( coex_dm->cur_ps_tdma != 1 && + coex_dm->cur_ps_tdma != 2 && + coex_dm->cur_ps_tdma != 9 && + coex_dm->cur_ps_tdma != 11 ) + { + // recover to previous adjust type + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, coex_dm->ps_tdma_du_adj_type); + } + } +} + +u1Byte +halbtc8812a1ant_PsTdmaTypeByWifiRssi( + s4Byte 
wifi_rssi, + s4Byte pre_wifi_rssi, + u1Byte wifi_rssi_thresh + ) +{ + u1Byte ps_tdma_type=0; + + if(wifi_rssi > pre_wifi_rssi) + { + if(wifi_rssi > (wifi_rssi_thresh+5)) + { + ps_tdma_type = 26; + } + else + { + ps_tdma_type = 25; + } + } + else + { + if(wifi_rssi > wifi_rssi_thresh) + { + ps_tdma_type = 26; + } + else + { + ps_tdma_type = 25; + } + } + + return ps_tdma_type; +} + +void +halbtc8812a1ant_PsTdmaCheckForPowerSaveState( + PBTC_COEXIST btcoexist, + BOOLEAN new_ps_state + ) +{ + u1Byte lps_mode=0x0; + + btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode); + + if(lps_mode) // already under LPS state + { + if(new_ps_state) + { + // keep state under LPS, do nothing. + } + else + { + // will leave LPS state, turn off psTdma first + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0); + } + } + else // NO PS state + { + if(new_ps_state) + { + // will enter LPS state, turn off psTdma first + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0); + } + else + { + // keep state under NO PS state, do nothing. + } + } +} + +// SCO only or SCO+PAN(HS) +void +halbtc8812a1ant_ActionSco( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state; + u4Byte wifi_bw; + + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 4); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + + +void +halbtc8812a1ant_ActionHid( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state; + u4Byte wifi_bw; + + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,false,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +//A2DP only / PAN(EDR) only/ A2DP+PAN(HS) +void +halbtc8812a1ant_ActionA2dp( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state; + u4Byte wifi_bw; + + wifi_rssi_state = 
halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +void +halbtc8812a1ant_ActionA2dpPanHs( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext; + u4Byte wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +void +halbtc8812a1ant_ActionPanEdr( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state; + u4Byte wifi_bw; + + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + + +//PAN(HS) only +void +halbtc8812a1ant_ActionPanHs( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state; + u4Byte wifi_bw; 
+ + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // fw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + } + else + { + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + } + + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // fw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + } + else + { + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + } + + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +//PAN(EDR)+A2DP +void +halbtc8812a1ant_ActionPanEdrA2dp( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext; + u4Byte wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +void +halbtc8812a1ant_ActionPanEdrHid( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state; + u4Byte wifi_bw; + + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( 
(wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +// HID+A2DP+PAN(EDR) +void +halbtc8812a1ant_ActionHidA2dpPanEdr( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext; + u4Byte wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, NORMAL_EXEC, 6); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +void +halbtc8812a1ant_ActionHidA2dp( + PBTC_COEXIST btcoexist + ) +{ + u1Byte wifi_rssi_state, bt_rssi_state, bt_info_ext; + u4Byte wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8812a1ant_WifiRssiState(btcoexist, 0, 2, 25, 0); + bt_rssi_state = halbtc8812a1ant_BtRssiState(2, 50, 0); + + if(halbtc8812a1ant_NeedToDecBtPwr(btcoexist)) + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8812a1ant_DecBtPwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 == wifi_bw) + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,true,false,0x18); + } + } + else + { + // sw mechanism + if( (wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8812a1ant_SwMechanism2(btcoexist,true,true,false,0x18); + } + else + { + halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + } + } +} + +void +halbtc8812a1ant_ActionHs( + PBTC_COEXIST btcoexist, + BOOLEAN hs_connecting + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS, hs_connecting=%d!!!\n", hs_connecting)); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + + if(hs_connecting) + { + halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3); + } + else + { + if((coex_sta->high_priority_tx+coex_sta->high_priority_rx+ + coex_sta->low_priority_tx+coex_sta->low_priority_rx)<=1200) + halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xaaaaaaaa, 0xaaaaaaaa, 0xffff, 0x3); + else + halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0xffffffff, 0xffffffff, 0xffff, 0x3); + } +} + + +void +halbtc8812a1ant_ActionWifiNotConnected( + PBTC_COEXIST btcoexist + ) +{ + BOOLEAN hs_connecting=false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, 
&hs_connecting); + + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + if(hs_connecting) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is connecting!!!\n")); + halbtc8812a1ant_ActionHs(btcoexist, hs_connecting); + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } +} + +void +halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN hs_connecting=false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting); + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + if(hs_connecting) + { + halbtc8812a1ant_ActionHs(btcoexist, hs_connecting); + } + else if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status) +{ + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) + { + if(stack_info->hid_exist) + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35); + else + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3); + } + else + { + //error condition, should not reach here, record error number for debugging. + coex_dm->error_condition = 1; + } +} + +void +halbtc8812a1ant_ActionWifiConnectedScan( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()===>\n")); + + if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + // power save must executed before psTdma. 
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + + // psTdma + if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan(), bt is under inquiry/page scan\n")); + if(stack_info->sco_exist) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + } + else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) + { + if(stack_info->hid_exist) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 34); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 4); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + } + else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 33); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + //error condition, should not reach here + coex_dm->error_condition = 2; + } + } + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ActionConnectedScan()<===\n")); +} + +void +halbtc8812a1ant_ActionWifiConnectedSpecialPacket( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else + { + if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status) + { + if(stack_info->sco_exist) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + } + else if( (BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 28); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) + { + if(stack_info->hid_exist) + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 35); + else + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 29); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + 
halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3); + } + else + { + //error condition, should not reach here + coex_dm->error_condition = 3; + } + } +} + +void +halbtc8812a1ant_ActionWifiConnected( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN wifi_connected=false, wifi_busy=false, bt_hs_on=false; + BOOLEAN scan=false, link=false, roam=false; + BOOLEAN hs_connecting=false, under4way=false; + u4Byte wifi_bw; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n")); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if(!wifi_connected) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n")); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under4way); + if(under4way) + { + halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n")); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + if(scan || link || roam) + { + halbtc8812a1ant_ActionWifiConnectedScan(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n")); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + if(!wifi_busy) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi associated-idle!!!\n")); + if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else + { + if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is under inquiry/page scan!!!\n")); + if(stack_info->sco_exist) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + } + else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0); + // power save must executed before psTdma. 
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x26, 0x0); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) + { + if(stack_info->hid_exist && stack_info->numOfLink==1) + { + // hid only + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3); + coex_dm->reset_tdma_adjust = true; + } + else + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + + if(stack_info->hid_exist) + { + if(stack_info->a2dp_exist) + { + // hid+a2dp + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else if(stack_info->pan_exist) + { + if(bt_hs_on) + { + // hid+hs + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + } + else + { + // hid+pan + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + } + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + coex_dm->error_condition = 4; + } + coex_dm->reset_tdma_adjust = true; + } + else if(stack_info->a2dp_exist) + { + if(stack_info->pan_exist) + { + if(bt_hs_on) + { + // a2dp+hs + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + } + else + { + // a2dp+pan + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36); + } + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + coex_dm->reset_tdma_adjust = true; + } + else + { + // a2dp only + halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + } + else if(stack_info->pan_exist) + { + // pan only + if(bt_hs_on) + { + coex_dm->error_condition = 5; + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + coex_dm->reset_tdma_adjust = true; + } + else + { + // temp state, do nothing!!! 
+ //DbgPrint("error 6, coex_dm->bt_status=%d\n", coex_dm->bt_status); + //DbgPrint("error 6, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n", + //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist); + //coex_dm->error_condition = 6; + } + } + } + else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3); + } + else + { + //error condition, should not reach here + coex_dm->error_condition = 7; + } + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi busy!!!\n")); + if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else + { + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n")); + //DbgPrint("coex_dm->bt_status = 0x%x\n", coex_dm->bt_status); + halbtc8812a1ant_ActionHs(btcoexist, hs_connecting); + } + else if(BT_8812A_1ANT_BT_STATUS_INQ_PAGE == coex_dm->bt_status) + { + if(stack_info->sco_exist) + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + } + else if(BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3); + } + else if(BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + if(bt_hs_on) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HS is under progress!!!\n")); + halbtc8812a1ant_ActionHs(btcoexist, hs_connecting); + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5a5a5a5a, 0x5a5a5a5a, 0xffff, 0x3); + } + } + else if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) + { + if(stack_info->hid_exist && stack_info->numOfLink==1) + { + // hid only + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + // power save must executed before psTdma. 
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5fff5fff, 0x5fff5fff, 0xffff, 0x3); + coex_dm->reset_tdma_adjust = true; + } + else + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + + if(stack_info->hid_exist) + { + if(stack_info->a2dp_exist) + { + // hid+a2dp + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else if(stack_info->pan_exist) + { + if(bt_hs_on) + { + // hid+hs + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + } + else + { + // hid+pan + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + } + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + else + { + coex_dm->error_condition = 8; + } + coex_dm->reset_tdma_adjust = true; + } + else if(stack_info->a2dp_exist) + { + if(stack_info->pan_exist) + { + if(bt_hs_on) + { + // a2dp+hs + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + } + else + { + // a2dp+pan + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 36); + } + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + coex_dm->reset_tdma_adjust = true; + } + else + { + // a2dp only + halbtc8812a1ant_TdmaDurationAdjustForAcl(btcoexist); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + } + else if(stack_info->pan_exist) + { + // pan only + if(bt_hs_on) + { + coex_dm->error_condition = 9; + } + else + { + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, true, 2); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x5afa5afa, 0xffff, 0x3); + } + coex_dm->reset_tdma_adjust = true; + } + else + { + //DbgPrint("error 10, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n", + //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist); + coex_dm->error_condition = 10; + } + } + } + else if( (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) ) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + // power save must executed before psTdma. 
+ btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x5aea5aea, 0x5aea5aea, 0xffff, 0x3); + } + else + { + //DbgPrint("error 11, coex_dm->bt_status=%d\n", coex_dm->bt_status); + //DbgPrint("error 11, stack_info->numOfLink=%d, stack_info->hid_exist=%d, stack_info->a2dp_exist=%d, stack_info->pan_exist=%d, stack_info->sco_exist=%d\n", + //stack_info->numOfLink, stack_info->hid_exist, stack_info->a2dp_exist, stack_info->pan_exist, stack_info->sco_exist); + //error condition, should not reach here + coex_dm->error_condition = 11; + } + } + } + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()<===\n")); +} + +void +halbtc8812a1ant_RunSwCoexistMechanism( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN wifi_under5g=false, wifi_busy=false, wifi_connected=false; + u1Byte bt_info_original=0, bt_retry_cnt=0; + u1Byte algorithm=0; + + return; + if(stack_info->bProfileNotified) + { + algorithm = halbtc8812a1ant_ActionAlgorithm(btcoexist); + coex_dm->cur_algorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm)); + + if(halbtc8812a1ant_IsCommonAction(btcoexist)) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action common.\n")); + } + else + { + switch(coex_dm->cur_algorithm) + { + case BT_8812A_1ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n")); + halbtc8812a1ant_ActionSco(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n")); + halbtc8812a1ant_ActionHid(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n")); + halbtc8812a1ant_ActionA2dp(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n")); + halbtc8812a1ant_ActionA2dpPanHs(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n")); + halbtc8812a1ant_ActionPanEdr(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n")); + halbtc8812a1ant_ActionPanHs(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n")); + halbtc8812a1ant_ActionPanEdrA2dp(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n")); + halbtc8812a1ant_ActionPanEdrHid(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n")); + halbtc8812a1ant_ActionHidA2dpPanEdr(btcoexist); + break; + case BT_8812A_1ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n")); + halbtc8812a1ant_ActionHidA2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n")); + halbtc8812a1ant_CoexAllOff(btcoexist); + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } + } +} + +void 
+halbtc8812a1ant_RunCoexistMechanism( + PBTC_COEXIST btcoexist + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN wifi_under5g=false, wifi_busy=false, wifi_connected=false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n")); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g); + + if(wifi_under5g) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for 5G <===\n")); + return; + } + + if(btcoexist->manual_control) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n")); + return; + } + + if(btcoexist->stop_coex_dm) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n")); + return; + } + + halbtc8812a1ant_RunSwCoexistMechanism(btcoexist); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if(btcoexist->bt_info.bt_disabled) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], bt is disabled!!!\n")); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + if(wifi_busy) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + } + else + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8812a1ant_LpsRpwm(btcoexist, NORMAL_EXEC, 0x0, 0x4); + // power save must executed before psTdma. + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + } + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + } + else if(coex_sta->under_ips) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n")); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 0); + halbtc8812a1ant_CoexTable(btcoexist, NORMAL_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); + halbtc8812a1ant_WifiParaAdjust(btcoexist, false); + } + else if(!wifi_connected) + { + BOOLEAN scan=false, link=false, roam=false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n")); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if(scan || link || roam) + halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist); + else + halbtc8812a1ant_ActionWifiNotConnected(btcoexist); + } + else // wifi LPS/Busy + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is NOT under IPS!!!\n")); + halbtc8812a1ant_WifiParaAdjust(btcoexist, true); + halbtc8812a1ant_ActionWifiConnected(btcoexist); + } + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()<===\n")); +} + +void +halbtc8812a1ant_InitCoexDm( + PBTC_COEXIST btcoexist + ) +{ + BOOLEAN wifi_connected=false; + // force to reset coex mechanism + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if(!wifi_connected) // non-connected scan + { + halbtc8812a1ant_ActionWifiNotConnected(btcoexist); + } + else // wifi is connected + { + halbtc8812a1ant_ActionWifiConnected(btcoexist); + } + + halbtc8812a1ant_FwDacSwingLvl(btcoexist, FORCE_EXEC, 6); + halbtc8812a1ant_DecBtPwr(btcoexist, FORCE_EXEC, false); + + // sw all off + halbtc8812a1ant_SwMechanism1(btcoexist,false,false,false,false); + 
halbtc8812a1ant_SwMechanism2(btcoexist,false,false,false,0x18); + + halbtc8812a1ant_CoexTable(btcoexist, FORCE_EXEC, 0x55555555, 0x55555555, 0xffff, 0x3); +} + +//============================================================ +// work around function start with wa_halbtc8812a1ant_ +//============================================================ +//============================================================ +// extern function start with EXhalbtc8812a1ant_ +//============================================================ +void +EXhalbtc8812a1ant_InitHwConfig( + PBTC_COEXIST btcoexist + ) +{ + u4Byte u4_tmp=0; + u2Byte u2Tmp=0; + u1Byte u1_tmp=0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n")); + + // backup rf 0x1e value + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); + + //ant sw control to BT + btcoexist->btc_write_4byte(btcoexist, 0x900, 0x00000400); + btcoexist->btc_write_1byte(btcoexist, 0x76d, 0x1); + btcoexist->btc_write_1byte(btcoexist, 0xcb3, 0x77); + btcoexist->btc_write_1byte(btcoexist, 0xcb7, 0x40); + + // 0x790[5:0]=0x5 + u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u1_tmp &= 0xc0; + u1_tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u1_tmp); + + // PTA parameter + btcoexist->btc_write_1byte(btcoexist, 0x6cc, 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, 0xffff); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, 0x55555555); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, 0x55555555); + + // coex parameters + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1); + + // enable counter statistics + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); + + // enable PTA + btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20); + + // bt clock related + u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x4); + u1_tmp |= BIT7; + btcoexist->btc_write_1byte(btcoexist, 0x4, u1_tmp); + + // bt clock related + u1_tmp = btcoexist->btc_read_1byte(btcoexist, 0x7); + u1_tmp |= BIT1; + btcoexist->btc_write_1byte(btcoexist, 0x7, u1_tmp); +} + +void +EXhalbtc8812a1ant_InitCoexDm( + PBTC_COEXIST btcoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n")); + + btcoexist->stop_coex_dm = false; + + halbtc8812a1ant_InitCoexDm(btcoexist); +} + +void +EXhalbtc8812a1ant_DisplayCoexInfo( + PBTC_COEXIST btcoexist + ) +{ + PBTC_BOARD_INFO board_info=&btcoexist->boardInfo; + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + pu1Byte cli_buf=btcoexist->cli_buf; + u1Byte u1_tmp[4], i, bt_info_ext, psTdmaCase=0; + u4Byte u4_tmp[4]; + BOOLEAN roam=false, scan=false, link=false, wifi_under5g=false; + BOOLEAN bt_hs_on=false, wifi_busy=false; + s4Byte wifi_rssi=0, bt_hs_rssi=0; + u4Byte wifi_bw, wifiTrafficDir; + u1Byte wifiDot11Chnl, wifiHsChnl; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cli_buf); + + if(btcoexist->manual_control) + { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + if(btcoexist->stop_coex_dm) + { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + + if(!board_info->bBtExist) + { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + 
CL_PRINTF(cli_buf); + return; + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \ + board_info->pgAntNum, board_info->btdmAntNum); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \ + ((stack_info->bProfileNotified)? "Yes":"No"), stack_info->hciVersion); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_FW_VER); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \ + wifiDot11Chnl, wifiHsChnl, bt_hs_on); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \ + coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], + coex_dm->wifi_chnl_info[2]); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \ + wifi_rssi, bt_hs_rssi); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan", \ + link, roam, scan); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \ + (wifi_under5g? "5G":"2.4G"), + ((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy": (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))), + ((!wifi_busy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink"))); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \ + ((coex_sta->c2h_bt_inquiry_page)?("inquiry/page scan"):((BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)? "non-connected idle": + ( (BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)? "connected-idle":"busy"))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + CL_PRINTF(cli_buf); + + if(stack_info->bProfileNotified) + { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \ + stack_info->sco_exist, stack_info->hid_exist, stack_info->pan_exist, stack_info->a2dp_exist); + CL_PRINTF(cli_buf); + + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + } + + bt_info_ext = coex_sta->bt_info_ext; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \ + (bt_info_ext&BIT0)? 
"Basic rate":"EDR rate"); + CL_PRINTF(cli_buf); + + for(i=0; ibt_info_c2h_cnt[i]) + { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8812a1Ant[i], \ + coex_sta->bt_info_c2h[i][0], coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], coex_sta->bt_info_c2h_cnt[i]); + CL_PRINTF(cli_buf); + } + } + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \ + ((coex_sta->under_ips? "IPS ON":"IPS OFF")), + ((coex_sta->under_lps? "LPS ON":"LPS OFF")), + btcoexist->bt_info.lps1Ant, + btcoexist->bt_info.rpwm1Ant); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + if(!btcoexist->manual_control) + { + // Sw mechanism + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \ + coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra, coex_dm->limited_dig, coex_dm->bCurBtLnaConstrain); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \ + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + CL_PRINTF(cli_buf); + + // Fw mechanism + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============"); + CL_PRINTF(cli_buf); + + psTdmaCase = coex_dm->cur_ps_tdma; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \ + coex_dm->ps_tdma_para[0], coex_dm->ps_tdma_para[1], + coex_dm->ps_tdma_para[2], coex_dm->ps_tdma_para[3], + coex_dm->ps_tdma_para[4], psTdmaCase); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \ + coex_dm->error_condition); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \ + coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); + CL_PRINTF(cli_buf); + } + + // Hw setting + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \ + coex_dm->bt_rf0x1e_backup); + CL_PRINTF(cli_buf); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \ + u1_tmp[0]); + CL_PRINTF(cli_buf); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c); + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \ + (u1_tmp[0]), u4_tmp[0]); + CL_PRINTF(cli_buf); + + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \ + u1_tmp[0], u1_tmp[1]); + CL_PRINTF(cli_buf); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \ + u4_tmp[0], u1_tmp[0]); + CL_PRINTF(cli_buf); + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + 
CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \ + u4_tmp[0]); + CL_PRINTF(cli_buf); + +#if 0 + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48); + u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xf4c); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0xf48/ 0xf4c (FA cnt)", \ + u4_tmp[0], u4_tmp[1]); + CL_PRINTF(cli_buf); +#endif + + u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \ + u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \ + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \ + coex_sta->low_priority_rx, coex_sta->low_priority_tx); + CL_PRINTF(cli_buf); + + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + + +void +EXhalbtc8812a1ant_IpsNotify( + PBTC_COEXIST btcoexist, + u1Byte type + ) +{ + u4Byte u4_tmp=0; + + if(btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if(BTC_IPS_ENTER == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n")); + coex_sta->under_ips = true; + + // 0x4c[23]=1 + u4_tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u4_tmp |= BIT23; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u4_tmp); + + halbtc8812a1ant_CoexAllOff(btcoexist); + } + else if(BTC_IPS_LEAVE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n")); + coex_sta->under_ips = false; + //halbtc8812a1ant_InitCoexDm(btcoexist); + } +} + +void +EXhalbtc8812a1ant_LpsNotify( + PBTC_COEXIST btcoexist, + u1Byte type + ) +{ + if(btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if(BTC_LPS_ENABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n")); + coex_sta->under_lps = true; + } + else if(BTC_IPS_LEAVE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n")); + coex_sta->under_lps = false; + } +} + +void +EXhalbtc8812a1ant_ScanNotify( + PBTC_COEXIST btcoexist, + u1Byte type + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN wifi_connected=false; + + if(btcoexist->manual_control ||btcoexist->stop_coex_dm) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if(BTC_SCAN_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")); + if(!wifi_connected) // non-connected scan + { + //set 0x550[3]=1 before PsTdma + //halbtc8812a1ant_Reg0x550Bit3(btcoexist, true); + halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist); + } + else // wifi is connected + { + halbtc8812a1ant_ActionWifiConnectedScan(btcoexist); + } + } + else if(BTC_SCAN_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")); + if(!wifi_connected) // non-connected scan + { + //halbtc8812a1ant_Reg0x550Bit3(btcoexist, false); + halbtc8812a1ant_ActionWifiNotConnected(btcoexist); + } + else + { + halbtc8812a1ant_ActionWifiConnected(btcoexist); + } + } +} + +void +EXhalbtc8812a1ant_ConnectNotify( + PBTC_COEXIST 
btcoexist, + u1Byte type + ) +{ + PBTC_STACK_INFO stack_info=&btcoexist->stack_info; + BOOLEAN wifi_connected=false; + + if(btcoexist->manual_control ||btcoexist->stop_coex_dm) + return; + + if(BTC_ASSOCIATE_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n")); + if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + } + else + { + halbtc8812a1ant_ActionWifiNotConnectedAssoAuthScan(btcoexist); + } + } + else if(BTC_ASSOCIATE_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n")); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if(!wifi_connected) // non-connected scan + { + //halbtc8812a1ant_Reg0x550Bit3(btcoexist, false); + halbtc8812a1ant_ActionWifiNotConnected(btcoexist); + } + else + { + halbtc8812a1ant_ActionWifiConnected(btcoexist); + } + } +} + +void +EXhalbtc8812a1ant_MediaStatusNotify( + PBTC_COEXIST btcoexist, + u1Byte type + ) +{ + u1Byte dataLen=5; + u1Byte buf[6] = {0}; + u1Byte h2c_parameter[3] ={0}; + BOOLEAN wifi_under5g=false; + u4Byte wifi_bw; + u1Byte wifi_central_chnl; + + if(btcoexist->manual_control ||btcoexist->stop_coex_dm) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g); + + // only 2.4G we need to inform bt the chnl mask + if(!wifi_under5g) + { + if(BTC_MEDIA_CONNECT == type) + { + h2c_parameter[0] = 0x1; + } + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl); + h2c_parameter[1] = wifi_central_chnl; + if(BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + buf[0] = dataLen; + buf[1] = 0x5; // OP_Code + buf[2] = 0x3; // OP_Code_Length + buf[3] = h2c_parameter[0]; // OP_Code_Content + buf[4] = h2c_parameter[1]; + buf[5] = h2c_parameter[2]; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_CTRL_BT_COEX, (PVOID)&buf[0]); +} + +void +EXhalbtc8812a1ant_SpecialPacketNotify( + PBTC_COEXIST btcoexist, + u1Byte type + ) +{ + BOOLEAN bSecurityLink=false; + + if(btcoexist->manual_control ||btcoexist->stop_coex_dm) + return; + + //if(type == BTC_PACKET_DHCP) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type)); + if(btcoexist->bt_info.bt_disabled) + { + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + } + else + { + halbtc8812a1ant_ActionWifiConnectedSpecialPacket(btcoexist); + } + } +} + +void +EXhalbtc8812a1ant_BtInfoNotify( + PBTC_COEXIST btcoexist, + pu1Byte tmp_buf, + u1Byte length + ) +{ + u1Byte bt_info=0; + u1Byte i, rsp_source=0; + static u4Byte set_bt_lna_cnt=0, set_bt_psd_mode=0; + BOOLEAN bt_busy=false, limited_dig=false; + BOOLEAN wifi_connected=false; + BOOLEAN bRejApAggPkt=false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()===>\n")); + + + rsp_source = tmp_buf[0]&0xf; + if(rsp_source >= BT_INFO_SRC_8812A_1ANT_MAX) + rsp_source = BT_INFO_SRC_8812A_1ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, 
INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rsp_source, length)); + for(i=0; ibt_info_c2h[rsp_source][i] = tmp_buf[i]; + if(i == 1) + bt_info = tmp_buf[i]; + if(i == length-1) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x]\n", tmp_buf[i])); + } + else + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%2x, ", tmp_buf[i])); + } + } + + if(btcoexist->manual_control) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n")); + return; + } + if(btcoexist->stop_coex_dm) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n")); + return; + } + + if(BT_INFO_SRC_8812A_1ANT_WIFI_FW != rsp_source) + { + coex_sta->bt_retry_cnt = + coex_sta->bt_info_c2h[rsp_source][2]; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3]*2+10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rsp_source][4]; + + // Here we need to resend some wifi info to BT + // because bt is reset and loss of the info. + if( (coex_sta->bt_info_ext & BIT1) ) + { + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, &wifi_connected); + if(wifi_connected) + { + EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_CONNECT); + } + else + { + EXhalbtc8812a1ant_MediaStatusNotify(btcoexist, BTC_MEDIA_DISCONNECT); + } + + set_bt_psd_mode = 0; + } + + // test-chip bt patch doesn't support, temporary remove. + // need to add back when mp-chip. 12/20/2012 +#if 0 + if(set_bt_psd_mode <= 3) + { + halbtc8812a1ant_SetBtPsdMode(btcoexist, FORCE_EXEC, 0xd); + set_bt_psd_mode++; + } + + if(coex_dm->bCurBtLnaConstrain) + { + if( (coex_sta->bt_info_ext & BIT2) ) + { + } + else + { + if(set_bt_lna_cnt <= 3) + { + halbtc8812a1ant_SetBtLnaConstrain(btcoexist, FORCE_EXEC, true); + set_bt_lna_cnt++; + } + } + } + else + { + set_bt_lna_cnt = 0; + } +#endif + // test-chip bt patch only rsp the status for BT_RSP, + // so temporary we consider the following only under BT_RSP + if(BT_INFO_SRC_8812A_1ANT_BT_RSP == rsp_source) + { + if( (coex_sta->bt_info_ext & BIT3) ) + { + #if 0// temp disable because bt patch report the wrong value. + halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, false); + #endif + } + else + { + // BT already NOT ignore Wlan active, do nothing here. 
+ } + + if( (coex_sta->bt_info_ext & BIT4) ) + { + // BT auto report already enabled, do nothing + } + else + { + halbtc8812a1ant_BtAutoReport(btcoexist, FORCE_EXEC, true); + } + } + } + + // check BIT2 first ==> check if bt is under inquiry or page scan + if(bt_info & BT_INFO_8812A_1ANT_B_INQ_PAGE) + { + coex_sta->c2h_bt_inquiry_page = true; + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_INQ_PAGE; + } + else + { + coex_sta->c2h_bt_inquiry_page = false; + if(!(bt_info&BT_INFO_8812A_1ANT_B_CONNECTION)) + { + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n")); + } + else if(bt_info == BT_INFO_8812A_1ANT_B_CONNECTION) // connection exists but no busy + { + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n")); + } + else if((bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO) || + (bt_info&BT_INFO_8812A_1ANT_B_SCO_BUSY)) + { + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_SCO_BUSY; + bRejApAggPkt = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n")); + } + else if(bt_info&BT_INFO_8812A_1ANT_B_ACL_BUSY) + { + if(BT_8812A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) + coex_dm->reset_tdma_adjust = true; + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n")); + } +#if 0 + else if(bt_info&BT_INFO_8812A_1ANT_B_SCO_ESCO) + { + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl/sco busy!!!\n")); + } +#endif + else + { + //DbgPrint("error, undefined bt_info=0x%x\n", bt_info); + coex_dm->bt_status = BT_8812A_1ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n")); + } + + // send delete BA to disable aggregation + //btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt); + } + + if( (BT_8812A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status) ) + { + bt_busy = true; + } + else + { + bt_busy = false; + } + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + if(bt_busy) + { + limited_dig = true; + } + else + { + limited_dig = false; + } + coex_dm->limited_dig = limited_dig; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + halbtc8812a1ant_RunCoexistMechanism(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify()<===\n")); +} + +void +EXhalbtc8812a1ant_StackOperationNotify( + PBTC_COEXIST btcoexist, + u1Byte type + ) +{ + if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n")); + } + else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n")); + } +} + +void +EXhalbtc8812a1ant_HaltNotify( + PBTC_COEXIST btcoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n")); + + halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); + halbtc8812a1ant_PsTdma(btcoexist, FORCE_EXEC, false, 0); + btcoexist->btc_write_1byte(btcoexist, 0x4f, 0xf); + halbtc8812a1ant_WifiParaAdjust(btcoexist, false); +} 
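+
+// Like HaltNotify above, PnpNotify(SLEEP) below quiesces the coex mechanism before
+// the WLAN side goes down: BT firmware is told to ignore WLAN activity and PS-TDMA
+// is switched off, so BT can keep running while WiFi is halted or suspended.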
+ +void +EXhalbtc8812a1ant_PnpNotify( + PBTC_COEXIST btcoexist, + u1Byte pnpState + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n")); + + if(BTC_WIFI_PNP_SLEEP == pnpState) + { + btcoexist->stop_coex_dm = true; + halbtc8812a1ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + } + else if(BTC_WIFI_PNP_WAKE_UP == pnpState) + { + + } +} + +void +EXhalbtc8812a1ant_Periodical( + PBTC_COEXIST btcoexist + ) +{ + BOOLEAN wifi_under5g=false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()===>\n")); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n")); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under5g); + + if(wifi_under5g) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical(), return for 5G<===\n")); + halbtc8812a1ant_CoexAllOff(btcoexist); + return; + } + + halbtc8812a1ant_QueryBtInfo(btcoexist); + halbtc8812a1ant_MonitorBtCtr(btcoexist); + halbtc8812a1ant_MonitorBtEnableDisable(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Periodical()<===\n")); +} + +void +EXhalbtc8812a1ant_DbgControl( + PBTC_COEXIST btcoexist, + u1Byte opCode, + u1Byte opLen, + pu1Byte pData + ) +{ + switch(opCode) + { + case BTC_DBG_SET_COEX_NORMAL: + btcoexist->manual_control = false; + halbtc8812a1ant_InitCoexDm(btcoexist); + break; + case BTC_DBG_SET_COEX_WIFI_ONLY: + btcoexist->manual_control = true; + halbtc8812a1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + halbtc8812a1ant_PsTdma(btcoexist, NORMAL_EXEC, false, 9); + break; + case BTC_DBG_SET_COEX_BT_ONLY: + // todo + break; + default: + break; + } +} +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/HalBtc8812a1Ant.h @@ -0,0 +1,205 @@ +//=========================================== +// The following is for 8812A_1ANT BT Co-exist definition +//=========================================== +#define BT_INFO_8812A_1ANT_B_FTP BIT7 +#define BT_INFO_8812A_1ANT_B_A2DP BIT6 +#define BT_INFO_8812A_1ANT_B_HID BIT5 +#define BT_INFO_8812A_1ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8812A_1ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8812A_1ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8812A_1ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8812A_1ANT_B_CONNECTION BIT0 + +#define BT_INFO_8812A_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \ + (((_BT_INFO_EXT_&BIT0))? 
true:false) + +#define BTC_RSSI_COEX_THRESH_TOL_8812A_1ANT 2 + +#define +#define OUT + +typedef enum _BT_INFO_SRC_8812A_1ANT{ + BT_INFO_SRC_8812A_1ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8812A_1ANT_BT_RSP = 0x1, + BT_INFO_SRC_8812A_1ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8812A_1ANT_MAX +}BT_INFO_SRC_8812A_1ANT,*PBT_INFO_SRC_8812A_1ANT; + +typedef enum _BT_8812A_1ANT_BT_STATUS{ + BT_8812A_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8812A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8812A_1ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8812A_1ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8812A_1ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8812A_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8812A_1ANT_BT_STATUS_MAX +}BT_8812A_1ANT_BT_STATUS,*PBT_8812A_1ANT_BT_STATUS; + +typedef enum _BT_8812A_1ANT_COEX_ALGO{ + BT_8812A_1ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8812A_1ANT_COEX_ALGO_SCO = 0x1, + BT_8812A_1ANT_COEX_ALGO_HID = 0x2, + BT_8812A_1ANT_COEX_ALGO_A2DP = 0x3, + BT_8812A_1ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8812A_1ANT_COEX_ALGO_PANEDR = 0x5, + BT_8812A_1ANT_COEX_ALGO_PANHS = 0x6, + BT_8812A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8812A_1ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8812A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8812A_1ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8812A_1ANT_COEX_ALGO_MAX = 0xb, +}BT_8812A_1ANT_COEX_ALGO,*PBT_8812A_1ANT_COEX_ALGO; + +typedef struct _COEX_DM_8812A_1ANT{ + // fw mechanism + bool pre_dec_bt_pwr; + bool cur_dec_bt_pwr; + bool bPreBtLnaConstrain; + bool bCurBtLnaConstrain; + u8 bPreBtPsdMode; + u8 bCurBtPsdMode; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 ps_tdma_du_adj_type; + bool reset_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + u8 pre_lps; + u8 cur_lps; + u8 pre_rpwm; + u8 cur_rpwm; + + // sw mechanism + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_back_off; + bool cur_adc_back_off; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + // algorithm related + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + u8 error_condition; +} COEX_DM_8812A_1ANT, *PCOEX_DM_8812A_1ANT; + +typedef struct _COEX_STA_8812A_1ANT{ + bool under_lps; + bool under_ips; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8812A_1ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8812A_1ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}COEX_STA_8812A_1ANT, *PCOEX_STA_8812A_1ANT; + +//=========================================== +// The following is interface which will notify coex module. 
+//=========================================== +void +EXhalbtc8812a1ant_InitHwConfig( + PBTC_COEXIST btcoexist + ); +void +EXhalbtc8812a1ant_InitCoexDm( + PBTC_COEXIST btcoexist + ); +void +EXhalbtc8812a1ant_IpsNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_LpsNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_ScanNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_ConnectNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_MediaStatusNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_SpecialPacketNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_BtInfoNotify( + PBTC_COEXIST btcoexist, + u8 *tmp_buf, + u8 length + ); +void +EXhalbtc8812a1ant_StackOperationNotify( + PBTC_COEXIST btcoexist, + u8 type + ); +void +EXhalbtc8812a1ant_HaltNotify( + PBTC_COEXIST btcoexist + ); +void +EXhalbtc8812a1ant_PnpNotify( + PBTC_COEXIST btcoexist, + u8 pnpState + ); +void +EXhalbtc8812a1ant_Periodical( + PBTC_COEXIST btcoexist + ); +void +EXhalbtc8812a1ant_DisplayCoexInfo( + PBTC_COEXIST btcoexist + ); +void +EXhalbtc8812a1ant_DbgControl( + PBTC_COEXIST btcoexist, + u8 opCode, + u8 opLen, + u8 *pData + ); --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.c @@ -0,0 +1,1614 @@ +//============================================================ +// Description: +// +// This file is for RTL8723A Co-exist mechanism +// +// History +// 2012/08/22 Cosa first check in. +// 2012/11/14 Cosa Revise for 8723A 1Ant out sourcing. +// +//============================================================ + +//============================================================ +// include files +//============================================================ +#include "Mp_Precomp.h" +#if(BT_30_SUPPORT == 1) +//============================================================ +// Global variables, these are static variables +//============================================================ +static COEX_DM_8723A_1ANT GLCoexDm8723a1Ant; +static PCOEX_DM_8723A_1ANT pCoexDm=&GLCoexDm8723a1Ant; +static COEX_STA_8723A_1ANT GLCoexSta8723a1Ant; +static PCOEX_STA_8723A_1ANT pCoexSta=&GLCoexSta8723a1Ant; + +const char *const GLBtInfoSrc8723a1Ant[]={ + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +//============================================================ +// local function proto type if needed +//============================================================ +//============================================================ +// local function start with halbtc8723a1ant_ +//============================================================ +VOID +halbtc8723a1ant_Reg0x550Bit3( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bSet + ) +{ + u1Byte u1tmp=0; + + u1tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x550); + if(bSet) + { + u1tmp |= BIT3; + } + else + { + u1tmp &= ~BIT3; + } + pBtCoexist->btc_write_1byte(pBtCoexist, 0x550, u1tmp); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], set 0x550[3]=%d\n", (bSet? 
1:0))); +} + +VOID +halbtc8723a1ant_NotifyFwScan( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte scanType + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + if(BTC_SCAN_START == scanType) + H2C_Parameter[0] = 0x1; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Notify FW for wifi scan, write 0x3b=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3b, 1, H2C_Parameter); +} + +VOID +halbtc8723a1ant_QueryBtInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + pCoexSta->bC2hBtInfoReqSent = true; + + H2C_Parameter[0] |= BIT0; // trigger + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter); +} + +VOID +halbtc8723a1ant_SetSwRfRxLpfCorner( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bRxRfShrinkOn + ) +{ + if(bRxRfShrinkOn) + { + //Shrink RF Rx LPF corner + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7); + } + else + { + //Resume RF Rx LPF corner + // After initialized, we can use pCoexDm->btRf0x1eBackup + if(pBtCoexist->initilized) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup); + } + } +} + +VOID +halbtc8723a1ant_RfShrink( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bRxRfShrinkOn + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n", + (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF"))); + pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", + pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink)); + + if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink) + return; + } + halbtc8723a1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink); + + pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink; +} + +VOID +halbtc8723a1ant_SetSwPenaltyTxRateAdaptive( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bLowPenaltyRa + ) +{ + u1Byte tmpU1; + + tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd); + tmpU1 |= BIT0; + if(bLowPenaltyRa) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n")); + tmpU1 &= ~BIT2; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n")); + tmpU1 |= BIT2; + } + + pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1); +} + +VOID +halbtc8723a1ant_LowPenaltyRa( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bLowPenaltyRa + ) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n", + (bForceExec? "force to":""), ((bLowPenaltyRa)? 
"ON":"OFF"))); + pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", + pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa)); + + if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa) + return; + } + halbtc8723a1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa); + + pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa; +} + +VOID +halbtc8723a1ant_SetCoexTable( + IN PBTC_COEXIST pBtCoexist, + IN u4Byte val0x6c0, + IN u4Byte val0x6c8, + IN u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc)); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc); +} + +VOID +halbtc8723a1ant_CoexTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u4Byte val0x6c0, + IN u4Byte val0x6c8, + IN u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + (bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc)); + pCoexDm->curVal0x6c0 = val0x6c0; + pCoexDm->curVal0x6c8 = val0x6c8; + pCoexDm->curVal0x6cc = val0x6cc; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", + pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", + pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc)); + + if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) && + (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) && + (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) ) + return; + } + halbtc8723a1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc); + + pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0; + pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8; + pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc; +} + +VOID +halbtc8723a1ant_SetFwIgnoreWlanAct( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bEnable + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + if(bEnable) + { + H2C_Parameter[0] |= BIT0; // function enable + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter); +} + +VOID +halbtc8723a1ant_IgnoreWlanAct( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bEnable + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", + (bForceExec? "force to":""), (bEnable? 
"ON":"OFF"))); + pCoexDm->bCurIgnoreWlanAct = bEnable; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct)); + + if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct) + return; + } + halbtc8723a1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable); + + pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct; +} + +VOID +halbtc8723a1ant_SetFwPstdma( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type, + IN u1Byte byte1, + IN u1Byte byte2, + IN u1Byte byte3, + IN u1Byte byte4, + IN u1Byte byte5 + ) +{ + u1Byte H2C_Parameter[5] ={0}; + u1Byte realByte1=byte1, realByte5=byte5; + BOOLEAN bApEnable=FALSE; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &bApEnable); + + // byte1[1:0] != 0 means enable pstdma + // for 2Ant bt coexist, if byte1 != 0 means enable pstdma + if(byte1) + { + if(bApEnable) + { + if(type != 5 && type != 12) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], FW for 1Ant AP mode\n")); + realByte1 &= ~BIT4; + realByte1 |= BIT5; + + realByte5 |= BIT5; + realByte5 &= ~BIT6; + } + } + } + H2C_Parameter[0] = realByte1; + H2C_Parameter[1] = byte2; + H2C_Parameter[2] = byte3; + H2C_Parameter[3] = byte4; + H2C_Parameter[4] = realByte5; + + pCoexDm->psTdmaPara[0] = realByte1; + pCoexDm->psTdmaPara[1] = byte2; + pCoexDm->psTdmaPara[2] = byte3; + pCoexDm->psTdmaPara[3] = byte4; + pCoexDm->psTdmaPara[4] = realByte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n", + H2C_Parameter[0], + H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter); +} + +VOID +halbtc8723a1ant_PsTdma( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bTurnOn, + IN u1Byte type + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", + (bForceExec? "force to":""), (bTurnOn? 
"ON":"OFF"), type)); + pCoexDm->bCurPsTdmaOn = bTurnOn; + pCoexDm->curPsTdma = type; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + pCoexDm->prePsTdma, pCoexDm->curPsTdma)); + + if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) && + (pCoexDm->prePsTdma == pCoexDm->curPsTdma) ) + return; + } + if(pCoexDm->bCurPsTdmaOn) + { + switch(pCoexDm->curPsTdma) + { + case 1: + default: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x0, 0x40); + break; + case 2: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x0, 0x40); + break; + case 3: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x3f, 0x3, 0x10, 0x40); + break; + case 4: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x15, 0x3, 0x10, 0x0); + break; + case 5: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0x15, 0x3, 0x35, 0xc0); + break; + + case 8: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0); + break; + case 9: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40); + break; + case 10: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x0, 0x40); + break; + case 11: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x0, 0x40); + break; + case 12: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0xa9, 0xa, 0x3, 0x15, 0xc0); + break; + + case 18: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0); + break; + + case 20: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x2a, 0x2a, 0x0, 0x0); + break; + case 21: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x20, 0x3, 0x10, 0x40); + break; + case 22: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x1a, 0x1a, 0x2, 0x40); + break; + case 23: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x12, 0x12, 0x2, 0x40); + break; + case 24: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0xa, 0xa, 0x2, 0x40); + break; + case 25: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40); + break; + case 26: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x93, 0x25, 0x3, 0x10, 0x0); + break; + case 27: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x13, 0x5, 0x5, 0x2, 0x40); + break; + case 28: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x3, 0x2f, 0x2f, 0x0, 0x0); + break; + + } + } + else + { + // disable PS tdma + switch(pCoexDm->curPsTdma) + { + case 8: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x8, 0x0, 0x0, 0x0, 0x0); + break; + case 0: + default: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0); + pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210); + break; + case 9: + halbtc8723a1ant_SetFwPstdma(pBtCoexist, type, 0x0, 0x0, 0x0, 0x0, 0x0); + pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x110); + break; + + } + } + + // update pre state + pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn; + pCoexDm->prePsTdma = pCoexDm->curPsTdma; +} + + +VOID +halbtc8723a1ant_CoexAllOff( + IN PBTC_COEXIST pBtCoexist + ) +{ + // fw all off + halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + // sw all off + halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + + // hw all off + 
halbtc8723a1ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); +} + +VOID +halbtc8723a1ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ) +{ + // force to reset coex mechanism + halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE); +} + +VOID +halbtc8723a1ant_BtEnableAction( + IN PBTC_COEXIST pBtCoexist + ) +{ + halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE); +} + +VOID +halbtc8723a1ant_MonitorBtCtr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u4Byte regHPTxRx, regLPTxRx, u4Tmp; + u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0; + u1Byte u1Tmp; + + regHPTxRx = 0x770; + regLPTxRx = 0x774; + + u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx); + regHPTx = u4Tmp & MASKLWORD; + regHPRx = (u4Tmp & MASKHWORD)>>16; + + u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx); + regLPTx = u4Tmp & MASKLWORD; + regLPRx = (u4Tmp & MASKHWORD)>>16; + + pCoexSta->highPriorityTx = regHPTx; + pCoexSta->highPriorityRx = regHPRx; + pCoexSta->lowPriorityTx = regLPTx; + pCoexSta->lowPriorityRx = regLPRx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx)); + + // reset counter + pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc); +} + +VOID +halbtc8723a1ant_MonitorBtEnableDisable( + IN PBTC_COEXIST pBtCoexist + ) +{ + static BOOLEAN bPreBtDisabled=FALSE; + static u4Byte btDisableCnt=0; + BOOLEAN bBtActive=true, bBtDisabled=FALSE; + + // This function check if bt is disabled + + if( pCoexSta->highPriorityTx == 0 && + pCoexSta->highPriorityRx == 0 && + pCoexSta->lowPriorityTx == 0 && + pCoexSta->lowPriorityRx == 0) + { + bBtActive = FALSE; + } + if( pCoexSta->highPriorityTx == 0xffff && + pCoexSta->highPriorityRx == 0xffff && + pCoexSta->lowPriorityTx == 0xffff && + pCoexSta->lowPriorityRx == 0xffff) + { + bBtActive = FALSE; + } + if(bBtActive) + { + btDisableCnt = 0; + bBtDisabled = FALSE; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n")); + } + else + { + btDisableCnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", + btDisableCnt)); + if(btDisableCnt >= 2) + { + bBtDisabled = true; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n")); + } + } + if(bPreBtDisabled != bBtDisabled) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", + (bPreBtDisabled ? "disabled":"enabled"), + (bBtDisabled ? 
"disabled":"enabled"))); + bPreBtDisabled = bBtDisabled; + if(!bBtDisabled) + { + halbtc8723a1ant_BtEnableAction(pBtCoexist); + } + else + { + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + } + } +} + +VOID +halbtc8723a1ant_TdmaDurationAdjust( + IN PBTC_COEXIST pBtCoexist + ) +{ + static s4Byte up,dn,m,n,WaitCount; + s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration + u1Byte retryCount=0; + u1Byte btState; + BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE; + u4Byte wifiBw; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + btState = pCoexDm->btStatus; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], TdmaDurationAdjust()\n")); + if(pCoexDm->psTdmaGlobalCnt != pCoexDm->psTdmaMonitorCnt) + { + pCoexDm->psTdmaMonitorCnt = 0; + pCoexDm->psTdmaGlobalCnt = 0; + } + if(pCoexDm->psTdmaMonitorCnt == 0) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], first run BT A2DP + WiFi busy state!!\n")); + if(btState == BT_STATE_8723A_1ANT_ACL_ONLY_BUSY) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + pCoexDm->psTdmaDuAdjType = 1; + } + else + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22); + pCoexDm->psTdmaDuAdjType = 22; + } + //============ + up = 0; + dn = 0; + m = 1; + n= 3; + result = 0; + WaitCount = 0; + } + else + { + //accquire the BT TRx retry count from BT_Info byte2 + retryCount = pCoexSta->btRetryCnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], retryCount = %d\n", retryCount)); + result = 0; + WaitCount++; + + if(retryCount == 0) // no retry in the last 2-second duration + { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if(up >= n) // if s n 2 retry count0, hռeWiFi duration + { + WaitCount = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Increase wifi duration!!\n")); + } + } + else if (retryCount <= 3) // <=3 retry in the last 2-second duration + { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) // if s 2 2 retry count< 3, hկWiFi duration + { + if (WaitCount <= 2) + m++; // קK@blevelӦ^ + else + m = 1; + + if ( m >= 20) //m ̤j = 20 ' ̤j120 recheckO_վ WiFi duration. + m = 20; + + n = 3*m; + up = 0; + dn = 0; + WaitCount = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n")); + } + } + else //retry count > 3, un1 retry count > 3, hկWiFi duration + { + if (WaitCount == 1) + m++; // קK@blevelӦ^ + else + m = 1; + + if ( m >= 20) //m ̤j = 20 ' ̤j120 recheckO_վ WiFi duration. 
+ m = 20; + + n = 3*m; + up = 0; + dn = 0; + WaitCount = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n")); + } + + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT TxRx counter H+L <= 1200\n")); + if(btState != BT_STATE_8723A_1ANT_ACL_ONLY_BUSY) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], NOT ACL only busy!\n")); + if(BTC_WIFI_BW_HT40 != wifiBw) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 20MHz\n")); + if(result == -1) + { + if(pCoexDm->curPsTdma == 22) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23); + pCoexDm->psTdmaDuAdjType = 23; + } + else if(pCoexDm->curPsTdma == 23) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24); + pCoexDm->psTdmaDuAdjType = 24; + } + else if(pCoexDm->curPsTdma == 24) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25); + pCoexDm->psTdmaDuAdjType = 25; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 25) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24); + pCoexDm->psTdmaDuAdjType = 24; + } + else if(pCoexDm->curPsTdma == 24) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23); + pCoexDm->psTdmaDuAdjType = 23; + } + else if(pCoexDm->curPsTdma == 23) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 22); + pCoexDm->psTdmaDuAdjType = 22; + } + } + // error handle, if not in the following state, + // set psTdma again. + if( (pCoexDm->psTdmaDuAdjType != 22) && + (pCoexDm->psTdmaDuAdjType != 23) && + (pCoexDm->psTdmaDuAdjType != 24) && + (pCoexDm->psTdmaDuAdjType != 25) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n")); + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23); + pCoexDm->psTdmaDuAdjType = 23; + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], 40MHz\n")); + if(result == -1) + { + if(pCoexDm->curPsTdma == 23) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24); + pCoexDm->psTdmaDuAdjType = 24; + } + else if(pCoexDm->curPsTdma == 24) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25); + pCoexDm->psTdmaDuAdjType = 25; + } + else if(pCoexDm->curPsTdma == 25) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 27); + pCoexDm->psTdmaDuAdjType = 27; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 27) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 25); + pCoexDm->psTdmaDuAdjType = 25; + } + else if(pCoexDm->curPsTdma == 25) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24); + pCoexDm->psTdmaDuAdjType = 24; + } + else if(pCoexDm->curPsTdma == 24) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 23); + pCoexDm->psTdmaDuAdjType = 23; + } + } + // error handle, if not in the following state, + // set psTdma again. 
+ if( (pCoexDm->psTdmaDuAdjType != 23) && + (pCoexDm->psTdmaDuAdjType != 24) && + (pCoexDm->psTdmaDuAdjType != 25) && + (pCoexDm->psTdmaDuAdjType != 27) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n")); + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 24); + pCoexDm->psTdmaDuAdjType = 24; + } + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ACL only busy\n")); + if (result == -1) + { + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 11) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + pCoexDm->psTdmaDuAdjType = 1; + } + } + + // error handle, if not in the following state, + // set psTdma again. + if( (pCoexDm->psTdmaDuAdjType != 1) && + (pCoexDm->psTdmaDuAdjType != 2) && + (pCoexDm->psTdmaDuAdjType != 9) && + (pCoexDm->psTdmaDuAdjType != 11) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], duration case out of handle!!\n")); + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + } + } + } + + // if current PsTdma not match with the recorded one (when scan, dhcp...), + // then we have to adjust it back to the previous record one. 
+ if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", + pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType)); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + + if( !bScan && !bLink && !bRoam) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n")); + } + } + pCoexDm->psTdmaMonitorCnt++; +} + + +VOID +halbtc8723a1ant_CoexForWifiConnect( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bWifiConnected=FALSE, bWifiBusy=FALSE; + u1Byte btState, btInfoOriginal=0; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + + btState = pCoexDm->btStatus; + btInfoOriginal = pCoexSta->btInfoC2h[BT_INFO_SRC_8723A_1ANT_BT_RSP][0]; + + if(bWifiConnected) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi connected!!\n")); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + + if( !bWifiBusy && + ((BT_STATE_8723A_1ANT_NO_CONNECTION == btState) || + (BT_STATE_8723A_1ANT_CONNECT_IDLE == btState)) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], [Wifi is idle] or [Bt is non connected idle or Bt is connected idle]!!\n")); + + if(BT_STATE_8723A_1ANT_NO_CONNECTION == btState) + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); + else if(BT_STATE_8723A_1ANT_CONNECT_IDLE == btState) + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0); + } + else + { + if( (BT_STATE_8723A_1ANT_SCO_ONLY_BUSY == btState) || + (BT_STATE_8723A_1ANT_ACL_SCO_BUSY == btState) || + (BT_STATE_8723A_1ANT_HID_BUSY == btState) || + (BT_STATE_8723A_1ANT_HID_SCO_BUSY == btState) ) + { + pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0x60); + } + else + { + pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0); + } + switch(btState) + { + case BT_STATE_8723A_1ANT_NO_CONNECTION: + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5); + break; + case BT_STATE_8723A_1ANT_CONNECT_IDLE: + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + break; + case BT_STATE_8723A_1ANT_INQ_OR_PAG: + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + break; + case BT_STATE_8723A_1ANT_SCO_ONLY_BUSY: + case BT_STATE_8723A_1ANT_ACL_SCO_BUSY: + case BT_STATE_8723A_1ANT_HID_BUSY: + case BT_STATE_8723A_1ANT_HID_SCO_BUSY: + halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist); + break; + case BT_STATE_8723A_1ANT_ACL_ONLY_BUSY: + if (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP) + { + halbtc8723a1ant_TdmaDurationAdjust(pBtCoexist); + } + else if(btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + } + else if( (btInfoOriginal&BT_INFO_8723A_1ANT_B_A2DP) && + (btInfoOriginal&BT_INFO_8723A_1ANT_B_FTP) ) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + } + else + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + } + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], error!!!, undefined case in halbtc8723a1ant_CoexForWifiConnect()!!\n")); + break; + } + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is disconnected!!\n")); + } + + 
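+	// psTdmaGlobalCnt simply counts how many times this routine has run;
+	// it is incremented on every call, whether or not wifi is connected.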
pCoexDm->psTdmaGlobalCnt++; +} + +//============================================================ +// work around function start with wa_halbtc8723a1ant_ +//============================================================ +VOID +wa_halbtc8723a1ant_MonitorC2h( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte tmp1b=0x0; + u4Byte curC2hTotalCnt=0x0; + static u4Byte preC2hTotalCnt=0x0, sameCntPollingTime=0x0; + + curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_BT_RSP]; + + if(curC2hTotalCnt == preC2hTotalCnt) + { + sameCntPollingTime++; + } + else + { + preC2hTotalCnt = curC2hTotalCnt; + sameCntPollingTime = 0; + } + + if(sameCntPollingTime >= 2) + { + tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af); + if(tmp1b != 0x0) + { + pCoexSta->c2hHangDetectCnt++; + pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0); + } + } +} + +//============================================================ +// extern function start with EXhalbtc8723a1ant_ +//============================================================ +VOID +EXhalbtc8723a1ant_InitHwConfig( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n")); + + // backup rf 0x1e value + pCoexDm->btRf0x1eBackup = + pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff); + + pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20); + + // enable counter statistics + pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4); + + // coex table + pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, 0x0); // 1-Ant coex + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, 0xffff); // wifi break table + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, 0x55555555); //coex table + + // antenna switch control parameter + pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0xaaaaaaaa); + + pBtCoexist->btc_write_2byte(pBtCoexist, 0x860, 0x210); //set antenna at wifi side if ANTSW is software control + pBtCoexist->btc_write_4byte(pBtCoexist, 0x870, 0x300); //SPDT(connected with TRSW) control by hardware PTA + pBtCoexist->btc_write_4byte(pBtCoexist, 0x874, 0x22804000); //ANTSW keep by GNT_BT + + // coexistence parameters + pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1); // enable RTK mode PTA +} + +VOID +EXhalbtc8723a1ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n")); + + halbtc8723a1ant_InitCoexDm(pBtCoexist); +} + +VOID +EXhalbtc8723a1ant_DisplayCoexInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + struct btc_board_info * pBoardInfo=&pBtCoexist->board_info; + PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info; + pu1Byte cliBuf=pBtCoexist->cli_buf; + u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0; + u4Byte u4Tmp[4]; + BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE; + BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE; + s4Byte wifiRssi=0, btHsRssi=0; + u4Byte wifiBw, wifiTrafficDir; + u1Byte wifiDot11Chnl, wifiHsChnl; + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cliBuf); + + if(!pBoardInfo->bt_exist) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cliBuf); + return; + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \ + pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num); + CL_PRINTF(cliBuf); + + if(pBtCoexist->manual_control) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!"); + CL_PRINTF(cliBuf); + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT 
stack/ hci ext ver", \ + ((pStackInfo->bProfileNotified)? "Yes":"No"), pStackInfo->hciVersion); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \ + wifiDot11Chnl, wifiHsChnl, bBtHsOn); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \ + pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1], + pCoexDm->wifiChnlInfo[2]); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \ + wifiRssi, btHsRssi); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \ + bLink, bRoam, bScan); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \ + (bWifiUnder5G? "5G":"2.4G"), + ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))), + ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink"))); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \ + ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_1ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":( (BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))), + pCoexSta->btRssi, pCoexSta->btRetryCnt); + CL_PRINTF(cliBuf); + + if(pStackInfo->bProfileNotified) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \ + pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO); + } + + btInfoExt = pCoexSta->btInfoExt; + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \ + (btInfoExt&BIT0)? 
"Basic rate":"EDR rate"); + CL_PRINTF(cliBuf); + + for(i=0; ibtInfoC2hCnt[i]) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a1Ant[i], \ + pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1], + pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3], + pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5], + pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]); + CL_PRINTF(cliBuf); + } + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \ + pCoexSta->c2hHangDetectCnt); + CL_PRINTF(cliBuf); + + // Sw mechanism + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============"); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \ + pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig); + CL_PRINTF(cliBuf); + + // Fw mechanism + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============"); + CL_PRINTF(cliBuf); + + if(!pBtCoexist->manual_control) + { + psTdmaCase = pCoexDm->curPsTdma; + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \ + pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1], + pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3], + pCoexDm->psTdmaPara[4], psTdmaCase); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "IgnWlanAct", \ + pCoexDm->bCurIgnoreWlanAct); + CL_PRINTF(cliBuf); + } + + // Hw setting + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============"); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \ + pCoexDm->btRf0x1eBackup); + CL_PRINTF(cliBuf); + + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778); + u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783); + u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \ + u1Tmp[0], u1Tmp[1], u1Tmp[2]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \ + u1Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550); + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \ + u4Tmp[0], u1Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0); + u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4); + u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8); + u4Tmp[3] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \ + u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0); + 
u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4); + u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8); + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \ + u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \ + pCoexSta->highPriorityRx, pCoexSta->highPriorityTx); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \ + pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx); + CL_PRINTF(cliBuf); + + // Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \ + u1Tmp[0]); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + + +VOID +EXhalbtc8723a1ant_IpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_IPS_ENTER == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n")); + halbtc8723a1ant_CoexAllOff(pBtCoexist); + } + else if(BTC_IPS_LEAVE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n")); + //halbtc8723a1ant_InitCoexDm(pBtCoexist); + } +} + +VOID +EXhalbtc8723a1ant_LpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_LPS_ENABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n")); + } + else if(BTC_LPS_DISABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n")); + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + } +} + +VOID +EXhalbtc8723a1ant_ScanNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + BOOLEAN bWifiConnected=FALSE; + + halbtc8723a1ant_NotifyFwScan(pBtCoexist, type); + + if(pBtCoexist->btInfo.bBtDisabled) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); + } + else + { + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(BTC_SCAN_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")); + if(!bWifiConnected) // non-connected scan + { + //set 0x550[3]=1 before PsTdma + halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true); + } + + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + } + else if(BTC_SCAN_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")); + if(!bWifiConnected) // non-connected scan + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + } + else + { + halbtc8723a1ant_CoexForWifiConnect(pBtCoexist); + } + } + } +} + +VOID +EXhalbtc8723a1ant_ConnectNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + BOOLEAN bWifiConnected=FALSE; + + if(pBtCoexist->btInfo.bBtDisabled) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); + } + else + { + if(BTC_ASSOCIATE_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n")); + //set 0x550[3]=1 before PsTdma + halbtc8723a1ant_Reg0x550Bit3(pBtCoexist, true); + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); // extend wifi slot + } + else if(BTC_ASSOCIATE_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n")); + pBtCoexist->btc_get(pBtCoexist, 
+			BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected);
+			if(!bWifiConnected)	// non-connected scan
+			{
+				halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0);
+			}
+			else
+			{
+				halbtc8723a1ant_CoexForWifiConnect(pBtCoexist);
+			}
+		}
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_MediaStatusNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(BTC_MEDIA_CONNECT == type)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n"));
+	}
+	else
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n"));
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_SpecialPacketNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			type
+	)
+{
+	if(type == BTC_PACKET_DHCP)
+	{
+		BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n"));
+		if(pBtCoexist->btInfo.bBtDisabled)
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9);
+		}
+		else
+		{
+			halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 18);
+		}
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_BtInfoNotify(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	pu1Byte			tmpBuf,
+	IN	u1Byte			length
+	)
+{
+	u1Byte	btInfo=0;
+	u1Byte	i, rspSource=0;
+	BOOLEAN	bBtHsOn=FALSE, bBtBusy=FALSE, bForceLps=FALSE;
+
+	pCoexSta->bC2hBtInfoReqSent = FALSE;
+
+	rspSource = BT_INFO_SRC_8723A_1ANT_BT_RSP;
+	pCoexSta->btInfoC2hCnt[rspSource]++;
+
+	BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length));
+	for(i=0; i<length; i++)
+	{
+		pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i];
+		if(i == 0)
+			btInfo = tmpBuf[i];
+		if(i == length-1)
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i]));
+		}
+		else
+		{
+			BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i]));
+		}
+	}
+
+	if(BT_INFO_SRC_8723A_1ANT_WIFI_FW != rspSource)
+	{
+		pCoexSta->btRetryCnt =
+			pCoexSta->btInfoC2h[rspSource][1];
+
+		pCoexSta->btRssi =
+			pCoexSta->btInfoC2h[rspSource][2]*2+10;
+
+		pCoexSta->btInfoExt =
+			pCoexSta->btInfoC2h[rspSource][3];
+	}
+
+	pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn);
+	// check BIT2 first ==> check if bt is under inquiry or page scan
+	if(btInfo & BT_INFO_8723A_1ANT_B_INQ_PAGE)
+	{
+		pCoexSta->bC2hBtInquiryPage = true;
+	}
+	else
+	{
+		pCoexSta->bC2hBtInquiryPage = FALSE;
+	}
+	btInfo &= ~BIT2;	// mask out the inquiry/page bit before decoding the connection state below
+	if(!(btInfo & BIT0))
+	{
+		pCoexDm->btStatus = BT_STATE_8723A_1ANT_NO_CONNECTION;
+		bForceLps = FALSE;
+	}
+	else
+	{
+		bForceLps = true;
+		if(btInfo == 0x1)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_CONNECT_IDLE;
+		}
+		else if(btInfo == 0x9)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_ONLY_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x13)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_SCO_ONLY_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x1b)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_ACL_SCO_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x29)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_BUSY;
+			bBtBusy = true;
+		}
+		else if(btInfo == 0x3b)
+		{
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_HID_SCO_BUSY;
+			bBtBusy = true;
+		}
+	}
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy);
+	pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &bBtBusy);
+	if(bForceLps)
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL);
+	else
+		pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL);
+
+	if( (BT_STATE_8723A_1ANT_NO_CONNECTION == pCoexDm->btStatus) ||
+		(BT_STATE_8723A_1ANT_CONNECT_IDLE == pCoexDm->btStatus) )
+	{
+		if(pCoexSta->bC2hBtInquiryPage)
+			pCoexDm->btStatus = BT_STATE_8723A_1ANT_INQ_OR_PAG;
+	}
+}
+
+VOID
+EXhalbtc8723a1ant_StackOperationNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n")); + } + else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n")); + } +} + +VOID +EXhalbtc8723a1ant_HaltNotify( + IN PBTC_COEXIST pBtCoexist + ) +{ + halbtc8723a1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0); + + halbtc8723a1ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE); + halbtc8723a1ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE); + + halbtc8723a1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true); + EXhalbtc8723a1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT); +} + +VOID +EXhalbtc8723a1ant_Periodical( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE, bWifiConnected=FALSE; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 1Ant Periodical!!\n")); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + + // work around for c2h hang + wa_halbtc8723a1ant_MonitorC2h(pBtCoexist); + + halbtc8723a1ant_QueryBtInfo(pBtCoexist); + halbtc8723a1ant_MonitorBtCtr(pBtCoexist); + halbtc8723a1ant_MonitorBtEnableDisable(pBtCoexist); + + + if(bScan) + return; + if(bLink) + return; + + if(bWifiConnected) + { + if(pBtCoexist->btInfo.bBtDisabled) + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); + + halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + } + else + { + halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a1ant_CoexForWifiConnect(pBtCoexist); + } + } + else + { + halbtc8723a1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + halbtc8723a1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a1ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + } +} + + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/habtc8723a1ant.h @@ -0,0 +1,176 @@ +//=========================================== +// The following is for 8723A 1Ant BT Co-exist definition +//=========================================== +#define BT_INFO_8723A_1ANT_B_FTP BIT7 +#define BT_INFO_8723A_1ANT_B_A2DP BIT6 +#define BT_INFO_8723A_1ANT_B_HID BIT5 +#define BT_INFO_8723A_1ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8723A_1ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8723A_1ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8723A_1ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8723A_1ANT_B_CONNECTION BIT0 + +typedef enum _BT_STATE_8723A_1ANT{ + BT_STATE_8723A_1ANT_DISABLED = 0, + BT_STATE_8723A_1ANT_NO_CONNECTION = 1, + BT_STATE_8723A_1ANT_CONNECT_IDLE = 2, + BT_STATE_8723A_1ANT_INQ_OR_PAG = 3, + BT_STATE_8723A_1ANT_ACL_ONLY_BUSY = 4, + BT_STATE_8723A_1ANT_SCO_ONLY_BUSY = 5, + BT_STATE_8723A_1ANT_ACL_SCO_BUSY = 6, + BT_STATE_8723A_1ANT_HID_BUSY = 7, + BT_STATE_8723A_1ANT_HID_SCO_BUSY = 8, + BT_STATE_8723A_1ANT_MAX +}BT_STATE_8723A_1ANT, *PBT_STATE_8723A_1ANT; + +#define BTC_RSSI_COEX_THRESH_TOL_8723A_1ANT 2 + +typedef enum _BT_INFO_SRC_8723A_1ANT{ + BT_INFO_SRC_8723A_1ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8723A_1ANT_BT_RSP = 0x1, + 
BT_INFO_SRC_8723A_1ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8723A_1ANT_MAX +}BT_INFO_SRC_8723A_1ANT,*PBT_INFO_SRC_8723A_1ANT; + +typedef enum _BT_8723A_1ANT_BT_STATUS{ + BT_8723A_1ANT_BT_STATUS_IDLE = 0x0, + BT_8723A_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8723A_1ANT_BT_STATUS_NON_IDLE = 0x2, + BT_8723A_1ANT_BT_STATUS_MAX +}BT_8723A_1ANT_BT_STATUS,*PBT_8723A_1ANT_BT_STATUS; + +typedef enum _BT_8723A_1ANT_COEX_ALGO{ + BT_8723A_1ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8723A_1ANT_COEX_ALGO_SCO = 0x1, + BT_8723A_1ANT_COEX_ALGO_HID = 0x2, + BT_8723A_1ANT_COEX_ALGO_A2DP = 0x3, + BT_8723A_1ANT_COEX_ALGO_PANEDR = 0x4, + BT_8723A_1ANT_COEX_ALGO_PANHS = 0x5, + BT_8723A_1ANT_COEX_ALGO_PANEDR_A2DP = 0x6, + BT_8723A_1ANT_COEX_ALGO_PANEDR_HID = 0x7, + BT_8723A_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8, + BT_8723A_1ANT_COEX_ALGO_HID_A2DP = 0x9, + BT_8723A_1ANT_COEX_ALGO_MAX +}BT_8723A_1ANT_COEX_ALGO,*PBT_8723A_1ANT_COEX_ALGO; + +typedef struct _COEX_DM_8723A_1ANT{ + // fw mechanism + BOOLEAN bCurIgnoreWlanAct; + BOOLEAN bPreIgnoreWlanAct; + u1Byte prePsTdma; + u1Byte curPsTdma; + u1Byte psTdmaPara[5]; + u1Byte psTdmaDuAdjType; + u4Byte psTdmaMonitorCnt; + u4Byte psTdmaGlobalCnt; + BOOLEAN bResetTdmaAdjust; + BOOLEAN bPrePsTdmaOn; + BOOLEAN bCurPsTdmaOn; + + // sw mechanism + BOOLEAN bPreRfRxLpfShrink; + BOOLEAN bCurRfRxLpfShrink; + u4Byte btRf0x1eBackup; + BOOLEAN bPreLowPenaltyRa; + BOOLEAN bCurLowPenaltyRa; + u4Byte preVal0x6c0; + u4Byte curVal0x6c0; + u4Byte preVal0x6c8; + u4Byte curVal0x6c8; + u1Byte preVal0x6cc; + u1Byte curVal0x6cc; + BOOLEAN limited_dig; + + // algorithm related + u1Byte preAlgorithm; + u1Byte curAlgorithm; + u1Byte btStatus; + u1Byte wifiChnlInfo[3]; +} COEX_DM_8723A_1ANT, *PCOEX_DM_8723A_1ANT; + +typedef struct _COEX_STA_8723A_1ANT{ + u4Byte highPriorityTx; + u4Byte highPriorityRx; + u4Byte lowPriorityTx; + u4Byte lowPriorityRx; + u1Byte btRssi; + u1Byte preBtRssiState; + u1Byte preBtRssiState1; + u1Byte preWifiRssiState[4]; + BOOLEAN bC2hBtInfoReqSent; + u1Byte btInfoC2h[BT_INFO_SRC_8723A_1ANT_MAX][10]; + u4Byte btInfoC2hCnt[BT_INFO_SRC_8723A_1ANT_MAX]; + BOOLEAN bC2hBtInquiryPage; + u1Byte btRetryCnt; + u1Byte btInfoExt; + //BOOLEAN bHoldForStackOperation; + //u1Byte bHoldPeriodCnt; + // this is for c2h hang work-around + u4Byte c2hHangDetectCnt; +}COEX_STA_8723A_1ANT, *PCOEX_STA_8723A_1ANT; + +//=========================================== +// The following is interface which will notify coex module. 
+//=========================================== +VOID +EXhalbtc8723a1ant_InitHwConfig( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a1ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a1ant_IpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_LpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_ScanNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_ConnectNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_MediaStatusNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_SpecialPacketNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_BtInfoNotify( + IN PBTC_COEXIST pBtCoexist, + IN pu1Byte tmpBuf, + IN u1Byte length + ); +VOID +EXhalbtc8723a1ant_StackOperationNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a1ant_HaltNotify( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a1ant_Periodical( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a1ant_DisplayCoexInfo( + IN PBTC_COEXIST pBtCoexist + ); + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbt_precomp.h @@ -0,0 +1,99 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * + ******************************************************************************/ +#ifndef __HALBT_PRECOMP_H__ +#define __HALBT_PRECOMP_H__ +/************************************************************* + * include files + *************************************************************/ +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../regd.h" +#include "../cam.h" +#include "../ps.h" +#include "../pci.h" +#include "../rtl8821ae/reg.h" +#include "../rtl8821ae/def.h" +#include "../rtl8821ae/phy.h" +#include "../rtl8821ae/dm.h" +#include "../rtl8821ae/fw.h" +#include "../rtl8821ae/led.h" +#include "../rtl8821ae/hw.h" +#include "../rtl8821ae/pwrseqcmd.h" +#include "../rtl8821ae/pwrseq.h" + +#include "halbtcoutsrc.h" + + +#include "halbtc8192e2ant.h" +#include "halbtc8723b1ant.h" +#include "halbtc8723b2ant.h" + + + +#define GetDefaultAdapter(padapter) padapter + + +#define BIT0 0x00000001 +#define BIT1 0x00000002 +#define BIT2 0x00000004 +#define BIT3 0x00000008 +#define BIT4 0x00000010 +#define BIT5 0x00000020 +#define BIT6 0x00000040 +#define BIT7 0x00000080 +#define BIT8 0x00000100 +#define BIT9 0x00000200 +#define BIT10 0x00000400 +#define BIT11 0x00000800 +#define BIT12 0x00001000 +#define BIT13 0x00002000 +#define BIT14 0x00004000 +#define BIT15 0x00008000 +#define BIT16 0x00010000 +#define BIT17 0x00020000 +#define BIT18 0x00040000 +#define BIT19 0x00080000 +#define BIT20 0x00100000 +#define BIT21 0x00200000 +#define BIT22 0x00400000 +#define BIT23 0x00800000 +#define BIT24 0x01000000 +#define BIT25 0x02000000 +#define BIT26 0x04000000 +#define BIT27 0x08000000 +#define BIT28 0x10000000 +#define BIT29 0x20000000 +#define BIT30 0x40000000 +#define BIT31 0x80000000 + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 +#define MASKCCK 0x3f3f3f3f + +#endif /* __HALBT_PRECOMP_H__ */ --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.c @@ -0,0 +1,3891 @@ +//============================================================ +// Description: +// +// This file is for 8192e1ant Co-exist mechanism +// +// History +// 2012/11/15 Cosa first check in. 
+// +//============================================================ + +//============================================================ +// include files +//============================================================ +#include "Mp_Precomp.h" +#if(BT_30_SUPPORT == 1) +//============================================================ +// Global variables, these are static variables +//============================================================ +static COEX_DM_8192E_1ANT GLCoexDm8192e1Ant; +static PCOEX_DM_8192E_1ANT pCoexDm=&GLCoexDm8192e1Ant; +static COEX_STA_8192E_1ANT GLCoexSta8192e1Ant; +static PCOEX_STA_8192E_1ANT pCoexSta=&GLCoexSta8192e1Ant; + +const char *const GLBtInfoSrc8192e1Ant[]={ + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +u4Byte GLCoexVerDate8192e1Ant=20130729; +u4Byte GLCoexVer8192e1Ant=0x10; + +//============================================================ +// local function proto type if needed +//============================================================ +//============================================================ +// local function start with halbtc8192e1ant_ +//============================================================ +u1Byte +halbtc8192e1ant_BtRssiState( + u1Byte levelNum, + u1Byte rssiThresh, + u1Byte rssiThresh1 + ) +{ + s4Byte btRssi=0; + u1Byte btRssiState=pCoexSta->preBtRssiState; + + btRssi = pCoexSta->btRssi; + + if(levelNum == 2) + { + if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) || + (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)) + { + if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT)) + { + btRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n")); + } + } + else + { + if(btRssi < rssiThresh) + { + btRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n")); + } + } + } + else if(levelNum == 3) + { + if(rssiThresh > rssiThresh1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n")); + return pCoexSta->preBtRssiState; + } + + if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) || + (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)) + { + if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT)) + { + btRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n")); + } + } + else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) || + (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM)) + { + if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT)) + { + btRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n")); + } + else if(btRssi < rssiThresh) + { + btRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, 
("[BTCoex], BT Rssi state stay at Medium\n")); + } + } + else + { + if(btRssi < rssiThresh1) + { + btRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n")); + } + } + } + + pCoexSta->preBtRssiState = btRssiState; + + return btRssiState; +} + +u1Byte +halbtc8192e1ant_WifiRssiState( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte index, + IN u1Byte levelNum, + IN u1Byte rssiThresh, + IN u1Byte rssiThresh1 + ) +{ + s4Byte wifiRssi=0; + u1Byte wifiRssiState=pCoexSta->preWifiRssiState[index]; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi); + + if(levelNum == 2) + { + if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) || + (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW)) + { + if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT)) + { + wifiRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n")); + } + } + else + { + if(wifiRssi < rssiThresh) + { + wifiRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n")); + } + } + } + else if(levelNum == 3) + { + if(rssiThresh > rssiThresh1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n")); + return pCoexSta->preWifiRssiState[index]; + } + + if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) || + (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW)) + { + if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT)) + { + wifiRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n")); + } + } + else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) || + (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM)) + { + if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT)) + { + wifiRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n")); + } + else if(wifiRssi < rssiThresh) + { + wifiRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n")); + } + } + else + { + if(wifiRssi < rssiThresh1) + { + wifiRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n")); + } + } + } + + pCoexSta->preWifiRssiState[index] = wifiRssiState; + + return 
wifiRssiState; +} + +VOID +halbtc8192e1ant_Updatera_mask( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte type, + IN u4Byte rateMask + ) +{ + if(BTC_RATE_DISABLE == type) + { + pCoexDm->curra_mask |= rateMask; // disable rate + } + else if(BTC_RATE_ENABLE == type) + { + pCoexDm->curra_mask &= ~rateMask; // enable rate + } + + if( bForceExec || (pCoexDm->prera_mask != pCoexDm->curra_mask)) + { + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_UPDATE_ra_mask, &pCoexDm->curra_mask); + } + pCoexDm->prera_mask = pCoexDm->curra_mask; +} + +VOID +halbtc8192e1ant_MonitorBtCtr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u4Byte regHPTxRx, regLPTxRx, u4Tmp; + u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0; + u1Byte u1Tmp; + + regHPTxRx = 0x770; + regLPTxRx = 0x774; + + u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx); + regHPTx = u4Tmp & MASKLWORD; + regHPRx = (u4Tmp & MASKHWORD)>>16; + + u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx); + regLPTx = u4Tmp & MASKLWORD; + regLPRx = (u4Tmp & MASKHWORD)>>16; + + pCoexSta->highPriorityTx = regHPTx; + pCoexSta->highPriorityRx = regHPRx; + pCoexSta->lowPriorityTx = regLPTx; + pCoexSta->lowPriorityRx = regLPRx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx)); + + // reset counter + pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc); +} + +VOID +halbtc8192e1ant_QueryBtInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + pCoexSta->bC2hBtInfoReqSent = true; + + H2C_Parameter[0] |= BIT0; // trigger + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x61=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x61, 1, H2C_Parameter); +} + +BOOLEAN +halbtc8192e1ant_IsWifiStatusChanged( + IN PBTC_COEXIST pBtCoexist + ) +{ + static BOOLEAN bPreWifiBusy=FALSE, bPreUnder4way=FALSE, bPreBtHsOn=FALSE; + BOOLEAN bWifiBusy=FALSE, bUnder4way=FALSE, bBtHsOn=FALSE; + BOOLEAN bWifiConnected=FALSE; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way); + + if(bWifiConnected) + { + if(bWifiBusy != bPreWifiBusy) + { + bPreWifiBusy = bWifiBusy; + return true; + } + if(bUnder4way != bPreUnder4way) + { + bPreUnder4way = bUnder4way; + return true; + } + if(bBtHsOn != bPreBtHsOn) + { + bPreBtHsOn = bBtHsOn; + return true; + } + } + + return FALSE; +} + +VOID +halbtc8192e1ant_UpdateBtLinkInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + + pBtLinkInfo->bBtLinkExist = pCoexSta->bBtLinkExist; + pBtLinkInfo->bScoExist = pCoexSta->bScoExist; + pBtLinkInfo->bA2dpExist = pCoexSta->bA2dpExist; + pBtLinkInfo->bPanExist = pCoexSta->bPanExist; + pBtLinkInfo->bHidExist = pCoexSta->bHidExist; + + // check if Sco only + if( pBtLinkInfo->bScoExist && + !pBtLinkInfo->bA2dpExist && + !pBtLinkInfo->bPanExist && + !pBtLinkInfo->bHidExist ) + pBtLinkInfo->bScoOnly = true; + else + pBtLinkInfo->bScoOnly = FALSE; + + // check if A2dp only + if( !pBtLinkInfo->bScoExist && + pBtLinkInfo->bA2dpExist && + !pBtLinkInfo->bPanExist && + 
!pBtLinkInfo->bHidExist ) + pBtLinkInfo->bA2dpOnly = true; + else + pBtLinkInfo->bA2dpOnly = FALSE; + + // check if Pan only + if( !pBtLinkInfo->bScoExist && + !pBtLinkInfo->bA2dpExist && + pBtLinkInfo->bPanExist && + !pBtLinkInfo->bHidExist ) + pBtLinkInfo->bPanOnly = true; + else + pBtLinkInfo->bPanOnly = FALSE; + + // check if Hid only + if( !pBtLinkInfo->bScoExist && + !pBtLinkInfo->bA2dpExist && + !pBtLinkInfo->bPanExist && + pBtLinkInfo->bHidExist ) + pBtLinkInfo->bHidOnly = true; + else + pBtLinkInfo->bHidOnly = FALSE; +} + +u1Byte +halbtc8192e1ant_ActionAlgorithm( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + BOOLEAN bBtHsOn=FALSE; + u1Byte algorithm=BT_8192E_1ANT_COEX_ALGO_UNDEFINED; + u1Byte numOfDiffProfile=0; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + + if(!pBtLinkInfo->bBtLinkExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No BT link exists!!!\n")); + return algorithm; + } + + if(pBtLinkInfo->bScoExist) + numOfDiffProfile++; + if(pBtLinkInfo->bHidExist) + numOfDiffProfile++; + if(pBtLinkInfo->bPanExist) + numOfDiffProfile++; + if(pBtLinkInfo->bA2dpExist) + numOfDiffProfile++; + + if(numOfDiffProfile == 1) + { + if(pBtLinkInfo->bScoExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_SCO; + } + else + { + if(pBtLinkInfo->bHidExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID; + } + else if(pBtLinkInfo->bA2dpExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP; + } + else if(pBtLinkInfo->bPanExist) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANHS; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR; + } + } + } + } + else if(numOfDiffProfile == 2) + { + if(pBtLinkInfo->bScoExist) + { + if(pBtLinkInfo->bHidExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID; + } + else if(pBtLinkInfo->bA2dpExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_SCO; + } + else if(pBtLinkInfo->bPanExist) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_SCO; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + else + { + if( pBtLinkInfo->bHidExist && + pBtLinkInfo->bA2dpExist ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP; + } + else if( pBtLinkInfo->bHidExist && + pBtLinkInfo->bPanExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID; + } + } + else if( pBtLinkInfo->bPanExist && + pBtLinkInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS; + } + else + { + 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } + else if(numOfDiffProfile == 3) + { + if(pBtLinkInfo->bScoExist) + { + if( pBtLinkInfo->bHidExist && + pBtLinkInfo->bA2dpExist ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID; + } + else if( pBtLinkInfo->bHidExist && + pBtLinkInfo->bPanExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID; + } + } + else if( pBtLinkInfo->bPanExist && + pBtLinkInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_SCO; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + else + { + if( pBtLinkInfo->bHidExist && + pBtLinkInfo->bPanExist && + pBtLinkInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } + else if(numOfDiffProfile >= 3) + { + if(pBtLinkInfo->bScoExist) + { + if( pBtLinkInfo->bHidExist && + pBtLinkInfo->bPanExist && + pBtLinkInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n")); + + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n")); + algorithm = BT_8192E_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +VOID +halbtc8192e1ant_SetFwDacSwingLevel( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte dacSwingLvl + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + // There are several type of dacswing + // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 + H2C_Parameter[0] = dacSwingLvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x64=0x%x\n", H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x64, 1, H2C_Parameter); +} + +VOID +halbtc8192e1ant_SetFwDecBtPwr( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte decBtPwrLvl + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + H2C_Parameter[0] = decBtPwrLvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power level = %d, FW write 0x62=0x%x\n", + decBtPwrLvl, H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x62, 1, H2C_Parameter); +} + +VOID +halbtc8192e1ant_DecBtPwr( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte decBtPwrLvl + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power level = %d\n", + (bForceExec? 
"force to":""), decBtPwrLvl)); + pCoexDm->curBtDecPwrLvl = decBtPwrLvl; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], BtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", + pCoexDm->preBtDecPwrLvl, pCoexDm->curBtDecPwrLvl)); + + if(pCoexDm->preBtDecPwrLvl == pCoexDm->curBtDecPwrLvl) + return; + } + halbtc8192e1ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->curBtDecPwrLvl); + + pCoexDm->preBtDecPwrLvl = pCoexDm->curBtDecPwrLvl; +} + +VOID +halbtc8192e1ant_SetFwBtLnaConstrain( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bBtLnaConsOn + ) +{ + u1Byte H2C_Parameter[2] ={0}; + + H2C_Parameter[0] = 0x3; // opCode, 0x3=BT_SET_LNA_CONSTRAIN + + if(bBtLnaConsOn) + { + H2C_Parameter[1] |= BIT0; + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT LNA Constrain: %s, FW write 0x69=0x%x\n", + (bBtLnaConsOn? "ON!!":"OFF!!"), + H2C_Parameter[0]<<8|H2C_Parameter[1])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter); +} + +VOID +halbtc8192e1ant_SetBtLnaConstrain( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bBtLnaConsOn + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Constrain = %s\n", + (bForceExec? "force":""), ((bBtLnaConsOn)? "ON":"OFF"))); + pCoexDm->bCurBtLnaConstrain = bBtLnaConsOn; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtLnaConstrain=%d, bCurBtLnaConstrain=%d\n", + pCoexDm->bPreBtLnaConstrain, pCoexDm->bCurBtLnaConstrain)); + + if(pCoexDm->bPreBtLnaConstrain == pCoexDm->bCurBtLnaConstrain) + return; + } + halbtc8192e1ant_SetFwBtLnaConstrain(pBtCoexist, pCoexDm->bCurBtLnaConstrain); + + pCoexDm->bPreBtLnaConstrain = pCoexDm->bCurBtLnaConstrain; +} + +VOID +halbtc8192e1ant_SetFwBtPsdMode( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte btPsdMode + ) +{ + u1Byte H2C_Parameter[2] ={0}; + + H2C_Parameter[0] = 0x2; // opCode, 0x2=BT_SET_PSD_MODE + + H2C_Parameter[1] = btPsdMode; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set BT PSD mode=0x%x, FW write 0x69=0x%x\n", + H2C_Parameter[1], + H2C_Parameter[0]<<8|H2C_Parameter[1])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x69, 2, H2C_Parameter); +} + + +VOID +halbtc8192e1ant_SetBtPsdMode( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte btPsdMode + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT PSD mode = 0x%x\n", + (bForceExec? "force":""), btPsdMode)); + pCoexDm->bCurBtPsdMode = btPsdMode; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtPsdMode=0x%x, bCurBtPsdMode=0x%x\n", + pCoexDm->bPreBtPsdMode, pCoexDm->bCurBtPsdMode)); + + if(pCoexDm->bPreBtPsdMode == pCoexDm->bCurBtPsdMode) + return; + } + halbtc8192e1ant_SetFwBtPsdMode(pBtCoexist, pCoexDm->bCurBtPsdMode); + + pCoexDm->bPreBtPsdMode = pCoexDm->bCurBtPsdMode; +} + + +VOID +halbtc8192e1ant_SetBtAutoReport( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bEnableAutoReport + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + H2C_Parameter[0] = 0; + + if(bEnableAutoReport) + { + H2C_Parameter[0] |= BIT0; + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", + (bEnableAutoReport? 
"Enabled!!":"Disabled!!"), H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x68, 1, H2C_Parameter); +} + +VOID +halbtc8192e1ant_BtAutoReport( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bEnableAutoReport + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s BT Auto report = %s\n", + (bForceExec? "force to":""), ((bEnableAutoReport)? "Enabled":"Disabled"))); + pCoexDm->bCurBtAutoReport = bEnableAutoReport; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", + pCoexDm->bPreBtAutoReport, pCoexDm->bCurBtAutoReport)); + + if(pCoexDm->bPreBtAutoReport == pCoexDm->bCurBtAutoReport) + return; + } + halbtc8192e1ant_SetBtAutoReport(pBtCoexist, pCoexDm->bCurBtAutoReport); + + pCoexDm->bPreBtAutoReport = pCoexDm->bCurBtAutoReport; +} + +VOID +halbtc8192e1ant_FwDacSwingLvl( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte fwDacSwingLvl + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n", + (bForceExec? "force to":""), fwDacSwingLvl)); + pCoexDm->curFwDacSwingLvl = fwDacSwingLvl; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl)); + + if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl) + return; + } + + halbtc8192e1ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl); + + pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl; +} + +VOID +halbtc8192e1ant_SetSwRfRxLpfCorner( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bRxRfShrinkOn + ) +{ + if(bRxRfShrinkOn) + { + //Shrink RF Rx LPF corner + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7); + } + else + { + //Resume RF Rx LPF corner + // After initialized, we can use pCoexDm->btRf0x1eBackup + if(pBtCoexist->initilized) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup); + } + } +} + +VOID +halbtc8192e1ant_RfShrink( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bRxRfShrinkOn + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n", + (bForceExec? "force to":""), ((bRxRfShrinkOn)? 
"ON":"OFF"))); + pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", + pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink)); + + if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink) + return; + } + halbtc8192e1ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink); + + pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink; +} + +VOID +halbtc8192e1ant_SetSwPenaltyTxRateAdaptive( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bLowPenaltyRa + ) +{ + u1Byte tmpU1; + + tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd); + tmpU1 |= BIT0; + if(bLowPenaltyRa) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n")); + tmpU1 &= ~BIT2; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n")); + tmpU1 |= BIT2; + } + + pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1); +} + +VOID +halbtc8192e1ant_LowPenaltyRa( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bLowPenaltyRa + ) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n", + (bForceExec? "force to":""), ((bLowPenaltyRa)? "ON":"OFF"))); + pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", + pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa)); + + if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa) + return; + } + halbtc8192e1ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa); + + pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa; +} + +VOID +halbtc8192e1ant_SetDacSwingReg( + IN PBTC_COEXIST pBtCoexist, + IN u4Byte level + ) +{ + u1Byte val=(u1Byte)level; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Write SwDacSwing = 0x%x\n", level)); + pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x883, 0x3e, val); +} + +VOID +halbtc8192e1ant_SetSwFullTimeDacSwing( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bSwDacSwingOn, + IN u4Byte swDacSwingLvl + ) +{ + if(bSwDacSwingOn) + { + halbtc8192e1ant_SetDacSwingReg(pBtCoexist, swDacSwingLvl); + } + else + { + halbtc8192e1ant_SetDacSwingReg(pBtCoexist, 0x18); + } +} + + +VOID +halbtc8192e1ant_DacSwing( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bDacSwingOn, + IN u4Byte dacSwingLvl + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n", + (bForceExec? "force to":""), ((bDacSwingOn)? 
"ON":"OFF"), dacSwingLvl)); + pCoexDm->bCurDacSwingOn = bDacSwingOn; + pCoexDm->curDacSwingLvl = dacSwingLvl; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl, + pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl)); + + if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) && + (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) ) + return; + } + mdelay(30); + halbtc8192e1ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl); + + pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn; + pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl; +} + +VOID +halbtc8192e1ant_SetAdcBackOff( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bAdcBackOff + ) +{ + if(bAdcBackOff) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n")); + pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x3); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n")); + pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0x8db, 0x60, 0x1); + } +} + +VOID +halbtc8192e1ant_AdcBackOff( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bAdcBackOff + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n", + (bForceExec? "force to":""), ((bAdcBackOff)? "ON":"OFF"))); + pCoexDm->bCurAdcBackOff = bAdcBackOff; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", + pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff)); + + if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff) + return; + } + halbtc8192e1ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff); + + pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff; +} + +VOID +halbtc8192e1ant_SetAgcTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bAgcTableEn + ) +{ + u1Byte rssiAdjustVal=0; + + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); + if(bAgcTableEn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x3fa58); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x37a58); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x2fa58); + rssiAdjustVal = 8; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x39258); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x31258); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x3b, 0xfffff, 0x29258); + } + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0xef, 0xfffff, 0x0); + + // set rssiAdjustVal for wifi module. + pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal); +} + + +VOID +halbtc8192e1ant_AgcTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bAgcTableEn + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n", + (bForceExec? "force to":""), ((bAgcTableEn)? 
"Enable":"Disable"))); + pCoexDm->bCurAgcTableEn = bAgcTableEn; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn)); + + if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn) + return; + } + halbtc8192e1ant_SetAgcTable(pBtCoexist, bAgcTableEn); + + pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn; +} + +VOID +halbtc8192e1ant_SetCoexTable( + IN PBTC_COEXIST pBtCoexist, + IN u4Byte val0x6c0, + IN u4Byte val0x6c4, + IN u4Byte val0x6c8, + IN u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc)); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc); +} + +VOID +halbtc8192e1ant_CoexTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u4Byte val0x6c0, + IN u4Byte val0x6c4, + IN u4Byte val0x6c8, + IN u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + (bForceExec? "force to":""), val0x6c0, val0x6c4, val0x6c8, val0x6cc)); + pCoexDm->curVal0x6c0 = val0x6c0; + pCoexDm->curVal0x6c4 = val0x6c4; + pCoexDm->curVal0x6c8 = val0x6c8; + pCoexDm->curVal0x6cc = val0x6cc; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", + pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c4, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", + pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c4, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc)); + + if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) && + (pCoexDm->preVal0x6c4 == pCoexDm->curVal0x6c4) && + (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) && + (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) ) + return; + } + halbtc8192e1ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c4, val0x6c8, val0x6cc); + + pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0; + pCoexDm->preVal0x6c4 = pCoexDm->curVal0x6c4; + pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8; + pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc; +} + +VOID +halbtc8192e1ant_CoexTableWithType( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte type + ) +{ + switch(type) + { + case 0: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x55555555, 0xffffff, 0x3); + break; + case 1: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 2: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5a5a5a5a, 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 3: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xaaaaaaaa, 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 4: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xffffffff, 0xffffffff, 0xffffff, 0x3); + break; + case 5: + 
halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5fff5fff, 0x5fff5fff, 0xffffff, 0x3); + break; + case 6: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 7: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0xddffddff, 0xddffddff, 0xffffff, 0x3); + break; + case 8: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x55ff55ff, 0x5afa5afa, 0xffffff, 0x3); + break; + case 9: + halbtc8192e1ant_CoexTable(pBtCoexist, bForceExec, 0x5f5f5f5f, 0x5f5f5f5f, 0xffffff, 0x3); + break; + default: + break; + } +} + +VOID +halbtc8192e1ant_SetFwIgnoreWlanAct( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bEnable + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + if(bEnable) + { + H2C_Parameter[0] |= BIT0; // function enable + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x63, 1, H2C_Parameter); +} + +VOID +halbtc8192e1ant_IgnoreWlanAct( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bEnable + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", + (bForceExec? "force to":""), (bEnable? "ON":"OFF"))); + pCoexDm->bCurIgnoreWlanAct = bEnable; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct)); + + if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct) + return; + } + halbtc8192e1ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable); + + pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct; +} + +VOID +halbtc8192e1ant_SetFwPstdma( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte byte1, + IN u1Byte byte2, + IN u1Byte byte3, + IN u1Byte byte4, + IN u1Byte byte5 + ) +{ + u1Byte H2C_Parameter[5] ={0}; + + H2C_Parameter[0] = byte1; + H2C_Parameter[1] = byte2; + H2C_Parameter[2] = byte3; + H2C_Parameter[3] = byte4; + H2C_Parameter[4] = byte5; + + pCoexDm->psTdmaPara[0] = byte1; + pCoexDm->psTdmaPara[1] = byte2; + pCoexDm->psTdmaPara[2] = byte3; + pCoexDm->psTdmaPara[3] = byte4; + pCoexDm->psTdmaPara[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + H2C_Parameter[0], + H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x60, 5, H2C_Parameter); +} + +VOID +halbtc8192e1ant_SetLpsRpwm( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte lpsVal, + IN u1Byte rpwmVal + ) +{ + u1Byte lps=lpsVal; + u1Byte rpwm=rpwmVal; + + pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_LPS, &lps); + pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_1ANT_RPWM, &rpwm); +} + +VOID +halbtc8192e1ant_LpsRpwm( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte lpsVal, + IN u1Byte rpwmVal + ) +{ + BOOLEAN bForceExecPwrCmd=FALSE; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set lps/rpwm=0x%x/0x%x \n", + (bForceExec? 
"force to":""), lpsVal, rpwmVal)); + pCoexDm->curLps = lpsVal; + pCoexDm->curRpwm = rpwmVal; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preLps/curLps=0x%x/0x%x, preRpwm/curRpwm=0x%x/0x%x!!\n", + pCoexDm->preLps, pCoexDm->curLps, pCoexDm->preRpwm, pCoexDm->curRpwm)); + + if( (pCoexDm->preLps == pCoexDm->curLps) && + (pCoexDm->preRpwm == pCoexDm->curRpwm) ) + { + return; + } + } + halbtc8192e1ant_SetLpsRpwm(pBtCoexist, lpsVal, rpwmVal); + + pCoexDm->preLps = pCoexDm->curLps; + pCoexDm->preRpwm = pCoexDm->curRpwm; +} + +VOID +halbtc8192e1ant_SwMechanism1( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bShrinkRxLPF, + IN BOOLEAN bLowPenaltyRA, + IN BOOLEAN limited_dig, + IN BOOLEAN bBTLNAConstrain + ) +{ + //halbtc8192e1ant_RfShrink(pBtCoexist, NORMAL_EXEC, bShrinkRxLPF); + //halbtc8192e1ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, bLowPenaltyRA); + + //no limited DIG + //halbtc8192e1ant_SetBtLnaConstrain(pBtCoexist, NORMAL_EXEC, bBTLNAConstrain); +} + +VOID +halbtc8192e1ant_SwMechanism2( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bAGCTableShift, + IN BOOLEAN bADCBackOff, + IN BOOLEAN bSWDACSwing, + IN u4Byte dacSwingLvl + ) +{ + //halbtc8192e1ant_AgcTable(pBtCoexist, NORMAL_EXEC, bAGCTableShift); + //halbtc8192e1ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, bADCBackOff); + //halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, bSWDACSwing, dacSwingLvl); +} + +VOID +halbtc8192e1ant_PsTdma( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bTurnOn, + IN u1Byte type + ) +{ + BOOLEAN bTurnOnByCnt=FALSE; + u1Byte psTdmaTypeByCnt=0, rssiAdjustVal=0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", + (bForceExec? "force to":""), (bTurnOn? "ON":"OFF"), type)); + pCoexDm->bCurPsTdmaOn = bTurnOn; + pCoexDm->curPsTdma = type; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + pCoexDm->prePsTdma, pCoexDm->curPsTdma)); + + if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) && + (pCoexDm->prePsTdma == pCoexDm->curPsTdma) ) + return; + } + if(bTurnOn) + { + switch(type) + { + default: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50); + break; + case 1: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x2c, 0x03, 0x10, 0x50); + rssiAdjustVal = 11; + break; + case 2: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x03, 0x10, 0x50); + rssiAdjustVal = 14; + break; + case 3: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x40); + break; + case 4: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0); + rssiAdjustVal = 17; + break; + case 5: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x61, 0x15, 0x3, 0x31, 0x0); + break; + case 6: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x0, 0x0); + break; + case 7: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xc, 0x5, 0x0, 0x0); + break; + case 8: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0); + break; + case 9: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x1e, 0x03, 0x10, 0x50); + rssiAdjustVal = 18; + break; + case 10: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0xa, 0x0, 0x40); + break; + case 11: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x12, 0x03, 0x10, 0x50); + rssiAdjustVal = 20; + break; + case 12: + 
halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xeb, 0xa, 0x3, 0x31, 0x18); + break; + + case 15: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0xa, 0x3, 0x8, 0x0); + break; + case 16: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x10, 0x0); + rssiAdjustVal = 18; + break; + + case 18: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x25, 0x3, 0x10, 0x0); + rssiAdjustVal = 14; + break; + + case 20: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x25, 0x25, 0x0, 0x0); + break; + case 21: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x20, 0x3, 0x10, 0x40); + break; + case 22: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x13, 0x8, 0x8, 0x0, 0x40); + break; + case 23: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x18); + rssiAdjustVal = 22; + break; + case 24: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x15, 0x3, 0x31, 0x18); + rssiAdjustVal = 22; + break; + case 25: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18); + rssiAdjustVal = 22; + break; + case 26: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0x3, 0x31, 0x18); + rssiAdjustVal = 22; + break; + case 27: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x3, 0x31, 0x98); + rssiAdjustVal = 22; + break; + case 28: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x69, 0x25, 0x3, 0x31, 0x0); + break; + case 29: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0x1a, 0x1a, 0x1, 0x10); + break; + case 30: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x93, 0x15, 0x3, 0x14, 0x0); + break; + case 31: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0, 0x58); + break; + case 32: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xab, 0xa, 0x3, 0x31, 0x90); + break; + case 33: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xa3, 0x25, 0x3, 0x30, 0x90); + break; + case 34: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x1a, 0x1a, 0x0, 0x10); + break; + case 35: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x0, 0x10); + break; + case 36: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xd3, 0x12, 0x3, 0x14, 0x50); + break; + case 37: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x53, 0x25, 0x3, 0x10, 0x50); + break; + case 38: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x90); + break; + } + } + else + { + // disable PS tdma + switch(type) + { + case 8: //0x778 = 1, ant2PTA + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x8, 0x0, 0x0, 0x0, 0x0); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4); + break; + case 0: //0x778 = 1, ant2BT + default: + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0); + mdelay(5); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20); + break; + case 9: //0x778 = 1, ant2WIFI + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x4); + break; + case 10: //0x778 = 3, ant2BT + halbtc8192e1ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0); + mdelay(5); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20); + break; + } + } + rssiAdjustVal =0; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, &rssiAdjustVal); + + // update pre state + pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn; + pCoexDm->prePsTdma = pCoexDm->curPsTdma; +} + +VOID +halbtc8192e1ant_SetSwitchSsType( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte ssType + ) +{ + u1Byte mimoPs=BTC_MIMO_PS_DYNAMIC; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], REAL set SS Type = %d\n", ssType)); + + if(ssType == 1) + { + halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, 
BTC_RATE_DISABLE, 0xfff00000); // disable 2ss + halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0); + // switch ofdm path + pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x11); + pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x1); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81111111); + // switch cck patch + pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x1); + pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x81); + mimoPs=BTC_MIMO_PS_STATIC; + } + else if(ssType == 2) + { + halbtc8192e1ant_Updatera_mask(pBtCoexist, FORCE_EXEC, BTC_RATE_ENABLE, 0xfff00000); // enable 2ss + halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8); + pBtCoexist->btc_write_1byte(pBtCoexist, 0xc04, 0x33); + pBtCoexist->btc_write_1byte(pBtCoexist, 0xd04, 0x3); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x90c, 0x81121313); + pBtCoexist->btc_write_1byte_bitmask(pBtCoexist, 0xe77, 0x4, 0x0); + pBtCoexist->btc_write_1byte(pBtCoexist, 0xa07, 0x41); + mimoPs=BTC_MIMO_PS_DYNAMIC; + } + + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimoPs); // set rx 1ss or 2ss +} + +VOID +halbtc8192e1ant_SwitchSsType( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte newSsType + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], %s Switch SS Type = %d\n", + (bForceExec? "force to":""), newSsType)); + pCoexDm->curSsType = newSsType; + + if(!bForceExec) + { + if(pCoexDm->preSsType == pCoexDm->curSsType) + return; + } + halbtc8192e1ant_SetSwitchSsType(pBtCoexist, pCoexDm->curSsType); + + pCoexDm->preSsType = pCoexDm->curSsType; +} + +VOID +halbtc8192e1ant_CoexAllOff( + IN PBTC_COEXIST pBtCoexist + ) +{ + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + + // sw all off + halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE); + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + + + // hw all off + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); +} + +BOOLEAN +halbtc8192e1ant_IsCommonAction( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bCommon=FALSE, bWifiConnected=FALSE, bWifiBusy=FALSE; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + + if(!bWifiConnected && + BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n")); + halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE); + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + + bCommon = true; + } + else if(bWifiConnected && + (BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT non connected-idle!!\n")); + halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE); + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + + bCommon = true; + } + else if(!bWifiConnected && + (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT connected-idle!!\n")); + halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE); + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + + bCommon = true; + } + else if(bWifiConnected && + (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi connected + BT connected-idle!!\n")); + 
halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+		bCommon = true;
+	}
+	else if(!bWifiConnected &&
+		(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus) )
+	{
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+		halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE);
+		halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18);
+
+		bCommon = true;
+	}
+	else
+	{
+		halbtc8192e1ant_SwMechanism1(pBtCoexist,true,true,true,true);
+
+		bCommon = FALSE;
+	}
+
+	return bCommon;
+}
+
+
+VOID
+halbtc8192e1ant_TdmaDurationAdjustForAcl(
+	IN	PBTC_COEXIST		pBtCoexist,
+	IN	u1Byte			wifiStatus
+	)
+{
+	static s4Byte		up,dn,m,n,WaitCount;
+	s4Byte			result;   //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration
+	u1Byte			retryCount=0, btInfoExt;
+
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjustForAcl()\n"));
+
+	if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) ||
+		(BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) ||
+		(BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) )
+	{
+		if( pCoexDm->curPsTdma != 1 &&
+			pCoexDm->curPsTdma != 2 &&
+			pCoexDm->curPsTdma != 3 &&
+			pCoexDm->curPsTdma != 9 )
+		{
+			halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9);
+			pCoexDm->psTdmaDuAdjType = 9;
+
+			up = 0;
+			dn = 0;
+			m = 1;
+			n= 3;
+			result = 0;
+			WaitCount = 0;
+		}
+		return;
+	}
+
+	if(!pCoexDm->bAutoTdmaAdjust)
+	{
+		pCoexDm->bAutoTdmaAdjust = true;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n"));
+
+		halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2);
+		pCoexDm->psTdmaDuAdjType = 2;
+		//============
+		up = 0;
+		dn = 0;
+		m = 1;
+		n= 3;
+		result = 0;
+		WaitCount = 0;
+	}
+	else
+	{
+		//acquire the BT TRx retry count from BT_Info byte2
+		retryCount = pCoexSta->btRetryCnt;
+		btInfoExt = pCoexSta->btInfoExt;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount));
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n",
+			up, dn, m, n, WaitCount));
+		result = 0;
+		WaitCount++;
+
+		if(retryCount == 0)  // no retry in the last 2-second duration
+		{
+			up++;
+			dn--;
+
+			if (dn <= 0)
+				dn = 0;
+
+			if(up >= n)	// if the retry count is 0 for n consecutive 2-second periods, widen the WiFi duration
+			{
+				WaitCount = 0;
+				n = 3;
+				up = 0;
+				dn = 0;
+				result = 1;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n"));
+			}
+		}
+		else if (retryCount <= 3)	// <=3 retry in the last 2-second duration
+		{
+			up--;
+			dn++;
+
+			if (up <= 0)
+				up = 0;
+
+			if (dn == 2)	// if the retry count is < 3 for 2 consecutive 2-second periods, narrow the WiFi duration
+			{
+				if (WaitCount <= 2)
+					m++; // to avoid bouncing back and forth between two levels
+				else
+					m = 1;
+
+				if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+					m = 20;
+
+				n = 3*m;
+				up = 0;
+				dn = 0;
+				WaitCount = 0;
+				result = -1;
+				BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n"));
+			}
+		}
+		else	// retry count > 3: narrow the WiFi duration as soon as the retry count exceeds 3 once
+		{
+			if (WaitCount == 1)
+				m++; // to avoid bouncing back and forth between two levels
+			else
+				m = 1;
+
+			if ( m >= 20) // cap m at 20, i.e. recheck whether to adjust the WiFi duration at most every 120 seconds
+ m = 20; + + n = 3*m; + up = 0; + dn = 0; + WaitCount = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n")); + } + + if(result == -1) + { + if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) && + ((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) ) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 1) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + } + else if(result == 1) + { + if( (BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(btInfoExt)) && + ((pCoexDm->curPsTdma == 1) ||(pCoexDm->curPsTdma == 2)) ) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + pCoexDm->psTdmaDuAdjType = 1; + } + } + + if( pCoexDm->curPsTdma != 1 && + pCoexDm->curPsTdma != 2 && + pCoexDm->curPsTdma != 9 && + pCoexDm->curPsTdma != 11 ) + { + // recover to previous adjust type + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType); + } + } +} + +u1Byte +halbtc8192e1ant_PsTdmaTypeByWifiRssi( + IN s4Byte wifiRssi, + IN s4Byte preWifiRssi, + IN u1Byte wifiRssiThresh + ) +{ + u1Byte psTdmaType=0; + + if(wifiRssi > preWifiRssi) + { + if(wifiRssi > (wifiRssiThresh+5)) + { + psTdmaType = 26; + } + else + { + psTdmaType = 25; + } + } + else + { + if(wifiRssi > wifiRssiThresh) + { + psTdmaType = 26; + } + else + { + psTdmaType = 25; + } + } + + return psTdmaType; +} + +VOID +halbtc8192e1ant_PsTdmaCheckForPowerSaveState( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bNewPsState + ) +{ + u1Byte lpsMode=0x0; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_LPS_MODE, &lpsMode); + + if(lpsMode) // already under LPS state + { + if(bNewPsState) + { + // keep state under LPS, do nothing. + } + else + { + // will leave LPS state, turn off psTdma first + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + } + } + else // NO PS state + { + if(bNewPsState) + { + // will enter LPS state, turn off psTdma first + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + } + else + { + // keep state under NO PS state, do nothing. + } + } +} + +VOID +halbtc8192e1ant_PowerSaveState( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte psType, + IN u1Byte lpsVal, + IN u1Byte rpwmVal + ) +{ + BOOLEAN bLowPwrDisable=FALSE; + + switch(psType) + { + case BTC_PS_WIFI_NATIVE: + // recover to original 32k low power setting + bLowPwrDisable = FALSE; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable); + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + break; + case BTC_PS_LPS_ON: + halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, true); + halbtc8192e1ant_LpsRpwm(pBtCoexist, NORMAL_EXEC, lpsVal, rpwmVal); + // when coex force to enter LPS, do not enter 32k low power. 
+ bLowPwrDisable = true; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable); + // power save must executed before psTdma. + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_ENTER_LPS, NULL); + break; + case BTC_PS_LPS_OFF: + halbtc8192e1ant_PsTdmaCheckForPowerSaveState(pBtCoexist, FALSE); + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + break; + default: + break; + } +} + + +VOID +halbtc8192e1ant_ActionWifiOnly( + IN PBTC_COEXIST pBtCoexist + ) +{ + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); +} + +VOID +halbtc8192e1ant_MonitorBtEnableDisable( + IN PBTC_COEXIST pBtCoexist + ) +{ + static BOOLEAN bPreBtDisabled=FALSE; + static u4Byte btDisableCnt=0; + BOOLEAN bBtActive=true, bBtDisabled=FALSE; + + // This function check if bt is disabled + + if( pCoexSta->highPriorityTx == 0 && + pCoexSta->highPriorityRx == 0 && + pCoexSta->lowPriorityTx == 0 && + pCoexSta->lowPriorityRx == 0) + { + bBtActive = FALSE; + } + if( pCoexSta->highPriorityTx == 0xffff && + pCoexSta->highPriorityRx == 0xffff && + pCoexSta->lowPriorityTx == 0xffff && + pCoexSta->lowPriorityRx == 0xffff) + { + bBtActive = FALSE; + } + if(bBtActive) + { + btDisableCnt = 0; + bBtDisabled = FALSE; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n")); + } + else + { + btDisableCnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", + btDisableCnt)); + if(btDisableCnt >= 2) + { + bBtDisabled = true; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n")); + halbtc8192e1ant_ActionWifiOnly(pBtCoexist); + } + } + if(bPreBtDisabled != bBtDisabled) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", + (bPreBtDisabled ? "disabled":"enabled"), + (bBtDisabled ? 
"disabled":"enabled"))); + bPreBtDisabled = bBtDisabled; + if(!bBtDisabled) + { + } + else + { + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + } + } +} + +//============================================= +// +// Software Coex Mechanism start +// +//============================================= + +// SCO only or SCO+PAN(HS) +VOID +halbtc8192e1ant_ActionSco( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + + +VOID +halbtc8192e1ant_ActionHid( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,FALSE,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +//A2DP only / PAN(EDR) only/ A2DP+PAN(HS) +VOID +halbtc8192e1ant_ActionA2dp( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +VOID +halbtc8192e1ant_ActionA2dpPanHs( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + 
{ + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +VOID +halbtc8192e1ant_ActionPanEdr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + + +//PAN(HS) only +VOID +halbtc8192e1ant_ActionPanHs( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +//PAN(EDR)+A2DP +VOID +halbtc8192e1ant_ActionPanEdrA2dp( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +VOID +halbtc8192e1ant_ActionPanEdrHid( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + 
halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +// HID+A2DP+PAN(EDR) +VOID +halbtc8192e1ant_ActionHidA2dpPanEdr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +VOID +halbtc8192e1ant_ActionHidA2dp( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + wifiRssiState = halbtc8192e1ant_WifiRssiState(pBtCoexist, 0, 2, 25, 0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + + if(BTC_WIFI_BW_HT40 == wifiBw) + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,true,FALSE,0x18); + } + } + else + { + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,true,true,FALSE,0x18); + } + else + { + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + } + } +} + +//============================================= +// +// Non-Software Coex Mechanism start +// +//============================================= +VOID +halbtc8192e1ant_ActionBtInquiry( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE; + + // Note: + // Do not do DacSwing here, use original setting. 
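+	// While BT is in inquiry/page scan, the code below only picks a PS TDMA
+	// case and coex table by link state: WiFi not connected -> PS TDMA off
+	// (case 8), SCO or HID-only link -> PS TDMA on with case 32, otherwise
+	// enter LPS and run PS TDMA case 30.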
+ + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + if(bBtHsOn) + return; + + if(!bWifiConnected) + { + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); + } + else if( (pBtLinkInfo->bScoExist) || + (pBtLinkInfo->bHidOnly) ) + { + // SCO/HID-only busy + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 32); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1); + } + else + { + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0); + + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 30); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); + } +} + +VOID +halbtc8192e1ant_ActionBtScoHidOnlyBusy( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte wifiStatus + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + u1Byte btRssiState=BTC_RSSI_STATE_HIGH; + + if(BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); + } + else + { + if(pBtLinkInfo->bHidOnly) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } + else + { + // dec bt power for diff level + btRssiState = halbtc8192e1ant_BtRssiState(3, 34, 42); + if( (btRssiState == BTC_RSSI_STATE_LOW) || + (btRssiState == BTC_RSSI_STATE_STAY_LOW) ) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + } + else if( (btRssiState == BTC_RSSI_STATE_MEDIUM) || + (btRssiState == BTC_RSSI_STATE_STAY_MEDIUM) ) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 2); + } + else if( (btRssiState == BTC_RSSI_STATE_HIGH) || + (btRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 6); + } + + // sw dacSwing + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 0xc); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 7); + } + } +} + +VOID +halbtc8192e1ant_ActionHs( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action for HS!!!\n")); + + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) + { + // error, should not be here + pCoexDm->errorCondition = 1; + } + else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); + } + else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == 
pCoexDm->btStatus && + !pBtCoexist->bt_link_info.bHidOnly) + { + if(pCoexDm->curSsType == 1) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, true, 6); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 10); + //halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 38); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); + } + } + else + { + halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } +} + +VOID +halbtc8192e1ant_ActionWifiConnectedBtAclBusy( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte wifiStatus + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + + if(pBtLinkInfo->bHidOnly) + { + halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, wifiStatus); + pCoexDm->bAutoTdmaAdjust = FALSE; + return; + } + + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + if( (pBtLinkInfo->bA2dpOnly) || + (pBtLinkInfo->bHidExist&&pBtLinkInfo->bA2dpExist) ) + { + halbtc8192e1ant_TdmaDurationAdjustForAcl(pBtCoexist, wifiStatus); + } + else if( (pBtLinkInfo->bPanOnly) || + (pBtLinkInfo->bHidExist&&pBtLinkInfo->bPanExist) ) + { + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->bAutoTdmaAdjust = FALSE; + } + else + { + if( (BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == wifiStatus) || + (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifiStatus) || + (BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifiStatus) ) + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + else + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->bAutoTdmaAdjust = FALSE; + } + + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 1); +} + + +VOID +halbtc8192e1ant_ActionWifiNotConnected( + IN PBTC_COEXIST pBtCoexist + ) +{ + // power save state + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); +} + +VOID +halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan( + IN PBTC_COEXIST pBtCoexist + ) +{ + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 0); +} + +VOID +halbtc8192e1ant_ActionWifiConnectedScan( + IN PBTC_COEXIST pBtCoexist + ) +{ + // power save state + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly) + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0); + else + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) + { + halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } + else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) || + (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) ) + { + halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } + else + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, 
FALSE, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } +} + + +VOID +halbtc8192e1ant_ActionWifiConnectedSpecialPacket( + IN PBTC_COEXIST pBtCoexist + ) +{ + // power save state + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly) + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0); + else + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) + { + halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT); + } + else + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } +} + +VOID +halbtc8192e1ant_ActionWifiConnected( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bWifiConnected=FALSE, bWifiBusy=FALSE; + BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE; + BOOLEAN bUnder4way=FALSE; + u4Byte wifiBw; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect()===>\n")); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(!bWifiConnected) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi not connected<===\n")); + return; + } + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, &bUnder4way); + if(bUnder4way) + { + halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n")); + return; + } + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + if(bScan || bLink || bRoam) + { + halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n")); + return; + } + + // power save state + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus && !pBtCoexist->bt_link_info.bHidOnly) + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_LPS_ON, 0x50, 0x0); + else + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + if(!bWifiBusy) + { + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) + { + halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } + else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) || + (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) ) + { + halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } + else + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } + } + else + { + if(BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5); + 
halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } + else if(BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } + else if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) + { + halbtc8192e1ant_ActionWifiConnectedBtAclBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } + else if( (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) || + (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) ) + { + halbtc8192e1ant_ActionBtScoHidOnlyBusy(pBtCoexist, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } + else + { + halbtc8192e1ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 0); + halbtc8192e1ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, NORMAL_EXEC, 2); + } + } +} + +VOID +halbtc8192e1ant_RunSwCoexistMechanism( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bWifiUnder5G=FALSE, bWifiBusy=FALSE, bWifiConnected=FALSE; + u1Byte btInfoOriginal=0, btRetryCnt=0; + u1Byte algorithm=0; + + return; + + algorithm = halbtc8192e1ant_ActionAlgorithm(pBtCoexist); + pCoexDm->curAlgorithm = algorithm; + + if(halbtc8192e1ant_IsCommonAction(pBtCoexist)) + { + } + else + { + switch(pCoexDm->curAlgorithm) + { + case BT_8192E_1ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = SCO.\n")); + halbtc8192e1ant_ActionSco(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID.\n")); + halbtc8192e1ant_ActionHid(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP.\n")); + halbtc8192e1ant_ActionA2dp(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = A2DP+PAN(HS).\n")); + halbtc8192e1ant_ActionA2dpPanHs(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR).\n")); + halbtc8192e1ant_ActionPanEdr(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HS mode.\n")); + halbtc8192e1ant_ActionPanHs(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN+A2DP.\n")); + halbtc8192e1ant_ActionPanEdrA2dp(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = PAN(EDR)+HID.\n")); + halbtc8192e1ant_ActionPanEdrHid(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP+PAN.\n")); + halbtc8192e1ant_ActionHidA2dpPanEdr(pBtCoexist); + break; + case BT_8192E_1ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = HID+A2DP.\n")); + halbtc8192e1ant_ActionHidA2dp(pBtCoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action algorithm = coexist All Off!!\n")); + halbtc8192e1ant_CoexAllOff(pBtCoexist); + break; + } + pCoexDm->preAlgorithm = 
pCoexDm->curAlgorithm; + } +} + +VOID +halbtc8192e1ant_RunCoexistMechanism( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism()===>\n")); + + if(pBtCoexist->manual_control) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n")); + return; + } + + if(pBtCoexist->bStopCoexDm) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n")); + return; + } + + if(pCoexSta->bUnderIps) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is under IPS !!!\n")); + return; + } + + halbtc8192e1ant_RunSwCoexistMechanism(pBtCoexist); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + if(pCoexSta->bC2hBtInquiryPage) + { + halbtc8192e1ant_ActionBtInquiry(pBtCoexist); + return; + } + + // 1ss or 2ss + if(pBtLinkInfo->bScoExist) + { + halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1); + } + else if(bBtHsOn) + { + if(pBtLinkInfo->bHidOnly) + halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2); + else + halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 1); + } + else + halbtc8192e1ant_SwitchSsType(pBtCoexist, NORMAL_EXEC, 2); + + if(bBtHsOn) + { + halbtc8192e1ant_ActionHs(pBtCoexist); + return; + } + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(!bWifiConnected) + { + BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], wifi is non connected-idle !!!\n")); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + + if(bScan || bLink || bRoam) + halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist); + else + halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist); + } + else + { + halbtc8192e1ant_ActionWifiConnected(pBtCoexist); + } +} + +VOID +halbtc8192e1ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ) +{ + // force to reset coex mechanism + halbtc8192e1ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 6); + halbtc8192e1ant_DecBtPwr(pBtCoexist, FORCE_EXEC, 0); + + // sw all off + halbtc8192e1ant_SwMechanism1(pBtCoexist,FALSE,FALSE,FALSE,FALSE); + halbtc8192e1ant_SwMechanism2(pBtCoexist,FALSE,FALSE,FALSE,0x18); + + halbtc8192e1ant_SwitchSsType(pBtCoexist, FORCE_EXEC, 2); + + halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 8); + halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0); +} + +//============================================================ +// work around function start with wa_halbtc8192e1ant_ +//============================================================ +//============================================================ +// extern function start with EXhalbtc8192e1ant_ +//============================================================ +VOID +EXhalbtc8192e1ant_InitHwConfig( + IN PBTC_COEXIST pBtCoexist + ) +{ + u4Byte u4Tmp=0; + u16 u2Tmp=0; + u1Byte u1Tmp=0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 1Ant Init HW Config!!\n")); + + // backup rf 0x1e value + pCoexDm->btRf0x1eBackup = + pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff); + + // antenna sw ctrl to bt + pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0x6); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x944, 0x24); + pBtCoexist->btc_write_4byte(pBtCoexist, 
0x930, 0x700700); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x92c, 0x20); + if(pBtCoexist->chipInterface == BTC_INTF_USB) + pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30430004); + else + pBtCoexist->btc_write_4byte(pBtCoexist, 0x64, 0x30030004); + + halbtc8192e1ant_CoexTableWithType(pBtCoexist, FORCE_EXEC, 0); + + // antenna switch control parameter + pBtCoexist->btc_write_4byte(pBtCoexist, 0x858, 0x55555555); + + // coex parameters + pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x1); + // 0x790[5:0]=0x5 + u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x790); + u1Tmp &= 0xc0; + u1Tmp |= 0x5; + pBtCoexist->btc_write_1byte(pBtCoexist, 0x790, u1Tmp); + + // enable counter statistics + pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4); + + // enable PTA + pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20); + // enable mailbox interface + u2Tmp = pBtCoexist->btc_read_2byte(pBtCoexist, 0x40); + u2Tmp |= BIT9; + pBtCoexist->btc_write_2byte(pBtCoexist, 0x40, u2Tmp); + + // enable PTA I2C mailbox + u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x101); + u1Tmp |= BIT4; + pBtCoexist->btc_write_1byte(pBtCoexist, 0x101, u1Tmp); + + // enable bt clock when wifi is disabled. + u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x93); + u1Tmp |= BIT0; + pBtCoexist->btc_write_1byte(pBtCoexist, 0x93, u1Tmp); + // enable bt clock when suspend. + u1Tmp = pBtCoexist->btc_read_1byte(pBtCoexist, 0x7); + u1Tmp |= BIT0; + pBtCoexist->btc_write_1byte(pBtCoexist, 0x7, u1Tmp); +} + +VOID +EXhalbtc8192e1ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n")); + + pBtCoexist->bStopCoexDm = FALSE; + + halbtc8192e1ant_InitCoexDm(pBtCoexist); +} + +VOID +EXhalbtc8192e1ant_DisplayCoexInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + struct btc_board_info * pBoardInfo=&pBtCoexist->board_info; + PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info; + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + pu1Byte cliBuf=pBtCoexist->cli_buf; + u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0; + u4Byte u4Tmp[4]; + BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE; + BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE; + s4Byte wifiRssi=0, btHsRssi=0; + u4Byte wifiBw, wifiTrafficDir; + u1Byte wifiDot11Chnl, wifiHsChnl; + u4Byte fwVer=0, btPatchVer=0; + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cliBuf); + + if(pBtCoexist->manual_control) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Under Manual Control]============"); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n =========================================="); + CL_PRINTF(cliBuf); + } + if(pBtCoexist->bStopCoexDm) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[Coex is STOPPED]============"); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n =========================================="); + CL_PRINTF(cliBuf); + } + + if(!pBoardInfo->bt_exist) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cliBuf); + return; + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \ + pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \ + ((pStackInfo->bProfileNotified)? 
"Yes":"No"), pStackInfo->hciVersion); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", "CoexVer/ FwVer/ PatchVer", \ + GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \ + wifiDot11Chnl, wifiHsChnl, bBtHsOn); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \ + pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1], + pCoexDm->wifiChnlInfo[2]); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \ + wifiRssi, btHsRssi); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \ + bLink, bRoam, bScan); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \ + (bWifiUnder5G? "5G":"2.4G"), + ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))), + ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink"))); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \ + ((pBtCoexist->btInfo.bBtDisabled)? ("disabled"): ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE == pCoexDm->btStatus)? "non-connected idle": + ( (BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy")))), + pCoexSta->btRssi, pCoexSta->btRetryCnt); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \ + pBtLinkInfo->bScoExist, pBtLinkInfo->bHidExist, pBtLinkInfo->bPanExist, pBtLinkInfo->bA2dpExist); + CL_PRINTF(cliBuf); + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO); + + btInfoExt = pCoexSta->btInfoExt; + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \ + (btInfoExt&BIT0)? 
"Basic rate":"EDR rate"); + CL_PRINTF(cliBuf); + + for(i=0; ibtInfoC2hCnt[i]) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8192e1Ant[i], \ + pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1], + pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3], + pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5], + pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]); + CL_PRINTF(cliBuf); + } + } + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s, (0x%x/0x%x)", "PS state, IPS/LPS, (lps/rpwm)", \ + ((pCoexSta->bUnderIps? "IPS ON":"IPS OFF")), + ((pCoexSta->bUnderLps? "LPS ON":"LPS OFF")), + pBtCoexist->btInfo.lps1Ant, + pBtCoexist->btInfo.rpwm_1ant); + CL_PRINTF(cliBuf); + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type", \ + pCoexDm->curSsType); + CL_PRINTF(cliBuf); + + if(!pBtCoexist->manual_control) + { + // Sw mechanism + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============"); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d ", "SM1[ShRf/ LpRA/ LimDig/ btLna]", \ + pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig, pCoexDm->bCurBtLnaConstrain); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \ + pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %d ", "DelBA/ BtCtrlAgg/ AggSize", \ + (pBtCoexist->btInfo.reject_agg_pkt? "Yes":"No"), (pBtCoexist->btInfo.b_bt_ctrl_agg_buf_size? "Yes":"No"), + pBtCoexist->btInfo.aggBufSize); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask", \ + pBtCoexist->btInfo.ra_mask); + CL_PRINTF(cliBuf); + + // Fw mechanism + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============"); + CL_PRINTF(cliBuf); + + psTdmaCase = pCoexDm->curPsTdma; + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", "PS TDMA", \ + pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1], + pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3], + pCoexDm->psTdmaPara[4], psTdmaCase, pCoexDm->bAutoTdmaAdjust); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Latest error condition(should be 0)", \ + pCoexDm->errorCondition); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwrLvl/ IgnWlanAct", \ + pCoexDm->curBtDecPwrLvl, pCoexDm->bCurIgnoreWlanAct); + CL_PRINTF(cliBuf); + } + + // Hw setting + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============"); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \ + pCoexDm->btRf0x1eBackup); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc04); + u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xd04); + u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x90c); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0xc04/ 0xd04/ 0x90c", \ + u4Tmp[0], u4Tmp[1], u4Tmp[2]); + CL_PRINTF(cliBuf); + + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", \ + u1Tmp[0]); + CL_PRINTF(cliBuf); + + u1Tmp[0] = 
pBtCoexist->btc_read_1byte(pBtCoexist, 0x92c); + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x930); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x92c/ 0x930", \ + (u1Tmp[0]), u4Tmp[0]); + CL_PRINTF(cliBuf); + + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40); + u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4f); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x40/ 0x4f", \ + u1Tmp[0], u1Tmp[1]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550); + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \ + u4Tmp[0], u1Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0); + u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4); + u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8); + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \ + u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770(hp rx[31:16]/tx[15:0])", \ + pCoexSta->highPriorityRx, pCoexSta->highPriorityTx); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \ + pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx); + CL_PRINTF(cliBuf); +#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 1) + halbtc8192e1ant_MonitorBtCtr(pBtCoexist); +#endif + + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + + +VOID +EXhalbtc8192e1ant_IpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + u4Byte u4Tmp=0; + + if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm) + return; + + if(BTC_IPS_ENTER == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n")); + pCoexSta->bUnderIps = true; + halbtc8192e1ant_CoexAllOff(pBtCoexist); + } + else if(BTC_IPS_LEAVE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n")); + pCoexSta->bUnderIps = FALSE; + } +} + +VOID +EXhalbtc8192e1ant_LpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(pBtCoexist->manual_control || pBtCoexist->bStopCoexDm) + return; + + if(BTC_LPS_ENABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n")); + pCoexSta->bUnderLps = true; + } + else if(BTC_LPS_DISABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n")); + pCoexSta->bUnderLps = FALSE; + } +} + +VOID +EXhalbtc8192e1ant_ScanNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE; + + if(pBtCoexist->manual_control || + pBtCoexist->bStopCoexDm || + pBtCoexist->btInfo.bBtDisabled ) + return; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + if(pCoexSta->bC2hBtInquiryPage) + { + halbtc8192e1ant_ActionBtInquiry(pBtCoexist); + return; + } + else if(bBtHsOn) + { + halbtc8192e1ant_ActionHs(pBtCoexist); + return; + } + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(BTC_SCAN_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")); + 
if(!bWifiConnected) // non-connected scan + { + halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist); + } + else // wifi is connected + { + halbtc8192e1ant_ActionWifiConnectedScan(pBtCoexist); + } + } + else if(BTC_SCAN_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")); + if(!bWifiConnected) // non-connected scan + { + halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist); + } + else + { + halbtc8192e1ant_ActionWifiConnected(pBtCoexist); + } + } +} + +VOID +EXhalbtc8192e1ant_ConnectNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE; + + if(pBtCoexist->manual_control || + pBtCoexist->bStopCoexDm || + pBtCoexist->btInfo.bBtDisabled ) + return; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + if(pCoexSta->bC2hBtInquiryPage) + { + halbtc8192e1ant_ActionBtInquiry(pBtCoexist); + return; + } + else if(bBtHsOn) + { + halbtc8192e1ant_ActionHs(pBtCoexist); + return; + } + + if(BTC_ASSOCIATE_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n")); + halbtc8192e1ant_ActionWifiNotConnectedAssoAuthScan(pBtCoexist); + } + else if(BTC_ASSOCIATE_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n")); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(!bWifiConnected) // non-connected scan + { + halbtc8192e1ant_ActionWifiNotConnected(pBtCoexist); + } + else + { + halbtc8192e1ant_ActionWifiConnected(pBtCoexist); + } + } +} + +VOID +EXhalbtc8192e1ant_MediaStatusNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + u1Byte H2C_Parameter[3] ={0}; + u4Byte wifiBw; + u1Byte wifiCentralChnl; + + if(pBtCoexist->manual_control || + pBtCoexist->bStopCoexDm || + pBtCoexist->btInfo.bBtDisabled ) + return; + + if(BTC_MEDIA_CONNECT == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n")); + } + else + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n")); + } + + // only 2.4G we need to inform bt the chnl mask + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl); + if( (BTC_MEDIA_CONNECT == type) && + (wifiCentralChnl <= 14) ) + { + H2C_Parameter[0] = 0x1; + H2C_Parameter[1] = wifiCentralChnl; + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + H2C_Parameter[2] = 0x30; + else + H2C_Parameter[2] = 0x20; + } + + pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0]; + pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1]; + pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x66=0x%x\n", + H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x66, 3, H2C_Parameter); +} + +VOID +EXhalbtc8192e1ant_SpecialPacketNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + BOOLEAN bBtHsOn=FALSE; + + if(pBtCoexist->manual_control || + pBtCoexist->bStopCoexDm || + pBtCoexist->btInfo.bBtDisabled ) + return; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + if(pCoexSta->bC2hBtInquiryPage) + { + halbtc8192e1ant_ActionBtInquiry(pBtCoexist); + return; + } + else if(bBtHsOn) + { + halbtc8192e1ant_ActionHs(pBtCoexist); + return; + } + + if( BTC_PACKET_DHCP == type || + BTC_PACKET_EAPOL == type ) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], special Packet(%d) notify\n", type)); + 
halbtc8192e1ant_ActionWifiConnectedSpecialPacket(pBtCoexist); + } +} + +VOID +EXhalbtc8192e1ant_BtInfoNotify( + IN PBTC_COEXIST pBtCoexist, + IN pu1Byte tmpBuf, + IN u1Byte length + ) +{ + PBTC_BT_LINK_INFO pBtLinkInfo=&pBtCoexist->bt_link_info; + u1Byte btInfo=0; + u1Byte i, rspSource=0; + static u4Byte setBtPsdMode=0; + BOOLEAN bBtBusy=FALSE, limited_dig=FALSE; + BOOLEAN bWifiConnected=FALSE; + BOOLEAN b_bt_ctrl_agg_buf_size=FALSE; + + pCoexSta->bC2hBtInfoReqSent = FALSE; + + rspSource = tmpBuf[0]&0xf; + if(rspSource >= BT_INFO_SRC_8192E_1ANT_MAX) + rspSource = BT_INFO_SRC_8192E_1ANT_WIFI_FW; + pCoexSta->btInfoC2hCnt[rspSource]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length)); + for(i=0; i<length; i++) + { + pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i]; + if(i == 1) + btInfo = tmpBuf[i]; + if(i == length-1) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i])); + } + else + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i])); + } + } + + if(pBtCoexist->btInfo.bBtDisabled) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for BT is disabled <===\n")); + return; + } + + if(pBtCoexist->manual_control) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n")); + return; + } + if(pBtCoexist->bStopCoexDm) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), return for Coex STOPPED!!<===\n")); + return; + } + + if(BT_INFO_SRC_8192E_1ANT_WIFI_FW != rspSource) + { + pCoexSta->btRetryCnt = // [3:0] + pCoexSta->btInfoC2h[rspSource][2]&0xf; + + pCoexSta->btRssi = + pCoexSta->btInfoC2h[rspSource][3]*2+10; + + pCoexSta->btInfoExt = + pCoexSta->btInfoC2h[rspSource][4]; + + // Here we need to resend some wifi info to BT + // because BT was reset and lost the info. + if( (pCoexSta->btInfoExt & BIT1) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n")); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(bWifiConnected) + { + EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_CONNECT); + } + else + { + EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT); + } + + setBtPsdMode = 0; + } + + // test-chip bt patch only responds with the status for BT_RSP, + // so temporarily we consider the following only under BT_RSP + if(BT_INFO_SRC_8192E_1ANT_BT_RSP == rspSource) + { + if( (pCoexSta->btInfoExt & BIT3) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n")); + halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE); + } + else + { + // BT already does NOT ignore Wlan active, do nothing here.
+ } +#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0) + if( (pCoexSta->btInfoExt & BIT4) ) + { + // BT auto report already enabled, do nothing + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n")); + halbtc8192e1ant_BtAutoReport(pBtCoexist, FORCE_EXEC, true); + } +#endif + } + } + + // check BIT2 first ==> check if bt is under inquiry or page scan + if(btInfo & BT_INFO_8192E_1ANT_B_INQ_PAGE) + pCoexSta->bC2hBtInquiryPage = true; + else + pCoexSta->bC2hBtInquiryPage = FALSE; + + // set link exist status + if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION)) + { + pCoexSta->bBtLinkExist = FALSE; + pCoexSta->bPanExist = FALSE; + pCoexSta->bA2dpExist = FALSE; + pCoexSta->bHidExist = FALSE; + pCoexSta->bScoExist = FALSE; + } + else // connection exists + { + pCoexSta->bBtLinkExist = true; + if(btInfo & BT_INFO_8192E_1ANT_B_FTP) + pCoexSta->bPanExist = true; + else + pCoexSta->bPanExist = FALSE; + if(btInfo & BT_INFO_8192E_1ANT_B_A2DP) + pCoexSta->bA2dpExist = true; + else + pCoexSta->bA2dpExist = FALSE; + if(btInfo & BT_INFO_8192E_1ANT_B_HID) + pCoexSta->bHidExist = true; + else + pCoexSta->bHidExist = FALSE; + if(btInfo & BT_INFO_8192E_1ANT_B_SCO_ESCO) + pCoexSta->bScoExist = true; + else + pCoexSta->bScoExist = FALSE; + } + + halbtc8192e1ant_UpdateBtLinkInfo(pBtCoexist); + + if(!(btInfo&BT_INFO_8192E_1ANT_B_CONNECTION)) + { + pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-connected idle!!!\n")); + } + else if(btInfo == BT_INFO_8192E_1ANT_B_CONNECTION) // connection exists but no busy + { + pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt connected-idle!!!\n")); + } + else if((btInfo&BT_INFO_8192E_1ANT_B_SCO_ESCO) || + (btInfo&BT_INFO_8192E_1ANT_B_SCO_BUSY)) + { + pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt sco busy!!!\n")); + } + else if( (btInfo&BT_INFO_8192E_1ANT_B_ACL_BUSY) || + (btInfo&BT_INFO_8192E_1ANT_B_A2DP) || + (btInfo&BT_INFO_8192E_1ANT_B_FTP) ) + { + if(BT_8192E_1ANT_BT_STATUS_ACL_BUSY != pCoexDm->btStatus) + pCoexDm->bAutoTdmaAdjust = FALSE; + pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt acl busy!!!\n")); + } + else + { + pCoexDm->btStatus = BT_8192E_1ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], BtInfoNotify(), bt non-defined state!!!\n")); + } + + // ra mask check + if(pBtLinkInfo->bScoExist || pBtLinkInfo->bHidExist) + { + halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_DISABLE, 0x00000003); // disable tx cck 1M/2M + } + else + { + halbtc8192e1ant_Updatera_mask(pBtCoexist, NORMAL_EXEC, BTC_RATE_ENABLE, 0x00000003); // enable tx cck 1M/2M + } + + if( (BT_8192E_1ANT_BT_STATUS_ACL_BUSY == pCoexDm->btStatus) || + (BT_8192E_1ANT_BT_STATUS_SCO_BUSY == pCoexDm->btStatus) || + (BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY == pCoexDm->btStatus) ) + { + bBtBusy = true; + limited_dig = true; + if(pBtLinkInfo->bHidExist) + b_bt_ctrl_agg_buf_size = true; + } + else + { + bBtBusy = FALSE; + limited_dig = FALSE; + } + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy); + + //============================================ + // Aggregation related setting + //============================================ + // if sco, reject AddBA + 
//pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, &bRejApAggPkt); + + // decide BT control aggregation buf size or not + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, &b_bt_ctrl_agg_buf_size); + // real update aggregation setting + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); + //============================================ + + pCoexDm->limited_dig = limited_dig; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + halbtc8192e1ant_RunCoexistMechanism(pBtCoexist); +} + +VOID +EXhalbtc8192e1ant_StackOperationNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n")); + } + else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n")); + } +} + +VOID +EXhalbtc8192e1ant_HaltNotify( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n")); + + pBtCoexist->bStopCoexDm = true; + halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true); + + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + + halbtc8192e1ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x4f, 0xf); + + EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT); +} + +VOID +EXhalbtc8192e1ant_PnpNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte pnpState + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Pnp notify\n")); + + if(BTC_WIFI_PNP_SLEEP == pnpState) + { + pBtCoexist->bStopCoexDm = true; + halbtc8192e1ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true); + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); + } + else if(BTC_WIFI_PNP_WAKE_UP == pnpState) + { + + } +} + +VOID +EXhalbtc8192e1ant_Periodical( + IN PBTC_COEXIST pBtCoexist + ) +{ + static u1Byte disVerInfoCnt=0; + u4Byte fwVer=0, btPatchVer=0; + struct btc_board_info * pBoardInfo=&pBtCoexist->board_info; + PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], ==========================Periodical===========================\n")); + + if(disVerInfoCnt <= 5) + { + disVerInfoCnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n")); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", \ + pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num, pBoardInfo->btdm_ant_pos)); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], BT stack/ hci ext ver = %s / %d\n", \ + ((pStackInfo->bProfileNotified)? 
"Yes":"No"), pStackInfo->hciVersion)); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_BT_PATCH_VER, &btPatchVer); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_FW_VER, &fwVer); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", \ + GLCoexVerDate8192e1Ant, GLCoexVer8192e1Ant, fwVer, btPatchVer, btPatchVer)); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], ****************************************************************\n")); + } +#if(BT_AUTO_REPORT_ONLY_8192E_1ANT == 0) + halbtc8192e1ant_QueryBtInfo(pBtCoexist); + halbtc8192e1ant_MonitorBtCtr(pBtCoexist); + halbtc8192e1ant_MonitorBtEnableDisable(pBtCoexist); +#else + if( halbtc8192e1ant_IsWifiStatusChanged(pBtCoexist) || + pCoexDm->bAutoTdmaAdjust) + { + halbtc8192e1ant_RunCoexistMechanism(pBtCoexist); + } +#endif +} + +VOID +EXhalbtc8192e1ant_DbgControl( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte opCode, + IN u1Byte opLen, + IN pu1Byte pData + ) +{ + switch(opCode) + { + case BTC_DBG_SET_COEX_NORMAL: + pBtCoexist->manual_control = FALSE; + halbtc8192e1ant_InitCoexDm(pBtCoexist); + break; + case BTC_DBG_SET_COEX_WIFI_ONLY: + pBtCoexist->manual_control = true; + halbtc8192e1ant_PowerSaveState(pBtCoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8192e1ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 9); + break; + case BTC_DBG_SET_COEX_BT_ONLY: + // todo + break; + default: + break; + } +} +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8192e1ant.h @@ -0,0 +1,226 @@ +//=========================================== +// The following is for 8192E_1ANT BT Co-exist definition +//=========================================== +#define BT_AUTO_REPORT_ONLY_8192E_1ANT 0 + +#define BT_INFO_8192E_1ANT_B_FTP BIT7 +#define BT_INFO_8192E_1ANT_B_A2DP BIT6 +#define BT_INFO_8192E_1ANT_B_HID BIT5 +#define BT_INFO_8192E_1ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8192E_1ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8192E_1ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8192E_1ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8192E_1ANT_B_CONNECTION BIT0 + +#define BT_INFO_8192E_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \ + (((_BT_INFO_EXT_&BIT0))? 
true:FALSE) + +#define BTC_RSSI_COEX_THRESH_TOL_8192E_1ANT 2 + +typedef enum _BT_INFO_SRC_8192E_1ANT{ + BT_INFO_SRC_8192E_1ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8192E_1ANT_BT_RSP = 0x1, + BT_INFO_SRC_8192E_1ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8192E_1ANT_MAX +}BT_INFO_SRC_8192E_1ANT,*PBT_INFO_SRC_8192E_1ANT; + +typedef enum _BT_8192E_1ANT_BT_STATUS{ + BT_8192E_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8192E_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8192E_1ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8192E_1ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8192E_1ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8192E_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8192E_1ANT_BT_STATUS_MAX +}BT_8192E_1ANT_BT_STATUS,*PBT_8192E_1ANT_BT_STATUS; + +typedef enum _BT_8192E_1ANT_WIFI_STATUS{ + BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8192E_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4, + BT_8192E_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5, + BT_8192E_1ANT_WIFI_STATUS_MAX +}BT_8192E_1ANT_WIFI_STATUS,*PBT_8192E_1ANT_WIFI_STATUS; + +typedef enum _BT_8192E_1ANT_COEX_ALGO{ + BT_8192E_1ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8192E_1ANT_COEX_ALGO_SCO = 0x1, + BT_8192E_1ANT_COEX_ALGO_HID = 0x2, + BT_8192E_1ANT_COEX_ALGO_A2DP = 0x3, + BT_8192E_1ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8192E_1ANT_COEX_ALGO_PANEDR = 0x5, + BT_8192E_1ANT_COEX_ALGO_PANHS = 0x6, + BT_8192E_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8192E_1ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8192E_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8192E_1ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8192E_1ANT_COEX_ALGO_MAX = 0xb, +}BT_8192E_1ANT_COEX_ALGO,*PBT_8192E_1ANT_COEX_ALGO; + +typedef struct _COEX_DM_8192E_1ANT{ + // fw mechanism + u1Byte preBtDecPwrLvl; + u1Byte curBtDecPwrLvl; + BOOLEAN bPreBtLnaConstrain; + BOOLEAN bCurBtLnaConstrain; + u1Byte bPreBtPsdMode; + u1Byte bCurBtPsdMode; + u1Byte preFwDacSwingLvl; + u1Byte curFwDacSwingLvl; + BOOLEAN bCurIgnoreWlanAct; + BOOLEAN bPreIgnoreWlanAct; + u1Byte prePsTdma; + u1Byte curPsTdma; + u1Byte psTdmaPara[5]; + u1Byte psTdmaDuAdjType; + BOOLEAN bAutoTdmaAdjust; + BOOLEAN bPrePsTdmaOn; + BOOLEAN bCurPsTdmaOn; + BOOLEAN bPreBtAutoReport; + BOOLEAN bCurBtAutoReport; + u1Byte preLps; + u1Byte curLps; + u1Byte preRpwm; + u1Byte curRpwm; + + // sw mechanism + BOOLEAN bPreRfRxLpfShrink; + BOOLEAN bCurRfRxLpfShrink; + u4Byte btRf0x1eBackup; + BOOLEAN bPreLowPenaltyRa; + BOOLEAN bCurLowPenaltyRa; + BOOLEAN bPreDacSwingOn; + u4Byte preDacSwingLvl; + BOOLEAN bCurDacSwingOn; + u4Byte curDacSwingLvl; + BOOLEAN bPreAdcBackOff; + BOOLEAN bCurAdcBackOff; + BOOLEAN bPreAgcTableEn; + BOOLEAN bCurAgcTableEn; + u4Byte preVal0x6c0; + u4Byte curVal0x6c0; + u4Byte preVal0x6c4; + u4Byte curVal0x6c4; + u4Byte preVal0x6c8; + u4Byte curVal0x6c8; + u1Byte preVal0x6cc; + u1Byte curVal0x6cc; + BOOLEAN limited_dig; + + // algorithm related + u1Byte preAlgorithm; + u1Byte curAlgorithm; + u1Byte btStatus; + u1Byte wifiChnlInfo[3]; + + u1Byte preSsType; + u1Byte curSsType; + + u4Byte prera_mask; + u4Byte curra_mask; + + u1Byte errorCondition; +} COEX_DM_8192E_1ANT, *PCOEX_DM_8192E_1ANT; + +typedef struct _COEX_STA_8192E_1ANT{ + BOOLEAN bBtLinkExist; + BOOLEAN bScoExist; + BOOLEAN bA2dpExist; + BOOLEAN bHidExist; + BOOLEAN bPanExist; + + BOOLEAN bUnderLps; + BOOLEAN bUnderIps; + u4Byte highPriorityTx; + u4Byte highPriorityRx; + u4Byte lowPriorityTx; + u4Byte lowPriorityRx; + u1Byte btRssi; + u1Byte preBtRssiState; + u1Byte 
preWifiRssiState[4]; + BOOLEAN bC2hBtInfoReqSent; + u1Byte btInfoC2h[BT_INFO_SRC_8192E_1ANT_MAX][10]; + u4Byte btInfoC2hCnt[BT_INFO_SRC_8192E_1ANT_MAX]; + BOOLEAN bC2hBtInquiryPage; + u1Byte btRetryCnt; + u1Byte btInfoExt; +}COEX_STA_8192E_1ANT, *PCOEX_STA_8192E_1ANT; + +//=========================================== +// The following is interface which will notify coex module. +//=========================================== +VOID +EXhalbtc8192e1ant_InitHwConfig( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8192e1ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8192e1ant_IpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_LpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_ScanNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_ConnectNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_MediaStatusNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_SpecialPacketNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_BtInfoNotify( + IN PBTC_COEXIST pBtCoexist, + IN pu1Byte tmpBuf, + IN u1Byte length + ); +VOID +EXhalbtc8192e1ant_StackOperationNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8192e1ant_HaltNotify( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8192e1ant_PnpNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte pnpState + ); +VOID +EXhalbtc8192e1ant_Periodical( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8192e1ant_DisplayCoexInfo( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8192e1ant_DbgControl( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte opCode, + IN u1Byte opLen, + IN pu1Byte pData + ); --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.c @@ -0,0 +1,4242 @@ +/************************************************************** + * Description: + * + * This file is for RTL8192E Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. 
+ * + **************************************************************/ + +/************************************************************** + * include files + **************************************************************/ +#include "halbt_precomp.h" +#if 1 +/************************************************************** + * Global variables, these are static variables + **************************************************************/ +static struct coex_dm_8192e_2ant glcoex_dm_8192e_2ant; +static struct coex_dm_8192e_2ant *coex_dm = &glcoex_dm_8192e_2ant; +static struct coex_sta_8192e_2ant glcoex_sta_8192e_2ant; +static struct coex_sta_8192e_2ant *coex_sta = &glcoex_sta_8192e_2ant; + +const char *const GLBtInfoSrc8192e2Ant[]={ + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +u32 glcoex_ver_date_8192e_2ant = 20130902; +u32 glcoex_ver_8192e_2ant = 0x34; + +/************************************************************** + * local function proto type if needed + **************************************************************/ +/************************************************************** + * local function start with halbtc8192e2ant_ + **************************************************************/ +u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) +{ + int btrssi=0; + u8 btrssi_state = coex_sta->pre_bt_rssi_state; + + btrssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state=LOW\n"); + if (btrssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + btrssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to High\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Low\n"); + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state=HIGH\n"); + if (btrssi < rssi_thresh) { + btrssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Low\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state=LOW\n"); + if(btrssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + btrssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Medium\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi pre state=MEDIUM\n"); + if (btrssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + btrssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to 
High\n"); + } else if (btrssi < rssi_thresh) { + btrssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Low\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at Medium\n"); + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi pre state=HIGH\n"); + if (btrssi < rssi_thresh1) { + btrssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state switch to Medium\n"); + } else { + btrssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "BT Rssi state stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = btrssi_state; + + return btrssi_state; +} + +u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist * btcoexist, u8 index, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) +{ + int wifirssi = 0; + u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifirssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + wifirssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to High\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Low\n"); + } + } else { + if (wifirssi < rssi_thresh) { + wifirssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Low\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifirssi >= (rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + wifirssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Medium\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifirssi >= (rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) { + wifirssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to High\n"); + } else if (wifirssi < rssi_thresh) { + wifirssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Low\n"); + } else { + wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at Medium\n"); + } + } else { + if (wifirssi < rssi_thresh1) { + wifirssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state switch to Medium\n"); + } else { + 
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "wifi RSSI state stay at High\n"); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifirssi_state; + + return wifirssi_state; +} + +void halbtc8192e2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist) +{ + static bool pre_bt_disabled = false; + static u32 bt_disable_cnt = 0; + bool bt_active = true, bt_disabled = false; + + /* This function check if bt is disabled */ + + if (coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) + bt_active = false; + + if (coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) + bt_active = false; + + if (bt_active) { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); + } else { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], bt all counters=0, %d times!!\n", + bt_disable_cnt); + if (bt_disable_cnt >= 2) { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); + } + } + if (pre_bt_disabled != bt_disabled) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled":"enabled"), + (bt_disabled ? "disabled":"enabled")); + pre_bt_disabled = bt_disabled; + } +} + +u32 halbtc8192e2ant_decidera_mask(struct btc_coexist *btcoexist, + u8 sstype, u32 ra_masktype) +{ + u32 disra_mask = 0x0; + + switch (ra_masktype) { + case 0: /* normal mode */ + if (sstype == 2) + disra_mask = 0x0; /* enable 2ss */ + else + disra_mask = 0xfff00000;/* disable 2ss */ + break; + case 1: /* disable cck 1/2 */ + if(sstype == 2) + disra_mask = 0x00000003;/* enable 2ss */ + else + disra_mask = 0xfff00003;/* disable 2ss */ + break; + case 2: /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4 */ + if(sstype == 2) + disra_mask = 0x0001f1f7;/* enable 2ss */ + else + disra_mask = 0xfff1f1f7;/* disable 2ss */ + break; + default: + break; + } + + return disra_mask; +} + +void halbtc8192e2ant_Updatera_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_ratemask) +{ + coex_dm->curra_mask = dis_ratemask; + + if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask)) + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + &coex_dm->curra_mask); + coex_dm->prera_mask = coex_dm->curra_mask; +} + +void halbtc8192e2ant_autorate_fallback_retry(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + bool wifi_under_bmode = false; + + coex_dm->cur_arfrtype = type; + + if (force_exec || (coex_dm->pre_arfrtype != coex_dm->cur_arfrtype)) { + switch (coex_dm->cur_arfrtype) { + case 0: /* normal mode */ + btcoexist->btc_write_4byte(btcoexist, 0x430, + coex_dm->backup_arfr_cnt1); + btcoexist->btc_write_4byte(btcoexist, 0x434, + coex_dm->backup_arfr_cnt2); + break; + case 1: + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_B_MODE, + &wifi_under_bmode); + if (wifi_under_bmode) { + btcoexist->btc_write_4byte(btcoexist, 0x430, + 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, + 0x01010101); + } else { + btcoexist->btc_write_4byte(btcoexist, 0x430, + 0x0); + btcoexist->btc_write_4byte(btcoexist, 0x434, + 
0x04030201); + } + break; + default: + break; + } + } + + coex_dm->pre_arfrtype = coex_dm->cur_arfrtype; +} + +void halbtc8192e2ant_retrylimit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_retrylimit_type = type; + + if (force_exec || (coex_dm->pre_retrylimit_type != + coex_dm->cur_retrylimit_type)) { + switch (coex_dm->cur_retrylimit_type) { + case 0: /* normal mode */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + coex_dm->backup_retrylimit); + break; + case 1: /* retry limit=8 */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + 0x0808); + break; + default: + break; + } + } + + coex_dm->pre_retrylimit_type = coex_dm->cur_retrylimit_type; +} + +void halbtc8192e2ant_ampdu_maxtime(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_ampdutime_type = type; + + if (force_exec || (coex_dm->pre_ampdutime_type != + coex_dm->cur_ampdutime_type)) { + switch (coex_dm->cur_ampdutime_type) { + case 0: /* normal mode */ + btcoexist->btc_write_1byte(btcoexist, 0x456, + coex_dm->backup_ampdu_maxtime); + break; + case 1: /* AMPDU time = 0x38 * 32us */ + btcoexist->btc_write_1byte(btcoexist, 0x456, 0x38); + break; + default: + break; + } + } + + coex_dm->pre_ampdutime_type = coex_dm->cur_ampdutime_type; +} + +void halbtc8192e2ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_masktype, u8 arfr_type, + u8 retrylimit_type, u8 ampdutime_type) +{ + u32 disra_mask = 0x0; + + coex_dm->curra_masktype = ra_masktype; + disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, + coex_dm->cur_sstype, + ra_masktype); + halbtc8192e2ant_Updatera_mask(btcoexist, force_exec, disra_mask); + + halbtc8192e2ant_autorate_fallback_retry(btcoexist, force_exec, + arfr_type); + halbtc8192e2ant_retrylimit(btcoexist, force_exec, retrylimit_type); + halbtc8192e2ant_ampdu_maxtime(btcoexist, force_exec, ampdutime_type); +} + +void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool b_bt_ctrl_agg_buf_size, + u8 agg_buf_size) +{ + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size; + u8 rx_agg_size = agg_buf_size; + + /********************************************* + * Rx Aggregation related setting + *********************************************/ + btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, + &reject_rx_agg); + /* decide BT control aggregation buf size or not */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregation buf size, only works + * when BT controls Rx aggregation size.
*/ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rx_agg_size); + /* real update aggregation setting */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); + + +} + +void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u32tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u32tmp & MASKLWORD; + reg_hp_rx = (u32tmp & MASKHWORD)>>16; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u32tmp & MASKLWORD; + reg_lp_rx = (u32tmp & MASKHWORD)>>16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex] High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex] Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] ={0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +bool halbtc8192e2ant_iswifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy = false; + static bool pre_under_4way = false, pre_bt_hson = false; + bool wifi_busy = false, under_4way = false, bt_hson = false; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + if (bt_hson != pre_bt_hson) { + pre_bt_hson = bt_hson; + return true; + } + } + + return false; +} + +void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hson = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. 
*/ + if (bt_hson) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } + + /* check if Sco only */ + if (bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && + bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && + !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && + !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && + bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + bool bt_hson=false; + u8 algorithm = BT_8192E_2ANT_COEX_ALGO_UNDEFINED; + u8 numOfDiffProfile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + numOfDiffProfile++; + if (bt_link_info->hid_exist) + numOfDiffProfile++; + if (bt_link_info->pan_exist) + numOfDiffProfile++; + if (bt_link_info->a2dp_exist) + numOfDiffProfile++; + + if (numOfDiffProfile == 1) { + if (bt_link_info->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO only\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID only\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "A2DP only\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "PAN(HS) only\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "PAN(EDR) only\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (numOfDiffProfile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + A2DP ==> SCO\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_SCO_PAN; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + if (stack_info->num_of_hid >= 2) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID*2 + A2DP\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + A2DP\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP; + } + } else if (bt_link_info->hid_exist 
&& + bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "A2DP + PAN(HS)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "A2DP + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (numOfDiffProfile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID + A2DP ==> HID\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + HID + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_SCO_PAN; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + A2DP + PAN(HS)\n"); + algorithm = BT_8192E_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO + A2DP + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + A2DP + PAN(HS)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "HID + A2DP + PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (numOfDiffProfile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hson) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "ErrorSCO+HID+A2DP+PAN(HS)\n"); + + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "SCO+HID+A2DP+PAN(EDR)\n"); + algorithm = + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist, + u8 dac_swinglvl) +{ + u8 h2c_parameter[1] ={0}; + + /* There are several type of dacswing + * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */ + h2c_parameter[0] = dac_swinglvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swinglvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist, + u8 dec_btpwr_lvl) +{ + u8 h2c_parameter[1] ={0}; + + h2c_parameter[0] = dec_btpwr_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex] decrease Bt Power level = %d, FW write 0x62=0x%x\n", + dec_btpwr_lvl, h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); +} + +void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist, + bool force_exec, u8 dec_btpwr_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power level = %d\n", + (force_exec? 
"force to":""), dec_btpwr_lvl); + coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + } + halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist, + bool enable_autoreport) +{ + u8 h2c_parameter[1] ={0}; + + h2c_parameter[0] = 0; + + if (enable_autoreport) + h2c_parameter[0] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", + (enable_autoreport? "Enabled!!":"Disabled!!"), + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +} + +void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist, + bool force_exec, bool enable_autoreport) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec? "force to":""), + ((enable_autoreport)? "Enabled":"Disabled")); + coex_dm->cur_bt_auto_report = enable_autoreport; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); + + if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8192e2ant_set_bt_autoreport(btcoexist, + coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swinglvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec? "force to":""), fw_dac_swinglvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); + + if (coex_dm->pre_fw_dac_swing_lvl == + coex_dm->cur_fw_dac_swing_lvl) + return; + } + + halbtc8192e2ant_setfw_dac_swinglevel(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); + + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +void halbtc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, + bool rx_rf_shrink_on) +{ + if (rx_rf_shrink_on) { + /* Shrink RF Rx LPF corner */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, 0xffffc); + } else { + /* Resume RF Rx LPF corner + * After initialized, we can use coex_dm->btRf0x1eBackup */ + if (btcoexist->initilized) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, + coex_dm->bt_rf0x1e_backup); + } + } +} + +void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec? "force to":""), ((rx_rf_shrink_on)? 
"ON":"OFF")); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); + + if (coex_dm->pre_rf_rx_lpf_shrink == + coex_dm->cur_rf_rx_lpf_shrink) + return; + } + halbtc8192e2ant_set_sw_rf_rx_lpf_corner(btcoexist, + coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +void halbtc8192e2ant_set_sw_penalty_tx_rateadaptive( + struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] ={0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /* normal rate except MCS7/6/5, OFDM54/48/36 */ + h2c_parameter[2] = 0x00; + h2c_parameter[3] = 0xf7; /* MCS7 or OFDM54 */ + h2c_parameter[4] = 0xf8; /* MCS6 or OFDM48 */ + h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */ + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra? "ON!!":"OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +void halbtc8192e2ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec? "force to":""), ((low_penalty_ra)? "ON":"OFF")); + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex] bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); + + if (coex_dm->pre_low_penalty_ra == + coex_dm->cur_low_penalty_ra) + return; + } + halbtc8192e2ant_set_sw_penalty_tx_rateadaptive(btcoexist, + coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist, + u32 level) +{ + u8 val = (u8)level; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); +} + +void halbtc8192e2ant_setsw_fulltime_dacswing(struct btc_coexist *btcoexist, + bool sw_dac_swingon, + u32 sw_dac_swinglvl) +{ + if (sw_dac_swingon) + halbtc8192e2ant_set_dac_swingreg(btcoexist, sw_dac_swinglvl); + else + halbtc8192e2ant_set_dac_swingreg(btcoexist, 0x18); +} + + +void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swingon, + u32 dac_swinglvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swinglvl=0x%x\n", + (force_exec? "force to":""), + ((dac_swingon)? 
"ON":"OFF"), dac_swinglvl); + coex_dm->cur_dac_swing_on = dac_swingon; + coex_dm->cur_dac_swing_lvl = dac_swinglvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, ", + coex_dm->pre_dac_swing_on, + coex_dm->pre_dac_swing_lvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + + if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) + return; + } + mdelay(30); + halbtc8192e2ant_setsw_fulltime_dacswing(btcoexist, dac_swingon, + dac_swinglvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +void halbtc8192e2ant_set_adc_backoff(struct btc_coexist *btcoexist, + bool adc_backoff) +{ + if(adc_backoff) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level On!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level Off!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1); + } +} + +void halbtc8192e2ant_adc_backoff(struct btc_coexist *btcoexist, + bool force_exec, bool adc_backoff) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec? "force to":""), ((adc_backoff)? "ON":"OFF")); + coex_dm->cur_adc_back_off = adc_backoff; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", + coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off); + + if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) + return; + } + halbtc8192e2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off); + + coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off; +} + +void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist, + bool agc_table_en) +{ + + /* BB AGC Gain Table */ + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table On!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x071D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table Off!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001); + } +} + +void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist, + bool force_exec, bool agc_table_en) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec? "force to":""), + ((agc_table_en)? 
"Enable":"Disable")); + coex_dm->cur_agc_table_en = agc_table_en; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); + + if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) + return; + } + halbtc8192e2ant_set_agc_table(btcoexist, agc_table_en); + + coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; +} + +void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist, bool force_exec, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0=0x%x, ", + (force_exec? "force to":""), val0x6c0); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + val0x6c4, val0x6c8, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, ", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, \n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + halbtc8192e2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +void halbtc8192e2ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 1: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 2: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5ffb5ffb, 0xffffff, 0x3); + break; + case 3: + halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, + 0x5fdb5fdb, 0xffffff, 0x3); + break; + case 4: + 
halbtc8192e2ant_coex_table(btcoexist, force_exec, 0xdfffdfff, + 0x5ffb5ffb, 0xffffff, 0x3); + break; + default: + break; + } +} + +void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] ={0}; + + if (enable) + h2c_parameter[0] |= BIT0; /* function enable */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec? "force to":""), (enable? "ON":"OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d ", + coex_dm->pre_ignore_wlan_act); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8192e2ant_set_fw_ignore_wlanact(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1, + u8 byte2, u8 byte3, u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5] ={0}; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +void halbtc8192e2ant_sw_mechanism1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool btlan_constrain) +{ + halbtc8192e2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); +} + +void halbtc8192e2ant_sw_mechanism2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_backoff, + bool sw_dac_swing, u32 dac_swinglvl) +{ + halbtc8192e2ant_AgcTable(btcoexist, NORMAL_EXEC, agc_table_shift); + halbtc8192e2ant_DacSwing(btcoexist, NORMAL_EXEC, sw_dac_swing, + dac_swinglvl); +} + +void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist, + bool force_exec, bool turn_on, u8 type) +{ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec? "force to":""), (turn_on? 
"ON":"OFF"), type); + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + case 1: + default: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 2: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 3: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); + break; + case 4: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10, + 0x3, 0xf1, 0x90); + break; + case 5: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 6: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 7: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); + break; + case 8: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xa3, 0x10, + 0x3, 0x70, 0x90); + break; + case 9: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x10); + break; + case 10: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x10); + break; + case 11: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x10); + break; + case 12: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x10, + 0x3, 0xf1, 0x10); + break; + case 13: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe0, 0x10); + break; + case 14: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe0, 0x10); + break; + case 15: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf0, 0x10); + break; + case 16: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x12, + 0x3, 0xf0, 0x10); + break; + case 17: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0x61, 0x20, + 0x03, 0x10, 0x10); + break; + case 18: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 19: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25, + 0x25, 0xe1, 0x90); + break; + case 20: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x25, + 0x25, 0x60, 0x90); + break; + case 21: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, 0x90); + break; + case 71: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + } + } else { + /* disable PS tdma */ + switch (type) { + default: + case 0: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0x8, 0x0, 0x0, + 0x0, 0x0); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x4); + break; + case 1: + halbtc8192e2ant_SetFwPstdma(btcoexist, 0x0, 0x0, 0x0, + 0x8, 0x0); + mdelay(5); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); + break; + } + } + + /* update pre state */ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist, u8 sstype) +{ + u8 mimops = BTC_MIMO_PS_DYNAMIC; + u32 disra_mask = 0x0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], REAL set SS Type = %d\n", sstype); + + disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype, + coex_dm->curra_masktype); + halbtc8192e2ant_Updatera_mask(btcoexist, FORCE_EXEC, disra_mask); + + 
+	if (sstype == 1) {
+		halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+		/* switch ofdm path */
+		btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x11);
+		btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x1);
+		btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81111111);
+		/* switch cck path */
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x1);
+		btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x81);
+		mimops = BTC_MIMO_PS_STATIC;
+	} else if (sstype == 2) {
+		halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0);
+		btcoexist->btc_write_1byte(btcoexist, 0xc04, 0x33);
+		btcoexist->btc_write_1byte(btcoexist, 0xd04, 0x3);
+		btcoexist->btc_write_4byte(btcoexist, 0x90c, 0x81121313);
+		btcoexist->btc_write_1byte_bitmask(btcoexist, 0xe77, 0x4, 0x0);
+		btcoexist->btc_write_1byte(btcoexist, 0xa07, 0x41);
+		mimops = BTC_MIMO_PS_DYNAMIC;
+	}
+	/* set rx 1ss or 2ss */
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_SEND_MIMO_PS, &mimops);
+}
+
+void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
+				   bool force_exec, u8 new_sstype)
+{
+	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+		  "[BTCoex], %s Switch SS Type = %d\n",
+		  (force_exec ? "force to" : ""), new_sstype);
+	coex_dm->cur_sstype = new_sstype;
+
+	if (!force_exec) {
+		if (coex_dm->pre_sstype == coex_dm->cur_sstype)
+			return;
+	}
+	halbtc8192e2ant_set_switch_sstype(btcoexist, coex_dm->cur_sstype);
+
+	coex_dm->pre_sstype = coex_dm->cur_sstype;
+}
+
+void halbtc8192e2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+	/* fw all off */
+	halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+	/* sw all off */
+	halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+	/* hw all off */
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+void halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+	/* force to reset coex mechanism */
+
+	halbtc8192e2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, FORCE_EXEC, 6);
+	halbtc8192e2ant_dec_btpwr(btcoexist, FORCE_EXEC, 0);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0);
+	halbtc8192e2ant_switch_sstype(btcoexist, FORCE_EXEC, 2);
+
+	halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+	bool low_pwr_disable = true;
+
+	btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+			   &low_pwr_disable);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
+	halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+	halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+
+	halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, false);
+	halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
+bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+	struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+	bool common = false, wifi_connected = false, wifi_busy = false;
+	bool bt_hson = false, low_pwr_disable = false;
+
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
+	btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+			   
&wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (bt_link_info->sco_exist || bt_link_info->hid_exist) + halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 0, 0, 0); + else + halbtc8192e2ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + + if (!wifi_connected) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non-connected idle!!\n"); + + if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) || + (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status)) { + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, + 2); + halbtc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, + 0); + } else { + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, + 1); + halbtc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, + 1); + } + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, false, + false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); + + common = true; + } else { + if (BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi connected + BT non connected-idle!!\n"); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, + 2); + halbtc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, + 0); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, + 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + common = true; + } else if (BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (bt_hson) + return false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi connected + BT connected-idle!!\n"); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, + 2); + halbtc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, + 0); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, + 6); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + common = true; + } else { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi Connected-Busy + BT Busy!!\n"); + common = false; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Wifi Connected-Idle + BT Busy!!\n"); + + halbtc8192e2ant_switch_sstype(btcoexist, + NORMAL_EXEC, 1); + halbtc8192e2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, + 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 21); + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, + NORMAL_EXEC, 6); + 
halbtc8192e2ant_dec_btpwr(btcoexist, + NORMAL_EXEC, 0); + halbtc8192e2ant_sw_mechanism1(btcoexist, false, + false, false, + false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, + false, false, + 0x18); + common = true; + } + } + } + return common; +} + +void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) +{ + static int up, dn, m, n, wait_cnt; + /* 0: no change, +1: increase WiFi duration, + * -1: decrease WiFi duration */ + int result; + u8 retry_cnt = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + if (sco_hid) { + if (tx_pause) { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = 13; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } + } else { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } + } + } else { + if (tx_pause) { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } + } else { + if (max_interval == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = 1; + } else if (max_interval == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (max_interval == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } + } + } + + up = 0; + dn = 0; + m = 1; + n= 3; + result = 0; + wait_cnt = 0; + } else { + /* accquire the BT TRx retry count from BT_Info byte2 */ + retry_cnt = coex_sta->bt_retry_cnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], retry_cnt = %d\n", retry_cnt); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n", + up, dn, m, n, wait_cnt); + result = 0; + wait_cnt++; + /* no retry in the last 2-second duration */ + if (retry_cnt == 0) { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + wait_cnt = 0; + n = 3; + up 
= 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex]Increase wifi duration!!\n"); + } + } else if (retry_cnt <= 3) { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + if (wait_cnt <= 2) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_cnt = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "Reduce wifi duration for retry<3\n"); + } + } else { + if (wait_cnt == 1) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3*m; + up = 0; + dn = 0; + wait_cnt = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "Decrease wifi duration for retryCounter>3!!\n"); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); + if (max_interval == 1) { + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + + if (coex_dm->cur_ps_tdma == 71) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = 13; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma( + btcoexist, + 
NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = + 5; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = + 13; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = + 71; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + 
coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = + 9; + } + } + } + } else if (max_interval == 2) { + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + 
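				/* TxPause = 0: map the tx-pause TDMA cases
+				 * (5-8, 13-16) back to their non-pause
+				 * counterparts before adjusting duration.
+				 */
+				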
if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if(coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if(coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if(coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } + } + } + } else if (max_interval == 3) { + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if 
(coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 15) { + 
halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8192e2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8192e2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } + } + } + } + } + + /* if current PsTdma not match with + * the recorded one (when scan, dhcp...), + * then we have to adjust it back to the previous record one. 
 */
+	if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
+		bool scan = false, link = false, roam = false;
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "[BTCoex], PsTdma type mismatch!!!, ");
+		BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+			  "curPsTdma=%d, recordPsTdma=%d\n",
+			  coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+		btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+		if (!scan && !link && !roam)
+			halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+						true,
+						coex_dm->ps_tdma_du_adj_type);
+		else
+			BTC_PRINT(BTC_MSG_ALGORITHM,
+				  ALGO_TRACE_FW_DETAIL,
+				  "[BTCoex], roaming/link/scan is in progress, will adjust next time!!!\n");
+	}
+}
+
+/* SCO only or SCO+PAN(HS) */
+void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+
+	if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
+	    (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+	} else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	} else if ((btrssi_state == BTC_RSSI_STATE_HIGH) ||
+		   (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+		halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4);
+		halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+	}
+
+	btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+	/* sw mechanism */
+	if (BTC_WIFI_BW_HT40 == wifi_bw) {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, true, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x6);
+		}
+	} else {
+		if ((wifirssi_state == BTC_RSSI_STATE_HIGH) ||
+		    (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, true, false,
+						      false, 0x6);
+		} else {
+			halbtc8192e2ant_sw_mechanism1(btcoexist, false, true,
+						      false, false);
+			halbtc8192e2ant_sw_mechanism2(btcoexist, false, false,
+						      false, 0x6);
+		}
+	}
+}
+
+void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
+{
+	u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_STAY_LOW;
+	u32 wifi_bw;
+
+	wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
+
+	halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
+	halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
+
+	halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6);
+
+	halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 4);
+
+	btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
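+	/* scale the BT power decrease and PS-TDMA case with the BT RSSI level */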
+ + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x6); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x6); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x6); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x6); + } + } +} + +void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state=BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + 
halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ +void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + bool long_dist = false; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + if ((btrssi_state == BTC_RSSI_STATE_LOW || + btrssi_state == BTC_RSSI_STATE_STAY_LOW) && + (wifirssi_state == BTC_RSSI_STATE_LOW || + wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n"); + long_dist = true; + } + if (long_dist) { + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, true, + 0x4); + } else { + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, + 0x8); + } + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + if (long_dist) + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + else + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + + + if (long_dist) { + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 17); + coex_dm->auto_tdma_adjust = false; + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else { + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + true, 1); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, + false, 1); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + } + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + 
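	/* A2DP + PAN(HS): fix SS type 1 and coex table 2, then tune the TDMA
+	 * duration (max interval 2) and BT power decrease by BT RSSI.
+	 */
+	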
halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 2); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false, + 2); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false, + 2); + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + true, 0x6); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + true, 0x6); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + true, 0x6); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + true, 0x6); + } + } +} + +void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + } + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + 
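			/*
			 * For PAN(EDR) the BT-RSSI ladder above reduces to a
			 * fixed mapping of tier -> (argument passed to
			 * halbtc8192e2ant_dec_btpwr(), PS-TDMA case): LOW
			 * passes 0 and uses TDMA case 5, MEDIUM and HIGH pass
			 * 2 and 4 and both use case 1.  Sketched as data
			 * (struct and array names are illustrative only and
			 * unused):
			 */
			{
				struct pan_edr_tier {
					u8 dec_btpwr;
					u8 tdma_case;
				};
				static const struct pan_edr_tier pan_edr_tiers[] = {
					{ 0, 5 },	/* LOW / STAY_LOW */
					{ 2, 1 },	/* MEDIUM / STAY_MEDIUM */
					{ 4, 1 },	/* HIGH / STAY_HIGH */
				};
			}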
halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* PAN(HS) only */ +void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + } + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* PAN(EDR)+A2DP */ +void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state=BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, true, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + 
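	/*
	 * Every RSSI tier test in this file checks a state together with its
	 * STAY_ variant.  The pairs collapse naturally into three booleans,
	 * e.g. (the three local names below are illustrative only and
	 * unused):
	 */
	{
		bool sketch_bt_low =
			(btrssi_state == BTC_RSSI_STATE_LOW) ||
			(btrssi_state == BTC_RSSI_STATE_STAY_LOW);
		bool sketch_bt_med =
			(btrssi_state == BTC_RSSI_STATE_MEDIUM) ||
			(btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM);
		bool sketch_bt_high =
			(btrssi_state == BTC_RSSI_STATE_HIGH) ||
			(btrssi_state == BTC_RSSI_STATE_STAY_HIGH);
	}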
halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false, + 3); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, false, false, + 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* HID+A2DP+PAN(EDR) */ +void halbtc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = 
halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + halbtc8192e2ant_fw_dac_swinglvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH; + u32 wifi_bw; + + wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0); + btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42); + + halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1); + halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8192e2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3); + + if ((btrssi_state == BTC_RSSI_STATE_LOW) || + (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 0); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, true, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_MEDIUM) || + (btrssi_state == BTC_RSSI_STATE_STAY_MEDIUM)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 2); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } else if ((btrssi_state == BTC_RSSI_STATE_HIGH) || + (btrssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_dec_btpwr(btcoexist, NORMAL_EXEC, 4); + halbtc8192e2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + 
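			/*
			 * The coex_table_with_type() argument tracks the
			 * profile mix across these routines: HID-bearing
			 * combinations select table type 3, A2DP/PAN-only
			 * combinations type 2, and the long-distance A2DP
			 * case type 0.  Sketched as data (the array name is
			 * illustrative only and unused):
			 */
			{
				static const u8 sketch_coex_tbl[] = {
					[BT_8192E_2ANT_COEX_ALGO_HID] = 3,
					[BT_8192E_2ANT_COEX_ALGO_A2DP] = 2,
					[BT_8192E_2ANT_COEX_ALGO_PANEDR] = 2,
					[BT_8192E_2ANT_COEX_ALGO_PANEDR_HID] = 3,
					[BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR] = 3,
					[BT_8192E_2ANT_COEX_ALGO_HID_A2DP] = 3,
				};
			}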
halbtc8192e2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifirssi_state == BTC_RSSI_STATE_HIGH) || + (wifirssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8192e2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8192e2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], return for Manual CTRL <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); + return; + } + + algorithm = halbtc8192e2ant_action_algorithm(btcoexist); + if (coex_sta->c2h_bt_inquiry_page && + (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); + halbtc8192e2ant_action_bt_inquiry(btcoexist); + return; + } + + coex_dm->cur_algorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Algorithm = %d \n", coex_dm->cur_algorithm); + + if (halbtc8192e2ant_is_common_action(btcoexist)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant common.\n"); + coex_dm->auto_tdma_adjust = false; + } else { + if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n", + coex_dm->pre_algorithm, + coex_dm->cur_algorithm); + coex_dm->auto_tdma_adjust = false; + } + switch (coex_dm->cur_algorithm) { + case BT_8192E_2ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = SCO.\n"); + halbtc8192e2ant_action_sco(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_SCO_PAN: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = SCO+PAN(EDR).\n"); + halbtc8192e2ant_action_sco_pan(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HID.\n"); + halbtc8192e2ant_action_hid(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = A2DP.\n"); + halbtc8192e2ant_action_a2dp(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = A2DP+PAN(HS).\n"); + halbtc8192e2ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = PAN(EDR).\n"); + halbtc8192e2ant_action_pan_edr(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HS mode.\n"); + halbtc8192e2ant_action_pan_hs(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = PAN+A2DP.\n"); + halbtc8192e2ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = PAN(EDR)+HID.\n"); + halbtc8192e2ant_action_pan_edr_hid(btcoexist); + 
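		/*
		 * The switch above is a one-to-one dispatch from
		 * enum bt_8192e_2ant_coex_algo to its action routine; the
		 * same mapping could be kept as a function-pointer table
		 * indexed by the algorithm id, e.g. (the table name is
		 * illustrative only and unused):
		 */
		{
			static void (* const sketch_actions[])(struct btc_coexist *) = {
				[BT_8192E_2ANT_COEX_ALGO_SCO] =
					halbtc8192e2ant_action_sco,
				[BT_8192E_2ANT_COEX_ALGO_HID] =
					halbtc8192e2ant_action_hid,
				[BT_8192E_2ANT_COEX_ALGO_A2DP] =
					halbtc8192e2ant_action_a2dp,
				[BT_8192E_2ANT_COEX_ALGO_PANEDR] =
					halbtc8192e2ant_action_pan_edr,
				[BT_8192E_2ANT_COEX_ALGO_PANEDR_HID] =
					halbtc8192e2ant_action_pan_edr_hid,
				[BT_8192E_2ANT_COEX_ALGO_HID_A2DP] =
					halbtc8192e2ant_action_hid_a2dp,
			};
		}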
break; + case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HID+A2DP+PAN.\n"); + halbtc8192e2ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8192E_2ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = HID+A2DP.\n"); + halbtc8192e2ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "Action 2-Ant, algorithm = unknown!!\n"); + /* halbtc8192e2ant_coex_alloff(btcoexist); */ + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist, bool backup) +{ + u16 u16tmp = 0; + u8 u8tmp = 0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); + + if (backup) { + /* backup rf 0x1e value */ + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, + 0x1e, 0xfffff); + + coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist, + 0x430); + coex_dm->backup_arfr_cnt2 = btcoexist->btc_read_4byte(btcoexist, + 0x434); + coex_dm->backup_retrylimit = btcoexist->btc_read_2byte( + btcoexist, + 0x42a); + coex_dm->backup_ampdu_maxtime = btcoexist->btc_read_1byte( + btcoexist, + 0x456); + } + + /* antenna sw ctrl to bt */ + btcoexist->btc_write_1byte(btcoexist, 0x4f, 0x6); + btcoexist->btc_write_1byte(btcoexist, 0x944, 0x24); + btcoexist->btc_write_4byte(btcoexist, 0x930, 0x700700); + btcoexist->btc_write_1byte(btcoexist, 0x92c, 0x20); + if (btcoexist->chip_interface == BTC_INTF_USB) + btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30430004); + else + btcoexist->btc_write_4byte(btcoexist, 0x64, 0x30030004); + + halbtc8192e2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + + /* antenna switch control parameter */ + btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555); + + /* coex parameters */ + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); + /* 0x790[5:0]=0x5 */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u8tmp &= 0xc0; + u8tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); + + /* enable counter statistics */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); + + /* enable PTA */ + btcoexist->btc_write_1byte(btcoexist, 0x40, 0x20); + /* enable mailbox interface */ + u16tmp = btcoexist->btc_read_2byte(btcoexist, 0x40); + u16tmp |= BIT9; + btcoexist->btc_write_2byte(btcoexist, 0x40, u16tmp); + + /* enable PTA I2C mailbox */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x101); + u8tmp |= BIT4; + btcoexist->btc_write_1byte(btcoexist, 0x101, u8tmp); + + /* enable bt clock when wifi is disabled. */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x93); + u8tmp |= BIT0; + btcoexist->btc_write_1byte(btcoexist, 0x93, u8tmp); + /* enable bt clock when suspend. 
*/ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x7); + u8tmp |= BIT0; + btcoexist->btc_write_1byte(btcoexist, 0x7, u8tmp); +} + +/************************************************************* + * work around function start with wa_halbtc8192e2ant_ + *************************************************************/ + +/************************************************************ + * extern function start with EXhalbtc8192e2ant_ + ************************************************************/ + +void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + halbtc8192e2ant_init_hwconfig(btcoexist, true); +} + +void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + halbtc8192e2ant_init_coex_dm(btcoexist); +} + +void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info*stack_info = &btcoexist->stack_info; + u8 *cli_buf = btcoexist->cli_buf; + u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0; + u16 u16tmp[4]; + u32 u32tmp[4]; + bool roam = false, scan = false, link = false, wifi_under_5g = false; + bool bt_hson = false, wifi_busy = false; + int wifirssi = 0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cli_buf); + + if (btcoexist->manual_control) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ===========[Under Manual Control]==========="); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + + if (!board_info->bt_exist) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cli_buf); + return; + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified) ? 
"Yes" : "No"), + stack_info->hci_version); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsMode(HsChnl)", + wifi_dot11_chnl, bt_hson, wifi_hs_chnl); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], + coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g ? "5G" : "2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" : + (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))), + ((!wifi_busy) ? "idle" : + ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ? + "uplink" : "downlink"))); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", + ((btcoexist->bt_info.bt_disabled) ? ("disabled") : + ((coex_sta->c2h_bt_inquiry_page) ? + ("inquiry/page scan") : + ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) ? "non-connected idle" : + ((BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) ? "connected-idle" : "busy")))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", stack_info->sco_exist, + stack_info->hid_exist, stack_info->pan_exist, + stack_info->a2dp_exist); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext&BIT0) ? 
"Basic rate" : "EDR rate"); + CL_PRINTF(cli_buf); + + for (i=0; ibt_info_c2h_cnt[i]) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x %02x ", + GLBtInfoSrc8192e2Ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3]); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "%02x %02x %02x(%d)", + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + CL_PRINTF(cli_buf); + } + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")), + ((coex_sta->under_lps ? "LPS ON" : "LPS OFF"))); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "SS Type", + coex_dm->cur_sstype); + CL_PRINTF(cli_buf); + + /* Sw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Sw mechanism]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", "Rate Mask", + btcoexist->bt_info.ra_mask); + CL_PRINTF(cli_buf); + + /* Fw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Fw mechanism]============"); + CL_PRINTF(cli_buf); + + ps_tdma_case = coex_dm->cur_ps_tdma; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + ps_tdma_case, coex_dm->auto_tdma_adjust); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", + coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act); + CL_PRINTF(cli_buf); + + /* Hw setting */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Hw setting]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit, + coex_dm->backup_ampdu_maxtime); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); + u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0xc04/ 0xd04/ 
0x90c", u32tmp[0], u32tmp[1], u32tmp[2]); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x778", + u8tmp[0]); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x40/ 0x4f", u8tmp[0], u8tmp[1]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", + u32tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x770(hp rx[31:16]/tx[15:0])", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x774(lp rx[31:16]/tx[15:0])", + coex_sta->low_priority_rx, coex_sta->low_priority_tx); + CL_PRINTF(cli_buf); +#if(BT_AUTO_REPORT_ONLY_8192E_2ANT == 1) + halbtc8192e2ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + + +void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + halbtc8192e2ant_coex_alloff(btcoexist); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + } +} + +void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_SCAN_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + else if(BTC_SCAN_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); +} + +void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_ASSOCIATE_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + else if(BTC_ASSOCIATE_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); +} + +void 
ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] ={0}; + u32 wifi_bw; + u8 wifi_center_chnl; + + if (btcoexist->manual_control || + btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + if (BTC_MEDIA_CONNECT == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + + /* only 2.4G we need to inform bt the chnl mask */ + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, + &wifi_center_chnl); + if ((BTC_MEDIA_CONNECT == type) && + (wifi_center_chnl <= 14)) { + h2c_parameter[0] = 0x1; + h2c_parameter[1] = wifi_center_chnl; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66=0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + if (type == BTC_PACKET_DHCP) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); + } + +void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length ) +{ + u8 bt_info = 0; + u8 i, rspSource = 0; + bool bt_busy = false, limited_dig = false; + bool wifi_connected = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rspSource = tmp_buf[0] & 0xf; + if (rspSource >= BT_INFO_SRC_8192E_2ANT_MAX) + rspSource = BT_INFO_SRC_8192E_2ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rspSource]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data=[", + rspSource, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rspSource][i] = tmp_buf[i]; + if (i == 1) + bt_info = tmp_buf[i]; + if (i == length-1) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); + } + + if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rspSource) { + coex_sta->bt_retry_cnt = /* [3:0] */ + coex_sta->bt_info_c2h[rspSource][2] & 0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rspSource][3] * 2 + 10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rspSource][4]; + + /* Here we need to resend some wifi info to BT + * because bt is reset and loss of the info. */ + if ((coex_sta->bt_info_ext & BIT1)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "bit1, send wifi BW&Chnl to BT!!\n"); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (wifi_connected) + ex_halbtc8192e2ant_media_status_notify( + btcoexist, + BTC_MEDIA_CONNECT); + else + ex_halbtc8192e2ant_media_status_notify( + btcoexist, + BTC_MEDIA_DISCONNECT); + } + + if ((coex_sta->bt_info_ext & BIT3)) { + if (!btcoexist->manual_control && + !btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "bit3, BT NOT ignore Wlan active!\n"); + halbtc8192e2ant_IgnoreWlanAct(btcoexist, + FORCE_EXEC, + false); + } + } else { + /* BT already NOT ignore Wlan active, + * do nothing here. 
*/ + } + +#if(BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) + if ((coex_sta->bt_info_ext & BIT4)) { + /* BT auto report already enabled, do nothing */ + } else { + halbtc8192e2ant_bt_autoreport(btcoexist, FORCE_EXEC, + true); + } +#endif + } + + /* check BIT2 first ==> check if bt is under inquiry or page scan */ + if(bt_info & BT_INFO_8192E_2ANT_B_INQ_PAGE) + coex_sta->c2h_bt_inquiry_page = true; + else + coex_sta->c2h_bt_inquiry_page = false; + + /* set link exist status */ + if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + } else {/* connection exists */ + coex_sta->bt_link_exist = true; + if (bt_info & BT_INFO_8192E_2ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (bt_info & BT_INFO_8192E_2ANT_B_A2DP) + coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (bt_info & BT_INFO_8192E_2ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + } + + halbtc8192e2ant_update_btlink_info(btcoexist); + + if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Non-Connected idle!!!\n"); + } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n"); + } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) || + (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n"); + } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n"); + } + + if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8192E_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) { + bt_busy = true; + limited_dig = true; + } else { + bt_busy = false; + limited_dig = false; + } + + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + coex_dm->limited_dig = limited_dig; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + halbtc8192e2ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, + u8 type) +{ + if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex] StackOP Inquiry/page/pair start notify\n"); + else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex] StackOP Inquiry/page/pair finish notify\n"); +} + +void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + + halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true); + ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void 
ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist) +{ + static u8 dis_ver_info_cnt = 0; + u32 fw_ver = 0, bt_patch_ver = 0; + struct btc_board_info *board_info=&btcoexist->board_info; + struct btc_stack_info *stack_info=&btcoexist->stack_info; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "=======================Periodical=======================\n"); + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "************************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n", + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified) ? "Yes" : "No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "************************************************\n"); + } + +#if(BT_AUTO_REPORT_ONLY_8192E_2ANT == 0) + halbtc8192e2ant_querybt_info(btcoexist); + halbtc8192e2ant_monitor_bt_ctr(btcoexist); + halbtc8192e2ant_monitor_bt_enable_disable(btcoexist); +#else + if (halbtc8192e2ant_iswifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) + halbtc8192e2ant_run_coexist_mechanism(btcoexist); +#endif +} + + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8192e2ant.h @@ -0,0 +1,162 @@ +/***************************************************************** + * The following is for 8192E 2Ant BT Co-exist definition + *****************************************************************/ +#define BT_AUTO_REPORT_ONLY_8192E_2ANT 0 + +#define BT_INFO_8192E_2ANT_B_FTP BIT7 +#define BT_INFO_8192E_2ANT_B_A2DP BIT6 +#define BT_INFO_8192E_2ANT_B_HID BIT5 +#define BT_INFO_8192E_2ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8192E_2ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8192E_2ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8192E_2ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8192E_2ANT_B_CONNECTION BIT0 + +#define BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT 2 + +enum bt_info_src_8192e_2ant{ + BT_INFO_SRC_8192E_2ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8192E_2ANT_BT_RSP = 0x1, + BT_INFO_SRC_8192E_2ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8192E_2ANT_MAX +}; + +enum bt_8192e_2ant_bt_status{ + BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8192E_2ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8192E_2ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8192E_2ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8192E_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8192E_2ANT_BT_STATUS_MAX +}; + +enum bt_8192e_2ant_coex_algo{ + BT_8192E_2ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8192E_2ANT_COEX_ALGO_SCO = 0x1, + BT_8192E_2ANT_COEX_ALGO_SCO_PAN = 0x2, + BT_8192E_2ANT_COEX_ALGO_HID = 0x3, + BT_8192E_2ANT_COEX_ALGO_A2DP = 0x4, + BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS = 0x5, + BT_8192E_2ANT_COEX_ALGO_PANEDR = 0x6, + BT_8192E_2ANT_COEX_ALGO_PANHS = 0x7, + BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP = 0x8, + BT_8192E_2ANT_COEX_ALGO_PANEDR_HID = 0x9, + BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0xa, + BT_8192E_2ANT_COEX_ALGO_HID_A2DP = 0xb, + BT_8192E_2ANT_COEX_ALGO_MAX = 0xc +}; + +struct 
coex_dm_8192e_2ant{ + /* fw mechanism */ + u8 pre_dec_bt_pwr; + u8 cur_dec_bt_pwr; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 ps_tdma_du_adj_type; + bool reset_tdma_adjust; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + + /* sw mechanism */ + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_back_off; + bool cur_adc_back_off; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */ + u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */ + u16 backup_retrylimit; + u8 backup_ampdu_maxtime; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + u8 pre_sstype; + u8 cur_sstype; + + u32 prera_mask; + u32 curra_mask; + u8 curra_masktype; + u8 pre_arfrtype; + u8 cur_arfrtype; + u8 pre_retrylimit_type; + u8 cur_retrylimit_type; + u8 pre_ampdutime_type; + u8 cur_ampdutime_type; +}; + +struct coex_sta_8192e_2ant{ + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_lps; + bool under_ips; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8192E_2ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8192E_2ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/**************************************************************** + * The following is interface which will notify coex module. 
+ ****************************************************************/ +void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpBuf,u8 length); +void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist); +void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist); + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.c @@ -0,0 +1,3780 @@ +//============================================================ +// Description: +// +// This file is for RTL8723A Co-exist mechanism +// +// History +// 2012/08/22 Cosa first check in. +// 2012/11/14 Cosa Revise for 8723A 2Ant out sourcing. +// +//============================================================ + +//============================================================ +// include files +//============================================================ +#include "Mp_Precomp.h" +#if(BT_30_SUPPORT == 1) +//============================================================ +// Global variables, these are static variables +//============================================================ +static COEX_DM_8723A_2ANT GLCoexDm8723a2Ant; +static PCOEX_DM_8723A_2ANT pCoexDm=&GLCoexDm8723a2Ant; +static COEX_STA_8723A_2ANT GLCoexSta8723a2Ant; +static PCOEX_STA_8723A_2ANT pCoexSta=&GLCoexSta8723a2Ant; + +const char *const GLBtInfoSrc8723a2Ant[]={ + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +//============================================================ +// local function proto type if needed +//============================================================ +//============================================================ +// local function start with halbtc8723a2ant_ +//============================================================ +BOOLEAN +halbtc8723a2ant_IsWifiIdle( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bWifiConnected=FALSE, bScan=FALSE, bLink=FALSE, bRoam=FALSE; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + + if(bWifiConnected) + return FALSE; + if(bScan) + return FALSE; + if(bLink) + return FALSE; + if(bRoam) + return FALSE; + + return true; +} + +u1Byte +halbtc8723a2ant_BtRssiState( + u1Byte levelNum, + u1Byte rssiThresh, + u1Byte rssiThresh1 + ) +{ + s4Byte btRssi=0; + u1Byte btRssiState=pCoexSta->preBtRssiState; + + btRssi = pCoexSta->btRssi; + + if(levelNum == 2) + { + if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) || + (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)) + { + if(btRssi 
>= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT)) + { + btRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n")); + } + } + else + { + if(btRssi < rssiThresh) + { + btRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n")); + } + } + } + else if(levelNum == 3) + { + if(rssiThresh > rssiThresh1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi thresh error!!\n")); + return pCoexSta->preBtRssiState; + } + + if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_LOW) || + (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_LOW)) + { + if(btRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT)) + { + btRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Low\n")); + } + } + else if( (pCoexSta->preBtRssiState == BTC_RSSI_STATE_MEDIUM) || + (pCoexSta->preBtRssiState == BTC_RSSI_STATE_STAY_MEDIUM)) + { + if(btRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT)) + { + btRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to High\n")); + } + else if(btRssi < rssiThresh) + { + btRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Low\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at Medium\n")); + } + } + else + { + if(btRssi < rssiThresh1) + { + btRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state switch to Medium\n")); + } + else + { + btRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, ("[BTCoex], BT Rssi state stay at High\n")); + } + } + } + + pCoexSta->preBtRssiState = btRssiState; + + return btRssiState; +} + +u1Byte +halbtc8723a2ant_WifiRssiState( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte index, + IN u1Byte levelNum, + IN u1Byte rssiThresh, + IN u1Byte rssiThresh1 + ) +{ + s4Byte wifiRssi=0; + u1Byte wifiRssiState=pCoexSta->preWifiRssiState[index]; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi); + + if(levelNum == 2) + { + if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) || + (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW)) + { + if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT)) + { + wifiRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n")); + } + } + else + { + if(wifiRssi < rssiThresh) + { + wifiRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n")); + } + else + { + wifiRssiState = 
BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n")); + } + } + } + else if(levelNum == 3) + { + if(rssiThresh > rssiThresh1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI thresh error!!\n")); + return pCoexSta->preWifiRssiState[index]; + } + + if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_LOW) || + (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_LOW)) + { + if(wifiRssi >= (rssiThresh+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT)) + { + wifiRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Low\n")); + } + } + else if( (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_MEDIUM) || + (pCoexSta->preWifiRssiState[index] == BTC_RSSI_STATE_STAY_MEDIUM)) + { + if(wifiRssi >= (rssiThresh1+BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT)) + { + wifiRssiState = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to High\n")); + } + else if(wifiRssi < rssiThresh) + { + wifiRssiState = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Low\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at Medium\n")); + } + } + else + { + if(wifiRssi < rssiThresh1) + { + wifiRssiState = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state switch to Medium\n")); + } + else + { + wifiRssiState = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, ("[BTCoex], wifi RSSI state stay at High\n")); + } + } + } + + pCoexSta->preWifiRssiState[index] = wifiRssiState; + + return wifiRssiState; +} + +VOID +halbtc8723a2ant_IndicateWifiChnlBwInfo( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + u1Byte H2C_Parameter[3] ={0}; + u4Byte wifiBw; + u1Byte wifiCentralChnl; + + // only 2.4G we need to inform bt the chnl mask + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifiCentralChnl); + if( (BTC_MEDIA_CONNECT == type) && + (wifiCentralChnl <= 14) ) + { + H2C_Parameter[0] = 0x1; + H2C_Parameter[1] = wifiCentralChnl; + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + H2C_Parameter[2] = 0x30; + else + H2C_Parameter[2] = 0x20; + } + + pCoexDm->wifiChnlInfo[0] = H2C_Parameter[0]; + pCoexDm->wifiChnlInfo[1] = H2C_Parameter[1]; + pCoexDm->wifiChnlInfo[2] = H2C_Parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x19=0x%x\n", + H2C_Parameter[0]<<16|H2C_Parameter[1]<<8|H2C_Parameter[2])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x19, 3, H2C_Parameter); +} + +VOID +halbtc8723a2ant_QueryBtInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + pCoexSta->bC2hBtInfoReqSent = true; + + H2C_Parameter[0] |= BIT0; // trigger + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Query Bt Info, FW write 0x38=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x38, 1, H2C_Parameter); +} +u1Byte +halbtc8723a2ant_ActionAlgorithm( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info; + BOOLEAN bBtHsOn=FALSE, bBtBusy=FALSE, 
limited_dig=FALSE; + u1Byte algorithm=BT_8723A_2ANT_COEX_ALGO_UNDEFINED; + u1Byte numOfDiffProfile=0; + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + + //====================== + // here we get BT status first + //====================== + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_IDLE; + + if((pStackInfo->bScoExist) ||(bBtHsOn) ||(pStackInfo->bHidExist)) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO or HID or HS exists, set BT non-idle !!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE; + } + else + { + // A2dp profile + if( (pBtCoexist->stack_info.numOfLink == 1) && + (pStackInfo->bA2dpExist) ) + { + if( (pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 100) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx < 100, set BT connected-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP, low priority tx+rx >= 100, set BT non-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE; + } + } + // Pan profile + if( (pBtCoexist->stack_info.numOfLink == 1) && + (pStackInfo->bPanExist) ) + { + if((pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 600) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority tx+rx < 600, set BT connected-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE; + } + else + { + if(pCoexSta->lowPriorityTx) + { + if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, low priority rx/tx > 9, set BT connected-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE; + } + } + } + if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN, set BT non-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE; + } + } + // Pan+A2dp profile + if( (pBtCoexist->stack_info.numOfLink == 2) && + (pStackInfo->bA2dpExist) && + (pStackInfo->bPanExist) ) + { + if((pCoexSta->lowPriorityTx+ pCoexSta->lowPriorityRx) < 600) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority tx+rx < 600, set BT connected-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE; + } + else + { + if(pCoexSta->lowPriorityTx) + { + if((pCoexSta->lowPriorityRx /pCoexSta->lowPriorityTx)>9 ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, low priority rx/tx > 9, set BT connected-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE; + } + } + } + if(BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE != pCoexDm->btStatus) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN+A2DP, set BT non-idle!!!\n")); + pCoexDm->btStatus = BT_8723A_2ANT_BT_STATUS_NON_IDLE; + } + } + } + if(BT_8723A_2ANT_BT_STATUS_IDLE != pCoexDm->btStatus) + { + bBtBusy = true; + limited_dig = true; + } + else + { + bBtBusy = FALSE; + limited_dig = FALSE; + } + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bBtBusy); + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + //====================== + + if(!pStackInfo->bBtLinkExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], No profile exists!!!\n")); + return algorithm; + } + + if(pStackInfo->bScoExist) + numOfDiffProfile++; + if(pStackInfo->bHidExist) + numOfDiffProfile++; + if(pStackInfo->bPanExist) + numOfDiffProfile++; + if(pStackInfo->bA2dpExist) + 
numOfDiffProfile++; + + if(numOfDiffProfile == 1) + { + if(pStackInfo->bScoExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO only\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_SCO; + } + else + { + if(pStackInfo->bHidExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID only\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID; + } + else if(pStackInfo->bA2dpExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP only\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP; + } + else if(pStackInfo->bPanExist) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(HS) only\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANHS; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], PAN(EDR) only\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR; + } + } + } + } + else if(numOfDiffProfile == 2) + { + if(pStackInfo->bScoExist) + { + if(pStackInfo->bHidExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID; + } + else if(pStackInfo->bA2dpExist) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP ==> SCO\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_SCO; + } + else if(pStackInfo->bPanExist) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(HS)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_SCO; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + PAN(EDR)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + else + { + if( pStackInfo->bHidExist && + pStackInfo->bA2dpExist ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP; + } + else if( pStackInfo->bHidExist && + pStackInfo->bPanExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(HS)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + PAN(EDR)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + else if( pStackInfo->bPanExist && + pStackInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(HS)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], A2DP + PAN(EDR)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } + else if(numOfDiffProfile == 3) + { + if(pStackInfo->bScoExist) + { + if( pStackInfo->bHidExist && + pStackInfo->bA2dpExist ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP ==> HID\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID; + } + else if( pStackInfo->bHidExist && + pStackInfo->bPanExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(HS)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + PAN(EDR)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + else if( pStackInfo->bPanExist && + pStackInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(HS)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_SCO; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + else + { + if( pStackInfo->bHidExist && + pStackInfo->bPanExist && + 
pStackInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(HS)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], HID + A2DP + PAN(EDR)\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } + else if(numOfDiffProfile >= 3) + { + if(pStackInfo->bScoExist) + { + if( pStackInfo->bHidExist && + pStackInfo->bPanExist && + pStackInfo->bA2dpExist ) + { + if(bBtHsOn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n")); + + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n")); + algorithm = BT_8723A_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +BOOLEAN +halbtc8723a2ant_NeedToDecBtPwr( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bRet=FALSE; + BOOLEAN bBtHsOn=FALSE, bWifiConnected=FALSE; + s4Byte btHsRssi=0; + + if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn)) + return FALSE; + if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected)) + return FALSE; + if(!pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi)) + return FALSE; + + if(bWifiConnected) + { + if(bBtHsOn) + { + if(btHsRssi > 37) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for HS mode!!\n")); + bRet = true; + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], Need to decrease bt power for Wifi is connected!!\n")); + bRet = true; + } + } + + return bRet; +} + +VOID +halbtc8723a2ant_SetFwDacSwingLevel( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte dacSwingLvl + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + // There are several type of dacswing + // 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 + H2C_Parameter[0] = dacSwingLvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], Set Dac Swing Level=0x%x\n", dacSwingLvl)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x29=0x%x\n", H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x29, 1, H2C_Parameter); +} + +VOID +halbtc8723a2ant_SetFwDecBtPwr( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bDecBtPwr + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + H2C_Parameter[0] = 0; + + if(bDecBtPwr) + { + H2C_Parameter[0] |= BIT1; + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], decrease Bt Power : %s, FW write 0x21=0x%x\n", + (bDecBtPwr? "Yes!!":"No!!"), H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x21, 1, H2C_Parameter); +} + +VOID +halbtc8723a2ant_DecBtPwr( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bDecBtPwr + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s Dec BT power = %s\n", + (bForceExec? "force to":""), ((bDecBtPwr)? "ON":"OFF"))); + pCoexDm->bCurDecBtPwr = bDecBtPwr; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", + pCoexDm->bPreDecBtPwr, pCoexDm->bCurDecBtPwr)); + + if(pCoexDm->bPreDecBtPwr == pCoexDm->bCurDecBtPwr) + return; + } + halbtc8723a2ant_SetFwDecBtPwr(pBtCoexist, pCoexDm->bCurDecBtPwr); + + pCoexDm->bPreDecBtPwr = pCoexDm->bCurDecBtPwr; +} + +VOID +halbtc8723a2ant_FwDacSwingLvl( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u1Byte fwDacSwingLvl + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s set FW Dac Swing level = %d\n", + (bForceExec? 
"force to":""), fwDacSwingLvl)); + pCoexDm->curFwDacSwingLvl = fwDacSwingLvl; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n", + pCoexDm->preFwDacSwingLvl, pCoexDm->curFwDacSwingLvl)); + + if(pCoexDm->preFwDacSwingLvl == pCoexDm->curFwDacSwingLvl) + return; + } + + halbtc8723a2ant_SetFwDacSwingLevel(pBtCoexist, pCoexDm->curFwDacSwingLvl); + + pCoexDm->preFwDacSwingLvl = pCoexDm->curFwDacSwingLvl; +} + +VOID +halbtc8723a2ant_SetSwRfRxLpfCorner( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bRxRfShrinkOn + ) +{ + if(bRxRfShrinkOn) + { + //Shrink RF Rx LPF corner + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Shrink RF Rx LPF corner!!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, 0xf0ff7); + } + else + { + //Resume RF Rx LPF corner + // After initialized, we can use pCoexDm->btRf0x1eBackup + if(pBtCoexist->initilized) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Resume RF Rx LPF corner!!\n")); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff, pCoexDm->btRf0x1eBackup); + } + } +} + +VOID +halbtc8723a2ant_RfShrink( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bRxRfShrinkOn + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn Rx RF Shrink = %s\n", + (bForceExec? "force to":""), ((bRxRfShrinkOn)? "ON":"OFF"))); + pCoexDm->bCurRfRxLpfShrink = bRxRfShrinkOn; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n", + pCoexDm->bPreRfRxLpfShrink, pCoexDm->bCurRfRxLpfShrink)); + + if(pCoexDm->bPreRfRxLpfShrink == pCoexDm->bCurRfRxLpfShrink) + return; + } + halbtc8723a2ant_SetSwRfRxLpfCorner(pBtCoexist, pCoexDm->bCurRfRxLpfShrink); + + pCoexDm->bPreRfRxLpfShrink = pCoexDm->bCurRfRxLpfShrink; +} + +VOID +halbtc8723a2ant_SetSwPenaltyTxRateAdaptive( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bLowPenaltyRa + ) +{ + u1Byte tmpU1; + + tmpU1 = pBtCoexist->btc_read_1byte(pBtCoexist, 0x4fd); + tmpU1 |= BIT0; + if(bLowPenaltyRa) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set low penalty!!\n")); + tmpU1 &= ~BIT2; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Tx rate adaptive, set normal!!\n")); + tmpU1 |= BIT2; + } + + pBtCoexist->btc_write_1byte(pBtCoexist, 0x4fd, tmpU1); +} + +VOID +halbtc8723a2ant_LowPenaltyRa( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bLowPenaltyRa + ) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn LowPenaltyRA = %s\n", + (bForceExec? "force to":""), ((bLowPenaltyRa)? 
"ON":"OFF"))); + pCoexDm->bCurLowPenaltyRa = bLowPenaltyRa; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n", + pCoexDm->bPreLowPenaltyRa, pCoexDm->bCurLowPenaltyRa)); + + if(pCoexDm->bPreLowPenaltyRa == pCoexDm->bCurLowPenaltyRa) + return; + } + halbtc8723a2ant_SetSwPenaltyTxRateAdaptive(pBtCoexist, pCoexDm->bCurLowPenaltyRa); + + pCoexDm->bPreLowPenaltyRa = pCoexDm->bCurLowPenaltyRa; +} + +VOID +halbtc8723a2ant_SetSwFullTimeDacSwing( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bSwDacSwingOn, + IN u4Byte swDacSwingLvl + ) +{ + if(bSwDacSwingOn) + { + pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, swDacSwingLvl); + } + else + { + pBtCoexist->btc_setBbReg(pBtCoexist, 0x880, 0xff000000, 0xc0); + } +} + + +VOID +halbtc8723a2ant_DacSwing( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bDacSwingOn, + IN u4Byte dacSwingLvl + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn DacSwing=%s, dacSwingLvl=0x%x\n", + (bForceExec? "force to":""), ((bDacSwingOn)? "ON":"OFF"), dacSwingLvl)); + pCoexDm->bCurDacSwingOn = bDacSwingOn; + pCoexDm->curDacSwingLvl = dacSwingLvl; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + pCoexDm->bPreDacSwingOn, pCoexDm->preDacSwingLvl, + pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl)); + + if( (pCoexDm->bPreDacSwingOn == pCoexDm->bCurDacSwingOn) && + (pCoexDm->preDacSwingLvl == pCoexDm->curDacSwingLvl) ) + return; + } + mdelay(30); + halbtc8723a2ant_SetSwFullTimeDacSwing(pBtCoexist, bDacSwingOn, dacSwingLvl); + + pCoexDm->bPreDacSwingOn = pCoexDm->bCurDacSwingOn; + pCoexDm->preDacSwingLvl = pCoexDm->curDacSwingLvl; +} + +VOID +halbtc8723a2ant_SetAdcBackOff( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bAdcBackOff + ) +{ + if(bAdcBackOff) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level On!\n")); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a07611); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], BB BackOff Level Off!\n")); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc04,0x3a05611); + } +} + +VOID +halbtc8723a2ant_AdcBackOff( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bAdcBackOff + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s turn AdcBackOff = %s\n", + (bForceExec? "force to":""), ((bAdcBackOff)? 
"ON":"OFF"))); + pCoexDm->bCurAdcBackOff = bAdcBackOff; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", + pCoexDm->bPreAdcBackOff, pCoexDm->bCurAdcBackOff)); + + if(pCoexDm->bPreAdcBackOff == pCoexDm->bCurAdcBackOff) + return; + } + halbtc8723a2ant_SetAdcBackOff(pBtCoexist, pCoexDm->bCurAdcBackOff); + + pCoexDm->bPreAdcBackOff = pCoexDm->bCurAdcBackOff; +} + +VOID +halbtc8723a2ant_SetAgcTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bAgcTableEn + ) +{ + u1Byte rssiAdjustVal=0; + + if(bAgcTableEn) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table On!\n")); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4e1c0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4d1d0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4c1e0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4b1f0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x4a200001); + + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xdc000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x90000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x51000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x12000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x00355); + + rssiAdjustVal = 6; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], Agc Table Off!\n")); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x641c0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x631d0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x621e0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x611f0001); + pBtCoexist->btc_write_4byte(pBtCoexist, 0xc78,0x60200001); + + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x32000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0x71000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xb0000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x12, 0xfffff, 0xfc000); + pBtCoexist->btc_set_rf_reg(pBtCoexist, BTC_RF_A, 0x1a, 0xfffff, 0x30355); + } + + // set rssiAdjustVal for wifi module. + pBtCoexist->btc_set(pBtCoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, &rssiAdjustVal); +} + + +VOID +halbtc8723a2ant_AgcTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bAgcTableEn + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s %s Agc Table\n", + (bForceExec? "force to":""), ((bAgcTableEn)? 
"Enable":"Disable"))); + pCoexDm->bCurAgcTableEn = bAgcTableEn; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + pCoexDm->bPreAgcTableEn, pCoexDm->bCurAgcTableEn)); + + if(pCoexDm->bPreAgcTableEn == pCoexDm->bCurAgcTableEn) + return; + } + halbtc8723a2ant_SetAgcTable(pBtCoexist, bAgcTableEn); + + pCoexDm->bPreAgcTableEn = pCoexDm->bCurAgcTableEn; +} + +VOID +halbtc8723a2ant_SetCoexTable( + IN PBTC_COEXIST pBtCoexist, + IN u4Byte val0x6c0, + IN u4Byte val0x6c8, + IN u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8)); + pBtCoexist->btc_write_4byte(pBtCoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, ("[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc)); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x6cc, val0x6cc); +} + +VOID +halbtc8723a2ant_CoexTable( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN u4Byte val0x6c0, + IN u4Byte val0x6c8, + IN u1Byte val0x6cc + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, ("[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + (bForceExec? "force to":""), val0x6c0, val0x6c8, val0x6cc)); + pCoexDm->curVal0x6c0 = val0x6c0; + pCoexDm->curVal0x6c8 = val0x6c8; + pCoexDm->curVal0x6cc = val0x6cc; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], preVal0x6c0=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n", + pCoexDm->preVal0x6c0, pCoexDm->preVal0x6c8, pCoexDm->preVal0x6cc)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, ("[BTCoex], curVal0x6c0=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n", + pCoexDm->curVal0x6c0, pCoexDm->curVal0x6c8, pCoexDm->curVal0x6cc)); + + if( (pCoexDm->preVal0x6c0 == pCoexDm->curVal0x6c0) && + (pCoexDm->preVal0x6c8 == pCoexDm->curVal0x6c8) && + (pCoexDm->preVal0x6cc == pCoexDm->curVal0x6cc) ) + return; + } + halbtc8723a2ant_SetCoexTable(pBtCoexist, val0x6c0, val0x6c8, val0x6cc); + + pCoexDm->preVal0x6c0 = pCoexDm->curVal0x6c0; + pCoexDm->preVal0x6c8 = pCoexDm->curVal0x6c8; + pCoexDm->preVal0x6cc = pCoexDm->curVal0x6cc; +} + +VOID +halbtc8723a2ant_SetFwIgnoreWlanAct( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bEnable + ) +{ + u1Byte H2C_Parameter[1] ={0}; + + if(bEnable) + { + H2C_Parameter[0] |= BIT0; // function enable + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x25=0x%x\n", + H2C_Parameter[0])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x25, 1, H2C_Parameter); +} + +VOID +halbtc8723a2ant_IgnoreWlanAct( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bEnable + ) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn Ignore WlanAct %s\n", + (bForceExec? "force to":""), (bEnable? 
"ON":"OFF"))); + pCoexDm->bCurIgnoreWlanAct = bEnable; + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n", + pCoexDm->bPreIgnoreWlanAct, pCoexDm->bCurIgnoreWlanAct)); + + if(pCoexDm->bPreIgnoreWlanAct == pCoexDm->bCurIgnoreWlanAct) + return; + } + halbtc8723a2ant_SetFwIgnoreWlanAct(pBtCoexist, bEnable); + + pCoexDm->bPreIgnoreWlanAct = pCoexDm->bCurIgnoreWlanAct; +} + +VOID +halbtc8723a2ant_SetFwPstdma( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte byte1, + IN u1Byte byte2, + IN u1Byte byte3, + IN u1Byte byte4, + IN u1Byte byte5 + ) +{ + u1Byte H2C_Parameter[5] ={0}; + + H2C_Parameter[0] = byte1; + H2C_Parameter[1] = byte2; + H2C_Parameter[2] = byte3; + H2C_Parameter[3] = byte4; + H2C_Parameter[4] = byte5; + + pCoexDm->psTdmaPara[0] = byte1; + pCoexDm->psTdmaPara[1] = byte2; + pCoexDm->psTdmaPara[2] = byte3; + pCoexDm->psTdmaPara[3] = byte4; + pCoexDm->psTdmaPara[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write 0x3a(5bytes)=0x%x%08x\n", + H2C_Parameter[0], + H2C_Parameter[1]<<24|H2C_Parameter[2]<<16|H2C_Parameter[3]<<8|H2C_Parameter[4])); + + pBtCoexist->btc_fill_h2c(pBtCoexist, 0x3a, 5, H2C_Parameter); +} + +VOID +halbtc8723a2ant_PsTdma( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bForceExec, + IN BOOLEAN bTurnOn, + IN u1Byte type + ) +{ + u4Byte btTxRxCnt=0; + + btTxRxCnt = pCoexSta->highPriorityTx+pCoexSta->highPriorityRx+ + pCoexSta->lowPriorityTx+pCoexSta->lowPriorityRx; + + if(btTxRxCnt > 3000) + { + pCoexDm->bCurPsTdmaOn = true; + pCoexDm->curPsTdma = 8; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], turn ON PS TDMA, type=%d for BT tx/rx counters=%d(>3000)\n", + pCoexDm->curPsTdma, btTxRxCnt)); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], %s turn %s PS TDMA, type=%d\n", + (bForceExec? "force to":""), (bTurnOn? 
"ON":"OFF"), type)); + pCoexDm->bCurPsTdmaOn = bTurnOn; + pCoexDm->curPsTdma = type; + } + + if(!bForceExec) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + pCoexDm->bPrePsTdmaOn, pCoexDm->bCurPsTdmaOn)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + pCoexDm->prePsTdma, pCoexDm->curPsTdma)); + + if( (pCoexDm->bPrePsTdmaOn == pCoexDm->bCurPsTdmaOn) && + (pCoexDm->prePsTdma == pCoexDm->curPsTdma) ) + return; + } + if(pCoexDm->bCurPsTdmaOn) + { + switch(pCoexDm->curPsTdma) + { + case 1: + default: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98); + break; + case 2: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98); + break; + case 3: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98); + break; + case 4: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0xe1, 0x80); + break; + case 5: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98); + break; + case 6: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98); + break; + case 7: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98); + break; + case 8: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x5, 0x5, 0x60, 0x80); + break; + case 9: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0xe1, 0x98); + break; + case 10: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0xe1, 0x98); + break; + case 11: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0xe1, 0x98); + break; + case 12: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98); + break; + case 13: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x1a, 0x1a, 0x60, 0x98); + break; + case 14: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x12, 0x12, 0x60, 0x98); + break; + case 15: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0xa, 0xa, 0x60, 0x98); + break; + case 16: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0x60, 0x98); + break; + case 17: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xa3, 0x2f, 0x2f, 0x60, 0x80); + break; + case 18: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x5, 0x5, 0xe1, 0x98); + break; + case 19: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0xe1, 0x98); + break; + case 20: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0xe3, 0x25, 0x25, 0x60, 0x98); + break; + } + } + else + { + // disable PS tdma + switch(pCoexDm->curPsTdma) + { + case 0: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0); + break; + case 1: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x0, 0x0); + break; + default: + halbtc8723a2ant_SetFwPstdma(pBtCoexist, 0x0, 0x0, 0x0, 0x8, 0x0); + break; + } + } + + // update pre state + pCoexDm->bPrePsTdmaOn = pCoexDm->bCurPsTdmaOn; + pCoexDm->prePsTdma = pCoexDm->curPsTdma; +} + + +VOID +halbtc8723a2ant_CoexAllOff( + IN PBTC_COEXIST pBtCoexist + ) +{ + // fw all off + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + + // sw all off + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, 
NORMAL_EXEC, FALSE, 0xc0); + + // hw all off + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); +} + +VOID +halbtc8723a2ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ) +{ + // force to reset coex mechanism + halbtc8723a2ant_CoexTable(pBtCoexist, FORCE_EXEC, 0x55555555, 0xffff, 0x3); + halbtc8723a2ant_PsTdma(pBtCoexist, FORCE_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, FORCE_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, FORCE_EXEC, FALSE); + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE); + + halbtc8723a2ant_AgcTable(pBtCoexist, FORCE_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, FORCE_EXEC, FALSE); + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, FORCE_EXEC, FALSE); + halbtc8723a2ant_RfShrink(pBtCoexist, FORCE_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, FORCE_EXEC, FALSE, 0xc0); +} + +VOID +halbtc8723a2ant_BtInquiryPage( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bLowPwrDisable=true; + + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable); + + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); +} + +VOID +halbtc8723a2ant_BtEnableAction( + IN PBTC_COEXIST pBtCoexist + ) +{ + BOOLEAN bWifiConnected=FALSE; + + // Here we need to resend some wifi info to BT, + // because BT was reset and has lost that info. + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + if(bWifiConnected) + { + halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_CONNECT); + } + else + { + halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, BTC_MEDIA_DISCONNECT); + } + + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, FALSE); +} + +VOID +halbtc8723a2ant_MonitorBtCtr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u4Byte regHPTxRx, regLPTxRx, u4Tmp; + u4Byte regHPTx=0, regHPRx=0, regLPTx=0, regLPRx=0; + u1Byte u1Tmp; + + regHPTxRx = 0x770; + regLPTxRx = 0x774; + + u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regHPTxRx); + regHPTx = u4Tmp & MASKLWORD; + regHPRx = (u4Tmp & MASKHWORD)>>16; + + u4Tmp = pBtCoexist->btc_read_4byte(pBtCoexist, regLPTxRx); + regLPTx = u4Tmp & MASKLWORD; + regLPRx = (u4Tmp & MASKHWORD)>>16; + + pCoexSta->highPriorityTx = regHPTx; + pCoexSta->highPriorityRx = regHPRx; + pCoexSta->lowPriorityTx = regLPTx; + pCoexSta->lowPriorityRx = regLPRx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], High Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + regHPTxRx, regHPTx, regHPTx, regHPRx, regHPRx)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], Low Priority Tx/Rx (reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + regLPTxRx, regLPTx, regLPTx, regLPRx, regLPRx)); + + // reset counter + pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0xc); +} + +VOID +halbtc8723a2ant_MonitorBtEnableDisable( + IN PBTC_COEXIST pBtCoexist + ) +{ + static BOOLEAN bPreBtDisabled=FALSE; + static u4Byte btDisableCnt=0; + BOOLEAN bBtActive=true, bBtDisabled=FALSE; + + // This function checks whether BT is disabled + + if( pCoexSta->highPriorityTx == 0 && + pCoexSta->highPriorityRx == 0 && + pCoexSta->lowPriorityTx == 0 && + pCoexSta->lowPriorityRx == 0) + { + bBtActive = FALSE; + } + if( pCoexSta->highPriorityTx == 0xffff && + pCoexSta->highPriorityRx == 0xffff && + pCoexSta->lowPriorityTx == 0xffff && + pCoexSta->lowPriorityRx == 0xffff) + { + bBtActive = FALSE; + } + if(bBtActive) + { + btDisableCnt = 0; + bBtDisabled = FALSE; + 
pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is enabled !!\n")); + } + else + { + btDisableCnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], bt all counters=0, %d times!!\n", + btDisableCnt)); + if(btDisableCnt >= 2) + { + bBtDisabled = true; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_BL_BT_DISABLE, &bBtDisabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is disabled !!\n")); + } + } + if(bPreBtDisabled != bBtDisabled) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, ("[BTCoex], BT is from %s to %s!!\n", + (bPreBtDisabled ? "disabled":"enabled"), + (bBtDisabled ? "disabled":"enabled"))); + bPreBtDisabled = bBtDisabled; + if(!bBtDisabled) + { + halbtc8723a2ant_BtEnableAction(pBtCoexist); + } + } +} + +BOOLEAN +halbtc8723a2ant_IsCommonAction( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info; + BOOLEAN bCommon=FALSE, bWifiConnected=FALSE; + BOOLEAN bLowPwrDisable=FALSE; + + if(!pStackInfo->bBtLinkExist) + { + bLowPwrDisable = FALSE; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable); + } + else + { + bLowPwrDisable = true; + pBtCoexist->btc_set(pBtCoexist, BTC_SET_ACT_DISABLE_LOW_POWER, &bLowPwrDisable); + } + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_CONNECTED, &bWifiConnected); + + if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) && + BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt idle!!\n")); + + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + + bCommon = true; + } + else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) && + (BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT idle!!\n")); + + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + + bCommon = true; + } + else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) && + (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + Bt connected idle!!\n")); + + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + 
halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + + bCommon = true; + } + else if(!halbtc8723a2ant_IsWifiIdle(pBtCoexist) && + (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + Bt connected idle!!\n")); + + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + + bCommon = true; + } + else if(halbtc8723a2ant_IsWifiIdle(pBtCoexist) && + (BT_8723A_2ANT_BT_STATUS_NON_IDLE == pCoexDm->btStatus) ) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi idle + BT non-idle!!\n")); + + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + + bCommon = true; + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Wifi non-idle + BT non-idle!!\n")); + halbtc8723a2ant_LowPenaltyRa(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_RfShrink(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_FwDacSwingLvl(pBtCoexist, NORMAL_EXEC, 0x20); + + bCommon = FALSE; + } + + return bCommon; +} +VOID +halbtc8723a2ant_TdmaDurationAdjust( + IN PBTC_COEXIST pBtCoexist, + IN BOOLEAN bScoHid, + IN BOOLEAN bTxPause, + IN u1Byte maxInterval + ) +{ + static s4Byte up,dn,m,n,WaitCount; + s4Byte result; //0: no change, +1: increase WiFi duration, -1: decrease WiFi duration + u1Byte retryCount=0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, ("[BTCoex], TdmaDurationAdjust()\n")); + + if(pCoexDm->bResetTdmaAdjust) + { + pCoexDm->bResetTdmaAdjust = FALSE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], first run TdmaDurationAdjust()!!\n")); + { + if(bScoHid) + { + if(bTxPause) + { + if(maxInterval == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13); + pCoexDm->psTdmaDuAdjType = 13; + } + else if(maxInterval == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(maxInterval == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else 
+ { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + } + else + { + if(maxInterval == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(maxInterval == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(maxInterval == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + } + } + else + { + if(bTxPause) + { + if(maxInterval == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5); + pCoexDm->psTdmaDuAdjType = 5; + } + else if(maxInterval == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(maxInterval == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + } + else + { + if(maxInterval == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + pCoexDm->psTdmaDuAdjType = 1; + } + else if(maxInterval == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(maxInterval == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + } + } + } + //============ + up = 0; + dn = 0; + m = 1; + n= 3; + result = 0; + WaitCount = 0; + } + else + { + // acquire the BT TRx retry count from BT_Info byte2 + retryCount = pCoexSta->btRetryCnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], retryCount = %d\n", retryCount)); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], up=%d, dn=%d, m=%d, n=%d, WaitCount=%d\n", + up, dn, m, n, WaitCount)); + result = 0; + WaitCount++; + + if(retryCount == 0) // no retry in the last 2-second duration + { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if(up >= n) // if retry count==0 for n consecutive 2-second intervals, widen the WiFi duration + { + WaitCount = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Increase wifi duration!!\n")); + } + } + else if (retryCount <= 3) // <=3 retry in the last 2-second duration + { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) // if retry count < 3 for two consecutive 2-second intervals, narrow the WiFi duration + { + if (WaitCount <= 2) + m++; // avoid bouncing back and forth between two levels + else + m = 1; + + if ( m >= 20) // m max value is 20, i.e. recheck whether to adjust the WiFi duration after at most 120 seconds + m = 20; + + n = 3*m; + up = 0; + dn = 0; + WaitCount = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter<3!!\n")); + } + } + else // retry count > 3, once a single retry count > 3 is seen, narrow the WiFi duration + { + if (WaitCount == 1) + m++; // avoid bouncing back and forth between two levels + else + m = 1; + + if ( m >= 20) // m max value is 20, i.e. recheck whether to adjust the WiFi duration after at most 120 seconds 
+ m = 20; + + n = 3*m; + up = 0; + dn = 0; + WaitCount = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], Decrease wifi duration for retryCounter>3!!\n")); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], max Interval = %d\n", maxInterval)); + if(maxInterval == 1) + { + if(bTxPause) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n")); + + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5); + pCoexDm->psTdmaDuAdjType = 5; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 4) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + pCoexDm->psTdmaDuAdjType = 8; + } + if(pCoexDm->curPsTdma == 9) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13); + pCoexDm->psTdmaDuAdjType = 13; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 12) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + pCoexDm->psTdmaDuAdjType = 16; + } + + if(result == -1) + { + if(pCoexDm->curPsTdma == 5) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + pCoexDm->psTdmaDuAdjType = 8; + } + else if(pCoexDm->curPsTdma == 13) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + pCoexDm->psTdmaDuAdjType = 16; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 8) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 5); + pCoexDm->psTdmaDuAdjType = 5; + } + else if(pCoexDm->curPsTdma == 16) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13); + pCoexDm->psTdmaDuAdjType = 13; + } + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n")); + if(pCoexDm->curPsTdma == 5) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + pCoexDm->psTdmaDuAdjType = 1; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 
2; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 8) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + pCoexDm->psTdmaDuAdjType = 4; + } + if(pCoexDm->curPsTdma == 13) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 16) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + pCoexDm->psTdmaDuAdjType = 12; + } + + if(result == -1) + { + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + pCoexDm->psTdmaDuAdjType = 4; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + pCoexDm->psTdmaDuAdjType = 12; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 4) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 1); + pCoexDm->psTdmaDuAdjType = 1; + } + else if(pCoexDm->curPsTdma == 12) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + pCoexDm->psTdmaDuAdjType = 9; + } + } + } + } + else if(maxInterval == 2) + { + if(bTxPause) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n")); + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 4) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + pCoexDm->psTdmaDuAdjType = 8; + } + if(pCoexDm->curPsTdma == 9) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 
15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 12) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + pCoexDm->psTdmaDuAdjType = 16; + } + if(result == -1) + { + if(pCoexDm->curPsTdma == 5) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + pCoexDm->psTdmaDuAdjType = 8; + } + else if(pCoexDm->curPsTdma == 13) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + pCoexDm->psTdmaDuAdjType = 16; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 8) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + pCoexDm->psTdmaDuAdjType = 6; + } + else if(pCoexDm->curPsTdma == 16) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + pCoexDm->psTdmaDuAdjType = 14; + } + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n")); + if(pCoexDm->curPsTdma == 5) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 8) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + pCoexDm->psTdmaDuAdjType = 4; + } + if(pCoexDm->curPsTdma == 13) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 16) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + pCoexDm->psTdmaDuAdjType = 12; + } + if(result == -1) + { + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + pCoexDm->psTdmaDuAdjType = 4; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8723a2ant_PsTdma(pBtCoexist, 
NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + pCoexDm->psTdmaDuAdjType = 12; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 4) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + pCoexDm->psTdmaDuAdjType = 2; + } + else if(pCoexDm->curPsTdma == 12) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + pCoexDm->psTdmaDuAdjType = 10; + } + } + } + } + else if(maxInterval == 3) + { + if(bTxPause) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 1\n")); + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 4) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + pCoexDm->psTdmaDuAdjType = 8; + } + if(pCoexDm->curPsTdma == 9) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 12) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + pCoexDm->psTdmaDuAdjType = 16; + } + if(result == -1) + { + if(pCoexDm->curPsTdma == 5) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + pCoexDm->psTdmaDuAdjType = 8; + } + else if(pCoexDm->curPsTdma == 13) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + pCoexDm->psTdmaDuAdjType = 16; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 8) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else 
if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 7); + pCoexDm->psTdmaDuAdjType = 7; + } + else if(pCoexDm->curPsTdma == 16) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + pCoexDm->psTdmaDuAdjType = 15; + } + } + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], TxPause = 0\n")); + if(pCoexDm->curPsTdma == 5) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 6) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 7) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 8) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + pCoexDm->psTdmaDuAdjType = 4; + } + if(pCoexDm->curPsTdma == 13) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 14) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 15) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 16) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + pCoexDm->psTdmaDuAdjType = 12; + } + if(result == -1) + { + if(pCoexDm->curPsTdma == 1) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + pCoexDm->psTdmaDuAdjType = 4; + } + else if(pCoexDm->curPsTdma == 9) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + pCoexDm->psTdmaDuAdjType = 12; + } + } + else if (result == 1) + { + if(pCoexDm->curPsTdma == 4) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 3) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 2) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 3); + pCoexDm->psTdmaDuAdjType = 3; + } + else if(pCoexDm->curPsTdma == 12) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 11) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + else if(pCoexDm->curPsTdma == 10) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + pCoexDm->psTdmaDuAdjType = 11; + } + } + } + } + } + + // if current PsTdma not match with the recorded one (when scan, dhcp...), + // then we have to adjust it back to the previous 
record one. + if(pCoexDm->curPsTdma != pCoexDm->psTdmaDuAdjType) + { + BOOLEAN bScan=FALSE, bLink=FALSE, bRoam=FALSE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", + pCoexDm->curPsTdma, pCoexDm->psTdmaDuAdjType)); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + + if( !bScan && !bLink && !bRoam) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, pCoexDm->psTdmaDuAdjType); + } + else + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, ("[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n")); + } + } +} + +// SCO only or SCO+PAN(HS) +VOID +halbtc8723a2ant_ActionSco( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1; + u4Byte wifiBw; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 11); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 15); + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + + +VOID +halbtc8723a2ant_ActionHid( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1; + u4Byte wifiBw; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + } + else + { + 
halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13); + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 9); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 13); + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +//A2DP only / PAN(EDR) only/ A2DP+PAN(HS) +VOID +halbtc8723a2ant_ActionA2dp( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3); + } + else + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1); + } + } + else + { + if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3); + } + else + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1); + } + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 3); + } + else + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, FALSE, 1); + } + } + else + { + if(btInfoExt&BIT0) //a2dp rate, 1:basic /0:edr + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 3); + } + else + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, FALSE, true, 1); + } + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + 
halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +VOID +halbtc8723a2ant_ActionPanEdr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + + +//PAN(HS) only +VOID +halbtc8723a2ant_ActionPanHs( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState; + u4Byte wifiBw; + + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + } + else + { + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + } + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + 
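+ // PAN(HS): PS-TDMA stays disabled; only the BT Tx power decrease above tracks the wifi RSSI state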
halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + } + else + { + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, FALSE, 0); + } + + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +//PAN(EDR)+A2DP +VOID +halbtc8723a2ant_ActionPanEdrA2dp( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + } + } + else + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + } + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 4); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 2); + } + } + else + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 8); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 6); + } + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +VOID +halbtc8723a2ant_ActionPanEdrHid( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1; + u4Byte wifiBw; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, 
true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + } + else + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + } + + // sw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +// HID+A2DP+PAN(EDR) +VOID +halbtc8723a2ant_ActionHidA2dpPanEdr( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + } + } + else + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + } + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 12); + } + else //a2dp edr rate + { + 
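+ // EDR-rate A2DP: PS-TDMA case 10 (the basic-rate path above selects case 12)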
halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 10); + } + } + else + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 16); + } + else //a2dp edr rate + { + halbtc8723a2ant_PsTdma(pBtCoexist, NORMAL_EXEC, true, 14); + } + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +VOID +halbtc8723a2ant_ActionHidA2dp( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte wifiRssiState, wifiRssiState1, btInfoExt; + u4Byte wifiBw; + + btInfoExt = pCoexSta->btInfoExt; + + if(halbtc8723a2ant_NeedToDecBtPwr(pBtCoexist)) + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, true); + else + halbtc8723a2ant_DecBtPwr(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_CoexTable(pBtCoexist, NORMAL_EXEC, 0x55555555, 0xffff, 0x3); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + if(BTC_WIFI_BW_HT40 == wifiBw) + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 37, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3); + } + else //a2dp edr rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1); + } + } + else + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3); + } + else //a2dp edr rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1); + } + } + + // sw mechanism + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + wifiRssiState = halbtc8723a2ant_WifiRssiState(pBtCoexist, 0, 2, 27, 0); + wifiRssiState1 = halbtc8723a2ant_WifiRssiState(pBtCoexist, 1, 2, 47, 0); + + // fw mechanism + if( (wifiRssiState == BTC_RSSI_STATE_HIGH) || + (wifiRssiState == BTC_RSSI_STATE_STAY_HIGH) ) + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 3); + } + else //a2dp edr rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, FALSE, 1); + } + } + else + { + if(btInfoExt&BIT0) //a2dp basic rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 3); + } + else //a2dp edr rate + { + halbtc8723a2ant_TdmaDurationAdjust(pBtCoexist, true, true, 1); + } + } + + // sw mechanism + if( (wifiRssiState1 == BTC_RSSI_STATE_HIGH) || + (wifiRssiState1 == BTC_RSSI_STATE_STAY_HIGH) ) + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, true); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + else + { + halbtc8723a2ant_AgcTable(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_AdcBackOff(pBtCoexist, NORMAL_EXEC, FALSE); + halbtc8723a2ant_DacSwing(pBtCoexist, NORMAL_EXEC, FALSE, 0xc0); + } + } +} + +VOID +halbtc8723a2ant_RunCoexistMechanism( + IN PBTC_COEXIST pBtCoexist + ) +{ + PBTC_STACK_INFO 
pStackInfo=&pBtCoexist->stack_info; + u1Byte btInfoOriginal=0, btRetryCnt=0; + u1Byte algorithm=0; + + if(pBtCoexist->manual_control) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Manual control!!!\n")); + return; + } + + if(pStackInfo->bProfileNotified) + { + if(pCoexSta->bHoldForStackOperation) + { + // if bt inquiry/page/pair, do not execute. + return; + } + + algorithm = halbtc8723a2ant_ActionAlgorithm(pBtCoexist); + if(pCoexSta->bHoldPeriodCnt && (BT_8723A_2ANT_COEX_ALGO_PANHS!=algorithm)) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex],Hold BT inquiry/page scan setting (cnt = %d)!!\n", + pCoexSta->bHoldPeriodCnt)); + if(pCoexSta->bHoldPeriodCnt >= 6) + { + pCoexSta->bHoldPeriodCnt = 0; + // next time the coexist parameters should be reset again. + } + else + pCoexSta->bHoldPeriodCnt++; + return; + } + + pCoexDm->curAlgorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Algorithm = %d \n", pCoexDm->curAlgorithm)); + if(halbtc8723a2ant_IsCommonAction(pBtCoexist)) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant common.\n")); + pCoexDm->bResetTdmaAdjust = true; + } + else + { + if(pCoexDm->curAlgorithm != pCoexDm->preAlgorithm) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n", + pCoexDm->preAlgorithm, pCoexDm->curAlgorithm)); + pCoexDm->bResetTdmaAdjust = true; + } + switch(pCoexDm->curAlgorithm) + { + case BT_8723A_2ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = SCO.\n")); + halbtc8723a2ant_ActionSco(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID.\n")); + halbtc8723a2ant_ActionHid(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = A2DP.\n")); + halbtc8723a2ant_ActionA2dp(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n")); + halbtc8723a2ant_ActionPanEdr(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HS mode.\n")); + halbtc8723a2ant_ActionPanHs(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n")); + halbtc8723a2ant_ActionPanEdrA2dp(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n")); + halbtc8723a2ant_ActionPanEdrHid(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n")); + halbtc8723a2ant_ActionHidA2dpPanEdr(pBtCoexist); + break; + case BT_8723A_2ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n")); + halbtc8723a2ant_ActionHidA2dp(pBtCoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n")); + halbtc8723a2ant_CoexAllOff(pBtCoexist); + break; + } + pCoexDm->preAlgorithm = pCoexDm->curAlgorithm; + } + } +} + +//============================================================ +// work around function start with wa_halbtc8723a2ant_ 
+//============================================================ +VOID +wa_halbtc8723a2ant_MonitorC2h( + IN PBTC_COEXIST pBtCoexist + ) +{ + u1Byte tmp1b=0x0; + u4Byte curC2hTotalCnt=0x0; + static u4Byte preC2hTotalCnt=0x0, sameCntPollingTime=0x0; + + curC2hTotalCnt+=pCoexSta->btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_BT_RSP]; + + if(curC2hTotalCnt == preC2hTotalCnt) + { + sameCntPollingTime++; + } + else + { + preC2hTotalCnt = curC2hTotalCnt; + sameCntPollingTime = 0; + } + + if(sameCntPollingTime >= 2) + { + tmp1b = pBtCoexist->btc_read_1byte(pBtCoexist, 0x1af); + if(tmp1b != 0x0) + { + pCoexSta->c2hHangDetectCnt++; + pBtCoexist->btc_write_1byte(pBtCoexist, 0x1af, 0x0); + } + } +} + +//============================================================ +// extern function start with EXhalbtc8723a2ant_ +//============================================================ +VOID +EXhalbtc8723a2ant_InitHwConfig( + IN PBTC_COEXIST pBtCoexist + ) +{ + u4Byte u4Tmp=0; + u1Byte u1Tmp=0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], 2Ant Init HW Config!!\n")); + + // backup rf 0x1e value + pCoexDm->btRf0x1eBackup = + pBtCoexist->btc_get_rf_reg(pBtCoexist, BTC_RF_A, 0x1e, 0xfffff); + + // Enable counter statistics + pBtCoexist->btc_write_1byte(pBtCoexist, 0x76e, 0x4); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x778, 0x3); + pBtCoexist->btc_write_1byte(pBtCoexist, 0x40, 0x20); +} + +VOID +EXhalbtc8723a2ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, ("[BTCoex], Coex Mechanism Init!!\n")); + + halbtc8723a2ant_InitCoexDm(pBtCoexist); +} + +VOID +EXhalbtc8723a2ant_DisplayCoexInfo( + IN PBTC_COEXIST pBtCoexist + ) +{ + struct btc_board_info * pBoardInfo=&pBtCoexist->board_info; + PBTC_STACK_INFO pStackInfo=&pBtCoexist->stack_info; + pu1Byte cliBuf=pBtCoexist->cli_buf; + u1Byte u1Tmp[4], i, btInfoExt, psTdmaCase=0; + u4Byte u4Tmp[4]; + BOOLEAN bRoam=FALSE, bScan=FALSE, bLink=FALSE, bWifiUnder5G=FALSE; + BOOLEAN bBtHsOn=FALSE, bWifiBusy=FALSE; + s4Byte wifiRssi=0, btHsRssi=0; + u4Byte wifiBw, wifiTrafficDir; + u1Byte wifiDot11Chnl, wifiHsChnl; + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cliBuf); + + if(!pBoardInfo->bt_exist) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cliBuf); + return; + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:", \ + pBoardInfo->pg_ant_num, pBoardInfo->btdm_ant_num); + CL_PRINTF(cliBuf); + + if(pBtCoexist->manual_control) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "[Action Manual control]!!"); + CL_PRINTF(cliBuf); + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", "BT stack/ hci ext ver", \ + ((pStackInfo->bProfileNotified)? 
"Yes":"No"), pStackInfo->hciVersion); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_DOT11_CHNL, &wifiDot11Chnl); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifiHsChnl); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", "Dot11 channel / HsChnl(HsMode)", \ + wifiDot11Chnl, wifiHsChnl, bBtHsOn); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", "H2C Wifi inform bt chnl Info", \ + pCoexDm->wifiChnlInfo[0], pCoexDm->wifiChnlInfo[1], + pCoexDm->wifiChnlInfo[2]); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_WIFI_RSSI, &wifiRssi); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_S4_HS_RSSI, &btHsRssi); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi", \ + wifiRssi, btHsRssi); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_SCAN, &bScan); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_LINK, &bLink); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_ROAM, &bRoam); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", "Wifi bLink/ bRoam/ bScan", \ + bLink, bRoam, bScan); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_UNDER_5G, &bWifiUnder5G); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_BW, &wifiBw); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_WIFI_BUSY, &bWifiBusy); + pBtCoexist->btc_get(pBtCoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifiTrafficDir); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", "Wifi status", \ + (bWifiUnder5G? "5G":"2.4G"), + ((BTC_WIFI_BW_LEGACY==wifiBw)? "Legacy": (((BTC_WIFI_BW_HT40==wifiBw)? "HT40":"HT20"))), + ((!bWifiBusy)? "idle": ((BTC_WIFI_TRAFFIC_TX==wifiTrafficDir)? "uplink":"downlink"))); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]", \ + ((pCoexSta->bC2hBtInquiryPage)?("inquiry/page scan"):((BT_8723A_2ANT_BT_STATUS_IDLE == pCoexDm->btStatus)? "idle":( (BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE == pCoexDm->btStatus)? "connected-idle":"busy"))), + pCoexSta->btRssi, pCoexSta->btRetryCnt); + CL_PRINTF(cliBuf); + + if(pStackInfo->bProfileNotified) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP", \ + pStackInfo->bScoExist, pStackInfo->bHidExist, pStackInfo->bPanExist, pStackInfo->bA2dpExist); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_BT_LINK_INFO); + } + + btInfoExt = pCoexSta->btInfoExt; + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", "BT Info A2DP rate", \ + (btInfoExt&BIT0)? 
"Basic rate":"EDR rate"); + CL_PRINTF(cliBuf); + + for(i=0; ibtInfoC2hCnt[i]) + { + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x %02x(%d)", GLBtInfoSrc8723a2Ant[i], \ + pCoexSta->btInfoC2h[i][0], pCoexSta->btInfoC2h[i][1], + pCoexSta->btInfoC2h[i][2], pCoexSta->btInfoC2h[i][3], + pCoexSta->btInfoC2h[i][4], pCoexSta->btInfoC2h[i][5], + pCoexSta->btInfoC2h[i][6], pCoexSta->btInfoC2hCnt[i]); + CL_PRINTF(cliBuf); + } + } + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "write 0x1af=0x0 num", \ + pCoexSta->c2hHangDetectCnt); + CL_PRINTF(cliBuf); + + // Sw mechanism + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Sw mechanism]============"); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", "SM1[ShRf/ LpRA/ LimDig]", \ + pCoexDm->bCurRfRxLpfShrink, pCoexDm->bCurLowPenaltyRa, pCoexDm->limited_dig); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \ + pCoexDm->bCurAgcTableEn, pCoexDm->bCurAdcBackOff, pCoexDm->bCurDacSwingOn, pCoexDm->curDacSwingLvl); + CL_PRINTF(cliBuf); + + // Fw mechanism + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Fw mechanism]============"); + CL_PRINTF(cliBuf); + + if(!pBtCoexist->manual_control) + { + psTdmaCase = pCoexDm->curPsTdma; + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA", \ + pCoexDm->psTdmaPara[0], pCoexDm->psTdmaPara[1], + pCoexDm->psTdmaPara[2], pCoexDm->psTdmaPara[3], + pCoexDm->psTdmaPara[4], psTdmaCase); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct", \ + pCoexDm->bCurDecBtPwr, pCoexDm->bCurIgnoreWlanAct); + CL_PRINTF(cliBuf); + } + + // Hw setting + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Hw setting]============"); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal", \ + pCoexDm->btRf0x1eBackup); + CL_PRINTF(cliBuf); + + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x778); + u1Tmp[1] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x783); + u1Tmp[2] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x796); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", "0x778/ 0x783/ 0x796", \ + u1Tmp[0], u1Tmp[1], u1Tmp[2]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x880); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x880", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x40); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x40", \ + u1Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x550); + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x522); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522", \ + u4Tmp[0], u1Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x484); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x484(rate adaptive)", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xc50); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0xc50(dig)", \ + u4Tmp[0]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda0); + u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda4); + u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0xda8); + u4Tmp[3] = 
pBtCoexist->btc_read_4byte(pBtCoexist, 0xdac); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0xda0/0xda4/0xda8/0xdac(FA cnt)", \ + u4Tmp[0], u4Tmp[1], u4Tmp[2], u4Tmp[3]); + CL_PRINTF(cliBuf); + + u4Tmp[0] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c0); + u4Tmp[1] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c4); + u4Tmp[2] = pBtCoexist->btc_read_4byte(pBtCoexist, 0x6c8); + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x6cc); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \ + u4Tmp[0], u4Tmp[1], u4Tmp[2], u1Tmp[0]); + CL_PRINTF(cliBuf); + + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x770 (hp rx[31:16]/tx[15:0])", \ + pCoexSta->highPriorityRx, pCoexSta->highPriorityTx); + CL_PRINTF(cliBuf); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "0x774(lp rx[31:16]/tx[15:0])", \ + pCoexSta->lowPriorityRx, pCoexSta->lowPriorityTx); + CL_PRINTF(cliBuf); + + // Tx mgnt queue hang or not, 0x41b should = 0xf, ex: 0xd ==>hang + u1Tmp[0] = pBtCoexist->btc_read_1byte(pBtCoexist, 0x41b); + CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "0x41b (mgntQ hang chk == 0xf)", \ + u1Tmp[0]); + CL_PRINTF(cliBuf); + + pBtCoexist->btc_disp_dbg_msg(pBtCoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + + +VOID +EXhalbtc8723a2ant_IpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_IPS_ENTER == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS ENTER notify\n")); + halbtc8723a2ant_CoexAllOff(pBtCoexist); + } + else if(BTC_IPS_LEAVE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], IPS LEAVE notify\n")); + //halbtc8723a2ant_InitCoexDm(pBtCoexist); + } +} + +VOID +EXhalbtc8723a2ant_LpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_LPS_ENABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS ENABLE notify\n")); + } + else if(BTC_LPS_DISABLE == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], LPS DISABLE notify\n")); + } +} + +VOID +EXhalbtc8723a2ant_ScanNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_SCAN_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN START notify\n")); + } + else if(BTC_SCAN_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], SCAN FINISH notify\n")); + } +} + +VOID +EXhalbtc8723a2ant_ConnectNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_ASSOCIATE_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT START notify\n")); + } + else if(BTC_ASSOCIATE_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], CONNECT FINISH notify\n")); + } +} + +VOID +EXhalbtc8723a2ant_MediaStatusNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_MEDIA_CONNECT == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA connect notify\n")); + } + else + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], MEDIA disconnect notify\n")); + } + + halbtc8723a2ant_IndicateWifiChnlBwInfo(pBtCoexist, type); +} + +VOID +EXhalbtc8723a2ant_SpecialPacketNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(type == BTC_PACKET_DHCP) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], DHCP Packet notify\n")); + } +} + +VOID +EXhalbtc8723a2ant_BtInfoNotify( + IN PBTC_COEXIST pBtCoexist, + IN pu1Byte tmpBuf, + IN u1Byte length + ) +{ + u1Byte btInfo=0; + u1Byte i, rspSource=0; + 
BOOLEAN bBtBusy=FALSE, limited_dig=FALSE; + BOOLEAN bWifiConnected=FALSE, bBtHsOn=FALSE; + + pCoexSta->bC2hBtInfoReqSent = FALSE; + + rspSource = BT_INFO_SRC_8723A_2ANT_BT_RSP; + pCoexSta->btInfoC2hCnt[rspSource]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Bt info[%d], length=%d, hex data=[", rspSource, length)); + for(i=0; i<length; i++) + { + pCoexSta->btInfoC2h[rspSource][i] = tmpBuf[i]; + if(i == 0) + btInfo = tmpBuf[i]; + if(i == length-1) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x]\n", tmpBuf[i])); + } + else + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("0x%02x, ", tmpBuf[i])); + } + } + + if(BT_INFO_SRC_8723A_2ANT_WIFI_FW != rspSource) + { + pCoexSta->btRetryCnt = + pCoexSta->btInfoC2h[rspSource][1]; + + pCoexSta->btRssi = + pCoexSta->btInfoC2h[rspSource][2]*2+10; + + pCoexSta->btInfoExt = + pCoexSta->btInfoC2h[rspSource][3]; + } + + pBtCoexist->btc_get(pBtCoexist, BTC_GET_BL_HS_OPERATION, &bBtHsOn); + // check BIT2 first ==> check if bt is under inquiry or page scan + if(btInfo & BT_INFO_8723A_2ANT_B_INQ_PAGE) + { + pCoexSta->bC2hBtInquiryPage = true; + } + else + { + pCoexSta->bC2hBtInquiryPage = FALSE; + } +} + +VOID +EXhalbtc8723a2ant_StackOperationNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ) +{ + if(BTC_STACK_OP_INQ_PAGE_PAIR_START == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair start notify\n")); + pCoexSta->bHoldForStackOperation = true; + pCoexSta->bHoldPeriodCnt = 1; + halbtc8723a2ant_BtInquiryPage(pBtCoexist); + } + else if(BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type) + { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], StackOP Inquiry/page/pair finish notify\n")); + pCoexSta->bHoldForStackOperation = FALSE; + } +} + +VOID +EXhalbtc8723a2ant_HaltNotify( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], Halt notify\n")); + + halbtc8723a2ant_IgnoreWlanAct(pBtCoexist, FORCE_EXEC, true); + EXhalbtc8723a2ant_MediaStatusNotify(pBtCoexist, BTC_MEDIA_DISCONNECT); +} + +VOID +EXhalbtc8723a2ant_Periodical( + IN PBTC_COEXIST pBtCoexist + ) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, ("[BTCoex], 2Ant Periodical!!\n")); + + // work around for c2h hang + wa_halbtc8723a2ant_MonitorC2h(pBtCoexist); + + halbtc8723a2ant_QueryBtInfo(pBtCoexist); + halbtc8723a2ant_MonitorBtCtr(pBtCoexist); + halbtc8723a2ant_MonitorBtEnableDisable(pBtCoexist); + + halbtc8723a2ant_RunCoexistMechanism(pBtCoexist); +} + + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8723a2ant.h @@ -0,0 +1,179 @@ +//=========================================== +// The following is for 8723A 2Ant BT Co-exist definition +//=========================================== +#define BT_INFO_8723A_2ANT_B_FTP BIT7 +#define BT_INFO_8723A_2ANT_B_A2DP BIT6 +#define BT_INFO_8723A_2ANT_B_HID BIT5 +#define BT_INFO_8723A_2ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8723A_2ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8723A_2ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8723A_2ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8723A_2ANT_B_CONNECTION BIT0 + +#define BTC_RSSI_COEX_THRESH_TOL_8723A_2ANT 2 + +typedef enum _BT_INFO_SRC_8723A_2ANT{ + BT_INFO_SRC_8723A_2ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8723A_2ANT_BT_RSP = 0x1, + BT_INFO_SRC_8723A_2ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8723A_2ANT_MAX +}BT_INFO_SRC_8723A_2ANT,*PBT_INFO_SRC_8723A_2ANT; + +typedef enum _BT_8723A_2ANT_BT_STATUS{ + BT_8723A_2ANT_BT_STATUS_IDLE = 0x0, + BT_8723A_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1, +
BT_8723A_2ANT_BT_STATUS_NON_IDLE = 0x2, + BT_8723A_2ANT_BT_STATUS_MAX +}BT_8723A_2ANT_BT_STATUS,*PBT_8723A_2ANT_BT_STATUS; + +typedef enum _BT_8723A_2ANT_COEX_ALGO{ + BT_8723A_2ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8723A_2ANT_COEX_ALGO_SCO = 0x1, + BT_8723A_2ANT_COEX_ALGO_HID = 0x2, + BT_8723A_2ANT_COEX_ALGO_A2DP = 0x3, + BT_8723A_2ANT_COEX_ALGO_PANEDR = 0x4, + BT_8723A_2ANT_COEX_ALGO_PANHS = 0x5, + BT_8723A_2ANT_COEX_ALGO_PANEDR_A2DP = 0x6, + BT_8723A_2ANT_COEX_ALGO_PANEDR_HID = 0x7, + BT_8723A_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x8, + BT_8723A_2ANT_COEX_ALGO_HID_A2DP = 0x9, + BT_8723A_2ANT_COEX_ALGO_MAX +}BT_8723A_2ANT_COEX_ALGO,*PBT_8723A_2ANT_COEX_ALGO; + +typedef struct _COEX_DM_8723A_2ANT{ + // fw mechanism + BOOLEAN bPreDecBtPwr; + BOOLEAN bCurDecBtPwr; + //BOOLEAN bPreBtLnaConstrain; + //BOOLEAN bCurBtLnaConstrain; + //u1Byte bPreBtPsdMode; + //u1Byte bCurBtPsdMode; + u1Byte preFwDacSwingLvl; + u1Byte curFwDacSwingLvl; + BOOLEAN bCurIgnoreWlanAct; + BOOLEAN bPreIgnoreWlanAct; + u1Byte prePsTdma; + u1Byte curPsTdma; + u1Byte psTdmaPara[5]; + u1Byte psTdmaDuAdjType; + BOOLEAN bResetTdmaAdjust; + BOOLEAN bPrePsTdmaOn; + BOOLEAN bCurPsTdmaOn; + //BOOLEAN bPreBtAutoReport; + //BOOLEAN bCurBtAutoReport; + + // sw mechanism + BOOLEAN bPreRfRxLpfShrink; + BOOLEAN bCurRfRxLpfShrink; + u4Byte btRf0x1eBackup; + BOOLEAN bPreLowPenaltyRa; + BOOLEAN bCurLowPenaltyRa; + BOOLEAN bPreDacSwingOn; + u4Byte preDacSwingLvl; + BOOLEAN bCurDacSwingOn; + u4Byte curDacSwingLvl; + BOOLEAN bPreAdcBackOff; + BOOLEAN bCurAdcBackOff; + BOOLEAN bPreAgcTableEn; + BOOLEAN bCurAgcTableEn; + u4Byte preVal0x6c0; + u4Byte curVal0x6c0; + u4Byte preVal0x6c8; + u4Byte curVal0x6c8; + u1Byte preVal0x6cc; + u1Byte curVal0x6cc; + BOOLEAN limited_dig; + + // algorithm related + u1Byte preAlgorithm; + u1Byte curAlgorithm; + u1Byte btStatus; + u1Byte wifiChnlInfo[3]; +} COEX_DM_8723A_2ANT, *PCOEX_DM_8723A_2ANT; + +typedef struct _COEX_STA_8723A_2ANT{ + u4Byte highPriorityTx; + u4Byte highPriorityRx; + u4Byte lowPriorityTx; + u4Byte lowPriorityRx; + u1Byte btRssi; + u1Byte preBtRssiState; + u1Byte preBtRssiState1; + u1Byte preWifiRssiState[4]; + BOOLEAN bC2hBtInfoReqSent; + u1Byte btInfoC2h[BT_INFO_SRC_8723A_2ANT_MAX][10]; + u4Byte btInfoC2hCnt[BT_INFO_SRC_8723A_2ANT_MAX]; + BOOLEAN bC2hBtInquiryPage; + u1Byte btRetryCnt; + u1Byte btInfoExt; + BOOLEAN bHoldForStackOperation; + u1Byte bHoldPeriodCnt; + // this is for c2h hang work-around + u4Byte c2hHangDetectCnt; +}COEX_STA_8723A_2ANT, *PCOEX_STA_8723A_2ANT; + +//=========================================== +// The following is interface which will notify coex module. 
+//=========================================== +VOID +EXhalbtc8723a2ant_InitHwConfig( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a2ant_InitCoexDm( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a2ant_IpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_LpsNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_ScanNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_ConnectNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_MediaStatusNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_SpecialPacketNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_HaltNotify( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a2ant_Periodical( + IN PBTC_COEXIST pBtCoexist + ); +VOID +EXhalbtc8723a2ant_BtInfoNotify( + IN PBTC_COEXIST pBtCoexist, + IN pu1Byte tmpBuf, + IN u1Byte length + ); +VOID +EXhalbtc8723a2ant_StackOperationNotify( + IN PBTC_COEXIST pBtCoexist, + IN u1Byte type + ); +VOID +EXhalbtc8723a2ant_DisplayCoexInfo( + IN PBTC_COEXIST pBtCoexist + ); + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.c @@ -0,0 +1,4104 @@ +/*************************************************************** + * Description: + * + * This file is for RTL8723B Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. + * + ***************************************************************/ + + +/*************************************************************** + * include files + ***************************************************************/ +#include "halbt_precomp.h" +#if 1 +/*************************************************************** + * Global variables, these are static variables + ***************************************************************/ +static struct coex_dm_8723b_1ant glcoex_dm_8723b_1ant; +static struct coex_dm_8723b_1ant *coex_dm = &glcoex_dm_8723b_1ant; +static struct coex_sta_8723b_1ant glcoex_sta_8723b_1ant; +static struct coex_sta_8723b_1ant *coex_sta = &glcoex_sta_8723b_1ant; + +const char *const GLBtInfoSrc8723b1Ant[]={ + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +u32 glcoex_ver_date_8723b_1ant = 20130906; +u32 glcoex_ver_8723b_1ant = 0x45; + +/*************************************************************** + * local function proto type if needed + ***************************************************************/ +/*************************************************************** + * local function start with halbtc8723b1ant_ + ***************************************************************/ +u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) +{ + s32 bt_rssi=0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if (level_num == 2){ + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to High\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Low\n"); + } + } else { + if (bt_rssi < rssi_thresh) { + bt_rssi_state = 
BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (bt_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to High\n"); + } else if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Medium\n"); + } + } else { + if (bt_rssi < rssi_thresh1) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, + u8 rssi_thresh, u8 rssi_thresh1) +{ + s32 wifi_rssi=0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, + BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to High\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Low\n"); + } + } else { + if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if 
((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifi_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to High\n"); + } else if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Medium\n"); + } + } else { + if (wifi_rssi < rssi_thresh1) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at High\n"); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +void halbtc8723b1ant_updatera_mask(struct btc_coexist *btcoexist, + bool force_exec, u32 dis_rate_mask) +{ + coex_dm->curra_mask = dis_rate_mask; + + if (force_exec || (coex_dm->prera_mask != coex_dm->curra_mask)) + btcoexist->btc_set(btcoexist, BTC_SET_ACT_UPDATE_ra_mask, + &coex_dm->curra_mask); + + coex_dm->prera_mask = coex_dm->curra_mask; +} + +void halbtc8723b1ant_auto_rate_fallback_retry(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + bool wifi_under_bmode = false; + + coex_dm->cur_arfr_type = type; + + if (force_exec || (coex_dm->pre_arfr_type != coex_dm->cur_arfr_type)) { + switch (coex_dm->cur_arfr_type) { + case 0: /* normal mode */ + btcoexist->btc_write_4byte(btcoexist, 0x430, + coex_dm->backup_arfr_cnt1); + btcoexist->btc_write_4byte(btcoexist, 0x434, + coex_dm->backup_arfr_cnt2); + break; + case 1: + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_UNDER_B_MODE, + &wifi_under_bmode); + if (wifi_under_bmode) { + btcoexist->btc_write_4byte(btcoexist, + 0x430, 0x0); + btcoexist->btc_write_4byte(btcoexist, + 0x434, 0x01010101); + } else { + btcoexist->btc_write_4byte(btcoexist, + 0x430, 0x0); + btcoexist->btc_write_4byte(btcoexist, + 0x434, 0x04030201); + } + break; + default: + break; + } + } + + coex_dm->pre_arfr_type = coex_dm->cur_arfr_type; +} + +void halbtc8723b1ant_retry_limit(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_retry_limit_type = type; + + if (force_exec || (coex_dm->pre_retry_limit_type != + coex_dm->cur_retry_limit_type)) { + + switch (coex_dm->cur_retry_limit_type) { + case 0: /* normal mode */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, + coex_dm->backup_retry_limit); + break; + case 1: /* retry limit=8 */ + btcoexist->btc_write_2byte(btcoexist, 0x42a, 0x0808); + break; + default: + break; + } + } 
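+ /* remember the retry-limit type just applied, so later non-forced calls can skip a redundant 0x42a write */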
+ + coex_dm->pre_retry_limit_type = coex_dm->cur_retry_limit_type; +} + +void halbtc8723b1ant_ampdu_maxtime(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + coex_dm->cur_ampdu_time_type = type; + + if (force_exec || (coex_dm->pre_ampdu_time_type != + coex_dm->cur_ampdu_time_type)) { + switch (coex_dm->cur_ampdu_time_type) { + case 0: /* normal mode */ + btcoexist->btc_write_1byte(btcoexist, 0x456, + coex_dm->backup_ampdu_max_time); + break; + case 1: /* AMPDU timw = 0x38 * 32us */ + btcoexist->btc_write_1byte(btcoexist, + 0x456, 0x38); + break; + default: + break; + } + } + + coex_dm->pre_ampdu_time_type = coex_dm->cur_ampdu_time_type; +} + +void halbtc8723b1ant_limited_tx(struct btc_coexist *btcoexist, + bool force_exec, u8 ra_maskType, u8 arfr_type, + u8 retry_limit_type, u8 ampdu_time_type) +{ + switch (ra_maskType) { + case 0: /* normal mode */ + halbtc8723b1ant_updatera_mask(btcoexist, force_exec, 0x0); + break; + case 1: /* disable cck 1/2 */ + halbtc8723b1ant_updatera_mask(btcoexist, force_exec, + 0x00000003); + break; + /* disable cck 1/2/5.5, ofdm 6/9/12/18/24, mcs 0/1/2/3/4*/ + case 2: + halbtc8723b1ant_updatera_mask(btcoexist, force_exec, + 0x0001f1f7); + break; + default: + break; + } + + halbtc8723b1ant_auto_rate_fallback_retry(btcoexist, force_exec, + arfr_type); + halbtc8723b1ant_retry_limit(btcoexist, force_exec, retry_limit_type); + halbtc8723b1ant_ampdu_maxtime(btcoexist, force_exec, ampdu_time_type); +} + +void halbtc8723b1ant_limited_rx(struct btc_coexist *btcoexist, + bool force_exec, bool rej_ap_agg_pkt, + bool b_bt_ctrl_agg_buf_size, u8 agg_buf_size) +{ + bool reject_rx_agg = rej_ap_agg_pkt; + bool bt_ctrl_rx_agg_size = b_bt_ctrl_agg_buf_size; + u8 rxAggSize = agg_buf_size; + + /********************************************** + * Rx Aggregation related setting + **********************************************/ + btcoexist->btc_set(btcoexist, BTC_SET_BL_TO_REJ_AP_AGG_PKT, + &reject_rx_agg); + /* decide BT control aggregation buf size or not */ + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_CTRL_AGG_SIZE, + &bt_ctrl_rx_agg_size); + /* aggregation buf size, only work + *when BT control Rx aggregation size. 
*/ + btcoexist->btc_set(btcoexist, BTC_SET_U1_AGG_BUF_SIZE, &rxAggSize); + /* real update aggregation setting */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_AGGREGATE_CTRL, NULL); +} + +void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u32tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0; + u32 reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u32tmp & MASKLWORD; + reg_hp_rx = (u32tmp & MASKHWORD) >> 16; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u32tmp & MASKLWORD; + reg_lp_rx = (u32tmp & MASKHWORD) >> 16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] = {0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger*/ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +bool halbtc8723b1ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy = false; + static bool pre_under_4way = false, pre_bt_hs_on = false; + bool wifi_busy = false, under_4way = false, bt_hs_on = false; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + if (bt_hs_on != pre_bt_hs_on) { + pre_bt_hs_on = bt_hs_on; + return true; + } + } + + return false; +} + +void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. 
*/ + if (bt_hs_on) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } + + /* check if Sco only */ + if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && bt_link_info->hid_exist ) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED; + u8 numOfDiffProfile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + numOfDiffProfile++; + if (bt_link_info->hid_exist) + numOfDiffProfile++; + if (bt_link_info->pan_exist) + numOfDiffProfile++; + if (bt_link_info->a2dp_exist) + numOfDiffProfile++; + + if (numOfDiffProfile == 1) { + if (bt_link_info->sco_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO only\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = HID only\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = A2DP only\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "PAN(HS) only\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "PAN(EDR) only\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (numOfDiffProfile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + HID\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "SCO + A2DP ==> SCO\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile " + "= SCO + PAN(HS)\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile " + "= SCO + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "HID + A2DP\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else if 
(bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "HID + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "HID + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "A2DP + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "A2DP + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (numOfDiffProfile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "SCO + HID + A2DP ==> HID\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "SCO + HID + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "SCO + HID + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "SCO + A2DP + PAN(HS)\n"); + algorithm = BT_8723B_1ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = SCO + " + "A2DP + PAN(EDR) ==> HID\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "HID + A2DP + PAN(HS)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "HID + A2DP + PAN(EDR)\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (numOfDiffProfile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Error!!! 
" + "BT Profile = SCO + " + "HID + A2DP + PAN(HS)\n"); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT Profile = " + "SCO + HID + A2DP + PAN(EDR)" + "==>PAN(EDR)+HID\n"); + algorithm = + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +bool halbtc8723b1ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) +{ + bool ret = false; + bool bt_hs_on = false, wifi_connected = false; + s32 bt_hs_rssi = 0; + u8 bt_rssi_state; + + if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on)) + return false; + if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected)) + return false; + if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) + return false; + + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 35, 0); + + if (wifi_connected) { + if (bt_hs_on) { + if (bt_hs_rssi > 37) + ret = true; + } else { + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + ret = true; + } + } + + return ret; +} + +void halbtc8723b1ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, + u8 dac_swing_lvl) +{ + u8 h2c_parameter[1] = {0}; + + /* There are several type of dacswing + * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */ + h2c_parameter[0] = dac_swing_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +void halbtc8723b1ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, + bool dec_bt_pwr) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (dec_bt_pwr) + h2c_parameter[0] |= BIT1; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", + (dec_bt_pwr? "Yes!!":"No!!"),h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); +} + +void halbtc8723b1ant_dec_bt_pwr(struct btc_coexist *btcoexist, + bool force_exec, bool dec_bt_pwr) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power = %s\n", + (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF")); + coex_dm->cur_dec_bt_pwr = dec_bt_pwr; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + + if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) + return; + } + halbtc8723b1ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +void halbtc8723b1ant_set_bt_auto_report(struct btc_coexist *btcoexist, + bool enable_auto_report) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (enable_auto_report) + h2c_parameter[0] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", + (enable_auto_report? "Enabled!!":"Disabled!!"), + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +} + +void halbtc8723b1ant_bt_auto_report(struct btc_coexist *btcoexist, + bool force_exec, bool enable_auto_report) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec? "force to":""), + ((enable_auto_report)? 
"Enabled":"Disabled")); + coex_dm->cur_bt_auto_report = enable_auto_report; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreBtAutoReport=%d, " + "bCurBtAutoReport=%d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); + + if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8723b1ant_set_bt_auto_report(btcoexist, + coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +void halbtc8723b1ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swing_lvl) +{ + return; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec? "force to":""), fw_dac_swing_lvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], preFwDacSwingLvl=%d, " + "curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); + + if (coex_dm->pre_fw_dac_swing_lvl == + coex_dm->cur_fw_dac_swing_lvl) + return; + } + + halbtc8723b1ant_set_fw_dac_swing_level(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); + + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +void halbtc8723b1ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, + bool rx_rf_shrink_on) +{ + if (rx_rf_shrink_on) { + /*Shrink RF Rx LPF corner */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, 0xffff7); + } else { + /*Resume RF Rx LPF corner + * After initialized, we can use coex_dm->btRf0x1eBackup */ + if (btcoexist->initilized) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, + 0x1e, 0xfffff, + coex_dm->bt_rf0x1e_backup); + } + } +} + +void halbtc8723b1ant_rf_shrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec? "force to":""), + ((rx_rf_shrink_on)? "ON":"OFF")); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreRfRxLpfShrink=%d, " + "bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); + + if (coex_dm->pre_rf_rx_lpf_shrink == + coex_dm->cur_rf_rx_lpf_shrink) + return; + } + halbtc8723b1ant_set_sw_rf_rx_lpf_corner(btcoexist, + coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +void halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive( + struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] = {0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /*normal rate except MCS7/6/5, OFDM54/48/36 */ + h2c_parameter[2] = 0x00; + h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54 */ + h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48 */ + h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */ + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra ? "ON!!" 
: "OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +void halbtc8723b1ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + halbtc8723b1ant_set_sw_penalty_tx_rate_adaptive(btcoexist, + coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +void halbtc8723b1ant_set_dac_swing_reg(struct btc_coexist *btcoexist, u32 level) +{ + u8 val = (u8) level; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); +} + +void halbtc8723b1ant_set_sw_full_time_dac_swing(struct btc_coexist *btcoexist, + bool sw_dac_swing_on, + u32 sw_dac_swing_lvl) +{ + if (sw_dac_swing_on) + halbtc8723b1ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl); + else + halbtc8723b1ant_set_dac_swing_reg(btcoexist, 0x18); +} + + +void halbtc8723b1ant_dac_swing(struct btc_coexist *btcoexist, bool force_exec, + bool dac_swing_on, u32 dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", + (force_exec ? "force to" : ""), (dac_swing_on ? "ON" : "OFF"), + dac_swing_lvl); + + coex_dm->cur_dac_swing_on = dac_swing_on; + coex_dm->cur_dac_swing_lvl = dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, " + "bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + + if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) + return; + } + mdelay(30); + halbtc8723b1ant_set_sw_full_time_dac_swing(btcoexist, dac_swing_on, + dac_swing_lvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +void halbtc8723b1ant_set_adc_backoff(struct btc_coexist *btcoexist, + bool adc_backoff) +{ + if (adc_backoff) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level On!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level Off!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1); + } +} + +void halbtc8723b1ant_adc_backoff(struct btc_coexist *btcoexist, + bool force_exec, bool adc_backoff) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec ? "force to" : ""), (adc_backoff ? 
"ON" : "OFF")); + coex_dm->cur_adc_backoff = adc_backoff; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", + coex_dm->pre_adc_backoff, coex_dm->cur_adc_backoff); + + if(coex_dm->pre_adc_backoff == coex_dm->cur_adc_backoff) + return; + } + halbtc8723b1ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_backoff); + + coex_dm->pre_adc_backoff = + coex_dm->cur_adc_backoff; +} + +void halbtc8723b1ant_set_agc_table(struct btc_coexist *btcoexist, + bool adc_table_en) +{ + u8 rssi_adjust_val = 0; + + btcoexist->btc_set_rf_reg(btcoexist, + BTC_RF_A, 0xef, 0xfffff, 0x02000); + if (adc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x3fa58); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x37a58); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x2fa58); + rssi_adjust_val = 8; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table Off!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x39258); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x31258); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x29258); + } + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0); + + /* set rssi_adjust_val for wifi module. */ + btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, + &rssi_adjust_val); +} + + +void halbtc8723b1ant_agc_table(struct btc_coexist *btcoexist, + bool force_exec, bool adc_table_en) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec ? "force to" : ""), + (adc_table_en ? "Enable" : "Disable")); + coex_dm->cur_agc_table_en = adc_table_en; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, + coex_dm->cur_agc_table_en); + + if(coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) + return; + } + halbtc8723b1ant_set_agc_table(btcoexist, adc_table_en); + + coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; +} + +void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, + u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0=0x%x," + " 0x6c4=0x%x, 0x6cc=0x%x\n", (force_exec ? 
"force to" : ""), + val0x6c0, val0x6c4, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + halbtc8723b1ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffffff, 0x3); + break; + case 1: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 2: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffffff, 0x3); + break; + case 3: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0xaaaaaaaa, 0xffffff, 0x3); + break; + case 4: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5aaa5aaa, 0xffffff, 0x3); + break; + case 5: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0xaaaa5a5a, 0xffffff, 0x3); + break; + case 6: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x55555555, + 0xaaaa5a5a, 0xffffff, 0x3); + break; + case 7: + halbtc8723b1ant_coex_table(btcoexist, force_exec, 0x5afa5afa, + 0x5afa5afa, 0xffffff, 0x3); + break; + default: + break; + } +} + +void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] = {0}; + + if (enable) + h2c_parameter[0] |= BIT0; /* function enable */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act," + " FW write 0x63=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec ? "force to" : ""), (enable ? 
"ON" : "OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d, " + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8723b1ant_SetFwIgnoreWlanAct(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, + u8 byte1, u8 byte2, u8 byte3, + u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5] = {0}; + u8 real_byte1 = byte1, real_byte5 = byte5; + bool ap_enable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + if ((byte1 & BIT4) && !(byte1 & BIT5)) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], FW for 1Ant AP mode\n"); + real_byte1 &= ~BIT4; + real_byte1 |= BIT5; + + real_byte5 |= BIT5; + real_byte5 &= ~BIT6; + } + } + + h2c_parameter[0] = real_byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = real_byte5; + + coex_dm->ps_tdma_para[0] = real_byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = real_byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | + h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | + h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +void halbtc8723b1ant_SetLpsRpwm(struct btc_coexist *btcoexist, + u8 lps_val, u8 rpwm_val) +{ + u8 lps = lps_val; + u8 rpwm = rpwm_val; + + btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_LPS, &lps); + btcoexist->btc_set(btcoexist, BTC_SET_U1_1ANT_RPWM, &rpwm); +} + +void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist, bool force_exec, + u8 lps_val, u8 rpwm_val) +{ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set lps/rpwm=0x%x/0x%x \n", + (force_exec ? 
"force to" : ""), lps_val, rpwm_val); + coex_dm->cur_lps = lps_val; + coex_dm->cur_rpwm = rpwm_val; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RxBeaconMode=0x%x , LPS-RPWM=0x%x!!\n", + coex_dm->cur_lps, coex_dm->cur_rpwm); + + if ((coex_dm->pre_lps == coex_dm->cur_lps) && + (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], LPS-RPWM_Last=0x%x" + " , LPS-RPWM_Now=0x%x!!\n", + coex_dm->pre_rpwm, coex_dm->cur_rpwm); + + return; + } + } + halbtc8723b1ant_SetLpsRpwm(btcoexist, lps_val, rpwm_val); + + coex_dm->pre_lps = coex_dm->cur_lps; + coex_dm->pre_rpwm = coex_dm->cur_rpwm; +} + +void halbtc8723b1ant_sw_mechanism1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool bt_lna_constrain) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], SM1[ShRf/ LpRA/ LimDig/ btLna] = %d %d %d %d\n", + shrink_rx_lpf, low_penalty_ra, limited_dig, bt_lna_constrain); + + halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); +} + +void halbtc8723b1ant_sw_mechanism2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_backoff, + bool sw_dac_swing, u32 dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], SM2[AgcT/ AdcB/ SwDacSwing(lvl)] = %d %d %d\n", + agc_table_shift, adc_backoff, sw_dac_swing); +} + +void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist, + u8 ant_pos_type, bool init_hw_cfg, + bool wifi_off) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 fw_ver = 0, u32tmp = 0; + bool pg_ext_switch = false; + bool use_ext_switch = false; + u8 h2c_parameter[2] = {0}; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch); + /* [31:16]=fw ver, [15:0]=fw sub ver */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + + + if ((fw_ver < 0xc0000) || pg_ext_switch) + use_ext_switch = true; + + if (init_hw_cfg){ + /*BT select s0/s1 is controlled by WiFi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); + + /*Force GNT_BT to Normal */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); + } else if (wifi_off) { + /*Force GNT_BT to High */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3); + /*BT select s0/s1 is controlled by BT */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0); + + /* 0x4c[24:23]=00, Set Antenna control by BT_RFE_CTRL + * BT Vendor 0xac=0xf002 */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp &= ~BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + } + + if (use_ext_switch) { + if (init_hw_cfg) { + /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) { + /* Main Ant to BT for IPS case 0x4c[23]=1 */ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x1); + + /*tell firmware "no antenna inverse"*/ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; /*ext switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /*Aux Ant to BT for IPS case 0x4c[23]=1 */ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x0); + + /*tell firmware "antenna inverse"*/ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; /*ext switch type*/ + 
btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* fixed internal switch first*/ + /* fixed internal switch S1->WiFi, S0->BT*/ + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + else/* fixed internal switch S0->WiFi, S1->BT*/ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + + /* ext switch setting */ + switch (ant_pos_type) { + case BTC_ANT_PATH_WIFI: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x1); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x2); + break; + case BTC_ANT_PATH_BT: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x2); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x1); + break; + default: + case BTC_ANT_PATH_PTA: + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x1); + else + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, + 0x2); + break; + } + + } else { + if (init_hw_cfg) { + /* 0x4c[23]=1, 0x4c[24]=0 Antenna control by 0x64*/ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp |= BIT23; + u32tmp &= ~BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + if (board_info->btdm_ant_pos == + BTC_ANTENNA_AT_MAIN_PORT) { + /*Main Ant to WiFi for IPS case 0x4c[23]=1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x0); + + /*tell firmware "no antenna inverse"*/ + h2c_parameter[0] = 0; + h2c_parameter[1] = 0; /*internal switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /*Aux Ant to BT for IPS case 0x4c[23]=1*/ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x64, 0x1, + 0x1); + + /*tell firmware "antenna inverse"*/ + h2c_parameter[0] = 1; + h2c_parameter[1] = 0; /*internal switch type*/ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* fixed external switch first*/ + /*Main->WiFi, Aux->BT*/ + if(board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x1); + else/*Main->BT, Aux->WiFi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x2); + + /* internal switch setting*/ + switch (ant_pos_type) { + case BTC_ANT_PATH_WIFI: + if(board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x0); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x280); + break; + case BTC_ANT_PATH_BT: + if(board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x280); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x0); + break; + default: + case BTC_ANT_PATH_PTA: + if(board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x200); + else + btcoexist->btc_write_2byte(btcoexist, 0x948, + 0x80); + break; + } + } +} + +void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, + bool turn_on, u8 type) +{ + bool wifi_busy = false; + u8 rssi_adjust_val = 0; + + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!force_exec) { + if (coex_dm->cur_ps_tdma_on) + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ******** TDMA(on, %d) *********\n", + 
coex_dm->cur_ps_tdma); + else + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], ******** TDMA(off, %d) ********\n", + coex_dm->cur_ps_tdma); + + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + default: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1a, + 0x1a, 0x0, 0x50); + break; + case 1: + if (wifi_busy) + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x3a, 0x03, + 0x10, 0x50); + else + halbtc8723b1ant_set_fw_ps_tdma(btcoexist,0x51, + 0x3a, 0x03, + 0x10, 0x51); + + rssi_adjust_val = 11; + break; + case 2: + if (wifi_busy) + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x2b, 0x03, + 0x10, 0x50); + else + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x2b, 0x03, + 0x10, 0x51); + rssi_adjust_val = 14; + break; + case 3: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x1d, + 0x1d, 0x0, 0x52); + break; + case 4: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15, + 0x3, 0x14, 0x0); + rssi_adjust_val = 17; + break; + case 5: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15, + 0x3, 0x11, 0x10); + break; + case 6: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x20, + 0x3, 0x11, 0x13); + break; + case 7: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xc, + 0x5, 0x0, 0x0); + break; + case 8: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); + break; + case 9: + if(wifi_busy) + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x21, 0x3, + 0x10, 0x50); + else + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x21, 0x3, + 0x10, 0x50); + rssi_adjust_val = 18; + break; + case 10: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0xa, 0x0, 0x40); + break; + case 11: + if (wifi_busy) + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x15, 0x03, + 0x10, 0x50); + else + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, + 0x15, 0x03, + 0x10, 0x50); + rssi_adjust_val = 20; + break; + case 12: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x0a, + 0x0a, 0x0, 0x50); + break; + case 13: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x15, + 0x15, 0x0, 0x50); + break; + case 14: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x21, + 0x3, 0x10, 0x52); + break; + case 15: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x13, 0xa, + 0x3, 0x8, 0x0); + break; + case 16: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x15, + 0x3, 0x10, 0x0); + rssi_adjust_val = 18; + break; + case 18: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x93, 0x25, + 0x3, 0x10, 0x0); + rssi_adjust_val = 14; + break; + case 20: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x35, + 0x03, 0x11, 0x10); + break; + case 21: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x15, + 0x03, 0x11, 0x10); + break; + case 22: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0x25, + 0x03, 0x11, 0x10); + break; + case 23: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 24: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 25: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 26: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0x3, 0x31, 0x18); + rssi_adjust_val = 22; + break; + case 27: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x3, 0x31, 0x98); + rssi_adjust_val = 22; + break; + case 28: + 
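+ /* as in the cases above, the five parameters below are passed through halbtc8723b1ant_set_fw_ps_tdma() into H2C command 0x60 */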
halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x69, 0x25, + 0x3, 0x31, 0x0); + break; + case 29: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xab, 0x1a, + 0x1a, 0x1, 0x10); + break; + case 30: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x51, 0x14, + 0x3, 0x10, 0x50); + break; + case 31: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x1a, + 0x1a, 0, 0x58); + break; + case 32: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x61, 0xa, + 0x3, 0x10, 0x0); + break; + case 33: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x25, + 0x3, 0x30, 0x90); + break; + case 34: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x53, 0x1a, + 0x1a, 0x0, 0x10); + break; + case 35: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x63, 0x1a, + 0x1a, 0x0, 0x10); + break; + case 36: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0xd3, 0x12, + 0x3, 0x14, 0x50); + break; + /* SoftAP only with no sta associated,BT disable , + * TDMA mode for power saving + * here softap mode screen off will cost 70-80mA for phone */ + case 40: + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x23, 0x18, + 0x00, 0x10, 0x24); + break; + } + } else { + switch (type) { + case 8: /*PTA Control */ + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x8, 0x0, + 0x0, 0x0, 0x0); + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, + false, false); + break; + case 0: + default: /*Software control, Antenna at BT side */ + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, + 0x0, 0x0, 0x0); + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, + false, false); + break; + case 9: /*Software control, Antenna at WiFi side */ + halbtc8723b1ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, + 0x0, 0x0, 0x0); + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_WIFI, + false, false); + break; + } + } + rssi_adjust_val = 0; + btcoexist->btc_set(btcoexist, + BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, + &rssi_adjust_val); + + /* update pre state */ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +void halbtc8723b1ant_coex_alloff(struct btc_coexist *btcoexist) +{ + /* fw all off */ + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + /* sw all off */ + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + + + /* hw all off */ + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); +} + +bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist) +{ + bool commom = false, wifi_connected = false; + bool wifi_busy = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!wifi_connected && + BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + " + "BT non connected-idle!!\n"); + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + commom = true; + } else if (wifi_connected && + (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + " + "BT non connected-idle!!\n"); + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + 
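+ /* same baseline (all-off) settings as the other idle combinations in this function */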
halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + commom = true; + } else if (!wifi_connected && + (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non connected-idle + " + "BT connected-idle!!\n"); + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + commom = true; + } else if (wifi_connected && + (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + BT connected-idle!!\n"); + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + commom = true; + } else if (!wifi_connected && + (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE != + coex_dm->bt_status)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + ("[BTCoex], Wifi non connected-idle + BT Busy!!\n")); + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + commom = true; + } else { + if (wifi_busy) + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy" + " + BT Busy!!\n"); + else + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle" + " + BT Busy!!\n"); + + commom = false; + } + + return commom; +} + + +void halbtc8723b1ant_tdma_duration_adjust_for_acl(struct btc_coexist *btcoexist, + u8 wifi_status) +{ + static s32 up, dn, m, n, wait_count; + /* 0: no change, +1: increase WiFi duration, + * -1: decrease WiFi duration */ + s32 result; + u8 retry_count = 0, bt_info_ext; + static bool pre_wifi_busy = false; + bool wifi_busy = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjustForAcl()\n"); + + if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status) + wifi_busy = true; + else + wifi_busy = false; + + if ((BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == + wifi_status) || + (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN == wifi_status) || + (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT == wifi_status)) { + if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && + coex_dm->cur_ps_tdma != 3 && coex_dm->cur_ps_tdma != 9) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } + return; + } + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + + up = 0; + dn = 0; + m = 1; + n = 3; + result = 0; + wait_count = 0; + } else { + /*accquire the BT TRx retry count from BT_Info byte2 */ + retry_count = coex_sta->bt_retry_cnt; + bt_info_ext = 
coex_sta->bt_info_ext; + result = 0; + wait_count++; + /* no retry in the last 2-second duration */ + if (retry_count == 0) { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi " + "duration!!\n"); + } + } else if (retry_count <= 3) { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + if (wait_count <= 2) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration" + " for retryCounter<3!!\n"); + } + } else { + if (wait_count == 1) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration" + " for retryCounter>3!!\n"); + } + + if (result == -1) { + if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) || + (coex_dm->cur_ps_tdma == 2))) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } + } else if(result == 1) { + if ((BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(bt_info_ext)) && + ((coex_dm->cur_ps_tdma == 1) || + (coex_dm->cur_ps_tdma == 2))) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = 1; + } + } else { /*no change */ + /*if busy / idle change */ + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, + true, + coex_dm->cur_ps_tdma); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex],********* TDMA(on, %d) ********\n", + coex_dm->cur_ps_tdma); + } + + if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 && + coex_dm->cur_ps_tdma != 9 && coex_dm->cur_ps_tdma != 11) { + /* recover to previous adjust type */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, + coex_dm->ps_tdma_du_adj_type); + } + } +} + +u8 halbtc8723b1ant_ps_tdma_type_by_wifi_rssi(s32 wifi_rssi, s32 pre_wifi_rssi, + u8 wifi_rssi_thresh) +{ + u8 ps_tdma_type=0; + + if (wifi_rssi > pre_wifi_rssi) { + if (wifi_rssi > (wifi_rssi_thresh + 5)) + ps_tdma_type = 26; + else + ps_tdma_type = 25; + } else { + if (wifi_rssi > wifi_rssi_thresh) + ps_tdma_type = 26; + else + ps_tdma_type = 25; + } + + return ps_tdma_type; +} + +void halbtc8723b1ant_PsTdmaCheckForPowerSaveState(struct btc_coexist *btcoexist, + bool new_ps_state) +{ + u8 lps_mode = 0x0; + + btcoexist->btc_get(btcoexist, BTC_GET_U1_LPS_MODE, &lps_mode); + + if (lps_mode) { /* 
already under LPS state */ + if (new_ps_state) { + /* keep state under LPS, do nothing. */ + } else { + /* will leave LPS state, turn off psTdma first */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + } + } else { /* NO PS state */ + if (new_ps_state) { + /* will enter LPS state, turn off psTdma first */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 0); + } else { + /* keep state under NO PS state, do nothing. */ + } + } +} + +void halbtc8723b1ant_power_save_state(struct btc_coexist *btcoexist, + u8 ps_type, u8 lps_val, + u8 rpwm_val) +{ + bool low_pwr_disable = false; + + switch (ps_type) { + case BTC_PS_WIFI_NATIVE: + /* recover to original 32k low power setting */ + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, NULL); + break; + case BTC_PS_LPS_ON: + halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, true); + halbtc8723b1ant_LpsRpwm(btcoexist, NORMAL_EXEC, lps_val, + rpwm_val); + /* when coex force to enter LPS, do not enter 32k low power. */ + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + /* power save must executed before psTdma. */ + btcoexist->btc_set(btcoexist, BTC_SET_ACT_ENTER_LPS, NULL); + break; + case BTC_PS_LPS_OFF: + halbtc8723b1ant_PsTdmaCheckForPowerSaveState(btcoexist, false); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, NULL); + break; + default: + break; + } +} + +void halbtc8723b1ant_action_wifi_only(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); +} + +void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist) +{ + static bool pre_bt_disabled = false; + static u32 bt_disable_cnt = 0; + bool bt_active = true, bt_disabled = false; + + /* This function check if bt is disabled */ + + if (coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) + bt_active = false; + + if (coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) + bt_active = false; + + if (bt_active) { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); + } else { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], bt all counters=0, %d times!!\n", + bt_disable_cnt); + if (bt_disable_cnt >= 2) { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); + halbtc8723b1ant_action_wifi_only(btcoexist); + } + } + if (pre_bt_disabled != bt_disabled) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled" : "enabled"), + (bt_disabled ? 
"disabled" : "enabled")); + pre_bt_disabled = bt_disabled; + if (!bt_disabled) { + } else { + btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS, + NULL); + btcoexist->btc_set(btcoexist, BTC_SET_ACT_NORMAL_LPS, + NULL); + } + } +} + +/*************************************************** + * + * Software Coex Mechanism start + * + ***************************************************/ +/* SCO only or SCO+PAN(HS) */ +void halbtc8723b1ant_action_sco(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = + halbtc8723b1ant_wifi_rssi_state(btcoexist, 0, 2, 25, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + + +void halbtc8723b1ant_action_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ +void halbtc8723b1ant_action_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = 
halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b1ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b1ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if 
((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + + +/* PAN(HS) only */ +void halbtc8723b1ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* fw mechanism */ + if((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* fw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*PAN(EDR)+A2DP */ +void halbtc8723b1ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + 
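+ /* both RSSI branches currently program identical sw mechanism settings */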
halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b1ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* HID+A2DP+PAN(EDR) */ +void halbtc8723b1ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + 
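+ /* the 'true' below enables low-penalty rate adaptation (see halbtc8723b1ant_sw_mechanism1) for the HID link */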
halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b1ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state, bt_info_ext; + u32 wifi_bw; + + bt_info_ext = coex_sta->bt_info_ext; + wifi_rssi_state = halbtc8723b1ant_wifi_rssi_state(btcoexist, + 0, 2, 25, 0); + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 50, 0); + + if (halbtc8723b1ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + else + halbtc8723b1ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_HT40 == wifi_bw) { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + /* sw mechanism */ + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } else { + halbtc8723b1ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/***************************************************** + * + * Non-Software Coex Mechanism start + * + *****************************************************/ +void halbtc8723b1ant_action_hs(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 2); +} + +void halbtc8723b1ant_action_bt_inquiry(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false, ap_enable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + if (!wifi_connected) { + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if (bt_link_info->sco_exist || bt_link_info->hid_only) { + /* SCO/HID-only busy */ + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 32); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else { + if (ap_enable) + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + else + halbtc8723b1ant_power_save_state(btcoexist, + BTC_PS_LPS_ON, + 0x50, 0x4); + + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 30); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +void halbtc8723b1ant_action_bt_sco_hid_only_busy(struct btc_coexist * btcoexist, + u8 wifi_status) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool 
wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + /* tdma and coex table */ + + if (bt_link_info->sco_exist) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + } else { /* HID */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 5); + } +} + +void halbtc8723b1ant_action_wifi_connected_bt_acl_busy( + struct btc_coexist *btcoexist, + u8 wifi_status) +{ + u8 bt_rssi_state; + + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0); + + if (bt_link_info->hid_only) { /*HID */ + halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist, + wifi_status); + coex_dm->auto_tdma_adjust = false; + return; + } else if (bt_link_info->a2dp_only) { /*A2DP */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_tdma_duration_adjust_for_acl(btcoexist, + wifi_status); + } else { /*for low BT RSSI */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 11); + coex_dm->auto_tdma_adjust = false; + } + + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } else if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { /*HID+A2DP */ + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->auto_tdma_adjust = false; + } else { /*for low BT RSSI*/ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 14); + coex_dm->auto_tdma_adjust = false; + } + + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); + /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */ + } else if (bt_link_info->pan_only || + (bt_link_info->hid_exist && bt_link_info->pan_exist)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6); + coex_dm->auto_tdma_adjust = false; + /*A2DP+PAN(OPP,FTP), HID+A2DP+PAN(OPP,FTP)*/ + } else if ((bt_link_info->a2dp_exist && bt_link_info->pan_exist) || + (bt_link_info->hid_exist && bt_link_info->a2dp_exist && + bt_link_info->pan_exist)) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + coex_dm->auto_tdma_adjust = false; + } +} + +void halbtc8723b1ant_action_wifi_not_connected(struct btc_coexist *btcoexist) +{ + /* power save state */ + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); +} + +void halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan( + struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); +} + +void halbtc8723b1ant_ActionWifiConnectedScan(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex 
table */ + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + if (bt_link_info->a2dp_exist && + bt_link_info->pan_exist) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 22); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +void halbtc8723b1ant_action_wifi_connected_special_packet( + struct btc_coexist *btcoexist) +{ + bool hs_connecting = false; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_CONNECTING, &hs_connecting); + + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + if (bt_link_info->a2dp_exist && bt_link_info->pan_exist) { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 22); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 1); + } + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 20); + halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + } +} + +void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist) +{ + bool wifi_busy = false; + bool scan = false, link = false, roam = false; + bool under_4way = false, ap_enable = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect()===>\n"); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + if (under_4way) { + halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), " + "return for wifi is under 4way<===\n"); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) { + halbtc8723b1ant_ActionWifiConnectedScan(btcoexist); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], CoexForWifiConnect(), " + "return for wifi is under scan<===\n"); + return; + } + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + /* power save state */ + if (!ap_enable && + BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status && + !btcoexist->bt_link_info.hid_only) + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_LPS_ON, + 0x50, 0x4); + else + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + + /* tdma and coex table */ + btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_BUSY, &wifi_busy); + if (!wifi_busy) { + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) 
{ + halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, + false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } + } else { + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) { + halbtc8723b1ant_action_wifi_connected_bt_acl_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } else if ((BT_8723B_1ANT_BT_STATUS_SCO_BUSY == + coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == + coex_dm->bt_status)) { + halbtc8723b1ant_action_bt_sco_hid_only_busy(btcoexist, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY); + } else { + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 2); + } + } +} + +void halbtc8723b1ant_run_sw_coexist_mechanism(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + algorithm = halbtc8723b1ant_action_algorithm(btcoexist); + coex_dm->cur_algorithm = algorithm; + + if (halbtc8723b1ant_is_common_action(btcoexist)) { + } else { + switch (coex_dm->cur_algorithm) { + case BT_8723B_1ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = SCO.\n"); + halbtc8723b1ant_action_sco(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID.\n"); + halbtc8723b1ant_action_hid(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = A2DP.\n"); + halbtc8723b1ant_action_a2dp(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = " + "A2DP+PAN(HS).\n"); + halbtc8723b1ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN(EDR).\n"); + halbtc8723b1ant_action_pan_edr(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HS mode.\n"); + halbtc8723b1ant_action_pan_hs(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = PAN+A2DP.\n"); + halbtc8723b1ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = " + "PAN(EDR)+HID.\n"); + halbtc8723b1ant_action_pan_edr_hid(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = " + "HID+A2DP+PAN.\n"); + halbtc8723b1ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8723B_1ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = HID+A2DP.\n"); + halbtc8723b1ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action algorithm = " + "coexist All Off!!\n"); + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool wifi_connected = false, bt_hs_on = false; + bool limited_dig = false, bIncreaseScanDevNum = false; + bool b_bt_ctrl_agg_buf_size = false; + u8 agg_buf_size = 5; + u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; + + 
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), " + "return for Manual CTRL <===\n"); + return; + } + + if (btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), " + "return for Stop Coex DM <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); + return; + } + + if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) { + limited_dig = true; + bIncreaseScanDevNum = true; + } + + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + btcoexist->btc_set(btcoexist, BTC_SET_BL_INC_SCAN_DEV_NUM, + &bIncreaseScanDevNum); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + if (!bt_link_info->sco_exist && !bt_link_info->hid_exist) { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); + } else { + if (wifi_connected) { + wifi_rssi_state = + halbtc8723b1ant_wifi_rssi_state(btcoexist, + 1, 2, 30, 0); + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b1ant_limited_tx(btcoexist, + NORMAL_EXEC, + 1, 1, 1, 1); + } else { + halbtc8723b1ant_limited_tx(btcoexist, + NORMAL_EXEC, + 1, 1, 1, 1); + } + } else { + halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC, + 0, 0, 0, 0); + } + } + + if (bt_link_info->sco_exist) { + b_bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x3; + } else if (bt_link_info->hid_exist) { + b_bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x5; + } else if (bt_link_info->a2dp_exist || bt_link_info->pan_exist) { + b_bt_ctrl_agg_buf_size = true; + agg_buf_size = 0x8; + } + halbtc8723b1ant_limited_rx(btcoexist, NORMAL_EXEC, false, + b_bt_ctrl_agg_buf_size, agg_buf_size); + + halbtc8723b1ant_run_sw_coexist_mechanism(btcoexist); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + + if (!wifi_connected) { + bool scan = false, link = false, roam = false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is non connected-idle !!!\n"); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (scan || link || roam) + halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist); + else + halbtc8723b1ant_action_wifi_not_connected(btcoexist); + } else { /* wifi LPS/Busy */ + halbtc8723b1ant_action_wifi_connected(btcoexist); + } +} + +void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* force to reset coex mechanism */ + halbtc8723b1ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + halbtc8723b1ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false); + + /* sw all off */ + halbtc8723b1ant_sw_mechanism1(btcoexist, false, false, false, false); + halbtc8723b1ant_sw_mechanism2(btcoexist,false, false, false, 0x18); + + halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8); + halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); +} + +void halbtc8723b1ant_init_hw_config(struct btc_coexist 
*btcoexist, bool backup) +{ + u32 u32tmp = 0; + u8 u8tmp = 0; + u32 cnt_bt_cal_chk = 0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 1Ant Init HW Config!!\n"); + + if (backup) {/* backup rf 0x1e value */ + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, + BTC_RF_A, 0x1e, 0xfffff); + + coex_dm->backup_arfr_cnt1 = + btcoexist->btc_read_4byte(btcoexist, 0x430); + coex_dm->backup_arfr_cnt2 = + btcoexist->btc_read_4byte(btcoexist, 0x434); + coex_dm->backup_retry_limit = + btcoexist->btc_read_2byte(btcoexist, 0x42a); + coex_dm->backup_ampdu_max_time = + btcoexist->btc_read_1byte(btcoexist, 0x456); + } + + /* WiFi goto standby while GNT_BT 0-->1 */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x780); + /* BT goto standby while GNT_BT 1-->0 */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x2, 0xfffff, 0x500); + + btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); + btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); + + + /* BT calibration check */ + while (cnt_bt_cal_chk <= 20) { + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d); + cnt_bt_cal_chk++; + if (u32tmp & BIT0) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ########### BT " + "calibration(cnt=%d) ###########\n", + cnt_bt_cal_chk); + mdelay(50); + } else { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ********** BT NOT " + "calibration (cnt=%d)**********\n", + cnt_bt_cal_chk); + break; + } + } + + /* 0x790[5:0]=0x5 */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u8tmp &= 0xc0; + u8tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); + + /* Enable counter statistics */ + /*0x76e[3] =1, WLAN_Act control by PTA */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x1); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); + + /*Antenna config */ + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_PTA, true, false); + /* PTA parameter */ + halbtc8723b1ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + +} + +void halbtc8723b1ant_wifi_off_hw_cfg(struct btc_coexist *btcoexist) +{ + /* set wlan_act to low */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0); +} + +/************************************************************** + * work around function start with wa_halbtc8723b1ant_ + **************************************************************/ +/************************************************************** + * extern function start with EXhalbtc8723b1ant_ + **************************************************************/ + +void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + halbtc8723b1ant_init_hw_config(btcoexist, true); +} + +void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + + btcoexist->stop_coex_dm = false; + + halbtc8723b1ant_init_coex_dm(btcoexist); + + halbtc8723b1ant_query_bt_info(btcoexist); +} + +void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + u8 *cli_buf = btcoexist->cli_buf; + u8 u8tmp[4], i, bt_info_ext, psTdmaCase=0; + u16 u16tmp[4]; + u32 u32tmp[4]; + bool roam = false, scan = false; + bool link = false, wifi_under_5g = false; + bool bt_hs_on = false, 
wifi_busy = false; + s32 wifi_rssi =0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cli_buf); + + if (btcoexist->manual_control) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ============[Under Manual Control]=========="); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + if (btcoexist->stop_coex_dm) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ============[Coex is STOPPED]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + + if (!board_info->bt_exist) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cli_buf); + return; + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", + "Ant PG Num/ Ant Mech/ Ant Pos:", \ + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", + "BT stack/ hci ext ver", \ + ((stack_info->profile_notified)? "Yes":"No"), + stack_info->hci_version); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ FwVer/ PatchVer", \ + glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant, + fw_ver, bt_patch_ver, bt_patch_ver); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", \ + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", \ + coex_dm->wifi_chnl_info[0], coex_dm->wifi_chnl_info[1], + coex_dm->wifi_chnl_info[2]); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist,BTC_GET_BL_WIFI_UNDER_5G, + &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g? "5G":"2.4G"), + ((BTC_WIFI_BW_LEGACY==wifi_bw)? "Legacy": + (((BTC_WIFI_BW_HT40==wifi_bw)? "HT40":"HT20"))), + ((!wifi_busy)? "idle": + ((BTC_WIFI_TRAFFIC_TX==wifi_traffic_dir)? 
+ "uplink":"downlink"))); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s/ %d/ %d] ", + "BT [status/ rssi/ retryCnt]", + ((btcoexist->bt_info.bt_disabled)? ("disabled"): + ((coex_sta->c2h_bt_inquiry_page)?("inquiry/page scan"): + ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status)? + "non-connected idle": + ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE == coex_dm->bt_status)? + "connected-idle":"busy")))), + coex_sta->bt_rssi, coex_sta->bt_retry_cnt); + CL_PRINTF(cli_buf); + + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", bt_link_info->sco_exist, + bt_link_info->hid_exist, bt_link_info->pan_exist, + bt_link_info->a2dp_exist); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate"); + CL_PRINTF(cli_buf); + + for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) { + if (coex_sta->bt_info_c2h_cnt[i]) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x " + "%02x %02x %02x %02x(%d)", + GLBtInfoSrc8723b1Ant[i], + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + CL_PRINTF(cli_buf); + } + } + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %s/%s, (0x%x/0x%x)", + "PS state, IPS/LPS, (lps/rpwm)", \ + ((coex_sta->under_ips? "IPS ON":"IPS OFF")), + ((coex_sta->under_lps? "LPS ON":"LPS OFF")), + btcoexist->bt_info.lps_1ant, + btcoexist->bt_info.rpwm_1ant); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + if (!btcoexist->manual_control) { + /* Sw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Sw mechanism]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", \ + coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, + btcoexist->bt_info.limited_dig); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", \ + coex_dm->cur_agc_table_en, + coex_dm->cur_adc_backoff, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + CL_PRINTF(cli_buf); + + + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", + "Rate Mask", btcoexist->bt_info.ra_mask); + CL_PRINTF(cli_buf); + + /* Fw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Fw mechanism]============"); + CL_PRINTF(cli_buf); + + psTdmaCase = coex_dm->cur_ps_tdma; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x %02x %02x " + "case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + psTdmaCase, coex_dm->auto_tdma_adjust); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x ", + "Latest error condition(should be 0)", \ + coex_dm->error_condition); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, + coex_dm->cur_ignore_wlan_act); + CL_PRINTF(cli_buf); + } + + /* Hw setting */ + 
CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Hw setting]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1, + coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit, + coex_dm->backup_ampdu_max_time); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434); + u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x", + "0x430/0x434/0x42a/0x456", + u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0], + (u32tmp[1] & 0x3e000000) >> 25); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0] & 0x20)>> 5), u8tmp[1]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8)>>3), u8tmp[1], + ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8); + u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); + + fa_ofdm = ((u32tmp[0] & 0xffff0000) >> 16) + + ((u32tmp[1] & 0xffff0000) >> 16) + + (u32tmp[1] & 0xffff) + + (u32tmp[2] & 0xffff) + \ + ((u32tmp[3] & 0xffff0000) >> 16) + + (u32tmp[3] & 0xffff) ; + fa_cck = (u8tmp[0] << 8) 
+ u8tmp[1]; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "OFDM-CCA/OFDM-FA/CCK-FA", + u32tmp[0] & 0xffff, fa_ofdm, fa_cck); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8(coexTable)", + u32tmp[0], u32tmp[1], u32tmp[2]); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x770(high-pri rx/tx)", coex_sta->high_priority_rx, + coex_sta->high_priority_tx); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); + CL_PRINTF(cli_buf); +#if(BT_AUTO_REPORT_ONLY_8723B_1ANT == 1) + halbtc8723b1ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS); +} + + +void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + + if (btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, + false, true); + /* set PTA control */ + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + halbtc8723b1ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + + halbtc8723b1ant_run_coexist_mechanism(btcoexist); + } +} + +void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (btcoexist->manual_control || btcoexist->stop_coex_dm) + return; + + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + bool wifi_connected = false, bt_hs_on = false; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + halbtc8723b1ant_query_bt_info(btcoexist); + + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (BTC_SCAN_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + if (!wifi_connected) /* non-connected scan */ + halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist); + else /* wifi is connected */ + halbtc8723b1ant_ActionWifiConnectedScan(btcoexist); + } else if (BTC_SCAN_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); + if (!wifi_connected) /* non-connected scan */ + halbtc8723b1ant_action_wifi_not_connected(btcoexist); + else + halbtc8723b1ant_action_wifi_connected(btcoexist); + } +} + +void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + bool wifi_connected = 
false, bt_hs_on = false; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (BTC_ASSOCIATE_START == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + halbtc8723b1ant_action_wifi_not_connected_asso_auth_scan(btcoexist); + } else if (BTC_ASSOCIATE_FINISH == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (!wifi_connected) /* non-connected scan */ + halbtc8723b1ant_action_wifi_not_connected(btcoexist); + else + halbtc8723b1ant_action_wifi_connected(btcoexist); + } +} + +void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] ={0}; + u32 wifi_bw; + u8 wifiCentralChnl; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled ) + return; + + if (BTC_MEDIA_CONNECT == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + + /* only 2.4G we need to inform bt the chnl mask */ + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL, + &wifiCentralChnl); + + if ((BTC_MEDIA_CONNECT == type) && + (wifiCentralChnl <= 14)) { + h2c_parameter[0] = 0x0; + h2c_parameter[1] = wifiCentralChnl; + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66=0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + bool bt_hs_on = false; + + if (btcoexist->manual_control || btcoexist->stop_coex_dm || + btcoexist->bt_info.bt_disabled) + return; + + coex_sta->special_pkt_period_cnt = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + if (coex_sta->c2h_bt_inquiry_page) { + halbtc8723b1ant_action_bt_inquiry(btcoexist); + return; + } else if (bt_hs_on) { + halbtc8723b1ant_action_hs(btcoexist); + return; + } + + if (BTC_PACKET_DHCP == type || + BTC_PACKET_EAPOL == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], special Packet(%d) notify\n", type); + halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist); + } +} + +void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + u8 bt_info = 0; + u8 i, rsp_source = 0; + bool wifi_connected = false; + bool bt_busy = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rsp_source = tmp_buf[0] & 0xf; + if (rsp_source >= BT_INFO_SRC_8723B_1ANT_MAX) + rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data=[", + rsp_source, length); + for (i=0; 
i<length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i]; + if (i == 1) + bt_info = tmp_buf[i]; + if (i == length - 1) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmp_buf[i]); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x, ", tmp_buf[i]); + } + + if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) { + coex_sta->bt_retry_cnt = /* [3:0] */ + coex_sta->bt_info_c2h[rsp_source][2] & 0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rsp_source][4]; + + /* Here we need to resend some wifi info to BT + * because bt is reset and loses the info.*/ + if(coex_sta->bt_info_ext & BIT1) + { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit1 check, " + "send wifi BW&Chnl to BT!!\n"); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if(wifi_connected) + ex_halbtc8723b1ant_media_status_notify(btcoexist, + BTC_MEDIA_CONNECT); + else + ex_halbtc8723b1ant_media_status_notify(btcoexist, + BTC_MEDIA_DISCONNECT); + } + + if (coex_sta->bt_info_ext & BIT3) { + if (!btcoexist->manual_control && + !btcoexist->stop_coex_dm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, " + "set BT NOT ignore Wlan active!!\n"); + halbtc8723b1ant_ignore_wlan_act(btcoexist, + FORCE_EXEC, + false); + } + } else { + /* BT already NOT ignore Wlan active, do nothing here.*/ + } +#if(BT_AUTO_REPORT_ONLY_8723B_1ANT == 0) + if (coex_sta->bt_info_ext & BIT4) { + /* BT auto report already enabled, do nothing */ + } else { + halbtc8723b1ant_bt_auto_report(btcoexist, FORCE_EXEC, + true); + } +#endif + } + + /* check BIT2 first ==> check if bt is under inquiry or page scan */ + if (bt_info & BT_INFO_8723B_1ANT_B_INQ_PAGE) + coex_sta->c2h_bt_inquiry_page = true; + else + coex_sta->c2h_bt_inquiry_page = false; + + /* set link exist status */ + if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + } else { /* connection exists */ + coex_sta->bt_link_exist = true; + if (bt_info & BT_INFO_8723B_1ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (bt_info & BT_INFO_8723B_1ANT_B_A2DP) + coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (bt_info & BT_INFO_8723B_1ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + } + + halbtc8723b1ant_update_bt_link_info(btcoexist); + + if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) { + coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "BT Non-Connected idle!!!\n"); + /* connection exists but not busy */ + } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) { + coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) || + (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) { + coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "BT SCO busy!!!\n"); + } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) { + if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status) +
coex_dm->auto_tdma_adjust = false; + + coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = + BT_8723B_1ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n"); + } + + if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) + bt_busy = true; + else + bt_busy = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + halbtc8723b1ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + + btcoexist->stop_coex_dm = true; + + halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false, true); + + halbtc8723b1ant_wifi_off_hw_cfg(btcoexist); + halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 0); + + ex_halbtc8723b1ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n"); + + if (BTC_WIFI_PNP_SLEEP == pnp_state) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify to SLEEP\n"); + btcoexist->stop_coex_dm = true; + halbtc8723b1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, + 0x0, 0x0); + halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9); + } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Pnp notify to WAKE UP\n"); + btcoexist->stop_coex_dm = false; + halbtc8723b1ant_init_hw_config(btcoexist, false); + halbtc8723b1ant_init_coex_dm(btcoexist); + halbtc8723b1ant_query_bt_info(btcoexist); + } +} + +void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + static u8 dis_ver_info_cnt = 0; + u32 fw_ver = 0, bt_patch_ver = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], ==========================" + "Periodical===========================\n"); + + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], *************************" + "***************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ " + "Ant Pos = %d/ %d/ %d\n", \ + board_info->pg_ant_num, board_info->btdm_ant_num, + board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", \ + ((stack_info->profile_notified)? 
"Yes":"No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], CoexVer/ FwVer/ PatchVer " + "= %d_%x/ 0x%x/ 0x%x(%d)\n", \ + glcoex_ver_date_8723b_1ant, + glcoex_ver_8723b_1ant, fw_ver, + bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], *****************************" + "***********************************\n"); + } + +#if(BT_AUTO_REPORT_ONLY_8723B_1ANT == 0) + halbtc8723b1ant_query_bt_info(btcoexist); + halbtc8723b1ant_monitor_bt_ctr(btcoexist); + halbtc8723b1ant_monitor_bt_enable_disable(btcoexist); +#else + if (halbtc8723b1ant_is_wifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) { + if (coex_sta->special_pkt_period_cnt > 2) + halbtc8723b1ant_run_coexist_mechanism(btcoexist); + } + + coex_sta->special_pkt_period_cnt++; +#endif +} + + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8723b1ant.h @@ -0,0 +1,175 @@ +/********************************************************************** + * The following is for 8723B 1ANT BT Co-exist definition + **********************************************************************/ +#define BT_AUTO_REPORT_ONLY_8723B_1ANT 1 + +#define BT_INFO_8723B_1ANT_B_FTP BIT7 +#define BT_INFO_8723B_1ANT_B_A2DP BIT6 +#define BT_INFO_8723B_1ANT_B_HID BIT5 +#define BT_INFO_8723B_1ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8723B_1ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8723B_1ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8723B_1ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8723B_1ANT_B_CONNECTION BIT0 + +#define BT_INFO_8723B_1ANT_A2DP_BASIC_RATE(_BT_INFO_EXT_) \ + (((_BT_INFO_EXT_&BIT0))? 
true:false) + +#define BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT 2 + +typedef enum _BT_INFO_SRC_8723B_1ANT{ + BT_INFO_SRC_8723B_1ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8723B_1ANT_BT_RSP = 0x1, + BT_INFO_SRC_8723B_1ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8723B_1ANT_MAX +}BT_INFO_SRC_8723B_1ANT,*PBT_INFO_SRC_8723B_1ANT; + +typedef enum _BT_8723B_1ANT_BT_STATUS{ + BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8723B_1ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8723B_1ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8723B_1ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8723B_1ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8723B_1ANT_BT_STATUS_MAX +}BT_8723B_1ANT_BT_STATUS,*PBT_8723B_1ANT_BT_STATUS; + +typedef enum _BT_8723B_1ANT_WIFI_STATUS{ + BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN = 0x1, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SCAN = 0x2, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_SPECIAL_PKT = 0x3, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_IDLE = 0x4, + BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY = 0x5, + BT_8723B_1ANT_WIFI_STATUS_MAX +}BT_8723B_1ANT_WIFI_STATUS,*PBT_8723B_1ANT_WIFI_STATUS; + +typedef enum _BT_8723B_1ANT_COEX_ALGO{ + BT_8723B_1ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8723B_1ANT_COEX_ALGO_SCO = 0x1, + BT_8723B_1ANT_COEX_ALGO_HID = 0x2, + BT_8723B_1ANT_COEX_ALGO_A2DP = 0x3, + BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8723B_1ANT_COEX_ALGO_PANEDR = 0x5, + BT_8723B_1ANT_COEX_ALGO_PANHS = 0x6, + BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8723B_1ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8723B_1ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8723B_1ANT_COEX_ALGO_MAX = 0xb, +}BT_8723B_1ANT_COEX_ALGO,*PBT_8723B_1ANT_COEX_ALGO; + +struct coex_dm_8723b_1ant{ + /* fw mechanism */ + bool pre_dec_bt_pwr; + bool cur_dec_bt_pwr; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 ps_tdma_du_adj_type; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + u8 pre_lps; + u8 cur_lps; + u8 pre_rpwm; + u8 cur_rpwm; + + /* sw mechanism */ + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_backoff; + bool cur_adc_backoff; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + u32 backup_arfr_cnt1; /* Auto Rate Fallback Retry cnt */ + u32 backup_arfr_cnt2; /* Auto Rate Fallback Retry cnt */ + u16 backup_retry_limit; + u8 backup_ampdu_max_time; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + u32 prera_mask; + u32 curra_mask; + u8 pre_arfr_type; + u8 cur_arfr_type; + u8 pre_retry_limit_type; + u8 cur_retry_limit_type; + u8 pre_ampdu_time_type; + u8 cur_ampdu_time_type; + + u8 error_condition; +}; + +struct coex_sta_8723b_1ant{ + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_lps; + bool under_ips; + u32 special_pkt_period_cnt; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 
bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8723B_1ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_1ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/************************************************************************* + * The following is interface which will notify coex module. + *************************************************************************/ +void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpState); +void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist); +void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist); + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.c @@ -0,0 +1,4185 @@ +/*************************************************************** + * Description: + * + * This file is for RTL8723B Co-exist mechanism + * + * History + * 2012/11/15 Cosa first check in. 
+ * + **************************************************************/ +/************************************************************** + * include files + **************************************************************/ +#include "halbt_precomp.h" +#if 1 +/************************************************************** + * Global variables, these are static variables + **************************************************************/ +static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant; +static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant; +static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant; +static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant; + +const char *const glbt_info_src_8723b_2ant[] = { + "BT Info[wifi fw]", + "BT Info[bt rsp]", + "BT Info[bt auto report]", +}; + +u32 glcoex_ver_date_8723b_2ant = 20131113; +u32 glcoex_ver_8723b_2ant = 0x3f; + +/************************************************************** + * local function proto type if needed + **************************************************************/ +/************************************************************** + * local function start with halbtc8723b2ant_ + **************************************************************/ +u8 halbtc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) +{ + s32 bt_rssi = 0; + u8 bt_rssi_state = coex_sta->pre_bt_rssi_state; + + bt_rssi = coex_sta->bt_rssi; + + if (level_num == 2) { + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to High\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Low\n"); + } + } else { + if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Low\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi thresh error!!\n"); + return coex_sta->pre_bt_rssi_state; + } + + if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) || + (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) { + if (bt_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Low\n"); + } + } else if ((coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_bt_rssi_state == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (bt_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + bt_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to High\n"); + } else if (bt_rssi < rssi_thresh) { + bt_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Low\n"); + } else { + 
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at Medium\n"); + } + } else { + if (bt_rssi < rssi_thresh1) { + bt_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "switch to Medium\n"); + } else { + bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE, + "[BTCoex], BT Rssi state " + "stay at High\n"); + } + } + } + + coex_sta->pre_bt_rssi_state = bt_rssi_state; + + return bt_rssi_state; +} + +u8 halbtc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist, + u8 index, u8 level_num, + u8 rssi_thresh, u8 rssi_thresh1) +{ + s32 wifi_rssi=0; + u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index]; + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + + if (level_num == 2) { + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if (wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to High\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Low\n"); + } + } else { + if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at High\n"); + } + } + } else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI thresh error!!\n"); + return coex_sta->pre_wifi_rssi_state[index]; + } + + if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_LOW) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_LOW)) { + if(wifi_rssi >= rssi_thresh + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Low\n"); + } + } else if ((coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_MEDIUM) || + (coex_sta->pre_wifi_rssi_state[index] == + BTC_RSSI_STATE_STAY_MEDIUM)) { + if (wifi_rssi >= rssi_thresh1 + + BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) { + wifi_rssi_state = BTC_RSSI_STATE_HIGH; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to High\n"); + } else if (wifi_rssi < rssi_thresh) { + wifi_rssi_state = BTC_RSSI_STATE_LOW; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Low\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at Medium\n"); + } + } else { + if (wifi_rssi < rssi_thresh1) { + wifi_rssi_state = BTC_RSSI_STATE_MEDIUM; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "switch to Medium\n"); + } else { + wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH; + 
BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_WIFI_RSSI_STATE, + "[BTCoex], wifi RSSI state " + "stay at High\n"); + } + } + } + + coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state; + + return wifi_rssi_state; +} + +void halbtc8723b2ant_monitor_bt_enable_disable(struct btc_coexist *btcoexist) +{ + static bool pre_bt_disabled = false; + static u32 bt_disable_cnt = 0; + bool bt_active = true, bt_disabled = false; + + /* This function check if bt is disabled */ + if (coex_sta->high_priority_tx == 0 && + coex_sta->high_priority_rx == 0 && + coex_sta->low_priority_tx == 0 && + coex_sta->low_priority_rx == 0) + bt_active = false; + + if (coex_sta->high_priority_tx == 0xffff && + coex_sta->high_priority_rx == 0xffff && + coex_sta->low_priority_tx == 0xffff && + coex_sta->low_priority_rx == 0xffff) + bt_active = true; + + if (bt_active) { + bt_disable_cnt = 0; + bt_disabled = false; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is enabled !!\n"); + } else { + bt_disable_cnt++; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], bt all counters=0, %d times!!\n", + bt_disable_cnt); + if (bt_disable_cnt >= 2) { + bt_disabled = true; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE, + &bt_disabled); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is disabled !!\n"); + } + } + + if (pre_bt_disabled != bt_disabled) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], BT is from %s to %s!!\n", + (pre_bt_disabled ? "disabled":"enabled"), + (bt_disabled ? "disabled":"enabled")); + + pre_bt_disabled = bt_disabled; + if (!bt_disabled) { + } else { + } + } +} + +void halbtc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist) +{ + u32 reg_hp_txrx, reg_lp_txrx, u32tmp; + u32 reg_hp_tx = 0, reg_hp_rx = 0; + u32 reg_lp_tx = 0, reg_lp_rx = 0; + + reg_hp_txrx = 0x770; + reg_lp_txrx = 0x774; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx); + reg_hp_tx = u32tmp & MASKLWORD; + reg_hp_rx = (u32tmp & MASKHWORD) >> 16; + + u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx); + reg_lp_tx = u32tmp & MASKLWORD; + reg_lp_rx = (u32tmp & MASKHWORD) >> 16; + + coex_sta->high_priority_tx = reg_hp_tx; + coex_sta->high_priority_rx = reg_hp_rx; + coex_sta->low_priority_tx = reg_lp_tx; + coex_sta->low_priority_rx = reg_lp_rx; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR, + "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n", + reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx); + + /* reset counter */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); +} + +void halbtc8723b2ant_query_bt_info(struct btc_coexist *btcoexist) +{ + u8 h2c_parameter[1] ={0}; + + coex_sta->c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT0; /* trigger */ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Query Bt Info, FW write 0x61=0x%x\n", + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter); +} + +bool halbtc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist) +{ + static bool pre_wifi_busy = false; + static bool pre_under_4way = false; + static bool pre_bt_hs_on = false; + bool wifi_busy = false, under_4way = false, bt_hs_on = false; + bool wifi_connected = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + 
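/* sample the busy, HS-operation and 4-way handshake flags; a change in any of them while connected counts as a Wi-Fi status change */ +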
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS, + &under_4way); + + if (wifi_connected) { + if (wifi_busy != pre_wifi_busy) { + pre_wifi_busy = wifi_busy; + return true; + } + + if (under_4way != pre_under_4way) { + pre_under_4way = under_4way; + return true; + } + + if (bt_hs_on != pre_bt_hs_on) { + pre_bt_hs_on = bt_hs_on; + return true; + } + } + + return false; +} + +void halbtc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist) +{ + /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/ + struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info; + bool bt_hs_on = false; + +#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */ + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + bt_link_info->bt_link_exist = coex_sta->bt_link_exist; + bt_link_info->sco_exist = coex_sta->sco_exist; + bt_link_info->a2dp_exist = coex_sta->a2dp_exist; + bt_link_info->pan_exist = coex_sta->pan_exist; + bt_link_info->hid_exist = coex_sta->hid_exist; + + /* work around for HS mode. */ + if (bt_hs_on) { + bt_link_info->pan_exist = true; + bt_link_info->bt_link_exist = true; + } +#else /* profile from bt stack */ + bt_link_info->bt_link_exist = stack_info->bt_link_exist; + bt_link_info->sco_exist = stack_info->sco_exist; + bt_link_info->a2dp_exist = stack_info->a2dp_exist; + bt_link_info->pan_exist = stack_info->pan_exist; + bt_link_info->hid_exist = stack_info->hid_exist; + + /*for win-8 stack HID report error*/ + if (!stack_info->hid_exist) + stack_info->hid_exist = coex_sta->hid_exist; + /*sync BTInfo with BT firmware and stack*/ + /* when stack HID report error, here we use the info from bt fw.*/ + if (!stack_info->bt_link_exist) + stack_info->bt_link_exist = coex_sta->bt_link_exist; +#endif + /* check if Sco only */ + if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->sco_only = true; + else + bt_link_info->sco_only = false; + + /* check if A2dp only */ + if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->a2dp_only = true; + else + bt_link_info->a2dp_only = false; + + /* check if Pan only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + bt_link_info->pan_exist && !bt_link_info->hid_exist) + bt_link_info->pan_only = true; + else + bt_link_info->pan_only = false; + + /* check if Hid only */ + if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist && + !bt_link_info->pan_exist && bt_link_info->hid_exist) + bt_link_info->hid_only = true; + else + bt_link_info->hid_only = false; +} + +u8 halbtc8723b2ant_action_algorithm(struct btc_coexist *btcoexist) +{ + struct btc_bt_link_info *bt_link_info=&btcoexist->bt_link_info; + bool bt_hs_on = false; + u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED; + u8 num_of_diff_profile = 0; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + + if (!bt_link_info->bt_link_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], No BT link exists!!!\n"); + return algorithm; + } + + if (bt_link_info->sco_exist) + num_of_diff_profile++; + if (bt_link_info->hid_exist) + num_of_diff_profile++; + if (bt_link_info->pan_exist) + num_of_diff_profile++; + if (bt_link_info->a2dp_exist) + num_of_diff_profile++; + + if (num_of_diff_profile == 1) { + if (bt_link_info->sco_exist) 
{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO only\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; + } else { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID only\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP only\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], PAN(HS) only\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], PAN(EDR) only\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR; + } + } + } + } else if (num_of_diff_profile == 2) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP ==> SCO\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + PAN(HS)\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_SCO; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + PAN(HS)\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], A2DP + PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex],A2DP + PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP; + } + } + } + } else if (num_of_diff_profile == 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->a2dp_exist) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP" + " ==> HID\n"); + algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else if (bt_link_info->hid_exist && + bt_link_info->pan_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + " + "PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + " + "PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } else if (bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP + " + "PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + A2DP + " + "PAN(EDR) ==> HID\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } else { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) 
{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP + " + "PAN(HS)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_HID_A2DP; + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], HID + A2DP + " + "PAN(EDR)\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR; + } + } + } + } else if (num_of_diff_profile >= 3) { + if (bt_link_info->sco_exist) { + if (bt_link_info->hid_exist && + bt_link_info->pan_exist && + bt_link_info->a2dp_exist) { + if (bt_hs_on) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Error!!! SCO + HID" + " + A2DP + PAN(HS)\n"); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], SCO + HID + A2DP +" + " PAN(EDR)==>PAN(EDR)+HID\n"); + algorithm = + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID; + } + } + } + } + + return algorithm; +} + +bool halbtc8723b2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist) +{ + bool bRet = false; + bool bt_hs_on = false, wifi_connected = false; + s32 bt_hs_rssi=0; + u8 bt_rssi_state; + + if (!btcoexist->btc_get(btcoexist, + BTC_GET_BL_HS_OPERATION, &bt_hs_on)) + return false; + if (!btcoexist->btc_get(btcoexist, + BTC_GET_BL_WIFI_CONNECTED, &wifi_connected)) + return false; + if (!btcoexist->btc_get(btcoexist, + BTC_GET_S4_HS_RSSI, &bt_hs_rssi)) + return false; + + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + if (wifi_connected) { + if (bt_hs_on) { + if (bt_hs_rssi > 37) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt " + "power for HS mode!!\n"); + bRet = true; + } + } else { + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], Need to decrease bt " + "power for Wifi is connected!!\n"); + bRet = true; + } + } + } + + return bRet; +} + +void halbtc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist, + u8 dac_swing_lvl) +{ + u8 h2c_parameter[1] ={0}; + + /* There are several type of dacswing + * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6 */ + h2c_parameter[0] = dac_swing_lvl; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter); +} + +void halbtc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist, + bool dec_bt_pwr) +{ + u8 h2c_parameter[1] = {0}; + + h2c_parameter[0] = 0; + + if (dec_bt_pwr) + h2c_parameter[0] |= BIT1; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n", + (dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter); +} + +void halbtc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist, + bool force_exec, bool dec_bt_pwr) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s Dec BT power = %s\n", + (force_exec? "force to":""), (dec_bt_pwr? 
"ON":"OFF")); + coex_dm->cur_dec_bt_pwr = dec_bt_pwr; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n", + coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr); + + if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr) + return; + } + halbtc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr); + + coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr; +} + +void halbtc8723b2ant_set_bt_auto_report(struct btc_coexist *btcoexist, + bool enable_auto_report) +{ + u8 h2c_parameter[1] = {0}; + h2c_parameter[0] = 0; + + if (enable_auto_report) + h2c_parameter[0] |= BIT0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], BT FW auto report : %s, FW write 0x68=0x%x\n", + (enable_auto_report? "Enabled!!":"Disabled!!"), + h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter); +} + +void halbtc8723b2ant_bt_auto_report(struct btc_coexist *btcoexist, + bool force_exec, bool enable_auto_report) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s BT Auto report = %s\n", + (force_exec? "force to":""), + ((enable_auto_report)? "Enabled":"Disabled")); + coex_dm->cur_bt_auto_report = enable_auto_report; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreBtAutoReport=%d, " + "bCurBtAutoReport=%d\n", + coex_dm->pre_bt_auto_report, + coex_dm->cur_bt_auto_report); + + if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report) + return; + } + halbtc8723b2ant_set_bt_auto_report(btcoexist, + coex_dm->cur_bt_auto_report); + + coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report; +} + +void halbtc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist, + bool force_exec, u8 fw_dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s set FW Dac Swing level = %d\n", + (force_exec? "force to":""), fw_dac_swing_lvl); + coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], preFwDacSwingLvl=%d, " + "curFwDacSwingLvl=%d\n", + coex_dm->pre_fw_dac_swing_lvl, + coex_dm->cur_fw_dac_swing_lvl); + + if(coex_dm->pre_fw_dac_swing_lvl == + coex_dm->cur_fw_dac_swing_lvl) + return; + } + + halbtc8723b2ant_set_fw_dac_swing_level(btcoexist, + coex_dm->cur_fw_dac_swing_lvl); + coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl; +} + +void halbtc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist, + bool rx_rf_shrink_on) +{ + if (rx_rf_shrink_on) { + /* Shrink RF Rx LPF corner */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Shrink RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, 0xffffc); + } else { + /* Resume RF Rx LPF corner */ + /* After initialized, we can use coex_dm->btRf0x1eBackup */ + if (btcoexist->initilized) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Resume RF Rx LPF corner!!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e, + 0xfffff, + coex_dm->bt_rf0x1e_backup); + } + } +} + +void halbtc8723b2ant_rf_shrink(struct btc_coexist *btcoexist, + bool force_exec, bool rx_rf_shrink_on) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn Rx RF Shrink = %s\n", + (force_exec? "force to":""), (rx_rf_shrink_on? 
"ON":"OFF")); + coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreRfRxLpfShrink=%d, " + "bCurRfRxLpfShrink=%d\n", + coex_dm->pre_rf_rx_lpf_shrink, + coex_dm->cur_rf_rx_lpf_shrink); + + if (coex_dm->pre_rf_rx_lpf_shrink == + coex_dm->cur_rf_rx_lpf_shrink) + return; + } + halbtc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist, + coex_dm->cur_rf_rx_lpf_shrink); + + coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink; +} + +void halbtc8723b2ant_set_sw_penalty_txrate_adaptive( + struct btc_coexist *btcoexist, + bool low_penalty_ra) +{ + u8 h2c_parameter[6] ={0}; + + h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/ + + if (low_penalty_ra) { + h2c_parameter[1] |= BIT0; + /*normal rate except MCS7/6/5, OFDM54/48/36*/ + h2c_parameter[2] = 0x00; + h2c_parameter[3] = 0xf7; /*MCS7 or OFDM54*/ + h2c_parameter[4] = 0xf8; /*MCS6 or OFDM48*/ + h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/ + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set WiFi Low-Penalty Retry: %s", + (low_penalty_ra? "ON!!":"OFF!!")); + + btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter); +} + +void halbtc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist, + bool force_exec, bool low_penalty_ra) +{ + /*return; */ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn LowPenaltyRA = %s\n", + (force_exec? "force to":""), (low_penalty_ra? "ON":"OFF")); + coex_dm->cur_low_penalty_ra = low_penalty_ra; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreLowPenaltyRa=%d, " + "bCurLowPenaltyRa=%d\n", + coex_dm->pre_low_penalty_ra, + coex_dm->cur_low_penalty_ra); + + if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra) + return; + } + halbtc8723b2ant_set_sw_penalty_txrate_adaptive(btcoexist, + coex_dm->cur_low_penalty_ra); + + coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra; +} + +void halbtc8723b2ant_set_dac_swing_reg(struct btc_coexist * btcoexist, + u32 level) +{ + u8 val = (u8) level; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Write SwDacSwing = 0x%x\n", level); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val); +} + +void halbtc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoexist, + bool sw_dac_swing_on, + u32 sw_dac_swing_lvl) +{ + if(sw_dac_swing_on) + halbtc8723b2ant_set_dac_swing_reg(btcoexist, sw_dac_swing_lvl); + else + halbtc8723b2ant_set_dac_swing_reg(btcoexist, 0x18); +} + + +void halbtc8723b2ant_dac_swing(struct btc_coexist *btcoexist, + bool force_exec, bool dac_swing_on, + u32 dac_swing_lvl) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n", + (force_exec? "force to":""), + (dac_swing_on? 
"ON":"OFF"), dac_swing_lvl); + coex_dm->cur_dac_swing_on = dac_swing_on; + coex_dm->cur_dac_swing_lvl = dac_swing_lvl; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x," + " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n", + coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl, + coex_dm->cur_dac_swing_on, + coex_dm->cur_dac_swing_lvl); + + if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) && + (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl)) + return; + } + mdelay(30); + halbtc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on, + dac_swing_lvl); + + coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on; + coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl; +} + +void halbtc8723b2ant_set_adc_backoff(struct btc_coexist *btcoexist, + bool adc_backoff) +{ + if (adc_backoff) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level On!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x3); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB BackOff Level Off!\n"); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc05, 0x30, 0x1); + } +} + +void halbtc8723b2ant_adc_backoff(struct btc_coexist *btcoexist, + bool force_exec, bool adc_backoff) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s turn AdcBackOff = %s\n", + (force_exec? "force to":""), (adc_backoff? "ON":"OFF")); + coex_dm->cur_adc_back_off = adc_backoff; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAdcBackOff=%d, bCurAdcBackOff=%d\n", + coex_dm->pre_adc_back_off, + coex_dm->cur_adc_back_off); + + if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off) + return; + } + halbtc8723b2ant_set_adc_backoff(btcoexist, coex_dm->cur_adc_back_off); + + coex_dm->pre_adc_back_off = coex_dm->cur_adc_back_off; +} + +void halbtc8723b2ant_set_agc_table(struct btc_coexist *btcoexist, + bool agc_table_en) +{ + u8 rssi_adjust_val = 0; + + /* BB AGC Gain Table */ + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table On!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], BB Agc Table Off!\n"); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001); + btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001); + } + + + /* RF Gain */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000); + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x38fff); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x38ffe); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc 
Table Off!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x380c3); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b, + 0xfffff, 0x28ce6); + } + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1); + + if (agc_table_en) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table On!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x38fff); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x38ffe); + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], Agc Table Off!\n"); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x380c3); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40, + 0xfffff, 0x28ce6); + } + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0); + + /* set rssiAdjustVal for wifi module. */ + if (agc_table_en) + rssi_adjust_val = 8; + btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, + &rssi_adjust_val); +} + +void halbtc8723b2ant_agc_table(struct btc_coexist *btcoexist, + bool force_exec, bool agc_table_en) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s %s Agc Table\n", + (force_exec? "force to":""), + (agc_table_en? "Enable":"Disable")); + coex_dm->cur_agc_table_en = agc_table_en; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n", + coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en); + + if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en) + return; + } + halbtc8723b2ant_set_agc_table(btcoexist, agc_table_en); + + coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en; +} + +void halbtc8723b2ant_set_coex_table(struct btc_coexist *btcoexist, + u32 val0x6c0, u32 val0x6c4, + u32 val0x6c8, u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0); + btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4); + btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8); + btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC, + "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc); + btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc); +} + +void halbtc8723b2ant_coex_table(struct btc_coexist *btcoexist, + bool force_exec, u32 val0x6c0, + u32 val0x6c4, u32 val0x6c8, + u8 val0x6cc) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW, + "[BTCoex], %s write Coex Table 0x6c0=0x%x," + " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n", + (force_exec? 
"force to":""), val0x6c0, + val0x6c4, val0x6c8, val0x6cc); + coex_dm->cur_val0x6c0 = val0x6c0; + coex_dm->cur_val0x6c4 = val0x6c4; + coex_dm->cur_val0x6c8 = val0x6c8; + coex_dm->cur_val0x6cc = val0x6cc; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], preVal0x6c0=0x%x, " + "preVal0x6c4=0x%x, preVal0x6c8=0x%x, " + "preVal0x6cc=0x%x !!\n", + coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4, + coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL, + "[BTCoex], curVal0x6c0=0x%x, " + "curVal0x6c4=0x%x, curVal0x6c8=0x%x, " + "curVal0x6cc=0x%x !!\n", + coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4, + coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc); + + if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) && + (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) && + (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) && + (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc)) + return; + } + halbtc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4, + val0x6c8, val0x6cc); + + coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0; + coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4; + coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8; + coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc; +} + +void halbtc8723b2ant_coex_table_with_type(struct btc_coexist *btcoexist, + bool force_exec, u8 type) +{ + switch (type) { + case 0: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x55555555, 0xffff, 0x3); + break; + case 1: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555, + 0x5afa5afa, 0xffff, 0x3); + break; + case 2: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a, + 0x5a5a5a5a, 0xffff, 0x3); + break; + case 3: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa, + 0xaaaaaaaa, 0xffff, 0x3); + break; + case 4: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff, + 0xffffffff, 0xffff, 0x3); + break; + case 5: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff, + 0x5fff5fff, 0xffff, 0x3); + break; + case 6: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5a5a5a5a, 0xffff, 0x3); + break; + case 7: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5afa5afa, 0xffff, 0x3); + break; + case 8: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea, + 0x5aea5aea, 0xffff, 0x3); + break; + case 9: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5aea5aea, 0xffff, 0x3); + break; + case 10: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5aff5aff, 0xffff, 0x3); + break; + case 11: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5a5f5a5f, 0xffff, 0x3); + break; + case 12: + halbtc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff, + 0x5f5f5f5f, 0xffff, 0x3); + break; + default: + break; + } +} + +void halbtc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist, + bool enable) +{ + u8 h2c_parameter[1] ={0}; + + if (enable) + h2c_parameter[0] |= BIT0;/* function enable*/ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], set FW for BT Ignore Wlan_Act, " + "FW write 0x63=0x%x\n", h2c_parameter[0]); + + btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter); +} + +void halbtc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist, + bool force_exec, bool enable) +{ + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn Ignore WlanAct %s\n", + (force_exec? "force to":""), (enable? 
"ON":"OFF")); + coex_dm->cur_ignore_wlan_act = enable; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPreIgnoreWlanAct = %d, " + "bCurIgnoreWlanAct = %d!!\n", + coex_dm->pre_ignore_wlan_act, + coex_dm->cur_ignore_wlan_act); + + if (coex_dm->pre_ignore_wlan_act == + coex_dm->cur_ignore_wlan_act) + return; + } + halbtc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable); + + coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act; +} + +void halbtc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1, + u8 byte2, u8 byte3, u8 byte4, u8 byte5) +{ + u8 h2c_parameter[5] ={0}; + + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + + coex_dm->ps_tdma_para[0] = byte1; + coex_dm->ps_tdma_para[1] = byte2; + coex_dm->ps_tdma_para[2] = byte3; + coex_dm->ps_tdma_para[3] = byte4; + coex_dm->ps_tdma_para[4] = byte5; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n", + h2c_parameter[0], + h2c_parameter[1] << 24 | h2c_parameter[2] << 16 | + h2c_parameter[3] << 8 | h2c_parameter[4]); + + btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter); +} + +void halbtc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist, + bool shrink_rx_lpf, bool low_penalty_ra, + bool limited_dig, bool bt_lna_constrain) +{ + /* + u32 wifi_bw; + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if(BTC_WIFI_BW_HT40 != wifi_bw) //only shrink RF Rx LPF for HT40 + { + if (shrink_rx_lpf) + shrink_rx_lpf = false; + } + */ + + halbtc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf); + halbtc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra); +} + +void halbtc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist, + bool agc_table_shift, bool adc_backoff, + bool sw_dac_swing, u32 dac_swing_lvl) +{ + halbtc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift); + /*halbtc8723b2ant_adc_backoff(btcoexist, NORMAL_EXEC, adc_backoff);*/ + halbtc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing, + dac_swing_lvl); +} + +void halbtc8723b2ant_set_ant_path(struct btc_coexist *btcoexist, + u8 antpos_type, bool init_hwcfg, + bool wifi_off) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + u32 fw_ver = 0, u32tmp=0; + bool pg_ext_switch = false; + bool use_ext_switch = false; + u8 h2c_parameter[2] ={0}; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_EXT_SWITCH, &pg_ext_switch); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + + if ((fw_ver<0xc0000) || pg_ext_switch) + use_ext_switch = true; + + if (init_hwcfg) { + /* 0x4c[23]=0, 0x4c[24]=1 Antenna control by WL/BT */ + u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u32tmp &= ~BIT23; + u32tmp |= BIT24; + btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp); + + btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3); + btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1); + + /* Force GNT_BT to low */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0); + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + + if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) { + /* tell firmware "no antenna inverse" */ + h2c_parameter[0] = 0; + h2c_parameter[1] = 1; /* ext switch type */ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } else { + /* tell firmware "antenna 
inverse" */ + h2c_parameter[0] = 1; + h2c_parameter[1] = 1; /* ext switch type */ + btcoexist->btc_fill_h2c(btcoexist, 0x65, 2, + h2c_parameter); + } + } + + /* ext switch setting */ + if (use_ext_switch) { + /* fixed internal switch S1->WiFi, S0->BT */ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + switch (antpos_type) { + case BTC_ANT_WIFI_AT_MAIN: + /* ext switch main at wifi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, + 0x3, 0x1); + break; + case BTC_ANT_WIFI_AT_AUX: + /* ext switch aux at wifi */ + btcoexist->btc_write_1byte_bitmask(btcoexist, + 0x92c, 0x3, 0x2); + break; + } + } else { /* internal switch */ + /* fixed ext switch */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1); + switch (antpos_type) { + case BTC_ANT_WIFI_AT_MAIN: + /* fixed internal switch S1->WiFi, S0->BT */ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0); + break; + case BTC_ANT_WIFI_AT_AUX: + /* fixed internal switch S0->WiFi, S1->BT */ + btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280); + break; + } + } +} + + +void halbtc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, + bool turn_on, u8 type) +{ + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], %s turn %s PS TDMA, type=%d\n", + (force_exec? "force to":""), (turn_on? "ON":"OFF"), type); + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + + if (!force_exec) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n", + coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n", + coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma); + + if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) && + (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma)) + return; + } + if (turn_on) { + switch (type) { + case 1: + default: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 2: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 3: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0xf1, 0x90); + break; + case 4: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, + 0x03, 0xf1, 0x90); + break; + case 5: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 6: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 7: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + 0x3, 0x70, 0x90); + break; + case 8: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10, + 0x3, 0x70, 0x90); + break; + case 9: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + case 10: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0xe1, 0x90); + break; + case 11: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0xa, 0xe1, 0x90); + break; + case 12: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 13: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0x60, 0x90); + break; + case 14: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12, + 0x12, 0x60, 0x90); + break; + case 15: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa, + 0xa, 0x60, 0x90); + break; + case 16: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, + 0x5, 0x60, 0x90); + break; + case 17: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f, + 0x2f, 0x60, 0x90); + break; + case 18: + 
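/* this and the remaining turn-on cases differ only in the five parameter bytes passed to the 0x60 H2C command */ +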
halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5, + 0x5, 0xe1, 0x90); + break; + case 19: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x25, 0xe1, 0x90); + break; + case 20: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25, + 0x25, 0x60, 0x90); + break; + case 21: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15, + 0x03, 0x70, 0x90); + break; + case 71: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a, + 0x1a, 0xe1, 0x90); + break; + } + } else { + /* disable PS tdma */ + switch (type) { + case 0: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); + break; + case 1: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x48, 0x0); + break; + default: + halbtc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0, + 0x40, 0x0); + break; + } + } + + /* update pre state */ + coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on; + coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma; +} + +void halbtc8723b2ant_coex_alloff(struct btc_coexist *btcoexist) +{ + /* fw all off */ + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + /* sw all off */ + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + + /* hw all off */ + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); +} + +void halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + /* force to reset coex mechanism*/ + + halbtc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + halbtc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false); + + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); +} + +void halbtc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist) +{ + bool wifi_connected = false; + bool low_pwr_disable = true; + + btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + + if (wifi_connected) { + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3); + } else { + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + } + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6); + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18); + + coex_dm->need_recover_0x948 = true; + coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948); + + halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_AUX, + false, false); +} + +bool halbtc8723b2ant_is_common_action(struct btc_coexist *btcoexist) +{ + bool bCommon = false, wifi_connected = false; + bool wifi_busy = false; + bool bt_hs_on = false, low_pwr_disable = false; + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + + if (!wifi_connected) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, 
BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi non-connected idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, false, + false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, false, + 0x18); + + bCommon = true; + } else { + if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = false; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + " + "BT non connected-idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + halbtc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, + 1); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 0xb); + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + bCommon = true; + } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE == + coex_dm->bt_status) { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if(bt_hs_on) + return false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi connected + " + "BT connected-idle!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + halbtc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 0); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, + 1); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 0xb); + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, + false); + + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + + bCommon = true; + } else { + low_pwr_disable = true; + btcoexist->btc_set(btcoexist, + BTC_SET_ACT_DISABLE_LOW_POWER, + &low_pwr_disable); + + if (wifi_busy) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Busy + " + "BT Busy!!\n"); + bCommon = false; + } else { + if(bt_hs_on) + return false; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Wifi Connected-Idle + " + "BT Busy!!\n"); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, + 0x1, 0xfffff, 0x0); + halbtc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, + 7); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, + true, 21); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, + NORMAL_EXEC, + 0xb); + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, + true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, + NORMAL_EXEC, + false); + halbtc8723b2ant_sw_mechanism1(btcoexist, false, + false, false, + false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, + false, false, + 0x18); + bCommon = true; + } + } + } + + return bCommon; +} +void halbtc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, + bool sco_hid, bool tx_pause, + u8 max_interval) +{ + static s32 up, dn, m, n, wait_count; + /*0: no change, +1: increase WiFi 
duration, -1: decrease WiFi duration*/ + s32 result; + u8 retryCount=0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW, + "[BTCoex], TdmaDurationAdjust()\n"); + + if (!coex_dm->auto_tdma_adjust) { + coex_dm->auto_tdma_adjust = true; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], first run TdmaDurationAdjust()!!\n"); + if (sco_hid) { + if (tx_pause) { + if (max_interval == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = 13; + }else if (max_interval == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (max_interval == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } + } else { + if(max_interval == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (max_interval == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (max_interval == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } + } + } else { + if (tx_pause) { + if (max_interval == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (max_interval == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (max_interval == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } + } else { + if (max_interval == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = 1; + } else if (max_interval == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (max_interval == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } + } + } + + up = 0; + dn = 0; + m = 1; + n= 3; + result = 0; + wait_count = 0; + } else { + /*accquire the BT TRx retry count from BT_Info byte2*/ + retryCount = coex_sta->bt_retry_cnt; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], retryCount = %d\n", retryCount); + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n", + up, dn, m, n, wait_count); + result = 0; + wait_count++; + /* no retry in the last 2-second duration*/ + if (retryCount == 0) { + up++; + dn--; + + if (dn <= 0) + dn = 0; + + if (up >= n) { + wait_count = 0; + n = 3; + up = 0; + dn = 0; + result = 1; + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], Increase wifi " + "duration!!\n"); + }/* <=3 retry in the last 2-second duration*/ + } else if (retryCount <= 3) { + up--; + dn++; + + if (up <= 0) + up = 0; + + if (dn == 2) { + if (wait_count <= 2) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, + 
ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration " + "for retryCounter<3!!\n"); + } + } else { + if (wait_count == 1) + m++; + else + m = 1; + + if (m >= 20) + m = 20; + + n = 3 * m; + up = 0; + dn = 0; + wait_count = 0; + result = -1; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], Decrease wifi duration " + "for retryCounter>3!!\n"); + } + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], max Interval = %d\n", max_interval); + if (max_interval == 1) { + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + + if (coex_dm->cur_ps_tdma == 71) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = 5; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + + if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = 13; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if(coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if(coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if(coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 5); + coex_dm->ps_tdma_du_adj_type = + 5; + } else if(coex_dm->cur_ps_tdma == 16) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if(coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 
+ 14; + } else if(coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 13); + coex_dm->ps_tdma_du_adj_type = + 13; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = 71; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + + if (coex_dm->cur_ps_tdma == 13) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = 9; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if(coex_dm->cur_ps_tdma == 16) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + + if (result == -1) { + if (coex_dm->cur_ps_tdma == 71) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if(coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if(coex_dm->cur_ps_tdma == 9) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 1); + coex_dm->ps_tdma_du_adj_type = + 1; + } else if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 71); + coex_dm->ps_tdma_du_adj_type = + 71; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 9); + coex_dm->ps_tdma_du_adj_type = + 9; + } + } + } + } else if(max_interval 
== 2) { + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = 6; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = 14; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 6); + coex_dm->ps_tdma_du_adj_type = + 6; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 14); + coex_dm->ps_tdma_du_adj_type = + 14; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = 2; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + 
true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 14){ + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = 10; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 2); + coex_dm->ps_tdma_du_adj_type = + 2; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 10); + coex_dm->ps_tdma_du_adj_type = + 10; + } + } + } + } else if (max_interval == 3) { + if (tx_pause) { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 1\n"); + if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = 7; + } else if (coex_dm->cur_ps_tdma == 4) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = 8; + } + if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + 
coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = 15; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = 16; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 5) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 8); + coex_dm->ps_tdma_du_adj_type = + 8; + } else if (coex_dm->cur_ps_tdma == 13) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 16); + coex_dm->ps_tdma_du_adj_type = + 16; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 8) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 7); + coex_dm->ps_tdma_du_adj_type = + 7; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 15); + coex_dm->ps_tdma_du_adj_type = + 15; + } + } + } else { + BTC_PRINT(BTC_MSG_ALGORITHM, + ALGO_TRACE_FW_DETAIL, + "[BTCoex], TxPause = 0\n"); + if (coex_dm->cur_ps_tdma == 5) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 6) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 7) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 3; + } else if (coex_dm->cur_ps_tdma == 8) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = 4; + } + if (coex_dm->cur_ps_tdma == 13) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 14) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 15) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = 11; + } else if (coex_dm->cur_ps_tdma == 16) { + halbtc8723b2ant_ps_tdma(btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = 12; + } + if (result == -1) { + if (coex_dm->cur_ps_tdma == 1) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = 
+ 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 4); + coex_dm->ps_tdma_du_adj_type = + 4; + } else if (coex_dm->cur_ps_tdma == 9) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 12); + coex_dm->ps_tdma_du_adj_type = + 12; + } + } else if (result == 1) { + if (coex_dm->cur_ps_tdma == 4) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 3) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 2) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 3); + coex_dm->ps_tdma_du_adj_type = + 3; + } else if (coex_dm->cur_ps_tdma == 12) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 11) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } else if (coex_dm->cur_ps_tdma == 10) { + halbtc8723b2ant_ps_tdma( + btcoexist, + NORMAL_EXEC, + true, 11); + coex_dm->ps_tdma_du_adj_type = + 11; + } + } + } + } + } + + /*if current PsTdma not match with the recorded one (when scan, dhcp..), + *then we have to adjust it back to the previous record one.*/ + if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) { + bool scan = false, link = false, roam = false; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], PsTdma type dismatch!!!, " + "curPsTdma=%d, recordPsTdma=%d\n", + coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + + if (!scan && !link && !roam) + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, + coex_dm->ps_tdma_du_adj_type); + else + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL, + "[BTCoex], roaming/link/scan is under" + " progress, will adjust next time!!!\n"); + } +} + +/* SCO only or SCO+PAN(HS) */ +void halbtc8723b2ant_action_sco(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + /*for SCO quality at 11b/g mode*/ + if (BTC_WIFI_BW_LEGACY == wifi_bw) + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2); + else /*for SCO quality & wifi performance balance at 11n mode*/ + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 8); + + /*for voice quality */ + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0); + + /* 
sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x4); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + true, 0x4); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x4); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + true, 0x4); + } + } +} + + +void halbtc8723b2ant_action_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (BTC_WIFI_BW_LEGACY == wifi_bw) /*/for HID at 11b/g mode*/ + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + else /*for HID quality & wifi performance balance at 11n mode*/ + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 9); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9); + else + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*A2DP only / PAN(EDR) only/ A2DP+PAN(HS)*/ +void halbtc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u32 wifi_bw; + u8 ap_num = 0; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + wifi_rssi_state1 = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 1, 2, 40, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); + + /* define the office environment */ + /* driver don't know AP num in Linux, so we will never enter this if */ + if (ap_num >= 10 && 
BTC_RSSI_HIGH(wifi_rssi_state1)) { + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0); + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + true, 0x18); + } + return; + } + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + halbtc8723b2ant_tdma_duration_adjust(btcoexist,false, false, 1); + else + halbtc8723b2ant_tdma_duration_adjust(btcoexist,false, true, 1); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + + halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 
0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false,0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 10); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1); + else + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); + + /* sw mechanism */ + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + + +/*PAN(HS) only*/ +void halbtc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH) ) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + + halbtc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + 
(wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/*PAN(EDR)+A2DP*/ +void halbtc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_coex_table_with_type(btcoexist,NORMAL_EXEC, 12); + if (BTC_WIFI_BW_HT40 == wifi_bw) + halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, + true, 3); + else + halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, + false, 3); + } else { + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + halbtc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, false, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (BTC_WIFI_BW_HT40 == wifi_bw) { + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 3); + halbtc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 11); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x780); + } else { + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, + 6); + halbtc8723b2ant_coex_table_with_type(btcoexist, + NORMAL_EXEC, 7); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, + 0xfffff, 0x0); + } + 
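+ /* BT RSSI is high: let the TDMA duration-adjust state machine pick the PS TDMA case */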
halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2); + } else { + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + halbtc8723b2ant_coex_table_with_type(btcoexist,NORMAL_EXEC, 11); + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, + 0x0); + halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)){ + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +/* HID+A2DP+PAN(EDR) */ +void halbtc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + if (BTC_WIFI_BW_HT40 == wifi_bw) + halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, + true, 2); + else + halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, + false, 3); + } else { + halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3); + } + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) +{ + u8 wifi_rssi_state, bt_rssi_state; + u32 wifi_bw; + + wifi_rssi_state = halbtc8723b2ant_wifi_rssi_state(btcoexist, + 0, 2, 15, 0); + bt_rssi_state = halbtc8723b2ant_bt_rssi_state(2, 29, 0); + + btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 
0x1, 0xfffff, 0x0); + + halbtc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6); + + if (halbtc8723b2ant_need_to_dec_bt_pwr(btcoexist)) + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true); + else + halbtc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + + halbtc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 7); + + if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) || + (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) + halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2); + else + halbtc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2); + + /* sw mechanism */ + if (BTC_WIFI_BW_HT40 == wifi_bw) { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, true, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } else { + if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || + (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, true, false, + false, 0x18); + } else { + halbtc8723b2ant_sw_mechanism1(btcoexist, false, true, + false, false); + halbtc8723b2ant_sw_mechanism2(btcoexist, false, false, + false, 0x18); + } + } +} + +void halbtc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist) +{ + u8 algorithm = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism()===>\n"); + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], RunCoexistMechanism(), " + "return for Manual CTRL <===\n"); + return; + } + + if (coex_sta->under_ips) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], wifi is under IPS !!!\n"); + return; + } + + algorithm = halbtc8723b2ant_action_algorithm(btcoexist); + if (coex_sta->c2h_bt_inquiry_page && + (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT is under inquiry/page scan !!\n"); + halbtc8723b2ant_action_bt_inquiry(btcoexist); + return; + } else { + if (coex_dm->need_recover_0x948) { + coex_dm->need_recover_0x948 = false; + btcoexist->btc_write_2byte(btcoexist, 0x948, + coex_dm->backup_0x948); + } + } + + coex_dm->cur_algorithm = algorithm; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d \n", + coex_dm->cur_algorithm); + + if (halbtc8723b2ant_is_common_action(btcoexist)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant common.\n"); + coex_dm->auto_tdma_adjust = false; + } else { + if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], preAlgorithm=%d, " + "curAlgorithm=%d\n", coex_dm->pre_algorithm, + coex_dm->cur_algorithm); + coex_dm->auto_tdma_adjust = false; + } + switch (coex_dm->cur_algorithm) { + case BT_8723B_2ANT_COEX_ALGO_SCO: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = SCO.\n"); + halbtc8723b2ant_action_sco(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, algorithm = HID.\n"); + halbtc8723b2ant_action_hid(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 
2-Ant, " + "algorithm = A2DP.\n"); + halbtc8723b2ant_action_a2dp(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = A2DP+PAN(HS).\n"); + halbtc8723b2ant_action_a2dp_pan_hs(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = PAN(EDR).\n"); + halbtc8723b2ant_action_pan_edr(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANHS: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = HS mode.\n"); + halbtc8723b2ant_action_pan_hs(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = PAN+A2DP.\n"); + halbtc8723b2ant_action_pan_edr_a2dp(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = PAN(EDR)+HID.\n"); + halbtc8723b2ant_action_pan_edr_hid(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = HID+A2DP+PAN.\n"); + halbtc8723b2ant_action_hid_a2dp_pan_edr(btcoexist); + break; + case BT_8723B_2ANT_COEX_ALGO_HID_A2DP: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = HID+A2DP.\n"); + halbtc8723b2ant_action_hid_a2dp(btcoexist); + break; + default: + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], Action 2-Ant, " + "algorithm = coexist All Off!!\n"); + halbtc8723b2ant_coex_alloff(btcoexist); + break; + } + coex_dm->pre_algorithm = coex_dm->cur_algorithm; + } +} + +void halbtc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist) +{ + /* set wlan_act to low */ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0x4); + /* Force GNT_BT to High */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x3); + /* BT select s0/s1 is controlled by BT */ + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x0); +} + +/********************************************************************* + * work around function start with wa_halbtc8723b2ant_ + *********************************************************************/ +/********************************************************************* + * extern function start with EXhalbtc8723b2ant_ + *********************************************************************/ +void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist) +{ + u8 u8tmp = 0; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], 2Ant Init HW Config!!\n"); + coex_dm->bt_rf0x1e_backup = + btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff); + + /* 0x790[5:0]=0x5 */ + u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790); + u8tmp &= 0xc0; + u8tmp |= 0x5; + btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp); + + + /*Antenna config */ + halbtc8723b2ant_set_ant_path(btcoexist, BTC_ANT_WIFI_AT_MAIN, + true, false); + + + + + /* PTA parameter */ + halbtc8723b2ant_coex_table_with_type(btcoexist, FORCE_EXEC, 0); + + /* Enable counter statistics */ + /*0x76e[3] =1, WLAN_Act control by PTA*/ + btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc); + btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3); + btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1); +} + +void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Coex Mechanism Init!!\n"); + 
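+ /* the real initialization is done by the internal halbtc8723b2ant_init_coex_dm() helper */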
halbtc8723b2ant_init_coex_dm(btcoexist); +} + +void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + struct btc_bt_link_info* bt_link_info = &btcoexist->bt_link_info; + u8 *cli_buf = btcoexist->cli_buf; + u8 u8tmp[4], i, bt_info_ext, ps_tdma_case=0; + u32 u32tmp[4]; + bool roam = false, scan = false; + bool link = false, wifi_under_5g = false; + bool bt_hs_on = false, wifi_busy = false; + s32 wifi_rssi = 0, bt_hs_rssi = 0; + u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck; + u8 wifi_dot11_chnl, wifi_hs_chnl; + u32 fw_ver = 0, bt_patch_ver = 0; + u8 ap_num = 0; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ============[BT Coexist info]============"); + CL_PRINTF(cli_buf); + + if (btcoexist->manual_control) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n ==========[Under Manual Control]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n =========================================="); + CL_PRINTF(cli_buf); + } + + if (!board_info->bt_exist) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!"); + CL_PRINTF(cli_buf); + return; + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + "Ant PG number/ Ant mechanism:", + board_info->pg_ant_num, board_info->btdm_ant_num); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d", + "BT stack/ hci ext ver", + ((stack_info->profile_notified)? "Yes":"No"), + stack_info->hci_version); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)", + "CoexVer/ fw_ver/ PatchVer", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL, + &wifi_dot11_chnl); + btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)", + "Dot11 channel / HsChnl(HsMode)", + wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ", + "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0], + coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi); + btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d", + "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "Wifi link/ roam/ scan", link, roam, scan); + CL_PRINTF(cli_buf); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw); + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + &wifi_traffic_dir); + 
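+ /* report the WiFi status gathered above: band, bandwidth and traffic direction */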
CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ", + "Wifi status", (wifi_under_5g? "5G":"2.4G"), + ((BTC_WIFI_BW_LEGACY == wifi_bw)? "Legacy": + (((BTC_WIFI_BW_HT40 == wifi_bw)? "HT40":"HT20"))), + ((!wifi_busy)? "idle": + ((BTC_WIFI_TRAFFIC_TX ==wifi_traffic_dir)?\ + "uplink":"downlink"))); + CL_PRINTF(cli_buf); + + + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d", + "SCO/HID/PAN/A2DP", + bt_link_info->sco_exist, bt_link_info->hid_exist, + bt_link_info->pan_exist, bt_link_info->a2dp_exist); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO); + + bt_info_ext = coex_sta->bt_info_ext; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s", + "BT Info A2DP rate", + (bt_info_ext&BIT0)? "Basic rate":"EDR rate"); + CL_PRINTF(cli_buf); + + for (i=0; ibt_info_c2h_cnt[i]) { + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x " + "%02x %02x %02x %02x(%d)", + glbt_info_src_8723b_2ant[i], \ + coex_sta->bt_info_c2h[i][0], + coex_sta->bt_info_c2h[i][1], + coex_sta->bt_info_c2h[i][2], + coex_sta->bt_info_c2h[i][3], + coex_sta->bt_info_c2h[i][4], + coex_sta->bt_info_c2h[i][5], + coex_sta->bt_info_c2h[i][6], + coex_sta->bt_info_c2h_cnt[i]); + CL_PRINTF(cli_buf); + } + } + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s", + "PS state, IPS/LPS", + ((coex_sta->under_ips? "IPS ON":"IPS OFF")), + ((coex_sta->under_lps? "LPS ON":"LPS OFF"))); + CL_PRINTF(cli_buf); + btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD); + + /* Sw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s", "============[Sw mechanism]============"); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ", + "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink, + coex_dm->cur_low_penalty_ra, coex_dm->limited_dig); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ", + "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]", + coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off, + coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl); + CL_PRINTF(cli_buf); + + /* Fw mechanism */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Fw mechanism]============"); + CL_PRINTF(cli_buf); + + ps_tdma_case = coex_dm->cur_ps_tdma; + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)", + "PS TDMA", coex_dm->ps_tdma_para[0], + coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2], + coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4], + ps_tdma_case, coex_dm->auto_tdma_adjust); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ", + "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr, + coex_dm->cur_ignore_wlan_act); + CL_PRINTF(cli_buf); + + /* Hw setting */ + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s", + "============[Hw setting]============"); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", + "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup); + CL_PRINTF(cli_buf); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x778/0x880[29:25]", u8tmp[0], + (u32tmp[0]&0x3e000000) >> 25); + CL_PRINTF(cli_buf); + + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 
0x765); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x948/ 0x67[5] / 0x765", + u32tmp[0], ((u8tmp[0]&0x20)>> 5), u8tmp[1]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]", + u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3); + CL_PRINTF(cli_buf); + + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40); + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c); + u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x38[11]/0x40/0x4c[24:23]/0x64[0]", + ((u8tmp[0] & 0x8)>>3), u8tmp[1], + ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x", + "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8); + u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0); + + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b); + u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c); + + fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) + + ((u32tmp[1]&0xffff0000) >> 16) + + (u32tmp[1] & 0xffff) + + (u32tmp[2] & 0xffff) + + ((u32tmp[3]&0xffff0000) >> 16) + + (u32tmp[3] & 0xffff) ; + fa_cck = (u8tmp[0] << 8) + u8tmp[1]; + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x", + "OFDM-CCA/OFDM-FA/CCK-FA", \ + u32tmp[0]&0xffff, fa_ofdm, fa_cck); + CL_PRINTF(cli_buf); + + u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0); + u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4); + u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8); + u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, + "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x", + "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)", \ + u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]); + CL_PRINTF(cli_buf); + + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x770(high-pri rx/tx)", + coex_sta->high_priority_rx, coex_sta->high_priority_tx); + CL_PRINTF(cli_buf); + CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", + "0x774(low-pri rx/tx)", coex_sta->low_priority_rx, + coex_sta->low_priority_tx); + CL_PRINTF(cli_buf); +#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) + halbtc8723b2ant_monitor_bt_ctr(btcoexist); +#endif + btcoexist->btc_disp_dbg_msg(btcoexist, + BTC_DBG_DISP_COEX_STATISTICS); +} + + +void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_IPS_ENTER == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS ENTER notify\n"); + coex_sta->under_ips = true; + halbtc8723b2ant_wifioff_hwcfg(btcoexist); + halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + 
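+ /* WiFi is entering IPS: BT may ignore WLAN activity and all coex mechanisms are turned off */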
halbtc8723b2ant_coex_alloff(btcoexist); + } else if (BTC_IPS_LEAVE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], IPS LEAVE notify\n"); + coex_sta->under_ips = false; + ex_halbtc8723b2ant_init_hwconfig(btcoexist); + halbtc8723b2ant_init_coex_dm(btcoexist); + halbtc8723b2ant_query_bt_info(btcoexist); + } +} + +void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_LPS_ENABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS ENABLE notify\n"); + coex_sta->under_lps = true; + } else if (BTC_LPS_DISABLE == type) { + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], LPS DISABLE notify\n"); + coex_sta->under_lps = false; + } +} + +void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_SCAN_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN START notify\n"); + else if (BTC_SCAN_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], SCAN FINISH notify\n"); +} + +void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type) +{ + if (BTC_ASSOCIATE_START == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT START notify\n"); + else if (BTC_ASSOCIATE_FINISH == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], CONNECT FINISH notify\n"); +} + +void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type) +{ + u8 h2c_parameter[3] ={0}; + u32 wifi_bw; + u8 wifi_central_chnl; + + if (BTC_MEDIA_CONNECT == type) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA connect notify\n"); + else + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], MEDIA disconnect notify\n"); + + /* only 2.4G we need to inform bt the chnl mask */ + btcoexist->btc_get(btcoexist, + BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl); + if ((BTC_MEDIA_CONNECT == type) && + (wifi_central_chnl <= 14)) { + h2c_parameter[0] = 0x1; + h2c_parameter[1] = wifi_central_chnl; + btcoexist->btc_get(btcoexist, + BTC_GET_U4_WIFI_BW, &wifi_bw); + if (BTC_WIFI_BW_HT40 == wifi_bw) + h2c_parameter[2] = 0x30; + else + h2c_parameter[2] = 0x20; + } + + coex_dm->wifi_chnl_info[0] = h2c_parameter[0]; + coex_dm->wifi_chnl_info[1] = h2c_parameter[1]; + coex_dm->wifi_chnl_info[2] = h2c_parameter[2]; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, + "[BTCoex], FW write 0x66=0x%x\n", + h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | + h2c_parameter[2]); + + btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter); +} + +void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type) +{ + if (type == BTC_PACKET_DHCP) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], DHCP Packet notify\n"); +} + +void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length) +{ + u8 btInfo = 0; + u8 i, rsp_source = 0; + bool bt_busy = false, limited_dig = false; + bool wifi_connected = false; + + coex_sta->c2h_bt_info_req_sent = false; + + rsp_source = tmpbuf[0]&0xf; + if(rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX) + rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW; + coex_sta->bt_info_c2h_cnt[rsp_source]++; + + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "[BTCoex], Bt info[%d], length=%d, hex data=[", + rsp_source, length); + for (i = 0; i < length; i++) { + coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i]; + if (i == 1) + btInfo = tmpbuf[i]; + if (i == length-1) + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, + "0x%02x]\n", tmpbuf[i]); + else + BTC_PRINT(BTC_MSG_INTERFACE, 
INTF_NOTIFY, + "0x%02x, ", tmpbuf[i]); + } + + if (btcoexist->manual_control) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "return for Manual CTRL<===\n"); + return; + } + + if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) { + coex_sta->bt_retry_cnt = /* [3:0]*/ + coex_sta->bt_info_c2h[rsp_source][2] & 0xf; + + coex_sta->bt_rssi = + coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10; + + coex_sta->bt_info_ext = + coex_sta->bt_info_c2h[rsp_source][4]; + + /* Here we need to resend some wifi info to BT + because bt is reset and loss of the info.*/ + if ((coex_sta->bt_info_ext & BIT1)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit1 check," + " send wifi BW&Chnl to BT!!\n"); + btcoexist->btc_get(btcoexist,BTC_GET_BL_WIFI_CONNECTED, + &wifi_connected); + if (wifi_connected) + ex_halbtc8723b2ant_media_status_notify( + btcoexist, + BTC_MEDIA_CONNECT); + else + ex_halbtc8723b2ant_media_status_notify( + btcoexist, + BTC_MEDIA_DISCONNECT); + } + + if ((coex_sta->bt_info_ext & BIT3)) { + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BT ext info bit3 check, " + "set BT NOT to ignore Wlan active!!\n"); + halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, + false); + } else { + /* BT already NOT ignore Wlan active, do nothing here.*/ + } +#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) + if ((coex_sta->bt_info_ext & BIT4)) { + /* BT auto report already enabled, do nothing*/ + } else { + halbtc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC, + true); + } +#endif + } + + /* check BIT2 first ==> check if bt is under inquiry or page scan*/ + if (btInfo & BT_INFO_8723B_2ANT_B_INQ_PAGE) + coex_sta->c2h_bt_inquiry_page = true; + else + coex_sta->c2h_bt_inquiry_page = false; + + /* set link exist status*/ + if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) { + coex_sta->bt_link_exist = false; + coex_sta->pan_exist = false; + coex_sta->a2dp_exist = false; + coex_sta->hid_exist = false; + coex_sta->sco_exist = false; + } else {// connection exists + coex_sta->bt_link_exist = true; + if (btInfo & BT_INFO_8723B_2ANT_B_FTP) + coex_sta->pan_exist = true; + else + coex_sta->pan_exist = false; + if (btInfo & BT_INFO_8723B_2ANT_B_A2DP) + coex_sta->a2dp_exist = true; + else + coex_sta->a2dp_exist = false; + if (btInfo & BT_INFO_8723B_2ANT_B_HID) + coex_sta->hid_exist = true; + else + coex_sta->hid_exist = false; + if (btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO) + coex_sta->sco_exist = true; + else + coex_sta->sco_exist = false; + } + + halbtc8723b2ant_update_bt_link_info(btcoexist); + + if (!(btInfo & BT_INFO_8723B_2ANT_B_CONNECTION)) { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "BT Non-Connected idle!!!\n"); + /* connection exists but no busy */ + } else if (btInfo == BT_INFO_8723B_2ANT_B_CONNECTION) { + coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n"); + } else if ((btInfo & BT_INFO_8723B_2ANT_B_SCO_ESCO) || + (btInfo & BT_INFO_8723B_2ANT_B_SCO_BUSY)) { + coex_dm->bt_status = + BT_8723B_2ANT_BT_STATUS_SCO_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n"); + } else if (btInfo&BT_INFO_8723B_2ANT_B_ACL_BUSY) { + coex_dm->bt_status = + BT_8723B_2ANT_BT_STATUS_ACL_BUSY; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n"); + } else { + coex_dm->bt_status = 
BT_8723B_2ANT_BT_STATUS_MAX; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], BtInfoNotify(), " + "BT Non-Defined state!!!\n"); + } + + if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) || + (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) || + (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) { + bt_busy = true; + limited_dig = true; + } else { + bt_busy = false; + limited_dig = false; + } + + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy); + + coex_dm->limited_dig = limited_dig; + btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig); + + halbtc8723b2ant_run_coexist_mechanism(btcoexist); +} + +void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist) +{ + BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n"); + + halbtc8723b2ant_wifioff_hwcfg(btcoexist); + halbtc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true); + ex_halbtc8723b2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT); +} + +void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist) +{ + struct btc_board_info *board_info = &btcoexist->board_info; + struct btc_stack_info *stack_info = &btcoexist->stack_info; + static u8 dis_ver_info_cnt = 0; + u32 fw_ver = 0, bt_patch_ver = 0; + + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "[BTCoex], ==========================" + "Periodical===========================\n"); + + if (dis_ver_info_cnt <= 5) { + dis_ver_info_cnt += 1; + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], ****************************" + "************************************\n"); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], Ant PG Num/ Ant Mech/ " + "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num, + board_info->btdm_ant_num, board_info->btdm_ant_pos); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], BT stack/ hci ext ver = %s / %d\n", + ((stack_info->profile_notified)? 
"Yes":"No"), + stack_info->hci_version); + btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, + &bt_patch_ver); + btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], CoexVer/ fw_ver/ PatchVer = " + "%d_%x/ 0x%x/ 0x%x(%d)\n", + glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant, + fw_ver, bt_patch_ver, bt_patch_ver); + BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT, + "[BTCoex], *****************************" + "***********************************\n"); + } + +#if(BT_AUTO_REPORT_ONLY_8723B_2ANT == 0) + halbtc8723b2ant_query_bt_info(btcoexist); + halbtc8723b2ant_monitor_bt_ctr(btcoexist); + halbtc8723b2ant_monitor_bt_enable_disable(btcoexist); +#else + if (halbtc8723b2ant_is_wifi_status_changed(btcoexist) || + coex_dm->auto_tdma_adjust) + halbtc8723b2ant_run_coexist_mechanism(btcoexist); +#endif +} + + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtc8723b2ant.h @@ -0,0 +1,145 @@ +/************************************************************************ + * The following is for 8723B 2Ant BT Co-exist definition + ************************************************************************/ +#define BT_AUTO_REPORT_ONLY_8723B_2ANT 1 + + +#define BT_INFO_8723B_2ANT_B_FTP BIT7 +#define BT_INFO_8723B_2ANT_B_A2DP BIT6 +#define BT_INFO_8723B_2ANT_B_HID BIT5 +#define BT_INFO_8723B_2ANT_B_SCO_BUSY BIT4 +#define BT_INFO_8723B_2ANT_B_ACL_BUSY BIT3 +#define BT_INFO_8723B_2ANT_B_INQ_PAGE BIT2 +#define BT_INFO_8723B_2ANT_B_SCO_ESCO BIT1 +#define BT_INFO_8723B_2ANT_B_CONNECTION BIT0 + +#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT 2 + +typedef enum _BT_INFO_SRC_8723B_2ANT{ + BT_INFO_SRC_8723B_2ANT_WIFI_FW = 0x0, + BT_INFO_SRC_8723B_2ANT_BT_RSP = 0x1, + BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND = 0x2, + BT_INFO_SRC_8723B_2ANT_MAX +}BT_INFO_SRC_8723B_2ANT,*PBT_INFO_SRC_8723B_2ANT; + +typedef enum _BT_8723B_2ANT_BT_STATUS{ + BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE = 0x0, + BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE = 0x1, + BT_8723B_2ANT_BT_STATUS_INQ_PAGE = 0x2, + BT_8723B_2ANT_BT_STATUS_ACL_BUSY = 0x3, + BT_8723B_2ANT_BT_STATUS_SCO_BUSY = 0x4, + BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY = 0x5, + BT_8723B_2ANT_BT_STATUS_MAX +}BT_8723B_2ANT_BT_STATUS,*PBT_8723B_2ANT_BT_STATUS; + +typedef enum _BT_8723B_2ANT_COEX_ALGO{ + BT_8723B_2ANT_COEX_ALGO_UNDEFINED = 0x0, + BT_8723B_2ANT_COEX_ALGO_SCO = 0x1, + BT_8723B_2ANT_COEX_ALGO_HID = 0x2, + BT_8723B_2ANT_COEX_ALGO_A2DP = 0x3, + BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS = 0x4, + BT_8723B_2ANT_COEX_ALGO_PANEDR = 0x5, + BT_8723B_2ANT_COEX_ALGO_PANHS = 0x6, + BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP = 0x7, + BT_8723B_2ANT_COEX_ALGO_PANEDR_HID = 0x8, + BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR = 0x9, + BT_8723B_2ANT_COEX_ALGO_HID_A2DP = 0xa, + BT_8723B_2ANT_COEX_ALGO_MAX = 0xb, +}BT_8723B_2ANT_COEX_ALGO,*PBT_8723B_2ANT_COEX_ALGO; + +struct coex_dm_8723b_2ant{ + /* fw mechanism */ + bool pre_dec_bt_pwr; + bool cur_dec_bt_pwr; + u8 pre_fw_dac_swing_lvl; + u8 cur_fw_dac_swing_lvl; + bool cur_ignore_wlan_act; + bool pre_ignore_wlan_act; + u8 pre_ps_tdma; + u8 cur_ps_tdma; + u8 ps_tdma_para[5]; + u8 ps_tdma_du_adj_type; + bool reset_tdma_adjust; + bool auto_tdma_adjust; + bool pre_ps_tdma_on; + bool cur_ps_tdma_on; + bool pre_bt_auto_report; + bool cur_bt_auto_report; + + /* sw mechanism */ + bool pre_rf_rx_lpf_shrink; + bool cur_rf_rx_lpf_shrink; + u32 bt_rf0x1e_backup; + bool pre_low_penalty_ra; + bool cur_low_penalty_ra; + bool pre_dac_swing_on; + 
u32 pre_dac_swing_lvl; + bool cur_dac_swing_on; + u32 cur_dac_swing_lvl; + bool pre_adc_back_off; + bool cur_adc_back_off; + bool pre_agc_table_en; + bool cur_agc_table_en; + u32 pre_val0x6c0; + u32 cur_val0x6c0; + u32 pre_val0x6c4; + u32 cur_val0x6c4; + u32 pre_val0x6c8; + u32 cur_val0x6c8; + u8 pre_val0x6cc; + u8 cur_val0x6cc; + bool limited_dig; + + /* algorithm related */ + u8 pre_algorithm; + u8 cur_algorithm; + u8 bt_status; + u8 wifi_chnl_info[3]; + + bool need_recover_0x948; + u16 backup_0x948; +}; + +struct coex_sta_8723b_2ant{ + bool bt_link_exist; + bool sco_exist; + bool a2dp_exist; + bool hid_exist; + bool pan_exist; + + bool under_lps; + bool under_ips; + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 bt_rssi; + u8 pre_bt_rssi_state; + u8 pre_wifi_rssi_state[4]; + bool c2h_bt_info_req_sent; + u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10]; + u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX]; + bool c2h_bt_inquiry_page; + u8 bt_retry_cnt; + u8 bt_info_ext; +}; + +/********************************************************************* + * The following is interface which will notify coex module. + *********************************************************************/ +void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type); +void ex_halbtc8723b2ant_media_status_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist, + u8 type); +void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmpbuf, u8 length); +void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist); +void ex_halbtc8723b2ant_periodical(struct btc_coexist * btcoexist); +void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist); + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.c @@ -0,0 +1,1181 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/*#if(BT_30_SUPPORT == 1)*/
+#if 1
+/***********************************************
+ *	Global variables
+ ***********************************************/
+const char *const bt_profile_string[]={
+	"NONE",
+	"A2DP",
+	"PAN",
+	"HID",
+	"SCO",
+};
+
+const char *const bt_spec_string[]={
+	"1.0b",
+	"1.1",
+	"1.2",
+	"2.0+EDR",
+	"2.1+EDR",
+	"3.0+HS",
+	"4.0",
+};
+
+const char *const bt_link_role_string[]={
+	"Master",
+	"Slave",
+};
+
+const char *const h2c_state_string[]={
+	"successful",
+	"h2c busy",
+	"rf off",
+	"fw not read",
+};
+
+const char *const io_state_string[]={
+	"IO_STATUS_SUCCESS",
+	"IO_STATUS_FAIL_CANNOT_IO",
+	"IO_STATUS_FAIL_RF_OFF",
+	"IO_STATUS_FAIL_FW_READ_CLEAR_TIMEOUT",
+	"IO_STATUS_FAIL_WAIT_IO_EVENT_TIMEOUT",
+	"IO_STATUS_INVALID_LEN",
+	"IO_STATUS_IO_IDLE_QUEUE_EMPTY",
+	"IO_STATUS_IO_INSERT_WAIT_QUEUE_FAIL",
+	"IO_STATUS_UNKNOWN_FAIL",
+	"IO_STATUS_WRONG_LEVEL",
+	"IO_STATUS_H2C_STOPPED",
+};
+
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+u8 btc_dbg_buf[100];
+
+/***************************************************
+ *	Debug related function
+ ***************************************************/
+bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+	if (!btcoexist->binded || NULL == btcoexist->adapter)
+		return false;
+
+	return true;
+}
+
+bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+
+	if (rtlpriv->link_info.b_busytraffic)
+		return true;
+	else
+		return false;
+}
+
+
+void halbtc_dbg_init(void)
+{
+	u8 i;
+
+	for (i = 0; i < BTC_MSG_MAX; i++)
+		btc_dbg_type[i] = 0;
+
+	btc_dbg_type[BTC_MSG_INTERFACE] =	\
+//		INTF_INIT		|
+//		INTF_NOTIFY		|
+		0;
+
+	btc_dbg_type[BTC_MSG_ALGORITHM] =	\
+//		ALGO_BT_RSSI_STATE	|
+//		ALGO_WIFI_RSSI_STATE	|
+//		ALGO_BT_MONITOR		|
+//		ALGO_TRACE		|
+//		ALGO_TRACE_FW		|
+//		ALGO_TRACE_FW_DETAIL	|
+//		ALGO_TRACE_FW_EXEC	|
+//		ALGO_TRACE_SW		|
+//		ALGO_TRACE_SW_DETAIL	|
+//		ALGO_TRACE_SW_EXEC	|
+		0;
+}
+
+bool halbtc_is_hw_mailbox_exist(struct btc_coexist *btcoexist)
+{
+	return true;
+}
+
+bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+	struct rtl_phy *rtlphy = &(rtlpriv->phy);
+	bool is_ht40 = true;
+	enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+
+	if (bw == HT_CHANNEL_WIDTH_20)
+		is_ht40 = false;
+	else if (bw == HT_CHANNEL_WIDTH_20_40)
+		is_ht40 = true;
+
+	return is_ht40;
+}
+
+bool halbtc_legacy(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+	struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+	bool is_legacy = false;
+
+	if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+		is_legacy = true;
+
+	return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+	struct rtl_priv *rtlpriv = adapter;
+
+	if (rtlpriv->link_info.b_tx_busy_traffic)
+		return true;
+	else
+		return false;
+}
+
+u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+	struct rtl_priv *rtlpriv =
+		(struct rtl_priv *)btcoexist->adapter;
+	u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+	if (halbtc_is_bt40(rtlpriv)){
+		wifi_bw = BTC_WIFI_BW_HT40;
+	} else {
+		if(halbtc_legacy(rtlpriv))
+			wifi_bw = BTC_WIFI_BW_LEGACY;
+		else
+			wifi_bw = BTC_WIFI_BW_HT20;
+	}
+	return wifi_bw;
+}
+
+u8 
halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 chnl = 1; + + + if (rtlphy->current_channel != 0) + chnl = rtlphy->current_channel; + BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, + "halbtc_get_wifi_central_chnl:%d\n",chnl); + return chnl; +} + +void halbtc_leave_lps(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv; + struct rtl_ps_ctl *ppsc; + bool ap_enable = false; + + rtlpriv = btcoexist->adapter; + ppsc = rtl_psc(rtlpriv); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + printk("halbtc_leave_lps()<--dont leave lps under AP mode\n"); + return; + } + + btcoexist->bt_info.bt_ctrl_lps = true; + btcoexist->bt_info.bt_lps_on = false; +} + +void halbtc_enter_lps(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv; + struct rtl_ps_ctl *ppsc; + bool ap_enable = false; + + rtlpriv = btcoexist->adapter; + ppsc = rtl_psc(rtlpriv); + + btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, + &ap_enable); + + if (ap_enable) { + printk("halbtc_enter_lps()<--dont enter lps under AP mode\n"); + return; + } + + btcoexist->bt_info.bt_ctrl_lps = true; + btcoexist->bt_info.bt_lps_on = false; +} + +void halbtc_normal_lps(struct btc_coexist *btcoexist) +{ + if (btcoexist->bt_info.bt_ctrl_lps) { + btcoexist->bt_info.bt_lps_on = false; + btcoexist->bt_info.bt_ctrl_lps = false; + } + +} + +void halbtc_leave_low_power(void) +{ +} + +void halbtc_nomal_low_power(void) +{ +} + +void halbtc_disable_low_power(void) +{ +} + +void halbtc_aggregation_check(void) +{ +} + + +u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist) +{ + return 0; +} + +s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter) +{ + struct rtl_priv *rtlpriv = adapter; + s32 undecorated_smoothed_pwdb = 0; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) + undecorated_smoothed_pwdb = + rtlpriv->dm.undecorated_smoothed_pwdb; + else /* associated entry pwdb */ + undecorated_smoothed_pwdb = + rtlpriv->dm.undecorated_smoothed_pwdb; + return undecorated_smoothed_pwdb; +} + +bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist; + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + bool *bool_tmp = (bool*)out_buf; + int *s32_tmp = (int*)out_buf; + u32 *u32_tmp = (u32*)out_buf; + u8 *u8_tmp = (u8*)out_buf; + bool tmp = false; + + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return false; + + + switch (get_type){ + case BTC_GET_BL_HS_OPERATION: + *bool_tmp = false; + break; + case BTC_GET_BL_HS_CONNECTING: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_CONNECTED: + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) + tmp = true; + + *bool_tmp = tmp; + break; + case BTC_GET_BL_WIFI_BUSY: + if(halbtc_is_wifi_busy(rtlpriv)) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_SCAN: + if (mac->act_scanning == true) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_LINK: + if (mac->link_state == MAC80211_LINKING) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_ROAM: /*TODO*/ + if (mac->link_state == MAC80211_LINKING) + *bool_tmp = true; + else + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_4_WAY_PROGRESS: /*TODO*/ + *bool_tmp = false; + + 
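+		/* 4-way handshake progress is not tracked by this driver
+		 * yet, so the query above always reports "not in progress" */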
break; + case BTC_GET_BL_WIFI_UNDER_5G: + *bool_tmp = false; /*TODO*/ + + case BTC_GET_BL_WIFI_DHCP: /*TODO*/ + break; + case BTC_GET_BL_WIFI_SOFTAP_IDLE: + *bool_tmp = true; + break; + case BTC_GET_BL_WIFI_SOFTAP_LINKING: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_AP_MODE_ENABLE: + *bool_tmp = false; + break; + case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION: + if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm) + *bool_tmp = false; + else + *bool_tmp = true; + break; + case BTC_GET_BL_WIFI_UNDER_B_MODE: + *bool_tmp = false; /*TODO*/ + break; + case BTC_GET_BL_EXT_SWITCH: + *bool_tmp = false; + break; + case BTC_GET_S4_WIFI_RSSI: + *s32_tmp = halbtc_get_wifi_rssi(rtlpriv); + break; + case BTC_GET_S4_HS_RSSI: /*TODO*/ + *s32_tmp = halbtc_get_wifi_rssi(rtlpriv); + break; + case BTC_GET_U4_WIFI_BW: + *u32_tmp = halbtc_get_wifi_bw(btcoexist); + break; + case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION: + if (halbtc_is_wifi_uplink(rtlpriv)) + *u32_tmp = BTC_WIFI_TRAFFIC_TX; + else + *u32_tmp = BTC_WIFI_TRAFFIC_RX; + break; + case BTC_GET_U4_WIFI_FW_VER: + *u32_tmp = rtlhal->fw_version; + break; + case BTC_GET_U4_BT_PATCH_VER: + *u32_tmp = halbtc_get_bt_patch_version(btcoexist); + break; + case BTC_GET_U1_WIFI_DOT11_CHNL: + *u8_tmp = rtlphy->current_channel; + break; + case BTC_GET_U1_WIFI_CENTRAL_CHNL: + *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist); + break; + case BTC_GET_U1_WIFI_HS_CHNL: + *u8_tmp = 1;/* BT_OperateChnl(rtlpriv); */ + break; + case BTC_GET_U1_MAC_PHY_MODE: + *u8_tmp = BTC_MP_UNKNOWN; + break; + case BTC_GET_U1_AP_NUM: + /* driver don't know AP num in Linux, + * So, the return value here is not right */ + *u8_tmp = 1;/* pDefMgntInfo->NumBssDesc4Query; */ + break; + + /************* 1Ant **************/ + case BTC_GET_U1_LPS_MODE: + *u8_tmp = btcoexist->pwr_mode_val[0]; + break; + + default: + break; + } + + return true; +} + +bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist; + bool *bool_tmp = (bool *)in_buf; + u8 *u8_tmp = (u8 *)in_buf; + u32 *u32_tmp = (u32 *)in_buf; + + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return false; + + switch (set_type) { + /* set some bool type variables. */ + case BTC_SET_BL_BT_DISABLE: + btcoexist->bt_info.bt_disabled = *bool_tmp; + break; + case BTC_SET_BL_BT_TRAFFIC_BUSY: + btcoexist->bt_info.bt_busy = *bool_tmp; + break; + case BTC_SET_BL_BT_LIMITED_DIG: + btcoexist->bt_info.limited_dig = *bool_tmp; + break; + case BTC_SET_BL_FORCE_TO_ROAM: + btcoexist->bt_info.force_to_roam = *bool_tmp; + break; + case BTC_SET_BL_TO_REJ_AP_AGG_PKT: + btcoexist->bt_info.reject_agg_pkt = *bool_tmp; + break; + case BTC_SET_BL_BT_CTRL_AGG_SIZE: + btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp; + break; + case BTC_SET_BL_INC_SCAN_DEV_NUM: + btcoexist->bt_info.increase_scan_dev_num = *bool_tmp; + break; + /* set some u1Byte type variables. 
*/ + case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON: + btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp; + break; + case BTC_SET_U1_AGG_BUF_SIZE: + btcoexist->bt_info.agg_buf_size = *u8_tmp; + break; + /* the following are some action which will be triggered */ + case BTC_SET_ACT_GET_BT_RSSI: + /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/ + break; + case BTC_SET_ACT_AGGREGATE_CTRL: + halbtc_aggregation_check(); + break; + + /* 1Ant */ + case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE: + btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp; + break; + case BTC_SET_UI_SCAN_SIG_COMPENSATION: + /* rtlpriv->mlmepriv.scan_compensation = *u8_tmp; */ + break; + case BTC_SET_U1_1ANT_LPS: + btcoexist->bt_info.lps_1ant = *u8_tmp; + break; + case BTC_SET_U1_1ANT_RPWM: + btcoexist->bt_info.rpwm_1ant = *u8_tmp; + break; + /* the following are some action which will be triggered */ + case BTC_SET_ACT_LEAVE_LPS: + halbtc_leave_lps(btcoexist); + break; + case BTC_SET_ACT_ENTER_LPS: + halbtc_enter_lps(btcoexist); + break; + case BTC_SET_ACT_NORMAL_LPS: + halbtc_normal_lps(btcoexist); + break; + case BTC_SET_ACT_DISABLE_LOW_POWER: + halbtc_disable_low_power(); + break; + case BTC_SET_ACT_UPDATE_ra_mask: + btcoexist->bt_info.ra_mask = *u32_tmp; + break; + case BTC_SET_ACT_SEND_MIMO_PS: + break; + case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT: + btcoexist->bt_info.force_exec_pwr_cmd_cnt++; + break; + case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/ + break; + case BTC_SET_ACT_CTRL_BT_COEX: + break; + default: + break; + } + + return true; +} + +void halbtc_display_coex_statistics(struct btc_coexist *btcoexist) +{ +} + +void halbtc_display_bt_link_info(struct btc_coexist *btcoexist) +{ +} + +void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist) +{ +} + +void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist) +{ +} + +/************************************************************ + * IO related function + ************************************************************/ +u8 halbtc_read_1byte(void *bt_context, u32 reg_addr) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_read_byte(rtlpriv, reg_addr); +} + + +u16 halbtc_read_2byte(void *bt_context, u32 reg_addr) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_read_word(rtlpriv, reg_addr); +} + + +u32 halbtc_read_4byte(void *bt_context, u32 reg_addr) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_read_dword(rtlpriv, reg_addr); +} + + +void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_write_byte(rtlpriv, reg_addr, data); +} + +void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr, + u8 bit_mask, u8 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 original_value, bit_shift = 0; + u8 i; + + if (bit_mask != MASKDWORD) {/*if not "double word" write*/ + original_value = rtl_read_byte(rtlpriv, reg_addr); + for (i=0; i<=7; i++) { + if((bit_mask>>i)&0x1) + break; + } + bit_shift = i; + data = (original_value & (~bit_mask)) | + ((data << bit_shift) & bit_mask); + } + rtl_write_byte(rtlpriv, reg_addr, data); +} + + +void halbtc_write_2byte(void *bt_context, u32 
reg_addr, u16 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_write_word(rtlpriv, reg_addr, data); +} + + +void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data) +{ + struct btc_coexist *btcoexist = + (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_write_dword(rtlpriv, reg_addr, data); +} + + +void halbtc_set_macreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data); +} + + +u32 halbtc_get_macreg(void *bt_context, u32 reg_addr, u32 bit_mask) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask); +} + + +void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask, u32 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data); +} + + +u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_get_bbreg(rtlpriv->mac80211.hw,reg_addr, bit_mask); +} + + +void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, + u32 bit_mask, u32 data) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data); +} + + +u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr, u32 bit_mask) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask); +} + + +void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id, + u32 cmd_len, u8 *cmd_buf) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + struct rtl_priv *rtlpriv = btcoexist->adapter; + + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id, + cmd_len, cmd_buf); +} + +void halbtc_display_dbg_msg(void *bt_context, u8 disp_type) +{ + struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context; + switch (disp_type) { + case BTC_DBG_DISP_COEX_STATISTICS: + halbtc_display_coex_statistics(btcoexist); + break; + case BTC_DBG_DISP_BT_LINK_INFO: + halbtc_display_bt_link_info(btcoexist); + break; + case BTC_DBG_DISP_BT_FW_VER: + halbtc_display_bt_fw_info(btcoexist); + break; + case BTC_DBG_DISP_FW_PWR_MODE_CMD: + halbtc_display_fw_pwr_mode_cmd(btcoexist); + break; + default: + break; + } +} + +bool halbtc_under_ips(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + enum rf_pwrstate rtstate; + + if (ppsc->b_inactiveps) { + rtstate = ppsc->rfpwr_state; + + if (rtstate != ERFON && + ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { + + return true; + } + } + + return false; +} + +/***************************************************************** + * Extern functions called by other module + *****************************************************************/ +bool exhalbtc_initlize_variables(struct rtl_priv *adapter) +{ + struct btc_coexist 
*btcoexist = &gl_bt_coexist; + + btcoexist->statistics.cnt_bind++; + + halbtc_dbg_init(); + + if (btcoexist->binded) + return false; + else + btcoexist->binded = true; + +#if ( defined(CONFIG_PCI_HCI)) + btcoexist->chip_interface = BTC_INTF_PCI; +#elif ( defined(CONFIG_USB_HCI)) + btcoexist->chip_interface = BTC_INTF_USB; +#elif ( defined(CONFIG_SDIO_HCI)) + btcoexist->chip_interface = BTC_INTF_SDIO; +#elif ( defined(CONFIG_GSPI_HCI)) + btcoexist->chip_interface = BTC_INTF_GSPI; +#else + btcoexist->chip_interface = BTC_INTF_UNKNOWN; +#endif + + if (NULL == btcoexist->adapter) + btcoexist->adapter = adapter; + + btcoexist->stack_info.profile_notified = false; + + btcoexist->btc_read_1byte = halbtc_read_1byte; + btcoexist->btc_write_1byte = halbtc_write_1byte; + btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte; + btcoexist->btc_read_2byte = halbtc_read_2byte; + btcoexist->btc_write_2byte = halbtc_write_2byte; + btcoexist->btc_read_4byte = halbtc_read_4byte; + btcoexist->btc_write_4byte = halbtc_write_4byte; + + btcoexist->btc_set_bb_reg = halbtc_set_bbreg; + btcoexist->btc_get_bb_reg = halbtc_get_bbreg; + + btcoexist->btc_set_rf_reg = halbtc_set_rfreg; + btcoexist->btc_get_rf_reg = halbtc_get_rfreg; + + btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd; + btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg; + + btcoexist->btc_get = halbtc_get; + btcoexist->btc_set = halbtc_set; + + btcoexist->cli_buf = &btc_dbg_buf[0]; + + btcoexist->bt_info.b_bt_ctrl_buf_size = false; + btcoexist->bt_info.agg_buf_size = 5; + + btcoexist->bt_info.increase_scan_dev_num = false; + return true; +} + +void exhalbtc_init_hw_config(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->statistics.cnt_init_hw_config++; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_init_hwconfig(btcoexist); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_init_hwconfig(btcoexist); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_init_hwconfig(btcoexist); + } + +} + +void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->statistics.cnt_init_coex_dm++; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_init_coex_dm(btcoexist); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_init_coex_dm(btcoexist); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_init_coex_dm(btcoexist); + } + + btcoexist->initilized = true; +} + +void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 ips_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_ips_notify++; + if (btcoexist->manual_control) + return; + + if (ERFOFF == type) + ips_type = BTC_IPS_ENTER; + else + ips_type = BTC_IPS_LEAVE; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type); + else if(btcoexist->board_info.btdm_ant_num == 1) + 
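+			/* single-antenna 8723B boards use the 1ant variant */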
ex_halbtc8723b1ant_ips_notify(btcoexist, ips_type); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_ips_notify(btcoexist, ips_type); + } + + halbtc_nomal_low_power(); +} + +void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 lps_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_lps_notify++; + if (btcoexist->manual_control) + return; + + if (EACTIVE == type) + lps_type = BTC_LPS_DISABLE; + else + lps_type = BTC_LPS_ENABLE; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_lps_notify(btcoexist, lps_type); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_lps_notify(btcoexist, lps_type); + } +} + +void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 scan_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_scan_notify++; + if (btcoexist->manual_control) + return; + + if (type) + scan_type = BTC_SCAN_START; + else + scan_type = BTC_SCAN_FINISH; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_scan_notify(btcoexist, scan_type); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_scan_notify(btcoexist, scan_type); + } + + halbtc_nomal_low_power(); +} + +void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 asso_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_connect_notify++; + if (btcoexist->manual_control) + return; + + if (action) + asso_type = BTC_ASSOCIATE_START; + else + asso_type = BTC_ASSOCIATE_FINISH; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_connect_notify(btcoexist, asso_type); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_connect_notify(btcoexist, asso_type); + } + + halbtc_nomal_low_power(); +} + +void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, + enum rt_media_status media_status) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 status; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_media_status_notify++; + if (btcoexist->manual_control) + return; + + if (RT_MEDIA_CONNECT == media_status) + status = BTC_MEDIA_CONNECT; + else + status = BTC_MEDIA_DISCONNECT; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_media_status_notify(btcoexist, status); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_media_status_notify(btcoexist, status); + } else if 
(rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_media_status_notify(btcoexist, status); + } + + halbtc_nomal_low_power(); +} + +void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type) +{ + u8 packet_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_special_packet_notify++; + if (btcoexist->manual_control) + return; + + /*if(PACKET_DHCP == pkt_type)*/ + packet_type = BTC_PACKET_DHCP; + /*else if(PACKET_EAPOL == pkt_type) + packet_type = BTC_PACKET_EAPOL; + else + packet_type = BTC_PACKET_UNKNOWN;*/ + + halbtc_leave_low_power(); + + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_special_packet_notify(btcoexist, + packet_type); + else if (btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_special_packet_notify(btcoexist, + packet_type); + + halbtc_nomal_low_power(); +} + +void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, + u8 *tmp_buf, u8 length) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_bt_info_notify++; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_bt_info_notify(btcoexist, tmp_buf, length); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + // ex_halbtc8192e2ant_bt_info_notify(btcoexist, tmp_buf, length); + } +} + +void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type) +{ + u8 stack_op_type; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_stack_operation_notify++; + if (btcoexist->manual_control) + return; + + stack_op_type = BTC_STACK_OP_NONE; +} + +void exhalbtc_halt_notify(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_halt_notify(btcoexist); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_halt_notify(btcoexist); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_halt_notify(btcoexist); + } +} + +void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) +{ + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; +} + +void exhalbtc_periodical(struct btc_coexist *btcoexist) +{ + struct rtl_priv *rtlpriv = btcoexist->adapter; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_periodical++; + + halbtc_leave_low_power(); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_periodical(btcoexist); + else if(btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_periodical(btcoexist); + } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) { + ex_halbtc8192e2ant_periodical(btcoexist); + } + + halbtc_nomal_low_power(); +} + +void exhalbtc_dbg_control(struct btc_coexist *btcoexist, + u8 code, u8 len, u8 *data) +{ + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + btcoexist->statistics.cnt_dbg_ctrl++; +} + +void exhalbtc_stack_update_profile_info() +{ +} + +void 
exhalbtc_update_min_bt_rssi(char bt_rssi) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->stack_info.min_bt_rssi = bt_rssi; +} + + +void exhalbtc_set_hci_version(u16 hci_version) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->stack_info.hci_version = hci_version; +} + +void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version) +{ + struct btc_coexist *btcoexist = &gl_bt_coexist; + + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + btcoexist->bt_info.bt_real_fw_ver = bt_patch_version; + btcoexist->bt_info.bt_hci_ver = bt_hci_version; +} + +void exhalbtc_set_bt_exist(bool bt_exist) +{ + gl_bt_coexist.board_info.bt_exist = bt_exist; +} + +void exhalbtc_set_chip_type(u8 chip_type) +{ + switch (chip_type) { + default: + case BT_2WIRE: + case BT_ISSC_3WIRE: + case BT_ACCEL: + case BT_RTL8756: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF; + break; + case BT_CSR_BC4: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4; + break; + case BT_CSR_BC8: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8; + break; + case BT_RTL8723A: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A; + break; + case BT_RTL8821A: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821; + break; + case BT_RTL8723B: + gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B; + break; + } +} + +void exhalbtc_set_ant_num(u8 type, u8 ant_num) +{ + if (BT_COEX_ANT_TYPE_PG == type) { + gl_bt_coexist.board_info.pg_ant_num = ant_num; + gl_bt_coexist.board_info.btdm_ant_num = ant_num; + } else if (BT_COEX_ANT_TYPE_ANTDIV == type) { + gl_bt_coexist.board_info.btdm_ant_num = ant_num; + } +} + +void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist) +{ + if (!halbtc_is_bt_coexist_available(btcoexist)) + return; + + if (btcoexist->board_info.btdm_ant_num == 2) + ex_halbtc8723b2ant_display_coex_info(btcoexist); + else if (btcoexist->board_info.btdm_ant_num == 1) + ex_halbtc8723b1ant_display_coex_info(btcoexist); +} + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/halbtcoutsrc.h @@ -0,0 +1,549 @@ +#ifndef __HALBTC_OUT_SRC_H__ +#define __HALBTC_OUT_SRC_H__ + +#include "../wifi.h" + +#define NORMAL_EXEC false +#define FORCE_EXEC true + +#define BTC_RF_A RF90_PATH_A +#define BTC_RF_B RF90_PATH_B +#define BTC_RF_C RF90_PATH_C +#define BTC_RF_D RF90_PATH_D + +#define BTC_SMSP SINGLEMAC_SINGLEPHY +#define BTC_DMDP DUALMAC_DUALPHY +#define BTC_DMSP DUALMAC_SINGLEPHY +#define BTC_MP_UNKNOWN 0xff + +#define IN +#define OUT + +#define BT_TMP_BUF_SIZE 100 + +#define BT_COEX_ANT_TYPE_PG 0 +#define BT_COEX_ANT_TYPE_ANTDIV 1 +#define BT_COEX_ANT_TYPE_DETECTED 2 + +#define BTC_MIMO_PS_STATIC 0 +#define BTC_MIMO_PS_DYNAMIC 1 + +#define BTC_RATE_DISABLE 0 +#define BTC_RATE_ENABLE 1 + +/* single Antenna definition */ +#define BTC_ANT_PATH_WIFI 0 +#define BTC_ANT_PATH_BT 1 +#define BTC_ANT_PATH_PTA 2 +/* dual Antenna definition */ +#define BTC_ANT_WIFI_AT_MAIN 0 +#define BTC_ANT_WIFI_AT_AUX 1 +/* coupler Antenna definition */ +#define BTC_ANT_WIFI_AT_CPL_MAIN 0 +#define BTC_ANT_WIFI_AT_CPL_AUX 1 + +enum btc_chip_interface{ + BTC_INTF_UNKNOWN = 0, + BTC_INTF_PCI = 1, + BTC_INTF_USB = 2, + BTC_INTF_SDIO = 3, + BTC_INTF_GSPI = 4, + BTC_INTF_MAX +}; + +enum btc_chip_type{ + BTC_CHIP_UNDEF = 0, + BTC_CHIP_CSR_BC4 = 1, + 
	BTC_CHIP_CSR_BC8 = 2,
+	BTC_CHIP_RTL8723A = 3,
+	BTC_CHIP_RTL8821 = 4,
+	BTC_CHIP_RTL8723B = 5,
+	BTC_CHIP_MAX
+};
+
+enum btc_msg_type{
+	BTC_MSG_INTERFACE = 0x0,
+	BTC_MSG_ALGORITHM = 0x1,
+	BTC_MSG_MAX
+};
+
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define INTF_INIT		BIT0
+#define INTF_NOTIFY		BIT2
+
+/* following is for BTC_ALGORITHM */
+#define ALGO_BT_RSSI_STATE	BIT0
+#define ALGO_WIFI_RSSI_STATE	BIT1
+#define ALGO_BT_MONITOR		BIT2
+#define ALGO_TRACE		BIT3
+#define ALGO_TRACE_FW		BIT4
+#define ALGO_TRACE_FW_DETAIL	BIT5
+#define ALGO_TRACE_FW_EXEC	BIT6
+#define ALGO_TRACE_SW		BIT7
+#define ALGO_TRACE_SW_DETAIL	BIT8
+#define ALGO_TRACE_SW_EXEC	BIT9
+
+
+
+#define CL_SPRINTF	snprintf
+#define CL_PRINTF	printk
+
+#define BTC_PRINT(dbgtype, dbgflag, printstr, ...)		\
+	do {							\
+		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+			printk(printstr, ##__VA_ARGS__);	\
+		}						\
+	} while(0)
+
+#define BTC_PRINT_F(dbgtype, dbgflag, printstr, ...)		\
+	do {							\
+		if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+			printk(KERN_DEBUG "%s: ", __func__);	\
+			printk(printstr, ##__VA_ARGS__);	\
+		}						\
+	} while(0)
+
+#define BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr)	\
+	do {							\
+		if(unlikely(btc_dbg_type[dbgtype] & dbgflag)) {	\
+			int __i;				\
+			u8 *__ptr = (u8 *)_ptr;			\
+			printk printstr;			\
+			for( __i = 0; __i < 6; __i++ )		\
+				printk("%02X%s", __ptr[__i], (__i==5)?"":"-");\
+			printk(KERN_DEBUG "\n");		\
+		}\
+	} while(0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+	do {							\
+		if(unlikely(btc_dbg_type[dbgtype] & dbgflag) ) {	\
+			int __i;				\
+			u8 *__ptr = (u8*)_hexdata;		\
+			printk(_titlestring);			\
+			for( __i = 0; __i < (int)_hexdatalen; __i++ ) {	\
+				printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \
+					== 0)?" ":" ");\
+				if (((__i + 1) % 16) == 0)	\
+					printk("\n");		\
+			}					\
+			printk(KERN_DEBUG "\n");		\
+		}						\
+	} while(0)
+
+
+#define BTC_RSSI_HIGH(_rssi_)	\
+	((_rssi_==BTC_RSSI_STATE_HIGH || _rssi_==BTC_RSSI_STATE_STAY_HIGH) ? \
+	true : false)
+
+#define BTC_RSSI_MEDIUM(_rssi_)	\
+	((_rssi_==BTC_RSSI_STATE_MEDIUM || _rssi_==BTC_RSSI_STATE_STAY_MEDIUM) \
+	? true : false)
+
+#define BTC_RSSI_LOW(_rssi_)	\
+	((_rssi_==BTC_RSSI_STATE_LOW || _rssi_==BTC_RSSI_STATE_STAY_LOW) ? 
\ + true : false) + + +enum btc_power_save_type { + BTC_PS_WIFI_NATIVE = 0, + BTC_PS_LPS_ON = 1, + BTC_PS_LPS_OFF = 2, + BTC_PS_LPS_MAX +}; + +struct btc_board_info { + /* The following is some board information */ + u8 bt_chip_type; + u8 pg_ant_num; /* pg ant number */ + u8 btdm_ant_num; /* ant number for btdm */ + u8 btdm_ant_pos; + bool bt_exist; +}; + +enum btc_dbg_opcode{ + BTC_DBG_SET_COEX_NORMAL = 0x0, + BTC_DBG_SET_COEX_WIFI_ONLY = 0x1, + BTC_DBG_SET_COEX_BT_ONLY = 0x2, + BTC_DBG_MAX +}; + +enum btc_rssi_state{ + BTC_RSSI_STATE_HIGH = 0x0, + BTC_RSSI_STATE_MEDIUM = 0x1, + BTC_RSSI_STATE_LOW = 0x2, + BTC_RSSI_STATE_STAY_HIGH = 0x3, + BTC_RSSI_STATE_STAY_MEDIUM = 0x4, + BTC_RSSI_STATE_STAY_LOW = 0x5, + BTC_RSSI_MAX +}; + +enum btc_wifi_role{ + BTC_ROLE_STATION = 0x0, + BTC_ROLE_AP = 0x1, + BTC_ROLE_IBSS = 0x2, + BTC_ROLE_HS_MODE = 0x3, + BTC_ROLE_MAX +}; + +enum btc_wifi_bw_mode{ + BTC_WIFI_BW_LEGACY = 0x0, + BTC_WIFI_BW_HT20 = 0x1, + BTC_WIFI_BW_HT40 = 0x2, + BTC_WIFI_BW_MAX +}; + +enum btc_wifi_traffic_dir{ + BTC_WIFI_TRAFFIC_TX = 0x0, + BTC_WIFI_TRAFFIC_RX = 0x1, + BTC_WIFI_TRAFFIC_MAX +}; + +enum btc_wifi_pnp{ + BTC_WIFI_PNP_WAKE_UP = 0x0, + BTC_WIFI_PNP_SLEEP = 0x1, + BTC_WIFI_PNP_MAX +}; + + +enum btc_get_type{ + /* type bool */ + BTC_GET_BL_HS_OPERATION, + BTC_GET_BL_HS_CONNECTING, + BTC_GET_BL_WIFI_CONNECTED, + BTC_GET_BL_WIFI_BUSY, + BTC_GET_BL_WIFI_SCAN, + BTC_GET_BL_WIFI_LINK, + BTC_GET_BL_WIFI_DHCP, + BTC_GET_BL_WIFI_SOFTAP_IDLE, + BTC_GET_BL_WIFI_SOFTAP_LINKING, + BTC_GET_BL_WIFI_IN_EARLY_SUSPEND, + BTC_GET_BL_WIFI_ROAM, + BTC_GET_BL_WIFI_4_WAY_PROGRESS, + BTC_GET_BL_WIFI_UNDER_5G, + BTC_GET_BL_WIFI_AP_MODE_ENABLE, + BTC_GET_BL_WIFI_ENABLE_ENCRYPTION, + BTC_GET_BL_WIFI_UNDER_B_MODE, + BTC_GET_BL_EXT_SWITCH, + + /* type s4Byte */ + BTC_GET_S4_WIFI_RSSI, + BTC_GET_S4_HS_RSSI, + + /* type u32 */ + BTC_GET_U4_WIFI_BW, + BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, + BTC_GET_U4_WIFI_FW_VER, + BTC_GET_U4_BT_PATCH_VER, + + /* type u1Byte */ + BTC_GET_U1_WIFI_DOT11_CHNL, + BTC_GET_U1_WIFI_CENTRAL_CHNL, + BTC_GET_U1_WIFI_HS_CHNL, + BTC_GET_U1_MAC_PHY_MODE, + BTC_GET_U1_AP_NUM, + + /* for 1Ant */ + BTC_GET_U1_LPS_MODE, + BTC_GET_BL_BT_SCO_BUSY, + + /* for test mode */ + BTC_GET_DRIVER_TEST_CFG, +#if 0 + BTC_GET_U1_LPS, + BTC_GET_U1_RPWM, +#endif + BTC_GET_MAX +}; + + +enum btc_set_type{ + /* type bool */ + BTC_SET_BL_BT_DISABLE, + BTC_SET_BL_BT_TRAFFIC_BUSY, + BTC_SET_BL_BT_LIMITED_DIG, + BTC_SET_BL_FORCE_TO_ROAM, + BTC_SET_BL_TO_REJ_AP_AGG_PKT, + BTC_SET_BL_BT_CTRL_AGG_SIZE, + BTC_SET_BL_INC_SCAN_DEV_NUM, + + /* type u1Byte */ + BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON, + BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE, + BTC_SET_UI_SCAN_SIG_COMPENSATION, + BTC_SET_U1_AGG_BUF_SIZE, + + /* type trigger some action */ + BTC_SET_ACT_GET_BT_RSSI, + BTC_SET_ACT_AGGREGATE_CTRL, + + /********* for 1Ant **********/ + /* type bool */ + BTC_SET_BL_BT_SCO_BUSY, + /* type u1Byte */ + BTC_SET_U1_1ANT_LPS, + BTC_SET_U1_1ANT_RPWM, + /* type trigger some action */ + BTC_SET_ACT_LEAVE_LPS, + BTC_SET_ACT_ENTER_LPS, + BTC_SET_ACT_NORMAL_LPS, + BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT, + BTC_SET_ACT_DISABLE_LOW_POWER, + BTC_SET_ACT_UPDATE_ra_mask, + BTC_SET_ACT_SEND_MIMO_PS, + /* BT Coex related */ + BTC_SET_ACT_CTRL_BT_INFO, + BTC_SET_ACT_CTRL_BT_COEX, + /***************************/ + BTC_SET_MAX +}; + +enum btc_dbg_disp_type{ + BTC_DBG_DISP_COEX_STATISTICS = 0x0, + BTC_DBG_DISP_BT_LINK_INFO = 0x1, + BTC_DBG_DISP_BT_FW_VER = 0x2, + BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3, + BTC_DBG_DISP_MAX +}; + +enum 
btc_notify_type_ips{ + BTC_IPS_LEAVE = 0x0, + BTC_IPS_ENTER = 0x1, + BTC_IPS_MAX +}; + +enum btc_notify_type_lps{ + BTC_LPS_DISABLE = 0x0, + BTC_LPS_ENABLE = 0x1, + BTC_LPS_MAX +}; + +enum btc_notify_type_scan{ + BTC_SCAN_FINISH = 0x0, + BTC_SCAN_START = 0x1, + BTC_SCAN_MAX +}; + +enum btc_notify_type_associate{ + BTC_ASSOCIATE_FINISH = 0x0, + BTC_ASSOCIATE_START = 0x1, + BTC_ASSOCIATE_MAX +}; + +enum btc_notify_type_media_status{ + BTC_MEDIA_DISCONNECT = 0x0, + BTC_MEDIA_CONNECT = 0x1, + BTC_MEDIA_MAX +}; + +enum btc_notify_type_special_packet{ + BTC_PACKET_UNKNOWN = 0x0, + BTC_PACKET_DHCP = 0x1, + BTC_PACKET_ARP = 0x2, + BTC_PACKET_EAPOL = 0x3, + BTC_PACKET_MAX +}; + +enum btc_notify_type_stack_operation{ + BTC_STACK_OP_NONE = 0x0, + BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1, + BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2, + BTC_STACK_OP_MAX +}; + + +typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr); + +typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr); + +typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr); + +typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data); + +typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr, + u8 bit_mask, u8 data1b); + +typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data); + +typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data); + +typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr, + u8 bit_mask, u8 data); + +typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr, + u32 bit_mask, u32 data); + +typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr, + u32 bit_mask); + +typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr, + u32 bit_mask, u32 data); + +typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path, + u32 reg_addr, u32 bit_mask); + +typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id, + u32 cmd_len, u8 *cmd_buffer); + +typedef bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf); + +typedef bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf); + +typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type); + +struct btc_bt_info { + bool bt_disabled; + u8 rssi_adjust_for_agc_table_on; + u8 rssi_adjust_for_1ant_coex_type; + bool bt_busy; + u8 agg_buf_size; + bool limited_dig; + bool reject_agg_pkt; + bool b_bt_ctrl_buf_size; + bool increase_scan_dev_num; + u16 bt_hci_ver; + u16 bt_real_fw_ver; + u8 bt_fw_ver; + + /* the following is for 1Ant solution */ + bool bt_ctrl_lps; + bool bt_pwr_save_mode; + bool bt_lps_on; + bool force_to_roam; + u8 force_exec_pwr_cmd_cnt; + u8 lps_1ant; + u8 rpwm_1ant; + u32 ra_mask; +}; + +struct btc_stack_info { + bool profile_notified; + u16 hci_version; /* stack hci version */ + u8 num_of_link; + bool bt_link_exist; + bool sco_exist; + bool acl_exist; + bool a2dp_exist; + bool hid_exist; + u8 num_of_hid; + bool pan_exist; + bool unknown_acl_exist; + char min_bt_rssi; +}; + +struct btc_statistics { + u32 cnt_bind; + u32 cnt_init_hw_config; + u32 cnt_init_coex_dm; + u32 cnt_ips_notify; + u32 cnt_lps_notify; + u32 cnt_scan_notify; + u32 cnt_connect_notify; + u32 cnt_media_status_notify; + u32 cnt_special_packet_notify; + u32 cnt_bt_info_notify; + u32 cnt_periodical; + u32 cnt_stack_operation_notify; + u32 cnt_dbg_ctrl; +}; + +struct btc_bt_link_info { + bool bt_link_exist; + bool sco_exist; + bool sco_only; + bool a2dp_exist; + bool a2dp_only; + bool hid_exist; + bool hid_only; + bool pan_exist; + bool pan_only; +}; + +enum 
btc_antenna_pos { + BTC_ANTENNA_AT_MAIN_PORT = 0x1, + BTC_ANTENNA_AT_AUX_PORT = 0x2, +}; + +struct btc_coexist { + /* make sure only one adapter can bind the data context */ + bool binded; + /* default adapter */ + void *adapter; + struct btc_board_info board_info; + /* some bt info referenced by non-bt module */ + struct btc_bt_info bt_info; + struct btc_stack_info stack_info; + enum btc_chip_interface chip_interface; + struct btc_bt_link_info bt_link_info; + + bool initilized; + bool stop_coex_dm; + bool manual_control; + u8 *cli_buf; + struct btc_statistics statistics; + u8 pwr_mode_val[10]; + + /* function pointers + * io related */ + bfp_btc_r1 btc_read_1byte; + bfp_btc_w1 btc_write_1byte; + bfp_btc_w1_bit_mak btc_write_1byte_bitmask; + bfp_btc_r2 btc_read_2byte; + bfp_btc_w2 btc_write_2byte; + bfp_btc_r4 btc_read_4byte; + bfp_btc_w4 btc_write_4byte; + + bfp_btc_set_bb_reg btc_set_bb_reg; + bfp_btc_get_bb_reg btc_get_bb_reg; + + + bfp_btc_set_rf_reg btc_set_rf_reg; + bfp_btc_get_rf_reg btc_get_rf_reg; + + + bfp_btc_fill_h2c btc_fill_h2c; + + bfp_btc_disp_dbg_msg btc_disp_dbg_msg; + + bfp_btc_get btc_get; + bfp_btc_set btc_set; +}; + +bool halbtc_is_wifi_uplink(struct rtl_priv *adapter); + + +extern struct btc_coexist gl_bt_coexist; + +bool exhalbtc_initlize_variables(struct rtl_priv* adapter); +void exhalbtc_init_hw_config(struct btc_coexist *btcoexist); +void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist); +void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action); +void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, + enum rt_media_status media_status); +void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type); +void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, + u8 length); +void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type); +void exhalbtc_halt_notify(struct btc_coexist *btcoexist); +void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); +void exhalbtc_periodical(struct btc_coexist *btcoexist); +void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len, + u8 *data); +void exhalbtc_stack_update_profile_info(void); +void exhalbtc_set_hci_version(u16 hci_version); +void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version); +void exhalbtc_update_min_bt_rssi(char bt_rssi); +void exhalbtc_set_bt_exist(bool bt_exist); +void exhalbtc_set_chip_type(u8 chip_type); +void exhalbtc_set_ant_num(u8 type, u8 ant_num); +void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist); +void exhalbtc_signal_compensation(struct btc_coexist *btcoexist, + u8 *rssi_wifi, u8 *rssi_bt); +void exhalbtc_lps_leave(struct btc_coexist *btcoexist); +void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/rtl_btc.c @@ -0,0 +1,236 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ +#include +#include + +#include "rtl_btc.h" +#include "halbt_precomp.h" + +struct rtl_btc_ops rtl_btc_operation ={ + .btc_init_variables = rtl_btc_init_variables, + .btc_init_hal_vars = rtl_btc_init_hal_vars, + .btc_init_hw_config = rtl_btc_init_hw_config, + .btc_ips_notify = rtl_btc_ips_notify, + .btc_scan_notify = rtl_btc_scan_notify, + .btc_connect_notify = rtl_btc_connect_notify, + .btc_mediastatus_notify = rtl_btc_mediastatus_notify, + .btc_periodical = rtl_btc_periodical, + .btc_halt_notify = rtl_btc_halt_notify, + .btc_btinfo_notify = rtl_btc_btinfo_notify, + .btc_is_limited_dig = rtl_btc_is_limited_dig, + .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, + .btc_is_bt_disabled = rtl_btc_is_bt_disabled, +}; + + +void rtl_btc_init_variables(struct rtl_priv *rtlpriv) +{ + + exhalbtc_initlize_variables(rtlpriv); +} + +void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv) +{ + u8 ant_num; + u8 bt_exist; + u8 bt_type; + ant_num = rtl_get_hwpg_ant_num(rtlpriv); + RT_TRACE(COMP_INIT, DBG_DMESG, ("%s, antNum is %d\n", __func__, ant_num)); + + bt_exist = rtl_get_hwpg_bt_exist(rtlpriv); + RT_TRACE(COMP_INIT, DBG_DMESG, ("%s, bt_exist is %d\n", __func__, bt_exist)); + exhalbtc_set_bt_exist(bt_exist); + + bt_type = rtl_get_hwpg_bt_type(rtlpriv); + RT_TRACE(COMP_INIT, DBG_DMESG, ("%s, bt_type is %d\n", __func__, bt_type)); + exhalbtc_set_chip_type(bt_type); + + exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num); + +} + + +void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv) +{ + exhalbtc_init_hw_config(&gl_bt_coexist); + exhalbtc_init_coex_dm(&gl_bt_coexist); +} + + +void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type) +{ + exhalbtc_ips_notify(&gl_bt_coexist, type); +} + + +void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype) +{ + exhalbtc_scan_notify(&gl_bt_coexist, scantype); +} + + +void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action) +{ + exhalbtc_connect_notify(&gl_bt_coexist, action); +} + + +void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus) +{ + exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus); +} + +void rtl_btc_periodical(struct rtl_priv *rtlpriv) +{ +// rtl_bt_dm_monitor(); + exhalbtc_periodical(&gl_bt_coexist); +} + +void rtl_btc_halt_notify(void) +{ + exhalbtc_halt_notify(&gl_bt_coexist); +} + +void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 * tmp_buf, u8 length) +{ + exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); +} + +bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) +{ + return gl_bt_coexist.bt_info.limited_dig; +} + +bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv) +{ + bool bt_change_edca = false; + u32 cur_edca_val; + u32 edca_bt_hs_uplink = 0x5ea42b, 
edca_bt_hs_downlink = 0x5ea42b; + u32 edca_hs; + u32 edca_addr = 0x504; + + cur_edca_val = rtl_read_dword(rtlpriv, edca_addr); + if (halbtc_is_wifi_uplink(rtlpriv)){ + if (cur_edca_val != edca_bt_hs_uplink){ + edca_hs = edca_bt_hs_uplink; + bt_change_edca = true; + } + }else{ + if (cur_edca_val != edca_bt_hs_downlink){ + edca_hs = edca_bt_hs_downlink; + bt_change_edca = true; + } + } + + if(bt_change_edca) + rtl_write_dword(rtlpriv, edca_addr, edca_hs); + + return true; +} + +bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv) +{ + if (gl_bt_coexist.bt_info.bt_disabled) + return true; + else + return false; +} + +struct rtl_btc_ops *rtl_btc_get_ops_pointer(void) +{ + return &rtl_btc_operation; +} +//EXPORT_SYMBOL(rtl_btc_get_ops_pointer); + +u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) +{ + u8 num; + + if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) + num = 2; + else + num = 1; + + return num; +} + +#if 0 +enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum rt_media_status m_status = RT_MEDIA_DISCONNECT; + + u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; + + if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + m_status = RT_MEDIA_CONNECT; + } + + return m_status; +} +#endif + +u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv) +{ + return rtlpriv->btcoexist.btc_info.btcoexist; +} + +u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv) +{ + return rtlpriv->btcoexist.btc_info.bt_type; +} + + +#if 0 + +MODULE_AUTHOR("Page He "); +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_AUTHOR("Larry Finger "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); + +static int __init rtl_btcoexist_module_init(void) +{ + + //printk("%s, rtlpriv->btc_ops.btc_init_variables addr is %p\n", __func__, rtlpriv->btc_ops.btc_init_variables); + + return 0; +} + +static void __exit rtl_btcoexist_module_exit(void) +{ + return; +} + +module_init(rtl_btcoexist_module_init); +module_exit(rtl_btcoexist_module_exit); + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h +++ linux-3.13.0/drivers/staging/rtl8821ae/btcoexist/rtl_btc.h @@ -0,0 +1,66 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_BTC_H__ +#define __RTL_BTC_H__ + +#include "halbt_precomp.h" + + + +void rtl_btc_init_variables(struct rtl_priv *rtlpriv); +void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv); +void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv); +void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type); +void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype); +void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action); +void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, enum rt_media_status mstatus); +void rtl_btc_periodical(struct rtl_priv *rtlpriv); +void rtl_btc_halt_notify(void); +void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 * tmpBuf, u8 length); +bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); +bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); +bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv); + + +//extern struct rtl_btc_ops rtl_btc_operation; +extern struct rtl_btc_ops *rtl_btc_get_ops_pointer(void); + +u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv); +u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv); +//enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw); + + + + + + + + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/cam.c +++ linux-3.13.0/drivers/staging/rtl8821ae/cam.c @@ -0,0 +1,354 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ +#include "wifi.h" +#include "cam.h" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +#include +#endif + +void rtl_cam_reset_sec_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->sec.use_defaultkey = false; + rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION; + rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION; + memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN); + memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE); + rtlpriv->sec.pairwise_key = NULL; +} + +static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no, + u8 *mac_addr, u8 *key_cont_128, u16 us_config) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + u32 target_command; + u32 target_content = 0; + u8 entry_i; + + RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :", + key_cont_128, 16); + + for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + target_command = entry_i + CAM_CONTENT_COUNT * entry_no; + target_command = target_command | BIT(31) | BIT(16); + + if (entry_i == 0) { + target_content = (u32) (*(mac_addr + 0)) << 16 | + (u32) (*(mac_addr + 1)) << 24 | (u32) us_config; + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], + target_content); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_command); + + RT_TRACE(COMP_SEC, DBG_LOUD, + ("WRITE %x: %x \n", + rtlpriv->cfg->maps[WCAMI], target_content)); + RT_TRACE(COMP_SEC, DBG_LOUD, + ("The Key ID is %d\n", entry_no)); + RT_TRACE(COMP_SEC, DBG_LOUD, + ("WRITE %x: %x \n", + rtlpriv->cfg->maps[RWCAM], target_command)); + + } else if (entry_i == 1) { + + target_content = (u32) (*(mac_addr + 5)) << 24 | + (u32) (*(mac_addr + 4)) << 16 | + (u32) (*(mac_addr + 3)) << 8 | + (u32) (*(mac_addr + 2)); + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], + target_content); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_command); + + RT_TRACE(COMP_SEC, DBG_LOUD, + ("WRITE A4: %x \n", target_content)); + RT_TRACE(COMP_SEC, DBG_LOUD, + ("WRITE A0: %x \n", target_command)); + + } else { + + target_content = + (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3)) << + 24 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2)) + << 16 | + (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8 + | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0)); + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], + target_content); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_command); + udelay(100); + + RT_TRACE(COMP_SEC, DBG_LOUD, + ("WRITE A4: %x \n", target_content)); + RT_TRACE(COMP_SEC, DBG_LOUD, + ("WRITE A0: %x \n", target_command)); + } + } + + RT_TRACE(COMP_SEC, DBG_LOUD, + ("after set key, usconfig:%x\n", us_config)); +} + +u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, + u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, + u32 ul_default_key, u8 *key_content) +{ + u32 us_config; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_SEC, DBG_DMESG, + ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, " + "ulUseDK=%x MacAddr %pM\n", + ul_entry_idx, ul_key_id, ul_enc_alg, + ul_default_key, mac_addr)); + + if (ul_key_id == TOTAL_CAM_ENTRY) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("ulKeyId exceed!\n")); + return 0; + } + + if (ul_default_key == 1) { + us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2); + } else { + us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id; + } + + rtl_cam_program_entry(hw, ul_entry_idx, mac_addr, + (u8 *) key_content, us_config); + + 
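+	/* the entry programmed above packs us_config (valid bit, algorithm,
+	 * key id) plus the first two MAC bytes into CAM word 0, the rest of
+	 * the MAC address into word 1, and the key material into the
+	 * following words of the entry */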
RT_TRACE(COMP_SEC, DBG_DMESG, ("end \n")); + + return 1; + +} +//EXPORT_SYMBOL(rtl_cam_add_one_entry); + +int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, + u8 *mac_addr, u32 ul_key_id) +{ + u32 ul_command; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id)); + + ul_command = ul_key_id * CAM_CONTENT_COUNT; + ul_command = ul_command | BIT(31) | BIT(16); + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command); + + RT_TRACE(COMP_SEC, DBG_DMESG, + ("rtl_cam_delete_one_entry(): WRITE A4: %x \n", 0)); + RT_TRACE(COMP_SEC, DBG_DMESG, + ("rtl_cam_delete_one_entry(): WRITE A0: %x \n", ul_command)); + + return 0; + +} +//EXPORT_SYMBOL(rtl_cam_delete_one_entry); + +void rtl_cam_reset_all_entry(struct ieee80211_hw *hw) +{ + u32 ul_command; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + ul_command = BIT(31) | BIT(30); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command); +} +//EXPORT_SYMBOL(rtl_cam_reset_all_entry); + +void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + u32 ul_command; + u32 ul_content; + u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES]; + + switch (rtlpriv->sec.pairwise_enc_algorithm) { + case WEP40_ENCRYPTION: + ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40]; + break; + case WEP104_ENCRYPTION: + ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104]; + break; + case TKIP_ENCRYPTION: + ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP]; + break; + case AESCCMP_ENCRYPTION: + ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES]; + break; + default: + ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES]; + } + + ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2); + + ul_content |= BIT(15); + ul_command = CAM_CONTENT_COUNT * uc_index; + ul_command = ul_command | BIT(31) | BIT(16); + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command); + + RT_TRACE(COMP_SEC, DBG_DMESG, + ("rtl_cam_mark_invalid(): WRITE A4: %x \n", ul_content)); + RT_TRACE(COMP_SEC, DBG_DMESG, + ("rtl_cam_mark_invalid(): WRITE A0: %x \n", ul_command)); +} +//EXPORT_SYMBOL(rtl_cam_mark_invalid); + +void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + u32 ul_command; + u32 ul_content; + u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES]; + u8 entry_i; + + switch (rtlpriv->sec.pairwise_enc_algorithm) { + case WEP40_ENCRYPTION: + ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40]; + break; + case WEP104_ENCRYPTION: + ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104]; + break; + case TKIP_ENCRYPTION: + ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP]; + break; + case AESCCMP_ENCRYPTION: + ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES]; + break; + default: + ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES]; + } + + for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + + if (entry_i == 0) { + ul_content = + (uc_index & 0x03) | ((u16) (ul_encalgo) << 2); + ul_content |= BIT(15); + + } else { + ul_content = 0; + } + + ul_command = CAM_CONTENT_COUNT * uc_index + entry_i; + ul_command = ul_command | BIT(31) | BIT(16); + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command); + + RT_TRACE(COMP_SEC, DBG_LOUD, + ("rtl_cam_empty_entry(): WRITE A4: %x \n", + ul_content)); + RT_TRACE(COMP_SEC, DBG_LOUD, + ("rtl_cam_empty_entry(): WRITE A0: %x \n", + 
ul_command)); + } + +} +//EXPORT_SYMBOL(rtl_cam_empty_entry); + +u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> 4; + u8 entry_idx = 0; + u8 i, *addr; + + if (NULL == sta_addr) { + RT_TRACE(COMP_SEC, DBG_EMERG, + ("sta_addr is NULL.\n")); + return TOTAL_CAM_ENTRY; + } + /* Does STA already exist? */ + for (i = 4; i < TOTAL_CAM_ENTRY; i++) { + addr = rtlpriv->sec.hwsec_cam_sta_addr[i]; + if(memcmp(addr, sta_addr, ETH_ALEN) == 0) + return i; + } + /* Get a free CAM entry. */ + for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) { + if ((bitmap & BIT(0)) == 0) { + RT_TRACE(COMP_SEC, DBG_EMERG, + ("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n", + rtlpriv->sec.hwsec_cam_bitmap, entry_idx)); + rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx; + memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx], + sta_addr, ETH_ALEN); + return entry_idx; + } + bitmap = bitmap >>1; + } + return TOTAL_CAM_ENTRY; +} +//EXPORT_SYMBOL(rtl_cam_get_free_entry); + +void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 bitmap; + u8 i, *addr; + + if (NULL == sta_addr) { + RT_TRACE(COMP_SEC, DBG_EMERG, + ("sta_addr is NULL.\n")); + } + + if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\ + sta_addr[4]|sta_addr[5]) == 0) { + RT_TRACE(COMP_SEC, DBG_EMERG, + ("sta_addr is 00:00:00:00:00:00.\n")); + return; + } + /* Does STA already exist? */ + for (i = 4; i < TOTAL_CAM_ENTRY; i++) { + addr = rtlpriv->sec.hwsec_cam_sta_addr[i]; + bitmap = (rtlpriv->sec.hwsec_cam_bitmap) >> i; + if (((bitmap & BIT(0)) == BIT(0)) && + (memcmp(addr, sta_addr, ETH_ALEN) == 0)) { + /* Remove from HW Security CAM */ + memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN); + rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i); + printk("&&&&&&&&&del entry %d\n",i); + } + } + return; +} +//EXPORT_SYMBOL(rtl_cam_del_entry); --- linux-3.13.0.orig/drivers/staging/rtl8821ae/cam.h +++ linux-3.13.0/drivers/staging/rtl8821ae/cam.h @@ -0,0 +1,56 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_CAM_H_ +#define __RTL_CAM_H_ + +#define CAM_CONTENT_COUNT 8 + +#define CFG_DEFAULT_KEY BIT(5) +#define CFG_VALID BIT(15) + +#define PAIRWISE_KEYIDX 0 +#define CAM_PAIRWISE_KEY_POSITION 4 + +#define CAM_CONFIG_USEDK 1 +#define CAM_CONFIG_NO_USEDK 0 + +extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw); +extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, + u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg, + u32 ul_default_key, u8 *key_content); +int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr, + u32 ul_key_id); +void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index); +void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index); +void rtl_cam_reset_sec_info(struct ieee80211_hw *hw); +u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr); +void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr); + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/compat.h +++ linux-3.13.0/drivers/staging/rtl8821ae/compat.h @@ -0,0 +1,125 @@ +#ifndef __RTL_COMPAT_H__ +#define __RTL_COMPAT_H__ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) +/* + * Use this if you want to use the same suspend and resume callbacks for suspend + * to RAM and hibernation. + */ +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +struct dev_pm_ops name = { \ + .suspend = suspend_fn, \ + .resume = resume_fn, \ + .freeze = suspend_fn, \ + .thaw = resume_fn, \ + .poweroff = suspend_fn, \ + .restore = resume_fn, \ +} + +#define compat_pci_suspend(fn) \ + int fn##_compat(struct pci_dev *pdev, pm_message_t state) \ + { \ + int r; \ + \ + r = fn(&pdev->dev); \ + if (r) \ + return r; \ + \ + pci_save_state(pdev); \ + pci_disable_device(pdev); \ + pci_set_power_state(pdev, PCI_D3hot); \ + \ + return 0; \ + } + +#define compat_pci_resume(fn) \ + int fn##_compat(struct pci_dev *pdev) \ + { \ + int r; \ + \ + pci_set_power_state(pdev, PCI_D0); \ + r = pci_enable_device(pdev); \ + if (r) \ + return r; \ + pci_restore_state(pdev); \ + \ + return fn(&pdev->dev); \ + } +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +#define RX_FLAG_MACTIME_MPDU RX_FLAG_TSFT +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +#define RX_FLAG_MACTIME_MPDU RX_FLAG_MACTIME_START +#else +#endif +//#define NETDEV_TX_OK +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) +#define IEEE80211_KEY_FLAG_SW_MGMT IEEE80211_KEY_FLAG_SW_MGMT_TX +#endif + +struct ieee80211_mgmt_compat { + __le16 frame_control; + __le16 duration; + u8 da[6]; + u8 sa[6]; + u8 bssid[6]; + __le16 seq_ctrl; + union { + struct { + u8 category; + union { + struct { + u8 action_code; + u8 dialog_token; + u8 status_code; + u8 variable[0]; + } __attribute__ ((packed)) wme_action; + struct{ + u8 action_code; + u8 dialog_token; + __le16 capab; + __le16 timeout; + __le16 start_seq_num; + } __attribute__((packed)) addba_req; + struct{ + u8 action_code; + u8 dialog_token; + __le16 status; + __le16 capab; + __le16 timeout; + } __attribute__((packed)) addba_resp; + struct{ + u8 action_code; + __le16 params; + __le16 reason_code; + } __attribute__((packed)) delba; + struct{ + u8 action_code; + /* capab_info for open and confirm, + * reason for close + */ + __le16 aux; + /* Followed in plink_confirm by status + * code, AID and supported rates, + * and directly by supported rates in + * plink_open and plink_close + */ + u8 variable[0]; + } __attribute__((packed)) 
plink_action; + struct{ + u8 action_code; + u8 variable[0]; + } __attribute__((packed)) mesh_action; + struct { + u8 action; + u8 smps_control; + } __attribute__ ((packed)) ht_smps; + } u; + } __attribute__ ((packed)) action; + } u; +} __attribute__ ((packed)); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/core.c +++ linux-3.13.0/drivers/staging/rtl8821ae/core.c @@ -0,0 +1,1464 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "wifi.h" +#include "core.h" +#include "cam.h" +#include "base.h" +#include "ps.h" + +#include "btcoexist/rtl_btc.h" + +/*mutex for start & stop is must here. */ +static int rtl_op_start(struct ieee80211_hw *hw) +{ + int err = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (!is_hal_stop(rtlhal)) + return 0; + if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) + return 0; + mutex_lock(&rtlpriv->locks.conf_mutex); + err = rtlpriv->intf_ops->adapter_start(hw); + if (err) + goto out; + rtl_watch_dog_timer_callback((unsigned long)hw); + +out: + mutex_unlock(&rtlpriv->locks.conf_mutex); + return err; +} + +static void rtl_op_stop(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if (is_hal_stop(rtlhal)) + return; + + /* here is must, because adhoc do stop and start, + * but stop with RFOFF may cause something wrong, + * like adhoc TP */ + if (unlikely(ppsc->rfpwr_state == ERFOFF)) + rtl_ips_nic_on(hw); + + mutex_lock(&rtlpriv->locks.conf_mutex); + + mac->link_state = MAC80211_NOLINK; + memset(mac->bssid, 0, 6); + mac->vendor = PEER_UNKNOWN; + + /*reset sec info */ + rtl_cam_reset_sec_info(hw); + + rtl_deinit_deferred_work(hw); + rtlpriv->intf_ops->adapter_stop(hw); + + mutex_unlock(&rtlpriv->locks.conf_mutex); +} + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_tcb_desc tcb_desc; + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + + if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) + goto err_free; + + if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) + goto err_free; + + 
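+	/*
+	 * waitq_insert() is understood here to return true when it has taken
+	 * ownership of the skb and queued it in the driver's software wait
+	 * queue for a later flush; only when it declines (returns false) is
+	 * the frame handed straight to the interface via adapter_tx().  This
+	 * is an inference from the call pattern below, not a documented
+	 * contract of the intf_ops interface.
+	 */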
if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) + rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); + + return NETDEV_TX_OK; + +err_free: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} +#else +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) +static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +#else +/**/ +static void rtl_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +/**/ +#endif +/**/ +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_tcb_desc tcb_desc; + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + + if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) + goto err_free; + + if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) + goto err_free; + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) + rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); +#else +/**/ + if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb)) + rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc); +/**/ +#endif +/**/ + return; + +err_free: + dev_kfree_skb_any(skb); + return; +} +/**/ +#endif +/**/ + +static int rtl_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + int err = 0; + + if (mac->vif) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("vif has been set!! mac->vif = 0x%p\n", mac->vif)); + return -EOPNOTSUPP; + } + +/*This flag is not defined before kernel 3.4*/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; +#endif + + rtl_ips_nic_on(hw); + + mutex_lock(&rtlpriv->locks.conf_mutex); +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_P2P_CLIENT: + mac->p2p = P2P_ROLE_CLIENT; + /*fall through*/ +#else +/**/ + switch (vif->type) { +/**/ +#endif +/**/ + case NL80211_IFTYPE_STATION: + if (mac->beacon_enabled == 1) { + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("NL80211_IFTYPE_STATION \n")); + mac->beacon_enabled = 0; + rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, + rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]); + } + break; + case NL80211_IFTYPE_ADHOC: + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("NL80211_IFTYPE_ADHOC \n")); + + mac->link_state = MAC80211_LINKED; + rtlpriv->cfg->ops->set_bcn_reg(hw); + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) + mac->basic_rates = 0xfff; + else + mac->basic_rates = 0xff0; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, + (u8 *) (&mac->basic_rates)); + + break; +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + case NL80211_IFTYPE_P2P_GO: + mac->p2p = P2P_ROLE_GO; + /*fall through*/ +#endif +/**/ + case NL80211_IFTYPE_AP: + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("NL80211_IFTYPE_AP \n")); + + mac->link_state = MAC80211_LINKED; + rtlpriv->cfg->ops->set_bcn_reg(hw); + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) + mac->basic_rates = 0xfff; + else + mac->basic_rates = 0xff0; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, + (u8 *) (&mac->basic_rates)); + break; + case NL80211_IFTYPE_MESH_POINT: + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("NL80211_IFTYPE_MESH_POINT \n")); + + mac->link_state = MAC80211_LINKED; + rtlpriv->cfg->ops->set_bcn_reg(hw); + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) + mac->basic_rates = 0xfff; + else + mac->basic_rates = 0xff0; 
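+		/*
+		 * basic_rates is a rate bitmap: the low four bits select the
+		 * CCK rates and the upper bits the OFDM rates, so 0xfff
+		 * enables CCK+OFDM on 2.4 GHz while 0xff0 is OFDM-only for
+		 * 5 GHz (consistent with the "disable cck rate for p2p"
+		 * masking used later in this function and in sta_add).
+		 */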
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, + (u8 *) (&mac->basic_rates)); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("operation mode %d is not support!\n", vif->type)); + err = -EOPNOTSUPP; + goto out; + } + +#ifdef VIF_TODO + if (!rtl_set_vif_info(hw, vif)) + goto out; +#endif + + if (mac->p2p) { + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("p2p role %x \n",vif->type)); + mac->basic_rates = 0xff0;/*disable cck rate for p2p*/ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, + (u8 *) (&mac->basic_rates)); + } + mac->vif = vif; + mac->opmode = vif->type; + rtlpriv->cfg->ops->set_network_type(hw, vif->type); + memcpy(mac->mac_addr, vif->addr, ETH_ALEN); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + +out: + mutex_unlock(&rtlpriv->locks.conf_mutex); + return err; +} + +static void rtl_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + mutex_lock(&rtlpriv->locks.conf_mutex); + + /* Free beacon resources */ + if ((vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_ADHOC) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) { + if (mac->beacon_enabled == 1) { + mac->beacon_enabled = 0; + rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, + rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]); + } + } + + /* + *Note: We assume NL80211_IFTYPE_UNSPECIFIED as + *NO LINK for our hardware. + */ + mac->p2p = 0; + mac->vif = NULL; + mac->link_state = MAC80211_NOLINK; + memset(mac->bssid, 0, 6); + mac->vendor = PEER_UNKNOWN; + mac->opmode = NL80211_IFTYPE_UNSPECIFIED; + rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); + + mutex_unlock(&rtlpriv->locks.conf_mutex); +} +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +/**/ +static int rtl_op_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype new_type, bool p2p) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int ret; + rtl_op_remove_interface(hw, vif); + + vif->type = new_type; + vif->p2p = p2p; + ret = rtl_op_add_interface(hw, vif); + RT_TRACE(COMP_MAC80211, DBG_LOUD, + (" p2p %x\n",p2p)); + return ret; +} +/**/ +#endif +/**/ +static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct ieee80211_conf *conf = &hw->conf; + + if (mac->skip_scan) + return 1; + + + mutex_lock(&rtlpriv->locks.conf_mutex); + if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2) */ + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n")); + } + + /*For IPS */ + if (changed & IEEE80211_CONF_CHANGE_IDLE) { + if (hw->conf.flags & IEEE80211_CONF_IDLE) + rtl_ips_nic_off(hw); + else + rtl_ips_nic_on(hw); + } else { + /* + *although rfoff may not cause by ips, but we will + *check the reason in set_rf_power_state function + */ + if (unlikely(ppsc->rfpwr_state == ERFOFF)) + rtl_ips_nic_on(hw); + } + + /*For LPS */ + if (changed & IEEE80211_CONF_CHANGE_PS) { + cancel_delayed_work(&rtlpriv->works.ps_work); + cancel_delayed_work(&rtlpriv->works.ps_rfon_wq); + if (conf->flags & IEEE80211_CONF_PS) { + rtlpriv->psc.sw_ps_enabled = true; + /* sleep here is must, or we may recv the beacon and + * cause mac80211 into wrong ps state, this will cause + * power save nullfunc send fail, and further cause + * pkt loss, So sleep must quickly but not 
immediatly + * because that will cause nullfunc send by mac80211 + * fail, and cause pkt loss, we have tested that 5mA + * is worked very well */ + if (!rtlpriv->psc.multi_buffered) + queue_delayed_work(rtlpriv->works.rtl_wq, + &rtlpriv->works.ps_work, + MSECS(5)); + } else { + rtl_swlps_rf_awake(hw); + rtlpriv->psc.sw_ps_enabled = false; + } + } + + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n", + hw->conf.long_frame_max_tx_count)); + mac->retry_long = hw->conf.long_frame_max_tx_count; + mac->retry_short = hw->conf.long_frame_max_tx_count; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, + (u8 *) (&hw->conf.long_frame_max_tx_count)); + } + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + struct ieee80211_channel *channel = hw->conf.chandef.chan; + enum nl80211_channel_type channel_type = + cfg80211_get_chandef_type(&(hw->conf.chandef)); +#else + struct ieee80211_channel *channel = hw->conf.channel; + enum nl80211_channel_type channel_type = hw->conf.channel_type; +#endif + u8 wide_chan = (u8) channel->hw_value; + + if (mac->act_scanning) + mac->n_channels++; + + if (rtlpriv->dm.supp_phymode_switch && + mac->link_state < MAC80211_LINKED && + !mac->act_scanning) { + if (rtlpriv->cfg->ops->check_switch_to_dmdp) + rtlpriv->cfg->ops->check_switch_to_dmdp(hw); + } + + /* + *because we should back channel to + *current_network.chan in in scanning, + *So if set_chan == current_network.chan + *we should set it. + *because mac80211 tell us wrong bw40 + *info for cisco1253 bw20, so we modify + *it here based on UPPER & LOWER + */ + switch (channel_type) { + case NL80211_CHAN_HT20: + case NL80211_CHAN_NO_HT: + /* SC */ + mac->cur_40_prime_sc = + PRIME_CHNL_OFFSET_DONT_CARE; + rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20; + mac->bw_40 = false; + break; + case NL80211_CHAN_HT40MINUS: + /* SC */ + mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER; + rtlphy->current_chan_bw = + HT_CHANNEL_WIDTH_20_40; + mac->bw_40 = true; + + /*wide channel */ + wide_chan -= 2; + + break; + case NL80211_CHAN_HT40PLUS: + /* SC */ + mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER; + rtlphy->current_chan_bw = + HT_CHANNEL_WIDTH_20_40; + mac->bw_40 = true; + + /*wide channel */ + wide_chan += 2; + + break; + default: + mac->bw_40 = false; + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not processed \n")); + break; + } + + if (wide_chan <= 0) + wide_chan = 1; + + /* in scanning, when before we offchannel we may send a ps=1 + * null to AP, and then we may send a ps = 0 null to AP quickly, + * but first null have cause AP's put lots of packet to hw tx + * buffer, these packet must be tx before off channel so we must + * delay more time to let AP flush these packets before + * offchannel, or dis-association or delete BA will happen by AP + */ + if (rtlpriv->mac80211.offchan_deley) { + rtlpriv->mac80211.offchan_deley = false; + mdelay(50); + } + + rtlphy->current_channel = wide_chan; + + rtlpriv->cfg->ops->switch_channel(hw); + rtlpriv->cfg->ops->set_channel_access(hw); + rtlpriv->cfg->ops->set_bw_mode(hw, + channel_type); + } + + mutex_unlock(&rtlpriv->locks.conf_mutex); + + return 0; +} + +static void rtl_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *new_flags, u64 multicast) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + *new_flags &= RTL_SUPPORTED_FILTERS; + if (0 == changed_flags) + 
return; + + /*TODO: we disable broadcase now, so enable here */ + if (changed_flags & FIF_ALLMULTI) { + if (*new_flags & FIF_ALLMULTI) { + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] | + rtlpriv->cfg->maps[MAC_RCR_AB]; + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Enable receive multicast frame.\n")); + } else { + mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] | + rtlpriv->cfg->maps[MAC_RCR_AB]); + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Disable receive multicast frame.\n")); + } + } + + if (changed_flags & FIF_FCSFAIL) { + if (*new_flags & FIF_FCSFAIL) { + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32]; + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Enable receive FCS error frame.\n")); + } else { + mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32]; + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Disable receive FCS error frame.\n")); + } + } + + /* if ssid not set to hw don't check bssid + * here just used for linked scanning, & linked + * and nolink check bssid is set in set network_type */ + if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) && + (mac->link_state >= MAC80211_LINKED)) { + if (mac->opmode != NL80211_IFTYPE_AP && + mac->opmode != NL80211_IFTYPE_MESH_POINT) { + if (*new_flags & FIF_BCN_PRBRESP_PROMISC) { + rtlpriv->cfg->ops->set_chk_bssid(hw, false); + } else { + rtlpriv->cfg->ops->set_chk_bssid(hw, true); + } + } + } + + if (changed_flags & FIF_CONTROL) { + if (*new_flags & FIF_CONTROL) { + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF]; + + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Enable receive control frame.\n")); + } else { + mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF]; + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Disable receive control frame.\n")); + } + } + + if (changed_flags & FIF_OTHER_BSS) { + if (*new_flags & FIF_OTHER_BSS) { + mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP]; + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Enable receive other BSS's frame.\n")); + } else { + mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP]; + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("Disable receive other BSS's frame.\n")); + } + } +} +static int rtl_op_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal= rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_sta_info *sta_entry; + + if (sta) { + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_add_tail(&sta_entry->list, &rtlpriv->entry_list); + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + sta_entry->wireless_mode = WIRELESS_MODE_G; + if (sta->supp_rates[0] <= 0xf) + sta_entry->wireless_mode = WIRELESS_MODE_B; + if (sta->ht_cap.ht_supported == true) + sta_entry->wireless_mode = WIRELESS_MODE_N_24G; + + if (vif->type == NL80211_IFTYPE_ADHOC) + sta_entry->wireless_mode = WIRELESS_MODE_G; + } else if (rtlhal->current_bandtype == BAND_ON_5G) { + sta_entry->wireless_mode = WIRELESS_MODE_A; + if (sta->ht_cap.ht_supported == true) + sta_entry->wireless_mode = WIRELESS_MODE_N_24G; + + if (vif->type == NL80211_IFTYPE_ADHOC) + sta_entry->wireless_mode = WIRELESS_MODE_A; + } + /*disable cck rate for p2p*/ + if (mac->p2p) + sta->supp_rates[0] &= 0xfffffff0; + + memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN); + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("Add sta addr is %pM\n",sta->addr)); + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); + } + + return 0; +} + +static int rtl_op_sta_remove(struct ieee80211_hw *hw, + struct 
ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *sta_entry; + if (sta) { + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("Remove sta addr is %pM\n",sta->addr)); + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + sta_entry->wireless_mode = 0; + sta_entry->ratr_index = 0; + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_del(&sta_entry->list); + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + } + return 0; +} +static int _rtl_get_hal_qnum(u16 queue) +{ + int qnum; + + switch (queue) { + case 0: + qnum = AC3_VO; + break; + case 1: + qnum = AC2_VI; + break; + case 2: + qnum = AC0_BE; + break; + case 3: + qnum = AC1_BK; + break; + default: + qnum = AC0_BE; + break; + } + return qnum; +} + +/* + *for mac80211 VO=0, VI=1, BE=2, BK=3 + *for rtl819x BE=0, BK=1, VI=2, VO=3 + */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +static int rtl_op_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *param) +#else +static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *param) +#endif +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + int aci; + + if (queue >= AC_MAX) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("queue number %d is incorrect!\n", queue)); + return -EINVAL; + } + + aci = _rtl_get_hal_qnum(queue); + mac->ac[aci].aifs = param->aifs; + mac->ac[aci].cw_min = param->cw_min; + mac->ac[aci].cw_max = param->cw_max; + mac->ac[aci].tx_op = param->txop; + memcpy(&mac->edca_param[aci], param, sizeof(*param)); + rtlpriv->cfg->ops->set_qos(hw, aci); + return 0; +} + +static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + mutex_lock(&rtlpriv->locks.conf_mutex); + if ((vif->type == NL80211_IFTYPE_ADHOC) || + (vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) { + if ((changed & BSS_CHANGED_BEACON) || + (changed & BSS_CHANGED_BEACON_ENABLED && + bss_conf->enable_beacon)) { + if (mac->beacon_enabled == 0) { + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("BSS_CHANGED_BEACON_ENABLED \n")); + + /*start hw beacon interrupt. 
*/ + /*rtlpriv->cfg->ops->set_bcn_reg(hw); */ + mac->beacon_enabled = 1; + rtlpriv->cfg->ops->update_interrupt_mask(hw, + rtlpriv->cfg->maps + [RTL_IBSS_INT_MASKS], 0); + + if (rtlpriv->cfg->ops->linked_set_reg) + rtlpriv->cfg->ops->linked_set_reg(hw); + } + } + if ((changed & BSS_CHANGED_BEACON_ENABLED && + !bss_conf->enable_beacon)){ + if (mac->beacon_enabled == 1) { + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("ADHOC DISABLE BEACON\n")); + + mac->beacon_enabled = 0; + rtlpriv->cfg->ops->update_interrupt_mask(hw, 0, + rtlpriv->cfg->maps + [RTL_IBSS_INT_MASKS]); + } + } + if (changed & BSS_CHANGED_BEACON_INT) { + RT_TRACE(COMP_BEACON, DBG_TRACE, + ("BSS_CHANGED_BEACON_INT\n")); + mac->beacon_interval = bss_conf->beacon_int; + rtlpriv->cfg->ops->set_bcn_intv(hw); + } + } + + /*TODO: reference to enum ieee80211_bss_change */ + if (changed & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + struct ieee80211_sta *sta = NULL; + /* we should reset all sec info & cam + * before set cam after linked, we should not + * reset in disassoc, that will cause tkip->wep + * fail because some flag will be wrong */ + /* reset sec info */ + rtl_cam_reset_sec_info(hw); + /* reset cam to fix wep fail issue + * when change from wpa to wep */ + rtl_cam_reset_all_entry(hw); + + mac->link_state = MAC80211_LINKED; + mac->cnt_after_linked = 0; + mac->assoc_id = bss_conf->aid; + memcpy(mac->bssid, bss_conf->bssid, 6); + + if (rtlpriv->cfg->ops->linked_set_reg) + rtlpriv->cfg->ops->linked_set_reg(hw); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid); + + if (vif->type == NL80211_IFTYPE_STATION && sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0); + RT_TRACE(COMP_EASY_CONCURRENT, DBG_LOUD, + ("send PS STATIC frame \n")); + if (rtlpriv->dm.supp_phymode_switch) { + if (sta->ht_cap.ht_supported) + rtl_send_smps_action(hw, sta, + IEEE80211_SMPS_STATIC); + } + rcu_read_unlock(); + + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("BSS_CHANGED_ASSOC\n")); + } else { + if (mac->link_state == MAC80211_LINKED) + rtl_lps_leave(hw); + if (ppsc->p2p_ps_info.p2p_ps_mode> P2P_PS_NONE) + rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); + mac->link_state = MAC80211_NOLINK; + memset(mac->bssid, 0, 6); + mac->vendor = PEER_UNKNOWN; + + if (rtlpriv->dm.supp_phymode_switch) { + if (rtlpriv->cfg->ops->check_switch_to_dmdp) + rtlpriv->cfg->ops->check_switch_to_dmdp(hw); + } + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("BSS_CHANGED_UN_ASSOC\n")); + } + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("BSS_CHANGED_ERP_CTS_PROT\n")); + mac->use_cts_protect = bss_conf->use_cts_prot; + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + RT_TRACE(COMP_MAC80211, DBG_LOUD, + ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x \n", + bss_conf->use_short_preamble)); + + mac->short_preamble = bss_conf->use_short_preamble; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE, + (u8 *) (&mac->short_preamble)); + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("BSS_CHANGED_ERP_SLOT\n")); + + if (bss_conf->use_short_slot) + mac->slot_time = RTL_SLOT_TIME_9; + else + mac->slot_time = RTL_SLOT_TIME_20; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + (u8 *) (&mac->slot_time)); + } + + if (changed & BSS_CHANGED_HT) { + struct ieee80211_sta *sta = NULL; + + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("BSS_CHANGED_HT\n")); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid); + if (sta) { + if (sta->ht_cap.ampdu_density > + mac->current_ampdu_density) + 
mac->current_ampdu_density = + sta->ht_cap.ampdu_density; + if (sta->ht_cap.ampdu_factor < + mac->current_ampdu_factor) + mac->current_ampdu_factor = + sta->ht_cap.ampdu_factor; + } + rcu_read_unlock(); + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, + (u8 *) (&mac->max_mss_density)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR, + &mac->current_ampdu_factor); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE, + &mac->current_ampdu_density); + } + + if (changed & BSS_CHANGED_BSSID) { + u32 basic_rates; + struct ieee80211_sta *sta = NULL; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID, + (u8 *) bss_conf->bssid); + + RT_TRACE(COMP_MAC80211, DBG_DMESG, + ("bssid: %pM\n", bss_conf->bssid)); + + mac->vendor = PEER_UNKNOWN; + memcpy(mac->bssid, bss_conf->bssid, 6); + rtlpriv->cfg->ops->set_network_type(hw, vif->type); + + rcu_read_lock(); + sta = ieee80211_find_sta(vif, (u8*)bss_conf->bssid); + if (!sta) { + rcu_read_unlock(); + goto out; + } + + if (rtlhal->current_bandtype == BAND_ON_5G) { + mac->mode = WIRELESS_MODE_A; + } else { + if (sta->supp_rates[0] <= 0xf) + mac->mode = WIRELESS_MODE_B; + else + mac->mode = WIRELESS_MODE_G; + } + + if (sta->ht_cap.ht_supported) { + if (rtlhal->current_bandtype == BAND_ON_2_4G) + mac->mode = WIRELESS_MODE_N_24G; + else + mac->mode = WIRELESS_MODE_N_5G; + } + + /* just station need it, because ibss & ap mode will + * set in sta_add, and will be NULL here */ + if (vif->type == NL80211_IFTYPE_STATION) { + struct rtl_sta_info *sta_entry; + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + sta_entry->wireless_mode = mac->mode; + } + + if (sta->ht_cap.ht_supported) { + mac->ht_enable = true; + + /* + * for cisco 1252 bw20 it's wrong + * if (ht_cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) { + * mac->bw_40 = true; + * } + * */ + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + /* for 5G must << RATE_6M_INDEX=4, + * because 5G have no cck rate*/ + if (rtlhal->current_bandtype == BAND_ON_5G) + basic_rates = sta->supp_rates[1] << 4; + else + basic_rates = sta->supp_rates[0]; + + mac->basic_rates = basic_rates; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, + (u8 *) (&basic_rates)); + } + rcu_read_unlock(); + } + + /* + * For FW LPS and Keep Alive: + * To tell firmware we have connected + * to an AP. For 92SE/CE power save v2. 
+ */ + if (changed & BSS_CHANGED_ASSOC) { + if (bss_conf->assoc) { + u8 keep_alive = 10; + u8 mstatus = RT_MEDIA_CONNECT; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_KEEP_ALIVE, + (u8 *) (&keep_alive)); + + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_JOINBSSRPT, + (u8 *) (&mstatus)); + ppsc->report_linked = true; + + } else { + + u8 mstatus = RT_MEDIA_DISCONNECT; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_JOINBSSRPT, + (u8 *) (&mstatus)); + ppsc->report_linked = false; + + } + + if (rtlpriv->cfg->ops->get_btc_status()){ + rtlpriv->btcoexist.btc_ops->btc_mediastatus_notify( + rtlpriv, ppsc->report_linked); + } + } + +out: + mutex_unlock(&rtlpriv->locks.conf_mutex); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +#else +static u64 rtl_op_get_tsf(struct ieee80211_hw *hw) +#endif +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u64 tsf; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf)); + return tsf; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +static void rtl_op_set_tsf(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u64 tsf) +#else +static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) +#endif +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; + + mac->tsf = tsf; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss)); +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +#else +static void rtl_op_reset_tsf(struct ieee80211_hw *hw) +#endif +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp = 0; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp)); +} + +static void rtl_op_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta) +{ + switch (cmd) { + case STA_NOTIFY_SLEEP: + break; + case STA_NOTIFY_AWAKE: + break; + default: + break; + } +} + +static int rtl_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 * ssn +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) +/**/ + ,u8 buf_size +/**/ +#endif +/**/ + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (action) { + case IEEE80211_AMPDU_TX_START: + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid)); + return rtl_tx_agg_start(hw, vif, sta, tid, ssn); + break; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) + case IEEE80211_AMPDU_TX_STOP_CONT: + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: +#else + case IEEE80211_AMPDU_TX_STOP: +#endif + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid)); + return rtl_tx_agg_stop(hw, vif, sta, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid)); + rtl_tx_agg_oper(hw, sta, tid); + break; + case IEEE80211_AMPDU_RX_START: + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid)); + return rtl_rx_agg_start(hw, sta, tid); + break; + case IEEE80211_AMPDU_RX_STOP: + RT_TRACE(COMP_MAC80211, DBG_TRACE, + ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid)); + return rtl_rx_agg_stop(hw, sta, tid); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + 
("IEEE80211_AMPDU_ERR!!!!:\n")); + return -EOPNOTSUPP; + } + return 0; +} + +static void rtl_op_sw_scan_start(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n")); + mac->act_scanning = true; + /*rtlpriv->btcops->btc_scan_notify(rtlpriv, 0); */ + if (rtlpriv->link_info.b_higher_busytraffic) { + mac->skip_scan = true; + return; + } + + if (rtlpriv->dm.supp_phymode_switch) { + if (rtlpriv->cfg->ops->check_switch_to_dmdp) + rtlpriv->cfg->ops->check_switch_to_dmdp(hw); + } + + if (mac->link_state == MAC80211_LINKED) { + rtl_lps_leave(hw); + mac->link_state = MAC80211_LINKED_SCANNING; + } else { + rtl_ips_nic_on(hw); + } + + /* Dul mac */ + rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false; + + rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY); + + rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP_BAND0); + +} + +static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + RT_TRACE(COMP_MAC80211, DBG_LOUD, ("\n")); + mac->act_scanning = false; + mac->skip_scan = false; + if (rtlpriv->link_info.b_higher_busytraffic) { + return; + } + + /* p2p will use 1/6/11 to scan */ + if (mac->n_channels == 3) + mac->p2p_in_use = true; + else + mac->p2p_in_use = false; + mac->n_channels = 0; + /* Dul mac */ + rtlpriv->rtlhal.b_load_imrandiqk_setting_for2g = false; + + if (mac->link_state == MAC80211_LINKED_SCANNING) { + mac->link_state = MAC80211_LINKED; + if (mac->opmode == NL80211_IFTYPE_STATION) { + /* fix fwlps issue */ + rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); + } + } + + rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE); + /* rtlpriv->btcops->btc_scan_notify(rtlpriv, 1); */ +} + +static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 key_type = NO_ENCRYPTION; + u8 key_idx; + bool group_key = false; + bool wep_only = false; + int err = 0; + u8 mac_addr[ETH_ALEN]; + u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + u8 zero_addr[ETH_ALEN] = { 0 }; + + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("not open hw encryption\n")); + return -ENOSPC; /*User disabled HW-crypto */ + } + /* To support IBSS, use sw-crypto for GTK */ + if(((vif->type == NL80211_IFTYPE_ADHOC) || + (vif->type == NL80211_IFTYPE_MESH_POINT)) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -ENOSPC; + RT_TRACE(COMP_SEC, DBG_DMESG, + ("%s hardware based encryption for keyidx: %d, mac: %pM\n", + cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, + sta ? 
sta->addr : bcast_addr)); + rtlpriv->sec.being_setkey = true; + rtl_ips_nic_on(hw); + mutex_lock(&rtlpriv->locks.conf_mutex); + /* <1> get encryption alg */ + +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +/**/ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + key_type = WEP40_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n")); + break; + case WLAN_CIPHER_SUITE_WEP104: + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP104\n")); + key_type = WEP104_ENCRYPTION; + break; + case WLAN_CIPHER_SUITE_TKIP: + key_type = TKIP_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n")); + break; + case WLAN_CIPHER_SUITE_CCMP: + key_type = AESCCMP_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n")); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + /* HW don't support CMAC encryption, + * use software CMAC encryption */ + key_type = AESCMAC_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n")); + RT_TRACE(COMP_SEC, DBG_DMESG, + ("HW don't support CMAC encrypiton, " + "use software CMAC encrypiton\n")); + err = -EOPNOTSUPP; + goto out_unlock; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("alg_err:%x!!!!:\n", key->cipher)); + goto out_unlock; + } +/**/ +#else + switch (key->alg) { + case ALG_WEP: + if (key->keylen == WLAN_KEY_LEN_WEP40) { + key_type = WEP40_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n")); + } else { + RT_TRACE(COMP_SEC, DBG_DMESG, + ("alg:WEP104\n")); + key_type = WEP104_ENCRYPTION; + } + break; + case ALG_TKIP: + key_type = TKIP_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n")); + break; + case ALG_CCMP: + key_type = AESCCMP_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n")); + break; + case ALG_AES_CMAC: + /*HW don't support CMAC encryption, use software CMAC encryption */ + key_type = AESCMAC_ENCRYPTION; + RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n")); + RT_TRACE(COMP_SEC, DBG_DMESG, + ("HW don't support CMAC encrypiton, " + "use software CMAC encrypiton\n")); + err = -EOPNOTSUPP; + goto out_unlock; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("alg_err:%x!!!!:\n", key->alg)); + goto out_unlock; + } +#endif +/**/ + if(key_type == WEP40_ENCRYPTION || + key_type == WEP104_ENCRYPTION || + vif->type == NL80211_IFTYPE_ADHOC) + rtlpriv->sec.use_defaultkey = true; + + /* <2> get key_idx */ + key_idx = (u8) (key->keyidx); + if (key_idx > 3) + goto out_unlock; + /* <3> if pairwise key enable_hw_sec */ + group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE); + + /* wep always be group key, but there are two conditions: + * 1) wep only: is just for wep enc, in this condition + * rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION + * will be true & enable_hw_sec will be set when wep + * ke setting. 
+ * 2) wep(group) + AES(pairwise): some AP like cisco + * may use it, in this condition enable_hw_sec will not + * be set when wep key setting */ + /* we must reset sec_info after lingked before set key, + * or some flag will be wrong*/ + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT) { + if (!group_key || key_type == WEP40_ENCRYPTION || + key_type == WEP104_ENCRYPTION) { + if (group_key) { + wep_only = true; + } + rtlpriv->cfg->ops->enable_hw_sec(hw); + } + } else { + if ((!group_key) || (vif->type == NL80211_IFTYPE_ADHOC) || + rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) { + if (rtlpriv->sec.pairwise_enc_algorithm == + NO_ENCRYPTION && + (key_type == WEP40_ENCRYPTION || + key_type == WEP104_ENCRYPTION)) + wep_only = true; + rtlpriv->sec.pairwise_enc_algorithm = key_type; + RT_TRACE(COMP_SEC, DBG_DMESG, + ("set enable_hw_sec, key_type:%x(OPEN:0 WEP40:" + "1 TKIP:2 AES:4 WEP104:5)\n", key_type)); + rtlpriv->cfg->ops->enable_hw_sec(hw); + } + } + /* <4> set key based on cmd */ + switch (cmd) { + case SET_KEY: + if (wep_only) { + RT_TRACE(COMP_SEC, DBG_DMESG, + ("set WEP(group/pairwise) key\n")); + /* Pairwise key with an assigned MAC address. */ + rtlpriv->sec.pairwise_enc_algorithm = key_type; + rtlpriv->sec.group_enc_algorithm = key_type; + /*set local buf about wep key. */ + memcpy(rtlpriv->sec.key_buf[key_idx], + key->key, key->keylen); + rtlpriv->sec.key_len[key_idx] = key->keylen; + memcpy(mac_addr, zero_addr, ETH_ALEN); + } else if (group_key) { /* group key */ + RT_TRACE(COMP_SEC, DBG_DMESG, + ("set group key\n")); + /* group key */ + rtlpriv->sec.group_enc_algorithm = key_type; + /*set local buf about group key. */ + memcpy(rtlpriv->sec.key_buf[key_idx], + key->key, key->keylen); + rtlpriv->sec.key_len[key_idx] = key->keylen; + memcpy(mac_addr, bcast_addr, ETH_ALEN); + } else { /* pairwise key */ + RT_TRACE(COMP_SEC, DBG_DMESG, + ("set pairwise key\n")); + if (!sta) { + RT_ASSERT(false, ("pairwise key withnot" + "mac_addr\n")); + + err = -EOPNOTSUPP; + goto out_unlock; + } + /* Pairwise key with an assigned MAC address. */ + rtlpriv->sec.pairwise_enc_algorithm = key_type; + /*set local buf about pairwise key. */ + memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX], + key->key, key->keylen); + rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen; + rtlpriv->sec.pairwise_key = + rtlpriv->sec.key_buf[PAIRWISE_KEYIDX]; + memcpy(mac_addr, sta->addr, ETH_ALEN); + } + rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr, + group_key, key_type, wep_only, + false); + /* <5> tell mac80211 do something: */ + /*must use sw generate IV, or can not work !!!!. */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + key->hw_key_idx = key_idx; + if (key_type == TKIP_ENCRYPTION) + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + /*use software CCMP encryption for management frames (MFP) */ + if (key_type == AESCCMP_ENCRYPTION) + key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; + break; + case DISABLE_KEY: + RT_TRACE(COMP_SEC, DBG_DMESG, + ("disable key delete one entry\n")); + /*set local buf about wep key. */ + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_MESH_POINT) { + if (sta) + rtl_cam_del_entry(hw, sta->addr); + } + memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen); + rtlpriv->sec.key_len[key_idx] = 0; + memcpy(mac_addr, zero_addr, ETH_ALEN); + /* + *mac80211 will delete entrys one by one, + *so don't use rtl_cam_reset_all_entry + *or clear all entry here. 
+ */ + rtl_cam_delete_one_entry(hw, mac_addr, key_idx); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("cmd_err:%x!!!!:\n", cmd)); + } +out_unlock: + mutex_unlock(&rtlpriv->locks.conf_mutex); + rtlpriv->sec.being_setkey = false; + return err; +} + +static void rtl_op_rfkill_poll(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + bool radio_state; + bool blocked; + u8 valid = 0; + + if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) + return; + + mutex_lock(&rtlpriv->locks.conf_mutex); + + /*if Radio On return true here */ + radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid); + + if (valid) { + if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) { + rtlpriv->rfkill.rfkill_state = radio_state; + + RT_TRACE(COMP_RF, DBG_DMESG, + (KERN_INFO "wireless radio switch turned %s\n", + radio_state ? "on" : "off")); + + blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1; + wiphy_rfkill_set_hw_state(hw->wiphy, blocked); + } + } + + mutex_unlock(&rtlpriv->locks.conf_mutex); +} + +/* this function is called by mac80211 to flush tx buffer + * before switch channle or power save, or tx buffer packet + * maybe send after offchannel or rf sleep, this may cause + * dis-association by AP */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) +static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->intf_ops->flush) + rtlpriv->intf_ops->flush(hw, queues, drop); +} +#else +static void rtl_op_flush(struct ieee80211_hw *hw, bool drop) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->intf_ops->flush) + rtlpriv->intf_ops->flush(hw, drop); +} +#endif + +const struct ieee80211_ops rtl_ops = { + .start = rtl_op_start, + .stop = rtl_op_stop, + .tx = rtl_op_tx, + .add_interface = rtl_op_add_interface, + .remove_interface = rtl_op_remove_interface, +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +/**/ + .change_interface = rtl_op_change_interface, +/**/ +#endif +/**/ + .config = rtl_op_config, + .configure_filter = rtl_op_configure_filter, + .set_key = rtl_op_set_key, + .conf_tx = rtl_op_conf_tx, + .bss_info_changed = rtl_op_bss_info_changed, + .get_tsf = rtl_op_get_tsf, + .set_tsf = rtl_op_set_tsf, + .reset_tsf = rtl_op_reset_tsf, + .sta_notify = rtl_op_sta_notify, + .ampdu_action = rtl_op_ampdu_action, + .sw_scan_start = rtl_op_sw_scan_start, + .sw_scan_complete = rtl_op_sw_scan_complete, + .rfkill_poll = rtl_op_rfkill_poll, + .sta_add = rtl_op_sta_add, + .sta_remove = rtl_op_sta_remove, + .flush = rtl_op_flush, +}; --- linux-3.13.0.orig/drivers/staging/rtl8821ae/core.h +++ linux-3.13.0/drivers/staging/rtl8821ae/core.h @@ -0,0 +1,43 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * Tmis program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * Tmis program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * tmis program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * Tme full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_CORE_H__ +#define __RTL_CORE_H__ + +#define RTL_SUPPORTED_FILTERS \ + (FIF_PROMISC_IN_BSS | \ + FIF_ALLMULTI | FIF_CONTROL | \ + FIF_OTHER_BSS | \ + FIF_FCSFAIL | \ + FIF_BCN_PRBRESP_PROMISC) + +#define RTL_SUPPORTED_CTRL_FILTER 0xFF + +extern const struct ieee80211_ops rtl_ops; +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/debug.c +++ linux-3.13.0/drivers/staging/rtl8821ae/debug.c @@ -0,0 +1,988 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * Tmis program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * Tmis program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * tmis program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * Tme full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "wifi.h" +#include "cam.h" + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) +#define GET_INODE_DATA(__node) PDE_DATA(__node) +#else +#define GET_INODE_DATA(__node) PDE(__node)->data +#endif + + +void rtl_dbgp_flag_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 i; + + rtlpriv->dbg.global_debuglevel = DBG_DMESG; + + rtlpriv->dbg.global_debugcomponents = + COMP_ERR | + COMP_FW | + COMP_INIT | + COMP_RECV | + COMP_SEND | + COMP_MLME | + COMP_SCAN | + COMP_INTR | + COMP_LED | + COMP_SEC | + COMP_BEACON | + COMP_RATE | + COMP_RXDESC | + COMP_DIG | + COMP_TXAGC | + COMP_POWER | + COMP_POWER_TRACKING | + COMP_BB_POWERSAVING | + COMP_SWAS | + COMP_RF | + COMP_TURBO | + COMP_RATR | + COMP_CMD | + COMP_EASY_CONCURRENT | + COMP_EFUSE | + COMP_QOS | COMP_MAC80211 | COMP_REGD | + COMP_CHAN | + COMP_BT_COEXIST | + COMP_IQK | + 0; + + for (i = 0; i < DBGP_TYPE_MAX; i++) + rtlpriv->dbg.dbgp_type[i] = 0; + + /*Init Debug flag enable condition */ +} + +struct proc_dir_entry *proc_topdir; +static int rtl_proc_get_mac_0(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x000; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_0(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_0, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_0 = { + .open = dl_proc_open_mac_0, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_1(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x100; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_1(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_1, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_1 = { + .open = dl_proc_open_mac_1, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_2(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x200; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_2(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_2, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_2 = { + .open = dl_proc_open_mac_2, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_3(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x300; + + for (n = 0; n 
<= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_3(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_3, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_3 = { + .open = dl_proc_open_mac_3, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_4(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x400; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_4(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_4, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_4 = { + .open = dl_proc_open_mac_4, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_5(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x500; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_5(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_5, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_5 = { + .open = dl_proc_open_mac_5, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_6(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x600; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_6(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_6, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_6 = { + .open = dl_proc_open_mac_6, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_mac_7(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i, n, page; + int max = 0xff; + page = 0x700; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_read_dword(rtlpriv, (page | n))); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_mac_7(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_mac_7, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_mac_7 = { + .open = dl_proc_open_mac_7, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_8(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, 
n, page; + int max = 0xff; + page = 0x800; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_8(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_8, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_8 = { + .open = dl_proc_open_bb_8, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_9(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0x900; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_9(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_9, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_9 = { + .open = dl_proc_open_bb_9, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_a(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0xa00; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_a(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_a, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_a = { + .open = dl_proc_open_bb_a, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_b(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0xb00; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_b(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_b, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_b = { + .open = dl_proc_open_bb_b, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_c(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0xc00; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_c(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_c, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_c = { + .open = dl_proc_open_bb_c, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_d(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0xd00; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + 
page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_d(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_d, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_d = { + .open = dl_proc_open_bb_d, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_e(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0xe00; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_e(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_e, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_e = { + .open = dl_proc_open_bb_e, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_bb_f(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n, page; + int max = 0xff; + page = 0xf00; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n + page); + for (i = 0; i < 4 && n <= max; i++, n += 4) + seq_printf(m, "%8.8x ", + rtl_get_bbreg(hw, (page | n), 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_bb_f(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_bb_f, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_bb_f = { + .open = dl_proc_open_bb_f, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_reg_rf_a(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n; + int max = 0x40; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n); + for (i = 0; i < 4 && n <= max; n += 1, i++) + seq_printf(m, "%8.8x ", + rtl_get_rfreg(hw, RF90_PATH_A, n, 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_rf_a(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_reg_rf_a, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_rf_a = { + .open = dl_proc_open_rf_a, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_reg_rf_b(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + int i, n; + int max = 0x40; + + for (n = 0; n <= max; ) { + seq_printf(m, "\n%8.8x ", n); + for (i = 0; i < 4 && n <= max; n += 1, i++) + seq_printf(m, "%8.8x ", + rtl_get_rfreg(hw, RF90_PATH_B, n, + 0xffffffff)); + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_rf_b(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_reg_rf_b, GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_rf_b = { + .open = dl_proc_open_rf_b, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_cam_register_1(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 target_cmd = 0; + u32 target_val=0; + u8 entry_i=0; + u32 ulstatus; + int i = 100, j = 0; + + /* This dump the current register page */ + seq_puts(m, + "\n#################### SECURITY CAM 
(0-10) ##################\n "); + + for (j = 0; j < 11; j++) { + seq_printf(m, "\nD: %2x > ", j); + for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + /* polling bit, and No Write enable, and address */ + target_cmd = entry_i + CAM_CONTENT_COUNT * j; + target_cmd = target_cmd | BIT(31); + + /* Check polling bit is clear */ + while ((i--) >= 0) { + ulstatus = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RWCAM]); + if (ulstatus & BIT(31)) { + continue; + } else { + break; + } + } + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_cmd); + target_val = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RCAMO]); + seq_printf(m, "%8.8x ", target_val); + } + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_cam_1(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_cam_register_1, + GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_cam_1 = { + .open = dl_proc_open_cam_1, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_cam_register_2(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 target_cmd = 0; + u32 target_val = 0; + u8 entry_i = 0; + u32 ulstatus; + int i = 100, j = 0; + + /* This dump the current register page */ + seq_puts(m, + "\n################### SECURITY CAM (11-21) ##################\n "); + + for (j = 11; j < 22; j++) { + seq_printf(m, "\nD: %2x > ", j); + for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + target_cmd = entry_i + CAM_CONTENT_COUNT * j; + target_cmd = target_cmd | BIT(31); + + while ((i--) >= 0) { + ulstatus = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RWCAM]); + if (ulstatus & BIT(31)) { + continue; + } else { + break; + } + } + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_cmd); + target_val = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RCAMO]); + seq_printf(m, "%8.8x ", target_val); + } + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_cam_2(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_cam_register_2, + GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_cam_2 = { + .open = dl_proc_open_cam_2, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int rtl_proc_get_cam_register_3(struct seq_file *m, void *v) +{ + struct ieee80211_hw *hw = m->private; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 target_cmd = 0; + u32 target_val = 0; + u8 entry_i = 0; + u32 ulstatus; + int i = 100, j = 0; + + /* This dump the current register page */ + seq_puts(m, + "\n################### SECURITY CAM (22-31) ##################\n "); + + for (j = 22; j < TOTAL_CAM_ENTRY; j++) { + seq_printf(m, "\nD: %2x > ", j); + for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { + target_cmd = entry_i+CAM_CONTENT_COUNT*j; + target_cmd = target_cmd | BIT(31); + + while ((i--) >= 0) { + ulstatus = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RWCAM]); + if (ulstatus & BIT(31)) { + continue; + } else { + break; + } + } + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], + target_cmd); + target_val = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[RCAMO]); + seq_printf(m, "%8.8x ", target_val); + } + } + seq_puts(m, "\n"); + return 0; +} + +static int dl_proc_open_cam_3(struct inode *inode, struct file *file) +{ + return single_open(file, rtl_proc_get_cam_register_3, + GET_INODE_DATA(inode)); +} + +static const struct file_operations file_ops_cam_3 
= { + .open = dl_proc_open_cam_3, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +void rtl_proc_add_one(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct proc_dir_entry *entry; + + snprintf(rtlpriv->dbg.proc_name, 18, "%x-%x-%x-%x-%x-%x", + rtlefuse->dev_addr[0], rtlefuse->dev_addr[1], + rtlefuse->dev_addr[2], rtlefuse->dev_addr[3], + rtlefuse->dev_addr[4], rtlefuse->dev_addr[5]); + + rtlpriv->dbg.proc_dir = proc_mkdir(rtlpriv->dbg.proc_name, proc_topdir); + if (!rtlpriv->dbg.proc_dir) { + RT_TRACE(COMP_INIT, DBG_EMERG, ("Unable to init " + "/proc/net/%s/%s\n", rtlpriv->cfg->name, + rtlpriv->dbg.proc_name)); + return; + } + + entry = proc_create_data("mac-0", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_0, hw); + if (!entry) + RT_TRACE(COMP_INIT, DBG_EMERG, + ("Unable to initialize /proc/net/%s/%s/mac-0\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-1", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_1, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-1\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-2", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_2, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-2\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-3", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_3, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-3\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-4", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_4, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-4\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-5", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_5, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-5\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-6", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_6, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-6\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("mac-7", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_mac_7, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/mac-7\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-8", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_8, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-8\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-9", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_9, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-9\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-a", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_a, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-a\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-b", S_IFREG | S_IRUGO, + 
rtlpriv->dbg.proc_dir, &file_ops_bb_b, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-b\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-c", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_c, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-c\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-d", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_d, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-d\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-e", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_e, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-e\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("bb-f", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_bb_f, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/bb-f\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("rf-a", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_rf_a, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/rf-a\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("rf-b", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_rf_b, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/rf-b\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("cam-1", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_cam_1, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/cam-1\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("cam-2", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_cam_2, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/cam-2\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); + + entry = proc_create_data("cam-3", S_IFREG | S_IRUGO, + rtlpriv->dbg.proc_dir, &file_ops_cam_3, hw); + if (!entry) + RT_TRACE(COMP_INIT, COMP_ERR, + ("Unable to initialize /proc/net/%s/%s/cam-3\n", + rtlpriv->cfg->name, rtlpriv->dbg.proc_name)); +} + +void rtl_proc_remove_one(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->dbg.proc_dir) { + remove_proc_entry("mac-0", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-1", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-2", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-3", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-4", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-5", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-6", rtlpriv->dbg.proc_dir); + remove_proc_entry("mac-7", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-8", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-9", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-a", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-b", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-c", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-d", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-e", rtlpriv->dbg.proc_dir); + remove_proc_entry("bb-f", rtlpriv->dbg.proc_dir); + remove_proc_entry("rf-a", rtlpriv->dbg.proc_dir); + remove_proc_entry("rf-b", rtlpriv->dbg.proc_dir); + remove_proc_entry("cam-1", rtlpriv->dbg.proc_dir); + 
remove_proc_entry("cam-2", rtlpriv->dbg.proc_dir);
+		remove_proc_entry("cam-3", rtlpriv->dbg.proc_dir);
+
+		remove_proc_entry(rtlpriv->dbg.proc_name, proc_topdir);
+
+		rtlpriv->dbg.proc_dir = NULL;
+	}
+}
+
+void rtl_proc_add_topdir(void)
+{
+	proc_topdir = proc_mkdir("rtlwifi", init_net.proc_net);
+}
+
+void rtl_proc_remove_topdir(void)
+{
+	if (proc_topdir)
+		remove_proc_entry("rtlwifi", init_net.proc_net);
+}
\ No newline at end of file
--- linux-3.13.0.orig/drivers/staging/rtl8821ae/debug.h
+++ linux-3.13.0/drivers/staging/rtl8821ae/debug.h
@@ -0,0 +1,227 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_DEBUG_H__
+#define __RTL_DEBUG_H__
+
+/*--------------------------------------------------------------
+			Debug level
+--------------------------------------------------------------*/
+/*
+ *Fatal bug.
+ *For example, Tx/Rx/IO locked up,
+ *memory access violation,
+ *resource allocation failed,
+ *unexpected HW behavior, HW BUG
+ *and so on.
+ */
+#define DBG_EMERG 0
+
+/*
+ *Abnormal, rare, or unexpected cases.
+ *For example, Packet/IO Ctl canceled,
+ *device surprise removal and so on.
+ */
+#define DBG_WARNING 2
+
+/*
+ *Normal cases that driver developers should
+ *enable by default; prints basic and useful
+ *information such as link status changes
+ *(assoc/AddBA/DHCP/adapter start) and so on.
+ */
+#define DBG_DMESG 3
+
+/*
+ *Normal case with useful information
+ *about current SW or HW state.
+ *For example, Tx/Rx descriptor to fill,
+ *Tx/Rx descriptor completed status,
+ *SW protocol state change, dynamic
+ *mechanism state change and so on.
+ */
+#define DBG_LOUD 4
+
+/*
+ *Normal case with detailed execution
+ *flow or information.
+ */
+#define DBG_TRACE 5
+
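+/*
+ * Example (illustrative): every trace call names one COMP_* component bit
+ * (defined below) and one DBG_* level, e.g.
+ *
+ *	RT_TRACE(COMP_EFUSE, DBG_LOUD, ("Addr=%x Data=%x\n", address, value));
+ *
+ * The message is printed only when the component bit is set in
+ * dbg.global_debugcomponents and the level is <= dbg.global_debuglevel
+ * (see the RT_TRACE() macro below).
+ */
+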
+/*--------------------------------------------------------------
+		Define the rt_trace components
+--------------------------------------------------------------*/
+#define COMP_ERR		BIT(0)
+#define COMP_FW			BIT(1)
+#define COMP_INIT		BIT(2)	/*For init/deinit */
+#define COMP_RECV		BIT(3)	/*For Rx. */
+#define COMP_SEND		BIT(4)	/*For Tx. */
+#define COMP_MLME		BIT(5)	/*For MLME. */
+#define COMP_SCAN		BIT(6)	/*For Scan. */
+#define COMP_INTR		BIT(7)	/*For interrupt related. */
+#define COMP_LED		BIT(8)	/*For LED. */
+#define COMP_SEC		BIT(9)	/*For sec. */
+#define COMP_BEACON		BIT(10)	/*For beacon. */
+#define COMP_RATE		BIT(11)	/*For rate. */
+#define COMP_RXDESC		BIT(12)	/*For rx desc. */
+#define COMP_DIG		BIT(13)	/*For DIG */
+#define COMP_TXAGC		BIT(14)	/*For Tx power */
+#define COMP_HIPWR		BIT(15)	/*For High Power Mechanism */
+#define COMP_POWER		BIT(16)	/*For lps/ips/aspm. */
+#define COMP_POWER_TRACKING	BIT(17)	/*For TX POWER TRACKING */
+#define COMP_BB_POWERSAVING	BIT(18)
+#define COMP_SWAS		BIT(19)	/*For SW Antenna Switch */
+#define COMP_RF			BIT(20)	/*For RF. */
+#define COMP_TURBO		BIT(21)	/*For EDCA TURBO. */
+#define COMP_RATR		BIT(22)
+#define COMP_CMD		BIT(23)
+#define COMP_EFUSE		BIT(24)
+#define COMP_QOS		BIT(25)
+#define COMP_MAC80211		BIT(26)
+#define COMP_REGD		BIT(27)
+#define COMP_CHAN		BIT(28)
+#define COMP_EASY_CONCURRENT	BIT(29)
+#define COMP_BT_COEXIST		BIT(30)
+#define COMP_IQK		BIT(31)
+
+/*--------------------------------------------------------------
+		Define the rt_print components
+--------------------------------------------------------------*/
+/* Define EEPROM and EFUSE check module bit*/
+#define EEPROM_W		BIT(0)
+#define EFUSE_PG		BIT(1)
+#define EFUSE_READ_ALL		BIT(2)
+
+/* Define init check for module bit*/
+#define INIT_EEPROM		BIT(0)
+#define INIT_TxPower		BIT(1)
+#define INIT_IQK		BIT(2)
+#define INIT_RF			BIT(3)
+
+/* Define PHY-BB/RF/MAC check module bit */
+#define PHY_BBR			BIT(0)
+#define PHY_BBW			BIT(1)
+#define PHY_RFR			BIT(2)
+#define PHY_RFW			BIT(3)
+#define PHY_MACR		BIT(4)
+#define PHY_MACW		BIT(5)
+#define PHY_ALLR		BIT(6)
+#define PHY_ALLW		BIT(7)
+#define PHY_TXPWR		BIT(8)
+#define PHY_PWRDIFF		BIT(9)
+
+/* Define Dynamic Mechanism check module bit --> FDM */
+#define WA_IOT			BIT(0)
+#define DM_PWDB			BIT(1)
+#define DM_MONITOR		BIT(2)
+#define DM_DIG			BIT(3)
+#define DM_EDCA_TURBO		BIT(4)
+
+enum dbgp_flag_e {
+	FQOS = 0,
+	FTX = 1,
+	FRX = 2,
+	FSEC = 3,
+	FMGNT = 4,
+	FMLME = 5,
+	FRESOURCE = 6,
+	FBEACON = 7,
+	FISR = 8,
+	FPHY = 9,
+	FMP = 10,
+	FEEPROM = 11,
+	FPWR = 12,
+	FDM = 13,
+	FDBGCtrl = 14,
+	FC2H = 15,
+	FBT = 16,
+	FINIT = 17,
+	FIOCTL = 18,
+	DBGP_TYPE_MAX
+};
+
+#define RT_ASSERT(_exp, fmt)					\
+	do {							\
+		if (!(_exp)) {					\
+			printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
+			       __func__);			\
+			printk fmt;				\
+		}						\
+	} while (0)
+
+#define RT_DISP(dbgtype, dbgflag, printstr)
+
+#define RT_TRACE(comp, level, fmt)				\
+	do {							\
+		if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
+			     ((level) <= rtlpriv->dbg.global_debuglevel))) { \
+			printk(KERN_DEBUG "%s-%d:%s():<%lx-%x> ",	\
+			       KBUILD_MODNAME,			\
+			       rtlpriv->rtlhal.interfaceindex, __func__, \
+			       in_interrupt(), in_atomic());	\
+			printk fmt;				\
+		}						\
+	} while (0)
+
+#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr)		\
+	do {							\
+		if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
+			printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+			printk printstr;			\
+		}						\
+	} while (0)
+
+#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
+		      _hexdatalen)				\
+	do {							\
+		if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) && \
+			     (_level <= rtlpriv->dbg.global_debuglevel))) { \
+			int __i;				\
+			u8 *ptr = (u8 *)_hexdata;		\
+			printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
+			printk(KERN_DEBUG "In process \"%s\" (pid %i):", \
+			       current->comm,			\
+			       current->pid);			\
+			printk(_titlestring);			\
+			for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+				printk("%02X%s", ptr[__i], (((__i + 1) % 4) \
+					== 0) ? "  " : " ");	\
+				if (((__i + 1) % 16) == 0)	\
+					printk("\n");		\
+			}					\
+			printk(KERN_DEBUG "\n");		\
+		}						\
+	} while (0)
+
+void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
+void rtl_proc_add_one(struct ieee80211_hw *hw);
+void rtl_proc_remove_one(struct ieee80211_hw *hw);
+void rtl_proc_add_topdir(void);
+void rtl_proc_remove_topdir(void);
+#endif
--- linux-3.13.0.orig/drivers/staging/rtl8821ae/efuse.c
+++ linux-3.13.0/drivers/staging/rtl8821ae/efuse.c
@@ -0,0 +1,1285 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger
+ *
+ *****************************************************************************/
+#include "wifi.h"
+#include "efuse.h"
+#include "btcoexist/halbt_precomp.h"
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
+#include
+#endif
+
+static const u8 MAX_PGPKT_SIZE = 9;
+static const u8 PGPKT_DATA_SIZE = 8;
+static const int EFUSE_MAX_SIZE = 512;
+
+static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
+	{0, 0, 0, 2},
+	{0, 1, 0, 2},
+	{0, 2, 0, 2},
+	{1, 0, 0, 1},
+	{1, 0, 1, 1},
+	{1, 1, 0, 1},
+	{1, 1, 1, 3},
+	{1, 3, 0, 17},
+	{3, 3, 1, 48},
+	{10, 0, 0, 6},
+	{10, 3, 0, 1},
+	{10, 3, 1, 1},
+	{11, 0, 0, 28}
+};
+
+static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
+				    u8 *value);
+static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
+				    u16 *value);
+static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
+				    u32 *value);
+static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
+				     u8 value);
+static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
+				     u16 value);
+static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
+				     u32 value);
+static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
+				u8 data);
+static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse);
+static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
+				u8 *data);
+static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
+				 u8 word_en, u8 *data);
+static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
+					u8 *targetdata);
+static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
+				       u16 efuse_addr, u8 word_en, u8 *data);
+static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite,
+			       u8 pwrstate);
+static u16 efuse_get_current_size(struct ieee80211_hw *hw);
+static u8 efuse_calculate_word_cnts(u8 word_en);
+
+void efuse_initialize(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 bytetemp;
+	u8 temp;
+
+
bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1); + temp = bytetemp | 0x20; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp); + + bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1); + temp = bytetemp & 0xFE; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp); + + bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3); + temp = bytetemp | 0x80; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp); + + rtl_write_byte(rtlpriv, 0x2F8, 0x3); + + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72); + +} + +u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 data; + u8 bytetemp; + u8 temp; + u32 k = 0; + const u32 efuse_real_content_len = + rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE]; + + if (address < efuse_real_content_len) { + temp = address & 0xFF; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, + temp); + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 2); + temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2, + temp); + + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3); + temp = bytetemp & 0x7F; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, + temp); + + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3); + while (!(bytetemp & 0x80)) { + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg-> + maps[EFUSE_CTRL] + 3); + k++; + if (k == 1000) { + k = 0; + break; + } + } + data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); + return data; + } else + return 0xFF; + +} +//EXPORT_SYMBOL(efuse_read_1byte); + +void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 bytetemp; + u8 temp; + u32 k = 0; + const u32 efuse_real_content_len = + rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE]; + + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("Addr=%x Data =%x\n", address, value)); + + if (address < efuse_real_content_len) { + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value); + + temp = address & 0xFF; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, + temp); + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 2); + + temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC); + rtl_write_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp); + + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3); + temp = bytetemp | 0x80; + rtl_write_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp); + + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3); + + while (bytetemp & 0x80) { + bytetemp = rtl_read_byte(rtlpriv, + rtlpriv->cfg-> + maps[EFUSE_CTRL] + 3); + k++; + if (k == 100) { + k = 0; + break; + } + } + } + +} + +void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 value32; + u8 readbyte; + u16 retry; + + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, + (_offset & 0xff)); + readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2, + ((_offset >> 8) & 0x03) | (readbyte & 0xfc)); + + readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, + (readbyte & 0x7f)); + + retry = 0; + value32 = 
rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); + while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) { + value32 = rtl_read_dword(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL]); + retry++; + } + + udelay(50); + value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); + + *pbuf = (u8) (value32 & 0xff); +} + +void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 efuse_tbl[rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]]; + u8 rtemp8[1]; + u16 efuse_addr = 0; + u8 offset, wren; + u8 u1temp = 0; + u16 i; + u16 j; + const u16 efuse_max_section = + rtlpriv->cfg->maps[EFUSE_MAX_SECTION_MAP]; + const u32 efuse_real_content_len = + rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE]; + u16 efuse_word[efuse_max_section][EFUSE_MAX_WORD_UNIT]; + u16 efuse_utilized = 0; + u8 efuse_usage; + + if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) { + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("read_efuse(): Invalid offset(%#x) with read " + "bytes(%#x)!!\n", _offset, _size_byte)); + return; + } + + for (i = 0; i < efuse_max_section; i++) + for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) + efuse_word[i][j] = 0xFFFF; + + read_efuse_byte(hw, efuse_addr, rtemp8); + if (*rtemp8 != 0xFF) { + efuse_utilized++; + RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, + ("Addr=%d\n", efuse_addr)); + efuse_addr++; + } + + while ((*rtemp8 != 0xFF) && (efuse_addr < efuse_real_content_len)) { + /* Check PG header for section num. */ + if((*rtemp8 & 0x1F ) == 0x0F) {/* extended header */ + u1temp =( (*rtemp8 & 0xE0) >> 5); + read_efuse_byte(hw, efuse_addr, rtemp8); + + if((*rtemp8 & 0x0F) == 0x0F) { + efuse_addr++; + read_efuse_byte(hw, efuse_addr, rtemp8); + + if (*rtemp8 != 0xFF && + (efuse_addr < efuse_real_content_len)) { + efuse_addr++; + } + continue; + } else { + offset = ((*rtemp8 & 0xF0) >> 1) | u1temp; + wren = (*rtemp8 & 0x0F); + efuse_addr++; + } + } else { + offset = ((*rtemp8 >> 4) & 0x0f); + wren = (*rtemp8 & 0x0f); + } + + if (offset < efuse_max_section) { + RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, + ("offset-%d Worden=%x\n", offset, wren)); + + for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { + if (!(wren & 0x01)) { + RTPRINT(rtlpriv, FEEPROM, + EFUSE_READ_ALL, ("Addr=%d\n", + efuse_addr)); + + read_efuse_byte(hw, efuse_addr, rtemp8); + efuse_addr++; + efuse_utilized++; + efuse_word[offset][i] = (*rtemp8 & + 0xff); + + if (efuse_addr >= + efuse_real_content_len) + break; + + RTPRINT(rtlpriv, FEEPROM, + EFUSE_READ_ALL, ("Addr=%d\n", + efuse_addr)); + + read_efuse_byte(hw, efuse_addr, rtemp8); + efuse_addr++; + efuse_utilized++; + efuse_word[offset][i] |= + (((u16) * rtemp8 << 8) & 0xff00); + + if (efuse_addr >= efuse_real_content_len) + break; + } + + wren >>= 1; + } + } + + RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, + ("Addr=%d\n", efuse_addr)); + read_efuse_byte(hw, efuse_addr, rtemp8); + if (*rtemp8 != 0xFF && (efuse_addr < efuse_real_content_len)) { + efuse_utilized++; + efuse_addr++; + } + } + + for (i = 0; i < efuse_max_section; i++) { + for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) { + efuse_tbl[(i * 8) + (j * 2)] = + (efuse_word[i][j] & 0xff); + efuse_tbl[(i * 8) + ((j * 2) + 1)] = + ((efuse_word[i][j] >> 8) & 0xff); + } + } + + for (i = 0; i < _size_byte; i++) + pbuf[i] = efuse_tbl[_offset + i]; + + rtlefuse->efuse_usedbytes = efuse_utilized; + efuse_usage = (u8) ((efuse_utilized * 100) / efuse_real_content_len); + rtlefuse->efuse_usedpercentage = efuse_usage; + 
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES, + (u8 *) & efuse_utilized); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE, + (u8 *) & efuse_usage); +} + +bool efuse_shadow_update_chk(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 section_idx, i, Base; + u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used; + bool bwordchanged, bresult = true; + + for (section_idx = 0; section_idx < 16; section_idx++) { + Base = section_idx * 8; + bwordchanged = false; + + for (i = 0; i < 8; i = i + 2) { + if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] != + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) || + (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] != + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i + + 1])) { + words_need++; + bwordchanged = true; + } + } + + if (bwordchanged == true) + hdr_num++; + } + + totalbytes = hdr_num + words_need * 2; + efuse_used = rtlefuse->efuse_usedbytes; + + if ((totalbytes + efuse_used) >= (EFUSE_MAX_SIZE - + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) + bresult = false; + + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("efuse_shadow_update_chk(): totalbytes(%#x), " + "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n", + totalbytes, hdr_num, words_need, efuse_used)); + + return bresult; +} + +void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, + u16 offset, u32 *value) +{ + if (type == 1) + efuse_shadow_read_1byte(hw, offset, (u8 *) value); + else if (type == 2) + efuse_shadow_read_2byte(hw, offset, (u16 *) value); + else if (type == 4) + efuse_shadow_read_4byte(hw, offset, (u32 *) value); + +} + +void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset, + u32 value) +{ + if (type == 1) + efuse_shadow_write_1byte(hw, offset, (u8) value); + else if (type == 2) + efuse_shadow_write_2byte(hw, offset, (u16) value); + else if (type == 4) + efuse_shadow_write_4byte(hw, offset, (u32) value); + +} + +bool efuse_shadow_update(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u16 i, offset, base; + u8 word_en = 0x0F; + u8 first_pg = false; + + RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n")); + + if (!efuse_shadow_update_chk(hw)) { + efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); + memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); + + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("efuse out of capacity!!\n")); + return false; + } + efuse_power_switch(hw, true, true); + + for (offset = 0; offset < 16; offset++) { + + word_en = 0x0F; + base = offset * 8; + + for (i = 0; i < 8; i++) { + if (first_pg == true) { + + word_en &= ~(BIT(i / 2)); + + rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] = + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]; + } else { + + if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] != + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) { + word_en &= ~(BIT(i / 2)); + + rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] = + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]; + } + } + } + + if (word_en != 0x0F) { + u8 tmpdata[8]; + memcpy(tmpdata, (&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base]), 8); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, + ("U-efuse\n"), tmpdata, 8); + + if (!efuse_pg_packet_write(hw, (u8) offset, word_en, + tmpdata)) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("PG section(%#x) fail!!\n", offset)); + break; + } + } + + } + + efuse_power_switch(hw, true, false); + 
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); + + memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); + + RT_TRACE(COMP_EFUSE, DBG_LOUD, ("\n")); + return true; +} + +void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + if (rtlefuse->autoload_failflag == true) { + memset((&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), + 0xFF, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); + } else { + efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]); + } + + memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0], + &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]); + +} +//EXPORT_SYMBOL(rtl_efuse_shadow_map_update); + +void efuse_force_write_vendor_Id(struct ieee80211_hw *hw) +{ + u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF }; + + efuse_power_switch(hw, true, true); + + efuse_pg_packet_write(hw, 1, 0xD, tmpdata); + + efuse_power_switch(hw, true, false); + +} + +void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx) +{ +} + +static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, + u16 offset, u8 *value) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset]; +} + +static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, + u16 offset, u16 *value) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset]; + *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8; + +} + +static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, + u16 offset, u32 *value) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset]; + *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8; + *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16; + *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24; +} + +static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, + u16 offset, u8 value) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value; +} + +static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, + u16 offset, u16 value) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF; + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8; + +} + +static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, + u16 offset, u32 value) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = + (u8) (value & 0x000000FF); + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = + (u8) ((value >> 8) & 0x0000FF); + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] = + (u8) ((value >> 16) & 0x00FF); + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] = + (u8) ((value >> 24) & 0xFF); + +} + +int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmpidx = 0; + int bresult; + + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, + (u8) (addr & 0xff)); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2, + ((u8) ((addr >> 8) & 0x03)) | + (rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 2) & + 0xFC)); + + rtl_write_byte(rtlpriv, 
rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72); + + while (!(0x80 & rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3)) + && (tmpidx < 100)) { + tmpidx++; + } + + if (tmpidx < 100) { + *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); + bresult = true; + } else { + *data = 0xff; + bresult = false; + } + return bresult; +} +//EXPORT_SYMBOL(efuse_one_byte_read); + +static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmpidx = 0; + bool bresult; + + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("Addr = %x Data=%x\n", addr, data)); + + rtl_write_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff)); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2, + (rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + + 2) & 0xFC) | (u8) ((addr >> 8) & 0x03)); + + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2); + + while ((0x80 & rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_CTRL] + 3)) + && (tmpidx < 100)) { + tmpidx++; + } + + if (tmpidx < 100) + bresult = true; + else + bresult = false; + + return bresult; +} + +static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + efuse_power_switch(hw, false, true); + read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse); + efuse_power_switch(hw, false, false); +} + +static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, + u8 efuse_data, u8 offset, u8 *tmpdata, + u8 *readstate) +{ + bool bdataempty = true; + u8 hoffset; + u8 tmpidx; + u8 hworden; + u8 word_cnts; + + hoffset = (efuse_data >> 4) & 0x0F; + hworden = efuse_data & 0x0F; + word_cnts = efuse_calculate_word_cnts(hworden); + + if (hoffset == offset) { + for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) { + if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx, + &efuse_data)) { + tmpdata[tmpidx] = efuse_data; + if (efuse_data != 0xff) + bdataempty = true; + } + } + + if (bdataempty == true) { + *readstate = PG_STATE_DATA; + } else { + *efuse_addr = *efuse_addr + (word_cnts * 2) + 1; + *readstate = PG_STATE_HEADER; + } + + } else { + *efuse_addr = *efuse_addr + (word_cnts * 2) + 1; + *readstate = PG_STATE_HEADER; + } +} + +static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) +{ + u8 readstate = PG_STATE_HEADER; + + bool bcontinual = true; + + u8 efuse_data, word_cnts = 0; + u16 efuse_addr = 0; + u8 hworden = 0; + u8 tmpdata[8]; + + if (data == NULL) + return false; + if (offset > 15) + return false; + + memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); + memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8)); + + while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) { + if (readstate & PG_STATE_HEADER) { + if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) + && (efuse_data != 0xFF)) + efuse_read_data_case1(hw, &efuse_addr, efuse_data, offset, + tmpdata, &readstate); + else + bcontinual = false; + } else if (readstate & PG_STATE_DATA) { + efuse_word_enable_data_read(hworden, tmpdata, data); + efuse_addr = efuse_addr + (word_cnts * 2) + 1; + readstate = PG_STATE_HEADER; + } + + } + + if ((data[0] == 0xff) && (data[1] == 0xff) && + (data[2] == 0xff) && (data[3] == 0xff) && + (data[4] == 0xff) && (data[5] == 0xff) && + (data[6] == 0xff) && (data[7] == 0xff)) + return false; + else + return true; + +} + +static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, + u8 efuse_data, u8 
offset, int *bcontinual, + u8 *write_state, struct pgpkt_struct *target_pkt, + int *repeat_times, int *bresult, u8 word_en) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct pgpkt_struct tmp_pkt; + int bdataempty = true; + u8 originaldata[8 * sizeof(u8)]; + u8 badworden = 0x0F; + u8 match_word_en, tmp_word_en; + u8 tmpindex; + u8 tmp_header = efuse_data; + u8 tmp_word_cnts; + + tmp_pkt.offset = (tmp_header >> 4) & 0x0F; + tmp_pkt.word_en = tmp_header & 0x0F; + tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); + + if (tmp_pkt.offset != target_pkt->offset) { + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; + *write_state = PG_STATE_HEADER; + } else { + for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) { + if (efuse_one_byte_read(hw, + (*efuse_addr + 1 + tmpindex), + &efuse_data) && (efuse_data != 0xFF)) + bdataempty = false; + } + + if (bdataempty == false) { + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; + *write_state = PG_STATE_HEADER; + } else { + match_word_en = 0x0F; + if (!((target_pkt->word_en & BIT(0)) | + (tmp_pkt.word_en & BIT(0)))) + match_word_en &= (~BIT(0)); + + if (!((target_pkt->word_en & BIT(1)) | + (tmp_pkt.word_en & BIT(1)))) + match_word_en &= (~BIT(1)); + + if (!((target_pkt->word_en & BIT(2)) | + (tmp_pkt.word_en & BIT(2)))) + match_word_en &= (~BIT(2)); + + if (!((target_pkt->word_en & BIT(3)) | + (tmp_pkt.word_en & BIT(3)))) + match_word_en &= (~BIT(3)); + + if ((match_word_en & 0x0F) != 0x0F) { + badworden = efuse_word_enable_data_write(hw, + *efuse_addr + 1, + tmp_pkt.word_en, + target_pkt->data); + + if (0x0F != (badworden & 0x0F)) { + u8 reorg_offset = offset; + u8 reorg_worden = badworden; + efuse_pg_packet_write(hw, reorg_offset, + reorg_worden, + originaldata); + } + + tmp_word_en = 0x0F; + if ((target_pkt->word_en & BIT(0)) ^ + (match_word_en & BIT(0))) + tmp_word_en &= (~BIT(0)); + + if ((target_pkt->word_en & BIT(1)) ^ + (match_word_en & BIT(1))) + tmp_word_en &= (~BIT(1)); + + if ((target_pkt->word_en & BIT(2)) ^ + (match_word_en & BIT(2))) + tmp_word_en &= (~BIT(2)); + + if ((target_pkt->word_en & BIT(3)) ^ + (match_word_en & BIT(3))) + tmp_word_en &= (~BIT(3)); + + if ((tmp_word_en & 0x0F) != 0x0F) { + *efuse_addr = efuse_get_current_size(hw); + target_pkt->offset = offset; + target_pkt->word_en = tmp_word_en; + } else { + *bcontinual = false; + } + *write_state = PG_STATE_HEADER; + *repeat_times += 1; + if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) { + *bcontinual = false; + *bresult = false; + } + } else { + *efuse_addr += (2 * tmp_word_cnts) + 1; + target_pkt->offset = offset; + target_pkt->word_en = word_en; + *write_state = PG_STATE_HEADER; + } + } + } + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n")); +} + +static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, + int *bcontinual, u8 *write_state, + struct pgpkt_struct target_pkt, + int *repeat_times, int *bresult) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct pgpkt_struct tmp_pkt; + u8 pg_header; + u8 tmp_header; + u8 originaldata[8 * sizeof(u8)]; + u8 tmp_word_cnts; + u8 badworden = 0x0F; + + pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en; + efuse_one_byte_write(hw, *efuse_addr, pg_header); + efuse_one_byte_read(hw, *efuse_addr, &tmp_header); + + if (tmp_header == pg_header) { + *write_state = PG_STATE_DATA; + } else if (tmp_header == 0xFF) { + *write_state = PG_STATE_HEADER; + *repeat_times += 1; + if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) { + *bcontinual = false; + *bresult = false; 
+ } + } else { + tmp_pkt.offset = (tmp_header >> 4) & 0x0F; + tmp_pkt.word_en = tmp_header & 0x0F; + + tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en); + + memset(originaldata, 0xff, 8 * sizeof(u8)); + + if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) { + badworden = efuse_word_enable_data_write(hw, + *efuse_addr + 1, + tmp_pkt.word_en, + originaldata); + + if (0x0F != (badworden & 0x0F)) { + u8 reorg_offset = tmp_pkt.offset; + u8 reorg_worden = badworden; + efuse_pg_packet_write(hw, reorg_offset, + reorg_worden, + originaldata); + *efuse_addr = efuse_get_current_size(hw); + } else { + *efuse_addr = *efuse_addr + + (tmp_word_cnts * 2) + 1; + } + } else { + *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1; + } + + *write_state = PG_STATE_HEADER; + *repeat_times += 1; + if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) { + *bcontinual = false; + *bresult = false; + } + + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, + ("efuse PG_STATE_HEADER-2\n")); + } +} + +static int efuse_pg_packet_write(struct ieee80211_hw *hw, + u8 offset, u8 word_en, u8 *data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct pgpkt_struct target_pkt; + u8 write_state = PG_STATE_HEADER; + int bcontinual = true, bdataempty = true, bresult = true; + u16 efuse_addr = 0; + u8 efuse_data; + u8 target_word_cnts = 0; + u8 badworden = 0x0F; + static int repeat_times = 0; + + if (efuse_get_current_size(hw) >= (EFUSE_MAX_SIZE - + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) { + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, + ("efuse_pg_packet_write error \n")); + return false; + } + + target_pkt.offset = offset; + target_pkt.word_en = word_en; + + memset(target_pkt.data, 0xFF, 8 * sizeof(u8)); + + efuse_word_enable_data_read(word_en, data, target_pkt.data); + target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en); + + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n")); + + while (bcontinual && (efuse_addr < (EFUSE_MAX_SIZE - + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) { + + if (write_state == PG_STATE_HEADER) { + bdataempty = true; + badworden = 0x0F; + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, + ("efuse PG_STATE_HEADER\n")); + + if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) && + (efuse_data != 0xFF)) + efuse_write_data_case1(hw, &efuse_addr, + efuse_data, offset, + &bcontinual, + &write_state, + &target_pkt, + &repeat_times, &bresult, + word_en); + else + efuse_write_data_case2(hw, &efuse_addr, + &bcontinual, + &write_state, + target_pkt, + &repeat_times, + &bresult); + + } else if (write_state == PG_STATE_DATA) { + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, + ("efuse PG_STATE_DATA\n")); + badworden = 0x0f; + badworden = + efuse_word_enable_data_write(hw, efuse_addr + 1, + target_pkt.word_en, + target_pkt.data); + + if ((badworden & 0x0F) == 0x0F) { + bcontinual = false; + } else { + efuse_addr = + efuse_addr + (2 * target_word_cnts) + 1; + + target_pkt.offset = offset; + target_pkt.word_en = badworden; + target_word_cnts = + efuse_calculate_word_cnts(target_pkt. 
+ word_en); + write_state = PG_STATE_HEADER; + repeat_times++; + if (repeat_times > EFUSE_REPEAT_THRESHOLD_) { + bcontinual = false; + bresult = false; + } + RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, + ("efuse PG_STATE_HEADER-3\n")); + } + } + } + + if (efuse_addr >= (EFUSE_MAX_SIZE - + rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) { + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("efuse_addr(%#x) Out of size!!\n", efuse_addr)); + } + + return true; +} + +static void efuse_word_enable_data_read(u8 word_en, u8 * sourdata, + u8 *targetdata) +{ + if (!(word_en & BIT(0))) { + targetdata[0] = sourdata[0]; + targetdata[1] = sourdata[1]; + } + + if (!(word_en & BIT(1))) { + targetdata[2] = sourdata[2]; + targetdata[3] = sourdata[3]; + } + + if (!(word_en & BIT(2))) { + targetdata[4] = sourdata[4]; + targetdata[5] = sourdata[5]; + } + + if (!(word_en & BIT(3))) { + targetdata[6] = sourdata[6]; + targetdata[7] = sourdata[7]; + } +} + +static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw, + u16 efuse_addr, u8 word_en, u8 *data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 tmpaddr; + u16 start_addr = efuse_addr; + u8 badworden = 0x0F; + u8 tmpdata[8]; + + memset(tmpdata, 0xff, PGPKT_DATA_SIZE); + RT_TRACE(COMP_EFUSE, DBG_LOUD, + ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr)); + + if (!(word_en & BIT(0))) { + tmpaddr = start_addr; + efuse_one_byte_write(hw, start_addr++, data[0]); + efuse_one_byte_write(hw, start_addr++, data[1]); + + efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]); + efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]); + if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1])) + badworden &= (~BIT(0)); + } + + if (!(word_en & BIT(1))) { + tmpaddr = start_addr; + efuse_one_byte_write(hw, start_addr++, data[2]); + efuse_one_byte_write(hw, start_addr++, data[3]); + + efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]); + efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]); + if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3])) + badworden &= (~BIT(1)); + } + + if (!(word_en & BIT(2))) { + tmpaddr = start_addr; + efuse_one_byte_write(hw, start_addr++, data[4]); + efuse_one_byte_write(hw, start_addr++, data[5]); + + efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]); + efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]); + if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5])) + badworden &= (~BIT(2)); + } + + if (!(word_en & BIT(3))) { + tmpaddr = start_addr; + efuse_one_byte_write(hw, start_addr++, data[6]); + efuse_one_byte_write(hw, start_addr++, data[7]); + + efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]); + efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]); + if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7])) + badworden &= (~BIT(3)); + } + + return badworden; +} + +static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tempval; + u16 tmpV16; + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + if (pwrstate == true) + { + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69); + + // 1.2V Power: From VDDON with Power Cut(0x0000h[15]), defualt valid + tmpV16 = rtl_read_word(rtlpriv, + rtlpriv->cfg->maps[SYS_ISO_CTRL]); + + printk("SYS_ISO_CTRL=%04x.\n",tmpV16); + if( ! 
(tmpV16 & PWC_EV12V ) ){ + tmpV16 |= PWC_EV12V ; + //PlatformEFIOWrite2Byte(pAdapter,REG_SYS_ISO_CTRL,tmpV16); + } + // Reset: 0x0000h[28], default valid + tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]); + printk("SYS_FUNC_EN=%04x.\n",tmpV16); + if( !(tmpV16 & FEN_ELDR) ){ + tmpV16 |= FEN_ELDR ; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16); + } + + // Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid + tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK] ); + printk("SYS_CLK=%04x.\n",tmpV16); + if( (!(tmpV16 & LOADER_CLK_EN) ) ||(!(tmpV16 & ANA8M) ) ) + { + tmpV16 |= (LOADER_CLK_EN |ANA8M ) ; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_CLK], tmpV16); + } + + if(bwrite == true) + { + // Enable LDO 2.5V before read/write action + tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3); + printk("EFUSE_TEST=%04x.\n",tmpV16); + tempval &= ~(BIT(3) | BIT(4) |BIT(5) | BIT(6)); + tempval |= (VOLTAGE_V25 << 3); + tempval |= BIT(7); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, tempval); + } + } + else + { + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x00); + if(bwrite == true){ + // Disable LDO 2.5V after read/write action + tempval = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3); + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, (tempval & 0x7F)); + } + } + } + else + { + if (pwrstate == true && (rtlhal->hw_type != + HARDWARE_TYPE_RTL8192SE)) { + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE) + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], + 0x69); + + tmpV16 = rtl_read_word(rtlpriv, + rtlpriv->cfg->maps[SYS_ISO_CTRL]); + if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { + tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V]; + rtl_write_word(rtlpriv, + rtlpriv->cfg->maps[SYS_ISO_CTRL], + tmpV16); + } + + tmpV16 = rtl_read_word(rtlpriv, + rtlpriv->cfg->maps[SYS_FUNC_EN]); + if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) { + tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR]; + rtl_write_word(rtlpriv, + rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16); + } + + tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]); + if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) || + (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) { + tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] | + rtlpriv->cfg->maps[EFUSE_ANA8M]); + rtl_write_word(rtlpriv, + rtlpriv->cfg->maps[SYS_CLK], tmpV16); + } + } + + if (pwrstate == true) { + if (bwrite == true) { + tempval = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_TEST] + + 3); + + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) { + tempval &= 0x0F; + tempval |= (VOLTAGE_V25 << 4); + } + + rtl_write_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_TEST] + 3, + (tempval | 0x80)); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK], + 0x03); + } + + } else { + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE) + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0); + + if (bwrite == true) { + tempval = rtl_read_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_TEST] + + 3); + rtl_write_byte(rtlpriv, + rtlpriv->cfg->maps[EFUSE_TEST] + 3, + (tempval & 0x7F)); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CLK], + 0x02); + } + + } + } + +} + +static u16 efuse_get_current_size(struct ieee80211_hw *hw) +{ + int bcontinual = true; + u16 efuse_addr = 0; + u8 hoffset, hworden; + u8 efuse_data, word_cnts; + + while 
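/*
 * Editor's illustrative sketch -- not part of the driver patch.  The
 * surrounding efuse_get_current_size() walks packet headers from address 0,
 * skipping one header byte plus two bytes per written word, until the first
 * blank (0xFF) byte marks the next free write position.  A standalone
 * userspace model of the same walk over an in-memory map:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t efuse_used(const uint8_t *map, uint16_t size)
{
	uint16_t addr = 0;

	while (addr < size && map[addr] != 0xFF) {
		uint8_t word_en = map[addr] & 0x0F;
		unsigned int words = 0, i;

		for (i = 0; i < 4; i++)
			if (!(word_en & (1u << i)))
				words++;
		addr += (uint16_t)(words * 2 + 1);	/* header + payload */
	}
	return addr;
}

int main(void)
{
	/* one packet at offset 0 with words 0 and 1 written (word_en 0x0C) */
	uint8_t map[16] = { 0x0C, 1, 2, 3, 4, 0xFF };

	printf("next free address: %u\n",
	       (unsigned int)efuse_used(map, (uint16_t)sizeof(map)));
	return 0;
}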
(bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data) + && (efuse_addr < EFUSE_MAX_SIZE)) { + if (efuse_data != 0xFF) { + hoffset = (efuse_data >> 4) & 0x0F; + hworden = efuse_data & 0x0F; + word_cnts = efuse_calculate_word_cnts(hworden); + efuse_addr = efuse_addr + (word_cnts * 2) + 1; + } else { + bcontinual = false; + } + } + + return efuse_addr; +} + +static u8 efuse_calculate_word_cnts(u8 word_en) +{ + u8 word_cnts = 0; + if (!(word_en & BIT(0))) + word_cnts++; + if (!(word_en & BIT(1))) + word_cnts++; + if (!(word_en & BIT(2))) + word_cnts++; + if (!(word_en & BIT(3))) + word_cnts++; + return word_cnts; +} + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/efuse.h +++ linux-3.13.0/drivers/staging/rtl8821ae/efuse.h @@ -0,0 +1,130 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_EFUSE_H_ +#define __RTL_EFUSE_H_ + +#define EFUSE_IC_ID_OFFSET 506 + +/* +#define EFUSE_REAL_CONTENT_LEN 512 +#define EFUSE_MAP_LEN 128 +#define EFUSE_MAX_SECTION 16 +#define EFUSE_MAX_WORD_UNIT 4 +#define EFUSE_IC_ID_OFFSET 506 +*/ + +#define EFUSE_MAX_WORD_UNIT 4 + +#define EFUSE_INIT_MAP 0 +#define EFUSE_MODIFY_MAP 1 + +#define PG_STATE_HEADER 0x01 +#define PG_STATE_WORD_0 0x02 +#define PG_STATE_WORD_1 0x04 +#define PG_STATE_WORD_2 0x08 +#define PG_STATE_WORD_3 0x10 +#define PG_STATE_DATA 0x20 + +#define PG_SWBYTE_H 0x01 +#define PG_SWBYTE_L 0x02 + +#define _POWERON_DELAY_ +#define _PRE_EXECUTE_READ_CMD_ + +#define EFUSE_REPEAT_THRESHOLD_ 3 +#define EFUSE_ERROE_HANDLE 1 + +struct efuse_map { + u8 offset; + u8 word_start; + u8 byte_start; + u8 byte_cnts; +}; + +struct pgpkt_struct { + u8 offset; + u8 word_en; + u8 data[8]; +}; + +enum efuse_data_item { + EFUSE_CHIP_ID = 0, + EFUSE_LDO_SETTING, + EFUSE_CLK_SETTING, + EFUSE_SDIO_SETTING, + EFUSE_CCCR, + EFUSE_SDIO_MODE, + EFUSE_OCR, + EFUSE_F0CIS, + EFUSE_F1CIS, + EFUSE_MAC_ADDR, + EFUSE_EEPROM_VER, + EFUSE_CHAN_PLAN, + EFUSE_TXPW_TAB +}; + +enum { + VOLTAGE_V25 = 0x03, + LDOE25_SHIFT = 28, +}; + +struct efuse_priv { + u8 id[2]; + u8 ldo_setting[2]; + u8 clk_setting[2]; + u8 cccr; + u8 sdio_mode; + u8 ocr[3]; + u8 cis0[17]; + u8 cis1[48]; + u8 mac_addr[6]; + u8 eeprom_verno; + u8 channel_plan; + u8 tx_power_b[14]; + u8 tx_power_g[14]; +}; + +extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); +extern void efuse_initialize(struct ieee80211_hw *hw); +extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address); +extern int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data); +extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value); +extern void read_efuse(struct ieee80211_hw *hw, u16 _offset, + u16 _size_byte, u8 * pbuf); +extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, + u16 offset, u32 * value); +extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, + u16 offset, u32 value); +extern bool efuse_shadow_update(struct ieee80211_hw *hw); +extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw); +extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); +extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); +extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/pci.c +++ linux-3.13.0/drivers/staging/rtl8821ae/pci.c @@ -0,0 +1,2549 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. 
+ * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "core.h" +#include "wifi.h" +#include "pci.h" +#include "base.h" +#include "ps.h" +#include "efuse.h" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +#include +#endif + +static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { + INTEL_VENDOR_ID, + ATI_VENDOR_ID, + AMD_VENDOR_ID, + SIS_VENDOR_ID +}; + +static const u8 ac_to_hwq[] = { + VO_QUEUE, + VI_QUEUE, + BE_QUEUE, + BK_QUEUE +}; + +u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u16 fc = rtl_get_fc(skb); + u8 queue_index = skb_get_queue_mapping(skb); + + if (unlikely(ieee80211_is_beacon(fc))) + return BEACON_QUEUE; + if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) + return MGNT_QUEUE; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) + if (ieee80211_is_nullfunc(fc)) + return HIGH_QUEUE; + + return ac_to_hwq[queue_index]; +} + +/* Update PCI dependent default settings*/ +static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; + u8 init_aspm; + + ppsc->reg_rfps_level = 0; + ppsc->b_support_aspm = 0; + + /*Update PCI ASPM setting */ + ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; + switch (rtlpci->const_pci_aspm) { + case 0: + /*No ASPM */ + break; + + case 1: + /*ASPM dynamically enabled/disable. */ + ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM; + break; + + case 2: + /*ASPM with Clock Req dynamically enabled/disable. */ + ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM | + RT_RF_OFF_LEVL_CLK_REQ); + break; + + case 3: + /* + * Always enable ASPM and Clock Req + * from initialization to halt. + * */ + ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM); + ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM | + RT_RF_OFF_LEVL_CLK_REQ); + break; + + case 4: + /* + * Always enable ASPM without Clock Req + * from initialization to halt. + * */ + ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM | + RT_RF_OFF_LEVL_CLK_REQ); + ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM; + break; + } + + ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC; + + /*Update Radio OFF setting */ + switch (rtlpci->const_hwsw_rfoff_d3) { + case 1: + if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM) + ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM; + break; + + case 2: + if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM) + ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM; + ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC; + break; + + case 3: + ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3; + break; + } + + /*Set HW definition to determine if it supports ASPM. */ + switch (rtlpci->const_support_pciaspm) { + case 0:{ + /*Not support ASPM. */ + bool b_support_aspm = false; + ppsc->b_support_aspm = b_support_aspm; + break; + } + case 1:{ + /*Support ASPM. 
*/ + bool b_support_aspm = true; + bool b_support_backdoor = true; + ppsc->b_support_aspm = b_support_aspm; + + /*if(priv->oem_id == RT_CID_TOSHIBA && + !priv->ndis_adapter.amd_l1_patch) + b_support_backdoor = false; */ + + ppsc->b_support_backdoor = b_support_backdoor; + + break; + } + case 2: + /*ASPM value set by chipset. */ + if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) { + bool b_support_aspm = true; + ppsc->b_support_aspm = b_support_aspm; + } + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + + /* toshiba aspm issue, toshiba will set aspm selfly + * so we should not set aspm in driver */ + pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm); + if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE && + init_aspm == 0x43) + ppsc->b_support_aspm = false; +} + +static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw, + u8 value) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool bresult = false; + + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) + value |= 0x40; + + pci_write_config_byte(rtlpci->pdev, 0x80, value); + + return bresult; +} + +/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/ +static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool bresult = false; + + pci_write_config_byte(rtlpci->pdev, 0x81, value); + bresult = true; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) + udelay(100); + + return bresult; +} + +/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/ +static void rtl_pci_disable_aspm(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; + u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport; + u8 num4bytes = pcipriv->ndis_adapter.num4bytes; + /*Retrieve original configuration settings. */ + u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg; + u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter. + pcibridge_linkctrlreg; + u16 aspmlevel = 0; + + if (!ppsc->b_support_aspm) + return; + + if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) { + RT_TRACE(COMP_POWER, DBG_TRACE, + ("PCI(Bridge) UNKNOWN.\n")); + + return; + } + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) { + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ); + _rtl_pci_switch_clk_req(hw, 0x0); + } + + if (1) { + /*for promising device will in L0 state after an I/O. */ + u8 tmp_u1b; + pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b); + } + + /*Set corresponding value. */ + aspmlevel |= BIT(0) | BIT(1); + linkctrl_reg &= ~aspmlevel; + pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1)); + + _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg); + udelay(50); + + /*4 Disable Pci Bridge ASPM */ + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, + pcicfg_addrport + (num4bytes << 2)); + rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg); + + udelay(50); + +} + +/* + *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for + *power saving We should follow the sequence to enable + *RTL8192SE first then enable Pci Bridge ASPM + *or the system will show bluescreen. 
+ */ +static void rtl_pci_enable_aspm(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; + u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport; + u8 num4bytes = pcipriv->ndis_adapter.num4bytes; + u16 aspmlevel; + u8 u_pcibridge_aspmsetting; + u8 u_device_aspmsetting; + + if (!ppsc->b_support_aspm) + return; + + if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) { + RT_TRACE(COMP_POWER, DBG_TRACE, + ("PCI(Bridge) UNKNOWN.\n")); + return; + } + + /*4 Enable Pci Bridge ASPM */ + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, + pcicfg_addrport + (num4bytes << 2)); + + u_pcibridge_aspmsetting = + pcipriv->ndis_adapter.pcibridge_linkctrlreg | + rtlpci->const_hostpci_aspm_setting; + + if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) + u_pcibridge_aspmsetting &= ~BIT(0); + + rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting); + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("PlatformEnableASPM(): Write reg[%x] = %x\n", + (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10), + u_pcibridge_aspmsetting)); + + udelay(50); + + /*Get ASPM level (with/without Clock Req) */ + aspmlevel = rtlpci->const_devicepci_aspm_setting; + u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg; + + /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/ + /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */ + + u_device_aspmsetting |= aspmlevel; + + _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting); + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) { + _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level & + RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ); + } + udelay(100); +} + +static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport; + + bool status = false; + u8 offset_e0; + unsigned offset_e4; + + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, + pcicfg_addrport + 0xE0); + rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0); + + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, + pcicfg_addrport + 0xE0); + rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0); + + if (offset_e0 == 0xA0) { + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, + pcicfg_addrport + 0xE4); + rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4); + if (offset_e4 & BIT(23)) + status = true; + } + + return status; +} + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)) +static u8 _rtl_pci_get_pciehdr_offset(struct ieee80211_hw *hw) +{ + u8 capability_offset; + u8 num4bytes = 0x34/4; + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u32 pcicfg_addr_port = (pcipriv->ndis_adapter.pcibridge_busnum << 16)| + (pcipriv->ndis_adapter.pcibridge_devnum << 11)| + (pcipriv->ndis_adapter.pcibridge_funcnum << 8)| + (1 << 31); + + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS , pcicfg_addr_port + + (num4bytes << 2)); + rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &capability_offset); + while (capability_offset != 0) { + struct rtl_pci_capabilities_header capability_hdr; + + num4bytes = capability_offset / 4; + /* Read the header of the capability at this offset. + * If the retrieved capability is not the power management + * capability that we are looking for, follow the link to + * the next capability and continue looping. 
+ */ + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS , + pcicfg_addr_port + + (num4bytes << 2)); + rtl_pci_raw_read_port_ushort(PCI_CONF_DATA, + (u16*)&capability_hdr); + /* Found the PCI express capability. */ + if (capability_hdr.capability_id == + PCI_CAPABILITY_ID_PCI_EXPRESS) + break; + else + capability_offset = capability_hdr.next; + } + return capability_offset; +} +#endif +/**/ + +bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, + struct rtl_priv **buddy_priv) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + bool b_find_buddy_priv = false; + struct rtl_priv *temp_priv = NULL; + struct rtl_pci_priv *temp_pcipriv = NULL; + + if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { + list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list, + list) { + if (temp_priv) { + temp_pcipriv = + (struct rtl_pci_priv *)temp_priv->priv; + RT_TRACE(COMP_INIT, DBG_LOUD, + (("pcipriv->ndis_adapter.funcnumber %x \n"), + pcipriv->ndis_adapter.funcnumber)); + RT_TRACE(COMP_INIT, DBG_LOUD, + (("temp_pcipriv->ndis_adapter.funcnumber %x \n"), + temp_pcipriv->ndis_adapter.funcnumber)); + + if ((pcipriv->ndis_adapter.busnumber == + temp_pcipriv->ndis_adapter.busnumber) && + (pcipriv->ndis_adapter.devnumber == + temp_pcipriv->ndis_adapter.devnumber) && + (pcipriv->ndis_adapter.funcnumber != + temp_pcipriv->ndis_adapter.funcnumber)) { + b_find_buddy_priv = true; + break; + } + } + } + } + + RT_TRACE(COMP_INIT, DBG_LOUD, + (("b_find_buddy_priv %d \n"), b_find_buddy_priv)); + + if (b_find_buddy_priv) + *buddy_priv = temp_priv; + + return b_find_buddy_priv; +} + +void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset; + u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport; + u8 linkctrl_reg; + u8 num4bbytes; + + num4bbytes = (capabilityoffset + 0x10) / 4; + + /*Read Link Control Register */ + rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS, + pcicfg_addrport + (num4bbytes << 2)); + rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg); + + pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg; +} + +static void rtl_pci_parse_configuration(struct pci_dev *pdev, + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + + u8 tmp; + int pos; + u8 linkctrl_reg; + + /*Link Control Register */ + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg); + pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg; + + RT_TRACE(COMP_INIT, DBG_TRACE, + ("Link Control Register =%x\n", + pcipriv->ndis_adapter.linkctrl_reg)); + + pci_read_config_byte(pdev, 0x98, &tmp); + tmp |= BIT(4); + pci_write_config_byte(pdev, 0x98, tmp); + + tmp = 0x17; + pci_write_config_byte(pdev, 0x70f, tmp); +} + +static void rtl_pci_init_aspm(struct ieee80211_hw *hw) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + _rtl_pci_update_default_setting(hw); + + if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) { + /*Always enable ASPM & Clock Req. 
*/ + rtl_pci_enable_aspm(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM); + } + +} + +static void _rtl_pci_io_handler_init(struct device *dev, + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->io.dev = dev; + + rtlpriv->io.write8_async = pci_write8_async; + rtlpriv->io.write16_async = pci_write16_async; + rtlpriv->io.write32_async = pci_write32_async; + + rtlpriv->io.read8_sync = pci_read8_sync; + rtlpriv->io.read16_sync = pci_read16_sync; + rtlpriv->io.read32_sync = pci_read32_sync; + +} + +static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_tcb_desc *tcb_desc, + u8 tid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 additionlen = FCS_LEN; + struct sk_buff *next_skb; + + /* here open is 4, wep/tkip is 8, aes is 12*/ + if (info->control.hw_key) + additionlen += info->control.hw_key->icv_len; + + /* The most skb num is 6 */ + tcb_desc->empkt_num = 0; + spin_lock_bh(&rtlpriv->locks.waitq_lock); + skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) { + struct ieee80211_tx_info *next_info = + IEEE80211_SKB_CB(next_skb); + if (next_info->flags & IEEE80211_TX_CTL_AMPDU) { + tcb_desc->empkt_len[tcb_desc->empkt_num] = + next_skb->len + additionlen; + tcb_desc->empkt_num++; + } else { + break; + } + + if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid], + next_skb)) + break; + + if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num) + break; + } + spin_unlock_bh(&rtlpriv->locks.waitq_lock); + return true; +} + +/* just for early mode now */ +static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct sk_buff *skb = NULL; + struct ieee80211_tx_info *info = NULL; + int tid; /* should be int */ + + if (!rtlpriv->rtlhal.b_earlymode_enable) + return; + if (rtlpriv->dm.supp_phymode_switch && + (rtlpriv->easy_concurrent_ctl.bswitch_in_process || + (rtlpriv->buddy_priv && + rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process))) + return; + /* we juse use em for BE/BK/VI/VO */ + for (tid = 7; tid >= 0; tid--) { + u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)]; + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + while (!mac->act_scanning && + rtlpriv->psc.rfpwr_state == ERFON) { + struct rtl_tcb_desc tcb_desc; + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + + spin_lock_bh(&rtlpriv->locks.waitq_lock); + if (!skb_queue_empty(&mac->skb_waitq[tid]) && + (ring->entries - skb_queue_len(&ring->queue) > + rtlhal->max_earlymode_num)) { + skb = skb_dequeue(&mac->skb_waitq[tid]); + } else { + spin_unlock_bh(&rtlpriv->locks.waitq_lock); + break; + } + spin_unlock_bh(&rtlpriv->locks.waitq_lock); + + /* Some macaddr can't do early mode. 
like + * multicast/broadcast/no_qos data */ + info = IEEE80211_SKB_CB(skb); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + _rtl_pci_update_earlymode_info(hw, skb, + &tcb_desc, tid); + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); +#else +/**/ + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); +#endif +/**/ + } + } +} + +static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; + + while (skb_queue_len(&ring->queue)) { + struct sk_buff *skb; + struct ieee80211_tx_info *info; + u16 fc; + u8 tid; + u8 *entry; + + + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc[ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); + + if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx)) + return; + + ring->idx = (ring->idx + 1) % ring->entries; + + skb = __skb_dequeue(&ring->queue); + + pci_unmap_single(rtlpci->pdev, + le32_to_cpu(rtlpriv->cfg->ops-> + get_desc((u8 *) entry, true, + HW_DESC_TXBUFF_ADDR)), + skb->len, PCI_DMA_TODEVICE); + + /* remove early mode header */ + if(rtlpriv->rtlhal.b_earlymode_enable) + skb_pull(skb, EM_HDR_LEN); + + RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE, + ("new ring->idx:%d, " + "free: skb_queue_len:%d, free: seq:%d\n", + ring->idx, + skb_queue_len(&ring->queue), + *(u16 *) (skb->data + 22))); + + if(prio == TXCMD_QUEUE) { + dev_kfree_skb(skb); + goto tx_status_ok; + + } + + /* for sw LPS, just after NULL skb send out, we can + * sure AP kown we are sleeped, our we should not let + * rf to sleep*/ + fc = rtl_get_fc(skb); + if (ieee80211_is_nullfunc(fc)) { + if(ieee80211_has_pm(fc)) { + rtlpriv->mac80211.offchan_deley = true; + rtlpriv->psc.state_inap = 1; + } else { + rtlpriv->psc.state_inap = 0; + } + } + if (ieee80211_is_action(fc)) { + struct ieee80211_mgmt_compat *action_frame = + (struct ieee80211_mgmt_compat *)skb->data; + if (action_frame->u.action.u.ht_smps.action == + WLAN_HT_ACTION_SMPS) { + dev_kfree_skb(skb); + goto tx_status_ok; + } + } + + /* update tid tx pkt num */ + tid = rtl_get_tid(skb); + if (tid <= 7) + rtlpriv->link_info.tidtx_inperiod[tid]++; + + info = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(info); + + info->flags |= IEEE80211_TX_STAT_ACK; + /*info->status.rates[0].count = 1; */ + + ieee80211_tx_status_irqsafe(hw, skb); + + if ((ring->entries - skb_queue_len(&ring->queue)) + == 2) { + + RT_TRACE(COMP_ERR, DBG_LOUD, + ("more desc left, wake" + "skb_queue@%d,ring->idx = %d," + "skb_queue_len = 0x%d\n", + prio, ring->idx, + skb_queue_len(&ring->queue))); + + ieee80211_wake_queue(hw, + skb_get_queue_mapping + (skb)); + } +tx_status_ok: + skb = NULL; + } + + if (((rtlpriv->link_info.num_rx_inperiod + + rtlpriv->link_info.num_tx_inperiod) > 8) || + (rtlpriv->link_info.num_rx_inperiod > 2)) { + rtl_lps_leave(hw); + } +} + +static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, + u8 *entry, int rxring_idx, int desc_idx) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 bufferaddress; + u8 tmp_one = 1; + struct sk_buff *skb; + + skb = dev_alloc_skb(rtlpci->rxbuffersize); + if (!skb) + return 0; + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; + + /* just set skb->cb to mapping addr + * for pci_unmap_single use */ + *((dma_addr_t *) skb->cb) = pci_map_single(rtlpci->pdev, + skb_tail_pointer(skb), 
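/*
 * Editor's illustrative sketch -- not part of the driver patch.  In
 * _rtl_pci_tx_isr() above, completed descriptors are reclaimed by advancing
 * ring->idx modulo the ring size, and the corresponding mac80211 queue is
 * woken again once exactly two descriptors have become free.  The userspace
 * model below keeps just that bookkeeping.
 */
#include <stdint.h>
#include <stdio.h>

struct ring_model {
	unsigned int entries;	/* descriptors in the ring */
	unsigned int idx;	/* next descriptor to reclaim */
	unsigned int queued;	/* frames still owned by the hardware */
};

static int reclaim_one(struct ring_model *r)
{
	if (!r->queued)
		return 0;			/* nothing to complete */
	r->idx = (r->idx + 1) % r->entries;	/* same wrap rule as ring->idx */
	r->queued--;
	return (r->entries - r->queued) == 2;	/* wake threshold used above */
}

int main(void)
{
	struct ring_model r = { .entries = 8, .idx = 6, .queued = 7 };
	int wake = reclaim_one(&r);

	printf("wake=%d idx=%u\n", wake, r.idx);	/* wake=1 idx=7 */
	return 0;
}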
rtlpci->rxbuffersize, + PCI_DMA_FROMDEVICE); + bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb)); + if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) + return 0; + if (rtlpriv->use_new_trx_flow) { + rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, + HW_DESC_RX_PREPARE, + (u8 *) & bufferaddress); + } else { + rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, + HW_DESC_RXBUFF_ADDR, + (u8 *) & bufferaddress); + rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, + HW_DESC_RXPKT_LEN, + (u8 *) & rtlpci->rxbuffersize); + rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, + HW_DESC_RXOWN, + (u8 *) & tmp_one); + } + + return 1; +} + +/* inorder to receive 8K AMSDU we have set skb to + * 9100bytes in init rx ring, but if this packet is + * not a AMSDU, this so big packet will be sent to + * TCP/IP directly, this cause big packet ping fail + * like: "ping -s 65507", so here we will realloc skb + * based on the true size of packet, I think mac80211 + * do it will be better, but now mac80211 haven't */ + +/* but some platform will fail when alloc skb sometimes. + * in this condition, we will send the old skb to + * mac80211 directly, this will not cause any other + * issues, but only be losted by TCP/IP */ +static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw, + struct sk_buff *skb, struct ieee80211_rx_status rx_status) +{ + if (unlikely(!rtl_action_proc(hw, skb, false))) { + dev_kfree_skb_any(skb); + } else { + struct sk_buff *uskb = NULL; + u8 *pdata; + + uskb = dev_alloc_skb(skb->len + 128); + if (likely(uskb)) { + memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, + sizeof(rx_status)); + pdata = (u8 *)skb_put(uskb, skb->len); + memcpy(pdata, skb->data, skb->len); + dev_kfree_skb_any(skb); + + ieee80211_rx_irqsafe(hw, uskb); + } else { + ieee80211_rx_irqsafe(hw, skb); + } + } +} + +/*hsisr interrupt handler*/ +static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR], + rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) | + rtlpci->sys_irq_mask); + + +} +static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + int rxring_idx = RTL_PCI_RX_MPDU_QUEUE; + + struct ieee80211_rx_status rx_status = { 0 }; + unsigned int count = rtlpci->rxringcount; + bool unicast = false; + u8 hw_queue = 0; + unsigned int rx_remained_cnt; + u8 own; + u8 tmp_one; + + struct rtl_stats status = { + .signal = 0, + .noise = -98, + .rate = 0, + }; + + /*RX NORMAL PKT */ + while (count--) { + struct ieee80211_hdr *hdr; + u16 fc; + u16 len; + /*rx buffer descriptor */ + struct rtl_rx_buffer_desc *buffer_desc = NULL; + /*if use new trx flow, it means wifi info */ + struct rtl_rx_desc *pdesc = NULL; + /*rx pkt */ + struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ + rtlpci->rx_ring[rxring_idx].idx]; + + if (rtlpriv->use_new_trx_flow) { + rx_remained_cnt = + rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw, + hw_queue); + if (rx_remained_cnt < 1) + return; + + } else { /* rx descriptor */ + pdesc = &rtlpci->rx_ring[rxring_idx].desc[ + rtlpci->rx_ring[rxring_idx].idx]; + + own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, + false, + HW_DESC_OWN); + if (own) /* wait data to be filled by hardware */ + return; + } + + /* Get here means: data is filled already*/ + /* AAAAAAttention !!! 
+ * We can NOT access 'skb' before 'pci_unmap_single' */ + pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb), + rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + + if (rtlpriv->use_new_trx_flow) { + buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[ + rtlpci->rx_ring[rxring_idx].idx]; + /*means rx wifi info*/ + pdesc = (struct rtl_rx_desc *)skb->data; + } + + rtlpriv->cfg->ops->query_rx_desc(hw, &status, + &rx_status, (u8 *) pdesc, skb); + + if (rtlpriv->use_new_trx_flow) + rtlpriv->cfg->ops->rx_check_dma_ok(hw, + (u8 *)buffer_desc, + hw_queue); + + + len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false, + HW_DESC_RXPKT_LEN); + + if (skb->end - skb->tail > len) { + skb_put(skb, len); + if (rtlpriv->use_new_trx_flow) + skb_reserve(skb, status.rx_drvinfo_size + + status.rx_bufshift + 24); + else + skb_reserve(skb, status.rx_drvinfo_size + + status.rx_bufshift); + + } else { + printk("skb->end - skb->tail = %d, len is %d\n", + skb->end - skb->tail, len); + break; + } + + rtlpriv->cfg->ops->rx_command_packet_handler(hw, status, skb); + + /* + *NOTICE This can not be use for mac80211, + *this is done in mac80211 code, + *if you done here sec DHCP will fail + *skb_trim(skb, skb->len - 4); + */ + + hdr = rtl_get_hdr(skb); + fc = rtl_get_fc(skb); + + if (!status.b_crc && !status.b_hwerror) { + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, + sizeof(rx_status)); + + if (is_broadcast_ether_addr(hdr->addr1)) { + ;/*TODO*/ + } else if (is_multicast_ether_addr(hdr->addr1)) { + ;/*TODO*/ + } else { + unicast = true; + rtlpriv->stats.rxbytesunicast += skb->len; + } + + rtl_is_special_data(hw, skb, false); + + if (ieee80211_is_data(fc)) { + rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); + + if (unicast) + rtlpriv->link_info.num_rx_inperiod++; + } + + /* static bcn for roaming */ + rtl_beacon_statistic(hw, skb); + rtl_p2p_info(hw, (void*)skb->data, skb->len); + /* for sw lps */ + rtl_swlps_beacon(hw, (void*)skb->data, skb->len); + rtl_recognize_peer(hw, (void*)skb->data, skb->len); + if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) && + (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&& + (ieee80211_is_beacon(fc) || + ieee80211_is_probe_resp(fc))) { + dev_kfree_skb_any(skb); + } else { + _rtl_pci_rx_to_mac80211(hw, skb, rx_status); + } + } else { + dev_kfree_skb_any(skb); + } + if (rtlpriv->use_new_trx_flow) { + rtlpci->rx_ring[hw_queue].next_rx_rp += 1; + rtlpci->rx_ring[hw_queue].next_rx_rp %= + RTL_PCI_MAX_RX_COUNT; + + + rx_remained_cnt--; + if (1/*rx_remained_cnt == 0*/) { + rtl_write_word(rtlpriv, 0x3B4, + rtlpci->rx_ring[hw_queue].next_rx_rp); + } + } + if (((rtlpriv->link_info.num_rx_inperiod + + rtlpriv->link_info.num_tx_inperiod) > 8) || + (rtlpriv->link_info.num_rx_inperiod > 2)) { + rtl_lps_leave(hw); + } + + if (rtlpriv->use_new_trx_flow) { + _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, + rxring_idx, + rtlpci->rx_ring[rxring_idx].idx); + } else { + _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, + rtlpci->rx_ring[rxring_idx].idx); + + if (rtlpci->rx_ring[rxring_idx].idx == + rtlpci->rxringcount - 1) + rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, + false, + HW_DESC_RXERO, + (u8 *) & tmp_one); + } + rtlpci->rx_ring[rxring_idx].idx = + (rtlpci->rx_ring[rxring_idx].idx + 1) % + rtlpci->rxringcount; + } +} + +static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id) +{ + struct ieee80211_hw *hw = dev_id; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + unsigned long flags; + 
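	/*
	 * Editor's summary of this handler's flow: both interrupt mask
	 * registers (MAC_HIMR/MAC_HIMRE) are cleared first, the interrupt
	 * status is read and acknowledged through the per-chip
	 * interrupt_recognized() op, and a value of 0 or 0xffff is treated
	 * as a shared-IRQ or vanished-device wakeup and ignored.  Set bits
	 * are then dispatched in order: beacon events, per-queue
	 * TX-completion reclaim, RX processing, firmware C2H work (8723AE)
	 * and HSISR handling (8188EE/8723BE).  Finally the saved irq_mask
	 * values are written back.  The whole sequence runs under
	 * irq_th_lock with local interrupts disabled.
	 */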
u32 inta = 0; + u32 intb = 0; + + + + if (rtlpci->irq_enabled == 0) + return IRQ_HANDLED; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,flags); + + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0); + + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0); + + + /*read ISR: 4/8bytes */ + rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb); + + + /*Shared IRQ or HW disappared */ + if (!inta || inta == 0xffff) + goto done; + /*<1> beacon related */ + if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) { + RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n")); + } + + if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) { + RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n")); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) { + RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n")); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) { + RT_TRACE(COMP_INTR, DBG_TRACE, + ("prepare beacon for interrupt!\n")); + tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet); + } + + + /*<2> tx related */ + if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW])) + RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n")); + + if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) { + RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n")); + _rtl_pci_tx_isr(hw, MGNT_QUEUE); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) { + RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n")); + _rtl_pci_tx_isr(hw, HIGH_QUEUE); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) { + rtlpriv->link_info.num_tx_inperiod++; + + RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n")); + _rtl_pci_tx_isr(hw, BK_QUEUE); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) { + rtlpriv->link_info.num_tx_inperiod++; + + RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n")); + _rtl_pci_tx_isr(hw, BE_QUEUE); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) { + rtlpriv->link_info.num_tx_inperiod++; + + RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n")); + _rtl_pci_tx_isr(hw, VI_QUEUE); + } + + if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) { + rtlpriv->link_info.num_tx_inperiod++; + + RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n")); + _rtl_pci_tx_isr(hw, VO_QUEUE); + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) { + if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) { + rtlpriv->link_info.num_tx_inperiod++; + + RT_TRACE(COMP_INTR, DBG_TRACE, + ("CMD TX OK interrupt!\n")); + _rtl_pci_tx_isr(hw, TXCMD_QUEUE); + } + } + + /*<3> rx related */ + if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) { + RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n")); + + _rtl_pci_rx_interrupt(hw); + + } + + if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("rx descriptor unavailable!\n")); + rtl_write_byte(rtlpriv, 0xb4, BIT(1) ); + _rtl_pci_rx_interrupt(hw); + } + + if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) { + RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n")); + _rtl_pci_rx_interrupt(hw); + } + + /*<4> fw related*/ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) { + if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) { + RT_TRACE(COMP_INTR, DBG_TRACE, + ("firmware interrupt!\n")); + queue_delayed_work(rtlpriv->works.rtl_wq, + &rtlpriv->works.fwevt_wq, 0); + } + } + + /*<5> hsisr related*/ + /* Only 8188EE & 8723BE Supported. 
+ * If Other ICs Come in, System will corrupt, + * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR] + * are not initialized*/ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE || + rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) { + if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) { + RT_TRACE(COMP_INTR, DBG_TRACE, + ("hsisr interrupt!\n")); + _rtl_pci_hs_interrupt(hw); + } + } + + + if(rtlpriv->rtlhal.b_earlymode_enable) + tasklet_schedule(&rtlpriv->works.irq_tasklet); + + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], + rtlpci->irq_mask[0]); + rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], + rtlpci->irq_mask[1]); + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + return IRQ_HANDLED; + +done: + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + return IRQ_HANDLED; +} + +static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw) +{ + _rtl_pci_tx_chk_waitq(hw); +} + +static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl8192_tx_ring *ring = NULL; + struct ieee80211_hdr *hdr = NULL; + struct ieee80211_tx_info *info = NULL; + struct sk_buff *pskb = NULL; + struct rtl_tx_desc *pdesc = NULL; + struct rtl_tcb_desc tcb_desc; + /*This is for new trx flow*/ + struct rtl_tx_buffer_desc *pbuffer_desc = NULL; + u8 temp_one = 1; + + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + ring = &rtlpci->tx_ring[BEACON_QUEUE]; + pskb = __skb_dequeue(&ring->queue); + if (pskb) + kfree_skb(pskb); + + /*NB: the beacon data buffer must be 32-bit aligned. */ + pskb = ieee80211_beacon_get(hw, mac->vif); + if (pskb == NULL) + return; + hdr = rtl_get_hdr(pskb); + info = IEEE80211_SKB_CB(pskb); + pdesc = &ring->desc[0]; + if (rtlpriv->use_new_trx_flow) + pbuffer_desc = &ring->buffer_desc[0]; + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, + (u8 *)pbuffer_desc, info, pskb, + BEACON_QUEUE, &tcb_desc); +#else +/**/ + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, + (u8 *)pbuffer_desc, info, NULL, pskb, + BEACON_QUEUE, &tcb_desc); +/**/ +#endif +/**/ + + __skb_queue_tail(&ring->queue, pskb); + + rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, HW_DESC_OWN, + (u8 *) & temp_one); + + return; +} + +static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 i; + u16 desc_num; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) + desc_num = TX_DESC_NUM_92E; + else + desc_num = RT_TXDESC_NUM; + + for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { + rtlpci->txringcount[i] = desc_num; + } + /* + *we just alloc 2 desc for beacon queue, + *because we just need first desc in hw beacon. + */ + rtlpci->txringcount[BEACON_QUEUE] = 2; + + /* + *BE queue need more descriptor for performance + *consideration or, No more tx desc will happen, + *and may cause mac80211 mem leakage. 
+ */ + if (rtl_priv(hw)->use_new_trx_flow == false) + rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE; + + rtlpci->rxbuffersize = 9100; /*2048/1024; */ + rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */ +} + +static void _rtl_pci_init_struct(struct ieee80211_hw *hw, + struct pci_dev *pdev) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + rtlpriv->rtlhal.up_first_time = true; + rtlpriv->rtlhal.being_init_adapter = false; + + rtlhal->hw = hw; + rtlpci->pdev = pdev; + + /*Tx/Rx related var */ + _rtl_pci_init_trx_var(hw); + + /*IBSS*/ mac->beacon_interval = 100; + + /*AMPDU*/ + mac->min_space_cfg = 0; + mac->max_mss_density = 0; + /*set sane AMPDU defaults */ + mac->current_ampdu_density = 7; + mac->current_ampdu_factor = 3; + + /*QOS*/ + rtlpci->acm_method = eAcmWay2_SW; + + /*task */ + tasklet_init(&rtlpriv->works.irq_tasklet, + (void (*)(unsigned long))_rtl_pci_irq_tasklet, + (unsigned long)hw); + tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet, + (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet, + (unsigned long)hw); +} + +static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, + unsigned int prio, unsigned int entries) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tx_buffer_desc *buffer_desc; + struct rtl_tx_desc *desc; + dma_addr_t buffer_desc_dma, desc_dma; + u32 nextdescaddress; + int i; + + /* alloc tx buffer desc for new trx flow*/ + if (rtlpriv->use_new_trx_flow) { + buffer_desc = pci_alloc_consistent(rtlpci->pdev, + sizeof(*buffer_desc) * entries, + &buffer_desc_dma); + + if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Cannot allocate TX ring (prio = %d)\n", + prio)); + return -ENOMEM; + } + + memset(buffer_desc, 0, sizeof(*buffer_desc) * entries); + rtlpci->tx_ring[prio].buffer_desc = buffer_desc; + rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma; + + rtlpci->tx_ring[prio].cur_tx_rp = 0; + rtlpci->tx_ring[prio].cur_tx_wp = 0; + rtlpci->tx_ring[prio].avl_desc = entries; + + } + + /* alloc dma for this ring */ + desc = pci_alloc_consistent(rtlpci->pdev, + sizeof(*desc) * entries, &desc_dma); + + if (!desc || (unsigned long)desc & 0xFF) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Cannot allocate TX ring (prio = %d)\n", prio)); + return -ENOMEM; + } + + memset(desc, 0, sizeof(*desc) * entries); + rtlpci->tx_ring[prio].desc = desc; + rtlpci->tx_ring[prio].dma = desc_dma; + + rtlpci->tx_ring[prio].idx = 0; + rtlpci->tx_ring[prio].entries = entries; + skb_queue_head_init(&rtlpci->tx_ring[prio].queue); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("queue:%d, ring_addr:%p\n", prio, desc)); + + /* init every desc in this ring */ + if (rtlpriv->use_new_trx_flow == false) { + for (i = 0; i < entries; i++) { + nextdescaddress = cpu_to_le32((u32) desc_dma + + ((i + 1) % entries) * + sizeof(*desc)); + + rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]), + true, + HW_DESC_TX_NEXTDESC_ADDR, + (u8 *) & nextdescaddress); + } + } + return 0; +} + +static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + int i; + + if (rtlpriv->use_new_trx_flow) { + struct rtl_rx_buffer_desc *entry = NULL; + /* alloc dma for this ring */ + rtlpci->rx_ring[rxring_idx].buffer_desc = + pci_alloc_consistent(rtlpci->pdev, + 
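/*
 * Editor's illustrative sketch -- not part of the driver patch.  For the
 * legacy (non buffer-descriptor) flow, _rtl_pci_init_tx_ring() above chains
 * the descriptors by writing each slot's "next descriptor" DMA address,
 * with the last slot wrapping back to slot 0.  A standalone userspace model
 * of that address arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t next_desc_addr(uint32_t ring_dma, unsigned int i,
			       unsigned int entries, unsigned int desc_size)
{
	return ring_dma + ((i + 1) % entries) * desc_size;
}

int main(void)
{
	unsigned int i, entries = 4;

	for (i = 0; i < entries; i++)	/* the last entry points back to slot 0 */
		printf("desc[%u].next = 0x%08x\n", i,
		       (unsigned int)next_desc_addr(0x1000, i, entries, 32));
	return 0;
}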
sizeof(*rtlpci->rx_ring[rxring_idx]. + buffer_desc) * + rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); + if (!rtlpci->rx_ring[rxring_idx].buffer_desc || + (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n")); + return -ENOMEM; + } + + memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0, + sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * + rtlpci->rxringcount); + + /* init every desc in this ring */ + rtlpci->rx_ring[rxring_idx].idx = 0; + for (i = 0; i < rtlpci->rxringcount; i++) { + entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; + if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + rxring_idx, i)) + return -ENOMEM; + } + } else { + struct rtl_rx_desc *entry = NULL; + u8 tmp_one = 1; + /* alloc dma for this ring */ + rtlpci->rx_ring[rxring_idx].desc = + pci_alloc_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx]. + desc) * rtlpci->rxringcount, + &rtlpci->rx_ring[rxring_idx].dma); + if (!rtlpci->rx_ring[rxring_idx].desc || + (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Cannot allocate RX ring\n")); + return -ENOMEM; + } + + memset(rtlpci->rx_ring[rxring_idx].desc, 0, + sizeof(*rtlpci->rx_ring[rxring_idx].desc) * + rtlpci->rxringcount); + + /* init every desc in this ring */ + rtlpci->rx_ring[rxring_idx].idx = 0; + for (i = 0; i < rtlpci->rxringcount; i++) { + entry = &rtlpci->rx_ring[rxring_idx].desc[i]; + if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + rxring_idx, i)) + return -ENOMEM; + } + rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false, + HW_DESC_RXERO, (u8 *) & tmp_one); + } + return 0; +} + +static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, + unsigned int prio) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; + + /* free every desc in this ring */ + while (skb_queue_len(&ring->queue)) { + u8 *entry; + struct sk_buff *skb = __skb_dequeue(&ring->queue); + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc[ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); + + pci_unmap_single(rtlpci->pdev, + le32_to_cpu(rtlpriv->cfg->ops->get_desc( + (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } + + /* free dma of this ring */ + pci_free_consistent(rtlpci->pdev, + sizeof(*ring->desc) * ring->entries, + ring->desc, ring->dma); + ring->desc = NULL; + if (rtlpriv->use_new_trx_flow) { + pci_free_consistent(rtlpci->pdev, + sizeof(*ring->buffer_desc) * ring->entries, + ring->buffer_desc, ring->buffer_desc_dma); + ring->buffer_desc = NULL; + } +} + +static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + int i; + + /* free every desc in this ring */ + for (i = 0; i < rtlpci->rxringcount; i++) { + struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i]; + if (!skb) + continue; + + pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb), + rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + kfree_skb(skb); + } + + /* free dma of this ring */ + if (rtlpriv->use_new_trx_flow) { + pci_free_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx]. 
+ buffer_desc) * rtlpci->rxringcount, + rtlpci->rx_ring[rxring_idx].buffer_desc, + rtlpci->rx_ring[rxring_idx].dma); + rtlpci->rx_ring[rxring_idx].buffer_desc = NULL; + } else { + pci_free_consistent(rtlpci->pdev, + sizeof(*rtlpci->rx_ring[rxring_idx].desc) * + rtlpci->rxringcount, + rtlpci->rx_ring[rxring_idx].desc, + rtlpci->rx_ring[rxring_idx].dma); + rtlpci->rx_ring[rxring_idx].desc = NULL; + } +} + +static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + int ret; + int i, rxring_idx; + + /* rxring_idx 0:RX_MPDU_QUEUE + * rxring_idx 1:RX_CMD_QUEUE */ + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) { + ret = _rtl_pci_init_rx_ring(hw, rxring_idx); + if (ret) + return ret; + } + + for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { + ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]); + if (ret) + goto err_free_rings; + } + + return 0; + +err_free_rings: + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) + _rtl_pci_free_rx_ring(hw, rxring_idx); + + for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) + if (rtlpci->tx_ring[i].desc || + rtlpci->tx_ring[i].buffer_desc) + _rtl_pci_free_tx_ring(hw, i); + + return 1; +} + +static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw) +{ + u32 i, rxring_idx; + + /*free rx rings */ + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) + _rtl_pci_free_rx_ring(hw, rxring_idx); + + /*free tx rings */ + for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) + _rtl_pci_free_tx_ring(hw, i); + + return 0; +} + +int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + int i, rxring_idx; + unsigned long flags; + u8 tmp_one = 1; + /* rxring_idx 0:RX_MPDU_QUEUE */ + /* rxring_idx 1:RX_CMD_QUEUE */ + for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) { + /* force the rx_ring[RX_MPDU_QUEUE/ + * RX_CMD_QUEUE].idx to the first one */ + /*new trx flow, do nothing*/ + if ((rtlpriv->use_new_trx_flow == false) && + rtlpci->rx_ring[rxring_idx].desc) { + struct rtl_rx_desc *entry = NULL; + + for (i = 0; i < rtlpci->rxringcount; i++) { + entry = &rtlpci->rx_ring[rxring_idx].desc[i]; + rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, + false, + HW_DESC_RXOWN, + (u8 *) & tmp_one); + } + } + rtlpci->rx_ring[rxring_idx].idx = 0; } + + /* after reset, release previous pending packet, + * and force the tx idx to the first one */ + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) { + if (rtlpci->tx_ring[i].desc || + rtlpci->tx_ring[i].buffer_desc) { + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i]; + + while (skb_queue_len(&ring->queue)) { + u8 *entry; + struct sk_buff *skb = + __skb_dequeue(&ring->queue); + if (rtlpriv->use_new_trx_flow) + entry = (u8 *)(&ring->buffer_desc + [ring->idx]); + else + entry = (u8 *)(&ring->desc[ring->idx]); + + pci_unmap_single(rtlpci->pdev, + le32_to_cpu(rtlpriv->cfg->ops->get_desc( + (u8 *)entry, true, + HW_DESC_TXBUFF_ADDR)), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } + ring->idx = 0; + } + } + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + return 0; +} + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) +static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw, + struct sk_buff *skb) +#else +/**/ +static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw 
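/*
 * Editor's note: the duplicated prototypes here (and for rtl_pci_tx()
 * below) are a kernel-version compatibility measure -- the pre-3.7 branches
 * recover the destination station from info->control.sta, while the newer
 * branches take an explicit struct ieee80211_sta argument from mac80211.
 */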
*hw, + struct ieee80211_sta *sta, + struct sk_buff *skb) +/**/ +#endif +/**/ +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = info->control.sta; +#endif +/**/ + struct rtl_sta_info *sta_entry = NULL; + u8 tid = rtl_get_tid(skb); + u16 fc = rtl_get_fc(skb); + + if(!sta) + return false; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + + if (!rtlpriv->rtlhal.b_earlymode_enable) + return false; + if (ieee80211_is_nullfunc(fc)) + return false; + if (ieee80211_is_qos_nullfunc(fc)) + return false; + if (ieee80211_is_pspoll(fc)) { + return false; + } + + if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL) + return false; + if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE) + return false; + if (tid > 7) + return false; + /* maybe every tid should be checked */ + if (!rtlpriv->link_info.higher_busytxtraffic[tid]) + return false; + + spin_lock_bh(&rtlpriv->locks.waitq_lock); + skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb); + spin_unlock_bh(&rtlpriv->locks.waitq_lock); + + return true; +} + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) +int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc) +#else +/**/ +static int rtl_pci_tx(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc) +/**/ +#endif +/**/ +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *sta_entry = NULL; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + struct ieee80211_sta *sta = info->control.sta; +#endif +/**/ + struct rtl8192_tx_ring *ring; + struct rtl_tx_desc *pdesc; + struct rtl_tx_buffer_desc *ptx_bd_desc = NULL; + u16 idx; + u8 own; + u8 temp_one = 1; + u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb); + unsigned long flags; + struct ieee80211_hdr *hdr = rtl_get_hdr(skb); + u16 fc = rtl_get_fc(skb); + u8 *pda_addr = hdr->addr1; + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + /*ssn */ + u8 tid = 0; + u16 seq_number = 0; + + + if (ieee80211_is_mgmt(fc)) + rtl_tx_mgmt_proc(hw, skb); + + if (rtlpriv->psc.sw_ps_enabled) { + if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) && + !ieee80211_has_pm(fc)) + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); + } + + rtl_action_proc(hw, skb, true); + + if (is_multicast_ether_addr(pda_addr)) + rtlpriv->stats.txbytesmulticast += skb->len; + else if (is_broadcast_ether_addr(pda_addr)) + rtlpriv->stats.txbytesbroadcast += skb->len; + else + rtlpriv->stats.txbytesunicast += skb->len; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + ring = &rtlpci->tx_ring[hw_queue]; + if (hw_queue != BEACON_QUEUE) { + if (rtlpriv->use_new_trx_flow) + idx = ring->cur_tx_wp; + else + idx = (ring->idx + skb_queue_len(&ring->queue)) % + ring->entries; + } else { + idx = 0; + } + + pdesc = &ring->desc[idx]; + + if (rtlpriv->use_new_trx_flow) { + ptx_bd_desc = &ring->buffer_desc[idx]; + } else { + own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, + true, HW_DESC_OWN); + + if ((own == 1) && (hw_queue != BEACON_QUEUE)) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("No more TX desc@%d, ring->idx = %d," + "idx = %d, skb_queue_len = 0x%d\n", + hw_queue, ring->idx, idx, + skb_queue_len(&ring->queue))); + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, + flags); + return skb->len; + } + } + + if (ieee80211_is_data_qos(fc)) { + tid = rtl_get_tid(skb); + if 
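/*
 * Editor's illustrative sketch -- not part of the driver patch.  The QoS
 * branch just below derives the per-TID sequence number from the 802.11
 * sequence-control field: bits 0-3 carry the fragment number and bits 4-15
 * the sequence number (IEEE80211_SCTL_SEQ is the 0xFFF0 mask used there).
 * A standalone userspace model of that extraction; the wrap to 12 bits at
 * the end is the model's own addition.
 */
#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG	0x000F	/* stand-ins for the IEEE80211_SCTL_* masks */
#define SCTL_SEQ	0xFFF0

int main(void)
{
	uint16_t seq_ctrl = 0x1234;	/* example value, already in host order */
	unsigned int seq = (seq_ctrl & SCTL_SEQ) >> 4;

	printf("frag=%u seq=%u next=%u\n",
	       (unsigned int)(seq_ctrl & SCTL_FRAG), seq, (seq + 1) & 0xFFF);
	return 0;
}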
(sta) { + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + seq_number = (le16_to_cpu(hdr->seq_ctrl) & + IEEE80211_SCTL_SEQ) >> 4; + seq_number += 1; + + if (!ieee80211_has_morefrags(hdr->frame_control)) + sta_entry->tids[tid].seq_number = seq_number; + } + } + + if (ieee80211_is_data(fc)) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, + (u8 *)ptx_bd_desc, info, skb, + hw_queue, ptcb_desc); +#else +/**/ + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc, + (u8 *)ptx_bd_desc, info, sta, skb, + hw_queue, ptcb_desc); +/**/ +#endif +/**/ + + __skb_queue_tail(&ring->queue, skb); + if (rtlpriv->use_new_trx_flow) { + rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, + HW_DESC_OWN, (u8 *) & hw_queue); + } else { + rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, + HW_DESC_OWN, (u8 *) & temp_one); + } + + if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && + hw_queue != BEACON_QUEUE) { + + RT_TRACE(COMP_ERR, DBG_LOUD, + ("less desc left, stop skb_queue@%d, " + "ring->idx = %d," + "idx = %d, skb_queue_len = 0x%d\n", + hw_queue, ring->idx, idx, + skb_queue_len(&ring->queue))); + + ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); + } + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + rtlpriv->cfg->ops->tx_polling(hw, hw_queue); + + return 0; +} +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) +static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop) +#else +static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop) +#endif +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u16 i = 0; + int queue_id; + struct rtl8192_tx_ring *ring; + + if (mac->skip_scan) + return; + + for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) { + u32 queue_len; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + if (((queues >> queue_id) & 0x1) == 0) { + queue_id--; + continue; + } +#endif + ring = &pcipriv->dev.tx_ring[queue_id]; + queue_len = skb_queue_len(&ring->queue); + if (queue_len == 0 || queue_id == BEACON_QUEUE || + queue_id == TXCMD_QUEUE) { + queue_id--; + continue; + } else { + msleep(5); + i++; + } + + /* we just wait 1s for all queues */ + if (rtlpriv->psc.rfpwr_state == ERFOFF || + is_hal_stop(rtlhal) || i >= 200) + return; + } +} + +void rtl_pci_deinit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + _rtl_pci_deinit_trx_ring(hw); + + synchronize_irq(rtlpci->pdev->irq); + tasklet_kill(&rtlpriv->works.irq_tasklet); + + flush_workqueue(rtlpriv->works.rtl_wq); + destroy_workqueue(rtlpriv->works.rtl_wq); + +} + +int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err; + + _rtl_pci_init_struct(hw, pdev); + + err = _rtl_pci_init_trx_ring(hw); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("tx ring initialization failed")); + return err; + } + + return 1; +} + +int rtl_pci_start(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + int err = 0; + RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start \n")); + rtl_pci_reset_trx_ring(hw); + + rtlpriv->rtlhal.driver_is_goingto_unload = false; + err = 
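	/*
	 * Editor's note: hw_init() is the per-chip bring-up entry point in
	 * the ops table; only when it succeeds does the code below enable
	 * interrupts, program the RX configuration and mark the HAL as
	 * started.
	 */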
rtlpriv->cfg->ops->hw_init(hw); + if (err) { + RT_TRACE(COMP_INIT, DBG_DMESG, + ("Failed to config hardware err %x!\n",err)); + return err; + } + + rtlpriv->cfg->ops->enable_interrupt(hw); + RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n")); + + rtl_init_rx_config(hw); + + /*should after adapter start and interrupt enable. */ + set_hal_start(rtlhal); + + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + + rtlpriv->rtlhal.up_first_time = false; + + RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n")); + return 0; +} + +void rtl_pci_stop(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 RFInProgressTimeOut = 0; + + /* + *should before disable interrrupt&adapter + *and will do it immediately. + */ + set_hal_stop(rtlhal); + + rtlpriv->cfg->ops->disable_interrupt(hw); + + spin_lock(&rtlpriv->locks.rf_ps_lock); + while (ppsc->rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + if (RFInProgressTimeOut > 100) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + break; + } + mdelay(1); + RFInProgressTimeOut++; + spin_lock(&rtlpriv->locks.rf_ps_lock); + } + ppsc->rfchange_inprogress = true; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + + rtlpriv->rtlhal.driver_is_goingto_unload = true; + rtlpriv->cfg->ops->hw_disable(hw); + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + + rtl_pci_enable_aspm(hw); +} + +static bool _rtl_pci_find_adapter(struct pci_dev *pdev, + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct pci_dev *bridge_pdev = pdev->bus->self; + u16 venderid; + u16 deviceid; + u8 revisionid; + u16 irqline; + u8 tmp; + + venderid = pdev->vendor; + deviceid = pdev->device; + pci_read_config_byte(pdev, 0x8, &revisionid); + pci_read_config_word(pdev, 0x3C, &irqline); + + if (deviceid == RTL_PCI_8192_DID || + deviceid == RTL_PCI_0044_DID || + deviceid == RTL_PCI_0047_DID || + deviceid == RTL_PCI_8192SE_DID || + deviceid == RTL_PCI_8174_DID || + deviceid == RTL_PCI_8173_DID || + deviceid == RTL_PCI_8172_DID || + deviceid == RTL_PCI_8171_DID) { + switch (revisionid) { + case RTL_PCI_REVISION_ID_8192PCIE: + RT_TRACE(COMP_INIT, DBG_DMESG, + ("8192E is found but not supported now-" + "vid/did=%x/%x\n", venderid, deviceid)); + rtlhal->hw_type = HARDWARE_TYPE_RTL8192E; + return false; + break; + case RTL_PCI_REVISION_ID_8192SE: + RT_TRACE(COMP_INIT, DBG_DMESG, + ("8192SE is found - " + "vid/did=%x/%x\n", venderid, deviceid)); + rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE; + break; + default: + RT_TRACE(COMP_ERR, DBG_WARNING, + ("Err: Unknown device - " + "vid/did=%x/%x\n", venderid, deviceid)); + rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE; + break; + + } + }else if(deviceid == RTL_PCI_8723AE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE; + RT_TRACE(COMP_INIT, DBG_DMESG, + ("8723AE PCI-E is found - " + "vid/did=%x/%x\n", venderid, deviceid)); + } else if (deviceid == RTL_PCI_8192CET_DID || + deviceid == RTL_PCI_8192CE_DID || + deviceid == RTL_PCI_8191CE_DID || + deviceid == RTL_PCI_8188CE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE; + RT_TRACE(COMP_INIT, DBG_DMESG, + ("8192C PCI-E is found - " + "vid/did=%x/%x\n", venderid, deviceid)); + } else if (deviceid == RTL_PCI_8192DE_DID || + deviceid == 
RTL_PCI_8192DE_DID2) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE; + RT_TRACE(COMP_INIT, DBG_DMESG, + ("8192D PCI-E is found - " + "vid/did=%x/%x\n", venderid, deviceid)); + }else if(deviceid == RTL_PCI_8188EE_DID){ + rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE; + RT_TRACE(COMP_INIT,DBG_LOUD, + ("Find adapter, Hardware type is 8188EE\n")); + }else if (deviceid == RTL_PCI_8723BE_DID){ + rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE; + RT_TRACE(COMP_INIT,DBG_LOUD, + ("Find adapter, Hardware type is 8723BE\n")); + }else if (deviceid == RTL_PCI_8192EE_DID){ + rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE; + RT_TRACE(COMP_INIT,DBG_LOUD, + ("Find adapter, Hardware type is 8192EE\n")); + }else if (deviceid == RTL_PCI_8821AE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE; + RT_TRACE(COMP_INIT,DBG_LOUD, + ("Find adapter, Hardware type is 8821AE\n")); + }else if (deviceid == RTL_PCI_8812AE_DID) { + rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE; + RT_TRACE(COMP_INIT,DBG_LOUD, + ("Find adapter, Hardware type is 8812AE\n")); + }else { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("Err: Unknown device -" + " vid/did=%x/%x\n", venderid, deviceid)); + + rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE; + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) { + if (revisionid == 0 || revisionid == 1) { + if (revisionid == 0) { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Find 92DE MAC0.\n")); + rtlhal->interfaceindex = 0; + } else if (revisionid == 1) { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Find 92DE MAC1.\n")); + rtlhal->interfaceindex = 1; + } + } else { + RT_TRACE(COMP_INIT, DBG_LOUD, ("Unknown device - " + "VendorID/DeviceID=%x/%x, Revision=%x\n", + venderid, deviceid, revisionid)); + rtlhal->interfaceindex = 0; + } + } + + /* 92ee use new trx flow */ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE) + rtlpriv->use_new_trx_flow = true; + else + rtlpriv->use_new_trx_flow = false; + + /*find bus info */ + pcipriv->ndis_adapter.busnumber = pdev->bus->number; + pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); + pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); + + /*find bridge info */ + pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN; + /* some ARM have no bridge_pdev and will crash here + * so we should check if bridge_pdev is NULL */ + if (bridge_pdev) { + pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; + for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { + if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { + pcipriv->ndis_adapter.pcibridge_vendor = tmp; + RT_TRACE(COMP_INIT, DBG_DMESG, + ("Pci Bridge Vendor is found index: %d\n", + tmp)); + break; + } + } + } + + if (pcipriv->ndis_adapter.pcibridge_vendor != + PCI_BRIDGE_VENDOR_UNKNOWN) { + pcipriv->ndis_adapter.pcibridge_busnum = + bridge_pdev->bus->number; + pcipriv->ndis_adapter.pcibridge_devnum = + PCI_SLOT(bridge_pdev->devfn); + pcipriv->ndis_adapter.pcibridge_funcnum = + PCI_FUNC(bridge_pdev->devfn); + pcipriv->ndis_adapter.pcicfg_addrport = + (pcipriv->ndis_adapter.pcibridge_busnum << 16) | + (pcipriv->ndis_adapter.pcibridge_devnum << 11) | + (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31); +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +/**/ + pcipriv->ndis_adapter.pcibridge_pciehdr_offset = + pci_pcie_cap(bridge_pdev); +/**/ +#else + pcipriv->ndis_adapter.pcibridge_pciehdr_offset = + _rtl_pci_get_pciehdr_offset(hw); +#endif +/**/ + pcipriv->ndis_adapter.num4bytes = + (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4; + + rtl_pci_get_linkcontrol_field(hw); + + if 
(pcipriv->ndis_adapter.pcibridge_vendor == + PCI_BRIDGE_VENDOR_AMD) { + pcipriv->ndis_adapter.amd_l1_patch = + rtl_pci_get_amd_l1_patch(hw); + } + } + + RT_TRACE(COMP_INIT, DBG_DMESG, + ("pcidev busnumber:devnumber:funcnumber:" + "vendor:link_ctl %d:%d:%d:%x:%x\n", + pcipriv->ndis_adapter.busnumber, + pcipriv->ndis_adapter.devnumber, + pcipriv->ndis_adapter.funcnumber, + pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg)); + + RT_TRACE(COMP_INIT, DBG_DMESG, + ("pci_bridge busnumber:devnumber:funcnumber:vendor:" + "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n", + pcipriv->ndis_adapter.pcibridge_busnum, + pcipriv->ndis_adapter.pcibridge_devnum, + pcipriv->ndis_adapter.pcibridge_funcnum, + pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor], + pcipriv->ndis_adapter.pcibridge_pciehdr_offset, + pcipriv->ndis_adapter.pcibridge_linkctrlreg, + pcipriv->ndis_adapter.amd_l1_patch)); + + rtl_pci_parse_configuration(pdev, hw); + list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list); + return true; +} + +static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + int ret; + ret = pci_enable_msi(rtlpci->pdev); + if (ret < 0) + return ret; + + ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, + IRQF_SHARED, KBUILD_MODNAME, hw); + if (ret < 0) { + pci_disable_msi(rtlpci->pdev); + return ret; + } + + rtlpci->using_msi = true; + + RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n")); + return 0; +} + +static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + int ret; + + ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt, + IRQF_SHARED, KBUILD_MODNAME, hw); + if (ret < 0) { + return ret; + } + + rtlpci->using_msi = false; + RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, + ("Pin-based Interrupt Mode!\n")); + return 0; +} + +static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + int ret; + if (rtlpci->msi_support == true) { + ret = rtl_pci_intr_mode_msi(hw); + if (ret < 0) + ret = rtl_pci_intr_mode_legacy(hw); + } else { + ret = rtl_pci_intr_mode_legacy(hw); + } + return ret; +} + +/* this is used for other modules get + * hw pointer in rtl_pci_get_hw_pointer */ +struct ieee80211_hw *hw_export = NULL; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +int rtl_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) + +#else +int __devinit rtl_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +#endif +{ + struct ieee80211_hw *hw = NULL; + + struct rtl_priv *rtlpriv = NULL; + struct rtl_pci_priv *pcipriv = NULL; + struct rtl_pci *rtlpci; + unsigned long pmem_start, pmem_len, pmem_flags; + int err; + + + err = pci_enable_device(pdev); + if (err) { + RT_ASSERT(false, + ("%s : Cannot enable new PCI device\n", + pci_name(pdev))); + return err; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + RT_ASSERT(false, ("Unable to obtain 32bit DMA " + "for consistent allocations\n")); + pci_disable_device(pdev); + return -ENOMEM; + } + } + + pci_set_master(pdev); + + hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) + + sizeof(struct rtl_priv), &rtl_ops); + if (!hw) { + RT_ASSERT(false, + 
("%s : ieee80211 alloc failed\n", pci_name(pdev))); + err = -ENOMEM; + goto fail1; + } + hw_export = hw; + + SET_IEEE80211_DEV(hw, &pdev->dev); + pci_set_drvdata(pdev, hw); + + rtlpriv = hw->priv; + pcipriv = (void *)rtlpriv->priv; + pcipriv->dev.pdev = pdev; + + /* init cfg & intf_ops */ + rtlpriv->rtlhal.interface = INTF_PCI; + rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); + rtlpriv->intf_ops = &rtl_pci_ops; + rtlpriv->glb_var = &global_var; + + /* + *init dbgp flags before all + *other functions, because we will + *use it in other funtions like + *RT_TRACE/RT_PRINT/RTL_PRINT_DATA + *you can not use these macro + *before this + */ + rtl_dbgp_flag_init(hw); + + /* MEM map */ + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + RT_ASSERT(false, ("Can't obtain PCI resources\n")); + return err; + } + + pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id); + pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id); + pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id); + + /*shared mem start */ + rtlpriv->io.pci_mem_start = + (unsigned long)pci_iomap(pdev, + rtlpriv->cfg->bar_id, pmem_len); + if (rtlpriv->io.pci_mem_start == 0) { + RT_ASSERT(false, ("Can't map PCI mem\n")); + goto fail2; + } + + RT_TRACE(COMP_INIT, DBG_DMESG, + ("mem mapped space: start: 0x%08lx len:%08lx " + "flags:%08lx, after map:0x%08lx\n", + pmem_start, pmem_len, pmem_flags, + rtlpriv->io.pci_mem_start)); + + /* Disable Clk Request */ + pci_write_config_byte(pdev, 0x81, 0); + /* leave D3 mode */ + pci_write_config_byte(pdev, 0x44, 0); + pci_write_config_byte(pdev, 0x04, 0x06); + pci_write_config_byte(pdev, 0x04, 0x07); + + /* find adapter */ + /* if chip not support, will return false */ + if(!_rtl_pci_find_adapter(pdev, hw)) + goto fail3; + + /* Init IO handler */ + _rtl_pci_io_handler_init(&pdev->dev, hw); + + /*like read eeprom and so on */ + rtlpriv->cfg->ops->read_eeprom_info(hw); + + if (rtlpriv->cfg->ops->init_sw_vars(hw)) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n")); + goto fail3; + } + + rtlpriv->cfg->ops->init_sw_leds(hw); + + /*aspm */ + rtl_pci_init_aspm(hw); + + /* Init mac80211 sw */ + err = rtl_init_core(hw); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Can't allocate sw for mac80211.\n")); + goto fail3; + } + + /* Init PCI sw */ + err = !rtl_pci_init(hw, pdev); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n")); + goto fail3; + } + + err = ieee80211_register_hw(hw); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Can't register mac80211 hw.\n")); + goto fail3; + } else { + rtlpriv->mac80211.mac80211_registered = 1; + } + /* the wiphy must have been registed to + * cfg80211 prior to regulatory_hint */ + if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) { + RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n")); + } + + err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("failed to create sysfs device attributes\n")); + goto fail3; + } + /* add for prov */ + rtl_proc_add_one(hw); + + /*init rfkill */ + rtl_init_rfkill(hw); + + rtlpci = rtl_pcidev(pcipriv); + + err = rtl_pci_intr_mode_decide(hw); + if (err) { + RT_TRACE(COMP_INIT, DBG_DMESG, + ("%s: failed to register IRQ handler\n", + wiphy_name(hw->wiphy))); + goto fail3; + } else { + rtlpci->irq_alloc = 1; + } + + set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); + return 0; + +fail3: + pci_set_drvdata(pdev, NULL); + rtl_deinit_core(hw); + ieee80211_free_hw(hw); + + if (rtlpriv->io.pci_mem_start != 0) + 
pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); + +fail2: + pci_release_regions(pdev); + +fail1: + + pci_disable_device(pdev); + + return -ENODEV; + +} +//EXPORT_SYMBOL(rtl_pci_probe); + +struct ieee80211_hw *rtl_pci_get_hw_pointer(void) +{ + return hw_export; +} +//EXPORT_SYMBOL(rtl_pci_get_hw_pointer); + +void rtl_pci_disconnect(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); + struct rtl_mac *rtlmac = rtl_mac(rtlpriv); + + clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); + + sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group); + + /* add for prov */ + rtl_proc_remove_one(hw); + + + /*ieee80211_unregister_hw will call ops_stop */ + if (rtlmac->mac80211_registered == 1) { + ieee80211_unregister_hw(hw); + rtlmac->mac80211_registered = 0; + } else { + rtl_deinit_deferred_work(hw); + rtlpriv->intf_ops->adapter_stop(hw); + } + + /*deinit rfkill */ + rtl_deinit_rfkill(hw); + + rtl_pci_deinit(hw); + rtl_deinit_core(hw); + rtlpriv->cfg->ops->deinit_sw_vars(hw); + + if (rtlpci->irq_alloc) { + synchronize_irq(rtlpci->pdev->irq); + free_irq(rtlpci->pdev->irq, hw); + rtlpci->irq_alloc = 0; + } + + if (rtlpci->using_msi == true) + pci_disable_msi(rtlpci->pdev); + + list_del(&rtlpriv->list); + if (rtlpriv->io.pci_mem_start != 0) { + pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); + pci_release_regions(pdev); + } + + pci_disable_device(pdev); + + rtl_pci_disable_aspm(hw); + + pci_set_drvdata(pdev, NULL); + + ieee80211_free_hw(hw); +} +//EXPORT_SYMBOL(rtl_pci_disconnect); + +/*************************************** +kernel pci power state define: +PCI_D0 ((pci_power_t __force) 0) +PCI_D1 ((pci_power_t __force) 1) +PCI_D2 ((pci_power_t __force) 2) +PCI_D3hot ((pci_power_t __force) 3) +PCI_D3cold ((pci_power_t __force) 4) +PCI_UNKNOWN ((pci_power_t __force) 5) + +This function is called when system +goes into suspend state mac80211 will +call rtl_mac_stop() from the mac80211 +suspend function first, So there is +no need to call hw_disable here. +****************************************/ +int rtl_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->cfg->ops->hw_suspend(hw); + rtl_deinit_rfkill(hw); + + return 0; +} +//EXPORT_SYMBOL(rtl_pci_suspend); + +int rtl_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->cfg->ops->hw_resume(hw); + rtl_init_rfkill(hw); + + return 0; +} +//EXPORT_SYMBOL(rtl_pci_resume); + +struct rtl_intf_ops rtl_pci_ops = { + .read_efuse_byte = read_efuse_byte, + .adapter_start = rtl_pci_start, + .adapter_stop = rtl_pci_stop, + .check_buddy_priv = rtl_pci_check_buddy_priv, + .adapter_tx = rtl_pci_tx, + .flush = rtl_pci_flush, + .reset_trx_ring = rtl_pci_reset_trx_ring, + .waitq_insert = rtl_pci_tx_chk_waitq_insert, + + .disable_aspm = rtl_pci_disable_aspm, + .enable_aspm = rtl_pci_enable_aspm, +}; --- linux-3.13.0.orig/drivers/staging/rtl8821ae/pci.h +++ linux-3.13.0/drivers/staging/rtl8821ae/pci.h @@ -0,0 +1,353 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_PCI_H__ +#define __RTL_PCI_H__ + +#include +/* +1: MSDU packet queue, +2: Rx Command Queue +*/ +#define RTL_PCI_RX_MPDU_QUEUE 0 +#define RTL_PCI_RX_CMD_QUEUE 1 +#define RTL_PCI_MAX_RX_QUEUE 2 + +#define RTL_PCI_MAX_RX_COUNT 512//64 +#define RTL_PCI_MAX_TX_QUEUE_COUNT 9 + +#define RT_TXDESC_NUM 128 +#define TX_DESC_NUM_92E 512 +#define RT_TXDESC_NUM_BE_QUEUE 256 + +#define BK_QUEUE 0 +#define BE_QUEUE 1 +#define VI_QUEUE 2 +#define VO_QUEUE 3 +#define BEACON_QUEUE 4 +#define TXCMD_QUEUE 5 +#define MGNT_QUEUE 6 +#define HIGH_QUEUE 7 +#define HCCA_QUEUE 8 + +#define RTL_PCI_DEVICE(vend, dev, cfg) \ + .vendor = (vend), \ + .device = (dev), \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID,\ + .driver_data = (kernel_ulong_t)&(cfg) + +#define INTEL_VENDOR_ID 0x8086 +#define SIS_VENDOR_ID 0x1039 +#define ATI_VENDOR_ID 0x1002 +#define ATI_DEVICE_ID 0x7914 +#define AMD_VENDOR_ID 0x1022 + +#define PCI_MAX_BRIDGE_NUMBER 255 +#define PCI_MAX_DEVICES 32 +#define PCI_MAX_FUNCTION 8 + +#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */ +#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */ + +#define PCI_CLASS_BRIDGE_DEV 0x06 +#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04 +#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10 +#define PCI_CAP_ID_EXP 0x10 + +#define U1DONTCARE 0xFF +#define U2DONTCARE 0xFFFF +#define U4DONTCARE 0xFFFFFFFF + +#define RTL_PCI_8192_DID 0x8192 /*8192 PCI-E */ +#define RTL_PCI_8192SE_DID 0x8192 /*8192 SE */ +#define RTL_PCI_8174_DID 0x8174 /*8192 SE */ +#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */ +#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */ +#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */ +#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */ +#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */ +#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */ +#define RTL_PCI_0047_DID 0x0047 /*8192e Express Card for Ceraga */ +#define RTL_PCI_700F_DID 0x700F +#define RTL_PCI_701F_DID 0x701F +#define RTL_PCI_DLINK_DID 0x3304 +#define RTL_PCI_8723AE_DID 0x8723 /*8723e */ +#define RTL_PCI_8192CET_DID 0x8191 /*8192ce */ +#define RTL_PCI_8192CE_DID 0x8178 /*8192ce */ +#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */ +#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */ +#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */ +#define RTL_PCI_8192DE_DID 0x8193 /*8192de */ +#define RTL_PCI_8192DE_DID2 0x002B /*92DE*/ +#define RTL_PCI_8188EE_DID 0x8179 /*8188ee*/ +#define RTL_PCI_8723BE_DID 0xB723 /*8723be*/ +#define RTL_PCI_8192EE_DID 0x818B 
/*8192ee*/ +#define RTL_PCI_8821AE_DID 0x8821 /*8821ae*/ +#define RTL_PCI_8812AE_DID 0x8812 /*8812ae*/ + +/*8192 support 16 pages of IO registers*/ +#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000 +#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE 0x4000 +#define RTL_MEM_MAPPED_IO_RANGE_8192SE 0x4000 +#define RTL_MEM_MAPPED_IO_RANGE_8192CE 0x4000 +#define RTL_MEM_MAPPED_IO_RANGE_8192DE 0x4000 + +#define RTL_PCI_REVISION_ID_8190PCI 0x00 +#define RTL_PCI_REVISION_ID_8192PCIE 0x01 +#define RTL_PCI_REVISION_ID_8192SE 0x10 +#define RTL_PCI_REVISION_ID_8192CE 0x1 +#define RTL_PCI_REVISION_ID_8192DE 0x0 + +#define PCI_VENDOR_ID_REALTEK 0x10ec + +#define RTL_DEFAULT_HARDWARE_TYPE HARDWARE_TYPE_RTL8192CE + +enum pci_bridge_vendor { + PCI_BRIDGE_VENDOR_INTEL = 0x0, /*0b'0000,0001 */ + PCI_BRIDGE_VENDOR_ATI, /*0b'0000,0010*/ + PCI_BRIDGE_VENDOR_AMD, /*0b'0000,0100*/ + PCI_BRIDGE_VENDOR_SIS, /*0b'0000,1000*/ + PCI_BRIDGE_VENDOR_UNKNOWN, /*0b'0100,0000*/ + PCI_BRIDGE_VENDOR_MAX, +}; + +struct rtl_pci_capabilities_header { + u8 capability_id; + u8 next; +}; + +/* In new TRX flow, Buffer_desc is new concept + * But TX wifi info == TX descriptor in old flow + * RX wifi info == RX descriptor in old flow */ +struct rtl_tx_buffer_desc { +#if (RTL8192EE_SEG_NUM == 2) + u32 dword[2*(DMA_IS_64BIT + 1)*8]; //seg = 8 +#elif (RTL8192EE_SEG_NUM == 1) + u32 dword[2*(DMA_IS_64BIT + 1)*4]; //seg = 4 +#elif (RTL8192EE_SEG_NUM == 0) + u32 dword[2*(DMA_IS_64BIT + 1)*2]; //seg = 2 +#endif +} __packed; + +struct rtl_tx_desc {/*old: tx desc*//*new: tx wifi info*/ + u32 dword[16]; +} __packed; + +struct rtl_rx_buffer_desc { /*rx buffer desc*/ + u32 dword[2]; +} __packed; + +struct rtl_rx_desc { /*old: rx desc*//*new: rx wifi info*/ + u32 dword[8]; +} __packed; + +struct rtl_tx_cmd_desc { + u32 dword[16]; +} __packed; + +struct rtl8192_tx_ring { + struct rtl_tx_desc *desc; /*tx desc / tx wifi info*/ + dma_addr_t dma; /*tx desc dma memory / tx wifi info dma memory*/ + unsigned int idx; + unsigned int entries; + struct sk_buff_head queue; + /*add for new trx flow*/ + struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/ + dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/ + u16 avl_desc; /* available_desc_to_write */ + u16 cur_tx_wp; /* current_tx_write_point */ + u16 cur_tx_rp; /* current_tx_read_point */ +}; + +struct rtl8192_rx_ring { + struct rtl_rx_desc *desc;/*for old trx flow, not uesd in new trx*/ + /*dma matches either 'desc' or 'buffer_desc'*/ + dma_addr_t dma; + unsigned int idx; + struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT]; + /*add for new trx flow*/ + struct rtl_rx_buffer_desc *buffer_desc; /*rx buffer descriptor*/ + u16 next_rx_rp; /* next_rx_read_point */ +}; + +struct rtl_pci { + struct pci_dev *pdev; + bool irq_enabled; + + /*Tx */ + struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT]; + int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT]; + u32 transmit_config; + + /*Rx */ + struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE]; + int rxringcount; + u16 rxbuffersize; + u32 receive_config; + + /*irq */ + u8 irq_alloc; + u32 irq_mask[2]; + u32 sys_irq_mask; + + /*Bcn control register setting */ + u32 reg_bcn_ctrl_val; + + /*ASPM*/ u8 const_pci_aspm; + u8 const_amdpci_aspm; + u8 const_hwsw_rfoff_d3; + u8 const_support_pciaspm; + /*pci-e bridge */ + u8 const_hostpci_aspm_setting; + /*pci-e device */ + u8 const_devicepci_aspm_setting; + /*If it supports ASPM, Offset[560h] = 0x40, + otherwise Offset[560h] = 0x00. 
*/ + bool b_support_aspm; + bool b_support_backdoor; + + /*QOS & EDCA */ + enum acm_method acm_method; + + u16 shortretry_limit; + u16 longretry_limit; + + /* MSI support */ + bool msi_support; + bool using_msi; +}; + +struct mp_adapter { + u8 linkctrl_reg; + + u8 busnumber; + u8 devnumber; + u8 funcnumber; + + u8 pcibridge_busnum; + u8 pcibridge_devnum; + u8 pcibridge_funcnum; + + u8 pcibridge_vendor; + u16 pcibridge_vendorid; + u16 pcibridge_deviceid; + + u32 pcicfg_addrport; + u8 num4bytes; + + u8 pcibridge_pciehdr_offset; + u8 pcibridge_linkctrlreg; + + bool amd_l1_patch; +}; + +struct rtl_pci_priv { + struct rtl_pci dev; + struct mp_adapter ndis_adapter; + struct rtl_led_ctl ledctl; + struct bt_coexist_info btcoexist; +}; + +#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv)) +#define rtl_pcidev(pcipriv) (&((pcipriv)->dev)) + +int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw); + +extern struct rtl_intf_ops rtl_pci_ops; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +int rtl_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id); +#else +int __devinit rtl_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id); +#endif +void rtl_pci_disconnect(struct pci_dev *pdev); +int rtl_pci_suspend(struct device *dev); +int rtl_pci_resume(struct device *dev); + +static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr) +{ + return 0xff & readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); +} + +static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr) +{ + return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); +} + +static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr) +{ + return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); +} + +static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val) +{ + writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); +} + +static inline void pci_write16_async(struct rtl_priv *rtlpriv, + u32 addr, u16 val) +{ + writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); +} + +static inline void pci_write32_async(struct rtl_priv *rtlpriv, + u32 addr, u32 val) +{ + writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); +} + +static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val) +{ + outl(val, port); +} + +static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val) +{ + outb(val, port); +} + +static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 * pval) +{ + *pval = inb(port); +} + +static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 * pval) +{ + *pval = inw(port); +} + +static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 * pval) +{ + *pval = inl(port); +} + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/ps.c +++ linux-3.13.0/drivers/staging/rtl8821ae/ps.c @@ -0,0 +1,1025 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "wifi.h" +#include "base.h" +#include "ps.h" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +#include +#endif +#include "btcoexist/rtl_btc.h" + +bool rtl_ps_enable_nic(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool init_status = true; + + /*<1> reset trx ring */ + if (rtlhal->interface == INTF_PCI) + rtlpriv->intf_ops->reset_trx_ring(hw); + + if (is_hal_stop(rtlhal)) + RT_TRACE(COMP_ERR, DBG_WARNING, ("Driver is already down!\n")); + + /*<2> Enable Adapter */ + rtlpriv->cfg->ops->hw_init(hw); + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + /*init_status = false; */ + + /*<3> Enable Interrupt */ + rtlpriv->cfg->ops->enable_interrupt(hw); + + /* */ + rtl_watch_dog_timer_callback((unsigned long)hw); + + return init_status; +} +//EXPORT_SYMBOL(rtl_ps_enable_nic); + +bool rtl_ps_disable_nic(struct ieee80211_hw *hw) +{ + bool status = true; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /*<1> Stop all timer */ + rtl_deinit_deferred_work(hw); + + /*<2> Disable Interrupt */ + rtlpriv->cfg->ops->disable_interrupt(hw); + + /*<3> Disable Adapter */ + rtlpriv->cfg->ops->hw_disable(hw); + + return status; +} +//EXPORT_SYMBOL(rtl_ps_disable_nic); + +bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, + enum rf_pwrstate state_toset, + u32 changesource, bool protect_or_not) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + enum rf_pwrstate rtstate; + bool b_actionallowed = false; + u16 rfwait_cnt = 0; + + /*protect_or_not = true; */ + + if (protect_or_not) + goto no_protect; + + /* + *Only one thread can change + *the RF state at one time, and others + *should wait to be executed. + */ + while (true) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (ppsc->rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + + RT_TRACE(COMP_ERR, DBG_WARNING, + ("RF Change in progress!" + "Wait to set..state_toset(%d).\n", + state_toset)); + + /* Set RF after the previous action is done. */ + while (ppsc->rfchange_inprogress) { + rfwait_cnt++; + mdelay(1); + /* + *Wait too long, return false to avoid + *to be stuck here. 
+ */ + if (rfwait_cnt > 100) + return false; + } + } else { + ppsc->rfchange_inprogress = true; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + break; + } + } + +no_protect: + rtstate = ppsc->rfpwr_state; + + switch (state_toset) { + case ERFON: + ppsc->rfoff_reason &= (~changesource); + + if ((changesource == RF_CHANGE_BY_HW) && + (ppsc->b_hwradiooff == true)) { + ppsc->b_hwradiooff = false; + } + + if (!ppsc->rfoff_reason) { + ppsc->rfoff_reason = 0; + b_actionallowed = true; + } + + break; + + case ERFOFF: + + if ((changesource == RF_CHANGE_BY_HW) && + (ppsc->b_hwradiooff == false)) { + ppsc->b_hwradiooff = true; + } + + ppsc->rfoff_reason |= changesource; + b_actionallowed = true; + break; + + case ERFSLEEP: + ppsc->rfoff_reason |= changesource; + b_actionallowed = true; + break; + + default: + RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case not process \n")); + break; + } + + if (b_actionallowed) + rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); + + if (!protect_or_not) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + + return b_actionallowed; +} +//EXPORT_SYMBOL(rtl_ps_set_rf_state); + +static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + ppsc->b_swrf_processing = true; + + if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { + if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && + RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && + rtlhal->interface == INTF_PCI) { + rtlpriv->intf_ops->disable_aspm(hw); + RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); + } + } + + if (rtlpriv->cfg->ops->get_btc_status()){ + rtlpriv->btcoexist.btc_ops->btc_ips_notify(rtlpriv, + ppsc->inactive_pwrstate); + } + rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate, + RF_CHANGE_BY_IPS, false); + + if (ppsc->inactive_pwrstate == ERFOFF && + rtlhal->interface == INTF_PCI) { + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && + !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { + rtlpriv->intf_ops->enable_aspm(hw); + RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); + } + } + + ppsc->b_swrf_processing = false; +} + +void rtl_ips_nic_off_wq_callback(void *data) +{ + struct rtl_works *rtlworks = + container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq); + struct ieee80211_hw *hw = rtlworks->hw; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + enum rf_pwrstate rtstate; + + if (mac->opmode != NL80211_IFTYPE_STATION) { + RT_TRACE(COMP_ERR, DBG_WARNING, ("not station return\n")); + return; + } + + if (mac->p2p_in_use) + return; + + if (mac->link_state > MAC80211_NOLINK) + return; + + if (is_hal_stop(rtlhal)) + return; + + if (rtlpriv->sec.being_setkey) + return; + + if(rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps) + rtlpriv->cfg->ops->bt_turn_off_bt_coexist_before_enter_lps(hw); + + if (ppsc->b_inactiveps) { + rtstate = ppsc->rfpwr_state; + + /* + *Do not enter IPS in the following conditions: + *(1) RF is already OFF or Sleep + *(2) b_swrf_processing (indicates the IPS is still under going) + *(3) Connectted (only disconnected can trigger IPS) + *(4) IBSS (send Beacon) + *(5) AP mode (send Beacon) + *(6) monitor mode (rcv packet) + */ + + if (rtstate == ERFON && + !ppsc->b_swrf_processing && + (mac->link_state == MAC80211_NOLINK) && + 
!mac->act_scanning) { + RT_TRACE(COMP_RF, DBG_LOUD, + ("IPSEnter(): Turn off RF.\n")); + + ppsc->inactive_pwrstate = ERFOFF; + ppsc->b_in_powersavemode = true; + + /*rtl_pci_reset_trx_ring(hw); */ + _rtl_ps_inactive_ps(hw); + } + } +} + +void rtl_ips_nic_off(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* + *because when link with ap, mac80211 will ask us + *to disable nic quickly after scan before linking, + *this will cause link failed, so we delay 100ms here + */ + queue_delayed_work(rtlpriv->works.rtl_wq, + &rtlpriv->works.ips_nic_off_wq, MSECS(100)); +} + +/* NOTICE: any opmode should exc nic_on, or disable without + * nic_on may something wrong, like adhoc TP*/ +void rtl_ips_nic_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + enum rf_pwrstate rtstate; + + cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq); + + spin_lock(&rtlpriv->locks.ips_lock); + if (ppsc->b_inactiveps) { + rtstate = ppsc->rfpwr_state; + + if (rtstate != ERFON && + !ppsc->b_swrf_processing && + ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) { + + ppsc->inactive_pwrstate = ERFON; + ppsc->b_in_powersavemode = false; + _rtl_ps_inactive_ps(hw); + } + } + spin_unlock(&rtlpriv->locks.ips_lock); +} + +/*for FW LPS*/ + +/* + *Determine if we can set Fw into PS mode + *in current condition.Return true if it + *can enter PS mode. + */ +static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u32 ps_timediff; + + ps_timediff = jiffies_to_msecs(jiffies - + ppsc->last_delaylps_stamp_jiffies); + + if (ps_timediff < 2000) { + RT_TRACE(COMP_POWER, DBG_LOUD, + ("Delay enter Fw LPS for DHCP, ARP," + " or EAPOL exchanging state.\n")); + return false; + } + + if (mac->link_state != MAC80211_LINKED) + return false; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + return false; + + return true; +} + +/* Change current and default preamble mode.*/ +void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool enter_fwlps; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + return; + + if (mac->link_state != MAC80211_LINKED) + return; + + if (ppsc->dot11_psmode == rt_psmode) + return; + + /* Update power save mode configured. */ + ppsc->dot11_psmode = rt_psmode; + + /* + * + *1. Enter PS mode + * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode + * cmd to set Fw into PS mode. + *2. Leave PS mode + * Send H2C fw_pwrmode cmd to Fw to set Fw into Active + * mode and set RPWM to turn RF on. 
+ */ + + if ((ppsc->b_fwctrl_lps) && ppsc->report_linked) { + if (ppsc->dot11_psmode == EACTIVE) { + RT_TRACE(COMP_RF, DBG_DMESG, + ("FW LPS leave ps_mode:%x\n", + FW_PS_ACTIVE_MODE)); + enter_fwlps = false; + ppsc->pwr_mode = FW_PS_ACTIVE_MODE; + ppsc->smart_ps = 0; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_LPS_ACTION, + (u8 *)(&enter_fwlps)); + if (ppsc->p2p_ps_info.opp_ps) + rtl_p2p_ps_cmd(hw,P2P_PS_ENABLE); + + } else { + if (rtl_get_fwlps_doze(hw)) { + RT_TRACE(COMP_RF, DBG_DMESG, + ("FW LPS enter ps_mode:%x\n", + ppsc->fwctrl_psmode)); + enter_fwlps = true; + ppsc->pwr_mode = ppsc->fwctrl_psmode; + ppsc->smart_ps = 2; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_LPS_ACTION, + (u8 *)(&enter_fwlps)); + + } else { + /* Reset the power save related parameters. */ + ppsc->dot11_psmode = EACTIVE; + } + } + } +} + +/*Enter the leisure power save mode.*/ +void rtl_lps_enter(struct ieee80211_hw *hw) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned long flag; + + if (!ppsc->b_fwctrl_lps) + return; + + if (rtlpriv->sec.being_setkey) + return; + + if (rtlpriv->link_info.b_busytraffic) + return; + + /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */ + if (mac->cnt_after_linked < 5) + return; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + return; + + if (mac->link_state != MAC80211_LINKED) + return; + + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + + /* Idle for a while if we connect to AP a while ago. */ + if (mac->cnt_after_linked >= 2) { + if (ppsc->dot11_psmode == EACTIVE) { + RT_TRACE(COMP_POWER, DBG_LOUD, + ("Enter 802.11 power save mode...\n")); + + rtl_lps_set_psmode(hw, EAUTOPS); + } + } + + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); +} + +/*Leave the leisure power save mode.*/ +void rtl_lps_leave(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + unsigned long flag; + + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + + if (ppsc->b_fwctrl_lps) { + if (ppsc->dot11_psmode != EACTIVE) { + + /*FIX ME */ + rtlpriv->cfg->ops->enable_interrupt(hw); + + if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && + RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && + rtlhal->interface == INTF_PCI) { + rtlpriv->intf_ops->disable_aspm(hw); + RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); + } + + RT_TRACE(COMP_POWER, DBG_LOUD, + ("Busy Traffic,Leave 802.11 power save..\n")); + + rtl_lps_set_psmode(hw, EACTIVE); + } + } + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); +} + +/* For sw LPS*/ +void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct ieee80211_hdr *hdr = (void *) data; + struct ieee80211_tim_ie *tim_ie; + u8 *tim; + u8 tim_len; + bool u_buffed; + bool m_buffed; + + if (mac->opmode != NL80211_IFTYPE_STATION) + return; + + if (!rtlpriv->psc.b_swctrl_lps) + return; + + if (rtlpriv->mac80211.link_state != MAC80211_LINKED) + return; + + if (!rtlpriv->psc.sw_ps_enabled) + return; + + if (rtlpriv->psc.b_fwctrl_lps) + return; + + if (likely(!(hw->conf.flags & IEEE80211_CONF_PS))) + return; + + /* check if this really is a beacon */ + if (!ieee80211_is_beacon(hdr->frame_control)) + return; + + /* min. 
beacon length + FCS_LEN */ + if (len <= 40 + FCS_LEN) + return; + + /* and only beacons from the associated BSSID, please */ + if (ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid)) + return; + + rtlpriv->psc.last_beacon = jiffies; + + tim = rtl_find_ie(data, len - FCS_LEN, WLAN_EID_TIM); + if (!tim) + return; + + if (tim[1] < sizeof(*tim_ie)) + return; + + tim_len = tim[1]; + tim_ie = (struct ieee80211_tim_ie *) &tim[2]; + +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +/**/ + if (!WARN_ON_ONCE(!hw->conf.ps_dtim_period)) + rtlpriv->psc.dtim_counter = tim_ie->dtim_count; +/**/ +#else + if (!WARN_ON_ONCE(!mac->vif->bss_conf.dtim_period)) + rtlpriv->psc.dtim_counter = tim_ie->dtim_count; +#endif +/**/ + + /* Check whenever the PHY can be turned off again. */ + + /* 1. What about buffered unicast traffic for our AID? */ + u_buffed = ieee80211_check_tim(tim_ie, tim_len, + rtlpriv->mac80211.assoc_id); + + /* 2. Maybe the AP wants to send multicast/broadcast data? */ + m_buffed = tim_ie->bitmap_ctrl & 0x01; + rtlpriv->psc.multi_buffered = m_buffed; + + /* unicast will process by mac80211 through + * set ~IEEE80211_CONF_PS, So we just check + * multicast frames here */ + if (!m_buffed ) {//&&) {// !rtlpriv->psc.tx_doing) { + /* back to low-power land. and delay is + * prevent null power save frame tx fail */ + queue_delayed_work(rtlpriv->works.rtl_wq, + &rtlpriv->works.ps_work, MSECS(5)); + } else { + RT_TRACE(COMP_POWER, DBG_DMESG, + ("u_bufferd: %x, m_buffered: %x\n", + u_buffed, m_buffed)); + } +} + +void rtl_swlps_rf_awake(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + unsigned long flag; + + if (!rtlpriv->psc.b_swctrl_lps) + return; + if (mac->link_state != MAC80211_LINKED) + return; + + if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM && + RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { + rtlpriv->intf_ops->disable_aspm(hw); + RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); + } + + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); +} + +void rtl_swlps_rfon_wq_callback(void *data) +{ + struct rtl_works *rtlworks = + container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq); + struct ieee80211_hw *hw = rtlworks->hw; + + rtl_swlps_rf_awake(hw); +} + +void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + unsigned long flag; + u8 sleep_intv; + + if (!rtlpriv->psc.sw_ps_enabled) + return; + + if ((rtlpriv->sec.being_setkey) || + (mac->opmode == NL80211_IFTYPE_ADHOC)) + return; + + /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! 
*/ + if ((mac->link_state != MAC80211_LINKED) || (mac->cnt_after_linked < 5)) + return; + + if (rtlpriv->link_info.b_busytraffic) + return; + + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (rtlpriv->psc.rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + return; + } + spin_unlock(&rtlpriv->locks.rf_ps_lock); + + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); + rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS,false); + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && + !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) { + rtlpriv->intf_ops->enable_aspm(hw); + RT_SET_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM); + } + + /* here is power save alg, when this beacon is DTIM + * we will set sleep time to dtim_period * n; + * when this beacon is not DTIM, we will set sleep + * time to sleep_intv = rtlpriv->psc.dtim_counter or + * MAX_SW_LPS_SLEEP_INTV(default set to 5) */ + +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +/**/ + if (rtlpriv->psc.dtim_counter == 0) { + if (hw->conf.ps_dtim_period == 1) + sleep_intv = hw->conf.ps_dtim_period * 2; + else + sleep_intv = hw->conf.ps_dtim_period; + } else { + sleep_intv = rtlpriv->psc.dtim_counter; + } +/**/ +#else + if (rtlpriv->psc.dtim_counter == 0) { + if (mac->vif->bss_conf.dtim_period == 1) + sleep_intv = mac->vif->bss_conf.dtim_period * 2; + else + sleep_intv = mac->vif->bss_conf.dtim_period; + } else { + sleep_intv = rtlpriv->psc.dtim_counter; + } +#endif +/**/ + + if (sleep_intv > MAX_SW_LPS_SLEEP_INTV) + sleep_intv = MAX_SW_LPS_SLEEP_INTV; + + /* this print should always be dtim_conter = 0 & + * sleep = dtim_period, that meaons, we should + * awake before every dtim */ + RT_TRACE(COMP_POWER, DBG_DMESG, + ("dtim_counter:%x will sleep :%d beacon_intv\n", + rtlpriv->psc.dtim_counter, sleep_intv)); + + /* we tested that 40ms is enough for sw & hw sw delay */ + queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq, + MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40)); +} + + +void rtl_swlps_wq_callback(void *data) +{ + struct rtl_works *rtlworks = + container_of_dwork_rtl(data, struct rtl_works, ps_work); + struct ieee80211_hw *hw = rtlworks->hw; + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool ps = false; + + ps = (hw->conf.flags & IEEE80211_CONF_PS); + + /* we can sleep after ps null send ok */ + if (rtlpriv->psc.state_inap) { + rtl_swlps_rf_sleep(hw); + + if (rtlpriv->psc.state && !ps) { + rtlpriv->psc.sleep_ms = + jiffies_to_msecs(jiffies - + rtlpriv->psc.last_action); + } + + if (ps) + rtlpriv->psc.last_slept = jiffies; + + rtlpriv->psc.last_action = jiffies; + rtlpriv->psc.state = ps; + } +} + + +void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, unsigned int len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_mgmt *mgmt = (void *)data; + struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); + u8 *pos, *end, *ie; + u16 noa_len; + static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09}; + u8 noa_num, index,i, noa_index = 0; + bool find_p2p_ie = false , find_p2p_ps_ie = false; + pos = (u8 *)mgmt->u.beacon.variable; + end = data + len; + ie = NULL; + + while (pos + 1 < end) { + + if (pos + 2 + pos[1] > end) + return; + + if (pos[0] == 221 && pos[1] > 4) { + if (memcmp(&pos[2], p2p_oui_ie_type, 4) == 0) { + ie = pos + 2+4; + break; + } + } + pos += 2 + pos[1]; + } + + if (ie == NULL) + return; + find_p2p_ie = true; + /*to find noa ie*/ + while (ie + 1 < end) { + noa_len = READEF2BYTE(&ie[1]); + if (ie + 3 + ie[1] > end) + 
return; + + if (ie[0] == 12) { + find_p2p_ps_ie = true; + if ( (noa_len - 2) % 13 != 0){ + RT_TRACE(COMP_INIT, DBG_LOUD, + ("P2P notice of absence: " + "invalid length.%d\n",noa_len)); + return; + } else { + noa_num = (noa_len - 2) / 13; + } + noa_index = ie[3]; + if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE + || noa_index != p2pinfo->noa_index) { + RT_TRACE(COMP_FW, DBG_LOUD, + ("update NOA ie.\n")); + p2pinfo->noa_index = noa_index; + p2pinfo->opp_ps= (ie[4] >> 7); + p2pinfo->ctwindow = ie[4] & 0x7F; + p2pinfo->noa_num = noa_num; + index = 5; + for (i = 0; i< noa_num; i++){ + p2pinfo->noa_count_type[i] = + READEF1BYTE(ie+index); + index += 1; + p2pinfo->noa_duration[i] = + READEF4BYTE(ie+index); + index += 4; + p2pinfo->noa_interval[i] = + READEF4BYTE(ie+index); + index += 4; + p2pinfo->noa_start_time[i] = + READEF4BYTE(ie+index); + index += 4; + } + + if (p2pinfo->opp_ps == 1) { + p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW; + /* Driver should wait LPS + * entering CTWindow*/ + if (rtlpriv->psc.b_fw_current_inpsmode){ + rtl_p2p_ps_cmd(hw, + P2P_PS_ENABLE); + } + } else if (p2pinfo->noa_num > 0) { + p2pinfo->p2p_ps_mode = P2P_PS_NOA; + rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE); + } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) { + rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); + } + } + + break; + } + ie += 3 + noa_len; + } + + if (find_p2p_ie == true) { + if ((p2pinfo->p2p_ps_mode > P2P_PS_NONE) && + (find_p2p_ps_ie == false)) + rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); + } +} + +void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, unsigned int len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_mgmt *mgmt = (void *)data; + struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); + bool find_p2p_ie = false , find_p2p_ps_ie = false; + u8 noa_num, index,i, noa_index = 0; + u8 *pos, *end, *ie; + u16 noa_len; + static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09}; + + pos = (u8 *) &mgmt->u.action.category; + end = data + len; + ie = NULL; + + if (pos[0] == 0x7f ) { + if (memcmp(&pos[1], p2p_oui_ie_type, 4) == 0) { + ie = pos + 3+4; + } + } + + if (ie == NULL) + return; + find_p2p_ie = true; + + RT_TRACE(COMP_FW, DBG_LOUD, ("action frame find P2P IE.\n")); + /*to find noa ie*/ + while (ie + 1 < end) { + noa_len = READEF2BYTE(&ie[1]); + if (ie + 3 + ie[1] > end) + return; + + if (ie[0] == 12) { + RT_TRACE(COMP_FW, DBG_LOUD, ("find NOA IE.\n")); + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, ("noa ie "), + ie, noa_len); + find_p2p_ps_ie = true; + if ( (noa_len - 2) % 13 != 0){ + RT_TRACE(COMP_FW, DBG_LOUD, + ("P2P notice of absence: " + "invalid length.%d\n",noa_len)); + return; + } else { + noa_num = (noa_len - 2) / 13; + } + noa_index = ie[3]; + if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == P2P_PS_NONE + || noa_index != p2pinfo->noa_index) { + p2pinfo->noa_index = noa_index; + p2pinfo->opp_ps= (ie[4] >> 7); + p2pinfo->ctwindow = ie[4] & 0x7F; + p2pinfo->noa_num = noa_num; + index = 5; + for (i = 0; i< noa_num; i++){ + p2pinfo->noa_count_type[i] = + READEF1BYTE(ie+index); + index += 1; + p2pinfo->noa_duration[i] = + READEF4BYTE(ie+index); + index += 4; + p2pinfo->noa_interval[i] = + READEF4BYTE(ie+index); + index += 4; + p2pinfo->noa_start_time[i] = + READEF4BYTE(ie+index); + index += 4; + } + + if (p2pinfo->opp_ps == 1) { + p2pinfo->p2p_ps_mode = P2P_PS_CTWINDOW; + /* Driver should wait LPS + * entering CTWindow */ + if (rtlpriv->psc.b_fw_current_inpsmode){ + rtl_p2p_ps_cmd(hw, + P2P_PS_ENABLE); + } + } else if (p2pinfo->noa_num > 0) { + p2pinfo->p2p_ps_mode = P2P_PS_NOA; + 
rtl_p2p_ps_cmd(hw, P2P_PS_ENABLE); + } else if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) { + rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE); + } + } + + break; + } + ie += 3 + noa_len; + } + + +} + + +void rtl_p2p_ps_cmd(struct ieee80211_hw *hw,u8 p2p_ps_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); + struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info); + + RT_TRACE(COMP_FW, DBG_LOUD, (" p2p state %x\n",p2p_ps_state)); + switch (p2p_ps_state) { + case P2P_PS_DISABLE: + p2pinfo->p2p_ps_state = p2p_ps_state; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_P2P_PS_OFFLOAD, + (u8 *)(&p2p_ps_state)); + + p2pinfo->noa_index = 0; + p2pinfo->ctwindow = 0; + p2pinfo->opp_ps = 0; + p2pinfo->noa_num = 0; + p2pinfo->p2p_ps_mode = P2P_PS_NONE; + if (rtlps->b_fw_current_inpsmode == true) { + if (rtlps->smart_ps == 0) { + rtlps->smart_ps = 2; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&rtlps->pwr_mode)); + } + + } + break; + case P2P_PS_ENABLE: + if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) { + p2pinfo->p2p_ps_state = p2p_ps_state; + + if (p2pinfo->ctwindow > 0) { + if (rtlps->smart_ps != 0){ + rtlps->smart_ps = 0; + rtlpriv->cfg->ops->set_hw_reg( + hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *)(&rtlps->pwr_mode)); + } + } + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_P2P_PS_OFFLOAD, + (u8 *)(&p2p_ps_state)); + + } + break; + case P2P_PS_SCAN: + case P2P_PS_SCAN_DONE: + case P2P_PS_ALLSTASLEEP: + if (p2pinfo->p2p_ps_mode > P2P_PS_NONE) { + p2pinfo->p2p_ps_state = p2p_ps_state; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_P2P_PS_OFFLOAD, + (u8 *)(&p2p_ps_state)); + } + break; + default: + break; + + } + RT_TRACE(COMP_FW, DBG_LOUD, (" ctwindow %x oppps %x \n", + p2pinfo->ctwindow,p2pinfo->opp_ps)); + RT_TRACE(COMP_FW, DBG_LOUD, ("count %x duration %x index %x interval %x" + " start time %x noa num %x\n", + p2pinfo->noa_count_type[0], + p2pinfo->noa_duration[0], + p2pinfo->noa_index, + p2pinfo->noa_interval[0], + p2pinfo->noa_start_time[0], + p2pinfo->noa_num)); + RT_TRACE(COMP_FW, DBG_LOUD, ("end\n")); +} + +void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct ieee80211_hdr *hdr = (void *) data; + + if (!mac->p2p) + return; + if (mac->link_state != MAC80211_LINKED) + return; + /* min. beacon length + FCS_LEN */ + if (len <= 40 + FCS_LEN) + return; + + /* and only beacons from the associated BSSID, please */ + if (ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid)) + return; + + /* check if this really is a beacon */ + if (!(ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control) || + ieee80211_is_action(hdr->frame_control))) + return; + + if (ieee80211_is_action(hdr->frame_control)) { + rtl_p2p_action_ie(hw,data,len - FCS_LEN); + } else { + rtl_p2p_noa_ie(hw,data,len - FCS_LEN); + } + +} --- linux-3.13.0.orig/drivers/staging/rtl8821ae/ps.h +++ linux-3.13.0/drivers/staging/rtl8821ae/ps.h @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __REALTEK_RTL_PCI_PS_H__ +#define __REALTEK_RTL_PCI_PS_H__ + +#define MAX_SW_LPS_SLEEP_INTV 5 + +bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, + enum rf_pwrstate state_toset, u32 changesource, + bool protect_or_not); +bool rtl_ps_enable_nic(struct ieee80211_hw *hw); +bool rtl_ps_disable_nic(struct ieee80211_hw *hw); +void rtl_ips_nic_off(struct ieee80211_hw *hw); +void rtl_ips_nic_on(struct ieee80211_hw *hw); +void rtl_ips_nic_off_wq_callback(void *data); +void rtl_lps_enter(struct ieee80211_hw *hw); +void rtl_lps_leave(struct ieee80211_hw *hw); + +void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode); + +void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len); +void rtl_swlps_wq_callback(void *data); +void rtl_swlps_rfon_wq_callback(void *data); +void rtl_swlps_rf_awake(struct ieee80211_hw *hw); +void rtl_swlps_rf_sleep(struct ieee80211_hw *hw); +void rtl_p2p_ps_cmd(struct ieee80211_hw *hw,u8 p2p_ps_state); +void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rc.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rc.c @@ -0,0 +1,309 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "wifi.h" +#include "base.h" +#include "rc.h" + +/* + *Finds the highest rate index we can use + *if skb is special data like DHCP/EAPOL, we set should + *it to lowest rate CCK_1M, otherwise we set rate to + *highest rate based on wireless mode used for iwconfig + *show Tx rate. 
+ */ +static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, + struct ieee80211_sta *sta, + struct sk_buff *skb, bool not_data) +{ + struct rtl_mac *rtlmac = rtl_mac(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_sta_info *sta_entry = NULL; + u8 wireless_mode = 0; + + /* + *this rate is no use for true rate, firmware + *will control rate at all it just used for + *1.show in iwconfig in B/G mode + *2.in rtl_get_tcb_desc when we check rate is + * 1M we will not use FW rate but user rate. + */ + if (rtlmac->opmode == NL80211_IFTYPE_AP || + rtlmac->opmode == NL80211_IFTYPE_ADHOC || + rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) { + if (sta) { + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + wireless_mode = sta_entry->wireless_mode; + } else { + return 0; + } + } else { + wireless_mode = rtlmac->mode; + } + + if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true) || not_data) { + return 0; + } else { + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + if (wireless_mode == WIRELESS_MODE_B) { + return B_MODE_MAX_RIX; + } else if (wireless_mode == WIRELESS_MODE_G) { + return G_MODE_MAX_RIX; + } else { + if (get_rf_type(rtlphy) != RF_2T2R) + return N_MODE_MCS7_RIX; + else + return N_MODE_MCS15_RIX; + } + } else { + if (wireless_mode == WIRELESS_MODE_A) { + return A_MODE_MAX_RIX; + } else { + if (get_rf_type(rtlphy) != RF_2T2R) + return N_MODE_MCS7_RIX; + else + return N_MODE_MCS15_RIX; + } + } + } +} + +static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, + struct ieee80211_sta *sta, + struct ieee80211_tx_rate *rate, + struct ieee80211_tx_rate_control *txrc, + u8 tries, char rix, int rtsctsenable, + bool not_data) +{ + struct rtl_mac *mac = rtl_mac(rtlpriv); + u8 sgi_20 = 0, sgi_40 = 0; + + if (sta) { + sgi_20 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; + sgi_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; + } + rate->count = tries; + rate->idx = rix >= 0x00 ? 
rix : 0x00; + + if (!not_data) { + if (txrc->short_preamble) + rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (sta && (sta->ht_cap.cap & + IEEE80211_HT_CAP_SUP_WIDTH_20_40)) + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + } else { + if (mac->bw_40) + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + } + if (sgi_20 || sgi_40) + rate->flags |= IEEE80211_TX_RC_SHORT_GI; + if (sta && sta->ht_cap.ht_supported) + rate->flags |= IEEE80211_TX_RC_MCS; + } +} + +static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta, + void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + struct rtl_priv *rtlpriv = ppriv; + struct sk_buff *skb = txrc->skb; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rates = tx_info->control.rates; + __le16 fc = rtl_get_fc(skb); + u8 try_per_rate, i, rix; + bool not_data = !ieee80211_is_data(fc); + + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rix = _rtl_rc_get_highest_rix(rtlpriv, sta, skb, not_data); + try_per_rate = 1; + _rtl_rc_rate_set_series(rtlpriv, sta, &rates[0], txrc, + try_per_rate, rix, 1, not_data); + + if (!not_data) { + for (i = 1; i < 4; i++) + _rtl_rc_rate_set_series(rtlpriv, sta, &rates[i], + txrc, i, (rix - i), 1, + not_data); + } +} + +static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv, + struct rtl_sta_info *sta_entry, u16 tid) +{ + struct rtl_mac *mac = rtl_mac(rtlpriv); + + if (mac->act_scanning) + return false; + + if (mac->opmode == NL80211_IFTYPE_STATION && + mac->cnt_after_linked < 3) + return false; + + if (sta_entry->tids[tid].agg.agg_state == RTL_AGG_STOP) + return true; + + return false; +} + +/*mac80211 Rate Control callbacks*/ +static void rtl_tx_status(void *ppriv, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = ppriv; + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct ieee80211_hdr *hdr = rtl_get_hdr(skb); + __le16 fc = rtl_get_fc(skb); + struct rtl_sta_info *sta_entry; + + if (!priv_sta || !ieee80211_is_data(fc)) + return; + + if (rtl_is_special_data(mac->hw, skb, true)) + return; + + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + return; + + if (sta) { + /* Check if aggregation has to be enabled for this tid */ + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + if ((sta->ht_cap.ht_supported == true) && + !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { + if (ieee80211_is_data_qos(fc)) { + u8 tid = rtl_get_tid(skb); + if (_rtl_tx_aggr_check(rtlpriv, sta_entry, + tid)) { + sta_entry->tids[tid].agg.agg_state = + RTL_AGG_PROGRESS; + /**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) + /**/ + ieee80211_start_tx_ba_session(sta, tid, + 5000); + /**/ +#else + ieee80211_start_tx_ba_session(sta, tid); +#endif + /**/ + } + } + } + } +} + +static void rtl_rate_init(void *ppriv, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta) +{ +} +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)) +static void rtl_rate_update(void *ppriv, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed, + enum nl80211_channel_type oper_chan_type) +{ +} +#else +static void rtl_rate_update(void *ppriv, + struct ieee80211_supported_band *sband, + struct cfg80211_chan_def *chandef, + struct ieee80211_sta *sta, void *priv_sta, + u32 changed) 
+{ +} +#endif +static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + return rtlpriv; +} + +static void rtl_rate_free(void *rtlpriv) +{ + return; +} + +static void *rtl_rate_alloc_sta(void *ppriv, + struct ieee80211_sta *sta, gfp_t gfp) +{ + struct rtl_priv *rtlpriv = ppriv; + struct rtl_rate_priv *rate_priv; + + rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp); + if (!rate_priv) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Unable to allocate private rc structure\n")); + return NULL; + } + + rtlpriv->rate_priv = rate_priv; + + return rate_priv; +} + +static void rtl_rate_free_sta(void *rtlpriv, + struct ieee80211_sta *sta, void *priv_sta) +{ + struct rtl_rate_priv *rate_priv = priv_sta; + kfree(rate_priv); +} + +static struct rate_control_ops rtl_rate_ops = { + .module = NULL, + .name = "rtl_rc", + .alloc = rtl_rate_alloc, + .free = rtl_rate_free, + .alloc_sta = rtl_rate_alloc_sta, + .free_sta = rtl_rate_free_sta, + .rate_init = rtl_rate_init, + .rate_update = rtl_rate_update, + .tx_status = rtl_tx_status, + .get_rate = rtl_get_rate, +}; + +int rtl_rate_control_register(void) +{ + return ieee80211_rate_control_register(&rtl_rate_ops); +} + +void rtl_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rtl_rate_ops); +} --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rc.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rc.h @@ -0,0 +1,47 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_RC_H__ +#define __RTL_RC_H__ + +#define B_MODE_MAX_RIX 3 +#define G_MODE_MAX_RIX 11 +#define A_MODE_MAX_RIX 7 + +/* in mac80211 mcs0-mcs15 is idx0-idx15*/ +#define N_MODE_MCS7_RIX 7 +#define N_MODE_MCS15_RIX 15 + +struct rtl_rate_priv { + u8 ht_cap; +}; + +int rtl_rate_control_register(void); +void rtl_rate_control_unregister(void); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/regd.c +++ linux-3.13.0/drivers/staging/rtl8821ae/regd.c @@ -0,0 +1,503 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "wifi.h" +#include "regd.h" + +static struct country_code_to_enum_rd allCountries[] = { + {COUNTRY_CODE_FCC, "US"}, + {COUNTRY_CODE_IC, "US"}, + {COUNTRY_CODE_ETSI, "EC"}, + {COUNTRY_CODE_SPAIN, "EC"}, + {COUNTRY_CODE_FRANCE, "EC"}, + {COUNTRY_CODE_MKK, "JP"}, + {COUNTRY_CODE_MKK1, "JP"}, + {COUNTRY_CODE_ISRAEL, "EC"}, + {COUNTRY_CODE_TELEC, "JP"}, + {COUNTRY_CODE_MIC, "JP"}, + {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"}, + {COUNTRY_CODE_WORLD_WIDE_13, "EC"}, + {COUNTRY_CODE_TELEC_NETGEAR, "EC"}, +}; + +/* + *Only these channels all allow active + *scan on all world regulatory domains + */ +#define RTL819x_2GHZ_CH01_11 \ + REG_RULE(2412-10, 2462+10, 40, 0, 20, 0) + +/* + *We enable active scan on these a case + *by case basis by regulatory domain + */ +#define RTL819x_2GHZ_CH12_13 \ + REG_RULE(2467-10, 2472+10, 40, 0, 20,\ + NL80211_RRF_PASSIVE_SCAN) + +#define RTL819x_2GHZ_CH14 \ + REG_RULE(2484-10, 2484+10, 40, 0, 20, \ + NL80211_RRF_PASSIVE_SCAN | \ + NL80211_RRF_NO_OFDM) + +/* 5G chan 36 - chan 64*/ +#define RTL819x_5GHZ_5150_5350 \ + REG_RULE(5150-10, 5350+10, 40, 0, 30, \ + NL80211_RRF_PASSIVE_SCAN | \ + NL80211_RRF_NO_IBSS) + +/* 5G chan 100 - chan 165*/ +#define RTL819x_5GHZ_5470_5850 \ + REG_RULE(5470-10, 5850+10, 40, 0, 30, \ + NL80211_RRF_PASSIVE_SCAN | \ + NL80211_RRF_NO_IBSS) + +/* 5G chan 149 - chan 165*/ +#define RTL819x_5GHZ_5725_5850 \ + REG_RULE(5725-10, 5850+10, 40, 0, 30, \ + NL80211_RRF_PASSIVE_SCAN | \ + NL80211_RRF_NO_IBSS) + +#define RTL819x_5GHZ_ALL \ + RTL819x_5GHZ_5150_5350, RTL819x_5GHZ_5470_5850 + +static const struct ieee80211_regdomain rtl_regdom_11 = { + .n_reg_rules = 1, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + } +}; + +static const struct ieee80211_regdomain rtl_regdom_12_13 = { + .n_reg_rules = 2, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_2GHZ_CH12_13, + } +}; + +static const struct ieee80211_regdomain rtl_regdom_no_midband = { + .n_reg_rules = 3, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_5GHZ_5150_5350, + RTL819x_5GHZ_5725_5850, + } +}; + +static const struct ieee80211_regdomain rtl_regdom_60_64 = { + .n_reg_rules = 3, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_2GHZ_CH12_13, + RTL819x_5GHZ_5725_5850, + } +}; + +static const struct ieee80211_regdomain rtl_regdom_14_60_64 = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_2GHZ_CH12_13, + RTL819x_2GHZ_CH14, + RTL819x_5GHZ_5725_5850, + } +}; + +static const struct ieee80211_regdomain rtl_regdom_14 = { + .n_reg_rules = 3, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_2GHZ_CH12_13, + RTL819x_2GHZ_CH14, + } +}; + +static bool 
_rtl_is_radar_freq(u16 center_freq) +{ + return (center_freq >= 5260 && center_freq <= 5700); +} + +static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator) +{ + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + const struct ieee80211_reg_rule *reg_rule; + struct ieee80211_channel *ch; + unsigned int i; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) + u32 bandwidth = 0; + int r; +#endif + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + + if (!wiphy->bands[band]) + continue; + + sband = wiphy->bands[band]; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + if (_rtl_is_radar_freq(ch->center_freq) || + (ch->flags & IEEE80211_CHAN_RADAR)) + continue; + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (IS_ERR(reg_rule)) + continue; +#else + r = freq_reg_info(wiphy, ch->center_freq, + bandwidth, ®_rule); + if (r) + continue; +#endif + + /* + *If 11d had a rule for this channel ensure + *we enable adhoc/beaconing if it allows us to + *use it. Note that we would have disabled it + *by applying our static world regdomain by + *default during init, prior to calling our + *regulatory_hint(). + */ + + if (!(reg_rule->flags & NL80211_RRF_NO_IBSS)) + ch->flags &= ~IEEE80211_CHAN_NO_IBSS; + if (!(reg_rule->flags & + NL80211_RRF_PASSIVE_SCAN)) + ch->flags &= + ~IEEE80211_CHAN_PASSIVE_SCAN; + } else { + if (ch->beacon_found) + ch->flags &= ~(IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_PASSIVE_SCAN); + } + } + } +} + +/* Allows active scan scan on Ch 12 and 13 */ +static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy, + enum nl80211_reg_initiator + initiator) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + const struct ieee80211_reg_rule *reg_rule; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) + u32 bandwidth = 0; + int r; +#endif + + if (!wiphy->bands[IEEE80211_BAND_2GHZ]) + return; + sband = wiphy->bands[IEEE80211_BAND_2GHZ]; + + /* + *If no country IE has been received always enable active scan + *on these channels. This is only done for specific regulatory SKUs + */ + if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) { + ch = &sband->channels[11]; /* CH 12 */ + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; + ch = &sband->channels[12]; /* CH 13 */ + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; + return; + } + + /* + *If a country IE has been recieved check its rule for this + *channel first before enabling active scan. The passive scan + *would have been enforced by the initial processing of our + *custom regulatory domain. 
+ */ + + ch = &sband->channels[11]; /* CH 12 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (!IS_ERR(reg_rule)) { +#else + r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule); + if (!r) { +#endif + if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; + } + + ch = &sband->channels[12]; /* CH 13 */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) + reg_rule = freq_reg_info(wiphy, ch->center_freq); + if (!IS_ERR(reg_rule)) { +#else + r = freq_reg_info(wiphy, ch->center_freq, bandwidth, ®_rule); + if (!r) { +#endif + if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN)) + if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) + ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; + } +} + +/* + *Always apply Radar/DFS rules on + *freq range 5260 MHz - 5700 MHz + */ +static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + unsigned int i; + + if (!wiphy->bands[IEEE80211_BAND_5GHZ]) + return; + + sband = wiphy->bands[IEEE80211_BAND_5GHZ]; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + if (!_rtl_is_radar_freq(ch->center_freq)) + continue; + + /* + *We always enable radar detection/DFS on this + *frequency range. Additionally we also apply on + *this frequency range: + *- If STA mode does not yet have DFS supports disable + * active scanning + *- If adhoc mode does not support DFS yet then disable + * adhoc in the frequency. + *- If AP mode does not yet support radar detection/DFS + *do not allow AP mode + */ + if (!(ch->flags & IEEE80211_CHAN_DISABLED)) + ch->flags |= IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_PASSIVE_SCAN; + } +} + +static void _rtl_reg_apply_world_flags(struct wiphy *wiphy, + enum nl80211_reg_initiator initiator, + struct rtl_regulatory *reg) +{ + _rtl_reg_apply_beaconing_flags(wiphy, initiator); + _rtl_reg_apply_active_scan_flags(wiphy, initiator); + return; +} + +static void _rtl_dump_channel_map(struct wiphy *wiphy) +{ + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + unsigned int i; + + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { + if (!wiphy->bands[band]) + continue; + sband = wiphy->bands[band]; + for (i = 0; i < sband->n_channels; i++) + ch = &sband->channels[i]; + } +} + +static int _rtl_reg_notifier_apply(struct wiphy *wiphy, + struct regulatory_request *request, + struct rtl_regulatory *reg) +{ + /* We always apply this */ + _rtl_reg_apply_radar_flags(wiphy); + + switch (request->initiator) { + case NL80211_REGDOM_SET_BY_DRIVER: + case NL80211_REGDOM_SET_BY_CORE: + case NL80211_REGDOM_SET_BY_USER: + break; + case NL80211_REGDOM_SET_BY_COUNTRY_IE: + _rtl_reg_apply_world_flags(wiphy, request->initiator, reg); + break; + } + + _rtl_dump_channel_map(wiphy); + + return 0; +} + +static const struct ieee80211_regdomain *_rtl_regdomain_select( + struct rtl_regulatory *reg) +{ + switch (reg->country_code) { + case COUNTRY_CODE_FCC: + return &rtl_regdom_no_midband; + case COUNTRY_CODE_IC: + return &rtl_regdom_11; + case COUNTRY_CODE_ETSI: + case COUNTRY_CODE_TELEC_NETGEAR: + return &rtl_regdom_60_64; + case COUNTRY_CODE_SPAIN: + case COUNTRY_CODE_FRANCE: + case COUNTRY_CODE_ISRAEL: + case COUNTRY_CODE_WORLD_WIDE_13: + return &rtl_regdom_12_13; + case COUNTRY_CODE_MKK: + case COUNTRY_CODE_MKK1: + case COUNTRY_CODE_TELEC: + case COUNTRY_CODE_MIC: 
+ return &rtl_regdom_14_60_64; + case COUNTRY_CODE_GLOBAL_DOMAIN: + return &rtl_regdom_14; + default: + return &rtl_regdom_no_midband; + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) +static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg, + struct wiphy *wiphy, + void (*reg_notifier) (struct wiphy * wiphy, + struct regulatory_request * + request)) +#else +static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg, + struct wiphy *wiphy, + int (*reg_notifier) (struct wiphy * wiphy, + struct regulatory_request * + request)) +#endif +{ + const struct ieee80211_regdomain *regd; + + wiphy->reg_notifier = reg_notifier; + + wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY; + wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY; + wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS; + + regd = _rtl_regdomain_select(reg); + wiphy_apply_custom_regulatory(wiphy, regd); + _rtl_reg_apply_radar_flags(wiphy); + _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); + return 0; +} + +static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(allCountries); i++) { + if (allCountries[i].countrycode == countrycode) + return &allCountries[i]; + } + return NULL; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) +int rtl_regd_init(struct ieee80211_hw *hw, + void (*reg_notifier) (struct wiphy *wiphy, + struct regulatory_request *request)) +#else +int rtl_regd_init(struct ieee80211_hw *hw, + int (*reg_notifier) (struct wiphy *wiphy, + struct regulatory_request *request)) +#endif +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct wiphy *wiphy = hw->wiphy; + struct country_code_to_enum_rd *country = NULL; + + if (wiphy == NULL || &rtlpriv->regd == NULL) + return -EINVAL; + + /* init country_code from efuse channel plan */ + rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan; + + RT_TRACE(COMP_REGD, DBG_TRACE, + (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n", + rtlpriv->regd.country_code)); + + if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { + RT_TRACE(COMP_REGD, DBG_DMESG, + (KERN_DEBUG "rtl: EEPROM indicates invalid contry code" + "world wide 13 should be used\n")); + + rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13; + } + + country = _rtl_regd_find_country(rtlpriv->regd.country_code); + + if (country) { + rtlpriv->regd.alpha2[0] = country->iso_name[0]; + rtlpriv->regd.alpha2[1] = country->iso_name[1]; + } else { + rtlpriv->regd.alpha2[0] = '0'; + rtlpriv->regd.alpha2[1] = '0'; + } + + RT_TRACE(COMP_REGD, DBG_TRACE, + (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n", + rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1])); + + _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier); + + return 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) +void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_REGD, DBG_LOUD, ("\n")); + + _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd); +} +#else +int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_REGD, DBG_LOUD, ("\n")); + + return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd); +} +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/regd.h +++ linux-3.13.0/drivers/staging/rtl8821ae/regd.h @@ -0,0 +1,75 @@ 
+/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_REGD_H__ +#define __RTL_REGD_H__ + +#define IEEE80211_CHAN_NO_IBSS 1<<2 +#define IEEE80211_CHAN_PASSIVE_SCAN 1<<1 +#define WIPHY_FLAG_CUSTOM_REGULATORY BIT(0) +#define WIPHY_FLAG_STRICT_REGULATORY BIT(1) +#define WIPHY_FLAG_DISABLE_BEACON_HINTS BIT(2) + +struct country_code_to_enum_rd { + u16 countrycode; + const char *iso_name; +}; + +enum country_code_type_t { + COUNTRY_CODE_FCC = 0, + COUNTRY_CODE_IC = 1, + COUNTRY_CODE_ETSI = 2, + COUNTRY_CODE_SPAIN = 3, + COUNTRY_CODE_FRANCE = 4, + COUNTRY_CODE_MKK = 5, + COUNTRY_CODE_MKK1 = 6, + COUNTRY_CODE_ISRAEL = 7, + COUNTRY_CODE_TELEC = 8, + COUNTRY_CODE_MIC = 9, + COUNTRY_CODE_GLOBAL_DOMAIN = 10, + COUNTRY_CODE_WORLD_WIDE_13 = 11, + COUNTRY_CODE_TELEC_NETGEAR = 12, + + /*add new channel plan above this line */ + COUNTRY_CODE_MAX +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) +int rtl_regd_init(struct ieee80211_hw *hw, + void (*reg_notifier) (struct wiphy *wiphy, + struct regulatory_request *request)); +void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); +#else +int rtl_regd_init(struct ieee80211_hw *hw, + int (*reg_notifier) (struct wiphy *wiphy, + struct regulatory_request *request)); +int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); +#endif + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/btc.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/btc.h @@ -0,0 +1,87 @@ + +/****************************************************************************** + ** + ** Copyright(c) 2009-2010 Realtek Corporation. + ** + ** This program is free software; you can redistribute it and/or modify it + ** under the terms of version 2 of the GNU General Public License as + ** published by the Free Software Foundation. + ** + ** This program is distributed in the hope that it will be useful, but WITHOUT + ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + ** more details. + ** + ** You should have received a copy of the GNU General Public License along with + ** this program; if not, write to the Free Software Foundation, Inc., + ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + ** + ** The full GNU General Public License is included in this distribution in the + ** file called LICENSE. 
+ ** + ** Contact Information: + ** wlanfae + ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + ** Hsinchu 300, Taiwan. + ** Larry Finger + ** + ******************************************************************************/ + +#ifndef __RTL8821AE_BTC_H__ +#define __RTL8821AE_BTC_H__ + +#include "../wifi.h" +#include "hal_bt_coexist.h" + +struct bt_coexist_c2h_info { + u8 no_parse_c2h; + u8 has_c2h; +}; + +struct btdm_8821ae { + bool b_all_off; + bool b_agc_table_en; + bool b_adc_back_off_on; + bool b2_ant_hid_en; + bool b_low_penalty_rate_adaptive; + bool b_rf_rx_lpf_shrink; + bool b_reject_aggre_pkt; + bool b_tra_tdma_on; + u8 tra_tdma_nav; + u8 tra_tdma_ant; + bool b_tdma_on; + u8 tdma_ant; + u8 tdma_nav; + u8 tdma_dac_swing; + u8 fw_dac_swing_lvl; + bool b_ps_tdma_on; + u8 ps_tdma_byte[5]; + bool b_pta_on; + u32 val_0x6c0; + u32 val_0x6c8; + u32 val_0x6cc; + bool b_sw_dac_swing_on; + u32 sw_dac_swing_lvl; + u32 wlan_act_hi; + u32 wlan_act_lo; + u32 bt_retry_index; + bool b_dec_bt_pwr; + bool b_ignore_wlan_act; +}; + +struct bt_coexist_8821ae { + u32 high_priority_tx; + u32 high_priority_rx; + u32 low_priority_tx; + u32 low_priority_rx; + u8 c2h_bt_info; + bool b_c2h_bt_info_req_sent; + bool b_c2h_bt_inquiry_page; + u32 bt_inq_page_start_time; + u8 bt_retry_cnt; + u8 c2h_bt_info_original; + u8 bt_inquiry_page_cnt; + struct btdm_8821ae btdm; +}; + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/def.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/def.h @@ -0,0 +1,442 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_DEF_H__ +#define __RTL8821AE_DEF_H__ + +/*--------------------------Define -------------------------------------------*/ +/* BIT 7 HT Rate*/ +/*TxHT = 0*/ +#define MGN_1M 0x02 +#define MGN_2M 0x04 +#define MGN_5_5M 0x0b +#define MGN_11M 0x16 + +#define MGN_6M 0x0c +#define MGN_9M 0x12 +#define MGN_12M 0x18 +#define MGN_18M 0x24 +#define MGN_24M 0x30 +#define MGN_36M 0x48 +#define MGN_48M 0x60 +#define MGN_54M 0x6c + +// TxHT = 1 +#define MGN_MCS0 0x80 +#define MGN_MCS1 0x81 +#define MGN_MCS2 0x82 +#define MGN_MCS3 0x83 +#define MGN_MCS4 0x84 +#define MGN_MCS5 0x85 +#define MGN_MCS6 0x86 +#define MGN_MCS7 0x87 +#define MGN_MCS8 0x88 +#define MGN_MCS9 0x89 +#define MGN_MCS10 0x8a +#define MGN_MCS11 0x8b +#define MGN_MCS12 0x8c +#define MGN_MCS13 0x8d +#define MGN_MCS14 0x8e +#define MGN_MCS15 0x8f +//VHT rate +#define MGN_VHT1SS_MCS0 0x90 +#define MGN_VHT1SS_MCS1 0x91 +#define MGN_VHT1SS_MCS2 0x92 +#define MGN_VHT1SS_MCS3 0x93 +#define MGN_VHT1SS_MCS4 0x94 +#define MGN_VHT1SS_MCS5 0x95 +#define MGN_VHT1SS_MCS6 0x96 +#define MGN_VHT1SS_MCS7 0x97 +#define MGN_VHT1SS_MCS8 0x98 +#define MGN_VHT1SS_MCS9 0x99 +#define MGN_VHT2SS_MCS0 0x9a +#define MGN_VHT2SS_MCS1 0x9b +#define MGN_VHT2SS_MCS2 0x9c +#define MGN_VHT2SS_MCS3 0x9d +#define MGN_VHT2SS_MCS4 0x9e +#define MGN_VHT2SS_MCS5 0x9f +#define MGN_VHT2SS_MCS6 0xa0 +#define MGN_VHT2SS_MCS7 0xa1 +#define MGN_VHT2SS_MCS8 0xa2 +#define MGN_VHT2SS_MCS9 0xa3 + +#define MGN_VHT3SS_MCS0 0xa4 +#define MGN_VHT3SS_MCS1 0xa5 +#define MGN_VHT3SS_MCS2 0xa6 +#define MGN_VHT3SS_MCS3 0xa7 +#define MGN_VHT3SS_MCS4 0xa8 +#define MGN_VHT3SS_MCS5 0xa9 +#define MGN_VHT3SS_MCS6 0xaa +#define MGN_VHT3SS_MCS7 0xab +#define MGN_VHT3SS_MCS8 0xac +#define MGN_VHT3SS_MCS9 0xad + +#define MGN_MCS0_SG 0xc0 +#define MGN_MCS1_SG 0xc1 +#define MGN_MCS2_SG 0xc2 +#define MGN_MCS3_SG 0xc3 +#define MGN_MCS4_SG 0xc4 +#define MGN_MCS5_SG 0xc5 +#define MGN_MCS6_SG 0xc6 +#define MGN_MCS7_SG 0xc7 +#define MGN_MCS8_SG 0xc8 +#define MGN_MCS9_SG 0xc9 +#define MGN_MCS10_SG 0xca +#define MGN_MCS11_SG 0xcb +#define MGN_MCS12_SG 0xcc +#define MGN_MCS13_SG 0xcd +#define MGN_MCS14_SG 0xce +#define MGN_MCS15_SG 0xcf + +#define MGN_UNKNOWN 0xff + + +/* 30 ms */ +#define WIFI_NAV_UPPER_US 30000 +#define HAL_92C_NAV_UPPER_UNIT 128 + +#define HAL_RETRY_LIMIT_INFRA 48 +#define HAL_RETRY_LIMIT_AP_ADHOC 7 + +#define RESET_DELAY_8185 20 + +#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER) +#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK) + +#define NUM_OF_FIRMWARE_QUEUE 10 +#define NUM_OF_PAGES_IN_FW 0x100 +#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07 +#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0 +#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02 +#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1 + +#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048 +#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026 +#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00 + +#define MAX_RX_DMA_BUFFER_SIZE 0x3E80 + + +#define MAX_LINES_HWCONFIG_TXT 1000 +#define MAX_BYTES_LINE_HWCONFIG_TXT 256 + +#define SW_THREE_WIRE 0 +#define HW_THREE_WIRE 2 + +#define BT_DEMO_BOARD 0 
+#define BT_QA_BOARD 1 +#define BT_FPGA 2 + +#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0 +#define HAL_PRIME_CHNL_OFFSET_LOWER 1 +#define HAL_PRIME_CHNL_OFFSET_UPPER 2 + +#define MAX_H2C_QUEUE_NUM 10 + +#define RX_MPDU_QUEUE 0 +#define RX_CMD_QUEUE 1 +#define RX_MAX_QUEUE 2 +#define AC2QUEUEID(_AC) (_AC) + +#define C2H_RX_CMD_HDR_LEN 8 +#define GET_C2H_CMD_CMD_LEN(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 0, 16) +#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 16, 8) +#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 24, 7) +#define GET_C2H_CMD_CONTINUE(__prxhdr) \ + LE_BITS_TO_4BYTE((__prxhdr), 31, 1) +#define GET_C2H_CMD_CONTENT(__prxhdr) \ + ((u8*)(__prxhdr) + C2H_RX_CMD_HDR_LEN) + +#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8) +#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16) +#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5) +#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1) +#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) +#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ + LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) + +#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) + +#define CHIP_8812 BIT(2) +#define CHIP_8821 (BIT(0)|BIT(2)) + +#define CHIP_8821A (BIT(0)|BIT(2)) +#define NORMAL_CHIP BIT(3) +#define RF_TYPE_1T1R (~(BIT(4)|BIT(5)|BIT(6))) +#define RF_TYPE_1T2R BIT(4) +#define RF_TYPE_2T2R BIT(5) +#define CHIP_VENDOR_UMC BIT(7) +#define B_CUT_VERSION BIT(12) +#define C_CUT_VERSION BIT(13) +#define D_CUT_VERSION ((BIT(12)|BIT(13))) +#define E_CUT_VERSION BIT(14) +#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28)) + + + +enum version_8821ae { + VERSION_TEST_CHIP_1T1R_8812 = 0x0004, + VERSION_TEST_CHIP_2T2R_8812 = 0x0024, + VERSION_NORMAL_TSMC_CHIP_1T1R_8812 = 0x100c, + VERSION_NORMAL_TSMC_CHIP_2T2R_8812 = 0x102c, + VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT = 0x200c, + VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT = 0x202c, + VERSION_TEST_CHIP_8821 = 0x0005, + VERSION_NORMAL_TSMC_CHIP_8821 = 0x000d, + VERSION_NORMAL_TSMC_CHIP_8821_B_CUT = 0x100d, + VERSION_UNKNOWN = 0xFF, +}; + +enum vht_data_sc{ + VHT_DATA_SC_DONOT_CARE = 0, + VHT_DATA_SC_20_UPPER_OF_80MHZ = 1, + VHT_DATA_SC_20_LOWER_OF_80MHZ = 2, + VHT_DATA_SC_20_UPPERST_OF_80MHZ = 3, + VHT_DATA_SC_20_LOWEST_OF_80MHZ = 4, + VHT_DATA_SC_20_RECV1 = 5, + VHT_DATA_SC_20_RECV2 = 6, + VHT_DATA_SC_20_RECV3 = 7, + VHT_DATA_SC_20_RECV4 = 8, + VHT_DATA_SC_40_UPPER_OF_80MHZ = 9, + VHT_DATA_SC_40_LOWER_OF_80MHZ = 10, +}; + + +/* MASK */ +#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) +#define CHIP_TYPE_MASK BIT(3) +#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6)) +#define MANUFACTUER_MASK BIT(7) +#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8)) +#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12)) + +/* Get element */ +#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK) +#define GET_CVID_CHIP_TYPE(version) ((version) & CHIP_TYPE_MASK) +#define GET_CVID_RF_TYPE(version) ((version) & RF_TYPE_MASK) +#define GET_CVID_MANUFACTUER(version) 
((version) & MANUFACTUER_MASK) +#define GET_CVID_ROM_VERSION(version) ((version) & ROM_VERSION_MASK) +#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK) + +#define IS_1T1R(version) ((GET_CVID_RF_TYPE(version))? false : true) +#define IS_1T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\ + ? true : false) +#define IS_2T2R(version) ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\ + ? true : false) + +#define IS_8812_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8812)? \ + true : false) +#define IS_8821_SERIES(version) ((GET_CVID_IC_TYPE(version) == CHIP_8821)? \ + true : false) + +#define IS_VENDOR_8812A_TEST_CHIP(version) ((IS_8812_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + false : true) : false) +#define IS_VENDOR_8812A_MP_CHIP(version) ((IS_8812_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + true : false) : false) +#define IS_VENDOR_8812A_C_CUT(version) ((IS_8812_SERIES(version)) ? \ + ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? \ + true : false) : false) + +#define IS_VENDOR_8821A_TEST_CHIP(version) ((IS_8821_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + false : true) : false) +#define IS_VENDOR_8821A_MP_CHIP(version) ((IS_8821_SERIES(version)) ? \ + ((IS_NORMAL_CHIP(version)) ? \ + true : false) : false) +#define IS_VENDOR_8821A_B_CUT(version) ((IS_8821_SERIES(version)) ? \ + ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \ + true : false) : false) + + +enum rf_optype { + RF_OP_BY_SW_3WIRE = 0, + RF_OP_BY_FW, + RF_OP_MAX +}; + +enum rf_power_state { + RF_ON, + RF_OFF, + RF_SLEEP, + RF_SHUT_DOWN, +}; + +enum power_save_mode { + POWER_SAVE_MODE_ACTIVE, + POWER_SAVE_MODE_SAVE, +}; + +enum power_polocy_config { + POWERCFG_MAX_POWER_SAVINGS, + POWERCFG_GLOBAL_POWER_SAVINGS, + POWERCFG_LOCAL_POWER_SAVINGS, + POWERCFG_LENOVO, +}; + +enum interface_select_pci { + INTF_SEL1_MINICARD = 0, + INTF_SEL0_PCIE = 1, + INTF_SEL2_RSV = 2, + INTF_SEL3_RSV = 3, +}; + +enum hal_fw_c2h_cmd_id { + HAL_FW_C2H_CMD_Read_MACREG = 0, + HAL_FW_C2H_CMD_Read_BBREG = 1, + HAL_FW_C2H_CMD_Read_RFREG = 2, + HAL_FW_C2H_CMD_Read_EEPROM = 3, + HAL_FW_C2H_CMD_Read_EFUSE = 4, + HAL_FW_C2H_CMD_Read_CAM = 5, + HAL_FW_C2H_CMD_Get_BasicRate = 6, + HAL_FW_C2H_CMD_Get_DataRate = 7, + HAL_FW_C2H_CMD_Survey = 8, + HAL_FW_C2H_CMD_SurveyDone = 9, + HAL_FW_C2H_CMD_JoinBss = 10, + HAL_FW_C2H_CMD_AddSTA = 11, + HAL_FW_C2H_CMD_DelSTA = 12, + HAL_FW_C2H_CMD_AtimDone = 13, + HAL_FW_C2H_CMD_TX_Report = 14, + HAL_FW_C2H_CMD_CCX_Report = 15, + HAL_FW_C2H_CMD_DTM_Report = 16, + HAL_FW_C2H_CMD_TX_Rate_Statistics = 17, + HAL_FW_C2H_CMD_C2HLBK = 18, + HAL_FW_C2H_CMD_C2HDBG = 19, + HAL_FW_C2H_CMD_C2HFEEDBACK = 20, + HAL_FW_C2H_CMD_MAX +}; + +enum rtl_desc_qsel { + QSLT_BK = 0x2, + QSLT_BE = 0x0, + QSLT_VI = 0x5, + QSLT_VO = 0x7, + QSLT_BEACON = 0x10, + QSLT_HIGH = 0x11, + QSLT_MGNT = 0x12, + QSLT_CMD = 0x13, +}; + +enum rtl_desc8821ae_rate { + DESC_RATE1M = 0x00, + DESC_RATE2M = 0x01, + DESC_RATE5_5M = 0x02, + DESC_RATE11M = 0x03, + + DESC_RATE6M = 0x04, + DESC_RATE9M = 0x05, + DESC_RATE12M = 0x06, + DESC_RATE18M = 0x07, + DESC_RATE24M = 0x08, + DESC_RATE36M = 0x09, + DESC_RATE48M = 0x0a, + DESC_RATE54M = 0x0b, + + DESC_RATEMCS0 = 0x0c, + DESC_RATEMCS1 = 0x0d, + DESC_RATEMCS2 = 0x0e, + DESC_RATEMCS3 = 0x0f, + DESC_RATEMCS4 = 0x10, + DESC_RATEMCS5 = 0x11, + DESC_RATEMCS6 = 0x12, + DESC_RATEMCS7 = 0x13, + DESC_RATEMCS8 = 0x14, + DESC_RATEMCS9 = 0x15, + DESC_RATEMCS10 = 0x16, + DESC_RATEMCS11 = 0x17, + DESC_RATEMCS12 = 0x18, + DESC_RATEMCS13 = 0x19, + DESC_RATEMCS14 = 0x1a, + 
DESC_RATEMCS15 = 0x1b, + DESC_RATEVHT1SS_MCS0 = 0x1c, + DESC_RATEVHT1SS_MCS1 = 0x1d, + DESC_RATEVHT1SS_MCS2 = 0x1e, + DESC_RATEVHT1SS_MCS3 = 0x1f, + DESC_RATEVHT1SS_MCS4 = 0x20, + DESC_RATEVHT1SS_MCS5 = 0x21, + DESC_RATEVHT1SS_MCS6 = 0x22, + DESC_RATEVHT1SS_MCS7 = 0x23, + DESC_RATEVHT1SS_MCS8 = 0x24, + DESC_RATEVHT1SS_MCS9 = 0x25, + DESC_RATEVHT2SS_MCS0 = 0x26, + DESC_RATEVHT2SS_MCS1 = 0x27, + DESC_RATEVHT2SS_MCS2 = 0x28, + DESC_RATEVHT2SS_MCS3 = 0x29, + DESC_RATEVHT2SS_MCS4 = 0x2a, + DESC_RATEVHT2SS_MCS5 = 0x2b, + DESC_RATEVHT2SS_MCS6 = 0x2c, + DESC_RATEVHT2SS_MCS7 = 0x2d, + DESC_RATEVHT2SS_MCS8 = 0x2e, + DESC_RATEVHT2SS_MCS9 = 0x2f, +}; + +enum rx_packet_type{ + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, + C2H_PACKET, +}; + +struct phy_sts_cck_8821ae_t { + u8 adc_pwdb_X[4]; + u8 sq_rpt; + u8 cck_agc_rpt; +}; + +struct h2c_cmd_8821ae { + u8 element_id; + u32 cmd_len; + u8 *p_cmdbuffer; +}; + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/dm.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/dm.c @@ -0,0 +1,3045 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../base.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "trx.h" +#include "../btcoexist/rtl_btc.h" + +struct dig_t dm_digtable; +static struct ps_t dm_pstable; + +static const u32 rtl8812ae_txscaling_table[TXSCALE_TABLE_SIZE] = +{ + 0x081, // 0, -12.0dB + 0x088, // 1, -11.5dB + 0x090, // 2, -11.0dB + 0x099, // 3, -10.5dB + 0x0A2, // 4, -10.0dB + 0x0AC, // 5, -9.5dB + 0x0B6, // 6, -9.0dB + 0x0C0, // 7, -8.5dB + 0x0CC, // 8, -8.0dB + 0x0D8, // 9, -7.5dB + 0x0E5, // 10, -7.0dB + 0x0F2, // 11, -6.5dB + 0x101, // 12, -6.0dB + 0x110, // 13, -5.5dB + 0x120, // 14, -5.0dB + 0x131, // 15, -4.5dB + 0x143, // 16, -4.0dB + 0x156, // 17, -3.5dB + 0x16A, // 18, -3.0dB + 0x180, // 19, -2.5dB + 0x197, // 20, -2.0dB + 0x1AF, // 21, -1.5dB + 0x1C8, // 22, -1.0dB + 0x1E3, // 23, -0.5dB + 0x200, // 24, +0 dB + 0x21E, // 25, +0.5dB + 0x23E, // 26, +1.0dB + 0x261, // 27, +1.5dB + 0x285, // 28, +2.0dB + 0x2AB, // 29, +2.5dB + 0x2D3, // 30, +3.0dB + 0x2FE, // 31, +3.5dB + 0x32B, // 32, +4.0dB + 0x35C, // 33, +4.5dB + 0x38E, // 34, +5.0dB + 0x3C4, // 35, +5.5dB + 0x3FE // 36, +6.0dB +}; + +static const u32 rtl8821ae_txscaling_table[TXSCALE_TABLE_SIZE] = { + 0x081, // 0, -12.0dB + 0x088, // 1, -11.5dB + 0x090, // 2, -11.0dB + 0x099, // 3, -10.5dB + 0x0A2, // 4, -10.0dB + 0x0AC, // 5, -9.5dB + 0x0B6, // 6, -9.0dB + 0x0C0, // 7, -8.5dB + 0x0CC, // 8, -8.0dB + 0x0D8, // 9, -7.5dB + 0x0E5, // 10, -7.0dB + 0x0F2, // 11, -6.5dB + 0x101, // 12, -6.0dB + 0x110, // 13, -5.5dB + 0x120, // 14, -5.0dB + 0x131, // 15, -4.5dB + 0x143, // 16, -4.0dB + 0x156, // 17, -3.5dB + 0x16A, // 18, -3.0dB + 0x180, // 19, -2.5dB + 0x197, // 20, -2.0dB + 0x1AF, // 21, -1.5dB + 0x1C8, // 22, -1.0dB + 0x1E3, // 23, -0.5dB + 0x200, // 24, +0 dB + 0x21E, // 25, +0.5dB + 0x23E, // 26, +1.0dB + 0x261, // 27, +1.5dB + 0x285, // 28, +2.0dB + 0x2AB, // 29, +2.5dB + 0x2D3, // 30, +3.0dB + 0x2FE, // 31, +3.5dB + 0x32B, // 32, +4.0dB + 0x35C, // 33, +4.5dB + 0x38E, // 34, +5.0dB + 0x3C4, // 35, +5.5dB + 0x3FE // 36, +6.0dB +}; + +static const u32 ofdmswing_table[] = { + 0x0b40002d, // 0, -15.0dB + 0x0c000030, // 1, -14.5dB + 0x0cc00033, // 2, -14.0dB + 0x0d800036, // 3, -13.5dB + 0x0e400039, // 4, -13.0dB + 0x0f00003c, // 5, -12.5dB + 0x10000040, // 6, -12.0dB + 0x11000044, // 7, -11.5dB + 0x12000048, // 8, -11.0dB + 0x1300004c, // 9, -10.5dB + 0x14400051, // 10, -10.0dB + 0x15800056, // 11, -9.5dB + 0x16c0005b, // 12, -9.0dB + 0x18000060, // 13, -8.5dB + 0x19800066, // 14, -8.0dB + 0x1b00006c, // 15, -7.5dB + 0x1c800072, // 16, -7.0dB + 0x1e400079, // 17, -6.5dB + 0x20000080, // 18, -6.0dB + 0x22000088, // 19, -5.5dB + 0x24000090, // 20, -5.0dB + 0x26000098, // 21, -4.5dB + 0x288000a2, // 22, -4.0dB + 0x2ac000ab, // 23, -3.5dB + 0x2d4000b5, // 24, -3.0dB + 0x300000c0, // 25, -2.5dB + 0x32c000cb, // 26, -2.0dB + 0x35c000d7, // 27, -1.5dB + 0x390000e4, // 28, -1.0dB + 0x3c8000f2, // 29, -0.5dB + 0x40000100, // 30, +0dB + 0x43c0010f, // 31, +0.5dB + 0x47c0011f, // 32, +1.0dB + 0x4c000130, // 33, +1.5dB + 0x50800142, // 34, +2.0dB + 0x55400155, // 35, +2.5dB + 0x5a400169, // 36, +3.0dB + 0x5fc0017f, // 37, +3.5dB + 0x65400195, // 38, +4.0dB + 0x6b8001ae, // 39, +4.5dB + 0x71c001c7, // 40, +5.0dB + 0x788001e2, // 41, +5.5dB + 0x7f8001fe // 42, +6.0dB +}; + +static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { + {0x09, 0x08, 0x07, 0x06, 
0x04, 0x03, 0x01, 0x01}, // 0, -16.0dB + {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, // 1, -15.5dB + {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, // 2, -15.0dB + {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, // 3, -14.5dB + {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, // 4, -14.0dB + {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, // 5, -13.5dB + {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, // 6, -13.0dB + {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, // 7, -12.5dB + {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, // 8, -12.0dB + {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, // 9, -11.5dB + {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, // 10, -11.0dB + {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, // 11, -10.5dB + {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 12, -10.0dB + {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, // 13, -9.5dB + {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, // 14, -9.0dB + {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, // 15, -8.5dB + {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, // 16, -8.0dB + {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, // 17, -7.5dB + {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, // 18, -7.0dB + {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, // 19, -6.5dB + {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, // 20, -6.0dB + {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, // 21, -5.5dB + {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, // 22, -5.0dB + {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, // 23, -4.5dB + {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, // 24, -4.0dB + {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, // 25, -3.5dB + {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, // 26, -3.0dB + {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, // 27, -2.5dB + {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, // 28, -2.0dB + {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, // 29, -1.5dB + {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, // 30, -1.0dB + {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, // 31, -0.5dB + {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04} // 32, +0dB +}; + +static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8]= { + {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, // 0, -16.0dB + {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, // 1, -15.5dB + {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, // 2, -15.0dB + {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, // 3, -14.5dB + {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, // 4, -14.0dB + {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, // 5, -13.5dB + {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, // 6, -13.0dB + {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, // 7, -12.5dB + {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, // 8, -12.0dB + {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, // 9, -11.5dB + {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, // 10, -11.0dB + {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, // 11, -10.5dB + {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 12, -10.0dB + {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, // 13, -9.5dB + {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 14, -9.0dB + {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, // 15, -8.5dB + {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 16, -8.0dB + {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, // 17, -7.5dB + {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, // 18, -7.0dB + {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, // 19, -6.5dB + {0x1b, 0x1a, 
0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 20, -6.0dB + {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, // 21, -5.5dB + {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, // 22, -5.0dB + {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, // 23, -4.5dB + {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, // 24, -4.0dB + {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, // 25, -3.5dB + {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, // 26, -3.0dB + {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, // 27, -2.5dB + {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, // 28, -2.0dB + {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, // 29, -1.5dB + {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, // 30, -1.0dB + {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, // 31, -0.5dB + {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00} // 32, +0dB +}; + +static const u32 edca_setting_dl[PEER_MAX] = { + 0xa44f, /* 0 UNKNOWN */ + 0x5ea44f, /* 1 REALTEK_90 */ + 0x5e4322, /* 2 REALTEK_92SE */ + 0x5ea42b, /* 3 BROAD */ + 0xa44f, /* 4 RAL */ + 0xa630, /* 5 ATH */ + 0x5ea630, /* 6 CISCO */ + 0x5ea42b, /* 7 MARVELL */ +}; + +static const u32 edca_setting_ul[PEER_MAX] = { + 0x5e4322, /* 0 UNKNOWN */ + 0xa44f, /* 1 REALTEK_90 */ + 0x5ea44f, /* 2 REALTEK_92SE */ + 0x5ea32b, /* 3 BROAD */ + 0x5ea422, /* 4 RAL */ + 0x5ea322, /* 5 ATH */ + 0x3ea430, /* 6 CISCO */ + 0x5ea44f, /* 7 MARV */ +}; + +static u8 rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack[] = + {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9}; +static u8 rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack[] = + {0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11}; + + +u8 rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack[] = + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11}; +u8 rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; +u8 rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack[] = + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11}; +u8 rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; +u8 rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] = + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11}; +u8 rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; +u8 rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] = + {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11}; +u8 rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; + +u8 rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13}, + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18}, +}; +u8 rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 
1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; +u8 rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13}, + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13}, + {0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18}, +}; +u8 rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +u8 rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack[] = + {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; +u8 rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; +u8 rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack[] = + {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; +u8 rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; +u8 rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack[] = + {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; +u8 rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; +u8 rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack[] = + {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; +u8 rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack[] = + {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; + +u8 rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +u8 rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +u8 rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +u8 
rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack[][DELTA_SWINGIDX_SIZE] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +void rtl8812ae_dm_read_and_config_txpower_track( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===> rtl8821ae_dm_read_and_config_txpower_track\n")); + + + memcpy(rtldm->delta_swing_table_idx_24ga_p, + rtl8812ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24ga_n, + rtl8812ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gb_p, + rtl8812ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gb_n, + rtl8812ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + + memcpy(rtldm->delta_swing_table_idx_24gccka_p, + rtl8812ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gccka_n, + rtl8812ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gcckb_p, + rtl8812ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gcckb_n, + rtl8812ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + + memcpy(rtldm->delta_swing_table_idx_5ga_p, + rtl8812ae_delta_swing_table_idx_5ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3); + memcpy(rtldm->delta_swing_table_idx_5ga_n, + rtl8812ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3); + memcpy(rtldm->delta_swing_table_idx_5gb_p, + rtl8812ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3); + memcpy(rtldm->delta_swing_table_idx_5gb_n, + rtl8812ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3); +} + +void rtl8821ae_dm_read_and_config_txpower_track( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===> rtl8821ae_dm_read_and_config_txpower_track\n")); + + + memcpy(rtldm->delta_swing_table_idx_24ga_p, + rtl8821ae_delta_swing_table_idx_24ga_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24ga_n, + rtl8821ae_delta_swing_table_idx_24ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gb_p, + rtl8821ae_delta_swing_table_idx_24gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gb_n, + rtl8821ae_delta_swing_table_idx_24gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + + memcpy(rtldm->delta_swing_table_idx_24gccka_p, + rtl8821ae_delta_swing_table_idx_24gccka_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gccka_n, + rtl8821ae_delta_swing_table_idx_24gccka_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gcckb_p, + rtl8821ae_delta_swing_table_idx_24gcckb_p_txpwrtrack, DELTA_SWINGIDX_SIZE); + memcpy(rtldm->delta_swing_table_idx_24gcckb_n, + rtl8821ae_delta_swing_table_idx_24gcckb_n_txpwrtrack, DELTA_SWINGIDX_SIZE); + + memcpy(rtldm->delta_swing_table_idx_5ga_p, + rtl8821ae_delta_swing_table_idx_5ga_p_txpwrtrack, 
DELTA_SWINGIDX_SIZE*3); + memcpy(rtldm->delta_swing_table_idx_5ga_n, + rtl8821ae_delta_swing_table_idx_5ga_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3); + memcpy(rtldm->delta_swing_table_idx_5gb_p, + rtl8821ae_delta_swing_table_idx_5gb_p_txpwrtrack, DELTA_SWINGIDX_SIZE*3); + memcpy(rtldm->delta_swing_table_idx_5gb_n, + rtl8821ae_delta_swing_table_idx_5gb_n_txpwrtrack, DELTA_SWINGIDX_SIZE*3); +} + + + +#define CALCULATE_SWINGTALBE_OFFSET(_offset, _direction, _size, _deltaThermal) \ + do {\ + for(_offset = 0; _offset < _size; _offset++)\ + {\ + if(_deltaThermal < thermal_threshold[_direction][_offset])\ + {\ + if(_offset != 0)\ + _offset--;\ + break;\ + }\ + } \ + if(_offset >= _size)\ + _offset = _size-1;\ + } while(0) + + +void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw, + u8 type,u8 *pdirection, + u32 *poutwrite_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 pwr_val = 0; + + if (type == 0){ + if (rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] <= + rtlpriv->dm.bb_swing_idx_ofdm_base[RF90_PATH_A]) { + *pdirection = 1; + pwr_val = rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm[RF90_PATH_A]; + } else { + *pdirection = 2; + pwr_val = rtldm->bb_swing_idx_ofdm[RF90_PATH_A] - rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A]; + } + } else if (type ==1) { + if (rtldm->bb_swing_idx_cck <= rtldm->bb_swing_idx_cck_base) { + *pdirection = 1; + pwr_val = rtldm->bb_swing_idx_cck_base - rtldm->bb_swing_idx_cck; + } else { + *pdirection = 2; + pwr_val = rtldm->bb_swing_idx_cck - rtldm->bb_swing_idx_cck_base; + } + } + + if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1)) + pwr_val = TXPWRTRACK_MAX_IDX; + + *poutwrite_val = pwr_val |(pwr_val << 8)|(pwr_val << 16) | (pwr_val << 24); +} + +void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + u8 p = 0; + rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index; + rtldm->bb_swing_idx_cck = rtldm->default_cck_index; + rtldm->cck_index = 0; + + for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) { + rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index; + rtldm->bb_swing_idx_ofdm[p] = rtldm->default_ofdm_index; + rtldm->ofdm_index[p] = rtldm->default_ofdm_index; + + rtldm->power_index_offset[p] = 0; + rtldm->delta_power_index[p] = 0; + rtldm->delta_power_index_last[p] = 0; + + rtldm->aboslute_ofdm_swing_idx[p] = 0; /*Initial Mix mode power tracking*/ + rtldm->remnant_ofdm_swing_idx[p] = 0; + } + + rtldm->modify_txagc_flag_path_a = false; /*Initial at Modify Tx Scaling Mode*/ + rtldm->modify_txagc_flag_path_b = false; /*Initial at Modify Tx Scaling Mode*/ + rtldm->remnant_cck_idx = 0; + rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter; +} + +u8 rtl8821ae_dm_get_swing_index(struct ieee80211_hw *hw) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 i = 0; + u32 bb_swing; + + bb_swing =rtl8821ae_phy_query_bb_reg(hw, rtlhal->current_bandtype, RF90_PATH_A); + + for (i = 0; i < TXSCALE_TABLE_SIZE; ++i) + if ( bb_swing == rtl8821ae_txscaling_table[i]) + break; + + return i; +} + +void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + struct 
rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 default_swing_index = 0; + u8 p = 0; + + rtlpriv->dm.txpower_track_control = true; + rtldm->thermalvalue = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_iqk = rtlefuse->eeprom_thermalmeter; + rtldm->thermalvalue_lck = rtlefuse->eeprom_thermalmeter; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_dm_read_and_config_txpower_track(hw); + else + rtl8821ae_dm_read_and_config_txpower_track(hw); + + default_swing_index = rtl8821ae_dm_get_swing_index(hw); + + rtldm->default_ofdm_index = (default_swing_index == TXSCALE_TABLE_SIZE) ? 24 : default_swing_index; + rtldm->default_cck_index = 24; + + rtldm->bb_swing_idx_cck_base = rtldm->default_cck_index; + rtldm->cck_index = rtldm->default_cck_index; + + for (p = RF90_PATH_A; p < MAX_RF_PATH; ++p) + { + rtldm->bb_swing_idx_ofdm_base[p] = rtldm->default_ofdm_index; + rtldm->ofdm_index[p] = rtldm->default_ofdm_index; + rtldm->delta_power_index[p] = 0; + rtldm->power_index_offset[p] = 0; + rtldm->delta_power_index_last[p] = 0; + } +} + +static void rtl8821ae_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) +{ + dm_pstable.pre_ccastate = CCA_MAX; + dm_pstable.cur_ccasate = CCA_MAX; + dm_pstable.pre_rfstate = RF_MAX; + dm_pstable.cur_rfstate = RF_MAX; + dm_pstable.rssi_val_min = 0; + dm_pstable.initialize = 0; +} + + +static void rtl8821ae_dm_diginit(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + //dm_digtable.dig_enable_flag = true; + dm_digtable.cur_igvalue = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f); + /*dm_digtable.pre_igvalue = 0; + dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; + dm_digtable.presta_connectstate = DIG_STA_DISCONNECT; + dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;*/ + dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; + dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; + dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + dm_digtable.rx_gain_range_max = DM_DIG_MAX; + dm_digtable.rx_gain_range_min = DM_DIG_MIN; + dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; + dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; + dm_digtable.pre_cck_cca_thres = 0xff; + dm_digtable.cur_cck_cca_thres = 0x83; + dm_digtable.forbidden_igi = DM_DIG_MIN; + dm_digtable.large_fa_hit = 0; + dm_digtable.recover_cnt = 0; + dm_digtable.dig_dynamic_min_0 = DM_DIG_MIN; + dm_digtable.dig_dynamic_min_1 = DM_DIG_MIN; + dm_digtable.b_media_connect_0 = false; + dm_digtable.b_media_connect_1 = false; + rtlpriv->dm.b_dm_initialgain_enable = true; + dm_digtable.bt30_cur_igi = 0x32; +} + +static void rtl8821ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.bdynamic_txpower_enable = false; + + rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; +} + + +void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtlpriv->dm.bcurrent_turbo_edca = false; + rtlpriv->dm.bis_any_nonbepkts = false; + rtlpriv->dm.bis_cur_rdlstate = false; +} + + +void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *p_ra = &(rtlpriv->ra); + + p_ra->ratr_state = DM_RATR_STA_INIT; + p_ra->pre_ratr_state = DM_RATR_STA_INIT; + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + if (rtlpriv->dm.dm_type == 
DM_TYPE_BYDRIVER) + rtlpriv->dm.b_useramask = true; + else + rtlpriv->dm.b_useramask = false; + + p_ra->high_rssi_thresh_for_ra = 50; + p_ra->low_rssi_thresh_for_ra = 20; +} + + +static void rtl8821ae_dm_init_txpower_tracking(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.btxpower_tracking = true; + rtlpriv->dm.btxpower_trackinginit = false; + rtlpriv->dm.txpowercount = 0; + rtlpriv->dm.txpower_track_control = true; + rtlpriv->dm.thermalvalue = 0; + + rtlpriv->dm.ofdm_index[0] = 30; + rtlpriv->dm.cck_index = 20; + + rtlpriv->dm.bb_swing_idx_cck_base = rtlpriv->dm.cck_index; + + + rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_A] = rtlpriv->dm.ofdm_index[0]; + rtlpriv->dm.bb_swing_idx_ofdm[RF90_PATH_B] = rtlpriv->dm.ofdm_index[0]; + rtlpriv->dm.delta_power_index[0] = 0; + rtlpriv->dm.delta_power_index_last[0] = 0; + rtlpriv->dm.power_index_offset[0] = 0; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + (" rtlpriv->dm.btxpower_tracking = %d\n", + rtlpriv->dm.btxpower_tracking)); +} + + +void rtl8821ae_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap; + + rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11)); + rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL; +} + + +void rtl8821ae_dm_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + rtl8821ae_dm_diginit(hw); + rtl8821ae_dm_init_rate_adaptive_mask(hw); + rtl8812ae_dm_path_diversity_init(hw); + rtl8821ae_dm_init_edca_turbo(hw); + rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(hw); +#if 1 + rtl8821ae_dm_init_dynamic_bb_powersaving(hw); + rtl8821ae_dm_init_dynamic_txpower(hw); + rtl8821ae_dm_init_txpower_tracking(hw); +#endif + rtl8821ae_dm_init_dynamic_atc_switch(hw); +} + +void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dig *rtl_dm_dig = &(rtlpriv->dm.dm_digtable); + struct rtl_mac *mac = rtl_mac(rtlpriv); + + /* Determine the minimum RSSI */ + if ((mac->link_state < MAC80211_LINKED) && + (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { + rtl_dm_dig->min_undecorated_pwdb_for_dm = 0; + RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD, + ("Not connected to any \n")); + } + if (mac->link_state >= MAC80211_LINKED) { + if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_dm_dig->min_undecorated_pwdb_for_dm = + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD, + ("AP Client PWDB = 0x%lx \n", + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb)); + } else { + rtl_dm_dig->min_undecorated_pwdb_for_dm = + rtlpriv->dm.undecorated_smoothed_pwdb; + RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD, + ("STA Default Port PWDB = 0x%x \n", + rtl_dm_dig->min_undecorated_pwdb_for_dm)); + } + } else { + rtl_dm_dig->min_undecorated_pwdb_for_dm = + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + RT_TRACE(COMP_BB_POWERSAVING, DBG_LOUD, + ("AP Ext Port or disconnet PWDB = 0x%x \n", + rtl_dm_dig->min_undecorated_pwdb_for_dm)); + } + RT_TRACE(COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n", + rtl_dm_dig->min_undecorated_pwdb_for_dm)); +} + +#if 0 +void rtl8812ae_dm_rssi_dump_to_register( + struct ieee80211_hw *hw + ) +{ + struct rtl_priv *rtlpriv = 
rtl_priv(hw); + + rtl_write_byte(rtlpriv, RA_RSSI_DUMP, Adapter->RxStats.RxRSSIPercentage[0]); + rtl_write_byte(rtlpriv, RB_RSSI_DUMP, Adapter->RxStats.RxRSSIPercentage[1]); + + /* Rx EVM*/ + rtl_write_byte(rtlpriv, RS1_RX_EVM_DUMP, Adapter->RxStats.RxEVMdbm[0]); + rtl_write_byte(rtlpriv, RS2_RX_EVM_DUMP, Adapter->RxStats.RxEVMdbm[1]); + + /*Rx SNR*/ + rtl_write_byte(rtlpriv, RA_RX_SNR_DUMP, (u1Byte)(Adapter->RxStats.RxSNRdB[0])); + rtl_write_byte(rtlpriv, RB_RX_SNR_DUMP, (u1Byte)(Adapter->RxStats.RxSNRdB[1])); + + /*Rx Cfo_Short*/ + rtl_write_word(rtlpriv, RA_CFO_SHORT_DUMP, Adapter->RxStats.RxCfoShort[0]); + rtl_write_word(rtlpriv, RB_CFO_SHORT_DUMP, Adapter->RxStats.RxCfoShort[1]); + + /*Rx Cfo_Tail*/ + rtl_write_word(rtlpriv, RA_CFO_LONG_DUMP, Adapter->RxStats.RxCfoTail[0]); + rtl_write_word(rtlpriv, RB_CFO_LONG_DUMP, Adapter->RxStats.RxCfoTail[1]); + +} +#endif + +static void rtl8821ae_dm_check_rssi_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *drv_priv; + u8 h2c_parameter[3] = { 0 }; + long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff; + + + /* AP & ADHOC & MESH */ + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + if(drv_priv->rssi_stat.undecorated_smoothed_pwdb < tmp_entry_min_pwdb) + tmp_entry_min_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb; + if(drv_priv->rssi_stat.undecorated_smoothed_pwdb > tmp_entry_max_pwdb) + tmp_entry_max_pwdb = drv_priv->rssi_stat.undecorated_smoothed_pwdb; + + /*h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF); + h2c_parameter[1] = 0x20; + h2c_parameter[0] = drv_priv->rssi_stat; + rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);*/ + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + /* If associated entry is found */ + if (tmp_entry_max_pwdb != 0) { + rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = tmp_entry_max_pwdb; + RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMaxPWDB = 0x%lx(%ld)\n", + tmp_entry_max_pwdb, tmp_entry_max_pwdb)); + } else { + rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0; + } + /* If associated entry is found */ + if (tmp_entry_min_pwdb != 0xff) { + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = tmp_entry_min_pwdb; + RTPRINT(rtlpriv, FDM, DM_PWDB, ("EntryMinPWDB = 0x%lx(%ld)\n", + tmp_entry_min_pwdb, tmp_entry_min_pwdb)); + } else { + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0; + } + /* Indicate Rx signal strength to FW. 
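+	   (Descriptive note, inferred from the code below: when b_useramask is
+	   set, the smoothed PWDB in rtlpriv->dm.undecorated_smoothed_pwdb is
+	   reported to firmware via the H2C_RSSI_REPORT command; otherwise it
+	   is written directly to register 0x4fe.)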
*/ + if (rtlpriv->dm.b_useramask) { + h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF); + h2c_parameter[1] = 0x20; + h2c_parameter[0] = 0; + rtl8821ae_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); + } else { + rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undecorated_smoothed_pwdb); + } + rtl8821ae_dm_find_minimum_rssi(hw); + dm_digtable.rssi_val_min = rtlpriv->dm.dm_digtable.min_undecorated_pwdb_for_dm; +} + +void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (dm_digtable.cur_cck_cca_thres != current_cca) + rtl_write_byte(rtlpriv, DM_REG_CCK_CCA_11AC, current_cca); + + dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres; + dm_digtable.cur_cck_cca_thres = current_cca; +} + +void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if(dm_digtable.stop_dig) + return; + + if (dm_digtable.cur_igvalue != current_igi){ + rtl_set_bbreg(hw, DM_REG_IGI_A_11AC, DM_BIT_IGI_11AC, current_igi); + if (rtlpriv->phy.rf_type != RF_1T1R) + rtl_set_bbreg(hw, DM_REG_IGI_B_11AC, DM_BIT_IGI_11AC, current_igi); + } + //dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; + dm_digtable.cur_igvalue = current_igi; +} + +static void rtl8821ae_dm_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 dig_dynamic_min; + u8 dig_max_of_min; + bool first_connect, first_disconnect; + u8 dm_dig_max, dm_dig_min, offset; + u8 current_igi =dm_digtable.cur_igvalue; + + + RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig()==>\n")); + + + if (mac->act_scanning == true) { + RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig() Return: In Scan Progress \n")); + return; + } + + /*add by Neil Chen to avoid PSD is processing*/ + dig_dynamic_min = dm_digtable.dig_dynamic_min_0; + first_connect = (mac->link_state >= MAC80211_LINKED) && + (dm_digtable.b_media_connect_0 == false); + first_disconnect = (mac->link_state < MAC80211_LINKED) && + (dm_digtable.b_media_connect_0 == true); + + /*1 Boundary Decision*/ + + + dm_dig_max = 0x5A; + + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE) + dm_dig_min = DM_DIG_MIN; + else + dm_dig_min = 0x1C; + + dig_max_of_min = DM_DIG_MAX_AP; + + if (mac->link_state >= MAC80211_LINKED) { + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8821AE) + offset = 20; + else + offset = 10; + + if ((dm_digtable.rssi_val_min + offset) > dm_dig_max) + dm_digtable.rx_gain_range_max = dm_dig_max; + else if ((dm_digtable.rssi_val_min + offset) < dm_dig_min) + dm_digtable.rx_gain_range_max = dm_dig_min; + else + dm_digtable.rx_gain_range_max = dm_digtable.rssi_val_min + offset; + + if(rtlpriv->dm.b_one_entry_only){ + offset = 0; + + if (dm_digtable.rssi_val_min - offset < dm_dig_min) + dig_dynamic_min = dm_dig_min; + else if (dm_digtable.rssi_val_min - offset > dig_max_of_min) + dig_dynamic_min = dig_max_of_min; + else + dig_dynamic_min = dm_digtable.rssi_val_min - offset; + + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig() : bOneEntryOnly=TRUE, dig_dynamic_min=0x%x\n", + dig_dynamic_min)); + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig() : dm_digtable.rssi_val_min=%d",dm_digtable. 
+ rssi_val_min)); + } else { + dig_dynamic_min = dm_dig_min; + } + } else { + dm_digtable.rx_gain_range_max = dm_dig_max; + dig_dynamic_min = dm_dig_min; + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig() : No Link\n")); + } + + if (rtlpriv->falsealm_cnt.cnt_all > 10000) { + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): Abnornally false alarm case. \n")); + + if (dm_digtable.large_fa_hit != 3) + dm_digtable.large_fa_hit++; + if (dm_digtable.forbidden_igi < current_igi) { + dm_digtable.forbidden_igi = current_igi; + dm_digtable.large_fa_hit = 1; + } + + if (dm_digtable.large_fa_hit >= 3) { + if((dm_digtable.forbidden_igi + 1) > dm_digtable.rx_gain_range_max) + dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max; + else + dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1); + dm_digtable.recover_cnt = 3600; + } + + } else { + /*Recovery mechanism for IGI lower bound*/ + if (dm_digtable.recover_cnt != 0) + dm_digtable.recover_cnt --; + else { + if (dm_digtable.large_fa_hit < 3) { + if ((dm_digtable.forbidden_igi -1) < dig_dynamic_min) { + dm_digtable.forbidden_igi = dig_dynamic_min; + dm_digtable.rx_gain_range_min = dig_dynamic_min; + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): Normal Case: At Lower Bound\n")); + } else { + dm_digtable.forbidden_igi --; + dm_digtable.rx_gain_range_min = (dm_digtable.forbidden_igi + 1); + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): Normal Case: Approach Lower Bound\n")); + } + } else { + dm_digtable.large_fa_hit = 0; + } + } + } + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): pDM_DigTable->LargeFAHit=%d\n", + dm_digtable.large_fa_hit)); + + if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) + dm_digtable.rx_gain_range_min = dm_dig_min; + + if (dm_digtable.rx_gain_range_min > dm_digtable.rx_gain_range_max) + dm_digtable.rx_gain_range_min = dm_digtable.rx_gain_range_max; + + /*Adjust initial gain by false alarm*/ + if (mac->link_state >= MAC80211_LINKED) { + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): DIG AfterLink\n")); + if (first_connect) { + if (dm_digtable.rssi_val_min <= dig_max_of_min) + current_igi = dm_digtable.rssi_val_min; + else + current_igi = dig_max_of_min; + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig: First Connect\n")); + } else { + if(rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2) + current_igi = current_igi + 4; + else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1) + current_igi = current_igi + 2; + else if(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) + current_igi = current_igi - 2; + + if((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) + &&(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) { + current_igi = dm_digtable.rx_gain_range_min; + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n")); + } + } + } else{ + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): DIG BeforeLink\n")); + if (first_disconnect){ + current_igi = dm_digtable.rx_gain_range_min; + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): First DisConnect \n")); + } else { + /*2012.03.30 LukeLee: enable DIG before link but with very high thresholds*/ + if (rtlpriv->falsealm_cnt.cnt_all > 2000) + current_igi = current_igi + 4; + else if (rtlpriv->falsealm_cnt.cnt_all > 600) + current_igi = current_igi + 2; + else if(rtlpriv->falsealm_cnt.cnt_all < 300) + current_igi = current_igi - 2; + if (current_igi >= 0x3e) + current_igi = 0x3e; + RT_TRACE(COMP_DIG, DBG_LOUD,("rtl8821ae_dm_dig(): England DIG \n")); + } + } + 
RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): DIG End Adjust IGI\n")); + /* Check initial gain by upper/lower bound*/ + + if (current_igi > dm_digtable.rx_gain_range_max) + current_igi = dm_digtable.rx_gain_range_max; + if (current_igi < dm_digtable.rx_gain_range_min) + current_igi = dm_digtable.rx_gain_range_min; + + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n", + dm_digtable.rx_gain_range_max, dm_digtable.rx_gain_range_min)); + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all)); + RT_TRACE(COMP_DIG, DBG_LOUD, + ("rtl8821ae_dm_dig(): CurIGValue=0x%x\n", current_igi)); + + rtl8821ae_dm_write_dig(hw, current_igi); + dm_digtable.b_media_connect_0= ((mac->link_state >= MAC80211_LINKED) ? true :false); + dm_digtable.dig_dynamic_min_0 = dig_dynamic_min; +} + +static void rtl8821ae_dm_common_info_self_update(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 cnt = 0; + struct rtl_sta_info *drv_priv; + + rtlpriv->dm.b_one_entry_only = false; + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION && + rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + rtlpriv->dm.b_one_entry_only = true; + return; + } + + if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC || + rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) { + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) { + cnt ++; + } + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); + + if (cnt == 1) + rtlpriv->dm.b_one_entry_only = true; + } +} + + +static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); + u32 cck_enable =0; + + /*read OFDM FA counter*/ + falsealm_cnt->cnt_ofdm_fail = rtl_get_bbreg(hw, ODM_REG_OFDM_FA_11AC, BMASKLWORD); + falsealm_cnt->cnt_cck_fail = rtl_get_bbreg(hw, ODM_REG_CCK_FA_11AC, BMASKLWORD); + + cck_enable = rtl_get_bbreg(hw, ODM_REG_BB_RX_PATH_11AC, BIT(28)); + if (cck_enable) /*if(pDM_Odm->pBandType == ODM_BAND_2_4G)*/ + falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail + falsealm_cnt->cnt_cck_fail; + else + falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail; + + /*reset OFDM FA coutner*/ + rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1); + rtl_set_bbreg(hw, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0); + /* reset CCK FA counter*/ + rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0); + rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1); + + RT_TRACE(COMP_DIG, DBG_LOUD, ("Cnt_Cck_fail=%d\n", + falsealm_cnt->cnt_cck_fail)); + RT_TRACE(COMP_DIG, DBG_LOUD, ("cnt_ofdm_fail=%d\n", + falsealm_cnt->cnt_ofdm_fail)); + RT_TRACE(COMP_DIG, DBG_LOUD, ("Total False Alarm=%d\n", + falsealm_cnt->cnt_all)); +} + +void rtl8812ae_dm_check_txpower_tracking_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + static u8 tm_trigger = 0; + + if (!rtlpriv->dm.btxpower_tracking) + return; + + if (!tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16), 0x03); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Trigger 8812 Thermal Meter!!\n")); + tm_trigger = 1; + return; + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Schedule TxPowerTracking direct call!!\n")); + rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw); + tm_trigger = 0; + } +} + +static void 
rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (mac->link_state >= MAC80211_LINKED) { + /*if ((*rtldm->p_channel != rtldm->pre_channel ) + && (!mac->act_scanning)) { + rtldm->pre_channel = *rtldm->p_channel; + rtldm->linked_interval = 0; + }*/ + + if(rtldm->linked_interval < 3) + rtldm->linked_interval ++; + + if(rtldm->linked_interval == 2) + { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_phy_iq_calibrate(hw, false); + else + rtl8821ae_phy_iq_calibrate(hw, false); + } + } else { + rtldm->linked_interval = 0; + } +} + + +void rtl8812ae_get_delta_swing_table( + struct ieee80211_hw *hw, + u8 **temperature_up_a, + u8 **temperature_down_a, + u8 **temperature_up_b, + u8 **temperature_down_b + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 channel = rtlphy->current_channel; + u8 rate = rtldm->tx_rate; + + + if ( 1 <= channel && channel <= 14) { + if (RX_HAL_IS_CCK_RATE(rate)) { + *temperature_up_a = rtldm->delta_swing_table_idx_24gccka_p; + *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n; + *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p; + *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n; + } else { + *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p; + *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n; + *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p; + *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n; + } + } else if ( 36 <= channel && channel <= 64) { + *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0]; + *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0]; + *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0]; + *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0]; + } else if ( 100 <= channel && channel <= 140) { + *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1]; + *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1]; + *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1]; + *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1]; + } else if ( 149 <= channel && channel <= 173) { + *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2]; + *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2]; + *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2]; + *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2]; + } else { + *temperature_up_a = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack; + *temperature_down_a =(u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack; + *temperature_up_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack; + *temperature_down_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack; + } + + return; +} + +void rtl8812ae_phy_lccalibrate( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n")); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n")); + +} + +void rtl8812ae_dm_update_init_rate( + struct ieee80211_hw *hw, + u8 rate + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 p = 0; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Get C2H Command! 
Rate=0x%x\n", rate)); + + rtldm->tx_rate = rate; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE){ + rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, RF90_PATH_A, 0); + } + else + { + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + { + rtl8812ae_dm_txpwr_track_set_pwr(hw, BBSWING, p, 0); + } + } + +} + +u8 rtl8812ae_hw_rate_to_mrate( + struct ieee80211_hw *hw, + u8 rate + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 ret_rate = MGN_1M; + + + switch(rate) + { + case DESC_RATE1M: ret_rate = MGN_1M; break; + case DESC_RATE2M: ret_rate = MGN_2M; break; + case DESC_RATE5_5M: ret_rate = MGN_5_5M; break; + case DESC_RATE11M: ret_rate = MGN_11M; break; + case DESC_RATE6M: ret_rate = MGN_6M; break; + case DESC_RATE9M: ret_rate = MGN_9M; break; + case DESC_RATE12M: ret_rate = MGN_12M; break; + case DESC_RATE18M: ret_rate = MGN_18M; break; + case DESC_RATE24M: ret_rate = MGN_24M; break; + case DESC_RATE36M: ret_rate = MGN_36M; break; + case DESC_RATE48M: ret_rate = MGN_48M; break; + case DESC_RATE54M: ret_rate = MGN_54M; break; + case DESC_RATEMCS0: ret_rate = MGN_MCS0; break; + case DESC_RATEMCS1: ret_rate = MGN_MCS1; break; + case DESC_RATEMCS2: ret_rate = MGN_MCS2; break; + case DESC_RATEMCS3: ret_rate = MGN_MCS3; break; + case DESC_RATEMCS4: ret_rate = MGN_MCS4; break; + case DESC_RATEMCS5: ret_rate = MGN_MCS5; break; + case DESC_RATEMCS6: ret_rate = MGN_MCS6; break; + case DESC_RATEMCS7: ret_rate = MGN_MCS7; break; + case DESC_RATEMCS8: ret_rate = MGN_MCS8; break; + case DESC_RATEMCS9: ret_rate = MGN_MCS9; break; + case DESC_RATEMCS10: ret_rate = MGN_MCS10; break; + case DESC_RATEMCS11: ret_rate = MGN_MCS11; break; + case DESC_RATEMCS12: ret_rate = MGN_MCS12; break; + case DESC_RATEMCS13: ret_rate = MGN_MCS13; break; + case DESC_RATEMCS14: ret_rate = MGN_MCS14; break; + case DESC_RATEMCS15: ret_rate = MGN_MCS15; break; + case DESC_RATEVHT1SS_MCS0: ret_rate = MGN_VHT1SS_MCS0; break; + case DESC_RATEVHT1SS_MCS1: ret_rate = MGN_VHT1SS_MCS1; break; + case DESC_RATEVHT1SS_MCS2: ret_rate = MGN_VHT1SS_MCS2; break; + case DESC_RATEVHT1SS_MCS3: ret_rate = MGN_VHT1SS_MCS3; break; + case DESC_RATEVHT1SS_MCS4: ret_rate = MGN_VHT1SS_MCS4; break; + case DESC_RATEVHT1SS_MCS5: ret_rate = MGN_VHT1SS_MCS5; break; + case DESC_RATEVHT1SS_MCS6: ret_rate = MGN_VHT1SS_MCS6; break; + case DESC_RATEVHT1SS_MCS7: ret_rate = MGN_VHT1SS_MCS7; break; + case DESC_RATEVHT1SS_MCS8: ret_rate = MGN_VHT1SS_MCS8; break; + case DESC_RATEVHT1SS_MCS9: ret_rate = MGN_VHT1SS_MCS9; break; + case DESC_RATEVHT2SS_MCS0: ret_rate = MGN_VHT2SS_MCS0; break; + case DESC_RATEVHT2SS_MCS1: ret_rate = MGN_VHT2SS_MCS1; break; + case DESC_RATEVHT2SS_MCS2: ret_rate = MGN_VHT2SS_MCS2; break; + case DESC_RATEVHT2SS_MCS3: ret_rate = MGN_VHT2SS_MCS3; break; + case DESC_RATEVHT2SS_MCS4: ret_rate = MGN_VHT2SS_MCS4; break; + case DESC_RATEVHT2SS_MCS5: ret_rate = MGN_VHT2SS_MCS5; break; + case DESC_RATEVHT2SS_MCS6: ret_rate = MGN_VHT2SS_MCS6; break; + case DESC_RATEVHT2SS_MCS7: ret_rate = MGN_VHT2SS_MCS7; break; + case DESC_RATEVHT2SS_MCS8: ret_rate = MGN_VHT2SS_MCS8; break; + case DESC_RATEVHT2SS_MCS9: ret_rate = MGN_VHT2SS_MCS9; break; + + default: + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("HwRateToMRate8812(): Non supported Rate [%x]!!!\n",rate )); + break; + } + return ret_rate; +} + +/*----------------------------------------------------------------------------- + * Function: odm_TxPwrTrackSetPwr88E() + * + * Overview: 88E change all channel tx power accordign to flag. + * OFDM & CCK are all different. 
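+ *		Note (editorial summary of the code below): two compensation
+ *		methods are implemented. With BBSWING the index is applied
+ *		purely through the RA_TXSCALE / RB_TXSCALE scaling tables,
+ *		clamped to pwr_tracking_limit. With MIX_MODE any excess beyond
+ *		the limit (or below index 0) is kept as a remnant TxAGC offset
+ *		and re-applied through rtl8821ae_phy_set_txpower_level_by_path().
+ *
+ *		Worked example (hypothetical numbers): default_ofdm_index = 30,
+ *		aboslute_ofdm_swing_idx = 4 and pwr_tracking_limit = 28 give a
+ *		final index of 34, so the BB swing table is written with index
+ *		28 and the remaining 6 steps end up in remnant_ofdm_swing_idx.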
+ * + * Input: NONE + * + * Output: NONE + * + * Return: NONE + * + * Revised History: + * When Who Remark + * 04/23/2012 MHC Create Version 0. + * + *---------------------------------------------------------------------------*/ +void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 final_bb_swing_idx[2]; + u8 pwr_tracking_limit = 26; /*+1.0dB*/ + u8 tx_rate = 0xFF; + s8 final_ofdm_swing_index = 0; + + if(rtldm->tx_rate != 0xFF) + tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===>rtl8812ae_dm_txpwr_track_set_pwr\n")); + + if(tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/ + /*CCK*/ + if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M)) + pwr_tracking_limit = 32; /*+4dB*/ + /*OFDM*/ + else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M)) + pwr_tracking_limit = 30; /*+3dB*/ + else if(tx_rate == MGN_54M) + pwr_tracking_limit = 28; /*+2dB*/ + /*HT*/ + else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ + + else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ + + /*2 VHT*/ + else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ + else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/ + pwr_tracking_limit = 26; /*+1dB*/ + else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/ + pwr_tracking_limit = 24; /*+0dB*/ + else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/ + pwr_tracking_limit = 22; /*-1dB*/ + + else if((tx_rate >= MGN_VHT2SS_MCS0)&&(tx_rate <= MGN_VHT2SS_MCS2)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_VHT2SS_MCS3)&&(tx_rate <= MGN_VHT2SS_MCS4)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_VHT2SS_MCS5)&&(tx_rate <= MGN_VHT2SS_MCS6)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ + else if(tx_rate == MGN_VHT2SS_MCS7) /*64QAM*/ + pwr_tracking_limit = 26; /*+1dB*/ + else if(tx_rate == MGN_VHT2SS_MCS8) /*256QAM*/ + pwr_tracking_limit = 24; /*+0dB*/ + else if(tx_rate == MGN_VHT2SS_MCS9) /*256QAM*/ + pwr_tracking_limit = 22; /*-1dB*/ + else + pwr_tracking_limit = 24; + } + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit)); + + + if (method == BBSWING) { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===>rtl8812ae_dm_txpwr_track_set_pwr\n")); + + if (rf_path == RF90_PATH_A) { + final_bb_swing_idx[RF90_PATH_A] = + (rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ? 
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A]; + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \ + pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n", + rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A])); + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]); + } else { + final_bb_swing_idx[RF90_PATH_B] = + rtldm->ofdm_index[RF90_PATH_B] > pwr_tracking_limit ? \ + pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_B]; + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, \ + pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n", + rtldm->ofdm_index[RF90_PATH_B], final_bb_swing_idx[RF90_PATH_B])); + + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_B]]); + } + } else if (method == MIX_MODE) { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->DefaultOfdmIndex=%d, \ + pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", + rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path], + rf_path )); + + + final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path]; + + if (rf_path == RF90_PATH_A) { + if(final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher then Limit*/ + + rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n", + pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path])); + } else if (final_ofdm_swing_index < 0) { + rtldm->remnant_cck_idx = final_ofdm_swing_index; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n", + rtldm->remnant_ofdm_swing_idx[rf_path])); + } else { + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n", + final_ofdm_swing_index)); + + if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/ + rtldm->remnant_cck_idx = 0; + rtldm->remnant_ofdm_swing_idx[rf_path] = 0; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A); + + rtldm->modify_txagc_flag_path_a = false; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n")); + } + } + } + + if (rf_path == RF90_PATH_B) { + if(final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher then Limit*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit; + + rtl_set_bbreg(hw, 
RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]); + + rtldm->modify_txagc_flag_path_b = true; + + /*Set TxAGC Page E{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n", + pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path])); + } else if (final_ofdm_swing_index < 0) { + rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index; + + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]); + + rtldm->modify_txagc_flag_path_b = true; + + /*Set TxAGC Page E{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n", + rtldm->remnant_ofdm_swing_idx[rf_path] )); + } else { + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_B Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n", + final_ofdm_swing_index)); + + if(rtldm->modify_txagc_flag_path_b) { /*If TxAGC has changed, reset TxAGC again*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = 0; + + /*Set TxAGC Page E{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_B); + + rtldm->modify_txagc_flag_path_b = false; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE \n")); + } + } + } + + } else { + return; + } +} + +void rtl8812ae_dm_txpower_tracking_callback_thermalmeter + (struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0; + u8 thermal_value_avg_count = 0; + u32 thermal_value_avg = 0; + + u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/ + u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/ + + /* 1. The following TWO tables decide the final index of OFDM/CCK swing table.*/ + u8 *delta_swing_table_idx_tup_a; + u8 *delta_swing_table_idx_tdown_a; + u8 *delta_swing_table_idx_tup_b; + u8 *delta_swing_table_idx_tdown_b; + + /*2. Initilization ( 7 steps in total )*/ + rtl8812ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a, + (u8**)&delta_swing_table_idx_tdown_a, + (u8**)&delta_swing_table_idx_tup_b, + (u8**)&delta_swing_table_idx_tdown_b); + + rtldm->btxpower_trackinginit = true; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===>rtl8812ae_dm_txpower_tracking_callback_thermalmeter, \ + \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\ + %d, pDM_Odm->DefaultOfdmIndex: %d\n", + rtldm->bb_swing_idx_cck_base, + rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A], + rtldm->default_ofdm_index)); + + thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00); /*0x42: RF Reg[15:10] 88E*/ + if( ! rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 || + rtlefuse->eeprom_thermalmeter == 0xFF) + return; + + + /* 3. Initialize ThermalValues of RFCalibrateInfo*/ + + if(rtlhal->reloadtxpowerindex) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("reload ofdm index for band switch\n")); + } + + /*4. 
Calculate average thermal meter*/ + rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value; + rtldm->thermalvalue_avg_index++; + if(rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A) + /*Average times = c.AverageThermalNum*/ + rtldm->thermalvalue_avg_index = 0; + + for(i = 0; i < AVG_THERMAL_NUM_8812A; i++) + { + if(rtldm->thermalvalue_avg[i]) + { + thermal_value_avg += rtldm->thermalvalue_avg[i]; + thermal_value_avg_count++; + } + } + + if(thermal_value_avg_count) /*Calculate Average ThermalValue after average enough times*/ + { + thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n", + thermal_value, rtlefuse->eeprom_thermalmeter)); + } + + /*5. Calculate delta, delta_LCK, delta_IQK.*/ + /*"delta" here is used to determine whether thermal value changes or not.*/ + delta = (thermal_value > rtldm->thermalvalue) ? \ + (thermal_value - rtldm->thermalvalue): \ + (rtldm->thermalvalue - thermal_value); + delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? \ + (thermal_value - rtldm->thermalvalue_lck) : \ + (rtldm->thermalvalue_lck - thermal_value); + delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? \ + (thermal_value - rtldm->thermalvalue_iqk) : \ + (rtldm->thermalvalue_iqk - thermal_value); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n", + delta, delta_lck, delta_iqk)); + + /* 6. If necessary, do LCK. */ + + if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/ + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_LCK(%d) >= Threshold_IQK(%d)\n", + delta_lck, IQK_THRESHOLD)); + rtldm->thermalvalue_lck = thermal_value; + rtl8812ae_phy_lccalibrate(hw); + } + + /*7. If necessary, move the index of swing table to adjust Tx power.*/ + + if (delta > 0 && rtldm->txpower_track_control) + { + /*"delta" here is used to record the absolute value of differrence.*/ + delta = thermal_value > rtlefuse->eeprom_thermalmeter ? 
\ + (thermal_value - rtlefuse->eeprom_thermalmeter) : \ + (rtlefuse->eeprom_thermalmeter - thermal_value); + + if (delta >= TXPWR_TRACK_TABLE_SIZE) + delta = TXPWR_TRACK_TABLE_SIZE - 1; + + /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/ + + if(thermal_value > rtlefuse->eeprom_thermalmeter) { + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_swing_table_idx_tup_a[%d] = %d\n", + delta, delta_swing_table_idx_tup_a[delta])); + rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta]; + + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A])); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_swing_table_idx_tup_b[%d] = %d\n", + delta, delta_swing_table_idx_tup_b[delta])); + rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B]; + rtldm->delta_power_index[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta]; + + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] = delta_swing_table_idx_tup_b[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B])); + + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_swing_table_idx_tdown_a[%d] = %d\n", + delta, delta_swing_table_idx_tdown_a[delta])); + + rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta]; + + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta]; + /* Record delta swing for mix mode power tracking*/ + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A])); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("deltaSwingTableIdx_TDOWN_B[%d] = %d\n", + delta, delta_swing_table_idx_tdown_b[delta])); + + rtldm->delta_power_index_last[RF90_PATH_B] = rtldm->delta_power_index[RF90_PATH_B]; + rtldm->delta_power_index[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta]; + + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B] = -1 * delta_swing_table_idx_tdown_b[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n", + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_B])); + } + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("\n\n================================ [Path-%c] \ + Calculating PowerIndexOffset ================================\n", + (p == RF90_PATH_A ? 
'A' : 'B'))); + + if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p]) + /*If Thermal value changes but lookup table value still the same*/ + rtldm->power_index_offset[p] = 0; + else + rtldm->power_index_offset[p] = + rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p]; + /*Power Index Diff between 2 times Power Tracking*/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n", + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->power_index_offset[p], + rtldm->delta_power_index[p] , + rtldm->delta_power_index_last[p])); + + rtldm->ofdm_index[p] = + rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p]; + rtldm->cck_index = + rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p]; + + rtldm->bb_swing_idx_cck = rtldm->cck_index; + rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p]; + + /*************Print BB Swing Base and Index Offset*************/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n", + rtldm->bb_swing_idx_cck, + rtldm->bb_swing_idx_cck_base, + rtldm->power_index_offset[p])); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n", + rtldm->bb_swing_idx_ofdm[p], + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->bb_swing_idx_ofdm_base[p], + rtldm->power_index_offset[p])); + + /*7.1 Handle boundary conditions of index.*/ + + + if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1) + { + rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1; + } + else if (rtldm->ofdm_index[p] < ofdm_min_index) + { + rtldm->ofdm_index[p] = ofdm_min_index; + } + } + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("\n\n======================================================\ + ==================================================\n")); + if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1) + rtldm->cck_index = TXSCALE_TABLE_SIZE -1; + else if (rtldm->cck_index < 0) + rtldm->cck_index = 0; + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("The thermal meter is unchanged or TxPowerTracking OFF(%d): \ + ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n", + rtldm->txpower_track_control, + thermal_value, + rtldm->thermalvalue)); + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtldm->power_index_offset[p] = 0; + } + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n", + rtldm->cck_index, rtldm->bb_swing_idx_cck_base)); /*Print Swing base & current*/ + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n", + rtldm->ofdm_index[p], + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->bb_swing_idx_ofdm_base[p])); + } + + if ((rtldm->power_index_offset[RF90_PATH_A] != 0 || + rtldm->power_index_offset[RF90_PATH_B] != 0 ) && + rtldm->txpower_track_control) + { + /*7.2 Configure the Swing Table to adjust Tx Power.*/ + /*Always TRUE after Tx Power is adjusted by power tracking.*/ + /* + 2012/04/23 MH According to Luke's suggestion, we can not write BB digital + to increase TX power. Otherwise, EVM will be bad. + + 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. 
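+		(This is why rtl8812ae_dm_txpwr_track_set_pwr() in MIX_MODE caps
+		the BB swing index at pwr_tracking_limit and carries the excess,
+		plus the CCK share on path A, in remnant_ofdm_swing_idx and
+		remnant_cck_idx, which are folded back into TxAGC by
+		rtl8821ae_phy_set_txpower_level_by_path().)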
+ */ + if (thermal_value > rtldm->thermalvalue) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue)); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature Increasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_B], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue)); + + } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/ + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue)); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_B], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue)); + } + + if (thermal_value > rtlefuse->eeprom_thermalmeter) { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature(%d) higher than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter)); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("**********Enter POWER Tracking MIX_MODE**********\n")); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, 0); + + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature(%d) lower than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter)); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("**********Enter POWER Tracking MIX_MODE**********\n")); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel); + + } + + rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck; /*Record last time Power Tracking result as base.*/ + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) + rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p]; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", + rtldm->thermalvalue, thermal_value)); + + rtldm->thermalvalue = thermal_value; /*Record last Power Tracking Thermal Value*/ + + } + /*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/ + if ((delta_iqk >= IQK_THRESHOLD)) { + + if ( !rtlphy->b_iqk_in_progress) { + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + } + } + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n")); +} + + +void rtl8821ae_get_delta_swing_table( + struct ieee80211_hw *hw, + u8 **temperature_up_a, + u8 **temperature_down_a, + u8 **temperature_up_b, + u8 **temperature_down_b + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 channel = rtlphy->current_channel; + u8 rate = rtldm->tx_rate; + + + if ( 1 <= channel && channel <= 14) { + if (RX_HAL_IS_CCK_RATE(rate)) { + *temperature_up_a = 
rtldm->delta_swing_table_idx_24gccka_p; + *temperature_down_a = rtldm->delta_swing_table_idx_24gccka_n; + *temperature_up_b = rtldm->delta_swing_table_idx_24gcckb_p; + *temperature_down_b = rtldm->delta_swing_table_idx_24gcckb_n; + } else { + *temperature_up_a = rtldm->delta_swing_table_idx_24ga_p; + *temperature_down_a = rtldm->delta_swing_table_idx_24ga_n; + *temperature_up_b = rtldm->delta_swing_table_idx_24gb_p; + *temperature_down_b = rtldm->delta_swing_table_idx_24gb_n; + } + } else if ( 36 <= channel && channel <= 64) { + *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[0]; + *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[0]; + *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[0]; + *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[0]; + } else if ( 100 <= channel && channel <= 140) { + *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[1]; + *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[1]; + *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[1]; + *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[1]; + } else if ( 149 <= channel && channel <= 173) { + *temperature_up_a = rtldm->delta_swing_table_idx_5ga_p[2]; + *temperature_down_a = rtldm->delta_swing_table_idx_5ga_n[2]; + *temperature_up_b = rtldm->delta_swing_table_idx_5gb_p[2]; + *temperature_down_b = rtldm->delta_swing_table_idx_5gb_n[2]; + } else { + *temperature_up_a = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack; + *temperature_down_a =(u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack; + *temperature_up_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_p_txpwrtrack; + *temperature_down_b = (u8*)rtl8818e_delta_swing_table_idx_24gb_n_txpwrtrack; + } + + return; +} + +void rtl8821ae_phy_lccalibrate( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("===> rtl8812ae_phy_lccalibrate\n")); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("<=== rtl8812ae_phy_lccalibrate\n")); + +} + +/*----------------------------------------------------------------------------- + * Function: odm_TxPwrTrackSetPwr88E() + * + * Overview: 88E change all channel tx power accordign to flag. + * OFDM & CCK are all different. + * + * Input: NONE + * + * Output: NONE + * + * Return: NONE + * + * Revised History: + * When Who Remark + * 04/23/2012 MHC Create Version 0. 
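+ *		Note: the function below is the single-path RTL8821AE variant of
+ *		the routine above - only RF90_PATH_A is programmed and the
+ *		2-spatial-stream rate limits are compiled out (#if 0) or omitted,
+ *		whereas rtl8812ae_dm_txpwr_track_set_pwr() handles both path A
+ *		and path B.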
+ * + *---------------------------------------------------------------------------*/ +void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 final_bb_swing_idx[1]; + u8 pwr_tracking_limit = 26; /*+1.0dB*/ + u8 tx_rate = 0xFF; + s8 final_ofdm_swing_index = 0; + + if(rtldm->tx_rate != 0xFF) + tx_rate = rtl8812ae_hw_rate_to_mrate(hw, rtldm->tx_rate); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===>rtl8812ae_dm_txpwr_track_set_pwr\n")); + + if(tx_rate != 0xFF) { /*20130429 Mimic Modify High Rate BBSwing Limit.*/ + /*CCK*/ + if((tx_rate >= MGN_1M) && (tx_rate <= MGN_11M)) + pwr_tracking_limit = 32; /*+4dB*/ + /*OFDM*/ + else if((tx_rate >= MGN_6M) && (tx_rate <= MGN_48M)) + pwr_tracking_limit = 30; /*+3dB*/ + else if(tx_rate == MGN_54M) + pwr_tracking_limit = 28; /*+2dB*/ + /*HT*/ + else if((tx_rate >= MGN_MCS0) && (tx_rate <= MGN_MCS2)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_MCS3) && (tx_rate <= MGN_MCS4)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_MCS5) && (tx_rate <= MGN_MCS7)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ +#if 0 + else if((tx_rate >= MGN_MCS8) && (tx_rate <= MGN_MCS10)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_MCS11) && (tx_rate <= MGN_MCS12)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_MCS13) && (tx_rate <= MGN_MCS15)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ +#endif + /*2 VHT*/ + else if((tx_rate >= MGN_VHT1SS_MCS0) && (tx_rate <= MGN_VHT1SS_MCS2)) /*QPSK/BPSK*/ + pwr_tracking_limit = 34; /*+5dB*/ + else if((tx_rate >= MGN_VHT1SS_MCS3) && (tx_rate <= MGN_VHT1SS_MCS4)) /*16QAM*/ + pwr_tracking_limit = 30; /*+3dB*/ + else if((tx_rate >= MGN_VHT1SS_MCS5)&&(tx_rate <= MGN_VHT1SS_MCS6)) /*64QAM*/ + pwr_tracking_limit = 28; /*+2dB*/ + else if(tx_rate == MGN_VHT1SS_MCS7) /*64QAM*/ + pwr_tracking_limit = 26; /*+1dB*/ + else if(tx_rate == MGN_VHT1SS_MCS8) /*256QAM*/ + pwr_tracking_limit = 24; /*+0dB*/ + else if(tx_rate == MGN_VHT1SS_MCS9) /*256QAM*/ + pwr_tracking_limit = 22; /*-1dB*/ + else + pwr_tracking_limit = 24; + } + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("TxRate=0x%x, PwrTrackingLimit=%d\n", tx_rate, pwr_tracking_limit)); + + + if (method == BBSWING) { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===>rtl8812ae_dm_txpwr_track_set_pwr\n")); + + if (rf_path == RF90_PATH_A) { + final_bb_swing_idx[RF90_PATH_A] = + (rtldm->ofdm_index[RF90_PATH_A] > pwr_tracking_limit) ? 
+ pwr_tracking_limit : rtldm->ofdm_index[RF90_PATH_A]; + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d, \ + pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n", + rtldm->ofdm_index[RF90_PATH_A], final_bb_swing_idx[RF90_PATH_A])); + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_bb_swing_idx[RF90_PATH_A]]); + } + } else if (method == MIX_MODE) { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->DefaultOfdmIndex=%d, \ + pDM_Odm->Aboslute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n", + rtldm->default_ofdm_index, rtldm->aboslute_ofdm_swing_idx[rf_path], + rf_path )); + + + final_ofdm_swing_index = rtldm->default_ofdm_index + rtldm->aboslute_ofdm_swing_idx[rf_path]; + + if (rf_path == RF90_PATH_A) { + if(final_ofdm_swing_index > pwr_tracking_limit) { /*BBSwing higher then Limit*/ + + rtldm->remnant_cck_idx = final_ofdm_swing_index - pwr_tracking_limit; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index - pwr_tracking_limit; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[pwr_tracking_limit]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d \n", + pwr_tracking_limit, rtldm->remnant_ofdm_swing_idx[rf_path])); + } else if (final_ofdm_swing_index < 0) { + rtldm->remnant_cck_idx = final_ofdm_swing_index; + /* CCK Follow the same compensate value as Path A*/ + rtldm->remnant_ofdm_swing_idx[rf_path] = final_ofdm_swing_index; + + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[0]); + + rtldm->modify_txagc_flag_path_a = true; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d \n", + rtldm->remnant_ofdm_swing_idx[rf_path])); + } else { + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, rtl8812ae_txscaling_table[final_ofdm_swing_index]); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A Compensate with BBSwing , Final_OFDM_Swing_Index = %d \n", + final_ofdm_swing_index)); + + if(rtldm->modify_txagc_flag_path_a) { /*If TxAGC has changed, reset TxAGC again*/ + rtldm->remnant_cck_idx = 0; + rtldm->remnant_ofdm_swing_idx[rf_path] = 0; + + /*Set TxAGC Page C{};*/ + rtl8821ae_phy_set_txpower_level_by_path(hw, rtlphy->current_channel, RF90_PATH_A); + + rtldm->modify_txagc_flag_path_a = false; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE \n")); + } + } + } + + } else { + return; + } +} + + +void rtl8821ae_dm_txpower_tracking_callback_thermalmeter + (struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + u8 thermal_value = 0, delta, delta_lck, delta_iqk, p = 0, i = 0; + u8 thermal_value_avg_count = 0; + u32 thermal_value_avg = 0; + + u8 ofdm_min_index = 6; /*OFDM BB Swing should be less than +3.0dB, which is required by Arthur*/ + u8 index_for_channel = 0; /* GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/ + + /* 1. 
The following TWO tables decide the final index of OFDM/CCK swing table.*/ + u8 *delta_swing_table_idx_tup_a; + u8 *delta_swing_table_idx_tdown_a; + u8 *delta_swing_table_idx_tup_b; + u8 *delta_swing_table_idx_tdown_b; + + /*2. Initialization (7 steps in total)*/ + rtl8821ae_get_delta_swing_table(hw, (u8**)&delta_swing_table_idx_tup_a, + (u8**)&delta_swing_table_idx_tdown_a, + (u8**)&delta_swing_table_idx_tup_b, + (u8**)&delta_swing_table_idx_tdown_b); + + rtldm->btxpower_trackinginit = true; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("===>rtl8821ae_dm_txpower_tracking_callback_thermalmeter, \ + \n pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:\ + %d, pDM_Odm->DefaultOfdmIndex: %d\n", + rtldm->bb_swing_idx_cck_base, + rtldm->bb_swing_idx_ofdm_base[RF90_PATH_A], + rtldm->default_ofdm_index)); + + thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER_8812A, 0xfc00); /*0x42: RF Reg[15:10] 88E*/ + if (!rtldm->txpower_track_control || rtlefuse->eeprom_thermalmeter == 0 || + rtlefuse->eeprom_thermalmeter == 0xFF) + return; + + + /* 3. Initialize ThermalValues of RFCalibrateInfo*/ + + if(rtlhal->reloadtxpowerindex) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("reload ofdm index for band switch\n")); + } + + /*4. Calculate average thermal meter*/ + rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value; + rtldm->thermalvalue_avg_index++; + if(rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8812A) + /*Average times = c.AverageThermalNum*/ + rtldm->thermalvalue_avg_index = 0; + + for(i = 0; i < AVG_THERMAL_NUM_8812A; i++) + { + if(rtldm->thermalvalue_avg[i]) + { + thermal_value_avg += rtldm->thermalvalue_avg[i]; + thermal_value_avg_count++; + } + } + + if(thermal_value_avg_count) /*Calculate Average ThermalValue after average enough times*/ + { + thermal_value = (u8)(thermal_value_avg / thermal_value_avg_count); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n", + thermal_value, rtlefuse->eeprom_thermalmeter)); + } + + /*5. Calculate delta, delta_LCK, delta_IQK.*/ + /*"delta" here is used to determine whether thermal value changes or not.*/ + delta = (thermal_value > rtldm->thermalvalue) ? \ + (thermal_value - rtldm->thermalvalue): \ + (rtldm->thermalvalue - thermal_value); + delta_lck = (thermal_value > rtldm->thermalvalue_lck) ? \ + (thermal_value - rtldm->thermalvalue_lck) : \ + (rtldm->thermalvalue_lck - thermal_value); + delta_iqk = (thermal_value > rtldm->thermalvalue_iqk) ? \ + (thermal_value - rtldm->thermalvalue_iqk) : \ + (rtldm->thermalvalue_iqk - thermal_value); + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n", + delta, delta_lck, delta_iqk)); + + /* 6. If necessary, do LCK. */ + + if (delta_lck >= IQK_THRESHOLD) /*Delta temperature is equal to or larger than 20 centigrade.*/ + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_LCK(%d) >= Threshold_IQK(%d)\n", + delta_lck, IQK_THRESHOLD)); + rtldm->thermalvalue_lck = thermal_value; + rtl8821ae_phy_lccalibrate(hw); + } + + /*7. If necessary, move the index of swing table to adjust Tx power.*/ + + if (delta > 0 && rtldm->txpower_track_control) + { + /*"delta" here is used to record the absolute value of difference.*/ + delta = thermal_value > rtlefuse->eeprom_thermalmeter ?
\ + (thermal_value - rtlefuse->eeprom_thermalmeter) : \ + (rtlefuse->eeprom_thermalmeter - thermal_value); + + if (delta >= TXSCALE_TABLE_SIZE) + delta = TXSCALE_TABLE_SIZE - 1; + + /*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/ + + if(thermal_value > rtlefuse->eeprom_thermalmeter) { + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_swing_table_idx_tup_a[%d] = %d\n", + delta, delta_swing_table_idx_tup_a[delta])); + rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta]; + + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = delta_swing_table_idx_tup_a[delta]; + /*Record delta swing for mix mode power tracking*/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Temp is higher and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A])); + + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("delta_swing_table_idx_tdown_a[%d] = %d\n", + delta, delta_swing_table_idx_tdown_a[delta])); + + rtldm->delta_power_index_last[RF90_PATH_A] = rtldm->delta_power_index[RF90_PATH_A]; + rtldm->delta_power_index[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta]; + + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A] = -1 * delta_swing_table_idx_tdown_a[delta]; + /* Record delta swing for mix mode power tracking*/ + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("******Temp is lower and pDM_Odm->Aboslute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n", + rtldm->aboslute_ofdm_swing_idx[RF90_PATH_A])); + } + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("\n\n================================ [Path-%c] \ + Calculating PowerIndexOffset ================================\n", + (p == RF90_PATH_A ? 'A' : 'B'))); + + if (rtldm->delta_power_index[p] == rtldm->delta_power_index_last[p]) + /*If Thermal value changes but lookup table value still the same*/ + rtldm->power_index_offset[p] = 0; + else + rtldm->power_index_offset[p] = + rtldm->delta_power_index[p] - rtldm->delta_power_index_last[p]; + /*Power Index Diff between 2 times Power Tracking*/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n", + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->power_index_offset[p], + rtldm->delta_power_index[p] , + rtldm->delta_power_index_last[p])); + + rtldm->ofdm_index[p] = + rtldm->bb_swing_idx_ofdm_base[p] + rtldm->power_index_offset[p]; + rtldm->cck_index = + rtldm->bb_swing_idx_cck_base + rtldm->power_index_offset[p]; + + rtldm->bb_swing_idx_cck = rtldm->cck_index; + rtldm->bb_swing_idx_ofdm[p] = rtldm->ofdm_index[p]; + + /*************Print BB Swing Base and Index Offset*************/ + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n", + rtldm->bb_swing_idx_cck, + rtldm->bb_swing_idx_cck_base, + rtldm->power_index_offset[p])); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n", + rtldm->bb_swing_idx_ofdm[p], + (p == RF90_PATH_A ? 
'A' : 'B'), + rtldm->bb_swing_idx_ofdm_base[p], + rtldm->power_index_offset[p])); + + /*7.1 Handle boundary conditions of index.*/ + + + if(rtldm->ofdm_index[p] > TXSCALE_TABLE_SIZE -1) + { + rtldm->ofdm_index[p] = TXSCALE_TABLE_SIZE -1; + } + else if (rtldm->ofdm_index[p] < ofdm_min_index) + { + rtldm->ofdm_index[p] = ofdm_min_index; + } + } + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("\n\n======================================================\ + ==================================================\n")); + if(rtldm->cck_index > TXSCALE_TABLE_SIZE -1) + rtldm->cck_index = TXSCALE_TABLE_SIZE -1; + else if (rtldm->cck_index < 0) + rtldm->cck_index = 0; + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("The thermal meter is unchanged or TxPowerTracking OFF(%d): \ + ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n", + rtldm->txpower_track_control, + thermal_value, + rtldm->thermalvalue)); + + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtldm->power_index_offset[p] = 0; + } + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n", + rtldm->cck_index, rtldm->bb_swing_idx_cck_base)); /*Print Swing base & current*/ + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n", + rtldm->ofdm_index[p], + (p == RF90_PATH_A ? 'A' : 'B'), + rtldm->bb_swing_idx_ofdm_base[p])); + } + + if ((rtldm->power_index_offset[RF90_PATH_A] != 0 || + rtldm->power_index_offset[RF90_PATH_B] != 0 ) && + rtldm->txpower_track_control) + { + /*7.2 Configure the Swing Table to adjust Tx Power.*/ + /*Always TRUE after Tx Power is adjusted by power tracking.*/ + /* + 2012/04/23 MH According to Luke's suggestion, we can not write BB digital + to increase TX power. Otherwise, EVM will be bad. + + 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. 
+ */ + if (thermal_value > rtldm->thermalvalue) + { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue)); + } else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/ + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n", + rtldm->power_index_offset[RF90_PATH_A], + delta, thermal_value, + rtlefuse->eeprom_thermalmeter, + rtldm->thermalvalue)); + } + + if (thermal_value > rtlefuse->eeprom_thermalmeter) { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature(%d) higher than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter)); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("**********Enter POWER Tracking MIX_MODE**********\n")); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtl8821ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel); + + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Temperature(%d) lower than PG value(%d)\n", + thermal_value, rtlefuse->eeprom_thermalmeter)); + + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("**********Enter POWER Tracking MIX_MODE**********\n")); + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE, p, index_for_channel); + + } + + rtldm->bb_swing_idx_cck_base = rtldm->bb_swing_idx_cck; /*Record last time Power Tracking result as base.*/ + for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) + rtldm->bb_swing_idx_ofdm_base[p] = rtldm->bb_swing_idx_ofdm[p]; + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", + rtldm->thermalvalue, thermal_value)); + + rtldm->thermalvalue = thermal_value; /*Record last Power Tracking Thermal Value*/ + + } + /*Delta temperature is equal to or larger than 20 centigrade (When threshold is 8).*/ + if ((delta_iqk >= IQK_THRESHOLD)) { + + if ( !rtlphy->b_iqk_in_progress) { + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + rtl8821ae_do_iqk(hw, delta_iqk, thermal_value, 8); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + } + } + + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n")); +} + + +void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + static u8 tm_trigger = 0; + + //if (!rtlpriv->dm.btxpower_tracking) + // return; + + if (!tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16), + 0x03); + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Trigger 8821ae Thermal Meter!!\n")); + tm_trigger = 1; + return; + } else { + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, + ("Schedule TxPowerTracking !!\n")); + + rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw); + tm_trigger = 0; + } +} + + +void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *p_ra = &(rtlpriv->ra); + u32 low_rssithresh_for_ra = p_ra->low2high_rssi_thresh_for_ra; + u32 high_rssithresh_for_ra = p_ra->high_rssi_thresh_for_ra; + u8 go_up_gap = 5; + struct 
ieee80211_sta *sta = NULL; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(COMP_RATE, DBG_LOUD, + ("driver is going to unload\n")); + return; + } + + if (!rtlpriv->dm.b_useramask) { + RT_TRACE(COMP_RATE, DBG_LOUD, + ("driver does not control rate adaptive mask\n")); + return; + } + + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra += go_up_gap; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra += go_up_gap; + low_rssithresh_for_ra += go_up_gap; + break; + default: + break; + } + + if (rtlpriv->dm.undecorated_smoothed_pwdb > + (long)high_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_HIGH; + else if (rtlpriv->dm.undecorated_smoothed_pwdb > + (long)low_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + else + p_ra->ratr_state = DM_RATR_STA_LOW; + + if (p_ra->pre_ratr_state != p_ra->ratr_state ) { + RT_TRACE(COMP_RATE, DBG_LOUD, + ("RSSI = %ld\n", + rtlpriv->dm.undecorated_smoothed_pwdb)); + RT_TRACE(COMP_RATE, DBG_LOUD, + ("RSSI_LEVEL = %d\n", p_ra->ratr_state)); + RT_TRACE(COMP_RATE, DBG_LOUD, + ("PreState = %d, CurState = %d\n", + p_ra->pre_ratr_state, p_ra->ratr_state)); + + rcu_read_lock(); + sta = rtl_find_sta(hw, mac->bssid); + if (sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, p_ra->ratr_state); + rcu_read_unlock(); + + p_ra->pre_ratr_state = p_ra->ratr_state; + } + } +} + +bool rtl8821ae_dm_is_edca_turbo_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv)) + return true; + if (rtlpriv->mac80211.mode == WIRELESS_MODE_B) + return true; + + return false; +} + +void rtl8821ae_dm_edca_choose_traffic_idx( + struct ieee80211_hw *hw, u64 cur_tx_bytes, u64 cur_rx_bytes, bool b_bias_on_rx, + bool *pb_is_cur_rdl_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if(b_bias_on_rx) + { + if (cur_tx_bytes > (cur_rx_bytes*4)) { + *pb_is_cur_rdl_state = false; + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("Uplink Traffic\n ")); + } else { + *pb_is_cur_rdl_state = true; + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("Balance Traffic\n")); + } + } else { + if (cur_rx_bytes > (cur_tx_bytes*4)) { + *pb_is_cur_rdl_state = true; + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("Downlink Traffic\n")); + } else { + *pb_is_cur_rdl_state = false; + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("Balance Traffic\n")); + } + } + return ; +} + +static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + /*Keep past Tx/Rx packet count for RT-to-RT EDCA turbo.*/ + unsigned long cur_tx_ok_cnt = 0; + unsigned long cur_rx_ok_cnt = 0; + u32 edca_be_ul = 0x5ea42b; + u32 edca_be_dl = 0x5ea42b; + u32 edca_be = 0x5ea42b; + u8 iot_peer = 0; + bool *pb_is_cur_rdl_state = NULL; + bool b_last_is_cur_rdl_state = false; + bool b_bias_on_rx = false; + bool b_edca_turbo_on = false; + + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("rtl8821ae_dm_check_edca_turbo=====>")); + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("Orginial BE PARAM: 0x%x\n", + rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N))); + + /*=============================== + list paramter for different platform + ===============================*/ + b_last_is_cur_rdl_state = rtlpriv->dm.bis_cur_rdlstate; + pb_is_cur_rdl_state = &( rtlpriv->dm.bis_cur_rdlstate); + + cur_tx_ok_cnt = rtlpriv->stats.txbytesunicast - rtldm->last_tx_ok_cnt; + 
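/* Per-watchdog-interval byte deltas: unicast TX bytes above, unicast RX bytes below; both feed rtl8821ae_dm_edca_choose_traffic_idx() to decide the uplink/downlink bias for EDCA turbo. */ +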
cur_rx_ok_cnt = rtlpriv->stats.rxbytesunicast - rtldm->last_rx_ok_cnt; + + rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast; + rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast; + + iot_peer = rtlpriv->mac80211.vendor; + b_bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH) ? + true : false; + b_edca_turbo_on = ((!rtlpriv->dm.bis_any_nonbepkts) && + (!rtlpriv->dm.b_disable_framebursting)) ? + true : false; + + /*if (rtl8821ae_dm_is_edca_turbo_disable(hw)) + goto dm_CheckEdcaTurbo_EXIT;*/ + + if ((iot_peer == PEER_CISCO) && (mac->mode == WIRELESS_MODE_N_24G)) + { + edca_be_dl = edca_setting_dl[iot_peer]; + edca_be_ul = edca_setting_ul[iot_peer]; + } + + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x \n", + rtlpriv->dm.bis_any_nonbepkts, rtlpriv->dm.b_disable_framebursting)); + + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n", + b_edca_turbo_on, b_bias_on_rx)); + + if (b_edca_turbo_on) { + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("curTxOkCnt : 0x%lx \n",cur_tx_ok_cnt)); + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("curRxOkCnt : 0x%lx \n",cur_rx_ok_cnt)); + if(b_bias_on_rx) + rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt, + cur_rx_ok_cnt, true, pb_is_cur_rdl_state); + else + rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt, + cur_rx_ok_cnt, false, pb_is_cur_rdl_state); + + edca_be = ((*pb_is_cur_rdl_state) == true) ? edca_be_dl : edca_be_ul; + + rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be); + + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("EDCA Turbo on: EDCA_BE:0x%x\n", edca_be)); + + rtlpriv->dm.bcurrent_turbo_edca = true; + + RT_TRACE(COMP_TURBO, DBG_LOUD, + ("EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x \n", + edca_be_dl, edca_be_ul, edca_be)); + } else { + if (rtlpriv->dm.bcurrent_turbo_edca) { + u8 tmp = AC0_BE; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, + (u8 *) (&tmp)); + } + rtlpriv->dm.bcurrent_turbo_edca = false; + } + +/* dm_CheckEdcaTurbo_EXIT: */ + rtlpriv->dm.bis_any_nonbepkts = false; + rtldm->last_tx_ok_cnt = rtlpriv->stats.txbytesunicast; + rtldm->last_rx_ok_cnt = rtlpriv->stats.rxbytesunicast; +} + +static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 cur_cck_cca_thresh; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + /*dm_digtable.rssi_val_min = rtl8821ae_dm_initial_gain_min_pwdb(hw);*/ + if (dm_digtable.rssi_val_min > 25) + cur_cck_cca_thresh = 0xcd; + else if ((dm_digtable.rssi_val_min <= 25) && (dm_digtable.rssi_val_min > 10)) + cur_cck_cca_thresh = 0x83; + else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + + } else { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000) + cur_cck_cca_thresh = 0x83; + else + cur_cck_cca_thresh = 0x40; + } + + if (dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh) { + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh); + } + + dm_digtable.pre_cck_cca_thres = dm_digtable.cur_cck_cca_thres; + dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh; + RT_TRACE(COMP_DIG, DBG_TRACE, + ("CCK cca thresh hold =%x\n", dm_digtable.cur_cck_cca_thres)); + +} + +void rtl8821ae_dm_dynamic_edcca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool b_fw_current_in_ps_mode = false; + + rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_FW_PSMODE_STATUS, \ + (u8*)(&b_fw_current_in_ps_mode)); + if (b_fw_current_in_ps_mode) + return; +} + +void rtl8812ae_dm_update_txpath(struct 
ieee80211_hw *hw, u8 path) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtldm->resp_tx_path != path) { + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("Need to Update Tx Path\n")); + if (path == RF90_PATH_A) { + /*Tx by Reg*/ + rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x111); + /*Resp Tx by Txinfo*/ + rtl_set_bbreg(hw, 0x6d8, BIT(7) | BIT(6), 1); + } else { + /*Tx by Reg*/ + rtl_set_bbreg(hw, 0x80c, 0xFFF0, 0x222); + /*Resp Tx by Txinfo*/ + rtl_set_bbreg(hw, 0x6d8, BIT(7) | BIT(6), 2); + } + } + rtldm->resp_tx_path = path; + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("Path=%s\n",(path == RF90_PATH_A) ? \ + "RF90_PATH_A":"RF90_PATH_B")); +} + +void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + //rtl_set_bbreg(hw, 0x80c , BIT(29), 1); /*Tx path from Reg*/ + rtl_set_bbreg(hw, 0x80c , 0xFFF0, 0x111); /*Tx by Reg*/ + rtl_set_bbreg(hw, 0x6d8 , BIT(7) | BIT(6), 1); /*Resp Tx by Txinfo*/ + rtl8812ae_dm_update_txpath(hw, RF90_PATH_A); + + rtldm->path_sel = 1; /* TxInfo default at path-A*/ +} + +void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw, + u8 *pdesc) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + SET_TX_DESC_TX_ANT(pdesc, rtldm->path_sel); +} + +void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw, + u32 rssi_a, u32 rssi_b) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + rtldm->patha_sum += rssi_a; + rtldm->patha_cnt++; + + rtldm->pathb_sum += rssi_b; + rtldm->pathb_cnt++; +} + +void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u32 rssi_avg_a = 0; + u32 rssi_avg_b = 0; + u32 local_min_rssi = 0; + u32 min_rssi = 0xFF; + u8 tx_resp_path = 0, target_path; + struct ieee80211_sta *sta = NULL; + + sta = rtl_find_sta(hw, mac->bssid); + if (sta) { + /*Calculate RSSI per Path*/ + rssi_avg_a = (rtldm->patha_cnt != 0) ? \ + (rtldm->patha_sum / rtldm->patha_cnt) : 0; + rssi_avg_b = (rtldm->pathb_cnt != 0) ? \ + (rtldm->pathb_sum / rtldm->pathb_cnt) : 0; + + target_path = (rssi_avg_a == rssi_avg_b) ? rtldm->resp_tx_path : \ + ((rssi_avg_a >= rssi_avg_b) ? RF90_PATH_A : RF90_PATH_B); + + RT_TRACE(COMP_DIG, DBG_TRACE, \ + ("assoc_id=%d, PathA_Sum=%d, PathA_Cnt=%d\n", \ + mac->assoc_id, rtldm->patha_sum, rtldm->patha_cnt)); + RT_TRACE(COMP_DIG, DBG_TRACE, \ + ("assoc_id=%d, PathB_Sum=%d, PathB_Cnt=%d\n", \ + mac->assoc_id, rtldm->pathb_sum, rtldm->pathb_cnt)); + RT_TRACE(COMP_DIG, DBG_TRACE, \ + ("assoc_id=%d, RssiAvgA= %d, RssiAvgB= %d\n", \ + mac->assoc_id, rssi_avg_a, rssi_avg_b)); + + /*Select Resp Tx Path*/ + local_min_rssi = (rssi_avg_a > rssi_avg_b) ? rssi_avg_b : rssi_avg_a; + if (local_min_rssi < min_rssi) + { + min_rssi = local_min_rssi; + tx_resp_path = target_path; + } + + /*Select Tx DESC*/ + if (target_path == RF90_PATH_A) + rtldm->path_sel = 1; + else + rtldm->path_sel = 2; + + RT_TRACE(COMP_DIG, DBG_TRACE, \ + ("Tx from TxInfo, TargetPath=%s\n", \ + (target_path==RF90_PATH_A) ?
\ + "ODM_RF_PATH_A":"ODM_RF_PATH_B")); + RT_TRACE(COMP_DIG, DBG_TRACE, \ + ("pDM_PathDiv->PathSel= %d\n", \ + rtldm->path_sel)); + } + rtldm->patha_cnt = 0; + rtldm->patha_sum = 0; + rtldm->pathb_cnt = 0; + rtldm->pathb_sum = 0; +} + +void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + u8 crystal_cap; + u32 packet_count; + int cfo_khz_a,cfo_khz_b,cfo_ave = 0, adjust_xtal = 0; + int cfo_ave_diff; + + if (rtlpriv->mac80211.link_state < MAC80211_LINKED){ + /*1.Enable ATC*/ + if (rtldm->atc_status == ATC_STATUS_OFF) + { + rtl_set_bbreg(hw, RFC_AREA, BIT(14), ATC_STATUS_ON); + rtldm->atc_status = ATC_STATUS_ON; + } + + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): No link!!\n")); + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): atc_status = %d\n", \ + rtldm->atc_status)); + + if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) + { + rtldm->crystal_cap = rtlpriv->efuse.crystalcap; + crystal_cap = rtldm->crystal_cap & 0x3f; + crystal_cap = crystal_cap & 0x3f; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \ + 0x7ff80000, (crystal_cap | (crystal_cap << 6))); + } + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): crystal_cap = 0x%x\n", \ + rtldm->crystal_cap)); + }else{ + /*1. Calculate CFO for path-A & path-B*/ + cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280; + cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280; + packet_count = rtldm->packet_count; + + /*2.No new packet*/ + if (packet_count == rtldm->packet_count_pre) { + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): packet counter doesn't change\n")); + return; + } + + rtldm->packet_count_pre = packet_count; + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): packet counter = %d\n", \ + rtldm->packet_count)); + + /*3.Average CFO*/ + if (rtlpriv->phy.rf_type == RF_1T1R) + cfo_ave = cfo_khz_a; + else + cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1; + + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch():" + "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n", + cfo_khz_a, cfo_khz_b, cfo_ave)); + + /*4.Avoid abnormal large CFO*/ + cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave)? 
+ (rtldm->cfo_ave_pre - cfo_ave): + (cfo_ave - rtldm->cfo_ave_pre); + + if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0){ + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): first large CFO hit\n")); + rtldm->large_cfo_hit = 1; + return; + } + else + rtldm->large_cfo_hit = 0; + + rtldm->cfo_ave_pre = cfo_ave; + + /*CFO tracking by adjusting Xtal cap.*/ + + /*1.Dynamic Xtal threshold*/ + if (cfo_ave >= -rtldm->cfo_threshold && + cfo_ave <= rtldm->cfo_threshold && + rtldm->is_freeze == 0){ + if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL){ + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10; + rtldm->is_freeze = 1; + } + else + rtldm->cfo_threshold = CFO_THRESHOLD_XTAL; + } + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): Dynamic threshold = %d\n", \ + rtldm->cfo_threshold)); + + /* 2.Calculate Xtal offset*/ + if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f) + adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 2) + 1; + else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) && rtlpriv->dm.crystal_cap > 0) + adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1; + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): " + "Crystal cap = 0x%x, Crystal cap offset = %d\n", + rtldm->crystal_cap, adjust_xtal)); + + /*3.Adjudt Crystal Cap.*/ + if (adjust_xtal != 0){ + rtldm->is_freeze = 0; + rtldm->crystal_cap += adjust_xtal; + + if (rtldm->crystal_cap > 0x3f) + rtldm->crystal_cap = 0x3f; + else if (rtldm->crystal_cap < 0) + rtldm->crystal_cap = 0; + + crystal_cap = rtldm->crystal_cap & 0x3f; + crystal_cap = crystal_cap & 0x3f; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, \ + 0x7ff80000, (crystal_cap | (crystal_cap << 6))); + RT_TRACE(COMP_DIG, DBG_LOUD, \ + ("rtl8821ae_dm_dynamic_atc_switch(): New crystal cap = 0x%x \n", \ + rtldm->crystal_cap)); + } + } + +} + +void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_fw_current_inpsmode = false; + bool b_fw_ps_awake = true; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *) (&b_fw_current_inpsmode)); + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, + (u8 *) (&b_fw_ps_awake)); + + if(ppsc->p2p_ps_info.p2p_ps_mode) + b_fw_ps_awake = false; + + if((ppsc->rfpwr_state == ERFON) && + ((!b_fw_current_inpsmode) && b_fw_ps_awake) && + (!ppsc->rfchange_inprogress)) { + rtl8821ae_dm_common_info_self_update(hw); + rtl8821ae_dm_false_alarm_counter_statistics(hw); + rtl8821ae_dm_check_rssi_monitor(hw); + rtl8821ae_dm_dig(hw); + rtl8821ae_dm_dynamic_edcca(hw); + rtl8821ae_dm_cck_packet_detection_thresh(hw); + rtl8821ae_dm_refresh_rate_adaptive_mask(hw); + rtl8821ae_dm_check_edca_turbo(hw); + rtl8821ae_dm_dynamic_atc_switch(hw); + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_dm_check_txpower_tracking_thermalmeter(hw); + else + rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw); + rtl8821ae_dm_iq_calibrate(hw); + if (rtlpriv->cfg->ops->get_btc_status()){ + rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); + } + } + + rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0; +} + +void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, + u8 *pdesc, u32 mac_id) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + struct fast_ant_trainning *pfat_table= &(rtldm->fat_table); + + if (rtlhal->hw_type != 
HARDWARE_TYPE_RTL8812AE) + return; + + if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || + (rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV)){ + SET_TX_DESC_TX_ANT(pdesc, pfat_table->antsel_a[mac_id]); + } +} --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/dm.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/dm.h @@ -0,0 +1,426 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_DM_H__ +#define __RTL8821AE_DM_H__ + +#define MAIN_ANT 0 +#define AUX_ANT 1 +#define MAIN_ANT_CG_TRX 1 +#define AUX_ANT_CG_TRX 0 +#define MAIN_ANT_CGCS_RX 0 +#define AUX_ANT_CGCS_RX 1 + +#define TXSCALE_TABLE_SIZE 37 + +/*RF REG LIST*/ +#define DM_REG_RF_MODE_11N 0x00 +#define DM_REG_RF_0B_11N 0x0B +#define DM_REG_CHNBW_11N 0x18 +#define DM_REG_T_METER_11N 0x24 +#define DM_REG_RF_25_11N 0x25 +#define DM_REG_RF_26_11N 0x26 +#define DM_REG_RF_27_11N 0x27 +#define DM_REG_RF_2B_11N 0x2B +#define DM_REG_RF_2C_11N 0x2C +#define DM_REG_RXRF_A3_11N 0x3C +#define DM_REG_T_METER_92D_11N 0x42 +#define DM_REG_T_METER_88E_11N 0x42 + + + +/*BB REG LIST*/ +/*PAGE 8 */ +#define DM_REG_BB_CTRL_11N 0x800 +#define DM_REG_RF_PIN_11N 0x804 +#define DM_REG_PSD_CTRL_11N 0x808 +#define DM_REG_TX_ANT_CTRL_11N 0x80C +#define DM_REG_BB_PWR_SAV5_11N 0x818 +#define DM_REG_CCK_RPT_FORMAT_11N 0x824 +#define DM_REG_RX_DEFUALT_A_11N 0x858 +#define DM_REG_RX_DEFUALT_B_11N 0x85A +#define DM_REG_BB_PWR_SAV3_11N 0x85C +#define DM_REG_ANTSEL_CTRL_11N 0x860 +#define DM_REG_RX_ANT_CTRL_11N 0x864 +#define DM_REG_PIN_CTRL_11N 0x870 +#define DM_REG_BB_PWR_SAV1_11N 0x874 +#define DM_REG_ANTSEL_PATH_11N 0x878 +#define DM_REG_BB_3WIRE_11N 0x88C +#define DM_REG_SC_CNT_11N 0x8C4 +#define DM_REG_PSD_DATA_11N 0x8B4 +/*PAGE 9*/ +#define DM_REG_ANT_MAPPING1_11N 0x914 +#define DM_REG_ANT_MAPPING2_11N 0x918 +/*PAGE A*/ +#define DM_REG_CCK_ANTDIV_PARA1_11N 0xA00 +#define DM_REG_CCK_CCA_11N 0xA0A +#define DM_REG_CCK_CCA_11AC 0xA0A +#define DM_REG_CCK_ANTDIV_PARA2_11N 0xA0C +#define DM_REG_CCK_ANTDIV_PARA3_11N 0xA10 +#define DM_REG_CCK_ANTDIV_PARA4_11N 0xA14 +#define DM_REG_CCK_FILTER_PARA1_11N 0xA22 +#define DM_REG_CCK_FILTER_PARA2_11N 0xA23 +#define DM_REG_CCK_FILTER_PARA3_11N 0xA24 +#define DM_REG_CCK_FILTER_PARA4_11N 0xA25 +#define DM_REG_CCK_FILTER_PARA5_11N 0xA26 +#define DM_REG_CCK_FILTER_PARA6_11N 0xA27 +#define DM_REG_CCK_FILTER_PARA7_11N 0xA28 +#define DM_REG_CCK_FILTER_PARA8_11N 0xA29 +#define DM_REG_CCK_FA_RST_11N 0xA2C +#define 
DM_REG_CCK_FA_MSB_11N 0xA58 +#define DM_REG_CCK_FA_LSB_11N 0xA5C +#define DM_REG_CCK_CCA_CNT_11N 0xA60 +#define DM_REG_BB_PWR_SAV4_11N 0xA74 +/*PAGE B */ +#define DM_REG_LNA_SWITCH_11N 0xB2C +#define DM_REG_PATH_SWITCH_11N 0xB30 +#define DM_REG_RSSI_CTRL_11N 0xB38 +#define DM_REG_CONFIG_ANTA_11N 0xB68 +#define DM_REG_RSSI_BT_11N 0xB9C +/*PAGE C */ +#define DM_REG_OFDM_FA_HOLDC_11N 0xC00 +#define DM_REG_RX_PATH_11N 0xC04 +#define DM_REG_TRMUX_11N 0xC08 +#define DM_REG_OFDM_FA_RSTC_11N 0xC0C +#define DM_REG_RXIQI_MATRIX_11N 0xC14 +#define DM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C +#define DM_REG_IGI_A_11N 0xC50 +#define DM_REG_IGI_A_11AC 0xC50 +#define DM_REG_ANTDIV_PARA2_11N 0xC54 +#define DM_REG_IGI_B_11N 0xC58 +#define DM_REG_IGI_B_11AC 0xE50 +#define DM_REG_ANTDIV_PARA3_11N 0xC5C +#define DM_REG_BB_PWR_SAV2_11N 0xC70 +#define DM_REG_RX_OFF_11N 0xC7C +#define DM_REG_TXIQK_MATRIXA_11N 0xC80 +#define DM_REG_TXIQK_MATRIXB_11N 0xC88 +#define DM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94 +#define DM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C +#define DM_REG_RXIQK_MATRIX_LSB_11N 0xCA0 +#define DM_REG_ANTDIV_PARA1_11N 0xCA4 +#define DM_REG_OFDM_FA_TYPE1_11N 0xCF0 +/*PAGE D */ +#define DM_REG_OFDM_FA_RSTD_11N 0xD00 +#define DM_REG_OFDM_FA_TYPE2_11N 0xDA0 +#define DM_REG_OFDM_FA_TYPE3_11N 0xDA4 +#define DM_REG_OFDM_FA_TYPE4_11N 0xDA8 +/*PAGE E */ +#define DM_REG_TXAGC_A_6_18_11N 0xE00 +#define DM_REG_TXAGC_A_24_54_11N 0xE04 +#define DM_REG_TXAGC_A_1_MCS32_11N 0xE08 +#define DM_REG_TXAGC_A_MCS0_3_11N 0xE10 +#define DM_REG_TXAGC_A_MCS4_7_11N 0xE14 +#define DM_REG_TXAGC_A_MCS8_11_11N 0xE18 +#define DM_REG_TXAGC_A_MCS12_15_11N 0xE1C +#define DM_REG_FPGA0_IQK_11N 0xE28 +#define DM_REG_TXIQK_TONE_A_11N 0xE30 +#define DM_REG_RXIQK_TONE_A_11N 0xE34 +#define DM_REG_TXIQK_PI_A_11N 0xE38 +#define DM_REG_RXIQK_PI_A_11N 0xE3C +#define DM_REG_TXIQK_11N 0xE40 +#define DM_REG_RXIQK_11N 0xE44 +#define DM_REG_IQK_AGC_PTS_11N 0xE48 +#define DM_REG_IQK_AGC_RSP_11N 0xE4C +#define DM_REG_BLUETOOTH_11N 0xE6C +#define DM_REG_RX_WAIT_CCA_11N 0xE70 +#define DM_REG_TX_CCK_RFON_11N 0xE74 +#define DM_REG_TX_CCK_BBON_11N 0xE78 +#define DM_REG_OFDM_RFON_11N 0xE7C +#define DM_REG_OFDM_BBON_11N 0xE80 +#define DM_REG_TX2RX_11N 0xE84 +#define DM_REG_TX2TX_11N 0xE88 +#define DM_REG_RX_CCK_11N 0xE8C +#define DM_REG_RX_OFDM_11N 0xED0 +#define DM_REG_RX_WAIT_RIFS_11N 0xED4 +#define DM_REG_RX2RX_11N 0xED8 +#define DM_REG_STANDBY_11N 0xEDC +#define DM_REG_SLEEP_11N 0xEE0 +#define DM_REG_PMPD_ANAEN_11N 0xEEC + + +/*MAC REG LIST*/ +#define DM_REG_BB_RST_11N 0x02 +#define DM_REG_ANTSEL_PIN_11N 0x4C +#define DM_REG_EARLY_MODE_11N 0x4D0 +#define DM_REG_RSSI_MONITOR_11N 0x4FE +#define DM_REG_EDCA_VO_11N 0x500 +#define DM_REG_EDCA_VI_11N 0x504 +#define DM_REG_EDCA_BE_11N 0x508 +#define DM_REG_EDCA_BK_11N 0x50C +#define DM_REG_TXPAUSE_11N 0x522 +#define DM_REG_RESP_TX_11N 0x6D8 +#define DM_REG_ANT_TRAIN_PARA1_11N 0x7b0 +#define DM_REG_ANT_TRAIN_PARA2_11N 0x7b4 + + +/*DIG Related*/ +#define DM_BIT_IGI_11N 0x0000007F +#define DM_BIT_IGI_11AC 0xFFFFFFFF + + + +#define HAL_DM_DIG_DISABLE BIT(0) +#define HAL_DM_HIPWR_DISABLE BIT(1) + +#define OFDM_TABLE_LENGTH 43 +#define CCK_TABLE_LENGTH 33 + +#define OFDM_TABLE_SIZE 37 +#define CCK_TABLE_SIZE 33 + +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + +#define DM_DIG_THRESH_HIGH 40 +#define DM_DIG_THRESH_LOW 35 + +#define DM_FALSEALARM_THRESH_LOW 400 +#define DM_FALSEALARM_THRESH_HIGH 1000 + +#define DM_DIG_MAX 0x3e +#define DM_DIG_MIN 0x1e + +#define DM_DIG_MAX_AP 0x32 +#define DM_DIG_MIN_AP 0x20 + 
+#define DM_DIG_FA_UPPER 0x3e +#define DM_DIG_FA_LOWER 0x1e +#define DM_DIG_FA_TH0 0x200 +#define DM_DIG_FA_TH1 0x300 +#define DM_DIG_FA_TH2 0x400 + +#define DM_DIG_BACKOFF_MAX 12 +#define DM_DIG_BACKOFF_MIN -4 +#define DM_DIG_BACKOFF_DEFAULT 10 + +#define RXPATHSELECTION_SS_TH_lOW 30 +#define RXPATHSELECTION_DIFF_TH 18 + +#define DM_RATR_STA_INIT 0 +#define DM_RATR_STA_HIGH 1 +#define DM_RATR_STA_MIDDLE 2 +#define DM_RATR_STA_LOW 3 + +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define WAIOTTHVal 25 + +#define TXHIGHPWRLEVEL_NORMAL 0 +#define TXHIGHPWRLEVEL_LEVEL1 1 +#define TXHIGHPWRLEVEL_LEVEL2 2 +#define TXHIGHPWRLEVEL_BT1 3 +#define TXHIGHPWRLEVEL_BT2 4 + +#define DM_TYPE_BYFW 0 +#define DM_TYPE_BYDRIVER 1 + +#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 +#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 +#define TXPWRTRACK_MAX_IDX 6 + +/* Dynamic ATC switch */ +#define ATC_STATUS_OFF 0x0 /* enable */ +#define ATC_STATUS_ON 0x1 /* disable */ +#define CFO_THRESHOLD_XTAL 10 /* kHz */ +#define CFO_THRESHOLD_ATC 80 /* kHz */ + +#define AVG_THERMAL_NUM_8812A 4 +#define TXPWR_TRACK_TABLE_SIZE 30 +#define MAX_PATH_NUM_8812A 2 +#define MAX_PATH_NUM_8821A 1 + + +struct ps_t { + u8 pre_ccastate; + u8 cur_ccasate; + u8 pre_rfstate; + u8 cur_rfstate; + u8 initialize; + long rssi_val_min; + +}; + +struct dig_t { + u8 dig_enable_flag; + u8 dig_ext_port_stage; + u32 rssi_lowthresh; + u32 rssi_highthresh; + + u32 fa_lowthresh; + u32 fa_highthresh; + + u8 cursta_connectctate; + u8 presta_connectstate; + u8 curmultista_connectstate; + + u8 pre_igvalue; + u8 cur_igvalue; + u8 bt30_cur_igi; + u8 backup_igvalue; + u8 stop_dig; + + char backoff_val; + char backoff_val_range_max; + char backoff_val_range_min; + u8 rx_gain_range_max; + u8 rx_gain_range_min; + u8 rssi_val_min; + + u8 pre_cck_cca_thres; + u8 cur_cck_cca_thres; + u8 pre_cck_pd_state; + u8 cur_cck_pd_state; + + u8 large_fa_hit; + u8 forbidden_igi; + u32 recover_cnt; + + u8 dig_dynamic_min_0; + u8 dig_dynamic_min_1; + bool b_media_connect_0; + bool b_media_connect_1; + + u32 antdiv_rssi_max; + u32 rssi_max; +}; + + +enum FAT_STATE { + FAT_NORMAL_STATE = 0, + FAT_TRAINING_STATE = 1, +}; + +enum tag_dynamic_init_gain_operation_type_definition { + DIG_TYPE_THRESH_HIGH = 0, + DIG_TYPE_THRESH_LOW = 1, + DIG_TYPE_BACKOFF = 2, + DIG_TYPE_RX_GAIN_MIN = 3, + DIG_TYPE_RX_GAIN_MAX = 4, + DIG_TYPE_ENABLE = 5, + DIG_TYPE_DISABLE = 6, + DIG_OP_TYPE_MAX +}; + +enum tag_cck_packet_detection_threshold_type_definition { + CCK_PD_STAGE_LowRssi = 0, + CCK_PD_STAGE_HighRssi = 1, + CCK_FA_STAGE_Low = 2, + CCK_FA_STAGE_High = 3, + CCK_PD_STAGE_MAX = 4, +}; + +enum dm_1r_cca_e { + CCA_1R = 0, + CCA_2R = 1, + CCA_MAX = 2, +}; + +enum dm_rf_e { + RF_SAVE = 0, + RF_NORMAL = 1, + RF_MAX = 2, +}; + +enum dm_sw_ant_switch_e { + ANS_ANTENNA_B = 1, + ANS_ANTENNA_A = 2, + ANS_ANTENNA_MAX = 3, +}; + +enum dm_dig_ext_port_alg_e { + DIG_EXT_PORT_STAGE_0 = 0, + DIG_EXT_PORT_STAGE_1 = 1, + DIG_EXT_PORT_STAGE_2 = 2, + DIG_EXT_PORT_STAGE_3 = 3, + DIG_EXT_PORT_STAGE_MAX = 4, +}; + +enum dm_dig_connect_e { + DIG_STA_DISCONNECT = 0, + DIG_STA_CONNECT = 1, + DIG_STA_BEFORE_CONNECT = 2, + DIG_MULTISTA_DISCONNECT = 3, + DIG_MULTISTA_CONNECT = 4, + DIG_CONNECT_MAX +}; + +enum pwr_track_control_method { + BBSWING, + TXAGC, + MIX_MODE +}; + +#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) +#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) +#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) +#define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) 
+#define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) +#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ + (((struct rtl_priv *)(_priv))->mac80211.opmode == NL80211_IFTYPE_ADHOC)? \ + (((struct rtl_priv *)(_priv))->dm.entry_min_undecoratedsmoothed_pwdb): \ + (((struct rtl_priv *)(_priv))->dm.undecorated_smoothed_pwdb) + +extern struct dig_t dm_digtable; +void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, + u8 *pdesc, u32 mac_id); +void rtl8821ae_dm_ant_sel_statistics(struct ieee80211_hw *hw, + u8 antsel_tr_mux, u32 mac_id, + u32 rx_pwdb_all); +void rtl8821ae_dm_fast_antenna_trainning_callback(unsigned long data); +void rtl8821ae_dm_init(struct ieee80211_hw *hw); +void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw); +void rtl8821ae_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi); +void rtl8821ae_dm_init_edca_turbo(struct ieee80211_hw *hw); +void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw); +void rtl8821ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl8821ae_dm_txpower_track_adjust(struct ieee80211_hw *hw, + u8 type,u8 *pdirection, + u32 *poutwrite_val); +void rtl8821ae_dm_clear_txpower_tracking_state(struct ieee80211_hw *hw); +void rtl8821ae_dm_write_cck_cca_thres(struct ieee80211_hw *hw, u8 current_cca); +void rtl8821ae_dm_initialize_txpower_tracking_thermalmeter(struct ieee80211_hw *hw); +void rtl8812ae_dm_path_diversity(struct ieee80211_hw *hw); +void rtl8812ae_dm_path_diversity_init(struct ieee80211_hw *hw); +void rtl8812ae_dm_path_statistics(struct ieee80211_hw *hw, + u32 rssi_a, u32 rssi_b); +void rtl812ae_dm_set_txpath_by_txinfo(struct ieee80211_hw *hw, + u8 *pdesc); +void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, + u8 rf_path, + u8 channel_mapped_index); +void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, + enum pwr_track_control_method method, u8 rf_path, u8 channel_mapped_index); + +void rtl8812ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate); +u8 rtl8812ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate); +void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw); +void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/fw.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/fw.c @@ -0,0 +1,1349 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "reg.h" +#include "def.h" +#include "fw.h" +#include "dm.h" + +static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + + if (enable) { + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x05); + + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); + rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); + + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + //printk("0x80=%02x.\n",tmp); + } else { + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); + tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); + //printk("0x80=%02x.\n",tmp); + } + +} + +static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw, + const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 blockSize = sizeof(u32); + u8 *bufferPtr = (u8 *) buffer; + u32 *pu4BytePtr = (u32 *) buffer; + u32 i, offset, blockCount, remainSize; + + blockCount = size / blockSize; + remainSize = size % blockSize; + + for (i = 0; i < blockCount; i++) { + offset = i * blockSize; + rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset), + *(pu4BytePtr + i)); + } + + if (remainSize) { + offset = blockCount * blockSize; + bufferPtr += offset; + for (i = 0; i < remainSize; i++) { + rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS + + offset + i), *(bufferPtr + i)); + } + } +} + +static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw, + u32 page, const u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value8; + u8 u8page = (u8) (page & 0x07); + + value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page; + + rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8); + _rtl8821ae_fw_block_write(hw, buffer, size); +} + +static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen) +{ + u32 fwlen = *pfwlen; + u8 remain = (u8) (fwlen % 4); + + remain = (remain == 0) ? 0 : (4 - remain); + + while (remain > 0) { + pfwbuf[fwlen] = 0; + fwlen++; + remain--; + } + + *pfwlen = fwlen; +} + +static void _rtl8821ae_write_fw(struct ieee80211_hw *hw, + enum version_8821ae version, + u8 *buffer, u32 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *bufferPtr = (u8 *) buffer; + u32 pageNums, remainSize; + u32 page, offset; + + RT_TRACE(COMP_FW, DBG_LOUD, ("FW size is %d bytes,\n", size)); + + _rtl8821ae_fill_dummy(bufferPtr, &size); + + pageNums = size / FW_8821AE_PAGE_SIZE; + remainSize = size % FW_8821AE_PAGE_SIZE; + + if (pageNums > 8) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Page numbers should not greater then 8\n")); + } + + for (page = 0; page < pageNums; page++) { + offset = page * FW_8821AE_PAGE_SIZE; + _rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset), + FW_8821AE_PAGE_SIZE); + } + + if (remainSize) { + offset = pageNums * FW_8821AE_PAGE_SIZE; + page = pageNums; + _rtl8821ae_fw_page_write(hw, page, (bufferPtr + offset), + remainSize); + } + +} + +static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err = -EIO; + u32 counter = 0; + u32 value32; + + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + } while ((counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT) && + (!(value32 & FWDL_CHKSUM_RPT))); + + if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) { + RT_TRACE(COMP_ERR, DBG_LOUD, + ("chksum report faill ! 
REG_MCUFWDL:0x%08x .\n", + value32)); + goto exit; + } + + RT_TRACE(COMP_FW, DBG_EMERG, + ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32)); + + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + value32 |= MCUFWDL_RDY; + value32 &= ~WINTINI_RDY; + rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); + + rtl8821ae_firmware_selfreset(hw); + + counter = 0; + do { + value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + if (value32 & WINTINI_RDY) { + RT_TRACE(COMP_FW, DBG_LOUD, + ("Polling FW ready success!! REG_MCUFWDL:0x%08x .\n", + value32)); + err = 0; + goto exit; + } + + udelay(FW_8821AE_POLLING_DELAY); + + } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT); + + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32)); + +exit: + return err; +} + +int rtl8821ae_download_fw(struct ieee80211_hw *hw, + bool buse_wake_on_wlan_fw + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl8821a_firmware_header *pfwheader; + u8 *pfwdata; + u32 fwsize; + int err; + enum version_8821ae version = rtlhal->version; + + if(!rtlhal->pfirmware) + return 1; + + pfwheader = (struct rtl8821a_firmware_header *)rtlhal->pfirmware; + pfwdata = (u8 *) rtlhal->pfirmware; + fwsize = rtlhal->fwsize; + RT_TRACE(COMP_FW, DBG_DMESG, + ("normal Firmware SIZE %d \n",fwsize)); + + if (IS_FW_HEADER_EXIST_8812(pfwheader) || IS_FW_HEADER_EXIST_8821(pfwheader)) { + RT_TRACE(COMP_FW, DBG_DMESG, + ("Firmware Version(%d), Signature(%#x),Size(%d)\n", + pfwheader->version, pfwheader->signature, + (int)sizeof(struct rtl8821a_firmware_header))); + + pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header); + fwsize = fwsize - sizeof(struct rtl8821a_firmware_header); + } + + if(rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)){ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + rtl8821ae_firmware_selfreset(hw); + } + _rtl8821ae_enable_fw_download(hw, true); + _rtl8821ae_write_fw(hw, version, pfwdata, fwsize); + _rtl8821ae_enable_fw_download(hw, false); + + err = _rtl8821ae_fw_free_to_go(hw); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Firmware is not ready to run!\n")); + } else { + RT_TRACE(COMP_FW, DBG_LOUD, + ("Firmware is ready to run!\n")); + } + + return 0; +} + +static bool _rtl8821ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val_hmetfr; + bool result = false; + + val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR); + if (((val_hmetfr >> boxnum) & BIT(0)) == 0) + result = true; + return result; +} + +static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *p_cmdbuffer) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 boxnum =0; + u16 box_reg = 0, box_extreg = 0; + u8 u1b_tmp = 0; + bool isfw_read = false; + u8 buf_index = 0; + bool bwrite_sucess = false; + u8 wait_h2c_limmit = 100; + /*u8 wait_writeh2c_limmit = 100;*/ + u8 boxcontent[4], boxextcontent[4]; + u32 h2c_waitcounter = 0; + unsigned long flag =0; + u8 idx =0; + + RT_TRACE(COMP_CMD, DBG_LOUD, ("come in\n")); + + while (true) { + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + if (rtlhal->b_h2c_setinprogress) { + RT_TRACE(COMP_CMD, DBG_LOUD, + ("H2C set in progress! Wait to set.." 
+ "element_id(%d).\n", element_id)); + + while (rtlhal->b_h2c_setinprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, + flag); + h2c_waitcounter++; + RT_TRACE(COMP_CMD, DBG_LOUD, + ("Wait 100 us (%d times)...\n", + h2c_waitcounter)); + udelay(100); + + if (h2c_waitcounter > 1000) + return; + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, + flag); + } + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + } else { + rtlhal->b_h2c_setinprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + break; + } + } + + while (!bwrite_sucess) { + /*cosa remove this because never reach this.*/ +#if 0 + wait_writeh2c_limmit--; + if (wait_writeh2c_limmit == 0) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Write H2C fail because no trigger " + "for FW INT!\n")); + break; + } +#endif + + boxnum = rtlhal->last_hmeboxnum; + switch (boxnum) { + case 0: + box_reg = REG_HMEBOX_0; + box_extreg = REG_HMEBOX_EXT_0; + break; + case 1: + box_reg = REG_HMEBOX_1; + box_extreg = REG_HMEBOX_EXT_1; + break; + case 2: + box_reg = REG_HMEBOX_2; + box_extreg = REG_HMEBOX_EXT_2; + break; + case 3: + box_reg = REG_HMEBOX_3; + box_extreg = REG_HMEBOX_EXT_3; + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + + isfw_read = false; + u1b_tmp = rtl_read_byte(rtlpriv, REG_CR); + + if (u1b_tmp != 0xEA) + isfw_read = true; + else { + if( rtl_read_byte(rtlpriv, REG_TXDMA_STATUS) == 0xEA || + rtl_read_byte(rtlpriv, REG_TXPKT_EMPTY) == 0xEA) + rtl_write_byte(rtlpriv, REG_SYS_CFG1 + 3, 0xFF); + } + + if (isfw_read == true) { + wait_h2c_limmit = 100; + isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum); + while (!isfw_read) { + /*wait until Fw read*/ + wait_h2c_limmit--; + if (wait_h2c_limmit == 0) { + RT_TRACE(COMP_CMD, DBG_LOUD, + ("Wating too long for FW read " + "clear HMEBox(%d)!\n", boxnum)); + break; + } + + udelay(10); + + isfw_read = _rtl8821ae_check_fw_read_last_h2c(hw, boxnum); + u1b_tmp = rtl_read_byte(rtlpriv, 0x130); + RT_TRACE(COMP_CMD, DBG_LOUD, + ("Wating for FW read clear HMEBox(%d)!!! " + "0x130 = %2x\n", boxnum, u1b_tmp)); + } + } + + if (!isfw_read) { + RT_TRACE(COMP_CMD, DBG_LOUD, + ("Write H2C register BOX[%d] fail!!!!! " + "Fw do not read. 
\n", boxnum)); + break; + } + + memset(boxcontent, 0, sizeof(boxcontent)); + memset(boxextcontent, 0, sizeof(boxextcontent)); + boxcontent[0] = element_id; + RT_TRACE(COMP_CMD, DBG_LOUD, + ("Write element_id box_reg(%4x) = %2x \n", + box_reg, element_id)); + + switch (cmd_len) { + case 1: + case 2: + case 3: + /*boxcontent[0] &= ~(BIT(7));*/ + memcpy((u8 *) (boxcontent) + 1, + p_cmdbuffer + buf_index, cmd_len); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + case 4: + case 5: + case 6: + case 7: + /*boxcontent[0] |= (BIT(7));*/ + memcpy((u8 *) (boxextcontent), + p_cmdbuffer + buf_index+3, cmd_len-3); + memcpy((u8 *) (boxcontent) + 1, + p_cmdbuffer + buf_index, 3); + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_extreg + idx, + boxextcontent[idx]); + } + + for (idx = 0; idx < 4; idx++) { + rtl_write_byte(rtlpriv, box_reg + idx, + boxcontent[idx]); + } + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + + bwrite_sucess = true; + + rtlhal->last_hmeboxnum = boxnum + 1; + if (rtlhal->last_hmeboxnum == 4) + rtlhal->last_hmeboxnum = 0; + + RT_TRACE(COMP_CMD, DBG_LOUD, + ("pHalData->last_hmeboxnum = %d\n", + rtlhal->last_hmeboxnum)); + } + + spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); + rtlhal->b_h2c_setinprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); + + RT_TRACE(COMP_CMD, DBG_LOUD, ("go out\n")); +} + +void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *p_cmdbuffer) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 tmp_cmdbuf[2]; + + if (rtlhal->bfw_ready == false) { + RT_ASSERT(false, ("return H2C cmd because of Fw " + "download fail!!!\n")); + return; + } + + memset(tmp_cmdbuf, 0, 8); + memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len); + _rtl8821ae_fill_h2c_command(hw, element_id, cmd_len, (u8 *) & tmp_cmdbuf); + + return; +} + +void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw) +{ + u8 u1b_tmp; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3)))); + }else { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(0)))); + } + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2)))); + udelay(50); + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(3))); + }else { + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp | BIT(0))); + } + + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2))); + + RT_TRACE(COMP_INIT, DBG_LOUD, (" _8051Reset8812ae(): 8051 reset success .\n")); + +} + +void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 u1_h2c_set_pwrmode[H2C_8821AE_PWEMODE_LENGTH] = { 0 }; + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + u8 rlbm,power_state = 0; + RT_TRACE(COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode)); + + SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0)); + rlbm = 0;/*YJ,temp,120316. 
FW now not support RLBM=2.*/ + SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm); + SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, (rtlpriv->mac80211.p2p) ? ppsc->smart_ps : 1); + SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode, ppsc->reg_max_lps_awakeintvl); + SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0); + if(mode == FW_PS_ACTIVE_MODE) + { + power_state |= FW_PWR_STATE_ACTIVE; + } + else + { + power_state |= FW_PWR_STATE_RF_OFF; + } + SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl92c_set_fw_pwrmode(): u1_h2c_set_pwrmode \n", + u1_h2c_set_pwrmode, H2C_8821AE_PWEMODE_LENGTH); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_SETPWRMODE, H2C_8821AE_PWEMODE_LENGTH, u1_h2c_set_pwrmode); + +} + +void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) +{ + u8 u1_joinbssrpt_parm[1] = { 0 }; + + SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_JOINBSSRPT, 1, u1_joinbssrpt_parm); +} + +void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 ap_offload_enable) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 u1_apoffload_parm[H2C_8821AE_AP_OFFLOAD_LENGTH] = { 0 }; + + SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable); + SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->bhiddenssid); + SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0); + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AP_OFFLOAD, H2C_8821AE_AP_OFFLOAD_LENGTH, u1_apoffload_parm); + +} + +static bool _rtl8821ae_cmd_send_packet(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + struct rtl_tx_desc *pdesc; + u8 own; + unsigned long flags; + struct sk_buff *pskb = NULL; + + ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + pskb = __skb_dequeue(&ring->queue); + if (pskb) + kfree_skb(pskb); + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + + pdesc = &ring->desc[0]; + own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); + + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); + + __skb_queue_tail(&ring->queue, skb); + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); + + return true; +} + +#define BEACON_PG 0 /* ->1 */ +#define PSPOLL_PG 2 +#define NULL_PG 3 +#define PROBERSP_PG 4 /* ->5 */ + +#define BEACON_PG_8812 0 +#define PSPOLL_PG_8812 1 +#define NULL_PG_8812 2 +#define PROBERSP_PG_8812 3 + +#define BEACON_PG_8821 0 +#define PSPOLL_PG_8821 1 +#define NULL_PG_8821 2 +#define PROBERSP_PG_8821 3 + +#define TOTAL_RESERVED_PKT_LEN_8812 2048 +#define TOTAL_RESERVED_PKT_LEN_8821 1024 + + +static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { + /* page 0 */ + 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0x00, 0x20, 0x04, 0x00, 0x06, 0x64, 0x6c, + 0x69, 0x6e, 0x6b, 0x31, 0x01, 0x08, 0x82, 0x84, + 0x8b, 0x96, 0x0c, 0x18, 0x30, 0x48, 0x03, 0x01, + 0x0b, 0x06, 0x02, 0x00, 0x00, 0x2a, 0x01, 0x8b, + 0x32, 0x04, 0x12, 0x24, 0x60, 0x6c, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 1 */ + 0xa4, 0x10, 0x01, 0xc0, 0x40, 0x16, 0x9f, 0x23, + 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 2 */ + 0x48, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23, + 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1a, 0x00, 0x28, 0x8c, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* page 3 */ + 0xc8, 0x01, 0x00, 0x00, 0x40, 0x16, 0x9f, 0x23, + 0xd4, 0x46, 0x00, 0xe0, 0x4c, 0x02, 0xe2, 0x64, + 0x40, 0x16, 0x9f, 0x23, 0xd4, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + + +static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { + 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5, + 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x64, 0x00, 0x30, 0x04, 0x00, 0x0C, 0x4E, 0x45, + 0x54, 0x47, 0x45, 0x41, 0x52, 0x5F, 0x31, 0x35, + 0x30, 0x4E, 0x01, 0x08, 0x82, 0x84, 0x8B, 0x96, + 0x0C, 0x12, 0x18, 0x24, 0x03, 0x01, 0x03, 0x06, + 0x02, 0x00, 0x00, 0x2A, 0x01, 0x8A, 0x32, 0x04, + 0x30, 0x48, 0x60, 0x6C, 0xDD, 0x18, 0x00, 0x50, + 0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x02, + 0x01, 0x00, 0x00, 0x50, 0xF2, 0x02, 0x01, 0x00, + 0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0xA4, 0x10, 0x02, 0xC0, 0xE0, 0x46, 0x9A, 0x57, + 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x48, 0x01, 0x00, 0x00, 0xE0, 0x46, 0x9A, 0x57, + 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5, + 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1A, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0xC8, 0x01, 0x00, 0x00, 0xE0, 0x46, 0x9A, 0x57, + 0x71, 0x30, 0x00, 0xE0, 0x4C, 0x02, 0x53, 0xE5, + 0xE0, 0x46, 0x9A, 0x57, 0x71, 0x30, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct sk_buff *skb = NULL; + + u32 totalpacketlen; + bool rtstatus; + u8 u1RsvdPageLoc[5] = { 0 }; + bool b_dlok = false; + + u8* beacon; + u8* p_pspoll; + u8* nullfunc; + u8* p_probersp; + /*--------------------------------------------------------- + (1) beacon + ---------------------------------------------------------*/ + beacon = &reserved_page_packet_8812[BEACON_PG_8812 * 512]; + SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); + SET_80211_HDR_ADDRESS3(beacon, mac->bssid); + + /*------------------------------------------------------- + (2) ps-poll + --------------------------------------------------------*/ + p_pspoll = &reserved_page_packet_8812[PSPOLL_PG_8812 * 512]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); + + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8812); + + /*-------------------------------------------------------- + (3) null data + ---------------------------------------------------------*/ + nullfunc = &reserved_page_packet_8812[NULL_PG_8812* 512]; + SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); + SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); + SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8812); + + /*--------------------------------------------------------- + (4) probe response + ----------------------------------------------------------*/ + p_probersp = &reserved_page_packet_8812[PROBERSP_PG_8812 * 512]; + SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); + SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8812); + + totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812; + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n", + &reserved_page_packet_8812[0], totalpacketlen); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n", + u1RsvdPageLoc, 3); + + + skb = dev_alloc_skb(totalpacketlen); + memcpy((u8 *) skb_put(skb, totalpacketlen), + &reserved_page_packet_8812, totalpacketlen); + + rtstatus = _rtl8821ae_cmd_send_packet(hw, skb); + + if (rtstatus) + b_dlok = true; + + if (b_dlok) { + RT_TRACE(COMP_POWER, DBG_LOUD, + ("Set RSVD page location to Fw.\n")); + RT_PRINT_DATA(rtlpriv, 
COMP_CMD, DBG_DMESG, + "H2C_RSVDPAGE:\n", + u1RsvdPageLoc, 3); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE, + sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + } else + RT_TRACE(COMP_ERR, DBG_WARNING, + ("Set RSVD page location to Fw FAIL!!!!!!.\n")); +} + +void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct sk_buff *skb = NULL; + + u32 totalpacketlen; + bool rtstatus; + u8 u1RsvdPageLoc[5] = { 0 }; + bool b_dlok = false; + + u8* beacon; + u8* p_pspoll; + u8* nullfunc; + u8* p_probersp; + /*--------------------------------------------------------- + (1) beacon + ---------------------------------------------------------*/ + beacon = &reserved_page_packet_8821[BEACON_PG_8821 * 256]; + SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr); + SET_80211_HDR_ADDRESS3(beacon, mac->bssid); + + /*------------------------------------------------------- + (2) ps-poll + --------------------------------------------------------*/ + p_pspoll = &reserved_page_packet_8821[PSPOLL_PG_8821 * 256]; + SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); + SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); + SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); + + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG_8821); + + /*-------------------------------------------------------- + (3) null data + ---------------------------------------------------------*/ + nullfunc = &reserved_page_packet_8821[NULL_PG_8821 * 256]; + SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); + SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); + SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG_8821); + + /*--------------------------------------------------------- + (4) probe response + ----------------------------------------------------------*/ + p_probersp = &reserved_page_packet_8821[PROBERSP_PG_8821 * 256]; + SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); + SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); + SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); + + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG_8821); + + totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821; + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n", + &reserved_page_packet_8821[0], totalpacketlen); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "rtl8821ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL \n", + u1RsvdPageLoc, 3); + + + skb = dev_alloc_skb(totalpacketlen); + memcpy((u8 *) skb_put(skb, totalpacketlen), + &reserved_page_packet_8821, totalpacketlen); + + rtstatus = _rtl8821ae_cmd_send_packet(hw, skb); + + if (rtstatus) + b_dlok = true; + + if (b_dlok) { + RT_TRACE(COMP_POWER, DBG_LOUD, + ("Set RSVD page location to Fw.\n")); + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, + "H2C_RSVDPAGE:\n", + u1RsvdPageLoc, 3); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE, + sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + } else + RT_TRACE(COMP_ERR, DBG_WARNING, + ("Set RSVD page location to Fw FAIL!!!!!!.\n")); +} + +/*Shoud check FW support p2p or not.*/ +void rtl8821ae_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw, u8 ctwindow) +{ + u8 u1_ctwindow_period[1] ={ ctwindow}; + + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_CTW_CMD, 1, u1_ctwindow_period); + +} + +void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw)); 
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+	struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+	u8 i;
+	u16 ctwindow;
+	u32 start_time, tsf_low;
+
+	switch(p2p_ps_state)
+	{
+	case P2P_PS_DISABLE:
+		RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_DISABLE \n"));
+		memset(p2p_ps_offload, 0, 1);
+		break;
+	case P2P_PS_ENABLE:
+		RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_ENABLE \n"));
+		/* update CTWindow value. */
+		if( p2pinfo->ctwindow > 0 )
+		{
+			p2p_ps_offload->CTWindow_En = 1;
+			ctwindow = p2pinfo->ctwindow;
+			rtl8821ae_set_p2p_ctw_period_cmd(hw, ctwindow);
+		}
+
+		/* hw only support 2 set of NoA */
+		for( i=0 ; i<p2pinfo->noa_num ; i++)
+		{
+			/* To control the register setting for which NOA*/
+			rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+			if(i == 0)
+				p2p_ps_offload->NoA0_En = 1;
+			else
+				p2p_ps_offload->NoA1_En = 1;
+
+			/* config P2P NoA Descriptor Register */
+			rtl_write_dword(rtlpriv, 0x5E0, p2pinfo->noa_duration[i]);
+			rtl_write_dword(rtlpriv, 0x5E4, p2pinfo->noa_interval[i]);
+
+			/*Get Current TSF value */
+			tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+			start_time = p2pinfo->noa_start_time[i];
+			if(p2pinfo->noa_count_type[i] != 1)
+			{
+				while( start_time <= (tsf_low+(50*1024) ) ) {
+					start_time += p2pinfo->noa_interval[i];
+					if(p2pinfo->noa_count_type[i] != 255)
+						p2pinfo->noa_count_type[i]--;
+				}
+			}
+			rtl_write_dword(rtlpriv, 0x5E8, start_time);
+			rtl_write_dword(rtlpriv, 0x5EC, p2pinfo->noa_count_type[i] );
+
+		}
+
+		if( (p2pinfo->opp_ps == 1) || (p2pinfo->noa_num > 0) )
+		{
+			/* rst p2p circuit */
+			rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+			p2p_ps_offload->Offload_En = 1;
+
+			if(P2P_ROLE_GO == rtlpriv->mac80211.p2p)
+			{
+				p2p_ps_offload->role= 1;
+				p2p_ps_offload->AllStaSleep = 0;
+			}
+			else
+			{
+				p2p_ps_offload->role= 0;
+			}
+
+			p2p_ps_offload->discovery = 0;
+		}
+		break;
+	case P2P_PS_SCAN:
+		RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_SCAN \n"));
+		p2p_ps_offload->discovery = 1;
+		break;
+	case P2P_PS_SCAN_DONE:
+		RT_TRACE(COMP_FW, DBG_LOUD,("P2P_PS_SCAN_DONE \n"));
+		p2p_ps_offload->discovery = 0;
+		p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+		break;
+	default:
+		break;
+	}
+
+	rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
+
+}
+
+void rtl8812ae_c2h_ra_report_handler(
+	struct ieee80211_hw *hw,
+	u8 *cmd_buf,
+	u8 cmd_len
+)
+{
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 rate = cmd_buf[0] & 0x3F;
+
+	rtlhal->current_ra_rate= rtl8812ae_hw_rate_to_mrate(hw, rate);
+
+	rtl8812ae_dm_update_init_rate(hw, rate);
+}
+
+
+void _rtl8812ae_c2h_content_parsing(
+	struct ieee80211_hw *hw,
+	u8 c2h_cmd_id,
+	u8 c2h_cmd_len,
+	u8 *tmp_buf
+)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	switch (c2h_cmd_id) {
+	case C2H_8812_DBG:
+		RT_TRACE(COMP_FW, DBG_LOUD,("[C2H], C2H_8812_DBG!!\n"));
+		break;
+
+	case C2H_8812_RA_RPT:
+		rtl8812ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len);
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+void rtl8812ae_c2h_packet_handler(
+	struct ieee80211_hw *hw,
+	u8 *buffer,
+	u8 length
+	)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u8 c2h_cmd_id=0, c2h_cmd_seq=0, c2h_cmd_len=0;
+	u8 *tmp_buf=NULL;
+
+	c2h_cmd_id = buffer[0];
+	c2h_cmd_seq = buffer[1];
+	c2h_cmd_len = length -2;
+	tmp_buf = buffer + 2;
+
+	RT_TRACE(COMP_FW, DBG_LOUD,
+		("[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n",
+		c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len));
+
+	RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD,
+		"[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
+
_rtl8812ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); +} + + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/fw.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/fw.h @@ -0,0 +1,321 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE__FW__H__ +#define __RTL8821AE__FW__H__ + +#define FW_8821AE_SIZE 0x8000 +#define FW_8821AE_START_ADDRESS 0x1000 +#define FW_8821AE_END_ADDRESS 0x5FFF +#define FW_8821AE_PAGE_SIZE 4096 +#define FW_8821AE_POLLING_DELAY 5 +#define FW_8821AE_POLLING_TIMEOUT_COUNT 6000 + +#define IS_FW_HEADER_EXIST_8812(_pfwhdr) \ + ((_pfwhdr->signature&0xFFF0) == 0x9500 ) + +#define IS_FW_HEADER_EXIST_8821(_pfwhdr) \ + ((_pfwhdr->signature&0xFFF0) == 0x2100 ) + +#define USE_OLD_WOWLAN_DEBUG_FW 0 + +#define H2C_8821AE_RSVDPAGE_LOC_LEN 5 +#define H2C_8821AE_PWEMODE_LENGTH 5 +#define H2C_8821AE_JOINBSSRPT_LENGTH 1 +#define H2C_8821AE_AP_OFFLOAD_LENGTH 3 +#define H2C_8821AE_WOWLAN_LENGTH 3 +#define H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH 3 +#if(USE_OLD_WOWLAN_DEBUG_FW == 0) +#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 1 +#else +#define H2C_8821AE_REMOTE_WAKE_CTRL_LEN 3 +#endif +#define H2C_8821AE_AOAC_GLOBAL_INFO_LEN 2 +#define H2C_8821AE_AOAC_RSVDPAGE_LOC_LEN 7 + + +/* Fw PS state for RPWM. 
+*BIT[2:0] = HW state +*BIT[3] = Protocol PS state, 1: register active state , 0: register sleep state +*BIT[4] = sub-state +*/ +#define FW_PS_GO_ON BIT(0) +#define FW_PS_TX_NULL BIT(1) +#define FW_PS_RF_ON BIT(2) +#define FW_PS_REGISTER_ACTIVE BIT(3) + +#define FW_PS_DPS BIT(0) +#define FW_PS_LCLK (FW_PS_DPS) +#define FW_PS_RF_OFF BIT(1) +#define FW_PS_ALL_ON BIT(2) +#define FW_PS_ST_ACTIVE BIT(3) +#define FW_PS_ISR_ENABLE BIT(4) +#define FW_PS_IMR_ENABLE BIT(5) + + +#define FW_PS_ACK BIT(6) +#define FW_PS_TOGGLE BIT(7) + + /* 8821AE RPWM value*/ + /* BIT[0] = 1: 32k, 0: 40M*/ +#define FW_PS_CLOCK_OFF BIT(0) /* 32k*/ +#define FW_PS_CLOCK_ON 0 /*40M*/ + +#define FW_PS_STATE_MASK (0x0F) +#define FW_PS_STATE_HW_MASK (0x07) +#define FW_PS_STATE_INT_MASK (0x3F) /*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/ + +#define FW_PS_STATE(x) (FW_PS_STATE_MASK & (x)) +#define FW_PS_STATE_HW(x) (FW_PS_STATE_HW_MASK & (x)) +#define FW_PS_STATE_INT(x) (FW_PS_STATE_INT_MASK & (x)) +#define FW_PS_ISR_VAL(x) ((x) & 0x70) +#define FW_PS_IMR_MASK(x) ((x) & 0xDF) +#define FW_PS_KEEP_IMR(x) ((x) & 0x20) + + +#define FW_PS_STATE_S0 (FW_PS_DPS) +#define FW_PS_STATE_S1 (FW_PS_LCLK) +#define FW_PS_STATE_S2 (FW_PS_RF_OFF) +#define FW_PS_STATE_S3 (FW_PS_ALL_ON) +#define FW_PS_STATE_S4 ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON)) + +#define FW_PS_STATE_ALL_ON_8821AE (FW_PS_CLOCK_ON) /* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/ +#define FW_PS_STATE_RF_ON_8821AE (FW_PS_CLOCK_ON) /* (FW_PS_RF_ON)*/ +#define FW_PS_STATE_RF_OFF_8821AE (FW_PS_CLOCK_ON) /* 0x0*/ +#define FW_PS_STATE_RF_OFF_LOW_PWR_8821AE (FW_PS_CLOCK_OFF) /* (FW_PS_STATE_RF_OFF)*/ + +#define FW_PS_STATE_ALL_ON_92C (FW_PS_STATE_S4) +#define FW_PS_STATE_RF_ON_92C (FW_PS_STATE_S3) +#define FW_PS_STATE_RF_OFF_92C (FW_PS_STATE_S2) +#define FW_PS_STATE_RF_OFF_LOW_PWR_92C (FW_PS_STATE_S1) + + +/* For 8821AE H2C PwrMode Cmd ID 5.*/ +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +#define FW_PS_IS_ACK(x) ((x) & FW_PS_ACK ) +#define FW_PS_IS_CLK_ON(x) ((x) & (FW_PS_RF_OFF |FW_PS_ALL_ON )) +#define FW_PS_IS_RF_ON(x) ((x) & (FW_PS_ALL_ON)) +#define FW_PS_IS_ACTIVE(x) ((x) & (FW_PS_ST_ACTIVE)) +#define FW_PS_IS_CPWM_INT(x) ((x) & 0x40) + +#define FW_CLR_PS_STATE(x) ((x) = ((x) & (0xF0))) + +#define IS_IN_LOW_POWER_STATE_8821AE(FwPSState) \ + (FW_PS_STATE(FwPSState) == FW_PS_CLOCK_OFF) + +#define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) +#define FW_PWR_STATE_RF_OFF 0 + +struct rtl8821a_firmware_header { + u16 signature; + u8 category; + u8 function; + u16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + u16 ramcodeSize; + u16 rsvd2; + u32 svnindex; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; +}; + +enum rtl8812_c2h_evt{ + C2H_8812_DBG = 0, + C2H_8812_LB = 1, + C2H_8812_TXBF = 2, + C2H_8812_TX_REPORT = 3, + C2H_8812_BT_INFO = 9, + C2H_8812_BT_MP = 11, + C2H_8812_RA_RPT=12, + + C2H_8812_FW_SWCHNL = 0x10, + C2H_8812_IQK_FINISH = 0x11, + MAX_8812_C2HEVENT +}; + +enum rtl8821a_h2c_cmd { + H2C_8821AE_RSVDPAGE = 0, + H2C_8821AE_JOINBSSRPT = 1, + H2C_8821AE_SCAN = 2, + H2C_8821AE_KEEP_ALIVE_CTRL = 3, + H2C_8821AE_DISCONNECT_DECISION = 4, +#if(USE_OLD_WOWLAN_DEBUG_FW == 1) + H2C_8821AE_WO_WLAN = 5, +#endif + H2C_8821AE_INIT_OFFLOAD = 6, +#if(USE_OLD_WOWLAN_DEBUG_FW == 1) + H2C_8821AE_REMOTE_WAKE_CTRL = 7, +#endif + H2C_8821AE_AP_OFFLOAD = 8, + H2C_8821AE_BCN_RSVDPAGE = 9, + H2C_8821AE_PROBERSP_RSVDPAGE = 10, + + H2C_8821AE_SETPWRMODE = 0x20, + 
H2C_8821AE_PS_TUNING_PARA = 0x21, + H2C_8821AE_PS_TUNING_PARA2 = 0x22, + H2C_8821AE_PS_LPS_PARA = 0x23, + H2C_8821AE_P2P_PS_OFFLOAD = 024, + +#if(USE_OLD_WOWLAN_DEBUG_FW == 0) + H2C_8821AE_WO_WLAN = 0x80, + H2C_8821AE_REMOTE_WAKE_CTRL = 0x81, + H2C_8821AE_AOAC_GLOBAL_INFO = 0x82, + H2C_8821AE_AOAC_RSVDPAGE = 0x83, +#endif + H2C_RSSI_REPORT = 0x42, + H2C_8821AE_RA_MASK = 0x40, + H2C_8821AE_SELECTIVE_SUSPEND_ROF_CMD, + H2C_8821AE_P2P_PS_MODE, + H2C_8821AE_PSD_RESULT, + /*Not defined CTW CMD for P2P yet*/ + H2C_8821AE_P2P_PS_CTW_CMD, + MAX_8821AE_H2CCMD +}; + +#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1:0)) + +#define SET_8821AE_H2CCMD_WOWLAN_FUNC_ENABLE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_ALL_PKT_DROP(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 4, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_GPIO_ACTIVE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 5, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_REKEY_WAKE_UP(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 6, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 7, 1, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_GPIONUM(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value) +#define SET_8821AE_H2CCMD_WOWLAN_GPIO_DURATION(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value) + + +#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_PWRMODE_PARM_RLBM(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 4, __Value) +#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 4, 4, __Value) +#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value) +#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value) +#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value) +#define GET_8821AE_H2CCMD_PWRMODE_PARM_MODE(__pH2CCmd) \ + LE_BITS_TO_1BYTE(__pH2CCmd, 0, 8) + +#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val) +#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ + SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val) + +/* AP_OFFLOAD */ +#define SET_H2CCMD_AP_OFFLOAD_ON(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value) +#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value) +#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value) +#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, 
__Value) + +/* Keep Alive Control*/ +#define SET_8821AE_H2CCMD_KEEP_ALIVE_ENABLE(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value) +#define SET_8821AE_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value) +#define SET_8821AE_H2CCMD_KEEP_ALIVE_PERIOD(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value) + +/*REMOTE_WAKE_CTRL */ +#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_EN(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 1, __Value) +#if(USE_OLD_WOWLAN_DEBUG_FW == 0) +#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 1, 1, __Value) +#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 2, 1, __Value) +#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 3, 1, __Value) +#else +#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value) +#define SET_8821AE_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value) +#endif + +/* GTK_OFFLOAD */ +#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE(__pH2CCmd, 0, 8, __Value) +#define SET_8821AE_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value) + +/* AOAC_RSVDPAGE_LOC */ +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd), 0, 8, __Value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+1, 0, 8, __Value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+2, 0, 8, __Value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+3, 0, 8, __Value) +#define SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__pH2CCmd, __Value) \ + SET_BITS_TO_LE_1BYTE((__pH2CCmd)+4, 0, 8, __Value) + +int rtl8821ae_download_fw(struct ieee80211_hw *hw, + bool buse_wake_on_wlan_fw); +void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); +void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw); +void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl8821ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +void rtl8821ae_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw, u8 ap_offload_enable); +void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); +void rtl8812ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 length); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.c @@ -0,0 +1,519 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "hal_bt_coexist.h" +#include "../pci.h" +#include "dm.h" +#include "fw.h" +#include "phy.h" +#include "reg.h" +#include "hal_btc.h" + +static bool bt_operation_on = false; + +void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw, bool b_reject) +{ +#if 0 + struct rtl_priv rtlpriv = rtl_priv(hw); + PRX_TS_RECORD pRxTs = NULL; + + if(b_reject){ + // Do not allow receiving A-MPDU aggregation. + if (rtlpriv->mac80211.vendor == PEER_CISCO) { + if (pHTInfo->bAcceptAddbaReq) { + RTPRINT(FBT, BT_TRACE, ("BT_Disallow AMPDU \n")); + pHTInfo->bAcceptAddbaReq = FALSE; + if(GetTs(Adapter, (PTS_COMMON_INFO*)(&pRxTs), pMgntInfo->Bssid, 0, RX_DIR, FALSE)) + TsInitDelBA(Adapter, (PTS_COMMON_INFO)pRxTs, RX_DIR); + } + } else { + if (!pHTInfo->bAcceptAddbaReq) { + RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU BT Idle\n")); + pHTInfo->bAcceptAddbaReq = TRUE; + } + } + } else { + if(rtlpriv->mac80211.vendor == PEER_CISCO) { + if (!pHTInfo->bAcceptAddbaReq) { + RTPRINT(FBT, BT_TRACE, ("BT_Allow AMPDU \n")); + pHTInfo->bAcceptAddbaReq = TRUE; + } + } + } +#endif +} + +void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw) +{ +struct rtl_priv *rtlpriv = rtl_priv(hw); +struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); +struct rtl_phy *rtlphy = &(rtlpriv->phy); + +if (rtlpriv->link_info.b_busytraffic) { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_IDLE; + + if(rtlpriv->link_info.b_tx_busy_traffic) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_UPLINK; + } else { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK; + } + + if(rtlpriv->link_info.b_rx_busy_traffic) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_DOWNLINK; + } else { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK; + } +} else { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_IDLE; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_UPLINK; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_DOWNLINK; +} + +if (rtlpriv->mac80211.mode == WIRELESS_MODE_G + || rtlpriv->mac80211.mode == WIRELESS_MODE_B) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_LEGACY; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT40; +} else { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_LEGACY; + if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT40; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_HT20; + } else { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_HT20; + rtlpcipriv->btcoexist.current_state &= 
~BT_COEX_STATE_WIFI_HT40; + } +} + +if (bt_operation_on) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT30; +} else { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT30; +} +} + + +u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) + +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + long undecoratedsmoothed_pwdb = 0; + u8 bt_rssi_state = 0; + + undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw); + + if(level_num == 2) { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + + if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) || + (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) { + if(undecoratedsmoothed_pwdb >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) { + bt_rssi_state = BT_RSSI_STATE_HIGH; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n")); + } + } else { + if(undecoratedsmoothed_pwdb < rssi_thresh) { + bt_rssi_state = BT_RSSI_STATE_LOW; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n")); + } + } + } else if(level_num == 3) { + if(rssi_thresh > rssi_thresh1) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 thresh error!!\n")); + return rtlpcipriv->btcoexist.bt_pre_rssi_state; + } + + if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) || + (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) { + if(undecoratedsmoothed_pwdb >= (rssi_thresh+BT_FW_COEX_THRESH_TOL)) { + bt_rssi_state = BT_RSSI_STATE_MEDIUM; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Medium\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Low\n")); + } + } else if( (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) || + (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) { + if(undecoratedsmoothed_pwdb >= (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) { + bt_rssi_state = BT_RSSI_STATE_HIGH; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to High\n")); + } else if(undecoratedsmoothed_pwdb < rssi_thresh) { + bt_rssi_state = BT_RSSI_STATE_LOW; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_LOW; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + 
RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state switch to Low\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at Medium\n")); + } + } else { + if(undecoratedsmoothed_pwdb < rssi_thresh1) { + bt_rssi_state = BT_RSSI_STATE_MEDIUM; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_WIFI_RSSI_1_MEDIUM; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_HIGH; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_1_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE,("[DM][BT], RSSI_1 state switch to Medium\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[DM][BT], RSSI_1 state stay at High\n")); + } + } + } + + rtlpcipriv->btcoexist.bt_pre_rssi_state1 = bt_rssi_state; + + return bt_rssi_state; +} + +u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw, + u8 level_num, u8 rssi_thresh, u8 rssi_thresh1) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + long undecoratedsmoothed_pwdb = 0; + u8 bt_rssi_state = 0; + + undecoratedsmoothed_pwdb = rtl8821ae_dm_bt_get_rx_ss(hw); + + if (level_num == 2) { + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; + + if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) || + (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)){ + if (undecoratedsmoothed_pwdb + >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) { + bt_rssi_state = BT_RSSI_STATE_HIGH; + rtlpcipriv->btcoexist.current_state + |= BT_COEX_STATE_WIFI_RSSI_HIGH; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state switch to High\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state stay at Low\n")); + } + } else { + if (undecoratedsmoothed_pwdb < rssi_thresh) { + bt_rssi_state = BT_RSSI_STATE_LOW; + rtlpcipriv->btcoexist.current_state + |= BT_COEX_STATE_WIFI_RSSI_LOW; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state switch to Low\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state stay at High\n")); + } + } + } + else if (level_num == 3) { + if (rssi_thresh > rssi_thresh1) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI thresh error!!\n")); + return rtlpcipriv->btcoexist.bt_pre_rssi_state; + } + if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_LOW) || + (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_LOW)) { + if(undecoratedsmoothed_pwdb + >= (rssi_thresh + BT_FW_COEX_THRESH_TOL)) { + bt_rssi_state = BT_RSSI_STATE_MEDIUM; + rtlpcipriv->btcoexist.current_state + |= BT_COEX_STATE_WIFI_RSSI_MEDIUM; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_LOW; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state switch to Medium\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state stay at Low\n")); + } + } else if ((rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_MEDIUM) || + (rtlpcipriv->btcoexist.bt_pre_rssi_state == BT_RSSI_STATE_STAY_MEDIUM)) { + if (undecoratedsmoothed_pwdb + >= (rssi_thresh1 + 
BT_FW_COEX_THRESH_TOL)) { + bt_rssi_state = BT_RSSI_STATE_HIGH; + rtlpcipriv->btcoexist.current_state + |= BT_COEX_STATE_WIFI_RSSI_HIGH; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_LOW; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state switch to High\n")); + } else if(undecoratedsmoothed_pwdb < rssi_thresh) + { + bt_rssi_state = BT_RSSI_STATE_LOW; + rtlpcipriv->btcoexist.current_state + |= BT_COEX_STATE_WIFI_RSSI_LOW; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state switch to Low\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state stay at Medium\n")); + } + } else { + if(undecoratedsmoothed_pwdb < rssi_thresh1) { + bt_rssi_state = BT_RSSI_STATE_MEDIUM; + rtlpcipriv->btcoexist.current_state + |= BT_COEX_STATE_WIFI_RSSI_MEDIUM; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_HIGH; + rtlpcipriv->btcoexist.current_state + &= ~BT_COEX_STATE_WIFI_RSSI_LOW; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state switch to Medium\n")); + } else { + bt_rssi_state = BT_RSSI_STATE_STAY_HIGH; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], RSSI state stay at High\n")); + } + } + } + + rtlpcipriv->btcoexist.bt_pre_rssi_state = bt_rssi_state; + return bt_rssi_state; +} +long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + long undecoratedsmoothed_pwdb = 0; + + if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + undecoratedsmoothed_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv); + } else { + undecoratedsmoothed_pwdb + = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + } + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_get_rx_ss() = %ld\n", undecoratedsmoothed_pwdb)); + + return undecoratedsmoothed_pwdb; +} + +void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw, + bool b_balance_on, u8 ms0, u8 ms1) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[3] ={0}; + + if (b_balance_on) { + h2c_parameter[2] = 1; + h2c_parameter[1] = ms1; + h2c_parameter[0] = ms0; + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } else { + h2c_parameter[2] = 0; + h2c_parameter[1] = 0; + h2c_parameter[0] = 0; + } + rtlpcipriv->btcoexist.b_balance_on = b_balance_on; + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n", + b_balance_on?"ON":"OFF", ms0, ms1, + h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2])); + + rtl8821ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter); +} + + +void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + if (type == BT_AGCTABLE_OFF) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable Off!\n")); + rtl_write_dword(rtlpriv, 0xc78,0x641c0001); + rtl_write_dword(rtlpriv, 0xc78,0x631d0001); + rtl_write_dword(rtlpriv, 0xc78,0x621e0001); + rtl_write_dword(rtlpriv, 0xc78,0x611f0001); + rtl_write_dword(rtlpriv, 0xc78,0x60200001); + + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0x32000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0x71000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + 
RF_RX_AGC_HP, 0xfffff, 0xb0000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0xfc000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_G1, 0xfffff, 0x30355); + } else if (type == BT_AGCTABLE_ON) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]AGCTable On!\n")); + rtl_write_dword(rtlpriv, 0xc78,0x4e1c0001); + rtl_write_dword(rtlpriv, 0xc78,0x4d1d0001); + rtl_write_dword(rtlpriv, 0xc78,0x4c1e0001); + rtl_write_dword(rtlpriv, 0xc78,0x4b1f0001); + rtl_write_dword(rtlpriv, 0xc78,0x4a200001); + + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0xdc000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0x90000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0x51000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_AGC_HP, 0xfffff, 0x12000); + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, + RF_RX_G1, 0xfffff, 0x00355); + + rtlpcipriv->btcoexist.b_sw_coexist_all_off = false; + } +} + +void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + if (type == BT_BB_BACKOFF_OFF) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel Off!\n")); + rtl_write_dword(rtlpriv, 0xc04,0x3a05611); + } else if (type == BT_BB_BACKOFF_ON) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BT]BBBackOffLevel On!\n")); + rtl_write_dword(rtlpriv, 0xc04,0x3a07611); + rtlpcipriv->btcoexist.b_sw_coexist_all_off = false; + } +} + +void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_fw_coex_all_off()\n")); + + if(rtlpcipriv->btcoexist.b_fw_coexist_all_off) + return; + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_fw_coex_all_off(), real Do\n")); + rtl8821ae_dm_bt_fw_coex_all_off_8723a(hw); + rtlpcipriv->btcoexist.b_fw_coexist_all_off = true; +} + +void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_sw_coex_all_off()\n")); + + if(rtlpcipriv->btcoexist.b_sw_coexist_all_off) + return; + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_sw_coex_all_off(), real Do\n")); + rtl8821ae_dm_bt_sw_coex_all_off_8723a(hw); + rtlpcipriv->btcoexist.b_sw_coexist_all_off = true; +} + +void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_hw_coex_all_off()\n")); + + if(rtlpcipriv->btcoexist.b_hw_coexist_all_off) + return; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("rtl8821ae_dm_bt_hw_coex_all_off(), real Do\n")); + + rtl8821ae_dm_bt_hw_coex_all_off_8723a(hw); + + rtlpcipriv->btcoexist.b_hw_coexist_all_off = true; +} + +void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw) +{ + rtl8821ae_dm_bt_fw_coex_all_off(hw); + rtl8821ae_dm_bt_sw_coex_all_off(hw); + rtl8821ae_dm_bt_hw_coex_all_off(hw); +} + +bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + if((rtlpcipriv->btcoexist.previous_state + == rtlpcipriv->btcoexist.current_state) + && (rtlpcipriv->btcoexist.previous_state_h + == rtlpcipriv->btcoexist.current_state_h)) + return false; + else + 
return true; +} + +bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->link_info.b_tx_busy_traffic) + return true; + else + return false; +} + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/hal_bt_coexist.h @@ -0,0 +1,169 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_HAL_BT_COEXIST_H__ +#define __RTL8821AE_HAL_BT_COEXIST_H__ + +#include "../wifi.h" + +/* The reg define is for 8723 */ +#define REG_HIGH_PRIORITY_TXRX 0x770 +#define REG_LOW_PRIORITY_TXRX 0x774 + +#define BT_FW_COEX_THRESH_TOL 6 +#define BT_FW_COEX_THRESH_20 20 +#define BT_FW_COEX_THRESH_23 23 +#define BT_FW_COEX_THRESH_25 25 +#define BT_FW_COEX_THRESH_30 30 +#define BT_FW_COEX_THRESH_35 35 +#define BT_FW_COEX_THRESH_40 40 +#define BT_FW_COEX_THRESH_45 45 +#define BT_FW_COEX_THRESH_47 47 +#define BT_FW_COEX_THRESH_50 50 +#define BT_FW_COEX_THRESH_55 55 + +#define BT_COEX_STATE_BT30 BIT(0) +#define BT_COEX_STATE_WIFI_HT20 BIT(1) +#define BT_COEX_STATE_WIFI_HT40 BIT(2) +#define BT_COEX_STATE_WIFI_LEGACY BIT(3) + +#define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4) +#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5) +#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6) +#define BT_COEX_STATE_DEC_BT_POWER BIT(7) + +#define BT_COEX_STATE_WIFI_IDLE BIT(8) +#define BT_COEX_STATE_WIFI_UPLINK BIT(9) +#define BT_COEX_STATE_WIFI_DOWNLINK BIT(10) + +#define BT_COEX_STATE_BT_INQ_PAGE BIT(11) +#define BT_COEX_STATE_BT_IDLE BIT(12) +#define BT_COEX_STATE_BT_UPLINK BIT(13) +#define BT_COEX_STATE_BT_DOWNLINK BIT(14) + +#define BT_COEX_STATE_HOLD_FOR_BT_OPERATION BIT(15) +#define BT_COEX_STATE_BT_RSSI_LOW BIT(19) + +#define BT_COEX_STATE_PROFILE_HID BIT(20) +#define BT_COEX_STATE_PROFILE_A2DP BIT(21) +#define BT_COEX_STATE_PROFILE_PAN BIT(22) +#define BT_COEX_STATE_PROFILE_SCO BIT(23) + +#define BT_COEX_STATE_WIFI_RSSI_1_LOW BIT(24) +#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25) +#define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26) + +#define BT_COEX_STATE_BTINFO_COMMON BIT(30) +#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT(31) +#define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT(29) + +#define BT_COEX_STATE_BT_CNT_LEVEL_0 BIT(0) +#define BT_COEX_STATE_BT_CNT_LEVEL_1 BIT(1) +#define BT_COEX_STATE_BT_CNT_LEVEL_2 BIT(2) +#define BT_COEX_STATE_BT_CNT_LEVEL_3 BIT(3) + +#define BT_RSSI_STATE_HIGH 0 +#define 
BT_RSSI_STATE_MEDIUM 1
+#define BT_RSSI_STATE_LOW 2
+#define BT_RSSI_STATE_STAY_HIGH 3
+#define BT_RSSI_STATE_STAY_MEDIUM 4
+#define BT_RSSI_STATE_STAY_LOW 5
+
+#define BT_AGCTABLE_OFF 0
+#define BT_AGCTABLE_ON 1
+#define BT_BB_BACKOFF_OFF 0
+#define BT_BB_BACKOFF_ON 1
+#define BT_FW_NAV_OFF 0
+#define BT_FW_NAV_ON 1
+
+#define BT_COEX_MECH_NONE 0
+#define BT_COEX_MECH_SCO 1
+#define BT_COEX_MECH_HID 2
+#define BT_COEX_MECH_A2DP 3
+#define BT_COEX_MECH_PAN 4
+#define BT_COEX_MECH_HID_A2DP 5
+#define BT_COEX_MECH_HID_PAN 6
+#define BT_COEX_MECH_PAN_A2DP 7
+#define BT_COEX_MECH_HID_SCO_ESCO 8
+#define BT_COEX_MECH_FTP_A2DP 9
+#define BT_COEX_MECH_COMMON 10
+#define BT_COEX_MECH_MAX 11
+
+#define BT_DBG_PROFILE_NONE 0
+#define BT_DBG_PROFILE_SCO 1
+#define BT_DBG_PROFILE_HID 2
+#define BT_DBG_PROFILE_A2DP 3
+#define BT_DBG_PROFILE_PAN 4
+#define BT_DBG_PROFILE_HID_A2DP 5
+#define BT_DBG_PROFILE_HID_PAN 6
+#define BT_DBG_PROFILE_PAN_A2DP 7
+#define BT_DBG_PROFILE_MAX 9
+
+#define BTINFO_B_FTP BIT(7)
+#define BTINFO_B_A2DP BIT(6)
+#define BTINFO_B_HID BIT(5)
+#define BTINFO_B_SCO_BUSY BIT(4)
+#define BTINFO_B_ACL_BUSY BIT(3)
+#define BTINFO_B_INQ_PAGE BIT(2)
+#define BTINFO_B_SCO_ESCO BIT(1)
+#define BTINFO_B_CONNECTION BIT(0)
+
+
+void rtl8821ae_btdm_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw);
+
+void rtl8821ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
+long rtl8821ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_balance(struct ieee80211_hw *hw,
+ bool b_balance_on, u8 ms0, u8 ms1);
+void rtl8821ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
+void rtl8821ae_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
+u8 rtl8821ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
+ u8 level_num, u8 rssi_thresh, u8 rssi_thresh1);
+void _rtl8821ae_dm_bt_check_wifi_state(struct ieee80211_hw *hw);
+void rtl8821ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
+ bool b_reject);
+
+#if 0
+VOID
+BTDM_PWDBMonitor(
+ PADAPTER Adapter
+ );
+
+BOOLEAN
+BTDM_DIGByBTRSSI(
+ PADAPTER Adapter
+ );
+#endif
+bool rtl8821ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw);
+bool rtl8821ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw);
+#endif
+
--- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
+++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.c
@@ -0,0 +1,2069 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ +#include "hal_btc.h" +#include "../pci.h" +#include "phy.h" +#include "fw.h" +#include "reg.h" +#include "def.h" +#include "../btcoexist/rtl_btc.h" + +static struct bt_coexist_8821ae hal_coex_8821ae; + +void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if(!rtlpcipriv->btcoexist.bt_coexistence) + return; + + if(ppsc->b_inactiveps) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BT][DM], Before enter IPS, turn off all Coexist DM\n")); + rtlpcipriv->btcoexist.current_state = 0; + rtlpcipriv->btcoexist.previous_state = 0; + rtlpcipriv->btcoexist.current_state_h = 0; + rtlpcipriv->btcoexist.previous_state_h = 0; + rtl8821ae_btdm_coex_all_off(hw); + } +} + + +enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum rt_media_status m_status = RT_MEDIA_DISCONNECT; + + u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0; + + if(bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED) { + m_status = RT_MEDIA_CONNECT; + } + + return m_status; +} + +void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw *hw, bool mstatus) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 h2c_parameter[3] ={0}; + u8 chnl; + + if(!rtlpcipriv->btcoexist.bt_coexistence) + return; + + if(RT_MEDIA_CONNECT == mstatus) + h2c_parameter[0] = 0x1; // 0: disconnected, 1:connected + else + h2c_parameter[0] = 0x0; + + if(mgnt_link_status_query(hw)) { + chnl = rtlphy->current_channel; + h2c_parameter[1] = chnl; + } + + if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40){ + h2c_parameter[2] = 0x30; + } else { + h2c_parameter[2] = 0x20; + } + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("[BTCoex], FW write 0x19=0x%x\n", + h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2])); + + rtl8821ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter); + +} + + +bool rtl8821ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if(rtlpriv->link_info.b_busytraffic || + rtlpriv->link_info.b_rx_busy_traffic || + rtlpriv->link_info.b_tx_busy_traffic) + return true; + else + return false; +} +void rtl8821ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw, + u8 byte1, u8 byte2, u8 byte3, u8 byte4, u8 byte5) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[5] ={0}; + h2c_parameter[0] = byte1; + h2c_parameter[1] = byte2; + h2c_parameter[2] = byte3; + h2c_parameter[3] = byte4; + h2c_parameter[4] = byte5; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], FW write 0x3a(4bytes)=0x%x%8x\n", + h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 | h2c_parameter[3]<<8 | h2c_parameter[4])); + rtl8821ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter); +} + +bool rtl8821ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Need to decrease bt power\n")); + 
rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_DEC_BT_POWER; + return true; + } + + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_DEC_BT_POWER; + return false; +} + + +bool rtl8821ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + if ((rtlpcipriv->btcoexist.previous_state + == rtlpcipriv->btcoexist.current_state) + &&(rtlpcipriv->btcoexist.previous_state_h + == rtlpcipriv->btcoexist.current_state_h)) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[DM][BT], Coexist state do not chang!!\n")); + return true; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[DM][BT], Coexist state changed!!\n")); + return false; + } +} + +void rtl8821ae_dm_bt_set_coex_table(struct ieee80211_hw *hw, + u32 val_0x6c0, u32 val_0x6c8, u32 val_0x6cc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c0=0x%x\n", val_0x6c0)); + rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6c8=0x%x\n", val_0x6c8)); + rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("set coex table, set 0x6cc=0x%x\n", val_0x6cc)); + rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc); +} + +void rtl8821ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (BT_PTA_MODE_ON == b_mode) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode on, ")); + /* Enable GPIO 0/1/2/3/8 pins for bt */ + rtl_write_byte(rtlpriv, 0x40, 0x20); + rtlpcipriv->btcoexist.b_hw_coexist_all_off = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("PTA mode off\n")); + rtl_write_byte(rtlpriv, 0x40, 0x0); + } +} + +void rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw, u8 type) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (BT_RF_RX_LPF_CORNER_SHRINK == type) { + /* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu */ + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Shrink RF Rx LPF corner!!\n")); + /* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0, 0xf); */ + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, 0xf0ff7); + rtlpcipriv->btcoexist.b_sw_coexist_all_off = false; + } else if(BT_RF_RX_LPF_CORNER_RESUME == type) { + /*Resume RF Rx LPF corner*/ + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Resume RF Rx LPF corner!!\n")); + /* PHY_SetRFReg(Adapter, (RF_RADIO_PATH_E)PathA, 0x1e, 0xf0, + * pHalData->btcoexist.BtRfRegOrigin1E); */ + rtl8821ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff, + rtlpcipriv->btcoexist.bt_rfreg_origin_1e); + } +} + +void rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(struct ieee80211_hw *hw, + u8 ra_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u8 tmp_u1; + + tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd); + tmp_u1 |= BIT(0); + if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set low penalty!!\n")); + tmp_u1 &= ~BIT(2); + rtlpcipriv->btcoexist.b_sw_coexist_all_off = false; + } else if(BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Tx rate adaptive, set normal!!\n")); + tmp_u1 |= BIT(2); + } + + rtl_write_byte(rtlpriv, 0x4fd, tmp_u1); +} + +void rtl8821ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw, + struct btdm_8821ae 
*p_btdm) +{ + p_btdm->b_all_off = false; + p_btdm->b_agc_table_en = false; + p_btdm->b_adc_back_off_on = false; + p_btdm->b2_ant_hid_en = false; + p_btdm->b_low_penalty_rate_adaptive = false; + p_btdm->b_rf_rx_lpf_shrink = false; + p_btdm->b_reject_aggre_pkt= false; + + p_btdm->b_tdma_on = false; + p_btdm->tdma_ant = TDMA_2ANT; + p_btdm->tdma_nav = TDMA_NAV_OFF; + p_btdm->tdma_dac_swing = TDMA_DAC_SWING_OFF; + p_btdm->fw_dac_swing_lvl = 0x20; + + p_btdm->b_tra_tdma_on = false; + p_btdm->tra_tdma_ant = TDMA_2ANT; + p_btdm->tra_tdma_nav = TDMA_NAV_OFF; + p_btdm->b_ignore_wlan_act = false; + + p_btdm->b_ps_tdma_on = false; + p_btdm->ps_tdma_byte[0] = 0x0; + p_btdm->ps_tdma_byte[1] = 0x0; + p_btdm->ps_tdma_byte[2] = 0x0; + p_btdm->ps_tdma_byte[3] = 0x8; + p_btdm->ps_tdma_byte[4] = 0x0; + + p_btdm->b_pta_on = true; + p_btdm->val_0x6c0 = 0x5a5aaaaa; + p_btdm->val_0x6c8 = 0xcc; + p_btdm->val_0x6cc = 0x3; + + p_btdm->b_sw_dac_swing_on = false; + p_btdm->sw_dac_swing_lvl = 0xc0; + p_btdm->wlan_act_hi = 0x20; + p_btdm->wlan_act_lo = 0x10; + p_btdm->bt_retry_index = 2; + + p_btdm->b_dec_bt_pwr = false; +} + +void rtl8821ae_dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw, + struct btdm_8821ae *p_btdm) +{ + rtl8821ae_dm_bt_btdm_structure_reload(hw, p_btdm); + p_btdm->b_all_off = true; + p_btdm->b_pta_on = false; + p_btdm->wlan_act_hi = 0x10; +} + +bool rtl8821ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct btdm_8821ae btdm8821ae; + bool b_common = false; + + rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae); + + if(!rtl8821ae_dm_bt_is_wifi_busy(hw) + && !rtlpcipriv->btcoexist.b_bt_busy) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("Wifi idle + Bt idle, bt coex mechanism always off!!\n")); + rtl8821ae_dm_bt_btdm_structure_reload_all_off(hw, &btdm8821ae); + b_common = true; + } else if (rtl8821ae_dm_bt_is_wifi_busy(hw) + && !rtlpcipriv->btcoexist.b_bt_busy) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("Wifi non-idle + Bt disabled/idle!!\n")); + btdm8821ae.b_low_penalty_rate_adaptive = true; + btdm8821ae.b_rf_rx_lpf_shrink = false; + btdm8821ae.b_reject_aggre_pkt = false; + + /* sw mechanism */ + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + + btdm8821ae.b_pta_on = true; + btdm8821ae.val_0x6c0 = 0x5a5aaaaa; + btdm8821ae.val_0x6c8 = 0xcccc; + btdm8821ae.val_0x6cc = 0x3; + + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + btdm8821ae.b2_ant_hid_en = false; + + b_common = true; + }else if (rtlpcipriv->btcoexist.b_bt_busy) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("Bt non-idle!\n")); + if(mgnt_link_status_query(hw) == RT_MEDIA_CONNECT){ + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi connection exist\n")) + b_common = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("No Wifi connection!\n")); + btdm8821ae.b_rf_rx_lpf_shrink = true; + btdm8821ae.b_low_penalty_rate_adaptive = false; + btdm8821ae.b_reject_aggre_pkt = false; + + /* sw mechanism */ + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + + btdm8821ae.b_pta_on = true; + btdm8821ae.val_0x6c0 = 0x55555555; + btdm8821ae.val_0x6c8 = 0x0000ffff; + btdm8821ae.val_0x6cc = 0x3; + + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + btdm8821ae.b2_ant_hid_en = false; + + b_common = true; + } + } + + if 
(rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) { + btdm8821ae.b_dec_bt_pwr = true; + } + + if(b_common) + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_COMMON; + + if (b_common && rtl8821ae_dm_bt_is_coexist_state_changed(hw)) + rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae); + + return b_common; +} + +void rtl8821ae_dm_bt_set_sw_full_time_dac_swing( + struct ieee80211_hw * hw, bool b_sw_dac_swing_on, u32 sw_dac_swing_lvl) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (b_sw_dac_swing_on) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl)); + rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, sw_dac_swing_lvl); + rtlpcipriv->btcoexist.b_sw_coexist_all_off = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], SwDacSwing Off!\n")); + rtl8821ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0); + } +} + +void rtl8821ae_dm_bt_set_fw_dec_bt_pwr( + struct ieee80211_hw *hw, bool b_dec_bt_pwr) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[1] ={0}; + + h2c_parameter[0] = 0; + + if (b_dec_bt_pwr) { + h2c_parameter[0] |= BIT(1); + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n", + (b_dec_bt_pwr? "Yes!!":"No!!"), h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter); +} + + +void rtl8821ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw, + bool b_enable, bool b_dac_swing_on) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[1] ={0}; + + if (b_enable) { + h2c_parameter[0] |= BIT(0); + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } + if (b_dac_swing_on) { + h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */ + } + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n", + (b_enable ? "ON!!":"OFF!!"), (b_dac_swing_on ? 
"ON":"OFF"), + h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter); +} + +void rtl8821ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw, + bool b_enable, u8 ant_num, u8 nav_en, u8 dac_swing_en) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u8 h2c_parameter[1] ={0}; + u8 h2c_parameter1[1] = {0}; + + h2c_parameter[0] = 0; + h2c_parameter1[0] = 0; + + if(b_enable) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], set BT PTA update manager to trigger update!!\n")); + h2c_parameter1[0] |= BIT(0); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], turn TDMA mode ON!!\n")); + h2c_parameter[0] |= BIT(0); /* function enable */ + if (TDMA_1ANT == ant_num) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_1ANT\n")); + h2c_parameter[0] |= BIT(1); + } else if(TDMA_2ANT == ant_num) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_2ANT\n")); + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n")); + } + + if (TDMA_NAV_OFF == nav_en) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_OFF\n")); + } else if (TDMA_NAV_ON == nav_en) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TDMA_NAV_ON\n")); + h2c_parameter[0] |= BIT(2); + } + + if (TDMA_DAC_SWING_OFF == dac_swing_en) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], TDMA_DAC_SWING_OFF\n")); + } else if(TDMA_DAC_SWING_ON == dac_swing_en) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], TDMA_DAC_SWING_ON\n")); + h2c_parameter[0] |= BIT(4); + } + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], set BT PTA update manager to no update!!\n")); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], turn TDMA mode OFF!!\n")); + } + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], FW2AntTDMA, write 0x26=0x%x\n", h2c_parameter1[0])); + rtl8821ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], FW2AntTDMA, write 0x14=0x%x\n", h2c_parameter[0])); + rtl8821ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter); + + if (!b_enable) { + /* delay_ms(2); + * PlatformEFIOWrite1Byte(Adapter, 0x778, 0x1); */ + } +} + + +void rtl8821ae_dm_bt_set_fw_ignore_wlan_act( struct ieee80211_hw *hw, bool b_enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u8 h2c_parameter[1] ={0}; + + if (b_enable) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT Ignore Wlan_Act !!\n")); + h2c_parameter[0] |= BIT(0); // function enable + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], BT don't ignore Wlan_Act !!\n")); + } + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n", + h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter); +} + + +void rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw, + bool b_enable, u8 ant_num, u8 nav_en + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 h2c_parameter[2] ={0}; + + + if (b_enable) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], turn TTDMA mode ON!!\n")); + h2c_parameter[0] |= BIT(0); // function enable + if (TDMA_1ANT == ant_num) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_1ANT\n")); + h2c_parameter[0] |= BIT(1); + } else if (TDMA_2ANT == ant_num) { + 
RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_2ANT\n")); + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], Unknown Ant\n")); + } + + if (TDMA_NAV_OFF == nav_en) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_OFF\n")); + } else if (TDMA_NAV_ON == nav_en) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], TTDMA_NAV_ON\n")); + h2c_parameter[1] |= BIT(0); + } + + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], turn TTDMA mode OFF!!\n")); + } + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n", + h2c_parameter[0] << 8| h2c_parameter[1])); + + rtl8821ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter); +} + + +void rtl8821ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw, + u8 dac_swing_lvl) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[1] ={0}; + h2c_parameter[0] = dac_swing_lvl; + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl)); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], write 0x29=0x%x\n", h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter); +} + +void rtl8821ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw, bool b_enable) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[1] ={0}; + h2c_parameter[0] = 0; + + if(b_enable){ + h2c_parameter[0] |= BIT(0); + rtlpcipriv->btcoexist.b_fw_coexist_all_off = false; + } + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], Set BT HID information=0x%x\n", b_enable)); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], write 0x24=0x%x\n", h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter); +} + +void rtl8821ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw, + u8 retry_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[1] ={0}; + h2c_parameter[0] = retry_index; + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], Set BT Retry Index=%d\n", retry_index)); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], write 0x23=0x%x\n", h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter); +} + +void rtl8821ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw, + u8 wlan_act_hi, u8 wlan_act_lo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter_hi[1] ={0}; + u8 h2c_parameter_lo[1] ={0}; + h2c_parameter_hi[0] = wlan_act_hi; + h2c_parameter_lo[0] = wlan_act_lo; + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n", wlan_act_hi, wlan_act_lo)); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0])); + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0])); + + /* WLAN_ACT = High duration, unit:ms */ + rtl8821ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi); + /* WLAN_ACT = Low duration, unit:3*625us */ + rtl8821ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo); +} + +void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct btdm_8821ae *p_btdm_8821ae = &hal_coex_8821ae.btdm; + u8 i; + + bool b_fw_current_inpsmode = false; + bool b_fw_ps_awake = true; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *) (&b_fw_current_inpsmode)); + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, + (u8 *) (&b_fw_ps_awake)); + + // 
check new setting is different with the old one, + // if all the same, don't do the setting again. + if (memcmp(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae)) == 0) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], the same coexist setting, return!!\n")); + return; + } else { //save the new coexist setting + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], UPDATE TO NEW COEX SETTING!!\n")); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bAllOff=0x%x/ 0x%x \n", + p_btdm_8821ae->b_all_off, p_btdm->b_all_off)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new b_agc_table_en=0x%x/ 0x%x \n", + p_btdm_8821ae->b_agc_table_en, p_btdm->b_agc_table_en)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new b_adc_back_off_on=0x%x/ 0x%x \n", + p_btdm_8821ae->b_adc_back_off_on, p_btdm->b_adc_back_off_on)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x \n", + p_btdm_8821ae->b2_ant_hid_en, p_btdm->b2_ant_hid_en)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x \n", + p_btdm_8821ae->b_low_penalty_rate_adaptive, + p_btdm->b_low_penalty_rate_adaptive)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x \n", + p_btdm_8821ae->b_rf_rx_lpf_shrink, p_btdm->b_rf_rx_lpf_shrink)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x \n", + p_btdm_8821ae->b_reject_aggre_pkt, p_btdm->b_reject_aggre_pkt)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new b_tdma_on=0x%x/ 0x%x \n", + p_btdm_8821ae->b_tdma_on, p_btdm->b_tdma_on)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new tdmaAnt=0x%x/ 0x%x \n", + p_btdm_8821ae->tdma_ant, p_btdm->tdma_ant)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new tdmaNav=0x%x/ 0x%x \n", + p_btdm_8821ae->tdma_nav, p_btdm->tdma_nav)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x \n", + p_btdm_8821ae->tdma_dac_swing, p_btdm->tdma_dac_swing)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x \n", + p_btdm_8821ae->fw_dac_swing_lvl, p_btdm->fw_dac_swing_lvl)); + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x \n", + p_btdm_8821ae->b_tra_tdma_on, p_btdm->b_tra_tdma_on)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x \n", + p_btdm_8821ae->tra_tdma_ant, p_btdm->tra_tdma_ant)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new traTdmaNav=0x%x/ 0x%x \n", + p_btdm_8821ae->tra_tdma_nav, p_btdm->tra_tdma_nav)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x \n", + p_btdm_8821ae->b_ps_tdma_on, p_btdm->b_ps_tdma_on)); + for(i=0; i<5; i++) + { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x \n", + p_btdm_8821ae->ps_tdma_byte[i], p_btdm->ps_tdma_byte[i])); + } + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x \n", + p_btdm_8821ae->b_ignore_wlan_act, p_btdm->b_ignore_wlan_act)); + + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new bPtaOn=0x%x/ 0x%x \n", + p_btdm_8821ae->b_pta_on, p_btdm->b_pta_on)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new val_0x6c0=0x%x/ 0x%x \n", + p_btdm_8821ae->val_0x6c0, p_btdm->val_0x6c0)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], 
original/new val_0x6c8=0x%x/ 0x%x \n", + p_btdm_8821ae->val_0x6c8, p_btdm->val_0x6c8)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new val_0x6cc=0x%x/ 0x%x \n", + p_btdm_8821ae->val_0x6cc, p_btdm->val_0x6cc)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new b_sw_dac_swing_on=0x%x/ 0x%x \n", + p_btdm_8821ae->b_sw_dac_swing_on, p_btdm->b_sw_dac_swing_on)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x \n", + p_btdm_8821ae->sw_dac_swing_lvl, p_btdm->sw_dac_swing_lvl)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new wlanActHi=0x%x/ 0x%x \n", + p_btdm_8821ae->wlan_act_hi, p_btdm->wlan_act_hi)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new wlanActLo=0x%x/ 0x%x \n", + p_btdm_8821ae->wlan_act_lo, p_btdm->wlan_act_lo)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], original/new btRetryIndex=0x%x/ 0x%x \n", + p_btdm_8821ae->bt_retry_index, p_btdm->bt_retry_index)); + + memcpy(p_btdm_8821ae, p_btdm, sizeof(struct btdm_8821ae)); + } + /* + * Here we only consider when Bt Operation + * inquiry/paging/pairing is ON + * we only need to turn off TDMA */ + + if (rtlpcipriv->btcoexist.b_hold_for_bt_operation) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], set to ignore wlanAct for BT OP!!\n")); + rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, true); + return; + } + + if (p_btdm->b_all_off) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex], disable all coexist mechanism !!\n")); + rtl8821ae_btdm_coex_all_off(hw); + return; + } + + rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, p_btdm->b_reject_aggre_pkt); + + if(p_btdm->b_low_penalty_rate_adaptive) + rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw, + BT_TX_RATE_ADAPTIVE_LOW_PENALTY); + else + rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw, + BT_TX_RATE_ADAPTIVE_NORMAL); + + if(p_btdm->b_rf_rx_lpf_shrink) + rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_SHRINK); + else + rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME); + + if(p_btdm->b_agc_table_en) + rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON); + else + rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF); + + if(p_btdm->b_adc_back_off_on) + rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_ON); + else + rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF); + + rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, p_btdm->bt_retry_index); + + rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, p_btdm->fw_dac_swing_lvl); + rtl8821ae_dm_bt_set_fw_wlan_act(hw, p_btdm->wlan_act_hi, p_btdm->wlan_act_lo); + + rtl8821ae_dm_bt_set_coex_table(hw, p_btdm->val_0x6c0, + p_btdm->val_0x6c8, p_btdm->val_0x6cc); + rtl8821ae_dm_bt_set_hw_pta_mode(hw, p_btdm->b_pta_on); + + /* + * Note: There is a constraint between TDMA and 2AntHID + * Only one of 2AntHid and tdma can be turn on + * We should turn off those mechanisms should be turned off first + * and then turn on those mechanisms should be turned on. + */ +#if 1 + if(p_btdm->b2_ant_hid_en) { + // turn off tdma + rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, + p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, + p_btdm->tdma_nav, p_btdm->tdma_dac_swing); + + // turn off Pstdma + rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act); + rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300. 
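+ /*
+  * Both TDMA and PS-TDMA are now off: the 0x3a parameters are zeroed,
+  * with byte[3] = 0x8 leaving antenna control to the PTA as noted on the
+  * line above. Only the 2-Ant HID mechanism enabled below stays active,
+  * which satisfies the constraint stated earlier that 2AntHid and TDMA
+  * must not be turned on at the same time.
+  */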
+ + // turn on 2AntHid + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true); + } else if(p_btdm->b_tdma_on) { + // turn off 2AntHid + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + + // turn off pstdma + rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act); + rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300. + + // turn on tdma + rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing); + } else if(p_btdm->b_ps_tdma_on) { + // turn off 2AntHid + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + + // turn off tdma + rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing); + + // turn on pstdma + rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act); + rtl8821ae_dm_bt_set_fw_3a(hw, + p_btdm->ps_tdma_byte[0], + p_btdm->ps_tdma_byte[1], + p_btdm->ps_tdma_byte[2], + p_btdm->ps_tdma_byte[3], + p_btdm->ps_tdma_byte[4]); + } else { + // turn off 2AntHid + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + + // turn off tdma + rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing); + + // turn off pstdma + rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, p_btdm->b_ignore_wlan_act); + rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); // Antenna control by PTA, 0x870 = 0x300. + } +#else + if (p_btdm->b_tdma_on) { + if(p_btdm->b_ps_tdma_on) { + } else { + rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + } + /* Turn off 2AntHID first then turn tdma ON */ + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, true, + p_btdm->tdma_ant, p_btdm->tdma_nav, p_btdm->tdma_dac_swing); + } else { + /* Turn off tdma first then turn 2AntHID ON if need */ + rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, p_btdm->b_tra_tdma_on, p_btdm->tra_tdma_ant, p_btdm->tra_tdma_nav); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, p_btdm->tdma_ant, + p_btdm->tdma_nav, p_btdm->tdma_dac_swing); + if (p_btdm->b2_ant_hid_en) { + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, true); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, true, true); + } else { + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + } + if(p_btdm->b_ps_tdma_on) { + rtl8821ae_dm_bt_set_fw_3a(hw, p_btdm->ps_tdma_byte[0], p_btdm->ps_tdma_byte[1], + p_btdm->ps_tdma_byte[2], p_btdm->ps_tdma_byte[3], p_btdm->ps_tdma_byte[4]); + } else { + rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + } + } +#endif + + /* + * Note: + * We should add delay for making sure sw DacSwing can be set sucessfully. + * because of that rtl8821ae_dm_bt_set_fw_2_ant_hid() and rtl8821ae_dm_bt_set_fw_tdma_ctrl() + * will overwrite the reg 0x880. 
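+ * Hence the mdelay(30) below runs before the software DAC swing level
+ * is programmed into register 0x880.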
+ */ + mdelay(30); + rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw, + p_btdm->b_sw_dac_swing_on, p_btdm->sw_dac_swing_lvl); + rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, p_btdm->b_dec_bt_pwr); +} + +void rtl8821ae_dm_bt_bt_state_update_2_ant_hid(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], HID busy!!\n")); + rtlpcipriv->btcoexist.b_bt_busy = true; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE; +} + +void rtl8821ae_dm_bt_bt_state_update_2_ant_pan(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + bool b_idle = false; + + if (hal_coex_8821ae.low_priority_tx >= + hal_coex_8821ae.low_priority_rx) { + if((hal_coex_8821ae.low_priority_tx/ + hal_coex_8821ae.low_priority_rx) > 10) { + b_idle = true; + } + } else { + if((hal_coex_8821ae.low_priority_rx/ + hal_coex_8821ae.low_priority_tx) > 10) { + b_idle = true; + } + } + + if(!b_idle) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN busy!!\n")); + rtlpcipriv->btcoexist.b_bt_busy = true; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], PAN idle!!\n")); + } +} + +void rtl8821ae_dm_bt_2_ant_sco_action(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct btdm_8821ae btdm8821ae; + u8 bt_rssi_state; + + rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae); + btdm8821ae.b_rf_rx_lpf_shrink = true; + btdm8821ae.b_low_penalty_rate_adaptive = true; + btdm8821ae.b_reject_aggre_pkt = false; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n")); + /* coex table */ + btdm8821ae.val_0x6c0 = 0x5a5aaaaa; + btdm8821ae.val_0x6c8 = 0xcc; + btdm8821ae.val_0x6cc = 0x3; + /* sw mechanism */ + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + /* fw mechanism */ + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n")); + bt_rssi_state + = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0); + + /* coex table */ + btdm8821ae.val_0x6c0 = 0x5a5aaaaa; + btdm8821ae.val_0x6c8 = 0xcc; + btdm8821ae.val_0x6cc = 0x3; + /* sw mechanism */ + if ((bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) { + btdm8821ae.b_agc_table_en = true; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + } else { + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + } + /* fw mechanism */ + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + } + + if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) { + btdm8821ae.b_dec_bt_pwr = true; + } + + if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)) + rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae); +} + +void rtl8821ae_dm_bt_2_ant_hid_action(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct btdm_8821ae btdm8821ae; + u8 bt_rssi_state; + + rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae); + + btdm8821ae.b_rf_rx_lpf_shrink = true; + btdm8821ae.b_low_penalty_rate_adaptive = true; + btdm8821ae.b_reject_aggre_pkt = false; + + // coex table + 
btdm8821ae.val_0x6c0 = 0x55555555; + btdm8821ae.val_0x6c8 = 0xffff; + btdm8821ae.val_0x6cc = 0x3; + btdm8821ae.b_ignore_wlan_act = true; + + if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n")); + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + + // fw mechanism + btdm8821ae.b_ps_tdma_on = true; + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + + btdm8821ae.b_tra_tdma_on = false; + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + btdm8821ae.b2_ant_hid_en = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n")); + bt_rssi_state = + rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0); + + if( (bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n")); + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = true; + btdm8821ae.sw_dac_swing_lvl = 0x20; + + // fw mechanism + btdm8821ae.b_ps_tdma_on = false; + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + btdm8821ae.b2_ant_hid_en = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n")); + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + + // fw mechanism + btdm8821ae.b_ps_tdma_on = false; + btdm8821ae.b_tdma_on = false; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_OFF; + btdm8821ae.b2_ant_hid_en = true; + btdm8821ae.fw_dac_swing_lvl = 0x20; + } + } + + if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) { + btdm8821ae.b_dec_bt_pwr = true; + } + + if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) { + rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae); + } +} + + +void rtl8821ae_dm_bt_2_ant_2_dp_action_no_profile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct btdm_8821ae btdm8821ae; + u8 bt_rssi_state; + + rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae); + + btdm8821ae.b_rf_rx_lpf_shrink = true; + btdm8821ae.b_low_penalty_rate_adaptive = true; + btdm8821ae.b_reject_aggre_pkt = false; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT40\n")); + if (rtl8821ae_dm_bt_is_wifi_up_link(hw)) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n")); + /* coex table */ + btdm8821ae.val_0x6c0 = 0x5a5a5a5a; + btdm8821ae.val_0x6c8 = 0xcccc; + btdm8821ae.val_0x6cc = 0x3; + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + // fw mechanism + btdm8821ae.b_tra_tdma_on = true; + btdm8821ae.b_tdma_on = true; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON; + btdm8821ae.b2_ant_hid_en = false; + //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP); + //if(btSpec >= BT_SPEC_2_1_EDR) + { + btdm8821ae.wlan_act_hi = 0x10; + btdm8821ae.wlan_act_lo = 0x10; + } + //else + //{ + //btdm8821ae.wlanActHi = 0x20; + //btdm8821ae.wlanActLo = 0x20; + //} + btdm8821ae.bt_retry_index = 2; + btdm8821ae.fw_dac_swing_lvl = 0x18; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n")); + // coex table + btdm8821ae.val_0x6c0 = 
0x5a5a5a5a; + btdm8821ae.val_0x6c8 = 0xcc; + btdm8821ae.val_0x6cc = 0x3; + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + // fw mechanism + btdm8821ae.b_tra_tdma_on = true; + btdm8821ae.b_tdma_on = true; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON; + btdm8821ae.b2_ant_hid_en = false; + //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP); + //if(btSpec >= BT_SPEC_2_1_EDR) + { + btdm8821ae.wlan_act_hi = 0x10; + btdm8821ae.wlan_act_lo = 0x10; + } + //else + //{ + // btdm8821ae.wlanActHi = 0x20; + // btdm8821ae.wlanActLo = 0x20; + //} + btdm8821ae.bt_retry_index = 2; + btdm8821ae.fw_dac_swing_lvl = 0x40; + } + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("HT20 or Legacy\n")); + bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, BT_FW_COEX_THRESH_47, 0); + + if(rtl8821ae_dm_bt_is_wifi_up_link(hw)) + { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Uplink\n")); + // coex table + btdm8821ae.val_0x6c0 = 0x5a5a5a5a; + btdm8821ae.val_0x6c8 = 0xcccc; + btdm8821ae.val_0x6cc = 0x3; + // sw mechanism + if( (bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) + { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n")); + btdm8821ae.b_agc_table_en = true; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n")); + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + } + // fw mechanism + btdm8821ae.b_tra_tdma_on = true; + btdm8821ae.b_tdma_on = true; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON; + btdm8821ae.b2_ant_hid_en = false; + //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP); + //if(btSpec >= BT_SPEC_2_1_EDR) + { + btdm8821ae.wlan_act_hi = 0x10; + btdm8821ae.wlan_act_lo = 0x10; + } + //else + //{ + //btdm8821ae.wlanActHi = 0x20; + //btdm8821ae.wlanActLo = 0x20; + //} + btdm8821ae.bt_retry_index = 2; + btdm8821ae.fw_dac_swing_lvl = 0x18; + } + else + { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi Downlink\n")); + // coex table + btdm8821ae.val_0x6c0 = 0x5a5a5a5a; + btdm8821ae.val_0x6c8 = 0xcc; + btdm8821ae.val_0x6cc = 0x3; + // sw mechanism + if( (bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) + { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi high \n")); + btdm8821ae.b_agc_table_en = true; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + } + else + { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("Wifi rssi low \n")); + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + } + // fw mechanism + btdm8821ae.b_tra_tdma_on = true; + btdm8821ae.b_tdma_on = true; + btdm8821ae.tdma_dac_swing = TDMA_DAC_SWING_ON; + btdm8821ae.b2_ant_hid_en = false; + //btSpec = BTHCI_GetBTCoreSpecByProf(Adapter, BT_PROFILE_A2DP); + //if(btSpec >= BT_SPEC_2_1_EDR) + { + btdm8821ae.wlan_act_hi = 0x10; + btdm8821ae.wlan_act_lo = 0x10; + } + //else + //{ + //btdm8821ae.wlanActHi = 0x20; + //btdm8821ae.wlanActLo = 0x20; + //} + btdm8821ae.bt_retry_index = 2; + btdm8821ae.fw_dac_swing_lvl = 0x40; + } + } + + if (rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) { + btdm8821ae.b_dec_bt_pwr = true; + } + + if (rtl8821ae_dm_bt_is_coexist_state_changed(hw)) { + rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae); + } +} + + +//============================================================ +// extern 
function start with BTDM_ +//============================================================ +u32 rtl8821ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw) +{ + u32 counters=0; + + counters = hal_coex_8821ae.high_priority_tx + hal_coex_8821ae.high_priority_rx ; + return counters; +} + +u32 rtl8821ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw) +{ + u32 counters=0; + + counters = hal_coex_8821ae.low_priority_tx + hal_coex_8821ae.low_priority_rx ; + return counters; +} + +u8 rtl8821ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u32 bt_tx_rx_cnt = 0; + u8 bt_tx_rx_cnt_lvl = 0; + + bt_tx_rx_cnt = rtl8821ae_dm_bt_tx_rx_couter_h(hw) + + rtl8821ae_dm_bt_tx_rx_couter_l(hw); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt)); + + rtlpcipriv->btcoexist.current_state_h &= ~\ + (BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1| + BT_COEX_STATE_BT_CNT_LEVEL_2); + + if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT TxRx Counters at level 3\n")); + bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3; + rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_3; + } else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT TxRx Counters at level 2\n")); + bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2; + rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_2; + } else if(bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT TxRx Counters at level 1\n")); + bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1; + rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_1; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT TxRx Counters at level 0\n")); + bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0; + rtlpcipriv->btcoexist.current_state_h |= BT_COEX_STATE_BT_CNT_LEVEL_0; + } + return bt_tx_rx_cnt_lvl; +} + + +void rtl8821ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct btdm_8821ae btdm8821ae; + + u8 bt_rssi_state, bt_rssi_state1; + u8 bt_tx_rx_cnt_lvl = 0; + + rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae); + + + btdm8821ae.b_rf_rx_lpf_shrink = true; + btdm8821ae.b_low_penalty_rate_adaptive = true; + btdm8821ae.b_reject_aggre_pkt = false; + + bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl)); + + if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) + { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n")); + // coex table + btdm8821ae.val_0x6c0 = 0x55555555; + btdm8821ae.val_0x6c8 = 0xffff; + btdm8821ae.val_0x6cc = 0x3; + + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + + // fw mechanism + btdm8821ae.b_ps_tdma_on = true; + if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 
0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n")); + bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0); + bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0); + + // coex table + btdm8821ae.val_0x6c0 = 0x55555555; + btdm8821ae.val_0x6c8 = 0xffff; + btdm8821ae.val_0x6cc = 0x3; + + // sw mechanism + if( (bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n")); + btdm8821ae.b_agc_table_en = true; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n")); + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + } + + // fw mechanism + btdm8821ae.b_ps_tdma_on = true; + if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) || + (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG,("Wifi rssi-1 high \n")); + // only rssi high we need to do this, + // when rssi low, the value will modified by fw + rtl_write_byte(rtlpriv, 0x883, 0x40); + if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x83; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x83; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x83; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n")); + if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) + { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } + } + + if 
(rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) { + btdm8821ae.b_dec_bt_pwr = true; + } + + // Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", + hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl)); + if( (hal_coex_8821ae.bt_inq_page_start_time) || + (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], Set BT inquiry / page scan 0x3a setting\n")); + btdm8821ae.b_ps_tdma_on = true; + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x2; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + + if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)) { + rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae); + } +} + +void rtl8821ae_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct btdm_8821ae btdm8821ae; + + u8 bt_rssi_state, bt_rssi_state1; + u32 bt_tx_rx_cnt_lvl = 0; + + rtl8821ae_dm_bt_btdm_structure_reload(hw, &btdm8821ae); + + btdm8821ae.b_rf_rx_lpf_shrink = true; + btdm8821ae.b_low_penalty_rate_adaptive = true; + btdm8821ae.b_reject_aggre_pkt = false; + + bt_tx_rx_cnt_lvl = rtl8821ae_dm_bt_bt_tx_rx_counter_level(hw); + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl)); + + if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) + { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT40\n")); + bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 37, 0); + + // coex table + btdm8821ae.val_0x6c0 = 0x55555555; + btdm8821ae.val_0x6c8 = 0xffff; + btdm8821ae.val_0x6cc = 0x3; + + // sw mechanism + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + + // fw mechanism + btdm8821ae.b_ps_tdma_on = true; + if ((bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n")); + if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x81; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x81; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x81; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n")); + if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + 
btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("HT20 or Legacy\n")); + bt_rssi_state = rtl8821ae_dm_bt_check_coex_rssi_state(hw, 2, 47, 0); + bt_rssi_state1 = rtl8821ae_dm_bt_check_coex_rssi_state1(hw, 2, 27, 0); + + // coex table + btdm8821ae.val_0x6c0 = 0x55555555; + btdm8821ae.val_0x6c8 = 0xffff; + btdm8821ae.val_0x6cc = 0x3; + + // sw mechanism + if( (bt_rssi_state == BT_RSSI_STATE_HIGH) || + (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi high \n")); + btdm8821ae.b_agc_table_en = true; + btdm8821ae.b_adc_back_off_on = true; + btdm8821ae.b_sw_dac_swing_on = false; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi low \n")); + btdm8821ae.b_agc_table_en = false; + btdm8821ae.b_adc_back_off_on = false; + btdm8821ae.b_sw_dac_swing_on = false; + } + + // fw mechanism + btdm8821ae.b_ps_tdma_on = true; + if( (bt_rssi_state1 == BT_RSSI_STATE_HIGH) || + (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH) ) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 high \n")); + // only rssi high we need to do this, + // when rssi low, the value will modified by fw + rtl_write_byte(rtlpriv, 0x883, 0x40); + if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x81; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x81; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x81; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Wifi rssi-1 low \n")); + if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else if(bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters >= 1200 && < 1400\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xa; + btdm8821ae.ps_tdma_byte[2] = 0xa; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT TxRx Counters < 1200\n")); + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0xf; + btdm8821ae.ps_tdma_byte[2] = 0xf; + btdm8821ae.ps_tdma_byte[3] = 0x0; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + } + 
} + + if(rtl8821ae_dm_bt_need_to_dec_bt_pwr(hw)) { + btdm8821ae.b_dec_bt_pwr = true; + } + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n", + hal_coex_8821ae.bt_inq_page_start_time, bt_tx_rx_cnt_lvl)); + + if( (hal_coex_8821ae.bt_inq_page_start_time) || + (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl) ) + { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], Set BT inquiry / page scan 0x3a setting\n")); + btdm8821ae.b_ps_tdma_on = true; + btdm8821ae.ps_tdma_byte[0] = 0xa3; + btdm8821ae.ps_tdma_byte[1] = 0x5; + btdm8821ae.ps_tdma_byte[2] = 0x5; + btdm8821ae.ps_tdma_byte[3] = 0x83; + btdm8821ae.ps_tdma_byte[4] = 0x80; + } + + if(rtl8821ae_dm_bt_is_coexist_state_changed(hw)){ + rtl8821ae_dm_bt_set_bt_dm(hw, &btdm8821ae); + } +} + +void rtl8821ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 cur_time; + cur_time = jiffies; + if (hal_coex_8821ae.b_c2h_bt_inquiry_page) { + //pHalData->btcoexist.halCoex8821ae.btInquiryPageCnt++; + // bt inquiry or page is started. + if(hal_coex_8821ae.bt_inq_page_start_time == 0){ + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE; + hal_coex_8821ae.bt_inq_page_start_time = cur_time; + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT Inquiry/page is started at time : 0x%x \n", + hal_coex_8821ae.bt_inq_page_start_time)); + } + } + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x \n", + hal_coex_8821ae.bt_inq_page_start_time, cur_time)); + + if (hal_coex_8821ae.bt_inq_page_start_time) { + if ((((long)cur_time - (long)hal_coex_8821ae.bt_inq_page_start_time) / HZ) >= 10) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BT Inquiry/page >= 10sec!!!")); + hal_coex_8821ae.bt_inq_page_start_time = 0; + rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE; + } + } + +#if 0 + if (hal_coex_8821ae.b_c2h_bt_inquiry_page) { + hal_coex_8821ae.b_c2h_bt_inquiry_page++; + // bt inquiry or page is started. 
+ } if(hal_coex_8821ae.b_c2h_bt_inquiry_page) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_INQ_PAGE; + if(hal_coex_8821ae.bt_inquiry_page_cnt >= 4) + hal_coex_8821ae.bt_inquiry_page_cnt = 0; + hal_coex_8821ae.bt_inquiry_page_cnt++; + } else { + rtlpcipriv->btcoexist.current_state &=~ BT_COEX_STATE_BT_INQ_PAGE; + } +#endif +} + +void rtl8821ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + rtlpcipriv->btcoexist.current_state &= ~\ + (BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP| + BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO); + + rtlpcipriv->btcoexist.current_state &= ~\ + (BT_COEX_STATE_BTINFO_COMMON | BT_COEX_STATE_BTINFO_B_HID_SCOESCO| + BT_COEX_STATE_BTINFO_B_FTP_A2DP); +} + +void _rtl8821ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u8 bt_retry_cnt; + u8 bt_info_original; + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex] Get bt info by fw!!\n")); + + _rtl8821ae_dm_bt_check_wifi_state(hw); + + if (hal_coex_8821ae.b_c2h_bt_info_req_sent) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("[BTCoex] c2h for bt_info not rcvd yet!!\n")); + } + + bt_retry_cnt = hal_coex_8821ae.bt_retry_cnt; + bt_info_original = hal_coex_8821ae.c2h_bt_info_original; + + // when bt inquiry or page scan, we have to set h2c 0x25 + // ignore wlanact for continuous 4x2secs + rtl8821ae_dm_bt_inq_page_monitor(hw); + rtl8821ae_dm_bt_reset_action_profile_state(hw); + + if(rtl8821ae_dm_bt_is_2_ant_common_action(hw)) { + rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON; + rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_COMMON; + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("Action 2-Ant common.\n")); + } else { + if( (bt_info_original & BTINFO_B_HID) || + (bt_info_original & BTINFO_B_SCO_BUSY) || + (bt_info_original & BTINFO_B_SCO_ESCO) ) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO; + rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_HID_SCO_ESCO; + rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_HID_SCO_ESCO; + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n")); + rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw); + } else if( (bt_info_original & BTINFO_B_FTP) || + (bt_info_original & BTINFO_B_A2DP) ) { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_FTP_A2DP; + rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_FTP_A2DP; + rtlpcipriv->btcoexist.bt_profile_action = BT_COEX_MECH_FTP_A2DP; + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("BTInfo: bFTP|bA2DP\n")); + rtl8821ae_dm_bt_2_ant_ftp_a2dp(hw); + } else { + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BTINFO_B_HID_SCOESCO; + rtlpcipriv->btcoexist.bt_profile_case = BT_COEX_MECH_NONE; + rtlpcipriv->btcoexist.bt_profile_action= BT_COEX_MECH_NONE; + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], BTInfo: undefined case!!!!\n")); + rtl8821ae_dm_bt_2_ant_hid_sco_esco(hw); + } + } +} + +void _rtl8821ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw) +{ + return; +} + +void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw) +{ + rtl8821ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3); + rtl8821ae_dm_bt_set_hw_pta_mode(hw, true); +} + +void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw) +{ + rtl8821ae_dm_bt_set_fw_ignore_wlan_act(hw, false); + rtl8821ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0); + rtl8821ae_dm_bt_set_fw_2_ant_hid(hw, false, false); + 
rtl8821ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false, TDMA_2ANT, TDMA_NAV_OFF); + rtl8821ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT, + TDMA_NAV_OFF, TDMA_DAC_SWING_OFF); + rtl8821ae_dm_bt_set_fw_dac_swing_level(hw, 0); + rtl8821ae_dm_bt_set_fw_bt_hid_info(hw, false); + rtl8821ae_dm_bt_set_fw_bt_retry_index(hw, 2); + rtl8821ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10); + rtl8821ae_dm_bt_set_fw_dec_bt_pwr(hw, false); +} + +void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw) +{ + rtl8821ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF); + rtl8821ae_dm_bt_bb_back_off_level(hw, BT_BB_BACKOFF_OFF); + rtl8821ae_dm_bt_reject_ap_aggregated_packet(hw, false); + + rtl8821ae_dm_bt_set_sw_penalty_tx_rate_adaptive(hw, + BT_TX_RATE_ADAPTIVE_NORMAL); + rtl8821ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME); + rtl8821ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0); +} + +void rtl8821ae_dm_bt_query_bt_information(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 h2c_parameter[1] = {0}; + + hal_coex_8821ae.b_c2h_bt_info_req_sent = true; + + h2c_parameter[0] |= BIT(0); + + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("Query Bt information, write 0x38=0x%x\n", h2c_parameter[0])); + + rtl8821ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter); +} + +void rtl8821ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u32 reg_hp_tx_rx, reg_lp_tx_rx, u32_tmp; + u32 reg_hp_tx=0, reg_hp_rx=0, reg_lp_tx=0, reg_lp_rx=0; + + reg_hp_tx_rx = REG_HIGH_PRIORITY_TXRX; + reg_lp_tx_rx = REG_LOW_PRIORITY_TXRX; + + u32_tmp = rtl_read_dword(rtlpriv, reg_hp_tx_rx); + reg_hp_tx = u32_tmp & MASKLWORD; + reg_hp_rx = (u32_tmp & MASKHWORD)>>16; + + u32_tmp = rtl_read_dword(rtlpriv, reg_lp_tx_rx); + reg_lp_tx = u32_tmp & MASKLWORD; + reg_lp_rx = (u32_tmp & MASKHWORD)>>16; + + if(rtlpcipriv->btcoexist.lps_counter > 1) { + reg_hp_tx %= rtlpcipriv->btcoexist.lps_counter; + reg_hp_rx %= rtlpcipriv->btcoexist.lps_counter; + reg_lp_tx %= rtlpcipriv->btcoexist.lps_counter; + reg_lp_rx %= rtlpcipriv->btcoexist.lps_counter; + } + + hal_coex_8821ae.high_priority_tx = reg_hp_tx; + hal_coex_8821ae.high_priority_rx = reg_hp_rx; + hal_coex_8821ae.low_priority_tx = reg_lp_tx; + hal_coex_8821ae.low_priority_rx = reg_lp_rx; + + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", + reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx)); + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n", + reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx)); + rtlpcipriv->btcoexist.lps_counter = 0; + //rtl_write_byte(rtlpriv, 0x76e, 0xc); +} + +void rtl8821ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + bool bt_alife = true; + + if (hal_coex_8821ae.high_priority_tx == 0 && + hal_coex_8821ae.high_priority_rx == 0 && + hal_coex_8821ae.low_priority_tx == 0 && + hal_coex_8821ae.low_priority_rx == 0) { + bt_alife = false; + } + if (hal_coex_8821ae.high_priority_tx == 0xeaea && + hal_coex_8821ae.high_priority_rx == 0xeaea && + hal_coex_8821ae.low_priority_tx == 0xeaea && + hal_coex_8821ae.low_priority_rx == 0xeaea) { + bt_alife = false; + } + if (hal_coex_8821ae.high_priority_tx == 0xffff && + hal_coex_8821ae.high_priority_rx == 0xffff && + hal_coex_8821ae.low_priority_tx == 0xffff && + hal_coex_8821ae.low_priority_rx == 0xffff) { + bt_alife = 
false; + } + if (bt_alife) { + rtlpcipriv->btcoexist.bt_active_zero_cnt = 0; + rtlpcipriv->btcoexist.b_cur_bt_disabled = false; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is enabled !!\n")); + } else { + rtlpcipriv->btcoexist.bt_active_zero_cnt++; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, + ("8821AE bt all counters=0, %d times!!\n", + rtlpcipriv->btcoexist.bt_active_zero_cnt)); + if (rtlpcipriv->btcoexist.bt_active_zero_cnt >= 2) { + rtlpcipriv->btcoexist.b_cur_bt_disabled = true; + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is disabled !!\n")); + } + } + if (rtlpcipriv->btcoexist.b_pre_bt_disabled != + rtlpcipriv->btcoexist.b_cur_bt_disabled) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("8821AE BT is from %s to %s!!\n", + (rtlpcipriv->btcoexist.b_pre_bt_disabled ? "disabled":"enabled"), + (rtlpcipriv->btcoexist.b_cur_bt_disabled ? "disabled":"enabled"))); + rtlpcipriv->btcoexist.b_pre_bt_disabled + = rtlpcipriv->btcoexist.b_cur_bt_disabled; + } +} + + +void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + rtl8821ae_dm_bt_query_bt_information(hw); + rtl8821ae_dm_bt_bt_hw_counters_monitor(hw); + rtl8821ae_dm_bt_bt_enable_disable_check(hw); + + if (rtlpcipriv->btcoexist.bt_ant_num == ANT_X2) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTCoex], 2 Ant mechanism\n")); + _rtl8821ae_dm_bt_coexist_2_ant(hw); + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("[BTCoex], 1 Ant mechanism\n")); + _rtl8821ae_dm_bt_coexist_1_ant(hw); + } + + if (!rtl8821ae_dm_bt_is_same_coexist_state(hw)) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n", + rtlpcipriv->btcoexist.previous_state_h, + rtlpcipriv->btcoexist.previous_state, + rtlpcipriv->btcoexist.current_state_h, + rtlpcipriv->btcoexist.current_state)); + rtlpcipriv->btcoexist.previous_state + = rtlpcipriv->btcoexist.current_state; + rtlpcipriv->btcoexist.previous_state_h + = rtlpcipriv->btcoexist.current_state_h; + } +} + +void rtl8821ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw, u8 * tmp_buf, u8 len) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + u8 bt_info; + u8 i; + + hal_coex_8821ae.b_c2h_bt_info_req_sent = false; + hal_coex_8821ae.bt_retry_cnt = 0; + for (i = 0; i < len; i++) { + if (i == 0) { + hal_coex_8821ae.c2h_bt_info_original = tmp_buf[i]; + } else if (i == 1) { + hal_coex_8821ae.bt_retry_cnt = tmp_buf[i]; + } + if(i == len-1) { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x]", tmp_buf[i])); + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_TRACE, ("0x%2x, ", tmp_buf[i])); + } + } + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, + ("BT info bt_info (Data)= 0x%x\n",hal_coex_8821ae.c2h_bt_info_original)); + bt_info = hal_coex_8821ae.c2h_bt_info_original; + + if(bt_info & BIT(2)){ + hal_coex_8821ae.b_c2h_bt_inquiry_page = true; + } else { + hal_coex_8821ae.b_c2h_bt_inquiry_page = false; + } + + if (bt_info & BTINFO_B_CONNECTION) { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=true\n")); + rtlpcipriv->btcoexist.b_bt_busy = true; + rtlpcipriv->btcoexist.current_state &= ~BT_COEX_STATE_BT_IDLE; + } else { + RT_TRACE(COMP_BT_COEXIST, DBG_DMESG, ("[BTC2H], BTInfo: bConnect=false\n")); + rtlpcipriv->btcoexist.b_bt_busy = false; + rtlpcipriv->btcoexist.current_state |= BT_COEX_STATE_BT_IDLE; + } +} +void rtl_8821ae_c2h_command_handle(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct c2h_evt_hdr 
c2h_event; + u8 * ptmp_buf = NULL; + u8 index = 0; + u8 u1b_tmp = 0; + memset(&c2h_event, 0, sizeof(c2h_event)); + u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL); + RT_TRACE(COMP_FW, DBG_DMESG, + ("&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp)); + c2h_event.cmd_id = u1b_tmp & 0xF; + c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4; + c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1); + RT_TRACE(COMP_FW, DBG_DMESG, ("cmd_id: %d, cmd_len: %d, cmd_seq: %d\n", + c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq)); + u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF); + if (u1b_tmp == C2H_EVT_HOST_CLOSE) { + return; + } else if (u1b_tmp != C2H_EVT_FW_CLOSE) { + rtl_write_byte(rtlpriv, 0x1AF, 0x00); + return; + } + ptmp_buf = (u8 *) kmalloc(c2h_event.cmd_len, GFP_KERNEL); + if(ptmp_buf == NULL) { + RT_TRACE(COMP_FW, DBG_TRACE, ("malloc cmd buf failed\n")); + return; + } + + /* Read the content */ + for (index = 0; index < c2h_event.cmd_len; index ++) { + ptmp_buf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 2+ index); + } + + switch(c2h_event.cmd_id) { + case C2H_BT_RSSI: + break; + + case C2H_BT_OP_MODE: + break; + + case BT_INFO: + RT_TRACE(COMP_FW, DBG_TRACE, + ("BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id)); + RT_TRACE(COMP_FW, DBG_TRACE, + ("BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq)); + RT_TRACE(COMP_FW, DBG_TRACE, + ("BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0])); + + if (rtlpriv->cfg->ops->get_btc_status()){ + rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, ptmp_buf, c2h_event.cmd_len); + } + break; + default: + break; + } + + if(ptmp_buf) + kfree(ptmp_buf); + + rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE); +} + + + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/hal_btc.h @@ -0,0 +1,160 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_HAL_BTC_H__ +#define __RTL8821AE_HAL_BTC_H__ + +#include "../wifi.h" +#include "btc.h" +#include "hal_bt_coexist.h" + +#define BT_TXRX_CNT_THRES_1 1200 +#define BT_TXRX_CNT_THRES_2 1400 +#define BT_TXRX_CNT_THRES_3 3000 +#define BT_TXRX_CNT_LEVEL_0 0 // < 1200 +#define BT_TXRX_CNT_LEVEL_1 1 // >= 1200 && < 1400 +#define BT_TXRX_CNT_LEVEL_2 2 // >= 1400 +#define BT_TXRX_CNT_LEVEL_3 3 + + + +#define BT_COEX_DISABLE 0 +#define BT_Q_PKT_OFF 0 +#define BT_Q_PKT_ON 1 + +#define BT_TX_PWR_OFF 0 +#define BT_TX_PWR_ON 1 + +/* TDMA mode definition */ +#define TDMA_2ANT 0 +#define TDMA_1ANT 1 +#define TDMA_NAV_OFF 0 +#define TDMA_NAV_ON 1 +#define TDMA_DAC_SWING_OFF 0 +#define TDMA_DAC_SWING_ON 1 + +/* PTA mode related definition */ +#define BT_PTA_MODE_OFF 0 +#define BT_PTA_MODE_ON 1 + +/* Penalty Tx Rate Adaptive */ +#define BT_TX_RATE_ADAPTIVE_NORMAL 0 +#define BT_TX_RATE_ADAPTIVE_LOW_PENALTY 1 + +/* RF Corner */ +#define BT_RF_RX_LPF_CORNER_RESUME 0 +#define BT_RF_RX_LPF_CORNER_SHRINK 1 + +#define C2H_EVT_HOST_CLOSE 0x00 +#define C2H_EVT_FW_CLOSE 0xFF + +enum bt_traffic_mode { + BT_MOTOR_EXT_BE = 0x00, + BT_MOTOR_EXT_GUL = 0x01, + BT_MOTOR_EXT_GUB = 0x02, + BT_MOTOR_EXT_GULB = 0x03 +}; + +enum bt_traffic_mode_profile { + BT_PROFILE_NONE, + BT_PROFILE_A2DP, + BT_PROFILE_PAN, + BT_PROFILE_HID, + BT_PROFILE_SCO +}; + +enum hci_ext_bt_operation { + HCI_BT_OP_NONE = 0x0, + HCI_BT_OP_INQUIRE_START = 0x1, + HCI_BT_OP_INQUIRE_FINISH = 0x2, + HCI_BT_OP_PAGING_START = 0x3, + HCI_BT_OP_PAGING_SUCCESS = 0x4, + HCI_BT_OP_PAGING_UNSUCCESS = 0x5, + HCI_BT_OP_PAIRING_START = 0x6, + HCI_BT_OP_PAIRING_FINISH = 0x7, + HCI_BT_OP_BT_DEV_ENABLE = 0x8, + HCI_BT_OP_BT_DEV_DISABLE = 0x9, + HCI_BT_OP_MAX, +}; + +enum bt_spec { + BT_SPEC_1_0_b = 0x00, + BT_SPEC_1_1 = 0x01, + BT_SPEC_1_2 = 0x02, + BT_SPEC_2_0_EDR = 0x03, + BT_SPEC_2_1_EDR = 0x04, + BT_SPEC_3_0_HS = 0x05, + BT_SPEC_4_0 = 0x06 +}; + +struct c2h_evt_hdr { + u8 cmd_id; + u8 cmd_len; + u8 cmd_seq; +}; + +enum bt_state{ + BT_INFO_STATE_DISABLED = 0, + BT_INFO_STATE_NO_CONNECTION = 1, + BT_INFO_STATE_CONNECT_IDLE = 2, + BT_INFO_STATE_INQ_OR_PAG = 3, + BT_INFO_STATE_ACL_ONLY_BUSY = 4, + BT_INFO_STATE_SCO_ONLY_BUSY = 5, + BT_INFO_STATE_ACL_SCO_BUSY = 6, + BT_INFO_STATE_HID_BUSY = 7, + BT_INFO_STATE_HID_SCO_BUSY = 8, + BT_INFO_STATE_MAX = 7 +}; + +enum rtl8723be_c2h_evt { + C2H_DBG = 0, + C2H_TSF = 1, + C2H_AP_RPT_RSP = 2, + C2H_CCX_TX_RPT = 3, // The FW notify the report of the specific tx packet. 
+ C2H_BT_RSSI = 4, + C2H_BT_OP_MODE = 5, + C2H_HW_INFO_EXCH = 10, + C2H_C2H_H2C_TEST = 11, + BT_INFO = 9, + MAX_C2HEVENT +}; + + + +void rtl8821ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw); +void rtl8821ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw); +void rtl8821ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw); +void rtl8821ae_dm_bt_coexist(struct ieee80211_hw *hw); +void rtl8821ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8821ae *p_btdm); +void rtl_8821ae_c2h_command_handle(struct ieee80211_hw * hw); +void rtl_8821ae_bt_wifi_media_status_notify(struct ieee80211_hw * hw, bool mstatus); +void rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw); + + + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/hw.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/hw.c @@ -0,0 +1,3346 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../regd.h" +#include "../cam.h" +#include "../ps.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "led.h" +#include "hw.h" +#include "pwrseqcmd.h" +#include "pwrseq.h" +#include "btc.h" +#include "../btcoexist/rtl_btc.h" + +#define LLT_CONFIG 5 + +static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + while (skb_queue_len(&ring->queue)) { + struct rtl_tx_desc *entry = &ring->desc[ring->idx]; + struct sk_buff *skb = __skb_dequeue(&ring->queue); + + pci_unmap_single(rtlpci->pdev, + le32_to_cpu(rtlpriv->cfg->ops->get_desc( + (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)), + skb->len, PCI_DMA_TODEVICE); + kfree_skb(skb); + ring->idx = (ring->idx + 1) % ring->entries; + } + +} + +static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpci->reg_bcn_ctrl_val |= set_bits; + rtlpci->reg_bcn_ctrl_val &= ~clear_bits; + + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); +} + +void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6))); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} + +void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp1byte; + + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte |= BIT(0); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); +} + +static void _rtl8821ae_enable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(1)); +} + +static void _rtl8821ae_disable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(1), 0); +} + +static void _rtl8821ae_set_fw_clock_on(struct ieee80211_hw *hw, + u8 rpwm_val, bool b_need_turn_off_ckk) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_support_remote_wake_up; + u32 count = 0,isr_regaddr,content; + bool b_schedule_timer = b_need_turn_off_ckk; + rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN, + (u8 *) (&b_support_remote_wake_up)); + + if (!rtlhal->bfw_ready) + return; + if (!rtlpriv->psc.b_fw_current_inpsmode) + return; + + while (1) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (rtlhal->bfw_clk_change_in_progress) { + while (rtlhal->bfw_clk_change_in_progress) { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + count++; + udelay(100); + if (count > 1000) + return; + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + } + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + rtlhal->bfw_clk_change_in_progress = false; + 
spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } + } + + if (IS_IN_LOW_POWER_STATE_8821AE(rtlhal->fw_ps_state)) { + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *) (&rpwm_val)); + if (FW_PS_IS_ACK(rpwm_val)) { + isr_regaddr = REG_HISR; + content = rtl_read_dword(rtlpriv, isr_regaddr); + while (!(content & IMR_CPWM) && (count < 500)) { + udelay(50); + count++; + content = rtl_read_dword(rtlpriv, isr_regaddr); + } + + if (content & IMR_CPWM) { + rtl_write_word(rtlpriv,isr_regaddr, 0x0100); + rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE; + RT_TRACE(COMP_POWER, DBG_LOUD, ("Receive CPWM INT!!! Set pHalData->FwPSState = %X\n", rtlhal->fw_ps_state)); + } + } + + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->bfw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + if (b_schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + + } else { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->bfw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } + + +} + +static void _rtl8821ae_set_fw_clock_off(struct ieee80211_hw *hw, + u8 rpwm_val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + enum rf_pwrstate rtstate; + bool b_schedule_timer = false; + u8 queue; + + if (!rtlhal->bfw_ready) + return; + if (!rtlpriv->psc.b_fw_current_inpsmode) + return; + if (!rtlhal->ballow_sw_to_change_hwclc) + return; + rtlpriv->cfg->ops->get_hw_reg(hw,HW_VAR_RF_STATE,(u8 *)(&rtstate)); + if (rtstate == ERFOFF ||rtlpriv->psc.inactive_pwrstate ==ERFOFF) + return; + + for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) { + ring = &rtlpci->tx_ring[queue]; + if (skb_queue_len(&ring->queue)) { + b_schedule_timer = true; + break; + } + } + + if (b_schedule_timer) { + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + return; + } + + if (FW_PS_STATE(rtlhal->fw_ps_state) != FW_PS_STATE_RF_OFF_LOW_PWR_8821AE) { + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + if (!rtlhal->bfw_clk_change_in_progress) { + rtlhal->bfw_clk_change_in_progress = true; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val); + rtl_write_word(rtlpriv, REG_HISR, 0x0100); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *) (&rpwm_val)); + spin_lock_bh(&rtlpriv->locks.fw_ps_lock); + rtlhal->bfw_clk_change_in_progress = false; + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + } else { + spin_unlock_bh(&rtlpriv->locks.fw_ps_lock); + mod_timer(&rtlpriv->works.fw_clockoff_timer, + jiffies + MSECS(10)); + } + } + +} + +static void _rtl8821ae_set_fw_ps_rf_on(struct ieee80211_hw *hw) +{ + u8 rpwm_val = 0; + rpwm_val |= (FW_PS_STATE_RF_OFF_8821AE | FW_PS_ACK); + _rtl8821ae_set_fw_clock_on(hw, rpwm_val, true); +} + +static void _rtl8821ae_fwlps_leave(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_fw_current_inps = false; + u8 rpwm_val = 0,fw_pwrmode = FW_PS_ACTIVE_MODE; + + if (ppsc->b_low_power_enable){ + rpwm_val = (FW_PS_STATE_ALL_ON_8821AE|FW_PS_ACK);/* RF on */ + _rtl8821ae_set_fw_clock_on(hw, rpwm_val, false); + rtlhal->ballow_sw_to_change_hwclc = false; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *) (&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *) 
(&b_fw_current_inps)); + } else { + rpwm_val = FW_PS_STATE_ALL_ON_8821AE; /* RF on */ + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, + (u8 *) (&rpwm_val)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, + (u8 *) (&fw_pwrmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *) (&b_fw_current_inps)); + } + +} + +static void _rtl8821ae_fwlps_enter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_fw_current_inps = true; + u8 rpwm_val; + + if (ppsc->b_low_power_enable){ + rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_8821AE; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_PSMODE_STATUS, + (u8 *) (&b_fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *) (&ppsc->fwctrl_psmode)); + rtlhal->ballow_sw_to_change_hwclc = true; + _rtl8821ae_set_fw_clock_off(hw, rpwm_val); + + + } else { + rpwm_val = FW_PS_STATE_RF_OFF_8821AE; /* RF off */ + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_FW_PSMODE_STATUS, + (u8 *) (&b_fw_current_inps)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_H2C_FW_PWRMODE, + (u8 *) (&ppsc->fwctrl_psmode)); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_SET_RPWM, + (u8 *) (&rpwm_val)); + } + +} + +void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + switch (variable) { + case HW_VAR_ETHER_ADDR: + *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_MACID); + *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_MACID + 4); + break; + case HW_VAR_BSSID: + *((u32 *)(val)) = rtl_read_dword(rtlpriv, REG_BSSID); + *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4); + break; + case HW_VAR_MEDIA_STATUS: + val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3; + break; + case HW_VAR_SLOT_TIME: + *((u8 *)(val)) = mac->slot_time; + break; + case HW_VAR_BEACON_INTERVAL: + *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_BCN_INTERVAL); + break; + case HW_VAR_ATIM_WINDOW: + *((u16 *)(val)) = rtl_read_word(rtlpriv, REG_ATIMWND); + break; + case HW_VAR_RCR: + *((u32 *) (val)) = rtlpci->receive_config; + break; + case HW_VAR_RF_STATE: + *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; + break; + case HW_VAR_FWLPS_RF_ON:{ + enum rf_pwrstate rfState; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, + HW_VAR_RF_STATE, + (u8 *) (&rfState)); + if (rfState == ERFOFF) { + *((bool *) (val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *) (val)) = false; + else + *((bool *) (val)) = true; + } + break; + } + case HW_VAR_FW_PSMODE_STATUS: + *((bool *) (val)) = ppsc->b_fw_current_inpsmode; + break; + case HW_VAR_CORRECT_TSF:{ + u64 tsf; + u32 *ptsf_low = (u32 *) & tsf; + u32 *ptsf_high = ((u32 *) & tsf) + 1; + + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + + *((u64 *) (val)) = tsf; + + break; + } + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process %x\n",variable)); + break; + } +} + + +void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = 
rtl_efuse(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 idx; + + switch (variable) { + case HW_VAR_ETHER_ADDR:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_MACID + idx), + val[idx]); + } + break; + } + case HW_VAR_BASIC_RATE:{ + u16 b_rate_cfg = ((u16 *) val)[0]; + u8 rate_index = 0; + b_rate_cfg = b_rate_cfg & 0x15f; + b_rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, + (b_rate_cfg >> 8) & 0xff); + while (b_rate_cfg > 0x1) { + b_rate_cfg = (b_rate_cfg >> 1); + rate_index++; + } + rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, + rate_index); + break; + } + case HW_VAR_BSSID:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_BSSID + idx), + val[idx]); + } + break; + } + case HW_VAR_SIFS:{ + rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]); + + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + + if (!mac->ht_enable) + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + 0x0e0e); + else + rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, + *((u16 *) val)); + break; + } + case HW_VAR_SLOT_TIME:{ + u8 e_aci; + + RT_TRACE(COMP_MLME, DBG_LOUD, + ("HW_VAR_SLOT_TIME %x\n", val[0])); + + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + + for (e_aci = 0; e_aci < AC_MAX; e_aci++) { + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (u8 *) (&e_aci)); + } + break; + } + case HW_VAR_ACK_PREAMBLE:{ + u8 reg_tmp; + u8 short_preamble = (bool) (*(u8 *) val); + reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2); + if (short_preamble){ + reg_tmp |= BIT(1); + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); + } else { + reg_tmp &= (~BIT(1)); + rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp); + } + break; + } + case HW_VAR_WPA_CONFIG: + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); + break; + case HW_VAR_AMPDU_MIN_SPACE:{ + u8 min_spacing_to_set; + u8 sec_min_space; + + min_spacing_to_set = *((u8 *) val); + if (min_spacing_to_set <= 7) { + sec_min_space = 0; + + if (min_spacing_to_set < sec_min_space) + min_spacing_to_set = sec_min_space; + + mac->min_space_cfg = ((mac->min_space_cfg & + 0xf8) | + min_spacing_to_set); + + *val = min_spacing_to_set; + + RT_TRACE(COMP_MLME, DBG_LOUD, + ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", + mac->min_space_cfg)); + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + } + break; + } + case HW_VAR_SHORTGI_DENSITY:{ + u8 density_to_set; + + density_to_set = *((u8 *) val); + mac->min_space_cfg |= (density_to_set << 3); + + RT_TRACE(COMP_MLME, DBG_LOUD, + ("Set HW_VAR_SHORTGI_DENSITY: %#x\n", + mac->min_space_cfg)); + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + + break; + } + case HW_VAR_AMPDU_FACTOR:{ + u32 ampdu_len = (*((u8 *)val)); + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + if(ampdu_len < VHT_AGG_SIZE_128K) + ampdu_len = (0x2000 << (*((u8 *)val))) -1; + else + ampdu_len = 0x1ffff; + } else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if(ampdu_len < HT_AGG_SIZE_64K) + ampdu_len = (0x2000 << (*((u8 *)val))) -1; + else + ampdu_len = 0xffff; + } + ampdu_len |= BIT(31); + + rtl_write_dword(rtlpriv, + REG_AMPDU_MAX_LENGTH_8812, ampdu_len); + break; + } + case HW_VAR_AC_PARAM:{ + u8 e_aci = *((u8 *) val); + rtl8821ae_dm_init_edca_turbo(hw); + + if (rtlpci->acm_method != eAcmWay2_SW) + rtlpriv->cfg->ops->set_hw_reg(hw, 
+ HW_VAR_ACM_CTRL, + (u8 *) (&e_aci)); + break; + } + case HW_VAR_ACM_CTRL:{ + u8 e_aci = *((u8 *) val); + union aci_aifsn *p_aci_aifsn = + (union aci_aifsn *)(&(mac->ac[0].aifs)); + u8 acm = p_aci_aifsn->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + acm_ctrl = + acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1); + + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= AcmHw_BeqEn; + break; + case AC2_VI: + acm_ctrl |= AcmHw_ViqEn; + break; + case AC3_VO: + acm_ctrl |= AcmHw_VoqEn; + break; + default: + RT_TRACE(COMP_ERR, DBG_WARNING, + ("HW_VAR_ACM_CTRL acm set " + "failed: eACI is %d\n", acm)); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~AcmHw_BeqEn); + break; + case AC2_VI: + acm_ctrl &= (~AcmHw_ViqEn); + break; + case AC3_VO: + acm_ctrl &= (~AcmHw_BeqEn); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + } + + RT_TRACE(COMP_QOS, DBG_TRACE, + ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] " + "Write 0x%X\n", acm_ctrl)); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + break; + } + case HW_VAR_RCR:{ + rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]); + rtlpci->receive_config = ((u32 *) (val))[0]; + break; + } + case HW_VAR_RETRY_LIMIT:{ + u8 retry_limit = ((u8 *) (val))[0]; + + rtl_write_word(rtlpriv, REG_RL, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + break; + } + case HW_VAR_DUAL_TSF_RST: + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); + break; + case HW_VAR_EFUSE_BYTES: + rtlefuse->efuse_usedbytes = *((u16 *) val); + break; + case HW_VAR_EFUSE_USAGE: + rtlefuse->efuse_usedpercentage = *((u8 *) val); + break; + case HW_VAR_IO_CMD: + rtl8821ae_phy_set_io_cmd(hw, (*(enum io_type *)val)); + break; + case HW_VAR_SET_RPWM:{ + u8 rpwm_val; + + rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM); + udelay(1); + + if (rpwm_val & BIT(7)) { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + (*(u8 *) val)); + } else { + rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, + ((*(u8 *) val) | BIT(7))); + } + + break; + } + case HW_VAR_H2C_FW_PWRMODE:{ + rtl8821ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); + break; + } + case HW_VAR_FW_PSMODE_STATUS: + ppsc->b_fw_current_inpsmode = *((bool *) val); + break; + + case HW_VAR_RESUME_CLK_ON: + _rtl8821ae_set_fw_ps_rf_on(hw); + break; + + case HW_VAR_FW_LPS_ACTION:{ + bool b_enter_fwlps = *((bool *) val); + + if (b_enter_fwlps) + _rtl8821ae_fwlps_enter(hw); + else + _rtl8821ae_fwlps_leave(hw); + + break; + } + + case HW_VAR_H2C_FW_JOINBSSRPT:{ + u8 mstatus = (*(u8 *) val); + u8 tmp_regcr, tmp_reg422,bcnvalid_reg; + u8 count = 0, dlbcn_count = 0; + bool b_recover = false; + + if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, + NULL); + + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr | BIT(0))); + + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0); + + tmp_reg422 = + rtl_read_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 & (~BIT(6))); + if (tmp_reg422 & BIT(6)) + b_recover = true; + + do { + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2); + rtl_write_byte(rtlpriv, REG_TDECTRL+2,(bcnvalid_reg | BIT(0))); + _rtl8821ae_return_beacon_queue_skb(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_set_fw_rsvdpagepkt(hw, 0); + else + rtl8821ae_set_fw_rsvdpagepkt(hw, 0); + bcnvalid_reg = rtl_read_byte(rtlpriv, 
REG_TDECTRL+2); + count = 0; + while (!(bcnvalid_reg & BIT(0)) && count <20){ + count++; + udelay(10); + bcnvalid_reg = rtl_read_byte(rtlpriv, REG_TDECTRL+2); + } + dlbcn_count++; + } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count <5); + + if (bcnvalid_reg & BIT(0)) + rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0)); + + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4)); + + if (b_recover) { + rtl_write_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2, + tmp_reg422); + } + + rtl_write_byte(rtlpriv, REG_CR + 1, + (tmp_regcr & ~(BIT(0)))); + } + rtl8821ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); + + break; + } + case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:{ + rtl8821ae_set_p2p_ps_offload_cmd(hw, (*(u8 *) val)); + break; + } + + case HW_VAR_AID:{ + u16 u2btmp; + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | + mac->assoc_id)); + + break; + } + case HW_VAR_CORRECT_TSF:{ + u8 btype_ibss = ((u8 *) (val))[0]; + + if (btype_ibss == true) + _rtl8821ae_stop_tx_beacon(hw); + + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(3)); + + rtl_write_dword(rtlpriv, REG_TSFTR, + (u32) (mac->tsf & 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32) ((mac->tsf >> 32) & 0xffffffff)); + + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0); + + if (btype_ibss == true) + _rtl8821ae_resume_tx_beacon(hw); + + break; + + } + case HW_VAR_NAV_UPPER: { + u32 us_nav_upper = ((u32)*val); + + if(us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) + { + RT_TRACE(COMP_INIT , DBG_WARNING, + ("The setting value (0x%08X us) of NAV_UPPER" + " is larger than (%d * 0xFF)!!!\n", + us_nav_upper, HAL_92C_NAV_UPPER_UNIT)); + break; + } + rtl_write_byte(rtlpriv, REG_NAV_UPPER, + ((u8)((us_nav_upper + HAL_92C_NAV_UPPER_UNIT - 1) / HAL_92C_NAV_UPPER_UNIT))); + break; + } + case HW_VAR_KEEP_ALIVE: { + u8 array[2]; + array[0] = 0xff; + array[1] = *((u8 *)val); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_KEEP_ALIVE_CTRL, 2, array); + } + default: + RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case " + "not process %x\n",variable)); + break; + } +} + +static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool status = true; + long count = 0; + u32 value = _LLT_INIT_ADDR(address) | + _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); + + rtl_write_dword(rtlpriv, REG_LLT_INIT, value); + + do { + value = rtl_read_dword(rtlpriv, REG_LLT_INIT); + if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) + break; + + if (count > POLLING_LLT_THRESHOLD) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Failed to polling write LLT done at " + "address %d!\n", address)); + status = false; + break; + } + } while (++count); + + return status; +} + +static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned short i; + u8 txpktbuf_bndy; + u8 maxPage; + bool status; + + maxPage = 255; + txpktbuf_bndy = 0xF8; + + + rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); + rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, MAX_RX_DMA_BUFFER_SIZE - 1); + + rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_PBP, 0x31); + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); + + for (i = 0; i < (txpktbuf_bndy - 1); i++) { + status = _rtl8821ae_llt_write(hw, i, i + 1); + if (true != status) + return status; + } + + status = 
_rtl8821ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + if (true != status) + return status; + + for (i = txpktbuf_bndy; i < maxPage; i++) { + status = _rtl8821ae_llt_write(hw, i, (i + 1)); + if (true != status) + return status; + } + + status = _rtl8821ae_llt_write(hw, maxPage, txpktbuf_bndy); + if (true != status) + return status; + + rtl_write_dword(rtlpriv, REG_RQPN, 0x80e70808); + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00); + + return true; +} + +static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlpriv->rtlhal.up_first_time) + return; + + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_on(hw, pLed0); + else + rtl8821ae_sw_led_on(hw, pLed0); + else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT) + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_on(hw, pLed0); + else + rtl8821ae_sw_led_on(hw, pLed0); + else + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_off(hw, pLed0); + else + rtl8821ae_sw_led_off(hw, pLed0); +} + +static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 bytetmp = 0; + u16 wordtmp = 0; + bool b_mac_func_enable = rtlhal->b_mac_func_enable; + + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); + + /*Auto Power Down to CHIP-off State*/ + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7)); + rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + /* HW Power on sequence*/ + if(!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8812_NIC_ENABLE_FLOW)) { + RT_TRACE(COMP_INIT,DBG_LOUD,("init 8812 MAC Fail as power on failure\n")); + return false; + } + } else { + /* HW Power on sequence */ + if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8821A_NIC_ENABLE_FLOW)){ + RT_TRACE(COMP_INIT,DBG_LOUD,("init 8821 MAC Fail as power on failure\n")); + return false; + } + } + + bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4); + rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp); + + bytetmp = rtl_read_byte(rtlpriv, REG_CR); + bytetmp = 0xff; + rtl_write_byte(rtlpriv, REG_CR, bytetmp); + mdelay(2); + + bytetmp |= 0x7f; + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp); + mdelay(2); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3); + if (bytetmp & BIT(0)) { + bytetmp = rtl_read_byte(rtlpriv, 0x7c); + bytetmp |= BIT(6); + rtl_write_byte(rtlpriv, 0x7c, bytetmp); + } + } + + bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1); + bytetmp &= ~BIT(4); + rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp); + + rtl_write_word(rtlpriv, REG_CR, 0x2ff); + + if (!b_mac_func_enable) { + if (!_rtl8821ae_llt_table_init(hw)) + return false; + } + + rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff); + + /* Enable FW Beamformer Interrupt */ + bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3); + rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6)); + + wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL); + wordtmp &= 
0xf; + wordtmp |= 0xF5B1; + rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp); + + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F); + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF); + /*low address*/ + rtl_write_dword(rtlpriv, REG_BCNQ_DESA, + rtlpci->tx_ring[BEACON_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_MGQ_DESA, + rtlpci->tx_ring[MGNT_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VOQ_DESA, + rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_VIQ_DESA, + rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BEQ_DESA, + rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_BKQ_DESA, + rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_HQ_DESA, + rtlpci->tx_ring[HIGH_QUEUE].dma & DMA_BIT_MASK(32)); + rtl_write_dword(rtlpriv, REG_RX_DESA, + rtlpci->rx_ring[RX_MPDU_QUEUE].dma & DMA_BIT_MASK(32)); + + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77); + + rtl_write_dword(rtlpriv, REG_INT_MIG, 0); + + rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3); + _rtl8821ae_gen_refresh_led_state(hw); + + return true; +} + +static void _rtl8821ae_hw_configure(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rrsr; + + reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG; + + rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr); + /* ARFB table 9 for 11ac 5G 2SS */ + rtl_write_dword(rtlpriv, REG_ARFR0 + 4, 0xfffff000); + /* ARFB table 10 for 11ac 5G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR1 + 4, 0x003ff000); + /* ARFB table 11 for 11ac 24G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR2, 0x00000015); + rtl_write_dword(rtlpriv, REG_ARFR2 + 4, 0x003ff000); + /* ARFB table 12 for 11ac 24G 1SS */ + rtl_write_dword(rtlpriv, REG_ARFR3, 0x00000015); + rtl_write_dword(rtlpriv, REG_ARFR3 + 4, 0xffcff000); + /* 0x420[7] = 0 , enable retry AMPDU in new AMPD not singal MPDU. */ + rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F00); + rtl_write_byte(rtlpriv, REG_AMPDU_MAX_TIME, 0x70); + + /*Set retry limit*/ + rtl_write_word(rtlpriv, REG_RL, 0x0707); + + + /* Set Data / Response auto rate fallack retry count*/ + rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504); + rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000); + rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504); + + rtlpci->reg_bcn_ctrl_val = 0x1d; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val); + + /* TBTT prohibit hold time. Suggested by designer TimChen. */ + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1,0xff); // 8 ms + + /* AGGR_BK_TIME Reg51A 0x16 */ + rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040); + + /*For Rx TP. Suggested by SD1 Richard. Added by tynli. 
2010.04.12.*/ + rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666); + + rtl_write_byte(rtlpriv, REG_HT_SINGLE_AMPDU, 0x80); + rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20); + rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, 0x1F1F); +} + +static u16 _rtl8821ae_mdio_read(struct rtl_priv *rtlpriv, u8 addr) +{ + u16 ret = 0; + u8 tmp = 0, count = 0; + + rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(6)); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6) ; + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(6); + count++; + } + if (0 == tmp) + ret = rtl_read_word(rtlpriv, REG_MDIO_RDATA); + + return ret; +} + +void _rtl8821ae_mdio_write(struct rtl_priv *rtlpriv, u8 addr, u16 data) +{ + u8 tmp = 0, count = 0; + + rtl_write_word(rtlpriv, REG_MDIO_WDATA, data); + rtl_write_byte(rtlpriv, REG_MDIO_CTL, addr | BIT(5)); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5) ; + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_MDIO_CTL) & BIT(5); + count++; + } +} + +static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) +{ + u16 read_addr = addr & 0xfffc; + u8 tmp = 0, count = 0, ret = 0; + + rtl_write_word(rtlpriv, REG_DBI_ADDR, read_addr); + rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x2); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count++; + } + if (0 == tmp) { + read_addr = REG_DBI_RDATA + addr % 4; + ret = rtl_read_word(rtlpriv, read_addr); + } + return ret; +} + +void _rtl8821ae_dbi_write(struct rtl_priv *rtlpriv, u16 addr, u8 data) +{ + u8 tmp = 0, count = 0; + u16 wrtie_addr, remainder = addr % 4; + + wrtie_addr = REG_DBI_WDATA + remainder; + rtl_write_byte(rtlpriv, wrtie_addr, data); + + wrtie_addr = (addr & 0xfffc) | (BIT(0) << (remainder + 12)); + rtl_write_word(rtlpriv, REG_DBI_ADDR, wrtie_addr); + + rtl_write_byte(rtlpriv, REG_DBI_FLAG, 0x1); + + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count = 0; + while (tmp && count < 20) { + udelay(10); + tmp = rtl_read_byte(rtlpriv, REG_DBI_FLAG); + count++; + } + +} + +static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if (_rtl8821ae_mdio_read(rtlpriv, 0x04) != 0x8544) + _rtl8821ae_mdio_write(rtlpriv, 0x04, 0x8544); + + if (_rtl8821ae_mdio_read(rtlpriv, 0x0b) != 0x0070) + _rtl8821ae_mdio_write(rtlpriv, 0x0b, 0x0070); + } + + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f); + _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7)); + + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719); + _rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4)); + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718); + _rtl8821ae_dbi_write(rtlpriv, 0x718, tmp|BIT(4)); + } +} + +void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 sec_reg_value; + u8 tmp; + + RT_TRACE(COMP_INIT, DBG_DMESG, + ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm)); + + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + RT_TRACE(COMP_SEC, DBG_DMESG, ("not open hw encryption\n")); + return; + } + + sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable; + + if (rtlpriv->sec.use_defaultkey) { + sec_reg_value |= 
SCR_TxUseDK; + sec_reg_value |= SCR_RxUseDK; + } + + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); + + tmp = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1)); + + RT_TRACE(COMP_SEC, DBG_DMESG, + ("The SECR-value %x \n", sec_reg_value)); + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); + +} + +#if 0 +bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3); + if (!(tmp&BIT(2))) { + rtl_write_byte(rtlpriv, REG_DBI_CTRL+3, tmp|BIT(2)); + mdelay(100); + } + + tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL+3); + if (tmp&BIT(0) || tmp&BIT(1)) { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("rtl8821ae_check_pcie_dma_hang(): TRUE! Reset PCIE DMA!\n")); + return true; + } else { + return false; + } +} + +void _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw, + bool mac_power_on, bool watch_dog) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 tmp; + bool release_mac_rx_pause; + u8 backup_pcie_dma_pause; + + RT_TRACE(COMP_INIT, DBG_LOUD, ("_rtl8821ae_reset_pcie_interface_dma()\n")); + + tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL); + tmp &= ~BIT(1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL, tmp); + tmp = rtl_read_byte(rtlpriv, REG_PMC_DBG_CTRL2); + tmp |= BIT2; + rtl_write_byte(rtlpriv, REG_PMC_DBG_CTRL2, tmp); + + tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + if (tmp & BIT(2)) { + release_mac_rx_pause = false; + } else { + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp | BIT(2)); + release_mac_rx_pause = true; + } + backup_pcie_dma_pause = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+1); + if (backup_pcie_dma_pause != 0xFF) + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF); + + if (mac_power_on) + rtl_write_byte(rtlpriv, REG_CR, 0); + + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + tmp &= ~BIT(0); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp); + + tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + tmp |= ~BIT(0); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, tmp); + + if (mac_power_on) + rtl_write_byte(rtlpriv, REG_CR, 0xFF); + + tmp = rtl_read_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2); + tmp |= BIT1; + rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL+2, tmp); + + if (watch_dog) { + u32 rqpn = 0; + u32 rqpn_npq = 0; + u8 tx_page_boundary = _RQPN_Init_8812E(Adapter, &rqpn_npq, &rqpn); + + if(LLT_table_init_8812(Adapter, TX_PAGE_BOUNDARY, RQPN, RQPN_NPQ) == RT_STATUS_FAILURE) + return false; + + PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK); + PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK); + + // <1> Reset Tx descriptor + Adapter->HalFunc.ResetTxDescHandler(Adapter,Adapter->NumTxDesc); + + // <2> Reset Rx descriptor + Adapter->HalFunc.ResetRxDescHandler(Adapter,Adapter->NumRxDesc); + + // <3> Reset RFDs + FreeRFDs( Adapter, TRUE); + + // <4> Reset TCBs + FreeTCBs( Adapter, TRUE); + + // We should set all Rx desc own bit to 1 to prevent from RDU after enable Rx DMA. 2013.02.18, by tynli. + PrepareAllRxDescBuffer(Adapter); + + PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK); + PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK); + + // + // Initialize TRx DMA address. + // + // Because set 0x100 to 0x0 will cause the Rx descriptor address 0x340 be cleared to zero on 88EE, + // we should re-initialize Rx desc. address before enable DMA. 2012.11.07. by tynli. 
+ InitTRxDescHwAddress8812AE(Adapter); + } + + // In MAC power on state, BB and RF maybe in ON state, if we release TRx DMA here + // it will cause packets to be started to Tx/Rx, so we release Tx/Rx DMA later. + if(!bInMACPowerOn || bInWatchDog) + { + // 8. release TRX DMA + //write 0x284 bit[18] = 1'b0 + //write 0x301 = 0x00 + if(bReleaseMACRxPause) + { + u1Tmp = PlatformEFIORead1Byte(Adapter, REG_RXDMA_CONTROL); + PlatformEFIOWrite1Byte(Adapter, REG_RXDMA_CONTROL, (u1Tmp&~BIT2)); + } + PlatformEFIOWrite1Byte(Adapter, REG_PCIE_CTRL_REG+1, BackUpPcieDMAPause); + } + + if(IS_HARDWARE_TYPE_8821E(Adapter)) + { + //9. lock system register + // write 0xCC bit[2] = 1'b0 + u1Tmp = PlatformEFIORead1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B); + u1Tmp &= ~(BIT2); + PlatformEFIOWrite1Byte(Adapter, REG_PMC_DBG_CTRL2_8723B, u1Tmp); + } + + return RT_STATUS_SUCCESS; +} +#endif + +// Static MacID Mapping (cf. Used in MacIdDoStaticMapping) ---------- +#define MAC_ID_STATIC_FOR_DEFAULT_PORT 0 +#define MAC_ID_STATIC_FOR_BROADCAST_MULTICAST 1 +#define MAC_ID_STATIC_FOR_BT_CLIENT_START 2 +#define MAC_ID_STATIC_FOR_BT_CLIENT_END 3 +// ----------------------------------------------------------- + +void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 media_rpt[4] = {RT_MEDIA_CONNECT, 1, \ + MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, \ + MAC_ID_STATIC_FOR_BT_CLIENT_END}; + + rtlpriv->cfg->ops->set_hw_reg(hw, \ + HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt); + + RT_TRACE(COMP_INIT,DBG_LOUD, \ + ("Initialize MacId media status: from %d to %d\n", \ + MAC_ID_STATIC_FOR_BROADCAST_MULTICAST, \ + MAC_ID_STATIC_FOR_BT_CLIENT_END)); +} + +int rtl8821ae_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool rtstatus = true; + int err; + u8 tmp_u1b; + u32 nav_upper = WIFI_NAV_UPPER_US; + + rtlpriv->rtlhal.being_init_adapter = true; + rtlpriv->intf_ops->disable_aspm(hw); + + /*YP wowlan not considered*/ + + tmp_u1b = rtl_read_byte(rtlpriv, REG_CR); + if (tmp_u1b!=0 && tmp_u1b != 0xEA) { + rtlhal->b_mac_func_enable = true; + RT_TRACE(COMP_INIT,DBG_LOUD,(" MAC has already power on.\n")); + } else { + rtlhal->b_mac_func_enable = false; + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE; + } + +/* if (_rtl8821ae_check_pcie_dma_hang(hw)) { + _rtl8821ae_reset_pcie_interface_dma(hw,rtlhal->b_mac_func_enable,false); + rtlhal->b_mac_func_enable = false; + } */ + + rtstatus = _rtl8821ae_init_mac(hw); + if (rtstatus != true) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("Init MAC failed\n")); + err = 1; + return err; + } + + tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG); + tmp_u1b &= 0x7F; + rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b); + + err = rtl8821ae_download_fw(hw, false); + if (err) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("Failed to download FW. 
Init HW " + "without FW now..\n")); + err = 1; + rtlhal->bfw_ready = false; + return err; + } else { + rtlhal->bfw_ready = true; + } + rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE; + rtlhal->bfw_clk_change_in_progress = false; + rtlhal->ballow_sw_to_change_hwclc = false; + rtlhal->last_hmeboxnum = 0; + + /*SIC_Init(Adapter); + if(pHalData->AMPDUBurstMode) + PlatformEFIOWrite1Byte(Adapter,REG_AMPDU_BURST_MODE_8812, 0x7F);*/ + + rtl8821ae_phy_mac_config(hw); + /* because last function modify RCR, so we update + * rcr var here, or TP will unstable for receive_config + * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx + * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252 + rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR); + rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV); + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);*/ + rtl8821ae_phy_bb_config(hw); + + rtl8821ae_phy_rf_config(hw); + + _rtl8821ae_hw_configure(hw); + + rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G); + + /*set wireless mode*/ + + rtlhal->b_mac_func_enable = true; + + rtl_cam_reset_all_entry(hw); + + rtl8821ae_enable_hw_security_config(hw); + + ppsc->rfpwr_state = ERFON; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + _rtl8821ae_enable_aspm_back_door(hw); + rtlpriv->intf_ops->enable_aspm(hw); + + //rtl8821ae_bt_hw_init(hw); + rtlpriv->rtlhal.being_init_adapter = false; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_NAV_UPPER, (u8 *)&nav_upper); + + //rtl8821ae_dm_check_txpower_tracking(hw); + //rtl8821ae_phy_lc_calibrate(hw); + + /* Release Rx DMA*/ + tmp_u1b = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL); + if (tmp_u1b & BIT(2)) { + /* Release Rx DMA if needed*/ + tmp_u1b &= ~BIT(2); + rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, tmp_u1b); + } + + /* Release Tx/Rx PCIE DMA if*/ + rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0); + + rtl8821ae_dm_init(hw); + rtl8821ae_macid_initialize_mediastatus(hw); + + RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_hw_init() <====\n")); + return err; +} + +static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum version_8821ae version = VERSION_UNKNOWN; + u32 value32; + + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1); + RT_TRACE(COMP_INIT, DBG_LOUD, ("ReadChipVersion8812A 0xF0 = 0x%x \n", value32)); + + + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtlphy->rf_type = RF_2T2R; + else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + rtlphy->rf_type = RF_1T1R; + + RT_TRACE(COMP_INIT, DBG_LOUD, ("RF_Type is %x!!\n", rtlphy->rf_type)); + + + if (value32 & TRP_VAUX_EN) + { + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + if(rtlphy->rf_type == RF_2T2R) + version = VERSION_TEST_CHIP_2T2R_8812; + else + version = VERSION_TEST_CHIP_1T1R_8812; + } + else + version = VERSION_TEST_CHIP_8821; + } else { + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + u32 rtl_id = ((value32 & CHIP_VER_RTL_MASK) >> 12) +1 ; + + if(rtlphy->rf_type == RF_2T2R) + version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP | RF_TYPE_2T2R); + else + version = (enum version_8821ae)(CHIP_8812 | NORMAL_CHIP); + + version = (enum version_8821ae)(version| (rtl_id << 12)); + } + else if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + { + u32 rtl_id = value32 & CHIP_VER_RTL_MASK; + + version = (enum version_8821ae)(CHIP_8821 | NORMAL_CHIP | rtl_id); + } + } + + RT_TRACE(COMP_INIT, DBG_LOUD, + 
("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ? + "RF_2T2R" : "RF_1T1R")); + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + { + /*WL_HWROF_EN.*/ + value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); + rtlphy->hw_rof_enable= ((value32 & WL_HWROF_EN) ? 1 : 0); + } + + switch(version) + { + case VERSION_TEST_CHIP_1T1R_8812: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_TEST_CHIP_1T1R_8812.\n")); + break; + case VERSION_TEST_CHIP_2T2R_8812: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_TEST_CHIP_2T2R_8812.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_1T1R_8812: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_2T2R_8812: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT.\n")); + break; + case VERSION_TEST_CHIP_8821: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_TEST_CHIP_8821.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_8821: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT.\n")); + break; + default: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Chip Version ID: Unknow (0x%X).\n", version)); + break; + } + + return version; +} + +static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 bt_msr = rtl_read_byte(rtlpriv, MSR); + enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + bt_msr &= 0xfc; + + rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0); + RT_TRACE(COMP_BEACON, DBG_LOUD, + ("clear 0x550 when set HW_VAR_MEDIA_STATUS\n")); + + if (type == NL80211_IFTYPE_UNSPECIFIED || + type == NL80211_IFTYPE_STATION) { + _rtl8821ae_stop_tx_beacon(hw); + _rtl8821ae_enable_bcn_sub_func(hw); + } else if (type == NL80211_IFTYPE_ADHOC || + type == NL80211_IFTYPE_AP) { + _rtl8821ae_resume_tx_beacon(hw); + _rtl8821ae_disable_bcn_sub_func(hw); + } else { + RT_TRACE(COMP_ERR, DBG_WARNING,("Set HW_VAR_MEDIA_STATUS: " + "No such media status(%x).\n", type)); + } + + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + bt_msr |= MSR_NOLINK; + ledaction = LED_CTL_LINK; + RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to NO LINK!\n")); + break; + case NL80211_IFTYPE_ADHOC: + bt_msr |= MSR_ADHOC; + RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to Ad Hoc!\n")); + break; + case NL80211_IFTYPE_STATION: + bt_msr |= MSR_INFRA; + ledaction = LED_CTL_LINK; + RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to STA!\n")); + break; + case NL80211_IFTYPE_AP: + bt_msr |= MSR_AP; + RT_TRACE(COMP_INIT, DBG_TRACE, ("Set Network type to AP!\n")); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, ("Network type %d not support!\n", type)); + return 1; + break; + + } + + rtl_write_byte(rtlpriv, (MSR), bt_msr); + rtlpriv->cfg->ops->led_control(hw, ledaction); + if ((bt_msr & 0xfc) == MSR_AP) + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); + else + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); + + return 0; +} + +void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool 
check_bssid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u32 reg_rcr = rtlpci->receive_config; + + if (rtlpriv->psc.rfpwr_state != ERFON) + return; + + if (check_bssid == true) { + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *) (®_rcr)); + _rtl8821ae_set_bcn_ctrl_reg(hw, 0, BIT(4)); + } else if (check_bssid == false) { + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(4), 0); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *) (®_rcr)); + } + +} + +int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_INIT, DBG_LOUD, ("rtl8821ae_set_network_type!\n")); + + if (_rtl8821ae_set_media_status(hw, type)) + return -EOPNOTSUPP; + + if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { + if (type != NL80211_IFTYPE_AP) + rtl8821ae_set_check_bssid(hw, true); + } else { + rtl8821ae_set_check_bssid(hw, false); + } + + return 0; +} + +/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */ +void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl8821ae_dm_init_edca_turbo(hw); + switch (aci) { + case AC1_BK: + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); + break; + case AC0_BE: + /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */ + break; + case AC2_VI: + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); + break; + case AC3_VO: + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); + break; + default: + RT_ASSERT(false, ("invalid aci: %d !\n", aci)); + break; + } +} + +void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); + rtlpci->irq_enabled = true; + /* there are some C2H CMDs have been sent before system interrupt is enabled, e.g., C2H, CPWM. + *So we need to clear all C2H events that FW has notified, otherwise FW won't schedule any commands anymore. + */ + //rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0); + /*enable system interrupt*/ + rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF); +} + +void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED); + rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED); + rtlpci->irq_enabled = false; + synchronize_irq(rtlpci->pdev->irq); +} + +static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1b_tmp; + + rtlhal->b_mac_func_enable = false; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* Combo (PCIe + USB) Card and PCIe-MF Card */ + /* 1. Run LPS WL RFOFF flow */ + //RT_TRACE(COMP_INIT, DBG_LOUD, ("=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n")); + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8821A_NIC_LPS_ENTER_FLOW); + } + /* 2. 
0x1F[7:0] = 0 */ + /* turn off RF */ + //rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00); + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && + rtlhal->bfw_ready ) { + rtl8821ae_firmware_selfreset(hw); + } + + /* Reset MCU. Suggested by Filen. */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2)))); + + /* g. MCUFWDL 0x80[1:0]=0 */ + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* HW card disable configuration. */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8821A_NIC_DISABLE_FLOW); + } else { + /* HW card disable configuration. */ + rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, + PWR_INTF_PCI_MSK, RTL8812_NIC_DISABLE_FLOW); + } + + /* Reset MCU IO Wrapper */ + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0)))); + u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); + rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0)); + + /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */ + /* lock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); +} + +void rtl8821ae_card_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum nl80211_iftype opmode; + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("rtl8821ae_card_disable.\n")); + + mac->link_state = MAC80211_NOLINK; + opmode = NL80211_IFTYPE_UNSPECIFIED; + _rtl8821ae_set_media_status(hw, opmode); + if (rtlpriv->rtlhal.driver_is_goingto_unload || + ppsc->rfoff_reason > RF_CHANGE_BY_PS) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + _rtl8821ae_poweroff_adapter(hw); + + /* after power off we should do iqk again */ + rtlpriv->phy.iqk_initialized = false; +} + +void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0]; + rtl_write_dword(rtlpriv, ISR, *p_inta); + + + *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1]; + rtl_write_dword(rtlpriv, REG_HISRE, *p_intb); + +} + + +void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw) +{ + + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u16 bcn_interval, atim_window; + + bcn_interval = mac->beacon_interval; + atim_window = 2; /*FIX MERGE */ + rtl8821ae_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18); + rtl_write_byte(rtlpriv, 0x606, 0x30); + rtlpci->reg_bcn_ctrl_val |= BIT(3); + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val); + rtl8821ae_enable_interrupt(hw); +} + +void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval = mac->beacon_interval; + + RT_TRACE(COMP_BEACON, DBG_DMESG, + ("beacon_interval:%d\n", bcn_interval)); + 
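/* Interrupts are briefly masked around the REG_BCN_INTERVAL update and re-enabled right after the write (presumably to avoid racing the interrupt handler). */ +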
rtl8821ae_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl8821ae_enable_interrupt(hw); +} + +void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + RT_TRACE(COMP_INTR, DBG_LOUD, + ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr)); + + if (add_msr) + rtlpci->irq_mask[0] |= add_msr; + if (rm_msr) + rtlpci->irq_mask[0] &= (~rm_msr); + rtl8821ae_disable_interrupt(hw); + rtl8821ae_enable_interrupt(hw); +} + +static u8 _rtl8821ae_get_chnl_group(u8 chnl) +{ + u8 group = 0; + + if (chnl <= 14) { + if (1 <= chnl && chnl <= 2 ) + group = 0; + else if (3 <= chnl && chnl <= 5 ) + group = 1; + else if (6 <= chnl && chnl <= 8 ) + group = 2; + else if (9 <= chnl && chnl <= 11) + group = 3; + else /*if (12 <= chnl && chnl <= 14)*/ + group = 4; + } else { + if (36 <= chnl && chnl <= 42) + group = 0; + else if (44 <= chnl && chnl <= 48) + group = 1; + else if (50 <= chnl && chnl <= 58) + group = 2; + else if (60 <= chnl && chnl <= 64) + group = 3; + else if (100 <= chnl && chnl <= 106) + group = 4; + else if (108 <= chnl && chnl <= 114) + group = 5; + else if (116 <= chnl && chnl <= 122) + group = 6; + else if (124 <= chnl && chnl <= 130) + group = 7; + else if (132 <= chnl && chnl <= 138) + group = 8; + else if (140 <= chnl && chnl <= 144) + group = 9; + else if (149 <= chnl && chnl <= 155) + group = 10; + else if (157 <= chnl && chnl <= 161) + group = 11; + else if (165 <= chnl && chnl <= 171) + group = 12; + else if (173 <= chnl && chnl <= 177) + group = 13; + else + /*RT_TRACE(COMP_EFUSE,DBG_LOUD, + ("5G, Channel %d in Group not found \n",chnl));*/ + RT_ASSERT(!COMP_EFUSE, + ("5G, Channel %d in Group not found \n",chnl)); + } + return group; +} + +static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw, + struct txpower_info_2g *pwrinfo24g, + struct txpower_info_5g *pwrinfo5g, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 rfPath, eeAddr=EEPROM_TX_PWR_INX, group,TxCount=0; + + RT_TRACE(COMP_INIT, DBG_LOUD, ("hal_ReadPowerValueFromPROM8821ae(): PROMContent[0x%x]=0x%x\n", (eeAddr+1), hwinfo[eeAddr+1])); + if (0xFF == hwinfo[eeAddr+1]) /*YJ,add,120316*/ + autoload_fail = true; + + if (autoload_fail) + { + RT_TRACE(COMP_INIT, DBG_LOUD, ("auto load fail : Use Default value!\n")); + for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) { + /*2.4G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwrinfo24g->index_cck_base[rfPath][group] = 0x2D; + pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D; + } + for (TxCount = 0;TxCount < MAX_TX_COUNT;TxCount++) { + if (TxCount == 0) { + pwrinfo24g->bw20_diff[rfPath][0] = 0x02; + pwrinfo24g->ofdm_diff[rfPath][0] = 0x04; + } else { + pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE; + } + } + /*5G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) + pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A; + + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (TxCount == 0) { + pwrinfo5g->ofdm_diff[rfPath][0] = 0x04; + pwrinfo5g->bw20_diff[rfPath][0] = 0x00; + pwrinfo5g->bw80_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw160_diff[rfPath][0] = 0xFE; + } else { + pwrinfo5g->ofdm_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw20_diff[rfPath][0] = 0xFE; + 
pwrinfo5g->bw40_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw80_diff[rfPath][0] = 0xFE; + pwrinfo5g->bw160_diff[rfPath][0] = 0xFE; + } + } + } + return; + } + + rtl_priv(hw)->efuse.b_txpwr_fromeprom = true; + + for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) { + /*2.4G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { + pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++]; + if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF) + pwrinfo24g->index_cck_base[rfPath][group] = 0x2D; + } + for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) { + pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++]; + if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF) + pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D; + } + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount ++) { + if (TxCount == 0) { + pwrinfo24g->bw40_diff[rfPath][TxCount] = 0; + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo24g->bw20_diff[rfPath][TxCount] = 0x02; + } else { + pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) /*bit sign number to 8 bit sign number*/ + pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0; + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0x04; + } else { + pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) /*bit sign number to 8 bit sign number*/ + pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0; + } + pwrinfo24g->cck_diff[rfPath][TxCount] = 0; + eeAddr++; + } else { + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4; + if (pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0; + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0; + } + + eeAddr++; + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + if(pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0; + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo24g->cck_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if(pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3)) + pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0; + } + eeAddr++; + } + } + + /*5G default value*/ + for (group = 0 ; group < MAX_CHNL_GROUP_5G; group ++) { + pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++]; + if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF) + pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE; + } + + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (TxCount == 0) { + pwrinfo5g->bw40_diff[rfPath][TxCount] = 0; + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->bw20_diff[rfPath][TxCount] = 0x0; + } else { + pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4; + if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0; + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0x4; + } else { + pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f); + if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3)) + 
pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0; + } + eeAddr++; + } else { + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->bw40_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo5g->bw40_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0xf0) >> 4; + if(pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0; + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->bw20_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); + if(pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0; + } + eeAddr++; + } + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->ofdm_diff[rfPath][1] = 0xFE; + pwrinfo5g->ofdm_diff[rfPath][2] = 0xFE; + } else { + pwrinfo5g->ofdm_diff[rfPath][1] = (hwinfo[eeAddr] & 0xf0) >> 4; + pwrinfo5g->ofdm_diff[rfPath][2] = (hwinfo[eeAddr] & 0x0f); + } + eeAddr++; + if (hwinfo[eeAddr] == 0xFF) + pwrinfo5g->ofdm_diff[rfPath][3] = 0xFE; + else + pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f); + + eeAddr++; + + for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) { + if (pwrinfo5g->ofdm_diff[rfPath][TxCount] == 0xFF) + pwrinfo5g->ofdm_diff[rfPath][TxCount] = 0xFE; + else if(pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3)) + pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0; + } + for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->bw80_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo5g->bw80_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + if(pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3)) //4bit sign number to 8 bit sign number + pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0; + } + + if (hwinfo[eeAddr] == 0xFF) { + pwrinfo5g->bw160_diff[rfPath][TxCount] = 0xFE; + } else { + pwrinfo5g->bw160_diff[rfPath][TxCount]= (hwinfo[eeAddr] & 0x0f); + if(pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3)) //4bit sign number to 8 bit sign number + pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0; + } + eeAddr++; + } + } +} + +static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct txpower_info_2g pwrinfo24g; + struct txpower_info_5g pwrinfo5g; + u8 channel5g[CHANNEL_MAX_NUMBER_5G] = + {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112, + 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151, + 153,155,157,159,161,163,165,167,168,169,171,173,175,177}; + u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171}; + u8 rf_path, index; + u8 i; + + _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo); + + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) { + index = _rtl8821ae_get_chnl_group(i + 1); + + if (i == CHANNEL_MAX_NUMBER_2G - 1) { + rtlefuse->txpwrlevel_cck[rf_path][i] = + pwrinfo24g.index_cck_base[rf_path][5]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; + } else { + rtlefuse->txpwrlevel_cck[rf_path][i] = + pwrinfo24g.index_cck_base[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + pwrinfo24g.index_bw40_base[rf_path][index]; + } + } + + for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) { + index = _rtl8821ae_get_chnl_group(channel5g[i]); + rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index]; + } + for (i = 0; i < 
CHANNEL_MAX_NUMBER_5G_80M; i++) { + u8 upper, lower; + index = _rtl8821ae_get_chnl_group(channel5g_80m[i]); + upper = pwrinfo5g.index_bw40_base[rf_path][index]; + lower = pwrinfo5g.index_bw40_base[rf_path][index + 1]; + + rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i]; + rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i]; + + rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw20diff[rf_path][i] = pwrinfo5g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i]; + } + } + + if (!autoload_fail){ + rtlefuse->eeprom_regulatory = + hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/ + if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF) + rtlefuse->eeprom_regulatory = 0; + } else { + rtlefuse->eeprom_regulatory = 0; + } + + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory )); +} + +static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct txpower_info_2g pwrinfo24g; + struct txpower_info_5g pwrinfo5g; + u8 channel5g[CHANNEL_MAX_NUMBER_5G] = + {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112, + 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151, + 153,155,157,159,161,163,165,167,168,169,171,173,175,177}; + u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171}; + u8 rf_path, index; + u8 i; + + _rtl8821ae_read_power_value_fromprom(hw, &pwrinfo24g, &pwrinfo5g, autoload_fail, hwinfo); + + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < CHANNEL_MAX_NUMBER_2G; i++) { + index = _rtl8821ae_get_chnl_group(i + 1); + + if (i == CHANNEL_MAX_NUMBER_2G - 1) { + rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][5]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index]; + } else { + rtlefuse->txpwrlevel_cck[rf_path][i] = pwrinfo24g.index_cck_base[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = pwrinfo24g.index_bw40_base[rf_path][index]; + } + } + + for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++) { + index = _rtl8821ae_get_chnl_group(channel5g[i]); + rtlefuse->txpwr_5g_bw40base[rf_path][i] = pwrinfo5g.index_bw40_base[rf_path][index]; + } + for (i = 0; i < CHANNEL_MAX_NUMBER_5G_80M; i++) { + u8 upper, lower; + index = _rtl8821ae_get_chnl_group(channel5g_80m[i]); + upper = pwrinfo5g.index_bw40_base[rf_path][index]; + lower = pwrinfo5g.index_bw40_base[rf_path][index + 1]; + + rtlefuse->txpwr_5g_bw80base[rf_path][i] = (upper + lower) / 2; + } + for (i = 0; i < MAX_TX_COUNT; i++) { + rtlefuse->txpwr_cckdiff[rf_path][i] = pwrinfo24g.cck_diff[rf_path][i]; + rtlefuse->txpwr_legacyhtdiff[rf_path][i] = pwrinfo24g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_ht20diff[rf_path][i] = pwrinfo24g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_ht40diff[rf_path][i] = pwrinfo24g.bw40_diff[rf_path][i]; + + rtlefuse->txpwr_5g_ofdmdiff[rf_path][i] = pwrinfo5g.ofdm_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw20diff[rf_path][i] = 
pwrinfo5g.bw20_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw40diff[rf_path][i] = pwrinfo5g.bw40_diff[rf_path][i]; + rtlefuse->txpwr_5g_bw80diff[rf_path][i] = pwrinfo5g.bw80_diff[rf_path][i]; + } + } + + if (!autoload_fail){ + rtlefuse->eeprom_regulatory = hwinfo[EEPROM_RF_BOARD_OPTION] & 0x07;/*bit0~2*/ + if (hwinfo[EEPROM_RF_BOARD_OPTION] == 0xFF) + rtlefuse->eeprom_regulatory = 0; + } else { + rtlefuse->eeprom_regulatory = 0; + } + + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory )); +} + +static void _rtl8812ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u16 i, usvalue; + u8 hwinfo[HWSET_MAX_SIZE]; + u16 eeprom_id; + + if (b_pseudo_test) { + /* need add */ + } + + if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + rtl_efuse_shadow_map_update(hw); + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE); + } else if (rtlefuse->epromtype == EEPROM_93C46) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("RTL819X Not boot from eeprom, check it !!")); + } + + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"), + hwinfo, HWSET_MAX_SIZE); + + eeprom_id = *((u16 *) & hwinfo[0]); + if (eeprom_id != RTL_EEPROM_ID) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); + rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n")); + rtlefuse->autoload_failflag = false; + } + + if (rtlefuse->autoload_failflag == true) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("RTL8812AE autoload_failflag, check it !!")); + return; + } + + rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION]; + if (rtlefuse->eeprom_version == 0xff) + rtlefuse->eeprom_version = 0; + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version)); + + rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID]; + rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID]; + rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID]; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROMId = 0x%4x\n", eeprom_id)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid)); + + /*customer ID*/ + rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID]; + if (rtlefuse->eeprom_oemid == 0xFF) + rtlefuse->eeprom_oemid = 0; + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid)); + + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i]; + *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; + } + + RT_TRACE(COMP_INIT, DBG_DMESG, + ("dev_addr: %pM\n", rtlefuse->dev_addr)); + + _rtl8812ae_read_txpower_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + + /*board type*/ + rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5); + if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff ) + rtlefuse->board_type = 0; + rtlhal->boad_type = rtlefuse->board_type; + + rtl8812ae_read_bt_coexist_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + + rtlefuse->eeprom_channelplan = 
*(u8 *) & hwinfo[EEPROM_CHANNELPLAN]; + if (rtlefuse->eeprom_channelplan == 0xff) + rtlefuse->eeprom_channelplan = 0x7F; + + /* set channel paln to world wide 13 */ + //rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan; + + /*parse xtal*/ + rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE]; + if ( rtlefuse->crystalcap == 0xFF ) + rtlefuse->crystalcap = 0x20; + + rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER]; + if ((rtlefuse->eeprom_thermalmeter == 0xff) ||rtlefuse->autoload_failflag ) + { + rtlefuse->b_apk_thermalmeterignore = true; + rtlefuse->eeprom_thermalmeter = 0xff; + } + + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter)); + + if (rtlefuse->autoload_failflag == false) { + rtlefuse->antenna_div_cfg = *(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] & 0x18 >> 3; + if (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] == 0xff) + rtlefuse->antenna_div_cfg = 0x00; + /*if (BT_1ant()) + rtlefuse->antenna_div_cfg = 0;*/ + rtlefuse->antenna_div_type = *(u8 *) & hwinfo[EEPROM_RF_ANTENNA_OPT_88E]; + if (rtlefuse->antenna_div_type == 0xFF) + { + rtlefuse->antenna_div_type = FIXED_HW_ANTDIV; + } + } else { + rtlefuse->antenna_div_cfg = 0; + rtlefuse->antenna_div_type = 0; + } + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n", + rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type)); + + /*Hal_ReadPAType_8821A()*/ + /*Hal_EfuseParseRateIndicationOption8821A()*/ + /*Hal_ReadEfusePCIeCap8821AE()*/ + + pcipriv->ledctl.bled_opendrain = true; + + if (rtlhal->oem_id == RT_CID_DEFAULT) { + switch (rtlefuse->eeprom_oemid) { + case RT_CID_DEFAULT: + break; + case EEPROM_CID_TOSHIBA: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case EEPROM_CID_CCX: + rtlhal->oem_id = RT_CID_CCX; + break; + case EEPROM_CID_QMI: + rtlhal->oem_id = RT_CID_819x_QMI; + break; + case EEPROM_CID_WHQL: + break; + default: + break; + + } + } +} + +static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_test ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u16 i, usvalue; + u8 hwinfo[HWSET_MAX_SIZE]; + u16 eeprom_id; + + if (b_pseudo_test) { + /* need add */ + } + + if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + rtl_efuse_shadow_map_update(hw); + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE); + } else if (rtlefuse->epromtype == EEPROM_93C46) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("RTL819X Not boot from eeprom, check it !!")); + } + + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP \n"), + hwinfo, HWSET_MAX_SIZE); + + eeprom_id = *((u16 *) & hwinfo[0]); + if (eeprom_id != RTL_EEPROM_ID) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); + rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n")); + rtlefuse->autoload_failflag = false; + } + + if (rtlefuse->autoload_failflag == true) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("RTL8812AE autoload_failflag, check it !!")); + return; + } + + rtlefuse->eeprom_version = *(u8 *) & hwinfo[EEPROM_VERSION]; + if (rtlefuse->eeprom_version == 0xff) + rtlefuse->eeprom_version = 0; + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM version: 0x%2x\n", rtlefuse->eeprom_version)); + + rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *) 
&hwinfo[EEPROM_DID]; + rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID]; + rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID]; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROMId = 0x%4x\n", eeprom_id)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid)); + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid)); + + /*customer ID*/ + rtlefuse->eeprom_oemid = *(u8 *) & hwinfo[EEPROM_CUSTOMER_ID]; + if (rtlefuse->eeprom_oemid == 0xFF) + rtlefuse->eeprom_oemid = 0; + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid)); + + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *) & hwinfo[EEPROM_MAC_ADDR + i]; + *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; + } + + RT_TRACE(COMP_INIT, DBG_DMESG, + ("dev_addr: %pM\n", rtlefuse->dev_addr)); + + _rtl8821ae_read_txpower_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + + /*board type*/ + rtlefuse->board_type = (((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) & 0xE0 ) >> 5); + if ((*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]) == 0xff ) + rtlefuse->board_type = 0; + rtlhal->boad_type = rtlefuse->board_type; + + rtl8821ae_read_bt_coexist_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + + rtlefuse->eeprom_channelplan = *(u8 *) & hwinfo[EEPROM_CHANNELPLAN]; + if (rtlefuse->eeprom_channelplan == 0xff) + rtlefuse->eeprom_channelplan = 0x7F; + + /* set channel paln to world wide 13 */ + //rtlefuse->channel_plan = (u8) rtlefuse->eeprom_channelplan; + + /*parse xtal*/ + rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE]; + if ( rtlefuse->crystalcap == 0xFF ) + rtlefuse->crystalcap = 0x20; + + rtlefuse->eeprom_thermalmeter = *(u8 *) & hwinfo[EEPROM_THERMAL_METER]; + if ((rtlefuse->eeprom_thermalmeter == 0xff) ||rtlefuse->autoload_failflag ) + { + rtlefuse->b_apk_thermalmeterignore = true; + rtlefuse->eeprom_thermalmeter = 0x18; + } + + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter)); + + if (rtlefuse->autoload_failflag == false) { + rtlefuse->antenna_div_cfg = (*(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION] & BIT(3))?true:false; + /*if (BT_1ant()) + rtlefuse->antenna_div_cfg = 0;*/ + + rtlefuse->antenna_div_type = CG_TRX_HW_ANTDIV; + } else { + rtlefuse->antenna_div_cfg = 0; + rtlefuse->antenna_div_type = 0; + } + + RT_TRACE(COMP_INIT, DBG_LOUD, + ("SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n", + rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type)); + + pcipriv->ledctl.bled_opendrain = true; + + if (rtlhal->oem_id == RT_CID_DEFAULT) { + switch (rtlefuse->eeprom_oemid) { + case RT_CID_DEFAULT: + break; + case EEPROM_CID_TOSHIBA: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case EEPROM_CID_CCX: + rtlhal->oem_id = RT_CID_CCX; + break; + case EEPROM_CID_QMI: + rtlhal->oem_id = RT_CID_819x_QMI; + break; + case EEPROM_CID_WHQL: + break; + default: + break; + } + } +} + + +/*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + pcipriv->ledctl.bled_opendrain = true; + switch (rtlhal->oem_id) { + case RT_CID_819x_HP: + pcipriv->ledctl.bled_opendrain = true; + break; + case RT_CID_819x_Lenovo: + case RT_CID_DEFAULT: + case 
RT_CID_TOSHIBA: + case RT_CID_CCX: + case RT_CID_819x_Acer: + case RT_CID_WHQL: + default: + break; + } + RT_TRACE(COMP_INIT, DBG_DMESG, + ("RT Customized ID: 0x%02X\n", rtlhal->oem_id)); +}*/ + +void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_u1b; + + rtlhal->version = _rtl8821ae_read_chip_version(hw); + + if (get_rf_type(rtlphy) == RF_1T1R) + rtlpriv->dm.brfpath_rxenable[0] = true; + else + rtlpriv->dm.brfpath_rxenable[0] = + rtlpriv->dm.brfpath_rxenable[1] = true; + RT_TRACE(COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n", + rtlhal->version)); + + tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); + if (tmp_u1b & BIT(4)) { + RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n")); + rtlefuse->epromtype = EEPROM_93C46; + } else { + RT_TRACE(COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n")); + rtlefuse->epromtype = EEPROM_BOOT_EFUSE; + } + + if (tmp_u1b & BIT(5)) { + RT_TRACE(COMP_INIT, DBG_LOUD, ("Autoload OK\n")); + rtlefuse->autoload_failflag = false; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + _rtl8812ae_read_adapter_info(hw, false); + else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + _rtl8821ae_read_adapter_info(hw, false); + } else { + RT_TRACE(COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n")); + } + /*hal_ReadRFType_8812A()*/ + //_rtl8821ae_hal_customized_behavior(hw); +} + +static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 ratr_value; + u8 ratr_index = 0; + u8 b_nmode = mac->ht_enable; + u8 mimo_ps = IEEE80211_SMPS_OFF; + u16 shortgi_rate; + u32 tmp_ratr_value; + u8 b_curtxbw_40mhz = mac->bw_40; + u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? 
+ 1 : 0; + enum wireless_mode wirelessmode = mac->mode; + + if (rtlhal->current_bandtype == BAND_ON_5G) + ratr_value = sta->supp_rates[1] << 4; + else + ratr_value = sta->supp_rates[0]; + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_value = 0xfff; + ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); + switch (wirelessmode) { + case WIRELESS_MODE_B: + if (ratr_value & 0x0000000c) + ratr_value &= 0x0000000d; + else + ratr_value &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_value &= 0x00000FF5; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + b_nmode = 1; + if (mimo_ps == IEEE80211_SMPS_STATIC) { + ratr_value &= 0x0007F005; + } else { + u32 ratr_mask; + + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) + ratr_mask = 0x000ff005; + else + ratr_mask = 0x0f0ff005; + + ratr_value &= ratr_mask; + } + break; + default: + if (rtlphy->rf_type == RF_1T2R) + ratr_value &= 0x000ff0ff; + else + ratr_value &= 0x0f0ff0ff; + + break; + } + + if ( (rtlpcipriv->btcoexist.bt_coexistence) && + (rtlpcipriv->btcoexist.bt_coexist_type == BT_CSR_BC4) && + (rtlpcipriv->btcoexist.bt_cur_state) && + (rtlpcipriv->btcoexist.bt_ant_isolation) && + ((rtlpcipriv->btcoexist.bt_service == BT_SCO)|| + (rtlpcipriv->btcoexist.bt_service == BT_BUSY)) ) + ratr_value &= 0x0fffcfc0; + else + ratr_value &= 0x0FFFFFFF; + + if (b_nmode && ((b_curtxbw_40mhz && + b_curshortgi_40mhz) || (!b_curtxbw_40mhz && + b_curshortgi_20mhz))) { + + ratr_value |= 0x10000000; + tmp_ratr_value = (ratr_value >> 12); + + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { + if ((1 << shortgi_rate) & tmp_ratr_value) + break; + } + + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | + (shortgi_rate << 4) | (shortgi_rate); + } + + rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + + RT_TRACE(COMP_RATR, DBG_DMESG, + ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0))); +} + + +static u8 _rtl8821ae_mrate_idx_to_arfr_id( + struct ieee80211_hw *hw, u8 rate_index, + enum wireless_mode wirelessmode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 ret = 0; + switch(rate_index){ + case RATR_INX_WIRELESS_NGB: + if(rtlphy->rf_type == RF_1T1R) + ret = 1; + else + ret = 0; + ;break; + case RATR_INX_WIRELESS_N: + case RATR_INX_WIRELESS_NG: + if(rtlphy->rf_type == RF_1T1R) + ret = 5; + else + ret = 4; + ;break; + case RATR_INX_WIRELESS_NB: + if(rtlphy->rf_type == RF_1T1R) + ret = 3; + else + ret = 2; + ;break; + case RATR_INX_WIRELESS_GB: + ret = 6; + break; + case RATR_INX_WIRELESS_G: + ret = 7; + break; + case RATR_INX_WIRELESS_B: + ret = 8; + break; + case RATR_INX_WIRELESS_MC: + if ((wirelessmode == WIRELESS_MODE_B) + || (wirelessmode == WIRELESS_MODE_G) + || (wirelessmode == WIRELESS_MODE_N_24G) + || (wirelessmode == WIRELESS_MODE_AC_24G)) + ret = 6; + else + ret = 7; + case RATR_INX_WIRELESS_AC_5N: + if(rtlphy->rf_type == RF_1T1R) + ret = 10; + else + ret = 9; + break; + case RATR_INX_WIRELESS_AC_24N: + if(rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + { + if(rtlphy->rf_type == RF_1T1R) + ret = 10; + else + ret = 9; + } else { + if(rtlphy->rf_type == RF_1T1R) + ret = 11; + else + ret = 12; + } + break; + default: + ret = 0;break; + } + return ret; +} + +static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + 
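/* This routine builds the 7-byte RA-mask payload (macid, ARFR id with the short-GI flag, bandwidth, then the 32-bit rate bitmap filtered by wireless mode and RSSI level) and pushes it to firmware via the H2C_8821AE_RA_MASK command. */ +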
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_sta_info * sta_entry = NULL; + u32 ratr_bitmap; + u8 ratr_index; + u8 b_curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + ? 1 : 0; + u8 b_curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ? + 1 : 0; + u8 b_curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ? + 1 : 0; + enum wireless_mode wirelessmode = 0; + bool b_shortgi = false; + u8 rate_mask[7]; + u8 macid = 0; + u8 mimo_ps = IEEE80211_SMPS_OFF; + + sta_entry = (struct rtl_sta_info *) sta->drv_priv; + wirelessmode = sta_entry->wireless_mode; + if (mac->opmode == NL80211_IFTYPE_STATION || + mac->opmode == NL80211_IFTYPE_MESH_POINT) + b_curtxbw_40mhz = mac->bw_40; + else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) + macid = sta->aid + 1; + + ratr_bitmap = sta->supp_rates[0]; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + ratr_bitmap = 0xfff; + + ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 | + sta->ht_cap.mcs.rx_mask[0] << 12); +/*mac id owner*/ + switch (wirelessmode) { + case WIRELESS_MODE_B: + ratr_index = RATR_INX_WIRELESS_B; + if (ratr_bitmap & 0x0000000c) + ratr_bitmap &= 0x0000000d; + else + ratr_bitmap &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_index = RATR_INX_WIRELESS_GB; + + if (rssi_level == 1) + ratr_bitmap &= 0x00000f00; + else if (rssi_level == 2) + ratr_bitmap &= 0x00000ff0; + else + ratr_bitmap &= 0x00000ff5; + break; + case WIRELESS_MODE_A: + ratr_index = RATR_INX_WIRELESS_G; + ratr_bitmap &= 0x00000ff0; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + if (wirelessmode == WIRELESS_MODE_N_24G) + ratr_index = RATR_INX_WIRELESS_NGB; + else + ratr_index = RATR_INX_WIRELESS_NG; + + if (mimo_ps == IEEE80211_SMPS_STATIC || mimo_ps == IEEE80211_SMPS_DYNAMIC) { + if (rssi_level == 1) + ratr_bitmap &= 0x00070000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0007f000; + else + ratr_bitmap &= 0x0007f005; + } else { + if ( rtlphy->rf_type == RF_1T1R) { + if (b_curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } + } else { + if (b_curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0fff0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0ffff000; + else + ratr_bitmap &= 0x0ffff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x0fff0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0ffff000; + else + ratr_bitmap &= 0x0ffff005; + } + } + } + if ((b_curtxbw_40mhz && b_curshortgi_40mhz) || + (!b_curtxbw_40mhz && b_curshortgi_20mhz)) { + + if (macid == 0) + b_shortgi = true; + else if (macid == 1) + b_shortgi = false; + } + break; + + case WIRELESS_MODE_AC_24G: + ratr_index = RATR_INX_WIRELESS_AC_24N; + if(rssi_level == 1) + ratr_bitmap &= 0xfc3f0000; + else if(rssi_level == 2) + ratr_bitmap &= 0xfffff000; + else + ratr_bitmap &= 0xffffffff; + break; + + case WIRELESS_MODE_AC_5G: + ratr_index = RATR_INX_WIRELESS_AC_5N; + + if (rtlphy->rf_type == RF_1T1R) + { + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + if(rssi_level == 1) /*add by Gary for ac-series*/ + ratr_bitmap &= 0x003f8000; + else if (rssi_level == 2) + ratr_bitmap &= 0x003ff000; + else + ratr_bitmap &= 0x003ff010; + } + else + ratr_bitmap &= 0x000ff010; + } + else + { + if(rssi_level == 1) /* add by Gary for 
ac-series*/ + ratr_bitmap &= 0xfe3f8000; /*VHT 2SS MCS3~9*/ + else if (rssi_level == 2) + ratr_bitmap &= 0xfffff000; /*VHT 2SS MCS0~9*/ + else + ratr_bitmap &= 0xfffff010; /*All*/ + } + break; + + default: + ratr_index = RATR_INX_WIRELESS_NGB; + + if (rtlphy->rf_type == RF_1T2R) + ratr_bitmap &= 0x000ff0ff; + else + ratr_bitmap &= 0x0f0ff0ff; + break; + + } + + sta_entry->ratr_index = ratr_index; + + RT_TRACE(COMP_RATR, DBG_DMESG, + ("ratr_bitmap :%x\n", ratr_bitmap)); + *(u32 *) & rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | + (ratr_index << 28)); + rate_mask[0] = macid; + rate_mask[1] = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode) | (b_shortgi ? 0x80 : 0x00); + rate_mask[2] = b_curtxbw_40mhz; + /* if (prox_priv->proxim_modeinfo->power_output > 0) + rate_mask[2] |= BIT(6); */ + + rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff); + rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >>8); + rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16); + rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24); + + RT_TRACE(COMP_RATR, DBG_DMESG, ("Rate_index:%x, " + "ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n", + ratr_index, ratr_bitmap, + rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], + rate_mask[4], rate_mask[5], + rate_mask[6])); + rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RA_MASK, 7, rate_mask); + _rtl8821ae_set_bcn_ctrl_reg(hw, BIT(3), 0); +} + +void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (rtlpriv->dm.b_useramask) + rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level); + else + /*RT_TRACE(COMP_RATR,DBG_LOUD,("rtl8821ae_update_hal_rate_tbl(): Error! 8821ae FW RA Only"));*/ + rtl8821ae_update_hal_rate_table(hw, sta); +} + +void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 sifs_timer; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + (u8 *) & mac->slot_time); + if (!mac->ht_enable) + sifs_timer = 0x0a0a; + else + sifs_timer = 0x0e0e; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *) & sifs_timer); +} + +bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; + u8 u1tmp = 0; + bool b_actuallyset = false; + + if (rtlpriv->rtlhal.being_init_adapter) + return false; + + if (ppsc->b_swrf_processing) + return false; + + spin_lock(&rtlpriv->locks.rf_ps_lock); + if (ppsc->rfchange_inprogress) { + spin_unlock(&rtlpriv->locks.rf_ps_lock); + return false; + } else { + ppsc->rfchange_inprogress = true; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + + cur_rfstate = ppsc->rfpwr_state; + + rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2, + rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1))); + + u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2); + + if (rtlphy->polarity_ctl) { + e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON; + } else { + e_rfpowerstate_toset = (u1tmp & BIT(1)) ? 
ERFON : ERFOFF; + } + + if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { + RT_TRACE(COMP_RF, DBG_DMESG, + ("GPIOChangeRF - HW Radio ON, RF ON\n")); + + e_rfpowerstate_toset = ERFON; + ppsc->b_hwradiooff = false; + b_actuallyset = true; + } else if ((ppsc->b_hwradiooff == false) + && (e_rfpowerstate_toset == ERFOFF)) { + RT_TRACE(COMP_RF, DBG_DMESG, + ("GPIOChangeRF - HW Radio OFF, RF OFF\n")); + + e_rfpowerstate_toset = ERFOFF; + ppsc->b_hwradiooff = true; + b_actuallyset = true; + } + + if (b_actuallyset) { + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } else { + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); + } + + *valid = 1; + return !ppsc->b_hwradiooff; + +} + +void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 *macaddr = p_macaddr; + u32 entry_id = 0; + bool is_pairwise = false; + + static u8 cam_const_addr[4][6] = { + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} + }; + static u8 cam_const_broad[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + if (clear_all) { + u8 idx = 0; + u8 cam_offset = 0; + u8 clear_number = 5; + + RT_TRACE(COMP_SEC, DBG_DMESG, ("clear_all\n")); + + for (idx = 0; idx < clear_number; idx++) { + rtl_cam_mark_invalid(hw, cam_offset + idx); + rtl_cam_empty_entry(hw, cam_offset + idx); + + if (idx < 5) { + memset(rtlpriv->sec.key_buf[idx], 0, + MAX_KEY_LEN); + rtlpriv->sec.key_len[idx] = 0; + } + } + + } else { + switch (enc_algo) { + case WEP40_ENCRYPTION: + enc_algo = CAM_WEP40; + break; + case WEP104_ENCRYPTION: + enc_algo = CAM_WEP104; + break; + case TKIP_ENCRYPTION: + enc_algo = CAM_TKIP; + break; + case AESCCMP_ENCRYPTION: + enc_algo = CAM_AES; + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, ("switch case " + "not processed\n")); + enc_algo = CAM_TKIP; + break; + } + + if (is_wepkey || rtlpriv->sec.use_defaultkey) { + macaddr = cam_const_addr[key_index]; + entry_id = key_index; + } else { + if (is_group) { + macaddr = cam_const_broad; + entry_id = key_index; + } else { + if (mac->opmode == NL80211_IFTYPE_AP) { + entry_id = rtl_cam_get_free_entry(hw, p_macaddr); + if (entry_id >= TOTAL_CAM_ENTRY) { + RT_TRACE(COMP_SEC, DBG_EMERG, + ("Cannot find free hw security cam entry\n")); + return; + } + } else { + entry_id = CAM_PAIRWISE_KEY_POSITION; + } + + key_index = PAIRWISE_KEYIDX; + is_pairwise = true; + } + } + + if (rtlpriv->sec.key_len[key_index] == 0) { + RT_TRACE(COMP_SEC, DBG_DMESG, + ("delete one entry, entry_id is %d\n", entry_id)); + if (mac->opmode == NL80211_IFTYPE_AP) + rtl_cam_del_entry(hw, p_macaddr); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); + } else { + RT_TRACE(COMP_SEC, DBG_DMESG, ("add one entry\n")); + if (is_pairwise) { + RT_TRACE(COMP_SEC, DBG_DMESG, ("set pairwise key\n")); + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[key_index]); + } else { + RT_TRACE(COMP_SEC, DBG_DMESG, ("set group key\n")); + + if (mac->opmode == 
NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); + } + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + + } + } +} + + +void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + + if (!auto_load_fail) { + value = *(u8 *) & hwinfo[EEPROM_RF_BOARD_OPTION]; + if (((value & 0xe0) >> 5) == 0x1) + rtlpriv->btcoexist.btc_info.btcoexist = 1; + else + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A; + + value = hwinfo[EEPROM_RF_BT_SETTING]; + rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); + } else { + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8812A; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; + } + /*move BT_InitHalVars() to init_sw_vars*/ +} + +void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool auto_load_fail, u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value; + u32 tmpu_32; + + if (!auto_load_fail) { + tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL); + if (tmpu_32 & BIT(18)) + rtlpriv->btcoexist.btc_info.btcoexist = 1; + else + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A; + + value = hwinfo[EEPROM_RF_BT_SETTING]; + rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); + } else { + rtlpriv->btcoexist.btc_info.btcoexist = 0; + rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8821A; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; + } + /*move BT_InitHalVars() to init_sw_vars*/ +} + +void rtl8821ae_bt_reg_init(struct ieee80211_hw* hw) +{ + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + + /* 0:Low, 1:High, 2:From Efuse. */ + rtlpcipriv->btcoexist.b_reg_bt_iso = 2; + /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */ + rtlpcipriv->btcoexist.b_reg_bt_sco = 3; + /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ + rtlpcipriv->btcoexist.b_reg_bt_sco = 0; +} + + +void rtl8821ae_bt_hw_init(struct ieee80211_hw* hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->cfg->ops->get_btc_status()) { + rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv); + } +} + +void rtl8821ae_suspend(struct ieee80211_hw *hw) +{ +} + +void rtl8821ae_resume(struct ieee80211_hw *hw) +{ +} + +/* Turn on AAP (RCR:bit 0) for promiscuous mode. */ +void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw, + bool allow_all_da, bool write_into_reg) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + if (allow_all_da) /* Set BIT0 */ + rtlpci->receive_config |= RCR_AAP; + else /* Clear BIT0 */ + rtlpci->receive_config &= ~RCR_AAP; + + if (write_into_reg) + rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config); + + + RT_TRACE(COMP_TURBO | COMP_INIT, DBG_LOUD, + ("receive_config=0x%08X, write_into_reg=%d\n", + rtlpci->receive_config, write_into_reg)); +} + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/hw.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/hw.h @@ -0,0 +1,75 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_HW_H__ +#define __RTL8821AE_HW_H__ + +void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw); + +void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw, + u32 *p_inta, u32 *p_intb); +int rtl8821ae_hw_init(struct ieee80211_hw *hw); +void rtl8821ae_card_disable(struct ieee80211_hw *hw); +void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw); +void rtl8821ae_disable_interrupt(struct ieee80211_hw *hw); +int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); +void rtl8821ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci); +void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw); +void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level); +void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); + +void rtl8821ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8* hwinfo); +void rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8* hwinfo); +void rtl8821ae_bt_reg_init(struct ieee80211_hw* hw); +void rtl8821ae_bt_hw_init(struct ieee80211_hw* hw); +void rtl8821ae_suspend(struct ieee80211_hw *hw); +void rtl8821ae_resume(struct ieee80211_hw *hw); +void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw, + bool allow_all_da, + bool write_into_reg); +void _rtl8821ae_stop_tx_beacon(struct ieee80211_hw *hw); +void _rtl8821ae_resume_tx_beacon(struct ieee80211_hw *hw); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/led.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/led.c @@ -0,0 +1,239 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "reg.h" + +static void _rtl8821ae_init_led(struct ieee80211_hw *hw, + struct rtl_led *pled, + enum rtl_led_pin ledpin) +{ + pled->hw = hw; + pled->ledpin = ledpin; + pled->b_ledon = false; +} + +void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u8 ledcfg; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(COMP_LED, DBG_LOUD, + ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin)); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + ledcfg &= ~BIT(6); + rtl_write_byte(rtlpriv, + REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5)); + break; + case LED_PIN_LED1: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + pled->b_ledon = true; +} + +void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u16 ledreg = REG_LEDCFG1; + u8 ledcfg = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (pled->ledpin) { + case LED_PIN_LED0: + ledreg = REG_LEDCFG1; + break; + + case LED_PIN_LED1: + ledreg = REG_LEDCFG2; + break; + + case LED_PIN_GPIO0: + default: + break; + } + + RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOn, LedAddr:%X LEDPIN=%d \n", ledreg, pled->ledpin)); + + ledcfg = rtl_read_byte(rtlpriv, ledreg); + ledcfg |= BIT(5); /*Set 0x4c[21]*/ + ledcfg &= ~(BIT(7) | BIT(6) | BIT(3) |BIT(2) | BIT(1) |BIT(0)); + /*Clear 0x4c[23:22] and 0x4c[19:16]*/ + rtl_write_byte(rtlpriv, ledreg, ledcfg); /*SW control led0 on.*/ + pled->b_ledon = true; +} + +void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + u8 ledcfg; + + RT_TRACE(COMP_LED, DBG_LOUD, + ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin)); + + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg &= 0xf0; + if (pcipriv->ledctl.bled_opendrain == true) { + ledcfg &= 0x90; /* Set to software control. 
*/ + rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3))); + ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); + ledcfg &= 0xFE; + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg); + } + else { + ledcfg &= ~BIT(6); + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(3) | BIT(5))); + } + break; + case LED_PIN_LED1: + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1); + ledcfg &= 0x10; /* Set to software control. */ + rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3)); + + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not processed\n")); + break; + } + pled->b_ledon = false; +} + +void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) { + u16 ledreg = REG_LEDCFG1; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + + switch (pled->ledpin) + { + case LED_PIN_LED0: + ledreg = REG_LEDCFG1; + break; + + case LED_PIN_LED1: + ledreg = REG_LEDCFG2; + break; + + case LED_PIN_GPIO0: + default: + break; + } + + RT_TRACE(COMP_LED, DBG_LOUD, ("In SwLedOff,LedAddr:%X LEDPIN=%d\n", ledreg, pled->ledpin)); + + if (pcipriv->ledctl.bled_opendrain == true) /*Open-drain arrangement for controlling the LED*/ + { + u8 ledcfg = rtl_read_byte(rtlpriv, ledreg); + + ledcfg &= 0xd0; /* Set to software control.*/ + rtl_write_byte(rtlpriv, ledreg, (ledcfg | BIT(3))); + + /*Open-drain arrangement*/ + ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG); + ledcfg &= 0xFE; /*Set GPIO[8] to input mode*/ + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg); + } + else + { + rtl_write_byte(rtlpriv, ledreg, 0x28); + } + + pled->b_ledon = false; +} + +void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + _rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0); + _rtl8821ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1); +} + +static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + switch (ledaction) { + case LED_CTL_POWER_ON: + case LED_CTL_LINK: + case LED_CTL_NO_LINK: + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_on(hw, pLed0); + else + rtl8821ae_sw_led_on(hw, pLed0); + break; + case LED_CTL_POWER_OFF: + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtl8812ae_sw_led_off(hw, pLed0); + else + rtl8821ae_sw_led_off(hw, pLed0); + break; + default: + break; + } +} + +void rtl8821ae_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && + (ledaction == LED_CTL_TX || + ledaction == LED_CTL_RX || + ledaction == LED_CTL_SITE_SURVEY || + ledaction == LED_CTL_LINK || + ledaction == LED_CTL_NO_LINK || + ledaction == LED_CTL_START_TO_LINK || + ledaction == LED_CTL_POWER_ON)) { + return; + } + RT_TRACE(COMP_LED, DBG_LOUD, ("ledaction %d, \n", + ledaction)); + _rtl8821ae_sw_led_control(hw, ledaction); +} --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/led.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/led.h @@ -0,0 +1,40 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_LED_H__ +#define __RTL8821AE_LED_H__ + +void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw); +void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl8821ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction); + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/phy.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/phy.c @@ -0,0 +1,5525 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../ps.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" +#include "table.h" +#include "trx.h" +#include "../btcoexist/halbt_precomp.h" +#include "hw.h" + +#define READ_NEXT_PAIR(array_table,v1, v2, i) do { i += 2; v1 = array_table[i]; v2 = array_table[i+1]; } while(0) + +static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask); +static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw); +static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); + +static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx); +static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw); +static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw); + +void rtl8812ae_fixspur( + struct ieee80211_hw *hw, + enum ht_channel_width band_width, + u8 channel +) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + /*C cut Item12 ADC FIFO CLOCK*/ + if(IS_VENDOR_8812A_C_CUT(rtlhal->version)) + { + if(band_width == HT_CHANNEL_WIDTH_20_40 && channel == 11) + rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x3) ; + /* 0x8AC[11:10] = 2'b11*/ + else + rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x2); + /* 0x8AC[11:10] = 2'b10*/ + + + /* <20120914, Kordan> A workarould to resolve + 2480Mhz spur by setting ADC clock as 160M. (Asked by Binson)*/ + if (band_width == HT_CHANNEL_WIDTH_20 && + (channel == 13 || channel == 14)) { + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3); + /*0x8AC[9:8] = 2'b11*/ + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); + /* 0x8C4[30] = 1*/ + } else if (band_width == HT_CHANNEL_WIDTH_20_40 && + channel == 11) { + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); + /*0x8C4[30] = 1*/ + } else if (band_width != HT_CHANNEL_WIDTH_80) { + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2); + /*0x8AC[9:8] = 2'b10*/ + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0); + /*0x8C4[30] = 0*/ + } + } + else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + /* <20120914, Kordan> A workarould to resolve + 2480Mhz spur by setting ADC clock as 160M. 
(Asked by Binson)*/ + if (band_width == HT_CHANNEL_WIDTH_20 && + (channel == 13 || channel == 14)) + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x3); + /*0x8AC[9:8] = 11*/ + else if (channel <= 14) /*2.4G only*/ + rtl_set_bbreg(hw, RRFMOD, 0x300, 0x2); + /*0x8AC[9:8] = 10*/ + } + +} + +u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 returnvalue, originalvalue, bitshift; + + RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), " + "bitmask(%#x)\n", regaddr, + bitmask)); + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + returnvalue = (originalvalue & bitmask) >> bitshift; + + RT_TRACE(COMP_RF, DBG_TRACE, ("BBR MASK=0x%x " + "Addr[0x%x]=0x%x\n", bitmask, + regaddr, originalvalue)); + + return returnvalue; + +} + +void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 originalvalue, bitshift; + + RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," + " data(%#x)\n", regaddr, bitmask, + data)); + + if (bitmask != MASKDWORD) { + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + data = ((originalvalue & (~bitmask)) | ((data << bitshift) & bitmask)); + } + + rtl_write_dword(rtlpriv, regaddr, data); + + RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," + " data(%#x)\n", regaddr, bitmask, + data)); + +} + +u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, readback_value, bitshift; + unsigned long flags; + + RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), " + "rfpath(%#x), bitmask(%#x)\n", + regaddr, rfpath, bitmask)); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + + original_value = _rtl8821ae_phy_rf_serial_read(hw,rfpath, regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + readback_value = (original_value & bitmask) >> bitshift; + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(COMP_RF, DBG_TRACE, + ("regaddr(%#x), rfpath(%#x), " + "bitmask(%#x), original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value)); + + return readback_value; +} + +void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, bitshift; + unsigned long flags; + + RT_TRACE(COMP_RF, DBG_TRACE, + ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath)); + + spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags); + + if (bitmask != RFREG_OFFSET_MASK) { + original_value = _rtl8821ae_phy_rf_serial_read(hw, + rfpath, + regaddr); + bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask); + data = + ((original_value & (~bitmask)) | + (data << bitshift)); + } + + _rtl8821ae_phy_rf_serial_write(hw, rfpath, regaddr, data); + + + spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags); + + RT_TRACE(COMP_RF, DBG_TRACE, ("regaddr(%#x), " + "bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath)); + +} + +static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool b_is_pi_mode =false; + u32 retvalue = 0; + + /* 2009/06/17 MH We can not execute IO for 
power save or other accident mode.*/ + if (RT_CANNOT_IO(hw)) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n")); + return 0xFFFFFFFF; + } + + /* <20120809, Kordan> CCA OFF(when entering), asked by James to avoid reading the wrong value. + <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/ + if (offset != 0x0 && + !((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + || (IS_VENDOR_8812A_C_CUT(rtlhal->version)))) + rtl_set_bbreg(hw, RCCAONSEC, 0x8, 1); + + offset &= 0xff; + + if (rfpath == RF90_PATH_A) + b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xC00, 0x4); + else if (rfpath == RF90_PATH_B) + b_is_pi_mode = (bool) rtl_get_bbreg(hw, 0xE00, 0x4); + + rtl_set_bbreg(hw, RHSSIREAD_8821AE, 0xff, offset); + + if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + || (IS_VENDOR_8812A_C_CUT(rtlhal->version))) + udelay(20); + + if (b_is_pi_mode) + { + if (rfpath == RF90_PATH_A) { + retvalue = rtl_get_bbreg(hw, RA_PIREAD_8821A, BLSSIREADBACKDATA); + } + else if (rfpath == RF90_PATH_B){ + retvalue = rtl_get_bbreg(hw, RB_PIREAD_8821A, BLSSIREADBACKDATA); + } + } + else + { + if (rfpath == RF90_PATH_A) { + retvalue = rtl_get_bbreg(hw, RA_SIREAD_8821A, BLSSIREADBACKDATA); + } + else if (rfpath == RF90_PATH_B){ + retvalue = rtl_get_bbreg(hw, RB_SIREAD_8821A, BLSSIREADBACKDATA); + } + } + + /*<20120809, Kordan> CCA ON(when exiting), asked by James to avoid reading the wrong value. + <20120828, Kordan> Toggling CCA would affect RF 0x0, skip it!*/ + if (offset != 0x0 && ! ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + || (IS_VENDOR_8812A_C_CUT(rtlhal->version)))) + rtl_set_bbreg(hw, RCCAONSEC, 0x8, 0); + return retvalue; +} + +#if 0 +static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 newoffset; + u32 tmplong, tmplong2; + u8 rfpi_enable = 0; + u32 retvalue; + + offset &= 0xff; + newoffset = offset; + if (RT_CANNOT_IO(hw)) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("return all one\n")); + return 0xFFFFFFFF; + } + tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); + if (rfpath == RF90_PATH_A) + tmplong2 = tmplong; + else + tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); + tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | + (newoffset << 23) | BLSSIREADEDGE; + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong & (~BLSSIREADEDGE)); + mdelay(1); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); + mdelay(1); + /*rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong | BLSSIREADEDGE);*/ + mdelay(1); + if (rfpath == RF90_PATH_A) + rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + else if (rfpath == RF90_PATH_B) + rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, + BIT(8)); + if (rfpi_enable) + retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi, + BLSSIREADBACKDATA); + else + retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, + BLSSIREADBACKDATA); + RT_TRACE(COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rflssi_readback, + retvalue)); + return retvalue; +} +#endif + +static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) +{ + u32 data_and_addr; + u32 newoffset; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + + if 
(RT_CANNOT_IO(hw)) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("stop\n")); + return; + } + offset &= 0xff; + newoffset = offset; + data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + RT_TRACE(COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf3wire_offset, + data_and_addr)); +} + +static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask) +{ + u32 i; + + for (i = 0; i <= 31; i++) { + if (((bitmask >> i) & 0x1) == 1) + break; + } + return i; +} + +bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool rtstatus = 0; + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = _rtl8812ae_phy_config_mac_with_headerfile(hw); + else + rtstatus = _rtl8821ae_phy_config_mac_with_headerfile(hw); + + return rtstatus; +} + +bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw) +{ + bool rtstatus = true; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 regval; + u8 crystal_cap; + //u32 tmp; + + _rtl8821ae_phy_init_bb_rf_register_definition(hw); + + regval = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN); + regval |= regval | FEN_PCIEA; + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, regval); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, + regval | FEN_BB_GLB_RSTN | FEN_BBRSTB); + + rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/ + rtl_write_byte(rtlpriv, REG_OPT_CTRL + 2, 0x7);/*RF_EN | RF_RSTB | RF_SDMRSTB*/ + + rtstatus = _rtl8821ae_phy_bb8821a_config_parafile(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + { + crystal_cap = rtlefuse->crystalcap & 0x3F; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0x7FF80000, (crystal_cap | (crystal_cap << 6))); + }else{ + crystal_cap = rtlefuse->crystalcap & 0x3F; + rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000, (crystal_cap | (crystal_cap << 6))); + } + rtlphy->reg_837 = rtl_read_byte(rtlpriv, 0x837); + + return rtstatus; +} + +bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw) +{ + return rtl8821ae_phy_rf6052_config(hw); +} + + +u32 phy_get_tx_bb_swing_8812A( + struct ieee80211_hw *hw, + u8 band, + u8 rf_path + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + char bb_swing_2g = (char) (-1 * 0xFF); + char bb_swing_5g = (char) (-1 * 0xFF); + u32 out = 0x200; + const char auto_temp = -1; + + RT_TRACE(COMP_SCAN, DBG_LOUD, + ("===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d\n", + (int)bb_swing_2g, (int)bb_swing_5g)); + + if ( rtlefuse->autoload_failflag) { + if ( band == BAND_ON_2_4G ) { + rtldm->bb_swing_diff_2g = bb_swing_2g; + if (bb_swing_2g == 0) out = 0x200; // 0 dB + else if (bb_swing_2g == -3) out = 0x16A; // -3 dB + else if (bb_swing_2g == -6) out = 0x101; // -6 dB + else if (bb_swing_2g == -9) out = 0x0B6; // -9 dB + else { + rtldm->bb_swing_diff_2g = 0; + out = 0x200; + } + + } else if ( band == BAND_ON_5G ) { + rtldm->bb_swing_diff_5g = bb_swing_5g; + if (bb_swing_5g == 0) out = 0x200; // 0 dB + else if (bb_swing_5g == -3) out = 0x16A; // -3 dB + else if (bb_swing_5g == -6) out = 0x101; // -6 dB + else if (bb_swing_5g == -9) out = 0x0B6; // -9 dB + else { + if ( rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + rtldm->bb_swing_diff_5g = -3; + out = 0x16A; + } else { + 
rtldm->bb_swing_diff_5g = 0; + out = 0x200; + } + } + } else { + rtldm->bb_swing_diff_2g = -3; + rtldm->bb_swing_diff_5g = -3; + out = 0x16A; // -3 dB + } + } + else + { + u32 swing = 0, swing_a = 0, swing_b = 0; + + if (band == BAND_ON_2_4G) + { + if (0xFF == auto_temp) + { + efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing); + swing = (swing == 0xFF) ? 0x00 : swing; + } + else if (bb_swing_2g == 0) swing = 0x00; // 0 dB + else if (bb_swing_2g == -3) swing = 0x05; // -3 dB + else if (bb_swing_2g == -6) swing = 0x0A; // -6 dB + else if (bb_swing_2g == -9) swing = 0xFF; // -9 dB + else swing = 0x00; + } + else + { + if (0xFF == auto_temp) + { + efuse_shadow_read(hw, 1, 0xC7, (u32 *)&swing); + swing = (swing == 0xFF) ? 0x00 : swing; + } + else if (bb_swing_5g == 0) swing = 0x00; // 0 dB + else if (bb_swing_5g == -3) swing = 0x05; // -3 dB + else if (bb_swing_5g == -6) swing = 0x0A; // -6 dB + else if (bb_swing_5g == -9) swing = 0xFF; // -9 dB + else swing = 0x00; + } + + swing_a = (swing & 0x3) >> 0; // 0xC6/C7[1:0] + swing_b = (swing & 0xC) >> 2; // 0xC6/C7[3:2] + RT_TRACE(COMP_SCAN, DBG_LOUD, + ("===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n", + swing_a, swing_b)); + + //3 Path-A + if (swing_a == 0x0) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = 0; + else + rtldm->bb_swing_diff_5g = 0; + out = 0x200; // 0 dB + } else if (swing_a == 0x1) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = -3; + else + rtldm->bb_swing_diff_5g = -3; + out = 0x16A; // -3 dB + } else if (swing_a == 0x2) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = -6; + else + rtldm->bb_swing_diff_5g = -6; + out = 0x101; // -6 dB + } else if (swing_a == 0x3) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = -9; + else + rtldm->bb_swing_diff_5g = -9; + out = 0x0B6; // -9 dB + } + + //3 Path-B + if (swing_b == 0x0) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = 0; + else + rtldm->bb_swing_diff_5g = 0; + out = 0x200; // 0 dB + } else if (swing_b == 0x1) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = -3; + else + rtldm->bb_swing_diff_5g = -3; + out = 0x16A; // -3 dB + } else if (swing_b == 0x2) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = -6; + else + rtldm->bb_swing_diff_5g = -6; + out = 0x101; // -6 dB + } else if (swing_b == 0x3) { + if (band == BAND_ON_2_4G) + rtldm->bb_swing_diff_2g = -9; + else + rtldm->bb_swing_diff_5g = -9; + out = 0x0B6; // -9 dB + } + } + + RT_TRACE(COMP_SCAN, DBG_LOUD, + ("<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out)); + return out; +} +void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_dm *rtldm = rtl_dm(rtlpriv); + u8 current_band = rtlhal->current_bandtype; + u32 txpath, rxpath; + //u8 i, value8; + char bb_diff_between_band; + + RT_TRACE(COMP_INIT, DBG_LOUD, ("\n")); + txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0); + rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000); + rtlhal->current_bandtype = (enum band_type) band; + /* reconfig BB/RF according to wireless mode */ + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + /* BB & RF Config */ + RT_TRACE(COMP_CMD, DBG_DMESG, ("2.4G\n")); + rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* 0xCB0[15:12] = 0x7 (LNA_On)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x7); + /* 0xCB0[7:4] = 0x7 (PAPE_A)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x7); + } + + 
if(rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rtl_set_bbreg(hw, 0x830, 0xE, 0x4); /*0x830[3:1] = 0x4*/ + rtl_set_bbreg(hw, 0x834, 0x3, 0x1); /*0x834[1:0] = 0x1*/ + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) + rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 0); // 0xC1C[11:8] = 0 + else + rtl_set_bbreg(hw, 0x82c, 0x3, 0); // 0x82C[1:0] = 2b'00 + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); + rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000); + rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000); + } + + rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath); + rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath); + + rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x0); + } else {/* 5G band */ + u16 count, reg_41a; + RT_TRACE(COMP_CMD, DBG_DMESG, ("5G\n")); + + if(rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /*0xCB0[15:12] = 0x5 (LNA_On)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF000, 0x5); + /*0xCB0[7:4] = 0x4 (PAPE_A)*/ + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xF0, 0x4); + } + /*CCK_CHECK_en*/ + rtl_write_byte(rtlpriv, REG_CCK_CHECK, 0x80); + + count = 0; + reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); + RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a)); + reg_41a &= 0x30; + while ((reg_41a!= 0x30) && (count < 50)) { + udelay(50); + RT_TRACE(COMP_SCAN, DBG_LOUD, ("Delay 50us \n")); + + reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); + reg_41a &= 0x30; + count++; + RT_TRACE(COMP_SCAN, DBG_LOUD, ("Reg41A value %d", reg_41a)); + } + if (count != 0) + RT_TRACE(COMP_MLME, DBG_LOUD, + ("PHY_SwitchWirelessBand8812(): Switch to 5G Band. " + "Count = %d reg41A=0x%x\n", count, reg_41a)); + + // 2012/02/01, Sinda add registry to switch workaround without long-run verification for scan issue. + rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN|BCCKEN, 0x03); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rtl_set_bbreg(hw, 0x830, 0xE, 0x3); /*0x830[3:1] = 0x3*/ + rtl_set_bbreg(hw, 0x834, 0x3, 0x2); /*0x834[1:0] = 0x2*/ + } + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + /* AGC table select */ + rtl_set_bbreg(hw, RA_TXSCALE, 0xF00, 1); /* 0xC1C[11:8] = 1*/ + } else + rtl_set_bbreg(hw, 0x82c, 0x3, 1); // 0x82C[1:0] = 2'b00 + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777); + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); + rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010); + rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010); + } + + rtl_set_bbreg(hw, RTXPATH, 0xf0, txpath); + rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, rxpath); + + RT_TRACE(COMP_SCAN, DBG_LOUD, + ("==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n", + rtlpriv->dm.ofdm_index[RF90_PATH_A])); + } + + if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) || + (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)) { + rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000, + phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_A)); // 0xC1C[31:21] + rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000, + phy_get_tx_bb_swing_8812A(hw, band, RF90_PATH_B)); // 0xE1C[31:21] + + /* <20121005, Kordan> When TxPowerTrack is ON, we should take care of the change of BB swing. + That is, reset all info to trigger Tx power tracking.*/ + if (band != current_band) { + bb_diff_between_band = (rtldm->bb_swing_diff_2g - rtldm->bb_swing_diff_5g); + bb_diff_between_band = (band == BAND_ON_2_4G) ? 
bb_diff_between_band : (-1 * bb_diff_between_band); + rtldm->default_ofdm_index += bb_diff_between_band * 2; + } + rtl8821ae_dm_clear_txpower_tracking_state(hw); + } + + RT_TRACE(COMP_SCAN, DBG_TRACE, + ("<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n")); + return; +} + +static bool _rtl8821ae_check_condition(struct ieee80211_hw *hw, + const u32 Condition + ) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 _board = rtlefuse->board_type; /*need efuse define*/ + u32 _interface = rtlhal->interface; + u32 _platform = 0x08;/*SupportPlatform */ + u32 cond = Condition; + + if ( Condition == 0xCDCDCDCD ) + return true; + + cond = Condition & 0xFF; + if ( (_board != cond) == 0 && cond != 0xFF) + return false; + + cond = Condition & 0xFF00; + cond = cond >> 8; + if ( (_interface & cond) == 0 && cond != 0x07) + return false; + + cond = Condition & 0xFF0000; + cond = cond >> 16; + if ( (_platform & cond) == 0 && cond != 0x0F) + return false; + return true; +} + +static void _rtl8821ae_config_rf_reg(struct ieee80211_hw *hw, + u32 addr, + u32 data, + enum radio_path rfpath, + u32 regaddr + ) +{ + if ( addr == 0xfe || addr == 0xffe) { + mdelay(50); + } else { + rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data); + udelay(1); + } +} + +static void _rtl8821ae_config_rf_radio_a(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1000; /*RF Content: radio_a_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_A, addr | maskforphyset); + +} + +static void _rtl8821ae_config_rf_radio_b(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + u32 content = 0x1001; /*RF Content: radio_b_txt*/ + u32 maskforphyset = (u32)(content & 0xE000); + + _rtl8821ae_config_rf_reg(hw, addr, data, RF90_PATH_B, addr | maskforphyset); + +} + +static void _rtl8812ae_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if ( addr == 0xfe) { + mdelay(50); + } else if ( addr == 0xfd) + mdelay(5); + else if ( addr == 0xfc) + mdelay(1); + else if ( addr == 0xfb) + udelay(50); + else if ( addr == 0xfa) + udelay(5); + else if ( addr == 0xf9) + udelay(1); + else { + rtl_set_bbreg(hw, addr, MASKDWORD,data); + } + udelay(1); +} + +static void _rtl8821ae_config_bb_reg(struct ieee80211_hw *hw, + u32 addr, u32 data) +{ + if ( addr == 0xfe) { + mdelay(50); + } else if ( addr == 0xfd) + mdelay(5); + else if ( addr == 0xfc) + mdelay(1); + else if ( addr == 0xfb) + udelay(50); + else if ( addr == 0xfa) + udelay(5); + else if ( addr == 0xf9) + udelay(1); + + rtl_set_bbreg(hw, addr, MASKDWORD,data); + udelay(1); +} + +static void _rtl8821ae_phy_init_tx_power_by_rate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + u8 band, rfpath, txnum, rate_section; + + for ( band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band ) + for ( rfpath = 0; rfpath < TX_PWR_BY_RATE_NUM_RF; ++rfpath ) + for ( txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum ) + for ( rate_section = 0; rate_section < TX_PWR_BY_RATE_NUM_SECTION; ++rate_section ) + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = 0; +} + +void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, + u8 rate_section, + u8 txnum, u8 value) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (path > RF90_PATH_D) { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid Rf Path %d in 
phy_SetTxPowerByRatBase()\n", path)); + return; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value; + break; + case OFDM: + rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value; + break; + case HT_MCS0_MCS7: + rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value; + break; + case HT_MCS8_MCS15: + rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value; + break; + case VHT_1SSMCS0_1SSMCS9: + rtlphy->txpwr_by_rate_base_24g[path][txnum][4] = value; + break; + case VHT_2SSMCS0_2SSMCS9: + rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value; + break; + default: + RT_TRACE(COMP_INIT, DBG_LOUD, ( "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", + rate_section, path, txnum ) ); + break; + }; + } else if (band == BAND_ON_5G) { + switch (rate_section) { + case OFDM: + rtlphy->txpwr_by_rate_base_5g[path][txnum][0] = value; + break; + case HT_MCS0_MCS7: + rtlphy->txpwr_by_rate_base_5g[path][txnum][1] = value; + break; + case HT_MCS8_MCS15: + rtlphy->txpwr_by_rate_base_5g[path][txnum][2] = value; + break; + case VHT_1SSMCS0_1SSMCS9: + rtlphy->txpwr_by_rate_base_5g[path][txnum][3] = value; + break; + case VHT_2SSMCS0_2SSMCS9: + rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value; + break; + default: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid RateSection %d in Band 5G, Rf Path %d, " + "%dTx in PHY_SetTxPowerByRateBase()\n", + rate_section, path, txnum)); + break; + }; + } else { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band)); + } + +} + +u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, + u8 band, u8 path, + u8 txnum, u8 rate_section) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 value = 0; + + if (path > RF90_PATH_D) { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n", path)); + return 0; + } + + if (band == BAND_ON_2_4G) { + switch (rate_section) { + case CCK: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0]; + break; + case OFDM: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1]; + break; + case HT_MCS0_MCS7: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2]; + break; + case HT_MCS8_MCS15: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3]; + break; + case VHT_1SSMCS0_1SSMCS9: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][4]; + break; + case VHT_2SSMCS0_2SSMCS9: + value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5]; + break; + default: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid RateSection %d in Band 2.4G, Rf Path %d," + " %dTx in PHY_GetTxPowerByRateBase()\n", + rate_section, path, txnum)); + break; + }; + } else if (band == BAND_ON_5G) { + switch (rate_section) { + case OFDM: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][0]; + break; + case HT_MCS0_MCS7: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][1]; + break; + case HT_MCS8_MCS15: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][2]; + break; + case VHT_1SSMCS0_1SSMCS9: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][3]; + break; + case VHT_2SSMCS0_2SSMCS9: + value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4]; + break; + default: + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid RateSection %d in Band 5G, Rf Path %d," + " %dTx in PHY_GetTxPowerByRateBase()\n", + rate_section, path, txnum)); + break; + }; + } else { + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band)); + } + + 
return value; + +} +void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u16 rawValue = 0; + u8 base = 0, path = 0; + + for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base ); + + rawValue = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF; + base = (rawValue >> 4) * 10 + (rawValue & 0xF); + _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base ); + } +} + +void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, + u8 end, u8 base_val) +{ + char i = 0; + u8 temp_value = 0; + u32 temp_data = 0; + + for (i = 3; i >= 0; --i) + { + if (i >= start && i <= end) { + // Get the exact value + temp_value = (u8) (*data >> (i * 8)) & 0xF; + temp_value += ((u8) ((*data >> (i * 8 + 4)) & 0xF)) * 10; + + // Change the value to a relative value + temp_value = (temp_value > base_val) ? 
temp_value - base_val : base_val - temp_value; + } else { + temp_value = (u8) (*data >> (i * 8)) & 0xFF; + } + temp_data <<= 8; + temp_data |= temp_value; + } + *data = temp_data; +} + +void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 base = 0, rfPath = 0; + + for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) { + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G CCK 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G OFDM 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS0-7 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G HTMCS8-15 2TX: %d\n", base ) ); + + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5] ), + 0, 3, base ); + + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT1SSMCS0-9 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ), + 0, 1, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 2.4G VHT2SSMCS0-9 2TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9] ), + 2, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G OFDM 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1] ), + 0, 3, base ); + 
_phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS0-7 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G HTMCS8-15 2TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6] ), + 0, 3, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT1SSMCS0-9 1TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ), + 0, 1, base ); + + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9 ); + RT_DISP( FPHY, PHY_TXPWR, ( "base of 5G VHT2SSMCS0-9 2TX: %d\n", base ) ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9] ), + 2, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10] ), + 0, 3, base ); + _phy_convert_txpower_dbm_to_relative_value( + &(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11] ), + 0, 3, base ); + } + + RT_TRACE(COMP_POWER, DBG_TRACE, + ("<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n")); + +} + +void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw) +{ + _rtl8821ae_phy_store_txpower_by_rate_base(hw); + _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(hw); +} + +static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool rtstatus; + + /*TX POWER LIMIT + PHY_InitTxPowerLimit + PHY_ConfigRFWithCustomPowerLimitTableParaFile*/ + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_PHY_REG); + else{ + rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_PHY_REG); + } + if (rtstatus != true) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!")); + return false; + } + _rtl8821ae_phy_init_tx_power_by_rate(hw); + if (rtlefuse->autoload_failflag == false) { + //rtlphy->pwrgroup_cnt = 0; + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = _rtl8812ae_phy_config_bb_with_pgheaderfile(hw, + BASEBAND_CONFIG_PHY_REG); + else{ + rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw, + BASEBAND_CONFIG_PHY_REG); + } + 
} + if (rtstatus != true) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!")); + return false; + } + + _rtl8821ae_phy_txpower_by_rate_configuration(hw); + + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = _rtl8812ae_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_AGC_TAB); + else + rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, + BASEBAND_CONFIG_AGC_TAB); + + if (rtstatus != true) { + RT_TRACE(COMP_ERR, DBG_EMERG, ("AGC Table Fail\n")); + return false; + } + rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + 0x200)); + return true; +} + +static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i, v1, v2; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8812AE_MAC_REG_Array\n")); + arraylength = RTL8812AEMAC_1T_ARRAYLEN; + ptrarray = RTL8812AE_MAC_REG_ARRAY; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Img:RTL8812AE_MAC_REG_ARRAY LEN %d\n",arraylength)); + for (i = 0; i < arraylength; i += 2) { + v1 = ptrarray[i]; + v2 = (u8) ptrarray[i + 1]; + if (v1<0xCDCDCDCD) { + rtl_write_byte(rtlpriv, v1, (u8) v2); + } else { + if (!_rtl8821ae_check_condition(hw,v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(ptrarray, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylength -2) + READ_NEXT_PAIR(ptrarray, v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(ptrarray, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylength -2) { + rtl_write_byte(rtlpriv,v1,v2); + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylength -2) + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + } + } + return true; +} + +static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i, v1, v2; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(COMP_INIT, DBG_TRACE, ("Read rtl8821AE_MAC_REG_Array\n")); + arraylength = RTL8821AEMAC_1T_ARRAYLEN; + ptrarray = RTL8821AE_MAC_REG_ARRAY; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Img:RTL8821AE_MAC_REG_ARRAY LEN %d\n",arraylength)); + for (i = 0; i < arraylength; i += 2) { + v1 = ptrarray[i]; + v2 = (u8) ptrarray[i + 1]; + if (v1<0xCDCDCDCD) { + rtl_write_byte(rtlpriv, v1, (u8) v2); + continue; + } else { + if (!_rtl8821ae_check_condition(hw,v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(ptrarray, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylength -2) + READ_NEXT_PAIR(ptrarray, v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(ptrarray, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylength -2) { + rtl_write_byte(rtlpriv,v1,v2); + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylength -2) + READ_NEXT_PAIR(ptrarray, v1, v2, i); + } + } + } + return true; +} + +static bool _rtl8812ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) +{ + int i; + u32 *array_table; + u16 arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + arraylen = RTL8812AEPHY_REG_1TARRAYLEN; + array_table = RTL8812AE_PHY_REG_ARRAY; + + for (i = 0; i < arraylen; i += 2) { + v1 = array_table[i]; + v2 = 
array_table[i+1]; + if (v1<0xCDCDCDCD) { + _rtl8812ae_config_bb_reg(hw, v1, v2); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw,v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(array_table,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) + READ_NEXT_PAIR(array_table,v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(array_table,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) { + _rtl8812ae_config_bb_reg(hw,v1,v2); + READ_NEXT_PAIR(array_table,v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen -2) + READ_NEXT_PAIR(array_table,v1, v2, i); + } + } + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + arraylen = RTL8812AEAGCTAB_1TARRAYLEN; + array_table = RTL8812AE_AGC_TAB_ARRAY; + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xCDCDCDCD) { + rtl_set_bbreg(hw, v1, MASKDWORD, v2); + udelay(1); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw,v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(array_table,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) + READ_NEXT_PAIR(array_table,v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + }else{/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(array_table,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) + { + rtl_set_bbreg(hw, v1, MASKDWORD, v2); + udelay(1); + READ_NEXT_PAIR(array_table,v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen -2) + READ_NEXT_PAIR(array_table,v1, v2, i); + } + } + RT_TRACE(COMP_INIT, DBG_TRACE, + ("The agctab_array_table[0] is " + "%x Rtl818EEPHY_REGArray[1] is %x \n", + array_table[i], + array_table[i + 1])); + } + } + return true; +} + +static bool _rtl8821ae_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) +{ + int i; + u32 *array_table; + u16 arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + arraylen = RTL8821AEPHY_REG_1TARRAYLEN; + array_table = RTL8821AE_PHY_REG_ARRAY; + + for (i = 0; i < arraylen; i += 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1<0xCDCDCDCD) { + _rtl8821ae_config_bb_reg(hw, v1, v2); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw,v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) + READ_NEXT_PAIR(array_table, v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) { + _rtl8821ae_config_bb_reg(hw,v1,v2); + READ_NEXT_PAIR(array_table, v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen -2) + READ_NEXT_PAIR(array_table, v1, v2, i); + } + } + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + arraylen = RTL8821AEAGCTAB_1TARRAYLEN; + array_table = RTL8821AE_AGC_TAB_ARRAY; + + for (i = 0; i < arraylen; i = i + 2) { + v1 = array_table[i]; + v2 = array_table[i+1]; + if (v1 < 0xCDCDCDCD) { + rtl_set_bbreg(hw, v1, MASKDWORD, v2); + 
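+ /*
+  * Each matched AGC-table entry is written as a full 32-bit word; the
+  * short delay below presumably gives the baseband time to latch the
+  * value before the next entry is programmed.
+  */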
udelay(1); + continue; + } else {/*This line is the start line of branch.*/ + if (!_rtl8821ae_check_condition(hw,v1)) { + /*Discard the following (offset, data) pairs*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) + READ_NEXT_PAIR(array_table, v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + }else{/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_PAIR(array_table, v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < arraylen -2) + { + rtl_set_bbreg(hw, v1, MASKDWORD, v2); + udelay(1); + READ_NEXT_PAIR(array_table, v1, v2, i); + } + + while (v2 != 0xDEAD && i < arraylen -2) + READ_NEXT_PAIR(array_table, v1, v2, i); + } + } + RT_TRACE(COMP_INIT, DBG_TRACE, + ("The agctab_array_table[0] is " + "%x Rtl818EEPHY_REGArray[1] is %x \n", + array_table[i], + array_table[i + 1])); + } + } + return true; +} + +static u8 _rtl8821ae_get_rate_selection_index(u32 regaddr) +{ + u8 index = 0; + + regaddr &= 0xFFF; + if (regaddr >= 0xC20 && regaddr <= 0xC4C) + index = (u8) ((regaddr - 0xC20) / 4); + else if (regaddr >= 0xE20 && regaddr <= 0xE4C) + index = (u8) ((regaddr - 0xE20) / 4); + else + RT_ASSERT(!COMP_INIT, + ("Invalid RegAddr 0x%x in" + "PHY_GetRateSectionIndexOfTxPowerByRate()\n",regaddr)); + + return index; +} + +static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw, + u32 band, u32 rfpath, + u32 txnum, u32 regaddr, + u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 rate_section = _rtl8821ae_get_rate_selection_index(regaddr); + + if (band != BAND_ON_2_4G && band != BAND_ON_5G) + RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid Band %d\n", band)); + + if (rfpath > MAX_RF_PATH) + RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid RfPath %d\n", rfpath)); + + if (txnum > MAX_RF_PATH) + RT_TRACE(COMP_INIT, DBG_WARNING, ("Invalid TxNum %d\n", txnum ) ); + + rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data; + RT_TRACE(COMP_INIT, DBG_WARNING,( "pHalData->TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n", + band, rfpath, txnum, rate_section, rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section])); + +} + +static bool _rtl8812ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i; + u32 *phy_regarray_table_pg; + u16 phy_regarray_pg_len; + u32 v1, v2, v3, v4, v5, v6; + + phy_regarray_pg_len = RTL8812AEPHY_REG_ARRAY_PGLEN; + phy_regarray_table_pg = RTL8812AE_PHY_REG_ARRAY_PG; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_regarray_pg_len; i += 6) { + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + v4 = phy_regarray_table_pg[i+3]; + v5 = phy_regarray_table_pg[i+4]; + v6 = phy_regarray_table_pg[i+5]; + + if (v1<0xCDCDCDCD) { + if ( (v4 == 0xfe) || (v4 == 0xffe)) + mdelay(50); + else + /*_rtl8821ae_store_pwrIndex_diffrate_offset*/ + _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6); + continue; + } else { + if (!_rtl8821ae_check_condition(hw,v1)) { /*don't need the hw_body*/ + i += 2; /* skip the pair of expression*/ + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + while (v2 != 0xDEAD) { + i += 3; + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + } + } + } + } + } else { + + 
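+ /*
+  * Only BASEBAND_CONFIG_PHY_REG carries power-by-rate data: each entry
+  * is a 6-word tuple, and non-delay entries are handed to
+  * _rtl8821ae_store_tx_power_by_rate(). The AGC table is programmed
+  * separately by _rtl8812ae_phy_config_bb_with_headerfile().
+  */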
RT_TRACE(COMP_SEND, DBG_TRACE, + ("configtype != BaseBand_Config_PHY_REG\n")); + } + return true; +} + +static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int i; + u32 *phy_regarray_table_pg; + u16 phy_regarray_pg_len; + u32 v1, v2, v3, v4, v5, v6; + + phy_regarray_pg_len = RTL8821AEPHY_REG_ARRAY_PGLEN; + phy_regarray_table_pg = RTL8821AE_PHY_REG_ARRAY_PG; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_regarray_pg_len; i += 6) { + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + v4 = phy_regarray_table_pg[i+3]; + v5 = phy_regarray_table_pg[i+4]; + v6 = phy_regarray_table_pg[i+5]; + + if (v1<0xCDCDCDCD) { + if (v4 == 0xfe) + mdelay(50); + else if (v4 == 0xfd) + mdelay(5); + else if (v4 == 0xfc) + mdelay(1); + else if (v4 == 0xfb) + udelay(50); + else if (v4 == 0xfa) + udelay(5); + else if (v4 == 0xf9) + udelay(1); + + /*_rtl8821ae_store_pwrIndex_diffrate_offset*/ + _rtl8821ae_store_tx_power_by_rate(hw, v1, v2, v3, v4, v5, v6); + continue; + } else { + if (!_rtl8821ae_check_condition(hw,v1)) { /*don't need the hw_body*/ + i += 2; /* skip the pair of expression*/ + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + while (v2 != 0xDEAD) { + i += 3; + v1 = phy_regarray_table_pg[i]; + v2 = phy_regarray_table_pg[i+1]; + v3 = phy_regarray_table_pg[i+2]; + } + } + } + } + } else { + + RT_TRACE(COMP_SEND, DBG_TRACE, + ("configtype != BaseBand_Config_PHY_REG\n")); + } + return true; +} + +bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw * hw, + enum radio_path rfpath) +{ + #define READ_NEXT_RF_PAIR_8812(radioa_array_table,v1, v2, i) do { i += 2; v1 = radioa_array_table[i]; v2 = radioa_array_table[i+1]; } while(0) + + int i; + bool rtstatus = true; + u32 *radioa_array_table_a, *radioa_array_table_b; + u16 radioa_arraylen_a, radioa_arraylen_b; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 v1 = 0, v2 = 0; + + radioa_arraylen_a = RTL8812AE_RADIOA_1TARRAYLEN; + radioa_array_table_a= RTL8812AE_RADIOA_ARRAY; + radioa_arraylen_b= RTL8812AE_RADIOB_1TARRAYLEN; + radioa_array_table_b = RTL8812AE_RADIOB_ARRAY; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen_a)); + RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath)); + rtstatus = true; + switch (rfpath) { + case RF90_PATH_A: + for (i = 0; i < radioa_arraylen_a; i = i + 2) { + v1 = radioa_array_table_a[i]; + v2 = radioa_array_table_a[i+1]; + if (v1<0xcdcdcdcd) { + _rtl8821ae_config_rf_radio_a(hw,v1,v2); + continue; + }else{/*This line is the start line of branch.*/ + if(!_rtl8821ae_check_condition(hw,v1)){ + /*Discard the following (offset, data) pairs*/ + READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_a-2) + READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_a -2) { + _rtl8821ae_config_rf_radio_a(hw,v1,v2); + READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i); + } + + while (v2 != 0xDEAD && i < radioa_arraylen_a-2) + READ_NEXT_RF_PAIR_8812(radioa_array_table_a,v1, v2, i); + } + } + } + break; + case RF90_PATH_B: + for (i = 0; i < 
radioa_arraylen_b; i = i + 2) { + v1 = radioa_array_table_b[i]; + v2 = radioa_array_table_b[i+1]; + if (v1<0xcdcdcdcd) { + _rtl8821ae_config_rf_radio_b(hw,v1,v2); + continue; + }else{/*This line is the start line of branch.*/ + if(!_rtl8821ae_check_condition(hw,v1)){ + /*Discard the following (offset, data) pairs*/ + READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_b-2) + READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen_b-2) { + _rtl8821ae_config_rf_radio_b(hw,v1,v2); + READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i); + } + + while (v2 != 0xDEAD && i < radioa_arraylen_b-2) + READ_NEXT_RF_PAIR_8812(radioa_array_table_b,v1, v2, i); + } + } + } + break; + case RF90_PATH_C: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + case RF90_PATH_D: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + return true; +} + + +bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw * hw, + enum radio_path rfpath) +{ + #define READ_NEXT_RF_PAIR(v1, v2, i) do { i += 2; v1 = radioa_array_table[i]; v2 = radioa_array_table[i+1]; } while(0) + + int i; + bool rtstatus = true; + u32 *radioa_array_table; + u16 radioa_arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 v1 = 0, v2 = 0; + + radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN; + radioa_array_table = RTL8821AE_RADIOA_ARRAY; + RT_TRACE(COMP_INIT, DBG_LOUD, + ("Radio_A:RTL8821AE_RADIOA_ARRAY %d\n",radioa_arraylen)); + RT_TRACE(COMP_INIT, DBG_LOUD, ("Radio No %x\n", rfpath)); + rtstatus = true; + switch (rfpath) { + case RF90_PATH_A: + for (i = 0; i < radioa_arraylen; i = i + 2) { + v1 = radioa_array_table[i]; + v2 = radioa_array_table[i+1]; + if (v1<0xcdcdcdcd) { + _rtl8821ae_config_rf_radio_a(hw,v1,v2); + }else{/*This line is the start line of branch.*/ + if(!_rtl8821ae_check_condition(hw,v1)){ + /*Discard the following (offset, data) pairs*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen -2) + READ_NEXT_RF_PAIR(v1, v2, i); + + i -= 2; /* prevent from for-loop += 2*/ + } else {/*Configure matched pairs and skip to end of if-else.*/ + READ_NEXT_RF_PAIR(v1, v2, i); + while (v2 != 0xDEAD && + v2 != 0xCDEF && + v2 != 0xCDCD && i < radioa_arraylen -2) { + _rtl8821ae_config_rf_radio_a(hw,v1,v2); + READ_NEXT_RF_PAIR(v1, v2, i); + } + + while (v2 != 0xDEAD && i < radioa_arraylen -2) + READ_NEXT_RF_PAIR(v1, v2, i); + } + } + } + break; + + case RF90_PATH_B: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + case RF90_PATH_C: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + case RF90_PATH_D: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + return true; +} + +void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->default_initialgain[0] = + (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + (u8) 
rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + + RT_TRACE(COMP_INIT, DBG_TRACE, + ("Default initial gain (c50=0x%x, " + "c58=0x%x, c60=0x%x, c68=0x%x \n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3])); + + rtlphy->framesync = (u8) rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR3, MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR2, MASKDWORD); + + RT_TRACE(COMP_INIT, DBG_TRACE, + ("Default framesync (0x%x) = 0x%x \n", + ROFDM0_RXDETECTOR3, rtlphy->framesync)); +} + +static void _rtl8821ae_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = RA_LSSIWRITE_8821A; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RB_LSSIWRITE_8821A; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RHSSIREAD_8821AE; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RHSSIREAD_8821AE; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = RA_SIREAD_8821A; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = RB_SIREAD_8821A; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = RA_PIREAD_8821A; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi = RB_PIREAD_8821A; +} + +void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 txpwr_level; + long txpwr_dbm; + + txpwr_level = rtlphy->cur_cck_txpwridx; + txpwr_dbm = _rtl8821ae_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_B, txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl8821ae_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl8821ae_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); + *powerlevel = txpwr_dbm; +} + +static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) +{ + u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = + {36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,100,102,104,106,108,110,112, + 114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,149,151, + 153,155,157,159,161,163,165,167,168,169,171,173,175,177}; + u8 i = 0; + bool in_24g = true; + + if (channel <= 14) { + in_24g = true; + *chnl_index = channel - 1; + } else { + in_24g = false; + + for (i = 0; i < sizeof(channel_5g) / sizeof(u8); ++i) { + if (channel_5g[i] == channel) { + *chnl_index = i; + return in_24g; + } + } + } + return in_24g; +} + +static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) +{ + char rate_section = 0; + switch (rate) { + case DESC_RATE1M: + case DESC_RATE2M: + case DESC_RATE5_5M: + case DESC_RATE11M: + rate_section = 0; + break; + + case DESC_RATE6M: + case DESC_RATE9M: 
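+ /*
+  * Rates are grouped four to a section here; each section shares one
+  * 32-bit word of tx_power_by_rate_offset[], one byte per rate (see the
+  * shift table in _rtl8821ae_phy_get_txpower_by_rate() below).
+  */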
+ case DESC_RATE12M: + case DESC_RATE18M: + rate_section = 1; + break; + + case DESC_RATE24M: + case DESC_RATE36M: + case DESC_RATE48M: + case DESC_RATE54M: + rate_section = 2; + break; + + case DESC_RATEMCS0: + case DESC_RATEMCS1: + case DESC_RATEMCS2: + case DESC_RATEMCS3: + rate_section = 3; + break; + + case DESC_RATEMCS4: + case DESC_RATEMCS5: + case DESC_RATEMCS6: + case DESC_RATEMCS7: + rate_section = 4; + break; + + case DESC_RATEMCS8: + case DESC_RATEMCS9: + case DESC_RATEMCS10: + case DESC_RATEMCS11: + rate_section = 5; + break; + + case DESC_RATEMCS12: + case DESC_RATEMCS13: + case DESC_RATEMCS14: + case DESC_RATEMCS15: + rate_section = 6; + break; + + case DESC_RATEVHT1SS_MCS0: + case DESC_RATEVHT1SS_MCS1: + case DESC_RATEVHT1SS_MCS2: + case DESC_RATEVHT1SS_MCS3: + rate_section = 7; + break; + + case DESC_RATEVHT1SS_MCS4: + case DESC_RATEVHT1SS_MCS5: + case DESC_RATEVHT1SS_MCS6: + case DESC_RATEVHT1SS_MCS7: + rate_section = 8; + break; + + case DESC_RATEVHT1SS_MCS8: + case DESC_RATEVHT1SS_MCS9: + case DESC_RATEVHT2SS_MCS0: + case DESC_RATEVHT2SS_MCS1: + rate_section = 9; + break; + + case DESC_RATEVHT2SS_MCS2: + case DESC_RATEVHT2SS_MCS3: + case DESC_RATEVHT2SS_MCS4: + case DESC_RATEVHT2SS_MCS5: + rate_section = 10; + break; + + case DESC_RATEVHT2SS_MCS6: + case DESC_RATEVHT2SS_MCS7: + case DESC_RATEVHT2SS_MCS8: + case DESC_RATEVHT2SS_MCS9: + rate_section = 11; + break; + + default: + RT_ASSERT(true, ("Rate_Section is Illegal\n")); + break; + } + + return rate_section; +} + +static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, + u8 band, u8 path, u8 rate) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 shift = 0, rate_section, tx_num; + char tx_pwr_diff = 0; + + rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate); + tx_num = RF_TX_NUM_NONIMPLEMENT; + + if (tx_num == RF_TX_NUM_NONIMPLEMENT) { + if ((rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15 ) || + (rate >= DESC_RATEVHT2SS_MCS2 && rate <= DESC_RATEVHT2SS_MCS9)) + tx_num = RF_2TX; + else + tx_num = RF_1TX; + } + + switch (rate) { + case DESC_RATE1M: shift = 0; break; + case DESC_RATE2M: shift = 8; break; + case DESC_RATE5_5M: shift = 16; break; + case DESC_RATE11M: shift = 24; break; + + case DESC_RATE6M: shift = 0; break; + case DESC_RATE9M: shift = 8; break; + case DESC_RATE12M: shift = 16; break; + case DESC_RATE18M: shift = 24; break; + + case DESC_RATE24M: shift = 0; break; + case DESC_RATE36M: shift = 8; break; + case DESC_RATE48M: shift = 16; break; + case DESC_RATE54M: shift = 24; break; + + case DESC_RATEMCS0: shift = 0; break; + case DESC_RATEMCS1: shift = 8; break; + case DESC_RATEMCS2: shift = 16; break; + case DESC_RATEMCS3: shift = 24; break; + + case DESC_RATEMCS4: shift = 0; break; + case DESC_RATEMCS5: shift = 8; break; + case DESC_RATEMCS6: shift = 16; break; + case DESC_RATEMCS7: shift = 24; break; + + case DESC_RATEMCS8: shift = 0; break; + case DESC_RATEMCS9: shift = 8; break; + case DESC_RATEMCS10: shift = 16; break; + case DESC_RATEMCS11: shift = 24; break; + + case DESC_RATEMCS12: shift = 0; break; + case DESC_RATEMCS13: shift = 8; break; + case DESC_RATEMCS14: shift = 16; break; + case DESC_RATEMCS15: shift = 24; break; + + case DESC_RATEVHT1SS_MCS0: shift = 0; break; + case DESC_RATEVHT1SS_MCS1: shift = 8; break; + case DESC_RATEVHT1SS_MCS2: shift = 16; break; + case DESC_RATEVHT1SS_MCS3: shift = 24; break; + + case DESC_RATEVHT1SS_MCS4: shift = 0; break; + case DESC_RATEVHT1SS_MCS5: shift = 8; break; + case 
DESC_RATEVHT1SS_MCS6: shift = 16; break; + case DESC_RATEVHT1SS_MCS7: shift = 24; break; + + case DESC_RATEVHT1SS_MCS8: shift = 0; break; + case DESC_RATEVHT1SS_MCS9: shift = 8; break; + case DESC_RATEVHT2SS_MCS0: shift = 16; break; + case DESC_RATEVHT2SS_MCS1: shift = 24; break; + + case DESC_RATEVHT2SS_MCS2: shift = 0; break; + case DESC_RATEVHT2SS_MCS3: shift = 8; break; + case DESC_RATEVHT2SS_MCS4: shift = 16; break; + case DESC_RATEVHT2SS_MCS5: shift = 24; break; + + case DESC_RATEVHT2SS_MCS6: shift = 0; break; + case DESC_RATEVHT2SS_MCS7: shift = 8; break; + case DESC_RATEVHT2SS_MCS8: shift = 16; break; + case DESC_RATEVHT2SS_MCS9: shift = 24; break; + + default: + RT_ASSERT(true, ("Rate_Section is Illegal\n")); + break; + } + + tx_pwr_diff = (u8) (rtlphy->tx_power_by_rate_offset[band][path][tx_num][rate_section] >> shift) & 0xff; + + return tx_pwr_diff; +} + +static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path, + u8 rate, u8 bandwidth, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 index = (channel - 1); + u8 txpower = 0; + bool in_24g = false; + char powerdiff_byrate = 0; + + if (((rtlhal->current_bandtype == BAND_ON_2_4G) && (channel > 14 || channel < 1)) || + ((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) { + index = 0; + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("Illegal channel!!\n")); + } + + in_24g = _rtl8821ae_phy_get_chnl_index(channel, &index); + if (in_24g) { + if (RX_HAL_IS_CCK_RATE(rate)) + txpower = rtlefuse->txpwrlevel_cck[path][index]; + else if ( DESC_RATE6M <= rate ) + txpower = rtlefuse->txpwrlevel_ht40_1s[path][index]; + else + RT_TRACE(COMP_POWER_TRACKING, DBG_LOUD, ("invalid rate\n")); + + if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate)) + txpower += rtlefuse->txpwr_legacyhtdiff[path][TX_1S]; + + if (bandwidth == HT_CHANNEL_WIDTH_20) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht20diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht20diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_80) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_ht40diff[path][TX_2S]; + } + + } else { + if (DESC_RATE6M <= rate) + txpower = rtlefuse->txpwr_5g_bw40base[path][index]; + else + RT_TRACE(COMP_POWER_TRACKING, DBG_WARNING,("INVALID Rate.\n")); + + if (DESC_RATE6M <= rate && rate <= DESC_RATE54M && !RX_HAL_IS_CCK_RATE(rate)) + txpower += rtlefuse->txpwr_5g_ofdmdiff[path][TX_1S]; + + if (bandwidth == HT_CHANNEL_WIDTH_20) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + 
(DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw20diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) { + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S]; + } else if (bandwidth == HT_CHANNEL_WIDTH_80) { + u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171}; + u8 i = 0; + for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i) + if (channel_5g_80m[i] == channel) + index = i; + + if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT1SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower = rtlefuse->txpwr_5g_bw80base[path][index] + + rtlefuse->txpwr_5g_bw80diff[path][TX_1S]; + if ((DESC_RATEMCS8 <= rate && rate <= DESC_RATEMCS15) || + (DESC_RATEVHT2SS_MCS0 <= rate && rate <= DESC_RATEVHT2SS_MCS9)) + txpower = rtlefuse->txpwr_5g_bw80base[path][index] + + rtlefuse->txpwr_5g_bw80diff[path][TX_1S] + + rtlefuse->txpwr_5g_bw80diff[path][TX_2S]; + } + } + if (rtlefuse->eeprom_regulatory != 2) + powerdiff_byrate = _rtl8821ae_phy_get_txpower_by_rate(hw, + (u8)(!in_24g), path, rate); + + if (rate == DESC_RATEVHT1SS_MCS8 || rate == DESC_RATEVHT1SS_MCS9 || + rate == DESC_RATEVHT2SS_MCS8 || rate == DESC_RATEVHT2SS_MCS9) + txpower -= powerdiff_byrate; + else + txpower += powerdiff_byrate; + + if (rate > DESC_RATE11M) + txpower += rtlpriv->dm.remnant_ofdm_swing_idx[path]; + else + txpower += rtlpriv->dm.remnant_cck_idx; + + if (txpower > MAX_POWER_INDEX) + txpower = MAX_POWER_INDEX; + + return txpower; +} + +static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw, + u8 power_index, u8 path, u8 rate) +{ + struct rtl_priv* rtlpriv = rtl_priv(hw); + + if (path == RF90_PATH_A) { + switch (rate) { + case DESC_RATE1M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE0, power_index); + break; + case DESC_RATE2M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE1, power_index); + break; + case DESC_RATE5_5M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE2, power_index); + break; + case DESC_RATE11M: + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKBYTE3, power_index); + break; + + case DESC_RATE6M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE0, power_index); + break; + case DESC_RATE9M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE1, power_index); + break; + case DESC_RATE12M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE2, power_index); + break; + case DESC_RATE18M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM18_OFDM6, MASKBYTE3, power_index); + break; + + case DESC_RATE24M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE0, power_index); + break; + case DESC_RATE36M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE1, power_index); + break; + case DESC_RATE48M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE2, power_index); + break; + case DESC_RATE54M: + rtl_set_bbreg(hw, RTXAGC_A_OFDM54_OFDM24, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS0: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE0, power_index); + break; + case DESC_RATEMCS1: + rtl_set_bbreg(hw, 
RTXAGC_A_MCS03_MCS00, MASKBYTE1, power_index); + break; + case DESC_RATEMCS2: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE2, power_index); + break; + case DESC_RATEMCS3: + rtl_set_bbreg(hw, RTXAGC_A_MCS03_MCS00, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS4: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE0, power_index); + break; + case DESC_RATEMCS5: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE1, power_index); + break; + case DESC_RATEMCS6: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE2, power_index); + break; + case DESC_RATEMCS7: + rtl_set_bbreg(hw, RTXAGC_A_MCS07_MCS04, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS8: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE0, power_index); + break; + case DESC_RATEMCS9: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE1, power_index); + break; + case DESC_RATEMCS10: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE2, power_index); + break; + case DESC_RATEMCS11: + rtl_set_bbreg(hw, RTXAGC_A_MCS11_MCS08, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS12: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE0, power_index); + break; + case DESC_RATEMCS13: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE1, power_index); + break; + case DESC_RATEMCS14: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE2, power_index); + break; + case DESC_RATEMCS15: + rtl_set_bbreg(hw, RTXAGC_A_MCS15_MCS12, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT1SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT1SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_A_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT1SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT2SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT2SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS8: + 
rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_A_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index); + break; + + default: + RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n")); + break; + } + } else if (path == RF90_PATH_B) { + switch (rate) { + case DESC_RATE1M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE0, power_index); + break; + case DESC_RATE2M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE1, power_index); + break; + case DESC_RATE5_5M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE2, power_index); + break; + case DESC_RATE11M: + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKBYTE3, power_index); + break; + + case DESC_RATE6M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE0, power_index); + break; + case DESC_RATE9M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE1, power_index); + break; + case DESC_RATE12M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE2, power_index); + break; + case DESC_RATE18M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM18_OFDM6, MASKBYTE3, power_index); + break; + + case DESC_RATE24M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE0, power_index); + break; + case DESC_RATE36M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE1, power_index); + break; + case DESC_RATE48M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE2, power_index); + break; + case DESC_RATE54M: + rtl_set_bbreg(hw, RTXAGC_B_OFDM54_OFDM24, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS0: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE0, power_index); + break; + case DESC_RATEMCS1: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE1, power_index); + break; + case DESC_RATEMCS2: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE2, power_index); + break; + case DESC_RATEMCS3: + rtl_set_bbreg(hw, RTXAGC_B_MCS03_MCS00, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS4: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE0, power_index); + break; + case DESC_RATEMCS5: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE1, power_index); + break; + case DESC_RATEMCS6: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE2, power_index); + break; + case DESC_RATEMCS7: + rtl_set_bbreg(hw, RTXAGC_B_MCS07_MCS04, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS8: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE0, power_index); + break; + case DESC_RATEMCS9: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE1, power_index); + break; + case DESC_RATEMCS10: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE2, power_index); + break; + case DESC_RATEMCS11: + rtl_set_bbreg(hw, RTXAGC_B_MCS11_MCS08, MASKBYTE3, power_index); + break; + + case DESC_RATEMCS12: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE0, power_index); + break; + case DESC_RATEMCS13: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE1, power_index); + break; + case DESC_RATEMCS14: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE2, power_index); + break; + case DESC_RATEMCS15: + rtl_set_bbreg(hw, RTXAGC_B_MCS15_MCS12, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT1SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX3_NSS1INDEX0, MASKBYTE3, power_index); + break; + 
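+ /*
+  * As on path A, each RTXAGC_B_* register packs four rate power indices;
+  * MASKBYTE0..MASKBYTE3 select which byte of the register is written for
+  * a given rate.
+  */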
+ case DESC_RATEVHT1SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE1, power_index); + break; + case DESC_RATEVHT1SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE2, power_index); + break; + case DESC_RATEVHT1SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_B_NSS1INDEX7_NSS1INDEX4, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT1SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE0, power_index); + break; + case DESC_RATEVHT1SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS0: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS1: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX1_NSS1INDEX8, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT2SS_MCS2: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS3: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS4: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS5: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX5_NSS2INDEX2, MASKBYTE3, power_index); + break; + + case DESC_RATEVHT2SS_MCS6: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE0, power_index); + break; + case DESC_RATEVHT2SS_MCS7: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE1, power_index); + break; + case DESC_RATEVHT2SS_MCS8: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE2, power_index); + break; + case DESC_RATEVHT2SS_MCS9: + rtl_set_bbreg(hw, RTXAGC_B_NSS2INDEX9_NSS2INDEX6, MASKBYTE3, power_index); + break; + + default: + RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid Rate!!\n")); + break; + } + } else { + RT_TRACE(COMP_POWER, DBG_LOUD, ("Invalid RFPath!!\n")); + } +} + +void _rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, + u8 *array, u8 path, u8 channel, + u8 size) +{ + struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy); + u8 i; + u8 power_index; + for (i = 0; i < size; i ++) { + power_index = _rtl8821ae_get_txpower_index(hw, path, array[i], + rtlphy->current_chan_bw, channel); + _rtl8821ae_phy_set_txpower_index(hw, power_index, path, array[i]); + } +} + +static void _rtl8821ae_phy_txpower_training_by_path(struct ieee80211_hw *hw, + u8 bw, u8 channel, u8 path) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + u8 i; + u32 power_level, data, offset; + + if(path >= rtlphy->num_total_rfpath) + return; + + data = 0; + if (path == RF90_PATH_A) { + power_level = + _rtl8821ae_get_txpower_index(hw, RF90_PATH_A, + DESC_RATEMCS7, bw, channel); + offset = RA_TXPWRTRAING; + } else { + power_level = + _rtl8821ae_get_txpower_index(hw, RF90_PATH_A, + DESC_RATEMCS7, bw, channel); + offset = RB_TXPWRTRAING; + } + + for (i = 0; i < 3; i++) { + if (i == 0) + power_level = power_level - 10; + else if (i == 1) + power_level = power_level - 8; + else + power_level = power_level - 6; + + data |= (((power_level > 2) ? 
(power_level) : 2) << (i * 8)); + } + rtl_set_bbreg(hw, offset, 0xffffff, data); +} + +void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path) +{ + //struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy); + u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M}; + u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M, + DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M}; + u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3, + DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7}; + u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11, + DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15}; + u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2, + DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4, + DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, + DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9}; + u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2, + DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4, + DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, + DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9}; + //u8 i,size; + //u8 power_index; + + if (rtlhal->current_bandtype == BAND_ON_2_4G) + _rtl8821ae_phy_set_txpower_level_by_path(hw,cck_rates,path,channel, + sizeof(cck_rates) / sizeof(u8)); + + _rtl8821ae_phy_set_txpower_level_by_path(hw,ofdm_rates,path,channel, + sizeof(ofdm_rates) / sizeof(u8)); + _rtl8821ae_phy_set_txpower_level_by_path(hw,ht_rates_1t,path,channel, + sizeof(ht_rates_1t) / sizeof(u8)); + _rtl8821ae_phy_set_txpower_level_by_path(hw,vht_rates_1t,path,channel, + sizeof(vht_rates_1t) / sizeof(u8)); + + if (rtlphy->num_total_rfpath >= 2) { + _rtl8821ae_phy_set_txpower_level_by_path(hw,ht_rates_2t,path,channel, + sizeof(ht_rates_2t) / sizeof(u8)); + _rtl8821ae_phy_set_txpower_level_by_path(hw,vht_rates_2t,path,channel, + sizeof(vht_rates_2t) / sizeof(u8)); + } + + _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path); +} +/*just in case, write txpower in DW, to reduce time*/ +#if 0 +void _rtl8821ae_phy_get_txpower_index_by_rate_array(struct ieee80211_hw *hw, u8 channel, + u8 *rate, u8 path, u8 bw, u8 *power_index, u8 size) +{ + u8 i; + for (i = 0; i < size; i++) + power_index[i] = _rtl8821ae_get_txpower_index(hw, path, rate[i], bw, channel); +} + +void rtl8821ae_phy_set_txpower_level_by_path2(struct ieee80211_hw *hw, u8 channel, u8 path) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtl_priv(hw)->phy); + u8 cck_rates[] = {DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M}; + u8 ofdm_rates[] = {DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M, + DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M}; + u8 ht_rates_1t[] = {DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3, + DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7}; + u8 ht_rates_2t[] = {DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11, + DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15}; + u8 vht_rates_1t[] = {DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4, + DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9}; 
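+ /*
+  * This disabled variant batches the per-rate indices and writes four of
+  * them per TXAGC register with a single MASKDWORD access (see the
+  * pwridx[] packing loop below), instead of one byte-masked write per
+  * rate as in rtl8821ae_phy_set_txpower_level_by_path().
+  */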
+ u8 vht_rates_2t[] = {DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4, + DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9}; + u8 i, j; + u8 pwridx[48] = {0}; + u8 cs = sizeof(cck_rates) / sizeof(u8); + u8 os = sizeof(ofdm_rates) / sizeof(u8); + u8 h1s = sizeof(ht_rates_1t) / sizeof(u8); + u8 h2s = sizeof(ht_rates_2t) / sizeof(u8); + u8 v1s = sizeof(vht_rates_1t) / sizeof(u8); + u8 v2s = sizeof(vht_rates_2t) / sizeof(u8); + + u8 len, start; + u32 reg_addr, power_index; + u8 bw = rtlphy->current_chan_bw; + + _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel, + ofdm_rates, path, bw, &pwridx[cs], os); + + _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel, + ht_rates_1t, path, bw, &pwridx[cs+os], h1s); + + _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel, + vht_rates_1t, path, bw, &pwridx[cs+os+h1s+h2s], v1s); + + + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel, + cck_rates, path, bw, pwridx, cs); + + start = 0; + } else { + start = cs; + } + + reg_addr = (path == 0) ? RTXAGC_A_CCK11_CCK1 : RTXAGC_B_CCK11_CCK1; + reg_addr += start; + + len = cs + os + h1s + h2s + v1s; + if (rtlphy->num_total_rfpath >= 2) { + _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel, + ht_rates_2t, path, bw, &pwridx[cs+os+h1s], h2s); + + _rtl8821ae_phy_get_txpower_index_by_rate_array(hw, channel, + vht_rates_2t, path, bw, &pwridx[cs+os+h1s+h2s+v1s], v2s); + + len += v2s; + } + for (i = start; i < len; i += 4) { + power_index = 0; + for (j = 0; j < 4; j++) + power_index |= (pwridx[i+j] << (j*8)); + rtl_set_bbreg(hw, reg_addr + i, MASKDWORD, power_index); + } + + _rtl8821ae_phy_txpower_training_by_path(hw, rtlphy->current_chan_bw, channel, path); +} +#endif + +void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 path = 0; + + for (path = RF90_PATH_A; path < rtlphy->num_total_rfpath; ++path ) + rtl8821ae_phy_set_txpower_level_by_path(hw, channel, path); +} + +static long _rtl8821ae_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx) +{ + long offset; + long pwrout_dbm; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case WIRELESS_MODE_N_24G: + offset = -8; + break; + default: + offset = -8; + break; + } + pwrout_dbm = txpwridx / 2 + offset; + return pwrout_dbm; +} + +void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum io_type iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP_BAND0: + iotype = IO_CMD_PAUSE_BAND0_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *) & iotype); + + break; + case SCAN_OPT_BACKUP_BAND1: + iotype = IO_CMD_PAUSE_BAND1_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *) & iotype); + + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *) & iotype); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Unknown Scan Backup operation.\n")); + break; + } + } +} + +static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv * rtlpriv, u8 bw) +{ + u16 reg_rf_mode_bw, 
tmp = 0; + reg_rf_mode_bw = rtl_read_word(rtlpriv, REG_TRXPTCL_CTL); + switch (bw) { + case HT_CHANNEL_WIDTH_20: + rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, reg_rf_mode_bw & 0xFE7F); + break; + case HT_CHANNEL_WIDTH_20_40: + tmp = reg_rf_mode_bw | BIT(7); + rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFEFF); + break; + case HT_CHANNEL_WIDTH_80: + tmp = reg_rf_mode_bw | BIT(8); + rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F); + break; + default: + RT_TRACE(COMP_ERR, DBG_WARNING,("unknown Bandwidth: 0x%x\n",bw)); + break; + } +} + +static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv * rtlpriv) +{ + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtlpriv); + u8 sc_set_40 = 0, sc_set_20 =0; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) { + if(mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_LOWER) + sc_set_40 = VHT_DATA_SC_40_LOWER_OF_80MHZ; + else if(mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER) + sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ; + else + RT_TRACE(COMP_ERR, DBG_EMERG, + ("SCMapping: Not Correct Primary40MHz Setting \n")); + + if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) + sc_set_20 = VHT_DATA_SC_20_LOWEST_OF_80MHZ; + else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER)) + sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ; + else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) + sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ; + else if((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) && + (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER)) + sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ; + else + RT_TRACE(COMP_ERR, DBG_EMERG, + ("SCMapping: Not Correct Primary40MHz Setting \n")); + } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER) + sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ; + else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) + sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ; + else + RT_TRACE(COMP_ERR, DBG_EMERG, + ("SCMapping: Not Correct Primary40MHz Setting \n")); + } + return ((sc_set_40 << 4) | sc_set_20); +} + +void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 sub_chnl = 0; + u8 l1pk_val = 0; + + RT_TRACE(COMP_SCAN, DBG_TRACE, + ("Switch to %s bandwidth\n", + (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : + (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ? 
+ "40MHz" : "80MHz")))) + + + + _rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw); + sub_chnl = _rtl8821ae_phy_get_secondary_chnl(rtlpriv); + rtl_write_byte(rtlpriv, 0x0483, sub_chnl); + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300200); + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0); + + if(rtlphy->rf_type == RF_2T2R) + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 7); + else + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, 8); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300201); + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 0); + rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl); + rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl); + + if(rtlphy->reg_837 & BIT(2)) + l1pk_val = 6; + else + { + if(rtlphy->rf_type == RF_2T2R) + l1pk_val = 7; + else + l1pk_val = 8; + } + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val); // 0x848[25:22] = 0x6 + + if(sub_chnl == VHT_DATA_SC_20_UPPER_OF_80MHZ) + rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 1); + else + rtl_set_bbreg(hw, RCCK_SYSTEM, BCCK_SYSTEM, 0); + break; + + case HT_CHANNEL_WIDTH_80: + rtl_set_bbreg(hw, RRFMOD, 0x003003C3, 0x00300202); // 0x8ac[21,20,9:6,1,0]=8'b11100010 + rtl_set_bbreg(hw, RADC_BUF_CLK, BIT(30), 1); // 0x8c4[30] = 1 + rtl_set_bbreg(hw, RRFMOD, 0x3C, sub_chnl); + rtl_set_bbreg(hw, RCCAONSEC, 0xf0000000, sub_chnl); + + if(rtlphy->reg_837 & BIT(2)) + l1pk_val = 5; + else + { + if(rtlphy->rf_type == RF_2T2R) + l1pk_val = 6; + else + l1pk_val = 7; + } + rtl_set_bbreg(hw, RL1PEAKTH, 0x03C00000, l1pk_val); + + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw)); + break; + } + + rtl8812ae_fixspur(hw, rtlphy->current_chan_bw, rtlphy->current_channel); + + rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtlphy->set_bwmode_inprogress = false; + + RT_TRACE(COMP_SCAN, DBG_LOUD, (" \n")); +} + +void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_bw = rtlphy->current_chan_bw; + + if (rtlphy->set_bwmode_inprogress) + return; + rtlphy->set_bwmode_inprogress = true; + if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { + rtl8821ae_phy_set_bw_mode_callback(hw); + } else { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("FALSE driver sleep or unload\n")); + rtlphy->set_bwmode_inprogress = false; + rtlphy->current_chan_bw = tmp_bw; + } +} + +void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 channel = rtlphy->current_channel; + u8 path; + u32 data; + + RT_TRACE(COMP_SCAN, DBG_TRACE, + ("switch to channel%d\n", rtlphy->current_channel)); + if (is_hal_stop(rtlhal)) + return; + + if (36 <= channel && channel <= 48) + data = 0x494; + else if (50 <= channel && channel <= 64) + data = 0x453; + else if (100 <= channel && channel <= 116) + data = 0x452; + else if (118 <= channel) + data = 0x412; + else + data = 0x96a; + rtl_set_bbreg(hw, RFC_AREA, 0x1ffe0000, data); + + + for(path = RF90_PATH_A; path < rtlphy->num_total_rfpath; path++) + { + if (36 <= channel && channel <= 64) + data = 0x101; + else if (100 <= channel && channel <= 140) + data = 0x301; + else if (140 < channel) + data = 0x501; + else + data = 0x000; + rtl8821ae_phy_set_rf_reg(hw, path, 
RF_CHNLBW, + BIT(18)|BIT(17)|BIT(16)|BIT(9)|BIT(8), data); + + rtl8821ae_phy_set_rf_reg(hw, path, RF_CHNLBW, + BMASKBYTE0, channel); + + if (channel > 14) { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) { + if (36 <= channel && channel <= 64) + data = 0x114E9; + else if (100 <= channel && channel <= 140) + data = 0x110E9; + else + data = 0x110E9; + rtl8821ae_phy_set_rf_reg(hw, path, RF_APK, + BRFREGOFFSETMASK, data); + } + } + } + RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n")); +} + +u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 timeout = 1000, timecount = 0; + u8 channel = rtlphy->current_channel; + + if (rtlphy->sw_chnl_inprogress) + return 0; + if (rtlphy->set_bwmode_inprogress) + return 0; + + if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) { + RT_TRACE(COMP_CHAN, DBG_LOUD, + ("sw_chnl_inprogress false driver sleep or unload\n")); + return 0; + } + while (rtlphy->lck_inprogress && timecount < timeout) { + mdelay(50); + timecount += 50; + } + + if (rtlphy->current_channel > 14 && rtlhal->current_bandtype != BAND_ON_5G) + rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_5G); + else if (rtlphy->current_channel <= 14 && rtlhal->current_bandtype != BAND_ON_2_4G) + rtl8821ae_phy_switch_wirelessband(hw, BAND_ON_2_4G); + + rtlphy->sw_chnl_inprogress = true; + if (channel == 0) + channel = 1; + + RT_TRACE(COMP_SCAN, DBG_TRACE, + ("switch to channel%d, band type is %d\n", rtlphy->current_channel, rtlhal->current_bandtype)); + + rtl8821ae_phy_sw_chnl_callback(hw); + + rtl8821ae_dm_clear_txpower_tracking_state(hw); + rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel); + + RT_TRACE(COMP_SCAN, DBG_TRACE, ("\n")); + rtlphy->sw_chnl_inprogress = false; + return 1; +} + +#if 0 +static u8 _rtl8821ae_phy_path_b_iqk(struct ieee80211_hw *hw) +{ + u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc; + u8 result = 0x00; + + rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002); + rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000); + mdelay(IQK_DELAY_TIME); + reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); + reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD); + reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD); + reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD); + reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD); + + if (!(reg_eac & BIT(31)) && + (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) && + (((reg_ebc & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + if (!(reg_eac & BIT(30)) && + (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_ecc & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + return result; +} + +static u8 _rtl8821ae_phy_path_a_rx_iqk(struct ieee80211_hw *hw, bool config_pathb) +{ + u32 reg_eac, reg_e94, reg_e9c, reg_ea4,u32temp; + u8 result = 0x00; + + /*Get TXIMR Setting*/ + /*Modify RX IQK mode table*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf117b); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /*IQK Setting*/ + rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x81004800); + + /*path a IQK setting*/ + rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x10008c1c); + rtl_set_bbreg(hw, RRx_IQK_Tone_A, 
MASKDWORD, 0x30008c1c); + rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160804); + rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160000); + + /*LO calibration Setting*/ + rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911); + /*one shot,path A LOK & iqk*/ + rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD); + + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + + u32temp = 0x80007C00 | (reg_e94&0x3FF0000) | ((reg_e9c&0x3FF0000) >> 16); + rtl_set_bbreg(hw, RTx_IQK, MASKDWORD, u32temp); + /*RX IQK*/ + /*Modify RX IQK mode table*/ + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x00000000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_WE_LUT, RFREG_OFFSET_MASK, 0x800a0); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK_OS, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G1, RFREG_OFFSET_MASK, 0x0000f); + rtl_set_rfreg(hw, RF90_PATH_A, RF_TXPA_G2, RFREG_OFFSET_MASK, 0xf7ffa); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0x80800000); + + /*IQK Setting*/ + rtl_set_bbreg(hw, RRx_IQK, MASKDWORD, 0x01004800); + + /*path a IQK setting*/ + rtl_set_bbreg(hw, RTx_IQK_Tone_A, MASKDWORD, 0x30008c1c); + rtl_set_bbreg(hw, RRx_IQK_Tone_A, MASKDWORD, 0x10008c1c); + rtl_set_bbreg(hw, RTx_IQK_PI_A, MASKDWORD, 0x82160c05); + rtl_set_bbreg(hw, RRx_IQK_PI_A, MASKDWORD, 0x28160c05); + + /*LO calibration Setting*/ + rtl_set_bbreg(hw, RIQK_AGC_Rsp, MASKDWORD, 0x0046a911); + /*one shot,path A LOK & iqk*/ + rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_Pts, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + reg_eac = rtl_get_bbreg(hw, RRx_Power_After_IQK_A_2, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, RTx_Power_Before_IQK_A, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, RTx_Power_After_IQK_A, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, RRx_Power_Before_IQK_A_2, MASKDWORD); + + if (!(reg_eac & BIT(27)) && + (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_eac & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + return result; +} +#endif + +u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl) +{ + u8 channel_all[TARGET_CHNL_NUM_2G_5G_8812] = + {1,2,3,4,5,6,7,8,9,10,11,12,13,14,36,38,40,42,\ + 44,46,48,50,52,54,56,58,60,62,64,100,\ + 102,104,106,108,110,112,114,116,118,\ + 120,122,124,126,128,130,132,134,136,\ + 138,140,149,151,153,155,157,159,161,\ + 163,165}; + u8 place = chnl; + + if(chnl > 14) + { + for(place = 14; place < sizeof(channel_all); place++) + { + if(channel_all[place] == chnl) + return place-13; + } + } + + return 0; +} + +void _rtl8812ae_iqk_rx_fill_iqc( + struct ieee80211_hw *hw, + enum radio_path path, + u32 rx_x, + u32 rx_y + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (path) { + case RF90_PATH_A: + { + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + if (rx_x >> 1 ==0x112 || rx_y >> 1 == 0x3ee){ + rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x100); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n", + rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff)); + } + else{ + rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x >> 1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y >> 1); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n", + rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("0xc10 = %x ====>fill to IQC\n", + rtl_read_dword(rtlpriv, 0xc10))); + } + } + break; + case RF90_PATH_B: + { + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + if (rx_x >> 1 ==0x112 || rx_y >> 1 ==
0x3ee){ + rtl_set_bbreg(hw, 0xe10, 0x000003ff, 0x100); + rtl_set_bbreg(hw, 0xe10, 0x03ff0000, 0); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX_X = %x;;RX_Y = %x ====>fill to IQC\n", + rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff)); + } + else{ + rtl_set_bbreg(hw, 0xe10, 0x000003ff, rx_x >> 1); + rtl_set_bbreg(hw, 0xe10, 0x03ff0000, rx_y >> 1); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX_X = %x;;RX_Y = %x====>fill to IQC\n ", + rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("0xe10 = %x====>fill to IQC\n", + rtl_read_dword(rtlpriv, 0xe10))); + } + } + break; + default: + break; + }; +} + +void _rtl8812ae_iqk_tx_fill_iqc( + struct ieee80211_hw *hw, + enum radio_path path, + u32 tx_x, + u32 tx_y + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (path) { + case RF90_PATH_A: + { + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/ + rtl_write_dword(rtlpriv, 0xc90, 0x00000080); + rtl_write_dword(rtlpriv, 0xcc4, 0x20040000); + rtl_write_dword(rtlpriv, 0xcc8, 0x20000000); + rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n", + tx_x & 0x000007ff, tx_y & 0x000007ff)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", + rtl_get_bbreg(hw, 0xcd4, 0x000007ff), + rtl_get_bbreg(hw, 0xccc, 0x000007ff))); + } + break; + case RF90_PATH_B: + { + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/ + rtl_write_dword(rtlpriv, 0xe90, 0x00000080); + rtl_write_dword(rtlpriv, 0xec4, 0x20040000); + rtl_write_dword(rtlpriv, 0xec8, 0x20000000); + rtl_set_bbreg(hw, 0xecc, 0x000007ff, tx_y); + rtl_set_bbreg(hw, 0xed4, 0x000007ff, tx_x); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n", + tx_x&0x000007ff, tx_y&0x000007ff)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("0xed4 = %x;;0xecc = %x ====>fill to IQC\n", + rtl_get_bbreg(hw, 0xed4, 0x000007ff), + rtl_get_bbreg(hw, 0xecc, 0x000007ff))); + } + break; + default: + break; + }; +} + +void _rtl8812ae_iqk_backup_macbb( + struct ieee80211_hw *hw, + u32 *macbb_backup, + u32 *backup_macbb_reg, + u32 mac_bb_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*save MACBB default value*/ + for (i = 0; i < mac_bb_num; i++) { + macbb_backup[i] =rtl_read_dword(rtlpriv,backup_macbb_reg[i]); + } + + RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n")); +} + +void _rtl8812ae_iqk_backup_afe( + struct ieee80211_hw *hw, + u32 *afe_backup, + u32 *backup_afe_REG, + u32 afe_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Save AFE Parameters */ + for (i = 0; i < afe_num; i++){ + afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]); + } + RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n")); +} + +void _rtl8812ae_iqk_backup_rf( + struct ieee80211_hw *hw, + u32 *rfa_backup, + u32 *rfb_backup, + u32 *backup_rf_reg, + u32 rf_num + ) +{ + + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Save RF Parameters*/ + for (i = 0; i < rf_num; i++){ + rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD); + rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD); + } + RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n")); +} + +void _rtl8812ae_iqk_configure_mac( + struct 
ieee80211_hw *hw + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + /* ========MAC register setting========*/ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + rtl_write_byte(rtlpriv, 0x522, 0x3f); + rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0); + rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/ + rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/ +} + +#define cal_num 10 + +void _rtl8812ae_iqk_tx( + struct ieee80211_hw *hw, + u8 chnl_idx + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 delay_count; + u8 cal0_retry, cal1_retry; + u8 tx0_average = 0, tx1_average = 0, rx0_average = 0, rx1_average = 0; + int tx0_x = 0, tx0_y = 0, rx0_x = 0, rx0_y = 0; + int tx_x0[cal_num], tx_y0[cal_num], rx_x0[cal_num], rx_y0[cal_num]; + int tx1_x = 0, tx1_y = 0, rx1_x = 0, rx1_y = 0; + int tx_x1[cal_num], tx_y1[cal_num], rx_x1[cal_num], rx_y1[cal_num]; + bool tx0iqkok= false, rx0iqkok = false, tx0_fail = true, rx0_fail; + bool iqk0_ready = false, tx0_finish = false, rx0_finish = false; + bool tx1iqkok = false, rx1iqkok = false, tx1_fail = true, rx1_fail; + bool iqk1_ready = false, tx1_finish = false, rx1_finish = false, vdf_enable = false; + int i, tx_dt[3] = {0}, rx_dt[3] = {0}, ii, dx = 0, dy = 0; + + RT_TRACE(COMP_IQK, DBG_LOUD, + ("BandWidth = %d.\n", + rtlphy->current_chan_bw)); + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80){ + vdf_enable = true; + } + vdf_enable = false; + + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*========Path-A AFE all on========*/ + /*Port 0 DAC/ADC on*/ + rtl_write_dword(rtlpriv, 0xc60, 0x77777777); + rtl_write_dword(rtlpriv, 0xc64, 0x77777777); + + /* Port 1 DAC/ADC off*/ + rtl_write_dword(rtlpriv, 0xe60, 0x77777777); + rtl_write_dword(rtlpriv, 0xe64, 0x77777777); + + rtl_write_dword(rtlpriv, 0xc68, 0x19791979); + rtl_write_dword(rtlpriv, 0xe68, 0x19791979); + rtl_set_bbreg(hw,0xc00, 0xf, 0x4);/*hardware 3-wire off*/ + rtl_set_bbreg(hw,0xe00, 0xf, 0x4);/*hardware 3-wire off*/ + + /*DAC/ADC sampling rate (160 MHz)*/ + rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7); + rtl_set_bbreg(hw, 0xe5c, BIT(26) | BIT(25) | BIT(24), 0x7); + rtl_set_bbreg(hw, 0x8c4, BIT(30), 0x1); + + /*====== Path A TX IQK RF Setting ======*/ + rtl_set_bbreg(hw,0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + rtl_set_rfreg(hw,RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80002); + rtl_set_rfreg(hw,RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x20000); + rtl_set_rfreg(hw,RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3fffd); + rtl_set_rfreg(hw,RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe83f); + rtl_set_rfreg(hw,RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d5); + rtl_set_rfreg(hw,RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x8a001); + /*====== Path A TX IQK RF Setting ======*/ + rtl_set_rfreg(hw,RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80002); + rtl_set_rfreg(hw,RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x20000); + rtl_set_rfreg(hw,RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3fffd); + rtl_set_rfreg(hw,RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe83f); + rtl_set_rfreg(hw,RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d5); + rtl_set_rfreg(hw,RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x8a001); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1); + rtl_set_bbreg(hw, 0xe94, BIT(0), 0x1); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);/* TX (X,Y)*/ + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);/* RX (X,Y)*/ + 
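+ /* The remaining setup below programs the IQK tone/mask registers (0xc80/0xc84 for path A, 0xe80/0xe84 for path B) and then enters the TX IQK loop: a one-shot calibration is fired on both paths (0xcb8/0xeb8, then 0x980), completion is polled on 0xd00/0xd40 bit 10 for up to 20 ms, bit 12 flags a failed run, and the TX X/Y results are read from 0xd00/0xd40 after selecting the report via 0xcb8/0xeb8. Each path retries up to 10 times and is only marked finished once two runs differ by less than 4 in both X and Y and are averaged. */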
rtl_write_dword(rtlpriv, 0x984, 0x00462910);/*[0]:AGC_en, [15]:idac_K_Mask*/ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/ + + /*ExternalPA_5G == 0*/ + rtl_write_dword(rtlpriv, 0xc88, 0x821403f1); + rtl_write_dword(rtlpriv, 0xe88, 0x821403f1); + + if (rtlhal->current_bandtype){ + rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96); + rtl_write_dword(rtlpriv, 0xe8c, 0x68163e96); + } + else{ + rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96); + rtl_write_dword(rtlpriv, 0xe8c, 0x28163e96); + } + + if (vdf_enable){} + else{ + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/ + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/ + rtl_write_dword(rtlpriv, 0xce8, 0x00000000); + rtl_write_dword(rtlpriv, 0xe80, 0x18008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/ + rtl_write_dword(rtlpriv, 0xe84, 0x38008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/ + rtl_write_dword(rtlpriv, 0xee8, 0x00000000); + + cal0_retry = 0; + cal1_retry = 0; + while(1){ + /*one shot*/ + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);/* cb8[20] N SI/PI ϥv iqk_dpk module*/ + rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);/* cb8[20] N SI/PI ϥv iqk_dpk module*/ + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); /*Delay 25ms*/ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + rtl_write_dword(rtlpriv, 0xeb8, 0x00000000); + delay_count = 0; + while (1){ + if (!tx0_finish) + iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10)); + if (!tx1_finish) + iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10)); + if ((iqk0_ready && iqk1_ready) || (delay_count>20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + RT_TRACE(COMP_IQK, DBG_LOUD, ("TX delay_count = %d\n", delay_count)); + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + /* ============TXIQK Check==============*/ + tx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(12)); + tx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(12)); + if (!(tx0_fail || tx0_finish)){ + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0[tx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21; + tx0iqkok = true; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", + tx0_average, (tx_x0[tx0_average]) >> 21 & 0x000007ff, + tx0_average, (tx_y0[tx0_average]) >> 21 & 0x000007ff)); + + tx0_average++; + } + else{ + tx0iqkok = false; + cal0_retry++; + if (cal0_retry == 10) + break; + } + if (!(tx1_fail || tx1_finish)){ + rtl_write_dword(rtlpriv, 0xeb8, 0x02000000); + tx_x1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21; + rtl_write_dword(rtlpriv, 0xeb8, 0x04000000); + tx_y1[tx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21; + tx1iqkok= true; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX_X1[%d] = %x ;; TX_Y1[%d] = %x\n", + tx1_average, (tx_x1[tx1_average]) >> 21 & 0x000007ff, + tx1_average, (tx_y1[tx1_average]) >> 21 & 0x000007ff)); + + tx1_average++; + } + else{ + tx1iqkok = false; + cal1_retry++; + if (cal1_retry == 10) + break; + } + } + else{ + tx0iqkok = false; + tx1iqkok = false; + cal0_retry++; + cal1_retry++; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("Delay 20ms TX IQK Not Ready!!!!!\n")); + if (cal0_retry == 10) + break; + } + if (tx0_average >= 2){ + for (i = 0; i < tx0_average; i++){ + for (ii = i+1; ii > 21) - (tx_x0[ii] >> 21); + if (dx < 4 && dx > -4){ + dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21); + if (dy < 4 && dy > -4){ 
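+ /* Two TX IQK runs agree (X and Y each differ by less than 4): average them as the final path A result. */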
+ tx0_x = ((tx_x0[i] >> 21) + (tx_x0[ii] >> 21)) / 2; + tx0_y = ((tx_y0[i] >> 21) + (tx_y0[ii] >> 21)) / 2; + tx_x0[0] = tx_x0[i]; + tx_y0[1] = tx_y0[ii]; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX0_X = %x;;TX0_Y = %x\n", + tx0_x & 0x000007ff, tx0_y & 0x000007ff)); + if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + && vdf_enable) { + tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2; + } + tx0_finish = true; + } + } + } + } + } + if (tx1_average >= 2){ + for (i = 0; i < tx1_average; i++){ + for (ii = i+1; ii < tx1_average; ii++){ + dx = (tx_x1[i] >> 21) - (tx_x1[ii] >> 21); + if (dx < 4 && dx > -4){ + dy = (tx_y1[i] >> 21) - (tx_y1[ii] >> 21); + if (dy < 4 && dy > -4){ + tx1_x = ((tx_x1[i] >> 21) + (tx_x1[ii] >> 21)) / 2; + tx1_y = ((tx_y1[i] >> 21) + (tx_y1[ii] >> 21)) / 2; + tx_x1[0] = tx_x1[i]; + tx_y1[1] = tx_y1[ii]; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX1_X = %x;;TX1_Y = %x\n", + tx1_x & 0x000007ff, tx1_y & 0x000007ff)); + if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + && vdf_enable) { + tx_dt[0] = (tx_dt[i] + tx_dt[ii]) / 2; + } + tx1_finish = true; + } + } + } + } + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX0_Average = %d, TX1_Average = %d\n", + tx0_average, tx1_average)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TX0_finish = %d, TX1_finish = %d\n", + tx0_finish, tx1_finish)); + if (tx0_finish && tx1_finish) + break; + if ((cal0_retry + tx0_average) >= 10 + || (cal1_retry + tx1_average) >= 10 ) + break; + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TXA_cal_retry = %d\n", cal0_retry)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("TXB_cal_retry = %d\n", cal1_retry)); + + } + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/ + rtl_set_rfreg(hw, RF90_PATH_A, 0x58, 0x7fe00, + rtl_get_rfreg(hw, RF90_PATH_A, 0x8, 0xffc00)); /*Load LOK*/ + rtl_set_rfreg(hw, RF90_PATH_B, 0x58, 0x7fe00, + rtl_get_rfreg(hw, RF90_PATH_B, 0x8, 0xffc00)); /* Load LOK*/ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/ + + + if (vdf_enable) {} + else{ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + if (tx0_finish) { + /*====== Path A RX IQK RF Setting======*/ + rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x80000); + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x3); /* BW 20M*/ + rtl_set_rfreg(hw, RF90_PATH_A, 0x30, BRFREGOFFSETMASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_A, 0x31, BRFREGOFFSETMASK, 0x3f7ff); + rtl_set_rfreg(hw, RF90_PATH_A, 0x32, BRFREGOFFSETMASK, 0xfe7bf); + rtl_set_rfreg(hw, RF90_PATH_A, 0x8f, BRFREGOFFSETMASK, 0x88001); + rtl_set_rfreg(hw, RF90_PATH_A, 0x65, BRFREGOFFSETMASK, 0x931d6); + rtl_set_rfreg(hw, RF90_PATH_A, 0xef, BRFREGOFFSETMASK, 0x00000); + } + if (tx1_finish){ + /*====== Path B RX IQK RF Setting======*/ + rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x80000); + rtl_set_rfreg(hw, RF90_PATH_B, 0x30, BRFREGOFFSETMASK, 0x30000); + rtl_set_rfreg(hw, RF90_PATH_B, 0x31, BRFREGOFFSETMASK, 0x3f7ff); + rtl_set_rfreg(hw, RF90_PATH_B, 0x32, BRFREGOFFSETMASK, 0xfe7bf); + rtl_set_rfreg(hw, RF90_PATH_B, 0x8f, BRFREGOFFSETMASK, 0x88001); + rtl_set_rfreg(hw, RF90_PATH_B, 0x65, BRFREGOFFSETMASK, 0x931d1); + rtl_set_rfreg(hw, RF90_PATH_B, 0xef, BRFREGOFFSETMASK, 0x00000); + } + rtl_set_bbreg(hw, 0x978, BIT(31), 0x1); + rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0x984, 0x0046a890); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/ + if (tx0_finish) { + rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/ + 
rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/ + rtl_write_dword(rtlpriv, 0xc88, 0x02140119); + rtl_write_dword(rtlpriv, 0xc8c, 0x28160cc0); + } + if (tx1_finish){ + rtl_write_dword(rtlpriv, 0xe80, 0x38008c10);/*TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16*/ + rtl_write_dword(rtlpriv, 0xe84, 0x18008c10);/*RX_Tone_idx[9:0], RxK_Mask[29]*/ + rtl_write_dword(rtlpriv, 0xe88, 0x02140119); + rtl_write_dword(rtlpriv, 0xe8c, 0x28160ca0); + } + cal0_retry = 0; + cal1_retry = 0; + while(1){ + /* one shot*/ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + if (tx0_finish){ + rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0[rx0_average % 2]) >> 21 & 0x000007ff); + rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0[rx0_average % 2]) >> 21 & 0x000007ff); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/ + rtl_write_dword(rtlpriv, 0xcb8, 0x00300000);/*cb8[20] N SI/PI ϥv iqk_dpk module*/ + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000); + mdelay(5); /*Delay 10ms*/ + } + if (tx1_finish){ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x1[rx1_average % 2]) >> 21 & 0x000007ff); + rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y1[rx1_average % 2]) >> 21 & 0x000007ff); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 1 --> Page C1*/ + rtl_write_dword(rtlpriv, 0xeb8, 0x00300000);/*cb8[20] N SI/PI ϥv iqk_dpk module*/ + rtl_write_dword(rtlpriv, 0xeb8, 0x00100000);/* cb8[20] N SI/PI ϥv iqk_dpk module*/ + } + mdelay(10); /*Delay 10ms*/ + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + rtl_write_dword(rtlpriv, 0xeb8, 0x00000000); + delay_count = 0; + while (1){ + if (!rx0_finish && tx0_finish) + iqk0_ready = (bool) rtl_get_bbreg(hw, 0xd00, BIT(10)); + if (!rx1_finish && tx1_finish) + iqk1_ready = (bool) rtl_get_bbreg(hw, 0xd40, BIT(10)); + if ((iqk0_ready && iqk1_ready)||(delay_count>20)) + break; + else{ + mdelay(1); + delay_count++; + } + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX delay_count = %d\n", delay_count)); + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============RXIQK Check============== + rx0_fail = (bool) rtl_get_bbreg(hw, 0xd00, BIT(11)); + rx1_fail = (bool) rtl_get_bbreg(hw, 0xd40, BIT(11)); + if (!(rx0_fail || rx0_finish) && tx0_finish){ + rtl_write_dword(rtlpriv, 0xcb8, 0x06000000); + rx_x0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21; + rtl_write_dword(rtlpriv, 0xcb8, 0x08000000); + rx_y0[rx0_average] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000) << 21; + rx0iqkok= true; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", + rx0_average, (rx_x0[rx0_average]) >> 21 & 0x000007ff, + rx0_average, (rx_y0[rx0_average]) >> 21 & 0x000007ff)); + + rx0_average++; + } + else{ + RT_TRACE(COMP_IQK, DBG_LOUD, + ("1. RXA_cal_retry = %d\n", cal0_retry)); + rx0iqkok = false; + cal0_retry++; + if (cal0_retry == 10) + break; + } + if (!(rx1_fail || rx1_finish) && tx1_finish){ + rtl_write_dword(rtlpriv, 0xeb8, 0x06000000); + rx_x1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21; + rtl_write_dword(rtlpriv, 0xeb8, 0x08000000); + rx_y1[rx1_average] = rtl_get_bbreg(hw, 0xd40, 0x07ff0000) << 21; + rx1iqkok = true; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX_X1[%d] = %x ;; RX_Y1[%d] = %x\n", + rx1_average, (rx_x1[rx1_average]) >> 21 & 0x000007ff, + rx1_average, (rx_y1[rx1_average]) >> 21 & 0x000007ff)); + + rx1_average++; + } + else{ + rx1iqkok= false; + cal1_retry++; + if (cal1_retry == 10) + break; + } + + } + else{ + RT_TRACE(COMP_IQK, DBG_LOUD, + ("2. 
RXA_cal_retry = %d\n", cal0_retry)); + rx0iqkok = false; + rx1iqkok = false; + cal0_retry++; + cal1_retry++; + RT_TRACE(COMP_IQK, DBG_LOUD, + ("Delay 20ms RX IQK Not Ready!!!!!\n")); + if (cal0_retry == 10) + break; + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("3. RXA_cal_retry = %d\n", cal0_retry)); + if (rx0_average >= 2){ + for (i = 0; i < rx0_average; i++){ + for (ii = i+1; ii < rx0_average; ii++){ + dx = (rx_x0[i] >> 21) - (rx_x0[ii] >> 21); + if (dx < 4 && dx > -4){ + dy = (rx_y0[i] >> 21) - (rx_y0[ii] >> 21); + if (dy < 4 && dy > -4){ + rx0_x = ((rx_x0[i]>>21) + (rx_x0[ii] >> 21)) / 2; + rx0_y = ((rx_y0[i]>>21) + (rx_y0[ii] >> 21)) / 2; + if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + && vdf_enable) { + rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2; + } + rx0_finish = true; + break; + } + } + } + } + } + if (rx1_average >= 2){ + for (i = 0; i < rx1_average; i++){ + for (ii = i+1; ii < rx1_average; ii++){ + dx = (rx_x1[i] >> 21) - (rx_x1[ii] >> 21); + if (dx < 4 && dx > -4){ + dy = (rx_y1[i] >> 21) - (rx_y1[ii] >> 21); + if (dy < 4 && dy > -4){ + rx1_x = ((rx_x1[i] >> 21) + (rx_x1[ii] >> 21)) / 2; + rx1_y = ((rx_y1[i] >> 21) + (rx_y1[ii] >> 21)) / 2; + if ((rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) + && vdf_enable) { + rx_dt[0] = (rx_dt[i] + rx_dt[ii]) / 2; + } + rx1_finish = true; + break; + } + } + } + } + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX0_Average = %d, RX1_Average = %d\n", + rx0_average, rx1_average)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RX0_finish = %d, RX1_finish = %d\n", + rx0_finish, rx1_finish)); + if ((rx0_finish|| !tx0_finish) && (rx1_finish || !tx1_finish) ) + break; + if ((cal0_retry + rx0_average) >= 10 + || (cal1_retry + rx1_average) >= 10 + || rx0_average == 3 + || rx1_average == 3) + break; + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RXA_cal_retry = %d\n", cal0_retry)); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RXB_cal_retry = %d\n", cal1_retry)); + } + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/ + switch (rtlphy->current_chan_bw) + { + case HT_CHANNEL_WIDTH_20_40: + { + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x1); + } + break; + case HT_CHANNEL_WIDTH_80: + { + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, 0x00c00, 0x0); + } + break; + default: + break; + + } + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 0 --> Page C*/ + /*FillIQK Result*/ + RT_TRACE(COMP_IQK, DBG_LOUD, + ("========Path_A =======\n")); + + if (tx0_finish){ + _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, tx0_x, tx0_y); + } + else{ + _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0); + } + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80 + || vdf_enable){ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 0 --> Page C*/ + rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[0] & 0x00003fff); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + } + + if (rx0_finish == 1){ + _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, rx0_x, rx0_y); + } + else{ + _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_A, 0x200, 0x0); + } + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80 + || vdf_enable){ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /*[31] = 0 --> Page C*/ + rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[0] & 0x00003fff); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/ + } + + RT_TRACE(COMP_IQK, DBG_LOUD, + ("========Path_B =======\n")); + + if (tx1_finish){ + _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, tx1_x, tx1_y); + } + else{ + _rtl8812ae_iqk_tx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0); + } + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80 + || vdf_enable){ + 
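+ /* 80 MHz (VDF) case only: mirror the path A 0xce8 writes above by loading the averaged tx_dt (and, further down, rx_dt) into 0xee8 for path B. */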
rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 0 --> Page C*/ + rtl_set_bbreg(hw, 0xee8, 0x3fff0000, tx_dt[0] & 0x00003fff); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/ + } + + if (rx1_finish == 1){ + _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, rx1_x, rx1_y); + } + else{ + _rtl8812ae_iqk_rx_fill_iqc(hw, RF90_PATH_B, 0x200, 0x0); + } + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80 + || vdf_enable){ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 0 --> Page C*/ + rtl_set_bbreg(hw, 0xee8, 0x00003fff, rx_dt[0] & 0x00003fff); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/ + } +} + +void _rtl8812ae_iqk_restore_rf( + struct ieee80211_hw *hw, + enum radio_path path, + u32 *backup_rf_reg, + u32 *rf_backup, + u32 rf_reg_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + for (i = 0; i < rf_reg_num; i++) + rtl_set_rfreg(hw, path, backup_rf_reg[i], BRFREGOFFSETMASK, rf_backup[i]); + + rtl_set_rfreg(hw, path, 0xef, BRFREGOFFSETMASK, 0x0); + + switch(path){ + case RF90_PATH_A: + { + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RestoreRF Path A Success!!!!\n")); + } + break; + case RF90_PATH_B: + { + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RestoreRF Path B Success!!!!\n")); + } + break; + default: + break; + } +} + +void _rtl8812ae_iqk_restore_afe( + struct ieee80211_hw *hw, + u32 *afe_backup, + u32 *backup_afe_reg, + u32 afe_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Reload AFE Parameters */ + for (i = 0; i < afe_num; i++){ + rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]); + } + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); /* [31] = 1 --> Page C1*/ + rtl_write_dword(rtlpriv, 0xc80, 0x0); + rtl_write_dword(rtlpriv, 0xc84, 0x0); + rtl_write_dword(rtlpriv, 0xc88, 0x0); + rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000); + rtl_write_dword(rtlpriv, 0xc90, 0x00000080); + rtl_write_dword(rtlpriv, 0xc94, 0x00000000); + rtl_write_dword(rtlpriv, 0xcc4, 0x20040000); + rtl_write_dword(rtlpriv, 0xcc8, 0x20000000); + rtl_write_dword(rtlpriv, 0xcb8, 0x0); + rtl_write_dword(rtlpriv, 0xe80, 0x0); + rtl_write_dword(rtlpriv, 0xe84, 0x0); + rtl_write_dword(rtlpriv, 0xe88, 0x0); + rtl_write_dword(rtlpriv, 0xe8c, 0x3c000000); + rtl_write_dword(rtlpriv, 0xe90, 0x00000080); + rtl_write_dword(rtlpriv, 0xe94, 0x00000000); + rtl_write_dword(rtlpriv, 0xec4, 0x20040000); + rtl_write_dword(rtlpriv, 0xec8, 0x20000000); + rtl_write_dword(rtlpriv, 0xeb8, 0x0); + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RestoreAFE Success!!!!\n")); +} + +void _rtl8812ae_iqk_restore_macbb( + struct ieee80211_hw *hw, + u32 *macbb_backup, + u32 *backup_macbb_reg, + u32 macbb_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C*/ + //Reload MacBB Parameters + for (i = 0; i < macbb_num; i++){ + rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]); + } + RT_TRACE(COMP_IQK, DBG_LOUD, + ("RestoreMacBB Success!!!!\n")); +} + +#define MACBB_REG_NUM 10 +#define AFE_REG_NUM 14 +#define RF_REG_NUM 3 + +static void _rtl8812ae_phy_iq_calibrate( + struct ieee80211_hw *hw, + u8 channel) +{ + u32 macbb_backup[MACBB_REG_NUM]; + u32 afe_backup[AFE_REG_NUM]; + u32 rfa_backup[RF_REG_NUM]; + u32 rfb_backup[RF_REG_NUM]; + u32 backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550, + 0x808, 0x90c, 0xc00, 0xe00, + 0x8c4,0x838, 0x82c}; + u32 backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 
0xc68, + 0xcb8, 0xcb0, 0xcb4,0xe5c, + 0xe60, 0xe64, 0xe68, 0xeb8, + 0xeb0, 0xeb4}; + u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0}; + u8 chnl_idx = _rtl8812ae_get_right_chnl_place_for_iqk(channel); + + _rtl8812ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM); + _rtl8812ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM); + _rtl8812ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM); + + _rtl8812ae_iqk_configure_mac(hw); + _rtl8812ae_iqk_tx(hw, chnl_idx); + _rtl8812ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM); + _rtl8812ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfb_backup, RF_REG_NUM); // PATH_A ? + + _rtl8812ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM); + _rtl8812ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM); +} + + +void _rtl8821ae_iqk_backup_macbb( + struct ieee80211_hw *hw, + u32 *macbb_backup, + u32 *backup_macbb_reg, + u32 mac_bb_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*save MACBB default value*/ + for (i = 0; i < mac_bb_num; i++) { + macbb_backup[i] =rtl_read_dword(rtlpriv,backup_macbb_reg[i]); + } + + RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupMacBB Success!!!!\n")); +} + +void _rtl8821ae_iqk_backup_afe( + struct ieee80211_hw *hw, + u32 *afe_backup, + u32 *backup_afe_REG, + u32 afe_num + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Save AFE Parameters */ + for (i = 0; i < afe_num; i++){ + afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]); + } + RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupAFE Success!!!!\n")); +} + +void _rtl8821ae_iqk_backup_rf( + struct ieee80211_hw *hw, + u32 *rfa_backup, + u32 *rfb_backup, + u32 *backup_rf_reg, + u32 rf_num + ) +{ + + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*Save RF Parameters*/ + for (i = 0; i < rf_num; i++){ + rfa_backup[i] = rtl_get_rfreg(hw, RF90_PATH_A, backup_rf_reg[i], BMASKDWORD); + rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i], BMASKDWORD); + } + RT_TRACE(COMP_IQK, DBG_LOUD, ("BackupRF Success!!!!\n")); +} + +void _rtl8821ae_iqk_configure_mac( + struct ieee80211_hw *hw + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + /* ========MAC register setting========*/ + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + rtl_write_byte(rtlpriv, 0x522, 0x3f); + rtl_set_bbreg(hw, 0x550, BIT(11) | BIT(3), 0x0); + rtl_write_byte(rtlpriv, 0x808, 0x00); /*RX ante off*/ + rtl_set_bbreg(hw, 0x838, 0xf, 0xc); /*CCA off*/ +} + + +void _rtl8821ae_iqk_tx_fill_iqc( + struct ieee80211_hw *hw, + enum radio_path path, + u32 tx_x, + u32 tx_y + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + switch (path) { + case RF90_PATH_A: + { + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_write_dword(rtlpriv, 0xc90, 0x00000080); + rtl_write_dword(rtlpriv, 0xcc4, 0x20040000); + rtl_write_dword(rtlpriv, 0xcc8, 0x20000000); + rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x); + RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X = %x;;TX_Y = %x =====> fill to IQC\n", tx_x, tx_y)); + RT_TRACE(COMP_IQK, DBG_LOUD, ("0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", rtl_get_bbreg(hw, 0xcd4, 0x000007ff), rtl_get_bbreg(hw, 0xccc, 0x000007ff))); + } + break; + default: + break; + }; +} + + +void _rtl8821ae_iqk_rx_fill_iqc( 
+ struct ieee80211_hw *hw, + enum radio_path path, + u32 rx_x, + u32 rx_y + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + switch (path) { + case RF90_PATH_A: + { + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1); + RT_TRACE(COMP_IQK, DBG_LOUD, ("rx_x = %x;;rx_y = %x ====>fill to IQC\n", rx_x>>1, rx_y>>1)); + RT_TRACE(COMP_IQK, DBG_LOUD, ("0xc10 = %x ====>fill to IQC\n", rtl_read_dword(rtlpriv, 0xc10))); + } + break; + default: + break; + }; +} + + + +#define cal_num 10 + +void _rtl8821ae_iqk_tx( + struct ieee80211_hw *hw, + enum radio_path path + ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u32 tx_fail, rx_fail, delay_count, iqk_ready, cal_retry, cal = 0, temp_reg65; + int tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0, tx_average = 0, rx_average = 0; + int tx_x0[cal_num], tx_y0[cal_num], tx_x0_rxk[cal_num], tx_y0_rxk[cal_num], rx_x0[cal_num], rx_y0[cal_num]; + bool tx0iqkok = false, rx0iqkok = false; + bool vdf_enable = false; + int i, k, vdf_y[3], vdf_x[3], tx_dt[3], rx_dt[3], ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0; + + + RT_TRACE(COMP_IQK, DBG_LOUD, + ("BandWidth = %d.\n", + rtlphy->current_chan_bw)); + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80){ + vdf_enable = true; + } + + while (cal < cal_num) { + switch (path) { + case RF90_PATH_A: + { + temp_reg65 = rtl_get_rfreg(hw, path, 0x65, 0xffffffff); + //Path-A LOK + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /*[31] = 0 --> Page C*/ + /*========Path-A AFE all on========*/ + /*Port 0 DAC/ADC on*/ + rtl_write_dword(rtlpriv, 0xc60, 0x77777777); + rtl_write_dword(rtlpriv, 0xc64, 0x77777777); + rtl_write_dword(rtlpriv, 0xc68, 0x19791979); + rtl_write_dword(rtlpriv, 0xc6c, 0x19791979); + rtl_write_dword(rtlpriv, 0xc70, 0x19791979); + rtl_write_dword(rtlpriv, 0xc74, 0x19791979); + rtl_write_dword(rtlpriv, 0xc78, 0x19791979); + rtl_write_dword(rtlpriv, 0xc7c, 0x19791979); + rtl_write_dword(rtlpriv, 0xc80, 0x19791979); + rtl_write_dword(rtlpriv, 0xc84, 0x19791979); + + rtl_set_bbreg(hw, 0xc00, 0xf, 0x4); /*hardware 3-wire off*/ + + // LOK Setting + //====== LOK ====== + /*DAC/ADC sampling rate (160 MHz)*/ + rtl_set_bbreg(hw, 0xc5c, BIT(26) | BIT(25) | BIT(24), 0x7); + + // 2. 
LoK RF Setting (at BW = 20M) + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80002); + rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x3); // BW 20M + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y) + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y) + rtl_write_dword(rtlpriv, 0x984, 0x00462910);// [0]:AGC_en, [15]:idac_K_Mask + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_write_dword(rtlpriv, 0xc88, 0x821403f4); + + if (rtlhal->current_bandtype) + rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96); + else + rtl_write_dword(rtlpriv, 0xc8c, 0x28163e96); + + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + rtl_set_rfreg(hw, path, 0x58, 0x7fe00, rtl_get_rfreg(hw, path, 0x8, 0xffc00)); // Load LOK + + switch (rtlphy->current_chan_bw) + { + case 1: + { + rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x1); + } + break; + case 2: + { + rtl_set_rfreg(hw, path, 0x18, 0x00c00, 0x0); + } + break; + default: + break; + + } + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + + // 3. 
TX RF Setting + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x20000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0003f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xf3fc3); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d5); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + //ODM_SetBBReg(pDM_Odm, 0xcb8, 0xf, 0xd); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0xc94, BIT(0), 0x1); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y) + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y) + rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_write_dword(rtlpriv, 0xc88, 0x821403f1); + if (rtlhal->current_bandtype) + rtl_write_dword(rtlpriv, 0xc8c, 0x40163e96); + else + rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96); + + if (vdf_enable == 1){ + RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_enable\n")); + for (k = 0;k <= 2; k++){ + switch (k){ + case 0: + { + rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); + } + break; + case 1: + { + rtl_set_bbreg(hw, 0xc80, BIT(28), 0x0); + rtl_set_bbreg(hw, 0xc84, BIT(28), 0x0); + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); + } + break; + case 2: + { + RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff)); + RT_TRACE(COMP_IQK, DBG_LOUD, ("vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff)); + tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20); + tx_dt[cal] = ((16*tx_dt[cal])*10000/15708); + tx_dt[cal] = (tx_dt[cal] >> 1 )+(tx_dt[cal] & BIT(0)); + rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1); + rtl_set_bbreg(hw, 0xce8, 0x3fff0000, tx_dt[cal] & 0x00003fff); + } + break; + default: + break; + } + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + cal_retry = 0; + while(1){ + // one shot + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1){ + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count>20)){ + break; + } + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============TXIQK Check============== + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail){ + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } + else{ + rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200); + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) { + break; + } + } + } + else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10){ + break; + } + } + } + } + if (k == 3){ + tx_x0[cal] = 
vdf_x[k-1] ; + tx_y0[cal] = vdf_y[k-1]; + } + } + + else { + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + cal_retry = 0; + while(1){ + // one shot + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1){ + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready) || (delay_count>20)) { + break; + } + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============TXIQK Check============== + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail){ + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } + else{ + rtl_set_bbreg(hw, 0xccc, 0x000007ff, 0x0); + rtl_set_bbreg(hw, 0xcd4, 0x000007ff, 0x200); + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) { + break; + } + } + } + else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + } + + + if (tx0iqkok == false) + break; // TXK fail, Don't do RXK + + if (vdf_enable == 1){ + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); // TX VDF Disable + RT_TRACE(COMP_IQK, DBG_LOUD, ("RXVDF Start\n")); + for (k = 0;k <= 2; k++){ + //====== RX mode TXK (RXK Step 1) ====== + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + // 1. TX RF Setting + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + + rtl_set_bbreg(hw, 0xcb8, 0xf, 0xd); + rtl_write_dword(rtlpriv, 0x978, 0x29002000);// TX (X,Y) + rtl_write_dword(rtlpriv, 0x97c, 0xa9002000);// RX (X,Y) + rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + switch (k){ + case 0: + { + rtl_write_dword(rtlpriv, 0xc80, 0x18008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c38);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0); + } + break; + case 1: + { + rtl_write_dword(rtlpriv, 0xc80, 0x08008c38);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x28008c38);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_set_bbreg(hw, 0xce8, BIT(30), 0x0); + } + break; + case 2: + { + RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_Y[1] = %x;;;VDF_Y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff)); + RT_TRACE(COMP_IQK, DBG_LOUD, ("VDF_X[1] = %x;;;VDF_X[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff)); + rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20); + RT_TRACE(COMP_IQK, DBG_LOUD, ("Rx_dt = %d\n", rx_dt[cal])); + rx_dt[cal] = ((16*rx_dt[cal])*10000/13823); + rx_dt[cal] = (rx_dt[cal] >> 1 )+(rx_dt[cal] & BIT(0)); + 
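+ /* rx_dt is the spread between the two VDF tone results (vdf_y[1] - vdf_y[0], taken from bit 20 up), scaled by 16*10000/13823 and then halved with the dropped bit added back as rounding; the TX pass above builds tx_dt the same way with a 15708 divisor. The value is written into 0xce8[13:0] as the 80 MHz compensation just below. */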
rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c20);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_set_bbreg(hw, 0xce8, 0x00003fff, rx_dt[cal] & 0x00003fff); + } + break; + default: + break; + } + rtl_write_dword(rtlpriv, 0xc88, 0x821603e0); + rtl_write_dword(rtlpriv, 0xc8c, 0x68163e96); + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + cal_retry = 0; + while(1){ + // one shot + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1){ + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready)||(delay_count>20)){ + break; + } + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============TXIQK Check============== + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail){ + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } + else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + + if (tx0iqkok == false){ //If RX mode TXK fail, then take TXK Result + tx_x0_rxk[cal] = tx_x0[cal]; + tx_y0_rxk[cal] = tx_y0[cal]; + tx0iqkok = true; + RT_TRACE(COMP_IQK, DBG_LOUD, ("RXK Step 1 fail\n")); + } + + + //====== RX IQK ====== + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + // 1. RX RF Setting + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + + rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, BIT(31), 0x1); + rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0); + rtl_set_bbreg(hw, 0xcb8, 0xF, 0xe); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0x984, 0x0046a911); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_set_bbreg(hw, 0xc80, BIT(29), 0x1); + rtl_set_bbreg(hw, 0xc84, BIT(29), 0x0); + rtl_write_dword(rtlpriv, 0xc88, 0x02140119); + + rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /* pDM_Odm->SupportInterface == 1 */ + + if (k==2){ + rtl_set_bbreg(hw, 0xce8, BIT(30), 0x1); //RX VDF Enable + } + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + + cal_retry = 0; + while(1){ + // one shot + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1){ + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready)||(delay_count>20)){ + break; + } + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============RXIQK Check============== + rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11)); + if 
(rx_fail == 0){ + rtl_write_dword(rtlpriv, 0xcb8, 0x06000000); + vdf_x[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x08000000); + vdf_y[k] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rx0iqkok = true; + break; + } + else{ + rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1); + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + + } + } + else{ + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + + } + if (k == 3){ + rx_x0[cal] = vdf_x[k-1] ; + rx_y0[cal] = vdf_y[k-1]; + } + rtl_set_bbreg(hw, 0xce8, BIT(31), 0x1); // TX VDF Enable + } + + else{ + //====== RX mode TXK (RXK Step 1) ====== + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + // 1. TX RF Setting + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x00029); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xd7ffb); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x8a001); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0xb00, 0x03000100); + rtl_write_dword(rtlpriv, 0x984, 0x0046a910);// [0]:AGC_en, [15]:idac_K_Mask + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_write_dword(rtlpriv, 0xc80, 0x18008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x38008c10);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_write_dword(rtlpriv, 0xc88, 0x821603e0); + //ODM_Write4Byte(pDM_Odm, 0xc8c, 0x68163e96); + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + cal_retry = 0; + while(1){ + // one shot + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1){ + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready)||(delay_count>20)){ + break; + } + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============TXIQK Check============== + tx_fail = rtl_get_bbreg(hw, 0xd00, BIT(12)); + + if (~tx_fail){ + rtl_write_dword(rtlpriv, 0xcb8, 0x02000000); + tx_x0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x04000000); + tx_y0_rxk[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + tx0iqkok = true; + break; + } + else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + else{ + tx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + + + if (tx0iqkok == false){ //If RX mode TXK fail, then take TXK Result + tx_x0_rxk[cal] = tx_x0[cal]; + tx_y0_rxk[cal] = tx_y0[cal]; + tx0iqkok = true; + RT_TRACE(COMP_IQK, DBG_LOUD, ("1")); + } + + + //====== RX IQK ====== + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + // 1. 
RX RF Setting + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x80000); + rtl_set_rfreg(hw, path, 0x30, RFREG_OFFSET_MASK, 0x30000); + rtl_set_rfreg(hw, path, 0x31, RFREG_OFFSET_MASK, 0x0002f); + rtl_set_rfreg(hw, path, 0x32, RFREG_OFFSET_MASK, 0xfffbb); + rtl_set_rfreg(hw, path, 0x8f, RFREG_OFFSET_MASK, 0x88001); + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, 0x931d8); + rtl_set_rfreg(hw, path, 0xef, RFREG_OFFSET_MASK, 0x00000); + + rtl_set_bbreg(hw, 0x978, 0x03FF8000, (tx_x0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, 0x000007FF, (tx_y0_rxk[cal])>>21&0x000007ff); + rtl_set_bbreg(hw, 0x978, BIT(31), 0x1); + rtl_set_bbreg(hw, 0x97c, BIT(31), 0x0); + //ODM_SetBBReg(pDM_Odm, 0xcb8, 0xF, 0xe); + rtl_write_dword(rtlpriv, 0x90c, 0x00008000); + rtl_write_dword(rtlpriv, 0x984, 0x0046a911); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_write_dword(rtlpriv, 0xc80, 0x38008c10);// TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 + rtl_write_dword(rtlpriv, 0xc84, 0x18008c10);// RX_Tone_idx[9:0], RxK_Mask[29] + rtl_write_dword(rtlpriv, 0xc88, 0x02140119); + + rtl_write_dword(rtlpriv, 0xc8c, 0x28160d00); /*pDM_Odm->SupportInterface == 1*/ + + rtl_write_dword(rtlpriv, 0xcb8, 0x00100000);// cb8[20] N SI/PI ϥv iqk_dpk module + + cal_retry = 0; + while(1){ + // one shot + rtl_write_dword(rtlpriv, 0x980, 0xfa000000); + rtl_write_dword(rtlpriv, 0x980, 0xf8000000); + + mdelay(10); //Delay 10ms + rtl_write_dword(rtlpriv, 0xcb8, 0x00000000); + delay_count = 0; + while (1){ + iqk_ready = rtl_get_bbreg(hw, 0xd00, BIT(10)); + if ((~iqk_ready)||(delay_count>20)){ + break; + } + else{ + mdelay(1); + delay_count++; + } + } + + if (delay_count < 20){ // If 20ms No Result, then cal_retry++ + // ============RXIQK Check============== + rx_fail = rtl_get_bbreg(hw, 0xd00, BIT(11)); + if (rx_fail == 0){ + /* + ODM_Write4Byte(pDM_Odm, 0xcb8, 0x05000000); + reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff); + ODM_Write4Byte(pDM_Odm, 0xcb8, 0x06000000); + reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f); + DbgPrint("reg1 = %d, reg2 = %d", reg1, reg2); + Image_Power = (reg2<<32)+reg1; + DbgPrint("Before PW = %d\n", Image_Power); + ODM_Write4Byte(pDM_Odm, 0xcb8, 0x07000000); + reg1 = ODM_GetBBReg(pDM_Odm, 0xd00, 0xffffffff); + ODM_Write4Byte(pDM_Odm, 0xcb8, 0x08000000); + reg2 = ODM_GetBBReg(pDM_Odm, 0xd00, 0x0000001f); + Image_Power = (reg2<<32)+reg1; + DbgPrint("After PW = %d\n", Image_Power); + */ + + rtl_write_dword(rtlpriv, 0xcb8, 0x06000000); + rx_x0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rtl_write_dword(rtlpriv, 0xcb8, 0x08000000); + rx_y0[cal] = rtl_get_bbreg(hw, 0xd00, 0x07ff0000)<<21; + rx0iqkok = true; + break; + } + else{ + rtl_set_bbreg(hw, 0xc10, 0x000003ff, 0x200>>1); + rtl_set_bbreg(hw, 0xc10, 0x03ff0000, 0x0>>1); + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + + } + } + else{ + rx0iqkok = false; + cal_retry++; + if (cal_retry == 10) + break; + } + } + } + + if (tx0iqkok) + tx_average++; + if (rx0iqkok) + rx_average++; + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + rtl_set_rfreg(hw, path, 0x65, RFREG_OFFSET_MASK, temp_reg65); + } + break; + default: + break; + } + cal++; + } + + // FillIQK Result + switch (path){ + case RF90_PATH_A: + { + RT_TRACE(COMP_IQK, DBG_LOUD, ("========Path_A =======\n")); + if (tx_average == 0) + break; + + for (i = 0; i < tx_average; i++){ + RT_TRACE(COMP_IQK, DBG_LOUD, (" TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i, (tx_x0_rxk[i])>>21&0x000007ff, i, (tx_y0_rxk[i])>>21&0x000007ff)); + 
RT_TRACE(COMP_IQK, DBG_LOUD, ("TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i, (tx_x0[i])>>21&0x000007ff, i, (tx_y0[i])>>21&0x000007ff)); + } + for (i = 0; i < tx_average; i++){ + for (ii = i+1; ii < tx_average; ii++){ + dx = (tx_x0[i]>>21) - (tx_x0[ii]>>21); + if (dx < 3 && dx > -3){ + dy = (tx_y0[i]>>21) - (tx_y0[ii]>>21); + if (dy < 3 && dy > -3){ + tx_x = ((tx_x0[i]>>21) + (tx_x0[ii]>>21))/2; + tx_y = ((tx_y0[i]>>21) + (tx_y0[ii]>>21))/2; + tx_finish = 1; + break; + } + } + } + if (tx_finish == 1) + break; + } + + if (tx_finish == 1){ + _rtl8821ae_iqk_tx_fill_iqc(hw, path, tx_x, tx_y); // ? + } + else{ + _rtl8821ae_iqk_tx_fill_iqc(hw, path, 0x200, 0x0); + } + + if (rx_average == 0) + break; + + for (i = 0; i < rx_average; i++){ + RT_TRACE(COMP_IQK, DBG_LOUD, ("RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i, (rx_x0[i])>>21&0x000007ff, i, (rx_y0[i])>>21&0x000007ff)); + } + for (i = 0; i < rx_average; i++){ + for (ii = i+1; ii < rx_average; ii++){ + dx = (rx_x0[i]>>21) - (rx_x0[ii]>>21); + if (dx < 4 && dx > -4){ + dy = (rx_y0[i]>>21) - (rx_y0[ii]>>21); + if (dy < 4 && dy > -4){ + rx_x = ((rx_x0[i]>>21) + (rx_x0[ii]>>21))/2; + rx_y = ((rx_y0[i]>>21) + (rx_y0[ii]>>21))/2; + rx_finish = 1; + break; + } + } + } + if (rx_finish == 1) + break; + } + + if (rx_finish == 1){ + _rtl8821ae_iqk_rx_fill_iqc(hw, path, rx_x, rx_y); + } + else{ + _rtl8821ae_iqk_rx_fill_iqc(hw, path, 0x200, 0x0); + } + } + break; + default: + break; + } +} + +void _rtl8821ae_iqk_restore_rf( + struct ieee80211_hw *hw, + enum radio_path path, + u32* backup_rf_reg, + u32* rf_backup, + u32 rf_reg_num + ) +{ + u32 i; + struct rtl_priv* rtlpriv = rtl_priv(hw); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + for (i = 0; i < RF_REG_NUM; i++) + rtl_set_rfreg(hw, path, backup_rf_reg[i], RFREG_OFFSET_MASK, rf_backup[i]); + + switch(path){ + case RF90_PATH_A: + { + RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreRF Path A Success!!!!\n")); + } + break; + default: + break; + } +} + +void _rtl8821ae_iqk_restore_afe( + struct ieee80211_hw *hw, + u32* afe_backup, + u32* backup_afe_reg, + u32 afe_num + ) +{ + u32 i; + struct rtl_priv* rtlpriv = rtl_priv(hw); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + //Reload AFE Parameters + for (i = 0; i < afe_num; i++){ + rtl_write_dword(rtlpriv, backup_afe_reg[i], afe_backup[i]); + } + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x1); // [31] = 1 --> Page C1 + rtl_write_dword(rtlpriv, 0xc80, 0x0); + rtl_write_dword(rtlpriv, 0xc84, 0x0); + rtl_write_dword(rtlpriv, 0xc88, 0x0); + rtl_write_dword(rtlpriv, 0xc8c, 0x3c000000); + rtl_write_dword(rtlpriv, 0xc90, 0x00000080); + rtl_write_dword(rtlpriv, 0xc94, 0x00000000); + rtl_write_dword(rtlpriv, 0xcc4, 0x20040000); + rtl_write_dword(rtlpriv, 0xcc8, 0x20000000); + rtl_write_dword(rtlpriv, 0xcb8, 0x0); + RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreAFE Success!!!!\n")); +} + +void _rtl8821ae_iqk_restore_macbb( + struct ieee80211_hw *hw, + u32* macbb_backup, + u32* backup_macbb_reg, + u32 macbb_num + ) +{ + u32 i; + struct rtl_priv* rtlpriv = rtl_priv(hw); + + rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); // [31] = 0 --> Page C + //Reload MacBB Parameters + for (i = 0; i < macbb_num; i++){ + rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]); + } + RT_TRACE(COMP_IQK, DBG_LOUD, ("RestoreMacBB Success!!!!\n")); +} + + +#undef MACBB_REG_NUM +#undef AFE_REG_NUM +#undef RF_REG_NUM + +#define MACBB_REG_NUM 11 +#define AFE_REG_NUM 12 +#define RF_REG_NUM 3 + +static void _rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw) +{ + u32 macbb_backup[MACBB_REG_NUM]; + u32 afe_backup[AFE_REG_NUM]; + u32 rfa_backup[RF_REG_NUM]; + u32
rfb_backup[RF_REG_NUM]; + u32 backup_macbb_reg[MACBB_REG_NUM] = {0xb00, 0x520, 0x550, 0x808, 0x90c, 0xc00, 0xc50, + 0xe00, 0xe50, 0x838, 0x82c}; + u32 backup_afe_reg[AFE_REG_NUM] = {0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74, + 0xc78, 0xc7c, 0xc80, 0xc84, 0xcb8}; + u32 backup_rf_reg[RF_REG_NUM] = {0x65, 0x8f, 0x0}; + + _rtl8821ae_iqk_backup_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM); + _rtl8821ae_iqk_backup_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM); + _rtl8821ae_iqk_backup_rf(hw, rfa_backup, rfb_backup, backup_rf_reg, RF_REG_NUM); + + _rtl8821ae_iqk_configure_mac(hw); + _rtl8821ae_iqk_tx(hw, RF90_PATH_A); + _rtl8821ae_iqk_restore_rf(hw, RF90_PATH_A, backup_rf_reg, rfa_backup, RF_REG_NUM); + + _rtl8821ae_iqk_restore_afe(hw, afe_backup, backup_afe_reg, AFE_REG_NUM); + _rtl8821ae_iqk_restore_macbb(hw, macbb_backup, backup_macbb_reg, MACBB_REG_NUM); +} + +static void _rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) +{ + u8 tmpreg; + u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + tmpreg = rtl_read_byte(rtlpriv, 0xd03); + + if ((tmpreg & 0x70) != 0) + rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F); + else + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + + if ((tmpreg & 0x70) != 0) { + rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS); + + if (is2t) + rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00, + MASK12BITS); + + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, + (rf_a_mode & 0x8FFFF) | 0x10000); + + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, + (rf_b_mode & 0x8FFFF) | 0x10000); + } + lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); + + rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0); + /* rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); */ + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a); + + mdelay(100); + + rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0); + + if ((tmpreg & 0x70) != 0) { + rtl_write_byte(rtlpriv, 0xd03, tmpreg); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode); + + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, rf_b_mode); + } else { + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + } +RT_TRACE(COMP_INIT,DBG_LOUD,("\n")); + +} + +static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + //struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + //struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + RT_TRACE(COMP_INIT,DBG_LOUD,("\n")); + + if (main) + rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1); + else + rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x2); +} + +#undef IQK_ADDA_REG_NUM +#undef IQK_DELAY_TIME + +void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (!rtlphy->b_iqk_in_progress) + { + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + _rtl8812ae_phy_iq_calibrate(hw, rtlphy->current_channel); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + } +} + +void rtl8812ae_reset_iqk_result(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 i; + + RT_TRACE(COMP_IQK, DBG_LOUD, + ("rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n", + 
(int)(sizeof(rtlphy->iqk_matrix_regsetting) / + sizeof(struct iqk_matrix_regs)), + IQK_MATRIX_SETTINGS_NUM)); + + for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { + { + rtlphy->iqk_matrix_regsetting[i].value[0][0] = + rtlphy->iqk_matrix_regsetting[i].value[0][2] = + rtlphy->iqk_matrix_regsetting[i].value[0][4] = + rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100; + + rtlphy->iqk_matrix_regsetting[i].value[0][1] = + rtlphy->iqk_matrix_regsetting[i].value[0][3] = + rtlphy->iqk_matrix_regsetting[i].value[0][5] = + rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0; + + rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false; + + } + } +} + +void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index, + u8 thermal_value, u8 threshold) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + rtl8812ae_reset_iqk_result(hw); + + rtldm->thermalvalue_iqk= thermal_value; + rtl8812ae_phy_iq_calibrate(hw, false); +} + +void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (!rtlphy->b_iqk_in_progress) + { + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = true; + spin_unlock(&rtlpriv->locks.iqk_lock); + + _rtl8821ae_phy_iq_calibrate(hw); + + spin_lock(&rtlpriv->locks.iqk_lock); + rtlphy->b_iqk_in_progress = false; + spin_unlock(&rtlpriv->locks.iqk_lock); + } +} + +void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 i; + + RT_TRACE(COMP_IQK, DBG_LOUD, + ("rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n", + (int)(sizeof(rtlphy->iqk_matrix_regsetting) / + sizeof(struct iqk_matrix_regs)), + IQK_MATRIX_SETTINGS_NUM)); + + for(i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) { + { + rtlphy->iqk_matrix_regsetting[i].value[0][0] = + rtlphy->iqk_matrix_regsetting[i].value[0][2] = + rtlphy->iqk_matrix_regsetting[i].value[0][4] = + rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100; + + rtlphy->iqk_matrix_regsetting[i].value[0][1] = + rtlphy->iqk_matrix_regsetting[i].value[0][3] = + rtlphy->iqk_matrix_regsetting[i].value[0][5] = + rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0; + + rtlphy->iqk_matrix_regsetting[i].b_iqk_done = false; + + } + } +} + +void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index, + u8 thermal_value, u8 threshold) +{ + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + + rtl8821ae_reset_iqk_result(hw); + + rtldm->thermalvalue_iqk= thermal_value; + rtl8821ae_phy_iq_calibrate(hw, false); +} + +void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = &(rtlpriv->rtlhal); + u32 timeout = 2000, timecount = 0; + + + while (rtlpriv->mac80211.act_scanning && timecount < timeout) { + udelay(50); + timecount += 50; + } + + rtlphy->lck_inprogress = true; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + ("LCK:Start!!! 
currentband %x delay %d ms\n", + rtlhal->current_bandtype, timecount)); + + _rtl8821ae_phy_lc_calibrate(hw, false); + + rtlphy->lck_inprogress = false; +} + +void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (rtlphy->b_apk_done) + return; + + return; +} + +void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) +{ + _rtl8821ae_phy_set_rfpath_switch(hw, bmain); +} + +bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + bool b_postprocessing = false; + + RT_TRACE(COMP_CMD, DBG_TRACE, + ("-->IO Cmd(%#x), set_io_inprogress(%d)\n", + iotype, rtlphy->set_io_inprogress)); + do { + switch (iotype) { + case IO_CMD_RESUME_DM_BY_SCAN: + RT_TRACE(COMP_CMD, DBG_TRACE, + ("[IO CMD] Resume DM after scan.\n")); + b_postprocessing = true; + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + case IO_CMD_PAUSE_BAND1_DM_BY_SCAN: + RT_TRACE(COMP_CMD, DBG_TRACE, + ("[IO CMD] Pause DM before scan.\n")); + b_postprocessing = true; + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + } while (false); + if (b_postprocessing && !rtlphy->set_io_inprogress) { + rtlphy->set_io_inprogress = true; + rtlphy->current_io_type = iotype; + } else { + return false; + } + rtl8821ae_phy_set_io(hw); + RT_TRACE(COMP_CMD, DBG_TRACE, ("IO Type(%#x)\n", iotype)); + return true; +} + +static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + RT_TRACE(COMP_CMD, DBG_TRACE, + ("--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress)); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + if (rtlpriv->mac80211.opmode== NL80211_IFTYPE_ADHOC) + _rtl8821ae_resume_tx_beacon(hw); + rtl8821ae_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1); + rtl8821ae_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca); + break; + case IO_CMD_PAUSE_BAND0_DM_BY_SCAN: + if (rtlpriv->mac80211.opmode== NL80211_IFTYPE_ADHOC) + _rtl8821ae_stop_tx_beacon(hw); + rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue; + rtl8821ae_dm_write_dig(hw, 0x17); + rtlphy->initgain_backup.cca = dm_digtable.cur_cck_cca_thres; + rtl8821ae_dm_write_cck_cca_thres(hw, 0x40); + break; + case IO_CMD_PAUSE_BAND1_DM_BY_SCAN: + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(COMP_CMD, DBG_TRACE, + ("(%#x)\n", rtlphy->current_io_type)); +} + +static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); +} + +#if 0 +static void _rtl8821ae_phy_set_rf_sleep(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + /*rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); + while (u4b_tmp != 0 && delay > 0) { + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0); + rtl_set_rfreg(hw, 
RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); + delay--; + } + if (delay == 0) { + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + RT_TRACE(COMP_POWER, DBG_TRACE, + ("Switch RF timeout !!!.\n")); + return; + }*/ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); +} +#endif + +static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool bresult = true; + u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; + + switch (rfpwr_state) { + case ERFON:{ + if ((ppsc->rfpwr_state == ERFOFF) && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + bool rtstatus = false; + u32 InitializeCount = 0; + do { + InitializeCount++; + RT_TRACE(COMP_RF, DBG_DMESG, + ("IPS Set eRf nic enable\n")); + rtstatus = rtl_ps_enable_nic(hw); + } while ((rtstatus != true) + && (InitializeCount < 10)); + RT_CLEAR_PS_LEVEL(ppsc, + RT_RF_OFF_LEVL_HALT_NIC); + } else { + RT_TRACE(COMP_RF, DBG_DMESG, + ("Set ERFON sleeped:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc-> + last_sleep_jiffies))); + ppsc->last_awake_jiffies = jiffies; + rtl8821ae_phy_set_rf_on(hw); + } + if (mac->link_state == MAC80211_LINKED) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } + break; + } + case ERFOFF:{ + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("eRf Off/Sleep: %d times " + "TcbBusyQueue[%d] =%d before " + "doze!\n", (i + 1), queue_id, + skb_queue_len(&ring->queue))); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("\n ERFSLEEP: %d times " + "TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue))); + break; + } + } + + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { + RT_TRACE(COMP_RF, DBG_DMESG, + ("IPS Set eRf nic disable\n")); + rtl_ps_disable_nic(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_POWER_OFF); + } + } + break; + } + /*case ERFSLEEP:{ + if (ppsc->rfpwr_state == ERFOFF) + break; + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("eRf Off/Sleep: %d times " + "TcbBusyQueue[%d] =%d before " + "doze!\n", (i + 1), queue_id, + skb_queue_len(&ring->queue))); + + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(COMP_ERR, DBG_WARNING, + ("\n ERFSLEEP: %d times " + "TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue))); + break; + } + } + RT_TRACE(COMP_RF, DBG_DMESG, + ("Set ERFSLEEP awaked:%d ms\n", + 
jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies))); + ppsc->last_sleep_jiffies = jiffies; + _rtl8821ae_phy_set_rf_sleep(hw); + break; + }*/ + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("switch case not process \n")); + bresult = false; + break; + } + if (bresult) + ppsc->rfpwr_state = rfpwr_state; + return bresult; +} + +bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + bool bresult = false; + + if (rfpwr_state == ppsc->rfpwr_state) + return bresult; + bresult = _rtl8821ae_phy_set_rf_power_state(hw, rfpwr_state); + return bresult; +} --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/phy.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/phy.h @@ -0,0 +1,258 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_PHY_H__ +#define __RTL8821AE_PHY_H__ + +/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/ +#define MAX_TX_COUNT 4 +#define TX_1S 0 +#define TX_2S 1 +#define TX_3S 2 +#define TX_4S 3 + +#define MAX_POWER_INDEX 0x3F + +#define MAX_PRECMD_CNT 16 +#define MAX_RFDEPENDCMD_CNT 16 +#define MAX_POSTCMD_CNT 16 + +#define MAX_DOZE_WAITING_TIMES_9x 64 + +#define RT_CANNOT_IO(hw) false +#define HIGHPOWER_RADIOA_ARRAYLEN 22 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_BB_REG_NUM 9 +#define MAX_TOLERANCE 5 +#define IQK_DELAY_TIME 10 +#define index_mapping_NUM 15 + +#define APK_BB_REG_NUM 5 +#define APK_AFE_REG_NUM 16 +#define APK_CURVE_REG_NUM 4 +#define PATH_NUM 2 + +#define LOOP_LIMIT 5 +#define MAX_STALL_TIME 50 +#define AntennaDiversityValue 0x80 +#define MAX_TXPWR_IDX_NMODE_92S 63 +#define Reset_Cnt_Limit 3 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_MAC_REG_NUM 4 + +#define RF6052_MAX_PATH 2 + +#define CT_OFFSET_MAC_ADDR 0X16 + +#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A +#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 +#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66 +#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69 +#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C + +#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F +#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72 + +#define CT_OFFSET_CHANNEL_PLAH 0x75 +#define CT_OFFSET_THERMAL_METER 0x78 +#define CT_OFFSET_RF_OPTION 0x79 +#define CT_OFFSET_VERSION 0x7E +#define CT_OFFSET_CUSTOMER_ID 0x7F + +#define RTL8821AE_MAX_PATH_NUM 2 + +#define TARGET_CHNL_NUM_2G_5G_8812 59 + +enum swchnlcmd_id { + CMDID_END, + CMDID_SET_TXPOWEROWER_LEVEL, + CMDID_BBREGWRITE10, + CMDID_WRITEPORT_ULONG, + CMDID_WRITEPORT_USHORT, + CMDID_WRITEPORT_UCHAR, + CMDID_RF_WRITEREG, +}; + +struct swchnlcmd { + enum swchnlcmd_id cmdid; + u32 para1; + u32 para2; + u32 msdelay; +}; + +enum hw90_block_e { + HW90_BLOCK_MAC = 0, + HW90_BLOCK_PHY0 = 1, + HW90_BLOCK_PHY1 = 2, + HW90_BLOCK_RF = 3, + HW90_BLOCK_MAXIMUM = 4, +}; + +enum baseband_config_type { + BASEBAND_CONFIG_PHY_REG = 0, + BASEBAND_CONFIG_AGC_TAB = 1, +}; + +enum ra_offset_area { + RA_OFFSET_LEGACY_OFDM1, + RA_OFFSET_LEGACY_OFDM2, + RA_OFFSET_HT_OFDM1, + RA_OFFSET_HT_OFDM2, + RA_OFFSET_HT_OFDM3, + RA_OFFSET_HT_OFDM4, + RA_OFFSET_HT_CCK, +}; + +enum antenna_path { + ANTENNA_NONE, + ANTENNA_D, + ANTENNA_C, + ANTENNA_CD, + ANTENNA_B, + ANTENNA_BD, + ANTENNA_BC, + ANTENNA_BCD, + ANTENNA_A, + ANTENNA_AD, + ANTENNA_AC, + ANTENNA_ACD, + ANTENNA_AB, + ANTENNA_ABD, + ANTENNA_ABC, + ANTENNA_ABCD +}; + +struct r_antenna_select_ofdm { + u32 r_tx_antenna:4; + u32 r_ant_l:4; + u32 r_ant_non_ht:4; + u32 r_ant_ht1:4; + u32 r_ant_ht2:4; + u32 r_ant_ht_s1:4; + u32 r_ant_non_ht_s1:4; + u32 ofdm_txsc:2; + u32 reserved:2; +}; + +struct r_antenna_select_cck { + u8 r_cckrx_enable_2:2; + u8 r_cckrx_enable:2; + u8 r_ccktx_enable:4; +}; + + +struct efuse_contents { + u8 mac_addr[ETH_ALEN]; + u8 cck_tx_power_idx[6]; + u8 ht40_1s_tx_power_idx[6]; + u8 ht40_2s_tx_power_idx_diff[3]; + u8 ht20_tx_power_idx_diff[3]; + u8 ofdm_tx_power_idx_diff[3]; + u8 ht40_max_power_offset[3]; + u8 ht20_max_power_offset[3]; + u8 channel_plan; + u8 thermal_meter; + u8 rf_option[5]; + u8 version; + u8 oem_id; + u8 regulatory; +}; + +struct tx_power_struct { + u8 cck[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_1s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_2s[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 
ht20_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_diff[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_txpowerdiff; + u8 groupht20[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 groupht40[RTL8821AE_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 pwrgroup_cnt; + u32 mcs_original_offset[4][16]; +}; +enum _ANT_DIV_TYPE +{ + NO_ANTDIV = 0xFF, + CG_TRX_HW_ANTDIV = 0x01, + CGCS_RX_HW_ANTDIV = 0x02, + FIXED_HW_ANTDIV = 0x03, + CG_TRX_SMART_ANTDIV = 0x04, + CGCS_RX_SW_ANTDIV = 0x05, + +}; + +extern u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask); +extern void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data); +extern u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask); +extern void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask, u32 data); +extern bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw); +extern bool rtl8821ae_phy_bb_config(struct ieee80211_hw *hw); +extern bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw); +extern void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band); +extern void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +extern void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, + long *powerlevel); +extern void rtl8821ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); +extern void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); +extern void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +extern void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +extern void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw); +extern u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw); +extern void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); +extern void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); +void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); +bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); +extern bool rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); +u8 _rtl8812ae_get_right_chnl_place_for_iqk(u8 chnl); +void rtl8821ae_phy_set_txpower_level_by_path(struct ieee80211_hw *hw, u8 channel, u8 path); +void rtl8812ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index, + u8 thermal_value, u8 threshold); +void rtl8821ae_do_iqk(struct ieee80211_hw *hw,u8 delta_thermal_index, + u8 thermal_value, u8 threshold); +void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw); + + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.c @@ -0,0 +1,199 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "pwrseqcmd.h" +#include "pwrseq.h" + +/* + drivers should parse below arrays and do the corresponding actions +*/ +//3 Power on Array +struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]= +{ + RTL8812_TRANS_CARDEMU_TO_ACT + RTL8812_TRANS_END +}; + +//3Radio off GPIO Array +struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS]= +{ + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_END +}; + +//3Card Disable Array +struct wlan_pwr_cfg rtl8812_card_disable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS ] = +{ + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_CARDDIS + RTL8812_TRANS_END +}; + +//3 Card Enable Array +struct wlan_pwr_cfg rtl8812_card_enable_flow[ RTL8812_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8812_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8812_TRANS_END_STEPS ] = +{ + RTL8812_TRANS_CARDDIS_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_ACT + RTL8812_TRANS_END +}; + +//3Suspend Array +struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]= +{ + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_SUS + RTL8812_TRANS_END +}; + +//3 Resume Array +struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]= +{ + RTL8812_TRANS_SUS_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_ACT + RTL8812_TRANS_END +}; + + + +//3HWPDN Array +struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]= +{ + RTL8812_TRANS_ACT_TO_CARDEMU + RTL8812_TRANS_CARDEMU_TO_PDN + RTL8812_TRANS_END +}; + +//3 Enter LPS +struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS]= +{ + //FW behavior + RTL8812_TRANS_ACT_TO_LPS + RTL8812_TRANS_END +}; + +//3 Leave LPS +struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]= +{ + //FW behavior + RTL8812_TRANS_LPS_TO_ACT + RTL8812_TRANS_END +}; + + +/* + drivers should parse below arrays and do the corresponding actions +*/ +/*3 Power on Array*/ +struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_CARDEMU_TO_ACT + RTL8821A_TRANS_END +}; + +/*3Radio off GPIO Array */ +struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_END +}; + +/*3Card Disable Array*/ +struct wlan_pwr_cfg 
rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_CARDDIS + RTL8821A_TRANS_END +}; + +/*3 Card Enable Array*/ +struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS /*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/ + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_CARDDIS_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_ACT + RTL8821A_TRANS_END +}; + +/*3Suspend Array*/ +struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_SUS + RTL8821A_TRANS_END +}; + +/*3 Resume Array*/ +struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_SUS_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_ACT + RTL8821A_TRANS_END +}; + +/*3HWPDN Array*/ +struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + RTL8821A_TRANS_ACT_TO_CARDEMU + RTL8821A_TRANS_CARDEMU_TO_PDN + RTL8821A_TRANS_END +}; + +/*3 Enter LPS */ +struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + /*FW behavior*/ + RTL8821A_TRANS_ACT_TO_LPS + RTL8821A_TRANS_END +}; + +/*3 Leave LPS */ +struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS] = +{ + /*FW behavior*/ + RTL8821A_TRANS_LPS_TO_ACT + RTL8821A_TRANS_END +}; + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/pwrseq.h @@ -0,0 +1,413 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_PWRSEQ_H__ +#define __RTL8821AE_PWRSEQ_H__ + +#include "pwrseqcmd.h" +#include "../btcoexist/halbt_precomp.h" + +#define RTL8812_TRANS_CARDEMU_TO_ACT_STEPS 15 +#define RTL8812_TRANS_ACT_TO_CARDEMU_STEPS 15 +#define RTL8812_TRANS_CARDEMU_TO_SUS_STEPS 15 +#define RTL8812_TRANS_SUS_TO_CARDEMU_STEPS 15 +#define RTL8812_TRANS_CARDEMU_TO_PDN_STEPS 25 +#define RTL8812_TRANS_PDN_TO_CARDEMU_STEPS 15 +#define RTL8812_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8812_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8812_TRANS_END_STEPS 1 + + +#define RTL8812_TRANS_CARDEMU_TO_ACT \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0},/* disable SW LPS 0x04[10]=0*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0},/* disable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/ + +#define RTL8812_TRANS_ACT_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4 turn off 3-wire */ \ + {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4 turn off 3-wire */ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /* 0x2[0] = 0 RESET BB, CLOSE RF */ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /* Whole BB is reset*/ \ + /*{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},//0x1F[7:0] = 0 turn off RF*/ \ + /*{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},//0x4C[23] = 0x4E[7] = 0, switch DPDT_SEL_P output from register 0x65[2] */ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x2A}, /* 0x07[7:0] = 0x28 sps pwm mode 0x2a for BT coex*/ \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \ + /*{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0|BIT1, 0}, // 0x02[1:0] = 0 reset BB */ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ + +#define 
RTL8812_TRANS_CARDEMU_TO_SUS \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */ \ + {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */ \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \ + {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* suspend option all off */ \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 trun on ZCD */ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'11 enable WL suspend for PCIe*/ + +#define RTL8812_TRANS_SUS_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO sleep mode leave */ \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 trun off ZCD */ \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */ + +#define RTL8812_TRANS_CARDEMU_TO_CARDDIS \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + /**{0x0194, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, //0x194[0]=0 , disable 32K clock*/ \ + /**{0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x94}, //0x93=0x94 , 90[30] =0 enable 500k ANA clock .switch clock from 12M to 500K , 90 [26] =0 disable EEprom loader clock*/ \ + {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x03[2] = 0, reset 8051*/ \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x05}, 
/*0x80=05h if reload fw, fill the default value of host_CPU handshake field*/ \ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xcc},\ + {0x0042, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xF0, 0xEC},\ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x07},/* gpio11 input mode, gpio10~8 output mode */ \ + {0x0045, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio 0~7 output same value as input ?? */ \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xff},/* gpio0~7 output mode */ \ + {0x0047, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/* 0x47[7:0] = 00 gpio mode */ \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, BIT7},/*0x14[7] = 1 turn on ZCD */ \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, BIT0},/* 0x15[0] =1 trun on ZCD */ \ + {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/*0x12[0] = 0 force PFM mode */ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, BIT4},/*0x23[4] = 1 hpon LDO sleep mode */ \ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x02, 0},/*0x8[1] = 0 ANA clk =500k */ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/ \ + {0x001f, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x01f[1]=0 , disable RFC_0 control REG_RF_CTRL_8812 */ \ + {0x0076, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /*0x076[1]=0 , disable RFC_1 control REG_OPT_CTRL_8812 +2 */ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, BIT3}, /*0x04[11] = 2b'01 enable WL suspend*/ + +#define RTL8812_TRANS_CARDDIS_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0012, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x12[0] = 1 force PWM mode */ \ + {0x0014, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x80, 0},/*0x14[7] = 0 turn off ZCD */ \ + {0x0015, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x01, 0},/* 0x15[0] =0 trun off ZCD */ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0x10, 0},/*0x23[4] = 0 hpon LDO leave sleep mode */ \ + {0x0046, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio0~7 input mode */ \ + {0x0043, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/* gpio11 input mode, gpio10~8 input mode */ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, 0}, /*0x04[10] = 0, enable SW LPS PCIE only*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3, 0}, /*0x04[11] = 2b'01enable WL suspend*/ \ + {0x0003, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, 
PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x03[2] = 1, enable 8051*/ \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/ + + +#define RTL8812_TRANS_CARDEMU_TO_PDN \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/ + +#define RTL8812_TRANS_PDN_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/ + +#define RTL8812_TRANS_ACT_TO_LPS \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x0c00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xc00[7:0] = 4 turn off 3-wire */ \ + {0x0e00, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x04}, /* 0xe00[7:0] = 4 turn off 3-wire */ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated,and RF closed*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0}, /* Whole BB is reset*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ + + +#define RTL8812_TRANS_LPS_TO_ACT \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \ + {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \ + {0x0008, 
PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/ \ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/ \ + {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/ \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/ + +#define RTL8812_TRANS_END \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, // + + +extern struct wlan_pwr_cfg rtl8812_power_on_flow[RTL8812_TRANS_CARDEMU_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_radio_off_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_card_disable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_card_enable_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_suspend_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_resume_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_SUS_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_hwpdn_flow[RTL8812_TRANS_ACT_TO_CARDEMU_STEPS+RTL8812_TRANS_CARDEMU_TO_PDN_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_enter_lps_flow[RTL8812_TRANS_ACT_TO_LPS_STEPS+RTL8812_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8812_leave_lps_flow[RTL8812_TRANS_LPS_TO_ACT_STEPS+RTL8812_TRANS_END_STEPS]; + +/* + Check document WM-20130516-JackieLau-RTL8821A_Power_Architecture-R10.vsd + There are 6 HW Power States: + 0: POFF--Power Off + 1: PDN--Power Down + 2: CARDEMU--Card Emulation + 3: ACT--Active Mode + 4: LPS--Low Power State + 5: SUS--Suspend + + The transision from different states are defined below + TRANS_CARDEMU_TO_ACT + TRANS_ACT_TO_CARDEMU + TRANS_CARDEMU_TO_SUS + TRANS_SUS_TO_CARDEMU + TRANS_CARDEMU_TO_PDN + TRANS_ACT_TO_LPS + TRANS_LPS_TO_ACT + + TRANS_END +*/ +#define RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS 25 +#define RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS 15 +#define RTL8821A_TRANS_SUS_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS 15 +#define RTL8821A_TRANS_PDN_TO_CARDEMU_STEPS 15 +#define RTL8821A_TRANS_ACT_TO_LPS_STEPS 15 +#define RTL8821A_TRANS_LPS_TO_ACT_STEPS 15 +#define RTL8821A_TRANS_END_STEPS 1 + + +#define RTL8821A_TRANS_CARDEMU_TO_ACT \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, 
PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0}, /*0x20[0] = 1b'1 enable LDOA12 MACRO block for all interface*/ \ + {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x67[0] = 0 to disable BT_GPS_SEL pins*/ \ + {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},/*Delay 1ms*/ \ + {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, 0}, /*0x00[5] = 1b'0 release analog Ips to digital ,1:isolation*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3|BIT2), 0},/* disable SW LPS 0x04[10]=0 and WLSUS_EN 0x04[12:11]=0*/ \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , BIT0},/* Disable USB suspend */ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \ + {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0 , 0},/* Enable USB suspend */ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* release WLON reset 0x04[16]=1*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* disable HWPDN 0x04[15]=0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT4|BIT3), 0},/* disable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/* polling until return 0*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT0, 0},/**/ \ + {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*0x4C[24] = 0x4F[0] = 1, switch DPDT_SEL_P output from WL BB */\ + {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, (BIT5|BIT4), (BIT5|BIT4)},/*0x66[13] = 0x67[5] = 1, switch for PAPE_G/PAPE_A from WL BB ; 0x66[12] = 0x67[4] = 1, switch LNAON from WL BB */\ + {0x0025, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6, 0},/*anapar_mac<118> , 0x25[6]=0 by wlan single function*/\ + {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable falling edge triggering interrupt*/\ + {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable GPIO9 interrupt mode*/\ + {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable GPIO9 input mode*/\ + {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, BIT0},/*Enable HSISR GPIO[C:0] interrupt*/\ + {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1},/*Enable HSISR GPIO9 interrupt*/\ + {0x007A, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x3A},/*0x7A = 0x3A start BT*/\ + {0x002E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF , 0x82 },/* 0x2C[23:12]=0x820 ; XTAL trim */ \ + {0x0010, PWR_CUT_A_MSK , PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6 
, BIT6 },/* 0x10[6]=1 ; MPsW0x2CvA0x10[6]]1~WLAN */ \ + + +#define RTL8821A_TRANS_ACT_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \ + {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*0x4C[24] = 0x4F[0] = 0, switch DPDT_SEL_P output from register 0x65[2] */\ + {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Enable rising edge triggering interrupt*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \ + {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5}, /*0x00[5] = 1b'1 analog Ips to digital ,1:isolation*/ \ + {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x20[0] = 1b'0 disable LDOA12 MACRO block*/ \ + + +#define RTL8821A_TRANS_CARDEMU_TO_SUS \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4|BIT3, (BIT4|BIT3)}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SDIO SOP option to disable BG/MB/ACK/SWR*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/ + +#define RTL8821A_TRANS_SUS_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/ + +#define 
RTL8821A_TRANS_CARDEMU_TO_CARDDIS \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07=0x20 , SOP option to disable BG/MB*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT2, BIT2}, /*0x04[10] = 1, enable SW LPS*/ \ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 1}, /*0x48[16] = 1 to enable GPIO9 as EXT WAKEUP*/ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/ + +#define RTL8821A_TRANS_CARDDIS_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3 | BIT7, 0}, /*clear suspend enable and power down enable*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \ + {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\ + {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0}, /*0x48[16] = 0 to disable GPIO9 as EXT WAKEUP*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01enable WL suspend*/\ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*0x23[4] = 1b'0 12H LDO enter normal mode*/ \ + {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0},/*PCIe DMA start*/ + + +#define RTL8821A_TRANS_CARDEMU_TO_PDN \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, BIT4}, /*0x23[4] = 1b'1 12H LDO enter sleep mode*/ \ + {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK|PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x20}, /*0x07[7:0] = 0x20 SOP option to disable BG/MB/ACK/SWR*/ \ + {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/ + +#define RTL8821A_TRANS_PDN_TO_CARDEMU \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/ + +#define RTL8821A_TRANS_ACT_TO_LPS \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + 
{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*PCIe DMA stop*/ \ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF},/*Tx Pause*/ \ + {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled,and clock are gated*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*Whole BB is reset*/ \ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x03},/*Reset MAC TRX*/ \ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \ + {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x00},/*When driver enter Sus/ Disable, enable LOP for BT*/ \ + {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ \ + + +#define RTL8821A_TRANS_LPS_TO_ACT \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,PWR_BASEADDR_SDIO,PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\ + {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\ + {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\ + {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\ + {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/\ + {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\ + {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/\ + {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\ + {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\ + {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,PWR_BASEADDR_MAC,PWR_CMD_WRITE, 0xFF, 0}, /*. 
0x522 = 0*/ + +#define RTL8821A_TRANS_END \ + /* format */ \ + /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, // comments here*/ \ + {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0,PWR_CMD_END, 0, 0}, // + +extern struct wlan_pwr_cfg rtl8821A_power_on_flow[RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_radio_off_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_card_disable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_card_enable_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_ACT_STEPS/*RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS*/ + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_suspend_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_resume_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_SUS_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_hwpdn_flow[RTL8821A_TRANS_ACT_TO_CARDEMU_STEPS + + RTL8821A_TRANS_CARDEMU_TO_PDN_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_enter_lps_flow[RTL8821A_TRANS_ACT_TO_LPS_STEPS + + RTL8821A_TRANS_END_STEPS]; +extern struct wlan_pwr_cfg rtl8821A_leave_lps_flow[RTL8821A_TRANS_LPS_TO_ACT_STEPS + + RTL8821A_TRANS_END_STEPS]; + +/*RTL8812 Power Configuration CMDs for PCIe interface*/ +#define RTL8812_NIC_PWR_ON_FLOW rtl8812_power_on_flow +#define RTL8812_NIC_RF_OFF_FLOW rtl8812_radio_off_flow +#define RTL8812_NIC_DISABLE_FLOW rtl8812_card_disable_flow +#define RTL8812_NIC_ENABLE_FLOW rtl8812_card_enable_flow +#define RTL8812_NIC_SUSPEND_FLOW rtl8812_suspend_flow +#define RTL8812_NIC_RESUME_FLOW rtl8812_resume_flow +#define RTL8812_NIC_PDN_FLOW rtl8812_hwpdn_flow +#define RTL8812_NIC_LPS_ENTER_FLOW rtl8812_enter_lps_flow +#define RTL8812_NIC_LPS_LEAVE_FLOW rtl8812_leave_lps_flow + +/* RTL8821 Power Configuration CMDs for PCIe interface */ +#define RTL8821A_NIC_PWR_ON_FLOW rtl8821A_power_on_flow +#define RTL8821A_NIC_RF_OFF_FLOW rtl8821A_radio_off_flow +#define RTL8821A_NIC_DISABLE_FLOW rtl8821A_card_disable_flow +#define RTL8821A_NIC_ENABLE_FLOW rtl8821A_card_enable_flow +#define RTL8821A_NIC_SUSPEND_FLOW rtl8821A_suspend_flow +#define RTL8821A_NIC_RESUME_FLOW rtl8821A_resume_flow +#define RTL8821A_NIC_PDN_FLOW rtl8821A_hwpdn_flow +#define RTL8821A_NIC_LPS_ENTER_FLOW rtl8821A_enter_lps_flow +#define RTL8821A_NIC_LPS_LEAVE_FLOW rtl8821A_leave_lps_flow + + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.c @@ -0,0 +1,140 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "pwrseq.h" + + +/* +* Description: +* This routine deal with the Power Configuration CMDs +* parsing for RTL8723/RTL8188E Series IC. +* Assumption: +* We should follow specific format which was released from HW SD. +* +* 2011.07.07, added by Roger. +*/ +bool rtl_hal_pwrseqcmdparsing (struct rtl_priv* rtlpriv, u8 cut_version, + u8 fab_version, u8 interface_type, + struct wlan_pwr_cfg pwrcfgcmd[]) + +{ + struct wlan_pwr_cfg pwr_cfg_cmd = {0}; + bool polling_bit = false; + u32 ary_idx=0; + u8 value = 0; + u32 offset = 0; + u32 polling_count = 0; + u32 max_polling_cnt = 5000; + + do { + pwr_cfg_cmd = pwrcfgcmd[ary_idx]; + RT_TRACE(COMP_INIT, DBG_TRACE, + ("rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), fab_msk(%#x)," + "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n", + GET_PWR_CFG_OFFSET(pwr_cfg_cmd), GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd), + GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd), GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd), + GET_PWR_CFG_BASE(pwr_cfg_cmd), GET_PWR_CFG_CMD(pwr_cfg_cmd), + GET_PWR_CFG_MASK(pwr_cfg_cmd), GET_PWR_CFG_VALUE(pwr_cfg_cmd))); + + if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) && + (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) && + (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) { + switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) { + case PWR_CMD_READ: + RT_TRACE(COMP_INIT, DBG_TRACE, + ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n")); + break; + + case PWR_CMD_WRITE: { + RT_TRACE(COMP_INIT, DBG_TRACE, + ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n")); + offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd); + + /*Read the value from system register*/ + value = rtl_read_byte(rtlpriv, offset); + value = value & (~(GET_PWR_CFG_MASK(pwr_cfg_cmd))); + value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd) + & GET_PWR_CFG_MASK(pwr_cfg_cmd)); + + /*Write the value back to sytem register*/ + rtl_write_byte(rtlpriv, offset, value); + } + break; + + case PWR_CMD_POLLING: + RT_TRACE(COMP_INIT, DBG_TRACE, + ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n")); + polling_bit = false; + offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd); + + do { + value = rtl_read_byte(rtlpriv, offset); + + value = value & GET_PWR_CFG_MASK(pwr_cfg_cmd); + if (value == (GET_PWR_CFG_VALUE(pwr_cfg_cmd) + & GET_PWR_CFG_MASK(pwr_cfg_cmd))) + polling_bit=true; + else + udelay(10); + + if (polling_count++ > max_polling_cnt) { + return false; + } + } while (!polling_bit); + + break; + + case PWR_CMD_DELAY: + RT_TRACE(COMP_INIT, DBG_TRACE, + ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n")); + if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) == PWRSEQ_DELAY_US) + udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd)); + else + mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd)); + break; + + case PWR_CMD_END: + RT_TRACE(COMP_INIT, DBG_TRACE, + ("rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n")); + return true; + break; + + default: + RT_ASSERT(false, + ("rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n")); + break; + } + + } + + ary_idx++; + } while (1); + + return true; +} --- 
linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/pwrseqcmd.h @@ -0,0 +1,71 @@ +#ifndef __RTL8821AE_PWRSEQCMD_H__ +#define __RTL8821AE_PWRSEQCMD_H__ + +#include "../wifi.h" +/*---------------------------------------------*/ +/*The value of cmd: 4 bits */ +/*---------------------------------------------*/ +#define PWR_CMD_READ 0x00 +#define PWR_CMD_WRITE 0x01 +#define PWR_CMD_POLLING 0x02 +#define PWR_CMD_DELAY 0x03 +#define PWR_CMD_END 0x04 + +/* define the base address of each block */ +#define PWR_BASEADDR_MAC 0x00 +#define PWR_BASEADDR_USB 0x01 +#define PWR_BASEADDR_PCIE 0x02 +#define PWR_BASEADDR_SDIO 0x03 + +#define PWR_INTF_SDIO_MSK BIT(0) +#define PWR_INTF_USB_MSK BIT(1) +#define PWR_INTF_PCI_MSK BIT(2) +#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) + +#define PWR_FAB_TSMC_MSK BIT(0) +#define PWR_FAB_UMC_MSK BIT(1) +#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3)) + +#define PWR_CUT_TESTCHIP_MSK BIT(0) +#define PWR_CUT_A_MSK BIT(1) +#define PWR_CUT_B_MSK BIT(2) +#define PWR_CUT_C_MSK BIT(3) +#define PWR_CUT_D_MSK BIT(4) +#define PWR_CUT_E_MSK BIT(5) +#define PWR_CUT_F_MSK BIT(6) +#define PWR_CUT_G_MSK BIT(7) +#define PWR_CUT_ALL_MSK 0xFF + + +enum pwrseq_delay_unit { + PWRSEQ_DELAY_US, + PWRSEQ_DELAY_MS, +}; + +struct wlan_pwr_cfg { + u16 offset; + u8 cut_msk; + u8 fab_msk:4; + u8 interface_msk:4; + u8 base:4; + u8 cmd:4; + u8 msk; + u8 value; + +}; + +#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset +#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk +#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk +#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk +#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base +#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd +#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk +#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value + +bool rtl_hal_pwrseqcmdparsing(struct rtl_priv * rtlpriv, u8 cut_version, + u8 fab_version, u8 interface_type, + struct wlan_pwr_cfg pwrcfgcmd[]); + +#endif + --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/reg.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/reg.h @@ -0,0 +1,2427 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_REG_H__ +#define __RTL8821AE_REG_H__ + +#define TXPKT_BUF_SELECT 0x69 +#define RXPKT_BUF_SELECT 0xA5 +#define DISABLE_TRXPKT_BUF_ACCESS 0x0 + +#define REG_SYS_ISO_CTRL 0x0000 +#define REG_SYS_FUNC_EN 0x0002 +#define REG_APS_FSMCO 0x0004 +#define REG_SYS_CLKR 0x0008 +#define REG_9346CR 0x000A +#define REG_EE_VPD 0x000C +#define REG_AFE_MISC 0x0010 +#define REG_SPS0_CTRL 0x0011 +#define REG_SPS_OCP_CFG 0x0018 +#define REG_RSV_CTRL 0x001C +#define REG_RF_CTRL 0x001F +#define REG_LDOA15_CTRL 0x0020 +#define REG_LDOV12D_CTRL 0x0021 +#define REG_LDOHCI12_CTRL 0x0022 +#define REG_LPLDO_CTRL 0x0023 +#define REG_AFE_XTAL_CTRL 0x0024 +#define REG_AFE_LDO_CTRL 0x0027 /* 1.5v for 8188EE test chip, 1.4v for MP chip */ +#define REG_AFE_PLL_CTRL 0x0028 +#define REG_MAC_PHY_CTRL 0x002c +#define REG_EFUSE_CTRL 0x0030 +#define REG_EFUSE_TEST 0x0034 +#define REG_PWR_DATA 0x0038 +#define REG_CAL_TIMER 0x003C +#define REG_ACLK_MON 0x003E +#define REG_GPIO_MUXCFG 0x0040 +#define REG_GPIO_IO_SEL 0x0042 +#define REG_MAC_PINMUX_CFG 0x0043 +#define REG_GPIO_PIN_CTRL 0x0044 +#define REG_GPIO_INTM 0x0048 +#define REG_LEDCFG0 0x004C +#define REG_LEDCFG1 0x004D +#define REG_LEDCFG2 0x004E +#define REG_LEDCFG3 0x004F +#define REG_FSIMR 0x0050 +#define REG_FSISR 0x0054 +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c +#define REG_GPIO_PIN_CTRL_2 0x0060 +#define REG_GPIO_IO_SEL_2 0x0062 +#define REG_MULTI_FUNC_CTRL 0x0068 +#define REG_GPIO_OUTPUT 0x006c +#define REG_OPT_CTRL 0x0074 +#define REG_AFE_XTAL_CTRL_EXT 0x0078 +#define REG_XCK_OUT_CTRL 0x007c +#define REG_MCUFWDL 0x0080 +#define REG_WOL_EVENT 0x0081 +#define REG_MCUTSTCFG 0x0084 + + +#define REG_HIMR 0x00B0 +#define REG_HISR 0x00B4 +#define REG_HIMRE 0x00B8 +#define REG_HISRE 0x00BC + +#define REG_PMC_DBG_CTRL2 0x00CC + +#define REG_EFUSE_ACCESS 0x00CF + +#define REG_BIST_SCAN 0x00D0 +#define REG_BIST_RPT 0x00D4 +#define REG_BIST_ROM_RPT 0x00D8 +#define REG_USB_SIE_INTF 0x00E0 +#define REG_PCIE_MIO_INTF 0x00E4 +#define REG_PCIE_MIO_INTD 0x00E8 +#define REG_HPON_FSM 0x00EC +#define REG_SYS_CFG 0x00F0 +#define REG_GPIO_OUTSTS 0x00F4 +#define REG_SYS_CFG1 0x00FC +#define REG_ROM_VERSION 0x00FD + +#define REG_CR 0x0100 +#define REG_PBP 0x0104 +#define REG_PKT_BUFF_ACCESS_CTRL 0x0106 +#define REG_TRXDMA_CTRL 0x010C +#define REG_TRXFF_BNDY 0x0114 +#define REG_TRXFF_STATUS 0x0118 +#define REG_RXFF_PTR 0x011C + +#define REG_CPWM 0x012F +#define REG_FWIMR 0x0130 +#define REG_FWISR 0x0134 +#define REG_PKTBUF_DBG_CTRL 0x0140 +#define REG_PKTBUF_DBG_DATA_L 0x0144 +#define REG_PKTBUF_DBG_DATA_H 0x0148 +#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2) + +#define REG_TC0_CTRL 0x0150 +#define REG_TC1_CTRL 0x0154 +#define REG_TC2_CTRL 0x0158 +#define REG_TC3_CTRL 0x015C +#define REG_TC4_CTRL 0x0160 +#define REG_TCUNIT_BASE 0x0164 +#define REG_MBIST_START 0x0174 +#define REG_MBIST_DONE 0x0178 +#define REG_MBIST_FAIL 0x017C +#define REG_32K_CTRL 0x0194 +#define REG_C2HEVT_MSG_NORMAL 0x01A0 +#define REG_C2HEVT_CLEAR 0x01AF +#define REG_C2HEVT_MSG_TEST 0x01B8 +#define REG_MCUTST_1 0x01c0 +#define REG_FMETHR 0x01C8 +#define REG_HMETFR 0x01CC +#define REG_HMEBOX_0 0x01D0 +#define REG_HMEBOX_1 0x01D4 +#define REG_HMEBOX_2 0x01D8 +#define REG_HMEBOX_3 0x01DC + +#define REG_LLT_INIT 0x01E0 +#define REG_BB_ACCEESS_CTRL 0x01E8 +#define REG_BB_ACCESS_DATA 0x01EC + +#define REG_HMEBOX_EXT_0 0x01F0 +#define REG_HMEBOX_EXT_1 0x01F4 +#define REG_HMEBOX_EXT_2 0x01F8 
+#define REG_HMEBOX_EXT_3 0x01FC + +#define REG_RQPN 0x0200 +#define REG_FIFOPAGE 0x0204 +#define REG_TDECTRL 0x0208 +#define REG_TXDMA_OFFSET_CHK 0x020C +#define REG_TXDMA_STATUS 0x0210 +#define REG_RQPN_NPQ 0x0214 + +#define REG_RXDMA_AGG_PG_TH 0x0280 +#define REG_FW_UPD_RDPTR 0x0284 /* FW shall update this register before FW write RXPKT_RELEASE_POLL to 1 */ +#define REG_RXDMA_CONTROL 0x0286 /* Control the RX DMA.*/ +#define REG_RXPKT_NUM 0x0287 /* The number of packets in RXPKTBUF. */ + +#define REG_PCIE_CTRL_REG 0x0300 +#define REG_INT_MIG 0x0304 +#define REG_BCNQ_DESA 0x0308 +#define REG_HQ_DESA 0x0310 +#define REG_MGQ_DESA 0x0318 +#define REG_VOQ_DESA 0x0320 +#define REG_VIQ_DESA 0x0328 +#define REG_BEQ_DESA 0x0330 +#define REG_BKQ_DESA 0x0338 +#define REG_RX_DESA 0x0340 + +#define REG_DBI_WDATA 0x0348 +#define REG_DBI_RDATA 0x034C +#define REG_DBI_ADDR 0x0350 +#define REG_DBI_FLAG 0x0352 +#define REG_MDIO_WDATA 0x0354 +#define REG_MDIO_RDATA 0x0356 +#define REG_MDIO_CTL 0x0358 +#define REG_DBG_SEL 0x0360 +#define REG_PCIE_HRPWM 0x0361 +#define REG_PCIE_HCPWM 0x0363 +#define REG_UART_CTRL 0x0364 +#define REG_WATCH_DOG 0x0368 +#define REG_UART_TX_DESA 0x0370 +#define REG_UART_RX_DESA 0x0378 + + +#define REG_HDAQ_DESA_NODEF 0x0000 +#define REG_CMDQ_DESA_NODEF 0x0000 + +#define REG_VOQ_INFORMATION 0x0400 +#define REG_VIQ_INFORMATION 0x0404 +#define REG_BEQ_INFORMATION 0x0408 +#define REG_BKQ_INFORMATION 0x040C +#define REG_MGQ_INFORMATION 0x0410 +#define REG_HGQ_INFORMATION 0x0414 +#define REG_BCNQ_INFORMATION 0x0418 +#define REG_TXPKT_EMPTY 0x041A + + +#define REG_CPU_MGQ_INFORMATION 0x041C +#define REG_FWHW_TXQ_CTRL 0x0420 +#define REG_HWSEQ_CTRL 0x0423 +#define REG_TXPKTBUF_BCNQ_BDNY 0x0424 +#define REG_TXPKTBUF_MGQ_BDNY 0x0425 +#define REG_MULTI_BCNQ_EN 0x0426 +#define REG_MULTI_BCNQ_OFFSET 0x0427 +#define REG_SPEC_SIFS 0x0428 +#define REG_RL 0x042A +#define REG_DARFRC 0x0430 +#define REG_RARFRC 0x0438 +#define REG_RRSR 0x0440 +#define REG_ARFR0 0x0444 +#define REG_ARFR1 0x044C +#define REG_CCK_CHECK 0x0454 +#define REG_AMPDU_MAX_TIME 0x0456 +#define REG_AGGLEN_LMT 0x0458 +#define REG_AMPDU_MIN_SPACE 0x045C +#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D +#define REG_FAST_EDCA_CTRL 0x0460 +#define REG_RD_RESP_PKT_TH 0x0463 +#define REG_INIRTS_RATE_SEL 0x0480 +#define REG_INIDATA_RATE_SEL 0x0484 +#define REG_ARFR2 0x048C +#define REG_ARFR3 0x0494 +#define REG_POWER_STATUS 0x04A4 +#define REG_POWER_STAGE1 0x04B4 +#define REG_POWER_STAGE2 0x04B8 +#define REG_PKT_LIFE_TIME 0x04C0 +#define REG_STBC_SETTING 0x04C4 +#define REG_HT_SINGLE_AMPDU 0x04C7 +#define REG_PROT_MODE_CTRL 0x04C8 +#define REG_MAX_AGGR_NUM 0x04CA +#define REG_BAR_MODE_CTRL 0x04CC +#define REG_RA_TRY_RATE_AGG_LMT 0x04CF +#define REG_EARLY_MODE_CONTROL 0x04D0 +#define REG_NQOS_SEQ 0x04DC +#define REG_QOS_SEQ 0x04DE +#define REG_NEED_CPU_HANDLE 0x04E0 +#define REG_PKT_LOSE_RPT 0x04E1 +#define REG_PTCL_ERR_STATUS 0x04E2 +#define REG_TX_RPT_CTRL 0x04EC +#define REG_TX_RPT_TIME 0x04F0 +#define REG_DUMMY 0x04FC + +#define REG_EDCA_VO_PARAM 0x0500 +#define REG_EDCA_VI_PARAM 0x0504 +#define REG_EDCA_BE_PARAM 0x0508 +#define REG_EDCA_BK_PARAM 0x050C +#define REG_BCNTCFG 0x0510 +#define REG_PIFS 0x0512 +#define REG_RDG_PIFS 0x0513 +#define REG_SIFS_CTX 0x0514 +#define REG_SIFS_TRX 0x0516 +#define REG_AGGR_BREAK_TIME 0x051A +#define REG_SLOT 0x051B +#define REG_TX_PTCL_CTRL 0x0520 +#define REG_TXPAUSE 0x0522 +#define REG_DIS_TXREQ_CLR 0x0523 +#define REG_RD_CTRL 0x0524 +#define REG_TBTT_PROHIBIT 0x0540 +#define REG_RD_NAV_NXT 0x0544 
+#define REG_NAV_PROT_LEN 0x0546 +#define REG_BCN_CTRL 0x0550 +#define REG_USTIME_TSF 0x0551 +#define REG_MBID_NUM 0x0552 +#define REG_DUAL_TSF_RST 0x0553 +#define REG_BCN_INTERVAL 0x0554 +#define REG_MBSSID_BCN_SPACE 0x0554 +#define REG_DRVERLYINT 0x0558 +#define REG_BCNDMATIM 0x0559 +#define REG_ATIMWND 0x055A +#define REG_BCN_MAX_ERR 0x055D +#define REG_RXTSF_OFFSET_CCK 0x055E +#define REG_RXTSF_OFFSET_OFDM 0x055F +#define REG_TSFTR 0x0560 +#define REG_INIT_TSFTR 0x0564 +#define REG_SECONDARY_CCA_CTRL 0x0577 +#define REG_PSTIMER 0x0580 +#define REG_TIMER0 0x0584 +#define REG_TIMER1 0x0588 +#define REG_ACMHWCTRL 0x05C0 +#define REG_ACMRSTCTRL 0x05C1 +#define REG_ACMAVG 0x05C2 +#define REG_VO_ADMTIME 0x05C4 +#define REG_VI_ADMTIME 0x05C6 +#define REG_BE_ADMTIME 0x05C8 +#define REG_EDCA_RANDOM_GEN 0x05CC +#define REG_NOA_DESC_SEL 0x05CF +#define REG_NOA_DESC_DURATION 0x05E0 +#define REG_NOA_DESC_INTERVAL 0x05E4 +#define REG_NOA_DESC_START 0x05E8 +#define REG_NOA_DESC_COUNT 0x05EC +#define REG_SCH_TX_CMD 0x05F8 + +#define REG_APSD_CTRL 0x0600 +#define REG_BWOPMODE 0x0603 +#define REG_TCR 0x0604 +#define REG_RCR 0x0608 +#define REG_RX_PKT_LIMIT 0x060C +#define REG_RX_DLK_TIME 0x060D +#define REG_RX_DRVINFO_SZ 0x060F + +#define REG_MACID 0x0610 +#define REG_BSSID 0x0618 +#define REG_MAR 0x0620 +#define REG_MBIDCAMCFG 0x0628 + +#define REG_USTIME_EDCA 0x0638 +#define REG_MAC_SPEC_SIFS 0x063A +#define REG_RESP_SIFS_CCK 0x063C +#define REG_RESP_SIFS_OFDM 0x063E +#define REG_ACKTO 0x0640 +#define REG_CTS2TO 0x0641 +#define REG_EIFS 0x0642 + +#define REG_NAV_CTRL 0x0650 +#define REG_NAV_UPPER 0x0652 +#define REG_BACAMCMD 0x0654 +#define REG_BACAMCONTENT 0x0658 +#define REG_LBDLY 0x0660 +#define REG_FWDLY 0x0661 +#define REG_RXERR_RPT 0x0664 +#define REG_TRXPTCL_CTL 0x0668 + +#define REG_CAMCMD 0x0670 +#define REG_CAMWRITE 0x0674 +#define REG_CAMREAD 0x0678 +#define REG_CAMDBG 0x067C +#define REG_SECCFG 0x0680 + +#define REG_WOW_CTRL 0x0690 +#define REG_PSSTATUS 0x0691 +#define REG_PS_RX_INFO 0x0692 +#define REG_UAPSD_TID 0x0693 +#define REG_LPNAV_CTRL 0x0694 +#define REG_WKFMCAM_NUM 0x0698 +#define REG_WKFMCAM_RWD 0x069C +#define REG_RXFLTMAP0 0x06A0 +#define REG_RXFLTMAP1 0x06A2 +#define REG_RXFLTMAP2 0x06A4 +#define REG_BCN_PSR_RPT 0x06A8 +#define REG_CALB32K_CTRL 0x06AC +#define REG_PKT_MON_CTRL 0x06B4 +#define REG_BT_COEX_TABLE 0x06C0 +#define REG_WMAC_RESP_TXINFO 0x06D8 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_TEST_USB_TXQS 0xFE48 +#define REG_TEST_SIE_VID 0xFE60 +#define REG_TEST_SIE_PID 0xFE62 +#define REG_TEST_SIE_OPTIONAL 0xFE64 +#define REG_TEST_SIE_CHIRP_K 0xFE65 +#define REG_TEST_SIE_PHY 0xFE66 +#define REG_TEST_SIE_MAC_ADDR 0xFE70 +#define REG_TEST_SIE_STRING 0xFE80 + +#define REG_NORMAL_SIE_VID 0xFE60 +#define REG_NORMAL_SIE_PID 0xFE62 +#define REG_NORMAL_SIE_OPTIONAL 0xFE64 +#define REG_NORMAL_SIE_EP 0xFE65 +#define REG_NORMAL_SIE_PHY 0xFE68 +#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 +#define REG_NORMAL_SIE_STRING 0xFE80 + +#define CR9346 REG_9346CR +#define MSR (REG_CR + 2) +#define ISR REG_HISR +#define TSFR REG_TSFTR + +#define MACIDR0 REG_MACID +#define MACIDR4 (REG_MACID + 4) + +#define PBP REG_PBP + +#define IDR0 MACIDR0 +#define IDR4 MACIDR4 + +#define UNUSED_REGISTER 0x1BF +#define DCAM UNUSED_REGISTER +#define PSR UNUSED_REGISTER +#define BBADDR UNUSED_REGISTER +#define PHYDATAR UNUSED_REGISTER + +#define INVALID_BBRF_VALUE 0x12345678 + 
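The power-state transition tables above and the rtl_hal_pwrseqcmdparsing() routine in pwrseqcmd.c combine as follows: a flow array such as rtl8821A_power_on_flow (aliased RTL8821A_NIC_PWR_ON_FLOW) is handed to the parser together with the chip's cut, fab and interface masks, and every entry whose masks match is executed as a register write, a poll or a delay until the PWR_CMD_END entry is reached. A minimal caller sketch, assuming the pwrseq.h/pwrseqcmd.h definitions above are in scope, a valid struct rtl_priv *rtlpriv, and that the hard-coded A-cut/TSMC/PCIe masks shown here would really be derived from the detected chip version and bus type:

/* Illustrative sketch only, not taken from the driver source: run the
 * RTL8821A power-on flow. rtl_hal_pwrseqcmdparsing() returns false if one
 * of the PWR_CMD_POLLING entries times out. */
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK, PWR_FAB_TSMC_MSK,
			      PWR_INTF_PCI_MSK, RTL8821A_NIC_PWR_ON_FLOW))
	RT_TRACE(COMP_INIT, DBG_TRACE,
		 ("power on flow: polling timed out\n"));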
+#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define CMDEEPROM_EN BIT(5) +#define CMDEEPROM_SEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) +#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL) +#define AUTOLOAD_EFUSE CMDEEPROM_EN + +#define GPIOSEL_GPIO 0 +#define GPIOSEL_ENBT BIT(5) + +#define GPIO_IN REG_GPIO_PIN_CTRL +#define GPIO_OUT (REG_GPIO_PIN_CTRL+1) +#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2) +#define GPIO_MOD (REG_GPIO_PIN_CTRL+3) + +/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 byte) */ +#define HSIMR_GPIO12_0_INT_EN BIT(0) +#define HSIMR_SPS_OCP_INT_EN BIT(5) +#define HSIMR_RON_INT_EN BIT(6) +#define HSIMR_PDN_INT_EN BIT(7) +#define HSIMR_GPIO9_INT_EN BIT(25) + + +/* +* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 byte) +*/ +#define HSISR_GPIO12_0_INT BIT(0) +#define HSISR_SPS_OCP_INT BIT(5) +#define HSISR_RON_INT_EN BIT(6) +#define HSISR_PDNINT BIT(7) +#define HSISR_GPIO9_INT BIT(25) + +#define MSR_NOLINK 0x00 +#define MSR_ADHOC 0x01 +#define MSR_INFRA 0x02 +#define MSR_AP 0x03 + +#define RRSR_RSC_OFFSET 21 +#define RRSR_SHORT_OFFSET 23 +#define RRSR_RSC_BW_40M 0x600000 +#define RRSR_RSC_UPSUBCHNL 0x400000 +#define RRSR_RSC_LOWSUBCHNL 0x200000 +#define RRSR_SHORT 0x800000 +#define RRSR_1M BIT(0) +#define RRSR_2M BIT(1) +#define RRSR_5_5M BIT(2) +#define RRSR_11M BIT(3) +#define RRSR_6M BIT(4) +#define RRSR_9M BIT(5) +#define RRSR_12M BIT(6) +#define RRSR_18M BIT(7) +#define RRSR_24M BIT(8) +#define RRSR_36M BIT(9) +#define RRSR_48M BIT(10) +#define RRSR_54M BIT(11) +#define RRSR_MCS0 BIT(12) +#define RRSR_MCS1 BIT(13) +#define RRSR_MCS2 BIT(14) +#define RRSR_MCS3 BIT(15) +#define RRSR_MCS4 BIT(16) +#define RRSR_MCS5 BIT(17) +#define RRSR_MCS6 BIT(18) +#define RRSR_MCS7 BIT(19) +#define BRSR_ACKSHORTPMB BIT(23) + +#define RATR_1M 0x00000001 +#define RATR_2M 0x00000002 +#define RATR_55M 0x00000004 +#define RATR_11M 0x00000008 +#define RATR_6M 0x00000010 +#define RATR_9M 0x00000020 +#define RATR_12M 0x00000040 +#define RATR_18M 0x00000080 +#define RATR_24M 0x00000100 +#define RATR_36M 0x00000200 +#define RATR_48M 0x00000400 +#define RATR_54M 0x00000800 +#define RATR_MCS0 0x00001000 +#define RATR_MCS1 0x00002000 +#define RATR_MCS2 0x00004000 +#define RATR_MCS3 0x00008000 +#define RATR_MCS4 0x00010000 +#define RATR_MCS5 0x00020000 +#define RATR_MCS6 0x00040000 +#define RATR_MCS7 0x00080000 +#define RATR_MCS8 0x00100000 +#define RATR_MCS9 0x00200000 +#define RATR_MCS10 0x00400000 +#define RATR_MCS11 0x00800000 +#define RATR_MCS12 0x01000000 +#define RATR_MCS13 0x02000000 +#define RATR_MCS14 0x04000000 +#define RATR_MCS15 0x08000000 + +#define RATE_1M BIT(0) +#define RATE_2M BIT(1) +#define RATE_5_5M BIT(2) +#define RATE_11M BIT(3) +#define RATE_6M BIT(4) +#define RATE_9M BIT(5) +#define RATE_12M BIT(6) +#define RATE_18M BIT(7) +#define RATE_24M BIT(8) +#define RATE_36M BIT(9) +#define RATE_48M BIT(10) +#define RATE_54M BIT(11) +#define RATE_MCS0 BIT(12) +#define RATE_MCS1 BIT(13) +#define RATE_MCS2 BIT(14) +#define RATE_MCS3 BIT(15) +#define RATE_MCS4 BIT(16) +#define RATE_MCS5 BIT(17) +#define RATE_MCS6 BIT(18) +#define RATE_MCS7 BIT(19) +#define RATE_MCS8 BIT(20) +#define RATE_MCS9 BIT(21) +#define RATE_MCS10 BIT(22) +#define RATE_MCS11 BIT(23) +#define RATE_MCS12 BIT(24) +#define RATE_MCS13 BIT(25) +#define RATE_MCS14 BIT(26) +#define RATE_MCS15 BIT(27) + +#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M) +#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\ + RATR_24M| RATR_36M | 
RATR_48M | RATR_54M) +#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\ + RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\ + RATR_MCS6 | RATR_MCS7) +#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\ + RATR_MCS11| RATR_MCS12 | RATR_MCS13 |\ + RATR_MCS14 | RATR_MCS15) + +#define BW_OPMODE_20MHZ BIT(2) +#define BW_OPMODE_5G BIT(1) +#define BW_OPMODE_11J BIT(0) + +#define CAM_VALID BIT(15) +#define CAM_NOTVALID 0x0000 +#define CAM_USEDK BIT(5) + +#define CAM_NONE 0x0 +#define CAM_WEP40 0x01 +#define CAM_TKIP 0x02 +#define CAM_AES 0x04 +#define CAM_WEP104 0x05 + +#define TOTAL_CAM_ENTRY 32 +#define HALF_CAM_ENTRY 16 + +#define CAM_WRITE BIT(16) +#define CAM_READ 0x00000000 +#define CAM_POLLINIG BIT(31) + +#define SCR_USEDK 0x01 +#define SCR_TXSEC_ENABLE 0x02 +#define SCR_RXSEC_ENABLE 0x04 + +#define WOW_PMEN BIT(0) +#define WOW_WOMEN BIT(1) +#define WOW_MAGIC BIT(2) +#define WOW_UWF BIT(3) + +/********************************************* +* 8188 IMR/ISR bits +**********************************************/ +#define IMR_DISABLED 0x0 +/* IMR DW0(0x0060-0063) Bit 0-31 */ +#define IMR_TXCCK BIT(30) /* TXRPT interrupt when CCX bit of the packet is set */ +#define IMR_PSTIMEOUT BIT(29) /* Power Save Time Out Interrupt */ +#define IMR_GTINT4 BIT(28) /* When GTIMER4 expires, this bit is set to 1 */ +#define IMR_GTINT3 BIT(27) /* When GTIMER3 expires, this bit is set to 1 */ +#define IMR_TBDER BIT(26) /* Transmit Beacon0 Error */ +#define IMR_TBDOK BIT(25) /* Transmit Beacon0 OK */ +#define IMR_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle indication interrupt */ +#define IMR_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */ +#define IMR_BCNDOK0 BIT(16) /* Beacon Queue DMA OK0 */ +#define IMR_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */ +#define IMR_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt Extension for Win7 */ +#define IMR_ATIMEND BIT(12) /* CTWidnow End or ATIM Window End */ +#define IMR_HISR1_IND_INT BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1)*/ +#define IMR_C2HCMD BIT(10) /* CPU to Host Command INT Status, Write 1 clear */ +#define IMR_CPWM2 BIT(9) /* CPU power Mode exchange INT Status, Write 1 clear */ +#define IMR_CPWM BIT(8) /* CPU power Mode exchange INT Status, Write 1 clear */ +#define IMR_HIGHDOK BIT(7) /* High Queue DMA OK */ +#define IMR_MGNTDOK BIT(6) /* Management Queue DMA OK */ +#define IMR_BKDOK BIT(5) /* AC_BK DMA OK */ +#define IMR_BEDOK BIT(4) /* AC_BE DMA OK */ +#define IMR_VIDOK BIT(3) /* AC_VI DMA OK */ +#define IMR_VODOK BIT(2) /* AC_VO DMA OK */ +#define IMR_RDU BIT(1) /* Rx Descriptor Unavailable */ +#define IMR_ROK BIT(0) /* Receive DMA OK */ + +/* IMR DW1(0x00B4-00B7) Bit 0-31 */ +#define IMR_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */ +#define IMR_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */ +#define IMR_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */ +#define IMR_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */ +#define IMR_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */ +#define IMR_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */ +#define IMR_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */ +#define IMR_BCNDOK7 BIT(20) /* Beacon Queue DMA OK Interrup 7 */ +#define IMR_BCNDOK6 BIT(19) /* Beacon Queue DMA OK Interrup 6 */ +#define IMR_BCNDOK5 BIT(18) /* Beacon Queue DMA OK Interrup 5 */ +#define IMR_BCNDOK4 BIT(17) /* Beacon Queue DMA OK Interrup 4 */ +#define IMR_BCNDOK3 BIT(16) /* Beacon Queue DMA OK Interrup 3 */ +#define IMR_BCNDOK2 BIT(15) /* Beacon Queue DMA OK 
Interrup 2 */ +#define IMR_BCNDOK1 BIT(14) /* Beacon Queue DMA OK Interrup 1 */ +#define IMR_ATIMEND_E BIT(13) /* ATIM Window End Extension for Win7 */ +#define IMR_TXERR BIT(11) /* Tx Error Flag Interrupt Status, write 1 clear. */ +#define IMR_RXERR BIT(10) /* Rx Error Flag INT Status, Write 1 clear */ +#define IMR_TXFOVW BIT(9) /* Transmit FIFO Overflow */ +#define IMR_RXFOVW BIT(8) /* Receive FIFO Overflow */ + + +#define HWSET_MAX_SIZE 512 +#define EFUSE_MAX_SECTION 64 +#define EFUSE_REAL_CONTENT_LEN 256 +#define EFUSE_OOB_PROTECT_BYTES 18 /* PG data exclude header, dummy 7 bytes frome CP test and reserved 1byte.*/ + + +#define EEPROM_DEFAULT_TSSI 0x0 +#define EEPROM_DEFAULT_TXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_BOARDTYPE 0x02 +#define EEPROM_DEFAULT_TXPOWER 0x1010 +#define EEPROM_DEFAULT_HT2T_TXPWR 0x10 + +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_THERMALMETER 0x18 +#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0 +#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5 +#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22 +#define EEPROM_DEFAULT_HT40_2SDIFF 0x0 +#define EEPROM_DEFAULT_HT20_DIFF 2 +#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3 +#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0 +#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0 + +#define RF_OPTION1 0x79 +#define RF_OPTION2 0x7A +#define RF_OPTION3 0x7B +#define RF_OPTION4 0xC3 + +#define EEPROM_DEFAULT_PID 0x1234 +#define EEPROM_DEFAULT_VID 0x5678 +#define EEPROM_DEFAULT_CUSTOMERID 0xAB +#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD +#define EEPROM_DEFAULT_VERSION 0 + +#define EEPROM_CHANNEL_PLAN_FCC 0x0 +#define EEPROM_CHANNEL_PLAN_IC 0x1 +#define EEPROM_CHANNEL_PLAN_ETSI 0x2 +#define EEPROM_CHANNEL_PLAN_SPAIN 0x3 +#define EEPROM_CHANNEL_PLAN_FRANCE 0x4 +#define EEPROM_CHANNEL_PLAN_MKK 0x5 +#define EEPROM_CHANNEL_PLAN_MKK1 0x6 +#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7 +#define EEPROM_CHANNEL_PLAN_TELEC 0x8 +#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 +#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA +#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 + +#define EEPROM_CID_DEFAULT 0x0 +#define EEPROM_CID_TOSHIBA 0x4 +#define EEPROM_CID_CCX 0x10 +#define EEPROM_CID_QMI 0x0D +#define EEPROM_CID_WHQL 0xFE + +#define RTL_EEPROM_ID 0x8129 + +#define EEPROM_HPON 0x02 +#define EEPROM_CLK 0x06 +#define EEPROM_TESTR 0x08 + + +#define EEPROM_TXPOWERCCK 0x10 +#define EEPROM_TXPOWERHT40_1S 0x16 +#define EEPROM_TXPOWERHT20DIFF 0x1B +#define EEPROM_TXPOWER_OFDMDIFF 0x1B + + + +#define EEPROM_TX_PWR_INX 0x10 + +#define EEPROM_CHANNELPLAN 0xB8 +#define EEPROM_XTAL_8821AE 0xB9 +#define EEPROM_THERMAL_METER 0xBA +#define EEPROM_IQK_LCK_88E 0xBB + +#define EEPROM_RF_BOARD_OPTION 0xC1 +#define EEPROM_RF_FEATURE_OPTION_88E 0xC2 +#define EEPROM_RF_BT_SETTING 0xC3 +#define EEPROM_VERSION 0xC4 +#define EEPROM_CUSTOMER_ID 0xC5 +#define EEPROM_RF_ANTENNA_OPT_88E 0xC9 + +#define EEPROM_MAC_ADDR 0xD0 +#define EEPROM_VID 0xD6 +#define EEPROM_DID 0xD8 +#define EEPROM_SVID 0xDA +#define EEPROM_SMID 0xDC + +#define STOPBECON BIT(6) +#define STOPHIGHT BIT(5) +#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define STOPBK BIT(0) + +#define RCR_APPFCS BIT(31) +#define RCR_APP_MIC BIT(30) +#define RCR_APP_ICV BIT(29) +#define RCR_APP_PHYST_RXFF BIT(28) +#define RCR_APP_BA_SSN BIT(27) +#define RCR_NONQOS_VHT BIT(26) +#define RCR_ENMBID BIT(24) +#define RCR_LSIGEN BIT(23) +#define RCR_MFBEN BIT(22) +#define RCR_HTC_LOC_CTRL BIT(14) +#define RCR_AMF BIT(13) 
+#define RCR_ACF BIT(12) +#define RCR_ADF BIT(11) +#define RCR_AICV BIT(9) +#define RCR_ACRC32 BIT(8) +#define RCR_CBSSID_BCN BIT(7) +#define RCR_CBSSID_DATA BIT(6) +#define RCR_CBSSID RCR_CBSSID_DATA +#define RCR_APWRMGT BIT(5) +#define RCR_ADD3 BIT(4) +#define RCR_AB BIT(3) +#define RCR_AM BIT(2) +#define RCR_APM BIT(1) +#define RCR_AAP BIT(0) +#define RCR_MXDMA_OFFSET 8 +#define RCR_FIFO_OFFSET 13 + +#define RSV_CTRL 0x001C +#define RD_CTRL 0x0524 + +#define REG_USB_INFO 0xFE17 +#define REG_USB_SPECIAL_OPTION 0xFE55 +#define REG_USB_DMA_AGG_TO 0xFE5B +#define REG_USB_AGG_TO 0xFE5C +#define REG_USB_AGG_TH 0xFE5D + +#define REG_USB_VID 0xFE60 +#define REG_USB_PID 0xFE62 +#define REG_USB_OPTIONAL 0xFE64 +#define REG_USB_CHIRP_K 0xFE65 +#define REG_USB_PHY 0xFE66 +#define REG_USB_MAC_ADDR 0xFE70 +#define REG_USB_HRPWM 0xFE58 +#define REG_USB_HCPWM 0xFE57 + +#define SW18_FPWM BIT(3) + +#define ISO_MD2PP BIT(0) +#define ISO_UA2USB BIT(1) +#define ISO_UD2CORE BIT(2) +#define ISO_PA2PCIE BIT(3) +#define ISO_PD2CORE BIT(4) +#define ISO_IP2MAC BIT(5) +#define ISO_DIOP BIT(6) +#define ISO_DIOE BIT(7) +#define ISO_EB2CORE BIT(8) +#define ISO_DIOR BIT(9) + +#define PWC_EV25V BIT(14) +#define PWC_EV12V BIT(15) + +#define FEN_BBRSTB BIT(0) +#define FEN_BB_GLB_RSTN BIT(1) +#define FEN_USBA BIT(2) +#define FEN_UPLL BIT(3) +#define FEN_USBD BIT(4) +#define FEN_DIO_PCIE BIT(5) +#define FEN_PCIEA BIT(6) +#define FEN_PPLL BIT(7) +#define FEN_PCIED BIT(8) +#define FEN_DIOE BIT(9) +#define FEN_CPUEN BIT(10) +#define FEN_DCORE BIT(11) +#define FEN_ELDR BIT(12) +#define FEN_DIO_RF BIT(13) +#define FEN_HWPDN BIT(14) +#define FEN_MREGEN BIT(15) + +#define PFM_LDALL BIT(0) +#define PFM_ALDN BIT(1) +#define PFM_LDKP BIT(2) +#define PFM_WOWL BIT(3) +#define EnPDN BIT(4) +#define PDN_PL BIT(5) +#define APFM_ONMAC BIT(8) +#define APFM_OFF BIT(9) +#define APFM_RSM BIT(10) +#define AFSM_HSUS BIT(11) +#define AFSM_PCIE BIT(12) +#define APDM_MAC BIT(13) +#define APDM_HOST BIT(14) +#define APDM_HPDN BIT(15) +#define RDY_MACON BIT(16) +#define SUS_HOST BIT(17) +#define ROP_ALD BIT(20) +#define ROP_PWR BIT(21) +#define ROP_SPS BIT(22) +#define SOP_MRST BIT(25) +#define SOP_FUSE BIT(26) +#define SOP_ABG BIT(27) +#define SOP_AMB BIT(28) +#define SOP_RCK BIT(29) +#define SOP_A8M BIT(30) +#define XOP_BTCK BIT(31) + +#define ANAD16V_EN BIT(0) +#define ANA8M BIT(1) +#define MACSLP BIT(4) +#define LOADER_CLK_EN BIT(5) +#define _80M_SSC_DIS BIT(7) +#define _80M_SSC_EN_HO BIT(8) +#define PHY_SSC_RSTB BIT(9) +#define SEC_CLK_EN BIT(10) +#define MAC_CLK_EN BIT(11) +#define SYS_CLK_EN BIT(12) +#define RING_CLK_EN BIT(13) + +#define BOOT_FROM_EEPROM BIT(4) +#define EEPROM_EN BIT(5) + +#define AFE_BGEN BIT(0) +#define AFE_MBEN BIT(1) +#define MAC_ID_EN BIT(7) + +#define WLOCK_ALL BIT(0) +#define WLOCK_00 BIT(1) +#define WLOCK_04 BIT(2) +#define WLOCK_08 BIT(3) +#define WLOCK_40 BIT(4) +#define R_DIS_PRST_0 BIT(5) +#define R_DIS_PRST_1 BIT(6) +#define LOCK_ALL_EN BIT(7) + +#define RF_EN BIT(0) +#define RF_RSTB BIT(1) +#define RF_SDMRSTB BIT(2) + +#define LDA15_EN BIT(0) +#define LDA15_STBY BIT(1) +#define LDA15_OBUF BIT(2) +#define LDA15_REG_VOS BIT(3) +#define _LDA15_VOADJ(x) (((x) & 0x7) << 4) + +#define LDV12_EN BIT(0) +#define LDV12_SDBY BIT(1) +#define LPLDO_HSM BIT(2) +#define LPLDO_LSM_DIS BIT(3) +#define _LDV12_VADJ(x) (((x) & 0xF) << 4) + +#define XTAL_EN BIT(0) +#define XTAL_BSEL BIT(1) +#define _XTAL_BOSC(x) (((x) & 0x3) << 2) +#define _XTAL_CADJ(x) (((x) & 0xF) << 4) +#define XTAL_GATE_USB BIT(8) +#define _XTAL_USB_DRV(x) (((x) 
& 0x3) << 9) +#define XTAL_GATE_AFE BIT(11) +#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12) +#define XTAL_RF_GATE BIT(14) +#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15) +#define XTAL_GATE_DIG BIT(17) +#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18) +#define XTAL_BT_GATE BIT(20) +#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21) +#define _XTAL_GPIO(x) (((x) & 0x7) << 23) + +#define CKDLY_AFE BIT(26) +#define CKDLY_USB BIT(27) +#define CKDLY_DIG BIT(28) +#define CKDLY_BT BIT(29) + +#define APLL_EN BIT(0) +#define APLL_320_EN BIT(1) +#define APLL_FREF_SEL BIT(2) +#define APLL_EDGE_SEL BIT(3) +#define APLL_WDOGB BIT(4) +#define APLL_LPFEN BIT(5) + +#define APLL_REF_CLK_13MHZ 0x1 +#define APLL_REF_CLK_19_2MHZ 0x2 +#define APLL_REF_CLK_20MHZ 0x3 +#define APLL_REF_CLK_25MHZ 0x4 +#define APLL_REF_CLK_26MHZ 0x5 +#define APLL_REF_CLK_38_4MHZ 0x6 +#define APLL_REF_CLK_40MHZ 0x7 + +#define APLL_320EN BIT(14) +#define APLL_80EN BIT(15) +#define APLL_1MEN BIT(24) + +#define ALD_EN BIT(18) +#define EF_PD BIT(19) +#define EF_FLAG BIT(31) + +#define EF_TRPT BIT(7) +#define LDOE25_EN BIT(31) + +#define RSM_EN BIT(0) +#define Timer_EN BIT(4) + +#define TRSW0EN BIT(2) +#define TRSW1EN BIT(3) +#define EROM_EN BIT(4) +#define EnBT BIT(5) +#define EnUart BIT(8) +#define Uart_910 BIT(9) +#define EnPMAC BIT(10) +#define SIC_SWRST BIT(11) +#define EnSIC BIT(12) +#define SIC_23 BIT(13) +#define EnHDP BIT(14) +#define SIC_LBK BIT(15) + +#define LED0PL BIT(4) +#define LED1PL BIT(12) +#define LED0DIS BIT(7) + +#define MCUFWDL_EN BIT(0) +#define MCUFWDL_RDY BIT(1) +#define FWDL_CHKSUM_RPT BIT(2) +#define MACINI_RDY BIT(3) +#define BBINI_RDY BIT(4) +#define RFINI_RDY BIT(5) +#define WINTINI_RDY BIT(6) +#define CPRST BIT(23) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define CHIP_VER_RTL_MASK 0xF000 +#define CHIP_VER_RTL_SHIFT 12 + +#define REG_LBMODE (REG_CR + 3) + +#define HCI_TXDMA_EN BIT(0) +#define HCI_RXDMA_EN BIT(1) +#define TXDMA_EN BIT(2) +#define RXDMA_EN BIT(3) +#define PROTOCOL_EN BIT(4) +#define SCHEDULE_EN BIT(5) +#define MACTXEN BIT(6) +#define MACRXEN BIT(7) +#define ENSWBCN BIT(8) +#define ENSEC BIT(9) + +#define _NETTYPE(x) (((x) & 0x3) << 16) +#define MASK_NETTYPE 0x30000 +#define NT_NO_LINK 0x0 +#define NT_LINK_AD_HOC 0x1 +#define NT_LINK_AP 0x2 +#define NT_AS_AP 0x3 + +#define _LBMODE(x) (((x) & 0xF) << 24) +#define MASK_LBMODE 0xF000000 +#define LOOPBACK_NORMAL 0x0 +#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_MAC_DELAY 0x3 +#define LOOPBACK_PHY 0x1 +#define LOOPBACK_DMA 0x7 + +#define GET_RX_PAGE_SIZE(value) ((value) & 0xF) +#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4) +#define _PSRX_MASK 0xF +#define _PSTX_MASK 0xF0 +#define _PSRX(x) (x) +#define _PSTX(x) ((x) << 4) + +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 +#define PBP_1024 0x4 + +#define RXDMA_ARBBW_EN BIT(0) +#define RXSHFT_EN BIT(1) +#define RXDMA_AGG_EN BIT(2) +#define QS_VO_QUEUE BIT(8) +#define QS_VI_QUEUE BIT(9) +#define QS_BE_QUEUE BIT(10) +#define QS_BK_QUEUE BIT(11) +#define QS_MANAGER_QUEUE BIT(12) +#define QS_HIGH_QUEUE BIT(13) + +#define HQSEL_VOQ BIT(0) 
+#define HQSEL_VIQ BIT(1) +#define HQSEL_BEQ BIT(2) +#define HQSEL_BKQ BIT(3) +#define HQSEL_MGTQ BIT(4) +#define HQSEL_HIQ BIT(5) + +#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14) +#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12) +#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10) +#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8 ) +#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6 ) +#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4 ) + +#define QUEUE_LOW 1 +#define QUEUE_NORMAL 2 +#define QUEUE_HIGH 3 + +#define _LLT_NO_ACTIVE 0x0 +#define _LLT_WRITE_ACCESS 0x1 +#define _LLT_READ_ACCESS 0x2 + +#define _LLT_INIT_DATA(x) ((x) & 0xFF) +#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8) +#define _LLT_OP(x) (((x) & 0x3) << 30) +#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3) + +#define BB_WRITE_READ_MASK (BIT(31) | BIT(30)) +#define BB_WRITE_EN BIT(30) +#define BB_READ_EN BIT(31) + +#define _HPQ(x) ((x) & 0xFF) +#define _LPQ(x) (((x) & 0xFF) << 8) +#define _PUBQ(x) (((x) & 0xFF) << 16) +#define _NPQ(x) ((x) & 0xFF) + +#define HPQ_PUBLIC_DIS BIT(24) +#define LPQ_PUBLIC_DIS BIT(25) +#define LD_RQPN BIT(31) + +#define BCN_VALID BIT(16) +#define BCN_HEAD(x) (((x) & 0xFF) << 8) +#define BCN_HEAD_MASK 0xFF00 + +#define BLK_DESC_NUM_SHIFT 4 +#define BLK_DESC_NUM_MASK 0xF + +#define DROP_DATA_EN BIT(9) + +#define EN_AMPDU_RTY_NEW BIT(7) + +#define _INIRTSMCS_SEL(x) ((x) & 0x3F) + +#define _SPEC_SIFS_CCK(x) ((x) & 0xFF) +#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8) + +#define RATE_REG_BITMAP_ALL 0xFFFFF + +#define _RRSC_BITMAP(x) ((x) & 0xFFFFF) + +#define _RRSR_RSC(x) (((x) & 0x3) << 21) +#define RRSR_RSC_RESERVED 0x0 +#define RRSR_RSC_UPPER_SUBCHANNEL 0x1 +#define RRSR_RSC_LOWER_SUBCHANNEL 0x2 +#define RRSR_RSC_DUPLICATE_MODE 0x3 + +#define USE_SHORT_G1 BIT(20) + +#define _AGGLMT_MCS0(x) ((x) & 0xF) +#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4) +#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8) +#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12) +#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16) +#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20) +#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24) +#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28) + +#define RETRY_LIMIT_SHORT_SHIFT 8 +#define RETRY_LIMIT_LONG_SHIFT 0 + +#define _DARF_RC1(x) ((x) & 0x1F) +#define _DARF_RC2(x) (((x) & 0x1F) << 8) +#define _DARF_RC3(x) (((x) & 0x1F) << 16) +#define _DARF_RC4(x) (((x) & 0x1F) << 24) +#define _DARF_RC5(x) ((x) & 0x1F) +#define _DARF_RC6(x) (((x) & 0x1F) << 8) +#define _DARF_RC7(x) (((x) & 0x1F) << 16) +#define _DARF_RC8(x) (((x) & 0x1F) << 24) + +#define _RARF_RC1(x) ((x) & 0x1F) +#define _RARF_RC2(x) (((x) & 0x1F) << 8) +#define _RARF_RC3(x) (((x) & 0x1F) << 16) +#define _RARF_RC4(x) (((x) & 0x1F) << 24) +#define _RARF_RC5(x) ((x) & 0x1F) +#define _RARF_RC6(x) (((x) & 0x1F) << 8) +#define _RARF_RC7(x) (((x) & 0x1F) << 16) +#define _RARF_RC8(x) (((x) & 0x1F) << 24) + +#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_ECW_MAX_OFFSET 12 +#define AC_PARAM_ECW_MIN_OFFSET 8 +#define AC_PARAM_AIFS_OFFSET 0 + +#define _AIFS(x) (x) +#define _ECW_MAX_MIN(x) ((x) << 8) +#define _TXOP_LIMIT(x) ((x) << 16) + +#define _BCNIFS(x) ((x) & 0xFF) +#define _BCNECW(x) ((((x) & 0xF))<< 8) + +#define _LRL(x) ((x) & 0x3F) +#define _SRL(x) (((x) & 0x3F) << 8) + +#define _SIFS_CCK_CTX(x) ((x) & 0xFF) +#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8); + +#define _SIFS_OFDM_CTX(x) ((x) & 0xFF) +#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8); + +#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8) + +#define DIS_EDCA_CNT_DWN BIT(11) + +#define EN_MBSSID BIT(1) +#define EN_TXBCN_RPT BIT(2) +#define EN_BCN_FUNCTION 
BIT(3) + +#define TSFTR_RST BIT(0) +#define TSFTR1_RST BIT(1) + +#define STOP_BCNQ BIT(6) + +#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) +#define DIS_TSF_UDT0_TEST_CHIP BIT(5) + +#define AcmHw_HwEn BIT(0) +#define AcmHw_BeqEn BIT(1) +#define AcmHw_ViqEn BIT(2) +#define AcmHw_VoqEn BIT(3) +#define AcmHw_BeqStatus BIT(4) +#define AcmHw_ViqStatus BIT(5) +#define AcmHw_VoqStatus BIT(6) + +#define APSDOFF BIT(6) +#define APSDOFF_STATUS BIT(7) + +#define BW_20MHZ BIT(2) + +#define RATE_BITMAP_ALL 0xFFFFF + +#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1 + +#define TSFRST BIT(0) +#define DIS_GCLK BIT(1) +#define PAD_SEL BIT(2) +#define PWR_ST BIT(6) +#define PWRBIT_OW_EN BIT(7) +#define ACRC BIT(8) +#define CFENDFORM BIT(9) +#define ICV BIT(10) + +#define AAP BIT(0) +#define APM BIT(1) +#define AM BIT(2) +#define AB BIT(3) +#define ADD3 BIT(4) +#define APWRMGT BIT(5) +#define CBSSID BIT(6) +#define CBSSID_DATA BIT(6) +#define CBSSID_BCN BIT(7) +#define ACRC32 BIT(8) +#define AICV BIT(9) +#define ADF BIT(11) +#define ACF BIT(12) +#define AMF BIT(13) +#define HTC_LOC_CTRL BIT(14) +#define UC_DATA_EN BIT(16) +#define BM_DATA_EN BIT(17) +#define MFBEN BIT(22) +#define LSIGEN BIT(23) +#define EnMBID BIT(24) +#define APP_BASSN BIT(27) +#define APP_PHYSTS BIT(28) +#define APP_ICV BIT(29) +#define APP_MIC BIT(30) +#define APP_FCS BIT(31) + +#define _MIN_SPACE(x) ((x) & 0x7) +#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3) + +#define RXERR_TYPE_OFDM_PPDU 0 +#define RXERR_TYPE_OFDM_FALSE_ALARM 1 +#define RXERR_TYPE_OFDM_MPDU_OK 2 +#define RXERR_TYPE_OFDM_MPDU_FAIL 3 +#define RXERR_TYPE_CCK_PPDU 4 +#define RXERR_TYPE_CCK_FALSE_ALARM 5 +#define RXERR_TYPE_CCK_MPDU_OK 6 +#define RXERR_TYPE_CCK_MPDU_FAIL 7 +#define RXERR_TYPE_HT_PPDU 8 +#define RXERR_TYPE_HT_FALSE_ALARM 9 +#define RXERR_TYPE_HT_MPDU_TOTAL 10 +#define RXERR_TYPE_HT_MPDU_OK 11 +#define RXERR_TYPE_HT_MPDU_FAIL 12 +#define RXERR_TYPE_RX_FULL_DROP 15 + +#define RXERR_COUNTER_MASK 0xFFFFF +#define RXERR_RPT_RST BIT(27) +#define _RXERR_RPT_SEL(type) ((type) << 28) + +#define SCR_TxUseDK BIT(0) +#define SCR_RxUseDK BIT(1) +#define SCR_TxEncEnable BIT(2) +#define SCR_RxDecEnable BIT(3) +#define SCR_SKByA2 BIT(4) +#define SCR_NoSKMC BIT(5) +#define SCR_TXBCUSEDK BIT(6) +#define SCR_RXBCUSEDK BIT(7) + +#define XCLK_VLD BIT(0) +#define ACLK_VLD BIT(1) +#define UCLK_VLD BIT(2) +#define PCLK_VLD BIT(3) +#define PCIRSTB BIT(4) +#define V15_VLD BIT(5) +#define TRP_B15V_EN BIT(7) +#define SIC_IDLE BIT(8) +#define BD_MAC2 BIT(9) +#define BD_MAC1 BIT(10) +#define IC_MACPHY_MODE BIT(11) +#define BT_FUNC BIT(16) +#define VENDOR_ID BIT(19) +#define PAD_HWPD_IDN BIT(22) +#define TRP_VAUX_EN BIT(23) +#define TRP_BT_EN BIT(24) +#define BD_PKG_SEL BIT(25) +#define BD_HCI_SEL BIT(26) +#define TYPE_ID BIT(27) + +#define USB_IS_HIGH_SPEED 0 +#define USB_IS_FULL_SPEED 1 +#define USB_SPEED_MASK BIT(5) + +#define USB_NORMAL_SIE_EP_MASK 0xF +#define USB_NORMAL_SIE_EP_SHIFT 4 + +#define USB_TEST_EP_MASK 0x30 +#define USB_TEST_EP_SHIFT 4 + +#define USB_AGG_EN BIT(3) + +#define MAC_ADDR_LEN 6 +#define LAST_ENTRY_OF_TX_PKT_BUFFER 175/*255 88e*/ + +#define POLLING_LLT_THRESHOLD 20 +#define POLLING_READY_TIMEOUT_COUNT 3000 + +#define MAX_MSS_DENSITY_2T 0x13 +#define MAX_MSS_DENSITY_1T 0x0A + +#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6)) +#define EPROM_CMD_CONFIG 0x3 +#define EPROM_CMD_LOAD 1 + +#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE + +#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) + +#define RA_LSSIWRITE_8821A 0xc90 +#define RB_LSSIWRITE_8821A 0xe90 + +#define RA_PIREAD_8821A 0xd04 
+#define RB_PIREAD_8821A 0xd44 +#define RA_SIREAD_8821A 0xd08 +#define RB_SIREAD_8821A 0xd48 + +#define RPMAC_RESET 0x100 +#define RPMAC_TXSTART 0x104 +#define RPMAC_TXLEGACYSIG 0x108 +#define RPMAC_TXHTSIG1 0x10c +#define RPMAC_TXHTSIG2 0x110 +#define RPMAC_PHYDEBUG 0x114 +#define RPMAC_TXPACKETNUM 0x118 +#define RPMAC_TXIDLE 0x11c +#define RPMAC_TXMACHEADER0 0x120 +#define RPMAC_TXMACHEADER1 0x124 +#define RPMAC_TXMACHEADER2 0x128 +#define RPMAC_TXMACHEADER3 0x12c +#define RPMAC_TXMACHEADER4 0x130 +#define RPMAC_TXMACHEADER5 0x134 +#define RPMAC_TXDADATYPE 0x138 +#define RPMAC_TXRANDOMSEED 0x13c +#define RPMAC_CCKPLCPPREAMBLE 0x140 +#define RPMAC_CCKPLCPHEADER 0x144 +#define RPMAC_CCKCRC16 0x148 +#define RPMAC_OFDMRXCRC32OK 0x170 +#define RPMAC_OFDMRXCRC32Er 0x174 +#define RPMAC_OFDMRXPARITYER 0x178 +#define RPMAC_OFDMRXCRC8ER 0x17c +#define RPMAC_CCKCRXRC16ER 0x180 +#define RPMAC_CCKCRXRC32ER 0x184 +#define RPMAC_CCKCRXRC32OK 0x188 +#define RPMAC_TXSTATUS 0x18c + +#define RFPGA0_RFMOD 0x800 + +#define RFPGA0_TXINFO 0x804 +#define RFPGA0_PSDFUNCTION 0x808 + +#define RFPGA0_TXGAINSTAGE 0x80c + +#define RFPGA0_RFTIMING1 0x810 +#define RFPGA0_RFTIMING2 0x814 + +#define RFPGA0_XA_HSSIPARAMETER1 0x820 +#define RFPGA0_XA_HSSIPARAMETER2 0x824 +#define RFPGA0_XB_HSSIPARAMETER1 0x828 +#define RFPGA0_XB_HSSIPARAMETER2 0x82c +#define RCCAONSEC 0x838 + +#define RFPGA0_XA_LSSIPARAMETER 0x840 +#define RFPGA0_XB_LSSIPARAMETER 0x844 +#define RL1PEAKTH 0x848 + +#define RFPGA0_RFWAKEUPPARAMETER 0x850 +#define RFPGA0_RFSLEEPUPPARAMETER 0x854 + +#define RFPGA0_XAB_SWITCHCONTROL 0x858 +#define RFPGA0_XCD_SWITCHCONTROL 0x85c + +#define RFPGA0_XA_RFINTERFACEOE 0x860 +#define RFC_AREA 0x860 +#define RFPGA0_XB_RFINTERFACEOE 0x864 + +#define RFPGA0_XAB_RFINTERFACESW 0x870 +#define RFPGA0_XCD_RFINTERFACESW 0x874 + +#define rFPGA0_XAB_RFPARAMETER 0x878 +#define rFPGA0_XCD_RFPARAMETER 0x87c + +#define RFPGA0_ANALOGPARAMETER1 0x880 +#define RFPGA0_ANALOGPARAMETER2 0x884 +#define RFPGA0_ANALOGPARAMETER3 0x888 +#define RFPGA0_ANALOGPARAMETER4 0x88c + +#define RFPGA0_XA_LSSIREADBACK 0x8a0 +#define RFPGA0_XB_LSSIREADBACK 0x8a4 +#define RFPGA0_XC_LSSIREADBACK 0x8a8 +//#define RFPGA0_XD_LSSIREADBACK 0x8ac +#define RRFMOD 0x8ac +#define RHSSIREAD_8821AE 0x8b0 + +#define RFPGA0_PSDREPORT 0x8b4 +#define TRANSCEIVEA_HSPI_READBACK 0x8b8 +#define TRANSCEIVEB_HSPI_READBACK 0x8bc +//#define REG_SC_CNT 0x8c4 +#define RADC_BUF_CLK 0x8c4 +#define RFPGA0_XAB_RFINTERFACERB 0x8e0 +#define RFPGA0_XCD_RFINTERFACERB 0x8e4 + +#define RFPGA1_RFMOD 0x900 + +#define RFPGA1_TXBLOCK 0x904 +#define RFPGA1_DEBUGSELECT 0x908 +#define RFPGA1_TXINFO 0x90c + +#define RCCK_SYSTEM 0xa00 +#define BCCK_SYSTEM 0x10 + + +#define RCCK0_AFESETTING 0xa04 +#define RCCK0_CCA 0xa08 + +#define RCCK0_RXAGC1 0xa0c +#define RCCK0_RXAGC2 0xa10 + +#define RCCK0_RXHP 0xa14 + +#define RCCK0_DSPPARAMETER1 0xa18 +#define RCCK0_DSPPARAMETER2 0xa1c + +#define RCCK0_TXFILTER1 0xa20 +#define RCCK0_TXFILTER2 0xa24 +#define RCCK0_DEBUGPORT 0xa28 +#define RCCK0_FALSEALARMREPORT 0xa2c +#define RCCK0_TRSSIREPORT 0xa50 +#define RCCK0_RXREPORT 0xa54 +#define RCCK0_FACOUNTERLOWER 0xa5c +#define RCCK0_FACOUNTERUPPER 0xa58 +#define RCCK0_CCA_CNT 0xa60 + + +/* PageB(0xB00) */ +#define rPdp_AntA 0xb00 +#define rPdp_AntA_4 0xb04 +#define rPdp_AntA_8 0xb08 +#define rPdp_AntA_C 0xb0c +#define rPdp_AntA_10 0xb10 +#define rPdp_AntA_14 0xb14 +#define rPdp_AntA_18 0xb18 +#define rPdp_AntA_1C 0xb1c +#define rPdp_AntA_20 0xb20 +#define rPdp_AntA_24 0xb24 + +#define rConfig_Pmpd_AntA 0xb28 +#define 
rConfig_ram64x16 0xb2c + +#define rBndA 0xb30 +#define rHssiPar 0xb34 + +#define rConfig_AntA 0xb68 +#define rConfig_AntB 0xb6c + +#define rPdp_AntB 0xb70 +#define rPdp_AntB_4 0xb74 +#define rPdp_AntB_8 0xb78 +#define rPdp_AntB_C 0xb7c +#define rPdp_AntB_10 0xb80 +#define rPdp_AntB_14 0xb84 +#define rPdp_AntB_18 0xb88 +#define rPdp_AntB_1C 0xb8c +#define rPdp_AntB_20 0xb90 +#define rPdp_AntB_24 0xb94 + +#define rConfig_Pmpd_AntB 0xb98 + +#define rBndB 0xba0 + +#define rAPK 0xbd8 +#define rPm_Rx0_AntA 0xbdc +#define rPm_Rx1_AntA 0xbe0 +#define rPm_Rx2_AntA 0xbe4 +#define rPm_Rx3_AntA 0xbe8 +#define rPm_Rx0_AntB 0xbec +#define rPm_Rx1_AntB 0xbf0 +#define rPm_Rx2_AntB 0xbf4 +#define rPm_Rx3_AntB 0xbf8 + +/*RSSI Dump*/ +#define RA_RSSI_DUMP 0xBF0 +#define RB_RSSI_DUMP 0xBF1 +#define RS1_RX_EVM_DUMP 0xBF4 +#define RS2_RX_EVM_DUMP 0xBF5 +#define RA_RX_SNR_DUMP 0xBF6 +#define RB_RX_SNR_DUMP 0xBF7 +#define RA_CFO_SHORT_DUMP 0xBF8 +#define RB_CFO_SHORT_DUMP 0xBFA +#define RA_CFO_LONG_DUMP 0xBEC +#define RB_CFO_LONG_DUMP 0xBEE + +/*Page C*/ +#define ROFDM0_LSTF 0xc00 + +#define ROFDM0_TRXPATHENABLE 0xc04 +#define ROFDM0_TRMUXPAR 0xc08 +#define ROFDM0_TRSWISOLATION 0xc0c + +#define ROFDM0_XARXAFE 0xc10 +#define ROFDM0_XARXIQIMBALANCE 0xc14 +#define ROFDM0_XBRXAFE 0xc18 +#define ROFDM0_XBRXIQIMBALANCE 0xc1c +#define ROFDM0_XCRXAFE 0xc20 +#define ROFDM0_XCRXIQIMBANLANCE 0xc24 +#define ROFDM0_XDRXAFE 0xc28 +#define ROFDM0_XDRXIQIMBALANCE 0xc2c + +#define ROFDM0_RXDETECTOR1 0xc30 +#define ROFDM0_RXDETECTOR2 0xc34 +#define ROFDM0_RXDETECTOR3 0xc38 +#define ROFDM0_RXDETECTOR4 0xc3c + +#define ROFDM0_RXDSP 0xc40 +#define ROFDM0_CFOANDDAGC 0xc44 +#define ROFDM0_CCADROPTHRESHOLD 0xc48 +#define ROFDM0_ECCATHRESHOLD 0xc4c + +#define ROFDM0_XAAGCCORE1 0xc50 +#define ROFDM0_XAAGCCORE2 0xc54 +#define ROFDM0_XBAGCCORE1 0xc58 +#define ROFDM0_XBAGCCORE2 0xc5c +#define ROFDM0_XCAGCCORE1 0xc60 +#define ROFDM0_XCAGCCORE2 0xc64 +#define ROFDM0_XDAGCCORE1 0xc68 +#define ROFDM0_XDAGCCORE2 0xc6c + +#define ROFDM0_AGCPARAMETER1 0xc70 +#define ROFDM0_AGCPARAMETER2 0xc74 +#define ROFDM0_AGCRSSITABLE 0xc78 +#define ROFDM0_HTSTFAGC 0xc7c + +#define ROFDM0_XATXIQIMBALANCE 0xc80 +#define ROFDM0_XATXAFE 0xc84 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 +#define ROFDM0_XBTXAFE 0xc8c +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 +#define ROFDM0_XDTXAFE 0xc9c + +#define ROFDM0_RXIQEXTANTA 0xca0 +#define ROFDM0_TXCOEFF1 0xca4 +#define ROFDM0_TXCOEFF2 0xca8 +#define ROFDM0_TXCOEFF3 0xcac +#define ROFDM0_TXCOEFF4 0xcb0 +#define ROFDM0_TXCOEFF5 0xcb4 +#define ROFDM0_TXCOEFF6 0xcb8 + +/*Path_A RFE cotrol */ +#define RA_RFE_CTRL_8812 0xcb8 +/*Path_B RFE control*/ +#define RB_RFE_CTRL_8812 0xeb8 + +#define ROFDM0_RXHPPARAMETER 0xce0 +#define ROFDM0_TXPSEUDONOISEWGT 0xce4 +#define ROFDM0_FRAMESYNC 0xcf0 +#define ROFDM0_DFSREPORT 0xcf4 + + +#define ROFDM1_LSTF 0xd00 +#define ROFDM1_TRXPATHENABLE 0xd04 + +#define ROFDM1_CF0 0xd08 +#define ROFDM1_CSI1 0xd10 +#define ROFDM1_SBD 0xd14 +#define ROFDM1_CSI2 0xd18 +#define ROFDM1_CFOTRACKING 0xd2c +#define ROFDM1_TRXMESAURE1 0xd34 +#define ROFDM1_INTFDET 0xd3c +#define ROFDM1_PSEUDONOISESTATEAB 0xd50 +#define ROFDM1_PSEUDONOISESTATECD 0xd54 +#define ROFDM1_RXPSEUDONOISEWGT 0xd58 + +#define ROFDM_PHYCOUNTER1 0xda0 +#define ROFDM_PHYCOUNTER2 0xda4 +#define ROFDM_PHYCOUNTER3 0xda8 + +#define ROFDM_SHORTCFOAB 0xdac +#define ROFDM_SHORTCFOCD 0xdb0 +#define ROFDM_LONGCFOAB 0xdb4 +#define ROFDM_LONGCFOCD 0xdb8 +#define ROFDM_TAILCF0AB 0xdbc +#define 
ROFDM_TAILCF0CD 0xdc0 +#define ROFDM_PWMEASURE1 0xdc4 +#define ROFDM_PWMEASURE2 0xdc8 +#define ROFDM_BWREPORT 0xdcc +#define ROFDM_AGCREPORT 0xdd0 +#define ROFDM_RXSNR 0xdd4 +#define ROFDM_RXEVMCSI 0xdd8 +#define ROFDM_SIGREPORT 0xddc + +#define RTXAGC_A_CCK11_CCK1 0xc20 +#define RTXAGC_A_OFDM18_OFDM6 0xc24 +#define RTXAGC_A_OFDM54_OFDM24 0xc28 +#define RTXAGC_A_MCS03_MCS00 0xc2c +#define RTXAGC_A_MCS07_MCS04 0xc30 +#define RTXAGC_A_MCS11_MCS08 0xc34 +#define RTXAGC_A_MCS15_MCS12 0xc38 +#define RTXAGC_A_NSS1INDEX3_NSS1INDEX0 0xc3c +#define RTXAGC_A_NSS1INDEX7_NSS1INDEX4 0xc40 +#define RTXAGC_A_NSS2INDEX1_NSS1INDEX8 0xc44 +#define RTXAGC_A_NSS2INDEX5_NSS2INDEX2 0xc48 +#define RTXAGC_A_NSS2INDEX9_NSS2INDEX6 0xc4c +#define RTXAGC_B_CCK11_CCK1 0xe20 +#define RTXAGC_B_OFDM18_OFDM6 0xe24 +#define RTXAGC_B_OFDM54_OFDM24 0xe28 +#define RTXAGC_B_MCS03_MCS00 0xe2c +#define RTXAGC_B_MCS07_MCS04 0xe30 +#define RTXAGC_B_MCS11_MCS08 0xe34 +#define RTXAGC_B_MCS15_MCS12 0xe38 +#define RTXAGC_B_NSS1INDEX3_NSS1INDEX0 0xe3c +#define RTXAGC_B_NSS1INDEX7_NSS1INDEX4 0xe40 +#define RTXAGC_B_NSS2INDEX1_NSS1INDEX8 0xe44 +#define RTXAGC_B_NSS2INDEX5_NSS2INDEX2 0xe48 +#define RTXAGC_B_NSS2INDEX9_NSS2INDEX6 0xe4c + +#define RA_TXPWRTRAING 0xc54 +#define RB_TXPWRTRAING 0xe54 + + +#define RFPGA0_IQK 0xe28 +#define RTx_IQK_Tone_A 0xe30 +#define RRx_IQK_Tone_A 0xe34 +#define RTx_IQK_PI_A 0xe38 +#define RRx_IQK_PI_A 0xe3c + +#define RTx_IQK 0xe40 +#define RRx_IQK 0xe44 +#define RIQK_AGC_Pts 0xe48 +#define RIQK_AGC_Rsp 0xe4c +#define RTx_IQK_Tone_B 0xe50 +#define RRx_IQK_Tone_B 0xe54 +#define RTx_IQK_PI_B 0xe58 +#define RRx_IQK_PI_B 0xe5c +#define RIQK_AGC_Cont 0xe60 + +#define RBlue_Tooth 0xe6c +#define RRx_Wait_CCA 0xe70 +#define RTx_CCK_RFON 0xe74 +#define RTx_CCK_BBON 0xe78 +#define RTx_OFDM_RFON 0xe7c +#define RTx_OFDM_BBON 0xe80 +#define RTx_To_Rx 0xe84 +#define RTx_To_Tx 0xe88 +#define RRx_CCK 0xe8c + +#define RTx_Power_Before_IQK_A 0xe94 +#define RTx_Power_After_IQK_A 0xe9c + +#define RRx_Power_Before_IQK_A 0xea0 +#define RRx_Power_Before_IQK_A_2 0xea4 +#define RRx_Power_After_IQK_A 0xea8 +#define RRx_Power_After_IQK_A_2 0xeac + +#define RTx_Power_Before_IQK_B 0xeb4 +#define RTx_Power_After_IQK_B 0xebc + +#define RRx_Power_Before_IQK_B 0xec0 +#define RRx_Power_Before_IQK_B_2 0xec4 +#define RRx_Power_After_IQK_B 0xec8 +#define RRx_Power_After_IQK_B_2 0xecc + +#define RRx_OFDM 0xed0 +#define RRx_Wait_RIFS 0xed4 +#define RRx_TO_Rx 0xed8 +#define RStandby 0xedc +#define RSleep 0xee0 +#define RPMPD_ANAEN 0xeec + +#define RZEBRA1_HSSIENABLE 0x0 +#define RZEBRA1_TRXENABLE1 0x1 +#define RZEBRA1_TRXENABLE2 0x2 +#define RZEBRA1_AGC 0x4 +#define RZEBRA1_CHARGEPUMP 0x5 +#define RZEBRA1_CHANNEL 0x7 + +#define RZEBRA1_TXGAIN 0x8 +#define RZEBRA1_TXLPF 0x9 +#define RZEBRA1_RXLPF 0xb +#define RZEBRA1_RXHPFCORNER 0xc + +#define RGLOBALCTRL 0 +#define RRTL8256_TXLPF 19 +#define RRTL8256_RXLPF 11 +#define RRTL8258_TXLPF 0x11 +#define RRTL8258_RXLPF 0x13 +#define RRTL8258_RSSILPF 0xa + +#define RF_AC 0x00 + +#define RF_IQADJ_G1 0x01 +#define RF_IQADJ_G2 0x02 +#define RF_POW_TRSW 0x05 + +#define RF_GAIN_RX 0x06 +#define RF_GAIN_TX 0x07 + +#define RF_TXM_IDAC 0x08 +#define RF_BS_IQGEN 0x0F + +#define RF_MODE1 0x10 +#define RF_MODE2 0x11 + +#define RF_RX_AGC_HP 0x12 +#define RF_TX_AGC 0x13 +#define RF_BIAS 0x14 +#define RF_IPA 0x15 +#define RF_POW_ABILITY 0x17 +#define RF_MODE_AG 0x18 +#define RRFCHANNEL 0x18 +#define RF_CHNLBW 0x18 +#define RF_TOP 0x19 + +#define RF_RX_G1 0x1A +#define RF_RX_G2 0x1B + +#define RF_RX_BB2 0x1C +#define 
RF_RX_BB1 0x1D + +#define RF_RCK1 0x1E +#define RF_RCK2 0x1F + +#define RF_TX_G1 0x20 +#define RF_TX_G2 0x21 +#define RF_TX_G3 0x22 + +#define RF_TX_BB1 0x23 +#define RF_T_METER 0x24 +#define RF_T_METER_88E 0x42 +#define RF_T_METER_8812A 0x42 + +#define RF_SYN_G1 0x25 +#define RF_SYN_G2 0x26 +#define RF_SYN_G3 0x27 +#define RF_SYN_G4 0x28 +#define RF_SYN_G5 0x29 +#define RF_SYN_G6 0x2A +#define RF_SYN_G7 0x2B +#define RF_SYN_G8 0x2C + +#define RF_RCK_OS 0x30 +#define RF_TXPA_G1 0x31 +#define RF_TXPA_G2 0x32 +#define RF_TXPA_G3 0x33 + +#define RF_TX_BIAS_A 0x35 +#define RF_TX_BIAS_D 0x36 +#define RF_LOBF_9 0x38 +#define RF_RXRF_A3 0x3C +#define RF_TRSW 0x3F + +#define RF_TXRF_A2 0x41 +#define RF_TXPA_G4 0x46 +#define RF_TXPA_A4 0x4B + +#define RF_APK 0x63 + +#define RF_WE_LUT 0xEF + +#define BBBRESETB 0x100 +#define BGLOBALRESETB 0x200 +#define BOFDMTXSTART 0x4 +#define BCCKTXSTART 0x8 +#define BCRC32DEBUG 0x100 +#define BPMACLOOPBACK 0x10 +#define BTXLSIG 0xffffff +#define BOFDMTXRATE 0xf +#define BOFDMTXRESERVED 0x10 +#define BOFDMTXLENGTH 0x1ffe0 +#define BOFDMTXPARITY 0x20000 +#define BTXHTSIG1 0xffffff +#define BTXHTMCSRATE 0x7f +#define BTXHTBW 0x80 +#define BTXHTLENGTH 0xffff00 +#define BTXHTSIG2 0xffffff +#define BTXHTSMOOTHING 0x1 +#define BTXHTSOUNDING 0x2 +#define BTXHTRESERVED 0x4 +#define BTXHTAGGREATION 0x8 +#define BTXHTSTBC 0x30 +#define BTXHTADVANCECODING 0x40 +#define BTXHTSHORTGI 0x80 +#define BTXHTNUMBERHT_LTF 0x300 +#define BTXHTCRC8 0x3fc00 +#define BCOUNTERRESET 0x10000 +#define BNUMOFOFDMTX 0xffff +#define BNUMOFCCKTX 0xffff0000 +#define BTXIDLEINTERVAL 0xffff +#define BOFDMSERVICE 0xffff0000 +#define BTXMACHEADER 0xffffffff +#define BTXDATAINIT 0xff +#define BTXHTMODE 0x100 +#define BTXDATATYPE 0x30000 +#define BTXRANDOMSEED 0xffffffff +#define BCCKTXPREAMBLE 0x1 +#define BCCKTXSFD 0xffff0000 +#define BCCKTXSIG 0xff +#define BCCKTXSERVICE 0xff00 +#define BCCKLENGTHEXT 0x8000 +#define BCCKTXLENGHT 0xffff0000 +#define BCCKTXCRC16 0xffff +#define BCCKTXSTATUS 0x1 +#define BOFDMTXSTATUS 0x2 +#define IS_BB_REG_OFFSET_92S(_Offset) \ + ((_Offset >= 0x800) && (_Offset <= 0xfff)) + +#define BRFMOD 0x1 +#define BJAPANMODE 0x2 +#define BCCKTXSC 0x30 +/* Block & Path enable*/ +#define ROFDMCCKEN 0x808 +#define BCCKEN 0x10000000 +#define BOFDMEN 0x20000000 +#define RRXPATH 0x808 /* Rx antenna*/ +#define BRXPATH 0xff +#define RTXPATH 0x80c /* Tx antenna*/ +#define BTXPATH 0x0fffffff +#define RCCK_RX 0xa04 /* for cck rx path selection*/ +#define BCCK_RX 0x0c000000 +#define RVHTLEN_USE_LSIG 0x8c3 /* Use LSIG for VHT length*/ + + +#define BOFDMRXADCPHASE 0x10000 +#define BOFDMTXDACPHASE 0x40000 +#define BXATXAGC 0x3f + +#define BXBTXAGC 0xf00 +#define BXCTXAGC 0xf000 +#define BXDTXAGC 0xf0000 + +#define BPASTART 0xf0000000 +#define BTRSTART 0x00f00000 +#define BRFSTART 0x0000f000 +#define BBBSTART 0x000000f0 +#define BBBCCKSTART 0x0000000f +#define BPAEND 0xf +#define BTREND 0x0f000000 +#define BRFEND 0x000f0000 +#define BCCAMASK 0x000000f0 +#define BR2RCCAMASK 0x00000f00 +#define BHSSI_R2TDELAY 0xf8000000 +#define BHSSI_T2RDELAY 0xf80000 +#define BCONTXHSSI 0x400 +#define BIGFROMCCK 0x200 +#define BAGCADDRESS 0x3f +#define BRXHPTX 0x7000 +#define BRXHP2RX 0x38000 +#define BRXHPCCKINI 0xc0000 +#define BAGCTXCODE 0xc00000 +#define BAGCRXCODE 0x300000 + +#define B3WIREDATALENGTH 0x800 +#define B3WIREADDREAALENGTH 0x400 + +#define B3WIRERFPOWERDOWN 0x1 +#define B5GPAPEPOLARITY 0x40000000 +#define B2GPAPEPOLARITY 0x80000000 +#define BRFSW_TXDEFAULTANT 0x3 +#define BRFSW_TXOPTIONANT 0x30 
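/*
 * Editorial note: the register offsets and bit masks defined in this header
 * are consumed by the rtlwifi read-modify-write helpers used later in this
 * patch (rf.c calls rtl_set_bbreg(hw, reg, bitmask, data) and
 * rtl_set_rfreg(hw, path, reg, bitmask, data), where 'data' is the field
 * value and the helper places it under 'bitmask'). A minimal illustrative
 * sketch, not part of the driver, showing how the block-enable bits above
 * (ROFDMCCKEN with BCCKEN/BOFDMEN) would typically be toggled; it assumes
 * rtl_set_bbreg() is visible via wifi.h as in the rest of this patch:
 */
static inline void example_set_bb_blocks(struct ieee80211_hw *hw,
					 bool cck_on, bool ofdm_on)
{
	/* Both enable bits live in the same baseband register (0x808);
	 * the helper shifts the 0/1 value into the masked bit for us. */
	rtl_set_bbreg(hw, ROFDMCCKEN, BCCKEN, cck_on ? 1 : 0);
	rtl_set_bbreg(hw, ROFDMCCKEN, BOFDMEN, ofdm_on ? 1 : 0);
}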
+#define BRFSW_RXDEFAULTANT 0x300 +#define BRFSW_RXOPTIONANT 0x3000 +#define BRFSI_3WIREDATA 0x1 +#define BRFSI_3WIRECLOCK 0x2 +#define BRFSI_3WIRELOAD 0x4 +#define BRFSI_3WIRERW 0x8 +#define BRFSI_3WIRE 0xf + +#define BRFSI_RFENV 0x10 + +#define BRFSI_TRSW 0x20 +#define BRFSI_TRSWB 0x40 +#define BRFSI_ANTSW 0x100 +#define BRFSI_ANTSWB 0x200 +#define BRFSI_PAPE 0x400 +#define BRFSI_PAPE5G 0x800 +#define BBANDSELECT 0x1 +#define BHTSIG2_GI 0x80 +#define BHTSIG2_SMOOTHING 0x01 +#define BHTSIG2_SOUNDING 0x02 +#define BHTSIG2_AGGREATON 0x08 +#define BHTSIG2_STBC 0x30 +#define BHTSIG2_ADVCODING 0x40 +#define BHTSIG2_NUMOFHTLTF 0x300 +#define BHTSIG2_CRC8 0x3fc +#define BHTSIG1_MCS 0x7f +#define BHTSIG1_BANDWIDTH 0x80 +#define BHTSIG1_HTLENGTH 0xffff +#define BLSIG_RATE 0xf +#define BLSIG_RESERVED 0x10 +#define BLSIG_LENGTH 0x1fffe +#define BLSIG_PARITY 0x20 +#define BCCKRXPHASE 0x4 + +#define BLSSIREADADDRESS 0x7f800000 +#define BLSSIREADEDGE 0x80000000 + +#define BLSSIREADBACKDATA 0xfffff + +#define BLSSIREADOKFLAG 0x1000 +#define BCCKSAMPLERATE 0x8 +#define BREGULATOR0STANDBY 0x1 +#define BREGULATORPLLSTANDBY 0x2 +#define BREGULATOR1STANDBY 0x4 +#define BPLLPOWERUP 0x8 +#define BDPLLPOWERUP 0x10 +#define BDA10POWERUP 0x20 +#define BAD7POWERUP 0x200 +#define BDA6POWERUP 0x2000 +#define BXTALPOWERUP 0x4000 +#define B40MDCLKPOWERUP 0x8000 +#define BDA6DEBUGMODE 0x20000 +#define BDA6SWING 0x380000 + +#define BADCLKPHASE 0x4000000 +#define B80MCLKDELAY 0x18000000 +#define BAFEWATCHDOGENABLE 0x20000000 + +#define BXTALCAP01 0xc0000000 +#define BXTALCAP23 0x3 +#define BXTALCAP92X 0x0f000000 +#define BXTALCAP 0x0f000000 + +#define BINTDIFCLKENABLE 0x400 +#define BEXTSIGCLKENABLE 0x800 +#define BBANDGAP_MBIAS_POWERUP 0x10000 +#define BAD11SH_GAIN 0xc0000 +#define BAD11NPUT_RANGE 0x700000 +#define BAD110P_CURRENT 0x3800000 +#define BLPATH_LOOPBACK 0x4000000 +#define BQPATH_LOOPBACK 0x8000000 +#define BAFE_LOOPBACK 0x10000000 +#define BDA10_SWING 0x7e0 +#define BDA10_REVERSE 0x800 +#define BDA_CLK_SOURCE 0x1000 +#define BDA7INPUT_RANGE 0x6000 +#define BDA7_GAIN 0x38000 +#define BDA7OUTPUT_CM_MODE 0x40000 +#define BDA7INPUT_CM_MODE 0x380000 +#define BDA7CURRENT 0xc00000 +#define BREGULATOR_ADJUST 0x7000000 +#define BAD11POWERUP_ATTX 0x1 +#define BDA10PS_ATTX 0x10 +#define BAD11POWERUP_ATRX 0x100 +#define BDA10PS_ATRX 0x1000 +#define BCCKRX_AGC_FORMAT 0x200 +#define BPSDFFT_SAMPLE_POINT 0xc000 +#define BPSD_AVERAGE_NUM 0x3000 +#define BIQPATH_CONTROL 0xc00 +#define BPSD_FREQ 0x3ff +#define BPSD_ANTENNA_PATH 0x30 +#define BPSD_IQ_SWITCH 0x40 +#define BPSD_RX_TRIGGER 0x400000 +#define BPSD_TX_TRIGGER 0x80000000 +#define BPSD_SINE_TONE_SCALE 0x7f000000 +#define BPSD_REPORT 0xffff + +#define BOFDM_TXSC 0x30000000 +#define BCCK_TXON 0x1 +#define BOFDM_TXON 0x2 +#define BDEBUG_PAGE 0xfff +#define BDEBUG_ITEM 0xff +#define BANTL 0x10 +#define BANT_NONHT 0x100 +#define BANT_HT1 0x1000 +#define BANT_HT2 0x10000 +#define BANT_HT1S1 0x100000 +#define BANT_NONHTS1 0x1000000 + +#define BCCK_BBMODE 0x3 +#define BCCK_TXPOWERSAVING 0x80 +#define BCCK_RXPOWERSAVING 0x40 + +#define BCCK_SIDEBAND 0x10 + +#define BCCK_SCRAMBLE 0x8 +#define BCCK_ANTDIVERSITY 0x8000 +#define BCCK_CARRIER_RECOVERY 0x4000 +#define BCCK_TXRATE 0x3000 +#define BCCK_DCCANCEL 0x0800 +#define BCCK_ISICANCEL 0x0400 +#define BCCK_MATCH_FILTER 0x0200 +#define BCCK_EQUALIZER 0x0100 +#define BCCK_PREAMBLE_DETECT 0x800000 +#define BCCK_FAST_FALSECCA 0x400000 +#define BCCK_CH_ESTSTART 0x300000 +#define BCCK_CCA_COUNT 0x080000 +#define BCCK_CS_LIM 0x070000 
+#define BCCK_BIST_MODE 0x80000000 +#define BCCK_CCAMASK 0x40000000 +#define BCCK_TX_DAC_PHASE 0x4 +#define BCCK_RX_ADC_PHASE 0x20000000 +#define BCCKR_CP_MODE 0x0100 +#define BCCK_TXDC_OFFSET 0xf0 +#define BCCK_RXDC_OFFSET 0xf +#define BCCK_CCA_MODE 0xc000 +#define BCCK_FALSECS_LIM 0x3f00 +#define BCCK_CS_RATIO 0xc00000 +#define BCCK_CORGBIT_SEL 0x300000 +#define BCCK_PD_LIM 0x0f0000 +#define BCCK_NEWCCA 0x80000000 +#define BCCK_RXHP_OF_IG 0x8000 +#define BCCK_RXIG 0x7f00 +#define BCCK_LNA_POLARITY 0x800000 +#define BCCK_RX1ST_BAIN 0x7f0000 +#define BCCK_RF_EXTEND 0x20000000 +#define BCCK_RXAGC_SATLEVEL 0x1f000000 +#define BCCK_RXAGC_SATCOUNT 0xe0 +#define bCCKRxRFSettle 0x1f +#define BCCK_FIXED_RXAGC 0x8000 +#define BCCK_ANTENNA_POLARITY 0x2000 +#define BCCK_TXFILTER_TYPE 0x0c00 +#define BCCK_RXAGC_REPORTTYPE 0x0300 +#define BCCK_RXDAGC_EN 0x80000000 +#define BCCK_RXDAGC_PERIOD 0x20000000 +#define BCCK_RXDAGC_SATLEVEL 0x1f000000 +#define BCCK_TIMING_RECOVERY 0x800000 +#define BCCK_TXC0 0x3f0000 +#define BCCK_TXC1 0x3f000000 +#define BCCK_TXC2 0x3f +#define BCCK_TXC3 0x3f00 +#define BCCK_TXC4 0x3f0000 +#define BCCK_TXC5 0x3f000000 +#define BCCK_TXC6 0x3f +#define BCCK_TXC7 0x3f00 +#define BCCK_DEBUGPORT 0xff0000 +#define BCCK_DAC_DEBUG 0x0f000000 +#define BCCK_FALSEALARM_ENABLE 0x8000 +#define BCCK_FALSEALARM_READ 0x4000 +#define BCCK_TRSSI 0x7f +#define BCCK_RXAGC_REPORT 0xfe +#define BCCK_RXREPORT_ANTSEL 0x80000000 +#define BCCK_RXREPORT_MFOFF 0x40000000 +#define BCCK_RXREPORT_SQLOSS 0x20000000 +#define BCCK_RXREPORT_PKTLOSS 0x10000000 +#define BCCK_RXREPORT_LOCKEDBIT 0x08000000 +#define BCCK_RXREPORT_RATEERROR 0x04000000 +#define BCCK_RXREPORT_RXRATE 0x03000000 +#define BCCK_RXFA_COUNTER_LOWER 0xff +#define BCCK_RXFA_COUNTER_UPPER 0xff000000 +#define BCCK_RXHPAGC_START 0xe000 +#define BCCK_RXHPAGC_FINAL 0x1c00 +#define BCCK_RXFALSEALARM_ENABLE 0x8000 +#define BCCK_FACOUNTER_FREEZE 0x4000 +#define BCCK_TXPATH_SEL 0x10000000 +#define BCCK_DEFAULT_RXPATH 0xc000000 +#define BCCK_OPTION_RXPATH 0x3000000 + +#define BNUM_OFSTF 0x3 +#define BSHIFT_L 0xc0 +#define BGI_TH 0xc +#define BRXPATH_A 0x1 +#define BRXPATH_B 0x2 +#define BRXPATH_C 0x4 +#define BRXPATH_D 0x8 +#define BTXPATH_A 0x1 +#define BTXPATH_B 0x2 +#define BTXPATH_C 0x4 +#define BTXPATH_D 0x8 +#define BTRSSI_FREQ 0x200 +#define BADC_BACKOFF 0x3000 +#define BDFIR_BACKOFF 0xc000 +#define BTRSSI_LATCH_PHASE 0x10000 +#define BRX_LDC_OFFSET 0xff +#define BRX_QDC_OFFSET 0xff00 +#define BRX_DFIR_MODE 0x1800000 +#define BRX_DCNF_TYPE 0xe000000 +#define BRXIQIMB_A 0x3ff +#define BRXIQIMB_B 0xfc00 +#define BRXIQIMB_C 0x3f0000 +#define BRXIQIMB_D 0xffc00000 +#define BDC_DC_NOTCH 0x60000 +#define BRXNB_NOTCH 0x1f000000 +#define BPD_TH 0xf +#define BPD_TH_OPT2 0xc000 +#define BPWED_TH 0x700 +#define BIFMF_WIN_L 0x800 +#define BPD_OPTION 0x1000 +#define BMF_WIN_L 0xe000 +#define BBW_SEARCH_L 0x30000 +#define BWIN_ENH_L 0xc0000 +#define BBW_TH 0x700000 +#define BED_TH2 0x3800000 +#define BBW_OPTION 0x4000000 +#define BRADIO_TH 0x18000000 +#define BWINDOW_L 0xe0000000 +#define BSBD_OPTION 0x1 +#define BFRAME_TH 0x1c +#define BFS_OPTION 0x60 +#define BDC_SLOPE_CHECK 0x80 +#define BFGUARD_COUNTER_DC_L 0xe00 +#define BFRAME_WEIGHT_SHORT 0x7000 +#define BSUB_TUNE 0xe00000 +#define BFRAME_DC_LENGTH 0xe000000 +#define BSBD_START_OFFSET 0x30000000 +#define BFRAME_TH_2 0x7 +#define BFRAME_GI2_TH 0x38 +#define BGI2_SYNC_EN 0x40 +#define BSARCH_SHORT_EARLY 0x300 +#define BSARCH_SHORT_LATE 0xc00 +#define BSARCH_GI2_LATE 0x70000 +#define BCFOANTSUM 0x1 
+#define BCFOACC 0x2 +#define BCFOSTARTOFFSET 0xc +#define BCFOLOOPBACK 0x70 +#define BCFOSUMWEIGHT 0x80 +#define BDAGCENABLE 0x10000 +#define BTXIQIMB_A 0x3ff +#define BTXIQIMB_b 0xfc00 +#define BTXIQIMB_C 0x3f0000 +#define BTXIQIMB_D 0xffc00000 +#define BTXIDCOFFSET 0xff +#define BTXIQDCOFFSET 0xff00 +#define BTXDFIRMODE 0x10000 +#define BTXPESUDO_NOISEON 0x4000000 +#define BTXPESUDO_NOISE_A 0xff +#define BTXPESUDO_NOISE_B 0xff00 +#define BTXPESUDO_NOISE_C 0xff0000 +#define BTXPESUDO_NOISE_D 0xff000000 +#define BCCA_DROPOPTION 0x20000 +#define BCCA_DROPTHRES 0xfff00000 +#define BEDCCA_H 0xf +#define BEDCCA_L 0xf0 +#define BLAMBDA_ED 0x300 +#define BRX_INITIALGAIN 0x7f +#define BRX_ANTDIV_EN 0x80 +#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00 +#define BRX_HIGHPOWER_FLOW 0x8000 +#define BRX_AGC_FREEZE_THRES 0xc0000 +#define BRX_FREEZESTEP_AGC1 0x300000 +#define BRX_FREEZESTEP_AGC2 0xc00000 +#define BRX_FREEZESTEP_AGC3 0x3000000 +#define BRX_FREEZESTEP_AGC0 0xc000000 +#define BRXRSSI_CMP_EN 0x10000000 +#define BRXQUICK_AGCEN 0x20000000 +#define BRXAGC_FREEZE_THRES_MODE 0x40000000 +#define BRX_OVERFLOW_CHECKTYPE 0x80000000 +#define BRX_AGCSHIFT 0x7f +#define BTRSW_TRI_ONLY 0x80 +#define BPOWER_THRES 0x300 +#define BRXAGC_EN 0x1 +#define BRXAGC_TOGETHER_EN 0x2 +#define BRXAGC_MIN 0x4 +#define BRXHP_INI 0x7 +#define BRXHP_TRLNA 0x70 +#define BRXHP_RSSI 0x700 +#define BRXHP_BBP1 0x7000 +#define BRXHP_BBP2 0x70000 +#define BRXHP_BBP3 0x700000 +#define BRSSI_H 0x7f0000 +#define BRSSI_GEN 0x7f000000 +#define BRXSETTLE_TRSW 0x7 +#define BRXSETTLE_LNA 0x38 +#define BRXSETTLE_RSSI 0x1c0 +#define BRXSETTLE_BBP 0xe00 +#define BRXSETTLE_RXHP 0x7000 +#define BRXSETTLE_ANTSW_RSSI 0x38000 +#define BRXSETTLE_ANTSW 0xc0000 +#define BRXPROCESS_TIME_DAGC 0x300000 +#define BRXSETTLE_HSSI 0x400000 +#define BRXPROCESS_TIME_BBPPW 0x800000 +#define BRXANTENNA_POWER_SHIFT 0x3000000 +#define BRSSI_TABLE_SELECT 0xc000000 +#define BRXHP_FINAL 0x7000000 +#define BRXHPSETTLE_BBP 0x7 +#define BRXHTSETTLE_HSSI 0x8 +#define BRXHTSETTLE_RXHP 0x70 +#define BRXHTSETTLE_BBPPW 0x80 +#define BRXHTSETTLE_IDLE 0x300 +#define BRXHTSETTLE_RESERVED 0x1c00 +#define BRXHT_RXHP_EN 0x8000 +#define BRXAGC_FREEZE_THRES 0x30000 +#define BRXAGC_TOGETHEREN 0x40000 +#define BRXHTAGC_MIN 0x80000 +#define BRXHTAGC_EN 0x100000 +#define BRXHTDAGC_EN 0x200000 +#define BRXHT_RXHP_BBP 0x1c00000 +#define BRXHT_RXHP_FINAL 0xe0000000 +#define BRXPW_RADIO_TH 0x3 +#define BRXPW_RADIO_EN 0x4 +#define BRXMF_HOLD 0x3800 +#define BRXPD_DELAY_TH1 0x38 +#define BRXPD_DELAY_TH2 0x1c0 +#define BRXPD_DC_COUNT_MAX 0x600 +#define BRXPD_DELAY_TH 0x8000 +#define BRXPROCESS_DELAY 0xf0000 +#define BRXSEARCHRANGE_GI2_EARLY 0x700000 +#define BRXFRAME_FUARD_COUNTER_L 0x3800000 +#define BRXSGI_GUARD_L 0xc000000 +#define BRXSGI_SEARCH_L 0x30000000 +#define BRXSGI_TH 0xc0000000 +#define BDFSCNT0 0xff +#define BDFSCNT1 0xff00 +#define BDFSFLAG 0xf0000 +#define BMF_WEIGHT_SUM 0x300000 +#define BMINIDX_TH 0x7f000000 +#define BDAFORMAT 0x40000 +#define BTXCH_EMU_ENABLE 0x01000000 +#define BTRSW_ISOLATION_A 0x7f +#define BTRSW_ISOLATION_B 0x7f00 +#define BTRSW_ISOLATION_C 0x7f0000 +#define BTRSW_ISOLATION_D 0x7f000000 +#define BEXT_LNA_GAIN 0x7c00 + +#define BSTBC_EN 0x4 +#define BANTENNA_MAPPING 0x10 +#define BNSS 0x20 +#define BCFO_ANTSUM_ID 0x200 +#define BPHY_COUNTER_RESET 0x8000000 +#define BCFO_REPORT_GET 0x4000000 +#define BOFDM_CONTINUE_TX 0x10000000 +#define BOFDM_SINGLE_CARRIER 0x20000000 +#define BOFDM_SINGLE_TONE 0x40000000 +#define BHT_DETECT 0x100 +#define BCFOEN 0x10000 
+#define BCFOVALUE 0xfff00000 +#define BSIGTONE_RE 0x3f +#define BSIGTONE_IM 0x7f00 +#define BCOUNTER_CCA 0xffff +#define BCOUNTER_PARITYFAIL 0xffff0000 +#define BCOUNTER_RATEILLEGAL 0xffff +#define BCOUNTER_CRC8FAIL 0xffff0000 +#define BCOUNTER_MCSNOSUPPORT 0xffff +#define BCOUNTER_FASTSYNC 0xffff +#define BSHORTCFO 0xfff +#define BSHORTCFOT_LENGTH 12 +#define BSHORTCFOF_LENGTH 11 +#define BLONGCFO 0x7ff +#define BLONGCFOT_LENGTH 11 +#define BLONGCFOF_LENGTH 11 +#define BTAILCFO 0x1fff +#define BTAILCFOT_LENGTH 13 +#define BTAILCFOF_LENGTH 12 +#define BNOISE_EN_PWDB 0xffff +#define BCC_POWER_DB 0xffff0000 +#define BMOISE_PWDB 0xffff +#define BPOWERMEAST_LENGTH 10 +#define BPOWERMEASF_LENGTH 3 +#define BRX_HT_BW 0x1 +#define BRXSC 0x6 +#define BRX_HT 0x8 +#define BNB_INTF_DET_ON 0x1 +#define BINTF_WIN_LEN_CFG 0x30 +#define BNB_INTF_TH_CFG 0x1c0 +#define BRFGAIN 0x3f +#define BTABLESEL 0x40 +#define BTRSW 0x80 +#define BRXSNR_A 0xff +#define BRXSNR_B 0xff00 +#define BRXSNR_C 0xff0000 +#define BRXSNR_D 0xff000000 +#define BSNR_EVMT_LENGTH 8 +#define BSNR_EVMF_LENGTH 1 +#define BCSI1ST 0xff +#define BCSI2ND 0xff00 +#define BRXEVM1ST 0xff0000 +#define BRXEVM2ND 0xff000000 +#define BSIGEVM 0xff +#define BPWDB 0xff00 +#define BSGIEN 0x10000 + +#define BSFACTOR_QMA1 0xf +#define BSFACTOR_QMA2 0xf0 +#define BSFACTOR_QMA3 0xf00 +#define BSFACTOR_QMA4 0xf000 +#define BSFACTOR_QMA5 0xf0000 +#define BSFACTOR_QMA6 0xf0000 +#define BSFACTOR_QMA7 0xf00000 +#define BSFACTOR_QMA8 0xf000000 +#define BSFACTOR_QMA9 0xf0000000 +#define BCSI_SCHEME 0x100000 + +#define BNOISE_LVL_TOP_SET 0x3 +#define BCHSMOOTH 0x4 +#define BCHSMOOTH_CFG1 0x38 +#define BCHSMOOTH_CFG2 0x1c0 +#define BCHSMOOTH_CFG3 0xe00 +#define BCHSMOOTH_CFG4 0x7000 +#define BMRCMODE 0x800000 +#define BTHEVMCFG 0x7000000 + +#define BLOOP_FIT_TYPE 0x1 +#define BUPD_CFO 0x40 +#define BUPD_CFO_OFFDATA 0x80 +#define BADV_UPD_CFO 0x100 +#define BADV_TIME_CTRL 0x800 +#define BUPD_CLKO 0x1000 +#define BFC 0x6000 +#define BTRACKING_MODE 0x8000 +#define BPHCMP_ENABLE 0x10000 +#define BUPD_CLKO_LTF 0x20000 +#define BCOM_CH_CFO 0x40000 +#define BCSI_ESTI_MODE 0x80000 +#define BADV_UPD_EQZ 0x100000 +#define BUCHCFG 0x7000000 +#define BUPDEQZ 0x8000000 + +#define BRX_PESUDO_NOISE_ON 0x20000000 +#define BRX_PESUDO_NOISE_A 0xff +#define BRX_PESUDO_NOISE_B 0xff00 +#define BRX_PESUDO_NOISE_C 0xff0000 +#define BRX_PESUDO_NOISE_D 0xff000000 +#define BRX_PESUDO_NOISESTATE_A 0xffff +#define BRX_PESUDO_NOISESTATE_B 0xffff0000 +#define BRX_PESUDO_NOISESTATE_C 0xffff +#define BRX_PESUDO_NOISESTATE_D 0xffff0000 + +#define BZEBRA1_HSSIENABLE 0x8 +#define BZEBRA1_TRXCONTROL 0xc00 +#define BZEBRA1_TRXGAINSETTING 0x07f +#define BZEBRA1_RXCOUNTER 0xc00 +#define BZEBRA1_TXCHANGEPUMP 0x38 +#define BZEBRA1_RXCHANGEPUMP 0x7 +#define BZEBRA1_CHANNEL_NUM 0xf80 +#define BZEBRA1_TXLPFBW 0x400 +#define BZEBRA1_RXLPFBW 0x600 + +#define BRTL8256REG_MODE_CTRL1 0x100 +#define BRTL8256REG_MODE_CTRL0 0x40 +#define BRTL8256REG_TXLPFBW 0x18 +#define BRTL8256REG_RXLPFBW 0x600 + +#define BRTL8258_TXLPFBW 0xc +#define BRTL8258_RXLPFBW 0xc00 +#define BRTL8258_RSSILPFBW 0xc0 + +#define BBYTE0 0x1 +#define BBYTE1 0x2 +#define BBYTE2 0x4 +#define BBYTE3 0x8 +#define BWORD0 0x3 +#define BWORD1 0xc +#define BWORD 0xf + +#define MASKBYTE0 0xff +#define MASKBYTE1 0xff00 +#define MASKBYTE2 0xff0000 +#define MASKBYTE3 0xff000000 +#define MASKHWORD 0xffff0000 +#define MASKLWORD 0x0000ffff +#define MASKDWORD 0xffffffff +#define MASK12BITS 0xfff +#define MASKH4BITS 0xf0000000 +#define MASKOFDM_D 0xffc00000 
+#define MASKCCK 0x3f3f3f3f + +#define MASK4BITS 0x0f +#define MASK20BITS 0xfffff +#define RFREG_OFFSET_MASK 0xfffff + +#define BENABLE 0x1 +#define BDISABLE 0x0 + +#define LEFT_ANTENNA 0x0 +#define RIGHT_ANTENNA 0x1 + +#define TCHECK_TXSTATUS 500 +#define TUPDATE_RXCOUNTER 100 + +#define REG_UN_used_register 0x01bf + +/* WOL bit information */ +#define HAL92C_WOL_PTK_UPDATE_EVENT BIT(0) +#define HAL92C_WOL_GTK_UPDATE_EVENT BIT(1) +#define HAL92C_WOL_DISASSOC_EVENT BIT(2) +#define HAL92C_WOL_DEAUTH_EVENT BIT(3) +#define HAL92C_WOL_FW_DISCONNECT_EVENT BIT(4) + +#define WOL_REASON_PTK_UPDATE BIT(0) +#define WOL_REASON_GTK_UPDATE BIT(1) +#define WOL_REASON_DISASSOC BIT(2) +#define WOL_REASON_DEAUTH BIT(3) +#define WOL_REASON_FW_DISCONNECT BIT(4) + +#define RA_RFE_PINMUX 0xcb0 /* Path_A RFE cotrol pinmux*/ +#define RB_RFE_PINMUX 0xeb0 /* Path_B RFE control pinmux*/ + +#define RA_RFE_INV 0xcb4 +#define RB_RFE_INV 0xeb4 + +/* RXIQC */ +#define RA_RXIQC_AB 0xc10 /*RxIQ imblance matrix coeff. A & B*/ +#define RA_RXIQC_CD 0xc14 /*RxIQ imblance matrix coeff. C & D*/ +#define RA_TXSCALE 0xc1c /* Pah_A TX scaling factor*/ +#define RB_TXSCALE 0xe1c /* Path_B TX scaling factor*/ +#define RB_RXIQC_AB 0xe10 /*RxIQ imblance matrix coeff. A & B*/ +#define RB_RXIQC_CD 0xe14 /*RxIQ imblance matrix coeff. C & D*/ +#define RXIQC_AC 0x02ff /*bit mask for IQC matrix element A & C*/ +#define RXIQC_BD 0x02ff0000 /*bit mask for IQC matrix element A & C*/ + +/* 2 EFUSE_TEST (For RTL8723 partially) */ +#define EFUSE_SEL(x) (((x) & 0x3) << 8) +#define EFUSE_SEL_MASK 0x300 +#define EFUSE_WIFI_SEL_0 0x0 + +/*REG_MULTI_FUNC_CTRL(For RTL8723 Only)*/ +#define WL_HWPDN_EN BIT(0) /* Enable GPIO[9] as WiFi HW PDn source*/ +#define WL_HWPDN_SL BIT(1) /* WiFi HW PDn polarity control*/ +#define WL_FUNC_EN BIT(2) // WiFi function enable +#define WL_HWROF_EN BIT(3) // Enable GPIO[9] as WiFi RF HW PDn source +#define BT_HWPDN_EN BIT(16) // Enable GPIO[11] as BT HW PDn source +#define BT_HWPDN_SL BIT(17) // BT HW PDn polarity control +#define BT_FUNC_EN BIT(18) // BT function enable +#define BT_HWROF_EN BIT(19) // Enable GPIO[11] as BT/GPS RF HW PDn source +#define GPS_HWPDN_EN BIT(20) // Enable GPIO[10] as GPS HW PDn source +#define GPS_HWPDN_SL BIT(21) // GPS HW PDn polarity control +#define GPS_FUNC_EN BIT(22) // GPS function enable + + +#define BMASKBYTE0 0xff +#define BMASKBYTE1 0xff00 +#define BMASKBYTE2 0xff0000 +#define BMASKBYTE3 0xff000000 +#define BMASKHWORD 0xffff0000 +#define BMASKLWORD 0x0000ffff +#define BMASKDWORD 0xffffffff +#define BMASK12BITS 0xfff +#define BMASKH4BITS 0xf0000000 +#define BMASKOFDM_D 0xffc00000 +#define BMASKCCK 0x3f3f3f3f + +#define BRFREGOFFSETMASK 0xfffff + +#define ODM_REG_CCK_RPT_FORMAT_11AC 0x804 +#define ODM_REG_BB_RX_PATH_11AC 0x808 +/*PAGE 9*/ +#define ODM_REG_OFDM_FA_RST_11AC 0x9A4 +/*PAGE A*/ +#define ODM_REG_CCK_CCA_11AC 0xA0A +#define ODM_REG_CCK_FA_RST_11AC 0xA2C +#define ODM_REG_CCK_FA_11AC 0xA5C +/*PAGE C*/ +#define ODM_REG_IGI_A_11AC 0xC50 +/*PAGE E*/ +#define ODM_REG_IGI_B_11AC 0xE50 +/*PAGE F*/ +#define ODM_REG_OFDM_FA_11AC 0xF48 + + +//2 MAC REG LIST + + + + +//DIG Related +#define ODM_BIT_IGI_11AC 0xFFFFFFFF +#define ODM_BIT_CCK_RPT_FORMAT_11AC BIT16 +#define ODM_BIT_BB_RX_PATH_11AC 0xF + +typedef enum AGGRE_SIZE{ + HT_AGG_SIZE_8K = 0, + HT_AGG_SIZE_16K = 1, + HT_AGG_SIZE_32K = 2, + HT_AGG_SIZE_64K = 3, + VHT_AGG_SIZE_128K = 4, + VHT_AGG_SIZE_256K = 5, + VHT_AGG_SIZE_512K = 6, + VHT_AGG_SIZE_1024K = 7, +}AGGRE_SIZE_E, *PAGGRE_SIZE_E; + +#define REG_AMPDU_MAX_LENGTH_8812 0x0458 
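/*
 * Editorial note: the RTXAGC_A_xxx and RTXAGC_B_xxx registers defined earlier
 * in this header pack four one-byte power indexes per 32-bit word, which is
 * why the rf.c code in this patch builds the value byte-by-byte, clamps each
 * byte, and writes the result with MASKDWORD. A minimal illustrative sketch
 * of that packing (not part of the driver; it assumes rtl_set_bbreg() from
 * wifi.h and RF6052_MAX_TX_PWR from rf.h):
 */
static inline void example_write_packed_txagc(struct ieee80211_hw *hw,
					      u32 regoffset, const u8 pwr[4])
{
	u32 val = 0;
	int i;

	for (i = 0; i < 4; i++) {
		u8 p = pwr[i];

		if (p > RF6052_MAX_TX_PWR)	/* clamp, as rf.c does */
			p = RF6052_MAX_TX_PWR;
		val |= (u32)p << (i * 8);	/* byte i -> bits [8*i+7:8*i] */
	}
	rtl_set_bbreg(hw, regoffset, MASKDWORD, val);
}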
+ +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/rf.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/rf.c @@ -0,0 +1,464 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" + +static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + +void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (bandwidth) { + case HT_CHANNEL_WIDTH_20: + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 3); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 1); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 1); + break; + case HT_CHANNEL_WIDTH_80: + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 0); + rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0); + break; + default: + RT_TRACE(COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", bandwidth)); + break; + } +} + +void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 tx_agc[2] = {0, 0}, tmpval; + bool turbo_scanoff = false; + u8 idx1, idx2; + u8 *ptr; + u8 direction; + u32 pwrtrac_value; + + if (rtlefuse->eeprom_regulatory != 0) + turbo_scanoff = true; + + if (mac->act_scanning == true) { + tx_agc[RF90_PATH_A] = 0x3f3f3f3f; + tx_agc[RF90_PATH_B] = 0x3f3f3f3f; + + if (turbo_scanoff) { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + } + } else { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + } + + if (rtlefuse->eeprom_regulatory == 0) { + tmpval = + (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][7] << + 8); + tx_agc[RF90_PATH_A] += tmpval; + + tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + + (rtlphy->mcs_txpwrlevel_origoffset[0][15] << + 24); + 
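			/* Note the asymmetry: path A adds legacy offsets [6]/[7]
			 * shifted by 0/8, while path B adds [14]/[15] shifted by
			 * 0/24 - apparently inherited from the older rtl8192c
			 * implementation. */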
tx_agc[RF90_PATH_B] += tmpval; + } + } + + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + ptr = (u8 *) (&(tx_agc[idx1])); + for (idx2 = 0; idx2 < 4; idx2++) { + if (*ptr > RF6052_MAX_TX_PWR) + *ptr = RF6052_MAX_TX_PWR; + ptr++; + } + } + rtl8821ae_dm_txpower_track_adjust(hw,1,&direction,&pwrtrac_value); + if (direction ==1){ + tx_agc[0] += pwrtrac_value; + tx_agc[1] += pwrtrac_value; + } else if (direction == 2){ + tx_agc[0] -= pwrtrac_value; + tx_agc[1] -= pwrtrac_value; + } + tmpval = tx_agc[RF90_PATH_A] ; + rtl_set_bbreg(hw, RTXAGC_A_CCK11_CCK1, MASKDWORD, tmpval); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("CCK PWR 1~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_A_CCK11_CCK1)); + + tmpval = tx_agc[RF90_PATH_B] ; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_CCK1, MASKDWORD, tmpval); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK11_CCK1)); +} + +static void rtl8821ae_phy_get_power_base(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, u8 *ppowerlevel_bw40, u8 channel, + u32 *ofdmbase, u32 *mcsbase) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 powerBase0, powerBase1; + u8 i, powerlevel[2]; + + for (i = 0; i < 2; i++) { + powerBase0 = ppowerlevel_ofdm[i]; + + powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | + (powerBase0 << 8) | powerBase0; + *(ofdmbase + i) = powerBase0; + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + (" [OFDM power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(ofdmbase + i))); + } + + for (i = 0; i < 2; i++) { + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { + powerlevel[i] = ppowerlevel_bw20[i]; + }else{ + powerlevel[i] = ppowerlevel_bw40[i]; + } + powerBase1 = powerlevel[i]; + powerBase1 = (powerBase1 << 24) | + (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; + + *(mcsbase + i) = powerBase1; + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + (" [MCS power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(mcsbase + i))); + } +} + +static void _rtl8821ae_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerBase0, + u32 *powerBase1, + u32 *p_outwriteval) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 i, chnlgroup = 0, pwr_diff_limit[4],pwr_diff = 0,customer_pwr_diff; + u32 writeVal, customer_limit, rf; + + for (rf = 0; rf < 2; rf++) { + switch (rtlefuse->eeprom_regulatory) { + case 0: + chnlgroup = 0; + + writeVal = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index + + (rf ? 8 : 0)] + + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("RTK better performance, " + "writeVal(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + break; + case 1: + if (rtlphy->pwrgroup_cnt == 1) + chnlgroup = 0; + else { + if(channel<3) + chnlgroup = 0; + else if (channel <6) + chnlgroup = 1; + else if (channel <9) + chnlgroup = 2; + else if (channel <12) + chnlgroup = 3; + else if (channel < 14) + chnlgroup = 4; + else if (channel == 14) + chnlgroup = 5; + } + + writeVal = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + ((index < 2) ? + powerBase0[rf] : + powerBase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Realtek regulatory, 20MHz, " + "writeVal(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + + break; + case 2: + writeVal = + ((index < 2) ? 
powerBase0[rf] : powerBase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Better regulatory, " + "writeVal(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + break; + case 3: + chnlgroup = 0; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("customer's limit, 40MHz " + "rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht40[rf][channel - + 1])); + } else { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("customer's limit, 20MHz " + "rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht20[rf][channel - + 1])); + } + + if (index < 2) + pwr_diff = rtlefuse->txpwr_legacyhtdiff[rf][channel-1]; + else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) + pwr_diff = rtlefuse->txpwr_ht20diff[rf][channel-1]; + + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) + customer_pwr_diff = rtlefuse->pwrgroup_ht40[rf][channel-1]; + else + customer_pwr_diff = rtlefuse->pwrgroup_ht20[rf][channel-1]; + + if (pwr_diff > customer_pwr_diff) + pwr_diff = 0; + else + pwr_diff = customer_pwr_diff - pwr_diff; + + for (i = 0; i < 4; i++) { + pwr_diff_limit[i] = + (u8) ((rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + (rf ? 8 : 0)] & (0x7f << + (i * 8))) >> (i * 8)); + + if(pwr_diff_limit[i] > pwr_diff) + pwr_diff_limit[i] = pwr_diff; + } + + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Customer's limit rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), customer_limit)); + + writeVal = customer_limit + + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Customer, writeVal rf(%c)= 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + break; + default: + chnlgroup = 0; + writeVal = + rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("RTK better performance, writeVal " + "rf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeVal)); + break; + } + + if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) + writeVal = writeVal - 0x06060606; + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_BT2) + writeVal = writeVal - 0x0c0c0c0c; + *(p_outwriteval + rf) = writeVal; + } +} + +static void _rtl8821ae_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *pValue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 regoffset_a[6] = { + RTXAGC_A_OFDM18_OFDM6, RTXAGC_A_OFDM54_OFDM24, + RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, + RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 + }; + u16 regoffset_b[6] = { + RTXAGC_B_OFDM18_OFDM6, RTXAGC_B_OFDM54_OFDM24, + RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, + RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 + }; + u8 i, rf, pwr_val[4]; + u32 writeVal; + u16 regoffset; + + for (rf = 0; rf < 2; rf++) { + writeVal = pValue[rf]; + for (i = 0; i < 4; i++) { + pwr_val[i] = (u8) ((writeVal & (0x7f << + (i * 8))) >> (i * 8)); + + if (pwr_val[i] > RF6052_MAX_TX_PWR) + pwr_val[i] = RF6052_MAX_TX_PWR; + } + writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + (pwr_val[1] << 8) | pwr_val[0]; + + if (rf == 0) + regoffset = regoffset_a[index]; + else + regoffset = regoffset_b[index]; + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Set 0x%x = %08x\n", regoffset, writeVal)); + } +} + +void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, u8 *ppowerlevel_bw20, u8 *ppowerlevel_bw40, u8 channel) +{ + u32 writeVal[2], powerBase0[2], powerBase1[2]; + u8 index; + u8 direction; + u32 pwrtrac_value; + + rtl8821ae_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20, ppowerlevel_bw40, + channel, &powerBase0[0], &powerBase1[0]); + + rtl8821ae_dm_txpower_track_adjust(hw,1,&direction,&pwrtrac_value); + + for (index = 0; index < 6; index++) { + _rtl8821ae_get_txpower_writeval_by_regulatory(hw, + channel, index, + &powerBase0[0], + &powerBase1[0], + &writeVal[0]); + if (direction ==1){ + writeVal[0] += pwrtrac_value; + writeVal[1] += pwrtrac_value; + } else if (direction == 2){ + writeVal[0] -= pwrtrac_value; + writeVal[1] -= pwrtrac_value; + } + _rtl8821ae_write_ofdm_power_reg(hw, index, &writeVal[0]); + } +} + +bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + + return _rtl8821ae_phy_rf6052_config_parafile(hw); + +} + +static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + //u32 u4_regvalue = 0; + u8 rfpath; + bool rtstatus = true; + //struct bb_reg_def *pphyreg; + + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + switch (rfpath) { + case RF90_PATH_A: { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + else + rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + } + case RF90_PATH_B: { + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) + rtstatus = rtl8812ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + else + rtstatus = rtl8821ae_phy_config_rf_with_headerfile(hw, + (enum radio_path)rfpath); + break; + } + case RF90_PATH_C: + break; + case RF90_PATH_D: + 
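			/* num_total_rfpath is at most 2 on these parts (see
			 * rtl8821ae_phy_rf6052_config above), so the C and D
			 * cases are never reached; nothing to configure here. */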
break; + } + + if (rtstatus != true) { + RT_TRACE(COMP_INIT, DBG_TRACE, + ("Radio[%d] Fail!!", rfpath)); + return false; + } + + } + + /*put arrays in dm.c*/ + /*_rtl8821ae_config_rf_txpwr_track_headerfile(hw);*/ + RT_TRACE(COMP_INIT, DBG_TRACE, ("\n")); + return rtstatus; +} --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/rf.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/rf.h @@ -0,0 +1,46 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_RF_H__ +#define __RTL8821AE_RF_H__ + +#define RF6052_MAX_TX_PWR 0x3F +#define RF6052_MAX_REG 0x3F + +extern void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, + u8 bandwidth); +extern void rtl8821ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +extern void rtl8821ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel_ofdm, + u8 *ppowerlevel_bw20, + u8 *ppowerlevel_bw40, + u8 channel); +extern bool rtl8821ae_phy_rf6052_config(struct ieee80211_hw *hw); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/sw.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/sw.c @@ -0,0 +1,499 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#include +#include + +#include "../wifi.h" +#include "../core.h" +#include "../pci.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" +#include "hw.h" +#include "sw.h" +#include "trx.h" +#include "led.h" +#include "table.h" +#include "hal_btc.h" +#include "../btcoexist/rtl_btc.h" + +void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + + /*close ASPM for AMD defaultly */ + rtlpci->const_amdpci_aspm = 0; + + /* + * ASPM PS mode. + * 0 - Disable ASPM, + * 1 - Enable ASPM without Clock Req, + * 2 - Enable ASPM with Clock Req, + * 3 - Alwyas Enable ASPM with Clock Req, + * 4 - Always Enable ASPM without Clock Req. + * set defult to RTL8192CE:3 RTL8192E:2 + * */ + rtlpci->const_pci_aspm = 3; + + /*Setting for PCI-E device */ + rtlpci->const_devicepci_aspm_setting = 0x03; + + /*Setting for PCI-E bridge */ + rtlpci->const_hostpci_aspm_setting = 0x02; + + /* + * In Hw/Sw Radio Off situation. + * 0 - Default, + * 1 - From ASPM setting without low Mac Pwr, + * 2 - From ASPM setting with low Mac Pwr, + * 3 - Bus D3 + * set default to RTL8192CE:0 RTL8192SE:2 + */ + rtlpci->const_hwsw_rfoff_d3 = 0; + + /* + * This setting works for those device with + * backdoor ASPM setting such as EPHY setting. + * 0 - Not support ASPM, + * 1 - Support ASPM, + * 2 - According to chipset. + */ + rtlpci->const_support_pciaspm = 1; +} + +/*InitializeVariables8812E*/ +int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) +{ + int err = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + const struct firmware *firmware; + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + char *fw_name = NULL; + + rtl8821ae_bt_reg_init(hw); + rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); + + rtlpriv->dm.b_dm_initialgain_enable = 1; + rtlpriv->dm.dm_flag = 0; + rtlpriv->dm.b_disable_framebursting = 0;; + rtlpriv->dm.thermalvalue = 0; + rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25); + + mac->ht_enable = true; + + rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G; + /*following 2 is for register 5G band, refer to _rtl_init_mac80211()*/ + rtlpriv->rtlhal.bandset = BAND_ON_BOTH; + rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY; + + rtlpci->receive_config = (RCR_APPFCS | + RCR_APP_MIC | + RCR_APP_ICV | + RCR_APP_PHYST_RXFF | + RCR_NONQOS_VHT | + RCR_HTC_LOC_CTRL | + RCR_AMF | + RCR_ACF | + RCR_ADF | /*This bit controls the PS-Poll packet filter.*/ + RCR_AICV | + RCR_ACRC32 | + RCR_AB | + RCR_AM | + RCR_APM | + 0); + + + rtlpci->irq_mask[0] = + (u32) (IMR_PSTIMEOUT | + IMR_GTINT3 | + /*IMR_TBDER | + IMR_TBDOK | + IMR_BCNDMAINT0 |*/ + IMR_HSISR_IND_ON_INT | + IMR_C2HCMD | + IMR_HIGHDOK | + IMR_MGNTDOK | + IMR_BKDOK | + IMR_BEDOK | + IMR_VIDOK | + IMR_VODOK | + IMR_RDU | + IMR_ROK | + 0); + + rtlpci->irq_mask[1] = + (u32)( IMR_RXFOVW | + IMR_TXFOVW | + 0); + + /* for LPS & IPS */ + rtlpriv->psc.b_inactiveps = rtlpriv->cfg->mod_params->b_inactiveps; + rtlpriv->psc.b_swctrl_lps = rtlpriv->cfg->mod_params->b_swctrl_lps; + rtlpriv->psc.b_fwctrl_lps = rtlpriv->cfg->mod_params->b_fwctrl_lps; + rtlpriv->psc.b_reg_fwctrl_lps = 3; + rtlpriv->psc.reg_max_lps_awakeintvl = 5; + /* for ASPM, you can close aspm through + * set const_support_pciaspm = 0 */ + rtl8821ae_init_aspm_vars(hw); + + if (rtlpriv->psc.b_reg_fwctrl_lps == 1) + rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE; + else if 
(rtlpriv->psc.b_reg_fwctrl_lps == 2) + rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE; + else if (rtlpriv->psc.b_reg_fwctrl_lps == 3) + rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE; + + /* for firmware buf */ + rtlpriv->rtlhal.pfirmware = (u8 *) vmalloc(0x8000); + if (!rtlpriv->rtlhal.pfirmware) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Can't alloc buffer for fw.\n")); + return 1; + } + + fw_name = "rtlwifi/rtl8821aefw.bin"; + err = request_firmware(&firmware, fw_name, rtlpriv->io.dev); + if (err) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Failed to request firmware!\n")); + return 1; + } + + if (firmware->size > 0x8000) { + RT_TRACE(COMP_ERR, DBG_EMERG, + ("Firmware is too big!\n")); + release_firmware(firmware); + return 1; + } + + memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size); + rtlpriv->rtlhal.fwsize = firmware->size; + release_firmware(firmware); + + if (rtlpriv->cfg->ops->get_btc_status()){ + rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv); + rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv); + } + + RT_TRACE(COMP_INIT, DBG_LOUD, (" FirmwareDownload OK\n")); + return err; +} + +void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + //printk("=========>rtl8821ae_deinit_sw_vars().\n"); + if (rtlpriv->cfg->ops->get_btc_status()){ + //printk("=========>rtl8821ae_deinit_sw_vars().get_btc_status\n"); + rtlpriv->btcoexist.btc_ops->btc_halt_notify(); + } + if (rtlpriv->rtlhal.pfirmware) { + //printk("=========>rtl8821ae_deinit_sw_vars().rtlpriv->rtlhal.pfirmware\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } + //printk("<=========rtl8821ae_deinit_sw_vars().\n"); +} + +u32 rtl8812ae_rx_command_packet_handler( + struct ieee80211_hw *hw, + struct rtl_stats status, + struct sk_buff *skb + ) +{ + u32 result = 0; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (status.packet_report_type) { + case NORMAL_RX: + result = 0; + break; + case C2H_PACKET: + rtl8812ae_c2h_packet_handler(hw, skb->data, (u8) skb->len); + result = 1; + RT_TRACE(COMP_RECV, DBG_LOUD, + ("===>rtl8821ae_rx_command_packet_handler(): (u8) skb->len=%d\n\n", skb->len)); + break; + default: + RT_TRACE(COMP_RECV, DBG_LOUD, + ("===>rtl8821ae_rx_command_packet_handler(): No this packet type!!\n")); + break; + } + + return result; +} + + +/* get bt coexist status */ +bool rtl8821ae_get_btc_status(void) +{ + return true; +} + +struct rtl_hal_ops rtl8821ae_hal_ops = { + .init_sw_vars = rtl8821ae_init_sw_vars, + .deinit_sw_vars = rtl8821ae_deinit_sw_vars, + .read_eeprom_info = rtl8821ae_read_eeprom_info, + .interrupt_recognized = rtl8821ae_interrupt_recognized, + .hw_init = rtl8821ae_hw_init, + .hw_disable = rtl8821ae_card_disable, + .hw_suspend = rtl8821ae_suspend, + .hw_resume = rtl8821ae_resume, + .enable_interrupt = rtl8821ae_enable_interrupt, + .disable_interrupt = rtl8821ae_disable_interrupt, + .set_network_type = rtl8821ae_set_network_type, + .set_chk_bssid = rtl8821ae_set_check_bssid, + .set_qos = rtl8821ae_set_qos, + .set_bcn_reg = rtl8821ae_set_beacon_related_registers, + .set_bcn_intv = rtl8821ae_set_beacon_interval, + .update_interrupt_mask = rtl8821ae_update_interrupt_mask, + .get_hw_reg = rtl8821ae_get_hw_reg, + .set_hw_reg = rtl8821ae_set_hw_reg, + .update_rate_tbl = rtl8821ae_update_hal_rate_tbl, + .fill_tx_desc = rtl8821ae_tx_fill_desc, + .fill_tx_cmddesc = rtl8821ae_tx_fill_cmddesc, + .query_rx_desc = rtl8821ae_rx_query_desc, + .set_channel_access = rtl8821ae_update_channel_access_setting, + .radio_onoff_checking 
= rtl8821ae_gpio_radio_on_off_checking, + .set_bw_mode = rtl8821ae_phy_set_bw_mode, + .switch_channel = rtl8821ae_phy_sw_chnl, + .dm_watchdog = rtl8821ae_dm_watchdog, + .scan_operation_backup = rtl8821ae_phy_scan_operation_backup, + .set_rf_power_state = rtl8821ae_phy_set_rf_power_state, + .led_control = rtl8821ae_led_control, + .set_desc = rtl8821ae_set_desc, + .get_desc = rtl8821ae_get_desc, + .is_tx_desc_closed = rtl8821ae_is_tx_desc_closed, + .tx_polling = rtl8821ae_tx_polling, + .enable_hw_sec = rtl8821ae_enable_hw_security_config, + .set_key = rtl8821ae_set_key, + .init_sw_leds = rtl8821ae_init_sw_leds, + .allow_all_destaddr = rtl8821ae_allow_all_destaddr, + .get_bbreg = rtl8821ae_phy_query_bb_reg, + .set_bbreg = rtl8821ae_phy_set_bb_reg, + .get_rfreg = rtl8821ae_phy_query_rf_reg, + .set_rfreg = rtl8821ae_phy_set_rf_reg, + .c2h_command_handle = rtl_8821ae_c2h_command_handle, + .bt_wifi_media_status_notify = rtl_8821ae_bt_wifi_media_status_notify, + .bt_turn_off_bt_coexist_before_enter_lps = rtl8821ae_dm_bt_turn_off_bt_coexist_before_enter_lps, + .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, + .get_btc_status = rtl8821ae_get_btc_status, + .rx_command_packet_handler = rtl8812ae_rx_command_packet_handler, +}; + +struct rtl_mod_params rtl8821ae_mod_params = { + .sw_crypto = false, + .b_inactiveps = false,//true, + .b_swctrl_lps = false, + .b_fwctrl_lps = false, //true, +}; + +struct rtl_hal_cfg rtl8821ae_hal_cfg = { + .bar_id = 2, + .write_readback = true, + .name = "rtl8821ae_pci", + .fw_name = "rtlwifi/rtl8821aefw.bin", + .ops = &rtl8821ae_hal_ops, + .mod_params = &rtl8821ae_mod_params, + .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, + .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, + .maps[SYS_CLK] = REG_SYS_CLKR, + .maps[MAC_RCR_AM] = AM, + .maps[MAC_RCR_AB] = AB, + .maps[MAC_RCR_ACRC32] = ACRC32, + .maps[MAC_RCR_ACF] = ACF, + .maps[MAC_RCR_AAP] = AAP, + .maps[MAC_HIMR] = REG_HIMR, + .maps[MAC_HIMRE] = REG_HIMRE, + + + .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, + + .maps[EFUSE_TEST] = REG_EFUSE_TEST, + .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_CLK] = 0, + .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_PWC_EV12V] = PWC_EV12V, + .maps[EFUSE_FEN_ELDR] = FEN_ELDR, + .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, + .maps[EFUSE_ANA8M] = ANA8M, + .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, + .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, + .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, + .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES, + + .maps[RWCAM] = REG_CAMCMD, + .maps[WCAMI] = REG_CAMWRITE, + .maps[RCAMO] = REG_CAMREAD, + .maps[CAMDBG] = REG_CAMDBG, + .maps[SECR] = REG_SECCFG, + .maps[SEC_CAM_NONE] = CAM_NONE, + .maps[SEC_CAM_WEP40] = CAM_WEP40, + .maps[SEC_CAM_TKIP] = CAM_TKIP, + .maps[SEC_CAM_AES] = CAM_AES, + .maps[SEC_CAM_WEP104] = CAM_WEP104, + + .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, + .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, + .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, + .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, + .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, + .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, +/* .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, */ /*need check*/ + .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, + .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, + .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, + .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, + .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, + .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, + .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, +/* .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,*/ +/* .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,*/ + + .maps[RTL_IMR_TXFOVW] 
= IMR_TXFOVW, + .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, + .maps[RTL_IMR_BcnInt] = IMR_BCNDMAINT0, + .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, + .maps[RTL_IMR_RDU] = IMR_RDU, + .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, + .maps[RTL_IMR_BDOK] = IMR_BCNDOK0, + .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, + .maps[RTL_IMR_TBDER] = IMR_TBDER, + .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, + .maps[RTL_IMR_TBDOK] = IMR_TBDOK, + .maps[RTL_IMR_BKDOK] = IMR_BKDOK, + .maps[RTL_IMR_BEDOK] = IMR_BEDOK, + .maps[RTL_IMR_VIDOK] = IMR_VIDOK, + .maps[RTL_IMR_VODOK] = IMR_VODOK, + .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER), + + .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M, + .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M, + + .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, +}; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)) +static struct pci_device_id rtl8821ae_pci_ids[] = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, + {}, +}; +#else +static struct pci_device_id rtl8821ae_pci_ids[] __devinitdata = { + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, + {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, + {}, +}; +#endif + +MODULE_DEVICE_TABLE(pci, rtl8821ae_pci_ids); + +MODULE_AUTHOR("Ping Yan"); +MODULE_AUTHOR("Realtek WlanFAE "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); +MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); + +module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); +module_param_named(ips, rtl8821ae_mod_params.b_inactiveps, bool, 0444); +module_param_named(swlps, rtl8821ae_mod_params.b_swctrl_lps, bool, 0444); +module_param_named(fwlps, rtl8821ae_mod_params.b_fwctrl_lps, bool, 0444); +MODULE_PARM_DESC(swenc, "using hardware crypto (default 0 [hardware])\n"); +MODULE_PARM_DESC(ips, "using no link power save (default 1 is open)\n"); +MODULE_PARM_DESC(fwlps, "using linked fw control power save (default 1 is open)\n"); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) +static const SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)) +compat_pci_suspend(rtl_pci_suspend) +compat_pci_resume(rtl_pci_resume) +#endif + +static struct pci_driver rtl8821ae_driver = { + .name = KBUILD_MODNAME, + .id_table = rtl8821ae_pci_ids, + .probe = rtl_pci_probe, + .remove = rtl_pci_disconnect, + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)) + .driver.pm = &rtlwifi_pm_ops, +#elif defined(CONFIG_PM) + .suspend = rtl_pci_suspend_compat, + .resume = rtl_pci_resume_compat, +#endif + +}; + + +extern int rtl_core_module_init(void); +extern void rtl_core_module_exit(void); + +static int __init rtl8821ae_module_init(void) +{ + int ret; + + ret = rtl_core_module_init(); + if (ret) + return ret; + + //printk("==========>rtl8821ae_module_init().\n"); + ret = pci_register_driver(&rtl8821ae_driver); + if (ret) + RT_ASSERT(false, (": No device found\n")); + + 
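	/* Note: on pci_register_driver() failure the error code is returned
	 * as-is; the earlier rtl_core_module_init() is not unwound here. */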
return ret; +} + +static void __exit rtl8821ae_module_exit(void) +{ + pci_unregister_driver(&rtl8821ae_driver); + rtl_core_module_exit(); +} + +module_init(rtl8821ae_module_init); +module_exit(rtl8821ae_module_exit); --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/sw.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/sw.h @@ -0,0 +1,39 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_SW_H__ +#define __RTL8821AE_SW_H__ + +int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw); +void rtl8821ae_deinit_sw_vars(struct ieee80211_hw *hw); +void rtl8821ae_init_var_map(struct ieee80211_hw *hw); +bool rtl8821ae_get_btc_status(void); + + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/table.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/table.c @@ -0,0 +1,4002 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#include "table.h" +u32 RTL8812AE_PHY_REG_ARRAY[] = { + 0x800, 0x8020D010, + 0x804, 0x080112E0, + 0x808, 0x0E028233, + 0x80C, 0x12131113, + 0x810, 0x20101263, + 0x814, 0x020C3D10, + 0x818, 0x03A00385, + 0x820, 0x00000000, + 0x824, 0x00030FE0, + 0x828, 0x00000000, + 0x82C, 0x002083DD, + 0x830, 0x2AAA6C86, + 0x834, 0x0037A706, + 0x838, 0x06C89B44, + 0x83C, 0x0000095B, + 0x840, 0xC0000001, + 0x844, 0x40003CDE, + 0x848, 0x6210FF8B, + 0x84C, 0x6CFDFFB8, + 0x850, 0x28874706, + 0x854, 0x0001520C, + 0x858, 0x8060E000, + 0x85C, 0x74210168, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x338C2878, + 0x870, 0x03333333, + 0x874, 0x31602C2E, + 0x878, 0x00003152, + 0x87C, 0x000FC000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA202033E, + 0x8AC, 0x0FF0FA0A, + 0x8B0, 0x00000600, + 0x8B4, 0x000FC080, + 0x8B8, 0x6C0057FF, + 0x8BC, 0x4CA520A3, + 0x8C0, 0x27F00020, + 0x8C4, 0x00000000, + 0x8C8, 0x00013169, + 0x8CC, 0x08248492, + 0x8D0, 0x0000B800, + 0x8DC, 0x00000000, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8F8, 0x400002C0, + 0x8FC, 0x00000000, + 0xFF0F07D8, 0xABCD, + 0x900, 0x00000700, + 0xFF0F07D0, 0xCDEF, + 0x900, 0x00000700, + 0xCDCDCDCD, 0xCDCD, + 0x900, 0x00000700, + 0xFF0F07D8, 0xDEAD, + 0x90C, 0x00000000, + 0x910, 0x0000FC00, + 0x914, 0x00000404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x00000004, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00080080, + 0x9A8, 0x00000000, + 0x9AC, 0x00000000, + 0x9B0, 0x81081008, + 0x9B4, 0x00000000, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E4, 0x00000002, + 0x9E8, 0x000002D5, + 0xA00, 0x00D047C8, + 0xA04, 0x01FF000C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7F000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000008, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x218075B2, + 0xA84, 0x001F8C80, + 0xB00, 0x03100000, + 0xB04, 0x0000B000, + 0xB08, 0xAE0201EB, + 0xB0C, 0x01003207, + 0xB10, 0x00009807, + 0xB14, 0x01000000, + 0xB18, 0x00000002, + 0xB1C, 0x00000002, + 0xB20, 0x0000001F, + 0xB24, 0x03020100, + 0xB28, 0x07060504, + 0xB2C, 0x0B0A0908, + 0xB30, 0x0F0E0D0C, + 0xB34, 0x13121110, + 0xB38, 0x17161514, + 0xB3C, 0x0000003A, + 0xB40, 0x00000000, + 0xB44, 0x00000000, + 0xB48, 0x13000032, + 0xB4C, 0x48080000, + 0xB50, 0x00000000, + 0xB54, 0x00000000, + 0xB58, 0x00000000, + 0xB5C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000003, + 0xC20, 0x12121212, + 0xC24, 0x12121212, + 0xC28, 0x12121212, + 0xC2C, 0x12121212, + 0xC30, 0x12121212, + 0xC34, 0x12121212, + 0xC38, 0x12121212, + 0xC3C, 0x12121212, + 0xC40, 0x12121212, + 0xC44, 0x12121212, + 0xC48, 0x12121212, + 0xC4C, 0x12121212, + 0xC50, 0x00000020, + 0xC54, 0x0008121C, + 0xC58, 0x30000C1C, + 0xC5C, 
0x00000058, + 0xC60, 0x34344443, + 0xC64, 0x07003333, + 0xC68, 0x59791979, + 0xC6C, 0x59795979, + 0xC70, 0x19795979, + 0xC74, 0x19795979, + 0xC78, 0x19791979, + 0xC7C, 0x19791979, + 0xC80, 0x19791979, + 0xC84, 0x19791979, + 0xC94, 0x0100005C, + 0xC98, 0x00000000, + 0xC9C, 0x00000000, + 0xCA0, 0x00000029, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xFF0F0740, 0xABCD, + 0xCB0, 0x77547717, + 0xFF0F01C0, 0xCDEF, + 0xCB0, 0x77547717, + 0xFF0F02C0, 0xCDEF, + 0xCB0, 0x77547717, + 0xFF0F07D8, 0xCDEF, + 0xCB0, 0x54547710, + 0xFF0F07D0, 0xCDEF, + 0xCB0, 0x54547710, + 0xCDCDCDCD, 0xCDCD, + 0xCB0, 0x77547777, + 0xFF0F0740, 0xDEAD, + 0xCB4, 0x00000077, + 0xCB8, 0x00508242, + 0xE00, 0x00000007, + 0xE04, 0x00042020, + 0xE08, 0x80410231, + 0xE0C, 0x00000000, + 0xE10, 0x00000100, + 0xE14, 0x01000000, + 0xE1C, 0x40000003, + 0xE20, 0x12121212, + 0xE24, 0x12121212, + 0xE28, 0x12121212, + 0xE2C, 0x12121212, + 0xE30, 0x12121212, + 0xE34, 0x12121212, + 0xE38, 0x12121212, + 0xE3C, 0x12121212, + 0xE40, 0x12121212, + 0xE44, 0x12121212, + 0xE48, 0x12121212, + 0xE4C, 0x12121212, + 0xE50, 0x00000020, + 0xE54, 0x0008121C, + 0xE58, 0x30000C1C, + 0xE5C, 0x00000058, + 0xE60, 0x34344443, + 0xE64, 0x07003333, + 0xE68, 0x59791979, + 0xE6C, 0x59795979, + 0xE70, 0x19795979, + 0xE74, 0x19795979, + 0xE78, 0x19791979, + 0xE7C, 0x19791979, + 0xE80, 0x19791979, + 0xE84, 0x19791979, + 0xE94, 0x0100005C, + 0xE98, 0x00000000, + 0xE9C, 0x00000000, + 0xEA0, 0x00000029, + 0xEA4, 0x08040201, + 0xEA8, 0x80402010, + 0xFF0F0740, 0xABCD, + 0xEB0, 0x77547717, + 0xFF0F01C0, 0xCDEF, + 0xEB0, 0x77547717, + 0xFF0F02C0, 0xCDEF, + 0xEB0, 0x77547717, + 0xFF0F07D8, 0xCDEF, + 0xEB0, 0x54547710, + 0xFF0F07D0, 0xCDEF, + 0xEB0, 0x54547710, + 0xCDCDCDCD, 0xCDCD, + 0xEB0, 0x77547777, + 0xFF0F0740, 0xDEAD, + 0xEB4, 0x00000077, + 0xEB8, 0x00508242, +}; + +u32 RTL8821AE_PHY_REG_ARRAY[] = { + 0x800, 0x0020D090, + 0x804, 0x080112E0, + 0x808, 0x0E028211, + 0x80C, 0x92131111, + 0x810, 0x20101261, + 0x814, 0x020C3D10, + 0x818, 0x03A00385, + 0x820, 0x00000000, + 0x824, 0x00030FE0, + 0x828, 0x00000000, + 0x82C, 0x002081DD, + 0x830, 0x2AAA8E24, + 0x834, 0x0037A706, + 0x838, 0x06489B44, + 0x83C, 0x0000095B, + 0x840, 0xC0000001, + 0x844, 0x40003CDE, + 0x848, 0x62103F8B, + 0x84C, 0x6CFDFFB8, + 0x850, 0x28874706, + 0x854, 0x0001520C, + 0x858, 0x8060E000, + 0x85C, 0x74210168, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x888C2878, + 0x870, 0x08888888, + 0x874, 0x31612C2E, + 0x878, 0x00000152, + 0x87C, 0x000FD000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA2000338, + 0x8AC, 0x0FF0FA0A, + 0x8B4, 0x000FC080, + 0x8B8, 0x6C10D7FF, + 0x8BC, 0x0CA52090, + 0x8C0, 0x1BF00020, + 0x8C4, 0x00000000, + 0x8C8, 0x00013169, + 0x8CC, 0x08248492, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8F8, 0x400002C0, + 0x8FC, 0x00000000, + 0x900, 0x00000700, + 0x90C, 0x00000000, + 0x910, 0x0000FC00, + 0x914, 0x00000404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x00000004, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x974, 0x000003FF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00480080, + 0x9A8, 0x00000000, + 0x9AC, 0x00000000, + 0x9B0, 0x81081008, + 0x9B4, 0x01081008, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 
0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E0, 0x00005D00, + 0x9E4, 0x00000002, + 0x9E8, 0x00000001, + 0xA00, 0x00D047C8, + 0xA04, 0x01FF000C, + 0xA08, 0x8C8A8300, + 0xA0C, 0x2E68000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000008, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x21805490, + 0xA84, 0x001F0000, + 0xB00, 0x03100040, + 0xB04, 0x0000B000, + 0xB08, 0xAE0201EB, + 0xB0C, 0x01003207, + 0xB10, 0x00009807, + 0xB14, 0x01000000, + 0xB18, 0x00000002, + 0xB1C, 0x00000002, + 0xB20, 0x0000001F, + 0xB24, 0x03020100, + 0xB28, 0x07060504, + 0xB2C, 0x0B0A0908, + 0xB30, 0x0F0E0D0C, + 0xB34, 0x13121110, + 0xB38, 0x17161514, + 0xB3C, 0x0000003A, + 0xB40, 0x00000000, + 0xB44, 0x00000000, + 0xB48, 0x13000032, + 0xB4C, 0x48080000, + 0xB50, 0x00000000, + 0xB54, 0x00000000, + 0xB58, 0x00000000, + 0xB5C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000003, + 0xC20, 0x2C2C2C2C, + 0xC24, 0x30303030, + 0xC28, 0x30303030, + 0xC2C, 0x2C2C2C2C, + 0xC30, 0x2C2C2C2C, + 0xC34, 0x2C2C2C2C, + 0xC38, 0x2C2C2C2C, + 0xC3C, 0x2A2A2A2A, + 0xC40, 0x2A2A2A2A, + 0xC44, 0x2A2A2A2A, + 0xC48, 0x2A2A2A2A, + 0xC4C, 0x2A2A2A2A, + 0xC50, 0x00000020, + 0xC54, 0x001C1208, + 0xC58, 0x30000C1C, + 0xC5C, 0x00000058, + 0xC60, 0x34344443, + 0xC64, 0x07003333, + 0xC68, 0x19791979, + 0xC6C, 0x19791979, + 0xC70, 0x19791979, + 0xC74, 0x19791979, + 0xC78, 0x19791979, + 0xC7C, 0x19791979, + 0xC80, 0x19791979, + 0xC84, 0x19791979, + 0xC94, 0x0100005C, + 0xC98, 0x00000000, + 0xC9C, 0x00000000, + 0xCA0, 0x00000029, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xCB0, 0x77775747, + 0xCB4, 0x10000077, + 0xCB8, 0x00508240, +}; + +u32 RTL8812AE_PHY_REG_ARRAY_PG[] = { + 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840, + 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, + 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323638, + 0, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303236, + 0, 0, 1, 0x00000c34, 0xffffffff, 0x38404242, + 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283034, + 0, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303236, + 0, 0, 0, 0x00000c44, 0xffffffff, 0x42422426, + 0, 0, 1, 0x00000c48, 0xffffffff, 0x30343840, + 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, + 0, 1, 0, 0x00000e20, 0xffffffff, 0x34363840, + 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424444, + 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323638, + 0, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444, + 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303236, + 0, 1, 1, 0x00000e34, 0xffffffff, 0x38404242, + 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283034, + 0, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444, + 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303236, + 0, 1, 0, 0x00000e44, 0xffffffff, 0x42422426, + 0, 1, 1, 0x00000e48, 0xffffffff, 0x30343840, + 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, + 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, + 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323640, + 1, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444, + 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303236, + 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404242, + 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283034, + 1, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444, + 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303236, + 1, 0, 0, 0x00000c44, 0xffffffff, 0x42422426, + 1, 0, 1, 0x00000c48, 0xffffffff, 0x30343840, + 
1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, + 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424444, + 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323640, + 1, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444, + 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303236, + 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404242, + 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283034, + 1, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444, + 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303236, + 1, 1, 0, 0x00000e44, 0xffffffff, 0x42422426, + 1, 1, 1, 0x00000e48, 0xffffffff, 0x30343840, + 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628 +}; + +u32 RTL8821AE_PHY_REG_ARRAY_PG[] = { + 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, + 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838, + 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, + 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363838, + 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, + 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636, + 0, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, + 0, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022, + 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343636, + 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, + 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343636, + 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, + 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636, + 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, + 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022 +}; + +/* This array does not appear to be used: +u8 *RTL8821AE_TXPWR_LMT_ARRAY[] = { + "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "02", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "02", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "02", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "03", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "03", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "03", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "04", "34", + "ETSI", "2.4G", "20M", "CCK", "1T", "04", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "04", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "05", "34", + "ETSI", "2.4G", "20M", "CCK", "1T", "05", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "05", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "06", "34", + "ETSI", "2.4G", "20M", "CCK", "1T", "06", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "06", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "07", "34", + "ETSI", "2.4G", "20M", "CCK", "1T", "07", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "07", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "08", "34", + "ETSI", "2.4G", "20M", "CCK", "1T", "08", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "08", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "09", "34", + "ETSI", "2.4G", "20M", "CCK", "1T", "09", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "09", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "10", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "10", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "10", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "11", "32", + "ETSI", "2.4G", "20M", "CCK", "1T", "11", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "11", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "12", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "12", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "12", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "13", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "13", "32", + "MKK", "2.4G", "20M", "CCK", "1T", "13", "32", + "FCC", "2.4G", "20M", "CCK", "1T", "14", "63", + "ETSI", "2.4G", "20M", "CCK", "1T", "14", "63", + "MKK", "2.4G", "20M", "CCK", "1T", "14", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "01", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "01", "32", +
"MKK", "2.4G", "20M", "OFDM", "1T", "01", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "02", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "02", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "02", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "03", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "03", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "03", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "04", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "04", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "04", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "05", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "05", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "05", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "06", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "06", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "06", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "07", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "07", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "07", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "08", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "08", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "08", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "09", "32", + "ETSI", "2.4G", "20M", "OFDM", "1T", "09", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "09", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "10", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "10", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "10", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "11", "30", + "ETSI", "2.4G", "20M", "OFDM", "1T", "11", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "11", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "12", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "12", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "12", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "13", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "13", "32", + "MKK", "2.4G", "20M", "OFDM", "1T", "13", "32", + "FCC", "2.4G", "20M", "OFDM", "1T", "14", "63", + "ETSI", "2.4G", "20M", "OFDM", "1T", "14", "63", + "MKK", "2.4G", "20M", "OFDM", "1T", "14", "63", + "FCC", "2.4G", "20M", "HT", "1T", "01", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "01", "32", + "MKK", "2.4G", "20M", "HT", "1T", "01", "32", + "FCC", "2.4G", "20M", "HT", "1T", "02", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "02", "32", + "MKK", "2.4G", "20M", "HT", "1T", "02", "32", + "FCC", "2.4G", "20M", "HT", "1T", "03", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "03", "32", + "MKK", "2.4G", "20M", "HT", "1T", "03", "32", + "FCC", "2.4G", "20M", "HT", "1T", "04", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "04", "32", + "MKK", "2.4G", "20M", "HT", "1T", "04", "32", + "FCC", "2.4G", "20M", "HT", "1T", "05", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "05", "32", + "MKK", "2.4G", "20M", "HT", "1T", "05", "32", + "FCC", "2.4G", "20M", "HT", "1T", "06", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "06", "32", + "MKK", "2.4G", "20M", "HT", "1T", "06", "32", + "FCC", "2.4G", "20M", "HT", "1T", "07", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "07", "32", + "MKK", "2.4G", "20M", "HT", "1T", "07", "32", + "FCC", "2.4G", "20M", "HT", "1T", "08", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "08", "32", + "MKK", "2.4G", "20M", "HT", "1T", "08", "32", + "FCC", "2.4G", "20M", "HT", "1T", "09", "32", + "ETSI", "2.4G", "20M", "HT", "1T", "09", "32", + "MKK", "2.4G", "20M", "HT", "1T", "09", "32", + "FCC", "2.4G", "20M", "HT", "1T", "10", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "10", "32", + "MKK", "2.4G", "20M", "HT", "1T", "10", "32", + "FCC", "2.4G", "20M", "HT", "1T", "11", "26", + "ETSI", "2.4G", "20M", "HT", "1T", "11", 
"32", + "MKK", "2.4G", "20M", "HT", "1T", "11", "32", + "FCC", "2.4G", "20M", "HT", "1T", "12", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "12", "32", + "MKK", "2.4G", "20M", "HT", "1T", "12", "32", + "FCC", "2.4G", "20M", "HT", "1T", "13", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "13", "32", + "MKK", "2.4G", "20M", "HT", "1T", "13", "32", + "FCC", "2.4G", "20M", "HT", "1T", "14", "63", + "ETSI", "2.4G", "20M", "HT", "1T", "14", "63", + "MKK", "2.4G", "20M", "HT", "1T", "14", "63", + "FCC", "2.4G", "20M", "HT", "2T", "01", "30", + "ETSI", "2.4G", "20M", "HT", "2T", "01", "32", + "MKK", "2.4G", "20M", "HT", "2T", "01", "32", + "FCC", "2.4G", "20M", "HT", "2T", "02", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "02", "32", + "MKK", "2.4G", "20M", "HT", "2T", "02", "32", + "FCC", "2.4G", "20M", "HT", "2T", "03", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "03", "32", + "MKK", "2.4G", "20M", "HT", "2T", "03", "32", + "FCC", "2.4G", "20M", "HT", "2T", "04", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "04", "32", + "MKK", "2.4G", "20M", "HT", "2T", "04", "32", + "FCC", "2.4G", "20M", "HT", "2T", "05", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "05", "32", + "MKK", "2.4G", "20M", "HT", "2T", "05", "32", + "FCC", "2.4G", "20M", "HT", "2T", "06", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "06", "32", + "MKK", "2.4G", "20M", "HT", "2T", "06", "32", + "FCC", "2.4G", "20M", "HT", "2T", "07", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "07", "32", + "MKK", "2.4G", "20M", "HT", "2T", "07", "32", + "FCC", "2.4G", "20M", "HT", "2T", "08", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "08", "32", + "MKK", "2.4G", "20M", "HT", "2T", "08", "32", + "FCC", "2.4G", "20M", "HT", "2T", "09", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "09", "32", + "MKK", "2.4G", "20M", "HT", "2T", "09", "32", + "FCC", "2.4G", "20M", "HT", "2T", "10", "32", + "ETSI", "2.4G", "20M", "HT", "2T", "10", "32", + "MKK", "2.4G", "20M", "HT", "2T", "10", "32", + "FCC", "2.4G", "20M", "HT", "2T", "11", "30", + "ETSI", "2.4G", "20M", "HT", "2T", "11", "32", + "MKK", "2.4G", "20M", "HT", "2T", "11", "32", + "FCC", "2.4G", "20M", "HT", "2T", "12", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "12", "32", + "MKK", "2.4G", "20M", "HT", "2T", "12", "32", + "FCC", "2.4G", "20M", "HT", "2T", "13", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "13", "32", + "MKK", "2.4G", "20M", "HT", "2T", "13", "32", + "FCC", "2.4G", "20M", "HT", "2T", "14", "63", + "ETSI", "2.4G", "20M", "HT", "2T", "14", "63", + "MKK", "2.4G", "20M", "HT", "2T", "14", "63", + "FCC", "2.4G", "40M", "HT", "1T", "01", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "01", "63", + "MKK", "2.4G", "40M", "HT", "1T", "01", "63", + "FCC", "2.4G", "40M", "HT", "1T", "02", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "02", "63", + "MKK", "2.4G", "40M", "HT", "1T", "02", "63", + "FCC", "2.4G", "40M", "HT", "1T", "03", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "03", "32", + "MKK", "2.4G", "40M", "HT", "1T", "03", "32", + "FCC", "2.4G", "40M", "HT", "1T", "04", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "04", "32", + "MKK", "2.4G", "40M", "HT", "1T", "04", "32", + "FCC", "2.4G", "40M", "HT", "1T", "05", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "05", "32", + "MKK", "2.4G", "40M", "HT", "1T", "05", "32", + "FCC", "2.4G", "40M", "HT", "1T", "06", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "06", "32", + "MKK", "2.4G", "40M", "HT", "1T", "06", "32", + "FCC", "2.4G", "40M", "HT", "1T", "07", "32", + "ETSI", "2.4G", "40M", "HT", "1T", "07", "32", + "MKK", "2.4G", "40M", "HT", "1T", "07", "32", + "FCC", "2.4G", 
"40M", "HT", "1T", "08", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "08", "32", + "MKK", "2.4G", "40M", "HT", "1T", "08", "32", + "FCC", "2.4G", "40M", "HT", "1T", "09", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "09", "32", + "MKK", "2.4G", "40M", "HT", "1T", "09", "32", + "FCC", "2.4G", "40M", "HT", "1T", "10", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "10", "32", + "MKK", "2.4G", "40M", "HT", "1T", "10", "32", + "FCC", "2.4G", "40M", "HT", "1T", "11", "26", + "ETSI", "2.4G", "40M", "HT", "1T", "11", "32", + "MKK", "2.4G", "40M", "HT", "1T", "11", "32", + "FCC", "2.4G", "40M", "HT", "1T", "12", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "12", "32", + "MKK", "2.4G", "40M", "HT", "1T", "12", "32", + "FCC", "2.4G", "40M", "HT", "1T", "13", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "13", "32", + "MKK", "2.4G", "40M", "HT", "1T", "13", "32", + "FCC", "2.4G", "40M", "HT", "1T", "14", "63", + "ETSI", "2.4G", "40M", "HT", "1T", "14", "63", + "MKK", "2.4G", "40M", "HT", "1T", "14", "63", + "FCC", "2.4G", "40M", "HT", "2T", "01", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "01", "63", + "MKK", "2.4G", "40M", "HT", "2T", "01", "63", + "FCC", "2.4G", "40M", "HT", "2T", "02", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "02", "63", + "MKK", "2.4G", "40M", "HT", "2T", "02", "63", + "FCC", "2.4G", "40M", "HT", "2T", "03", "30", + "ETSI", "2.4G", "40M", "HT", "2T", "03", "30", + "MKK", "2.4G", "40M", "HT", "2T", "03", "30", + "FCC", "2.4G", "40M", "HT", "2T", "04", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "04", "30", + "MKK", "2.4G", "40M", "HT", "2T", "04", "30", + "FCC", "2.4G", "40M", "HT", "2T", "05", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "05", "30", + "MKK", "2.4G", "40M", "HT", "2T", "05", "30", + "FCC", "2.4G", "40M", "HT", "2T", "06", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "06", "30", + "MKK", "2.4G", "40M", "HT", "2T", "06", "30", + "FCC", "2.4G", "40M", "HT", "2T", "07", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "07", "30", + "MKK", "2.4G", "40M", "HT", "2T", "07", "30", + "FCC", "2.4G", "40M", "HT", "2T", "08", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "08", "30", + "MKK", "2.4G", "40M", "HT", "2T", "08", "30", + "FCC", "2.4G", "40M", "HT", "2T", "09", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "09", "30", + "MKK", "2.4G", "40M", "HT", "2T", "09", "30", + "FCC", "2.4G", "40M", "HT", "2T", "10", "32", + "ETSI", "2.4G", "40M", "HT", "2T", "10", "30", + "MKK", "2.4G", "40M", "HT", "2T", "10", "30", + "FCC", "2.4G", "40M", "HT", "2T", "11", "30", + "ETSI", "2.4G", "40M", "HT", "2T", "11", "30", + "MKK", "2.4G", "40M", "HT", "2T", "11", "30", + "FCC", "2.4G", "40M", "HT", "2T", "12", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "12", "32", + "MKK", "2.4G", "40M", "HT", "2T", "12", "32", + "FCC", "2.4G", "40M", "HT", "2T", "13", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "13", "32", + "MKK", "2.4G", "40M", "HT", "2T", "13", "32", + "FCC", "2.4G", "40M", "HT", "2T", "14", "63", + "ETSI", "2.4G", "40M", "HT", "2T", "14", "63", + "MKK", "2.4G", "40M", "HT", "2T", "14", "63", + "FCC", "5G", "20M", "OFDM", "1T", "36", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "36", "30", + "MKK", "5G", "20M", "OFDM", "1T", "36", "30", + "FCC", "5G", "20M", "OFDM", "1T", "40", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "40", "30", + "MKK", "5G", "20M", "OFDM", "1T", "40", "30", + "FCC", "5G", "20M", "OFDM", "1T", "44", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "44", "30", + "MKK", "5G", "20M", "OFDM", "1T", "44", "30", + "FCC", "5G", "20M", "OFDM", "1T", "48", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "48", 
"30", + "MKK", "5G", "20M", "OFDM", "1T", "48", "30", + "FCC", "5G", "20M", "OFDM", "1T", "52", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "52", "30", + "MKK", "5G", "20M", "OFDM", "1T", "52", "30", + "FCC", "5G", "20M", "OFDM", "1T", "56", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "56", "30", + "MKK", "5G", "20M", "OFDM", "1T", "56", "30", + "FCC", "5G", "20M", "OFDM", "1T", "60", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "60", "30", + "MKK", "5G", "20M", "OFDM", "1T", "60", "30", + "FCC", "5G", "20M", "OFDM", "1T", "64", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "64", "30", + "MKK", "5G", "20M", "OFDM", "1T", "64", "30", + "FCC", "5G", "20M", "OFDM", "1T", "100", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "100", "30", + "MKK", "5G", "20M", "OFDM", "1T", "100", "30", + "FCC", "5G", "20M", "OFDM", "1T", "114", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "114", "30", + "MKK", "5G", "20M", "OFDM", "1T", "114", "30", + "FCC", "5G", "20M", "OFDM", "1T", "108", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "108", "30", + "MKK", "5G", "20M", "OFDM", "1T", "108", "30", + "FCC", "5G", "20M", "OFDM", "1T", "112", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "112", "30", + "MKK", "5G", "20M", "OFDM", "1T", "112", "30", + "FCC", "5G", "20M", "OFDM", "1T", "116", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "116", "30", + "MKK", "5G", "20M", "OFDM", "1T", "116", "30", + "FCC", "5G", "20M", "OFDM", "1T", "120", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "120", "30", + "MKK", "5G", "20M", "OFDM", "1T", "120", "30", + "FCC", "5G", "20M", "OFDM", "1T", "124", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "124", "30", + "MKK", "5G", "20M", "OFDM", "1T", "124", "30", + "FCC", "5G", "20M", "OFDM", "1T", "128", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "128", "30", + "MKK", "5G", "20M", "OFDM", "1T", "128", "30", + "FCC", "5G", "20M", "OFDM", "1T", "132", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "132", "30", + "MKK", "5G", "20M", "OFDM", "1T", "132", "30", + "FCC", "5G", "20M", "OFDM", "1T", "136", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "136", "30", + "MKK", "5G", "20M", "OFDM", "1T", "136", "30", + "FCC", "5G", "20M", "OFDM", "1T", "140", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "140", "30", + "MKK", "5G", "20M", "OFDM", "1T", "140", "30", + "FCC", "5G", "20M", "OFDM", "1T", "149", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "149", "30", + "MKK", "5G", "20M", "OFDM", "1T", "149", "63", + "FCC", "5G", "20M", "OFDM", "1T", "153", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "153", "30", + "MKK", "5G", "20M", "OFDM", "1T", "153", "63", + "FCC", "5G", "20M", "OFDM", "1T", "157", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "157", "30", + "MKK", "5G", "20M", "OFDM", "1T", "157", "63", + "FCC", "5G", "20M", "OFDM", "1T", "161", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "161", "30", + "MKK", "5G", "20M", "OFDM", "1T", "161", "63", + "FCC", "5G", "20M", "OFDM", "1T", "165", "30", + "ETSI", "5G", "20M", "OFDM", "1T", "165", "30", + "MKK", "5G", "20M", "OFDM", "1T", "165", "63", + "FCC", "5G", "20M", "HT", "1T", "36", "30", + "ETSI", "5G", "20M", "HT", "1T", "36", "30", + "MKK", "5G", "20M", "HT", "1T", "36", "30", + "FCC", "5G", "20M", "HT", "1T", "40", "30", + "ETSI", "5G", "20M", "HT", "1T", "40", "30", + "MKK", "5G", "20M", "HT", "1T", "40", "30", + "FCC", "5G", "20M", "HT", "1T", "44", "30", + "ETSI", "5G", "20M", "HT", "1T", "44", "30", + "MKK", "5G", "20M", "HT", "1T", "44", "30", + "FCC", "5G", "20M", "HT", "1T", "48", "30", + "ETSI", "5G", "20M", "HT", "1T", "48", "30", + "MKK", "5G", "20M", "HT", "1T", "48", 
"30", + "FCC", "5G", "20M", "HT", "1T", "52", "30", + "ETSI", "5G", "20M", "HT", "1T", "52", "30", + "MKK", "5G", "20M", "HT", "1T", "52", "30", + "FCC", "5G", "20M", "HT", "1T", "56", "30", + "ETSI", "5G", "20M", "HT", "1T", "56", "30", + "MKK", "5G", "20M", "HT", "1T", "56", "30", + "FCC", "5G", "20M", "HT", "1T", "60", "30", + "ETSI", "5G", "20M", "HT", "1T", "60", "30", + "MKK", "5G", "20M", "HT", "1T", "60", "30", + "FCC", "5G", "20M", "HT", "1T", "64", "30", + "ETSI", "5G", "20M", "HT", "1T", "64", "30", + "MKK", "5G", "20M", "HT", "1T", "64", "30", + "FCC", "5G", "20M", "HT", "1T", "100", "30", + "ETSI", "5G", "20M", "HT", "1T", "100", "30", + "MKK", "5G", "20M", "HT", "1T", "100", "30", + "FCC", "5G", "20M", "HT", "1T", "114", "30", + "ETSI", "5G", "20M", "HT", "1T", "114", "30", + "MKK", "5G", "20M", "HT", "1T", "114", "30", + "FCC", "5G", "20M", "HT", "1T", "108", "30", + "ETSI", "5G", "20M", "HT", "1T", "108", "30", + "MKK", "5G", "20M", "HT", "1T", "108", "30", + "FCC", "5G", "20M", "HT", "1T", "112", "30", + "ETSI", "5G", "20M", "HT", "1T", "112", "30", + "MKK", "5G", "20M", "HT", "1T", "112", "30", + "FCC", "5G", "20M", "HT", "1T", "116", "30", + "ETSI", "5G", "20M", "HT", "1T", "116", "30", + "MKK", "5G", "20M", "HT", "1T", "116", "30", + "FCC", "5G", "20M", "HT", "1T", "120", "30", + "ETSI", "5G", "20M", "HT", "1T", "120", "30", + "MKK", "5G", "20M", "HT", "1T", "120", "30", + "FCC", "5G", "20M", "HT", "1T", "124", "30", + "ETSI", "5G", "20M", "HT", "1T", "124", "30", + "MKK", "5G", "20M", "HT", "1T", "124", "30", + "FCC", "5G", "20M", "HT", "1T", "128", "30", + "ETSI", "5G", "20M", "HT", "1T", "128", "30", + "MKK", "5G", "20M", "HT", "1T", "128", "30", + "FCC", "5G", "20M", "HT", "1T", "132", "30", + "ETSI", "5G", "20M", "HT", "1T", "132", "30", + "MKK", "5G", "20M", "HT", "1T", "132", "30", + "FCC", "5G", "20M", "HT", "1T", "136", "30", + "ETSI", "5G", "20M", "HT", "1T", "136", "30", + "MKK", "5G", "20M", "HT", "1T", "136", "30", + "FCC", "5G", "20M", "HT", "1T", "140", "30", + "ETSI", "5G", "20M", "HT", "1T", "140", "30", + "MKK", "5G", "20M", "HT", "1T", "140", "30", + "FCC", "5G", "20M", "HT", "1T", "149", "30", + "ETSI", "5G", "20M", "HT", "1T", "149", "30", + "MKK", "5G", "20M", "HT", "1T", "149", "63", + "FCC", "5G", "20M", "HT", "1T", "153", "30", + "ETSI", "5G", "20M", "HT", "1T", "153", "30", + "MKK", "5G", "20M", "HT", "1T", "153", "63", + "FCC", "5G", "20M", "HT", "1T", "157", "30", + "ETSI", "5G", "20M", "HT", "1T", "157", "30", + "MKK", "5G", "20M", "HT", "1T", "157", "63", + "FCC", "5G", "20M", "HT", "1T", "161", "30", + "ETSI", "5G", "20M", "HT", "1T", "161", "30", + "MKK", "5G", "20M", "HT", "1T", "161", "63", + "FCC", "5G", "20M", "HT", "1T", "165", "30", + "ETSI", "5G", "20M", "HT", "1T", "165", "30", + "MKK", "5G", "20M", "HT", "1T", "165", "63", + "FCC", "5G", "20M", "HT", "2T", "36", "28", + "ETSI", "5G", "20M", "HT", "2T", "36", "30", + "MKK", "5G", "20M", "HT", "2T", "36", "30", + "FCC", "5G", "20M", "HT", "2T", "40", "28", + "ETSI", "5G", "20M", "HT", "2T", "40", "30", + "MKK", "5G", "20M", "HT", "2T", "40", "30", + "FCC", "5G", "20M", "HT", "2T", "44", "28", + "ETSI", "5G", "20M", "HT", "2T", "44", "30", + "MKK", "5G", "20M", "HT", "2T", "44", "30", + "FCC", "5G", "20M", "HT", "2T", "48", "28", + "ETSI", "5G", "20M", "HT", "2T", "48", "30", + "MKK", "5G", "20M", "HT", "2T", "48", "30", + "FCC", "5G", "20M", "HT", "2T", "52", "34", + "ETSI", "5G", "20M", "HT", "2T", "52", "30", + "MKK", "5G", "20M", "HT", "2T", "52", "30", + "FCC", "5G", "20M", 
"HT", "2T", "56", "32", + "ETSI", "5G", "20M", "HT", "2T", "56", "30", + "MKK", "5G", "20M", "HT", "2T", "56", "30", + "FCC", "5G", "20M", "HT", "2T", "60", "30", + "ETSI", "5G", "20M", "HT", "2T", "60", "30", + "MKK", "5G", "20M", "HT", "2T", "60", "30", + "FCC", "5G", "20M", "HT", "2T", "64", "26", + "ETSI", "5G", "20M", "HT", "2T", "64", "30", + "MKK", "5G", "20M", "HT", "2T", "64", "30", + "FCC", "5G", "20M", "HT", "2T", "100", "28", + "ETSI", "5G", "20M", "HT", "2T", "100", "30", + "MKK", "5G", "20M", "HT", "2T", "100", "30", + "FCC", "5G", "20M", "HT", "2T", "114", "28", + "ETSI", "5G", "20M", "HT", "2T", "114", "30", + "MKK", "5G", "20M", "HT", "2T", "114", "30", + "FCC", "5G", "20M", "HT", "2T", "108", "30", + "ETSI", "5G", "20M", "HT", "2T", "108", "30", + "MKK", "5G", "20M", "HT", "2T", "108", "30", + "FCC", "5G", "20M", "HT", "2T", "112", "32", + "ETSI", "5G", "20M", "HT", "2T", "112", "30", + "MKK", "5G", "20M", "HT", "2T", "112", "30", + "FCC", "5G", "20M", "HT", "2T", "116", "32", + "ETSI", "5G", "20M", "HT", "2T", "116", "30", + "MKK", "5G", "20M", "HT", "2T", "116", "30", + "FCC", "5G", "20M", "HT", "2T", "120", "34", + "ETSI", "5G", "20M", "HT", "2T", "120", "30", + "MKK", "5G", "20M", "HT", "2T", "120", "30", + "FCC", "5G", "20M", "HT", "2T", "124", "32", + "ETSI", "5G", "20M", "HT", "2T", "124", "30", + "MKK", "5G", "20M", "HT", "2T", "124", "30", + "FCC", "5G", "20M", "HT", "2T", "128", "30", + "ETSI", "5G", "20M", "HT", "2T", "128", "30", + "MKK", "5G", "20M", "HT", "2T", "128", "30", + "FCC", "5G", "20M", "HT", "2T", "132", "28", + "ETSI", "5G", "20M", "HT", "2T", "132", "30", + "MKK", "5G", "20M", "HT", "2T", "132", "30", + "FCC", "5G", "20M", "HT", "2T", "136", "28", + "ETSI", "5G", "20M", "HT", "2T", "136", "30", + "MKK", "5G", "20M", "HT", "2T", "136", "30", + "FCC", "5G", "20M", "HT", "2T", "140", "26", + "ETSI", "5G", "20M", "HT", "2T", "140", "30", + "MKK", "5G", "20M", "HT", "2T", "140", "30", + "FCC", "5G", "20M", "HT", "2T", "149", "34", + "ETSI", "5G", "20M", "HT", "2T", "149", "30", + "MKK", "5G", "20M", "HT", "2T", "149", "63", + "FCC", "5G", "20M", "HT", "2T", "153", "34", + "ETSI", "5G", "20M", "HT", "2T", "153", "30", + "MKK", "5G", "20M", "HT", "2T", "153", "63", + "FCC", "5G", "20M", "HT", "2T", "157", "34", + "ETSI", "5G", "20M", "HT", "2T", "157", "30", + "MKK", "5G", "20M", "HT", "2T", "157", "63", + "FCC", "5G", "20M", "HT", "2T", "161", "34", + "ETSI", "5G", "20M", "HT", "2T", "161", "30", + "MKK", "5G", "20M", "HT", "2T", "161", "63", + "FCC", "5G", "20M", "HT", "2T", "165", "34", + "ETSI", "5G", "20M", "HT", "2T", "165", "30", + "MKK", "5G", "20M", "HT", "2T", "165", "63", + "FCC", "5G", "40M", "HT", "1T", "38", "26", + "ETSI", "5G", "40M", "HT", "1T", "38", "30", + "MKK", "5G", "40M", "HT", "1T", "38", "30", + "FCC", "5G", "40M", "HT", "1T", "46", "30", + "ETSI", "5G", "40M", "HT", "1T", "46", "30", + "MKK", "5G", "40M", "HT", "1T", "46", "30", + "FCC", "5G", "40M", "HT", "1T", "54", "30", + "ETSI", "5G", "40M", "HT", "1T", "54", "30", + "MKK", "5G", "40M", "HT", "1T", "54", "30", + "FCC", "5G", "40M", "HT", "1T", "62", "26", + "ETSI", "5G", "40M", "HT", "1T", "62", "30", + "MKK", "5G", "40M", "HT", "1T", "62", "30", + "FCC", "5G", "40M", "HT", "1T", "102", "24", + "ETSI", "5G", "40M", "HT", "1T", "102", "30", + "MKK", "5G", "40M", "HT", "1T", "102", "30", + "FCC", "5G", "40M", "HT", "1T", "110", "30", + "ETSI", "5G", "40M", "HT", "1T", "110", "30", + "MKK", "5G", "40M", "HT", "1T", "110", "30", + "FCC", "5G", "40M", "HT", "1T", "118", "30", 
+ "ETSI", "5G", "40M", "HT", "1T", "118", "30", + "MKK", "5G", "40M", "HT", "1T", "118", "30", + "FCC", "5G", "40M", "HT", "1T", "126", "30", + "ETSI", "5G", "40M", "HT", "1T", "126", "30", + "MKK", "5G", "40M", "HT", "1T", "126", "30", + "FCC", "5G", "40M", "HT", "1T", "134", "30", + "ETSI", "5G", "40M", "HT", "1T", "134", "30", + "MKK", "5G", "40M", "HT", "1T", "134", "30", + "FCC", "5G", "40M", "HT", "1T", "151", "30", + "ETSI", "5G", "40M", "HT", "1T", "151", "30", + "MKK", "5G", "40M", "HT", "1T", "151", "63", + "FCC", "5G", "40M", "HT", "1T", "159", "30", + "ETSI", "5G", "40M", "HT", "1T", "159", "30", + "MKK", "5G", "40M", "HT", "1T", "159", "63", + "FCC", "5G", "40M", "HT", "2T", "38", "28", + "ETSI", "5G", "40M", "HT", "2T", "38", "30", + "MKK", "5G", "40M", "HT", "2T", "38", "30", + "FCC", "5G", "40M", "HT", "2T", "46", "28", + "ETSI", "5G", "40M", "HT", "2T", "46", "30", + "MKK", "5G", "40M", "HT", "2T", "46", "30", + "FCC", "5G", "40M", "HT", "2T", "54", "30", + "ETSI", "5G", "40M", "HT", "2T", "54", "30", + "MKK", "5G", "40M", "HT", "2T", "54", "30", + "FCC", "5G", "40M", "HT", "2T", "62", "30", + "ETSI", "5G", "40M", "HT", "2T", "62", "30", + "MKK", "5G", "40M", "HT", "2T", "62", "30", + "FCC", "5G", "40M", "HT", "2T", "102", "26", + "ETSI", "5G", "40M", "HT", "2T", "102", "30", + "MKK", "5G", "40M", "HT", "2T", "102", "30", + "FCC", "5G", "40M", "HT", "2T", "110", "30", + "ETSI", "5G", "40M", "HT", "2T", "110", "30", + "MKK", "5G", "40M", "HT", "2T", "110", "30", + "FCC", "5G", "40M", "HT", "2T", "118", "34", + "ETSI", "5G", "40M", "HT", "2T", "118", "30", + "MKK", "5G", "40M", "HT", "2T", "118", "30", + "FCC", "5G", "40M", "HT", "2T", "126", "32", + "ETSI", "5G", "40M", "HT", "2T", "126", "30", + "MKK", "5G", "40M", "HT", "2T", "126", "30", + "FCC", "5G", "40M", "HT", "2T", "134", "30", + "ETSI", "5G", "40M", "HT", "2T", "134", "30", + "MKK", "5G", "40M", "HT", "2T", "134", "30", + "FCC", "5G", "40M", "HT", "2T", "151", "34", + "ETSI", "5G", "40M", "HT", "2T", "151", "30", + "MKK", "5G", "40M", "HT", "2T", "151", "63", + "FCC", "5G", "40M", "HT", "2T", "159", "34", + "ETSI", "5G", "40M", "HT", "2T", "159", "30", + "MKK", "5G", "40M", "HT", "2T", "159", "63", + "FCC", "5G", "80M", "VHT", "1T", "42", "22", + "ETSI", "5G", "80M", "VHT", "1T", "42", "30", + "MKK", "5G", "80M", "VHT", "1T", "42", "30", + "FCC", "5G", "80M", "VHT", "1T", "58", "20", + "ETSI", "5G", "80M", "VHT", "1T", "58", "30", + "MKK", "5G", "80M", "VHT", "1T", "58", "30", + "FCC", "5G", "80M", "VHT", "1T", "106", "20", + "ETSI", "5G", "80M", "VHT", "1T", "106", "30", + "MKK", "5G", "80M", "VHT", "1T", "106", "30", + "FCC", "5G", "80M", "VHT", "1T", "122", "28", + "ETSI", "5G", "80M", "VHT", "1T", "122", "30", + "MKK", "5G", "80M", "VHT", "1T", "122", "30", + "FCC", "5G", "80M", "VHT", "1T", "155", "30", + "ETSI", "5G", "80M", "VHT", "1T", "155", "30", + "MKK", "5G", "80M", "VHT", "1T", "155", "63", + "FCC", "5G", "80M", "VHT", "2T", "42", "28", + "ETSI", "5G", "80M", "VHT", "2T", "42", "30", + "MKK", "5G", "80M", "VHT", "2T", "42", "30", + "FCC", "5G", "80M", "VHT", "2T", "58", "26", + "ETSI", "5G", "80M", "VHT", "2T", "58", "30", + "MKK", "5G", "80M", "VHT", "2T", "58", "30", + "FCC", "5G", "80M", "VHT", "2T", "106", "28", + "ETSI", "5G", "80M", "VHT", "2T", "106", "30", + "MKK", "5G", "80M", "VHT", "2T", "106", "30", + "FCC", "5G", "80M", "VHT", "2T", "122", "32", + "ETSI", "5G", "80M", "VHT", "2T", "122", "30", + "MKK", "5G", "80M", "VHT", "2T", "122", "30", + "FCC", "5G", "80M", "VHT", "2T", "155", "34", 
+ "ETSI", "5G", "80M", "VHT", "2T", "155", "30", + "MKK", "5G", "80M", "VHT", "2T", "155", "63" +};*/ + +u32 RTL8812AE_RADIOA_ARRAY[] = { + 0x000, 0x00010000, + 0x018, 0x0001712A, + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x01E, 0x00080000, + 0x089, 0x00000080, + 0xFF0F0740, 0xABCD, + 0x086, 0x00014B38, + 0xFF0F02C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F01C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F07D8, 0xCDEF, + 0x086, 0x00014B3A, + 0xFF0F07D0, 0xCDEF, + 0x086, 0x00014B3A, + 0xCDCDCDCD, 0xCDCD, + 0x086, 0x00014B38, + 0xFF0F0740, 0xDEAD, + 0x0B1, 0x0001FC1A, + 0x0B3, 0x000F0810, + 0x0B4, 0x0001A78D, + 0x0BA, 0x00086180, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0xFF0F07D8, 0xABCD, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xFF0F07D0, 0xCDEF, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x00038A58, + 0x03B, 0x00037A58, + 0x03B, 0x0002A590, + 0x03B, 0x00027A50, + 0x03B, 0x00018248, + 0x03B, 0x00010240, + 0x03B, 0x00008240, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000100, + 0xFF0F07D8, 0xABCD, + 0x034, 0x0000A4EE, + 0x034, 0x00009076, + 0x034, 0x00008073, + 0x034, 0x00007070, + 0x034, 0x0000606D, + 0x034, 0x0000506A, + 0x034, 0x00004049, + 0x034, 0x00003046, + 0x034, 0x00002028, + 0x034, 0x00001025, + 0x034, 0x00000022, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADF4, + 0x034, 0x00009DF1, + 0x034, 0x00008DEE, + 0x034, 0x00007DEB, + 0x034, 0x00006DE8, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146B, + 0x034, 0x0000006D, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x0EF, 0x00000002, + 0x008, 0x00008400, + 0x018, 0x0001712A, + 0x0EF, 0x00001000, + 0x03A, 0x00000080, + 0x03B, 0x0003A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0003202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0002B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0001B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00012085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0000A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00002080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x0007A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0007202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0006B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0005B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00052085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0004A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00042080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x000BA02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x000B202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x000AB064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x000A3070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0009B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00092085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, 
+ 0x03B, 0x0008A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00082080, + 0x03C, 0x00010000, + 0x0EF, 0x00001100, + 0xFF0F0740, 0xABCD, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045DC9, + 0x034, 0x00044CE8, + 0x034, 0x000438CA, + 0x034, 0x00042889, + 0x034, 0x0004184A, + 0x034, 0x0004044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0002ADF5, + 0x034, 0x00029DF2, + 0x034, 0x00028DEF, + 0x034, 0x00027DEC, + 0x034, 0x00026DE9, + 0x034, 0x00025DC9, + 0x034, 0x00024CE8, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x0002184A, + 0x034, 0x0002044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 
0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000AFF7, + 0x034, 0x00009DF7, + 0x034, 0x00008DF4, + 0x034, 0x00007DF1, + 0x034, 0x00006DEE, + 0x034, 0x00005DCD, + 0x034, 0x00004CEB, + 0x034, 0x000038CC, + 0x034, 0x0000288B, + 0x034, 0x0000184C, + 0x034, 0x0000044C, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x00000188, + 0x035, 0x00008147, + 0x035, 0x00010147, + 0x035, 0x000201D7, + 0x035, 0x000281D7, + 0x035, 0x000301D7, + 0x035, 0x000401D8, + 0x035, 0x000481D8, + 0x035, 0x000501D8, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, 
+ 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00084EB4, + 0x036, 0x0008CC35, + 0x036, 0x00094C35, + 0x036, 0x0009CC35, + 0x036, 0x000A4935, + 0x036, 0x000ACC35, + 0x036, 0x000B4C35, + 0x036, 0x000BCC35, + 0x036, 0x000C4EB4, + 0x036, 0x000CCEB5, + 0x036, 0x000D4EB5, + 0x036, 0x000DCEB5, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0xFF0F0740, 0xABCD, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F02C0, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F01C0, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F07D8, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xFF0F07D0, 0xCDEF, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x000002A8, + 0x03C, 0x000005A2, + 0x03C, 0x00000880, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x0DF, 0x00000080, + 0x01F, 0x00040064, + 0xFF0F0740, 0xABCD, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F02C0, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F01C0, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F07D8, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xFF0F07D0, 0xCDEF, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0xCDCDCDCD, 0xCDCD, + 0x061, 0x000E5D53, + 0x062, 0x00038FCD, + 0x063, 0x000314EB, + 0x064, 0x000196AC, + 0x065, 0x000911D7, + 0xFF0F0740, 0xDEAD, + 0x008, 0x00008400, + 0x01C, 0x000739D2, + 0x0B4, 0x0001E78D, + 0x018, 0x0001F12A, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x0B4, 0x0001A78D, + 0x018, 0x0001712A, +}; + +u32 RTL8812AE_RADIOB_ARRAY[] = { + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x089, 0x00000080, + 0xFF0F0740, 0xABCD, + 0x086, 0x00014B38, + 0xFF0F01C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F02C0, 0xCDEF, + 0x086, 0x00014B38, + 0xFF0F07D8, 0xCDEF, + 0x086, 0x00014B3A, + 0xFF0F07D0, 0xCDEF, + 0x086, 0x00014B3A, + 0xCDCDCDCD, 0xCDCD, + 0x086, 0x00014B38, + 0xFF0F0740, 0xDEAD, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0xFF0F07D8, 0xABCD, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xFF0F07D0, 0xCDEF, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 
0x03B, 0x00008240, + 0xCDCDCDCD, 0xCDCD, + 0x03B, 0x00038A58, + 0x03B, 0x00037A58, + 0x03B, 0x0002A590, + 0x03B, 0x00027A50, + 0x03B, 0x00018248, + 0x03B, 0x00010240, + 0x03B, 0x00008240, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000100, + 0xFF0F07D8, 0xABCD, + 0x034, 0x0000A4EE, + 0x034, 0x00009076, + 0x034, 0x00008073, + 0x034, 0x00007070, + 0x034, 0x0000606D, + 0x034, 0x0000506A, + 0x034, 0x00004049, + 0x034, 0x00003046, + 0x034, 0x00002028, + 0x034, 0x00001025, + 0x034, 0x00000022, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADF4, + 0x034, 0x00009DF1, + 0x034, 0x00008DEE, + 0x034, 0x00007DEB, + 0x034, 0x00006DE8, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146B, + 0x034, 0x0000006D, + 0xFF0F07D8, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x0EF, 0x00000002, + 0x008, 0x00008400, + 0x018, 0x0001712A, + 0x0EF, 0x00001000, + 0x03A, 0x00000080, + 0x03B, 0x0003A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0003202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0002B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0001B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00012085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0000A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00002080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x0007A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0007202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0006B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00063070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0005B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00052085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0004A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00042080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x000BA02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x000B202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x000AB064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x000A3070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0009B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00092085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0008A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00082080, + 0x03C, 0x00010000, + 0x0EF, 0x00001100, + 0xFF0F0740, 0xABCD, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 
0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045DC9, + 0x034, 0x00044CE8, + 0x034, 0x000438CA, + 0x034, 0x00042889, + 0x034, 0x0004184A, + 0x034, 0x0004044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0002ADF5, + 0x034, 0x00029DF2, + 0x034, 0x00028DEF, + 0x034, 0x00027DEC, + 0x034, 0x00026DE9, + 0x034, 0x00025DC9, + 0x034, 0x00024CE8, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x0002184A, + 0x034, 0x0002044A, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F01C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D8, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xFF0F07D0, 0xCDEF, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000AFF7, + 0x034, 
0x00009DF7, + 0x034, 0x00008DF4, + 0x034, 0x00007DF1, + 0x034, 0x00006DEE, + 0x034, 0x00005DCD, + 0x034, 0x00004CEB, + 0x034, 0x000038CC, + 0x034, 0x0000288B, + 0x034, 0x0000184C, + 0x034, 0x0000044C, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x00000186, + 0x035, 0x00008186, + 0x035, 0x00010185, + 0x035, 0x000201D5, + 0x035, 0x000281D5, + 0x035, 0x000301D5, + 0x035, 0x000401D5, + 0x035, 0x000481D5, + 0x035, 0x000501D5, + 0x0EF, 0x00000000, + 0xFF0F0740, 0xDEAD, + 0xFF0F0740, 0xABCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F01C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F02C0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F07D8, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xFF0F07D0, 0xCDEF, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xCDCDCDCD, 0xCDCD, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, 
+ 0x036, 0x00084EB4, + 0x036, 0x0008C9B4, + 0x036, 0x000949B4, + 0x036, 0x0009C9B4, + 0x036, 0x000A4935, + 0x036, 0x000AC935, + 0x036, 0x000B4935, + 0x036, 0x000BC935, + 0x036, 0x000C4EB4, + 0x036, 0x000CCEB4, + 0x036, 0x000D4EB4, + 0x036, 0x000DCEB4, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0xFF0F0740, 0xABCD, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F01C0, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F02C0, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F07D8, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xFF0F07D0, 0xCDEF, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x000002AA, + 0x03C, 0x000005A2, + 0x03C, 0x00000880, + 0xFF0F0740, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x0DF, 0x00000080, + 0xFF0F0740, 0xABCD, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F01C0, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F02C0, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F07D8, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xFF0F07D0, 0xCDEF, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xCDCDCDCD, 0xCDCD, + 0x061, 0x000E5D53, + 0x062, 0x00038FCD, + 0x063, 0x000314EB, + 0x064, 0x000196AC, + 0x065, 0x000931D7, + 0xFF0F0740, 0xDEAD, + 0x008, 0x00008400, +}; + +u32 RTL8821AE_RADIOA_ARRAY[] = { + 0x018, 0x0001712A, + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x000, 0x00010000, + 0x01E, 0x00080000, + 0x082, 0x00000830, + 0x083, 0x00021800, + 0x084, 0x00028000, + 0x085, 0x00048000, + 0x086, 0x00094838, + 0x087, 0x00044980, + 0x088, 0x00048000, + 0x089, 0x0000D480, + 0x08A, 0x00042240, + 0x08B, 0x000F0380, + 0x08C, 0x00090000, + 0x08D, 0x00022852, + 0x08E, 0x00065540, + 0x08F, 0x00088001, + 0x0EF, 0x00020000, + 0x03E, 0x00000380, + 0x03F, 0x00090018, + 0x03E, 0x00020380, + 0x03F, 0x000A0018, + 0x03E, 0x00040308, + 0x03F, 0x000A0018, + 0x03E, 0x00060018, + 0x03F, 0x000A0018, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x089, 0x00000080, + 0x08B, 0x00080180, + 0x0EF, 0x00001000, + 0x03A, 0x00000244, + 0x03B, 0x00038027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x00030113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x00028027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x00027027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0001F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00017F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00008027, + 0x03C, 0x000CA000, + 0x03A, 0x00000244, + 0x03B, 0x00078027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x00070113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x00068027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x00067027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0005F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00057F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00048027, + 0x03C, 0x000CA000, + 0x03A, 0x00000244, + 0x03B, 0x000B8027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x000B0113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x000A8027, + 0x03C, 0x00082000, + 0x03A, 
0x000000CC, + 0x03B, 0x000A7027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0009F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00097F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00088027, + 0x03C, 0x000CA000, + 0x0EF, 0x00000000, + 0x0EF, 0x00001100, + 0xFF0F0104, 0xABCD, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xFF0F0204, 0xCDEF, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xFF0F0404, 0xCDEF, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xFF0F0200, 0xCDEF, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0004ADF7, + 0x034, 0x00049DF3, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045CCB, + 0x034, 0x0004488D, + 0x034, 0x0004348D, + 0x034, 0x0004248A, + 0x034, 0x0004108D, + 0x034, 0x0004008A, + 0xFF0F0104, 0xDEAD, + 0xFF0F0200, 0xABCD, + 0x034, 0x0002ADF4, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0002A0F3, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0002ADF7, + 0xFF0F0200, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00029DF4, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00029DF4, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00029DF4, + 0xFF0F0200, 0xCDEF, + 0x034, 0x00029DF1, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x000290F0, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00029DF2, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00028DEE, + 0x034, 0x00027DEB, + 0x034, 0x00026CCD, + 0x034, 0x00025CCA, + 0x034, 0x0002488C, + 0x034, 0x0002384C, + 0x034, 0x00022849, + 0x034, 0x00021449, + 0x034, 0x0002004D, + 0xFF0F0104, 0xDEAD, + 0xFF0F02C0, 0xABCD, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x0000ADF7, + 0x034, 0x00009DF4, + 0x034, 0x00008DF1, + 0x034, 0x00007DEE, + 0xFF0F02C0, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x034, 0x00006DEB, + 
0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xFF0F0204, 0xCDEF, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xFF0F0404, 0xCDEF, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xFF0F02C0, 0xCDEF, + 0x034, 0x0000608D, + 0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0xCDCDCDCD, 0xCDCD, + 0x034, 0x00006DCD, + 0x034, 0x00005CCD, + 0x034, 0x00004CCA, + 0x034, 0x0000388C, + 0x034, 0x00002888, + 0x034, 0x00001488, + 0x034, 0x00000486, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0xFF0F0104, 0xABCD, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xFF0F0204, 0xCDEF, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xFF0F0404, 0xCDEF, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xCDCDCDCD, 0xCDCD, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0xFF0F0104, 0xABCD, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xFF0F0204, 0xCDEF, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xFF0F0404, 0xCDEF, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xCDCDCDCD, 0xCDCD, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0xFF0F0104, 0xABCD, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xFF0F0204, 0xCDEF, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xFF0F0404, 0xCDEF, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x03C, 0x00000800, + 0xFF0F0204, 0xCDEF, + 0x03C, 0x00000800, + 0xFF0F0404, 0xCDEF, + 0x03C, 0x00000800, + 0xFF0F02C0, 0xCDEF, + 0x03C, 0x00000820, + 0xCDCDCDCD, 0xCDCD, + 0x03C, 0x00000900, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 
0xFF0F0104, 0xABCD, + 0x008, 0x0004E400, + 0xFF0F0204, 0xCDEF, + 0x008, 0x0004E400, + 0xFF0F0404, 0xCDEF, + 0x008, 0x0004E400, + 0xCDCDCDCD, 0xCDCD, + 0x008, 0x00002000, + 0xFF0F0104, 0xDEAD, + 0x0EF, 0x00000000, + 0x0DF, 0x000000C0, + 0x01F, 0x00040064, + 0xFF0F0104, 0xABCD, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xFF0F0204, 0xCDEF, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xFF0F0404, 0xCDEF, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xCDCDCDCD, 0xCDCD, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xFF0F0204, 0xCDEF, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xFF0F0404, 0xCDEF, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xCDCDCDCD, 0xCDCD, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x063, 0x000110E9, + 0xFF0F0204, 0xCDEF, + 0x063, 0x000110E9, + 0xFF0F0404, 0xCDEF, + 0x063, 0x000110E9, + 0xFF0F0200, 0xCDEF, + 0x063, 0x000710E9, + 0xFF0F02C0, 0xCDEF, + 0x063, 0x000110E9, + 0xCDCDCDCD, 0xCDCD, + 0x063, 0x000714E9, + 0xFF0F0104, 0xDEAD, + 0xFF0F0104, 0xABCD, + 0x064, 0x0001C27C, + 0xFF0F0204, 0xCDEF, + 0x064, 0x0001C27C, + 0xFF0F0404, 0xCDEF, + 0x064, 0x0001C27C, + 0xCDCDCDCD, 0xCDCD, + 0x064, 0x0001C67C, + 0xFF0F0104, 0xDEAD, + 0xFF0F0200, 0xABCD, + 0x065, 0x00093016, + 0xFF0F02C0, 0xCDEF, + 0x065, 0x00093015, + 0xCDCDCDCD, 0xCDCD, + 0x065, 0x00091016, + 0xFF0F0200, 0xDEAD, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0x03B, 0x0003824B, + 0x03B, 0x0003024B, + 0x03B, 0x0002844B, + 0x03B, 0x00020F4B, + 0x03B, 0x00018F4B, + 0x03B, 0x000104B2, + 0x03B, 0x00008049, + 0x03B, 0x00000148, + 0x03B, 0x0007824B, + 0x03B, 0x0007024B, + 0x03B, 0x0006824B, + 0x03B, 0x00060F4B, + 0x03B, 0x00058F4B, + 0x03B, 0x000504B2, + 0x03B, 0x00048049, + 0x03B, 0x00040148, + 0x0EF, 0x00000000, + 0x0EF, 0x00000100, + 0x034, 0x0000ADF3, + 0x034, 0x00009DEF, + 0x034, 0x00008DEC, + 0x034, 0x00007DE9, + 0x034, 0x00006CED, + 0x034, 0x00005CE9, + 0x034, 0x000044E9, + 0x034, 0x000034E6, + 0x034, 0x0000246A, + 0x034, 0x00001467, + 0x034, 0x00000068, + 0x0EF, 0x00000000, + 0x0ED, 0x00000010, + 0x044, 0x0000ADF2, + 0x044, 0x00009DEF, + 0x044, 0x00008DEC, + 0x044, 0x00007DE9, + 0x044, 0x00006CEC, + 0x044, 0x00005CE9, + 0x044, 0x000044EC, + 0x044, 0x000034E9, + 0x044, 0x0000246C, + 0x044, 0x00001469, + 0x044, 0x0000006C, + 0x0ED, 0x00000000, + 0x0ED, 0x00000001, + 0x040, 0x00038DA7, + 0x040, 0x000300C2, + 0x040, 0x000288E2, + 0x040, 0x000200B8, + 0x040, 0x000188A5, + 0x040, 0x00010FBC, + 0x040, 0x00008F71, + 0x040, 0x00000240, + 0x0ED, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000120, + 0x035, 0x00008120, + 0x035, 0x00010120, + 0x036, 0x00000085, + 0x036, 0x00008085, + 0x036, 0x00010085, + 0x036, 0x00018085, + 0x0EF, 0x00000000, + 0x051, 0x00000C31, + 0x052, 0x00000622, + 0x053, 0x000FC70B, + 0x054, 0x0000017E, + 0x056, 0x00051DF3, + 0x051, 0x00000C01, + 0x052, 0x000006D6, + 0x053, 0x000FC649, + 0x070, 0x00049661, + 0x071, 0x0007843E, + 0x072, 0x00000382, + 0x074, 0x00051400, + 0x035, 0x00000160, + 0x035, 0x00008160, + 0x035, 0x00010160, + 0x036, 0x00000124, + 0x036, 0x00008124, + 0x036, 0x00010124, + 0x036, 0x00018124, + 0x0ED, 0x0000000C, + 0x045, 0x00000140, + 0x045, 0x00008140, + 0x045, 0x00010140, + 0x046, 0x00000124, + 0x046, 0x00008124, + 0x046, 0x00010124, + 0x046, 0x00018124, + 0x0DF, 0x00000088, + 0x0B3, 0x000F0E18, + 0x0B4, 0x0001214C, + 0x0B7, 0x0003000C, + 0x01C, 0x000539D2, + 0x018, 0x0001F12A, + 0x0FE, 0x00000000, + 0x0FE, 0x00000000, + 0x018, 
0x0001712A, +}; + +u32 RTL8812AE_MAC_REG_ARRAY[] = { + 0x010, 0x0000000C, + 0xFF0F0180, 0xABCD, + 0x025, 0x0000000F, + 0xFF0F01C0, 0xCDEF, + 0x025, 0x0000000F, + 0xCDCDCDCD, 0xCDCD, + 0x025, 0x0000006F, + 0xFF0F0180, 0xDEAD, + 0x072, 0x00000000, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x45B, 0x00000080, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x604, 0x00000001, + 0x605, 0x00000030, + 0x607, 0x00000003, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000080, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, +}; + +u32 RTL8821AE_MAC_REG_ARRAY[] = { + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x0000003F, + 0x4C9, 0x000000FF, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 
0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x607, 0x00000007, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, +}; + +u32 RTL8812AE_AGC_TAB_ARRAY[] = { + 0xFF0F07D8, 0xABCD, + 0x81C, 0xFC000001, + 0x81C, 0xFB020001, + 0x81C, 0xFA040001, + 0x81C, 0xF9060001, + 0x81C, 0xF8080001, + 0x81C, 0xF70A0001, + 0x81C, 0xF60C0001, + 0x81C, 0xF50E0001, + 0x81C, 0xF4100001, + 0x81C, 0xF3120001, + 0x81C, 0xF2140001, + 0x81C, 0xF1160001, + 0x81C, 0xF0180001, + 0x81C, 0xEF1A0001, + 0x81C, 0xEE1C0001, + 0x81C, 0xED1E0001, + 0x81C, 0xEC200001, + 0x81C, 0xEB220001, + 0x81C, 0xEA240001, + 0x81C, 0xCD260001, + 0x81C, 0xCC280001, + 0x81C, 0xCB2A0001, + 0x81C, 0xCA2C0001, + 0x81C, 0xC92E0001, + 0x81C, 0xC8300001, + 0x81C, 0xA6320001, + 0x81C, 0xA5340001, + 0x81C, 0xA4360001, + 0x81C, 0xA3380001, + 0x81C, 0xA23A0001, + 0x81C, 0x883C0001, + 0x81C, 0x873E0001, + 0x81C, 0x86400001, + 0x81C, 0x85420001, + 0x81C, 0x84440001, + 0x81C, 0x83460001, + 0x81C, 0x82480001, + 0x81C, 0x814A0001, + 0x81C, 0x484C0001, + 0x81C, 0x474E0001, + 0x81C, 0x46500001, + 0x81C, 0x45520001, + 0x81C, 0x44540001, + 0x81C, 0x43560001, + 0x81C, 0x42580001, + 0x81C, 0x415A0001, + 0x81C, 0x255C0001, + 0x81C, 0x245E0001, + 0x81C, 0x23600001, + 0x81C, 0x22620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x216C0001, + 0x81C, 0x216E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0xFF0F07D0, 0xCDEF, + 0x81C, 0xF9000001, + 0x81C, 0xF8020001, + 0x81C, 0xF7040001, + 0x81C, 0xF6060001, + 0x81C, 0xF5080001, + 0x81C, 0xF40A0001, + 0x81C, 0xF30C0001, + 0x81C, 0xF20E0001, + 0x81C, 0xF1100001, + 0x81C, 0xF0120001, + 0x81C, 0xEF140001, + 0x81C, 0xEE160001, + 0x81C, 0xED180001, + 0x81C, 0xEC1A0001, + 0x81C, 0xEB1C0001, + 0x81C, 0xEA1E0001, + 0x81C, 0xCD200001, + 0x81C, 0xCC220001, + 0x81C, 0xCB240001, + 0x81C, 0xCA260001, + 0x81C, 0xC9280001, + 0x81C, 0xC82A0001, + 0x81C, 0xC72C0001, + 0x81C, 0xC62E0001, + 0x81C, 0xA5300001, + 0x81C, 0xA4320001, + 0x81C, 0xA3340001, + 0x81C, 0xA2360001, + 0x81C, 0x88380001, + 0x81C, 0x873A0001, + 0x81C, 0x863C0001, + 0x81C, 0x853E0001, + 0x81C, 0x84400001, + 0x81C, 0x83420001, + 0x81C, 0x82440001, + 0x81C, 0x81460001, + 0x81C, 0x48480001, + 0x81C, 0x474A0001, + 0x81C, 0x464C0001, + 0x81C, 0x454E0001, + 0x81C, 0x44500001, + 0x81C, 0x43520001, + 0x81C, 0x42540001, + 0x81C, 0x41560001, + 0x81C, 0x25580001, + 0x81C, 0x245A0001, + 0x81C, 0x235C0001, + 0x81C, 0x225E0001, + 0x81C, 0x21600001, + 0x81C, 0x21620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x236C0001, + 0x81C, 0x226E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 
0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0xCDCDCDCD, 0xCDCD, + 0x81C, 0xFF000001, + 0x81C, 0xFF020001, + 0x81C, 0xFF040001, + 0x81C, 0xFF060001, + 0x81C, 0xFF080001, + 0x81C, 0xFE0A0001, + 0x81C, 0xFD0C0001, + 0x81C, 0xFC0E0001, + 0x81C, 0xFB100001, + 0x81C, 0xFA120001, + 0x81C, 0xF9140001, + 0x81C, 0xF8160001, + 0x81C, 0xF7180001, + 0x81C, 0xF61A0001, + 0x81C, 0xF51C0001, + 0x81C, 0xF41E0001, + 0x81C, 0xF3200001, + 0x81C, 0xF2220001, + 0x81C, 0xF1240001, + 0x81C, 0xF0260001, + 0x81C, 0xEF280001, + 0x81C, 0xEE2A0001, + 0x81C, 0xED2C0001, + 0x81C, 0xEC2E0001, + 0x81C, 0xEB300001, + 0x81C, 0xEA320001, + 0x81C, 0xE9340001, + 0x81C, 0xE8360001, + 0x81C, 0xE7380001, + 0x81C, 0xE63A0001, + 0x81C, 0xE53C0001, + 0x81C, 0xC73E0001, + 0x81C, 0xC6400001, + 0x81C, 0xC5420001, + 0x81C, 0xC4440001, + 0x81C, 0xC3460001, + 0x81C, 0xC2480001, + 0x81C, 0xC14A0001, + 0x81C, 0xA74C0001, + 0x81C, 0xA64E0001, + 0x81C, 0xA5500001, + 0x81C, 0xA4520001, + 0x81C, 0xA3540001, + 0x81C, 0xA2560001, + 0x81C, 0xA1580001, + 0x81C, 0x675A0001, + 0x81C, 0x665C0001, + 0x81C, 0x655E0001, + 0x81C, 0x64600001, + 0x81C, 0x63620001, + 0x81C, 0x48640001, + 0x81C, 0x47660001, + 0x81C, 0x46680001, + 0x81C, 0x456A0001, + 0x81C, 0x446C0001, + 0x81C, 0x436E0001, + 0x81C, 0x42700001, + 0x81C, 0x41720001, + 0x81C, 0x41740001, + 0x81C, 0x41760001, + 0x81C, 0x41780001, + 0x81C, 0x417A0001, + 0x81C, 0x417C0001, + 0x81C, 0x417E0001, + 0xFF0F07D8, 0xDEAD, + 0xFF0F0180, 0xABCD, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F0280, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 
0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F01C0, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F02C0, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F07D8, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 
0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F07D0, 0xCDEF, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xCDCDCDCD, 0xCDCD, + 0x81C, 0xFF800001, + 0x81C, 0xFF820001, + 0x81C, 0xFF840001, + 0x81C, 0xFE860001, + 0x81C, 0xFD880001, + 0x81C, 0xFC8A0001, + 0x81C, 0xFB8C0001, + 0x81C, 0xFA8E0001, + 0x81C, 0xF9900001, + 0x81C, 0xF8920001, + 0x81C, 0xF7940001, + 0x81C, 0xF6960001, + 0x81C, 0xF5980001, + 0x81C, 0xF49A0001, + 0x81C, 0xF39C0001, + 0x81C, 0xF29E0001, + 0x81C, 0xF1A00001, + 0x81C, 0xF0A20001, + 0x81C, 0xEFA40001, + 0x81C, 0xEEA60001, + 0x81C, 0xEDA80001, + 0x81C, 0xECAA0001, + 0x81C, 0xEBAC0001, + 0x81C, 0xEAAE0001, + 0x81C, 0xE9B00001, + 0x81C, 0xE8B20001, + 0x81C, 0xE7B40001, + 0x81C, 0xE6B60001, + 0x81C, 0xE5B80001, + 0x81C, 0xE4BA0001, + 0x81C, 0xE3BC0001, + 0x81C, 0xA8BE0001, + 0x81C, 0xA7C00001, + 0x81C, 0xA6C20001, + 0x81C, 0xA5C40001, + 0x81C, 0xA4C60001, + 0x81C, 0xA3C80001, + 0x81C, 0xA2CA0001, + 0x81C, 0xA1CC0001, + 0x81C, 0x68CE0001, + 0x81C, 0x67D00001, + 0x81C, 0x66D20001, + 0x81C, 0x65D40001, + 0x81C, 0x64D60001, + 0x81C, 0x47D80001, + 0x81C, 0x46DA0001, + 0x81C, 0x45DC0001, + 0x81C, 
0x44DE0001, + 0x81C, 0x43E00001, + 0x81C, 0x42E20001, + 0x81C, 0x08E40001, + 0x81C, 0x07E60001, + 0x81C, 0x06E80001, + 0x81C, 0x05EA0001, + 0x81C, 0x04EC0001, + 0x81C, 0x03EE0001, + 0x81C, 0x02F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xFF0F0180, 0xDEAD, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + 0xE50, 0x00000022, + 0xE50, 0x00000020, +}; + +u32 RTL8821AE_AGC_TAB_ARRAY[] = { + 0x81C, 0xBF000001, + 0x81C, 0xBF020001, + 0x81C, 0xBF040001, + 0x81C, 0xBF060001, + 0x81C, 0xBE080001, + 0x81C, 0xBD0A0001, + 0x81C, 0xBC0C0001, + 0x81C, 0xBA0E0001, + 0x81C, 0xB9100001, + 0x81C, 0xB8120001, + 0x81C, 0xB7140001, + 0x81C, 0xB6160001, + 0x81C, 0xB5180001, + 0x81C, 0xB41A0001, + 0x81C, 0xB31C0001, + 0x81C, 0xB21E0001, + 0x81C, 0xB1200001, + 0x81C, 0xB0220001, + 0x81C, 0xAF240001, + 0x81C, 0xAE260001, + 0x81C, 0xAD280001, + 0x81C, 0xAC2A0001, + 0x81C, 0xAB2C0001, + 0x81C, 0xAA2E0001, + 0x81C, 0xA9300001, + 0x81C, 0xA8320001, + 0x81C, 0xA7340001, + 0x81C, 0xA6360001, + 0x81C, 0xA5380001, + 0x81C, 0xA43A0001, + 0x81C, 0xA33C0001, + 0x81C, 0x673E0001, + 0x81C, 0x66400001, + 0x81C, 0x65420001, + 0x81C, 0x64440001, + 0x81C, 0x63460001, + 0x81C, 0x62480001, + 0x81C, 0x614A0001, + 0x81C, 0x474C0001, + 0x81C, 0x464E0001, + 0x81C, 0x45500001, + 0x81C, 0x44520001, + 0x81C, 0x43540001, + 0x81C, 0x42560001, + 0x81C, 0x41580001, + 0x81C, 0x285A0001, + 0x81C, 0x275C0001, + 0x81C, 0x265E0001, + 0x81C, 0x25600001, + 0x81C, 0x24620001, + 0x81C, 0x0A640001, + 0x81C, 0x09660001, + 0x81C, 0x08680001, + 0x81C, 0x076A0001, + 0x81C, 0x066C0001, + 0x81C, 0x056E0001, + 0x81C, 0x04700001, + 0x81C, 0x03720001, + 0x81C, 0x02740001, + 0x81C, 0x01760001, + 0x81C, 0x01780001, + 0x81C, 0x017A0001, + 0x81C, 0x017C0001, + 0x81C, 0x017E0001, + 0xFF0F02C0, 0xABCD, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0xCDCDCDCD, 0xCDCD, + 0x81C, 0xFF000101, + 0x81C, 0xFF020101, + 0x81C, 0xFE040101, + 0x81C, 0xFD060101, + 0x81C, 0xFC080101, + 0x81C, 0xFD0A0101, + 0x81C, 0xFC0C0101, + 0x81C, 0xFB0E0101, + 0x81C, 0xFA100101, + 0x81C, 0xF9120101, + 0x81C, 0xF8140101, + 0x81C, 0xF7160101, + 0x81C, 0xF6180101, + 0x81C, 0xF51A0101, + 0x81C, 0xF41C0101, + 0x81C, 0xF31E0101, + 0x81C, 0xF2200101, + 0x81C, 0xF1220101, + 0x81C, 0xF0240101, + 0x81C, 0xEF260101, + 0x81C, 0xEE280101, + 0x81C, 
0xED2A0101, + 0x81C, 0xEC2C0101, + 0x81C, 0xEB2E0101, + 0x81C, 0xEA300101, + 0x81C, 0xE9320101, + 0x81C, 0xE8340101, + 0x81C, 0xE7360101, + 0x81C, 0xE6380101, + 0x81C, 0xE53A0101, + 0x81C, 0xE43C0101, + 0x81C, 0xE33E0101, + 0x81C, 0xA5400101, + 0x81C, 0xA4420101, + 0x81C, 0xA3440101, + 0x81C, 0x87460101, + 0x81C, 0x86480101, + 0x81C, 0x854A0101, + 0x81C, 0x844C0101, + 0x81C, 0x694E0101, + 0x81C, 0x68500101, + 0x81C, 0x67520101, + 0x81C, 0x66540101, + 0x81C, 0x49560101, + 0x81C, 0x48580101, + 0x81C, 0x475A0101, + 0x81C, 0x2A5C0101, + 0x81C, 0x295E0101, + 0x81C, 0x28600101, + 0x81C, 0x27620101, + 0x81C, 0x26640101, + 0x81C, 0x25660101, + 0x81C, 0x24680101, + 0x81C, 0x236A0101, + 0x81C, 0x056C0101, + 0x81C, 0x046E0101, + 0x81C, 0x03700101, + 0x81C, 0x02720101, + 0xFF0F02C0, 0xDEAD, + 0x81C, 0x01740101, + 0x81C, 0x01760101, + 0x81C, 0x01780101, + 0x81C, 0x017A0101, + 0x81C, 0x017C0101, + 0x81C, 0x017E0101, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + +}; --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/table.h +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/table.h @@ -0,0 +1,62 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Created on 2010/ 5/18, 1:41 + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_TABLE__H_ +#define __RTL8821AE_TABLE__H_ + +#include +#define RTL8821AEPHY_REG_1TARRAYLEN 344 +extern u32 RTL8821AE_PHY_REG_ARRAY[]; +#define RTL8812AEPHY_REG_1TARRAYLEN 490 +extern u32 RTL8812AE_PHY_REG_ARRAY[]; +#define RTL8821AEPHY_REG_ARRAY_PGLEN 90 +extern u32 RTL8821AE_PHY_REG_ARRAY_PG[]; +#define RTL8812AEPHY_REG_ARRAY_PGLEN 276 +extern u32 RTL8812AE_PHY_REG_ARRAY_PG[]; +//#define RTL8723BE_RADIOA_1TARRAYLEN 206 +//extern u8 *RTL8821AE_TXPWR_LMT_ARRAY[]; +#define RTL8812AE_RADIOA_1TARRAYLEN 1264 +extern u32 RTL8812AE_RADIOA_ARRAY[]; +#define RTL8812AE_RADIOB_1TARRAYLEN 1240 +extern u32 RTL8812AE_RADIOB_ARRAY[]; +#define RTL8821AE_RADIOA_1TARRAYLEN 1176 +extern u32 RTL8821AE_RADIOA_ARRAY[]; +#define RTL8821AEMAC_1T_ARRAYLEN 194 +extern u32 RTL8821AE_MAC_REG_ARRAY[]; +#define RTL8812AEMAC_1T_ARRAYLEN 214 +extern u32 RTL8812AE_MAC_REG_ARRAY[]; +#define RTL8821AEAGCTAB_1TARRAYLEN 382 +extern u32 RTL8821AE_AGC_TAB_ARRAY[]; +#define RTL8812AEAGCTAB_1TARRAYLEN 1312 +extern u32 RTL8812AE_AGC_TAB_ARRAY[]; + + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/trx.c +++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/trx.c @@ -0,0 +1,1050 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
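
The arrays exported through table.h above are flat u32 lists of (register address, value) pairs, with each *ARRAYLEN macro giving the total number of u32 entries (twice the number of pairs). The 0xFF0Fxxxx / 0xABCD / 0xCDEF words that appear inside the arrays look like markers opening board-specific conditional blocks, 0xCDCDCDCD / 0xCDCD appears to introduce the default block, and 0xDEAD appears to close the conditional region. The sketch below shows, under those assumptions, how such a table could be walked; the marker handling and the write_reg callback are illustrative only and are not the driver's actual loader, which lives in the phy code.

    #include <linux/types.h>	/* u32, in a kernel context */

    typedef void (*write_reg_fn)(u32 addr, u32 value);

    /* Illustrative sketch only -- not the driver's actual loader: walk one of
     * the flat (address, value) tables declared in table.h.  The condition-
     * marker semantics below are inferred from the table layout and are
     * assumptions. */
    static void walk_reg_table(const u32 *tbl, u32 len, write_reg_fn write_reg)
    {
    	u32 i;

    	for (i = 0; i + 1 < len; i += 2) {
    		u32 addr = tbl[i];
    		u32 data = tbl[i + 1];

    		if ((addr >> 16) == 0xFF0F || addr == 0xCDCDCDCD) {
    			/* Marker pair: 0xABCD/0xCDEF seem to open a
    			 * board-specific block, 0xCDCD the default block,
    			 * and 0xDEAD to end the conditional region.  A real
    			 * parser decides here which branch applies and skips
    			 * the others; this sketch simply steps over the
    			 * marker. */
    			continue;
    		}

    		write_reg(addr, data);	/* ordinary register programming */
    	}
    }

A caller would pass, for example, RTL8821AE_RADIOA_ARRAY together with RTL8821AE_RADIOA_1TARRAYLEN and whatever register-write primitive the platform provides (the write_reg_fn callback here is a placeholder, not a driver API).
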
+ * + * Larry Finger + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../base.h" +#include "../stats.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "trx.h" +#include "led.h" +#include "dm.h" +#include "phy.h" +u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) +{ + u16 fc = rtl_get_fc(skb); + + if (unlikely(ieee80211_is_beacon(fc))) + return QSLT_BEACON; + if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) + return QSLT_MGNT; + + return skb->priority; +} + +/* mac80211's rate_idx is like this: + * + * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ + * + * B/G rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC_RATE1M-->DESC_RATE54M ==> idx is 0-->11, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15 + * + * 5G band:rx_status->band == IEEE80211_BAND_5GHZ + * A rate: + * (rx_status->flag & RX_FLAG_HT) = 0, + * DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7, + * + * N rate: + * (rx_status->flag & RX_FLAG_HT) = 1, + * DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15 + */ +static int _rtl8821ae_rate_mapping(struct ieee80211_hw *hw, + bool isht, u8 desc_rate) +{ + int rate_idx; + + if (false == isht) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) { +#else + if (IEEE80211_BAND_2GHZ == hw->conf.channel->band) { +#endif + switch (desc_rate) { + case DESC_RATE1M: + rate_idx = 0; + break; + case DESC_RATE2M: + rate_idx = 1; + break; + case DESC_RATE5_5M: + rate_idx = 2; + break; + case DESC_RATE11M: + rate_idx = 3; + break; + case DESC_RATE6M: + rate_idx = 4; + break; + case DESC_RATE9M: + rate_idx = 5; + break; + case DESC_RATE12M: + rate_idx = 6; + break; + case DESC_RATE18M: + rate_idx = 7; + break; + case DESC_RATE24M: + rate_idx = 8; + break; + case DESC_RATE36M: + rate_idx = 9; + break; + case DESC_RATE48M: + rate_idx = 10; + break; + case DESC_RATE54M: + rate_idx = 11; + break; + default: + rate_idx = 0; + break; + } + } else { + switch (desc_rate) { + case DESC_RATE6M: + rate_idx = 0; + break; + case DESC_RATE9M: + rate_idx = 1; + break; + case DESC_RATE12M: + rate_idx = 2; + break; + case DESC_RATE18M: + rate_idx = 3; + break; + case DESC_RATE24M: + rate_idx = 4; + break; + case DESC_RATE36M: + rate_idx = 5; + break; + case DESC_RATE48M: + rate_idx = 6; + break; + case DESC_RATE54M: + rate_idx = 7; + break; + default: + rate_idx = 0; + break; + } + } + } else { + switch(desc_rate) { + case DESC_RATEMCS0: + rate_idx = 0; + break; + case DESC_RATEMCS1: + rate_idx = 1; + break; + case DESC_RATEMCS2: + rate_idx = 2; + break; + case DESC_RATEMCS3: + rate_idx = 3; + break; + case DESC_RATEMCS4: + rate_idx = 4; + break; + case DESC_RATEMCS5: + rate_idx = 5; + break; + case DESC_RATEMCS6: + rate_idx = 6; + break; + case DESC_RATEMCS7: + rate_idx = 7; + break; + case DESC_RATEMCS8: + rate_idx = 8; + break; + case DESC_RATEMCS9: + rate_idx = 9; + break; + case DESC_RATEMCS10: + rate_idx = 10; + break; + case DESC_RATEMCS11: + rate_idx = 11; + break; + case DESC_RATEMCS12: + rate_idx = 12; + break; + case DESC_RATEMCS13: + rate_idx = 13; + break; + case DESC_RATEMCS14: + rate_idx = 14; + break; + case DESC_RATEMCS15: + rate_idx = 15; + break; + default: + rate_idx = 0; + break; + } + } + return rate_idx; +} + +static void _rtl8821ae_query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstatus, u8 *pdesc, + struct rx_fwinfo_8821ae 
*p_drvinfo, bool bpacket_match_bssid, + bool bpacket_toself, bool b_packet_beacon) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct phy_sts_cck_8821ae_t *cck_buf; + struct phy_status_rpt *p_phystRpt = (struct phy_status_rpt *)p_drvinfo; + struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); + char rx_pwr_all = 0, rx_pwr[4]; + u8 rf_rx_num = 0, evm, pwdb_all; + u8 i, max_spatial_stream; + u32 rssi, total_rssi = 0; + bool b_is_cck = pstatus->b_is_cck; + u8 lan_idx,vga_idx; + + /* Record it for next packet processing */ + pstatus->b_packet_matchbssid = bpacket_match_bssid; + pstatus->b_packet_toself = bpacket_toself; + pstatus->b_packet_beacon = b_packet_beacon; + pstatus->rx_mimo_signalquality[0] = -1; + pstatus->rx_mimo_signalquality[1] = -1; + + if (b_is_cck) { + u8 cck_highpwr; + u8 cck_agc_rpt; + /* CCK Driver info Structure is not the same as OFDM packet. */ + cck_buf = (struct phy_sts_cck_8821ae_t *)p_drvinfo; + cck_agc_rpt = cck_buf->cck_agc_rpt; + + /* (1)Hardware does not provide RSSI for CCK */ + /* (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) */ + if (ppsc->rfpwr_state == ERFON) + cck_highpwr = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, + BIT(9)); + else + cck_highpwr = false; + + lan_idx = ((cck_agc_rpt & 0xE0) >> 5); + vga_idx = (cck_agc_rpt & 0x1f); + switch (lan_idx) { + case 7: + if(vga_idx <= 27) + rx_pwr_all = -100 + 2*(27-vga_idx); /*VGA_idx = 27~2*/ + else + rx_pwr_all = -100; + break; + case 6: + rx_pwr_all = -48 + 2*(2-vga_idx); /*VGA_idx = 2~0*/ + break; + case 5: + rx_pwr_all = -42 + 2*(7-vga_idx); /*VGA_idx = 7~5*/ + break; + case 4: + rx_pwr_all = -36 + 2*(7-vga_idx); /*VGA_idx = 7~4*/ + break; + case 3: + rx_pwr_all = -24 + 2*(7-vga_idx); /*VGA_idx = 7~0*/ + break; + case 2: + if(cck_highpwr) + rx_pwr_all = -12 + 2*(5-vga_idx); /*VGA_idx = 5~0*/ + else + rx_pwr_all = -6+ 2*(5-vga_idx); + break; + case 1: + rx_pwr_all = 8-2*vga_idx; + break; + case 0: + rx_pwr_all = 14-2*vga_idx; + break; + default: + break; + } + rx_pwr_all += 6; + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + /* CCK gain is smaller than OFDM/MCS gain, */ + /* so we add gain diff by experiences, + * the val is 6 */ + pwdb_all += 6; + if(pwdb_all > 100) + pwdb_all = 100; + /* modify the offset to make the same + * gain index with OFDM. */ + if(pwdb_all > 34 && pwdb_all <= 42) + pwdb_all -= 2; + else if(pwdb_all > 26 && pwdb_all <= 34) + pwdb_all -= 6; + else if(pwdb_all > 14 && pwdb_all <= 26) + pwdb_all -= 8; + else if(pwdb_all > 4 && pwdb_all <= 14) + pwdb_all -= 4; + if (cck_highpwr == false){ + if (pwdb_all >= 80) + pwdb_all =((pwdb_all-80)<<1)+((pwdb_all-80)>>1)+80; + else if((pwdb_all <= 78) && (pwdb_all >= 20)) + pwdb_all += 3; + if(pwdb_all>100) + pwdb_all = 100; + } + + pstatus->rx_pwdb_all = pwdb_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3) Get Signal Quality (EVM) */ + if (bpacket_match_bssid) { + u8 sq; + + if (pstatus->rx_pwdb_all > 40) + sq = 100; + else { + sq = cck_buf->sq_rpt; + if (sq > 64) + sq = 0; + else if (sq < 20) + sq = 100; + else + sq = ((64 - sq) * 100) / 44; + } + + pstatus->signalquality = sq; + pstatus->rx_mimo_signalquality[0] = sq; + pstatus->rx_mimo_signalquality[1] = -1; + } + } else { + rtlpriv->dm.brfpath_rxenable[0] = + rtlpriv->dm.brfpath_rxenable[1] = true; + + /* (1)Get RSSI for HT rate */ + for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) { + + /* we will judge RF RX path now. 
*/ + if (rtlpriv->dm.brfpath_rxenable[i]) + rf_rx_num++; + + rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110; + + /* Translate DBM to percentage. */ + rssi = rtl_query_rxpwrpercentage(rx_pwr[i]); + total_rssi += rssi; + + /* Get Rx snr value in DB */ + rtlpriv->stats.rx_snr_db[i] = (long)(p_drvinfo->rxsnr[i] / 2); + + /* Record Signal Strength for next packet */ + if (bpacket_match_bssid) + pstatus->rx_mimo_signalstrength[i] = (u8) rssi; + } + + /* (2)PWDB, Average PWDB cacluated by + * hardware (for rate adaptive) */ + rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; + + pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all); + pstatus->rx_pwdb_all = pwdb_all; + pstatus->rxpower = rx_pwr_all; + pstatus->recvsignalpower = rx_pwr_all; + + /* (3)EVM of HT rate */ + if (pstatus->b_is_ht && pstatus->rate >= DESC_RATEMCS8 && + pstatus->rate <= DESC_RATEMCS15) + max_spatial_stream = 2; + else + max_spatial_stream = 1; + + for (i = 0; i < max_spatial_stream; i++) { + evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]); + + if (bpacket_match_bssid) { + /* Fill value in RFD, Get the first + * spatial stream only */ + if (i == 0) + pstatus->signalquality = (u8) (evm & 0xff); + pstatus->rx_mimo_signalquality[i] = (u8) (evm & 0xff); + } + } + } + + /* UI BSS List signal strength(in percentage), + * make it good looking, from 0~100. */ + if (b_is_cck) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + pwdb_all)); + else if (rf_rx_num != 0) + pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw, + total_rssi /= rf_rx_num)); + /*HW antenna diversity*/ + rtldm->fat_table.antsel_rx_keep_0 = p_phystRpt->ant_sel; + rtldm->fat_table.antsel_rx_keep_1 = p_phystRpt->ant_sel_b; + rtldm->fat_table.antsel_rx_keep_2 = p_phystRpt->antsel_rx_keep_2; + +} +#if 0 +static void _rtl8821ae_smart_antenna(struct ieee80211_hw *hw, + struct rtl_stats *pstatus) +{ + struct rtl_dm *rtldm= rtl_dm(rtl_priv(hw)); + struct rtl_efuse *rtlefuse =rtl_efuse(rtl_priv(hw)); + u8 antsel_tr_mux; + struct fast_ant_trainning *pfat_table = &(rtldm->fat_table); + + if (rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV) { + if (pfat_table->fat_state == FAT_TRAINING_STATE) { + if (pstatus->b_packet_toself) { + antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) | + (pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0; + pfat_table->ant_sum_rssi[antsel_tr_mux] += pstatus->rx_pwdb_all; + pfat_table->ant_rssi_cnt[antsel_tr_mux]++; + } + } + } else if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) || + (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)) { + if (pstatus->b_packet_toself || pstatus->b_packet_matchbssid) { + antsel_tr_mux = (pfat_table->antsel_rx_keep_2 << 2) | + (pfat_table->antsel_rx_keep_1 << 1) | pfat_table->antsel_rx_keep_0; + rtl8821ae_dm_ant_sel_statistics(hw, antsel_tr_mux, 0, pstatus->rx_pwdb_all); + } + + } +} +#endif +static void _rtl8821ae_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, struct rtl_stats *pstatus, + u8 *pdesc, struct rx_fwinfo_8821ae *p_drvinfo) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct ieee80211_hdr *hdr; + u8 *tmp_buf; + u8 *praddr; + u8 *psaddr; + u16 fc, type; + bool b_packet_matchbssid, b_packet_toself, b_packet_beacon; + + tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; + + hdr = (struct ieee80211_hdr *)tmp_buf; + fc = le16_to_cpu(hdr->frame_control); + type = WLAN_FC_GET_TYPE(fc); + praddr = hdr->addr1; + psaddr = ieee80211_get_SA(hdr); + 
memcpy(pstatus->psaddr, psaddr, ETH_ALEN); + + b_packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && + (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ? + hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? + hdr->addr2 : hdr->addr3)) && (!pstatus->b_hwerror) && + (!pstatus->b_crc) && (!pstatus->b_icv)); + + b_packet_toself = b_packet_matchbssid && + (!ether_addr_equal(praddr, rtlefuse->dev_addr)); + + if (ieee80211_is_beacon(fc)) + b_packet_beacon = true; + else + b_packet_beacon = false; + + if (b_packet_beacon && b_packet_matchbssid) + rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++; + + _rtl8821ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo, + b_packet_matchbssid, b_packet_toself, + b_packet_beacon); + /*_rtl8821ae_smart_antenna(hw, pstatus); */ + rtl_process_phyinfo(hw, tmp_buf, pstatus); +} + +static void _rtl8821ae_insert_emcontent(struct rtl_tcb_desc *ptcb_desc, + u8 *virtualaddress) +{ + u32 dwtmp = 0; + memset(virtualaddress, 0, 8); + + SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num); + if (ptcb_desc->empkt_num == 1) + dwtmp = ptcb_desc->empkt_len[0]; + else { + dwtmp = ptcb_desc->empkt_len[0]; + dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4; + dwtmp += ptcb_desc->empkt_len[1]; + } + SET_EARLYMODE_LEN0(virtualaddress, dwtmp); + + if (ptcb_desc->empkt_num <= 3) + dwtmp = ptcb_desc->empkt_len[2]; + else { + dwtmp = ptcb_desc->empkt_len[2]; + dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4; + dwtmp += ptcb_desc->empkt_len[3]; + } + SET_EARLYMODE_LEN1(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 5) + dwtmp = ptcb_desc->empkt_len[4]; + else { + dwtmp = ptcb_desc->empkt_len[4]; + dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4; + dwtmp += ptcb_desc->empkt_len[5]; + } + SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF); + SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4); + if (ptcb_desc->empkt_num <= 7) + dwtmp = ptcb_desc->empkt_len[6]; + else { + dwtmp = ptcb_desc->empkt_len[6]; + dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4; + dwtmp += ptcb_desc->empkt_len[7]; + } + SET_EARLYMODE_LEN3(virtualaddress, dwtmp); + if (ptcb_desc->empkt_num <= 9) + dwtmp = ptcb_desc->empkt_len[8]; + else { + dwtmp = ptcb_desc->empkt_len[8]; + dwtmp += ((dwtmp%4)?(4-dwtmp%4):0)+4; + dwtmp += ptcb_desc->empkt_len[9]; + } + SET_EARLYMODE_LEN4(virtualaddress, dwtmp); +} + +bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rx_fwinfo_8821ae *p_drvinfo; + struct ieee80211_hdr *hdr; + + u32 phystatus = GET_RX_DESC_PHYST(pdesc); + + status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); + status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); + status->b_icv = (u16) GET_RX_DESC_ICV(pdesc); + status->b_crc = (u16) GET_RX_DESC_CRC32(pdesc); + status->b_hwerror = (status->b_crc | status->b_icv); + status->decrypted = !GET_RX_DESC_SWDEC(pdesc); + status->rate = (u8) GET_RX_DESC_RXMCS(pdesc); + status->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); + status->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); + status->b_isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); + status->timestamp_low = GET_RX_DESC_TSFL(pdesc); + status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + status->macid = GET_RX_DESC_MACID(pdesc); + status->b_is_ht = (bool)GET_RX_DESC_RXHT(pdesc); + + status->b_is_cck = RX_HAL_IS_CCK_RATE(status->rate); + + if (GET_RX_STATUS_DESC_RPT_SEL(pdesc)) + 
status->packet_report_type = C2H_PACKET; + else + status->packet_report_type = NORMAL_RX; + + if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc)) + status->wake_match = BIT(2); + else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc)) + status->wake_match = BIT(1); + else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc)) + status->wake_match = BIT(0); + else + status->wake_match = 0; + + if (status->wake_match) + RT_TRACE(COMP_RXDESC,DBG_LOUD, + ("GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",status->wake_match )); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; +#else + rx_status->freq = hw->conf.channel->center_freq; + rx_status->band = hw->conf.channel->band; +#endif + + hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size + + status->rx_bufshift); + + if (status->b_crc) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (status->rx_is40Mhzpacket) + rx_status->flag |= RX_FLAG_40MHZ; + + if (status->b_is_ht) + rx_status->flag |= RX_FLAG_HT; + + rx_status->flag |= RX_FLAG_MACTIME_MPDU; + + /* hw will set status->decrypted true, if it finds the + * frame is open data frame or mgmt frame. */ + /* So hw will not decryption robust managment frame + * for IEEE80211w but still set status->decrypted + * true, so here we should set it back to undecrypted + * for IEEE80211w frame, and mac80211 sw will help + * to decrypt it */ + if (status->decrypted) { + if (!hdr) { + WARN_ON_ONCE(true); + pr_err("decrypted is true but hdr NULL, from skb %p\n", + rtl_get_hdr(skb)); + return false; + } + + if ((_ieee80211_is_robust_mgmt_frame(hdr)) && + (ieee80211_has_protected(hdr->frame_control))) + rx_status->flag &= ~RX_FLAG_DECRYPTED; + else + rx_status->flag |= RX_FLAG_DECRYPTED; + } + + /* rate_idx: index of data rate into band's + * supported rates or MCS index if HT rates + * are use (RX_FLAG_HT)*/ + /* Notice: this is diff with windows define */ + rx_status->rate_idx = _rtl8821ae_rate_mapping(hw, + status->b_is_ht, status->rate); + + rx_status->mactime = status->timestamp_low; + if (phystatus == true) { + p_drvinfo = (struct rx_fwinfo_8821ae *)(skb->data + + status->rx_bufshift); + + _rtl8821ae_translate_rx_signal_stuff(hw, + skb, status, pdesc, + p_drvinfo); + } + + /*rx_status->qual = status->signal; */ + rx_status->signal = status->recvsignalpower + 10; + /*rx_status->noise = -status->noise; */ + if (status->packet_report_type == TX_REPORT2){ + status->macid_valid_entry[0] = GET_RX_RPT2_DESC_MACID_VALID_1(pdesc); + status->macid_valid_entry[1] = GET_RX_RPT2_DESC_MACID_VALID_2(pdesc); + } + return true; +} + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) +void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) +#else +/**/ +void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc) +/**/ +#endif +/**/ +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + struct ieee80211_sta *sta = info->control.sta; +#endif +/**/ + u8 *pdesc = (u8 *) pdesc_tx; + u16 seq_number; + u16 
fc = le16_to_cpu(hdr->frame_control); + unsigned int buf_len = 0; + unsigned int skb_len = skb->len; + u8 fw_qsel = _rtl8821ae_map_hwqueue_to_fwqueue(skb, hw_queue); + bool b_firstseg = ((hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + bool b_lastseg = ((hdr->frame_control & + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + dma_addr_t mapping; + u8 bw_40 = 0; + u8 short_gi = 0; + + if (mac->opmode == NL80211_IFTYPE_STATION) { + bw_40 = mac->bw_40; + } else if (mac->opmode == NL80211_IFTYPE_AP || + mac->opmode == NL80211_IFTYPE_ADHOC) { + if (sta) + bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } + seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc); + /* reserve 8 byte for AMPDU early mode */ + if (rtlhal->b_earlymode_enable) { + skb_push(skb, EM_HDR_LEN); + memset(skb->data, 0, EM_HDR_LEN); + } + buf_len = skb->len; + mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(COMP_SEND, DBG_TRACE, + ("DMA mapping error")); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae)); + if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) { + b_firstseg = true; + b_lastseg = true; + } + if (b_firstseg) { + if (rtlhal->b_earlymode_enable) { + SET_TX_DESC_PKT_OFFSET(pdesc, 1); + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN + EM_HDR_LEN); + if (ptcb_desc->empkt_num) { + RT_TRACE(COMP_SEND, DBG_TRACE, + ("Insert 8 byte.pTcb->EMPktNum:%d\n", + ptcb_desc->empkt_num)); + _rtl8821ae_insert_emcontent(ptcb_desc, (u8 *)(skb->data)); + } + } else { + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + } + + /* ptcb_desc->use_driver_rate = true; */ + SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); + if (ptcb_desc->hw_rate > DESC_RATEMCS0) { + short_gi = (ptcb_desc->use_shortgi) ? 1 : 0; + } else { + short_gi = (ptcb_desc->use_shortpreamble) ? 1 :0; + } + SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi); + + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + SET_TX_DESC_AGG_ENABLE(pdesc, 1); + SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14); + } + SET_TX_DESC_SEQ(pdesc, seq_number); + SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->b_rts_enable && + !ptcb_desc->b_cts_enable) ? 1 : 0)); + SET_TX_DESC_HW_RTS_ENABLE(pdesc,0); + SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->b_cts_enable) ? 1 : 0)); + /* SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->b_rts_stbc) ? 1 : 0));*/ + + SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate); + /* SET_TX_DESC_RTS_BW(pdesc, 0);*/ + SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc); + SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <= DESC_RATE54M) ? + (ptcb_desc->b_rts_use_shortpreamble ? 1 : 0) : + (ptcb_desc->b_rts_use_shortgi ? 
1 : 0))); + + if(ptcb_desc->btx_enable_sw_calc_duration) + SET_TX_DESC_NAV_USE_HDR(pdesc, 1); + + if (bw_40) { + if (ptcb_desc->b_packet_bw) { + SET_TX_DESC_DATA_BW(pdesc, 1); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); + } else { + SET_TX_DESC_DATA_BW(pdesc, 0); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc); + } + } else { + SET_TX_DESC_DATA_BW(pdesc, 0); + SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0); + } + + SET_TX_DESC_LINIP(pdesc, 0); + SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len); + if (sta) { + u8 ampdu_density = sta->ht_cap.ampdu_density; + SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); + } + if (info->control.hw_key) { + struct ieee80211_key_conf *keyconf = info->control.hw_key; +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) +/**/ + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + break; + case WLAN_CIPHER_SUITE_CCMP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + break; + default: + SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + break; + + } +/**/ +#else + switch (keyconf->alg) { + case ALG_WEP: + case ALG_TKIP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x1); + break; + case ALG_CCMP: + SET_TX_DESC_SEC_TYPE(pdesc, 0x3); + break; + default: + SET_TX_DESC_SEC_TYPE(pdesc, 0x0); + break; + + } +#endif +/**/ + } + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel); + SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F); + SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF); + SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ? 1 : 0); + SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0); + +#if 0 + SET_TX_DESC_USE_RATE(pdesc, 1); + SET_TX_DESC_TX_RATE(pdesc, 0x04); + + SET_TX_DESC_RETRY_LIMIT_ENABLE(pdesc, 1); + SET_TX_DESC_DATA_RETRY_LIMIT(pdesc, 0x3f); +#endif + + /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/ + /* Set TxRate and RTSRate in TxDesc */ + /* This prevent Tx initial rate of new-coming packets */ + /* from being overwritten by retried packet rate.*/ + if (!ptcb_desc->use_driver_rate) { + /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */ + /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */ + } + if (ieee80211_is_data_qos(fc)) { + if (mac->rdg_en) { + RT_TRACE(COMP_SEND, DBG_TRACE, + ("Enable RDG function.\n")); + SET_TX_DESC_RDG_ENABLE(pdesc, 1); + SET_TX_DESC_HTC(pdesc, 1); + } + } + } + + SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0)); + SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0)); + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len); + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); + //if (rtlpriv->dm.b_useramask) { + if(1){ + SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } else { + SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index); + SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id); + } +/* if (ieee80211_is_data_qos(fc)) + SET_TX_DESC_QOS(pdesc, 1); +*/ + if (!ieee80211_is_data_qos(fc)) { + SET_TX_DESC_HWSEQ_EN(pdesc, 1); + SET_TX_DESC_HWSEQ_SEL(pdesc, 0); + } + SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 
0 : 1)); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { + SET_TX_DESC_BMC(pdesc, 1); + } + + rtl8821ae_dm_set_tx_ant_by_tx_info(hw,pdesc,ptcb_desc->mac_id); + RT_TRACE(COMP_SEND, DBG_TRACE, ("\n")); +} + +void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, + u8 *pdesc, bool b_firstseg, + bool b_lastseg, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + u8 fw_queue = QSLT_BEACON; + + dma_addr_t mapping = pci_map_single(rtlpci->pdev, + skb->data, skb->len, + PCI_DMA_TODEVICE); + + if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { + RT_TRACE(COMP_SEND, DBG_TRACE, + ("DMA mapping error")); + return; + } + CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); + + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + + SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); + + SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); + + SET_TX_DESC_USE_RATE(pdesc, 1); + SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); + SET_TX_DESC_DISABLE_FB(pdesc, 1); + + SET_TX_DESC_DATA_BW(pdesc, 0); + + SET_TX_DESC_HWSEQ_EN(pdesc, 1); + + SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); +/* + if(IsCtrlNDPA(VirtualAddress) || IsMgntNDPA(VirtualAddress)) + { + SET_TX_DESC_DATA_RETRY_LIMIT_8812(pDesc, 5); + SET_TX_DESC_RETRY_LIMIT_ENABLE_8812(pDesc, 1); + + if(IsMgntNDPA(VirtualAddress)) + { + SET_TX_DESC_NDPA_8812(pDesc, 1); + SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb)); + } + else + { + SET_TX_DESC_NDPA_8812(pDesc, 2); + SET_TX_DESC_RTS_SC_8812(pDesc, SCMapping_8812(Adapter, pTcb)); + } + }*/ + + SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len)); + + SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); + + SET_TX_DESC_MACID(pdesc, 0); + + SET_TX_DESC_OWN(pdesc, 1); + + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, + "H2C Tx Cmd Content\n", + pdesc, TX_DESC_SIZE); +} + +void rtl8821ae_set_desc(struct ieee80211_hw * hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val) +{ + if (istx == true) { + switch (desc_name) { + case HW_DESC_OWN: + SET_TX_DESC_OWN(pdesc, 1); + break; + case HW_DESC_TX_NEXTDESC_ADDR: + SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val); + break; + default: + RT_ASSERT(false, ("ERR txdesc :%d" + " not process\n", desc_name)); + break; + } + } else { + switch (desc_name) { + case HW_DESC_RXOWN: + SET_RX_DESC_OWN(pdesc, 1); + break; + case HW_DESC_RXBUFF_ADDR: + SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val); + break; + case HW_DESC_RXPKT_LEN: + SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val); + break; + case HW_DESC_RXERO: + SET_RX_DESC_EOR(pdesc, 1); + break; + default: + RT_ASSERT(false, ("ERR rxdesc :%d " + "not process\n", desc_name)); + break; + } + } +} + +u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name) +{ + u32 ret = 0; + + if (istx == true) { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_TX_DESC_OWN(pdesc); + break; + case HW_DESC_TXBUFF_ADDR: + ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); + break; + default: + RT_ASSERT(false, ("ERR txdesc :%d " + "not process\n", desc_name)); + break; + } + } else { + switch (desc_name) { + case HW_DESC_OWN: + ret = GET_RX_DESC_OWN(pdesc); + break; + case HW_DESC_RXPKT_LEN: + ret = GET_RX_DESC_PKT_LEN(pdesc); + break; + default: + RT_ASSERT(false, ("ERR rxdesc :%d " + "not process\n", desc_name)); + break; + } + } + return ret; +} + +bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct 
rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+	u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+	u8 own = (u8) rtl8821ae_get_desc(entry, true, HW_DESC_OWN);
+
+	/*
+	 * A beacon packet will only use the first descriptor by
+	 * default, and the OWN bit may not be cleared by the
+	 * hardware.
+	 */
+	return !own;
+}
+
+void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (hw_queue == BEACON_QUEUE) {
+		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+	} else {
+		rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+			       BIT(0) << (hw_queue));
+	}
+}
--- linux-3.13.0.orig/drivers/staging/rtl8821ae/rtl8821ae/trx.h
+++ linux-3.13.0/drivers/staging/rtl8821ae/rtl8821ae/trx.h
@@ -0,0 +1,641 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL8821AE_TRX_H__ +#define __RTL8821AE_TRX_H__ + +#define TX_DESC_SIZE 40 +#define TX_DESC_AGGR_SUBFRAME_SIZE 32 + +#define RX_DESC_SIZE 32 +#define RX_DRV_INFO_SIZE_UNIT 8 + +#define TX_DESC_NEXT_DESC_OFFSET 40 +#define USB_HWDESC_HEADER_LEN 40 +#define CRCLENGTH 4 + +#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) +#define SET_TX_DESC_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) +#define SET_TX_DESC_BMC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) +#define SET_TX_DESC_HTC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) +#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) +#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) +#define SET_TX_DESC_LINIP(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) +#define SET_TX_DESC_NO_ACM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) +#define SET_TX_DESC_GF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_TX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_TX_DESC_PKT_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 16) +#define GET_TX_DESC_OFFSET(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 8) +#define GET_TX_DESC_BMC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 1) +#define GET_TX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 25, 1) +#define GET_TX_DESC_LAST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_TX_DESC_FIRST_SEG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_TX_DESC_LINIP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_TX_DESC_NO_ACM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_TX_DESC_GF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_TX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_TX_DESC_MACID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val) +#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) +#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) +#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) +#define SET_TX_DESC_PIFS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) +#define SET_TX_DESC_RATE_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val) +#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) +#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) +#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val) + + +#define SET_TX_DESC_PAID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val) +#define SET_TX_DESC_CCA_RTS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val) +#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val) +#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val) +#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) +#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val) +#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ + 
SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) +#define SET_TX_DESC_RAW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) +#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) +#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) +#define SET_TX_DESC_BT_INT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val) +#define SET_TX_DESC_GID(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val) + + +#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val) +#define SET_TX_DESC_CHK_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val) +#define SET_TX_DESC_EARLY_MODE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val) +#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val) +#define SET_TX_DESC_USE_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val) +#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val) +#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val) +#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val) +#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val) +#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val) +#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val) +#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val) +#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val) +#define SET_TX_DESC_NDPA(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val) +#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val) +#define SET_TX_DESC_TX_ANT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 4, __val) + +#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val) +#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val) +#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val) +#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val) +#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val) +#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val) + + +#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val) +#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ + SET_BITS_TO_LE_1BYTE(__pdesc+20, 6, 1, __val) +#define SET_TX_DESC_DATA_BW(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val) +#define SET_TX_DESC_DATA_LDPC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) +#define SET_TX_DESC_DATA_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val) +#define SET_TX_DESC_CTROL_STBC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val) +#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val) +#define SET_TX_DESC_RTS_SC(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) + + +#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, 
__val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) + +#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) + +#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val) + +#define SET_TX_DESC_SEQ(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val) + +#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) + +#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) + + +#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val) + +#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+48, 0, 32) + +#define GET_RX_DESC_PKT_LEN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 0, 14) +#define GET_RX_DESC_CRC32(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 14, 1) +#define GET_RX_DESC_ICV(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 15, 1) +#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 16, 4) +#define GET_RX_DESC_SECURITY(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 20, 3) +#define GET_RX_DESC_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 23, 1) +#define GET_RX_DESC_SHIFT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 24, 2) +#define GET_RX_DESC_PHYST(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 26, 1) +#define GET_RX_DESC_SWDEC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 27, 1) +#define GET_RX_DESC_LS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 28, 1) +#define GET_RX_DESC_FS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 29, 1) +#define GET_RX_DESC_EOR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 30, 1) +#define GET_RX_DESC_OWN(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc, 31, 1) + +#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) +#define SET_RX_DESC_EOR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) +#define SET_RX_DESC_OWN(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + +#define GET_RX_DESC_MACID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 0, 7) +#define GET_RX_DESC_TID(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 8, 4) +#define GET_RX_DESC_AMSDU(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) +#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) +#define GET_RX_DESC_PAGGR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) +#define GET_RX_DESC_A1_FIT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) +#define GET_RX_DESC_CHKERR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) +#define GET_RX_DESC_IPVER(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) +#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 22, 1) +#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 23, 1) +#define GET_RX_DESC_PAM(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) +#define GET_RX_DESC_PWR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) +#define GET_RX_DESC_MD(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) +#define GET_RX_DESC_MF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) +#define GET_RX_DESC_TYPE(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) +#define GET_RX_DESC_MC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) +#define GET_RX_DESC_BC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) + + +#define GET_RX_DESC_SEQ(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) +#define GET_RX_DESC_FRAG(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) +#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 16, 1) +#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc) \ + 
LE_BITS_TO_4BYTE(__pdesc+8, 18, 6) +#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+8, 28, 1) + + +#define GET_RX_DESC_RXMCS(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 0, 7) +#define GET_RX_DESC_RXHT(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) +#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 7, 1) +#define GET_RX_DESC_HTC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) +#define GET_RX_STATUS_DESC_EOSP(__pdesc) \ + LE_BITS_TO_4BYTE( __pdesc+12, 11, 1) +#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \ + LE_BITS_TO_4BYTE( __pdesc+12, 12, 2) + +#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE( __pdesc+12, 29, 1) +#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE( __pdesc+12, 30, 1) +#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \ + LE_BITS_TO_4BYTE( __pdesc+12, 31, 1) + +#define GET_RX_DESC_SPLCP(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 0, 1) +#define GET_RX_STATUS_DESC_LDPC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 1, 1) +#define GET_RX_STATUS_DESC_STBC(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 2, 1) +#define GET_RX_DESC_BW(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+16, 4, 2) + +#define GET_RX_DESC_TSFL(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) + +#define GET_RX_DESC_BUFF_ADDR(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) +#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ + LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) + +#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) +#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ + SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) + + +/* TX report 2 format in Rx desc*/ + +#define GET_RX_RPT2_DESC_PKT_LEN(__pRxStatusDesc) \ + LE_BITS_TO_4BYTE( __pRxStatusDesc, 0, 9) +#define GET_RX_RPT2_DESC_MACID_VALID_1(__pRxStatusDesc) \ + LE_BITS_TO_4BYTE( __pRxStatusDesc+16, 0, 32) +#define GET_RX_RPT2_DESC_MACID_VALID_2(__pRxStatusDesc) \ + LE_BITS_TO_4BYTE( __pRxStatusDesc+20, 0, 32) + +#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value) +#define SET_EARLYMODE_LEN0(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value) +#define SET_EARLYMODE_LEN1(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value) +#define SET_EARLYMODE_LEN2_1(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value) +#define SET_EARLYMODE_LEN2_2(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value) +#define SET_EARLYMODE_LEN3(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value) +#define SET_EARLYMODE_LEN4(__paddr, __value) \ + SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value) + +#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ +do { \ + if(_size > TX_DESC_NEXT_DESC_OFFSET) \ + memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \ + else \ + memset(__pdesc, 0, _size); \ +} while (0); + +#define RX_HAL_IS_CCK_RATE(rxmcs)\ + (rxmcs == DESC_RATE1M ||\ + rxmcs == DESC_RATE2M ||\ + rxmcs == DESC_RATE5_5M ||\ + rxmcs == DESC_RATE11M) + +#define IS_LITTLE_ENDIAN 1 + +struct phy_rx_agc_info_t { + #if IS_LITTLE_ENDIAN + u8 gain:7,trsw:1; + #else + u8 trsw:1,gain:7; + #endif +}; +struct phy_status_rpt{ + struct phy_rx_agc_info_t path_agc[2]; + u8 ch_corr[2]; + u8 cck_sig_qual_ofdm_pwdb_all; + u8 cck_agc_rpt_ofdm_cfosho_a; + u8 cck_rpt_b_ofdm_cfosho_b; + u8 rsvd_1;//ch_corr_msb; + u8 noise_power_db_msb; + u8 path_cfotail[2]; + u8 pcts_mask[2]; + u8 stream_rxevm[2]; + u8 path_rxsnr[2]; + u8 noise_power_db_lsb; + u8 rsvd_2[3]; + u8 stream_csi[2]; + u8 
stream_target_csi[2]; + u8 sig_evm; + u8 rsvd_3; +#if IS_LITTLE_ENDIAN + u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ + u8 sgi_en:1; + u8 rxsc:2; + u8 idle_long:1; + u8 r_ant_train_en:1; + u8 ant_sel_b:1; + u8 ant_sel:1; +#else /* _BIG_ENDIAN_ */ + u8 ant_sel:1; + u8 ant_sel_b:1; + u8 r_ant_train_en:1; + u8 idle_long:1; + u8 rxsc:2; + u8 sgi_en:1; + u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ +#endif +}__packed; + +struct rx_fwinfo_8821ae { + u8 gain_trsw[4]; + u8 pwdb_all; + u8 cfosho[4]; + u8 cfotail[4]; + char rxevm[2]; + char rxsnr[4]; + u8 pdsnr[2]; + u8 csi_current[2]; + u8 csi_target[2]; + u8 sigevm; + u8 max_ex_pwr; + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +} __packed; + +struct tx_desc_8821ae { + u32 pktsize:16; + u32 offset:8; + u32 bmc:1; + u32 htc:1; + u32 lastseg:1; + u32 firstseg:1; + u32 linip:1; + u32 noacm:1; + u32 gf:1; + u32 own:1; + + u32 macid:6; + u32 rsvd0:2; + u32 queuesel:5; + u32 rd_nav_ext:1; + u32 lsig_txop_en:1; + u32 pifs:1; + u32 rateid:4; + u32 nav_usehdr:1; + u32 en_descid:1; + u32 sectype:2; + u32 pktoffset:8; + + u32 rts_rc:6; + u32 data_rc:6; + u32 agg_en:1; + u32 rdg_en:1; + u32 bar_retryht:2; + u32 agg_break:1; + u32 morefrag:1; + u32 raw:1; + u32 ccx:1; + u32 ampdudensity:3; + u32 bt_int:1; + u32 ant_sela:1; + u32 ant_selb:1; + u32 txant_cck:2; + u32 txant_l:2; + u32 txant_ht:2; + + u32 nextheadpage:8; + u32 tailpage:8; + u32 seq:12; + u32 cpu_handle:1; + u32 tag1:1; + u32 trigger_int:1; + u32 hwseq_en:1; + + u32 rtsrate:5; + u32 apdcfe:1; + u32 qos:1; + u32 hwseq_ssn:1; + u32 userrate:1; + u32 dis_rtsfb:1; + u32 dis_datafb:1; + u32 cts2self:1; + u32 rts_en:1; + u32 hwrts_en:1; + u32 portid:1; + u32 pwr_status:3; + u32 waitdcts:1; + u32 cts2ap_en:1; + u32 txsc:2; + u32 stbc:2; + u32 txshort:1; + u32 txbw:1; + u32 rtsshort:1; + u32 rtsbw:1; + u32 rtssc:2; + u32 rtsstbc:2; + + u32 txrate:6; + u32 shortgi:1; + u32 ccxt:1; + u32 txrate_fb_lmt:5; + u32 rtsrate_fb_lmt:4; + u32 retrylmt_en:1; + u32 txretrylmt:6; + u32 usb_txaggnum:8; + + u32 txagca:5; + u32 txagcb:5; + u32 usemaxlen:1; + u32 maxaggnum:5; + u32 mcsg1maxlen:4; + u32 mcsg2maxlen:4; + u32 mcsg3maxlen:4; + u32 mcs7sgimaxlen:4; + + u32 txbuffersize:16; + u32 sw_offset30:8; + u32 sw_offset31:4; + u32 rsvd1:1; + u32 antsel_c:1; + u32 null_0:1; + u32 null_1:1; + + u32 txbuffaddr; + u32 txbufferaddr64; + u32 nextdescaddress; + u32 nextdescaddress64; + + u32 reserve_pass_pcie_mm_limit[4]; +} __packed; + +struct rx_desc_8821ae { + u32 length:14; + u32 crc32:1; + u32 icverror:1; + u32 drv_infosize:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phystatus:1; + u32 swdec:1; + u32 lastseg:1; + u32 firstseg:1; + u32 eor:1; + u32 own:1; + + u32 macid:6; + u32 tid:4; + u32 hwrsvd:5; + u32 paggr:1; + u32 faggr:1; + u32 a1_fit:4; + u32 a2_fit:4; + u32 pam:1; + u32 pwr:1; + u32 moredata:1; + u32 morefrag:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + + u32 seq:12; + u32 frag:4; + u32 nextpktlen:14; + u32 nextind:1; + u32 rsvd:1; + + u32 rxmcs:6; + u32 rxht:1; + u32 amsdu:1; + u32 splcp:1; + u32 bandwidth:1; + u32 htc:1; + u32 tcpchk_rpt:1; + u32 ipcchk_rpt:1; + u32 tcpchk_valid:1; + u32 hwpcerr:1; + u32 hwpcind:1; + u32 iv0:16; + + u32 iv1; + + u32 tsfl; + + u32 bufferaddress; + u32 bufferaddress64; + +} __packed; + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) +void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); +#else +/**/ 
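+/*
+ * Editorial sketch, not part of the original Realtek patch: every GET_xxx and
+ * SET_xxx accessor earlier in this header boils down to "read or write an
+ * n-bit field at a given bit offset within one little-endian 32-bit word of
+ * the descriptor", via the driver's LE_BITS_TO_4BYTE and SET_BITS_TO_LE_4BYTE
+ * helpers defined elsewhere. The function below is a hypothetical, read-only
+ * illustration of that idea; it is not the driver's implementation.
+ */
+static inline u32 rtl_example_get_le_bits(const u8 *desc, unsigned int byte_off,
+					   unsigned int bit_off, unsigned int nbits)
+{
+	/* Interpret the addressed descriptor word as little-endian on any host. */
+	u32 dword = le32_to_cpu(*(const __le32 *)(desc + byte_off));
+	u32 mask = (nbits >= 32) ? 0xffffffff : ((1U << nbits) - 1);
+
+	return (dword >> bit_off) & mask;
+}
+/*
+ * For example, rtl_example_get_le_bits(pdesc, 0, 0, 14) reads the same field
+ * as GET_RX_DESC_PKT_LEN(pdesc), and (pdesc, 4, 0, 7) matches
+ * GET_RX_DESC_MACID(pdesc).
+ */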
+void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, u8 *txbd, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 hw_queue, struct rtl_tcb_desc *ptcb_desc); +/**/ +#endif +/**/ +bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *status, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl8821ae_set_desc(struct ieee80211_hw * hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val); +u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name); +bool rtl8821ae_is_tx_desc_closed(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); +void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); +void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, + bool b_firstseg, bool b_lastseg, + struct sk_buff *skb); +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/stats.c +++ linux-3.13.0/drivers/staging/rtl8821ae/stats.c @@ -0,0 +1,283 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ +#include "wifi.h" +#include "stats.h" +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +#include +#endif + +u8 rtl_query_rxpwrpercentage(char antpower) +{ + if ((antpower <= -100) || (antpower >= 20)) + return 0; + else if (antpower >= 0) + return 100; + else + return (100 + antpower); +} +//EXPORT_SYMBOL(rtl_query_rxpwrpercentage); + +u8 rtl_evm_db_to_percentage(char value) +{ + char ret_val; + ret_val = value; + + if (ret_val >= 0) + ret_val = 0; + if (ret_val <= -33) + ret_val = -33; + ret_val = 0 - ret_val; + ret_val *= 3; + if (ret_val == 99) + ret_val = 100; + + return ret_val; +} +//EXPORT_SYMBOL(rtl_evm_db_to_percentage); + +long rtl_translate_todbm(struct ieee80211_hw *hw, + u8 signal_strength_index) +{ + long signal_power; + + signal_power = (long)((signal_strength_index + 1) >> 1); + signal_power -= 95; + return signal_power; +} + +long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig) +{ + long retsig; + + if (currsig >= 61 && currsig <= 100) + retsig = 90 + ((currsig - 60) / 4); + else if (currsig >= 41 && currsig <= 60) + retsig = 78 + ((currsig - 40) / 2); + else if (currsig >= 31 && currsig <= 40) + retsig = 66 + (currsig - 30); + else if (currsig >= 21 && currsig <= 30) + retsig = 54 + (currsig - 20); + else if (currsig >= 5 && currsig <= 20) + retsig = 42 + (((currsig - 5) * 2) / 3); + else if (currsig == 4) + retsig = 36; + else if (currsig == 3) + retsig = 27; + else if (currsig == 2) + retsig = 18; + else if (currsig == 1) + retsig = 9; + else + retsig = currsig; + + return retsig; +} +//EXPORT_SYMBOL(rtl_signal_scale_mapping); + +void rtl_process_ui_rssi(struct ieee80211_hw *hw, struct rtl_stats *pstatus) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 rfpath; + u32 last_rssi, tmpval; + + if (!pstatus->b_packet_toself && !pstatus->b_packet_beacon) + return; + + rtlpriv->stats.pwdb_all_cnt += pstatus->rx_pwdb_all; + rtlpriv->stats.rssi_calculate_cnt++; + + if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) { + rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX; + last_rssi = rtlpriv->stats.ui_rssi.elements[ + rtlpriv->stats.ui_rssi.index]; + rtlpriv->stats.ui_rssi.total_val -= last_rssi; + } + rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength; + rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] = + pstatus->signalstrength; + if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX) + rtlpriv->stats.ui_rssi.index = 0; + tmpval = rtlpriv->stats.ui_rssi.total_val / + rtlpriv->stats.ui_rssi.total_num; + rtlpriv->stats.signal_strength = rtl_translate_todbm(hw, + (u8) tmpval); + pstatus->rssi = rtlpriv->stats.signal_strength; + + if (pstatus->b_is_cck) + return; + + for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; + rfpath++) { + if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + pstatus->rx_mimo_signalstrength[rfpath]; + + } + if (pstatus->rx_mimo_signalstrength[rfpath] > + rtlpriv->stats.rx_rssi_percentage[rfpath]) { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + ((rtlpriv->stats.rx_rssi_percentage[rfpath] * + (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_mimo_signalstrength[rfpath])) / + (RX_SMOOTH_FACTOR); + rtlpriv->stats.rx_rssi_percentage[rfpath] = + rtlpriv->stats.rx_rssi_percentage[rfpath] + 1; + } else { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + ((rtlpriv->stats.rx_rssi_percentage[rfpath] 
* + (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_mimo_signalstrength[rfpath])) / + (RX_SMOOTH_FACTOR); + } + rtlpriv->stats.rx_snr_db[rfpath] = pstatus->rx_snr[rfpath]; + rtlpriv->stats.rx_evm_dbm[rfpath] = + pstatus->rx_mimo_evm_dbm[rfpath]; + rtlpriv->stats.rx_cfo_short[rfpath] = + pstatus->cfo_short[rfpath]; + rtlpriv->stats.rx_cfo_tail[rfpath] = pstatus->cfo_tail[rfpath]; + } +} + +static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw, + struct rtl_stats *pstatus) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int weighting = 0; + + if (rtlpriv->stats.recv_signal_power == 0) + rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower; + if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power) + weighting = 5; + else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power) + weighting = (-5); + rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power * + 5 + pstatus->recvsignalpower + weighting) / 6; +} + +static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_sta_info *drv_priv = NULL; + struct ieee80211_sta *sta = NULL; + long undecorated_smoothed_pwdb; + + rcu_read_lock(); + if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) + sta = rtl_find_sta(hw, pstatus->psaddr); + + /* adhoc or ap mode */ + if (sta) { + drv_priv = (struct rtl_sta_info *) sta->drv_priv; + undecorated_smoothed_pwdb = + drv_priv->rssi_stat.undecorated_smoothed_pwdb; + } else { + undecorated_smoothed_pwdb = + rtlpriv->dm.undecorated_smoothed_pwdb; + } + + if (undecorated_smoothed_pwdb < 0) + undecorated_smoothed_pwdb = pstatus->rx_pwdb_all; + if (pstatus->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) { + undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + undecorated_smoothed_pwdb = undecorated_smoothed_pwdb + 1; + } else { + undecorated_smoothed_pwdb = (((undecorated_smoothed_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + } + + if(sta) { + drv_priv->rssi_stat.undecorated_smoothed_pwdb = + undecorated_smoothed_pwdb; + } else { + rtlpriv->dm.undecorated_smoothed_pwdb = undecorated_smoothed_pwdb; + } + rcu_read_unlock(); + + rtl_update_rxsignalstatistics(hw, pstatus); +} + +static void rtl_process_ui_link_quality(struct ieee80211_hw *hw, + struct rtl_stats *pstatus) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 last_evm, n_stream, tmpval; + + if (pstatus->signalquality == 0) + return; + + if (rtlpriv->stats.ui_link_quality.total_num++ >= + PHY_LINKQUALITY_SLID_WIN_MAX) { + rtlpriv->stats.ui_link_quality.total_num = + PHY_LINKQUALITY_SLID_WIN_MAX; + last_evm = rtlpriv->stats.ui_link_quality.elements[ + rtlpriv->stats.ui_link_quality.index]; + rtlpriv->stats.ui_link_quality.total_val -= last_evm; + } + rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality; + rtlpriv->stats.ui_link_quality.elements[ + rtlpriv->stats.ui_link_quality.index++] = + pstatus->signalquality; + if (rtlpriv->stats.ui_link_quality.index >= + PHY_LINKQUALITY_SLID_WIN_MAX) + rtlpriv->stats.ui_link_quality.index = 0; + tmpval = rtlpriv->stats.ui_link_quality.total_val / + rtlpriv->stats.ui_link_quality.total_num; + rtlpriv->stats.signal_quality = tmpval; + rtlpriv->stats.last_sigstrength_inpercent = tmpval; + for (n_stream = 0; n_stream < 2; n_stream++) { + if (pstatus->rx_mimo_signalquality[n_stream] != -1) { + if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) { + 
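+				/*
+				 * Seed the running EVM with the first sample; after
+				 * seeding, each sample is folded in below as an
+				 * exponential moving average:
+				 *   avg = (avg * (RX_SMOOTH_FACTOR - 1) + sample)
+				 *         / RX_SMOOTH_FACTOR
+				 * With RX_SMOOTH_FACTOR == 20, a new reading moves
+				 * the reported value by 1/20 of the difference, the
+				 * same smoothing used for rx_rssi_percentage and the
+				 * undecorated PWDB earlier in this file.
+				 */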
rtlpriv->stats.rx_evm_percentage[n_stream] = + pstatus->rx_mimo_signalquality[n_stream]; + } + rtlpriv->stats.rx_evm_percentage[n_stream] = + ((rtlpriv->stats.rx_evm_percentage[n_stream] + * (RX_SMOOTH_FACTOR - 1)) + + (pstatus->rx_mimo_signalquality[n_stream] * 1)) / + (RX_SMOOTH_FACTOR); + } + } +} + +void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, + struct rtl_stats *pstatus) +{ + + if (!pstatus->b_packet_matchbssid) + return; + + rtl_process_ui_rssi(hw, pstatus); + rtl_process_pwdb(hw, pstatus); + rtl_process_ui_link_quality(hw, pstatus); +} +//EXPORT_SYMBOL(rtl_process_phyinfo); --- linux-3.13.0.orig/drivers/staging/rtl8821ae/stats.h +++ linux-3.13.0/drivers/staging/rtl8821ae/stats.h @@ -0,0 +1,46 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_STATS_H__ +#define __RTL_STATS_H__ + +#define PHY_RSSI_SLID_WIN_MAX 100 +#define PHY_LINKQUALITY_SLID_WIN_MAX 20 +#define PHY_BEACON_RSSI_SLID_WIN_MAX 10 + +/* Rx smooth factor */ +#define RX_SMOOTH_FACTOR 20 + +u8 rtl_query_rxpwrpercentage(char antpower); +u8 rtl_evm_db_to_percentage(char value); +long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig); +void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, + struct rtl_stats *pstatus); + +#endif --- linux-3.13.0.orig/drivers/staging/rtl8821ae/wifi.h +++ linux-3.13.0/drivers/staging/rtl8821ae/wifi.h @@ -0,0 +1,2532 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger + * + *****************************************************************************/ + +#ifndef __RTL_WIFI_H__ +#define __RTL_WIFI_H__ + +#include +#include +#include +#include +#include +#include +#include "debug.h" + + +#define RF_CHANGE_BY_INIT 0 +#define RF_CHANGE_BY_IPS BIT(28) +#define RF_CHANGE_BY_PS BIT(29) +#define RF_CHANGE_BY_HW BIT(30) +#define RF_CHANGE_BY_SW BIT(31) + +#define IQK_ADDA_REG_NUM 16 +#define IQK_MAC_REG_NUM 4 +#define IQK_THRESHOLD 8 + +#define MAX_KEY_LEN 61 +#define KEY_BUF_SIZE 5 + +/* QoS related. */ +/*aci: 0x00 Best Effort*/ +/*aci: 0x01 Background*/ +/*aci: 0x10 Video*/ +/*aci: 0x11 Voice*/ +/*Max: define total number.*/ +#define AC0_BE 0 +#define AC1_BK 1 +#define AC2_VI 2 +#define AC3_VO 3 +#define AC_MAX 4 +#define QOS_QUEUE_NUM 4 +#define RTL_MAC80211_NUM_QUEUE 5 + +#define QBSS_LOAD_SIZE 5 +#define MAX_WMMELE_LENGTH 64 + +#define TOTAL_CAM_ENTRY 32 + +/*slot time for 11g. */ +#define RTL_SLOT_TIME_9 9 +#define RTL_SLOT_TIME_20 20 + +/*related with tcp/ip. */ +/*if_ehther.h*/ +#define ETH_P_PAE 0x888E /*Port Access Entity + *(IEEE 802.1X) */ +#define ETH_P_IP 0x0800 /*Internet Protocol packet */ +#define ETH_P_ARP 0x0806 /*Address Resolution packet */ +#define SNAP_SIZE 6 +#define PROTOC_TYPE_SIZE 2 + +/*related with 802.11 frame*/ +#define MAC80211_3ADDR_LEN 24 +#define MAC80211_4ADDR_LEN 30 + +#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max + * channel number */ +#define CHANNEL_MAX_NUMBER_2G 14 +#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to + *"phy_GetChnlGroup8812A" and + * "Hal_ReadTxPowerInfo8812A"*/ +#define CHANNEL_MAX_NUMBER_5G_80M 7 +#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, ch4~9, ch10~14 + * total three groups */ +#define MAX_PG_GROUP 13 +#define CHANNEL_GROUP_MAX_2G 3 +#define CHANNEL_GROUP_IDX_5GL 3 +#define CHANNEL_GROUP_IDX_5GM 6 +#define CHANNEL_GROUP_IDX_5GH 9 +#define CHANNEL_GROUP_MAX_5G 9 +#define CHANNEL_MAX_NUMBER_2G 14 +#define AVG_THERMAL_NUM 8 +#define AVG_THERMAL_NUM_92E 4 +#define AVG_THERMAL_NUM_88E 4 +#define AVG_THERMAL_NUM_8723BE 4 +#define MAX_TID_COUNT 9 +#define MAX_NUM_RATES 264 + +/*for 88E use*/ +/*It must always set to 4, otherwise read efuse table secquence will be wrong.*/ +#define MAX_TX_COUNT 4 +#define MAX_RF_PATH 4 +#define MAX_CHNL_GROUP_24G 6 +#define MAX_CHNL_GROUP_5G 14 + +/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. 
*/ +#define MAX_TX_QUEUE 9 + +#define TX_PWR_BY_RATE_NUM_BAND 2 +#define TX_PWR_BY_RATE_NUM_RF 4 +#define TX_PWR_BY_RATE_NUM_SECTION 12 +#define MAX_BASE_NUM_IN_PHY_REG_PG_24G 6 +#define MAX_BASE_NUM_IN_PHY_REG_PG_5G 5 + +#define DELTA_SWINGIDX_SIZE 30 +#define BAND_NUM 3 +/*Now, it's just for 8192ee + *not OK yet, keep it 0*/ +#define DMA_IS_64BIT 0 +#define RTL8192EE_SEG_NUM 1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */ + +struct txpower_info_2g { + u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; + u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; + /*If only one tx, only BW20 and OFDM are used.*/ + u8 cck_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT]; +}; + +struct txpower_info_5g { + u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_5G]; + /*If only one tx, only BW20, OFDM, BW80 and BW160 are used.*/ + u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT]; + u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT]; +}; + + +/* for early mode */ +#define EM_HDR_LEN 8 +#define FCS_LEN 4 + +#define MAX_VIRTUAL_MAC 1 + +enum rf_tx_num { + RF_1TX = 0, + RF_2TX, + RF_MAX_TX_NUM, + RF_TX_NUM_NONIMPLEMENT, +}; + +enum rate_section { + CCK = 0, + OFDM, + HT_MCS0_MCS7, + HT_MCS8_MCS15, + VHT_1SSMCS0_1SSMCS9, + VHT_2SSMCS0_2SSMCS9, +}; + +enum intf_type { + INTF_PCI = 0, + INTF_USB = 1, +}; + +enum radio_path { + RF90_PATH_A = 0, + RF90_PATH_B = 1, + RF90_PATH_C = 2, + RF90_PATH_D = 3, +}; + +enum rt_eeprom_type { + EEPROM_93C46, + EEPROM_93C56, + EEPROM_BOOT_EFUSE, +}; + +enum rtl_status { + RTL_STATUS_INTERFACE_START = 0, +}; + +enum hardware_type { + HARDWARE_TYPE_RTL8192E, + HARDWARE_TYPE_RTL8192U, + HARDWARE_TYPE_RTL8192SE, + HARDWARE_TYPE_RTL8192SU, + HARDWARE_TYPE_RTL8192CE, + HARDWARE_TYPE_RTL8192CU, + HARDWARE_TYPE_RTL8192DE, + HARDWARE_TYPE_RTL8192DU, + HARDWARE_TYPE_RTL8723AE, + HARDWARE_TYPE_RTL8188EE, + HARDWARE_TYPE_RTL8723BE, + HARDWARE_TYPE_RTL8192EE, + HARDWARE_TYPE_RTL8821AE, + HARDWARE_TYPE_RTL8812AE, + /* keep it last */ + HARDWARE_TYPE_NUM +}; + +enum scan_operation_backup_opt { + SCAN_OPT_BACKUP_BAND0=0, + SCAN_OPT_BACKUP_BAND1, + SCAN_OPT_RESTORE, + SCAN_OPT_MAX +}; + +/*RF state.*/ +enum rf_pwrstate { + ERFON, + ERFSLEEP, + ERFOFF +}; + +struct bb_reg_def { + u32 rfintfs; + u32 rfintfi; + u32 rfintfo; + u32 rfintfe; + u32 rf3wire_offset; + u32 rflssi_select; + u32 rftxgain_stage; + u32 rfhssi_para1; + u32 rfhssi_para2; + u32 rfswitch_control; + u32 rfagc_control1; + u32 rfagc_control2; + u32 rfrxiq_imbalance; + u32 rfrx_afe; + u32 rftxiq_imbalance; + u32 rftx_afe; + u32 rflssi_readback; + u32 rflssi_readbackpi; +}; + +enum io_type { + IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0, + IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1, + IO_CMD_RESUME_DM_BY_SCAN = 2, +}; + +enum hw_variables { + HW_VAR_ETHER_ADDR, + HW_VAR_MULTICAST_REG, + HW_VAR_BASIC_RATE, + HW_VAR_BSSID, + HW_VAR_MEDIA_STATUS, + HW_VAR_SECURITY_CONF, + HW_VAR_BEACON_INTERVAL, + HW_VAR_ATIM_WINDOW, + HW_VAR_LISTEN_INTERVAL, + HW_VAR_CS_COUNTER, + HW_VAR_DEFAULTKEY0, + HW_VAR_DEFAULTKEY1, + HW_VAR_DEFAULTKEY2, + HW_VAR_DEFAULTKEY3, + HW_VAR_SIFS, + HW_VAR_DIFS, + HW_VAR_EIFS, + HW_VAR_SLOT_TIME, + HW_VAR_ACK_PREAMBLE, + HW_VAR_CW_CONFIG, + HW_VAR_CW_VALUES, + HW_VAR_RATE_FALLBACK_CONTROL, + HW_VAR_CONTENTION_WINDOW, + HW_VAR_RETRY_COUNT, + HW_VAR_TR_SWITCH, + HW_VAR_COMMAND, + HW_VAR_WPA_CONFIG, + HW_VAR_AMPDU_MIN_SPACE, + 
HW_VAR_SHORTGI_DENSITY, + HW_VAR_AMPDU_FACTOR, + HW_VAR_MCS_RATE_AVAILABLE, + HW_VAR_AC_PARAM, + HW_VAR_ACM_CTRL, + HW_VAR_DIS_Req_Qsize, + HW_VAR_CCX_CHNL_LOAD, + HW_VAR_CCX_NOISE_HISTOGRAM, + HW_VAR_CCX_CLM_NHM, + HW_VAR_TxOPLimit, + HW_VAR_TURBO_MODE, + HW_VAR_RF_STATE, + HW_VAR_RF_OFF_BY_HW, + HW_VAR_BUS_SPEED, + HW_VAR_SET_DEV_POWER, + + HW_VAR_RCR, + HW_VAR_RATR_0, + HW_VAR_RRSR, + HW_VAR_CPU_RST, + HW_VAR_CECHK_BSSID, + HW_VAR_LBK_MODE, + HW_VAR_AES_11N_FIX, + HW_VAR_USB_RX_AGGR, + HW_VAR_USER_CONTROL_TURBO_MODE, + HW_VAR_RETRY_LIMIT, + HW_VAR_INIT_TX_RATE, + HW_VAR_TX_RATE_REG, + HW_VAR_EFUSE_USAGE, + HW_VAR_EFUSE_BYTES, + HW_VAR_AUTOLOAD_STATUS, + HW_VAR_RF_2R_DISABLE, + HW_VAR_SET_RPWM, + HW_VAR_H2C_FW_PWRMODE, + HW_VAR_H2C_FW_JOINBSSRPT, + HW_VAR_H2C_FW_MEDIASTATUSRPT, + HW_VAR_H2C_FW_P2P_PS_OFFLOAD, + HW_VAR_FW_PSMODE_STATUS, + HW_VAR_RESUME_CLK_ON, + HW_VAR_FW_LPS_ACTION, + HW_VAR_1X1_RECV_COMBINE, + HW_VAR_STOP_SEND_BEACON, + HW_VAR_TSF_TIMER, + HW_VAR_IO_CMD, + + HW_VAR_RF_RECOVERY, + HW_VAR_H2C_FW_UPDATE_GTK, + HW_VAR_WF_MASK, + HW_VAR_WF_CRC, + HW_VAR_WF_IS_MAC_ADDR, + HW_VAR_H2C_FW_OFFLOAD, + HW_VAR_RESET_WFCRC, + + HW_VAR_HANDLE_FW_C2H, + HW_VAR_DL_FW_RSVD_PAGE, + HW_VAR_AID, + HW_VAR_HW_SEQ_ENABLE, + HW_VAR_CORRECT_TSF, + HW_VAR_BCN_VALID, + HW_VAR_FWLPS_RF_ON, + HW_VAR_DUAL_TSF_RST, + HW_VAR_SWITCH_EPHY_WoWLAN, + HW_VAR_INT_MIGRATION, + HW_VAR_INT_AC, + HW_VAR_RF_TIMING, + + HAL_DEF_WOWLAN, + HW_VAR_MRC, + HW_VAR_KEEP_ALIVE, + HW_VAR_NAV_UPPER, +}; + +enum rt_media_status { + RT_MEDIA_DISCONNECT = 0, + RT_MEDIA_CONNECT = 1 +}; + +enum rt_oem_id { + RT_CID_DEFAULT = 0, + RT_CID_8187_ALPHA0 = 1, + RT_CID_8187_SERCOMM_PS = 2, + RT_CID_8187_HW_LED = 3, + RT_CID_8187_NETGEAR = 4, + RT_CID_WHQL = 5, + RT_CID_819x_CAMEO = 6, + RT_CID_819x_RUNTOP = 7, + RT_CID_819x_Senao = 8, + RT_CID_TOSHIBA = 9, + RT_CID_819x_Netcore = 10, + RT_CID_Nettronix = 11, + RT_CID_DLINK = 12, + RT_CID_PRONET = 13, + RT_CID_COREGA = 14, + RT_CID_819x_ALPHA = 15, + RT_CID_819x_Sitecom = 16, + RT_CID_CCX = 17, + RT_CID_819x_Lenovo = 18, + RT_CID_819x_QMI = 19, + RT_CID_819x_Edimax_Belkin = 20, + RT_CID_819x_Sercomm_Belkin = 21, + RT_CID_819x_CAMEO1 = 22, + RT_CID_819x_MSI = 23, + RT_CID_819x_Acer = 24, + RT_CID_819x_HP = 27, + RT_CID_819x_CLEVO = 28, + RT_CID_819x_Arcadyan_Belkin = 29, + RT_CID_819x_SAMSUNG = 30, + RT_CID_819x_WNC_COREGA = 31, + RT_CID_819x_Foxcoon = 32, + RT_CID_819x_DELL = 33, + RT_CID_819x_PRONETS = 34, + RT_CID_819x_Edimax_ASUS = 35, + RT_CID_NETGEAR = 36, + RT_CID_PLANEX = 37, + RT_CID_CC_C = 38, +}; + +enum hw_descs { + HW_DESC_OWN, + HW_DESC_RXOWN, + HW_DESC_TX_NEXTDESC_ADDR, + HW_DESC_TXBUFF_ADDR, + HW_DESC_RXBUFF_ADDR, + HW_DESC_RXPKT_LEN, + HW_DESC_RXERO, + HW_DESC_RX_PREPARE, +}; + +enum prime_sc { + PRIME_CHNL_OFFSET_DONT_CARE = 0, + PRIME_CHNL_OFFSET_LOWER = 1, + PRIME_CHNL_OFFSET_UPPER = 2, +}; + +enum rf_type { + RF_1T1R = 0, + RF_1T2R = 1, + RF_2T2R = 2, + RF_2T2R_GREEN = 3, +}; + +enum ht_channel_width { + HT_CHANNEL_WIDTH_20 = 0, + HT_CHANNEL_WIDTH_20_40 = 1, + HT_CHANNEL_WIDTH_80 = 2, +}; + +/* Ref: 802.11i sepc D10.0 7.3.2.25.1 +Cipher Suites Encryption Algorithms */ +enum rt_enc_alg { + NO_ENCRYPTION = 0, + WEP40_ENCRYPTION = 1, + TKIP_ENCRYPTION = 2, + RSERVED_ENCRYPTION = 3, + AESCCMP_ENCRYPTION = 4, + WEP104_ENCRYPTION = 5, + AESCMAC_ENCRYPTION = 6, /*IEEE802.11w */ +}; + +enum rtl_hal_state { + _HAL_STATE_STOP = 0, + _HAL_STATE_START = 1, +}; + +enum rtl_var_map { + /*reg map */ + SYS_ISO_CTRL = 0, + SYS_FUNC_EN, + SYS_CLK, + MAC_RCR_AM, + MAC_RCR_AB, + 
MAC_RCR_ACRC32, + MAC_RCR_ACF, + MAC_RCR_AAP, + MAC_HIMR, + MAC_HIMRE, + MAC_HSISR, + + /*efuse map */ + EFUSE_TEST, + EFUSE_CTRL, + EFUSE_CLK, + EFUSE_CLK_CTRL, + EFUSE_PWC_EV12V, + EFUSE_FEN_ELDR, + EFUSE_LOADER_CLK_EN, + EFUSE_ANA8M, + EFUSE_HWSET_MAX_SIZE, + EFUSE_MAX_SECTION_MAP, + EFUSE_REAL_CONTENT_SIZE, + EFUSE_OOB_PROTECT_BYTES_LEN, + EFUSE_ACCESS, + /*CAM map */ + RWCAM, + WCAMI, + RCAMO, + CAMDBG, + SECR, + SEC_CAM_NONE, + SEC_CAM_WEP40, + SEC_CAM_TKIP, + SEC_CAM_AES, + SEC_CAM_WEP104, + + /*IMR map */ + RTL_IMR_BCNDMAINT6, /*Beacon DMA Interrupt 6 */ + RTL_IMR_BCNDMAINT5, /*Beacon DMA Interrupt 5 */ + RTL_IMR_BCNDMAINT4, /*Beacon DMA Interrupt 4 */ + RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */ + RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */ + RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */ + RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrup 8 */ + RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrup 7 */ + RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrup 6 */ + RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrup 5 */ + RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrup 4 */ + RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrup 3 */ + RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrup 2 */ + RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrup 1 */ + RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */ + RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */ + RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */ + RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */ + RTL_IMR_BcnInt, /*Beacon DMA Interrupt 0 */ + RTL_IMR_RXFOVW, /*Receive FIFO Overflow */ + RTL_IMR_RDU, /*Receive Descriptor Unavailable */ + RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */ + RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrup */ + RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */ + RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/ + RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */ + RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */ + RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */ + RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */ + RTL_IMR_BEDOK, /*AC_BE DMA OK Interrupt */ + RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */ + RTL_IMR_VODOK, /*AC_VO DMA Interrupt */ + RTL_IMR_ROK, /*Receive DMA OK Interrupt */ + RTL_IMR_HSISR_IND, /*HSISR Interrupt*/ + RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK | + * RTL_IMR_TBDER) */ + RTL_IMR_C2HCMD, /*fw interrupt*/ + + /*CCK Rates, TxHT = 0 */ + RTL_RC_CCK_RATE1M, + RTL_RC_CCK_RATE2M, + RTL_RC_CCK_RATE5_5M, + RTL_RC_CCK_RATE11M, + + /*OFDM Rates, TxHT = 0 */ + RTL_RC_OFDM_RATE6M, + RTL_RC_OFDM_RATE9M, + RTL_RC_OFDM_RATE12M, + RTL_RC_OFDM_RATE18M, + RTL_RC_OFDM_RATE24M, + RTL_RC_OFDM_RATE36M, + RTL_RC_OFDM_RATE48M, + RTL_RC_OFDM_RATE54M, + + RTL_RC_HT_RATEMCS7, + RTL_RC_HT_RATEMCS15, + + /*keep it last */ + RTL_VAR_MAP_MAX, +}; + +/*Firmware PS mode for control LPS.*/ +enum _fw_ps_mode { + FW_PS_ACTIVE_MODE = 0, + FW_PS_MIN_MODE = 1, + FW_PS_MAX_MODE = 2, + FW_PS_DTIM_MODE = 3, + FW_PS_VOIP_MODE = 4, + FW_PS_UAPSD_WMM_MODE = 5, + FW_PS_UAPSD_MODE = 6, + FW_PS_IBSS_MODE = 7, + FW_PS_WWLAN_MODE = 8, + FW_PS_PM_Radio_Off = 9, + FW_PS_PM_Card_Disable = 10, +}; + +enum rt_psmode { + EACTIVE, /*Active/Continuous access. */ + EMAXPS, /*Max power save mode. */ + EFASTPS, /*Fast power save mode. */ + EAUTOPS, /*Auto power save mode. 
*/ +}; + +/*LED related.*/ +enum led_ctl_mode { + LED_CTL_POWER_ON = 1, + LED_CTL_LINK = 2, + LED_CTL_NO_LINK = 3, + LED_CTL_TX = 4, + LED_CTL_RX = 5, + LED_CTL_SITE_SURVEY = 6, + LED_CTL_POWER_OFF = 7, + LED_CTL_START_TO_LINK = 8, + LED_CTL_START_WPS = 9, + LED_CTL_STOP_WPS = 10, +}; + +enum rtl_led_pin { + LED_PIN_GPIO0, + LED_PIN_LED0, + LED_PIN_LED1, + LED_PIN_LED2 +}; + +/*QoS related.*/ +/*acm implementation method.*/ +enum acm_method { + eAcmWay0_SwAndHw = 0, + eAcmWay1_HW = 1, + eAcmWay2_SW = 2, +}; + +enum macphy_mode { + SINGLEMAC_SINGLEPHY = 0, + DUALMAC_DUALPHY, + DUALMAC_SINGLEPHY, +}; + +enum band_type { + BAND_ON_2_4G = 0, + BAND_ON_5G, + BAND_ON_BOTH, + BANDMAX +}; + +/*aci/aifsn Field. +Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/ +union aci_aifsn { + u8 char_data; + + struct { + u8 aifsn:4; + u8 acm:1; + u8 aci:2; + u8 reserved:1; + } f; /* Field */ +}; + +/*mlme related.*/ +enum wireless_mode { + WIRELESS_MODE_UNKNOWN = 0x00, + WIRELESS_MODE_A = 0x01, + WIRELESS_MODE_B = 0x02, + WIRELESS_MODE_G = 0x04, + WIRELESS_MODE_AUTO = 0x08, + WIRELESS_MODE_N_24G = 0x10, + WIRELESS_MODE_N_5G = 0x20, + WIRELESS_MODE_AC_5G = 0x40, + WIRELESS_MODE_AC_24G = 0x80 +}; + +enum ratr_table_mode { + RATR_INX_WIRELESS_NGB = 0, // BGN 40 Mhz 2SS 1SS + RATR_INX_WIRELESS_NG = 1, // GN or N + RATR_INX_WIRELESS_NB = 2, // BGN 20 Mhz 2SS 1SS or BN + RATR_INX_WIRELESS_N = 3, + RATR_INX_WIRELESS_GB = 4, + RATR_INX_WIRELESS_G = 5, + RATR_INX_WIRELESS_B = 6, + RATR_INX_WIRELESS_MC = 7, + RATR_INX_WIRELESS_AC_5N = 8, + RATR_INX_WIRELESS_AC_24N = 9, +}; + +enum rtl_link_state { + MAC80211_NOLINK = 0, + MAC80211_LINKING = 1, + MAC80211_LINKED = 2, + MAC80211_LINKED_SCANNING = 3, +}; + +enum act_category { + ACT_CAT_QOS = 1, + ACT_CAT_DLS = 2, + ACT_CAT_BA = 3, + ACT_CAT_HT = 7, + ACT_CAT_WMM = 17, +}; + +enum ba_action { + ACT_ADDBAREQ = 0, + ACT_ADDBARSP = 1, + ACT_DELBA = 2, +}; + +enum rt_polarity_ctl { + RT_POLARITY_LOW_ACT = 0, + RT_POLARITY_HIGH_ACT = 1, +}; + + +struct octet_string { + u8 *octet; + u16 length; +}; + +struct rtl_hdr_3addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; +} __packed; + +struct rtl_info_element { + u8 id; + u8 len; + u8 data[0]; +} __packed; + +struct rtl_probe_rsp { + struct rtl_hdr_3addr header; + u32 time_stamp[2]; + __le16 beacon_interval; + __le16 capability; + /*SSID, supported rates, FH params, DS params, + CF params, IBSS params, TIM (if beacon), RSN */ + struct rtl_info_element info_element[0]; +} __packed; + +/*LED related.*/ +/*ledpin Identify how to implement this SW led.*/ +struct rtl_led { + void *hw; + enum rtl_led_pin ledpin; + bool b_ledon; +}; + +struct rtl_led_ctl { + bool bled_opendrain; + struct rtl_led sw_led0; + struct rtl_led sw_led1; +}; + +struct rtl_qos_parameters { + __le16 cw_min; + __le16 cw_max; + u8 aifs; + u8 flag; + __le16 tx_op; +} __packed; + +struct rt_smooth_data { + u32 elements[100]; /*array to store values */ + u32 index; /*index to current array to store */ + u32 total_num; /*num of valid elements */ + u32 total_val; /*sum of valid elements */ +}; + +struct rtl_ht_agg { + u16 txq_id; + u16 wait_for_ba; + u16 start_idx; + u64 bitmap; + u32 rate_n_flags; + u8 agg_state; + u8 rx_agg_state; +}; + +struct rtl_tid_data { + u16 seq_number; + struct rtl_ht_agg agg; +}; + +struct rssi_sta{ + long undecorated_smoothed_pwdb; +}; + +struct rtl_sta_info { + struct list_head list; + u8 ratr_index; + u8 wireless_mode; + u8 mimo_ps; + u8 
mac_addr[6]; + struct rtl_tid_data tids[MAX_TID_COUNT]; + + /* just used for ap adhoc or mesh*/ + struct rssi_sta rssi_stat; +} __packed; + +#ifdef VIF_TODO +struct rtl_vif { + unsigned int id; + /* struct ieee80211_vif __rcu *vif; */ + struct ieee80211_vif *vif; +}; + +struct rtl_vif_info { + struct list_head list; + bool active; + unsigned int id; + struct sk_buff *beacon; + bool enable_beacon; +}; + +struct vif_priv { + struct list_head vif_list; + + /* interface mode settings */ + unsigned long vif_bitmap; + unsigned int vifs; + struct rtl_vif vif[MAX_VIRTUAL_MAC]; + + /* beaconing */ + spinlock_t beacon_lock; + unsigned int global_pretbtt; + unsigned int global_beacon_int; + /* struct rtl_vif_info __rcu *beacon_iter; */ + struct rtl_vif_info *beacon_iter; + unsigned int beacon_enabled; +}; +#endif + +struct false_alarm_statistics { + u32 cnt_parity_fail; + u32 cnt_rate_illegal; + u32 cnt_crc8_fail; + u32 cnt_mcs_fail; + u32 cnt_fast_fsync_fail; + u32 cnt_sb_search_fail; + u32 cnt_ofdm_fail; + u32 cnt_cck_fail; + u32 cnt_all; + u32 cnt_ofdm_cca; + u32 cnt_cck_cca; + u32 cnt_cca_all; + u32 cnt_bw_usc; + u32 cnt_bw_lsc; +}; + +struct init_gain { + u8 xaagccore1; + u8 xbagccore1; + u8 xcagccore1; + u8 xdagccore1; + u8 cca; + +}; + +struct wireless_stats { + unsigned long txbytesunicast; + unsigned long txbytesmulticast; + unsigned long txbytesbroadcast; + unsigned long rxbytesunicast; + + long rx_snr_db[4]; + /*Correct smoothed ss in Dbm, only used + in driver to report real power now. */ + long recv_signal_power; + long signal_quality; + long last_sigstrength_inpercent; + + u32 rssi_calculate_cnt; + u32 pwdb_all_cnt; + + /*Transformed, in dbm. Beautified signal + strength for UI, not correct. */ + long signal_strength; + + u8 rx_rssi_percentage[4]; + u8 rx_evm_dbm[4]; + u8 rx_evm_percentage[2]; + + u16 rx_cfo_short[4]; + u16 rx_cfo_tail[4]; + + struct rt_smooth_data ui_rssi; + struct rt_smooth_data ui_link_quality; +}; + +struct rate_adaptive { + u8 rate_adaptive_disabled; + u8 ratr_state; + u16 reserve; + + u32 high_rssi_thresh_for_ra; + u32 high2low_rssi_thresh_for_ra; + u8 low2high_rssi_thresh_for_ra; + u32 low_rssi_thresh_for_ra; + u32 upper_rssi_threshold_ratr; + u32 middleupper_rssi_threshold_ratr; + u32 middle_rssi_threshold_ratr; + u32 middlelow_rssi_threshold_ratr; + u32 low_rssi_threshold_ratr; + u32 ultralow_rssi_threshold_ratr; + u32 low_rssi_threshold_ratr_40m; + u32 low_rssi_threshold_ratr_20m; + u8 ping_rssi_enable; + u32 ping_rssi_ratr; + u32 ping_rssi_thresh_for_ra; + u32 last_ratr; + u8 pre_ratr_state; + u8 ldpc_thres; + bool use_ldpc; + bool lower_rts_rate; + bool is_special_data; +}; + +struct regd_pair_mapping { + u16 reg_dmnenum; + u16 reg_5ghz_ctl; + u16 reg_2ghz_ctl; +}; + +struct dynamic_primary_cca{ + u8 pricca_flag; + u8 intf_flag; + u8 intf_type; + u8 dup_rts_flag; + u8 monitor_flag; + u8 ch_offset; + u8 mf_state; +}; + +struct rtl_regulatory { + char alpha2[2]; + u16 country_code; + u16 max_power_level; + u32 tp_scale; + u16 current_rd; + u16 current_rd_ext; + int16_t power_limit; + struct regd_pair_mapping *regpair; +}; + +struct rtl_rfkill { + bool rfkill_state; /*0 is off, 1 is on */ +}; + +/*for P2P PS**/ +#define P2P_MAX_NOA_NUM 2 + +enum p2p_role { + P2P_ROLE_DISABLE = 0, + P2P_ROLE_DEVICE = 1, + P2P_ROLE_CLIENT = 2, + P2P_ROLE_GO = 3 +}; + +enum p2p_ps_state { + P2P_PS_DISABLE = 0, + P2P_PS_ENABLE = 1, + P2P_PS_SCAN = 2, + P2P_PS_SCAN_DONE = 3, + P2P_PS_ALLSTASLEEP = 4, // for P2P GO +}; + +enum p2p_ps_mode { + P2P_PS_NONE = 0, + P2P_PS_CTWINDOW = 1, + 
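	// Roughly: CTWindow-based opportunistic PS keeps the GO awake for a
	// client traffic window after each beacon, while NoA advertises
	// scheduled absence periods; rtl_p2p_ps_info below carries the
	// parameters used by both modes.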
P2P_PS_NOA = 2, + P2P_PS_MIX = 3, // CTWindow and NoA +}; + +struct rtl_p2p_ps_info { + enum p2p_ps_mode p2p_ps_mode; /* indicate p2p ps mode */ + enum p2p_ps_state p2p_ps_state; /* indicate p2p ps state */ + u8 noa_index; /* Identifies and instance of Notice of Absence timing. */ + /* Client traffic window. A period of time in TU after TBTT. */ + u8 ctwindow; + u8 opp_ps; /* opportunistic power save. */ + u8 noa_num; /* number of NoA descriptor in P2P IE. */ + /* Count for owner, Type of client. */ + u8 noa_count_type[P2P_MAX_NOA_NUM]; + /* Max duration for owner, preferred or + * min acceptable duration for client. */ + u32 noa_duration[P2P_MAX_NOA_NUM]; + /* Length of interval for owner, preferred or + * max acceptable interval of client. */ + u32 noa_interval[P2P_MAX_NOA_NUM]; + /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */ + u32 noa_start_time[P2P_MAX_NOA_NUM]; +}; + + struct p2p_ps_offload_t { + u8 Offload_En:1; + u8 role:1; /* 1: Owner, 0: Client */ + u8 CTWindow_En:1; + u8 NoA0_En:1; + u8 NoA1_En:1; + u8 AllStaSleep:1; + u8 discovery:1; + u8 reserved:1; +}; + +#define IQK_MATRIX_REG_NUM 8 +#define IQK_MATRIX_SETTINGS_NUM (14+24+21) // Channels_2_4G_NUM + Channels_5G_20M_NUM + Channels_5G +struct iqk_matrix_regs { + bool b_iqk_done; + long value[1][IQK_MATRIX_REG_NUM]; +}; + +struct rtl_phy { + struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */ + struct init_gain initgain_backup; + enum io_type current_io_type; + + u8 rf_mode; + u8 rf_type; + u8 current_chan_bw; + u8 set_bwmode_inprogress; + u8 sw_chnl_inprogress; + u8 sw_chnl_stage; + u8 sw_chnl_step; + u8 current_channel; + u8 h2c_box_num; + u8 set_io_inprogress; + u8 lck_inprogress; + + /* record for power tracking */ + s32 reg_e94; + s32 reg_e9c; + s32 reg_ea4; + s32 reg_eac; + s32 reg_eb4; + s32 reg_ebc; + s32 reg_ec4; + s32 reg_ecc; + u8 rfpienable; + u8 reserve_0; + u16 reserve_1; + u32 reg_c04, reg_c08, reg_874; + u32 adda_backup[16]; + u32 iqk_mac_backup[IQK_MAC_REG_NUM]; + u32 iqk_bb_backup[10]; + bool iqk_initialized; + + bool rfpath_rx_enable[MAX_RF_PATH]; + /*Jaguar*/ + u8 reg_837; + /* Dul mac */ + bool b_need_iqk; + struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM]; + + bool b_rfpi_enable; + + bool b_iqk_in_progress; + + u8 pwrgroup_cnt; + u8 bcck_high_power; + /* this is for 88E & 8723A */ + u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16]; + /* this is for 92EE */ + u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND] + [TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_SECTION]; + u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_RF] + [MAX_BASE_NUM_IN_PHY_REG_PG_24G]; + + u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF] + [TX_PWR_BY_RATE_NUM_RF] + [MAX_BASE_NUM_IN_PHY_REG_PG_5G]; + u8 default_initialgain[4]; + + /* the current Tx power level */ + u8 cur_cck_txpwridx; + u8 cur_ofdm24g_txpwridx; + u8 cur_bw20_txpwridx; + u8 cur_bw40_txpwridx; + + u32 rfreg_chnlval[2]; + bool b_apk_done; + u32 reg_rf3c[2]; /* pathA / pathB */ + + u32 backup_rf_0x1a;/*92ee*/ + /* bfsync */ + u8 framesync; + u32 framesync_c34; + + u8 num_total_rfpath; + u16 rf_pathmap; + + u8 hw_rof_enable; /*Enable GPIO[9] as WL RF HW PDn source*/ + + enum rt_polarity_ctl polarity_ctl; +}; + +#define RTL_AGG_STOP 0 +#define RTL_AGG_PROGRESS 1 +#define RTL_AGG_START 2 +#define RTL_AGG_OPERATIONAL 3 +#define RTL_RX_AGG_START 1 +#define RTL_RX_AGG_STOP 0 + +struct rtl_priv; +struct rtl_io { + struct device *dev; + + /*PCI MEM map */ + unsigned long pci_mem_end; 
/*shared mem end */ + unsigned long pci_mem_start; /*shared mem start */ + + /*PCI IO map */ + unsigned long pci_base_addr; /*device I/O address */ + + void (*write8_async) (struct rtl_priv * rtlpriv, u32 addr, u8 val); + void (*write16_async) (struct rtl_priv * rtlpriv, u32 addr, u16 val); + void (*write32_async) (struct rtl_priv * rtlpriv, u32 addr, u32 val); + + u8(*read8_sync) (struct rtl_priv * rtlpriv, u32 addr); + u16(*read16_sync) (struct rtl_priv * rtlpriv, u32 addr); + u32(*read32_sync) (struct rtl_priv * rtlpriv, u32 addr); + +}; + +struct rtl_mac { + u8 mac_addr[ETH_ALEN]; + u8 mac80211_registered; + u8 beacon_enabled; + + u32 tx_ss_num; + u32 rx_ss_num; + + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + struct ieee80211_hw *hw; + struct ieee80211_vif *vif; + enum nl80211_iftype opmode; + + /*Probe Beacon management */ + enum rtl_link_state link_state; + + int n_channels; + int n_bitrates; + + bool offchan_deley; + u8 p2p; /*using p2p role*/ + bool p2p_in_use; + + /*filters */ + u32 rx_conf; + + bool act_scanning; + u8 cnt_after_linked; + bool skip_scan; + + /* early mode */ + /* skb wait queue */ + struct sk_buff_head skb_waitq[MAX_TID_COUNT]; + + /*RDG*/ + bool rdg_en; + + /*AP*/ + u8 bssid[6]; + u32 vendor; + u32 basic_rates; /* b/g rates */ + u8 ht_enable; + u8 bw_40; + u8 mode; /* wireless mode */ + u8 slot_time; + u8 short_preamble; + u8 use_cts_protect; + u8 cur_40_prime_sc; + u8 cur_40_prime_sc_bk; + u8 cur_80_prime_sc; + u64 tsf; + u8 retry_short; + u8 retry_long; + u16 assoc_id; + bool bhiddenssid; + + /*IBSS*/ + int beacon_interval; + + /*AMPDU*/ + u8 min_space_cfg; /*For Min spacing configurations */ + u8 max_mss_density; + u8 current_ampdu_factor; + u8 current_ampdu_density; + + /*QOS & EDCA */ + struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE]; + struct rtl_qos_parameters ac[AC_MAX]; +}; + +struct rtl_hal { + struct ieee80211_hw *hw; + + bool driver_is_goingto_unload; + bool up_first_time; + bool bfirst_init; + bool being_init_adapter; + bool b_bbrf_ready; + bool b_mac_func_enable; + bool b_pre_edcca_enable; + + enum intf_type interface; + u16 hw_type; /*92c or 92d or 92s and so on */ + u8 ic_class; + u8 oem_id; + u32 version; /*version of chip */ + u8 state; /*stop 0, start 1 */ + u8 boad_type; + + /*firmware */ + u32 fwsize; + u8 *pfirmware; + u16 fw_version; + u16 fw_subversion; + bool b_h2c_setinprogress; + u8 last_hmeboxnum; + bool bfw_ready; + + /*Reserve page start offset except beacon in TxQ. 
*/ + u8 fw_rsvdpage_startoffset; + u8 h2c_txcmd_seq; + u8 current_ra_rate; + + /* FW Cmd IO related */ + u16 fwcmd_iomap; + u32 fwcmd_ioparam; + bool set_fwcmd_inprogress; + u8 current_fwcmd_io; + + bool bfw_clk_change_in_progress; + bool ballow_sw_to_change_hwclc; + u8 fw_ps_state; + struct p2p_ps_offload_t p2p_ps_offload; + /**/ + bool driver_going2unload; + + /*AMPDU init min space*/ + u8 minspace_cfg; /*For Min spacing configurations */ + + /* Dul mac */ + enum macphy_mode macphymode; + enum band_type current_bandtype; /* 0:2.4G, 1:5G */ + enum band_type current_bandtypebackup; + enum band_type bandset; + /* dual MAC 0--Mac0 1--Mac1 */ + u32 interfaceindex; + /* just for DulMac S3S4 */ + u8 macphyctl_reg; + bool b_earlymode_enable; + u8 max_earlymode_num; + /* Dul mac*/ + bool during_mac0init_radiob; + bool during_mac1init_radioa; + bool reloadtxpowerindex; + /* True if IMR or IQK have done + for 2.4G in scan progress */ + bool b_load_imrandiqk_setting_for2g; + + bool disable_amsdu_8k; + bool bmaster_of_dmsp; + bool bslave_of_dmsp; + + u16 rx_tag;/*for 92ee*/ + u8 rts_en; +}; + +struct rtl_security { + /*default 0 */ + bool use_sw_sec; + + bool being_setkey; + bool use_defaultkey; + /*Encryption Algorithm for Unicast Packet */ + enum rt_enc_alg pairwise_enc_algorithm; + /*Encryption Algorithm for Brocast/Multicast */ + enum rt_enc_alg group_enc_algorithm; + /*Cam Entry Bitmap */ + u32 hwsec_cam_bitmap; + u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN]; + /*local Key buffer, indx 0 is for + pairwise key 1-4 is for agoup key. */ + u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN]; + u8 key_len[KEY_BUF_SIZE]; + + /*The pointer of Pairwise Key, + it always points to KeyBuf[4] */ + u8 *pairwise_key; +}; + +struct rtl_dig { + u8 dig_enable_flag; + u8 dig_ext_port_stage; + + u32 rssi_lowthresh; + u32 rssi_highthresh; + + u32 fa_lowthresh; + u32 fa_highthresh; + + u8 cursta_connectstate; + u8 presta_connectstate; + u8 curmultista_connectstate; + + u8 pre_igvalue; + u8 cur_igvalue; + + char backoff_val; + char backoff_val_range_max; + char backoff_val_range_min; + u8 rx_gain_range_max; + u8 rx_gain_range_min; + u8 rssi_val_min; + u8 min_undecorated_pwdb_for_dm; + long last_min_undecorated_pwdb_for_dm; + + u8 pre_cck_pd_state; + u8 cur_cck_pd_state; + + u8 large_fa_hit; + u8 forbidden_igi; + u32 recover_cnt; + +}; + +struct rtl_pstbl { + u8 pre_ccastate; + u8 cur_ccasate; + + u8 pre_rfstate; + u8 cur_rfstate; + + long rssi_val_min; + +}; + +#define ASSOCIATE_ENTRY_NUM 32+1 + +struct fast_ant_trainning{ + u8 bssid[6]; + u8 antsel_rx_keep_0; + u8 antsel_rx_keep_1; + u8 antsel_rx_keep_2; + u32 ant_sum_rssi[7]; + u32 ant_rssi_cnt[7]; + u32 ant_ave_rssi[7]; + u8 fat_state; + u32 train_idx; + u8 antsel_a[ASSOCIATE_ENTRY_NUM]; + u8 antsel_b[ASSOCIATE_ENTRY_NUM]; + u8 antsel_c[ASSOCIATE_ENTRY_NUM]; + u32 main_ant_sum[ASSOCIATE_ENTRY_NUM]; + u32 aux_ant_sum[ASSOCIATE_ENTRY_NUM]; + u32 main_ant_cnt[ASSOCIATE_ENTRY_NUM]; + u32 aux_ant_cnt[ASSOCIATE_ENTRY_NUM]; + u8 rx_idle_ant; + bool b_becomelinked; +}; + +struct dm_phy_dbg_info { + char rx_snrdb[4]; + u64 num_qry_phy_status; + u64 num_qry_phy_status_cck; + u64 num_qry_phy_status_ofdm; + u16 num_qry_beacon_pkt; + u16 num_non_be_pkt; + s32 rx_evm[4]; +}; + +struct rtl_dm { + /*PHY status for DM */ + long entry_min_undecoratedsmoothed_pwdb; + long undecorated_smoothed_pwdb; /*out dm */ + long entry_max_undecoratedsmoothed_pwdb; + bool b_dm_initialgain_enable; + bool bdynamic_txpower_enable; + bool bcurrent_turbo_edca; + bool bis_any_nonbepkts; /*out dm */ + bool 
bis_cur_rdlstate; + bool btxpower_trackinginit; + bool b_disable_framebursting; + bool b_cck_inch14; + bool btxpower_tracking; + bool b_useramask; + bool brfpath_rxenable[4]; + bool binform_fw_driverctrldm; + bool bcurrent_mrc_switch; + u8 txpowercount; + + u8 thermalvalue_rxgain; + u8 thermalvalue_iqk; + u8 thermalvalue_lck; + u8 thermalvalue; + u8 thermalvalue_avg[AVG_THERMAL_NUM]; + u8 thermalvalue_avg_index; + bool bdone_txpower; + u8 last_dtp_lvl; + u8 dynamic_txhighpower_lvl; /*Tx high power level */ + u8 dm_flag; /*Indicate if each dynamic mechanism's status. */ + u8 dm_type; + u8 txpower_track_control; + bool binterrupt_migration; + bool bdisable_tx_int; + char ofdm_index[MAX_RF_PATH]; + u8 default_ofdm_index; + u8 default_cck_index; + char cck_index; + char delta_power_index[MAX_RF_PATH]; + char delta_power_index_last[MAX_RF_PATH]; + char power_index_offset[MAX_RF_PATH]; + char aboslute_ofdm_swing_idx[MAX_RF_PATH]; + char remnant_ofdm_swing_idx[MAX_RF_PATH]; + char remnant_cck_idx; + bool modify_txagc_flag_path_a; + bool modify_txagc_flag_path_b; + + bool b_one_entry_only; + struct dm_phy_dbg_info dbginfo; + /* Dynamic ATC switch */ + + bool atc_status; + bool large_cfo_hit; + bool is_freeze; + int cfo_tail[2]; + int cfo_ave_pre; + int crystal_cap; + u8 cfo_threshold; + u32 packet_count; + u32 packet_count_pre; + u8 tx_rate; + + + /*88e tx power tracking*/ + u8 bb_swing_idx_ofdm[MAX_RF_PATH]; + u8 bb_swing_idx_ofdm_current; + u8 bb_swing_idx_ofdm_base[MAX_RF_PATH]; + bool bb_swing_flag_Ofdm; + u8 bb_swing_idx_cck; + u8 bb_swing_idx_cck_current; + u8 bb_swing_idx_cck_base; + bool bb_swing_flag_cck; + + char bb_swing_diff_2g; + char bb_swing_diff_5g; + + u8 delta_swing_table_idx_24gccka_p[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24gccka_n[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24gcckb_p[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24gcckb_n[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24ga_p[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24ga_n[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24gb_p[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24gb_n[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_5ga_p[BAND_NUM][DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_5ga_n[BAND_NUM][DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_5gb_p[BAND_NUM][DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_5gb_n[BAND_NUM][DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24ga_p_8188e[DELTA_SWINGIDX_SIZE]; + u8 delta_swing_table_idx_24ga_n_8188e[DELTA_SWINGIDX_SIZE]; + + + /* DMSP */ + bool supp_phymode_switch; + + /* DulMac */ + struct rtl_dig dm_digtable; + struct rtl_pstbl dm_pstable; + struct fast_ant_trainning fat_table; + + u8 resp_tx_path; + u8 path_sel; + u32 patha_sum; + u32 pathb_sum; + u32 patha_cnt; + u32 pathb_cnt; + + u8 pre_channel; + u8 *p_channel; + u8 linked_interval; + + u64 last_tx_ok_cnt; + u64 last_rx_ok_cnt; +}; + +#define EFUSE_MAX_LOGICAL_SIZE 256 + +struct rtl_efuse { + bool bautoLoad_ok; + bool bootfromefuse; + u16 max_physical_size; + + u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE]; + u16 efuse_usedbytes; + u8 efuse_usedpercentage; +#ifdef EFUSE_REPG_WORKAROUND + bool efuse_re_pg_sec1flag; + u8 efuse_re_pg_data[8]; +#endif + + u8 autoload_failflag; + u8 autoload_status; + + short epromtype; + u16 eeprom_vid; + u16 eeprom_did; + u16 eeprom_svid; + u16 eeprom_smid; + u8 eeprom_oemid; + u16 eeprom_channelplan; + u8 eeprom_version; + + u8 dev_addr[6]; + u8 board_type; + u8 wowlan_enable; + u8 antenna_div_cfg; + u8 antenna_div_type; + + bool 
b_txpwr_fromeprom; + u8 eeprom_crystalcap; + u8 eeprom_tssi[2]; + u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */ + u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX]; + u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX]; + u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G]; + u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX]; + u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX]; + + + u8 internal_pa_5g[2]; /* pathA / pathB */ + u8 eeprom_c9; + u8 eeprom_cc; + + /*For power group */ + u8 eeprom_pwrgroup[2][3]; + u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER]; + u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER]; + + u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G]; + /*For HT 40MHZ pwr */ + u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + /*For HT 40MHZ pwr */ + u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + char txpwr_cckdiff[MAX_RF_PATH][MAX_TX_COUNT]; /*CCK_24G_Diff*/ + /*HT 20<->40 Pwr diff */ + char txpwr_ht20diff[MAX_RF_PATH][MAX_TX_COUNT]; /*BW20_24G_Diff*/ + char txpwr_ht40diff[MAX_RF_PATH][MAX_TX_COUNT];/*BW40_24G_Diff*/ + /*For HT<->legacy pwr diff */ + char txpwr_legacyhtdiff[MAX_RF_PATH][MAX_TX_COUNT];/*OFDM_24G_Diff*/ + + u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M]; + char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT]; + char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT]; + char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT]; + char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT]; + + u8 txpwr_safetyflag; /* Band edge enable flag */ + u16 eeprom_txpowerdiff; + u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */ + u8 antenna_txpwdiff[3]; + + u8 eeprom_regulatory; + u8 eeprom_thermalmeter; + u8 thermalmeter[2];/*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */ + u16 tssi_13dbm; + u8 crystalcap; /* CrystalCap. */ + u8 delta_iqk; + u8 delta_lck; + + u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */ + bool b_apk_thermalmeterignore; + + bool b1x1_recvcombine; + bool b1ss_support; + + /*channel plan */ + u8 channel_plan; +}; + +struct rtl_ps_ctl { + bool pwrdomain_protect; + bool b_in_powersavemode; + bool rfchange_inprogress; + bool b_swrf_processing; + bool b_hwradiooff; + /* + * just for PCIE ASPM + * If it supports ASPM, Offset[560h] = 0x40, + * otherwise Offset[560h] = 0x00. + * */ + bool b_support_aspm; + bool b_support_backdoor; + + /*for LPS */ + enum rt_psmode dot11_psmode; /*Power save mode configured. */ + bool b_swctrl_lps; + bool b_fwctrl_lps; + u8 fwctrl_psmode; + /*For Fw control LPS mode */ + u8 b_reg_fwctrl_lps; + /*Record Fw PS mode status. 
*/ + bool b_fw_current_inpsmode; + u8 reg_max_lps_awakeintvl; + bool report_linked; + bool b_low_power_enable;/*for 32k*/ + + /*for IPS */ + bool b_inactiveps; + + u32 rfoff_reason; + + /*RF OFF Level */ + u32 cur_ps_level; + u32 reg_rfps_level; + + /*just for PCIE ASPM */ + u8 const_amdpci_aspm; + + enum rf_pwrstate inactive_pwrstate; + enum rf_pwrstate rfpwr_state; /*cur power state */ + + /* for SW LPS*/ + bool sw_ps_enabled; + bool state; + bool state_inap; + bool multi_buffered; + u16 nullfunc_seq; + unsigned int dtim_counter; + unsigned int sleep_ms; + unsigned long last_sleep_jiffies; + unsigned long last_awake_jiffies; + unsigned long last_delaylps_stamp_jiffies; + unsigned long last_dtim; + unsigned long last_beacon; + unsigned long last_action; + unsigned long last_slept; + + /*For P2P PS */ + struct rtl_p2p_ps_info p2p_ps_info; + u8 pwr_mode; + u8 smart_ps; +}; + +struct rtl_stats { + u8 psaddr[ETH_ALEN]; + u32 mac_time[2]; + s8 rssi; + u8 signal; + u8 noise; + u8 rate; /* hw desc rate */ + u8 rawdata; + u8 received_channel; + u8 control; + u8 mask; + u8 freq; + u16 len; + u64 tsf; + u32 beacon_time; + u8 nic_type; + u16 length; + u8 signalquality; /*in 0-100 index. */ + /* + * Real power in dBm for this packet, + * no beautification and aggregation. + * */ + s32 recvsignalpower; + s8 rxpower; /*in dBm Translate from PWdB */ + u8 signalstrength; /*in 0-100 index. */ + u16 b_hwerror:1; + u16 b_crc:1; + u16 b_icv:1; + u16 b_shortpreamble:1; + u16 antenna:1; + u16 decrypted:1; + u16 wakeup:1; + u32 timestamp_low; + u32 timestamp_high; + bool b_shift; + + u8 rx_drvinfo_size; + u8 rx_bufshift; + bool b_isampdu; + bool b_isfirst_ampdu; + bool rx_is40Mhzpacket; + u32 rx_pwdb_all; + u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ + s8 rx_mimo_signalquality[4]; + u8 rx_mimo_evm_dbm[4]; + u16 cfo_short[4]; /* per-path's Cfo_short */ + u16 cfo_tail[4]; + + u8 rx_pwr[4]; /* per-path's pwdb */ + u8 rx_snr[4]; /* per-path's SNR */ + u8 bandwidth; + u8 bt_coex_pwr_adjust; + bool b_packet_matchbssid; + bool b_is_cck; + bool b_is_ht; + bool b_packet_toself; + bool b_packet_beacon; /*for rssi */ + char cck_adc_pwdb[4]; /*for rx path selection */ + + u8 packet_report_type; + + u32 macid; + u8 wake_match; + u32 bt_rx_rssi_percentage; + u32 macid_valid_entry[2]; +}; + +struct rt_link_detect { + /* count for raoming */ + u32 bcn_rx_inperiod; + u32 roam_times; + + u32 num_tx_in4period[4]; + u32 num_rx_in4period[4]; + + u32 num_tx_inperiod; + u32 num_rx_inperiod; + + bool b_busytraffic; + bool b_tx_busy_traffic; + bool b_rx_busy_traffic; + bool b_higher_busytraffic; + bool b_higher_busyrxtraffic; + + u32 tidtx_in4period[MAX_TID_COUNT][4]; + u32 tidtx_inperiod[MAX_TID_COUNT]; + bool higher_busytxtraffic[MAX_TID_COUNT]; +}; + +struct rtl_tcb_desc { + u8 b_packet_bw:1; + u8 b_multicast:1; + u8 b_broadcast:1; + + u8 b_rts_stbc:1; + u8 b_rts_enable:1; + u8 b_cts_enable:1; + u8 b_rts_use_shortpreamble:1; + u8 b_rts_use_shortgi:1; + u8 rts_sc:1; + u8 b_rts_bw:1; + u8 rts_rate; + + u8 use_shortgi:1; + u8 use_shortpreamble:1; + u8 use_driver_rate:1; + u8 disable_ratefallback:1; + + u8 ratr_index; + u8 mac_id; + u8 hw_rate; + + u8 b_last_inipkt:1; + u8 b_cmd_or_init:1; + u8 queue_index; + + /* early mode */ + u8 empkt_num; + /* The max value by HW */ + u32 empkt_len[10]; + bool btx_enable_sw_calc_duration; + /* used for hal construct pkt, + * we may set desc when tx */ + u8 self_desc; +}; + +struct proxim { + bool proxim_on; + + void *proximity_priv; + int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats 
*status, + struct sk_buff *skb); + u8 (*proxim_get_var)(struct ieee80211_hw *hw, u8 type); +}; + +struct rtl_hal_ops { + int (*init_sw_vars) (struct ieee80211_hw * hw); + void (*deinit_sw_vars) (struct ieee80211_hw * hw); + void (*read_eeprom_info) (struct ieee80211_hw * hw); + void (*interrupt_recognized) (struct ieee80211_hw * hw, + u32 * p_inta, u32 * p_intb); + int (*hw_init) (struct ieee80211_hw * hw); + void (*hw_disable) (struct ieee80211_hw * hw); + void (*hw_suspend) (struct ieee80211_hw * hw); + void (*hw_resume) (struct ieee80211_hw * hw); + void (*enable_interrupt) (struct ieee80211_hw * hw); + void (*disable_interrupt) (struct ieee80211_hw * hw); + int (*set_network_type) (struct ieee80211_hw * hw, + enum nl80211_iftype type); + void (*set_chk_bssid)(struct ieee80211_hw *hw, + bool check_bssid); + void (*set_bw_mode) (struct ieee80211_hw * hw, + enum nl80211_channel_type ch_type); + u8(*switch_channel) (struct ieee80211_hw * hw); + void (*set_qos) (struct ieee80211_hw * hw, int aci); + void (*set_bcn_reg) (struct ieee80211_hw * hw); + void (*set_bcn_intv) (struct ieee80211_hw * hw); + void (*update_interrupt_mask) (struct ieee80211_hw * hw, + u32 add_msr, u32 rm_msr); + void (*get_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val); + void (*set_hw_reg) (struct ieee80211_hw * hw, u8 variable, u8 * val); + void (*update_rate_tbl) (struct ieee80211_hw * hw, + struct ieee80211_sta *sta, u8 rssi_level); + void (*pre_fill_tx_bd_desc) (struct ieee80211_hw *hw, u8 *tx_bd_desc, + u8 *desc, u8 queue_index, + struct sk_buff *skb, dma_addr_t addr); + u16 (*rx_desc_buff_remained_cnt) (struct ieee80211_hw *hw, + u8 queue_index); + void (*rx_check_dma_ok) (struct ieee80211_hw *hw, u8 *header_desc, + u8 queue_index); + void (*fill_tx_desc) (struct ieee80211_hw * hw, + struct ieee80211_hdr * hdr, + u8 * pdesc_tx, u8 * pbd_desc, + struct ieee80211_tx_info * info, +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) +/**/ + struct ieee80211_sta *sta, +/**/ +#endif +/**/ + struct sk_buff * skb, u8 hw_queue, + struct rtl_tcb_desc *ptcb_desc); + void (*fill_tx_cmddesc) (struct ieee80211_hw * hw, u8 * pdesc, + bool b_firstseg, bool b_lastseg, + struct sk_buff * skb); + bool(*query_rx_desc) (struct ieee80211_hw * hw, + struct rtl_stats * status, + struct ieee80211_rx_status * rx_status, + u8 * pdesc, struct sk_buff * skb); + void (*set_channel_access) (struct ieee80211_hw * hw); + bool(*radio_onoff_checking) (struct ieee80211_hw * hw, u8 * valid); + void (*dm_watchdog) (struct ieee80211_hw * hw); + void (*scan_operation_backup) (struct ieee80211_hw * hw, u8 operation); + bool(*set_rf_power_state) (struct ieee80211_hw * hw, + enum rf_pwrstate rfpwr_state); + void (*led_control) (struct ieee80211_hw * hw, + enum led_ctl_mode ledaction); + void (*set_desc) (struct ieee80211_hw *hw, u8 * pdesc, bool istx, + u8 desc_name, u8 * val); + u32(*get_desc) (u8 * pdesc, bool istx, u8 desc_name); + bool (*is_tx_desc_closed) (struct ieee80211_hw *hw, + u8 hw_queue, u16 index); + void (*tx_polling) (struct ieee80211_hw * hw, u8 hw_queue); + void (*enable_hw_sec) (struct ieee80211_hw * hw); + void (*set_key) (struct ieee80211_hw * hw, u32 key_index, + u8 * p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); + void (*init_sw_leds) (struct ieee80211_hw * hw); + u32(*get_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask); + void (*set_bbreg) (struct ieee80211_hw * hw, u32 regaddr, u32 bitmask, + u32 data); + u32(*get_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath, + 
u32 regaddr, u32 bitmask); + void (*set_rfreg) (struct ieee80211_hw * hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data); + void (*allow_all_destaddr)(struct ieee80211_hw *hw, + bool allow_all_da, bool write_into_reg); + void (*linked_set_reg) (struct ieee80211_hw * hw); + void (*check_switch_to_dmdp) (struct ieee80211_hw * hw); + void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw); + void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw); + void (*c2h_command_handle) (struct ieee80211_hw *hw); + void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, bool mstate); + void (*bt_turn_off_bt_coexist_before_enter_lps) (struct ieee80211_hw *hw); + void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); + bool (*get_btc_status) (void); + u32 (*rx_command_packet_handler)(struct ieee80211_hw *hw, struct rtl_stats status, struct sk_buff *skb); +}; + +struct rtl_intf_ops { + /*com */ + void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); + int (*adapter_start) (struct ieee80211_hw * hw); + void (*adapter_stop) (struct ieee80211_hw * hw); + bool (*check_buddy_priv)(struct ieee80211_hw *hw, + struct rtl_priv **buddy_priv); + +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + int (*adapter_tx) (struct ieee80211_hw * hw, struct sk_buff * skb, + struct rtl_tcb_desc *ptcb_desc); +#else +/**/ + int (*adapter_tx) (struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc); +/**/ +#endif +/**/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) + void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop); +#else + void (*flush)(struct ieee80211_hw *hw, bool drop); +#endif + int (*reset_trx_ring) (struct ieee80211_hw * hw); +/**/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) + bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); +#else +/**/ + bool (*waitq_insert) (struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb); +/**/ +#endif +/**/ + + /*pci */ + void (*disable_aspm) (struct ieee80211_hw * hw); + void (*enable_aspm) (struct ieee80211_hw * hw); + + /*usb */ +}; + +struct rtl_mod_params { + /* default: 0 = using hardware encryption */ + bool sw_crypto; + + /* default: 1 = using no linked power save */ + bool b_inactiveps; + + /* default: 1 = using linked sw power save */ + bool b_swctrl_lps; + + /* default: 1 = using linked fw power save */ + bool b_fwctrl_lps; +}; + +struct rtl_hal_cfg { + u8 bar_id; + bool write_readback; + char *name; + char *fw_name; + struct rtl_hal_ops *ops; + struct rtl_mod_params *mod_params; + + /*this map used for some registers or vars + defined int HAL but used in MAIN */ + u32 maps[RTL_VAR_MAP_MAX]; + +}; + +struct rtl_locks { + /* mutex */ + struct mutex conf_mutex; + + /*spin lock */ + spinlock_t ips_lock; + spinlock_t irq_th_lock; + spinlock_t h2c_lock; + spinlock_t rf_ps_lock; + spinlock_t rf_lock; + spinlock_t lps_lock; + spinlock_t waitq_lock; + spinlock_t entry_list_lock; + + /*FW clock change */ + spinlock_t fw_ps_lock; + + /*Dul mac*/ + spinlock_t cck_and_rw_pagea_lock; + + /*Easy concurrent*/ + spinlock_t check_sendpkt_lock; + + spinlock_t iqk_lock; +}; + +struct rtl_works { + struct ieee80211_hw *hw; + + /*timer */ + struct timer_list watchdog_timer; + struct timer_list dualmac_easyconcurrent_retrytimer; + struct timer_list fw_clockoff_timer; + struct timer_list fast_antenna_trainning_timer; + /*task */ + struct tasklet_struct irq_tasklet; + struct tasklet_struct 
irq_prepare_bcn_tasklet; + + /*work queue */ + struct workqueue_struct *rtl_wq; + struct delayed_work watchdog_wq; + struct delayed_work ips_nic_off_wq; + + /* For SW LPS */ + struct delayed_work ps_work; + struct delayed_work ps_rfon_wq; + struct delayed_work fwevt_wq; +}; + +struct rtl_debug { + u32 dbgp_type[DBGP_TYPE_MAX]; + u32 global_debuglevel; + u64 global_debugcomponents; + + /* add for proc debug */ + struct proc_dir_entry *proc_dir; + char proc_name[20]; +}; + +#define MIMO_PS_STATIC 0 +#define MIMO_PS_DYNAMIC 1 +#define MIMO_PS_NOLIMIT 3 + +struct rtl_dualmac_easy_concurrent_ctl { + enum band_type currentbandtype_backfordmdp; + bool bclose_bbandrf_for_dmsp; + bool bchange_to_dmdp; + bool bchange_to_dmsp; + bool bswitch_in_process; +}; + +struct rtl_dmsp_ctl { + bool bactivescan_for_slaveofdmsp; + bool bscan_for_anothermac_fordmsp; + bool bscan_for_itself_fordmsp; + bool bwritedig_for_anothermacofdmsp; + u32 curdigvalue_for_anothermacofdmsp; + bool bchangecckpdstate_for_anothermacofdmsp; + u8 curcckpdstate_for_anothermacofdmsp; + bool bchangetxhighpowerlvl_for_anothermacofdmsp; + u8 curtxhighlvl_for_anothermacofdmsp; + long rssivalmin_for_anothermacofdmsp; +}; + +struct rtl_global_var { + /* from this list we can get + * other adapter's rtl_priv */ + struct list_head glb_priv_list; + spinlock_t glb_list_lock; +}; + +struct rtl_btc_info { + u8 bt_type; + u8 btcoexist; + u8 ant_num; +}; + +struct rtl_btc_ops { + void (*btc_init_variables) (struct rtl_priv *rtlpriv); + void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); + void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); + void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); + void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype); + void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action); + void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv, + enum rt_media_status mstatus); + void (*btc_periodical) (struct rtl_priv *rtlpriv); + void (*btc_halt_notify) (void); + void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, + u8 * tmp_buf, u8 length); + bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); + bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); + bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); +}; + +struct rtl_bt_coexist { + struct rtl_btc_ops *btc_ops; + struct rtl_btc_info btc_info; +}; + + +struct rtl_priv { + struct list_head list; +#ifdef VIF_TODO + struct vif_priv vif_priv; +#endif + struct rtl_priv *buddy_priv; + struct rtl_global_var *glb_var; + struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl; + struct rtl_dmsp_ctl dmsp_ctl; + struct rtl_locks locks; + struct rtl_works works; + struct rtl_mac mac80211; + struct rtl_hal rtlhal; + struct rtl_regulatory regd; + struct rtl_rfkill rfkill; + struct rtl_io io; + struct rtl_phy phy; + struct rtl_dm dm; + struct rtl_security sec; + struct rtl_efuse efuse; + + struct rtl_ps_ctl psc; + struct rate_adaptive ra; + struct dynamic_primary_cca primarycca; + struct wireless_stats stats; + struct rt_link_detect link_info; + struct false_alarm_statistics falsealm_cnt; + + struct rtl_rate_priv *rate_priv; + + struct rtl_debug dbg; + + /* sta entry list for ap adhoc or mesh */ + struct list_head entry_list; + + /* + *hal_cfg : for diff cards + *intf_ops : for diff interrface usb/pcie + */ + struct rtl_hal_cfg *cfg; + struct rtl_intf_ops *intf_ops; + + /*this var will be set by set_bit, + and was used to indicate status of + interface or hardware */ + unsigned long status; + + /* intel Proximity, should be alloc mem + * in 
intel Proximity module and can only + * be used in intel Proximity mode */ + struct proxim proximity; + + /*for bt coexist use*/ + struct rtl_bt_coexist btcoexist; + + /* seperate 92ee from other ICs, + * 92ee use new trx flow. */ + bool use_new_trx_flow; + /*This must be the last item so + that it points to the data allocated + beyond this structure like: + rtl_pci_priv or rtl_usb_priv */ + u8 priv[0]; +}; + +#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) +#define rtl_mac(rtlpriv) (&((rtlpriv)->mac80211)) +#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal)) +#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse)) +#define rtl_psc(rtlpriv) (&((rtlpriv)->psc)) +#define rtl_sec(rtlpriv) (&((rtlpriv)->sec)) +#define rtl_dm(rtlpriv) (&((rtlpriv)->dm)) +/*************************************** + Bluetooth Co-existance Related +****************************************/ + +enum bt_ant_num { + ANT_X2 = 0, + ANT_X1 = 1, +}; + +enum bt_co_type { + BT_2WIRE = 0, + BT_ISSC_3WIRE = 1, + BT_ACCEL = 2, + BT_CSR_BC4 = 3, + BT_CSR_BC8 = 4, + BT_RTL8756 = 5, + BT_RTL8723A = 6, + BT_RTL8821A = 7, + BT_RTL8723B = 8, + BT_RTL8192E = 9, + BT_RTL8812A = 11, +}; + +enum bt_total_ant_num{ + ANT_TOTAL_X2 = 0, + ANT_TOTAL_X1 = 1 +}; + +enum bt_cur_state { + BT_OFF = 0, + BT_ON = 1, +}; + +enum bt_service_type { + BT_SCO = 0, + BT_A2DP = 1, + BT_HID = 2, + BT_HID_IDLE = 3, + BT_SCAN = 4, + BT_IDLE = 5, + BT_OTHER_ACTION = 6, + BT_BUSY = 7, + BT_OTHERBUSY = 8, + BT_PAN = 9, +}; + +enum bt_radio_shared { + BT_RADIO_SHARED = 0, + BT_RADIO_INDIVIDUAL = 1, +}; + +struct bt_coexist_info { + + /* EEPROM BT info. */ + u8 eeprom_bt_coexist; + u8 eeprom_bt_type; + u8 eeprom_bt_ant_num; + u8 eeprom_bt_ant_isolation; + u8 eeprom_bt_radio_shared; + + u8 bt_coexistence; + u8 bt_ant_num; + u8 bt_coexist_type; + u8 bt_state; + u8 bt_cur_state; /* 0:on, 1:off */ + u8 bt_ant_isolation; /* 0:good, 1:bad */ + u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */ + u8 bt_service; + u8 bt_radio_shared_type; + u8 bt_rfreg_origin_1e; + u8 bt_rfreg_origin_1f; + u8 bt_rssi_state; + u32 ratio_tx; + u32 ratio_pri; + u32 bt_edca_ul; + u32 bt_edca_dl; + + bool b_init_set; + bool b_bt_busy_traffic; + bool b_bt_traffic_mode_set; + bool b_bt_non_traffic_mode_set; + + bool b_fw_coexist_all_off; + bool b_sw_coexist_all_off; + bool b_hw_coexist_all_off; + u32 current_state; + u32 previous_state; + u32 current_state_h; + u32 previous_state_h; + + u8 bt_pre_rssi_state; + u8 bt_pre_rssi_state1; + + u8 b_reg_bt_iso; + u8 b_reg_bt_sco; + bool b_balance_on; + u8 bt_active_zero_cnt; + bool b_cur_bt_disabled; + bool b_pre_bt_disabled; + + u8 bt_profile_case; + u8 bt_profile_action; + bool b_bt_busy; + bool b_hold_for_bt_operation; + u8 lps_counter; +}; + + +/**************************************** + mem access macro define start + Call endian free function when + 1. Read/write packet content. + 2. Before write integer to IO. + 3. After read integer from IO. 
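   For example (illustrative only; pdesc here stands for a u8 * into a
   little-endian hardware descriptor):

       seq = LE_BITS_TO_4BYTE(pdesc + 4, 8, 12);

   reads the 12-bit field starting at bit 8 of the second descriptor
   dword. Open-coded casts and shifts would only be correct on
   little-endian CPUs; the macros below keep descriptor access
   endian-clean on both byte orders.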
+****************************************/ +/* Convert little data endian to host */ +#define EF1BYTE(_val) \ + ((u8)(_val)) +#define EF2BYTE(_val) \ + (le16_to_cpu(_val)) +#define EF4BYTE(_val) \ + (le32_to_cpu(_val)) + +/* Read data from memory */ +#define READEF1BYTE(_ptr) \ + EF1BYTE(*((u8 *)(_ptr))) +#define READEF2BYTE(_ptr) \ + EF2BYTE(*((u16 *)(_ptr))) +#define READEF4BYTE(_ptr) \ + EF4BYTE(*((u32 *)(_ptr))) + +/* Write data to memory */ +#define WRITEEF1BYTE(_ptr, _val) \ + (*((u8 *)(_ptr)))=EF1BYTE(_val) +#define WRITEEF2BYTE(_ptr, _val) \ + (*((u16 *)(_ptr)))=EF2BYTE(_val) +#define WRITEEF4BYTE(_ptr, _val) \ + (*((u32 *)(_ptr)))=EF4BYTE(_val) + +/*Example: +BIT_LEN_MASK_32(0) => 0x00000000 +BIT_LEN_MASK_32(1) => 0x00000001 +BIT_LEN_MASK_32(2) => 0x00000003 +BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/ +#define BIT_LEN_MASK_32(__bitlen) \ + (0xFFFFFFFF >> (32 - (__bitlen))) +#define BIT_LEN_MASK_16(__bitlen) \ + (0xFFFF >> (16 - (__bitlen))) +#define BIT_LEN_MASK_8(__bitlen) \ + (0xFF >> (8 - (__bitlen))) + +/*Example: +BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003 +BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/ +#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \ + (BIT_LEN_MASK_32(__bitlen) << (__bitoffset)) +#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \ + (BIT_LEN_MASK_16(__bitlen) << (__bitoffset)) +#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \ + (BIT_LEN_MASK_8(__bitlen) << (__bitoffset)) + +/*Description: +Return 4-byte value in host byte ordering from +4-byte pointer in little-endian system.*/ +#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \ + (EF4BYTE(*((u32 *)(__pstart)))) +#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \ + (EF2BYTE(*((u16 *)(__pstart)))) +#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ + (EF1BYTE(*((u8 *)(__pstart)))) + +/*Description: +Translate subfield (continuous bits in little-endian) of 4-byte +value to host byte ordering.*/ +#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ + ( \ + ( LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset) ) & \ + BIT_LEN_MASK_32(__bitlen) \ + ) +#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \ + ( \ + ( LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset) ) & \ + BIT_LEN_MASK_16(__bitlen) \ + ) +#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \ + ( \ + ( LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset) ) & \ + BIT_LEN_MASK_8(__bitlen) \ + ) + +/*Description: +Mask subfield (continuous bits in little-endian) of 4-byte value +and return the result in 4-byte value in host byte ordering.*/ +#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ + ( \ + LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \ + ( ~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) ) \ + ) +#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \ + ( \ + LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \ + ( ~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) ) \ + ) +#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \ + ( \ + LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \ + ( ~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) ) \ + ) + +/*Description: +Set subfield of little-endian 4-byte value to specified value. 
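 E.g. SET_BITS_TO_LE_4BYTE(__pstart, 16, 4, 0x5) clears bits 16..19 of
 the little-endian dword at __pstart and then stores 0x5 there, leaving
 the other 28 bits untouched (an illustrative call, not a quote from the
 driver).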
*/ +#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ + *((u32 *)(__pstart)) = EF4BYTE \ + ( \ + LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \ + ( (((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset) )\ + ); +#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \ + *((u16 *)(__pstart)) = EF2BYTE \ + ( \ + LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \ + ( (((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset) )\ + ); +#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \ + *((u8 *)(__pstart)) = EF1BYTE \ + ( \ + LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \ + ( (((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset) ) \ + ); + +#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \ + (__value) : (((__value + __aligment - 1) / __aligment) * __aligment)) + +/**************************************** + mem access macro define end +****************************************/ + +#define byte(x,n) ((x >> (8 * n)) & 0xff) + +#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC) +#define RTL_WATCH_DOG_TIME 2000 +#define MSECS(t) msecs_to_jiffies(t) +#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS) +#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE) +#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) +#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA) +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */ +#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */ +#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /*PCI D3 mode */ +/*NIC halt, re-initialize hw parameters*/ +#define RT_RF_OFF_LEVL_HALT_NIC BIT(3) +#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /*FW free, re-download the FW */ +#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */ +/*Always enable ASPM and Clock Req in initialization.*/ +#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) +/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/ +#define RT_PS_LEVEL_ASPM BIT(7) +/*When LPS is on, disable 2R if no packet is received or transmittd.*/ +#define RT_RF_LPS_DISALBE_2R BIT(30) +#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */ +#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \ + ((ppsc->cur_ps_level & _ps_flg) ? 
true : false) +#define RT_CLEAR_PS_LEVEL(ppsc, _ps_flg) \ + (ppsc->cur_ps_level &= (~(_ps_flg))) +#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \ + (ppsc->cur_ps_level |= _ps_flg) + +#define container_of_dwork_rtl(x,y,z) \ + container_of(container_of(x, struct delayed_work, work), y, z) + +#define FILL_OCTET_STRING(_os,_octet,_len) \ + (_os).octet=(u8*)(_octet); \ + (_os).length=(_len); + +#define CP_MACADDR(des,src) \ + ((des)[0]=(src)[0],(des)[1]=(src)[1],\ + (des)[2]=(src)[2],(des)[3]=(src)[3],\ + (des)[4]=(src)[4],(des)[5]=(src)[5]) + +static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) +{ + return rtlpriv->io.read8_sync(rtlpriv, addr); +} + +static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr) +{ + return rtlpriv->io.read16_sync(rtlpriv, addr); +} + +static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr) +{ + return rtlpriv->io.read32_sync(rtlpriv, addr); +} + +static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8) +{ + rtlpriv->io.write8_async(rtlpriv, addr, val8); + + if (rtlpriv->cfg->write_readback) + rtlpriv->io.read8_sync(rtlpriv, addr); +} + +static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16) +{ + rtlpriv->io.write16_async(rtlpriv, addr, val16); + + if (rtlpriv->cfg->write_readback) + rtlpriv->io.read16_sync(rtlpriv, addr); +} + +static inline void rtl_write_dword(struct rtl_priv *rtlpriv, + u32 addr, u32 val32) +{ + rtlpriv->io.write32_async(rtlpriv, addr, val32); + + if (rtlpriv->cfg->write_readback) + rtlpriv->io.read32_sync(rtlpriv, addr); +} + +static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask) +{ + return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw, + regaddr, + bitmask); +} + +static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr, + u32 bitmask, u32 data) +{ + ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw, + regaddr, bitmask, + data); + +} + +static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask) +{ + return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw, + rfpath, + regaddr, + bitmask); +} + +static inline void rtl_set_rfreg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask, u32 data) +{ + ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw, + rfpath, regaddr, + bitmask, data); +} + +static inline bool is_hal_stop(struct rtl_hal *rtlhal) +{ + return (_HAL_STATE_STOP == rtlhal->state); +} + +static inline void set_hal_start(struct rtl_hal *rtlhal) +{ + rtlhal->state = _HAL_STATE_START; +} + +static inline void set_hal_stop(struct rtl_hal *rtlhal) +{ + rtlhal->state = _HAL_STATE_STOP; +} + +static inline u8 get_rf_type(struct rtl_phy *rtlphy) +{ + return rtlphy->rf_type; +} + +static inline struct ieee80211_hdr *rtl_get_hdr(struct sk_buff *skb) +{ + return (struct ieee80211_hdr *)(skb->data); +} + +static inline u16 rtl_get_fc(struct sk_buff *skb) +{ + return le16_to_cpu(rtl_get_hdr(skb)->frame_control); +} + +static inline u16 rtl_get_tid_h(struct ieee80211_hdr *hdr) +{ + return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK; +} + +static inline u16 rtl_get_tid(struct sk_buff *skb) +{ + return rtl_get_tid_h(rtl_get_hdr(skb)); +} + +static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw, + u8 *mac_addr) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + return ieee80211_find_sta(mac->vif, mac_addr); +} + +struct ieee80211_hw *rtl_pci_get_hw_pointer(void); +#endif --- 
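A minimal usage sketch of the register helpers above (not part of the patch; REG_EXAMPLE_CR and example_enable_feature are made-up names). rtl_write_byte() goes through the asynchronous rtl_io hook and, when cfg->write_readback is set, immediately reads the same register back - the usual way to make sure the posted write has reached the device before the caller continues:

#define REG_EXAMPLE_CR	0x0100	/* made-up register offset, illustration only */

static inline void example_enable_feature(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 cr = rtl_read_byte(rtlpriv, REG_EXAMPLE_CR);

	/* read-modify-write; the readback (if enabled) flushes the write */
	rtl_write_byte(rtlpriv, REG_EXAMPLE_CR, cr | BIT(0));
}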
linux-3.13.0.orig/drivers/staging/serqt_usb2/serqt_usb2.c +++ linux-3.13.0/drivers/staging/serqt_usb2/serqt_usb2.c @@ -725,7 +725,7 @@ goto startup_error; } - switch (serial->dev->descriptor.idProduct) { + switch (le16_to_cpu(serial->dev->descriptor.idProduct)) { case QUATECH_DSU100: case QUATECH_QSU100: case QUATECH_ESU100A: --- linux-3.13.0.orig/drivers/staging/speakup/main.c +++ linux-3.13.0/drivers/staging/speakup/main.c @@ -2220,6 +2220,7 @@ unregister_keyboard_notifier(&keyboard_notifier_block); unregister_vt_notifier(&vt_notifier_block); speakup_unregister_devsynth(); + speakup_cancel_paste(); del_timer(&cursor_timer); kthread_stop(speakup_task); speakup_task = NULL; --- linux-3.13.0.orig/drivers/staging/speakup/selection.c +++ linux-3.13.0/drivers/staging/speakup/selection.c @@ -4,6 +4,8 @@ #include #include /* for dev_warn */ #include +#include +#include #include "speakup.h" @@ -121,20 +123,24 @@ return 0; } -/* TODO: move to some helper thread, probably. That'd fix having to check for - * in_atomic(). */ -int speakup_paste_selection(struct tty_struct *tty) -{ +struct speakup_paste_work { + struct work_struct work; + struct tty_struct *tty; +}; + +static void __speakup_paste_selection(struct work_struct *work) +{ + struct speakup_paste_work *spw = + container_of(work, struct speakup_paste_work, work); + struct tty_struct *tty = xchg(&spw->tty, NULL); struct vc_data *vc = (struct vc_data *) tty->driver_data; int pasted = 0, count; DECLARE_WAITQUEUE(wait, current); + add_wait_queue(&vc->paste_wait, &wait); while (sel_buffer && sel_buffer_lth > pasted) { set_current_state(TASK_INTERRUPTIBLE); if (test_bit(TTY_THROTTLED, &tty->flags)) { - if (in_atomic()) - /* if we are in an interrupt handler, abort */ - break; schedule(); continue; } @@ -146,6 +152,26 @@ } remove_wait_queue(&vc->paste_wait, &wait); current->state = TASK_RUNNING; + tty_kref_put(tty); +} + +static struct speakup_paste_work speakup_paste_work = { + .work = __WORK_INITIALIZER(speakup_paste_work.work, + __speakup_paste_selection) +}; + +int speakup_paste_selection(struct tty_struct *tty) +{ + if (cmpxchg(&speakup_paste_work.tty, NULL, tty) != NULL) + return -EBUSY; + + tty_kref_get(tty); + schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work); return 0; } +void speakup_cancel_paste(void) +{ + cancel_work_sync(&speakup_paste_work.work); + tty_kref_put(speakup_paste_work.tty); +} --- linux-3.13.0.orig/drivers/staging/speakup/speakup.h +++ linux-3.13.0/drivers/staging/speakup/speakup.h @@ -77,6 +77,7 @@ extern void speakup_clear_selection(void); extern int speakup_set_selection(struct tty_struct *tty); extern int speakup_paste_selection(struct tty_struct *tty); +extern void speakup_cancel_paste(void); extern void speakup_register_devsynth(void); extern void speakup_unregister_devsynth(void); extern void synth_write(const char *buf, size_t count); --- linux-3.13.0.orig/drivers/staging/vt6655/bssdb.c +++ linux-3.13.0/drivers/staging/vt6655/bssdb.c @@ -1029,7 +1029,7 @@ pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1)); } - { + if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) { pDevice->byReAssocCount++; if ((pDevice->byReAssocCount > 10) && (pDevice->bLinkPass != true)) { //10 sec timeout printk("Re-association timeout!!!\n"); --- linux-3.13.0.orig/drivers/staging/vt6655/device_main.c +++ linux-3.13.0/drivers/staging/vt6655/device_main.c @@ -2431,6 +2431,7 @@ unsigned char byData = 0; int ii = 0; // unsigned char byRSSI; + unsigned long flags; MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr); @@ -2455,7 
+2456,8 @@ handled = 1; MACvIntDisable(pDevice->PortOffset); - spin_lock_irq(&pDevice->lock); + + spin_lock_irqsave(&pDevice->lock, flags); //Make sure current page is 0 VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel); @@ -2696,7 +2698,8 @@ MACvSelectPage1(pDevice->PortOffset); } - spin_unlock_irq(&pDevice->lock); + spin_unlock_irqrestore(&pDevice->lock, flags); + MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE); return IRQ_RETVAL(handled); --- linux-3.13.0.orig/drivers/staging/vt6655/rf.c +++ linux-3.13.0/drivers/staging/vt6655/rf.c @@ -936,6 +936,7 @@ break; case RATE_6M: case RATE_9M: + case RATE_12M: case RATE_18M: byPwr = pDevice->abyOFDMPwrTbl[uCH]; if (pDevice->byRFType == RF_UW2452) { --- linux-3.13.0.orig/drivers/staging/vt6656/baseband.c +++ linux-3.13.0/drivers/staging/vt6656/baseband.c @@ -1464,7 +1464,6 @@ if( bScanning ) { // need Max sensitivity //RSSI -69, -70,.... - if(pDevice->byBBPreEDIndex == 0) break; pDevice->byBBPreEDIndex = 0; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9) ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE) @@ -1607,7 +1606,6 @@ if( bScanning ) { // need Max sensitivity //RSSI -69, -70, ... - if(pDevice->byBBPreEDIndex == 0) break; pDevice->byBBPreEDIndex = 0; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9) ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE) @@ -1759,7 +1757,6 @@ case RF_VT3342A0: //RobertYu:20060627, testing table if( bScanning ) { // need Max sensitivity //RSSI -67, -68, ... - if(pDevice->byBBPreEDIndex == 0) break; pDevice->byBBPreEDIndex = 0; ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9) ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE) --- linux-3.13.0.orig/drivers/staging/vt6656/card.c +++ linux-3.13.0/drivers/staging/vt6656/card.c @@ -731,7 +731,7 @@ uBeaconInterval = wBeaconInterval * 1024; // Next TBTT = ((local_current_TSF / beacon_interval) + 1 ) * beacon_interval - uLowNextTBTT = ((qwTSF & 0xffffffffU) >> 10) << 10; + uLowNextTBTT = ((qwTSF & 0xffffffffULL) >> 10) << 10; uLowRemain = (uLowNextTBTT) % uBeaconInterval; uHighRemain = ((0x80000000 % uBeaconInterval) * 2 * (u32)(qwTSF >> 32)) % uBeaconInterval; --- linux-3.13.0.orig/drivers/staging/zram/zram_drv.c +++ linux-3.13.0/drivers/staging/zram/zram_drv.c @@ -430,7 +430,8 @@ } if (page_zero_filled(uncmem)) { - kunmap_atomic(user_mem); + if (user_mem) + kunmap_atomic(user_mem); /* Free memory associated with this sector now. 
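 (For a partial write, user_mem has already been unmapped and set to
 NULL after its contents were copied into the separately allocated
 uncmem buffer, which is why the kunmap_atomic() above is now guarded.)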
*/ zram_free_page(zram, index); @@ -552,14 +553,14 @@ size_t index; struct zram_meta *meta; - flush_work(&zram->free_work); - down_write(&zram->init_lock); if (!zram->init_done) { up_write(&zram->init_lock); return; } + flush_work(&zram->free_work); + meta = zram->meta; zram->init_done = 0; @@ -621,6 +622,8 @@ disksize = PAGE_ALIGN(disksize); meta = zram_meta_alloc(disksize); + if (!meta) + return -ENOMEM; down_write(&zram->init_lock); if (zram->init_done) { up_write(&zram->init_lock); --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target.c @@ -52,7 +52,7 @@ static LIST_HEAD(g_tiqn_list); static LIST_HEAD(g_np_list); static DEFINE_SPINLOCK(tiqn_lock); -static DEFINE_SPINLOCK(np_lock); +static DEFINE_MUTEX(np_lock); static struct idr tiqn_idr; struct idr sess_idr; @@ -307,6 +307,9 @@ return false; } +/* + * Called with mutex np_lock held + */ static struct iscsi_np *iscsit_get_np( struct __kernel_sockaddr_storage *sockaddr, int network_transport) @@ -314,11 +317,10 @@ struct iscsi_np *np; bool match; - spin_lock_bh(&np_lock); list_for_each_entry(np, &g_np_list, np_list) { - spin_lock(&np->np_thread_lock); + spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) { - spin_unlock(&np->np_thread_lock); + spin_unlock_bh(&np->np_thread_lock); continue; } @@ -330,13 +332,11 @@ * while iscsi_tpg_add_network_portal() is called. */ np->np_exports++; - spin_unlock(&np->np_thread_lock); - spin_unlock_bh(&np_lock); + spin_unlock_bh(&np->np_thread_lock); return np; } - spin_unlock(&np->np_thread_lock); + spin_unlock_bh(&np->np_thread_lock); } - spin_unlock_bh(&np_lock); return NULL; } @@ -350,16 +350,22 @@ struct sockaddr_in6 *sock_in6; struct iscsi_np *np; int ret; + + mutex_lock(&np_lock); + /* * Locate the existing struct iscsi_np if already active.. */ np = iscsit_get_np(sockaddr, network_transport); - if (np) + if (np) { + mutex_unlock(&np_lock); return np; + } np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL); if (!np) { pr_err("Unable to allocate memory for struct iscsi_np\n"); + mutex_unlock(&np_lock); return ERR_PTR(-ENOMEM); } @@ -382,6 +388,7 @@ ret = iscsi_target_setup_login_socket(np, sockaddr); if (ret != 0) { kfree(np); + mutex_unlock(&np_lock); return ERR_PTR(ret); } @@ -390,6 +397,7 @@ pr_err("Unable to create kthread: iscsi_np\n"); ret = PTR_ERR(np->np_thread); kfree(np); + mutex_unlock(&np_lock); return ERR_PTR(ret); } /* @@ -400,10 +408,10 @@ * point because iscsi_np has not been added to g_np_list yet. 
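 (With this patch the whole lookup-or-create sequence in iscsit_add_np()
 runs under the np_lock mutex taken above; a mutex rather than the old
 spinlock because the create path sleeps in kzalloc(GFP_KERNEL),
 iscsi_target_setup_login_socket() and kthread_run(), and holding it
 across the lookup keeps two login threads from racing to create
 duplicate portals for the same sockaddr.)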
*/ np->np_exports = 1; + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; - spin_lock_bh(&np_lock); list_add_tail(&np->np_list, &g_np_list); - spin_unlock_bh(&np_lock); + mutex_unlock(&np_lock); pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n", np->np_ip, np->np_port, np->np_transport->name); @@ -470,9 +478,9 @@ np->np_transport->iscsit_free_np(np); - spin_lock_bh(&np_lock); + mutex_lock(&np_lock); list_del(&np->np_list); - spin_unlock_bh(&np_lock); + mutex_unlock(&np_lock); pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n", np->np_ip, np->np_port, np->np_transport->name); @@ -622,7 +630,7 @@ { struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) return -1; @@ -777,7 +785,7 @@ spin_unlock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); iscsit_free_cmd(cmd, false); } } @@ -1156,7 +1164,7 @@ * traditional iSCSI block I/O. */ if (iscsit_allocate_iovecs(cmd) < 0) { - return iscsit_add_reject_cmd(cmd, + return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf); } immed_data = cmd->immediate_data; @@ -1281,7 +1289,7 @@ if (cmd->data_direction != DMA_TO_DEVICE) { pr_err("Command ITT: 0x%08x received DataOUT for a" " NON-WRITE command.\n", cmd->init_task_tag); - return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); + return iscsit_dump_data_payload(conn, payload_length, 1); } se_cmd = &cmd->se_cmd; iscsit_mod_dataout_timer(cmd); @@ -1556,7 +1564,9 @@ * Initiator is expecting a NopIN ping reply.. */ if (hdr->itt != RESERVED_ITT) { - BUG_ON(!cmd); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); @@ -2460,6 +2470,7 @@ { struct iscsi_cmd *cmd; struct iscsi_conn *conn_p; + bool found = false; /* * Only send a Asynchronous Message on connections whos network @@ -2468,14 +2479,15 @@ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) { if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) { iscsit_inc_conn_usage_count(conn_p); + found = true; break; } } - if (!conn_p) + if (!found) return; - cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING); if (!cmd) { iscsit_dec_conn_usage_count(conn_p); return; @@ -3348,7 +3360,9 @@ #define SENDTARGETS_BUF_LIMIT 32768U -static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd) +static int +iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, + enum iscsit_transport_type network_transport) { char *payload = NULL; struct iscsi_conn *conn = cmd->conn; @@ -3425,6 +3439,9 @@ struct iscsi_np *np = tpg_np->tpg_np; bool inaddr_any = iscsit_check_inaddr_any(np); + if (np->np_network_transport != network_transport) + continue; + if (!target_name_printed) { len = sprintf(buf, "TargetName=%s", tiqn->tiqn); @@ -3478,11 +3495,12 @@ int iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, - struct iscsi_text_rsp *hdr) + struct iscsi_text_rsp *hdr, + enum iscsit_transport_type network_transport) { int text_length, padding; - text_length = iscsit_build_sendtargets_response(cmd); + text_length = iscsit_build_sendtargets_response(cmd, network_transport); if (text_length < 0) return text_length; @@ -3520,7 +3538,7 @@ u32 tx_size = 0; int text_length, iov_count = 0, rc; - rc = iscsit_build_text_rsp(cmd, conn, hdr); + rc = 
iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP); if (rc < 0) return rc; @@ -3700,7 +3718,7 @@ break; case ISTATE_REMOVE: spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, false); @@ -3951,7 +3969,7 @@ switch (hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_SCSI_CMD: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; @@ -3963,28 +3981,28 @@ case ISCSI_OP_NOOP_OUT: cmd = NULL; if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; } ret = iscsit_handle_nop_out(conn, cmd, buf); break; case ISCSI_OP_SCSI_TMFUNC: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); break; case ISCSI_OP_TEXT: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; ret = iscsit_handle_text_cmd(conn, cmd, buf); break; case ISCSI_OP_LOGOUT: - cmd = iscsit_allocate_cmd(conn, GFP_KERNEL); + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); if (!cmd) goto reject; @@ -4143,7 +4161,7 @@ spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); @@ -4177,18 +4195,26 @@ pr_debug("Closing iSCSI connection CID %hu on SID:" " %u\n", conn->cid, sess->sid); /* - * Always up conn_logout_comp just in case the RX Thread is sleeping - * and the logout response never got sent because the connection - * failed. + * Always up conn_logout_comp for the traditional TCP case just in case + * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout + * response never got sent because the connection failed. + * + * However for iser-target, isert_wait4logout() is using conn_logout_comp + * to signal logout response TX interrupt completion. Go ahead and skip + * this for iser since isert_rx_opcode() does not wait on logout failure, + * and to avoid iscsi_conn pointer dereference in iser-target code. 
*/ - complete(&conn->conn_logout_comp); + if (conn->conn_transport->transport_type == ISCSI_TCP) + complete(&conn->conn_logout_comp); iscsi_release_thread_set(conn); iscsit_stop_timers_for_cmds(conn); iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - iscsit_free_queue_reqs_for_conn(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); /* * During Connection recovery drop unacknowledged out of order @@ -4206,6 +4232,7 @@ iscsit_clear_ooo_cmdsns_for_conn(conn); iscsit_release_commands_from_conn(conn); } + iscsit_free_queue_reqs_for_conn(conn); /* * Handle decrementing session or connection usage count if @@ -4491,6 +4518,7 @@ { struct iscsi_conn *l_conn; struct iscsi_session *sess = conn->sess; + bool conn_found = false; if (!sess) return; @@ -4499,12 +4527,13 @@ list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) { if (l_conn->cid == cid) { iscsit_inc_conn_usage_count(l_conn); + conn_found = true; break; } } spin_unlock_bh(&sess->conn_lock); - if (!l_conn) + if (!conn_found) return; if (l_conn->sock) --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_auth.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_auth.c @@ -314,6 +314,16 @@ goto out; } /* + * During mutual authentication, the CHAP_C generated by the + * initiator must not match the original CHAP_C generated by + * the target. + */ + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) { + pr_err("initiator CHAP_C matches target CHAP_C, failing" + " login attempt\n"); + goto out; + } + /* * Generate CHAP_N and CHAP_R for mutual authentication. */ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_erl2.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_erl2.c @@ -138,7 +138,7 @@ list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -160,7 +160,7 @@ list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -216,7 +216,7 @@ } cr = cmd->cr; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); return --cr->cmd_count; } @@ -297,7 +297,7 @@ if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); @@ -335,7 +335,7 @@ /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call - * list_del(&cmd->i_conn_node); to release the command to the + * list_del_init(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. 
* * Also stop the DataOUT timer, which will be restarted after @@ -351,7 +351,7 @@ " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -371,7 +371,7 @@ */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -393,7 +393,7 @@ cmd->sess = conn->sess; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_login.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_login.c @@ -249,6 +249,28 @@ mutex_unlock(&auth_id_lock); } +static __printf(2, 3) int iscsi_change_param_sprintf( + struct iscsi_conn *conn, + const char *fmt, ...) +{ + va_list args; + unsigned char buf[64]; + + memset(buf, 0, sizeof buf); + + va_start(args, fmt); + vsnprintf(buf, sizeof buf, fmt, args); + va_end(args); + + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + return 0; +} + /* * This is the leading connection of a new session, * or session reinstatement. @@ -337,7 +359,6 @@ { struct iscsi_node_attrib *na; struct iscsi_session *sess = conn->sess; - unsigned char buf[32]; bool iser = false; sess->tpg = conn->tpg; @@ -378,26 +399,16 @@ * * In our case, we have already located the struct iscsi_tiqn at this point. */ - memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) return -1; - } /* * Workaround for Initiators that have broken connection recovery logic. * * "We would really like to get rid of this." Linux-iSCSI.org team */ - memset(buf, 0, 32); - sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl)) return -1; - } if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0) return -1; @@ -409,12 +420,9 @@ unsigned long mrdsl, off; int rc; - sprintf(buf, "RDMAExtensions=Yes"); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes")) return -1; - } + /* * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for * Immediate Data + Unsolicitied Data-OUT if necessary.. 
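The iscsi_target_login.c hunks in this part of the patch replace several open-coded memset()/sprintf()/iscsi_change_param_value() sequences with the new __printf(2, 3) helper iscsi_change_param_sprintf() introduced above. The user-space sketch below only illustrates that wrapper pattern in isolation; set_param(), set_param_fmt() and the demo values in main() are illustrative stand-ins, not part of the kernel patch (the 64-byte stack buffer mirrors the helper's buf[64]).

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for iscsi_change_param_value(): accept any "Key=Value" string. */
static int set_param(const char *kv)
{
        printf("applying %s\n", kv);
        return 0;
}

/*
 * Format a "Key=Value" string and apply it in one step, mirroring the
 * shape of iscsi_change_param_sprintf(): vsnprintf() into a small stack
 * buffer, forward the result, and report failure once instead of at
 * every call site.
 */
__attribute__((format(printf, 1, 2)))
static int set_param_fmt(const char *fmt, ...)
{
        char buf[64];
        va_list args;

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        if (set_param(buf) < 0) {
                fprintf(stderr, "failed to set parameter: %s\n", buf);
                return -1;
        }
        return 0;
}

int main(void)
{
        /* Call sites shrink to a single line, as in the converted hunks. */
        if (set_param_fmt("TargetPortalGroupTag=%hu", (unsigned short)1))
                return 1;
        if (set_param_fmt("ErrorRecoveryLevel=%d", 0))
                return 1;
        return 0;
}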
@@ -444,12 +452,8 @@ pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down" " to PAGE_SIZE\n", mrdsl); - sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl)) return -1; - } } return 0; @@ -591,13 +595,8 @@ * * In our case, we have already located the struct iscsi_tiqn at this point. */ - memset(buf, 0, 32); - sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt); - if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_NO_RESOURCES); + if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt)) return -1; - } return iscsi_login_disable_FIM_keys(conn->param_list, conn); } @@ -1186,6 +1185,9 @@ conn->sock = NULL; } + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + if (conn->conn_transport->iscsit_free_conn) conn->conn_transport->iscsit_free_conn(conn); @@ -1196,7 +1198,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) { u8 *buffer, zero_tsih = 0; - int ret = 0, rc, stop; + int ret = 0, rc; struct iscsi_conn *conn = NULL; struct iscsi_login *login; struct iscsi_portal_group *tpg = NULL; @@ -1210,6 +1212,9 @@ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; complete(&np->np_restart_comp); + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { + spin_unlock_bh(&np->np_thread_lock); + goto exit; } else { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; } @@ -1402,10 +1407,8 @@ } out: - stop = kthread_should_stop(); - /* Wait for another socket.. 
*/ - if (!stop) - return 1; + return 1; + exit: iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); @@ -1422,7 +1425,7 @@ allow_signal(SIGINT); - while (!kthread_should_stop()) { + while (1) { ret = __iscsi_target_login_thread(np); /* * We break and exit here unless another sock_accept() call --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_nego.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_nego.c @@ -1192,7 +1192,7 @@ */ alloc_tags: tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth); - tag_num += (tag_num / 2) + ISCSIT_EXTRA_TAGS; + tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS; tag_size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size); --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_parameters.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_parameters.c @@ -601,7 +601,7 @@ param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); if (!param_list) { pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); - goto err_out; + return -1; } INIT_LIST_HEAD(¶m_list->param_list); INIT_LIST_HEAD(¶m_list->extra_response_list); --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_tpg.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_tpg.c @@ -137,7 +137,7 @@ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { spin_lock(&tpg->tpg_state_lock); - if (tpg->tpg_state == TPG_STATE_FREE) { + if (tpg->tpg_state != TPG_STATE_ACTIVE) { spin_unlock(&tpg->tpg_state_lock); continue; } --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_tq.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_tq.c @@ -24,36 +24,22 @@ #include "iscsi_target_tq.h" #include "iscsi_target.h" -static LIST_HEAD(active_ts_list); static LIST_HEAD(inactive_ts_list); -static DEFINE_SPINLOCK(active_ts_lock); static DEFINE_SPINLOCK(inactive_ts_lock); static DEFINE_SPINLOCK(ts_bitmap_lock); -static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts) -{ - spin_lock(&active_ts_lock); - list_add_tail(&ts->ts_list, &active_ts_list); - iscsit_global->active_ts++; - spin_unlock(&active_ts_lock); -} - static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) { + if (!list_empty(&ts->ts_list)) { + WARN_ON(1); + return; + } spin_lock(&inactive_ts_lock); list_add_tail(&ts->ts_list, &inactive_ts_list); iscsit_global->inactive_ts++; spin_unlock(&inactive_ts_lock); } -static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts) -{ - spin_lock(&active_ts_lock); - list_del(&ts->ts_list); - iscsit_global->active_ts--; - spin_unlock(&active_ts_lock); -} - static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) { struct iscsi_thread_set *ts; @@ -66,7 +52,7 @@ ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); - list_del(&ts->ts_list); + list_del_init(&ts->ts_list); iscsit_global->inactive_ts--; spin_unlock(&inactive_ts_lock); @@ -204,8 +190,6 @@ void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) { - iscsi_add_ts_to_active_list(ts); - spin_lock_bh(&ts->ts_state_lock); conn->thread_set = ts; ts->conn = conn; @@ -397,7 +381,6 @@ if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); - iscsi_del_ts_from_active_list(ts); if (!iscsit_global->in_shutdown) iscsi_deallocate_extra_thread_sets(); @@ -452,7 +435,6 @@ if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); - iscsi_del_ts_from_active_list(ts); if (!iscsit_global->in_shutdown) 
iscsi_deallocate_extra_thread_sets(); --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_util.c +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_util.c @@ -152,13 +152,16 @@ * May be called from software interrupt (timer) context for allocating * iSCSI NopINs. */ -struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask) +struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) { struct iscsi_cmd *cmd; struct se_session *se_sess = conn->sess->se_sess; int size, tag; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, gfp_mask); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); + if (tag < 0) + return NULL; + size = sizeof(struct iscsi_cmd) + conn->conn_transport->priv_size; cmd = (struct iscsi_cmd *)(se_sess->sess_cmd_map + (tag * size)); memset(cmd, 0, size); @@ -926,7 +929,7 @@ u8 state; struct iscsi_cmd *cmd; - cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC); + cmd = iscsit_allocate_cmd(conn, TASK_RUNNING); if (!cmd) return -1; @@ -1292,6 +1295,8 @@ login->login_failed = 1; iscsit_collect_login_stats(conn, status_class, status_detail); + memset(&login->rsp[0], 0, ISCSI_HDR_LEN); + hdr = (struct iscsi_login_rsp *)&login->rsp[0]; hdr->opcode = ISCSI_OP_LOGIN_RSP; hdr->status_class = status_class; @@ -1351,15 +1356,15 @@ struct iscsi_conn *conn, struct iscsi_data_count *count) { - int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; + int ret, iov_len; struct kvec *iov_p; struct msghdr msg; if (!conn || !conn->sock || !conn->conn_ops) return -1; - if (data <= 0) { - pr_err("Data length is: %d\n", data); + if (count->data_length <= 0) { + pr_err("Data length is: %d\n", count->data_length); return -1; } @@ -1368,20 +1373,16 @@ iov_p = count->iov; iov_len = count->iov_count; - while (total_tx < data) { - tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, - (data - total_tx)); - if (tx_loop <= 0) { - pr_debug("tx_loop: %d total_tx %d\n", - tx_loop, total_tx); - return tx_loop; - } - total_tx += tx_loop; - pr_debug("tx_loop: %d, total_tx: %d, data: %d\n", - tx_loop, total_tx, data); + ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, + count->data_length); + if (ret != count->data_length) { + pr_err("Unexpected ret: %d send data %d\n", + ret, count->data_length); + return -EPIPE; } + pr_debug("ret: %d, sent data: %d\n", ret, count->data_length); - return total_tx; + return ret; } int rx_data( --- linux-3.13.0.orig/drivers/target/iscsi/iscsi_target_util.h +++ linux-3.13.0/drivers/target/iscsi/iscsi_target_util.h @@ -9,7 +9,7 @@ extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *); extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *); extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t); -extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t); +extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int); extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32); extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *); extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); --- linux-3.13.0.orig/drivers/target/loopback/tcm_loop.c +++ linux-3.13.0/drivers/target/loopback/tcm_loop.c @@ -197,7 +197,7 @@ set_host_byte(sc, DID_TRANSPORT_DISRUPTED); goto out_done; } - tl_nexus = tl_hba->tl_nexus; + tl_nexus = tl_tpg->tl_nexus; if (!tl_nexus) { scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" " does not exist\n"); @@ -262,16 +262,26 @@ * to struct scsi_device */ static int tcm_loop_issue_tmr(struct 
tcm_loop_tpg *tl_tpg, - struct tcm_loop_nexus *tl_nexus, int lun, int task, enum tcm_tmreq_table tmr) { struct se_cmd *se_cmd = NULL; struct se_session *se_sess; struct se_portal_group *se_tpg; + struct tcm_loop_nexus *tl_nexus; struct tcm_loop_cmd *tl_cmd = NULL; struct tcm_loop_tmr *tl_tmr = NULL; int ret = TMR_FUNCTION_FAILED, rc; + /* + * Locate the tl_nexus and se_sess pointers + */ + tl_nexus = tl_tpg->tl_nexus; + if (!tl_nexus) { + pr_err("Unable to perform device reset without" + " active I_T Nexus\n"); + return ret; + } + tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { pr_err("Unable to allocate memory for tl_cmd\n"); @@ -287,7 +297,7 @@ se_cmd = &tl_cmd->tl_se_cmd; se_tpg = &tl_tpg->tl_se_tpg; - se_sess = tl_nexus->se_sess; + se_sess = tl_tpg->tl_nexus->se_sess; /* * Initialize struct se_cmd descriptor from target_core_mod infrastructure */ @@ -332,7 +342,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc) { struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tpg *tl_tpg; int ret = FAILED; @@ -340,21 +349,8 @@ * Locate the tcm_loop_hba_t pointer */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); - return FAILED; - } - - /* - * Locate the tl_tpg pointer from TargetID in sc->device->id - */ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, sc->tag, TMR_ABORT_TASK); return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; } @@ -366,7 +362,6 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) { struct tcm_loop_hba *tl_hba; - struct tcm_loop_nexus *tl_nexus; struct tcm_loop_tpg *tl_tpg; int ret = FAILED; @@ -374,20 +369,9 @@ * Locate the tcm_loop_hba_t pointer */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); - /* - * Locate the tl_nexus and se_sess pointers - */ - tl_nexus = tl_hba->tl_nexus; - if (!tl_nexus) { - pr_err("Unable to perform device reset without" - " active I_T Nexus\n"); - return FAILED; - } - /* - * Locate the tl_tpg pointer from TargetID in sc->device->id - */ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; - ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, + + ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun, 0, TMR_LUN_RESET); return (ret == TMR_FUNCTION_COMPLETE) ? 
SUCCESS : FAILED; } @@ -987,8 +971,8 @@ struct tcm_loop_nexus *tl_nexus; int ret = -ENOMEM; - if (tl_tpg->tl_hba->tl_nexus) { - pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); + if (tl_tpg->tl_nexus) { + pr_debug("tl_tpg->tl_nexus already exists\n"); return -EEXIST; } se_tpg = &tl_tpg->tl_se_tpg; @@ -1023,7 +1007,7 @@ */ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, tl_nexus->se_sess, tl_nexus); - tl_tpg->tl_hba->tl_nexus = tl_nexus; + tl_tpg->tl_nexus = tl_nexus; pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), name); @@ -1039,12 +1023,8 @@ { struct se_session *se_sess; struct tcm_loop_nexus *tl_nexus; - struct tcm_loop_hba *tl_hba = tpg->tl_hba; - if (!tl_hba) - return -ENODEV; - - tl_nexus = tl_hba->tl_nexus; + tl_nexus = tpg->tl_nexus; if (!tl_nexus) return -ENODEV; @@ -1060,13 +1040,13 @@ } pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" - " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), + " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba), tl_nexus->se_sess->se_node_acl->initiatorname); /* * Release the SCSI I_T Nexus to the emulated SAS Target Port */ transport_deregister_session(tl_nexus->se_sess); - tpg->tl_hba->tl_nexus = NULL; + tpg->tl_nexus = NULL; kfree(tl_nexus); return 0; } @@ -1082,7 +1062,7 @@ struct tcm_loop_nexus *tl_nexus; ssize_t ret; - tl_nexus = tl_tpg->tl_hba->tl_nexus; + tl_nexus = tl_tpg->tl_nexus; if (!tl_nexus) return -ENODEV; --- linux-3.13.0.orig/drivers/target/loopback/tcm_loop.h +++ linux-3.13.0/drivers/target/loopback/tcm_loop.h @@ -27,11 +27,6 @@ }; struct tcm_loop_nexus { - int it_nexus_active; - /* - * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h - */ - struct scsi_host *sh; /* * Pointer to TCM session for I_T Nexus */ @@ -51,6 +46,7 @@ atomic_t tl_tpg_port_count; struct se_portal_group tl_se_tpg; struct tcm_loop_hba *tl_hba; + struct tcm_loop_nexus *tl_nexus; }; struct tcm_loop_hba { @@ -59,7 +55,6 @@ struct se_hba_s *se_hba; struct se_lun *tl_hba_lun; struct se_port *tl_hba_lun_sep; - struct tcm_loop_nexus *tl_nexus; struct device dev; struct Scsi_Host *sh; struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; --- linux-3.13.0.orig/drivers/target/target_core_configfs.c +++ linux-3.13.0/drivers/target/target_core_configfs.c @@ -2040,6 +2040,11 @@ " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } + if (!(dev->dev_flags & DF_CONFIGURED)) { + pr_err("Unable to set alua_access_state while device is" + " not configured\n"); + return -ENODEV; + } ret = kstrtoul(page, 0, &tmp); if (ret < 0) { --- linux-3.13.0.orig/drivers/target/target_core_device.c +++ linux-3.13.0/drivers/target/target_core_device.c @@ -616,6 +616,7 @@ dev->export_count--; spin_unlock(&hba->device_lock); + lun->lun_sep = NULL; lun->lun_se_dev = NULL; } @@ -798,10 +799,10 @@ pr_err("emulate_write_cache not supported for pSCSI\n"); return -EINVAL; } - if (dev->transport->get_write_cache) { - pr_warn("emulate_write_cache cannot be changed when underlying" - " HW reports WriteCacheEnabled, ignoring request\n"); - return 0; + if (flag && + dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; } dev->dev_attrib.emulate_write_cache = flag; @@ -1064,10 +1065,10 @@ " changed for TCM/pSCSI\n", dev); return -EINVAL; } - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { + if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { pr_err("dev[%p]: Passed 
optimal_sectors %u cannot be" - " greater than fabric_max_sectors: %u\n", dev, - optimal_sectors, dev->dev_attrib.fabric_max_sectors); + " greater than hw_max_sectors: %u\n", dev, + optimal_sectors, dev->dev_attrib.hw_max_sectors); return -EINVAL; } @@ -1320,7 +1321,8 @@ * Check to see if there are any existing persistent reservation APTPL * pre-registrations that need to be enabled for this LUN ACL.. */ - core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); + core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl, + lacl->mapped_lun); return 0; } @@ -1471,7 +1473,6 @@ DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; xcopy_lun = &dev->xcopy_lun; xcopy_lun->lun_se_dev = dev; @@ -1498,8 +1499,6 @@ ret = dev->transport->configure_device(dev); if (ret) goto out; - dev->dev_flags |= DF_CONFIGURED; - /* * XXX: there is not much point to have two different values here.. */ @@ -1512,6 +1511,7 @@ dev->dev_attrib.hw_max_sectors = se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, dev->dev_attrib.hw_block_size); + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); dev->creation_time = get_jiffies_64(); @@ -1560,6 +1560,8 @@ list_add_tail(&dev->g_dev_node, &g_device_list); mutex_unlock(&g_device_mutex); + dev->dev_flags |= DF_CONFIGURED; + return 0; out_free_alua: --- linux-3.13.0.orig/drivers/target/target_core_file.c +++ linux-3.13.0/drivers/target/target_core_file.c @@ -552,7 +552,16 @@ { struct se_device *dev = cmd->se_dev; int ret = 0; - + /* + * We are currently limited by the number of iovecs (2048) per + * single vfs_[writev,readv] call. + */ + if (cmd->data_length > FD_MAX_BYTES) { + pr_err("FILEIO: Not able to process I/O of %u bytes due to" + "FD_MAX_BYTES: %u iovec count limitiation\n", + cmd->data_length, FD_MAX_BYTES); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } /* * Call vectorized fileio functions to map struct scatterlist * physical memory addresses to struct iovec virtual memory. 
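The target_core_file.c hunk just above rejects commands larger than FD_MAX_BYTES up front, because a single vfs_writev()/vfs_readv() call can only carry a bounded number of iovecs (2048, per the comment in the patch). The user-space sketch below shows the same guard in a minimal form; MAX_IOVECS, the 4 KiB per-iovec segment size and submit_io() are illustrative assumptions, not the kernel's actual constants or functions.

#include <stdio.h>
#include <stddef.h>

#define MAX_IOVECS   2048u   /* per-call iovec limit, as in the patch comment */
#define SEGMENT_SIZE 4096u   /* assumed bytes mapped per iovec (one page) */
#define MAX_BYTES    (MAX_IOVECS * SEGMENT_SIZE)

/*
 * Reject a request that cannot be expressed as a single vectored I/O
 * call, mirroring the FD_MAX_BYTES check added in the hunk above.
 */
static int submit_io(size_t data_length)
{
        if (data_length > MAX_BYTES) {
                fprintf(stderr,
                        "cannot process I/O of %zu bytes: limit is %u bytes (%u iovecs)\n",
                        data_length, MAX_BYTES, MAX_IOVECS);
                return -1;   /* the kernel returns TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE here */
        }

        /* ... build the iovec array and issue writev()/readv() here ... */
        printf("submitting %zu bytes in at most %u iovecs\n",
               data_length, MAX_IOVECS);
        return 0;
}

int main(void)
{
        submit_io(64 * 1024);           /* fits within the per-call limit */
        submit_io(16u * 1024 * 1024);   /* exceeds it and is rejected */
        return 0;
}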
--- linux-3.13.0.orig/drivers/target/target_core_iblock.c +++ linux-3.13.0/drivers/target/target_core_iblock.c @@ -122,7 +122,7 @@ q = bdev_get_queue(bd); dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); - dev->dev_attrib.hw_max_sectors = UINT_MAX; + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); dev->dev_attrib.hw_queue_depth = q->nr_requests; /* --- linux-3.13.0.orig/drivers/target/target_core_pr.c +++ linux-3.13.0/drivers/target/target_core_pr.c @@ -944,10 +944,10 @@ struct se_device *dev, struct se_portal_group *tpg, struct se_lun *lun, - struct se_lun_acl *lun_acl) + struct se_node_acl *nacl, + u32 mapped_lun) { - struct se_node_acl *nacl = lun_acl->se_lun_nacl; - struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; + struct se_dev_entry *deve = nacl->device_list[mapped_lun]; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return 0; @@ -1877,8 +1877,8 @@ } if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - pr_err("Unable to update renaming" - " APTPL metadata\n"); + pr_err("Unable to update renaming APTPL metadata," + " reallocating larger buffer\n"); ret = -EMSGSIZE; goto out; } @@ -1895,8 +1895,8 @@ lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - pr_err("Unable to update renaming" - " APTPL metadata\n"); + pr_err("Unable to update renaming APTPL metadata," + " reallocating larger buffer\n"); ret = -EMSGSIZE; goto out; } @@ -1959,7 +1959,7 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl) { unsigned char *buf; - int rc; + int rc, len = PR_APTPL_BUF_LEN; if (!aptpl) { char *null_buf = "No Registrations or Reservations\n"; @@ -1973,25 +1973,26 @@ return 0; } - - buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL); +retry: + buf = vzalloc(len); if (!buf) return TCM_OUT_OF_RESOURCES; - rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN); + rc = core_scsi3_update_aptpl_buf(dev, buf, len); if (rc < 0) { - kfree(buf); - return TCM_OUT_OF_RESOURCES; + vfree(buf); + len *= 2; + goto retry; } rc = __core_scsi3_write_aptpl_to_file(dev, buf); if (rc != 0) { pr_err("SPC-3 PR: Could not update APTPL\n"); - kfree(buf); + vfree(buf); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } dev->t10_pr.pr_aptpl_active = 1; - kfree(buf); + vfree(buf); pr_debug("SPC-3 PR: Set APTPL Bit Activated\n"); return 0; } @@ -2009,7 +2010,7 @@ struct t10_reservation *pr_tmpl = &dev->t10_pr; unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; sense_reason_t ret = TCM_NO_SENSE; - int pr_holder = 0; + int pr_holder = 0, type; if (!se_sess || !se_lun) { pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); @@ -2131,6 +2132,7 @@ ret = TCM_RESERVATION_CONFLICT; goto out; } + type = pr_reg->pr_res_type; spin_lock(&pr_tmpl->registration_lock); /* @@ -2161,6 +2163,7 @@ * Release the calling I_T Nexus registration now.. */ __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); + pr_reg = NULL; /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2174,8 +2177,8 @@ * RESERVATIONS RELEASED. 
*/ if (pr_holder && - (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || - pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { + (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY || + type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) { list_for_each_entry(pr_reg_p, &pr_tmpl->registration_list, pr_reg_list) { @@ -2194,7 +2197,8 @@ ret = core_scsi3_update_and_write_aptpl(dev, aptpl); out: - core_scsi3_put_pr_reg(pr_reg); + if (pr_reg) + core_scsi3_put_pr_reg(pr_reg); return ret; } --- linux-3.13.0.orig/drivers/target/target_core_pr.h +++ linux-3.13.0/drivers/target/target_core_pr.h @@ -55,7 +55,7 @@ unsigned char *, u16, u32, int, int, u8); extern int core_scsi3_check_aptpl_registration(struct se_device *, struct se_portal_group *, struct se_lun *, - struct se_lun_acl *); + struct se_node_acl *, u32); extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, struct se_node_acl *); extern void core_scsi3_free_all_registrations(struct se_device *); --- linux-3.13.0.orig/drivers/target/target_core_pscsi.c +++ linux-3.13.0/drivers/target/target_core_pscsi.c @@ -1111,7 +1111,7 @@ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); struct scsi_device *sd = pdv->pdv_sd; - return sd->type; + return (sd) ? sd->type : TYPE_NO_LUN; } static sector_t pscsi_get_blocks(struct se_device *dev) --- linux-3.13.0.orig/drivers/target/target_core_rd.c +++ linux-3.13.0/drivers/target/target_core_rd.c @@ -178,7 +178,7 @@ - 1; for (j = 0; j < sg_per_table; j++) { - pg = alloc_pages(GFP_KERNEL, 0); + pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); if (!pg) { pr_err("Unable to allocate scatterlist" " pages for struct rd_dev_sg_table\n"); --- linux-3.13.0.orig/drivers/target/target_core_sbc.c +++ linux-3.13.0/drivers/target/target_core_sbc.c @@ -80,7 +80,7 @@ transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8); return 0; } @@ -128,7 +128,7 @@ transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 32); return 0; } @@ -260,6 +260,8 @@ static sense_reason_t sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) { + struct se_device *dev = cmd->se_dev; + sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); if ((flags[0] & 0x04) || (flags[0] & 0x02)) { @@ -273,6 +275,16 @@ sectors, cmd->se_dev->dev_attrib.max_write_same_len); return TCM_INVALID_CDB_FIELD; } + /* + * Sanity check for LBA wrap and request past end of device. + */ + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || + ((cmd->t_task_lba + sectors) > end_lba)) { + pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", + (unsigned long long)end_lba, cmd->t_task_lba, sectors); + return TCM_ADDRESS_OUT_OF_RANGE; + } + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ if (flags[0] & 0x10) { pr_warn("WRITE SAME with ANCHOR not supported\n"); @@ -419,13 +431,14 @@ goto out; } - write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents, + write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, GFP_KERNEL); if (!write_sg) { pr_err("Unable to allocate compare_and_write sg\n"); ret = TCM_OUT_OF_RESOURCES; goto out; } + sg_init_table(write_sg, cmd->t_data_nents); /* * Setup verify and write data payloads from total NumberLBAs. 
*/ @@ -838,23 +851,9 @@ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { unsigned long long end_lba; - if (sectors > dev->dev_attrib.fabric_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds fabric_max_sectors:" - " %u\n", cdb[0], sectors, - dev->dev_attrib.fabric_max_sectors); - return TCM_INVALID_CDB_FIELD; - } - if (sectors > dev->dev_attrib.hw_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds backend hw_max_sectors:" - " %u\n", cdb[0], sectors, - dev->dev_attrib.hw_max_sectors); - return TCM_INVALID_CDB_FIELD; - } - end_lba = dev->transport->get_blocks(dev) + 1; - if (cmd->t_task_lba + sectors > end_lba) { + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || + ((cmd->t_task_lba + sectors) > end_lba)) { pr_err("cmd exceeds last lba %llu " "(lba %llu, sectors %u)\n", end_lba, cmd->t_task_lba, sectors); --- linux-3.13.0.orig/drivers/target/target_core_spc.c +++ linux-3.13.0/drivers/target/target_core_spc.c @@ -450,7 +450,6 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; - u32 max_sectors; int have_tp = 0; int opt, min; @@ -484,9 +483,7 @@ /* * Set MAXIMUM TRANSFER LENGTH */ - max_sectors = min(dev->dev_attrib.fabric_max_sectors, - dev->dev_attrib.hw_max_sectors); - put_unaligned_be32(max_sectors, &buf[8]); + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH @@ -646,6 +643,7 @@ unsigned char buf[SE_INQUIRY_BUF]; sense_reason_t ret; int p; + int len = 0; memset(buf, 0, SE_INQUIRY_BUF); @@ -663,6 +661,7 @@ } ret = spc_emulate_inquiry_std(cmd, buf); + len = buf[4] + 5; goto out; } @@ -670,6 +669,7 @@ if (cdb[2] == evpd_handlers[p].page) { buf[1] = cdb[2]; ret = evpd_handlers[p].emulate(cmd, buf); + len = get_unaligned_be16(&buf[2]) + 4; goto out; } } @@ -685,7 +685,7 @@ } if (!ret) - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, len); return ret; } @@ -1003,7 +1003,7 @@ transport_kunmap_data_sg(cmd); } - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, length); return 0; } @@ -1180,7 +1180,7 @@ buf[3] = (lun_count & 0xff); transport_kunmap_data_sg(cmd); - target_complete_cmd(cmd, GOOD); + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); return 0; } EXPORT_SYMBOL(spc_emulate_report_luns); --- linux-3.13.0.orig/drivers/target/target_core_tpg.c +++ linux-3.13.0/drivers/target/target_core_tpg.c @@ -40,6 +40,7 @@ #include #include "target_core_internal.h" +#include "target_core_pr.h" extern struct se_device *g_lun0_dev; @@ -166,6 +167,13 @@ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, lun_access, acl, tpg); + /* + * Check to see if there are any existing persistent reservation + * APTPL pre-registrations that need to be enabled for this dynamic + * LUN ACL now.. 
+ */ + core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, + lun->unpacked_lun); spin_lock(&tpg->tpg_lun_lock); } spin_unlock(&tpg->tpg_lun_lock); --- linux-3.13.0.orig/drivers/target/target_core_transport.c +++ linux-3.13.0/drivers/target/target_core_transport.c @@ -534,7 +534,7 @@ spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return 1; } @@ -568,10 +568,11 @@ { struct se_lun *lun = cmd->se_lun; - if (!lun || !cmd->lun_ref_active) + if (!lun) return; - percpu_ref_put(&lun->lun_ref); + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); } void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) @@ -652,7 +653,7 @@ if (cmd->transport_state & CMD_T_ABORTED && cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; } else if (cmd->transport_state & CMD_T_FAILED) { INIT_WORK(&cmd->work, target_complete_failure_work); @@ -668,6 +669,23 @@ } EXPORT_SYMBOL(target_complete_cmd); +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = cmd->data_length - length; + } + + cmd->data_length = length; + } + + target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); + static void target_add_to_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1078,6 +1096,7 @@ init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); + kref_init(&cmd->cmd_kref); cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; @@ -1708,7 +1727,7 @@ cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return; } @@ -1798,8 +1817,7 @@ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret) - goto out; + goto out; } switch (cmd->data_direction) { @@ -2174,7 +2192,7 @@ * and let it call back once the write buffers are ready. 
*/ target_add_to_state_list(cmd); - if (cmd->data_direction != DMA_TO_DEVICE) { + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { target_execute_cmd(cmd); return 0; } @@ -2253,7 +2271,6 @@ unsigned long flags; int ret = 0; - kref_init(&se_cmd->cmd_kref); /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() @@ -2272,6 +2289,10 @@ list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); out: spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + if (ret && ack_kref) + target_put_sess_cmd(se_sess, se_cmd); + return ret; } EXPORT_SYMBOL(target_get_sess_cmd); @@ -2303,6 +2324,10 @@ */ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) { + if (!se_sess) { + se_cmd->se_tfo->release_cmd(se_cmd); + return 1; + } return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, &se_sess->sess_cmd_lock); } @@ -2779,6 +2804,12 @@ int transport_generic_handle_tmr( struct se_cmd *cmd) { + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->transport_state |= CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + INIT_WORK(&cmd->work, target_tmr_work); queue_work(cmd->se_dev->tmr_wq, &cmd->work); return 0; --- linux-3.13.0.orig/drivers/target/tcm_fc/tfc_cmd.c +++ linux-3.13.0/drivers/target/tcm_fc/tfc_cmd.c @@ -90,18 +90,18 @@ { struct fc_frame *fp; struct fc_lport *lport; - struct se_session *se_sess; + struct ft_sess *sess; if (!cmd) return; - se_sess = cmd->sess->se_sess; + sess = cmd->sess; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); - percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); - ft_sess_put(cmd->sess); /* undo get from lookup at recv */ + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ft_sess_put(sess); /* undo get from lookup at recv */ } void ft_release_cmd(struct se_cmd *se_cmd) @@ -438,7 +438,7 @@ struct se_session *se_sess = sess->se_sess; int tag; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) goto busy; --- linux-3.13.0.orig/drivers/target/tcm_fc/tfc_io.c +++ linux-3.13.0/drivers/target/tcm_fc/tfc_io.c @@ -346,7 +346,7 @@ ep = fc_seq_exch(seq); if (ep) { lport = ep->lp; - if (lport && (ep->xid <= lport->lro_xid)) + if (lport && (ep->xid <= lport->lro_xid)) { /* * "ddp_done" trigger invalidation of HW * specific DDP context @@ -361,6 +361,7 @@ * identified using ep->xid) */ cmd->was_ddp_setup = 0; + } } } } --- linux-3.13.0.orig/drivers/target/tcm_fc/tfc_sess.c +++ linux-3.13.0/drivers/target/tcm_fc/tfc_sess.c @@ -68,6 +68,7 @@ if (tport) { tport->tpg = tpg; + tpg->tport = tport; return tport; } --- linux-3.13.0.orig/drivers/thermal/intel_powerclamp.c +++ linux-3.13.0/drivers/thermal/intel_powerclamp.c @@ -426,7 +426,6 @@ * allowed. thus jiffies are updated properly. 
*/ preempt_disable(); - tick_nohz_idle_enter(); /* mwait until target jiffies is reached */ while (time_before(jiffies, target_jiffies)) { unsigned long ecx = 1; @@ -444,7 +443,6 @@ start_critical_timings(); atomic_inc(&idle_wakeup_counter); } - tick_nohz_idle_exit(); preempt_enable_no_resched(); } del_timer_sync(&wakeup_timer); --- linux-3.13.0.orig/drivers/thermal/thermal_core.c +++ linux-3.13.0/drivers/thermal/thermal_core.c @@ -1742,10 +1742,10 @@ return 0; -unregister_governors: - thermal_unregister_governors(); unregister_class: class_unregister(&thermal_class); +unregister_governors: + thermal_unregister_governors(); error: idr_destroy(&thermal_tz_idr); idr_destroy(&thermal_cdev_idr); --- linux-3.13.0.orig/drivers/thermal/thermal_hwmon.c +++ linux-3.13.0/drivers/thermal/thermal_hwmon.c @@ -140,6 +140,12 @@ return NULL; } +static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz) +{ + unsigned long temp; + return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp); +} + int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; @@ -189,21 +195,18 @@ if (result) goto free_temp_mem; - if (tz->ops->get_crit_temp) { - unsigned long temperature; - if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(temp->temp_crit.name, - sizeof(temp->temp_crit.name), + if (thermal_zone_crit_temp_valid(tz)) { + snprintf(temp->temp_crit.name, + sizeof(temp->temp_crit.name), "temp%d_crit", hwmon->count); - temp->temp_crit.attr.attr.name = temp->temp_crit.name; - temp->temp_crit.attr.attr.mode = 0444; - temp->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&temp->temp_crit.attr.attr); - result = device_create_file(hwmon->device, - &temp->temp_crit.attr); - if (result) - goto unregister_input; - } + temp->temp_crit.attr.attr.name = temp->temp_crit.name; + temp->temp_crit.attr.attr.mode = 0444; + temp->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&temp->temp_crit.attr.attr); + result = device_create_file(hwmon->device, + &temp->temp_crit.attr); + if (result) + goto unregister_input; } mutex_lock(&thermal_hwmon_list_lock); @@ -250,7 +253,7 @@ } device_remove_file(hwmon->device, &temp->temp_input.attr); - if (tz->ops->get_crit_temp) + if (thermal_zone_crit_temp_valid(tz)) device_remove_file(hwmon->device, &temp->temp_crit.attr); mutex_lock(&thermal_hwmon_list_lock); --- linux-3.13.0.orig/drivers/thermal/x86_pkg_temp_thermal.c +++ linux-3.13.0/drivers/thermal/x86_pkg_temp_thermal.c @@ -68,6 +68,10 @@ struct thermal_zone_device *tzone; }; +static const struct thermal_zone_params pkg_temp_tz_params = { + .no_hwmon = true, +}; + /* List maintaining number of package instances */ static LIST_HEAD(phy_dev_list); static DEFINE_MUTEX(phy_dev_list_mutex); @@ -446,7 +450,7 @@ thres_count, (thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01, - phy_dev_entry, &tzone_ops, NULL, 0, 0); + phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0); if (IS_ERR(phy_dev_entry->tzone)) { err = PTR_ERR(phy_dev_entry->tzone); goto err_ret_free; --- linux-3.13.0.orig/drivers/tty/Kconfig +++ linux-3.13.0/drivers/tty/Kconfig @@ -366,7 +366,7 @@ "Trace data router for MIPI P1149.7 cJTAG standard". 
config PPC_EPAPR_HV_BYTECHAN - tristate "ePAPR hypervisor byte channel driver" + bool "ePAPR hypervisor byte channel driver" depends on PPC select EPAPR_PARAVIRT help --- linux-3.13.0.orig/drivers/tty/hvc/hvc_console.c +++ linux-3.13.0/drivers/tty/hvc/hvc_console.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -70,6 +71,9 @@ /* Picks up late kicks after list walk but before schedule() */ static int hvc_kicked; +/* hvc_init is triggered from hvc_alloc, i.e. only when actually used */ +static atomic_t hvc_needs_init __read_mostly = ATOMIC_INIT(-1); + static int hvc_init(void); #ifdef CONFIG_MAGIC_SYSRQ @@ -186,7 +190,7 @@ return hvc_driver; } -static int __init hvc_console_setup(struct console *co, char *options) +static int hvc_console_setup(struct console *co, char *options) { if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) return -ENODEV; @@ -851,7 +855,7 @@ int i; /* We wait until a driver actually comes along */ - if (!hvc_driver) { + if (atomic_inc_not_zero(&hvc_needs_init)) { int err = hvc_init(); if (err) return ERR_PTR(err); --- linux-3.13.0.orig/drivers/tty/ipwireless/tty.c +++ linux-3.13.0/drivers/tty/ipwireless/tty.c @@ -177,9 +177,6 @@ ": %d chars not inserted to flip buffer!\n", length - work); - /* - * This may sleep if ->low_latency is set - */ if (work) tty_flip_buffer_push(&tty->port); } --- linux-3.13.0.orig/drivers/tty/n_gsm.c +++ linux-3.13.0/drivers/tty/n_gsm.c @@ -1089,6 +1089,7 @@ { unsigned int addr = 0; unsigned int modem = 0; + unsigned int brk = 0; struct gsm_dlci *dlci; int len = clen; u8 *dp = data; @@ -1115,6 +1116,16 @@ if (len == 0) return; } + len--; + if (len > 0) { + while (gsm_read_ea(&brk, *dp++) == 0) { + len--; + if (len == 0) + return; + } + modem <<= 7; + modem |= (brk & 0x7f); + } tty = tty_port_tty_get(&dlci->port); gsm_process_modem(tty, dlci, modem, clen); if (tty) { --- linux-3.13.0.orig/drivers/tty/n_tty.c +++ linux-3.13.0/drivers/tty/n_tty.c @@ -105,6 +105,7 @@ /* must hold exclusive termios_rwsem to reset these */ unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; + unsigned char push:1; /* shared by producer and consumer */ char read_buf[N_TTY_BUF_SIZE]; @@ -246,8 +247,6 @@ static void n_tty_check_throttle(struct tty_struct *tty) { - if (tty->driver->type == TTY_DRIVER_TYPE_PTY) - return; /* * Check the remaining room for the input canonicalization * mode. 
We don't want to throttle the driver if we're in @@ -319,7 +318,8 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) { - *read_buf_addr(ldata, ldata->read_head++) = c; + *read_buf_addr(ldata, ldata->read_head) = c; + ldata->read_head++; } /** @@ -342,6 +342,7 @@ ldata->erasing = 0; bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); + ldata->push = 0; } static void n_tty_packet_mode_flush(struct tty_struct *tty) @@ -813,8 +814,7 @@ struct n_tty_data *ldata = tty->disc_data; size_t echoed; - if ((!L_ECHO(tty) && !L_ECHONL(tty)) || - ldata->echo_mark == ldata->echo_tail) + if (ldata->echo_mark == ldata->echo_tail) return; mutex_lock(&ldata->output_lock); @@ -1238,7 +1238,8 @@ if (L_ECHO(tty)) { echo_char(c, tty); commit_echoes(tty); - } + } else + process_echoes(tty); isig(signal, tty); return; } @@ -1269,7 +1270,7 @@ if (I_IXON(tty)) { if (c == START_CHAR(tty)) { start_tty(tty); - commit_echoes(tty); + process_echoes(tty); return 0; } if (c == STOP_CHAR(tty)) { @@ -1509,23 +1510,6 @@ n_tty_receive_char_flagged(tty, c, flag); } -/** - * n_tty_receive_buf - data receive - * @tty: terminal device - * @cp: buffer - * @fp: flag buffer - * @count: characters - * - * Called by the terminal driver when a block of characters has - * been received. This function must be called from soft contexts - * not from interrupt context. The driver is responsible for making - * calls one at a time and in order (or using flush_to_ldisc) - * - * n_tty_receive_buf()/producer path: - * claims non-exclusive termios_rwsem - * publishes read_head and canon_head - */ - static void n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) @@ -1681,47 +1665,85 @@ } } -static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) +/** + * n_tty_receive_buf_common - process input + * @tty: device to receive input + * @cp: input chars + * @fp: flags for each char (if NULL, all chars are TTY_NORMAL) + * @count: number of input chars in @cp + * + * Called by the terminal driver when a block of characters has + * been received. This function must be called from soft contexts + * not from interrupt context. The driver is responsible for making + * calls one at a time and in order (or using flush_to_ldisc) + * + * Returns the # of input chars from @cp which were processed. + * + * In canonical mode, the maximum line length is 4096 chars (including + * the line termination char); lines longer than 4096 chars are + * truncated. After 4095 chars, input data is still processed but + * not stored. Overflow processing ensures the tty can always + * receive more input until at least one line can be read. + * + * In non-canonical mode, the read buffer will only accept 4095 chars; + * this provides the necessary space for a newline char if the input + * mode is switched to canonical. + * + * Note it is possible for the read buffer to _contain_ 4096 chars + * in non-canonical mode: the read buffer could already contain the + * maximum canon line of 4096 chars when the mode is switched to + * non-canonical. 
+ * + * n_tty_receive_buf()/producer path: + * claims non-exclusive termios_rwsem + * publishes commit_head or canon_head + */ +static int +n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, + char *fp, int count, int flow) { - int room, n; + struct n_tty_data *ldata = tty->disc_data; + int room, n, rcvd = 0, overflow; down_read(&tty->termios_rwsem); while (1) { - room = receive_room(tty); + /* + * When PARMRK is set, each input char may take up to 3 chars + * in the read buf; reduce the buffer space avail by 3x + * + * If we are doing input canonicalization, and there are no + * pending newlines, let characters through without limit, so + * that erase characters will be handled. Other excess + * characters will be beeped. + * + * paired with store in *_copy_from_read_buf() -- guarantees + * the consumer has loaded the data in read_buf up to the new + * read_tail (so this producer will not overwrite unread data) + */ + size_t tail = ldata->read_tail; + + room = N_TTY_BUF_SIZE - (ldata->read_head - tail); + if (I_PARMRK(tty)) + room = (room + 2) / 3; + room--; + if (room <= 0) { + overflow = ldata->icanon && ldata->canon_head == tail; + if (overflow && room < 0) + ldata->read_head--; + room = overflow; + ldata->no_room = flow && !room; + } else + overflow = 0; + n = min(count, room); if (!n) break; - __receive_buf(tty, cp, fp, n); - cp += n; - if (fp) - fp += n; - count -= n; - } - tty->receive_room = room; - n_tty_check_throttle(tty); - up_read(&tty->termios_rwsem); -} - -static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp, - char *fp, int count) -{ - struct n_tty_data *ldata = tty->disc_data; - int room, n, rcvd = 0; + /* ignore parity errors if handling overflow */ + if (!overflow || !fp || *fp != TTY_PARITY) + __receive_buf(tty, cp, fp, n); - down_read(&tty->termios_rwsem); - - while (1) { - room = receive_room(tty); - n = min(count, room); - if (!n) { - if (!room) - ldata->no_room = 1; - break; - } - __receive_buf(tty, cp, fp, n); cp += n; if (fp) fp += n; @@ -1730,12 +1752,34 @@ } tty->receive_room = room; - n_tty_check_throttle(tty); + + /* Unthrottle if handling overflow on pty */ + if (tty->driver->type == TTY_DRIVER_TYPE_PTY) { + if (overflow) { + tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE); + tty_unthrottle_safe(tty); + __tty_set_flow_change(tty, 0); + } + } else + n_tty_check_throttle(tty); + up_read(&tty->termios_rwsem); return rcvd; } +static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, + char *fp, int count) +{ + n_tty_receive_buf_common(tty, cp, fp, count, 0); +} + +static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp, + char *fp, int count) +{ + return n_tty_receive_buf_common(tty, cp, fp, count, 1); +} + int is_ignored(int sig) { return (sigismember(¤t->blocked, sig) || @@ -1762,7 +1806,16 @@ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); - ldata->line_start = ldata->canon_head = ldata->read_tail; + ldata->line_start = ldata->read_tail; + if (!L_ICANON(tty) || !read_cnt(ldata)) { + ldata->canon_head = ldata->read_tail; + ldata->push = 0; + } else { + set_bit((ldata->read_head - 1) & (N_TTY_BUF_SIZE - 1), + ldata->read_flags); + ldata->canon_head = ldata->read_head; + ldata->push = 1; + } ldata->erasing = 0; ldata->lnext = 0; } @@ -1821,8 +1874,10 @@ * Fix tty hang when I_IXON(tty) is cleared, but the tty * been stopped by STOP_CHAR(tty) before it. 
*/ - if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { start_tty(tty); + process_echoes(tty); + } /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); @@ -1965,6 +2020,12 @@ * it copies one line of input up to and including the line-delimiting * character into the user-space buffer. * + * NB: When termios is changed from non-canonical to canonical mode and + * the read buffer contains data, n_tty_set_termios() simulates an EOF + * push (as if C-d were input) _without_ the DISABLED_CHAR in the buffer. + * This causes data already processed as input to be immediately available + * as input although a newline has not been received. + * * Called under the atomic_read_lock mutex * * n_tty_read()/consumer path: @@ -2011,7 +2072,7 @@ n += found; c = n; - if (found && read_buf(ldata, eol) == __DISABLED_CHAR) { + if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { n--; eof_push = !n && ldata->read_tail != ldata->line_start; } @@ -2038,7 +2099,10 @@ ldata->read_tail += c; if (found) { - ldata->line_start = ldata->read_tail; + if (!ldata->push) + ldata->line_start = ldata->read_tail; + else + ldata->push = 0; tty_audit_push(tty); } return eof_push ? -EAGAIN : 0; @@ -2343,8 +2407,12 @@ if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } else { + struct n_tty_data *ldata = tty->disc_data; + while (nr > 0) { + mutex_lock(&ldata->output_lock); c = tty->ops->write(tty, b, nr); + mutex_unlock(&ldata->output_lock); if (c < 0) { retval = c; goto break_out; --- linux-3.13.0.orig/drivers/tty/pty.c +++ linux-3.13.0/drivers/tty/pty.c @@ -209,6 +209,9 @@ unsigned long flags; struct pid *pgrp; + if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP) + return -EINVAL; + if (tty->link) { spin_lock_irqsave(&tty->link->ctrl_lock, flags); pgrp = get_pid(tty->link->pgrp); --- linux-3.13.0.orig/drivers/tty/serial/8250/8250_core.c +++ linux-3.13.0/drivers/tty/serial/8250/8250_core.c @@ -555,7 +555,7 @@ */ if ((p->port.type == PORT_XR17V35X) || (p->port.type == PORT_XR17D15X)) { - serial_out(p, UART_EXAR_SLEEP, 0xff); + serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0); return; } @@ -1520,7 +1520,7 @@ status = serial8250_rx_chars(up, status); } serial8250_modem_status(up); - if (status & UART_LSR_THRE) + if (!up->dma && (status & UART_LSR_THRE)) serial8250_tx_chars(up); spin_unlock_irqrestore(&port->lock, flags); @@ -2334,9 +2334,11 @@ * the trigger, or the MCR RTS bit is cleared. In the case where * the remote UART is not using CTS auto flow control, we must * have sufficient FIFO entries for the latency of the remote - * UART to respond. IOW, at least 32 bytes of FIFO. + * UART to respond. IOW, at least 32 bytes of FIFO. 
Also enable + * AFE if hw flow control is supported */ - if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) { + if ((up->capabilities & UART_CAP_AFE && (port->fifosize >= 32)) || + (port->flags & UPF_HARD_FLOW)) { up->mcr &= ~UART_MCR_AFE; if (termios->c_cflag & CRTSCTS) up->mcr |= UART_MCR_AFE; @@ -2670,6 +2672,10 @@ if (port->type == PORT_16550A && port->iotype == UPIO_AU) up->bugs |= UART_BUG_NOMSR; + /* HW bugs may trigger IRQ while IIR == NO_INT */ + if (port->type == PORT_TEGRA) + up->bugs |= UART_BUG_NOMSR; + if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) autoconfig_irq(up); --- linux-3.13.0.orig/drivers/tty/serial/8250/8250_dma.c +++ linux-3.13.0/drivers/tty/serial/8250/8250_dma.c @@ -20,12 +20,15 @@ struct uart_8250_port *p = param; struct uart_8250_dma *dma = p->dma; struct circ_buf *xmit = &p->port.state->xmit; - - dma->tx_running = 0; + unsigned long flags; dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr, UART_XMIT_SIZE, DMA_TO_DEVICE); + spin_lock_irqsave(&p->port.lock, flags); + + dma->tx_running = 0; + xmit->tail += dma->tx_size; xmit->tail &= UART_XMIT_SIZE - 1; p->port.icount.tx += dma->tx_size; @@ -35,6 +38,8 @@ if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) serial8250_tx_dma(p); + + spin_unlock_irqrestore(&p->port.lock, flags); } static void __dma_rx_complete(void *param) --- linux-3.13.0.orig/drivers/tty/serial/8250/8250_pci.c +++ linux-3.13.0/drivers/tty/serial/8250/8250_pci.c @@ -67,7 +67,7 @@ "Please send the output of lspci -vv, this\n" "message (0x%04x,0x%04x,0x%04x,0x%04x), the\n" "manufacturer and name of serial board or\n" - "modem board to rmk+serial@arm.linux.org.uk.\n", + "modem board to .\n", pci_name(dev), str, dev->vendor, dev->device, dev->subsystem_vendor, dev->subsystem_device); } @@ -1259,10 +1259,10 @@ unsigned long base = pci_resource_start(dev, 0); if (base) { u32 tmp; - outl(inl(base + 0x38), base + 0x38); + outl(inl(base + 0x38) | 0x00002000, base + 0x38); tmp = inl(base + 0x3c); outl(tmp | 0x01000000, base + 0x3c); - outl(tmp, base + 0x3c); + outl(tmp &= ~0x01000000, base + 0x3c); } } return 0; @@ -1744,6 +1744,7 @@ #define PCI_DEVICE_ID_TITAN_800E 0xA014 #define PCI_DEVICE_ID_TITAN_200EI 0xA016 #define PCI_DEVICE_ID_TITAN_200EISI 0xA017 +#define PCI_DEVICE_ID_TITAN_200V3 0xA306 #define PCI_DEVICE_ID_TITAN_400V3 0xA310 #define PCI_DEVICE_ID_TITAN_410V3 0xA312 #define PCI_DEVICE_ID_TITAN_800V3 0xA314 @@ -1765,6 +1766,7 @@ #define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022 #define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a #define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e +#define PCI_DEVICE_ID_INTEL_QRK_UART 0x0936 #define PCI_VENDOR_ID_SUNIX 0x1fd4 #define PCI_DEVICE_ID_SUNIX_1999 0x1999 @@ -1875,6 +1877,13 @@ .subdevice = PCI_ANY_ID, .setup = byt_serial_setup, }, + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_QRK_UART, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_default_setup, + }, /* * ITE */ @@ -2709,6 +2718,7 @@ pbn_ADDIDATA_PCIe_8_3906250, pbn_ce4100_1_115200, pbn_byt, + pbn_qrk, pbn_omegapci, pbn_NETMOS9900_2s_115200, pbn_brcm_trumanage, @@ -3455,6 +3465,12 @@ .uart_offset = 0x80, .reg_shift = 2, }, + [pbn_qrk] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 2764800, + .reg_shift = 2, + }, [pbn_omegapci] = { .flags = FL_BASE0, .num_ports = 8, @@ -4427,6 +4443,9 @@ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_2_4000000 }, + { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3, + PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_4_921600 }, @@ -5146,6 +5165,12 @@ pbn_byt }, /* + * Intel Quark x1000 + */ + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_UART, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_qrk }, + /* * Cronyx Omega PCI */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_CRONYX_OMEGA, --- linux-3.13.0.orig/drivers/tty/serial/amba-pl011.c +++ linux-3.13.0/drivers/tty/serial/amba-pl011.c @@ -1537,6 +1537,8 @@ /* * Provoke TX FIFO interrupt into asserting. */ + spin_lock_irq(&uap->port.lock); + cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE; writew(cr, uap->port.membase + UART011_CR); writew(0, uap->port.membase + UART011_FBRD); @@ -1561,6 +1563,8 @@ cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; writew(cr, uap->port.membase + UART011_CR); + spin_unlock_irq(&uap->port.lock); + /* * initialise the old status of the modem signals */ @@ -1629,11 +1633,13 @@ * it during startup(). */ uap->autorts = false; + spin_lock_irq(&uap->port.lock); cr = readw(uap->port.membase + UART011_CR); uap->old_cr = cr; cr &= UART011_CR_RTS | UART011_CR_DTR; cr |= UART01x_CR_UARTEN | UART011_CR_TXE; writew(cr, uap->port.membase + UART011_CR); + spin_unlock_irq(&uap->port.lock); /* * disable break condition and fifos --- linux-3.13.0.orig/drivers/tty/serial/atmel_serial.c +++ linux-3.13.0/drivers/tty/serial/atmel_serial.c @@ -825,9 +825,6 @@ atmel_port->desc_rx = NULL; atmel_port->chan_rx = NULL; atmel_port->cookie_rx = -EINVAL; - - if (!atmel_port->is_usart) - del_timer_sync(&atmel_port->uart_timer); } static void atmel_rx_from_dma(struct uart_port *port) @@ -1229,9 +1226,6 @@ DMA_FROM_DEVICE); kfree(pdc->buf); } - - if (!atmel_port->is_usart) - del_timer_sync(&atmel_port->uart_timer); } static void atmel_rx_from_pdc(struct uart_port *port) @@ -1604,12 +1598,13 @@ /* enable xmit & rcvr */ UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); + setup_timer(&atmel_port->uart_timer, + atmel_uart_timer_callback, + (unsigned long)port); + if (atmel_use_pdc_rx(port)) { /* set UART timeout */ if (!atmel_port->is_usart) { - setup_timer(&atmel_port->uart_timer, - atmel_uart_timer_callback, - (unsigned long)port); mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port)); /* set USART timeout */ @@ -1624,9 +1619,6 @@ } else if (atmel_use_dma_rx(port)) { /* set UART timeout */ if (!atmel_port->is_usart) { - setup_timer(&atmel_port->uart_timer, - atmel_uart_timer_callback, - (unsigned long)port); mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port)); /* set USART timeout */ @@ -1650,12 +1642,30 @@ static void atmel_shutdown(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + + /* + * Prevent any tasklets being scheduled during + * cleanup + */ + del_timer_sync(&atmel_port->uart_timer); + /* - * Ensure everything is stopped. + * Clear out any scheduled tasklets before + * we destroy the buffers + */ + tasklet_kill(&atmel_port->tasklet); + + /* + * Ensure everything is stopped and + * disable all interrupts, port and break condition. */ atmel_stop_rx(port); atmel_stop_tx(port); + UART_PUT_CR(port, ATMEL_US_RSTSTA); + UART_PUT_IDR(port, -1); + + /* * Shut-down the DMA. */ @@ -1665,10 +1675,10 @@ atmel_port->release_tx(port); /* - * Disable all interrupts, port and break condition. 
+ * Reset ring buffer pointers */ - UART_PUT_CR(port, ATMEL_US_RSTSTA); - UART_PUT_IDR(port, -1); + atmel_port->rx_ring.head = 0; + atmel_port->rx_ring.tail = 0; /* * Free the interrupt @@ -2382,7 +2392,7 @@ ret = atmel_init_port(port, pdev); if (ret) - goto err; + goto err_clear_bit; if (!atmel_use_pdc_rx(&port->uart)) { ret = -ENOMEM; @@ -2411,6 +2421,12 @@ device_init_wakeup(&pdev->dev, 1); platform_set_drvdata(pdev, port); + /* + * The peripheral clock has been disabled by atmel_init_port(): + * enable it before accessing I/O registers + */ + clk_prepare_enable(port->clk); + if (port->rs485.flags & SER_RS485_ENABLED) { UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL); UART_PUT_CR(&port->uart, ATMEL_US_RTSEN); @@ -2421,6 +2437,12 @@ */ atmel_get_ip_name(&port->uart); + /* + * The peripheral clock can now safely be disabled till the port + * is used + */ + clk_disable_unprepare(port->clk); + return 0; err_add_port: @@ -2431,6 +2453,8 @@ clk_put(port->clk); port->clk = NULL; } +err_clear_bit: + clear_bit(port->uart.line, atmel_ports_in_use); err: return ret; } @@ -2441,11 +2465,12 @@ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); int ret = 0; + tasklet_kill(&atmel_port->tasklet); + device_init_wakeup(&pdev->dev, 0); ret = uart_remove_one_port(&atmel_uart, port); - tasklet_kill(&atmel_port->tasklet); kfree(atmel_port->rx_ring.buf); /* "port" is allocated statically, so we shouldn't free it */ --- linux-3.13.0.orig/drivers/tty/serial/fsl_lpuart.c +++ linux-3.13.0/drivers/tty/serial/fsl_lpuart.c @@ -362,6 +362,9 @@ writeb(val | UARTPFIFO_TXFE | UARTPFIFO_RXFE, sport->port.membase + UARTPFIFO); + /* explicitly clear RDRF */ + readb(sport->port.membase + UARTSR1); + /* flush Tx and Rx FIFO */ writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH, sport->port.membase + UARTCFIFO); --- linux-3.13.0.orig/drivers/tty/serial/of_serial.c +++ linux-3.13.0/drivers/tty/serial/of_serial.c @@ -183,6 +183,10 @@ "auto-flow-control")) port8250.capabilities |= UART_CAP_AFE; + if (of_property_read_bool(ofdev->dev.of_node, + "has-hw-flow-control")) + port8250.port.flags |= UPF_HARD_FLOW; + ret = serial8250_register_8250_port(&port8250); break; } --- linux-3.13.0.orig/drivers/tty/serial/omap-serial.c +++ linux-3.13.0/drivers/tty/serial/omap-serial.c @@ -225,14 +225,19 @@ if (enable) enable_irq(up->wakeirq); else - disable_irq(up->wakeirq); + disable_irq_nosync(up->wakeirq); } static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) { struct omap_uart_port_info *pdata = dev_get_platdata(up->dev); + if (enable == up->wakeups_enabled) + return; + serial_omap_enable_wakeirq(up, enable); + up->wakeups_enabled = enable; + if (!pdata || !pdata->enable_wakeup) return; @@ -255,8 +260,16 @@ { unsigned int n13 = port->uartclk / (13 * baud); unsigned int n16 = port->uartclk / (16 * baud); - int baudAbsDiff13 = baud - (port->uartclk / (13 * n13)); - int baudAbsDiff16 = baud - (port->uartclk / (16 * n16)); + int baudAbsDiff13; + int baudAbsDiff16; + + if (n13 == 0) + n13 = 1; + if (n16 == 0) + n16 = 1; + + baudAbsDiff13 = baud - (port->uartclk / (13 * n13)); + baudAbsDiff16 = baud - (port->uartclk / (16 * n16)); if (baudAbsDiff13 < 0) baudAbsDiff13 = -baudAbsDiff13; if (baudAbsDiff16 < 0) @@ -738,9 +751,6 @@ return retval; } disable_irq(up->wakeirq); - } else { - dev_info(up->port.dev, "no wakeirq for uart%d\n", - up->port.line); } dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); @@ -1491,6 +1501,11 @@ uart_suspend_port(&serial_omap_reg, &up->port); flush_work(&up->qos_work); + if 
(device_may_wakeup(dev)) + serial_omap_enable_wakeup(up, true); + else + serial_omap_enable_wakeup(up, false); + return 0; } @@ -1498,6 +1513,9 @@ { struct uart_omap_port *up = dev_get_drvdata(dev); + if (device_may_wakeup(dev)) + serial_omap_enable_wakeup(up, false); + uart_resume_port(&serial_omap_reg, &up->port); return 0; @@ -1687,6 +1705,9 @@ up->port.iotype = UPIO_MEM; up->port.irq = uartirq; up->wakeirq = wakeirq; + if (!up->wakeirq) + dev_info(up->port.dev, "no wakeirq for uart%d\n", + up->port.line); up->port.regshift = 2; up->port.fifosize = 64; @@ -1867,17 +1888,7 @@ up->context_loss_cnt = serial_omap_get_context_loss_count(up); - if (device_may_wakeup(dev)) { - if (!up->wakeups_enabled) { - serial_omap_enable_wakeup(up, true); - up->wakeups_enabled = true; - } - } else { - if (up->wakeups_enabled) { - serial_omap_enable_wakeup(up, false); - up->wakeups_enabled = false; - } - } + serial_omap_enable_wakeup(up, true); up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; schedule_work(&up->qos_work); @@ -1891,6 +1902,8 @@ int loss_cnt = serial_omap_get_context_loss_count(up); + serial_omap_enable_wakeup(up, false); + if (loss_cnt < 0) { dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n", loss_cnt); --- linux-3.13.0.orig/drivers/tty/serial/samsung.c +++ linux-3.13.0/drivers/tty/serial/samsung.c @@ -544,11 +544,15 @@ unsigned int old) { struct s3c24xx_uart_port *ourport = to_ourport(port); + int timeout = 10000; ourport->pm_level = level; switch (level) { case 3: + while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) + udelay(100); + if (!IS_ERR(ourport->baudclk)) clk_disable_unprepare(ourport->baudclk); --- linux-3.13.0.orig/drivers/tty/serial/serial_core.c +++ linux-3.13.0/drivers/tty/serial/serial_core.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -170,8 +171,12 @@ if (tty->termios.c_cflag & CBAUD) uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR); } - - if (tty_port_cts_enabled(port)) { + /* + * if hw support flow control without software intervention, + * then skip the below check + */ + if (tty_port_cts_enabled(port) && + !(uport->flags & UPF_HARD_FLOW)) { spin_lock_irq(&uport->lock); if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS)) tty->hw_stopped = 1; @@ -235,6 +240,9 @@ /* * Turn off DTR and RTS early. */ + if (uart_console(uport) && tty) + uport->cons->cflag = tty->termios.c_cflag; + if (!tty || (tty->termios.c_cflag & HUPCL)) uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS); @@ -350,7 +358,7 @@ * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... * Die! Die! Die! 
*/ - if (baud == 38400) + if (try == 0 && baud == 38400) baud = altbaud; /* @@ -2600,6 +2608,8 @@ spin_lock_init(&uport->lock); lockdep_set_class(&uport->lock, &port_lock_key); } + if (uport->cons && uport->dev) + of_console_check(uport->dev->of_node, uport->cons->name, uport->line); uart_configure_port(drv, state, uport); @@ -2754,7 +2764,9 @@ uport->icount.cts++; - if (tty_port_cts_enabled(port)) { + /* skip below code if the hw flow control is supported */ + if (tty_port_cts_enabled(port) && + !(uport->flags & UPF_HARD_FLOW)) { if (tty->hw_stopped) { if (status) { tty->hw_stopped = 0; --- linux-3.13.0.orig/drivers/tty/serial/sirfsoc_uart.c +++ linux-3.13.0/drivers/tty/serial/sirfsoc_uart.c @@ -540,8 +540,10 @@ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | SIRFUART_IO_MODE); - sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); spin_unlock_irqrestore(&sirfport->rx_lock, flags); + spin_lock(&port->lock); + sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count); + spin_unlock(&port->lock); if (sirfport->rx_io_count == 4) { spin_lock_irqsave(&sirfport->rx_lock, flags); sirfport->rx_io_count = 0; --- linux-3.13.0.orig/drivers/tty/serial/sunsab.c +++ linux-3.13.0/drivers/tty/serial/sunsab.c @@ -157,6 +157,15 @@ (up->port.line == up->port.cons->index)) saw_console_brk = 1; + if (count == 0) { + if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { + stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | + SAB82532_ISR0_FERR); + up->port.icount.brk++; + uart_handle_break(&up->port); + } + } + for (i = 0; i < count; i++) { unsigned char ch = buf[i], flag; --- linux-3.13.0.orig/drivers/tty/tty_buffer.c +++ linux-3.13.0/drivers/tty/tty_buffer.c @@ -248,7 +248,11 @@ if ((n = tty_buffer_alloc(port, size)) != NULL) { buf->tail = n; b->commit = b->used; - smp_mb(); + /* paired w/ barrier in flush_to_ldisc(); ensures the + * latest commit value can be read before the head is + * advanced to the next buffer + */ + smp_wmb(); b->next = n; } else size = left; @@ -332,14 +336,11 @@ * Takes any pending buffers and transfers their ownership to the * ldisc side of the queue. It then schedules those characters for * processing by the line discipline. - * Note that this function can only be used when the low_latency flag - * is unset. Otherwise the workqueue won't be flushed. */ void tty_schedule_flip(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; - WARN_ON(port->low_latency); buf->tail->commit = buf->tail->used; schedule_work(&buf->work); @@ -452,17 +453,24 @@ while (1) { struct tty_buffer *head = buf->head; + struct tty_buffer *next; int count; /* Ldisc or user is trying to gain exclusive access */ if (atomic_read(&buf->priority)) break; + next = head->next; + /* paired w/ barrier in __tty_buffer_request_room(); + * ensures commit value read is not stale if the head + * is advancing to the next buffer + */ + smp_rmb(); count = head->commit - head->read; if (!count) { - if (head->next == NULL) + if (next == NULL) break; - buf->head = head->next; + buf->head = next; tty_buffer_free(port, head); continue; } @@ -487,17 +495,15 @@ */ void tty_flush_to_ldisc(struct tty_struct *tty) { - if (!tty->port->low_latency) - flush_work(&tty->port->buf.work); + flush_work(&tty->port->buf.work); } /** * tty_flip_buffer_push - terminal * @port: tty port to push * - * Queue a push of the terminal flip buffers to the line discipline. This - * function must not be called from IRQ context if port->low_latency is - * set. 
+ * Queue a push of the terminal flip buffers to the line discipline. + * Can be called from IRQ/atomic context. * * In the event of the queue being busy for flipping the work will be * held off and retried later. @@ -505,14 +511,7 @@ void tty_flip_buffer_push(struct tty_port *port) { - struct tty_bufhead *buf = &port->buf; - - buf->tail->commit = buf->tail->used; - - if (port->low_latency) - flush_to_ldisc(&buf->work); - else - schedule_work(&buf->work); + tty_schedule_flip(port); } EXPORT_SYMBOL(tty_flip_buffer_push); --- linux-3.13.0.orig/drivers/tty/tty_io.c +++ linux-3.13.0/drivers/tty/tty_io.c @@ -996,8 +996,8 @@ /* We limit tty time update visibility to every 8 seconds or so. */ static void tty_update_time(struct timespec *time) { - unsigned long sec = get_seconds() & ~7; - if ((long)(sec - time->tv_sec) > 0) + unsigned long sec = get_seconds(); + if (abs(sec - time->tv_sec) & ~7) time->tv_sec = sec; } @@ -1271,12 +1271,13 @@ * * Locking: None */ -static void tty_line_name(struct tty_driver *driver, int index, char *p) +static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) { if (driver->flags & TTY_DRIVER_UNNUMBERED_NODE) - strcpy(p, driver->name); + return sprintf(p, "%s", driver->name); else - sprintf(p, "%s%d", driver->name, index + driver->name_base); + return sprintf(p, "%s%d", driver->name, + index + driver->name_base); } /** @@ -1700,6 +1701,8 @@ int pty_master, tty_closing, o_tty_closing, do_sleep; int idx; char buf[64]; + long timeout = 0; + int once = 1; if (tty_paranoia_check(tty, inode, __func__)) return 0; @@ -1780,11 +1783,18 @@ if (!do_sleep) break; - printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", - __func__, tty_name(tty, buf)); + if (once) { + once = 0; + printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", + __func__, tty_name(tty, buf)); + } tty_unlock_pair(tty, o_tty); mutex_unlock(&tty_mutex); - schedule(); + schedule_timeout_killable(timeout); + if (timeout < 120 * HZ) + timeout = 2 * timeout + 1; + else + timeout = MAX_SCHEDULE_TIMEOUT; } /* @@ -3545,9 +3555,19 @@ if (i >= ARRAY_SIZE(cs)) break; } - while (i--) - count += sprintf(buf + count, "%s%d%c", - cs[i]->name, cs[i]->index, i ? ' ':'\n'); + while (i--) { + int index = cs[i]->index; + struct tty_driver *drv = cs[i]->device(cs[i], &index); + + /* don't resolve tty0 as some programs depend on it */ + if (drv && (cs[i]->index > 0 || drv->major != TTY_MAJOR)) + count += tty_line_name(drv, index, buf + count); + else + count += sprintf(buf + count, "%s%d", + cs[i]->name, cs[i]->index); + + count += sprintf(buf + count, "%c", i ? 
' ':'\n'); + } console_unlock(); return count; --- linux-3.13.0.orig/drivers/tty/tty_ioctl.c +++ linux-3.13.0/drivers/tty/tty_ioctl.c @@ -217,11 +217,17 @@ #endif if (!timeout) timeout = MAX_SCHEDULE_TIMEOUT; + if (wait_event_interruptible_timeout(tty->write_wait, - !tty_chars_in_buffer(tty), timeout) >= 0) { - if (tty->ops->wait_until_sent) - tty->ops->wait_until_sent(tty, timeout); + !tty_chars_in_buffer(tty), timeout) < 0) { + return; } + + if (timeout == MAX_SCHEDULE_TIMEOUT) + timeout = 0; + + if (tty->ops->wait_until_sent) + tty->ops->wait_until_sent(tty, timeout); } EXPORT_SYMBOL(tty_wait_until_sent); --- linux-3.13.0.orig/drivers/tty/vt/consolemap.c +++ linux-3.13.0/drivers/tty/vt/consolemap.c @@ -540,6 +540,13 @@ /* Save original vc_unipagdir_loc in case we allocate a new one */ p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; + + if (!p) { + err = -EINVAL; + + goto out_unlock; + } + if (p->readonly) { console_unlock(); return -EIO; @@ -633,6 +640,7 @@ set_inverse_transl(vc, p, i); /* Update inverse translations */ set_inverse_trans_unicode(vc, p); +out_unlock: console_unlock(); return err; } --- linux-3.13.0.orig/drivers/tty/vt/vt.c +++ linux-3.13.0/drivers/tty/vt/vt.c @@ -498,6 +498,7 @@ #endif if (DO_UPDATE(vc)) do_update_region(vc, (unsigned long) p, count); + notify_update(vc); } /* used by selection: complement pointer position */ @@ -514,6 +515,7 @@ scr_writew(old, screenpos(vc, old_offset, 1)); if (DO_UPDATE(vc)) vc->vc_sw->con_putc(vc, old, oldy, oldx); + notify_update(vc); } old_offset = offset; @@ -531,8 +533,8 @@ oldy = (offset >> 1) / vc->vc_cols; vc->vc_sw->con_putc(vc, new, oldy, oldx); } + notify_update(vc); } - } static void insert_char(struct vc_data *vc, unsigned int nr) @@ -1164,6 +1166,8 @@ scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char, vc->vc_screenbuf_size >> 1); set_origin(vc); + if (CON_IS_VISIBLE(vc)) + update_screen(vc); /* fall through */ case 2: /* erase whole display */ count = vc->vc_cols * vc->vc_rows; --- linux-3.13.0.orig/drivers/uio/uio.c +++ linux-3.13.0/drivers/uio/uio.c @@ -847,7 +847,7 @@ info->uio_dev = idev; if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { - ret = devm_request_irq(parent, info->irq, uio_interrupt, + ret = devm_request_irq(idev->dev, info->irq, uio_interrupt, info->irq_flags, info->name, idev); if (ret) goto err_request_irq; --- linux-3.13.0.orig/drivers/usb/chipidea/ci.h +++ linux-3.13.0/drivers/usb/chipidea/ci.h @@ -135,6 +135,7 @@ * @id_event: indicates there is an id event, and handled at ci_otg_work * @b_sess_valid_event: indicates there is a vbus event, and handled * at ci_otg_work + * @imx28_write_fix: Freescale imx28 needs swp instruction for writing */ struct ci_hdrc { struct device *dev; @@ -173,6 +174,7 @@ struct dentry *debugfs; bool id_event; bool b_sess_valid_event; + bool imx28_write_fix; }; static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci) @@ -253,6 +255,26 @@ return ioread32(ci->hw_bank.regmap[reg]) & mask; } +#ifdef CONFIG_SOC_IMX28 +static inline void imx28_ci_writel(u32 val, volatile void __iomem *addr) +{ + __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr)); +} +#else +static inline void imx28_ci_writel(u32 val, volatile void __iomem *addr) +{ +} +#endif + +static inline void __hw_write(struct ci_hdrc *ci, u32 val, + void __iomem *addr) +{ + if (ci->imx28_write_fix) + imx28_ci_writel(val, addr); + else + iowrite32(val, addr); +} + /** * hw_write: writes to a hw register * @reg: register index @@ -266,7 +288,7 @@ data = (ioread32(ci->hw_bank.regmap[reg]) & ~mask) | (data & 
mask); - iowrite32(data, ci->hw_bank.regmap[reg]); + __hw_write(ci, data, ci->hw_bank.regmap[reg]); } /** @@ -281,7 +303,7 @@ { u32 val = ioread32(ci->hw_bank.regmap[reg]) & mask; - iowrite32(val, ci->hw_bank.regmap[reg]); + __hw_write(ci, val, ci->hw_bank.regmap[reg]); return val; } --- linux-3.13.0.orig/drivers/usb/chipidea/ci_hdrc_imx.c +++ linux-3.13.0/drivers/usb/chipidea/ci_hdrc_imx.c @@ -23,6 +23,26 @@ #include "ci.h" #include "ci_hdrc_imx.h" +#define CI_HDRC_IMX_IMX28_WRITE_FIX BIT(0) + +struct ci_hdrc_imx_platform_flag { + unsigned int flags; +}; + +static const struct ci_hdrc_imx_platform_flag imx27_usb_data = { +}; + +static const struct ci_hdrc_imx_platform_flag imx28_usb_data = { + .flags = CI_HDRC_IMX_IMX28_WRITE_FIX, +}; + +static const struct of_device_id ci_hdrc_imx_dt_ids[] = { + { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data}, + { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); + struct ci_hdrc_imx_data { struct usb_phy *phy; struct platform_device *ci_pdev; @@ -82,6 +102,9 @@ CI_HDRC_DISABLE_STREAMING, }; int ret; + const struct of_device_id *of_id = + of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); + const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) { @@ -115,6 +138,9 @@ pdata.phy = data->phy; + if (imx_platform_flag->flags & CI_HDRC_IMX_IMX28_WRITE_FIX) + pdata.flags |= CI_HDRC_IMX28_WRITE_FIX; + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) goto err_clk; @@ -173,12 +199,6 @@ return 0; } -static const struct of_device_id ci_hdrc_imx_dt_ids[] = { - { .compatible = "fsl,imx27-usb", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); - static struct platform_driver ci_hdrc_imx_driver = { .probe = ci_hdrc_imx_probe, .remove = ci_hdrc_imx_remove, --- linux-3.13.0.orig/drivers/usb/chipidea/core.c +++ linux-3.13.0/drivers/usb/chipidea/core.c @@ -554,6 +554,8 @@ ci->dev = dev; ci->platdata = dev->platform_data; + ci->imx28_write_fix = !!(ci->platdata->flags & + CI_HDRC_IMX28_WRITE_FIX); ret = hw_device_init(ci, base); if (ret < 0) { --- linux-3.13.0.orig/drivers/usb/chipidea/host.c +++ linux-3.13.0/drivers/usb/chipidea/host.c @@ -65,6 +65,7 @@ ehci->caps = ci->hw_bank.cap; ehci->has_hostpc = ci->hw_bank.lpm; ehci->has_tdi_phy_lpm = ci->hw_bank.lpm; + ehci->imx28_write_fix = ci->imx28_write_fix; if (ci->platdata->reg_vbus) { ret = regulator_enable(ci->platdata->reg_vbus); --- linux-3.13.0.orig/drivers/usb/chipidea/otg.h +++ linux-3.13.0/drivers/usb/chipidea/otg.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Freescale Semiconductor, Inc. + * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. 
* * Author: Peter Chen * @@ -19,12 +19,12 @@ static inline void ci_enable_otg_interrupt(struct ci_hdrc *ci, u32 bits) { - hw_write(ci, OP_OTGSC, bits, bits); + hw_write(ci, OP_OTGSC, bits | OTGSC_INT_STATUS_BITS, bits); } static inline void ci_disable_otg_interrupt(struct ci_hdrc *ci, u32 bits) { - hw_write(ci, OP_OTGSC, bits, 0); + hw_write(ci, OP_OTGSC, bits | OTGSC_INT_STATUS_BITS, 0); } int ci_hdrc_otg_init(struct ci_hdrc *ci); --- linux-3.13.0.orig/drivers/usb/chipidea/udc.c +++ linux-3.13.0/drivers/usb/chipidea/udc.c @@ -105,7 +105,7 @@ do { /* flush any pending transfer */ - hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n)); + hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n)); while (hw_read(ci, OP_ENDPTFLUSH, BIT(n))) cpu_relax(); } while (hw_read(ci, OP_ENDPTSTAT, BIT(n))); @@ -205,7 +205,7 @@ if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num))) return -EAGAIN; - hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n)); + hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n)); while (hw_read(ci, OP_ENDPTPRIME, BIT(n))) cpu_relax(); @@ -393,6 +393,14 @@ node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES)); node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES); node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE); + if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) { + u32 mul = hwreq->req.length / hwep->ep.maxpacket; + + if (hwreq->req.length == 0 + || hwreq->req.length % hwep->ep.maxpacket) + mul++; + node->ptr->token |= mul << __ffs(TD_MULTO); + } temp = (u32) (hwreq->req.dma + hwreq->req.actual); if (length) { @@ -515,10 +523,11 @@ hwep->qh.ptr->td.token &= cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE)); - if (hwep->type == USB_ENDPOINT_XFER_ISOC) { + if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) { u32 mul = hwreq->req.length / hwep->ep.maxpacket; - if (hwreq->req.length % hwep->ep.maxpacket) + if (hwreq->req.length == 0 + || hwreq->req.length % hwep->ep.maxpacket) mul++; hwep->qh.ptr->cap |= mul << __ffs(QH_MULT); } @@ -1170,9 +1179,15 @@ if (hwep->type == USB_ENDPOINT_XFER_CONTROL) cap |= QH_IOS; - if (hwep->num) - cap |= QH_ZLT; + + cap |= QH_ZLT; cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; + /* + * For ISO-TX, we set mult at QH as the largest value, and use + * MultO at TD as real mult value. 
+ */ + if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) + cap |= 3 << __ffs(QH_MULT); hwep->qh.ptr->cap = cpu_to_le32(cap); @@ -1311,6 +1326,7 @@ struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); unsigned long flags; + struct td_node *node, *tmpnode; if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || hwep->ep.desc == NULL || list_empty(&hwreq->queue) || @@ -1321,6 +1337,12 @@ hw_ep_flush(hwep->ci, hwep->num, hwep->dir); + list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { + dma_pool_free(hwep->td_pool, node->ptr, node->dma); + list_del(&node->td); + kfree(node); + } + /* pop request */ list_del_init(&hwreq->queue); --- linux-3.13.0.orig/drivers/usb/class/cdc-acm.c +++ linux-3.13.0/drivers/usb/class/cdc-acm.c @@ -845,11 +845,12 @@ /* FIXME: Needs to clear unsupported bits in the termios */ acm->clocal = ((termios->c_cflag & CLOCAL) != 0); - if (!newline.dwDTERate) { + if (C_BAUD(tty) == B0) { newline.dwDTERate = acm->line.dwDTERate; newctrl &= ~ACM_CTRL_DTR; - } else + } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) { newctrl |= ACM_CTRL_DTR; + } if (newctrl != acm->ctrlout) acm_set_control(acm, acm->ctrlout = newctrl); @@ -942,6 +943,7 @@ unsigned long quirks; int num_rx_buf; int i; + unsigned int elength = 0; int combined_interfaces = 0; struct device *tty_dev; int rv = -ENOMEM; @@ -987,9 +989,12 @@ dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } + elength = buffer[0]; switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ + if (elength < sizeof(struct usb_cdc_union_desc)) + goto next_desc; if (union_header) { dev_err(&intf->dev, "More than one " "union descriptor, skipping ...\n"); @@ -998,31 +1003,38 @@ union_header = (struct usb_cdc_union_desc *)buffer; break; case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/ + if (elength < sizeof(struct usb_cdc_country_functional_desc)) + goto next_desc; cfd = (struct usb_cdc_country_functional_desc *)buffer; break; case USB_CDC_HEADER_TYPE: /* maybe check version */ break; /* for now we ignore it */ case USB_CDC_ACM_TYPE: + if (elength < 4) + goto next_desc; ac_management_function = buffer[3]; break; case USB_CDC_CALL_MANAGEMENT_TYPE: + if (elength < 5) + goto next_desc; call_management_function = buffer[3]; call_interface_num = buffer[4]; if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3) dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n"); break; default: - /* there are LOTS more CDC descriptors that + /* + * there are LOTS more CDC descriptors that * could legitimately be found here. 
*/ dev_dbg(&intf->dev, "Ignoring descriptor: " - "type %02x, length %d\n", - buffer[2], buffer[0]); + "type %02x, length %ud\n", + buffer[2], elength); break; } next_desc: - buflen -= buffer[0]; - buffer += buffer[0]; + buflen -= elength; + buffer += elength; } if (!union_header) { @@ -1048,10 +1060,11 @@ } else { control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0)); - if (!control_interface || !data_interface) { - dev_dbg(&intf->dev, "no interfaces\n"); - return -ENODEV; - } + } + + if (!control_interface || !data_interface) { + dev_dbg(&intf->dev, "no interfaces\n"); + return -ENODEV; } if (data_interface_num != call_interface_num) @@ -1325,6 +1338,7 @@ &dev_attr_wCountryCodes); device_remove_file(&acm->control->dev, &dev_attr_iCountryCodeRelDate); + kfree(acm->country_codes); } device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); alloc_fail7: @@ -1556,17 +1570,32 @@ { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ }, /* Motorola H24 HSPA module: */ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */ - { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */ - { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */ - { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */ - { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */ - { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */ - { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */ - { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */ + { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, + { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, + { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, + { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, + { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, + { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, + { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */ + .driver_info = NO_UNION_NORMAL, /* handle only modem interface */ + }, { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on --- linux-3.13.0.orig/drivers/usb/class/cdc-wdm.c +++ linux-3.13.0/drivers/usb/class/cdc-wdm.c @@ -432,6 +432,38 @@ return rv < 0 ? rv : count; } +/* + * clear WDM_READ flag and possibly submit the read urb if resp_count + * is non-zero. 
+ * + * Called with desc->iuspin locked + */ +static int clear_wdm_read_flag(struct wdm_device *desc) +{ + int rv = 0; + + clear_bit(WDM_READ, &desc->flags); + + /* submit read urb only if the device is waiting for it */ + if (!desc->resp_count || !--desc->resp_count) + goto out; + + set_bit(WDM_RESPONDING, &desc->flags); + spin_unlock_irq(&desc->iuspin); + rv = usb_submit_urb(desc->response, GFP_KERNEL); + spin_lock_irq(&desc->iuspin); + if (rv) { + dev_err(&desc->intf->dev, + "usb_submit_urb failed with result %d\n", rv); + + /* make sure the next notification trigger a submit */ + clear_bit(WDM_RESPONDING, &desc->flags); + desc->resp_count = 0; + } +out: + return rv; +} + static ssize_t wdm_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos) { @@ -503,8 +535,10 @@ if (!desc->reslength) { /* zero length read */ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); - clear_bit(WDM_READ, &desc->flags); + rv = clear_wdm_read_flag(desc); spin_unlock_irq(&desc->iuspin); + if (rv < 0) + goto err; goto retry; } cntr = desc->length; @@ -526,37 +560,9 @@ desc->length -= cntr; /* in case we had outstanding data */ - if (!desc->length) { - clear_bit(WDM_READ, &desc->flags); - - if (--desc->resp_count) { - set_bit(WDM_RESPONDING, &desc->flags); - spin_unlock_irq(&desc->iuspin); - - rv = usb_submit_urb(desc->response, GFP_KERNEL); - if (rv) { - dev_err(&desc->intf->dev, - "%s: usb_submit_urb failed with result %d\n", - __func__, rv); - spin_lock_irq(&desc->iuspin); - clear_bit(WDM_RESPONDING, &desc->flags); - spin_unlock_irq(&desc->iuspin); - - if (rv == -ENOMEM) { - rv = schedule_work(&desc->rxwork); - if (rv) - dev_err(&desc->intf->dev, "Cannot schedule work\n"); - } else { - spin_lock_irq(&desc->iuspin); - desc->resp_count = 0; - spin_unlock_irq(&desc->iuspin); - } - } - } else - spin_unlock_irq(&desc->iuspin); - } else - spin_unlock_irq(&desc->iuspin); - + if (!desc->length) + clear_wdm_read_flag(desc); + spin_unlock_irq(&desc->iuspin); rv = cntr; err: --- linux-3.13.0.orig/drivers/usb/core/buffer.c +++ linux-3.13.0/drivers/usb/core/buffer.c @@ -22,17 +22,25 @@ */ /* FIXME tune these based on pool statistics ... */ -static const size_t pool_max[HCD_BUFFER_POOLS] = { - /* platforms without dma-friendly caches might need to - * prevent cacheline sharing... - */ - 32, - 128, - 512, - PAGE_SIZE / 2 - /* bigger --> allocate pages */ +static size_t pool_max[HCD_BUFFER_POOLS] = { + 32, 128, 512, 2048, }; +void __init usb_init_pool_max(void) +{ + /* + * The pool_max values must never be smaller than + * ARCH_KMALLOC_MINALIGN. + */ + if (ARCH_KMALLOC_MINALIGN <= 32) + ; /* Original value is okay */ + else if (ARCH_KMALLOC_MINALIGN <= 64) + pool_max[0] = 64; + else if (ARCH_KMALLOC_MINALIGN <= 128) + pool_max[0] = 0; /* Don't use this pool */ + else + BUILD_BUG(); /* We don't allow this */ +} /* SETUP primitives */ --- linux-3.13.0.orig/drivers/usb/core/config.c +++ linux-3.13.0/drivers/usb/core/config.c @@ -651,10 +651,6 @@ * * hub-only!! ... and only in reset path, or usb_new_device() * (used by real hubs and virtual root hubs) - * - * NOTE: if this is a WUSB device and is not authorized, we skip the - * whole thing. A non-authorized USB device has no - * configurations. 
*/ int usb_get_configuration(struct usb_device *dev) { @@ -666,8 +662,6 @@ struct usb_config_descriptor *desc; cfgno = 0; - if (dev->authorized == 0) /* Not really an error */ - goto out_not_authorized; result = -ENOMEM; if (ncfg > USB_MAXCONFIG) { dev_warn(ddev, "too many configurations: %d, " @@ -724,6 +718,10 @@ result = -ENOMEM; goto err; } + + if (dev->quirks & USB_QUIRK_DELAY_INIT) + msleep(100); + result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, bigbuffer, length); if (result < 0) { @@ -751,7 +749,6 @@ err: kfree(desc); -out_not_authorized: dev->descriptor.bNumConfigurations = cfgno; err2: if (result == -ENOMEM) --- linux-3.13.0.orig/drivers/usb/core/devio.c +++ linux-3.13.0/drivers/usb/core/devio.c @@ -501,6 +501,7 @@ as->status = urb->status; signr = as->signr; if (signr) { + memset(&sinfo, 0, sizeof(sinfo)); sinfo.si_signo = as->signr; sinfo.si_errno = as->status; sinfo.si_code = SI_ASYNCIO; @@ -1411,7 +1412,7 @@ u = (is_in ? URB_DIR_IN : URB_DIR_OUT); if (uurb->flags & USBDEVFS_URB_ISO_ASAP) u |= URB_ISO_ASAP; - if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) + if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) u |= URB_SHORT_NOT_OK; if (uurb->flags & USBDEVFS_URB_NO_FSBR) u |= URB_NO_FSBR; @@ -2227,6 +2228,7 @@ wake_up_all(&ps->wait); list_del_init(&ps->list); if (ps->discsignr) { + memset(&sinfo, 0, sizeof(sinfo)); sinfo.si_signo = ps->discsignr; sinfo.si_errno = EPIPE; sinfo.si_code = SI_ASYNCIO; --- linux-3.13.0.orig/drivers/usb/core/driver.c +++ linux-3.13.0/drivers/usb/core/driver.c @@ -959,8 +959,7 @@ * it doesn't support pre_reset/post_reset/reset_resume or * because it doesn't support suspend/resume. * - * The caller must hold @intf's device's lock, but not its pm_mutex - * and not @intf->dev.sem. + * The caller must hold @intf's device's lock, but not @intf's lock. */ void usb_forced_unbind_intf(struct usb_interface *intf) { @@ -973,16 +972,37 @@ intf->needs_binding = 1; } +/* + * Unbind drivers for @udev's marked interfaces. These interfaces have + * the needs_binding flag set, for example by usb_resume_interface(). + * + * The caller must hold @udev's device lock. + */ +static void unbind_marked_interfaces(struct usb_device *udev) +{ + struct usb_host_config *config; + int i; + struct usb_interface *intf; + + config = udev->actconfig; + if (config) { + for (i = 0; i < config->desc.bNumInterfaces; ++i) { + intf = config->interface[i]; + if (intf->dev.driver && intf->needs_binding) + usb_forced_unbind_intf(intf); + } + } +} + /* Delayed forced unbinding of a USB interface driver and scan * for rebinding. * - * The caller must hold @intf's device's lock, but not its pm_mutex - * and not @intf->dev.sem. + * The caller must hold @intf's device's lock, but not @intf's lock. * * Note: Rebinds will be skipped if a system sleep transition is in * progress and the PM "complete" callback hasn't occurred yet. */ -void usb_rebind_intf(struct usb_interface *intf) +static void usb_rebind_intf(struct usb_interface *intf) { int rc; @@ -999,68 +1019,66 @@ } } -#ifdef CONFIG_PM - -/* Unbind drivers for @udev's interfaces that don't support suspend/resume - * There is no check for reset_resume here because it can be determined - * only during resume whether reset_resume is needed. +/* + * Rebind drivers to @udev's marked interfaces. These interfaces have + * the needs_binding flag set. * * The caller must hold @udev's device lock. 
*/ -static void unbind_no_pm_drivers_interfaces(struct usb_device *udev) +static void rebind_marked_interfaces(struct usb_device *udev) { struct usb_host_config *config; int i; struct usb_interface *intf; - struct usb_driver *drv; config = udev->actconfig; if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { intf = config->interface[i]; - - if (intf->dev.driver) { - drv = to_usb_driver(intf->dev.driver); - if (!drv->suspend || !drv->resume) - usb_forced_unbind_intf(intf); - } + if (intf->needs_binding) + usb_rebind_intf(intf); } } } -/* Unbind drivers for @udev's interfaces that failed to support reset-resume. - * These interfaces have the needs_binding flag set by usb_resume_interface(). +/* + * Unbind all of @udev's marked interfaces and then rebind all of them. + * This ordering is necessary because some drivers claim several interfaces + * when they are first probed. * * The caller must hold @udev's device lock. */ -static void unbind_no_reset_resume_drivers_interfaces(struct usb_device *udev) +void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev) { - struct usb_host_config *config; - int i; - struct usb_interface *intf; - - config = udev->actconfig; - if (config) { - for (i = 0; i < config->desc.bNumInterfaces; ++i) { - intf = config->interface[i]; - if (intf->dev.driver && intf->needs_binding) - usb_forced_unbind_intf(intf); - } - } + unbind_marked_interfaces(udev); + rebind_marked_interfaces(udev); } -static void do_rebind_interfaces(struct usb_device *udev) +#ifdef CONFIG_PM + +/* Unbind drivers for @udev's interfaces that don't support suspend/resume + * There is no check for reset_resume here because it can be determined + * only during resume whether reset_resume is needed. + * + * The caller must hold @udev's device lock. + */ +static void unbind_no_pm_drivers_interfaces(struct usb_device *udev) { struct usb_host_config *config; int i; struct usb_interface *intf; + struct usb_driver *drv; config = udev->actconfig; if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { intf = config->interface[i]; - if (intf->needs_binding) - usb_rebind_intf(intf); + + if (intf->dev.driver) { + drv = to_usb_driver(intf->dev.driver); + if (!drv->suspend || !drv->resume) + usb_forced_unbind_intf(intf); + } } } } @@ -1389,7 +1407,7 @@ * whose needs_binding flag is set */ if (udev->state != USB_STATE_NOTATTACHED) - do_rebind_interfaces(udev); + rebind_marked_interfaces(udev); return 0; } @@ -1411,7 +1429,7 @@ pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); - unbind_no_reset_resume_drivers_interfaces(udev); + unbind_marked_interfaces(udev); } /* Avoid PM error messages for devices disconnected while suspended @@ -1730,6 +1748,18 @@ dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n"); return -EOPNOTSUPP; } + + /* + * If the device is a direct child of the root hub and the HCD + * doesn't handle wakeup requests, don't allow autosuspend when + * wakeup is needed. + */ + if (w && udev->parent == udev->bus->root_hub && + bus_to_hcd(udev->bus)->cant_recv_wakeups) { + dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n"); + return -EOPNOTSUPP; + } + udev->do_remote_wakeup = w; return 0; } @@ -1752,10 +1782,13 @@ if (status == -EAGAIN || status == -EBUSY) usb_mark_last_busy(udev); - /* The PM core reacts badly unless the return code is 0, - * -EAGAIN, or -EBUSY, so always return -EBUSY on an error. 
+ /* + * The PM core reacts badly unless the return code is 0, + * -EAGAIN, or -EBUSY, so always return -EBUSY on an error + * (except for root hubs, because they don't suspend through + * an upstream port like other USB devices). */ - if (status != 0) + if (status != 0 && udev->parent) return -EBUSY; return status; } --- linux-3.13.0.orig/drivers/usb/core/hcd-pci.c +++ linux-3.13.0/drivers/usb/core/hcd-pci.c @@ -75,7 +75,7 @@ PCI_SLOT(companion->devfn) != slot) continue; companion_hcd = pci_get_drvdata(companion); - if (!companion_hcd) + if (!companion_hcd || !companion_hcd->self.root_hub) continue; fn(pdev, hcd, companion, companion_hcd); } --- linux-3.13.0.orig/drivers/usb/core/hcd.c +++ linux-3.13.0/drivers/usb/core/hcd.c @@ -1031,7 +1031,6 @@ dev_name(&usb_dev->dev), retval); return retval; } - usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); } retval = usb_new_device (usb_dev); @@ -1617,6 +1616,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) { struct usb_hcd *hcd; + struct usb_device *udev = urb->dev; int retval = -EIDRM; unsigned long flags; @@ -1628,20 +1628,19 @@ spin_lock_irqsave(&hcd_urb_unlink_lock, flags); if (atomic_read(&urb->use_count) > 0) { retval = 0; - usb_get_dev(urb->dev); + usb_get_dev(udev); } spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags); if (retval == 0) { hcd = bus_to_hcd(urb->dev->bus); retval = unlink1(hcd, urb, status); - usb_put_dev(urb->dev); + if (retval == 0) + retval = -EINPROGRESS; + else if (retval != -EIDRM && retval != -EBUSY) + dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", + urb, retval); + usb_put_dev(udev); } - - if (retval == 0) - retval = -EINPROGRESS; - else if (retval != -EIDRM && retval != -EBUSY) - dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n", - urb, retval); return retval; } @@ -2057,6 +2056,8 @@ return -EINVAL; if (dev->speed != USB_SPEED_SUPER) return -EINVAL; + if (dev->state < USB_STATE_CONFIGURED) + return -ENODEV; /* Streams only apply to bulk endpoints. */ for (i = 0; i < num_eps; i++) --- linux-3.13.0.orig/drivers/usb/core/hub.c +++ linux-3.13.0/drivers/usb/core/hub.c @@ -135,7 +135,7 @@ return usb_get_intfdata(hdev->actconfig->interface[0]); } -int usb_device_supports_lpm(struct usb_device *udev) +static int usb_device_supports_lpm(struct usb_device *udev) { /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. @@ -156,11 +156,6 @@ "Power management will be impacted.\n"); return 0; } - - /* udev is root hub */ - if (!udev->parent) - return 1; - if (udev->parent->lpm_capable) return 1; @@ -896,6 +891,25 @@ if (!hub_is_superspeed(hub->hdev)) return -EINVAL; + ret = hub_port_status(hub, port1, &portstatus, &portchange); + if (ret < 0) + return ret; + + /* + * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI + * Controller [1022:7814] will have spurious result making the following + * usb 3.0 device hotplugging route to the 2.0 root hub and recognized + * as high-speed device if we set the usb 3.0 port link state to + * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we + * check the state here to avoid the bug. 
+ */ + if ((portstatus & USB_PORT_STAT_LINK_STATE) == + USB_SS_PORT_LS_RX_DETECT) { + dev_dbg(&hub->ports[port1 - 1]->dev, + "Not disabling port; link state is RxDetect\n"); + return ret; + } + ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); if (ret) return ret; @@ -1607,7 +1621,7 @@ { struct usb_hub *hub = usb_get_intfdata(intf); struct usb_device *hdev = interface_to_usbdev(intf); - int i; + int port1; /* Take the hub off the event list and don't let it be added again */ spin_lock_irq(&hub_event_lock); @@ -1622,11 +1636,15 @@ hub->error = 0; hub_quiesce(hub, HUB_DISCONNECT); - usb_set_intfdata (intf, NULL); + /* Avoid races with recursively_mark_NOTATTACHED() */ + spin_lock_irq(&device_state_lock); + port1 = hdev->maxchild; + hdev->maxchild = 0; + usb_set_intfdata(intf, NULL); + spin_unlock_irq(&device_state_lock); - for (i = 0; i < hdev->maxchild; i++) - usb_hub_remove_port_device(hub, i + 1); - hub->hdev->maxchild = 0; + for (; port1 > 0; --port1) + usb_hub_remove_port_device(hub, port1); if (hub->hdev->speed == USB_SPEED_HIGH) highspeed_hubs--; @@ -1683,11 +1701,28 @@ * - Change autosuspend delay of hub can avoid unnecessary auto * suspend timer for hub, also may decrease power consumption * of USB bus. + * + * - If user has indicated to prevent autosuspend by passing + * usbcore.autosuspend = -1 then keep autosuspend disabled. */ - pm_runtime_set_autosuspend_delay(&hdev->dev, 0); +#ifdef CONFIG_PM_RUNTIME + if (hdev->dev.power.autosuspend_delay >= 0) + pm_runtime_set_autosuspend_delay(&hdev->dev, 0); +#endif + + /* + * Hubs have proper suspend/resume support, except for root hubs + * where the controller driver doesn't have bus_suspend and + * bus_resume methods. + */ + if (hdev->parent) { /* normal device */ + usb_enable_autosuspend(hdev); + } else { /* root hub */ + const struct hc_driver *drv = bus_to_hcd(hdev->bus)->driver; - /* Hubs have proper suspend/resume support. */ - usb_enable_autosuspend(hdev); + if (drv->bus_suspend && drv->bus_resume) + usb_enable_autosuspend(hdev); + } if (hdev->level == MAX_TOPO_LEVEL) { dev_err(&intf->dev, @@ -1919,8 +1954,10 @@ || new_state == USB_STATE_SUSPENDED) ; /* No change to wakeup settings */ else if (new_state == USB_STATE_CONFIGURED) - wakeup = udev->actconfig->desc.bmAttributes - & USB_CONFIG_ATT_WAKEUP; + wakeup = (udev->quirks & + USB_QUIRK_IGNORE_REMOTE_WAKEUP) ? 
0 : + udev->actconfig->desc.bmAttributes & + USB_CONFIG_ATT_WAKEUP; else wakeup = 0; } @@ -2235,17 +2272,13 @@ return err; } } - if (udev->wusb == 1 && udev->authorized == 0) { - udev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); - udev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); - udev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); - } else { - /* read the standard strings and cache them if present */ - udev->product = usb_cache_string(udev, udev->descriptor.iProduct); - udev->manufacturer = usb_cache_string(udev, - udev->descriptor.iManufacturer); - udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); - } + + /* read the standard strings and cache them if present */ + udev->product = usb_cache_string(udev, udev->descriptor.iProduct); + udev->manufacturer = usb_cache_string(udev, + udev->descriptor.iManufacturer); + udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); + err = usb_enumerate_device_otg(udev); if (err < 0) return err; @@ -2427,16 +2460,6 @@ usb_dev->authorized = 0; usb_set_configuration(usb_dev, -1); - kfree(usb_dev->product); - usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); - kfree(usb_dev->manufacturer); - usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); - kfree(usb_dev->serial); - usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); - - usb_destroy_configuration(usb_dev); - usb_dev->descriptor.bNumConfigurations = 0; - out_unauthorized: usb_unlock_device(usb_dev); return 0; @@ -2464,17 +2487,7 @@ goto error_device_descriptor; } - kfree(usb_dev->product); - usb_dev->product = NULL; - kfree(usb_dev->manufacturer); - usb_dev->manufacturer = NULL; - kfree(usb_dev->serial); - usb_dev->serial = NULL; - usb_dev->authorized = 1; - result = usb_enumerate_device(usb_dev); - if (result < 0) - goto error_enumerate; /* Choose and set the configuration. This registers the interfaces * with the driver core and lets interface drivers bind to them. */ @@ -2490,7 +2503,6 @@ } dev_info(&usb_dev->dev, "authorized to connect\n"); -error_enumerate: error_device_descriptor: usb_autosuspend_device(usb_dev); error_autoresume: @@ -3161,6 +3173,43 @@ } /* + * There are some SS USB devices which take longer time for link training. + * XHCI specs 4.19.4 says that when Link training is successful, port + * sets CSC bit to 1. So if SW reads port status before successful link + * training, then it will not find device to be present. + * USB Analyzer log with such buggy devices show that in some cases + * device switch on the RX termination after long delay of host enabling + * the VBUS. In few other cases it has been seen that device fails to + * negotiate link training in first attempt. It has been + * reported till now that few devices take as long as 2000 ms to train + * the link after host enabling its VBUS and termination. Following + * routine implements a 2000 ms timeout for link training. If in a case + * link trains before timeout, loop will exit earlier. + * + * FIXME: If a device was connected before suspend, but was removed + * while system was asleep, then the loop in the following routine will + * only exit at timeout. + * + * This routine should only be called when persist is enabled for a SS + * device. 
+ */ +static int wait_for_ss_port_enable(struct usb_device *udev, + struct usb_hub *hub, int *port1, + u16 *portchange, u16 *portstatus) +{ + int status = 0, delay_ms = 0; + + while (delay_ms < 2000) { + if (status || *portstatus & USB_PORT_STAT_CONNECTION) + break; + msleep(20); + delay_ms += 20; + status = hub_port_status(hub, *port1, portstatus, portchange); + } + return status; +} + +/* * usb_port_resume - re-activate a suspended usb device's upstream port * @udev: device to re-activate, not a root hub * Context: must be able to sleep; device not locked; pm locks held @@ -3262,6 +3311,10 @@ clear_bit(port1, hub->busy_bits); + if (udev->persist_enabled && hub_is_superspeed(hub->hdev)) + status = wait_for_ss_port_enable(udev, hub, &port1, &portchange, + &portstatus); + status = check_port_resume_type(udev, hub, port1, status, portchange, portstatus); if (status == 0) @@ -4678,9 +4731,10 @@ hub = list_entry(tmp, struct usb_hub, event_list); kref_get(&hub->kref); + hdev = hub->hdev; + usb_get_dev(hdev); spin_unlock_irq(&hub_event_lock); - hdev = hub->hdev; hub_dev = hub->intfdev; intf = to_usb_interface(hub_dev); dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", @@ -4893,6 +4947,7 @@ usb_autopm_put_interface(intf); loop_disconnected: usb_unlock_device(hdev); + usb_put_dev(hdev); kref_put(&hub->kref, hub_release); } /* end while (1) */ @@ -5331,10 +5386,11 @@ else if (cintf->condition == USB_INTERFACE_BOUND) rebind = 1; + if (rebind) + cintf->needs_binding = 1; } - if (ret == 0 && rebind) - usb_rebind_intf(cintf); } + usb_unbind_and_rebind_marked_interfaces(udev); } usb_autosuspend_device(udev); --- linux-3.13.0.orig/drivers/usb/core/quirks.c +++ linux-3.13.0/drivers/usb/core/quirks.c @@ -44,9 +44,16 @@ /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft Wireless Laser Mouse 6000 Receiver */ + { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Pro Webcams C920 and C930e */ + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -151,6 +158,10 @@ /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, + /* ASUS Base Station(T100) */ + { USB_DEVICE(0x0b05, 0x17e0), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + { } /* terminating entry must be last */ }; --- linux-3.13.0.orig/drivers/usb/core/usb.c +++ linux-3.13.0/drivers/usb/core/usb.c @@ -1050,6 +1050,7 @@ pr_info("%s: USB support disabled\n", usbcore_name); return 0; } + usb_init_pool_max(); retval = usb_debugfs_init(); if (retval) --- linux-3.13.0.orig/drivers/usb/core/usb.h +++ linux-3.13.0/drivers/usb/core/usb.h @@ -35,7 +35,6 @@ unsigned int size); extern int usb_get_bos_descriptor(struct usb_device *dev); extern void usb_release_bos_descriptor(struct usb_device *dev); -extern int usb_device_supports_lpm(struct usb_device *udev); extern char *usb_cache_string(struct usb_device *udev, int index); extern int usb_set_configuration(struct usb_device *dev, int configuration); extern int usb_choose_configuration(struct usb_device *udev); @@ -56,7 +55,7 @@ extern int usb_match_device(struct usb_device *dev, const struct usb_device_id *id); extern void usb_forced_unbind_intf(struct usb_interface 
*intf); -extern void usb_rebind_intf(struct usb_interface *intf); +extern void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev); extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port, struct dev_state *owner); --- linux-3.13.0.orig/drivers/usb/dwc3/core.c +++ linux-3.13.0/drivers/usb/dwc3/core.c @@ -583,12 +583,6 @@ { struct dwc3 *dwc = platform_get_drvdata(pdev); - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); - - pm_runtime_put_sync(&pdev->dev); - pm_runtime_disable(&pdev->dev); - dwc3_debugfs_exit(dwc); switch (dwc->dr_mode) { @@ -609,8 +603,15 @@ dwc3_event_buffers_cleanup(dwc); dwc3_free_event_buffers(dwc); + + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + dwc3_core_exit(dwc); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; } --- linux-3.13.0.orig/drivers/usb/dwc3/core.h +++ linux-3.13.0/drivers/usb/dwc3/core.h @@ -815,15 +815,15 @@ * 12 - VndrDevTstRcved * @reserved15_12: Reserved, not used * @event_info: Information about this event - * @reserved31_24: Reserved, not used + * @reserved31_25: Reserved, not used */ struct dwc3_event_devt { u32 one_bit:1; u32 device_event:7; u32 type:4; u32 reserved15_12:4; - u32 event_info:8; - u32 reserved31_24:8; + u32 event_info:9; + u32 reserved31_25:7; } __packed; /** @@ -856,6 +856,19 @@ struct dwc3_event_gevt gevt; }; +/** + * struct dwc3_gadget_ep_cmd_params - representation of endpoint command + * parameters + * @param2: third parameter + * @param1: second parameter + * @param0: first parameter + */ +struct dwc3_gadget_ep_cmd_params { + u32 param2; + u32 param1; + u32 param0; +}; + /* * DWC3 Features to be used as Driver Data */ @@ -881,11 +894,28 @@ #if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_gadget_init(struct dwc3 *dwc); void dwc3_gadget_exit(struct dwc3 *dwc); +int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode); +int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state); +int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, + unsigned cmd, struct dwc3_gadget_ep_cmd_params *params); +int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param); #else static inline int dwc3_gadget_init(struct dwc3 *dwc) { return 0; } static inline void dwc3_gadget_exit(struct dwc3 *dwc) { } +static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode) +{ return 0; } +static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc, + enum dwc3_link_state state) +{ return 0; } + +static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, + unsigned cmd, struct dwc3_gadget_ep_cmd_params *params) +{ return 0; } +static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc, + int cmd, u32 param) +{ return 0; } #endif /* power management interface */ --- linux-3.13.0.orig/drivers/usb/dwc3/dwc3-exynos.c +++ linux-3.13.0/drivers/usb/dwc3/dwc3-exynos.c @@ -50,6 +50,7 @@ exynos->usb2_phy = pdev; pdata.type = USB_PHY_TYPE_USB2; + pdata.gpio_reset = -1; ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata)); if (ret) --- linux-3.13.0.orig/drivers/usb/dwc3/dwc3-omap.c +++ linux-3.13.0/drivers/usb/dwc3/dwc3-omap.c @@ -215,6 +215,18 @@ omap->irq0_offset, value); } +static void dwc3_omap_write_irqmisc_clr(struct dwc3_omap *omap, u32 value) +{ + dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_MISC + + omap->irqmisc_offset, value); +} + +static void dwc3_omap_write_irq0_clr(struct dwc3_omap *omap, u32 
value) +{ + dwc3_omap_writel(omap->base, USBOTGSS_IRQENABLE_CLR_0 - + omap->irq0_offset, value); +} + static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, enum omap_dwc3_vbus_id_status status) { @@ -359,9 +371,23 @@ static void dwc3_omap_disable_irqs(struct dwc3_omap *omap) { + u32 reg; + /* disable all IRQs */ - dwc3_omap_write_irqmisc_set(omap, 0x00); - dwc3_omap_write_irq0_set(omap, 0x00); + reg = USBOTGSS_IRQO_COREIRQ_ST; + dwc3_omap_write_irq0_clr(omap, reg); + + reg = (USBOTGSS_IRQMISC_OEVT | + USBOTGSS_IRQMISC_DRVVBUS_RISE | + USBOTGSS_IRQMISC_CHRGVBUS_RISE | + USBOTGSS_IRQMISC_DISCHRGVBUS_RISE | + USBOTGSS_IRQMISC_IDPULLUP_RISE | + USBOTGSS_IRQMISC_DRVVBUS_FALL | + USBOTGSS_IRQMISC_CHRGVBUS_FALL | + USBOTGSS_IRQMISC_DISCHRGVBUS_FALL | + USBOTGSS_IRQMISC_IDPULLUP_FALL); + + dwc3_omap_write_irqmisc_clr(omap, reg); } static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32); @@ -592,9 +618,9 @@ if (omap->extcon_id_dev.edev) extcon_unregister_interest(&omap->extcon_id_dev); dwc3_omap_disable_irqs(omap); + device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); return 0; } --- linux-3.13.0.orig/drivers/usb/dwc3/dwc3-pci.c +++ linux-3.13.0/drivers/usb/dwc3/dwc3-pci.c @@ -52,6 +52,7 @@ glue->usb2_phy = pdev; pdata.type = USB_PHY_TYPE_USB2; + pdata.gpio_reset = -1; ret = platform_device_add_data(glue->usb2_phy, &pdata, sizeof(pdata)); if (ret) --- linux-3.13.0.orig/drivers/usb/dwc3/ep0.c +++ linux-3.13.0/drivers/usb/dwc3/ep0.c @@ -251,7 +251,7 @@ /* stall is always issued on EP0 */ dep = dwc->eps[0]; - __dwc3_gadget_ep_set_halt(dep, 1); + __dwc3_gadget_ep_set_halt(dep, 1, false); dep->flags = DWC3_EP_ENABLED; dwc->delayed_status = false; @@ -461,7 +461,7 @@ return -EINVAL; if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) break; - ret = __dwc3_gadget_ep_set_halt(dep, set); + ret = __dwc3_gadget_ep_set_halt(dep, set, true); if (ret) return -EINVAL; break; --- linux-3.13.0.orig/drivers/usb/dwc3/gadget.c +++ linux-3.13.0/drivers/usb/dwc3/gadget.c @@ -532,12 +532,11 @@ if (!usb_endpoint_xfer_isoc(desc)) return 0; - memset(&trb_link, 0, sizeof(trb_link)); - /* Link TRB for ISOC. 
The HWO bit is never reset */ trb_st_hw = &dep->trb_pool[0]; trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; + memset(trb_link, 0, sizeof(*trb_link)); trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); @@ -586,6 +585,10 @@ dwc3_remove_requests(dwc, dep); + /* make sure HW endpoint isn't stalled */ + if (dep->flags & DWC3_EP_STALL) + __dwc3_gadget_ep_set_halt(dep, 0, false); + reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); @@ -884,8 +887,7 @@ if (i == (request->num_mapped_sgs - 1) || sg_is_last(s)) { - if (list_is_last(&req->list, - &dep->request_list)) + if (list_empty(&dep->request_list)) last_one = true; chain = false; } @@ -903,6 +905,9 @@ if (last_one) break; } + + if (last_one) + break; } else { dma = req->request.dma; length = req->request.length; @@ -1182,7 +1187,7 @@ return ret; } -int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) +int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) { struct dwc3_gadget_ep_cmd_params params; struct dwc3 *dwc = dep->dwc; @@ -1191,6 +1196,14 @@ memset(¶ms, 0x00, sizeof(params)); if (value) { + if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || + (!list_empty(&dep->req_queued) || + !list_empty(&dep->request_list)))) { + dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", + dep->name); + return -EAGAIN; + } + ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, DWC3_DEPCMD_SETSTALL, ¶ms); if (ret) @@ -1230,7 +1243,7 @@ goto out; } - ret = __dwc3_gadget_ep_set_halt(dep, value); + ret = __dwc3_gadget_ep_set_halt(dep, value, false); out: spin_unlock_irqrestore(&dwc->lock, flags); --- linux-3.13.0.orig/drivers/usb/dwc3/gadget.h +++ linux-3.13.0/drivers/usb/dwc3/gadget.h @@ -56,12 +56,6 @@ /* DEPXFERCFG parameter 0 */ #define DWC3_DEPXFERCFG_NUM_XFER_RES(n) ((n) & 0xffff) -struct dwc3_gadget_ep_cmd_params { - u32 param2; - u32 param1; - u32 param0; -}; - /* -------------------------------------------------------------------------- */ #define to_dwc3_request(r) (container_of(r, struct dwc3_request, request)) @@ -85,19 +79,13 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, int status); -int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode); -int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state); - void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event); void dwc3_ep0_out_start(struct dwc3 *dwc); int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags); -int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value); -int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, - unsigned cmd, struct dwc3_gadget_ep_cmd_params *params); -int dwc3_send_gadget_generic_command(struct dwc3 *dwc, int cmd, u32 param); +int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); /** * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW --- linux-3.13.0.orig/drivers/usb/gadget/at91_udc.c +++ linux-3.13.0/drivers/usb/gadget/at91_udc.c @@ -871,12 +871,10 @@ return; udc->clocked = 1; - if (IS_ENABLED(CONFIG_COMMON_CLK)) { - clk_set_rate(udc->uclk, 48000000); - clk_prepare_enable(udc->uclk); - } - clk_prepare_enable(udc->iclk); - clk_prepare_enable(udc->fclk); + if (IS_ENABLED(CONFIG_COMMON_CLK)) + clk_enable(udc->uclk); + clk_enable(udc->iclk); + 
clk_enable(udc->fclk); } static void clk_off(struct at91_udc *udc) @@ -885,10 +883,10 @@ return; udc->clocked = 0; udc->gadget.speed = USB_SPEED_UNKNOWN; - clk_disable_unprepare(udc->fclk); - clk_disable_unprepare(udc->iclk); + clk_disable(udc->fclk); + clk_disable(udc->iclk); if (IS_ENABLED(CONFIG_COMMON_CLK)) - clk_disable_unprepare(udc->uclk); + clk_disable(udc->uclk); } /* @@ -1791,14 +1789,24 @@ } /* don't do anything until we have both gadget driver and VBUS */ + if (IS_ENABLED(CONFIG_COMMON_CLK)) { + clk_set_rate(udc->uclk, 48000000); + retval = clk_prepare(udc->uclk); + if (retval) + goto fail1; + } + retval = clk_prepare(udc->fclk); + if (retval) + goto fail1a; + retval = clk_prepare_enable(udc->iclk); if (retval) - goto fail1; + goto fail1b; at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); /* Clear all pending interrupts - UDP may be used by bootloader. */ at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); - clk_disable_unprepare(udc->iclk); + clk_disable(udc->iclk); /* request UDC and maybe VBUS irqs */ udc->udp_irq = platform_get_irq(pdev, 0); @@ -1806,7 +1814,7 @@ 0, driver_name, udc); if (retval < 0) { DBG("request irq %d failed\n", udc->udp_irq); - goto fail1; + goto fail1c; } if (gpio_is_valid(udc->board.vbus_pin)) { retval = gpio_request(udc->board.vbus_pin, "udc_vbus"); @@ -1859,6 +1867,13 @@ gpio_free(udc->board.vbus_pin); fail2: free_irq(udc->udp_irq, udc); +fail1c: + clk_unprepare(udc->iclk); +fail1b: + clk_unprepare(udc->fclk); +fail1a: + if (IS_ENABLED(CONFIG_COMMON_CLK)) + clk_unprepare(udc->uclk); fail1: if (IS_ENABLED(CONFIG_COMMON_CLK) && !IS_ERR(udc->uclk)) clk_put(udc->uclk); @@ -1907,6 +1922,11 @@ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); + if (IS_ENABLED(CONFIG_COMMON_CLK)) + clk_unprepare(udc->uclk); + clk_unprepare(udc->fclk); + clk_unprepare(udc->iclk); + clk_put(udc->iclk); clk_put(udc->fclk); if (IS_ENABLED(CONFIG_COMMON_CLK)) --- linux-3.13.0.orig/drivers/usb/gadget/atmel_usba_udc.c +++ linux-3.13.0/drivers/usb/gadget/atmel_usba_udc.c @@ -716,10 +716,10 @@ req->using_dma = 1; req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE - | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; + | USBA_DMA_END_BUF_EN; - if (ep->is_in) - req->ctrl |= USBA_DMA_END_BUF_EN; + if (!ep->is_in) + req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; /* * Add this request to the queue and submit for DMA if @@ -828,7 +828,7 @@ { struct usba_ep *ep = to_usba_ep(_ep); struct usba_udc *udc = ep->udc; - struct usba_request *req = to_usba_req(_req); + struct usba_request *req; unsigned long flags; u32 status; @@ -837,6 +837,16 @@ spin_lock_irqsave(&udc->lock, flags); + list_for_each_entry(req, &ep->queue, queue) { + if (&req->req == _req) + break; + } + + if (&req->req != _req) { + spin_unlock_irqrestore(&udc->lock, flags); + return -EINVAL; + } + if (req->using_dma) { /* * If this request is currently being transferred, @@ -1572,7 +1582,6 @@ if ((epstatus & epctrl) & USBA_RX_BK_RDY) { DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); receive_data(ep); - usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); } } @@ -1827,12 +1836,12 @@ toggle_bias(0); usba_writel(udc, CTRL, USBA_DISABLE_MASK); - udc->driver = NULL; - clk_disable_unprepare(udc->hclk); clk_disable_unprepare(udc->pclk); - DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name); + DBG(DBG_GADGET, "unregistered driver `%s'\n", udc->driver->driver.name); + + udc->driver = NULL; 
return 0; } --- linux-3.13.0.orig/drivers/usb/gadget/bcm63xx_udc.c +++ linux-3.13.0/drivers/usb/gadget/bcm63xx_udc.c @@ -361,24 +361,30 @@ bcm_writel(val, udc->iudma_regs + off); } -static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off) +static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan) { - return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off); + return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off + + (ENETDMA_CHAN_WIDTH * chan)); } -static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off) +static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off, + int chan) { - bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off); + bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off + + (ENETDMA_CHAN_WIDTH * chan)); } -static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off) +static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan) { - return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off); + return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off + + (ENETDMA_CHAN_WIDTH * chan)); } -static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off) +static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off, + int chan) { - bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off); + bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off + + (ENETDMA_CHAN_WIDTH * chan)); } static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled) @@ -639,7 +645,7 @@ } while (!last_bd); usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(iudma->ch_idx)); + ENETDMAC_CHANCFG_REG, iudma->ch_idx); } /** @@ -695,9 +701,9 @@ bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num)); /* stop DMA, then wait for the hardware to wrap up */ - usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx)); + usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx); - while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) & + while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) & ENETDMAC_CHANCFG_EN_MASK) { udelay(1); @@ -714,10 +720,10 @@ dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n", ch_idx); usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK, - ENETDMAC_CHANCFG_REG(ch_idx)); + ENETDMAC_CHANCFG_REG, ch_idx); } } - usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx)); + usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx); /* don't leave "live" HW-owned entries for the next guy to step on */ for (d = iudma->bd_ring; d <= iudma->end_bd; d++) @@ -729,11 +735,11 @@ /* set up IRQs, UBUS burst size, and BD base for this channel */ usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, - ENETDMAC_IRMASK_REG(ch_idx)); - usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx)); + ENETDMAC_IRMASK_REG, ch_idx); + usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx); - usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx)); - usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx)); + usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx); + usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx); } /** @@ -2036,7 +2042,7 @@ spin_lock(&udc->lock); usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK, - ENETDMAC_IR_REG(iudma->ch_idx)); + ENETDMAC_IR_REG, iudma->ch_idx); bep = iudma->bep; rc = iudma_read(udc, iudma); @@ -2176,18 +2182,18 @@ seq_printf(s, " [ep%d]:\n", max_t(int, iudma_defaults[ch_idx].ep_num, 0)); seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n", - usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)), - 
usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)), - usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)), - usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx))); + usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx), + usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx), + usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx), + usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx)); - sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx)); - sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx)); + sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx); + sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx); seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n", - usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)), + usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx), sram2 >> 16, sram2 & 0xffff, sram3 >> 16, sram3 & 0xffff, - usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx))); + usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx)); seq_printf(s, " desc: %d/%d used", iudma->n_bds_used, iudma->n_bds); --- linux-3.13.0.orig/drivers/usb/gadget/f_acm.c +++ linux-3.13.0/drivers/usb/gadget/f_acm.c @@ -430,11 +430,12 @@ if (acm->notify->driver_data) { VDBG(cdev, "reset acm control interface %d\n", intf); usb_ep_disable(acm->notify); - } else { - VDBG(cdev, "init acm ctrl interface %d\n", intf); + } + + if (!acm->notify->desc) if (config_ep_by_speed(cdev->gadget, f, acm->notify)) return -EINVAL; - } + usb_ep_enable(acm->notify); acm->notify->driver_data = acm; --- linux-3.13.0.orig/drivers/usb/gadget/f_fs.c +++ linux-3.13.0/drivers/usb/gadget/f_fs.c @@ -1389,11 +1389,13 @@ ffs->ep0req->context = ffs; lang = ffs->stringtabs; - for (lang = ffs->stringtabs; *lang; ++lang) { - struct usb_string *str = (*lang)->strings; - int id = first_id; - for (; str->s; ++id, ++str) - str->id = id; + if (lang) { + for (; *lang; ++lang) { + struct usb_string *str = (*lang)->strings; + int id = first_id; + for (; str->s; ++id, ++str) + str->id = id; + } } ffs->gadget = cdev->gadget; --- linux-3.13.0.orig/drivers/usb/gadget/fusb300_udc.h +++ linux-3.13.0/drivers/usb/gadget/fusb300_udc.h @@ -12,7 +12,7 @@ #ifndef __FUSB300_UDC_H__ -#define __FUSB300_UDC_H_ +#define __FUSB300_UDC_H__ #include --- linux-3.13.0.orig/drivers/usb/gadget/tcm_usb_gadget.c +++ linux-3.13.0/drivers/usb/gadget/tcm_usb_gadget.c @@ -1613,7 +1613,7 @@ return ERR_PTR(-ENOMEM); } tport->tport_wwpn = wwpn; - snprintf(tport->tport_name, sizeof(tport->tport_name), wnn_name); + snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name); return &tport->tport_wwn; } --- linux-3.13.0.orig/drivers/usb/gadget/u_serial.c +++ linux-3.13.0/drivers/usb/gadget/u_serial.c @@ -549,8 +549,8 @@ port->read_started--; } - /* Push from tty to ldisc; without low_latency set this is handled by - * a workqueue, so we won't get callbacks and can hold port_lock + /* Push from tty to ldisc; this is handled by a workqueue, + * so we won't get callbacks and can hold port_lock */ if (do_push) tty_flip_buffer_push(&port->port); --- linux-3.13.0.orig/drivers/usb/gadget/udc-core.c +++ linux-3.13.0/drivers/usb/gadget/udc-core.c @@ -456,6 +456,11 @@ { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); + if (!udc->driver) { + dev_err(dev, "soft-connect without a gadget driver\n"); + return -EOPNOTSUPP; + } + if (sysfs_streq(buf, "connect")) { usb_gadget_udc_start(udc->gadget, udc->driver); usb_gadget_connect(udc->gadget); --- linux-3.13.0.orig/drivers/usb/gadget/zero.c +++ linux-3.13.0/drivers/usb/gadget/zero.c @@ -300,7 +300,7 @@ ss_opts->isoc_interval = 
gzero_options.isoc_interval; ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; ss_opts->isoc_mult = gzero_options.isoc_mult; - ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket; + ss_opts->isoc_maxburst = gzero_options.isoc_maxburst; ss_opts->bulk_buflen = gzero_options.bulk_buflen; func_ss = usb_get_function(func_inst_ss); --- linux-3.13.0.orig/drivers/usb/host/ehci-fsl.c +++ linux-3.13.0/drivers/usb/host/ehci-fsl.c @@ -261,7 +261,8 @@ break; } - if (pdata->have_sysif_regs && pdata->controller_ver && + if (pdata->have_sysif_regs && + pdata->controller_ver > FSL_USB_VER_1_6 && (phy_mode == FSL_USB2_PHY_ULPI)) { /* check PHY_CLK_VALID to get phy clk valid */ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & --- linux-3.13.0.orig/drivers/usb/host/ehci-hcd.c +++ linux-3.13.0/drivers/usb/host/ehci-hcd.c @@ -686,8 +686,15 @@ struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 status, masked_status, pcd_status = 0, cmd; int bh; + unsigned long flags; - spin_lock (&ehci->lock); + /* + * For threadirqs option we use spin_lock_irqsave() variant to prevent + * deadlock with ehci hrtimer callback, because hrtimer callbacks run + * in interrupt context even when threadirqs is specified. We can go + * back to spin_lock() variant when hrtimer callbacks become threaded. + */ + spin_lock_irqsave(&ehci->lock, flags); status = ehci_readl(ehci, &ehci->regs->status); @@ -705,7 +712,7 @@ /* Shared IRQ? */ if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { - spin_unlock(&ehci->lock); + spin_unlock_irqrestore(&ehci->lock, flags); return IRQ_NONE; } @@ -823,7 +830,7 @@ if (bh) ehci_work (ehci); - spin_unlock (&ehci->lock); + spin_unlock_irqrestore(&ehci->lock, flags); if (pcd_status) usb_hcd_poll_rh_status(hcd); return IRQ_HANDLED; @@ -966,8 +973,6 @@ } qh->exception = 1; - if (ehci->rh_state < EHCI_RH_RUNNING) - qh->qh_state = QH_STATE_IDLE; switch (qh->qh_state) { case QH_STATE_LINKED: WARN_ON(!list_empty(&qh->qtd_list)); --- linux-3.13.0.orig/drivers/usb/host/ehci-hub.c +++ linux-3.13.0/drivers/usb/host/ehci-hub.c @@ -238,6 +238,7 @@ int port; int mask; int changed; + bool fs_idle_delay; ehci_dbg(ehci, "suspend root hub\n"); @@ -272,6 +273,7 @@ ehci->bus_suspended = 0; ehci->owned_ports = 0; changed = 0; + fs_idle_delay = false; port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *reg = &ehci->regs->port_status [port]; @@ -300,16 +302,32 @@ } if (t1 != t2) { + /* + * On some controllers, Wake-On-Disconnect will + * generate false wakeup signals until the bus + * switches over to full-speed idle. For their + * sake, add a delay if we need one. + */ + if ((t2 & PORT_WKDISC_E) && + ehci_port_speed(ehci, t2) == + USB_PORT_STAT_HIGH_SPEED) + fs_idle_delay = true; ehci_writel(ehci, t2, reg); changed = 1; } } + spin_unlock_irq(&ehci->lock); + + if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) { + /* + * Wait for HCD to enter low-power mode or for the bus + * to switch to full-speed idle. + */ + usleep_range(5000, 5500); + } if (changed && ehci->has_tdi_phy_lpm) { - spin_unlock_irq(&ehci->lock); - msleep(5); /* 5 ms for HCD to enter low-power mode */ spin_lock_irq(&ehci->lock); - port = HCS_N_PORTS(ehci->hcs_params); while (port--) { u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; @@ -322,8 +340,8 @@ port, (t3 & HOSTPC_PHCD) ? 
"succeeded" : "failed"); } + spin_unlock_irq(&ehci->lock); } - spin_unlock_irq(&ehci->lock); /* Apparently some devices need a >= 1-uframe delay here */ if (ehci->bus_suspended) @@ -1223,7 +1241,7 @@ if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { spin_unlock_irqrestore(&ehci->lock, flags); retval = ehset_single_step_set_feature(hcd, - wIndex); + wIndex + 1); spin_lock_irqsave(&ehci->lock, flags); break; } --- linux-3.13.0.orig/drivers/usb/host/ehci-pci.c +++ linux-3.13.0/drivers/usb/host/ehci-pci.c @@ -35,6 +35,21 @@ #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70 /*-------------------------------------------------------------------------*/ +#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939 +static inline bool is_intel_quark_x1000(struct pci_dev *pdev) +{ + return pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC; +} + +/* + * 0x84 is the offset of in/out threshold register, + * and it is the same offset as the register of 'hostpc'. + */ +#define intel_quark_x1000_insnreg01 hostpc + +/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */ +#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f /* called after powerup, by probe or system-pm "wakeup" */ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) @@ -50,6 +65,16 @@ if (!retval) ehci_dbg(ehci, "MWI active\n"); + /* Reset the threshold limit */ + if (is_intel_quark_x1000(pdev)) { + /* + * For the Intel QUARK X1000, raise the I/O threshold to the + * maximum usable value in order to improve performance. + */ + ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD, + ehci->regs->intel_quark_x1000_insnreg01); + } + return 0; } --- linux-3.13.0.orig/drivers/usb/host/ehci-sched.c +++ linux-3.13.0/drivers/usb/host/ehci-sched.c @@ -1581,6 +1581,10 @@ else next = (now + 2 + 7) & ~0x07; /* full frame cache */ + /* If needed, initialize last_iso_frame so that this URB will be seen */ + if (ehci->isoc_count == 0) + ehci->last_iso_frame = now >> 3; + /* * Use ehci->last_iso_frame as the base. There can't be any * TDs scheduled for earlier than that. 
@@ -1671,10 +1675,6 @@ urb->start_frame = start & (mod - 1); if (!stream->highspeed) urb->start_frame >>= 3; - - /* Make sure scan_isoc() sees these */ - if (ehci->isoc_count == 0) - ehci->last_iso_frame = now >> 3; return status; fail: --- linux-3.13.0.orig/drivers/usb/host/ehci.h +++ linux-3.13.0/drivers/usb/host/ehci.h @@ -225,6 +225,7 @@ unsigned has_synopsys_hc_bug:1; /* Synopsys HC */ unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */ unsigned need_oc_pp_cycle:1; /* MPC834X port power */ + unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) @@ -728,6 +729,18 @@ #endif } +#ifdef CONFIG_SOC_IMX28 +static inline void imx28_ehci_writel(const unsigned int val, + volatile __u32 __iomem *addr) +{ + __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr)); +} +#else +static inline void imx28_ehci_writel(const unsigned int val, + volatile __u32 __iomem *addr) +{ +} +#endif static inline void ehci_writel(const struct ehci_hcd *ehci, const unsigned int val, __u32 __iomem *regs) { @@ -736,7 +749,10 @@ writel_be(val, regs) : writel(val, regs); #else - writel(val, regs); + if (ehci->imx28_write_fix) + imx28_ehci_writel(val, regs); + else + writel(val, regs); #endif } --- linux-3.13.0.orig/drivers/usb/host/isp1760-hcd.c +++ linux-3.13.0/drivers/usb/host/isp1760-hcd.c @@ -2247,6 +2247,9 @@ hcd->rsrc_start = res_start; hcd->rsrc_len = res_len; + /* This driver doesn't support wakeup requests */ + hcd->cant_recv_wakeups = 1; + ret = usb_add_hcd(hcd, irq, irqflags); if (ret) goto err_unmap; --- linux-3.13.0.orig/drivers/usb/host/ohci-dbg.c +++ linux-3.13.0/drivers/usb/host/ohci-dbg.c @@ -289,7 +289,7 @@ } } -static void ohci_dump (struct ohci_hcd *controller, int verbose) +static void ohci_dump(struct ohci_hcd *controller) { ohci_dbg (controller, "OHCI controller state\n"); @@ -408,7 +408,7 @@ } #else -static inline void ohci_dump (struct ohci_hcd *controller, int verbose) {} +static inline void ohci_dump (struct ohci_hcd *controller) {} #undef OHCI_VERBOSE_DEBUG @@ -531,15 +531,16 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf) { struct ohci_hcd *ohci; - size_t temp; + size_t temp, size; unsigned long flags; ohci = buf->ohci; + size = PAGE_SIZE; /* display control and bulk lists together, for simplicity */ spin_lock_irqsave (&ohci->lock, flags); - temp = show_list(ohci, buf->page, buf->count, ohci->ed_controltail); - temp += show_list(ohci, buf->page + temp, buf->count - temp, + temp = show_list(ohci, buf->page, size, ohci->ed_controltail); + temp += show_list(ohci, buf->page + temp, size - temp, ohci->ed_bulktail); spin_unlock_irqrestore (&ohci->lock, flags); --- linux-3.13.0.orig/drivers/usb/host/ohci-hcd.c +++ linux-3.13.0/drivers/usb/host/ohci-hcd.c @@ -78,8 +78,8 @@ #include "ohci.h" #include "pci-quirks.h" -static void ohci_dump (struct ohci_hcd *ohci, int verbose); -static void ohci_stop (struct usb_hcd *hcd); +static void ohci_dump(struct ohci_hcd *ohci); +static void ohci_stop(struct usb_hcd *hcd); #include "ohci-hub.c" #include "ohci-dbg.c" @@ -754,7 +754,7 @@ ohci->ed_to_check = NULL; } - ohci_dump (ohci, 1); + ohci_dump(ohci); return 0; } @@ -835,7 +835,7 @@ usb_hc_died(hcd); } - ohci_dump (ohci, 1); + ohci_dump(ohci); ohci_usb_reset (ohci); } @@ -935,7 +935,7 @@ { struct ohci_hcd *ohci = hcd_to_ohci (hcd); - ohci_dump (ohci, 1); + ohci_dump(ohci); if (quirk_nec(ohci)) flush_work(&ohci->nec_work); --- linux-3.13.0.orig/drivers/usb/host/ohci-hub.c +++ linux-3.13.0/drivers/usb/host/ohci-hub.c @@ -90,6 +90,24 @@ 
dl_done_list (ohci); finish_unlinks (ohci, ohci_frame_no(ohci)); + /* + * Some controllers don't handle "global" suspend properly if + * there are unsuspended ports. For these controllers, put all + * the enabled ports into suspend before suspending the root hub. + */ + if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) { + __hc32 __iomem *portstat = ohci->regs->roothub.portstatus; + int i; + unsigned temp; + + for (i = 0; i < ohci->num_ports; (++i, ++portstat)) { + temp = ohci_readl(ohci, portstat); + if ((temp & (RH_PS_PES | RH_PS_PSS)) == + RH_PS_PES) + ohci_writel(ohci, RH_PS_PSS, portstat); + } + } + /* maybe resume can wake root hub */ if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) { ohci->hc_control |= OHCI_CTRL_RWE; --- linux-3.13.0.orig/drivers/usb/host/ohci-pci.c +++ linux-3.13.0/drivers/usb/host/ohci-pci.c @@ -160,6 +160,7 @@ ohci_dbg(ohci, "enabled AMD prefetch quirk\n"); } + ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND; return 0; } --- linux-3.13.0.orig/drivers/usb/host/ohci-q.c +++ linux-3.13.0/drivers/usb/host/ohci-q.c @@ -315,8 +315,7 @@ * - ED_OPER: when there's any request queued, the ED gets rescheduled * immediately. HC should be working on them. * - * - ED_IDLE: when there's no TD queue. there's no reason for the HC - * to care about this ED; safe to disable the endpoint. + * - ED_IDLE: when there's no TD queue or the HC isn't running. * * When finish_unlinks() runs later, after SOF interrupt, it will often * complete one or more URB unlinks before making that state change. @@ -930,6 +929,10 @@ int completed, modified; __hc32 *prev; + /* Is this ED already invisible to the hardware? */ + if (ed->state == ED_IDLE) + goto ed_idle; + /* only take off EDs that the HC isn't using, accounting for * frame counter wraps and EDs with partially retired TDs */ @@ -959,12 +962,20 @@ } } + /* ED's now officially unlinked, hc doesn't see */ + ed->state = ED_IDLE; + if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) + ohci->eds_scheduled--; + ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); + ed->hwNextED = 0; + wmb(); + ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); +ed_idle: + /* reentrancy: if we drop the schedule lock, someone might * have modified this list. normally it's just prepending * entries (which we'd ignore), but paranoia won't hurt. */ - *last = ed->ed_next; - ed->ed_next = NULL; modified = 0; /* unlink urbs as requested, but rescan the list after @@ -1022,19 +1033,20 @@ if (completed && !list_empty (&ed->td_list)) goto rescan_this; - /* ED's now officially unlinked, hc doesn't see */ - ed->state = ED_IDLE; - if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) - ohci->eds_scheduled--; - ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); - ed->hwNextED = 0; - wmb (); - ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE); - - /* but if there's work queued, reschedule */ - if (!list_empty (&ed->td_list)) { - if (ohci->rh_state == OHCI_RH_RUNNING) - ed_schedule (ohci, ed); + /* + * If no TDs are queued, take ED off the ed_rm_list. + * Otherwise, if the HC is running, reschedule. + * If not, leave it on the list for further dequeues. 
+ */ + if (list_empty(&ed->td_list)) { + *last = ed->ed_next; + ed->ed_next = NULL; + } else if (ohci->rh_state == OHCI_RH_RUNNING) { + *last = ed->ed_next; + ed->ed_next = NULL; + ed_schedule(ohci, ed); + } else { + last = &ed->ed_next; } if (modified) --- linux-3.13.0.orig/drivers/usb/host/ohci.h +++ linux-3.13.0/drivers/usb/host/ohci.h @@ -405,6 +405,8 @@ #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ +#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */ + // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ --- linux-3.13.0.orig/drivers/usb/host/pci-quirks.c +++ linux-3.13.0/drivers/usb/host/pci-quirks.c @@ -572,7 +572,8 @@ { void __iomem *base; u32 control; - u32 fminterval; + u32 fminterval = 0; + bool no_fminterval = false; int cnt; if (!mmio_resource_enabled(pdev, 0)) @@ -582,6 +583,13 @@ if (base == NULL) return; + /* + * ULi M5237 OHCI controller locks the whole system when accessing + * the OHCI_FMINTERVAL offset. + */ + if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237) + no_fminterval = true; + control = readl(base + OHCI_CONTROL); /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */ @@ -620,7 +628,9 @@ } /* software reset of the controller, preserving HcFmInterval */ - fminterval = readl(base + OHCI_FMINTERVAL); + if (!no_fminterval) + fminterval = readl(base + OHCI_FMINTERVAL); + writel(OHCI_HCR, base + OHCI_CMDSTATUS); /* reset requires max 10 us delay */ @@ -629,7 +639,9 @@ break; udelay(1); } - writel(fminterval, base + OHCI_FMINTERVAL); + + if (!no_fminterval) + writel(fminterval, base + OHCI_FMINTERVAL); /* Now the controller is safely in SUSPEND and nothing can wake it up */ iounmap(base); @@ -657,6 +669,14 @@ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), }, }, + { + /* HASEE E200 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"), + DMI_MATCH(DMI_BOARD_NAME, "E210"), + DMI_MATCH(DMI_BIOS_VERSION, "6.00"), + }, + }, { } }; @@ -666,9 +686,14 @@ { int try_handoff = 1, tried_handoff = 0; - /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying - * the handoff on its unused controller. Skip it. */ - if (pdev->vendor == 0x8086 && pdev->device == 0x283a) { + /* + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying + * the handoff on its unused controller. Skip it. + * + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021). + */ + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a || + pdev->device == 0x27cc)) { if (dmi_check_system(ehci_dmi_nohandoff_table)) try_handoff = 0; } --- linux-3.13.0.orig/drivers/usb/host/xhci-hub.c +++ linux-3.13.0/drivers/usb/host/xhci-hub.c @@ -383,6 +383,10 @@ status = PORT_PLC; port_change_bit = "link state"; break; + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: + status = PORT_CEC; + port_change_bit = "config error"; + break; default: /* Should never happen */ return; @@ -470,7 +474,8 @@ } /* Updates Link Status for super Speed port */ -static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) +static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, + u32 *status, u32 status_reg) { u32 pls = status_reg & PORT_PLS_MASK; @@ -509,7 +514,8 @@ * in which sometimes the port enters compliance mode * caused by a delay on the host-device negotiation. 
*/ - if (pls == USB_SS_PORT_LS_COMP_MOD) + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (pls == USB_SS_PORT_LS_COMP_MOD)) pls |= USB_PORT_STAT_CONNECTION; } @@ -582,6 +588,8 @@ status |= USB_PORT_STAT_C_LINK_STATE << 16; if ((raw_port_status & PORT_WRC)) status |= USB_PORT_STAT_C_BH_RESET << 16; + if ((raw_port_status & PORT_CEC)) + status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; } if (hcd->speed != HCD_USB3) { @@ -669,7 +677,7 @@ } /* Update Port Link State */ if (hcd->speed == HCD_USB3) { - xhci_hub_report_usb3_link_state(&status, raw_port_status); + xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status); /* * Verify if all USB3 Ports Have entered U0 already. * Delete Compliance Mode Timer if so. @@ -1000,6 +1008,7 @@ case USB_PORT_FEAT_C_OVER_CURRENT: case USB_PORT_FEAT_C_ENABLE: case USB_PORT_FEAT_C_PORT_LINK_STATE: + case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: xhci_clear_port_change_bit(xhci, wValue, wIndex, port_array[wIndex], temp); break; @@ -1065,7 +1074,7 @@ */ status = bus_state->resuming_ports; - mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC; + mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; spin_lock_irqsave(&xhci->lock, flags); /* For each port, did anything change? If so, set that bit in buf. */ --- linux-3.13.0.orig/drivers/usb/host/xhci-mem.c +++ linux-3.13.0/drivers/usb/host/xhci-mem.c @@ -1711,6 +1711,7 @@ if (xhci->lpm_command) xhci_free_command(xhci, xhci->lpm_command); + xhci->lpm_command = NULL; xhci->cmd_ring_reserved_trbs = 0; if (xhci->cmd_ring) xhci_ring_free(xhci, xhci->cmd_ring); @@ -1722,6 +1723,16 @@ kfree(cur_cd); } + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + for (i = 0; i < num_ports && xhci->rh_bw; i++) { + struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; + for (j = 0; j < XHCI_MAX_INTERVAL; j++) { + struct list_head *ep = &bwt->interval_bw[j].endpoints; + while (!list_empty(ep)) + list_del_init(ep->next); + } + } + for (i = 1; i < MAX_HC_SLOTS; ++i) xhci_free_virt_device(xhci, i); @@ -1757,16 +1768,6 @@ if (!xhci->rh_bw) goto no_bw; - num_ports = HCS_MAX_PORTS(xhci->hcs_params1); - for (i = 0; i < num_ports; i++) { - struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; - for (j = 0; j < XHCI_MAX_INTERVAL; j++) { - struct list_head *ep = &bwt->interval_bw[j].endpoints; - while (!list_empty(ep)) - list_del_init(ep->next); - } - } - for (i = 0; i < num_ports; i++) { struct xhci_tt_bw_info *tt, *n; list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) { --- linux-3.13.0.orig/drivers/usb/host/xhci-pci.c +++ linux-3.13.0/drivers/usb/host/xhci-pci.c @@ -37,6 +37,9 @@ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 +#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f +#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f static const char hcd_name[] = "xhci_hcd"; @@ -101,9 +104,14 @@ /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; + + if (pdev->vendor == PCI_VENDOR_ID_AMD) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; + xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { @@ -119,21 +127,16 @@ * PPT chipsets. 
*/ xhci->quirks |= XHCI_SPURIOUS_REBOOT; - xhci->quirks |= XHCI_AVOID_BEI; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) { - /* Workaround for occasional spurious wakeups from S5 (or - * any other sleep) on Haswell machines with LPT and LPT-LP - * with the new Intel BIOS - */ - /* Limit the quirk to only known vendors, as this triggers - * yet another BIOS bug on some other machines - * https://bugzilla.kernel.org/show_bug.cgi?id=66171 - */ - if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) - xhci->quirks |= XHCI_SPURIOUS_WAKEUP; + pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { + xhci->quirks |= XHCI_SPURIOUS_REBOOT; + } + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { + xhci->quirks |= XHCI_PME_STUCK_QUIRK; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { @@ -142,10 +145,30 @@ "QUIRK: Resetting on resume"); xhci->quirks |= XHCI_TRUST_TX_LENGTH; } + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + pdev->device == 0x0015 && + pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG && + pdev->subsystem_device == 0xc0cd) + xhci->quirks |= XHCI_RESET_ON_RESUME; if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; } +/* + * Make sure PME works on some Intel xHCI controllers by writing 1 to clear + * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 + */ +static void xhci_pme_quirk(struct xhci_hcd *xhci) +{ + u32 val; + void __iomem *reg; + + reg = (void __iomem *) xhci->cap_regs + 0x80a4; + val = readl(reg); + writel(val | BIT(28), reg); + readl(reg); +} + /* called during probe() after chip reset completes */ static int xhci_pci_setup(struct usb_hcd *hcd) { @@ -185,6 +208,10 @@ struct usb_hcd *hcd; driver = (struct hc_driver *)id->driver_data; + + /* Prevent runtime suspending between USB-2 and USB-3 initialization */ + pm_runtime_get_noresume(&dev->dev); + /* Register the USB 2.0 roothub. * FIXME: USB core must know to register the USB 2.0 roothub first. * This is sort of silly, because we could just set the HCD driver flags @@ -194,7 +221,7 @@ retval = usb_hcd_pci_probe(dev, id); if (retval) - return retval; + goto put_runtime_pm; /* USB 2.0 roothub is stored in the PCI device now. 
*/ hcd = dev_get_drvdata(&dev->dev); @@ -223,12 +250,17 @@ if (xhci->quirks & XHCI_LPM_SUPPORT) hcd_to_bus(xhci->shared_hcd)->root_hub->lpm_capable = 1; + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ + pm_runtime_put_noidle(&dev->dev); + return 0; put_usb3_hcd: usb_put_hcd(xhci->shared_hcd); dealloc_usb2_hcd: usb_hcd_pci_remove(dev); +put_runtime_pm: + pm_runtime_put_noidle(&dev->dev); return retval; } @@ -263,7 +295,10 @@ if (xhci_compliance_mode_recovery_timer_quirk_check()) pdev->no_d3cold = true; - return xhci_suspend(xhci); + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_quirk(xhci); + + return xhci_suspend(xhci, do_wakeup); } static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) @@ -293,6 +328,9 @@ if (pdev->vendor == PCI_VENDOR_ID_INTEL) usb_enable_intel_xhci_ports(pdev); + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_quirk(xhci); + retval = xhci_resume(xhci, hibernated); return retval; } --- linux-3.13.0.orig/drivers/usb/host/xhci-plat.c +++ linux-3.13.0/drivers/usb/host/xhci-plat.c @@ -203,7 +203,15 @@ struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); - return xhci_suspend(xhci); + /* + * xhci_suspend() needs `do_wakeup` to know whether host is allowed + * to do wakeup during suspend. Since xhci_plat_suspend is currently + * only designed for system suspend, device_may_wakeup() is enough + * to dertermine whether host is allowed to do wakeup. Need to + * reconsider this when xhci_plat_suspend enlarges its scope, e.g., + * also applies to runtime suspend. + */ + return xhci_suspend(xhci, device_may_wakeup(dev)); } static int xhci_plat_resume(struct device *dev) --- linux-3.13.0.orig/drivers/usb/host/xhci-ring.c +++ linux-3.13.0/drivers/usb/host/xhci-ring.c @@ -552,10 +552,14 @@ struct xhci_dequeue_state *state) { struct xhci_virt_device *dev = xhci->devs[slot_id]; + struct xhci_virt_ep *ep = &dev->eps[ep_index]; struct xhci_ring *ep_ring; - struct xhci_generic_trb *trb; - struct xhci_ep_ctx *ep_ctx; + struct xhci_segment *new_seg; + union xhci_trb *new_deq; dma_addr_t addr; + u64 hw_dequeue; + bool cycle_found = false; + bool td_last_trb_found = false; ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, ep_index, stream_id); @@ -565,56 +569,65 @@ stream_id); return; } - state->new_cycle_state = 0; - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Finding segment containing stopped TRB."); - state->new_deq_seg = find_trb_seg(cur_td->start_seg, - dev->eps[ep_index].stopped_trb, - &state->new_cycle_state); - if (!state->new_deq_seg) { - WARN_ON(1); - return; - } /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Finding endpoint context"); - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); - state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); - - state->new_deq_ptr = cur_td->last_trb; - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "Finding segment containing last TRB in TD."); - state->new_deq_seg = find_trb_seg(state->new_deq_seg, - state->new_deq_ptr, - &state->new_cycle_state); - if (!state->new_deq_seg) { - WARN_ON(1); - return; + /* 4.6.9 the css flag is written to the stream context for streams */ + if (ep->ep_state & EP_HAS_STREAMS) { + struct xhci_stream_ctx *ctx = + &ep->stream_info->stream_ctx_array[stream_id]; + hw_dequeue = le64_to_cpu(ctx->stream_ring); + } else { + struct xhci_ep_ctx *ep_ctx + = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + hw_dequeue = le64_to_cpu(ep_ctx->deq); } - trb = 
&state->new_deq_ptr->generic; - if (TRB_TYPE_LINK_LE32(trb->field[3]) && - (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) - state->new_cycle_state ^= 0x1; - next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); + new_seg = ep_ring->deq_seg; + new_deq = ep_ring->dequeue; + state->new_cycle_state = hw_dequeue & 0x1; /* - * If there is only one segment in a ring, find_trb_seg()'s while loop - * will not run, and it will return before it has a chance to see if it - * needs to toggle the cycle bit. It can't tell if the stalled transfer - * ended just before the link TRB on a one-segment ring, or if the TD - * wrapped around the top of the ring, because it doesn't have the TD in - * question. Look for the one-segment case where stalled TRB's address - * is greater than the new dequeue pointer address. - */ - if (ep_ring->first_seg == ep_ring->first_seg->next && - state->new_deq_ptr < dev->eps[ep_index].stopped_trb) - state->new_cycle_state ^= 0x1; + * We want to find the pointer, segment and cycle state of the new trb + * (the one after current TD's last_trb). We know the cycle state at + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are + * found. + */ + do { + if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + == (dma_addr_t)(hw_dequeue & ~0xf)) { + cycle_found = true; + if (td_last_trb_found) + break; + } + if (new_deq == cur_td->last_trb) + td_last_trb_found = true; + + if (cycle_found && + TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && + new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) + state->new_cycle_state ^= 0x1; + + next_trb(xhci, ep_ring, &new_seg, &new_deq); + + /* Search wrapped around, bail out */ + if (new_deq == ep->ring->dequeue) { + xhci_err(xhci, "Error: Failed finding new dequeue state\n"); + state->new_deq_seg = NULL; + state->new_deq_ptr = NULL; + return; + } + + } while (!cycle_found || !td_last_trb_found); + + state->new_deq_seg = new_seg; + state->new_deq_ptr = new_deq; + + /* Don't update the ring cycle state for the producer (us). */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Cycle state = 0x%x", state->new_cycle_state); - /* Don't update the ring cycle state for the producer (us). */ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "New dequeue segment = %p (virtual)", state->new_deq_seg); @@ -797,7 +810,6 @@ if (list_empty(&ep->cancelled_td_list)) { xhci_stop_watchdog_timer_in_irq(xhci, ep); ep->stopped_td = NULL; - ep->stopped_trb = NULL; ring_doorbell_for_active_rings(xhci, slot_id, ep_index); return; } @@ -865,11 +877,9 @@ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } - /* Clear stopped_td and stopped_trb if endpoint is not halted */ - if (!(ep->ep_state & EP_HALTED)) { + /* Clear stopped_td if endpoint is not halted */ + if (!(ep->ep_state & EP_HALTED)) ep->stopped_td = NULL; - ep->stopped_trb = NULL; - } /* * Drop the lock and complete the URBs in the cancelled TD list. @@ -1187,9 +1197,8 @@ false); xhci_ring_cmd_db(xhci); } else { - /* Clear our internal halted state and restart the ring(s) */ + /* Clear our internal halted state */ xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } } @@ -1585,8 +1594,11 @@ xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_RESET_DEV: - WARN_ON(slot_id != TRB_TO_SLOT_ID( - le32_to_cpu(cmd_trb->generic.field[3]))); + /* SLOT_ID field in reset device cmd completion event TRB is 0. 
+ * Use the SLOT_ID from the command TRB instead (xhci 4.6.11) + */ + slot_id = TRB_TO_SLOT_ID( + le32_to_cpu(cmd_trb->generic.field[3])); xhci_handle_cmd_reset_dev(xhci, slot_id, event); break; case TRB_NEC_GET_FW: @@ -1914,14 +1926,12 @@ struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; ep->ep_state |= EP_HALTED; ep->stopped_td = td; - ep->stopped_trb = event_trb; ep->stopped_stream = stream_id; xhci_queue_reset_ep(xhci, slot_id, ep_index); xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); ep->stopped_td = NULL; - ep->stopped_trb = NULL; ep->stopped_stream = 0; xhci_ring_cmd_db(xhci); @@ -2003,26 +2013,15 @@ * the ring dequeue pointer or take this TD off any lists yet. */ ep->stopped_td = td; - ep->stopped_trb = event_trb; return 0; } else { - if (trb_comp_code == COMP_STALL) { - /* The transfer is completed from the driver's - * perspective, but we need to issue a set dequeue - * command for this stalled endpoint to move the dequeue - * pointer past the TD. We can't do that here because - * the halt condition must be cleared first. Let the - * USB class driver clear the stall later. - */ - ep->stopped_td = td; - ep->stopped_trb = event_trb; - ep->stopped_stream = ep_ring->stream_id; - } else if (xhci_requires_manual_halt_cleanup(xhci, - ep_ctx, trb_comp_code)) { - /* Other types of errors halt the endpoint, but the - * class driver doesn't call usb_reset_endpoint() unless - * the error is -EPIPE. Clear the halted status in the - * xHCI hardware manually. + if (trb_comp_code == COMP_STALL || + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, + trb_comp_code)) { + /* Issue a reset endpoint command to clear the host side + * halt, followed by a set dequeue command to move the + * dequeue pointer past the TD. + * The class driver clears the device side halt later. */ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, ep_ring->stream_id, @@ -2142,9 +2141,7 @@ else td->urb->actual_length = 0; - xhci_cleanup_halted_endpoint(xhci, - slot_id, ep_index, 0, td, event_trb); - return finish_td(xhci, td, event_trb, event, ep, status, true); + return finish_td(xhci, td, event_trb, event, ep, status, false); } /* * Did we transfer any data, despite the errors that might have @@ -2153,7 +2150,7 @@ if (event_trb != ep_ring->dequeue) { /* The event was for the status stage */ if (event_trb == td->last_trb) { - if (td->urb->actual_length != 0) { + if (td->urb_length_set) { /* Don't overwrite a previously set error code */ if ((*status == -EINPROGRESS || *status == 0) && @@ -2167,7 +2164,13 @@ td->urb->transfer_buffer_length; } } else { - /* Maybe the event was for the data stage? */ + /* + * Maybe the event was for the data stage? If so, update + * already the actual_length of the URB and flag it as + * set, so that it is not overwritten in the event for + * the last TRB. + */ + td->urb_length_set = true; td->urb->actual_length = td->urb->transfer_buffer_length - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); @@ -2620,7 +2623,8 @@ * last TRB of the previous TD. The command completion handle * will take care the rest. */ - if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { + if (!event_seg && (trb_comp_code == COMP_STOP || + trb_comp_code == COMP_STOP_INVAL)) { ret = 0; goto cleanup; } @@ -2697,17 +2701,8 @@ if (ret) { urb = td->urb; urb_priv = urb->hcpriv; - /* Leave the TD around for the reset endpoint function - * to use(but only if it's not a control endpoint, - * since we already queued the Set TR dequeue pointer - * command for stalled control endpoints). 
- */ - if (usb_endpoint_xfer_control(&urb->ep->desc) || - (trb_comp_code != COMP_STALL && - trb_comp_code != COMP_BABBLE)) - xhci_urb_free_priv(xhci, urb_priv); - else - kfree(urb_priv); + + xhci_urb_free_priv(xhci, urb_priv); usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); if ((urb->actual_length != urb->transfer_buffer_length && @@ -2973,58 +2968,8 @@ } while (1) { - if (room_on_ring(xhci, ep_ring, num_trbs)) { - union xhci_trb *trb = ep_ring->enqueue; - unsigned int usable = ep_ring->enq_seg->trbs + - TRBS_PER_SEGMENT - 1 - trb; - u32 nop_cmd; - - /* - * Section 4.11.7.1 TD Fragments states that a link - * TRB must only occur at the boundary between - * data bursts (eg 512 bytes for 480M). - * While it is possible to split a large fragment - * we don't know the size yet. - * Simplest solution is to fill the trb before the - * LINK with nop commands. - */ - if (num_trbs == 1 || num_trbs <= usable || usable == 0) - break; - - if (ep_ring->type != TYPE_BULK) - /* - * While isoc transfers might have a buffer that - * crosses a 64k boundary it is unlikely. - * Since we can't add NOPs without generating - * gaps in the traffic just hope it never - * happens at the end of the ring. - * This could be fixed by writing a LINK TRB - * instead of the first NOP - however the - * TRB_TYPE_LINK_LE32() calls would all need - * changing to check the ring length. - */ - break; - - if (num_trbs >= TRBS_PER_SEGMENT) { - xhci_err(xhci, "Too many fragments %d, max %d\n", - num_trbs, TRBS_PER_SEGMENT - 1); - return -ENOMEM; - } - - nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) | - ep_ring->cycle_state); - ep_ring->num_trbs_free -= usable; - do { - trb->generic.field[0] = 0; - trb->generic.field[1] = 0; - trb->generic.field[2] = 0; - trb->generic.field[3] = nop_cmd; - trb++; - } while (--usable); - ep_ring->enqueue = trb; - if (room_on_ring(xhci, ep_ring, num_trbs)) - break; - } + if (room_on_ring(xhci, ep_ring, num_trbs)) + break; if (ep_ring == xhci->cmd_ring) { xhci_err(xhci, "Do not support expand command ring\n"); @@ -3724,7 +3669,7 @@ return 0; max_burst = urb->ep->ss_ep_comp.bMaxBurst; - return roundup(total_packet_count, max_burst + 1) - 1; + return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; } /* --- linux-3.13.0.orig/drivers/usb/host/xhci.c +++ linux-3.13.0/drivers/usb/host/xhci.c @@ -35,6 +35,8 @@ #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" +#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) + /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ static int link_quirk; module_param(link_quirk, int, S_IRUGO | S_IWUSR); @@ -321,6 +323,9 @@ struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + if (xhci->quirks & XHCI_PLAT) + return; + xhci_free_irq(xhci); if (xhci->msix_entries) { @@ -837,13 +842,47 @@ xhci_set_cmd_ring_deq(xhci); } +static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) +{ + int port_index; + __le32 __iomem **port_array; + unsigned long flags; + u32 t1, t2; + + spin_lock_irqsave(&xhci->lock, flags); + + /* disble usb3 ports Wake bits*/ + port_index = xhci->num_usb3_ports; + port_array = xhci->usb3_ports; + while (port_index--) { + t1 = readl(port_array[port_index]); + t1 = xhci_port_state_to_neutral(t1); + t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) + writel(t2, port_array[port_index]); + } + + /* disble usb2 ports Wake bits*/ + port_index = xhci->num_usb2_ports; + port_array = xhci->usb2_ports; + while 
(port_index--) { + t1 = readl(port_array[port_index]); + t1 = xhci_port_state_to_neutral(t1); + t2 = t1 & ~PORT_WAKE_BITS; + if (t1 != t2) + writel(t2, port_array[port_index]); + } + + spin_unlock_irqrestore(&xhci->lock, flags); +} + /* * Stop HC (not bus-specific) * * This is called when the machine transition into S3/S4 mode. * */ -int xhci_suspend(struct xhci_hcd *xhci) +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) { int rc = 0; unsigned int delay = XHCI_MAX_HALT_USEC; @@ -854,6 +893,10 @@ xhci->shared_hcd->state != HC_STATE_SUSPENDED) return -EINVAL; + /* Clear root port wake on bits if wakeup not allowed. */ + if (!do_wakeup) + xhci_disable_port_wake_on_bits(xhci); + /* Don't poll the roothubs on bus suspend. */ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -923,7 +966,7 @@ */ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { - u32 command, temp = 0; + u32 command, temp = 0, status; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct usb_hcd *secondary_hcd; int retval = 0; @@ -1042,8 +1085,12 @@ done: if (retval == 0) { - usb_hcd_resume_root_hub(hcd); - usb_hcd_resume_root_hub(xhci->shared_hcd); + /* Resume root hubs only when have pending events. */ + status = readl(&xhci->op_regs->status); + if (status & STS_EINT) { + usb_hcd_resume_root_hub(hcd); + usb_hcd_resume_root_hub(xhci->shared_hcd); + } } /* @@ -2855,6 +2902,9 @@ ep_index, ep->stopped_stream, ep->stopped_td, &deq_state); + if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) + return; + /* HW with the reset endpoint quirk will use the saved dequeue state to * issue a configure endpoint command later. */ @@ -2877,64 +2927,32 @@ } } -/* Deal with stalled endpoints. The core should have sent the control message - * to clear the halt condition. However, we need to make the xHCI hardware - * reset its sequence number, since a device will expect a sequence number of - * zero after the halt condition is cleared. +/* Called when clearing halted device. The core should have sent the control + * message to clear the device halt condition. The host side of the halt should + * already be cleared with a reset endpoint command issued when the STALL tx + * event was received. + * * Context: in_interrupt */ + void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { struct xhci_hcd *xhci; - struct usb_device *udev; - unsigned int ep_index; - unsigned long flags; - int ret; - struct xhci_virt_ep *virt_ep; xhci = hcd_to_xhci(hcd); - udev = (struct usb_device *) ep->hcpriv; - /* Called with a root hub endpoint (or an endpoint that wasn't added - * with xhci_add_endpoint() - */ - if (!ep->hcpriv) - return; - ep_index = xhci_get_endpoint_index(&ep->desc); - virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; - if (!virt_ep->stopped_td) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Endpoint 0x%x not halted, refusing to reset.", - ep->desc.bEndpointAddress); - return; - } - if (usb_endpoint_xfer_control(&ep->desc)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Control endpoint stall already handled."); - return; - } - - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, - "Queueing reset endpoint command"); - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); /* - * Can't change the ring dequeue pointer until it's transitioned to the - * stopped state, which is only upon a successful reset endpoint - * command. Better hope that last command worked! 
+ * We might need to implement the config ep cmd in xhci 4.8.1 note: + * The Reset Endpoint Command may only be issued to endpoints in the + * Halted state. If software wishes reset the Data Toggle or Sequence + * Number of an endpoint that isn't in the Halted state, then software + * may issue a Configure Endpoint Command with the Drop and Add bits set + * for the target endpoint. that is in the Stopped state. */ - if (!ret) { - xhci_cleanup_stalled_ring(xhci, udev, ep_index); - kfree(virt_ep->stopped_td); - xhci_ring_cmd_db(xhci); - } - virt_ep->stopped_td = NULL; - virt_ep->stopped_trb = NULL; - virt_ep->stopped_stream = 0; - spin_unlock_irqrestore(&xhci->lock, flags); - if (ret) - xhci_warn(xhci, "FIXME allocate a new ring segment\n"); + /* For now just print debug to follow the situation */ + xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", + ep->desc.bEndpointAddress); } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, @@ -3908,13 +3926,21 @@ int ret; spin_lock_irqsave(&xhci->lock, flags); - if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { + + virt_dev = xhci->devs[udev->slot_id]; + + /* + * virt_dev might not exists yet if xHC resumed from hibernate (S4) and + * xHC was re-initialized. Exit latency will be set later after + * hub_port_finish_reset() is done and xhci->devs[] are re-allocated + */ + + if (!virt_dev || max_exit_latency == virt_dev->current_mel) { spin_unlock_irqrestore(&xhci->lock, flags); return 0; } /* Attempt to issue an Evaluate Context command to change the MEL. */ - virt_dev = xhci->devs[udev->slot_id]; command = xhci->lpm_command; ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); if (!ctrl_ctx) { --- linux-3.13.0.orig/drivers/usb/host/xhci.h +++ linux-3.13.0/drivers/usb/host/xhci.h @@ -1,3 +1,4 @@ + /* * xHCI host controller driver * @@ -88,9 +89,10 @@ #define HCS_IST(p) (((p) >> 0) & 0xf) /* bits 4:7, max number of Event Ring segments */ #define HCS_ERST_MAX(p) (((p) >> 4) & 0xf) +/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */ /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */ -/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */ -#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f) +/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */ +#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f)) /* HCSPARAMS3 - hcs_params3 - bitmasks */ /* bits 0:7, Max U1 to U0 latency for the roothub ports */ @@ -864,8 +866,6 @@ #define EP_GETTING_NO_STREAMS (1 << 5) /* ---- Related to URB cancellation ---- */ struct list_head cancelled_td_list; - /* The TRB that was last reported in a stopped endpoint ring */ - union xhci_trb *stopped_trb; struct xhci_td *stopped_td; unsigned int stopped_stream; /* Watchdog timer for stop endpoint command to cancel URBs */ @@ -1283,6 +1283,8 @@ struct xhci_segment *start_seg; union xhci_trb *first_trb; union xhci_trb *last_trb; + /* actual_length of the URB has already been set */ + bool urb_length_set; }; /* xHCI command default timeout value */ @@ -1549,6 +1551,7 @@ #define XHCI_PLAT (1 << 16) #define XHCI_SLOW_SUSPEND (1 << 17) #define XHCI_SPURIOUS_WAKEUP (1 << 18) +#define XHCI_PME_STUCK_QUIRK (1 << 20) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1767,7 +1770,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); #ifdef CONFIG_PM -int xhci_suspend(struct xhci_hcd 
*xhci); +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); int xhci_resume(struct xhci_hcd *xhci, bool hibernated); #else #define xhci_suspend NULL --- linux-3.13.0.orig/drivers/usb/misc/sisusbvga/sisusb.c +++ linux-3.13.0/drivers/usb/misc/sisusbvga/sisusb.c @@ -3248,6 +3248,7 @@ { USB_DEVICE(0x0711, 0x0918) }, { USB_DEVICE(0x0711, 0x0920) }, { USB_DEVICE(0x0711, 0x0950) }, + { USB_DEVICE(0x0711, 0x5200) }, { USB_DEVICE(0x182d, 0x021c) }, { USB_DEVICE(0x182d, 0x0269) }, { } --- linux-3.13.0.orig/drivers/usb/misc/usbtest.c +++ linux-3.13.0/drivers/usb/misc/usbtest.c @@ -1303,6 +1303,11 @@ urb->context = &completion; urb->complete = unlink1_callback; + if (usb_pipeout(urb->pipe)) { + simple_fill_buf(urb); + urb->transfer_flags |= URB_ZERO_PACKET; + } + /* keep the endpoint busy. there are lots of hc/hcd-internal * states, and testing should get to all of them over time. * @@ -1433,6 +1438,11 @@ unlink_queued_callback, &ctx); ctx.urbs[i]->transfer_dma = buf_dma; ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; + + if (usb_pipeout(ctx.urbs[i]->pipe)) { + simple_fill_buf(ctx.urbs[i]); + ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET; + } } /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ --- linux-3.13.0.orig/drivers/usb/musb/Kconfig +++ linux-3.13.0/drivers/usb/musb/Kconfig @@ -74,7 +74,7 @@ config USB_MUSB_OMAP2PLUS tristate "OMAP2430 and onwards" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS && USB select GENERIC_PHY config USB_MUSB_AM35X --- linux-3.13.0.orig/drivers/usb/musb/musb_am335x.c +++ linux-3.13.0/drivers/usb/musb/musb_am335x.c @@ -20,21 +20,6 @@ return ret; } -static int of_remove_populated_child(struct device *dev, void *d) -{ - struct platform_device *pdev = to_platform_device(dev); - - of_device_unregister(pdev); - return 0; -} - -static int am335x_child_remove(struct platform_device *pdev) -{ - device_for_each_child(&pdev->dev, NULL, of_remove_populated_child); - pm_runtime_disable(&pdev->dev); - return 0; -} - static const struct of_device_id am335x_child_of_match[] = { { .compatible = "ti,am33xx-usb" }, { }, @@ -43,13 +28,17 @@ static struct platform_driver am335x_child_driver = { .probe = am335x_child_probe, - .remove = am335x_child_remove, .driver = { .name = "am335x-usb-childs", .of_match_table = am335x_child_of_match, }, }; -module_platform_driver(am335x_child_driver); +static int __init am335x_child_init(void) +{ + return platform_driver_register(&am335x_child_driver); +} +module_init(am335x_child_init); + MODULE_DESCRIPTION("AM33xx child devices"); MODULE_LICENSE("GPL v2"); --- linux-3.13.0.orig/drivers/usb/musb/musb_core.c +++ linux-3.13.0/drivers/usb/musb/musb_core.c @@ -439,7 +439,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, u8 devctl) { - struct usb_otg *otg = musb->xceiv->otg; irqreturn_t handled = IRQ_NONE; dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl, @@ -654,7 +653,7 @@ break; case OTG_STATE_B_PERIPHERAL: musb_g_suspend(musb); - musb->is_active = otg->gadget->b_hnp_enable; + musb->is_active = musb->g.b_hnp_enable; if (musb->is_active) { musb->xceiv->state = OTG_STATE_B_WAIT_ACON; dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n"); @@ -670,7 +669,7 @@ break; case OTG_STATE_A_HOST: musb->xceiv->state = OTG_STATE_A_SUSPEND; - musb->is_active = otg->host->b_hnp_enable; + musb->is_active = musb->hcd->self.b_hnp_enable; break; case OTG_STATE_B_HOST: /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ --- linux-3.13.0.orig/drivers/usb/musb/musb_cppi41.c +++ 
linux-3.13.0/drivers/usb/musb/musb_cppi41.c @@ -190,7 +190,8 @@ } } - if (!list_empty(&controller->early_tx_list)) { + if (!list_empty(&controller->early_tx_list) && + !hrtimer_is_queued(&controller->early_tx)) { ret = HRTIMER_RESTART; hrtimer_forward_now(&controller->early_tx, ktime_set(0, 150 * NSEC_PER_USEC)); @@ -266,7 +267,7 @@ } list_add_tail(&cppi41_channel->tx_check, &controller->early_tx_list); - if (!hrtimer_active(&controller->early_tx)) { + if (!hrtimer_is_queued(&controller->early_tx)) { hrtimer_start_range_ns(&controller->early_tx, ktime_set(0, 140 * NSEC_PER_USEC), 40 * NSEC_PER_USEC, --- linux-3.13.0.orig/drivers/usb/musb/musb_host.c +++ linux-3.13.0/drivers/usb/musb/musb_host.c @@ -2631,7 +2631,6 @@ if (musb->port_mode == MUSB_PORT_MODE_GADGET) return; usb_remove_hcd(musb->hcd); - musb->hcd = NULL; } void musb_host_free(struct musb *musb) --- linux-3.13.0.orig/drivers/usb/musb/ux500.c +++ linux-3.13.0/drivers/usb/musb/ux500.c @@ -275,7 +275,6 @@ musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; - musb->dev.of_node = pdev->dev.of_node; glue->dev = &pdev->dev; glue->musb = musb; --- linux-3.13.0.orig/drivers/usb/phy/phy-fsl-usb.c +++ linux-3.13.0/drivers/usb/phy/phy-fsl-usb.c @@ -76,7 +76,7 @@ .otg_port = 1, }; -#ifdef CONFIG_PPC32 +#ifdef CONFIG_PPC static u32 _fsl_readl_be(const unsigned __iomem *p) { return in_be32(p); @@ -106,7 +106,7 @@ #else #define fsl_readl(addr) readl(addr) #define fsl_writel(val, addr) writel(val, addr) -#endif /* CONFIG_PPC32 */ +#endif /* CONFIG_PPC */ /* Routines to access transceiver ULPI registers */ u8 view_ulpi(u8 addr) @@ -914,6 +914,7 @@ if (pdata->init && pdata->init(pdev) != 0) return -EINVAL; +#ifdef CONFIG_PPC if (pdata->big_endian_mmio) { _fsl_readl = _fsl_readl_be; _fsl_writel = _fsl_writel_be; @@ -921,6 +922,7 @@ _fsl_readl = _fsl_readl_le; _fsl_writel = _fsl_writel_le; } +#endif /* request irq */ p_otg->irq = platform_get_irq(pdev, 0); --- linux-3.13.0.orig/drivers/usb/phy/phy-tegra-usb.c +++ linux-3.13.0/drivers/usb/phy/phy-tegra-usb.c @@ -881,8 +881,8 @@ return -ENOMEM; } - tegra_phy->config = devm_kzalloc(&pdev->dev, - sizeof(*tegra_phy->config), GFP_KERNEL); + tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config), + GFP_KERNEL); if (!tegra_phy->config) { dev_err(&pdev->dev, "unable to allocate memory for USB UTMIP config\n"); --- linux-3.13.0.orig/drivers/usb/phy/phy-ulpi.c +++ linux-3.13.0/drivers/usb/phy/phy-ulpi.c @@ -47,6 +47,8 @@ static struct ulpi_info ulpi_ids[] = { ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"), ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"), + ULPI_INFO(ULPI_ID(0x0424, 0x0007), "SMSC USB3320"), + ULPI_INFO(ULPI_ID(0x0451, 0x1507), "TI TUSB1210"), }; static int ulpi_set_otg_flags(struct usb_phy *phy) --- linux-3.13.0.orig/drivers/usb/renesas_usbhs/mod_gadget.c +++ linux-3.13.0/drivers/usb/renesas_usbhs/mod_gadget.c @@ -600,6 +600,10 @@ static int usbhsg_ep_disable(struct usb_ep *ep) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); + struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + + if (!pipe) + return -EINVAL; usbhsg_pipe_disable(uep); --- linux-3.13.0.orig/drivers/usb/serial/bus.c +++ linux-3.13.0/drivers/usb/serial/bus.c @@ -51,6 +51,7 @@ { struct usb_serial_driver *driver; struct usb_serial_port *port; + struct device *tty_dev; int retval = 0; int minor; @@ -75,12 +76,20 @@ retval = device_create_file(dev, &dev_attr_port_number); if (retval) { if (driver->port_remove) - retval = 
driver->port_remove(port); + driver->port_remove(port); goto exit_with_autopm; } minor = port->minor; - tty_register_device(usb_serial_tty_driver, minor, dev); + tty_dev = tty_register_device(usb_serial_tty_driver, minor, dev); + if (IS_ERR(tty_dev)) { + retval = PTR_ERR(tty_dev); + device_remove_file(dev, &dev_attr_port_number); + if (driver->port_remove) + driver->port_remove(port); + goto exit_with_autopm; + } + dev_info(&port->serial->dev->dev, "%s converter now attached to ttyUSB%d\n", driver->description, minor); --- linux-3.13.0.orig/drivers/usb/serial/console.c +++ linux-3.13.0/drivers/usb/serial/console.c @@ -47,6 +47,8 @@ * ------------------------------------------------------------ */ +static const struct tty_operations usb_console_fake_tty_ops = { +}; /* * The parsing of the command line works exactly like the @@ -139,14 +141,18 @@ goto reset_open_count; } kref_init(&tty->kref); - tty_port_tty_set(&port->port, tty); tty->driver = usb_serial_tty_driver; tty->index = co->index; + init_ldsem(&tty->ldisc_sem); + INIT_LIST_HEAD(&tty->tty_files); + kref_get(&tty->driver->kref); + tty->ops = &usb_console_fake_tty_ops; if (tty_init_termios(tty)) { retval = -ENOMEM; dev_err(&port->dev, "no more memory\n"); - goto free_tty; + goto put_tty; } + tty_port_tty_set(&port->port, tty); } /* only call the device specific open if this @@ -164,7 +170,7 @@ serial->type->set_termios(tty, port, &dummy); tty_port_tty_set(&port->port, NULL); - kfree(tty); + tty_kref_put(tty); } set_bit(ASYNCB_INITIALIZED, &port->port.flags); } @@ -180,8 +186,8 @@ fail: tty_port_tty_set(&port->port, NULL); - free_tty: - kfree(tty); + put_tty: + tty_kref_put(tty); reset_open_count: port->port.count = 0; usb_autopm_put_interface(serial->interface); --- linux-3.13.0.orig/drivers/usb/serial/cp210x.c +++ linux-3.13.0/drivers/usb/serial/cp210x.c @@ -56,6 +56,7 @@ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ + { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ @@ -104,6 +105,7 @@ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ @@ -119,8 +121,12 @@ { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */ + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { 
USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ @@ -141,6 +147,8 @@ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */ + { USB_DEVICE(0x16C0, 0x09B0) }, /* Lunatico Seletek */ + { USB_DEVICE(0x16C0, 0x09B1) }, /* Lunatico Seletek */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ @@ -152,7 +160,10 @@ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ + { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ { USB_DEVICE(0x1FB9, 0x0100) }, /* Lake Shore Model 121 Current Source */ --- linux-3.13.0.orig/drivers/usb/serial/cypress_m8.h +++ linux-3.13.0/drivers/usb/serial/cypress_m8.h @@ -63,7 +63,7 @@ #define UART_DSR 0x20 /* data set ready - flow control - device to host */ #define CONTROL_RTS 0x10 /* request to send - flow control - host to device */ #define UART_CTS 0x10 /* clear to send - flow control - device to host */ -#define UART_RI 0x10 /* ring indicator - modem - device to host */ +#define UART_RI 0x80 /* ring indicator - modem - device to host */ #define UART_CD 0x40 /* carrier detect - modem - device to host */ #define CYP_ERROR 0x08 /* received from input report - device to host */ /* Note - the below has nothing to do with the "feature report" reset */ --- linux-3.13.0.orig/drivers/usb/serial/ftdi_sio.c +++ linux-3.13.0/drivers/usb/serial/ftdi_sio.c @@ -146,13 +146,16 @@ * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report. 
*/ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) }, @@ -192,6 +195,8 @@ { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, + { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) }, { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, @@ -479,6 +484,39 @@ { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) }, + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, @@ -578,6 +616,9 @@ { USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TIAO_UMPA_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, 
FTDI_NT_ORIONLXM_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, /* * ELV devices: */ @@ -669,6 +710,10 @@ { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, @@ -716,7 +761,8 @@ { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, - { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_1_PID) }, + { USB_DEVICE(TESTO_VID, TESTO_3_PID) }, { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, @@ -733,6 +779,7 @@ { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, + { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, @@ -767,6 +814,8 @@ { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, + { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), @@ -905,6 +954,64 @@ /* Crucible Devices */ { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) }, { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, + /* Cressi Devices */ + { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) }, + /* Brainboxes Devices */ + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) }, + { USB_DEVICE(BRAINBOXES_VID, 
BRAINBOXES_US_606_1_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, + /* ekey Devices */ + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, + /* Infineon Devices */ + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, + /* GE Healthcare devices */ + { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, + /* Active Research (Actisense) devices */ + { USB_DEVICE(FTDI_VID, ACTISENSE_NDC_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_NMEA2000_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ETHERNET_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_WIFI_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, { } /* Terminating entry */ }; @@ -1527,14 +1634,17 @@ struct usb_device *udev = serial->dev; struct usb_interface *interface = serial->interface; - struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; + struct usb_endpoint_descriptor *ep_desc; unsigned num_endpoints; - int i; + unsigned i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); + if (!num_endpoints) + return; + /* NOTE: some customers have programmed FT232R/FT245R devices * with an endpoint size of 0 - not good. In this case, we * want to override the endpoint descriptor setting and use a @@ -1796,8 +1906,12 @@ { struct usb_device *udev = serial->dev; - if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || - (udev->product && !strcmp(udev->product, "BeagleBone/XDS100V2"))) + if (udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) + return ftdi_jtag_probe(serial); + + if (udev->product && + (!strcmp(udev->product, "BeagleBone/XDS100V2") || + !strcmp(udev->product, "SNAP Connect E10"))) return ftdi_jtag_probe(serial); return 0; @@ -2124,10 +2238,20 @@ } /* - * All FTDI UART chips are limited to CS7/8. We won't pretend to + * All FTDI UART chips are limited to CS7/8. We shouldn't pretend to * support CS5/6 and revert the CSIZE setting instead. + * + * CS5 however is used to control some smartcard readers which abuse + * this limitation to switch modes. Original FTDI chips fall back to + * eight data bits. + * + * TODO: Implement a quirk to only allow this with mentioned + * readers. One I know of (Argolis Smartreader V1) + * returns "USB smartcard server" as iInterface string. + * The vendor didn't bother with a custom VID/PID of + * course. 
*/ - if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) { + if (C_CSIZE(tty) == CS6) { dev_warn(ddev, "requested CSIZE setting not supported\n"); termios->c_cflag &= ~CSIZE; @@ -2174,6 +2298,9 @@ urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE; } switch (cflag & CSIZE) { + case CS5: + dev_dbg(ddev, "Setting CS5 quirk\n"); + break; case CS7: urb_value |= 7; dev_dbg(ddev, "Setting CS7\n"); --- linux-3.13.0.orig/drivers/usb/serial/ftdi_sio_ids.h +++ linux-3.13.0/drivers/usb/serial/ftdi_sio_ids.h @@ -30,8 +30,17 @@ /*** third-party PIDs (using FTDI_VID) ***/ +/* + * Certain versions of the official Windows FTDI driver reprogrammed + * counterfeit FTDI devices to PID 0. Support these devices anyway. + */ +#define FTDI_BRICK_PID 0x0000 + #define FTDI_LUMEL_PD12_PID 0x6002 +/* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */ +#define CYBER_CORTEX_AV_PID 0x8698 + /* * Marvell OpenRD Base, Client * http://www.open-rd.org @@ -42,6 +51,8 @@ /* www.candapter.com Ewert Energy Systems CANdapter device */ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ +#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */ + /* * Texas Instruments XDS100v2 JTAG / BeagleBone A3 * http://processors.wiki.ti.com/index.php/XDS100 @@ -50,6 +61,7 @@ #define TI_XDS100V2_PID 0xa6d0 #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ +#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */ /* US Interface Navigator (http://www.usinterface.com/) */ #define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ @@ -139,12 +151,19 @@ /* * Xsens Technologies BV products (http://www.xsens.com). */ -#define XSENS_CONVERTER_0_PID 0xD388 -#define XSENS_CONVERTER_1_PID 0xD389 +#define XSENS_VID 0x2639 +#define XSENS_AWINDA_STATION_PID 0x0101 +#define XSENS_AWINDA_DONGLE_PID 0x0102 +#define XSENS_MTW_PID 0x0200 /* Xsens MTw */ +#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ + +/* Xsens devices using FTDI VID */ +#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */ +#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */ #define XSENS_CONVERTER_2_PID 0xD38A -#define XSENS_CONVERTER_3_PID 0xD38B -#define XSENS_CONVERTER_4_PID 0xD38C -#define XSENS_CONVERTER_5_PID 0xD38D +#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */ +#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */ +#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */ #define XSENS_CONVERTER_6_PID 0xD38E #define XSENS_CONVERTER_7_PID 0xD38F @@ -363,6 +382,12 @@ /* Sprog II (Andrew Crosland's SprogII DCC interface) */ #define FTDI_SPROG_II 0xF0C8 +/* + * Two of the Tagsys RFID Readers + */ +#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID*/ +#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID*/ + /* an infrared receiver for user access control with IR tags */ #define FTDI_PIEGROUP_PID 0xF208 /* Product Id */ @@ -531,6 +556,17 @@ */ #define FTDI_TIAO_UMPA_PID 0x8a98 /* TIAO/DIYGADGET USB Multi-Protocol Adapter */ +/* + * NovaTech product ids (FTDI_VID) + */ +#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ + +/* + * Synapse Wireless product ids (FTDI_VID) + * http://www.synapse-wireless.com + */ +#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */ + /********************************/ /** third-party VID/PID combos **/ @@ -572,6 +608,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Infineon Technologies + */ +#define INFINEON_VID 0x058b +#define 
INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */ + +/* * Acton Research Corp. */ #define ACTON_VID 0x0647 /* Vendor ID */ @@ -786,7 +828,8 @@ * Submitted by Colin Leroy */ #define TESTO_VID 0x128D -#define TESTO_USB_INTERFACE_PID 0x0001 +#define TESTO_1_PID 0x0001 +#define TESTO_3_PID 0x0003 /* * Mobility Electronics products. @@ -813,6 +856,12 @@ #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ /* + * NOVITUS printers + */ +#define NOVITUS_VID 0x1a28 +#define NOVITUS_BONO_E_PID 0x6010 + +/* * RT Systems programming cables for various ham radios */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ @@ -886,8 +935,8 @@ #define BAYER_CONTOUR_CABLE_PID 0x6001 /* - * The following are the values for the Matrix Orbital FTDI Range - * Anything in this range will use an FT232RL. + * Matrix Orbital Intelligent USB displays. + * http://www.matrixorbital.com */ #define MTXORB_VID 0x1B3D #define MTXORB_FTDI_RANGE_0100_PID 0x0100 @@ -1146,8 +1195,39 @@ #define MTXORB_FTDI_RANGE_01FD_PID 0x01FD #define MTXORB_FTDI_RANGE_01FE_PID 0x01FE #define MTXORB_FTDI_RANGE_01FF_PID 0x01FF - - +#define MTXORB_FTDI_RANGE_4701_PID 0x4701 +#define MTXORB_FTDI_RANGE_9300_PID 0x9300 +#define MTXORB_FTDI_RANGE_9301_PID 0x9301 +#define MTXORB_FTDI_RANGE_9302_PID 0x9302 +#define MTXORB_FTDI_RANGE_9303_PID 0x9303 +#define MTXORB_FTDI_RANGE_9304_PID 0x9304 +#define MTXORB_FTDI_RANGE_9305_PID 0x9305 +#define MTXORB_FTDI_RANGE_9306_PID 0x9306 +#define MTXORB_FTDI_RANGE_9307_PID 0x9307 +#define MTXORB_FTDI_RANGE_9308_PID 0x9308 +#define MTXORB_FTDI_RANGE_9309_PID 0x9309 +#define MTXORB_FTDI_RANGE_930A_PID 0x930A +#define MTXORB_FTDI_RANGE_930B_PID 0x930B +#define MTXORB_FTDI_RANGE_930C_PID 0x930C +#define MTXORB_FTDI_RANGE_930D_PID 0x930D +#define MTXORB_FTDI_RANGE_930E_PID 0x930E +#define MTXORB_FTDI_RANGE_930F_PID 0x930F +#define MTXORB_FTDI_RANGE_9310_PID 0x9310 +#define MTXORB_FTDI_RANGE_9311_PID 0x9311 +#define MTXORB_FTDI_RANGE_9312_PID 0x9312 +#define MTXORB_FTDI_RANGE_9313_PID 0x9313 +#define MTXORB_FTDI_RANGE_9314_PID 0x9314 +#define MTXORB_FTDI_RANGE_9315_PID 0x9315 +#define MTXORB_FTDI_RANGE_9316_PID 0x9316 +#define MTXORB_FTDI_RANGE_9317_PID 0x9317 +#define MTXORB_FTDI_RANGE_9318_PID 0x9318 +#define MTXORB_FTDI_RANGE_9319_PID 0x9319 +#define MTXORB_FTDI_RANGE_931A_PID 0x931A +#define MTXORB_FTDI_RANGE_931B_PID 0x931B +#define MTXORB_FTDI_RANGE_931C_PID 0x931C +#define MTXORB_FTDI_RANGE_931D_PID 0x931D +#define MTXORB_FTDI_RANGE_931E_PID 0x931E +#define MTXORB_FTDI_RANGE_931F_PID 0x931F /* * The Mobility Lab (TML) @@ -1313,3 +1393,77 @@ * Manufacturer: Smart GSM Team */ #define FTDI_Z3X_PID 0x0011 + +/* + * Product: Cressi PC Interface + * Manufacturer: Cressi + */ +#define FTDI_CRESSI_PID 0x87d0 + +/* + * Brainboxes devices + */ +#define BRAINBOXES_VID 0x05d1 +#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */ +#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */ +#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ +#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ +#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ +#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ +#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ +#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ +#define BRAINBOXES_US_606_3_PID 0x2003 /* US-606 6 Port RS232 Serial Port 4 and 6 */ +#define 
BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */ +#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */ +#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */ +#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */ +#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */ +#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */ +#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */ +#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */ +#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */ +#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */ +#define BRAINBOXES_US_357_PID 0x7001 /* US_357 1xRS232/422/485 */ +#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */ +#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */ +#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */ +#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */ +#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */ +#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */ +#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */ +#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */ +#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */ +#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */ +#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */ +#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */ + +/* + * ekey biometric systems GmbH (http://ekey.net/) + */ +#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ + +/* + * GE Healthcare devices + */ +#define GE_HEALTHCARE_VID 0x1901 +#define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 + +/* + * Active Research (Actisense) devices + */ +#define ACTISENSE_NDC_PID 0xD9A8 /* NDC USB Serial Adapter */ +#define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ +#define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ +#define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ +#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ +#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ +#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ +#define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ +#define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ +#define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ +#define CHETCO_SEASMART_NMEA2000_PID 0xA54A /* SeaSmart NMEA2000 Gateway */ +#define CHETCO_SEASMART_ETHERNET_PID 0xA54B /* SeaSmart Ethernet Gateway */ +#define CHETCO_SEASMART_WIFI_PID 0xA5AC /* SeaSmart Wifi Gateway */ +#define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */ +#define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ +#define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ --- linux-3.13.0.orig/drivers/usb/serial/generic.c +++ linux-3.13.0/drivers/usb/serial/generic.c @@ -258,7 +258,8 @@ * character or at least one jiffy. 
*/ period = max_t(unsigned long, (10 * HZ / bps), 1); - period = min_t(unsigned long, period, timeout); + if (timeout) + period = min_t(unsigned long, period, timeout); dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", __func__, jiffies_to_msecs(timeout), @@ -268,7 +269,7 @@ schedule_timeout_interruptible(period); if (signal_pending(current)) break; - if (time_after(jiffies, expire)) + if (timeout && time_after(jiffies, expire)) break; } } --- linux-3.13.0.orig/drivers/usb/serial/io_ti.c +++ linux-3.13.0/drivers/usb/serial/io_ti.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -281,7 +282,7 @@ { int status = 0; __u8 read_length; - __be16 be_start_address; + u16 be_start_address; dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length); @@ -297,10 +298,14 @@ if (read_length > 1) { dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length); } - be_start_address = cpu_to_be16(start_address); + /* + * NOTE: Must use swab as wIndex is sent in little-endian + * byte order regardless of host byte order. + */ + be_start_address = swab16((u16)start_address); status = ti_vread_sync(dev, UMPC_MEMORY_READ, (__u16)address_type, - (__force __u16)be_start_address, + be_start_address, buffer, read_length); if (status) { @@ -397,7 +402,7 @@ struct device *dev = &serial->serial->dev->dev; int status = 0; int write_length; - __be16 be_start_address; + u16 be_start_address; /* We can only send a maximum of 1 aligned byte page at a time */ @@ -412,11 +417,16 @@ __func__, start_address, write_length); usb_serial_debug_data(dev, __func__, write_length, buffer); - /* Write first page */ - be_start_address = cpu_to_be16(start_address); + /* + * Write first page. + * + * NOTE: Must use swab as wIndex is sent in little-endian byte order + * regardless of host byte order. + */ + be_start_address = swab16((u16)start_address); status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, (__u16)address_type, - (__force __u16)be_start_address, + be_start_address, buffer, write_length); if (status) { dev_dbg(dev, "%s - ERROR %d\n", __func__, status); @@ -439,11 +449,16 @@ __func__, start_address, write_length); usb_serial_debug_data(dev, __func__, write_length, buffer); - /* Write next page */ - be_start_address = cpu_to_be16(start_address); + /* + * Write next page. + * + * NOTE: Must use swab as wIndex is sent in little-endian byte + * order regardless of host byte order. 
+ */ + be_start_address = swab16((u16)start_address); status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, (__u16)address_type, - (__force __u16)be_start_address, + be_start_address, buffer, write_length); if (status) { dev_err(dev, "%s - ERROR %d\n", __func__, status); @@ -590,8 +605,8 @@ if (rom_desc->Type == desc_type) return start_address; - start_address = start_address + sizeof(struct ti_i2c_desc) - + rom_desc->Size; + start_address = start_address + sizeof(struct ti_i2c_desc) + + le16_to_cpu(rom_desc->Size); } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type); @@ -604,7 +619,7 @@ __u16 i; __u8 cs = 0; - for (i = 0; i < rom_desc->Size; i++) + for (i = 0; i < le16_to_cpu(rom_desc->Size); i++) cs = (__u8)(cs + buffer[i]); if (cs != rom_desc->CheckSum) { @@ -658,7 +673,7 @@ break; if ((start_address + sizeof(struct ti_i2c_desc) + - rom_desc->Size) > TI_MAX_I2C_SIZE) { + le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) { status = -ENODEV; dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__); break; @@ -673,7 +688,8 @@ /* Read the descriptor data */ status = read_rom(serial, start_address + sizeof(struct ti_i2c_desc), - rom_desc->Size, buffer); + le16_to_cpu(rom_desc->Size), + buffer); if (status) break; @@ -682,7 +698,7 @@ break; } start_address = start_address + sizeof(struct ti_i2c_desc) + - rom_desc->Size; + le16_to_cpu(rom_desc->Size); } while ((rom_desc->Type != I2C_DESC_TYPE_ION) && (start_address < TI_MAX_I2C_SIZE)); @@ -721,7 +737,7 @@ /* Read the descriptor data */ status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc), - rom_desc->Size, buffer); + le16_to_cpu(rom_desc->Size), buffer); if (status) goto exit; @@ -816,7 +832,7 @@ firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data; i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK; - i2c_header->Size = (__u16)buffer_size; + i2c_header->Size = cpu_to_le16(buffer_size); i2c_header->CheckSum = cs; firmware_rec->Ver_Major = OperationalMajorVersion; firmware_rec->Ver_Minor = OperationalMinorVersion; --- linux-3.13.0.orig/drivers/usb/serial/io_usbvend.h +++ linux-3.13.0/drivers/usb/serial/io_usbvend.h @@ -594,7 +594,7 @@ struct ti_i2c_desc { __u8 Type; // Type of descriptor - __u16 Size; // Size of data only not including header + __le16 Size; // Size of data only not including header __u8 CheckSum; // Checksum (8 bit sum of data only) __u8 Data[0]; // Data starts here } __attribute__((packed)); --- linux-3.13.0.orig/drivers/usb/serial/keyspan.c +++ linux-3.13.0/drivers/usb/serial/keyspan.c @@ -312,24 +312,30 @@ if ((data[0] & 0x80) == 0) { /* no errors on individual bytes, only possible overrun err */ - if (data[0] & RXERROR_OVERRUN) - err = TTY_OVERRUN; - else - err = 0; + if (data[0] & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } for (i = 1; i < urb->actual_length ; ++i) - tty_insert_flip_char(&port->port, data[i], err); + tty_insert_flip_char(&port->port, data[i], + TTY_NORMAL); } else { /* some bytes had errors, every byte has status */ dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + 
else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -416,6 +422,8 @@ } port = serial->port[msg->port]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -426,7 +434,7 @@ if (old_dcd_state != p_priv->dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -536,6 +544,8 @@ } port = serial->port[msg->port]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -546,7 +556,7 @@ if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -619,6 +629,8 @@ } port = serial->port[msg->portNumber]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -629,7 +641,7 @@ if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -667,14 +679,19 @@ } else { /* some bytes had errors, every byte has status */ for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -731,15 +748,19 @@ */ for (x = 0; x + 1 < len && i + 1 < urb->actual_length; x += 2) { - int stat = data[i], flag = 0; + int stat = data[i]; + int flag = TTY_NORMAL; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); i += 2; @@ -791,25 +812,31 @@ if ((data[0] & 0x80) == 0) { /* no errors on individual bytes, only possible overrun err*/ - if (data[0] & RXERROR_OVERRUN) - err = TTY_OVERRUN; - else - err = 0; + if (data[0] & RXERROR_OVERRUN) { + tty_insert_flip_char(&port->port, 0, + TTY_OVERRUN); + } for (i = 1; i < urb->actual_length ; ++i) tty_insert_flip_char(&port->port, - data[i], err); + data[i], TTY_NORMAL); } else { /* some bytes had errors, every byte has status */ dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); for (i = 0; i + 1 < urb->actual_length; i += 2) { - int stat = data[i], flag = 0; - if (stat & RXERROR_OVERRUN) - flag |= TTY_OVERRUN; - if (stat & RXERROR_FRAMING) - flag |= TTY_FRAME; - if (stat & RXERROR_PARITY) - flag |= TTY_PARITY; + int stat = data[i]; + int flag = TTY_NORMAL; + + if (stat & RXERROR_OVERRUN) { + tty_insert_flip_char( + &port->port, 0, + 
TTY_OVERRUN); + } /* XXX should handle break (0x10) */ + if (stat & RXERROR_PARITY) + flag = TTY_PARITY; + else if (stat & RXERROR_FRAMING) + flag = TTY_FRAME; + tty_insert_flip_char(&port->port, data[i+1], flag); } @@ -852,6 +879,8 @@ port = serial->port[0]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -862,7 +891,7 @@ if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -923,6 +952,8 @@ port = serial->port[msg->port]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -931,7 +962,7 @@ if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) --- linux-3.13.0.orig/drivers/usb/serial/keyspan_pda.c +++ linux-3.13.0/drivers/usb/serial/keyspan_pda.c @@ -62,6 +62,7 @@ /* For Xircom PGSDB9 and older Entregra version of the same device */ #define XIRCOM_VENDOR_ID 0x085a #define XIRCOM_FAKE_ID 0x8027 +#define XIRCOM_FAKE_ID_2 0x8025 /* "PGMFHUB" serial */ #define ENTREGRA_VENDOR_ID 0x1645 #define ENTREGRA_FAKE_ID 0x8093 @@ -71,6 +72,7 @@ #endif #ifdef XIRCOM { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) }, #endif { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, @@ -94,6 +96,7 @@ #ifdef XIRCOM static const struct usb_device_id id_table_fake_xircom[] = { { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, + { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID_2) }, { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) }, { } }; --- linux-3.13.0.orig/drivers/usb/serial/kobil_sct.c +++ linux-3.13.0/drivers/usb/serial/kobil_sct.c @@ -336,7 +336,8 @@ port->interrupt_out_urb->transfer_buffer_length = length; priv->cur_pos = priv->cur_pos + length; - result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO); + result = usb_submit_urb(port->interrupt_out_urb, + GFP_ATOMIC); dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result); todo = priv->filled - priv->cur_pos; @@ -351,7 +352,7 @@ if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID || priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) { result = usb_submit_urb(port->interrupt_in_urb, - GFP_NOIO); + GFP_ATOMIC); dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result); } } --- linux-3.13.0.orig/drivers/usb/serial/opticon.c +++ linux-3.13.0/drivers/usb/serial/opticon.c @@ -219,7 +219,7 @@ /* The connected devices do not have a bulk write endpoint, * to transmit data to the barcode device the control endpoint is used */ - dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); + dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); if (!dr) { dev_err(&port->dev, "out of memory\n"); count = -ENOMEM; --- linux-3.13.0.orig/drivers/usb/serial/option.c +++ linux-3.13.0/drivers/usb/serial/option.c @@ -161,6 +161,7 @@ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 #define NOVATELWIRELESS_PRODUCT_E362 0x9010 +#define NOVATELWIRELESS_PRODUCT_E371 0x9011 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 
0xB001 @@ -233,9 +234,34 @@ #define QUALCOMM_VENDOR_ID 0x05C6 +#define SIERRA_VENDOR_ID 0x1199 + #define CMOTECH_VENDOR_ID 0x16d8 -#define CMOTECH_PRODUCT_6008 0x6008 -#define CMOTECH_PRODUCT_6280 0x6280 +#define CMOTECH_PRODUCT_6001 0x6001 +#define CMOTECH_PRODUCT_CMU_300 0x6002 +#define CMOTECH_PRODUCT_6003 0x6003 +#define CMOTECH_PRODUCT_6004 0x6004 +#define CMOTECH_PRODUCT_6005 0x6005 +#define CMOTECH_PRODUCT_CGU_628A 0x6006 +#define CMOTECH_PRODUCT_CHE_628S 0x6007 +#define CMOTECH_PRODUCT_CMU_301 0x6008 +#define CMOTECH_PRODUCT_CHU_628 0x6280 +#define CMOTECH_PRODUCT_CHU_628S 0x6281 +#define CMOTECH_PRODUCT_CDU_680 0x6803 +#define CMOTECH_PRODUCT_CDU_685A 0x6804 +#define CMOTECH_PRODUCT_CHU_720S 0x7001 +#define CMOTECH_PRODUCT_7002 0x7002 +#define CMOTECH_PRODUCT_CHU_629K 0x7003 +#define CMOTECH_PRODUCT_7004 0x7004 +#define CMOTECH_PRODUCT_7005 0x7005 +#define CMOTECH_PRODUCT_CGU_629 0x7006 +#define CMOTECH_PRODUCT_CHU_629S 0x700a +#define CMOTECH_PRODUCT_CHU_720I 0x7211 +#define CMOTECH_PRODUCT_7212 0x7212 +#define CMOTECH_PRODUCT_7213 0x7213 +#define CMOTECH_PRODUCT_7251 0x7251 +#define CMOTECH_PRODUCT_7252 0x7252 +#define CMOTECH_PRODUCT_7253 0x7253 #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 @@ -243,15 +269,21 @@ #define TELIT_PRODUCT_CC864_DUAL 0x1005 #define TELIT_PRODUCT_CC864_SINGLE 0x1006 #define TELIT_PRODUCT_DE910_DUAL 0x1010 +#define TELIT_PRODUCT_UE910_V2 0x1012 #define TELIT_PRODUCT_LE920 0x1200 +#define TELIT_PRODUCT_LE910 0x1201 /* ZTE PRODUCTS */ #define ZTE_VENDOR_ID 0x19d2 #define ZTE_PRODUCT_MF622 0x0001 #define ZTE_PRODUCT_MF628 0x0015 #define ZTE_PRODUCT_MF626 0x0031 -#define ZTE_PRODUCT_MC2718 0xffe8 #define ZTE_PRODUCT_AC2726 0xfff1 +#define ZTE_PRODUCT_CDMA_TECH 0xfffe +#define ZTE_PRODUCT_AC8710T 0xffff +#define ZTE_PRODUCT_MC2718 0xffe8 +#define ZTE_PRODUCT_AD3812 0xffeb +#define ZTE_PRODUCT_MC2716 0xffed #define BENQ_VENDOR_ID 0x04a5 #define BENQ_PRODUCT_H10 0x4068 @@ -286,6 +318,7 @@ #define ALCATEL_PRODUCT_X060S_X200 0x0000 #define ALCATEL_PRODUCT_X220_X500D 0x0017 #define ALCATEL_PRODUCT_L100V 0x011e +#define ALCATEL_PRODUCT_L800MA 0x0203 #define PIRELLI_VENDOR_ID 0x1266 #define PIRELLI_PRODUCT_C100_1 0x1002 @@ -320,11 +353,18 @@ * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 +/* iBall 3.5G connect wireless modem */ +#define IBALL_3_5G_CONNECT 0x9605 + /* Zoom */ #define ZOOM_PRODUCT_4597 0x9607 +/* SpeedUp SU9800 usb 3g modem */ +#define SPEEDUP_PRODUCT_SU9800 0x9800 + /* Haier products */ #define HAIER_VENDOR_ID 0x201e +#define HAIER_PRODUCT_CE81B 0x10f8 #define HAIER_PRODUCT_CE100 0x2009 /* Cinterion (formerly Siemens) products */ @@ -343,8 +383,13 @@ /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +#define OLIVETTI_PRODUCT_OLICARD120 0xc001 +#define OLIVETTI_PRODUCT_OLICARD140 0xc002 #define OLIVETTI_PRODUCT_OLICARD145 0xc003 +#define OLIVETTI_PRODUCT_OLICARD155 0xc004 #define OLIVETTI_PRODUCT_OLICARD200 0xc005 +#define OLIVETTI_PRODUCT_OLICARD160 0xc00a +#define OLIVETTI_PRODUCT_OLICARD500 0xc00b /* Celot products */ #define CELOT_VENDOR_ID 0x211f @@ -457,6 +502,10 @@ #define INOVIA_VENDOR_ID 0x20a6 #define INOVIA_SEW858 0x1105 +/* VIA Telecom */ +#define VIATELECOM_VENDOR_ID 0x15eb +#define VIATELECOM_PRODUCT_CDS7 0x0001 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -464,7 +513,7 @@ OPTION_BLACKLIST_RESERVED_IF = 2 }; -#define 
MAX_BL_NUM 8 +#define MAX_BL_NUM 11 struct option_blacklist_info { /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */ const unsigned long sendsetup; @@ -490,14 +539,26 @@ .reserved = BIT(4), }; +static const struct option_blacklist_info zte_ad3812_z_blacklist = { + .sendsetup = BIT(0) | BIT(1) | BIT(2), +}; + static const struct option_blacklist_info zte_mc2718_z_blacklist = { .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4), }; +static const struct option_blacklist_info zte_mc2716_z_blacklist = { + .sendsetup = BIT(1) | BIT(2) | BIT(3), +}; + static const struct option_blacklist_info huawei_cdc12_blacklist = { .reserved = BIT(1) | BIT(2), }; +static const struct option_blacklist_info net_intf0_blacklist = { + .reserved = BIT(0), +}; + static const struct option_blacklist_info net_intf1_blacklist = { .reserved = BIT(1), }; @@ -531,11 +592,21 @@ .reserved = BIT(3) | BIT(4), }; +static const struct option_blacklist_info telit_le910_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(2), +}; + static const struct option_blacklist_info telit_le920_blacklist = { .sendsetup = BIT(0), .reserved = BIT(1) | BIT(5), }; +static const struct option_blacklist_info sierra_mc73xx_blacklist = { + .sendsetup = BIT(0) | BIT(2), + .reserved = BIT(8) | BIT(10) | BIT(11), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -979,6 +1050,7 @@ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, @@ -1028,16 +1100,61 @@ { USB_DEVICE_INTERFACE_CLASS(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012, 0xff) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ - { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ - { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, 
CMOTECH_PRODUCT_CHU_628S) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213), + .driver_info = (kernel_ulong_t)&net_intf0_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), + .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ @@ -1359,7 +1476,8 @@ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, @@ -1404,6 +1522,8 @@ .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 
0x1535, 0xff, 0xff, 0xff) }, @@ -1447,14 +1567,30 @@ .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) }, - /* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, @@ -1483,13 +1619,18 @@ .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, + { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) }, /* Pirelli */ { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) }, { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) }, @@ -1510,7 +1651,8 @@ /* Cinterion */ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, - { USB_DEVICE(CINTERION_VENDOR_ID, 
CINTERION_PRODUCT_PH8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, @@ -1520,12 +1662,21 @@ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, - - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), - .driver_info = (kernel_ulong_t)&net_intf6_blacklist - }, + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, @@ -1614,6 +1765,7 @@ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -1807,6 +1959,8 @@ dev_dbg(dev, "%s: type %x req %x\n", __func__, req_pkt->bRequestType, req_pkt->bRequest); } + } else if (status == -ENOENT || status == -ESHUTDOWN) { + dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status); } else dev_err(dev, "%s: error %d\n", __func__, status); --- linux-3.13.0.orig/drivers/usb/serial/pl2303.c +++ linux-3.13.0/drivers/usb/serial/pl2303.c @@ -48,6 +48,7 @@ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, @@ -83,6 +84,9 @@ { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) }, { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, { 
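The option_instat_callback() hunk above distinguishes the URB statuses that mean the URB was deliberately killed or the device is going away (-ENOENT, -ESHUTDOWN) from genuine I/O errors, so routine unlinks and disconnects are logged at debug level only. That classification can be modelled as below; the helper name and severity labels are illustrative.

#include <stdio.h>
#include <errno.h>

/* Rough model of the severity decision made in the interrupt
 * completion handler: expected teardown statuses are debug-only,
 * anything else is reported as an error. */
static const char *classify_urb_status(int status)
{
    if (status == 0)
        return "ok";
    if (status == -ENOENT || status == -ESHUTDOWN)
        return "debug: urb stopped (unlink/disconnect)";
    return "error";
}

int main(void)
{
    int samples[] = { 0, -ENOENT, -ESHUTDOWN, -EPROTO, -EPIPE };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("status %d -> %s\n", samples[i],
               classify_urb_status(samples[i]));
    return 0;
}

The pl2303 device table continues below.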
USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, @@ -142,6 +146,8 @@ spinlock_t lock; u8 line_control; u8 line_status; + + u8 line_settings[7]; }; static int pl2303_vendor_read(__u16 value, __u16 index, @@ -339,11 +345,6 @@ int i; u8 control; - /* - * The PL2303 is reported to lose bytes if you change serial settings - * even to the same values as before. Thus we actually need to filter - * in this specific case. - */ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) return; @@ -428,10 +429,29 @@ dev_dbg(&port->dev, "parity = none\n"); } - i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE, - 0, 0, buf, 7, 100); - dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i); + /* + * Some PL2303 are known to lose bytes if you change serial settings + * even to the same values as before. Thus we actually need to filter + * in this specific case. + * + * Note that the tty_termios_hw_change check above is not sufficient + * as a previously requested baud rate may differ from the one + * actually used (and stored in old_termios). + * + * NOTE: No additional locking needed for line_settings as it is + * only used in set_termios, which is serialised against itself. + */ + if (!old_termios || memcmp(buf, priv->line_settings, 7)) { + i = usb_control_msg(serial->dev, + usb_sndctrlpipe(serial->dev, 0), + SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE, + 0, 0, buf, 7, 100); + + dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i); + + if (i == 7) + memcpy(priv->line_settings, buf, 7); + } /* change control lines if we are switching to or from B0 */ spin_lock_irqsave(&priv->lock, flags); --- linux-3.13.0.orig/drivers/usb/serial/pl2303.h +++ linux-3.13.0/drivers/usb/serial/pl2303.h @@ -22,6 +22,7 @@ #define PL2303_PRODUCT_ID_GPRS 0x0609 #define PL2303_PRODUCT_ID_HCR331 0x331a #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 +#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 @@ -121,8 +122,11 @@ #define SUPERIAL_VENDOR_ID 0x5372 #define SUPERIAL_PRODUCT_ID 0x2303 -/* Hewlett-Packard LD220-HP POS Pole Display */ +/* Hewlett-Packard POS Pole Displays */ #define HP_VENDOR_ID 0x03f0 +#define HP_LD960_PRODUCT_ID 0x0b39 +#define HP_LCM220_PRODUCT_ID 0x3139 +#define HP_LCM960_PRODUCT_ID 0x3239 #define HP_LD220_PRODUCT_ID 0x3524 /* Cressi Edy (diving computer) PC interface */ --- linux-3.13.0.orig/drivers/usb/serial/qcserial.c +++ linux-3.13.0/drivers/usb/serial/qcserial.c @@ -139,6 +139,51 @@ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless 
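The pl2303 set_termios change above works around adapters that drop bytes when the line-coding request is re-sent with unchanged values: the driver keeps the last successfully transmitted 7-byte SET_LINE_REQUEST payload in line_settings[] and only issues the control transfer when the new payload differs. A stand-alone sketch of that cache, with an invented send_line_request() standing in for usb_control_msg(), is below.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define LINE_REQ_LEN 7

/* Hypothetical stand-in for the USB control transfer; returns the
 * number of bytes accepted, the way the real transfer call would. */
static int send_line_request(const uint8_t *buf)
{
    printf("sending line request: %02x %02x %02x %02x ...\n",
           buf[0], buf[1], buf[2], buf[3]);
    return LINE_REQ_LEN;
}

struct port_state {
    uint8_t line_settings[LINE_REQ_LEN]; /* last payload actually sent */
};

/* Only talk to the device when the requested settings differ from the
 * ones we know it already has. */
static void set_line(struct port_state *p, const uint8_t *buf, int have_old)
{
    if (have_old && !memcmp(buf, p->line_settings, LINE_REQ_LEN))
        return;

    if (send_line_request(buf) == LINE_REQ_LEN)
        memcpy(p->line_settings, buf, LINE_REQ_LEN);
}

int main(void)
{
    struct port_state p = { { 0 } };
    uint8_t b9600[LINE_REQ_LEN] = { 0x80, 0x25, 0, 0, 0, 0, 8 };
    uint8_t b115k[LINE_REQ_LEN] = { 0x00, 0xc2, 1, 0, 0, 0, 8 };

    set_line(&p, b9600, 0); /* first call always sends */
    set_line(&p, b9600, 1); /* identical -> suppressed */
    set_line(&p, b115k, 1); /* changed -> sent */
    return 0;
}

The qcserial device-ID additions continue below.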
MC7305/MC7355 NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)}, /* Netgear AirCard 341U Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)}, /* Netgear AirCard 341U NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)}, /* Netgear AirCard 341U Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 
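All of the Sierra Wireless, Netgear and Dell entries added above use interface-number matching rather than whole-device matching, so qcserial claims only the Device Management, NMEA and modem interfaces of these composite Gobi devices and leaves the remaining interfaces to the network driver. A simplified model of that match is shown below; the table layout is illustrative, not the kernel's usb_device_id format.

#include <stdio.h>
#include <stdint.h>

/* Simplified match entry: vendor, product and the one interface
 * number the serial driver is allowed to claim. */
struct if_match {
    uint16_t vid, pid;
    uint8_t ifnum;
};

static const struct if_match table[] = {
    { 0x1199, 0x9041, 0 }, /* MC7305/MC7355 Device Management */
    { 0x1199, 0x9041, 2 }, /* MC7305/MC7355 NMEA */
    { 0x1199, 0x9041, 3 }, /* MC7305/MC7355 Modem */
};

static int matches(uint16_t vid, uint16_t pid, uint8_t ifnum)
{
    for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].vid == vid && table[i].pid == pid &&
            table[i].ifnum == ifnum)
            return 1;
    return 0;
}

int main(void)
{
    /* The higher-numbered interface (the network function in this
     * sketch) is deliberately not matched. */
    for (uint8_t ifnum = 0; ifnum <= 8; ifnum++)
        printf("1199:9041 intf %u -> %s\n", ifnum,
               matches(0x1199, 0x9041, ifnum) ? "qcserial" : "other driver");
    return 0;
}

The remaining qcserial entries follow.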
5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */ { } /* Terminating entry */ }; --- linux-3.13.0.orig/drivers/usb/serial/sierra.c +++ linux-3.13.0/drivers/usb/serial/sierra.c @@ -281,17 +281,21 @@ /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ - { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ + /* Sierra Wireless Direct IP modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF), + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, + { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, /* AT&T Direct IP LTE modems */ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, - { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ + /* Airprime/Sierra Wireless Direct IP modems */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF), .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, - { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ { } }; --- linux-3.13.0.orig/drivers/usb/serial/ssu100.c +++ linux-3.13.0/drivers/usb/serial/ssu100.c @@ -495,10 +495,9 @@ if (*tty_flag == TTY_NORMAL) *tty_flag = TTY_FRAME; } - if (lsr & UART_LSR_OE){ + if (lsr & UART_LSR_OE) { port->icount.overrun++; - if (*tty_flag == TTY_NORMAL) - *tty_flag = TTY_OVERRUN; + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } @@ -516,12 +515,8 @@ if ((len >= 4) && (packet[0] == 0x1b) && (packet[1] == 0x1b) && ((packet[2] == 0x00) || (packet[2] == 0x01))) { - if (packet[2] == 0x00) { + if (packet[2] == 0x00) ssu100_update_lsr(port, packet[3], &flag); - if (flag == TTY_OVERRUN) - tty_insert_flip_char(&port->port, 0, - TTY_OVERRUN); - } if (packet[2] == 0x01) ssu100_update_msr(port, packet[3]); --- linux-3.13.0.orig/drivers/usb/serial/usb-serial-simple.c +++ linux-3.13.0/drivers/usb/serial/usb-serial-simple.c @@ -72,7 +72,8 @@ /* Suunto ANT+ USB Driver */ #define SUUNTO_IDS() \ - { USB_DEVICE(0x0fcf, 0x1008) } + { USB_DEVICE(0x0fcf, 0x1008) }, \ + { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ DEVICE(suunto, SUUNTO_IDS); /* Siemens USB/MPI adapter */ --- linux-3.13.0.orig/drivers/usb/serial/usb-serial.c +++ linux-3.13.0/drivers/usb/serial/usb-serial.c @@ -764,29 +764,39 @@ if (usb_endpoint_is_bulk_in(endpoint)) { /* we found a bulk in endpoint */ dev_dbg(ddev, "found bulk in on endpoint %d\n", i); - bulk_in_endpoint[num_bulk_in] = endpoint; - ++num_bulk_in; + if (num_bulk_in < MAX_NUM_PORTS) { + bulk_in_endpoint[num_bulk_in] = endpoint; + ++num_bulk_in; + } } if (usb_endpoint_is_bulk_out(endpoint)) { /* we found a bulk out endpoint */ dev_dbg(ddev, "found bulk out on endpoint %d\n", i); - bulk_out_endpoint[num_bulk_out] = endpoint; - ++num_bulk_out; + if (num_bulk_out < MAX_NUM_PORTS) { + bulk_out_endpoint[num_bulk_out] = endpoint; + ++num_bulk_out; + } } if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in on endpoint %d\n", i); - 
interrupt_in_endpoint[num_interrupt_in] = endpoint; - ++num_interrupt_in; + if (num_interrupt_in < MAX_NUM_PORTS) { + interrupt_in_endpoint[num_interrupt_in] = + endpoint; + ++num_interrupt_in; + } } if (usb_endpoint_is_int_out(endpoint)) { /* we found an interrupt out endpoint */ dev_dbg(ddev, "found interrupt out on endpoint %d\n", i); - interrupt_out_endpoint[num_interrupt_out] = endpoint; - ++num_interrupt_out; + if (num_interrupt_out < MAX_NUM_PORTS) { + interrupt_out_endpoint[num_interrupt_out] = + endpoint; + ++num_interrupt_out; + } } } @@ -809,8 +819,10 @@ if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dev_dbg(ddev, "found interrupt in for Prolific device on separate interface\n"); - interrupt_in_endpoint[num_interrupt_in] = endpoint; - ++num_interrupt_in; + if (num_interrupt_in < MAX_NUM_PORTS) { + interrupt_in_endpoint[num_interrupt_in] = endpoint; + ++num_interrupt_in; + } } } } @@ -850,6 +862,11 @@ num_ports = type->num_ports; } + if (num_ports > MAX_NUM_PORTS) { + dev_warn(ddev, "too many ports requested: %d\n", num_ports); + num_ports = MAX_NUM_PORTS; + } + serial->num_ports = num_ports; serial->num_bulk_in = num_bulk_in; serial->num_bulk_out = num_bulk_out; @@ -1348,10 +1365,12 @@ static void usb_serial_deregister(struct usb_serial_driver *device) { pr_info("USB Serial deregistering driver %s\n", device->description); + mutex_lock(&table_lock); list_del(&device->driver_list); - usb_serial_bus_deregister(device); mutex_unlock(&table_lock); + + usb_serial_bus_deregister(device); } /** --- linux-3.13.0.orig/drivers/usb/serial/usb_wwan.c +++ linux-3.13.0/drivers/usb/serial/usb_wwan.c @@ -470,6 +470,9 @@ int err; int i; + if (!port->bulk_in_size || !port->bulk_out_size) + return -ENODEV; + portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); if (!portdata) return -ENOMEM; @@ -477,9 +480,6 @@ init_usb_anchor(&portdata->delayed); for (i = 0; i < N_IN_URB; i++) { - if (!port->bulk_in_size) - break; - buffer = (u8 *)__get_free_page(GFP_KERNEL); if (!buffer) goto bail_out_error; @@ -493,9 +493,6 @@ } for (i = 0; i < N_OUT_URB; i++) { - if (!port->bulk_out_size) - break; - buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); if (!buffer) goto bail_out_error2; --- linux-3.13.0.orig/drivers/usb/serial/whiteheat.c +++ linux-3.13.0/drivers/usb/serial/whiteheat.c @@ -521,6 +521,10 @@ dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__); return; } + if (!urb->actual_length) { + dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__); + return; + } if (status) { dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status); if (status != -ENOENT) @@ -541,7 +545,8 @@ /* These are unsolicited reports from the firmware, hence no waiting command to wakeup */ dev_dbg(&urb->dev->dev, "%s - event received\n", __func__); - } else if (data[0] == WHITEHEAT_GET_DTR_RTS) { + } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) && + (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) { memcpy(command_info->result_buffer, &data[1], urb->actual_length - 1); command_info->command_finished = WHITEHEAT_CMD_COMPLETE; --- linux-3.13.0.orig/drivers/usb/serial/zte_ev.c +++ linux-3.13.0/drivers/usb/serial/zte_ev.c @@ -273,28 +273,16 @@ } static const struct usb_device_id id_table[] = { - /* AC8710, AC8710T */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffff, 0xff, 0xff, 0xff) }, - /* AC8700 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfffe, 0xff, 0xff, 0xff) }, - /* MG880 */ - { USB_DEVICE(0x19d2, 0xfffd) }, - { USB_DEVICE(0x19d2, 0xfffc) 
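The usb-serial core hunks above all apply the same fix: every endpoint counter is checked against MAX_NUM_PORTS before the descriptor pointer is stored, and the requested port count is clamped, so a device advertising an absurd number of endpoints can no longer write past the fixed-size arrays. The guard pattern, reduced to plain C with illustrative names and an assumed array size, looks roughly like this:

#include <stdio.h>

#define MAX_NUM_PORTS 8

/* Bounded collection: never store more entries than the fixed-size
 * destination can hold, even if the device reports more endpoints. */
static int collect(int reported_endpoints, int stored[MAX_NUM_PORTS])
{
    int count = 0;

    for (int ep = 0; ep < reported_endpoints; ep++) {
        if (count < MAX_NUM_PORTS) {
            stored[count] = ep;
            count++;
        }
        /* excess endpoints are simply not recorded */
    }
    return count;
}

int main(void)
{
    int eps[MAX_NUM_PORTS];
    int n = collect(40, eps);

    printf("device reported 40 endpoints, recorded %d (last = %d)\n",
           n, eps[n - 1]);
    return 0;
}

The zte_ev and usb-storage hunks continue below.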
}, - { USB_DEVICE(0x19d2, 0xfffb) }, - /* AC8710_V3 */ + { USB_DEVICE(0x19d2, 0xffec) }, + { USB_DEVICE(0x19d2, 0xffee) }, { USB_DEVICE(0x19d2, 0xfff6) }, { USB_DEVICE(0x19d2, 0xfff7) }, { USB_DEVICE(0x19d2, 0xfff8) }, { USB_DEVICE(0x19d2, 0xfff9) }, - { USB_DEVICE(0x19d2, 0xffee) }, - /* AC2716, MC2716 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffed, 0xff, 0xff, 0xff) }, - /* AD3812 */ - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xffeb, 0xff, 0xff, 0xff) }, - { USB_DEVICE(0x19d2, 0xffec) }, - { USB_DEVICE(0x05C6, 0x3197) }, - { USB_DEVICE(0x05C6, 0x6000) }, - { USB_DEVICE(0x05C6, 0x9008) }, + { USB_DEVICE(0x19d2, 0xfffb) }, + { USB_DEVICE(0x19d2, 0xfffc) }, + /* MG880 */ + { USB_DEVICE(0x19d2, 0xfffd) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); --- linux-3.13.0.orig/drivers/usb/storage/Kconfig +++ linux-3.13.0/drivers/usb/storage/Kconfig @@ -18,7 +18,9 @@ This option depends on 'SCSI' support being enabled, but you probably also need 'SCSI device support: SCSI disk support' - (BLK_DEV_SD) for most USB storage devices. + (BLK_DEV_SD) for most USB storage devices. Some devices also + will require 'Probe all LUNs on each SCSI device' + (SCSI_MULTI_LUN). To compile this driver as a module, choose M here: the module will be called usb-storage. --- linux-3.13.0.orig/drivers/usb/storage/scsiglue.c +++ linux-3.13.0/drivers/usb/storage/scsiglue.c @@ -78,6 +78,8 @@ static int slave_alloc (struct scsi_device *sdev) { + struct us_data *us = host_to_us(sdev->host); + /* * Set the INQUIRY transfer length to 36. We don't use any of * the extra data and many devices choke if asked for more or @@ -102,6 +104,10 @@ */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); + /* Tell the SCSI layer if we know there is more than one LUN */ + if (us->protocol == USB_PR_BULK && us->max_lun > 0) + sdev->sdev_bflags |= BLIST_FORCELUN; + return 0; } @@ -250,6 +256,10 @@ if (us->fflags & US_FL_WRITE_CACHE) sdev->wce_default_on = 1; + /* A few buggy USB-ATA bridges don't understand FUA */ + if (us->fflags & US_FL_BROKEN_FUA) + sdev->broken_fua = 1; + } else { /* Non-disk-type devices don't need to blacklist any pages --- linux-3.13.0.orig/drivers/usb/storage/shuttle_usbat.c +++ linux-3.13.0/drivers/usb/storage/shuttle_usbat.c @@ -1851,7 +1851,7 @@ us->transport_name = "Shuttle USBAT"; us->transport = usbat_flash_transport; us->transport_reset = usb_stor_CB_reset; - us->max_lun = 1; + us->max_lun = 0; result = usb_stor_probe2(us); return result; --- linux-3.13.0.orig/drivers/usb/storage/transport.c +++ linux-3.13.0/drivers/usb/storage/transport.c @@ -1118,6 +1118,31 @@ */ if (result == USB_STOR_XFER_LONG) fake_sense = 1; + + /* + * Sometimes a device will mistakenly skip the data phase + * and go directly to the status phase without sending a + * zero-length packet. If we get a 13-byte response here, + * check whether it really is a CSW. 
+ */ + if (result == USB_STOR_XFER_SHORT && + srb->sc_data_direction == DMA_FROM_DEVICE && + transfer_length - scsi_get_resid(srb) == + US_BULK_CS_WRAP_LEN) { + struct scatterlist *sg = NULL; + unsigned int offset = 0; + + if (usb_stor_access_xfer_buf((unsigned char *) bcs, + US_BULK_CS_WRAP_LEN, srb, &sg, + &offset, FROM_XFER_BUF) == + US_BULK_CS_WRAP_LEN && + bcs->Signature == + cpu_to_le32(US_BULK_CS_SIGN)) { + usb_stor_dbg(us, "Device skipped data phase\n"); + scsi_set_resid(srb, transfer_length); + goto skipped_data_phase; + } + } } /* See flow chart on pg 15 of the Bulk Only Transport spec for @@ -1153,6 +1178,7 @@ if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; + skipped_data_phase: /* check bulk status */ residue = le32_to_cpu(bcs->Residue); usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n", --- linux-3.13.0.orig/drivers/usb/storage/unusual_cypress.h +++ linux-3.13.0/drivers/usb/storage/unusual_cypress.h @@ -31,7 +31,7 @@ "Cypress ISD-300LP", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), -UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219, +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160, "Super Top", "USB 2.0 SATA BRIDGE", USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0), --- linux-3.13.0.orig/drivers/usb/storage/unusual_devs.h +++ linux-3.13.0/drivers/usb/storage/unusual_devs.h @@ -101,6 +101,12 @@ "PhotoSmart R707", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY), +UNUSUAL_DEV( 0x03f3, 0x0001, 0x0000, 0x9999, + "Adaptec", + "USBConnect 2000", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Sebastian Kapfer * and Olaf Hering (different bcd's, same vendor/product) * for USB floppies that need the SINGLE_LUN enforcement. @@ -234,6 +240,27 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Daniele Forsi */ +UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350, + "Nokia", + "5300", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 ), + +/* Patch submitted by Victor A. Santos */ +UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742, + "Nokia", + "305", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + +/* Patch submitted by Mikhail Zolotaryov */ +UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110, + "Nokia", + "502", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 ), + #ifdef NO_SDDR09 UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100, "Microtech", @@ -720,6 +747,12 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_SINGLE_LUN ), +UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100, + "Iomega", + "Jaz USB Adapter", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SINGLE_LUN ), + /* Reported by */ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, "LaCie", @@ -1092,6 +1125,18 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE), +UNUSUAL_DEV( 0x085a, 0x0026, 0x0100, 0x0133, + "Xircom", + "PortGear USB-SCSI (Mac USB Dock)", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + +UNUSUAL_DEV( 0x085a, 0x0028, 0x0100, 0x0133, + "Xircom", + "PortGear USB to SCSI Converter", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Submitted by Jan De Luyck */ UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000, "CITIZEN", @@ -1191,6 +1236,13 @@ USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init, 0), +/* Reported by Timo Aaltonen */ +UNUSUAL_DEV( 0x0af0, 0x7011, 0x0000, 0x9999, + "Option", + "Mass Storage", + USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init, + 0 ), + /* Reported by F. Aben * This device (wrongly) has a vendor-specific device descriptor. 
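The transport.c hunk above handles devices that mistakenly jump straight to the status phase: when the data-phase read comes back exactly 13 bytes, the buffer is inspected and, if it carries the CSW signature, treated as the status stage instead of data. A self-contained model of the signature check is sketched below; the layout follows the Bulk-Only Transport CSW, but the function and macro names are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CSW_LEN  13
#define CSW_SIGN 0x53425355u /* 'USBS', little endian on the wire */

/* Minimal Bulk-Only Transport Command Status Wrapper layout. */
struct csw {
    uint32_t signature;
    uint32_t tag;
    uint32_t residue;
    uint8_t  status;
} __attribute__((packed));

/* Decide whether a short data-phase transfer was really the device
 * skipping ahead to the status phase. */
static int looks_like_csw(const uint8_t *buf, size_t len)
{
    uint32_t sig;

    if (len != CSW_LEN)
        return 0;
    memcpy(&sig, buf, sizeof(sig)); /* wire format is little endian */
    return sig == CSW_SIGN;         /* assumes a little-endian host */
}

int main(void)
{
    uint8_t pkt[CSW_LEN] = { 'U', 'S', 'B', 'S' }; /* rest zeroed */

    printf("13-byte packet %s a CSW\n",
           looks_like_csw(pkt, sizeof(pkt)) ? "is" : "is not");
    return 0;
}

The unusual_devs.h additions continue below.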
* The entry is needed so usb-storage can bind to it's mass-storage @@ -1448,6 +1500,13 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY ), +/* Reported by Moritz Moeller-Herrmann */ +UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201, + "Research In Motion", + "BlackBerry Bold 9000", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 ), + /* Reported by Michael Stattmann */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", @@ -1908,6 +1967,13 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Reported by Michael Büsch */ +UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported by Alexandre Oliva * JMicron responds to USN and several other SCSI ioctls with a * residue that causes subsequent I/O requests to fail. */ @@ -1917,6 +1983,21 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), +/* Reported by Dmitry Nezhevenko */ +UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + +/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI) + * and Mac USB Dock USB-SCSI */ +UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133, + "Entrega Technologies", + "USB to SCSI Converter", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Robert Schedel * Note: this is a 'super top' device like the above 14cd/6600 device */ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, @@ -1939,6 +2020,12 @@ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), +UNUSUAL_DEV( 0x1822, 0x0001, 0x0000, 0x9999, + "Ariston Technologies", + "iConnect USB to SCSI adapter", + USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, + US_FL_SCM_MULT_TARG ), + /* Reported by Hans de Goede * These Appotech controllers are found in Picture Frames, they provide a * (buggy) emulation of a cdrom drive which contains the windows software --- linux-3.13.0.orig/drivers/uwb/lc-dev.c +++ linux-3.13.0/drivers/uwb/lc-dev.c @@ -431,16 +431,19 @@ uwb_dev->mac_addr = *bce->mac_addr; uwb_dev->dev_addr = bce->dev_addr; dev_set_name(&uwb_dev->dev, "%s", macbuf); + + /* plug the beacon cache */ + bce->uwb_dev = uwb_dev; + uwb_dev->bce = bce; + uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ + result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); if (result < 0) { dev_err(dev, "new device %s: cannot instantiate device\n", macbuf); goto error_dev_add; } - /* plug the beacon cache */ - bce->uwb_dev = uwb_dev; - uwb_dev->bce = bce; - uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ + dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, dev_name(rc->uwb_dev.dev.parent)); @@ -448,6 +451,8 @@ return; error_dev_add: + bce->uwb_dev = NULL; + uwb_bce_put(bce); kfree(uwb_dev); return; } --- linux-3.13.0.orig/drivers/vfio/pci/vfio_pci.c +++ linux-3.13.0/drivers/vfio/pci/vfio_pci.c @@ -821,13 +821,11 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - u8 type; struct vfio_pci_device *vdev; struct iommu_group *group; int ret; - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); - if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) return -EINVAL; group = iommu_group_get(&pdev->dev); --- linux-3.13.0.orig/drivers/vfio/vfio_iommu_type1.c +++ 
linux-3.13.0/drivers/vfio/vfio_iommu_type1.c @@ -186,12 +186,12 @@ if (pfn_valid(pfn)) { bool reserved; struct page *tail = pfn_to_page(pfn); - struct page *head = compound_trans_head(tail); + struct page *head = compound_head(tail); reserved = !!(PageReserved(head)); if (head != tail) { /* * "head" is not a dangling pointer - * (compound_trans_head takes care of that) + * (compound_head takes care of that) * but the hugepage may have been split * from under us (and we may not hold a * reference count on the head page so it can --- linux-3.13.0.orig/drivers/vhost/net.c +++ linux-3.13.0/drivers/vhost/net.c @@ -70,7 +70,12 @@ }; struct vhost_net_ubuf_ref { - struct kref kref; + /* refcount follows semantics similar to kref: + * 0: object is released + * 1: no outstanding ubufs + * >1: outstanding ubufs + */ + atomic_t refcount; wait_queue_head_t wait; struct vhost_virtqueue *vq; }; @@ -116,14 +121,6 @@ vhost_net_zcopy_mask |= 0x1 << vq; } -static void vhost_net_zerocopy_done_signal(struct kref *kref) -{ - struct vhost_net_ubuf_ref *ubufs; - - ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref); - wake_up(&ubufs->wait); -} - static struct vhost_net_ubuf_ref * vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) { @@ -134,21 +131,24 @@ ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); if (!ubufs) return ERR_PTR(-ENOMEM); - kref_init(&ubufs->kref); + atomic_set(&ubufs->refcount, 1); init_waitqueue_head(&ubufs->wait); ubufs->vq = vq; return ubufs; } -static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) +static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) { - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); + int r = atomic_sub_return(1, &ubufs->refcount); + if (unlikely(!r)) + wake_up(&ubufs->wait); + return r; } static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) { - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal); - wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); + vhost_net_ubuf_put(ubufs); + wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); } static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) @@ -306,22 +306,21 @@ { struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; struct vhost_virtqueue *vq = ubufs->vq; - int cnt = atomic_read(&ubufs->kref.refcount); + int cnt; /* set len to mark this desc buffers done DMA */ vq->heads[ubuf->desc].len = success ? VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; - vhost_net_ubuf_put(ubufs); + cnt = vhost_net_ubuf_put(ubufs); /* * Trigger polling thread if guest stopped submitting new buffers: - * in this case, the refcount after decrement will eventually reach 1 - * so here it is 2. + * in this case, the refcount after decrement will eventually reach 1. * We also trigger polling periodically after each 16 packets * (the value 16 here is more or less arbitrary, it's tuned to trigger * less than 10% of times). 
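The vhost-net hunks above replace the kref in struct vhost_net_ubuf_ref with a bare atomic counter whose value is meaningful in itself (1 means no zero-copy buffers outstanding), so the transmit path can read it cheaply and the flush path can wait for it to drain. A single-threaded model of the get/put discipline using C11 atomics is below; real code adds a wait queue where the comment marks the wakeup.

#include <stdio.h>
#include <stdatomic.h>

/* Counter semantics mirrored from the patch:
 *   0  -> object released
 *   1  -> no outstanding zero-copy buffers
 *   >1 -> outstanding buffers
 */
struct ubuf_ref {
    atomic_int refcount;
};

static void ubuf_get(struct ubuf_ref *u)
{
    atomic_fetch_add(&u->refcount, 1);
}

static int ubuf_put(struct ubuf_ref *u)
{
    int r = atomic_fetch_sub(&u->refcount, 1) - 1;

    if (r == 0) {
        /* here the kernel would wake anyone waiting in the flush
         * path for the buffers to drain */
        printf("refcount hit zero: wake waiters\n");
    }
    return r;
}

int main(void)
{
    struct ubuf_ref u = { .refcount = 1 }; /* "no outstanding ubufs" */

    ubuf_get(&u);                          /* one zero-copy packet in flight */
    printf("after get: %d\n", atomic_load(&u.refcount));
    printf("after put: %d\n", ubuf_put(&u));   /* completion */
    printf("final put: %d\n", ubuf_put(&u));   /* teardown */
    return 0;
}

The remainder of the vhost/net.c and vhost/scsi.c hunks follows.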
*/ - if (cnt <= 2 || !(cnt % 16)) + if (cnt <= 1 || !(cnt % 16)) vhost_poll_queue(&vq->poll); } @@ -420,7 +419,7 @@ msg.msg_control = ubuf; msg.msg_controllen = sizeof(ubuf); ubufs = nvq->ubufs; - kref_get(&ubufs->kref); + atomic_inc(&ubufs->refcount); nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; } else { msg.msg_control = NULL; @@ -502,9 +501,13 @@ r = -ENOBUFS; goto err; } - d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, + r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, ARRAY_SIZE(vq->iov) - seg, &out, &in, log, log_num); + if (unlikely(r < 0)) + goto err; + + d = r; if (d == vq->num) { r = 0; goto err; @@ -529,6 +532,12 @@ *iovcount = seg; if (unlikely(log)) *log_num = nlogs; + + /* Detect overrun */ + if (unlikely(datalen > 0)) { + r = UIO_MAXIOV + 1; + goto err; + } return headcount; err: vhost_discard_vq_desc(vq, headcount); @@ -584,6 +593,14 @@ /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) break; + /* On overrun, truncate and discard */ + if (unlikely(headcount > UIO_MAXIOV)) { + msg.msg_iovlen = 1; + err = sock->ops->recvmsg(NULL, sock, &msg, + 1, MSG_DONTWAIT | MSG_TRUNC); + pr_debug("Discarded rx packet: len %zd\n", sock_len); + continue; + } /* OK, now we need to know about added descriptors. */ if (!headcount) { if (unlikely(vhost_enable_notify(&net->dev, vq))) { @@ -785,7 +802,7 @@ vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = false; - kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref); + atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); } } --- linux-3.13.0.orig/drivers/vhost/scsi.c +++ linux-3.13.0/drivers/vhost/scsi.c @@ -728,7 +728,7 @@ } se_sess = tv_nexus->tvn_se_sess; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC); + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) { pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); return ERR_PTR(-ENOMEM); @@ -1115,7 +1115,7 @@ * lun[4-7] need to be zero according to virtio-scsi spec. */ evt->event.lun[0] = 0x01; - evt->event.lun[1] = tpg->tport_tpgt & 0xFF; + evt->event.lun[1] = tpg->tport_tpgt; if (lun->unpacked_lun >= 256) evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; evt->event.lun[3] = lun->unpacked_lun & 0xFF; @@ -1194,6 +1194,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct tcm_vhost_tpg **vs_tpg; @@ -1241,6 +1242,21 @@ ret = -EEXIST; goto out; } + /* + * In order to ensure individual vhost-scsi configfs + * groups cannot be removed while in use by vhost ioctl, + * go ahead and take an explicit se_tpg->tpg_group.cg_item + * dependency now. 
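The vhost-scsi endpoint hunks above pin the target's configfs group for as long as a vhost ioctl has the tpg in use (configfs_depend_item() on set-endpoint, configfs_undepend_item() on clear-endpoint), so the configfs directory cannot be removed out from under an active VM. The pairing can be modelled as a simple dependency count, as sketched below; the names are illustrative, and the real kernel calls take the configfs subsystem and cg_item instead.

#include <stdio.h>

/* Toy model of a configfs-style item that may only be removed when
 * nothing depends on it. */
struct item {
    const char *name;
    int depends; /* taken on set-endpoint, dropped on clear-endpoint */
};

static void depend_item(struct item *it)   { it->depends++; }
static void undepend_item(struct item *it) { it->depends--; }

static int try_remove(struct item *it)
{
    if (it->depends) {
        printf("%s: busy, %d user(s) hold a dependency\n",
               it->name, it->depends);
        return -1;
    }
    printf("%s: removed\n", it->name);
    return 0;
}

int main(void)
{
    struct item tpg = { "tpgt_1", 0 };

    depend_item(&tpg);   /* VHOST_SCSI_SET_ENDPOINT */
    try_remove(&tpg);    /* removal refused while the VM uses it */
    undepend_item(&tpg); /* VHOST_SCSI_CLEAR_ENDPOINT */
    try_remove(&tpg);    /* now allowed */
    return 0;
}

The rest of the vhost/scsi.c hunk continues below.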
+ */ + se_tpg = &tpg->se_tpg; + ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); + if (ret) { + pr_warn("configfs_depend_item() failed: %d\n", ret); + kfree(vs_tpg); + mutex_unlock(&tpg->tv_tpg_mutex); + goto out; + } tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; @@ -1283,6 +1299,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct vhost_virtqueue *vq; @@ -1331,6 +1348,13 @@ vs->vs_tpg[target] = NULL; match = true; mutex_unlock(&tpg->tv_tpg_mutex); + /* + * Release se_tpg->tpg_group.cg_item configfs dependency now + * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. + */ + se_tpg = &tpg->se_tpg; + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { @@ -1961,12 +1985,12 @@ struct tcm_vhost_tport, tport_wwn); struct tcm_vhost_tpg *tpg; - unsigned long tpgt; + u16 tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET) return ERR_PTR(-EINVAL); tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); --- linux-3.13.0.orig/drivers/video/aty/mach64_accel.c +++ linux-3.13.0/drivers/video/aty/mach64_accel.c @@ -4,6 +4,7 @@ */ #include +#include #include #include
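The make_tpg() change near the end of the vhost-scsi hunk tightens the parsing of the "tpgt_N" directory name: the value is parsed into a u16 and rejected unless it is below VHOST_SCSI_MAX_TARGET, matching the table it later indexes. A user-space approximation of that validation is shown below, using strtoul in place of kstrtou16 and an assumed bound of 256 standing in for VHOST_SCSI_MAX_TARGET.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define MAX_TARGET 256 /* assumed stand-in for VHOST_SCSI_MAX_TARGET */

/* Parse a configfs directory name of the form "tpgt_<n>" and make
 * sure <n> fits the table it will index. Returns the tag or -1. */
static long parse_tpgt(const char *name)
{
    char *end;
    unsigned long v;

    if (strncmp(name, "tpgt_", 5) != 0)
        return -1;

    errno = 0;
    v = strtoul(name + 5, &end, 10);
    if (errno || *end != '\0' || end == name + 5)
        return -1;
    if (v >= MAX_TARGET)
        return -1; /* would overflow the per-target table */

    return (long)v;
}

int main(void)
{
    const char *samples[] = { "tpgt_1", "tpgt_255", "tpgt_4096", "bogus" };

    for (unsigned int i = 0; i < 4; i++)
        printf("%-10s -> %ld\n", samples[i], parse_tpgt(samples[i]));
    return 0;
}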